diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 31b1cb5efe3a1..1e3b913c5cb5a 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -21,4 +21,12 @@ BWC_VERSION: - "2.8.0" - "2.8.1" - "2.9.0" + - "2.9.1" - "2.10.0" + - "2.10.1" + - "2.11.0" + - "2.11.1" + - "2.11.2" + - "2.12.0" + - "2.12.1" + - "2.13.0" diff --git a/.gitattributes b/.gitattributes index 65f909981595f..b74462afb27bd 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,4 +1,5 @@ * text eol=lf +*.jar binary *.bat binary *.zip binary *.exe binary diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 1a108c35429ae..bb12121cd3d8f 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1 +1,27 @@ -* @reta @anasalkouz @andrross @reta @Bukhtawar @CEHENKLE @dblock @gbbafna @setiah @kartg @kotwanikunal @mch2 @nknize @owaiskazi19 @Rishikesh1159 @ryanbogan @saratvemulapalli @shwetathareja @dreamer-89 @tlfeng @VachaShah @dbwiddis @sachinpkale @sohami +# CODEOWNERS manages notifications, not PR approvals +# For PR approvals see /.github/workflows/maintainer-approval.yml + +# Each file has a single rule applied; the last matching rule decides the owner +# If you would like to apply ownership more specifically, include the existing owners in the new, more specific rule + +# To verify changes to the CODEOWNERS file +# In VSCode +# 1. Install extension https://marketplace.visualstudio.com/items?itemName=jasonnutter.vscode-codeowners +# 2. Go to a file +# 3. Use the command palette to run the "CODEOWNERS: Show owners of current file" command, which will display all code owners for the current file. + +# Default ownership for all repo files +* @abbashus @adnapibar @anasalkouz @andrross @Bukhtawar @CEHENKLE @dblock @dbwiddis @dreamer-89 @gbbafna @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @ryanbogan @sachinpkale @saratvemulapalli @setiah @shwetathareja @sohami @tlfeng @VachaShah + +/modules/transport-netty4/ @peternied + +/plugins/identity-shiro/ @peternied + +/server/src/main/java/org/opensearch/extensions/ @peternied +/server/src/main/java/org/opensearch/identity/ @peternied +/server/src/main/java/org/opensearch/threadpool/ @peternied +/server/src/main/java/org/opensearch/transport/ @peternied + +/.github/ @peternied + +/MAINTAINERS.md @abbashus @adnapibar @anasalkouz @andrross @Bukhtawar @CEHENKLE @dblock @dbwiddis @dreamer-89 @gbbafna @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @peternied @reta @Rishikesh1159 @ryanbogan @sachinpkale @saratvemulapalli @setiah @shwetathareja @sohami @tlfeng @VachaShah diff --git a/.github/ISSUE_TEMPLATE/bug_template.md b/.github/ISSUE_TEMPLATE/bug_template.md deleted file mode 100644 index be3ae51b237ee..0000000000000 --- a/.github/ISSUE_TEMPLATE/bug_template.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -name: 🐛 Bug report -about: Create a report to help us improve -title: "[BUG]" -labels: 'bug, untriaged' -assignees: '' ---- - -**Describe the bug** -A clear and concise description of what the bug is. -**To Reproduce** -Steps to reproduce the behavior: -1. Go to '...' -2. Click on '....' -3. Scroll down to '....' -4. See error - -**Expected behavior** -A clear and concise description of what you expected to happen. -**Plugins** -Please list all plugins currently enabled. -**Screenshots** -If applicable, add screenshots to help explain your problem. -**Host/Environment (please complete the following information):** - - OS: [e.g. iOS] - - Version [e.g. 22] -**Additional context** -Add any other context about the problem here.
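To make the new CODEOWNERS semantics above concrete (each file gets a single rule applied, and the last matching rule wins), here is a minimal sketch; the file path is only an illustration, and the handles are taken from the rules in the diff:

```
# For a file under /server/src/main/java/org/opensearch/transport/,
# both rules below match, but the later rule wins,
# so only @peternied is notified.
*                                                @andrross @reta
/server/src/main/java/org/opensearch/transport/ @peternied
```

This is also why the new file repeats the full maintainer list on the `/MAINTAINERS.md` rule rather than relying on the `*` default: a later, more specific match replaces earlier owners instead of adding to them.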
diff --git a/.github/ISSUE_TEMPLATE/bug_template.yml b/.github/ISSUE_TEMPLATE/bug_template.yml new file mode 100644 index 0000000000000..5f0798abe0f68 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_template.yml @@ -0,0 +1,80 @@ +name: 🐛 Bug report +description: Create a report to help us improve +title: "[BUG] " +labels: ['bug, untriaged'] +body: + - type: textarea + attributes: + label: Describe the bug + description: A clear and concise description of what the bug is. + validations: + required: true + - type: dropdown + attributes: + label: Related component + description: Choose a specific OpenSearch component your bug belongs to. If you are unsure which to select or if the component is not present, select "Other". + multiple: false + options: + - # Empty first option to force selection + - Build + - Clients + - Cluster Manager + - Extensions + - Indexing:Performance + - Indexing:Replication + - Indexing + - Libraries + - Other + - Plugins + - Search:Aggregations + - Search:Performance + - Search:Query Capabilities + - Search:Query Insights + - Search:Relevance + - Search:Remote Search + - Search:Resiliency + - Search:Searchable Snapshots + - Search + - Storage:Durability + - Storage:Performance + - Storage:Remote + - Storage:Snapshots + - Storage + validations: + required: true + - type: textarea + attributes: + label: To Reproduce + description: Steps to reproduce the behavior. + value: | + 1. Go to '...' + 2. Click on '....' + 3. Scroll down to '....' + 4. See error + validations: + required: true + - type: textarea + attributes: + label: Expected behavior + description: A clear and concise description of what you expected to happen. + validations: + required: true + - type: textarea + attributes: + label: Additional Details + description: Add any other context about the problem here. + value: | + **Plugins** + Please list all plugins currently enabled. + + **Screenshots** + If applicable, add screenshots to help explain your problem. + + **Host/Environment (please complete the following information):** + - OS: [e.g. iOS] + - Version [e.g. 22] + + **Additional context** + Add any other context about the problem here. + validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index 53b3614a34342..0000000000000 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -name: 🎆 Feature request -about: Suggest an idea for this project -title: '' -labels: 'enhancement, untriaged' -assignees: '' ---- - -**Is your feature request related to a problem? Please describe.** -A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] - -**Describe the solution you'd like** -A clear and concise description of what you want to happen. - -**Describe alternatives you've considered** -A clear and concise description of any alternative solutions or features you've considered. - -**Additional context** -Add any other context or screenshots about the feature request here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml new file mode 100644 index 0000000000000..c9df17bad9576 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -0,0 +1,67 @@ +name: 🎆 Feature request +description: Suggest an idea for this project +title: '[Feature Request] <title>' +labels: ['enhancement, untriaged'] +body: + - type: textarea + attributes: + label: Is your feature request related to a problem? 
Please describe + description: A clear and concise description of what the problem is. + placeholder: Ex. I'm always frustrated when [...] + validations: + required: true + - type: textarea + attributes: + label: Describe the solution you'd like + description: A clear and concise description of what you want to happen. + validations: + required: true + - type: dropdown + attributes: + label: Related component + description: Choose a specific OpenSearch component your feature request belongs to. If you are unsure of which component to select or if the component is not present, select "Other". + multiple: false + options: + - # Empty first option to force selection + - Build + - Clients + - Cluster Manager + - Extensions + - Indexing:Performance + - Indexing:Replication + - Indexing + - Libraries + - Other + - Plugins + - Search:Aggregations + - Search:Performance + - Search:Query Capabilities + - Search:Query Insights + - Search:Relevance + - Search:Remote Search + - Search:Resiliency + - Search:Searchable Snapshots + - Search + - Storage:Durability + - Storage:Performance + - Storage:Remote + - Storage:Snapshots + - Storage + - ShardManagement:Placement + - ShardManagement:Performance + - ShardManagement:Resiliency + - ShardManagement:Insights + validations: + required: true + - type: textarea + attributes: + label: Describe alternatives you've considered + description: A clear and concise description of any alternative solutions or features you've considered. + validations: + required: false + - type: textarea + attributes: + label: Additional context + description: Add any other context or screenshots about the feature request here. + validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/meta.yml b/.github/ISSUE_TEMPLATE/meta.yml new file mode 100644 index 0000000000000..b766a26bc3ff2 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/meta.yml @@ -0,0 +1,58 @@ +name: ✨ Meta Issue +description: An issue that collects other issues together to describe a larger project or activity. +title: '[META] <title>' +labels: ['Meta, untriaged'] +body: + - type: textarea + attributes: + label: Please describe the end goal of this project + description: A clear and concise description of this project/endeavor. This should be understandable to someone with no context. + placeholder: Ex. Views is a way to project indices in OpenSearch; these views act as a focal point for describing the underlying data and how the data is accessed. It allows for restricting the scope and filtering the response consistently. + validations: + required: true + - type: textarea + attributes: + label: Supporting References + description: Please provide links (and descriptions!) to RFCs, design docs, etc. + validations: + required: true + - type: textarea + attributes: + label: Issues + description: Please create a list of issues that should be tracked by this meta issue, including a short description. The purpose is to provide everyone on the project with an "at a glance" update of the state of the work being tracked. If you use the format "- [ ]" it will put your list into a checklist. + placeholder: Ex. - [ ] https://github.com/opensearch-project/security/issues/3888 Add views to the cluster metadata schema + validations: + required: true + - type: dropdown + attributes: + label: Related component + description: Choose a specific OpenSearch component your project belongs to. If you are unsure of which component to select or if the component is not present, select "Other".
+ multiple: false + options: + - # Empty first option to force selection + - Build + - Clients + - Cluster Manager + - Extensions + - Indexing:Performance + - Indexing:Replication + - Indexing + - Libraries + - Other + - Plugins + - Search:Aggregations + - Search:Performance + - Search:Query Capabilities + - Search:Query Insights + - Search:Relevance + - Search:Remote Search + - Search:Resiliency + - Search:Searchable Snapshots + - Search + - Storage:Durability + - Storage:Performance + - Storage:Remote + - Storage:Snapshots + - Storage + validations: + required: true diff --git a/.github/dependabot.yml b/.github/dependabot.yml index f49d899170d49..8c4f4d59ea1fc 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -7,6 +7,14 @@ updates: labels: - "dependabot" - "dependencies" + - directory: / + open-pull-requests-limit: 1 + package-ecosystem: "github-actions" + schedule: + interval: "weekly" + labels: + - "dependabot" + - "dependencies" - directory: /benchmarks/ open-pull-requests-limit: 1 package-ecosystem: gradle @@ -703,6 +711,14 @@ updates: labels: - "dependabot" - "dependencies" + - directory: /modules/crypto/ + open-pull-requests-limit: 1 + package-ecosystem: gradle + schedule: + interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /plugins/ open-pull-requests-limit: 1 package-ecosystem: gradle @@ -967,6 +983,14 @@ updates: labels: - "dependabot" - "dependencies" + - directory: /plugins/crypto-kms/ + open-pull-requests-limit: 1 + package-ecosystem: gradle + schedule: + interval: weekly + labels: + - "dependabot" + - "dependencies" - directory: /qa/ open-pull-requests-limit: 1 package-ecosystem: gradle diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 69616e533d1ed..908a032bf833e 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -17,8 +17,10 @@ Resolves #[Issue number to be closed when this PR is merged] - [ ] All tests pass - [ ] New functionality has been documented. - [ ] New functionality has javadoc added +- [ ] Failing checks are inspected and point to the corresponding known issue(s) (See: [Troubleshooting Failing Builds](../blob/main/CONTRIBUTING.md#troubleshooting-failing-builds)) - [ ] Commits are signed per the DCO using --signoff - [ ] Commit changes are listed out in CHANGELOG.md file (See: [Changelog](../blob/main/CONTRIBUTING.md#changelog)) +- [ ] Public documentation issue/PR [created](https://github.com/opensearch-project/documentation-website/issues/new/choose) By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. For more information on following Developer Certificate of Origin and signing off your commits, please check [here](https://github.com/opensearch-project/OpenSearch/blob/main/CONTRIBUTING.md#developer-certificate-of-origin). 
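Since the updated pull request template above asks contributors to sign their commits per the DCO, a quick reference may help; these are standard git flags, not additions from this diff:

```bash
# Create a commit with the DCO sign-off trailer ("Signed-off-by: Name <email>")
git commit --signoff -m "Fix typo in CONTRIBUTING.md"

# Add a sign-off to the most recent commit after the fact
git commit --amend --signoff --no-edit
```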
diff --git a/.github/workflows/add-untriaged.yml b/.github/workflows/add-untriaged.yml deleted file mode 100644 index 15b9a55651254..0000000000000 --- a/.github/workflows/add-untriaged.yml +++ /dev/null @@ -1,19 +0,0 @@ -name: Apply 'untriaged' label during issue lifecycle - -on: - issues: - types: [opened, reopened, transferred] - -jobs: - apply-label: - runs-on: ubuntu-latest - steps: - - uses: actions/github-script@v6 - with: - script: | - github.rest.issues.addLabels({ - issue_number: context.issue.number, - owner: context.repo.owner, - repo: context.repo.repo, - labels: ['untriaged'] - }) diff --git a/.github/workflows/assemble.yml b/.github/workflows/assemble.yml new file mode 100644 index 0000000000000..382105364c048 --- /dev/null +++ b/.github/workflows/assemble.yml @@ -0,0 +1,27 @@ +name: Gradle Assemble +on: [pull_request] + +jobs: + assemble: + if: github.repository == 'opensearch-project/OpenSearch' + runs-on: ${{ matrix.os }} + strategy: + matrix: + java: [ 11, 17, 21 ] + os: [ubuntu-latest, windows-latest, macos-latest] + steps: + - uses: actions/checkout@v4 + - name: Set up JDK ${{ matrix.java }} + uses: actions/setup-java@v4 + with: + java-version: ${{ matrix.java }} + distribution: temurin + - name: Setup docker (missing on MacOS) + if: runner.os == 'macos' + run: | + brew install docker + colima start + sudo ln -sf $HOME/.colima/default/docker.sock /var/run/docker.sock + - name: Run Gradle (assemble) + run: | + ./gradlew assemble --parallel --no-build-cache -PDISABLE_BUILD_CACHE diff --git a/.github/workflows/auto-release.yml b/.github/workflows/auto-release.yml index b8d3912c5864a..252cbda1392f8 100644 --- a/.github/workflows/auto-release.yml +++ b/.github/workflows/auto-release.yml @@ -14,7 +14,7 @@ jobs: steps: - name: GitHub App token id: github_app_token - uses: tibdex/github-app-token@v1.5.0 + uses: tibdex/github-app-token@v2.1.0 with: app_id: ${{ secrets.APP_ID }} private_key: ${{ secrets.APP_PRIVATE_KEY }} @@ -22,7 +22,7 @@ jobs: - name: Get tag id: tag uses: dawidd6/action-get-tag@v1 - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - uses: ncipollo/release-action@v1 with: github_token: ${{ steps.github_app_token.outputs.token }} diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 2b95fb2510bdd..2a95177174e9b 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -26,14 +26,15 @@ jobs: steps: - name: GitHub App token id: github_app_token - uses: tibdex/github-app-token@v1.5.0 + uses: tibdex/github-app-token@v2.1.0 with: app_id: ${{ secrets.APP_ID }} private_key: ${{ secrets.APP_PRIVATE_KEY }} installation_id: 22958780 - name: Backport - uses: VachaShah/backport@v2.1.0 + uses: VachaShah/backport@v2.2.0 with: github_token: ${{ steps.github_app_token.outputs.token }} head_template: backport/backport-<%= number %>-to-<%= base %> + failure_labels: backport-failed diff --git a/.github/workflows/changelog_verifier.yml b/.github/workflows/changelog_verifier.yml index 992a38b624d7a..9456fbf8b4ca0 100644 --- a/.github/workflows/changelog_verifier.yml +++ b/.github/workflows/changelog_verifier.yml @@ -6,9 +6,10 @@ on: jobs: # Enforces the update of a changelog file on every pull request verify-changelog: + if: github.repository == 'opensearch-project/OpenSearch' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: token: ${{ secrets.GITHUB_TOKEN }} ref: ${{ github.event.pull_request.head.sha }} diff --git a/.github/workflows/check-compatibility.yml 
b/.github/workflows/check-compatibility.yml new file mode 100644 index 0000000000000..b2f22a90938cc --- /dev/null +++ b/.github/workflows/check-compatibility.yml @@ -0,0 +1,69 @@ +--- +name: Check Compatibility + +on: + pull_request_target + +jobs: + check-compatibility: + if: github.repository == 'opensearch-project/OpenSearch' + permissions: + contents: read + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha }} + + - name: Increase swapfile + run: | + sudo swapoff -a + sudo fallocate -l 10G /swapfile + sudo chmod 600 /swapfile + sudo mkswap /swapfile + sudo swapon /swapfile + sudo swapon --show + + - name: Run compatibility task + run: ./gradlew checkCompatibility -i | tee $HOME/gradlew-check.out + + - name: Get results + run: | + echo '## Compatibility status:' > "${{ github.workspace }}/results.txt" + echo "Checks if related components are compatible with change $(git rev-parse --short HEAD)" >> "${{ github.workspace }}/results.txt" + echo "### Incompatible components" >> "${{ github.workspace }}/results.txt" && grep -e 'Incompatible component' $HOME/gradlew-check.out | sed -e 's/Incompatible component: \[\(.*\)\]/- \1/' >> "${{ github.workspace }}/results.txt" + echo "### Skipped components" >> "${{ github.workspace }}/results.txt" && grep -e 'Skipped component' $HOME/gradlew-check.out | sed -e 's/Skipped component: \[\(.*\)\]/- \1/' >> "${{ github.workspace }}/results.txt" + echo "### Compatible components" >> "${{ github.workspace }}/results.txt" && grep -e 'Compatible component' $HOME/gradlew-check.out | sed -e 's/Compatible component: \[\(.*\)\]/- \1/' >> "${{ github.workspace }}/results.txt" + + - name: Upload results + uses: actions/upload-artifact@v4 + with: + name: results.txt + path: ${{ github.workspace }}/results.txt + + add-comment: + needs: [check-compatibility] + permissions: + pull-requests: write + runs-on: ubuntu-latest + steps: + - name: Download results + uses: actions/download-artifact@v4 + with: + name: results.txt + + - name: Find Comment + uses: peter-evans/find-comment@v3 + id: fc + with: + issue-number: ${{ github.event.number }} + comment-author: 'github-actions[bot]' + body-includes: 'Compatibility status:' + + - name: Add comment on the PR + uses: peter-evans/create-or-update-comment@v4 + with: + comment-id: ${{ steps.fc.outputs.comment-id }} + issue-number: ${{ github.event.number }} + body-path: results.txt + edit-mode: replace diff --git a/.github/workflows/copy-linked-issue-labels.yml b/.github/workflows/copy-linked-issue-labels.yml new file mode 100644 index 0000000000000..33b5e92dc10da --- /dev/null +++ b/.github/workflows/copy-linked-issue-labels.yml @@ -0,0 +1,21 @@ +name: Copy labels from linked issues +on: + pull_request_target: + types: [opened, edited, review_requested, synchronize, reopened, ready_for_review] + +jobs: + copy-issue-labels: + if: github.repository == 'opensearch-project/OpenSearch' + runs-on: ubuntu-latest + permissions: + issues: read + contents: read + pull-requests: write + steps: + - name: copy-issue-labels + uses: michalvankodev/copy-issue-labels@v1.3.0 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + labels-to-exclude: | + untriaged + triaged diff --git a/.github/workflows/create-documentation-issue.yml b/.github/workflows/create-documentation-issue.yml index c81f7355a0d22..b45e053cc25c2 100644 --- a/.github/workflows/create-documentation-issue.yml +++ b/.github/workflows/create-documentation-issue.yml @@ -14,14 +14,14 @@ jobs: steps: - name: GitHub App 
token id: github_app_token - uses: tibdex/github-app-token@v1.5.0 + uses: tibdex/github-app-token@v2.1.0 with: app_id: ${{ secrets.APP_ID }} private_key: ${{ secrets.APP_PRIVATE_KEY }} installation_id: 22958780 - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Edit the issue template run: | @@ -29,7 +29,7 @@ jobs: - name: Create Issue From File id: create-issue - uses: peter-evans/create-issue-from-file@v4 + uses: peter-evans/create-issue-from-file@v5 with: title: Add documentation related to new feature content-filepath: ./ci/documentation/issue.md diff --git a/.github/workflows/dependabot_pr.yml b/.github/workflows/dependabot_pr.yml index 6f5058d9df54c..e6feb3b852ad0 100644 --- a/.github/workflows/dependabot_pr.yml +++ b/.github/workflows/dependabot_pr.yml @@ -11,14 +11,14 @@ jobs: steps: - name: GitHub App token id: github_app_token - uses: tibdex/github-app-token@v1.5.0 + uses: tibdex/github-app-token@v2.1.0 with: app_id: ${{ secrets.APP_ID }} private_key: ${{ secrets.APP_PRIVATE_KEY }} installation_id: 22958780 - name: Check out code - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: token: ${{ steps.github_app_token.outputs.token }} @@ -27,7 +27,7 @@ jobs: ./gradlew updateSHAs - name: Commit the changes - uses: stefanzweifel/git-auto-commit-action@v4.7.2 + uses: stefanzweifel/git-auto-commit-action@v5 with: commit_message: Updating SHAs branch: ${{ github.head_ref }} @@ -40,7 +40,7 @@ jobs: ./gradlew spotlessApply - name: Commit the changes - uses: stefanzweifel/git-auto-commit-action@v4.7.2 + uses: stefanzweifel/git-auto-commit-action@v5 with: commit_message: Spotless formatting branch: ${{ github.head_ref }} @@ -54,7 +54,7 @@ jobs: version: 'Unreleased 2.x' - name: Commit the changes - uses: stefanzweifel/git-auto-commit-action@v4 + uses: stefanzweifel/git-auto-commit-action@v5 with: commit_message: "Update changelog" branch: ${{ github.head_ref }} diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index f895dfc2c1f4d..1f5c187c28e7d 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -13,6 +13,7 @@ permissions: jobs: gradle-check: + if: github.repository == 'opensearch-project/OpenSearch' permissions: contents: read # to fetch code (actions/checkout) pull-requests: write # to create or update comment (peter-evans/create-or-update-comment) @@ -22,7 +23,7 @@ jobs: timeout-minutes: 130 steps: - name: Checkout OpenSearch repo - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: ref: ${{ github.event.pull_request.head.sha }} @@ -49,7 +50,7 @@ jobs: echo "pr_number=Null" >> $GITHUB_ENV - name: Checkout opensearch-build repo - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: repository: opensearch-project/opensearch-build ref: main @@ -71,20 +72,17 @@ jobs: - name: Upload Coverage Report if: success() - uses: codecov/codecov-action@v2 + uses: codecov/codecov-action@v4 with: files: ./codeCoverage.xml - name: Create Comment Success if: ${{ github.event_name == 'pull_request_target' && success() && env.result == 'SUCCESS' }} - uses: peter-evans/create-or-update-comment@v2 + uses: peter-evans/create-or-update-comment@v4 with: issue-number: ${{ env.pr_number }} body: | - ### Gradle Check (Jenkins) Run Completed with: - * **RESULT:** ${{ env.result }} :white_check_mark: - * **URL:** ${{ env.workflow_url }} - * **CommitID:** ${{ env.pr_from_sha }} + :white_check_mark: Gradle check result for ${{ env.pr_from_sha }}: [${{ env.result }}](${{ env.workflow_url 
}}) - name: Extract Test Failure if: ${{ github.event_name == 'pull_request_target' && env.result != 'SUCCESS' }} @@ -103,28 +101,23 @@ jobs: - name: Create Comment Flaky if: ${{ github.event_name == 'pull_request_target' && success() && env.result != 'SUCCESS' }} - uses: peter-evans/create-or-update-comment@v2 + uses: peter-evans/create-or-update-comment@v4 with: issue-number: ${{ env.pr_number }} body: | - ### Gradle Check (Jenkins) Run Completed with: - * **RESULT:** ${{ env.result }} :grey_exclamation: ${{ env.test_failures }} - * **URL:** ${{ env.workflow_url }} - * **CommitID:** ${{ env.pr_from_sha }} + :grey_exclamation: Gradle check result for ${{ env.pr_from_sha }}: [${{ env.result }}](${{ env.workflow_url }}) ${{ env.test_failures }} + Please review all [flaky tests](https://github.com/opensearch-project/OpenSearch/blob/main/DEVELOPER_GUIDE.md#flaky-tests) that succeeded after retry and create an issue if one does not already exist to track the flaky failure. - name: Create Comment Failure if: ${{ github.event_name == 'pull_request_target' && failure() }} - uses: peter-evans/create-or-update-comment@v2 + uses: peter-evans/create-or-update-comment@v4 with: issue-number: ${{ env.pr_number }} body: | - ### Gradle Check (Jenkins) Run Completed with: - * **RESULT:** ${{ env.result }} :x: ${{ env.test_failures }} - * **URL:** ${{ env.workflow_url }} - * **CommitID:** ${{ env.pr_from_sha }} - Please examine the workflow log, locate, and copy-paste the failure(s) below, then iterate to green. - Is the failure [a flaky test](https://github.com/opensearch-project/OpenSearch/blob/main/DEVELOPER_GUIDE.md#flaky-tests) unrelated to your change? + :x: Gradle check result for ${{ env.pr_from_sha }}: [${{ env.result }}](${{ env.workflow_url }}) + + Please examine the workflow log, locate, and copy-paste the failure(s) below, then iterate to green. Is the failure [a flaky test](https://github.com/opensearch-project/OpenSearch/blob/main/DEVELOPER_GUIDE.md#flaky-tests) unrelated to your change? - name: Create Issue On Push Failure if: ${{ github.event_name == 'push' && failure() }} diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index ac94f5ef5ec5e..1c83821e22804 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -10,10 +10,10 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: lychee Link Checker id: lychee - uses: lycheeverse/lychee-action@v1.2.0 + uses: lycheeverse/lychee-action@v1.9.3 with: args: --accept=200,403,429 --exclude-mail **/*.html **/*.md **/*.txt **/*.json --exclude-file .lychee.excludes fail: true diff --git a/.github/workflows/lucene-snapshots.yml b/.github/workflows/lucene-snapshots.yml index 78e7b7b269cf4..05ca93e7be2aa 100644 --- a/.github/workflows/lucene-snapshots.yml +++ b/.github/workflows/lucene-snapshots.yml @@ -7,12 +7,14 @@ on: # Inputs the workflow accepts. inputs: ref: - description: + description: 'Lucene ref in github.com/apache/lucene' + type: string required: false default: 'main' jobs: publish-snapshots: + if: github.repository == 'opensearch-project/OpenSearch' runs-on: ubuntu-latest # These permissions are needed to interact with GitHub's OIDC Token endpoint. 
permissions: @@ -20,40 +22,60 @@ jobs: contents: read steps: - - uses: actions/checkout@v2 - - name: Set up JDK 17 - uses: actions/setup-java@v2 - with: - java-version: '17' - distribution: 'adopt' - - - name: Checkout Lucene - uses: actions/checkout@v2 + - name: Checkout Lucene ref:${{ github.event.inputs.ref }} + uses: actions/checkout@v4 with: repository: 'apache/lucene' - path: lucene ref: ${{ github.event.inputs.ref }} - - name: Set hash - working-directory: ./lucene + - name: Get Java Min Version and Lucene Revision from Lucene Repository run: | - echo "::set-output name=REVISION::$(git rev-parse --short HEAD)" - id: version + java_version=`cat build.gradle | grep minJavaVersion | head -1 | grep -Eo '_[0-9]+$' | tr -d '_'` + echo "JAVA_VERSION=$java_version" >> $GITHUB_ENV + echo "REVISION=$(git rev-parse --short HEAD)" >> $GITHUB_ENV + + - name: Setup JDK ${{ env.JAVA_VERSION }} + uses: actions/setup-java@v4 + with: + java-version: ${{ env.JAVA_VERSION }} + distribution: 'temurin' - name: Initialize gradle settings - working-directory: ./lucene run: ./gradlew localSettings - name: Publish Lucene to local maven repo. - working-directory: ./lucene - run: ./gradlew publishJarsPublicationToMavenLocal -Pversion.suffix=snapshot-${{ steps.version.outputs.REVISION }} + run: ./gradlew publishJarsPublicationToMavenLocal -Pversion.suffix=snapshot-${{ env.REVISION }} + + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.LUCENE_SNAPSHOTS_SECRET_ROLE }} + aws-region: us-east-1 + + - name: Get S3 Bucket + id: get_s3_bucket + run: | + lucene_snapshots_bucket=`aws secretsmanager get-secret-value --secret-id jenkins-artifact-bucket-name --query SecretString --output text` + echo "::add-mask::$lucene_snapshots_bucket" + echo "LUCENE_SNAPSHOTS_BUCKET=$lucene_snapshots_bucket" >> $GITHUB_OUTPUT + + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.LUCENE_SNAPSHOTS_S3_ROLE }} + aws-region: us-east-1 + + - name: Copy files to S3 with the aws CLI (New) + run: | + aws s3 cp ~/.m2/repository/org/apache/lucene/ s3://${{ steps.get_s3_bucket.outputs.LUCENE_SNAPSHOTS_BUCKET }}/snapshots/lucene/org/apache/lucene/ --recursive --no-progress - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@v1 + uses: aws-actions/configure-aws-credentials@v4 with: role-to-assume: ${{ secrets.LUCENE_SNAPSHOTS_ROLE }} aws-region: us-west-2 - - name: Copy files to S3 with the aws CLI. 
+ # We will remove this step once all the lucene snapshots old links are updated with the new one + - name: Copy files to S3 with the aws CLI (Old) run: | aws s3 cp ~/.m2/repository/org/apache/lucene/ s3://${{ secrets.LUCENE_SNAPSHOTS_BUCKET }}/snapshots/lucene/org/apache/lucene/ --recursive --no-progress diff --git a/.github/workflows/maintainer-approval.yml b/.github/workflows/maintainer-approval.yml new file mode 100644 index 0000000000000..fdc2bf16937b4 --- /dev/null +++ b/.github/workflows/maintainer-approval.yml @@ -0,0 +1,32 @@ +name: Maintainers approval + +on: + pull_request_review: + +jobs: + maintainer-approved-check: + name: Minimum approval count + runs-on: ubuntu-latest + steps: + - id: find-maintainers + uses: actions/github-script@v7.0.1 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + result-encoding: string + script: | + // Get the collaborators - filtered to maintainer permissions + const maintainersResponse = await github.request('GET /repos/{owner}/{repo}/collaborators', { + owner: context.repo.owner, + repo: context.repo.repo, + permission: 'maintain', + affiliation: 'all', + per_page: 100 + }); + + return maintainersResponse.data.map(item => item.login).join(', '); + + - uses: peternied/required-approval@v1.3 + with: + token: ${{ secrets.GITHUB_TOKEN }} + min-required: 1 + required-approvers-list: ${{ steps.find-maintainers.outputs.result }} diff --git a/.github/workflows/poc-checklist.yml b/.github/workflows/poc-checklist.yml index 2dfb1bbe5cdce..1b4f6b31e02f8 100644 --- a/.github/workflows/poc-checklist.yml +++ b/.github/workflows/poc-checklist.yml @@ -11,7 +11,7 @@ jobs: issues: write steps: - name: Add comment - uses: peter-evans/create-or-update-comment@v2 + uses: peter-evans/create-or-update-comment@v4 with: issue-number: ${{ github.event.issue.number }} body: | diff --git a/.github/workflows/precommit.yml b/.github/workflows/precommit.yml index 8bbba657737c8..800aacec98516 100644 --- a/.github/workflows/precommit.yml +++ b/.github/workflows/precommit.yml @@ -3,17 +3,20 @@ on: [pull_request] jobs: precommit: + if: github.repository == 'opensearch-project/OpenSearch' runs-on: ${{ matrix.os }} strategy: matrix: - os: [windows-latest, macos-latest] # precommit on ubuntu-latest is run as a part of the gradle-check workflow + java: [ 11, 17, 21 ] + os: [ubuntu-latest, windows-latest, macos-latest] steps: - - uses: actions/checkout@v2 - - name: Set up JDK 11 - uses: actions/setup-java@v2 + - uses: actions/checkout@v4 + - name: Set up JDK ${{ matrix.java }} + uses: actions/setup-java@v4 with: - java-version: 11 - distribution: adopt - - name: Run Gradle + java-version: ${{ matrix.java }} + distribution: temurin + cache: gradle + - name: Run Gradle (precommit) run: | ./gradlew javadoc precommit --parallel diff --git a/.github/workflows/publish-maven-snapshots.yml b/.github/workflows/publish-maven-snapshots.yml index 08007931a9c42..1b2db22c7c20b 100644 --- a/.github/workflows/publish-maven-snapshots.yml +++ b/.github/workflows/publish-maven-snapshots.yml @@ -10,6 +10,7 @@ on: jobs: build-and-publish-snapshots: + if: github.repository == 'opensearch-project/OpenSearch' runs-on: ubuntu-latest permissions: @@ -17,15 +18,15 @@ jobs: contents: write steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up JDK 17 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: adopt java-version: 17 - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@v1 + uses: aws-actions/configure-aws-credentials@v4 
with: role-to-assume: ${{ secrets.PUBLISH_SNAPSHOTS_ROLE }} aws-region: us-east-1 diff --git a/.github/workflows/pull-request-checks.yml b/.github/workflows/pull-request-checks.yml new file mode 100644 index 0000000000000..7efcf529588ed --- /dev/null +++ b/.github/workflows/pull-request-checks.yml @@ -0,0 +1,28 @@ +name: Pull Request Checks + +on: + pull_request: + types: + [ + opened, + edited, + review_requested, + synchronize, + reopened, + ready_for_review, + ] + +jobs: + verify-description-checklist: + name: Verify Description Checklist + runs-on: ubuntu-latest + steps: + - uses: peternied/check-pull-request-description-checklist@v1.1 + with: + checklist-items: | + New functionality includes testing. + All tests pass + New functionality has been documented. + New functionality has javadoc added + Commits are signed per the DCO using --signoff + Commit changes are listed out in CHANGELOG.md file (See: [Changelog](../blob/main/CONTRIBUTING.md#changelog)) diff --git a/.github/workflows/stalled.yml b/.github/workflows/stalled.yml index 0d03049a2e23c..d171332b402f1 100644 --- a/.github/workflows/stalled.yml +++ b/.github/workflows/stalled.yml @@ -1,4 +1,4 @@ -name: Close Stalled PRs +name: Label Stalled PRs on: schedule: - cron: '15 15 * * *' # Run every day at 15:15 UTC / 7:15 PST / 8:15 PDT @@ -6,23 +6,23 @@ permissions: pull-requests: write jobs: stale: + if: github.repository == 'opensearch-project/OpenSearch' runs-on: ubuntu-latest steps: - name: GitHub App token id: github_app_token - uses: tibdex/github-app-token@v1.5.0 + uses: tibdex/github-app-token@v2.1.0 with: app_id: ${{ secrets.APP_ID }} private_key: ${{ secrets.APP_PRIVATE_KEY }} installation_id: 22958780 - name: Stale PRs - uses: actions/stale@v8 + uses: actions/stale@v9 with: repo-token: ${{ steps.github_app_token.outputs.token }} stale-pr-label: 'stalled' - stale-pr-message: 'This PR is stalled because it has been open for 30 days with no activity. Remove stalled label or comment or this will be closed in 7 days.' - close-pr-message: 'This PR was closed because it has been stalled for 7 days with no activity.' + stale-pr-message: 'This PR is stalled because it has been open for 30 days with no activity.' 
days-before-pr-stale: 30 - days-before-pr-close: 7 days-before-issue-stale: -1 + days-before-pr-close: -1 days-before-issue-close: -1 diff --git a/.github/workflows/triage.yml b/.github/workflows/triage.yml new file mode 100644 index 0000000000000..83bf4926a8c2d --- /dev/null +++ b/.github/workflows/triage.yml @@ -0,0 +1,34 @@ +name: Auto triage based on the component label in issue + +on: + issues: + types: [opened, reopened, transferred] + +jobs: + apply-label: + if: github.repository == 'opensearch-project/OpenSearch' + runs-on: ubuntu-latest + steps: + - uses: actions/github-script@v7.0.1 + with: + script: | + const { issue, repository } = context.payload; + const { number, body } = issue; + const { owner, name } = repository; + const regex = /###\sRelated\scomponent\n\n(\w.*)\n/gm; + let match; + while ( ( match = regex.exec( body ) ) ) { + const [ , component_label ] = match; + await github.rest.issues.addLabels( { + owner: owner.login, + repo: name, + issue_number: number, + labels: [ `${ component_label }` ], + } ); + } + github.rest.issues.addLabels({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + labels: ['untriaged'] + }) diff --git a/.github/workflows/version.yml b/.github/workflows/version.yml index 764a365e7411c..be2a89ac931e9 100644 --- a/.github/workflows/version.yml +++ b/.github/workflows/version.yml @@ -1,27 +1,32 @@ name: Increment Version on: + workflow_dispatch: + inputs: + tag: + description: 'The tag to increment the version from' + required: true + type: string push: tags: - '*.*.*' -permissions: {} +permissions: + contents: write + issues: write + pull-requests: write + jobs: build: + if: github.repository == 'opensearch-project/OpenSearch' runs-on: ubuntu-latest steps: - - name: GitHub App token - id: github_app_token - uses: tibdex/github-app-token@v1.5.0 - with: - app_id: ${{ secrets.APP_ID }} - private_key: ${{ secrets.APP_PRIVATE_KEY }} - installation_id: 22958780 - - - uses: actions/checkout@v2 - - name: Fetch Tag and Version Information + - name: Fetch tag and version information run: | TAG=$(echo "${GITHUB_REF#refs/*/}") + if [ -n "${{ github.event.inputs.tag }}" ]; then + TAG="${{ github.event.inputs.tag }}" + fi CURRENT_VERSION_ARRAY=($(echo "$TAG" | tr . '\n')) BASE=$(IFS=. ; echo "${CURRENT_VERSION_ARRAY[*]:0:2}") BASE_X=$(IFS=.
; echo "${CURRENT_VERSION_ARRAY[*]:0:1}.x") @@ -43,24 +48,22 @@ jobs: echo "NEXT_VERSION=$NEXT_VERSION" >> $GITHUB_ENV echo "NEXT_VERSION_UNDERSCORE=$NEXT_VERSION_UNDERSCORE" >> $GITHUB_ENV echo "NEXT_VERSION_ID=$NEXT_VERSION_ID" >> $GITHUB_ENV - - uses: actions/checkout@v2 + + - uses: actions/checkout@v4 with: ref: ${{ env.BASE }} - token: ${{ steps.github_app_token.outputs.token }} - - name: Increment Patch Version - run: | - echo Incrementing $CURRENT_VERSION to $NEXT_VERSION - echo " - \"$CURRENT_VERSION\"" >> .ci/bwcVersions - sed -i "s/opensearch = $CURRENT_VERSION/opensearch = $NEXT_VERSION/g" buildSrc/version.properties - echo Adding $NEXT_VERSION_UNDERSCORE after $CURRENT_VERSION_UNDERSCORE - sed -i "s/public static final Version $CURRENT_VERSION_UNDERSCORE = new Version(\([[:digit:]]\+\)\(.*\));/\0\n public static final Version $NEXT_VERSION_UNDERSCORE = new Version($NEXT_VERSION_ID\2);/g" libs/core/src/main/java/org/opensearch/Version.java - sed -i "s/CURRENT = $CURRENT_VERSION_UNDERSCORE;/CURRENT = $NEXT_VERSION_UNDERSCORE;/g" libs/core/src/main/java/org/opensearch/Version.java + - name: Increment Patch Version on Major.Minor branch + uses: peternied/opensearch-core-version-updater@v1 + with: + previous-version: ${{ env.CURRENT_VERSION }} + new-version: ${{ env.NEXT_VERSION }} + update-current: true - - name: Create Pull Request - uses: peter-evans/create-pull-request@v3 + - name: Create PR for BASE + id: base_pr + uses: peter-evans/create-pull-request@v5 with: - token: ${{ steps.github_app_token.outputs.token }} base: ${{ env.BASE }} branch: 'create-pull-request/patch-${{ env.BASE }}' commit-message: Increment version to ${{ env.NEXT_VERSION }} @@ -72,22 +75,21 @@ jobs: body: | I've noticed that a new tag ${{ env.TAG }} was pushed, and incremented the version from ${{ env.CURRENT_VERSION }} to ${{ env.NEXT_VERSION }}. - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 with: ref: ${{ env.BASE_X }} - token: ${{ steps.github_app_token.outputs.token }} - - name: Add bwc version to .X branch - run: | - echo Adding bwc version $NEXT_VERSION after $CURRENT_VERSION - sed -i "s/- \"$CURRENT_VERSION\"/\0\n - \"$NEXT_VERSION\"/g" .ci/bwcVersions - echo Adding $NEXT_VERSION_UNDERSCORE after $CURRENT_VERSION_UNDERSCORE - sed -i "s/public static final Version $CURRENT_VERSION_UNDERSCORE = new Version(\([[:digit:]]\+\)\(.*\));/\0\n public static final Version $NEXT_VERSION_UNDERSCORE = new Version($NEXT_VERSION_ID\2);/g" libs/core/src/main/java/org/opensearch/Version.java + - name: Add Patch Version on Major.X branch + uses: peternied/opensearch-core-version-updater@v1 + with: + previous-version: ${{ env.CURRENT_VERSION }} + new-version: ${{ env.NEXT_VERSION }} + update-current: false - - name: Create Pull Request - uses: peter-evans/create-pull-request@v3 + - name: Create PR for BASE_X + id: base_x_pr + uses: peter-evans/create-pull-request@v5 with: - token: ${{ steps.github_app_token.outputs.token }} base: ${{ env.BASE_X }} branch: 'create-pull-request/patch-${{ env.BASE_X }}' commit-message: Add bwc version ${{ env.NEXT_VERSION }} @@ -99,22 +101,21 @@ jobs: body: | I've noticed that a new tag ${{ env.TAG }} was pushed, and added a bwc version ${{ env.NEXT_VERSION }}. 
- - uses: actions/checkout@v2 + - uses: actions/checkout@v4 with: ref: main - token: ${{ steps.github_app_token.outputs.token }} - - name: Add bwc version to main branch - run: | - echo Adding bwc version $NEXT_VERSION after $CURRENT_VERSION - sed -i "s/- \"$CURRENT_VERSION\"/\0\n - \"$NEXT_VERSION\"/g" .ci/bwcVersions - echo Adding $NEXT_VERSION_UNDERSCORE after $CURRENT_VERSION_UNDERSCORE - sed -i "s/public static final Version $CURRENT_VERSION_UNDERSCORE = new Version(\([[:digit:]]\+\)\(.*\));/\0\n public static final Version $NEXT_VERSION_UNDERSCORE = new Version($NEXT_VERSION_ID\2);/g" libs/core/src/main/java/org/opensearch/Version.java + - name: Add Patch Version on main branch + uses: peternied/opensearch-core-version-updater@v1 + with: + previous-version: ${{ env.CURRENT_VERSION }} + new-version: ${{ env.NEXT_VERSION }} + update-current: false - - name: Create Pull Request - uses: peter-evans/create-pull-request@v3 + - name: Create PR for main + id: main_pr + uses: peter-evans/create-pull-request@v5 with: - token: ${{ steps.github_app_token.outputs.token }} base: main branch: 'create-pull-request/patch-main' commit-message: Add bwc version ${{ env.NEXT_VERSION }} @@ -125,3 +126,32 @@ jobs: title: '[AUTO] [main] Add bwc version ${{ env.NEXT_VERSION }}.' body: | I've noticed that a new tag ${{ env.TAG }} was pushed, and added a bwc version ${{ env.NEXT_VERSION }}. + + - name: Create tracking issue + id: create-issue + uses: actions/github-script@v7.0.1 + with: + script: | + const body = ` + ### Description + A new version of OpenSearch was released; to prepare for the next release, new version numbers need to be updated in all active branches of development. + + ### Exit Criteria + Review and merge the following pull requests + - [ ] ${{ steps.base_pr.outputs.pull-request-url }} + - [ ] ${{ steps.base_x_pr.outputs.pull-request-url }} + - [ ] ${{ steps.main_pr.outputs.pull-request-url }} + + ### Additional Context + See project-wide guidance on branching and versions [[link]](https://github.com/opensearch-project/.github/blob/main/RELEASING.md).
+ ` + const { data: issue }= await github.rest.issues.create({ + owner: context.repo.owner, + repo: context.repo.repo, + labels: ["Build"], + title: "Increment version for ${{ env.NEXT_VERSION }}", + body: body + }); + console.error(JSON.stringify(issue)); + return issue.number; + result-encoding: string diff --git a/.github/workflows/wrapper.yml b/.github/workflows/wrapper.yml index c3e0aae98cde2..dcf2a09717d28 100644 --- a/.github/workflows/wrapper.yml +++ b/.github/workflows/wrapper.yml @@ -4,7 +4,8 @@ on: [pull_request] jobs: validate: name: Validate + if: github.repository == 'opensearch-project/OpenSearch' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 - - uses: gradle/wrapper-validation-action@v1 + - uses: actions/checkout@v4 + - uses: gradle/wrapper-validation-action@v2 diff --git a/.idea/inspectionProfiles/Project_Default.xml b/.idea/inspectionProfiles/Project_Default.xml index 89e81c64fe205..f090532aecfc2 100644 --- a/.idea/inspectionProfiles/Project_Default.xml +++ b/.idea/inspectionProfiles/Project_Default.xml @@ -3,7 +3,7 @@ <option name="myName" value="Project Default" /> <inspection_tool class="GroovyPointlessBoolean" enabled="false" level="WARNING" enabled_by_default="false" /> <inspection_tool class="JavadocDeclaration" enabled="true" level="WARNING" enabled_by_default="true"> - <option name="ADDITIONAL_TAGS" value="opensearch.experimental" /> + <option name="ADDITIONAL_TAGS" value="opensearch.experimental,opensearch.internal,opensearch.api" /> </inspection_tool> <inspection_tool class="PointlessBooleanExpression" enabled="false" level="WARNING" enabled_by_default="false"> <option name="m_ignoreExpressionsContainingConstants" value="true" /> diff --git a/CHANGELOG.md b/CHANGELOG.md index 12dae4fca545e..7d3880b4fe3bd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,17 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add getter for path field in NestedQueryBuilder ([#4636](https://github.com/opensearch-project/OpenSearch/pull/4636)) - Allow mmap to use new JDK-19 preview APIs in Apache Lucene 9.4+ ([#5151](https://github.com/opensearch-project/OpenSearch/pull/5151)) - Add events correlation engine plugin ([#6854](https://github.com/opensearch-project/OpenSearch/issues/6854)) +- Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679), [#10664](https://github.com/opensearch-project/OpenSearch/pull/10664)) +- Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618)) +- GHA to verify checklist items completion in PR descriptions ([#10800](https://github.com/opensearch-project/OpenSearch/pull/10800)) +- Allow to pass the list settings through environment variables (like [], ["a", "b", "c"], ...) 
([#10625](https://github.com/opensearch-project/OpenSearch/pull/10625)) +- [S3 Repository] Add setting to control connection count for sync client ([#12028](https://github.com/opensearch-project/OpenSearch/pull/12028)) +- Views, simplify data access and manipulation by providing a virtual layer over one or more indices ([#11957](https://github.com/opensearch-project/OpenSearch/pull/11957)) +- Add Remote Store Migration Experimental flag and allow mixed mode clusters under same ([#11986](https://github.com/opensearch-project/OpenSearch/pull/11986)) +- Remote reindex: Add support for configurable retry mechanism ([#12561](https://github.com/opensearch-project/OpenSearch/pull/12561)) +- [Admission Control] Integrate IO Usage Tracker to the Resource Usage Collector Service and Emit IO Usage Stats ([#11880](https://github.com/opensearch-project/OpenSearch/pull/11880)) +- Tracing for deep search path ([#12103](https://github.com/opensearch-project/OpenSearch/pull/12103)) +- [Admission Control] Integrated IO Based AdmissionController to AdmissionControl Framework ([#12583](https://github.com/opensearch-project/OpenSearch/pull/12583)) ### Dependencies - Bump `log4j-core` from 2.18.0 to 2.19.0 @@ -16,7 +27,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `avro` from 1.11.1 to 1.11.2 - Bump `woodstox-core` from 6.3.0 to 6.3.1 - Bump `xmlbeans` from 5.1.0 to 5.1.1 ([#4354](https://github.com/opensearch-project/OpenSearch/pull/4354)) -- Bump `reactor-netty-core` from 1.0.19 to 1.0.22 ([#4447](https://github.com/opensearch-project/OpenSearch/pull/4447)) - Bump `reactive-streams` from 1.0.3 to 1.0.4 ([#4488](https://github.com/opensearch-project/OpenSearch/pull/4488)) - Bump `jempbox` from 1.8.16 to 1.8.17 ([#4550](https://github.com/opensearch-project/OpenSearch/pull/4550)) - Update to Gradle 7.6 and JDK-19 ([#4973](https://github.com/opensearch-project/OpenSearch/pull/4973)) @@ -28,7 +38,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.google.code.gson:gson` from 2.10 to 2.10.1 - Bump `com.maxmind.geoip2:geoip2` from 4.0.0 to 4.0.1 - Bump `com.avast.gradle:gradle-docker-compose-plugin` from 0.16.11 to 0.16.12 -- Bump `org.apache.commons:commons-compress` from 1.22 to 1.23.0 - Bump `org.apache.commons:commons-configuration2` from 2.8.0 to 2.9.0 - Bump `com.netflix.nebula:nebula-publishing-plugin` from 19.2.0 to 20.3.0 - Bump `io.opencensus:opencensus-api` from 0.18.0 to 0.31.1 ([#7291](https://github.com/opensearch-project/OpenSearch/pull/7291)) @@ -38,6 +47,15 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.bouncycastle:bcprov-jdk15on` to `org.bouncycastle:bcprov-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) - Bump `org.bouncycastle:bcmail-jdk15on` to `org.bouncycastle:bcmail-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) - Bump `org.bouncycastle:bcpkix-jdk15on` to `org.bouncycastle:bcpkix-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) +- Bump JNA version from 5.5 to 5.13 ([#9963](https://github.com/opensearch-project/OpenSearch/pull/9963)) +- Bump `org.eclipse.jgit` from 6.5.0 to 6.7.0 ([#10147](https://github.com/opensearch-project/OpenSearch/pull/10147)) +- Bump OpenTelemetry from 1.30.1 to 1.31.0 ([#10617](https://github.com/opensearch-project/OpenSearch/pull/10617)) +- Bump OpenTelemetry from 1.31.0 to 1.32.0 and 
OpenTelemetry Semconv from 1.21.0-alpha to 1.23.1-alpha ([#11305](https://github.com/opensearch-project/OpenSearch/pull/11305)) +- Bump `org.bouncycastle:bcprov-jdk15to18` to `org.bouncycastle:bcprov-jdk18on` version 1.77 ([#12317](https://github.com/opensearch-project/OpenSearch/pull/12317)) +- Bump `org.bouncycastle:bcmail-jdk15to18` to `org.bouncycastle:bcmail-jdk18on` version 1.77 ([#12317](https://github.com/opensearch-project/OpenSearch/pull/12317)) +- Bump `org.bouncycastle:bcpkix-jdk15to18` to `org.bouncycastle:bcpkix-jdk18on` version 1.77 ([#12317](https://github.com/opensearch-project/OpenSearch/pull/12317)) +- Bump Jackson version from 2.16.1 to 2.16.2 ([#12611](https://github.com/opensearch-project/OpenSearch/pull/12611)) +- Bump `aws-sdk-java` from 2.20.55 to 2.20.86 ([#12251](https://github.com/opensearch-project/OpenSearch/pull/12251)) ### Changed - [CCR] Add getHistoryOperationsFromTranslog method to fetch the history snapshot from translogs ([#3948](https://github.com/opensearch-project/OpenSearch/pull/3948)) @@ -45,9 +63,11 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459)) - Change http code on create index API with bad input raising NotXContentException from 500 to 400 ([#4773](https://github.com/opensearch-project/OpenSearch/pull/4773)) - Improve summary error message for invalid setting updates ([#4792](https://github.com/opensearch-project/OpenSearch/pull/4792)) -- Remote Segment Store Repository setting moved from `index.remote_store.repository` to `index.remote_store.segment.repository` and `cluster.remote_store.repository` to `cluster.remote_store.segment.repository` respectively for Index and Cluster level settings ([#8719](https://github.com/opensearch-project/OpenSearch/pull/8719)) -- [Remote Store] Add support to restore only unassigned shards of an index ([#8792](https://github.com/opensearch-project/OpenSearch/pull/8792)) -- Replace the deprecated IndexReader APIs with new storedFields() & termVectors() ([#7792](https://github.com/opensearch-project/OpenSearch/pull/7792)) +- Return 409 Conflict HTTP status instead of 503 on failure to concurrently execute snapshots ([#8986](https://github.com/opensearch-project/OpenSearch/pull/5855)) +- Add task completion count in search backpressure stats API ([#10028](https://github.com/opensearch-project/OpenSearch/pull/10028/)) +- Deprecate CamelCase `PathHierarchy` tokenizer name in favor to lowercase `path_hierarchy` ([#10894](https://github.com/opensearch-project/OpenSearch/pull/10894)) +- Switched to more reliable OpenSearch Lucene snapshot location([#11728](https://github.com/opensearch-project/OpenSearch/pull/11728)) +- Breaking change: Do not request "search_pipelines" metrics by default in NodesInfoRequest ([#12497](https://github.com/opensearch-project/OpenSearch/pull/12497)) ### Deprecated @@ -66,46 +86,80 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Remove LegacyESVersion.V_7_10_ Constants ([#5018](https://github.com/opensearch-project/OpenSearch/pull/5018)) - Remove Version.V_1_ Constants ([#5021](https://github.com/opensearch-project/OpenSearch/pull/5021)) - Remove custom Map, List and Set collection classes ([#6871](https://github.com/opensearch-project/OpenSearch/pull/6871)) -- Remove provision to create Remote Indices without Remote Translog Store 
([#8719](https://github.com/opensearch-project/OpenSearch/pull/8719)) ### Fixed - Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827)) - Fix compression support for h2c protocol ([#4944](https://github.com/opensearch-project/OpenSearch/pull/4944)) +- Don't over-allocate in HeapBufferedAsyncEntityConsumer in order to consume the response ([#9993](https://github.com/opensearch-project/OpenSearch/pull/9993)) +- Update supported version for max_shard_size parameter in Shrink API ([#11439](https://github.com/opensearch-project/OpenSearch/pull/11439)) +- Fix typo in API annotation check message ([11836](https://github.com/opensearch-project/OpenSearch/pull/11836)) +- Update supported version for must_exist parameter in update aliases API ([#11872](https://github.com/opensearch-project/OpenSearch/pull/11872)) +- [Bug] Check phase name before SearchRequestOperationsListener onPhaseStart ([#12035](https://github.com/opensearch-project/OpenSearch/pull/12035)) +- Fix Span operation names generated from RestActions ([#12005](https://github.com/opensearch-project/OpenSearch/pull/12005)) +- Fix error in RemoteSegmentStoreDirectory when debug logging is enabled ([#12328](https://github.com/opensearch-project/OpenSearch/pull/12328)) ### Security ## [Unreleased 2.x] ### Added -- Add server version as REST response header [#6583](https://github.com/opensearch-project/OpenSearch/issues/6583) -- Start replication checkpointTimers on primary before segments upload to remote store. ([#8221]()https://github.com/opensearch-project/OpenSearch/pull/8221) +- [Tiered caching] Introducing cache plugins and exposing Ehcache as one of the pluggable disk cache option ([#11874](https://github.com/opensearch-project/OpenSearch/pull/11874)) +- Add support for dependencies in plugin descriptor properties with semver range ([#11441](https://github.com/opensearch-project/OpenSearch/pull/11441)) +- Add community_id ingest processor ([#12121](https://github.com/opensearch-project/OpenSearch/pull/12121)) +- Introduce query level setting `index.query.max_nested_depth` limiting nested queries ([#3268](https://github.com/opensearch-project/OpenSearch/issues/3268) +- Add toString methods to MultiSearchRequest, MultiGetRequest and CreateIndexRequest ([#12163](https://github.com/opensearch-project/OpenSearch/pull/12163)) +- Support for returning scores in matched queries ([#11626](https://github.com/opensearch-project/OpenSearch/pull/11626)) +- Add shard id property to SearchLookup for use in field types provided by plugins ([#1063](https://github.com/opensearch-project/OpenSearch/pull/1063)) +- Force merge API supports performing on primary shards only ([#11269](https://github.com/opensearch-project/OpenSearch/pull/11269)) +- [Tiered caching] Make IndicesRequestCache implementation configurable [EXPERIMENTAL] ([#12533](https://github.com/opensearch-project/OpenSearch/pull/12533)) +- Add kuromoji_completion analyzer and filter ([#4835](https://github.com/opensearch-project/OpenSearch/issues/4835)) +- The org.opensearch.bootstrap.Security should support codebase for JAR files with classifiers ([#12586](https://github.com/opensearch-project/OpenSearch/issues/12586)) +- Make search query counters dynamic to support all query types ([#12601](https://github.com/opensearch-project/OpenSearch/pull/12601)) ### Dependencies -- Bump `org.apache.logging.log4j:log4j-core` from 2.17.1 to 2.20.0 
([#8307](https://github.com/opensearch-project/OpenSearch/pull/8307)) -- Bump `io.grpc:grpc-context` from 1.46.0 to 1.56.1 ([#8726](https://github.com/opensearch-project/OpenSearch/pull/8726)) -- Bump `com.netflix.nebula:gradle-info-plugin` from 12.1.5 to 12.1.6 ([#8724](https://github.com/opensearch-project/OpenSearch/pull/8724)) -- Bump `commons-codec:commons-codec` from 1.15 to 1.16.0 ([#8725](https://github.com/opensearch-project/OpenSearch/pull/8725)) -- Bump `org.apache.zookeeper:zookeeper` from 3.8.1 to 3.8.2 ([#8844](https://github.com/opensearch-project/OpenSearch/pull/8844)) -- Bump `org.gradle.test-retry` from 1.5.3 to 1.5.4 ([#8842](https://github.com/opensearch-project/OpenSearch/pull/8842)) -- Bump `com.netflix.nebula.ospackage-base` from 11.3.0 to 11.4.0 ([#8838](https://github.com/opensearch-project/OpenSearch/pull/8838)) -- Bump `com.google.http-client:google-http-client-gson` from 1.43.2 to 1.43.3 ([#8840](https://github.com/opensearch-project/OpenSearch/pull/8840)) -- OpenJDK Update (July 2023 Patch releases) ([#8868](https://github.com/opensearch-project/OpenSearch/pull/8868) -- Bump `hadoop` libraries from 3.3.4 to 3.3.6 ([#6995](https://github.com/opensearch-project/OpenSearch/pull/6995)) +- Bump `peter-evans/find-comment` from 2 to 3 ([#12288](https://github.com/opensearch-project/OpenSearch/pull/12288)) +- Bump `com.google.api.grpc:proto-google-common-protos` from 2.25.1 to 2.33.0 ([#12289](https://github.com/opensearch-project/OpenSearch/pull/12289)) +- Bump `com.squareup.okio:okio` from 3.7.0 to 3.8.0 ([#12290](https://github.com/opensearch-project/OpenSearch/pull/12290)) +- Bump `gradle/wrapper-validation-action` from 1 to 2 ([#12367](https://github.com/opensearch-project/OpenSearch/pull/12367)) +- Bump `netty` from 4.1.106.Final to 4.1.107.Final ([#12372](https://github.com/opensearch-project/OpenSearch/pull/12372)) +- Bump `opentelemetry` from 1.34.1 to 1.36.0 ([#12388](https://github.com/opensearch-project/OpenSearch/pull/12388), [#12618](https://github.com/opensearch-project/OpenSearch/pull/12618)) +- Bump Apache Lucene from 9.9.2 to 9.10.0 ([#12392](https://github.com/opensearch-project/OpenSearch/pull/12392)) +- Bump `org.apache.logging.log4j:log4j-core` from 2.22.1 to 2.23.1 ([#12464](https://github.com/opensearch-project/OpenSearch/pull/12464), [#12587](https://github.com/opensearch-project/OpenSearch/pull/12587)) +- Bump `antlr4` from 4.11.1 to 4.13.1 ([#12445](https://github.com/opensearch-project/OpenSearch/pull/12445)) +- Bump `com.netflix.nebula.ospackage-base` from 11.8.0 to 11.8.1 ([#12461](https://github.com/opensearch-project/OpenSearch/pull/12461)) +- Bump `peter-evans/create-or-update-comment` from 3 to 4 ([#12462](https://github.com/opensearch-project/OpenSearch/pull/12462)) +- Bump `lycheeverse/lychee-action` from 1.9.1 to 1.9.3 ([#12521](https://github.com/opensearch-project/OpenSearch/pull/12521)) +- Bump `com.azure:azure-core` from 1.39.0 to 1.47.0 ([#12520](https://github.com/opensearch-project/OpenSearch/pull/12520)) +- Bump `ch.qos.logback:logback-core` from 1.2.13 to 1.5.3 ([#12519](https://github.com/opensearch-project/OpenSearch/pull/12519)) +- Bump `codecov/codecov-action` from 3 to 4 ([#12585](https://github.com/opensearch-project/OpenSearch/pull/12585)) +- Bump `org.apache.zookeeper:zookeeper` from 3.9.1 to 3.9.2 ([#12580](https://github.com/opensearch-project/OpenSearch/pull/12580)) +- Bump `org.codehaus.woodstox:stax2-api` from 4.2.1 to 4.2.2 ([#12579](https://github.com/opensearch-project/OpenSearch/pull/12579)) +- Bump Jackson 
version from 2.16.1 to 2.16.2 ([#12611](https://github.com/opensearch-project/OpenSearch/pull/12611)) +- Bump `aws-sdk-java` from 2.20.55 to 2.20.86 ([#12251](https://github.com/opensearch-project/OpenSearch/pull/12251)) +- Bump `reactor-netty` from 1.1.15 to 1.1.17 ([#12633](https://github.com/opensearch-project/OpenSearch/pull/12633)) +- Bump `reactor` from 3.5.14 to 3.5.15 ([#12633](https://github.com/opensearch-project/OpenSearch/pull/12633)) ### Changed -- Perform aggregation postCollection in ContextIndexSearcher after searching leaves ([#8303](https://github.com/opensearch-project/OpenSearch/pull/8303)) -- Make Span exporter configurable ([#8620](https://github.com/opensearch-project/OpenSearch/issues/8620)) -- Change InternalSignificantTerms to sum shard-level superset counts only in final reduce ([#8735](https://github.com/opensearch-project/OpenSearch/pull/8735)) -- Exclude 'benchmarks' from codecov report ([#8805](https://github.com/opensearch-project/OpenSearch/pull/8805)) -- [Refactor] MediaTypeParser to MediaTypeParserRegistry ([#8636](https://github.com/opensearch-project/OpenSearch/pull/8636)) -- Create separate SourceLookup instance per segment slice in SignificantTextAggregatorFactory ([#8807](https://github.com/opensearch-project/OpenSearch/pull/8807)) +- Allow composite aggregation to run under a parent filter aggregation ([#11499](https://github.com/opensearch-project/OpenSearch/pull/11499)) +- Quickly compute terms aggregations when the top-level query is functionally match-all for a segment ([#11643](https://github.com/opensearch-project/OpenSearch/pull/11643)) ### Deprecated ### Removed ### Fixed +- Fix deserialization bug in weighted round-robin metadata ([#11679](https://github.com/opensearch-project/OpenSearch/pull/11679)) +- [Revert] [Bug] Check phase name before SearchRequestOperationsListener onPhaseStart ([#12035](https://github.com/opensearch-project/OpenSearch/pull/12035)) +- Add support for a special WrappingSearchAsyncActionPhase so that onPhaseStart() will always be followed by onPhaseEnd() within AbstractSearchAsyncAction ([#12293](https://github.com/opensearch-project/OpenSearch/pull/12293)) +- Add a system property to configure YamlParser codepoint limits ([#12298](https://github.com/opensearch-project/OpenSearch/pull/12298)) +- Prevent read beyond slice boundary in ByteArrayIndexInput ([#10481](https://github.com/opensearch-project/OpenSearch/issues/10481)) +- Fix the "highlight.max_analyzer_offset" request parameter with "plain" highlighter ([#10919](https://github.com/opensearch-project/OpenSearch/pull/10919)) +- Prevent unnecessary fetch sub phase processor initialization during fetch phase execution ([#12503](https://github.com/opensearch-project/OpenSearch/pull/12503)) +- Warn about deprecated and ignored index.mapper.dynamic index setting ([#11193](https://github.com/opensearch-project/OpenSearch/pull/11193)) +- Fix `terms` query on `float` field when `doc_values` are turned off by reverting back to `FloatPoint` from `FloatField` ([#12499](https://github.com/opensearch-project/OpenSearch/pull/12499)) +- Fix get task API not refreshing resource stats ([#11531](https://github.com/opensearch-project/OpenSearch/pull/11531)) +- Fix OpenSearch JVM crash when onShardResult and onShardFailure are executed on one shard ([#12158](https://github.com/opensearch-project/OpenSearch/pull/12158)) ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD -[Unreleased 2.x]: 
https://github.com/opensearch-project/OpenSearch/compare/2.10...2.x +[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.12...2.x diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d379d78829318..4a1162cf2558b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -8,6 +8,7 @@ - [Developer Certificate of Origin](#developer-certificate-of-origin) - [Changelog](#changelog) - [Review Process](#review-process) + - [Troubleshooting Failing Builds](#troubleshooting-failing-builds) # Contributing to OpenSearch @@ -162,3 +163,14 @@ During the PR process, expect that there will be some back-and-forth. Please try If we accept the PR, a [maintainer](MAINTAINERS.md) will merge your change and usually take care of backporting it to appropriate branches ourselves. If we reject the PR, we will close the pull request with a comment explaining why. This decision isn't always final: if you feel we have misunderstood your intended change or otherwise think that we should reconsider then please continue the conversation with a comment on the PR and we'll do our best to address any further points you raise. + +## Troubleshooting Failing Builds + +The OpenSearch testing framework offers many capabilities but exhibits significant complexity (it does a lot of randomization internally to cover as many edge cases and variations as possible). Unfortunately, this poses a challenge by making it harder to discover important issues/bugs in a straightforward way and may lead to so-called flaky tests: tests which flip randomly from success to failure without any code changes. + +If your pull request reports failing test(s) on one of the checks, please: + - check if there is an existing [issue](https://github.com/opensearch-project/OpenSearch/issues) reported for the test in question + - if not, please make sure this is not caused by your changes; run the failing test(s) locally for some time (see the example command after this list) + - if you are sure the failure is not related, please open a new [bug](https://github.com/opensearch-project/OpenSearch/issues/new?assignees=&labels=bug%2C+untriaged&projects=&template=bug_template.md&title=%5BBUG%5D) with the `flaky-test` label + - add a comment referencing the issue(s) or bug report(s) to your pull request explaining the failing build(s) + - as a bonus, try to contribute by fixing the flaky test(s)
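+
+For instance, a suspected flaky test can be exercised repeatedly with fresh random seeds before concluding the failure is unrelated to your change. A minimal sketch (the module and test class below are placeholders for your failing test; `-Dtests.iters` and the other randomized-testing options are described in [TESTING.md](TESTING.md)):
+
+```bash
+# Repeat the suspect test 20 times, each iteration with a different random seed
+./gradlew ':server:test' --tests "org.opensearch.common.SomeSuspectedFlakyTests" -Dtests.iters=20
+```
diff --git a/DEVELOPER_GUIDE.md b/DEVELOPER_GUIDE.md index 0baf626142238..f0851fc58d444 100644 --- a/DEVELOPER_GUIDE.md +++ b/DEVELOPER_GUIDE.md @@ -183,6 +183,12 @@ Run OpenSearch using `gradlew run`. ./gradlew run ``` +[Plugins](plugins/) may be installed by passing a `-PinstalledPlugins` property: + +```bash +./gradlew run -PinstalledPlugins="['plugin1', 'plugin2']" +``` + That will build OpenSearch and start it, writing its log above Gradle's status message. We log a lot of stuff on startup, specifically these lines tell you that OpenSearch is ready. ``` @@ -264,7 +270,10 @@ This repository is split into many top level directories. The most important one ### `distribution` -Builds our tar and zip archives and our rpm and deb packages. +Builds our tar and zip archives and our rpm and deb packages. 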
There are several flavors of the distributions, with the classifier included in the name of the final deliverable (archive or package): + - default (no classifier), the distribution with the bundled JDK + - `-no-jdk-` - the distribution without a bundled JDK/JRE, which assumes the JDK/JRE is pre-installed on the target systems + - `-jre-` - the distribution bundled with a JRE (smaller footprint), supported as an experimental feature on some platforms ### `libs` @@ -339,7 +348,7 @@ Please follow these formatting guidelines: * Wildcard imports (`import foo.bar.baz.*`) are forbidden and will cause the build to fail. * If *absolutely* necessary, you can disable formatting for regions of code with the `// tag::NAME` and `// end::NAME` directives, but note that these are intended for use in documentation, so please make it clear what you have done, and only do this where the benefit clearly outweighs the decrease in consistency. * Note that JavaDoc and block comments i.e. `/* ... */` are not formatted, but line comments i.e `// ...` are. -* There is an implicit rule that negative boolean expressions should use the form `foo == false` instead of `!foo` for better readability of the code. While this isn't strictly enforced, if might get called out in PR reviews as something to change. +* There is an implicit rule that negative boolean expressions should use the form `foo == false` instead of `!foo` for better readability of the code. While this isn't strictly enforced, it might get called out in PR reviews as something to change. ## Adding Dependencies @@ -566,13 +575,19 @@ use Version checks accordingly (e.g., `Version.onOrAfter`, `Version.before`) to #### Developer API -The Developer API consists of interfaces and foundation software implementations that enable external users to develop new OpenSearch features. This includes -obvious components such as the Plugin framework and less obvious components such as REST Action Handlers. When developing a new feature of OpenSearch it is important -to explicitly mark which implementation components may, or may not, be extended by external implementations. For example, all new API classes with `@opensearch.api` -signal that the new component may be extended by an external implementation and therefore provide backwards compatibility guarantees. Similarly, any class explicitly -marked with the `@opensearch.internal` annotation, or not explicitly marked by an annotation should not be extended by external implementation components as it does not -guarantee backwards compatibility and may change at any time. The `@deprecated` annotation should also be added to any `@opensearch.api` classes or methods that are -either changed or planned to be removed across minor versions. +The Developer API consists of interfaces and foundation software implementations that enable external users to develop new OpenSearch features. This includes obvious +components such as the Plugin and Extension frameworks and less obvious components such as REST Action Handlers. When developing a new feature of OpenSearch it is +important to explicitly mark which implementation components may, or may not, be extended by external implementations. For example, all new API classes with the +`@PublicApi` annotation (or documented as `@opensearch.api`) signal that the new component may be extended by an external implementation and therefore provide +backwards compatibility guarantees. 
Similarly, any class explicitly marked with the `@InternalApi` (or documented as `@opensearch.internal`) annotation, or not +explicitly marked by an annotation, should not be extended by external implementation components, as it does not guarantee backwards compatibility and may change at +any time. The `@DeprecatedApi` annotation could also be added to any classes annotated with `@PublicApi` (or documented as `@opensearch.api`), or their methods, that +are either changed (with replacement) or planned to be removed across major versions. + +APIs that are designated to be public but have not yet been stabilized should be marked with the `@ExperimentalApi` (or documented as `@opensearch.experimental`) +annotation. The presence of this annotation signals that the API may change at any time (major, minor or even patch releases). In general, the classes annotated with +`@PublicApi` may expose other classes or methods annotated with `@ExperimentalApi`; in such cases the backward compatibility guarantees do not apply to the latter +(please see [Experimental Development](#experimental-development) for more details).
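+
+As a minimal, hypothetical sketch of how these markers combine (the class and package names below are invented, the `since` value is illustrative, and this assumes the annotations live in `org.opensearch.common.annotation`):
+
+```java
+package org.opensearch.example;
+
+import org.opensearch.common.annotation.ExperimentalApi;
+import org.opensearch.common.annotation.PublicApi;
+
+// A stable public extension point: backwards compatibility guarantees apply.
+@PublicApi(since = "2.12.0")
+public interface ExampleExtensionPoint {
+    void register(String name);
+
+    // Exposed by a @PublicApi type but still experimental: it may change in any
+    // release, and the backwards compatibility guarantees do not apply to it.
+    @ExperimentalApi
+    void registerExperimental(String name);
+}
+```
#### User API @@ -589,8 +604,8 @@ and a log message to the OpenSearch deprecation log files using the `Deprecation Rapidly developing new features often benefit from several release cycles before committing to an official and long term supported (LTS) API. To enable this cycle OpenSearch uses an Experimental Development process leveraging [Feature Flags](https://featureflags.io/feature-flags/). This allows a feature to be developed using the same process as a LTS feature but with additional guard rails and communication mechanisms to signal to the users and development community the feature is not yet stable, may change in a future -release, or be removed altogether. Any Developer or User APIs implemented along with the experimental feature should be marked with the `@opensearch.experimental` annotation to -signal the implementation is not subject to LTS and does not follow backwards compatibility guidelines. +release, or be removed altogether. Any Developer or User APIs implemented along with the experimental feature should be marked with `@ExperimentalApi` (or documented as +`@opensearch.experimental`) annotation to signal the implementation is not subject to LTS and does not follow backwards compatibility guidelines. ### Backports diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 95e87f4be43bf..5535c2fa26eae 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -6,6 +6,7 @@ This document contains a list of maintainers in this repo. See [opensearch-proje | Maintainer | GitHub ID | Affiliation | |--------------------------| ------------------------------------------------------- | ----------- | +| Abbas Hussain | [abbashus](https://github.com/abbashus) | Meta | | Anas Alkouz | [anasalkouz](https://github.com/anasalkouz) | Amazon | | Andrew Ross | [andrross](https://github.com/andrross) | Amazon | | Andriy Redko | [reta](https://github.com/reta) | Aiven | @@ -15,11 +16,13 @@ This document contains a list of maintainers in this repo. See [opensearch-proje | Daniel "dB." 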
Doubrovkine | [dblock](https://github.com/dblock) | Amazon | | Gaurav Bafna | [gbbafna](https://github.com/gbbafna) | Amazon | | Himanshu Setia | [setiah](https://github.com/setiah) | Amazon | -| Kartik Ganesh | [kartg](https://github.com/kartg) | Amazon | | Kunal Kotwani | [kotwanikunal](https://github.com/kotwanikunal) | Amazon | | Marc Handalian | [mch2](https://github.com/mch2) | Amazon | +| Michael Froh | [msfroh](https://github.com/msfroh) | Amazon | | Nick Knize | [nknize](https://github.com/nknize) | Amazon | | Owais Kazi | [owaiskazi19](https://github.com/owaiskazi19) | Amazon | +| Peter Nied | [peternied](https://github.com/peternied) | Amazon | +| Rabi Panda | [adnapibar](https://github.com/adnapibar) | Independent | | Rishikesh Pasham | [Rishikesh1159](https://github.com/Rishikesh1159) | Amazon | | Ryan Bogan | [ryanbogan](https://github.com/ryanbogan) | Amazon | | Sachin Kale | [sachinpkale](https://github.com/sachinpkale) | Amazon | @@ -33,8 +36,7 @@ This document contains a list of maintainers in this repo. See [opensearch-proje ## Emeritus | Maintainer | GitHub ID | Affiliation | -|-------------------------|---------------------------------------------| ----------- | -| Abbas Hussain | [abbashus](https://github.com/abbashus) | Amazon | +|-------------------------|---------------------------------------------|-------------| | Megha Sai Kavikondala | [meghasaik](https://github.com/meghasaik) | Amazon | -| Rabi Panda | [adnapibar](https://github.com/adnapibar) | Amazon | | Xue Zhou | [xuezhou25](https://github.com/xuezhou25) | Amazon | +| Kartik Ganesh | [kartg](https://github.com/kartg) | Amazon | diff --git a/NOTICE.txt b/NOTICE.txt index 6c7dc983f8c7a..d463b8f28561f 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -10,3 +10,6 @@ Foundation (http://www.apache.org/). This product includes software developed by Joda.org (http://www.joda.org/). + +This product includes software developed by +Morten Haraldsen (ethlo) (https://github.com/ethlo) under the Apache License, version 2.0. diff --git a/TESTING.md b/TESTING.md index a76ee07e1faac..1c91d60840d61 100644 --- a/TESTING.md +++ b/TESTING.md @@ -23,6 +23,7 @@ OpenSearch uses [jUnit](https://junit.org/junit5/) for testing, it also uses ran - [Iterating on packaging tests](#iterating-on-packaging-tests) - [Testing backwards compatibility](#testing-backwards-compatibility) - [BWC Testing against a specific remote/branch](#bwc-testing-against-a-specific-remotebranch) + - [BWC Testing with security](#bwc-testing-with-security) - [Skip fetching latest](#skip-fetching-latest) - [How to write good tests?](#how-to-write-good-tests) - [Base classes for test cases](#base-classes-for-test-cases) @@ -406,6 +407,29 @@ Example: Say you need to make a change to `main` and have a BWC layer in `5.x`. You will need to: . Create a branch called `index_req_change` off your remote `${remote}`. This will contain your change. . Create a branch called `index_req_bwc_5.x` off `5.x`. This will contain your bwc layer. . Push both branches to your remote repository. . Run the tests with `./gradlew check -Dbwc.remote=${remote} -Dbwc.refspec.5.x=index_req_bwc_5.x`. +## BWC Testing with security + +You may want to run BWC tests for a secure OpenSearch cluster. In order to do this, you will need to follow a few additional steps: + +1. Clone the OpenSearch Security repository from https://github.com/opensearch-project/security. +2. 
Get both the old version of the Security plugin (the version you wish to come from) and the new version of the Security plugin (the version you wish to go to). This can be done either by fetching the maven artifact with a command like `wget https://repo1.maven.org/maven2/org/opensearch/plugin/opensearch-security/<TARGET_VERSION>.0/opensearch-security-<TARGET_VERSION>.0.zip` or by running `./gradlew assemble` from the base of the Security repository. +3. Move both of the Security artifacts into new directories at the path `/security/bwc-test/src/test/resources/<TARGET_VERSION>.0`. You should end up with two different directories in `/security/bwc-test/src/test/resources/`, one named for the old version and one for the new version. +4. Run the following command from the base of the Security repository: + +``` + ./gradlew -p bwc-test clean bwcTestSuite \ -Dtests.security.manager=false \ -Dtests.opensearch.http.protocol=https \ -Dtests.opensearch.username=admin \ -Dtests.opensearch.password=admin \ -PcustomDistributionUrl="/OpenSearch/distribution/archives/linux-tar/build/distributions/opensearch-min-<TARGET_VERSION>-SNAPSHOT-linux-x64.tar.gz" \ -i +``` + +`-Dtests.security.manager=false` handles access issues when attempting to read the certificates from the file system. +`-Dtests.opensearch.http.protocol=https` tells the wait-for-cluster-startup task to expect HTTPS when checking that the cluster is up. +`-PcustomDistributionUrl=...` points the tests at a custom build of the OpenSearch distribution. This is unnecessary when running against standard/unmodified OpenSearch core distributions. + ### Skip fetching latest For some BWC testing scenarios, you want to use the local clone of the repository without fetching latest. For these use cases, you can set the system property `tests.bwc.git_fetch_latest` to `false` and the BWC builds will skip fetching the latest from the remote. diff --git a/TRIAGING.md b/TRIAGING.md new file mode 100644 index 0000000000000..3917f1e1442b9 --- /dev/null +++ b/TRIAGING.md @@ -0,0 +1,83 @@ +<img src="https://opensearch.org/assets/img/opensearch-logo-themed.svg" height="64px"> + +The maintainers of the OpenSearch Repo seek to promote an inclusive and engaged community of contributors. In order to facilitate this, weekly triage meetings are open-to-all and attendance is encouraged for anyone who hopes to contribute, discuss an issue, or learn more about the project. To learn more about contributing to the OpenSearch Repo visit the [Contributing](./CONTRIBUTING.md) documentation. + +### Do I need to attend for my issue to be addressed/triaged? + +Attendance is not required for your issue to be triaged or addressed. If an issue is not accepted, it will be updated with a comment explaining the next steps. All new issues are triaged weekly. + +You can track if your issue was triaged by watching your GitHub notifications for updates. + +### What happens if my issue does not get covered this time? + +At each meeting we seek to address all new issues. However, should we run out of time before your issue is discussed, you are always welcome to attend the next meeting or to follow up on the issue post itself. + +### How do I join the Triage meeting? + +Meetings are hosted regularly at 10:00a - 10:55a Central Time every Wednesday and can be joined via [Chime](https://aws.amazon.com/chime/), with this [meeting link](https://chime.aws/1988437365). + +After joining the Chime meeting, you can enable your video / voice to join the discussion. If you do not have a webcam or microphone available, you can still join in via the text chat. 
+ +If you have an issue you'd like to bring forward, please prepare a link to the issue so it can be presented and viewed by everyone in the meeting. + +### Is there an agenda for each week? + +Yes, each 55-minute meeting follows this structure: +1. **Initial Gathering:** Feel free to turn on your video and engage in informal conversation. Shortly, a volunteer triage [facilitator](#what-is-the-role-of-the-facilitator) will begin the meeting and share their screen. +2. **Record Attendees:** The facilitator will request attendees to share their GitHub profile links. These links will be collected and assembled into a [tag](#how-do-triage-facilitators-tag-comments-during-the-triage-meeting) to annotate comments during the meeting. +3. **Announcements:** Any announcements will be made at the beginning of the meeting. +4. **Review of New Issues:** We start by reviewing all untriaged [issues](https://github.com/search?q=label%3Auntriaged+is%3Aopen++repo%3Aopensearch-project%2FOpenSearch+&type=issues&ref=advsearch&s=created&o=desc) for the OpenSearch repo. +5. **Attendee Requests:** An opportunity for any meeting member to request consideration of an issue or pull request. +6. **Open Discussion:** Attendees can bring up any topics not already covered by filed issues or pull requests. + +### What is the role of the facilitator? + +The facilitator is crucial in driving the meeting, ensuring a smooth flow of issues into OpenSearch for future contributions. They maintain the meeting's agenda, solicit input from attendees, and record outcomes using the triage tag as items are discussed. + +### Do I need to have already contributed to the project to attend a triage meeting? + +No prior contributions are required. All interested individuals are welcome and encouraged to attend. Triage meetings offer a fantastic opportunity for new contributors to understand the project and explore various contribution avenues. + +### What if I have an issue that is almost a duplicate, should I open a new one to be triaged? + +You can always open an [issue](https://github.com/opensearch-project/OpenSearch/issues/new/choose) including one that you think may be a duplicate. If you believe your issue is similar but distinct from an existing one, you are encouraged to file it and explain the differences during the triage meeting. + +### What if I have follow-up questions on an issue? + +If you have an existing issue you would like to discuss, you can always comment on the issue itself. Alternatively, you are welcome to come to the triage meeting to discuss. + +### Is this meeting a good place to get help setting up features on my OpenSearch instance? + +While we are always happy to help the community, the best resource for implementation questions is [the OpenSearch forum](https://forum.opensearch.org/). + +There you can find answers to many common questions as well as speak with implementation experts. + +### What are the issue labels associated with triaging? + +There are several labels that are used to identify the 'state' of issues filed in OpenSearch. +| Label | When Applied | Meaning | +|---------------|----------------------|-----------------------------------------------------------------------------------------------------------------------------------------| +| `Untriaged` | When issues are created or re-opened. | Issues labeled as 'Untriaged' require the attention of the repository maintainers and may need to be prioritized for quicker resolution. 
It's crucial to keep the count of 'Untriaged' labels low to ensure all potential security issues are addressed in a timely manner. See [SECURITY.md](https://github.com/opensearch-project/OpenSearch/blob/main/SECURITY.md) for more details on handling these issues. | +| `Help Wanted` | Anytime. | Issues marked as 'Help Wanted' signal that they are actionable and not the current focus of the project maintainers. Community contributions are especially encouraged for these issues. | +| `Good First Issue` | Anytime. | Issues labeled as 'Good First Issue' are small in scope and can be resolved with a single pull request. These are recommended starting points for newcomers looking to make their first contributions. | + +### What are the typical outcomes of a triaged issue? + +| Outcome | Label | Description | Canned Response | +|--------------|------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Accepted | `-untriaged` | The issue has the details needed to be directed towards area owners. | "Thanks for filing this issue, please feel free to submit a pull request." | +| Rejected | N/A | The issue will be closed with a reason for why it was rejected. Reasons might include lack of details, or being outside the scope of the project. | "Thanks for creating this issue; however, it isn't being accepted due to {REASON}. Please feel free to open a new issue after addressing the reason." | +| Area Triage | `+{AREALABEL}` | OpenSearch has many different areas. If it's unclear whether an issue should be accepted, it will be labeled with the area and an owner will be @mentioned for follow-up. | "Thanks for creating this issue; the triage meeting was unsure if this issue should be accepted, @{PERSON} or someone from the area please review and then accept or reject this issue?" | +| Transfer | N/A | If the issue applies to another repository within the OpenSearch Project, it will be transferred accordingly. | "@opensearch-project/triage, can you please transfer this issue to project {REPOSITORY}." Or, if someone at the meeting has permissions, they can start the transfer. | + +### Is this where I should bring up potential security vulnerabilities? + +Due to the sensitive nature of security vulnerabilities, please report all potential vulnerabilities directly by following the steps outlined in the [SECURITY.md](https://github.com/opensearch-project/OpenSearch/blob/main/SECURITY.md) document. + +### How do triage facilitators tag comments during the triage meeting? + +During the triage meeting, facilitators should use the tag _[Triage - attendees [1](#Profile_link) [2](#Profile_link)]_ to indicate a collective decision. This ensures contributors know the decision came from the meeting rather than an individual and identifies participants for any follow-up queries. + +This tag should not be used outside triage meetings. 
diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle index 02aa9319cc583..be4579b4e5324 100644 --- a/benchmarks/build.gradle +++ b/benchmarks/build.gradle @@ -40,7 +40,7 @@ application { } base { - archivesBaseName = 'opensearch-benchmarks' + archivesName = 'opensearch-benchmarks' } test.enabled = false @@ -84,3 +84,45 @@ spotless { targetExclude 'src/main/generated/**/*.java' } } + +if (BuildParams.runtimeJavaVersion >= JavaVersion.VERSION_20) { + // Add support for incubator modules on supported Java versions. + run.jvmArgs += ['--add-modules=jdk.incubator.vector'] + run.classpath += files(jar.archiveFile) + run.classpath -= sourceSets.main.output + evaluationDependsOn(':libs:opensearch-common') + + sourceSets { + java20 { + java { + srcDirs = ['src/main/java20'] + } + } + } + + configurations { + java20Implementation.extendsFrom(implementation) + } + + dependencies { + java20Implementation sourceSets.main.output + java20Implementation project(':libs:opensearch-common').sourceSets.java20.output + java20AnnotationProcessor "org.openjdk.jmh:jmh-generator-annprocess:$versions.jmh" + } + + compileJava20Java { + targetCompatibility = JavaVersion.VERSION_20 + options.compilerArgs.addAll(["-processor", "org.openjdk.jmh.generators.BenchmarkProcessor"]) + } + + jar { + metaInf { + into 'versions/20' + from sourceSets.java20.output + } + manifest.attributes('Multi-Release': 'true') + } + + // classes generated by JMH can use all sorts of forbidden APIs but we have no influence at all and cannot exclude these classes + disableTasks('forbiddenApisJava20') +} diff --git a/benchmarks/src/main/java/org/opensearch/benchmark/index/codec/fuzzy/FilterConstructionBenchmark.java b/benchmarks/src/main/java/org/opensearch/benchmark/index/codec/fuzzy/FilterConstructionBenchmark.java new file mode 100644 index 0000000000000..4e995f5a5067c --- /dev/null +++ b/benchmarks/src/main/java/org/opensearch/benchmark/index/codec/fuzzy/FilterConstructionBenchmark.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.benchmark.index.codec.fuzzy; + +import org.apache.lucene.util.BytesRef; +import org.opensearch.common.UUIDs; +import org.opensearch.index.codec.fuzzy.FuzzySet; +import org.opensearch.index.codec.fuzzy.FuzzySetFactory; +import org.opensearch.index.codec.fuzzy.FuzzySetParameters; +import org.opensearch.index.mapper.IdFieldMapper; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +@Fork(3) +@Warmup(iterations = 2) +@Measurement(iterations = 5, time = 60, timeUnit = TimeUnit.SECONDS) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(TimeUnit.MILLISECONDS) +@State(Scope.Benchmark) +public class FilterConstructionBenchmark { + + private List<BytesRef> items; + + @Param({ "1000000", "10000000", "50000000" }) + private int numIds; + + @Param({ "0.0511", "0.1023", "0.2047" }) + private double fpp; + + private FuzzySetFactory fuzzySetFactory; + private String fieldName; + + @Setup + public void setupIds() { + this.fieldName = IdFieldMapper.NAME; + this.items = IntStream.range(0, numIds).mapToObj(i -> new BytesRef(UUIDs.base64UUID())).collect(Collectors.toList()); + FuzzySetParameters parameters = new FuzzySetParameters(() -> fpp); + this.fuzzySetFactory = new FuzzySetFactory(Map.of(fieldName, parameters)); + } + + @Benchmark + public FuzzySet buildFilter() throws IOException { + return fuzzySetFactory.createFuzzySet(items.size(), fieldName, () -> items.iterator()); + } +} diff --git a/benchmarks/src/main/java/org/opensearch/benchmark/index/codec/fuzzy/FilterLookupBenchmark.java b/benchmarks/src/main/java/org/opensearch/benchmark/index/codec/fuzzy/FilterLookupBenchmark.java new file mode 100644 index 0000000000000..383539219830e --- /dev/null +++ b/benchmarks/src/main/java/org/opensearch/benchmark/index/codec/fuzzy/FilterLookupBenchmark.java @@ -0,0 +1,80 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.benchmark.index.codec.fuzzy; + +import org.apache.lucene.util.BytesRef; +import org.opensearch.common.UUIDs; +import org.opensearch.index.codec.fuzzy.FuzzySet; +import org.opensearch.index.codec.fuzzy.FuzzySetFactory; +import org.opensearch.index.codec.fuzzy.FuzzySetParameters; +import org.opensearch.index.mapper.IdFieldMapper; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +@Fork(3) +@Warmup(iterations = 2) +@Measurement(iterations = 5, time = 60, timeUnit = TimeUnit.SECONDS) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(TimeUnit.MILLISECONDS) +@State(Scope.Benchmark) +public class FilterLookupBenchmark { + + @Param({ "50000000", "1000000" }) + private int numItems; + + @Param({ "1000000" }) + private int searchKeyCount; + + @Param({ "0.0511", "0.1023", "0.2047" }) + private double fpp; + + private FuzzySet fuzzySet; + private List<BytesRef> items; + private Random random = new Random(); + + @Setup + public void setupFilter() throws IOException { + String fieldName = IdFieldMapper.NAME; + items = IntStream.range(0, numItems).mapToObj(i -> new BytesRef(UUIDs.base64UUID())).collect(Collectors.toList()); + FuzzySetParameters parameters = new FuzzySetParameters(() -> fpp); + fuzzySet = new FuzzySetFactory(Map.of(fieldName, parameters)).createFuzzySet(numItems, fieldName, () -> items.iterator()); + } + + @Benchmark + public void contains_withExistingKeys(Blackhole blackhole) throws IOException { + for (int i = 0; i < searchKeyCount; i++) { + blackhole.consume(fuzzySet.contains(items.get(random.nextInt(items.size()))) == FuzzySet.Result.MAYBE); + } + } + + @Benchmark + public void contains_withRandomKeys(Blackhole blackhole) throws IOException { + for (int i = 0; i < searchKeyCount; i++) { + blackhole.consume(fuzzySet.contains(new BytesRef(UUIDs.base64UUID()))); + } + } +} diff --git a/benchmarks/src/main/java/org/opensearch/benchmark/index/mapper/CustomBinaryDocValuesFieldBenchmark.java b/benchmarks/src/main/java/org/opensearch/benchmark/index/mapper/CustomBinaryDocValuesFieldBenchmark.java new file mode 100644 index 0000000000000..7307bec088d02 --- /dev/null +++ b/benchmarks/src/main/java/org/opensearch/benchmark/index/mapper/CustomBinaryDocValuesFieldBenchmark.java @@ -0,0 +1,81 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.benchmark.index.mapper; + +import org.apache.lucene.util.BytesRef; +import org.opensearch.index.mapper.BinaryFieldMapper; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; + +@Warmup(iterations = 1) +@Measurement(iterations = 1) +@Fork(1) +@BenchmarkMode(Mode.Throughput) +@OutputTimeUnit(TimeUnit.MILLISECONDS) +@State(Scope.Thread) +@SuppressWarnings("unused") // invoked by benchmarking framework +public class CustomBinaryDocValuesFieldBenchmark { + + static final String FIELD_NAME = "dummy"; + static final String SEED_VALUE = "seed"; + + @Benchmark + public void add(CustomBinaryDocValuesFieldBenchmark.BenchmarkParameters parameters, Blackhole blackhole) { + // Don't use the parameter binary doc values object. + // Start with a fresh object every call and add maximum number of entries + BinaryFieldMapper.CustomBinaryDocValuesField customBinaryDocValuesField = new BinaryFieldMapper.CustomBinaryDocValuesField( + FIELD_NAME, + new BytesRef(SEED_VALUE).bytes + ); + for (int i = 0; i < parameters.maximumNumberOfEntries; ++i) { + ThreadLocalRandom.current().nextBytes(parameters.bytes); + customBinaryDocValuesField.add(parameters.bytes); + } + } + + @Benchmark + public void binaryValue(CustomBinaryDocValuesFieldBenchmark.BenchmarkParameters parameters, Blackhole blackhole) { + blackhole.consume(parameters.customBinaryDocValuesField.binaryValue()); + } + + @State(Scope.Benchmark) + public static class BenchmarkParameters { + @Param({ "8", "32", "128", "512" }) + int maximumNumberOfEntries; + + @Param({ "8", "32", "128", "512" }) + int entrySize; + + BinaryFieldMapper.CustomBinaryDocValuesField customBinaryDocValuesField; + byte[] bytes; + + @Setup + public void setup() { + customBinaryDocValuesField = new BinaryFieldMapper.CustomBinaryDocValuesField(FIELD_NAME, new BytesRef(SEED_VALUE).bytes); + bytes = new byte[entrySize]; + for (int i = 0; i < maximumNumberOfEntries; ++i) { + ThreadLocalRandom.current().nextBytes(bytes); + customBinaryDocValuesField.add(bytes); + } + } + } +} diff --git a/benchmarks/src/main/java/org/opensearch/benchmark/routing/allocation/AllocationBenchmark.java b/benchmarks/src/main/java/org/opensearch/benchmark/routing/allocation/AllocationBenchmark.java index c89bf3d1b577c..1faf522f3a1c9 100644 --- a/benchmarks/src/main/java/org/opensearch/benchmark/routing/allocation/AllocationBenchmark.java +++ b/benchmarks/src/main/java/org/opensearch/benchmark/routing/allocation/AllocationBenchmark.java @@ -31,17 +31,6 @@ package org.opensearch.benchmark.routing.allocation; -import org.openjdk.jmh.annotations.Benchmark; -import org.openjdk.jmh.annotations.BenchmarkMode; -import org.openjdk.jmh.annotations.Fork; -import org.openjdk.jmh.annotations.Measurement; -import org.openjdk.jmh.annotations.Mode; -import org.openjdk.jmh.annotations.OutputTimeUnit; -import org.openjdk.jmh.annotations.Param; -import org.openjdk.jmh.annotations.Scope; -import org.openjdk.jmh.annotations.Setup; -import 
org.openjdk.jmh.annotations.State; -import org.openjdk.jmh.annotations.Warmup; import org.opensearch.Version; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; @@ -53,6 +42,17 @@ import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.common.settings.Settings; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; import java.util.HashMap; import java.util.Map; diff --git a/benchmarks/src/main/java/org/opensearch/benchmark/routing/allocation/Allocators.java b/benchmarks/src/main/java/org/opensearch/benchmark/routing/allocation/Allocators.java index d700b9dab2cf3..482a58f87086e 100644 --- a/benchmarks/src/main/java/org/opensearch/benchmark/routing/allocation/Allocators.java +++ b/benchmarks/src/main/java/org/opensearch/benchmark/routing/allocation/Allocators.java @@ -45,8 +45,8 @@ import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.util.set.Sets; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.gateway.GatewayAllocator; import org.opensearch.snapshots.EmptySnapshotsInfoService; diff --git a/benchmarks/src/main/java/org/opensearch/benchmark/search/aggregations/TermsReduceBenchmark.java b/benchmarks/src/main/java/org/opensearch/benchmark/search/aggregations/TermsReduceBenchmark.java index 76851881730a3..5073858848e05 100644 --- a/benchmarks/src/main/java/org/opensearch/benchmark/search/aggregations/TermsReduceBenchmark.java +++ b/benchmarks/src/main/java/org/opensearch/benchmark/search/aggregations/TermsReduceBenchmark.java @@ -40,14 +40,14 @@ import org.opensearch.action.search.SearchPhaseController; import org.opensearch.action.search.SearchProgressListener; import org.opensearch.action.search.SearchRequest; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.breaker.NoopCircuitBreaker; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.lucene.search.TopDocsAndMaxScore; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.NoopCircuitBreaker; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.search.DocValueFormat; import org.opensearch.search.SearchModule; import org.opensearch.search.SearchShardTarget; @@ -57,6 +57,7 @@ import org.opensearch.search.aggregations.InternalAggregations; import org.opensearch.search.aggregations.MultiBucketConsumerService; import org.opensearch.search.aggregations.bucket.terms.StringTerms; +import 
org.opensearch.search.aggregations.bucket.terms.TermsAggregator; import org.opensearch.search.aggregations.pipeline.PipelineAggregator; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.query.QuerySearchResult; @@ -170,15 +171,14 @@ private StringTerms newTerms(Random rand, BytesRef[] dict, boolean withNested) { "terms", BucketOrder.key(true), BucketOrder.count(false), - topNSize, - 1, Collections.emptyMap(), DocValueFormat.RAW, numShards, true, 0, buckets, - 0 + 0, + new TermsAggregator.BucketCountThresholds(1, 0, topNSize, numShards) ); } diff --git a/benchmarks/src/main/java/org/opensearch/benchmark/search/aggregations/bucket/terms/StringTermsSerializationBenchmark.java b/benchmarks/src/main/java/org/opensearch/benchmark/search/aggregations/bucket/terms/StringTermsSerializationBenchmark.java index 8f86a0f3afbc6..b98a257dfbf48 100644 --- a/benchmarks/src/main/java/org/opensearch/benchmark/search/aggregations/bucket/terms/StringTermsSerializationBenchmark.java +++ b/benchmarks/src/main/java/org/opensearch/benchmark/search/aggregations/bucket/terms/StringTermsSerializationBenchmark.java @@ -40,6 +40,7 @@ import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.InternalAggregations; import org.opensearch.search.aggregations.bucket.terms.StringTerms; +import org.opensearch.search.aggregations.bucket.terms.TermsAggregator; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; import org.openjdk.jmh.annotations.Fork; @@ -86,15 +87,14 @@ private StringTerms newTerms(boolean withNested) { "test", BucketOrder.key(true), BucketOrder.key(true), - buckets, - 1, null, DocValueFormat.RAW, buckets, false, 100000, resultBuckets, - 0 + 0, + new TermsAggregator.BucketCountThresholds(1, 0, buckets, buckets) ); } diff --git a/benchmarks/src/main/java/org/opensearch/benchmark/store/remote/filecache/FileCacheBenchmark.java b/benchmarks/src/main/java/org/opensearch/benchmark/store/remote/filecache/FileCacheBenchmark.java index d3bfc9348cdb3..7cd8c672f45df 100644 --- a/benchmarks/src/main/java/org/opensearch/benchmark/store/remote/filecache/FileCacheBenchmark.java +++ b/benchmarks/src/main/java/org/opensearch/benchmark/store/remote/filecache/FileCacheBenchmark.java @@ -8,12 +8,12 @@ package org.opensearch.benchmark.store.remote.filecache; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.concurrent.ThreadLocalRandom; -import java.util.concurrent.TimeUnit; - import org.apache.lucene.store.IndexInput; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.NoopCircuitBreaker; +import org.opensearch.index.store.remote.filecache.CachedIndexInput; +import org.opensearch.index.store.remote.filecache.FileCache; +import org.opensearch.index.store.remote.filecache.FileCacheFactory; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; import org.openjdk.jmh.annotations.Fork; @@ -27,11 +27,11 @@ import org.openjdk.jmh.annotations.Threads; import org.openjdk.jmh.annotations.Warmup; import org.openjdk.jmh.infra.Blackhole; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.breaker.NoopCircuitBreaker; -import org.opensearch.index.store.remote.filecache.CachedIndexInput; -import org.opensearch.index.store.remote.filecache.FileCache; -import org.opensearch.index.store.remote.filecache.FileCacheFactory; + +import java.nio.file.Path; +import 
java.nio.file.Paths; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; /** * Simple benchmark test of {@link FileCache}. It uses a uniform random distribution diff --git a/benchmarks/src/main/java/org/opensearch/benchmark/time/NanoTimeVsCurrentTimeMillisBenchmark.java b/benchmarks/src/main/java/org/opensearch/benchmark/time/NanoTimeVsCurrentTimeMillisBenchmark.java index 4c87734255b8a..dfa0a8538108e 100644 --- a/benchmarks/src/main/java/org/opensearch/benchmark/time/NanoTimeVsCurrentTimeMillisBenchmark.java +++ b/benchmarks/src/main/java/org/opensearch/benchmark/time/NanoTimeVsCurrentTimeMillisBenchmark.java @@ -8,15 +8,15 @@ package org.opensearch.benchmark.time; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; import org.openjdk.jmh.annotations.Fork; -import org.openjdk.jmh.annotations.Warmup; import org.openjdk.jmh.annotations.Measurement; -import org.openjdk.jmh.annotations.BenchmarkMode; -import org.openjdk.jmh.annotations.OutputTimeUnit; import org.openjdk.jmh.annotations.Mode; -import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.OutputTimeUnit; import org.openjdk.jmh.annotations.Scope; -import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; import java.util.concurrent.TimeUnit; diff --git a/benchmarks/src/main/java/org/opensearch/benchmark/time/RoundingBenchmark.java b/benchmarks/src/main/java/org/opensearch/benchmark/time/RoundingBenchmark.java deleted file mode 100644 index cdbcbfc163191..0000000000000 --- a/benchmarks/src/main/java/org/opensearch/benchmark/time/RoundingBenchmark.java +++ /dev/null @@ -1,180 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.benchmark.time; - -import org.opensearch.common.Rounding; -import org.opensearch.common.rounding.DateTimeUnit; -import org.opensearch.common.time.DateUtils; -import org.opensearch.common.unit.TimeValue; -import org.joda.time.DateTimeZone; -import org.openjdk.jmh.annotations.Benchmark; -import org.openjdk.jmh.annotations.BenchmarkMode; -import org.openjdk.jmh.annotations.Fork; -import org.openjdk.jmh.annotations.Measurement; -import org.openjdk.jmh.annotations.Mode; -import org.openjdk.jmh.annotations.OutputTimeUnit; -import org.openjdk.jmh.annotations.Scope; -import org.openjdk.jmh.annotations.State; -import org.openjdk.jmh.annotations.Warmup; - -import java.time.ZoneId; -import java.time.ZoneOffset; -import java.util.concurrent.TimeUnit; - -import static org.opensearch.common.Rounding.DateTimeUnit.DAY_OF_MONTH; -import static org.opensearch.common.Rounding.DateTimeUnit.MONTH_OF_YEAR; -import static org.opensearch.common.Rounding.DateTimeUnit.QUARTER_OF_YEAR; -import static org.opensearch.common.Rounding.DateTimeUnit.YEAR_OF_CENTURY; - -@Fork(3) -@Warmup(iterations = 10) -@Measurement(iterations = 10) -@BenchmarkMode(Mode.AverageTime) -@OutputTimeUnit(TimeUnit.NANOSECONDS) -@State(Scope.Benchmark) -@SuppressWarnings("unused") // invoked by benchmarking framework -public class RoundingBenchmark { - - private final ZoneId zoneId = ZoneId.of("Europe/Amsterdam"); - private final DateTimeZone timeZone = DateUtils.zoneIdToDateTimeZone(zoneId); - - private long timestamp = 1548879021354L; - - private final org.opensearch.common.rounding.Rounding jodaRounding = org.opensearch.common.rounding.Rounding.builder( - DateTimeUnit.HOUR_OF_DAY - ).timeZone(timeZone).build(); - private final Rounding javaRounding = Rounding.builder(Rounding.DateTimeUnit.HOUR_OF_DAY).timeZone(zoneId).build(); - - @Benchmark - public long timeRoundingDateTimeUnitJoda() { - return jodaRounding.round(timestamp); - } - - @Benchmark - public long timeRoundingDateTimeUnitJava() { - return javaRounding.round(timestamp); - } - - private final org.opensearch.common.rounding.Rounding jodaDayOfMonthRounding = org.opensearch.common.rounding.Rounding.builder( - DateTimeUnit.DAY_OF_MONTH - ).timeZone(timeZone).build(); - private final Rounding javaDayOfMonthRounding = Rounding.builder(DAY_OF_MONTH).timeZone(zoneId).build(); - - @Benchmark - public long timeRoundingDateTimeUnitDayOfMonthJoda() { - return jodaDayOfMonthRounding.round(timestamp); - } - - @Benchmark - public long timeRoundingDateTimeUnitDayOfMonthJava() { - return javaDayOfMonthRounding.round(timestamp); - } - - private final org.opensearch.common.rounding.Rounding timeIntervalRoundingJoda = org.opensearch.common.rounding.Rounding.builder( - TimeValue.timeValueMinutes(60) - ).timeZone(timeZone).build(); - private final Rounding timeIntervalRoundingJava = Rounding.builder(TimeValue.timeValueMinutes(60)).timeZone(zoneId).build(); - - @Benchmark - public long timeIntervalRoundingJava() { - return timeIntervalRoundingJava.round(timestamp); - } - - @Benchmark - public long timeIntervalRoundingJoda() { - return timeIntervalRoundingJoda.round(timestamp); - } - - private final org.opensearch.common.rounding.Rounding timeUnitRoundingUtcDayOfMonthJoda = org.opensearch.common.rounding.Rounding - .builder(DateTimeUnit.DAY_OF_MONTH) - .timeZone(DateTimeZone.UTC) - .build(); - private final Rounding timeUnitRoundingUtcDayOfMonthJava = Rounding.builder(DAY_OF_MONTH).timeZone(ZoneOffset.UTC).build(); - - @Benchmark - public long 
timeUnitRoundingUtcDayOfMonthJava() { - return timeUnitRoundingUtcDayOfMonthJava.round(timestamp); - } - - @Benchmark - public long timeUnitRoundingUtcDayOfMonthJoda() { - return timeUnitRoundingUtcDayOfMonthJoda.round(timestamp); - } - - private final org.opensearch.common.rounding.Rounding timeUnitRoundingUtcQuarterOfYearJoda = org.opensearch.common.rounding.Rounding - .builder(DateTimeUnit.QUARTER) - .timeZone(DateTimeZone.UTC) - .build(); - private final Rounding timeUnitRoundingUtcQuarterOfYearJava = Rounding.builder(QUARTER_OF_YEAR).timeZone(ZoneOffset.UTC).build(); - - @Benchmark - public long timeUnitRoundingUtcQuarterOfYearJava() { - return timeUnitRoundingUtcQuarterOfYearJava.round(timestamp); - } - - @Benchmark - public long timeUnitRoundingUtcQuarterOfYearJoda() { - return timeUnitRoundingUtcQuarterOfYearJoda.round(timestamp); - } - - private final org.opensearch.common.rounding.Rounding timeUnitRoundingUtcMonthOfYearJoda = org.opensearch.common.rounding.Rounding - .builder(DateTimeUnit.MONTH_OF_YEAR) - .timeZone(DateTimeZone.UTC) - .build(); - private final Rounding timeUnitRoundingUtcMonthOfYearJava = Rounding.builder(MONTH_OF_YEAR).timeZone(ZoneOffset.UTC).build(); - - @Benchmark - public long timeUnitRoundingUtcMonthOfYearJava() { - return timeUnitRoundingUtcMonthOfYearJava.round(timestamp); - } - - @Benchmark - public long timeUnitRoundingUtcMonthOfYearJoda() { - return timeUnitRoundingUtcMonthOfYearJoda.round(timestamp); - } - - private final org.opensearch.common.rounding.Rounding timeUnitRoundingUtcYearOfCenturyJoda = org.opensearch.common.rounding.Rounding - .builder(DateTimeUnit.YEAR_OF_CENTURY) - .timeZone(DateTimeZone.UTC) - .build(); - private final Rounding timeUnitRoundingUtcYearOfCenturyJava = Rounding.builder(YEAR_OF_CENTURY).timeZone(ZoneOffset.UTC).build(); - - @Benchmark - public long timeUnitRoundingUtcYearOfCenturyJava() { - return timeUnitRoundingUtcYearOfCenturyJava.round(timestamp); - } - - @Benchmark - public long timeUnitRoundingUtcYearOfCenturyJoda() { - return timeUnitRoundingUtcYearOfCenturyJoda.round(timestamp); - } -} diff --git a/benchmarks/src/main/java/org/opensearch/common/hash/HashFunctionBenchmark.java b/benchmarks/src/main/java/org/opensearch/common/hash/HashFunctionBenchmark.java new file mode 100644 index 0000000000000..8842337a468a1 --- /dev/null +++ b/benchmarks/src/main/java/org/opensearch/common/hash/HashFunctionBenchmark.java @@ -0,0 +1,172 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.hash; + +import org.apache.lucene.util.StringHelper; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.Random; + +@Fork(value = 3) +@Warmup(iterations = 1, time = 1) +@Measurement(iterations = 3, time = 3) +@BenchmarkMode(Mode.Throughput) +public class HashFunctionBenchmark { + + @Benchmark + public void hash(Blackhole bh, Options opts) { + bh.consume(opts.type.hash(opts.data)); + } + + @State(Scope.Benchmark) + public static class Options { + @Param({ "MURMUR3", "T1HA1" }) + public Type type; + + @Param({ + "1", + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "10", + "12", + "14", + "16", + "18", + "21", + "24", + "28", + "32", + "36", + "41", + "47", + "54", + "62", + "71", + "81", + "90", + "100", + "112", + "125", + "139", + "156", + "174", + "194", + "220", + "245", + "272", + "302", + "339", + "384", + "431", + "488", + "547", + "608", + "675", + "763", + "863", + "967", + "1084", + "1225", + "1372", + "1537", + "1737", + "1929", + "2142", + "2378", + "2664", + "3011", + "3343", + "3778", + "4232", + "4783", + "5310", + "5895", + "6662", + "7529", + "8508", + "9444", + "10483", + "11741", + "13150", + "14597", + "16495", + "18475", + "20877", + "23383", + "25956", + "29071", + "32560", + "36142", + "40841", + "46151", + "52151", + "57888", + "65414", + "72610", + "82050", + "91076", + "102006", + "114247", + "127957", + "143312", + "159077", + "176576", + "199531", + "223475", + "250292", + "277825", + "313943", + "351617", + "393812" }) + public Integer length; + public byte[] data; + + @Setup + public void setup() { + data = new byte[length]; + new Random(0).nextBytes(data); + } + } + + public enum Type { + MURMUR3((data, offset, length) -> StringHelper.murmurhash3_x86_32(data, offset, length, 0)), + T1HA1((data, offset, length) -> T1ha1.hash(data, offset, length, 0)); + + private final Hasher hasher; + + Type(Hasher hasher) { + this.hasher = hasher; + } + + public long hash(byte[] data) { + return hasher.hash(data, 0, data.length); + } + } + + @FunctionalInterface + interface Hasher { + long hash(byte[] data, int offset, int length); + } +} diff --git a/benchmarks/src/main/java/org/opensearch/common/round/RoundableBenchmark.java b/benchmarks/src/main/java/org/opensearch/common/round/RoundableBenchmark.java new file mode 100644 index 0000000000000..3909a3f4eb8fc --- /dev/null +++ b/benchmarks/src/main/java/org/opensearch/common/round/RoundableBenchmark.java @@ -0,0 +1,137 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.round; + +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.Random; + +@Fork(value = 3) +@Warmup(iterations = 3, time = 1) +@Measurement(iterations = 1, time = 1) +@BenchmarkMode(Mode.Throughput) +public class RoundableBenchmark { + + @Benchmark + public void floor(Blackhole bh, Options opts) { + Roundable roundable = opts.supplier.get(); + for (long key : opts.queries) { + bh.consume(roundable.floor(key)); + } + } + + @State(Scope.Benchmark) + public static class Options { + @Param({ + "1", + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "10", + "12", + "14", + "16", + "18", + "20", + "22", + "24", + "26", + "29", + "32", + "37", + "41", + "45", + "49", + "54", + "60", + "64", + "74", + "83", + "90", + "98", + "108", + "118", + "128", + "144", + "159", + "171", + "187", + "204", + "229", + "256" }) + public Integer size; + + @Param({ "binary", "linear", "btree" }) + public String type; + + @Param({ "uniform", "skewed_edge", "skewed_center" }) + public String distribution; + + public long[] queries; + public RoundableSupplier supplier; + + @Setup + public void setup() throws ClassNotFoundException { + Random random = new Random(size); + long[] values = new long[size]; + for (int i = 1; i < values.length; i++) { + values[i] = values[i - 1] + 100; + } + + long range = values[values.length - 1] - values[0] + 100; + long mean, stddev; + queries = new long[1000000]; + + switch (distribution) { + case "uniform": // all values equally likely. + for (int i = 0; i < queries.length; i++) { + queries[i] = values[0] + (nextPositiveLong(random) % range); + } + break; + case "skewed_edge": // distribution centered at p90 with ± 5% stddev. + mean = values[0] + (long) (range * 0.9); + stddev = (long) (range * 0.05); + for (int i = 0; i < queries.length; i++) { + queries[i] = Math.max(values[0], mean + (long) (random.nextGaussian() * stddev)); + } + break; + case "skewed_center": // distribution centered at p50 with ± 5% stddev. + mean = values[0] + (long) (range * 0.5); + stddev = (long) (range * 0.05); + for (int i = 0; i < queries.length; i++) { + queries[i] = Math.max(values[0], mean + (long) (random.nextGaussian() * stddev)); + } + break; + default: + throw new IllegalArgumentException("invalid distribution: " + distribution); + } + + supplier = new RoundableSupplier(type, values, size); + } + + private static long nextPositiveLong(Random random) { + return random.nextLong() & Long.MAX_VALUE; + } + } +} diff --git a/benchmarks/src/main/java/org/opensearch/common/round/RoundableSupplier.java b/benchmarks/src/main/java/org/opensearch/common/round/RoundableSupplier.java new file mode 100644 index 0000000000000..44ac42810996f --- /dev/null +++ b/benchmarks/src/main/java/org/opensearch/common/round/RoundableSupplier.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
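An illustrative sketch (not from the commit) of the Roundable contract the floor benchmark above exercises; it assumes same-package access, since the searcher implementations are constructed exactly this way by the RoundableSupplier that follows:

package org.opensearch.common.round;

public class FloorSketch {
    public static void main(String[] args) {
        long[] values = { 0, 100, 200, 300 }; // sorted and evenly spaced, like the benchmark's generated values
        Roundable roundable = new BinarySearcher(values, values.length);
        // floor(key) returns the greatest element <= key
        System.out.println(roundable.floor(250)); // prints 200
    }
}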
+ */ + +package org.opensearch.common.round; + +import java.util.function.Supplier; + +public class RoundableSupplier implements Supplier<Roundable> { + private final Supplier<Roundable> delegate; + + RoundableSupplier(String type, long[] values, int size) throws ClassNotFoundException { + switch (type) { + case "binary": + delegate = () -> new BinarySearcher(values, size); + break; + case "linear": + delegate = () -> new BidirectionalLinearSearcher(values, size); + break; + case "btree": + throw new ClassNotFoundException("BtreeSearcher is not supported below JDK 20"); + default: + throw new IllegalArgumentException("invalid type: " + type); + } + } + + @Override + public Roundable get() { + return delegate.get(); + } +} diff --git a/benchmarks/src/main/java/org/opensearch/common/util/BytesRefHashBenchmark.java b/benchmarks/src/main/java/org/opensearch/common/util/BytesRefHashBenchmark.java new file mode 100644 index 0000000000000..fef12b6d9f84a --- /dev/null +++ b/benchmarks/src/main/java/org/opensearch/common/util/BytesRefHashBenchmark.java @@ -0,0 +1,249 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.util; + +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.StringHelper; +import org.opensearch.common.hash.T1ha1; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.HashSet; +import java.util.Random; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; +import java.util.stream.Stream; + +@Fork(value = 3) +@Warmup(iterations = 1, time = 2) +@Measurement(iterations = 3, time = 5) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(TimeUnit.MILLISECONDS) +public class BytesRefHashBenchmark { + private static final int NUM_TABLES = 20; // run across many tables so that caches aren't effective + private static final int NUM_HITS = 1_000_000; // num hits per table + + @Benchmark + public void add(Blackhole bh, Options opts) { + HashTable[] tables = Stream.generate(opts.type::create).limit(NUM_TABLES).toArray(HashTable[]::new); + + for (int hit = 0; hit < NUM_HITS; hit++) { + BytesRef key = opts.keys[hit % opts.keys.length]; + for (HashTable table : tables) { + bh.consume(table.add(key)); + } + } + + Releasables.close(tables); + } + + @State(Scope.Benchmark) + public static class Options { + @Param({ "MURMUR3", "T1HA1" }) + public Type type; + + @Param({ + "1", + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "10", + "12", + "14", + "16", + "19", + "22", + "25", + "29", + "33", + "38", + "43", + "50", + "57", + "65", + "75", + "86", + "97", + "109", + "124", + "141", + "161", + "182", + "204", + "229", + "262", + "297", + "336", + "380", + "430", + "482", + "550", + "610", + "704", + "801", + "914", + "1042", + "1178", + "1343", + "1532", 
+ "1716", + "1940", + "2173", + "2456", + "2751", + "3082", + "3514", + "4006", + "4487", + "5026", + "5730", + "6418", + "7317", + "8196", + "9180", + "10374", + "11723", + "13247", + "14837", + "16915", + "19114", + "21599", + "24623", + "28071", + "32001", + "36482", + "41590", + "46581", + "52637", + "58954", + "67208", + "76618", + "86579", + "97835", + "109576", + "122726", + "138681", + "156710", + "175516", + "198334", + "222135", + "248792", + "281135", + "320494", + "365364", + "409208", + "466498", + "527143", + "595672", + "667153", + "753883", + "851888", + "971153" }) + public Integer size; + + @Param({ "5", "28", "59", "105" }) + public Integer length; + + private BytesRef[] keys; + + @Setup + public void setup() { + assert size <= Math.pow(26, length) : "key length too small to generate the required number of keys"; + // Seeding with size will help produce deterministic results for the same size, and avoid similar + // looking clusters for different sizes, in case one hash function got unlucky. + Random random = new Random(size); + Set<BytesRef> seen = new HashSet<>(); + keys = new BytesRef[size]; + for (int i = 0; i < size; i++) { + BytesRef key; + do { + key = new BytesRef( + random.ints(97, 123) + .limit(length) + .collect(StringBuilder::new, StringBuilder::appendCodePoint, StringBuilder::append) + .toString() + ); + } while (seen.contains(key)); + keys[i] = key; + seen.add(key); + } + } + } + + public enum Type { + MURMUR3(() -> new HashTable() { + private final BytesRefHash table = new BytesRefHash(1, 0.6f, key -> { + // Repeating the lower bits into upper bits to make the fingerprint work. + // Alternatively, use a 64-bit murmur3 hash, but that won't represent the baseline. + long h = StringHelper.murmurhash3_x86_32(key.bytes, key.offset, key.length, 0) & 0xFFFFFFFFL; + return h | (h << 32); + }, BigArrays.NON_RECYCLING_INSTANCE); + + @Override + public long add(BytesRef key) { + return table.add(key); + } + + @Override + public void close() { + table.close(); + } + }), + + T1HA1(() -> new HashTable() { + private final BytesRefHash table = new BytesRefHash( + 1, + 0.6f, + key -> T1ha1.hash(key.bytes, key.offset, key.length, 0), + BigArrays.NON_RECYCLING_INSTANCE + ); + + @Override + public long add(BytesRef key) { + return table.add(key); + } + + @Override + public void close() { + table.close(); + } + }); + + private final Supplier<HashTable> supplier; + + Type(Supplier<HashTable> supplier) { + this.supplier = supplier; + } + + public HashTable create() { + return supplier.get(); + } + } + + interface HashTable extends Releasable { + long add(BytesRef key); + } +} diff --git a/benchmarks/src/main/java/org/opensearch/common/util/LongHashBenchmark.java b/benchmarks/src/main/java/org/opensearch/common/util/LongHashBenchmark.java index fa75dd2c91f5a..4d746b790a348 100644 --- a/benchmarks/src/main/java/org/opensearch/common/util/LongHashBenchmark.java +++ b/benchmarks/src/main/java/org/opensearch/common/util/LongHashBenchmark.java @@ -8,6 +8,7 @@ package org.opensearch.common.util; +import org.opensearch.common.lease.Releasable; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; import org.openjdk.jmh.annotations.Fork; @@ -20,7 +21,6 @@ import org.openjdk.jmh.annotations.State; import org.openjdk.jmh.annotations.Warmup; import org.openjdk.jmh.infra.Blackhole; -import org.opensearch.common.lease.Releasable; import java.util.Random; import java.util.concurrent.TimeUnit; diff --git 
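An illustrative sketch (not from the commit) of the construction the benchmark's T1HA1 table option uses: a BytesRefHash with initial capacity 1, load factor 0.6, a pluggable 64-bit hasher, and non-recycling big arrays. The negative return for a duplicate key follows the usual OpenSearch open-addressing convention (-1 - id), assumed here:

import org.apache.lucene.util.BytesRef;
import org.opensearch.common.hash.T1ha1;
import org.opensearch.common.util.BigArrays;
import org.opensearch.common.util.BytesRefHash;

public class TableSketch {
    public static void main(String[] args) {
        BytesRefHash table = new BytesRefHash(
            1,
            0.6f,
            key -> T1ha1.hash(key.bytes, key.offset, key.length, 0),
            BigArrays.NON_RECYCLING_INSTANCE
        );
        System.out.println(table.add(new BytesRef("key"))); // id of the newly inserted key
        System.out.println(table.add(new BytesRef("key"))); // negative value: key already present
        table.close();
    }
}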
a/benchmarks/src/main/java20/org/opensearch/common/round/RoundableSupplier.java b/benchmarks/src/main/java20/org/opensearch/common/round/RoundableSupplier.java new file mode 100644 index 0000000000000..e81c1b137bd30 --- /dev/null +++ b/benchmarks/src/main/java20/org/opensearch/common/round/RoundableSupplier.java @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.round; + +import java.util.function.Supplier; + +public class RoundableSupplier implements Supplier<Roundable> { + private final Supplier<Roundable> delegate; + + RoundableSupplier(String type, long[] values, int size) { + switch (type) { + case "binary": + delegate = () -> new BinarySearcher(values, size); + break; + case "linear": + delegate = () -> new BidirectionalLinearSearcher(values, size); + break; + case "btree": + delegate = () -> new BtreeSearcher(values, size); + break; + default: + throw new IllegalArgumentException("invalid type: " + type); + } + } + + @Override + public Roundable get() { + return delegate.get(); + } +} diff --git a/build.gradle b/build.gradle index c7b6987b1103f..2aac4a1e893e9 100644 --- a/build.gradle +++ b/build.gradle @@ -54,8 +54,8 @@ plugins { id 'lifecycle-base' id 'opensearch.docker-support' id 'opensearch.global-build-info' - id "com.diffplug.spotless" version "6.19.0" apply false - id "org.gradle.test-retry" version "1.5.4" apply false + id "com.diffplug.spotless" version "6.25.0" apply false + id "org.gradle.test-retry" version "1.5.8" apply false id "test-report-aggregation" id 'jacoco-report-aggregation' } @@ -375,7 +375,7 @@ allprojects { } else { // Link to non-shadowed dependant projects project.javadoc.dependsOn "${upstreamProject.path}:javadoc" - String externalLinkName = upstreamProject.base.archivesBaseName + String externalLinkName = upstreamProject.base.archivesName String artifactPath = dep.group.replaceAll('\\.', '/') + '/' + externalLinkName.replaceAll('\\.', '/') + '/' + dep.version String projectRelativePath = project.relativePath(upstreamProject.buildDir) project.javadoc.options.linksOffline artifactsHost + "/javadoc/" + artifactPath, "${projectRelativePath}/docs/javadoc/" @@ -470,7 +470,7 @@ subprojects { maxFailures = 10 } failOnPassedAfterRetry = false - classRetry { + filter { includeClasses.add("org.opensearch.action.admin.cluster.node.tasks.ResourceAwareTasksTests") includeClasses.add("org.opensearch.action.admin.cluster.tasks.PendingTasksBlocksIT") includeClasses.add("org.opensearch.action.admin.indices.create.CreateIndexIT") @@ -487,7 +487,6 @@ subprojects { includeClasses.add("org.opensearch.cluster.metadata.IndexGraveyardTests") includeClasses.add("org.opensearch.cluster.routing.MovePrimaryFirstTests") includeClasses.add("org.opensearch.cluster.routing.allocation.decider.DiskThresholdDeciderIT") - includeClasses.add("org.opensearch.cluster.service.MasterServiceTests") includeClasses.add("org.opensearch.common.util.concurrent.QueueResizableOpenSearchThreadPoolExecutorTests") includeClasses.add("org.opensearch.gateway.RecoveryFromGatewayIT") includeClasses.add("org.opensearch.gateway.ReplicaShardAllocatorIT") @@ -502,6 +501,7 @@ subprojects { includeClasses.add("org.opensearch.index.reindex.DeleteByQueryBasicTests") includeClasses.add("org.opensearch.index.reindex.UpdateByQueryBasicTests") includeClasses.add("org.opensearch.index.shard.IndexShardIT") + 
includeClasses.add("org.opensearch.index.shard.RemoteIndexShardTests") includeClasses.add("org.opensearch.index.shard.RemoteStoreRefreshListenerTests") includeClasses.add("org.opensearch.index.translog.RemoteFSTranslogTests") includeClasses.add("org.opensearch.indices.DateMathIndexExpressionsIntegrationIT") @@ -516,7 +516,6 @@ subprojects { includeClasses.add("org.opensearch.remotestore.CreateRemoteIndexClusterDefaultDocRep") includeClasses.add("org.opensearch.remotestore.CreateRemoteIndexIT") includeClasses.add("org.opensearch.remotestore.CreateRemoteIndexTranslogDisabledIT") - includeClasses.add("org.opensearch.remotestore.RemoteIndexPrimaryRelocationIT") includeClasses.add("org.opensearch.remotestore.RemoteStoreBackpressureIT") includeClasses.add("org.opensearch.remotestore.RemoteStoreIT") includeClasses.add("org.opensearch.remotestore.RemoteStoreRefreshListenerIT") @@ -545,6 +544,7 @@ subprojects { includeClasses.add("org.opensearch.snapshots.SnapshotStatusApisIT") includeClasses.add("org.opensearch.test.rest.ClientYamlTestSuiteIT") includeClasses.add("org.opensearch.upgrade.DetectEsInstallationTaskTests") + includeClasses.add("org.opensearch.cluster.MinimumClusterManagerNodesIT") } } } diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 4ca7cde1cebac..0562ecc6ee61b 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -103,38 +103,44 @@ dependencies { api localGroovy() api 'commons-codec:commons-codec:1.16.0' - api 'org.apache.commons:commons-compress:1.23.0' - api 'org.apache.ant:ant:1.10.13' + api 'org.apache.commons:commons-compress:1.25.0' + api 'org.apache.ant:ant:1.10.14' api 'com.netflix.nebula:gradle-extra-configurations-plugin:10.0.0' - api 'com.netflix.nebula:nebula-publishing-plugin:20.3.0' + api 'com.netflix.nebula:nebula-publishing-plugin:21.0.0' api 'com.netflix.nebula:gradle-info-plugin:12.1.6' api 'org.apache.rat:apache-rat:0.15' - api 'commons-io:commons-io:2.13.0' - api "net.java.dev.jna:jna:5.13.0" + api 'commons-io:commons-io:2.15.1' + api "net.java.dev.jna:jna:5.14.0" api 'com.github.johnrengelman:shadow:8.1.1' api 'org.jdom:jdom2:2.0.6.1' api "org.jetbrains.kotlin:kotlin-stdlib-jdk8:${props.getProperty('kotlin')}" - api 'de.thetaphi:forbiddenapis:3.5.1' - api 'com.avast.gradle:gradle-docker-compose-plugin:0.16.12' + api 'de.thetaphi:forbiddenapis:3.6' + api 'com.avast.gradle:gradle-docker-compose-plugin:0.17.6' api "org.yaml:snakeyaml:${props.getProperty('snakeyaml')}" - api 'org.apache.maven:maven-model:3.9.3' - api 'com.networknt:json-schema-validator:1.0.86' + api 'org.apache.maven:maven-model:3.9.6' + api 'com.networknt:json-schema-validator:1.2.0' api 'org.jruby.jcodings:jcodings:1.0.58' api 'org.jruby.joni:joni:2.2.1' api "com.fasterxml.jackson.core:jackson-databind:${props.getProperty('jackson_databind')}" - api "org.ajoberstar.grgit:grgit-core:5.2.0" + api "org.ajoberstar.grgit:grgit-core:5.2.1" testFixturesApi "junit:junit:${props.getProperty('junit')}" testFixturesApi "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${props.getProperty('randomizedrunner')}" testFixturesApi gradleApi() testFixturesApi gradleTestKit() - testImplementation 'com.github.tomakehurst:wiremock-jre8-standalone:2.35.0' + testImplementation 'org.wiremock:wiremock-standalone:3.3.1' testImplementation "org.mockito:mockito-core:${props.getProperty('mockito')}" integTestImplementation('org.spockframework:spock-core:2.3-groovy-3.0') { exclude module: "groovy" } } +configurations.all { + resolutionStrategy { + force 
"com.google.guava:guava:${props.getProperty('guava')}" + } +} + /***************************************************************************** * Bootstrap repositories * *****************************************************************************/ diff --git a/buildSrc/reaper/src/main/java/org/opensearch/gradle/reaper/Reaper.java b/buildSrc/reaper/src/main/java/org/opensearch/gradle/reaper/Reaper.java index c5b4de157c75c..662510fbbf61c 100644 --- a/buildSrc/reaper/src/main/java/org/opensearch/gradle/reaper/Reaper.java +++ b/buildSrc/reaper/src/main/java/org/opensearch/gradle/reaper/Reaper.java @@ -45,17 +45,16 @@ /** * A standalone process that will reap external services after a build dies. - * * <h2>Input</h2> * Since how to reap a given service is platform and service dependent, this tool * operates on system commands to execute. It takes a single argument, a directory * that will contain files with reaping commands. Each line in each file will be * executed with {@link Runtime#exec(String)}. - * + * <p> * The main method will wait indefinitely on the parent process (Gradle) by * reading from stdin. When Gradle shuts down, whether normally or abruptly, the * pipe will be broken and read will return. - * + * <p> * The reaper will then iterate over the files in the configured directory, * and execute the given commands. If any commands fail, a failure message is * written to stderr. Otherwise, the input file will be deleted. If no inputs diff --git a/buildSrc/src/main/groovy/org/opensearch/gradle/CheckCompatibilityTask.groovy b/buildSrc/src/main/groovy/org/opensearch/gradle/CheckCompatibilityTask.groovy index ee6446fec6d57..11a57ab776562 100644 --- a/buildSrc/src/main/groovy/org/opensearch/gradle/CheckCompatibilityTask.groovy +++ b/buildSrc/src/main/groovy/org/opensearch/gradle/CheckCompatibilityTask.groovy @@ -40,9 +40,12 @@ class CheckCompatibilityTask extends DefaultTask { @TaskAction void checkCompatibility() { - logger.info("Checking compatibility for: $repositoryUrls for $ref") repositoryUrls.parallelStream().forEach { repositoryUrl -> + logger.lifecycle("Checking compatibility for: $repositoryUrl with ref: $ref") def tempDir = File.createTempDir() + def stdout = new ByteArrayOutputStream() + def errout = new ByteArrayOutputStream() + def skipped = false; try { if (cloneAndCheckout(repositoryUrl, tempDir)) { if (repositoryUrl.toString().endsWithAny('notifications', 'notifications.git')) { @@ -50,29 +53,34 @@ class CheckCompatibilityTask extends DefaultTask { } project.exec { workingDir = tempDir - def stdout = new ByteArrayOutputStream() executable = (OperatingSystem.current().isWindows()) ? 
'gradlew.bat' : './gradlew' - args 'assemble' + args ('assemble') standardOutput stdout + errorOutput errout } compatibleComponents.add(repositoryUrl) } else { - logger.lifecycle("Skipping compatibility check for $repositoryUrl") + skipped = true } } catch (ex) { failedComponents.add(repositoryUrl) logger.info("Gradle assemble failed for $repositoryUrl", ex) } finally { + if (skipped) { + logger.lifecycle("Skipping compatibility check for $repositoryUrl") + } else { + logger.lifecycle("Finished compatibility check for $repositoryUrl") + logger.info("Standard output for $repositoryUrl build:\n\n" + stdout.toString()) + logger.error("Error output for $repositoryUrl build:\n\n" + errout.toString()) + } tempDir.deleteDir() } } if (!failedComponents.isEmpty()) { logger.lifecycle("Incompatible components: $failedComponents") - logger.info("Compatible components: $compatibleComponents") } if (!gitFailedComponents.isEmpty()) { logger.lifecycle("Components skipped due to git failures: $gitFailedComponents") - logger.info("Compatible components: $compatibleComponents") } if (!compatibleComponents.isEmpty()) { logger.lifecycle("Compatible components: $compatibleComponents") @@ -81,8 +89,16 @@ class CheckCompatibilityTask extends DefaultTask { protected static List getRepoUrls() { def json = new JsonSlurper().parse(REPO_URL.toURL()) - def labels = json.projects.values() - return labels as List + def repository = json.projects.values() + def repoUrls = replaceSshWithHttps(repository as List) + return repoUrls + } + + protected static replaceSshWithHttps(List<String> repoList) { + repoList.replaceAll { element -> + element.replace("git@github.com:", "https://github.com/") + } + return repoList } protected boolean cloneAndCheckout(repoUrl, directory) { diff --git a/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy index 556763333d279..13f5f8724c6f2 100644 --- a/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy @@ -89,7 +89,7 @@ class PluginBuildPlugin implements Plugin<Project> { String name = extension1.name BasePluginExtension base = project.getExtensions().findByType(BasePluginExtension.class) - base.archivesBaseName = name + base.archivesName = name project.description = extension1.description if (extension1.name == null) { @@ -155,7 +155,7 @@ class PluginBuildPlugin implements Plugin<Project> { // Only configure publishing if applied externally if (extension.hasClientJar) { project.pluginManager.apply('com.netflix.nebula.maven-base-publish') - // Only change Jar tasks, we don't want a -client zip so we can't change archivesBaseName + // Only change Jar tasks, we don't want a -client zip so we can't change archivesName project.tasks.withType(Jar) { archiveBaseName = archiveBaseName.get() + "-client" } @@ -163,7 +163,7 @@ class PluginBuildPlugin implements Plugin<Project> { project.publishing.publications.nebula(MavenPublication).artifactId(extension.name + "-client") final BasePluginExtension base = project.getExtensions().findByType(BasePluginExtension.class) project.tasks.withType(GenerateMavenPom.class).configureEach { GenerateMavenPom generatePOMTask -> - generatePOMTask.destination = "${project.buildDir}/distributions/${base.archivesBaseName}-client-${project.versions.opensearch}.pom" + generatePOMTask.destination = 
"${project.buildDir}/distributions/${base.archivesName}-client-${project.versions.opensearch}.pom" } } else { if (project.plugins.hasPlugin(MavenPublishPlugin)) { diff --git a/buildSrc/src/main/java/org/opensearch/gradle/BwcVersions.java b/buildSrc/src/main/java/org/opensearch/gradle/BwcVersions.java index cddd03ccc2019..4d45640b75e3d 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/BwcVersions.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/BwcVersions.java @@ -52,15 +52,15 @@ /** * A container for opensearch supported version information used in BWC testing. - * + * <p> * Parse the Java source file containing the versions declarations and use the known rules to figure out which are all * the version the current one is wire and index compatible with. * On top of this, figure out which of these are unreleased and provide the branch they can be built from. - * + * <p> * Note that in this context, currentVersion is the unreleased version this build operates on. * At any point in time there will surely be four such unreleased versions being worked on, * thus currentVersion will be one of these. - * + * <p> * Considering: * <dl> * <dt>M, M > 0</dt> @@ -84,7 +84,7 @@ * Each build is only concerned with versions before it, as those are the ones that need to be tested * for backwards compatibility. We never look forward, and don't add forward facing version number to branches of previous * version. - * + * <p> * Each branch has a current version, and expected compatible versions are parsed from the server code's Version` class. * We can reliably figure out which the unreleased versions are due to the convention of always adding the next unreleased * version number to server in all branches when a version is released. diff --git a/buildSrc/src/main/java/org/opensearch/gradle/ConcatFilesTask.java b/buildSrc/src/main/java/org/opensearch/gradle/ConcatFilesTask.java index 96e3702d5a729..e5cd767ba0e67 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/ConcatFilesTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/ConcatFilesTask.java @@ -31,6 +31,14 @@ package org.opensearch.gradle; +import org.gradle.api.DefaultTask; +import org.gradle.api.file.FileTree; +import org.gradle.api.tasks.Input; +import org.gradle.api.tasks.InputFiles; +import org.gradle.api.tasks.Optional; +import org.gradle.api.tasks.OutputFile; +import org.gradle.api.tasks.TaskAction; + import java.io.File; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -40,14 +48,6 @@ import java.util.LinkedHashSet; import java.util.List; -import org.gradle.api.DefaultTask; -import org.gradle.api.file.FileTree; -import org.gradle.api.tasks.Input; -import org.gradle.api.tasks.InputFiles; -import org.gradle.api.tasks.Optional; -import org.gradle.api.tasks.OutputFile; -import org.gradle.api.tasks.TaskAction; - /** * Concatenates a list of files into one and removes duplicate lines. 
*/ diff --git a/buildSrc/src/main/java/org/opensearch/gradle/EmptyDirTask.java b/buildSrc/src/main/java/org/opensearch/gradle/EmptyDirTask.java index f16b667f96ed4..96d7c69699c68 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/EmptyDirTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/EmptyDirTask.java @@ -31,16 +31,16 @@ package org.opensearch.gradle; -import java.io.File; - -import javax.inject.Inject; - import org.gradle.api.DefaultTask; import org.gradle.api.tasks.Input; import org.gradle.api.tasks.Internal; import org.gradle.api.tasks.TaskAction; import org.gradle.internal.file.Chmod; +import javax.inject.Inject; + +import java.io.File; + /** * Creates an empty directory. */ diff --git a/buildSrc/src/main/java/org/opensearch/gradle/JavaPackageType.java b/buildSrc/src/main/java/org/opensearch/gradle/JavaPackageType.java new file mode 100644 index 0000000000000..2acc335d80df0 --- /dev/null +++ b/buildSrc/src/main/java/org/opensearch/gradle/JavaPackageType.java @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gradle; + +public enum JavaPackageType { + NONE, + JRE, + JDK +} diff --git a/buildSrc/src/main/java/org/opensearch/gradle/JavaVariant.java b/buildSrc/src/main/java/org/opensearch/gradle/JavaVariant.java new file mode 100644 index 0000000000000..5f576984627a8 --- /dev/null +++ b/buildSrc/src/main/java/org/opensearch/gradle/JavaVariant.java @@ -0,0 +1,197 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
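An illustrative sketch (not from the commit) of how the new JavaPackageType enum is meant to be consumed; later in this diff, OpenSearchDistribution replaces its boolean bundledJdk flag with this type, defaulting to JDK:

import org.opensearch.gradle.JavaPackageType;

public class BundleSketch {
    static String describe(JavaPackageType bundled) {
        switch (bundled) {
            case NONE:
                return "no bundled Java runtime";
            case JRE:
                return "bundled JRE only";
            default:
                return "full bundled JDK";
        }
    }

    public static void main(String[] args) {
        System.out.println(describe(JavaPackageType.JDK)); // the new default
    }
}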
+ */ + +package org.opensearch.gradle; + +import org.gradle.api.Buildable; +import org.gradle.api.artifacts.Configuration; +import org.gradle.api.model.ObjectFactory; +import org.gradle.api.provider.Property; +import org.gradle.api.tasks.TaskDependency; + +import java.io.File; +import java.util.Arrays; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +abstract class JavaVariant implements Buildable, Iterable<File> { + + private static final List<String> ALLOWED_ARCHITECTURES = Collections.unmodifiableList( + Arrays.asList("aarch64", "x64", "s390x", "ppc64le") + ); + private static final List<String> ALLOWED_VENDORS = Collections.unmodifiableList(Arrays.asList("adoptium", "adoptopenjdk", "openjdk")); + private static final List<String> ALLOWED_PLATFORMS = Collections.unmodifiableList( + Arrays.asList("darwin", "freebsd", "linux", "mac", "windows") + ); + private static final Pattern VERSION_PATTERN = Pattern.compile("(\\d+)(\\.\\d+\\.\\d+)?\\+(\\d+(?:\\.\\d+)?)(@([a-f0-9]{32}))?"); + private static final Pattern LEGACY_VERSION_PATTERN = Pattern.compile("(\\d)(u\\d+)(?:\\+|\\-)(b\\d+?)(@([a-f0-9]{32}))?"); + + private final String name; + private final Configuration configuration; + + private final Property<String> vendor; + private final Property<String> version; + private final Property<String> platform; + private final Property<String> architecture; + private String baseVersion; + private String major; + private String build; + private String hash; + + JavaVariant(String name, Configuration configuration, ObjectFactory objectFactory) { + this.name = name; + this.configuration = configuration; + this.vendor = objectFactory.property(String.class); + this.version = objectFactory.property(String.class); + this.platform = objectFactory.property(String.class); + this.architecture = objectFactory.property(String.class); + } + + public String getName() { + return name; + } + + public String getVendor() { + return vendor.get(); + } + + public void setVendor(final String vendor) { + if (ALLOWED_VENDORS.contains(vendor) == false) { + throw new IllegalArgumentException("unknown vendor [" + vendor + "] for jdk [" + name + "], must be one of " + ALLOWED_VENDORS); + } + this.vendor.set(vendor); + } + + public String getVersion() { + return version.get(); + } + + public void setVersion(String version) { + if (VERSION_PATTERN.matcher(version).matches() == false && LEGACY_VERSION_PATTERN.matcher(version).matches() == false) { + throw new IllegalArgumentException("malformed version [" + version + "] for jdk [" + name + "]"); + } + parseVersion(version); + this.version.set(version); + } + + public String getPlatform() { + return platform.get(); + } + + public void setPlatform(String platform) { + if (ALLOWED_PLATFORMS.contains(platform) == false) { + throw new IllegalArgumentException( + "unknown platform [" + platform + "] for jdk [" + name + "], must be one of " + ALLOWED_PLATFORMS + ); + } + this.platform.set(platform); + } + + public String getArchitecture() { + return architecture.get(); + } + + public void setArchitecture(final String architecture) { + String jdkArchitecture = translateJdkArchitecture(architecture); + if (ALLOWED_ARCHITECTURES.contains(jdkArchitecture) == false) { + throw new IllegalArgumentException( + "unknown architecture [" + jdkArchitecture + "] for jdk [" + name + "], must be one of " + ALLOWED_ARCHITECTURES + ); + } + this.architecture.set(jdkArchitecture); + } + + public String 
getBaseVersion() { + return baseVersion; + } + + public String getMajor() { + return major; + } + + public String getBuild() { + return build; + } + + public String getHash() { + return hash; + } + + public String getPath() { + return configuration.getSingleFile().toString(); + } + + public String getConfigurationName() { + return configuration.getName(); + } + + @Override + public String toString() { + return getPath(); + } + + @Override + public TaskDependency getBuildDependencies() { + return configuration.getBuildDependencies(); + } + + // internal, make this jdks configuration unmodifiable + void finalizeValues() { + if (version.isPresent() == false) { + throw new IllegalArgumentException("version not specified for jdk [" + name + "]"); + } + if (platform.isPresent() == false) { + throw new IllegalArgumentException("platform not specified for jdk [" + name + "]"); + } + if (vendor.isPresent() == false) { + throw new IllegalArgumentException("vendor not specified for jdk [" + name + "]"); + } + if (architecture.isPresent() == false) { + throw new IllegalArgumentException("architecture not specified for jdk [" + name + "]"); + } + version.finalizeValue(); + platform.finalizeValue(); + vendor.finalizeValue(); + architecture.finalizeValue(); + } + + @Override + public Iterator<File> iterator() { + return configuration.iterator(); + } + + private void parseVersion(String version) { + // decompose the bundled jdk version, broken into elements as: [feature, interim, update, build] + // Note the "patch" version is not yet handled here, as it has not yet been used by java. + Matcher jdkVersionMatcher = VERSION_PATTERN.matcher(version); + if (jdkVersionMatcher.matches() == false) { + // Try again with the pre-Java9 version format + jdkVersionMatcher = LEGACY_VERSION_PATTERN.matcher(version); + + if (jdkVersionMatcher.matches() == false) { + throw new IllegalArgumentException("Malformed jdk version [" + version + "]"); + } + } + + baseVersion = jdkVersionMatcher.group(1) + (jdkVersionMatcher.group(2) != null ? (jdkVersionMatcher.group(2)) : ""); + major = jdkVersionMatcher.group(1); + build = jdkVersionMatcher.group(3); + hash = jdkVersionMatcher.group(5); + } + + private String translateJdkArchitecture(String architecture) { + /* + * Jdk uses aarch64 from ARM. Translating from arm64 to aarch64 which Jdk understands. + */ + return "arm64".equals(architecture) ? 
"aarch64" : architecture; + } + +} diff --git a/buildSrc/src/main/java/org/opensearch/gradle/Jdk.java b/buildSrc/src/main/java/org/opensearch/gradle/Jdk.java index 06e857744be2d..3218abe726639 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/Jdk.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/Jdk.java @@ -32,140 +32,13 @@ package org.opensearch.gradle; -import org.gradle.api.Buildable; import org.gradle.api.artifacts.Configuration; import org.gradle.api.model.ObjectFactory; -import org.gradle.api.provider.Property; -import org.gradle.api.tasks.TaskDependency; import org.gradle.internal.os.OperatingSystem; -import java.io.File; -import java.util.Arrays; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -public class Jdk implements Buildable, Iterable<File> { - - private static final List<String> ALLOWED_ARCHITECTURES = Collections.unmodifiableList( - Arrays.asList("aarch64", "x64", "s390x", "ppc64le") - ); - private static final List<String> ALLOWED_VENDORS = Collections.unmodifiableList(Arrays.asList("adoptium", "adoptopenjdk", "openjdk")); - private static final List<String> ALLOWED_PLATFORMS = Collections.unmodifiableList( - Arrays.asList("darwin", "freebsd", "linux", "mac", "windows") - ); - private static final Pattern VERSION_PATTERN = Pattern.compile("(\\d+)(\\.\\d+\\.\\d+)?\\+(\\d+(?:\\.\\d+)?)(@([a-f0-9]{32}))?"); - private static final Pattern LEGACY_VERSION_PATTERN = Pattern.compile("(\\d)(u\\d+)(?:\\+|\\-)(b\\d+?)(@([a-f0-9]{32}))?"); - - private final String name; - private final Configuration configuration; - - private final Property<String> vendor; - private final Property<String> version; - private final Property<String> platform; - private final Property<String> architecture; - private String baseVersion; - private String major; - private String build; - private String hash; - +public class Jdk extends JavaVariant { Jdk(String name, Configuration configuration, ObjectFactory objectFactory) { - this.name = name; - this.configuration = configuration; - this.vendor = objectFactory.property(String.class); - this.version = objectFactory.property(String.class); - this.platform = objectFactory.property(String.class); - this.architecture = objectFactory.property(String.class); - } - - public String getName() { - return name; - } - - public String getVendor() { - return vendor.get(); - } - - public void setVendor(final String vendor) { - if (ALLOWED_VENDORS.contains(vendor) == false) { - throw new IllegalArgumentException("unknown vendor [" + vendor + "] for jdk [" + name + "], must be one of " + ALLOWED_VENDORS); - } - this.vendor.set(vendor); - } - - public String getVersion() { - return version.get(); - } - - public void setVersion(String version) { - if (VERSION_PATTERN.matcher(version).matches() == false && LEGACY_VERSION_PATTERN.matcher(version).matches() == false) { - throw new IllegalArgumentException("malformed version [" + version + "] for jdk [" + name + "]"); - } - parseVersion(version); - this.version.set(version); - } - - public String getPlatform() { - return platform.get(); - } - - public void setPlatform(String platform) { - if (ALLOWED_PLATFORMS.contains(platform) == false) { - throw new IllegalArgumentException( - "unknown platform [" + platform + "] for jdk [" + name + "], must be one of " + ALLOWED_PLATFORMS - ); - } - this.platform.set(platform); - } - - public String getArchitecture() { - return architecture.get(); - } - - public void 
setArchitecture(final String architecture) { - String jdkArchitecture = translateJdkArchitecture(architecture); - if (ALLOWED_ARCHITECTURES.contains(jdkArchitecture) == false) { - throw new IllegalArgumentException( - "unknown architecture [" + jdkArchitecture + "] for jdk [" + name + "], must be one of " + ALLOWED_ARCHITECTURES - ); - } - this.architecture.set(jdkArchitecture); - } - - public String getBaseVersion() { - return baseVersion; - } - - public String getMajor() { - return major; - } - - public String getBuild() { - return build; - } - - public String getHash() { - return hash; - } - - public String getPath() { - return configuration.getSingleFile().toString(); - } - - public String getConfigurationName() { - return configuration.getName(); - } - - @Override - public String toString() { - return getPath(); - } - - @Override - public TaskDependency getBuildDependencies() { - return configuration.getBuildDependencies(); + super(name, configuration, objectFactory); } public Object getBinJavaPath() { @@ -190,56 +63,4 @@ private String getHomeRoot() { boolean isOSX = "mac".equals(getPlatform()) || "darwin".equals(getPlatform()); return getPath() + (isOSX ? "/Contents/Home" : ""); } - - // internal, make this jdks configuration unmodifiable - void finalizeValues() { - if (version.isPresent() == false) { - throw new IllegalArgumentException("version not specified for jdk [" + name + "]"); - } - if (platform.isPresent() == false) { - throw new IllegalArgumentException("platform not specified for jdk [" + name + "]"); - } - if (vendor.isPresent() == false) { - throw new IllegalArgumentException("vendor not specified for jdk [" + name + "]"); - } - if (architecture.isPresent() == false) { - throw new IllegalArgumentException("architecture not specified for jdk [" + name + "]"); - } - version.finalizeValue(); - platform.finalizeValue(); - vendor.finalizeValue(); - architecture.finalizeValue(); - } - - @Override - public Iterator<File> iterator() { - return configuration.iterator(); - } - - private void parseVersion(String version) { - // decompose the bundled jdk version, broken into elements as: [feature, interim, update, build] - // Note the "patch" version is not yet handled here, as it has not yet been used by java. - Matcher jdkVersionMatcher = VERSION_PATTERN.matcher(version); - if (jdkVersionMatcher.matches() == false) { - // Try again with the pre-Java9 version format - jdkVersionMatcher = LEGACY_VERSION_PATTERN.matcher(version); - - if (jdkVersionMatcher.matches() == false) { - throw new IllegalArgumentException("Malformed jdk version [" + version + "]"); - } - } - - baseVersion = jdkVersionMatcher.group(1) + (jdkVersionMatcher.group(2) != null ? (jdkVersionMatcher.group(2)) : ""); - major = jdkVersionMatcher.group(1); - build = jdkVersionMatcher.group(3); - hash = jdkVersionMatcher.group(5); - } - - private String translateJdkArchitecture(String architecture) { - /* - * Jdk uses aarch64 from ARM. Translating from arm64 to aarch64 which Jdk understands. - */ - return "arm64".equals(architecture) ? 
"aarch64" : architecture; - } - } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/Jre.java b/buildSrc/src/main/java/org/opensearch/gradle/Jre.java new file mode 100644 index 0000000000000..473bfc4860b80 --- /dev/null +++ b/buildSrc/src/main/java/org/opensearch/gradle/Jre.java @@ -0,0 +1,18 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gradle; + +import org.gradle.api.artifacts.Configuration; +import org.gradle.api.model.ObjectFactory; + +public class Jre extends JavaVariant { + Jre(String name, Configuration configuration, ObjectFactory objectFactory) { + super(name, configuration, objectFactory); + } +} diff --git a/buildSrc/src/main/java/org/opensearch/gradle/JreDownloadPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/JreDownloadPlugin.java new file mode 100644 index 0000000000000..5a00f41f07a60 --- /dev/null +++ b/buildSrc/src/main/java/org/opensearch/gradle/JreDownloadPlugin.java @@ -0,0 +1,147 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gradle; + +import org.opensearch.gradle.transform.SymbolicLinkPreservingUntarTransform; +import org.opensearch.gradle.transform.UnzipTransform; +import org.gradle.api.GradleException; +import org.gradle.api.NamedDomainObjectContainer; +import org.gradle.api.Plugin; +import org.gradle.api.Project; +import org.gradle.api.artifacts.Configuration; +import org.gradle.api.artifacts.dsl.RepositoryHandler; +import org.gradle.api.artifacts.repositories.IvyArtifactRepository; +import org.gradle.api.artifacts.type.ArtifactTypeDefinition; +import org.gradle.api.attributes.Attribute; +import org.gradle.api.internal.artifacts.ArtifactAttributes; + +public class JreDownloadPlugin implements Plugin<Project> { + public static final String VENDOR_ADOPTIUM = "adoptium"; + + private static final String REPO_NAME_PREFIX = "jre_repo_"; + private static final String EXTENSION_NAME = "jres"; + public static final String JRE_TRIMMED_PREFIX = "jdk-?\\d.*-jre"; + + @Override + public void apply(Project project) { + Attribute<Boolean> jreAttribute = Attribute.of("jre", Boolean.class); + project.getDependencies().getAttributesSchema().attribute(jreAttribute); + project.getDependencies().getArtifactTypes().maybeCreate(ArtifactTypeDefinition.ZIP_TYPE); + project.getDependencies().registerTransform(UnzipTransform.class, transformSpec -> { + transformSpec.getFrom() + .attribute(ArtifactAttributes.ARTIFACT_FORMAT, ArtifactTypeDefinition.ZIP_TYPE) + .attribute(jreAttribute, true); + transformSpec.getTo() + .attribute(ArtifactAttributes.ARTIFACT_FORMAT, ArtifactTypeDefinition.DIRECTORY_TYPE) + .attribute(jreAttribute, true); + transformSpec.parameters(parameters -> parameters.setTrimmedPrefixPattern(JRE_TRIMMED_PREFIX)); + }); + + ArtifactTypeDefinition tarArtifactTypeDefinition = project.getDependencies().getArtifactTypes().maybeCreate("tar.gz"); + project.getDependencies().registerTransform(SymbolicLinkPreservingUntarTransform.class, transformSpec -> { + transformSpec.getFrom() + .attribute(ArtifactAttributes.ARTIFACT_FORMAT, tarArtifactTypeDefinition.getName()) + .attribute(jreAttribute, true); + transformSpec.getTo() + .attribute(ArtifactAttributes.ARTIFACT_FORMAT, 
ArtifactTypeDefinition.DIRECTORY_TYPE) + .attribute(jreAttribute, true); + transformSpec.parameters(parameters -> parameters.setTrimmedPrefixPattern(JRE_TRIMMED_PREFIX)); + }); + + NamedDomainObjectContainer<Jre> jresContainer = project.container(Jre.class, name -> { + Configuration configuration = project.getConfigurations().create("jre_" + name); + configuration.setCanBeConsumed(false); + configuration.getAttributes().attribute(ArtifactAttributes.ARTIFACT_FORMAT, ArtifactTypeDefinition.DIRECTORY_TYPE); + configuration.getAttributes().attribute(jreAttribute, true); + Jre jre = new Jre(name, configuration, project.getObjects()); + configuration.defaultDependencies(dependencies -> { + jre.finalizeValues(); + setupRepository(project, jre); + dependencies.add(project.getDependencies().create(dependencyNotation(jre))); + }); + return jre; + }); + project.getExtensions().add(EXTENSION_NAME, jresContainer); + } + + private void setupRepository(Project project, Jre jre) { + RepositoryHandler repositories = project.getRepositories(); + + /* + * Define the appropriate repository for the given JRE vendor and version. + * + * For Oracle/OpenJDK/AdoptOpenJDK we define a repository per-version. + */ + String repoName = REPO_NAME_PREFIX + jre.getVendor() + "_" + jre.getVersion(); + String repoUrl; + String artifactPattern; + + if (jre.getVendor().equals(VENDOR_ADOPTIUM)) { + repoUrl = "https://github.com/adoptium/temurin" + jre.getMajor() + "-binaries/releases/download/"; + + if (jre.getMajor().equals("8")) { + // JDK-8 updates are always suffixed with 'U' (e.g. OpenJDK8U). + artifactPattern = "jdk" + + jre.getBaseVersion() + + "-" + + jre.getBuild() + + "/OpenJDK" + + jre.getMajor() + + "U" + + "-jre_[classifier]_[module]_hotspot_" + + jre.getBaseVersion() + + jre.getBuild() + + ".[ext]"; + } else { + // JDK updates are suffixed with 'U' (e.g. OpenJDK17U), whereas GA releases are not (e.g. OpenJDK17). + // To distinguish between those, the GA releases have only the major version component (e.g. 17+32), + // while the updates always have minor/patch components (e.g. 17.0.1+12), so checking for the presence + // of the version separator '.' should be enough. + artifactPattern = "jdk-" + jre.getBaseVersion() + "+" + jre.getBuild() + "/OpenJDK" + jre.getMajor() // JDK-20 uses the 'U' suffix all the time, whether it is an update or a GA release + (jre.getBaseVersion().contains(".") || jre.getBaseVersion().matches("^2\\d+$") ? "U" : "") + "-jre_[classifier]_[module]_hotspot_" + jre.getBaseVersion() + "_" + jre.getBuild() + ".[ext]"; + } + } else { + throw new GradleException("Unknown JDK vendor [" + jre.getVendor() + "]"); + } + + // Define the repository if we haven't already + if (repositories.findByName(repoName) == null) { + repositories.ivy(repo -> { + repo.setName(repoName); + repo.setUrl(repoUrl); + repo.metadataSources(IvyArtifactRepository.MetadataSources::artifact); + repo.patternLayout(layout -> layout.artifact(artifactPattern)); + repo.content(repositoryContentDescriptor -> repositoryContentDescriptor.includeGroup(groupName(jre))); + }); + } + } + + @SuppressWarnings("unchecked") + public static NamedDomainObjectContainer<Jre> getContainer(Project project) { + return (NamedDomainObjectContainer<Jre>) project.getExtensions().getByName(EXTENSION_NAME); + } + + private static String dependencyNotation(Jre jre) { + String platformDep = jre.getPlatform().equals("darwin") || jre.getPlatform().equals("mac") ? "mac" : jre.getPlatform(); + String extension = jre.getPlatform().equals("windows") ?
"zip" : "tar.gz"; + + return groupName(jre) + ":" + platformDep + ":" + jre.getBaseVersion() + ":" + jre.getArchitecture() + "@" + extension; + } + + private static String groupName(Jre jre) { + return jre.getVendor() + "_" + jre.getMajor() + "_jre"; + } + +} diff --git a/buildSrc/src/main/java/org/opensearch/gradle/LoggedExec.java b/buildSrc/src/main/java/org/opensearch/gradle/LoggedExec.java index 0512ed72f5e47..1a78a7dbb2d10 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/LoggedExec.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/LoggedExec.java @@ -47,6 +47,7 @@ import org.gradle.process.JavaExecSpec; import javax.inject.Inject; + import java.io.ByteArrayOutputStream; import java.io.File; import java.io.IOException; diff --git a/buildSrc/src/main/java/org/opensearch/gradle/LoggingOutputStream.java b/buildSrc/src/main/java/org/opensearch/gradle/LoggingOutputStream.java index 5ae7ad1595e2f..5259700b3a63d 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/LoggingOutputStream.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/LoggingOutputStream.java @@ -38,7 +38,7 @@ /** * Writes data passed to this stream as log messages. - * + * <p> * The stream will be flushed whenever a newline is detected. * Allows setting an optional prefix before each line of output. */ diff --git a/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchDistribution.java b/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchDistribution.java index 968bd13bd4011..0575c23fee9f6 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchDistribution.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchDistribution.java @@ -101,7 +101,7 @@ public boolean shouldExtract() { private final Property<String> version; private final Property<Type> type; private final Property<Platform> platform; - private final Property<Boolean> bundledJdk; + private final Property<JavaPackageType> bundledJdk; private final Property<Boolean> failIfUnavailable; private final Configuration extracted; @@ -120,7 +120,7 @@ public boolean shouldExtract() { this.type = objectFactory.property(Type.class); this.type.convention(Type.ARCHIVE); this.platform = objectFactory.property(Platform.class); - this.bundledJdk = objectFactory.property(Boolean.class); + this.bundledJdk = objectFactory.property(JavaPackageType.class); this.failIfUnavailable = objectFactory.property(Boolean.class).convention(true); this.extracted = extractedConfiguration; } @@ -154,8 +154,8 @@ public void setType(Type type) { this.type.set(type); } - public boolean getBundledJdk() { - return bundledJdk.getOrElse(true); + public JavaPackageType getBundledJdk() { + return bundledJdk.getOrElse(JavaPackageType.JDK); } public boolean isDocker() { @@ -163,7 +163,7 @@ public boolean isDocker() { return type == Type.DOCKER; } - public void setBundledJdk(Boolean bundledJdk) { + public void setBundledJdk(JavaPackageType bundledJdk) { this.bundledJdk.set(bundledJdk); } @@ -266,7 +266,7 @@ void finalizeValues() { } if (bundledJdk.isPresent() == false) { - bundledJdk.set(true); + bundledJdk.set(JavaPackageType.JDK); } version.finalizeValue(); diff --git a/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchJavaPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchJavaPlugin.java index edfaa06cea265..018781c7b30c4 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchJavaPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchJavaPlugin.java @@ -34,6 +34,7 @@ import 
com.github.jengelman.gradle.plugins.shadow.tasks.ShadowJar; import nebula.plugin.info.InfoBrokerPlugin; + import org.opensearch.gradle.info.BuildParams; import org.opensearch.gradle.info.GlobalBuildInfoPlugin; import org.opensearch.gradle.precommit.PrecommitTaskPlugin; diff --git a/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java index 451bd9cf83053..2ea8c2d015ecc 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java @@ -33,6 +33,7 @@ package org.opensearch.gradle; import com.github.jengelman.gradle.plugins.shadow.ShadowBasePlugin; + import org.opensearch.gradle.info.BuildParams; import org.opensearch.gradle.info.GlobalBuildInfoPlugin; import org.opensearch.gradle.jvm.JvmTestSuiteHelper; diff --git a/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java index 97e923c366598..7ec21bba18c64 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java @@ -77,7 +77,7 @@ public void apply(Project project) { } private static String getArchivesBaseName(Project project) { - return project.getExtensions().getByType(BasePluginExtension.class).getArchivesBaseName(); + return project.getExtensions().getByType(BasePluginExtension.class).getArchivesName().get(); } /**Configuration generation of maven poms. */ diff --git a/buildSrc/src/main/java/org/opensearch/gradle/RepositoriesSetupPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/RepositoriesSetupPlugin.java index 63b88f671c84c..0c901b9726992 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/RepositoriesSetupPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/RepositoriesSetupPlugin.java @@ -94,7 +94,7 @@ public static void configureRepositories(Project project) { String revision = matcher.group(1); MavenArtifactRepository luceneRepo = repos.maven(repo -> { repo.setName("lucene-snapshots"); - repo.setUrl("https://d1nvenhzbhpy0q.cloudfront.net/snapshots/lucene/"); + repo.setUrl("https://ci.opensearch.org/ci/dbc/snapshots/lucene/"); }); repos.exclusiveContent(exclusiveRepo -> { exclusiveRepo.filter( diff --git a/buildSrc/src/main/java/org/opensearch/gradle/VersionProperties.java b/buildSrc/src/main/java/org/opensearch/gradle/VersionProperties.java index 7c942358e12c3..4d8b62d95dff1 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/VersionProperties.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/VersionProperties.java @@ -76,6 +76,10 @@ public static String getBundledJdk(final String platform) { return getBundledJdk(platform, null); } + public static String getBundledJre(final String platform, final String arch) { + return getBundledJdk(platform, arch); + } + public static String getBundledJdkVendor() { return bundledJdkVendor; } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerBuildTask.java b/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerBuildTask.java index bb2a6d37362e1..08f0e7488a43c 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerBuildTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerBuildTask.java @@ -52,6 +52,7 @@ import org.gradle.workers.WorkerExecutor; import javax.inject.Inject; + import java.io.IOException; import java.util.Arrays; diff --git 
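The archivesBaseName accessor replaced above is deprecated in newer Gradle versions; an illustrative sketch (not from the commit) of the Provider-based pattern this diff adopts, mirroring the PublishPlugin change:

import org.gradle.api.Project;
import org.gradle.api.plugins.BasePluginExtension;

public class ArchivesNameSketch {
    static String archivesName(Project project) {
        // Property<String> replaces the deprecated plain getter; get() resolves the configured value
        return project.getExtensions().getByType(BasePluginExtension.class).getArchivesName().get();
    }
}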
a/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerSupportService.java b/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerSupportService.java index 2eb2852e3e55e..fc78792bb3551 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerSupportService.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerSupportService.java @@ -31,6 +31,7 @@ package org.opensearch.gradle.docker; +import org.apache.tools.ant.taskdefs.condition.Os; import org.opensearch.gradle.Version; import org.opensearch.gradle.info.BuildParams; import org.gradle.api.GradleException; @@ -40,9 +41,9 @@ import org.gradle.api.services.BuildServiceParameters; import org.gradle.process.ExecOperations; import org.gradle.process.ExecResult; -import org.apache.tools.ant.taskdefs.condition.Os; import javax.inject.Inject; + import java.io.ByteArrayOutputStream; import java.io.File; import java.io.IOException; @@ -68,7 +69,9 @@ public abstract class DockerSupportService implements BuildService<DockerSupport // Defines the possible locations of the Docker CLI. These will be searched in order. private static String[] DOCKER_BINARIES_UNIX = { "/usr/bin/docker", "/usr/local/bin/docker" }; - private static String[] DOCKER_BINARIES_WINDOWS = { System.getenv("PROGRAMFILES") + "\\Docker\\Docker\\resources\\bin\\docker.exe" }; + private static String[] DOCKER_BINARIES_WINDOWS = { + System.getenv("PROGRAMFILES") + "\\Docker\\Docker\\resources\\bin\\docker.exe", + System.getenv("SystemRoot") + "\\System32\\docker.exe" /* Github Actions */ }; private static String[] DOCKER_BINARIES = Os.isFamily(Os.FAMILY_WINDOWS) ? DOCKER_BINARIES_WINDOWS : DOCKER_BINARIES_UNIX; diff --git a/buildSrc/src/main/java/org/opensearch/gradle/http/WaitForHttpResource.java b/buildSrc/src/main/java/org/opensearch/gradle/http/WaitForHttpResource.java index 41f4054910d97..54c544a299b84 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/http/WaitForHttpResource.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/http/WaitForHttpResource.java @@ -39,6 +39,7 @@ import javax.net.ssl.KeyManager; import javax.net.ssl.SSLContext; import javax.net.ssl.TrustManagerFactory; + import java.io.File; import java.io.FileInputStream; import java.io.IOException; @@ -82,6 +83,24 @@ public WaitForHttpResource(String protocol, String host, int numberOfNodes) thro this(new URL(protocol + "://" + host + "/_cluster/health?wait_for_nodes=>=" + numberOfNodes + "&wait_for_status=yellow")); } + public WaitForHttpResource(String protocol, String host, String username, String password, int numberOfNodes) + throws MalformedURLException { + this( + new URL( + protocol + + "://" + + username + + ":" + + password + + "@" + + host + + "/_cluster/health?wait_for_nodes=>=" + + numberOfNodes + + "&wait_for_status=yellow" + ) + ); + } + public WaitForHttpResource(URL url) { this.url = url; } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/info/GlobalBuildInfoPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/info/GlobalBuildInfoPlugin.java index bde7e916013cd..448ba8a96ef02 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/info/GlobalBuildInfoPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/info/GlobalBuildInfoPlugin.java @@ -49,6 +49,7 @@ import org.gradle.util.GradleVersion; import javax.inject.Inject; + import java.io.File; import java.io.FileInputStream; import java.io.IOException; diff --git a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalBwcGitPlugin.java 
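An illustrative sketch (not from the commit) of what the new credentialed WaitForHttpResource constructor builds: the username and password are embedded as basic-auth user info in the cluster-health URL (the host and credentials below are placeholders):

import org.opensearch.gradle.http.WaitForHttpResource;

import java.net.MalformedURLException;

public class WaitSketch {
    public static void main(String[] args) throws MalformedURLException {
        // Resolves to https://admin:secret@localhost:9200/_cluster/health?wait_for_nodes=>=1&wait_for_status=yellow
        new WaitForHttpResource("https", "localhost:9200", "admin", "secret", 1);
    }
}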
b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalBwcGitPlugin.java index 11270e5c9a51d..c6e49dc44d6bd 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalBwcGitPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalBwcGitPlugin.java @@ -50,10 +50,12 @@ import org.gradle.process.ExecSpec; import javax.inject.Inject; + import java.io.ByteArrayOutputStream; import java.io.File; import java.io.IOException; import java.io.UncheckedIOException; + import static java.util.Arrays.asList; public class InternalBwcGitPlugin implements Plugin<Project> { @@ -74,7 +76,7 @@ public InternalBwcGitPlugin(ProviderFactory providerFactory, ExecOperations exec public void apply(Project project) { this.project = project; this.gitExtension = project.getExtensions().create("bwcGitConfig", BwcGitExtension.class); - Provider<String> remote = providerFactory.systemProperty("bwc.remote").forUseAtConfigurationTime().orElse("opensearch-project"); + Provider<String> remote = providerFactory.systemProperty("bwc.remote").orElse("opensearch-project"); TaskContainer tasks = project.getTasks(); TaskProvider<LoggedExec> createCloneTaskProvider = tasks.register("createClone", LoggedExec.class, createClone -> { @@ -103,7 +105,6 @@ public void apply(Project project) { String remoteRepo = remote.get(); // for testing only we can override the base remote url String remoteRepoUrl = providerFactory.systemProperty("testRemoteRepo") - .forUseAtConfigurationTime() .getOrElse("https://github.com/" + remoteRepo + "/OpenSearch.git"); addRemote.setCommandLine(asList("git", "remote", "add", remoteRepo, remoteRepoUrl)); }); @@ -111,7 +112,6 @@ public void apply(Project project) { TaskProvider<LoggedExec> fetchLatestTaskProvider = tasks.register("fetchLatest", LoggedExec.class, fetchLatest -> { Provider<Object> gitFetchLatest = project.getProviders() .systemProperty("tests.bwc.git_fetch_latest") - .forUseAtConfigurationTime() .orElse("true") .map(fetchProp -> { if ("true".equals(fetchProp)) { diff --git a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionArchiveCheckPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionArchiveCheckPlugin.java index 96a2928b6e71e..4949476038bdb 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionArchiveCheckPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionArchiveCheckPlugin.java @@ -44,6 +44,7 @@ import org.gradle.api.tasks.TaskProvider; import javax.inject.Inject; + import java.io.File; import java.io.IOException; import java.nio.file.Files; diff --git a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionArchiveSetupPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionArchiveSetupPlugin.java index 0944f3960467b..7ab91448252f2 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionArchiveSetupPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionArchiveSetupPlugin.java @@ -41,9 +41,9 @@ import org.gradle.api.Task; import org.gradle.api.artifacts.Configuration; import org.gradle.api.artifacts.type.ArtifactTypeDefinition; -import org.gradle.api.plugins.BasePlugin; import org.gradle.api.logging.Logger; import org.gradle.api.logging.Logging; +import org.gradle.api.plugins.BasePlugin; import org.gradle.api.tasks.AbstractCopyTask; import org.gradle.api.tasks.Sync; import 
org.gradle.api.tasks.TaskContainer; @@ -52,6 +52,7 @@ import org.gradle.api.tasks.bundling.Compression; import org.gradle.api.tasks.bundling.Zip; import org.gradle.internal.os.OperatingSystem; + import java.io.File; import static org.opensearch.gradle.util.Util.capitalize; diff --git a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionBwcSetupPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionBwcSetupPlugin.java index 33869e76680cd..6892af1b17f97 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionBwcSetupPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionBwcSetupPlugin.java @@ -48,6 +48,7 @@ import org.gradle.language.base.plugins.LifecycleBasePlugin; import javax.inject.Inject; + import java.io.File; import java.util.ArrayList; import java.util.List; diff --git a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionDownloadPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionDownloadPlugin.java index f4368b1cecc59..6a54612320c6c 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionDownloadPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionDownloadPlugin.java @@ -37,6 +37,7 @@ import org.opensearch.gradle.DistributionDependency; import org.opensearch.gradle.DistributionDownloadPlugin; import org.opensearch.gradle.DistributionResolution; +import org.opensearch.gradle.JavaPackageType; import org.opensearch.gradle.OpenSearchDistribution; import org.opensearch.gradle.Version; import org.opensearch.gradle.VersionProperties; @@ -99,7 +100,7 @@ private void registerInternalDistributionResolutions(NamedDomainObjectContainer< resolutions.register("bwc", distributionResolution -> distributionResolution.setResolver((project, distribution) -> { BwcVersions.UnreleasedVersionInfo unreleasedInfo = bwcVersions.unreleasedInfo(Version.fromString(distribution.getVersion())); if (unreleasedInfo != null) { - if (!distribution.getBundledJdk()) { + if (distribution.getBundledJdk() == JavaPackageType.NONE) { throw new GradleException( "Configuring a snapshot bwc distribution ('" + distribution.getName() @@ -167,8 +168,10 @@ private static String distributionProjectName(OpenSearchDistribution distributio ? 
"" : "-" + architecture.toString().toLowerCase(); - if (distribution.getBundledJdk() == false) { + if (distribution.getBundledJdk() == JavaPackageType.NONE) { projectName += "no-jdk-"; + } else if (distribution.getBundledJdk() == JavaPackageType.JRE) { + projectName += "jre-"; } switch (distribution.getType()) { case ARCHIVE: diff --git a/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java b/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java index 4849065c78a6d..5d7e78589306f 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java @@ -9,16 +9,15 @@ import org.gradle.api.Plugin; import org.gradle.api.Project; +import org.gradle.api.Task; import org.gradle.api.publish.PublishingExtension; import org.gradle.api.publish.maven.MavenPublication; +import org.gradle.api.publish.maven.plugins.MavenPublishPlugin; import java.nio.file.Path; import java.util.Set; import java.util.stream.Collectors; -import org.gradle.api.Task; -import org.gradle.api.publish.maven.plugins.MavenPublishPlugin; - public class Publish implements Plugin<Project> { public final static String PUBLICATION_NAME = "pluginZip"; diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/FilePermissionsTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/FilePermissionsTask.java index d525a4a1e2c69..2c17666d8ee0c 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/FilePermissionsTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/FilePermissionsTask.java @@ -31,15 +31,6 @@ package org.opensearch.gradle.precommit; -import java.io.File; -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.attribute.PosixFileAttributeView; -import java.nio.file.attribute.PosixFilePermission; -import java.util.List; -import java.util.Set; -import java.util.stream.Collectors; - import org.apache.tools.ant.taskdefs.condition.Os; import org.opensearch.gradle.util.GradleUtils; import org.gradle.api.DefaultTask; @@ -57,6 +48,15 @@ import org.gradle.api.tasks.util.PatternFilterable; import org.gradle.api.tasks.util.PatternSet; +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.attribute.PosixFileAttributeView; +import java.nio.file.attribute.PosixFilePermission; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; + /** * Checks source files for correct file permissions. 
*/ diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenApisPrecommitPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenApisPrecommitPlugin.java index 328edda8b1787..6b89aa8b60197 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenApisPrecommitPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenApisPrecommitPlugin.java @@ -34,7 +34,9 @@ import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis; import de.thetaphi.forbiddenapis.gradle.ForbiddenApisPlugin; + import groovy.lang.Closure; + import org.opensearch.gradle.ExportOpenSearchBuildResourcesTask; import org.opensearch.gradle.info.BuildParams; import org.opensearch.gradle.util.GradleUtils; diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/JarHellTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/JarHellTask.java index c6a0d7abf7da1..7726133562e9f 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/JarHellTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/JarHellTask.java @@ -33,7 +33,6 @@ package org.opensearch.gradle.precommit; import org.opensearch.gradle.LoggedExec; - import org.gradle.api.file.FileCollection; import org.gradle.api.tasks.CacheableTask; import org.gradle.api.tasks.CompileClasspath; diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/TestingConventionRule.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/TestingConventionRule.java index aa81ef75701fa..db46d2e3edc55 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/TestingConventionRule.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/TestingConventionRule.java @@ -40,7 +40,7 @@ /** * Represent rules for tests enforced by the @{link {@link TestingConventionsTasks}} - * + * <p> * Rules are identified by name, tests must have this name as a suffix and implement one of the base classes * and be part of all the specified tasks. 
*/ diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/TestingConventionsTasks.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/TestingConventionsTasks.java index dad3bfca5f67d..d66b1f9d25cdd 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/TestingConventionsTasks.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/TestingConventionsTasks.java @@ -53,6 +53,8 @@ import org.gradle.api.tasks.util.PatternSet; import org.gradle.internal.Factory; +import javax.inject.Inject; + import java.io.File; import java.io.IOException; import java.lang.annotation.Annotation; @@ -76,8 +78,6 @@ import java.util.stream.Collectors; import java.util.stream.Stream; -import javax.inject.Inject; - public class TestingConventionsTasks extends DefaultTask { private static final String TEST_METHOD_PREFIX = "test"; diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java index 6139291b9be1b..efcd01f163089 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java @@ -32,6 +32,7 @@ package org.opensearch.gradle.precommit; import de.thetaphi.forbiddenapis.cli.CliMain; + import org.apache.commons.io.output.NullOutputStream; import org.opensearch.gradle.LoggedExec; import org.opensearch.gradle.OS; diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ValidateJsonAgainstSchemaTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ValidateJsonAgainstSchemaTask.java index 1ecc712ab27ca..d829071c07e3c 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ValidateJsonAgainstSchemaTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ValidateJsonAgainstSchemaTask.java @@ -33,12 +33,14 @@ package org.opensearch.gradle.precommit; import com.fasterxml.jackson.databind.ObjectMapper; + import com.networknt.schema.JsonSchema; import com.networknt.schema.JsonSchemaException; import com.networknt.schema.JsonSchemaFactory; import com.networknt.schema.SchemaValidatorsConfig; import com.networknt.schema.SpecVersion; import com.networknt.schema.ValidationMessage; + import org.gradle.api.DefaultTask; import org.gradle.api.UncheckedIOException; import org.gradle.api.file.FileCollection; diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ValidateJsonNoKeywordsTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ValidateJsonNoKeywordsTask.java index b3ac804566e29..c69420f2216b2 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ValidateJsonNoKeywordsTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ValidateJsonNoKeywordsTask.java @@ -36,6 +36,7 @@ import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ObjectNode; + import org.gradle.api.DefaultTask; import org.gradle.api.GradleException; import org.gradle.api.file.FileCollection; diff --git a/buildSrc/src/main/java/org/opensearch/gradle/tar/SymbolicLinkPreservingTar.java b/buildSrc/src/main/java/org/opensearch/gradle/tar/SymbolicLinkPreservingTar.java index 1423b52c443d9..e82d8ed73ced2 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/tar/SymbolicLinkPreservingTar.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/tar/SymbolicLinkPreservingTar.java @@ -61,7 +61,7 @@ /** * A 
custom archive task that assembles a tar archive that preserves symbolic links. - * + * <p> * This task is necessary because the built-in task {@link org.gradle.api.tasks.bundling.Tar} does not preserve symbolic links. */ public class SymbolicLinkPreservingTar extends Tar { diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java index d32172758cfce..bc44f81a81aff 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java @@ -34,9 +34,10 @@ import org.opensearch.gradle.Architecture; import org.opensearch.gradle.DistributionDownloadPlugin; -import org.opensearch.gradle.OpenSearchDistribution; +import org.opensearch.gradle.JavaPackageType; import org.opensearch.gradle.Jdk; import org.opensearch.gradle.JdkDownloadPlugin; +import org.opensearch.gradle.OpenSearchDistribution; import org.opensearch.gradle.SystemPropertyCommandLineArgumentProvider; import org.opensearch.gradle.Version; import org.opensearch.gradle.VersionProperties; @@ -48,6 +49,7 @@ import org.opensearch.gradle.util.Util; import org.opensearch.gradle.vagrant.VagrantBasePlugin; import org.opensearch.gradle.vagrant.VagrantExtension; +import org.opensearch.gradle.vagrant.VagrantMachine; import org.gradle.api.Action; import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Plugin; @@ -61,7 +63,6 @@ import org.gradle.api.tasks.Copy; import org.gradle.api.tasks.TaskProvider; import org.gradle.api.tasks.testing.Test; -import org.opensearch.gradle.vagrant.VagrantMachine; import java.io.File; import java.util.ArrayList; @@ -71,13 +72,14 @@ import java.util.Locale; import java.util.Map; import java.util.Map.Entry; +import java.util.Set; import java.util.function.Supplier; import java.util.stream.Stream; public class DistroTestPlugin implements Plugin<Project> { - private static final String SYSTEM_JDK_VERSION = "11.0.20+8"; + private static final String SYSTEM_JDK_VERSION = "21.0.2+13"; private static final String SYSTEM_JDK_VENDOR = "adoptium"; - private static final String GRADLE_JDK_VERSION = "17.0.8+7"; + private static final String GRADLE_JDK_VERSION = "21.0.2+13"; private static final String GRADLE_JDK_VENDOR = "adoptium"; // all distributions used by distro tests. this is temporary until tests are per distribution @@ -137,7 +139,7 @@ public void apply(Project project) { } if ((distribution.getType() == OpenSearchDistribution.Type.DEB || distribution.getType() == OpenSearchDistribution.Type.RPM) - && distribution.getBundledJdk()) { + && distribution.getBundledJdk() != JavaPackageType.NONE) { for (Version version : BuildParams.getBwcVersions().getIndexCompatible()) { if (version.before("6.3.0")) { continue; // before opening xpack @@ -379,8 +381,8 @@ private List<OpenSearchDistribution> configureDistributions(Project project) { OpenSearchDistribution.Type.RPM, OpenSearchDistribution.Type.DOCKER )) { - for (boolean bundledJdk : Arrays.asList(true, false)) { - if (bundledJdk == false) { + for (JavaPackageType bundledJdk : Set.of(JavaPackageType.NONE, JavaPackageType.JDK)) { + if (bundledJdk == JavaPackageType.NONE) { // We'll never publish an ARM (arm64) build without a bundled JDK. 
if (architecture == Architecture.ARM64) { continue; @@ -403,8 +405,8 @@ private List<OpenSearchDistribution> configureDistributions(Project project) { OpenSearchDistribution.Platform.LINUX, OpenSearchDistribution.Platform.WINDOWS )) { - for (boolean bundledJdk : Arrays.asList(true, false)) { - if (bundledJdk == false && architecture != Architecture.X64) { + for (JavaPackageType bundledJdk : Set.of(JavaPackageType.NONE, JavaPackageType.JDK)) { + if (bundledJdk == JavaPackageType.NONE && architecture != Architecture.X64) { // We will never publish distributions for non-x86 (amd64) platforms // without a bundled JDK continue; @@ -432,7 +434,7 @@ private static OpenSearchDistribution createDistro( Architecture architecture, OpenSearchDistribution.Type type, OpenSearchDistribution.Platform platform, - boolean bundledJdk, + JavaPackageType bundledJdk, String version ) { String name = distroId(type, platform, bundledJdk, architecture) + "-" + version; @@ -466,11 +468,12 @@ private static boolean isWindows(Project project) { private static String distroId( OpenSearchDistribution.Type type, OpenSearchDistribution.Platform platform, - boolean bundledJdk, + JavaPackageType bundledJdk, Architecture architecture ) { - return (type == OpenSearchDistribution.Type.ARCHIVE ? platform + "-" : "") + type + (bundledJdk ? "" : "-no-jdk") - + (architecture == Architecture.X64 ? "" : "-" + architecture.toString().toLowerCase()); + return (type == OpenSearchDistribution.Type.ARCHIVE ? platform + "-" : "") + type + (bundledJdk != JavaPackageType.NONE + ? (bundledJdk == JavaPackageType.JDK ? "" : "-jre") + : "-no-jdk") + (architecture == Architecture.X64 ? "" : "-" + architecture.toString().toLowerCase()); } private static String destructiveDistroTestTaskName(OpenSearchDistribution distro) { diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/GradleDistroTestTask.java b/buildSrc/src/main/java/org/opensearch/gradle/test/GradleDistroTestTask.java index 2443a30fc05fb..fa417da1a1007 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/GradleDistroTestTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/GradleDistroTestTask.java @@ -32,10 +32,10 @@ package org.opensearch.gradle.test; +import org.opensearch.gradle.vagrant.VagrantMachine; import org.opensearch.gradle.vagrant.VagrantShellTask; import org.gradle.api.tasks.Input; import org.gradle.api.tasks.options.Option; -import org.opensearch.gradle.vagrant.VagrantMachine; import java.util.ArrayList; import java.util.Collections; diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/JNAKernel32Library.java b/buildSrc/src/main/java/org/opensearch/gradle/test/JNAKernel32Library.java index 9d575364f5546..1484dcba1e290 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/JNAKernel32Library.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/JNAKernel32Library.java @@ -34,6 +34,7 @@ import com.sun.jna.Native; import com.sun.jna.WString; + import org.apache.tools.ant.taskdefs.condition.Os; public class JNAKernel32Library { diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/RestIntegTestTask.java b/buildSrc/src/main/java/org/opensearch/gradle/test/RestIntegTestTask.java index 87aa3ea4eae8d..aec31d02b9bee 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/RestIntegTestTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/RestIntegTestTask.java @@ -32,10 +32,9 @@ package org.opensearch.gradle.test; -import org.opensearch.gradle.testclusters.StandaloneRestIntegTestTask; - import 
groovy.lang.Closure; +import org.opensearch.gradle.testclusters.StandaloneRestIntegTestTask; import org.gradle.api.Task; import org.gradle.api.tasks.CacheableTask; @@ -45,7 +44,7 @@ * conventional configured tasks of {@link RestIntegTestTask} */ @CacheableTask -public class RestIntegTestTask extends StandaloneRestIntegTestTask implements TestSuiteConventionMappings { +public abstract class RestIntegTestTask extends StandaloneRestIntegTestTask implements TestSuiteConventionMappings { @SuppressWarnings("rawtypes") @Override public Task configure(Closure closure) { diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/TestSuiteConventionMappings.java b/buildSrc/src/main/java/org/opensearch/gradle/test/TestSuiteConventionMappings.java index 12096b8d365f4..f03b59509a2f9 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/TestSuiteConventionMappings.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/TestSuiteConventionMappings.java @@ -8,9 +8,9 @@ package org.opensearch.gradle.test; +import org.opensearch.gradle.jvm.JvmTestSuiteHelper; import org.gradle.api.Project; import org.gradle.api.internal.ConventionMapping; -import org.opensearch.gradle.jvm.JvmTestSuiteHelper; // Temporary workaround for https://docs.gradle.org/8.1/userguide/upgrading_version_8.html#test_task_default_classpath interface TestSuiteConventionMappings { diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/TestTask.java b/buildSrc/src/main/java/org/opensearch/gradle/test/TestTask.java index 837660d4618be..f7511a2ac7f1c 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/TestTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/TestTask.java @@ -15,7 +15,7 @@ import org.gradle.api.tasks.testing.Test; @CacheableTask -public class TestTask extends Test implements TestSuiteConventionMappings { +public abstract class TestTask extends Test implements TestSuiteConventionMappings { @SuppressWarnings("rawtypes") @Override public Task configure(Closure closure) { diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestApiTask.java b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestApiTask.java index 0275664276877..485561a305291 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestApiTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestApiTask.java @@ -57,6 +57,7 @@ import org.gradle.internal.Factory; import javax.inject.Inject; + import java.io.File; import java.io.IOException; import java.nio.file.Files; diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestTestsTask.java b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestTestsTask.java index ebd6c49fd6157..0d5af7ca06b50 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestTestsTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestTestsTask.java @@ -57,6 +57,7 @@ import org.gradle.internal.Factory; import javax.inject.Inject; + import java.io.File; import java.util.Objects; import java.util.Optional; diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/RestResourcesPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/RestResourcesPlugin.java index 728e36ce98bff..fcadf35593ce6 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/RestResourcesPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/RestResourcesPlugin.java @@ -65,7 +65,7 @@ * <strong>Rest YAML tests :</strong> <br> * When the {@link 
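Note: RestIntegTestTask and TestTask become abstract above (StandaloneRestIntegTestTask follows further down). Declaring Gradle task classes abstract lets Gradle instantiate a generated subclass that supplies @Inject services and managed properties. A minimal sketch of that pattern, with an illustrative task name and action:

import javax.inject.Inject;

import org.gradle.api.DefaultTask;
import org.gradle.api.tasks.TaskAction;
import org.gradle.process.ExecOperations;

// Gradle creates the concrete subclass at runtime and implements the
// abstract @Inject getter with the real ExecOperations service.
public abstract class PingTask extends DefaultTask {
    @Inject
    protected abstract ExecOperations getExecOperations();

    @TaskAction
    void run() {
        getExecOperations().exec(spec -> spec.commandLine("echo", "ping"));
    }
}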
RestResourcesPlugin} has been applied the {@link CopyRestTestsTask} will copy the Rest YAML tests if explicitly * configured with `includeCore` through the `restResources.restTests` extension. - * + * <p> * Additionally you can specify which sourceSetName resources should be copied to. The default is the yamlRestTest source set. * @see CopyRestApiTask * @see CopyRestTestsTask diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchCluster.java b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchCluster.java index ffb3360e3cc55..505f773f6d9da 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchCluster.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchCluster.java @@ -81,7 +81,6 @@ public class OpenSearchCluster implements TestClusterConfiguration, Named { private final FileSystemOperations fileSystemOperations; private final ArchiveOperations archiveOperations; private int nodeIndex = 0; - private int zoneCount = 1; public OpenSearchCluster( @@ -100,7 +99,6 @@ public OpenSearchCluster( this.archiveOperations = archiveOperations; this.workingDirBase = workingDirBase; this.nodes = project.container(OpenSearchNode.class); - // Always add the first node String zone = hasZoneProperty() ? "zone-1" : ""; addNode(clusterName + "-0", zone); @@ -265,6 +263,11 @@ public void keystorePassword(String password) { nodes.all(each -> each.keystorePassword(password)); } + @Override + public void setSecure(boolean secure) { + nodes.all(each -> each.setSecure(secure)); + } + @Override public void cliSetup(String binTool, CharSequence... args) { nodes.all(each -> each.cliSetup(binTool, args)); @@ -367,6 +370,7 @@ private void commonNodeConfig() { } else { nodeNames = nodes.stream().map(OpenSearchNode::getName).map(this::safeName).collect(Collectors.joining(",")); } + OpenSearchNode firstNode = null; for (OpenSearchNode node : nodes) { // Can only configure master nodes if we have node names defined @@ -554,12 +558,25 @@ public OpenSearchNode singleNode() { private void addWaitForClusterHealth() { waitConditions.put("cluster health yellow", (node) -> { try { - WaitForHttpResource wait = new WaitForHttpResource("http", getFirstNode().getHttpSocketURI(), nodes.size()); - - List<Map<String, String>> credentials = getFirstNode().getCredentials(); - if (getFirstNode().getCredentials().isEmpty() == false) { - wait.setUsername(credentials.get(0).get("useradd")); - wait.setPassword(credentials.get(0).get("-p")); + WaitForHttpResource wait; + if (!getFirstNode().isSecure()) { + wait = new WaitForHttpResource("http", getFirstNode().getHttpSocketURI(), nodes.size()); + List<Map<String, String>> credentials = getFirstNode().getCredentials(); + if (getFirstNode().getCredentials().isEmpty() == false) { + wait.setUsername(credentials.get(0).get("useradd")); + wait.setPassword(credentials.get(0).get("-p")); + } + } else { + wait = new WaitForHttpResource( + "https", + getFirstNode().getHttpSocketURI(), + getFirstNode().getCredentials().get(0).get("username"), + getFirstNode().getCredentials().get(0).get("password"), + nodes.size() + ); + wait.setUsername(getFirstNode().getCredentials().get(0).get("username")); + wait.setPassword(getFirstNode().getCredentials().get(0).get("password")); + wait.setCertificateAuthorities(getFirstNode().getExtraConfigFilesMap().get("root-ca.pem")); } return wait.wait(500); } catch (IOException e) { diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchNode.java 
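Note: addWaitForClusterHealth() above now branches on isSecure(): plain http with optional credentials, or https with mandatory credentials plus the root-ca.pem shipped through the node's extra config files. A simplified stand-alone sketch of that decision (credential key names are simplified; the insecure path in the diff reuses keys captured from CLI setup):

import java.io.File;
import java.util.Map;

public class ClusterWaitDemo {
    // Condensed model of the two wait configurations built in the diff.
    static String describeWait(boolean secure, Map<String, String> creds, Map<String, File> extraConfig, int nodes) {
        if (!secure) {
            return "http health check, " + nodes + " node(s)"
                + (creds.isEmpty() ? ", anonymous" : ", user " + creds.get("username"));
        }
        File rootCa = extraConfig.get("root-ca.pem"); // trusted CA, as wired in the diff
        return "https health check, " + nodes + " node(s), user " + creds.get("username")
            + ", ca=" + (rootCa == null ? "<missing>" : rootCa.getName());
    }

    public static void main(String[] args) {
        System.out.println(describeWait(true,
            Map.of("username", "admin", "password", "admin"),
            Map.of("root-ca.pem", new File("root-ca.pem")),
            1));
    }
}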
b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchNode.java index 97c97c18bb012..268de50340cbf 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchNode.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchNode.java @@ -35,12 +35,12 @@ import org.apache.commons.lang3.StringUtils; import org.opensearch.gradle.Architecture; import org.opensearch.gradle.DistributionDownloadPlugin; -import org.opensearch.gradle.OpenSearchDistribution; import org.opensearch.gradle.FileSupplier; import org.opensearch.gradle.LazyPropertyList; import org.opensearch.gradle.LazyPropertyMap; import org.opensearch.gradle.LoggedExec; import org.opensearch.gradle.OS; +import org.opensearch.gradle.OpenSearchDistribution; import org.opensearch.gradle.PropertyNormalization; import org.opensearch.gradle.ReaperService; import org.opensearch.gradle.Version; @@ -160,6 +160,7 @@ public class OpenSearchNode implements TestClusterConfiguration { private final Path httpPortsFile; private final Path tmpDir; + private boolean secure = false; private int currentDistro = 0; private TestDistribution testDistribution; private final List<OpenSearchDistribution> distributions = new ArrayList<>(); @@ -209,6 +210,7 @@ public class OpenSearchNode implements TestClusterConfiguration { setTestDistribution(TestDistribution.INTEG_TEST); setVersion(VersionProperties.getOpenSearch()); this.zone = zone; + this.credentials.add(new HashMap<>()); } @Input @@ -217,6 +219,11 @@ public String getName() { return nameCustomization.apply(name); } + @Internal + public boolean isSecure() { + return secure; + } + @Internal public Version getVersion() { return Version.fromString(distributions.get(currentDistro).getVersion()); @@ -452,6 +459,11 @@ public void setPreserveDataDir(boolean preserveDataDir) { this.preserveDataDir = preserveDataDir; } + @Override + public void setSecure(boolean secure) { + this.secure = secure; + } + @Override public void freeze() { requireNonNull(testDistribution, "null testDistribution passed when configuring test cluster `" + this + "`"); @@ -471,6 +483,18 @@ public Stream<String> logLines() throws IOException { @Override public synchronized void start() { LOGGER.info("Starting `{}`", this); + if (System.getProperty("tests.opensearch.secure") != null + && System.getProperty("tests.opensearch.secure").equalsIgnoreCase("true")) { + secure = true; + } + if (System.getProperty("tests.opensearch.username") != null) { + this.credentials.get(0).put("username", System.getProperty("tests.opensearch.username")); + LOGGER.info("Overwriting username to: " + this.getCredentials().get(0).get("username")); + } + if (System.getProperty("tests.opensearch.password") != null) { + this.credentials.get(0).put("password", System.getProperty("tests.opensearch.password")); + LOGGER.info("Overwriting password to: " + this.getCredentials().get(0).get("password")); + } if (Files.exists(getExtractedDistributionDir()) == false) { throw new TestClustersException("Can not start " + this + ", missing: " + getExtractedDistributionDir()); } @@ -1349,6 +1373,11 @@ public List<?> getExtraConfigFiles() { return extraConfigFiles.getNormalizedCollection(); } + @Internal + public Map<String, File> getExtraConfigFilesMap() { + return extraConfigFiles; + } + @Override @Internal public boolean isProcessAlive() { diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/StandaloneRestIntegTestTask.java 
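Note: OpenSearchNode.start() now honors three JVM system properties so a test run can target a secured cluster without code changes. A tiny sketch of the same lookups (the property names are the ones added in the diff; the defaults below are placeholders):

public class NodeStartupFlagsDemo {
    public static void main(String[] args) {
        // -Dtests.opensearch.secure=true flips the node to the https wait path.
        boolean secure = "true".equalsIgnoreCase(System.getProperty("tests.opensearch.secure", "false"));

        // -Dtests.opensearch.username / -Dtests.opensearch.password overwrite
        // the first credential set on the node.
        String user = System.getProperty("tests.opensearch.username", "<unset>");
        String pass = System.getProperty("tests.opensearch.password", "<unset>");

        System.out.println("secure=" + secure + " user=" + user + " pass=" + pass);
    }
}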
b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/StandaloneRestIntegTestTask.java index abc92c10285ee..ddcbf77b0d5e6 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/StandaloneRestIntegTestTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/StandaloneRestIntegTestTask.java @@ -38,8 +38,8 @@ import org.opensearch.gradle.util.GradleUtils; import org.gradle.api.Task; import org.gradle.api.provider.Provider; -import org.gradle.api.services.internal.BuildServiceRegistryInternal; import org.gradle.api.services.internal.BuildServiceProvider; +import org.gradle.api.services.internal.BuildServiceRegistryInternal; import org.gradle.api.tasks.CacheableTask; import org.gradle.api.tasks.Internal; import org.gradle.api.tasks.Nested; @@ -62,7 +62,7 @@ * {@link Nested} inputs. */ @CacheableTask -public class StandaloneRestIntegTestTask extends Test implements TestClustersAware, FileSystemOperationsAware { +public abstract class StandaloneRestIntegTestTask extends Test implements TestClustersAware, FileSystemOperationsAware { private Collection<OpenSearchCluster> clusters = new HashSet<>(); private Closure<Void> beforeStart; diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClusterConfiguration.java b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClusterConfiguration.java index 87cce2f0b32c0..22c4185a39a98 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClusterConfiguration.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClusterConfiguration.java @@ -36,7 +36,6 @@ import org.gradle.api.file.RegularFile; import org.gradle.api.logging.Logging; import org.gradle.api.provider.Provider; -import org.slf4j.Logger; import java.io.File; import java.util.LinkedHashMap; @@ -47,6 +46,8 @@ import java.util.function.Predicate; import java.util.function.Supplier; +import org.slf4j.Logger; + public interface TestClusterConfiguration { void setVersion(String version); @@ -107,6 +108,8 @@ public interface TestClusterConfiguration { void setPreserveDataDir(boolean preserveDataDir); + void setSecure(boolean secure); + void freeze(); void start(); diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClustersPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClustersPlugin.java index 8735970b0d65b..354c2946889ee 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClustersPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClustersPlugin.java @@ -54,6 +54,7 @@ import org.gradle.api.tasks.TaskState; import javax.inject.Inject; + import java.io.File; import static org.opensearch.gradle.util.GradleUtils.noop; diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java index ae1db26fbc48d..c9e18426966f9 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java @@ -37,6 +37,8 @@ import com.avast.gradle.dockercompose.tasks.ComposeDown; import com.avast.gradle.dockercompose.tasks.ComposePull; import com.avast.gradle.dockercompose.tasks.ComposeUp; + +import org.apache.tools.ant.taskdefs.condition.Os; import org.opensearch.gradle.SystemPropertyCommandLineArgumentProvider; import org.opensearch.gradle.docker.DockerSupportPlugin; import 
org.opensearch.gradle.docker.DockerSupportService; @@ -57,17 +59,17 @@ import org.gradle.api.tasks.TaskContainer; import org.gradle.api.tasks.TaskProvider; import org.gradle.api.tasks.testing.Test; -import org.apache.tools.ant.taskdefs.condition.Os; import javax.inject.Inject; + import java.io.File; import java.io.IOException; import java.io.UncheckedIOException; import java.nio.file.Files; import java.util.Arrays; import java.util.Collections; -import java.util.function.BiConsumer; import java.util.Optional; +import java.util.function.BiConsumer; public class TestFixturesPlugin implements Plugin<Project> { @@ -168,6 +170,7 @@ public void execute(Task task) { .findFirst(); composeExtension.getExecutable().set(dockerCompose.isPresent() ? dockerCompose.get() : "/usr/bin/docker"); + composeExtension.getUseDockerComposeV2().set(false); tasks.named("composeUp").configure(t -> { // Avoid running docker-compose tasks in parallel in CI due to some issues on certain Linux distributions diff --git a/buildSrc/src/main/java/org/opensearch/gradle/util/Util.java b/buildSrc/src/main/java/org/opensearch/gradle/util/Util.java index a4d2c59cf8cad..a2fd1e247b660 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/util/Util.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/util/Util.java @@ -42,6 +42,7 @@ import org.gradle.api.tasks.util.PatternFilterable; import javax.annotation.Nullable; + import java.io.BufferedReader; import java.io.IOException; import java.io.InputStreamReader; diff --git a/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantMachine.java b/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantMachine.java index 75827922fa007..7abf9bf5fbef6 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantMachine.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantMachine.java @@ -43,6 +43,7 @@ import org.gradle.internal.logging.progress.ProgressLoggerFactory; import javax.inject.Inject; + import java.io.File; import java.io.OutputStream; import java.nio.file.Paths; @@ -52,7 +53,7 @@ /** * An helper to manage a vagrant box. - * + * <p> * This is created alongside a {@link VagrantExtension} for a project to manage starting and * stopping a single vagrant box. */ @@ -184,7 +185,7 @@ public void setArgs(String... args) { /** * A function to translate output from the vagrant command execution to the progress line. - * + * <p> * The function takes the current line of output from vagrant, and returns a new * progress line, or {@code null} if there is no update. */ diff --git a/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantShellTask.java b/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantShellTask.java index 85d3e340c50e7..ca1b95183505f 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantShellTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantShellTask.java @@ -47,7 +47,7 @@ /** * A shell script to run within a vagrant VM. - * + * <p> * The script is run as root within the VM. 
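Note: the TestFixturesPlugin hunk above pins the Avast docker-compose plugin to the v1 CLI and points it at a discovered docker binary. A sketch of the same two settings (the plugin id and surrounding wiring are illustrative; only the two setters come from the diff):

import com.avast.gradle.dockercompose.ComposeExtension;

import org.gradle.api.Plugin;
import org.gradle.api.Project;

// Illustrative plugin body: configures the compose extension the way the diff does.
public class ComposePinningDemoPlugin implements Plugin<Project> {
    @Override
    public void apply(Project project) {
        project.getPluginManager().apply("com.avast.gradle.docker-compose");
        ComposeExtension compose = project.getExtensions().getByType(ComposeExtension.class);
        compose.getExecutable().set("/usr/bin/docker");  // fallback path used in the diff
        compose.getUseDockerComposeV2().set(false);      // stay on the docker-compose v1 CLI
    }
}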
*/ public abstract class VagrantShellTask extends DefaultTask { diff --git a/buildSrc/src/main/resources/META-INF/gradle-plugins/opensearch.jre-download.properties b/buildSrc/src/main/resources/META-INF/gradle-plugins/opensearch.jre-download.properties new file mode 100644 index 0000000000000..e9253488ffbeb --- /dev/null +++ b/buildSrc/src/main/resources/META-INF/gradle-plugins/opensearch.jre-download.properties @@ -0,0 +1,12 @@ +# +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. +# + +implementation-class=org.opensearch.gradle.JreDownloadPlugin diff --git a/buildSrc/src/test/java/org/opensearch/gradle/ConcatFilesTaskTests.java b/buildSrc/src/test/java/org/opensearch/gradle/ConcatFilesTaskTests.java index 3b9c1c81e0345..0484939ca4698 100644 --- a/buildSrc/src/test/java/org/opensearch/gradle/ConcatFilesTaskTests.java +++ b/buildSrc/src/test/java/org/opensearch/gradle/ConcatFilesTaskTests.java @@ -31,16 +31,16 @@ package org.opensearch.gradle; +import org.opensearch.gradle.test.GradleUnitTestCase; +import org.gradle.api.Project; +import org.gradle.testfixtures.ProjectBuilder; + import java.io.File; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.util.Arrays; -import org.opensearch.gradle.test.GradleUnitTestCase; -import org.gradle.api.Project; -import org.gradle.testfixtures.ProjectBuilder; - public class ConcatFilesTaskTests extends GradleUnitTestCase { public void testHeaderAdded() throws IOException { diff --git a/buildSrc/src/test/java/org/opensearch/gradle/DistributionDownloadPluginTests.java b/buildSrc/src/test/java/org/opensearch/gradle/DistributionDownloadPluginTests.java index 1a9647573f948..f2f0d59f5f9ab 100644 --- a/buildSrc/src/test/java/org/opensearch/gradle/DistributionDownloadPluginTests.java +++ b/buildSrc/src/test/java/org/opensearch/gradle/DistributionDownloadPluginTests.java @@ -32,19 +32,20 @@ package org.opensearch.gradle; -import org.gradle.api.internal.artifacts.repositories.DefaultIvyArtifactRepository; import org.opensearch.gradle.OpenSearchDistribution.Platform; import org.opensearch.gradle.OpenSearchDistribution.Type; import org.opensearch.gradle.info.BuildParams; import org.opensearch.gradle.test.GradleUnitTestCase; import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Project; +import org.gradle.api.internal.artifacts.repositories.DefaultIvyArtifactRepository; import org.gradle.testfixtures.ProjectBuilder; import java.io.File; import java.util.Arrays; import java.util.TreeSet; +import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.core.StringContains.containsString; public class DistributionDownloadPluginTests extends GradleUnitTestCase { @@ -76,7 +77,14 @@ public class DistributionDownloadPluginTests extends GradleUnitTestCase { ); public void testVersionDefault() { - OpenSearchDistribution distro = checkDistro(createProject(null, false), "testdistro", null, Type.ARCHIVE, Platform.LINUX, true); + OpenSearchDistribution distro = checkDistro( + createProject(null, false), + "testdistro", + null, + Type.ARCHIVE, + Platform.LINUX, + JavaPackageType.JDK + ); assertEquals(distro.getVersion(), VersionProperties.getOpenSearch()); } @@ -123,18 +131,32 @@ public void testBadVersionFormat() { "badversion", Type.ARCHIVE, Platform.LINUX, - 
true, + JavaPackageType.JDK, "Invalid version format: 'badversion'" ); } public void testTypeDefault() { - OpenSearchDistribution distro = checkDistro(createProject(null, false), "testdistro", "5.0.0", null, Platform.LINUX, true); + OpenSearchDistribution distro = checkDistro( + createProject(null, false), + "testdistro", + "5.0.0", + null, + Platform.LINUX, + JavaPackageType.JDK + ); assertEquals(distro.getType(), Type.ARCHIVE); } public void testPlatformDefault() { - OpenSearchDistribution distro = checkDistro(createProject(null, false), "testdistro", "5.0.0", Type.ARCHIVE, null, true); + OpenSearchDistribution distro = checkDistro( + createProject(null, false), + "testdistro", + "5.0.0", + Type.ARCHIVE, + null, + JavaPackageType.JDK + ); assertEquals(distro.getPlatform(), OpenSearchDistribution.CURRENT_PLATFORM); } @@ -151,8 +173,15 @@ public void testPlatformForIntegTest() { } public void testBundledJdkDefault() { - OpenSearchDistribution distro = checkDistro(createProject(null, false), "testdistro", "5.0.0", Type.ARCHIVE, Platform.LINUX, true); - assertTrue(distro.getBundledJdk()); + OpenSearchDistribution distro = checkDistro( + createProject(null, false), + "testdistro", + "5.0.0", + Type.ARCHIVE, + Platform.LINUX, + JavaPackageType.JDK + ); + assertThat(distro.getBundledJdk(), equalTo(JavaPackageType.JDK)); } public void testBundledJdkForIntegTest() { @@ -162,7 +191,7 @@ public void testBundledJdkForIntegTest() { "5.0.0", Type.INTEG_TEST_ZIP, null, - true, + JavaPackageType.JDK, "bundledJdk cannot be set on opensearch distribution [testdistro]" ); } @@ -178,7 +207,7 @@ public void testLocalCurrentVersionIntegTestZip() { public void testLocalCurrentVersionArchives() { for (Platform platform : Platform.values()) { - for (boolean bundledJdk : new boolean[] { true, false }) { + for (JavaPackageType bundledJdk : JavaPackageType.values()) { for (Architecture architecture : Architecture.values()) { // create a new project in each iteration, so that we know we are resolving the only additional project being created Project project = createProject(BWC_MINOR, true); @@ -204,7 +233,7 @@ public void testLocalCurrentVersionArchives() { public void testLocalCurrentVersionPackages() { for (Type packageType : new Type[] { Type.RPM, Type.DEB }) { - for (boolean bundledJdk : new boolean[] { true, false }) { + for (JavaPackageType bundledJdk : JavaPackageType.values()) { Project project = createProject(BWC_MINOR, true); String projectName = projectName(packageType.toString(), bundledJdk); Project packageProject = ProjectBuilder.builder().withParent(packagesProject).withName(projectName).build(); @@ -219,7 +248,7 @@ public void testLocalCurrentVersionPackages() { public void testLocalBwcArchives() { for (Platform platform : Platform.values()) { // note: no non bundled jdk for bwc - String configName = projectName(platform.toString(), true); + String configName = projectName(platform.toString(), JavaPackageType.JDK); configName += (platform == Platform.WINDOWS ? 
"-zip" : "-tar"); checkBwc("minor", configName, BWC_MINOR_VERSION, Type.ARCHIVE, platform, BWC_MINOR, true); @@ -232,7 +261,7 @@ public void testLocalBwcArchives() { public void testLocalBwcPackages() { for (Type packageType : new Type[] { Type.RPM, Type.DEB }) { // note: no non bundled jdk for bwc - String configName = projectName(packageType.toString(), true); + String configName = projectName(packageType.toString(), JavaPackageType.JDK); checkBwc("minor", configName, BWC_MINOR_VERSION, packageType, null, BWC_MINOR, true); checkBwc("staged", configName, BWC_STAGED_VERSION, packageType, null, BWC_STAGED, true); @@ -247,7 +276,7 @@ private void assertDistroError( String version, Type type, Platform platform, - Boolean bundledJdk, + JavaPackageType bundledJdk, String message ) { IllegalArgumentException e = expectThrows( @@ -263,7 +292,7 @@ private OpenSearchDistribution createDistro( String version, Type type, Platform platform, - Boolean bundledJdk + JavaPackageType bundledJdk ) { NamedDomainObjectContainer<OpenSearchDistribution> distros = DistributionDownloadPlugin.getContainer(project); return distros.create(name, distro -> { @@ -289,7 +318,7 @@ private OpenSearchDistribution checkDistro( String version, Type type, Platform platform, - Boolean bundledJdk + JavaPackageType bundledJdk ) { OpenSearchDistribution distribution = createDistro(project, name, version, type, platform, bundledJdk); distribution.finalizeValues(); @@ -315,7 +344,7 @@ private void checkBwc( Project archiveProject = ProjectBuilder.builder().withParent(bwcProject).withName(projectName).build(); archiveProject.getConfigurations().create(config); archiveProject.getArtifacts().add(config, new File("doesnotmatter")); - final OpenSearchDistribution distro = createDistro(project, "distro", version.toString(), type, platform, true); + final OpenSearchDistribution distro = createDistro(project, "distro", version.toString(), type, platform, JavaPackageType.JDK); distro.setArchitecture(Architecture.current()); checkPlugin(project); } @@ -335,7 +364,7 @@ private Project createProject(BwcVersions bwcVersions, boolean isInternal) { return project; } - private static String projectName(String base, boolean bundledJdk) { - return bundledJdk ? base : ("no-jdk-" + base); + private static String projectName(String base, JavaPackageType bundledJdk) { + return (bundledJdk == JavaPackageType.JDK) ? base : ((bundledJdk == JavaPackageType.NONE) ? 
("no-jdk-" + base) : "jre-" + base); } } diff --git a/buildSrc/src/test/java/org/opensearch/gradle/EmptyDirTaskTests.java b/buildSrc/src/test/java/org/opensearch/gradle/EmptyDirTaskTests.java index a6dc268b90557..ad5b8385abaf8 100644 --- a/buildSrc/src/test/java/org/opensearch/gradle/EmptyDirTaskTests.java +++ b/buildSrc/src/test/java/org/opensearch/gradle/EmptyDirTaskTests.java @@ -31,18 +31,19 @@ package org.opensearch.gradle; -import java.io.File; -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.InvalidPathException; -import java.nio.file.Path; - import com.carrotsearch.randomizedtesting.RandomizedTest; + import org.apache.tools.ant.taskdefs.condition.Os; import org.opensearch.gradle.test.GradleUnitTestCase; import org.gradle.api.Project; import org.gradle.testfixtures.ProjectBuilder; +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.InvalidPathException; +import java.nio.file.Path; + public class EmptyDirTaskTests extends GradleUnitTestCase { public void testCreateEmptyDir() throws Exception { diff --git a/buildSrc/src/test/java/org/opensearch/gradle/JdkDownloadPluginTests.java b/buildSrc/src/test/java/org/opensearch/gradle/JdkDownloadPluginTests.java index 55a1eaec98d82..bb394cf51429f 100644 --- a/buildSrc/src/test/java/org/opensearch/gradle/JdkDownloadPluginTests.java +++ b/buildSrc/src/test/java/org/opensearch/gradle/JdkDownloadPluginTests.java @@ -33,14 +33,13 @@ package org.opensearch.gradle; import org.opensearch.gradle.test.GradleUnitTestCase; - -import java.util.UUID; - import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Project; import org.gradle.testfixtures.ProjectBuilder; import org.junit.BeforeClass; +import java.util.UUID; + import static org.hamcrest.CoreMatchers.equalTo; public class JdkDownloadPluginTests extends GradleUnitTestCase { diff --git a/buildSrc/src/test/java/org/opensearch/gradle/plugin/OptionalDependenciesPluginTests.java b/buildSrc/src/test/java/org/opensearch/gradle/plugin/OptionalDependenciesPluginTests.java index ee29f5653d18a..76b669a73610e 100644 --- a/buildSrc/src/test/java/org/opensearch/gradle/plugin/OptionalDependenciesPluginTests.java +++ b/buildSrc/src/test/java/org/opensearch/gradle/plugin/OptionalDependenciesPluginTests.java @@ -12,12 +12,12 @@ import org.apache.maven.model.Model; import org.apache.maven.model.io.xpp3.MavenXpp3Reader; import org.codehaus.plexus.util.xml.pull.XmlPullParserException; +import org.opensearch.gradle.test.GradleUnitTestCase; import org.gradle.testkit.runner.BuildResult; import org.gradle.testkit.runner.GradleRunner; import org.junit.After; import org.junit.Before; import org.junit.rules.TemporaryFolder; -import org.opensearch.gradle.test.GradleUnitTestCase; import java.io.File; import java.io.FileNotFoundException; @@ -28,8 +28,8 @@ import java.io.OutputStream; import java.util.Optional; -import static org.gradle.testkit.runner.TaskOutcome.SUCCESS; import static org.hamcrest.CoreMatchers.is; +import static org.gradle.testkit.runner.TaskOutcome.SUCCESS; public class OptionalDependenciesPluginTests extends GradleUnitTestCase { private TemporaryFolder projectDir; diff --git a/buildSrc/src/test/java/org/opensearch/gradle/plugin/PluginBuildPluginTests.java b/buildSrc/src/test/java/org/opensearch/gradle/plugin/PluginBuildPluginTests.java index 8772a9fbd65ee..fa0693a258222 100644 --- a/buildSrc/src/test/java/org/opensearch/gradle/plugin/PluginBuildPluginTests.java +++ 
b/buildSrc/src/test/java/org/opensearch/gradle/plugin/PluginBuildPluginTests.java @@ -31,19 +31,20 @@ package org.opensearch.gradle.plugin; -import org.gradle.api.tasks.bundling.AbstractArchiveTask; import org.opensearch.gradle.BwcVersions; import org.opensearch.gradle.test.GradleUnitTestCase; import org.gradle.api.Project; import org.gradle.api.Task; import org.gradle.api.internal.project.ProjectInternal; +import org.gradle.api.tasks.bundling.AbstractArchiveTask; import org.gradle.testfixtures.ProjectBuilder; import org.junit.Before; import org.junit.Ignore; -import org.mockito.Mockito; import java.util.stream.Collectors; +import org.mockito.Mockito; + public class PluginBuildPluginTests extends GradleUnitTestCase { private Project project; diff --git a/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java b/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java index ce33b6bdd5b04..8e246ff9ecd11 100644 --- a/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java +++ b/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java @@ -8,12 +8,15 @@ package org.opensearch.gradle.pluginzip; +import org.apache.maven.model.Model; +import org.apache.maven.model.io.xpp3.MavenXpp3Reader; +import org.codehaus.plexus.util.xml.pull.XmlPullParserException; +import org.opensearch.gradle.test.GradleUnitTestCase; import org.gradle.api.Project; import org.gradle.testfixtures.ProjectBuilder; import org.gradle.testkit.runner.BuildResult; import org.gradle.testkit.runner.GradleRunner; import org.gradle.testkit.runner.UnexpectedBuildFailure; -import org.opensearch.gradle.test.GradleUnitTestCase; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -28,17 +31,12 @@ import java.net.URL; import java.nio.charset.StandardCharsets; import java.nio.file.Files; - -import static org.gradle.testkit.runner.TaskOutcome.SUCCESS; - -import org.apache.maven.model.Model; -import org.apache.maven.model.io.xpp3.MavenXpp3Reader; -import org.codehaus.plexus.util.xml.pull.XmlPullParserException; - import java.nio.file.Path; import java.nio.file.Paths; import java.util.List; +import static org.gradle.testkit.runner.TaskOutcome.SUCCESS; + public class PublishTests extends GradleUnitTestCase { private TemporaryFolder projectDir; private static final String TEMPLATE_RESOURCE_FOLDER = "pluginzip"; @@ -60,7 +58,7 @@ public void tearDown() { * This test is used to verify that adding the 'opensearch.pluginzip' to the project * adds some other transitive plugins and tasks under the hood. This is basically * a behavioral test of the {@link Publish#apply(Project)} method. 
- * + * <p> * This is equivalent of having a build.gradle script with just the following section: * <pre> * plugins { @@ -204,7 +202,7 @@ public void useDefaultValues() throws IOException, URISyntaxException, XmlPullPa GradleRunner runner = prepareGradleRunnerFromTemplate("useDefaultValues.gradle", "build", ZIP_PUBLISH_TASK); BuildResult result = runner.build(); - /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */ + /* Check if build and ZIP_PUBLISH_TASK tasks have run well */ assertEquals(SUCCESS, result.task(":" + "build").getOutcome()); assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome()); @@ -279,7 +277,7 @@ public void allProjectsGroup() throws IOException, URISyntaxException, XmlPullPa GradleRunner runner = prepareGradleRunnerFromTemplate("allProjectsGroup.gradle", "build", ZIP_PUBLISH_TASK); BuildResult result = runner.build(); - /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */ + /* Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */ assertEquals(SUCCESS, result.task(":" + "build").getOutcome()); assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome()); @@ -314,7 +312,7 @@ public void groupPriorityLevel() throws IOException, URISyntaxException, XmlPull GradleRunner runner = prepareGradleRunnerFromTemplate("groupPriorityLevel.gradle", "build", ZIP_PUBLISH_TASK); BuildResult result = runner.build(); - /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */ + /* Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */ assertEquals(SUCCESS, result.task(":" + "build").getOutcome()); assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome()); @@ -350,7 +348,7 @@ public void missingPOMEntity() throws IOException, URISyntaxException, XmlPullPa GradleRunner runner = prepareGradleRunnerFromTemplate("missingPOMEntity.gradle", "build", ZIP_PUBLISH_TASK); BuildResult result = runner.build(); - /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */ + /* Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */ assertEquals(SUCCESS, result.task(":" + "build").getOutcome()); assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome()); @@ -397,7 +395,7 @@ public void customizedGroupValue() throws IOException, URISyntaxException, XmlPu GradleRunner runner = prepareGradleRunnerFromTemplate("customizedGroupValue.gradle", "build", ZIP_PUBLISH_TASK); BuildResult result = runner.build(); - /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */ + /* Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */ assertEquals(SUCCESS, result.task(":" + "build").getOutcome()); assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome()); diff --git a/buildSrc/src/test/java/org/opensearch/gradle/precommit/FilePermissionsTaskTests.java b/buildSrc/src/test/java/org/opensearch/gradle/precommit/FilePermissionsTaskTests.java index f47964dd17a7a..0a8cff734ea97 100644 --- a/buildSrc/src/test/java/org/opensearch/gradle/precommit/FilePermissionsTaskTests.java +++ b/buildSrc/src/test/java/org/opensearch/gradle/precommit/FilePermissionsTaskTests.java @@ -31,12 +31,8 @@ package org.opensearch.gradle.precommit; -import java.io.File; -import java.nio.charset.Charset; -import java.nio.file.Files; -import java.util.List; - import com.carrotsearch.randomizedtesting.RandomizedTest; + import org.apache.tools.ant.taskdefs.condition.Os; import org.opensearch.gradle.test.GradleUnitTestCase; import org.gradle.api.GradleException; 
@@ -45,6 +41,11 @@ import org.gradle.testfixtures.ProjectBuilder; import org.junit.Assert; +import java.io.File; +import java.nio.charset.Charset; +import java.nio.file.Files; +import java.util.List; + public class FilePermissionsTaskTests extends GradleUnitTestCase { public void testCheckPermissionsWhenAnExecutableFileExists() throws Exception { diff --git a/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/BaseTestCase.java b/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/BaseTestCase.java index 8e06a1cad0241..f285dcbfd3bd2 100644 --- a/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/BaseTestCase.java +++ b/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/BaseTestCase.java @@ -35,10 +35,12 @@ import com.carrotsearch.randomizedtesting.RandomizedRunner; import com.carrotsearch.randomizedtesting.annotations.TestMethodProviders; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering; -import junit.framework.AssertionFailedError; + import org.junit.Assert; import org.junit.runner.RunWith; +import junit.framework.AssertionFailedError; + @RunWith(RandomizedRunner.class) @TestMethodProviders({ JUnit4MethodProvider.class, JUnit3MethodProvider.class }) @ThreadLeakLingering(linger = 5000) // wait for "Connection worker" to die diff --git a/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/GradleThreadsFilter.java b/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/GradleThreadsFilter.java index b64c719440733..def5248c1f255 100644 --- a/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/GradleThreadsFilter.java +++ b/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/GradleThreadsFilter.java @@ -36,7 +36,7 @@ /** * Filter out threads controlled by gradle that may be created during unit tests. - * + * <p> * Currently this includes pooled threads for Exec as well as file system event watcher threads. */ public class GradleThreadsFilter implements ThreadFilter { diff --git a/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/GradleUnitTestCase.java b/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/GradleUnitTestCase.java index a50a14a0ea932..f8032ba53df4c 100644 --- a/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/GradleUnitTestCase.java +++ b/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/GradleUnitTestCase.java @@ -35,6 +35,7 @@ import com.carrotsearch.randomizedtesting.RandomizedRunner; import com.carrotsearch.randomizedtesting.annotations.TestMethodProviders; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + import org.junit.runner.RunWith; @RunWith(RandomizedRunner.class) diff --git a/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/JUnit3MethodProvider.java b/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/JUnit3MethodProvider.java index 163a903d31832..1a2e36aa78e9f 100644 --- a/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/JUnit3MethodProvider.java +++ b/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/JUnit3MethodProvider.java @@ -43,7 +43,7 @@ /** * Backwards compatible test* method provider (public, non-static). - * + * <p> * copy of org.apache.lucene.util.LuceneJUnit3MethodProvider to avoid a dependency between build and test fw. 
*/ public final class JUnit3MethodProvider implements TestMethodProvider { diff --git a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle index cb8050d1718c4..48dfb206375ca 100644 --- a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle +++ b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle @@ -15,8 +15,9 @@ plugins { repositories { mavenCentral() } + dependencies { - implementation "org.apache.logging.log4j:log4j-core:2.20.0" + implementation "org.apache.logging.log4j:log4j-core:2.23.1" } ["0.0.1", "0.0.2"].forEach { v -> diff --git a/buildSrc/version.properties b/buildSrc/version.properties index e4a9293c59b8f..536a95d69ed83 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,36 +1,38 @@ opensearch = 3.0.0 -lucene = 9.8.0-snapshot-4373c3b +lucene = 9.11.0-snapshot-8a555eb bundled_jdk_vendor = adoptium -bundled_jdk = 20.0.2+9 -# See please https://github.com/adoptium/temurin-build/issues/3371 -bundled_jdk_linux_ppc64le = 20+36 +bundled_jdk = 21.0.2+13 # optional dependencies spatial4j = 0.7 jts = 1.15.0 -jackson = 2.15.2 -jackson_databind = 2.15.2 -snakeyaml = 2.0 +jackson = 2.16.2 +jackson_databind = 2.16.2 +snakeyaml = 2.1 icu4j = 70.1 supercsv = 2.4.0 -log4j = 2.20.0 +log4j = 2.21.0 slf4j = 1.7.36 -asm = 9.5 +asm = 9.6 jettison = 1.5.4 woodstox = 6.4.0 kotlin = 1.7.10 -antlr4 = 4.11.1 +antlr4 = 4.13.1 guava = 32.1.1-jre protobuf = 3.22.3 jakarta_annotation = 1.3.5 # when updating the JNA version, also update the version in buildSrc/build.gradle -jna = 5.5.0 +jna = 5.13.0 -netty = 4.1.94.Final +netty = 4.1.107.Final joda = 2.12.2 +# project reactor +reactor_netty = 1.1.17 +reactor = 3.5.15 + # client dependencies httpclient5 = 5.2.1 httpcore5 = 5.2.2 @@ -39,33 +41,34 @@ httpcore = 4.4.16 httpasyncclient = 4.1.5 commonslogging = 1.2 commonscodec = 1.15 - +commonslang = 3.13.0 +commonscompress = 1.24.0 # plugin dependencies -aws = 2.20.55 +aws = 2.20.86 reactivestreams = 1.0.4 # when updating this version, you need to ensure compatibility with: # - plugins/ingest-attachment (transitive dependency, check the upstream POM) # - distribution/tools/plugin-cli -bouncycastle=1.75 +bouncycastle=1.77 # test dependencies randomizedrunner = 2.7.1 junit = 4.13.2 hamcrest = 2.1 -mockito = 5.4.0 +mockito = 5.10.0 objenesis = 3.2 -bytebuddy = 1.14.3 +bytebuddy = 1.14.7 # benchmark dependencies jmh = 1.35 # compression -zstd = 1.5.5-3 +zstd = 1.5.5-5 jzlib = 1.1.3 resteasy = 6.2.4.Final # opentelemetry dependencies -opentelemetry = 1.26.0 - +opentelemetry = 1.36.0 +opentelemetrysemconv = 1.23.1-alpha diff --git a/client/benchmark/build.gradle b/client/benchmark/build.gradle index 6fd5262f0ab4f..c1af5fa92e35c 100644 --- a/client/benchmark/build.gradle +++ b/client/benchmark/build.gradle @@ -33,7 +33,7 @@ apply plugin: 'application' base { group = 'org.opensearch.client' - archivesBaseName = 'client-benchmarks' + archivesName = 'client-benchmarks' } // Not published so no need to assemble diff --git a/client/benchmark/src/main/java/org/opensearch/client/benchmark/metrics/SampleRecorder.java b/client/benchmark/src/main/java/org/opensearch/client/benchmark/metrics/SampleRecorder.java index e53e4f1ad692d..9cd12f5e78bd0 100644 --- a/client/benchmark/src/main/java/org/opensearch/client/benchmark/metrics/SampleRecorder.java +++ b/client/benchmark/src/main/java/org/opensearch/client/benchmark/metrics/SampleRecorder.java @@ -37,7 +37,7 @@ /** * Stores measurement samples. 
- * + * <p> * This class is NOT threadsafe. */ public final class SampleRecorder { diff --git a/client/benchmark/src/main/java/org/opensearch/client/benchmark/ops/bulk/BulkBenchmarkTask.java b/client/benchmark/src/main/java/org/opensearch/client/benchmark/ops/bulk/BulkBenchmarkTask.java index 459cba1ce5f23..5d2b9cb764a6f 100644 --- a/client/benchmark/src/main/java/org/opensearch/client/benchmark/ops/bulk/BulkBenchmarkTask.java +++ b/client/benchmark/src/main/java/org/opensearch/client/benchmark/ops/bulk/BulkBenchmarkTask.java @@ -31,8 +31,8 @@ package org.opensearch.client.benchmark.ops.bulk; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.opensearch.OpenSearchException; import org.opensearch.client.benchmark.BenchmarkTask; import org.opensearch.client.benchmark.metrics.Sample; diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/opensearch/plugin/noop/NoopPlugin.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/opensearch/plugin/noop/NoopPlugin.java index e9d0fff2a9dc9..56bf91d1b2360 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/opensearch/plugin/noop/NoopPlugin.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/opensearch/plugin/noop/NoopPlugin.java @@ -31,17 +31,17 @@ package org.opensearch.plugin.noop; -import org.opensearch.plugin.noop.action.bulk.NoopBulkAction; -import org.opensearch.plugin.noop.action.bulk.RestNoopBulkAction; -import org.opensearch.plugin.noop.action.bulk.TransportNoopBulkAction; import org.opensearch.action.ActionRequest; -import org.opensearch.action.ActionResponse; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsFilter; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.plugin.noop.action.bulk.NoopBulkAction; +import org.opensearch.plugin.noop.action.bulk.RestNoopBulkAction; +import org.opensearch.plugin.noop.action.bulk.TransportNoopBulkAction; import org.opensearch.plugin.noop.action.search.NoopSearchAction; import org.opensearch.plugin.noop.action.search.RestNoopSearchAction; import org.opensearch.plugin.noop.action.search.TransportNoopSearchAction; diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/opensearch/plugin/noop/action/bulk/RestNoopBulkAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/opensearch/plugin/noop/action/bulk/RestNoopBulkAction.java index 332e089ad9e76..8bd35a0bfed6a 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/opensearch/plugin/noop/action/bulk/RestNoopBulkAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/opensearch/plugin/noop/action/bulk/RestNoopBulkAction.java @@ -40,8 +40,8 @@ import org.opensearch.action.update.UpdateResponse; import org.opensearch.client.Requests; import org.opensearch.client.node.NodeClient; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestChannel; @@ -54,9 +54,9 @@ import static java.util.Arrays.asList; import static 
java.util.Collections.unmodifiableList; +import static org.opensearch.core.rest.RestStatus.OK; import static org.opensearch.rest.RestRequest.Method.POST; import static org.opensearch.rest.RestRequest.Method.PUT; -import static org.opensearch.core.rest.RestStatus.OK; public class RestNoopBulkAction extends BaseRestHandler { diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/opensearch/plugin/noop/action/bulk/TransportNoopBulkAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/opensearch/plugin/noop/action/bulk/TransportNoopBulkAction.java index 4df9a0465b1a6..77d4d3d095b29 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/opensearch/plugin/noop/action/bulk/TransportNoopBulkAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/opensearch/plugin/noop/action/bulk/TransportNoopBulkAction.java @@ -31,7 +31,6 @@ package org.opensearch.plugin.noop.action.bulk; -import org.opensearch.action.ActionListener; import org.opensearch.action.DocWriteRequest; import org.opensearch.action.DocWriteResponse; import org.opensearch.action.bulk.BulkItemResponse; @@ -41,6 +40,7 @@ import org.opensearch.action.support.HandledTransportAction; import org.opensearch.action.update.UpdateResponse; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; import org.opensearch.tasks.Task; import org.opensearch.transport.TransportService; diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/opensearch/plugin/noop/action/search/TransportNoopSearchAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/opensearch/plugin/noop/action/search/TransportNoopSearchAction.java index 1b35787d226e7..99efd31dfcaa5 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/opensearch/plugin/noop/action/search/TransportNoopSearchAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/opensearch/plugin/noop/action/search/TransportNoopSearchAction.java @@ -32,13 +32,13 @@ package org.opensearch.plugin.noop.action.search; import org.apache.lucene.search.TotalHits; -import org.opensearch.action.ActionListener; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.HandledTransportAction; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; diff --git a/client/rest-high-level/build.gradle b/client/rest-high-level/build.gradle index 770cb3f78ca47..fdc93d8037ce6 100644 --- a/client/rest-high-level/build.gradle +++ b/client/rest-high-level/build.gradle @@ -39,7 +39,7 @@ apply plugin: 'opensearch.rest-resources' base { group = 'org.opensearch.client' - archivesBaseName = 'opensearch-rest-high-level-client' + archivesName = 'opensearch-rest-high-level-client' } restResources { diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/ClusterClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/ClusterClient.java index 45511f2bc2a47..eb0a8b0e8f40a 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/ClusterClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/ClusterClient.java 
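
The ClusterClient hunk that follows, like most of the rest-high-level changes after it, applies one relocation over and over: cross-cutting types such as ActionListener, ActionResponse and TaskId move into the org.opensearch.core.* namespace (joining RestStatus, which already lives there), so only import lines change while call sites stay untouched. A hedged sketch of what that looks like from a caller's side; the class name is illustrative and it assumes the opensearch-core artifact is on the classpath:

    // Before this migration the imports read:
    //   import org.opensearch.action.ActionListener;
    //   import org.opensearch.tasks.TaskId;
    // Afterwards the same types come from the core library:
    import org.opensearch.core.action.ActionListener;
    import org.opensearch.core.tasks.TaskId;

    public class CoreRelocationExample {
        // Only the package prefix changed; the listener contract is identical.
        static void done(ActionListener<TaskId> listener, TaskId id) {
            listener.onResponse(id);
        }
    }
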
@@ -32,7 +32,6 @@ package org.opensearch.client; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.settings.ClusterGetSettingsRequest; @@ -47,6 +46,7 @@ import org.opensearch.client.indices.GetComponentTemplatesRequest; import org.opensearch.client.indices.GetComponentTemplatesResponse; import org.opensearch.client.indices.PutComponentTemplateRequest; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.rest.RestStatus; import java.io.IOException; @@ -170,8 +170,8 @@ public ClusterHealthResponse health(ClusterHealthRequest healthRequest, RequestO /** * Asynchronously get cluster health using the Cluster Health API. - * * If timeout occurred, {@link ClusterHealthResponse} will have isTimedOut() == true and status() == RestStatus.REQUEST_TIMEOUT + * * @param healthRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/GetAliasesResponse.java b/client/rest-high-level/src/main/java/org/opensearch/client/GetAliasesResponse.java index ee829e2be1f11..c79be0a668896 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/GetAliasesResponse.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/GetAliasesResponse.java @@ -35,11 +35,11 @@ import org.opensearch.OpenSearchException; import org.opensearch.cluster.metadata.AliasMetadata; import org.opensearch.common.xcontent.StatusToXContentObject; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.xcontent.XContentParser.Token; -import org.opensearch.core.rest.RestStatus; import java.io.IOException; import java.util.Collections; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/IndicesClient.java index f20a6f627a680..281f020533d51 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/IndicesClient.java @@ -32,7 +32,6 @@ package org.opensearch.client; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest; import org.opensearch.action.admin.indices.alias.get.GetAliasesRequest; import org.opensearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; @@ -88,6 +87,7 @@ import org.opensearch.client.indices.SimulateIndexTemplateResponse; import org.opensearch.client.indices.rollover.RolloverRequest; import org.opensearch.client.indices.rollover.RolloverResponse; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.rest.RestStatus; import java.io.IOException; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/IndicesRequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/IndicesRequestConverters.java index ed719909e3acc..6c6a8c1bed9c4 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/IndicesRequestConverters.java +++ 
b/client/rest-high-level/src/main/java/org/opensearch/client/IndicesRequestConverters.java @@ -52,22 +52,22 @@ import org.opensearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.opensearch.client.indices.AnalyzeRequest; import org.opensearch.client.indices.CloseIndexRequest; +import org.opensearch.client.indices.ComposableIndexTemplateExistRequest; import org.opensearch.client.indices.CreateDataStreamRequest; import org.opensearch.client.indices.CreateIndexRequest; import org.opensearch.client.indices.DataStreamsStatsRequest; -import org.opensearch.client.indices.GetDataStreamRequest; import org.opensearch.client.indices.DeleteAliasRequest; import org.opensearch.client.indices.DeleteComposableIndexTemplateRequest; import org.opensearch.client.indices.DeleteDataStreamRequest; +import org.opensearch.client.indices.GetComposableIndexTemplateRequest; +import org.opensearch.client.indices.GetDataStreamRequest; import org.opensearch.client.indices.GetFieldMappingsRequest; import org.opensearch.client.indices.GetIndexRequest; -import org.opensearch.client.indices.GetComposableIndexTemplateRequest; import org.opensearch.client.indices.GetIndexTemplatesRequest; import org.opensearch.client.indices.GetMappingsRequest; -import org.opensearch.client.indices.ComposableIndexTemplateExistRequest; import org.opensearch.client.indices.IndexTemplatesExistRequest; -import org.opensearch.client.indices.PutIndexTemplateRequest; import org.opensearch.client.indices.PutComposableIndexTemplateRequest; +import org.opensearch.client.indices.PutIndexTemplateRequest; import org.opensearch.client.indices.PutMappingRequest; import org.opensearch.client.indices.ResizeRequest; import org.opensearch.client.indices.SimulateIndexTemplateRequest; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/IngestClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/IngestClient.java index cd304019e771c..29e5c5369f184 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/IngestClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/IngestClient.java @@ -32,7 +32,6 @@ package org.opensearch.client; -import org.opensearch.action.ActionListener; import org.opensearch.action.ingest.DeletePipelineRequest; import org.opensearch.action.ingest.GetPipelineRequest; import org.opensearch.action.ingest.GetPipelineResponse; @@ -40,6 +39,7 @@ import org.opensearch.action.ingest.SimulatePipelineRequest; import org.opensearch.action.ingest.SimulatePipelineResponse; import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.core.action.ActionListener; import java.io.IOException; import java.util.Collections; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java index d23a5976fada6..35d9929a649ff 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java @@ -73,19 +73,19 @@ import org.opensearch.common.Nullable; import org.opensearch.common.Priority; import org.opensearch.common.SuppressForbidden; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.CollectionUtils; import org.opensearch.common.xcontent.XContentHelper; -import 
org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.core.xcontent.DeprecationHandler; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.VersionType; import org.opensearch.index.mapper.MapperService; @@ -119,7 +119,7 @@ * @opensearch.api */ final class RequestConverters { - static final XContentType REQUEST_BODY_CONTENT_TYPE = XContentType.JSON; + static final MediaType REQUEST_BODY_CONTENT_TYPE = MediaTypeRegistry.JSON; private RequestConverters() { // Contains only status utility methods @@ -177,7 +177,7 @@ static Request bulk(BulkRequest bulkRequest) throws IOException { } if (bulkContentType == null) { - bulkContentType = XContentType.JSON; + bulkContentType = MediaTypeRegistry.JSON; } final byte separator = bulkContentType.xContent().streamSeparator(); @@ -266,7 +266,12 @@ static Request bulk(BulkRequest bulkRequest) throws IOException { } } } else if (opType == DocWriteRequest.OpType.UPDATE) { - source = XContentHelper.toXContent((UpdateRequest) action, bulkContentType, ToXContent.EMPTY_PARAMS, false).toBytesRef(); + source = org.opensearch.core.xcontent.XContentHelper.toXContent( + (UpdateRequest) action, + bulkContentType, + ToXContent.EMPTY_PARAMS, + false + ).toBytesRef(); } if (source != null) { @@ -446,9 +451,9 @@ static void addSearchRequestParams(Params params, SearchRequest searchRequest) { params.withIndicesOptions(searchRequest.indicesOptions()); } params.withSearchType(searchRequest.searchType().name().toLowerCase(Locale.ROOT)); - /** - * Merging search responses as part of CCS flow to reduce roundtrips is not supported for point in time - - * refer to org.opensearch.action.search.SearchResponseMerger + /* + Merging search responses as part of CCS flow to reduce roundtrips is not supported for point in time - + refer to org.opensearch.action.search.SearchResponseMerger */ if (searchRequest.pointInTimeBuilder() != null) { params.putParam("ccs_minimize_roundtrips", "false"); @@ -821,7 +826,8 @@ static HttpEntity createEntity(ToXContent toXContent, MediaType mediaType) throw } static HttpEntity createEntity(ToXContent toXContent, MediaType mediaType, ToXContent.Params toXContentParams) throws IOException { - BytesRef source = XContentHelper.toXContent(toXContent, mediaType, toXContentParams, false).toBytesRef(); + BytesRef source = org.opensearch.core.xcontent.XContentHelper.toXContent(toXContent, mediaType, toXContentParams, false) + .toBytesRef(); return new ByteArrayEntity(source.bytes, source.offset, source.length, createContentType(mediaType)); } @@ -868,12 +874,12 @@ static String endpoint(String[] indices, String endpoint, String type) { } /** - * Returns a {@link ContentType} from a given {@link XContentType}. + * Returns a {@link ContentType} from a given {@link MediaType}. 
* * @param mediaType the {@link MediaType} * @return the {@link ContentType} */ - @SuppressForbidden(reason = "Only allowed place to convert a XContentType to a ContentType") + @SuppressForbidden(reason = "Only allowed place to convert a MediaType to a ContentType") public static ContentType createContentType(final MediaType mediaType) { return ContentType.create(mediaType.mediaTypeWithoutParameters(), (Charset) null); } @@ -1179,14 +1185,14 @@ Params withActions(List<String> actions) { return this; } - Params withTaskId(org.opensearch.tasks.TaskId taskId) { + Params withTaskId(org.opensearch.core.tasks.TaskId taskId) { if (taskId != null && taskId.isSet()) { return putParam("task_id", taskId.toString()); } return this; } - Params withParentTaskId(org.opensearch.tasks.TaskId parentTaskId) { + Params withParentTaskId(org.opensearch.core.tasks.TaskId parentTaskId) { if (parentTaskId != null && parentTaskId.isSet()) { return putParam("parent_task_id", parentTaskId.toString()); } @@ -1252,7 +1258,7 @@ Params withWaitForEvents(Priority waitForEvents) { */ static MediaType enforceSameContentType(IndexRequest indexRequest, @Nullable MediaType mediaType) { MediaType requestContentType = indexRequest.getContentType(); - if (requestContentType != XContentType.JSON && requestContentType != XContentType.SMILE) { + if (requestContentType != MediaTypeRegistry.JSON && requestContentType != MediaTypeRegistry.fromFormat("smile")) { throw new IllegalArgumentException( "Unsupported content-type found for request with content-type [" + requestContentType diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java index 1ff5c81c4a0e3..9d8d771f1eaed 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java @@ -35,7 +35,6 @@ import org.apache.hc.core5.http.HttpEntity; import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchStatusException; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; @@ -86,6 +85,8 @@ import org.opensearch.common.CheckedConsumer; import org.opensearch.common.CheckedFunction; import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ContextParser; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.MediaType; @@ -99,7 +100,6 @@ import org.opensearch.index.reindex.UpdateByQueryRequest; import org.opensearch.plugins.spi.NamedXContentProvider; import org.opensearch.rest.BytesRestResponse; -import org.opensearch.core.rest.RestStatus; import org.opensearch.script.mustache.MultiSearchTemplateRequest; import org.opensearch.script.mustache.MultiSearchTemplateResponse; import org.opensearch.script.mustache.SearchTemplateRequest; @@ -139,21 +139,21 @@ import org.opensearch.search.aggregations.bucket.range.RangeAggregationBuilder; import org.opensearch.search.aggregations.bucket.sampler.InternalSampler; import org.opensearch.search.aggregations.bucket.sampler.ParsedSampler; +import org.opensearch.search.aggregations.bucket.terms.DoubleTerms; import 
org.opensearch.search.aggregations.bucket.terms.LongRareTerms; +import org.opensearch.search.aggregations.bucket.terms.LongTerms; import org.opensearch.search.aggregations.bucket.terms.MultiTermsAggregationBuilder; +import org.opensearch.search.aggregations.bucket.terms.ParsedDoubleTerms; import org.opensearch.search.aggregations.bucket.terms.ParsedLongRareTerms; +import org.opensearch.search.aggregations.bucket.terms.ParsedLongTerms; import org.opensearch.search.aggregations.bucket.terms.ParsedMultiTerms; import org.opensearch.search.aggregations.bucket.terms.ParsedSignificantLongTerms; import org.opensearch.search.aggregations.bucket.terms.ParsedSignificantStringTerms; import org.opensearch.search.aggregations.bucket.terms.ParsedStringRareTerms; -import org.opensearch.search.aggregations.bucket.terms.SignificantLongTerms; -import org.opensearch.search.aggregations.bucket.terms.SignificantStringTerms; -import org.opensearch.search.aggregations.bucket.terms.DoubleTerms; -import org.opensearch.search.aggregations.bucket.terms.LongTerms; -import org.opensearch.search.aggregations.bucket.terms.ParsedDoubleTerms; -import org.opensearch.search.aggregations.bucket.terms.ParsedLongTerms; import org.opensearch.search.aggregations.bucket.terms.ParsedStringTerms; import org.opensearch.search.aggregations.bucket.terms.ParsedUnsignedLongTerms; +import org.opensearch.search.aggregations.bucket.terms.SignificantLongTerms; +import org.opensearch.search.aggregations.bucket.terms.SignificantStringTerms; import org.opensearch.search.aggregations.bucket.terms.StringRareTerms; import org.opensearch.search.aggregations.bucket.terms.StringTerms; import org.opensearch.search.aggregations.bucket.terms.UnsignedLongTerms; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RethrottleRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/RethrottleRequest.java index 958e6ce4cda1c..6e453a5c7f343 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RethrottleRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RethrottleRequest.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.opensearch.tasks.TaskId; +import org.opensearch.core.tasks.TaskId; import java.util.Objects; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/SearchPipelineClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/SearchPipelineClient.java index b6c28f57d6bd9..0014bdb8c8182 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/SearchPipelineClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/SearchPipelineClient.java @@ -8,12 +8,12 @@ package org.opensearch.client; -import org.opensearch.action.ActionListener; import org.opensearch.action.search.DeleteSearchPipelineRequest; import org.opensearch.action.search.GetSearchPipelineRequest; import org.opensearch.action.search.GetSearchPipelineResponse; import org.opensearch.action.search.PutSearchPipelineRequest; import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.core.action.ActionListener; import java.io.IOException; import java.util.Collections; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotClient.java index 85a793dec24ce..87a0e45eafe49 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotClient.java +++ 
b/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotClient.java @@ -32,7 +32,6 @@ package org.opensearch.client; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest; import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse; import org.opensearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; @@ -52,6 +51,7 @@ import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.core.action.ActionListener; import java.io.IOException; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/TasksClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/TasksClient.java index 51764e3339394..ec862aead794a 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/TasksClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/TasksClient.java @@ -32,13 +32,13 @@ package org.opensearch.client; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.opensearch.client.tasks.CancelTasksRequest; import org.opensearch.client.tasks.CancelTasksResponse; import org.opensearch.client.tasks.GetTaskRequest; import org.opensearch.client.tasks.GetTaskResponse; +import org.opensearch.core.action.ActionListener; import java.io.IOException; import java.util.Optional; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/TimedRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/TimedRequest.java index dad5b6a3679ec..d40445b2daa81 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/TimedRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/TimedRequest.java @@ -37,7 +37,7 @@ /** * A base request for any requests that supply timeouts. - * + * <p> * Please note, any requests that use a ackTimeout should set timeout as they * represent the same backing field on the server. 
*/ diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/core/BroadcastResponse.java b/client/rest-high-level/src/main/java/org/opensearch/client/core/BroadcastResponse.java index 42011a0da7ab2..a91d1461685f8 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/core/BroadcastResponse.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/core/BroadcastResponse.java @@ -32,8 +32,8 @@ package org.opensearch.client.core; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.ParseField; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.XContentParser; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/core/CountResponse.java b/client/rest-high-level/src/main/java/org/opensearch/client/core/CountResponse.java index 10d318afd9da0..5540c3c284271 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/core/CountResponse.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/core/CountResponse.java @@ -34,8 +34,8 @@ import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.core.ParseField; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.XContentParser; import java.io.IOException; import java.util.ArrayList; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/core/TermVectorsResponse.java b/client/rest-high-level/src/main/java/org/opensearch/client/core/TermVectorsResponse.java index 5e671c416e4ef..ad2e3dd550880 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/core/TermVectorsResponse.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/core/TermVectorsResponse.java @@ -36,14 +36,15 @@ import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.XContentParser; -import static org.opensearch.core.xcontent.ConstructingObjectParser.constructorArg; -import static org.opensearch.core.xcontent.ConstructingObjectParser.optionalConstructorArg; import java.util.Collections; -import java.util.List; import java.util.Comparator; +import java.util.List; import java.util.Objects; +import static org.opensearch.core.xcontent.ConstructingObjectParser.constructorArg; +import static org.opensearch.core.xcontent.ConstructingObjectParser.optionalConstructorArg; + public class TermVectorsResponse { private final String index; private final String id; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/CloseIndexResponse.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/CloseIndexResponse.java index a9a851474c424..e32c33484140d 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/CloseIndexResponse.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/CloseIndexResponse.java @@ -32,10 +32,10 @@ package org.opensearch.client.indices; import org.opensearch.OpenSearchException; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.master.ShardsAcknowledgedResponse; import org.opensearch.common.Nullable; import org.opensearch.core.ParseField; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; import 
org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.xcontent.XContentParserUtils; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/CreateIndexRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/CreateIndexRequest.java index 3405e7e81e122..62c5b54c0e75e 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/CreateIndexRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/CreateIndexRequest.java @@ -38,16 +38,15 @@ import org.opensearch.action.support.ActiveShardCount; import org.opensearch.client.TimedRequest; import org.opensearch.client.Validatable; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.ParseField; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.MediaType; -import org.opensearch.core.xcontent.MediaTypeParserRegistry; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; @@ -135,7 +134,7 @@ public CreateIndexRequest settings(String source, MediaType mediaType) { * Allows to set the settings using a json builder. */ public CreateIndexRequest settings(XContentBuilder builder) { - settings(Strings.toString(builder), builder.contentType()); + settings(builder.toString(), builder.contentType()); return this; } @@ -157,7 +156,7 @@ public MediaType mappingsMediaType() { /** * Adds mapping that will be added when the index gets created. - * + * <p> * Note that the definition should *not* be nested under a type name. * * @param source The mapping source @@ -169,7 +168,7 @@ public CreateIndexRequest mapping(String source, MediaType mediaType) { /** * Adds mapping that will be added when the index gets created. - * + * <p> * Note that the definition should *not* be nested under a type name. * * @param source The mapping source @@ -180,14 +179,14 @@ public CreateIndexRequest mapping(XContentBuilder source) { /** * Adds mapping that will be added when the index gets created. - * + * <p> * Note that the definition should *not* be nested under a type name. * * @param source The mapping source */ public CreateIndexRequest mapping(Map<String, ?> source) { try { - XContentBuilder builder = XContentFactory.contentBuilder(MediaTypeParserRegistry.getDefaultMediaType()); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.getDefaultMediaType()); builder.map(source); return mapping(BytesReference.bytes(builder), builder.contentType()); } catch (IOException e) { @@ -197,7 +196,7 @@ public CreateIndexRequest mapping(Map<String, ?> source) { /** * Adds mapping that will be added when the index gets created. - * + * <p> * Note that the definition should *not* be nested under a type name. * * @param source The mapping source @@ -283,7 +282,7 @@ public CreateIndexRequest aliases(Collection<Alias> aliases) { /** * Sets the settings and mappings as a single source. 
- * + * <p> * Note that the mapping definition should *not* be nested under a type name. */ public CreateIndexRequest source(String source, MediaType mediaType) { @@ -292,7 +291,7 @@ public CreateIndexRequest source(String source, MediaType mediaType) { /** * Sets the settings and mappings as a single source. - * + * <p> * Note that the mapping definition should *not* be nested under a type name. */ public CreateIndexRequest source(XContentBuilder source) { @@ -301,7 +300,7 @@ public CreateIndexRequest source(XContentBuilder source) { /** * Sets the settings and mappings as a single source. - * + * <p> * Note that the mapping definition should *not* be nested under a type name. */ public CreateIndexRequest source(BytesReference source, MediaType mediaType) { @@ -312,7 +311,7 @@ public CreateIndexRequest source(BytesReference source, MediaType mediaType) { /** * Sets the settings and mappings as a single source. - * + * <p> * Note that the mapping definition should *not* be nested under a type name. */ @SuppressWarnings("unchecked") diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/DataStreamsStatsResponse.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/DataStreamsStatsResponse.java index 2c90d5a734aa0..327836160cceb 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/DataStreamsStatsResponse.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/DataStreamsStatsResponse.java @@ -34,7 +34,7 @@ import org.opensearch.client.core.BroadcastResponse; import org.opensearch.core.ParseField; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.XContentParser; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/DetailAnalyzeResponse.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/DetailAnalyzeResponse.java index c2ac8169b0a4e..92ab2d7b744b2 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/DetailAnalyzeResponse.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/DetailAnalyzeResponse.java @@ -32,8 +32,8 @@ package org.opensearch.client.indices; -import org.opensearch.core.common.Strings; import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.XContentParser; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetFieldMappingsResponse.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetFieldMappingsResponse.java index 1359c68fc1311..ec3101b7e7543 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetFieldMappingsResponse.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetFieldMappingsResponse.java @@ -32,14 +32,14 @@ package org.opensearch.client.indices; +import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.ParseField; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.XContentBuilder; -import 
org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.mapper.Mapper; import java.io.IOException; @@ -47,8 +47,8 @@ import java.util.Map; import java.util.Objects; -import static org.opensearch.core.xcontent.ConstructingObjectParser.optionalConstructorArg; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.core.xcontent.ConstructingObjectParser.optionalConstructorArg; import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; /** Response object for {@link GetFieldMappingsRequest} API */ @@ -150,7 +150,7 @@ public String fullName() { * Returns the mappings as a map. Note that the returned map has a single key which is always the field's {@link Mapper#name}. */ public Map<String, Object> sourceAsMap() { - return XContentHelper.convertToMap(source, true, XContentType.JSON).v2(); + return XContentHelper.convertToMap(source, true, MediaTypeRegistry.JSON).v2(); } // pkg-private for testing diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/IndexTemplateMetadata.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/IndexTemplateMetadata.java index adc2e2d4e3621..017ad0089704e 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/IndexTemplateMetadata.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/IndexTemplateMetadata.java @@ -35,8 +35,8 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.MappingMetadata; import org.opensearch.common.Nullable; -import org.opensearch.core.ParseField; import org.opensearch.common.settings.Settings; +import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.mapper.MapperService; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/PutIndexTemplateRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/PutIndexTemplateRequest.java index fed958955af99..1c286577ae8c9 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/PutIndexTemplateRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/PutIndexTemplateRequest.java @@ -39,19 +39,20 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.common.Nullable; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.common.xcontent.support.XContentMapValues; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.DeprecationHandler; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; -import 
org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.XContentParser; import java.io.IOException; @@ -216,10 +217,10 @@ public Settings settings() { * Adds mapping that will be added when the index gets created. * * @param source The mapping source - * @param xContentType The type of content contained within the source + * @param mediaType The type of content contained within the source */ - public PutIndexTemplateRequest mapping(String source, XContentType xContentType) { - internalMapping(XContentHelper.convertToMap(new BytesArray(source), true, xContentType).v2()); + public PutIndexTemplateRequest mapping(String source, MediaType mediaType) { + internalMapping(XContentHelper.convertToMap(new BytesArray(source), true, mediaType).v2()); return this; } @@ -267,7 +268,7 @@ public PutIndexTemplateRequest mapping(Map<String, Object> source) { private PutIndexTemplateRequest internalMapping(Map<String, Object> source) { try { - XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder(); builder.map(source); MediaType mediaType = builder.contentType(); Objects.requireNonNull(mediaType); diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/PutMappingRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/PutMappingRequest.java index d17dc54713789..a63393bd2341b 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/PutMappingRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/PutMappingRequest.java @@ -38,9 +38,8 @@ import org.opensearch.client.TimedRequest; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.MediaType; -import org.opensearch.core.xcontent.MediaTypeParserRegistry; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; @@ -106,12 +105,12 @@ public MediaType mediaType() { /** * The mapping source definition. - * + * <p> * Note that the definition should *not* be nested under a type name. */ public PutMappingRequest source(Map<String, ?> mappingSource) { try { - XContentBuilder builder = XContentFactory.contentBuilder(MediaTypeParserRegistry.getDefaultMediaType()); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.getDefaultMediaType()); builder.map(mappingSource); return source(builder); } catch (IOException e) { @@ -121,7 +120,7 @@ public PutMappingRequest source(Map<String, ?> mappingSource) { /** * The mapping source definition. - * + * <p> * Note that the definition should *not* be nested under a type name. */ public PutMappingRequest source(String mappingSource, MediaType mediaType) { @@ -132,7 +131,7 @@ public PutMappingRequest source(String mappingSource, MediaType mediaType) { /** * The mapping source definition. - * + * <p> * Note that the definition should *not* be nested under a type name. */ public PutMappingRequest source(XContentBuilder builder) { @@ -143,7 +142,7 @@ public PutMappingRequest source(XContentBuilder builder) { /** * The mapping source definition. - * + * <p> * Note that the definition should *not* be nested under a type name. 
*/ public PutMappingRequest source(BytesReference source, MediaType mediaType) { diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/ResizeRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/ResizeRequest.java index 61799a83e5df5..e53bdd99b3cee 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/ResizeRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/ResizeRequest.java @@ -37,9 +37,9 @@ import org.opensearch.client.Validatable; import org.opensearch.client.ValidationException; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.unit.ByteSizeValue; import java.io.IOException; import java.util.Collections; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/rollover/RolloverRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/rollover/RolloverRequest.java index 443cef45e646b..45211864a3df3 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/rollover/RolloverRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/rollover/RolloverRequest.java @@ -37,8 +37,8 @@ import org.opensearch.action.admin.indices.rollover.MaxSizeCondition; import org.opensearch.client.TimedRequest; import org.opensearch.client.indices.CreateIndexRequest; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/slm/SnapshotLifecyclePolicy.java b/client/rest-high-level/src/main/java/org/opensearch/client/slm/SnapshotLifecyclePolicy.java index 3566d6aef1ac3..fd101c575b97a 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/slm/SnapshotLifecyclePolicy.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/slm/SnapshotLifecyclePolicy.java @@ -33,10 +33,10 @@ package org.opensearch.client.slm; import org.opensearch.common.Nullable; -import org.opensearch.common.Strings; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -169,6 +169,6 @@ public boolean equals(Object obj) { @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } } diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/slm/SnapshotLifecyclePolicyMetadata.java b/client/rest-high-level/src/main/java/org/opensearch/client/slm/SnapshotLifecyclePolicyMetadata.java index c0b542557159f..dd44d16f0d65e 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/slm/SnapshotLifecyclePolicyMetadata.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/slm/SnapshotLifecyclePolicyMetadata.java @@ -33,10 +33,10 @@ package org.opensearch.client.slm; import 
org.opensearch.common.Nullable; -import org.opensearch.common.Strings; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -289,7 +289,7 @@ public boolean equals(Object obj) { @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } } } diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/slm/SnapshotLifecycleStats.java b/client/rest-high-level/src/main/java/org/opensearch/client/slm/SnapshotLifecycleStats.java index 2126ced76b5ae..476533d9c91ca 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/slm/SnapshotLifecycleStats.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/slm/SnapshotLifecycleStats.java @@ -32,11 +32,11 @@ package org.opensearch.client.slm; -import org.opensearch.common.Strings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; @@ -188,7 +188,7 @@ public boolean equals(Object obj) { @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } public static class SnapshotPolicyStats implements ToXContentFragment { diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/slm/SnapshotRetentionConfiguration.java b/client/rest-high-level/src/main/java/org/opensearch/client/slm/SnapshotRetentionConfiguration.java index 2a7ab6bb04095..3165b6bede19d 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/slm/SnapshotRetentionConfiguration.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/slm/SnapshotRetentionConfiguration.java @@ -33,11 +33,11 @@ package org.opensearch.client.slm; import org.opensearch.common.Nullable; -import org.opensearch.common.Strings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -151,6 +151,6 @@ public boolean equals(Object obj) { @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } } diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/tasks/NodeData.java b/client/rest-high-level/src/main/java/org/opensearch/client/tasks/NodeData.java index 9d752da1ef9f8..4ecdff354c37d 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/tasks/NodeData.java +++ 
b/client/rest-high-level/src/main/java/org/opensearch/client/tasks/NodeData.java @@ -31,14 +31,15 @@ package org.opensearch.client.tasks; +import org.opensearch.core.ParseField; +import org.opensearch.core.xcontent.ObjectParser; +import org.opensearch.core.xcontent.XContentParser; + import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; -import org.opensearch.core.ParseField; -import org.opensearch.core.xcontent.ObjectParser; -import org.opensearch.core.xcontent.XContentParser; class NodeData { diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/tasks/OpenSearchException.java b/client/rest-high-level/src/main/java/org/opensearch/client/tasks/OpenSearchException.java index 8ce6e71cfeeba..2f341e5102a08 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/tasks/OpenSearchException.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/tasks/OpenSearchException.java @@ -33,6 +33,7 @@ import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.XContentParser; + import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskGroup.java b/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskGroup.java index c419884700587..9129de717459f 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskGroup.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskGroup.java @@ -38,7 +38,6 @@ /** * Client side counterpart of server side version. - * * {@link org.opensearch.action.admin.cluster.node.tasks.list.TaskGroup} */ public class TaskGroup { diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskId.java b/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskId.java index cd036a732957b..c2cf2c826b8bd 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskId.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskId.java @@ -34,7 +34,7 @@ import java.util.Objects; /** - * client side version of a {@link org.opensearch.tasks.TaskId} + * client side version of a {@link org.opensearch.core.tasks.TaskId} */ public class TaskId { diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskInfo.java b/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskInfo.java index 51ac62830446f..75badc4e3dbf2 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskInfo.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskInfo.java @@ -54,6 +54,7 @@ public class TaskInfo { private long runningTimeNanos; private boolean cancellable; private boolean cancelled; + private Long cancellationStartTime; private TaskId parentTaskId; private final Map<String, Object> status = new HashMap<>(); private final Map<String, String> headers = new HashMap<>(); @@ -127,6 +128,14 @@ void setCancelled(boolean cancelled) { this.cancelled = cancelled; } + public Long getCancellationStartTime() { + return this.cancellationStartTime; + } + + public void setCancellationStartTime(Long cancellationStartTime) { + this.cancellationStartTime = cancellationStartTime; + } + public TaskId getParentTaskId() { return parentTaskId; } @@ -180,6 +189,7 @@ private void noOpParse(Object s) {} parser.declareString(TaskInfo::setParentTaskId, new ParseField("parent_task_id")); 
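
The TaskInfo hunk in progress here adds a cancellationStartTime field and, in the declareLong line just below, registers it with the same ObjectParser used for every other field. A self-contained sketch of that wiring pattern, using a deliberately hypothetical Info class rather than restating TaskInfo itself:

    import org.opensearch.core.ParseField;
    import org.opensearch.core.xcontent.ObjectParser;

    public class ParserWiringExample {
        static final class Info {
            String name;
            Long cancellationStartTime;
            void setName(String name) { this.name = name; }
            void setCancellationStartTime(Long t) { this.cancellationStartTime = t; }
        }

        // Each declare* call binds one JSON field name to a setter; parsing an
        // object then populates a fresh Info supplied by the constructor reference.
        static final ObjectParser<Info, Void> PARSER = new ObjectParser<>("info", Info::new);
        static {
            PARSER.declareString(Info::setName, new ParseField("name"));
            PARSER.declareLong(Info::setCancellationStartTime, new ParseField("cancellation_time_millis"));
        }
    }
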
parser.declareObject(TaskInfo::setHeaders, (p, c) -> p.mapStrings(), new ParseField("headers")); parser.declareObject(TaskInfo::setResourceStats, (p, c) -> p.map(), new ParseField("resource_stats")); + parser.declareLong(TaskInfo::setCancellationStartTime, new ParseField("cancellation_time_millis")); PARSER = (XContentParser p, Void v, String name) -> parser.parse(p, new TaskInfo(new TaskId(name)), null); } @@ -199,7 +209,8 @@ && isCancelled() == taskInfo.isCancelled() && Objects.equals(getParentTaskId(), taskInfo.getParentTaskId()) && Objects.equals(status, taskInfo.status) && Objects.equals(getHeaders(), taskInfo.getHeaders()) - && Objects.equals(getResourceStats(), taskInfo.getResourceStats()); + && Objects.equals(getResourceStats(), taskInfo.getResourceStats()) + && Objects.equals(getCancellationStartTime(), taskInfo.cancellationStartTime); } @Override @@ -216,7 +227,8 @@ public int hashCode() { getParentTaskId(), status, getHeaders(), - getResourceStats() + getResourceStats(), + getCancellationStartTime() ); } @@ -250,6 +262,8 @@ public String toString() { + headers + ", resource_stats=" + resourceStats + + ", cancellationStartTime=" + + cancellationStartTime + '}'; } } diff --git a/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt b/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt index e9e793aa9a783..42dde784147c7 100644 --- a/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt +++ b/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt @@ -14,7 +14,7 @@ # either express or implied. See the License for the specific # language governing permissions and limitations under the License. -@defaultMessage Use Request#createContentType(XContentType) to be sure to pass the right MIME type +@defaultMessage Use Request#createContentType(MediaType) to be sure to pass the right MIME type org.apache.hc.core5.http.ContentType#create(java.lang.String) org.apache.hc.core5.http.ContentType#create(java.lang.String,java.lang.String) org.apache.hc.core5.http.ContentType#create(java.lang.String,java.nio.charset.Charset) diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/AbstractRequestTestCase.java b/client/rest-high-level/src/test/java/org/opensearch/client/AbstractRequestTestCase.java index 707f4246009aa..c464ee9ece74a 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/AbstractRequestTestCase.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/AbstractRequestTestCase.java @@ -31,21 +31,20 @@ package org.opensearch.client; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContent; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; /** * Base class for HLRC request parsing tests. - * + * <p> * This case class facilitates generating client side request test instances and * verifies that they are correctly parsed into server side request instances. 
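All three slm toString hunks above share one idiom; here is a minimal sketch of it, with Sample as an invented class (only the relocated Strings and MediaTypeRegistry usage is taken from the diff):

    import org.opensearch.core.common.Strings;
    import org.opensearch.core.xcontent.MediaTypeRegistry;
    import org.opensearch.core.xcontent.ToXContentObject;
    import org.opensearch.core.xcontent.XContentBuilder;

    import java.io.IOException;

    // Invented minimal object; only the toString idiom mirrors the diff.
    class Sample implements ToXContentObject {
        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            return builder.startObject().field("field", "value").endObject();
        }

        @Override
        public String toString() {
            // core.common.Strings + MediaTypeRegistry.JSON replace
            // common.Strings + XContentType.JSON at each call site.
            return Strings.toString(MediaTypeRegistry.JSON, this); // {"field":"value"}
        }
    }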
* @@ -60,7 +59,7 @@ public final void testFromXContent() throws IOException { final XContentType xContentType = randomFrom(XContentType.values()); final BytesReference bytes = toShuffledXContent(clientTestInstance, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean()); - final XContent xContent = XContentFactory.xContent(xContentType); + final XContent xContent = xContentType.xContent(); final XContentParser parser = xContent.createParser(xContentRegistry(), LoggingDeprecationHandler.INSTANCE, bytes.streamInput()); final S serverInstance = doParseToServerInstance(parser); assertInstances(serverInstance, clientTestInstance); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/AbstractResponseTestCase.java b/client/rest-high-level/src/test/java/org/opensearch/client/AbstractResponseTestCase.java index 5f7df0cd5860c..7d2d6b87b85c6 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/AbstractResponseTestCase.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/AbstractResponseTestCase.java @@ -31,21 +31,20 @@ package org.opensearch.client; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContent; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; /** * Base class for HLRC response parsing tests. - * + * <p> * This case class facilitates generating server side response test instances and * verifies that they are correctly parsed into HLRC response instances. 
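The same substitution appears in both test base classes: the XContentType enum now hands out its XContent directly, so the static XContentFactory lookup goes away. A minimal sketch, assuming the createParser(NamedXContentRegistry, DeprecationHandler, String) overload; ParserSketch and jsonParser are invented names:

    import org.opensearch.common.xcontent.LoggingDeprecationHandler;
    import org.opensearch.common.xcontent.XContentType;
    import org.opensearch.core.xcontent.NamedXContentRegistry;
    import org.opensearch.core.xcontent.XContentParser;

    import java.io.IOException;

    final class ParserSketch {
        static XContentParser jsonParser(String json) throws IOException {
            // Was: XContentFactory.xContent(XContentType.JSON).createParser(...)
            return XContentType.JSON.xContent()
                .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, json);
        }
    }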
* @@ -59,7 +58,7 @@ public final void testFromXContent() throws IOException { final S serverTestInstance = createServerTestInstance(xContentType); final BytesReference bytes = toShuffledXContent(serverTestInstance, xContentType, getParams(), randomBoolean()); - final XContent xContent = XContentFactory.xContent(xContentType); + final XContent xContent = xContentType.xContent(); final XContentParser parser = xContent.createParser( NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/BulkProcessorIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/BulkProcessorIT.java index 49acbe8cd2bc1..1d70778398e2e 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/BulkProcessorIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/BulkProcessorIT.java @@ -33,6 +33,7 @@ package org.opensearch.client; import com.carrotsearch.randomizedtesting.generators.RandomPicks; + import org.opensearch.action.bulk.BulkItemResponse; import org.opensearch.action.bulk.BulkProcessor; import org.opensearch.action.bulk.BulkRequest; @@ -42,13 +43,12 @@ import org.opensearch.action.get.MultiGetResponse; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.search.SearchRequest; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.search.SearchHit; import org.hamcrest.Matcher; @@ -278,12 +278,12 @@ public void testBulkProcessorConcurrentRequestsReadOnlyIndex() throws Exception // let's make sure we get at least 1 item in the MultiGetRequest regardless of the randomising roulette if (randomBoolean() || multiGetRequest.getItems().size() == 0) { testDocs++; - processor.add(new IndexRequest("test").id(Integer.toString(testDocs)).source(XContentType.JSON, "field", "value")); + processor.add(new IndexRequest("test").id(Integer.toString(testDocs)).source(MediaTypeRegistry.JSON, "field", "value")); multiGetRequest.add("test", Integer.toString(testDocs)); } else { testReadOnlyDocs++; processor.add( - new IndexRequest("test-ro").id(Integer.toString(testReadOnlyDocs)).source(XContentType.JSON, "field", "value") + new IndexRequest("test-ro").id(Integer.toString(testReadOnlyDocs)).source(MediaTypeRegistry.JSON, "field", "value") ); } } @@ -334,9 +334,9 @@ public void testGlobalParametersAndSingleRequest() throws Exception { processor.add(new IndexRequest() // <1> - .source(XContentType.JSON, "user", "some user")); + .source(MediaTypeRegistry.JSON, "user", "some user")); processor.add(new IndexRequest("blogs").id("1") // <2> - .source(XContentType.JSON, "title", "some title")); + .source(MediaTypeRegistry.JSON, "title", "some title")); } // end::bulk-processor-mix-parameters latch.await(); @@ -400,11 +400,11 @@ private MultiGetRequest indexDocs(BulkProcessor processor, int numDocs, String l if (randomBoolean()) { processor.add( new IndexRequest(localIndex).id(Integer.toString(i)) - .source(XContentType.JSON, "field", randomRealisticUnicodeOfLengthBetween(1, 
30)) + .source(MediaTypeRegistry.JSON, "field", randomRealisticUnicodeOfLengthBetween(1, 30)) ); } else { BytesArray data = bytesBulkRequest(localIndex, i); - processor.add(data, globalIndex, globalPipeline, XContentType.JSON); + processor.add(data, globalIndex, globalPipeline, MediaTypeRegistry.JSON); } multiGetRequest.add(localIndex, Integer.toString(i)); } @@ -423,7 +423,7 @@ private static BytesArray bytesBulkRequest(String localIndex, int id) throws IOE XContentBuilder source = jsonBuilder().startObject().field("field", randomRealisticUnicodeOfLengthBetween(1, 30)).endObject(); - String request = Strings.toString(action) + "\n" + Strings.toString(source) + "\n"; + String request = action + "\n" + source + "\n"; return new BytesArray(request); } diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/BulkProcessorRetryIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/BulkProcessorRetryIT.java index 44bd085788203..3678cc042ba47 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/BulkProcessorRetryIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/BulkProcessorRetryIT.java @@ -40,8 +40,8 @@ import org.opensearch.action.get.MultiGetRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.transport.RemoteTransportException; import java.util.Collections; @@ -170,7 +170,7 @@ private static MultiGetRequest indexDocs(BulkProcessor processor, int numDocs) { for (int i = 1; i <= numDocs; i++) { processor.add( new IndexRequest(INDEX_NAME).id(Integer.toString(i)) - .source(XContentType.JSON, "field", randomRealisticUnicodeOfCodepointLengthBetween(1, 30)) + .source(MediaTypeRegistry.JSON, "field", randomRealisticUnicodeOfCodepointLengthBetween(1, 30)) ); multiGetRequest.add(INDEX_NAME, Integer.toString(i)); } @@ -180,7 +180,7 @@ private static MultiGetRequest indexDocs(BulkProcessor processor, int numDocs) { /** * Internal helper class to correlate backoff states with bulk responses. This is needed to check whether we maxed out the number * of retries but still got rejected (which is perfectly fine and can also happen from time to time under heavy load). - * + * <p> * This implementation relies on an implementation detail in Retry, namely that the bulk listener is notified on the same thread * as the last call to the backoff policy's iterator. The advantage is that this is non-invasive to the rest of the production code. 
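Across the bulk tests above only the first argument of source(...) changes; a minimal sketch with invented index, id and field names (SourceSketch is hypothetical):

    import org.opensearch.action.index.IndexRequest;
    import org.opensearch.core.xcontent.MediaTypeRegistry;

    final class SourceSketch {
        static IndexRequest sample() {
            // MediaTypeRegistry.JSON replaces the XContentType.JSON enum
            // constant; source(...) accepts any MediaType, per the hunks above.
            return new IndexRequest("test").id("1").source(MediaTypeRegistry.JSON, "field", "value");
        }
    }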
*/ diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/BulkRequestWithGlobalParametersIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/BulkRequestWithGlobalParametersIT.java index 35fc9d88e316c..d392aa842fb35 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/BulkRequestWithGlobalParametersIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/BulkRequestWithGlobalParametersIT.java @@ -36,7 +36,7 @@ import org.opensearch.action.bulk.BulkResponse; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.search.SearchRequest; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.search.SearchHit; import java.io.IOException; @@ -59,8 +59,8 @@ public void testGlobalPipelineOnBulkRequest() throws IOException { createFieldAddingPipleine("xyz", "fieldNameXYZ", "valueXYZ"); BulkRequest request = new BulkRequest(); - request.add(new IndexRequest("test").id("1").source(XContentType.JSON, "field", "bulk1")); - request.add(new IndexRequest("test").id("2").source(XContentType.JSON, "field", "bulk2")); + request.add(new IndexRequest("test").id("1").source(MediaTypeRegistry.JSON, "field", "bulk1")); + request.add(new IndexRequest("test").id("2").source(MediaTypeRegistry.JSON, "field", "bulk2")); request.pipeline("xyz"); bulk(request); @@ -76,8 +76,8 @@ public void testPipelineOnRequestOverridesGlobalPipeline() throws IOException { BulkRequest request = new BulkRequest(); request.pipeline("globalId"); - request.add(new IndexRequest("test").id("1").source(XContentType.JSON, "field", "bulk1").setPipeline("perIndexId")); - request.add(new IndexRequest("test").id("2").source(XContentType.JSON, "field", "bulk2").setPipeline("perIndexId")); + request.add(new IndexRequest("test").id("1").source(MediaTypeRegistry.JSON, "field", "bulk1").setPipeline("perIndexId")); + request.add(new IndexRequest("test").id("2").source(MediaTypeRegistry.JSON, "field", "bulk2").setPipeline("perIndexId")); bulk(request); @@ -96,11 +96,11 @@ public void testMixPipelineOnRequestAndGlobal() throws IOException { request.pipeline("globalId"); request.add(new IndexRequest("test").id("1") - .source(XContentType.JSON, "field", "bulk1") + .source(MediaTypeRegistry.JSON, "field", "bulk1") .setPipeline("perIndexId")); // <1> request.add(new IndexRequest("test").id("2") - .source(XContentType.JSON, "field", "bulk2")); // <2> + .source(MediaTypeRegistry.JSON, "field", "bulk2")); // <2> // end::bulk-request-mix-pipeline bulk(request); @@ -116,8 +116,8 @@ public void testMixPipelineOnRequestAndGlobal() throws IOException { public void testGlobalIndex() throws IOException { BulkRequest request = new BulkRequest("global_index"); - request.add(new IndexRequest().id("1").source(XContentType.JSON, "field", "bulk1")); - request.add(new IndexRequest().id("2").source(XContentType.JSON, "field", "bulk2")); + request.add(new IndexRequest().id("1").source(MediaTypeRegistry.JSON, "field", "bulk1")); + request.add(new IndexRequest().id("2").source(MediaTypeRegistry.JSON, "field", "bulk2")); bulk(request); @@ -128,10 +128,10 @@ public void testGlobalIndex() throws IOException { @SuppressWarnings("unchecked") public void testIndexGlobalAndPerRequest() throws IOException { BulkRequest request = new BulkRequest("global_index"); - request.add(new IndexRequest("local_index").id("1").source(XContentType.JSON, "field", "bulk1")); + request.add(new 
IndexRequest("local_index").id("1").source(MediaTypeRegistry.JSON, "field", "bulk1")); request.add( new IndexRequest().id("2") // will take global index - .source(XContentType.JSON, "field", "bulk2") + .source(MediaTypeRegistry.JSON, "field", "bulk2") ); bulk(request); @@ -143,8 +143,8 @@ public void testIndexGlobalAndPerRequest() throws IOException { public void testGlobalRouting() throws IOException { createIndexWithMultipleShards("index"); BulkRequest request = new BulkRequest((String) null); - request.add(new IndexRequest("index").id("1").source(XContentType.JSON, "field", "bulk1")); - request.add(new IndexRequest("index").id("2").source(XContentType.JSON, "field", "bulk1")); + request.add(new IndexRequest("index").id("1").source(MediaTypeRegistry.JSON, "field", "bulk1")); + request.add(new IndexRequest("index").id("2").source(MediaTypeRegistry.JSON, "field", "bulk1")); request.routing("1"); bulk(request); @@ -158,8 +158,8 @@ public void testGlobalRouting() throws IOException { public void testMixLocalAndGlobalRouting() throws IOException { BulkRequest request = new BulkRequest((String) null); request.routing("globalRouting"); - request.add(new IndexRequest("index").id("1").source(XContentType.JSON, "field", "bulk1")); - request.add(new IndexRequest("index").id("2").routing("localRouting").source(XContentType.JSON, "field", "bulk1")); + request.add(new IndexRequest("index").id("1").source(MediaTypeRegistry.JSON, "field", "bulk1")); + request.add(new IndexRequest("index").id("2").routing("localRouting").source(MediaTypeRegistry.JSON, "field", "bulk1")); bulk(request); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterClientIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterClientIT.java index 2ac0eee407b95..79481fd03b2a1 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterClientIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterClientIT.java @@ -61,12 +61,12 @@ import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.support.XContentMapValues; -import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.transport.RemoteClusterService; import org.opensearch.transport.SniffConnectionStrategy; @@ -125,7 +125,7 @@ public void testClusterPutSettings() throws IOException { ClusterUpdateSettingsRequest resetRequest = new ClusterUpdateSettingsRequest(); resetRequest.transientSettings(Settings.builder().putNull(transientSettingKey)); - resetRequest.persistentSettings("{\"" + persistentSettingKey + "\": null }", XContentType.JSON); + resetRequest.persistentSettings("{\"" + persistentSettingKey + "\": null }", MediaTypeRegistry.JSON); ClusterUpdateSettingsResponse resetResponse = execute( resetRequest, diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java index f201599632969..3415868c9f8c6 100644 --- 
a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java @@ -32,6 +32,8 @@ package org.opensearch.client; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; import org.opensearch.action.admin.cluster.settings.ClusterGetSettingsRequest; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; @@ -40,10 +42,8 @@ import org.opensearch.client.cluster.RemoteInfoRequest; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.Priority; -import org.opensearch.common.util.CollectionUtils; +import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.test.OpenSearchTestCase; -import org.apache.hc.client5.http.classic.methods.HttpGet; -import org.apache.hc.client5.http.classic.methods.HttpPut; import org.hamcrest.CoreMatchers; import org.junit.Assert; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/CrudIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/CrudIT.java index eaf8f4f8efff7..da9f790215669 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/CrudIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/CrudIT.java @@ -58,17 +58,19 @@ import org.opensearch.client.core.TermVectorsRequest; import org.opensearch.client.core.TermVectorsResponse; import org.opensearch.client.indices.GetIndexRequest; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.core.common.Strings; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.VersionType; import org.opensearch.index.get.GetResult; -import org.opensearch.core.rest.RestStatus; import org.opensearch.script.Script; import org.opensearch.script.ScriptType; import org.opensearch.search.fetch.subphase.FetchSourceContext; @@ -202,7 +204,7 @@ public void testExists() throws IOException { assertFalse(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync)); } IndexRequest index = new IndexRequest("index").id("id"); - index.source("{\"field1\":\"value1\",\"field2\":\"value2\"}", XContentType.JSON); + index.source("{\"field1\":\"value1\",\"field2\":\"value2\"}", MediaTypeRegistry.JSON); index.setRefreshPolicy(RefreshPolicy.IMMEDIATE); highLevelClient().index(index, RequestOptions.DEFAULT); { @@ -227,7 +229,7 @@ public void testDeprecatedSourceExists() throws IOException { assertFalse(execute(getRequest, highLevelClient()::existsSource, highLevelClient()::existsSourceAsync)); } IndexRequest index = new IndexRequest("index").id("id"); - index.source("{\"field1\":\"value1\",\"field2\":\"value2\"}", 
XContentType.JSON); + index.source("{\"field1\":\"value1\",\"field2\":\"value2\"}", MediaTypeRegistry.JSON); index.setRefreshPolicy(RefreshPolicy.IMMEDIATE); highLevelClient().index(index, RequestOptions.DEFAULT); { @@ -250,7 +252,7 @@ public void testSourceExists() throws IOException { assertFalse(execute(getRequest, highLevelClient()::existsSource, highLevelClient()::existsSourceAsync)); } IndexRequest index = new IndexRequest("index").id("id"); - index.source("{\"field1\":\"value1\",\"field2\":\"value2\"}", XContentType.JSON); + index.source("{\"field1\":\"value1\",\"field2\":\"value2\"}", MediaTypeRegistry.JSON); index.setRefreshPolicy(RefreshPolicy.IMMEDIATE); highLevelClient().index(index, RequestOptions.DEFAULT); { @@ -274,9 +276,9 @@ public void testSourceDoesNotExist() throws IOException { RestStatus.OK, highLevelClient().bulk( new BulkRequest().add( - new IndexRequest(noSourceIndex).id("1").source(Collections.singletonMap("foo", 1), XContentType.JSON) + new IndexRequest(noSourceIndex).id("1").source(Collections.singletonMap("foo", 1), MediaTypeRegistry.JSON) ) - .add(new IndexRequest(noSourceIndex).id("2").source(Collections.singletonMap("foo", 2), XContentType.JSON)) + .add(new IndexRequest(noSourceIndex).id("2").source(Collections.singletonMap("foo", 2), MediaTypeRegistry.JSON)) .setRefreshPolicy(RefreshPolicy.IMMEDIATE), RequestOptions.DEFAULT ).status() @@ -306,7 +308,7 @@ public void testGet() throws IOException { } IndexRequest index = new IndexRequest("index").id("id"); String document = "{\"field1\":\"value1\",\"field2\":\"value2\"}"; - index.source(document, XContentType.JSON); + index.source(document, MediaTypeRegistry.JSON); index.setRefreshPolicy(RefreshPolicy.IMMEDIATE); highLevelClient().index(index, RequestOptions.DEFAULT); { @@ -406,10 +408,10 @@ public void testMultiGet() throws IOException { BulkRequest bulk = new BulkRequest(); bulk.setRefreshPolicy(RefreshPolicy.IMMEDIATE); IndexRequest index = new IndexRequest("index").id("id1"); - index.source("{\"field\":\"value1\"}", XContentType.JSON); + index.source("{\"field\":\"value1\"}", MediaTypeRegistry.JSON); bulk.add(index); index = new IndexRequest("index").id("id2"); - index.source("{\"field\":\"value2\"}", XContentType.JSON); + index.source("{\"field\":\"value2\"}", MediaTypeRegistry.JSON); bulk.add(index); highLevelClient().bulk(bulk, RequestOptions.DEFAULT); { @@ -436,8 +438,8 @@ public void testMultiGet() throws IOException { public void testMultiGetWithIds() throws IOException { BulkRequest bulk = new BulkRequest(); bulk.setRefreshPolicy(RefreshPolicy.IMMEDIATE); - bulk.add(new IndexRequest("index").id("id1").source("{\"field\":\"value1\"}", XContentType.JSON)); - bulk.add(new IndexRequest("index").id("id2").source("{\"field\":\"value2\"}", XContentType.JSON)); + bulk.add(new IndexRequest("index").id("id1").source("{\"field\":\"value1\"}", MediaTypeRegistry.JSON)); + bulk.add(new IndexRequest("index").id("id2").source("{\"field\":\"value2\"}", MediaTypeRegistry.JSON)); MultiGetRequest multiGetRequest = new MultiGetRequest(); multiGetRequest.add("index", "id1"); @@ -457,7 +459,7 @@ public void testGetSource() throws IOException { } IndexRequest index = new IndexRequest("index").id("id"); String document = "{\"field1\":\"value1\",\"field2\":\"value2\"}"; - index.source(document, XContentType.JSON); + index.source(document, MediaTypeRegistry.JSON); index.setRefreshPolicy(RefreshPolicy.IMMEDIATE); highLevelClient().index(index, RequestOptions.DEFAULT); { @@ -815,7 +817,7 @@ public void testUpdate() throws 
IOException { { IllegalStateException exception = expectThrows(IllegalStateException.class, () -> { UpdateRequest updateRequest = new UpdateRequest("index", "id"); - updateRequest.doc(new IndexRequest().source(Collections.singletonMap("field", "doc"), XContentType.JSON)); + updateRequest.doc(new IndexRequest().source(Collections.singletonMap("field", "doc"), MediaTypeRegistry.JSON)); updateRequest.upsert(new IndexRequest().source(Collections.singletonMap("field", "upsert"), XContentType.YAML)); execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); }); @@ -827,7 +829,7 @@ public void testUpdate() throws IOException { { OpenSearchException exception = expectThrows(OpenSearchException.class, () -> { UpdateRequest updateRequest = new UpdateRequest("index", "require_alias").setRequireAlias(true); - updateRequest.doc(new IndexRequest().source(Collections.singletonMap("field", "doc"), XContentType.JSON)); + updateRequest.doc(new IndexRequest().source(Collections.singletonMap("field", "doc"), MediaTypeRegistry.JSON)); execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); }); assertEquals(RestStatus.NOT_FOUND, exception.status()); @@ -842,7 +844,7 @@ public void testBulk() throws IOException { int nbItems = randomIntBetween(10, 100); boolean[] errors = new boolean[nbItems]; - XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE); + MediaType mediaType = randomFrom(MediaTypeRegistry.JSON, XContentType.SMILE); BulkRequest bulkRequest = new BulkRequest(); for (int i = 0; i < nbItems; i++) { @@ -863,10 +865,10 @@ public void testBulk() throws IOException { } else { BytesReference source = BytesReference.bytes( - XContentBuilder.builder(xContentType.xContent()).startObject().field("id", i).endObject() + XContentBuilder.builder(mediaType.xContent()).startObject().field("id", i).endObject() ); if (opType == DocWriteRequest.OpType.INDEX) { - IndexRequest indexRequest = new IndexRequest("index").id(id).source(source, xContentType); + IndexRequest indexRequest = new IndexRequest("index").id(id).source(source, mediaType); if (erroneous) { indexRequest.setIfSeqNo(12L); indexRequest.setIfPrimaryTerm(12L); @@ -874,14 +876,14 @@ public void testBulk() throws IOException { bulkRequest.add(indexRequest); } else if (opType == DocWriteRequest.OpType.CREATE) { - IndexRequest createRequest = new IndexRequest("index").id(id).source(source, xContentType).create(true); + IndexRequest createRequest = new IndexRequest("index").id(id).source(source, mediaType).create(true); if (erroneous) { assertEquals(RestStatus.CREATED, highLevelClient().index(createRequest, RequestOptions.DEFAULT).status()); } bulkRequest.add(createRequest); } else if (opType == DocWriteRequest.OpType.UPDATE) { - UpdateRequest updateRequest = new UpdateRequest("index", id).doc(new IndexRequest().source(source, xContentType)); + UpdateRequest updateRequest = new UpdateRequest("index", id).doc(new IndexRequest().source(source, mediaType)); if (erroneous == false) { assertEquals( RestStatus.CREATED, @@ -905,7 +907,7 @@ public void testBulkProcessorIntegration() throws IOException { int nbItems = randomIntBetween(10, 100); boolean[] errors = new boolean[nbItems]; - XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE); + MediaType mediaType = randomFrom(MediaTypeRegistry.JSON, XContentType.SMILE); AtomicReference<BulkResponse> responseRef = new AtomicReference<>(); AtomicReference<BulkRequest> requestRef = new AtomicReference<>(); @@ -953,7 +955,7 @@ 
public void afterBulk(long executionId, BulkRequest request, Throwable failure) } else { if (opType == DocWriteRequest.OpType.INDEX) { - IndexRequest indexRequest = new IndexRequest("index").id(id).source(xContentType, "id", i); + IndexRequest indexRequest = new IndexRequest("index").id(id).source(mediaType, "id", i); if (erroneous) { indexRequest.setIfSeqNo(12L); indexRequest.setIfPrimaryTerm(12L); @@ -961,14 +963,14 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure) processor.add(indexRequest); } else if (opType == DocWriteRequest.OpType.CREATE) { - IndexRequest createRequest = new IndexRequest("index").id(id).source(xContentType, "id", i).create(true); + IndexRequest createRequest = new IndexRequest("index").id(id).source(mediaType, "id", i).create(true); if (erroneous) { assertEquals(RestStatus.CREATED, highLevelClient().index(createRequest, RequestOptions.DEFAULT).status()); } processor.add(createRequest); } else if (opType == DocWriteRequest.OpType.UPDATE) { - UpdateRequest updateRequest = new UpdateRequest("index", id).doc(new IndexRequest().source(xContentType, "id", i)); + UpdateRequest updateRequest = new UpdateRequest("index", id).doc(new IndexRequest().source(mediaType, "id", i)); if (erroneous == false) { assertEquals( RestStatus.CREATED, @@ -1106,9 +1108,12 @@ public void testTermvectors() throws IOException { RestStatus.OK, highLevelClient().bulk( new BulkRequest().add( - new IndexRequest(sourceIndex).id("1").source(Collections.singletonMap("field", "value1"), XContentType.JSON) + new IndexRequest(sourceIndex).id("1").source(Collections.singletonMap("field", "value1"), MediaTypeRegistry.JSON) ) - .add(new IndexRequest(sourceIndex).id("2").source(Collections.singletonMap("field", "value2"), XContentType.JSON)) + .add( + new IndexRequest(sourceIndex).id("2") + .source(Collections.singletonMap("field", "value2"), MediaTypeRegistry.JSON) + ) .setRefreshPolicy(RefreshPolicy.IMMEDIATE), RequestOptions.DEFAULT ).status() @@ -1201,8 +1206,8 @@ public void testMultiTermvectors() throws IOException { assertEquals( RestStatus.OK, highLevelClient().bulk( - new BulkRequest().add(new IndexRequest(sourceIndex).id("1").source(doc1, XContentType.JSON)) - .add(new IndexRequest(sourceIndex).id("2").source(doc2, XContentType.JSON)) + new BulkRequest().add(new IndexRequest(sourceIndex).id("1").source(doc1, MediaTypeRegistry.JSON)) + .add(new IndexRequest(sourceIndex).id("2").source(doc2, MediaTypeRegistry.JSON)) .setRefreshPolicy(RefreshPolicy.IMMEDIATE), RequestOptions.DEFAULT ).status() diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/CustomRestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/CustomRestHighLevelClientTests.java index 972c96999945f..fe7392cb0c0ce 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/CustomRestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/CustomRestHighLevelClientTests.java @@ -43,14 +43,14 @@ import org.apache.lucene.util.BytesRef; import org.opensearch.Build; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.main.MainRequest; import org.opensearch.action.main.MainResponse; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.ClusterName; import org.opensearch.common.SuppressForbidden; -import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.common.xcontent.XContentType; +import 
org.opensearch.core.action.ActionListener; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentHelper; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; @@ -174,7 +174,7 @@ private Response mockPerformRequest(Request request) throws IOException { when(mockResponse.getStatusLine()).thenReturn(new StatusLine(protocol, 200, "OK")); MainResponse response = new MainResponse(httpHeader.getValue(), Version.CURRENT, ClusterName.DEFAULT, "_na", Build.CURRENT); - BytesRef bytesRef = XContentHelper.toXContent(response, XContentType.JSON, false).toBytesRef(); + BytesRef bytesRef = XContentHelper.toXContent(response, MediaTypeRegistry.JSON, false).toBytesRef(); when(mockResponse.getEntity()).thenReturn(new ByteArrayEntity(bytesRef.bytes, ContentType.APPLICATION_JSON)); RequestLine requestLine = new RequestLine(HttpGet.METHOD_NAME, ENDPOINT, protocol); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/GetAliasesResponseTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/GetAliasesResponseTests.java index 96f958a4ad2d8..245a720dc9825 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/GetAliasesResponseTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/GetAliasesResponseTests.java @@ -33,9 +33,9 @@ package org.opensearch.client; import org.opensearch.cluster.metadata.AliasMetadata; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.XContentParser; import org.opensearch.test.AbstractXContentTestCase; import java.io.IOException; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java index 8ca5c5fa58742..c8eafb88b7495 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java @@ -108,23 +108,23 @@ import org.opensearch.cluster.metadata.MappingMetadata; import org.opensearch.cluster.metadata.Template; import org.opensearch.common.ValidationException; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; -import org.opensearch.core.common.Strings; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.common.xcontent.support.XContentMapValues; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexSettings; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; -import org.opensearch.core.rest.RestStatus; import java.io.IOException; 
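The mocked-response hunk above serializes a ToXContent through the relocated helper; a sketch assuming the toXContent(ToXContent, MediaType, boolean) overload used there (JsonBytesSketch is an invented wrapper):

    import org.apache.lucene.util.BytesRef;
    import org.opensearch.core.xcontent.MediaTypeRegistry;
    import org.opensearch.core.xcontent.ToXContent;
    import org.opensearch.core.xcontent.XContentHelper;

    import java.io.IOException;

    final class JsonBytesSketch {
        static BytesRef jsonBytes(ToXContent value) throws IOException {
            // humanReadable=false, matching the CustomRestHighLevelClientTests call.
            return XContentHelper.toXContent(value, MediaTypeRegistry.JSON, false).toBytesRef();
        }
    }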
import java.util.Arrays; @@ -134,6 +134,10 @@ import java.util.Map; import java.util.stream.Collectors; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.common.xcontent.support.XContentMapValues.extractRawValues; +import static org.opensearch.common.xcontent.support.XContentMapValues.extractValue; import static org.hamcrest.CoreMatchers.hasItem; import static org.hamcrest.Matchers.arrayContainingInAnyOrder; import static org.hamcrest.Matchers.contains; @@ -149,10 +153,6 @@ import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.startsWith; import static org.hamcrest.core.IsInstanceOf.instanceOf; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; -import static org.opensearch.common.xcontent.support.XContentMapValues.extractRawValues; -import static org.opensearch.common.xcontent.support.XContentMapValues.extractValue; public class IndicesClientIT extends OpenSearchRestHighLevelClientTestCase { @@ -1074,7 +1074,7 @@ public void testRollover() throws IOException { } { String mappings = "{\"properties\":{\"field2\":{\"type\":\"keyword\"}}}"; - rolloverRequest.getCreateIndexRequest().mapping(mappings, XContentType.JSON); + rolloverRequest.getCreateIndexRequest().mapping(mappings, MediaTypeRegistry.JSON); rolloverRequest.dryRun(false); rolloverRequest.addMaxIndexSizeCondition(new ByteSizeValue(1, ByteSizeUnit.MB)); RolloverResponse rolloverResponse = execute( @@ -1489,7 +1489,7 @@ public void testPutTemplate() throws Exception { .order(10) .create(randomBoolean()) .settings(Settings.builder().put("number_of_shards", "3").put("number_of_replicas", "0")) - .mapping("{ \"properties\": { \"host_name\": { \"type\": \"keyword\" } } }", XContentType.JSON) + .mapping("{ \"properties\": { \"host_name\": { \"type\": \"keyword\" } } }", MediaTypeRegistry.JSON) .alias(new Alias("alias-1").indexRouting("abc")) .alias(new Alias("alias-1").indexRouting("abc")) .alias(new Alias("{index}-write").searchRouting("xyz")); @@ -1558,7 +1558,7 @@ public void testPutTemplateWithTypesUsingUntypedAPI() throws Exception { + " }" + " }" + "}", - XContentType.JSON + MediaTypeRegistry.JSON ) .alias(new Alias("alias-1").indexRouting("abc")) .alias(new Alias("{index}-write").searchRouting("xyz")); @@ -1664,7 +1664,7 @@ public void testCRUDIndexTemplate() throws Exception { equalTo(true) ); PutIndexTemplateRequest putTemplate2 = new PutIndexTemplateRequest("template-2").patterns(Arrays.asList("pattern-2", "name-2")) - .mapping("{\"properties\": { \"name\": { \"type\": \"text\" }}}", XContentType.JSON) + .mapping("{\"properties\": { \"name\": { \"type\": \"text\" }}}", MediaTypeRegistry.JSON) .settings(Settings.builder().put("number_of_shards", "2").put("number_of_replicas", "0")); assertThat( execute(putTemplate2, client.indices()::putTemplate, client.indices()::putTemplateAsync).isAcknowledged(), diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java index e21619ff15ef6..c3a0f049f375e 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java +++ 
b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java @@ -74,12 +74,12 @@ import org.opensearch.common.CheckedFunction; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.CollectionUtils; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.common.util.CollectionUtils; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.test.OpenSearchTestCase; import org.junit.Assert; -import org.opensearch.common.unit.ByteSizeValue; import java.io.IOException; import java.util.Arrays; @@ -858,7 +858,7 @@ public void testPutTemplateRequest() throws Exception { + "\" : { \"type\" : \"" + OpenSearchTestCase.randomFrom("text", "keyword") + "\" }}}", - XContentType.JSON + MediaTypeRegistry.JSON ); } if (OpenSearchTestCase.randomBoolean()) { diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IngestClientIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/IngestClientIT.java index e3c8197dc2c90..33bff06a83065 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/IngestClientIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/IngestClientIT.java @@ -42,9 +42,9 @@ import org.opensearch.action.ingest.SimulatePipelineRequest; import org.opensearch.action.ingest.SimulatePipelineResponse; import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.ingest.PipelineConfiguration; import java.io.IOException; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java index 0f377720b7aed..38dde4be3dd8e 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java @@ -32,18 +32,18 @@ package org.opensearch.client; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.opensearch.action.ingest.DeletePipelineRequest; import org.opensearch.action.ingest.GetPipelineRequest; import org.opensearch.action.ingest.PutPipelineRequest; import org.opensearch.action.ingest.SimulatePipelineRequest; import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.test.OpenSearchTestCase; -import org.apache.hc.client5.http.classic.methods.HttpDelete; -import org.apache.hc.client5.http.classic.methods.HttpGet; -import org.apache.hc.client5.http.classic.methods.HttpPost; -import org.apache.hc.client5.http.classic.methods.HttpPut; import org.junit.Assert; import java.io.IOException; @@ -59,7 +59,7 @@ public void testPutPipeline() throws IOException { PutPipelineRequest request = new PutPipelineRequest( 
"some_pipeline_id", new BytesArray("{}".getBytes(StandardCharsets.UTF_8)), - XContentType.JSON + MediaTypeRegistry.JSON ); Map<String, String> expectedParams = new HashMap<>(); RequestConvertersTests.setRandomClusterManagerTimeout(request, expectedParams); @@ -130,7 +130,7 @@ public void testSimulatePipeline() throws IOException { + "}"; SimulatePipelineRequest request = new SimulatePipelineRequest( new BytesArray(json.getBytes(StandardCharsets.UTF_8)), - XContentType.JSON + MediaTypeRegistry.JSON ); request.setId(pipelineId); request.setVerbose(verbose); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/MockRestHighLevelTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/MockRestHighLevelTests.java index e1179c0f24cb8..c7cd382d64908 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/MockRestHighLevelTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/MockRestHighLevelTests.java @@ -32,12 +32,12 @@ package org.opensearch.client; -import org.opensearch.test.OpenSearchTestCase; import org.apache.hc.client5.http.classic.methods.HttpGet; import org.apache.hc.core5.http.HttpHost; import org.apache.hc.core5.http.ProtocolVersion; import org.apache.hc.core5.http.message.RequestLine; import org.apache.hc.core5.http.message.StatusLine; +import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; import java.io.IOException; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/OpenSearchRestHighLevelClientTestCase.java b/client/rest-high-level/src/test/java/org/opensearch/client/OpenSearchRestHighLevelClientTestCase.java index ee1f217f47ef2..b0a7d1e3578c0 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/OpenSearchRestHighLevelClientTestCase.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/OpenSearchRestHighLevelClientTestCase.java @@ -32,8 +32,9 @@ package org.opensearch.client; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.OpenSearchParseException; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.opensearch.action.admin.cluster.node.tasks.list.TaskGroup; @@ -48,21 +49,21 @@ import org.opensearch.client.indices.CreateIndexRequest; import org.opensearch.common.Booleans; import org.opensearch.common.CheckedRunnable; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.common.util.io.IOUtils; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.ingest.Pipeline; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchModule; -import org.opensearch.tasks.TaskId; import org.opensearch.test.rest.OpenSearchRestTestCase; -import org.apache.hc.core5.http.ParseException; -import 
org.apache.hc.core5.http.io.entity.EntityUtils; import org.junit.AfterClass; import org.junit.Before; @@ -223,7 +224,7 @@ protected static void createFieldAddingPipleine(String id, String fieldName, Str .endArray() .endObject(); - createPipeline(new PutPipelineRequest(id, BytesReference.bytes(pipeline), XContentType.JSON)); + createPipeline(new PutPipelineRequest(id, BytesReference.bytes(pipeline), MediaTypeRegistry.JSON)); } protected static void createPipeline(String pipelineId) throws IOException { diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java index 1f10deb400ecc..b0990560b08ba 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java @@ -10,9 +10,7 @@ import org.apache.hc.client5.http.classic.methods.HttpPost; import org.apache.hc.client5.http.classic.methods.HttpPut; -import org.junit.Before; import org.opensearch.OpenSearchStatusException; -import org.opensearch.action.ActionListener; import org.opensearch.action.search.CreatePitRequest; import org.opensearch.action.search.CreatePitResponse; import org.opensearch.action.search.DeletePitInfo; @@ -20,11 +18,12 @@ import org.opensearch.action.search.DeletePitResponse; import org.opensearch.action.search.GetAllPitNodesResponse; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.junit.Before; import java.io.IOException; import java.util.ArrayList; import java.util.List; -import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; @@ -72,7 +71,7 @@ public void testCreateAndDeletePit() throws IOException { assertTrue(deletePitResponse.getDeletePitResults().get(0).getPitId().equals(createPitResponse.getId())); } - public void testDeleteAllAndListAllPits() throws IOException, InterruptedException { + public void testDeleteAllAndListAllPits() throws Exception { CreatePitRequest pitRequest = new CreatePitRequest(new TimeValue(1, TimeUnit.DAYS), true, "index"); CreatePitResponse pitResponse = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); CreatePitResponse pitResponse1 = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); @@ -91,11 +90,9 @@ public void testDeleteAllAndListAllPits() throws IOException, InterruptedExcepti List<String> pits = getAllPitResponse.getPitInfos().stream().map(r -> r.getPitId()).collect(Collectors.toList()); assertTrue(pits.contains(pitResponse.getId())); assertTrue(pits.contains(pitResponse1.getId())); - CountDownLatch countDownLatch = new CountDownLatch(1); ActionListener<DeletePitResponse> deletePitListener = new ActionListener<>() { @Override public void onResponse(DeletePitResponse response) { - countDownLatch.countDown(); for (DeletePitInfo deletePitInfo : response.getDeletePitResults()) { assertTrue(deletePitInfo.isSuccessful()); } @@ -103,19 +100,20 @@ public void onResponse(DeletePitResponse response) { @Override public void onFailure(Exception e) { - countDownLatch.countDown(); if (!(e instanceof OpenSearchStatusException)) { throw new AssertionError("Delete all failed"); } } }; final CreatePitResponse pitResponse3 = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); - + assertTrue(pitResponse3.getId() != null); ActionListener<GetAllPitNodesResponse> getPitsListener = new 
ActionListener<GetAllPitNodesResponse>() { @Override public void onResponse(GetAllPitNodesResponse response) { List<String> pits = response.getPitInfos().stream().map(r -> r.getPitId()).collect(Collectors.toList()); assertTrue(pits.contains(pitResponse3.getId())); + // delete all pits + highLevelClient().deleteAllPitsAsync(RequestOptions.DEFAULT, deletePitListener); } @Override @@ -126,11 +124,12 @@ public void onFailure(Exception e) { } }; highLevelClient().getAllPitsAsync(RequestOptions.DEFAULT, getPitsListener); - highLevelClient().deleteAllPitsAsync(RequestOptions.DEFAULT, deletePitListener); - assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); + // validate no pits case - getAllPitResponse = highLevelClient().getAllPits(RequestOptions.DEFAULT); - assertTrue(getAllPitResponse.getPitInfos().size() == 0); - highLevelClient().deleteAllPitsAsync(RequestOptions.DEFAULT, deletePitListener); + assertBusy(() -> { + GetAllPitNodesResponse getAllPitResponse1 = highLevelClient().getAllPits(RequestOptions.DEFAULT); + assertTrue(getAllPitResponse1.getPitInfos().size() == 0); + highLevelClient().deleteAllPitsAsync(RequestOptions.DEFAULT, deletePitListener); + }); } } diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RankEvalIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/RankEvalIT.java index 07e5b1627942e..47add92ecaccd 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RankEvalIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RankEvalIT.java @@ -32,7 +32,6 @@ package org.opensearch.client; -import org.junit.Before; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.support.IndicesOptions; import org.opensearch.index.query.MatchAllQueryBuilder; @@ -50,6 +49,7 @@ import org.opensearch.index.rankeval.RatedSearchHit; import org.opensearch.index.rankeval.RecallAtK; import org.opensearch.search.builder.SearchSourceBuilder; +import org.junit.Before; import java.io.IOException; import java.util.ArrayList; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/ReindexIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/ReindexIT.java index 65888e79683e3..2457bafdc6a22 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/ReindexIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/ReindexIT.java @@ -33,7 +33,6 @@ package org.opensearch.client; import org.opensearch.OpenSearchStatusException; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.opensearch.action.bulk.BulkItemResponse; import org.opensearch.action.bulk.BulkRequest; @@ -42,15 +41,16 @@ import org.opensearch.action.support.WriteRequest; import org.opensearch.client.tasks.TaskSubmissionResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.query.IdsQueryBuilder; import org.opensearch.index.reindex.BulkByScrollResponse; import org.opensearch.index.reindex.DeleteByQueryAction; import org.opensearch.index.reindex.DeleteByQueryRequest; import org.opensearch.index.reindex.ReindexRequest; -import org.opensearch.core.rest.RestStatus; import org.opensearch.tasks.RawTaskStatus; -import 
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RankEvalIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/RankEvalIT.java
index 07e5b1627942e..47add92ecaccd 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/RankEvalIT.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/RankEvalIT.java
@@ -32,7 +32,6 @@
 package org.opensearch.client;
 
-import org.junit.Before;
 import org.opensearch.action.search.SearchRequest;
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.index.query.MatchAllQueryBuilder;
@@ -50,6 +49,7 @@
 import org.opensearch.index.rankeval.RatedSearchHit;
 import org.opensearch.index.rankeval.RecallAtK;
 import org.opensearch.search.builder.SearchSourceBuilder;
+import org.junit.Before;
 
 import java.io.IOException;
 import java.util.ArrayList;
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/ReindexIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/ReindexIT.java
index 65888e79683e3..2457bafdc6a22 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/ReindexIT.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/ReindexIT.java
@@ -33,7 +33,6 @@
 package org.opensearch.client;
 
 import org.opensearch.OpenSearchStatusException;
-import org.opensearch.action.ActionListener;
 import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
 import org.opensearch.action.bulk.BulkItemResponse;
 import org.opensearch.action.bulk.BulkRequest;
@@ -42,15 +41,16 @@
 import org.opensearch.action.support.WriteRequest;
 import org.opensearch.client.tasks.TaskSubmissionResponse;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.rest.RestStatus;
+import org.opensearch.core.tasks.TaskId;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.index.query.IdsQueryBuilder;
 import org.opensearch.index.reindex.BulkByScrollResponse;
 import org.opensearch.index.reindex.DeleteByQueryAction;
 import org.opensearch.index.reindex.DeleteByQueryRequest;
 import org.opensearch.index.reindex.ReindexRequest;
-import org.opensearch.core.rest.RestStatus;
 import org.opensearch.tasks.RawTaskStatus;
-import org.opensearch.tasks.TaskId;
 
 import java.io.IOException;
 import java.util.Collections;
@@ -76,9 +76,9 @@ public void testReindex() throws IOException {
         createIndex(sourceIndex, settings);
         createIndex(destinationIndex, settings);
         BulkRequest bulkRequest = new BulkRequest().add(
-            new IndexRequest(sourceIndex).id("1").source(Collections.singletonMap("foo", "bar"), XContentType.JSON)
+            new IndexRequest(sourceIndex).id("1").source(Collections.singletonMap("foo", "bar"), MediaTypeRegistry.JSON)
         )
-            .add(new IndexRequest(sourceIndex).id("2").source(Collections.singletonMap("foo2", "bar2"), XContentType.JSON))
+            .add(new IndexRequest(sourceIndex).id("2").source(Collections.singletonMap("foo2", "bar2"), MediaTypeRegistry.JSON))
             .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
         assertEquals(RestStatus.OK, highLevelClient().bulk(bulkRequest, RequestOptions.DEFAULT).status());
     }
@@ -132,9 +132,9 @@ public void testReindexTask() throws Exception {
         createIndex(sourceIndex, settings);
         createIndex(destinationIndex, settings);
         BulkRequest bulkRequest = new BulkRequest().add(
-            new IndexRequest(sourceIndex).id("1").source(Collections.singletonMap("foo", "bar"), XContentType.JSON)
+            new IndexRequest(sourceIndex).id("1").source(Collections.singletonMap("foo", "bar"), MediaTypeRegistry.JSON)
         )
-            .add(new IndexRequest(sourceIndex).id("2").source(Collections.singletonMap("foo2", "bar2"), XContentType.JSON))
+            .add(new IndexRequest(sourceIndex).id("2").source(Collections.singletonMap("foo2", "bar2"), MediaTypeRegistry.JSON))
             .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
         assertEquals(RestStatus.OK, highLevelClient().bulk(bulkRequest, RequestOptions.DEFAULT).status());
     }
@@ -163,9 +163,9 @@ public void testReindexConflict() throws IOException {
         createIndex(sourceIndex, settings);
         createIndex(destIndex, settings);
         final BulkRequest bulkRequest = new BulkRequest().add(
-            new IndexRequest(sourceIndex).id("1").source(Collections.singletonMap("foo", "bar"), XContentType.JSON)
+            new IndexRequest(sourceIndex).id("1").source(Collections.singletonMap("foo", "bar"), MediaTypeRegistry.JSON)
         )
-            .add(new IndexRequest(sourceIndex).id("2").source(Collections.singletonMap("foo", "bar"), XContentType.JSON))
+            .add(new IndexRequest(sourceIndex).id("2").source(Collections.singletonMap("foo", "bar"), MediaTypeRegistry.JSON))
             .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
 
         assertThat(highLevelClient().bulk(bulkRequest, RequestOptions.DEFAULT).status(), equalTo(RestStatus.OK));
@@ -205,10 +205,10 @@ public void testDeleteByQuery() throws Exception {
             RestStatus.OK,
             highLevelClient().bulk(
                 new BulkRequest().add(
-                    new IndexRequest(sourceIndex).id("1").source(Collections.singletonMap("foo", 1), XContentType.JSON)
+                    new IndexRequest(sourceIndex).id("1").source(Collections.singletonMap("foo", 1), MediaTypeRegistry.JSON)
                 )
-                    .add(new IndexRequest(sourceIndex).id("2").source(Collections.singletonMap("foo", 2), XContentType.JSON))
-                    .add(new IndexRequest(sourceIndex).id("3").source(Collections.singletonMap("foo", 3), XContentType.JSON))
+                    .add(new IndexRequest(sourceIndex).id("2").source(Collections.singletonMap("foo", 2), MediaTypeRegistry.JSON))
+                    .add(new IndexRequest(sourceIndex).id("3").source(Collections.singletonMap("foo", 3), MediaTypeRegistry.JSON))
                     .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE),
                 RequestOptions.DEFAULT
             ).status()
@@ -305,10 +305,10 @@ public void testDeleteByQueryTask() throws Exception {
             RestStatus.OK,
             highLevelClient().bulk(
                 new BulkRequest().add(
-                    new IndexRequest(sourceIndex).id("1").source(Collections.singletonMap("foo", 1), XContentType.JSON)
+                    new IndexRequest(sourceIndex).id("1").source(Collections.singletonMap("foo", 1), MediaTypeRegistry.JSON)
                 )
-                    .add(new IndexRequest(sourceIndex).id("2").source(Collections.singletonMap("foo", 2), XContentType.JSON))
-                    .add(new IndexRequest(sourceIndex).id("3").source(Collections.singletonMap("foo", 3), XContentType.JSON))
+                    .add(new IndexRequest(sourceIndex).id("2").source(Collections.singletonMap("foo", 2), MediaTypeRegistry.JSON))
+                    .add(new IndexRequest(sourceIndex).id("3").source(Collections.singletonMap("foo", 3), MediaTypeRegistry.JSON))
                     .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE),
                 RequestOptions.DEFAULT
             ).status()
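The recurring substitution in these files is XContentType.JSON (org.opensearch.common.xcontent) giving way to MediaTypeRegistry.JSON (org.opensearch.core.xcontent). A condensed before/after of a typical call site, lifted from the ReindexIT hunks above; the index name and id here are placeholders:

import java.util.Collections;

import org.opensearch.action.index.IndexRequest;
import org.opensearch.core.xcontent.MediaTypeRegistry;

public class MediaTypeMigration {
    static IndexRequest example() {
        // before: .source(Collections.singletonMap("foo", "bar"), XContentType.JSON)
        return new IndexRequest("source-index").id("1")
            .source(Collections.singletonMap("foo", "bar"), MediaTypeRegistry.JSON);
    }
}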
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java
index 15a99b3e91685..084d754275dec 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java
@@ -32,6 +32,14 @@
 package org.opensearch.client;
 
+import org.apache.hc.client5.http.classic.methods.HttpDelete;
+import org.apache.hc.client5.http.classic.methods.HttpGet;
+import org.apache.hc.client5.http.classic.methods.HttpHead;
+import org.apache.hc.client5.http.classic.methods.HttpPost;
+import org.apache.hc.client5.http.classic.methods.HttpPut;
+import org.apache.hc.core5.http.HttpEntity;
+import org.apache.hc.core5.http.io.entity.ByteArrayEntity;
+import org.apache.hc.core5.http.io.entity.EntityUtils;
 import org.opensearch.action.DocWriteRequest;
 import org.opensearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest;
 import org.opensearch.action.admin.cluster.storedscripts.GetStoredScriptRequest;
@@ -54,8 +62,8 @@
 import org.opensearch.action.support.ActiveShardCount;
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.action.support.WriteRequest;
-import org.opensearch.action.support.master.AcknowledgedRequest;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest;
+import org.opensearch.action.support.master.AcknowledgedRequest;
 import org.opensearch.action.support.replication.ReplicationRequest;
 import org.opensearch.action.update.UpdateRequest;
 import org.opensearch.client.RequestConverters.EndpointBuilder;
@@ -65,18 +73,19 @@
 import org.opensearch.client.core.TermVectorsRequest;
 import org.opensearch.client.indices.AnalyzeRequest;
 import org.opensearch.common.CheckedBiConsumer;
-import org.opensearch.core.common.bytes.BytesArray;
-import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.common.collect.Tuple;
 import org.opensearch.common.io.Streams;
 import org.opensearch.common.logging.DeprecationLogger;
 import org.opensearch.common.lucene.uid.Versions;
 import org.opensearch.common.unit.TimeValue;
-import org.opensearch.common.xcontent.XContentHelper;
 import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.common.xcontent.json.JsonXContent;
 import org.opensearch.core.common.Strings;
+import org.opensearch.core.common.bytes.BytesArray;
+import org.opensearch.core.common.bytes.BytesReference;
+import org.opensearch.core.tasks.TaskId;
 import org.opensearch.core.xcontent.MediaType;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.core.xcontent.XContentParser;
@@ -110,17 +119,8 @@
 import org.opensearch.search.rescore.QueryRescorerBuilder;
 import org.opensearch.search.suggest.SuggestBuilder;
 import org.opensearch.search.suggest.completion.CompletionSuggestionBuilder;
-import org.opensearch.tasks.TaskId;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.RandomObjects;
-import org.apache.hc.client5.http.classic.methods.HttpDelete;
-import org.apache.hc.client5.http.classic.methods.HttpGet;
-import org.apache.hc.client5.http.classic.methods.HttpHead;
-import org.apache.hc.client5.http.classic.methods.HttpPost;
-import org.apache.hc.client5.http.classic.methods.HttpPut;
-import org.apache.hc.core5.http.HttpEntity;
-import org.apache.hc.core5.http.io.entity.ByteArrayEntity;
-import org.apache.hc.core5.http.io.entity.EntityUtils;
 import org.hamcrest.Matchers;
 
 import java.io.IOException;
@@ -852,7 +852,7 @@ private static void setRandomIfSeqNoAndTerm(DocWriteRequest<?> request, Map<Stri
     public void testUpdateWithDifferentContentTypes() {
         IllegalStateException exception = expectThrows(IllegalStateException.class, () -> {
             UpdateRequest updateRequest = new UpdateRequest();
-            updateRequest.doc(new IndexRequest().source(singletonMap("field", "doc"), XContentType.JSON));
+            updateRequest.doc(new IndexRequest().source(singletonMap("field", "doc"), MediaTypeRegistry.JSON));
             updateRequest.upsert(new IndexRequest().source(singletonMap("field", "upsert"), XContentType.YAML));
             RequestConverters.update(updateRequest);
         });
@@ -876,7 +876,7 @@ public void testBulk() throws IOException {
 
         setRandomRefreshPolicy(bulkRequest::setRefreshPolicy, expectedParams);
 
-        XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE);
+        MediaType mediaType = randomFrom(MediaTypeRegistry.JSON, XContentType.SMILE);
 
         int nbItems = randomIntBetween(10, 100);
         DocWriteRequest<?>[] requests = new DocWriteRequest<?>[nbItems];
@@ -884,21 +884,21 @@ public void testBulk() throws IOException {
             String index = randomAlphaOfLength(5);
             String id = randomAlphaOfLength(5);
 
-            BytesReference source = RandomObjects.randomSource(random(), xContentType);
+            BytesReference source = RandomObjects.randomSource(random(), mediaType);
             DocWriteRequest.OpType opType = randomFrom(DocWriteRequest.OpType.values());
 
             DocWriteRequest<?> docWriteRequest;
             if (opType == DocWriteRequest.OpType.INDEX) {
-                IndexRequest indexRequest = new IndexRequest(index).id(id).source(source, xContentType);
+                IndexRequest indexRequest = new IndexRequest(index).id(id).source(source, mediaType);
                 docWriteRequest = indexRequest;
                 if (randomBoolean()) {
                     indexRequest.setPipeline(randomAlphaOfLength(5));
                 }
             } else if (opType == DocWriteRequest.OpType.CREATE) {
-                IndexRequest createRequest = new IndexRequest(index).id(id).source(source, xContentType).create(true);
+                IndexRequest createRequest = new IndexRequest(index).id(id).source(source, mediaType).create(true);
                 docWriteRequest = createRequest;
             } else if (opType == DocWriteRequest.OpType.UPDATE) {
-                final UpdateRequest updateRequest = new UpdateRequest(index, id).doc(new IndexRequest().source(source, xContentType));
+                final UpdateRequest updateRequest = new UpdateRequest(index, id).doc(new IndexRequest().source(source, mediaType));
                 docWriteRequest = updateRequest;
                 if (randomBoolean()) {
                     updateRequest.retryOnConflict(randomIntBetween(1, 5));
@@ -927,14 +927,14 @@ public void testBulk() throws IOException {
         assertEquals("/_bulk", request.getEndpoint());
         assertEquals(expectedParams, request.getParameters());
         assertEquals(HttpPost.METHOD_NAME, request.getMethod());
-        assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType());
+        assertEquals(mediaType.mediaTypeWithoutParameters(), request.getEntity().getContentType());
         byte[] content = new byte[(int) request.getEntity().getContentLength()];
         try (InputStream inputStream = request.getEntity().getContent()) {
             Streams.readFully(inputStream, content);
         }
 
         BulkRequest parsedBulkRequest = new BulkRequest();
-        parsedBulkRequest.add(content, 0, content.length, xContentType);
+        parsedBulkRequest.add(content, 0, content.length, mediaType);
         assertEquals(bulkRequest.numberOfActions(), parsedBulkRequest.numberOfActions());
 
         for (int i = 0; i < bulkRequest.numberOfActions(); i++) {
@@ -956,7 +956,7 @@ public void testBulk() throws IOException {
                 IndexRequest parsedIndexRequest = (IndexRequest) parsedRequest;
 
                 assertEquals(indexRequest.getPipeline(), parsedIndexRequest.getPipeline());
-                assertToXContentEquivalent(indexRequest.source(), parsedIndexRequest.source(), xContentType);
+                assertToXContentEquivalent(indexRequest.source(), parsedIndexRequest.source(), mediaType);
             } else if (opType == DocWriteRequest.OpType.UPDATE) {
                 UpdateRequest updateRequest = (UpdateRequest) originalRequest;
                 UpdateRequest parsedUpdateRequest = (UpdateRequest) parsedRequest;
@@ -964,7 +964,7 @@ public void testBulk() throws IOException {
                 assertEquals(updateRequest.retryOnConflict(), parsedUpdateRequest.retryOnConflict());
                 assertEquals(updateRequest.fetchSource(), parsedUpdateRequest.fetchSource());
                 if (updateRequest.doc() != null) {
-                    assertToXContentEquivalent(updateRequest.doc().source(), parsedUpdateRequest.doc().source(), xContentType);
+                    assertToXContentEquivalent(updateRequest.doc().source(), parsedUpdateRequest.doc().source(), mediaType);
                 } else {
                     assertNull(parsedUpdateRequest.doc());
                 }
@@ -980,34 +980,34 @@ public void testBulkWithDifferentContentTypes() throws IOException {
             bulkRequest.add(new DeleteRequest("index", "2"));
 
             Request request = RequestConverters.bulk(bulkRequest);
-            assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), request.getEntity().getContentType());
+            assertEquals(MediaTypeRegistry.JSON.mediaTypeWithoutParameters(), request.getEntity().getContentType());
         }
         {
-            XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE);
+            MediaType mediaType = randomFrom(MediaTypeRegistry.JSON, XContentType.SMILE);
             BulkRequest bulkRequest = new BulkRequest();
             bulkRequest.add(new DeleteRequest("index", "0"));
-            bulkRequest.add(new IndexRequest("index").id("0").source(singletonMap("field", "value"), xContentType));
+            bulkRequest.add(new IndexRequest("index").id("0").source(singletonMap("field", "value"), mediaType));
             bulkRequest.add(new DeleteRequest("index", "2"));
 
             Request request = RequestConverters.bulk(bulkRequest);
-            assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType());
+            assertEquals(mediaType.mediaTypeWithoutParameters(), request.getEntity().getContentType());
         }
         {
-            XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE);
+            MediaType mediaType = randomFrom(MediaTypeRegistry.JSON, XContentType.SMILE);
             UpdateRequest updateRequest = new UpdateRequest("index", "0");
             if (randomBoolean()) {
-                updateRequest.doc(new IndexRequest().source(singletonMap("field", "value"), xContentType));
+                updateRequest.doc(new IndexRequest().source(singletonMap("field", "value"), mediaType));
             } else {
-                updateRequest.upsert(new IndexRequest().source(singletonMap("field", "value"), xContentType));
+                updateRequest.upsert(new IndexRequest().source(singletonMap("field", "value"), mediaType));
             }
 
             Request request = RequestConverters.bulk(new BulkRequest().add(updateRequest));
-            assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType());
+            assertEquals(mediaType.mediaTypeWithoutParameters(), request.getEntity().getContentType());
         }
         {
             BulkRequest bulkRequest = new BulkRequest();
             bulkRequest.add(new IndexRequest("index").id("0").source(singletonMap("field", "value"), XContentType.SMILE));
-            bulkRequest.add(new IndexRequest("index").id("1").source(singletonMap("field", "value"), XContentType.JSON));
+            bulkRequest.add(new IndexRequest("index").id("1").source(singletonMap("field", "value"), MediaTypeRegistry.JSON));
             IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> RequestConverters.bulk(bulkRequest));
             assertEquals(
                 "Mismatching content-type found for request with content-type [JSON], " + "previous requests have content-type [SMILE]",
@@ -1016,10 +1016,10 @@ public void testBulkWithDifferentContentTypes() throws IOException {
         }
         {
             BulkRequest bulkRequest = new BulkRequest();
-            bulkRequest.add(new IndexRequest("index").id("0").source(singletonMap("field", "value"), XContentType.JSON));
-            bulkRequest.add(new IndexRequest("index").id("1").source(singletonMap("field", "value"), XContentType.JSON));
+            bulkRequest.add(new IndexRequest("index").id("0").source(singletonMap("field", "value"), MediaTypeRegistry.JSON));
+            bulkRequest.add(new IndexRequest("index").id("1").source(singletonMap("field", "value"), MediaTypeRegistry.JSON));
             bulkRequest.add(
-                new UpdateRequest("index", "2").doc(new IndexRequest().source(singletonMap("field", "value"), XContentType.JSON))
+                new UpdateRequest("index", "2").doc(new IndexRequest().source(singletonMap("field", "value"), MediaTypeRegistry.JSON))
                     .upsert(new IndexRequest().source(singletonMap("field", "value"), XContentType.SMILE))
             );
             IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> RequestConverters.bulk(bulkRequest));
@@ -1032,10 +1032,10 @@ public void testBulkWithDifferentContentTypes() throws IOException {
             XContentType xContentType = randomFrom(XContentType.CBOR, XContentType.YAML);
             BulkRequest bulkRequest = new BulkRequest();
             bulkRequest.add(new DeleteRequest("index", "0"));
-            bulkRequest.add(new IndexRequest("index").id("1").source(singletonMap("field", "value"), XContentType.JSON));
+            bulkRequest.add(new IndexRequest("index").id("1").source(singletonMap("field", "value"), MediaTypeRegistry.JSON));
             bulkRequest.add(new DeleteRequest("index", "2"));
             bulkRequest.add(new DeleteRequest("index", "3"));
-            bulkRequest.add(new IndexRequest("index").id("4").source(singletonMap("field", "value"), XContentType.JSON));
+            bulkRequest.add(new IndexRequest("index").id("4").source(singletonMap("field", "value"), MediaTypeRegistry.JSON));
             bulkRequest.add(new IndexRequest("index").id("1").source(singletonMap("field", "value"), xContentType));
             IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> RequestConverters.bulk(bulkRequest));
             assertEquals(
@@ -1048,9 +1048,9 @@ public void testBulkWithDifferentContentTypes() throws IOException {
     public void testGlobalPipelineOnBulkRequest() throws IOException {
         BulkRequest bulkRequest = new BulkRequest();
         bulkRequest.pipeline("xyz");
-        bulkRequest.add(new IndexRequest("test").id("11").source(XContentType.JSON, "field", "bulk1"));
-        bulkRequest.add(new IndexRequest("test").id("12").source(XContentType.JSON, "field", "bulk2"));
-        bulkRequest.add(new IndexRequest("test").id("13").source(XContentType.JSON, "field", "bulk3"));
+        bulkRequest.add(new IndexRequest("test").id("11").source(MediaTypeRegistry.JSON, "field", "bulk1"));
+        bulkRequest.add(new IndexRequest("test").id("12").source(MediaTypeRegistry.JSON, "field", "bulk2"));
+        bulkRequest.add(new IndexRequest("test").id("13").source(MediaTypeRegistry.JSON, "field", "bulk3"));
 
         Request request = RequestConverters.bulk(bulkRequest);
 
@@ -1456,8 +1456,11 @@ public void testMultiSearchTemplate() throws Exception {
         assertEquals(expectedParams, multiRequest.getParameters());
 
         HttpEntity actualEntity = multiRequest.getEntity();
-        byte[] expectedBytes = MultiSearchTemplateRequest.writeMultiLineFormat(multiSearchTemplateRequest, XContentType.JSON.xContent());
-        assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), actualEntity.getContentType());
+        byte[] expectedBytes = MultiSearchTemplateRequest.writeMultiLineFormat(
+            multiSearchTemplateRequest,
+            MediaTypeRegistry.JSON.xContent()
+        );
+        assertEquals(MediaTypeRegistry.JSON.mediaTypeWithoutParameters(), actualEntity.getContentType());
         assertEquals(new BytesArray(expectedBytes), new BytesArray(EntityUtils.toByteArray(actualEntity)));
     }
 
@@ -1763,8 +1766,12 @@ public void testDeleteScriptRequest() {
     }
 
     static void assertToXContentBody(ToXContent expectedBody, HttpEntity actualEntity) throws IOException {
-        BytesReference expectedBytes = XContentHelper.toXContent(expectedBody, REQUEST_BODY_CONTENT_TYPE, false);
-        assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), actualEntity.getContentType());
+        BytesReference expectedBytes = org.opensearch.core.xcontent.XContentHelper.toXContent(
+            expectedBody,
+            REQUEST_BODY_CONTENT_TYPE,
+            false
+        );
+        assertEquals(MediaTypeRegistry.JSON.mediaTypeWithoutParameters(), actualEntity.getContentType());
         assertEquals(expectedBytes, new BytesArray(EntityUtils.toByteArray(actualEntity)));
     }
 
@@ -1913,12 +1920,12 @@ public void testCreateContentType() {
     }
 
     public void testEnforceSameContentType() {
-        XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE);
-        IndexRequest indexRequest = new IndexRequest().source(singletonMap("field", "value"), xContentType);
-        assertEquals(xContentType, enforceSameContentType(indexRequest, null));
-        assertEquals(xContentType, enforceSameContentType(indexRequest, xContentType));
+        MediaType mediaType = randomFrom(MediaTypeRegistry.JSON, XContentType.SMILE);
+        IndexRequest indexRequest = new IndexRequest().source(singletonMap("field", "value"), mediaType);
+        assertEquals(mediaType, enforceSameContentType(indexRequest, null));
+        assertEquals(mediaType, enforceSameContentType(indexRequest, mediaType));
 
-        XContentType bulkContentType = randomBoolean() ? xContentType : null;
+        MediaType bulkContentType = randomBoolean() ? mediaType : null;
 
         IllegalArgumentException exception = expectThrows(
             IllegalArgumentException.class,
@@ -1938,18 +1945,18 @@ public void testEnforceSameContentType() {
             exception.getMessage()
         );
 
-        XContentType requestContentType = xContentType == XContentType.JSON ? XContentType.SMILE : XContentType.JSON;
+        MediaType requestContentType = mediaType == MediaTypeRegistry.JSON ? XContentType.SMILE : MediaTypeRegistry.JSON;
 
         exception = expectThrows(
             IllegalArgumentException.class,
-            () -> enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), requestContentType), xContentType)
+            () -> enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), requestContentType), mediaType)
         );
         assertEquals(
             "Mismatching content-type found for request with content-type ["
                 + requestContentType
                 + "], "
                 + "previous requests have content-type ["
-                + xContentType
+                + mediaType
                 + "]",
             exception.getMessage()
         );
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientExtTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientExtTests.java
index ad6667b0c3d50..b6289c790b511 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientExtTests.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientExtTests.java
@@ -35,11 +35,11 @@
 import org.apache.hc.core5.http.ContentType;
 import org.apache.hc.core5.http.HttpEntity;
 import org.apache.hc.core5.http.io.entity.StringEntity;
-import org.junit.Before;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.xcontent.NamedXContentRegistry;
 import org.opensearch.core.xcontent.XContentParser;
 import org.opensearch.test.OpenSearchTestCase;
+import org.junit.Before;
 
 import java.io.IOException;
 import java.util.ArrayList;
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java
index 24b0fda6c18d5..b0964a22786f0 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java
@@ -33,8 +33,19 @@
 package org.opensearch.client;
 
 import com.fasterxml.jackson.core.JsonParseException;
+
+import org.apache.hc.client5.http.classic.methods.HttpGet;
+import org.apache.hc.core5.http.ClassicHttpResponse;
+import org.apache.hc.core5.http.ContentType;
+import org.apache.hc.core5.http.HttpEntity;
+import org.apache.hc.core5.http.HttpHost;
+import org.apache.hc.core5.http.ProtocolVersion;
+import org.apache.hc.core5.http.io.entity.ByteArrayEntity;
+import org.apache.hc.core5.http.io.entity.StringEntity;
+import org.apache.hc.core5.http.message.BasicClassicHttpResponse;
+import org.apache.hc.core5.http.message.RequestLine;
+import org.apache.hc.core5.http.message.StatusLine;
 import org.opensearch.OpenSearchException;
-import org.opensearch.action.ActionListener;
 import org.opensearch.action.ActionRequest;
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.search.ClearScrollRequest;
@@ -46,16 +57,18 @@
 import org.opensearch.client.core.MainRequest;
 import org.opensearch.client.core.MainResponse;
 import org.opensearch.common.CheckedFunction;
-import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.common.collect.Tuple;
 import org.opensearch.common.util.set.Sets;
+import org.opensearch.common.xcontent.cbor.CborXContent;
+import org.opensearch.common.xcontent.smile.SmileXContent;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.common.bytes.BytesReference;
+import org.opensearch.core.rest.RestStatus;
 import org.opensearch.core.xcontent.NamedXContentRegistry;
 import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.core.xcontent.ToXContentFragment;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.common.xcontent.cbor.CborXContent;
-import org.opensearch.common.xcontent.smile.SmileXContent;
 import org.opensearch.index.rankeval.DiscountedCumulativeGain;
 import org.opensearch.index.rankeval.EvaluationMetric;
 import org.opensearch.index.rankeval.ExpectedReciprocalRank;
@@ -64,27 +77,15 @@
 import org.opensearch.index.rankeval.PrecisionAtK;
 import org.opensearch.index.rankeval.RecallAtK;
 import org.opensearch.join.aggregations.ChildrenAggregationBuilder;
-import org.opensearch.core.rest.RestStatus;
 import org.opensearch.search.SearchHits;
 import org.opensearch.search.aggregations.Aggregation;
 import org.opensearch.search.aggregations.InternalAggregations;
 import org.opensearch.search.aggregations.matrix.stats.MatrixStatsAggregationBuilder;
 import org.opensearch.search.suggest.Suggest;
-import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.InternalAggregationTestCase;
+import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.rest.yaml.restspec.ClientYamlSuiteRestApi;
 import org.opensearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec;
-import org.apache.hc.client5.http.classic.methods.HttpGet;
-import org.apache.hc.core5.http.ClassicHttpResponse;
-import org.apache.hc.core5.http.ContentType;
-import org.apache.hc.core5.http.HttpEntity;
-import org.apache.hc.core5.http.HttpHost;
-import org.apache.hc.core5.http.ProtocolVersion;
-import org.apache.hc.core5.http.io.entity.ByteArrayEntity;
-import org.apache.hc.core5.http.io.entity.StringEntity;
-import org.apache.hc.core5.http.message.BasicClassicHttpResponse;
-import org.apache.hc.core5.http.message.RequestLine;
-import org.apache.hc.core5.http.message.StatusLine;
 import org.hamcrest.Matchers;
 import org.junit.Before;
 
@@ -106,7 +107,7 @@
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
-import static org.opensearch.common.xcontent.XContentHelper.toXContent;
+import static org.opensearch.core.xcontent.XContentHelper.toXContent;
 import static org.hamcrest.CoreMatchers.endsWith;
 import static org.hamcrest.CoreMatchers.equalTo;
 import static org.hamcrest.CoreMatchers.instanceOf;
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java
index fbfab016b1ead..f2778a97c0c1a 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java
@@ -32,6 +32,8 @@
 package org.opensearch.client;
 
+import org.apache.hc.client5.http.classic.methods.HttpPost;
+import org.apache.hc.client5.http.classic.methods.HttpPut;
 import org.opensearch.OpenSearchException;
 import org.opensearch.OpenSearchStatusException;
 import org.opensearch.action.explain.ExplainRequest;
@@ -52,12 +54,12 @@
 import org.opensearch.action.search.SearchScrollRequest;
 import org.opensearch.client.core.CountRequest;
 import org.opensearch.client.core.CountResponse;
-import org.opensearch.common.Strings;
-import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.common.unit.TimeValue;
-import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.common.xcontent.XContentFactory;
-import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.common.bytes.BytesReference;
+import org.opensearch.core.rest.RestStatus;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
+import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.index.query.MatchQueryBuilder;
 import org.opensearch.index.query.QueryBuilder;
 import org.opensearch.index.query.QueryBuilders;
@@ -65,7 +67,6 @@
 import org.opensearch.index.query.TermsQueryBuilder;
 import org.opensearch.join.aggregations.Children;
 import org.opensearch.join.aggregations.ChildrenAggregationBuilder;
-import org.opensearch.core.rest.RestStatus;
 import org.opensearch.script.Script;
 import org.opensearch.script.ScriptType;
 import org.opensearch.script.mustache.MultiSearchTemplateRequest;
@@ -101,8 +102,6 @@
 import org.opensearch.search.suggest.Suggest;
 import org.opensearch.search.suggest.SuggestBuilder;
 import org.opensearch.search.suggest.phrase.PhraseSuggestionBuilder;
-import org.apache.hc.client5.http.classic.methods.HttpPost;
-import org.apache.hc.client5.http.classic.methods.HttpPut;
 import org.hamcrest.Matchers;
 import org.junit.Before;
 
@@ -769,7 +768,7 @@ public void testSearchScroll() throws Exception {
         for (int i = 0; i < 100; i++) {
             XContentBuilder builder = jsonBuilder().startObject().field("field", i).endObject();
             Request doc = new Request(HttpPut.METHOD_NAME, "/test/_doc/" + Integer.toString(i));
-            doc.setJsonEntity(Strings.toString(builder));
+            doc.setJsonEntity(builder.toString());
             client().performRequest(doc);
         }
         client().performRequest(new Request(HttpPost.METHOD_NAME, "/test/_refresh"));
@@ -837,7 +836,7 @@ public void testSearchWithPit() throws Exception {
         for (int i = 0; i < 100; i++) {
             XContentBuilder builder = jsonBuilder().startObject().field("field", i).endObject();
             Request doc = new Request(HttpPut.METHOD_NAME, "/test/_doc/" + Integer.toString(i));
-            doc.setJsonEntity(Strings.toString(builder));
+            doc.setJsonEntity(builder.toString());
             client().performRequest(doc);
         }
         client().performRequest(new Request(HttpPost.METHOD_NAME, "/test/_refresh"));
@@ -1201,7 +1200,7 @@ public void testRenderSearchTemplate() throws IOException {
         BytesReference actualSource = searchTemplateResponse.getSource();
         assertNotNull(actualSource);
 
-        assertToXContentEquivalent(expectedSource, actualSource, XContentType.JSON);
+        assertToXContentEquivalent(expectedSource, actualSource, MediaTypeRegistry.JSON);
     }
 
     public void testMultiSearchTemplate() throws Exception {
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/SearchPipelineClientIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/SearchPipelineClientIT.java
index 70ba39a04e8db..9304be7f21899 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/SearchPipelineClientIT.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/SearchPipelineClientIT.java
@@ -13,8 +13,8 @@
 import org.opensearch.action.search.GetSearchPipelineResponse;
 import org.opensearch.action.search.PutSearchPipelineRequest;
 import org.opensearch.action.support.master.AcknowledgedResponse;
-import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.core.xcontent.XContentBuilder;
 
 import java.io.IOException;
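SearchIT above drops Strings.toString(builder) in favor of calling toString() on the XContentBuilder itself, which now renders the built content directly. A small sketch of the pattern; the field name and value are arbitrary:

import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;

import org.opensearch.core.xcontent.XContentBuilder;

public class BuilderToString {
    public static void main(String[] args) throws Exception {
        XContentBuilder builder = jsonBuilder().startObject().field("field", 0).endObject();
        String json = builder.toString(); // previously Strings.toString(builder)
        System.out.println(json);         // {"field":0}
    }
}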
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotIT.java
index df99d26a0a530..362a8f10d6a77 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotIT.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotIT.java
@@ -54,9 +54,9 @@
 import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.xcontent.XContentType;
-import org.opensearch.repositories.fs.FsRepository;
 import org.opensearch.core.rest.RestStatus;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
+import org.opensearch.repositories.fs.FsRepository;
 import org.opensearch.snapshots.RestoreInfo;
 import org.opensearch.snapshots.SnapshotInfo;
 
@@ -76,7 +76,7 @@ public class SnapshotIT extends OpenSearchRestHighLevelClientTestCase {
 
     private AcknowledgedResponse createTestRepository(String repository, String type, String settings) throws IOException {
         PutRepositoryRequest request = new PutRepositoryRequest(repository);
-        request.settings(settings, XContentType.JSON);
+        request.settings(settings, MediaTypeRegistry.JSON);
         request.type(type);
         return execute(request, highLevelClient().snapshot()::createRepository, highLevelClient().snapshot()::createRepositoryAsync);
     }
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java
index e86de6ba718f9..af178ad2a5d47 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java
@@ -48,7 +48,7 @@
 import org.opensearch.action.support.master.AcknowledgedRequest;
 import org.opensearch.common.io.PathUtils;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.unit.ByteSizeUnit;
+import org.opensearch.core.common.unit.ByteSizeUnit;
 import org.opensearch.repositories.fs.FsRepository;
 import org.opensearch.test.OpenSearchTestCase;
 
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/StoredScriptsIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/StoredScriptsIT.java
index c86de6ae645c1..4d792e53c6064 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/StoredScriptsIT.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/StoredScriptsIT.java
@@ -38,8 +38,8 @@
 import org.opensearch.action.admin.cluster.storedscripts.GetStoredScriptResponse;
 import org.opensearch.action.admin.cluster.storedscripts.PutStoredScriptRequest;
 import org.opensearch.core.common.bytes.BytesArray;
-import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.core.rest.RestStatus;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.script.Script;
 import org.opensearch.script.StoredScriptSource;
 
@@ -58,10 +58,16 @@ public void testGetStoredScript() throws Exception {
         final StoredScriptSource scriptSource = new StoredScriptSource(
             "painless",
             "Math.log(_score * 2) + params.my_modifier",
-            Collections.singletonMap(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType())
+            Collections.singletonMap(Script.CONTENT_TYPE_OPTION, MediaTypeRegistry.JSON.mediaType())
         );
 
-        PutStoredScriptRequest request = new PutStoredScriptRequest(id, "score", new BytesArray("{}"), XContentType.JSON, scriptSource);
+        PutStoredScriptRequest request = new PutStoredScriptRequest(
+            id,
+            "score",
+            new BytesArray("{}"),
+            MediaTypeRegistry.JSON,
+            scriptSource
+        );
         assertAcked(execute(request, highLevelClient()::putScript, highLevelClient()::putScriptAsync));
 
         GetStoredScriptRequest getRequest = new GetStoredScriptRequest("calculate-score");
@@ -76,10 +82,16 @@ public void testDeleteStoredScript() throws Exception {
         final StoredScriptSource scriptSource = new StoredScriptSource(
             "painless",
             "Math.log(_score * 2) + params.my_modifier",
-            Collections.singletonMap(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType())
+            Collections.singletonMap(Script.CONTENT_TYPE_OPTION, MediaTypeRegistry.JSON.mediaType())
         );
 
-        PutStoredScriptRequest request = new PutStoredScriptRequest(id, "score", new BytesArray("{}"), XContentType.JSON, scriptSource);
+        PutStoredScriptRequest request = new PutStoredScriptRequest(
+            id,
+            "score",
+            new BytesArray("{}"),
+            MediaTypeRegistry.JSON,
+            scriptSource
+        );
         assertAcked(execute(request, highLevelClient()::putScript, highLevelClient()::putScriptAsync));
 
         DeleteStoredScriptRequest deleteRequest = new DeleteStoredScriptRequest(id);
@@ -100,10 +112,16 @@ public void testPutScript() throws Exception {
         final StoredScriptSource scriptSource = new StoredScriptSource(
             "painless",
             "Math.log(_score * 2) + params.my_modifier",
-            Collections.singletonMap(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType())
+            Collections.singletonMap(Script.CONTENT_TYPE_OPTION, MediaTypeRegistry.JSON.mediaType())
         );
 
-        PutStoredScriptRequest request = new PutStoredScriptRequest(id, "score", new BytesArray("{}"), XContentType.JSON, scriptSource);
+        PutStoredScriptRequest request = new PutStoredScriptRequest(
+            id,
+            "score",
+            new BytesArray("{}"),
+            MediaTypeRegistry.JSON,
+            scriptSource
+        );
         assertAcked(execute(request, highLevelClient()::putScript, highLevelClient()::putScriptAsync));
 
         Map<String, Object> script = getAsMap("/_scripts/" + id);
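For reference, the reworked StoredScriptsIT request construction in one self-contained piece, condensed from the hunks above (only the multi-line formatting and the MediaTypeRegistry constant changed; the script body and "score" context are the test's own values):

import java.util.Collections;

import org.opensearch.action.admin.cluster.storedscripts.PutStoredScriptRequest;
import org.opensearch.core.common.bytes.BytesArray;
import org.opensearch.core.xcontent.MediaTypeRegistry;
import org.opensearch.script.Script;
import org.opensearch.script.StoredScriptSource;

public class PutStoredScriptExample {
    static PutStoredScriptRequest build(String id) {
        StoredScriptSource scriptSource = new StoredScriptSource(
            "painless",
            "Math.log(_score * 2) + params.my_modifier",
            Collections.singletonMap(Script.CONTENT_TYPE_OPTION, MediaTypeRegistry.JSON.mediaType())
        );
        // empty content plus an explicit source object, as in the test
        return new PutStoredScriptRequest(id, "score", new BytesArray("{}"), MediaTypeRegistry.JSON, scriptSource);
    }
}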
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/TasksIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/TasksIT.java
index 24edd5f93bdba..c5a16ec32e686 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/TasksIT.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/TasksIT.java
@@ -32,7 +32,6 @@
 package org.opensearch.client;
 
-import org.opensearch.index.reindex.ReindexRequest;
 import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
 import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
 import org.opensearch.action.admin.cluster.node.tasks.list.TaskGroup;
@@ -46,8 +45,9 @@
 import org.opensearch.client.tasks.TaskId;
 import org.opensearch.client.tasks.TaskSubmissionResponse;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.core.rest.RestStatus;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
+import org.opensearch.index.reindex.ReindexRequest;
 
 import java.io.IOException;
 import java.util.Collections;
@@ -94,9 +94,9 @@ public void testGetValidTask() throws Exception {
         createIndex(sourceIndex, settings);
         createIndex(destinationIndex, settings);
         BulkRequest bulkRequest = new BulkRequest().add(
-            new IndexRequest(sourceIndex).id("1").source(Collections.singletonMap("foo", "bar"), XContentType.JSON)
+            new IndexRequest(sourceIndex).id("1").source(Collections.singletonMap("foo", "bar"), MediaTypeRegistry.JSON)
         )
-            .add(new IndexRequest(sourceIndex).id("2").source(Collections.singletonMap("foo2", "bar2"), XContentType.JSON))
+            .add(new IndexRequest(sourceIndex).id("2").source(Collections.singletonMap("foo2", "bar2"), MediaTypeRegistry.JSON))
             .setRefreshPolicy(RefreshPolicy.IMMEDIATE);
 
         assertEquals(RestStatus.OK, highLevelClient().bulk(bulkRequest, RequestOptions.DEFAULT).status());
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/TasksRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/TasksRequestConvertersTests.java
index a777bbc5d1868..e4c6feb1f6bb1 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/TasksRequestConvertersTests.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/TasksRequestConvertersTests.java
@@ -36,7 +36,7 @@
 import org.apache.hc.client5.http.classic.methods.HttpPost;
 import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
 import org.opensearch.client.tasks.CancelTasksRequest;
-import org.opensearch.tasks.TaskId;
+import org.opensearch.core.tasks.TaskId;
 import org.opensearch.test.OpenSearchTestCase;
 
 import java.util.HashMap;
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/UpdateByQueryIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/UpdateByQueryIT.java
index e5fbb30d29292..ef7b7d5a39b6f 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/UpdateByQueryIT.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/UpdateByQueryIT.java
@@ -32,7 +32,6 @@
 package org.opensearch.client;
 
-import org.opensearch.action.ActionListener;
 import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
 import org.opensearch.action.bulk.BulkItemResponse;
 import org.opensearch.action.bulk.BulkRequest;
@@ -41,15 +40,16 @@
 import org.opensearch.action.support.WriteRequest;
 import org.opensearch.client.tasks.TaskSubmissionResponse;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.rest.RestStatus;
+import org.opensearch.core.tasks.TaskId;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.index.query.IdsQueryBuilder;
 import org.opensearch.index.reindex.BulkByScrollResponse;
 import org.opensearch.index.reindex.UpdateByQueryAction;
 import org.opensearch.index.reindex.UpdateByQueryRequest;
-import org.opensearch.core.rest.RestStatus;
 import org.opensearch.script.Script;
 import org.opensearch.tasks.RawTaskStatus;
-import org.opensearch.tasks.TaskId;
 
 import java.io.IOException;
 import java.util.Collections;
@@ -76,9 +76,9 @@ public void testUpdateByQuery() throws Exception {
             RestStatus.OK,
             highLevelClient().bulk(
                 new BulkRequest().add(
-                    new IndexRequest(sourceIndex).id("1").source(Collections.singletonMap("foo", 1), XContentType.JSON)
+                    new IndexRequest(sourceIndex).id("1").source(Collections.singletonMap("foo", 1), MediaTypeRegistry.JSON)
                 )
-                    .add(new IndexRequest(sourceIndex).id("2").source(Collections.singletonMap("foo", 2), XContentType.JSON))
+                    .add(new IndexRequest(sourceIndex).id("2").source(Collections.singletonMap("foo", 2), MediaTypeRegistry.JSON))
                     .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE),
                 RequestOptions.DEFAULT
             ).status()
@@ -197,10 +197,10 @@ public void testUpdateByQueryTask() throws Exception {
             RestStatus.OK,
             highLevelClient().bulk(
                 new BulkRequest().add(
-                    new IndexRequest(sourceIndex).id("1").source(Collections.singletonMap("foo", 1), XContentType.JSON)
+                    new IndexRequest(sourceIndex).id("1").source(Collections.singletonMap("foo", 1), MediaTypeRegistry.JSON)
                 )
-                    .add(new IndexRequest(sourceIndex).id("2").source(Collections.singletonMap("foo", 2), XContentType.JSON))
-                    .add(new IndexRequest(sourceIndex).id("3").source(Collections.singletonMap("foo", 3), XContentType.JSON))
+                    .add(new IndexRequest(sourceIndex).id("2").source(Collections.singletonMap("foo", 2), MediaTypeRegistry.JSON))
+                    .add(new IndexRequest(sourceIndex).id("3").source(Collections.singletonMap("foo", 3), MediaTypeRegistry.JSON))
                     .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE),
                 RequestOptions.DEFAULT
             ).status()
@@ -230,9 +230,9 @@ public void testUpdateByQueryConflict() throws IOException {
         final Settings settings = Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0).build();
         createIndex(index, settings);
         final BulkRequest bulkRequest = new BulkRequest().add(
-            new IndexRequest(index).id("1").source(Collections.singletonMap("foo", "bar"), XContentType.JSON)
+            new IndexRequest(index).id("1").source(Collections.singletonMap("foo", "bar"), MediaTypeRegistry.JSON)
         )
-            .add(new IndexRequest(index).id("2").source(Collections.singletonMap("foo", "bar"), XContentType.JSON))
+            .add(new IndexRequest(index).id("2").source(Collections.singletonMap("foo", "bar"), MediaTypeRegistry.JSON))
             .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
 
         assertThat(highLevelClient().bulk(bulkRequest, RequestOptions.DEFAULT).status(), equalTo(RestStatus.OK));
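Alongside MediaTypeRegistry, the ActionListener and TaskId types move from org.opensearch.action and org.opensearch.tasks into org.opensearch.core. A sketch of an async callback compiled against the relocated ActionListener; the getUpdated() accessor is an assumption based on the BulkByScrollResponse API rather than code shown in this patch:

import org.opensearch.core.action.ActionListener;
import org.opensearch.index.reindex.BulkByScrollResponse;

public class CoreActionListenerExample {
    static ActionListener<BulkByScrollResponse> listener() {
        return new ActionListener<BulkByScrollResponse>() {
            @Override
            public void onResponse(BulkByScrollResponse response) {
                // update-by-query finished; inspect the result
                System.out.println("updated docs: " + response.getUpdated());
            }

            @Override
            public void onFailure(Exception e) {
                e.printStackTrace();
            }
        };
    }
}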
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/cluster/RemoteInfoResponseTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/cluster/RemoteInfoResponseTests.java
index ed60dfa36a237..201a8ee1e60ac 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/cluster/RemoteInfoResponseTests.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/cluster/RemoteInfoResponseTests.java
@@ -34,8 +34,8 @@
 
 import org.opensearch.client.AbstractResponseTestCase;
 import org.opensearch.common.unit.TimeValue;
-import org.opensearch.core.xcontent.XContentParser;
 import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.xcontent.XContentParser;
 import org.opensearch.transport.ProxyConnectionStrategy;
 import org.opensearch.transport.RemoteConnectionInfo;
 import org.opensearch.transport.SniffConnectionStrategy;
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/core/AcknowledgedResponseTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/core/AcknowledgedResponseTests.java
index 9c4004945b6bf..e184df7ad013c 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/core/AcknowledgedResponseTests.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/core/AcknowledgedResponseTests.java
@@ -32,8 +32,8 @@
 package org.opensearch.client.core;
 
 import org.opensearch.client.AbstractResponseTestCase;
-import org.opensearch.core.xcontent.XContentParser;
 import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.xcontent.XContentParser;
 
 import java.io.IOException;
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/core/BroadcastResponseTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/core/BroadcastResponseTests.java
index 101f00ad25eb8..de2af42cb900f 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/core/BroadcastResponseTests.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/core/BroadcastResponseTests.java
@@ -32,10 +32,10 @@
 package org.opensearch.client.core;
 
-import org.opensearch.core.action.support.DefaultShardOperationFailedException;
 import org.opensearch.client.AbstractResponseTestCase;
-import org.opensearch.core.xcontent.XContentParser;
 import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.action.support.DefaultShardOperationFailedException;
+import org.opensearch.core.xcontent.XContentParser;
 import org.opensearch.index.seqno.RetentionLeaseNotFoundException;
 
 import java.io.IOException;
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/core/CountResponseTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/core/CountResponseTests.java
index 06912e0c6bb1f..946ebc1089580 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/core/CountResponseTests.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/core/CountResponseTests.java
@@ -35,10 +35,10 @@
 import org.opensearch.action.search.ShardSearchFailure;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.core.common.ParsingException;
-import org.opensearch.core.xcontent.ToXContent;
-import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.core.index.Index;
 import org.opensearch.core.index.shard.ShardId;
+import org.opensearch.core.xcontent.ToXContent;
+import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.rest.action.RestActions;
 import org.opensearch.search.SearchShardTarget;
 import org.opensearch.test.OpenSearchTestCase;
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/core/GetSourceResponseTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/core/GetSourceResponseTests.java
index 03d6e6720ffed..0c2b269c7e78d 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/core/GetSourceResponseTests.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/core/GetSourceResponseTests.java
@@ -33,13 +33,13 @@
 package org.opensearch.client.core;
 
 import org.opensearch.client.AbstractResponseTestCase;
+import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.core.common.bytes.BytesArray;
 import org.opensearch.core.common.bytes.BytesReference;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.core.xcontent.ToXContentObject;
 import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.common.xcontent.XContentHelper;
 import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.common.xcontent.XContentType;
 
 import java.io.IOException;
 import java.io.InputStream;
@@ -61,7 +61,7 @@ static class SourceOnlyResponse implements ToXContentObject {
         public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
             // this implementation copied from RestGetSourceAction.RestGetSourceResponseListener::buildResponse
             try (InputStream stream = source.streamInput()) {
-                builder.rawValue(stream, XContentHelper.xContentType(source));
+                builder.rawValue(stream, MediaTypeRegistry.xContentType(source));
             }
             return builder;
         }
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/core/MainResponseTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/core/MainResponseTests.java
index 3951e2b6e33fd..15546e5b4ecbe 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/core/MainResponseTests.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/core/MainResponseTests.java
@@ -36,8 +36,8 @@
 import org.opensearch.Version;
 import org.opensearch.client.AbstractResponseTestCase;
 import org.opensearch.cluster.ClusterName;
-import org.opensearch.core.xcontent.XContentParser;
 import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.xcontent.XContentParser;
 import org.opensearch.test.VersionUtils;
 
 import java.io.IOException;
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/core/TermVectorsResponseTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/core/TermVectorsResponseTests.java
index 10bb274b1216c..5b6b96fd9144f 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/core/TermVectorsResponseTests.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/core/TermVectorsResponseTests.java
@@ -35,11 +35,11 @@
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.test.OpenSearchTestCase;
 
-import java.util.ArrayList;
-import java.util.List;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.List;
 
 import static org.opensearch.test.AbstractXContentTestCase.xContentTester;
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/core/tasks/GetTaskResponseTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/core/tasks/GetTaskResponseTests.java
index 63e016fea1da7..060caeddd9826 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/core/tasks/GetTaskResponseTests.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/core/tasks/GetTaskResponseTests.java
@@ -35,15 +35,15 @@
 import org.opensearch.client.Requests;
 import org.opensearch.client.tasks.GetTaskResponse;
 import org.opensearch.core.common.bytes.BytesReference;
+import org.opensearch.core.tasks.TaskId;
+import org.opensearch.core.tasks.resourcetracker.TaskResourceStats;
+import org.opensearch.core.tasks.resourcetracker.TaskResourceUsage;
+import org.opensearch.core.tasks.resourcetracker.TaskThreadUsage;
 import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.tasks.RawTaskStatus;
-import org.opensearch.tasks.TaskResourceStats;
-import org.opensearch.tasks.TaskResourceUsage;
 import org.opensearch.tasks.Task;
-import org.opensearch.tasks.TaskId;
 import org.opensearch.tasks.TaskInfo;
-import org.opensearch.tasks.TaskThreadUsage;
 import org.opensearch.test.OpenSearchTestCase;
 
 import java.io.IOException;
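GetSourceResponseTests above now sniffs the content type of raw source bytes with MediaTypeRegistry.xContentType(...) before copying them into a builder. The detect-then-copy pattern, extracted as a standalone sketch of the same calls used in the hunk:

import java.io.IOException;
import java.io.InputStream;

import org.opensearch.core.common.bytes.BytesReference;
import org.opensearch.core.xcontent.MediaTypeRegistry;
import org.opensearch.core.xcontent.XContentBuilder;

public class RawValueSketch {
    static XContentBuilder copySource(XContentBuilder builder, BytesReference source) throws IOException {
        try (InputStream stream = source.streamInput()) {
            // detect JSON/SMILE/CBOR/YAML from the leading bytes, then copy verbatim
            builder.rawValue(stream, MediaTypeRegistry.xContentType(source));
        }
        return builder;
    }
}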
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/CRUDDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/CRUDDocumentationIT.java
index a973753aa2032..123a51a54788e 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/CRUDDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/CRUDDocumentationIT.java
@@ -34,7 +34,6 @@
 
 import org.apache.hc.core5.http.HttpHost;
 import org.opensearch.OpenSearchException;
-import org.opensearch.action.ActionListener;
 import org.opensearch.action.DocWriteRequest;
 import org.opensearch.action.DocWriteResponse;
 import org.opensearch.action.LatchedActionListener;
@@ -74,16 +73,19 @@
 import org.opensearch.client.core.TermVectorsResponse;
 import org.opensearch.client.indices.CreateIndexRequest;
 import org.opensearch.client.indices.CreateIndexResponse;
-import org.opensearch.common.Strings;
-import org.opensearch.core.common.bytes.BytesArray;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.unit.ByteSizeUnit;
-import org.opensearch.common.unit.ByteSizeValue;
 import org.opensearch.common.unit.TimeValue;
-import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.common.xcontent.XContentFactory;
-import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.common.xcontent.json.JsonXContent;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.common.Strings;
+import org.opensearch.core.common.bytes.BytesArray;
+import org.opensearch.core.common.unit.ByteSizeUnit;
+import org.opensearch.core.common.unit.ByteSizeValue;
+import org.opensearch.core.rest.RestStatus;
+import org.opensearch.core.tasks.TaskId;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
+import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.index.VersionType;
 import org.opensearch.index.get.GetResult;
 import org.opensearch.index.query.MatchAllQueryBuilder;
@@ -94,11 +96,9 @@
 import org.opensearch.index.reindex.RemoteInfo;
 import org.opensearch.index.reindex.ScrollableHitSource;
 import org.opensearch.index.reindex.UpdateByQueryRequest;
-import org.opensearch.core.rest.RestStatus;
 import org.opensearch.script.Script;
 import org.opensearch.script.ScriptType;
 import org.opensearch.search.fetch.subphase.FetchSourceContext;
-import org.opensearch.tasks.TaskId;
 
 import java.util.Collections;
 import java.util.Date;
@@ -173,7 +173,7 @@ public void testIndex() throws Exception {
                 "\"postDate\":\"2013-01-30\"," +
                 "\"message\":\"trying out OpenSearch\"" +
                 "}";
-            request.source(jsonString, XContentType.JSON); // <3>
+            request.source(jsonString, MediaTypeRegistry.JSON); // <3>
             //end::index-request-string
 
             // tag::index-execute
@@ -298,15 +298,14 @@ public void testUpdate() throws Exception {
 
             Request request = new Request("POST", "/_scripts/increment-field");
             request.setJsonEntity(
-                Strings.toString(
-                    JsonXContent.contentBuilder()
-                        .startObject()
-                        .startObject("script")
-                        .field("lang", "painless")
-                        .field("source", "ctx._source.field += params.count")
-                        .endObject()
-                        .endObject()
-                )
+                JsonXContent.contentBuilder()
+                    .startObject()
+                    .startObject("script")
+                    .field("lang", "painless")
+                    .field("source", "ctx._source.field += params.count")
+                    .endObject()
+                    .endObject()
+                    .toString()
             );
             Response response = client().performRequest(request);
             assertEquals(RestStatus.OK.getStatus(), response.getStatusLine().getStatusCode());
@@ -381,7 +380,7 @@ public void testUpdate() throws Exception {
                 "\"updated\":\"2017-01-01\"," +
                 "\"reason\":\"daily update\"" +
                 "}";
-            request.doc(jsonString, XContentType.JSON); // <1>
+            request.doc(jsonString, MediaTypeRegistry.JSON); // <1>
             //end::update-request-with-doc-as-string
             request.fetchSource(true);
 
             // tag::update-execute
@@ -525,7 +524,7 @@ public void testUpdate() throws Exception {
             // end::update-request-detect-noop
             // tag::update-request-upsert
             String jsonString = "{\"created\":\"2017-01-01\"}";
-            request.upsert(jsonString, XContentType.JSON); // <1>
+            request.upsert(jsonString, MediaTypeRegistry.JSON); // <1>
             // end::update-request-upsert
             // tag::update-request-scripted-upsert
             request.scriptedUpsert(true); // <1>
@@ -699,11 +698,11 @@ public void testBulk() throws Exception {
             // tag::bulk-request
             BulkRequest request = new BulkRequest(); // <1>
             request.add(new IndexRequest("posts").id("1")  // <2>
-                    .source(XContentType.JSON,"field", "foo"));
+                    .source(MediaTypeRegistry.JSON,"field", "foo"));
             request.add(new IndexRequest("posts").id("2")  // <3>
-                    .source(XContentType.JSON,"field", "bar"));
+                    .source(MediaTypeRegistry.JSON,"field", "bar"));
             request.add(new IndexRequest("posts").id("3")  // <4>
-                    .source(XContentType.JSON,"field", "baz"));
+                    .source(MediaTypeRegistry.JSON,"field", "baz"));
             // end::bulk-request
             // tag::bulk-execute
             BulkResponse bulkResponse = client.bulk(request, RequestOptions.DEFAULT);
@@ -716,9 +715,9 @@ public void testBulk() throws Exception {
             BulkRequest request = new BulkRequest();
             request.add(new DeleteRequest("posts", "3")); // <1>
             request.add(new UpdateRequest("posts", "2") // <2>
-                    .doc(XContentType.JSON,"other", "test"));
+                    .doc(MediaTypeRegistry.JSON,"other", "test"));
             request.add(new IndexRequest("posts").id("4")  // <3>
-                    .source(XContentType.JSON,"field", "baz"));
+                    .source(MediaTypeRegistry.JSON,"field", "baz"));
             // end::bulk-request-with-mixed-operations
             BulkResponse bulkResponse = client.bulk(request, RequestOptions.DEFAULT);
             assertSame(RestStatus.OK, bulkResponse.status());
@@ -1581,13 +1580,13 @@ public void afterBulk(long executionId, BulkRequest request,
 
             // tag::bulk-processor-add
             IndexRequest one = new IndexRequest("posts").id("1")
-                .source(XContentType.JSON, "title",
+                .source(MediaTypeRegistry.JSON, "title",
                     "In which order are my OpenSearch queries executed?");
             IndexRequest two = new IndexRequest("posts").id("2")
-                .source(XContentType.JSON, "title",
+                .source(MediaTypeRegistry.JSON, "title",
                     "Current status and upcoming changes in OpenSearch");
             IndexRequest three = new IndexRequest("posts").id("3")
-                .source(XContentType.JSON, "title",
+                .source(MediaTypeRegistry.JSON, "title",
                     "The Future of Federated Search in OpenSearch");
 
             bulkProcessor.add(one);
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/ClusterClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/ClusterClientDocumentationIT.java
index f85fcae7af365..17ea5b273d2a2 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/ClusterClientDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/ClusterClientDocumentationIT.java
@@ -32,7 +32,6 @@
 package org.opensearch.client.documentation;
 
-import org.opensearch.action.ActionListener;
 import org.opensearch.action.LatchedActionListener;
 import org.opensearch.action.admin.cluster.health.ClusterHealthRequest;
 import org.opensearch.action.admin.cluster.health.ClusterHealthResponse;
@@ -63,11 +62,12 @@
 import org.opensearch.common.Priority;
 import org.opensearch.common.compress.CompressedXContent;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.unit.ByteSizeUnit;
 import org.opensearch.common.unit.TimeValue;
-import org.opensearch.common.xcontent.XContentType;
-import org.opensearch.indices.recovery.RecoverySettings;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.common.unit.ByteSizeUnit;
 import org.opensearch.core.rest.RestStatus;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
+import org.opensearch.indices.recovery.RecoverySettings;
 
 import java.io.IOException;
 import java.util.HashMap;
@@ -138,7 +138,7 @@ public void testClusterPutSettings() throws IOException {
         // tag::put-settings-settings-source
         request.transientSettings(
                 "{\"indices.recovery.max_bytes_per_sec\": \"10b\"}"
-                , XContentType.JSON); // <1>
+                , MediaTypeRegistry.JSON); // <1>
         // end::put-settings-settings-source
     }
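The testUpdate hunk in CRUDDocumentationIT above also shows the builder chain that replaces the Strings.toString(...) wrapper when composing the stored-script body. Extracted here for clarity, with the same lang and source values as the test:

import org.opensearch.common.xcontent.json.JsonXContent;

public class ScriptBodyBuilder {
    static String incrementFieldScript() throws Exception {
        return JsonXContent.contentBuilder()
            .startObject()
            .startObject("script")
            .field("lang", "painless")
            .field("source", "ctx._source.field += params.count")
            .endObject()
            .endObject()
            .toString(); // the builder serializes itself; no Strings.toString(...) needed
    }
}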
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java
index 73346bc57646e..ce080b45273b4 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java
@@ -33,7 +33,6 @@
 package org.opensearch.client.documentation;
 
 import org.opensearch.OpenSearchException;
-import org.opensearch.action.ActionListener;
 import org.opensearch.action.LatchedActionListener;
 import org.opensearch.action.admin.indices.alias.Alias;
 import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest;
@@ -61,11 +60,10 @@
 import org.opensearch.action.admin.indices.validate.query.ValidateQueryRequest;
 import org.opensearch.action.admin.indices.validate.query.ValidateQueryResponse;
 import org.opensearch.action.support.ActiveShardCount;
-import org.opensearch.core.action.support.DefaultShardOperationFailedException;
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.action.support.master.AcknowledgedResponse;
-import org.opensearch.client.OpenSearchRestHighLevelClientTestCase;
 import org.opensearch.client.GetAliasesResponse;
+import org.opensearch.client.OpenSearchRestHighLevelClientTestCase;
 import org.opensearch.client.RequestOptions;
 import org.opensearch.client.RestHighLevelClient;
 import org.opensearch.client.indices.AnalyzeRequest;
@@ -77,21 +75,21 @@
 import org.opensearch.client.indices.DeleteAliasRequest;
 import org.opensearch.client.indices.DeleteComposableIndexTemplateRequest;
 import org.opensearch.client.indices.DetailAnalyzeResponse;
+import org.opensearch.client.indices.GetComposableIndexTemplateRequest;
+import org.opensearch.client.indices.GetComposableIndexTemplatesResponse;
 import org.opensearch.client.indices.GetFieldMappingsRequest;
 import org.opensearch.client.indices.GetFieldMappingsResponse;
 import org.opensearch.client.indices.GetIndexRequest;
 import org.opensearch.client.indices.GetIndexResponse;
-import org.opensearch.client.indices.GetComposableIndexTemplateRequest;
 import org.opensearch.client.indices.GetIndexTemplatesRequest;
 import org.opensearch.client.indices.GetIndexTemplatesResponse;
-import org.opensearch.client.indices.GetComposableIndexTemplatesResponse;
 import org.opensearch.client.indices.GetMappingsRequest;
 import org.opensearch.client.indices.GetMappingsResponse;
 import org.opensearch.client.indices.IndexTemplateMetadata;
 import org.opensearch.client.indices.IndexTemplatesExistRequest;
 import org.opensearch.client.indices.PutComponentTemplateRequest;
-import org.opensearch.client.indices.PutIndexTemplateRequest;
 import org.opensearch.client.indices.PutComposableIndexTemplateRequest;
+import org.opensearch.client.indices.PutIndexTemplateRequest;
 import org.opensearch.client.indices.PutMappingRequest;
 import org.opensearch.client.indices.SimulateIndexTemplateRequest;
 import org.opensearch.client.indices.SimulateIndexTemplateResponse;
@@ -104,16 +102,18 @@
 import org.opensearch.cluster.metadata.Template;
 import org.opensearch.common.compress.CompressedXContent;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.unit.ByteSizeUnit;
-import org.opensearch.common.unit.ByteSizeValue;
 import org.opensearch.common.unit.TimeValue;
-import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.common.xcontent.XContentFactory;
-import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.action.support.DefaultShardOperationFailedException;
+import org.opensearch.core.common.unit.ByteSizeUnit;
+import org.opensearch.core.common.unit.ByteSizeValue;
+import org.opensearch.core.rest.RestStatus;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
+import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.index.IndexSettings;
 import org.opensearch.index.query.QueryBuilder;
 import org.opensearch.index.query.QueryBuilders;
-import org.opensearch.core.rest.RestStatus;
 
 import java.io.IOException;
 import java.util.Arrays;
@@ -137,15 +137,15 @@
  * You need to wrap your code between two tags like:
  * // tag::example
  * // end::example
- *
+ * <p>
  * Where example is your tag name.
- *
+ * <p>
 * Then in the documentation, you can extract what is between tag and end tags with
 * ["source","java",subs="attributes,callouts,macros"]
 * --------------------------------------------------
 * include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[example]
 * --------------------------------------------------
- *
+ * <p>
 * The column width of the code block is 84. If the code contains a line longer
 * than 84, the line will be cut and a horizontal scroll bar will be displayed.
 * (the code indentation of the tag is not included in the width)
@@ -329,7 +329,7 @@ public void testCreateIndex() throws IOException {
                 "    }\n" +
                 "  }\n" +
                 "}", // <2>
-                XContentType.JSON);
+                MediaTypeRegistry.JSON);
             // end::create-index-request-mappings
             CreateIndexResponse createIndexResponse = client.indices().create(request, RequestOptions.DEFAULT);
             assertTrue(createIndexResponse.isAcknowledged());
@@ -407,7 +407,7 @@ public void testCreateIndex() throws IOException {
                 "    \"aliases\" : {\n" +
                 "        \"twitter_alias\" : {}\n" +
                 "    }\n" +
-                "}", XContentType.JSON); // <1>
+                "}", MediaTypeRegistry.JSON); // <1>
             // end::create-index-whole-source
 
             // tag::create-index-execute
@@ -480,7 +480,7 @@ public void testPutMapping() throws IOException {
                 "  }\n" +
                 "}", // <1>
-                XContentType.JSON);
+                MediaTypeRegistry.JSON);
             // end::put-mapping-request-source
             AcknowledgedResponse putMappingResponse = client.indices().putMapping(request, RequestOptions.DEFAULT);
             assertTrue(putMappingResponse.isAcknowledged());
@@ -585,7 +585,7 @@ public void testGetMapping() throws IOException {
             CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("twitter"), RequestOptions.DEFAULT);
             assertTrue(createIndexResponse.isAcknowledged());
             PutMappingRequest request = new PutMappingRequest("twitter");
-            request.source("{ \"properties\": { \"message\": { \"type\": \"text\" } } }", XContentType.JSON);
+            request.source("{ \"properties\": { \"message\": { \"type\": \"text\" } } }", MediaTypeRegistry.JSON);
             AcknowledgedResponse putMappingResponse = client.indices().putMapping(request, RequestOptions.DEFAULT);
             assertTrue(putMappingResponse.isAcknowledged());
         }
@@ -631,7 +631,7 @@ public void testGetMappingAsync() throws Exception {
             CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("twitter"), RequestOptions.DEFAULT);
             assertTrue(createIndexResponse.isAcknowledged());
             PutMappingRequest request = new PutMappingRequest("twitter");
-            request.source("{ \"properties\": { \"message\": { \"type\": \"text\" } } }", XContentType.JSON);
+            request.source("{ \"properties\": { \"message\": { \"type\": \"text\" } } }", MediaTypeRegistry.JSON);
             AcknowledgedResponse putMappingResponse = client.indices().putMapping(request, RequestOptions.DEFAULT);
             assertTrue(putMappingResponse.isAcknowledged());
         }
}", MediaTypeRegistry.JSON); AcknowledgedResponse putMappingResponse = client.indices().putMapping(request, RequestOptions.DEFAULT); assertTrue(putMappingResponse.isAcknowledged()); } @@ -703,7 +703,7 @@ public void testGetFieldMapping() throws IOException, InterruptedException { + " }\n" + " }\n" + "}", // <1> - XContentType.JSON + MediaTypeRegistry.JSON ); AcknowledgedResponse putMappingResponse = client.indices().putMapping(request, RequestOptions.DEFAULT); assertTrue(putMappingResponse.isAcknowledged()); @@ -1127,7 +1127,8 @@ public void testGetIndex() throws Exception { { Settings settings = Settings.builder().put("number_of_shards", 3).build(); String mappings = "{\"properties\":{\"field-1\":{\"type\":\"integer\"}}}"; - CreateIndexRequest createIndexRequest = new CreateIndexRequest("index").settings(settings).mapping(mappings, XContentType.JSON); + CreateIndexRequest createIndexRequest = new CreateIndexRequest("index").settings(settings) + .mapping(mappings, MediaTypeRegistry.JSON); CreateIndexResponse createIndexResponse = client.indices().create(createIndexRequest, RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } @@ -1830,7 +1831,7 @@ public void testRolloverIndex() throws Exception { // end::rollover-index-request-settings // tag::rollover-index-request-mapping String mappings = "{\"properties\":{\"field-1\":{\"type\":\"keyword\"}}}"; - request.getCreateIndexRequest().mapping(mappings, XContentType.JSON); // <1> + request.getCreateIndexRequest().mapping(mappings, MediaTypeRegistry.JSON); // <1> // end::rollover-index-request-mapping // tag::rollover-index-request-alias request.getCreateIndexRequest().alias(new Alias("another_alias")); // <1> @@ -2009,7 +2010,7 @@ public void testIndexPutSettings() throws Exception { // tag::indices-put-settings-settings-source request.settings( "{\"index.number_of_replicas\": \"2\"}" - , XContentType.JSON); // <1> + , MediaTypeRegistry.JSON); // <1> // end::indices-put-settings-settings-source } @@ -2090,7 +2091,7 @@ public void testPutTemplate() throws Exception { " }\n" + " }\n" + "}", - XContentType.JSON); + MediaTypeRegistry.JSON); // end::put-template-request-mappings-json assertTrue(client.indices().putTemplate(request, RequestOptions.DEFAULT).isAcknowledged()); } @@ -2165,7 +2166,7 @@ public void testPutTemplate() throws Exception { " \"alias-1\": {},\n" + " \"{index}-alias\": {}\n" + " }\n" + - "}", XContentType.JSON); // <1> + "}", MediaTypeRegistry.JSON); // <1> // end::put-template-whole-source // tag::put-template-request-create @@ -2220,7 +2221,7 @@ public void testGetTemplates() throws Exception { PutIndexTemplateRequest putRequest = new PutIndexTemplateRequest("my-template"); putRequest.patterns(Arrays.asList("pattern-1", "log-*")); putRequest.settings(Settings.builder().put("index.number_of_shards", 3).put("index.number_of_replicas", 1)); - putRequest.mapping("{ \"properties\": { \"message\": { \"type\": \"text\" } } }", XContentType.JSON); + putRequest.mapping("{ \"properties\": { \"message\": { \"type\": \"text\" } } }", MediaTypeRegistry.JSON); assertTrue(client.indices().putTemplate(putRequest, RequestOptions.DEFAULT).isAcknowledged()); } diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IngestClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IngestClientDocumentationIT.java index 6c11aff3d292e..28909cf58541a 100644 --- 
a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IngestClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IngestClientDocumentationIT.java @@ -32,7 +32,6 @@ package org.opensearch.client.documentation; -import org.opensearch.action.ActionListener; import org.opensearch.action.LatchedActionListener; import org.opensearch.action.ingest.DeletePipelineRequest; import org.opensearch.action.ingest.GetPipelineRequest; @@ -48,9 +47,10 @@ import org.opensearch.client.OpenSearchRestHighLevelClientTestCase; import org.opensearch.client.RequestOptions; import org.opensearch.client.RestHighLevelClient; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.ingest.PipelineConfiguration; import java.io.IOException; @@ -65,15 +65,15 @@ * You need to wrap your code between two tags like: * // tag::example * // end::example - * + * <p> * Where example is your tag name. - * + * <p> * Then in the documentation, you can extract what is between tag and end tags with * ["source","java",subs="attributes,callouts,macros"] * -------------------------------------------------- * include-tagged::{doc-tests}/IngestClientDocumentationIT.java[example] * -------------------------------------------------- - * + * <p> * The column width of the code block is 84. If the code contains a line longer * than 84, the line will be cut and a horizontal scroll bar will be displayed. * (the code indentation of the tag is not included in the width) @@ -91,7 +91,7 @@ public void testPutPipeline() throws IOException { PutPipelineRequest request = new PutPipelineRequest( "my-pipeline-id", // <1> new BytesArray(source.getBytes(StandardCharsets.UTF_8)), // <2> - XContentType.JSON // <3> + MediaTypeRegistry.JSON // <3> ); // end::put-pipeline-request @@ -125,7 +125,7 @@ public void testPutPipelineAsync() throws Exception { PutPipelineRequest request = new PutPipelineRequest( "my-pipeline-id", new BytesArray(source.getBytes(StandardCharsets.UTF_8)), - XContentType.JSON + MediaTypeRegistry.JSON ); // tag::put-pipeline-execute-listener @@ -314,7 +314,7 @@ public void testSimulatePipeline() throws IOException { "}"; SimulatePipelineRequest request = new SimulatePipelineRequest( new BytesArray(source.getBytes(StandardCharsets.UTF_8)), // <1> - XContentType.JSON // <2> + MediaTypeRegistry.JSON // <2> ); // end::simulate-pipeline-request @@ -370,7 +370,7 @@ public void testSimulatePipelineAsync() throws Exception { + "}"; SimulatePipelineRequest request = new SimulatePipelineRequest( new BytesArray(source.getBytes(StandardCharsets.UTF_8)), - XContentType.JSON + MediaTypeRegistry.JSON ); // tag::simulate-pipeline-execute-listener diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/QueryDSLDocumentationTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/QueryDSLDocumentationTests.java index 9f5c2e51a7960..4304ef04f6dc4 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/QueryDSLDocumentationTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/QueryDSLDocumentationTests.java @@ -39,10 +39,10 @@ import org.opensearch.common.geo.builders.MultiPointBuilder; import 
org.opensearch.common.unit.DistanceUnit; import org.opensearch.index.query.GeoShapeQueryBuilder; +import org.opensearch.index.query.RankFeatureQueryBuilders; import org.opensearch.index.query.functionscore.FunctionScoreQueryBuilder; import org.opensearch.index.query.functionscore.FunctionScoreQueryBuilder.FilterFunctionBuilder; import org.opensearch.join.query.JoinQueryBuilders; -import org.opensearch.index.query.RankFeatureQueryBuilders; import org.opensearch.script.Script; import org.opensearch.script.ScriptType; import org.opensearch.test.OpenSearchTestCase; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SearchDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SearchDocumentationIT.java index 7f7062f0e8a4c..bf0f70304168e 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SearchDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SearchDocumentationIT.java @@ -34,7 +34,6 @@ import org.apache.lucene.search.Explanation; import org.apache.lucene.search.TotalHits; -import org.opensearch.action.ActionListener; import org.opensearch.action.LatchedActionListener; import org.opensearch.action.bulk.BulkRequest; import org.opensearch.action.bulk.BulkResponse; @@ -65,13 +64,15 @@ import org.opensearch.client.core.CountResponse; import org.opensearch.client.indices.CreateIndexRequest; import org.opensearch.client.indices.CreateIndexResponse; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.document.DocumentField; -import org.opensearch.core.common.text.Text; import org.opensearch.common.unit.Fuzziness; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.text.Text; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.get.GetResult; import org.opensearch.index.query.MatchQueryBuilder; import org.opensearch.index.query.QueryBuilder; @@ -86,7 +87,6 @@ import org.opensearch.index.rankeval.RatedDocument; import org.opensearch.index.rankeval.RatedRequest; import org.opensearch.index.rankeval.RatedSearchHit; -import org.opensearch.core.rest.RestStatus; import org.opensearch.script.ScriptType; import org.opensearch.script.mustache.MultiSearchTemplateRequest; import org.opensearch.script.mustache.MultiSearchTemplateResponse; @@ -319,9 +319,9 @@ public void testSearchRequestAggregations() throws IOException { RestHighLevelClient client = highLevelClient(); { BulkRequest request = new BulkRequest(); - request.add(new IndexRequest("posts").id("1").source(XContentType.JSON, "company", "OpenSearch", "age", 20)); - request.add(new IndexRequest("posts").id("2").source(XContentType.JSON, "company", "OpenSearch", "age", 30)); - request.add(new IndexRequest("posts").id("3").source(XContentType.JSON, "company", "OpenSearch", "age", 40)); + request.add(new IndexRequest("posts").id("1").source(MediaTypeRegistry.JSON, "company", "OpenSearch", "age", 20)); + request.add(new IndexRequest("posts").id("2").source(MediaTypeRegistry.JSON, "company", "OpenSearch", "age", 30)); + request.add(new IndexRequest("posts").id("3").source(MediaTypeRegistry.JSON, "company", "OpenSearch", "age", 40)); 
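These three bulk requests show the mechanical change this diff applies across all of the documentation tests: the media-type constant moves from org.opensearch.common.xcontent.XContentType to org.opensearch.core.xcontent.MediaTypeRegistry. As a minimal standalone sketch of the migrated call style (index name and field values taken from the test lines above):

import org.opensearch.action.index.IndexRequest;
import org.opensearch.core.xcontent.MediaTypeRegistry;

// key/value varargs with an explicit JSON media type, as in the migrated lines above
IndexRequest request = new IndexRequest("posts").id("1")
    .source(MediaTypeRegistry.JSON, "company", "OpenSearch", "age", 20);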
request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); BulkResponse bulkResponse = client.bulk(request, RequestOptions.DEFAULT); assertSame(RestStatus.OK, bulkResponse.status()); @@ -392,10 +392,10 @@ public void testSearchRequestSuggestions() throws IOException { RestHighLevelClient client = highLevelClient(); { BulkRequest request = new BulkRequest(); - request.add(new IndexRequest("posts").id("1").source(XContentType.JSON, "user", "foobar")); - request.add(new IndexRequest("posts").id("2").source(XContentType.JSON, "user", "quxx")); - request.add(new IndexRequest("posts").id("3").source(XContentType.JSON, "user", "quzz")); - request.add(new IndexRequest("posts").id("4").source(XContentType.JSON, "user", "corge")); + request.add(new IndexRequest("posts").id("1").source(MediaTypeRegistry.JSON, "user", "foobar")); + request.add(new IndexRequest("posts").id("2").source(MediaTypeRegistry.JSON, "user", "quxx")); + request.add(new IndexRequest("posts").id("3").source(MediaTypeRegistry.JSON, "user", "quzz")); + request.add(new IndexRequest("posts").id("4").source(MediaTypeRegistry.JSON, "user", "corge")); request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); BulkResponse bulkResponse = client.bulk(request, RequestOptions.DEFAULT); assertSame(RestStatus.OK, bulkResponse.status()); @@ -438,7 +438,7 @@ public void testSearchRequestHighlighting() throws IOException { request.add( new IndexRequest("posts").id("1") .source( - XContentType.JSON, + MediaTypeRegistry.JSON, "title", "In which order are my OpenSearch queries executed?", "user", @@ -450,7 +450,7 @@ public void testSearchRequestHighlighting() throws IOException { request.add( new IndexRequest("posts").id("2") .source( - XContentType.JSON, + MediaTypeRegistry.JSON, "title", "Current status and upcoming changes in OpenSearch", "user", @@ -462,7 +462,7 @@ public void testSearchRequestHighlighting() throws IOException { request.add( new IndexRequest("posts").id("3") .source( - XContentType.JSON, + MediaTypeRegistry.JSON, "title", "The Future of Federated Search in OpenSearch", "user", @@ -525,7 +525,7 @@ public void testSearchRequestHighlighting() throws IOException { public void testSearchRequestProfiling() throws IOException { RestHighLevelClient client = highLevelClient(); { - IndexRequest request = new IndexRequest("posts").id("1").source(XContentType.JSON, "tags", "opensearch", "comments", 123); + IndexRequest request = new IndexRequest("posts").id("1").source(MediaTypeRegistry.JSON, "tags", "opensearch", "comments", 123); request.setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL); IndexResponse indexResponse = client.index(request, RequestOptions.DEFAULT); assertSame(RestStatus.CREATED, indexResponse.status()); @@ -597,13 +597,15 @@ public void testScroll() throws Exception { { BulkRequest request = new BulkRequest(); request.add( - new IndexRequest("posts").id("1").source(XContentType.JSON, "title", "In which order are my OpenSearch queries executed?") + new IndexRequest("posts").id("1") + .source(MediaTypeRegistry.JSON, "title", "In which order are my OpenSearch queries executed?") ); request.add( - new IndexRequest("posts").id("2").source(XContentType.JSON, "title", "Current status and upcoming changes in OpenSearch") + new IndexRequest("posts").id("2") + .source(MediaTypeRegistry.JSON, "title", "Current status and upcoming changes in OpenSearch") ); request.add( - new IndexRequest("posts").id("3").source(XContentType.JSON, "title", "The Future of Federated Search in OpenSearch") + new 
IndexRequest("posts").id("3").source(MediaTypeRegistry.JSON, "title", "The Future of Federated Search in OpenSearch") ); request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); BulkResponse bulkResponse = client.bulk(request, RequestOptions.DEFAULT); @@ -1320,7 +1322,7 @@ private void indexSearchTestData() throws IOException { bulkRequest.add( new IndexRequest("posts").id("1") .source( - XContentType.JSON, + MediaTypeRegistry.JSON, "id", 1, "title", @@ -1334,7 +1336,7 @@ private void indexSearchTestData() throws IOException { bulkRequest.add( new IndexRequest("posts").id("2") .source( - XContentType.JSON, + MediaTypeRegistry.JSON, "id", 2, "title", @@ -1348,7 +1350,7 @@ private void indexSearchTestData() throws IOException { bulkRequest.add( new IndexRequest("posts").id("3") .source( - XContentType.JSON, + MediaTypeRegistry.JSON, "id", 3, "title", @@ -1360,8 +1362,8 @@ private void indexSearchTestData() throws IOException { ) ); - bulkRequest.add(new IndexRequest("authors").id("1").source(XContentType.JSON, "id", 1, "user", "foobar")); - bulkRequest.add(new IndexRequest("contributors").id("1").source(XContentType.JSON, "id", 1, "user", "quuz")); + bulkRequest.add(new IndexRequest("authors").id("1").source(MediaTypeRegistry.JSON, "id", 1, "user", "foobar")); + bulkRequest.add(new IndexRequest("contributors").id("1").source(MediaTypeRegistry.JSON, "id", 1, "user", "quuz")); bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); BulkResponse bulkResponse = highLevelClient().bulk(bulkRequest, RequestOptions.DEFAULT); @@ -1472,7 +1474,7 @@ private static void indexCountTestData() throws IOException { bulkRequest.add( new IndexRequest("blog").id("1") .source( - XContentType.JSON, + MediaTypeRegistry.JSON, "title", "Doubling Down on Open?", "user", @@ -1484,7 +1486,7 @@ private static void indexCountTestData() throws IOException { bulkRequest.add( new IndexRequest("blog").id("2") .source( - XContentType.JSON, + MediaTypeRegistry.JSON, "title", "XYZ Joins Forces with OpenSearch", "user", @@ -1496,7 +1498,7 @@ private static void indexCountTestData() throws IOException { bulkRequest.add( new IndexRequest("blog").id("3") .source( - XContentType.JSON, + MediaTypeRegistry.JSON, "title", "On Net Neutrality", "user", @@ -1506,7 +1508,7 @@ private static void indexCountTestData() throws IOException { ) ); - bulkRequest.add(new IndexRequest("author").id("1").source(XContentType.JSON, "user", "foobar")); + bulkRequest.add(new IndexRequest("author").id("1").source(MediaTypeRegistry.JSON, "user", "foobar")); bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); BulkResponse bulkResponse = highLevelClient().bulk(bulkRequest, RequestOptions.DEFAULT); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SnapshotClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SnapshotClientDocumentationIT.java index 976c69910d309..d0015db044843 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SnapshotClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SnapshotClientDocumentationIT.java @@ -32,7 +32,6 @@ package org.opensearch.client.documentation; -import org.opensearch.action.ActionListener; import org.opensearch.action.LatchedActionListener; import org.opensearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; @@ 
-64,9 +63,10 @@ import org.opensearch.common.Booleans; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.repositories.fs.FsRepository; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.repositories.fs.FsRepository; import org.opensearch.snapshots.RestoreInfo; import org.opensearch.snapshots.SnapshotId; import org.opensearch.snapshots.SnapshotInfo; @@ -90,15 +90,15 @@ * You need to wrap your code between two tags like: * // tag::example * // end::example - * + * <p> * Where example is your tag name. - * + * <p> * Then in the documentation, you can extract what is between tag and end tags with * ["source","java",subs="attributes,callouts,macros"] * -------------------------------------------------- * include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[example] * -------------------------------------------------- - * + * <p> * The column width of the code block is 84. If the code contains a line longer * than 84, the line will be cut and a horizontal scroll bar will be displayed. * (the code indentation of the tag is not included in the width) @@ -156,7 +156,7 @@ public void testSnapshotCreateRepository() throws IOException { { // tag::create-repository-settings-source request.settings("{\"location\": \".\", \"compress\": \"true\"}", - XContentType.JSON); // <1> + MediaTypeRegistry.JSON); // <1> // end::create-repository-settings-source } @@ -818,7 +818,7 @@ public void onFailure(Exception e) { private void createTestRepositories() throws IOException { PutRepositoryRequest request = new PutRepositoryRequest(repositoryName); request.type(FsRepository.TYPE); - request.settings("{\"location\": \".\"}", XContentType.JSON); + request.settings("{\"location\": \".\"}", MediaTypeRegistry.JSON); assertTrue(highLevelClient().snapshot().createRepository(request, RequestOptions.DEFAULT).isAcknowledged()); } diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/StoredScriptsDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/StoredScriptsDocumentationIT.java index 742dfa69e718b..2e2d15df5392a 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/StoredScriptsDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/StoredScriptsDocumentationIT.java @@ -32,7 +32,6 @@ package org.opensearch.client.documentation; -import org.opensearch.action.ActionListener; import org.opensearch.action.LatchedActionListener; import org.opensearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; import org.opensearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; @@ -42,12 +41,13 @@ import org.opensearch.client.OpenSearchRestHighLevelClientTestCase; import org.opensearch.client.RequestOptions; import org.opensearch.client.RestHighLevelClient; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import 
org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.script.Script; import org.opensearch.script.StoredScriptSource; @@ -66,15 +66,15 @@ * You need to wrap your code between two tags like: * // tag::example * // end::example - * + * <p> * Where example is your tag name. - * + * <p> * Then in the documentation, you can extract what is between tag and end tags with * ["source","java",subs="attributes,callouts,macros"] * -------------------------------------------------- * include-tagged::{doc-tests}/StoredScriptsDocumentationIT.java[example] * -------------------------------------------------- - * + * <p> * The column width of the code block is 84. If the code contains a line longer * than 84, the line will be cut and a horizontal scroll bar will be displayed. * (the code indentation of the tag is not included in the width) @@ -88,7 +88,7 @@ public void testGetStoredScript() throws Exception { final StoredScriptSource scriptSource = new StoredScriptSource( "painless", "Math.log(_score * 2) + params.my_modifier", - Collections.singletonMap(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType()) + Collections.singletonMap(Script.CONTENT_TYPE_OPTION, MediaTypeRegistry.JSON.mediaType()) ); putStoredScript("calculate-score", scriptSource); @@ -152,7 +152,7 @@ public void testDeleteStoredScript() throws Exception { final StoredScriptSource scriptSource = new StoredScriptSource( "painless", "Math.log(_score * 2) + params.my_modifier", - Collections.singletonMap(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType()) + Collections.singletonMap(Script.CONTENT_TYPE_OPTION, MediaTypeRegistry.JSON.mediaType()) ); putStoredScript("calculate-score", scriptSource); @@ -221,7 +221,7 @@ public void testPutScript() throws Exception { "\"source\": \"Math.log(_score * 2) + params.multiplier\"" + "}\n" + "}\n" - ), XContentType.JSON); // <2> + ), MediaTypeRegistry.JSON); // <2> // end::put-stored-script-request // tag::put-stored-script-context @@ -255,7 +255,7 @@ public void testPutScript() throws Exception { builder.endObject(); } builder.endObject(); - request.content(BytesReference.bytes(builder), XContentType.JSON); // <1> + request.content(BytesReference.bytes(builder), MediaTypeRegistry.JSON); // <1> // end::put-stored-script-content-painless // tag::put-stored-script-execute @@ -310,7 +310,7 @@ public void onFailure(Exception e) { builder.endObject(); } builder.endObject(); - request.content(BytesReference.bytes(builder), XContentType.JSON); // <1> + request.content(BytesReference.bytes(builder), MediaTypeRegistry.JSON); // <1> // end::put-stored-script-content-mustache client.putScript(request, RequestOptions.DEFAULT); @@ -322,7 +322,13 @@ public void onFailure(Exception e) { } private void putStoredScript(String id, StoredScriptSource scriptSource) throws IOException { - PutStoredScriptRequest request = new PutStoredScriptRequest(id, "score", new BytesArray("{}"), XContentType.JSON, scriptSource); + PutStoredScriptRequest request = new PutStoredScriptRequest( + id, + "score", + new BytesArray("{}"), + MediaTypeRegistry.JSON, + scriptSource + ); assertAcked(execute(request, highLevelClient()::putScript, highLevelClient()::putScriptAsync)); } } diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/TasksClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/TasksClientDocumentationIT.java index 05479e2e3e81c..cbac0b8c97d9c 100644 --- 
a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/TasksClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/TasksClientDocumentationIT.java @@ -33,7 +33,6 @@ package org.opensearch.client.documentation; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; import org.opensearch.action.LatchedActionListener; import org.opensearch.action.TaskOperationFailure; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest; @@ -45,7 +44,8 @@ import org.opensearch.client.tasks.CancelTasksRequest; import org.opensearch.client.tasks.CancelTasksResponse; import org.opensearch.common.unit.TimeValue; -import org.opensearch.tasks.TaskId; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.tasks.TaskId; import org.opensearch.tasks.TaskInfo; import java.io.IOException; @@ -66,15 +66,15 @@ * You need to wrap your code between two tags like: * // tag::example * // end::example - * + * <p> * Where example is your tag name. - * + * <p> * Then in the documentation, you can extract what is between tag and end tags with * ["source","java",subs="attributes,callouts,macros"] * -------------------------------------------------- * include-tagged::{doc-tests}/{@link TasksClientDocumentationIT}.java[example] * -------------------------------------------------- - * + * <p> * The column width of the code block is 84. If the code contains a line longer * than 84, the line will be cut and a horizontal scroll bar will be displayed. * (the code indentation of the tag is not included in the width) diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/indices/AnalyzeResponseTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/indices/AnalyzeResponseTests.java index 920235d848f66..dadd64f8329cc 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/indices/AnalyzeResponseTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/indices/AnalyzeResponseTests.java @@ -34,8 +34,8 @@ import org.opensearch.action.admin.indices.analyze.AnalyzeAction; import org.opensearch.client.AbstractResponseTestCase; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.XContentParser; import org.opensearch.test.RandomObjects; import java.io.IOException; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/indices/CloseIndexResponseTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/indices/CloseIndexResponseTests.java index b83bdab899a41..6aafee142bd22 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/indices/CloseIndexResponseTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/indices/CloseIndexResponseTests.java @@ -35,16 +35,15 @@ import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.support.master.ShardsAcknowledgedResponse; import org.opensearch.client.AbstractResponseTestCase; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.index.Index; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContent; -import 
org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.index.Index; import org.opensearch.index.IndexNotFoundException; -import org.opensearch.core.rest.RestStatus; import org.opensearch.transport.ActionNotFoundTransportException; import java.io.IOException; @@ -194,7 +193,7 @@ public final void testBwcFromXContent() throws IOException { final XContentType xContentType = randomFrom(XContentType.values()); final BytesReference bytes = toShuffledXContent(expected, xContentType, getParams(), randomBoolean()); - final XContent xContent = XContentFactory.xContent(xContentType); + final XContent xContent = xContentType.xContent(); final XContentParser parser = xContent.createParser( NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, @@ -215,7 +214,7 @@ public final void testBwcFromXContent() throws IOException { final XContentType xContentType = randomFrom(XContentType.values()); final BytesReference bytes = toShuffledXContent(expected, xContentType, getParams(), randomBoolean()); - final XContent xContent = XContentFactory.xContent(xContentType); + final XContent xContent = xContentType.xContent(); final XContentParser parser = xContent.createParser( NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/indices/DataStreamsStatsResponseTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/indices/DataStreamsStatsResponseTests.java index 96db2fb43c161..9418a82fc6107 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/indices/DataStreamsStatsResponseTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/indices/DataStreamsStatsResponseTests.java @@ -34,11 +34,11 @@ import org.opensearch.OpenSearchException; import org.opensearch.action.admin.indices.datastream.DataStreamsStatsAction; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.client.AbstractResponseTestCase; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.xcontent.XContentParser; import java.io.IOException; import java.util.ArrayList; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/indices/GetDataStreamResponseTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/indices/GetDataStreamResponseTests.java index 523758c11de6b..3f0d56a3d9455 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/indices/GetDataStreamResponseTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/indices/GetDataStreamResponseTests.java @@ -38,9 +38,9 @@ import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.cluster.metadata.DataStream; import org.opensearch.common.UUIDs; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.XContentParser; import java.io.IOException; import java.util.ArrayList; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/indices/GetIndexResponseTests.java 
b/client/rest-high-level/src/test/java/org/opensearch/client/indices/GetIndexResponseTests.java index 374c33389dd3a..a00f0487116dc 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/indices/GetIndexResponseTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/indices/GetIndexResponseTests.java @@ -39,8 +39,8 @@ import org.opensearch.cluster.metadata.MappingMetadata; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.RandomCreateIndexGenerator; import org.opensearch.index.mapper.MapperService; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/indices/GetIndexTemplatesResponseTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/indices/GetIndexTemplatesResponseTests.java index 6f924898096a0..507dc7802283f 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/indices/GetIndexTemplatesResponseTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/indices/GetIndexTemplatesResponseTests.java @@ -34,18 +34,18 @@ import org.opensearch.cluster.metadata.AliasMetadata; import org.opensearch.cluster.metadata.MappingMetadata; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.DeprecationHandler; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.mapper.MapperService; import org.opensearch.test.OpenSearchTestCase; @@ -164,7 +164,7 @@ private Predicate<String> randomFieldsExcludeFilter() { private static void assertEqualInstances(GetIndexTemplatesResponse expectedInstance, GetIndexTemplatesResponse newInstance) { assertEquals(expectedInstance, newInstance); // Check there's no doc types at the root of the mapping - Map<String, Object> expectedMap = XContentHelper.convertToMap(new BytesArray(mappingString), true, XContentType.JSON).v2(); + Map<String, Object> expectedMap = XContentHelper.convertToMap(new BytesArray(mappingString), true, MediaTypeRegistry.JSON).v2(); for (IndexTemplateMetadata template : newInstance.getIndexTemplates()) { MappingMetadata mappingMD = template.mappings(); if (mappingMD != null) { @@ -194,7 +194,7 @@ static GetIndexTemplatesResponse createTestInstance() { templateBuilder.version(between(0, 100)); } if (randomBoolean()) { - Map<String, Object> map = XContentHelper.convertToMap(new BytesArray(mappingString), true, XContentType.JSON).v2(); + Map<String, Object> map = XContentHelper.convertToMap(new BytesArray(mappingString), true, MediaTypeRegistry.JSON).v2(); MappingMetadata mapping = new 
MappingMetadata(MapperService.SINGLE_MAPPING_NAME, map); templateBuilder.mapping(mapping); } @@ -262,7 +262,7 @@ private static AliasMetadata randomAliasMetadata(String name) { } static XContentBuilder randomMapping(String type, XContentType xContentType) throws IOException { - XContentBuilder builder = XContentFactory.contentBuilder(xContentType); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(xContentType); builder.startObject().startObject(type); randomMappingFields(builder, true); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/indices/GetMappingsResponseTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/indices/GetMappingsResponseTests.java index fe87e0fe6aac9..6d80cdd3ad074 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/indices/GetMappingsResponseTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/indices/GetMappingsResponseTests.java @@ -34,8 +34,8 @@ import org.opensearch.client.AbstractResponseTestCase; import org.opensearch.cluster.metadata.MappingMetadata; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.mapper.MapperService; import java.io.IOException; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/indices/RandomCreateIndexGenerator.java b/client/rest-high-level/src/test/java/org/opensearch/client/indices/RandomCreateIndexGenerator.java index 415ed18ddf4f2..edb4d16c6d992 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/indices/RandomCreateIndexGenerator.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/indices/RandomCreateIndexGenerator.java @@ -32,8 +32,8 @@ package org.opensearch.client.indices; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.XContentBuilder; import java.io.IOException; @@ -44,7 +44,7 @@ public class RandomCreateIndexGenerator { /** * Returns a random {@link CreateIndexRequest}. - * + * <p> * Randomizes the index name, the aliases, mappings and settings associated with the * index. When present, the mappings make no mention of types. 
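* <p>
 * For illustration only (hypothetical call, not part of this diff):
 * <pre>
 * CreateIndexRequest request = RandomCreateIndexGenerator.randomCreateIndexRequest();
 * </pre>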
*/ diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/indices/ResizeResponseTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/indices/ResizeResponseTests.java index 1539f500dabc7..919cc89a9ae44 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/indices/ResizeResponseTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/indices/ResizeResponseTests.java @@ -33,8 +33,8 @@ package org.opensearch.client.indices; import org.opensearch.client.AbstractResponseTestCase; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.XContentParser; import java.io.IOException; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/indices/rollover/RolloverRequestTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/indices/rollover/RolloverRequestTests.java index 9d51c09ba2e44..b4e0fc8f7f38c 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/indices/rollover/RolloverRequestTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/indices/rollover/RolloverRequestTests.java @@ -36,8 +36,8 @@ import org.opensearch.action.admin.indices.rollover.MaxAgeCondition; import org.opensearch.action.admin.indices.rollover.MaxDocsCondition; import org.opensearch.action.admin.indices.rollover.MaxSizeCondition; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.test.OpenSearchTestCase; import java.util.ArrayList; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/indices/rollover/RolloverResponseTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/indices/rollover/RolloverResponseTests.java index ff2418c5caee2..14477f07f8b42 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/indices/rollover/RolloverResponseTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/indices/rollover/RolloverResponseTests.java @@ -36,8 +36,8 @@ import org.opensearch.action.admin.indices.rollover.MaxAgeCondition; import org.opensearch.action.admin.indices.rollover.MaxDocsCondition; import org.opensearch.action.admin.indices.rollover.MaxSizeCondition; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.test.OpenSearchTestCase; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/tasks/CancelTasksResponseTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/tasks/CancelTasksResponseTests.java index e6411b615df07..faf5024d0c173 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/tasks/CancelTasksResponseTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/tasks/CancelTasksResponseTests.java @@ -38,14 +38,14 @@ import org.opensearch.client.AbstractResponseTestCase; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; +import 
org.opensearch.core.tasks.TaskId; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; import org.opensearch.tasks.TaskInfo; import java.io.IOException; @@ -84,6 +84,10 @@ protected CancelTasksResponseTests.ByNodeCancelTasksResponse createServerTestIns for (int i = 0; i < 4; i++) { boolean cancellable = randomBoolean(); boolean cancelled = cancellable == true ? randomBoolean() : false; + Long cancellationStartTime = null; + if (cancelled) { + cancellationStartTime = randomNonNegativeLong(); + } tasks.add( new org.opensearch.tasks.TaskInfo( new TaskId(NODE_ID, (long) i), @@ -97,7 +101,8 @@ protected CancelTasksResponseTests.ByNodeCancelTasksResponse createServerTestIns cancelled, new TaskId("node1", randomLong()), Collections.singletonMap("x-header-of", "some-value"), - null + null, + cancellationStartTime ) ); } @@ -135,6 +140,7 @@ protected void assertInstances( assertEquals(ti.isCancelled(), taskInfo.isCancelled()); assertEquals(ti.getParentTaskId().getNodeId(), taskInfo.getParentTaskId().getNodeId()); assertEquals(ti.getParentTaskId().getId(), taskInfo.getParentTaskId().getId()); + assertEquals(ti.getCancellationStartTime(), taskInfo.getCancellationStartTime()); FakeTaskStatus status = (FakeTaskStatus) ti.getStatus(); assertEquals(status.code, taskInfo.getStatus().get("code")); assertEquals(status.status, taskInfo.getStatus().get("status")); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/tasks/OpenSearchExceptionTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/tasks/OpenSearchExceptionTests.java index df621e67ca3b7..fb8e4f731fb19 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/tasks/OpenSearchExceptionTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/tasks/OpenSearchExceptionTests.java @@ -32,8 +32,8 @@ package org.opensearch.client.tasks; import org.opensearch.client.AbstractResponseTestCase; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.XContentParser; import java.io.IOException; import java.util.Collections; diff --git a/client/rest/build.gradle b/client/rest/build.gradle index 2c437c909fb03..f18df65dfddfa 100644 --- a/client/rest/build.gradle +++ b/client/rest/build.gradle @@ -34,13 +34,13 @@ apply plugin: 'opensearch.build' apply plugin: 'opensearch.publish' java { - targetCompatibility = JavaVersion.VERSION_11 - sourceCompatibility = JavaVersion.VERSION_11 + targetCompatibility = JavaVersion.VERSION_1_8 + sourceCompatibility = JavaVersion.VERSION_1_8 } base { group = 'org.opensearch.client' - archivesBaseName = 'opensearch-rest-client' + archivesName = 'opensearch-rest-client' } dependencies { @@ -109,3 +109,10 @@ thirdPartyAudit.ignoreMissingClasses( 'javax.servlet.ServletContextEvent', 'javax.servlet.ServletContextListener' ) + +tasks.withType(JavaCompile) { + // Suppressing '[options] target value 8 is obsolete and will be removed in a future release' + configure(options) { + options.compilerArgs << '-Xlint:-options' + } +} diff --git a/client/rest/src/main/java/org/opensearch/client/RestClient.java b/client/rest/src/main/java/org/opensearch/client/RestClient.java index 9d140a145b004..15905add76c4f 100644 --- a/client/rest/src/main/java/org/opensearch/client/RestClient.java +++ 
b/client/rest/src/main/java/org/opensearch/client/RestClient.java @@ -33,22 +33,13 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hc.core5.http.ClassicHttpResponse; -import org.apache.hc.core5.http.ConnectionClosedException; -import org.apache.hc.core5.http.Header; -import org.apache.hc.core5.http.HttpEntity; -import org.apache.hc.core5.http.io.entity.HttpEntityWrapper; -import org.apache.hc.core5.http.HttpHost; -import org.apache.hc.core5.http.HttpRequest; +import org.apache.hc.client5.http.ClientProtocolException; +import org.apache.hc.client5.http.ConnectTimeoutException; import org.apache.hc.client5.http.auth.AuthCache; import org.apache.hc.client5.http.auth.AuthScheme; import org.apache.hc.client5.http.auth.AuthScope; import org.apache.hc.client5.http.auth.Credentials; import org.apache.hc.client5.http.auth.CredentialsProvider; -import org.apache.hc.client5.http.ConnectTimeoutException; -import org.apache.hc.client5.http.ClientProtocolException; -import org.apache.hc.client5.http.entity.GzipDecompressingEntity; -import org.apache.hc.client5.http.config.RequestConfig; import org.apache.hc.client5.http.classic.methods.HttpDelete; import org.apache.hc.client5.http.classic.methods.HttpGet; import org.apache.hc.client5.http.classic.methods.HttpHead; @@ -56,9 +47,22 @@ import org.apache.hc.client5.http.classic.methods.HttpPatch; import org.apache.hc.client5.http.classic.methods.HttpPost; import org.apache.hc.client5.http.classic.methods.HttpPut; -import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase; import org.apache.hc.client5.http.classic.methods.HttpTrace; +import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase; +import org.apache.hc.client5.http.config.RequestConfig; +import org.apache.hc.client5.http.entity.GzipDecompressingEntity; +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; +import org.apache.hc.client5.http.impl.auth.BasicAuthCache; +import org.apache.hc.client5.http.impl.auth.BasicScheme; import org.apache.hc.client5.http.protocol.HttpClientContext; +import org.apache.hc.core5.concurrent.FutureCallback; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ConnectionClosedException; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.HttpRequest; +import org.apache.hc.core5.http.io.entity.HttpEntityWrapper; import org.apache.hc.core5.http.message.RequestLine; import org.apache.hc.core5.http.nio.AsyncRequestProducer; import org.apache.hc.core5.http.nio.AsyncResponseConsumer; @@ -66,12 +70,9 @@ import org.apache.hc.core5.reactor.IOReactorStatus; import org.apache.hc.core5.util.Args; import org.opensearch.client.http.HttpUriRequestProducer; -import org.apache.hc.core5.concurrent.FutureCallback; -import org.apache.hc.client5.http.impl.auth.BasicScheme; -import org.apache.hc.client5.http.impl.auth.BasicAuthCache; -import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; import javax.net.ssl.SSLHandshakeException; + import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.Closeable; @@ -309,7 +310,7 @@ public boolean isRunning() { * they will be retried). In case of failures all of the alive nodes (or * dead nodes that deserve a retry) are retried until one responds or none * of them does, in which case an {@link IOException} will be thrown. 
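The paragraph above states the client's failover contract: every alive node (and any dead node due for a retry) is tried in turn, and an IOException propagates only once all of them have failed. A rough usage sketch of the synchronous entry point (the restClient instance is assumed to have been built elsewhere via RestClient.builder(...)):

import org.opensearch.client.Request;
import org.opensearch.client.Response;

// One logical call; retries across the configured nodes happen inside performRequest.
Response response = restClient.performRequest(new Request("GET", "/"));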
- * + * <p> * This method works by performing an asynchronous call and waiting * for the result. If the asynchronous call throws an exception we wrap * it and rethrow it so that the stack trace attached to the exception @@ -1115,9 +1116,15 @@ public long getContentLength() { if (chunkedEnabled.get()) { return -1L; } else { - long size; + long size = 0; + final byte[] buf = new byte[8192]; + int nread = 0; + try (InputStream is = getContent()) { - size = is.readAllBytes().length; + // read to EOF which may read more or less than buffer size + while ((nread = is.read(buf)) > 0) { + size += nread; + } } catch (IOException ex) { size = -1L; } diff --git a/client/rest/src/main/java/org/opensearch/client/RestClientBuilder.java b/client/rest/src/main/java/org/opensearch/client/RestClientBuilder.java index a01cf2f403099..3e38f9ae95dec 100644 --- a/client/rest/src/main/java/org/opensearch/client/RestClientBuilder.java +++ b/client/rest/src/main/java/org/opensearch/client/RestClientBuilder.java @@ -32,22 +32,22 @@ package org.opensearch.client; -import org.apache.hc.core5.function.Factory; -import org.apache.hc.core5.http.Header; -import org.apache.hc.core5.http.nio.ssl.TlsStrategy; -import org.apache.hc.core5.reactor.ssl.TlsDetails; -import org.apache.hc.core5.util.Timeout; import org.apache.hc.client5.http.async.HttpAsyncClient; import org.apache.hc.client5.http.auth.CredentialsProvider; import org.apache.hc.client5.http.config.RequestConfig; +import org.apache.hc.client5.http.impl.DefaultAuthenticationStrategy; +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; +import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder; import org.apache.hc.client5.http.impl.classic.CloseableHttpClient; import org.apache.hc.client5.http.impl.classic.HttpClientBuilder; import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManager; import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManagerBuilder; import org.apache.hc.client5.http.ssl.ClientTlsStrategyBuilder; -import org.apache.hc.client5.http.impl.DefaultAuthenticationStrategy; -import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; -import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder; +import org.apache.hc.core5.function.Factory; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.nio.ssl.TlsStrategy; +import org.apache.hc.core5.reactor.ssl.TlsDetails; +import org.apache.hc.core5.util.Timeout; import javax.net.ssl.SSLContext; import javax.net.ssl.SSLEngine; diff --git a/client/rest/src/main/java/org/opensearch/client/nio/HeapBufferedAsyncEntityConsumer.java b/client/rest/src/main/java/org/opensearch/client/nio/HeapBufferedAsyncEntityConsumer.java index 9bd17d1c24c7e..ae38c1a0308d1 100644 --- a/client/rest/src/main/java/org/opensearch/client/nio/HeapBufferedAsyncEntityConsumer.java +++ b/client/rest/src/main/java/org/opensearch/client/nio/HeapBufferedAsyncEntityConsumer.java @@ -86,25 +86,29 @@ protected void data(final ByteBuffer src, final boolean endOfStream) throws IOEx return; } + int len = src.limit(); + if (len < 0) { + len = 4096; + } else if (len > bufferLimitBytes) { + throw new ContentTooLongException( + "entity content is too long [" + len + "] for the configured buffer limit [" + bufferLimitBytes + "]" + ); + } + ByteArrayBuffer buffer = bufferRef.get(); if (buffer == null) { - buffer = new ByteArrayBuffer(bufferLimitBytes); + buffer = new ByteArrayBuffer(len); if (bufferRef.compareAndSet(null, buffer) == false) { buffer = 
bufferRef.get(); } } - int len = src.limit(); if (buffer.length() + len > bufferLimitBytes) { throw new ContentTooLongException( "entity content is too long [" + len + "] for the configured buffer limit [" + bufferLimitBytes + "]" ); } - if (len < 0) { - len = 4096; - } - if (src.hasArray()) { buffer.append(src.array(), src.arrayOffset() + src.position(), src.remaining()); } else { @@ -136,4 +140,12 @@ public void releaseResources() { buffer = null; } } + + /** + * Gets current byte buffer instance + * @return byte buffer instance + */ + ByteArrayBuffer getBuffer() { + return bufferRef.get(); + } } diff --git a/client/rest/src/test/java/org/opensearch/client/FailureTrackingResponseListenerTests.java b/client/rest/src/test/java/org/opensearch/client/FailureTrackingResponseListenerTests.java index 9722ec867a376..9c12c79cdb1c3 100644 --- a/client/rest/src/test/java/org/opensearch/client/FailureTrackingResponseListenerTests.java +++ b/client/rest/src/test/java/org/opensearch/client/FailureTrackingResponseListenerTests.java @@ -35,8 +35,8 @@ import org.apache.hc.core5.http.ClassicHttpResponse; import org.apache.hc.core5.http.HttpHost; import org.apache.hc.core5.http.ProtocolVersion; -import org.apache.hc.core5.http.message.RequestLine; import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.message.RequestLine; import java.util.concurrent.atomic.AtomicReference; diff --git a/client/rest/src/test/java/org/opensearch/client/NodeTests.java b/client/rest/src/test/java/org/opensearch/client/NodeTests.java index 748bec5fb7de5..9efc34d72c341 100644 --- a/client/rest/src/test/java/org/opensearch/client/NodeTests.java +++ b/client/rest/src/test/java/org/opensearch/client/NodeTests.java @@ -46,11 +46,11 @@ import static java.util.Collections.singleton; import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; +import static org.hamcrest.CoreMatchers.equalTo; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; -import static org.hamcrest.CoreMatchers.equalTo; public class NodeTests extends RestClientTestCase { public void testToString() { diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientBuilderIntegTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientBuilderIntegTests.java index f5e1735042e66..0b7cf6e8bb5fe 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientBuilderIntegTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientBuilderIntegTests.java @@ -45,6 +45,7 @@ import javax.net.ssl.SSLContext; import javax.net.ssl.SSLHandshakeException; import javax.net.ssl.TrustManagerFactory; + import java.io.IOException; import java.io.InputStream; import java.net.InetAddress; diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostIntegTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostIntegTests.java index beee1c5ca21a0..de04dd843b2db 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostIntegTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostIntegTests.java @@ -48,15 +48,16 @@ import org.apache.hc.core5.http.ContentType; import org.apache.hc.core5.http.Header; import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.HttpResponse; import org.apache.hc.core5.http.io.entity.EntityUtils; import 
org.apache.hc.core5.http.io.entity.StringEntity; import org.apache.hc.core5.http.message.BasicHeader; import org.apache.hc.core5.http.nio.AsyncResponseConsumer; import org.apache.hc.core5.net.URIBuilder; -import org.junit.After; -import org.junit.Before; import org.opensearch.client.http.HttpUriRequestProducer; import org.opensearch.client.nio.HeapBufferedAsyncResponseConsumer; +import org.junit.After; +import org.junit.Before; import java.io.IOException; import java.io.InputStreamReader; @@ -73,6 +74,7 @@ import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Future; +import java.util.concurrent.Phaser; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; @@ -298,37 +300,70 @@ public void testRequestResetAndAbort() throws Exception { httpGet.reset(); assertFalse(httpGet.isAborted()); - Future<ClassicHttpResponse> future = client.execute(getRequestProducer(httpGet, httpHost), getResponseConsumer(), null); - httpGet.setDependency((org.apache.hc.core5.concurrent.Cancellable) future); - httpGet.abort(); + final Phaser phaser = new Phaser(2); + phaser.register(); try { - future.get(); - fail("expected cancellation exception"); - } catch (CancellationException e) { - // expected + Future<ClassicHttpResponse> future = client.execute( + getRequestProducer(httpGet, httpHost), + getResponseConsumer(phaser), + null + ); + httpGet.setDependency((org.apache.hc.core5.concurrent.Cancellable) future); + httpGet.abort(); + + try { + phaser.arriveAndDeregister(); + future.get(); + fail("expected cancellation exception"); + } catch (CancellationException e) { + // expected + } + assertTrue(future.isCancelled()); + } finally { + // Forcing termination since the AsyncResponseConsumer may not be reached, + // the request is aborted right before + phaser.forceTermination(); } - assertTrue(future.isCancelled()); } { - httpGet.reset(); - Future<ClassicHttpResponse> future = client.execute(getRequestProducer(httpGet, httpHost), getResponseConsumer(), null); - assertFalse(httpGet.isAborted()); - httpGet.setDependency((org.apache.hc.core5.concurrent.Cancellable) future); - httpGet.abort(); - assertTrue(httpGet.isAborted()); + final Phaser phaser = new Phaser(2); + phaser.register(); + try { - assertTrue(future.isCancelled()); - future.get(); - throw new AssertionError("exception should have been thrown"); - } catch (CancellationException e) { - // expected + httpGet.reset(); + Future<ClassicHttpResponse> future = client.execute( + getRequestProducer(httpGet, httpHost), + getResponseConsumer(phaser), + null + ); + assertFalse(httpGet.isAborted()); + httpGet.setDependency((org.apache.hc.core5.concurrent.Cancellable) future); + httpGet.abort(); + assertTrue(httpGet.isAborted()); + try { + phaser.arriveAndDeregister(); + assertTrue(future.isCancelled()); + future.get(); + throw new AssertionError("exception should have been thrown"); + } catch (CancellationException e) { + // expected + } + } finally { + // Forcing termination since the AsyncResponseConsumer may not be reached, + // the request is aborted right before + phaser.forceTermination(); } } { httpGet.reset(); assertFalse(httpGet.isAborted()); - Future<ClassicHttpResponse> future = client.execute(getRequestProducer(httpGet, httpHost), getResponseConsumer(), null); + final Phaser phaser = new Phaser(0); + Future<ClassicHttpResponse> future = client.execute( + getRequestProducer(httpGet, httpHost), + getResponseConsumer(phaser), + null + ); 
assertFalse(httpGet.isAborted()); assertEquals(200, future.get().getCode()); assertFalse(future.isCancelled()); @@ -554,8 +589,15 @@ private Response bodyTest(RestClient restClient, String method, int statusCode, return esResponse; } - private AsyncResponseConsumer<ClassicHttpResponse> getResponseConsumer() { - return new HeapBufferedAsyncResponseConsumer(1024); + private AsyncResponseConsumer<ClassicHttpResponse> getResponseConsumer(Phaser phaser) { + phaser.register(); + return new HeapBufferedAsyncResponseConsumer(1024) { + @Override + protected ClassicHttpResponse buildResult(HttpResponse response, byte[] entity, ContentType contentType) { + phaser.arriveAndAwaitAdvance(); + return super.buildResult(response, entity, contentType); + } + }; } private HttpUriRequestProducer getRequestProducer(HttpUriRequestBase request, HttpHost host) { diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostTests.java index f46a91aa910f8..1d57fe2c8dcab 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostTests.java @@ -67,11 +67,12 @@ import org.apache.hc.core5.net.URIBuilder; import org.apache.hc.core5.reactor.IOReactorStatus; import org.apache.hc.core5.util.TimeValue; +import org.opensearch.client.http.HttpUriRequestProducer; import org.junit.After; import org.junit.Before; -import org.opensearch.client.http.HttpUriRequestProducer; import javax.net.ssl.SSLHandshakeException; + import java.io.IOException; import java.io.PrintWriter; import java.io.StringWriter; diff --git a/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java b/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java index b2807d35d230e..42c31864e0578 100644 --- a/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java +++ b/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java @@ -89,15 +89,15 @@ * You need to wrap your code between two tags like: * // tag::example[] * // end::example[] - * + * <p> * Where example is your tag name. - * + * <p> * Then in the documentation, you can extract what is between tag and end tags with * ["source","java",subs="attributes,callouts,macros"] * -------------------------------------------------- * include-tagged::{doc-tests}/RestClientDocumentation.java[example] * -------------------------------------------------- - * + * <p> * Note that this is not a test class as we are only interested in testing that docs snippets compile. We don't want * to send requests to a node and we don't even have the tools to do it. */ diff --git a/client/rest/src/test/java/org/opensearch/client/nio/HeapBufferedAsyncEntityConsumerTests.java b/client/rest/src/test/java/org/opensearch/client/nio/HeapBufferedAsyncEntityConsumerTests.java new file mode 100644 index 0000000000000..fdfe49ca901c9 --- /dev/null +++ b/client/rest/src/test/java/org/opensearch/client/nio/HeapBufferedAsyncEntityConsumerTests.java @@ -0,0 +1,71 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
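
The Phaser plumbing added to testRequestResetAndAbort above makes the abort race deterministic: the overridden buildResult() parks on the phaser, so the response can only complete after the test thread has called abort() and released its party, and forceTermination() in the finally block frees the consumer for runs where the request is aborted before buildResult() is ever reached. A tiny self-contained sketch of that handshake, with illustrative names (the real test registers additional parties, but the mechanism is the same):

    import java.util.concurrent.Phaser;

    public class PhaserHandshakeSketch {
        public static void main(String[] args) throws InterruptedException {
            // one party for the "test" thread, one for the "consumer" thread
            final Phaser phaser = new Phaser(2);

            Thread consumer = new Thread(() -> {
                // stands in for buildResult(): blocks until the test also arrives
                phaser.arriveAndAwaitAdvance();
                System.out.println("consumer: allowed to build the response");
            });
            consumer.start();

            System.out.println("test: abort() has already won the race");
            phaser.arriveAndDeregister(); // only now may the consumer proceed
            consumer.join();

            // safety net for runs where the consumer never reaches the phaser
            phaser.forceTermination();
        }
    }
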
+ */ + +package org.opensearch.client.nio; + +import org.apache.hc.core5.http.ContentTooLongException; +import org.opensearch.client.RestClientTestCase; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.nio.ByteBuffer; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertThrows; + +public class HeapBufferedAsyncEntityConsumerTests extends RestClientTestCase { + private static final int BUFFER_LIMIT = 100 * 1024 * 1024 /* 100Mb */; + private HeapBufferedAsyncEntityConsumer consumer; + + @Before + public void setUp() { + consumer = new HeapBufferedAsyncEntityConsumer(BUFFER_LIMIT); + } + + @After + public void tearDown() { + consumer.releaseResources(); + } + + public void testConsumerAllocatesBufferLimit() throws IOException { + consumer.consume((ByteBuffer) randomByteBufferOfLength(1000).flip()); + assertThat(consumer.getBuffer().capacity(), equalTo(1000)); + } + + public void testConsumerAllocatesEmptyBuffer() throws IOException { + consumer.consume((ByteBuffer) ByteBuffer.allocate(0).flip()); + assertThat(consumer.getBuffer().capacity(), equalTo(0)); + } + + public void testConsumerExpandsBufferLimits() throws IOException { + consumer.consume((ByteBuffer) randomByteBufferOfLength(1000).flip()); + consumer.consume((ByteBuffer) randomByteBufferOfLength(2000).flip()); + consumer.consume((ByteBuffer) randomByteBufferOfLength(3000).flip()); + assertThat(consumer.getBuffer().capacity(), equalTo(6000)); + } + + public void testConsumerAllocatesLimit() throws IOException { + consumer.consume((ByteBuffer) randomByteBufferOfLength(BUFFER_LIMIT).flip()); + assertThat(consumer.getBuffer().capacity(), equalTo(BUFFER_LIMIT)); + } + + public void testConsumerFailsToAllocateOverLimit() throws IOException { + assertThrows(ContentTooLongException.class, () -> consumer.consume((ByteBuffer) randomByteBufferOfLength(BUFFER_LIMIT + 1).flip())); + } + + public void testConsumerFailsToExpandOverLimit() throws IOException { + consumer.consume((ByteBuffer) randomByteBufferOfLength(BUFFER_LIMIT).flip()); + assertThrows(ContentTooLongException.class, () -> consumer.consume((ByteBuffer) randomByteBufferOfLength(1).flip())); + } + + private static ByteBuffer randomByteBufferOfLength(int length) { + return ByteBuffer.allocate(length).put(randomBytesOfLength(length)); + } +} diff --git a/client/sniffer/build.gradle b/client/sniffer/build.gradle index f645b2dbbc933..4b50a996d1f9f 100644 --- a/client/sniffer/build.gradle +++ b/client/sniffer/build.gradle @@ -37,7 +37,7 @@ java { base { group = 'org.opensearch.client' - archivesBaseName = 'opensearch-rest-client-sniffer' + archivesName = 'opensearch-rest-client-sniffer' } dependencies { diff --git a/client/sniffer/licenses/jackson-core-2.15.2.jar.sha1 b/client/sniffer/licenses/jackson-core-2.15.2.jar.sha1 deleted file mode 100644 index ec6781b968eed..0000000000000 --- a/client/sniffer/licenses/jackson-core-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a6fe1836469a69b3ff66037c324d75fc66ef137c \ No newline at end of file diff --git a/client/sniffer/licenses/jackson-core-2.16.2.jar.sha1 b/client/sniffer/licenses/jackson-core-2.16.2.jar.sha1 new file mode 100644 index 0000000000000..68646a1e66ffc --- /dev/null +++ b/client/sniffer/licenses/jackson-core-2.16.2.jar.sha1 @@ -0,0 +1 @@ +b4f588bf070f77b604c645a7d60b71eae2e6ea09 \ No newline at end of file diff --git 
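
In the new HeapBufferedAsyncEntityConsumerTests above, every buffer is passed as (ByteBuffer) ...flip(). The cast exists because Buffer.flip() is declared to return Buffer; Java 9 added covariant overrides returning ByteBuffer, so the cast keeps the expression valid when compiling against the Java 8 API. A two-line illustration:

    import java.nio.Buffer;
    import java.nio.ByteBuffer;

    public class FlipCastSketch {
        public static void main(String[] args) {
            // Against the Java 8 API, flip() returns Buffer, so chaining onto a
            // ByteBuffer-typed variable needs the explicit downcast:
            ByteBuffer filled = (ByteBuffer) ByteBuffer.allocate(16).put(new byte[16]).flip();
            // flip() moves limit to the old position and resets position to 0,
            // so everything just written is now readable:
            System.out.println(filled.remaining()); // 16
            Buffer raw = filled; // the supertype Java 8 declared
            System.out.println(raw.position() + "/" + raw.limit()); // 0/16
        }
    }
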
a/client/sniffer/src/main/java/org/opensearch/client/sniff/OpenSearchNodesSniffer.java b/client/sniffer/src/main/java/org/opensearch/client/sniff/OpenSearchNodesSniffer.java index e6696c1fc4039..1a87e4f7a5b45 100644 --- a/client/sniffer/src/main/java/org/opensearch/client/sniff/OpenSearchNodesSniffer.java +++ b/client/sniffer/src/main/java/org/opensearch/client/sniff/OpenSearchNodesSniffer.java @@ -35,6 +35,7 @@ import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.JsonToken; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hc.core5.http.HttpEntity; diff --git a/client/sniffer/src/main/java/org/opensearch/client/sniff/Sniffer.java b/client/sniffer/src/main/java/org/opensearch/client/sniff/Sniffer.java index adddb3bda725c..f609fae4e3c81 100644 --- a/client/sniffer/src/main/java/org/opensearch/client/sniff/Sniffer.java +++ b/client/sniffer/src/main/java/org/opensearch/client/sniff/Sniffer.java @@ -305,6 +305,7 @@ public void shutdown() { } } + @SuppressWarnings("removal") static class SnifferThreadFactory implements ThreadFactory { private final AtomicInteger threadNumber = new AtomicInteger(1); private final String namePrefix; diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferTests.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferTests.java index b678fb050e8f8..5520a5acab2d1 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferTests.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferTests.java @@ -35,18 +35,21 @@ import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomStrings; + import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonGenerator; + import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; + +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node; import org.opensearch.client.Response; import org.opensearch.client.ResponseException; import org.opensearch.client.RestClient; import org.opensearch.client.RestClientTestCase; -import org.apache.hc.client5.http.classic.methods.HttpGet; -import org.apache.hc.core5.http.HttpHost; import org.junit.After; import org.junit.Before; diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferTests.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferTests.java index 36923281dde6b..3aef4f6360a3e 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferTests.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferTests.java @@ -32,14 +32,12 @@ package org.opensearch.client.sniff; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node; import org.opensearch.client.RestClient; import org.opensearch.client.RestClientTestCase; import org.opensearch.client.sniff.Sniffer.DefaultScheduler; import org.opensearch.client.sniff.Sniffer.Scheduler; -import org.apache.hc.core5.http.HttpHost; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.stubbing.Answer; import java.io.IOException; import java.util.ArrayList; @@ -61,6 +59,9 @@ import 
java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.Matchers.greaterThan; diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/documentation/SnifferDocumentation.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/documentation/SnifferDocumentation.java index 8f3e446d8aefb..8a4ca1fb0a136 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/documentation/SnifferDocumentation.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/documentation/SnifferDocumentation.java @@ -35,8 +35,8 @@ import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node; import org.opensearch.client.RestClient; -import org.opensearch.client.sniff.OpenSearchNodesSniffer; import org.opensearch.client.sniff.NodesSniffer; +import org.opensearch.client.sniff.OpenSearchNodesSniffer; import org.opensearch.client.sniff.SniffOnFailureListener; import org.opensearch.client.sniff.Sniffer; @@ -49,15 +49,15 @@ * You need to wrap your code between two tags like: * // tag::example[] * // end::example[] - * + * <p> * Where example is your tag name. - * + * <p> * Then in the documentation, you can extract what is between tag and end tags with * ["source","java",subs="attributes,callouts,macros"] * -------------------------------------------------- * include-tagged::{doc-tests}/SnifferDocumentation.java[example] * -------------------------------------------------- - * + * <p> * Note that this is not a test class as we are only interested in testing that docs snippets compile. We don't want * to send requests to a node and we don't even have the tools to do it. */ diff --git a/client/test/build.gradle b/client/test/build.gradle index f81a009389681..b77865df6decf 100644 --- a/client/test/build.gradle +++ b/client/test/build.gradle @@ -30,8 +30,8 @@ apply plugin: 'opensearch.build' java { - targetCompatibility = JavaVersion.VERSION_11 - sourceCompatibility = JavaVersion.VERSION_11 + targetCompatibility = JavaVersion.VERSION_1_8 + sourceCompatibility = JavaVersion.VERSION_1_8 } base { @@ -69,3 +69,10 @@ dependenciesInfo.enabled = false //we aren't releasing this jar thirdPartyAudit.enabled = false test.enabled = false + +tasks.withType(JavaCompile) { + // Suppressing '[options] target value 8 is obsolete and will be removed in a future release' + configure(options) { + options.compilerArgs << '-Xlint:-options' + } +} diff --git a/distribution/archives/build.gradle b/distribution/archives/build.gradle index e7137127053e2..161b8008525b4 100644 --- a/distribution/archives/build.gradle +++ b/distribution/archives/build.gradle @@ -28,9 +28,11 @@ * under the License. 
*/ +import org.opensearch.gradle.JavaPackageType + apply plugin: 'opensearch.internal-distribution-archive-setup' -CopySpec archiveFiles(CopySpec modulesFiles, String distributionType, String platform, String architecture, boolean jdk) { +CopySpec archiveFiles(CopySpec modulesFiles, String distributionType, String platform, String architecture, JavaPackageType java) { return copySpec { into("opensearch-${version}") { into('lib') { @@ -39,19 +41,23 @@ CopySpec archiveFiles(CopySpec modulesFiles, String distributionType, String pla into('config') { dirMode 0750 fileMode 0660 - with configFiles(distributionType, jdk) + with configFiles(distributionType, java) from { dirMode 0750 jvmOptionsDir.getParent() } } into('bin') { - with binFiles(distributionType, jdk) + with binFiles(distributionType, java) } - if (jdk) { + if (java == JavaPackageType.JDK) { into("darwin".equals(platform) ? 'jdk.app' : 'jdk') { with jdkFiles(project, platform, architecture) } + } else if (java == JavaPackageType.JRE) { + into("darwin".equals(platform) ? 'jre.app' : 'jre') { + with jreFiles(project, platform, architecture) + } } into('') { from { @@ -73,7 +79,7 @@ CopySpec archiveFiles(CopySpec modulesFiles, String distributionType, String pla rename { 'LICENSE.txt' } } - with noticeFile(jdk) + with noticeFile(java) into('modules') { with modulesFiles } @@ -84,77 +90,84 @@ CopySpec archiveFiles(CopySpec modulesFiles, String distributionType, String pla distribution_archives { integTestZip { content { - archiveFiles(transportModulesFiles, 'zip', null, 'x64', false) + archiveFiles(transportModulesFiles, 'zip', null, 'x64', JavaPackageType.NONE) } } darwinTar { archiveClassifier = 'darwin-x64' content { - archiveFiles(modulesFiles('darwin-x64'), 'tar', 'darwin', 'x64', true) + archiveFiles(modulesFiles('darwin-x64'), 'tar', 'darwin', 'x64', JavaPackageType.JDK) } } darwinArm64Tar { archiveClassifier = 'darwin-arm64' content { - archiveFiles(modulesFiles('darwin-arm64'), 'tar', 'darwin', 'arm64', true) + archiveFiles(modulesFiles('darwin-arm64'), 'tar', 'darwin', 'arm64', JavaPackageType.JDK) } } noJdkDarwinTar { archiveClassifier = 'no-jdk-darwin-x64' content { - archiveFiles(modulesFiles('darwin-x64'), 'tar', 'darwin', 'x64', false) + archiveFiles(modulesFiles('darwin-x64'), 'tar', 'darwin', 'x64', JavaPackageType.NONE) } } noJdkDarwinArm64Tar { archiveClassifier = 'no-jdk-darwin-arm64' content { - archiveFiles(modulesFiles('darwin-arm64'), 'tar', 'darwin', 'arm64', false) + archiveFiles(modulesFiles('darwin-arm64'), 'tar', 'darwin', 'arm64', JavaPackageType.NONE) } } freebsdTar { archiveClassifier = 'freebsd-x64' content { - archiveFiles(modulesFiles('freebsd-x64'), 'tar', 'freebsd', 'x64', false) + archiveFiles(modulesFiles('freebsd-x64'), 'tar', 'freebsd', 'x64', JavaPackageType.NONE) } } noJdkFreebsdTar { archiveClassifier = 'no-jdk-freebsd-x64' content { - archiveFiles(modulesFiles('freebsd-x64'), 'tar', 'freebsd', 'x64', false) + archiveFiles(modulesFiles('freebsd-x64'), 'tar', 'freebsd', 'x64', JavaPackageType.NONE) } } linuxArm64Tar { archiveClassifier = 'linux-arm64' content { - archiveFiles(modulesFiles('linux-arm64'), 'tar', 'linux', 'arm64', true) + archiveFiles(modulesFiles('linux-arm64'), 'tar', 'linux', 'arm64', JavaPackageType.JDK) } } noJdkLinuxArm64Tar { archiveClassifier = 'no-jdk-linux-arm64' content { - archiveFiles(modulesFiles('linux-arm64'), 'tar', 'linux', 'arm64', false) + archiveFiles(modulesFiles('linux-arm64'), 'tar', 'linux', 'arm64', JavaPackageType.NONE) } } linuxTar { archiveClassifier = 
'linux-x64' content { - archiveFiles(modulesFiles('linux-x64'), 'tar', 'linux', 'x64', true) + archiveFiles(modulesFiles('linux-x64'), 'tar', 'linux', 'x64', JavaPackageType.JDK) } } noJdkLinuxTar { archiveClassifier = 'no-jdk-linux-x64' content { - archiveFiles(modulesFiles('linux-x64'), 'tar', 'linux', 'x64', false) + archiveFiles(modulesFiles('linux-x64'), 'tar', 'linux', 'x64', JavaPackageType.NONE) + } + } + + jreLinuxTar { + archiveClassifier = 'jre-linux-x64' + content { + archiveFiles(modulesFiles('linux-x64'), 'tar', 'linux', 'x64', JavaPackageType.JRE) } } @@ -163,7 +176,7 @@ distribution_archives { linuxS390xTar { archiveClassifier = 'linux-s390x' content { - archiveFiles(modulesFiles('linux-s390x'), 'tar', 'linux', 's390x', false) + archiveFiles(modulesFiles('linux-s390x'), 'tar', 'linux', 's390x', JavaPackageType.NONE) } } @@ -171,28 +184,28 @@ distribution_archives { linuxPpc64leTar { archiveClassifier = 'linux-ppc64le' content { - archiveFiles(modulesFiles('linux-ppc64le'), 'tar', 'linux', 'ppc64le', true) + archiveFiles(modulesFiles('linux-ppc64le'), 'tar', 'linux', 'ppc64le', JavaPackageType.JDK) } } noJdkLinuxPpc64leTar { archiveClassifier = 'no-jdk-linux-ppc64le' content { - archiveFiles(modulesFiles('linux-ppc64le'), 'tar', 'linux', 'ppc64le', false) + archiveFiles(modulesFiles('linux-ppc64le'), 'tar', 'linux', 'ppc64le', JavaPackageType.NONE) } } windowsZip { archiveClassifier = 'windows-x64' content { - archiveFiles(modulesFiles('windows-x64'), 'zip', 'windows', 'x64', true) + archiveFiles(modulesFiles('windows-x64'), 'zip', 'windows', 'x64', JavaPackageType.JDK) } } noJdkWindowsZip { archiveClassifier = 'no-jdk-windows-x64' content { - archiveFiles(modulesFiles('windows-x64'), 'zip', 'windows', 'x64', false) + archiveFiles(modulesFiles('windows-x64'), 'zip', 'windows', 'x64', JavaPackageType.NONE) } } } diff --git a/distribution/archives/darwin-arm64-tar/build.gradle b/distribution/archives/darwin-arm64-tar/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/archives/darwin-arm64-tar/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/archives/darwin-tar/build.gradle b/distribution/archives/darwin-tar/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/archives/darwin-tar/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. 
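
The archives build script above replaces the old boolean jdk flag with the tri-state JavaPackageType, since the new jreLinuxTar distribution needs a third state beyond "JDK bundled" and "no runtime", and the runtime unpacks into jdk/jdk.app or jre/jre.app depending on platform. A hedged Java rendering of that dispatch; the enum constants mirror org.opensearch.gradle.JavaPackageType from the diff, while the helper below is purely illustrative:

    // mirrors org.opensearch.gradle.JavaPackageType from the diff
    enum JavaPackageType { NONE, JDK, JRE }

    public class RuntimeDirSketch {
        // illustrative helper: which directory the bundled runtime unpacks into
        static String runtimeDir(String platform, JavaPackageType java) {
            switch (java) {
                case JDK:
                    return "darwin".equals(platform) ? "jdk.app" : "jdk";
                case JRE:
                    return "darwin".equals(platform) ? "jre.app" : "jre";
                default:
                    return null; // no-jdk archives bundle no runtime at all
            }
        }

        public static void main(String[] args) {
            System.out.println(runtimeDir("linux", JavaPackageType.JRE));    // jre
            System.out.println(runtimeDir("darwin", JavaPackageType.JDK));   // jdk.app
            System.out.println(runtimeDir("windows", JavaPackageType.NONE)); // null
        }
    }
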
+ +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/archives/freebsd-tar/build.gradle b/distribution/archives/freebsd-tar/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/archives/freebsd-tar/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/archives/integ-test-zip/build.gradle b/distribution/archives/integ-test-zip/build.gradle index 9418223b0a44d..ffaea5e8ca771 100644 --- a/distribution/archives/integ-test-zip/build.gradle +++ b/distribution/archives/integ-test-zip/build.gradle @@ -38,7 +38,7 @@ apply plugin: 'com.netflix.nebula.maven-publish' base { group = "org.opensearch.distribution.integ-test-zip" - archivesBaseName = "opensearch" + archivesName = "opensearch" } integTest { diff --git a/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/JsonLogsFormatAndParseIT.java b/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/JsonLogsFormatAndParseIT.java index 88f667549f3e8..faef1441d0a02 100644 --- a/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/JsonLogsFormatAndParseIT.java +++ b/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/JsonLogsFormatAndParseIT.java @@ -51,6 +51,7 @@ protected Matcher<String> nodeNameMatcher() { return is("integTest-0"); } + @SuppressWarnings("removal") @Override protected BufferedReader openReader(Path logFile) { assumeFalse("Skipping test because it is being run against an external cluster.", diff --git a/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/NodeRestUsageIT.java b/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/NodeRestUsageIT.java index d397a1c967ad0..59df4122713d5 100644 --- a/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/NodeRestUsageIT.java +++ b/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/NodeRestUsageIT.java @@ -35,8 +35,8 @@ import org.opensearch.client.Request; import org.opensearch.client.Response; import org.opensearch.client.ResponseException; -import org.opensearch.common.Strings; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.Strings; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.search.aggregations.AggregationBuilders; import org.opensearch.search.builder.SearchSourceBuilder; @@ -172,8 +172,8 @@ public void testAggregationUsage() throws IOException { .aggregation(AggregationBuilders.terms("str_terms").field("str.keyword")) .aggregation(AggregationBuilders.terms("num_terms").field("num")) .aggregation(AggregationBuilders.avg("num_avg").field("num")); - searchRequest.setJsonEntity(Strings.toString(XContentType.JSON, searchSource)); - searchRequest.setJsonEntity(Strings.toString(XContentType.JSON, searchSource)); + 
searchRequest.setJsonEntity(Strings.toString(MediaTypeRegistry.JSON, searchSource)); + searchRequest.setJsonEntity(Strings.toString(MediaTypeRegistry.JSON, searchSource)); client().performRequest(searchRequest); searchRequest = new Request("GET", "/test/_search"); @@ -182,8 +182,8 @@ public void testAggregationUsage() throws IOException { .aggregation(AggregationBuilders.avg("num1").field("num")) .aggregation(AggregationBuilders.avg("num2").field("num")) .aggregation(AggregationBuilders.terms("foo").field("foo.keyword")); - String r = Strings.toString(XContentType.JSON, searchSource); - searchRequest.setJsonEntity(Strings.toString(XContentType.JSON, searchSource)); + String r = Strings.toString(MediaTypeRegistry.JSON, searchSource); + searchRequest.setJsonEntity(Strings.toString(MediaTypeRegistry.JSON, searchSource)); client().performRequest(searchRequest); Response response = client().performRequest(new Request("GET", "_nodes/usage")); diff --git a/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/WaitForRefreshAndCloseIT.java b/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/WaitForRefreshAndCloseIT.java index 07576dacffb03..b98941874ec6c 100644 --- a/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/WaitForRefreshAndCloseIT.java +++ b/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/WaitForRefreshAndCloseIT.java @@ -32,7 +32,7 @@ package org.opensearch.test.rest; -import org.opensearch.action.ActionFuture; +import org.opensearch.common.action.ActionFuture; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.client.Request; import org.opensearch.client.Response; diff --git a/distribution/archives/jre-linux-tar/build.gradle b/distribution/archives/jre-linux-tar/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/archives/jre-linux-tar/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/archives/linux-arm64-tar/build.gradle b/distribution/archives/linux-arm64-tar/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/archives/linux-arm64-tar/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. 
+ +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/archives/linux-ppc64le-tar/build.gradle b/distribution/archives/linux-ppc64le-tar/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/archives/linux-ppc64le-tar/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/archives/linux-s390x-tar/build.gradle b/distribution/archives/linux-s390x-tar/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/archives/linux-s390x-tar/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/archives/linux-tar/build.gradle b/distribution/archives/linux-tar/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/archives/linux-tar/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/archives/no-jdk-darwin-arm64-tar/build.gradle b/distribution/archives/no-jdk-darwin-arm64-tar/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/archives/no-jdk-darwin-arm64-tar/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. 
+ +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/archives/no-jdk-darwin-tar/build.gradle b/distribution/archives/no-jdk-darwin-tar/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/archives/no-jdk-darwin-tar/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/archives/no-jdk-freebsd-tar/build.gradle b/distribution/archives/no-jdk-freebsd-tar/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/archives/no-jdk-freebsd-tar/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/archives/no-jdk-linux-arm64-tar/build.gradle b/distribution/archives/no-jdk-linux-arm64-tar/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/archives/no-jdk-linux-arm64-tar/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/archives/no-jdk-linux-ppc64le-tar/build.gradle b/distribution/archives/no-jdk-linux-ppc64le-tar/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/archives/no-jdk-linux-ppc64le-tar/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. 
+ +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/archives/no-jdk-linux-tar/build.gradle b/distribution/archives/no-jdk-linux-tar/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/archives/no-jdk-linux-tar/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/archives/no-jdk-windows-zip/build.gradle b/distribution/archives/no-jdk-windows-zip/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/archives/no-jdk-windows-zip/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/archives/windows-zip/build.gradle b/distribution/archives/windows-zip/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/archives/windows-zip/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/build.gradle b/distribution/build.gradle index 2378ed833ead7..35ca84ca66dba 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -36,6 +36,7 @@ import org.opensearch.gradle.MavenFilteringHack import org.opensearch.gradle.NoticeTask import org.opensearch.gradle.VersionProperties import org.opensearch.gradle.info.BuildParams +import org.opensearch.gradle.JavaPackageType import java.nio.file.Files import java.nio.file.Path @@ -275,13 +276,14 @@ project(':test:external-modules').subprojects.each { Project testModule -> configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { apply plugin: 'opensearch.jdk-download' + apply plugin: 'opensearch.jre-download' apply plugin: 'opensearch.repositories' // Setup all required JDKs project.jdks { ['darwin', 'linux', 'windows'].each { platform -> (platform == 'linux' || platform == 'darwin' ? 
['x64', 'aarch64', 's390x', 'ppc64le'] : ['x64']).each { architecture -> - "bundled_${platform}_${architecture}" { + "bundled_jdk_${platform}_${architecture}" { it.platform = platform it.version = VersionProperties.getBundledJdk(platform, architecture) it.vendor = VersionProperties.bundledJdkVendor @@ -291,6 +293,20 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { } } + // Setup all required JREs + project.jres { + ['darwin', 'linux', 'windows'].each { platform -> + (platform == 'linux' || platform == 'darwin' ? ['x64', 'aarch64', 's390x', 'ppc64le'] : ['x64']).each { architecture -> + "bundled_jre_${platform}_${architecture}" { + it.platform = platform + it.version = VersionProperties.getBundledJre(platform, architecture) + it.vendor = VersionProperties.bundledJdkVendor + it.architecture = architecture + } + } + } + } + // TODO: the map needs to be an input of the tasks, so that when it changes, the task will re-run... /***************************************************************************** * Properties to expand when copying packaging files * @@ -377,20 +393,20 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { from buildTransportModulesTaskProvider } - configFiles = { distributionType, jdk -> + configFiles = { distributionType, java -> copySpec { with copySpec { // main config files, processed with distribution specific substitutions from '../src/config' exclude 'log4j2.properties' // this is handled separately below - MavenFilteringHack.filter(it, expansionsForDistribution(distributionType, jdk)) + MavenFilteringHack.filter(it, expansionsForDistribution(distributionType, java)) } from project(':distribution').buildLog4jConfig from project(':distribution').buildConfig } } - binFiles = { distributionType, jdk -> + binFiles = { distributionType, java -> copySpec { // non-windows files, for all distributions with copySpec { @@ -398,7 +414,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { exclude '*.exe' exclude '*.bat' eachFile { it.setMode(0755) } - MavenFilteringHack.filter(it, expansionsForDistribution(distributionType, jdk)) + MavenFilteringHack.filter(it, expansionsForDistribution(distributionType, java)) } // windows files, only for zip if (distributionType == 'zip') { @@ -406,7 +422,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { from '../src/bin' include '*.bat' filter(FixCrLfFilter, eol: FixCrLfFilter.CrLf.newInstance('crlf')) - MavenFilteringHack.filter(it, expansionsForDistribution(distributionType, jdk)) + MavenFilteringHack.filter(it, expansionsForDistribution(distributionType, java)) } with copySpec { from '../src/bin' @@ -424,12 +440,12 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { } } - noticeFile = { jdk -> + noticeFile = { java -> copySpec { if (project.name == 'integ-test-zip') { from buildServerNoticeTaskProvider } else { - if (jdk) { + if (java != JavaPackageType.NONE) { from buildNoticeTaskProvider } else { from buildNoJdkNoticeTaskProvider @@ -446,7 +462,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { if ("arm64".equals(architecture)) { architecture = "aarch64" } - from project.jdks."bundled_${platform}_${architecture}" + from project.jdks."bundled_jdk_${platform}_${architecture}" exclude "demo/**" /* * The Contents/MacOS directory interferes with notarization, and is unused by our distribution, so we exclude @@ -465,6 +481,31 @@ 
configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { } } } + + jreFiles = { Project project, String platform, String architecture -> + return copySpec { + /* + * Jdk uses aarch64 from ARM. Translating from arm64 to aarch64 which Jdk understands. + */ + if ("arm64".equals(architecture)) { + architecture = "aarch64" + } + from project.jres."bundled_jre_${platform}_${architecture}" + exclude "demo/**" + /* + * The Contents/MacOS directory interferes with notarization, and is unused by our distribution, so we exclude + * it from the build. + */ + if ("darwin".equals(platform)) { + exclude "Contents/MacOS" + } + eachFile { FileCopyDetails details -> + if (details.relativePath.segments[-2] == 'bin' || details.relativePath.segments[-1] == 'jspawnhelper') { + details.mode = 0755 + } + } + } + } } } @@ -583,7 +624,7 @@ subprojects { ], 'opensearch.bundled_jdk': [ - 'def': jdk ? 'true' : 'false' + 'def': jdk != JavaPackageType.NONE ? true : false ], 'license.name': [ diff --git a/distribution/docker/docker-arm64-build-context/build.gradle b/distribution/docker/docker-arm64-build-context/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/docker/docker-arm64-build-context/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/docker/docker-arm64-export/build.gradle b/distribution/docker/docker-arm64-export/build.gradle new file mode 100644 index 0000000000000..62f3dc68b0c8e --- /dev/null +++ b/distribution/docker/docker-arm64-export/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// export is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/docker/docker-export/build.gradle b/distribution/docker/docker-export/build.gradle index 3506c4e39c234..62f3dc68b0c8e 100644 --- a/distribution/docker/docker-export/build.gradle +++ b/distribution/docker/docker-export/build.gradle @@ -11,3 +11,5 @@ // This file is intentionally blank. All configuration of the // export is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/docker/docker-ppc64le-export/build.gradle b/distribution/docker/docker-ppc64le-export/build.gradle index 820a0cdf69dfc..ae7def32c4d6c 100644 --- a/distribution/docker/docker-ppc64le-export/build.gradle +++ b/distribution/docker/docker-ppc64le-export/build.gradle @@ -10,3 +10,5 @@ // This file is intentionally blank. All configuration of the // export is done in the parent project. 
+ +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/docker/docker-s390x-export/build.gradle b/distribution/docker/docker-s390x-export/build.gradle new file mode 100644 index 0000000000000..62f3dc68b0c8e --- /dev/null +++ b/distribution/docker/docker-s390x-export/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// export is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/docker/src/docker/config/log4j2.properties b/distribution/docker/src/docker/config/log4j2.properties index 761478a9fdc6e..8edd6a7354a16 100644 --- a/distribution/docker/src/docker/config/log4j2.properties +++ b/distribution/docker/src/docker/config/log4j2.properties @@ -34,6 +34,16 @@ logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling logger.deprecation.appenderRef.header_warning.ref = header_warning logger.deprecation.additivity = false +appender.search_request_slowlog_json_appender.type = Console +appender.search_request_slowlog_json_appender.name = search_request_slowlog_json_appender +appender.search_request_slowlog_json_appender.layout.type = OpenSearchJsonLayout +appender.search_request_slowlog_json_appender.layout.type_name = search_request_slowlog + +logger.search_request_slowlog_logger.name = cluster.search.request.slowlog +logger.search_request_slowlog_logger.level = trace +logger.search_request_slowlog_logger.appenderRef.search_request_slowlog_json_appender.ref = search_request_slowlog_json_appender +logger.search_request_slowlog_logger.additivity = false + appender.index_search_slowlog_rolling.type = Console appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling appender.index_search_slowlog_rolling.layout.type = OpenSearchJsonLayout diff --git a/distribution/packages/arm64-deb/build.gradle b/distribution/packages/arm64-deb/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/packages/arm64-deb/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/packages/arm64-no-jdk-deb/build.gradle b/distribution/packages/arm64-no-jdk-deb/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/packages/arm64-no-jdk-deb/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. 
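
The docker log4j2.properties hunk above wires a new search-request slowlog logger, named cluster.search.request.slowlog, to a console JSON appender at trace level with additivity off, so its events do not also flow to the root logger. For orientation, that logger is addressable by name through the ordinary Log4j2 API; a minimal sketch, assuming log4j-api on the classpath and an illustrative message body:

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    public class SlowlogLoggerSketch {
        public static void main(String[] args) {
            // resolves to the logger configured in log4j2.properties; with
            // additivity = false its events go only to the slowlog appender
            Logger slowlog = LogManager.getLogger("cluster.search.request.slowlog");
            slowlog.trace("illustrative slowlog event payload");
        }
    }
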
See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/packages/arm64-no-jdk-rpm/build.gradle b/distribution/packages/arm64-no-jdk-rpm/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/packages/arm64-no-jdk-rpm/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/packages/arm64-rpm/build.gradle b/distribution/packages/arm64-rpm/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/packages/arm64-rpm/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index 7914fcc172ef4..4e85d19986e43 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -63,7 +63,7 @@ import java.util.regex.Pattern */ plugins { - id "com.netflix.nebula.ospackage-base" version "11.4.0" + id "com.netflix.nebula.ospackage-base" version "11.8.1" } void addProcessFilesTask(String type, boolean jdk) { @@ -213,7 +213,7 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) { configurationFile '/etc/opensearch/jvm.options' configurationFile '/etc/opensearch/log4j2.properties' from("${packagingFiles}") { - dirMode 02750 + dirMode 0750 into('/etc') permissionGroup 'opensearch' includeEmptyDirs true @@ -223,7 +223,7 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) { } from("${packagingFiles}/etc/opensearch") { into('/etc/opensearch') - dirMode 02750 + dirMode 0750 fileMode 0660 permissionGroup 'opensearch' includeEmptyDirs true @@ -281,8 +281,8 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) { dirMode mode } } - copyEmptyDir('/var/log/opensearch', 'opensearch', 'opensearch', 02750) - copyEmptyDir('/var/lib/opensearch', 'opensearch', 'opensearch', 02750) + copyEmptyDir('/var/log/opensearch', 'opensearch', 'opensearch', 0750) + copyEmptyDir('/var/lib/opensearch', 'opensearch', 'opensearch', 0750) copyEmptyDir('/usr/share/opensearch/plugins', 'root', 'root', 0755) into '/usr/share/opensearch' diff --git a/distribution/packages/deb/build.gradle b/distribution/packages/deb/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ 
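
The packages/build.gradle hunk above drops the setgid bit from the packaged directories (02750 becomes 0750), and the deb lintian override file changes in step. The leading 2 in the octal mode is exactly the setgid flag; a quick check using Java's own octal literals:

    public class ModeBitsSketch {
        public static void main(String[] args) {
            int before = 02750; // rwxr-s--- : setgid directory mode
            int after = 0750;   // rwxr-x--- : same rwx bits, no setgid
            System.out.println(Integer.toOctalString(before)); // 2750
            System.out.println(Integer.toOctalString(after));  // 750
            // the bits that differ are exactly the setgid flag (02000)
            System.out.println(Integer.toOctalString(before ^ after)); // 2000
        }
    }
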
b/distribution/packages/deb/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/packages/no-jdk-arm64-deb/build.gradle b/distribution/packages/no-jdk-arm64-deb/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/packages/no-jdk-arm64-deb/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/packages/no-jdk-arm64-rpm/build.gradle b/distribution/packages/no-jdk-arm64-rpm/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/packages/no-jdk-arm64-rpm/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/packages/no-jdk-deb/build.gradle b/distribution/packages/no-jdk-deb/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/packages/no-jdk-deb/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/packages/no-jdk-rpm/build.gradle b/distribution/packages/no-jdk-rpm/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/packages/no-jdk-rpm/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/packages/rpm/build.gradle b/distribution/packages/rpm/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/distribution/packages/rpm/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/distribution/packages/src/deb/lintian/opensearch b/distribution/packages/src/deb/lintian/opensearch index 854b23131ecbc..e6db8e8c6b322 100644 --- a/distribution/packages/src/deb/lintian/opensearch +++ b/distribution/packages/src/deb/lintian/opensearch @@ -15,11 +15,11 @@ missing-dep-on-jarwrapper # we prefer to not make our config and log files world readable non-standard-file-perm etc/default/opensearch 0660 != 0644 -non-standard-dir-perm etc/opensearch/ 2750 != 0755 -non-standard-dir-perm etc/opensearch/jvm.options.d/ 2750 != 0755 +non-standard-dir-perm etc/opensearch/ 0750 != 0755 +non-standard-dir-perm etc/opensearch/jvm.options.d/ 0750 != 0755 non-standard-file-perm etc/opensearch/* -non-standard-dir-perm var/lib/opensearch/ 2750 != 0755 -non-standard-dir-perm var/log/opensearch/ 2750 != 0755 +non-standard-dir-perm var/lib/opensearch/ 0750 != 0755 +non-standard-dir-perm var/log/opensearch/ 0750 != 0755 executable-is-not-world-readable etc/init.d/opensearch 0750 non-standard-file-permissions-for-etc-init.d-script etc/init.d/opensearch 0750 != 0755 diff --git a/distribution/src/bin/opensearch-env b/distribution/src/bin/opensearch-env index f94824576b122..cd7a0b2b4520b 100644 --- a/distribution/src/bin/opensearch-env +++ b/distribution/src/bin/opensearch-env @@ -46,7 +46,7 @@ fi # now set the classpath OPENSEARCH_CLASSPATH="$OPENSEARCH_HOME/lib/*" -# now set the path to java: OPENSEARCH_JAVA_HOME -> JAVA_HOME -> bundled JDK +# now set the path to java: OPENSEARCH_JAVA_HOME -> JAVA_HOME -> bundled JRE -> bundled JDK if [ ! -z "$OPENSEARCH_JAVA_HOME" ]; then JAVA="$OPENSEARCH_JAVA_HOME/bin/java" JAVA_TYPE="OPENSEARCH_JAVA_HOME" @@ -57,13 +57,18 @@ else if [ $OS = "darwin" ]; then # macOS bundled Java JAVA="$OPENSEARCH_HOME/jdk.app/Contents/Home/bin/java" + JAVA_TYPE="bundled jdk" elif [ $OS = "freebsd" ]; then # using FreeBSD default java from ports if JAVA_HOME is not set JAVA="/usr/local/bin/java" + JAVA_TYPE="bundled jdk" + elif [ -d "$OPENSEARCH_HOME/jre" ]; then + JAVA="$OPENSEARCH_HOME/jre/bin/java" + JAVA_TYPE="bundled jre" else JAVA="$OPENSEARCH_HOME/jdk/bin/java" + JAVA_TYPE="bundled jdk" fi - JAVA_TYPE="bundled jdk" fi if [ ! 
-x "$JAVA" ]; then diff --git a/distribution/src/config/jvm.options b/distribution/src/config/jvm.options index 952110c6c0289..f0ac98faffda9 100644 --- a/distribution/src/config/jvm.options +++ b/distribution/src/config/jvm.options @@ -38,12 +38,12 @@ 8-10:-XX:+UseCMSInitiatingOccupancyOnly ## G1GC Configuration -# NOTE: G1 GC is only supported on JDK version 10 or later -# to use G1GC, uncomment the next two lines and update the version on the -# following three lines to your version of the JDK -# 10:-XX:-UseConcMarkSweepGC -# 10:-XX:-UseCMSInitiatingOccupancyOnly +# NOTE: G1GC is the default GC for all JDKs 11 and newer 11-:-XX:+UseG1GC +# See https://github.com/elastic/elasticsearch/pull/46169 for the history +# behind these settings, but the tl;dr is that default values can lead +# to situations where heap usage grows enough to trigger a circuit breaker +# before GC kicks in. 11-:-XX:G1ReservePercent=25 11-:-XX:InitiatingHeapOccupancyPercent=30 @@ -81,7 +81,7 @@ ${error.file} # JDK 20+ Incubating Vector Module for SIMD optimizations; # disabling may reduce performance on vector optimized lucene -20:--add-modules=jdk.incubator.vector +20-:--add-modules=jdk.incubator.vector # HDFS ForkJoinPool.common() support by SecurityManager -Djava.util.concurrent.ForkJoinPool.common.threadFactory=org.opensearch.secure_sm.SecuredForkJoinWorkerThreadFactory diff --git a/distribution/src/config/log4j2.properties b/distribution/src/config/log4j2.properties index bb27aaf2e22e6..d040afae82e53 100644 --- a/distribution/src/config/log4j2.properties +++ b/distribution/src/config/log4j2.properties @@ -113,6 +113,47 @@ logger.deprecation.appenderRef.deprecation_rolling_old.ref = deprecation_rolling logger.deprecation.appenderRef.header_warning.ref = header_warning logger.deprecation.additivity = false +######## Search Request Slowlog JSON #################### +appender.search_request_slowlog_json_appender.type = RollingFile +appender.search_request_slowlog_json_appender.name = search_request_slowlog_json_appender +appender.search_request_slowlog_json_appender.fileName = ${sys:opensearch.logs.base_path}${sys:file.separator}${sys:opensearch.logs\ + .cluster_name}_index_search_slowlog.json +appender.search_request_slowlog_json_appender.filePermissions = rw-r----- +appender.search_request_slowlog_json_appender.layout.type = OpenSearchJsonLayout +appender.search_request_slowlog_json_appender.layout.type_name = search_request_slowlog +appender.search_request_slowlog_json_appender.layout.opensearchmessagefields=message,took,took_millis,phase_took,total_hits,search_type,shards,source,id + +appender.search_request_slowlog_json_appender.filePattern = ${sys:opensearch.logs.base_path}${sys:file.separator}${sys:opensearch.logs\ + .cluster_name}_index_search_slowlog-%i.json.gz +appender.search_request_slowlog_json_appender.policies.type = Policies +appender.search_request_slowlog_json_appender.policies.size.type = SizeBasedTriggeringPolicy +appender.search_request_slowlog_json_appender.policies.size.size = 1GB +appender.search_request_slowlog_json_appender.strategy.type = DefaultRolloverStrategy +appender.search_request_slowlog_json_appender.strategy.max = 4 +################################################# +######## Search Request Slowlog Log File - old style pattern #### +appender.search_request_slowlog_log_appender.type = RollingFile +appender.search_request_slowlog_log_appender.name = search_request_slowlog_log_appender +appender.search_request_slowlog_log_appender.fileName = 
${sys:opensearch.logs.base_path}${sys:file.separator}${sys:opensearch.logs.cluster_name}\ + _index_search_slowlog.log +appender.search_request_slowlog_log_appender.filePermissions = rw-r----- +appender.search_request_slowlog_log_appender.layout.type = PatternLayout +appender.search_request_slowlog_log_appender.layout.pattern = [%d{ISO8601}][%-5p][%c{1.}] [%node_name]%marker %m%n + +appender.search_request_slowlog_log_appender.filePattern = ${sys:opensearch.logs.base_path}${sys:file.separator}${sys:opensearch.logs.cluster_name}\ + _index_search_slowlog-%i.log.gz +appender.search_request_slowlog_log_appender.policies.type = Policies +appender.search_request_slowlog_log_appender.policies.size.type = SizeBasedTriggeringPolicy +appender.search_request_slowlog_log_appender.policies.size.size = 1GB +appender.search_request_slowlog_log_appender.strategy.type = DefaultRolloverStrategy +appender.search_request_slowlog_log_appender.strategy.max = 4 +################################################# +logger.search_request_slowlog_logger.name = cluster.search.request.slowlog +logger.search_request_slowlog_logger.level = trace +logger.search_request_slowlog_logger.appenderRef.search_request_slowlog_json_appender.ref = search_request_slowlog_json_appender +logger.search_request_slowlog_logger.appenderRef.search_request_slowlog_log_appender.ref = search_request_slowlog_log_appender +logger.search_request_slowlog_logger.additivity = false + ######## Search slowlog JSON #################### appender.index_search_slowlog_rolling.type = RollingFile appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling diff --git a/distribution/src/config/opensearch.yml b/distribution/src/config/opensearch.yml index 3c4fe822005e0..10bab9b3fce92 100644 --- a/distribution/src/config/opensearch.yml +++ b/distribution/src/config/opensearch.yml @@ -92,24 +92,16 @@ ${path.logs} # cluster.remote_store.enabled: true # # Repository to use for segment upload while enforcing remote store for an index -# cluster.remote_store.segment.repository: my-repo-1 +# node.attr.remote_store.segment.repository: my-repo-1 # # Repository to use for translog upload while enforcing remote store for an index -# cluster.remote_store.translog.repository: my-repo-1 +# node.attr.remote_store.translog.repository: my-repo-1 # # ---------------------------------- Experimental Features ----------------------------------- -# # Gates the visibility of the experimental segment replication features until they are production ready. # #opensearch.experimental.feature.segment_replication_experimental.enabled: false # -# -# Gates the visibility of the index setting that allows persisting data to remote store along with local disk. -# Once the feature is ready for production release, this feature flag can be removed. -# -#opensearch.experimental.feature.remote_store.enabled: false -# -# # Gates the functionality of a new parameter to the snapshot restore API # that allows for creation of a new index type that searches a snapshot # directly in a remote repository without restoring all index data to disk @@ -125,7 +117,11 @@ ${path.logs} #opensearch.experimental.feature.extensions.enabled: false # # -# Gates the concurrent segment search feature. This feature enables concurrent segment search in a separate -# index searcher threadpool. +# Gates the optimization of datetime formatters caching along with change in default datetime formatter +# Once there is no observed impact on performance, this feature flag can be removed. 
+# +#opensearch.experimental.optimization.datetime_formatter_caching.enabled: false +# +# Gates the functionality of enabling OpenSearch to use pluggable caches with respective store names via setting. # -#opensearch.experimental.feature.concurrent_segment_search.enabled: false +#opensearch.experimental.feature.pluggable.caching.enabled: false diff --git a/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/CreateKeyStoreCommand.java b/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/CreateKeyStoreCommand.java index 05c1cfdc2fb9a..a4ab98ce730ee 100644 --- a/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/CreateKeyStoreCommand.java +++ b/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/CreateKeyStoreCommand.java @@ -32,10 +32,6 @@ package org.opensearch.common.settings; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.Arrays; - import joptsimple.OptionSet; import joptsimple.OptionSpec; import org.opensearch.cli.ExitCodes; @@ -45,6 +41,10 @@ import org.opensearch.core.common.settings.SecureString; import org.opensearch.env.Environment; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Arrays; + /** * A sub-command for the keystore cli to create a new keystore. */ diff --git a/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/ListKeyStoreCommand.java b/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/ListKeyStoreCommand.java index 32f03ec3822de..379b61efc5d32 100644 --- a/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/ListKeyStoreCommand.java +++ b/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/ListKeyStoreCommand.java @@ -32,14 +32,14 @@ package org.opensearch.common.settings; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - import joptsimple.OptionSet; import org.opensearch.cli.Terminal; import org.opensearch.env.Environment; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + /** * A subcommand for the keystore cli to list all settings in the keystore.
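For context, the listing this subcommand performs can be approximated directly against the keystore API; the sketch below is illustrative only, assuming KeyStoreWrapper.load, decrypt, and getSettingNames behave as their names suggest, and it is not code from this change:

    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    import org.opensearch.common.settings.KeyStoreWrapper;

    // Minimal, hypothetical equivalent of the list subcommand: load the keystore
    // from the config directory and print its setting names in sorted order.
    public class ListKeystoreSettingsSketch {
        public static void main(String[] args) throws Exception {
            Path configDir = Paths.get(args.length > 0 ? args[0] : "config");
            KeyStoreWrapper keystore = KeyStoreWrapper.load(configDir);
            if (keystore == null) {
                System.out.println("no keystore found in " + configDir);
                return;
            }
            keystore.decrypt(new char[0]); // assumes a keystore without a password
            List<String> names = new ArrayList<>(keystore.getSettingNames());
            Collections.sort(names);
            names.forEach(System.out::println);
        }
    }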
*/ diff --git a/distribution/tools/keystore-cli/src/test/java/org/opensearch/bootstrap/BootstrapTests.java b/distribution/tools/keystore-cli/src/test/java/org/opensearch/bootstrap/BootstrapTests.java index d06ffb650dc82..e9219de218aef 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/opensearch/bootstrap/BootstrapTests.java +++ b/distribution/tools/keystore-cli/src/test/java/org/opensearch/bootstrap/BootstrapTests.java @@ -31,12 +31,12 @@ package org.opensearch.bootstrap; -import org.opensearch.common.util.io.IOUtils; import org.opensearch.common.settings.KeyStoreCommandTestCase; import org.opensearch.common.settings.KeyStoreWrapper; import org.opensearch.common.settings.SecureSettings; -import org.opensearch.core.common.settings.SecureString; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.settings.SecureString; import org.opensearch.env.Environment; import org.opensearch.test.OpenSearchTestCase; import org.junit.After; diff --git a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/CreateKeyStoreCommandTests.java b/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/CreateKeyStoreCommandTests.java index 11bfc26e2425c..f554f17c50813 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/CreateKeyStoreCommandTests.java +++ b/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/CreateKeyStoreCommandTests.java @@ -32,16 +32,16 @@ package org.opensearch.common.settings; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.Map; - import org.opensearch.cli.Command; import org.opensearch.cli.ExitCodes; import org.opensearch.cli.UserException; import org.opensearch.env.Environment; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Map; + import static org.hamcrest.Matchers.containsString; public class CreateKeyStoreCommandTests extends KeyStoreCommandTestCase { diff --git a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/KeyStoreCommandTestCase.java b/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/KeyStoreCommandTestCase.java index b87d08bb08399..0ac653d35b07e 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/KeyStoreCommandTestCase.java +++ b/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/KeyStoreCommandTestCase.java @@ -32,25 +32,25 @@ package org.opensearch.common.settings; -import java.io.IOException; -import java.io.InputStream; -import java.nio.file.FileSystem; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.List; - import com.google.common.jimfs.Configuration; import com.google.common.jimfs.Jimfs; -import org.opensearch.common.util.io.IOUtils; import org.apache.lucene.tests.util.LuceneTestCase; import org.opensearch.cli.CommandTestCase; import org.opensearch.common.io.PathUtilsForTesting; +import org.opensearch.common.util.io.IOUtils; import org.opensearch.env.Environment; import org.opensearch.env.TestEnvironment; import org.junit.After; import org.junit.Before; +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.FileSystem; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; + /** * Base 
test case for manipulating the OpenSearch keystore. */ diff --git a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/ListKeyStoreCommandTests.java b/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/ListKeyStoreCommandTests.java index 62d27c4010849..42452d5c12beb 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/ListKeyStoreCommandTests.java +++ b/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/ListKeyStoreCommandTests.java @@ -32,13 +32,13 @@ package org.opensearch.common.settings; -import java.util.Map; - import org.opensearch.cli.Command; import org.opensearch.cli.ExitCodes; import org.opensearch.cli.UserException; import org.opensearch.env.Environment; +import java.util.Map; + import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; diff --git a/distribution/tools/launchers/build.gradle b/distribution/tools/launchers/build.gradle index e75267f7c4a74..aee205a24dea3 100644 --- a/distribution/tools/launchers/build.gradle +++ b/distribution/tools/launchers/build.gradle @@ -39,7 +39,7 @@ dependencies { } base { - archivesBaseName = 'opensearch-launchers' + archivesName = 'opensearch-launchers' } tasks.withType(CheckForbiddenApis).configureEach { diff --git a/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java b/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java index aa3dfbe39ee96..726c381db09f6 100644 --- a/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java +++ b/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java @@ -101,15 +101,15 @@ private static String maybeShowCodeDetailsInExceptionMessages() { } private static String javaLocaleProviders() { - /** - * SPI setting is used to allow loading custom CalendarDataProvider - * in jdk8 it has to be loaded from jre/lib/ext, - * in jdk9+ it is already within ES project and on a classpath - * - * Due to internationalization enhancements in JDK 9 OpenSearch need to set the provider to COMPAT otherwise time/date - * parsing will break in an incompatible way for some date patterns and locales. - * //TODO COMPAT will be deprecated in at some point, see please https://bugs.openjdk.java.net/browse/JDK-8232906 - * See also: documentation in <code>server/org.opensearch.common.time.IsoCalendarDataProvider</code> + /* + SPI setting is used to allow loading custom CalendarDataProvider + in jdk8 it has to be loaded from jre/lib/ext, + in jdk9+ it is already within ES project and on a classpath + + Due to internationalization enhancements in JDK 9 OpenSearch needs to set the provider to COMPAT otherwise time/date + parsing will break in an incompatible way for some date patterns and locales.
+ //TODO COMPAT will be deprecated at some point, please see https://bugs.openjdk.java.net/browse/JDK-8232906 + See also: documentation in <code>server/org.opensearch.common.time.IsoCalendarDataProvider</code> */ return "-Djava.locale.providers=SPI,COMPAT"; } diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index 5103999428814..f40fb1c4b0a9f 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -31,21 +31,21 @@ apply plugin: 'opensearch.build' base { - archivesBaseName = 'opensearch-plugin-cli' + archivesName = 'opensearch-plugin-cli' } dependencies { compileOnly project(":server") compileOnly project(":libs:opensearch-cli") api "org.bouncycastle:bcpg-fips:1.0.7.1" - api "org.bouncycastle:bc-fips:1.0.2.3" + api "org.bouncycastle:bc-fips:1.0.2.4" testImplementation project(":test:framework") testImplementation 'com.google.jimfs:jimfs:1.3.0' testRuntimeOnly("com.google.guava:guava:${versions.guava}") { transitive = false } - implementation 'org.apache.commons:commons-compress:1.23.0' + implementation "org.apache.commons:commons-compress:${versions.commonscompress}" } tasks.named("dependencyLicenses").configure { diff --git a/distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.3.jar.sha1 b/distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.3.jar.sha1 deleted file mode 100644 index c71320050b7de..0000000000000 --- a/distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da62b32cb72591f5b4d322e6ab0ce7de3247b534 \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.4.jar.sha1 b/distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.4.jar.sha1 new file mode 100644 index 0000000000000..da37449f80d7e --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.4.jar.sha1 @@ -0,0 +1 @@ +9008d04fc13da6455e6a792935b93b629757335d \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/commons-compress-1.23.0.jar.sha1 b/distribution/tools/plugin-cli/licenses/commons-compress-1.23.0.jar.sha1 deleted file mode 100644 index 48dba88409c17..0000000000000 --- a/distribution/tools/plugin-cli/licenses/commons-compress-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4af2060ea9b0c8b74f1854c6cafe4d43cfc161fc \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/commons-compress-1.24.0.jar.sha1 b/distribution/tools/plugin-cli/licenses/commons-compress-1.24.0.jar.sha1 new file mode 100644 index 0000000000000..23999d1bfbde4 --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/commons-compress-1.24.0.jar.sha1 @@ -0,0 +1 @@ +b4b1b5a3d9573b2970fddab236102c0a4d27d35e \ No newline at end of file diff --git a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java index 66f43b1e30d28..838d6e22a37bd 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java @@ -107,7 +107,7 @@ /** * A command for the plugin cli to install a plugin into opensearch.
- * + * <p> * The install command takes a plugin id, which may be any of the following: * <ul> * <li>An official opensearch plugin name</li> @@ -411,7 +411,7 @@ private String getMavenUrl(Terminal terminal, String[] coordinates, String platf /** * Returns {@code true} if the given url exists, and {@code false} otherwise. - * + * <p> * The given url must be {@code https} and existing means a {@code HEAD} request returns 200. */ // pkg private for tests to manipulate @@ -698,7 +698,6 @@ InputStream getPublicKey() { /** * Creates a URL and opens a connection. - * * If the URL returns a 404, {@code null} is returned, otherwise the open URL object is returned. */ // pkg private for tests diff --git a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ListPluginsCommand.java b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ListPluginsCommand.java index d269603656114..9ca42ac5f4ec1 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ListPluginsCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ListPluginsCommand.java @@ -78,15 +78,14 @@ private void printPlugin(Environment env, Terminal terminal, Path plugin, String PluginInfo info = PluginInfo.readFromProperties(env.pluginsDir().resolve(plugin)); terminal.println(Terminal.Verbosity.SILENT, prefix + info.getName()); terminal.println(Terminal.Verbosity.VERBOSE, info.toString(prefix)); - if (info.getOpenSearchVersion().equals(Version.CURRENT) == false) { + if (!PluginsService.isPluginVersionCompatible(info, Version.CURRENT)) { terminal.errorPrintln( "WARNING: plugin [" + info.getName() + "] was built for OpenSearch version " - + info.getVersion() - + " but version " + + info.getOpenSearchVersionRangesString() + + " and is not compatible with " + Version.CURRENT - + " is required" ); } } diff --git a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/PluginCli.java b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/PluginCli.java index 3ea8e539cdd5c..f87b72c7ecb5f 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/PluginCli.java +++ b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/PluginCli.java @@ -32,10 +32,10 @@ package org.opensearch.plugins; -import org.opensearch.common.util.io.IOUtils; import org.opensearch.cli.Command; import org.opensearch.cli.LoggingAwareMultiCommand; import org.opensearch.cli.Terminal; +import org.opensearch.common.util.io.IOUtils; import java.io.IOException; import java.util.Collection; diff --git a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ProgressInputStream.java b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ProgressInputStream.java index 579f676631a5a..02be3dbc82a44 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ProgressInputStream.java +++ b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ProgressInputStream.java @@ -41,7 +41,7 @@ * The listener is triggered whenever a full percent is increased * The listener is never triggered twice on the same percentage * The listener will always return 99 percent, if the expectedTotalSize is exceeded, until it is finished - * + * <p> * Only used by the InstallPluginCommand, thus package private here */ abstract class ProgressInputStream extends FilterInputStream { diff --git a/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/InstallPluginCommandTests.java 
b/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/InstallPluginCommandTests.java index 3d92f0c52ba1c..c264788df20e8 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/InstallPluginCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/InstallPluginCommandTests.java @@ -33,6 +33,7 @@ package org.opensearch.plugins; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import com.google.common.jimfs.Configuration; import com.google.common.jimfs.Jimfs; import org.apache.lucene.tests.util.LuceneTestCase; @@ -63,14 +64,16 @@ import org.opensearch.common.SuppressForbidden; import org.opensearch.common.collect.Tuple; import org.opensearch.common.hash.MessageDigests; -import org.opensearch.core.util.FileSystemUtils; import org.opensearch.common.io.PathUtils; import org.opensearch.common.io.PathUtilsForTesting; import org.opensearch.common.settings.Settings; +import org.opensearch.core.util.FileSystemUtils; import org.opensearch.env.Environment; import org.opensearch.env.TestEnvironment; +import org.opensearch.semver.SemverRange; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.PosixPermissionsResetter; +import org.opensearch.test.VersionUtils; import org.junit.After; import org.junit.Before; @@ -283,6 +286,35 @@ static void writePlugin(String name, Path structure, String... additionalProps) writeJar(structure.resolve("plugin.jar"), className); } + static void writePlugin(String name, Path structure, SemverRange opensearchVersionRange, String... additionalProps) throws IOException { + String[] properties = Stream.concat( + Stream.of( + "description", + "fake desc", + "name", + name, + "version", + "1.0", + "dependencies", + "{opensearch:\"" + opensearchVersionRange + "\"}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "FakePlugin" + ), + Arrays.stream(additionalProps) + ).toArray(String[]::new); + PluginTestUtil.writePluginProperties(structure, properties); + String className = name.substring(0, 1).toUpperCase(Locale.ENGLISH) + name.substring(1) + "Plugin"; + writeJar(structure.resolve("plugin.jar"), className); + } + + static Path createPlugin(String name, Path structure, SemverRange opensearchVersionRange, String... additionalProps) + throws IOException { + writePlugin(name, structure, opensearchVersionRange, additionalProps); + return writeZip(structure, null); + } + static void writePluginSecurityPolicy(Path pluginDir, String... 
permissions) throws IOException { StringBuilder securityPolicyContent = new StringBuilder("grant {\n "); for (String permission : permissions) { @@ -866,6 +898,32 @@ public void testInstallMisspelledOfficialPlugins() throws Exception { assertThat(e.getMessage(), containsString("Unknown plugin unknown_plugin")); } + public void testInstallPluginWithCompatibleDependencies() throws Exception { + Tuple<Path, Environment> env = createEnv(fs, temp); + Path pluginDir = createPluginDir(temp); + String pluginZip = createPlugin("fake", pluginDir, SemverRange.fromString("~" + Version.CURRENT.toString())).toUri() + .toURL() + .toString(); + skipJarHellCommand.execute(terminal, Collections.singletonList(pluginZip), false, env.v2()); + assertThat(terminal.getOutput(), containsString("100%")); + } + + public void testInstallPluginWithIncompatibleDependencies() throws Exception { + Tuple<Path, Environment> env = createEnv(fs, temp); + Path pluginDir = createPluginDir(temp); + // Core version is behind plugin version by one w.r.t patch, hence incompatible + Version coreVersion = Version.CURRENT; + Version pluginVersion = VersionUtils.getVersion(coreVersion.major, coreVersion.minor, (byte) (coreVersion.revision + 1)); + String pluginZip = createPlugin("fake", pluginDir, SemverRange.fromString("~" + pluginVersion.toString())).toUri() + .toURL() + .toString(); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> skipJarHellCommand.execute(terminal, Collections.singletonList(pluginZip), false, env.v2()) + ); + assertThat(e.getMessage(), containsString("Plugin [fake] was built for OpenSearch version ~" + pluginVersion)); + } + public void testBatchFlag() throws Exception { MockTerminal terminal = new MockTerminal(); installPlugin(terminal, true); diff --git a/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/ListPluginsCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/ListPluginsCommandTests.java index d84f36d818046..6878efce4c804 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/ListPluginsCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/ListPluginsCommandTests.java @@ -32,14 +32,6 @@ package org.opensearch.plugins; -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.NoSuchFileException; -import java.nio.file.Path; -import java.util.Arrays; -import java.util.Map; -import java.util.stream.Collectors; - import org.apache.lucene.tests.util.LuceneTestCase; import org.opensearch.LegacyESVersion; import org.opensearch.Version; @@ -52,6 +44,14 @@ import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.NoSuchFileException; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.Map; +import java.util.stream.Collectors; + @LuceneTestCase.SuppressFileSystems("*") public class ListPluginsCommandTests extends OpenSearchTestCase { @@ -278,7 +278,7 @@ public void testExistingIncompatiblePlugin() throws Exception { buildFakePlugin(env, "fake desc 2", "fake_plugin2", "org.fake2"); MockTerminal terminal = listPlugins(home); - String message = "plugin [fake_plugin1] was built for OpenSearch version 1.0 but version " + Version.CURRENT + " is required"; + String message = "plugin [fake_plugin1] was built for OpenSearch version 5.0.0 and is not compatible with " + Version.CURRENT; assertEquals("fake_plugin1\nfake_plugin2\n", 
terminal.getOutput()); assertEquals("WARNING: " + message + "\n", terminal.getErrorOutput()); @@ -286,4 +286,41 @@ public void testExistingIncompatiblePlugin() throws Exception { terminal = listPlugins(home, params); assertEquals("fake_plugin1\nfake_plugin2\n", terminal.getOutput()); } + + public void testPluginWithDependencies() throws Exception { + PluginTestUtil.writePluginProperties( + env.pluginsDir().resolve("fake_plugin1"), + "description", + "fake desc 1", + "name", + "fake_plugin1", + "version", + "1.0", + "dependencies", + "{opensearch:\"" + Version.CURRENT + "\"}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "org.fake1" + ); + String[] params = { "-v" }; + MockTerminal terminal = listPlugins(home, params); + assertEquals( + buildMultiline( + "Plugins directory: " + env.pluginsDir(), + "fake_plugin1", + "- Plugin information:", + "Name: fake_plugin1", + "Description: fake desc 1", + "Version: 1.0", + "OpenSearch Version: " + Version.CURRENT.toString(), + "Java Version: " + System.getProperty("java.specification.version"), + "Native Controller: false", + "Extended Plugins: []", + " * Classname: org.fake1", + "Folder name: null" + ), + terminal.getOutput() + ); + } } diff --git a/distribution/tools/upgrade-cli/build.gradle b/distribution/tools/upgrade-cli/build.gradle index 99824463f14f8..92c043132c021 100644 --- a/distribution/tools/upgrade-cli/build.gradle +++ b/distribution/tools/upgrade-cli/build.gradle @@ -10,7 +10,7 @@ apply plugin: 'opensearch.build' base { - archivesBaseName = 'opensearch-upgrade-cli' + archivesName = 'opensearch-upgrade-cli' } dependencies { diff --git a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.15.2.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.15.2.jar.sha1 deleted file mode 100644 index f63416ddb8ceb..0000000000000 --- a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4724a65ac8e8d156a24898d50fd5dbd3642870b8 \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.16.2.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.16.2.jar.sha1 new file mode 100644 index 0000000000000..d7dfc5ff83706 --- /dev/null +++ b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.16.2.jar.sha1 @@ -0,0 +1 @@ +dfcd11c847ea7276aa073c25f5fe8ee361748d7f \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.15.2.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.15.2.jar.sha1 deleted file mode 100644 index f16d80af8dce6..0000000000000 --- a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9353b021f10c307c00328f52090de2bdb4b6ff9c \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.16.2.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.16.2.jar.sha1 new file mode 100644 index 0000000000000..86998b4558461 --- /dev/null +++ b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.16.2.jar.sha1 @@ -0,0 +1 @@ +7fda67535b54d74eebf6157682b835c847410932 \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/DetectEsInstallationTask.java b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/DetectEsInstallationTask.java index 24f4b79d12528..90067ffd221bf 100644 --- 
a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/DetectEsInstallationTask.java +++ b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/DetectEsInstallationTask.java @@ -9,6 +9,7 @@ package org.opensearch.upgrade; import com.fasterxml.jackson.databind.ObjectMapper; + import org.opensearch.Version; import org.opensearch.cli.Terminal; import org.opensearch.common.SuppressForbidden; diff --git a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/ImportYmlConfigTask.java b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/ImportYmlConfigTask.java index cd7c525dd393d..6e29bd2d04239 100644 --- a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/ImportYmlConfigTask.java +++ b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/ImportYmlConfigTask.java @@ -12,9 +12,9 @@ import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsException; +import org.opensearch.common.xcontent.yaml.YamlXContent; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.yaml.YamlXContent; import java.io.IOException; import java.io.OutputStream; diff --git a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/UpgradeTask.java b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/UpgradeTask.java index b7dcbd50cf781..708f644bcdeb6 100644 --- a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/UpgradeTask.java +++ b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/UpgradeTask.java @@ -17,7 +17,7 @@ * An interface for an upgrade task, which in this instance is a unit of * operation that is part of the overall upgrade process. This extends the * {@link java.util.function.Consumer} interface. - * + * <p> * The implementing tasks consume an instance of a tuple of {@link TaskInput} * and {@link Terminal} and operate via side effects.
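As an illustration of that contract, a task can be as small as the following sketch (a hypothetical task, not part of this change; it assumes only that UpgradeTask extends Consumer of a Tuple of TaskInput and Terminal, as the javadoc above states):

    import org.opensearch.cli.Terminal;
    import org.opensearch.common.collect.Tuple;

    // Hypothetical upgrade step: reads shared state from v1() (the TaskInput),
    // reports progress through v2() (the Terminal), and "returns" its results
    // only by mutating the TaskInput for the tasks that run after it.
    class LogStepTask implements UpgradeTask {
        @Override
        public void accept(final Tuple<TaskInput, Terminal> input) {
            final TaskInput taskInput = input.v1();
            final Terminal terminal = input.v2();
            terminal.println("running one upgrade step against the detected installation");
            // a real task would inspect and update taskInput here as its side effect
        }
    }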
* diff --git a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/DetectEsInstallationTaskTests.java b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/DetectEsInstallationTaskTests.java index 1038e6d4a484f..a1391ba70a8e8 100644 --- a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/DetectEsInstallationTaskTests.java +++ b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/DetectEsInstallationTaskTests.java @@ -8,7 +8,6 @@ package org.opensearch.upgrade; -import org.junit.Before; import org.opensearch.cli.MockTerminal; import org.opensearch.cli.Terminal; import org.opensearch.common.SuppressForbidden; @@ -17,6 +16,7 @@ import org.opensearch.env.Environment; import org.opensearch.env.TestEnvironment; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; import java.io.File; import java.nio.file.Path; diff --git a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ImportLog4jPropertiesTaskTests.java b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ImportLog4jPropertiesTaskTests.java index 96544d3297ad4..529253c9ce824 100644 --- a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ImportLog4jPropertiesTaskTests.java +++ b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ImportLog4jPropertiesTaskTests.java @@ -10,7 +10,6 @@ import com.google.common.jimfs.Configuration; import com.google.common.jimfs.Jimfs; -import org.junit.Before; import org.opensearch.cli.MockTerminal; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.collect.Tuple; @@ -19,6 +18,7 @@ import org.opensearch.env.Environment; import org.opensearch.env.TestEnvironment; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; import java.io.File; import java.io.IOException; diff --git a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ImportYmlConfigTaskTests.java b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ImportYmlConfigTaskTests.java index f328cc21685ad..be03470b201a1 100644 --- a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ImportYmlConfigTaskTests.java +++ b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ImportYmlConfigTaskTests.java @@ -10,7 +10,6 @@ import com.google.common.jimfs.Configuration; import com.google.common.jimfs.Jimfs; -import org.junit.Before; import org.opensearch.cli.MockTerminal; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.collect.Tuple; @@ -19,6 +18,7 @@ import org.opensearch.env.Environment; import org.opensearch.env.TestEnvironment; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; import java.io.File; import java.io.IOException; diff --git a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/InstallPluginsTaskTests.java b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/InstallPluginsTaskTests.java index 6cb6f0b7cf116..46e189a4765d0 100644 --- a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/InstallPluginsTaskTests.java +++ b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/InstallPluginsTaskTests.java @@ -8,18 +8,19 @@ package org.opensearch.upgrade; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -import org.junit.Before; -import org.mockito.Mockito; import org.opensearch.cli.MockTerminal; import org.opensearch.common.collect.Tuple; import 
org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; import org.opensearch.env.TestEnvironment; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import org.mockito.Mockito; import static org.hamcrest.Matchers.containsString; import static org.mockito.Mockito.spy; diff --git a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/UpgradeCliTests.java b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/UpgradeCliTests.java index ee871e1d89001..a139480c71a2f 100644 --- a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/UpgradeCliTests.java +++ b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/UpgradeCliTests.java @@ -10,8 +10,6 @@ import com.google.common.jimfs.Configuration; import com.google.common.jimfs.Jimfs; -import org.junit.After; -import org.junit.Before; import org.opensearch.cli.Command; import org.opensearch.cli.CommandTestCase; import org.opensearch.common.SuppressForbidden; @@ -21,6 +19,8 @@ import org.opensearch.common.util.io.IOUtils; import org.opensearch.env.Environment; import org.opensearch.env.TestEnvironment; +import org.junit.After; +import org.junit.Before; import java.io.File; import java.io.IOException; diff --git a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ValidateInputTaskTests.java b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ValidateInputTaskTests.java index 195c57e5b457f..b9a536afb1361 100644 --- a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ValidateInputTaskTests.java +++ b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ValidateInputTaskTests.java @@ -8,7 +8,6 @@ package org.opensearch.upgrade; -import org.junit.Before; import org.opensearch.LegacyESVersion; import org.opensearch.cli.MockTerminal; import org.opensearch.common.collect.Tuple; @@ -17,6 +16,7 @@ import org.opensearch.env.Environment; import org.opensearch.env.TestEnvironment; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; import java.util.Arrays; import java.util.Map; diff --git a/doc-tools/missing-doclet/src/main/java/org/opensearch/missingdoclet/MissingDoclet.java b/doc-tools/missing-doclet/src/main/java/org/opensearch/missingdoclet/MissingDoclet.java index e6122e7baf91a..e1ad55fe4b60b 100644 --- a/doc-tools/missing-doclet/src/main/java/org/opensearch/missingdoclet/MissingDoclet.java +++ b/doc-tools/missing-doclet/src/main/java/org/opensearch/missingdoclet/MissingDoclet.java @@ -45,7 +45,7 @@ * It isn't recursive, just ignores exactly the elements you tell it. * Has option --missing-method to apply "method" level to selected packages (fix one at a time). * Matches package names exactly: so you'll need to list subpackages separately. - * + * <p> * Note: This by default ignores javadoc validation on overridden methods. */ // Original version of this class is ported from MissingDoclet code in Lucene, diff --git a/docs/build.gradle b/docs/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/docs/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. + +// See please https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/gradle/code-coverage.gradle b/gradle/code-coverage.gradle index dfb4ddba24113..3ca6b1fe84ea7 100644 --- a/gradle/code-coverage.gradle +++ b/gradle/code-coverage.gradle @@ -13,7 +13,7 @@ repositories { gradlePluginPortal() // TODO: Find the way to use the repositories from RepositoriesSetupPlugin maven { - url = "https://d1nvenhzbhpy0q.cloudfront.net/snapshots/lucene/" + url = "https://ci.opensearch.org/ci/dbc/snapshots/lucene/" } } @@ -37,7 +37,7 @@ tasks.withType(JacocoReport).configureEach { if (System.getProperty("tests.coverage")) { reporting { reports { - testCodeCoverageReport(JacocoCoverageReport) { + testCodeCoverageReport(JacocoCoverageReport) { testType = TestSuiteType.UNIT_TEST } } @@ -45,6 +45,6 @@ if (System.getProperty("tests.coverage")) { // Attach code coverage report task to Gradle check task project.getTasks().named(JavaBasePlugin.CHECK_TASK_NAME).configure { - dependsOn tasks.named('testCodeCoverageReport', JacocoReport) + dependsOn tasks.named('testCodeCoverageReport', JacocoReport) } } diff --git a/gradle/formatting.gradle b/gradle/formatting.gradle index 93be7b454d650..f3a4bf5cc765b 100644 --- a/gradle/formatting.gradle +++ b/gradle/formatting.gradle @@ -66,6 +66,22 @@ allprojects { target '**/*.java' removeUnusedImports() + importOrder( + 'de.thetaphi', + 'com.carrotsearch', + 'com.fasterxml', + 'com.avast', + 'com.sun', + 'com.maxmind|com.github|com.networknt|groovy|nebula', + 'org.antlr', + 'software.amazon', + 'com.azure|com.microsoft|com.ibm|com.google|joptsimple|org.apache|org.bouncycastle|org.codehaus|org.opensearch|org.objectweb|org.joda|org.hamcrest|org.openjdk|org.gradle|org.junit', + 'javax', + 'java', + '', + '\\#java|\\#org.opensearch|\\#org.hamcrest|\\#' + ) + eclipse().configFile rootProject.file('buildSrc/formatterConfig.xml') trimTrailingWhitespace() endWithNewline() @@ -83,7 +99,9 @@ allprojects { } } format 'misc', { - target '*.md', '*.gradle', '**/*.yaml', '**/*.yml', '**/*.svg' + target '*.md', '*.gradle', '**/*.json', '**/*.yaml', '**/*.yml', '**/*.svg' + + targetExclude '**/simple-bulk11.json', '**/simple-msearch5.json' trimTrailingWhitespace() endWithNewline() diff --git a/gradle/ide.gradle b/gradle/ide.gradle index bc442a081adf0..14d6b2982ccd0 100644 --- a/gradle/ide.gradle +++ b/gradle/ide.gradle @@ -82,6 +82,9 @@ if (System.getProperty('idea.active') == 'true') { runConfigurations { defaults(JUnit) { vmParameters = '-ea -Djava.locale.providers=SPI,COMPAT' + if (BuildParams.runtimeJavaVersion > JavaVersion.VERSION_17) { + vmParameters += ' -Djava.security.manager=allow' + } } } copyright { diff --git a/gradle/missing-javadoc.gradle b/gradle/missing-javadoc.gradle index 03addc1ba3616..e9a6d798b8323 100644 --- a/gradle/missing-javadoc.gradle +++ b/gradle/missing-javadoc.gradle @@ -147,6 +147,7 @@ configure([ project(":plugins:repository-s3"), project(":plugins:store-smb"), project(":plugins:transport-nio"), + project(":plugins:crypto-kms"), project(":qa:die-with-dignity"), project(":qa:wildfly"), project(":test:external-modules:test-delayed-aggs"), @@ -166,6 +167,7 @@ configure([ configure([ project(":libs:opensearch-common"), project(":libs:opensearch-core"), + project(":libs:opensearch-compress"), project(":plugins:events-correlation-engine"), project(":server") ]) { diff --git a/gradle/run.gradle 
b/gradle/run.gradle index 639479e97d28f..34651f1d94964 100644 --- a/gradle/run.gradle +++ b/gradle/run.gradle @@ -39,6 +39,12 @@ testClusters { testDistribution = 'archive' if (numZones > 1) numberOfZones = numZones if (numNodes > 1) numberOfNodes = numNodes + if (findProperty("installedPlugins")) { + installedPlugins = Eval.me(installedPlugins) + for (String p : installedPlugins) { + plugin('plugins:'.concat(p)) + } + } } } diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar index 033e24c4cdf41..d64cd4917707c 100644 Binary files a/gradle/wrapper/gradle-wrapper.jar and b/gradle/wrapper/gradle-wrapper.jar differ diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index e10ceefe2a012..82a4add334a7d 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -11,7 +11,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-8.2.1-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.6-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=7c3ad722e9b0ce8205b91560fd6ce8296ac3eadf065672242fd73c06b8eeb6ee +distributionSha256Sum=85719317abd2112f021d4f41f09ec370534ba288432065f4b477b6a3b652910d diff --git a/gradlew b/gradlew index fcb6fca147c0c..1aa94a4269074 100755 --- a/gradlew +++ b/gradlew @@ -83,7 +83,8 @@ done # This is normally unused # shellcheck disable=SC2034 APP_BASE_NAME=${0##*/} -APP_HOME=$( cd "${APP_HOME:-./}" && pwd -P ) || exit +# Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036) +APP_HOME=$( cd "${APP_HOME:-./}" > /dev/null && pwd -P ) || exit # Use the maximum available, or set MAX_FD != -1 to use that value. MAX_FD=maximum @@ -144,7 +145,7 @@ if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then case $MAX_FD in #( max*) # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked. - # shellcheck disable=SC3045 + # shellcheck disable=SC2039,SC3045 MAX_FD=$( ulimit -H -n ) || warn "Could not query maximum file descriptor limit" esac @@ -152,7 +153,7 @@ if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then '' | soft) :;; #( *) # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked. - # shellcheck disable=SC3045 + # shellcheck disable=SC2039,SC3045 ulimit -n "$MAX_FD" || warn "Could not set maximum file descriptor limit to $MAX_FD" esac @@ -201,11 +202,11 @@ fi # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' -# Collect all arguments for the java command; -# * $DEFAULT_JVM_OPTS, $JAVA_OPTS, and $GRADLE_OPTS can contain fragments of -# shell script including quotes and variable substitutions, so put them in -# double quotes to make sure that they get re-expanded; and -# * put everything else in single quotes, so that it's not re-expanded. +# Collect all arguments for the java command: +# * DEFAULT_JVM_OPTS, JAVA_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments, +# and any embedded shellness will be escaped. +# * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be +# treated as '${Hostname}' itself on the command line. 
set -- \ "-Dorg.gradle.appname=$APP_BASE_NAME" \ diff --git a/libs/cli/src/main/java/org/opensearch/cli/Command.java b/libs/cli/src/main/java/org/opensearch/cli/Command.java index eed5c4ba4ee6f..cc9230bdb2282 100644 --- a/libs/cli/src/main/java/org/opensearch/cli/Command.java +++ b/libs/cli/src/main/java/org/opensearch/cli/Command.java @@ -162,7 +162,7 @@ protected static void exit(int status) { /** * Executes this command. - * + * <p> * Any runtime user errors (like an input file that does not exist), should throw a {@link UserException}. */ protected abstract void execute(Terminal terminal, OptionSet options) throws Exception; diff --git a/libs/cli/src/main/java/org/opensearch/cli/ExitCodes.java b/libs/cli/src/main/java/org/opensearch/cli/ExitCodes.java index c705177b0d7b6..90efc89a08caf 100644 --- a/libs/cli/src/main/java/org/opensearch/cli/ExitCodes.java +++ b/libs/cli/src/main/java/org/opensearch/cli/ExitCodes.java @@ -36,20 +36,34 @@ * POSIX exit codes. */ public class ExitCodes { + /** No error */ public static final int OK = 0; - public static final int USAGE = 64; /* command line usage error */ - public static final int DATA_ERROR = 65; /* data format error */ - public static final int NO_INPUT = 66; /* cannot open input */ - public static final int NO_USER = 67; /* addressee unknown */ - public static final int NO_HOST = 68; /* host name unknown */ - public static final int UNAVAILABLE = 69; /* service unavailable */ - public static final int CODE_ERROR = 70; /* internal software error */ - public static final int CANT_CREATE = 73; /* can't create (user) output file */ - public static final int IO_ERROR = 74; /* input/output error */ - public static final int TEMP_FAILURE = 75; /* temp failure; user is invited to retry */ - public static final int PROTOCOL = 76; /* remote error in protocol */ - public static final int NOPERM = 77; /* permission denied */ - public static final int CONFIG = 78; /* configuration error */ + /** command line usage error */ + public static final int USAGE = 64; + /** data format error */ + public static final int DATA_ERROR = 65; + /** cannot open input */ + public static final int NO_INPUT = 66; + /** addressee unknown */ + public static final int NO_USER = 67; + /** host name unknown */ + public static final int NO_HOST = 68; + /** service unavailable */ + public static final int UNAVAILABLE = 69; + /** internal software error */ + public static final int CODE_ERROR = 70; + /** can't create (user) output file */ + public static final int CANT_CREATE = 73; + /** input/output error */ + public static final int IO_ERROR = 74; + /** temp failure; user is invited to retry */ + public static final int TEMP_FAILURE = 75; + /** remote error in protocol */ + public static final int PROTOCOL = 76; + /** permission denied */ + public static final int NOPERM = 77; + /** configuration error */ + public static final int CONFIG = 78; private ExitCodes() { /* no instance, just constants */ } } diff --git a/libs/cli/src/main/java/org/opensearch/cli/Terminal.java b/libs/cli/src/main/java/org/opensearch/cli/Terminal.java index 657b95fa052ab..fb1097178e5a3 100644 --- a/libs/cli/src/main/java/org/opensearch/cli/Terminal.java +++ b/libs/cli/src/main/java/org/opensearch/cli/Terminal.java @@ -44,13 +44,15 @@ /** * A Terminal wraps access to reading input and writing output for a cli. - * + * <p> * The available methods are similar to those of {@link Console}, with the ability * to read either normal text or a password, and the ability to print a line * of text. 
Printing is also gated by the {@link Verbosity} of the terminal, * which allows {@link #println(Verbosity,String)} calls which act like a logger, * only actually printing if the verbosity level of the terminal is above * the verbosity of the message. + * @see ConsoleTerminal + * @see SystemTerminal */ public abstract class Terminal { @@ -65,35 +67,57 @@ private static PrintWriter newErrorWriter() { return new PrintWriter(System.err); } - /** Defines the available verbosity levels of messages to be printed. */ + /** Defines the available verbosity levels of messages to be printed.*/ public enum Verbosity { - SILENT, /* always printed */ - NORMAL, /* printed when no options are given to cli */ - VERBOSE /* printed only when cli is passed verbose option */ + /** always printed */ + SILENT, + /** printed when no options are given to cli */ + NORMAL, + /** printed only when cli is passed verbose option */ + VERBOSE } /** The current verbosity for the terminal, defaulting to {@link Verbosity#NORMAL}. */ private Verbosity verbosity = Verbosity.NORMAL; - /** The newline used when calling println. */ + /** The newline separator used when calling println. */ private final String lineSeparator; + /** Constructs a new terminal with the given line separator. + * @param lineSeparator the line separator to use when calling println + * */ protected Terminal(String lineSeparator) { this.lineSeparator = lineSeparator; } - /** Sets the verbosity of the terminal. */ + /** Sets the {@link Terminal#verbosity} of the terminal. (Default is {@link Verbosity#NORMAL}) + * @param verbosity the {@link Verbosity} level that will be used for printing + * */ public void setVerbosity(Verbosity verbosity) { this.verbosity = verbosity; } - /** Reads clear text from the terminal input. See {@link Console#readLine()}. */ + /** Reads clear text from the terminal input. + * @see Console#readLine() + * @param prompt message to display to the user + * @return the text entered by the user + * */ public abstract String readText(String prompt); - /** Reads password text from the terminal input. See {@link Console#readPassword()}}. */ + /** Reads secret text from the terminal input with echoing disabled. + * @see Console#readPassword() + * @param prompt message to display to the user + * @return the secret as a character array + * */ public abstract char[] readSecret(String prompt); - /** Read password text form terminal input up to a maximum length. */ + /** Read secret text from terminal input with echoing disabled, up to a maximum length. + * @see Console#readPassword() + * @param prompt message to display to the user + * @param maxLength the maximum length of the secret + * @return the secret as a character array + * @throws IllegalStateException if the secret exceeds the maximum length + * */ public char[] readSecret(String prompt, int maxLength) { char[] result = readSecret(prompt); if (result.length > maxLength) { @@ -103,30 +127,48 @@ public char[] readSecret(String prompt, int maxLength) { return result; } - /** Returns a Writer which can be used to write to the terminal directly using standard output. */ + /** Returns a Writer which can be used to write to the terminal directly using standard output. + * @return a writer to {@link Terminal#DEFAULT} output + * @see Terminal.ConsoleTerminal + * @see Terminal.SystemTerminal + * */ public abstract PrintWriter getWriter(); - /** Returns a Writer which can be used to write to the terminal directly using standard error. 
*/ + /** Returns a Writer which can be used to write to the terminal directly using standard error. + * @return a writer to stderr + * */ public PrintWriter getErrorWriter() { return ERROR_WRITER; } - /** Prints a line to the terminal at {@link Verbosity#NORMAL} verbosity level. */ + /** Prints a line to the terminal at {@link Verbosity#NORMAL} verbosity level, with a {@link Terminal#lineSeparator} + * @param msg the message to print + * */ public final void println(String msg) { println(Verbosity.NORMAL, msg); } - /** Prints a line to the terminal at {@code verbosity} level. */ + /** Prints message to the terminal's standard output at {@link Verbosity} level, with a {@link Terminal#lineSeparator}. + * @param verbosity the {@link Verbosity} level at which to print + * @param msg the message to print + * */ public final void println(Verbosity verbosity, String msg) { print(verbosity, msg + lineSeparator); } - /** Prints message to the terminal's standard output at {@code verbosity} level, without a newline. */ + /** Prints message to the terminal's standard output at {@link Verbosity} level, without adding a {@link Terminal#lineSeparator}. + * @param verbosity the {@link Verbosity} level at which to print + * @param msg the message to print + * */ public final void print(Verbosity verbosity, String msg) { print(verbosity, msg, false); } - /** Prints message to the terminal at {@code verbosity} level, without a newline. */ + /** Prints message to either standard or error output at {@link Verbosity} level, without adding a {@link Terminal#lineSeparator}. + * @param verbosity the {@link Verbosity} level at which to print. + * @param msg the message to print + * @param isError if true, prints to standard error instead of standard output + * */ private void print(Verbosity verbosity, String msg, boolean isError) { if (isPrintable(verbosity)) { PrintWriter writer = isError ? getErrorWriter() : getWriter(); @@ -135,29 +177,44 @@ private void print(Verbosity verbosity, String msg, boolean isError) { } } - /** Prints a line to the terminal's standard error at {@link Verbosity#NORMAL} verbosity level, without a newline. */ + /** Prints a line to the terminal's standard error at {@link Verbosity} level, without adding a {@link Terminal#lineSeparator}. + * @param verbosity the {@link Verbosity} level at which to print. + * @param msg the message to print + * */ public final void errorPrint(Verbosity verbosity, String msg) { print(verbosity, msg, true); } - /** Prints a line to the terminal's standard error at {@link Verbosity#NORMAL} verbosity level. */ + /** Prints a line to the terminal's standard error at {@link Verbosity#NORMAL} verbosity level, with a {@link Terminal#lineSeparator} + * @param msg the message to print + * */ public final void errorPrintln(String msg) { errorPrintln(Verbosity.NORMAL, msg); } - /** Prints a line to the terminal's standard error at {@code verbosity} level. */ + /** Prints a line to the terminal's standard error at {@link Verbosity} level, with a {@link Terminal#lineSeparator}. + * @param verbosity the {@link Verbosity} level at which to print. 
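To make the verbosity gating documented above concrete, here is a small sketch, assuming an already-constructed Terminal named terminal; a message is emitted only when the terminal's configured level is at least the message's level:

    terminal.setVerbosity(Terminal.Verbosity.NORMAL);
    terminal.println(Terminal.Verbosity.SILENT, "always printed");          // SILENT <= NORMAL
    terminal.println("printed at the default NORMAL level");
    terminal.println(Terminal.Verbosity.VERBOSE, "dropped unless verbose"); // VERBOSE > NORMAL
    terminal.errorPrintln(Terminal.Verbosity.SILENT, "printed, but to standard error");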
+ * @param msg the message to print + * */ public final void errorPrintln(Verbosity verbosity, String msg) { errorPrint(verbosity, msg + lineSeparator); } - /** Checks if is enough {@code verbosity} level to be printed */ + /** Checks if given {@link Verbosity} level is high enough to be printed at the level defined by {@link Terminal#verbosity} + * @param verbosity the {@link Verbosity} level to check + * @return true if the {@link Verbosity} level is high enough to be printed + * @see Terminal#setVerbosity(Verbosity) + * */ public final boolean isPrintable(Verbosity verbosity) { return this.verbosity.ordinal() >= verbosity.ordinal(); } /** - * Prompt for a yes or no answer from the user. This method will loop until 'y' or 'n' + * Prompt for a yes or no answer from the user. This method will loop until 'y', 'n' * (or the default empty value) is entered. + * @param prompt the prompt to display to the user + * @param defaultYes if true, the default answer is 'y', otherwise it is 'n' + * @return true if the user answered 'y', false if the user answered 'n' or the defaultYes value if the user entered nothing */ public final boolean promptYesNo(String prompt, boolean defaultYes) { String answerPrompt = defaultYes ? " [Y/n]" : " [y/N]"; @@ -181,6 +238,11 @@ public final boolean promptYesNo(String prompt, boolean defaultYes) { * character is immediately preceded by a carriage return, we have * a Windows-style newline, so we discard the carriage return as well * as the newline. + * @param reader the reader to read from + * @param maxLength the maximum length of the line to read + * @return the line read from the reader + * @throws RuntimeException if the line read exceeds the maximum length + * @throws RuntimeException if an IOException occurs while reading */ public static char[] readLineToCharArray(Reader reader, int maxLength) { char[] buf = new char[maxLength + 2]; @@ -215,6 +277,7 @@ public static char[] readLineToCharArray(Reader reader, int maxLength) { } } + /** Flushes the terminal's standard output and standard error. */ public void flush() { this.getWriter().flush(); this.getErrorWriter().flush(); diff --git a/libs/common/build.gradle b/libs/common/build.gradle index 973fe30d09842..60bf488833393 100644 --- a/libs/common/build.gradle +++ b/libs/common/build.gradle @@ -14,7 +14,7 @@ import org.opensearch.gradle.info.BuildParams apply plugin: 'opensearch.publish' base { - archivesBaseName = 'opensearch-common' + archivesName = 'opensearch-common' } dependencies { @@ -43,3 +43,64 @@ tasks.named('forbiddenApisMain').configure { // TODO: Need to decide how we want to handle for forbidden signatures with the changes to server replaceSignatureFiles 'jdk-signatures' } + +// Add support for incubator modules on supported Java versions. 
+if (BuildParams.runtimeJavaVersion >= JavaVersion.VERSION_20) { + sourceSets { + java20 { + java { + srcDirs = ['src/main/java20'] + } + } + } + + configurations { + java20Implementation.extendsFrom(implementation) + } + + dependencies { + java20Implementation sourceSets.main.output + } + + compileJava20Java { + targetCompatibility = JavaVersion.VERSION_20 + options.compilerArgs += ['--add-modules', 'jdk.incubator.vector'] + options.compilerArgs -= '-Werror' // use of incubator modules is reported as a warning + } + + jar { + metaInf { + into 'versions/20' + from sourceSets.java20.output + } + manifest.attributes('Multi-Release': 'true') + } + + tasks.withType(Test).configureEach { + // Relying on the convention for Test.classpath in custom Test tasks has been deprecated + // and scheduled to be removed in Gradle 9.0. Below lines are added from the migration guide: + // https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#test_task_default_classpath + testClassesDirs = testing.suites.test.sources.output.classesDirs + classpath = testing.suites.test.sources.runtimeClasspath + + // Adds the multi-release JAR to the classpath when executing tests. + // This allows newer sources to be picked up at test runtime (if supported). + classpath += files(jar.archiveFile) + // Removes the "main" sources from the classpath to avoid JarHell problems as + // the multi-release JAR already contains those classes. + classpath -= sourceSets.main.output + } + + tasks.register('roundableSimdTest', Test) { + group 'verification' + include '**/RoundableTests.class' + systemProperty 'opensearch.experimental.feature.simd.rounding.enabled', 'forced' + } + + check.dependsOn(roundableSimdTest) + + forbiddenApisJava20 { + failOnMissingClasses = false + ignoreSignaturesOfMissingClasses = true + } +} diff --git a/libs/common/src/main/java/org/opensearch/bootstrap/JarHell.java b/libs/common/src/main/java/org/opensearch/bootstrap/JarHell.java index c4ba778e7db86..fc5e364241d12 100644 --- a/libs/common/src/main/java/org/opensearch/bootstrap/JarHell.java +++ b/libs/common/src/main/java/org/opensearch/bootstrap/JarHell.java @@ -104,7 +104,7 @@ public static void checkJarHell(Consumer<String> output) throws IOException, URI /** * Parses the classpath into an array of URLs - * @return array of URLs + * @return collection of URLs * @throws IllegalStateException if the classpath contains empty elements */ public static Set<URL> parseClassPath() { @@ -114,7 +114,7 @@ public static Set<URL> parseClassPath() { /** * Parses the classpath into a set of URLs. For testing. * @param classPath classpath to parse (typically the system property {@code java.class.path}) - * @return array of URLs + * @return collection of URLs * @throws IllegalStateException if the classpath contains empty elements */ @SuppressForbidden(reason = "resolves against CWD because that is how classpaths work") diff --git a/libs/common/src/main/java/org/opensearch/common/Booleans.java b/libs/common/src/main/java/org/opensearch/common/Booleans.java index 2ca061820b2eb..ab7ad37e92612 100644 --- a/libs/common/src/main/java/org/opensearch/common/Booleans.java +++ b/libs/common/src/main/java/org/opensearch/common/Booleans.java @@ -45,30 +45,72 @@ private Booleans() { /** * Parses a char[] representation of a boolean value to <code>boolean</code>. * - * @return <code>true</code> iff the sequence of chars is "true", <code>false</code> iff the sequence of chars is "false" or the - * provided default value iff either text is <code>null</code> or length == 0. 
+ * @return <code>true</code> iff the sequence of chars is "true", <code>false</code> iff the sequence of + * chars is "false" or the provided default value iff either text is <code>null</code> or length == 0. * @throws IllegalArgumentException if the string cannot be parsed to boolean. */ public static boolean parseBoolean(char[] text, int offset, int length, boolean defaultValue) { - if (text == null || length == 0) { + if (text == null) { return defaultValue; - } else { - return parseBoolean(new String(text, offset, length)); } + + switch (length) { + case 0: + return defaultValue; + case 1: + case 2: + case 3: + default: + break; + case 4: + if (text[offset] == 't' && text[offset + 1] == 'r' && text[offset + 2] == 'u' && text[offset + 3] == 'e') { + return true; + } + break; + case 5: + if (text[offset] == 'f' + && text[offset + 1] == 'a' + && text[offset + 2] == 'l' + && text[offset + 3] == 's' + && text[offset + 4] == 'e') { + return false; + } + break; + } + + throw new IllegalArgumentException( + "Failed to parse value [" + new String(text, offset, length) + "] as only [true] or [false] are allowed." + ); } /** - * returns true iff the sequence of chars is one of "true","false". + * Returns true iff the sequence of chars is one of "true", "false". * * @param text sequence to check * @param offset offset to start * @param length length to check */ public static boolean isBoolean(char[] text, int offset, int length) { - if (text == null || length == 0) { + if (text == null) { return false; } - return isBoolean(new String(text, offset, length)); + + switch (length) { + case 0: + case 1: + case 2: + case 3: + default: + return false; + case 4: + return text[offset] == 't' && text[offset + 1] == 'r' && text[offset + 2] == 'u' && text[offset + 3] == 'e'; + case 5: + return text[offset] == 'f' + && text[offset + 1] == 'a' + && text[offset + 2] == 'l' + && text[offset + 3] == 's' + && text[offset + 4] == 'e'; + } } public static boolean isBoolean(String value) { @@ -91,63 +133,45 @@ public static boolean parseBoolean(String value) { throw new IllegalArgumentException("Failed to parse value [" + value + "] as only [true] or [false] are allowed."); } - private static boolean hasText(CharSequence str) { - if (str == null || str.length() == 0) { - return false; - } - int strLen = str.length(); - for (int i = 0; i < strLen; i++) { - if (!Character.isWhitespace(str.charAt(i))) { - return true; - } - } - return false; - } - /** + * Parses a string representation of a boolean value to <code>boolean</code>. + * Note the subtle difference between this and {@link #parseBoolean(char[], int, int, boolean)}; this returns the + * default value even when the value is non-zero length containing all whitespaces (possibly overlooked, but + * preserving this behavior for compatibility reasons). Use {@link #parseBooleanStrict(String, boolean)} instead. * * @param value text to parse. - * @param defaultValue The default value to return if the provided value is <code>null</code>. + * @param defaultValue The default value to return if the provided value is <code>null</code> or blank. 
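The rewritten char[] overload above recognizes exactly "true" and "false" without allocating an intermediate String, returns the default for null or zero-length input, and throws for anything else (the separately deprecated lenient spellings such as "on", "off", "yes", "no", "0" and "1" are removed further down). A few worked calls:

    Booleans.parseBoolean("true".toCharArray(), 0, 4, false);  // true, no String allocated
    Booleans.parseBoolean(new char[0], 0, 0, true);            // true: zero length falls back to the default
    Booleans.parseBoolean("on".toCharArray(), 0, 2, false);    // throws IllegalArgumentException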
* @return see {@link #parseBoolean(String)} */ + @Deprecated public static boolean parseBoolean(String value, boolean defaultValue) { - if (hasText(value)) { - return parseBoolean(value); - } - return defaultValue; - } - - public static Boolean parseBoolean(String value, Boolean defaultValue) { - if (hasText(value)) { - return parseBoolean(value); + if (value == null || value.isBlank()) { + return defaultValue; } - return defaultValue; + return parseBoolean(value); } - /** - * Returns {@code false} if text is in "false", "0", "off", "no"; else, {@code true}. - * - * @deprecated Only kept to provide automatic upgrades for pre 6.0 indices. Use {@link #parseBoolean(String, Boolean)} instead. - */ @Deprecated - public static Boolean parseBooleanLenient(String value, Boolean defaultValue) { - if (value == null) { // only for the null case we do that here! + public static Boolean parseBoolean(String value, Boolean defaultValue) { + if (value == null || value.isBlank()) { return defaultValue; } - return parseBooleanLenient(value, false); + return parseBoolean(value); } /** - * Returns {@code false} if text is in "false", "0", "off", "no"; else, {@code true}. + * Parses a string representation of a boolean value to <code>boolean</code>. + * Analogous to {@link #parseBoolean(char[], int, int, boolean)}. * - * @deprecated Only kept to provide automatic upgrades for pre 6.0 indices. Use {@link #parseBoolean(String, boolean)} instead. + * @return <code>true</code> iff the sequence of chars is "true", <code>false</code> iff the sequence of + * chars is "false", or the provided default value iff either text is <code>null</code> or length == 0. + * @throws IllegalArgumentException if the string cannot be parsed to boolean. */ - @Deprecated - public static boolean parseBooleanLenient(String value, boolean defaultValue) { - if (value == null) { + public static boolean parseBooleanStrict(String value, boolean defaultValue) { + if (value == null || value.length() == 0) { return defaultValue; } - return !(value.equals("false") || value.equals("0") || value.equals("off") || value.equals("no")); + return parseBoolean(value); } /** @@ -163,71 +187,4 @@ public static boolean isFalse(String value) { public static boolean isTrue(String value) { return "true".equals(value); } - - /** - * Returns {@code false} if text is in "false", "0", "off", "no"; else, {@code true}. - * - * @deprecated Only kept to provide automatic upgrades for pre 6.0 indices. Use {@link #parseBoolean(char[], int, int, boolean)} instead - */ - @Deprecated - public static boolean parseBooleanLenient(char[] text, int offset, int length, boolean defaultValue) { - if (text == null || length == 0) { - return defaultValue; - } - if (length == 1) { - return text[offset] != '0'; - } - if (length == 2) { - return !(text[offset] == 'n' && text[offset + 1] == 'o'); - } - if (length == 3) { - return !(text[offset] == 'o' && text[offset + 1] == 'f' && text[offset + 2] == 'f'); - } - if (length == 5) { - return !(text[offset] == 'f' - && text[offset + 1] == 'a' - && text[offset + 2] == 'l' - && text[offset + 3] == 's' - && text[offset + 4] == 'e'); - } - return true; - } - - /** - * returns true if the a sequence of chars is one of "true","false","on","off","yes","no","0","1" - * - * @param text sequence to check - * @param offset offset to start - * @param length length to check - * - * @deprecated Only kept to provide automatic upgrades for pre 6.0 indices. Use {@link #isBoolean(char[], int, int)} instead. 
- */ - @Deprecated - public static boolean isBooleanLenient(char[] text, int offset, int length) { - if (text == null || length == 0) { - return false; - } - if (length == 1) { - return text[offset] == '0' || text[offset] == '1'; - } - if (length == 2) { - return (text[offset] == 'n' && text[offset + 1] == 'o') || (text[offset] == 'o' && text[offset + 1] == 'n'); - } - if (length == 3) { - return (text[offset] == 'o' && text[offset + 1] == 'f' && text[offset + 2] == 'f') - || (text[offset] == 'y' && text[offset + 1] == 'e' && text[offset + 2] == 's'); - } - if (length == 4) { - return (text[offset] == 't' && text[offset + 1] == 'r' && text[offset + 2] == 'u' && text[offset + 3] == 'e'); - } - if (length == 5) { - return (text[offset] == 'f' - && text[offset + 1] == 'a' - && text[offset + 2] == 'l' - && text[offset + 3] == 's' - && text[offset + 4] == 'e'); - } - return false; - } - } diff --git a/libs/common/src/main/java/org/opensearch/common/CheckedBiConsumer.java b/libs/common/src/main/java/org/opensearch/common/CheckedBiConsumer.java index 50c15bb7a95a8..c2ef08e288346 100644 --- a/libs/common/src/main/java/org/opensearch/common/CheckedBiConsumer.java +++ b/libs/common/src/main/java/org/opensearch/common/CheckedBiConsumer.java @@ -32,13 +32,16 @@ package org.opensearch.common; +import org.opensearch.common.annotation.PublicApi; + import java.util.function.BiConsumer; /** * A {@link BiConsumer}-like interface which allows throwing checked exceptions. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") @FunctionalInterface public interface CheckedBiConsumer<T, U, E extends Exception> { void accept(T t, U u) throws E; diff --git a/libs/common/src/main/java/org/opensearch/common/CheckedConsumer.java b/libs/common/src/main/java/org/opensearch/common/CheckedConsumer.java index dede06d0e207d..07b4973c3a340 100644 --- a/libs/common/src/main/java/org/opensearch/common/CheckedConsumer.java +++ b/libs/common/src/main/java/org/opensearch/common/CheckedConsumer.java @@ -32,6 +32,8 @@ package org.opensearch.common; +import org.opensearch.common.annotation.PublicApi; + import java.util.function.Consumer; /** @@ -39,6 +41,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") @FunctionalInterface public interface CheckedConsumer<T, E extends Exception> { void accept(T t) throws E; diff --git a/libs/common/src/main/java/org/opensearch/common/CheckedFunction.java b/libs/common/src/main/java/org/opensearch/common/CheckedFunction.java index 9c17ad4b4ee3f..927edd1b9905a 100644 --- a/libs/common/src/main/java/org/opensearch/common/CheckedFunction.java +++ b/libs/common/src/main/java/org/opensearch/common/CheckedFunction.java @@ -32,6 +32,8 @@ package org.opensearch.common; +import org.opensearch.common.annotation.PublicApi; + import java.util.function.Function; /** @@ -39,6 +41,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") @FunctionalInterface public interface CheckedFunction<T, R, E extends Exception> { R apply(T t) throws E; diff --git a/libs/common/src/main/java/org/opensearch/common/Explicit.java b/libs/common/src/main/java/org/opensearch/common/Explicit.java index 66e079c461e75..da44c6fd4dcef 100644 --- a/libs/common/src/main/java/org/opensearch/common/Explicit.java +++ b/libs/common/src/main/java/org/opensearch/common/Explicit.java @@ -32,19 +32,22 @@ package org.opensearch.common; +import org.opensearch.common.annotation.PublicApi; + import java.util.Objects; /** * Holds a value that is either: * a) set implicitly e.g. 
through some default value * b) set explicitly e.g. from a user selection - * + * <p> * When merging conflicting configuration settings such as * field mapping settings it is preferable to preserve an explicit * choice rather than a choice made only made implicitly by defaults. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Explicit<T> { private final T value; diff --git a/libs/common/src/main/java/org/opensearch/common/LocalTimeOffset.java b/libs/common/src/main/java/org/opensearch/common/LocalTimeOffset.java index 7e89641927ed5..eb7b331c9aa24 100644 --- a/libs/common/src/main/java/org/opensearch/common/LocalTimeOffset.java +++ b/libs/common/src/main/java/org/opensearch/common/LocalTimeOffset.java @@ -514,7 +514,7 @@ public boolean anyMoveBackToPreviousDay() { * Builds an array that can be {@link Arrays#binarySearch(long[], long)}ed * for the daylight savings time transitions. * - * @openearch.internal + * @opensearch.internal */ private static class TransitionArrayLookup extends AbstractManyTransitionsLookup { private final LocalTimeOffset[] offsets; diff --git a/libs/common/src/main/java/org/opensearch/common/Nullable.java b/libs/common/src/main/java/org/opensearch/common/Nullable.java index 804b339449147..70db2a3755eba 100644 --- a/libs/common/src/main/java/org/opensearch/common/Nullable.java +++ b/libs/common/src/main/java/org/opensearch/common/Nullable.java @@ -32,8 +32,11 @@ package org.opensearch.common; +import org.opensearch.common.annotation.PublicApi; + import javax.annotation.CheckForNull; import javax.annotation.meta.TypeQualifierNickname; + import java.lang.annotation.Documented; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; @@ -52,5 +55,6 @@ @CheckForNull @Retention(RetentionPolicy.RUNTIME) @Target({ ElementType.PARAMETER, ElementType.FIELD, ElementType.METHOD }) +@PublicApi(since = "1.0.0") public @interface Nullable { } diff --git a/libs/common/src/main/java/org/opensearch/common/Numbers.java b/libs/common/src/main/java/org/opensearch/common/Numbers.java index 084e52a41f8b1..d5a364a4a934e 100644 --- a/libs/common/src/main/java/org/opensearch/common/Numbers.java +++ b/libs/common/src/main/java/org/opensearch/common/Numbers.java @@ -260,4 +260,12 @@ public static double unsignedLongToDouble(long value) { // want to replace that with 1 in the shifted value for correct rounding. return (double) ((value >>> 1) | (value & 1)) * 2.0; } + + /** + * Return the strictly greater next power of two for the given value. + * For zero and negative numbers, it returns 1. + */ + public static long nextPowerOfTwo(long value) { + return 1L << (Long.SIZE - Long.numberOfLeadingZeros(value)); + } } diff --git a/libs/common/src/main/java/org/opensearch/common/SetOnce.java b/libs/common/src/main/java/org/opensearch/common/SetOnce.java index a596b5fcdb61d..778926ce108b7 100644 --- a/libs/common/src/main/java/org/opensearch/common/SetOnce.java +++ b/libs/common/src/main/java/org/opensearch/common/SetOnce.java @@ -35,7 +35,7 @@ * A convenient class which offers a semi-immutable object wrapper implementation which allows one * to set the value of an object exactly once, and retrieve it many times. If {@link #set(Object)} * is called more than once, {@link AlreadySetException} is thrown and the operation will fail. - * + * <p> * This is borrowed from lucene's experimental API. It is not reused to eliminate the dependency * on lucene core for such a simple (standalone) utility class that may change beyond OpenSearch needs. 
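Stepping back to the Numbers addition above: because the shift amount is derived from the number of leading zeros, the result is the strictly greater power of two, so an exact power of two maps to the next one up. A few worked values:

    Numbers.nextPowerOfTwo(0L);  // 1 (zero and negative values return 1)
    Numbers.nextPowerOfTwo(1L);  // 2: an exact power of two is doubled
    Numbers.nextPowerOfTwo(5L);  // 8
    Numbers.nextPowerOfTwo(8L);  // 16
    Numbers.nextPowerOfTwo(-7L); // 1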
* diff --git a/libs/common/src/main/java/org/opensearch/common/SuppressForbidden.java b/libs/common/src/main/java/org/opensearch/common/SuppressForbidden.java index 1f1b28bcf6759..c479d7bd98e8a 100644 --- a/libs/common/src/main/java/org/opensearch/common/SuppressForbidden.java +++ b/libs/common/src/main/java/org/opensearch/common/SuppressForbidden.java @@ -31,6 +31,8 @@ package org.opensearch.common; +import org.opensearch.common.annotation.PublicApi; + import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; @@ -43,6 +45,7 @@ */ @Retention(RetentionPolicy.CLASS) @Target({ ElementType.CONSTRUCTOR, ElementType.FIELD, ElementType.METHOD, ElementType.TYPE }) +@PublicApi(since = "1.0.0") public @interface SuppressForbidden { String reason(); } diff --git a/libs/common/src/main/java/org/opensearch/common/TriFunction.java b/libs/common/src/main/java/org/opensearch/common/TriFunction.java index 7b1bbece68680..8594e8e2cd0c9 100644 --- a/libs/common/src/main/java/org/opensearch/common/TriFunction.java +++ b/libs/common/src/main/java/org/opensearch/common/TriFunction.java @@ -32,6 +32,8 @@ package org.opensearch.common; +import org.opensearch.common.annotation.PublicApi; + /** * Represents a function that accepts three arguments and produces a result. * @@ -40,8 +42,9 @@ * @param <U> the type of the third argument * @param <R> the return type * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") @FunctionalInterface public interface TriFunction<S, T, U, R> { /** diff --git a/server/src/main/java/org/opensearch/action/ActionFuture.java b/libs/common/src/main/java/org/opensearch/common/action/ActionFuture.java similarity index 96% rename from server/src/main/java/org/opensearch/action/ActionFuture.java rename to libs/common/src/main/java/org/opensearch/common/action/ActionFuture.java index d796180eda021..7f9dd096667e9 100644 --- a/server/src/main/java/org/opensearch/action/ActionFuture.java +++ b/libs/common/src/main/java/org/opensearch/common/action/ActionFuture.java @@ -30,8 +30,9 @@ * GitHub history for details. */ -package org.opensearch.action; +package org.opensearch.common.action; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import java.util.concurrent.Future; @@ -42,6 +43,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ActionFuture<T> extends Future<T> { /** diff --git a/libs/common/src/main/java/org/opensearch/common/action/package-info.java b/libs/common/src/main/java/org/opensearch/common/action/package-info.java new file mode 100644 index 0000000000000..4ed2687c80cc9 --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/action/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** foundation action classes used across the code base */ +package org.opensearch.common.action; diff --git a/libs/common/src/main/java/org/opensearch/common/annotation/DeprecatedApi.java b/libs/common/src/main/java/org/opensearch/common/annotation/DeprecatedApi.java new file mode 100644 index 0000000000000..964380e1a26ce --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/annotation/DeprecatedApi.java @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Target; + +/** + * Marks the public APIs as deprecated and scheduled for removal in one of the upcoming + * major releases. The types marked with this annotation may only be other {@link PublicApi}s. + * + * @opensearch.api + */ +@Documented +@Target({ + ElementType.TYPE, + ElementType.PACKAGE, + ElementType.METHOD, + ElementType.CONSTRUCTOR, + ElementType.PARAMETER, + ElementType.FIELD, + ElementType.ANNOTATION_TYPE, + ElementType.MODULE }) +@PublicApi(since = "2.10.0") +public @interface DeprecatedApi { + /** + * Version since this API is deprecated + */ + String since(); + + /** + * Next major version when this API is scheduled for removal + */ + String forRemoval() default ""; +} diff --git a/libs/common/src/main/java/org/opensearch/common/annotation/ExperimentalApi.java b/libs/common/src/main/java/org/opensearch/common/annotation/ExperimentalApi.java new file mode 100644 index 0000000000000..001ffd6eb720a --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/annotation/ExperimentalApi.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Target; + +/** + * Experimental APIs that may not retain source and binary compatibility within major, + * minor or patch releases. The types marked with this annotation may only expose + * other {@link PublicApi} or {@link ExperimentalApi} types as public members. + * + * @opensearch.api + */ +@Documented +@Target({ + ElementType.TYPE, + ElementType.PACKAGE, + ElementType.METHOD, + ElementType.CONSTRUCTOR, + ElementType.PARAMETER, + ElementType.FIELD, + ElementType.ANNOTATION_TYPE, + ElementType.MODULE }) +@PublicApi(since = "2.10.0") +public @interface ExperimentalApi { + +} diff --git a/libs/common/src/main/java/org/opensearch/common/annotation/InternalApi.java b/libs/common/src/main/java/org/opensearch/common/annotation/InternalApi.java new file mode 100644 index 0000000000000..ae58c49e58c5e --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/annotation/InternalApi.java @@ -0,0 +1,34 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.common.annotation; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Target; + +/** + * Internal APIs that have no compatibility guarantees and should not be used outside + * of OpenSearch core components. + * + * @opensearch.api + */ +@Documented +@Target({ + ElementType.TYPE, + ElementType.PACKAGE, + ElementType.METHOD, + ElementType.CONSTRUCTOR, + ElementType.PARAMETER, + ElementType.FIELD, + ElementType.ANNOTATION_TYPE, + ElementType.MODULE }) +@PublicApi(since = "2.10.0") +public @interface InternalApi { + +} diff --git a/libs/common/src/main/java/org/opensearch/common/annotation/PublicApi.java b/libs/common/src/main/java/org/opensearch/common/annotation/PublicApi.java new file mode 100644 index 0000000000000..33862446d6442 --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/annotation/PublicApi.java @@ -0,0 +1,39 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Target; + +/** + * Stable public APIs that retain source and binary compatibility within a major release. + * These interfaces can change from one major release to another major release + * (e.g. from 1.0 to 2.0). The types marked with this annotation may only expose + * other {@link PublicApi} or {@link ExperimentalApi} types as public members. + * + * @opensearch.api + */ +@Documented +@Target({ + ElementType.TYPE, + ElementType.PACKAGE, + ElementType.METHOD, + ElementType.CONSTRUCTOR, + ElementType.PARAMETER, + ElementType.FIELD, + ElementType.ANNOTATION_TYPE, + ElementType.MODULE }) +@PublicApi(since = "2.10.0") +public @interface PublicApi { + /** + * Version when this API was released + */ + String since(); +} diff --git a/libs/common/src/main/java/org/opensearch/common/annotation/package-info.java b/libs/common/src/main/java/org/opensearch/common/annotation/package-info.java new file mode 100644 index 0000000000000..7bb79d7579747 --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/annotation/package-info.java @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * The OpenSearch API related annotations + * + * @opensearch.api + */ +@PublicApi(since = "2.10.0") +package org.opensearch.common.annotation; diff --git a/libs/common/src/main/java/org/opensearch/common/annotation/processor/ApiAnnotationProcessor.java b/libs/common/src/main/java/org/opensearch/common/annotation/processor/ApiAnnotationProcessor.java new file mode 100644 index 0000000000000..569f48a8465f3 --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/annotation/processor/ApiAnnotationProcessor.java @@ -0,0 +1,369 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.DeprecatedApi; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.common.annotation.PublicApi; + +import javax.annotation.processing.AbstractProcessor; +import javax.annotation.processing.RoundEnvironment; +import javax.annotation.processing.SupportedAnnotationTypes; +import javax.lang.model.AnnotatedConstruct; +import javax.lang.model.SourceVersion; +import javax.lang.model.element.AnnotationMirror; +import javax.lang.model.element.Element; +import javax.lang.model.element.ElementKind; +import javax.lang.model.element.ExecutableElement; +import javax.lang.model.element.Modifier; +import javax.lang.model.element.PackageElement; +import javax.lang.model.element.TypeElement; +import javax.lang.model.element.TypeParameterElement; +import javax.lang.model.element.VariableElement; +import javax.lang.model.type.ArrayType; +import javax.lang.model.type.DeclaredType; +import javax.lang.model.type.ReferenceType; +import javax.lang.model.type.TypeMirror; +import javax.lang.model.type.TypeVariable; +import javax.lang.model.type.WildcardType; +import javax.tools.Diagnostic.Kind; + +import java.util.HashSet; +import java.util.Set; + +/** + * The annotation processor for API related annotations: {@link DeprecatedApi}, {@link ExperimentalApi}, + * {@link InternalApi} and {@link PublicApi}. + * <p> + * The checks are built on top of the following rules: + * <ul> + * <li>introspect each type annotated with {@link PublicApi}, {@link DeprecatedApi} or {@link ExperimentalApi}, + * filtering out package-private declarations</li> + * <li>make sure those leak only {@link PublicApi}, {@link DeprecatedApi} or {@link ExperimentalApi} types as well (exceptions, + * method return values, method arguments, method generic type arguments, class generic type arguments, annotations)</li> + * <li>recursively follow the type introspection chains to enforce the rules down the line</li> + * </ul> + */ +@InternalApi +@SupportedAnnotationTypes("org.opensearch.common.annotation.*") +public class ApiAnnotationProcessor extends AbstractProcessor { + private static final String OPTION_CONTINUE_ON_FAILING_CHECKS = "continueOnFailingChecks"; + private static final String OPENSEARCH_PACKAGE = "org.opensearch"; + + private final Set<Element> reported = new HashSet<>(); + private final Set<AnnotatedConstruct> processed = new HashSet<>(); + private Kind reportFailureAs = Kind.ERROR; + + @Override + public SourceVersion getSupportedSourceVersion() { + return SourceVersion.latest(); + } + + @Override + public Set<String> getSupportedOptions() { + return Set.of(OPTION_CONTINUE_ON_FAILING_CHECKS); + } + + @Override + public boolean process(Set<? extends TypeElement> annotations, RoundEnvironment round) { + processingEnv.getMessager().printMessage(Kind.NOTE, "Processing OpenSearch Api annotations"); + + if (processingEnv.getOptions().containsKey(OPTION_CONTINUE_ON_FAILING_CHECKS) == true) { + reportFailureAs = Kind.NOTE; + } + + final Set<? 
extends Element> elements = round.getElementsAnnotatedWithAny( + Set.of(PublicApi.class, ExperimentalApi.class, DeprecatedApi.class) + ); + + for (var element : elements) { + if (!checkPackage(element)) { + continue; + } + + // Skip all not-public elements + checkPublicVisibility(null, element); + + if (element instanceof TypeElement) { + process((TypeElement) element); + } + } + + return false; + } + + /** + * Check top level executable element + * @param executable top level executable element + * @param enclosing enclosing element + */ + private void process(ExecutableElement executable, Element enclosing) { + if (!inspectable(executable)) { + return; + } + + // The executable element should not be internal (unless constructor for injectable core component) + checkNotInternal(enclosing, executable); + + // Check this element's annotations + for (final AnnotationMirror annotation : executable.getAnnotationMirrors()) { + final Element element = annotation.getAnnotationType().asElement(); + if (inspectable(element)) { + checkNotInternal(executable.getEnclosingElement(), element); + checkPublic(executable.getEnclosingElement(), element); + } + } + + // Process method return types + final TypeMirror returnType = executable.getReturnType(); + if (returnType instanceof ReferenceType) { + process(executable, (ReferenceType) returnType); + } + + // Process method thrown types + for (final TypeMirror thrownType : executable.getThrownTypes()) { + if (thrownType instanceof ReferenceType) { + process(executable, (ReferenceType) thrownType); + } + } + + // Process method type parameters + for (final TypeParameterElement typeParameter : executable.getTypeParameters()) { + for (final TypeMirror boundType : typeParameter.getBounds()) { + if (boundType instanceof ReferenceType) { + process(executable, (ReferenceType) boundType); + } + } + } + + // Process method arguments + for (final VariableElement parameter : executable.getParameters()) { + final TypeMirror parameterType = parameter.asType(); + if (parameterType instanceof ReferenceType) { + process(executable, (ReferenceType) parameterType); + } + } + } + + /** + * Check wildcard type bounds referred by an element + * @param executable element + * @param type wildcard type + */ + private void process(ExecutableElement executable, WildcardType type) { + if (type.getExtendsBound() instanceof ReferenceType) { + process(executable, (ReferenceType) type.getExtendsBound()); + } + + if (type.getSuperBound() instanceof ReferenceType) { + process(executable, (ReferenceType) type.getSuperBound()); + } + } + + /** + * Check reference type bounds referred by an executable element + * @param executable executable element + * @param ref reference type + */ + private void process(ExecutableElement executable, ReferenceType ref) { + // The element has been processed already + if (processed.add(ref) == false) { + return; + } + + if (ref instanceof DeclaredType) { + final DeclaredType declaredType = (DeclaredType) ref; + + final Element element = declaredType.asElement(); + if (inspectable(element)) { + checkNotInternal(executable.getEnclosingElement(), element); + checkPublic(executable.getEnclosingElement(), element); + } + + for (final TypeMirror type : declaredType.getTypeArguments()) { + if (type instanceof ReferenceType) { + process(executable, (ReferenceType) type); + } else if (type instanceof WildcardType) { + process(executable, (WildcardType) type); + } + } + } else if (ref instanceof ArrayType) { + final TypeMirror componentType = ((ArrayType) 
ref).getComponentType(); + if (componentType instanceof ReferenceType) { + process(executable, (ReferenceType) componentType); + } + } else if (ref instanceof TypeVariable) { + final TypeVariable typeVariable = (TypeVariable) ref; + if (typeVariable.getUpperBound() instanceof ReferenceType) { + process(executable, (ReferenceType) typeVariable.getUpperBound()); + } + if (typeVariable.getLowerBound() instanceof ReferenceType) { + process(executable, (ReferenceType) typeVariable.getLowerBound()); + } + } + + // Check this element's annotations + for (final AnnotationMirror annotation : ref.getAnnotationMirrors()) { + final Element element = annotation.getAnnotationType().asElement(); + if (inspectable(element)) { + checkNotInternal(executable.getEnclosingElement(), element); + checkPublic(executable.getEnclosingElement(), element); + } + } + } + + /** + * Check if a particular executable element should be inspected or not + * @param executable executable element to inspect + * @return {@code true} if a particular executable element should be inspected, {@code false} otherwise + */ + private boolean inspectable(ExecutableElement executable) { + // The constructors for public APIs could use non-public APIs when those are supposed to be only + // consumed (not instantiated) by external consumers. + return executable.getKind() != ElementKind.CONSTRUCTOR && executable.getModifiers().contains(Modifier.PUBLIC); + } + + /** + * Check if a particular element should be inspected or not + * @param element element to inspect + * @return {@code true} if a particular element should be inspected, {@code false} otherwise + */ + private boolean inspectable(Element element) { + final PackageElement pckg = processingEnv.getElementUtils().getPackageOf(element); + return pckg.getQualifiedName().toString().startsWith(OPENSEARCH_PACKAGE); + } + + /** + * Check if a particular element belongs to OpenSearch managed packages + * @param element element to inspect + * @return {@code true} if a particular element belongs to OpenSearch managed packages, {@code false} otherwise + */ + private boolean checkPackage(Element element) { + // The element was reported already + if (reported.contains(element)) { + return false; + } + + final PackageElement pckg = processingEnv.getElementUtils().getPackageOf(element); + final boolean belongsToOpenSearch = pckg.getQualifiedName().toString().startsWith(OPENSEARCH_PACKAGE); + + if (!belongsToOpenSearch) { + reported.add(element); + + processingEnv.getMessager() + .printMessage( + reportFailureAs, + "The type " + + element + + " is not residing in " + + OPENSEARCH_PACKAGE + + ".* package " + + "and should not be annotated as OpenSearch APIs." + ); + } + + return belongsToOpenSearch; + } + + /** + * Check the fields, methods, constructors, and member types that are directly + * declared in this class or interface. + * @param element class or interface + */ + private void process(Element element) { + // Check the fields, methods, constructors, and member types that are directly + // declared in this class or interface.
+ for (final Element enclosed : element.getEnclosedElements()) { + // Skip all not-public elements + if (!enclosed.getModifiers().contains(Modifier.PUBLIC)) { + continue; + } + + if (enclosed instanceof ExecutableElement) { + process((ExecutableElement) enclosed, element); + } + } + } + + /** + * Check if element is public and annotated with {@link PublicApi}, {@link DeprecatedApi} or {@link ExperimentalApi} + * @param referencedBy the referrer for the element + * @param element element to check + */ + private void checkPublic(@Nullable Element referencedBy, final Element element) { + // The element was reported already + if (reported.contains(element)) { + return; + } + + checkPublicVisibility(referencedBy, element); + + if (element.getAnnotation(PublicApi.class) == null + && element.getAnnotation(ExperimentalApi.class) == null + && element.getAnnotation(DeprecatedApi.class) == null) { + reported.add(element); + + processingEnv.getMessager() + .printMessage( + reportFailureAs, + "The element " + + element + + " is part of the public APIs but is not marked as @PublicApi, @ExperimentalApi or @DeprecatedApi" + + ((referencedBy != null) ? " (referenced by " + referencedBy + ") " : "") + ); + } + } + + /** + * Check if element has public visibility (following Java visibility rules) + * @param referencedBy the referrer for the element + * @param element element to check + */ + private void checkPublicVisibility(Element referencedBy, final Element element) { + if (!element.getModifiers().contains(Modifier.PUBLIC) && !element.getModifiers().contains(Modifier.PROTECTED)) { + reported.add(element); + + processingEnv.getMessager() + .printMessage( + reportFailureAs, + "The element " + + element + + " is part of the public APIs but does not have public or protected visibility" + + ((referencedBy != null) ? " (referenced by " + referencedBy + ") " : "") + ); + } + } + + /** + * Check if element is not annotated with {@link InternalApi} + * @param referencedBy the referrer for the element + * @param element element to check + */ + private void checkNotInternal(@Nullable Element referencedBy, final Element element) { + // The element was reported already + if (reported.contains(element)) { + return; + } + + if (element.getAnnotation(InternalApi.class) != null) { + reported.add(element); + + processingEnv.getMessager() + .printMessage( + reportFailureAs, + "The element " + + element + + " is part of the public APIs but is marked as @InternalApi" + + ((referencedBy != null) ? " (referenced by " + referencedBy + ") " : "") + ); + } + } +} diff --git a/libs/common/src/main/java/org/opensearch/common/annotation/processor/package-info.java b/libs/common/src/main/java/org/opensearch/common/annotation/processor/package-info.java new file mode 100644 index 0000000000000..fa23e4a7addce --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/annotation/processor/package-info.java @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
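Putting the processor's checks together, here is a minimal sketch of code it would reject (hypothetical names, assumed to live under an org.opensearch package so that checkPackage passes): a @PublicApi type whose public method leaks a type carrying no API annotation.

    package org.opensearch.example;

    import org.opensearch.common.annotation.PublicApi;

    class Unmarked {} // no API annotation, package-private

    @PublicApi(since = "1.0.0")
    public class ExampleApi {
        // Flagged: the returned type is neither public nor annotated as
        // @PublicApi, @ExperimentalApi or @DeprecatedApi; constructors are exempt.
        public Unmarked leak() {
            return new Unmarked();
        }
    }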
+ */ + +/** + * Classes related to OpenSearch API annotation processing + * + * @opensearch.internal + */ +@org.opensearch.common.annotation.InternalApi +package org.opensearch.common.annotation.processor; diff --git a/server/src/main/java/org/opensearch/common/collect/Iterators.java b/libs/common/src/main/java/org/opensearch/common/collect/Iterators.java similarity index 78% rename from server/src/main/java/org/opensearch/common/collect/Iterators.java rename to libs/common/src/main/java/org/opensearch/common/collect/Iterators.java index c7e7ae6a44a21..9b64932356c10 100644 --- a/server/src/main/java/org/opensearch/common/collect/Iterators.java +++ b/libs/common/src/main/java/org/opensearch/common/collect/Iterators.java @@ -41,6 +41,15 @@ * @opensearch.internal */ public class Iterators { + + /** + * Concat iterators + * + * @param iterators the iterators to concat + * @param <T> the type of iterator + * @return a new {@link ConcatenatedIterator} + * @throws NullPointerException if iterators is null + */ public static <T> Iterator<T> concat(Iterator<? extends T>... iterators) { if (iterators == null) { throw new NullPointerException("iterators"); @@ -71,6 +80,11 @@ static class ConcatenatedIterator<T> implements Iterator<T> { this.iterators = iterators; } + /** + * Returns {@code true} if the iteration has more elements. (In other words, returns {@code true} if {@link #next} would return an + * element rather than throwing an exception.) + * @return {@code true} if the iteration has more elements + */ @Override public boolean hasNext() { boolean hasNext = false; @@ -81,6 +95,11 @@ public boolean hasNext() { return hasNext; } + /** + * Returns the next element in the iteration. + * @return the next element in the iteration + * @throws NoSuchElementException if the iteration has no more elements + */ @Override public T next() { if (!hasNext()) { diff --git a/libs/common/src/main/java/org/opensearch/common/collect/Tuple.java b/libs/common/src/main/java/org/opensearch/common/collect/Tuple.java index 36bc5504061f5..a5d97dcd85ef7 100644 --- a/libs/common/src/main/java/org/opensearch/common/collect/Tuple.java +++ b/libs/common/src/main/java/org/opensearch/common/collect/Tuple.java @@ -32,13 +32,15 @@ package org.opensearch.common.collect; +import org.opensearch.common.annotation.PublicApi; + /** * Java 9 Tuple - * * todo: deprecate and remove w/ min jdk upgrade to 11? * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Tuple<V1, V2> { public static <V1, V2> Tuple<V1, V2> tuple(V1 v1, V2 v2) { @@ -61,6 +63,20 @@ public V2 v2() { return v2; } + /** + * Returns {@code true} if the given object is also a tuple and the two tuples + * have equal {@link #v1()} and {@link #v2()} values. + * <p> + * Returns {@code false} otherwise, including for {@code null} values or + * objects of different types. + * <p> + * Note: {@code Tuple} instances are equal if the underlying values are + * equal, even if the types are different. + * + * @param o the object to compare to + * @return {@code true} if the given object is also a tuple and the two tuples + * have equal {@link #v1()} and {@link #v2()} values. + */ @Override public boolean equals(Object o) { if (this == o) return true; @@ -74,6 +90,10 @@ public boolean equals(Object o) { return true; } + /** + * Returns the hash code value for this Tuple. + * @return the hash code value for this Tuple. + */ @Override public int hashCode() { int result = v1 != null ?
v1.hashCode() : 0; @@ -81,6 +101,10 @@ public int hashCode() { return result; } + /** + * Returns a string representation of a Tuple + * @return {@code "Tuple [v1=value1, v2=value2]"} + */ @Override public String toString() { return "Tuple [v1=" + v1 + ", v2=" + v2 + "]"; diff --git a/libs/common/src/main/java/org/opensearch/common/crypto/CryptoHandler.java b/libs/common/src/main/java/org/opensearch/common/crypto/CryptoHandler.java new file mode 100644 index 0000000000000..9572b5b9054b2 --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/crypto/CryptoHandler.java @@ -0,0 +1,118 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.crypto; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.io.InputStreamContainer; + +import java.io.Closeable; +import java.io.IOException; +import java.io.InputStream; + +/** + * Crypto provider abstractions for encryption and decryption of data. Allows registering multiple providers + * for defining different ways of encrypting or decrypting data. + * + * @param <T> Encryption Metadata / CryptoContext + * @param <U> Parsed Encryption Metadata / CryptoContext + */ +@ExperimentalApi +public interface CryptoHandler<T, U> extends Closeable { + + /** + * To initialise or create a new crypto metadata to be used in encryption. This is needed to set the context before + * beginning encryption. + * + * @return crypto metadata instance + */ + T initEncryptionMetadata(); + + /** + * To load crypto metadata to be used in encryption from content header. + * Note that the underlying information in the loaded metadata object is the same as that present in the object + * created during encryption, but the object type may differ. + * + * @param encryptedHeaderContentSupplier supplier for encrypted header content. + * @return crypto metadata instance used in decryption. + */ + U loadEncryptionMetadata(EncryptedHeaderContentSupplier encryptedHeaderContentSupplier) throws IOException; + + /** + * Some encryption algorithms impose conditions on the unit of content to be encrypted. This requires the + * content size to be re-adjusted in order to fulfil these conditions for partial writes. If write requests for + * encryption of a part of content do not fulfil these conditions, then encryption fails or can result in corrupted + * content, depending on the algorithm used. This method exposes a means to re-adjust sizes of such writes. + * + * @param cryptoContext crypto metadata instance + * @param contentSize Size of the raw content + * @return Adjusted size of the content. + */ + long adjustContentSizeForPartialEncryption(T cryptoContext, long contentSize); + + /** + * Estimates the length of the encrypted content. It should only be used to determine the length of the entire + * content after encryption. + * + * @param cryptoContext crypto metadata instance consisting of encryption metadata used in encryption. + * @param contentLength Size of the raw content + * @return Calculated size of the encrypted content. + */ + long estimateEncryptedLengthOfEntireContent(T cryptoContext, long contentLength); + + /** + * For a given encrypted content length, estimates the length of the decrypted content. + * @param cryptoContext crypto metadata instance consisting of encryption metadata used in encryption.
+ * @param contentLength Size of the encrypted content + * @return Calculated size of the decrypted content. + */ + long estimateDecryptedLength(U cryptoContext, long contentLength); + + /** + * Wraps a raw InputStream with an encrypting stream + * + * @param encryptionMetadata created earlier to set the crypto metadata. + * @param stream Raw InputStream to encrypt + * @return encrypting stream wrapped around raw InputStream. + */ + InputStreamContainer createEncryptingStream(T encryptionMetadata, InputStreamContainer stream); + + /** + * Provides an encrypted stream for a raw stream emitted for a part of content. + * + * @param cryptoContext crypto metadata instance. + * @param stream raw stream for which encrypted stream has to be created. + * @param totalStreams Number of streams being used for the entire content. + * @param streamIdx Index of the current stream. + * @return Encrypted stream for the provided raw stream. + */ + InputStreamContainer createEncryptingStreamOfPart(T cryptoContext, InputStreamContainer stream, int totalStreams, int streamIdx); + + /** + * This method accepts an encrypted stream and provides a decrypting wrapper. + * @param encryptingStream to be decrypted. + * @return Decrypting wrapper stream + */ + InputStream createDecryptingStream(InputStream encryptingStream); + + /** + * This method creates a {@link DecryptedRangedStreamProvider} which provides a wrapped stream to decrypt the + * underlying stream. This also provides an adjusted range against the actual range which should be used for fetching + * and supplying the encrypted content for decryption. Extra content outside the range is trimmed down and returned + * by the decrypted stream. + * For partial reads of encrypted content, some algorithms require the range of content to be adjusted for + * successful decryption. The adjusted range may or may not be the same as the provided range. If the range is + * adjusted, then the starting offset of the resultant range can be less than the starting offset of the provided + * range, and the end offset can be greater than the ending offset of the provided range. + * + * @param cryptoContext crypto metadata instance. + * @param startPosOfRawContent starting position in the raw/decrypted content + * @param endPosOfRawContent ending position in the raw/decrypted content + */ + DecryptedRangedStreamProvider createDecryptingStreamOfRange(U cryptoContext, long startPosOfRawContent, long endPosOfRawContent); +} diff --git a/libs/common/src/main/java/org/opensearch/common/crypto/DataKeyPair.java b/libs/common/src/main/java/org/opensearch/common/crypto/DataKeyPair.java new file mode 100644 index 0000000000000..711c0d314ecef --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/crypto/DataKeyPair.java @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ +package org.opensearch.common.crypto; + +/** + * Key pair generated by {@link MasterKeyProvider} + */ +public class DataKeyPair { + + /** Unencrypted data key used for encryption and decryption */ + private final byte[] rawKey; + /** Encrypted version of rawKey */ + private final byte[] encryptedKey; + + /** + * Constructor to initialize key-pair values + * @param rawKey Unencrypted data key used for encryption and decryption + * @param encryptedKey Encrypted version of rawKey + */ + public DataKeyPair(byte[] rawKey, byte[] encryptedKey) { + this.rawKey = rawKey; + this.encryptedKey = encryptedKey; + } + + /** + * Returns Unencrypted data key + * @return raw/decrypted key + */ + public byte[] getRawKey() { + return rawKey; + } + + /** + * Returns encrypted key + * @return encrypted key + */ + public byte[] getEncryptedKey() { + return encryptedKey; + } +} diff --git a/libs/common/src/main/java/org/opensearch/common/crypto/DecryptedRangedStreamProvider.java b/libs/common/src/main/java/org/opensearch/common/crypto/DecryptedRangedStreamProvider.java new file mode 100644 index 0000000000000..2cda3c1f8bdb4 --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/crypto/DecryptedRangedStreamProvider.java @@ -0,0 +1,51 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.crypto; + +import java.io.InputStream; +import java.util.function.UnaryOperator; + +/** + * Contains adjusted range of partial encrypted content which needs to be used for decryption. + */ +public class DecryptedRangedStreamProvider { + + /** Adjusted range of partial encrypted content which needs to be used for decryption. */ + private final long[] adjustedRange; + /** Stream provider for decryption and range re-adjustment. */ + private final UnaryOperator<InputStream> decryptedStreamProvider; + + /** + * To construct adjusted encrypted range. + * @param adjustedRange range of partial encrypted content which needs to be used for decryption. + * @param decryptedStreamProvider stream provider for decryption and range re-adjustment. + */ + public DecryptedRangedStreamProvider(long[] adjustedRange, UnaryOperator<InputStream> decryptedStreamProvider) { + this.adjustedRange = adjustedRange; + this.decryptedStreamProvider = decryptedStreamProvider; + } + + /** + * Adjusted range of partial encrypted content which needs to be used for decryption. + * @return adjusted range + */ + public long[] getAdjustedRange() { + return adjustedRange; + } + + /** + * A utility stream provider which supplies the stream responsible for decrypting the content and reading the + * desired range of decrypted content by skipping extra content which got decrypted as a result of range adjustment. + * @return stream provider for decryption and supplying the desired range of content. 
+ */ + public UnaryOperator<InputStream> getDecryptedStreamProvider() { + return decryptedStreamProvider; + } + +} diff --git a/libs/common/src/main/java/org/opensearch/common/crypto/EncryptedHeaderContentSupplier.java b/libs/common/src/main/java/org/opensearch/common/crypto/EncryptedHeaderContentSupplier.java new file mode 100644 index 0000000000000..49a037f05f185 --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/crypto/EncryptedHeaderContentSupplier.java @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.common.crypto; + +import java.io.IOException; + +/** + * This is used in partial decryption. Header information is required for decryption of actual encrypted content. + * Implementation of this supplier only requires first few bytes of encrypted content to be supplied. + */ +public interface EncryptedHeaderContentSupplier { + + /** + * @param start Start position of the encrypted content (Generally supplied as 0 during usage) + * @param end End position of the header. + * @return Encrypted header content (May contain additional content which is later discarded) + * @throws IOException In case content fetch fails. + */ + byte[] supply(long start, long end) throws IOException; +} diff --git a/libs/common/src/main/java/org/opensearch/common/crypto/MasterKeyProvider.java b/libs/common/src/main/java/org/opensearch/common/crypto/MasterKeyProvider.java new file mode 100644 index 0000000000000..31d2dcd0dba3d --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/crypto/MasterKeyProvider.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.common.crypto; + +import org.opensearch.common.annotation.ExperimentalApi; + +import java.io.Closeable; +import java.util.Map; + +/** + * Master key provider responsible for management of master keys. + * + * @opensearch.experimental + */ +@ExperimentalApi +public interface MasterKeyProvider extends Closeable { + + /** + * Returns data key pair + * @return data key pair generated by master key. + */ + DataKeyPair generateDataPair(); + + /** + * Returns decrypted key against the encrypted key. + * @param encryptedKey Key to decrypt + * @return Decrypted version of key. + */ + byte[] decryptKey(byte[] encryptedKey); + + /** + * Returns key id. + * @return key id + */ + String getKeyId(); + + /** + * Returns encryption context associated with this master key. + * @return encryption context associated with this master key. + */ + Map<String, String> getEncryptionContext(); +} diff --git a/libs/common/src/main/java/org/opensearch/common/crypto/package-info.java b/libs/common/src/main/java/org/opensearch/common/crypto/package-info.java new file mode 100644 index 0000000000000..c744689ebf532 --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/crypto/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Common crypto utilities used across opensearch. 
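DataKeyPair and MasterKeyProvider combine into the usual envelope-encryption flow. A minimal sketch, using only the methods declared above; the equality check at the end is an expected (not documented) contract that decryptKey inverts the wrapping.

import java.util.Arrays;
import org.opensearch.common.crypto.DataKeyPair;
import org.opensearch.common.crypto.MasterKeyProvider;

final class EnvelopeSketch {
    static byte[] unwrapLater(MasterKeyProvider provider) {
        // One data key per object: the raw key encrypts content; only the encrypted
        // ("wrapped") key is persisted next to the ciphertext.
        DataKeyPair pair = provider.generateDataPair();
        byte[] persisted = pair.getEncryptedKey();

        // On read, the master key unwraps the persisted key.
        byte[] rawKey = provider.decryptKey(persisted);
        assert Arrays.equals(rawKey, pair.getRawKey()); // expected round-trip contract
        return rawKey;
    }
}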
*/ +package org.opensearch.common.crypto; diff --git a/libs/common/src/main/java/org/opensearch/common/hash/T1ha1.java b/libs/common/src/main/java/org/opensearch/common/hash/T1ha1.java new file mode 100644 index 0000000000000..07b2306eda4e5 --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/hash/T1ha1.java @@ -0,0 +1,277 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.hash; + +import org.opensearch.common.annotation.InternalApi; + +import java.lang.invoke.MethodHandle; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; +import java.lang.invoke.VarHandle; +import java.nio.ByteOrder; + +import static java.lang.Long.rotateRight; + +/** + * t1ha: Fast Positive Hash + * + * <p> + * Implements <a href="https://github.com/erthink/t1ha#t1ha1--64-bits-baseline-fast-portable-hash">t1ha1</a>; + * a fast portable hash function with reasonable quality for checksums, hash tables, and thin fingerprinting. + * + * <p> + * To overcome language and performance limitations, this implementation differs slightly from the + * <a href="https://github.com/erthink/t1ha/blob/master/src/t1ha1.c">reference implementation</a> in C++, + * so the returned values may vary before JDK 18. + * + * <p> + * Intended for little-endian systems but returns the same result on big-endian, albeit marginally slower. + * + * @opensearch.internal + */ +@InternalApi +public final class T1ha1 { + private static final long SEED = System.nanoTime(); + private static final Mux64 MUX_64_IMPL = fastestMux64Impl(); + + private static final VarHandle LONG_HANDLE = MethodHandles.byteArrayViewVarHandle(long[].class, ByteOrder.LITTLE_ENDIAN); + private static final VarHandle INT_HANDLE = MethodHandles.byteArrayViewVarHandle(int[].class, ByteOrder.LITTLE_ENDIAN); + private static final VarHandle SHORT_HANDLE = MethodHandles.byteArrayViewVarHandle(short[].class, ByteOrder.LITTLE_ENDIAN); + + // "Magic" primes: + private static final long p0 = 0xEC99BF0D8372CAABL; + private static final long p1 = 0x82434FE90EDCEF39L; + private static final long p2 = 0xD4F06DB99D67BE4BL; + private static final long p3 = 0xBD9CACC22C6E9571L; + private static final long p4 = 0x9C06FAF4D023E3ABL; + private static final long p5 = 0xC060724A8424F345L; + private static final long p6 = 0xCB5AF53AE3AAAC31L; + + // Rotations: + private static final int s0 = 41; + private static final int s1 = 17; + private static final int s2 = 31; + + /** + * No public constructor. + */ + private T1ha1() {} + + /** + * Returns the hash code for the specified range of the given {@code byte} array. + * @param input the input byte array + * @param offset the starting offset + * @param length the length of the range + * @return hash code + */ + public static long hash(byte[] input, int offset, int length) { + return hash(input, offset, length, SEED); + } + + /** + * Returns the hash code for the specified range of the given {@code byte} array. 
+ * @param input the input byte array + * @param offset the starting offset + * @param length the length of the range + * @param seed customized seed + * @return hash code + */ + public static long hash(byte[] input, int offset, int length, long seed) { + long a = seed; + long b = length; + + if (length > 32) { + long c = rotateRight(length, s1) + seed; + long d = length ^ rotateRight(seed, s1); + + do { + long w0 = fetch64(input, offset); + long w1 = fetch64(input, offset + 8); + long w2 = fetch64(input, offset + 16); + long w3 = fetch64(input, offset + 24); + + long d02 = w0 ^ rotateRight(w2 + d, s1); + long c13 = w1 ^ rotateRight(w3 + c, s1); + c += a ^ rotateRight(w0, s0); + d -= b ^ rotateRight(w1, s2); + a ^= p1 * (d02 + w3); + b ^= p0 * (c13 + w2); + + offset += 32; + length -= 32; + } while (length >= 32); + + a ^= p6 * (rotateRight(c, s1) + d); + b ^= p5 * (rotateRight(d, s1) + c); + } + + return h32(input, offset, length, a, b); + } + + /** + * Computes the hash of up to 32 bytes. + * Constants in the switch expression are dense; JVM will use them as indices into a table of + * instruction pointers (tableswitch instruction), making lookups really fast. + */ + @SuppressWarnings("fallthrough") + private static long h32(byte[] input, int offset, int length, long a, long b) { + switch (length) { + default: + b += mux64(fetch64(input, offset), p4); + offset += 8; + length -= 8; + case 24: + case 23: + case 22: + case 21: + case 20: + case 19: + case 18: + case 17: + a += mux64(fetch64(input, offset), p3); + offset += 8; + length -= 8; + case 16: + case 15: + case 14: + case 13: + case 12: + case 11: + case 10: + case 9: + b += mux64(fetch64(input, offset), p2); + offset += 8; + length -= 8; + case 8: + case 7: + case 6: + case 5: + case 4: + case 3: + case 2: + case 1: + a += mux64(tail64(input, offset, length), p1); + case 0: + // Final weak avalanche + return mux64(rotateRight(a + b, s1), p4) + mix64(a ^ b, p0); + } + } + + /** + * XOR the high and low parts of the full 128-bit product. + */ + private static long mux64(long a, long b) { + return MUX_64_IMPL.mux64(a, b); + } + + /** + * XOR-MUL-XOR bit-mixer. + */ + private static long mix64(long a, long b) { + a *= b; + return a ^ rotateRight(a, s0); + } + + /** + * Reads "length" bytes starting at "offset" in little-endian order; returned as long. + * It is assumed that the length is between 1 and 8 (inclusive); but no defensive checks are made as such. + */ + private static long tail64(byte[] input, int offset, int length) { + switch (length) { + case 1: + return fetch8(input, offset); + case 2: + return fetch16(input, offset); + case 3: + return fetch16(input, offset) | (fetch8(input, offset + 2) << 16); + case 4: + return fetch32(input, offset); + case 5: + return fetch32(input, offset) | (fetch8(input, offset + 4) << 32); + case 6: + return fetch32(input, offset) | (fetch16(input, offset + 4) << 32); + case 7: + // This is equivalent to: + // return fetch32(input, offset) | (fetch16(input, offset + 4) << 32) | (fetch8(input, offset + 6) << 48); + // But reading two ints overlapping by one byte is faster due to lesser instructions. + return fetch32(input, offset) | (fetch32(input, offset + 3) << 24); + default: + return fetch64(input, offset); + } + } + + /** + * Reads a 64-bit long. + */ + private static long fetch64(byte[] input, int offset) { + return (long) LONG_HANDLE.get(input, offset); + } + + /** + * Reads a 32-bit unsigned integer, returned as long. 
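As a sanity illustration of the little-endian tail reads above: the 3-byte case composes a 16-bit read with an 8-bit read shifted left by 16. This standalone snippet re-derives that case with plain arithmetic.

final class Tail64Sketch {
    public static void main(String[] args) {
        byte[] in = { 0x01, 0x02, 0x03 };
        long fetch16 = (in[0] & 0xFFL) | ((in[1] & 0xFFL) << 8); // little-endian short
        long fetch8 = in[2] & 0xFFL;
        long tail = fetch16 | (fetch8 << 16);                    // mirrors case 3 of tail64
        System.out.println(Long.toHexString(tail));              // prints 30201
    }
}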
+ */ + private static long fetch32(byte[] input, int offset) { + return (int) INT_HANDLE.get(input, offset) & 0xFFFFFFFFL; + } + + /** + * Reads a 16-bit unsigned short, returned as long. + */ + private static long fetch16(byte[] input, int offset) { + return (short) SHORT_HANDLE.get(input, offset) & 0xFFFFL; + } + + /** + * Reads an 8-bit unsigned byte, returned as long. + */ + private static long fetch8(byte[] input, int offset) { + return input[offset] & 0xFFL; + } + + /** + * The implementation of mux64. + */ + @FunctionalInterface + private interface Mux64 { + long mux64(long a, long b); + } + + /** + * Provides the fastest available implementation of mux64 on this platform. + * + * <p> + * Ideally, the following should be returned to match the reference implementation: + * {@code Math.unsignedMultiplyHigh(a, b) ^ (a * b)} + * + * <p> + * Since unsignedMultiplyHigh isn't available before JDK 18, and calculating it without intrinsics is quite slow, + * the multiplyHigh method is used instead. Slight loss in quality is imperceptible for our use-case: a hash table. + * {@code Math.multiplyHigh(a, b) ^ (a * b)} + * + * <p> + * This indirection can be removed once we stop supporting older JDKs. + */ + private static Mux64 fastestMux64Impl() { + try { + final MethodHandle unsignedMultiplyHigh = MethodHandles.publicLookup() + .findStatic(Math.class, "unsignedMultiplyHigh", MethodType.methodType(long.class, long.class, long.class)); + return (a, b) -> { + try { + return (long) unsignedMultiplyHigh.invokeExact(a, b) ^ (a * b); + } catch (Throwable e) { + throw new RuntimeException(e); + } + }; + } catch (NoSuchMethodException e) { + return (a, b) -> Math.multiplyHigh(a, b) ^ (a * b); + } catch (IllegalAccessException e) { + throw new RuntimeException(e); + } + } +} diff --git a/libs/common/src/main/java/org/opensearch/common/hash/package-info.java b/libs/common/src/main/java/org/opensearch/common/hash/package-info.java new file mode 100644 index 0000000000000..bd393b8b921ed --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/hash/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Common hashing utilities. 
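A small usage sketch for the hash function, using only the two public overloads declared above. Note that the default seed is captured once per JVM (System.nanoTime() at class load), so only the seeded overload produces values that are reproducible across runs.

import java.nio.charset.StandardCharsets;
import org.opensearch.common.hash.T1ha1;

final class HashSketch {
    public static void main(String[] args) {
        byte[] key = "field-name".getBytes(StandardCharsets.UTF_8);
        long perProcess = T1ha1.hash(key, 0, key.length);     // stable within this JVM only
        long reproducible = T1ha1.hash(key, 0, key.length, 42L); // stable across runs
        System.out.println(perProcess + " " + reproducible);
    }
}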
+ */ +package org.opensearch.common.hash; diff --git a/libs/common/src/main/java/org/opensearch/common/io/InputStreamContainer.java b/libs/common/src/main/java/org/opensearch/common/io/InputStreamContainer.java index eb8a4e1382497..3095336338f7f 100644 --- a/libs/common/src/main/java/org/opensearch/common/io/InputStreamContainer.java +++ b/libs/common/src/main/java/org/opensearch/common/io/InputStreamContainer.java @@ -8,13 +8,16 @@ package org.opensearch.common.io; +import org.opensearch.common.annotation.ExperimentalApi; + import java.io.InputStream; /** * Model composed of an input stream and the total content length of the stream * - * @opensearch.internal + * @opensearch.experimental */ +@ExperimentalApi public class InputStreamContainer { private final InputStream inputStream; diff --git a/libs/common/src/main/java/org/opensearch/common/io/PathUtils.java b/libs/common/src/main/java/org/opensearch/common/io/PathUtils.java index b3526859933ec..ed8d50892b74a 100644 --- a/libs/common/src/main/java/org/opensearch/common/io/PathUtils.java +++ b/libs/common/src/main/java/org/opensearch/common/io/PathUtils.java @@ -93,7 +93,7 @@ public static Path get(URI uri) { /** * Tries to resolve the given path against the list of available roots. - * + * <p> * If path starts with one of the listed roots, it returned back by this method, otherwise null is returned. */ public static Path get(Path[] roots, String path) { @@ -109,7 +109,7 @@ public static Path get(Path[] roots, String path) { /** * Tries to resolve the given file uri against the list of available roots. - * + * <p> * If uri starts with one of the listed roots, it returned back by this method, otherwise null is returned. */ public static Path get(Path[] roots, URI uri) { diff --git a/libs/common/src/main/java/org/opensearch/common/lease/Releasable.java b/libs/common/src/main/java/org/opensearch/common/lease/Releasable.java index 30bea6185febc..dfc4fefb9ee55 100644 --- a/libs/common/src/main/java/org/opensearch/common/lease/Releasable.java +++ b/libs/common/src/main/java/org/opensearch/common/lease/Releasable.java @@ -32,13 +32,16 @@ package org.opensearch.common.lease; +import org.opensearch.common.annotation.PublicApi; + import java.io.Closeable; /** * Specialization of {@link AutoCloseable} for calls that might not throw a checked exception. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface Releasable extends Closeable { @Override diff --git a/server/src/main/java/org/opensearch/common/component/AbstractLifecycleComponent.java b/libs/common/src/main/java/org/opensearch/common/lifecycle/AbstractLifecycleComponent.java similarity index 98% rename from server/src/main/java/org/opensearch/common/component/AbstractLifecycleComponent.java rename to libs/common/src/main/java/org/opensearch/common/lifecycle/AbstractLifecycleComponent.java index 837f8af44bf13..111556fbe43cf 100644 --- a/server/src/main/java/org/opensearch/common/component/AbstractLifecycleComponent.java +++ b/libs/common/src/main/java/org/opensearch/common/lifecycle/AbstractLifecycleComponent.java @@ -30,7 +30,7 @@ * GitHub history for details. 
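Because Releasable#close() drops the checked exception declared by Closeable, it composes cleanly with try-with-resources and needs no catch block. A minimal sketch (Releasable has a single abstract method, so a lambda suffices):

import org.opensearch.common.lease.Releasable;

final class ReleasableSketch {
    static void withResource() {
        Releasable lock = () -> System.out.println("released");
        // No checked exception to handle, unlike a plain Closeable.
        try (lock) {
            System.out.println("holding");
        }
    }
}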
*/ -package org.opensearch.common.component; +package org.opensearch.common.lifecycle; import java.io.IOException; import java.io.UncheckedIOException; diff --git a/server/src/main/java/org/opensearch/common/component/Lifecycle.java b/libs/common/src/main/java/org/opensearch/common/lifecycle/Lifecycle.java similarity index 97% rename from server/src/main/java/org/opensearch/common/component/Lifecycle.java rename to libs/common/src/main/java/org/opensearch/common/lifecycle/Lifecycle.java index fb12c1fc9ac4b..c1cf9b2998a13 100644 --- a/server/src/main/java/org/opensearch/common/component/Lifecycle.java +++ b/libs/common/src/main/java/org/opensearch/common/lifecycle/Lifecycle.java @@ -30,7 +30,9 @@ * GitHub history for details. */ -package org.opensearch.common.component; +package org.opensearch.common.lifecycle; + +import org.opensearch.common.annotation.PublicApi; /** * Lifecycle state. Allows the following transitions: @@ -73,15 +75,17 @@ * } * </pre> * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Lifecycle { /** * State in the lifecycle * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum State { INITIALIZED, STOPPED, diff --git a/server/src/main/java/org/opensearch/common/component/LifecycleComponent.java b/libs/common/src/main/java/org/opensearch/common/lifecycle/LifecycleComponent.java similarity index 91% rename from server/src/main/java/org/opensearch/common/component/LifecycleComponent.java rename to libs/common/src/main/java/org/opensearch/common/lifecycle/LifecycleComponent.java index 984d55df1bdfa..781c276fefe13 100644 --- a/server/src/main/java/org/opensearch/common/component/LifecycleComponent.java +++ b/libs/common/src/main/java/org/opensearch/common/lifecycle/LifecycleComponent.java @@ -30,15 +30,17 @@ * GitHub history for details. */ -package org.opensearch.common.component; +package org.opensearch.common.lifecycle; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lease.Releasable; /** * Base interface for a lifecycle component. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface LifecycleComponent extends Releasable { Lifecycle.State lifecycleState(); diff --git a/server/src/main/java/org/opensearch/common/component/LifecycleListener.java b/libs/common/src/main/java/org/opensearch/common/lifecycle/LifecycleListener.java similarity index 97% rename from server/src/main/java/org/opensearch/common/component/LifecycleListener.java rename to libs/common/src/main/java/org/opensearch/common/lifecycle/LifecycleListener.java index 89c344b955bc9..7ac41a5eb0df0 100644 --- a/server/src/main/java/org/opensearch/common/component/LifecycleListener.java +++ b/libs/common/src/main/java/org/opensearch/common/lifecycle/LifecycleListener.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.common.component; +package org.opensearch.common.lifecycle; /** * Base lifecycle listener. diff --git a/libs/common/src/main/java/org/opensearch/common/lifecycle/package-info.java b/libs/common/src/main/java/org/opensearch/common/lifecycle/package-info.java new file mode 100644 index 0000000000000..1bedde5585e36 --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/lifecycle/package-info.java @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
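For context on the now-public lifecycle API, a sketch of a typical component. The doStart/doStop/doClose template methods and the start() entry point are assumed from the conventional AbstractLifecycleComponent contract, since their declarations are not part of this hunk; the STARTED state name is likewise assumed.

import java.io.IOException;
import org.opensearch.common.lifecycle.AbstractLifecycleComponent;
import org.opensearch.common.lifecycle.Lifecycle;

final class PingService extends AbstractLifecycleComponent {
    @Override
    protected void doStart() { /* open resources, schedule work */ }

    @Override
    protected void doStop() { /* stop accepting work */ }

    @Override
    protected void doClose() throws IOException { /* release resources */ }

    static void demo() {
        try (PingService service = new PingService()) {
            service.start(); // INITIALIZED -> STARTED, per the Lifecycle javadoc
            assert service.lifecycleState() == Lifecycle.State.STARTED;
        } // close(): STARTED -> STOPPED -> CLOSED
    }
}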
+ */ + +/** + * Foundation implementation for an object lifecycle. + * + * See {@link org.opensearch.common.lifecycle.Lifecycle} for example usage + * + * @opensearch.internal + */ +package org.opensearch.common.lifecycle; diff --git a/server/src/main/java/org/opensearch/common/network/Cidrs.java b/libs/common/src/main/java/org/opensearch/common/network/Cidrs.java similarity index 100% rename from server/src/main/java/org/opensearch/common/network/Cidrs.java rename to libs/common/src/main/java/org/opensearch/common/network/Cidrs.java diff --git a/server/src/main/java/org/opensearch/common/network/InetAddresses.java b/libs/common/src/main/java/org/opensearch/common/network/InetAddresses.java similarity index 99% rename from server/src/main/java/org/opensearch/common/network/InetAddresses.java rename to libs/common/src/main/java/org/opensearch/common/network/InetAddresses.java index a4fbc6cb65b0d..60c0717a28f05 100644 --- a/server/src/main/java/org/opensearch/common/network/InetAddresses.java +++ b/libs/common/src/main/java/org/opensearch/common/network/InetAddresses.java @@ -52,7 +52,7 @@ public static boolean isInetAddress(String ipString) { return ipStringToBytes(ipString) != null; } - private static byte[] ipStringToBytes(String ipString) { + public static byte[] ipStringToBytes(String ipString) { // Make a first pass to categorize the characters in this string. boolean hasColon = false; boolean hasDot = false; @@ -368,7 +368,7 @@ public static InetAddress forString(String ipString) { /** * Convert a byte array into an InetAddress. - * + * <p> * {@link InetAddress#getByAddress} is documented as throwing a checked * exception "if IP address is of illegal length." We replace it with * an unchecked exception, for use by callers who already know that addr @@ -423,7 +423,7 @@ public static Tuple<InetAddress, Integer> parseCidr(String maskedAddress) { /** * Given an address and prefix length, returns the string representation of the range in CIDR notation. - * + * <p> * See {@link #toAddrString} for details on how the address is represented. */ public static String toCidrString(InetAddress address, int prefixLength) { diff --git a/server/src/main/java/org/opensearch/common/network/NetworkAddress.java b/libs/common/src/main/java/org/opensearch/common/network/NetworkAddress.java similarity index 100% rename from server/src/main/java/org/opensearch/common/network/NetworkAddress.java rename to libs/common/src/main/java/org/opensearch/common/network/NetworkAddress.java diff --git a/libs/common/src/main/java/org/opensearch/common/network/package-info.java b/libs/common/src/main/java/org/opensearch/common/network/package-info.java new file mode 100644 index 0000000000000..92e4eac5bde42 --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/network/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
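With ipStringToBytes now public and the CIDR helpers visible in the context lines above, a short usage sketch (the org.opensearch.common.collect.Tuple import path is assumed):

import java.net.InetAddress;
import org.opensearch.common.collect.Tuple; // import path assumed
import org.opensearch.common.network.InetAddresses;

final class CidrSketch {
    public static void main(String[] args) {
        Tuple<InetAddress, Integer> cidr = InetAddresses.parseCidr("10.0.0.0/8");
        // Round-trip back to canonical CIDR notation.
        System.out.println(InetAddresses.toCidrString(cidr.v1(), cidr.v2())); // 10.0.0.0/8
        // ipStringToBytes is now public; null signals "not a valid IP literal".
        System.out.println(InetAddresses.ipStringToBytes("not-an-ip") == null); // true
    }
}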
+ */ + +/** common network layer classes used across the code base */ +package org.opensearch.common.network; diff --git a/server/src/main/java/org/opensearch/common/recycler/AbstractRecycler.java b/libs/common/src/main/java/org/opensearch/common/recycler/AbstractRecycler.java similarity index 100% rename from server/src/main/java/org/opensearch/common/recycler/AbstractRecycler.java rename to libs/common/src/main/java/org/opensearch/common/recycler/AbstractRecycler.java diff --git a/server/src/main/java/org/opensearch/common/recycler/AbstractRecyclerC.java b/libs/common/src/main/java/org/opensearch/common/recycler/AbstractRecyclerC.java similarity index 100% rename from server/src/main/java/org/opensearch/common/recycler/AbstractRecyclerC.java rename to libs/common/src/main/java/org/opensearch/common/recycler/AbstractRecyclerC.java diff --git a/server/src/main/java/org/opensearch/common/recycler/ConcurrentDequeRecycler.java b/libs/common/src/main/java/org/opensearch/common/recycler/ConcurrentDequeRecycler.java similarity index 100% rename from server/src/main/java/org/opensearch/common/recycler/ConcurrentDequeRecycler.java rename to libs/common/src/main/java/org/opensearch/common/recycler/ConcurrentDequeRecycler.java diff --git a/server/src/main/java/org/opensearch/common/recycler/DequeRecycler.java b/libs/common/src/main/java/org/opensearch/common/recycler/DequeRecycler.java similarity index 100% rename from server/src/main/java/org/opensearch/common/recycler/DequeRecycler.java rename to libs/common/src/main/java/org/opensearch/common/recycler/DequeRecycler.java diff --git a/server/src/main/java/org/opensearch/common/recycler/FilterRecycler.java b/libs/common/src/main/java/org/opensearch/common/recycler/FilterRecycler.java similarity index 100% rename from server/src/main/java/org/opensearch/common/recycler/FilterRecycler.java rename to libs/common/src/main/java/org/opensearch/common/recycler/FilterRecycler.java diff --git a/server/src/main/java/org/opensearch/common/recycler/NoneRecycler.java b/libs/common/src/main/java/org/opensearch/common/recycler/NoneRecycler.java similarity index 100% rename from server/src/main/java/org/opensearch/common/recycler/NoneRecycler.java rename to libs/common/src/main/java/org/opensearch/common/recycler/NoneRecycler.java diff --git a/server/src/main/java/org/opensearch/common/recycler/Recycler.java b/libs/common/src/main/java/org/opensearch/common/recycler/Recycler.java similarity index 100% rename from server/src/main/java/org/opensearch/common/recycler/Recycler.java rename to libs/common/src/main/java/org/opensearch/common/recycler/Recycler.java diff --git a/server/src/main/java/org/opensearch/common/recycler/Recyclers.java b/libs/common/src/main/java/org/opensearch/common/recycler/Recyclers.java similarity index 98% rename from server/src/main/java/org/opensearch/common/recycler/Recyclers.java rename to libs/common/src/main/java/org/opensearch/common/recycler/Recyclers.java index 4cbb80509d6a1..52587144369f1 100644 --- a/server/src/main/java/org/opensearch/common/recycler/Recyclers.java +++ b/libs/common/src/main/java/org/opensearch/common/recycler/Recyclers.java @@ -75,6 +75,8 @@ public static <T> Recycler.Factory<T> dequeFactory(final Recycler.C<T> c, final /** * Wrap the provided recycler so that calls to {@link Recycler#obtain()} and {@link Recycler.V#close()} are protected by * a lock. 
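A hedged sketch of how the relocated recycler classes fit together, using only dequeFactory, concurrent, and obtain from the hunk above; the AbstractRecyclerC method signatures (newInstance(int), recycle) are assumed from the moved classes and are not shown in this diff.

import org.opensearch.common.recycler.AbstractRecyclerC;
import org.opensearch.common.recycler.Recycler;
import org.opensearch.common.recycler.Recyclers;

final class RecyclerSketch {
    static void demo() {
        // How to build/reset pooled entries; signatures assumed from the moved AbstractRecyclerC.
        final Recycler.C<byte[]> pages = new AbstractRecyclerC<byte[]>() {
            @Override
            public byte[] newInstance(int sizing) {
                return new byte[4096];
            }

            @Override
            public void recycle(byte[] value) {
                // optionally clear the buffer before it is pooled again
            }
        };
        // Stripe bounded deque pools across 4 sub-recyclers to reduce contention,
        // matching the concurrent(factory, concurrencyLevel) method above.
        final Recycler<byte[]> pool = Recyclers.concurrent(Recyclers.dequeFactory(pages, 16), 4);
        try (Recycler.V<byte[]> v = pool.obtain()) {
            final byte[] buffer = v.v(); // returned to the pool when v is closed
        }
    }
}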
+ * + * @opensearch.internal */ public static <T> Recycler<T> locked(final Recycler<T> recycler) { return new FilterRecycler<T>() { @@ -140,7 +142,7 @@ public static <T> Recycler<T> concurrent(final Recycler.Factory<T> factory, fina private final Recycler<T>[] recyclers; { - @SuppressWarnings("unchecked") + @SuppressWarnings({ "rawtypes", "unchecked" }) final Recycler<T>[] recyclers = new Recycler[concurrencyLevel]; this.recyclers = recyclers; for (int i = 0; i < concurrencyLevel; ++i) { diff --git a/libs/common/src/main/java/org/opensearch/common/recycler/package-info.java b/libs/common/src/main/java/org/opensearch/common/recycler/package-info.java new file mode 100644 index 0000000000000..fec3c5d5e52d3 --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/recycler/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Common Recycler functionality for recycling objects */ +package org.opensearch.common.recycler; diff --git a/libs/common/src/main/java/org/opensearch/common/round/BidirectionalLinearSearcher.java b/libs/common/src/main/java/org/opensearch/common/round/BidirectionalLinearSearcher.java new file mode 100644 index 0000000000000..5c3dcf2bd4708 --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/round/BidirectionalLinearSearcher.java @@ -0,0 +1,59 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.round; + +import org.opensearch.common.annotation.InternalApi; + +/** + * It uses linear search on a sorted array of pre-computed round-down points. + * For small inputs (≤ 64 elements), this can be much faster than binary search as it avoids the penalty of + * branch mispredictions and pipeline stalls, and accesses memory sequentially. + * + * <p> + * It uses "meet in the middle" linear search to avoid the worst case scenario when the desired element is present + * at either side of the array. This is helpful for time-series data where velocity increases over time, so more + * documents are likely to find a greater timestamp which is likely to be present on the right end of the array. 
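A standalone illustration of the "meet in the middle" layout described above: for 7 sorted values, both probe arrays have length (7 + 1) >>> 1 = 4 and overlap on the middle element, so each loop iteration tests one candidate from each end.

final class MeetInMiddleSketch {
    public static void main(String[] args) {
        long[] values = { 1, 2, 3, 4, 5, 6, 7 };
        int size = values.length, len = (size + 1) >>> 1; // rounded up for odd sizes
        long[] ascending = new long[len], descending = new long[len];
        for (int i = 0; i < len; i++) {
            ascending[i] = values[i];             // probes from the left: 1, 2, 3, 4
            descending[i] = values[size - i - 1]; // probes from the right: 7, 6, 5, 4
        }
        // A key near either boundary of the array is resolved after only a few steps.
        for (int i = 0; i < len; i++) {
            System.out.println(ascending[i] + " / " + descending[i]);
        }
    }
}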
+ * + * @opensearch.internal + */ +@InternalApi +class BidirectionalLinearSearcher implements Roundable { + private final long[] ascending; + private final long[] descending; + + BidirectionalLinearSearcher(long[] values, int size) { + if (size <= 0) { + throw new IllegalArgumentException("at least one value must be present"); + } + + int len = (size + 1) >>> 1; // rounded-up to handle odd number of values + ascending = new long[len]; + descending = new long[len]; + + for (int i = 0; i < len; i++) { + ascending[i] = values[i]; + descending[i] = values[size - i - 1]; + } + } + + @Override + public long floor(long key) { + int i = 0; + for (; i < ascending.length; i++) { + if (descending[i] <= key) { + return descending[i]; + } + if (ascending[i] > key) { + assert i > 0 : "key must be greater than or equal to " + ascending[0]; + return ascending[i - 1]; + } + } + return ascending[i - 1]; + } +} diff --git a/libs/common/src/main/java/org/opensearch/common/round/BinarySearcher.java b/libs/common/src/main/java/org/opensearch/common/round/BinarySearcher.java new file mode 100644 index 0000000000000..b9d76945115ed --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/round/BinarySearcher.java @@ -0,0 +1,43 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.round; + +import org.opensearch.common.annotation.InternalApi; + +import java.util.Arrays; + +/** + * It uses binary search on a sorted array of pre-computed round-down points. + * + * @opensearch.internal + */ +@InternalApi +class BinarySearcher implements Roundable { + private final long[] values; + private final int size; + + BinarySearcher(long[] values, int size) { + if (size <= 0) { + throw new IllegalArgumentException("at least one value must be present"); + } + + this.values = values; + this.size = size; + } + + @Override + public long floor(long key) { + int idx = Arrays.binarySearch(values, 0, size, key); + assert idx != -1 : "key must be greater than or equal to " + values[0]; + if (idx < 0) { + idx = -2 - idx; + } + return values[idx]; + } +} diff --git a/libs/common/src/main/java/org/opensearch/common/round/Roundable.java b/libs/common/src/main/java/org/opensearch/common/round/Roundable.java new file mode 100644 index 0000000000000..ae6f9b787c1e9 --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/round/Roundable.java @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.round; + +import org.opensearch.common.annotation.InternalApi; + +/** + * Interface to round-off values. + * + * @opensearch.internal + */ +@InternalApi +@FunctionalInterface +public interface Roundable { + /** + * Returns the greatest lower bound of the given key. + * In other words, it returns the largest value such that {@code value <= key}. 
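A standalone check of the insertion-point arithmetic used in BinarySearcher.floor above: when the key is absent, Arrays.binarySearch returns -(insertionPoint) - 1, so -2 - idx lands on the greatest element that is <= key.

import java.util.Arrays;

final class FloorSketch {
    public static void main(String[] args) {
        long[] values = { 0L, 10L, 20L, 30L };
        int idx = Arrays.binarySearch(values, 0, values.length, 25L);
        // Not found: binarySearch returns -(3) - 1 = -4; the floor sits just
        // before the insertion point, at index -2 - (-4) = 2.
        if (idx < 0) {
            idx = -2 - idx;
        }
        System.out.println(values[idx]); // 20
    }
}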
+ * @param key to floor + * @return the floored value + */ + long floor(long key); +} diff --git a/libs/common/src/main/java/org/opensearch/common/round/RoundableFactory.java b/libs/common/src/main/java/org/opensearch/common/round/RoundableFactory.java new file mode 100644 index 0000000000000..b7422694c3013 --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/round/RoundableFactory.java @@ -0,0 +1,39 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.round; + +import org.opensearch.common.annotation.InternalApi; + +/** + * Factory class to create and return the fastest implementation of {@link Roundable}. + * + * @opensearch.internal + */ +@InternalApi +public final class RoundableFactory { + /** + * The maximum limit up to which linear search is used, otherwise binary search is used. + * This is because linear search is much faster on small arrays. + * Benchmark results: <a href="https://github.com/opensearch-project/OpenSearch/pull/9727">PR #9727</a> + */ + private static final int LINEAR_SEARCH_MAX_SIZE = 64; + + private RoundableFactory() {} + + /** + * Creates and returns the fastest implementation of {@link Roundable}. + */ + public static Roundable create(long[] values, int size) { + if (size <= LINEAR_SEARCH_MAX_SIZE) { + return new BidirectionalLinearSearcher(values, size); + } else { + return new BinarySearcher(values, size); + } + } +} diff --git a/libs/common/src/main/java/org/opensearch/common/round/package-info.java b/libs/common/src/main/java/org/opensearch/common/round/package-info.java new file mode 100644 index 0000000000000..e79c4017de31b --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/round/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Contains classes to round-off values. + */ +package org.opensearch.common.round; diff --git a/server/src/main/java/org/opensearch/common/transport/NetworkExceptionHelper.java b/libs/common/src/main/java/org/opensearch/common/transport/NetworkExceptionHelper.java similarity index 100% rename from server/src/main/java/org/opensearch/common/transport/NetworkExceptionHelper.java rename to libs/common/src/main/java/org/opensearch/common/transport/NetworkExceptionHelper.java diff --git a/server/src/main/java/org/opensearch/common/transport/PortsRange.java b/libs/common/src/main/java/org/opensearch/common/transport/PortsRange.java similarity index 100% rename from server/src/main/java/org/opensearch/common/transport/PortsRange.java rename to libs/common/src/main/java/org/opensearch/common/transport/PortsRange.java diff --git a/libs/common/src/main/java/org/opensearch/common/transport/package-info.java b/libs/common/src/main/java/org/opensearch/common/transport/package-info.java new file mode 100644 index 0000000000000..7d28ac6c60a14 --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/transport/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
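Putting Roundable and RoundableFactory together, a small usage sketch in the spirit of date-histogram bucketing (the boundary values are illustrative):

import org.opensearch.common.round.Roundable;
import org.opensearch.common.round.RoundableFactory;

final class BucketSketch {
    public static void main(String[] args) {
        long[] boundaries = { 0L, 3_600_000L, 7_200_000L, 10_800_000L }; // hourly buckets (ms)
        // 4 values <= 64, so the factory picks the linear searcher here.
        Roundable roundable = RoundableFactory.create(boundaries, boundaries.length);
        System.out.println(roundable.floor(5_000_000L)); // 3600000
    }
}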
+ */ + +/** common transport layer classes used across the code base */ +package org.opensearch.common.transport; diff --git a/libs/common/src/main/java/org/opensearch/common/unit/TimeValue.java b/libs/common/src/main/java/org/opensearch/common/unit/TimeValue.java index 670275397893c..30ed5bf63a748 100644 --- a/libs/common/src/main/java/org/opensearch/common/unit/TimeValue.java +++ b/libs/common/src/main/java/org/opensearch/common/unit/TimeValue.java @@ -32,6 +32,8 @@ package org.opensearch.common.unit; +import org.opensearch.common.annotation.PublicApi; + import java.util.Locale; import java.util.Objects; import java.util.concurrent.TimeUnit; @@ -41,6 +43,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") public class TimeValue implements Comparable<TimeValue> { /** How many nano-seconds in one milli-second */ @@ -221,10 +224,10 @@ public double getDaysFrac() { /** * Returns a {@link String} representation of the current {@link TimeValue}. - * + * <p> * Note that this method might produce fractional time values (ex 1.6m) which cannot be * parsed by method like {@link TimeValue#parse(String, String, String, String)}. - * + * <p> * Also note that the maximum string value that will be generated is * {@code 106751.9d} due to the way that values are internally converted * to nanoseconds (106751.9 days is Long.MAX_VALUE nanoseconds) @@ -236,12 +239,12 @@ public String toString() { /** * Returns a {@link String} representation of the current {@link TimeValue}. - * + * <p> * Note that this method might produce fractional time values (ex 1.6m) which cannot be * parsed by method like {@link TimeValue#parse(String, String, String, String)}. The number of * fractional decimals (up to 10 maximum) are truncated to the number of fraction pieces * specified. - * + * <p> * Also note that the maximum string value that will be generated is * {@code 106751.9d} due to the way that values are internally converted * to nanoseconds (106751.9 days is Long.MAX_VALUE nanoseconds) diff --git a/libs/common/src/main/java/org/opensearch/common/util/BitMixer.java b/libs/common/src/main/java/org/opensearch/common/util/BitMixer.java index 8762217916c7a..d6ea4fa359df3 100644 --- a/libs/common/src/main/java/org/opensearch/common/util/BitMixer.java +++ b/libs/common/src/main/java/org/opensearch/common/util/BitMixer.java @@ -25,9 +25,9 @@ /** * Bit mixing utilities from carrotsearch.hppc. - * + * <p> * Licensed under ALv2. This is pulled in directly to avoid a full hppc dependency. - * + * <p> * The purpose of these methods is to evenly distribute key space over int32 * range. */ @@ -111,7 +111,7 @@ public static int mix32(int k) { /** * Computes David Stafford variant 9 of 64bit mix function (MH3 finalization step, * with different shifts and constants). - * + * <p> * Variant 9 is picked because it contains two 32-bit shifts which could be possibly * optimized into better machine code. 
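The fractional-representation caveat from the TimeValue javadoc above, observed in practice: 96,000 ms is 1.6 minutes, so toString() emits a value that the parse methods mentioned there cannot read back (a minimal sketch).

import org.opensearch.common.unit.TimeValue;

final class TimeValueSketch {
    public static void main(String[] args) {
        TimeValue t = TimeValue.timeValueMillis(96_000);
        System.out.println(t);             // 1.6m (fractional, not re-parseable)
        System.out.println(t.getMillis()); // 96000
    }
}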
* diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ConcurrentCollections.java b/libs/common/src/main/java/org/opensearch/common/util/concurrent/ConcurrentCollections.java similarity index 100% rename from server/src/main/java/org/opensearch/common/util/concurrent/ConcurrentCollections.java rename to libs/common/src/main/java/org/opensearch/common/util/concurrent/ConcurrentCollections.java diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ConcurrentHashMapLong.java b/libs/common/src/main/java/org/opensearch/common/util/concurrent/ConcurrentHashMapLong.java similarity index 100% rename from server/src/main/java/org/opensearch/common/util/concurrent/ConcurrentHashMapLong.java rename to libs/common/src/main/java/org/opensearch/common/util/concurrent/ConcurrentHashMapLong.java diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ConcurrentMapLong.java b/libs/common/src/main/java/org/opensearch/common/util/concurrent/ConcurrentMapLong.java similarity index 100% rename from server/src/main/java/org/opensearch/common/util/concurrent/ConcurrentMapLong.java rename to libs/common/src/main/java/org/opensearch/common/util/concurrent/ConcurrentMapLong.java diff --git a/libs/common/src/main/java20/org/opensearch/common/round/BtreeSearcher.java b/libs/common/src/main/java20/org/opensearch/common/round/BtreeSearcher.java new file mode 100644 index 0000000000000..626fb6e6b810e --- /dev/null +++ b/libs/common/src/main/java20/org/opensearch/common/round/BtreeSearcher.java @@ -0,0 +1,100 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.round; + +import org.opensearch.common.annotation.InternalApi; + +import jdk.incubator.vector.LongVector; +import jdk.incubator.vector.Vector; +import jdk.incubator.vector.VectorOperators; +import jdk.incubator.vector.VectorSpecies; + +/** + * It uses vectorized B-tree search to find the round-down point. + * + * @opensearch.internal + */ +@InternalApi +class BtreeSearcher implements Roundable { + private static final VectorSpecies<Long> LONG_VECTOR_SPECIES = LongVector.SPECIES_PREFERRED; + private static final int LANES = LONG_VECTOR_SPECIES.length(); + private static final int SHIFT = log2(LANES); + + private final long[] values; + private final long minValue; + + BtreeSearcher(long[] values, int size) { + if (size <= 0) { + throw new IllegalArgumentException("at least one value must be present"); + } + + int blocks = (size + LANES - 1) / LANES; // number of blocks + int length = 1 + blocks * LANES; // size of the backing array (1-indexed) + + this.minValue = values[0]; + this.values = new long[length]; + build(values, 0, size, this.values, 1); + } + + /** + * Builds the B-tree memory layout. + * It builds the tree recursively, following an in-order traversal. + * + * <p> + * Each block stores 'lanes' values at indices {@code i, i + 1, ..., i + lanes - 1} where {@code i} is the + * starting offset. The starting offset of the root block is 1. The branching factor is (1 + lanes) so each + * block can have this many children. Given the starting offset {@code i} of a block, the starting offset + * of its k-th child (ranging from {@code 0, 1, ..., lanes}) can be computed as {@code i + ((i + k) << shift)}.
+ * + * @param src is the sorted input array + * @param i is the index in the input array to read the value from + * @param size the number of values in the input array + * @param dst is the output array + * @param j is the index in the output array to write the value to + * @return the next index 'i' + */ + private static int build(long[] src, int i, int size, long[] dst, int j) { + if (j < dst.length) { + for (int k = 0; k < LANES; k++) { + i = build(src, i, size, dst, j + ((j + k) << SHIFT)); + + // Fills the B-tree as a complete tree, i.e., all levels are completely filled, + // except the last level which is filled from left to right. + // The trick is to fill the destination array between indices 1...size (inclusive / 1-indexed) + // and pad the remaining array with +infinity. + dst[j + k] = (j + k <= size) ? src[i++] : Long.MAX_VALUE; + } + i = build(src, i, size, dst, j + ((j + LANES) << SHIFT)); + } + return i; + } + + @Override + public long floor(long key) { + Vector<Long> keyVector = LongVector.broadcast(LONG_VECTOR_SPECIES, key); + int i = 1, result = 1; + + while (i < values.length) { + Vector<Long> valuesVector = LongVector.fromArray(LONG_VECTOR_SPECIES, values, i); + int j = i + valuesVector.compare(VectorOperators.GT, keyVector).firstTrue(); + result = (j > i) ? j : result; + i += (j << SHIFT); + } + + assert result > 1 : "key must be greater than or equal to " + minValue; + return values[result - 1]; + } + + private static int log2(int num) { + if ((num <= 0) || ((num & (num - 1)) != 0)) { + throw new IllegalArgumentException(num + " is not a positive power of 2"); + } + return 32 - Integer.numberOfLeadingZeros(num - 1); + } +} diff --git a/libs/common/src/main/java20/org/opensearch/common/round/RoundableFactory.java b/libs/common/src/main/java20/org/opensearch/common/round/RoundableFactory.java new file mode 100644 index 0000000000000..0709ed4374227 --- /dev/null +++ b/libs/common/src/main/java20/org/opensearch/common/round/RoundableFactory.java @@ -0,0 +1,75 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.round; + +import org.opensearch.common.annotation.InternalApi; + +/** + * Factory class to create and return the fastest implementation of {@link Roundable}. + * + * @opensearch.internal + */ +@InternalApi +public final class RoundableFactory { + /** + * The maximum limit up to which linear search is used, otherwise binary or B-tree search is used. + * This is because linear search is much faster on small arrays. + * Benchmark results: <a href="https://github.com/opensearch-project/OpenSearch/pull/9727">PR #9727</a> + */ + private static final int LINEAR_SEARCH_MAX_SIZE = 64; + + /** + * Indicates whether the vectorized (SIMD) B-tree search implementation is to be used. + * It is true when either: + * 1. The feature flag is set to "forced", or + * 2. The platform has a minimum of 4 long vector lanes and the feature flag is set to "true". 
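A standalone check of the block-offset arithmetic in the build() javadoc above, for an assumed LANES of 4 (so SHIFT is 2): the five children of the root block start at consecutive, non-overlapping offsets.

final class BtreeOffsetSketch {
    public static void main(String[] args) {
        final int lanes = 4, shift = 2;
        final int root = 1; // root block occupies indices 1..4
        for (int k = 0; k <= lanes; k++) {
            // k-th child of the block at offset i starts at i + ((i + k) << shift)
            System.out.println("child " + k + " starts at " + (root + ((root + k) << shift)));
        }
        // Prints 5, 9, 13, 17, 21: branching factor 1 + lanes, each child a block of 4.
    }
}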
+ */ + private static final boolean USE_BTREE_SEARCHER; + + /** + * This class is initialized only when: + * - JDK-20+ + * - jdk.incubator.vector.LongVector is available (--add-modules=jdk.incubator.vector is passed) + */ + private static final class VectorCheck { + final static int SPECIES_PREFERRED = jdk.incubator.vector.LongVector.SPECIES_PREFERRED.length(); + } + + static { + String simdRoundingFeatureFlag = System.getProperty("opensearch.experimental.feature.simd.rounding.enabled"); + boolean useBtreeSearcher = false; + + try { + final Class<?> incubator = Class.forName("jdk.incubator.vector.LongVector"); + + useBtreeSearcher = "forced".equalsIgnoreCase(simdRoundingFeatureFlag) + || (VectorCheck.SPECIES_PREFERRED >= 4 && "true".equalsIgnoreCase(simdRoundingFeatureFlag)); + + } catch (final ClassNotFoundException ex) { + /* do not use BtreeSearcher */ + } + + USE_BTREE_SEARCHER = useBtreeSearcher; + } + + private RoundableFactory() {} + + /** + * Creates and returns the fastest implementation of {@link Roundable}. + */ + public static Roundable create(long[] values, int size) { + if (size <= LINEAR_SEARCH_MAX_SIZE) { + return new BidirectionalLinearSearcher(values, size); + } else if (USE_BTREE_SEARCHER) { + return new BtreeSearcher(values, size); + } else { + return new BinarySearcher(values, size); + } + } +} diff --git a/libs/common/src/main/resources/META-INF/services/javax.annotation.processing.Processor b/libs/common/src/main/resources/META-INF/services/javax.annotation.processing.Processor new file mode 100644 index 0000000000000..c4e4dfed864f2 --- /dev/null +++ b/libs/common/src/main/resources/META-INF/services/javax.annotation.processing.Processor @@ -0,0 +1,12 @@ +# +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. +# + +org.opensearch.common.annotation.processor.ApiAnnotationProcessor \ No newline at end of file diff --git a/libs/common/src/test/java/org/opensearch/common/BooleansTests.java b/libs/common/src/test/java/org/opensearch/common/BooleansTests.java new file mode 100644 index 0000000000000..578ec742d126d --- /dev/null +++ b/libs/common/src/test/java/org/opensearch/common/BooleansTests.java @@ -0,0 +1,132 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.common; + +import org.opensearch.test.OpenSearchTestCase; + +import static org.hamcrest.Matchers.is; + +public class BooleansTests extends OpenSearchTestCase { + private static final String[] NON_BOOLEANS = new String[] { + "11", + "00", + "sdfsdfsf", + "F", + "T", + "on", + "off", + "yes", + "no", + "0", + "1", + "True", + "False" }; + private static final String[] BOOLEANS = new String[] { "true", "false" }; + + public void testIsBoolean() { + for (String b : BOOLEANS) { + String t = "prefix" + b + "suffix"; + assertTrue("failed to recognize [" + b + "] as boolean", Booleans.isBoolean(t.toCharArray(), "prefix".length(), b.length())); + assertTrue("failed to recognize [" + b + "] as boolean", Booleans.isBoolean(b)); + } + } + + public void testIsNonBoolean() { + assertThat(Booleans.isBoolean(null, 0, 1), is(false)); + + for (String nb : NON_BOOLEANS) { + String t = "prefix" + nb + "suffix"; + assertFalse("recognized [" + nb + "] as boolean", Booleans.isBoolean(t.toCharArray(), "prefix".length(), nb.length())); + assertFalse("recognized [" + nb + "] as boolean", Booleans.isBoolean(t)); + } + } + + public void testParseBooleanWithFallback() { + assertFalse(Booleans.parseBoolean(null, false)); + assertTrue(Booleans.parseBoolean(null, true)); + assertNull(Booleans.parseBoolean(null, null)); + assertFalse(Booleans.parseBoolean(null, Boolean.FALSE)); + assertTrue(Booleans.parseBoolean(null, Boolean.TRUE)); + + assertFalse(Booleans.parseBoolean("", false)); + assertTrue(Booleans.parseBoolean("", true)); + assertNull(Booleans.parseBoolean("", null)); + assertFalse(Booleans.parseBoolean("", Boolean.FALSE)); + assertTrue(Booleans.parseBoolean("", Boolean.TRUE)); + + assertFalse(Booleans.parseBoolean(" \t\n", false)); + assertTrue(Booleans.parseBoolean(" \t\n", true)); + assertNull(Booleans.parseBoolean(" \t\n", null)); + assertFalse(Booleans.parseBoolean(" \t\n", Boolean.FALSE)); + assertTrue(Booleans.parseBoolean(" \t\n", Boolean.TRUE)); + + assertTrue(Booleans.parseBoolean("true", randomFrom(Boolean.TRUE, Boolean.FALSE, null))); + assertFalse(Booleans.parseBoolean("false", randomFrom(Boolean.TRUE, Boolean.FALSE, null))); + + assertTrue(Booleans.parseBoolean(new char[0], 0, 0, true)); + assertFalse(Booleans.parseBoolean(new char[0], 0, 0, false)); + } + + public void testParseNonBooleanWithFallback() { + for (String nonBoolean : NON_BOOLEANS) { + boolean defaultValue = randomFrom(Boolean.TRUE, Boolean.FALSE); + + expectThrows(IllegalArgumentException.class, () -> Booleans.parseBoolean(nonBoolean, defaultValue)); + expectThrows( + IllegalArgumentException.class, + () -> Booleans.parseBoolean(nonBoolean.toCharArray(), 0, nonBoolean.length(), defaultValue) + ); + } + } + + public void testParseBoolean() { + assertTrue(Booleans.parseBoolean("true")); + assertFalse(Booleans.parseBoolean("false")); + } + + public void testParseNonBoolean() { + expectThrows(IllegalArgumentException.class, () -> Booleans.parseBoolean(null)); + for (String nonBoolean : NON_BOOLEANS) { + expectThrows(IllegalArgumentException.class, () -> Booleans.parseBoolean(nonBoolean)); + } + } + + public void testParseBooleanStrict() { + assertTrue(Booleans.parseBooleanStrict("true", false)); + assertFalse(Booleans.parseBooleanStrict("false", true)); + assertTrue(Booleans.parseBooleanStrict(null, true)); + assertFalse(Booleans.parseBooleanStrict("", false)); + expectThrows(IllegalArgumentException.class, () -> Booleans.parseBooleanStrict("foobar", false)); + expectThrows(IllegalArgumentException.class, () 
-> Booleans.parseBooleanStrict(" \t\n", false)); + } +} diff --git a/libs/common/src/test/java/org/opensearch/common/annotation/processor/ApiAnnotationProcessorTests.java b/libs/common/src/test/java/org/opensearch/common/annotation/processor/ApiAnnotationProcessorTests.java new file mode 100644 index 0000000000000..8d8a4c7895339 --- /dev/null +++ b/libs/common/src/test/java/org/opensearch/common/annotation/processor/ApiAnnotationProcessorTests.java @@ -0,0 +1,476 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.test.OpenSearchTestCase; + +import javax.tools.Diagnostic; + +import static org.opensearch.common.annotation.processor.CompilerSupport.HasDiagnostic.matching; +import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; + +@SuppressWarnings("deprecation") +public class ApiAnnotationProcessorTests extends OpenSearchTestCase implements CompilerSupport { + public void testPublicApiMethodArgumentNotAnnotated() { + final CompilerResult result = compile("PublicApiMethodArgumentNotAnnotated.java", "NotAnnotated.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(3)); + + assertThat( + failure.diagnotics(), + hasItem( + matching( + Diagnostic.Kind.ERROR, + containsString( + "The element org.opensearch.common.annotation.processor.NotAnnotated is part of the public APIs but is not marked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + + "(referenced by org.opensearch.common.annotation.processor.PublicApiMethodArgumentNotAnnotated)" + ) + ) + ) + ); + } + + public void testPublicApiMethodArgumentNotAnnotatedGenerics() { + final CompilerResult result = compile("PublicApiMethodArgumentNotAnnotatedGenerics.java", "NotAnnotated.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(3)); + + assertThat( + failure.diagnotics(), + hasItem( + matching( + Diagnostic.Kind.ERROR, + containsString( + "The element org.opensearch.common.annotation.processor.NotAnnotated is part of the public APIs but is not marked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + + "(referenced by org.opensearch.common.annotation.processor.PublicApiMethodArgumentNotAnnotatedGenerics)" + ) + ) + ) + ); + } + + public void testPublicApiMethodThrowsNotAnnotated() { + final CompilerResult result = compile("PublicApiMethodThrowsNotAnnotated.java", "PublicApiAnnotated.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(3)); + + assertThat( + failure.diagnotics(), + hasItem( + matching( + Diagnostic.Kind.ERROR, + containsString( + "The element org.opensearch.common.annotation.processor.NotAnnotatedException is part of the public APIs but is not marked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + + "(referenced by org.opensearch.common.annotation.processor.PublicApiMethodThrowsNotAnnotated)" + ) + ) + ) + ); + } + + public void 
testPublicApiMethodArgumentNotAnnotatedPackagePrivate() { + final CompilerResult result = compile("PublicApiMethodArgumentNotAnnotatedPackagePrivate.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(4)); + + assertThat( + failure.diagnotics(), + hasItem( + matching( + Diagnostic.Kind.ERROR, + containsString( + "The element org.opensearch.common.annotation.processor.NotAnnotatedPackagePrivate is part of the public APIs but does not have public or protected visibility " + + "(referenced by org.opensearch.common.annotation.processor.PublicApiMethodArgumentNotAnnotatedPackagePrivate)" + ) + ) + ) + ); + + assertThat( + failure.diagnotics(), + hasItem( + matching( + Diagnostic.Kind.ERROR, + containsString( + "The element org.opensearch.common.annotation.processor.NotAnnotatedPackagePrivate is part of the public APIs but is not marked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + + "(referenced by org.opensearch.common.annotation.processor.PublicApiMethodArgumentNotAnnotatedPackagePrivate)" + ) + ) + ) + ); + } + + public void testPublicApiMethodArgumentAnnotatedPackagePrivate() { + final CompilerResult result = compile("PublicApiMethodArgumentAnnotatedPackagePrivate.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(3)); + + assertThat( + failure.diagnotics(), + hasItem( + matching( + Diagnostic.Kind.ERROR, + containsString( + "The element org.opensearch.common.annotation.processor.AnnotatedPackagePrivate is part of the public APIs but does not have public or protected visibility " + + "(referenced by org.opensearch.common.annotation.processor.PublicApiMethodArgumentAnnotatedPackagePrivate)" + ) + ) + ) + ); + } + + public void testPublicApiWithInternalApiMethod() { + final CompilerResult result = compile("PublicApiWithInternalApiMethod.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(3)); + + assertThat( + failure.diagnotics(), + hasItem( + matching( + Diagnostic.Kind.ERROR, + containsString( + "The element method() is part of the public APIs but is marked as @InternalApi (referenced by org.opensearch.common.annotation.processor.PublicApiWithInternalApiMethod)" + ) + ) + ) + ); + } + + /** + * The constructor arguments have relaxed semantics at the moment: those could be not annotated or be annotated as {@link InternalApi} + */ + public void testPublicApiConstructorArgumentNotAnnotated() { + final CompilerResult result = compile("PublicApiConstructorArgumentNotAnnotated.java", "NotAnnotated.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(2)); + + assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR)))); + } + + /** + * The constructor arguments have relaxed semantics at the moment: those could be not annotated or be annotated as {@link InternalApi} + */ + public void testPublicApiConstructorArgumentAnnotatedInternalApi() { + final CompilerResult result = compile("PublicApiConstructorArgumentAnnotatedInternalApi.java", "InternalApiAnnotated.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(2)); + + assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR)))); + } + + 
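The test resources referenced above (NotAnnotated.java and friends) are not part of this hunk, so here is a hypothetical minimal pair, grounded only in the error messages the tests assert, that would trip the processor:

import org.opensearch.common.annotation.PublicApi;

@PublicApi(since = "1.0.0")
public class ExposesUnannotated {
    public UnannotatedArg roundTrip(UnannotatedArg arg) { // argument and return type both flagged
        return arg;
    }
}

// Not marked @PublicApi/@ExperimentalApi/@DeprecatedApi, so the processor reports it as
// "part of the public APIs but is not marked ..."; being package-private, it would also
// trip the visibility check exercised by the tests above.
class UnannotatedArg {}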
public void testPublicApiWithExperimentalApiMethod() { + final CompilerResult result = compile("PublicApiWithExperimentalApiMethod.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(2)); + + assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR)))); + } + + public void testPublicApiMethodReturnNotAnnotated() { + final CompilerResult result = compile("PublicApiMethodReturnNotAnnotated.java", "NotAnnotated.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(3)); + + assertThat( + failure.diagnotics(), + hasItem( + matching( + Diagnostic.Kind.ERROR, + containsString( + "The element org.opensearch.common.annotation.processor.NotAnnotated is part of the public APIs but is not marked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + + "(referenced by org.opensearch.common.annotation.processor.PublicApiMethodReturnNotAnnotated)" + ) + ) + ) + ); + } + + public void testPublicApiMethodReturnNotAnnotatedGenerics() { + final CompilerResult result = compile("PublicApiMethodReturnNotAnnotatedGenerics.java", "NotAnnotated.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(3)); + + assertThat( + failure.diagnotics(), + hasItem( + matching( + Diagnostic.Kind.ERROR, + containsString( + "The element org.opensearch.common.annotation.processor.NotAnnotated is part of the public APIs but is not marked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + + "(referenced by org.opensearch.common.annotation.processor.PublicApiMethodReturnNotAnnotatedGenerics)" + ) + ) + ) + ); + } + + public void testPublicApiMethodReturnNotAnnotatedArray() { + final CompilerResult result = compile("PublicApiMethodReturnNotAnnotatedArray.java", "NotAnnotated.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(3)); + + assertThat( + failure.diagnotics(), + hasItem( + matching( + Diagnostic.Kind.ERROR, + containsString( + "The element org.opensearch.common.annotation.processor.NotAnnotated is part of the public APIs but is not marked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + + "(referenced by org.opensearch.common.annotation.processor.PublicApiMethodReturnNotAnnotatedArray)" + ) + ) + ) + ); + } + + public void testPublicApiMethodReturnNotAnnotatedBoundedGenerics() { + final CompilerResult result = compile("PublicApiMethodReturnNotAnnotatedBoundedGenerics.java", "NotAnnotated.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(3)); + + assertThat( + failure.diagnotics(), + hasItem( + matching( + Diagnostic.Kind.ERROR, + containsString( + "The element org.opensearch.common.annotation.processor.NotAnnotated is part of the public APIs but is not marked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + + "(referenced by org.opensearch.common.annotation.processor.PublicApiMethodReturnNotAnnotatedBoundedGenerics)" + ) + ) + ) + ); + } + + public void testPublicApiMethodReturnNotAnnotatedAnnotation() { + final CompilerResult result = compile( + "PublicApiMethodReturnNotAnnotatedAnnotation.java", + "PublicApiAnnotated.java", + "NotAnnotatedAnnotation.java" + ); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure 
= (Failure) result; + assertThat(failure.diagnotics(), hasSize(3)); + + assertThat( + failure.diagnotics(), + hasItem( + matching( + Diagnostic.Kind.ERROR, + containsString( + "The element org.opensearch.common.annotation.processor.NotAnnotatedAnnotation is part of the public APIs but is not marked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + + "(referenced by org.opensearch.common.annotation.processor.PublicApiMethodReturnNotAnnotatedAnnotation)" + ) + ) + ) + ); + } + + public void testPublicApiMethodReturnNotAnnotatedWildcardGenerics() { + final CompilerResult result = compile("PublicApiMethodReturnNotAnnotatedWildcardGenerics.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(2)); + + assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR)))); + } + + public void testPublicApiWithPackagePrivateMethod() { + final CompilerResult result = compile("PublicApiWithPackagePrivateMethod.java", "NotAnnotated.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(2)); + + assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR)))); + } + + public void testPublicApiMethodReturnSelf() { + final CompilerResult result = compile("PublicApiMethodReturnSelf.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(2)); + + assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR)))); + } + + public void testExperimentalApiMethodReturnSelf() { + final CompilerResult result = compile("ExperimentalApiMethodReturnSelf.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(2)); + + assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR)))); + } + + public void testDeprecatedApiMethodReturnSelf() { + final CompilerResult result = compile("DeprecatedApiMethodReturnSelf.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(2)); + + assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR)))); + } + + public void testPublicApiPackagePrivate() { + final CompilerResult result = compile("PublicApiPackagePrivate.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(3)); + + assertThat( + failure.diagnotics(), + hasItem( + matching( + Diagnostic.Kind.ERROR, + containsString( + "The element org.opensearch.common.annotation.processor.PublicApiPackagePrivate is part of the public APIs but does not have public or protected visibility" + ) + ) + ) + ); + } + + public void testPublicApiMethodGenericsArgumentNotAnnotated() { + final CompilerResult result = compile("PublicApiMethodGenericsArgumentNotAnnotated.java", "NotAnnotated.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(3)); + + assertThat( + failure.diagnotics(), + hasItem( + matching( + Diagnostic.Kind.ERROR, + containsString( + "The element org.opensearch.common.annotation.processor.NotAnnotated is part of the public APIs but is not marked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + + "(referenced by 
org.opensearch.common.annotation.processor.PublicApiMethodGenericsArgumentNotAnnotated)" + ) + ) + ) + ); + } + + public void testPublicApiMethodReturnAnnotatedArray() { + final CompilerResult result = compile("PublicApiMethodReturnAnnotatedArray.java", "PublicApiAnnotated.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(2)); + + assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR)))); + } + + public void testPublicApiMethodGenericsArgumentAnnotated() { + final CompilerResult result = compile("PublicApiMethodGenericsArgumentAnnotated.java", "PublicApiAnnotated.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(2)); + + assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR)))); + } + + public void testPublicApiAnnotatedNotOpensearch() { + final CompilerResult result = compileWithPackage("org.acme", "PublicApiAnnotated.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(3)); + + assertThat( + failure.diagnotics(), + hasItem( + matching( + Diagnostic.Kind.ERROR, + containsString( + "The type org.acme.PublicApiAnnotated is not residing in org.opensearch.* package and should not be annotated as OpenSearch APIs." + ) + ) + ) + ); + } + + public void testPublicApiMethodReturnAnnotatedGenerics() { + final CompilerResult result = compile( + "PublicApiMethodReturnAnnotatedGenerics.java", + "PublicApiAnnotated.java", + "NotAnnotatedAnnotation.java" + ); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(3)); + + assertThat( + failure.diagnotics(), + hasItem( + matching( + Diagnostic.Kind.ERROR, + containsString( + "The element org.opensearch.common.annotation.processor.NotAnnotatedAnnotation is part of the public APIs but is not marked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + + "(referenced by org.opensearch.common.annotation.processor.PublicApiMethodReturnAnnotatedGenerics)" + ) + ) + ) + ); + } + + /** + * The type could expose protected inner types which are still considered to be a public API when used + */ + public void testPublicApiWithProtectedInterface() { + final CompilerResult result = compile("PublicApiWithProtectedInterface.java"); + assertThat(result, instanceOf(Failure.class)); + + final Failure failure = (Failure) result; + assertThat(failure.diagnotics(), hasSize(2)); + + assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR)))); + } +} diff --git a/libs/common/src/test/java/org/opensearch/common/annotation/processor/CompilerSupport.java b/libs/common/src/test/java/org/opensearch/common/annotation/processor/CompilerSupport.java new file mode 100644 index 0000000000000..c8fdb3333a714 --- /dev/null +++ b/libs/common/src/test/java/org/opensearch/common/annotation/processor/CompilerSupport.java @@ -0,0 +1,140 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.annotation.processor; + +import org.hamcrest.Description; +import org.hamcrest.Matcher; +import org.hamcrest.TypeSafeMatcher; + +import javax.tools.Diagnostic; +import javax.tools.DiagnosticCollector; +import javax.tools.JavaCompiler; +import javax.tools.JavaCompiler.CompilationTask; +import javax.tools.JavaFileObject; +import javax.tools.JavaFileObject.Kind; +import javax.tools.SimpleJavaFileObject; +import javax.tools.StandardJavaFileManager; +import javax.tools.ToolProvider; + +import java.io.IOException; +import java.io.InputStream; +import java.io.StringWriter; +import java.io.UncheckedIOException; +import java.net.URI; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +interface CompilerSupport { + default CompilerResult compile(String name, String... names) { + return compileWithPackage(ApiAnnotationProcessorTests.class.getPackageName(), name, names); + } + + @SuppressWarnings("removal") + default CompilerResult compileWithPackage(String pck, String name, String... names) { + final JavaCompiler compiler = ToolProvider.getSystemJavaCompiler(); + final DiagnosticCollector<JavaFileObject> collector = new DiagnosticCollector<>(); + + try (StringWriter out = new StringWriter()) { + final StandardJavaFileManager fileManager = compiler.getStandardFileManager(collector, null, null); + final List<JavaFileObject> files = Stream.concat(Stream.of(name), Arrays.stream(names)) + .map(f -> asSource(pck, f)) + .collect(Collectors.toList()); + + final CompilationTask task = compiler.getTask(out, fileManager, collector, null, null, files); + task.setProcessors(Collections.singleton(new ApiAnnotationProcessor())); + + if (AccessController.doPrivileged((PrivilegedAction<Boolean>) () -> task.call())) { + return new Success(); + } else { + return new Failure(collector.getDiagnostics()); + } + } catch (final IOException ex) { + throw new UncheckedIOException(ex); + } + } + + private static JavaFileObject asSource(String pkg, String name) { + final String resource = "/" + pkg.replaceAll("[.]", "/") + "/" + name; + final URL source = ApiAnnotationProcessorTests.class.getResource(resource); + + return new SimpleJavaFileObject(URI.create(source.toExternalForm()), Kind.SOURCE) { + @Override + public CharSequence getCharContent(boolean ignoreEncodingErrors) throws IOException { + try (final InputStream in = ApiAnnotationProcessorTests.class.getResourceAsStream(resource)) { + return new String(in.readAllBytes(), StandardCharsets.UTF_8); + } + } + }; + } + + class CompilerResult {} + + class Success extends CompilerResult { + + } + + class Failure extends CompilerResult { + private final List<Diagnostic<? extends JavaFileObject>> diagnotics; + + Failure(List<Diagnostic<? extends JavaFileObject>> diagnotics) { + this.diagnotics = diagnotics; + } + + List<Diagnostic<? extends JavaFileObject>> diagnotics() { + return diagnotics; + } + } + + class HasDiagnostic extends TypeSafeMatcher<Diagnostic<? 
extends JavaFileObject>> { + private final Diagnostic.Kind kind; + private final Matcher<String> matcher; + + HasDiagnostic(final Diagnostic.Kind kind, final Matcher<String> matcher) { + this.kind = kind; + this.matcher = matcher; + } + + @Override + public void describeTo(Description description) { + description.appendText("diagnostic with kind ").appendValue(kind).appendText(" "); + + if (matcher != null) { + description.appendText(" and message "); + matcher.describeTo(description); + } + } + + @Override + protected boolean matchesSafely(Diagnostic<? extends JavaFileObject> item) { + if (!kind.equals(item.getKind())) { + return false; + } else if (matcher != null) { + return matcher.matches(item.getMessage(Locale.ROOT)); + } else { + return true; + } + } + + public static HasDiagnostic matching(final Diagnostic.Kind kind, final Matcher<String> matcher) { + return new HasDiagnostic(kind, matcher); + } + + public static HasDiagnostic matching(final Diagnostic.Kind kind) { + return new HasDiagnostic(kind, null); + } + } +} diff --git a/server/src/test/java/org/opensearch/common/collect/IteratorsTests.java b/libs/common/src/test/java/org/opensearch/common/collect/IteratorsTests.java similarity index 98% rename from server/src/test/java/org/opensearch/common/collect/IteratorsTests.java rename to libs/common/src/test/java/org/opensearch/common/collect/IteratorsTests.java index 6ad272542dbb1..2578926454c0b 100644 --- a/server/src/test/java/org/opensearch/common/collect/IteratorsTests.java +++ b/libs/common/src/test/java/org/opensearch/common/collect/IteratorsTests.java @@ -83,6 +83,7 @@ public void testRandomSingleton() { int numberOfIterators = randomIntBetween(1, 1000); int singletonIndex = randomIntBetween(0, numberOfIterators - 1); int value = randomInt(); + @SuppressWarnings("rawtypes") Iterator<Integer>[] iterators = new Iterator[numberOfIterators]; for (int i = 0; i < numberOfIterators; i++) { iterators[i] = i != singletonIndex ? empty() : singletonIterator(value); @@ -92,6 +93,7 @@ public void testRandomSingleton() { public void testRandomIterators() { int numberOfIterators = randomIntBetween(1, 1000); + @SuppressWarnings("rawtypes") Iterator<Integer>[] iterators = new Iterator[numberOfIterators]; List<Integer> values = new ArrayList<>(); for (int i = 0; i < numberOfIterators; i++) { diff --git a/libs/common/src/test/java/org/opensearch/common/hash/T1Ha1Tests.java b/libs/common/src/test/java/org/opensearch/common/hash/T1Ha1Tests.java new file mode 100644 index 0000000000000..e348fbf759bdd --- /dev/null +++ b/libs/common/src/test/java/org/opensearch/common/hash/T1Ha1Tests.java @@ -0,0 +1,312 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.hash; + +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; +import java.lang.invoke.VarHandle; +import java.nio.ByteOrder; + +public class T1Ha1Tests extends HashFunctionTestCase { + private static final VarHandle LONG_HANDLE = MethodHandles.byteArrayViewVarHandle(long[].class, ByteOrder.LITTLE_ENDIAN); + private final byte[] scratch = new byte[8]; + + /** + * Inspired from the tests defined in the reference implementation: + * <a href="https://github.com/erthink/t1ha/blob/master/src/t1ha_selfcheck.c">t1ha_selfcheck.c</a> + */ + public void testSelfCheck() { + byte[] testPattern = { + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + (byte) 0xFF, + 0x7F, + 0x3F, + 0x1F, + 0xF, + 8, + 16, + 32, + 64, + (byte) 0x80, + (byte) 0xFE, + (byte) 0xFC, + (byte) 0xF8, + (byte) 0xF0, + (byte) 0xE0, + (byte) 0xC0, + (byte) 0xFD, + (byte) 0xFB, + (byte) 0xF7, + (byte) 0xEF, + (byte) 0xDF, + (byte) 0xBF, + 0x55, + (byte) 0xAA, + 11, + 17, + 19, + 23, + 29, + 37, + 42, + 43, + 'a', + 'b', + 'c', + 'd', + 'e', + 'f', + 'g', + 'h', + 'i', + 'j', + 'k', + 'l', + 'm', + 'n', + 'o', + 'p', + 'q', + 'r', + 's', + 't', + 'u', + 'v', + 'w', + 'x' }; + + // Reference hashes when using {@link Math::unsignedMultiplyHigh} in the mux64 step. + // These values match the ones defined in the reference implementation: + // https://github.com/erthink/t1ha/blob/master/src/t1ha1_selfcheck.c#L51-L72 + long[] referenceUnsignedMultiplyHigh = { + 0L, + 0x6A580668D6048674L, + 0xA2FE904AFF0D0879L, + 0xE3AB9C06FAF4D023L, + 0x6AF1C60874C95442L, + 0xB3557E561A6C5D82L, + 0x0AE73C696F3D37C0L, + 0x5EF25F7062324941L, + 0x9B784F3B4CE6AF33L, + 0x6993BB206A74F070L, + 0xF1E95DF109076C4CL, + 0x4E1EB70C58E48540L, + 0x5FDD7649D8EC44E4L, + 0x559122C706343421L, + 0x380133D58665E93DL, + 0x9CE74296C8C55AE4L, + 0x3556F9A5757AB6D0L, + 0xF62751F7F25C469EL, + 0x851EEC67F6516D94L, + 0xED463EE3848A8695L, + 0xDC8791FEFF8ED3ACL, + 0x2569C744E1A282CFL, + 0xF90EB7C1D70A80B9L, + 0x68DFA6A1B8050A4CL, + 0x94CCA5E8210D2134L, + 0xF5CC0BEABC259F52L, + 0x40DBC1F51618FDA7L, + 0x0807945BF0FB52C6L, + 0xE5EF7E09DE70848DL, + 0x63E1DF35FEBE994AL, + 0x2025E73769720D5AL, + 0xAD6120B2B8A152E1L, + 0x2A71D9F13959F2B7L, + 0x8A20849A27C32548L, + 0x0BCBC9FE3B57884EL, + 0x0E028D255667AEADL, + 0xBE66DAD3043AB694L, + 0xB00E4C1238F9E2D4L, + 0x5C54BDE5AE280E82L, + 0x0E22B86754BC3BC4L, + 0x016707EBF858B84DL, + 0x990015FBC9E095EEL, + 0x8B9AF0A3E71F042FL, + 0x6AA56E88BD380564L, + 0xAACE57113E681A0FL, + 0x19F81514AFA9A22DL, + 0x80DABA3D62BEAC79L, + 0x715210412CABBF46L, + 0xD8FA0B9E9D6AA93FL, + 0x6C2FC5A4109FD3A2L, + 0x5B3E60EEB51DDCD8L, + 0x0A7C717017756FE7L, + 0xA73773805CA31934L, + 0x4DBD6BB7A31E85FDL, + 0x24F619D3D5BC2DB4L, + 0x3E4AF35A1678D636L, + 0x84A1A8DF8D609239L, + 0x359C862CD3BE4FCDL, + 0xCF3A39F5C27DC125L, + 0xC0FF62F8FD5F4C77L, + 0x5E9F2493DDAA166CL, + 0x17424152BE1CA266L, + 0xA78AFA5AB4BBE0CDL, + 0x7BFB2E2CEF118346L, + 0x647C3E0FF3E3D241L, + 0x0352E4055C13242EL, + 0x6F42FC70EB660E38L, + 0x0BEBAD4FABF523BAL, + 0x9269F4214414D61DL, + 0x1CA8760277E6006CL, + 0x7BAD25A859D87B5DL, + 0xAD645ADCF7414F1DL, + 0xB07F517E88D7AFB3L, + 0xB321C06FB5FFAB5CL, + 0xD50F162A1EFDD844L, + 0x1DFD3D1924FBE319L, + 0xDFAEAB2F09EF7E78L, + 0xA7603B5AF07A0B1EL, + 0x41CD044C0E5A4EE3L, + 0xF64D2F86E813BF33L, + 0xFF9FDB99305EB06AL }; + + // Reference hashes when using {@link Math::multiplyHigh} in the mux64 step. 
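+ // These differ from the reference implementation's published values because Math.unsignedMultiplyHigh is only + // available on JDK 18 and later; hasUnsignedMultiplyHigh() below selects the set matching the current runtime. 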
+ long[] referenceMultiplyHigh = { + 0L, + 0xCE510B7405E0A2CAL, + 0xC0A2DA74A8271FCBL, + 0x1C549C06FAF4D023L, + 0x084CDA0ED41CD2D4L, + 0xD05BA7AA9FEECE5BL, + 0x7D6128AB2CCC4EB1L, + 0x62332FA6EC1B50AAL, + 0x1B66C81767870EF2L, + 0xEC6B92A37AED73B8L, + 0x1712987232EF4ED3L, + 0xAA503A04AE2450B5L, + 0x15D25DE445730A6CL, + 0xAB87E38AA8D21746L, + 0x18CAE735BBF62D15L, + 0x0D56DFF9914CA656L, + 0xCB4F5859A9AE5B52L, + 0xEE97003F7B1283E1L, + 0x50CFB2AF0F54BA6DL, + 0x570B4D6AE4C67814L, + 0x1ED59274A97497EBL, + 0x8608D03D165C59BFL, + 0x6CBE0E537BE04C02L, + 0xD4C8FCFD4179A874L, + 0xFB4E677D876118A1L, + 0x6B1A96F1B4765D79L, + 0x1075B9B89BDFE5F8L, + 0x02771D08F2891CB1L, + 0x4BB8E16FF410F19EL, + 0x3EB7849C0DFAF566L, + 0x173B09359DE422CFL, + 0xFE212C6DB7474306L, + 0xA74E7C2D632664EFL, + 0x56ECDED6546F0914L, + 0x08DEF866EF20A94BL, + 0x7D0BAC64606521F1L, + 0xCA6BA9817A357FA9L, + 0x0873B834A6E2AAE4L, + 0x45EE02D6DCF8992EL, + 0x3EA060225B3E1C1FL, + 0x24DBB6D02D5CC531L, + 0xE5E91A7340BF9382L, + 0x28975F86E2E2177FL, + 0x80E48374A6B42E85L, + 0xDF40392265BB4A66L, + 0x43750475A48C7023L, + 0x5648BD3E391C01D3L, + 0x9BE9E11AD1A6C369L, + 0x2E079CB8C1A11F50L, + 0xB2D538403F1020F1L, + 0x297518A4EF6AF5F1L, + 0xA8CE1B90167A6F8BL, + 0xB926B2FA50541BA9L, + 0xC46A2D3BD6925A35L, + 0x3071BC8E6C400487L, + 0x300D3885894BA47FL, + 0x840BFF3BEB7EEADDL, + 0xDC9E04DF744BDC0CL, + 0xBE01CF6841412C77L, + 0x6C55B2DC74B816A1L, + 0x4D4C63128A344F82L, + 0xC6227497E100B463L, + 0x53C9987705EA71C0L, + 0x3E355394668C3559L, + 0x05984B7D358B107AL, + 0x4D32FA1D79002A57L, + 0x910B0DAD1440EC24L, + 0x025BDE6A7BEBF320L, + 0x0D33817EF345D999L, + 0xBA0DE64B3F4DB34AL, + 0x54666461D0EB4FD7L, + 0x746ECFA92D1CAF81L, + 0x6E6A774ACD266DF2L, + 0x1A86161AE8E82A85L, + 0xFFF7C351A4CEC13DL, + 0xFFF05844F57498B8L, + 0x8DB71789127C6C13L, + 0x4A52ACF805F370ABL, + 0xFE13F90A1ACFBD58L, + 0x615730E301ED12E2L, + 0x1A2D4AA43B6C0103L }; + + long[] reference = hasUnsignedMultiplyHigh() ? 
referenceUnsignedMultiplyHigh : referenceMultiplyHigh; + + int offset = 0; + assertEquals(reference[offset++], T1ha1.hash(null, 0, 0, 0L)); // empty-zero + assertEquals(reference[offset++], T1ha1.hash(null, 0, 0, ~0L)); // empty-all1 + assertEquals(reference[offset++], T1ha1.hash(testPattern, 0, 64, 0L)); // bin64-zero + + long seed = 1; + for (int i = 1; i < 64; i++) { + assertEquals(reference[offset++], T1ha1.hash(testPattern, 0, i, seed)); // bin%i-1p%i + seed <<= 1; + } + + seed = ~0L; + for (int i = 1; i <= 7; i++) { + seed <<= 1; + assertEquals(reference[offset++], T1ha1.hash(testPattern, i, 64 - i, seed)); // align%i_F%i + } + + byte[] testPatternLong = new byte[512]; + for (int i = 0; i < testPatternLong.length; i++) { + testPatternLong[i] = (byte) i; + } + for (int i = 0; i <= 7; i++) { + assertEquals(reference[offset++], T1ha1.hash(testPatternLong, i, 128 + i * 17, seed)); // long-%05i + } + } + + @Override + public byte[] hash(byte[] input) { + long hash = T1ha1.hash(input, 0, input.length); + LONG_HANDLE.set(scratch, 0, hash); + return scratch; + } + + @Override + public int outputBits() { + return 64; + } + + private static boolean hasUnsignedMultiplyHigh() { + try { + MethodHandles.publicLookup() + .findStatic(Math.class, "unsignedMultiplyHigh", MethodType.methodType(long.class, long.class, long.class)); + return true; + } catch (NoSuchMethodException e) { + return false; + } catch (IllegalAccessException e) { + throw new RuntimeException(e); + } + } +} diff --git a/server/src/test/java/org/opensearch/common/network/CidrsTests.java b/libs/common/src/test/java/org/opensearch/common/network/CidrsTests.java similarity index 100% rename from server/src/test/java/org/opensearch/common/network/CidrsTests.java rename to libs/common/src/test/java/org/opensearch/common/network/CidrsTests.java diff --git a/server/src/test/java/org/opensearch/common/network/InetAddressesTests.java b/libs/common/src/test/java/org/opensearch/common/network/InetAddressesTests.java similarity index 100% rename from server/src/test/java/org/opensearch/common/network/InetAddressesTests.java rename to libs/common/src/test/java/org/opensearch/common/network/InetAddressesTests.java diff --git a/server/src/test/java/org/opensearch/common/network/NetworkAddressTests.java b/libs/common/src/test/java/org/opensearch/common/network/NetworkAddressTests.java similarity index 100% rename from server/src/test/java/org/opensearch/common/network/NetworkAddressTests.java rename to libs/common/src/test/java/org/opensearch/common/network/NetworkAddressTests.java diff --git a/server/src/test/java/org/opensearch/common/recycler/AbstractRecyclerTestCase.java b/libs/common/src/test/java/org/opensearch/common/recycler/AbstractRecyclerTestCase.java similarity index 100% rename from server/src/test/java/org/opensearch/common/recycler/AbstractRecyclerTestCase.java rename to libs/common/src/test/java/org/opensearch/common/recycler/AbstractRecyclerTestCase.java diff --git a/server/src/test/java/org/opensearch/common/recycler/ConcurrentRecyclerTests.java b/libs/common/src/test/java/org/opensearch/common/recycler/ConcurrentRecyclerTests.java similarity index 100% rename from server/src/test/java/org/opensearch/common/recycler/ConcurrentRecyclerTests.java rename to libs/common/src/test/java/org/opensearch/common/recycler/ConcurrentRecyclerTests.java diff --git a/server/src/test/java/org/opensearch/common/recycler/LockedRecyclerTests.java b/libs/common/src/test/java/org/opensearch/common/recycler/LockedRecyclerTests.java similarity index 
100% rename from server/src/test/java/org/opensearch/common/recycler/LockedRecyclerTests.java rename to libs/common/src/test/java/org/opensearch/common/recycler/LockedRecyclerTests.java diff --git a/server/src/test/java/org/opensearch/common/recycler/NoneRecyclerTests.java b/libs/common/src/test/java/org/opensearch/common/recycler/NoneRecyclerTests.java similarity index 100% rename from server/src/test/java/org/opensearch/common/recycler/NoneRecyclerTests.java rename to libs/common/src/test/java/org/opensearch/common/recycler/NoneRecyclerTests.java diff --git a/server/src/test/java/org/opensearch/common/recycler/QueueRecyclerTests.java b/libs/common/src/test/java/org/opensearch/common/recycler/QueueRecyclerTests.java similarity index 100% rename from server/src/test/java/org/opensearch/common/recycler/QueueRecyclerTests.java rename to libs/common/src/test/java/org/opensearch/common/recycler/QueueRecyclerTests.java diff --git a/libs/common/src/test/java/org/opensearch/common/round/RoundableTests.java b/libs/common/src/test/java/org/opensearch/common/round/RoundableTests.java new file mode 100644 index 0000000000000..ad19f456b0df4 --- /dev/null +++ b/libs/common/src/test/java/org/opensearch/common/round/RoundableTests.java @@ -0,0 +1,71 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.round; + +import org.opensearch.test.OpenSearchTestCase; + +public class RoundableTests extends OpenSearchTestCase { + + public void testRoundingEmptyArray() { + Throwable throwable = assertThrows(IllegalArgumentException.class, () -> RoundableFactory.create(new long[0], 0)); + assertEquals("at least one value must be present", throwable.getMessage()); + } + + public void testRoundingSmallArray() { + int size = randomIntBetween(1, 64); + long[] values = randomArrayOfSortedValues(size); + Roundable roundable = RoundableFactory.create(values, size); + + assertEquals("BidirectionalLinearSearcher", roundable.getClass().getSimpleName()); + assertRounding(roundable, values, size); + } + + public void testRoundingLargeArray() { + int size = randomIntBetween(65, 256); + long[] values = randomArrayOfSortedValues(size); + Roundable roundable = RoundableFactory.create(values, size); + + boolean useBtreeSearcher = "forced".equalsIgnoreCase(System.getProperty("opensearch.experimental.feature.simd.rounding.enabled")); + assertEquals(useBtreeSearcher ? "BtreeSearcher" : "BinarySearcher", roundable.getClass().getSimpleName()); + assertRounding(roundable, values, size); + } + + private void assertRounding(Roundable roundable, long[] values, int size) { + for (int i = 0; i < 100000; i++) { + // Index of the expected round-down point. + int idx = randomIntBetween(0, size - 1); + + // Value of the expected round-down point. + long expected = values[idx]; + + // Delta between the expected and the next round-down point. + long delta = (idx < size - 1) ? (values[idx + 1] - values[idx]) : 200; + + // Adding a random delta between 0 (inclusive) and delta (exclusive) to the expected + // round-down point, which will still floor to the same value. 
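+ // For the last point, which has no successor, the fallback delta of 200 mirrors the maximum step used by + // randomArrayOfSortedValues, keeping the generated keys in a realistic range. 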
+ long key = expected + (randomNonNegativeLong() % delta); + + assertEquals(expected, roundable.floor(key)); + } + + Throwable throwable = assertThrows(AssertionError.class, () -> roundable.floor(values[0] - 1)); + assertEquals("key must be greater than or equal to " + values[0], throwable.getMessage()); + } + + private static long[] randomArrayOfSortedValues(int size) { + int capacity = size + randomInt(20); // May be slightly more than the size. + long[] values = new long[capacity]; + + for (int i = 1; i < size; i++) { + values[i] = values[i - 1] + (randomNonNegativeLong() % 200) + 1; + } + + return values; + } +} diff --git a/libs/common/src/test/resources/org/acme/PublicApiAnnotated.java b/libs/common/src/test/resources/org/acme/PublicApiAnnotated.java new file mode 100644 index 0000000000000..bc16fd996e69d --- /dev/null +++ b/libs/common/src/test/resources/org/acme/PublicApiAnnotated.java @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.acme; + +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +public class PublicApiAnnotated { + +} diff --git a/libs/common/src/test/resources/org/opensearch/bootstrap/test.policy b/libs/common/src/test/resources/org/opensearch/bootstrap/test.policy new file mode 100644 index 0000000000000..e0a183b7eac88 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/bootstrap/test.policy @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +grant { + // allow use of JVM tooling (Java Compiler) in tests for annotation processing + permission java.io.FilePermission "${java.home}/lib/*", "read"; + permission java.io.FilePermission "${java.home}/lib/modules/*", "read"; + permission java.lang.RuntimePermission "accessSystemModules"; + permission java.lang.RuntimePermission "accessDeclaredMembers"; + permission java.lang.RuntimePermission "accessClassInPackage.*"; +}; diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/DeprecatedApiMethodReturnSelf.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/DeprecatedApiMethodReturnSelf.java new file mode 100644 index 0000000000000..7c5b6f6ea2f51 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/DeprecatedApiMethodReturnSelf.java @@ -0,0 +1,18 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.DeprecatedApi; + +@DeprecatedApi(since = "1.0.0") +public class DeprecatedApiMethodReturnSelf { + public DeprecatedApiMethodReturnSelf method() { + return new DeprecatedApiMethodReturnSelf(); + } +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/ExperimentalApiAnnotated.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/ExperimentalApiAnnotated.java new file mode 100644 index 0000000000000..5be07e22c811f --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/ExperimentalApiAnnotated.java @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.ExperimentalApi; + +@ExperimentalApi +public class ExperimentalApiAnnotated { + +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/ExperimentalApiMethodReturnSelf.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/ExperimentalApiMethodReturnSelf.java new file mode 100644 index 0000000000000..cde8f4f254faf --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/ExperimentalApiMethodReturnSelf.java @@ -0,0 +1,18 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.ExperimentalApi; + +@ExperimentalApi +public class ExperimentalApiMethodReturnSelf { + public ExperimentalApiMethodReturnSelf method() { + return new ExperimentalApiMethodReturnSelf(); + } +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/InternalApiAnnotated.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/InternalApiAnnotated.java new file mode 100644 index 0000000000000..9996ba8b736aa --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/InternalApiAnnotated.java @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.InternalApi; + +@InternalApi +public class InternalApiAnnotated { + +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/NotAnnotated.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/NotAnnotated.java new file mode 100644 index 0000000000000..ec16ce926ea86 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/NotAnnotated.java @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.annotation.processor; + +public class NotAnnotated { + +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/NotAnnotatedAnnotation.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/NotAnnotatedAnnotation.java new file mode 100644 index 0000000000000..a3e9c4f576d92 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/NotAnnotatedAnnotation.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Target; + +@Target({ + ElementType.TYPE, + ElementType.TYPE_PARAMETER, + ElementType.TYPE_USE, + ElementType.PACKAGE, + ElementType.METHOD, + ElementType.CONSTRUCTOR, + ElementType.PARAMETER, + ElementType.FIELD, + ElementType.ANNOTATION_TYPE, + ElementType.MODULE }) +public @interface NotAnnotatedAnnotation { + +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/NotAnnotatedException.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/NotAnnotatedException.java new file mode 100644 index 0000000000000..0aadaf8f9bf31 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/NotAnnotatedException.java @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +public class NotAnnotatedException extends Exception { + private static final long serialVersionUID = 1L; +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiAnnotated.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiAnnotated.java new file mode 100644 index 0000000000000..b2a7f03cb2d31 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiAnnotated.java @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +public class PublicApiAnnotated { + +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiConstructorArgumentAnnotatedInternalApi.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiConstructorArgumentAnnotatedInternalApi.java new file mode 100644 index 0000000000000..6bea2961a14e6 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiConstructorArgumentAnnotatedInternalApi.java @@ -0,0 +1,20 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +public class PublicApiConstructorArgumentAnnotatedInternalApi { + /** + * The constructor arguments have relaxed semantics at the moment: those could be not annotated or be annotated as {@link InternalApi} + */ + public PublicApiConstructorArgumentAnnotatedInternalApi(InternalApiAnnotated arg) {} +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiConstructorArgumentNotAnnotated.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiConstructorArgumentNotAnnotated.java new file mode 100644 index 0000000000000..6c7481d9978cd --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiConstructorArgumentNotAnnotated.java @@ -0,0 +1,20 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +public class PublicApiConstructorArgumentNotAnnotated { + /** + * The constructor arguments have relaxed semantics at the moment: those could be not annotated or be annotated as {@link InternalApi} + */ + public PublicApiConstructorArgumentNotAnnotated(NotAnnotated arg) {} +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodArgumentAnnotatedPackagePrivate.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodArgumentAnnotatedPackagePrivate.java new file mode 100644 index 0000000000000..5dae56a7cd7d3 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodArgumentAnnotatedPackagePrivate.java @@ -0,0 +1,20 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +public class PublicApiMethodArgumentAnnotatedPackagePrivate { + public void method(AnnotatedPackagePrivate arg) {} +} + +// The public API exposes this class through public method argument, it should be public +@PublicApi(since = "1.0.0") +class AnnotatedPackagePrivate {} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodArgumentNotAnnotated.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodArgumentNotAnnotated.java new file mode 100644 index 0000000000000..ddfec939f79e8 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodArgumentNotAnnotated.java @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +public class PublicApiMethodArgumentNotAnnotated { + public void method(NotAnnotated arg) {} +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodArgumentNotAnnotatedGenerics.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodArgumentNotAnnotatedGenerics.java new file mode 100644 index 0000000000000..d32502831d299 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodArgumentNotAnnotatedGenerics.java @@ -0,0 +1,18 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.PublicApi; + +import java.util.Collection; + +@PublicApi(since = "1.0.0") +public class PublicApiMethodArgumentNotAnnotatedGenerics { + public void method(Collection<? super NotAnnotated> arg) {} +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodArgumentNotAnnotatedPackagePrivate.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodArgumentNotAnnotatedPackagePrivate.java new file mode 100644 index 0000000000000..d4fb31b172ef2 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodArgumentNotAnnotatedPackagePrivate.java @@ -0,0 +1,19 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +public class PublicApiMethodArgumentNotAnnotatedPackagePrivate { + public void method(NotAnnotatedPackagePrivate arg) {} +} + +// The public API exposes this class through public method argument, it should be annotated and be public +class NotAnnotatedPackagePrivate {} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodGenericsArgumentAnnotated.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodGenericsArgumentAnnotated.java new file mode 100644 index 0000000000000..9715748cfa659 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodGenericsArgumentAnnotated.java @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +public class PublicApiMethodGenericsArgumentAnnotated { + public <T extends PublicApiAnnotated> void method(T arg) {} +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodGenericsArgumentNotAnnotated.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodGenericsArgumentNotAnnotated.java new file mode 100644 index 0000000000000..f149c1f34b067 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodGenericsArgumentNotAnnotated.java @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +public class PublicApiMethodGenericsArgumentNotAnnotated { + public <T extends NotAnnotated> void method(T arg) {} +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnAnnotatedArray.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnAnnotatedArray.java new file mode 100644 index 0000000000000..39b7e146fe1e7 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnAnnotatedArray.java @@ -0,0 +1,18 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +public class PublicApiMethodReturnAnnotatedArray { + public PublicApiAnnotated[] method() { + return new PublicApiAnnotated[0]; + } +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnAnnotatedGenerics.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnAnnotatedGenerics.java new file mode 100644 index 0000000000000..2171eccee2f31 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnAnnotatedGenerics.java @@ -0,0 +1,23 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.PublicApi; + +import java.util.Collection; +import java.util.Collections; + +import org.acme.PublicApiAnnotated; + +@PublicApi(since = "1.0.0") +public class PublicApiMethodReturnAnnotatedGenerics { + public Collection<@NotAnnotatedAnnotation PublicApiAnnotated> method() { + return Collections.emptyList(); + } +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotated.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotated.java new file mode 100644 index 0000000000000..725d06072d0ea --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotated.java @@ -0,0 +1,18 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +public class PublicApiMethodReturnNotAnnotated { + public NotAnnotated method() { + return new NotAnnotated(); + } +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotatedAnnotation.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotatedAnnotation.java new file mode 100644 index 0000000000000..b684e36a53da1 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotatedAnnotation.java @@ -0,0 +1,18 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +public class PublicApiMethodReturnNotAnnotatedAnnotation { + public @NotAnnotatedAnnotation PublicApiAnnotated method() { + return new PublicApiAnnotated(); + } +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotatedArray.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotatedArray.java new file mode 100644 index 0000000000000..e4c541dcea57f --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotatedArray.java @@ -0,0 +1,18 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +public class PublicApiMethodReturnNotAnnotatedArray { + public NotAnnotated[] method() { + return new NotAnnotated[0]; + } +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotatedBoundedGenerics.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotatedBoundedGenerics.java new file mode 100644 index 0000000000000..0646faf152610 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotatedBoundedGenerics.java @@ -0,0 +1,21 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.PublicApi; + +import java.util.Collection; +import java.util.Collections; + +@PublicApi(since = "1.0.0") +public class PublicApiMethodReturnNotAnnotatedBoundedGenerics { + public Collection<? extends NotAnnotated> method() { + return Collections.emptyList(); + } +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotatedGenerics.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotatedGenerics.java new file mode 100644 index 0000000000000..2227883c707d0 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotatedGenerics.java @@ -0,0 +1,21 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.PublicApi; + +import java.util.Collection; +import java.util.Collections; + +@PublicApi(since = "1.0.0") +public class PublicApiMethodReturnNotAnnotatedGenerics { + public Collection<NotAnnotated> method() { + return Collections.emptyList(); + } +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotatedWildcardGenerics.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotatedWildcardGenerics.java new file mode 100644 index 0000000000000..f2818ebb23c4a --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnNotAnnotatedWildcardGenerics.java @@ -0,0 +1,21 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.PublicApi; + +import java.util.Collection; +import java.util.Collections; + +@PublicApi(since = "1.0.0") +public class PublicApiMethodReturnNotAnnotatedWildcardGenerics { + public Collection<?> method() { + return Collections.emptyList(); + } +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnSelf.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnSelf.java new file mode 100644 index 0000000000000..883471b23ae0f --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodReturnSelf.java @@ -0,0 +1,18 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +public class PublicApiMethodReturnSelf { + public PublicApiMethodReturnSelf method() { + return new PublicApiMethodReturnSelf(); + } +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodThrowsNotAnnotated.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodThrowsNotAnnotated.java new file mode 100644 index 0000000000000..496b243276565 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiMethodThrowsNotAnnotated.java @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +public class PublicApiMethodThrowsNotAnnotated { + public void method(PublicApiAnnotated arg) throws NotAnnotatedException {} +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiPackagePrivate.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiPackagePrivate.java new file mode 100644 index 0000000000000..88c20e7f4c8f1 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiPackagePrivate.java @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +class PublicApiPackagePrivate { + void method() {} +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiWithExperimentalApiMethod.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiWithExperimentalApiMethod.java new file mode 100644 index 0000000000000..faaaa1d9f4051 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiWithExperimentalApiMethod.java @@ -0,0 +1,18 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +public class PublicApiWithExperimentalApiMethod { + @ExperimentalApi + public void method() {} +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiWithInternalApiMethod.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiWithInternalApiMethod.java new file mode 100644 index 0000000000000..5bfa3c9f3e008 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiWithInternalApiMethod.java @@ -0,0 +1,19 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +public class PublicApiWithInternalApiMethod { + // The public API exposes internal API method, it should be public API + @InternalApi + public void method() {} +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiWithPackagePrivateMethod.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiWithPackagePrivateMethod.java new file mode 100644 index 0000000000000..1345467423530 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiWithPackagePrivateMethod.java @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +public class PublicApiWithPackagePrivateMethod { + void method(NotAnnotated arg) {} +} diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiWithProtectedInterface.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiWithProtectedInterface.java new file mode 100644 index 0000000000000..222ae01fd15e6 --- /dev/null +++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiWithProtectedInterface.java @@ -0,0 +1,22 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.annotation.processor; + +import org.opensearch.common.annotation.PublicApi; + +@PublicApi(since = "1.0.0") +public class PublicApiWithProtectedInterface { + public void method(ProtectedInterface iface) {} + + /** + * The type could expose protected inner types which are still considered to be a public API when used + */ + @PublicApi(since = "1.0.0") + protected interface ProtectedInterface {} +} diff --git a/libs/compress/build.gradle b/libs/compress/build.gradle new file mode 100644 index 0000000000000..7a5bc2f573dea --- /dev/null +++ b/libs/compress/build.gradle @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +apply plugin: 'opensearch.build' +apply plugin: 'opensearch.publish' + +base { + archivesName = 'opensearch-compress' +} + +dependencies { + api project(':libs:opensearch-common') + api project(':libs:opensearch-core') + + // zstd + api "com.github.luben:zstd-jni:${versions.zstd}" + + testImplementation(project(":test:framework")) { + // tests use the locally compiled version of server + exclude group: 'org.opensearch', module: 'opensearch-compress' + } +} + +tasks.named('forbiddenApisMain').configure { + // :libs:opensearch-compress does not depend on server + // TODO: Need to decide how we want to handle forbidden signatures with the changes to server + replaceSignatureFiles 'jdk-signatures' +} + +jarHell.enabled = false diff --git a/libs/compress/licenses/zstd-jni-1.5.5-5.jar.sha1 b/libs/compress/licenses/zstd-jni-1.5.5-5.jar.sha1 new file mode 100644 index 0000000000000..498c60c34e3da --- /dev/null +++ b/libs/compress/licenses/zstd-jni-1.5.5-5.jar.sha1 @@ -0,0 +1 @@ +74ffdc5f140080adacf5278287aadd950179f848 \ No newline at end of file diff --git a/server/licenses/zstd-jni-LICENSE.txt b/libs/compress/licenses/zstd-jni-LICENSE.txt similarity index 100% rename from server/licenses/zstd-jni-LICENSE.txt rename to libs/compress/licenses/zstd-jni-LICENSE.txt diff --git a/server/licenses/zstd-jni-NOTICE.txt b/libs/compress/licenses/zstd-jni-NOTICE.txt similarity index 100% rename from server/licenses/zstd-jni-NOTICE.txt rename to libs/compress/licenses/zstd-jni-NOTICE.txt diff --git a/libs/compress/src/main/java/org/opensearch/compress/ZstdCompressor.java b/libs/compress/src/main/java/org/opensearch/compress/ZstdCompressor.java new file mode 100644 index 0000000000000..e2a740f72be93 --- /dev/null +++ b/libs/compress/src/main/java/org/opensearch/compress/ZstdCompressor.java @@ -0,0 +1,136 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.compress; + +import com.github.luben.zstd.RecyclingBufferPool; +import com.github.luben.zstd.ZstdInputStreamNoFinalizer; +import com.github.luben.zstd.ZstdOutputStreamNoFinalizer; + +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.compress.Compressor; + +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.Arrays; + +/** + * {@link Compressor} implementation based on the ZSTD compression algorithm.
+ * + * @opensearch.api - registered name requires BWC support + * @opensearch.experimental - class methods might change + */ +public class ZstdCompressor implements Compressor { + + /** + * An arbitrary header that we use to identify compressed streams. + * It needs to be different from the headers of other compressors and + * distinctive enough that no stream starting with these bytes could be + * mistaken for XContent. + */ + private static final byte[] HEADER = new byte[] { 'Z', 'S', 'T', 'D', '\0' }; + + /** + * The name to register the compressor by + * + * @opensearch.api - requires BWC support + */ + @PublicApi(since = "2.10.0") + public static final String NAME = "ZSTD"; + + /** + * The compression level for {@link ZstdOutputStreamNoFinalizer} + */ + private static final int LEVEL = 3; + + /** + * The buffer size for {@link BufferedInputStream} and {@link BufferedOutputStream} + */ + private static final int BUFFER_SIZE = 4096; + + /** + * Compares the given bytes with the {@link ZstdCompressor#HEADER} of a compressed stream + * @param bytes the bytes to compare to ({@link ZstdCompressor#HEADER}) + * @return true if the bytes are the {@link ZstdCompressor#HEADER}, false otherwise + */ + @Override + public boolean isCompressed(BytesReference bytes) { + if (bytes.length() < HEADER.length) { + return false; + } + for (int i = 0; i < HEADER.length; ++i) { + if (bytes.get(i) != HEADER[i]) { + return false; + } + } + return true; + } + + /** + * Returns the length of the {@link ZstdCompressor#HEADER} + * @return the {@link ZstdCompressor#HEADER} length + */ + @Override + public int headerLength() { + return HEADER.length; + } + + /** + * Returns a new {@link ZstdInputStreamNoFinalizer} from the given compressed {@link InputStream} + * @param in the compressed {@link InputStream} + * @return a new {@link ZstdInputStreamNoFinalizer} from the given compressed {@link InputStream} + * @throws IOException if an I/O error occurs + * @throws IllegalArgumentException if the input stream is not compressed with ZSTD + */ + @Override + public InputStream threadLocalInputStream(InputStream in) throws IOException { + final byte[] header = in.readNBytes(HEADER.length); + if (Arrays.equals(header, HEADER) == false) { + throw new IllegalArgumentException("Input stream is not compressed with ZSTD!"); + } + return new ZstdInputStreamNoFinalizer(new BufferedInputStream(in, BUFFER_SIZE), RecyclingBufferPool.INSTANCE); + } + + /** + * Returns a new {@link ZstdOutputStreamNoFinalizer} from the given {@link OutputStream} + * @param out the {@link OutputStream} + * @return a new {@link ZstdOutputStreamNoFinalizer} from the given {@link OutputStream} + * @throws IOException if an I/O error occurs + */ + @Override + public OutputStream threadLocalOutputStream(OutputStream out) throws IOException { + out.write(HEADER); + return new ZstdOutputStreamNoFinalizer(new BufferedOutputStream(out, BUFFER_SIZE), RecyclingBufferPool.INSTANCE, LEVEL); + } + + /** + * Always throws an {@link UnsupportedOperationException} as ZSTD compression is supported only for snapshotting + * @param bytesReference a reference to the bytes to uncompress + * @return always throws an exception + * @throws UnsupportedOperationException if the method is called + * @throws IOException is never thrown + */ + @Override + public BytesReference uncompress(BytesReference bytesReference) throws IOException { + throw new UnsupportedOperationException("ZSTD compression is supported only for snapshotting"); + } + + /** + * Always throws an {@link
UnsupportedOperationException} as ZSTD compression is supported only for snapshotting + * @param bytesReference a reference to the bytes to compress + * @return always throws an exception + * @throws UnsupportedOperationException if the method is called + */ + @Override + public BytesReference compress(BytesReference bytesReference) throws IOException { + throw new UnsupportedOperationException("ZSTD compression is supported only for snapshotting"); + } +} diff --git a/libs/compress/src/main/java/org/opensearch/compress/package-info.java b/libs/compress/src/main/java/org/opensearch/compress/package-info.java new file mode 100644 index 0000000000000..3ffa53079fa69 --- /dev/null +++ b/libs/compress/src/main/java/org/opensearch/compress/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Concrete {@link org.opensearch.core.compress.Compressor} implementations + */ +package org.opensearch.compress; diff --git a/libs/compress/src/main/java/org/opensearch/compress/spi/CompressionProvider.java b/libs/compress/src/main/java/org/opensearch/compress/spi/CompressionProvider.java new file mode 100644 index 0000000000000..f0c6969377d78 --- /dev/null +++ b/libs/compress/src/main/java/org/opensearch/compress/spi/CompressionProvider.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.compress.spi; + +import org.opensearch.compress.ZstdCompressor; +import org.opensearch.core.compress.Compressor; +import org.opensearch.core.compress.spi.CompressorProvider; + +import java.util.AbstractMap.SimpleEntry; +import java.util.List; +import java.util.Map.Entry; + +/** + * Additional "optional" compressor implementations provided by the opensearch compress library + * + * @opensearch.internal + */ +public class CompressionProvider implements CompressorProvider { + + /** + * Returns the concrete {@link Compressor}s provided by the compress library + * @return a list of {@link Compressor}s + */ + @SuppressWarnings({ "unchecked", "rawtypes" }) + @Override + public List<Entry<String, Compressor>> getCompressors() { + return List.of(new SimpleEntry<>(ZstdCompressor.NAME, new ZstdCompressor())); + } +} diff --git a/libs/compress/src/main/java/org/opensearch/compress/spi/package-info.java b/libs/compress/src/main/java/org/opensearch/compress/spi/package-info.java new file mode 100644 index 0000000000000..47d982a7ca2f9 --- /dev/null +++ b/libs/compress/src/main/java/org/opensearch/compress/spi/package-info.java @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Service Provider Interface for registering concrete {@link org.opensearch.core.compress.Compressor} + * implementations.
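For orientation, here is a minimal round-trip sketch of the streaming API introduced above. This is an illustrative snippet, not part of this change; the class name ZstdRoundTripExample is hypothetical, and only threadLocalOutputStream/threadLocalInputStream from the diff are exercised.

import org.opensearch.compress.ZstdCompressor;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class ZstdRoundTripExample {
    public static void main(String[] args) throws Exception {
        ZstdCompressor compressor = new ZstdCompressor();
        byte[] data = "hello zstd".getBytes(StandardCharsets.UTF_8);

        // Compress: threadLocalOutputStream first writes the 5-byte 'ZSTD\0' header,
        // then streams the payload through ZstdOutputStreamNoFinalizer
        ByteArrayOutputStream compressed = new ByteArrayOutputStream();
        try (OutputStream out = compressor.threadLocalOutputStream(compressed)) {
            out.write(data);
        }

        // Decompress: threadLocalInputStream consumes and validates the header,
        // throwing IllegalArgumentException if it is missing
        try (InputStream in = compressor.threadLocalInputStream(new ByteArrayInputStream(compressed.toByteArray()))) {
            byte[] restored = in.readAllBytes();
            assert Arrays.equals(data, restored);
        }
    }
}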
+ * + * See {@link org.opensearch.compress.ZstdCompressor} + */ +package org.opensearch.compress.spi; diff --git a/libs/compress/src/main/java/org/opensearch/package-info.java b/libs/compress/src/main/java/org/opensearch/package-info.java new file mode 100644 index 0000000000000..264680e9cb271 --- /dev/null +++ b/libs/compress/src/main/java/org/opensearch/package-info.java @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This is the compress library for registering optional + * {@link org.opensearch.core.compress.Compressor} implementations + */ +package org.opensearch; diff --git a/libs/compress/src/main/resources/META-INF/services/org.opensearch.core.compress.spi.CompressorProvider b/libs/compress/src/main/resources/META-INF/services/org.opensearch.core.compress.spi.CompressorProvider new file mode 100644 index 0000000000000..a9ea063e24436 --- /dev/null +++ b/libs/compress/src/main/resources/META-INF/services/org.opensearch.core.compress.spi.CompressorProvider @@ -0,0 +1,9 @@ +# +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. +# + +org.opensearch.compress.spi.CompressionProvider diff --git a/libs/compress/src/test/java/org/opensearch/compress/ZstdCompressTests.java b/libs/compress/src/test/java/org/opensearch/compress/ZstdCompressTests.java new file mode 100644 index 0000000000000..54864054a0e02 --- /dev/null +++ b/libs/compress/src/test/java/org/opensearch/compress/ZstdCompressTests.java @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.compress; + +import org.opensearch.core.compress.Compressor; +import org.opensearch.test.core.compress.AbstractCompressorTestCase; + +/** + * Test streaming compression + */ +public class ZstdCompressTests extends AbstractCompressorTestCase { + + private final Compressor compressor = new ZstdCompressor(); + + @Override + protected Compressor compressor() { + return compressor; + } +} diff --git a/libs/core/build.gradle b/libs/core/build.gradle index 46b6f4471731f..0cf2cd0bf92b6 100644 --- a/libs/core/build.gradle +++ b/libs/core/build.gradle @@ -33,46 +33,7 @@ import org.opensearch.gradle.info.BuildParams apply plugin: 'opensearch.publish' base { - archivesBaseName = 'opensearch-core' -} - -// we want to keep the JDKs in our IDEs set to JDK 8 until minimum JDK is bumped to 11 so we do not include this source set in our IDEs -if (!isEclipse) { - sourceSets { - java11 { - java { - srcDirs = ['src/main/java11'] - } - } - } - - configurations { - java11Compile.extendsFrom(compile) - } - - dependencies { - java11Implementation sourceSets.main.output - } - - compileJava11Java { - sourceCompatibility = JavaVersion.VERSION_11 - targetCompatibility = JavaVersion.VERSION_11 - } - - forbiddenApisJava11 { - if (BuildParams.runtimeJavaVersion < JavaVersion.VERSION_11) { - targetCompatibility = JavaVersion.VERSION_11 - } - replaceSignatureFiles 'jdk-signatures' - } - - jar { - metaInf { - into 'versions/11' - from sourceSets.java11.output - } - manifest.attributes('Multi-Release': 'true') - } + archivesName = 'opensearch-core' } dependencies { diff --git a/libs/core/licenses/jackson-core-2.15.2.jar.sha1 b/libs/core/licenses/jackson-core-2.15.2.jar.sha1 deleted file mode 100644 index ec6781b968eed..0000000000000 --- a/libs/core/licenses/jackson-core-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a6fe1836469a69b3ff66037c324d75fc66ef137c \ No newline at end of file diff --git a/libs/core/licenses/jackson-core-2.16.2.jar.sha1 b/libs/core/licenses/jackson-core-2.16.2.jar.sha1 new file mode 100644 index 0000000000000..68646a1e66ffc --- /dev/null +++ b/libs/core/licenses/jackson-core-2.16.2.jar.sha1 @@ -0,0 +1 @@ +b4f588bf070f77b604c645a7d60b71eae2e6ea09 \ No newline at end of file diff --git a/libs/core/licenses/log4j-api-2.20.0.jar.sha1 b/libs/core/licenses/log4j-api-2.20.0.jar.sha1 deleted file mode 100644 index 37154d9861ac0..0000000000000 --- a/libs/core/licenses/log4j-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1fe6082e660daf07c689a89c94dc0f49c26b44bb \ No newline at end of file diff --git a/libs/core/licenses/log4j-api-2.21.0.jar.sha1 b/libs/core/licenses/log4j-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..51446052594aa --- /dev/null +++ b/libs/core/licenses/log4j-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +760192f2b69eacf4a4afc78e5a1d7a8de054fcbd \ No newline at end of file diff --git a/libs/core/licenses/lucene-core-9.11.0-snapshot-8a555eb.jar.sha1 b/libs/core/licenses/lucene-core-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..25beb34873c0c --- /dev/null +++ b/libs/core/licenses/lucene-core-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +fe1cf5663be8bdb6aa757fd4101bc551684c90fb \ No newline at end of file diff --git a/libs/core/licenses/lucene-core-9.8.0-snapshot-4373c3b.jar.sha1 b/libs/core/licenses/lucene-core-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index dc363f2776429..0000000000000 --- a/libs/core/licenses/lucene-core-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-30c3afcf058532d3d2b8820375043000e7f34a9b \ No newline at end of file diff --git a/libs/core/src/main/java/org/opensearch/Build.java b/libs/core/src/main/java/org/opensearch/Build.java index 67a50a8a31a0e..b5d67f5501725 100644 --- a/libs/core/src/main/java/org/opensearch/Build.java +++ b/libs/core/src/main/java/org/opensearch/Build.java @@ -216,7 +216,7 @@ public String getDistribution() { /** * Get the version as considered at build time - * + * <p> * Offers a way to get the fully qualified version as configured by the build. * This will be the same as {@link Version} for production releases, but may include one of the qualifiers (e.g. alpha1) * or -SNAPSHOT for others. diff --git a/libs/core/src/main/java/org/opensearch/ExceptionsHelper.java b/libs/core/src/main/java/org/opensearch/ExceptionsHelper.java index a03b2f94b27fe..9692d20a050ff 100644 --- a/libs/core/src/main/java/org/opensearch/ExceptionsHelper.java +++ b/libs/core/src/main/java/org/opensearch/ExceptionsHelper.java @@ -33,16 +33,17 @@ package org.opensearch; import com.fasterxml.jackson.core.JsonParseException; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.index.IndexFormatTooOldException; -import org.opensearch.core.action.ShardOperationFailedException; import org.opensearch.common.CheckedRunnable; import org.opensearch.common.CheckedSupplier; import org.opensearch.common.Nullable; -import org.opensearch.core.common.compress.NotXContentException; +import org.opensearch.core.action.ShardOperationFailedException; +import org.opensearch.core.compress.NotXContentException; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.core.index.Index; import org.opensearch.core.rest.RestStatus; diff --git a/libs/core/src/main/java/org/opensearch/LegacyESVersion.java b/libs/core/src/main/java/org/opensearch/LegacyESVersion.java index 32eae654cf975..5d8e067a8fd8b 100644 --- a/libs/core/src/main/java/org/opensearch/LegacyESVersion.java +++ b/libs/core/src/main/java/org/opensearch/LegacyESVersion.java @@ -40,7 +40,7 @@ /** * The contents of this file were originally moved from {@link Version}. - * + * <p> * This class keeps all the supported OpenSearch predecessor versions for * backward compatibility purpose.
* diff --git a/libs/core/src/main/java/org/opensearch/OpenSearchException.java b/libs/core/src/main/java/org/opensearch/OpenSearchException.java index f75a225af1b4d..cce86b452f698 100644 --- a/libs/core/src/main/java/org/opensearch/OpenSearchException.java +++ b/libs/core/src/main/java/org/opensearch/OpenSearchException.java @@ -34,20 +34,20 @@ import org.opensearch.common.CheckedFunction; import org.opensearch.common.Nullable; import org.opensearch.common.collect.Tuple; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.core.ParseField; -import org.opensearch.core.common.Strings; import org.opensearch.core.common.logging.LoggerMessageFormat; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.index.Index; -import org.opensearch.core.index.shard.ShardId; -import org.opensearch.core.rest.RestStatus; import java.io.IOException; import java.util.ArrayList; @@ -61,12 +61,11 @@ import java.util.concurrent.ConcurrentHashMap; import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; import static org.opensearch.OpenSearchException.OpenSearchExceptionHandleRegistry.registerExceptionHandle; import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; import static org.opensearch.core.xcontent.XContentParserUtils.ensureFieldName; -import static java.util.Collections.singletonMap; - /** * A core library base class for all opensearch exceptions. * @@ -118,6 +117,14 @@ public class OpenSearchException extends RuntimeException implements Writeable, UNKNOWN_VERSION_ADDED ) ); + registerExceptionHandle( + new OpenSearchExceptionHandle( + org.opensearch.OpenSearchParseException.class, + org.opensearch.OpenSearchParseException::new, + 35, + UNKNOWN_VERSION_ADDED + ) + ); registerExceptionHandle( new OpenSearchExceptionHandle( org.opensearch.core.common.ParsingException.class, @@ -134,6 +141,22 @@ public class OpenSearchException extends RuntimeException implements Writeable, UNKNOWN_VERSION_ADDED ) ); + registerExceptionHandle( + new OpenSearchExceptionHandle( + org.opensearch.core.common.breaker.CircuitBreakingException.class, + org.opensearch.core.common.breaker.CircuitBreakingException::new, + 133, + UNKNOWN_VERSION_ADDED + ) + ); + registerExceptionHandle( + new OpenSearchExceptionHandle( + org.opensearch.core.tasks.TaskCancelledException.class, + org.opensearch.core.tasks.TaskCancelledException::new, + 146, + UNKNOWN_VERSION_ADDED + ) + ); } /** @@ -145,7 +168,7 @@ public OpenSearchException(Throwable cause) { /** * Construct a <code>OpenSearchException</code> with the specified detail message. - * + * <p> * The message can be parameterized using <code>{}</code> as placeholders for the given * arguments * @@ -159,7 +182,7 @@ public OpenSearchException(String msg, Object... args) { /** * Construct a <code>OpenSearchException</code> with the specified detail message * and nested exception. 
- * + * <p> * The message can be parameterized using <code>{}</code> as placeholders for the given * arguments * @@ -564,7 +587,7 @@ public static OpenSearchException innerFromXContent(XContentParser parser, boole * Static toXContent helper method that renders {@link OpenSearchException} or {@link Throwable} instances * as XContent, delegating the rendering to {@link OpenSearchException#toXContent(XContentBuilder, ToXContent.Params)} * or {@link #innerToXContent(XContentBuilder, ToXContent.Params, Throwable, String, String, Map, Map, Throwable)}. - * + * <p> * This method is usually used when the {@link Throwable} is rendered as a part of another XContent object, and its result can * be parsed back using the {@code OpenSearchException.fromXContent(XContentParser)} method. */ @@ -583,7 +606,7 @@ public static void generateThrowableXContent(XContentBuilder builder, ToXContent * depends on the value of the "detailed" parameter: when it's false only a simple message based on the type and message of the * exception is rendered. When it's true, all details are provided, including guessed root causes, the cause, and potentially the stack trace. - * + * <p> * This method is usually used when the {@link Exception} is rendered as a full XContent object, and its output can be parsed * by the {@code #OpenSearchException.failureFromXContent(XContentParser)} method. */ diff --git a/server/src/main/java/org/opensearch/OpenSearchParseException.java b/libs/core/src/main/java/org/opensearch/OpenSearchParseException.java similarity index 94% rename from server/src/main/java/org/opensearch/OpenSearchParseException.java rename to libs/core/src/main/java/org/opensearch/OpenSearchParseException.java index c2516402b0d30..26aff04b30a56 100644 --- a/server/src/main/java/org/opensearch/OpenSearchParseException.java +++ b/libs/core/src/main/java/org/opensearch/OpenSearchParseException.java @@ -32,6 +32,7 @@ package org.opensearch; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.rest.RestStatus; @@ -40,8 +41,9 @@ /** * Unchecked exception that is translated into a {@code 400 BAD REQUEST} error when it bubbles out over HTTP. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class OpenSearchParseException extends OpenSearchException { public OpenSearchParseException(String msg, Object...
args) { diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java index 9329f221922ea..66ba446d4fc54 100644 --- a/libs/core/src/main/java/org/opensearch/Version.java +++ b/libs/core/src/main/java/org/opensearch/Version.java @@ -33,6 +33,7 @@ package org.opensearch; import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -50,6 +51,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Version implements Comparable<Version>, ToXContentFragment { /* * The logic for ID is: XXYYZZAA, where XX is major version, YY is minor version, ZZ is revision, and AA is alpha/beta/rc indicator AA @@ -90,8 +92,16 @@ public class Version implements Comparable<Version>, ToXContentFragment { public static final Version V_2_8_0 = new Version(2080099, org.apache.lucene.util.Version.LUCENE_9_6_0); public static final Version V_2_8_1 = new Version(2080199, org.apache.lucene.util.Version.LUCENE_9_6_0); public static final Version V_2_9_0 = new Version(2090099, org.apache.lucene.util.Version.LUCENE_9_7_0); + public static final Version V_2_9_1 = new Version(2090199, org.apache.lucene.util.Version.LUCENE_9_7_0); public static final Version V_2_10_0 = new Version(2100099, org.apache.lucene.util.Version.LUCENE_9_7_0); - public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_8_0); + public static final Version V_2_10_1 = new Version(2100199, org.apache.lucene.util.Version.LUCENE_9_7_0); + public static final Version V_2_11_0 = new Version(2110099, org.apache.lucene.util.Version.LUCENE_9_7_0); + public static final Version V_2_11_1 = new Version(2110199, org.apache.lucene.util.Version.LUCENE_9_7_0); + public static final Version V_2_11_2 = new Version(2110299, org.apache.lucene.util.Version.LUCENE_9_7_0); + public static final Version V_2_12_0 = new Version(2120099, org.apache.lucene.util.Version.LUCENE_9_9_2); + public static final Version V_2_12_1 = new Version(2120199, org.apache.lucene.util.Version.LUCENE_9_9_2); + public static final Version V_2_13_0 = new Version(2130099, org.apache.lucene.util.Version.LUCENE_9_10_0); + public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_11_0); public static final Version CURRENT = V_3_0_0; public static Version fromId(int id) { diff --git a/libs/core/src/main/java/org/opensearch/core/ParseField.java b/libs/core/src/main/java/org/opensearch/core/ParseField.java index 5741f97d1d335..171b8eaf5c397 100644 --- a/libs/core/src/main/java/org/opensearch/core/ParseField.java +++ b/libs/core/src/main/java/org/opensearch/core/ParseField.java @@ -31,6 +31,7 @@ package org.opensearch.core; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.XContentLocation; @@ -43,7 +44,11 @@ /** * Holds a field that can be found in a request while parsing and its different * variants, which may be deprecated. 
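As a worked example of the XXYYZZAA id scheme documented in Version.java above: V_2_13_0 is 2130099, i.e. major 2, minor 13, revision 0, suffix 99 (the value used by all the GA release constants above). A small hypothetical sketch, not part of this change:

// Decode the XXYYZZAA id scheme by hand, e.g. V_2_13_0 = 2130099.
public class VersionIdExample {
    public static void main(String[] args) {
        int id = 2130099;                 // V_2_13_0 from the constants above
        int major = id / 1000000;         // XX -> 2
        int minor = (id / 10000) % 100;   // YY -> 13
        int revision = (id / 100) % 100;  // ZZ -> 0
        int suffix = id % 100;            // AA -> 99, as in the release constants above
        System.out.printf("%d.%d.%d (AA=%d)%n", major, minor, revision, suffix);
    }
}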
+ * + * @opensearch.api + * */ +@PublicApi(since = "1.0.0") public class ParseField { private final String name; private final String[] deprecatedNames; diff --git a/server/src/main/java/org/opensearch/action/ActionListener.java b/libs/core/src/main/java/org/opensearch/core/action/ActionListener.java similarity index 98% rename from server/src/main/java/org/opensearch/action/ActionListener.java rename to libs/core/src/main/java/org/opensearch/core/action/ActionListener.java index 645ed4deec006..4fd55898a2cb5 100644 --- a/server/src/main/java/org/opensearch/action/ActionListener.java +++ b/libs/core/src/main/java/org/opensearch/core/action/ActionListener.java @@ -30,13 +30,14 @@ * GitHub history for details. */ -package org.opensearch.action; +package org.opensearch.core.action; import org.opensearch.ExceptionsHelper; import org.opensearch.common.CheckedConsumer; import org.opensearch.common.CheckedFunction; import org.opensearch.common.CheckedRunnable; import org.opensearch.common.CheckedSupplier; +import org.opensearch.common.annotation.PublicApi; import java.util.ArrayList; import java.util.List; @@ -48,6 +49,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ActionListener<Response> { /** * Handle action response. This response may constitute a failure or a @@ -152,9 +154,9 @@ static <Response> ActionListener<Response> wrap(Runnable runnable) { /** * Creates a listener that wraps another listener, mapping response values via the given mapping function and passing along * exceptions to the delegate. - * + * <p> * Notice that it is considered a bug if the listener's onResponse or onFailure fails. onResponse failures will not call onFailure. - * + * <p> * If the function fails, the listener's onFailure handler will be called. The principle is that the mapped listener will handle * exceptions from the mapping function {@code fn} but it is the responsibility of {@code delegate} to handle its own exceptions * inside `onResponse` and `onFailure`. @@ -332,7 +334,7 @@ protected void innerOnFailure(Exception e) { /** * Completes the given listener with the result from the provided supplier accordingly. * This method is mainly used to complete a listener with a block of synchronous code. - * + * <p> * If the supplier fails, the listener's onFailure handler will be called. * It is the responsibility of {@code delegate} to handle its own exceptions inside `onResponse` and `onFailure`. */ diff --git a/server/src/main/java/org/opensearch/action/ActionResponse.java b/libs/core/src/main/java/org/opensearch/core/action/ActionResponse.java similarity index 89% rename from server/src/main/java/org/opensearch/action/ActionResponse.java rename to libs/core/src/main/java/org/opensearch/core/action/ActionResponse.java index fd13971433d8b..7525bfb243aae 100644 --- a/server/src/main/java/org/opensearch/action/ActionResponse.java +++ b/libs/core/src/main/java/org/opensearch/core/action/ActionResponse.java @@ -30,10 +30,11 @@ * GitHub history for details. 
*/ -package org.opensearch.action; +package org.opensearch.core.action; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; @@ -42,6 +43,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class ActionResponse extends TransportResponse { public ActionResponse() {} diff --git a/server/src/main/java/org/opensearch/action/NotifyOnceListener.java b/libs/core/src/main/java/org/opensearch/core/action/NotifyOnceListener.java similarity index 93% rename from server/src/main/java/org/opensearch/action/NotifyOnceListener.java rename to libs/core/src/main/java/org/opensearch/core/action/NotifyOnceListener.java index cc625962e54f6..f087322e0024c 100644 --- a/server/src/main/java/org/opensearch/action/NotifyOnceListener.java +++ b/libs/core/src/main/java/org/opensearch/core/action/NotifyOnceListener.java @@ -30,7 +30,9 @@ * GitHub history for details. */ -package org.opensearch.action; +package org.opensearch.core.action; + +import org.opensearch.common.annotation.PublicApi; import java.util.concurrent.atomic.AtomicBoolean; @@ -39,8 +41,9 @@ * that is called is only called once. Subclasses should implement notification logic with * innerOnResponse and innerOnFailure. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class NotifyOnceListener<Response> implements ActionListener<Response> { private final AtomicBoolean hasBeenCalled = new AtomicBoolean(false); diff --git a/libs/core/src/main/java/org/opensearch/core/action/ShardOperationFailedException.java b/libs/core/src/main/java/org/opensearch/core/action/ShardOperationFailedException.java index 9fdd4ae273a8b..7456dcd335f72 100644 --- a/libs/core/src/main/java/org/opensearch/core/action/ShardOperationFailedException.java +++ b/libs/core/src/main/java/org/opensearch/core/action/ShardOperationFailedException.java @@ -34,8 +34,8 @@ import org.opensearch.common.Nullable; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.ToXContentObject; import java.util.Objects; diff --git a/libs/core/src/main/java/org/opensearch/core/action/support/DefaultShardOperationFailedException.java b/libs/core/src/main/java/org/opensearch/core/action/support/DefaultShardOperationFailedException.java index ccb35ad5c6c6f..777f8d04758d0 100644 --- a/libs/core/src/main/java/org/opensearch/core/action/support/DefaultShardOperationFailedException.java +++ b/libs/core/src/main/java/org/opensearch/core/action/support/DefaultShardOperationFailedException.java @@ -34,15 +34,15 @@ import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; -import org.opensearch.core.action.ShardOperationFailedException; import org.opensearch.core.ParseField; +import org.opensearch.core.action.ShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.rest.RestStatus; import java.io.IOException; diff --git
a/libs/core/src/main/java/org/opensearch/core/common/ParsingException.java b/libs/core/src/main/java/org/opensearch/core/common/ParsingException.java index d2cd7a3170792..b6dc7dc928b3e 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/ParsingException.java +++ b/libs/core/src/main/java/org/opensearch/core/common/ParsingException.java @@ -35,10 +35,10 @@ import org.opensearch.OpenSearchException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentLocation; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.rest.RestStatus; import java.io.IOException; diff --git a/libs/core/src/main/java/org/opensearch/core/common/Strings.java b/libs/core/src/main/java/org/opensearch/core/common/Strings.java index d6c484e5e4746..8fdec670bd9f2 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/Strings.java +++ b/libs/core/src/main/java/org/opensearch/core/common/Strings.java @@ -8,9 +8,18 @@ package org.opensearch.core.common; +import org.apache.lucene.util.BytesRefBuilder; +import org.opensearch.ExceptionsHelper; +import org.opensearch.OpenSearchException; import org.opensearch.common.Nullable; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.util.CollectionUtils; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; import java.io.BufferedReader; +import java.io.IOException; import java.io.StringReader; import java.util.ArrayList; import java.util.Arrays; @@ -24,9 +33,12 @@ import java.util.TreeSet; import java.util.function.Supplier; +import static java.util.Collections.unmodifiableSet; +import static org.opensearch.common.util.set.Sets.newHashSet; + /** * String utility class. - * + * <p> * TODO replace Strings in :server * * @opensearch.internal @@ -34,115 +46,29 @@ public class Strings { public static final String UNKNOWN_UUID_VALUE = "_na_"; public static final String[] EMPTY_ARRAY = new String[0]; + public static final Set<Character> INVALID_FILENAME_CHARS = unmodifiableSet( + newHashSet('\\', '/', '*', '?', '"', '<', '>', '|', ' ', ',') + ); - /** - * Split the specified string by commas to an array. - * - * @param s the string to split - * @return the array of split values - * @see String#split(String) - */ - public static String[] splitStringByCommaToArray(final String s) { - if (s == null || s.isEmpty()) return Strings.EMPTY_ARRAY; - else return s.split(","); - } - - /** - * Convenience method to return a Collection as a delimited (e.g. CSV) - * String. E.g. useful for <code>toString()</code> implementations. 
- * - * @param coll the Collection to display - * @param delim the delimiter to use (probably a ",") - * @param prefix the String to start each element with - * @param suffix the String to end each element with - * @return the delimited String - */ - public static String collectionToDelimitedString(Iterable<?> coll, String delim, String prefix, String suffix) { - StringBuilder sb = new StringBuilder(); - collectionToDelimitedString(coll, delim, prefix, suffix, sb); - return sb.toString(); - } - - public static void collectionToDelimitedString(Iterable<?> coll, String delim, String prefix, String suffix, StringBuilder sb) { - Iterator<?> it = coll.iterator(); - while (it.hasNext()) { - sb.append(prefix).append(it.next()).append(suffix); - if (it.hasNext()) { - sb.append(delim); - } - } - } - - /** - * Convenience method to return a Collection as a delimited (e.g. CSV) - * String. E.g. useful for <code>toString()</code> implementations. - * - * @param coll the Collection to display - * @param delim the delimiter to use (probably a ",") - * @return the delimited String - */ - public static String collectionToDelimitedString(Iterable<?> coll, String delim) { - return collectionToDelimitedString(coll, delim, "", ""); - } + // no instance: + private Strings() {} - /** - * Convenience method to return a Collection as a CSV String. - * E.g. useful for <code>toString()</code> implementations. - * - * @param coll the Collection to display - * @return the delimited String - */ - public static String collectionToCommaDelimitedString(Iterable<?> coll) { - return collectionToDelimitedString(coll, ","); - } + // --------------------------------------------------------------------- + // General convenience methods for working with Strings + // --------------------------------------------------------------------- - /** - * Convenience method to return a String array as a delimited (e.g. CSV) - * String. E.g. useful for <code>toString()</code> implementations. - * - * @param arr the array to display - * @param delim the delimiter to use (probably a ",") - * @return the delimited String - */ - public static String arrayToDelimitedString(Object[] arr, String delim) { - StringBuilder sb = new StringBuilder(); - arrayToDelimitedString(arr, delim, sb); - return sb.toString(); - } - - public static void arrayToDelimitedString(Object[] arr, String delim, StringBuilder sb) { - if (isEmpty(arr)) { - return; - } - for (int i = 0; i < arr.length; i++) { - if (i > 0) { - sb.append(delim); + public static void spaceify(int spaces, String from, StringBuilder to) throws Exception { + try (BufferedReader reader = new BufferedReader(new StringReader(from))) { + String line; + while ((line = reader.readLine()) != null) { + for (int i = 0; i < spaces; i++) { + to.append(' '); + } + to.append(line).append('\n'); } - sb.append(arr[i]); } } - /** - * Convenience method to return a String array as a CSV String. - * E.g. useful for <code>toString()</code> implementations. - * - * @param arr the array to display - * @return the delimited String - */ - public static String arrayToCommaDelimitedString(Object[] arr) { - return arrayToDelimitedString(arr, ","); - } - - /** - * Determine whether the given array is empty: - * i.e. <code>null</code> or of zero length. - * - * @param array the array to check - */ - private static boolean isEmpty(Object[] array) { - return (array == null || array.length == 0); - } - /** * Check that the given CharSequence is neither <code>null</code> nor of length 0. 
* Note: Will return <code>true</code> for a CharSequence that purely consists of whitespace. @@ -161,6 +87,18 @@ public static boolean hasLength(CharSequence str) { return (str != null && str.length() > 0); } + /** + * Check that the given BytesReference is neither <code>null</code> nor of length 0 + * Note: Will return <code>true</code> for a BytesReference that purely consists of whitespace. + * + * @param bytesReference the BytesReference to check (may be <code>null</code>) + * @return <code>true</code> if the BytesReference is not null and has length + * @see Strings#hasLength(CharSequence) + */ + public static boolean hasLength(final BytesReference bytesReference) { + return (bytesReference != null && bytesReference.length() > 0); + } + /** * Check that the given String is neither <code>null</code> nor of length 0. * Note: Will return <code>true</code> for a String that purely consists of whitespace. @@ -169,7 +107,7 @@ public static boolean hasLength(CharSequence str) { * @return <code>true</code> if the String is not null and has length * @see Strings#hasLength(CharSequence) */ - public static boolean hasLength(String str) { + public static boolean hasLength(final String str) { return hasLength((CharSequence) str); } @@ -186,7 +124,7 @@ public static boolean hasLength(String str) { * @param str the CharSequence to check (may be <code>null</code>) * @return <code>true</code> if the CharSequence is either null or has a zero length */ - public static boolean isEmpty(CharSequence str) { + public static boolean isEmpty(final CharSequence str) { return hasLength(str) == false; } @@ -234,6 +172,42 @@ public static boolean hasText(String str) { return hasText((CharSequence) str); } + /** + * Trim all occurrences of the supplied leading character from the given String. + * + * @param str the String to check + * @param leadingCharacter the leading character to be trimmed + * @return the trimmed String + */ + public static String trimLeadingCharacter(String str, char leadingCharacter) { + if (hasLength(str) == false) { + return str; + } + StringBuilder sb = new StringBuilder(str); + while (sb.length() > 0 && sb.charAt(0) == leadingCharacter) { + sb.deleteCharAt(0); + } + return sb.toString(); + } + + /** + * Test whether the given string matches the given substring + * at the given index. + * + * @param str the original string (or StringBuilder) + * @param index the index in the original string to start matching against + * @param substring the substring to match at the given index + */ + public static boolean substringMatch(CharSequence str, int index, CharSequence substring) { + for (int j = 0; j < substring.length(); j++) { + int i = index + j; + if (i >= str.length() || str.charAt(i) != substring.charAt(j)) { + return false; + } + } + return true; + } + /** * Replace all occurrences of a substring within a string with * another string. @@ -263,24 +237,6 @@ public static String replace(String inString, String oldPattern, String newPatte return sb.toString(); } - /** - * Trim all occurrences of the supplied leading character from the given String. 
- * - * @param str the String to check - * @param leadingCharacter the leading character to be trimmed - * @return the trimmed String - */ - public static String trimLeadingCharacter(String str, char leadingCharacter) { - if (hasLength(str) == false) { - return str; - } - StringBuilder sb = new StringBuilder(str); - while (sb.length() > 0 && sb.charAt(0) == leadingCharacter) { - sb.deleteCharAt(0); - } - return sb.toString(); - } - /** * Delete all occurrences of the given substring. * @@ -314,16 +270,65 @@ public static String deleteAny(String inString, String charsToDelete) { return sb.toString(); } - public static void spaceify(int spaces, String from, StringBuilder to) throws Exception { - try (BufferedReader reader = new BufferedReader(new StringReader(from))) { - String line; - while ((line = reader.readLine()) != null) { - for (int i = 0; i < spaces; i++) { - to.append(' '); - } - to.append(line).append('\n'); + // --------------------------------------------------------------------- + // Convenience methods for working with formatted Strings + // --------------------------------------------------------------------- + + /** + * Quote the given String with single quotes. + * + * @param str the input String (e.g. "myString") + * @return the quoted String (e.g. "'myString'"), + * or <code>null</code> if the input was <code>null</code> + */ + public static String quote(String str) { + return (str != null ? "'" + str + "'" : null); + } + + /** + * Capitalize a <code>String</code>, changing the first letter to + * upper case as per {@link Character#toUpperCase(char)}. + * No other letters are changed. + * + * @param str the String to capitalize, may be <code>null</code> + * @return the capitalized String, <code>null</code> if null + */ + public static String capitalize(String str) { + return changeFirstCharacterCase(str, true); + } + + private static String changeFirstCharacterCase(String str, boolean capitalize) { + if (str == null || str.length() == 0) { + return str; + } + StringBuilder sb = new StringBuilder(str.length()); + if (capitalize) { + sb.append(Character.toUpperCase(str.charAt(0))); + } else { + sb.append(Character.toLowerCase(str.charAt(0))); + } + sb.append(str.substring(1)); + return sb.toString(); + } + + public static boolean validFileName(String fileName) { + for (int i = 0; i < fileName.length(); i++) { + char c = fileName.charAt(i); + if (INVALID_FILENAME_CHARS.contains(c)) { + return false; } } + return true; + } + + public static boolean validFileNameExcludingAstrix(String fileName) { + for (int i = 0; i < fileName.length(); i++) { + char c = fileName.charAt(i); + if (c != '*' && INVALID_FILENAME_CHARS.contains(c)) { + return false; + } + } + return true; } /** @@ -341,47 +346,6 @@ public static String[] toStringArray(final Collection<String> collection) { return collection.toArray(new String[0]); } - /** - * Take a String which is a delimited list and convert it to a String array. - * <p>A single delimiter can consists of more than one character: It will still - * be considered as single delimiter string, rather than as bunch of potential - * delimiter characters - in contrast to <code>tokenizeToStringArray</code>. - * - * @param str the input String - * @param delimiter the delimiter between elements (this is a single delimiter, - * rather than a bunch individual delimiter characters) - * @param charsToDelete a set of characters to delete. Useful for deleting unwanted - * line breaks: e.g. "\r\n\f" will delete all new lines and line feeds in a String. 
- * @return an array of the tokens in the list - * @see #tokenizeToStringArray - */ - public static String[] delimitedListToStringArray(String str, String delimiter, String charsToDelete) { - if (str == null) { - return Strings.EMPTY_ARRAY; - } - if (delimiter == null) { - return new String[] { str }; - } - List<String> result = new ArrayList<>(); - if ("".equals(delimiter)) { - for (int i = 0; i < str.length(); i++) { - result.add(deleteAny(str.substring(i, i + 1), charsToDelete)); - } - } else { - int pos = 0; - int delPos; - while ((delPos = str.indexOf(delimiter, pos)) != -1) { - result.add(deleteAny(str.substring(pos, delPos), charsToDelete)); - pos = delPos + delimiter.length(); - } - if (str.length() > 0 && pos <= str.length()) { - // Add rest of String, but not in case of empty input. - result.add(deleteAny(str.substring(pos), charsToDelete)); - } - } - return toStringArray(result); - } - /** * Tokenize the specified string by commas to a set, trimming whitespace and ignoring empty tokens. * @@ -393,6 +357,41 @@ public static Set<String> tokenizeByCommaToSet(final String s) { return tokenizeToCollection(s, ",", HashSet::new); } + /** + * Split the specified string by commas to an array. + * + * @param s the string to split + * @return the array of split values + * @see String#split(String) + */ + public static String[] splitStringByCommaToArray(final String s) { + if (s == null || s.isEmpty()) return Strings.EMPTY_ARRAY; + else return s.split(","); + } + + /** + * Split a String at the first occurrence of the delimiter. + * Does not include the delimiter in the result. + * + * @param toSplit the string to split + * @param delimiter to split the string up with + * @return a two element array with index 0 being before the delimiter, and + * index 1 being after the delimiter (neither element includes the delimiter); + * or <code>null</code> if the delimiter wasn't found in the given input String + */ + public static String[] split(String toSplit, String delimiter) { + if (hasLength(toSplit) == false || hasLength(delimiter) == false) { + return null; + } + int offset = toSplit.indexOf(delimiter); + if (offset < 0) { + return null; + } + String beforeDelimiter = toSplit.substring(0, offset); + String afterDelimiter = toSplit.substring(offset + delimiter.length()); + return new String[] { beforeDelimiter, afterDelimiter }; + } + /** + * Tokenize the given String into a String array via a StringTokenizer. + * Trims tokens and omits empty tokens. @@ -446,6 +445,47 @@ private static <T extends Collection<String>> T tokenizeToCollection( return tokens; } + /** + * Take a String which is a delimited list and convert it to a String array. + * <p>A single delimiter can consist of more than one character: It will still + * be considered a single delimiter string, rather than a bunch of potential + * delimiter characters - in contrast to <code>tokenizeToStringArray</code>. + * + * @param str the input String + * @param delimiter the delimiter between elements (this is a single delimiter, + * rather than a bunch of individual delimiter characters) + * @param charsToDelete a set of characters to delete. Useful for deleting unwanted + * line breaks: e.g. "\r\n\f" will delete all new lines and line feeds in a String.
+ * @return an array of the tokens in the list + * @see #tokenizeToStringArray + */ + public static String[] delimitedListToStringArray(String str, String delimiter, String charsToDelete) { + if (str == null) { + return Strings.EMPTY_ARRAY; + } + if (delimiter == null) { + return new String[] { str }; + } + List<String> result = new ArrayList<>(); + if ("".equals(delimiter)) { + for (int i = 0; i < str.length(); i++) { + result.add(deleteAny(str.substring(i, i + 1), charsToDelete)); + } + } else { + int pos = 0; + int delPos; + while ((delPos = str.indexOf(delimiter, pos)) != -1) { + result.add(deleteAny(str.substring(pos, delPos), charsToDelete)); + pos = delPos + delimiter.length(); + } + if (str.length() > 0 && pos <= str.length()) { + // Add rest of String, but not in case of empty input. + result.add(deleteAny(str.substring(pos), charsToDelete)); + } + } + return toStringArray(result); + } + /** * Take a String which is a delimited list and convert it to a String array. * <p>A single delimiter can consist of more than one character: It will still * be considered a single delimiter string, rather than a bunch of potential * delimiter characters - in contrast to <code>tokenizeToStringArray</code>. * * @param str the input String * @param delimiter the delimiter between elements (this is a single delimiter, * rather than a bunch of individual delimiter characters) @@ -486,33 +526,293 @@ public static Set<String> commaDelimitedListToSet(String str) { return set; } - public static boolean isNullOrEmpty(@Nullable String s) { - return s == null || s.isEmpty(); + /** + * Convenience method to return a Collection as a delimited (e.g. CSV) + * String. E.g. useful for <code>toString()</code> implementations. + * + * @param coll the Collection to display + * @param delim the delimiter to use (probably a ",") + * @param prefix the String to start each element with + * @param suffix the String to end each element with + * @return the delimited String + */ + public static String collectionToDelimitedString(Iterable<?> coll, String delim, String prefix, String suffix) { + StringBuilder sb = new StringBuilder(); + collectionToDelimitedString(coll, delim, prefix, suffix, sb); + return sb.toString(); + } + + public static void collectionToDelimitedString(Iterable<?> coll, String delim, String prefix, String suffix, StringBuilder sb) { + Iterator<?> it = coll.iterator(); + while (it.hasNext()) { + sb.append(prefix).append(it.next()).append(suffix); + if (it.hasNext()) { + sb.append(delim); + } + } } /** - * Capitalize a <code>String</code>, changing the first letter to - * upper case as per {@link Character#toUpperCase(char)}. - * No other letters are changed. + * Convenience method to return a Collection as a delimited (e.g. CSV) + * String. E.g. useful for <code>toString()</code> implementations. * - * @param str the String to capitalize, may be <code>null</code> - * @return the capitalized String, <code>null</code> if null + * @param coll the Collection to display + * @param delim the delimiter to use (probably a ",") + * @return the delimited String */ - public static String capitalize(String str) { - return changeFirstCharacterCase(str, true); + public static String collectionToDelimitedString(Iterable<?> coll, String delim) { + return collectionToDelimitedString(coll, delim, "", ""); } - private static String changeFirstCharacterCase(String str, boolean capitalize) { - if (str == null || str.length() == 0) { - return str; + /** + * Convenience method to return a Collection as a CSV String. + * E.g. useful for <code>toString()</code> implementations.
+ * + * @param coll the Collection to display + * @return the delimited String + */ + public static String collectionToCommaDelimitedString(Iterable<?> coll) { + return collectionToDelimitedString(coll, ","); + } + + /** + * Convenience method to return a String array as a delimited (e.g. CSV) + * String. E.g. useful for <code>toString()</code> implementations. + * + * @param arr the array to display + * @param delim the delimiter to use (probably a ",") + * @return the delimited String + */ + public static String arrayToDelimitedString(Object[] arr, String delim) { + StringBuilder sb = new StringBuilder(); + arrayToDelimitedString(arr, delim, sb); + return sb.toString(); + } + + public static void arrayToDelimitedString(Object[] arr, String delim, StringBuilder sb) { + if (isEmpty(arr)) { + return; } + for (int i = 0; i < arr.length; i++) { + if (i > 0) { + sb.append(delim); + } + sb.append(arr[i]); + } + } + + /** + * Convenience method to return a String array as a CSV String. + * E.g. useful for <code>toString()</code> implementations. + * + * @param arr the array to display + * @return the delimited String + */ + public static String arrayToCommaDelimitedString(Object[] arr) { + return arrayToDelimitedString(arr, ","); + } + + /** + * Format the double value with a single decimal point, trimming a trailing '.0'. + */ + public static String format1Decimals(double value, String suffix) { + String p = String.valueOf(value); + int ix = p.indexOf('.') + 1; + int ex = p.indexOf('E'); + char fraction = p.charAt(ix); + if (fraction == '0') { + if (ex != -1) { + return p.substring(0, ix - 1) + p.substring(ex) + suffix; + } else { + return p.substring(0, ix - 1) + suffix; + } } else { + if (ex != -1) { + return p.substring(0, ix) + fraction + p.substring(ex) + suffix; + } else { + return p.substring(0, ix) + fraction + suffix; + } } + } + + /** + * Determine whether the given array is empty: + * i.e. <code>null</code> or of zero length. + * + * @param array the array to check + */ + private static boolean isEmpty(final Object[] array) { + return (array == null || array.length == 0); + } + + public static byte[] toUTF8Bytes(CharSequence charSequence) { + return toUTF8Bytes(charSequence, new BytesRefBuilder()); + } + + public static byte[] toUTF8Bytes(CharSequence charSequence, BytesRefBuilder spare) { + spare.copyChars(charSequence); + return Arrays.copyOf(spare.bytes(), spare.length()); + } + + /** + * Return substring(beginIndex, endIndex) that is impervious to string length. + */ + public static String substring(String s, int beginIndex, int endIndex) { + if (s == null) { + return s; + } + + int realEndIndex = s.length() > 0 ? s.length() - 1 : 0; + + if (endIndex > realEndIndex) { + return s.substring(beginIndex); + } else { + return s.substring(beginIndex, endIndex); + } + } + + /** + * Returns <code>true</code> if the array is empty or contains only a single element equal to "*" or "_all", + * which conventionally means "match everything" + */ + public static boolean isAllOrWildcard(String[] data) { + return CollectionUtils.isEmpty(data) || data.length == 1 && isAllOrWildcard(data[0]); + } + + /** + * Returns `true` if the string is `_all` or `*`.
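A few illustrative calls for the formatting and wildcard helpers above (a hypothetical sketch, not part of this change; expected outputs are shown in comments):

import org.opensearch.core.common.Strings;

public class StringsHelpersExample {
    public static void main(String[] args) {
        // One fraction digit is kept; a trailing ".0" is trimmed entirely
        System.out.println(Strings.format1Decimals(3.14159, "ms")); // "3.1ms"
        System.out.println(Strings.format1Decimals(3.0, "s"));      // "3s"

        // An empty array, or a single "*" or "_all" element, means "match everything"
        System.out.println(Strings.isAllOrWildcard(new String[0]));          // true
        System.out.println(Strings.isAllOrWildcard(new String[] { "*" }));   // true
        System.out.println(Strings.isAllOrWildcard(new String[] { "idx" })); // false
    }
}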
+ */ + public static boolean isAllOrWildcard(String data) { + return "_all".equals(data) || "*".equals(data); + } + + /** + * Return a {@link String} that is the json representation of the provided {@link ToXContent}. + * Wraps the output into an anonymous object if needed. The content is not pretty-printed + * nor human readable. + */ + public static String toString(MediaType mediaType, ToXContent toXContent) { + return toString(mediaType, toXContent, false, false); + } + + /** + * Return a {@link String} that is the json representation of the provided {@link ToXContent}. + * Wraps the output into an anonymous object if needed. + * Allows to configure the params. + * The content is not pretty-printed nor human readable. + */ + public static String toString(MediaType mediaType, ToXContent toXContent, ToXContent.Params params) { + return toString(mediaType, toXContent, params, false, false); + } + + /** + * Return a {@link String} that is the json representation of the provided {@link ToXContent}. + * Wraps the output into an anonymous object if needed. Allows to control whether the outputted + * json needs to be pretty printed and human readable. + * + */ + public static String toString(MediaType mediaType, ToXContent toXContent, boolean pretty, boolean human) { + return toString(mediaType, toXContent, ToXContent.EMPTY_PARAMS, pretty, human); + } + + /** + * Return a {@link String} that is the json representation of the provided {@link ToXContent}. + * Wraps the output into an anonymous object if needed. + * Allows to configure the params. + * Allows to control whether the outputted json needs to be pretty printed and human readable. + */ + private static String toString(MediaType mediaType, ToXContent toXContent, ToXContent.Params params, boolean pretty, boolean human) { + try { + XContentBuilder builder = createBuilder(mediaType, pretty, human); + if (toXContent.isFragment()) { + builder.startObject(); + } + toXContent.toXContent(builder, params); + if (toXContent.isFragment()) { + builder.endObject(); + } + return builder.toString(); + } catch (IOException e) { + try { + XContentBuilder builder = createBuilder(mediaType, pretty, human); + builder.startObject(); + builder.field("error", "error building toString out of XContent: " + e.getMessage()); + builder.field("stack_trace", ExceptionsHelper.stackTrace(e)); + builder.endObject(); + return builder.toString(); + } catch (IOException e2) { + throw new OpenSearchException("cannot generate error message for deserialization", e); + } + } + } + + private static XContentBuilder createBuilder(MediaType mediaType, boolean pretty, boolean human) throws IOException { + XContentBuilder builder = XContentBuilder.builder(mediaType.xContent()); + if (pretty) { + builder.prettyPrint(); + } + if (human) { + builder.humanReadable(true); + } + return builder; + } + + /** + * Truncates string to a length less than length. Backtracks to throw out + * high surrogates. + */ + public static String cleanTruncate(String s, int length) { + if (s == null) { + return s; + } + /* + * Its pretty silly for you to truncate to 0 length but just in case + * someone does this shouldn't break. 
+ */ + if (length == 0) { + return ""; + } + if (length >= s.length()) { + return s; + } + if (Character.isHighSurrogate(s.charAt(length - 1))) { + length--; + } + return s.substring(0, length); + } + + public static boolean isNullOrEmpty(@Nullable String s) { + return s == null || s.isEmpty(); + } + + public static String padStart(String s, int minimumLength, char c) { + if (s == null) { + throw new NullPointerException("s"); + } + if (s.length() >= minimumLength) { + return s; + } else { + StringBuilder sb = new StringBuilder(minimumLength); + for (int i = s.length(); i < minimumLength; i++) { + sb.append(c); + } + + sb.append(s); + return sb.toString(); + } + } + + public static String toLowercaseAscii(String in) { + StringBuilder out = new StringBuilder(); + Iterator<Integer> iter = in.codePoints().iterator(); + while (iter.hasNext()) { + int codepoint = iter.next(); + if (codepoint > 128) { + out.appendCodePoint(codepoint); + } else { + out.appendCodePoint(Character.toLowerCase(codepoint)); + } + } + return out.toString(); } } diff --git a/server/src/main/java/org/opensearch/common/breaker/CircuitBreaker.java b/libs/core/src/main/java/org/opensearch/core/common/breaker/CircuitBreaker.java similarity index 79% rename from server/src/main/java/org/opensearch/common/breaker/CircuitBreaker.java rename to libs/core/src/main/java/org/opensearch/core/common/breaker/CircuitBreaker.java index 4cbd375e8c1ff..846950ff17c63 100644 --- a/server/src/main/java/org/opensearch/common/breaker/CircuitBreaker.java +++ b/libs/core/src/main/java/org/opensearch/core/common/breaker/CircuitBreaker.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.common.breaker; +package org.opensearch.core.common.breaker; import java.util.Locale; @@ -71,17 +71,23 @@ public interface CircuitBreaker { /** * The type of breaker - * + * can be {@link #MEMORY}, {@link #PARENT}, or {@link #NOOP} * @opensearch.internal */ enum Type { - // A regular or ChildMemoryCircuitBreaker + /** A regular or ChildMemoryCircuitBreaker */ MEMORY, - // A special parent-type for the hierarchy breaker service + /** A special parent-type for the hierarchy breaker service */ PARENT, - // A breaker where every action is a noop, it never breaks + /** A breaker where every action is a noop, it never breaks */ NOOP; + /** + * Converts string (case-insensitive) to breaker {@link Type} + * @param value "noop", "parent", or "memory" (case-insensitive) + * @return the breaker {@link Type} + * @throws IllegalArgumentException if value is not "noop", "parent", or "memory" + */ public static Type parseValue(String value) { switch (value.toLowerCase(Locale.ROOT)) { case "noop": @@ -98,13 +104,13 @@ public static Type parseValue(String value) { /** * The breaker durability - * + * can be {@link #TRANSIENT} or {@link #PERMANENT} * @opensearch.internal */ enum Durability { - // The condition that tripped the circuit breaker fixes itself eventually. + /** The condition that tripped the circuit breaker fixes itself eventually. */ TRANSIENT, - // The condition that tripped the circuit breaker requires manual intervention. + /** The condition that tripped the circuit breaker requires manual intervention. 
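That closes out the `Strings` changes. A compact sketch of the three small helpers added at the end of the file (`padStart`, `toLowercaseAscii`, and the surrogate-aware `cleanTruncate`), assuming the implementations shown above:

```java
import org.opensearch.core.common.Strings;

public class StringsMiscDemo {
    public static void main(String[] args) {
        System.out.println(Strings.padStart("7", 3, '0'));     // 007
        System.out.println(Strings.toLowercaseAscii("Grüße")); // grüße (only ASCII code points are lowered)

        // cleanTruncate backs off one char rather than splitting a surrogate pair.
        String s = "ab\uD83D\uDE00"; // "ab" plus one emoji (a two-char surrogate pair)
        System.out.println(Strings.cleanTruncate(s, 3).length()); // 2, not 3
    }
}
```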
*/ PERMANENT } @@ -120,11 +126,14 @@ enum Durability { * @param bytes number of bytes to add * @param label string label describing the bytes being added * @return the number of "used" bytes for the circuit breaker + * @throws CircuitBreakingException if the breaker tripped */ double addEstimateBytesAndMaybeBreak(long bytes, String label) throws CircuitBreakingException; /** * Adjust the circuit breaker without tripping + * @param bytes number of bytes to add + * @return the number of "used" bytes for the circuit breaker */ long addWithoutBreaking(long bytes); @@ -154,7 +163,10 @@ enum Durability { String getName(); /** - * @return whether a tripped circuit breaker will reset itself (transient) or requires manual intervention (permanent). + * Returns the {@link Durability} of this breaker + * @return whether a tripped circuit breaker will + * reset itself ({@link Durability#TRANSIENT}) + * or requires manual intervention ({@link Durability#PERMANENT}). */ Durability getDurability(); diff --git a/server/src/main/java/org/opensearch/common/breaker/CircuitBreakingException.java b/libs/core/src/main/java/org/opensearch/core/common/breaker/CircuitBreakingException.java similarity index 92% rename from server/src/main/java/org/opensearch/common/breaker/CircuitBreakingException.java rename to libs/core/src/main/java/org/opensearch/core/common/breaker/CircuitBreakingException.java index 4cab014912970..2df116dcad076 100644 --- a/server/src/main/java/org/opensearch/common/breaker/CircuitBreakingException.java +++ b/libs/core/src/main/java/org/opensearch/core/common/breaker/CircuitBreakingException.java @@ -29,13 +29,13 @@ * GitHub history for details. */ -package org.opensearch.common.breaker; +package org.opensearch.core.common.breaker; import org.opensearch.OpenSearchException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.XContentBuilder; import java.io.IOException; @@ -46,8 +46,11 @@ */ public class CircuitBreakingException extends OpenSearchException { + /** The number of bytes wanted */ private final long bytesWanted; + /** The circuit breaker limit */ private final long byteLimit; + /** The {@link CircuitBreaker.Durability} of the circuit breaker */ private final CircuitBreaker.Durability durability; public CircuitBreakingException(StreamInput in) throws IOException { @@ -88,6 +91,7 @@ public CircuitBreaker.Durability getDurability() { return durability; } + /** Always returns {@link RestStatus#TOO_MANY_REQUESTS} */ @Override public RestStatus status() { return RestStatus.TOO_MANY_REQUESTS; diff --git a/libs/core/src/main/java/org/opensearch/core/common/breaker/NoopCircuitBreaker.java b/libs/core/src/main/java/org/opensearch/core/common/breaker/NoopCircuitBreaker.java new file mode 100644 index 0000000000000..17b9fefd27c99 --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/core/common/breaker/NoopCircuitBreaker.java @@ -0,0 +1,152 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
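With the CircuitBreaker hunks complete, the contract the new `Type.parseValue` javadoc pins down (case-insensitive parse, `IllegalArgumentException` for anything else) can be sketched directly; the switch body itself is outside this hunk, so this leans on the documented behavior:

```java
import org.opensearch.core.common.breaker.CircuitBreaker;

public class BreakerTypeDemo {
    public static void main(String[] args) {
        System.out.println(CircuitBreaker.Type.parseValue("MeMoRy")); // MEMORY
        System.out.println(CircuitBreaker.Type.parseValue("noop"));   // NOOP
        try {
            CircuitBreaker.Type.parseValue("bogus");
        } catch (IllegalArgumentException e) {
            System.out.println("rejected, as the new javadoc documents");
        }
    }
}
```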
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.core.common.breaker; + +/** + * A {@link CircuitBreaker} that doesn't increment or adjust, and all operations are + * basically noops. + * It never trips, limit is always -1, always returns 0 for all metrics. + * @opensearch.internal + */ +public class NoopCircuitBreaker implements CircuitBreaker { + + /** The limit of this breaker is always -1 */ + public static final int LIMIT = -1; + /** Name of this breaker */ + private final String name; + + /** + * Creates a new NoopCircuitBreaker (that never trip) with the given name + * @param name the name of this breaker + */ + public NoopCircuitBreaker(String name) { + this.name = name; + } + + /** + * This is a noop, a noop breaker never trip + * @param fieldName name of this noop breaker + * @param bytesNeeded bytes needed + */ + @Override + public void circuitBreak(String fieldName, long bytesNeeded) { + // noop + } + + /** + * This is a noop, always return 0 and never throw/trip + * @param bytes number of bytes to add + * @param label string label describing the bytes being added + * @return always return 0 + * @throws CircuitBreakingException never thrown + */ + @Override + public double addEstimateBytesAndMaybeBreak(long bytes, String label) throws CircuitBreakingException { + return 0; + } + + /** + * This is a noop, nothing is added, always return 0 + * @param bytes number of bytes to add (ignored) + * @return always return 0 + */ + @Override + public long addWithoutBreaking(long bytes) { + return 0; + } + + /** + * This is a noop, always return 0 + * @return always return 0 + */ + @Override + public long getUsed() { + return 0; + } + + /** + * A noop breaker have a constant limit of -1 + * @return always return -1 + */ + @Override + public long getLimit() { + return LIMIT; + } + + /** + * A noop breaker have no overhead, always return 0 + * @return always return 0 + */ + @Override + public double getOverhead() { + return 0; + } + + /** + * A noop breaker never trip, always return 0 + * @return always return 0 + */ + @Override + public long getTrippedCount() { + return 0; + } + + /** + * return the name of this breaker + * @return the name of this breaker + */ + @Override + public String getName() { + return this.name; + } + + /** + * A noop breaker {@link Durability} is always {@link Durability#PERMANENT} + * @return always return {@link Durability#PERMANENT } + */ + @Override + public Durability getDurability() { + return Durability.PERMANENT; + } + + /** + * Limit and overhead are constant for a noop breaker. + * this is a noop. 
+ * @param limit the desired limit (ignored) + * @param overhead the desired overhead (ignored) + */ + @Override + public void setLimitAndOverhead(long limit, double overhead) { + // noop + } +} diff --git a/libs/core/src/main/java/org/opensearch/core/common/breaker/package-info.java b/libs/core/src/main/java/org/opensearch/core/common/breaker/package-info.java new file mode 100644 index 0000000000000..f9fb83d2207e1 --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/core/common/breaker/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Foundation classes for the Circuit Breaker + */ +package org.opensearch.core.common.breaker; diff --git a/libs/core/src/main/java/org/opensearch/core/common/bytes/AbstractBytesReference.java b/libs/core/src/main/java/org/opensearch/core/common/bytes/AbstractBytesReference.java index e054776d67fdc..a2bf7e499dee8 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/bytes/AbstractBytesReference.java +++ b/libs/core/src/main/java/org/opensearch/core/common/bytes/AbstractBytesReference.java @@ -49,14 +49,10 @@ */ public abstract class AbstractBytesReference implements BytesReference { - private Integer hash = null; // we cache the hash of this reference since it can be quite costly to re-calculated it + /** we cache the hash of this reference since it can be quite costly to re-calculated it */ + private Integer hash = null; private static final int MAX_UTF16_LENGTH = Integer.MAX_VALUE >> 1; - @Override - public int getInt(int index) { - return (get(index) & 0xFF) << 24 | (get(index + 1) & 0xFF) << 16 | (get(index + 2) & 0xFF) << 8 | get(index + 3) & 0xFF; - } - @Override public int indexOf(byte marker, int from) { final int to = length(); diff --git a/libs/core/src/main/java/org/opensearch/core/common/bytes/BytesArray.java b/libs/core/src/main/java/org/opensearch/core/common/bytes/BytesArray.java index ae04ddcc19eee..d7a8414935143 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/bytes/BytesArray.java +++ b/libs/core/src/main/java/org/opensearch/core/common/bytes/BytesArray.java @@ -32,6 +32,7 @@ package org.opensearch.core.common.bytes; +import org.apache.lucene.util.BitUtil; import org.apache.lucene.util.BytesRef; import org.opensearch.core.common.io.stream.StreamInput; @@ -83,6 +84,11 @@ public byte get(int index) { return bytes[offset + index]; } + @Override + public int getInt(int index) { + return (int) BitUtil.VH_BE_INT.get(bytes, offset + index); + } + @Override public int length() { return length; diff --git a/libs/core/src/main/java/org/opensearch/core/common/bytes/BytesReference.java b/libs/core/src/main/java/org/opensearch/core/common/bytes/BytesReference.java index fc8e62c914e27..8cb65c9feb1ca 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/bytes/BytesReference.java +++ b/libs/core/src/main/java/org/opensearch/core/common/bytes/BytesReference.java @@ -35,6 +35,7 @@ import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.BytesStream; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.util.ByteArray; @@ -50,8 +51,9 @@ /** * A reference to bytes. 
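The new `NoopCircuitBreaker` is the degenerate case of the interface, and its constants make the contract visible at a glance. A minimal sketch against the class as added above:

```java
import org.opensearch.core.common.breaker.CircuitBreaker;
import org.opensearch.core.common.breaker.NoopCircuitBreaker;

public class NoopBreakerDemo {
    public static void main(String[] args) {
        CircuitBreaker breaker = new NoopCircuitBreaker("request");

        // Accounting calls are accepted but never accumulate or trip.
        breaker.addEstimateBytesAndMaybeBreak(Long.MAX_VALUE, "huge allocation");

        System.out.println(breaker.getUsed());         // 0
        System.out.println(breaker.getLimit());        // -1
        System.out.println(breaker.getTrippedCount()); // 0
        System.out.println(breaker.getDurability());   // PERMANENT
    }
}
```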
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface BytesReference extends Comparable<BytesReference>, ToXContentFragment { /** @@ -151,9 +153,11 @@ static BytesReference fromByteArray(ByteArray byteArray, int length) { byte get(int index); /** - * Returns the integer read from the 4 bytes (BE) starting at the given index. + * Returns the integer read from the 4 bytes (big endian) starting at the given index. */ - int getInt(int index); + default int getInt(int index) { + return ((get(index) & 0xFF) << 24) | ((get(index + 1) & 0xFF) << 16) | ((get(index + 2) & 0xFF) << 8) | (get(index + 3) & 0xFF); + } /** * Finds the index of the first occurrence of the given marker between within the given bounds. diff --git a/libs/core/src/main/java/org/opensearch/core/common/bytes/CompositeBytesReference.java b/libs/core/src/main/java/org/opensearch/core/common/bytes/CompositeBytesReference.java index 53915a3da824c..1a48abee2dbf8 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/bytes/CompositeBytesReference.java +++ b/libs/core/src/main/java/org/opensearch/core/common/bytes/CompositeBytesReference.java @@ -45,7 +45,7 @@ /** * A composite {@link BytesReference} that allows joining multiple bytes references * into one without copying. - * + * <p> * Note, {@link #toBytesRef()} will materialize all pages in this BytesReference. * * @opensearch.internal diff --git a/libs/core/src/main/java/org/opensearch/core/common/compress/package-info.java b/libs/core/src/main/java/org/opensearch/core/common/compress/package-info.java deleted file mode 100644 index 99459f99c42d8..0000000000000 --- a/libs/core/src/main/java/org/opensearch/core/common/compress/package-info.java +++ /dev/null @@ -1,10 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
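Both `getInt` changes in this stretch decode four bytes as a big-endian int: the new `BytesReference` default method composes it from shifts, while the `BytesArray` override lets a Lucene VarHandle read it in one shot. A small sketch showing the two are interchangeable:

```java
import org.apache.lucene.util.BitUtil;

public class BigEndianIntDemo {
    public static void main(String[] args) {
        byte[] bytes = { 0x01, 0x02, 0x03, 0x04 };

        // Default-method style: compose the value byte by byte.
        int viaShifts = ((bytes[0] & 0xFF) << 24) | ((bytes[1] & 0xFF) << 16)
            | ((bytes[2] & 0xFF) << 8) | (bytes[3] & 0xFF);

        // BytesArray override style: one big-endian VarHandle read.
        int viaVarHandle = (int) BitUtil.VH_BE_INT.get(bytes, 0);

        System.out.println(viaShifts == viaVarHandle); // true
        System.out.printf("0x%08X%n", viaVarHandle);   // 0x01020304
    }
}
```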
- */ - -/** Classes for core compress module */ -package org.opensearch.core.common.compress; diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/BytesStreamInput.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/BytesStreamInput.java index a50d1c165ed72..cad43f817faaf 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/io/stream/BytesStreamInput.java +++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/BytesStreamInput.java @@ -8,6 +8,7 @@ package org.opensearch.core.common.io.stream; +import org.apache.lucene.util.BitUtil; import org.apache.lucene.util.BytesRef; import java.io.EOFException; @@ -17,7 +18,7 @@ * {@link StreamInput} version of Lucene's {@link org.apache.lucene.store.ByteArrayDataInput} * This is used as a replacement of Lucene ByteArrayDataInput for abstracting byte order changes * in Lucene's API - * + * <p> * Attribution given to apache lucene project under ALv2: * * Licensed to the Apache Software Foundation (ASF) under one or more @@ -121,4 +122,33 @@ public int read() throws IOException { return bytes[pos++] & 0xFF; } + @Override + public short readShort() throws IOException { + if (available() < Short.BYTES) { + throw new EOFException(); + } + short value = (short) BitUtil.VH_BE_SHORT.get(bytes, pos); + pos += Short.BYTES; + return value; + } + + @Override + public int readInt() throws IOException { + if (available() < Integer.BYTES) { + throw new EOFException(); + } + int value = (int) BitUtil.VH_BE_INT.get(bytes, pos); + pos += Integer.BYTES; + return value; + } + + @Override + public long readLong() throws IOException { + if (available() < Long.BYTES) { + throw new EOFException(); + } + long value = (long) BitUtil.VH_BE_LONG.get(bytes, pos); + pos += Long.BYTES; + return value; + } } diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/NamedWriteableRegistry.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/NamedWriteableRegistry.java index ec707f147cade..123b52eb92876 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/io/stream/NamedWriteableRegistry.java +++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/NamedWriteableRegistry.java @@ -32,6 +32,8 @@ package org.opensearch.core.common.io.stream; +import org.opensearch.common.annotation.PublicApi; + import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -41,19 +43,21 @@ /** * A registry for {@link Writeable.Reader} readers of {@link NamedWriteable}. - * + * <p> * The registration is keyed by the combination of the category class of {@link NamedWriteable}, and a name unique * to that category. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NamedWriteableRegistry { /** * An entry in the registry, made up of a category class and name, and a reader for that category class. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Entry { /** The superclass of a {@link NamedWriteable} which will be read by {@link #reader}. 
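The new `readShort`/`readInt`/`readLong` overrides check `available()` before the VarHandle read, so a truncated buffer fails fast with `EOFException` rather than an index error. A sketch; the `byte[]` constructor used here is assumed from the existing class, not shown in this hunk:

```java
import java.io.EOFException;

import org.opensearch.core.common.io.stream.BytesStreamInput;

public class BytesStreamInputDemo {
    public static void main(String[] args) throws Exception {
        // Assumed constructor: wraps the whole array.
        BytesStreamInput in = new BytesStreamInput(new byte[] { 0x00, 0x00, 0x00, 0x2A, 0x7F });

        System.out.println(in.readInt()); // 42, read big-endian
        try {
            in.readInt();                 // only one byte left
        } catch (EOFException e) {
            System.out.println("fails fast instead of overrunning the buffer");
        }
    }
}
```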
*/ diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java index 1d7321bf2c6de..ea23b3d81a775 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java +++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java @@ -46,15 +46,17 @@ import org.opensearch.Version; import org.opensearch.common.CharArrays; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.settings.SecureString; import org.opensearch.core.common.text.Text; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.core.common.Strings; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.core.xcontent.MediaType; -import org.opensearch.core.xcontent.MediaTypeParserRegistry; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.semver.SemverRange; import java.io.ByteArrayInputStream; import java.io.EOFException; @@ -95,7 +97,7 @@ /** * A stream from this node to another node. Technically, it can also be streamed to a byte array but that is mostly for testing. - * + * <p> * This class's methods are optimized so you can put the methods that read and write a class next to each other and you can scan them * visually for differences. That means that most variables should be read and written in a single line so even large objects fit both * reading and writing on the screen. It also means that the methods on this class are named very similarly to {@link StreamOutput}. Finally @@ -104,8 +106,9 @@ * lists, either by storing {@code List}s internally or just converting to and from a {@code List} when calling. This comment is repeated * on {@link StreamInput}. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class StreamInput extends InputStream { private Version version = Version.CURRENT; @@ -347,7 +350,7 @@ public BigInteger readBigInteger() throws IOException { } public MediaType readMediaType() throws IOException { - return MediaTypeParserRegistry.fromMediaType(readString()); + return MediaTypeRegistry.fromMediaType(readString()); } @Nullable @@ -722,6 +725,8 @@ public Object readGenericValue() throws IOException { return readByte(); case 12: return readDate(); + case 13: + return readZonedDateTime(); case 14: return readBytesReference(); case 15: @@ -746,6 +751,8 @@ public Object readGenericValue() throws IOException { return readCollection(StreamInput::readGenericValue, HashSet::new, Collections.emptySet()); case 26: return readBigInteger(); + case 27: + return readSemverRange(); default: throw new IOException("Can't read unknown type [" + type + "]"); } @@ -1086,6 +1093,10 @@ public Version readVersion() throws IOException { return Version.fromId(readVInt()); } + public SemverRange readSemverRange() throws IOException { + return SemverRange.fromString(readString()); + } + /** Reads the {@link Version} from the input stream */ public Build readBuild() throws IOException { // the following is new for opensearch: we write the distribution to support any "forks" @@ -1124,7 +1135,7 @@ public <C extends NamedWriteable> C readNamedWriteable(@SuppressWarnings("unused * the corresponding entry in the registry by name, so that the proper object can be read and returned. * Default implementation throws {@link UnsupportedOperationException} as StreamInput doesn't hold a registry. * Use {@link FilterInputStream} instead which wraps a stream and supports a {@link NamedWriteableRegistry} too. - * + * <p> * Prefer {@link StreamInput#readNamedWriteable(Class)} and {@link StreamOutput#writeNamedWriteable(NamedWriteable)} unless you * have a compelling reason to use this method instead. */ diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamOutput.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamOutput.java index 566abf9f08f53..b7599265aece3 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamOutput.java +++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamOutput.java @@ -45,14 +45,16 @@ import org.opensearch.Version; import org.opensearch.common.CharArrays; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.Writeable.WriteableRegistry; import org.opensearch.core.common.io.stream.Writeable.Writer; import org.opensearch.core.common.settings.SecureString; import org.opensearch.core.common.text.Text; -import org.opensearch.common.unit.TimeValue; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.semver.SemverRange; import java.io.EOFException; import java.io.FileNotFoundException; @@ -87,7 +89,7 @@ /** * A stream from another node to this node. Technically, it can also be streamed from a byte array but that is mostly for testing. - * + * <p> * This class's methods are optimized so you can put the methods that read and write a class next to each other and you can scan them * visually for differences. 
That means that most variables should be read and written in a single line so even large objects fit both * reading and writing on the screen. It also means that the methods on this class are named very similarly to {@link StreamInput}. Finally @@ -96,8 +98,9 @@ * lists, either by storing {@code List}s internally or just converting to and from a {@code List} when calling. This comment is repeated * on {@link StreamInput}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class StreamOutput extends OutputStream { private static final int MAX_NESTED_EXCEPTION_LEVEL = 100; @@ -782,6 +785,10 @@ public final void writeOptionalInstant(@Nullable Instant instant) throws IOExcep o.writeByte((byte) 26); o.writeString(v.toString()); }); + writers.put(SemverRange.class, (o, v) -> { + o.writeByte((byte) 27); + o.writeSemverRange((SemverRange) v); + }); WRITERS = Collections.unmodifiableMap(writers); } @@ -804,6 +811,23 @@ private static Class<?> getGenericType(Object value) { } } + /** + * Returns the registered writer for the given class type. + */ + @SuppressWarnings("unchecked") + public static <W extends Writer<?>> W getWriter(Class<?> type) { + Writer<Object> writer = WriteableRegistry.getWriter(type); + if (writer == null) { + // fallback to this local hashmap + // todo: move all writers to the registry + writer = WRITERS.get(type); + } + if (writer == null) { + throw new IllegalArgumentException("can not write type [" + type + "]"); + } + return (W) writer; + } + /** * Notice: when serialization a map, the stream out map with the stream in map maybe have the * different key-value orders, they will maybe have different stream order. @@ -816,17 +840,8 @@ public void writeGenericValue(@Nullable Object value) throws IOException { return; } final Class<?> type = getGenericType(value); - Writer<Object> writer = WriteableRegistry.getWriter(type); - if (writer == null) { - // fallback to this local hashmap - // todo: move all writers to the registry - writer = WRITERS.get(type); - } - if (writer != null) { - writer.write(this, value); - } else { - throw new IllegalArgumentException("can not write type [" + type + "]"); - } + final Writer<Object> writer = getWriter(type); + writer.write(this, value); } public static void checkWriteable(@Nullable Object value) throws IllegalArgumentException { @@ -1091,6 +1106,10 @@ public void writeVersion(final Version version) throws IOException { writeVInt(version.id); } + public void writeSemverRange(final SemverRange range) throws IOException { + writeString(range.toString()); + } + /** Writes the OpenSearch {@link Build} informn to the output stream */ public void writeBuild(final Build build) throws IOException { // the following is new for opensearch: we write the distribution name to support any "forks" of the code diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/Writeable.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/Writeable.java index af9df51655414..960f4bec5eeb5 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/io/stream/Writeable.java +++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/Writeable.java @@ -32,6 +32,8 @@ package org.opensearch.core.common.io.stream; +import org.opensearch.common.annotation.PublicApi; + import java.io.IOException; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; @@ -41,8 +43,9 @@ * across the wire" using OpenSearch's internal protocol. 
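With the type-27 writer registered in `StreamOutput` and the matching case added to `readGenericValue`, a `SemverRange` now survives a generic-value round trip as its string form. A hedged sketch; `BytesStreamOutput` and the range syntax are assumptions, since neither is shown in this diff:

```java
import org.opensearch.common.io.stream.BytesStreamOutput;
import org.opensearch.core.common.io.stream.StreamInput;
import org.opensearch.semver.SemverRange;

public class SemverRangeRoundTrip {
    public static void main(String[] args) throws Exception {
        SemverRange range = SemverRange.fromString("~2.3.4"); // assumed range syntax

        try (BytesStreamOutput out = new BytesStreamOutput()) {
            out.writeGenericValue(range); // dispatched through the new type-27 writer
            try (StreamInput in = out.bytes().streamInput()) {
                Object copy = in.readGenericValue();
                System.out.println(copy instanceof SemverRange); // expected: true
            }
        }
    }
}
```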
If the implementer also implements equals and hashCode then a copy made by * serializing and deserializing must be equal and have the same hashCode. It isn't required that such a copy be entirely unchanged. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.8.0") public interface Writeable { /** * A WriteableRegistry registers {@link Writer} methods for writing data types over a @@ -135,8 +138,11 @@ public static Class<?> getCustomClassFromInstance(final Object value) { * out.writeMapOfLists(someMap, StreamOutput::writeString, StreamOutput::writeString); * } * </code></pre> + * + * @opensearch.api */ @FunctionalInterface + @PublicApi(since = "2.8.0") interface Writer<V> { /** @@ -161,8 +167,11 @@ interface Writer<V> { * this.someMap = in.readMapOfLists(StreamInput::readString, StreamInput::readString); * } * </code></pre> + * + * @opensearch.api */ @FunctionalInterface + @PublicApi(since = "2.8.0") interface Reader<V> { /** diff --git a/libs/core/src/main/java/org/opensearch/core/common/logging/LoggerMessageFormat.java b/libs/core/src/main/java/org/opensearch/core/common/logging/LoggerMessageFormat.java index 59492193d16dc..c7b9bee3cbf4d 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/logging/LoggerMessageFormat.java +++ b/libs/core/src/main/java/org/opensearch/core/common/logging/LoggerMessageFormat.java @@ -30,6 +30,13 @@ * GitHub history for details. */ +/* + * This code is based on code from SFL4J 1.5.11 + * Copyright (c) 2004-2007 QOS.ch + * All rights reserved. + * SPDX-License-Identifier: MIT + */ + package org.opensearch.core.common.logging; import java.util.HashSet; @@ -37,6 +44,10 @@ /** * Format string for OpenSearch log messages. + * <p> + * This class is almost a copy of {@code org.slf4j.helpers.MessageFormatter}<p> + * The original code is licensed under the MIT License and is available at : + * <a href="https://github.com/qos-ch/slf4j/blob/7c164fab8d54f823dd55c01a5a839c153f578297/slf4j-api/src/main/java/org/slf4j/helpers/MessageFormatter.java">MessageFormatter.java</a> * * @opensearch.internal */ @@ -51,6 +62,17 @@ public static String format(final String messagePattern, final Object... argArra return format(null, messagePattern, argArray); } + /** + * (this is almost a copy of {@code org.slf4j.helpers.MessageFormatter.arrayFormat}) + * + * @param prefix the prefix to prepend to the formatted message (can be null) + * @param messagePattern the message pattern which will be parsed and formatted + * @param argArray an array of arguments to be substituted in place of formatting anchors + * @return null if messagePattern is null <p> + * messagePattern if argArray is (null or empty) and prefix is null <p> + * prefix + messagePattern if argArray is (null or empty) and prefix is not null <p> + * formatted message otherwise (even if prefix is null) + */ public static String format(final String prefix, final String messagePattern, final Object... argArray) { if (messagePattern == null) { return null; @@ -110,6 +132,13 @@ public static String format(final String prefix, final String messagePattern, fi return sbuf.toString(); } + /** + * Checks if (delimterStartIndex - 1) in messagePattern is an escape character. 
+ * @param messagePattern the message pattern + * @param delimiterStartIndex the index of the character to check + * @return true if there is an escape char before the character at delimiterStartIndex.<p> + * Always returns false if delimiterStartIndex == 0 (edge case) + */ static boolean isEscapedDelimiter(String messagePattern, int delimiterStartIndex) { if (delimiterStartIndex == 0) { @@ -119,6 +148,13 @@ static boolean isEscapedDelimiter(String messagePattern, int delimiterStartIndex return potentialEscape == ESCAPE_CHAR; } + /** + * Checks if (delimterStartIndex - 2) in messagePattern is an escape character. + * @param messagePattern the message pattern + * @param delimiterStartIndex the index of the character to check + * @return true if (delimterStartIndex - 2) in messagePattern is an escape character. + * Always returns false if delimiterStartIndex is less than 2 (edge case) + */ static boolean isDoubleEscaped(String messagePattern, int delimiterStartIndex) { return delimiterStartIndex >= 2 && messagePattern.charAt(delimiterStartIndex - 2) == ESCAPE_CHAR; } diff --git a/libs/core/src/main/java/org/opensearch/core/common/settings/SecureString.java b/libs/core/src/main/java/org/opensearch/core/common/settings/SecureString.java index f5529bcebc82f..45ee72f558724 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/settings/SecureString.java +++ b/libs/core/src/main/java/org/opensearch/core/common/settings/SecureString.java @@ -32,6 +32,8 @@ package org.opensearch.core.common.settings; +import org.opensearch.common.annotation.PublicApi; + import java.io.Closeable; import java.util.Arrays; import java.util.Objects; @@ -39,15 +41,16 @@ /** * A String implementations which allows clearing the underlying char array. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class SecureString implements CharSequence, Closeable { private char[] chars; /** * Constructs a new SecureString which controls the passed in char array. - * + * <p> * Note: When this instance is closed, the array will be zeroed out. */ public SecureString(char[] chars) { @@ -56,7 +59,7 @@ public SecureString(char[] chars) { /** * Constructs a new SecureString from an existing String. - * + * <p> * NOTE: This is not actually secure, since the provided String cannot be deallocated, but * this constructor allows for easy compatibility between new and old apis. * diff --git a/libs/core/src/main/java/org/opensearch/core/common/text/Text.java b/libs/core/src/main/java/org/opensearch/core/common/text/Text.java index ca5402edae59e..3a46bd4602297 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/text/Text.java +++ b/libs/core/src/main/java/org/opensearch/core/common/text/Text.java @@ -32,6 +32,7 @@ package org.opensearch.core.common.text; import org.apache.lucene.util.BytesRef; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.ToXContentFragment; @@ -44,8 +45,9 @@ * Both {@link String} and {@link BytesReference} representation of the text. Starts with one of those, and if * the other is requests, caches the other one in a local reference so no additional conversion will be needed. 
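The two escape helpers documented above are easiest to understand from the caller's side: a single backslash keeps the `{}` literal without consuming an argument, while a double backslash emits a real backslash and leaves the anchor live. A sketch against the `format` entry point, assuming the SLF4J-derived semantics the class attribution describes:

```java
import org.opensearch.core.common.logging.LoggerMessageFormat;

public class MessageFormatDemo {
    public static void main(String[] args) {
        // Plain anchors are substituted in order.
        System.out.println(LoggerMessageFormat.format("{} + {} = {}", 1, 2, 3)); // 1 + 2 = 3

        // Escaped delimiter: "{}" stays literal, the argument is not consumed.
        System.out.println(LoggerMessageFormat.format("literal \\{} then {}", "x")); // literal {} then x

        // Double escape: a real backslash survives and the anchor stays live.
        System.out.println(LoggerMessageFormat.format("C:\\\\{}", "temp")); // C:\temp
    }
}
```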
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class Text implements Comparable<Text>, ToXContentFragment { public static final Text[] EMPTY_ARRAY = new Text[0]; diff --git a/server/src/main/java/org/opensearch/common/transport/BoundTransportAddress.java b/libs/core/src/main/java/org/opensearch/core/common/transport/BoundTransportAddress.java similarity index 98% rename from server/src/main/java/org/opensearch/common/transport/BoundTransportAddress.java rename to libs/core/src/main/java/org/opensearch/core/common/transport/BoundTransportAddress.java index 3a9c337f2d950..8908a172395f2 100644 --- a/server/src/main/java/org/opensearch/common/transport/BoundTransportAddress.java +++ b/libs/core/src/main/java/org/opensearch/core/common/transport/BoundTransportAddress.java @@ -30,12 +30,12 @@ * GitHub history for details. */ -package org.opensearch.common.transport; +package org.opensearch.core.common.transport; +import org.opensearch.common.network.InetAddresses; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.network.InetAddresses; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/common/transport/TransportAddress.java b/libs/core/src/main/java/org/opensearch/core/common/transport/TransportAddress.java similarity index 88% rename from server/src/main/java/org/opensearch/common/transport/TransportAddress.java rename to libs/core/src/main/java/org/opensearch/core/common/transport/TransportAddress.java index 737e8f3496143..3b5fbb7d76307 100644 --- a/server/src/main/java/org/opensearch/common/transport/TransportAddress.java +++ b/libs/core/src/main/java/org/opensearch/core/common/transport/TransportAddress.java @@ -30,12 +30,13 @@ * GitHub history for details. */ -package org.opensearch.common.transport; +package org.opensearch.core.common.transport; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.network.NetworkAddress; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.network.NetworkAddress; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -47,8 +48,9 @@ /** * A transport address used for IP socket address (wraps {@link java.net.InetSocketAddress}). * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class TransportAddress implements Writeable, ToXContentFragment { /** @@ -71,6 +73,12 @@ public TransportAddress(InetAddress address, int port) { this(new InetSocketAddress(address, port)); } + /** + * Creates a new {@link TransportAddress} from a {@link InetSocketAddress}. + * @param address the address to wrap + * @throws IllegalArgumentException if the address is null or not resolved + * @see InetSocketAddress#getAddress() + */ public TransportAddress(InetSocketAddress address) { if (address == null) { throw new IllegalArgumentException("InetSocketAddress must not be null"); @@ -82,7 +90,9 @@ public TransportAddress(InetSocketAddress address) { } /** - * Read from a stream. + * Creates a new {@link TransportAddress} from a {@link StreamInput}. 
+ * @param in the stream to read from + * @throws IOException if an I/O error occurs */ public TransportAddress(StreamInput in) throws IOException { final int len = in.readByte(); @@ -116,6 +126,8 @@ public String getAddress() { /** * Returns the addresses port + * @return the port number, or 0 if the socket is not bound yet. + * @see InetSocketAddress#getPort() */ public int getPort() { return address.getPort(); diff --git a/libs/core/src/main/java/org/opensearch/core/common/transport/package-info.java b/libs/core/src/main/java/org/opensearch/core/common/transport/package-info.java new file mode 100644 index 0000000000000..21d2abfce958a --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/core/common/transport/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Common / Base Transport classes used to implement the OpenSearch transport layer */ +package org.opensearch.core.common.transport; diff --git a/server/src/main/java/org/opensearch/common/unit/ByteSizeUnit.java b/libs/core/src/main/java/org/opensearch/core/common/unit/ByteSizeUnit.java similarity index 96% rename from server/src/main/java/org/opensearch/common/unit/ByteSizeUnit.java rename to libs/core/src/main/java/org/opensearch/core/common/unit/ByteSizeUnit.java index b95e39feb8fac..49eadbbb2bc00 100644 --- a/server/src/main/java/org/opensearch/common/unit/ByteSizeUnit.java +++ b/libs/core/src/main/java/org/opensearch/core/common/unit/ByteSizeUnit.java @@ -30,8 +30,9 @@ * GitHub history for details. */ -package org.opensearch.common.unit; +package org.opensearch.core.common.unit; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -44,9 +45,17 @@ * A {@code SizeUnit} does not maintain size information, but only * helps organize and use size representations that may be maintained * separately across various contexts. + * <p> + * It use conventional data storage values (base-2) : + * <ul> + * <li>1KB = 1024 bytes</li> + * <li>1MB = 1024KB</li> + * <li> ... </li> + * </ul> * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum ByteSizeUnit implements Writeable { BYTES { @Override diff --git a/server/src/main/java/org/opensearch/common/unit/ByteSizeValue.java b/libs/core/src/main/java/org/opensearch/core/common/unit/ByteSizeValue.java similarity index 88% rename from server/src/main/java/org/opensearch/common/unit/ByteSizeValue.java rename to libs/core/src/main/java/org/opensearch/core/common/unit/ByteSizeValue.java index a123c79464727..1ed6d2d204a99 100644 --- a/server/src/main/java/org/opensearch/common/unit/ByteSizeValue.java +++ b/libs/core/src/main/java/org/opensearch/core/common/unit/ByteSizeValue.java @@ -30,16 +30,14 @@ * GitHub history for details. 
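The expanded constructor javadoc makes the failure mode explicit: an unresolved `InetSocketAddress` is rejected up front rather than surfacing later on the wire. A sketch (the exact `getAddress()` formatting comes from `NetworkAddress.format`, assumed here to be the plain IP literal):

```java
import java.net.InetAddress;
import java.net.InetSocketAddress;

import org.opensearch.core.common.transport.TransportAddress;

public class TransportAddressDemo {
    public static void main(String[] args) {
        TransportAddress address = new TransportAddress(InetAddress.getLoopbackAddress(), 9300);
        System.out.println(address.getAddress() + ":" + address.getPort()); // 127.0.0.1:9300

        try {
            // createUnresolved() yields a socket address with no InetAddress behind it.
            new TransportAddress(InetSocketAddress.createUnresolved("example.invalid", 9300));
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}
```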
*/ -package org.opensearch.common.unit; +package org.opensearch.core.common.unit; import org.opensearch.OpenSearchParseException; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.logging.DeprecationLogger; -import org.opensearch.common.logging.LogConfigurator; -import org.opensearch.common.network.NetworkService; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -50,21 +48,11 @@ /** * A byte size value * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ByteSizeValue implements Writeable, Comparable<ByteSizeValue>, ToXContentFragment { - /** - * We have to lazy initialize the deprecation logger as otherwise a static logger here would be constructed before logging is configured - * leading to a runtime failure (see {@link LogConfigurator#checkErrorListener()} ). The premature construction would come from any - * {@link ByteSizeValue} object constructed in, for example, settings in {@link NetworkService}. - * - * @opensearch.internal - */ - static class DeprecationLoggerHolder { - static DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(ByteSizeValue.class); - } - public static final ByteSizeValue ZERO = new ByteSizeValue(0, ByteSizeUnit.BYTES); private final long size; @@ -262,14 +250,14 @@ private static ByteSizeValue parse( return new ByteSizeValue(Long.parseLong(s), unit); } catch (final NumberFormatException e) { try { - final double doubleValue = Double.parseDouble(s); - DeprecationLoggerHolder.deprecationLogger.deprecate( - "fractional_byte_values", - "Fractional bytes values are deprecated. Use non-fractional bytes values instead: [{}] found for setting [{}]", + Double.parseDouble(s); + throw new OpenSearchParseException( + "Failed to parse bytes value [{}]. Fractional bytes values have been " + + "deprecated since Legacy 6.2. Use non-fractional bytes values instead: found for setting [{}]", + e, initialInput, settingName ); - return new ByteSizeValue((long) (doubleValue * unit.toBytes(1))); } catch (final NumberFormatException ignored) { throw new OpenSearchParseException("failed to parse [{}]", e, initialInput); } diff --git a/libs/core/src/main/java/org/opensearch/core/common/unit/package-info.java b/libs/core/src/main/java/org/opensearch/core/common/unit/package-info.java new file mode 100644 index 0000000000000..79b5dcdcba3b6 --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/core/common/unit/package-info.java @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Common units of measurement used by the core library. These units of measurement classes exist + * in the core because they depend on core functionality beyond the common library (e.g., serializable). 
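The behavioral change in the `ByteSizeValue` hunk is easy to miss amid the move: fractional values used to parse with a deprecation warning and now fail outright. A sketch, assuming the public `parseBytesSizeValue(String, String)` entry point that funnels into the private `parse` shown above:

```java
import org.opensearch.OpenSearchParseException;
import org.opensearch.core.common.unit.ByteSizeValue;

public class FractionalBytesDemo {
    public static void main(String[] args) {
        // Whole-number values with a unit suffix still parse.
        System.out.println(ByteSizeValue.parseBytesSizeValue("5kb", "index.buffer")); // 5kb

        try {
            // Previously deprecated, now rejected.
            ByteSizeValue.parseBytesSizeValue("1.5kb", "index.buffer");
        } catch (OpenSearchParseException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}
```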
+ * + * @opensearch.api + * @opensearch.experimental + */ +package org.opensearch.core.common.unit; diff --git a/libs/core/src/main/java/org/opensearch/core/common/util/ByteArray.java b/libs/core/src/main/java/org/opensearch/core/common/util/ByteArray.java index e50f24417f8bc..f4d81c4ca4363 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/util/ByteArray.java +++ b/libs/core/src/main/java/org/opensearch/core/common/util/ByteArray.java @@ -33,14 +33,16 @@ package org.opensearch.core.common.util; import org.apache.lucene.util.BytesRef; +import org.opensearch.common.annotation.PublicApi; import java.nio.ByteBuffer; /** * Abstraction of an array of byte values. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ByteArray extends BigArray { /** diff --git a/server/src/main/java/org/opensearch/common/util/CollectionUtils.java b/libs/core/src/main/java/org/opensearch/core/common/util/CollectionUtils.java similarity index 79% rename from server/src/main/java/org/opensearch/common/util/CollectionUtils.java rename to libs/core/src/main/java/org/opensearch/core/common/util/CollectionUtils.java index 2037b2e46488f..5335c98182b64 100644 --- a/server/src/main/java/org/opensearch/common/util/CollectionUtils.java +++ b/libs/core/src/main/java/org/opensearch/core/common/util/CollectionUtils.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.common.util; +package org.opensearch.core.common.util; import org.opensearch.common.collect.Iterators; import org.opensearch.core.common.Strings; @@ -73,6 +73,16 @@ public static boolean isEmpty(Object[] array) { /** * Return a rotated view of the given list with the given distance. + * <ul> + * <li>The distance can be negative, in which case the list is rotated to the left.</li> + * <li>The distance can be larger than the size of the list, in which case the list is rotated multiple times.</li> + * <li>The distance can be zero, in which case the list is not rotated.</li> + * <li>The list can be empty, in which case it remains empty.</li> + * </ul> + * @param list the list to rotate + * @param distance the distance to rotate (positive rotates right, negative rotates left) + * @return a rotated view of the given list with the given distance + * @see RotatedList */ public static <T> List<T> rotate(final List<T> list, int distance) { if (list.isEmpty()) { @@ -92,7 +102,13 @@ public static <T> List<T> rotate(final List<T> list, int distance) { } /** - * in place de-duplicates items in a list + * In place de-duplicates items in a list + * Noop if the list is empty or has one item. + * + * @throws NullPointerException if the list is `null` or comparator is `null` + * @param array the list to de-duplicate + * @param comparator the comparator to use to compare items + * @param <T> the type of the items in the list */ public static <T> void sortAndDedup(final List<T> array, Comparator<T> comparator) { // base case: one item @@ -115,6 +131,12 @@ public static <T> void sortAndDedup(final List<T> array, Comparator<T> comparato array.subList(deduped.nextIndex(), array.size()).clear(); } + /** + * Converts a collection of Integers to an array of ints. 
+ * @param ints The collection of Integers to convert + * @return The array of ints + * @throws NullPointerException if ints is null + */ public static int[] toArray(Collection<Integer> ints) { Objects.requireNonNull(ints); return ints.stream().mapToInt(s -> s).toArray(); @@ -134,6 +156,12 @@ public static void ensureNoSelfReferences(Object value, String messageHint) { } } + /** + * Converts an object to an Iterable, if possible. + * @param value The object to convert + * @return The Iterable, or null if the object cannot be converted + */ + @SuppressWarnings("unchecked") private static Iterable<?> convert(Object value) { if (value == null) { return null; @@ -191,6 +219,13 @@ private static class RotatedList<T> extends AbstractList<T> implements RandomAcc private final List<T> in; private final int distance; + /** + * Creates a rotated list + * @param list The list to rotate + * @param distance The distance to rotate to the right + * @throws IllegalArgumentException if the distance is negative or greater than the size of the list; + * or if the list is not a {@link RandomAccess} list + */ RotatedList(List<T> list, int distance) { if (distance < 0 || distance >= list.size()) { throw new IllegalArgumentException(); @@ -217,6 +252,14 @@ public int size() { } } + /** + * Converts an {@link Iterable} to an {@link ArrayList}. + * @param elements The iterable to convert + * @param <E> the type the elements + * @return an {@link ArrayList} + * @throws NullPointerException if elements is null + */ + @SuppressWarnings("unchecked") public static <E> ArrayList<E> iterableAsArrayList(Iterable<? extends E> elements) { if (elements == null) { throw new NullPointerException("elements"); @@ -232,6 +275,7 @@ public static <E> ArrayList<E> iterableAsArrayList(Iterable<? extends E> element } } + @SuppressWarnings("unchecked") public static <E> ArrayList<E> arrayAsArrayList(E... elements) { if (elements == null) { throw new NullPointerException("elements"); @@ -239,6 +283,7 @@ public static <E> ArrayList<E> arrayAsArrayList(E... elements) { return new ArrayList<>(Arrays.asList(elements)); } + @SuppressWarnings("unchecked") public static <E> ArrayList<E> asArrayList(E first, E... other) { if (other == null) { throw new NullPointerException("other"); @@ -249,6 +294,7 @@ public static <E> ArrayList<E> asArrayList(E first, E... other) { return list; } + @SuppressWarnings("unchecked") public static <E> ArrayList<E> asArrayList(E first, E second, E... other) { if (other == null) { throw new NullPointerException("other"); @@ -292,11 +338,11 @@ public static <E> List<List<E>> eagerPartition(List<E> list, int size) { } /** - * Check if a collection is empty or not. Empty collection mean either it is null or it has no elements in it. If - * collection contains a null element it means it is not empty. + * Checks if a collection is empty or not. Empty collection mean either it is null or it has no elements in it. + * If collection contains a null element it means it is not empty. 
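The expanded `rotate` and `sortAndDedup` javadoc above reads denser than the behavior it describes. A minimal sketch (`rotate` returns a view, positive distance rotates right; `sortAndDedup` mutates in place), assuming the implementations in this hunk:

```java
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

import org.opensearch.core.common.util.CollectionUtils;

public class CollectionUtilsDemo {
    public static void main(String[] args) {
        List<Integer> nums = List.of(1, 2, 3, 4);
        System.out.println(CollectionUtils.rotate(nums, 1));  // [4, 1, 2, 3]
        System.out.println(CollectionUtils.rotate(nums, -1)); // [2, 3, 4, 1]

        List<String> names = new ArrayList<>(List.of("b", "a", "b", "a"));
        CollectionUtils.sortAndDedup(names, Comparator.naturalOrder());
        System.out.println(names); // [a, b]
    }
}
```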
* * @param collection {@link Collection} - * @return boolean + * @return true if collection is null or {@code isEmpty()}, false otherwise * @param <E> Element */ public static <E> boolean isEmpty(final Collection<E> collection) { diff --git a/server/src/main/java/org/opensearch/common/compress/Compressor.java b/libs/core/src/main/java/org/opensearch/core/compress/Compressor.java similarity index 77% rename from server/src/main/java/org/opensearch/common/compress/Compressor.java rename to libs/core/src/main/java/org/opensearch/core/compress/Compressor.java index 1034e3a1da5da..5324ea6151e51 100644 --- a/server/src/main/java/org/opensearch/common/compress/Compressor.java +++ b/libs/core/src/main/java/org/opensearch/core/compress/Compressor.java @@ -30,8 +30,10 @@ * GitHub history for details. */ -package org.opensearch.common.compress; +package org.opensearch.core.compress; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.bytes.BytesReference; import java.io.IOException; @@ -39,10 +41,18 @@ import java.io.OutputStream; /** - * Compressor interface + * Compressor interface used for compressing {@link org.opensearch.core.xcontent.MediaType} and + * {@code org.opensearch.repositories.blobstore.BlobStoreRepository} implementations. + * <p> + * This is not to be confused with {@link org.apache.lucene.codecs.compressing.Compressor} which is used + * for codec implementations such as {@code org.opensearch.index.codec.customcodecs.Lucene95CustomCodec} + * for compressing {@link org.apache.lucene.document.StoredField}s * - * @opensearch.internal + * @opensearch.api - intended to be extended + * @opensearch.experimental - however, bwc is not guaranteed at this time */ +@ExperimentalApi +@PublicApi(since = "2.10.0") public interface Compressor { boolean isCompressed(BytesReference bytes); diff --git a/libs/core/src/main/java/org/opensearch/core/compress/CompressorRegistry.java b/libs/core/src/main/java/org/opensearch/core/compress/CompressorRegistry.java new file mode 100644 index 0000000000000..af09a7aebba79 --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/core/compress/CompressorRegistry.java @@ -0,0 +1,115 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.core.compress; + +import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.compress.spi.CompressorProvider; +import org.opensearch.core.xcontent.MediaTypeRegistry; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; +import java.util.ServiceLoader; +import java.util.stream.Collectors; + +/** + * A registry that wraps a static Map singleton which holds a mapping of unique String names (typically the + * compressor header as a string) to registerd {@link Compressor} implementations. 
+ * <p> + * This enables plugins, modules, extensions to register their own compression implementations through SPI + * + * @opensearch.experimental + * @opensearch.internal + */ +@InternalApi +public final class CompressorRegistry { + + // the backing registry map + private static final Map<String, Compressor> registeredCompressors = ServiceLoader.load( + CompressorProvider.class, + CompressorProvider.class.getClassLoader() + ) + .stream() + .flatMap(p -> p.get().getCompressors().stream()) + .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue)); + + // no instance: + private CompressorRegistry() {} + + /** + * Returns the default compressor + */ + public static Compressor defaultCompressor() { + return registeredCompressors.get("DEFLATE"); + } + + public static Compressor none() { + return registeredCompressors.get(NoneCompressor.NAME); + } + + public static boolean isCompressed(BytesReference bytes) { + return compressor(bytes) != null; + } + + @Nullable + public static Compressor compressor(final BytesReference bytes) { + for (Compressor compressor : registeredCompressors.values()) { + if (compressor.isCompressed(bytes) == true) { + // bytes should be either detected as compressed or as xcontent, + // if we have bytes that can be either detected as compressed or + // as a xcontent, we have a problem + assert MediaTypeRegistry.xContentType(bytes) == null; + return compressor; + } + } + + if (MediaTypeRegistry.xContentType(bytes) == null) { + throw new NotXContentException("Compressor detection can only be called on some xcontent bytes or compressed xcontent bytes"); + } + + return null; + } + + /** Decompress the provided {@link BytesReference}. */ + public static BytesReference uncompress(BytesReference bytes) throws IOException { + Compressor compressor = compressor(bytes); + if (compressor == null) { + throw new NotCompressedException(); + } + return compressor.uncompress(bytes); + } + + /** + * Uncompress the provided data, data can be detected as compressed using {@link #isCompressed(BytesReference)}. + */ + public static BytesReference uncompressIfNeeded(BytesReference bytes) throws IOException { + Compressor compressor = compressor(Objects.requireNonNull(bytes, "the BytesReference must not be null")); + return compressor == null ? bytes : compressor.uncompress(bytes); + } + + /** Returns a registered compressor by its registered name */ + public static Compressor getCompressor(final String name) { + if (registeredCompressors.containsKey(name)) { + return registeredCompressors.get(name); + } + throw new IllegalArgumentException("No registered compressor found by name [" + name + "]"); + } + + /** + * Returns the registered compressors as an Immutable collection + * <p> + * note: used for testing + */ + public static Map<String, Compressor> registeredCompressors() { + // no destructive danger as backing map is immutable + return registeredCompressors; + } +} diff --git a/libs/core/src/main/java/org/opensearch/core/compress/NoneCompressor.java b/libs/core/src/main/java/org/opensearch/core/compress/NoneCompressor.java new file mode 100644 index 0000000000000..6e607ed701633 --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/core/compress/NoneCompressor.java @@ -0,0 +1,63 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.core.compress; + +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.bytes.BytesReference; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; + +/** + * {@link Compressor} no compressor implementation. + * + * @opensearch.api - registered name requires BWC support + * @opensearch.experimental - class methods might change + */ +public class NoneCompressor implements Compressor { + /** + * The name to register the compressor by + * + * @opensearch.api - requires BWC support + */ + @PublicApi(since = "2.10.0") + public static final String NAME = "NONE"; + + @Override + public boolean isCompressed(BytesReference bytes) { + return false; + } + + @Override + public int headerLength() { + return 0; + } + + @Override + public InputStream threadLocalInputStream(InputStream in) throws IOException { + return in; + } + + @Override + public OutputStream threadLocalOutputStream(OutputStream out) throws IOException { + return out; + } + + @Override + public BytesReference uncompress(BytesReference bytesReference) throws IOException { + return bytesReference; + } + + @Override + public BytesReference compress(BytesReference bytesReference) throws IOException { + return bytesReference; + } + +} diff --git a/server/src/main/java/org/opensearch/common/compress/NotCompressedException.java b/libs/core/src/main/java/org/opensearch/core/compress/NotCompressedException.java similarity index 97% rename from server/src/main/java/org/opensearch/common/compress/NotCompressedException.java rename to libs/core/src/main/java/org/opensearch/core/compress/NotCompressedException.java index 7f070e0b499d8..91d6bc57f1cd6 100644 --- a/server/src/main/java/org/opensearch/common/compress/NotCompressedException.java +++ b/libs/core/src/main/java/org/opensearch/core/compress/NotCompressedException.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.common.compress; +package org.opensearch.core.compress; /** * Exception indicating that we were expecting something compressed, which diff --git a/libs/core/src/main/java/org/opensearch/core/common/compress/NotXContentException.java b/libs/core/src/main/java/org/opensearch/core/compress/NotXContentException.java similarity index 96% rename from libs/core/src/main/java/org/opensearch/core/common/compress/NotXContentException.java rename to libs/core/src/main/java/org/opensearch/core/compress/NotXContentException.java index d1a3e7709a7d0..99337d5a26025 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/compress/NotXContentException.java +++ b/libs/core/src/main/java/org/opensearch/core/compress/NotXContentException.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.core.common.compress; +package org.opensearch.core.compress; import org.opensearch.core.xcontent.XContent; diff --git a/libs/core/src/main/java/org/opensearch/core/compress/package-info.java b/libs/core/src/main/java/org/opensearch/core/compress/package-info.java new file mode 100644 index 0000000000000..c0365e45702bc --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/core/compress/package-info.java @@ -0,0 +1,14 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** + * Concrete {@link org.opensearch.core.compress.Compressor} implementations provided by the core library + * + * See {@link org.opensearch.core.compress.NoneCompressor} + */ +package org.opensearch.core.compress; diff --git a/libs/core/src/main/java/org/opensearch/core/compress/spi/CompressorProvider.java b/libs/core/src/main/java/org/opensearch/core/compress/spi/CompressorProvider.java new file mode 100644 index 0000000000000..9b806618fe0a0 --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/core/compress/spi/CompressorProvider.java @@ -0,0 +1,34 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.core.compress.spi; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.compress.Compressor; + +import java.util.List; +import java.util.Map; + +/** + * Service Provider Interface for plugins, modules, extensions providing custom + * compression algorithms + * <p> + * see {@link Compressor} for implementing methods + * and {@link org.opensearch.core.compress.CompressorRegistry} for the registration of custom + * Compressors + * + * @opensearch.experimental + * @opensearch.api + */ +@ExperimentalApi +@PublicApi(since = "2.10.0") +public interface CompressorProvider { + /** Extensions that implement their own concrete {@link Compressor}s provide them through this interface method*/ + List<Map.Entry<String, Compressor>> getCompressors(); +} diff --git a/libs/core/src/main/java/org/opensearch/core/compress/spi/DefaultCompressorProvider.java b/libs/core/src/main/java/org/opensearch/core/compress/spi/DefaultCompressorProvider.java new file mode 100644 index 0000000000000..3ca10b564ef68 --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/core/compress/spi/DefaultCompressorProvider.java @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.core.compress.spi; + +import org.opensearch.core.compress.Compressor; +import org.opensearch.core.compress.NoneCompressor; + +import java.util.AbstractMap.SimpleEntry; +import java.util.List; +import java.util.Map.Entry; + +/** + * Default {@link Compressor} implementations provided by the + * opensearch core library + * + * @opensearch.internal + */ +public class DefaultCompressorProvider implements CompressorProvider { + /** Returns the default {@link Compressor}s provided by the core library */ + @SuppressWarnings({ "unchecked", "rawtypes" }) + @Override + public List<Entry<String, Compressor>> getCompressors() { + return List.of(new SimpleEntry(NoneCompressor.NAME, new NoneCompressor())); + } +} diff --git a/libs/core/src/main/java/org/opensearch/core/compress/spi/package-info.java b/libs/core/src/main/java/org/opensearch/core/compress/spi/package-info.java new file mode 100644 index 0000000000000..6e33cc8fb63d3 --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/core/compress/spi/package-info.java @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
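As a sketch of the extension point wired up above: a plugin contributes a compressor by implementing CompressorProvider and listing the class in META-INF/services/org.opensearch.core.compress.spi.CompressorProvider so the ServiceLoader call in CompressorRegistry discovers it. The provider and the "MY_ALGO" name below are hypothetical, and NoneCompressor stands in for a real Compressor implementation only to keep the sketch self-contained:

import org.opensearch.core.compress.Compressor;
import org.opensearch.core.compress.NoneCompressor;
import org.opensearch.core.compress.spi.CompressorProvider;

import java.util.AbstractMap.SimpleImmutableEntry;
import java.util.List;
import java.util.Map.Entry;

public class MyCompressorProvider implements CompressorProvider {

    /** hypothetical registry name, resolvable via CompressorRegistry.getCompressor("MY_ALGO") */
    public static final String NAME = "MY_ALGO";

    @Override
    public List<Entry<String, Compressor>> getCompressors() {
        // a real plugin would return its own Compressor implementation here
        return List.of(new SimpleImmutableEntry<>(NAME, new NoneCompressor()));
    }
}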
+ */ + +/** + * The Service Provider Interface implementation for registering {@link org.opensearch.core.compress.Compressor} + * with the {@link org.opensearch.core.compress.CompressorRegistry} + * + * See {@link org.opensearch.core.compress.spi.DefaultCompressorProvider} as an example of registering the core + * {@link org.opensearch.core.compress.NoneCompressor} + */ +package org.opensearch.core.compress.spi; diff --git a/libs/core/src/main/java/org/opensearch/core/index/Index.java b/libs/core/src/main/java/org/opensearch/core/index/Index.java index fc5c5152a5500..a927179114188 100644 --- a/libs/core/src/main/java/org/opensearch/core/index/Index.java +++ b/libs/core/src/main/java/org/opensearch/core/index/Index.java @@ -32,11 +32,12 @@ package org.opensearch.core.index; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; @@ -47,14 +48,19 @@ /** * A value class representing the basic required properties of an OpenSearch index. + * <p> + * (This class is immutable.) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Index implements Writeable, ToXContentObject { public static final Index[] EMPTY_ARRAY = new Index[0]; private static final String INDEX_UUID_KEY = "index_uuid"; private static final String INDEX_NAME_KEY = "index_name"; + public static final String UNKNOWN_INDEX_NAME = "_unknown_"; + private static final ObjectParser<Builder, Void> INDEX_PARSER = new ObjectParser<>("index", Builder::new); static { INDEX_PARSER.declareString(Builder::name, new ParseField(INDEX_NAME_KEY)); @@ -64,39 +70,74 @@ public class Index implements Writeable, ToXContentObject { private final String name; private final String uuid; + /** + * Creates a new Index instance with name and unique identifier + * + * @param name the name of the index + * @param uuid the unique identifier of the index + * @throws NullPointerException if either name or uuid are null + */ public Index(String name, String uuid) { this.name = Objects.requireNonNull(name); this.uuid = Objects.requireNonNull(uuid); } /** - * Read from a stream. + * Creates a new Index instance from a {@link StreamInput}. + * Reads the name and unique identifier from the stream. + * + * @param in the stream to read from + * @throws IOException if an error occurs while reading from the stream + * @see #writeTo(StreamOutput) */ public Index(StreamInput in) throws IOException { this.name = in.readString(); this.uuid = in.readString(); } + /** + * Gets the name of the index. + * + * @return the name of the index. + */ public String getName() { return this.name; } + /** + * Gets the unique identifier of the index. + * + * @return the unique identifier of the index. "_na_" if {@link Strings#UNKNOWN_UUID_VALUE}. + */ public String getUUID() { return uuid; } + /** + * Returns either the name and unique identifier of the index + * or only the name if the uuid is {@link Strings#UNKNOWN_UUID_VALUE}. 
+ * <p> + * If we have a uuid we put it in the toString so it'll show up in logs + * which is useful as more and more things use the uuid rather + * than the name as the lookup key for the index. + * + * @return {@code "[name/uuid]"} or {@code "[name]"} + */ @Override public String toString() { - /* - * If we have a uuid we put it in the toString so it'll show up in logs which is useful as more and more things use the uuid rather - * than the name as the lookup key for the index. - */ if (Strings.UNKNOWN_UUID_VALUE.equals(uuid)) { return "[" + name + "]"; } return "[" + name + "/" + uuid + "]"; } + /** + * Checks if this index is the same as another index by comparing the name and unique identifier. + * If both uuid are {@link Strings#UNKNOWN_UUID_VALUE} then only the name is compared. + * + * @param o the index to compare to + * @return true if the name and unique identifier are the same, false otherwise. + */ @Override public boolean equals(Object o) { if (this == o) { @@ -116,6 +157,10 @@ public int hashCode() { return result; } + /** Writes the name and unique identifier to the {@link StreamOutput} + * + * @param out The stream to write to + */ @Override public void writeTo(final StreamOutput out) throws IOException { out.writeString(name); diff --git a/libs/core/src/main/java/org/opensearch/core/index/shard/ShardId.java b/libs/core/src/main/java/org/opensearch/core/index/shard/ShardId.java index b01121c3f30d4..1e48cf1f476da 100644 --- a/libs/core/src/main/java/org/opensearch/core/index/shard/ShardId.java +++ b/libs/core/src/main/java/org/opensearch/core/index/shard/ShardId.java @@ -32,65 +32,117 @@ package org.opensearch.core.index.shard; +import org.apache.lucene.util.RamUsageEstimator; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.Index; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.index.Index; import java.io.IOException; /** * Allows for shard level components to be injected with the shard id. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ShardId implements Comparable<ShardId>, ToXContentFragment, Writeable { private final Index index; private final int shardId; private final int hashCode; + private final static long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ShardId.class); + + /** + * Constructs a new shard id. + * @param index the index name + * @param shardId the shard id + */ public ShardId(Index index, int shardId) { this.index = index; this.shardId = shardId; this.hashCode = computeHashCode(); } + /** + * Constructs a new shard id with the given index name, index unique identifier, and shard id. + * @param index the index name + * @param indexUUID the index unique identifier + * @param shardId the shard id + */ public ShardId(String index, String indexUUID, int shardId) { this(new Index(index, indexUUID), shardId); } + /** + * Constructs a new shardId from a stream. 
+ * @param in the stream to read from + * @throws IOException if an error occurs while reading from the stream + * @see #writeTo(StreamOutput) + */ public ShardId(StreamInput in) throws IOException { index = new Index(in); shardId = in.readVInt(); hashCode = computeHashCode(); } + public long getBaseRamBytesUsed() { + return BASE_RAM_BYTES_USED; + } + + /** + * Writes this shard id to a stream. + * @param out the stream to write to + * @throws IOException if an error occurs while writing to the stream + */ @Override public void writeTo(StreamOutput out) throws IOException { index.writeTo(out); out.writeVInt(shardId); } + /** + * Returns the index of this shard id. + * @return the index of this shard id + */ public Index getIndex() { return index; } + /** + * Returns the name of the index of this shard id. + * @return the name of the index of this shard id + */ public String getIndexName() { return index.getName(); } + /** + * Return the shardId of this shard id. + * @return the shardId of this shard id + * @see #getId() + */ public int id() { return this.shardId; } + /** + * Returns the shard id of this shard id. + * @return the shard id of this shard id + */ public int getId() { return id(); } + /** + * Returns a string representation of this shard id. + * @return "[indexName][shardId]" + */ @Override public String toString() { return "[" + index.getName() + "][" + shardId + "]"; @@ -98,9 +150,13 @@ public String toString() { /** * Parse the string representation of this shardId back to an object. + * <p> * We lose index uuid information here, but since we use toString in * rest responses, this is the best we can do to reconstruct the object * on the client side. + * + * @param shardIdString the string representation of the shard id + * (Expect a string of format "[indexName][shardId]" (square brackets included)) */ public static ShardId fromString(String shardIdString) { int splitPosition = shardIdString.indexOf("]["); @@ -120,17 +176,30 @@ public boolean equals(Object o) { return shardId == shardId1.shardId && index.equals(shardId1.index); } + /** Returns the hash code of this shard id. + * + * @return the hash code of this shard id + */ @Override public int hashCode() { return hashCode; } + /** Computes the hash code of this shard id. + * + * @return the hash code of this shard id. + */ private int computeHashCode() { int result = index != null ? index.hashCode() : 0; result = 31 * result + shardId; return result; } + /** + * Compares this ShardId with the specified ShardId. + * @param o the ShardId to be compared. + * @return a negative integer, zero, or a positive integer if this ShardId is less than, equal to, or greater than the specified ShardId + */ @Override public int compareTo(ShardId o) { if (o.getId() == shardId) { diff --git a/libs/core/src/main/java/org/opensearch/core/indices/breaker/AllCircuitBreakerStats.java b/libs/core/src/main/java/org/opensearch/core/indices/breaker/AllCircuitBreakerStats.java new file mode 100644 index 0000000000000..992655efec8f0 --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/core/indices/breaker/AllCircuitBreakerStats.java @@ -0,0 +1,126 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
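A short sketch of the string round trip documented on fromString above; as the javadoc notes, the index uuid does not survive the trip:

import org.opensearch.core.index.Index;
import org.opensearch.core.index.shard.ShardId;

public class ShardIdExample {
    public static void main(String[] args) {
        ShardId original = new ShardId(new Index("my-index", "A1b2C3d4E5"), 0);
        System.out.println(original); // [my-index][0]

        // parse the toString() form back; only the index name and shard number survive
        ShardId parsed = ShardId.fromString(original.toString());
        System.out.println(parsed.getIndexName()); // my-index
        System.out.println(parsed.getId());        // 0
    }
}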
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.core.indices.breaker; + +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * Stats class encapsulating all of the different circuit breaker stats + * + * @opensearch.api + */ +@PublicApi(since = "1.0.0") +public class AllCircuitBreakerStats implements Writeable, ToXContentFragment { + + /** An array of all the circuit breaker stats */ + private final CircuitBreakerStats[] allStats; + + /** + * Constructs the instance + * + * @param allStats an array of all the circuit breaker stats + */ + public AllCircuitBreakerStats(CircuitBreakerStats[] allStats) { + this.allStats = allStats; + } + + /** + * Constructs the new instance from {@link StreamInput} + * @param in the {@link StreamInput} to read from + * @throws IOException If an error occurs while reading from the StreamInput + * @see #writeTo(StreamOutput) + */ + public AllCircuitBreakerStats(StreamInput in) throws IOException { + allStats = in.readArray(CircuitBreakerStats::new, CircuitBreakerStats[]::new); + } + + /** + * Writes this instance into a {@link StreamOutput} + * @param out the {@link StreamOutput} to write to + * @throws IOException if an error occurs while writing to the StreamOutput + */ + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeArray(allStats); + } + + /** + * Returns inner stats instances for all circuit breakers + * @return inner stats instances for all circuit breakers + */ + public CircuitBreakerStats[] getAllStats() { + return this.allStats; + } + + /** + * Returns the stats for a specific circuit breaker + * @param name the name of the circuit breaker + * @return the {@link CircuitBreakerStats} for the circuit breaker, null if the circuit breaker with such name does not exist + */ + public CircuitBreakerStats getStats(String name) { + for (CircuitBreakerStats stats : allStats) { + if (stats.getName().equals(name)) { + return stats; + } + } + return null; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(Fields.BREAKERS); + for (CircuitBreakerStats stats : allStats) { + if (stats != null) { + stats.toXContent(builder, params); + } + } + builder.endObject(); + return builder; + } + + /** + * Fields used for parsing and toXContent + * + * @opensearch.internal + */ + static final class Fields { + static final String BREAKERS = "breakers"; + } +} diff 
--git a/server/src/main/java/org/opensearch/indices/breaker/CircuitBreakerService.java b/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerService.java similarity index 83% rename from server/src/main/java/org/opensearch/indices/breaker/CircuitBreakerService.java rename to libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerService.java index b5cc1a6b1c6c5..dedeb0803271f 100644 --- a/server/src/main/java/org/opensearch/indices/breaker/CircuitBreakerService.java +++ b/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerService.java @@ -30,22 +30,20 @@ * GitHub history for details. */ -package org.opensearch.indices.breaker; +package org.opensearch.core.indices.breaker; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.component.AbstractLifecycleComponent; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; +import org.opensearch.core.common.breaker.CircuitBreaker; /** * Interface for Circuit Breaker services, which provide breakers to classes * that load field data. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class CircuitBreakerService extends AbstractLifecycleComponent { - private static final Logger logger = LogManager.getLogger(CircuitBreakerService.class); - protected CircuitBreakerService() {} /** diff --git a/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerStats.java b/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerStats.java new file mode 100644 index 0000000000000..ee71cf8d2ac0e --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerStats.java @@ -0,0 +1,204 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
*/ + +package org.opensearch.core.indices.breaker; + +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Locale; + +/** + * Class encapsulating stats about the {@link org.opensearch.core.common.breaker.CircuitBreaker} + * + * @opensearch.api + */ +@PublicApi(since = "1.0.0") +public class CircuitBreakerStats implements Writeable, ToXContentObject { + + /** The name of the circuit breaker */ + private final String name; + /** The limit size in bytes of the circuit breaker. Field : "limit_size_in_bytes" */ + private final long limit; + /** The estimated size in bytes of the breaker. Field : "estimated_size_in_bytes" */ + private final long estimated; + /** The number of times the breaker has been tripped. Field : "tripped" */ + private final long trippedCount; + /** The overhead of the breaker. Field : "overhead" */ + private final double overhead; + + /** + * Constructs a new instance + * + * @param name The name of the circuit breaker + * @param limit The limit size in bytes of the circuit breaker + * @param estimated The estimated size in bytes of the breaker + * @param overhead The overhead of the breaker + * @param trippedCount The number of times the breaker has been tripped + * @see org.opensearch.core.common.breaker.CircuitBreaker + */ + public CircuitBreakerStats(String name, long limit, long estimated, double overhead, long trippedCount) { + this.name = name; + this.limit = limit; + this.estimated = estimated; + this.trippedCount = trippedCount; + this.overhead = overhead; + } + + /** + * Constructs a new instance from the {@link StreamInput} + * + * @param in The StreamInput + * @throws IOException if an error occurs while reading from the StreamInput + * @see org.opensearch.core.common.breaker.CircuitBreaker + * @see #writeTo(StreamOutput) + */ + public CircuitBreakerStats(StreamInput in) throws IOException { + this.limit = in.readLong(); + this.estimated = in.readLong(); + this.overhead = in.readDouble(); + this.trippedCount = in.readLong(); + this.name = in.readString(); + } + + /** + * Writes this instance into a {@link StreamOutput} + * + * @param out The StreamOutput + * @throws IOException if an error occurs while writing to the StreamOutput + * @see #CircuitBreakerStats(StreamInput) + */ + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(limit); + out.writeLong(estimated); + out.writeDouble(overhead); + out.writeLong(trippedCount); + out.writeString(name); + } + + /** + * Returns the name of the circuit breaker + * @return The name of the circuit breaker + */ + public String getName() { + return this.name; + } + + /** + * Returns the limit size in bytes of the circuit breaker + * @return The limit size in bytes of the circuit breaker + */ + public long getLimit() { + return this.limit; + } + + /** + * Returns the estimated size in bytes of the breaker + * @return The estimated size in bytes of the breaker + */ + public long getEstimated() { + return this.estimated; + } + + /** + * Returns the number of times the breaker has been tripped + * @return The number of times the breaker has been tripped + */ + public long getTrippedCount() { + return this.trippedCount; + }
+ + /** + * Returns the overhead of the breaker + * @return The overhead of the breaker + */ + public double getOverhead() { + return this.overhead; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(name.toLowerCase(Locale.ROOT)); + builder.field(Fields.LIMIT, limit); + builder.field(Fields.LIMIT_HUMAN, new ByteSizeValue(limit)); + builder.field(Fields.ESTIMATED, estimated); + builder.field(Fields.ESTIMATED_HUMAN, new ByteSizeValue(estimated)); + builder.field(Fields.OVERHEAD, overhead); + builder.field(Fields.TRIPPED_COUNT, trippedCount); + builder.endObject(); + return builder; + } + + /** + * Returns a String representation of this CircuitBreakerStats + * @return "[name,limit=limit/limit_human,estimated=estimated/estimated_human,overhead=overhead,tripped=trippedCount]" + */ + @Override + public String toString() { + return "[" + + this.name + + ",limit=" + + this.limit + + "/" + + new ByteSizeValue(this.limit) + + ",estimated=" + + this.estimated + + "/" + + new ByteSizeValue(this.estimated) + + ",overhead=" + + this.overhead + + ",tripped=" + + this.trippedCount + + "]"; + } + + /** + * Fields used for statistics + * + * @opensearch.internal + */ + static final class Fields { + static final String LIMIT = "limit_size_in_bytes"; + static final String LIMIT_HUMAN = "limit_size"; + static final String ESTIMATED = "estimated_size_in_bytes"; + static final String ESTIMATED_HUMAN = "estimated_size"; + static final String OVERHEAD = "overhead"; + static final String TRIPPED_COUNT = "tripped"; + } +} diff --git a/libs/core/src/main/java/org/opensearch/core/indices/breaker/NoneCircuitBreakerService.java b/libs/core/src/main/java/org/opensearch/core/indices/breaker/NoneCircuitBreakerService.java new file mode 100644 index 0000000000000..49c5a393328b9 --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/core/indices/breaker/NoneCircuitBreakerService.java @@ -0,0 +1,79 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
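To make the field mapping above concrete, a small sketch of one stats entry with illustrative values:

import org.opensearch.core.indices.breaker.CircuitBreakerStats;

public class CircuitBreakerStatsExample {
    public static void main(String[] args) {
        // (name, limit, estimated, overhead, trippedCount)
        CircuitBreakerStats stats = new CircuitBreakerStats("request", 1024L, 512L, 1.0, 3L);

        // toString() pairs each raw byte count with its human-readable ByteSizeValue:
        // [request,limit=1024/1kb,estimated=512/512b,overhead=1.0,tripped=3]
        System.out.println(stats);
    }
}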
+ */ + +package org.opensearch.core.indices.breaker; + +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.NoopCircuitBreaker; + +/** + * Class that returns a breaker that use the NoopCircuitBreaker and never breaks + * + * @see org.opensearch.core.common.breaker.NoopCircuitBreaker + * @opensearch.internal + */ +public class NoneCircuitBreakerService extends CircuitBreakerService { + + private final CircuitBreaker breaker = new NoopCircuitBreaker(CircuitBreaker.FIELDDATA); + + public NoneCircuitBreakerService() { + super(); + } + + /** + * Returns a breaker that use the NoopCircuitBreaker and never breaks + * + * @param name name of the breaker (ignored) + * @return a NoopCircuitBreaker + */ + @Override + public CircuitBreaker getBreaker(String name) { + return breaker; + } + + @Override + public AllCircuitBreakerStats stats() { + return new AllCircuitBreakerStats(new CircuitBreakerStats[] { stats(CircuitBreaker.FIELDDATA) }); + } + + /** + * Always returns the same stats, a NoopCircuitBreaker never breaks and all operations are noops. + * + * @param name name of the breaker (ignored) + * @return always "fielddata", limit: -1, estimated: -1, overhead: 0, trippedCount: 0 + */ + @Override + public CircuitBreakerStats stats(String name) { + return new CircuitBreakerStats(CircuitBreaker.FIELDDATA, -1, -1, 0, 0); + } + +} diff --git a/libs/core/src/main/java/org/opensearch/core/indices/breaker/package-info.java b/libs/core/src/main/java/org/opensearch/core/indices/breaker/package-info.java new file mode 100644 index 0000000000000..a98f9ab1d9f1e --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/core/indices/breaker/package-info.java @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Top Level core circuit breaker implementation + * + * @opensearch.internal + * @opensearch.experimental + */ +package org.opensearch.core.indices.breaker; diff --git a/libs/core/src/main/java/org/opensearch/core/indices/package-info.java b/libs/core/src/main/java/org/opensearch/core/indices/package-info.java new file mode 100644 index 0000000000000..c80edf3d2f01a --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/core/indices/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Top Level Package for implementations used across indexes + */ +package org.opensearch.core.indices; diff --git a/libs/core/src/main/java/org/opensearch/core/rest/RestStatus.java b/libs/core/src/main/java/org/opensearch/core/rest/RestStatus.java index ae4f4c65b28d2..8441ce8b1b622 100644 --- a/libs/core/src/main/java/org/opensearch/core/rest/RestStatus.java +++ b/libs/core/src/main/java/org/opensearch/core/rest/RestStatus.java @@ -32,6 +32,7 @@ package org.opensearch.core.rest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.ShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -47,6 +48,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum RestStatus { /** * The client SHOULD continue with its request. 
This interim response is used to inform the client that the @@ -525,6 +527,15 @@ public int getStatus() { return status; } + /** + * Get category class of a rest status code. + * + * @return Integer representing class category of the concrete rest status code + */ + public int getStatusFamilyCode() { + return status / 100; + } + public static RestStatus readFrom(StreamInput in) throws IOException { return RestStatus.valueOf(in.readString()); } diff --git a/server/src/main/java/org/opensearch/node/ReportingService.java b/libs/core/src/main/java/org/opensearch/core/service/ReportingService.java similarity index 97% rename from server/src/main/java/org/opensearch/node/ReportingService.java rename to libs/core/src/main/java/org/opensearch/core/service/ReportingService.java index 969652e215e5e..3c88169f1e3b0 100644 --- a/server/src/main/java/org/opensearch/node/ReportingService.java +++ b/libs/core/src/main/java/org/opensearch/core/service/ReportingService.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.node; +package org.opensearch.core.service; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.xcontent.ToXContent; diff --git a/libs/core/src/main/java/org/opensearch/core/service/package-info.java b/libs/core/src/main/java/org/opensearch/core/service/package-info.java new file mode 100644 index 0000000000000..d427c6e5934c9 --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/core/service/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** OpenSearch Core Service Interfaces */ +package org.opensearch.core.service; diff --git a/server/src/main/java/org/opensearch/tasks/TaskCancelledException.java b/libs/core/src/main/java/org/opensearch/core/tasks/TaskCancelledException.java similarity index 97% rename from server/src/main/java/org/opensearch/tasks/TaskCancelledException.java rename to libs/core/src/main/java/org/opensearch/core/tasks/TaskCancelledException.java index 38e50f56b24b2..6bdc1e42f351a 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskCancelledException.java +++ b/libs/core/src/main/java/org/opensearch/core/tasks/TaskCancelledException.java @@ -29,7 +29,7 @@ * GitHub history for details. */ -package org.opensearch.tasks; +package org.opensearch.core.tasks; import org.opensearch.OpenSearchException; import org.opensearch.core.common.io.stream.StreamInput; diff --git a/server/src/main/java/org/opensearch/tasks/TaskId.java b/libs/core/src/main/java/org/opensearch/core/tasks/TaskId.java similarity index 95% rename from server/src/main/java/org/opensearch/tasks/TaskId.java rename to libs/core/src/main/java/org/opensearch/core/tasks/TaskId.java index e7420fce397e2..d34d4acf00e6e 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskId.java +++ b/libs/core/src/main/java/org/opensearch/core/tasks/TaskId.java @@ -30,10 +30,11 @@ * GitHub history for details. 
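A quick sketch of the new getStatusFamilyCode() helper added above; integer division by 100 maps each status code to its 1xx-5xx class:

import org.opensearch.core.rest.RestStatus;

public class RestStatusFamilyExample {
    public static void main(String[] args) {
        System.out.println(RestStatus.OK.getStatusFamilyCode());           // 2 (success class)
        System.out.println(RestStatus.NOT_FOUND.getStatusFamilyCode());   // 4 (client error class)
        System.out.println(RestStatus.BAD_GATEWAY.getStatusFamilyCode()); // 5 (server error class)
    }
}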
*/ -package org.opensearch.tasks; +package org.opensearch.core.tasks; import org.opensearch.OpenSearchParseException; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -45,8 +46,9 @@ /** * Task id that consists of node id and id of the task on the node * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class TaskId implements Writeable { public static final TaskId EMPTY_TASK_ID = new TaskId(); @@ -71,7 +73,7 @@ private TaskId() { } public TaskId(String taskId) { - if (org.opensearch.core.common.Strings.hasLength(taskId) && "unset".equals(taskId) == false) { + if (Strings.hasLength(taskId) && "unset".equals(taskId) == false) { String[] s = Strings.split(taskId, ":"); if (s == null || s.length != 2) { throw new IllegalArgumentException("malformed task id " + taskId); diff --git a/libs/core/src/main/java/org/opensearch/core/tasks/package-info.java b/libs/core/src/main/java/org/opensearch/core/tasks/package-info.java new file mode 100644 index 0000000000000..e421816c6b541 --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/core/tasks/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Core Tasks Foundation classes used across the opensearch code base */ +package org.opensearch.core.tasks; diff --git a/server/src/main/java/org/opensearch/tasks/ResourceStats.java b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceStats.java similarity index 78% rename from server/src/main/java/org/opensearch/tasks/ResourceStats.java rename to libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceStats.java index 31c129b7b2ff9..e99afbb759031 100644 --- a/server/src/main/java/org/opensearch/tasks/ResourceStats.java +++ b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceStats.java @@ -6,13 +6,16 @@ * compatible open source license. */ -package org.opensearch.tasks; +package org.opensearch.core.tasks.resourcetracker; + +import org.opensearch.common.annotation.PublicApi; /** * Different resource stats are defined. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.1.0") public enum ResourceStats { CPU("cpu_time_in_nanos"), MEMORY("memory_in_bytes"); diff --git a/server/src/main/java/org/opensearch/tasks/ResourceStatsType.java b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceStatsType.java similarity index 84% rename from server/src/main/java/org/opensearch/tasks/ResourceStatsType.java rename to libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceStatsType.java index 138c74e128d70..2aedff2940d83 100644 --- a/server/src/main/java/org/opensearch/tasks/ResourceStatsType.java +++ b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceStatsType.java @@ -6,13 +6,16 @@ * compatible open source license. */ -package org.opensearch.tasks; +package org.opensearch.core.tasks.resourcetracker; + +import org.opensearch.common.annotation.PublicApi; /** * Defines the different types of resource stats. 
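A small sketch of the TaskId string contract preserved by the move above: a well-formed id is "<nodeId>:<taskNumber>", and anything other than an empty string or "unset" that does not split into exactly two parts is rejected:

import org.opensearch.core.tasks.TaskId;

public class TaskIdExample {
    public static void main(String[] args) {
        TaskId taskId = new TaskId("node_abc:42");
        System.out.println(taskId.getNodeId()); // node_abc
        System.out.println(taskId.getId());     // 42

        try {
            new TaskId("not-a-task-id");
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // malformed task id not-a-task-id
        }
    }
}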
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.1.0") public enum ResourceStatsType { // resource stats of the worker thread reported directly from runnable. WORKER_STATS("worker_stats", false); diff --git a/server/src/main/java/org/opensearch/tasks/ResourceUsageInfo.java b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceUsageInfo.java similarity index 95% rename from server/src/main/java/org/opensearch/tasks/ResourceUsageInfo.java rename to libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceUsageInfo.java index 652562e3f9e7a..a278b61894a65 100644 --- a/server/src/main/java/org/opensearch/tasks/ResourceUsageInfo.java +++ b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceUsageInfo.java @@ -6,10 +6,11 @@ * compatible open source license. */ -package org.opensearch.tasks; +package org.opensearch.core.tasks.resourcetracker; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.opensearch.common.annotation.PublicApi; import java.util.Collections; import java.util.EnumMap; @@ -22,8 +23,9 @@ * It captures the resource usage information like memory, CPU about a particular execution of thread * for a specific stats type. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.1.0") public class ResourceUsageInfo { private static final Logger logger = LogManager.getLogger(ResourceUsageInfo.class); private final EnumMap<ResourceStats, ResourceStatsInfo> statsInfo = new EnumMap<>(ResourceStats.class); @@ -89,7 +91,7 @@ public String toString() { /** * Defines resource stats information. */ - static class ResourceStatsInfo { + public static class ResourceStatsInfo { private final long startValue; private final AtomicLong endValue; diff --git a/server/src/main/java/org/opensearch/tasks/ResourceUsageMetric.java b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceUsageMetric.java similarity index 80% rename from server/src/main/java/org/opensearch/tasks/ResourceUsageMetric.java rename to libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceUsageMetric.java index 931e3aa00d736..f4cce2de820a0 100644 --- a/server/src/main/java/org/opensearch/tasks/ResourceUsageMetric.java +++ b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ResourceUsageMetric.java @@ -6,13 +6,16 @@ * compatible open source license. */ -package org.opensearch.tasks; +package org.opensearch.core.tasks.resourcetracker; + +import org.opensearch.common.annotation.PublicApi; /** * Information about resource usage * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.1.0") public class ResourceUsageMetric { private final ResourceStats stats; private final long value; diff --git a/server/src/main/java/org/opensearch/tasks/TaskResourceStats.java b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskResourceStats.java similarity index 92% rename from server/src/main/java/org/opensearch/tasks/TaskResourceStats.java rename to libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskResourceStats.java index 59526cd229001..048c4a228fbd5 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskResourceStats.java +++ b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskResourceStats.java @@ -6,14 +6,15 @@ * compatible open source license. 
*/ -package org.opensearch.tasks; +package org.opensearch.core.tasks.resourcetracker; import org.opensearch.Version; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -23,20 +24,21 @@ import java.util.Map; import java.util.Objects; -import static org.opensearch.tasks.Task.THREAD_INFO; - /** * Resource information about a currently running task. * <p> * Writeable TaskResourceStats objects are used to represent resource * snapshot information about currently running task. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.1.0") public class TaskResourceStats implements Writeable, ToXContentFragment { private final Map<String, TaskResourceUsage> resourceUsage; private final TaskThreadUsage threadUsage; + public static final String THREAD_INFO = "thread_info"; + public TaskResourceStats(Map<String, TaskResourceUsage> resourceUsage, TaskThreadUsage threadUsage) { this.resourceUsage = Objects.requireNonNull(resourceUsage, "resource usage is required"); this.threadUsage = Objects.requireNonNull(threadUsage, "thread usage is required"); @@ -117,7 +119,7 @@ public static TaskResourceStats fromXContent(XContentParser parser) throws IOExc @Override public String toString() { - return Strings.toString(XContentType.JSON, this, true, true); + return Strings.toString(MediaTypeRegistry.JSON, this, true, true); } // Implements equals and hashcode for testing diff --git a/server/src/main/java/org/opensearch/tasks/TaskResourceUsage.java b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskResourceUsage.java similarity index 91% rename from server/src/main/java/org/opensearch/tasks/TaskResourceUsage.java rename to libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskResourceUsage.java index 490adc3e7c637..654f1c5695937 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskResourceUsage.java +++ b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskResourceUsage.java @@ -6,15 +6,16 @@ * compatible open source license. */ -package org.opensearch.tasks; +package org.opensearch.core.tasks.resourcetracker; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -30,8 +31,9 @@ * Writeable TaskResourceUsage objects are used to represent resource usage * information of running tasks. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.1.0") public class TaskResourceUsage implements Writeable, ToXContentFragment { private static final ParseField CPU_TIME_IN_NANOS = new ParseField("cpu_time_in_nanos"); private static final ParseField MEMORY_IN_BYTES = new ParseField("memory_in_bytes"); @@ -88,7 +90,7 @@ public static TaskResourceUsage fromXContent(XContentParser parser) { @Override public String toString() { - return Strings.toString(XContentType.JSON, this, true, true); + return Strings.toString(MediaTypeRegistry.JSON, this, true, true); } // Implements equals and hashcode for testing diff --git a/server/src/main/java/org/opensearch/tasks/TaskThreadUsage.java b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskThreadUsage.java similarity index 91% rename from server/src/main/java/org/opensearch/tasks/TaskThreadUsage.java rename to libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskThreadUsage.java index d30a3d723a15a..abe03e3c520e0 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskThreadUsage.java +++ b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/TaskThreadUsage.java @@ -6,15 +6,16 @@ * compatible open source license. */ -package org.opensearch.tasks; +package org.opensearch.core.tasks.resourcetracker; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -29,8 +30,9 @@ * Writeable TaskThreadUsage objects are used to represent thread-related resource usage of running tasks. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.1.0") public class TaskThreadUsage implements Writeable, ToXContentFragment { private static final String THREAD_EXECUTIONS = "thread_executions"; @@ -104,6 +106,6 @@ public int hashCode() { @Override public String toString() { - return Strings.toString(XContentType.JSON, this, true, true); + return Strings.toString(MediaTypeRegistry.JSON, this, true, true); } } diff --git a/server/src/main/java/org/opensearch/tasks/ThreadResourceInfo.java b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ThreadResourceInfo.java similarity index 92% rename from server/src/main/java/org/opensearch/tasks/ThreadResourceInfo.java rename to libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ThreadResourceInfo.java index de49d86d1d5c4..703fdfdf8a784 100644 --- a/server/src/main/java/org/opensearch/tasks/ThreadResourceInfo.java +++ b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/ThreadResourceInfo.java @@ -6,7 +6,9 @@ * compatible open source license. */ -package org.opensearch.tasks; +package org.opensearch.core.tasks.resourcetracker; + +import org.opensearch.common.annotation.PublicApi; /** * Resource consumption information about a particular execution of thread.
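Backing up to the TaskResourceUsage hunk above, a short sketch of the serialized form after the MediaTypeRegistry.JSON switch, assuming the two-argument constructor that backs the cpu_time_in_nanos and memory_in_bytes fields:

import org.opensearch.core.tasks.resourcetracker.TaskResourceUsage;

public class TaskResourceUsageExample {
    public static void main(String[] args) {
        TaskResourceUsage usage = new TaskResourceUsage(5_000_000L, 2_048L); // cpu nanos, memory bytes

        // toString() now renders pretty-printed JSON via MediaTypeRegistry.JSON, e.g.
        // { "cpu_time_in_nanos" : 5000000, "memory_in_bytes" : 2048 }
        System.out.println(usage);
    }
}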
@@ -14,8 +16,9 @@ * It captures the resource usage information about a particular execution of thread * for a specific stats type like worker_stats or response_stats etc., * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.1.0") public class ThreadResourceInfo { private final long threadId; private volatile boolean isActive = true; diff --git a/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/package-info.java b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/package-info.java new file mode 100644 index 0000000000000..b46b685ffaaf0 --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/core/tasks/resourcetracker/package-info.java @@ -0,0 +1,9 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +/** Resource tracking classes for tracking task resource consumption (e.g., memory, cpu) */ +package org.opensearch.core.tasks.resourcetracker; diff --git a/server/src/main/java/org/opensearch/transport/TransportMessage.java b/libs/core/src/main/java/org/opensearch/core/transport/TransportMessage.java similarity index 94% rename from server/src/main/java/org/opensearch/transport/TransportMessage.java rename to libs/core/src/main/java/org/opensearch/core/transport/TransportMessage.java index 78216047d530e..941babda40aa3 100644 --- a/server/src/main/java/org/opensearch/transport/TransportMessage.java +++ b/libs/core/src/main/java/org/opensearch/core/transport/TransportMessage.java @@ -30,11 +30,11 @@ * GitHub history for details. */ -package org.opensearch.transport; +package org.opensearch.core.transport; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; /** * Message over the transport interface diff --git a/server/src/main/java/org/opensearch/transport/TransportResponse.java b/libs/core/src/main/java/org/opensearch/core/transport/TransportResponse.java similarity index 98% rename from server/src/main/java/org/opensearch/transport/TransportResponse.java rename to libs/core/src/main/java/org/opensearch/core/transport/TransportResponse.java index 73713fa1447a8..038069e93a51b 100644 --- a/server/src/main/java/org/opensearch/transport/TransportResponse.java +++ b/libs/core/src/main/java/org/opensearch/core/transport/TransportResponse.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.transport; +package org.opensearch.core.transport; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; diff --git a/libs/core/src/main/java/org/opensearch/core/transport/package-info.java b/libs/core/src/main/java/org/opensearch/core/transport/package-info.java new file mode 100644 index 0000000000000..91db839f40305 --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/core/transport/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** Core Transport Layer classes used across the OpenSearch core */ +package org.opensearch.core.transport; diff --git a/libs/core/src/main/java/org/opensearch/core/util/BytesRefUtils.java b/libs/core/src/main/java/org/opensearch/core/util/BytesRefUtils.java index 30c9f182fcae6..2aad068534b9d 100644 --- a/libs/core/src/main/java/org/opensearch/core/util/BytesRefUtils.java +++ b/libs/core/src/main/java/org/opensearch/core/util/BytesRefUtils.java @@ -32,6 +32,7 @@ package org.opensearch.core.util; +import org.apache.lucene.util.BitUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefArray; import org.apache.lucene.util.BytesRefBuilder; @@ -103,12 +104,12 @@ public static int sortAndDedup(final BytesRefArray bytes, final int[] indices) { return uniqueCount; } + /** + * Decodes a long value written as bytes in big endian order. + * @param bytes in big endian order + * @return long value + */ public static long bytesToLong(BytesRef bytes) { - int high = (bytes.bytes[bytes.offset + 0] << 24) | ((bytes.bytes[bytes.offset + 1] & 0xff) << 16) | ((bytes.bytes[bytes.offset + 2] - & 0xff) << 8) | (bytes.bytes[bytes.offset + 3] & 0xff); - int low = (bytes.bytes[bytes.offset + 4] << 24) | ((bytes.bytes[bytes.offset + 5] & 0xff) << 16) | ((bytes.bytes[bytes.offset + 6] - & 0xff) << 8) | (bytes.bytes[bytes.offset + 7] & 0xff); - return (((long) high) << 32) | (low & 0x0ffffffffL); + return (long) BitUtil.VH_BE_LONG.get(bytes.bytes, bytes.offset); } - } diff --git a/libs/core/src/main/java/org/opensearch/core/util/FileSystemUtils.java b/libs/core/src/main/java/org/opensearch/core/util/FileSystemUtils.java index d742e8584fa8b..99f48ed49dd39 100644 --- a/libs/core/src/main/java/org/opensearch/core/util/FileSystemUtils.java +++ b/libs/core/src/main/java/org/opensearch/core/util/FileSystemUtils.java @@ -34,8 +34,8 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; -import org.opensearch.common.util.io.IOUtils; import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.util.io.IOUtils; import java.io.IOException; import java.io.InputStream; diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractObjectParser.java b/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractObjectParser.java index a0e2a54fce91c..32bbfc600f1f0 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractObjectParser.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractObjectParser.java @@ -33,6 +33,7 @@ package org.opensearch.core.xcontent; import org.opensearch.common.CheckedFunction; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ObjectParser.NamedObjectParser; import org.opensearch.core.xcontent.ObjectParser.ValueType; @@ -47,8 +48,9 @@ /** * Superclass for {@link ObjectParser} and {@link ConstructingObjectParser}. Defines most of the "declare" methods so they can be shared. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class AbstractObjectParser<Value, Context> { /** @@ -108,7 +110,7 @@ public abstract <T> void declareNamedObject( * * Unlike the other version of this method, "ordered" mode (arrays of * objects) is not supported. - * + * <p> * See NamedObjectHolder in ObjectParserTests for examples of how to invoke * this. 
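Stepping back to the BytesRefUtils hunk above, a quick sketch of the big-endian contract the VarHandle rewrite preserves:

import org.apache.lucene.util.BytesRef;
import org.opensearch.core.util.BytesRefUtils;

public class BytesToLongExample {
    public static void main(String[] args) {
        // big endian: most significant byte first
        byte[] raw = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 };
        long value = BytesRefUtils.bytesToLong(new BytesRef(raw));
        System.out.println(Long.toHexString(value)); // 102030405060708
    }
}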
* @@ -163,7 +165,7 @@ public abstract <T> void declareNamedObjects( * the order sent but tools that generate json are free to put object * members in an unordered Map, jumbling them. Thus, if you care about order * you can send the object in the second way. - * + * <p> * See NamedObjectHolder in ObjectParserTests for examples of how to invoke * this. * @@ -366,10 +368,10 @@ public <T> void declareFieldArray( /** * Declares a set of fields that are required for parsing to succeed. Only one of the values * provided per String[] must be matched. - * + * <p> * E.g. <code>declareRequiredFieldSet("foo", "bar");</code> means at least one of "foo" or * "bar" fields must be present. If neither of those fields are present, an exception will be thrown. - * + * <p> * Multiple required sets can be configured: * * <pre><code> @@ -379,7 +381,7 @@ public <T> void declareFieldArray( * * requires that one of "foo" or "bar" fields are present, and also that one of "bizz" or * "buzz" fields are present. - * + * <p> * In JSON, it means any of these combinations are acceptable: * * <ul> @@ -415,12 +417,12 @@ public <T> void declareFieldArray( /** * Declares a set of fields of which at most one must appear for parsing to succeed - * + * <p> * E.g. <code>declareExclusiveFieldSet("foo", "bar");</code> means that only one of 'foo' * or 'bar' must be present, and if both appear then an exception will be thrown. Note * that this does not make 'foo' or 'bar' required - see {@link #declareRequiredFieldSet(String...)} * for required fields. - * + * <p> * Multiple exclusive sets may be declared * * @param exclusiveSet a set of field names, at most one of which must appear diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/ContextParser.java b/libs/core/src/main/java/org/opensearch/core/xcontent/ContextParser.java index d50dd2e68d890..f6e5647532bee 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/ContextParser.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/ContextParser.java @@ -32,11 +32,16 @@ package org.opensearch.core.xcontent; +import org.opensearch.common.annotation.PublicApi; + import java.io.IOException; /** * Reads an object from a parser using some context. + * + * @opensearch.api */ +@PublicApi(since = "1.0.0") @FunctionalInterface public interface ContextParser<Context, T> { T parse(XContentParser p, Context c) throws IOException; diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/DeprecationHandler.java b/libs/core/src/main/java/org/opensearch/core/xcontent/DeprecationHandler.java index 570a13ad8e093..a0e4027290742 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/DeprecationHandler.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/DeprecationHandler.java @@ -32,12 +32,17 @@ package org.opensearch.core.xcontent; +import org.opensearch.common.annotation.PublicApi; + import java.util.function.Supplier; /** * Callback for notifying the creator of the {@link XContentParser} that * parsing hit a deprecated field. 
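The `declareRequiredFieldSet` javadoc above is easiest to see in action. A hedged sketch of a parser that requires at least one of two fields; `Job` and `RequiredSetDemo` are hypothetical names, and the sketch assumes a JSON `MediaTypeProvider` is on the classpath (see the `MediaTypeRegistry` bootstrap later in this patch):

```java
import org.opensearch.core.ParseField;
import org.opensearch.core.xcontent.DeprecationHandler;
import org.opensearch.core.xcontent.MediaTypeRegistry;
import org.opensearch.core.xcontent.NamedXContentRegistry;
import org.opensearch.core.xcontent.ObjectParser;
import org.opensearch.core.xcontent.XContentParser;

public class RequiredSetDemo {
    // Hypothetical value class, for illustration only
    static class Job {
        String foo;
        String bar;
    }

    static final ObjectParser<Job, Void> PARSER = new ObjectParser<>("job", Job::new);
    static {
        PARSER.declareString((job, v) -> job.foo = v, new ParseField("foo"));
        PARSER.declareString((job, v) -> job.bar = v, new ParseField("bar"));
        // Parsing fails unless at least one of "foo" or "bar" is present
        PARSER.declareRequiredFieldSet("foo", "bar");
    }

    public static void main(String[] args) throws Exception {
        try (XContentParser p = MediaTypeRegistry.JSON.xContent()
            .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, "{\"foo\":\"a\"}")) {
            Job job = PARSER.parse(p, null); // succeeds: "foo" satisfies the required set
            System.out.println(job.foo);     // a
        }
    }
}
```

`declareExclusiveFieldSet("foo", "bar")` would instead reject documents containing both fields, without making either one required.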
+ * + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface DeprecationHandler { /** * Throws an {@link UnsupportedOperationException} when parsing hits a diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/MapXContentParser.java b/libs/core/src/main/java/org/opensearch/core/xcontent/MapXContentParser.java index 54329038e1fc5..0a5cda324ddb7 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/MapXContentParser.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/MapXContentParser.java @@ -45,7 +45,7 @@ */ public class MapXContentParser extends AbstractXContentParser { - private MediaType xContentType; + private MediaType mediaType; private TokenIterator iterator; private boolean closed; @@ -53,10 +53,10 @@ public MapXContentParser( NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler, Map<String, Object> map, - MediaType xContentType + MediaType mediaType ) { super(xContentRegistry, deprecationHandler); - this.xContentType = xContentType; + this.mediaType = mediaType; this.iterator = new MapIterator(null, null, map); } @@ -105,7 +105,7 @@ protected BigInteger doBigIntegerValue() throws IOException { @Override public MediaType contentType() { - return xContentType; + return mediaType; } @Override @@ -277,7 +277,7 @@ public Token currentToken() { /** * field name that the child element needs to inherit. - * + * <p> * In most cases this is the same as currentName() except with embedded arrays. In "foo": [[42]] the first START_ARRAY * token will have the name "foo", but the second START_ARRAY will have no name. */ diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/MediaType.java b/libs/core/src/main/java/org/opensearch/core/xcontent/MediaType.java index c1409e551e47d..c58b3e80d98b5 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/MediaType.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/MediaType.java @@ -32,15 +32,21 @@ package org.opensearch.core.xcontent; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.Writeable; +import java.io.IOException; +import java.io.OutputStream; import java.util.Locale; /** * Abstracts a <a href="http://en.wikipedia.org/wiki/Internet_media_type">Media Type</a> and a format parameter. * Media types are used as values on Content-Type and Accept headers * format is an URL parameter, specifies response media type. + * + * @opensearch.api */ +@PublicApi(since = "2.1.0") public interface MediaType extends Writeable { /** * Returns a type part of a MediaType @@ -69,12 +75,20 @@ default String typeWithSubtype() { XContent xContent(); + boolean detectedXContent(final byte[] bytes, int offset, int length); + + boolean detectedXContent(final CharSequence content, final int length); + default String mediaType() { return mediaTypeWithoutParameters(); } String mediaTypeWithoutParameters(); + XContentBuilder contentBuilder() throws IOException; + + XContentBuilder contentBuilder(final OutputStream os) throws IOException; + /** * Accepts a format string, which is most of the time is equivalent to {@link MediaType#subtype()} * and attempts to match the value to an {@link MediaType}. 
@@ -82,7 +96,7 @@ default String mediaType() { * This method will return {@code null} if no match is found */ static MediaType fromFormat(String mediaType) { - return MediaTypeParserRegistry.fromFormat(mediaType); + return MediaTypeRegistry.fromFormat(mediaType); } /** @@ -93,7 +107,7 @@ static MediaType fromFormat(String mediaType) { */ static MediaType fromMediaType(String mediaTypeHeaderValue) { mediaTypeHeaderValue = removeVersionInMediaType(mediaTypeHeaderValue); - return MediaTypeParserRegistry.fromMediaType(mediaTypeHeaderValue); + return MediaTypeRegistry.fromMediaType(mediaTypeHeaderValue); } /** diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/MediaTypeParserRegistry.java b/libs/core/src/main/java/org/opensearch/core/xcontent/MediaTypeParserRegistry.java deleted file mode 100644 index 62a26b4458b09..0000000000000 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/MediaTypeParserRegistry.java +++ /dev/null @@ -1,166 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.core.xcontent; - -import java.util.HashMap; -import java.util.Locale; -import java.util.Map; - -/** - * Parses supported internet media types - * - * @opensearch.internal - */ -public final class MediaTypeParserRegistry { - private static Map<String, MediaType> formatToMediaType = Map.of(); - private static Map<String, MediaType> typeWithSubtypeToMediaType = Map.of(); - - // Default mediaType singleton - private static MediaType DEFAULT_MEDIA_TYPE; - - public static void register(MediaType[] acceptedMediaTypes, Map<String, MediaType> additionalMediaTypes) { - // ensures the map is not overwritten: - Map<String, MediaType> typeMap = new HashMap<>(typeWithSubtypeToMediaType); - Map<String, MediaType> formatMap = new HashMap<>(formatToMediaType); - for (MediaType mediaType : acceptedMediaTypes) { - if (formatMap.containsKey(mediaType.format())) { - throw new IllegalArgumentException("unable to register mediaType: [" + mediaType.format() + "]. Type already exists."); - } - typeMap.put(mediaType.typeWithSubtype(), mediaType); - formatMap.put(mediaType.format(), mediaType); - } - for (Map.Entry<String, MediaType> entry : additionalMediaTypes.entrySet()) { - String typeWithSubtype = entry.getKey().toLowerCase(Locale.ROOT); - if (typeMap.containsKey(typeWithSubtype)) { - throw new IllegalArgumentException( - "unable to register mediaType: [" - + entry.getKey() - + "]. " - + "Type already exists and is mapped to: [." 
- + entry.getValue().format() - + "]" - ); - } - - MediaType mediaType = entry.getValue(); - typeMap.put(typeWithSubtype, mediaType); - formatMap.putIfAbsent(mediaType.format(), mediaType); // ignore if the additional type mapping already exists - } - - formatToMediaType = Map.copyOf(formatMap); - typeWithSubtypeToMediaType = Map.copyOf(typeMap); - } - - public static MediaType fromMediaType(String mediaType) { - ParsedMediaType parsedMediaType = parseMediaType(mediaType); - return parsedMediaType != null ? parsedMediaType.getMediaType() : null; - } - - public static MediaType fromFormat(String format) { - if (format == null) { - return null; - } - return formatToMediaType.get(format.toLowerCase(Locale.ROOT)); - } - - /** - * parsing media type that follows https://tools.ietf.org/html/rfc7231#section-3.1.1.1 - * @param headerValue a header value from Accept or Content-Type - * @return a parsed media-type - */ - public static ParsedMediaType parseMediaType(String headerValue) { - if (headerValue != null) { - String[] split = headerValue.toLowerCase(Locale.ROOT).split(";"); - - String[] typeSubtype = split[0].trim().split("/"); - if (typeSubtype.length == 2) { - String type = typeSubtype[0]; - String subtype = typeSubtype[1]; - MediaType mediaType = typeWithSubtypeToMediaType.get(type + "/" + subtype); - if (mediaType != null) { - Map<String, String> parameters = new HashMap<>(); - for (int i = 1; i < split.length; i++) { - // spaces are allowed between parameters, but not between '=' sign - String[] keyValueParam = split[i].trim().split("="); - if (keyValueParam.length != 2 || hasSpaces(keyValueParam[0]) || hasSpaces(keyValueParam[1])) { - return null; - } - parameters.put(keyValueParam[0], keyValueParam[1]); - } - return new ParsedMediaType(mediaType, parameters); - } - } - - } - return null; - } - - private static boolean hasSpaces(String s) { - return s.trim().equals(s) == false; - } - - /** - * A media type object that contains all the information provided on a Content-Type or Accept header - */ - public static class ParsedMediaType { - private final Map<String, String> parameters; - private final MediaType mediaType; - - public ParsedMediaType(MediaType mediaType, Map<String, String> parameters) { - this.parameters = parameters; - this.mediaType = mediaType; - } - - public MediaType getMediaType() { - return mediaType; - } - - public Map<String, String> getParameters() { - return parameters; - } - } - - public static void setDefaultMediaType(final MediaType mediaType) { - if (DEFAULT_MEDIA_TYPE != null) { - throw new RuntimeException( - "unable to reset the default media type from current default [" + DEFAULT_MEDIA_TYPE + "] to [" + mediaType + "]" - ); - } else { - DEFAULT_MEDIA_TYPE = mediaType; - } - } - - public static MediaType getDefaultMediaType() { - return DEFAULT_MEDIA_TYPE; - } -} diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/MediaTypeRegistry.java b/libs/core/src/main/java/org/opensearch/core/xcontent/MediaTypeRegistry.java new file mode 100644 index 0000000000000..bbb55204712d1 --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/MediaTypeRegistry.java @@ -0,0 +1,405 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.core.xcontent; + +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.spi.MediaTypeProvider; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.io.UncheckedIOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.ServiceLoader; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * Parses supported internet media types + * + * @opensearch.internal + */ +public final class MediaTypeRegistry { + private static Map<String, MediaType> formatToMediaType = Map.of(); + private static Map<String, MediaType> typeWithSubtypeToMediaType = Map.of(); + + // Default mediaType singleton + private static MediaType DEFAULT_MEDIA_TYPE; + public static final int GUESS_HEADER_LENGTH = 20; + + // JSON is a core type, so we create a static instance for implementations that require JSON format (e.g., tests) + // todo we should explore moving the concrete JSON implementation from the xcontent library to core + public static final MediaType JSON; + + static { + List<MediaType> mediaTypes = new ArrayList<>(); + Map<String, MediaType> amt = new HashMap<>(); + for (MediaTypeProvider provider : ServiceLoader.load(MediaTypeProvider.class, MediaTypeProvider.class.getClassLoader())) { + mediaTypes.addAll(provider.getMediaTypes()); + amt = Stream.of(amt, provider.getAdditionalMediaTypes()) + .flatMap(map -> map.entrySet().stream()) + .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue)); + } + register(mediaTypes.toArray(new MediaType[0]), amt); + JSON = fromMediaType("application/json"); + setDefaultMediaType(JSON); + } + + private static void register(MediaType[] acceptedMediaTypes, Map<String, MediaType> additionalMediaTypes) { + // ensures the map is not overwritten: + Map<String, MediaType> typeMap = new HashMap<>(typeWithSubtypeToMediaType); + Map<String, MediaType> formatMap = new HashMap<>(formatToMediaType); + for (MediaType mediaType : acceptedMediaTypes) { + if (formatMap.containsKey(mediaType.format())) { + throw new IllegalArgumentException("unable to register mediaType: [" + mediaType.format() + "]. 
Type already exists."); + } + typeMap.put(mediaType.typeWithSubtype(), mediaType); + formatMap.put(mediaType.format(), mediaType); + } + for (Map.Entry<String, MediaType> entry : additionalMediaTypes.entrySet()) { + String typeWithSubtype = entry.getKey().toLowerCase(Locale.ROOT); + if (typeMap.containsKey(typeWithSubtype)) { + throw new IllegalArgumentException( + "unable to register mediaType: [" + + entry.getKey() + + "]. " + + "Type already exists and is mapped to: [." + + entry.getValue().format() + + "]" + ); + } + + MediaType mediaType = entry.getValue(); + typeMap.put(typeWithSubtype, mediaType); + formatMap.putIfAbsent(mediaType.format(), mediaType); // ignore if the additional type mapping already exists + } + + formatToMediaType = Map.copyOf(formatMap); + typeWithSubtypeToMediaType = Map.copyOf(typeMap); + } + + public static MediaType fromMediaType(String mediaType) { + ParsedMediaType parsedMediaType = parseMediaType(mediaType); + return parsedMediaType != null ? parsedMediaType.getMediaType() : null; + } + + public static MediaType fromFormat(String format) { + if (format == null) { + return null; + } + return formatToMediaType.get(format.toLowerCase(Locale.ROOT)); + } + + /** + * Returns a binary content builder for the provided content type. + */ + public static XContentBuilder contentBuilder(MediaType type) throws IOException { + for (var mediaType : formatToMediaType.values()) { + if (type == mediaType) { + return type.contentBuilder(); + } + } + throw new IllegalArgumentException("No matching content type for " + type); + } + + public static XContentBuilder contentBuilder(MediaType type, OutputStream outputStream) throws IOException { + for (var mediaType : formatToMediaType.values()) { + if (type == mediaType) { + return type.contentBuilder(outputStream); + } + } + throw new IllegalArgumentException("No matching content type for " + type); + } + + /** + * Guesses the content (type) based on the provided char sequence and returns the corresponding {@link XContent} + * + * @deprecated the content type should not be guessed except for few cases where we effectively don't know the content type. + * The REST layer should move to reading the Content-Type header instead. There are other places where auto-detection may be needed. + * This method is deprecated to prevent usages of it from spreading further without specific reasons. + */ + @Deprecated + public static MediaType xContent(final byte[] data, int offset, int length) { + MediaType type = mediaTypeFromBytes(data, offset, length); + if (type == null) { + throw new XContentParseException("Failed to derive xcontent"); + } + return type; + } + + /** + * Guesses the content type based on the provided bytes and returns the corresponding {@link XContent} + * + * @deprecated the content type should not be guessed except for few cases where we effectively don't know the content type. + * The REST layer should move to reading the Content-Type header instead. There are other places where auto-detection may be needed. + * This method is deprecated to prevent usages of it from spreading further without specific reasons. + */ + @Deprecated + public static MediaType xContent(byte[] data) { + return xContent(data, 0, data.length); + } + + /** + * Guesses the content (type) based on the provided char sequence and returns the corresponding {@link XContent} + * + * @deprecated the content type should not be guessed except for few cases where we effectively don't know the content type. 
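A usage sketch for the registry lookups just shown (`fromFormat`, `fromMediaType`, and the `parseMediaType` defined further down in this file); it assumes the JSON media type has been registered through the `ServiceLoader` bootstrap above, and `MediaTypeLookupDemo` is our name:

```java
import org.opensearch.core.xcontent.MediaType;
import org.opensearch.core.xcontent.MediaTypeRegistry;

public class MediaTypeLookupDemo {
    public static void main(String[] args) {
        // Lookup by the ?format= URL parameter; case-insensitive
        MediaType byFormat = MediaTypeRegistry.fromFormat("JSON");

        // Lookup by a Content-Type / Accept header value; parameters are tolerated
        MediaType byHeader = MediaTypeRegistry.fromMediaType("application/json; charset=utf-8");

        System.out.println(byFormat == MediaTypeRegistry.JSON); // true
        System.out.println(byHeader == MediaTypeRegistry.JSON); // true

        // The RFC 7231 parse also keeps the parameters around
        MediaTypeRegistry.ParsedMediaType parsed = MediaTypeRegistry.parseMediaType("application/json; charset=utf-8");
        System.out.println(parsed.getParameters()); // {charset=utf-8}
    }
}
```

Both lookups resolve to the same registered singleton, which is why identity comparison against `MediaTypeRegistry.JSON` holds.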
+ * The REST layer should move to reading the Content-Type header instead. There are other places where auto-detection may be needed. + * This method is deprecated to prevent usages of it from spreading further without specific reasons. + */ + @Deprecated + public static MediaType xContent(CharSequence content) { + MediaType type = xContentType(content); + if (type == null) { + throw new XContentParseException("Failed to derive xcontent"); + } + return type; + } + + /** + * Guesses the content type based on the provided char sequence. + * + * @deprecated the content type should not be guessed except for few cases where we effectively don't know the content type. + * The REST layer should move to reading the Content-Type header instead. There are other places where auto-detection may be needed. + * This method is deprecated to prevent usages of it from spreading further without specific reasons. + */ + @Deprecated + public static MediaType xContentType(CharSequence content) { + int length = content.length() < GUESS_HEADER_LENGTH ? content.length() : GUESS_HEADER_LENGTH; + if (length == 0) { + return null; + } + for (var mediaType : formatToMediaType.values()) { + if (mediaType.detectedXContent(content, length)) { + return mediaType; + } + } + + // fallback for json + for (int i = 0; i < length; i++) { + char c = content.charAt(i); + if (c == '{') { + return MediaType.fromMediaType("application/json"); + } + if (Character.isWhitespace(c) == false) { + break; + } + } + return null; + } + + /** + * Guesses the content type based on the provided input stream without consuming it. + * + * @deprecated the content type should not be guessed except for few cases where we effectively don't know the content type. + * The REST layer should move to reading the Content-Type header instead. There are other places where auto-detection may be needed. + * This method is deprecated to prevent usages of it from spreading further without specific reasons. + */ + @Deprecated + public static MediaType xContentType(InputStream si) throws IOException { + /* + * We need to guess the content type. To do this, we look for the first non-whitespace character and then try to guess the content + * type on the GUESS_HEADER_LENGTH bytes that follow. We do this in a way that does not modify the initial read position in the + * underlying input stream. This is why the input stream must support mark/reset and why we repeatedly mark the read position and + * reset. + */ + if (si.markSupported() == false) { + throw new IllegalArgumentException("Cannot guess the xcontent type without mark/reset support on " + si.getClass()); + } + si.mark(Integer.MAX_VALUE); + try { + // scan until we find the first non-whitespace character or the end of the stream + int current; + do { + current = si.read(); + if (current == -1) { + return null; + } + } while (Character.isWhitespace((char) current)); + // now guess the content type off the next GUESS_HEADER_LENGTH bytes including the current byte + final byte[] firstBytes = new byte[GUESS_HEADER_LENGTH]; + firstBytes[0] = (byte) current; + int read = 1; + while (read < GUESS_HEADER_LENGTH) { + final int r = si.read(firstBytes, read, GUESS_HEADER_LENGTH - read); + if (r == -1) { + break; + } + read += r; + } + return mediaTypeFromBytes(firstBytes, 0, read); + } finally { + si.reset(); + } + + } + + /** + * Guesses the content type based on the provided bytes. + * + * @deprecated the content type should not be guessed except for few cases where we effectively don't know the content type. 
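The deprecated sniffing path above can be exercised directly. A small sketch (class name ours) showing the whitespace skip and the JSON `{` fallback; the exact detection behavior of non-JSON types is an assumption based on the `detectedXContent` loop shown:

```java
import org.opensearch.core.xcontent.MediaType;
import org.opensearch.core.xcontent.MediaTypeRegistry;

public class ContentSniffDemo {
    @SuppressWarnings("deprecation")
    public static void main(String[] args) {
        // Leading whitespace is skipped; a '{' triggers the JSON fallback
        MediaType guessed = MediaTypeRegistry.xContentType("   {\"field\": 1}");
        System.out.println(guessed == MediaTypeRegistry.JSON); // true

        // Content that no registered type recognizes yields null
        System.out.println(MediaTypeRegistry.xContentType("plain text")); // null
    }
}
```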
+ * The REST layer should move to reading the Content-Type header instead. There are other places where auto-detection may be needed. + * This method is deprecated to prevent usages of it from spreading further without specific reasons. + */ + @Deprecated + public static MediaType xContentType(BytesReference bytes) { + if (bytes instanceof BytesArray) { + final BytesArray array = (BytesArray) bytes; + return mediaTypeFromBytes(array.array(), array.offset(), array.length()); + } + try { + final InputStream inputStream = bytes.streamInput(); + assert inputStream.markSupported(); + return xContentType(inputStream); + } catch (IOException e) { + assert false : "Should not happen, we're just reading bytes from memory"; + throw new UncheckedIOException(e); + } + } + + /** + * Guesses the content type based on the provided bytes. + * + * @deprecated the content type should not be guessed except for few cases where we effectively don't know the content type. + * The REST layer should move to reading the Content-Type header instead. There are other places where auto-detection may be needed. + * This method is deprecated to prevent usages of it from spreading further without specific reasons. + */ + @Deprecated + public static MediaType mediaTypeFromBytes(final byte[] data, int offset, int length) { + int totalLength = data.length; + if (totalLength == 0 || length == 0) { + return null; + } else if ((offset + length) > totalLength) { + return null; + } + for (var mediaType : formatToMediaType.values()) { + if (mediaType.detectedXContent(data, offset, length)) { + return mediaType; + } + } + + // a last chance for JSON + int jsonStart = 0; + // JSON may be preceded by UTF-8 BOM + if (length > 3 && data[offset] == (byte) 0xEF && data[offset + 1] == (byte) 0xBB && data[offset + 2] == (byte) 0xBF) { + jsonStart = 3; + } + + for (int i = jsonStart; i < length; i++) { + byte b = data[offset + i]; + if (b == '{') { + return fromMediaType("application/json"); + } + if (Character.isWhitespace(b) == false) { + break; + } + } + + return null; + } + + /** + * parsing media type that follows https://tools.ietf.org/html/rfc7231#section-3.1.1.1 + * @param headerValue a header value from Accept or Content-Type + * @return a parsed media-type + */ + public static ParsedMediaType parseMediaType(String headerValue) { + if (headerValue != null) { + String[] split = headerValue.toLowerCase(Locale.ROOT).split(";"); + + String[] typeSubtype = split[0].trim().split("/"); + if (typeSubtype.length == 2) { + String type = typeSubtype[0]; + String subtype = typeSubtype[1]; + MediaType mediaType = typeWithSubtypeToMediaType.get(type + "/" + subtype); + if (mediaType != null) { + Map<String, String> parameters = new HashMap<>(); + for (int i = 1; i < split.length; i++) { + // spaces are allowed between parameters, but not between '=' sign + String[] keyValueParam = split[i].trim().split("="); + if (keyValueParam.length != 2 || hasSpaces(keyValueParam[0]) || hasSpaces(keyValueParam[1])) { + return null; + } + parameters.put(keyValueParam[0], keyValueParam[1]); + } + return new ParsedMediaType(mediaType, parameters); + } + } + + } + return null; + } + + private static boolean hasSpaces(String s) { + return s.trim().equals(s) == false; + } + + /** + * A media type object that contains all the information provided on a Content-Type or Accept header + */ + public static class ParsedMediaType { + private final Map<String, String> parameters; + private final MediaType mediaType; + + public ParsedMediaType(MediaType mediaType, 
Map<String, String> parameters) { + this.parameters = parameters; + this.mediaType = mediaType; + } + + public MediaType getMediaType() { + return mediaType; + } + + public Map<String, String> getParameters() { + return parameters; + } + } + + private static void setDefaultMediaType(final MediaType mediaType) { + if (DEFAULT_MEDIA_TYPE != null) { + throw new RuntimeException( + "unable to reset the default media type from current default [" + DEFAULT_MEDIA_TYPE + "] to [" + mediaType + "]" + ); + } else { + DEFAULT_MEDIA_TYPE = mediaType; + } + } + + public static MediaType getDefaultMediaType() { + return DEFAULT_MEDIA_TYPE; + } +} diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/NamedXContentRegistry.java b/libs/core/src/main/java/org/opensearch/core/xcontent/NamedXContentRegistry.java index 10718ba98fe17..9d876825c5196 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/NamedXContentRegistry.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/NamedXContentRegistry.java @@ -33,6 +33,7 @@ package org.opensearch.core.xcontent; import org.opensearch.common.CheckedFunction; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import java.io.IOException; @@ -49,8 +50,9 @@ /** * Main registry for serializable content (e.g., field mappers, aggregations) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NamedXContentRegistry { /** * The empty {@link NamedXContentRegistry} for use when you are sure that you aren't going to call @@ -64,6 +66,7 @@ public class NamedXContentRegistry { /** * An entry in the {@linkplain NamedXContentRegistry} containing the name of the object and the parser that can parse it. */ + @PublicApi(since = "1.0.0") public static class Entry { /** The class that this entry can read. */ public final Class<?> categoryClass; diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/ObjectParser.java b/libs/core/src/main/java/org/opensearch/core/xcontent/ObjectParser.java index 365b36c755dd2..04d0bce27c04f 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/ObjectParser.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/ObjectParser.java @@ -32,6 +32,7 @@ package org.opensearch.core.xcontent; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import java.io.IOException; @@ -83,8 +84,9 @@ * It's highly recommended to use the high level declare methods like {@link #declareString(BiConsumer, ParseField)} instead of * {@link #declareField} which can be used to implement exceptional parsing operations not covered by the high level methods. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ObjectParser<Value, Context> extends AbstractObjectParser<Value, Context> implements BiFunction<XContentParser, Context, Value>, diff --git a/server/src/main/java/org/opensearch/common/xcontent/ObjectParserHelper.java b/libs/core/src/main/java/org/opensearch/core/xcontent/ObjectParserHelper.java similarity index 84% rename from server/src/main/java/org/opensearch/common/xcontent/ObjectParserHelper.java rename to libs/core/src/main/java/org/opensearch/core/xcontent/ObjectParserHelper.java index 77c13dc5de60e..b29ca9dea56c0 100644 --- a/server/src/main/java/org/opensearch/common/xcontent/ObjectParserHelper.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/ObjectParserHelper.java @@ -30,17 +30,12 @@ * GitHub history for details. */ -package org.opensearch.common.xcontent; +package org.opensearch.core.xcontent; import org.opensearch.common.CheckedFunction; import org.opensearch.core.ParseField; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.core.xcontent.AbstractObjectParser; -import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ObjectParser.ValueType; -import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.xcontent.XContentParser; import java.io.IOException; import java.util.function.BiConsumer; @@ -62,7 +57,7 @@ public void declareRawObject( final ParseField field ) { final CheckedFunction<XContentParser, BytesReference, IOException> bytesParser = p -> { - try (XContentBuilder builder = JsonXContent.contentBuilder()) { + try (XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder()) { builder.copyCurrentStructure(p); return BytesReference.bytes(builder); } diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/ToXContent.java b/libs/core/src/main/java/org/opensearch/core/xcontent/ToXContent.java index 90dd0cbfb9a1a..ee8dad198df09 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/ToXContent.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/ToXContent.java @@ -33,6 +33,7 @@ package org.opensearch.core.xcontent; import org.opensearch.common.Booleans; +import org.opensearch.common.annotation.PublicApi; import java.io.IOException; import java.util.Map; @@ -42,15 +43,17 @@ * The output may or may not be a value object. Objects implementing {@link ToXContentObject} output a valid value * but those that don't may or may not require emitting a startObject and an endObject. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ToXContent { /** * Base parameters class * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") interface Params { String param(String key); diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/XContent.java b/libs/core/src/main/java/org/opensearch/core/xcontent/XContent.java index dbc0041af42b5..1ebdd69d2b7a3 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/XContent.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/XContent.java @@ -32,6 +32,8 @@ package org.opensearch.core.xcontent; +import org.opensearch.common.annotation.PublicApi; + import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -41,7 +43,10 @@ /** * A generic abstraction on top of handling content, inspired by JSON and pull parsing. 
+ * + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface XContent { /** * The type this content handles and produces. diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilder.java b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilder.java index 061837d27ed0a..976f353100c55 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilder.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilder.java @@ -32,6 +32,9 @@ package org.opensearch.core.xcontent; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.bytes.BytesReference; + import java.io.ByteArrayOutputStream; import java.io.Closeable; import java.io.Flushable; @@ -59,6 +62,7 @@ /** * A utility to build XContent (ie json). */ +@PublicApi(since = "1.0.0") public final class XContentBuilder implements Closeable, Flushable { /** @@ -151,6 +155,14 @@ public static XContentBuilder builder(XContent xContent, Set<String> includes, S DATE_TRANSFORMERS = Collections.unmodifiableMap(dateTransformers); } + /** + * Returns a string representation of the builder (only applicable for text based xcontent). + */ + @Override + public String toString() { + return BytesReference.bytes(this).utf8ToString(); + } + /** * The writer interface for the serializable content builder * @@ -714,7 +726,7 @@ public XContentBuilder value(byte[] value, int offset, int length) throws IOExce /** * Writes the binary content of the given byte array as UTF-8 bytes. - * + * <p> * Use {@link XContentParser#charBuffer()} to read the value back */ public XContentBuilder utf8Value(byte[] bytes, int offset, int length) throws IOException { diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilderExtension.java b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilderExtension.java index 0535da1a584be..9b13ebb23be86 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilderExtension.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilderExtension.java @@ -37,7 +37,7 @@ /** * This interface provides a way for non-JDK classes to plug in a way to serialize to xcontent. - * + * <p> * It is <b>greatly</b> preferred that you implement {@link ToXContentFragment} * in the class for encoding, however, in some situations you may not own the * class, in which case you can add an implementation here for encoding it. @@ -63,7 +63,7 @@ public interface XContentBuilderExtension { * Used for plugging in a human readable version of a class's encoding. It is assumed that * the human readable equivalent is <b>always</b> behind the {@code toString()} method, so * this transformer returns the raw value to be used. - * + * <p> * An example implementation: * * <pre> @@ -79,7 +79,7 @@ public interface XContentBuilderExtension { /** * Used for plugging a transformer for a date or time type object into a String (or other * encodable object). 
- * + * <p> * For example: * * <pre> diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentHelper.java b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentHelper.java new file mode 100644 index 0000000000000..a99a12273a6f0 --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentHelper.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.core.xcontent; + +import org.opensearch.core.common.bytes.BytesReference; + +import java.io.IOException; + +/** + * Core XContent Helper Utilities + * + * @opensearch.internal + */ +public final class XContentHelper { + // no instance + private XContentHelper() {} + + /** + * Returns the bytes that represent the XContent output of the provided {@link ToXContent} object, using the provided + * {@link MediaType}. Wraps the output into a new anonymous object depending on the value returned + * by the {@link ToXContent#isFragment()} method. + */ + @Deprecated + public static BytesReference toXContent(ToXContent toXContent, MediaType mediaType, boolean humanReadable) throws IOException { + return toXContent(toXContent, mediaType, ToXContent.EMPTY_PARAMS, humanReadable); + } + + /** + * Returns the bytes that represent the XContent output of the provided {@link ToXContent} object, using the provided + * {@link MediaType}. Wraps the output into a new anonymous object depending on the value returned + * by the {@link ToXContent#isFragment()} method. + */ + public static BytesReference toXContent(ToXContent toXContent, MediaType mediaType, ToXContent.Params params, boolean humanReadable) + throws IOException { + try (XContentBuilder builder = XContentBuilder.builder(mediaType.xContent())) { + builder.humanReadable(humanReadable); + if (toXContent.isFragment()) { + builder.startObject(); + } + toXContent.toXContent(builder, params); + if (toXContent.isFragment()) { + builder.endObject(); + } + return BytesReference.bytes(builder); + } + } +} diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParser.java b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParser.java index 328eaa4bc36e9..85c3579b74cd5 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParser.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParser.java @@ -33,6 +33,7 @@ package org.opensearch.core.xcontent; import org.opensearch.common.CheckedFunction; +import org.opensearch.common.annotation.PublicApi; import java.io.Closeable; import java.io.IOException; @@ -44,17 +45,18 @@ /** * Interface for pull - parsing {@link XContent} see {@code XContentType} for supported types.
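The new `XContentHelper.toXContent` above wraps fragments (`isFragment()` defaults to `true` on `ToXContent`) in an anonymous object. A brief sketch; `FragmentDemo` is hypothetical and the JSON media type is assumed registered:

```java
import org.opensearch.core.common.bytes.BytesReference;
import org.opensearch.core.xcontent.MediaTypeRegistry;
import org.opensearch.core.xcontent.ToXContent;
import org.opensearch.core.xcontent.XContentBuilder;
import org.opensearch.core.xcontent.XContentHelper;

public class FragmentDemo {
    public static void main(String[] args) throws Exception {
        // A fragment emits bare fields; ToXContent#isFragment() defaults to true
        ToXContent fragment = (XContentBuilder b, ToXContent.Params p) -> b.field("took", 3);

        BytesReference bytes = XContentHelper.toXContent(fragment, MediaTypeRegistry.JSON, ToXContent.EMPTY_PARAMS, false);

        // The helper wrapped the bare field in an anonymous object
        System.out.println(bytes.utf8ToString()); // {"took":3}
    }
}
```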
- * + * <p> * To obtain an instance of this class use the following pattern: * * <pre> - * XContentType xContentType = XContentType.JSON; - * XContentParser parser = xContentType.xContent().createParser( + * MediaType mediaType = MediaTypeRegistry.JSON; + * XContentParser parser = mediaType.xContent().createParser( * NamedXContentRegistry.EMPTY, ParserField."{\"key\" : \"value\"}"); * </pre> * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface XContentParser extends Closeable { /** @@ -202,11 +204,11 @@ <T> Map<String, T> map(Supplier<Map<String, T>> mapFactory, CheckedFunction<XCon /** * Method that can be used to determine whether calling of textCharacters() would be the most efficient way to * access textual content for the event parser currently points to. - * + * <p> * Default implementation simply returns false since only actual * implementation class has knowledge of its internal buffering * state. - * + * <p> * This method shouldn't be used to check if the token contains text or not. */ boolean hasTextCharacters(); diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParserUtils.java b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParserUtils.java index fff3d5f83f82e..b10be393f9adb 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParserUtils.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParserUtils.java @@ -33,8 +33,8 @@ package org.opensearch.core.xcontent; import org.opensearch.core.common.ParsingException; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.xcontent.XContentParser.Token; import java.io.IOException; @@ -142,10 +142,10 @@ public static Object parseFieldsValue(XContentParser parser) throws IOException * This method expects that the current field name is the concatenation of a type, a delimiter and a name * (ex: terms#foo where "terms" refers to the type of a registered {@link NamedXContentRegistry.Entry}, * "#" is the delimiter and "foo" the name of the object to parse). - * + * <p> * It also expected that following this field name is either an Object or an array xContent structure and * the cursor points to the start token of this structure. - * + * <p> * The method splits the field's name to extract the type and name and then parses the object * using the {@link XContentParser#namedObject(Class, String, Object)} method. * diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentSubParser.java b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentSubParser.java index d1cdda4aeb8be..337cf9f95fe5f 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentSubParser.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentSubParser.java @@ -43,7 +43,7 @@ /** * Wrapper for a XContentParser that makes a single object/array look like a complete document. - * + * <p> * The wrapper prevents the parsing logic to consume tokens outside of the wrapped object as well * as skipping to the end of the object in case of a parsing error. The wrapper is intended to be * used for parsing objects that should be ignored if they are malformed. 
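A runnable variant of the parser-creation pattern from the `XContentParser` javadoc above, with the `DeprecationHandler` argument spelled out; `ParserDemo` is our name and the JSON media type is assumed registered:

```java
import org.opensearch.core.xcontent.DeprecationHandler;
import org.opensearch.core.xcontent.MediaTypeRegistry;
import org.opensearch.core.xcontent.NamedXContentRegistry;
import org.opensearch.core.xcontent.XContentParser;

public class ParserDemo {
    public static void main(String[] args) throws Exception {
        try (XContentParser parser = MediaTypeRegistry.JSON.xContent()
            .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, "{\"key\":\"value\"}")) {
            parser.nextToken();                   // START_OBJECT
            parser.nextToken();                   // FIELD_NAME
            String field = parser.currentName();  // "key"
            parser.nextToken();                   // VALUE_STRING
            System.out.println(field + " = " + parser.text()); // key = value
        }
    }
}
```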
diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/spi/MediaTypeProvider.java b/libs/core/src/main/java/org/opensearch/core/xcontent/spi/MediaTypeProvider.java new file mode 100644 index 0000000000000..eeaadc1698df6 --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/spi/MediaTypeProvider.java @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.core.xcontent.spi; + +import org.opensearch.core.xcontent.MediaType; + +import java.util.List; +import java.util.Map; + +/** + * Service Provider Interface for plugins, modules, extensions providing + * their own Media Types + * + * @opensearch.experimental + * @opensearch.api + */ +public interface MediaTypeProvider { + /** Extensions that implement their own concrete {@link MediaType}s provide them through this interface method */ + List<MediaType> getMediaTypes(); + + /** Extensions that implement additional {@link MediaType} aliases provide them through this interface method */ + Map<String, MediaType> getAdditionalMediaTypes(); +} diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/spi/package-info.java b/libs/core/src/main/java/org/opensearch/core/xcontent/spi/package-info.java new file mode 100644 index 0000000000000..67ccd981dafa8 --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/spi/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Service Provider Interface for extensible media types */ +package org.opensearch.core.xcontent.spi; diff --git a/libs/core/src/main/java/org/opensearch/semver/SemverRange.java b/libs/core/src/main/java/org/opensearch/semver/SemverRange.java new file mode 100644 index 0000000000000..da87acc7124aa --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/semver/SemverRange.java @@ -0,0 +1,170 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.semver; + +import org.opensearch.Version; +import org.opensearch.common.Nullable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.semver.expr.Caret; +import org.opensearch.semver.expr.Equal; +import org.opensearch.semver.expr.Expression; +import org.opensearch.semver.expr.Tilde; + +import java.io.IOException; +import java.util.Objects; +import java.util.Optional; + +import static java.util.Arrays.stream; + +/** + * Represents a single semver range that allows for specifying which {@code org.opensearch.Version}s satisfy the range. + * It is composed of a range version and a range operator. Following are the supported operators: + * <ul> + * <li>'=' Requires exact match with the range version. For example, =1.2.3 range would match only 1.2.3</li> + * <li>'~' Allows for patch version variability starting from the range version. For example, ~1.2.3 range would match versions greater than or equal to 1.2.3 but less than 1.3.0</li> + * <li>'^' Allows for patch and minor version variability starting from the range version. 
For example, ^1.2.3 range would match versions greater than or equal to 1.2.3 but less than 2.0.0</li> + * </ul> + */ +public class SemverRange implements ToXContentFragment { + + private final Version rangeVersion; + private final RangeOperator rangeOperator; + + public SemverRange(final Version rangeVersion, final RangeOperator rangeOperator) { + this.rangeVersion = rangeVersion; + this.rangeOperator = rangeOperator; + } + + /** + * Constructs a {@code SemverRange} from its string representation. + * @param range given range + * @return a {@code SemverRange} + */ + public static SemverRange fromString(final String range) { + RangeOperator rangeOperator = RangeOperator.fromRange(range); + String version = range.replaceFirst(rangeOperator.asEscapedString(), ""); + if (!Version.stringHasLength(version)) { + throw new IllegalArgumentException("Version cannot be empty"); + } + return new SemverRange(Version.fromString(version), rangeOperator); + } + + /** + * Return the range operator for this range. + * @return range operator + */ + public RangeOperator getRangeOperator() { + return rangeOperator; + } + + /** + * Return the version for this range. + * @return the range version + */ + public Version getRangeVersion() { + return rangeVersion; + } + + /** + * Check if range is satisfied by given version string. + * + * @param versionToEvaluate version to check + * @return {@code true} if range is satisfied by version, {@code false} otherwise + */ + public boolean isSatisfiedBy(final String versionToEvaluate) { + return isSatisfiedBy(Version.fromString(versionToEvaluate)); + } + + /** + * Check if range is satisfied by given version. + * + * @param versionToEvaluate version to check + * @return {@code true} if range is satisfied by version, {@code false} otherwise + * @see #isSatisfiedBy(String) + */ + public boolean isSatisfiedBy(final Version versionToEvaluate) { + return this.rangeOperator.expression.evaluate(this.rangeVersion, versionToEvaluate); + } + + @Override + public boolean equals(@Nullable final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + SemverRange range = (SemverRange) o; + return Objects.equals(rangeVersion, range.rangeVersion) && rangeOperator == range.rangeOperator; + } + + @Override + public int hashCode() { + return Objects.hash(rangeVersion, rangeOperator); + } + + @Override + public String toString() { + return rangeOperator.asString() + rangeVersion; + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + return builder.value(toString()); + } + + /** + * A range operator. + */ + public enum RangeOperator { + + EQ("=", new Equal()), + TILDE("~", new Tilde()), + CARET("^", new Caret()), + DEFAULT("", new Equal()); + + private final String operator; + private final Expression expression; + + RangeOperator(final String operator, final Expression expression) { + this.operator = operator; + this.expression = expression; + } + + /** + * String representation of the range operator. + * + * @return range operator as string + */ + public String asString() { + return operator; + } + + /** + * Escaped string representation of the range operator, + * if operator is a regex character. 
+ * + * @return range operator as escaped string, if operator is a regex character + */ + public String asEscapedString() { + if (Objects.equals(operator, "^")) { + return "\\^"; + } + return operator; + } + + public static RangeOperator fromRange(final String range) { + Optional<RangeOperator> rangeOperator = stream(values()).filter( + operator -> operator != DEFAULT && range.startsWith(operator.asString()) + ).findFirst(); + return rangeOperator.orElse(DEFAULT); + } + } +} diff --git a/libs/core/src/main/java/org/opensearch/semver/expr/Caret.java b/libs/core/src/main/java/org/opensearch/semver/expr/Caret.java new file mode 100644 index 0000000000000..ce2b74dde0865 --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/semver/expr/Caret.java @@ -0,0 +1,32 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.semver.expr; + +import org.opensearch.Version; + +/** + * Expression to evaluate version compatibility allowing for minor and patch version variability. + */ +public class Caret implements Expression { + + /** + * Checks if the given version is compatible with the range version allowing for minor and + * patch version variability. + * Allows all versions starting from the rangeVersion upto next major version (exclusive). + * @param rangeVersion the version specified in range + * @param versionToEvaluate the version to evaluate + * @return {@code true} if the versions are compatible {@code false} otherwise + */ + @Override + public boolean evaluate(final Version rangeVersion, final Version versionToEvaluate) { + Version lower = rangeVersion; + Version upper = Version.fromString((rangeVersion.major + 1) + ".0.0"); + return versionToEvaluate.onOrAfter(lower) && versionToEvaluate.before(upper); + } +} diff --git a/libs/core/src/main/java/org/opensearch/semver/expr/Equal.java b/libs/core/src/main/java/org/opensearch/semver/expr/Equal.java new file mode 100644 index 0000000000000..d3e1d63060b77 --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/semver/expr/Equal.java @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.semver.expr; + +import org.opensearch.Version; + +/** + * Expression to evaluate equality of versions. + */ +public class Equal implements Expression { + + /** + * Checks if a given version matches a certain range version. + * + * @param rangeVersion the version specified in range + * @param versionToEvaluate the version to evaluate + * @return {@code true} if the versions are equal {@code false} otherwise + */ + @Override + public boolean evaluate(final Version rangeVersion, final Version versionToEvaluate) { + return versionToEvaluate.equals(rangeVersion); + } +} diff --git a/libs/core/src/main/java/org/opensearch/semver/expr/Expression.java b/libs/core/src/main/java/org/opensearch/semver/expr/Expression.java new file mode 100644 index 0000000000000..68bb4e249836a --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/semver/expr/Expression.java @@ -0,0 +1,26 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.semver.expr; + +import org.opensearch.Version; + +/** + * An evaluation expression. + */ +public interface Expression { + + /** + * Evaluates an expression. + * + * @param rangeVersion the version specified in range + * @param versionToEvaluate the version to evaluate + * @return the result of the expression evaluation + */ + boolean evaluate(final Version rangeVersion, final Version versionToEvaluate); +} diff --git a/libs/core/src/main/java/org/opensearch/semver/expr/Tilde.java b/libs/core/src/main/java/org/opensearch/semver/expr/Tilde.java new file mode 100644 index 0000000000000..5f62ffe62ddeb --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/semver/expr/Tilde.java @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.semver.expr; + +import org.opensearch.Version; + +/** + * Expression to evaluate version compatibility allowing patch version variability. + */ +public class Tilde implements Expression { + + /** + * Checks if the given version is compatible with a range version allowing for patch version variability. + * Allows all versions starting from the rangeVersion upto next minor version (exclusive). + * @param rangeVersion the version specified in range + * @param versionToEvaluate the version to evaluate + * @return {@code true} if the versions are compatible {@code false} otherwise + */ + @Override + public boolean evaluate(final Version rangeVersion, final Version versionToEvaluate) { + Version lower = rangeVersion; + Version upper = Version.fromString(rangeVersion.major + "." + (rangeVersion.minor + 1) + "." + 0); + return versionToEvaluate.onOrAfter(lower) && versionToEvaluate.before(upper); + } +} diff --git a/libs/core/src/main/java/org/opensearch/semver/expr/package-info.java b/libs/core/src/main/java/org/opensearch/semver/expr/package-info.java new file mode 100644 index 0000000000000..06cf9feaaaf8f --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/semver/expr/package-info.java @@ -0,0 +1,9 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +/** Expressions library module */ +package org.opensearch.semver.expr; diff --git a/libs/core/src/main/java/org/opensearch/semver/package-info.java b/libs/core/src/main/java/org/opensearch/semver/package-info.java new file mode 100644 index 0000000000000..ada935582d408 --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/semver/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
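A usage sketch for the `SemverRange` API introduced above; the expected results follow from the `Tilde`, `Caret`, and `Equal` expressions as written, and `SemverDemo` is our wrapper name:

```java
import org.opensearch.semver.SemverRange;

public class SemverDemo {
    public static void main(String[] args) {
        // '~' allows patch drift only: matches [1.2.3, 1.3.0)
        SemverRange tilde = SemverRange.fromString("~1.2.3");
        System.out.println(tilde.isSatisfiedBy("1.2.9")); // true
        System.out.println(tilde.isSatisfiedBy("1.3.0")); // false

        // '^' allows minor and patch drift: matches [1.2.3, 2.0.0)
        SemverRange caret = SemverRange.fromString("^1.2.3");
        System.out.println(caret.isSatisfiedBy("1.9.0")); // true
        System.out.println(caret.isSatisfiedBy("2.0.0")); // false

        // A bare version (or '=') requires an exact match via Equal
        System.out.println(SemverRange.fromString("1.2.3").isSatisfiedBy("1.2.3")); // true
    }
}
```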
+ */ + +/** Semver library module */ +package org.opensearch.semver; diff --git a/libs/core/src/main/resources/META-INF/services/org.opensearch.core.compress.spi.CompressorProvider b/libs/core/src/main/resources/META-INF/services/org.opensearch.core.compress.spi.CompressorProvider new file mode 100644 index 0000000000000..181b802952c60 --- /dev/null +++ b/libs/core/src/main/resources/META-INF/services/org.opensearch.core.compress.spi.CompressorProvider @@ -0,0 +1,9 @@ +# +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. +# + +org.opensearch.core.compress.spi.DefaultCompressorProvider diff --git a/server/src/test/java/org/opensearch/action/ActionListenerTests.java b/libs/core/src/test/java/org/opensearch/core/action/ActionListenerTests.java similarity index 99% rename from server/src/test/java/org/opensearch/action/ActionListenerTests.java rename to libs/core/src/test/java/org/opensearch/core/action/ActionListenerTests.java index e56deb6088722..8d2bef3db68ea 100644 --- a/server/src/test/java/org/opensearch/action/ActionListenerTests.java +++ b/libs/core/src/test/java/org/opensearch/core/action/ActionListenerTests.java @@ -29,7 +29,7 @@ * GitHub history for details. */ -package org.opensearch.action; +package org.opensearch.core.action; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.common.CheckedConsumer; diff --git a/server/src/test/java/org/opensearch/action/NotifyOnceListenerTests.java b/libs/core/src/test/java/org/opensearch/core/action/NotifyOnceListenerTests.java similarity index 98% rename from server/src/test/java/org/opensearch/action/NotifyOnceListenerTests.java rename to libs/core/src/test/java/org/opensearch/core/action/NotifyOnceListenerTests.java index 79593f85c2890..948cd752a27c3 100644 --- a/server/src/test/java/org/opensearch/action/NotifyOnceListenerTests.java +++ b/libs/core/src/test/java/org/opensearch/core/action/NotifyOnceListenerTests.java @@ -30,7 +30,7 @@ * GitHub history for details. 
*/ -package org.opensearch.action; +package org.opensearch.core.action; import org.opensearch.test.OpenSearchTestCase; diff --git a/libs/core/src/test/java/org/opensearch/core/action/support/DefaultShardOperationFailedExceptionTests.java b/libs/core/src/test/java/org/opensearch/core/action/support/DefaultShardOperationFailedExceptionTests.java index d037b062e95d0..ad52f2dc12b8f 100644 --- a/libs/core/src/test/java/org/opensearch/core/action/support/DefaultShardOperationFailedExceptionTests.java +++ b/libs/core/src/test/java/org/opensearch/core/action/support/DefaultShardOperationFailedExceptionTests.java @@ -39,18 +39,19 @@ import org.apache.lucene.store.LockObtainFailedException; import org.opensearch.OpenSearchException; import org.opensearch.action.support.broadcast.BroadcastShardOperationFailedException; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; -import org.opensearch.core.xcontent.XContent; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; import org.opensearch.test.OpenSearchTestCase; import java.io.EOFException; @@ -92,7 +93,7 @@ public void testToXContent() throws IOException { assertEquals( "{\"shard\":-1,\"index\":null,\"status\":\"INTERNAL_SERVER_ERROR\"," + "\"reason\":{\"type\":\"exception\",\"reason\":\"foo\"}}", - Strings.toString(XContentType.JSON, exception) + Strings.toString(MediaTypeRegistry.JSON, exception) ); } { @@ -102,7 +103,7 @@ public void testToXContent() throws IOException { assertEquals( "{\"shard\":-1,\"index\":null,\"status\":\"INTERNAL_SERVER_ERROR\",\"reason\":{\"type\":\"exception\"," + "\"reason\":\"foo\",\"caused_by\":{\"type\":\"illegal_argument_exception\",\"reason\":\"bar\"}}}", - Strings.toString(XContentType.JSON, exception) + Strings.toString(MediaTypeRegistry.JSON, exception) ); } { @@ -112,7 +113,7 @@ public void testToXContent() throws IOException { assertEquals( "{\"shard\":2,\"index\":\"test\",\"status\":\"INTERNAL_SERVER_ERROR\"," + "\"reason\":{\"type\":\"illegal_state_exception\",\"reason\":\"bar\"}}", - Strings.toString(XContentType.JSON, exception) + Strings.toString(MediaTypeRegistry.JSON, exception) ); } { @@ -124,7 +125,7 @@ public void testToXContent() throws IOException { assertEquals( "{\"shard\":1,\"index\":\"test\",\"status\":\"BAD_REQUEST\"," + "\"reason\":{\"type\":\"illegal_argument_exception\",\"reason\":\"foo\"}}", - Strings.toString(XContentType.JSON, exception) + Strings.toString(MediaTypeRegistry.JSON, exception) ); } } diff --git a/libs/core/src/test/java/org/opensearch/core/common/StringsTests.java b/libs/core/src/test/java/org/opensearch/core/common/StringsTests.java index 532251e02e685..b79bb6fc89f9e 100644 --- a/libs/core/src/test/java/org/opensearch/core/common/StringsTests.java 
+++ b/libs/core/src/test/java/org/opensearch/core/common/StringsTests.java @@ -9,10 +9,54 @@ package org.opensearch.core.common; import org.opensearch.common.util.set.Sets; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.test.OpenSearchTestCase; +import java.util.Collections; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; + /** tests for Strings utility class */ public class StringsTests extends OpenSearchTestCase { + public void testIsAllOrWildCardString() { + assertThat(Strings.isAllOrWildcard("_all"), is(true)); + assertThat(Strings.isAllOrWildcard("*"), is(true)); + assertThat(Strings.isAllOrWildcard("foo"), is(false)); + assertThat(Strings.isAllOrWildcard(""), is(false)); + assertThat(Strings.isAllOrWildcard((String) null), is(false)); + } + + public void testSubstring() { + assertEquals(null, Strings.substring(null, 0, 1000)); + assertEquals("foo", Strings.substring("foo", 0, 1000)); + assertEquals("foo", Strings.substring("foo", 0, 3)); + assertEquals("oo", Strings.substring("foo", 1, 3)); + assertEquals("oo", Strings.substring("foo", 1, 100)); + assertEquals("f", Strings.substring("foo", 0, 1)); + } + + public void testCleanTruncate() { + assertEquals(null, Strings.cleanTruncate(null, 10)); + assertEquals("foo", Strings.cleanTruncate("foo", 10)); + assertEquals("foo", Strings.cleanTruncate("foo", 3)); + // Throws out high surrogates + assertEquals("foo", Strings.cleanTruncate("foo\uD83D\uDEAB", 4)); + // But will keep the whole character + assertEquals("foo\uD83D\uDEAB", Strings.cleanTruncate("foo\uD83D\uDEAB", 5)); + /* + * Doesn't take care around combining marks. This example has its + * meaning changed because that last codepoint is supposed to combine + * backwards into the final "o" and be represented as the "o" with a + * circle around it with a slash through it. As in "no 'o's allowed + * here.
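The cleanTruncate assertions above all follow from one rule: never cut between a high and a low surrogate, and drop a stranded high surrogate rather than emit half a character. A hedged sketch of that rule (hypothetical helper, not the actual org.opensearch.core.common.Strings implementation):

// Hedged sketch: truncate to at most 'length' UTF-16 units without splitting
// a surrogate pair. "foo" + U+1F6AB truncated at 4 -> "foo" (the stranded
// high surrogate is thrown out); at 5 the whole pair survives.
static String surrogateSafeTruncate(String s, int length) {
    if (s == null || s.length() <= length) {
        return s;
    }
    if (length == 0) {
        return "";
    }
    if (Character.isHighSurrogate(s.charAt(length - 1))) {
        length--; // back up so the pair is dropped as a unit
    }
    return s.substring(0, length);
}

As the combining-mark comment above notes, this is only pair-safe, not grapheme-safe: a combining codepoint can still be severed from its base character.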
+ */ + assertEquals("o", org.opensearch.core.common.Strings.cleanTruncate("o\uD83D\uDEAB", 1)); + assertEquals("", org.opensearch.core.common.Strings.cleanTruncate("foo", 0)); + } + public void testSplitStringToSet() { assertEquals(Strings.tokenizeByCommaToSet(null), Sets.newHashSet()); assertEquals(Strings.tokenizeByCommaToSet(""), Sets.newHashSet()); @@ -25,4 +69,49 @@ public void testSplitStringToSet() { assertEquals(Strings.tokenizeByCommaToSet(" aa "), Sets.newHashSet("aa")); assertEquals(Strings.tokenizeByCommaToSet(" "), Sets.newHashSet()); } + + public void testToStringToXContent() { + final ToXContent toXContent; + final boolean error; + if (randomBoolean()) { + if (randomBoolean()) { + error = false; + toXContent = (builder, params) -> builder.field("ok", "here").field("catastrophe", ""); + } else { + error = true; + toXContent = (builder, params) -> builder.startObject().field("ok", "here").field("catastrophe", "").endObject(); + } + } else { + if (randomBoolean()) { + error = false; + toXContent = (ToXContentObject) (builder, params) -> builder.startObject() + .field("ok", "here") + .field("catastrophe", "") + .endObject(); + } else { + error = true; + toXContent = (ToXContentObject) (builder, params) -> builder.field("ok", "here").field("catastrophe", ""); + } + } + + String toString = Strings.toString(MediaTypeRegistry.JSON, toXContent); + if (error) { + assertThat(toString, containsString("\"error\":\"error building toString out of XContent:")); + assertThat(toString, containsString("\"stack_trace\":")); + } else { + assertThat(toString, containsString("\"ok\":\"here\"")); + assertThat(toString, containsString("\"catastrophe\":\"\"")); + } + } + + public void testToStringToXContentWithOrWithoutParams() { + ToXContent toXContent = (builder, params) -> builder.field("color_from_param", params.param("color", "red")); + // Rely on the default value of "color" param when params are not passed + assertThat(Strings.toString(MediaTypeRegistry.JSON, toXContent), containsString("\"color_from_param\":\"red\"")); + // Pass "color" param explicitly + assertThat( + Strings.toString(MediaTypeRegistry.JSON, toXContent, new ToXContent.MapParams(Collections.singletonMap("color", "blue"))), + containsString("\"color_from_param\":\"blue\"") + ); + } } diff --git a/libs/core/src/test/java/org/opensearch/core/common/io/stream/BaseStreamTests.java b/libs/core/src/test/java/org/opensearch/core/common/io/stream/BaseStreamTests.java index 626900de7f691..646acefc09c48 100644 --- a/libs/core/src/test/java/org/opensearch/core/common/io/stream/BaseStreamTests.java +++ b/libs/core/src/test/java/org/opensearch/core/common/io/stream/BaseStreamTests.java @@ -36,12 +36,11 @@ import org.opensearch.common.CheckedBiConsumer; import org.opensearch.common.CheckedConsumer; import org.opensearch.common.CheckedFunction; +import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.collect.Tuple; import org.opensearch.core.common.settings.SecureString; -import org.opensearch.script.JodaCompatibleZonedDateTime; import org.opensearch.test.OpenSearchTestCase; import java.io.ByteArrayInputStream; @@ -49,6 +48,7 @@ import java.io.IOException; import java.time.Instant; import java.time.ZoneOffset; +import java.time.ZonedDateTime; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -400,10 +400,10 @@ public void 
testOptionalInstantSerialization() throws IOException { } } - public void testJodaDateTimeSerialization() throws IOException { + public void testJavaDateTimeSerialization() throws IOException { final BytesStreamOutput output = new BytesStreamOutput(); long millis = randomIntBetween(0, Integer.MAX_VALUE); - JodaCompatibleZonedDateTime time = new JodaCompatibleZonedDateTime(Instant.ofEpochMilli(millis), ZoneOffset.ofHours(-7)); + ZonedDateTime time = ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), ZoneOffset.ofHours(-7)); output.writeGenericValue(time); final BytesReference bytesReference = output.bytes(); diff --git a/libs/core/src/test/java/org/opensearch/core/common/io/stream/NamedWriteableRegistryTests.java b/libs/core/src/test/java/org/opensearch/core/common/io/stream/NamedWriteableRegistryTests.java index 77e5af9036cea..96f2b93524067 100644 --- a/libs/core/src/test/java/org/opensearch/core/common/io/stream/NamedWriteableRegistryTests.java +++ b/libs/core/src/test/java/org/opensearch/core/common/io/stream/NamedWriteableRegistryTests.java @@ -32,12 +32,12 @@ package org.opensearch.core.common.io.stream; +import org.opensearch.test.OpenSearchTestCase; + import java.io.IOException; import java.util.Arrays; import java.util.Collections; -import org.opensearch.test.OpenSearchTestCase; - public class NamedWriteableRegistryTests extends OpenSearchTestCase { private static class DummyNamedWriteable implements NamedWriteable { diff --git a/server/src/test/java/org/opensearch/common/unit/ByteSizeUnitTests.java b/libs/core/src/test/java/org/opensearch/core/common/unit/ByteSizeUnitTests.java similarity index 91% rename from server/src/test/java/org/opensearch/common/unit/ByteSizeUnitTests.java rename to libs/core/src/test/java/org/opensearch/core/common/unit/ByteSizeUnitTests.java index c6bcceec99fbd..07b9131602ac3 100644 --- a/server/src/test/java/org/opensearch/common/unit/ByteSizeUnitTests.java +++ b/libs/core/src/test/java/org/opensearch/core/common/unit/ByteSizeUnitTests.java @@ -30,7 +30,7 @@ * GitHub history for details. 
*/ -package org.opensearch.common.unit; +package org.opensearch.core.common.unit; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; @@ -38,12 +38,12 @@ import java.io.IOException; -import static org.opensearch.common.unit.ByteSizeUnit.BYTES; -import static org.opensearch.common.unit.ByteSizeUnit.GB; -import static org.opensearch.common.unit.ByteSizeUnit.KB; -import static org.opensearch.common.unit.ByteSizeUnit.MB; -import static org.opensearch.common.unit.ByteSizeUnit.PB; -import static org.opensearch.common.unit.ByteSizeUnit.TB; +import static org.opensearch.core.common.unit.ByteSizeUnit.BYTES; +import static org.opensearch.core.common.unit.ByteSizeUnit.GB; +import static org.opensearch.core.common.unit.ByteSizeUnit.KB; +import static org.opensearch.core.common.unit.ByteSizeUnit.MB; +import static org.opensearch.core.common.unit.ByteSizeUnit.PB; +import static org.opensearch.core.common.unit.ByteSizeUnit.TB; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; diff --git a/server/src/test/java/org/opensearch/common/unit/ByteSizeValueTests.java b/libs/core/src/test/java/org/opensearch/core/common/unit/ByteSizeValueTests.java similarity index 97% rename from server/src/test/java/org/opensearch/common/unit/ByteSizeValueTests.java rename to libs/core/src/test/java/org/opensearch/core/common/unit/ByteSizeValueTests.java index 99c1feb78527f..def1694a72ba4 100644 --- a/server/src/test/java/org/opensearch/common/unit/ByteSizeValueTests.java +++ b/libs/core/src/test/java/org/opensearch/core/common/unit/ByteSizeValueTests.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.common.unit; +package org.opensearch.core.common.unit; import org.opensearch.OpenSearchParseException; import org.opensearch.core.common.io.stream.Writeable.Reader; @@ -336,12 +336,10 @@ public void testParseInvalidNumber() throws IOException { public void testParseFractionalNumber() throws IOException { ByteSizeUnit unit = randomValueOtherThan(ByteSizeUnit.BYTES, () -> randomFrom(ByteSizeUnit.values())); String fractionalValue = "23.5" + unit.getSuffix(); - ByteSizeValue instance = ByteSizeValue.parseBytesSizeValue(fractionalValue, "test"); - assertEquals(fractionalValue, instance.toString()); - assertWarnings( - "Fractional bytes values are deprecated. 
Use non-fractional bytes values instead: [" - + fractionalValue - + "] found for setting [test]" + // test that an exception is thrown: fractional byte size values have been deprecated since Legacy 6.2 + OpenSearchParseException e = expectThrows( + OpenSearchParseException.class, + () -> ByteSizeValue.parseBytesSizeValue(fractionalValue, "test") + ); } diff --git a/libs/core/src/test/java/org/opensearch/core/internal/net/NetUtilsTests.java b/libs/core/src/test/java/org/opensearch/core/internal/net/NetUtilsTests.java index c97b74ee0d624..c9b9b7c9918ea 100644 --- a/libs/core/src/test/java/org/opensearch/core/internal/net/NetUtilsTests.java +++ b/libs/core/src/test/java/org/opensearch/core/internal/net/NetUtilsTests.java @@ -33,8 +33,8 @@ package org.opensearch.core.internal.net; import org.apache.lucene.util.Constants; -import org.opensearch.common.util.net.NetUtils; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.common.util.net.NetUtils; import org.opensearch.test.OpenSearchTestCase; import java.lang.Runtime.Version; diff --git a/libs/core/src/test/java/org/opensearch/core/util/BytesRefUtilsTests.java b/libs/core/src/test/java/org/opensearch/core/util/BytesRefUtilsTests.java index 421263b883f2a..214f9292ae3a5 100644 --- a/libs/core/src/test/java/org/opensearch/core/util/BytesRefUtilsTests.java +++ b/libs/core/src/test/java/org/opensearch/core/util/BytesRefUtilsTests.java @@ -12,7 +12,6 @@ import org.apache.lucene.util.BytesRefArray; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.Counter; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.test.OpenSearchTestCase; import java.nio.ByteBuffer; @@ -90,8 +89,12 @@ public void testSortByteRefArray() { } public void testBytesToLong() { - final long value = randomLong(); - final BytesReference buffer = BytesReference.fromByteBuffer(ByteBuffer.allocate(8).putLong(value).flip()); - assertThat(BytesRefUtils.bytesToLong(buffer.toBytesRef()), equalTo(value)); + long value = randomLong(); + int paddingStart = randomIntBetween(0, 10); + int paddingEnd = randomIntBetween(0, 10); + byte[] bytes = new byte[paddingStart + Long.BYTES + paddingEnd]; + ByteBuffer.wrap(bytes).putLong(paddingStart, value); + BytesRef bytesRef = new BytesRef(bytes, paddingStart, Long.BYTES); + assertThat(BytesRefUtils.bytesToLong(bytesRef), equalTo(value)); } } diff --git a/libs/core/src/test/java/org/opensearch/core/util/FileSystemUtilsTests.java b/libs/core/src/test/java/org/opensearch/core/util/FileSystemUtilsTests.java index a26ce634187f5..8b29378dfde12 100644 --- a/libs/core/src/test/java/org/opensearch/core/util/FileSystemUtilsTests.java +++ b/libs/core/src/test/java/org/opensearch/core/util/FileSystemUtilsTests.java @@ -32,8 +32,8 @@ package org.opensearch.core.util; -import org.apache.lucene.util.Constants; import org.apache.lucene.tests.util.LuceneTestCase.SuppressFileSystems; +import org.apache.lucene.util.Constants; import org.opensearch.common.io.PathUtils; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; diff --git a/libs/core/src/test/java/org/opensearch/semver/SemverRangeTests.java b/libs/core/src/test/java/org/opensearch/semver/SemverRangeTests.java new file mode 100644 index 0000000000000..af1d95b2561b7 --- /dev/null +++ b/libs/core/src/test/java/org/opensearch/semver/SemverRangeTests.java @@ -0,0 +1,105 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a +
* compatible open source license. + */ + +package org.opensearch.semver; + +import org.opensearch.test.OpenSearchTestCase; + +public class SemverRangeTests extends OpenSearchTestCase { + + public void testRangeWithEqualsOperator() { + SemverRange range = SemverRange.fromString("=1.2.3"); + assertEquals(range.getRangeOperator(), SemverRange.RangeOperator.EQ); + assertTrue(range.isSatisfiedBy("1.2.3")); + assertFalse(range.isSatisfiedBy("1.2.4")); + assertFalse(range.isSatisfiedBy("1.3.3")); + assertFalse(range.isSatisfiedBy("2.2.3")); + } + + public void testRangeWithDefaultOperator() { + SemverRange range = SemverRange.fromString("1.2.3"); + assertEquals(range.getRangeOperator(), SemverRange.RangeOperator.DEFAULT); + assertTrue(range.isSatisfiedBy("1.2.3")); + assertFalse(range.isSatisfiedBy("1.2.4")); + assertFalse(range.isSatisfiedBy("1.3.3")); + assertFalse(range.isSatisfiedBy("2.2.3")); + } + + public void testRangeWithTildeOperator() { + SemverRange range = SemverRange.fromString("~2.3.4"); + assertEquals(range.getRangeOperator(), SemverRange.RangeOperator.TILDE); + assertTrue(range.isSatisfiedBy("2.3.4")); + assertTrue(range.isSatisfiedBy("2.3.5")); + assertTrue(range.isSatisfiedBy("2.3.12")); + + assertFalse(range.isSatisfiedBy("2.3.0")); + assertFalse(range.isSatisfiedBy("2.3.3")); + assertFalse(range.isSatisfiedBy("2.4.0")); + assertFalse(range.isSatisfiedBy("3.0.0")); + } + + public void testRangeWithCaretOperator() { + SemverRange range = SemverRange.fromString("^2.3.4"); + assertEquals(range.getRangeOperator(), SemverRange.RangeOperator.CARET); + assertTrue(range.isSatisfiedBy("2.3.4")); + assertTrue(range.isSatisfiedBy("2.3.5")); + assertTrue(range.isSatisfiedBy("2.4.12")); + + assertFalse(range.isSatisfiedBy("2.3.3")); + assertFalse(range.isSatisfiedBy("3.0.0")); + } + + public void testInvalidRanges() { + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("")); + assertEquals("Version cannot be empty", ex.getMessage()); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("1")); + assertTrue(ex.getMessage().contains("the version needs to contain major, minor, and revision, and optionally the build")); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("1.2")); + assertTrue(ex.getMessage().contains("the version needs to contain major, minor, and revision, and optionally the build")); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("=")); + assertEquals("Version cannot be empty", ex.getMessage()); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("=1")); + assertTrue(ex.getMessage().contains("the version needs to contain major, minor, and revision, and optionally the build")); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("=1.2")); + assertTrue(ex.getMessage().contains("the version needs to contain major, minor, and revision, and optionally the build")); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("~")); + assertEquals("Version cannot be empty", ex.getMessage()); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("~1")); + assertTrue(ex.getMessage().contains("the version needs to contain major, minor, and revision, and optionally the build")); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("~1.2")); + assertTrue(ex.getMessage().contains("the version needs 
to contain major, minor, and revision, and optionally the build")); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("^")); + assertEquals("Version cannot be empty", ex.getMessage()); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("^1")); + assertTrue(ex.getMessage().contains("the version needs to contain major, minor, and revision, and optionally the build")); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("^1.2")); + assertTrue(ex.getMessage().contains("the version needs to contain major, minor, and revision, and optionally the build")); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("$")); + assertTrue(ex.getMessage().contains("the version needs to contain major, minor, and revision, and optionally the build")); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("$1")); + assertTrue(ex.getMessage().contains("the version needs to contain major, minor, and revision, and optionally the build")); + + ex = expectThrows(IllegalArgumentException.class, () -> SemverRange.fromString("$1.2")); + assertTrue(ex.getMessage().contains("the version needs to contain major, minor, and revision, and optionally the build")); + + expectThrows(NumberFormatException.class, () -> SemverRange.fromString("$1.2.3")); + } +} diff --git a/libs/core/src/test/java/org/opensearch/semver/expr/CaretTests.java b/libs/core/src/test/java/org/opensearch/semver/expr/CaretTests.java new file mode 100644 index 0000000000000..3cb168d42cda0 --- /dev/null +++ b/libs/core/src/test/java/org/opensearch/semver/expr/CaretTests.java @@ -0,0 +1,30 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.semver.expr; + +import org.opensearch.Version; +import org.opensearch.test.OpenSearchTestCase; + +public class CaretTests extends OpenSearchTestCase { + + public void testMinorAndPatchVersionVariability() { + Caret caretExpr = new Caret(); + Version rangeVersion = Version.fromString("1.2.3"); + + // Compatible versions + assertTrue(caretExpr.evaluate(rangeVersion, Version.fromString("1.2.3"))); + assertTrue(caretExpr.evaluate(rangeVersion, Version.fromString("1.2.4"))); + assertTrue(caretExpr.evaluate(rangeVersion, Version.fromString("1.3.3"))); + assertTrue(caretExpr.evaluate(rangeVersion, Version.fromString("1.9.9"))); + + // Incompatible versions + assertFalse(caretExpr.evaluate(rangeVersion, Version.fromString("1.2.2"))); + assertFalse(caretExpr.evaluate(rangeVersion, Version.fromString("2.0.0"))); + } +} diff --git a/libs/core/src/test/java/org/opensearch/semver/expr/EqualTests.java b/libs/core/src/test/java/org/opensearch/semver/expr/EqualTests.java new file mode 100644 index 0000000000000..fb090865157ed --- /dev/null +++ b/libs/core/src/test/java/org/opensearch/semver/expr/EqualTests.java @@ -0,0 +1,22 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.semver.expr; + +import org.opensearch.Version; +import org.opensearch.test.OpenSearchTestCase; + +public class EqualTests extends OpenSearchTestCase { + + public void testEquality() { + Equal equalExpr = new Equal(); + Version rangeVersion = Version.fromString("1.2.3"); + assertTrue(equalExpr.evaluate(rangeVersion, Version.fromString("1.2.3"))); + assertFalse(equalExpr.evaluate(rangeVersion, Version.fromString("1.2.4"))); + } +} diff --git a/libs/core/src/test/java/org/opensearch/semver/expr/TildeTests.java b/libs/core/src/test/java/org/opensearch/semver/expr/TildeTests.java new file mode 100644 index 0000000000000..8666611645c3a --- /dev/null +++ b/libs/core/src/test/java/org/opensearch/semver/expr/TildeTests.java @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.semver.expr; + +import org.opensearch.Version; +import org.opensearch.test.OpenSearchTestCase; + +public class TildeTests extends OpenSearchTestCase { + + public void testPatchVersionVariability() { + Tilde tildeExpr = new Tilde(); + Version rangeVersion = Version.fromString("1.2.3"); + + assertTrue(tildeExpr.evaluate(rangeVersion, Version.fromString("1.2.3"))); + assertTrue(tildeExpr.evaluate(rangeVersion, Version.fromString("1.2.4"))); + assertTrue(tildeExpr.evaluate(rangeVersion, Version.fromString("1.2.9"))); + + assertFalse(tildeExpr.evaluate(rangeVersion, Version.fromString("1.2.0"))); + assertFalse(tildeExpr.evaluate(rangeVersion, Version.fromString("1.2.2"))); + assertFalse(tildeExpr.evaluate(rangeVersion, Version.fromString("1.3.0"))); + assertFalse(tildeExpr.evaluate(rangeVersion, Version.fromString("2.0.0"))); + } +} diff --git a/libs/dissect/src/main/java/org/opensearch/dissect/DissectParser.java b/libs/dissect/src/main/java/org/opensearch/dissect/DissectParser.java index 9861847c9e1ea..828d4b7de450e 100644 --- a/libs/dissect/src/main/java/org/opensearch/dissect/DissectParser.java +++ b/libs/dissect/src/main/java/org/opensearch/dissect/DissectParser.java @@ -194,26 +194,25 @@ public DissectParser(String pattern, String appendSeparator) { * @throws DissectException if unable to dissect a pair into it's parts. */ public Map<String, String> parse(String inputString) { - /** - * - * This implements a naive string matching algorithm. The string is walked left to right, comparing each byte against - * another string's bytes looking for matches. If the bytes match, then a second cursor looks ahead to see if all the bytes - * of the other string matches. If they all match, record it and advances the primary cursor to the match point. If it can not match - * all of the bytes then progress the main cursor. Repeat till the end of the input string. Since the string being searching for - * (the delimiter) is generally small and rare the naive approach is efficient. - * - * In this case the string that is walked is the input string, and the string being searched for is the current delimiter. - * For example for a dissect pattern of {@code %{a},%{b}:%{c}} the delimiters (comma then colon) are searched for in the - * input string. At class construction the list of keys+delimiters are found (dissectPairs), which allows the use of that ordered - * list to know which delimiter to use for the search. The delimiters is progressed once the current delimiter is matched. 
- * - * There are two special cases that requires additional parsing beyond the standard naive algorithm. Consecutive delimiters should - * results in a empty matches unless the {@code ->} is provided. For example given the dissect pattern of - * {@code %{a},%{b},%{c},%{d}} and input string of {@code foo,,,} the match should be successful with empty values for b,c and d. - * However, if the key modifier {@code ->}, is present it will simply skip over any delimiters just to the right of the key - * without assigning any values. For example {@code %{a->},{%b}} will match the input string of {@code foo,,,,,,bar} with a=foo and - * b=bar. - * + /* + + This implements a naive string matching algorithm. The string is walked left to right, comparing each byte against + another string's bytes looking for matches. If the bytes match, then a second cursor looks ahead to see if all the bytes + of the other string match. If they all match, the match is recorded and the primary cursor advances to the match point. If not + all of the bytes can be matched, the main cursor progresses instead. Repeat until the end of the input string. Since the string + being searched for (the delimiter) is generally small and rare, the naive approach is efficient. + + In this case the string that is walked is the input string, and the string being searched for is the current delimiter. + For example, for a dissect pattern of {@code %{a},%{b}:%{c}} the delimiters (comma then colon) are searched for in the + input string. At class construction the ordered list of keys+delimiters is built (dissectPairs), which determines which + delimiter to use for each search. The list is advanced once the current delimiter is matched. + + There are two special cases that require additional parsing beyond the standard naive algorithm. Consecutive delimiters + should result in empty matches unless the {@code ->} modifier is provided. For example, given the dissect pattern of + {@code %{a},%{b},%{c},%{d}} and input string of {@code foo,,,} the match should be successful with empty values for b, c and d. + However, if the key modifier {@code ->} is present, it will simply skip over any delimiters just to the right of the key + without assigning any values. For example, {@code %{a->},%{b}} will match the input string of {@code foo,,,,,,bar} with a=foo and + b=bar.
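A compact sketch of the walk just described, stripped of the ->/append/reference machinery (hypothetical helper, not the DissectParser implementation; assumes every delimiter occurs in the input):

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Each key's trailing delimiter is searched for left to right; the value is
// the slice between the previous match and the next one. The last key has no
// trailing delimiter and takes the remainder of the input.
static Map<String, String> naiveDissect(String input, List<String> keys, List<String> delimiters) {
    Map<String, String> result = new LinkedHashMap<>();
    int pos = 0;
    for (int k = 0; k < keys.size(); k++) {
        if (k < delimiters.size()) {
            int next = input.indexOf(delimiters.get(k), pos); // the "look ahead" match
            result.put(keys.get(k), input.substring(pos, next));
            pos = next + delimiters.get(k).length();
        } else {
            result.put(keys.get(k), input.substring(pos));
        }
    }
    return result;
}

// naiveDissect("foo,bar:baz", List.of("a", "b", "c"), List.of(",", ":"))
// -> {a=foo, b=bar, c=baz}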
*/ DissectMatch dissectMatch = new DissectMatch(appendSeparator, maxMatches, maxResults, appendCount, referenceCount); Iterator<DissectPair> it = matchPairs.iterator(); @@ -232,7 +231,10 @@ public Map<String, String> parse(String inputString) { int lookAheadMatches; // start walking the input string byte by byte, look ahead for matches where needed // if a match is found jump forward to the end of the match - for (; i < input.length; i++) { + while (i < input.length) { + // record where this iteration starts so the assert below can prove that i advanced + int start = i; + lookAheadMatches = 0; // potential match between delimiter and input string if (delimiter.length > 0 && input[i] == delimiter[0]) { @@ -284,8 +286,14 @@ public Map<String, String> parse(String inputString) { delimiter = dissectPair.getDelimiter().getBytes(StandardCharsets.UTF_8); // i is always one byte after the last found delimiter, aka the start of the next value valueStart = i; + } else { + i++; + } + } else { + i++; } + // i must have advanced on every path through the loop body + assert (i != start); } // the last key, grab the rest of the input (unless consecutive delimiters already grabbed the last key) // and there is no trailing delimiter diff --git a/libs/dissect/src/test/java/org/opensearch/dissect/DissectParserTests.java b/libs/dissect/src/test/java/org/opensearch/dissect/DissectParserTests.java index fcd0dc8e248aa..665fe63e31b1d 100644 --- a/libs/dissect/src/test/java/org/opensearch/dissect/DissectParserTests.java +++ b/libs/dissect/src/test/java/org/opensearch/dissect/DissectParserTests.java @@ -34,10 +34,10 @@ import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; + import org.opensearch.test.OpenSearchTestCase; import org.hamcrest.CoreMatchers; import org.hamcrest.Matchers; -import org.mockito.internal.util.collections.Sets; import java.util.ArrayList; import java.util.Arrays; @@ -45,6 +45,8 @@ import java.util.List; import java.util.Map; +import org.mockito.internal.util.collections.Sets; + import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiAlphanumOfLengthBetween; public class DissectParserTests extends OpenSearchTestCase { diff --git a/libs/geo/src/main/java/org/opensearch/geometry/Circle.java b/libs/geo/src/main/java/org/opensearch/geometry/Circle.java index 6f8b0dc6929cc..c05f316b53b9c 100644 --- a/libs/geo/src/main/java/org/opensearch/geometry/Circle.java +++ b/libs/geo/src/main/java/org/opensearch/geometry/Circle.java @@ -39,12 +39,19 @@ * and optional altitude in meters. */ public class Circle implements Geometry { + + /** Empty circle: x=0, y=0, z=NaN, radius=-1 */ public static final Circle EMPTY = new Circle(); + /** Latitude of the center of the circle in degrees */ private final double y; + /** Longitude of the center of the circle in degrees */ private final double x; + /** Altitude of the center of the circle in meters (NaN if irrelevant) */ private final double z; + /** Radius of the circle in meters */ private final double radiusMeters; + /** Create an {@link #EMPTY} circle */ private Circle() { y = 0; x = 0; @@ -52,10 +59,23 @@ private Circle() { radiusMeters = -1; } + /** + * Create a circle with no altitude. + * @param x Longitude of the center of the circle in degrees + * @param y Latitude of the center of the circle in degrees + * @param radiusMeters Radius of the circle in meters + */ public Circle(final double x, final double y, final double radiusMeters) { this(x, y, Double.NaN, radiusMeters); } + + /** + * Create a circle with altitude.
+ * @param x Longitude of the center of the circle in degrees + * @param y Latitude of the center of the circle in degrees + * @param z Altitude of the center of the circle in meters + * @param radiusMeters Radius of the circle in meters + */ public Circle(final double x, final double y, final double z, final double radiusMeters) { this.y = y; this.x = x; @@ -66,39 +86,68 @@ public Circle(final double x, final double y, final double z, final double radiu } } + /** + * @return The type of this geometry (always {@link ShapeType#CIRCLE}) + */ @Override public ShapeType type() { return ShapeType.CIRCLE; } + /** + * @return The y (latitude) of the center of the circle in degrees + */ public double getY() { return y; } + /** + * @return The x (longitude) of the center of the circle in degrees + */ public double getX() { return x; } + /** + * @return The radius of the circle in meters + */ public double getRadiusMeters() { return radiusMeters; } + /** + * @return The altitude of the center of the circle in meters (NaN if irrelevant) + */ public double getZ() { return z; } + /** + * @return The latitude (y) of the center of the circle in degrees + */ public double getLat() { return y; } + /** + * @return The longitude (x) of the center of the circle in degrees + */ public double getLon() { return x; } + /** + * @return The altitude (z) of the center of the circle in meters (NaN if irrelevant) + */ public double getAlt() { return z; } + /** + * Compare this circle to another circle. + * @param o The other circle + * @return True if the two circles are equal in all their properties. False if null or different. + */ @Override public boolean equals(Object o) { if (this == o) return true; @@ -111,6 +160,9 @@ public boolean equals(Object o) { return (Double.compare(circle.z, z) == 0); } + /** + * @return The hashcode of this circle. + */ @Override public int hashCode() { int result; @@ -126,11 +178,23 @@ public int hashCode() { return result; } + /** + * Visit this circle with a {@link GeometryVisitor}. + * + * @param visitor The visitor + * @param <T> The return type of the visitor + * @param <E> The exception type of the visitor + * @return The result of the visitor + * @throws E The exception thrown by the visitor + */ @Override public <T, E extends Exception> T visit(GeometryVisitor<T, E> visitor) throws E { return visitor.visit(this); } + /** + * @return True if this circle is empty (radius less than 0) + */ @Override public boolean isEmpty() { return radiusMeters < 0; @@ -141,6 +205,9 @@ public String toString() { return WellKnownText.INSTANCE.toWKT(this); } + /** + * @return True if this circle has an altitude. False if NaN. 
+ */ @Override public boolean hasZ() { return Double.isNaN(z) == false; diff --git a/libs/geo/src/main/java/org/opensearch/geometry/utils/BitUtil.java b/libs/geo/src/main/java/org/opensearch/geometry/utils/BitUtil.java index 664e7e68d96a5..c946cc2473202 100644 --- a/libs/geo/src/main/java/org/opensearch/geometry/utils/BitUtil.java +++ b/libs/geo/src/main/java/org/opensearch/geometry/utils/BitUtil.java @@ -48,8 +48,8 @@ public class BitUtil { // magic numbers for bit interleaving /** * Interleaves the first 32 bits of each long value - * - * Adapted from: http://graphics.stanford.edu/~seander/bithacks.html#InterleaveBMN + * <p> + * Adapted from: <a href="http://graphics.stanford.edu/~seander/bithacks.html#InterleaveBMN">bithacks.html#InterleaveBMN</a> */ public static long interleave(int even, int odd) { long v1 = 0x00000000FFFFFFFFL & even; diff --git a/libs/geo/src/main/java/org/opensearch/geometry/utils/Geohash.java b/libs/geo/src/main/java/org/opensearch/geometry/utils/Geohash.java index 8b3b841e221e5..33c423e136613 100644 --- a/libs/geo/src/main/java/org/opensearch/geometry/utils/Geohash.java +++ b/libs/geo/src/main/java/org/opensearch/geometry/utils/Geohash.java @@ -39,12 +39,12 @@ /** * Utilities for converting to/from the GeoHash standard - * + * <p> * The geohash long format is represented as lon/lat (x/y) interleaved with the 4 least significant bits * representing the level (1-12) [xyxy...xyxyllll] - * + * <p> * This differs from a morton encoded value which interleaves lat/lon (y/x). - * + * <p> * NOTE: this will replace {@code org.opensearch.common.geo.GeoHashUtils} */ public class Geohash { diff --git a/libs/grok/src/main/java/org/opensearch/grok/Grok.java b/libs/grok/src/main/java/org/opensearch/grok/Grok.java index 2585916aae38b..7aa3347ba4f4b 100644 --- a/libs/grok/src/main/java/org/opensearch/grok/Grok.java +++ b/libs/grok/src/main/java/org/opensearch/grok/Grok.java @@ -32,15 +32,6 @@ package org.opensearch.grok; -import org.jcodings.specific.UTF8Encoding; -import org.joni.Matcher; -import org.joni.NameEntry; -import org.joni.Option; -import org.joni.Regex; -import org.joni.Region; -import org.joni.Syntax; -import org.joni.exception.ValueException; - import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; @@ -56,6 +47,15 @@ import java.util.Stack; import java.util.function.Consumer; +import org.jcodings.specific.UTF8Encoding; +import org.joni.Matcher; +import org.joni.NameEntry; +import org.joni.Option; +import org.joni.Regex; +import org.joni.Region; +import org.joni.Syntax; +import org.joni.exception.ValueException; + import static java.util.Collections.unmodifiableList; public final class Grok { @@ -151,7 +151,7 @@ private void validatePatternBank() { /** * Checks whether patterns reference each other in a circular manner and, if so, fail with an exception. * Also checks for malformed pattern definitions and fails with an exception. - * + * <p> * In a pattern, anything between <code>%{</code> and <code>}</code> or <code>:</code> is considered * a reference to another named pattern. This method will navigate to all these named patterns and * check for a circular reference. 
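Since both BitUtil.interleave and the Geohash long format above rest on the bit-spreading trick from the linked bithacks page, here is a self-contained sketch of that technique (the same algorithm family, written fresh rather than copied from BitUtil):

// Spread the low 32 bits of v so each bit lands at an even position
// (0b...dcba -> 0b...0d0c0b0a), via the classic shift-and-mask cascade.
static long spread(long v) {
    v &= 0x00000000FFFFFFFFL;
    v = (v | (v << 16)) & 0x0000FFFF0000FFFFL;
    v = (v | (v << 8)) & 0x00FF00FF00FF00FFL;
    v = (v | (v << 4)) & 0x0F0F0F0F0F0F0F0FL;
    v = (v | (v << 2)) & 0x3333333333333333L;
    v = (v | (v << 1)) & 0x5555555555555555L;
    return v;
}

// Weave two 32-bit words into one 64-bit value: 'even' supplies bits
// 0, 2, 4, ... and 'odd' supplies bits 1, 3, 5, ...
static long interleave(int even, int odd) {
    return spread(even) | (spread(odd) << 1);
}

// interleave(0b0011, 0b0101) == 0b00100111

In the geohash encoding this interleaved word is exactly the [xyxy...xy] body; the level then occupies the four least-significant bits of the long.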
diff --git a/libs/grok/src/main/java/org/opensearch/grok/GrokCaptureConfig.java b/libs/grok/src/main/java/org/opensearch/grok/GrokCaptureConfig.java index 44be133b90fa6..6d1985f2165cd 100644 --- a/libs/grok/src/main/java/org/opensearch/grok/GrokCaptureConfig.java +++ b/libs/grok/src/main/java/org/opensearch/grok/GrokCaptureConfig.java @@ -32,8 +32,6 @@ package org.opensearch.grok; -import org.joni.NameEntry; - import java.nio.charset.StandardCharsets; import java.util.function.Consumer; import java.util.function.DoubleConsumer; @@ -41,6 +39,8 @@ import java.util.function.IntConsumer; import java.util.function.LongConsumer; +import org.joni.NameEntry; + /** * Configuration for a value that {@link Grok} can capture. */ diff --git a/libs/grok/src/main/java/org/opensearch/grok/GrokCaptureExtracter.java b/libs/grok/src/main/java/org/opensearch/grok/GrokCaptureExtracter.java index b551150aac28c..b6d881de4fac1 100644 --- a/libs/grok/src/main/java/org/opensearch/grok/GrokCaptureExtracter.java +++ b/libs/grok/src/main/java/org/opensearch/grok/GrokCaptureExtracter.java @@ -32,13 +32,13 @@ package org.opensearch.grok; -import org.joni.Region; - import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; +import org.joni.Region; + import static java.util.Collections.emptyMap; /** diff --git a/libs/grok/src/main/java/org/opensearch/grok/GrokCaptureType.java b/libs/grok/src/main/java/org/opensearch/grok/GrokCaptureType.java index 7f9555d01e386..0bd3bb47e55df 100644 --- a/libs/grok/src/main/java/org/opensearch/grok/GrokCaptureType.java +++ b/libs/grok/src/main/java/org/opensearch/grok/GrokCaptureType.java @@ -33,11 +33,12 @@ package org.opensearch.grok; import org.opensearch.grok.GrokCaptureConfig.NativeExtracterMap; -import org.joni.Region; import java.nio.charset.StandardCharsets; import java.util.function.Consumer; +import org.joni.Region; + /** * The type defined for the field in the pattern. */ diff --git a/libs/grok/src/main/java/org/opensearch/grok/MatcherWatchdog.java b/libs/grok/src/main/java/org/opensearch/grok/MatcherWatchdog.java index 70b4570ee69ad..d5b7566ecc90f 100644 --- a/libs/grok/src/main/java/org/opensearch/grok/MatcherWatchdog.java +++ b/libs/grok/src/main/java/org/opensearch/grok/MatcherWatchdog.java @@ -31,8 +31,6 @@ package org.opensearch.grok; -import org.joni.Matcher; - import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicBoolean; @@ -40,11 +38,13 @@ import java.util.function.BiConsumer; import java.util.function.LongSupplier; +import org.joni.Matcher; + /** * Protects against long running operations that happen between the register and unregister invocations. * Threads that invoke {@link #register(Matcher)}, but take too long to invoke the {@link #unregister(Matcher)} method * will be interrupted. - * + * <p> * This is needed for Joni's {@link org.joni.Matcher#search(int, int, int)} method, because * it can end up spinning endlessly if the regular expression is too complex. 
Joni has checks * that for every 30k iterations it checks if the current thread is interrupted and if so diff --git a/libs/grok/src/test/java/org/opensearch/grok/GrokTests.java b/libs/grok/src/test/java/org/opensearch/grok/GrokTests.java index bdcde57f91bb3..a37689e051c67 100644 --- a/libs/grok/src/test/java/org/opensearch/grok/GrokTests.java +++ b/libs/grok/src/test/java/org/opensearch/grok/GrokTests.java @@ -51,16 +51,16 @@ import java.util.function.IntConsumer; import java.util.function.LongConsumer; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.nullValue; import static org.opensearch.grok.GrokCaptureType.BOOLEAN; import static org.opensearch.grok.GrokCaptureType.DOUBLE; import static org.opensearch.grok.GrokCaptureType.FLOAT; import static org.opensearch.grok.GrokCaptureType.INTEGER; import static org.opensearch.grok.GrokCaptureType.LONG; import static org.opensearch.grok.GrokCaptureType.STRING; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; public class GrokTests extends OpenSearchTestCase { public void testMatchWithoutCaptures() { diff --git a/libs/grok/src/test/java/org/opensearch/grok/MatcherWatchdogTests.java b/libs/grok/src/test/java/org/opensearch/grok/MatcherWatchdogTests.java index 0290e200e4382..4a12ccab32f6a 100644 --- a/libs/grok/src/test/java/org/opensearch/grok/MatcherWatchdogTests.java +++ b/libs/grok/src/test/java/org/opensearch/grok/MatcherWatchdogTests.java @@ -31,24 +31,26 @@ package org.opensearch.grok; +import org.opensearch.test.OpenSearchTestCase; + import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import org.opensearch.test.OpenSearchTestCase; + import org.joni.Matcher; import org.mockito.Mockito; import static org.hamcrest.Matchers.is; import static org.mockito.Mockito.any; -import static org.mockito.Mockito.eq; import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.timeout; import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; import static org.mockito.Mockito.verifyNoInteractions; +import static org.mockito.Mockito.verifyNoMoreInteractions; public class MatcherWatchdogTests extends OpenSearchTestCase { diff --git a/libs/nio/src/main/java/org/opensearch/nio/ChannelContext.java b/libs/nio/src/main/java/org/opensearch/nio/ChannelContext.java index 797dfe859fa6c..0e29661978716 100644 --- a/libs/nio/src/main/java/org/opensearch/nio/ChannelContext.java +++ b/libs/nio/src/main/java/org/opensearch/nio/ChannelContext.java @@ -116,10 +116,10 @@ protected void handleException(Exception e) { /** * Schedules a channel to be closed by the selector event loop with which it is registered. - * + * <p> * If the channel is open and the state can be transitioned to closed, the close operation will * be scheduled with the event loop. - * + * <p> * Depending on the underlying protocol of the channel, a close operation might simply close the socket * channel or may involve reading and writing messages. 
*/ diff --git a/libs/nio/src/main/java/org/opensearch/nio/NioSelector.java b/libs/nio/src/main/java/org/opensearch/nio/NioSelector.java index a38a33182afea..4ed745723515c 100644 --- a/libs/nio/src/main/java/org/opensearch/nio/NioSelector.java +++ b/libs/nio/src/main/java/org/opensearch/nio/NioSelector.java @@ -512,12 +512,12 @@ private void handleQueuedWrites() { * This is a convenience method to be called after some object (normally channels) are enqueued with this * selector. This method will check if the selector is still open. If it is open, normal operation can * proceed. - * + * <p> * If the selector is closed, then we attempt to remove the object from the queue. If the removal * succeeds then we throw an {@link IllegalStateException} indicating that normal operation failed. If * the object cannot be removed from the queue, then the object has already been handled by the selector * and operation can proceed normally. - * + * <p> * If this method is called from the selector thread, we will not allow the queuing to occur as the * selector thread can manipulate its queues internally even if it is no longer open. * diff --git a/libs/nio/src/main/java/org/opensearch/nio/ServerChannelContext.java b/libs/nio/src/main/java/org/opensearch/nio/ServerChannelContext.java index 898ce7e4e913b..ab48cc2357e7f 100644 --- a/libs/nio/src/main/java/org/opensearch/nio/ServerChannelContext.java +++ b/libs/nio/src/main/java/org/opensearch/nio/ServerChannelContext.java @@ -129,6 +129,7 @@ private void configureSocket(ServerSocket socket) throws IOException { socket.setReuseAddress(config.tcpReuseAddress()); } + @SuppressWarnings("removal") protected static SocketChannel accept(ServerSocketChannel serverSocketChannel) throws IOException { try { assert serverSocketChannel.isBlocking() == false; diff --git a/libs/nio/src/main/java/org/opensearch/nio/SocketChannelContext.java b/libs/nio/src/main/java/org/opensearch/nio/SocketChannelContext.java index 12a1e80055823..530aa1d86afc7 100644 --- a/libs/nio/src/main/java/org/opensearch/nio/SocketChannelContext.java +++ b/libs/nio/src/main/java/org/opensearch/nio/SocketChannelContext.java @@ -59,7 +59,7 @@ * that it is ready to perform certain operations (read, write, etc) the {@link SocketChannelContext} will * be called. This context will need to implement all protocol related logic. Additionally, if any special * close behavior is required, it should be implemented in this context. - * + * <p> * The only methods of the context that should ever be called from a non-selector thread are * {@link #closeChannel()} and {@link #sendMessage(Object, BiConsumer)}. 
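The post-enqueue contract documented for NioSelector above reduces to: after adding work, re-check that the selector is open; if it has closed, you may only fail if you can still retract your item, because a failed removal means the selector already processed it. A hedged sketch of that pattern (hypothetical names, not the NioSelector API):

import java.util.Queue;

// Hedged sketch: validate an enqueue against a concurrently closing selector.
// remove() succeeding proves the task was never run -> safe to throw;
// remove() failing means the selector already handled it -> proceed normally.
static void ensureOpenOrRetract(Queue<Runnable> queue, Runnable task, boolean selectorIsOpen, boolean onSelectorThread) {
    if (onSelectorThread) {
        // per the javadoc above, the selector thread may manipulate its own
        // queues even after close, so queueing from it is not allowed here
        throw new IllegalStateException("must not queue from the selector thread");
    }
    if (selectorIsOpen == false && queue.remove(task)) {
        throw new IllegalStateException("selector is already closed");
    }
}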
*/ @@ -388,6 +388,7 @@ private void configureSocket(Socket socket, boolean isConnectComplete) throws IO } } + @SuppressWarnings("removal") private static void connect(SocketChannel socketChannel, InetSocketAddress remoteAddress) throws IOException { try { AccessController.doPrivileged((PrivilegedExceptionAction<Boolean>) () -> socketChannel.connect(remoteAddress)); diff --git a/libs/nio/src/test/java/org/opensearch/nio/NioSelectorTests.java b/libs/nio/src/test/java/org/opensearch/nio/NioSelectorTests.java index 6af1cb64dac18..2fe5f3cc3f115 100644 --- a/libs/nio/src/test/java/org/opensearch/nio/NioSelectorTests.java +++ b/libs/nio/src/test/java/org/opensearch/nio/NioSelectorTests.java @@ -37,7 +37,6 @@ import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; -import org.mockito.ArgumentCaptor; import java.io.IOException; import java.nio.ByteBuffer; @@ -51,13 +50,15 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; +import org.mockito.ArgumentCaptor; + import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyLong; -import static org.mockito.Mockito.isNull; -import static org.mockito.Mockito.same; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.isNull; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.same; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; diff --git a/libs/nio/src/test/java/org/opensearch/nio/SocketChannelContextTests.java b/libs/nio/src/test/java/org/opensearch/nio/SocketChannelContextTests.java index 801ee7023482c..1e559b597ca89 100644 --- a/libs/nio/src/test/java/org/opensearch/nio/SocketChannelContextTests.java +++ b/libs/nio/src/test/java/org/opensearch/nio/SocketChannelContextTests.java @@ -35,8 +35,6 @@ import org.opensearch.common.SuppressForbidden; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; -import org.mockito.ArgumentCaptor; -import org.mockito.stubbing.Answer; import java.io.IOException; import java.net.InetSocketAddress; @@ -53,12 +51,15 @@ import java.util.function.Consumer; import java.util.function.IntFunction; +import org.mockito.ArgumentCaptor; +import org.mockito.stubbing.Answer; + import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyInt; -import static org.mockito.Mockito.isNull; -import static org.mockito.Mockito.same; import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.isNull; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.same; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; diff --git a/libs/plugin-classloader/src/main/java/org/opensearch/plugins/ExtendedPluginsClassLoader.java b/libs/plugin-classloader/src/main/java/org/opensearch/plugins/ExtendedPluginsClassLoader.java index 4a200a5dfa9bd..969fa91b50538 100644 --- a/libs/plugin-classloader/src/main/java/org/opensearch/plugins/ExtendedPluginsClassLoader.java +++ b/libs/plugin-classloader/src/main/java/org/opensearch/plugins/ExtendedPluginsClassLoader.java @@ -65,6 +65,7 @@ protected Class<?> findClass(String name) throws ClassNotFoundException { /** * Return a new classloader across the parent and extended loaders. 
*/ + @SuppressWarnings("removal") public static ExtendedPluginsClassLoader create(ClassLoader parent, List<ClassLoader> extendedLoaders) { return AccessController.doPrivileged( (PrivilegedAction<ExtendedPluginsClassLoader>) () -> new ExtendedPluginsClassLoader(parent, extendedLoaders) diff --git a/libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecureSM.java b/libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecureSM.java index f41c49844997d..a2531f4a9156e 100644 --- a/libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecureSM.java +++ b/libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecureSM.java @@ -76,6 +76,7 @@ * @see <a href="http://cs.oswego.edu/pipermail/concurrency-interest/2009-August/006508.html"> * http://cs.oswego.edu/pipermail/concurrency-interest/2009-August/006508.html</a> */ +@SuppressWarnings("removal") public class SecureSM extends SecurityManager { private final String[] classesThatCanExit; diff --git a/libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecuredForkJoinWorkerThreadFactory.java b/libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecuredForkJoinWorkerThreadFactory.java index fe239fea8129e..3c8e78a902fcb 100644 --- a/libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecuredForkJoinWorkerThreadFactory.java +++ b/libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecuredForkJoinWorkerThreadFactory.java @@ -18,6 +18,7 @@ import java.util.concurrent.ForkJoinPool.ForkJoinWorkerThreadFactory; import java.util.concurrent.ForkJoinWorkerThread; +@SuppressWarnings("removal") public class SecuredForkJoinWorkerThreadFactory implements ForkJoinWorkerThreadFactory { static AccessControlContext contextWithPermissions(Permission... perms) { Permissions permissions = new Permissions(); diff --git a/libs/secure-sm/src/test/java/org/opensearch/secure_sm/SecureSMTests.java b/libs/secure-sm/src/test/java/org/opensearch/secure_sm/SecureSMTests.java index 8f184328957a1..fd666c70cfebb 100644 --- a/libs/secure-sm/src/test/java/org/opensearch/secure_sm/SecureSMTests.java +++ b/libs/secure-sm/src/test/java/org/opensearch/secure_sm/SecureSMTests.java @@ -32,15 +32,16 @@ package org.opensearch.secure_sm; -import junit.framework.TestCase; - import java.security.Permission; import java.security.Policy; import java.security.ProtectionDomain; import java.util.Collections; import java.util.concurrent.atomic.AtomicBoolean; +import junit.framework.TestCase; + /** Simple tests for SecureSM */ +@SuppressWarnings("removal") public class SecureSMTests extends TestCase { static { // install a mock security policy: diff --git a/libs/secure-sm/src/test/java/org/opensearch/secure_sm/ThreadPermissionTests.java b/libs/secure-sm/src/test/java/org/opensearch/secure_sm/ThreadPermissionTests.java index 774e87f08c634..8d821f5bb27aa 100644 --- a/libs/secure-sm/src/test/java/org/opensearch/secure_sm/ThreadPermissionTests.java +++ b/libs/secure-sm/src/test/java/org/opensearch/secure_sm/ThreadPermissionTests.java @@ -32,10 +32,10 @@ package org.opensearch.secure_sm; -import junit.framework.TestCase; - import java.security.AllPermission; +import junit.framework.TestCase; + /** * Simple tests for ThreadPermission */ diff --git a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/DefaultJdkTrustConfig.java b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/DefaultJdkTrustConfig.java index 04277e1528a92..859b74b200dc6 100644 --- a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/DefaultJdkTrustConfig.java +++ 
b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/DefaultJdkTrustConfig.java @@ -36,6 +36,7 @@ import javax.net.ssl.TrustManagerFactory; import javax.net.ssl.X509ExtendedTrustManager; + import java.io.IOException; import java.nio.file.Path; import java.security.GeneralSecurityException; diff --git a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/DiagnosticTrustManager.java b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/DiagnosticTrustManager.java index 75b0b831940d8..a3856038bbca1 100644 --- a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/DiagnosticTrustManager.java +++ b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/DiagnosticTrustManager.java @@ -36,6 +36,7 @@ import javax.net.ssl.SSLSession; import javax.net.ssl.SSLSocket; import javax.net.ssl.X509ExtendedTrustManager; + import java.net.Socket; import java.security.GeneralSecurityException; import java.security.cert.CertificateException; diff --git a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/EmptyKeyConfig.java b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/EmptyKeyConfig.java index ff5db9a04ff74..b5c7416c792a6 100644 --- a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/EmptyKeyConfig.java +++ b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/EmptyKeyConfig.java @@ -33,6 +33,7 @@ package org.opensearch.common.ssl; import javax.net.ssl.X509ExtendedKeyManager; + import java.nio.file.Path; import java.util.Collection; import java.util.Collections; diff --git a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/KeyStoreUtil.java b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/KeyStoreUtil.java index b2d5732022212..b6b6cdd90af14 100644 --- a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/KeyStoreUtil.java +++ b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/KeyStoreUtil.java @@ -40,6 +40,7 @@ import javax.net.ssl.TrustManagerFactory; import javax.net.ssl.X509ExtendedKeyManager; import javax.net.ssl.X509ExtendedTrustManager; + import java.io.IOException; import java.io.InputStream; import java.nio.file.Files; diff --git a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/PemKeyConfig.java b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/PemKeyConfig.java index 038299fd6d161..bfc29a5801b11 100644 --- a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/PemKeyConfig.java +++ b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/PemKeyConfig.java @@ -34,6 +34,7 @@ import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.X509ExtendedKeyManager; + import java.io.FileNotFoundException; import java.io.IOException; import java.nio.file.NoSuchFileException; diff --git a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/PemTrustConfig.java b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/PemTrustConfig.java index 236ea6cd6f431..50b5887e7f355 100644 --- a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/PemTrustConfig.java +++ b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/PemTrustConfig.java @@ -34,6 +34,7 @@ import javax.net.ssl.TrustManagerFactory; import javax.net.ssl.X509ExtendedTrustManager; + import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; diff --git a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/PemUtils.java b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/PemUtils.java index 533c8bf429e60..8a3730ee554f9 100644 --- a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/PemUtils.java +++ 
b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/PemUtils.java @@ -41,6 +41,7 @@ import javax.crypto.spec.IvParameterSpec; import javax.crypto.spec.PBEKeySpec; import javax.crypto.spec.SecretKeySpec; + import java.io.BufferedReader; import java.io.FileNotFoundException; import java.io.IOException; diff --git a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslClientAuthenticationMode.java b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslClientAuthenticationMode.java index e22ae45afd5fb..17d3749567990 100644 --- a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslClientAuthenticationMode.java +++ b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslClientAuthenticationMode.java @@ -32,6 +32,7 @@ package org.opensearch.common.ssl; import javax.net.ssl.SSLParameters; + import java.util.Collections; import java.util.LinkedHashMap; import java.util.Locale; diff --git a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslConfiguration.java b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslConfiguration.java index 27c3918ed6da0..23acb0ff269e2 100644 --- a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslConfiguration.java +++ b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslConfiguration.java @@ -35,6 +35,7 @@ import javax.net.ssl.SSLContext; import javax.net.ssl.X509ExtendedKeyManager; import javax.net.ssl.X509ExtendedTrustManager; + import java.nio.file.Path; import java.security.GeneralSecurityException; import java.security.NoSuchAlgorithmException; diff --git a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslConfigurationKeys.java b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslConfigurationKeys.java index eb801bd124a50..5d28cbe1005b2 100644 --- a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslConfigurationKeys.java +++ b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslConfigurationKeys.java @@ -33,6 +33,7 @@ package org.opensearch.common.ssl; import javax.net.ssl.TrustManagerFactory; + import java.security.KeyStore; import java.util.Arrays; import java.util.HashSet; diff --git a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslConfigurationLoader.java b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslConfigurationLoader.java index 2cd9f4f31fc7f..0b06a0692197e 100644 --- a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslConfigurationLoader.java +++ b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslConfigurationLoader.java @@ -35,6 +35,7 @@ import javax.crypto.Cipher; import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.TrustManagerFactory; + import java.nio.file.Path; import java.security.NoSuchAlgorithmException; import java.util.ArrayList; diff --git a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslDiagnostics.java b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslDiagnostics.java index 29de6f6d14031..f0a7d5bff4964 100644 --- a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslDiagnostics.java +++ b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslDiagnostics.java @@ -35,6 +35,7 @@ import org.opensearch.common.Nullable; import javax.net.ssl.SSLSession; + import java.security.cert.CertificateEncodingException; import java.security.cert.CertificateParsingException; import java.security.cert.X509Certificate; diff --git a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslKeyConfig.java b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslKeyConfig.java 
index cb87965e83754..250c407a69bf5 100644 --- a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslKeyConfig.java +++ b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslKeyConfig.java @@ -33,6 +33,7 @@ package org.opensearch.common.ssl; import javax.net.ssl.X509ExtendedKeyManager; + import java.nio.file.Path; import java.util.Collection; diff --git a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslTrustConfig.java b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslTrustConfig.java index 38e51778da225..4dd5890f19311 100644 --- a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslTrustConfig.java +++ b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/SslTrustConfig.java @@ -33,6 +33,7 @@ package org.opensearch.common.ssl; import javax.net.ssl.X509ExtendedTrustManager; + import java.nio.file.Path; import java.util.Collection; diff --git a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/StoreKeyConfig.java b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/StoreKeyConfig.java index 42246d918192d..b3b7b7dc346a6 100644 --- a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/StoreKeyConfig.java +++ b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/StoreKeyConfig.java @@ -34,6 +34,7 @@ import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.X509ExtendedKeyManager; + import java.nio.file.Path; import java.security.GeneralSecurityException; import java.security.KeyStore; diff --git a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/StoreTrustConfig.java b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/StoreTrustConfig.java index d509956368a82..556cb052c4391 100644 --- a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/StoreTrustConfig.java +++ b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/StoreTrustConfig.java @@ -33,6 +33,7 @@ package org.opensearch.common.ssl; import javax.net.ssl.X509ExtendedTrustManager; + import java.nio.file.Path; import java.security.GeneralSecurityException; import java.security.KeyStore; diff --git a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/TrustEverythingConfig.java b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/TrustEverythingConfig.java index c8b6ab2407e69..c366210133687 100644 --- a/libs/ssl-config/src/main/java/org/opensearch/common/ssl/TrustEverythingConfig.java +++ b/libs/ssl-config/src/main/java/org/opensearch/common/ssl/TrustEverythingConfig.java @@ -34,6 +34,7 @@ import javax.net.ssl.SSLEngine; import javax.net.ssl.X509ExtendedTrustManager; + import java.net.Socket; import java.nio.file.Path; import java.security.cert.X509Certificate; diff --git a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/DefaultJdkTrustConfigTests.java b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/DefaultJdkTrustConfigTests.java index e767787d67d6d..dd325662ce50d 100644 --- a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/DefaultJdkTrustConfigTests.java +++ b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/DefaultJdkTrustConfigTests.java @@ -36,6 +36,7 @@ import org.junit.Assert; import javax.net.ssl.X509ExtendedTrustManager; + import java.security.cert.X509Certificate; import java.util.Locale; import java.util.Optional; diff --git a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemKeyConfigTests.java b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemKeyConfigTests.java index 59d6a36b222ff..688f03a1e51fa 100644 --- 
a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemKeyConfigTests.java +++ b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemKeyConfigTests.java @@ -36,6 +36,7 @@ import org.hamcrest.Matchers; import javax.net.ssl.X509ExtendedKeyManager; + import java.nio.file.Files; import java.nio.file.NoSuchFileException; import java.nio.file.Path; diff --git a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemTrustConfigTests.java b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemTrustConfigTests.java index 4617727d83857..e664e379d1e97 100644 --- a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemTrustConfigTests.java +++ b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/PemTrustConfigTests.java @@ -35,6 +35,8 @@ import org.opensearch.test.OpenSearchTestCase; import org.hamcrest.Matchers; +import javax.net.ssl.X509ExtendedTrustManager; + import java.nio.file.Files; import java.nio.file.NoSuchFileException; import java.nio.file.Path; @@ -49,8 +51,6 @@ import java.util.stream.Collectors; import java.util.stream.Stream; -import javax.net.ssl.X509ExtendedTrustManager; - public class PemTrustConfigTests extends OpenSearchTestCase { public void testBuildTrustConfigFromSinglePemFile() throws Exception { diff --git a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/SslConfigurationLoaderTests.java b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/SslConfigurationLoaderTests.java index 13b708df497e0..5af7ddc73e680 100644 --- a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/SslConfigurationLoaderTests.java +++ b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/SslConfigurationLoaderTests.java @@ -33,12 +33,13 @@ package org.opensearch.common.ssl; import org.opensearch.common.settings.MockSecureSettings; -import org.opensearch.core.common.settings.SecureString; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.settings.SecureString; import org.opensearch.test.OpenSearchTestCase; import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.TrustManagerFactory; + import java.nio.file.Path; import java.util.Arrays; import java.util.List; diff --git a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/SslConfigurationTests.java b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/SslConfigurationTests.java index a629afc7f0d0b..ee907952c52ff 100644 --- a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/SslConfigurationTests.java +++ b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/SslConfigurationTests.java @@ -32,17 +32,19 @@ package org.opensearch.common.ssl; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.EqualsHashCodeTestUtils; +import org.opensearch.test.OpenSearchTestCase; import org.hamcrest.Matchers; -import org.mockito.Mockito; import javax.net.ssl.SSLContext; + import java.nio.file.Path; import java.util.Arrays; import java.util.Collections; import java.util.List; +import org.mockito.Mockito; + import static org.opensearch.common.ssl.SslConfigurationLoader.DEFAULT_CIPHERS; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; diff --git a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/SslDiagnosticsTests.java b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/SslDiagnosticsTests.java index 38ec0c3b5ff78..c966b4259219f 100644 --- a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/SslDiagnosticsTests.java +++ 
b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/SslDiagnosticsTests.java @@ -35,10 +35,10 @@ import org.opensearch.common.Nullable; import org.opensearch.test.OpenSearchTestCase; import org.hamcrest.Matchers; -import org.mockito.Mockito; import javax.net.ssl.SSLSession; import javax.security.auth.x500.X500Principal; + import java.io.IOException; import java.nio.file.Path; import java.security.PublicKey; @@ -56,6 +56,8 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import org.mockito.Mockito; + public class SslDiagnosticsTests extends OpenSearchTestCase { // Some constants for use in mock certificates diff --git a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/StoreKeyConfigTests.java b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/StoreKeyConfigTests.java index de7420d27c170..7806671d02793 100644 --- a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/StoreKeyConfigTests.java +++ b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/StoreKeyConfigTests.java @@ -37,6 +37,7 @@ import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.X509ExtendedKeyManager; + import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; diff --git a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/StoreTrustConfigTests.java b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/StoreTrustConfigTests.java index cbe0418f99ddf..5609f0fa2c877 100644 --- a/libs/ssl-config/src/test/java/org/opensearch/common/ssl/StoreTrustConfigTests.java +++ b/libs/ssl-config/src/test/java/org/opensearch/common/ssl/StoreTrustConfigTests.java @@ -37,6 +37,7 @@ import javax.net.ssl.TrustManagerFactory; import javax.net.ssl.X509ExtendedTrustManager; + import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; diff --git a/libs/telemetry/build.gradle b/libs/telemetry/build.gradle index ce94698836b4f..f8499482a6093 100644 --- a/libs/telemetry/build.gradle +++ b/libs/telemetry/build.gradle @@ -10,6 +10,8 @@ */ dependencies { + api project(':libs:opensearch-common') + testImplementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testImplementation "junit:junit:${versions.junit}" testImplementation "org.hamcrest:hamcrest:${versions.hamcrest}" diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/Telemetry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/Telemetry.java index 65c974a0d0c36..0f973f50fc640 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/Telemetry.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/Telemetry.java @@ -8,14 +8,16 @@ package org.opensearch.telemetry; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.telemetry.metrics.MetricsTelemetry; import org.opensearch.telemetry.tracing.TracingTelemetry; /** * Interface defining telemetry * - * @opensearch.internal + * @opensearch.experimental */ +@ExperimentalApi public interface Telemetry { /** diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/Counter.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/Counter.java new file mode 100644 index 0000000000000..c62288d280e2f --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/Counter.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source 
license. + */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.telemetry.metrics.tags.Tags; + +/** + * Counter adds the value to the existing metric. + * {@opensearch.experimental} + */ +@ExperimentalApi +public interface Counter { + + /** + * add value. + * @param value value to be added. + */ + void add(double value); + + /** + * add value along with the attributes. + * + * @param value value to be added. + * @param tags attributes/dimensions of the metric. + */ + void add(double value, Tags tags); + +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistry.java new file mode 100644 index 0000000000000..f38fdd6412d79 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistry.java @@ -0,0 +1,46 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import java.io.IOException; + +/** + * Default implementation for {@link MetricsRegistry} + */ +class DefaultMetricsRegistry implements MetricsRegistry { + private final MetricsTelemetry metricsTelemetry; + + /** + * Constructor + * @param metricsTelemetry metrics telemetry. + */ + public DefaultMetricsRegistry(MetricsTelemetry metricsTelemetry) { + this.metricsTelemetry = metricsTelemetry; + } + + @Override + public Counter createCounter(String name, String description, String unit) { + return metricsTelemetry.createCounter(name, description, unit); + } + + @Override + public Counter createUpDownCounter(String name, String description, String unit) { + return metricsTelemetry.createUpDownCounter(name, description, unit); + } + + @Override + public Histogram createHistogram(String name, String description, String unit) { + return metricsTelemetry.createHistogram(name, description, unit); + } + + @Override + public void close() throws IOException { + metricsTelemetry.close(); + } +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/Histogram.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/Histogram.java new file mode 100644 index 0000000000000..95ada626e21ee --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/Histogram.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.telemetry.metrics.tags.Tags; + +/** + * Histogram records the value for an existing metric. + * {@opensearch.experimental} + */ +@ExperimentalApi +public interface Histogram { + + /** + * record value. + * @param value value to be added. + */ + void record(double value); + + /** + * record value along with the attributes. + * + * @param value value to be added. + * @param tags attributes/dimensions of the metric. 
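Reviewer note: taken together, Counter and the registry give call sites a small surface: create the instrument, then add values with optional Tags dimensions. A minimal usage sketch in Java (the registry parameter and metric names are assumptions for illustration, and Tags is defined further down in this change):

import org.opensearch.telemetry.metrics.Counter;
import org.opensearch.telemetry.metrics.MetricsRegistry;
import org.opensearch.telemetry.metrics.tags.Tags;

class CounterUsageSketch {
    // The registry would be provided by whatever wires up MetricsTelemetry. In real code
    // the instrument would be created once and cached; it is created inline here only to
    // keep the sketch short.
    static void recordRequest(MetricsRegistry registry) {
        Counter requests = registry.createCounter("http.server.requests", "Inbound HTTP requests", "1");
        requests.add(1.0); // bare increment
        requests.add(1.0, Tags.create().addTag("method", "GET").addTag("status", 200L)); // increment with dimensions
    }
}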
+ */ + void record(double value, Tags tags); + +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsRegistry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsRegistry.java new file mode 100644 index 0000000000000..94d19bda31f34 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsRegistry.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.common.annotation.ExperimentalApi; + +import java.io.Closeable; + +/** + * MetricsRegistry helps in creating the metric instruments. + * @opensearch.experimental + */ +@ExperimentalApi +public interface MetricsRegistry extends Closeable { + + /** + * Creates the counter. + * @param name name of the counter. + * @param description any description about the metric. + * @param unit unit of the metric. + * @return counter. + */ + Counter createCounter(String name, String description, String unit); + + /** + * Creates the upDown counter. + * @param name name of the upDown counter. + * @param description any description about the metric. + * @param unit unit of the metric. + * @return counter. + */ + Counter createUpDownCounter(String name, String description, String unit); + + /** + * Creates the histogram type of Metric. Implementation framework will take care + * of the bucketing strategy. + * + * @param name name of the histogram. + * @param description any description about the metric. + * @param unit unit of the metric. + * @return histogram. + */ + Histogram createHistogram(String name, String description, String unit); +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsTelemetry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsTelemetry.java index fa3b7fd192f1a..fb3dec8152b4f 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsTelemetry.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsTelemetry.java @@ -8,9 +8,14 @@ package org.opensearch.telemetry.metrics; +import org.opensearch.common.annotation.ExperimentalApi; + /** * Interface for metrics telemetry providers + * + * @opensearch.experimental */ -public interface MetricsTelemetry { +@ExperimentalApi +public interface MetricsTelemetry extends MetricsRegistry { } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopCounter.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopCounter.java new file mode 100644 index 0000000000000..c1daf564dd3bc --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopCounter.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.metrics.noop; + +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.telemetry.metrics.Counter; +import org.opensearch.telemetry.metrics.tags.Tags; + +/** + * No-op {@link Counter} + * {@opensearch.internal} + */ +@InternalApi +public class NoopCounter implements Counter { + + /** + * No-op Counter instance + */ + public final static NoopCounter INSTANCE = new NoopCounter(); + + private NoopCounter() {} + + @Override + public void add(double value) { + + } + + @Override + public void add(double value, Tags tags) { + + } +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopHistogram.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopHistogram.java new file mode 100644 index 0000000000000..20e72bccad899 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopHistogram.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics.noop; + +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.telemetry.metrics.Histogram; +import org.opensearch.telemetry.metrics.tags.Tags; + +/** + * No-op {@link Histogram} + * {@opensearch.internal} + */ +@InternalApi +public class NoopHistogram implements Histogram { + + /** + * No-op Histogram instance + */ + public final static NoopHistogram INSTANCE = new NoopHistogram(); + + private NoopHistogram() {} + + @Override + public void record(double value) { + + } + + @Override + public void record(double value, Tags tags) { + + } +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopMetricsRegistry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopMetricsRegistry.java new file mode 100644 index 0000000000000..d3dda68cfae71 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopMetricsRegistry.java @@ -0,0 +1,51 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.metrics.noop; + +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.telemetry.metrics.Counter; +import org.opensearch.telemetry.metrics.Histogram; +import org.opensearch.telemetry.metrics.MetricsRegistry; + +import java.io.IOException; + +/** + * No-op {@link MetricsRegistry} + * {@opensearch.internal} + */ +@InternalApi +public class NoopMetricsRegistry implements MetricsRegistry { + + /** + * No-op MetricsRegistry instance + */ + public final static NoopMetricsRegistry INSTANCE = new NoopMetricsRegistry(); + + private NoopMetricsRegistry() {} + + @Override + public Counter createCounter(String name, String description, String unit) { + return NoopCounter.INSTANCE; + } + + @Override + public Counter createUpDownCounter(String name, String description, String unit) { + return NoopCounter.INSTANCE; + } + + @Override + public Histogram createHistogram(String name, String description, String unit) { + return NoopHistogram.INSTANCE; + } + + @Override + public void close() throws IOException { + + } +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/package-info.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/package-info.java new file mode 100644 index 0000000000000..7c7ed08044993 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/package-info.java @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Contains metrics related classes + * {@opensearch.internal} + */ +@InternalApi +package org.opensearch.telemetry.metrics.noop; + +import org.opensearch.common.annotation.InternalApi; diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/tags/Tags.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/tags/Tags.java new file mode 100644 index 0000000000000..f2a8764f8021d --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/tags/Tags.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics.tags; + +import org.opensearch.common.annotation.ExperimentalApi; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +/** + * Class to create tags for a meter. + * + * @opensearch.experimental + */ +@ExperimentalApi +public class Tags { + private final Map<String, Object> tagsMap; + /** + * Empty value. + */ + public final static Tags EMPTY = new Tags(Collections.emptyMap()); + + /** + * Factory method. + * @return tags. + */ + public static Tags create() { + return new Tags(new HashMap<>()); + } + + /** + * Constructor. + */ + private Tags(Map<String, Object> tagsMap) { + this.tagsMap = tagsMap; + } + + /** + * Add String tag. + * @param key key + * @param value value + * @return Same instance. + */ + public Tags addTag(String key, String value) { + Objects.requireNonNull(value, "value cannot be null"); + tagsMap.put(key, value); + return this; + } + + /** + * Add long tag. + * @param key key + * @param value value + * @return Same instance.
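Reviewer note: one likely wiring pattern for the no-op registry above (the flag and the concrete registry are hypothetical; this change only supplies the sentinel) is to fall back to NoopMetricsRegistry.INSTANCE when metrics are disabled, so call sites never branch on null:

import org.opensearch.telemetry.metrics.MetricsRegistry;
import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry;

class MetricsWiringSketch {
    static MetricsRegistry select(boolean metricsEnabled, MetricsRegistry concreteRegistry) {
        // Counters and histograms handed out by the no-op registry accept values and drop
        // them, so instrumented code runs unchanged when telemetry is off.
        return metricsEnabled ? concreteRegistry : NoopMetricsRegistry.INSTANCE;
    }
}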
+ */ + public Tags addTag(String key, long value) { + tagsMap.put(key, value); + return this; + } + + /** + * Add double tag. + * @param key key + * @param value value + * @return Same instance. + */ + public Tags addTag(String key, double value) { + tagsMap.put(key, value); + return this; + } + + /** + * Add boolean tag. + * @param key key + * @param value value + * @return Same instance. + */ + public Tags addTag(String key, boolean value) { + tagsMap.put(key, value); + return this; + } + + /** + * Returns the tags map. + * @return tags map + */ + public Map<String, ?> getTagsMap() { + return Collections.unmodifiableMap(tagsMap); + } + +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/tags/package-info.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/tags/package-info.java new file mode 100644 index 0000000000000..70bc9be992b32 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/tags/package-info.java @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Contains metrics related classes + * @opensearch.experimental + */ +@ExperimentalApi +package org.opensearch.telemetry.metrics.tags; + +import org.opensearch.common.annotation.ExperimentalApi; diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/AbstractSpan.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/AbstractSpan.java index 150a32b14d0f8..6919995e0ef65 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/AbstractSpan.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/AbstractSpan.java @@ -8,11 +8,14 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.common.annotation.InternalApi; + /** * Base span * * @opensearch.internal */ +@InternalApi public abstract class AbstractSpan implements Span { /** diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultScopedSpan.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultScopedSpan.java new file mode 100644 index 0000000000000..dc1a775839adb --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultScopedSpan.java @@ -0,0 +1,92 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +import org.opensearch.common.annotation.InternalApi; + +import java.util.Objects; + +/** + * Default implementation of {@link ScopedSpan} + * + * @opensearch.internal + */ +@InternalApi +final class DefaultScopedSpan implements ScopedSpan { + + private final Span span; + + private final SpanScope spanScope; + + /** + * Creates Scope instance for the given span + * + * @param span underlying span + * @param spanScope span scope.
+ */ + public DefaultScopedSpan(Span span, SpanScope spanScope) { + this.span = Objects.requireNonNull(span); + this.spanScope = Objects.requireNonNull(spanScope); + } + + @Override + public void addAttribute(String key, String value) { + span.addAttribute(key, value); + } + + @Override + public void addAttribute(String key, long value) { + span.addAttribute(key, value); + } + + @Override + public void addAttribute(String key, double value) { + span.addAttribute(key, value); + } + + @Override + public void addAttribute(String key, boolean value) { + span.addAttribute(key, value); + } + + @Override + public void addEvent(String event) { + span.addEvent(event); + } + + @Override + public void setError(Exception exception) { + span.setError(exception); + } + + /** + * Ends the span and then closes the attached scope + */ + @Override + public void close() { + span.endSpan(); + spanScope.close(); + } + + /** + * Returns span. + * @return the span associated with this scope + */ + Span getSpan() { + return span; + } + + /** + * Returns {@link SpanScope} + * @return spanScope + */ + SpanScope getSpanScope() { + return spanScope; + } +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultSpanScope.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultSpanScope.java index 356b72187de74..93600da510977 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultSpanScope.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultSpanScope.java @@ -8,65 +8,81 @@ package org.opensearch.telemetry.tracing; -import java.util.function.Consumer; +import org.opensearch.common.annotation.InternalApi; + +import java.util.Objects; /** - * Default implementation of Scope + * Default implementation for {@link SpanScope} * * @opensearch.internal */ -final class DefaultSpanScope implements SpanScope { - +@InternalApi +class DefaultSpanScope implements SpanScope { private final Span span; - - private final Consumer<Span> onCloseConsumer; + private final SpanScope previousSpanScope; + private final Span beforeSpan; + private static final ThreadLocal<SpanScope> spanScopeThreadLocal = new ThreadLocal<>(); + private final TracerContextStorage<String, Span> tracerContextStorage; /** - * Creates Scope instance for the given span - * - * @param span underlying span - * @param onCloseConsumer consumer to execute on scope close + * Constructor + * @param span span + * @param previousSpanScope before attached span scope. */ - public DefaultSpanScope(Span span, Consumer<Span> onCloseConsumer) { - this.span = span; - this.onCloseConsumer = onCloseConsumer; + private DefaultSpanScope( + Span span, + final Span beforeSpan, + SpanScope previousSpanScope, + TracerContextStorage<String, Span> tracerContextStorage + ) { + this.span = Objects.requireNonNull(span); + this.beforeSpan = beforeSpan; + this.previousSpanScope = previousSpanScope; + this.tracerContextStorage = tracerContextStorage; } - @Override - public void addSpanAttribute(String key, String value) { - span.addAttribute(key, value); + /** + * Creates the SpanScope object. + * @param span span. + * @param tracerContextStorage tracer context storage.
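Reviewer note: the ordering in DefaultScopedSpan.close() matters; the span is ended first, then the scope is released, so try-with-resources unwinds nested spans in LIFO order. A sketch of the intended call pattern (the tracer variable and span names are assumptions; Tracer.startScopedSpan appears later in this change):

import org.opensearch.telemetry.tracing.ScopedSpan;
import org.opensearch.telemetry.tracing.SpanCreationContext;
import org.opensearch.telemetry.tracing.Tracer;

class ScopedSpanSketch {
    static void handle(Tracer tracer) {
        try (ScopedSpan parent = tracer.startScopedSpan(SpanCreationContext.internal().name("parent"))) {
            parent.addAttribute("shard.id", 1L);
            try (ScopedSpan child = tracer.startScopedSpan(SpanCreationContext.internal().name("child"))) {
                child.addEvent("processing"); // child is the current span here
            } // child ends; the parent scope is restored as current
        }
    }
}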
+ * @return SpanScope spanScope + */ + public static SpanScope create(Span span, TracerContextStorage<String, Span> tracerContextStorage) { + final SpanScope beforeSpanScope = spanScopeThreadLocal.get(); + final Span beforeSpan = tracerContextStorage.get(TracerContextStorage.CURRENT_SPAN); + SpanScope newSpanScope = new DefaultSpanScope(span, beforeSpan, beforeSpanScope, tracerContextStorage); + return newSpanScope; } @Override - public void addSpanAttribute(String key, long value) { - span.addAttribute(key, value); + public void close() { + detach(); } @Override - public void addSpanAttribute(String key, double value) { - span.addAttribute(key, value); + public SpanScope attach() { + spanScopeThreadLocal.set(this); + tracerContextStorage.put(TracerContextStorage.CURRENT_SPAN, this.span); + return this; } - @Override - public void addSpanAttribute(String key, boolean value) { - span.addAttribute(key, value); + private void detach() { + spanScopeThreadLocal.set(previousSpanScope); + if (beforeSpan != null) { + tracerContextStorage.put(TracerContextStorage.CURRENT_SPAN, beforeSpan); + } else { + tracerContextStorage.put(TracerContextStorage.CURRENT_SPAN, null); + } } @Override - public void addSpanEvent(String event) { - span.addEvent(event); + public Span getSpan() { + return span; } - @Override - public void setError(Exception exception) { - span.setError(exception); + static SpanScope getCurrentSpanScope() { + return spanScopeThreadLocal.get(); } - /** - * Executes the runnable to end the scope - */ - @Override - public void close() { - onCloseConsumer.accept(span); - } } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java index ea59eec645420..8f1a26d99e725 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java @@ -8,8 +8,13 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.common.annotation.InternalApi; + import java.io.Closeable; import java.io.IOException; +import java.util.Collection; +import java.util.Map; +import java.util.Optional; /** * @@ -18,8 +23,12 @@ * * @opensearch.internal */ +@InternalApi class DefaultTracer implements Tracer { - static final String THREAD_NAME = "th_name"; + /** + * Current thread name. 
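Reviewer note: DefaultSpanScope deliberately separates thread attachment from span lifetime; close() only restores the previous scope and previous current span, it never ends the span. A sketch of the manual pattern this enables for work that outlives a single block (variable names assumed):

import org.opensearch.telemetry.tracing.Span;
import org.opensearch.telemetry.tracing.SpanScope;
import org.opensearch.telemetry.tracing.Tracer;

class ManualScopeSketch {
    static void run(Tracer tracer, Span span) {
        try (SpanScope scope = tracer.withSpanInScope(span)) {
            // span is this thread's current span inside this block
        } finally {
            span.endSpan(); // ending the span is a separate, explicit step
        }
    }
}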
+ */ + static final String THREAD_NAME = "thread.name"; private final TracingTelemetry tracingTelemetry; private final TracerContextStorage<String, Span> tracerContextStorage; @@ -36,11 +45,16 @@ public DefaultTracer(TracingTelemetry tracingTelemetry, TracerContextStorage<Str } @Override - public SpanScope startSpan(String spanName) { - Span span = createSpan(spanName, getCurrentSpan()); - setCurrentSpanInContext(span); + public Span startSpan(SpanCreationContext context) { + Span parentSpan = null; + if (context.getParent() != null) { + parentSpan = context.getParent().getSpan(); + } else { + parentSpan = getCurrentSpanInternal(); + } + Span span = createSpan(context, parentSpan); addDefaultAttributes(span); - return new DefaultSpanScope(span, (scopeSpan) -> endSpan(scopeSpan)); + return span; } @Override @@ -48,24 +62,35 @@ public void close() throws IOException { ((Closeable) tracingTelemetry).close(); } - // Visible for testing - Span getCurrentSpan() { + private Span getCurrentSpanInternal() { return tracerContextStorage.get(TracerContextStorage.CURRENT_SPAN); } - private void endSpan(Span span) { - if (span != null) { - span.endSpan(); - setCurrentSpanInContext(span.getParentSpan()); - } + @Override + public SpanContext getCurrentSpan() { + final Span currentSpan = tracerContextStorage.get(TracerContextStorage.CURRENT_SPAN); + return (currentSpan == null) ? null : new SpanContext(currentSpan); + } + + @Override + public ScopedSpan startScopedSpan(SpanCreationContext spanCreationContext) { + Span span = startSpan(spanCreationContext); + SpanScope spanScope = withSpanInScope(span); + return new DefaultScopedSpan(span, spanScope); + } + + @Override + public SpanScope withSpanInScope(Span span) { + return DefaultSpanScope.create(span, tracerContextStorage).attach(); } - private Span createSpan(String spanName, Span parentSpan) { - return tracingTelemetry.createSpan(spanName, parentSpan); + @Override + public boolean isRecording() { + return true; } - private void setCurrentSpanInContext(Span span) { - tracerContextStorage.put(TracerContextStorage.CURRENT_SPAN, span); + private Span createSpan(SpanCreationContext spanCreationContext, Span parentSpan) { + return tracingTelemetry.createSpan(spanCreationContext, parentSpan); } /** @@ -76,4 +101,10 @@ protected void addDefaultAttributes(Span span) { span.addAttribute(THREAD_NAME, Thread.currentThread().getName()); } + @Override + public Span startSpan(SpanCreationContext spanCreationContext, Map<String, Collection<String>> headers) { + Optional<Span> propagatedSpan = tracingTelemetry.getContextPropagator().extractFromHeaders(headers); + return startSpan(spanCreationContext.parent(propagatedSpan.map(SpanContext::new).orElse(null))); + } + } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/ScopedSpan.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/ScopedSpan.java new file mode 100644 index 0000000000000..b320bc415de29 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/ScopedSpan.java @@ -0,0 +1,78 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.telemetry.tracing.noop.NoopScopedSpan; + +/** + * An auto-closeable that represents scoped span. 
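Reviewer note: because parent resolution in DefaultTracer.startSpan() prefers an explicit parent over the thread's current span, a caller can capture the current SpanContext and hand it to another thread. A sketch under those assumptions (the executor is hypothetical, and SpanCreationContext.parent(...) is defined just below):

import java.util.concurrent.ExecutorService;

import org.opensearch.telemetry.tracing.ScopedSpan;
import org.opensearch.telemetry.tracing.SpanContext;
import org.opensearch.telemetry.tracing.SpanCreationContext;
import org.opensearch.telemetry.tracing.Tracer;

class AsyncParentSketch {
    static void submit(Tracer tracer, ExecutorService executor) {
        final SpanContext parent = tracer.getCurrentSpan(); // may be null if nothing is in scope
        executor.submit(() -> {
            try (ScopedSpan child = tracer.startScopedSpan(SpanCreationContext.internal().name("async-step").parent(parent))) {
                // parented explicitly instead of relying on the worker thread's (empty) context
            }
        });
    }
}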
+ * It provides interface for all the span operations. + * + * @opensearch.experimental + */ +@ExperimentalApi +public interface ScopedSpan extends AutoCloseable { + /** + * No-op Scope implementation + */ + ScopedSpan NO_OP = new NoopScopedSpan(); + + /** + * Adds string attribute to the {@link Span}. + * + * @param key attribute key + * @param value attribute value + */ + void addAttribute(String key, String value); + + /** + * Adds long attribute to the {@link Span}. + * + * @param key attribute key + * @param value attribute value + */ + void addAttribute(String key, long value); + + /** + * Adds double attribute to the {@link Span}. + * + * @param key attribute key + * @param value attribute value + */ + void addAttribute(String key, double value); + + /** + * Adds boolean attribute to the {@link Span}. + * + * @param key attribute key + * @param value attribute value + */ + void addAttribute(String key, boolean value); + + /** + * Adds an event to the {@link Span}. + * + * @param event event name + */ + void addEvent(String event); + + /** + * Records error in the span + * + * @param exception exception to be recorded + */ + void setError(Exception exception); + + /** + * closes the scope + */ + @Override + void close(); +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Span.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Span.java index 6cb1c8234f3de..00b64492c281e 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Span.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Span.java @@ -8,13 +8,16 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.common.annotation.ExperimentalApi; + /** * An interface that represents a tracing span. * Spans are created by the Tracer.startSpan method. * Span must be ended by calling SpanScope.close which internally calls Span's endSpan. * - * @opensearch.internal + * @opensearch.experimental */ +@ExperimentalApi public interface Span { /** diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanContext.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanContext.java new file mode 100644 index 0000000000000..e5e62c795e5d0 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanContext.java @@ -0,0 +1,49 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +import org.opensearch.common.annotation.ExperimentalApi; + +/** + * Wrapped Span will be exposed to the code outside of tracing package for sharing the {@link Span} without having access to + * its properties. + * + * @opensearch.experimental + */ +@ExperimentalApi +public final class SpanContext { + private final Span span; + + /** + * Constructor. + * @param span span to be wrapped. 
+ */ + public SpanContext(Span span) { + this.span = span; + } + + Span getSpan() { + return span; + } + + /** + * Sets the error for the current span behind this context + * @param cause error + */ + public void setError(final Exception cause) { + span.setError(cause); + } + + /** + * Ends the current span + */ + public void endSpan() { + span.endSpan(); + } +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanCreationContext.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanCreationContext.java new file mode 100644 index 0000000000000..6af7c440f8de9 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanCreationContext.java @@ -0,0 +1,122 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.telemetry.tracing.attributes.Attributes; + +/** + * Context for span details. + * + * @opensearch.experimental + */ +@ExperimentalApi +public final class SpanCreationContext { + private String spanName; + private Attributes attributes; + private SpanKind spanKind = SpanKind.INTERNAL; + private SpanContext parent; + + /** + * Constructor. + */ + private SpanCreationContext() {} + + /** + * Sets the span type to server. + * @return spanCreationContext + */ + public static SpanCreationContext server() { + SpanCreationContext spanCreationContext = new SpanCreationContext(); + spanCreationContext.spanKind = SpanKind.SERVER; + return spanCreationContext; + } + + /** + * Sets the span type to client. + * @return spanCreationContext + */ + public static SpanCreationContext client() { + SpanCreationContext spanCreationContext = new SpanCreationContext(); + spanCreationContext.spanKind = SpanKind.CLIENT; + return spanCreationContext; + } + + /** + * Sets the span type to internal. + * @return spanCreationContext + */ + public static SpanCreationContext internal() { + SpanCreationContext spanCreationContext = new SpanCreationContext(); + spanCreationContext.spanKind = SpanKind.INTERNAL; + return spanCreationContext; + } + + /** + * Sets the span name. + * @param spanName span name. + * @return spanCreationContext + */ + public SpanCreationContext name(String spanName) { + this.spanName = spanName; + return this; + } + + /** + * Sets the span attributes. + * @param attributes attributes. + * @return spanCreationContext + */ + public SpanCreationContext attributes(Attributes attributes) { + this.attributes = attributes; + return this; + } + + /** + * Sets the parent for the span + * @param parent parent span context + * @return spanCreationContext + */ + public SpanCreationContext parent(SpanContext parent) { + this.parent = parent; + return this; + } + + /** + * Returns the span name. + * @return span name + */ + public String getSpanName() { + return spanName; + } + + /** + * Returns the span attributes. + * @return attributes. + */ + public Attributes getAttributes() { + return attributes; + } + + /** + * Returns the span kind. + * @return span kind. + */ + public SpanKind getSpanKind() { + return spanKind; + } + + /** + * Returns the parent span + * @return parent.
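Reviewer note: the fluent builder reads naturally at call sites. A small sketch (the span name and attribute values are illustrative, and Attributes is defined later in this change):

import org.opensearch.telemetry.tracing.SpanCreationContext;
import org.opensearch.telemetry.tracing.attributes.Attributes;

class SpanCreationContextSketch {
    static SpanCreationContext searchContext() {
        // SERVER kind, a descriptive name, and request-scoped attributes in one expression
        return SpanCreationContext.server()
            .name("GET /_search")
            .attributes(Attributes.create().addAttribute("http.method", "GET"));
    }
}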
+ */ + public SpanContext getParent() { + return parent; + } +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanKind.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanKind.java new file mode 100644 index 0000000000000..d674bb2c866f2 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanKind.java @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +import org.opensearch.common.annotation.PublicApi; + +/** + * Type of Span. + */ +@PublicApi(since = "2.11.0") +public enum SpanKind { + /** + * Span represents the client side code. + */ + CLIENT, + /** + * Span represents the server side code. + */ + SERVER, + + /** + * Span represents the internal calls. This is the default value of a span type. + */ + INTERNAL; +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanReference.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanReference.java index 180136ecf7a57..945682c3df390 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanReference.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanReference.java @@ -8,11 +8,14 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.common.annotation.InternalApi; + /** * Wrapper class to hold reference of Span * * @opensearch.internal */ +@InternalApi final class SpanReference { private Span span; diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanScope.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanScope.java index cf67165d889bc..8bccd5774a340 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanScope.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanScope.java @@ -8,67 +8,34 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.telemetry.tracing.noop.NoopSpanScope; /** * An auto-closeable that represents scope of the span. - * It provides interface for all the span operations. + * + * @opensearch.experimental */ +@ExperimentalApi public interface SpanScope extends AutoCloseable { + /** * No-op Scope implementation */ SpanScope NO_OP = new NoopSpanScope(); - /** - * Adds string attribute to the {@link Span}. - * - * @param key attribute key - * @param value attribute value - */ - void addSpanAttribute(String key, String value); - - /** - * Adds long attribute to the {@link Span}. - * - * @param key attribute key - * @param value attribute value - */ - void addSpanAttribute(String key, long value); - - /** - * Adds double attribute to the {@link Span}. - * - * @param key attribute key - * @param value attribute value - */ - void addSpanAttribute(String key, double value); - - /** - * Adds boolean attribute to the {@link Span}. - * - * @param key attribute key - * @param value attribute value - */ - void addSpanAttribute(String key, boolean value); - - /** - * Adds an event to the {@link Span}. 
- * - * @param event event name - */ - void addSpanEvent(String event); + @Override + void close(); /** - * Records error in the span - * - * @param exception exception to be recorded + * Attaches span to the {@link SpanScope} + * @return spanScope */ - void setError(Exception exception); + SpanScope attach(); /** - * closes the scope + * Returns span attached with the {@link SpanScope} + * @return span. */ - @Override - void close(); + Span getSpan(); } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java index d422b58aa0a9f..9b49ca7668992 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java @@ -8,22 +8,55 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.telemetry.tracing.transport.TransportTracer; + import java.io.Closeable; /** * Tracer is the interface used to create a {@link Span} * It automatically handles the context propagation between threads, tasks, nodes etc. - * + * <p> * All methods on the Tracer object are multi-thread safe. + * + * @opensearch.experimental */ -public interface Tracer extends Closeable { - +@ExperimentalApi +public interface Tracer extends TransportTracer, Closeable { /** - * Starts the {@link Span} with given name + * Starts the {@link Span} with given {@link SpanCreationContext} * - * @param spanName span name + * @param context span context + * @return span, must be closed. + */ + Span startSpan(SpanCreationContext context); + + /** + * Returns the current span. + * @return current wrapped span. + */ + SpanContext getCurrentSpan(); + + /** + * Starts the span and scopes it. This must be used for scenarios where the {@link SpanScope} and {@link Span} lifecycles + * are the same and end within the same thread where they were created. + * @param spanCreationContext span creation context * @return scope of the span, must be closed with explicit close or with try-with-resource */ - SpanScope startSpan(String spanName); + ScopedSpan startScopedSpan(SpanCreationContext spanCreationContext); + + /** + * Creates the Span Scope for the current thread. It's mandatory to scope the span just after creation so that it will + * automatically manage the attach/detach to the current thread.
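Reviewer note: isRecording() lets instrumented code skip work whose only purpose is to enrich traces. A guard sketch (buildSummary() is a hypothetical expensive helper):

import org.opensearch.telemetry.tracing.Span;
import org.opensearch.telemetry.tracing.Tracer;

class RecordingGuardSketch {
    static void enrich(Tracer tracer, Span span) {
        if (tracer.isRecording()) {
            span.addAttribute("query.summary", buildSummary()); // only pay this cost when traces are kept
        }
    }

    private static String buildSummary() {
        return "summary"; // stand-in for an expensive computation
    }
}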
+ * @param span span to be scoped + * @return ScopedSpan + */ + SpanScope withSpanInScope(Span span); + + /** + * Tells if the traces are being recorded or not + * @return boolean + */ + boolean isRecording(); } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracerContextStorage.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracerContextStorage.java index d85b404b0ce41..958d054948483 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracerContextStorage.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracerContextStorage.java @@ -8,6 +8,8 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.common.annotation.InternalApi; + /** * Storage interface used for storing tracing context * @param <K> key type @@ -15,6 +17,7 @@ * * @opensearch.internal */ +@InternalApi public interface TracerContextStorage<K, V> { /** * Key for storing current span diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingContextPropagator.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingContextPropagator.java index 3e4a377d33a3d..d7d48d1db10d6 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingContextPropagator.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingContextPropagator.java @@ -8,14 +8,19 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.common.annotation.ExperimentalApi; + +import java.util.Collection; import java.util.Map; +import java.util.Optional; import java.util.function.BiConsumer; /** * Interface defining the tracing related context propagation * - * @opensearch.internal + * @opensearch.experimental */ +@ExperimentalApi public interface TracingContextPropagator { /** @@ -23,7 +28,15 @@ public interface TracingContextPropagator { * @param props properties * @return current span */ - Span extract(Map<String, String> props); + Optional<Span> extract(Map<String, String> props); + + /** + * Extracts current span from HTTP headers. + * + * @param headers request headers to extract the context from + * @return current span + */ + Optional<Span> extractFromHeaders(Map<String, Collection<String>> headers); /** * Injects tracing context diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingTelemetry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingTelemetry.java index bce955fc2d99e..f04a505088424 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingTelemetry.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingTelemetry.java @@ -8,22 +8,26 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.common.annotation.ExperimentalApi; + import java.io.Closeable; /** * Interface for tracing telemetry providers * - * @opensearch.internal + * @opensearch.experimental */ +@ExperimentalApi public interface TracingTelemetry extends Closeable { /** * Creates span with provided arguments - * @param spanName name of the span - * @param parentSpan span's parent span + * + * @param spanCreationContext span creation context. + * @param parentSpan parent span. 
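Reviewer note: on the receiving side, the headers overload of startSpan() (declared on the TransportTracer parent interface, which is not shown in this section) pairs with extractFromHeaders(): the propagator recovers the remote parent, if any, and the new span is parented to it. A sketch with an assumed handler shape:

import java.util.Collection;
import java.util.Map;

import org.opensearch.telemetry.tracing.Span;
import org.opensearch.telemetry.tracing.SpanCreationContext;
import org.opensearch.telemetry.tracing.Tracer;

class InboundRequestSketch {
    static Span onRequest(Tracer tracer, Map<String, Collection<String>> headers) {
        // If the headers carry a trace context, the returned span continues that trace;
        // otherwise it starts a new one.
        return tracer.startSpan(SpanCreationContext.server().name("internal:transport/handler"), headers);
    }
}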
* @return span instance */ - Span createSpan(String spanName, Span parentSpan); + Span createSpan(SpanCreationContext spanCreationContext, Span parentSpan); /** * provides tracing context propagator @@ -31,9 +35,4 @@ public interface TracingTelemetry extends Closeable { */ TracingContextPropagator getContextPropagator(); - /** - * closes the resource - */ - void close(); - } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/attributes/Attributes.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/attributes/Attributes.java new file mode 100644 index 0000000000000..6dcc9c5468b38 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/attributes/Attributes.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing.attributes; + +import org.opensearch.common.annotation.ExperimentalApi; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +/** + * Class to create attributes for a span. + * + * @opensearch.experimental + */ +@ExperimentalApi +public class Attributes { + private final Map<String, Object> attributesMap; + /** + * Empty value. + */ + public final static Attributes EMPTY = new Attributes(Collections.emptyMap()); + + /** + * Factory method. + * @return attributes. + */ + public static Attributes create() { + return new Attributes(new HashMap<>()); + } + + /** + * Constructor. + */ + private Attributes(Map<String, Object> attributesMap) { + this.attributesMap = attributesMap; + } + + /** + * Add String attribute. + * @param key key + * @param value value + * @return Same instance. + */ + public Attributes addAttribute(String key, String value) { + Objects.requireNonNull(value, "value cannot be null"); + attributesMap.put(key, value); + return this; + } + + /** + * Add long attribute. + * @param key key + * @param value value + * @return Same instance. + */ + public Attributes addAttribute(String key, long value) { + attributesMap.put(key, value); + return this; + } + + /** + * Add double attribute. + * @param key key + * @param value value + * @return Same instance. + */ + public Attributes addAttribute(String key, double value) { + attributesMap.put(key, value); + return this; + } + + /** + * Add boolean attribute. + * @param key key + * @param value value + * @return Same instance. + */ + public Attributes addAttribute(String key, boolean value) { + attributesMap.put(key, value); + return this; + } + + /** + * Returns the attribute map. + * @return attributes map + */ + public Map<String, ?> getAttributesMap() { + return Collections.unmodifiableMap(attributesMap); + } + +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/attributes/package-info.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/attributes/package-info.java new file mode 100644 index 0000000000000..ccd56786f63ef --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/attributes/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +/** + * Contains attributes management + */ +package org.opensearch.telemetry.tracing.attributes; diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopScopedSpan.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopScopedSpan.java new file mode 100644 index 0000000000000..fc296d3689645 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopScopedSpan.java @@ -0,0 +1,61 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing.noop; + +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.telemetry.tracing.ScopedSpan; + +/** + * No-op implementation of SpanScope + * + * @opensearch.internal + */ +@InternalApi +public final class NoopScopedSpan implements ScopedSpan { + + /** + * No-args constructor + */ + public NoopScopedSpan() {} + + @Override + public void addAttribute(String key, String value) { + + } + + @Override + public void addAttribute(String key, long value) { + + } + + @Override + public void addAttribute(String key, double value) { + + } + + @Override + public void addAttribute(String key, boolean value) { + + } + + @Override + public void addEvent(String event) { + + } + + @Override + public void setError(Exception exception) { + + } + + @Override + public void close() { + + } +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopSpan.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopSpan.java new file mode 100644 index 0000000000000..f41e11017d155 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopSpan.java @@ -0,0 +1,85 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.tracing.noop; + +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.telemetry.tracing.Span; + +/** + * No-op implementation of {@link org.opensearch.telemetry.tracing.Span} + * + * @opensearch.internal + */ +@InternalApi +public class NoopSpan implements Span { + + /** + * No-op Span instance + */ + public final static NoopSpan INSTANCE = new NoopSpan(); + + private NoopSpan() { + + } + + @Override + public void endSpan() { + + } + + @Override + public Span getParentSpan() { + return null; + } + + @Override + public String getSpanName() { + return "noop-span"; + } + + @Override + public void addAttribute(String key, String value) { + + } + + @Override + public void addAttribute(String key, Long value) { + + } + + @Override + public void addAttribute(String key, Double value) { + + } + + @Override + public void addAttribute(String key, Boolean value) { + + } + + @Override + public void setError(Exception exception) { + + } + + @Override + public void addEvent(String event) { + + } + + @Override + public String getTraceId() { + return "noop-trace-id"; + } + + @Override + public String getSpanId() { + return "noop-span-id"; + } +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopSpanScope.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopSpanScope.java index a1d16d1d80d00..bb04a67657d6e 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopSpanScope.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopSpanScope.java @@ -8,52 +8,36 @@ package org.opensearch.telemetry.tracing.noop; +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.telemetry.tracing.Span; import org.opensearch.telemetry.tracing.SpanScope; /** - * No-op implementation of SpanScope + * No-op implementation of {@link SpanScope} * * @opensearch.internal */ -public final class NoopSpanScope implements SpanScope { - +@InternalApi +public class NoopSpanScope implements SpanScope { /** - * No-args constructor + * Constructor. 
*/ - public NoopSpanScope() {} - - @Override - public void addSpanAttribute(String key, String value) { - - } - - @Override - public void addSpanAttribute(String key, long value) { + public NoopSpanScope() { } @Override - public void addSpanAttribute(String key, double value) { - - } - - @Override - public void addSpanAttribute(String key, boolean value) { - - } - - @Override - public void addSpanEvent(String event) { + public void close() { } @Override - public void setError(Exception exception) { - + public SpanScope attach() { + return this; } @Override - public void close() { - + public Span getSpan() { + return NoopSpan.INSTANCE; } } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java index a1768d7d59116..c57eaccf1f3df 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java @@ -8,14 +8,23 @@ package org.opensearch.telemetry.tracing.noop; +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.telemetry.tracing.ScopedSpan; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanContext; +import org.opensearch.telemetry.tracing.SpanCreationContext; import org.opensearch.telemetry.tracing.SpanScope; import org.opensearch.telemetry.tracing.Tracer; +import java.util.Collection; +import java.util.Map; + /** * No-op implementation of Tracer * * @opensearch.internal */ +@InternalApi public class NoopTracer implements Tracer { /** @@ -26,12 +35,37 @@ public class NoopTracer implements Tracer { private NoopTracer() {} @Override - public SpanScope startSpan(String spanName) { + public Span startSpan(SpanCreationContext context) { + return NoopSpan.INSTANCE; + } + + @Override + public SpanContext getCurrentSpan() { + return new SpanContext(NoopSpan.INSTANCE); + } + + @Override + public ScopedSpan startScopedSpan(SpanCreationContext spanCreationContext) { + return ScopedSpan.NO_OP; + } + + @Override + public SpanScope withSpanInScope(Span span) { return SpanScope.NO_OP; } + @Override + public boolean isRecording() { + return false; + } + @Override public void close() { } + + @Override + public Span startSpan(SpanCreationContext spanCreationContext, Map<String, Collection<String>> header) { + return NoopSpan.INSTANCE; + } } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/runnable/TraceableRunnable.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/runnable/TraceableRunnable.java new file mode 100644 index 0000000000000..8a61dd70d6d54 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/runnable/TraceableRunnable.java @@ -0,0 +1,41 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.telemetry.tracing.runnable;
+
+import org.opensearch.telemetry.tracing.ScopedSpan;
+import org.opensearch.telemetry.tracing.SpanCreationContext;
+import org.opensearch.telemetry.tracing.Tracer;
+
+/**
+ * Wraps the runnable and adds instrumentation to trace the {@link Runnable}
+ */
+public class TraceableRunnable implements Runnable {
+ private final Runnable runnable;
+ private final SpanCreationContext spanCreationContext;
+ private final Tracer tracer;
+
+ /**
+ * Constructor.
+ * @param tracer tracer
+ * @param spanCreationContext span creation context
+ * @param runnable runnable.
+ */
+ public TraceableRunnable(Tracer tracer, SpanCreationContext spanCreationContext, Runnable runnable) {
+ this.tracer = tracer;
+ this.spanCreationContext = spanCreationContext;
+ this.runnable = runnable;
+ }
+
+ @Override
+ public void run() {
+ try (ScopedSpan spanScope = tracer.startScopedSpan(spanCreationContext)) {
+ runnable.run();
+ }
+ }
+}
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/runnable/package-info.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/runnable/package-info.java
new file mode 100644
index 0000000000000..9f696a4ac573e
--- /dev/null
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/runnable/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * Contains tracing related classes
+ */
+package org.opensearch.telemetry.tracing.runnable;
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/transport/TransportTracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/transport/TransportTracer.java
new file mode 100644
index 0000000000000..5883d7de8e83a
--- /dev/null
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/transport/TransportTracer.java
@@ -0,0 +1,36 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.tracing.transport;
+
+import org.opensearch.common.annotation.ExperimentalApi;
+import org.opensearch.telemetry.tracing.Span;
+import org.opensearch.telemetry.tracing.SpanCreationContext;
+
+import java.util.Collection;
+import java.util.Map;
+
+/**
+ * TransportTracer helps in creating a {@link Span} which reads the incoming tracing information
+ * from the HTTP or TCP transport headers and propagates the span accordingly.
+ * <p>
+ * All methods on the Tracer object are thread-safe.
+ *
+ * @opensearch.experimental
+ */
+@ExperimentalApi
+public interface TransportTracer {
+ /**
+ * Start the span with propagating the tracing info from the HttpRequest header.
+ *
+ * @param spanCreationContext span creation context
+ * @param headers transport headers
+ * @return the span instance
+ */
+ Span startSpan(SpanCreationContext spanCreationContext, Map<String, Collection<String>> headers);
+}
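For reference, a minimal usage sketch of the TraceableRunnable wrapper introduced above; the tracer wiring, span name, and attribute key are illustrative assumptions and not part of this change:

import org.opensearch.telemetry.tracing.SpanCreationContext;
import org.opensearch.telemetry.tracing.Tracer;
import org.opensearch.telemetry.tracing.attributes.Attributes;
import org.opensearch.telemetry.tracing.runnable.TraceableRunnable;

class TraceableRunnableSketch {
    // Wraps an arbitrary task so that it runs inside its own scoped span.
    static Runnable traced(Tracer tracer, Runnable task) {
        SpanCreationContext context = SpanCreationContext.internal()
            .name("background-task") // illustrative span name
            .attributes(Attributes.create().addAttribute("task.type", "example"));
        return new TraceableRunnable(tracer, context, task);
    }
}

Because run() opens the scoped span in a try-with-resources block, the span is ended and detached from the thread even when the wrapped task throws.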
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/transport/package-info.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/transport/package-info.java
new file mode 100644
index 0000000000000..87ffcc43184bb
--- /dev/null
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/transport/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * Contains HTTP or TCP transport related tracer capabilities
+ */
+package org.opensearch.telemetry.tracing.transport;
diff --git a/libs/telemetry/src/test/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistryTests.java b/libs/telemetry/src/test/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistryTests.java
new file mode 100644
index 0000000000000..02f126075845b
--- /dev/null
+++ b/libs/telemetry/src/test/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistryTests.java
@@ -0,0 +1,62 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.metrics;
+
+import org.opensearch.test.OpenSearchTestCase;
+
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class DefaultMetricsRegistryTests extends OpenSearchTestCase {
+
+ private MetricsTelemetry metricsTelemetry;
+ private DefaultMetricsRegistry defaultMeterRegistry;
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ metricsTelemetry = mock(MetricsTelemetry.class);
+ defaultMeterRegistry = new DefaultMetricsRegistry(metricsTelemetry);
+ }
+
+ public void testCounter() {
+ Counter mockCounter = mock(Counter.class);
+ when(defaultMeterRegistry.createCounter(any(String.class), any(String.class), any(String.class))).thenReturn(mockCounter);
+ Counter counter = defaultMeterRegistry.createCounter(
+ "org.opensearch.telemetry.metrics.DefaultMetricsRegistryTests.testCounter",
+ "test counter",
+ "1"
+ );
+ assertSame(mockCounter, counter);
+ }
+
+ public void testUpDownCounter() {
+ Counter mockCounter = mock(Counter.class);
+ when(defaultMeterRegistry.createUpDownCounter(any(String.class), any(String.class), any(String.class))).thenReturn(mockCounter);
+ Counter counter = defaultMeterRegistry.createUpDownCounter(
+ "org.opensearch.telemetry.metrics.DefaultMetricsRegistryTests.testUpDownCounter",
+ "test up-down counter",
+ "1"
+ );
+ assertSame(mockCounter, counter);
+ }
+
+ public void testHistogram() {
+ Histogram mockHistogram = mock(Histogram.class);
+ when(defaultMeterRegistry.createHistogram(any(String.class), any(String.class), any(String.class))).thenReturn(mockHistogram);
+ Histogram histogram = defaultMeterRegistry.createHistogram(
+ "org.opensearch.telemetry.metrics.DefaultMetricsRegistryTests.testHistogram",
+ "test histogram",
+ "ms"
+ );
+ assertSame(mockHistogram, histogram);
+ }
+
+}
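For reference, a minimal sketch of how a registry like the one under test might be used by a caller; the MetricsRegistry injection point and the Counter add(double) call are assumptions here, since neither appears in this diff:

import org.opensearch.telemetry.metrics.Counter;
import org.opensearch.telemetry.metrics.MetricsRegistry;

class RequestMetricsSketch {
    // Creates a named counter once; "1" is the conventional unit string for a plain count.
    static Counter requestCounter(MetricsRegistry registry) {
        return registry.createCounter("http.requests.count", "number of HTTP requests", "1");
    }

    // Increments the counter once per request; add(double) is an assumed Counter method.
    static void onRequest(Counter counter) {
        counter.add(1.0);
    }
}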
diff --git a/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultScopedSpanTests.java b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultScopedSpanTests.java
new file mode 100644
index 0000000000000..1d4871fe1419e
--- /dev/null
+++ b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultScopedSpanTests.java
@@ -0,0 +1,84 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.tracing;
+
+import org.opensearch.test.OpenSearchTestCase;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+
+public class DefaultScopedSpanTests extends OpenSearchTestCase {
+
+ @SuppressWarnings("unchecked")
+ public void testClose() {
+ Span mockSpan = mock(Span.class);
+ SpanScope mockSpanScope = mock(SpanScope.class);
+ DefaultScopedSpan defaultSpanScope = new DefaultScopedSpan(mockSpan, mockSpanScope);
+ defaultSpanScope.close();
+
+ verify(mockSpan).endSpan();
+ verify(mockSpanScope).close();
+ }
+
+ public void testAddSpanAttributeString() {
+ Span mockSpan = mock(Span.class);
+ SpanScope mockSpanScope = mock(SpanScope.class);
+ DefaultScopedSpan defaultSpanScope = new DefaultScopedSpan(mockSpan, mockSpanScope);
+ defaultSpanScope.addAttribute("key", "value");
+
+ verify(mockSpan).addAttribute("key", "value");
+ }
+
+ public void testAddSpanAttributeLong() {
+ Span mockSpan = mock(Span.class);
+ SpanScope mockSpanScope = mock(SpanScope.class);
+ DefaultScopedSpan defaultSpanScope = new DefaultScopedSpan(mockSpan, mockSpanScope);
+ defaultSpanScope.addAttribute("key", 1L);
+
+ verify(mockSpan).addAttribute("key", 1L);
+ }
+
+ public void testAddSpanAttributeDouble() {
+ Span mockSpan = mock(Span.class);
+ SpanScope mockSpanScope = mock(SpanScope.class);
+ DefaultScopedSpan defaultSpanScope = new DefaultScopedSpan(mockSpan, mockSpanScope);
+ defaultSpanScope.addAttribute("key", 1.0);
+
+ verify(mockSpan).addAttribute("key", 1.0);
+ }
+
+ public void testAddSpanAttributeBoolean() {
+ Span mockSpan = mock(Span.class);
+ SpanScope mockSpanScope = mock(SpanScope.class);
+ DefaultScopedSpan defaultSpanScope = new DefaultScopedSpan(mockSpan, mockSpanScope);
+ defaultSpanScope.addAttribute("key", true);
+
+ verify(mockSpan).addAttribute("key", true);
+ }
+
+ public void testAddEvent() {
+ Span mockSpan = mock(Span.class);
+ SpanScope mockSpanScope = mock(SpanScope.class);
+ DefaultScopedSpan defaultSpanScope = new DefaultScopedSpan(mockSpan, mockSpanScope);
+ defaultSpanScope.addEvent("eventName");
+
+ verify(mockSpan).addEvent("eventName");
+ }
+
+ public void testSetError() {
+ Span mockSpan = mock(Span.class);
+ SpanScope mockSpanScope = mock(SpanScope.class);
+ DefaultScopedSpan defaultSpanScope = new DefaultScopedSpan(mockSpan, mockSpanScope);
+ Exception ex = new Exception("error");
+ defaultSpanScope.setError(ex);
+
+ verify(mockSpan).setError(ex);
+ }
+
+}
diff --git a/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultSpanScopeTests.java b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultSpanScopeTests.java
deleted file mode 100644
index eea6b77ce6e1e..0000000000000
--- a/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultSpanScopeTests.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
-
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */ - -package org.opensearch.telemetry.tracing; - -import org.opensearch.test.OpenSearchTestCase; - -import java.util.function.Consumer; - -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; - -public class DefaultSpanScopeTests extends OpenSearchTestCase { - - @SuppressWarnings("unchecked") - public void testClose() { - Span mockSpan = mock(Span.class); - Consumer<Span> mockConsumer = mock(Consumer.class); - DefaultSpanScope defaultSpanScope = new DefaultSpanScope(mockSpan, mockConsumer); - defaultSpanScope.close(); - - verify(mockConsumer).accept(mockSpan); - } - - public void testAddSpanAttributeString() { - Span mockSpan = mock(Span.class); - DefaultSpanScope defaultSpanScope = new DefaultSpanScope(mockSpan, null); - defaultSpanScope.addSpanAttribute("key", "value"); - - verify(mockSpan).addAttribute("key", "value"); - } - - public void testAddSpanAttributeLong() { - Span mockSpan = mock(Span.class); - DefaultSpanScope defaultSpanScope = new DefaultSpanScope(mockSpan, null); - defaultSpanScope.addSpanAttribute("key", 1L); - - verify(mockSpan).addAttribute("key", 1L); - } - - public void testAddSpanAttributeDouble() { - Span mockSpan = mock(Span.class); - DefaultSpanScope defaultSpanScope = new DefaultSpanScope(mockSpan, null); - defaultSpanScope.addSpanAttribute("key", 1.0); - - verify(mockSpan).addAttribute("key", 1.0); - } - - public void testAddSpanAttributeBoolean() { - Span mockSpan = mock(Span.class); - DefaultSpanScope defaultSpanScope = new DefaultSpanScope(mockSpan, null); - defaultSpanScope.addSpanAttribute("key", true); - - verify(mockSpan).addAttribute("key", true); - } - - public void testAddEvent() { - Span mockSpan = mock(Span.class); - DefaultSpanScope defaultSpanScope = new DefaultSpanScope(mockSpan, null); - defaultSpanScope.addSpanEvent("eventName"); - - verify(mockSpan).addEvent("eventName"); - } - - public void testSetError() { - Span mockSpan = mock(Span.class); - DefaultSpanScope defaultSpanScope = new DefaultSpanScope(mockSpan, null); - Exception ex = new Exception("error"); - defaultSpanScope.setError(ex); - - verify(mockSpan).setError(ex); - } - -} diff --git a/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java index 2b7a379b0051a..2182b3ea28ac8 100644 --- a/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java +++ b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java @@ -8,14 +8,27 @@ package org.opensearch.telemetry.tracing; -import org.junit.Assert; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.node.Node; +import org.opensearch.telemetry.tracing.attributes.Attributes; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.telemetry.tracing.MockSpan; +import org.opensearch.test.telemetry.tracing.MockTracingTelemetry; +import org.opensearch.threadpool.ThreadPool; import java.io.IOException; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutorService; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; +import static org.mockito.Mockito.never; import static 
org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class DefaultTracerTests extends OpenSearchTestCase { @@ -24,31 +37,359 @@ public class DefaultTracerTests extends OpenSearchTestCase { private Span mockSpan; private Span mockParentSpan; + private ThreadPool threadPool; + private ExecutorService executorService; + private SpanCreationContext spanCreationContext; + @Override public void setUp() throws Exception { super.setUp(); + threadPool = new ThreadPool(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "default tracer tests").build()); + executorService = threadPool.executor(ThreadPool.Names.GENERIC); setupMocks(); } @Override public void tearDown() throws Exception { super.tearDown(); + executorService.shutdown(); + threadPool.shutdownNow(); } public void testCreateSpan() { DefaultTracer defaultTracer = new DefaultTracer(mockTracingTelemetry, mockTracerContextStorage); - defaultTracer.startSpan("span_name"); + defaultTracer.startSpan(spanCreationContext); + + String spanName = defaultTracer.getCurrentSpan().getSpan().getSpanName(); + assertEquals("span_name", spanName); + assertTrue(defaultTracer.isRecording()); + } + + @SuppressWarnings("unchecked") + public void testCreateSpanWithAttributesWithMock() { + DefaultTracer defaultTracer = new DefaultTracer(mockTracingTelemetry, mockTracerContextStorage); + Attributes attributes = Attributes.create().addAttribute("name", "value"); + SpanCreationContext spanCreationContext = buildSpanCreationContext("span_name", attributes, mockParentSpan); + when(mockTracingTelemetry.createSpan(eq(spanCreationContext), eq(mockParentSpan))).thenReturn(mockSpan); + defaultTracer.startSpan(spanCreationContext); + verify(mockTracingTelemetry).createSpan(eq(spanCreationContext), eq(mockParentSpan)); + } + + @SuppressWarnings("unchecked") + public void testCreateSpanWithAttributesWithParentMock() { + DefaultTracer defaultTracer = new DefaultTracer(mockTracingTelemetry, mockTracerContextStorage); + Attributes attributes = Attributes.create().addAttribute("name", "value"); + SpanCreationContext spanCreationContext = buildSpanCreationContext("span_name", attributes, mockParentSpan); + when(mockTracingTelemetry.createSpan(eq(spanCreationContext), eq(mockParentSpan))).thenReturn(mockSpan); + defaultTracer.startSpan(spanCreationContext); + verify(mockTracingTelemetry).createSpan(eq(spanCreationContext), eq(mockParentSpan)); + verify(mockTracerContextStorage, never()).get(TracerContextStorage.CURRENT_SPAN); + } + + public void testCreateSpanWithAttributes() { + TracingTelemetry tracingTelemetry = new MockTracingTelemetry(); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + DefaultTracer defaultTracer = new DefaultTracer( + tracingTelemetry, + new ThreadContextBasedTracerContextStorage(threadContext, tracingTelemetry) + ); + + SpanCreationContext spanCreationContext = buildSpanCreationContext( + "span_name", + Attributes.create().addAttribute("key1", 1.0).addAttribute("key2", 2l).addAttribute("key3", true).addAttribute("key4", "key4"), + null + ); + + Span span = defaultTracer.startSpan(spanCreationContext); + + assertThat(defaultTracer.getCurrentSpan(), is(nullValue())); + assertEquals(1.0, ((MockSpan) span).getAttribute("key1")); + assertEquals(2l, ((MockSpan) span).getAttribute("key2")); + assertEquals(true, ((MockSpan) span).getAttribute("key3")); + assertEquals("key4", ((MockSpan) span).getAttribute("key4")); + span.endSpan(); + } + + public void testCreateSpanWithParent() { + TracingTelemetry tracingTelemetry = 
new MockTracingTelemetry(); + DefaultTracer defaultTracer = new DefaultTracer( + tracingTelemetry, + new ThreadContextBasedTracerContextStorage(new ThreadContext(Settings.EMPTY), tracingTelemetry) + ); - Assert.assertEquals("span_name", defaultTracer.getCurrentSpan().getSpanName()); + SpanCreationContext spanCreationContext = buildSpanCreationContext("span_name", null, null); + + Span span = defaultTracer.startSpan(spanCreationContext, null); + + try (final SpanScope scope = defaultTracer.withSpanInScope(span)) { + SpanContext parentSpan = defaultTracer.getCurrentSpan(); + + SpanCreationContext spanCreationContext1 = buildSpanCreationContext("span_name_1", Attributes.EMPTY, parentSpan.getSpan()); + + try (final ScopedSpan span1 = defaultTracer.startScopedSpan(spanCreationContext1)) { + assertEquals("span_name_1", defaultTracer.getCurrentSpan().getSpan().getSpanName()); + assertEquals(parentSpan.getSpan(), defaultTracer.getCurrentSpan().getSpan().getParentSpan()); + } + } finally { + span.endSpan(); + } } - public void testEndSpanByClosingScope() { + @SuppressWarnings("unchecked") + public void testCreateSpanWithContext() { DefaultTracer defaultTracer = new DefaultTracer(mockTracingTelemetry, mockTracerContextStorage); - try (SpanScope spanScope = defaultTracer.startSpan("span_name")) { - verify(mockTracerContextStorage).put(TracerContextStorage.CURRENT_SPAN, mockSpan); + Attributes attributes = Attributes.create().addAttribute("name", "value"); + SpanCreationContext spanCreationContext = buildSpanCreationContext("span_name", attributes, mockParentSpan); + when(mockTracingTelemetry.createSpan(eq(spanCreationContext), eq(mockParentSpan))).thenReturn(mockSpan); + defaultTracer.startSpan(spanCreationContext); + verify(mockTracingTelemetry).createSpan(eq(spanCreationContext), eq(mockParentSpan)); + } + + public void testCreateSpanWithNullParent() { + TracingTelemetry tracingTelemetry = new MockTracingTelemetry(); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + DefaultTracer defaultTracer = new DefaultTracer( + tracingTelemetry, + new ThreadContextBasedTracerContextStorage(threadContext, tracingTelemetry) + ); + + SpanCreationContext spanCreationContext = buildSpanCreationContext("span_name", Attributes.EMPTY, null); + + Span span = defaultTracer.startSpan(spanCreationContext); + + assertThat(defaultTracer.getCurrentSpan(), is(nullValue())); + span.endSpan(); + } + + public void testEndSpanByClosingScopedSpan() { + TracingTelemetry tracingTelemetry = new MockTracingTelemetry(); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + ThreadContextBasedTracerContextStorage spanTracerStorage = new ThreadContextBasedTracerContextStorage( + threadContext, + tracingTelemetry + ); + DefaultTracer defaultTracer = new DefaultTracer(tracingTelemetry, spanTracerStorage); + SpanCreationContext spanCreationContext = buildSpanCreationContext("span_name", Attributes.EMPTY, null); + + ScopedSpan scopedSpan = defaultTracer.startScopedSpan(spanCreationContext); + + assertEquals("span_name", defaultTracer.getCurrentSpan().getSpan().getSpanName()); + assertEquals(((DefaultScopedSpan) scopedSpan).getSpanScope(), DefaultSpanScope.getCurrentSpanScope()); + scopedSpan.close(); + assertTrue(((MockSpan) ((DefaultScopedSpan) scopedSpan).getSpan()).hasEnded()); + assertEquals(null, defaultTracer.getCurrentSpan()); + assertEquals(null, DefaultSpanScope.getCurrentSpanScope()); + + } + + public void testEndSpanByClosingScopedSpanMultiple() { + TracingTelemetry tracingTelemetry = new 
MockTracingTelemetry(); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + ThreadContextBasedTracerContextStorage spanTracerStorage = new ThreadContextBasedTracerContextStorage( + threadContext, + tracingTelemetry + ); + DefaultTracer defaultTracer = new DefaultTracer(tracingTelemetry, spanTracerStorage); + SpanCreationContext spanCreationContext = buildSpanCreationContext("span_name", Attributes.EMPTY, null); + SpanCreationContext spanCreationContext1 = buildSpanCreationContext("span_name_1", Attributes.EMPTY, null); + + ScopedSpan scopedSpan = defaultTracer.startScopedSpan(spanCreationContext); + ScopedSpan scopedSpan1 = defaultTracer.startScopedSpan(spanCreationContext1); + + assertEquals("span_name_1", defaultTracer.getCurrentSpan().getSpan().getSpanName()); + assertEquals(((DefaultScopedSpan) scopedSpan1).getSpanScope(), DefaultSpanScope.getCurrentSpanScope()); + + scopedSpan1.close(); + assertTrue(((MockSpan) ((DefaultScopedSpan) scopedSpan1).getSpan()).hasEnded()); + assertEquals("span_name", defaultTracer.getCurrentSpan().getSpan().getSpanName()); + assertEquals(((DefaultScopedSpan) scopedSpan).getSpanScope(), DefaultSpanScope.getCurrentSpanScope()); + + scopedSpan.close(); + assertTrue(((MockSpan) ((DefaultScopedSpan) scopedSpan).getSpan()).hasEnded()); + assertEquals(null, defaultTracer.getCurrentSpan()); + assertEquals(null, DefaultSpanScope.getCurrentSpanScope()); + + } + + public void testEndSpanByClosingSpanScope() { + TracingTelemetry tracingTelemetry = new MockTracingTelemetry(); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + ThreadContextBasedTracerContextStorage spanTracerStorage = new ThreadContextBasedTracerContextStorage( + threadContext, + tracingTelemetry + ); + DefaultTracer defaultTracer = new DefaultTracer(tracingTelemetry, spanTracerStorage); + SpanCreationContext spanCreationContext = buildSpanCreationContext("span_name", Attributes.EMPTY, null); + Span span = defaultTracer.startSpan(spanCreationContext); + SpanScope spanScope = defaultTracer.withSpanInScope(span); + assertEquals("span_name", defaultTracer.getCurrentSpan().getSpan().getSpanName()); + assertEquals(spanScope, DefaultSpanScope.getCurrentSpanScope()); + + span.endSpan(); + spanScope.close(); + assertEquals(null, defaultTracer.getCurrentSpan()); + assertTrue(((MockSpan) span).hasEnded()); + + } + + public void testEndSpanByClosingSpanScopeMultiple() { + TracingTelemetry tracingTelemetry = new MockTracingTelemetry(); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + ThreadContextBasedTracerContextStorage spanTracerStorage = new ThreadContextBasedTracerContextStorage( + threadContext, + tracingTelemetry + ); + DefaultTracer defaultTracer = new DefaultTracer(tracingTelemetry, spanTracerStorage); + Span span = defaultTracer.startSpan(buildSpanCreationContext("span_name", Attributes.EMPTY, null)); + Span span1 = defaultTracer.startSpan(buildSpanCreationContext("span_name_1", Attributes.EMPTY, null)); + SpanScope spanScope = defaultTracer.withSpanInScope(span); + SpanScope spanScope1 = defaultTracer.withSpanInScope(span1); + assertEquals("span_name_1", defaultTracer.getCurrentSpan().getSpan().getSpanName()); + assertEquals(spanScope1, DefaultSpanScope.getCurrentSpanScope()); + + span1.endSpan(); + spanScope1.close(); + + assertEquals("span_name", defaultTracer.getCurrentSpan().getSpan().getSpanName()); + assertEquals(spanScope, DefaultSpanScope.getCurrentSpanScope()); + assertTrue(((MockSpan) span1).hasEnded()); + assertFalse(((MockSpan) 
span).hasEnded()); + span.endSpan(); + spanScope.close(); + + assertEquals(null, defaultTracer.getCurrentSpan()); + assertEquals(null, DefaultSpanScope.getCurrentSpanScope()); + assertTrue(((MockSpan) span).hasEnded()); + assertTrue(((MockSpan) span1).hasEnded()); + + } + + /** + * 1. CreateSpan in ThreadA (NotScopedSpan) + * 2. create Async task and pass the span + * 3. Scope.close + * 4. verify the current_span is still the same on async thread as the 2 + * 5. verify the main thread has current span as null. + */ + public void testSpanAcrossThreads() { + TracingTelemetry tracingTelemetry = new MockTracingTelemetry(); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + + ThreadContextBasedTracerContextStorage spanTracerStorage = new ThreadContextBasedTracerContextStorage( + threadContext, + tracingTelemetry + ); + DefaultTracer defaultTracer = new DefaultTracer(tracingTelemetry, spanTracerStorage); + + CompletableFuture<?> asyncTask = CompletableFuture.runAsync(() -> { + // create a span + Span span = defaultTracer.startSpan(buildSpanCreationContext("span_name_t_1", Attributes.EMPTY, null)); + SpanScope spanScope = defaultTracer.withSpanInScope(span); + + CompletableFuture<?> asyncTask1 = CompletableFuture.runAsync(() -> { + Span spanT2 = defaultTracer.startSpan(buildSpanCreationContext("span_name_t_2", Attributes.EMPTY, null)); + SpanScope spanScopeT2 = defaultTracer.withSpanInScope(spanT2); + assertEquals(spanT2, defaultTracer.getCurrentSpan().getSpan()); + + spanScopeT2.close(); + spanT2.endSpan(); + assertEquals(null, defaultTracer.getCurrentSpan()); + }, executorService); + asyncTask1.join(); + spanScope.close(); + span.endSpan(); + assertEquals(null, defaultTracer.getCurrentSpan()); + }, executorService); + asyncTask.join(); + } + + public void testSpanCloseOnThread2() { + TracingTelemetry tracingTelemetry = new MockTracingTelemetry(); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + ThreadContextBasedTracerContextStorage spanTracerStorage = new ThreadContextBasedTracerContextStorage( + threadContext, + tracingTelemetry + ); + DefaultTracer defaultTracer = new DefaultTracer(tracingTelemetry, spanTracerStorage); + final Span span = defaultTracer.startSpan(buildSpanCreationContext("span_name_t1", Attributes.EMPTY, null)); + try (SpanScope spanScope = defaultTracer.withSpanInScope(span)) { + CompletableFuture<?> asyncTask = CompletableFuture.runAsync(() -> async(new ActionListener<Boolean>() { + @Override + public void onResponse(Boolean response) { + try (SpanScope s = defaultTracer.withSpanInScope(span)) { + assertEquals(span, defaultTracer.getCurrentSpan().getSpan()); + } finally { + span.endSpan(); + } + } + + @Override + public void onFailure(Exception e) { + + } + }), executorService); + assertEquals(span, defaultTracer.getCurrentSpan().getSpan()); + asyncTask.join(); } - verify(mockTracerContextStorage).put(TracerContextStorage.CURRENT_SPAN, mockParentSpan); + assertEquals(null, defaultTracer.getCurrentSpan()); + } + + private void async(ActionListener<Boolean> actionListener) { + actionListener.onResponse(true); + } + + /** + * 1. CreateSpan in ThreadA (NotScopedSpan) + * 2. create Async task and pass the span + * 3. Inside Async task start a new span. + * 4. Scope.close + * 5. Parent Scope.close + * 6. verify the current_span is still the same on async thread as the 2 + * 7. verify the main thread has current span as null. 
+ */ + public void testSpanAcrossThreadsMultipleSpans() { + TracingTelemetry tracingTelemetry = new MockTracingTelemetry(); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + + ThreadContextBasedTracerContextStorage spanTracerStorage = new ThreadContextBasedTracerContextStorage( + threadContext, + tracingTelemetry + ); + DefaultTracer defaultTracer = new DefaultTracer(tracingTelemetry, spanTracerStorage); + + CompletableFuture<?> asyncTask = CompletableFuture.runAsync(() -> { + // create a parent span + Span parentSpan = defaultTracer.startSpan(buildSpanCreationContext("p_span_name", Attributes.EMPTY, null)); + SpanScope parentSpanScope = defaultTracer.withSpanInScope(parentSpan); + // create a span + Span span = defaultTracer.startSpan(buildSpanCreationContext("span_name_t_1", Attributes.EMPTY, null)); + SpanScope spanScope = defaultTracer.withSpanInScope(span); + + CompletableFuture<?> asyncTask1 = CompletableFuture.runAsync(() -> { + Span spanT2 = defaultTracer.startSpan(buildSpanCreationContext("span_name_t_2", Attributes.EMPTY, null)); + SpanScope spanScopeT2 = defaultTracer.withSpanInScope(spanT2); + Span spanT21 = defaultTracer.startSpan(buildSpanCreationContext("span_name_t_2", Attributes.EMPTY, null)); + SpanScope spanScopeT21 = defaultTracer.withSpanInScope(spanT21); + assertEquals(spanT21, defaultTracer.getCurrentSpan().getSpan()); + spanScopeT21.close(); + spanT21.endSpan(); + + spanScopeT2.close(); + spanT2.endSpan(); + + assertEquals(null, defaultTracer.getCurrentSpan()); + }, executorService); + + asyncTask1.join(); + + spanScope.close(); + span.endSpan(); + parentSpanScope.close(); + parentSpan.endSpan(); + assertEquals(null, defaultTracer.getCurrentSpan()); + }, executorService); + asyncTask.join(); } public void testClose() throws IOException { @@ -71,7 +412,16 @@ private void setupMocks() { when(mockSpan.getParentSpan()).thenReturn(mockParentSpan); when(mockParentSpan.getSpanId()).thenReturn("parent_span_id"); when(mockParentSpan.getTraceId()).thenReturn("trace_id"); - when(mockTracerContextStorage.get(TracerContextStorage.CURRENT_SPAN)).thenReturn(mockParentSpan, mockSpan); - when(mockTracingTelemetry.createSpan("span_name", mockParentSpan)).thenReturn(mockSpan); + spanCreationContext = buildSpanCreationContext("span_name", Attributes.EMPTY, mockParentSpan); + when(mockTracerContextStorage.get(TracerContextStorage.CURRENT_SPAN)).thenReturn(mockSpan, mockParentSpan); + when(mockTracingTelemetry.createSpan(eq(spanCreationContext), eq(mockParentSpan))).thenReturn(mockSpan); + } + + private SpanCreationContext buildSpanCreationContext(String spanName, Attributes attributes, Span parentSpan) { + SpanCreationContext spanCreationContext = SpanCreationContext.internal().name(spanName).attributes(attributes); + if (parentSpan != null) { + spanCreationContext.parent(new SpanContext(parentSpan)); + } + return spanCreationContext; } } diff --git a/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/TraceableRunnableTests.java b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/TraceableRunnableTests.java new file mode 100644 index 0000000000000..4c4f762653d57 --- /dev/null +++ b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/TraceableRunnableTests.java @@ -0,0 +1,80 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.telemetry.tracing;
+
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.concurrent.ThreadContext;
+import org.opensearch.telemetry.tracing.attributes.Attributes;
+import org.opensearch.telemetry.tracing.runnable.TraceableRunnable;
+import org.opensearch.test.OpenSearchTestCase;
+import org.opensearch.test.telemetry.tracing.MockSpan;
+import org.opensearch.test.telemetry.tracing.MockTracingTelemetry;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+
+public class TraceableRunnableTests extends OpenSearchTestCase {
+
+ private final ThreadContextBasedTracerContextStorage contextStorage = new ThreadContextBasedTracerContextStorage(
+ new ThreadContext(Settings.EMPTY),
+ new MockTracingTelemetry()
+ );
+
+ public void testRunnableWithNullParent() throws Exception {
+ String spanName = "testRunnable";
+ final DefaultTracer defaultTracer = new DefaultTracer(new MockTracingTelemetry(), contextStorage);
+ final AtomicBoolean isRunnableCompleted = new AtomicBoolean(false);
+ final AtomicReference<String> spanNameCaptured = new AtomicReference<>();
+ final AtomicReference<String> attributeValue = new AtomicReference<>();
+ TraceableRunnable traceableRunnable = new TraceableRunnable(
+ defaultTracer,
+ SpanCreationContext.internal().name(spanName).attributes(Attributes.create().addAttribute("name", "value")),
+ () -> {
+ spanNameCaptured.set(defaultTracer.getCurrentSpan().getSpan().getSpanName());
+ attributeValue.set((String) ((MockSpan) defaultTracer.getCurrentSpan().getSpan()).getAttribute("name"));
+ isRunnableCompleted.set(true);
+ }
+ );
+ traceableRunnable.run();
+ assertTrue(isRunnableCompleted.get());
+ assertEquals(spanName, spanNameCaptured.get());
+ assertEquals(null, defaultTracer.getCurrentSpan());
+ assertEquals("value", attributeValue.get());
+ }
+
+ public void testRunnableWithParent() throws Exception {
+ String spanName = "testRunnable";
+ String parentSpanName = "parentSpan";
+ DefaultTracer defaultTracer = new DefaultTracer(new MockTracingTelemetry(), contextStorage);
+ ScopedSpan scopedSpan = defaultTracer.startScopedSpan(
+ SpanCreationContext.internal().name(parentSpanName).attributes(Attributes.EMPTY)
+ );
+ SpanContext parentSpanContext = defaultTracer.getCurrentSpan();
+ AtomicReference<SpanContext> currentSpan = new AtomicReference<>();
+ final AtomicBoolean isRunnableCompleted = new AtomicBoolean(false);
+ TraceableRunnable traceableRunnable = new TraceableRunnable(
+ defaultTracer,
+ SpanCreationContext.internal()
+ .name(spanName)
+ .attributes(Attributes.create().addAttribute("name", "value"))
+ .parent(parentSpanContext),
+ () -> {
+ isRunnableCompleted.set(true);
+ currentSpan.set(defaultTracer.getCurrentSpan());
+ }
+ );
+ traceableRunnable.run();
+ assertTrue(isRunnableCompleted.get());
+ assertEquals(spanName, currentSpan.get().getSpan().getSpanName());
+ assertEquals(((DefaultScopedSpan) scopedSpan).getSpan(), currentSpan.get().getSpan().getParentSpan());
+ assertEquals(((DefaultScopedSpan) scopedSpan).getSpan(), defaultTracer.getCurrentSpan().getSpan());
+ scopedSpan.close();
+ }
+}
diff --git a/libs/x-content/licenses/jackson-core-2.15.2.jar.sha1 b/libs/x-content/licenses/jackson-core-2.15.2.jar.sha1
deleted file mode 100644
index ec6781b968eed..0000000000000
--- a/libs/x-content/licenses/jackson-core-2.15.2.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-a6fe1836469a69b3ff66037c324d75fc66ef137c \
No newline at end of file diff --git a/libs/x-content/licenses/jackson-core-2.16.2.jar.sha1 b/libs/x-content/licenses/jackson-core-2.16.2.jar.sha1 new file mode 100644 index 0000000000000..68646a1e66ffc --- /dev/null +++ b/libs/x-content/licenses/jackson-core-2.16.2.jar.sha1 @@ -0,0 +1 @@ +b4f588bf070f77b604c645a7d60b71eae2e6ea09 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-cbor-2.15.2.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-cbor-2.15.2.jar.sha1 deleted file mode 100644 index 0022265a84b68..0000000000000 --- a/libs/x-content/licenses/jackson-dataformat-cbor-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -baafc85c70765594add14bd93f3efd68e1945b76 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-cbor-2.16.2.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-cbor-2.16.2.jar.sha1 new file mode 100644 index 0000000000000..828f73962c333 --- /dev/null +++ b/libs/x-content/licenses/jackson-dataformat-cbor-2.16.2.jar.sha1 @@ -0,0 +1 @@ +1a1a3036016ea2ae3061c0bb46cba6968ff7faae \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-smile-2.15.2.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-smile-2.15.2.jar.sha1 deleted file mode 100644 index 2b8caad846fec..0000000000000 --- a/libs/x-content/licenses/jackson-dataformat-smile-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -16d1dd22f7d641459ed056399d4f7df0220f1176 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-smile-2.16.2.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-smile-2.16.2.jar.sha1 new file mode 100644 index 0000000000000..e50314e0dd746 --- /dev/null +++ b/libs/x-content/licenses/jackson-dataformat-smile-2.16.2.jar.sha1 @@ -0,0 +1 @@ +209fd9ae0e6c6b233b0c14baa8f17acea71e5766 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-yaml-2.15.2.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-yaml-2.15.2.jar.sha1 deleted file mode 100644 index 4ad7255e2318f..0000000000000 --- a/libs/x-content/licenses/jackson-dataformat-yaml-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -58194ff9f51915ad6bf6b6f24818232d7566418a \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-yaml-2.16.2.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-yaml-2.16.2.jar.sha1 new file mode 100644 index 0000000000000..feb51c61bfc7c --- /dev/null +++ b/libs/x-content/licenses/jackson-dataformat-yaml-2.16.2.jar.sha1 @@ -0,0 +1 @@ +13088f6762211f264bc0ebf5467be96d8e9e3ebf \ No newline at end of file diff --git a/libs/x-content/licenses/snakeyaml-2.0.jar.sha1 b/libs/x-content/licenses/snakeyaml-2.0.jar.sha1 deleted file mode 100644 index d09dea5564729..0000000000000 --- a/libs/x-content/licenses/snakeyaml-2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3aab2116756442bf0d4cd1c089b24d34c3baa253 \ No newline at end of file diff --git a/libs/x-content/licenses/snakeyaml-2.1.jar.sha1 b/libs/x-content/licenses/snakeyaml-2.1.jar.sha1 new file mode 100644 index 0000000000000..5586b210a9736 --- /dev/null +++ b/libs/x-content/licenses/snakeyaml-2.1.jar.sha1 @@ -0,0 +1 @@ +c79f47315517560b5bd6a62376ee385e48105437 \ No newline at end of file diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentContraints.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentContraints.java new file mode 100644 index 0000000000000..2f4dada29780d --- /dev/null +++ 
b/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentContraints.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.xcontent; + +import com.fasterxml.jackson.core.StreamReadConstraints; + +import org.opensearch.common.annotation.InternalApi; + +/** + * Consolidates the XContent constraints (primarily reflecting Jackson's {@link StreamReadConstraints} constraints) + * + * @opensearch.internal + */ +@InternalApi +public interface XContentContraints { + final String DEFAULT_CODEPOINT_LIMIT_PROPERTY = "opensearch.xcontent.codepoint.max"; + final String DEFAULT_MAX_STRING_LEN_PROPERTY = "opensearch.xcontent.string.length.max"; + final String DEFAULT_MAX_NAME_LEN_PROPERTY = "opensearch.xcontent.name.length.max"; + final String DEFAULT_MAX_DEPTH_PROPERTY = "opensearch.xcontent.depth.max"; + + final int DEFAULT_MAX_STRING_LEN = Integer.parseInt(System.getProperty(DEFAULT_MAX_STRING_LEN_PROPERTY, "50000000" /* ~50 Mb */)); + + final int DEFAULT_MAX_NAME_LEN = Integer.parseInt( + System.getProperty(DEFAULT_MAX_NAME_LEN_PROPERTY, "50000" /* StreamReadConstraints.DEFAULT_MAX_NAME_LEN */) + ); + + final int DEFAULT_MAX_DEPTH = Integer.parseInt( + System.getProperty(DEFAULT_MAX_DEPTH_PROPERTY, "1000" /* StreamReadConstraints.DEFAULT_MAX_DEPTH */) + ); + + final int DEFAULT_CODEPOINT_LIMIT = Integer.parseInt(System.getProperty(DEFAULT_CODEPOINT_LIMIT_PROPERTY, "52428800" /* ~50 Mb */)); +} diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentFactory.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentFactory.java index 76a2046dd768a..9f423bc9abad3 100644 --- a/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentFactory.java +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentFactory.java @@ -32,19 +32,14 @@ package org.opensearch.common.xcontent; -import com.fasterxml.jackson.dataformat.cbor.CBORConstants; -import com.fasterxml.jackson.dataformat.smile.SmileConstants; -import org.opensearch.common.xcontent.cbor.CborXContent; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.common.xcontent.smile.SmileXContent; import org.opensearch.common.xcontent.yaml.YamlXContent; -import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.xcontent.XContentParseException; import java.io.IOException; -import java.io.InputStream; import java.io.OutputStream; /** @@ -52,13 +47,11 @@ */ public class XContentFactory { - static final int GUESS_HEADER_LENGTH = 20; - /** * Returns a content builder using JSON format ({@link org.opensearch.common.xcontent.XContentType#JSON}. */ public static XContentBuilder jsonBuilder() throws IOException { - return contentBuilder(XContentType.JSON); + return MediaTypeRegistry.contentBuilder(XContentType.JSON); } /** @@ -72,7 +65,7 @@ public static XContentBuilder jsonBuilder(OutputStream os) throws IOException { * Returns a content builder using SMILE format ({@link org.opensearch.common.xcontent.XContentType#SMILE}. 
*/ public static XContentBuilder smileBuilder() throws IOException { - return contentBuilder(XContentType.SMILE); + return MediaTypeRegistry.contentBuilder(XContentType.SMILE); } /** @@ -86,7 +79,7 @@ public static XContentBuilder smileBuilder(OutputStream os) throws IOException { * Returns a content builder using YAML format ({@link org.opensearch.common.xcontent.XContentType#YAML}. */ public static XContentBuilder yamlBuilder() throws IOException { - return contentBuilder(XContentType.YAML); + return MediaTypeRegistry.contentBuilder(XContentType.YAML); } /** @@ -100,271 +93,6 @@ public static XContentBuilder yamlBuilder(OutputStream os) throws IOException { * Returns a content builder using CBOR format ({@link org.opensearch.common.xcontent.XContentType#CBOR}. */ public static XContentBuilder cborBuilder() throws IOException { - return contentBuilder(XContentType.CBOR); - } - - /** - * Constructs a new cbor builder that will output the result into the provided output stream. - */ - public static XContentBuilder cborBuilder(OutputStream os) throws IOException { - return new XContentBuilder(CborXContent.cborXContent, os); - } - - /** - * Constructs a xcontent builder that will output the result into the provided output stream. - */ - public static XContentBuilder contentBuilder(MediaType type, OutputStream outputStream) throws IOException { - if (type == XContentType.JSON) { - return jsonBuilder(outputStream); - } else if (type == XContentType.SMILE) { - return smileBuilder(outputStream); - } else if (type == XContentType.YAML) { - return yamlBuilder(outputStream); - } else if (type == XContentType.CBOR) { - return cborBuilder(outputStream); - } - throw new IllegalArgumentException("No matching content type for " + type); - } - - /** - * Returns a binary content builder for the provided media type. - */ - public static XContentBuilder contentBuilder(MediaType type) throws IOException { - if (type instanceof XContentType) { - return contentBuilder((XContentType) (type)); - } - throw new IllegalArgumentException("Content type [" + type.getClass().getName() + "] not supported"); - } - - /** - * Returns a binary content builder for the provided content type. - */ - public static XContentBuilder contentBuilder(XContentType type) throws IOException { - if (type == XContentType.JSON) { - return JsonXContent.contentBuilder(); - } else if (type == XContentType.SMILE) { - return SmileXContent.contentBuilder(); - } else if (type == XContentType.YAML) { - return YamlXContent.contentBuilder(); - } else if (type == XContentType.CBOR) { - return CborXContent.contentBuilder(); - } - throw new IllegalArgumentException("No matching content type for " + type); - } - - /** - * Returns the {@link XContent} for the provided content type. - */ - public static XContent xContent(MediaType type) { - if (type == null) { - throw new IllegalArgumentException("Cannot get xcontent for unknown type"); - } - return type.xContent(); - } - - /** - * Guesses the content type based on the provided char sequence. - * - * @deprecated the content type should not be guessed except for few cases where we effectively don't know the content type. - * The REST layer should move to reading the Content-Type header instead. There are other places where auto-detection may be needed. - * This method is deprecated to prevent usages of it from spreading further without specific reasons. - */ - @Deprecated - public static XContentType xContentType(CharSequence content) { - int length = content.length() < GUESS_HEADER_LENGTH ? 
content.length() : GUESS_HEADER_LENGTH; - if (length == 0) { - return null; - } - char first = content.charAt(0); - if (first == '{') { - return XContentType.JSON; - } - // Should we throw a failure here? Smile idea is to use it in bytes.... - if (length > 2 - && first == SmileConstants.HEADER_BYTE_1 - && content.charAt(1) == SmileConstants.HEADER_BYTE_2 - && content.charAt(2) == SmileConstants.HEADER_BYTE_3) { - return XContentType.SMILE; - } - if (length > 2 && first == '-' && content.charAt(1) == '-' && content.charAt(2) == '-') { - return XContentType.YAML; - } - - // CBOR is not supported - - for (int i = 0; i < length; i++) { - char c = content.charAt(i); - if (c == '{') { - return XContentType.JSON; - } - if (Character.isWhitespace(c) == false) { - break; - } - } - return null; - } - - /** - * Guesses the content (type) based on the provided char sequence and returns the corresponding {@link XContent} - * - * @deprecated the content type should not be guessed except for few cases where we effectively don't know the content type. - * The REST layer should move to reading the Content-Type header instead. There are other places where auto-detection may be needed. - * This method is deprecated to prevent usages of it from spreading further without specific reasons. - */ - @Deprecated - public static XContent xContent(CharSequence content) { - XContentType type = xContentType(content); - if (type == null) { - throw new XContentParseException("Failed to derive xcontent"); - } - return xContent(type); - } - - /** - * Guesses the content type based on the provided bytes and returns the corresponding {@link XContent} - * - * @deprecated the content type should not be guessed except for few cases where we effectively don't know the content type. - * The REST layer should move to reading the Content-Type header instead. There are other places where auto-detection may be needed. - * This method is deprecated to prevent usages of it from spreading further without specific reasons. - */ - @Deprecated - public static XContent xContent(byte[] data) { - return xContent(data, 0, data.length); - } - - /** - * Guesses the content type based on the provided bytes and returns the corresponding {@link XContent} - * - * @deprecated guessing the content type should not be needed ideally. We should rather know the content type upfront or read it - * from headers. Till we fixed the REST layer to read the Content-Type header, that should be the only place where guessing is needed. - */ - @Deprecated - public static XContent xContent(byte[] data, int offset, int length) { - XContentType type = xContentType(data, offset, length); - if (type == null) { - throw new XContentParseException("Failed to derive xcontent"); - } - return xContent(type); - } - - /** - * Guesses the content type based on the provided input stream without consuming it. - * - * @deprecated the content type should not be guessed except for few cases where we effectively don't know the content type. - * The REST layer should move to reading the Content-Type header instead. There are other places where auto-detection may be needed. - * This method is deprecated to prevent usages of it from spreading further without specific reasons. - */ - @Deprecated - public static XContentType xContentType(InputStream si) throws IOException { - /* - * We need to guess the content type. To do this, we look for the first non-whitespace character and then try to guess the content - * type on the GUESS_HEADER_LENGTH bytes that follow. 
We do this in a way that does not modify the initial read position in the - * underlying input stream. This is why the input stream must support mark/reset and why we repeatedly mark the read position and - * reset. - */ - if (si.markSupported() == false) { - throw new IllegalArgumentException("Cannot guess the xcontent type without mark/reset support on " + si.getClass()); - } - si.mark(Integer.MAX_VALUE); - try { - // scan until we find the first non-whitespace character or the end of the stream - int current; - do { - current = si.read(); - if (current == -1) { - return null; - } - } while (Character.isWhitespace((char) current)); - // now guess the content type off the next GUESS_HEADER_LENGTH bytes including the current byte - final byte[] firstBytes = new byte[GUESS_HEADER_LENGTH]; - firstBytes[0] = (byte) current; - int read = 1; - while (read < GUESS_HEADER_LENGTH) { - final int r = si.read(firstBytes, read, GUESS_HEADER_LENGTH - read); - if (r == -1) { - break; - } - read += r; - } - return xContentType(firstBytes, 0, read); - } finally { - si.reset(); - } - - } - - /** - * Guesses the content type based on the provided bytes. - * - * @deprecated the content type should not be guessed except for few cases where we effectively don't know the content type. - * The REST layer should move to reading the Content-Type header instead. There are other places where auto-detection may be needed. - * This method is deprecated to prevent usages of it from spreading further without specific reasons. - */ - @Deprecated - public static XContentType xContentType(byte[] bytes) { - return xContentType(bytes, 0, bytes.length); - } - - /** - * Guesses the content type based on the provided bytes. - * - * @deprecated the content type should not be guessed except for few cases where we effectively don't know the content type. - * The REST layer should move to reading the Content-Type header instead. There are other places where auto-detection may be needed. - * This method is deprecated to prevent usages of it from spreading further without specific reasons. 
- */ - @Deprecated - public static XContentType xContentType(byte[] bytes, int offset, int length) { - int totalLength = bytes.length; - if (totalLength == 0 || length == 0) { - return null; - } else if ((offset + length) > totalLength) { - return null; - } - byte first = bytes[offset]; - if (first == '{') { - return XContentType.JSON; - } - if (length > 2 - && first == SmileConstants.HEADER_BYTE_1 - && bytes[offset + 1] == SmileConstants.HEADER_BYTE_2 - && bytes[offset + 2] == SmileConstants.HEADER_BYTE_3) { - return XContentType.SMILE; - } - if (length > 2 && first == '-' && bytes[offset + 1] == '-' && bytes[offset + 2] == '-') { - return XContentType.YAML; - } - // CBOR logic similar to CBORFactory#hasCBORFormat - if (first == CBORConstants.BYTE_OBJECT_INDEFINITE && length > 1) { - return XContentType.CBOR; - } - if (CBORConstants.hasMajorType(CBORConstants.MAJOR_TYPE_TAG, first) && length > 2) { - // Actually, specific "self-describe tag" is a very good indicator - if (first == (byte) 0xD9 && bytes[offset + 1] == (byte) 0xD9 && bytes[offset + 2] == (byte) 0xF7) { - return XContentType.CBOR; - } - } - // for small objects, some encoders just encode as major type object, we can safely - // say its CBOR since it doesn't contradict SMILE or JSON, and its a last resort - if (CBORConstants.hasMajorType(CBORConstants.MAJOR_TYPE_OBJECT, first)) { - return XContentType.CBOR; - } - - int jsonStart = 0; - // JSON may be preceded by UTF-8 BOM - if (length > 3 && first == (byte) 0xEF && bytes[offset + 1] == (byte) 0xBB && bytes[offset + 2] == (byte) 0xBF) { - jsonStart = 3; - } - - // a last chance for JSON - for (int i = jsonStart; i < length; i++) { - byte b = bytes[offset + i]; - if (b == '{') { - return XContentType.JSON; - } - if (Character.isWhitespace(b) == false) { - break; - } - } - return null; + return MediaTypeRegistry.contentBuilder(XContentType.CBOR); } } diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentType.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentType.java index 023caa49e1f39..453107fe4ff65 100644 --- a/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentType.java +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentType.java @@ -32,21 +32,27 @@ package org.opensearch.common.xcontent; +import com.fasterxml.jackson.dataformat.cbor.CBORConstants; +import com.fasterxml.jackson.dataformat.smile.SmileConstants; + +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.cbor.CborXContent; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.common.xcontent.smile.SmileXContent; import org.opensearch.common.xcontent.yaml.YamlXContent; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.MediaType; -import org.opensearch.core.xcontent.MediaTypeParserRegistry; import org.opensearch.core.xcontent.XContent; +import org.opensearch.core.xcontent.XContentBuilder; import java.io.IOException; -import java.util.Map; +import java.io.OutputStream; /** * The content type of {@link XContent}. 
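With these static helpers removed, format detection becomes the responsibility of each media type, and a caller can dispatch by asking every registered type in turn. A hedged sketch of that first-match-wins dispatch, mirroring the enum-with-overrides shape that XContentType adopts below; the Format enum here is a stand-in, not the real registry code:

    public class FormatSniffer {
        enum Format {
            JSON {
                boolean detected(byte[] b, int off, int len) {
                    return len > 0 && b[off] == '{';
                }
            },
            YAML {
                boolean detected(byte[] b, int off, int len) {
                    return len > 2 && b[off] == '-' && b[off + 1] == '-' && b[off + 2] == '-';
                }
            };

            abstract boolean detected(byte[] b, int off, int len);
        }

        // First match wins; null preserves the old "could not guess" contract.
        static Format sniff(byte[] bytes) {
            for (Format candidate : Format.values()) {
                if (candidate.detected(bytes, 0, bytes.length)) {
                    return candidate;
                }
            }
            return null;
        }

        public static void main(String[] args) {
            System.out.println(sniff("{\"a\":1}".getBytes()));  // JSON
            System.out.println(sniff("---\na: 1".getBytes()));  // YAML
        }
    }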
+ * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum XContentType implements MediaType { /** @@ -72,6 +78,26 @@ public String subtype() { public XContent xContent() { return JsonXContent.jsonXContent; } + + @Override + public boolean detectedXContent(final byte[] bytes, int offset, int length) { + return bytes[offset] == '{'; + } + + @Override + public boolean detectedXContent(final CharSequence content, final int length) { + return content.charAt(0) == '{'; + } + + @Override + public XContentBuilder contentBuilder() throws IOException { + return JsonXContent.contentBuilder(); + } + + @Override + public XContentBuilder contentBuilder(final OutputStream os) throws IOException { + return new XContentBuilder(JsonXContent.jsonXContent, os); + } }, /** * The jackson based smile binary format. Fast and compact binary format. @@ -91,6 +117,32 @@ public String subtype() { public XContent xContent() { return SmileXContent.smileXContent; } + + @Override + public boolean detectedXContent(final byte[] bytes, int offset, int length) { + return length > 2 + && bytes[offset] == SmileConstants.HEADER_BYTE_1 + && bytes[offset + 1] == SmileConstants.HEADER_BYTE_2 + && bytes[offset + 2] == SmileConstants.HEADER_BYTE_3; + } + + @Override + public boolean detectedXContent(final CharSequence content, final int length) { + return length > 2 + && content.charAt(0) == SmileConstants.HEADER_BYTE_1 + && content.charAt(1) == SmileConstants.HEADER_BYTE_2 + && content.charAt(2) == SmileConstants.HEADER_BYTE_3; + } + + @Override + public XContentBuilder contentBuilder() throws IOException { + return SmileXContent.contentBuilder(); + } + + @Override + public XContentBuilder contentBuilder(final OutputStream os) throws IOException { + return new XContentBuilder(SmileXContent.smileXContent, os); + } }, /** * A YAML based content type. @@ -110,6 +162,26 @@ public String subtype() { public XContent xContent() { return YamlXContent.yamlXContent; } + + @Override + public boolean detectedXContent(final byte[] bytes, int offset, int length) { + return length > 2 && bytes[offset] == '-' && bytes[offset + 1] == '-' && bytes[offset + 2] == '-'; + } + + @Override + public boolean detectedXContent(final CharSequence content, final int length) { + return length > 2 && content.charAt(0) == '-' && content.charAt(1) == '-' && content.charAt(2) == '-'; + } + + @Override + public XContentBuilder contentBuilder() throws IOException { + return YamlXContent.contentBuilder(); + } + + @Override + public XContentBuilder contentBuilder(final OutputStream os) throws IOException { + return new XContentBuilder(YamlXContent.yamlXContent, os); + } }, /** * A CBOR based content type. 
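Assuming the detectedXContent overrides land as shown, callers can probe a buffer directly against a specific type. A small usage sketch, requiring the opensearch x-content library on the classpath:

    import org.opensearch.common.xcontent.XContentType;

    public class DetectDemo {
        public static void main(String[] args) {
            CharSequence json = "{\"field\":\"value\"}";
            CharSequence yaml = "---\nfield: value";
            // Each enum constant recognises only its own header signature.
            System.out.println(XContentType.JSON.detectedXContent(json, json.length())); // true
            System.out.println(XContentType.YAML.detectedXContent(yaml, yaml.length())); // true
            System.out.println(XContentType.YAML.detectedXContent(json, json.length())); // false
        }
    }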
@@ -129,12 +201,42 @@ public String subtype() { public XContent xContent() { return CborXContent.cborXContent; } - }; - static { - /** a parser of media types */ - MediaTypeParserRegistry.register(XContentType.values(), Map.of("application/*", JSON, "application/x-ndjson", JSON)); - } + @Override + public boolean detectedXContent(final byte[] bytes, int offset, int length) { + // CBOR logic similar to CBORFactory#hasCBORFormat + if (bytes[offset] == CBORConstants.BYTE_OBJECT_INDEFINITE && length > 1) { + return true; + } + if (CBORConstants.hasMajorType(CBORConstants.MAJOR_TYPE_TAG, bytes[offset]) && length > 2) { + // Actually, specific "self-describe tag" is a very good indicator + if (bytes[offset] == (byte) 0xD9 && bytes[offset + 1] == (byte) 0xD9 && bytes[offset + 2] == (byte) 0xF7) { + return true; + } + } + // for small objects, some encoders just encode as major type object, we can safely + // say it's CBOR since it doesn't contradict SMILE or JSON, and it's a last resort + if (CBORConstants.hasMajorType(CBORConstants.MAJOR_TYPE_OBJECT, bytes[offset])) { + return true; + } + return false; + } + + @Override + public boolean detectedXContent(final CharSequence content, final int length) { + return false; + } + + @Override + public XContentBuilder contentBuilder() throws IOException { + return CborXContent.contentBuilder(); + } + + @Override + public XContentBuilder contentBuilder(final OutputStream os) throws IOException { + return new XContentBuilder(CborXContent.cborXContent, os); + } + }; private int index; diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/cbor/CborXContent.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/cbor/CborXContent.java index 46891b279ba43..7e92f236213d4 100644 --- a/libs/x-content/src/main/java/org/opensearch/common/xcontent/cbor/CborXContent.java +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/cbor/CborXContent.java @@ -37,7 +37,11 @@ import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.StreamReadConstraints; import com.fasterxml.jackson.core.StreamReadFeature; +import com.fasterxml.jackson.core.StreamWriteConstraints; import com.fasterxml.jackson.dataformat.cbor.CBORFactory; + +import org.opensearch.common.xcontent.XContentContraints; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -46,7 +50,6 @@ import org.opensearch.core.xcontent.XContentGenerator; import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import java.io.IOException; import java.io.InputStream; @@ -57,11 +60,7 @@ /** * A CBOR based content implementation using Jackson.
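With contentBuilder() promoted onto the media type itself, call sites no longer route through XContentFactory. A short usage sketch of both new entry points, assuming the opensearch x-content library is on the classpath:

    import org.opensearch.common.xcontent.XContentType;
    import org.opensearch.core.xcontent.XContentBuilder;

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;

    public class BuilderDemo {
        public static void main(String[] args) throws IOException {
            // In-memory builder obtained straight from the media type
            XContentBuilder builder = XContentType.JSON.contentBuilder();
            builder.startObject().field("field", "value").endObject();
            System.out.println(builder.toString()); // {"field":"value"}

            // Streaming variant that writes directly to an OutputStream
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            try (XContentBuilder streaming = XContentType.CBOR.contentBuilder(out)) {
                streaming.startObject().field("field", "value").endObject();
            }
            System.out.println(out.size() + " CBOR bytes written");
        }
    }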
*/ -public class CborXContent implements XContent { - public static final int DEFAULT_MAX_STRING_LEN = Integer.parseInt( - System.getProperty("opensearch.xcontent.string.length.max", "50000000" /* ~50 Mb */) - ); - +public class CborXContent implements XContent, XContentContraints { public static XContentBuilder contentBuilder() throws IOException { return XContentBuilder.builder(cborXContent); } @@ -75,7 +74,14 @@ public static XContentBuilder contentBuilder() throws IOException { // Do not automatically close unclosed objects/arrays in com.fasterxml.jackson.dataformat.cbor.CBORGenerator#close() method cborFactory.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false); cborFactory.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true); - cborFactory.setStreamReadConstraints(StreamReadConstraints.builder().maxStringLength(DEFAULT_MAX_STRING_LEN).build()); + cborFactory.setStreamWriteConstraints(StreamWriteConstraints.builder().maxNestingDepth(DEFAULT_MAX_DEPTH).build()); + cborFactory.setStreamReadConstraints( + StreamReadConstraints.builder() + .maxStringLength(DEFAULT_MAX_STRING_LEN) + .maxNameLength(DEFAULT_MAX_NAME_LEN) + .maxNestingDepth(DEFAULT_MAX_DEPTH) + .build() + ); cborFactory.configure(StreamReadFeature.USE_FAST_DOUBLE_PARSER.mappedFeature(), true); cborXContent = new CborXContent(); } diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/cbor/CborXContentGenerator.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/cbor/CborXContentGenerator.java index 2dba887c664b6..1c13ebd3981a9 100644 --- a/libs/x-content/src/main/java/org/opensearch/common/xcontent/cbor/CborXContentGenerator.java +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/cbor/CborXContentGenerator.java @@ -33,6 +33,7 @@ package org.opensearch.common.xcontent.cbor; import com.fasterxml.jackson.core.JsonGenerator; + import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContentGenerator; diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/cbor/CborXContentParser.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/cbor/CborXContentParser.java index 60da72413e114..4abc9650c4bf6 100644 --- a/libs/x-content/src/main/java/org/opensearch/common/xcontent/cbor/CborXContentParser.java +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/cbor/CborXContentParser.java @@ -33,10 +33,11 @@ package org.opensearch.common.xcontent.cbor; import com.fasterxml.jackson.core.JsonParser; -import org.opensearch.core.xcontent.DeprecationHandler; -import org.opensearch.core.xcontent.NamedXContentRegistry; + import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContentParser; +import org.opensearch.core.xcontent.DeprecationHandler; +import org.opensearch.core.xcontent.NamedXContentRegistry; public class CborXContentParser extends JsonXContentParser { diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/json/JsonXContent.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/json/JsonXContent.java index e6c27e4cf3eef..91f6bbeb4f786 100644 --- a/libs/x-content/src/main/java/org/opensearch/common/xcontent/json/JsonXContent.java +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/json/JsonXContent.java @@ -38,6 +38,10 @@ import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.StreamReadConstraints; import com.fasterxml.jackson.core.StreamReadFeature; +import 
com.fasterxml.jackson.core.StreamWriteConstraints; + +import org.opensearch.common.xcontent.XContentContraints; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -45,7 +49,6 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentGenerator; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import java.io.IOException; import java.io.InputStream; @@ -56,11 +59,7 @@ /** * A JSON based content implementation using Jackson. */ -public class JsonXContent implements XContent { - public static final int DEFAULT_MAX_STRING_LEN = Integer.parseInt( - System.getProperty("opensearch.xcontent.string.length.max", "50000000" /* ~50 Mb */) - ); - +public class JsonXContent implements XContent, XContentContraints { public static XContentBuilder contentBuilder() throws IOException { return XContentBuilder.builder(jsonXContent); } @@ -77,7 +76,14 @@ public static XContentBuilder contentBuilder() throws IOException { // Do not automatically close unclosed objects/arrays in com.fasterxml.jackson.core.json.UTF8JsonGenerator#close() method jsonFactory.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false); jsonFactory.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true); - jsonFactory.setStreamReadConstraints(StreamReadConstraints.builder().maxStringLength(DEFAULT_MAX_STRING_LEN).build()); + jsonFactory.setStreamWriteConstraints(StreamWriteConstraints.builder().maxNestingDepth(DEFAULT_MAX_DEPTH).build()); + jsonFactory.setStreamReadConstraints( + StreamReadConstraints.builder() + .maxStringLength(DEFAULT_MAX_STRING_LEN) + .maxNameLength(DEFAULT_MAX_NAME_LEN) + .maxNestingDepth(DEFAULT_MAX_DEPTH) + .build() + ); jsonFactory.configure(StreamReadFeature.USE_FAST_DOUBLE_PARSER.mappedFeature(), true); jsonXContent = new JsonXContent(); } diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/json/JsonXContentGenerator.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/json/JsonXContentGenerator.java index 9164b5216f9a5..3f8493d7a4f14 100644 --- a/libs/x-content/src/main/java/org/opensearch/common/xcontent/json/JsonXContentGenerator.java +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/json/JsonXContentGenerator.java @@ -41,16 +41,17 @@ import com.fasterxml.jackson.core.util.DefaultIndenter; import com.fasterxml.jackson.core.util.DefaultPrettyPrinter; import com.fasterxml.jackson.core.util.JsonGeneratorDelegate; + +import org.opensearch.common.util.io.Streams; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContent; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentGenerator; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.xcontent.filtering.FilterPathBasedFilter; -import org.opensearch.common.util.io.Streams; import java.io.BufferedInputStream; import java.io.IOException; @@ -339,7 +340,7 @@ public void writeRawField(String name, InputStream content) throws IOException { // needed for the 
XContentFactory.xContentType call content = new BufferedInputStream(content); } - XContentType contentType = XContentFactory.xContentType(content); + MediaType contentType = MediaTypeRegistry.xContentType(content); if (contentType == null) { throw new IllegalArgumentException("Can't write raw bytes whose xcontent-type can't be guessed"); } @@ -354,7 +355,7 @@ public void writeRawField(String name, InputStream content, MediaType mediaType) if (mayWriteRawData(mediaType) == false) { // EMPTY is safe here because we never call namedObject when writing raw data try ( - XContentParser parser = XContentFactory.xContent(mediaType) + XContentParser parser = mediaType.xContent() // It's okay to pass the throwing deprecation handler // because we should not be writing raw fields when // generating JSON diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/json/JsonXContentParser.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/json/JsonXContentParser.java index edb582eb58ec7..ed830c6b7bfa3 100644 --- a/libs/x-content/src/main/java/org/opensearch/common/xcontent/json/JsonXContentParser.java +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/json/JsonXContentParser.java @@ -35,12 +35,13 @@ import com.fasterxml.jackson.core.JsonLocation; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.JsonToken; + +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.AbstractXContentParser; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentLocation; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.xcontent.AbstractXContentParser; -import org.opensearch.common.util.io.IOUtils; import java.io.IOException; import java.math.BigInteger; diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/smile/SmileXContent.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/smile/SmileXContent.java index eb968556de8c9..c73e126102a80 100644 --- a/libs/x-content/src/main/java/org/opensearch/common/xcontent/smile/SmileXContent.java +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/smile/SmileXContent.java @@ -37,16 +37,19 @@ import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.StreamReadConstraints; import com.fasterxml.jackson.core.StreamReadFeature; +import com.fasterxml.jackson.core.StreamWriteConstraints; import com.fasterxml.jackson.dataformat.smile.SmileFactory; import com.fasterxml.jackson.dataformat.smile.SmileGenerator; + +import org.opensearch.common.xcontent.XContentContraints; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.xcontent.DeprecationHandler; +import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentGenerator; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.xcontent.MediaType; -import org.opensearch.common.xcontent.XContentType; import java.io.IOException; import java.io.InputStream; @@ -57,11 +60,7 @@ /** * A Smile based content implementation using Jackson. 
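The hardening recipe applied to the CBOR and JSON factories above repeats verbatim for Smile here and for YAML below: cap string length, field-name length, and nesting depth on reads, and nesting depth on writes. Condensed against plain Jackson, the pattern looks like the sketch below; DEFAULT_MAX_NAME_LEN and DEFAULT_MAX_DEPTH come from the XContentContraints interface introduced by this change, whose values are not visible in this diff, so the constants here are placeholders:

    import com.fasterxml.jackson.core.JsonFactory;
    import com.fasterxml.jackson.core.StreamReadConstraints;
    import com.fasterxml.jackson.core.StreamWriteConstraints;

    public class HardenedFactory {
        private static final int MAX_STRING_LEN = 50_000_000; // ~50 MB, matching the removed per-class default
        private static final int MAX_NAME_LEN = 50_000;       // placeholder; real value lives in XContentContraints
        private static final int MAX_DEPTH = 1_000;           // placeholder; real value lives in XContentContraints

        static JsonFactory build() {
            JsonFactory factory = new JsonFactory();
            // Bound nesting on the write path so runaway generation fails fast
            factory.setStreamWriteConstraints(StreamWriteConstraints.builder().maxNestingDepth(MAX_DEPTH).build());
            // Bound string size, field-name size, and nesting on the read path
            factory.setStreamReadConstraints(
                StreamReadConstraints.builder()
                    .maxStringLength(MAX_STRING_LEN)
                    .maxNameLength(MAX_NAME_LEN)
                    .maxNestingDepth(MAX_DEPTH)
                    .build()
            );
            return factory;
        }
    }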
*/ -public class SmileXContent implements XContent { - public static final int DEFAULT_MAX_STRING_LEN = Integer.parseInt( - System.getProperty("opensearch.xcontent.string.length.max", "50000000" /* ~50 Mb */) - ); - +public class SmileXContent implements XContent, XContentContraints { public static XContentBuilder contentBuilder() throws IOException { return XContentBuilder.builder(smileXContent); } @@ -77,7 +76,14 @@ public static XContentBuilder contentBuilder() throws IOException { // Do not automatically close unclosed objects/arrays in com.fasterxml.jackson.dataformat.smile.SmileGenerator#close() method smileFactory.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false); smileFactory.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true); - smileFactory.setStreamReadConstraints(StreamReadConstraints.builder().maxStringLength(DEFAULT_MAX_STRING_LEN).build()); + smileFactory.setStreamWriteConstraints(StreamWriteConstraints.builder().maxNestingDepth(DEFAULT_MAX_DEPTH).build()); + smileFactory.setStreamReadConstraints( + StreamReadConstraints.builder() + .maxStringLength(DEFAULT_MAX_STRING_LEN) + .maxNameLength(DEFAULT_MAX_NAME_LEN) + .maxNestingDepth(DEFAULT_MAX_DEPTH) + .build() + ); smileFactory.configure(StreamReadFeature.USE_FAST_DOUBLE_PARSER.mappedFeature(), true); smileXContent = new SmileXContent(); } diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/smile/SmileXContentGenerator.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/smile/SmileXContentGenerator.java index b7ba56bdb01ea..0315e9a77272f 100644 --- a/libs/x-content/src/main/java/org/opensearch/common/xcontent/smile/SmileXContentGenerator.java +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/smile/SmileXContentGenerator.java @@ -33,6 +33,7 @@ package org.opensearch.common.xcontent.smile; import com.fasterxml.jackson.core.JsonGenerator; + import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContentGenerator; diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/smile/SmileXContentParser.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/smile/SmileXContentParser.java index b767d4e13587d..ade265798b5a4 100644 --- a/libs/x-content/src/main/java/org/opensearch/common/xcontent/smile/SmileXContentParser.java +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/smile/SmileXContentParser.java @@ -33,10 +33,11 @@ package org.opensearch.common.xcontent.smile; import com.fasterxml.jackson.core.JsonParser; -import org.opensearch.core.xcontent.DeprecationHandler; -import org.opensearch.core.xcontent.NamedXContentRegistry; + import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContentParser; +import org.opensearch.core.xcontent.DeprecationHandler; +import org.opensearch.core.xcontent.NamedXContentRegistry; public class SmileXContentParser extends JsonXContentParser { diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/spi/XContentProvider.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/spi/XContentProvider.java new file mode 100644 index 0000000000000..af5ab67507b81 --- /dev/null +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/spi/XContentProvider.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source 
license. + */ + +package org.opensearch.common.xcontent.spi; + +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.spi.MediaTypeProvider; + +import java.util.List; +import java.util.Map; + +/** + * Media Type implementations provided by xcontent library + * + * @opensearch.internal + */ +public class XContentProvider implements MediaTypeProvider { + /** Returns the concrete {@link MediaType} provided by the xcontent library */ + @Override + public List<MediaType> getMediaTypes() { + return List.of(XContentType.values()); + } + + /** Returns the additional {@link MediaType} aliases provided by the xcontent library */ + @Override + public Map<String, MediaType> getAdditionalMediaTypes() { + return Map.of("application/*", XContentType.JSON, "application/x-ndjson", XContentType.JSON); + } +} diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/spi/package-info.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/spi/package-info.java new file mode 100644 index 0000000000000..c265021f12763 --- /dev/null +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/spi/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** SPI implementation for the xcontent library */ +package org.opensearch.common.xcontent.spi; diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/yaml/YamlXContent.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/yaml/YamlXContent.java index bb4fa9a09d448..0e69c6c33b923 100644 --- a/libs/x-content/src/main/java/org/opensearch/common/xcontent/yaml/YamlXContent.java +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/yaml/YamlXContent.java @@ -36,15 +36,19 @@ import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.StreamReadConstraints; import com.fasterxml.jackson.core.StreamReadFeature; +import com.fasterxml.jackson.core.StreamWriteConstraints; import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; +import com.fasterxml.jackson.dataformat.yaml.YAMLFactoryBuilder; + +import org.opensearch.common.xcontent.XContentContraints; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.xcontent.DeprecationHandler; +import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentGenerator; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.xcontent.MediaType; -import org.opensearch.common.xcontent.XContentType; import java.io.IOException; import java.io.InputStream; @@ -52,14 +56,12 @@ import java.io.Reader; import java.util.Set; +import org.yaml.snakeyaml.LoaderOptions; + /** * A YAML based content implementation using Jackson. 
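The new XContentProvider above is discovered through the standard java.util.ServiceLoader mechanism via the META-INF/services entry added below. A hedged sketch of the consuming side; the discover method is illustrative, since the real MediaTypeRegistry internals are not shown in this diff:

    import org.opensearch.core.xcontent.MediaType;
    import org.opensearch.core.xcontent.spi.MediaTypeProvider;

    import java.util.HashMap;
    import java.util.Map;
    import java.util.ServiceLoader;

    public class ProviderDiscovery {
        // Collect every media type contributed by providers on the classpath.
        static Map<String, MediaType> discover() {
            Map<String, MediaType> registry = new HashMap<>();
            // Loads each implementation named in
            // META-INF/services/org.opensearch.core.xcontent.spi.MediaTypeProvider
            for (MediaTypeProvider provider : ServiceLoader.load(MediaTypeProvider.class)) {
                for (MediaType type : provider.getMediaTypes()) {
                    registry.put(type.subtype(), type); // e.g. "json" -> XContentType.JSON
                }
                registry.putAll(provider.getAdditionalMediaTypes()); // e.g. "application/x-ndjson" -> JSON
            }
            return registry;
        }
    }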
*/ -public class YamlXContent implements XContent { - public static final int DEFAULT_MAX_STRING_LEN = Integer.parseInt( - System.getProperty("opensearch.xcontent.string.length.max", "50000000" /* ~50 Mb */) - ); - +public class YamlXContent implements XContent, XContentContraints { public static XContentBuilder contentBuilder() throws IOException { return XContentBuilder.builder(yamlXContent); } @@ -68,9 +70,18 @@ public static XContentBuilder contentBuilder() throws IOException { public static final YamlXContent yamlXContent; static { - yamlFactory = new YAMLFactory(); + final LoaderOptions loaderOptions = new LoaderOptions(); + loaderOptions.setCodePointLimit(DEFAULT_CODEPOINT_LIMIT); + yamlFactory = new YAMLFactoryBuilder(new YAMLFactory()).loaderOptions(loaderOptions).build(); yamlFactory.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true); - yamlFactory.setStreamReadConstraints(StreamReadConstraints.builder().maxStringLength(DEFAULT_MAX_STRING_LEN).build()); + yamlFactory.setStreamWriteConstraints(StreamWriteConstraints.builder().maxNestingDepth(DEFAULT_MAX_DEPTH).build()); + yamlFactory.setStreamReadConstraints( + StreamReadConstraints.builder() + .maxStringLength(DEFAULT_MAX_STRING_LEN) + .maxNameLength(DEFAULT_MAX_NAME_LEN) + .maxNestingDepth(DEFAULT_MAX_DEPTH) + .build() + ); yamlFactory.configure(StreamReadFeature.USE_FAST_DOUBLE_PARSER.mappedFeature(), true); yamlXContent = new YamlXContent(); } diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/yaml/YamlXContentGenerator.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/yaml/YamlXContentGenerator.java index 5dd85afdb9c01..ddc9f5365c22b 100644 --- a/libs/x-content/src/main/java/org/opensearch/common/xcontent/yaml/YamlXContentGenerator.java +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/yaml/YamlXContentGenerator.java @@ -33,6 +33,7 @@ package org.opensearch.common.xcontent.yaml; import com.fasterxml.jackson.core.JsonGenerator; + import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContentGenerator; diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/yaml/YamlXContentParser.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/yaml/YamlXContentParser.java index 3088852f610e8..5fabe58cca919 100644 --- a/libs/x-content/src/main/java/org/opensearch/common/xcontent/yaml/YamlXContentParser.java +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/yaml/YamlXContentParser.java @@ -33,10 +33,11 @@ package org.opensearch.common.xcontent.yaml; import com.fasterxml.jackson.core.JsonParser; -import org.opensearch.core.xcontent.DeprecationHandler; -import org.opensearch.core.xcontent.NamedXContentRegistry; + import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContentParser; +import org.opensearch.core.xcontent.DeprecationHandler; +import org.opensearch.core.xcontent.NamedXContentRegistry; public class YamlXContentParser extends JsonXContentParser { diff --git a/libs/x-content/src/main/resources/META-INF/services/org.opensearch.core.xcontent.spi.MediaTypeProvider b/libs/x-content/src/main/resources/META-INF/services/org.opensearch.core.xcontent.spi.MediaTypeProvider new file mode 100644 index 0000000000000..ce3fab93087dd --- /dev/null +++ b/libs/x-content/src/main/resources/META-INF/services/org.opensearch.core.xcontent.spi.MediaTypeProvider @@ -0,0 +1,9 @@ +# +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors 
require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. +# + +org.opensearch.common.xcontent.spi.XContentProvider diff --git a/libs/x-content/src/test/java/org/opensearch/common/xcontent/ConstructingObjectParserTests.java b/libs/x-content/src/test/java/org/opensearch/common/xcontent/ConstructingObjectParserTests.java index 6f986457898bf..d27289d3b1359 100644 --- a/libs/x-content/src/test/java/org/opensearch/common/xcontent/ConstructingObjectParserTests.java +++ b/libs/x-content/src/test/java/org/opensearch/common/xcontent/ConstructingObjectParserTests.java @@ -34,9 +34,9 @@ import org.opensearch.common.CheckedFunction; import org.opensearch.common.Nullable; -import org.opensearch.core.ParseField; import org.opensearch.common.xcontent.ObjectParserTests.NamedObject; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.ContextParser; import org.opensearch.core.xcontent.ObjectParser; diff --git a/libs/x-content/src/test/java/org/opensearch/common/xcontent/InstantiatingObjectParserTests.java b/libs/x-content/src/test/java/org/opensearch/common/xcontent/InstantiatingObjectParserTests.java index f7311c7defb7e..4e509951462b2 100644 --- a/libs/x-content/src/test/java/org/opensearch/common/xcontent/InstantiatingObjectParserTests.java +++ b/libs/x-content/src/test/java/org/opensearch/common/xcontent/InstantiatingObjectParserTests.java @@ -32,8 +32,8 @@ package org.opensearch.common.xcontent; -import org.opensearch.core.ParseField; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.InstantiatingObjectParser; import org.opensearch.core.xcontent.ParserConstructor; import org.opensearch.core.xcontent.XContentParser; diff --git a/libs/x-content/src/test/java/org/opensearch/common/xcontent/MediaTypeParserTests.java b/libs/x-content/src/test/java/org/opensearch/common/xcontent/MediaTypeParserTests.java index 15492b7351984..64d36f0a8b78f 100644 --- a/libs/x-content/src/test/java/org/opensearch/common/xcontent/MediaTypeParserTests.java +++ b/libs/x-content/src/test/java/org/opensearch/common/xcontent/MediaTypeParserTests.java @@ -32,7 +32,7 @@ package org.opensearch.common.xcontent; -import org.opensearch.core.xcontent.MediaTypeParserRegistry; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.test.OpenSearchTestCase; import java.util.Collections; @@ -46,40 +46,37 @@ public class MediaTypeParserTests extends OpenSearchTestCase { public void testJsonWithParameters() throws Exception { String mediaType = "application/json"; - assertThat(MediaTypeParserRegistry.parseMediaType(mediaType).getParameters(), equalTo(Collections.emptyMap())); - assertThat(MediaTypeParserRegistry.parseMediaType(mediaType + ";").getParameters(), equalTo(Collections.emptyMap())); + assertThat(MediaTypeRegistry.parseMediaType(mediaType).getParameters(), equalTo(Collections.emptyMap())); + assertThat(MediaTypeRegistry.parseMediaType(mediaType + ";").getParameters(), equalTo(Collections.emptyMap())); + assertThat(MediaTypeRegistry.parseMediaType(mediaType + "; charset=UTF-8").getParameters(), equalTo(Map.of("charset", "utf-8"))); assertThat( - MediaTypeParserRegistry.parseMediaType(mediaType + "; charset=UTF-8").getParameters(), - equalTo(Map.of("charset", "utf-8")) - ); - assertThat( - 
MediaTypeParserRegistry.parseMediaType(mediaType + "; custom=123;charset=UTF-8").getParameters(), + MediaTypeRegistry.parseMediaType(mediaType + "; custom=123;charset=UTF-8").getParameters(), equalTo(Map.of("charset", "utf-8", "custom", "123")) ); } public void testWhiteSpaceInTypeSubtype() { String mediaType = " application/json "; - assertThat(MediaTypeParserRegistry.parseMediaType(mediaType).getMediaType(), equalTo(XContentType.JSON)); + assertThat(MediaTypeRegistry.parseMediaType(mediaType).getMediaType(), equalTo(XContentType.JSON)); assertThat( - MediaTypeParserRegistry.parseMediaType(mediaType + "; custom=123; charset=UTF-8").getParameters(), + MediaTypeRegistry.parseMediaType(mediaType + "; custom=123; charset=UTF-8").getParameters(), equalTo(Map.of("charset", "utf-8", "custom", "123")) ); assertThat( - MediaTypeParserRegistry.parseMediaType(mediaType + "; custom=123;\n charset=UTF-8").getParameters(), + MediaTypeRegistry.parseMediaType(mediaType + "; custom=123;\n charset=UTF-8").getParameters(), equalTo(Map.of("charset", "utf-8", "custom", "123")) ); mediaType = " application / json "; - assertThat(MediaTypeParserRegistry.parseMediaType(mediaType), is(nullValue())); + assertThat(MediaTypeRegistry.parseMediaType(mediaType), is(nullValue())); } public void testInvalidParameters() { String mediaType = "application/json"; - assertThat(MediaTypeParserRegistry.parseMediaType(mediaType + "; keyvalueNoEqualsSign"), is(nullValue())); + assertThat(MediaTypeRegistry.parseMediaType(mediaType + "; keyvalueNoEqualsSign"), is(nullValue())); - assertThat(MediaTypeParserRegistry.parseMediaType(mediaType + "; key = value"), is(nullValue())); - assertThat(MediaTypeParserRegistry.parseMediaType(mediaType + "; key="), is(nullValue())); + assertThat(MediaTypeRegistry.parseMediaType(mediaType + "; key = value"), is(nullValue())); + assertThat(MediaTypeRegistry.parseMediaType(mediaType + "; key="), is(nullValue())); } } diff --git a/libs/x-content/src/test/java/org/opensearch/common/xcontent/ObjectParserTests.java b/libs/x-content/src/test/java/org/opensearch/common/xcontent/ObjectParserTests.java index cd59bf59fe15d..5f87a17007fb5 100644 --- a/libs/x-content/src/test/java/org/opensearch/common/xcontent/ObjectParserTests.java +++ b/libs/x-content/src/test/java/org/opensearch/common/xcontent/ObjectParserTests.java @@ -32,19 +32,19 @@ package org.opensearch.common.xcontent; import org.opensearch.common.CheckedFunction; -import org.opensearch.core.ParseField; -import org.opensearch.common.Strings; -import org.opensearch.core.xcontent.XContentParserUtils; -import org.opensearch.core.xcontent.ObjectParser; -import org.opensearch.core.xcontent.ObjectParser.NamedObjectParser; -import org.opensearch.core.xcontent.ObjectParser.ValueType; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ContextParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedObjectNotFoundException; import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.core.xcontent.ObjectParser; +import org.opensearch.core.xcontent.ObjectParser.NamedObjectParser; +import org.opensearch.core.xcontent.ObjectParser.ValueType; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.core.xcontent.XContentParserUtils; import org.opensearch.test.OpenSearchTestCase; import 
java.io.ByteArrayOutputStream; @@ -395,7 +395,7 @@ public void testAllVariants() throws IOException { double expectedNullableDouble; int expectedNullableInt; - XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent()); + XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder(); builder.startObject(); builder.field("int_field", randomBoolean() ? "1" : 1); if (randomBoolean()) { @@ -449,7 +449,7 @@ public void testAllVariants() throws IOException { } builder.field("string_or_null", nullValue ? null : "5"); builder.endObject(); - XContentParser parser = createParser(JsonXContent.jsonXContent, Strings.toString(builder)); + XContentParser parser = createParser(JsonXContent.jsonXContent, builder.toString()); class TestStruct { int int_field; int nullableIntField; @@ -647,7 +647,7 @@ public void testParseNamedObjectsInOrderNotSupported() throws IOException { } public void testIgnoreUnknownFields() throws IOException { - XContentBuilder b = XContentBuilder.builder(XContentType.JSON.xContent()); + XContentBuilder b = MediaTypeRegistry.JSON.contentBuilder(); b.startObject(); { b.field("test", "foo"); @@ -669,7 +669,7 @@ class TestStruct { } public void testIgnoreUnknownObjects() throws IOException { - XContentBuilder b = XContentBuilder.builder(XContentType.JSON.xContent()); + XContentBuilder b = MediaTypeRegistry.JSON.contentBuilder(); b.startObject(); { b.field("test", "foo"); @@ -695,7 +695,7 @@ class TestStruct { } public void testIgnoreUnknownArrays() throws IOException { - XContentBuilder b = XContentBuilder.builder(XContentType.JSON.xContent()); + XContentBuilder b = MediaTypeRegistry.JSON.contentBuilder(); b.startObject(); { b.field("test", "foo"); diff --git a/libs/x-content/src/test/java/org/opensearch/common/xcontent/SimpleStruct.java b/libs/x-content/src/test/java/org/opensearch/common/xcontent/SimpleStruct.java index a4aca80918284..1d2a66ea1f78f 100644 --- a/libs/x-content/src/test/java/org/opensearch/common/xcontent/SimpleStruct.java +++ b/libs/x-content/src/test/java/org/opensearch/common/xcontent/SimpleStruct.java @@ -33,7 +33,7 @@ package org.opensearch.common.xcontent; import org.opensearch.core.ParseField; -import org.opensearch.common.Strings; +import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; diff --git a/libs/x-content/src/test/java/org/opensearch/common/xcontent/XContentParserTests.java b/libs/x-content/src/test/java/org/opensearch/common/xcontent/XContentParserTests.java index 3552514af0aa8..81a2b0e290121 100644 --- a/libs/x-content/src/test/java/org/opensearch/common/xcontent/XContentParserTests.java +++ b/libs/x-content/src/test/java/org/opensearch/common/xcontent/XContentParserTests.java @@ -37,11 +37,11 @@ import com.fasterxml.jackson.dataformat.yaml.JacksonYAMLParseException; import org.opensearch.common.CheckedSupplier; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.xcontent.cbor.CborXContent; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.common.xcontent.smile.SmileXContent; +import org.opensearch.common.xcontent.yaml.YamlXContent; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; @@ -49,16 +49,20 @@ 
import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; +import java.io.InputStream; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.function.Supplier; +import java.util.zip.GZIPInputStream; import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; +import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; @@ -68,6 +72,7 @@ import static org.hamcrest.Matchers.in; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.nullValue; +import static org.junit.Assume.assumeThat; import static org.junit.internal.matchers.ThrowableMessageMatcher.hasMessage; public class XContentParserTests extends OpenSearchTestCase { @@ -80,7 +85,8 @@ public class XContentParserTests extends OpenSearchTestCase { () -> randomAlphaOfLengthBetween(1, SmileXContent.DEFAULT_MAX_STRING_LEN), /* YAML parser limitation */ XContentType.YAML, - () -> randomAlphaOfLengthBetween(1, 3140000) + /* use 75% of the limit, difficult to get the exact size of the content right */ + () -> randomRealisticUnicodeOfCodepointLengthBetween(1, (int) (YamlXContent.DEFAULT_CODEPOINT_LIMIT * 0.75)) ); private static final Map<XContentType, Supplier<String>> OFF_LIMIT_GENERATORS = Map.of( @@ -92,7 +98,51 @@ public class XContentParserTests extends OpenSearchTestCase { () -> randomAlphaOfLength(SmileXContent.DEFAULT_MAX_STRING_LEN + 1), /* YAML parser limitation */ XContentType.YAML, - () -> randomRealisticUnicodeOfCodepointLength(3145730) + () -> randomRealisticUnicodeOfCodepointLength(YamlXContent.DEFAULT_CODEPOINT_LIMIT + 1) + ); + + private static final Map<XContentType, Supplier<String>> FIELD_NAME_GENERATORS = Map.of( + XContentType.JSON, + () -> randomAlphaOfLengthBetween(1, JsonXContent.DEFAULT_MAX_NAME_LEN), + XContentType.CBOR, + () -> randomAlphaOfLengthBetween(1, CborXContent.DEFAULT_MAX_NAME_LEN), + XContentType.SMILE, + () -> randomAlphaOfLengthBetween(1, SmileXContent.DEFAULT_MAX_NAME_LEN), + XContentType.YAML, + () -> randomAlphaOfLengthBetween(1, YamlXContent.DEFAULT_MAX_NAME_LEN) + ); + + private static final Map<XContentType, Supplier<String>> FIELD_NAME_OFF_LIMIT_GENERATORS = Map.of( + XContentType.JSON, + () -> randomAlphaOfLength(JsonXContent.DEFAULT_MAX_NAME_LEN + 1), + XContentType.CBOR, + () -> randomAlphaOfLength(CborXContent.DEFAULT_MAX_NAME_LEN + 1), + XContentType.SMILE, + () -> randomAlphaOfLength(SmileXContent.DEFAULT_MAX_NAME_LEN + 1), + XContentType.YAML, + () -> randomAlphaOfLength(YamlXContent.DEFAULT_MAX_NAME_LEN + 1) + ); + + private static final Map<XContentType, Supplier<Integer>> DEPTH_GENERATORS = Map.of( + XContentType.JSON, + () -> randomIntBetween(1, JsonXContent.DEFAULT_MAX_DEPTH), + XContentType.CBOR, + () -> randomIntBetween(1, CborXContent.DEFAULT_MAX_DEPTH), + XContentType.SMILE, + () -> randomIntBetween(1, SmileXContent.DEFAULT_MAX_DEPTH), + XContentType.YAML, + () -> randomIntBetween(1, YamlXContent.DEFAULT_MAX_DEPTH) + ); + + private static final Map<XContentType, Supplier<Integer>> OFF_LIMIT_DEPTH_GENERATORS = Map.of( + XContentType.JSON, + () -> JsonXContent.DEFAULT_MAX_DEPTH + 1, + XContentType.CBOR, + () -> CborXContent.DEFAULT_MAX_DEPTH + 1, + XContentType.SMILE, + () -> SmileXContent.DEFAULT_MAX_DEPTH + 1, + 
XContentType.YAML, + () -> YamlXContent.DEFAULT_MAX_DEPTH + 1 ); public void testStringOffLimit() throws IOException { @@ -156,6 +206,188 @@ public void testString() throws IOException { } } + public void testFieldNameOffLimit() throws IOException { + final XContentType xContentType = randomFrom(XContentType.values()); + + final String field = FIELD_NAME_OFF_LIMIT_GENERATORS.get(xContentType).get(); + final String value = randomAlphaOfLengthBetween(1, 5); + + try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) { + builder.startObject(); + if (randomBoolean()) { + builder.field(field, value); + } else { + builder.field(field).value(value); + } + builder.endObject(); + + try (XContentParser parser = createParser(xContentType.xContent(), BytesReference.bytes(builder))) { + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); + // See please https://github.com/FasterXML/jackson-dataformats-binary/issues/392, support + // for CBOR, Smile is coming + if (xContentType != XContentType.JSON) { + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + assertEquals(field, parser.currentName()); + assertEquals(XContentParser.Token.VALUE_STRING, parser.nextToken()); + assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); + assertNull(parser.nextToken()); + } else { + assertThrows(StreamConstraintsException.class, () -> parser.nextToken()); + } + } + } + } + + public void testFieldName() throws IOException { + final XContentType xContentType = randomFrom(XContentType.values()); + + final String field = FIELD_NAME_GENERATORS.get(xContentType).get(); + final String value = randomAlphaOfLengthBetween(1, 5); + + try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) { + builder.startObject(); + if (randomBoolean()) { + builder.field(field, value); + } else { + builder.field(field).value(value); + } + builder.endObject(); + + try (XContentParser parser = createParser(xContentType.xContent(), BytesReference.bytes(builder))) { + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + assertEquals(field, parser.currentName()); + assertEquals(XContentParser.Token.VALUE_STRING, parser.nextToken()); + assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); + assertNull(parser.nextToken()); + } + } + } + + public void testWriteDepthOffLimit() throws IOException { + final XContentType xContentType = randomFrom(XContentType.values()); + // Branching off YAML logic into separate test case testWriteDepthOffLimitYaml since it behaves differently + assumeThat(xContentType, not(XContentType.YAML)); + + final String field = randomAlphaOfLengthBetween(1, 5); + final String value = randomAlphaOfLengthBetween(1, 5); + + try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) { + final int maxDepth = OFF_LIMIT_DEPTH_GENERATORS.get(xContentType).get() - 1; + + for (int depth = 0; depth < maxDepth; ++depth) { + builder.startObject(); + builder.field(field + depth); + } + + // The behavior here is very interesting: the generator does write the new object tag (changing the internal state) + // BUT throws the exception after the fact, this is why we have to close the object at the end. 
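The write-side trip that testWriteDepthOffLimit exercises can be reproduced in isolation with bare Jackson, which makes the throws-after-mutating-state behavior noted in the comment above easier to see. A sketch with a deliberately tiny limit so it fails quickly:

    import com.fasterxml.jackson.core.JsonFactory;
    import com.fasterxml.jackson.core.JsonGenerator;
    import com.fasterxml.jackson.core.StreamWriteConstraints;
    import com.fasterxml.jackson.core.exc.StreamConstraintsException;

    import java.io.IOException;
    import java.io.StringWriter;

    public class DepthTrip {
        public static void main(String[] args) throws IOException {
            JsonFactory factory = new JsonFactory();
            factory.setStreamWriteConstraints(StreamWriteConstraints.builder().maxNestingDepth(3).build());
            JsonGenerator gen = factory.createGenerator(new StringWriter());
            try {
                for (int depth = 0; depth < 10; depth++) {
                    gen.writeStartObject();           // the 4th call exceeds maxNestingDepth(3)
                    gen.writeFieldName("level" + depth);
                }
            } catch (StreamConstraintsException e) {
                // the generator has already recorded the new scope when this fires
                System.out.println("tripped: " + e.getMessage());
            }
        }
    }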
+ assertThrows(StreamConstraintsException.class, () -> builder.startObject()); + if (randomBoolean()) { + builder.field(field, value); + } else { + builder.field(field).value(value); + } + + builder.endObject(); + + for (int depth = 0; depth < maxDepth; ++depth) { + builder.endObject(); + } + } + } + + public void testWriteDepthOffLimitYaml() throws IOException { + final String field = randomAlphaOfLengthBetween(1, 5); + try (XContentBuilder builder = XContentBuilder.builder(XContentType.YAML.xContent())) { + final int maxDepth = OFF_LIMIT_DEPTH_GENERATORS.get(XContentType.YAML).get() - 1; + + for (int depth = 0; depth < maxDepth; ++depth) { + builder.startObject(); + builder.field(field + depth); + } + + // The behavior here is very interesting: the generator does write the new object tag (changing the internal state) + // BUT throws the exception after the fact, this is why we have to close the object at the end. + assertThrows(StreamConstraintsException.class, () -> builder.startObject()); + } catch (final IllegalStateException ex) { + // The YAML parser has a really hard time recovering from StreamConstraintsException: the internal + // state seems to be completely messed up, and closing cleanly does not seem feasible. + } + } + + public void testReadDepthOffLimit() throws IOException { + final XContentType xContentType = randomFrom(XContentType.values()); + final int maxDepth = OFF_LIMIT_DEPTH_GENERATORS.get(xContentType).get() - 1; + + // Since the parser and generator use the same max depth constraints, we cannot generate content that exceeds the limits, + // so we use pre-created test files instead. + try ( + InputStream in = new GZIPInputStream( + getDataInputStream("depth-off-limit." + xContentType.name().toLowerCase(Locale.US) + ".gz") + ) + ) { + try (XContentParser parser = createParser(xContentType.xContent(), in)) { + for (int depth = 0; depth < maxDepth; ++depth) { + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + } + + if (xContentType != XContentType.YAML) { + assertThrows(StreamConstraintsException.class, () -> parser.nextToken()); + } + } + } + } + + public void testDepth() throws IOException { + final XContentType xContentType = randomFrom(XContentType.values()); + + final String field = randomAlphaOfLengthBetween(1, 5); + final String value = randomAlphaOfLengthBetween(1, 5); + + try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) { + final int maxDepth = DEPTH_GENERATORS.get(xContentType).get() - 1; + + for (int depth = 0; depth < maxDepth; ++depth) { + builder.startObject(); + builder.field(field + depth); + } + + builder.startObject(); + if (randomBoolean()) { + builder.field(field, value); + } else { + builder.field(field).value(value); + } + builder.endObject(); + + for (int depth = 0; depth < maxDepth; ++depth) { + builder.endObject(); + } + + try (XContentParser parser = createParser(xContentType.xContent(), BytesReference.bytes(builder))) { + for (int depth = 0; depth < maxDepth; ++depth) { + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + assertEquals(field + depth, parser.currentName()); + } + + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + assertEquals(field, parser.currentName()); + assertEquals(XContentParser.Token.VALUE_STRING, parser.nextToken()); +
assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); + + for (int depth = 0; depth < maxDepth; ++depth) { + assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); + } + + assertNull(parser.nextToken()); + } + } + } + public void testFloat() throws IOException { final XContentType xContentType = randomFrom(XContentType.values()); @@ -366,7 +598,7 @@ public void testReadBooleans() throws IOException { public void testEmptyList() throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder().startObject().startArray("some_array").endArray().endObject(); - try (XContentParser parser = createParser(JsonXContent.jsonXContent, Strings.toString(builder))) { + try (XContentParser parser = createParser(JsonXContent.jsonXContent, builder.toString())) { assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); assertEquals("some_array", parser.currentName()); @@ -388,7 +620,7 @@ public void testSimpleList() throws IOException { .endArray() .endObject(); - try (XContentParser parser = createParser(JsonXContent.jsonXContent, Strings.toString(builder))) { + try (XContentParser parser = createParser(JsonXContent.jsonXContent, builder.toString())) { assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); assertEquals("some_array", parser.currentName()); @@ -416,7 +648,7 @@ public void testNestedList() throws IOException { .endArray() .endObject(); - try (XContentParser parser = createParser(JsonXContent.jsonXContent, Strings.toString(builder))) { + try (XContentParser parser = createParser(JsonXContent.jsonXContent, builder.toString())) { assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); assertEquals("some_array", parser.currentName()); @@ -440,7 +672,7 @@ public void testNestedMapInList() throws IOException { .endArray() .endObject(); - try (XContentParser parser = createParser(JsonXContent.jsonXContent, Strings.toString(builder))) { + try (XContentParser parser = createParser(JsonXContent.jsonXContent, builder.toString())) { assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); assertEquals("some_array", parser.currentName()); @@ -516,7 +748,7 @@ public void testSubParserObject() throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder(); int numberOfTokens; numberOfTokens = generateRandomObjectForMarking(builder); - String content = Strings.toString(builder); + String content = builder.toString(); try (XContentParser parser = createParser(JsonXContent.jsonXContent, content)) { assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); @@ -564,7 +796,7 @@ public void testSubParserArray() throws IOException { builder.endArray(); builder.endObject(); - String content = Strings.toString(builder); + String content = builder.toString(); try (XContentParser parser = createParser(JsonXContent.jsonXContent, content)) { assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); @@ -597,7 +829,7 @@ public void testSubParserArray() throws IOException { public void testCreateSubParserAtAWrongPlace() throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder(); generateRandomObjectForMarking(builder); - String content = Strings.toString(builder); + String content = builder.toString(); try (XContentParser 
parser = createParser(JsonXContent.jsonXContent, content)) { assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); @@ -611,7 +843,7 @@ public void testCreateSubParserAtAWrongPlace() throws IOException { public void testCreateRootSubParser() throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder(); int numberOfTokens = generateRandomObjectForMarking(builder); - String content = Strings.toString(builder); + String content = builder.toString(); try (XContentParser parser = createParser(JsonXContent.jsonXContent, content)) { assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); @@ -629,7 +861,7 @@ public void testCreateRootSubParser() throws IOException { /** * Generates a random object {"first_field": "foo", "marked_field": {...random...}, "last_field": "bar} - * + * <p> * Returns the number of tokens in the marked field */ private static int generateRandomObjectForMarking(XContentBuilder builder) throws IOException { diff --git a/libs/x-content/src/test/resources/org/opensearch/common/xcontent/depth-off-limit.cbor.gz b/libs/x-content/src/test/resources/org/opensearch/common/xcontent/depth-off-limit.cbor.gz new file mode 100644 index 0000000000000..88de7e590e7f0 Binary files /dev/null and b/libs/x-content/src/test/resources/org/opensearch/common/xcontent/depth-off-limit.cbor.gz differ diff --git a/libs/x-content/src/test/resources/org/opensearch/common/xcontent/depth-off-limit.json.gz b/libs/x-content/src/test/resources/org/opensearch/common/xcontent/depth-off-limit.json.gz new file mode 100644 index 0000000000000..76274910542ac Binary files /dev/null and b/libs/x-content/src/test/resources/org/opensearch/common/xcontent/depth-off-limit.json.gz differ diff --git a/libs/x-content/src/test/resources/org/opensearch/common/xcontent/depth-off-limit.smile.gz b/libs/x-content/src/test/resources/org/opensearch/common/xcontent/depth-off-limit.smile.gz new file mode 100644 index 0000000000000..e248778b37253 Binary files /dev/null and b/libs/x-content/src/test/resources/org/opensearch/common/xcontent/depth-off-limit.smile.gz differ diff --git a/libs/x-content/src/test/resources/org/opensearch/common/xcontent/depth-off-limit.yaml.gz b/libs/x-content/src/test/resources/org/opensearch/common/xcontent/depth-off-limit.yaml.gz new file mode 100644 index 0000000000000..3b36594482a68 Binary files /dev/null and b/libs/x-content/src/test/resources/org/opensearch/common/xcontent/depth-off-limit.yaml.gz differ diff --git a/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/MatrixStatsAggregator.java b/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/MatrixStatsAggregator.java index eaf8a3bc6fe43..7e48d659e0bbc 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/MatrixStatsAggregator.java +++ b/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/MatrixStatsAggregator.java @@ -33,9 +33,9 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.ScoreMode; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.ObjectArray; -import org.opensearch.common.lease.Releasables; import org.opensearch.index.fielddata.NumericDoubleValues; import org.opensearch.search.MultiValueMode; import org.opensearch.search.aggregations.Aggregator; @@ -43,9 +43,9 @@ import org.opensearch.search.aggregations.LeafBucketCollector; import 
org.opensearch.search.aggregations.LeafBucketCollectorBase; import org.opensearch.search.aggregations.metrics.MetricsAggregator; +import org.opensearch.search.aggregations.support.ArrayValuesSource; import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.internal.SearchContext; -import org.opensearch.search.aggregations.support.ArrayValuesSource; import java.io.IOException; import java.util.Map; diff --git a/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/MatrixStatsAggregatorFactory.java b/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/MatrixStatsAggregatorFactory.java index f7ab0db3c9607..24f74f3859157 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/MatrixStatsAggregatorFactory.java +++ b/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/MatrixStatsAggregatorFactory.java @@ -89,4 +89,9 @@ protected Aggregator doCreateInternal( } return new MatrixStatsAggregator(name, typedValuesSources, searchContext, parent, multiValueMode, metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/MatrixStatsParser.java b/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/MatrixStatsParser.java index fc6b9725d93d6..a62033d93e640 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/MatrixStatsParser.java +++ b/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/MatrixStatsParser.java @@ -34,10 +34,10 @@ import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.search.MultiValueMode; -import org.opensearch.search.aggregations.support.ValueType; -import org.opensearch.search.aggregations.support.ValuesSourceType; import org.opensearch.search.aggregations.support.ArrayValuesSourceAggregationBuilder; import org.opensearch.search.aggregations.support.ArrayValuesSourceParser; +import org.opensearch.search.aggregations.support.ValueType; +import org.opensearch.search.aggregations.support.ValuesSourceType; import java.io.IOException; import java.util.Map; diff --git a/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/RunningStats.java b/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/RunningStats.java index de67cc2930652..de6b59b1546a5 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/RunningStats.java +++ b/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/RunningStats.java @@ -46,7 +46,7 @@ /** * Descriptive stats gathered per shard. Coordinating node computes final correlation and covariance stats * based on these descriptive stats. 
This single pass, parallel approach is based on: - * + * <p> * http://prod.sandia.gov/techlib/access-control.cgi/2008/086212.pdf */ public class RunningStats implements Writeable, Cloneable { @@ -222,7 +222,7 @@ private void updateCovariance(final String[] fieldNames, final Map<String, Doubl /** * Merges the descriptive statistics of a second data set (e.g., per shard) - * + * <p> * running computations taken from: http://prod.sandia.gov/techlib/access-control.cgi/2008/086212.pdf **/ public void merge(final RunningStats other) { diff --git a/modules/aggs-matrix-stats/src/test/java/org/opensearch/search/aggregations/matrix/stats/InternalMatrixStatsTests.java b/modules/aggs-matrix-stats/src/test/java/org/opensearch/search/aggregations/matrix/stats/InternalMatrixStatsTests.java index c5dc68ff4c800..63eb312212d5d 100644 --- a/modules/aggs-matrix-stats/src/test/java/org/opensearch/search/aggregations/matrix/stats/InternalMatrixStatsTests.java +++ b/modules/aggs-matrix-stats/src/test/java/org/opensearch/search/aggregations/matrix/stats/InternalMatrixStatsTests.java @@ -31,13 +31,13 @@ package org.opensearch.search.aggregations.matrix.stats; -import org.opensearch.core.ParseField; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.MockBigArrays; import org.opensearch.common.util.MockPageCacheRecycler; +import org.opensearch.core.ParseField; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.core.xcontent.ContextParser; import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.plugins.SearchPlugin; import org.opensearch.script.ScriptService; import org.opensearch.search.aggregations.Aggregation; diff --git a/modules/aggs-matrix-stats/src/test/java/org/opensearch/search/aggregations/matrix/stats/MatrixStatsAggregatorTests.java b/modules/aggs-matrix-stats/src/test/java/org/opensearch/search/aggregations/matrix/stats/MatrixStatsAggregatorTests.java index c159990b4eff0..5cc51d902fc19 100644 --- a/modules/aggs-matrix-stats/src/test/java/org/opensearch/search/aggregations/matrix/stats/MatrixStatsAggregatorTests.java +++ b/modules/aggs-matrix-stats/src/test/java/org/opensearch/search/aggregations/matrix/stats/MatrixStatsAggregatorTests.java @@ -36,10 +36,10 @@ import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.document.StringField; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.NumericUtils; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.NumberFieldMapper; diff --git a/modules/aggs-matrix-stats/src/yamlRestTest/java/org/opensearch/search/aggregations/matrix/MatrixStatsClientYamlTestSuiteIT.java b/modules/aggs-matrix-stats/src/yamlRestTest/java/org/opensearch/search/aggregations/matrix/MatrixStatsClientYamlTestSuiteIT.java index 4aea2a210c2b4..b0d9681339652 100644 --- a/modules/aggs-matrix-stats/src/yamlRestTest/java/org/opensearch/search/aggregations/matrix/MatrixStatsClientYamlTestSuiteIT.java +++ b/modules/aggs-matrix-stats/src/yamlRestTest/java/org/opensearch/search/aggregations/matrix/MatrixStatsClientYamlTestSuiteIT.java @@ -33,6 +33,7 @@ import 
com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.test.rest.yaml.ClientYamlTestCandidate; import org.opensearch.test.rest.yaml.OpenSearchClientYamlSuiteTestCase; diff --git a/modules/analysis-common/src/internalClusterTest/java/org/opensearch/analysis/common/QueryStringWithAnalyzersIT.java b/modules/analysis-common/src/internalClusterTest/java/org/opensearch/analysis/common/QueryStringWithAnalyzersIT.java index 785e597857825..648536f9136a8 100644 --- a/modules/analysis-common/src/internalClusterTest/java/org/opensearch/analysis/common/QueryStringWithAnalyzersIT.java +++ b/modules/analysis-common/src/internalClusterTest/java/org/opensearch/analysis/common/QueryStringWithAnalyzersIT.java @@ -32,20 +32,36 @@ package org.opensearch.analysis.common; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; import org.opensearch.index.query.Operator; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; import static org.opensearch.index.query.QueryBuilders.queryStringQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; -public class QueryStringWithAnalyzersIT extends OpenSearchIntegTestCase { +public class QueryStringWithAnalyzersIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public QueryStringWithAnalyzersIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Arrays.asList(CommonAnalysisModulePlugin.class); diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/ASCIIFoldingTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/ASCIIFoldingTokenFilterFactory.java index 3fff4d671200c..dc187de86ee19 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/ASCIIFoldingTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/ASCIIFoldingTokenFilterFactory.java @@ -34,8 +34,8 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter; -import org.opensearch.core.ParseField; import org.opensearch.common.settings.Settings; +import org.opensearch.core.ParseField; import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; import org.opensearch.index.analysis.AbstractTokenFilterFactory; diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/AnalysisPainlessExtension.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/AnalysisPainlessExtension.java index 8924e2d314c93..ac3f044d1f473 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/AnalysisPainlessExtension.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/AnalysisPainlessExtension.java @@ -32,9 +32,9 @@ package org.opensearch.analysis.common; -import org.opensearch.painless.spi.PainlessExtension; import org.opensearch.painless.spi.Allowlist; import org.opensearch.painless.spi.AllowlistLoader; +import org.opensearch.painless.spi.PainlessExtension; import org.opensearch.script.ScriptContext; import java.util.Collections; diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/BrazilianStemTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/BrazilianStemTokenFilterFactory.java index 83e3cff1ddeda..8fbcb3fcd8215 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/BrazilianStemTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/BrazilianStemTokenFilterFactory.java @@ -32,10 +32,10 @@ package org.opensearch.analysis.common; +import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.br.BrazilianStemFilter; import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter; -import org.apache.lucene.analysis.CharArraySet; import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java index 3fa6f0fda406d..cf2736a8583d2 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java @@ -89,6 +89,7 @@ import org.apache.lucene.analysis.lt.LithuanianAnalyzer; import org.apache.lucene.analysis.lv.LatvianAnalyzer; import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter; +import org.apache.lucene.analysis.miscellaneous.DelimitedTermFrequencyTokenFilter; import 
org.apache.lucene.analysis.miscellaneous.DisableGraphAttribute; import org.apache.lucene.analysis.miscellaneous.KeywordRepeatFilter; import org.apache.lucene.analysis.miscellaneous.LengthFilter; @@ -128,10 +129,10 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.SetOnce; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.regex.Regex; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; @@ -154,8 +155,6 @@ import org.opensearch.script.ScriptService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.watcher.ResourceWatcherService; -import org.tartarus.snowball.ext.DutchStemmer; -import org.tartarus.snowball.ext.FrenchStemmer; import java.util.ArrayList; import java.util.Collection; @@ -165,6 +164,9 @@ import java.util.TreeMap; import java.util.function.Supplier; +import org.tartarus.snowball.ext.DutchStemmer; +import org.tartarus.snowball.ext.FrenchStemmer; + import static org.opensearch.plugins.AnalysisPlugin.requiresAnalysisSettings; public class CommonAnalysisModulePlugin extends Plugin implements AnalysisPlugin, ScriptPlugin { @@ -264,6 +266,7 @@ public Map<String, AnalysisProvider<TokenFilterFactory>> getTokenFilters() { ); filters.put("decimal_digit", DecimalDigitFilterFactory::new); filters.put("delimited_payload", DelimitedPayloadTokenFilterFactory::new); + filters.put("delimited_term_freq", DelimitedTermFrequencyTokenFilterFactory::new); filters.put("dictionary_decompounder", requiresAnalysisSettings(DictionaryCompoundWordTokenFilterFactory::new)); filters.put("dutch_stem", DutchStemTokenFilterFactory::new); filters.put("edge_ngram", EdgeNGramTokenFilterFactory::new); @@ -391,7 +394,17 @@ public Map<String, AnalysisProvider<TokenizerFactory>> getTokenizers() { // TODO deprecate and remove in API tokenizers.put("lowercase", XLowerCaseTokenizerFactory::new); tokenizers.put("path_hierarchy", PathHierarchyTokenizerFactory::new); - tokenizers.put("PathHierarchy", PathHierarchyTokenizerFactory::new); + tokenizers.put("PathHierarchy", (IndexSettings indexSettings, Environment environment, String name, Settings settings) -> { + // TODO Remove "PathHierarchy" tokenizer name in 4.0 and throw exception + if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_3_0_0)) { + deprecationLogger.deprecate( + "PathHierarchy_tokenizer_deprecation", + "The [PathHierarchy] tokenizer name is deprecated and will be removed in a future version. " + + "Please change the tokenizer name to [path_hierarchy] instead." 
+ ); + } + return new PathHierarchyTokenizerFactory(indexSettings, environment, name, settings); + }); tokenizers.put("pattern", PatternTokenizerFactory::new); tokenizers.put("uax_url_email", UAX29URLEmailTokenizerFactory::new); tokenizers.put("whitespace", WhitespaceTokenizerFactory::new); @@ -499,6 +512,13 @@ public List<PreConfiguredTokenFilter> getPreConfiguredTokenFilters() { ) ) ); + filters.add( + PreConfiguredTokenFilter.singleton( + "delimited_term_freq", + false, + input -> new DelimitedTermFrequencyTokenFilter(input, DelimitedTermFrequencyTokenFilterFactory.DEFAULT_DELIMITER) + ) + ); filters.add(PreConfiguredTokenFilter.singleton("dutch_stem", false, input -> new SnowballFilter(input, new DutchStemmer()))); filters.add(PreConfiguredTokenFilter.singleton("edge_ngram", false, false, input -> new EdgeNGramTokenFilter(input, 1))); filters.add(PreConfiguredTokenFilter.openSearchVersion("edgeNGram", false, false, (reader, version) -> { @@ -545,7 +565,7 @@ public List<PreConfiguredTokenFilter> getPreConfiguredTokenFilters() { filters.add(PreConfiguredTokenFilter.singleton("scandinavian_normalization", true, ScandinavianNormalizationFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("shingle", false, false, input -> { TokenStream ts = new ShingleFilter(input); - /** + /* * We disable the graph analysis on this token stream * because it produces shingles of different size. * Graph analysis on such token stream is useless and dangerous as it may create too many paths @@ -652,8 +672,17 @@ public List<PreConfiguredTokenizer> getPreConfiguredTokenizers() { } return new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); })); - tokenizers.add(PreConfiguredTokenizer.singleton("PathHierarchy", PathHierarchyTokenizer::new)); - + tokenizers.add(PreConfiguredTokenizer.openSearchVersion("PathHierarchy", (version) -> { + // TODO Remove "PathHierarchy" tokenizer name in 4.0 and throw exception + if (version.onOrAfter(Version.V_3_0_0)) { + deprecationLogger.deprecate( + "PathHierarchy_tokenizer_deprecation", + "The [PathHierarchy] tokenizer name is deprecated and will be removed in a future version. " + + "Please change the tokenizer name to [path_hierarchy] instead." + ); + } + return new PathHierarchyTokenizer(); + })); return tokenizers; } } diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/DelimitedTermFrequencyTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/DelimitedTermFrequencyTokenFilterFactory.java new file mode 100644 index 0000000000000..8929a7c54ef4c --- /dev/null +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/DelimitedTermFrequencyTokenFilterFactory.java @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.analysis.common; + +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.miscellaneous.DelimitedTermFrequencyTokenFilter; +import org.opensearch.common.settings.Settings; +import org.opensearch.env.Environment; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.analysis.AbstractTokenFilterFactory; + +public class DelimitedTermFrequencyTokenFilterFactory extends AbstractTokenFilterFactory { + public static final char DEFAULT_DELIMITER = '|'; + private static final String DELIMITER = "delimiter"; + private final char delimiter; + + DelimitedTermFrequencyTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + super(indexSettings, name, settings); + delimiter = parseDelimiter(settings); + } + + @Override + public TokenStream create(TokenStream tokenStream) { + return new DelimitedTermFrequencyTokenFilter(tokenStream, delimiter); + } + + private static char parseDelimiter(Settings settings) { + String delimiter = settings.get(DELIMITER); + if (delimiter == null) { + return DEFAULT_DELIMITER; + } else if (delimiter.length() == 1) { + return delimiter.charAt(0); + } + + throw new IllegalArgumentException( + "Setting [" + DELIMITER + "] must be a single, non-null character. [" + delimiter + "] was provided." + ); + } +} diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/DutchStemTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/DutchStemTokenFilterFactory.java index 296740fb161ad..66e5cac331990 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/DutchStemTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/DutchStemTokenFilterFactory.java @@ -41,6 +41,7 @@ import org.opensearch.index.IndexSettings; import org.opensearch.index.analysis.AbstractTokenFilterFactory; import org.opensearch.index.analysis.Analysis; + import org.tartarus.snowball.ext.DutchStemmer; public class DutchStemTokenFilterFactory extends AbstractTokenFilterFactory { diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/FingerprintAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/FingerprintAnalyzerProvider.java index 0c59d11bd88a5..ae315ff0a9cfe 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/FingerprintAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/FingerprintAnalyzerProvider.java @@ -34,8 +34,8 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.CharArraySet; -import org.opensearch.core.ParseField; import org.opensearch.common.settings.Settings; +import org.opensearch.core.ParseField; import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; import org.opensearch.index.analysis.AbstractIndexAnalyzerProvider; diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/FrenchStemTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/FrenchStemTokenFilterFactory.java index 15872d8b288c0..3aa546a79cde6 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/FrenchStemTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/FrenchStemTokenFilterFactory.java @@ -41,6 +41,7 @@ import org.opensearch.index.IndexSettings; 
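Note on the new DelimitedTermFrequencyTokenFilterFactory above: a minimal usage sketch, exercising the filter through the same test helpers this change touches (AnalysisTestsHelper, CommonAnalysisModulePlugin, OpenSearchTokenStreamTestCase). The class name, the filter name "my_tf", and the temp-dir home path are illustrative only; with the default '|' delimiter, the token text "cat|4" indexes the single term "cat" carrying a term frequency of 4, as the factory's tests later in this diff also verify.

import java.io.StringReader;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.TermFrequencyAttribute;
import org.opensearch.analysis.common.CommonAnalysisModulePlugin;
import org.opensearch.common.settings.Settings;
import org.opensearch.env.Environment;
import org.opensearch.index.analysis.AnalysisTestsHelper;
import org.opensearch.test.OpenSearchTestCase;
import org.opensearch.test.OpenSearchTokenStreamTestCase;

public class DelimitedTermFrequencyUsageSketch extends OpenSearchTokenStreamTestCase {
    public void testDefaultDelimiter() throws Exception {
        // Register the new filter under the illustrative name "my_tf";
        // no "delimiter" setting is given, so the default '|' applies.
        OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(
            Settings.builder()
                .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
                .put("index.analysis.filter.my_tf.type", "delimited_term_freq")
                .build(),
            new CommonAnalysisModulePlugin()
        );
        // "cat|4": the text before the delimiter is the term, the text after it the frequency.
        Tokenizer tokenizer = new WhitespaceTokenizer();
        tokenizer.setReader(new StringReader("cat|4"));
        TokenStream stream = analysis.tokenFilter.get("my_tf").create(tokenizer);
        TermFrequencyAttribute tfAtt = stream.getAttribute(TermFrequencyAttribute.class);
        stream.reset();
        assertTrue(stream.incrementToken());
        assertEquals(4, tfAtt.getTermFrequency());
        stream.end();
        stream.close();
    }
}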
import org.opensearch.index.analysis.AbstractTokenFilterFactory; import org.opensearch.index.analysis.Analysis; + import org.tartarus.snowball.ext.FrenchStemmer; public class FrenchStemTokenFilterFactory extends AbstractTokenFilterFactory { diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/HyphenationCompoundWordTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/HyphenationCompoundWordTokenFilterFactory.java index 25bf58409928e..8d29a347caeb8 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/HyphenationCompoundWordTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/HyphenationCompoundWordTokenFilterFactory.java @@ -40,12 +40,13 @@ import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; import org.opensearch.index.analysis.Analysis; -import org.xml.sax.InputSource; import java.io.InputStream; import java.nio.file.Files; import java.nio.file.Path; +import org.xml.sax.InputSource; + /** * Uses the {@link org.apache.lucene.analysis.compound.HyphenationCompoundWordTokenFilter} to decompound tokens based on hyphenation rules. * diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/KeywordMarkerTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/KeywordMarkerTokenFilterFactory.java index ad968aeee62cb..e9f3fd96dd69d 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/KeywordMarkerTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/KeywordMarkerTokenFilterFactory.java @@ -49,12 +49,12 @@ * A factory for creating keyword marker token filters that prevent tokens from * being modified by stemmers. Two types of keyword marker filters are available: * the {@link SetKeywordMarkerFilter} and the {@link PatternKeywordMarkerFilter}. - * + * <p> * The {@link SetKeywordMarkerFilter} uses a set of keywords to denote which tokens * should be excluded from stemming. This filter is created if the settings include * {@code keywords}, which contains the list of keywords, or {@code `keywords_path`}, * which contains a path to a file in the config directory with the keywords. - * + * <p> * The {@link PatternKeywordMarkerFilter} uses a regular expression pattern to match * against tokens that should be excluded from stemming. 
This filter is created if * the settings include {@code keywords_pattern}, which contains the regular expression diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/MappingCharFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/MappingCharFilterFactory.java index bd241de749f11..d6d9f8975f2fc 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/MappingCharFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/MappingCharFilterFactory.java @@ -54,7 +54,7 @@ public class MappingCharFilterFactory extends AbstractCharFilterFactory implemen MappingCharFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name); - List<MappingRule<String, String>> rules = Analysis.parseWordList(env, settings, "mappings", this::parse, false); + List<MappingRule<String, String>> rules = Analysis.parseWordList(env, settings, "mappings", this::parse); if (rules == null) { throw new IllegalArgumentException("mapping requires either `mappings` or `mappings_path` to be configured"); } diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/OpenSearchSolrSynonymParser.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/OpenSearchSolrSynonymParser.java index e50c37f03cd2e..e9d5d8cb25faf 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/OpenSearchSolrSynonymParser.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/OpenSearchSolrSynonymParser.java @@ -32,8 +32,8 @@ package org.opensearch.analysis.common; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.synonym.SolrSynonymParser; import org.apache.lucene.util.CharsRef; diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/OpenSearchWordnetSynonymParser.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/OpenSearchWordnetSynonymParser.java index c7ae6c08411b9..9ecc38fde7e60 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/OpenSearchWordnetSynonymParser.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/OpenSearchWordnetSynonymParser.java @@ -32,8 +32,8 @@ package org.opensearch.analysis.common; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.synonym.WordnetSynonymParser; import org.apache.lucene.util.CharsRef; diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/SnowballAnalyzer.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/SnowballAnalyzer.java index 78d151ee16c3b..04786689b50f0 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/SnowballAnalyzer.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/SnowballAnalyzer.java @@ -45,7 +45,7 @@ /** Filters {@link StandardTokenizer} with {@link * LowerCaseFilter}, {@link StopFilter} and {@link SnowballFilter}. - * + * <p> * Available stemmers are listed in org.tartarus.snowball.ext. 
The name of a * stemmer is the part of the class name before "Stemmer", e.g., the stemmer in * {@link org.tartarus.snowball.ext.EnglishStemmer} is named "English". diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/StemmerTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/StemmerTokenFilterFactory.java index 9d22f52aa3712..5506626e40da0 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/StemmerTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/StemmerTokenFilterFactory.java @@ -72,6 +72,9 @@ import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; import org.opensearch.index.analysis.AbstractTokenFilterFactory; + +import java.io.IOException; + import org.tartarus.snowball.ext.ArmenianStemmer; import org.tartarus.snowball.ext.BasqueStemmer; import org.tartarus.snowball.ext.CatalanStemmer; @@ -97,8 +100,6 @@ import org.tartarus.snowball.ext.SwedishStemmer; import org.tartarus.snowball.ext.TurkishStemmer; -import java.io.IOException; - public class StemmerTokenFilterFactory extends AbstractTokenFilterFactory { private static final TokenStream EMPTY_TOKEN_STREAM = new EmptyTokenStream(); diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/UAX29URLEmailTokenizerFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/UAX29URLEmailTokenizerFactory.java index 8d6e0ec0815b4..8d9eb4902daae 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/UAX29URLEmailTokenizerFactory.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/UAX29URLEmailTokenizerFactory.java @@ -33,8 +33,8 @@ package org.opensearch.analysis.common; import org.apache.lucene.analysis.Tokenizer; -import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.analysis.email.UAX29URLEmailTokenizer; +import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/WordDelimiterGraphTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/WordDelimiterGraphTokenFilterFactory.java index 51ac3141fd465..15f4cf8cba0e0 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/WordDelimiterGraphTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/WordDelimiterGraphTokenFilterFactory.java @@ -47,6 +47,7 @@ import java.util.List; import java.util.Set; +import static org.opensearch.analysis.common.WordDelimiterTokenFilterFactory.parseTypes; import static org.apache.lucene.analysis.miscellaneous.WordDelimiterGraphFilter.CATENATE_ALL; import static org.apache.lucene.analysis.miscellaneous.WordDelimiterGraphFilter.CATENATE_NUMBERS; import static org.apache.lucene.analysis.miscellaneous.WordDelimiterGraphFilter.CATENATE_WORDS; @@ -57,7 +58,6 @@ import static org.apache.lucene.analysis.miscellaneous.WordDelimiterGraphFilter.SPLIT_ON_CASE_CHANGE; import static org.apache.lucene.analysis.miscellaneous.WordDelimiterGraphFilter.SPLIT_ON_NUMERICS; import static org.apache.lucene.analysis.miscellaneous.WordDelimiterGraphFilter.STEM_ENGLISH_POSSESSIVE; -import static org.opensearch.analysis.common.WordDelimiterTokenFilterFactory.parseTypes; 
public class WordDelimiterGraphTokenFilterFactory extends AbstractTokenFilterFactory { private final byte[] charTypeTable; diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/BaseWordDelimiterTokenFilterFactoryTestCase.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/BaseWordDelimiterTokenFilterFactoryTestCase.java index 94c7d63f2bee7..f37d5862b9d3f 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/BaseWordDelimiterTokenFilterFactoryTestCase.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/BaseWordDelimiterTokenFilterFactoryTestCase.java @@ -211,8 +211,8 @@ private void createTokenFilterFactoryWithTypeTable(String[] rules) throws IOExce } public void testTypeTableParsingError() { - String[] rules = { "# This is a comment", "$ => DIGIT", "\\u200D => ALPHANUM", "abc => ALPHA" }; + String[] rules = { "# This is a comment", "# => ALPHANUM", "$ => DIGIT", "\\u200D => ALPHANUM", "abc => ALPHA" }; RuntimeException ex = expectThrows(RuntimeException.class, () -> createTokenFilterFactoryWithTypeTable(rules)); - assertEquals("Line [4]: Invalid mapping rule: [abc => ALPHA]. Only a single character is allowed.", ex.getMessage()); + assertEquals("Line [5]: Invalid mapping rule: [abc => ALPHA]. Only a single character is allowed.", ex.getMessage()); } } diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CharGroupTokenizerFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CharGroupTokenizerFactoryTests.java index 95886cda8b025..459f463c3ee28 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CharGroupTokenizerFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CharGroupTokenizerFactoryTests.java @@ -33,13 +33,14 @@ package org.opensearch.analysis.common; import com.carrotsearch.randomizedtesting.generators.RandomStrings; + import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.util.CharTokenizer; import org.opensearch.common.settings.Settings; import org.opensearch.core.index.Index; import org.opensearch.index.IndexSettings; -import org.opensearch.test.OpenSearchTokenStreamTestCase; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTokenStreamTestCase; import java.io.IOException; import java.io.Reader; diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CommonAnalysisFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CommonAnalysisFactoryTests.java index 1c4db089565ff..11713f52f5b18 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CommonAnalysisFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CommonAnalysisFactoryTests.java @@ -145,6 +145,7 @@ protected Map<String, Class<?>> getTokenFilters() { filters.put("cjkwidth", CJKWidthFilterFactory.class); filters.put("cjkbigram", CJKBigramFilterFactory.class); filters.put("delimitedpayload", DelimitedPayloadTokenFilterFactory.class); + filters.put("delimitedtermfrequency", DelimitedTermFrequencyTokenFilterFactory.class); filters.put("keepword", KeepWordFilterFactory.class); filters.put("type", KeepTypesFilterFactory.class); filters.put("classic", ClassicFilterFactory.class); @@ -202,6 +203,7 @@ protected Map<String, Class<?>> getPreConfiguredTokenFilters() { filters.put("decimal_digit", null); 
filters.put("delimited_payload_filter", org.apache.lucene.analysis.payloads.DelimitedPayloadTokenFilterFactory.class); filters.put("delimited_payload", org.apache.lucene.analysis.payloads.DelimitedPayloadTokenFilterFactory.class); + filters.put("delimited_term_freq", org.apache.lucene.analysis.miscellaneous.DelimitedTermFrequencyTokenFilterFactory.class); filters.put("dutch_stem", SnowballPorterFilterFactory.class); filters.put("edge_ngram", null); filters.put("edgeNGram", null); diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CompoundAnalysisTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CompoundAnalysisTests.java index 32556db3939b8..a681d9a104ecf 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CompoundAnalysisTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CompoundAnalysisTests.java @@ -47,8 +47,8 @@ import org.opensearch.indices.analysis.AnalysisModule; import org.opensearch.indices.analysis.AnalysisModule.AnalysisProvider; import org.opensearch.plugins.AnalysisPlugin; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTestCase; import org.hamcrest.MatcherAssert; import java.io.IOException; diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ConcatenateGraphTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ConcatenateGraphTokenFilterFactoryTests.java index 1a78690dffcf7..40270e9fddcac 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ConcatenateGraphTokenFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ConcatenateGraphTokenFilterFactoryTests.java @@ -8,11 +8,11 @@ package org.opensearch.analysis.common; -import org.apache.lucene.tests.analysis.CannedTokenStream; -import org.apache.lucene.tests.analysis.Token; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.WhitespaceTokenizer; +import org.apache.lucene.tests.analysis.CannedTokenStream; +import org.apache.lucene.tests.analysis.Token; import org.apache.lucene.util.automaton.TooComplexToDeterminizeException; import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/DelimitedTermFrequencyTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/DelimitedTermFrequencyTokenFilterFactoryTests.java new file mode 100644 index 0000000000000..fab83a75387de --- /dev/null +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/DelimitedTermFrequencyTokenFilterFactoryTests.java @@ -0,0 +1,89 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.analysis.common; + +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.Tokenizer; +import org.apache.lucene.analysis.core.WhitespaceTokenizer; +import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; +import org.apache.lucene.analysis.tokenattributes.TermFrequencyAttribute; +import org.opensearch.common.settings.Settings; +import org.opensearch.env.Environment; +import org.opensearch.index.analysis.AnalysisTestsHelper; +import org.opensearch.index.analysis.TokenFilterFactory; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.OpenSearchTokenStreamTestCase; + +import java.io.StringReader; + +public class DelimitedTermFrequencyTokenFilterFactoryTests extends OpenSearchTokenStreamTestCase { + + public void testDefault() throws Exception { + OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings( + Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .put("index.analysis.filter.my_delimited_term_freq.type", "delimited_term_freq") + .build(), + new CommonAnalysisModulePlugin() + ); + doTest(analysis, "cat|4 dog|5"); + } + + public void testDelimiter() throws Exception { + OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings( + Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .put("index.analysis.filter.my_delimited_term_freq.type", "delimited_term_freq") + .put("index.analysis.filter.my_delimited_term_freq.delimiter", ":") + .build(), + new CommonAnalysisModulePlugin() + ); + doTest(analysis, "cat:4 dog:5"); + } + + public void testDelimiterLongerThanOneCharThrows() { + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> AnalysisTestsHelper.createTestAnalysisFromSettings( + Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .put("index.analysis.filter.my_delimited_term_freq.type", "delimited_term_freq") + .put("index.analysis.filter.my_delimited_term_freq.delimiter", "^^") + .build(), + new CommonAnalysisModulePlugin() + ) + ); + + assertEquals("Setting [delimiter] must be a single, non-null character. 
[^^] was provided.", ex.getMessage()); + } + + private void doTest(OpenSearchTestCase.TestAnalysis analysis, String source) throws Exception { + TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_delimited_term_freq"); + Tokenizer tokenizer = new WhitespaceTokenizer(); + tokenizer.setReader(new StringReader(source)); + + TokenStream stream = tokenFilter.create(tokenizer); + + CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class); + TermFrequencyAttribute tfAtt = stream.getAttribute(TermFrequencyAttribute.class); + stream.reset(); + assertTermEquals("cat", stream, termAtt, tfAtt, 4); + assertTermEquals("dog", stream, termAtt, tfAtt, 5); + assertFalse(stream.incrementToken()); + stream.end(); + stream.close(); + } + + void assertTermEquals(String expected, TokenStream stream, CharTermAttribute termAtt, TermFrequencyAttribute tfAtt, int expectedTf) + throws Exception { + assertTrue(stream.incrementToken()); + assertEquals(expected, termAtt.toString()); + assertEquals(expectedTf, tfAtt.getTermFrequency()); + } +} diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/DisableGraphQueryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/DisableGraphQueryTests.java index 9bfc3a77e8c44..738c81c13cb6c 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/DisableGraphQueryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/DisableGraphQueryTests.java @@ -33,13 +33,13 @@ package org.opensearch.analysis.common; import org.apache.lucene.index.Term; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.MultiPhraseQuery; +import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.SynonymQuery; -import org.apache.lucene.search.BooleanQuery; -import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.TermQuery; -import org.apache.lucene.search.PhraseQuery; -import org.apache.lucene.search.MultiPhraseQuery; import org.opensearch.common.settings.Settings; import org.opensearch.index.IndexService; import org.opensearch.index.query.MatchPhraseQueryBuilder; diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/EdgeNGramTokenizerTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/EdgeNGramTokenizerTests.java index 6ac5f42d2a66f..7681a3f6626e3 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/EdgeNGramTokenizerTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/EdgeNGramTokenizerTests.java @@ -36,15 +36,15 @@ import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.Index; import org.opensearch.env.Environment; import org.opensearch.env.TestEnvironment; -import org.opensearch.core.index.Index; import org.opensearch.index.IndexSettings; import org.opensearch.index.analysis.IndexAnalyzers; import org.opensearch.index.analysis.NamedAnalyzer; import org.opensearch.indices.analysis.AnalysisModule; -import org.opensearch.test.OpenSearchTokenStreamTestCase; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTokenStreamTestCase; import org.opensearch.test.VersionUtils; import java.io.IOException; diff --git 
a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/FlattenGraphTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/FlattenGraphTokenFilterFactoryTests.java index d26949c30f203..5e8365409a725 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/FlattenGraphTokenFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/FlattenGraphTokenFilterFactoryTests.java @@ -32,14 +32,14 @@ package org.opensearch.analysis.common; +import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.tests.analysis.CannedTokenStream; import org.apache.lucene.tests.analysis.Token; -import org.apache.lucene.analysis.TokenStream; import org.opensearch.common.settings.Settings; import org.opensearch.core.index.Index; import org.opensearch.index.IndexSettings; -import org.opensearch.test.OpenSearchTokenStreamTestCase; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTokenStreamTestCase; import java.io.IOException; diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/HighlighterWithAnalyzersTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/HighlighterWithAnalyzersTests.java index 80ca894d1a5bc..e55c1c69b2e40 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/HighlighterWithAnalyzersTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/HighlighterWithAnalyzersTests.java @@ -32,16 +32,18 @@ package org.opensearch.analysis.common; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexSettings; import org.opensearch.index.query.Operator; import org.opensearch.plugins.Plugin; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.fetch.subphase.highlight.HighlightBuilder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; @@ -55,16 +57,30 @@ import static org.opensearch.index.query.QueryBuilders.matchPhraseQuery; import static org.opensearch.index.query.QueryBuilders.matchQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.builder.SearchSourceBuilder.highlight; import static org.opensearch.search.builder.SearchSourceBuilder.searchSource; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHighlight; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.startsWith; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; -public class HighlighterWithAnalyzersTests extends OpenSearchIntegTestCase { +public class HighlighterWithAnalyzersTests extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public 
HighlighterWithAnalyzersTests(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(CommonAnalysisModulePlugin.class); diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MappingCharFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MappingCharFilterFactoryTests.java index 387eb4a377007..28e041ac8c92d 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MappingCharFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MappingCharFilterFactoryTests.java @@ -37,6 +37,7 @@ public static CharFilterFactory create(String... rules) throws IOException { public void testRulesOk() throws IOException { MappingCharFilterFactory mappingCharFilterFactory = (MappingCharFilterFactory) create( + "# This is a comment", "# => _hashtag_", ":) => _happy_", ":( => _sad_" @@ -64,7 +65,10 @@ public void testRuleError() { } public void testRulePartError() { - RuntimeException ex = expectThrows(RuntimeException.class, () -> create("# => _hashtag_", ":) => _happy_", "a:b")); - assertEquals("Line [3]: Invalid mapping rule : [a:b]", ex.getMessage()); + RuntimeException ex = expectThrows( + RuntimeException.class, + () -> create("# This is a comment", "# => _hashtag_", ":) => _happy_", "a:b") + ); + assertEquals("Line [4]: Invalid mapping rule : [a:b]", ex.getMessage()); } } diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MultiplexerTokenFilterTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MultiplexerTokenFilterTests.java index e9dfa299871e5..3264d1cbdc10c 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MultiplexerTokenFilterTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MultiplexerTokenFilterTests.java @@ -41,8 +41,8 @@ import org.opensearch.index.analysis.IndexAnalyzers; import org.opensearch.index.analysis.NamedAnalyzer; import org.opensearch.indices.analysis.AnalysisModule; -import org.opensearch.test.OpenSearchTokenStreamTestCase; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTokenStreamTestCase; import java.io.IOException; import java.util.Collections; diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/NGramTokenizerFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/NGramTokenizerFactoryTests.java index 6e4dc558fed99..21b65059cf688 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/NGramTokenizerFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/NGramTokenizerFactoryTests.java @@ -32,27 +32,27 @@ package org.opensearch.analysis.common; -import org.apache.lucene.tests.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter; import org.apache.lucene.analysis.reverse.ReverseStringFilter; +import 
org.apache.lucene.tests.analysis.MockTokenizer; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.Settings.Builder; import org.opensearch.core.index.Index; import org.opensearch.index.IndexSettings; -import org.opensearch.test.OpenSearchTokenStreamTestCase; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTokenStreamTestCase; import org.opensearch.test.VersionUtils; import java.io.IOException; import java.io.StringReader; import java.util.Arrays; -import static com.carrotsearch.randomizedtesting.RandomizedTest.scaledRandomIntBetween; import static org.hamcrest.Matchers.instanceOf; +import static com.carrotsearch.randomizedtesting.RandomizedTest.scaledRandomIntBetween; public class NGramTokenizerFactoryTests extends OpenSearchTokenStreamTestCase { public void testParseTokenChars() { diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PathHierarchyTokenizerFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PathHierarchyTokenizerFactoryTests.java index 12e428be6c821..555d6c78b6ec5 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PathHierarchyTokenizerFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PathHierarchyTokenizerFactoryTests.java @@ -35,16 +35,61 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.lucene.analysis.Tokenizer; +import org.opensearch.Version; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.core.index.Index; -import org.opensearch.test.OpenSearchTokenStreamTestCase; +import org.opensearch.env.Environment; +import org.opensearch.env.TestEnvironment; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.analysis.IndexAnalyzers; +import org.opensearch.index.analysis.NamedAnalyzer; +import org.opensearch.indices.analysis.AnalysisModule; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTokenStreamTestCase; +import org.opensearch.test.VersionUtils; import java.io.IOException; import java.io.StringReader; +import java.util.Collections; public class PathHierarchyTokenizerFactoryTests extends OpenSearchTokenStreamTestCase { + private IndexAnalyzers buildAnalyzers(Version version, String tokenizer) throws IOException { + Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build(); + Settings indexSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, version) + .put("index.analysis.analyzer.my_analyzer.tokenizer", tokenizer) + .build(); + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings); + return new AnalysisModule(TestEnvironment.newEnvironment(settings), Collections.singletonList(new CommonAnalysisModulePlugin())) + .getAnalysisRegistry() + .build(idxSettings); + } + + /** + * Test that deprecated "PathHierarchy" tokenizer name is still available via {@link CommonAnalysisModulePlugin} starting in 3.x. 
+ */ + public void testPreConfiguredTokenizer() throws IOException { + + { + try ( + IndexAnalyzers indexAnalyzers = buildAnalyzers( + VersionUtils.randomVersionBetween(random(), Version.V_3_0_0, Version.CURRENT), + "PathHierarchy" + ) + ) { + NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer"); + assertNotNull(analyzer); + assertTokenStreamContents(analyzer.tokenStream("dummy", "/a/b/c"), new String[] { "/a", "/a/b", "/a/b/c" }); + // Once LUCENE-12750 is fixed we can use the following testing method instead. + // Similar testing approach has been used for deprecation of (Edge)NGrams tokenizers as well. + // assertAnalyzesTo(analyzer, "/a/b/c", new String[] { "/a", "/a/b", "/a/b/c" }); + + } + } + } + public void testDefaults() throws IOException { final Index index = new Index("test", "_na_"); final Settings indexSettings = newAnalysisSettingsBuilder().build(); diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PatternCaptureTokenFilterTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PatternCaptureTokenFilterTests.java index a3dc75fd37671..2398240b82967 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PatternCaptureTokenFilterTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PatternCaptureTokenFilterTests.java @@ -39,8 +39,8 @@ import org.opensearch.index.IndexSettings; import org.opensearch.index.analysis.IndexAnalyzers; import org.opensearch.index.analysis.NamedAnalyzer; -import org.opensearch.test.OpenSearchTokenStreamTestCase; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTokenStreamTestCase; import static org.opensearch.test.OpenSearchTestCase.createTestAnalysis; import static org.hamcrest.Matchers.containsString; diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PredicateTokenScriptFilterTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PredicateTokenScriptFilterTests.java index b31f4020ef627..d88b5bc93c28f 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PredicateTokenScriptFilterTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/PredicateTokenScriptFilterTests.java @@ -44,8 +44,8 @@ import org.opensearch.script.Script; import org.opensearch.script.ScriptContext; import org.opensearch.script.ScriptService; -import org.opensearch.test.OpenSearchTokenStreamTestCase; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTokenStreamTestCase; import java.io.IOException; import java.util.Collections; diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ScriptedConditionTokenFilterTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ScriptedConditionTokenFilterTests.java index 6f8a182ab45fc..171304a054d83 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ScriptedConditionTokenFilterTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/ScriptedConditionTokenFilterTests.java @@ -44,8 +44,8 @@ import org.opensearch.script.Script; import org.opensearch.script.ScriptContext; import org.opensearch.script.ScriptService; -import org.opensearch.test.OpenSearchTokenStreamTestCase; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTokenStreamTestCase; import java.util.Collections; diff --git 
a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/StemmerTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/StemmerTokenFilterFactoryTests.java index 18d3727475065..de30d4f1c03c0 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/StemmerTokenFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/StemmerTokenFilterFactoryTests.java @@ -50,9 +50,9 @@ import java.io.IOException; import java.io.StringReader; -import static com.carrotsearch.randomizedtesting.RandomizedTest.scaledRandomIntBetween; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_CREATED; import static org.hamcrest.Matchers.instanceOf; +import static com.carrotsearch.randomizedtesting.RandomizedTest.scaledRandomIntBetween; public class StemmerTokenFilterFactoryTests extends OpenSearchTokenStreamTestCase { diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/SynonymsAnalysisTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/SynonymsAnalysisTests.java index 733004cafcadc..8c8b8ac7f61c0 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/SynonymsAnalysisTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/SynonymsAnalysisTests.java @@ -33,10 +33,10 @@ package org.opensearch.analysis.common; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.core.KeywordTokenizer; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; +import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; @@ -46,8 +46,8 @@ import org.opensearch.index.analysis.PreConfiguredTokenFilter; import org.opensearch.index.analysis.TokenFilterFactory; import org.opensearch.index.analysis.TokenizerFactory; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; import org.hamcrest.MatcherAssert; diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/UniqueTokenFilterTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/UniqueTokenFilterTests.java index a321fd4a5879c..ba8de4db07396 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/UniqueTokenFilterTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/UniqueTokenFilterTests.java @@ -33,10 +33,10 @@ package org.opensearch.analysis.common; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.tests.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; +import org.apache.lucene.tests.analysis.MockTokenizer; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/WhitespaceTokenizerFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/WhitespaceTokenizerFactoryTests.java index 70f6269ad0d04..0d4db04141e45 100644 --- 
a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/WhitespaceTokenizerFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/WhitespaceTokenizerFactoryTests.java @@ -40,8 +40,8 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.index.Index; import org.opensearch.index.IndexSettings; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; import java.io.Reader; diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java index 7a717fe7fe22e..51ea7d200dc5d 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java @@ -44,8 +44,8 @@ import org.opensearch.index.analysis.NamedAnalyzer; import org.opensearch.index.analysis.TokenFilterFactory; import org.opensearch.indices.analysis.AnalysisModule; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; import java.io.StringReader; diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml index 56ed2175df60a..179de835a4105 100644 --- a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml @@ -298,6 +298,9 @@ --- "path_hierarchy": + - skip: + features: "allowed_warnings" + - do: indices.analyze: body: @@ -312,6 +315,8 @@ - match: { detail.tokenizer.tokens.2.token: a/b/c } - do: + allowed_warnings: + - 'The [PathHierarchy] tokenizer name is deprecated and will be removed in a future version. Please change the tokenizer name to [path_hierarchy] instead.' indices.analyze: body: text: "a/b/c" @@ -337,11 +342,13 @@ - match: { detail.tokenizer.tokens.2.token: a/b/c } - do: + allowed_warnings: + - 'The [PathHierarchy] tokenizer name is deprecated and will be removed in a future version. Please change the tokenizer name to [path_hierarchy] instead.' 
indices.analyze: body: text: "a/b/c" explain: true - tokenizer: PathHierarchy + tokenizer: PathHierarchy - length: { detail.tokenizer.tokens: 3 } - match: { detail.tokenizer.name: PathHierarchy } - match: { detail.tokenizer.tokens.0.token: a } diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/40_token_filters.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/40_token_filters.yml index 40c82ff185661..802c79c780689 100644 --- a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/40_token_filters.yml +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/40_token_filters.yml @@ -127,6 +127,69 @@ - match: { tokens.2.token: brown } - match: { tokens.3.token: fox } + - do: + indices.analyze: + body: + text: 'text1 #text2' + tokenizer: whitespace + filter: + - type: word_delimiter + split_on_numerics: false + type_table: + - "\\u0023 => ALPHANUM" + - length: { tokens: 2 } + - match: { tokens.0.token: text1 } + - match: { tokens.0.start_offset: 0 } + - match: { tokens.0.end_offset: 5 } + - match: { tokens.0.position: 0 } + - match: { tokens.1.token: "#text2" } + - match: { tokens.1.start_offset: 6 } + - match: { tokens.1.end_offset: 12 } + - match: { tokens.1.position: 1 } + + - do: + indices.analyze: + body: + text: 'text1 #text2' + tokenizer: whitespace + filter: + - type: word_delimiter + split_on_numerics: false + type_table: + - "# This is a comment" + - "# => ALPHANUM" + - length: { tokens: 2 } + - match: { tokens.0.token: text1 } + - match: { tokens.0.start_offset: 0 } + - match: { tokens.0.end_offset: 5 } + - match: { tokens.0.position: 0 } + - match: { tokens.1.token: "#text2" } + - match: { tokens.1.start_offset: 6 } + - match: { tokens.1.end_offset: 12 } + - match: { tokens.1.position: 1 } + + - do: + indices.analyze: + body: + text: 'text1 #text2' + tokenizer: whitespace + filter: + - type: word_delimiter + split_on_numerics: false + type_table: + - "# This is a comment" + - "# => ALPHANUM" + - "@ => ALPHANUM" + - length: { tokens: 2 } + - match: { tokens.0.token: text1 } + - match: { tokens.0.start_offset: 0 } + - match: { tokens.0.end_offset: 5 } + - match: { tokens.0.position: 0 } + - match: { tokens.1.token: "#text2" } + - match: { tokens.1.start_offset: 6 } + - match: { tokens.1.end_offset: 12 } + - match: { tokens.1.position: 1 } + --- "word_delimiter_graph": - do: @@ -231,6 +294,69 @@ - match: { detail.tokenfilters.0.tokens.5.end_offset: 19 } - match: { detail.tokenfilters.0.tokens.5.position: 5 } + - do: + indices.analyze: + body: + text: 'text1 #text2' + tokenizer: whitespace + filter: + - type: word_delimiter_graph + split_on_numerics: false + type_table: + - "\\u0023 => ALPHANUM" + - length: { tokens: 2 } + - match: { tokens.0.token: text1 } + - match: { tokens.0.start_offset: 0 } + - match: { tokens.0.end_offset: 5 } + - match: { tokens.0.position: 0 } + - match: { tokens.1.token: "#text2" } + - match: { tokens.1.start_offset: 6 } + - match: { tokens.1.end_offset: 12 } + - match: { tokens.1.position: 1 } + + - do: + indices.analyze: + body: + text: 'text1 #text2' + tokenizer: whitespace + filter: + - type: word_delimiter_graph + split_on_numerics: false + type_table: + - "# This is a comment" + - "# => ALPHANUM" + - length: { tokens: 2 } + - match: { tokens.0.token: text1 } + - match: { tokens.0.start_offset: 0 } + - match: { tokens.0.end_offset: 5 } + - match: { tokens.0.position: 0 } + - match: { tokens.1.token: 
"#text2" } + - match: { tokens.1.start_offset: 6 } + - match: { tokens.1.end_offset: 12 } + - match: { tokens.1.position: 1 } + + - do: + indices.analyze: + body: + text: 'text1 #text2' + tokenizer: whitespace + filter: + - type: word_delimiter_graph + split_on_numerics: false + type_table: + - "# This is a comment" + - "# => ALPHANUM" + - "@ => ALPHANUM" + - length: { tokens: 2 } + - match: { tokens.0.token: text1 } + - match: { tokens.0.start_offset: 0 } + - match: { tokens.0.end_offset: 5 } + - match: { tokens.0.position: 0 } + - match: { tokens.1.token: "#text2" } + - match: { tokens.1.start_offset: 6 } + - match: { tokens.1.end_offset: 12 } + - match: { tokens.1.position: 1 } + --- "unique": - do: @@ -1198,6 +1324,46 @@ - match: { tokens.0.token: foo } --- +"delimited_term_freq": + - skip: + version: " - 2.9.99" + reason: "delimited_term_freq token filter was added in v2.10.0" + - do: + indices.create: + index: test + body: + settings: + analysis: + filter: + my_delimited_term_freq: + type: delimited_term_freq + delimiter: ^ + - do: + indices.analyze: + index: test + body: + text: foo^3 + tokenizer: keyword + filter: [my_delimited_term_freq] + attributes: termFrequency + explain: true + - length: { detail.tokenfilters: 1 } + - match: { detail.tokenfilters.0.tokens.0.token: foo } + - match: { detail.tokenfilters.0.tokens.0.termFrequency: 3 } + + # Test pre-configured token filter too: + - do: + indices.analyze: + body: + text: foo|100 + tokenizer: keyword + filter: [delimited_term_freq] + attributes: termFrequency + explain: true + - length: { detail.tokenfilters: 1 } + - match: { detail.tokenfilters.0.tokens.0.token: foo } + - match: { detail.tokenfilters.0.tokens.0.termFrequency: 100 } +--- "keep_filter": - do: indices.create: diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/50_char_filters.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/50_char_filters.yml index 0078575ae8e57..5e266c10cba8f 100644 --- a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/50_char_filters.yml +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/50_char_filters.yml @@ -69,6 +69,7 @@ char_filter: - type: mapping mappings: + - "# This is a comment" - "# => _hashsign_" - "@ => _atsign_" - length: { tokens: 3 } diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/11_match_field_match_only_text.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/11_match_field_match_only_text.yml new file mode 100644 index 0000000000000..140d70414a4a7 --- /dev/null +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/11_match_field_match_only_text.yml @@ -0,0 +1,70 @@ +# integration tests for queries with specific analysis chains + +"match query with stacked stems": + - skip: + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" + # Tests the match query stemmed tokens are "stacked" on top of the unstemmed + # versions in the same position. 
+ - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + number_of_replicas: 1 + analysis: + analyzer: + index: + tokenizer: standard + filter: [lowercase] + search: + rest_total_hits_as_int: true + tokenizer: standard + filter: [lowercase, keyword_repeat, porter_stem, unique_stem] + filter: + unique_stem: + type: unique + only_on_same_position: true + mappings: + properties: + text: + type: match_only_text + analyzer: index + search_analyzer: search + + - do: + index: + index: test + id: 1 + body: { "text": "the fox runs across the street" } + refresh: true + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + text: + query: fox runs + operator: AND + - match: {hits.total: 1} + + - do: + index: + index: test + id: 2 + body: { "text": "run fox run" } + refresh: true + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + text: + query: fox runs + operator: AND + - match: {hits.total: 2} diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/20_ngram_search_field_match_only_text.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/20_ngram_search_field_match_only_text.yml new file mode 100644 index 0000000000000..a5da3043f19b5 --- /dev/null +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/20_ngram_search_field_match_only_text.yml @@ -0,0 +1,144 @@ +"ngram search": + - skip: + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + analysis: + analyzer: + my_analyzer: + tokenizer: standard + filter: [my_ngram] + filter: + my_ngram: + type: ngram + min: 2, + max: 2 + mappings: + properties: + text: + type: match_only_text + analyzer: my_analyzer + + - do: + index: + index: test + id: 1 + body: { "text": "foo bar baz" } + refresh: true + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + text: + query: foa + - match: {hits.total: 1} + +--- +"testNGramCopyField": + - skip: + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + max_ngram_diff: 9 + analysis: + analyzer: + my_ngram_analyzer: + tokenizer: my_ngram_tokenizer + tokenizer: + my_ngram_tokenizer: + type: ngram + min: 1, + max: 10 + token_chars: [] + mappings: + properties: + origin: + type: match_only_text + copy_to: meta + meta: + type: match_only_text + analyzer: my_ngram_analyzer + + - do: + index: + index: test + id: 1 + body: { "origin": "C.A1234.5678" } + refresh: true + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + meta: + query: 1234 + - match: {hits.total: 1} + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + meta: + query: 1234.56 + - match: {hits.total: 1} + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + meta: + query: A1234 + - match: {hits.total: 1} + + - do: + search: + rest_total_hits_as_int: true + body: + query: + term: + meta: + value: a1234 + - match: {hits.total: 0} + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + meta: + query: A1234 + analyzer: my_ngram_analyzer + - match: {hits.total: 1} + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + meta: + query: a1234 + analyzer: my_ngram_analyzer + - match: {hits.total: 1} 
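+ # The term query above returns 0 hits because term queries skip analysis: the literal string "a1234" is never + # indexed as a gram of "C.A1234.5678" (the analyzer applies no lowercasing). The match queries that follow + # analyze "a1234" into grams such as "1234", which do overlap with the indexed grams, hence 1 hit each.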
diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/30_ngram_highligthing_field_match_only_text.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/30_ngram_highligthing_field_match_only_text.yml new file mode 100644 index 0000000000000..accf5d975d57f --- /dev/null +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/30_ngram_highligthing_field_match_only_text.yml @@ -0,0 +1,137 @@ +"ngram highlighting": + - skip: + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + index.max_ngram_diff: 19 + analysis: + tokenizer: + my_ngramt: + type: ngram + min_gram: 1 + max_gram: 20 + token_chars: letter,digit + filter: + my_ngram: + type: ngram + min_gram: 1 + max_gram: 20 + analyzer: + name2_index_analyzer: + tokenizer: whitespace + filter: [my_ngram] + name_index_analyzer: + tokenizer: my_ngramt + name_search_analyzer: + tokenizer: whitespace + mappings: + properties: + name: + type: match_only_text + term_vector: with_positions_offsets + analyzer: name_index_analyzer + search_analyzer: name_search_analyzer + name2: + type: match_only_text + term_vector: with_positions_offsets + analyzer: name2_index_analyzer + search_analyzer: name_search_analyzer + + - do: + index: + index: test + id: 1 + refresh: true + body: + name: logicacmg ehemals avinci - the know how company + name2: logicacmg ehemals avinci - the know how company + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + name: + query: logica m + highlight: + fields: + - name: {} + - match: {hits.total: 1} + - match: {hits.hits.0.highlight.name.0: "<em>logica</em>c<em>m</em>g ehe<em>m</em>als avinci - the know how co<em>m</em>pany"} + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + name: + query: logica ma + highlight: + fields: + - name: {} + - match: {hits.total: 1} + - match: {hits.hits.0.highlight.name.0: "<em>logica</em>cmg ehe<em>ma</em>ls avinci - the know how company"} + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + name: + query: logica + highlight: + fields: + - name: {} + - match: {hits.total: 1} + - match: {hits.hits.0.highlight.name.0: "<em>logica</em>cmg ehemals avinci - the know how company"} + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + name2: + query: logica m + highlight: + fields: + - name2: {} + - match: {hits.total: 1} + - match: {hits.hits.0.highlight.name2.0: "<em>logicacmg</em> <em>ehemals</em> avinci - the know how <em>company</em>"} + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + name2: + query: logica ma + highlight: + fields: + - name2: {} + - match: {hits.total: 1} + - match: {hits.hits.0.highlight.name2.0: "<em>logicacmg</em> <em>ehemals</em> avinci - the know how company"} + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + name2: + query: logica + highlight: + fields: + - name2: {} + - match: {hits.total: 1} + - match: {hits.hits.0.highlight.name2.0: "<em>logicacmg</em> ehemals avinci - the know how company"} diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/40_query_string_field_match_only_text.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/40_query_string_field_match_only_text.yml new file mode 100644 index 
0000000000000..717d3a7dd8a3e --- /dev/null +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/40_query_string_field_match_only_text.yml @@ -0,0 +1,59 @@ +--- +"Test query string with snowball": + - skip: + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" + - do: + indices.create: + index: test + body: + mappings: + properties: + field: + type: match_only_text + number: + type: integer + + - do: + index: + index: test + id: 1 + body: { field: foo bar} + + - do: + indices.refresh: + index: [test] + + - do: + indices.validate_query: + index: test + q: field:bars + analyzer: snowball + + - is_true: valid + + - do: + search: + rest_total_hits_as_int: true + index: test + q: field:bars + analyzer: snowball + + - match: {hits.total: 1} + + - do: + explain: + index: test + id: 1 + q: field:bars + analyzer: snowball + + - is_true: matched + + - do: + count: + index: test + q: field:bars + analyzer: snowball + + - match: {count : 1} diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/41_query_string_with_default_analyzer_field_match_only_text.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/41_query_string_with_default_analyzer_field_match_only_text.yml new file mode 100644 index 0000000000000..cd2d2e42c6a17 --- /dev/null +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/41_query_string_with_default_analyzer_field_match_only_text.yml @@ -0,0 +1,42 @@ +--- +"Test default search analyzer is applied": + - skip: + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" + - do: + indices.create: + index: test + body: + settings: + index.analysis.analyzer.default.type: simple + index.analysis.analyzer.default_search.type: german + mappings: + properties: + body: + type: match_only_text + + - do: + index: + index: test + id: 1 + body: + body: Ich lese die Bücher + + - do: + indices.refresh: + index: [ test ] + + - do: + search: + index: test + q: "body:Bücher" + + - match: { hits.total.value: 0 } + + - do: + search: + index: test + q: "body:Bücher" + analyzer: simple + + - match: { hits.total.value: 1 } diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/50_queries_with_synonyms_field_match_only_text.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/50_queries_with_synonyms_field_match_only_text.yml new file mode 100644 index 0000000000000..0c537dd42d583 --- /dev/null +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/50_queries_with_synonyms_field_match_only_text.yml @@ -0,0 +1,348 @@ +--- +"Test common terms query with stacked tokens": + - skip: + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" + features: "allowed_warnings" + + - do: + indices.create: + index: test + body: + settings: + analysis: + filter: + syns: + type: synonym + synonyms: [ "quick,fast" ] + analyzer: + syns: + tokenizer: standard + filter: [ "syns" ] + mappings: + properties: + field1: + type: match_only_text + analyzer: syns + field2: + type: match_only_text + analyzer: syns + + - do: + index: + index: test + id: 3 + body: + field1: quick lazy huge brown pidgin + field2: the quick lazy huge brown fox jumps over the tree + + - do: + index: + index: test + id: 1 + body: + field1: the quick brown fox + + - do: + index: + index: test + id: 2 + body: + field1: the quick lazy huge brown fox jumps over the tree + 
refresh: true + + - do: + allowed_warnings: + - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' + search: + rest_total_hits_as_int: true + body: + query: + common: + field1: + query: the fast brown + cutoff_frequency: 3 + low_freq_operator: or + - match: { hits.total: 3 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.1._id: "2" } + - match: { hits.hits.2._id: "3" } + + - do: + allowed_warnings: + - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' + search: + rest_total_hits_as_int: true + body: + query: + common: + field1: + query: the fast brown + cutoff_frequency: 3 + low_freq_operator: and + - match: { hits.total: 2 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.1._id: "2" } + + - do: + allowed_warnings: + - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' + search: + rest_total_hits_as_int: true + body: + query: + common: + field1: + query: the fast brown + cutoff_frequency: 3 + - match: { hits.total: 3 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.1._id: "2" } + - match: { hits.hits.2._id: "3" } + + - do: + allowed_warnings: + - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' + search: + rest_total_hits_as_int: true + body: + query: + common: + field1: + query: the fast huge fox + minimum_should_match: + low_freq: 3 + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "2" } + + - do: + allowed_warnings: + - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' + search: + rest_total_hits_as_int: true + body: + query: + common: + field1: + query: the fast lazy fox brown + cutoff_frequency: 1 + minimum_should_match: + high_freq: 5 + - match: { hits.total: 2 } + - match: { hits.hits.0._id: "2" } + - match: { hits.hits.1._id: "1" } + + - do: + allowed_warnings: + - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' + search: + rest_total_hits_as_int: true + body: + query: + common: + field1: + query: the fast lazy fox brown + cutoff_frequency: 1 + minimum_should_match: + high_freq: 6 + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "2" } + + - do: + allowed_warnings: + - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' + search: + rest_total_hits_as_int: true + body: + query: + common: + field1: + query: the fast lazy fox brown + cutoff_frequency: 1 + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "2" } + + - do: + allowed_warnings: + - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' + search: + rest_total_hits_as_int: true + body: + query: + common: + field1: + query: the quick brown + cutoff_frequency: 3 + - match: { hits.total: 3 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.1._id: "2" } + - match: { hits.hits.2._id: "3" } + + - do: + allowed_warnings: + - 'Deprecated field [cutoff_frequency] used, 
replaced by [you can omit this option, the [match] query can skip block of documents efficiently if the total number of hits is not tracked]' + search: + rest_total_hits_as_int: true + body: + query: + match: + field1: + query: the fast brown + cutoff_frequency: 3 + operator: and + - match: { hits.total: 2 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.1._id: "2" } + + - do: + allowed_warnings: + - 'Deprecated field [cutoff_frequency] used, replaced by [you can omit this option, the [match] query can skip block of documents efficiently if the total number of hits is not tracked]' + search: + rest_total_hits_as_int: true + body: + query: + match: + field1: + query: the fast brown + cutoff_frequency: 3 + operator: or + - match: { hits.total: 3 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.1._id: "2" } + - match: { hits.hits.2._id: "3" } + + - do: + allowed_warnings: + - 'Deprecated field [cutoff_frequency] used, replaced by [you can omit this option, the [match] query can skip block of documents efficiently if the total number of hits is not tracked]' + search: + rest_total_hits_as_int: true + body: + query: + match: + field1: + query: the fast brown + cutoff_frequency: 3 + minimum_should_match: 3 + - match: { hits.total: 2 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.1._id: "2" } + + - do: + allowed_warnings: + - 'Deprecated field [cutoff_frequency] used, replaced by [you can omit this option, the [multi_match] query can skip block of documents efficiently if the total number of hits is not tracked]' + search: + rest_total_hits_as_int: true + body: + query: + multi_match: + query: the fast brown + fields: [ "field1", "field2" ] + cutoff_frequency: 3 + operator: and + - match: { hits.total: 3 } + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.1._id: "1" } + - match: { hits.hits.2._id: "2" } + +--- +"Test match query with synonyms - see #3881 for extensive description of the issue": + - skip: + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" + - do: + indices.create: + index: test + body: + settings: + analysis: + filter: + synonym: + type: synonym + synonyms: [ "quick,fast" ] + analyzer: + index: + type: custom + tokenizer: standard + filter: lowercase + search: + rest_total_hits_as_int: true + type: custom + tokenizer: standard + filter: [ lowercase, synonym ] + mappings: + properties: + text: + type: match_only_text + analyzer: index + search_analyzer: search + + - do: + index: + index: test + id: 1 + body: + text: quick brown fox + refresh: true + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + text: + query: quick + operator: and + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + text: + query: quick brown + operator: and + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + text: + query: fast + operator: and + - match: { hits.total: 1 } + + - do: + index: + index: test + id: 2 + body: + text: fast brown fox + refresh: true + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + text: + query: quick + operator: and + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + text: + query: quick brown + operator: and + - match: { hits.total: 2 } diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/60_synonym_graph_field_match_only_text.yml 
b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/60_synonym_graph_field_match_only_text.yml new file mode 100644 index 0000000000000..d3f5d0fe4f8b4 --- /dev/null +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/60_synonym_graph_field_match_only_text.yml @@ -0,0 +1,209 @@ +setup: + - skip: + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" + - do: + indices.create: + index: test + body: + settings: + index: + number_of_shards: 1 # keep scoring stable + analysis: + filter: + syns: + type: synonym + synonyms: [ "wtf, what the fudge", "foo, bar baz" ] + graph_syns: + type: synonym_graph + synonyms: [ "wtf, what the fudge", "foo, bar baz" ] + analyzer: + lower_syns: + type: custom + tokenizer: standard + filter: [ lowercase, syns ] + lower_graph_syns: + type: custom + tokenizer: standard + filter: [ lowercase, graph_syns ] + mappings: + properties: + field: + type: match_only_text + + - do: + index: + index: test + id: 1 + body: + text: say wtf happened foo + - do: + index: + index: test + id: 2 + body: + text: bar baz what the fudge man + + - do: + index: + index: test + id: 3 + body: + text: wtf + + - do: + index: + index: test + id: 4 + body: + text: what is the name for fudge + + - do: + index: + index: test + id: 5 + body: + text: bar two three + + - do: + index: + index: test + id: 6 + body: + text: bar baz two three + refresh: true + +--- +"simple multiterm phrase": + - do: + search: + rest_total_hits_as_int: true + body: + query: + match_phrase: + text: + query: foo two three + analyzer: lower_syns + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "5" } # incorrect match because we're not using graph synonyms + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match_phrase: + text: + query: foo two three + analyzer: lower_graph_syns + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "6" } # correct match because we're using graph synonyms + +--- +"simple multiterm and": + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + text: + query: say what the fudge + analyzer: lower_syns + operator: and + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "1" } # non-graph synonyms coincidentally give us the correct answer here + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + text: + query: say what the fudge + analyzer: lower_graph_syns + operator: and + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "1" } + +--- +"minimum should match": + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + text: + query: three what the fudge foo + operator: or + analyzer: lower_graph_syns + auto_generate_synonyms_phrase_query: false + - match: { hits.total: 6 } + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + text: + query: three what the fudge foo + operator: or + analyzer: lower_graph_syns + minimum_should_match: 80% + - match: { hits.total: 3 } + - match: { hits.hits.0._id: "2" } + - match: { hits.hits.1._id: "6" } + - match: { hits.hits.2._id: "1" } + +--- +"multiterm synonyms phrase": + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + text: + query: wtf + operator: and + analyzer: lower_graph_syns + - match: { hits.total: 3 } + - match: { hits.hits.0._id: "2" } + - match: { hits.hits.1._id: "3" } + - match: { hits.hits.2._id: "1" } + +--- +"phrase prefix": + - do: + index: + index: test + id: 7 + body: + text: "WTFD!" 
+ + - do: + index: + index: test + id: 8 + body: + text: "Weird Al's WHAT THE FUDGESICLE" + refresh: true + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match_phrase_prefix: + text: + query: wtf + analyzer: lower_graph_syns + - match: { hits.total: 5 } + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.1._id: "7" } + - match: { hits.hits.2._id: "1" } + - match: { hits.hits.3._id: "8" } + - match: { hits.hits.4._id: "2" } diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/70_intervals_field_match_only_text.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/70_intervals_field_match_only_text.yml new file mode 100644 index 0000000000000..8334ca27ff274 --- /dev/null +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/70_intervals_field_match_only_text.yml @@ -0,0 +1,67 @@ +# integration tests for intervals queries using analyzers +setup: + - skip: + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" + - do: + indices.create: + index: test + body: + mappings: + properties: + text: + type: match_only_text + analyzer: standard + text_en: + type: match_only_text + analyzer: english + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "test", "_id": "4"}}' + - '{"text" : "Outside it is cold and wet and raining cats and dogs", + "text_en" : "Outside it is cold and wet and raining cats and dogs"}' + +--- +"Test use_field": + - skip: + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" + - do: + catch: bad_request + search: + index: test + body: + query: + intervals: + text: + all_of: + intervals: + - match: + query: cats + - match: + query: dog + max_gaps: 1 + - match: { status: 400 } + - match: { error.type: "search_phase_execution_exception"} + - match: { error.reason: "all shards failed"} + - do: + catch: bad_request + search: + index: test + body: + query: + intervals: + text: + all_of: + intervals: + - match: + query: cats + - match: + query: dog + use_field: text_en + max_gaps: 1 + - match: { status: 400 } + - match: { error.type: "search_phase_execution_exception"} + - match: { error.reason: "all shards failed"} diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.suggest/20_phrase_field_match_only_text.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.suggest/20_phrase_field_match_only_text.yml new file mode 100644 index 0000000000000..90596ca04205c --- /dev/null +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.suggest/20_phrase_field_match_only_text.yml @@ -0,0 +1,238 @@ +# Integration tests for the phrase suggester with a few analyzers + +setup: + - skip: + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + number_of_replicas: 1 + analysis: + analyzer: + body: + tokenizer: standard + filter: [lowercase] + bigram: + tokenizer: standard + filter: [lowercase, bigram] + ngram: + tokenizer: standard + filter: [lowercase, ngram] + reverse: + tokenizer: standard + filter: [lowercase, reverse] + filter: + bigram: + type: shingle + output_unigrams: false + min_shingle_size: 2 + max_shingle_size: 2 + ngram: + type: shingle + output_unigrams: true + min_shingle_size: 2 + max_shingle_size: 2 + mappings: + properties: + body: + type: match_only_text + analyzer: body + fields: + bigram: + type: 
match_only_text + analyzer: bigram + ngram: + type: match_only_text + analyzer: ngram + reverse: + type: match_only_text + analyzer: reverse + + - do: + bulk: + index: test + refresh: true + body: | + { "index": {} } + { "body": "Xorr the God-Jewel" } + { "index": {} } + { "body": "Xorn" } + { "index": {} } + { "body": "Arthur, King of the Britons" } + { "index": {} } + { "body": "Sir Lancelot the Brave" } + { "index": {} } + { "body": "Patsy, Arthur's Servant" } + { "index": {} } + { "body": "Sir Robin the Not-Quite-So-Brave-as-Sir-Lancelot" } + { "index": {} } + { "body": "Sir Bedevere the Wise" } + { "index": {} } + { "body": "Sir Galahad the Pure" } + { "index": {} } + { "body": "Miss Islington, the Witch" } + { "index": {} } + { "body": "Zoot" } + { "index": {} } + { "body": "Leader of Robin's Minstrels" } + { "index": {} } + { "body": "Old Crone" } + { "index": {} } + { "body": "Frank, the Historian" } + { "index": {} } + { "body": "Frank's Wife" } + { "index": {} } + { "body": "Dr. Piglet" } + { "index": {} } + { "body": "Dr. Winston" } + { "index": {} } + { "body": "Sir Robin (Stand-in)" } + { "index": {} } + { "body": "Knight Who Says Ni" } + { "index": {} } + { "body": "Police sergeant who stops the film" } + +--- +"sorts by score": + - do: + search: + rest_total_hits_as_int: true + size: 0 + index: test + body: + suggest: + text: xor the got-jewel + test: + phrase: + field: body.ngram + force_unigrams: true + max_errors: 0.5 + direct_generator: + - field: body.ngram + min_word_length: 1 + suggest_mode: always + + - match: {suggest.test.0.options.0.text: xorr the god jewel} + - match: {suggest.test.0.options.1.text: xorn the god jewel} + +--- +"breaks ties by sorting terms": + - skip: + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" + # This runs the suggester without bigrams so we can be sure of the sort order + - do: + search: + rest_total_hits_as_int: true + size: 0 + index: test + body: + suggest: + text: xor the got-jewel + test: + phrase: + field: body + analyzer: body + force_unigrams: true + max_errors: 0.5 + direct_generator: + - field: body + min_word_length: 1 + suggest_mode: always + + # The scores are identical but xorn comes first because it sorts first + - match: {suggest.test.0.options.0.text: xorn the god jewel} + - match: {suggest.test.0.options.1.text: xorr the god jewel} + - match: {suggest.test.0.options.0.score: $body.suggest.test.0.options.0.score} + +--- +"fails when asked to run on a field without unigrams": + - do: + catch: /since it doesn't emit unigrams/ + search: + rest_total_hits_as_int: true + size: 0 + index: test + body: + suggest: + text: xor the got-jewel + test: + phrase: + field: body.bigram + + - do: + catch: /since it doesn't emit unigrams/ + search: + rest_total_hits_as_int: true + size: 0 + index: test + body: + suggest: + text: xor the got-jewel + test: + phrase: + field: body.bigram + analyzer: bigram + +--- +"doesn't fail when asked to run on a field without unigrams when force_unigrams=false": + - skip: + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" + - do: + search: + rest_total_hits_as_int: true + size: 0 + index: test + body: + suggest: + text: xor the got-jewel + test: + phrase: + field: body.bigram + force_unigrams: false + + - do: + search: + rest_total_hits_as_int: true + size: 0 + index: test + body: + suggest: + text: xor the got-jewel + test: + phrase: + field: body.bigram + analyzer: bigram + force_unigrams: false + +--- +"reverse suggestions": + - skip: + version: " - 2.11.99" + 
reason: "match_only_text was added in 2.12" + - do: + search: + rest_total_hits_as_int: true + size: 0 + index: test + body: + suggest: + text: Artur, Ging of the Britons + test: + phrase: + field: body.ngram + force_unigrams: true + max_errors: 0.5 + direct_generator: + - field: body.reverse + min_word_length: 1 + suggest_mode: always + pre_filter: reverse + post_filter: reverse + + - match: {suggest.test.0.options.0.text: arthur king of the britons} diff --git a/modules/cache-common/build.gradle b/modules/cache-common/build.gradle new file mode 100644 index 0000000000000..98cdec83b9ad1 --- /dev/null +++ b/modules/cache-common/build.gradle @@ -0,0 +1,19 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +apply plugin: 'opensearch.internal-cluster-test' + +opensearchplugin { + description 'Module for caches which are optional and do not require additional security permission' + classname 'org.opensearch.cache.common.tier.TieredSpilloverCachePlugin' +} + +test { + // TODO: Adding permission in plugin-security.policy doesn't seem to work. + systemProperty 'tests.security.manager', 'false' +} diff --git a/modules/cache-common/src/internalClusterTest/java/org.opensearch.cache.common.tier/TieredSpilloverCacheIT.java b/modules/cache-common/src/internalClusterTest/java/org.opensearch.cache.common.tier/TieredSpilloverCacheIT.java new file mode 100644 index 0000000000000..568ac4d188c51 --- /dev/null +++ b/modules/cache-common/src/internalClusterTest/java/org.opensearch.cache.common.tier/TieredSpilloverCacheIT.java @@ -0,0 +1,150 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cache.common.tier; + +import org.opensearch.action.admin.cluster.node.info.NodeInfo; +import org.opensearch.action.admin.cluster.node.info.NodesInfoRequest; +import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.opensearch.action.admin.cluster.node.info.PluginsAndModules; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.search.SearchType; +import org.opensearch.client.Client; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.cache.settings.CacheSettings; +import org.opensearch.common.cache.store.OpenSearchOnHeapCache; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.indices.IndicesRequestCache; +import org.opensearch.plugins.CachePlugin; +import org.opensearch.plugins.Plugin; +import org.opensearch.plugins.PluginInfo; +import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.Assert; + +import java.time.ZoneId; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.greaterThan; + +public class TieredSpilloverCacheIT extends OpenSearchIntegTestCase { + + @Override + protected Collection<Class<? 
extends Plugin>> nodePlugins() { + return Arrays.asList(TieredSpilloverCachePlugin.class, MockDiskCachePlugin.class); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.PLUGGABLE_CACHE, "true").build(); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put( + CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(), + TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME + ) + .put( + TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_NAME.getConcreteSettingForNamespace( + CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() + ).getKey(), + OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME + ) + .put( + TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_STORE_NAME.getConcreteSettingForNamespace( + CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() + ).getKey(), + MockDiskCache.MockDiskCacheFactory.NAME + ) + .build(); + } + + public void testPluginsAreInstalled() { + NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(); + nodesInfoRequest.addMetric(NodesInfoRequest.Metric.PLUGINS.metricName()); + NodesInfoResponse nodesInfoResponse = OpenSearchIntegTestCase.client().admin().cluster().nodesInfo(nodesInfoRequest).actionGet(); + List<PluginInfo> pluginInfos = nodesInfoResponse.getNodes() + .stream() + .flatMap( + (Function<NodeInfo, Stream<PluginInfo>>) nodeInfo -> nodeInfo.getInfo(PluginsAndModules.class).getPluginInfos().stream() + ) + .collect(Collectors.toList()); + Assert.assertTrue( + pluginInfos.stream() + .anyMatch(pluginInfo -> pluginInfo.getName().equals("org.opensearch.cache.common" + ".tier.TieredSpilloverCachePlugin")) + ); + } + + public void testSanityChecksWithIndicesRequestCache() throws InterruptedException { + Client client = client(); + assertAcked( + client.admin() + .indices() + .prepareCreate("index") + .setMapping("f", "type=date") + .setSettings(Settings.builder().put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true).build()) + .get() + ); + indexRandom( + true, + client.prepareIndex("index").setSource("f", "2014-03-10T00:00:00.000Z"), + client.prepareIndex("index").setSource("f", "2014-05-13T00:00:00.000Z") + ); + ensureSearchable("index"); + + // This is not a random example: serialization with time zones writes shared strings, + // which used to interact badly with the query cache because of the handles stream output; + // see #9500 + final SearchResponse r1 = client.prepareSearch("index") + .setSize(0) + .setSearchType(SearchType.QUERY_THEN_FETCH) + .addAggregation( + dateHistogram("histo").field("f") + .timeZone(ZoneId.of("+01:00")) + .minDocCount(0) + .dateHistogramInterval(DateHistogramInterval.MONTH) + ) + .get(); + assertSearchResponse(r1); + + // Verify the cache is actually used + assertThat( + client.admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), + greaterThan(0L) + ); + } + + public static class MockDiskCachePlugin extends Plugin implements CachePlugin { + + public MockDiskCachePlugin() {} + + @Override + public Map<String, ICache.Factory> getCacheFactoryMap() { + return Map.of(MockDiskCache.MockDiskCacheFactory.NAME, new MockDiskCache.MockDiskCacheFactory(0, 1000)); + } + + @Override + public String getName() { + return "mock_disk_plugin"; + } + } +} diff --git
a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java new file mode 100644 index 0000000000000..7b64a7e93fe27 --- /dev/null +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java @@ -0,0 +1,335 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cache.common.tier; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.cache.LoadAwareCacheLoader; +import org.opensearch.common.cache.RemovalListener; +import org.opensearch.common.cache.RemovalNotification; +import org.opensearch.common.cache.store.config.CacheConfig; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.ReleasableLock; +import org.opensearch.common.util.iterable.Iterables; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.Function; + +/** + * This cache spills over evicted items from the heap tier to the disk tier. All new items are first cached on heap, + * and items evicted from the on-heap cache are moved to the disk-based cache. If the disk-based cache also fills up, + * items are eventually evicted from it as well, which results in a cache miss. + * + * @param <K> Type of key + * @param <V> Type of value + * + * @opensearch.experimental + */ +@ExperimentalApi +public class TieredSpilloverCache<K, V> implements ICache<K, V> { + + private final ICache<K, V> diskCache; + private final ICache<K, V> onHeapCache; + private final RemovalListener<K, V> removalListener; + ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); + ReleasableLock readLock = new ReleasableLock(readWriteLock.readLock()); + ReleasableLock writeLock = new ReleasableLock(readWriteLock.writeLock()); + /** + * Maintains caching tiers in ascending order of cache latency.
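+ * Index 0 is the on-heap tier and index 1 is the disk tier; reads walk the list in order, so an + * on-heap hit never touches the disk cache.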
+ */ + private final List<ICache<K, V>> cacheList; + + TieredSpilloverCache(Builder<K, V> builder) { + Objects.requireNonNull(builder.onHeapCacheFactory, "onHeap cache builder can't be null"); + Objects.requireNonNull(builder.diskCacheFactory, "disk cache builder can't be null"); + this.removalListener = Objects.requireNonNull(builder.removalListener, "Removal listener can't be null"); + + this.onHeapCache = builder.onHeapCacheFactory.create( + new CacheConfig.Builder<K, V>().setRemovalListener(new RemovalListener<K, V>() { + @Override + public void onRemoval(RemovalNotification<K, V> notification) { + try (ReleasableLock ignore = writeLock.acquire()) { + diskCache.put(notification.getKey(), notification.getValue()); + } + removalListener.onRemoval(notification); + } + }) + .setKeyType(builder.cacheConfig.getKeyType()) + .setValueType(builder.cacheConfig.getValueType()) + .setSettings(builder.cacheConfig.getSettings()) + .setWeigher(builder.cacheConfig.getWeigher()) + .build(), + builder.cacheType, + builder.cacheFactories + ); + this.diskCache = builder.diskCacheFactory.create(builder.cacheConfig, builder.cacheType, builder.cacheFactories); + this.cacheList = Arrays.asList(onHeapCache, diskCache); + } + + // Package private for testing + ICache<K, V> getOnHeapCache() { + return onHeapCache; + } + + // Package private for testing + ICache<K, V> getDiskCache() { + return diskCache; + } + + @Override + public V get(K key) { + return getValueFromTieredCache().apply(key); + } + + @Override + public void put(K key, V value) { + try (ReleasableLock ignore = writeLock.acquire()) { + onHeapCache.put(key, value); + } + } + + @Override + public V computeIfAbsent(K key, LoadAwareCacheLoader<K, V> loader) throws Exception { + V cacheValue = getValueFromTieredCache().apply(key); + if (cacheValue == null) { + // Add the value to the onHeap cache. We call computeIfAbsent, which does another get inside. + // This is needed because there can be many concurrent requests for the same key and we only want + // to load the value once. + V value = null; + try (ReleasableLock ignore = writeLock.acquire()) { + value = onHeapCache.computeIfAbsent(key, loader); + } + return value; + } + return cacheValue; + } + + @Override + public void invalidate(K key) { + // We invalidate the key in all caches even though it can be present in only one of them, because we + // don't know which tier holds it. We could do a get against each tier and check first, but that would + // also trigger a hit/miss listener event, so we skip it for now. + try (ReleasableLock ignore = writeLock.acquire()) { + for (ICache<K, V> cache : cacheList) { + cache.invalidate(key); + } + } + } + + @Override + public void invalidateAll() { + try (ReleasableLock ignore = writeLock.acquire()) { + for (ICache<K, V> cache : cacheList) { + cache.invalidateAll(); + } + } + } + + /** + * Provides an iteration over both onHeap and disk keys. This is not protected from any mutations to the cache.
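+ * In particular, a key that is spilling from the heap tier to the disk tier while the iterable is consumed + * may be observed twice, or missed entirely.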
+ * @return An iterable over (onHeap + disk) keys + */ + @SuppressWarnings("unchecked") + @Override + public Iterable<K> keys() { + return Iterables.concat(onHeapCache.keys(), diskCache.keys()); + } + + @Override + public long count() { + long count = 0; + for (ICache<K, V> cache : cacheList) { + count += cache.count(); + } + return count; + } + + @Override + public void refresh() { + try (ReleasableLock ignore = writeLock.acquire()) { + for (ICache<K, V> cache : cacheList) { + cache.refresh(); + } + } + } + + @Override + public void close() throws IOException { + for (ICache<K, V> cache : cacheList) { + cache.close(); + } + } + + private Function<K, V> getValueFromTieredCache() { + return key -> { + try (ReleasableLock ignore = readLock.acquire()) { + for (ICache<K, V> cache : cacheList) { + V value = cache.get(key); + if (value != null) { + // update hit stats + return value; + } else { + // update miss stats + } + } + } + return null; + }; + } + + /** + * Factory to create TieredSpilloverCache objects. + */ + public static class TieredSpilloverCacheFactory implements ICache.Factory { + + /** + * Defines cache name + */ + public static final String TIERED_SPILLOVER_CACHE_NAME = "tiered_spillover"; + + /** + * Default constructor + */ + public TieredSpilloverCacheFactory() {} + + @Override + public <K, V> ICache<K, V> create(CacheConfig<K, V> config, CacheType cacheType, Map<String, Factory> cacheFactories) { + Settings settings = config.getSettings(); + Setting<String> onHeapSetting = TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_NAME.getConcreteSettingForNamespace( + cacheType.getSettingPrefix() + ); + String onHeapCacheStoreName = onHeapSetting.get(settings); + if (!cacheFactories.containsKey(onHeapCacheStoreName)) { + throw new IllegalArgumentException( + "No associated onHeapCache found for tieredSpilloverCache for " + "cacheType:" + cacheType + ); + } + ICache.Factory onHeapCacheFactory = cacheFactories.get(onHeapCacheStoreName); + + Setting<String> onDiskSetting = TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_STORE_NAME.getConcreteSettingForNamespace( + cacheType.getSettingPrefix() + ); + String diskCacheStoreName = onDiskSetting.get(settings); + if (!cacheFactories.containsKey(diskCacheStoreName)) { + throw new IllegalArgumentException( + "No associated diskCache found for tieredSpilloverCache for " + "cacheType:" + cacheType + ); + } + ICache.Factory diskCacheFactory = cacheFactories.get(diskCacheStoreName); + return new Builder<K, V>().setDiskCacheFactory(diskCacheFactory) + .setOnHeapCacheFactory(onHeapCacheFactory) + .setRemovalListener(config.getRemovalListener()) + .setCacheConfig(config) + .setCacheType(cacheType) + .build(); + } + + @Override + public String getCacheName() { + return TIERED_SPILLOVER_CACHE_NAME; + } + } + + /** + * Builder object for tiered spillover cache. + * @param <K> Type of key + * @param <V> Type of value + */ + public static class Builder<K, V> { + private ICache.Factory onHeapCacheFactory; + private ICache.Factory diskCacheFactory; + private RemovalListener<K, V> removalListener; + private CacheConfig<K, V> cacheConfig; + private CacheType cacheType; + private Map<String, ICache.Factory> cacheFactories; + + /** + * Default constructor + */ + public Builder() {} + + /** + * Set onHeap cache factory + * @param onHeapCacheFactory Factory for onHeap cache. 
+ * @return builder + */ + public Builder<K, V> setOnHeapCacheFactory(ICache.Factory onHeapCacheFactory) { + this.onHeapCacheFactory = onHeapCacheFactory; + return this; + } + + /** + * Set disk cache factory + * @param diskCacheFactory Factory for disk cache. + * @return builder + */ + public Builder<K, V> setDiskCacheFactory(ICache.Factory diskCacheFactory) { + this.diskCacheFactory = diskCacheFactory; + return this; + } + + /** + * Set removal listener for tiered cache. + * @param removalListener Removal listener + * @return builder + */ + public Builder<K, V> setRemovalListener(RemovalListener<K, V> removalListener) { + this.removalListener = removalListener; + return this; + } + + /** + * Set cache config. + * @param cacheConfig cache config. + * @return builder + */ + public Builder<K, V> setCacheConfig(CacheConfig<K, V> cacheConfig) { + this.cacheConfig = cacheConfig; + return this; + } + + /** + * Set cache type. + * @param cacheType Cache type + * @return builder + */ + public Builder<K, V> setCacheType(CacheType cacheType) { + this.cacheType = cacheType; + return this; + } + + /** + * Set cache factories + * @param cacheFactories cache factories + * @return builder + */ + public Builder<K, V> setCacheFactories(Map<String, ICache.Factory> cacheFactories) { + this.cacheFactories = cacheFactories; + return this; + } + + /** + * Build tiered spillover cache. + * @return TieredSpilloverCache + */ + public TieredSpilloverCache<K, V> build() { + return new TieredSpilloverCache<>(this); + } + } +} diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCachePlugin.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCachePlugin.java new file mode 100644 index 0000000000000..6b0620c5fbede --- /dev/null +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCachePlugin.java @@ -0,0 +1,62 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cache.common.tier; + +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.settings.Setting; +import org.opensearch.plugins.CachePlugin; +import org.opensearch.plugins.Plugin; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +/** + * Plugin for TieredSpilloverCache. 
+ */
+public class TieredSpilloverCachePlugin extends Plugin implements CachePlugin {
+
+    /**
+     * Plugin name
+     */
+    public static final String TIERED_CACHE_SPILLOVER_PLUGIN_NAME = "tieredSpilloverCachePlugin";
+
+    /**
+     * Default constructor
+     */
+    public TieredSpilloverCachePlugin() {}
+
+    @Override
+    public Map<String, ICache.Factory> getCacheFactoryMap() {
+        return Map.of(
+            TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME,
+            new TieredSpilloverCache.TieredSpilloverCacheFactory()
+        );
+    }
+
+    @Override
+    public List<Setting<?>> getSettings() {
+        List<Setting<?>> settingList = new ArrayList<>();
+        for (CacheType cacheType : CacheType.values()) {
+            settingList.add(
+                TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_NAME.getConcreteSettingForNamespace(cacheType.getSettingPrefix())
+            );
+            settingList.add(
+                TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_STORE_NAME.getConcreteSettingForNamespace(cacheType.getSettingPrefix())
+            );
+        }
+        return settingList;
+    }
+
+    @Override
+    public String getName() {
+        return TIERED_CACHE_SPILLOVER_PLUGIN_NAME;
+    }
+}
diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java
new file mode 100644
index 0000000000000..50b4177f599d1
--- /dev/null
+++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java
@@ -0,0 +1,43 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.cache.common.tier;
+
+import org.opensearch.common.settings.Setting;
+
+import static org.opensearch.common.settings.Setting.Property.NodeScope;
+
+/**
+ * Settings related to TieredSpilloverCache.
+ */
+public class TieredSpilloverCacheSettings {
+
+    /**
+     * Setting which defines the onHeap cache store to be used in TieredSpilloverCache.
+     *
+     * Pattern: {cache_type}.tiered_spillover.onheap.store.name
+     * Example: indices.request.cache.tiered_spillover.onheap.store.name
+     */
+    public static final Setting.AffixSetting<String> TIERED_SPILLOVER_ONHEAP_STORE_NAME = Setting.suffixKeySetting(
+        TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME + ".onheap.store.name",
+        (key) -> Setting.simpleString(key, "", NodeScope)
+    );
+
+    /**
+     * Setting which defines the disk cache store to be used in TieredSpilloverCache.
+     */
+    public static final Setting.AffixSetting<String> TIERED_SPILLOVER_DISK_STORE_NAME = Setting.suffixKeySetting(
+        TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME + ".disk.store.name",
+        (key) -> Setting.simpleString(key, "", NodeScope)
+    );
+
+    /**
+     * Default constructor
+     */
+    TieredSpilloverCacheSettings() {}
+}
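Because these are affix settings, callers never hard-code full keys: they resolve the concrete setting for a cache type's namespace and read the configured store name from it. A short sketch of how node settings might select the two tier stores for the request cache, mirroring the tests later in this change (the store-name strings here are illustrative, not fixed constants of the API):

```java
import org.opensearch.common.cache.CacheType;
import org.opensearch.common.settings.Settings;

final class TieredStoreSettingsExample {
    // Builds node settings that select the two tier stores for the request
    // cache by resolving the concrete keys for its namespace, e.g.
    // "indices.request.cache.tiered_spillover.onheap.store.name".
    static Settings requestCacheTierSettings() {
        String prefix = CacheType.INDICES_REQUEST_CACHE.getSettingPrefix();
        return Settings.builder()
            .put(
                TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_NAME.getConcreteSettingForNamespace(prefix).getKey(),
                "opensearch_onheap" // illustrative store name
            )
            .put(
                TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_STORE_NAME.getConcreteSettingForNamespace(prefix).getKey(),
                "example_disk_store" // illustrative store name
            )
            .build();
    }
}
```

TieredSpilloverCacheFactory#create then reads these names back and fails with the IllegalArgumentException shown earlier if either name is absent from the registered factories map.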
diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/package-info.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/package-info.java
new file mode 100644
index 0000000000000..fa2de3c14b5dc
--- /dev/null
+++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/package-info.java
@@ -0,0 +1,10 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/** Package related to cache tiers **/
+package org.opensearch.cache.common.tier;
diff --git a/modules/cache-common/src/main/plugin-metadata/plugin-security.policy b/modules/cache-common/src/main/plugin-metadata/plugin-security.policy
new file mode 100644
index 0000000000000..12fe9f2ddb60b
--- /dev/null
+++ b/modules/cache-common/src/main/plugin-metadata/plugin-security.policy
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+grant {
+  permission java.lang.RuntimePermission "accessClassInPackage.sun.misc";
+  permission java.lang.RuntimePermission "createClassLoader";
+};
diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java
new file mode 100644
index 0000000000000..79b57b80c3aa0
--- /dev/null
+++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java
@@ -0,0 +1,133 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.cache.common.tier;
+
+import org.opensearch.common.cache.CacheType;
+import org.opensearch.common.cache.ICache;
+import org.opensearch.common.cache.LoadAwareCacheLoader;
+import org.opensearch.common.cache.store.builders.ICacheBuilder;
+import org.opensearch.common.cache.store.config.CacheConfig;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+public class MockDiskCache<K, V> implements ICache<K, V> {
+
+    Map<K, V> cache;
+    int maxSize;
+    long delay;
+
+    public MockDiskCache(int maxSize, long delay) {
+        this.maxSize = maxSize;
+        this.delay = delay;
+        this.cache = new ConcurrentHashMap<K, V>();
+    }
+
+    @Override
+    public V get(K key) {
+        V value = cache.get(key);
+        return value;
+    }
+
+    @Override
+    public void put(K key, V value) {
+        if (this.cache.size() >= maxSize) { // For simplification
+            return;
+        }
+        try {
+            Thread.sleep(delay);
+        } catch (InterruptedException e) {
+            throw new RuntimeException(e);
+        }
+        this.cache.put(key, value);
+    }
+
+    @Override
+    public V computeIfAbsent(K key, LoadAwareCacheLoader<K, V> loader) {
+        V value = cache.computeIfAbsent(key, key1 -> {
+            try {
+                return loader.load(key);
+            } catch (Exception e) {
+                throw new RuntimeException(e);
+            }
+        });
+        return value;
+    }
+
+    @Override
+    public void invalidate(K key) {
+        this.cache.remove(key);
+    }
+
+    @Override
+    public void invalidateAll() {
+        this.cache.clear();
+    }
+
+    @Override
+    public Iterable<K> keys() {
+        return this.cache.keySet();
+    }
+
+    @Override
+    public long count() {
+        return this.cache.size();
+    }
+
+    @Override
+    public void refresh() {}
+
+    @Override
+    public void close() {
+
+    }
+
+    public static class MockDiskCacheFactory implements Factory {
+
+        public static final String NAME = "mockDiskCache";
+        final long delay;
+        final int maxSize;
+
+        public MockDiskCacheFactory(long delay, int maxSize) {
+            this.delay = delay;
+            this.maxSize = maxSize;
+        }
+
+        @Override
+        public <K, V> ICache<K, V> create(CacheConfig<K, V> config, CacheType cacheType, Map<String, Factory> cacheFactories) {
+            return new Builder<K, V>().setMaxSize(maxSize).setDeliberateDelay(delay).build();
+        }
+
+        @Override
+        public String getCacheName() {
+            return
NAME; + } + } + + public static class Builder<K, V> extends ICacheBuilder<K, V> { + + int maxSize; + long delay; + + @Override + public ICache<K, V> build() { + return new MockDiskCache<K, V>(this.maxSize, this.delay); + } + + public Builder<K, V> setMaxSize(int maxSize) { + this.maxSize = maxSize; + return this; + } + + public Builder<K, V> setDeliberateDelay(long millis) { + this.delay = millis; + return this; + } + } +} diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCachePluginTests.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCachePluginTests.java new file mode 100644 index 0000000000000..1172a48e97c6a --- /dev/null +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCachePluginTests.java @@ -0,0 +1,24 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cache.common.tier; + +import org.opensearch.common.cache.ICache; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Map; + +public class TieredSpilloverCachePluginTests extends OpenSearchTestCase { + + public void testGetCacheFactoryMap() { + TieredSpilloverCachePlugin tieredSpilloverCachePlugin = new TieredSpilloverCachePlugin(); + Map<String, ICache.Factory> map = tieredSpilloverCachePlugin.getCacheFactoryMap(); + assertNotNull(map.get(TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME)); + assertEquals(TieredSpilloverCachePlugin.TIERED_CACHE_SPILLOVER_PLUGIN_NAME, tieredSpilloverCachePlugin.getName()); + } +} diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java new file mode 100644 index 0000000000000..2f7938934300e --- /dev/null +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java @@ -0,0 +1,896 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cache.common.tier; + +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.cache.LoadAwareCacheLoader; +import org.opensearch.common.cache.RemovalListener; +import org.opensearch.common.cache.RemovalNotification; +import org.opensearch.common.cache.settings.CacheSettings; +import org.opensearch.common.cache.store.OpenSearchOnHeapCache; +import org.opensearch.common.cache.store.config.CacheConfig; +import org.opensearch.common.cache.store.settings.OpenSearchOnHeapCacheSettings; +import org.opensearch.common.metrics.CounterMetric; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Phaser; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static org.opensearch.common.cache.store.settings.OpenSearchOnHeapCacheSettings.MAXIMUM_SIZE_IN_BYTES_KEY; + +public class TieredSpilloverCacheTests extends OpenSearchTestCase { + + public void testComputeIfAbsentWithoutAnyOnHeapCacheEviction() throws Exception { + int onHeapCacheSize = randomIntBetween(10, 30); + int keyValueSize = 50; + + MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>(); + TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache( + onHeapCacheSize, + randomIntBetween(1, 4), + removalListener, + Settings.builder() + .put( + OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(MAXIMUM_SIZE_IN_BYTES_KEY) + .getKey(), + onHeapCacheSize * keyValueSize + "b" + ) + .build(), + 0 + ); + int numOfItems1 = randomIntBetween(1, onHeapCacheSize / 2 - 1); + List<String> keys = new ArrayList<>(); + // Put values in cache. + for (int iter = 0; iter < numOfItems1; iter++) { + String key = UUID.randomUUID().toString(); + keys.add(key); + LoadAwareCacheLoader<String, String> tieredCacheLoader = getLoadAwareCacheLoader(); + tieredSpilloverCache.computeIfAbsent(key, tieredCacheLoader); + } + assertEquals(0, removalListener.evictionsMetric.count()); + + // Try to hit cache again with some randomization. + int numOfItems2 = randomIntBetween(1, onHeapCacheSize / 2 - 1); + int cacheHit = 0; + int cacheMiss = 0; + for (int iter = 0; iter < numOfItems2; iter++) { + if (randomBoolean()) { + // Hit cache with stored key + cacheHit++; + int index = randomIntBetween(0, keys.size() - 1); + tieredSpilloverCache.computeIfAbsent(keys.get(index), getLoadAwareCacheLoader()); + } else { + // Hit cache with randomized key which is expected to miss cache always. 
+                tieredSpilloverCache.computeIfAbsent(UUID.randomUUID().toString(), getLoadAwareCacheLoader());
+                cacheMiss++;
+            }
+        }
+        assertEquals(0, removalListener.evictionsMetric.count());
+    }
+
+    public void testComputeIfAbsentWithFactoryBasedCacheCreation() throws Exception {
+        int onHeapCacheSize = randomIntBetween(10, 30);
+        int diskCacheSize = randomIntBetween(60, 100);
+        int totalSize = onHeapCacheSize + diskCacheSize;
+        int keyValueSize = 50;
+
+        MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>();
+
+        // Set the desired settings needed to create a TieredSpilloverCache object with INDICES_REQUEST_CACHE cacheType.
+        Settings settings = Settings.builder()
+            .put(
+                TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_NAME.getConcreteSettingForNamespace(
+                    CacheType.INDICES_REQUEST_CACHE.getSettingPrefix()
+                ).getKey(),
+                OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME
+            )
+            .put(
+                TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_STORE_NAME.getConcreteSettingForNamespace(
+                    CacheType.INDICES_REQUEST_CACHE.getSettingPrefix()
+                ).getKey(),
+                MockDiskCache.MockDiskCacheFactory.NAME
+            )
+            .put(
+                OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
+                    .get(MAXIMUM_SIZE_IN_BYTES_KEY)
+                    .getKey(),
+                onHeapCacheSize * keyValueSize + "b"
+            )
+            .build();
+
+        ICache<String, String> tieredSpilloverICache = new TieredSpilloverCache.TieredSpilloverCacheFactory().create(
+            new CacheConfig.Builder<String, String>().setKeyType(String.class)
+                .setValueType(String.class)
+                .setWeigher((k, v) -> keyValueSize)
+                .setRemovalListener(removalListener)
+                .setSettings(settings)
+                .build(),
+            CacheType.INDICES_REQUEST_CACHE,
+            Map.of(
+                OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME,
+                new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory(),
+                MockDiskCache.MockDiskCacheFactory.NAME,
+                new MockDiskCache.MockDiskCacheFactory(0, randomIntBetween(100, 300))
+            )
+        );
+
+        TieredSpilloverCache<String, String> tieredSpilloverCache = (TieredSpilloverCache<String, String>) tieredSpilloverICache;
+
+        // Put more values in the cache than its size to cause evictions from onHeap.
+        int numOfItems1 = randomIntBetween(onHeapCacheSize + 1, totalSize);
+        List<String> onHeapKeys = new ArrayList<>();
+        List<String> diskTierKeys = new ArrayList<>();
+        for (int iter = 0; iter < numOfItems1; iter++) {
+            String key = UUID.randomUUID().toString();
+            LoadAwareCacheLoader<String, String> tieredCacheLoader = getLoadAwareCacheLoader();
+            tieredSpilloverCache.computeIfAbsent(key, tieredCacheLoader);
+        }
+        long actualDiskCacheSize = tieredSpilloverCache.getDiskCache().count();
+        assertEquals(actualDiskCacheSize, removalListener.evictionsMetric.count()); // Evictions from onHeap equal the disk cache size.
+
+        tieredSpilloverCache.getOnHeapCache().keys().forEach(onHeapKeys::add);
+        tieredSpilloverCache.getDiskCache().keys().forEach(diskTierKeys::add);
+
+        assertEquals(tieredSpilloverCache.getOnHeapCache().count(), onHeapKeys.size());
+        assertEquals(tieredSpilloverCache.getDiskCache().count(), diskTierKeys.size());
+    }
+
+    public void testWithFactoryCreationWithOnHeapCacheNotPresent() {
+        int onHeapCacheSize = randomIntBetween(10, 30);
+        int keyValueSize = 50;
+        MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>();
+
+        // Set the settings without the onHeap cache store name setting.
+        Settings settings = Settings.builder()
+            .put(
+                TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_STORE_NAME.getConcreteSettingForNamespace(
+                    CacheType.INDICES_REQUEST_CACHE.getSettingPrefix()
+                ).getKey(),
+                MockDiskCache.MockDiskCacheFactory.NAME
+            )
+            .put(
+                OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
+                    .get(MAXIMUM_SIZE_IN_BYTES_KEY)
+                    .getKey(),
+                onHeapCacheSize * keyValueSize + "b"
+            )
+            .build();
+
+        IllegalArgumentException ex = assertThrows(
+            IllegalArgumentException.class,
+            () -> new TieredSpilloverCache.TieredSpilloverCacheFactory().create(
+                new CacheConfig.Builder<String, String>().setKeyType(String.class)
+                    .setValueType(String.class)
+                    .setWeigher((k, v) -> keyValueSize)
+                    .setRemovalListener(removalListener)
+                    .setSettings(settings)
+                    .build(),
+                CacheType.INDICES_REQUEST_CACHE,
+                Map.of(
+                    OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME,
+                    new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory(),
+                    MockDiskCache.MockDiskCacheFactory.NAME,
+                    new MockDiskCache.MockDiskCacheFactory(0, randomIntBetween(100, 300))
+                )
+            )
+        );
+        assertEquals(
+            ex.getMessage(),
+            "No associated onHeapCache found for tieredSpilloverCache for " + "cacheType:" + CacheType.INDICES_REQUEST_CACHE
+        );
+    }
+
+    public void testWithFactoryCreationWithDiskCacheNotPresent() {
+        int onHeapCacheSize = randomIntBetween(10, 30);
+        int keyValueSize = 50;
+        MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>();
+
+        // Set the settings without the disk cache store name setting.
+        Settings settings = Settings.builder()
+            .put(
+                TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_NAME.getConcreteSettingForNamespace(
+                    CacheType.INDICES_REQUEST_CACHE.getSettingPrefix()
+                ).getKey(),
+                OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME
+            )
+            .put(
+                OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
+                    .get(MAXIMUM_SIZE_IN_BYTES_KEY)
+                    .getKey(),
+                onHeapCacheSize * keyValueSize + "b"
+            )
+            .build();
+
+        IllegalArgumentException ex = assertThrows(
+            IllegalArgumentException.class,
+            () -> new TieredSpilloverCache.TieredSpilloverCacheFactory().create(
+                new CacheConfig.Builder<String, String>().setKeyType(String.class)
+                    .setValueType(String.class)
+                    .setWeigher((k, v) -> keyValueSize)
+                    .setRemovalListener(removalListener)
+                    .setSettings(settings)
+                    .build(),
+                CacheType.INDICES_REQUEST_CACHE,
+                Map.of(
+                    OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME,
+                    new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory(),
+                    MockDiskCache.MockDiskCacheFactory.NAME,
+                    new MockDiskCache.MockDiskCacheFactory(0, randomIntBetween(100, 300))
+                )
+            )
+        );
+        assertEquals(
+            ex.getMessage(),
+            "No associated diskCache found for tieredSpilloverCache for " + "cacheType:" + CacheType.INDICES_REQUEST_CACHE
+        );
+    }
+
+    public void testComputeIfAbsentWithEvictionsFromOnHeapCache() throws Exception {
+        int onHeapCacheSize = randomIntBetween(10, 30);
+        int diskCacheSize = randomIntBetween(60, 100);
+        int totalSize = onHeapCacheSize + diskCacheSize;
+        int keyValueSize = 50;
+        MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>();
+        ICache.Factory onHeapCacheFactory = new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory();
+        CacheConfig<String, String> cacheConfig = new CacheConfig.Builder<String, String>().setKeyType(String.class)
+            .setValueType(String.class)
+            .setWeigher((k, v) -> keyValueSize)
+            .setRemovalListener(removalListener)
+            .setSettings(
+                Settings.builder()
+                    .put(
+                        CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(),
+                        TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME
+                    )
+                    .put(FeatureFlags.PLUGGABLE_CACHE, "true")
+                    .put(
+                        OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
+                            .get(MAXIMUM_SIZE_IN_BYTES_KEY)
+                            .getKey(),
+                        onHeapCacheSize * keyValueSize + "b"
+                    )
+                    .build()
+            )
+            .build();
+
+        ICache.Factory mockDiskCacheFactory = new MockDiskCache.MockDiskCacheFactory(0, diskCacheSize);
+
+        TieredSpilloverCache<String, String> tieredSpilloverCache = new TieredSpilloverCache.Builder<String, String>()
+            .setOnHeapCacheFactory(onHeapCacheFactory)
+            .setDiskCacheFactory(mockDiskCacheFactory)
+            .setCacheConfig(cacheConfig)
+            .setRemovalListener(removalListener)
+            .setCacheType(CacheType.INDICES_REQUEST_CACHE)
+            .build();
+
+        // Put more values in the cache than its size to cause evictions from onHeap.
+        int numOfItems1 = randomIntBetween(onHeapCacheSize + 1, totalSize);
+        List<String> onHeapKeys = new ArrayList<>();
+        List<String> diskTierKeys = new ArrayList<>();
+        for (int iter = 0; iter < numOfItems1; iter++) {
+            String key = UUID.randomUUID().toString();
+            LoadAwareCacheLoader<String, String> tieredCacheLoader = getLoadAwareCacheLoader();
+            tieredSpilloverCache.computeIfAbsent(key, tieredCacheLoader);
+        }
+        long actualDiskCacheSize = tieredSpilloverCache.getDiskCache().count();
+        assertEquals(actualDiskCacheSize, removalListener.evictionsMetric.count()); // Evictions from onHeap equal the disk cache size.
+
+        tieredSpilloverCache.getOnHeapCache().keys().forEach(onHeapKeys::add);
+        tieredSpilloverCache.getDiskCache().keys().forEach(diskTierKeys::add);
+
+        assertEquals(tieredSpilloverCache.getOnHeapCache().count(), onHeapKeys.size());
+        assertEquals(tieredSpilloverCache.getDiskCache().count(), diskTierKeys.size());
+
+        // Try to hit cache again with some randomization.
+        int numOfItems2 = randomIntBetween(50, 200);
+        int onHeapCacheHit = 0;
+        int diskCacheHit = 0;
+        int cacheMiss = 0;
+        for (int iter = 0; iter < numOfItems2; iter++) {
+            if (randomBoolean()) { // Hit cache with key stored in onHeap cache.
+                onHeapCacheHit++;
+                int index = randomIntBetween(0, onHeapKeys.size() - 1);
+                LoadAwareCacheLoader<String, String> loadAwareCacheLoader = getLoadAwareCacheLoader();
+                tieredSpilloverCache.computeIfAbsent(onHeapKeys.get(index), loadAwareCacheLoader);
+                assertFalse(loadAwareCacheLoader.isLoaded());
+            } else { // Hit cache with key stored in disk cache.
+                diskCacheHit++;
+                int index = randomIntBetween(0, diskTierKeys.size() - 1);
+                LoadAwareCacheLoader<String, String> loadAwareCacheLoader = getLoadAwareCacheLoader();
+                tieredSpilloverCache.computeIfAbsent(diskTierKeys.get(index), loadAwareCacheLoader);
+                assertFalse(loadAwareCacheLoader.isLoaded());
+            }
+        }
+        for (int iter = 0; iter < randomIntBetween(50, 200); iter++) {
+            // Hit cache with randomized key which is expected to miss cache always.
+ LoadAwareCacheLoader<String, String> tieredCacheLoader = getLoadAwareCacheLoader(); + tieredSpilloverCache.computeIfAbsent(UUID.randomUUID().toString(), tieredCacheLoader); + cacheMiss++; + } + } + + public void testComputeIfAbsentWithEvictionsFromBothTier() throws Exception { + int onHeapCacheSize = randomIntBetween(10, 30); + int diskCacheSize = randomIntBetween(onHeapCacheSize + 1, 100); + int totalSize = onHeapCacheSize + diskCacheSize; + int keyValueSize = 50; + + MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>(); + TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache( + onHeapCacheSize, + diskCacheSize, + removalListener, + Settings.builder() + .put( + OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(MAXIMUM_SIZE_IN_BYTES_KEY) + .getKey(), + onHeapCacheSize * keyValueSize + "b" + ) + .build(), + 0 + ); + + int numOfItems = randomIntBetween(totalSize + 1, totalSize * 3); + for (int iter = 0; iter < numOfItems; iter++) { + LoadAwareCacheLoader<String, String> tieredCacheLoader = getLoadAwareCacheLoader(); + tieredSpilloverCache.computeIfAbsent(UUID.randomUUID().toString(), tieredCacheLoader); + } + assertTrue(removalListener.evictionsMetric.count() > 0); + } + + public void testGetAndCount() throws Exception { + int onHeapCacheSize = randomIntBetween(10, 30); + int diskCacheSize = randomIntBetween(onHeapCacheSize + 1, 100); + int keyValueSize = 50; + int totalSize = onHeapCacheSize + diskCacheSize; + + MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>(); + TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache( + onHeapCacheSize, + diskCacheSize, + removalListener, + Settings.builder() + .put( + OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(MAXIMUM_SIZE_IN_BYTES_KEY) + .getKey(), + onHeapCacheSize * keyValueSize + "b" + ) + .build(), + 0 + ); + + int numOfItems1 = randomIntBetween(onHeapCacheSize + 1, totalSize); + List<String> onHeapKeys = new ArrayList<>(); + List<String> diskTierKeys = new ArrayList<>(); + for (int iter = 0; iter < numOfItems1; iter++) { + String key = UUID.randomUUID().toString(); + if (iter > (onHeapCacheSize - 1)) { + // All these are bound to go to disk based cache. 
+                diskTierKeys.add(key);
+            } else {
+                onHeapKeys.add(key);
+            }
+            LoadAwareCacheLoader<String, String> loadAwareCacheLoader = getLoadAwareCacheLoader();
+            tieredSpilloverCache.computeIfAbsent(key, loadAwareCacheLoader);
+        }
+
+        for (int iter = 0; iter < numOfItems1; iter++) {
+            if (randomBoolean()) {
+                if (randomBoolean()) {
+                    int index = randomIntBetween(0, onHeapKeys.size() - 1);
+                    assertNotNull(tieredSpilloverCache.get(onHeapKeys.get(index)));
+                } else {
+                    int index = randomIntBetween(0, diskTierKeys.size() - 1);
+                    assertNotNull(tieredSpilloverCache.get(diskTierKeys.get(index)));
+                }
+            } else {
+                assertNull(tieredSpilloverCache.get(UUID.randomUUID().toString()));
+            }
+        }
+        assertEquals(numOfItems1, tieredSpilloverCache.count());
+    }
+
+    public void testPut() {
+        int onHeapCacheSize = randomIntBetween(10, 30);
+        int diskCacheSize = randomIntBetween(onHeapCacheSize + 1, 100);
+        int keyValueSize = 50;
+
+        MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>();
+        TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache(
+            onHeapCacheSize,
+            diskCacheSize,
+            removalListener,
+            Settings.builder()
+                .put(
+                    OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
+                        .get(MAXIMUM_SIZE_IN_BYTES_KEY)
+                        .getKey(),
+                    onHeapCacheSize * keyValueSize + "b"
+                )
+                .build(),
+            0
+        );
+        String key = UUID.randomUUID().toString();
+        String value = UUID.randomUUID().toString();
+        tieredSpilloverCache.put(key, value);
+        assertEquals(1, tieredSpilloverCache.count());
+    }
+
+    public void testPutAndVerifyNewItemsArePresentOnHeapCache() throws Exception {
+        int onHeapCacheSize = randomIntBetween(200, 400);
+        int diskCacheSize = randomIntBetween(450, 800);
+        int keyValueSize = 50;
+
+        MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>();
+
+        TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache(
+            keyValueSize,
+            diskCacheSize,
+            removalListener,
+            Settings.builder()
+                .put(
+                    CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(),
+                    TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME
+                )
+                .put(
+                    OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
+                        .get(MAXIMUM_SIZE_IN_BYTES_KEY)
+                        .getKey(),
+                    (onHeapCacheSize * keyValueSize) + "b"
+                )
+                .build(),
+            0
+        );
+
+        for (int i = 0; i < onHeapCacheSize; i++) {
+            tieredSpilloverCache.computeIfAbsent(UUID.randomUUID().toString(), new LoadAwareCacheLoader<>() {
+                @Override
+                public boolean isLoaded() {
+                    return false;
+                }
+
+                @Override
+                public String load(String key) {
+                    return UUID.randomUUID().toString();
+                }
+            });
+        }
+
+        assertEquals(onHeapCacheSize, tieredSpilloverCache.getOnHeapCache().count());
+        assertEquals(0, tieredSpilloverCache.getDiskCache().count());
+
+        // Now try to put another onHeapCacheSize worth of new items.
+        List<String> newKeyList = new ArrayList<>();
+        for (int i = 0; i < onHeapCacheSize; i++) {
+            newKeyList.add(UUID.randomUUID().toString());
+        }
+
+        for (int i = 0; i < newKeyList.size(); i++) {
+            tieredSpilloverCache.computeIfAbsent(newKeyList.get(i), new LoadAwareCacheLoader<>() {
+                @Override
+                public boolean isLoaded() {
+                    return false;
+                }
+
+                @Override
+                public String load(String key) {
+                    return UUID.randomUUID().toString();
+                }
+            });
+        }
+
+        // Verify that the new items are part of the onHeap cache.
+        List<String> actualOnHeapCacheKeys = new ArrayList<>();
+        tieredSpilloverCache.getOnHeapCache().keys().forEach(actualOnHeapCacheKeys::add);
+
+        assertEquals(newKeyList.size(), actualOnHeapCacheKeys.size());
+        for (int i = 0; i < actualOnHeapCacheKeys.size(); i++) {
+            assertTrue(newKeyList.contains(actualOnHeapCacheKeys.get(i)));
+        }
+        assertEquals(onHeapCacheSize, tieredSpilloverCache.getOnHeapCache().count());
+        assertEquals(onHeapCacheSize, tieredSpilloverCache.getDiskCache().count());
+    }
+
+    public void testInvalidate() {
+        int onHeapCacheSize = 1;
+        int diskCacheSize = 10;
+        int keyValueSize = 20;
+
+        MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>();
+        TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache(
+            onHeapCacheSize,
+            diskCacheSize,
+            removalListener,
+            Settings.builder()
+                .put(
+                    OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
+                        .get(MAXIMUM_SIZE_IN_BYTES_KEY)
+                        .getKey(),
+                    onHeapCacheSize * keyValueSize + "b"
+                )
+                .build(),
+            0
+        );
+        String key = UUID.randomUUID().toString();
+        String value = UUID.randomUUID().toString();
+        // First try to invalidate without the key being present in the cache.
+        tieredSpilloverCache.invalidate(key);
+
+        // Now try to invalidate with the key present in the onHeap cache.
+        tieredSpilloverCache.put(key, value);
+        tieredSpilloverCache.invalidate(key);
+        assertEquals(0, tieredSpilloverCache.count());
+
+        tieredSpilloverCache.put(key, value);
+        // Put another key/value so that one of the items is evicted to the disk cache.
+        String key2 = UUID.randomUUID().toString();
+        tieredSpilloverCache.put(key2, UUID.randomUUID().toString());
+        assertEquals(2, tieredSpilloverCache.count());
+        // Invalidate the older key again.
+        tieredSpilloverCache.invalidate(key);
+        assertEquals(1, tieredSpilloverCache.count());
+    }
+
+    public void testCacheKeys() throws Exception {
+        int onHeapCacheSize = randomIntBetween(10, 30);
+        int diskCacheSize = randomIntBetween(60, 100);
+        int keyValueSize = 50;
+
+        MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>();
+        TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache(
+            keyValueSize,
+            diskCacheSize,
+            removalListener,
+            Settings.builder()
+                .put(
+                    OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
+                        .get(MAXIMUM_SIZE_IN_BYTES_KEY)
+                        .getKey(),
+                    onHeapCacheSize * keyValueSize + "b"
+                )
+                .build(),
+            0
+        );
+        List<String> onHeapKeys = new ArrayList<>();
+        List<String> diskTierKeys = new ArrayList<>();
+        // During the first round, add onHeapCacheSize entries. These go to the onHeap cache initially.
+        for (int i = 0; i < onHeapCacheSize; i++) {
+            String key = UUID.randomUUID().toString();
+            diskTierKeys.add(key);
+            tieredSpilloverCache.computeIfAbsent(key, getLoadAwareCacheLoader());
+        }
+        // In another round, add another onHeapCacheSize entries. These will go to onHeap, and the earlier ones will be
+        // evicted to the onDisk cache.
+        for (int i = 0; i < onHeapCacheSize; i++) {
+            String key = UUID.randomUUID().toString();
+            onHeapKeys.add(key);
+            tieredSpilloverCache.computeIfAbsent(key, getLoadAwareCacheLoader());
+        }
+
+        List<String> actualOnHeapKeys = new ArrayList<>();
+        List<String> actualOnDiskKeys = new ArrayList<>();
+        Iterable<String> onHeapiterable = tieredSpilloverCache.getOnHeapCache().keys();
+        Iterable<String> onDiskiterable = tieredSpilloverCache.getDiskCache().keys();
+        onHeapiterable.iterator().forEachRemaining(actualOnHeapKeys::add);
+        onDiskiterable.iterator().forEachRemaining(actualOnDiskKeys::add);
+        for (String onHeapKey : onHeapKeys) {
+            assertTrue(actualOnHeapKeys.contains(onHeapKey));
+        }
+        for (String onDiskKey : diskTierKeys) {
+            assertTrue(actualOnDiskKeys.contains(onDiskKey));
+        }
+
+        // Testing keys() which returns all keys.
+        List<String> actualMergedKeys = new ArrayList<>();
+        List<String> expectedMergedKeys = new ArrayList<>();
+        expectedMergedKeys.addAll(onHeapKeys);
+        expectedMergedKeys.addAll(diskTierKeys);
+
+        Iterable<String> mergedIterable = tieredSpilloverCache.keys();
+        mergedIterable.iterator().forEachRemaining(actualMergedKeys::add);
+
+        assertEquals(expectedMergedKeys.size(), actualMergedKeys.size());
+        for (String key : expectedMergedKeys) {
+            assertTrue(actualMergedKeys.contains(key));
+        }
+    }
+
+    public void testRefresh() {
+        int diskCacheSize = randomIntBetween(60, 100);
+
+        MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>();
+        TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache(
+            50,
+            diskCacheSize,
+            removalListener,
+            Settings.EMPTY,
+            0
+        );
+        tieredSpilloverCache.refresh();
+    }
+
+    public void testInvalidateAll() throws Exception {
+        int onHeapCacheSize = randomIntBetween(10, 30);
+        int diskCacheSize = randomIntBetween(60, 100);
+        int keyValueSize = 50;
+        int totalSize = onHeapCacheSize + diskCacheSize;
+
+        MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>();
+        TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache(
+            keyValueSize,
+            diskCacheSize,
+            removalListener,
+            Settings.builder()
+                .put(
+                    OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
+                        .get(MAXIMUM_SIZE_IN_BYTES_KEY)
+                        .getKey(),
+                    onHeapCacheSize * keyValueSize + "b"
+                )
+                .build(),
+            0
+        );
+        // Put more values in the cache than its size to cause evictions from onHeap.
+        int numOfItems1 = randomIntBetween(onHeapCacheSize + 1, totalSize);
+        List<String> onHeapKeys = new ArrayList<>();
+        List<String> diskTierKeys = new ArrayList<>();
+        for (int iter = 0; iter < numOfItems1; iter++) {
+            String key = UUID.randomUUID().toString();
+            if (iter > (onHeapCacheSize - 1)) {
+                // All these are bound to go to the disk-based cache.
+                diskTierKeys.add(key);
+            } else {
+                onHeapKeys.add(key);
+            }
+            LoadAwareCacheLoader<String, String> tieredCacheLoader = getLoadAwareCacheLoader();
+            tieredSpilloverCache.computeIfAbsent(key, tieredCacheLoader);
+        }
+        assertEquals(numOfItems1, tieredSpilloverCache.count());
+        tieredSpilloverCache.invalidateAll();
+        assertEquals(0, tieredSpilloverCache.count());
+    }
+
+    public void testComputeIfAbsentConcurrently() throws Exception {
+        int onHeapCacheSize = randomIntBetween(100, 300);
+        int diskCacheSize = randomIntBetween(200, 400);
+        int keyValueSize = 50;
+
+        MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>();
+        Settings settings = Settings.builder()
+            .put(
+                OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
+                    .get(MAXIMUM_SIZE_IN_BYTES_KEY)
+                    .getKey(),
+                onHeapCacheSize * keyValueSize + "b"
+            )
+            .build();
+
+        TieredSpilloverCache<String, String> tieredSpilloverCache = intializeTieredSpilloverCache(
+            keyValueSize,
+            diskCacheSize,
+            removalListener,
+            settings,
+            0
+        );
+
+        int numberOfSameKeys = randomIntBetween(10, onHeapCacheSize - 1);
+        String key = UUID.randomUUID().toString();
+        String value = UUID.randomUUID().toString();
+
+        Thread[] threads = new Thread[numberOfSameKeys];
+        Phaser phaser = new Phaser(numberOfSameKeys + 1);
+        CountDownLatch countDownLatch = new CountDownLatch(numberOfSameKeys); // To wait for all threads to finish.
+
+        List<LoadAwareCacheLoader<String, String>> loadAwareCacheLoaderList = new CopyOnWriteArrayList<>();
+
+        for (int i = 0; i < numberOfSameKeys; i++) {
+            threads[i] = new Thread(() -> {
+                try {
+                    LoadAwareCacheLoader<String, String> loadAwareCacheLoader = new LoadAwareCacheLoader<>() {
+                        boolean isLoaded = false;
+
+                        @Override
+                        public boolean isLoaded() {
+                            return isLoaded;
+                        }
+
+                        @Override
+                        public String load(String key) {
+                            isLoaded = true;
+                            return value;
+                        }
+                    };
+                    loadAwareCacheLoaderList.add(loadAwareCacheLoader);
+                    phaser.arriveAndAwaitAdvance();
+                    tieredSpilloverCache.computeIfAbsent(key, loadAwareCacheLoader);
+                } catch (Exception e) {
+                    throw new RuntimeException(e);
+                }
+                countDownLatch.countDown();
+            });
+            threads[i].start();
+        }
+        phaser.arriveAndAwaitAdvance();
+        countDownLatch.await(); // Wait for the rest of the tasks to finish.
+        int numberOfTimesKeyLoaded = 0;
+        assertEquals(numberOfSameKeys, loadAwareCacheLoaderList.size());
+        for (int i = 0; i < loadAwareCacheLoaderList.size(); i++) {
+            LoadAwareCacheLoader<String, String> loader = loadAwareCacheLoaderList.get(i);
+            if (loader.isLoaded()) {
+                numberOfTimesKeyLoaded++;
+            }
+        }
+        assertEquals(1, numberOfTimesKeyLoaded); // It should be loaded only once.
+    }
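The assertion above pins down the contract that matters for request caching: however many threads race on the same key, the loader runs exactly once. The tiered cache gets this from the inner on-heap cache's computeIfAbsent; the same at-most-once behavior can be observed in isolation with ConcurrentHashMap, which MockDiskCache also relies on. A small self-contained sketch (hypothetical names):

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

public class LoadOnceDemo {
    public static void main(String[] args) throws InterruptedException {
        ConcurrentHashMap<String, String> cache = new ConcurrentHashMap<>();
        AtomicInteger loads = new AtomicInteger();
        Runnable task = () -> cache.computeIfAbsent("key", k -> {
            loads.incrementAndGet(); // counts actual loader invocations
            return "value";
        });
        Thread[] threads = new Thread[8];
        for (int i = 0; i < threads.length; i++) {
            (threads[i] = new Thread(task)).start(); // all threads race on the same key
        }
        for (Thread t : threads) {
            t.join();
        }
        System.out.println(loads.get()); // prints 1
    }
}
```

Running this prints 1 regardless of thread interleaving, because computeIfAbsent applies the mapping function atomically, at most once per absent key.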
+
+    public void testConcurrencyForEvictionFlow() throws Exception {
+        int diskCacheSize = randomIntBetween(450, 800);
+
+        MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>();
+
+        ICache.Factory onHeapCacheFactory = new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory();
+        ICache.Factory diskCacheFactory = new MockDiskCache.MockDiskCacheFactory(500, diskCacheSize);
+        CacheConfig<String, String> cacheConfig = new CacheConfig.Builder<String, String>().setKeyType(String.class)
+            .setValueType(String.class)
+            .setWeigher((k, v) -> 150)
+            .setRemovalListener(removalListener)
+            .setSettings(
+                Settings.builder()
+                    .put(
+                        CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(),
+                        TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME
+                    )
+                    .put(FeatureFlags.PLUGGABLE_CACHE, "true")
+                    .put(
+                        OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
+                            .get(MAXIMUM_SIZE_IN_BYTES_KEY)
+                            .getKey(),
+                        200 + "b"
+                    )
+                    .build()
+            )
+            .build();
+        TieredSpilloverCache<String, String> tieredSpilloverCache = new TieredSpilloverCache.Builder<String, String>()
+            .setOnHeapCacheFactory(onHeapCacheFactory)
+            .setDiskCacheFactory(diskCacheFactory)
+            .setRemovalListener(removalListener)
+            .setCacheConfig(cacheConfig)
+            .setCacheType(CacheType.INDICES_REQUEST_CACHE)
+            .build();
+
+        String keyToBeEvicted = "key1";
+        String secondKey = "key2";
+
+        // Put the first key into the tiered cache. It will go into the onHeap cache.
+        tieredSpilloverCache.computeIfAbsent(keyToBeEvicted, new LoadAwareCacheLoader<>() {
+            @Override
+            public boolean isLoaded() {
+                return false;
+            }
+
+            @Override
+            public String load(String key) {
+                return UUID.randomUUID().toString();
+            }
+        });
+        CountDownLatch countDownLatch = new CountDownLatch(1);
+        CountDownLatch countDownLatch1 = new CountDownLatch(1);
+        // Put the second key into the tiered cache. This will cause the eviction of the first key from the onHeap
+        // cache, which should then go into the disk cache.
+        LoadAwareCacheLoader<String, String> loadAwareCacheLoader = getLoadAwareCacheLoader();
+        Thread thread = new Thread(() -> {
+            try {
+                tieredSpilloverCache.computeIfAbsent(secondKey, loadAwareCacheLoader);
+                countDownLatch1.countDown();
+            } catch (Exception e) {
+                throw new RuntimeException(e);
+            }
+        });
+        thread.start();
+        // We wait for the new key to be loaded, after which the eviction flow is guaranteed to occur.
+        assertBusy(() -> { assertTrue(loadAwareCacheLoader.isLoaded()); }, 100, TimeUnit.MILLISECONDS);
+        ICache<String, String> onDiskCache = tieredSpilloverCache.getDiskCache();
+
+        // Now, on a different thread, try to get the key (the one that got evicted) from the tiered cache. We expect
+        // this to return a non-null value, as the key should be present in the disk cache.
+        AtomicReference<String> actualValue = new AtomicReference<>();
+        Thread thread1 = new Thread(() -> {
+            try {
+                actualValue.set(tieredSpilloverCache.get(keyToBeEvicted));
+            } catch (Exception e) {
+                throw new RuntimeException(e);
+            }
+            countDownLatch.countDown();
+        });
+        thread1.start();
+        countDownLatch.await();
+        assertNotNull(actualValue.get());
+        countDownLatch1.await();
+        assertEquals(1, removalListener.evictionsMetric.count());
+        assertEquals(1, tieredSpilloverCache.getOnHeapCache().count());
+        assertEquals(1, onDiskCache.count());
+        assertNotNull(onDiskCache.get(keyToBeEvicted));
+    }
+
+    class MockCacheRemovalListener<K, V> implements RemovalListener<K, V> {
+        final CounterMetric evictionsMetric = new CounterMetric();
+
+        @Override
+        public void onRemoval(RemovalNotification<K, V> notification) {
+            evictionsMetric.inc();
+        }
+    }
+
+    private LoadAwareCacheLoader<String, String> getLoadAwareCacheLoader() {
+        return new LoadAwareCacheLoader<>() {
+            boolean isLoaded = false;
+
+            @Override
+            public String load(String key) {
+                isLoaded = true;
+                return UUID.randomUUID().toString();
+            }
+
+            @Override
+            public boolean isLoaded() {
+                return isLoaded;
+            }
+        };
+    }
+
+    private TieredSpilloverCache<String, String> intializeTieredSpilloverCache(
+        int keyValueSize,
+        int diskCacheSize,
+        RemovalListener<String, String> removalListener,
+        Settings settings,
+        long diskDeliberateDelay
+    ) {
+        ICache.Factory onHeapCacheFactory = new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory();
+        CacheConfig<String, String> cacheConfig = new CacheConfig.Builder<String, String>().setKeyType(String.class)
+            .setValueType(String.class)
+            .setWeigher((k, v) -> keyValueSize)
+            .setRemovalListener(removalListener)
+            .setSettings(
+                Settings.builder()
+                    .put(
+                        CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(),
+                        TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME
+                    )
+                    .put(FeatureFlags.PLUGGABLE_CACHE, "true")
+                    .put(settings)
+                    .build()
+            )
+            .build();
+
+        ICache.Factory mockDiskCacheFactory = new MockDiskCache.MockDiskCacheFactory(diskDeliberateDelay, diskCacheSize);
+
+        return new TieredSpilloverCache.Builder<String, String>().setCacheType(CacheType.INDICES_REQUEST_CACHE)
+            .setRemovalListener(removalListener)
+            .setOnHeapCacheFactory(onHeapCacheFactory)
+            .setDiskCacheFactory(mockDiskCacheFactory)
+            .setCacheConfig(cacheConfig)
+            .build();
+    }
+}
diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/GeoModulePluginIntegTestCase.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/GeoModulePluginIntegTestCase.java
index b17f4804d4d50..6afd5c4ca75c1 100644
--- a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/GeoModulePluginIntegTestCase.java
+++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/GeoModulePluginIntegTestCase.java
@@ -8,26 +8,44 @@ package org.opensearch.geo;
 
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.opensearch.common.settings.Settings;
 import org.opensearch.geometry.utils.StandardValidator;
 import org.opensearch.geometry.utils.WellKnownText;
 import org.opensearch.index.mapper.GeoShapeFieldMapper;
 import org.opensearch.plugins.Plugin;
-import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;
 import org.opensearch.test.TestGeoShapeFieldMapperPlugin;
 
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 
+import static
org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; + /** * This is the base class for all the Geo related integration tests. Use this class to add the features and settings * for the test cluster on which integration tests are running. */ -public abstract class GeoModulePluginIntegTestCase extends OpenSearchIntegTestCase { +public abstract class GeoModulePluginIntegTestCase extends ParameterizedStaticSettingsOpenSearchIntegTestCase { protected static final double GEOHASH_TOLERANCE = 1E-5D; protected static final WellKnownText WKT = new WellKnownText(true, new StandardValidator(true)); + public GeoModulePluginIntegTestCase(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + /** * Returns a collection of plugins that should be loaded on each node for doing the integration tests. As this * geo plugin is not getting packaged in a zip, we need to load it before the tests run. diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/MissingValueIT.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/MissingValueIT.java index 9bd082a6e1ffe..9e7ce0d3c7980 100644 --- a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/MissingValueIT.java +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/MissingValueIT.java @@ -8,10 +8,9 @@ package org.opensearch.geo.search; -import org.hamcrest.MatcherAssert; -import org.junit.Before; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.geo.GeoPoint; +import org.opensearch.common.settings.Settings; import org.opensearch.geo.GeoModulePluginIntegTestCase; import org.opensearch.geo.search.aggregations.common.GeoBoundsHelper; import org.opensearch.geo.search.aggregations.metrics.GeoBounds; @@ -21,6 +20,8 @@ import org.opensearch.geometry.Geometry; import org.opensearch.geometry.utils.WellKnownText; import org.opensearch.test.OpenSearchIntegTestCase; +import org.hamcrest.MatcherAssert; +import org.junit.Before; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; @@ -43,6 +44,10 @@ public class MissingValueIT extends GeoModulePluginIntegTestCase { private GeoPoint bottomRight; private GeoPoint topLeft; + public MissingValueIT(Settings staticSettings) { + super(staticSettings); + } + @Override protected void setupSuiteScopeCluster() throws Exception { assertAcked( diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/AbstractGeoBucketAggregationIntegTest.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/AbstractGeoBucketAggregationIntegTest.java index d9ff3e8f473ef..7316847ac6046 100644 --- a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/AbstractGeoBucketAggregationIntegTest.java +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/AbstractGeoBucketAggregationIntegTest.java @@ -67,6 +67,10 @@ public abstract class AbstractGeoBucketAggregationIntegTest extends GeoModulePlu protected final Version version = 
VersionUtils.randomIndexCompatibleVersion(random()); + public AbstractGeoBucketAggregationIntegTest(Settings dynamicSettings) { + super(dynamicSettings); + } + @Override protected boolean forbidPrivateIndexSettings() { return false; @@ -83,7 +87,7 @@ protected boolean forbidPrivateIndexSettings() { */ protected void prepareGeoShapeIndexForAggregations(final Random random) throws Exception { expectedDocsCountForGeoShapes = new HashMap<>(); - final Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); + final Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT).build(); final List<IndexRequestBuilder> geoshapes = new ArrayList<>(); assertAcked(prepareCreate(GEO_SHAPE_INDEX_NAME).setSettings(settings).setMapping(GEO_SHAPE_FIELD_NAME, "type" + "=geo_shape")); boolean isShapeIntersectingBB = false; @@ -132,7 +136,7 @@ protected void prepareSingleValueGeoPointIndex(final Random random) throws Excep expectedDocCountsForSingleGeoPoint = new HashMap<>(); createIndex("idx_unmapped"); final Settings settings = Settings.builder() - .put(IndexMetadata.SETTING_VERSION_CREATED, version) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put("index.number_of_shards", 4) .put("index.number_of_replicas", 0) .build(); @@ -156,7 +160,7 @@ protected void prepareSingleValueGeoPointIndex(final Random random) throws Excep protected void prepareMultiValuedGeoPointIndex(final Random random) throws Exception { multiValuedExpectedDocCountsGeoPoint = new HashMap<>(); - final Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); + final Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT).build(); final List<IndexRequestBuilder> cities = new ArrayList<>(); assertAcked( prepareCreate("multi_valued_idx").setSettings(settings) diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/GeoHashGridIT.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/GeoHashGridIT.java index e3ca03aa495ab..4048bb62f8818 100644 --- a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/GeoHashGridIT.java +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/GeoHashGridIT.java @@ -35,6 +35,7 @@ import org.opensearch.common.geo.GeoBoundingBox; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.geo.GeoShapeDocValue; +import org.opensearch.common.settings.Settings; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoGrid; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoGridAggregationBuilder; import org.opensearch.geo.search.aggregations.common.GeoBoundsHelper; @@ -53,17 +54,21 @@ import java.util.Random; import java.util.Set; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; import static org.opensearch.geometry.utils.Geohash.PRECISION; import static org.opensearch.geometry.utils.Geohash.stringEncode; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; @OpenSearchIntegTestCase.SuiteScopeTestCase public class GeoHashGridIT extends AbstractGeoBucketAggregationIntegTest { private static final String AGG_NAME = "geohashgrid"; + public GeoHashGridIT(Settings dynamicSettings) 
{ + super(dynamicSettings); + } + @Override public void setupSuiteScopeCluster() throws Exception { Random random = random(); diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/GeoTileGridIT.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/GeoTileGridIT.java index 4c2c13b66d926..2a5772d417530 100644 --- a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/GeoTileGridIT.java +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/GeoTileGridIT.java @@ -8,11 +8,11 @@ package org.opensearch.geo.search.aggregations.bucket; -import org.hamcrest.MatcherAssert; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.geo.GeoBoundingBox; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.geo.GeoShapeDocValue; +import org.opensearch.common.settings.Settings; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoGrid; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoGridAggregationBuilder; import org.opensearch.geo.search.aggregations.common.GeoBoundsHelper; @@ -21,6 +21,7 @@ import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.bucket.GeoTileUtils; import org.opensearch.test.OpenSearchIntegTestCase; +import org.hamcrest.MatcherAssert; import java.util.HashSet; import java.util.List; @@ -28,8 +29,8 @@ import java.util.Set; import java.util.stream.Collectors; -import static org.hamcrest.Matchers.equalTo; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.equalTo; @OpenSearchIntegTestCase.SuiteScopeTestCase public class GeoTileGridIT extends AbstractGeoBucketAggregationIntegTest { @@ -38,6 +39,10 @@ public class GeoTileGridIT extends AbstractGeoBucketAggregationIntegTest { private static final String AGG_NAME = "geotilegrid"; + public GeoTileGridIT(Settings dynamicSettings) { + super(dynamicSettings); + } + @Override public void setupSuiteScopeCluster() throws Exception { final Random random = random(); diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/ShardReduceIT.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/ShardReduceIT.java index 5b4dd052a2f65..85541c60f133c 100644 --- a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/ShardReduceIT.java +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/ShardReduceIT.java @@ -10,6 +10,7 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.geo.GeoModulePluginIntegTestCase; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoGrid; import org.opensearch.geo.tests.common.AggregationBuilders; @@ -19,11 +20,11 @@ import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.test.OpenSearchIntegTestCase; -import static org.hamcrest.Matchers.equalTo; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; +import 
static org.hamcrest.Matchers.equalTo; /** * Tests making sure that the reduce is propagated to all aggregations in the hierarchy when executing on a single shard @@ -34,6 +35,10 @@ @OpenSearchIntegTestCase.SuiteScopeTestCase public class ShardReduceIT extends GeoModulePluginIntegTestCase { + public ShardReduceIT(Settings dynamicSettings) { + super(dynamicSettings); + } + private IndexRequestBuilder indexDoc(String date, int value) throws Exception { return client().prepareIndex("idx") .setSource( diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/common/GeoBoundsHelper.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/common/GeoBoundsHelper.java index 257cc98db69fc..297aa311d5b75 100644 --- a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/common/GeoBoundsHelper.java +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/common/GeoBoundsHelper.java @@ -8,7 +8,6 @@ package org.opensearch.geo.search.aggregations.common; -import org.junit.Assert; import org.opensearch.common.geo.GeoPoint; import org.opensearch.geometry.Geometry; import org.opensearch.geometry.GeometryCollection; @@ -20,6 +19,7 @@ import org.opensearch.geometry.Polygon; import org.opensearch.geometry.Rectangle; import org.opensearch.geometry.ShapeType; +import org.junit.Assert; import java.util.Locale; diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoAggregatorModulePluginTestCase.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoAggregatorModulePluginTestCase.java index f7c9747e1a163..711744b944ce3 100644 --- a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoAggregatorModulePluginTestCase.java +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoAggregatorModulePluginTestCase.java @@ -10,13 +10,12 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; -import org.opensearch.common.Strings; import org.opensearch.common.document.DocumentField; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.geo.GeoModulePluginIntegTestCase; import org.opensearch.geo.search.aggregations.common.GeoBoundsHelper; import org.opensearch.geo.tests.common.RandomGeoGenerator; @@ -33,10 +32,10 @@ import java.util.Map; import java.util.stream.IntStream; -import static org.hamcrest.Matchers.equalTo; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.equalTo; /** * This is base class for all Geo Aggregations Integration Tests. 
This class is similar to what we have in the server @@ -66,6 +65,10 @@ public abstract class AbstractGeoAggregatorModulePluginTestCase extends GeoModul protected static Map<String, Integer> expectedDocCountsForGeoHash = null; protected static Map<String, GeoPoint> expectedCentroidsForGeoHash = null; + public AbstractGeoAggregatorModulePluginTestCase(Settings dynamicSettings) { + super(dynamicSettings); + } + @Override public void setupSuiteScopeCluster() throws Exception { createIndex(UNMAPPED_IDX_NAME); @@ -256,7 +259,7 @@ public void setupSuiteScopeCluster() throws Exception { long totalHits = response.getHits().getTotalHits().value; XContentBuilder builder = XContentFactory.jsonBuilder(); response.toXContent(builder, ToXContent.EMPTY_PARAMS); - logger.info("Full high_card_idx Response Content:\n{ {} }", Strings.toString(builder)); + logger.info("Full high_card_idx Response Content:\n{ {} }", builder.toString()); for (int i = 0; i < totalHits; i++) { SearchHit searchHit = response.getHits().getAt(i); assertThat("Hit " + i + " with id: " + searchHit.getId(), searchHit.getIndex(), equalTo("high_card_idx")); diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsITTestCase.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsITTestCase.java index 4be965b862ddf..1c28df6bc4ea2 100644 --- a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsITTestCase.java +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsITTestCase.java @@ -32,18 +32,24 @@ package org.opensearch.geo.search.aggregations.metrics; -import org.hamcrest.MatcherAssert; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.geo.GeoPoint; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.util.BigArray; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.bucket.global.Global; import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.search.aggregations.bucket.terms.Terms.Bucket; import org.opensearch.test.OpenSearchIntegTestCase; +import org.hamcrest.MatcherAssert; import java.util.List; +import static org.opensearch.geo.tests.common.AggregationBuilders.geoBounds; +import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.aggregations.AggregationBuilders.global; +import static org.opensearch.search.aggregations.AggregationBuilders.terms; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; @@ -51,16 +57,15 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.sameInstance; -import static org.opensearch.geo.tests.common.AggregationBuilders.geoBounds; -import static org.opensearch.index.query.QueryBuilders.matchAllQuery; -import static org.opensearch.search.aggregations.AggregationBuilders.global; -import static org.opensearch.search.aggregations.AggregationBuilders.terms; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; @OpenSearchIntegTestCase.SuiteScopeTestCase public class GeoBoundsITTestCase extends AbstractGeoAggregatorModulePluginTestCase { private static 
final String aggName = "geoBounds"; + public GeoBoundsITTestCase(Settings dynamicSettings) { + super(dynamicSettings); + } + public void testSingleValuedField() throws Exception { SearchResponse response = client().prepareSearch(IDX_NAME) .addAggregation(geoBounds(aggName).field(SINGLE_VALUED_FIELD_NAME).wrapLongitude(false)) diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoCentroidITTestCase.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoCentroidITTestCase.java index e6d45e27b8f70..2dc8a91600419 100644 --- a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoCentroidITTestCase.java +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoCentroidITTestCase.java @@ -34,6 +34,7 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.geo.GeoPoint; +import org.opensearch.common.settings.Settings; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoGrid; import org.opensearch.geo.tests.common.AggregationBuilders; import org.opensearch.search.aggregations.metrics.GeoCentroid; @@ -41,16 +42,20 @@ import java.util.List; +import static org.opensearch.search.aggregations.AggregationBuilders.geoCentroid; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; -import static org.opensearch.search.aggregations.AggregationBuilders.geoCentroid; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; @OpenSearchIntegTestCase.SuiteScopeTestCase public class GeoCentroidITTestCase extends AbstractGeoAggregatorModulePluginTestCase { private static final String aggName = "geoCentroid"; + public GeoCentroidITTestCase(Settings dynamicSettings) { + super(dynamicSettings); + } + public void testSingleValueFieldAsSubAggToGeohashGrid() throws Exception { SearchResponse response = client().prepareSearch(HIGH_CARD_IDX_NAME) .addAggregation( diff --git a/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java b/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java index d9dfa4db3c1c1..99f965e8b8c66 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java +++ b/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java @@ -33,10 +33,10 @@ package org.opensearch.geo; import org.opensearch.geo.search.aggregations.bucket.composite.GeoTileGridValuesSourceBuilder; -import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; -import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGrid; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGrid; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; import org.opensearch.geo.search.aggregations.metrics.GeoBounds; import org.opensearch.geo.search.aggregations.metrics.GeoBoundsAggregationBuilder; import org.opensearch.geo.search.aggregations.metrics.InternalGeoBounds; diff --git a/modules/geo/src/main/java/org/opensearch/geo/algorithm/PolygonGenerator.java 
b/modules/geo/src/main/java/org/opensearch/geo/algorithm/PolygonGenerator.java index 246ece4342cff..da1d97260ec96 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/algorithm/PolygonGenerator.java +++ b/modules/geo/src/main/java/org/opensearch/geo/algorithm/PolygonGenerator.java @@ -10,7 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.common.util.CollectionUtils; +import org.opensearch.core.common.util.CollectionUtils; import java.awt.geom.Point2D; import java.util.ArrayList; diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java index 50c93edacd6b7..2b59a07da254f 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java @@ -35,22 +35,22 @@ import org.apache.lucene.index.IndexReader; import org.opensearch.common.geo.GeoBoundingBox; import org.opensearch.common.geo.GeoPoint; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.util.BigArrays; import org.opensearch.core.ParseField; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.geo.search.aggregations.bucket.geogrid.cells.CellIdSource; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; +import org.opensearch.geo.search.aggregations.bucket.geogrid.cells.CellIdSource; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.DocValueFormat; +import org.opensearch.search.aggregations.bucket.GeoTileUtils; import org.opensearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; import org.opensearch.search.aggregations.bucket.composite.CompositeValuesSourceConfig; import org.opensearch.search.aggregations.bucket.composite.CompositeValuesSourceParserHelper; -import org.opensearch.search.aggregations.bucket.GeoTileUtils; import org.opensearch.search.aggregations.bucket.missing.MissingOrder; import org.opensearch.search.aggregations.support.CoreValuesSourceType; import org.opensearch.search.aggregations.support.ValuesSource; diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileValuesSource.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileValuesSource.java index 303e577e99e7b..665ea6c5f2f37 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileValuesSource.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileValuesSource.java @@ -38,9 +38,9 @@ import org.opensearch.common.util.BigArrays; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.search.DocValueFormat; +import org.opensearch.search.aggregations.bucket.GeoTileUtils; import org.opensearch.search.aggregations.bucket.composite.LongValuesSource; import 
org.opensearch.search.aggregations.bucket.composite.SingleDimensionValuesSource; -import org.opensearch.search.aggregations.bucket.GeoTileUtils; import org.opensearch.search.aggregations.bucket.missing.MissingOrder; import java.io.IOException; @@ -48,7 +48,7 @@ /** * A {@link SingleDimensionValuesSource} for geotile values. - * + * <p> * Since geotile values can be represented as long values, this class is almost the same as {@link LongValuesSource} * The main differences is {@link GeoTileValuesSource#setAfter(Comparable)} as it needs to accept geotile string values i.e. "zoom/x/y". * diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BaseGeoGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BaseGeoGrid.java index d41518cedbf86..16a74f58c9d3a 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BaseGeoGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BaseGeoGrid.java @@ -31,9 +31,9 @@ package org.opensearch.geo.search.aggregations.bucket.geogrid; +import org.opensearch.common.util.LongObjectPagedHashMap; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.util.LongObjectPagedHashMap; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.InternalAggregations; diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java index c716f2d3b2306..b0b980ca58975 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java @@ -33,9 +33,9 @@ package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.OpenSearchException; -import org.opensearch.core.ParseField; import org.opensearch.common.geo.GeoBoundingBox; import org.opensearch.common.geo.GeoPoint; +import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ObjectParser; diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java index 197ab2d99f114..60ee1973c1080 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java @@ -196,4 +196,9 @@ static void registerAggregators(ValuesSourceRegistry.Builder builder) { true ); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java index d5a3919684345..54b82f9770b63 100644 --- 
a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java @@ -194,4 +194,9 @@ static void registerAggregators(ValuesSourceRegistry.Builder builder) { true ); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/cells/BoundedCellValues.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/cells/BoundedCellValues.java index 588c8bc59c2e0..6ff38fa28978e 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/cells/BoundedCellValues.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/cells/BoundedCellValues.java @@ -37,7 +37,7 @@ /** * Class representing {@link CellValues} whose values are filtered * according to whether they are within the specified {@link GeoBoundingBox}. - * + * <p> * The specified bounding box is assumed to be bounded. * * @opensearch.internal diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoBoundsAggregator.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoBoundsAggregator.java index 2f013b76d5a67..4a39fa1da04eb 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoBoundsAggregator.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoBoundsAggregator.java @@ -8,9 +8,9 @@ package org.opensearch.geo.search.aggregations.metrics; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.DoubleArray; -import org.opensearch.common.lease.Releasables; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.metrics.MetricsAggregator; diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregatorFactory.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregatorFactory.java index 780f25ba3d7fb..fc9cce3cf98c1 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregatorFactory.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregatorFactory.java @@ -89,4 +89,9 @@ static void registerAggregators(ValuesSourceRegistry.Builder builder) { builder.register(GeoBoundsAggregationBuilder.REGISTRY_KEY, CoreValuesSourceType.GEOPOINT, GeoBoundsAggregator::new, true); builder.register(GeoBoundsAggregationBuilder.REGISTRY_KEY, CoreValuesSourceType.GEO_SHAPE, GeoBoundsGeoShapeAggregator::new, true); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridAggregationCompositeAggregatorTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridAggregationCompositeAggregatorTests.java index bc7fde8d66d0a..0d25e4caa6e62 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridAggregationCompositeAggregatorTests.java +++ 
b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridAggregationCompositeAggregatorTests.java @@ -13,15 +13,15 @@ import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.MatchAllDocsQuery; -import org.junit.Before; import org.opensearch.common.geo.GeoPoint; import org.opensearch.geo.GeoModulePlugin; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; import org.opensearch.index.mapper.GeoPointFieldMapper; import org.opensearch.plugins.SearchPlugin; -import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; import org.opensearch.search.aggregations.bucket.GeoTileUtils; +import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; import org.opensearch.search.aggregations.composite.BaseCompositeAggregatorTestCase; +import org.junit.Before; import java.util.ArrayList; import java.util.Arrays; diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java index 6766d31b0491e..c2e31aef8ae73 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java @@ -37,11 +37,11 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; import org.opensearch.common.CheckedConsumer; import org.opensearch.common.geo.GeoBoundingBox; diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridParserTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridParserTests.java index db38f1e978b06..a195a1ade781a 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridParserTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridParserTests.java @@ -31,19 +31,19 @@ package org.opensearch.geo.search.aggregations.bucket.geogrid; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.lessThanOrEqualTo; - import org.opensearch.common.unit.DistanceUnit; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.geo.GeometryTestUtils; import org.opensearch.geometry.Rectangle; import org.opensearch.test.OpenSearchTestCase; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.instanceOf; +import static 
org.hamcrest.Matchers.lessThanOrEqualTo; + public class GeoHashGridParserTests extends OpenSearchTestCase { public void testParseValidFromInts() throws Exception { int precision = randomIntBetween(1, 12); diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridParserTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridParserTests.java index 0bf41f37c45a7..02b7c10198d80 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridParserTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridParserTests.java @@ -32,9 +32,9 @@ package org.opensearch.geo.search.aggregations.bucket.geogrid; import org.opensearch.ExceptionsHelper; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.geo.GeometryTestUtils; import org.opensearch.geometry.Rectangle; import org.opensearch.search.aggregations.bucket.GeoTileUtils; diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsGeoShapeAggregatorTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsGeoShapeAggregatorTests.java index d449d72f0b148..112e338e0f930 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsGeoShapeAggregatorTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsGeoShapeAggregatorTests.java @@ -19,8 +19,6 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; -import org.hamcrest.MatcherAssert; -import org.junit.Assert; import org.opensearch.common.geo.GeoBoundingBox; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.geo.GeoShapeUtils; @@ -39,6 +37,8 @@ import org.opensearch.plugins.SearchPlugin; import org.opensearch.search.aggregations.AggregatorTestCase; import org.opensearch.test.OpenSearchTestCase; +import org.hamcrest.MatcherAssert; +import org.junit.Assert; import java.util.ArrayList; import java.util.Collections; diff --git a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java index 706c73e7416f5..3b2075bd9a22e 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java +++ b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java @@ -8,10 +8,10 @@ package org.opensearch.geo.tests.common; -import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; -import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGrid; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGrid; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; import org.opensearch.geo.search.aggregations.metrics.GeoBounds; import org.opensearch.geo.search.aggregations.metrics.GeoBoundsAggregationBuilder; diff --git 
a/modules/geo/src/test/java/org/opensearch/geo/tests/common/RandomGeoGeometryGenerator.java b/modules/geo/src/test/java/org/opensearch/geo/tests/common/RandomGeoGeometryGenerator.java index c6f78e846955d..45a72c7103ae9 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/tests/common/RandomGeoGeometryGenerator.java +++ b/modules/geo/src/test/java/org/opensearch/geo/tests/common/RandomGeoGeometryGenerator.java @@ -8,7 +8,6 @@ package org.opensearch.geo.tests.common; -import org.junit.Assert; import org.opensearch.geo.algorithm.PolygonGenerator; import org.opensearch.geometry.Geometry; import org.opensearch.geometry.GeometryCollection; @@ -23,6 +22,7 @@ import org.opensearch.geometry.ShapeType; import org.opensearch.index.mapper.GeoShapeIndexer; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Assert; import java.util.ArrayList; import java.util.Arrays; diff --git a/modules/geo/src/yamlRestTest/java/org/opensearch/geo/GeoClientYamlTestSuiteIT.java b/modules/geo/src/yamlRestTest/java/org/opensearch/geo/GeoClientYamlTestSuiteIT.java index 22604cff0fcf1..1f734b156689d 100644 --- a/modules/geo/src/yamlRestTest/java/org/opensearch/geo/GeoClientYamlTestSuiteIT.java +++ b/modules/geo/src/yamlRestTest/java/org/opensearch/geo/GeoClientYamlTestSuiteIT.java @@ -34,6 +34,7 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.test.rest.yaml.ClientYamlTestCandidate; import org.opensearch.test.rest.yaml.OpenSearchClientYamlSuiteTestCase; diff --git a/modules/ingest-common/src/internalClusterTest/java/org/opensearch/ingest/common/IngestRestartIT.java b/modules/ingest-common/src/internalClusterTest/java/org/opensearch/ingest/common/IngestRestartIT.java index 49c7c7ae152c0..ae4f73edd37d7 100644 --- a/modules/ingest-common/src/internalClusterTest/java/org/opensearch/ingest/common/IngestRestartIT.java +++ b/modules/ingest-common/src/internalClusterTest/java/org/opensearch/ingest/common/IngestRestartIT.java @@ -33,16 +33,16 @@ import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.opensearch.action.support.WriteRequest; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.ingest.IngestStats; import org.opensearch.plugins.Plugin; import org.opensearch.script.MockScriptEngine; import org.opensearch.script.MockScriptPlugin; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -108,7 +108,7 @@ public void testFailureInConditionalProcessor() { + " ]\n" + "}" ), - XContentType.JSON + MediaTypeRegistry.JSON ) .get(); @@ -160,8 +160,8 @@ public void testScriptDisabled() throws Exception { equalTo(id) ); - client().admin().cluster().preparePutPipeline(pipelineIdWithScript, pipelineWithScript, XContentType.JSON).get(); - client().admin().cluster().preparePutPipeline(pipelineIdWithoutScript, pipelineWithoutScript, XContentType.JSON).get(); + client().admin().cluster().preparePutPipeline(pipelineIdWithScript, pipelineWithScript, MediaTypeRegistry.JSON).get(); + client().admin().cluster().preparePutPipeline(pipelineIdWithoutScript, 
pipelineWithoutScript, MediaTypeRegistry.JSON).get(); checkPipelineExists.accept(pipelineIdWithScript); checkPipelineExists.accept(pipelineIdWithoutScript); @@ -225,7 +225,7 @@ public void testPipelineWithScriptProcessorThatHasStoredScript() throws Exceptio .setId("1") .setContent( new BytesArray("{\"script\": {\"lang\": \"" + MockScriptEngine.NAME + "\", \"source\": \"my_script\"} }"), - XContentType.JSON + MediaTypeRegistry.JSON ) .get(); BytesReference pipeline = new BytesArray( @@ -236,7 +236,7 @@ public void testPipelineWithScriptProcessorThatHasStoredScript() throws Exceptio + " ]\n" + "}" ); - client().admin().cluster().preparePutPipeline("_id", pipeline, XContentType.JSON).get(); + client().admin().cluster().preparePutPipeline("_id", pipeline, MediaTypeRegistry.JSON).get(); client().prepareIndex("index") .setId("1") @@ -277,7 +277,7 @@ public void testWithDedicatedIngestNode() throws Exception { BytesReference pipeline = new BytesArray( "{\n" + " \"processors\" : [\n" + " {\"set\" : {\"field\": \"y\", \"value\": 0}}\n" + " ]\n" + "}" ); - client().admin().cluster().preparePutPipeline("_id", pipeline, XContentType.JSON).get(); + client().admin().cluster().preparePutPipeline("_id", pipeline, MediaTypeRegistry.JSON).get(); client().prepareIndex("index") .setId("1") diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/BytesProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/BytesProcessor.java index 3bd1137975800..b76fe41c8e67d 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/BytesProcessor.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/BytesProcessor.java @@ -32,7 +32,7 @@ package org.opensearch.ingest.common; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import java.util.Map; diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/CommunityIdProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/CommunityIdProcessor.java new file mode 100644 index 0000000000000..c968fb2f6c2da --- /dev/null +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/CommunityIdProcessor.java @@ -0,0 +1,647 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ingest.common; + +import org.opensearch.common.hash.MessageDigests; +import org.opensearch.common.network.InetAddresses; +import org.opensearch.core.common.Strings; +import org.opensearch.ingest.AbstractProcessor; +import org.opensearch.ingest.ConfigurationUtils; +import org.opensearch.ingest.IngestDocument; +import org.opensearch.ingest.Processor; + +import java.security.MessageDigest; +import java.util.Arrays; +import java.util.Base64; +import java.util.Locale; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.opensearch.ingest.ConfigurationUtils.newConfigurationException; + +/** + * Processor that generates a community ID flow hash for network flow tuples; the algorithm is defined in + * <a href="https://github.com/corelight/community-id-spec">Community ID Flow Hashing</a>.
+ */ +public class CommunityIdProcessor extends AbstractProcessor { + public static final String TYPE = "community_id"; + // the version of the community id flow hashing algorithm + private static final String COMMUNITY_ID_HASH_VERSION = "1"; + // 0 byte for padding + private static final byte PADDING_BYTE = 0; + // the maximum code number for network protocol, ICMP message type and code as defined by IANA + private static final int IANA_COMMON_MAX_NUMBER = 255; + // the minimum code number for network protocol, ICMP message type and code as defined by IANA + private static final int IANA_COMMON_MIN_NUMBER = 0; + // the minimum seed for generating hash + private static final int MIN_SEED = 0; + // the maximum seed for generating hash + private static final int MAX_SEED = 65535; + // the minimum port number in transport layer + private static final int MIN_PORT = 0; + // the maximum port number in transport layer + private static final int MAX_PORT = 65535; + private static final String ICMP_MESSAGE_TYPE = "type"; + private static final String ICMP_MESSAGE_CODE = "code"; + private final String sourceIPField; + private final String sourcePortField; + private final String destinationIPField; + private final String destinationPortField; + private final String ianaProtocolNumberField; + private final String protocolField; + private final String icmpTypeField; + private final String icmpCodeField; + private final int seed; + private final String targetField; + private final boolean ignoreMissing; + + CommunityIdProcessor( + String tag, + String description, + String sourceIPField, + String sourcePortField, + String destinationIPField, + String destinationPortField, + String ianaProtocolNumberField, + String protocolField, + String icmpTypeField, + String icmpCodeField, + int seed, + String targetField, + boolean ignoreMissing + ) { + super(tag, description); + this.sourceIPField = sourceIPField; + this.sourcePortField = sourcePortField; + this.destinationIPField = destinationIPField; + this.destinationPortField = destinationPortField; + this.ianaProtocolNumberField = ianaProtocolNumberField; + this.protocolField = protocolField; + this.icmpTypeField = icmpTypeField; + this.icmpCodeField = icmpCodeField; + this.seed = seed; + this.targetField = targetField; + this.ignoreMissing = ignoreMissing; + } + + public String getSourceIPField() { + return sourceIPField; + } + + public String getSourcePortField() { + return sourcePortField; + } + + public String getDestinationIPField() { + return destinationIPField; + } + + public String getDestinationPortField() { + return destinationPortField; + } + + public String getIANAProtocolNumberField() { + return ianaProtocolNumberField; + } + + public String getProtocolField() { + return protocolField; + } + + public String getIcmpTypeField() { + return icmpTypeField; + } + + public String getIcmpCodeField() { + return icmpCodeField; + } + + public int getSeed() { + return seed; + } + + public String getTargetField() { + return targetField; + } + + public boolean isIgnoreMissing() { + return ignoreMissing; + } + + @Override + public IngestDocument execute(IngestDocument document) { + // resolve the protocol first + Protocol protocol = resolveProtocol(document); + // exit quietly if protocol cannot be resolved and ignore_missing is true + if (protocol == null) { + return document; + } + + // resolve the ip addresses next, exit quietly if either source ip or destination ip cannot be resolved and ignore_missing is true + byte[] sourceIPByteArray = resolveIP(document,
sourceIPField); + if (sourceIPByteArray == null) { + return document; + } + byte[] destIPByteArray = resolveIP(document, destinationIPField); + if (destIPByteArray == null) { + return document; + } + // source ip and destination ip must have the same format, either ipv4 or ipv6 + if (sourceIPByteArray.length != destIPByteArray.length) { + throw new IllegalArgumentException("source ip and destination ip must have the same format"); + } + + // resolve source port and destination port for transport protocols, + // exit quietly if either source port or destination port is null or empty + Integer sourcePort = null; + Integer destinationPort = null; + if (protocol.isTransportProtocol()) { + sourcePort = resolvePort(document, sourcePortField); + if (sourcePort == null) { + return document; + } + + destinationPort = resolvePort(document, destinationPortField); + if (destinationPort == null) { + return document; + } + } + + // resolve ICMP message type and code, supporting both ipv4 and ipv6 + // set source port to icmp type, and set dest port to icmp code, so that we can have a generic way to handle + // all protocols + boolean isOneway = true; + final boolean isICMPProtocol = Protocol.ICMP == protocol || Protocol.ICMP_V6 == protocol; + if (isICMPProtocol) { + Integer icmpType = resolveICMP(document, icmpTypeField, ICMP_MESSAGE_TYPE); + if (icmpType == null) { + return document; + } else { + sourcePort = icmpType; + } + + // for the message types which don't have a code, fetch the equivalent code from the pre-defined mapper, + // and such flows can be considered two-way + Byte equivalentCode = Protocol.ICMP.getProtocolCode() == protocol.getProtocolCode() + ? ICMPType.getEquivalentCode(icmpType.byteValue()) + : ICMPv6Type.getEquivalentCode(icmpType.byteValue()); + if (equivalentCode != null) { + isOneway = false; + // for IPv6-ICMP, the pre-defined code is a negative byte, + // we need to convert it to a positive integer for later comparison + destinationPort = Protocol.ICMP.getProtocolCode() == protocol.getProtocolCode() + ?
Integer.valueOf(equivalentCode) + : Byte.toUnsignedInt(equivalentCode); + } else { + // get icmp code from the document if we cannot get equivalent code from the pre-defined mapper + Integer icmpCode = resolveICMP(document, icmpCodeField, ICMP_MESSAGE_CODE); + if (icmpCode == null) { + return document; + } else { + destinationPort = icmpCode; + } + } + } + + assert (sourcePort != null && destinationPort != null); + boolean isLess = compareIPAndPort(sourceIPByteArray, sourcePort, destIPByteArray, destinationPort); + // swap ip and port to remove directionality in the flow tuple, smaller ip:port tuple comes first + // but for ICMP and IPv6-ICMP, if it's a one-way flow, the flow tuple is considered to be ordered + if (!isLess && (!isICMPProtocol || !isOneway)) { + byte[] byteArray = sourceIPByteArray; + sourceIPByteArray = destIPByteArray; + destIPByteArray = byteArray; + + int tempPort = sourcePort; + sourcePort = destinationPort; + destinationPort = tempPort; + } + + // generate flow hash + String digest = generateCommunityIDHash( + protocol.getProtocolCode(), + sourceIPByteArray, + destIPByteArray, + sourcePort, + destinationPort, + seed + ); + document.setFieldValue(targetField, digest); + return document; + } + + @Override + public String getType() { + return TYPE; + } + + /** + * Resolve network protocol + * @param document the ingesting document + * @return the resolved protocol, or null if the protocol cannot be resolved and ignore_missing is true + * @throws IllegalArgumentException only if ignoreMissing is false and the field is null, empty, invalid, + * or if the field that is found at the provided path is not of the expected type. + */ + private Protocol resolveProtocol(IngestDocument document) { + Protocol protocol = null; + Integer ianaProtocolNumber = null; + String protocolName = null; + if (!Strings.isNullOrEmpty(ianaProtocolNumberField)) { + ianaProtocolNumber = document.getFieldValue(ianaProtocolNumberField, Integer.class, true); + } + if (!Strings.isNullOrEmpty(protocolField)) { + protocolName = document.getFieldValue(protocolField, String.class, true); + } + // prefer the iana protocol number if specified, otherwise resolve by protocol name + if (ianaProtocolNumber != null) { + if (ianaProtocolNumber >= IANA_COMMON_MIN_NUMBER + && ianaProtocolNumber <= IANA_COMMON_MAX_NUMBER + && Protocol.protocolCodeMap.containsKey(ianaProtocolNumber.byteValue())) { + protocol = Protocol.protocolCodeMap.get(ianaProtocolNumber.byteValue()); + } else { + throw new IllegalArgumentException("unsupported iana protocol number [" + ianaProtocolNumber + "]"); + } + } else if (protocolName != null) { + Protocol protocolFromName = Protocol.fromProtocolName(protocolName); + if (protocolFromName != null) { + protocol = protocolFromName; + } else { + throw new IllegalArgumentException("unsupported protocol [" + protocolName + "]"); + } + } + + // return null if protocol cannot be resolved and ignore_missing is true + if (protocol == null) { + if (ignoreMissing) { + return null; + } else { + throw new IllegalArgumentException( + "cannot resolve protocol from either iana protocol number field [" + + ianaProtocolNumberField + + "] or protocol name field [" + + protocolField + + "]" + ); + } + } + return protocol; + } + + /** + * Resolve ip address + * @param document the ingesting document + * @param fieldName the ip field to be resolved + * @return the byte array of the resolved ip + * @throws IllegalArgumentException only if ignoreMissing is false and the field is null, empty, invalid, + * or if the field that is
found at the provided path is not of the expected type. + */ + private byte[] resolveIP(IngestDocument document, String fieldName) { + if (Strings.isNullOrEmpty(fieldName)) { + if (ignoreMissing) { + return null; + } else { + throw new IllegalArgumentException("both source ip field path and destination ip field path cannot be null or empty"); + } + } + + String ipAddress = document.getFieldValue(fieldName, String.class, true); + if (Strings.isNullOrEmpty(ipAddress)) { + if (ignoreMissing) { + return null; + } else { + throw new IllegalArgumentException("ip address in the field [" + fieldName + "] is null or empty"); + } + } + + byte[] byteArray = InetAddresses.ipStringToBytes(ipAddress); + if (byteArray == null) { + throw new IllegalArgumentException( + "ip address [" + ipAddress + "] in the field [" + fieldName + "] is not a valid ipv4/ipv6 address" + ); + } else { + return byteArray; + } + } + + /** + * Resolve port for transport protocols + * @param document the ingesting document + * @param fieldName the port field to be resolved + * @return the resolved port number, null if the resolved port is null and ignoreMissing is true + * @throws IllegalArgumentException only if ignoreMissing is false and the field is null, empty, invalid, + * or if the field that is found at the provided path is not of the expected type. + */ + private Integer resolvePort(IngestDocument document, String fieldName) { + Integer port; + if (Strings.isNullOrEmpty(fieldName)) { + if (ignoreMissing) { + return null; + } else { + throw new IllegalArgumentException("both source port and destination port field path cannot be null or empty"); + } + } else { + port = document.getFieldValue(fieldName, Integer.class, true); + } + + if (port == null) { + if (ignoreMissing) { + return null; + } else { + throw new IllegalArgumentException( + "both source port and destination port cannot be null, but port in the field path [" + fieldName + "] is null" + ); + } + } else if (port < MIN_PORT || port > MAX_PORT) { + throw new IllegalArgumentException( + "both source port and destination port must be between 0 and 65535, but port in the field path [" + + fieldName + + "] is [" + + port + + "]" + ); + } + return port; + } + + /** + * Resolve ICMP's message type and code field + * @param document the ingesting document + * @param fieldName name of the type or the code field + * @param fieldType type or code + * @return the resolved value of the specified field, or null if ignore_missing is true and the field doesn't exist or is null + * @throws IllegalArgumentException only if ignoreMissing is false and the field is null, empty, invalid, + * or if the field that is found at the provided path is not of the expected type.
+ */ + private Integer resolveICMP(IngestDocument document, String fieldName, String fieldType) { + if (Strings.isNullOrEmpty(fieldName)) { + if (ignoreMissing) { + return null; + } else { + throw new IllegalArgumentException("icmp message " + fieldType + " field path cannot be null or empty"); + } + } + Integer fieldValue = document.getFieldValue(fieldName, Integer.class, true); + if (fieldValue == null) { + if (ignoreMissing) { + return null; + } else { + throw new IllegalArgumentException("icmp message " + fieldType + " cannot be null"); + } + } else if (fieldValue < IANA_COMMON_MIN_NUMBER || fieldValue > IANA_COMMON_MAX_NUMBER) { + throw new IllegalArgumentException("invalid icmp message " + fieldType + " [" + fieldValue + "]"); + } else { + return fieldValue; + } + } + + /** + * Generate the community id flow hash for the flow tuple + * @param protocolCode byte of the protocol number + * @param sourceIPByteArray bytes of the source ip in the network flow tuple + * @param destIPByteArray bytes of the destination ip in the network flow tuple + * @param sourcePort source port in the network flow tuple + * @param destinationPort destination port in the network flow tuple + * @param seed seed for generating hash + * @return the generated hash value, computed with SHA-1 + */ + private String generateCommunityIDHash( + byte protocolCode, + byte[] sourceIPByteArray, + byte[] destIPByteArray, + Integer sourcePort, + Integer destinationPort, + int seed + ) { + MessageDigest messageDigest = MessageDigests.sha1(); + messageDigest.update(intToTwoByteArray(seed)); + messageDigest.update(sourceIPByteArray); + messageDigest.update(destIPByteArray); + messageDigest.update(protocolCode); + messageDigest.update(PADDING_BYTE); + messageDigest.update(intToTwoByteArray(sourcePort)); + messageDigest.update(intToTwoByteArray(destinationPort)); + + return COMMUNITY_ID_HASH_VERSION + ":" + Base64.getEncoder().encodeToString(messageDigest.digest()); + } + + /** + * Convert an integer to a two-byte array + * @param val the integer which will be consumed to produce a two-byte array + * @return the two-byte array + */ + private byte[] intToTwoByteArray(Integer val) { + byte[] byteArray = new byte[2]; + byteArray[0] = Integer.valueOf(val >>> 8).byteValue(); + byteArray[1] = val.byteValue(); + return byteArray; + } + + /** + * Compare the ip and port, return true if the flow tuple is ordered + * @param sourceIPByteArray bytes of the source ip in the network flow tuple + * @param destIPByteArray bytes of the destination ip in the network flow tuple + * @param sourcePort source port in the network flow tuple + * @param destinationPort destination port in the network flow tuple + * @return true if sourceIP is less than destinationIP, or if the two IPs are equal + * and sourcePort is less than destinationPort + */ + private boolean compareIPAndPort(byte[] sourceIPByteArray, int sourcePort, byte[] destIPByteArray, int destinationPort) { + int compareResult = compareByteArray(sourceIPByteArray, destIPByteArray); + return compareResult < 0 || compareResult == 0 && sourcePort < destinationPort; + } + + /** + * Compare two byte arrays of the same length + * @param byteArray1 the first byte array to compare + * @param byteArray2 the second byte array to compare + * @return 0 if the two arrays are equal, a value less than 0 if the first differing byte of the first array is less than + * the corresponding byte of the second array, and a value greater than 0 if it is greater + */ + private int compareByteArray(byte[] byteArray1, byte[] byteArray2) { + assert
(byteArray1.length == byteArray2.length); + int i = 0; + int j = 0; + while (i < byteArray1.length && j < byteArray2.length) { + int isLess = Byte.compareUnsigned(byteArray1[i], byteArray2[j]); + if (isLess == 0) { + i++; + j++; + } else { + return isLess; + } + } + return 0; + } + + /** + * Mapping ICMP's message type and code into a port-like notion for ordering the request or response + */ + enum ICMPType { + ECHO_REPLY((byte) 0, (byte) 8), + ECHO((byte) 8, (byte) 0), + RTR_ADVERT((byte) 9, (byte) 10), + RTR_SOLICIT((byte) 10, (byte) 9), + TSTAMP((byte) 13, (byte) 14), + TSTAMP_REPLY((byte) 14, (byte) 13), + INFO((byte) 15, (byte) 16), + INFO_REPLY((byte) 16, (byte) 15), + MASK((byte) 17, (byte) 18), + MASK_REPLY((byte) 18, (byte) 17); + + private final byte type; + private final byte code; + + ICMPType(byte type, byte code) { + this.type = type; + this.code = code; + } + + private static final Map<Byte, Byte> ICMPTypeMapper = Arrays.stream(values()).collect(Collectors.toMap(t -> t.type, t -> t.code)); + + /** + * Takes the message type of ICMP and derives equivalent message code + * @param type the message type of ICMP + * @return the equivalent message code + */ + public static Byte getEquivalentCode(int type) { + return ICMPTypeMapper.get(Integer.valueOf(type).byteValue()); + } + } + + /** + * Mapping IPv6-ICMP's message type and code into a port-like notion for ordering the request or response + */ + enum ICMPv6Type { + ECHO_REQUEST((byte) 128, (byte) 129), + ECHO_REPLY((byte) 129, (byte) 128), + MLD_LISTENER_QUERY((byte) 130, (byte) 131), + MLD_LISTENER_REPORT((byte) 131, (byte) 130), + ND_ROUTER_SOLICIT((byte) 133, (byte) 134), + ND_ROUTER_ADVERT((byte) 134, (byte) 133), + ND_NEIGHBOR_SOLICIT((byte) 135, (byte) 136), + ND_NEIGHBOR_ADVERT((byte) 136, (byte) 135), + WRU_REQUEST((byte) 139, (byte) 140), + WRU_REPLY((byte) 140, (byte) 139), + HAAD_REQUEST((byte) 144, (byte) 145), + HAAD_REPLY((byte) 145, (byte) 144); + + private final byte type; + private final byte code; + + ICMPv6Type(byte type, byte code) { + this.type = type; + this.code = code; + } + + private static final Map<Byte, Byte> ICMPTypeMapper = Arrays.stream(values()).collect(Collectors.toMap(t -> t.type, t -> t.code)); + + /** + * Takes the message type of IPv6-ICMP and derives equivalent message code + * @param type the message type of IPv6-ICMP + * @return the equivalent message code + */ + public static Byte getEquivalentCode(int type) { + return ICMPTypeMapper.get(Integer.valueOf(type).byteValue()); + } + } + + /** + * An enumeration of the supported network protocols + */ + enum Protocol { + ICMP((byte) 1, false), + TCP((byte) 6, true), + UDP((byte) 17, true), + ICMP_V6((byte) 58, false), + SCTP((byte) 132, true); + + private final byte protocolCode; + private final boolean isTransportProtocol; + + Protocol(int ianaNumber, boolean isTransportProtocol) { + this.protocolCode = Integer.valueOf(ianaNumber).byteValue(); + this.isTransportProtocol = isTransportProtocol; + } + + public static final Map<Byte, Protocol> protocolCodeMap = Arrays.stream(values()) + .collect(Collectors.toMap(Protocol::getProtocolCode, p -> p)); + + public static Protocol fromProtocolName(String protocolName) { + String name = protocolName.toUpperCase(Locale.ROOT); + if (name.equals("IPV6-ICMP")) { + return Protocol.ICMP_V6; + } + try { + return valueOf(name); + } catch (IllegalArgumentException e) { + return null; + } + } + + public byte getProtocolCode() { + return this.protocolCode; + } + + public boolean isTransportProtocol() { + return 
this.isTransportProtocol; + } + } + + public static class Factory implements Processor.Factory { + @Override + public CommunityIdProcessor create( + Map<String, Processor.Factory> registry, + String processorTag, + String description, + Map<String, Object> config + ) throws Exception { + String sourceIPField = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "source_ip_field"); + String sourcePortField = ConfigurationUtils.readOptionalStringProperty(TYPE, processorTag, config, "source_port_field"); + String destinationIPField = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "destination_ip_field"); + String destinationPortField = ConfigurationUtils.readOptionalStringProperty( + TYPE, + processorTag, + config, + "destination_port_field" + ); + String ianaProtocolNumberField = ConfigurationUtils.readOptionalStringProperty( + TYPE, + processorTag, + config, + "iana_protocol_number_field" + ); + String protocolField = ConfigurationUtils.readOptionalStringProperty(TYPE, processorTag, config, "protocol_field"); + String icmpTypeField = ConfigurationUtils.readOptionalStringProperty(TYPE, processorTag, config, "icmp_type_field"); + String icmpCodeField = ConfigurationUtils.readOptionalStringProperty(TYPE, processorTag, config, "icmp_code_field"); + int seed = ConfigurationUtils.readIntProperty(TYPE, processorTag, config, "seed", 0); + if (seed < MIN_SEED || seed > MAX_SEED) { + throw newConfigurationException(TYPE, processorTag, "seed", "seed must be between 0 and 65535"); + } + + String targetField = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "target_field", "community_id"); + boolean ignoreMissing = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "ignore_missing", false); + + return new CommunityIdProcessor( + processorTag, + description, + sourceIPField, + sourcePortField, + destinationIPField, + destinationPortField, + ianaProtocolNumberField, + protocolField, + icmpTypeField, + icmpCodeField, + seed, + targetField, + ignoreMissing + ); + } + } +} diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/CopyProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/CopyProcessor.java new file mode 100644 index 0000000000000..dec69df275130 --- /dev/null +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/CopyProcessor.java @@ -0,0 +1,161 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.ingest.common; + +import org.opensearch.core.common.Strings; +import org.opensearch.ingest.AbstractProcessor; +import org.opensearch.ingest.ConfigurationUtils; +import org.opensearch.ingest.IngestDocument; +import org.opensearch.ingest.Processor; +import org.opensearch.script.ScriptService; +import org.opensearch.script.TemplateScript; + +import java.util.Map; + +public final class CopyProcessor extends AbstractProcessor { + public static final String TYPE = "copy"; + + private final TemplateScript.Factory sourceField; + private final TemplateScript.Factory targetField; + + private final boolean ignoreMissing; + + private final boolean removeSource; + + private final boolean overrideTarget; + + CopyProcessor(String tag, String description, TemplateScript.Factory sourceField, TemplateScript.Factory targetField) { + this(tag, description, sourceField, targetField, false, false, false); + } + + CopyProcessor( + String tag, + String description, + TemplateScript.Factory sourceField, + TemplateScript.Factory targetField, + boolean ignoreMissing, + boolean removeSource, + boolean overrideTarget + ) { + super(tag, description); + this.sourceField = sourceField; + this.targetField = targetField; + this.ignoreMissing = ignoreMissing; + this.removeSource = removeSource; + this.overrideTarget = overrideTarget; + } + + public TemplateScript.Factory getSourceField() { + return sourceField; + } + + public TemplateScript.Factory getTargetField() { + return targetField; + } + + public boolean isIgnoreMissing() { + return ignoreMissing; + } + + public boolean isRemoveSource() { + return removeSource; + } + + public boolean isOverrideTarget() { + return overrideTarget; + } + + @Override + public IngestDocument execute(IngestDocument document) { + String source = document.renderTemplate(sourceField); + final boolean sourceFieldPathIsNullOrEmpty = Strings.isNullOrEmpty(source); + if (sourceFieldPathIsNullOrEmpty || document.hasField(source, true) == false) { + if (ignoreMissing) { + return document; + } else if (sourceFieldPathIsNullOrEmpty) { + throw new IllegalArgumentException("source field path cannot be null nor empty"); + } else { + throw new IllegalArgumentException("source field [" + source + "] doesn't exist"); + } + } + + String target = document.renderTemplate(targetField); + if (Strings.isNullOrEmpty(target)) { + throw new IllegalArgumentException("target field path cannot be null nor empty"); + } + if (source.equals(target)) { + throw new IllegalArgumentException("source field path and target field path cannot be same"); + } + + if (overrideTarget || document.hasField(target, true) == false || document.getFieldValue(target, Object.class) == null) { + Object sourceValue = document.getFieldValue(source, Object.class); + document.setFieldValue(target, IngestDocument.deepCopy(sourceValue)); + } else { + throw new IllegalArgumentException("target field [" + target + "] already exists"); + } + + if (removeSource) { + document.removeField(source); + } + + return document; + } + + @Override + public String getType() { + return TYPE; + } + + public static final class Factory implements Processor.Factory { + + private final ScriptService scriptService; + + public Factory(ScriptService scriptService) { + this.scriptService = scriptService; + } + + @Override + public CopyProcessor create( + Map<String, Processor.Factory> registry, + String processorTag, + String description, + Map<String, Object> config + ) throws Exception { + String sourceField = 
ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "source_field"); + TemplateScript.Factory sourceFieldTemplate = ConfigurationUtils.compileTemplate( + TYPE, + processorTag, + "source_field", + sourceField, + scriptService + ); + String targetField = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "target_field"); + TemplateScript.Factory targetFieldTemplate = ConfigurationUtils.compileTemplate( + TYPE, + processorTag, + "target_field", + targetField, + scriptService + ); + boolean ignoreMissing = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "ignore_missing", false); + boolean removeSource = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "remove_source", false); + boolean overrideTarget = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "override_target", false); + + return new CopyProcessor( + processorTag, + description, + sourceFieldTemplate, + targetFieldTemplate, + ignoreMissing, + removeSource, + overrideTarget + ); + } + } +} diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/DotExpanderProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/DotExpanderProcessor.java index 39c2d67ac0b85..0eab6334854ab 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/DotExpanderProcessor.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/DotExpanderProcessor.java @@ -118,25 +118,15 @@ public Processor create( ) throws Exception { String field = ConfigurationUtils.readStringProperty(TYPE, tag, config, "field"); if (field.contains(".") == false) { - throw ConfigurationUtils.newConfigurationException( - ConfigurationUtils.TAG_KEY, - tag, - "field", - "field does not contain a dot" - ); + throw ConfigurationUtils.newConfigurationException(TYPE, tag, "field", "field does not contain a dot"); } if (field.indexOf('.') == 0 || field.lastIndexOf('.') == field.length() - 1) { - throw ConfigurationUtils.newConfigurationException( - ConfigurationUtils.TAG_KEY, - tag, - "field", - "Field can't start or end with a dot" - ); + throw ConfigurationUtils.newConfigurationException(TYPE, tag, "field", "Field can't start or end with a dot"); } int firstIndex = -1; for (int index = field.indexOf('.'); index != -1; index = field.indexOf('.', index + 1)) { if (index - firstIndex == 1) { - throw ConfigurationUtils.newConfigurationException(ConfigurationUtils.TAG_KEY, tag, "field", "No space between dots"); + throw ConfigurationUtils.newConfigurationException(TYPE, tag, "field", "No space between dots"); } firstIndex = index; } diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/FailProcessorException.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/FailProcessorException.java index 37320c0e900a5..7e114023fb86f 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/FailProcessorException.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/FailProcessorException.java @@ -37,7 +37,7 @@ /** * Exception class thrown by {@link FailProcessor}. - * + * <p> * This exception is caught in the {@link CompoundProcessor} and * then changes the state of {@link IngestDocument}. This * exception should get serialized. 
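A note for reviewers of the CommunityIdProcessor added earlier in this change: the digest input is the 2-byte big-endian seed, the ordered source/destination IP bytes, the protocol byte, one zero padding byte, and the 2-byte big-endian ports, hashed with SHA-1 and base64-encoded under the "1" version prefix. Below is a minimal, self-contained sketch of that byte layout using only JDK classes; the class name, addresses, and ports are hypothetical illustration values, not part of this change.

import java.net.InetAddress;
import java.security.MessageDigest;
import java.util.Base64;

public class CommunityIdSketch {
    public static void main(String[] args) throws Exception {
        int seed = 0;                    // the processor's default seed
        byte[] srcIp = InetAddress.getByName("10.0.0.1").getAddress();  // hypothetical endpoints
        byte[] dstIp = InetAddress.getByName("10.0.0.2").getAddress();
        byte protocol = 6;               // IANA protocol number for TCP
        int srcPort = 34855;             // hypothetical client port
        int dstPort = 80;

        // The processor first orders the endpoints so the smaller ip:port pair
        // comes first (unsigned byte-wise IP comparison, then ports).
        // 10.0.0.1 < 10.0.0.2 here, so no swap is needed for these inputs.

        MessageDigest sha1 = MessageDigest.getInstance("SHA-1");
        sha1.update(twoBytes(seed));     // 2-byte big-endian seed
        sha1.update(srcIp);
        sha1.update(dstIp);
        sha1.update(protocol);
        sha1.update((byte) 0);           // padding byte
        sha1.update(twoBytes(srcPort));  // 2-byte big-endian ports
        sha1.update(twoBytes(dstPort));

        // Version prefix plus the base64-encoded SHA-1 digest, e.g. "1:...="
        System.out.println("1:" + Base64.getEncoder().encodeToString(sha1.digest()));
    }

    // Same conversion as the processor's intToTwoByteArray
    private static byte[] twoBytes(int value) {
        return new byte[] { (byte) (value >>> 8), (byte) value };
    }
}

For ICMP and IPv6-ICMP the processor reuses this same layout by substituting the message type for the source port and the (equivalent) message code for the destination port, as described in its execute() method.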
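Similarly, a rough map-based illustration of the CopyProcessor semantics introduced in this change. This is not the IngestDocument API: the field names are made up, and template rendering, deep copying, and dotted-path resolution are deliberately omitted.

import java.util.HashMap;
import java.util.Map;

public class CopySemanticsSketch {
    // Mirrors the decision order in CopyProcessor.execute(): missing source,
    // target validation, the override_target check, then optional source removal.
    static void copy(Map<String, Object> doc, String source, String target,
                     boolean ignoreMissing, boolean removeSource, boolean overrideTarget) {
        if (!doc.containsKey(source)) {
            if (ignoreMissing) return;
            throw new IllegalArgumentException("source field [" + source + "] doesn't exist");
        }
        if (source.equals(target)) {
            throw new IllegalArgumentException("source and target field paths cannot be the same");
        }
        if (!overrideTarget && doc.get(target) != null) {
            throw new IllegalArgumentException("target field [" + target + "] already exists");
        }
        doc.put(target, doc.get(source)); // the real processor deep-copies the value
        if (removeSource) {
            doc.remove(source);
        }
    }

    public static void main(String[] args) {
        Map<String, Object> doc = new HashMap<>(Map.of("message", "hello"));
        copy(doc, "message", "message_copy", false, true, false);
        System.out.println(doc); // prints {message_copy=hello}
    }
}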
diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ForEachProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ForEachProcessor.java index 741a4fb29cfb8..b7c417f5f44a5 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ForEachProcessor.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ForEachProcessor.java @@ -53,10 +53,10 @@ /** * A processor that for each value in a list executes a one or more processors. - * + * <p> * This can be useful in cases to do string operations on json array of strings, * or remove a field from objects inside a json array. - * + * <p> * Note that this processor is experimental. */ public final class ForEachProcessor extends AbstractProcessor implements WrappingProcessor { diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/GrokProcessorGetAction.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/GrokProcessorGetAction.java index 300106e435114..9c79a06a45f58 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/GrokProcessorGetAction.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/GrokProcessorGetAction.java @@ -31,15 +31,15 @@ package org.opensearch.ingest.common; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.ActionResponse; import org.opensearch.action.ActionType; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.HandledTransportAction; import org.opensearch.client.node.NodeClient; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContentObject; diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java index c786785a008d7..0f8b248fd5af8 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java @@ -33,7 +33,6 @@ package org.opensearch.ingest.common; import org.opensearch.action.ActionRequest; -import org.opensearch.action.ActionResponse; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.settings.ClusterSettings; @@ -42,6 +41,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsFilter; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionResponse; import org.opensearch.grok.Grok; import org.opensearch.grok.MatcherWatchdog; import org.opensearch.ingest.DropProcessor; @@ -98,7 +98,7 @@ public Map<String, Processor.Factory> getProcessors(Processor.Parameters paramet processors.put(ScriptProcessor.TYPE, new ScriptProcessor.Factory(parameters.scriptService)); processors.put(DotExpanderProcessor.TYPE, new DotExpanderProcessor.Factory()); processors.put(JsonProcessor.TYPE, new JsonProcessor.Factory()); - processors.put(KeyValueProcessor.TYPE, new KeyValueProcessor.Factory()); + 
processors.put(KeyValueProcessor.TYPE, new KeyValueProcessor.Factory(parameters.scriptService)); processors.put(URLDecodeProcessor.TYPE, new URLDecodeProcessor.Factory()); processors.put(BytesProcessor.TYPE, new BytesProcessor.Factory()); processors.put(PipelineProcessor.TYPE, new PipelineProcessor.Factory(parameters.ingestService)); @@ -106,6 +106,9 @@ public Map<String, Processor.Factory> getProcessors(Processor.Parameters paramet processors.put(DropProcessor.TYPE, new DropProcessor.Factory()); processors.put(HtmlStripProcessor.TYPE, new HtmlStripProcessor.Factory()); processors.put(CsvProcessor.TYPE, new CsvProcessor.Factory()); + processors.put(CopyProcessor.TYPE, new CopyProcessor.Factory(parameters.scriptService)); + processors.put(RemoveByPatternProcessor.TYPE, new RemoveByPatternProcessor.Factory()); + processors.put(CommunityIdProcessor.TYPE, new CommunityIdProcessor.Factory()); return Collections.unmodifiableMap(processors); } diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/JsonProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/JsonProcessor.java index d3a0f2f7a3783..6de3e236ee40c 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/JsonProcessor.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/JsonProcessor.java @@ -32,12 +32,12 @@ package org.opensearch.ingest.common; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.ingest.AbstractProcessor; import org.opensearch.ingest.ConfigurationUtils; import org.opensearch.ingest.IngestDocument; diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/KeyValueProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/KeyValueProcessor.java index ff3cca4ce111f..73f03b3cb2e0f 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/KeyValueProcessor.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/KeyValueProcessor.java @@ -33,10 +33,13 @@ package org.opensearch.ingest.common; import org.opensearch.common.util.set.Sets; +import org.opensearch.core.common.Strings; import org.opensearch.ingest.AbstractProcessor; import org.opensearch.ingest.ConfigurationUtils; import org.opensearch.ingest.IngestDocument; import org.opensearch.ingest.Processor; +import org.opensearch.script.ScriptService; +import org.opensearch.script.TemplateScript; import java.util.Collections; import java.util.List; @@ -56,24 +59,24 @@ public final class KeyValueProcessor extends AbstractProcessor { private static final Pattern STRIP_BRACKETS = Pattern.compile("(^[\\(\\[<\"'])|([\\]\\)>\"']$)"); - private final String field; + private final TemplateScript.Factory field; private final String fieldSplit; private final String valueSplit; private final Set<String> includeKeys; private final Set<String> excludeKeys; - private final String targetField; + private final TemplateScript.Factory targetField; private final boolean ignoreMissing; private final Consumer<IngestDocument> execution; KeyValueProcessor( String tag, String description, - String field, + TemplateScript.Factory field, String fieldSplit, String valueSplit, 
Set<String> includeKeys, Set<String> excludeKeys, - String targetField, + TemplateScript.Factory targetField, boolean ignoreMissing, String trimKey, String trimValue, @@ -106,10 +109,10 @@ public final class KeyValueProcessor extends AbstractProcessor { private static Consumer<IngestDocument> buildExecution( String fieldSplit, String valueSplit, - String field, + TemplateScript.Factory field, Set<String> includeKeys, Set<String> excludeKeys, - String targetField, + TemplateScript.Factory targetField, boolean ignoreMissing, String trimKey, String trimValue, @@ -130,41 +133,62 @@ private static Consumer<IngestDocument> buildExecution( keyFilter = key -> includeKeys.contains(key) && excludeKeys.contains(key) == false; } } - final String fieldPathPrefix; - String keyPrefix = prefix == null ? "" : prefix; - if (targetField == null) { - fieldPathPrefix = keyPrefix; - } else { - fieldPathPrefix = targetField + "." + keyPrefix; - } - final Function<String, String> keyPrefixer; - if (fieldPathPrefix.isEmpty()) { - keyPrefixer = val -> val; - } else { - keyPrefixer = val -> fieldPathPrefix + val; - } - final Function<String, String[]> fieldSplitter = buildSplitter(fieldSplit, true); - Function<String, String[]> valueSplitter = buildSplitter(valueSplit, false); - final Function<String, String> keyTrimmer = buildTrimmer(trimKey); - final Function<String, String> bracketStrip; - if (stripBrackets) { - bracketStrip = val -> STRIP_BRACKETS.matcher(val).replaceAll(""); - } else { - bracketStrip = val -> val; - } - final Function<String, String> valueTrimmer = buildTrimmer(trimValue); + return document -> { - String value = document.getFieldValue(field, String.class, ignoreMissing); + final String fieldPathPrefix; + String keyPrefix = prefix == null ? "" : prefix; + if (targetField != null) { + String targetFieldPath = document.renderTemplate(targetField); + if (!Strings.isNullOrEmpty((targetFieldPath))) { + fieldPathPrefix = targetFieldPath + "." + keyPrefix; + } else { + fieldPathPrefix = keyPrefix; + } + } else { + fieldPathPrefix = keyPrefix; + } + + final Function<String, String> keyPrefixer; + if (fieldPathPrefix.isEmpty()) { + keyPrefixer = val -> val; + } else { + keyPrefixer = val -> fieldPathPrefix + val; + } + final Function<String, String[]> fieldSplitter = buildSplitter(fieldSplit, true); + Function<String, String[]> valueSplitter = buildSplitter(valueSplit, false); + final Function<String, String> keyTrimmer = buildTrimmer(trimKey); + final Function<String, String> bracketStrip; + if (stripBrackets) { + bracketStrip = val -> STRIP_BRACKETS.matcher(val).replaceAll(""); + } else { + bracketStrip = val -> val; + } + final Function<String, String> valueTrimmer = buildTrimmer(trimValue); + + String path = document.renderTemplate(field); + final boolean fieldPathNullOrEmpty = Strings.isNullOrEmpty(path); + if (fieldPathNullOrEmpty || document.hasField(path, true) == false) { + if (ignoreMissing) { + return; + } else if (fieldPathNullOrEmpty) { + throw new IllegalArgumentException("field path cannot be null nor empty"); + } else { + throw new IllegalArgumentException("field [" + path + "] doesn't exist"); + } + } + + String value = document.getFieldValue(path, String.class, ignoreMissing); if (value == null) { if (ignoreMissing) { return; } - throw new IllegalArgumentException("field [" + field + "] is null, cannot extract key-value pairs."); + throw new IllegalArgumentException("field [" + path + "] is null, cannot extract key-value pairs. 
"); } + for (String part : fieldSplitter.apply(value)) { String[] kv = valueSplitter.apply(part); if (kv.length != 2) { - throw new IllegalArgumentException("field [" + field + "] does not contain value_split [" + valueSplit + "]"); + throw new IllegalArgumentException("field [" + path + "] does not contain value_split [" + valueSplit + "]"); } String key = keyTrimmer.apply(kv[0]); if (keyFilter.test(key)) { @@ -193,7 +217,7 @@ private static Function<String, String[]> buildSplitter(String split, boolean fi } } - String getField() { + TemplateScript.Factory getField() { return field; } @@ -213,7 +237,7 @@ Set<String> getExcludeKeys() { return excludeKeys; } - String getTargetField() { + TemplateScript.Factory getTargetField() { return targetField; } @@ -241,6 +265,12 @@ public String getType() { } public static class Factory implements Processor.Factory { + private final ScriptService scriptService; + + public Factory(ScriptService scriptService) { + this.scriptService = scriptService; + } + @Override public KeyValueProcessor create( Map<String, Processor.Factory> registry, @@ -249,7 +279,13 @@ public KeyValueProcessor create( Map<String, Object> config ) throws Exception { String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field"); + TemplateScript.Factory fieldTemplate = ConfigurationUtils.compileTemplate(TYPE, processorTag, "field", field, scriptService); String targetField = ConfigurationUtils.readOptionalStringProperty(TYPE, processorTag, config, "target_field"); + TemplateScript.Factory targetFieldTemplate = null; + if (!Strings.isNullOrEmpty(targetField)) { + targetFieldTemplate = ConfigurationUtils.compileTemplate(TYPE, processorTag, "target_field", targetField, scriptService); + } + String fieldSplit = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field_split"); String valueSplit = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "value_split"); String trimKey = ConfigurationUtils.readOptionalStringProperty(TYPE, processorTag, config, "trim_key"); @@ -270,12 +306,12 @@ public KeyValueProcessor create( return new KeyValueProcessor( processorTag, description, - field, + fieldTemplate, fieldSplit, valueSplit, includeKeys, excludeKeys, - targetField, + targetFieldTemplate, ignoreMissing, trimKey, trimValue, diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ProcessorsAllowlistExtension.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ProcessorsAllowlistExtension.java index 1ba16ae2ccda4..6dff29ec1be44 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ProcessorsAllowlistExtension.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ProcessorsAllowlistExtension.java @@ -32,9 +32,9 @@ package org.opensearch.ingest.common; -import org.opensearch.painless.spi.PainlessExtension; import org.opensearch.painless.spi.Allowlist; import org.opensearch.painless.spi.AllowlistLoader; +import org.opensearch.painless.spi.PainlessExtension; import org.opensearch.script.IngestScript; import org.opensearch.script.ScriptContext; diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveByPatternProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveByPatternProcessor.java new file mode 100644 index 0000000000000..da87f5201db72 --- /dev/null +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveByPatternProcessor.java @@ -0,0 +1,180 @@ +/* + * 
SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ingest.common; + +import org.opensearch.common.Nullable; +import org.opensearch.common.ValidationException; +import org.opensearch.common.regex.Regex; +import org.opensearch.core.common.Strings; +import org.opensearch.ingest.AbstractProcessor; +import org.opensearch.ingest.ConfigurationUtils; +import org.opensearch.ingest.IngestDocument; +import org.opensearch.ingest.Processor; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.opensearch.ingest.ConfigurationUtils.newConfigurationException; + +/** + * Processor that removes existing fields by field patterns, or removes all but the fields matching the exclude patterns. + */ +public final class RemoveByPatternProcessor extends AbstractProcessor { + + public static final String TYPE = "remove_by_pattern"; + private final List<String> fieldPatterns; + private final List<String> excludeFieldPatterns; + + RemoveByPatternProcessor( + String tag, + String description, + @Nullable List<String> fieldPatterns, + @Nullable List<String> excludeFieldPatterns + ) { + super(tag, description); + if (fieldPatterns != null && excludeFieldPatterns != null || fieldPatterns == null && excludeFieldPatterns == null) { + throw new IllegalArgumentException("either fieldPatterns or excludeFieldPatterns must be set"); + } + if (fieldPatterns == null) { + this.fieldPatterns = null; + this.excludeFieldPatterns = new ArrayList<>(excludeFieldPatterns); + } else { + this.fieldPatterns = new ArrayList<>(fieldPatterns); + this.excludeFieldPatterns = null; + } + } + + public List<String> getFieldPatterns() { + return fieldPatterns; + } + + public List<String> getExcludeFieldPatterns() { + return excludeFieldPatterns; + } + + @Override + public IngestDocument execute(IngestDocument document) { + Set<String> existingFields = new HashSet<>(document.getSourceAndMetadata().keySet()); + Set<String> metadataFields = document.getMetadata() + .keySet() + .stream() + .map(IngestDocument.Metadata::getFieldName) + .collect(Collectors.toSet()); + + if (fieldPatterns != null && !fieldPatterns.isEmpty()) { + existingFields.forEach(field -> { + // ignore metadata fields such as _index, _id, etc. + if (!metadataFields.contains(field)) { + final boolean matched = fieldPatterns.stream().anyMatch(pattern -> Regex.simpleMatch(pattern, field)); + if (matched) { + document.removeField(field); + } + } + }); + } + + if (excludeFieldPatterns != null && !excludeFieldPatterns.isEmpty()) { + existingFields.forEach(field -> { + // ignore metadata fields such as _index, _id, etc. 
+ if (!metadataFields.contains(field)) { + final boolean matched = excludeFieldPatterns.stream().anyMatch(pattern -> Regex.simpleMatch(pattern, field)); + if (!matched) { + document.removeField(field); + } + } + }); + } + + return document; + } + + @Override + public String getType() { + return TYPE; + } + + public static final class Factory implements Processor.Factory { + + public Factory() {} + + @Override + public RemoveByPatternProcessor create( + Map<String, Processor.Factory> registry, + String processorTag, + String description, + Map<String, Object> config + ) throws Exception { + final List<String> fieldPatterns = new ArrayList<>(); + final List<String> excludeFieldPatterns = new ArrayList<>(); + final Object fieldPattern = ConfigurationUtils.readOptionalObject(config, "field_pattern"); + final Object excludeFieldPattern = ConfigurationUtils.readOptionalObject(config, "exclude_field_pattern"); + + if (fieldPattern == null && excludeFieldPattern == null || fieldPattern != null && excludeFieldPattern != null) { + throw newConfigurationException( + TYPE, + processorTag, + "field_pattern", + "either field_pattern or exclude_field_pattern must be set" + ); + } + + if (fieldPattern != null) { + if (fieldPattern instanceof List) { + @SuppressWarnings("unchecked") + List<String> fieldPatternList = (List<String>) fieldPattern; + fieldPatterns.addAll(fieldPatternList); + } else { + fieldPatterns.add((String) fieldPattern); + } + validateFieldPatterns(processorTag, fieldPatterns, "field_pattern"); + return new RemoveByPatternProcessor(processorTag, description, fieldPatterns, null); + } else { + if (excludeFieldPattern instanceof List) { + @SuppressWarnings("unchecked") + List<String> excludeFieldPatternList = (List<String>) excludeFieldPattern; + excludeFieldPatterns.addAll(excludeFieldPatternList); + } else { + excludeFieldPatterns.add((String) excludeFieldPattern); + } + validateFieldPatterns(processorTag, excludeFieldPatterns, "exclude_field_pattern"); + return new RemoveByPatternProcessor(processorTag, description, null, excludeFieldPatterns); + } + } + + private void validateFieldPatterns(String processorTag, List<String> patterns, String patternKey) { + List<String> validationErrors = new ArrayList<>(); + for (String fieldPattern : patterns) { + if (fieldPattern.contains("#")) { + validationErrors.add(patternKey + " [" + fieldPattern + "] must not contain a '#'"); + } + if (fieldPattern.contains(":")) { + validationErrors.add(patternKey + " [" + fieldPattern + "] must not contain a ':'"); + } + if (fieldPattern.startsWith("_")) { + validationErrors.add(patternKey + " [" + fieldPattern + "] must not start with '_'"); + } + if (Strings.validFileNameExcludingAstrix(fieldPattern) == false) { + validationErrors.add( + patternKey + " [" + fieldPattern + "] must not contain the following characters " + Strings.INVALID_FILENAME_CHARS + ); + } + } + + if (validationErrors.size() > 0) { + ValidationException validationException = new ValidationException(); + validationException.addValidationErrors(validationErrors); + throw newConfigurationException(TYPE, processorTag, patternKey, validationException.getMessage()); + } + } + } +} diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveProcessor.java index 5da3b6bea7bc2..e6d151aec9be1 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveProcessor.java +++ 
b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveProcessor.java @@ -32,6 +32,9 @@ package org.opensearch.ingest.common; +import org.opensearch.common.Nullable; +import org.opensearch.core.common.Strings; +import org.opensearch.index.VersionType; import org.opensearch.ingest.AbstractProcessor; import org.opensearch.ingest.ConfigurationUtils; import org.opensearch.ingest.IngestDocument; @@ -40,10 +43,15 @@ import org.opensearch.script.TemplateScript; import java.util.ArrayList; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Objects; +import java.util.Set; import java.util.stream.Collectors; +import static org.opensearch.ingest.ConfigurationUtils.newConfigurationException; + /** * Processor that removes existing fields. Nothing happens if the field is not present. */ @@ -52,11 +60,28 @@ public final class RemoveProcessor extends AbstractProcessor { public static final String TYPE = "remove"; private final List<TemplateScript.Factory> fields; + private final List<TemplateScript.Factory> excludeFields; private final boolean ignoreMissing; - RemoveProcessor(String tag, String description, List<TemplateScript.Factory> fields, boolean ignoreMissing) { + RemoveProcessor( + String tag, + String description, + @Nullable List<TemplateScript.Factory> fields, + @Nullable List<TemplateScript.Factory> excludeFields, + boolean ignoreMissing + ) { super(tag, description); - this.fields = new ArrayList<>(fields); + if (fields == null && excludeFields == null || fields != null && excludeFields != null) { + throw new IllegalArgumentException("either fields or excludeFields must be set"); + } + if (fields != null) { + this.fields = new ArrayList<>(fields); + this.excludeFields = null; + } else { + this.fields = null; + this.excludeFields = new ArrayList<>(excludeFields); + } + this.ignoreMissing = ignoreMissing; } @@ -64,18 +89,76 @@ public List<TemplateScript.Factory> getFields() { return fields; } + public List<TemplateScript.Factory> getExcludeFields() { + return excludeFields; + } + @Override public IngestDocument execute(IngestDocument document) { - if (ignoreMissing) { + if (fields != null && !fields.isEmpty()) { fields.forEach(field -> { String path = document.renderTemplate(field); - if (document.hasField(path)) { - document.removeField(path); + final boolean fieldPathIsNullOrEmpty = Strings.isNullOrEmpty(path); + if (fieldPathIsNullOrEmpty || document.hasField(path) == false) { + if (ignoreMissing) { + return; + } else if (fieldPathIsNullOrEmpty) { + throw new IllegalArgumentException("field path cannot be null nor empty"); + } else { + throw new IllegalArgumentException("field [" + path + "] doesn't exist"); + } } + + // cannot remove _index, _version and _version_type. 
+ if (path.equals(IngestDocument.Metadata.INDEX.getFieldName()) + || path.equals(IngestDocument.Metadata.VERSION.getFieldName()) + || path.equals(IngestDocument.Metadata.VERSION_TYPE.getFieldName())) { + throw new IllegalArgumentException("cannot remove metadata field [" + path + "]"); + } + // removing _id is disallowed when there's an external version specified in the request + if (path.equals(IngestDocument.Metadata.ID.getFieldName()) + && document.hasField(IngestDocument.Metadata.VERSION_TYPE.getFieldName())) { + String versionType = document.getFieldValue(IngestDocument.Metadata.VERSION_TYPE.getFieldName(), String.class); + if (!Objects.equals(versionType, VersionType.toString(VersionType.INTERNAL))) { + Long version = document.getFieldValue(IngestDocument.Metadata.VERSION.getFieldName(), Long.class, true); + throw new IllegalArgumentException( + "cannot remove metadata field [_id] when specifying external version for the document, version: " + + version + + ", version_type: " + + versionType + ); + } + } + document.removeField(path); }); - } else { - fields.forEach(document::removeField); } + + if (excludeFields != null && !excludeFields.isEmpty()) { + Set<String> excludeFieldSet = new HashSet<>(); + excludeFields.forEach(field -> { + String path = document.renderTemplate(field); + // ignore the empty or null field path + if (!Strings.isNullOrEmpty(path)) { + excludeFieldSet.add(path); + } + }); + + if (!excludeFieldSet.isEmpty()) { + Set<String> existingFields = new HashSet<>(document.getSourceAndMetadata().keySet()); + Set<String> metadataFields = document.getMetadata() + .keySet() + .stream() + .map(IngestDocument.Metadata::getFieldName) + .collect(Collectors.toSet()); + existingFields.forEach(field -> { + // ignore metadata fields such as _index, _id, etc. 
+ if (!metadataFields.contains(field) && !excludeFieldSet.contains(field)) { + document.removeField(field); + } + }); + } + } + return document; } @@ -100,20 +183,41 @@ public RemoveProcessor create( Map<String, Object> config ) throws Exception { final List<String> fields = new ArrayList<>(); - final Object field = ConfigurationUtils.readObject(TYPE, processorTag, config, "field"); - if (field instanceof List) { - @SuppressWarnings("unchecked") - List<String> stringList = (List<String>) field; - fields.addAll(stringList); - } else { - fields.add((String) field); + final List<String> excludeFields = new ArrayList<>(); + final Object field = ConfigurationUtils.readOptionalObject(config, "field"); + final Object excludeField = ConfigurationUtils.readOptionalObject(config, "exclude_field"); + + if (field == null && excludeField == null || field != null && excludeField != null) { + throw newConfigurationException(TYPE, processorTag, "field", "either field or exclude_field must be set"); } - final List<TemplateScript.Factory> compiledTemplates = fields.stream() - .map(f -> ConfigurationUtils.compileTemplate(TYPE, processorTag, "field", f, scriptService)) - .collect(Collectors.toList()); boolean ignoreMissing = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "ignore_missing", false); - return new RemoveProcessor(processorTag, description, compiledTemplates, ignoreMissing); + + if (field != null) { + if (field instanceof List) { + @SuppressWarnings("unchecked") + List<String> stringList = (List<String>) field; + fields.addAll(stringList); + } else { + fields.add((String) field); + } + List<TemplateScript.Factory> fieldCompiledTemplates = fields.stream() + .map(f -> ConfigurationUtils.compileTemplate(TYPE, processorTag, "field", f, scriptService)) + .collect(Collectors.toList()); + return new RemoveProcessor(processorTag, description, fieldCompiledTemplates, null, ignoreMissing); + } else { + if (excludeField instanceof List) { + @SuppressWarnings("unchecked") + List<String> stringList = (List<String>) excludeField; + excludeFields.addAll(stringList); + } else { + excludeFields.add((String) excludeField); + } + List<TemplateScript.Factory> excludeFieldCompiledTemplates = excludeFields.stream() + .map(f -> ConfigurationUtils.compileTemplate(TYPE, processorTag, "exclude_field", f, scriptService)) + .collect(Collectors.toList()); + return new RemoveProcessor(processorTag, description, null, excludeFieldCompiledTemplates, ignoreMissing); + } } } } diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RenameProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RenameProcessor.java index af356eb10d79c..7564bbdf95f45 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RenameProcessor.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RenameProcessor.java @@ -32,6 +32,7 @@ package org.opensearch.ingest.common; +import org.opensearch.core.common.Strings; import org.opensearch.ingest.AbstractProcessor; import org.opensearch.ingest.ConfigurationUtils; import org.opensearch.ingest.IngestDocument; @@ -80,9 +81,12 @@ boolean isIgnoreMissing() { @Override public IngestDocument execute(IngestDocument document) { String path = document.renderTemplate(field); - if (document.hasField(path, true) == false) { + final boolean fieldPathIsNullOrEmpty = Strings.isNullOrEmpty(path); + if (fieldPathIsNullOrEmpty || document.hasField(path, true) == false) { if (ignoreMissing) { return document; + } else 
if (fieldPathIsNullOrEmpty) { + throw new IllegalArgumentException("field path cannot be null nor empty"); } else { throw new IllegalArgumentException("field [" + path + "] doesn't exist"); } diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ScriptProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ScriptProcessor.java index 5f61091495cd5..d1b4a0961b7bd 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ScriptProcessor.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ScriptProcessor.java @@ -33,14 +33,14 @@ package org.opensearch.ingest.common; import org.opensearch.common.Nullable; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.util.CollectionUtils; import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.util.CollectionUtils; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.ingest.AbstractProcessor; import org.opensearch.ingest.IngestDocument; import org.opensearch.ingest.Processor; @@ -102,8 +102,11 @@ public IngestDocument execute(IngestDocument document) { } else { ingestScript = precompiledIngestScript; } - ingestScript.execute(document.getSourceAndMetadata()); - CollectionUtils.ensureNoSelfReferences(document.getSourceAndMetadata(), "ingest script"); + IngestDocument mutableDocument = new IngestDocument(document); + ingestScript.execute(mutableDocument.getSourceAndMetadata()); + CollectionUtils.ensureNoSelfReferences(mutableDocument.getSourceAndMetadata(), "ingest script"); + document.getSourceAndMetadata().clear(); + document.getSourceAndMetadata().putAll(mutableDocument.getSourceAndMetadata()); return document; } @@ -137,7 +140,7 @@ public ScriptProcessor create( try ( XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent).map(config); InputStream stream = BytesReference.bytes(builder).streamInput(); - XContentParser parser = XContentType.JSON.xContent() + XContentParser parser = MediaTypeRegistry.JSON.xContent() .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream) ) { Script script = Script.parse(parser); diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/BytesProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/BytesProcessorTests.java index bbd9ff4c8b912..ce8c182b60a61 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/BytesProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/BytesProcessorTests.java @@ -33,15 +33,14 @@ package org.opensearch.ingest.common; import org.opensearch.OpenSearchException; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.OpenSearchParseException; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.ingest.IngestDocument; import org.opensearch.ingest.Processor; import org.opensearch.ingest.RandomDocumentPicks; import 
org.hamcrest.CoreMatchers; -import static org.hamcrest.Matchers.equalTo; - public class BytesProcessorTests extends AbstractStringProcessorTestCase<Long> { private String modifiedInput; @@ -101,14 +100,16 @@ public void testMissingUnits() { assertThat(exception.getMessage(), CoreMatchers.containsString("unit is missing or unrecognized")); } - public void testFractional() throws Exception { + public void testFractional() { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, "1.1kb"); Processor processor = newProcessor(fieldName, randomBoolean(), fieldName); - processor.execute(ingestDocument); - assertThat(ingestDocument.getFieldValue(fieldName, expectedResultType()), equalTo(1126L)); - assertWarnings( - "Fractional bytes values are deprecated. Use non-fractional bytes values instead: [1.1kb] found for setting " + "[Ingest Field]" + OpenSearchParseException e = expectThrows(OpenSearchParseException.class, () -> processor.execute(ingestDocument)); + assertThat( + e.getMessage(), + CoreMatchers.containsString( + "Fractional bytes values have been deprecated since Legacy 6.2. " + "Use non-fractional bytes values instead:" + ) ); } } diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CommunityIdProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CommunityIdProcessorFactoryTests.java new file mode 100644 index 0000000000000..5edb44b8c64f2 --- /dev/null +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CommunityIdProcessorFactoryTests.java @@ -0,0 +1,117 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.ingest.common; + +import org.opensearch.OpenSearchException; +import org.opensearch.OpenSearchParseException; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.CoreMatchers.equalTo; + +public class CommunityIdProcessorFactoryTests extends OpenSearchTestCase { + private CommunityIdProcessor.Factory factory; + + @Before + public void init() { + factory = new CommunityIdProcessor.Factory(); + } + + public void testCreate() throws Exception { + boolean ignoreMissing = randomBoolean(); + int seed = randomIntBetween(0, 65535); + Map<String, Object> config = new HashMap<>(); + config.put("source_ip_field", "source_ip"); + config.put("source_port_field", "source_port"); + config.put("destination_ip_field", "destination_ip"); + config.put("destination_port_field", "destination_port"); + config.put("iana_protocol_number_field", "iana_protocol_number"); + config.put("protocol_field", "protocol"); + config.put("icmp_type_field", "icmp_type"); + config.put("icmp_code_field", "icmp_code"); + config.put("seed", seed); + config.put("target_field", "community_id_hash"); + config.put("ignore_missing", ignoreMissing); + String processorTag = randomAlphaOfLength(10); + CommunityIdProcessor communityIDProcessor = factory.create(null, processorTag, null, config); + assertThat(communityIDProcessor.getTag(), equalTo(processorTag)); + assertThat(communityIDProcessor.getSourceIPField(), equalTo("source_ip")); + assertThat(communityIDProcessor.getSourcePortField(), equalTo("source_port")); + assertThat(communityIDProcessor.getDestinationIPField(), equalTo("destination_ip")); + assertThat(communityIDProcessor.getDestinationPortField(), equalTo("destination_port")); + assertThat(communityIDProcessor.getIANAProtocolNumberField(), equalTo("iana_protocol_number")); + assertThat(communityIDProcessor.getProtocolField(), equalTo("protocol")); + assertThat(communityIDProcessor.getIcmpTypeField(), equalTo("icmp_type")); + assertThat(communityIDProcessor.getIcmpCodeField(), equalTo("icmp_code")); + assertThat(communityIDProcessor.getSeed(), equalTo(seed)); + assertThat(communityIDProcessor.getTargetField(), equalTo("community_id_hash")); + assertThat(communityIDProcessor.isIgnoreMissing(), equalTo(ignoreMissing)); + } + + public void testCreateWithSourceIPField() throws Exception { + Map<String, Object> config = new HashMap<>(); + try { + factory.create(null, null, null, config); + fail("factory create should have failed"); + } catch (OpenSearchParseException e) { + assertThat(e.getMessage(), equalTo("[source_ip_field] required property is missing")); + } + + config.put("source_ip_field", null); + try { + factory.create(null, null, null, config); + fail("factory create should have failed"); + } catch (OpenSearchParseException e) { + assertThat(e.getMessage(), equalTo("[source_ip_field] required property is missing")); + } + } + + public void testCreateWithDestinationIPField() throws Exception { + Map<String, Object> config = new HashMap<>(); + config.put("source_ip_field", "source_ip"); + try { + factory.create(null, null, null, config); + fail("factory create should have failed"); + } catch (OpenSearchParseException e) { + assertThat(e.getMessage(), equalTo("[destination_ip_field] required property is missing")); + } + + config.put("source_ip_field", "source_ip"); + config.put("destination_ip_field", null); + try { + factory.create(null, null, null, config); + fail("factory create should 
have failed"); + } catch (OpenSearchParseException e) { + assertThat(e.getMessage(), equalTo("[destination_ip_field] required property is missing")); + } + } + + public void testInvalidSeed() throws Exception { + Map<String, Object> config = new HashMap<>(); + int seed; + if (randomBoolean()) { + seed = -1; + } else { + seed = 65536; + } + config.put("source_ip_field", "source_ip"); + config.put("destination_ip_field", "destination_ip"); + config.put("seed", seed); + try { + factory.create(null, null, null, config); + fail("factory create should have failed"); + } catch (OpenSearchException e) { + assertThat(e.getMessage(), equalTo("[seed] seed must be between 0 and 65535")); + } + } + +} diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CommunityIdProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CommunityIdProcessorTests.java new file mode 100644 index 0000000000000..2bda9db80dbcc --- /dev/null +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CommunityIdProcessorTests.java @@ -0,0 +1,910 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ingest.common; + +import org.opensearch.ingest.IngestDocument; +import org.opensearch.ingest.Processor; +import org.opensearch.ingest.RandomDocumentPicks; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +public class CommunityIdProcessorTests extends OpenSearchTestCase { + + public void testResolveProtocol() throws Exception { + Map<String, Object> source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + source.put("source_port", 1000); + source.put("destination_port", 2000); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), source); + + String targetFieldName = randomAlphaOfLength(100); + boolean ignore_missing = randomBoolean(); + Processor processor = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + null, + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + ignore_missing + ); + if (ignore_missing) { + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldName), equalTo(false)); + } else { + assertThrows( + "cannot resolve protocol by neither iana protocol number field [iana_protocol_number] nor protocol name field [protocol]", + IllegalArgumentException.class, + () -> processor.execute(ingestDocument) + ); + } + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + source.put("source_port", 1000); + source.put("destination_port", 2000); + String protocol = randomAlphaOfLength(10); + source.put("protocol", protocol); + IngestDocument ingestDocumentWithProtocol = RandomDocumentPicks.randomIngestDocument(random(), source); + Processor processorWithProtocol = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + randomBoolean() + ); + assertThrows( + "unsupported protocol [" + protocol + "]", + IllegalArgumentException.class, + () -> 
processorWithProtocol.execute(ingestDocumentWithProtocol) + ); + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + source.put("source_port", 1000); + source.put("destination_port", 2000); + int ianaProtocolNumber = randomIntBetween(1000, 10000); + source.put("iana_protocol_number", ianaProtocolNumber); + IngestDocument ingestDocumentWithProtocolNumber = RandomDocumentPicks.randomIngestDocument(random(), source); + + Processor processorWithProtocolNumber = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + null, + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + randomBoolean() + ); + assertThrows( + "unsupported iana protocol number [" + ianaProtocolNumber + "]", + IllegalArgumentException.class, + () -> processorWithProtocolNumber.execute(ingestDocumentWithProtocolNumber) + ); + } + + public void testResolveIPAndPort() throws Exception { + Map<String, Object> source = new HashMap<>(); + source.put("source_ip", ""); + source.put("destination_ip", "2.2.2.2"); + source.put("source_port", 1000); + source.put("destination_port", 2000); + source.put("protocol", "tcp"); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), source); + + String targetFieldName = randomAlphaOfLength(100); + boolean ignore_missing = randomBoolean(); + Processor processor = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + null, + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + ignore_missing + ); + if (ignore_missing) { + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldName), equalTo(false)); + } else { + assertThrows( + "ip address in the field [source_ip] is null or empty", + IllegalArgumentException.class, + () -> processor.execute(ingestDocument) + ); + } + + source = new HashMap<>(); + source.put("source_ip", "1.1.1"); + source.put("destination_ip", "2.2.2.2"); + source.put("source_port", 1000); + source.put("destination_port", 2000); + source.put("protocol", "tcp"); + IngestDocument ingestDocumentWithInvalidSourceIP = RandomDocumentPicks.randomIngestDocument(random(), source); + Processor processorWithInvalidSourceIP = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + randomBoolean() + ); + + assertThrows( + "ip address in the field [source_ip] is not a valid ipv4/ipv6 address", + IllegalArgumentException.class, + () -> processorWithInvalidSourceIP.execute(ingestDocumentWithInvalidSourceIP) + ); + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", ""); + source.put("source_port", 1000); + source.put("destination_port", 2000); + source.put("protocol", "tcp"); + ignore_missing = randomBoolean(); + IngestDocument ingestDocumentWithEmptyDestIP = RandomDocumentPicks.randomIngestDocument(random(), source); + Processor processorWithEmptyDestIP = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + ignore_missing + ); + if (ignore_missing) { + processorWithEmptyDestIP.execute(ingestDocumentWithEmptyDestIP); + 
assertThat(ingestDocumentWithEmptyDestIP.hasField(targetFieldName), equalTo(false)); + } else { + assertThrows( + "ip address in the field [destination_ip] is null or empty", + IllegalArgumentException.class, + () -> processorWithEmptyDestIP.execute(ingestDocumentWithEmptyDestIP) + ); + } + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2"); + source.put("source_port", 1000); + source.put("destination_port", 2000); + source.put("protocol", "tcp"); + IngestDocument ingestDocumentWithInvalidDestIP = RandomDocumentPicks.randomIngestDocument(random(), source); + Processor processorWithInvalidDestIP = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + randomBoolean() + ); + assertThrows( + "ip address in the field [destination_ip] is not a valid ipv4/ipv6 address", + IllegalArgumentException.class, + () -> processorWithInvalidDestIP.execute(ingestDocumentWithInvalidDestIP) + ); + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + source.put("source_port", 1000); + source.put("destination_port", 2000); + source.put("protocol", "tcp"); + ignore_missing = randomBoolean(); + IngestDocument normalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), source); + Processor processorWithEmptySourceIPFieldPath = createCommunityIdProcessor( + "", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + ignore_missing + ); + if (ignore_missing) { + processorWithEmptySourceIPFieldPath.execute(normalIngestDocument); + assertThat(normalIngestDocument.hasField(targetFieldName), equalTo(false)); + } else { + assertThrows( + "both source ip field path and destination ip field path cannot be null nor empty", + IllegalArgumentException.class, + () -> processorWithEmptySourceIPFieldPath.execute(normalIngestDocument) + ); + } + ignore_missing = randomBoolean(); + Processor processorWithEmptyDestIPFieldPath = createCommunityIdProcessor( + "source_ip", + "source_port", + "", + "destination_port", + "iana_protocol_number", + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + ignore_missing + ); + if (ignore_missing) { + processorWithEmptyDestIPFieldPath.execute(normalIngestDocument); + assertThat(normalIngestDocument.hasField(targetFieldName), equalTo(false)); + } else { + assertThrows( + "both source ip field path and destination ip field path cannot be null nor empty", + IllegalArgumentException.class, + () -> processorWithEmptyDestIPFieldPath.execute(normalIngestDocument) + ); + } + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + source.put("source_port", null); + source.put("destination_port", 2000); + source.put("protocol", "tcp"); + ignore_missing = randomBoolean(); + IngestDocument ingestDocumentWithEmptySourcePort = RandomDocumentPicks.randomIngestDocument(random(), source); + Processor processorWithEmptySourcePort = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + ignore_missing + ); + if (ignore_missing) { + 
processorWithEmptySourcePort.execute(ingestDocumentWithEmptySourcePort); + assertThat(ingestDocumentWithEmptySourcePort.hasField(targetFieldName), equalTo(false)); + } else { + assertThrows( + "both source port and destination port field path cannot be null nor empty", + IllegalArgumentException.class, + () -> processorWithEmptySourcePort.execute(ingestDocumentWithEmptySourcePort) + ); + } + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + source.put("source_port", 65536); + source.put("destination_port", 2000); + source.put("protocol", "tcp"); + IngestDocument ingestDocumentWithInvalidSourcePort = RandomDocumentPicks.randomIngestDocument(random(), source); + Processor processorWithInvalidSourcePort = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + randomBoolean() + ); + assertThrows( + "both source port and destination port must be between 0 and 65535, but port in the field path [source_port] is [65536]", + IllegalArgumentException.class, + () -> processorWithInvalidSourcePort.execute(ingestDocumentWithInvalidSourcePort) + ); + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + source.put("source_port", 1000); + source.put("destination_port", null); + source.put("protocol", "tcp"); + ignore_missing = randomBoolean(); + IngestDocument ingestDocumentWithEmptyDestPort = RandomDocumentPicks.randomIngestDocument(random(), source); + Processor processorWithEmptyDestPort = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + ignore_missing + ); + if (ignore_missing) { + processorWithEmptyDestPort.execute(ingestDocumentWithEmptyDestPort); + assertThat(ingestDocumentWithEmptyDestPort.hasField(targetFieldName), equalTo(false)); + } else { + assertThrows( + "both source port and destination port cannot be null, but port in the field path [destination_port] is null", + IllegalArgumentException.class, + () -> processorWithEmptyDestPort.execute(ingestDocumentWithEmptyDestPort) + ); + } + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + source.put("source_port", 1000); + source.put("destination_port", -1); + source.put("protocol", "tcp"); + IngestDocument ingestDocumentWithInvalidDestPort = RandomDocumentPicks.randomIngestDocument(random(), source); + Processor processorWithInvalidDestPort = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + randomBoolean() + ); + assertThrows( + "both source port and destination port cannot be null, but port in the field path [destination_port] is [-1]", + IllegalArgumentException.class, + () -> processorWithInvalidDestPort.execute(ingestDocumentWithInvalidDestPort) + ); + } + + public void testResolveICMPTypeAndCode() throws Exception { + Map<String, Object> source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + int protocolNumber = randomFrom(1, 58); + source.put("iana_protocol_number", protocolNumber); + IngestDocument ingestDocument = 
RandomDocumentPicks.randomIngestDocument(random(), source); + String targetFieldName = randomAlphaOfLength(100); + boolean ignoreMissing = randomBoolean(); + Processor processor = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + null, + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + ignoreMissing + ); + if (ignoreMissing) { + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldName), equalTo(false)); + } else { + assertThrows( + "icmp message type field path cannot be null nor empty", + IllegalArgumentException.class, + () -> processor.execute(ingestDocument) + ); + } + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + protocolNumber = randomFrom(1, 58); + source.put("iana_protocol_number", protocolNumber); + source.put("icmp_type", null); + IngestDocument ingestDocumentWithNullType = RandomDocumentPicks.randomIngestDocument(random(), source); + ignoreMissing = randomBoolean(); + Processor processorWithNullType = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + "protocol", + "icmp_type", + null, + randomIntBetween(0, 65535), + targetFieldName, + ignoreMissing + ); + if (ignoreMissing) { + processorWithNullType.execute(ingestDocumentWithNullType); + assertThat(ingestDocumentWithNullType.hasField(targetFieldName), equalTo(false)); + } else { + assertThrows( + "icmp message type cannot be null nor empty", + IllegalArgumentException.class, + () -> processorWithNullType.execute(ingestDocumentWithNullType) + ); + } + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + protocolNumber = randomFrom(1, 58); + source.put("iana_protocol_number", protocolNumber); + int icmpType; + if (randomBoolean()) { + icmpType = randomIntBetween(256, 1000); + } else { + icmpType = randomIntBetween(-100, -1); + } + source.put("icmp_type", icmpType); + IngestDocument ingestDocumentWithInvalidICMPType = RandomDocumentPicks.randomIngestDocument(random(), source); + Processor processorWithInvalidICMPType = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_protocol_number", + "protocol", + "icmp_type", + null, + randomIntBetween(0, 65535), + targetFieldName, + false + ); + assertThrows( + "invalid icmp message type [" + icmpType + "]", + IllegalArgumentException.class, + () -> processorWithInvalidICMPType.execute(ingestDocumentWithInvalidICMPType) + ); + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + protocolNumber = randomFrom(1, 58); + source.put("iana_protocol_number", protocolNumber); + if (protocolNumber == 1) { + icmpType = randomIntBetween(3, 6); + } else { + icmpType = randomIntBetween(146, 161); + } + source.put("icmp_type", icmpType); + IngestDocument ingestDocumentWithNoCode = RandomDocumentPicks.randomIngestDocument(random(), source); + ignoreMissing = randomBoolean(); + Processor processorWithNoCode = createCommunityIdProcessor( + "source_ip", + null, + "destination_ip", + null, + "iana_protocol_number", + "protocol", + "icmp_type", + null, + randomIntBetween(0, 65535), + targetFieldName, + ignoreMissing + ); + if (ignoreMissing) { + processorWithNoCode.execute(ingestDocumentWithNoCode); + assertThat(ingestDocumentWithNoCode.hasField(targetFieldName), 
equalTo(false)); + } else { + assertThrows( + "icmp message code field path cannot be null nor empty", + IllegalArgumentException.class, + () -> processorWithNoCode.execute(ingestDocumentWithNoCode) + ); + } + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + protocolNumber = randomFrom(1, 58); + source.put("iana_protocol_number", protocolNumber); + if (protocolNumber == 1) { + icmpType = randomIntBetween(3, 6); + } else { + icmpType = randomIntBetween(146, 161); + } + source.put("icmp_type", icmpType); + source.put("icmp_code", null); + IngestDocument ingestDocumentWithNullCode = RandomDocumentPicks.randomIngestDocument(random(), source); + ignoreMissing = randomBoolean(); + Processor processorWithNullCode = createCommunityIdProcessor( + "source_ip", + null, + "destination_ip", + null, + "iana_protocol_number", + "protocol", + "icmp_type", + "icmp_code", + randomIntBetween(0, 65535), + targetFieldName, + ignoreMissing + ); + if (ignoreMissing) { + processorWithNullCode.execute(ingestDocumentWithNullCode); + assertThat(ingestDocumentWithNullCode.hasField(targetFieldName), equalTo(false)); + } else { + assertThrows( + "icmp message code cannot be null nor empty", + IllegalArgumentException.class, + () -> processorWithNullCode.execute(ingestDocumentWithNullCode) + ); + } + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + protocolNumber = randomFrom(1, 58); + source.put("iana_protocol_number", protocolNumber); + if (protocolNumber == 1) { + icmpType = randomIntBetween(3, 6); + } else { + icmpType = randomIntBetween(146, 161); + } + source.put("icmp_type", icmpType); + int icmpCode; + if (randomBoolean()) { + icmpCode = randomIntBetween(256, 1000); + } else { + icmpCode = randomIntBetween(-100, -1); + } + source.put("icmp_code", icmpCode); + IngestDocument ingestDocumentWithInvalidCode = RandomDocumentPicks.randomIngestDocument(random(), source); + Processor processorWithInvalidCode = createCommunityIdProcessor( + "source_ip", + null, + "destination_ip", + null, + "iana_protocol_number", + null, + "icmp_type", + "icmp_code", + randomIntBetween(0, 65535), + targetFieldName, + randomBoolean() + ); + assertThrows( + "invalid icmp message code [" + icmpCode + "]", + IllegalArgumentException.class, + () -> processorWithInvalidCode.execute(ingestDocumentWithInvalidCode) + ); + } + + public void testTransportProtocols() throws Exception { + Map<String, Object> source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + source.put("source_port", 1000); + source.put("destination_port", 2000); + boolean isProtocolNameSpecified = randomBoolean(); + if (isProtocolNameSpecified) { + source.put("protocol", randomFrom("tcp", "udp", "sctp")); + } else { + source.put("iana_number", randomFrom(6, 17, 132)); + } + + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), source); + + String targetFieldName = randomAlphaOfLength(100); + Processor processor; + if (isProtocolNameSpecified) { + processor = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + null, + "protocol", + null, + null, + randomIntBetween(0, 65535), + targetFieldName, + randomBoolean() + ); + } else { + processor = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_number", + null, + null, + null, + randomIntBetween(0, 65535), + 
targetFieldName, + randomBoolean() + ); + } + + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldName), equalTo(true)); + String communityIDHash = ingestDocument.getFieldValue(targetFieldName, String.class); + assertThat(communityIDHash.startsWith("1:"), equalTo(true)); + } + + public void testICMP() throws Exception { + Map<String, Object> source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + boolean isICMP = randomBoolean(); + if (isICMP) { + source.put("protocol", "icmp"); + source.put("type", randomFrom(0, 8, 9, 10, 13, 15, 17, 18)); + } else { + source.put("protocol", "ipv6-icmp"); + source.put("type", randomFrom(128, 129, 130, 131, 133, 134, 135, 136, 139, 140, 144, 145)); + } + + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), source); + + String targetFieldName = randomAlphaOfLength(100); + Processor processor = createCommunityIdProcessor( + "source_ip", + null, + "destination_ip", + null, + null, + "protocol", + "type", + null, + randomIntBetween(0, 65535), + targetFieldName, + randomBoolean() + ); + + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldName), equalTo(true)); + assertThat(ingestDocument.getFieldValue(targetFieldName, String.class).startsWith("1:"), equalTo(true)); + + source = new HashMap<>(); + source.put("source_ip", "1.1.1.1"); + source.put("destination_ip", "2.2.2.2"); + isICMP = randomBoolean(); + if (isICMP) { + source.put("protocol", "icmp"); + // see https://www.iana.org/assignments/icmp-parameters/icmp-parameters.xhtml#icmp-parameters-codes-5 + source.put("type", randomIntBetween(3, 6)); + source.put("code", 0); + } else { + source.put("protocol", "ipv6-icmp"); + // see https://www.iana.org/assignments/icmpv6-parameters/icmpv6-parameters.xhtml#icmpv6-parameters-codes-23 + source.put("type", randomIntBetween(146, 161)); + source.put("code", 0); + } + + IngestDocument ingestDocumentWithOnewayFlow = RandomDocumentPicks.randomIngestDocument(random(), source); + + targetFieldName = randomAlphaOfLength(100); + Processor processorWithOnewayFlow = createCommunityIdProcessor( + "source_ip", + null, + "destination_ip", + null, + null, + "protocol", + "type", + "code", + randomIntBetween(0, 65535), + targetFieldName, + randomBoolean() + ); + + processorWithOnewayFlow.execute(ingestDocumentWithOnewayFlow); + assertThat(ingestDocumentWithOnewayFlow.hasField(targetFieldName), equalTo(true)); + assertThat(ingestDocumentWithOnewayFlow.getFieldValue(targetFieldName, String.class).startsWith("1:"), equalTo(true)); + } + + // test that the hash result is consistent with the known value + public void testHashResult() throws Exception { + int index = randomIntBetween(0, CommunityIdHashInstance.values().length - 1); + CommunityIdHashInstance instance = CommunityIdHashInstance.values()[index]; + final boolean isTransportProtocol = instance.name().equals("TCP") + || instance.name().equals("UDP") + || instance.name().equals("SCTP"); + Map<String, Object> source = new HashMap<>(); + source.put("source_ip", instance.getSourceIp()); + source.put("destination_ip", instance.getDestIP()); + if (isTransportProtocol) { + source.put("source_port", instance.getSourcePort()); + source.put("destination_port", instance.getDestPort()); + source.put("iana_number", instance.getProtocolNumber()); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), source); + + String targetFieldName = randomAlphaOfLength(100); + 
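+            // For context, a sketch of Community ID v1 per the public spec (https://github.com/corelight/community-id-spec); this comment is explanatory and not part of the processor implementation:
+            //   hash = "1:" + base64(sha1(seed_be16 . src_ip . dst_ip . protocol . 0x00 . src_port_be16 . dst_port_be16))
+            // where the endpoint tuple is normalized so the smaller (ip, port) pair hashes first (for ICMP, type/code stand in for the ports).
+            // That normalization is why the reversed-direction flow exercised below should reproduce the known digests in CommunityIdHashInstance.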
boolean ignoreMissing = randomBoolean(); + Processor processor = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_number", + null, + null, + null, + 0, + targetFieldName, + ignoreMissing + ); + + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldName), equalTo(true)); + assertThat(ingestDocument.getFieldValue(targetFieldName, String.class), equalTo(instance.getHash())); + + // test the flow tuple in reversed direction, the hash result should be the same value + source = new HashMap<>(); + source.put("source_ip", instance.getDestIP()); + source.put("destination_ip", instance.getSourceIp()); + source.put("source_port", instance.getDestPort()); + source.put("destination_port", instance.getSourcePort()); + source.put("iana_number", instance.getProtocolNumber()); + IngestDocument ingestDocumentWithReversedDirection = RandomDocumentPicks.randomIngestDocument(random(), source); + + targetFieldName = randomAlphaOfLength(100); + Processor processorWithReversedDirection = createCommunityIdProcessor( + "source_ip", + "source_port", + "destination_ip", + "destination_port", + "iana_number", + null, + null, + null, + 0, + targetFieldName, + randomBoolean() + ); + + processorWithReversedDirection.execute(ingestDocumentWithReversedDirection); + assertThat(ingestDocumentWithReversedDirection.hasField(targetFieldName), equalTo(true)); + assertThat(ingestDocumentWithReversedDirection.getFieldValue(targetFieldName, String.class), equalTo(instance.getHash())); + } else { + source.put("type", instance.getSourcePort()); + source.put("code", instance.getDestPort()); + source.put("iana_number", instance.getProtocolNumber()); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), source); + + String targetFieldName = randomAlphaOfLength(100); + boolean ignoreMissing = randomBoolean(); + Processor processor = createCommunityIdProcessor( + "source_ip", + null, + "destination_ip", + null, + "iana_number", + null, + "type", + "code", + 0, + targetFieldName, + ignoreMissing + ); + + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldName), equalTo(true)); + assertThat(ingestDocument.getFieldValue(targetFieldName, String.class), equalTo(instance.getHash())); + } + } + + private enum CommunityIdHashInstance { + TCP("66.35.250.204", "128.232.110.120", 6, 80, 34855, "1:LQU9qZlK+B5F3KDmev6m5PMibrg="), + UDP("8.8.8.8", "192.168.1.52", 17, 53, 54585, "1:d/FP5EW3wiY1vCndhwleRRKHowQ="), + SCTP("192.168.170.8", "192.168.170.56", 132, 7, 7, "1:MP2EtRCAUIZvTw6MxJHLV7N7JDs="), + ICMP("192.168.0.89", "192.168.0.1", 1, 8, 0, "1:X0snYXpgwiv9TZtqg64sgzUn6Dk="), + ICMP_V6("fe80::260:97ff:fe07:69ea", "ff02::1", 58, 134, 0, "1:pkvHqCL88/tg1k4cPigmZXUtL00="); + + private final String sourceIp; + private final String destIP; + private final int protocolNumber; + private final int sourcePort; + private final int destPort; + private final String hash; + + CommunityIdHashInstance(String sourceIp, String destIP, int protocolNumber, int sourcePort, int destPort, String hash) { + this.sourceIp = sourceIp; + this.destIP = destIP; + this.protocolNumber = protocolNumber; + this.sourcePort = sourcePort; + this.destPort = destPort; + this.hash = hash; + } + + private String getSourceIp() { + return this.sourceIp; + } + + private String getDestIP() { + return this.destIP; + } + + private int getProtocolNumber() { + return this.protocolNumber; + } + + private int getSourcePort() { + return 
this.sourcePort; + } + + private int getDestPort() { + return this.destPort; + } + + private String getHash() { + return this.hash; + } + } + + private static Processor createCommunityIdProcessor( + String sourceIPField, + String sourcePortField, + String destinationIPField, + String destinationPortField, + String ianaProtocolNumberField, + String protocolField, + String icmpTypeField, + String icmpCodeField, + int seed, + String targetField, + boolean ignoreMissing + ) { + return new CommunityIdProcessor( + randomAlphaOfLength(10), + null, + sourceIPField, + sourcePortField, + destinationIPField, + destinationPortField, + ianaProtocolNumberField, + protocolField, + icmpTypeField, + icmpCodeField, + seed, + targetField, + ignoreMissing + ); + } +} diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ConvertProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ConvertProcessorTests.java index 6eed29e330f2c..0ba0a39261d00 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ConvertProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ConvertProcessorTests.java @@ -32,6 +32,11 @@ package org.opensearch.ingest.common; +import org.opensearch.ingest.IngestDocument; +import org.opensearch.ingest.Processor; +import org.opensearch.ingest.RandomDocumentPicks; +import org.opensearch.test.OpenSearchTestCase; + import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -39,11 +44,6 @@ import java.util.Locale; import java.util.Map; -import org.opensearch.ingest.IngestDocument; -import org.opensearch.ingest.Processor; -import org.opensearch.ingest.RandomDocumentPicks; -import org.opensearch.test.OpenSearchTestCase; - import static org.opensearch.ingest.IngestDocumentMatcher.assertIngestDocument; import static org.opensearch.ingest.common.ConvertProcessor.Type; import static org.hamcrest.Matchers.containsString; diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorFactoryTests.java new file mode 100644 index 0000000000000..c1ca86a49e334 --- /dev/null +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorFactoryTests.java @@ -0,0 +1,101 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.ingest.common; + +import org.opensearch.OpenSearchException; +import org.opensearch.OpenSearchParseException; +import org.opensearch.ingest.TestTemplateService; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.CoreMatchers.equalTo; + +public class CopyProcessorFactoryTests extends OpenSearchTestCase { + + private CopyProcessor.Factory factory; + + @Before + public void init() { + factory = new CopyProcessor.Factory(TestTemplateService.instance()); + } + + public void testCreate() throws Exception { + boolean ignoreMissing = randomBoolean(); + boolean removeSource = randomBoolean(); + boolean overrideTarget = randomBoolean(); + Map<String, Object> config = new HashMap<>(); + config.put("source_field", "source"); + config.put("target_field", "target"); + config.put("ignore_missing", ignoreMissing); + config.put("remove_source", removeSource); + config.put("override_target", overrideTarget); + String processorTag = randomAlphaOfLength(10); + CopyProcessor copyProcessor = factory.create(null, processorTag, null, config); + assertThat(copyProcessor.getTag(), equalTo(processorTag)); + assertThat(copyProcessor.getSourceField().newInstance(Collections.emptyMap()).execute(), equalTo("source")); + assertThat(copyProcessor.getTargetField().newInstance(Collections.emptyMap()).execute(), equalTo("target")); + assertThat(copyProcessor.isIgnoreMissing(), equalTo(ignoreMissing)); + assertThat(copyProcessor.isRemoveSource(), equalTo(removeSource)); + assertThat(copyProcessor.isOverrideTarget(), equalTo(overrideTarget)); + } + + public void testCreateWithSourceField() throws Exception { + Map<String, Object> config = new HashMap<>(); + try { + factory.create(null, null, null, config); + fail("factory create should have failed"); + } catch (OpenSearchParseException e) { + assertThat(e.getMessage(), equalTo("[source_field] required property is missing")); + } + + config.put("source_field", null); + try { + factory.create(null, null, null, config); + fail("factory create should have failed"); + } catch (OpenSearchParseException e) { + assertThat(e.getMessage(), equalTo("[source_field] required property is missing")); + } + } + + public void testCreateWithTargetField() throws Exception { + Map<String, Object> config = new HashMap<>(); + config.put("source_field", "source"); + try { + factory.create(null, null, null, config); + fail("factory create should have failed"); + } catch (OpenSearchParseException e) { + assertThat(e.getMessage(), equalTo("[target_field] required property is missing")); + } + + config.put("source_field", "source"); + config.put("target_field", null); + try { + factory.create(null, null, null, config); + fail("factory create should have failed"); + } catch (OpenSearchParseException e) { + assertThat(e.getMessage(), equalTo("[target_field] required property is missing")); + } + } + + public void testInvalidMustacheTemplate() throws Exception { + CopyProcessor.Factory factory = new CopyProcessor.Factory(TestTemplateService.instance(true)); + Map<String, Object> config = new HashMap<>(); + config.put("source_field", "{{source}}"); + config.put("target_field", "target"); + String processorTag = randomAlphaOfLength(10); + OpenSearchException exception = expectThrows(OpenSearchException.class, () -> factory.create(null, processorTag, null, config)); + assertThat(exception.getMessage(), equalTo("java.lang.RuntimeException: could 
not compile script")); + assertThat(exception.getMetadata("opensearch.processor_tag").get(0), equalTo(processorTag)); + } + +} diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorTests.java new file mode 100644 index 0000000000000..3259ba85ef340 --- /dev/null +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorTests.java @@ -0,0 +1,145 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ingest.common; + +import org.opensearch.ingest.IngestDocument; +import org.opensearch.ingest.Processor; +import org.opensearch.ingest.RandomDocumentPicks; +import org.opensearch.ingest.TestTemplateService; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +public class CopyProcessorTests extends OpenSearchTestCase { + + public void testCopyExistingField() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + String sourceFieldName = RandomDocumentPicks.randomExistingFieldName(random(), ingestDocument); + String targetFieldName = RandomDocumentPicks.randomFieldName(random()); + Processor processor = createCopyProcessor(sourceFieldName, targetFieldName, false, false, false); + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldName), equalTo(true)); + Object sourceValue = ingestDocument.getFieldValue(sourceFieldName, Object.class); + assertDeepCopiedObjectEquals(ingestDocument.getFieldValue(targetFieldName, Object.class), sourceValue); + + Processor processorWithEmptyTarget = createCopyProcessor(sourceFieldName, "", false, false, false); + assertThrows( + "target field path cannot be null nor empty", + IllegalArgumentException.class, + () -> processorWithEmptyTarget.execute(ingestDocument) + ); + + Processor processorWithSameSourceAndTarget = createCopyProcessor(sourceFieldName, sourceFieldName, false, false, false); + assertThrows( + "source field path and target field path cannot be same", + IllegalArgumentException.class, + () -> processorWithSameSourceAndTarget.execute(ingestDocument) + ); + } + + public void testCopyWithIgnoreMissing() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + String targetFieldName = RandomDocumentPicks.randomFieldName(random()); + Processor processor = createCopyProcessor("non-existing-field", targetFieldName, false, false, false); + assertThrows( + "source field [non-existing-field] doesn't exist", + IllegalArgumentException.class, + () -> processor.execute(ingestDocument) + ); + + Processor processorWithEmptyFieldName = createCopyProcessor("", targetFieldName, false, false, false); + assertThrows( + "source field path cannot be null nor empty", + IllegalArgumentException.class, + () -> processorWithEmptyFieldName.execute(ingestDocument) + ); + + Processor processorWithIgnoreMissing = createCopyProcessor("non-existing-field", targetFieldName, true, false, false); + processorWithIgnoreMissing.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldName), equalTo(false)); + } + + public void testCopyWithRemoveSource() throws Exception { + IngestDocument ingestDocument = 
RandomDocumentPicks.randomIngestDocument(random()); + String sourceFieldName = RandomDocumentPicks.randomExistingFieldName(random(), ingestDocument); + String targetFieldName = RandomDocumentPicks.randomFieldName(random()); + Object sourceValue = ingestDocument.getFieldValue(sourceFieldName, Object.class); + + Processor processor = createCopyProcessor(sourceFieldName, targetFieldName, false, true, false); + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldName), equalTo(true)); + assertDeepCopiedObjectEquals(ingestDocument.getFieldValue(targetFieldName, Object.class), sourceValue); + assertThat(ingestDocument.hasField(sourceFieldName), equalTo(false)); + } + + public void testCopyToExistingField() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + String targetFieldName = RandomDocumentPicks.randomExistingFieldName(random(), ingestDocument); + Object sourceValue = RandomDocumentPicks.randomFieldValue(random()); + String sourceFieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, sourceValue); + + Processor processor = createCopyProcessor(sourceFieldName, targetFieldName, false, false, false); + assertThrows( + "target field [" + targetFieldName + "] already exists", + IllegalArgumentException.class, + () -> processor.execute(ingestDocument) + ); + + // if override_target is false but target field's value is null, copy can execute successfully + String targetFieldWithNullValue = RandomDocumentPicks.addRandomField(random(), ingestDocument, null); + Processor processorWithTargetNullValue = createCopyProcessor(sourceFieldName, targetFieldWithNullValue, false, false, false); + processorWithTargetNullValue.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldWithNullValue), equalTo(true)); + assertDeepCopiedObjectEquals(ingestDocument.getFieldValue(targetFieldWithNullValue, Object.class), sourceValue); + + Processor processorWithOverrideTargetIsTrue = createCopyProcessor(sourceFieldName, targetFieldName, false, false, true); + processorWithOverrideTargetIsTrue.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldName), equalTo(true)); + assertDeepCopiedObjectEquals(ingestDocument.getFieldValue(targetFieldName, Object.class), sourceValue); + } + + @SuppressWarnings("unchecked") + private static void assertDeepCopiedObjectEquals(Object expected, Object actual) { + if (expected instanceof Map) { + Map<String, Object> expectedMap = (Map<String, Object>) expected; + Map<String, Object> actualMap = (Map<String, Object>) actual; + assertEquals(expectedMap.size(), actualMap.size()); + for (Map.Entry<String, Object> expectedEntry : expectedMap.entrySet()) { + assertDeepCopiedObjectEquals(expectedEntry.getValue(), actualMap.get(expectedEntry.getKey())); + } + } else if (expected instanceof List) { + assertArrayEquals(((List<?>) expected).toArray(), ((List<?>) actual).toArray()); + } else if (expected instanceof byte[]) { + assertArrayEquals((byte[]) expected, (byte[]) actual); + } else { + assertEquals(expected, actual); + } + } + + private static Processor createCopyProcessor( + String sourceFieldName, + String targetFieldName, + boolean ignoreMissing, + boolean removeSource, + boolean overrideTarget + ) { + return new CopyProcessor( + randomAlphaOfLength(10), + null, + new TestTemplateService.MockTemplateScript.Factory(sourceFieldName), + new TestTemplateService.MockTemplateScript.Factory(targetFieldName), + ignoreMissing, + removeSource, + overrideTarget + 
); + } +} diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CsvProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CsvProcessorTests.java index 1359750dc16ea..650104ac3e9a0 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CsvProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CsvProcessorTests.java @@ -34,6 +34,7 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.ingest.IngestDocument; import org.opensearch.ingest.RandomDocumentPicks; import org.opensearch.test.OpenSearchTestCase; diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DissectProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DissectProcessorTests.java index ca0c0df40f009..e42a1147825d1 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DissectProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/DissectProcessorTests.java @@ -155,4 +155,28 @@ public void testNullValueWithOutIgnoreMissing() { IngestDocument ingestDocument = new IngestDocument(originalIngestDocument); expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument)); } + + public void testMatchEmptyBrackets() { + IngestDocument ingestDocument = new IngestDocument( + "_index", + "_id", + null, + null, + null, + Collections.singletonMap("message", "[foo],[bar],[]") + ); + DissectProcessor dissectProcessor = new DissectProcessor("", null, "message", "[%{a}],[%{b}],[%{c}]", "", true); + dissectProcessor.execute(ingestDocument); + assertEquals("foo", ingestDocument.getFieldValue("a", String.class)); + assertEquals("bar", ingestDocument.getFieldValue("b", String.class)); + assertEquals("", ingestDocument.getFieldValue("c", String.class)); + + ingestDocument = new IngestDocument("_index", "_id", null, null, null, Collections.singletonMap("message", "{}{}{}{baz}")); + dissectProcessor = new DissectProcessor("", null, "message", "{%{a}}{%{b}}{%{c}}{%{d}}", "", true); + dissectProcessor.execute(ingestDocument); + assertEquals("", ingestDocument.getFieldValue("a", String.class)); + assertEquals("", ingestDocument.getFieldValue("b", String.class)); + assertEquals("", ingestDocument.getFieldValue("c", String.class)); + assertEquals("baz", ingestDocument.getFieldValue("d", String.class)); + } } diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/GrokProcessorGetActionTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/GrokProcessorGetActionTests.java index a76c194037224..f8c760d48920f 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/GrokProcessorGetActionTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/GrokProcessorGetActionTests.java @@ -32,15 +32,15 @@ package org.opensearch.ingest.common; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import 
org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.transport.TransportService; diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/JsonProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/JsonProcessorTests.java index 531e22a386236..bde5dcd90e951 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/JsonProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/JsonProcessorTests.java @@ -32,11 +32,11 @@ package org.opensearch.ingest.common; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.ingest.IngestDocument; import org.opensearch.ingest.RandomDocumentPicks; import org.opensearch.test.OpenSearchTestCase; @@ -61,7 +61,7 @@ public void testExecute() throws Exception { Map<String, Object> randomJsonMap = RandomDocumentPicks.randomSource(random()); XContentBuilder builder = JsonXContent.contentBuilder().map(randomJsonMap); - String randomJson = XContentHelper.convertToJson(BytesReference.bytes(builder), false, XContentType.JSON); + String randomJson = XContentHelper.convertToJson(BytesReference.bytes(builder), false, MediaTypeRegistry.JSON); document.put(randomField, randomJson); IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/KeyValueProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/KeyValueProcessorFactoryTests.java index 62060a682c0cb..78972ff8d5dea 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/KeyValueProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/KeyValueProcessorFactoryTests.java @@ -35,7 +35,9 @@ import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchParseException; import org.opensearch.common.util.set.Sets; +import org.opensearch.ingest.TestTemplateService; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; import java.util.Arrays; import java.util.Collections; @@ -48,8 +50,14 @@ public class KeyValueProcessorFactoryTests extends OpenSearchTestCase { + private KeyValueProcessor.Factory factory; + + @Before + public void init() { + factory = new KeyValueProcessor.Factory(TestTemplateService.instance()); + } + public void testCreateWithDefaults() throws Exception { - KeyValueProcessor.Factory factory = new KeyValueProcessor.Factory(); Map<String, Object> config = new HashMap<>(); config.put("field", "field1"); config.put("field_split", "&"); @@ -57,7 +65,7 @@ public void testCreateWithDefaults() throws Exception { String processorTag = randomAlphaOfLength(10); KeyValueProcessor processor = factory.create(null, processorTag, null, config); assertThat(processor.getTag(), equalTo(processorTag)); - assertThat(processor.getField(), equalTo("field1")); + 
assertThat(processor.getField().newInstance(Collections.emptyMap()).execute(), equalTo("field1")); assertThat(processor.getFieldSplit(), equalTo("&")); assertThat(processor.getValueSplit(), equalTo("=")); assertThat(processor.getIncludeKeys(), is(nullValue())); @@ -66,7 +74,6 @@ public void testCreateWithDefaults() throws Exception { } public void testCreateWithAllFieldsSet() throws Exception { - KeyValueProcessor.Factory factory = new KeyValueProcessor.Factory(); Map<String, Object> config = new HashMap<>(); config.put("field", "field1"); config.put("field_split", "&"); @@ -78,17 +85,16 @@ public void testCreateWithAllFieldsSet() throws Exception { String processorTag = randomAlphaOfLength(10); KeyValueProcessor processor = factory.create(null, processorTag, null, config); assertThat(processor.getTag(), equalTo(processorTag)); - assertThat(processor.getField(), equalTo("field1")); + assertThat(processor.getField().newInstance(Collections.emptyMap()).execute(), equalTo("field1")); assertThat(processor.getFieldSplit(), equalTo("&")); assertThat(processor.getValueSplit(), equalTo("=")); assertThat(processor.getIncludeKeys(), equalTo(Sets.newHashSet("a", "b"))); assertThat(processor.getExcludeKeys(), equalTo(Collections.emptySet())); - assertThat(processor.getTargetField(), equalTo("target")); + assertThat(processor.getTargetField().newInstance(Collections.emptyMap()).execute(), equalTo("target")); assertTrue(processor.isIgnoreMissing()); } public void testCreateWithMissingField() { - KeyValueProcessor.Factory factory = new KeyValueProcessor.Factory(); Map<String, Object> config = new HashMap<>(); String processorTag = randomAlphaOfLength(10); OpenSearchException exception = expectThrows( @@ -99,7 +105,6 @@ public void testCreateWithMissingField() { } public void testCreateWithMissingFieldSplit() { - KeyValueProcessor.Factory factory = new KeyValueProcessor.Factory(); Map<String, Object> config = new HashMap<>(); config.put("field", "field1"); String processorTag = randomAlphaOfLength(10); @@ -111,7 +116,6 @@ public void testCreateWithMissingFieldSplit() { } public void testCreateWithMissingValueSplit() { - KeyValueProcessor.Factory factory = new KeyValueProcessor.Factory(); Map<String, Object> config = new HashMap<>(); config.put("field", "field1"); config.put("field_split", "&"); diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/KeyValueProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/KeyValueProcessorTests.java index 685a78e2e769b..5f71ea6f16a4f 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/KeyValueProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/KeyValueProcessorTests.java @@ -36,6 +36,7 @@ import org.opensearch.ingest.IngestDocument; import org.opensearch.ingest.Processor; import org.opensearch.ingest.RandomDocumentPicks; +import org.opensearch.ingest.TestTemplateService; import org.opensearch.test.OpenSearchTestCase; import java.util.ArrayList; @@ -51,7 +52,7 @@ public class KeyValueProcessorTests extends OpenSearchTestCase { - private static final KeyValueProcessor.Factory FACTORY = new KeyValueProcessor.Factory(); + private static final KeyValueProcessor.Factory FACTORY = new KeyValueProcessor.Factory(TestTemplateService.instance()); public void test() throws Exception { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); @@ -123,7 +124,12 @@ public void testMissingField() throws Exception { IngestDocument 
ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.emptyMap()); Processor processor = createKvProcessor("unknown", "&", "=", null, null, "target", false); IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument)); - assertThat(exception.getMessage(), equalTo("field [unknown] not present as part of path [unknown]")); + assertThat(exception.getMessage(), equalTo("field [unknown] doesn't exist")); + + // when using a template snippet, the resolved field path may be empty + Processor processorWithEmptyFieldPath = createKvProcessor("", "&", "=", null, null, "target", false); + exception = expectThrows(IllegalArgumentException.class, () -> processorWithEmptyFieldPath.execute(ingestDocument)); + assertThat(exception.getMessage(), equalTo("field path cannot be null nor empty")); } public void testNullValueWithIgnoreMissing() throws Exception { diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveByPatternProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveByPatternProcessorFactoryTests.java new file mode 100644 index 0000000000000..09ba97ebb4595 --- /dev/null +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveByPatternProcessorFactoryTests.java @@ -0,0 +1,114 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ingest.common; + +import org.opensearch.OpenSearchException; +import org.opensearch.OpenSearchParseException; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.CoreMatchers.equalTo; + +public class RemoveByPatternProcessorFactoryTests extends OpenSearchTestCase { + + private RemoveByPatternProcessor.Factory factory; + + @Before + public void init() { + factory = new RemoveByPatternProcessor.Factory(); + } + + public void testCreateFieldPatterns() throws Exception { + Map<String, Object> config = new HashMap<>(); + config.put("field_pattern", "field1*"); + String processorTag = randomAlphaOfLength(10); + RemoveByPatternProcessor removeByPatternProcessor = factory.create(null, processorTag, null, config); + assertThat(removeByPatternProcessor.getTag(), equalTo(processorTag)); + assertThat(removeByPatternProcessor.getFieldPatterns().get(0), equalTo("field1*")); + + Map<String, Object> config2 = new HashMap<>(); + config2.put("field_pattern", List.of("field1*", "field2*")); + removeByPatternProcessor = factory.create(null, processorTag, null, config2); + assertThat(removeByPatternProcessor.getTag(), equalTo(processorTag)); + assertThat(removeByPatternProcessor.getFieldPatterns().get(0), equalTo("field1*")); + assertThat(removeByPatternProcessor.getFieldPatterns().get(1), equalTo("field2*")); + + Map<String, Object> config3 = new HashMap<>(); + List<String> patterns = Arrays.asList("foo*", "*", " ", ",", "#", ":", "_"); + config3.put("field_pattern", patterns); + Exception exception = expectThrows(OpenSearchParseException.class, () -> factory.create(null, processorTag, null, config3)); + assertThat( + exception.getMessage(), + equalTo( + "[field_pattern] Validation Failed: " + + "1: field_pattern [ ] must not contain the following characters [ , \", *, \\, <, |, ,, 
>, /, ?];" + + "2: field_pattern [,] must not contain the following characters [ , \", *, \\, <, |, ,, >, /, ?];" + + "3: field_pattern [#] must not contain a '#';" + + "4: field_pattern [:] must not contain a ':';" + + "5: field_pattern [_] must not start with '_';" + ) + ); + } + + public void testCreateExcludeFieldPatterns() throws Exception { + Map<String, Object> config = new HashMap<>(); + config.put("exclude_field_pattern", "field1*"); + String processorTag = randomAlphaOfLength(10); + RemoveByPatternProcessor removeByPatternProcessor = factory.create(null, processorTag, null, config); + assertThat(removeByPatternProcessor.getTag(), equalTo(processorTag)); + assertThat(removeByPatternProcessor.getExcludeFieldPatterns().get(0), equalTo("field1*")); + + Map<String, Object> config2 = new HashMap<>(); + config2.put("exclude_field_pattern", List.of("field1*", "field2*")); + removeByPatternProcessor = factory.create(null, processorTag, null, config2); + assertThat(removeByPatternProcessor.getTag(), equalTo(processorTag)); + assertThat(removeByPatternProcessor.getExcludeFieldPatterns().get(0), equalTo("field1*")); + assertThat(removeByPatternProcessor.getExcludeFieldPatterns().get(1), equalTo("field2*")); + + Map<String, Object> config3 = new HashMap<>(); + List<String> patterns = Arrays.asList("foo*", "*", " ", ",", "#", ":", "_"); + config3.put("exclude_field_pattern", patterns); + Exception exception = expectThrows(OpenSearchParseException.class, () -> factory.create(null, processorTag, null, config3)); + assertThat( + exception.getMessage(), + equalTo( + "[exclude_field_pattern] Validation Failed: " + + "1: exclude_field_pattern [ ] must not contain the following characters [ , \", *, \\, <, |, ,, >, /, ?];" + + "2: exclude_field_pattern [,] must not contain the following characters [ , \", *, \\, <, |, ,, >, /, ?];" + + "3: exclude_field_pattern [#] must not contain a '#';" + + "4: exclude_field_pattern [:] must not contain a ':';" + + "5: exclude_field_pattern [_] must not start with '_';" + ) + ); + } + + public void testCreatePatternsFailed() throws Exception { + Map<String, Object> config = new HashMap<>(); + config.put("field_pattern", List.of("foo*")); + config.put("exclude_field_pattern", List.of("bar*")); + String processorTag = randomAlphaOfLength(10); + OpenSearchException exception = expectThrows( + OpenSearchParseException.class, + () -> factory.create(null, processorTag, null, config) + ); + assertThat(exception.getMessage(), equalTo("[field_pattern] either field_pattern or exclude_field_pattern must be set")); + + Map<String, Object> config2 = new HashMap<>(); + config2.put("field_pattern", null); + config2.put("exclude_field_pattern", null); + + exception = expectThrows(OpenSearchParseException.class, () -> factory.create(null, processorTag, null, config2)); + assertThat(exception.getMessage(), equalTo("[field_pattern] either field_pattern or exclude_field_pattern must be set")); + } +} diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveByPatternProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveByPatternProcessorTests.java new file mode 100644 index 0000000000000..82ff93de1f44e --- /dev/null +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveByPatternProcessorTests.java @@ -0,0 +1,96 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible 
open source license. + */ + +package org.opensearch.ingest.common; + +import org.opensearch.ingest.IngestDocument; +import org.opensearch.ingest.Processor; +import org.opensearch.ingest.RandomDocumentPicks; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.ArrayList; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; + +public class RemoveByPatternProcessorTests extends OpenSearchTestCase { + + public void testRemoveWithFieldPatterns() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + ingestDocument.setFieldValue("foo_1", "value"); + ingestDocument.setFieldValue("foo_2", "value"); + ingestDocument.setFieldValue("bar_1", "value"); + ingestDocument.setFieldValue("bar_2", "value"); + List<String> fieldPatterns = new ArrayList<>(); + fieldPatterns.add("foo*"); + fieldPatterns.add("_index*"); + fieldPatterns.add("_id*"); + fieldPatterns.add("_version*"); + Processor processor = new RemoveByPatternProcessor(randomAlphaOfLength(10), null, fieldPatterns, null); + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField("foo_1"), equalTo(false)); + assertThat(ingestDocument.hasField("foo_2"), equalTo(false)); + assertThat(ingestDocument.hasField("bar_1"), equalTo(true)); + assertThat(ingestDocument.hasField("bar_2"), equalTo(true)); + assertThat(ingestDocument.hasField(IngestDocument.Metadata.INDEX.getFieldName()), equalTo(true)); + assertThat(ingestDocument.hasField(IngestDocument.Metadata.ID.getFieldName()), equalTo(true)); + assertThat(ingestDocument.hasField(IngestDocument.Metadata.VERSION.getFieldName()), equalTo(true)); + assertThat(ingestDocument.hasField(IngestDocument.Metadata.VERSION_TYPE.getFieldName()), equalTo(true)); + } + + public void testRemoveWithExcludeFieldPatterns() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + ingestDocument.setFieldValue("foo_1", "value"); + ingestDocument.setFieldValue("foo_2", "value"); + ingestDocument.setFieldValue("foo_3", "value"); + List<String> excludeFieldPatterns = new ArrayList<>(); + excludeFieldPatterns.add("foo_3*"); + Processor processorWithExcludeFieldsAndPatterns = new RemoveByPatternProcessor( + randomAlphaOfLength(10), + null, + null, + excludeFieldPatterns + ); + processorWithExcludeFieldsAndPatterns.execute(ingestDocument); + assertThat(ingestDocument.hasField("foo_1"), equalTo(false)); + assertThat(ingestDocument.hasField("foo_2"), equalTo(false)); + assertThat(ingestDocument.hasField("foo_3"), equalTo(true)); + assertThat(ingestDocument.hasField(IngestDocument.Metadata.INDEX.getFieldName()), equalTo(true)); + assertThat(ingestDocument.hasField(IngestDocument.Metadata.ID.getFieldName()), equalTo(true)); + assertThat(ingestDocument.hasField(IngestDocument.Metadata.VERSION.getFieldName()), equalTo(true)); + assertThat(ingestDocument.hasField(IngestDocument.Metadata.VERSION_TYPE.getFieldName()), equalTo(true)); + } + + public void testCreateRemoveByPatternProcessorWithBothFieldsAndExcludeFields() throws Exception { + assertThrows( + "either fieldPatterns or excludeFieldPatterns must be set", + IllegalArgumentException.class, + () -> new RemoveByPatternProcessor(randomAlphaOfLength(10), null, null, null) + ); + + final List<String> fieldPatterns; + if (randomBoolean()) { + fieldPatterns = new ArrayList<>(); + } else { + fieldPatterns = List.of("foo_1*"); + } + + final List<String> excludeFieldPatterns; + if (randomBoolean()) { + excludeFieldPatterns = new ArrayList<>(); + 
} else { + excludeFieldPatterns = List.of("foo_2*"); + } + + assertThrows( + "either fieldPatterns or excludeFieldPatterns must be set", + IllegalArgumentException.class, + () -> new RemoveByPatternProcessor(randomAlphaOfLength(10), null, fieldPatterns, excludeFieldPatterns) + ); + } +} diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorFactoryTests.java index 66ca888a0d39f..6332eeafc387c 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorFactoryTests.java @@ -41,6 +41,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -79,16 +80,6 @@ public void testCreateMultipleFields() throws Exception { ); } - public void testCreateMissingField() throws Exception { - Map<String, Object> config = new HashMap<>(); - try { - factory.create(null, null, null, config); - fail("factory create should have failed"); - } catch (OpenSearchParseException e) { - assertThat(e.getMessage(), equalTo("[field] required property is missing")); - } - } - public void testInvalidMustacheTemplate() throws Exception { RemoveProcessor.Factory factory = new RemoveProcessor.Factory(TestTemplateService.instance(true)); Map<String, Object> config = new HashMap<>(); @@ -98,4 +89,31 @@ public void testInvalidMustacheTemplate() throws Exception { assertThat(exception.getMessage(), equalTo("java.lang.RuntimeException: could not compile script")); assertThat(exception.getMetadata("opensearch.processor_tag").get(0), equalTo(processorTag)); } + + public void testCreateWithExcludeField() throws Exception { + Map<String, Object> config = new HashMap<>(); + String processorTag = randomAlphaOfLength(10); + OpenSearchException exception = expectThrows( + OpenSearchParseException.class, + () -> factory.create(null, processorTag, null, config) + ); + assertThat(exception.getMessage(), equalTo("[field] either field or exclude_field must be set")); + + Map<String, Object> config2 = new HashMap<>(); + config2.put("field", "field1"); + config2.put("exclude_field", "field2"); + exception = expectThrows(OpenSearchParseException.class, () -> factory.create(null, processorTag, null, config2)); + assertThat(exception.getMessage(), equalTo("[field] either field or exclude_field must be set")); + + Map<String, Object> config6 = new HashMap<>(); + config6.put("exclude_field", "exclude_field"); + RemoveProcessor removeProcessor = factory.create(null, processorTag, null, config6); + assertThat( + removeProcessor.getExcludeFields() + .stream() + .map(template -> template.newInstance(Collections.emptyMap()).execute()) + .collect(Collectors.toList()), + equalTo(List.of("exclude_field")) + ); + } } diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorTests.java index cf65236157111..7fc1d3f2f0a3c 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorTests.java @@ -32,34 +32,55 @@ package org.opensearch.ingest.common; +import org.opensearch.common.lucene.uid.Versions; +import 
org.opensearch.index.VersionType; import org.opensearch.ingest.IngestDocument; import org.opensearch.ingest.Processor; import org.opensearch.ingest.RandomDocumentPicks; import org.opensearch.ingest.TestTemplateService; +import org.opensearch.script.TemplateScript; import org.opensearch.test.OpenSearchTestCase; +import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; +import java.util.stream.Collectors; -import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; public class RemoveProcessorTests extends OpenSearchTestCase { public void testRemoveFields() throws Exception { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); - String field = RandomDocumentPicks.randomExistingFieldName(random(), ingestDocument); + String field = RandomDocumentPicks.addRandomField(random(), ingestDocument, randomAlphaOfLength(10)); Processor processor = new RemoveProcessor( randomAlphaOfLength(10), null, Collections.singletonList(new TestTemplateService.MockTemplateScript.Factory(field)), + null, false ); processor.execute(ingestDocument); assertThat(ingestDocument.hasField(field), equalTo(false)); } + public void testRemoveByExcludeFields() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + ingestDocument.setFieldValue("foo_1", "value"); + ingestDocument.setFieldValue("foo_2", "value"); + ingestDocument.setFieldValue("foo_3", "value"); + List<TemplateScript.Factory> excludeFields = new ArrayList<>(); + excludeFields.add(new TestTemplateService.MockTemplateScript.Factory("foo_1")); + excludeFields.add(new TestTemplateService.MockTemplateScript.Factory("foo_2")); + Processor processor = new RemoveProcessor(randomAlphaOfLength(10), null, null, excludeFields, false); + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField("foo_1"), equalTo(true)); + assertThat(ingestDocument.hasField("foo_2"), equalTo(true)); + assertThat(ingestDocument.hasField("foo_3"), equalTo(false)); + } + public void testRemoveNonExistingField() throws Exception { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); String fieldName = RandomDocumentPicks.randomFieldName(random()); @@ -67,12 +88,44 @@ public void testRemoveNonExistingField() throws Exception { config.put("field", fieldName); String processorTag = randomAlphaOfLength(10); Processor processor = new RemoveProcessor.Factory(TestTemplateService.instance()).create(null, processorTag, null, config); - try { - processor.execute(ingestDocument); - fail("remove field should have failed"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("not present as part of path [" + fieldName + "]")); - } + assertThrows( + "field [" + fieldName + "] doesn't exist", + IllegalArgumentException.class, + () -> { processor.execute(ingestDocument); } + ); + + Map<String, Object> configWithEmptyField = new HashMap<>(); + configWithEmptyField.put("field", ""); + processorTag = randomAlphaOfLength(10); + Processor removeProcessorWithEmptyField = new RemoveProcessor.Factory(TestTemplateService.instance()).create( + null, + processorTag, + null, + configWithEmptyField + ); + assertThrows( + "field path cannot be null nor empty", + IllegalArgumentException.class, + () -> removeProcessorWithEmptyField.execute(ingestDocument) + ); + } + + public void testRemoveEmptyField() throws Exception { + IngestDocument 
ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); + Map<String, Object> config = new HashMap<>(); + config.put("field", ""); + String processorTag = randomAlphaOfLength(10); + Processor removeProcessorWithEmptyField = new RemoveProcessor.Factory(TestTemplateService.instance()).create( + null, + processorTag, + null, + config + ); + assertThrows( + "field path cannot be null nor empty", + IllegalArgumentException.class, + () -> removeProcessorWithEmptyField.execute(ingestDocument) + ); } public void testIgnoreMissing() throws Exception { @@ -84,5 +137,191 @@ public void testIgnoreMissing() throws Exception { String processorTag = randomAlphaOfLength(10); Processor processor = new RemoveProcessor.Factory(TestTemplateService.instance()).create(null, processorTag, null, config); processor.execute(ingestDocument); + + // when using a template snippet, the resolved field path may be empty + Map<String, Object> configWithEmptyField = new HashMap<>(); + configWithEmptyField.put("field", ""); + configWithEmptyField.put("ignore_missing", true); + processorTag = randomAlphaOfLength(10); + processor = new RemoveProcessor.Factory(TestTemplateService.instance()).create(null, processorTag, null, configWithEmptyField); + processor.execute(ingestDocument); + } + + public void testRemoveMetadataField() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); + List<String> metadataFields = ingestDocument.getMetadata() + .keySet() + .stream() + .map(IngestDocument.Metadata::getFieldName) + .collect(Collectors.toList()); + + for (String metadataFieldName : metadataFields) { + Map<String, Object> config = new HashMap<>(); + config.put("field", metadataFieldName); + String processorTag = randomAlphaOfLength(10); + Processor processor = new RemoveProcessor.Factory(TestTemplateService.instance()).create(null, processorTag, null, config); + // _if_seq_no and _if_primary_term do not exist in the enriched document, so removing them will throw IllegalArgumentException + if (metadataFieldName.equals(IngestDocument.Metadata.IF_SEQ_NO.getFieldName()) + || metadataFieldName.equals(IngestDocument.Metadata.IF_PRIMARY_TERM.getFieldName())) { + assertThrows( + "field: [" + metadataFieldName + "] doesn't exist", + IllegalArgumentException.class, + () -> processor.execute(ingestDocument) + ); + } else if (metadataFieldName.equals(IngestDocument.Metadata.INDEX.getFieldName()) + || metadataFieldName.equals(IngestDocument.Metadata.VERSION.getFieldName()) + || metadataFieldName.equals(IngestDocument.Metadata.VERSION_TYPE.getFieldName())) { + // _index, _version and _version_type cannot be removed + assertThrows( + "cannot remove metadata field [" + metadataFieldName + "]", + IllegalArgumentException.class, + () -> processor.execute(ingestDocument) + ); + } else if (metadataFieldName.equals(IngestDocument.Metadata.ID.getFieldName())) { + Long version = ingestDocument.getFieldValue(IngestDocument.Metadata.VERSION.getFieldName(), Long.class); + String versionType = ingestDocument.getFieldValue(IngestDocument.Metadata.VERSION_TYPE.getFieldName(), String.class); + if (!versionType.equals(VersionType.toString(VersionType.INTERNAL))) { + assertThrows( + "cannot remove metadata field [_id] when specifying external version for the document, version: " + + version + + ", version_type: " + + versionType, + IllegalArgumentException.class, + () -> processor.execute(ingestDocument) + ); + } else { + processor.execute(ingestDocument); + 
assertThat(ingestDocument.hasField(metadataFieldName), equalTo(false)); + } + } else if (metadataFieldName.equals(IngestDocument.Metadata.ROUTING.getFieldName()) + && ingestDocument.hasField(IngestDocument.Metadata.ROUTING.getFieldName())) { + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField(metadataFieldName), equalTo(false)); + } + } + } + + public void testCreateRemoveProcessorWithBothFieldsAndExcludeFields() throws Exception { + assertThrows( + "either fields or excludeFields must be set", + IllegalArgumentException.class, + () -> new RemoveProcessor(randomAlphaOfLength(10), null, null, null, false) + ); + + final List<TemplateScript.Factory> fields; + if (randomBoolean()) { + fields = new ArrayList<>(); + } else { + fields = List.of(new TestTemplateService.MockTemplateScript.Factory("foo_1")); + } + + final List<TemplateScript.Factory> excludeFields; + if (randomBoolean()) { + excludeFields = new ArrayList<>(); + } else { + excludeFields = List.of(new TestTemplateService.MockTemplateScript.Factory("foo_2")); + } + + assertThrows( + "either fields or excludeFields must be set", + IllegalArgumentException.class, + () -> new RemoveProcessor(randomAlphaOfLength(10), null, fields, excludeFields, false) + ); + } + + public void testRemoveDocumentId() throws Exception { + Map<String, Object> config = new HashMap<>(); + config.put("field", IngestDocument.Metadata.ID.getFieldName()); + String processorTag = randomAlphaOfLength(10); + + // test remove _id when _version_type is external + IngestDocument ingestDocumentWithExternalVersionType = new IngestDocument( + RandomDocumentPicks.randomString(random()), + RandomDocumentPicks.randomString(random()), + RandomDocumentPicks.randomString(random()), + 1L, + VersionType.EXTERNAL, + RandomDocumentPicks.randomSource(random()) + ); + + Processor processorForExternalVersionType = new RemoveProcessor.Factory(TestTemplateService.instance()).create( + null, + processorTag, + null, + config + ); + assertThrows( + "cannot remove metadata field [_id] when specifying external version for the document, version: " + + 1 + + ", version_type: " + + VersionType.EXTERNAL, + IllegalArgumentException.class, + () -> processorForExternalVersionType.execute(ingestDocumentWithExternalVersionType) + ); + + // test remove _id when _version_type is external_gte + config.put("field", IngestDocument.Metadata.ID.getFieldName()); + IngestDocument ingestDocumentWithExternalGTEVersionType = new IngestDocument( + RandomDocumentPicks.randomString(random()), + RandomDocumentPicks.randomString(random()), + RandomDocumentPicks.randomString(random()), + 1L, + VersionType.EXTERNAL_GTE, + RandomDocumentPicks.randomSource(random()) + ); + + Processor processorForExternalGTEVersionType = new RemoveProcessor.Factory(TestTemplateService.instance()).create( + null, + processorTag, + null, + config + ); + assertThrows( + "cannot remove metadata field [_id] when specifying external version for the document, version: " + + 1 + + ", version_type: " + + VersionType.EXTERNAL_GTE, + IllegalArgumentException.class, + () -> processorForExternalGTEVersionType.execute(ingestDocumentWithExternalGTEVersionType) + ); + + // test remove _id when _version_type is internal + config.put("field", IngestDocument.Metadata.ID.getFieldName()); + IngestDocument ingestDocumentWithInternalVersionType = new IngestDocument( + RandomDocumentPicks.randomString(random()), + RandomDocumentPicks.randomString(random()), + RandomDocumentPicks.randomString(random()), + Versions.MATCH_ANY, + 
VersionType.INTERNAL, + RandomDocumentPicks.randomSource(random()) + ); + + Processor processorForInternalVersionType = new RemoveProcessor.Factory(TestTemplateService.instance()).create( + null, + processorTag, + null, + config + ); + processorForInternalVersionType.execute(ingestDocumentWithInternalVersionType); + assertThat(ingestDocumentWithInternalVersionType.hasField(IngestDocument.Metadata.ID.getFieldName()), equalTo(false)); + + // test remove _id when _version_type is null + config.put("field", IngestDocument.Metadata.ID.getFieldName()); + IngestDocument ingestDocumentWithNoVersionType = new IngestDocument( + RandomDocumentPicks.randomString(random()), + RandomDocumentPicks.randomString(random()), + RandomDocumentPicks.randomString(random()), + null, + null, + RandomDocumentPicks.randomSource(random()) + ); + Processor processorForNullVersionType = new RemoveProcessor.Factory(TestTemplateService.instance()).create( + null, + processorTag, + null, + config + ); + processorForNullVersionType.execute(ingestDocumentWithNoVersionType); + assertThat(ingestDocumentWithNoVersionType.hasField(IngestDocument.Metadata.ID.getFieldName()), equalTo(false)); } } diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RenameProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RenameProcessorTests.java index fc95693024cb0..a600464371af8 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RenameProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RenameProcessorTests.java @@ -112,6 +112,15 @@ public void testRenameNonExistingField() throws Exception { } catch (IllegalArgumentException e) { assertThat(e.getMessage(), equalTo("field [" + fieldName + "] doesn't exist")); } + + // when using a template snippet, the resolved field path may be empty + processor = createRenameProcessor("", RandomDocumentPicks.randomFieldName(random()), false); + try { + processor.execute(ingestDocument); + fail("processor execute should have failed"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), equalTo("field path cannot be null nor empty")); + } } public void testRenameNonExistingFieldWithIgnoreMissing() throws Exception { @@ -121,6 +130,11 @@ public void testRenameNonExistingFieldWithIgnoreMissing() throws Exception { Processor processor = createRenameProcessor(fieldName, RandomDocumentPicks.randomFieldName(random()), true); processor.execute(ingestDocument); assertIngestDocument(originalIngestDocument, ingestDocument); + + // when using a template snippet, the resolved field path may be empty + processor = createRenameProcessor("", RandomDocumentPicks.randomFieldName(random()), true); + processor.execute(ingestDocument); + assertIngestDocument(originalIngestDocument, ingestDocument); } public void testRenameNewFieldAlreadyExists() throws Exception { diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ScriptProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ScriptProcessorTests.java index 96d9be75c4ab7..e900458e361ce 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ScriptProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ScriptProcessorTests.java @@ -105,4 +105,16 @@ private void assertIngestDocument(IngestDocument ingestDocument) { int bytesTotal = ingestDocument.getFieldValue("bytes_in", Integer.class) + ingestDocument.getFieldValue("bytes_out", 
Integer.class); assertThat(ingestDocument.getSourceAndMetadata().get("bytes_total"), is(bytesTotal)); } + + public void testScriptingWithSelfReferencingSourceMetadata() { + ScriptProcessor processor = new ScriptProcessor(randomAlphaOfLength(10), null, script, null, scriptService); + IngestDocument originalIngestDocument = randomDocument(); + String index = originalIngestDocument.getSourceAndMetadata().get(IngestDocument.Metadata.INDEX.getFieldName()).toString(); + String id = originalIngestDocument.getSourceAndMetadata().get(IngestDocument.Metadata.ID.getFieldName()).toString(); + Map<String, Object> sourceMetadata = originalIngestDocument.getSourceAndMetadata(); + originalIngestDocument.getSourceAndMetadata().put("_source", sourceMetadata); + IngestDocument ingestDocument = new IngestDocument(index, id, null, null, null, originalIngestDocument.getSourceAndMetadata()); + expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument)); + } + } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/10_basic.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/10_basic.yml index f44cc1f9f9fcf..2a816f0386667 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/10_basic.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/10_basic.yml @@ -36,3 +36,53 @@ - contains: { nodes.$cluster_manager.ingest.processors: { type: split } } - contains: { nodes.$cluster_manager.ingest.processors: { type: trim } } - contains: { nodes.$cluster_manager.ingest.processors: { type: uppercase } } + +--- +"Copy processor exists": + - skip: + version: " - 2.11.99" + features: contains + reason: "copy processor was introduced in 2.12.0 and contains is a newly added assertion" + - do: + cluster.state: {} + + # Get cluster-manager node id + - set: { cluster_manager_node: cluster_manager } + + - do: + nodes.info: {} + + - contains: { nodes.$cluster_manager.ingest.processors: { type: copy } } + +--- +"Remove_by_pattern processor exists": + - skip: + version: " - 2.11.99" + features: contains + reason: "remove_by_pattern processor was introduced in 2.12.0 and contains is a newly added assertion" + - do: + cluster.state: {} + + # Get cluster-manager node id + - set: { cluster_manager_node: cluster_manager } + + - do: + nodes.info: {} + + - contains: { nodes.$cluster_manager.ingest.processors: { type: remove_by_pattern } } + +--- +"Community_id processor exists": + - skip: + version: " - 2.12.99" + features: contains + reason: "community_id processor was introduced in 2.13.0 and contains is a newly added assertion" + - do: + cluster.state: {} + + # Get cluster-manager node id + - set: { cluster_manager_node: cluster_manager } + + - do: + nodes.info: {} + - contains: { nodes.$cluster_manager.ingest.processors: { type: community_id } } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/150_kv.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/150_kv.yml index 836243652b2e0..30a0a520b5c40 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/150_kv.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/150_kv.yml @@ -39,3 +39,151 @@ teardown: id: 1 - match: { _source.goodbye: "everybody" } - match: { _source.hello: "world" } + +--- +"Test KV Processor with template snippets": + - skip: + version: " - 2.11.99" + reason: "KV Processor with template 
snippets is only supported since 2.12.0" + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "kv" : { + "field" : "{{source}}", + "target_field" : "{{target}}", + "field_split": " ", + "value_split": "=" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + source: "foo", + target: "zoo", + foo: "goodbye=everybody hello=world" + } + + - do: + get: + index: test + id: 1 + - match: { _source.zoo.goodbye: "everybody" } + - match: { _source.zoo.hello: "world" } + +--- +"Test KV Processor with non-existing field and without ignore_missing": + - skip: + version: " - 2.11.99" + reason: "KV Processor with template snippets is only supported since 2.12.0" + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "kv" : { + "field" : "{{source}}", + "target_field" : "{{target}}", + "field_split": " ", + "value_split": "=" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /field path cannot be null nor empty/ + index: + index: test + id: 1 + pipeline: "1" + body: { + target: "zoo", + foo: "goodbye=everybody hello=world" + } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "kv" : { + "field" : "{{source}}", + "target_field" : "{{target}}", + "field_split": " ", + "value_split": "=" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /field \[unknown\] doesn\'t exist/ + index: + index: test + id: 1 + pipeline: "1" + body: { + source: "unknown", + target: "zoo", + foo: "goodbye=everybody hello=world" + } + +--- +"Test KV Processor with non-existing field and ignore_missing": + - skip: + version: " - 2.11.99" + reason: "KV Processor with template snippets is only supported since 2.12.0" + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "kv" : { + "field" : "{{source}}", + "target_field" : "{{target}}", + "field_split": " ", + "value_split": "=", + "ignore_missing": true + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + target: "zoo", + foo: "goodbye=everybody hello=world" + } + + - do: + get: + index: test + id: 1 + - match: { _source: { target: "zoo", foo: "goodbye=everybody hello=world"}} diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/190_script_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/190_script_processor.yml index 3230fb37b43f7..a66f02d6b6a6d 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/190_script_processor.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/190_script_processor.yml @@ -202,3 +202,79 @@ teardown: id: 1 - match: { _source.source_field: "foo%20bar" } - match: { _source.target_field: "foo bar" } + +--- +"Test self referencing source with ignore failure": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "script" : { + "lang": "painless", + "source" : "ctx.foo['foo']=ctx.foo;ctx['test-field']='test-value'", + "ignore_failure": true + } + }, + { + "script" : { + "lang": "painless", + "source" : "ctx.target_field = Processors.uppercase(ctx.source_field)" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: {source_field: "fooBar", foo: {foo: "bar"}} + + - do: + get: + index: test + id: 1 + - match: { 
_source.source_field: "fooBar" } + - match: { _source.target_field: "FOOBAR"} + - match: { _source.test-field: null} + +--- +"Test self referencing source without ignoring failure": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "script" : { + "lang": "painless", + "source" : "ctx.foo['foo']=ctx.foo;ctx['test-field']='test-value'" + } + }, + { + "script" : { + "lang": "painless", + "source" : "ctx.target_field = Processors.uppercase(ctx.source_field)" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: bad_request + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: {source_field: "fooBar", foo: {foo: "bar"}} + - match: { error.root_cause.0.type: "illegal_argument_exception" } + - match: { error.root_cause.0.reason: "Iterable object is self-referencing itself (ingest script)" } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/200_dissect_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/200_dissect_processor.yml index 916a7fe656cc2..d90e5fbf2362b 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/200_dissect_processor.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/200_dissect_processor.yml @@ -84,3 +84,38 @@ teardown: } ] } + +--- +"Test dissect processor can match empty brackets": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "dissect" : { + "field" : "message", + "pattern" : "[%{a}][%{b}][%{c}]" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: {message: "[foo][bar][]"} + + - do: + get: + index: test + id: 1 + - match: { _source.message: "[foo][bar][]" } + - match: { _source.a: "foo" } + - match: { _source.b: "bar" } + - match: { _source.c: "" } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/280_rename_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/280_rename_processor.yml new file mode 100644 index 0000000000000..96b2256bcc1dc --- /dev/null +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/280_rename_processor.yml @@ -0,0 +1,66 @@ +--- +teardown: + - do: + ingest.delete_pipeline: + id: "my_pipeline" + ignore: 404 + +--- +"Test rename processor with non-existing field and without ignore_missing": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "rename" : { + "field" : "{{field_foo}}", + "target_field" : "bar" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: '/field path cannot be null nor empty/' + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: { message: "foo bar baz" } + +--- +"Test rename processor with non-existing field and ignore_missing": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "rename" : { + "field" : "{{field_foo}}", + "target_field" : "bar", + "ignore_missing" : true + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: { message: "foo bar baz" } + + - do: + get: + index: test + id: 1 + - match: { _source.message: "foo bar baz" } diff --git 
a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/290_remove_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/290_remove_processor.yml new file mode 100644 index 0000000000000..e120a865052b0 --- /dev/null +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/290_remove_processor.yml @@ -0,0 +1,362 @@ +--- +teardown: + - do: + ingest.delete_pipeline: + id: "my_pipeline" + ignore: 404 + +--- +"Test simulate API works well with remove processor": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove" : { + "field" : "{{foo}}" + } + } + ] + } + - match: { acknowledged: true } + + # test simulating existing pipeline works well + - do: + ingest.simulate: + id: "my_pipeline" + body: > + { + "docs": [ + { + "_source": { + "foo": "bar", + "bar": "zoo" + } + } + ] + } + - length: { docs: 1 } + - match: { docs.0.doc._source: { "foo": "bar" } } + + # test simulating inflight pipeline works well + - do: + ingest.simulate: + body: > + { + "pipeline": { + "description": "_description", + "processors": [ + { + "remove" : { + "field" : "{{foo}}" + } + } + ] + }, + "docs": [ + { + "_source": { + "foo": "bar", + "bar": "zoo" + } + } + ] + } + - length: { docs: 1 } + - match: { docs.0.doc._source: { "foo": "bar" } } + +--- +"Test remove processor with non-existing field and without ignore_missing": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove" : { + "field" : "{{unknown}}" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /field path cannot be null nor empty/ + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: { message: "foo bar baz" } + +--- +"Test remove processor with resolved field path doesn't exist": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove" : { + "field" : "{{foo}}" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /field \[bar\] doesn\'t exist/ + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: { + message: "foo bar baz", + foo: "bar" + } + +--- +"Test remove processor with non-existing field and ignore_missing": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove" : { + "field" : "{{unknown}}", + "ignore_missing" : true + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: { message: "foo bar baz" } + + - do: + get: + index: test + id: 1 + - match: { _source.message: "foo bar baz" } + +# Related issue: https://github.com/opensearch-project/OpenSearch/issues/10732 +--- +"Test remove metadata field": + - skip: + version: " - 2.11.99" + reason: "The bug was fixed in 2.12" + + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove" : { + "field" : "{{foo}}" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /cannot remove metadata field \[\_index\]/ + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: { + foo: "_index" + } + + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove" : { + "field" : "_version" + } + } + ] + } + - match: { acknowledged: true } + +
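# removing _version or _version_type is always rejected at execute time, regardless of what the incoming document carries + - do: + catch: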
/cannot remove metadata field \[\_version\]/ + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: { + foo: "bar" + } + + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove" : { + "field" : "_version_type" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /cannot remove metadata field \[\_version\_type\]/ + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: { + foo: "bar" + } + + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove" : { + "field" : ["_id", "_routing"] + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + routing: abc + pipeline: "my_pipeline" + body: { message: "foo bar baz" } + - match: { result: created } + + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove" : { + "field" : "_id" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /cannot remove metadata field \[\_id\] when specifying external version for the document/ + index: + index: test + id: "test_id_10000" + pipeline: "my_pipeline" + version: 1 + version_type: "external" + body: { message: "foo bar baz" } + + # test simulating pipeline with removing _id + - do: + ingest.simulate: + body: > + { + "pipeline": { + "description": "_description", + "processors": [ + { + "remove" : { + "field" : "_id" + } + } + ] + }, + "docs": [ + { + "_version_type": "external_gte", + "_version": 1, + "_source": { + "foo": "bar", + "bar": "zoo" + } + } + ] + } + - match: { docs.0.error.type: "illegal_argument_exception" } + - match: { docs.0.error.reason: "cannot remove metadata field [_id] when specifying external version for the document, version: 1, version_type: external_gte" } + +# Related issue: https://github.com/opensearch-project/OpenSearch/issues/1578 +--- +"Test remove processor with exclude_field": + - skip: + version: " - 2.11.99" + reason: "exclude_field is introduced in 2.12" + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove" : { + "exclude_field": "bar" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: { + foo1: "bar", + foo2: "bar", + bar: "zoo", + zoo: "bar" + } + + - do: + get: + index: test + id: 1 + - match: { _source: { bar: "zoo"}} diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/300_copy_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/300_copy_processor.yml new file mode 100644 index 0000000000000..0203b62ba67d6 --- /dev/null +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/300_copy_processor.yml @@ -0,0 +1,375 @@ +--- +teardown: + - do: + ingest.delete_pipeline: + id: "1" + ignore: 404 + +--- +"Test create copy processor": + - skip: + version: " - 2.11.99" + reason: "introduced in 2.12" + - do: + catch: /\[target\_field\] required property is missing/ + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "source_field" : "source" + } + } + ] + } + - do: + catch: /\[source\_field\] required property is missing/ + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "target_field" : "target" + } + } + ] + } +
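# a fully specified copy processor; each optional flag (ignore_missing, remove_source, override_target) is exercised on its own in the sections below + - do: + ingest.put_pipeline: + id: "1" + body: > + { +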
"processors": [ + { + "copy" : { + "source_field" : "source", + "target_field" : "target", + "ignore_missing" : true, + "remove_source" : true, + "override_target" : true + } + } + ] + } + - match: { acknowledged: true } + +--- +"Test copy processor with ignore_missing": + - skip: + version: " - 2.11.99" + reason: "introduced in 2.12" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "source_field" : "unknown_field", + "target_field" : "bar" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /source field \[unknown\_field\] doesn\'t exist/ + index: + index: test + id: 1 + pipeline: "1" + body: { + foo: "hello" + } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "source_field" : "unknown_field", + "target_field" : "bar", + "ignore_missing" : true + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + foo: "hello" + } + - do: + get: + index: test + id: 1 + - match: { _source: { foo: "hello" } } + +--- +"Test copy processor with remove_source": + - skip: + version: " - 2.11.99" + reason: "introduced in 2.12" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "source_field" : "foo", + "target_field" : "bar" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + foo: "hello" + } + - do: + get: + index: test + id: 1 + - match: { _source: { foo: "hello", bar: "hello" } } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "source_field" : "foo", + "target_field" : "bar", + "remove_source" : true + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + foo: "hello" + } + - do: + get: + index: test + id: 1 + - match: { _source: { bar: "hello" } } + +--- +"Test copy processor with override_target": + - skip: + version: " - 2.11.99" + reason: "introduced in 2.12" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "source_field" : "foo", + "target_field" : "bar" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /target field \[bar\] already exists/ + index: + index: test + id: 1 + pipeline: "1" + body: { + foo: "hello", + bar: "world" + } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "source_field" : "foo", + "target_field" : "bar", + "override_target" : true + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + foo: "hello", + bar: "world" + } + - do: + get: + index: test + id: 1 + - match: { _source: { foo: "hello", bar: "hello" } } + +--- +"Test copy processor with template snippets": + - skip: + version: " - 2.11.99" + reason: "introduced in 2.12" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "source_field" : "{{source}}", + "target_field" : "{{target}}" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /source field path cannot be null nor empty/ + index: + index: test + id: 1 + pipeline: "1" + body: { + target: "bar", + foo: "hello", + bar: "world" + } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "source_field" : "{{source}}", + "target_field" : "{{target}}" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /target field 
path cannot be null nor empty/ + index: + index: test + id: 1 + pipeline: "1" + body: { + source: "foo", + foo: "hello", + bar: "world" + } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "source_field" : "{{source}}", + "target_field" : "{{target}}" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /source field path and target field path cannot be same/ + index: + index: test + id: 1 + pipeline: "1" + body: { + source: "foo", + target: "foo", + foo: "hello", + bar: "world" + } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "source_field" : "{{source}}", + "target_field" : "{{target}}", + "override_target" : true + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + source: "foo", + target: "bar", + foo: "hello", + bar: "world" + } + - do: + get: + index: test + id: 1 + - match: { _source: { source: "foo", target: "bar", foo: "hello", bar: "hello" } } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/310_remove_by_pattern_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/310_remove_by_pattern_processor.yml new file mode 100644 index 0000000000000..397eb8f7b6033 --- /dev/null +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/310_remove_by_pattern_processor.yml @@ -0,0 +1,146 @@ +--- +teardown: + - do: + ingest.delete_pipeline: + id: "my_pipeline" + ignore: 404 + +--- +"Test creating remove_by_pattern processor failed": + - skip: + version: " - 2.11.99" + reason: "introduced in 2.12.0" + - do: + catch: /\[field\_pattern\] either field\_pattern or exclude\_field\_pattern must be set/ + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove_by_pattern" : { + "field_pattern" : "foo*", + "exclude_field_pattern" : "bar*" + } + } + ] + } + + - do: + catch: /\[field\_pattern\] either field\_pattern or exclude\_field\_pattern must be set/ + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove_by_pattern" : { + } + } + ] + } + +--- +"Test remove_by_pattern processor with field_pattern": + - skip: + version: " - 2.11.99" + reason: "introduced in 2.12.0" + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove_by_pattern" : { + "field_pattern" : ["foo*", "*a*b"] + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: { + foo1: "bar", + foo2: "bar", + zoo: "bar", + ab: "bar", + aabb: "bar" + } + + - do: + get: + index: test + id: 1 + - match: { _source: {zoo: "bar" }} + +--- +"Test remove_by_pattern processor with exclude_field_pattern": + - skip: + version: " - 2.11.99" + reason: "introduced in 2.12.0" + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove_by_pattern" : { + "exclude_field_pattern": ["foo*", "a*b*"] + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: { + foo1: "bar", + foo2: "bar", + bar: "zoo", + zoo: "bar", + ab: "bar", + aabb: "bar" + } + + - do: + get: + index: test + id: 1 + - match: { _source: { foo1: "bar", foo2: "bar", ab: "bar", aabb: "bar"}} + + +--- +"Test cannot remove metadata 
fields by remove_by_pattern processor": + - skip: + version: " - 2.11.99" + reason: "introduced in 2.12.0" + - do: + catch: /field\_pattern \[\_id\] must not start with \'\_\'\;/ + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove_by_pattern" : { + "field_pattern": "_id" + } + } + ] + } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/320_community_id_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/320_community_id_processor.yml new file mode 100644 index 0000000000000..6de5371bb49f7 --- /dev/null +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/320_community_id_processor.yml @@ -0,0 +1,371 @@ +--- +teardown: + - do: + ingest.delete_pipeline: + id: "1" + ignore: 404 + +--- +"Test create community_id processor": + - skip: + version: " - 2.12.99" + reason: "introduced in 2.13" + - do: + catch: /\[source\_ip\_field\] required property is missing/ + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "community_id" : { + "destination_ip_field" : "dest" + } + } + ] + } + - do: + catch: /\[destination\_ip\_field\] required property is missing/ + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "community_id" : { + "source_ip_field" : "src" + } + } + ] + } + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "community_id" : { + "source_ip_field" : "source", + "destination_ip_field" : "dest", + "source_port_field" : "srcPort", + "destination_port_field" : "destPort", + "iana_protocol_number_field" : "iana_number", + "protocol_field" : "protocol", + "icmp_type_field" : "icmp", + "icmp_code_field" : "code", + "seed" : 0, + "target_field" : "community_id", + "ignore_missing" : false + } + } + ] + } + - match: { acknowledged: true } + +--- +"Test community_id processor with ignore_missing": + - skip: + version: " - 2.12.99" + reason: "introduced in 2.13" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "community_id" : { + "source_ip_field" : "source", + "destination_ip_field" : "dest", + "source_port_field" : "srcPort", + "destination_port_field" : "destPort", + "protocol_field" : "protocol" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /ip address in the field \[source\] is null or empty/ + index: + index: test + id: 1 + pipeline: "1" + body: { + dest: "1.1.1.1", + protocol: "tcp" + } + + - do: + catch: /ip address in the field \[dest\] is null or empty/ + index: + index: test + id: 1 + pipeline: "1" + body: { + source: "1.1.1.1", + protocol: "tcp" + } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "community_id" : { + "source_ip_field" : "source", + "destination_ip_field" : "dest", + "source_port_field" : "srcPort", + "destination_port_field" : "destPort", + "protocol_field" : "protocol", + "ignore_missing" : true + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + source: "1.1.1.1", + protocol: "tcp" + } + - do: + get: + index: test + id: 1 + - match: { _source: { source: "1.1.1.1", protocol: "tcp" } } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + dest: "2.2.2.2", + protocol: "tcp" + } + - do: + get: + index: test + id: 1 + - match: { _source: { dest: "2.2.2.2", protocol: "tcp" } } + +
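# expected values in the tests below are Community ID v1 hashes: a base64-encoded SHA-1 over the seed and the order-normalized flow tuple, prefixed with the version "1:" +--- +"Test community_id processor for tcp": + - skip: + version: " - 2.12.99" + reason: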
"introduced in 2.13" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "community_id" : { + "source_ip_field" : "source", + "destination_ip_field" : "dest", + "source_port_field" : "srcPort", + "destination_port_field" : "destPort", + "protocol_field" : "protocol" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + source: "66.35.250.204", + dest: "128.232.110.120", + protocol: "tcp", + srcPort: 80, + destPort: 34855 + } + - do: + get: + index: test + id: 1 + - match: { _source.community_id: "1:LQU9qZlK+B5F3KDmev6m5PMibrg=" } + +--- +"Test community_id processor for udp": + - skip: + version: " - 2.12.99" + reason: "introduced in 2.13" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "community_id" : { + "source_ip_field" : "source", + "destination_ip_field" : "dest", + "source_port_field" : "srcPort", + "destination_port_field" : "destPort", + "protocol_field" : "protocol" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + source: "8.8.8.8", + dest: "192.168.1.52", + protocol: "udp", + srcPort: 53, + destPort: 54585 + } + - do: + get: + index: test + id: 1 + - match: { _source.community_id: "1:d/FP5EW3wiY1vCndhwleRRKHowQ=" } + +--- +"Test community_id processor for sctp": + - skip: + version: " - 2.12.99" + reason: "introduced in 2.13" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "community_id" : { + "source_ip_field" : "source", + "destination_ip_field" : "dest", + "source_port_field" : "srcPort", + "destination_port_field" : "destPort", + "protocol_field" : "protocol" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + source: "192.168.170.8", + dest: "192.168.170.56", + protocol: "sctp", + srcPort: 7, + destPort: 7 + } + - do: + get: + index: test + id: 1 + - match: { _source.community_id: "1:MP2EtRCAUIZvTw6MxJHLV7N7JDs=" } + +--- +"Test community_id processor for icmp": + - skip: + version: " - 2.12.99" + reason: "introduced in 2.13" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "community_id" : { + "source_ip_field" : "source", + "destination_ip_field" : "dest", + "icmp_type_field" : "type", + "icmp_code_field" : "code", + "protocol_field" : "protocol" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + source: "192.168.0.89", + dest: "192.168.0.1", + protocol: "icmp", + type: 8, + code: 0 + } + - do: + get: + index: test + id: 1 + - match: { _source.community_id: "1:X0snYXpgwiv9TZtqg64sgzUn6Dk=" } + +--- +"Test community_id processor for icmp-v6": + - skip: + version: " - 2.12.99" + reason: "introduced in 2.13" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "community_id" : { + "source_ip_field" : "source", + "destination_ip_field" : "dest", + "icmp_type_field" : "type", + "icmp_code_field" : "code", + "protocol_field" : "protocol" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + source: "fe80::260:97ff:fe07:69ea", + dest: "ff02::1", + protocol: "ipv6-icmp", + type: 134, + code: 0 + } + - do: + get: + index: test + id: 1 + - match: { _source.community_id: "1:pkvHqCL88/tg1k4cPigmZXUtL00=" } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/90_simulate.yml 
b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/90_simulate.yml index e012a82b15927..edd649a310d42 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/90_simulate.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/90_simulate.yml @@ -976,3 +976,185 @@ teardown: } - match: { error.root_cause.0.type: "illegal_argument_exception" } - match: { error.root_cause.0.reason: "Pipeline processor configured for non-existent pipeline [____pipeline_doesnot_exist___]" } + +--- +"Test simulate with docs containing metadata fields": + - do: + ingest.simulate: + body: > + { + "pipeline": { + "description": "_description", + "processors": [ + { + "set" : { + "field": "field2", + "value": "foo" + } + } + ] + }, + "docs": [ + { + "_index": "index", + "_id": "id", + "_routing": "foo", + "_version": 100, + "_if_seq_no": 12333333333333333, + "_if_primary_term": 1, + "_source": { + "foo": "bar" + } + } + ] + } + + - length: { docs: 1 } + - match: { docs.0.doc._index: "index" } + - match: { docs.0.doc._id: "id" } + - match: { docs.0.doc._routing: "foo" } + - match: { docs.0.doc._version: "100" } + - match: { docs.0.doc._if_seq_no: "12333333333333333" } + - match: { docs.0.doc._if_primary_term: "1" } + - match: { docs.0.doc._source.foo: "bar" } + + - do: + catch: bad_request + ingest.simulate: + body: > + { + "pipeline": { + "description": "_description", + "processors": [ + { + "set" : { + "field" : "field2", + "value": "foo" + } + } + ] + }, + "docs": [ + { + "_index": "index", + "_id": "id", + "_routing": "foo", + "_version": "bar", + "_source": { + "foo": "bar" + } + } + ] + } + - match: { status: 400 } + - match: { error.root_cause.0.type: "illegal_argument_exception" } + - match: { error.root_cause.0.reason: "Failed to parse parameter [_version], only int or long is accepted" } + + - do: + catch: bad_request + ingest.simulate: + body: > + { + "pipeline": { + "description": "_description", + "processors": [ + { + "set" : { + "field" : "field2", + "value": "foo" + } + } + ] + }, + "docs": [ + { + "_index": "index", + "_id": "id", + "_routing": "foo", + "_if_seq_no": "123", + "_source": { + "foo": "bar" + } + } + ] + } + - match: { status: 400 } + - match: { error.root_cause.0.type: "illegal_argument_exception" } + - match: { error.root_cause.0.reason: "Failed to parse parameter [_if_seq_no], only int or long is accepted" } + + - do: + catch: bad_request + ingest.simulate: + body: > + { + "pipeline": { + "description": "_description", + "processors": [ + { + "set" : { + "field" : "field2", + "value": "foo" + } + } + ] + }, + "docs": [ + { + "_index": "index", + "_id": "id", + "_routing": "foo", + "_if_primary_term": "1", + "_source": { + "foo": "bar" + } + } + ] + } + - match: { status: 400 } + - match: { error.root_cause.0.type: "illegal_argument_exception" } + - match: { error.root_cause.0.reason: "Failed to parse parameter [_if_primary_term], only int or long is accepted" } + +--- +"Test simulate with pipeline with ignore failure and cyclic field assignments in script": + - do: + ingest.simulate: + verbose: true + body: > + { + "pipeline": { + "description": "_description", + "processors": [ + { + "script" : { + "ignore_failure" : true, + "lang": "painless", + "source": "ctx.foo['foo']=ctx.foo;ctx.tag='recursive'" + } + }, + { + "script" : { + "lang": "painless", + "source" : "ctx.target_field = Processors.uppercase(ctx.foo.foo)" + } + } + ] + }, + "docs": [ + { + "_source": { + "foo": { + "foo": "bar" + } + } + 
} + ] + } + - length: { docs: 1 } + - length: { docs.0.processor_results: 2 } + - match: { docs.0.processor_results.0.status: "error_ignored" } + - match: { docs.0.processor_results.0.ignored_error.error.type: "illegal_argument_exception" } + - match: { docs.0.processor_results.0.doc._source.tag: null } + - match: { docs.0.processor_results.1.doc._source.target_field: "BAR" } + - match: { docs.0.processor_results.1.doc._source.foo.foo: "bar" } + - match: { docs.0.processor_results.1.status: "success" } + - match: { docs.0.processor_results.1.processor_type: "script" } diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle index 35b7de8f83164..c0ff155ce1038 100644 --- a/modules/ingest-geoip/build.gradle +++ b/modules/ingest-geoip/build.gradle @@ -39,9 +39,9 @@ opensearchplugin { } dependencies { - api('com.maxmind.geoip2:geoip2:4.0.1') + api('com.maxmind.geoip2:geoip2:4.2.0') // geoip2 dependencies: - api('com.maxmind.db:maxmind-db:3.0.0') + api('com.maxmind.db:maxmind-db:3.1.0') api("com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}") api("com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}") diff --git a/modules/ingest-geoip/licenses/geoip2-4.0.1.jar.sha1 b/modules/ingest-geoip/licenses/geoip2-4.0.1.jar.sha1 deleted file mode 100644 index 0722ebf08e137..0000000000000 --- a/modules/ingest-geoip/licenses/geoip2-4.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f2a9b0ebd91b73a409a526b4d939f5ab8f4a1a87 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/geoip2-4.2.0.jar.sha1 b/modules/ingest-geoip/licenses/geoip2-4.2.0.jar.sha1 new file mode 100644 index 0000000000000..b6bfeeb9da60b --- /dev/null +++ b/modules/ingest-geoip/licenses/geoip2-4.2.0.jar.sha1 @@ -0,0 +1 @@ +78ff932dc13ac41dd1f0fd9e7405a7f4ad815ce0 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-annotations-2.15.2.jar.sha1 b/modules/ingest-geoip/licenses/jackson-annotations-2.15.2.jar.sha1 deleted file mode 100644 index f63416ddb8ceb..0000000000000 --- a/modules/ingest-geoip/licenses/jackson-annotations-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4724a65ac8e8d156a24898d50fd5dbd3642870b8 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-annotations-2.16.2.jar.sha1 b/modules/ingest-geoip/licenses/jackson-annotations-2.16.2.jar.sha1 new file mode 100644 index 0000000000000..d7dfc5ff83706 --- /dev/null +++ b/modules/ingest-geoip/licenses/jackson-annotations-2.16.2.jar.sha1 @@ -0,0 +1 @@ +dfcd11c847ea7276aa073c25f5fe8ee361748d7f \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.15.2.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.15.2.jar.sha1 deleted file mode 100644 index f16d80af8dce6..0000000000000 --- a/modules/ingest-geoip/licenses/jackson-databind-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9353b021f10c307c00328f52090de2bdb4b6ff9c \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.16.2.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.16.2.jar.sha1 new file mode 100644 index 0000000000000..86998b4558461 --- /dev/null +++ b/modules/ingest-geoip/licenses/jackson-databind-2.16.2.jar.sha1 @@ -0,0 +1 @@ +7fda67535b54d74eebf6157682b835c847410932 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/maxmind-db-3.0.0.jar.sha1 b/modules/ingest-geoip/licenses/maxmind-db-3.0.0.jar.sha1 deleted file mode 100644 index 89b0c4c49b450..0000000000000 --- 
a/modules/ingest-geoip/licenses/maxmind-db-3.0.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -79dcda62168a77caf595f8fda101baa17fef125d \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/maxmind-db-3.1.0.jar.sha1 b/modules/ingest-geoip/licenses/maxmind-db-3.1.0.jar.sha1 new file mode 100644 index 0000000000000..9db7c7319af0b --- /dev/null +++ b/modules/ingest-geoip/licenses/maxmind-db-3.1.0.jar.sha1 @@ -0,0 +1 @@ +2008992ab45d61c7b28a18678b5df82272529da3 \ No newline at end of file diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/opensearch/ingest/geoip/GeoIpProcessorNonIngestNodeIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/opensearch/ingest/geoip/GeoIpProcessorNonIngestNodeIT.java index b947f929fce76..a10d20c998085 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/opensearch/ingest/geoip/GeoIpProcessorNonIngestNodeIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/opensearch/ingest/geoip/GeoIpProcessorNonIngestNodeIT.java @@ -36,17 +36,17 @@ import org.opensearch.action.index.IndexRequest; import org.opensearch.action.index.IndexResponse; import org.opensearch.action.ingest.PutPipelineRequest; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.ingest.IngestService; import org.opensearch.plugins.Plugin; -import org.opensearch.core.rest.RestStatus; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.NodeRoles; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.StreamsUtils; import java.io.ByteArrayInputStream; @@ -158,7 +158,7 @@ public void testLazyLoading() throws IOException { builder.endObject(); bytes = BytesReference.bytes(builder); } - assertAcked(client().admin().cluster().putPipeline(new PutPipelineRequest("geoip", bytes, XContentType.JSON)).actionGet()); + assertAcked(client().admin().cluster().putPipeline(new PutPipelineRequest("geoip", bytes, MediaTypeRegistry.JSON)).actionGet()); // the geo-IP databases should not be loaded on any nodes as they are all non-ingest nodes Arrays.stream(internalCluster().getNodeNames()).forEach(node -> assertDatabaseLoadStatus(node, false)); diff --git a/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/DatabaseReaderLazyLoader.java b/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/DatabaseReaderLazyLoader.java index 05a969daf3836..0c2cde786dd40 100644 --- a/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/DatabaseReaderLazyLoader.java +++ b/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/DatabaseReaderLazyLoader.java @@ -33,6 +33,7 @@ package org.opensearch.ingest.geoip; import com.maxmind.geoip2.DatabaseReader; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.common.CheckedSupplier; diff --git a/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/GeoIpProcessor.java b/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/GeoIpProcessor.java index ebffe61f6e756..b27c0f9fe0b31 100644 --- 
a/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/GeoIpProcessor.java +++ b/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/GeoIpProcessor.java @@ -42,6 +42,7 @@ import com.maxmind.geoip2.record.Country; import com.maxmind.geoip2.record.Location; import com.maxmind.geoip2.record.Subdivision; + import org.opensearch.OpenSearchParseException; import org.opensearch.SpecialPermission; import org.opensearch.common.network.InetAddresses; @@ -217,6 +218,7 @@ Set<Property> getProperties() { return properties; } + @SuppressWarnings("removal") private Map<String, Object> retrieveCityGeoData(InetAddress ipAddress) { SpecialPermission.check(); CityResponse response = AccessController.doPrivileged( @@ -304,6 +306,7 @@ private Map<String, Object> retrieveCityGeoData(InetAddress ipAddress) { return geoData; } + @SuppressWarnings("removal") private Map<String, Object> retrieveCountryGeoData(InetAddress ipAddress) { SpecialPermission.check(); CountryResponse response = AccessController.doPrivileged( @@ -350,6 +353,7 @@ private Map<String, Object> retrieveCountryGeoData(InetAddress ipAddress) { return geoData; } + @SuppressWarnings("removal") private Map<String, Object> retrieveAsnGeoData(InetAddress ipAddress) { SpecialPermission.check(); AsnResponse response = AccessController.doPrivileged( diff --git a/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpModulePlugin.java b/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpModulePlugin.java index 491f55147ef24..524d1719c37c0 100644 --- a/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpModulePlugin.java +++ b/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpModulePlugin.java @@ -37,6 +37,7 @@ import com.maxmind.db.Reader; import com.maxmind.geoip2.DatabaseReader; import com.maxmind.geoip2.model.AbstractResponse; + import org.opensearch.common.Booleans; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.cache.Cache; diff --git a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/GeoIpProcessorFactoryTests.java b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/GeoIpProcessorFactoryTests.java index d459686162cd0..d022f3aba6074 100644 --- a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/GeoIpProcessorFactoryTests.java +++ b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/GeoIpProcessorFactoryTests.java @@ -33,6 +33,7 @@ package org.opensearch.ingest.geoip; import com.carrotsearch.randomizedtesting.generators.RandomPicks; + import org.opensearch.OpenSearchParseException; import org.opensearch.common.Randomness; import org.opensearch.index.VersionType; diff --git a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/GeoIpProcessorTests.java b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/GeoIpProcessorTests.java index 8b94e8cc114ed..5cf7133156d39 100644 --- a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/GeoIpProcessorTests.java +++ b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/GeoIpProcessorTests.java @@ -33,6 +33,7 @@ package org.opensearch.ingest.geoip; import com.maxmind.geoip2.DatabaseReader; + import org.opensearch.common.CheckedSupplier; import org.opensearch.common.io.PathUtils; import org.opensearch.ingest.IngestDocument; diff --git a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpModulePluginTests.java 
b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpModulePluginTests.java index cbaab11bd4346..f6d42d1d65670 100644 --- a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpModulePluginTests.java +++ b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpModulePluginTests.java @@ -33,6 +33,7 @@ package org.opensearch.ingest.geoip; import com.maxmind.geoip2.model.AbstractResponse; + import org.opensearch.common.network.InetAddresses; import org.opensearch.ingest.geoip.IngestGeoIpModulePlugin.GeoIpCache; import org.opensearch.test.OpenSearchTestCase; diff --git a/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/UserAgentParser.java b/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/UserAgentParser.java index b189c8ed8905e..88129ffb922b8 100644 --- a/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/UserAgentParser.java +++ b/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/UserAgentParser.java @@ -34,10 +34,10 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; + import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; @@ -68,7 +68,7 @@ final class UserAgentParser { private void init(InputStream regexStream) throws IOException { // EMPTY is safe here because we don't use namedObject - XContentParser yamlParser = XContentFactory.xContent(XContentType.YAML) + XContentParser yamlParser = XContentType.YAML.xContent() .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, regexStream); XContentParser.Token token = yamlParser.nextToken(); diff --git a/modules/ingest-user-agent/src/test/java/org/opensearch/ingest/useragent/UserAgentProcessorTests.java b/modules/ingest-user-agent/src/test/java/org/opensearch/ingest/useragent/UserAgentProcessorTests.java index 51ff8aae21365..b7dd45b209113 100644 --- a/modules/ingest-user-agent/src/test/java/org/opensearch/ingest/useragent/UserAgentProcessorTests.java +++ b/modules/ingest-user-agent/src/test/java/org/opensearch/ingest/useragent/UserAgentProcessorTests.java @@ -32,8 +32,8 @@ package org.opensearch.ingest.useragent; -import org.opensearch.ingest.RandomDocumentPicks; import org.opensearch.ingest.IngestDocument; +import org.opensearch.ingest.RandomDocumentPicks; import org.opensearch.test.OpenSearchTestCase; import org.junit.BeforeClass; diff --git a/modules/lang-expression/licenses/antlr4-runtime-4.11.1.jar.sha1 b/modules/lang-expression/licenses/antlr4-runtime-4.11.1.jar.sha1 deleted file mode 100644 index f1b328a6de624..0000000000000 --- a/modules/lang-expression/licenses/antlr4-runtime-4.11.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -069214c1de1960040729702eb58deac8827135e7 \ No newline at end of file diff --git a/modules/lang-expression/licenses/antlr4-runtime-4.13.1.jar.sha1 b/modules/lang-expression/licenses/antlr4-runtime-4.13.1.jar.sha1 new file mode 100644 index 0000000000000..e50b9bb646727 --- /dev/null +++ b/modules/lang-expression/licenses/antlr4-runtime-4.13.1.jar.sha1 @@ -0,0 +1 @@ +17125bae1d965624e265ef49552f6465a2bfa307 \ No newline at end of file diff --git 
a/modules/lang-expression/licenses/asm-9.5.jar.sha1 b/modules/lang-expression/licenses/asm-9.5.jar.sha1 deleted file mode 100644 index ea4aa3581dc87..0000000000000 --- a/modules/lang-expression/licenses/asm-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dc6ea1875f4d64fbc85e1691c95b96a3d8569c90 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-9.6.jar.sha1 b/modules/lang-expression/licenses/asm-9.6.jar.sha1 new file mode 100644 index 0000000000000..2d9e6a9d3cfd6 --- /dev/null +++ b/modules/lang-expression/licenses/asm-9.6.jar.sha1 @@ -0,0 +1 @@ +aa205cf0a06dbd8e04ece91c0b37c3f5d567546a \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-commons-9.5.jar.sha1 b/modules/lang-expression/licenses/asm-commons-9.5.jar.sha1 deleted file mode 100644 index 5be792660c19f..0000000000000 --- a/modules/lang-expression/licenses/asm-commons-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -19ab5b5800a3910d30d3a3e64fdb00fd0cb42de0 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-commons-9.6.jar.sha1 b/modules/lang-expression/licenses/asm-commons-9.6.jar.sha1 new file mode 100644 index 0000000000000..a0814f495771f --- /dev/null +++ b/modules/lang-expression/licenses/asm-commons-9.6.jar.sha1 @@ -0,0 +1 @@ +f1a9e5508eff490744144565c47326c8648be309 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-tree-9.5.jar.sha1 b/modules/lang-expression/licenses/asm-tree-9.5.jar.sha1 deleted file mode 100644 index fb42db6a9d15c..0000000000000 --- a/modules/lang-expression/licenses/asm-tree-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fd33c8b6373abaa675be407082fdfda35021254a \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-tree-9.6.jar.sha1 b/modules/lang-expression/licenses/asm-tree-9.6.jar.sha1 new file mode 100644 index 0000000000000..101eb03b4b736 --- /dev/null +++ b/modules/lang-expression/licenses/asm-tree-9.6.jar.sha1 @@ -0,0 +1 @@ +c0cdda9d211e965d2a4448aa3fd86110f2f8c2de \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.11.0-snapshot-8a555eb.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..82a17e2b79290 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +00759eaff8f62b38ba66a05f26ab784c268908d3 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.8.0-snapshot-4373c3b.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 6eaa40708e4ae..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9f8a34fc3d450343ab05ccb5af318a836a6a5fb3 \ No newline at end of file diff --git a/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java b/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java index 6edc8875de4e5..8e15488900e5f 100644 --- a/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java +++ b/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java @@ -32,14 +32,17 @@ package org.opensearch.script.expression; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import 
org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchType; import org.opensearch.action.update.UpdateRequestBuilder; import org.opensearch.common.lucene.search.function.CombineFunction; -import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.query.functionscore.ScoreFunctionBuilders; import org.opensearch.index.query.functionscore.ScriptScoreFunctionBuilder; @@ -53,9 +56,10 @@ import org.opensearch.search.aggregations.pipeline.SimpleValue; import org.opensearch.search.sort.SortBuilders; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -64,6 +68,7 @@ import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.search.aggregations.PipelineAggregatorBuilders.bucketScript; @@ -74,7 +79,19 @@ import static org.hamcrest.Matchers.notNullValue; // TODO: please convert to unit tests! -public class MoreExpressionIT extends OpenSearchIntegTestCase { +public class MoreExpressionIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public MoreExpressionIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { diff --git a/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/StoredExpressionIT.java b/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/StoredExpressionIT.java index 7527092f4344c..7465fa1e5ddbe 100644 --- a/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/StoredExpressionIT.java +++ b/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/StoredExpressionIT.java @@ -32,24 +32,41 @@ package org.opensearch.script.expression; -import org.opensearch.core.common.bytes.BytesArray; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.plugins.Plugin; import org.opensearch.script.Script; import org.opensearch.script.ScriptType; import org.opensearch.search.aggregations.AggregationBuilders; import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.hamcrest.Matchers.containsString; //TODO: please convert to unit tests! -public class StoredExpressionIT extends OpenSearchIntegTestCase { +public class StoredExpressionIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public StoredExpressionIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override protected Settings nodeSettings(int nodeOrdinal) { Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal)); @@ -67,9 +84,9 @@ public void testAllOpsDisabledIndexedScripts() throws IOException { .cluster() .preparePutStoredScript() .setId("script1") - .setContent(new BytesArray("{\"script\": {\"lang\": \"expression\", \"source\": \"2\"} }"), XContentType.JSON) + .setContent(new BytesArray("{\"script\": {\"lang\": \"expression\", \"source\": \"2\"} }"), MediaTypeRegistry.JSON) .get(); - client().prepareIndex("test").setId("1").setSource("{\"theField\":\"foo\"}", XContentType.JSON).get(); + client().prepareIndex("test").setId("1").setSource("{\"theField\":\"foo\"}", MediaTypeRegistry.JSON).get(); try { client().prepareUpdate("test", "1").setScript(new Script(ScriptType.STORED, null, "script1", Collections.emptyMap())).get(); fail("update script should have been rejected"); diff --git a/modules/lang-expression/src/main/java/org/opensearch/script/expression/CountMethodValueSource.java b/modules/lang-expression/src/main/java/org/opensearch/script/expression/CountMethodValueSource.java index 97ea247c15bbd..77580f88c16f7 100644 --- a/modules/lang-expression/src/main/java/org/opensearch/script/expression/CountMethodValueSource.java +++ 
b/modules/lang-expression/src/main/java/org/opensearch/script/expression/CountMethodValueSource.java @@ -34,8 +34,8 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DoubleValues; -import org.opensearch.index.fielddata.LeafNumericFieldData; import org.opensearch.index.fielddata.IndexFieldData; +import org.opensearch.index.fielddata.LeafNumericFieldData; import org.opensearch.index.fielddata.SortedNumericDoubleValues; import java.io.IOException; diff --git a/modules/lang-expression/src/main/java/org/opensearch/script/expression/DateMethodValueSource.java b/modules/lang-expression/src/main/java/org/opensearch/script/expression/DateMethodValueSource.java index 6aa7e640c77cb..31be3cc3b1264 100644 --- a/modules/lang-expression/src/main/java/org/opensearch/script/expression/DateMethodValueSource.java +++ b/modules/lang-expression/src/main/java/org/opensearch/script/expression/DateMethodValueSource.java @@ -34,8 +34,8 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DoubleValues; -import org.opensearch.index.fielddata.LeafNumericFieldData; import org.opensearch.index.fielddata.IndexFieldData; +import org.opensearch.index.fielddata.LeafNumericFieldData; import org.opensearch.index.fielddata.NumericDoubleValues; import org.opensearch.search.MultiValueMode; diff --git a/modules/lang-expression/src/main/java/org/opensearch/script/expression/DateObjectValueSource.java b/modules/lang-expression/src/main/java/org/opensearch/script/expression/DateObjectValueSource.java index 1ab778008e8c3..69d7b5ad7f769 100644 --- a/modules/lang-expression/src/main/java/org/opensearch/script/expression/DateObjectValueSource.java +++ b/modules/lang-expression/src/main/java/org/opensearch/script/expression/DateObjectValueSource.java @@ -34,8 +34,8 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DoubleValues; -import org.opensearch.index.fielddata.LeafNumericFieldData; import org.opensearch.index.fielddata.IndexFieldData; +import org.opensearch.index.fielddata.LeafNumericFieldData; import org.opensearch.index.fielddata.NumericDoubleValues; import org.opensearch.search.MultiValueMode; import org.joda.time.DateTimeZone; diff --git a/modules/lang-expression/src/main/java/org/opensearch/script/expression/EmptyMemberValueSource.java b/modules/lang-expression/src/main/java/org/opensearch/script/expression/EmptyMemberValueSource.java index da400b27b0fd3..1fba4241f80f3 100644 --- a/modules/lang-expression/src/main/java/org/opensearch/script/expression/EmptyMemberValueSource.java +++ b/modules/lang-expression/src/main/java/org/opensearch/script/expression/EmptyMemberValueSource.java @@ -34,8 +34,8 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DoubleValues; -import org.opensearch.index.fielddata.LeafNumericFieldData; import org.opensearch.index.fielddata.IndexFieldData; +import org.opensearch.index.fielddata.LeafNumericFieldData; import org.opensearch.index.fielddata.SortedNumericDoubleValues; import java.io.IOException; diff --git a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionAggregationScript.java b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionAggregationScript.java index ba131473be4fb..a2af636ffdc8a 100644 --- a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionAggregationScript.java +++ 
b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionAggregationScript.java @@ -32,7 +32,6 @@ package org.opensearch.script.expression; -import java.io.IOException; import org.apache.lucene.expressions.Bindings; import org.apache.lucene.expressions.Expression; import org.apache.lucene.expressions.SimpleBindings; @@ -42,6 +41,8 @@ import org.opensearch.script.AggregationScript; import org.opensearch.script.GeneralScriptException; +import java.io.IOException; + /** * A bridge to evaluate an {@link Expression} against {@link Bindings} in the context * of a {@link AggregationScript}. @@ -52,9 +53,9 @@ class ExpressionAggregationScript implements AggregationScript.LeafFactory { final SimpleBindings bindings; final DoubleValuesSource source; final boolean needsScore; - final ReplaceableConstDoubleValueSource specialValue; // _value + final PerThreadReplaceableConstDoubleValueSource specialValue; // _value - ExpressionAggregationScript(Expression e, SimpleBindings b, boolean n, ReplaceableConstDoubleValueSource v) { + ExpressionAggregationScript(Expression e, SimpleBindings b, boolean n, PerThreadReplaceableConstDoubleValueSource v) { exprScript = e; bindings = b; source = exprScript.getDoubleValuesSource(bindings); diff --git a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionModulePlugin.java b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionModulePlugin.java index 7640fcf7a3bc5..5a9462a55683a 100644 --- a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionModulePlugin.java +++ b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionModulePlugin.java @@ -32,14 +32,14 @@ package org.opensearch.script.expression; -import java.util.Collection; - import org.opensearch.common.settings.Settings; import org.opensearch.plugins.Plugin; import org.opensearch.plugins.ScriptPlugin; import org.opensearch.script.ScriptContext; import org.opensearch.script.ScriptEngine; +import java.util.Collection; + public class ExpressionModulePlugin extends Plugin implements ScriptPlugin { @Override diff --git a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionNumberSortScript.java b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionNumberSortScript.java index fc50e7355f492..72710591fece8 100644 --- a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionNumberSortScript.java +++ b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionNumberSortScript.java @@ -32,7 +32,6 @@ package org.opensearch.script.expression; -import java.io.IOException; import org.apache.lucene.expressions.Bindings; import org.apache.lucene.expressions.Expression; import org.apache.lucene.expressions.SimpleBindings; @@ -42,6 +41,8 @@ import org.opensearch.script.GeneralScriptException; import org.opensearch.script.NumberSortScript; +import java.io.IOException; + /** * A bridge to evaluate an {@link Expression} against {@link Bindings} in the context * of a {@link NumberSortScript}. 
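The MoreExpressionIT and StoredExpressionIT conversions above follow a single pattern: the test extends ParameterizedStaticSettingsOpenSearchIntegTestCase, receives the node Settings through its constructor, and declares a @ParametersFactory so the whole suite runs once with concurrent segment search disabled and once with it enabled. A minimal sketch of that pattern, using the hypothetical test name ExampleIT (the base class, annotation, and setting key are the ones used in the hunks above):

import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;

import org.opensearch.common.settings.Settings;
import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;

import java.util.Arrays;
import java.util.Collection;

import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;

public class ExampleIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase {

    public ExampleIT(Settings staticSettings) {
        // The base class applies these static settings to the nodes of the test cluster
        super(staticSettings);
    }

    @ParametersFactory
    public static Collection<Object[]> parameters() {
        // Each Object[] becomes one complete run of the suite: sequential, then concurrent segment search
        return Arrays.asList(
            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
        );
    }

    public void testRunsUnderBothModes() {
        // Any test body here executes once per parameter set
    }
}

Because the toggle lives in static node settings rather than in each test, every existing test body is exercised under both execution modes without further changes.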
diff --git a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScoreScript.java b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScoreScript.java index 6be299146a181..3932559f7685c 100644 --- a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScoreScript.java +++ b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScoreScript.java @@ -66,7 +66,7 @@ public boolean needs_score() { @Override public ScoreScript newInstance(final LeafReaderContext leaf) throws IOException { - return new ScoreScript(null, null, null) { + return new ScoreScript(null, null, null, null) { // Fake the scorer until setScorer is called. DoubleValues values = source.getValues(leaf, new DoubleValues() { @Override diff --git a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java index 1c3dc69359952..0520177b72b62 100644 --- a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java +++ b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java @@ -37,6 +37,7 @@ import org.apache.lucene.expressions.js.JavascriptCompiler; import org.apache.lucene.expressions.js.VariableContext; import org.apache.lucene.search.DoubleValuesSource; +import org.apache.lucene.search.IndexSearcher; import org.opensearch.SpecialPermission; import org.opensearch.common.Nullable; import org.opensearch.index.fielddata.IndexFieldData; @@ -72,7 +73,7 @@ /** * Provides the infrastructure for Lucene expressions as a scripting language for OpenSearch. - * + * <p> * Only contexts returning numeric types or {@link Object} are supported. 
*/ public class ExpressionScriptEngine implements ScriptEngine { @@ -110,7 +111,7 @@ public FilterScript.LeafFactory newFactory(Map<String, Object> params, SearchLoo contexts.put(ScoreScript.CONTEXT, (Expression expr) -> new ScoreScript.Factory() { @Override - public ScoreScript.LeafFactory newFactory(Map<String, Object> params, SearchLookup lookup) { + public ScoreScript.LeafFactory newFactory(Map<String, Object> params, SearchLookup lookup, IndexSearcher indexSearcher) { return newScoreScript(expr, lookup, params); } @@ -169,6 +170,7 @@ public String getType() { return NAME; } + @SuppressWarnings("removal") @Override public <T> T compile(String scriptName, String scriptSource, ScriptContext<T> context, Map<String, String> params) { // classloader created here @@ -315,14 +317,14 @@ private static AggregationScript.LeafFactory newAggregationScript( // instead of complicating SimpleBindings (which should stay simple) SimpleBindings bindings = new SimpleBindings(); boolean needsScores = false; - ReplaceableConstDoubleValueSource specialValue = null; + PerThreadReplaceableConstDoubleValueSource specialValue = null; for (String variable : expr.variables) { try { if (variable.equals("_score")) { bindings.add("_score", DoubleValuesSource.SCORES); needsScores = true; } else if (variable.equals("_value")) { - specialValue = new ReplaceableConstDoubleValueSource(); + specialValue = new PerThreadReplaceableConstDoubleValueSource(); bindings.add("_value", specialValue); // noop: _value is special for aggregations, and is handled in ExpressionScriptBindings // TODO: if some uses it in a scoring expression, they will get a nasty failure when evaluating...need a @@ -387,7 +389,7 @@ private static ScoreScript.LeafFactory newScoreScript(Expression expr, SearchLoo // NOTE: if we need to do anything complicated with bindings in the future, we can just extend Bindings, // instead of complicating SimpleBindings (which should stay simple) SimpleBindings bindings = new SimpleBindings(); - ReplaceableConstDoubleValueSource specialValue = null; + PerThreadReplaceableConstDoubleValueSource specialValue = null; boolean needsScores = false; for (String variable : expr.variables) { try { @@ -395,7 +397,7 @@ private static ScoreScript.LeafFactory newScoreScript(Expression expr, SearchLoo bindings.add("_score", DoubleValuesSource.SCORES); needsScores = true; } else if (variable.equals("_value")) { - specialValue = new ReplaceableConstDoubleValueSource(); + specialValue = new PerThreadReplaceableConstDoubleValueSource(); bindings.add("_value", specialValue); // noop: _value is special for aggregations, and is handled in ExpressionScriptBindings // TODO: if some uses it in a scoring expression, they will get a nasty failure when evaluating...need a diff --git a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionTermSetQueryScript.java b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionTermSetQueryScript.java index 0c14a45804c3e..39be670b7e303 100644 --- a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionTermSetQueryScript.java +++ b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionTermSetQueryScript.java @@ -32,7 +32,6 @@ package org.opensearch.script.expression; -import java.io.IOException; import org.apache.lucene.expressions.Bindings; import org.apache.lucene.expressions.Expression; import org.apache.lucene.expressions.SimpleBindings; @@ -42,6 +41,8 @@ import 
org.opensearch.script.GeneralScriptException; import org.opensearch.script.TermsSetQueryScript; +import java.io.IOException; + /** * A bridge to evaluate an {@link Expression} against {@link Bindings} in the context * of a {@link TermsSetQueryScript}. diff --git a/modules/lang-expression/src/main/java/org/opensearch/script/expression/FieldDataValueSource.java b/modules/lang-expression/src/main/java/org/opensearch/script/expression/FieldDataValueSource.java index c0488a267507c..21f9d0a94b0c9 100644 --- a/modules/lang-expression/src/main/java/org/opensearch/script/expression/FieldDataValueSource.java +++ b/modules/lang-expression/src/main/java/org/opensearch/script/expression/FieldDataValueSource.java @@ -35,8 +35,8 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.queries.function.ValueSource; import org.apache.lucene.search.DoubleValues; -import org.opensearch.index.fielddata.LeafNumericFieldData; import org.opensearch.index.fielddata.IndexFieldData; +import org.opensearch.index.fielddata.LeafNumericFieldData; import org.opensearch.index.fielddata.NumericDoubleValues; import org.opensearch.search.MultiValueMode; diff --git a/modules/lang-expression/src/main/java/org/opensearch/script/expression/GeoLatitudeValueSource.java b/modules/lang-expression/src/main/java/org/opensearch/script/expression/GeoLatitudeValueSource.java index b167393d1b864..3ae45503657be 100644 --- a/modules/lang-expression/src/main/java/org/opensearch/script/expression/GeoLatitudeValueSource.java +++ b/modules/lang-expression/src/main/java/org/opensearch/script/expression/GeoLatitudeValueSource.java @@ -34,8 +34,8 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DoubleValues; -import org.opensearch.index.fielddata.LeafGeoPointFieldData; import org.opensearch.index.fielddata.IndexFieldData; +import org.opensearch.index.fielddata.LeafGeoPointFieldData; import org.opensearch.index.fielddata.MultiGeoPointValues; import java.io.IOException; diff --git a/modules/lang-expression/src/main/java/org/opensearch/script/expression/GeoLongitudeValueSource.java b/modules/lang-expression/src/main/java/org/opensearch/script/expression/GeoLongitudeValueSource.java index 10d836b7d219f..aa67b11833972 100644 --- a/modules/lang-expression/src/main/java/org/opensearch/script/expression/GeoLongitudeValueSource.java +++ b/modules/lang-expression/src/main/java/org/opensearch/script/expression/GeoLongitudeValueSource.java @@ -34,8 +34,8 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DoubleValues; -import org.opensearch.index.fielddata.LeafGeoPointFieldData; import org.opensearch.index.fielddata.IndexFieldData; +import org.opensearch.index.fielddata.LeafGeoPointFieldData; import org.opensearch.index.fielddata.MultiGeoPointValues; import java.io.IOException; diff --git a/modules/lang-expression/src/main/java/org/opensearch/script/expression/PerThreadReplaceableConstDoubleValueSource.java b/modules/lang-expression/src/main/java/org/opensearch/script/expression/PerThreadReplaceableConstDoubleValueSource.java new file mode 100644 index 0000000000000..40bb957c248f2 --- /dev/null +++ b/modules/lang-expression/src/main/java/org/opensearch/script/expression/PerThreadReplaceableConstDoubleValueSource.java @@ -0,0 +1,110 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.script.expression; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.DoubleValues; +import org.apache.lucene.search.DoubleValuesSource; +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.IndexSearcher; + +import java.io.IOException; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +/** + * A {@link DoubleValuesSource} which has a stub {@link DoubleValues} that holds a dynamically replaceable constant double. This is made + * thread-safe for the concurrent segment search use case by keeping the {@link DoubleValues} per thread. Any update to the value happens in the + * thread-specific {@link DoubleValues} instance. + */ +final class PerThreadReplaceableConstDoubleValueSource extends DoubleValuesSource { + // Multiple slices can be processed by the same thread but that will be sequential, so keeping the values per thread is fine + final Map<Long, ReplaceableConstDoubleValues> perThreadDoubleValues; + + PerThreadReplaceableConstDoubleValueSource() { + perThreadDoubleValues = new ConcurrentHashMap<>(); + } + + @Override + public DoubleValues getValues(LeafReaderContext ctx, DoubleValues scores) throws IOException { + return perThreadDoubleValues.computeIfAbsent(Thread.currentThread().getId(), threadId -> new ReplaceableConstDoubleValues()); + } + + @Override + public boolean needsScores() { + return false; + } + + @Override + public Explanation explain(LeafReaderContext ctx, int docId, Explanation scoreExplanation) throws IOException { + final ReplaceableConstDoubleValues currentFv = perThreadDoubleValues.computeIfAbsent( + Thread.currentThread().getId(), + threadId -> new ReplaceableConstDoubleValues() + ); + if (currentFv.advanceExact(docId)) return Explanation.match((float) currentFv.doubleValue(), "ReplaceableConstDoubleValues"); + else return Explanation.noMatch("ReplaceableConstDoubleValues"); + } + + @Override + public boolean equals(Object o) { + return o == this; + } + + @Override + public int hashCode() { + return System.identityHashCode(this); + } + + public void setValue(double v) { + final ReplaceableConstDoubleValues currentFv = perThreadDoubleValues.computeIfAbsent( + Thread.currentThread().getId(), + threadId -> new ReplaceableConstDoubleValues() + ); + currentFv.setValue(v); + } + + @Override + public String toString() { + return getClass().getSimpleName(); + } + + @Override + public boolean isCacheable(LeafReaderContext ctx) { + return false; + } + + @Override + public DoubleValuesSource rewrite(IndexSearcher reader) { + return this; + } +} diff --git 
a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ReplaceableConstDoubleValueSource.java b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ReplaceableConstDoubleValueSource.java deleted file mode 100644 index 28e4707a07192..0000000000000 --- a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ReplaceableConstDoubleValueSource.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.script.expression; - -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.DoubleValues; -import org.apache.lucene.search.DoubleValuesSource; -import org.apache.lucene.search.Explanation; -import org.apache.lucene.search.IndexSearcher; - -import java.io.IOException; - -/** - * A {@link DoubleValuesSource} which has a stub {@link DoubleValues} that holds a dynamically replaceable constant double. 
- */ -final class ReplaceableConstDoubleValueSource extends DoubleValuesSource { - final ReplaceableConstDoubleValues fv; - - ReplaceableConstDoubleValueSource() { - fv = new ReplaceableConstDoubleValues(); - } - - @Override - public DoubleValues getValues(LeafReaderContext ctx, DoubleValues scores) throws IOException { - return fv; - } - - @Override - public boolean needsScores() { - return false; - } - - @Override - public Explanation explain(LeafReaderContext ctx, int docId, Explanation scoreExplanation) throws IOException { - if (fv.advanceExact(docId)) return Explanation.match((float) fv.doubleValue(), "ReplaceableConstDoubleValues"); - else return Explanation.noMatch("ReplaceableConstDoubleValues"); - } - - @Override - public boolean equals(Object o) { - return o == this; - } - - @Override - public int hashCode() { - return System.identityHashCode(this); - } - - public void setValue(double v) { - fv.setValue(v); - } - - @Override - public String toString() { - return getClass().getSimpleName(); - } - - @Override - public boolean isCacheable(LeafReaderContext ctx) { - return false; - } - - @Override - public DoubleValuesSource rewrite(IndexSearcher reader) { - return this; - } -} diff --git a/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionFieldScriptTests.java b/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionFieldScriptTests.java index d0941cbc9452f..d7be890014add 100644 --- a/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionFieldScriptTests.java +++ b/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionFieldScriptTests.java @@ -47,8 +47,8 @@ import java.util.Collections; import static org.hamcrest.Matchers.equalTo; -import static org.mockito.Mockito.anyInt; import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyInt; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -77,7 +77,7 @@ public void setUp() throws Exception { when(fieldData.load(any())).thenReturn(atomicFieldData); service = new ExpressionScriptEngine(); - lookup = new SearchLookup(mapperService, (ignored, lookup) -> fieldData); + lookup = new SearchLookup(mapperService, (ignored, lookup) -> fieldData, SearchLookup.UNKNOWN_SHARD_ID); } private FieldScript.LeafFactory compile(String expression) { diff --git a/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionNumberSortScriptTests.java b/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionNumberSortScriptTests.java index f3559da59f992..94a422503d6bd 100644 --- a/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionNumberSortScriptTests.java +++ b/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionNumberSortScriptTests.java @@ -32,11 +32,8 @@ package org.opensearch.script.expression; -import java.io.IOException; -import java.text.ParseException; -import java.util.Collections; -import org.opensearch.index.fielddata.LeafNumericFieldData; import org.opensearch.index.fielddata.IndexNumericFieldData; +import org.opensearch.index.fielddata.LeafNumericFieldData; import org.opensearch.index.fielddata.SortedNumericDoubleValues; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.NumberFieldMapper.NumberFieldType; @@ -46,8 +43,12 @@ import org.opensearch.search.lookup.SearchLookup; import org.opensearch.test.OpenSearchTestCase; -import static 
org.mockito.Mockito.anyInt; +import java.io.IOException; +import java.text.ParseException; +import java.util.Collections; + import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyInt; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -76,7 +77,7 @@ public void setUp() throws Exception { when(fieldData.load(any())).thenReturn(atomicFieldData); service = new ExpressionScriptEngine(); - lookup = new SearchLookup(mapperService, (ignored, lookup) -> fieldData); + lookup = new SearchLookup(mapperService, (ignored, lookup) -> fieldData, SearchLookup.UNKNOWN_SHARD_ID); } private NumberSortScript.LeafFactory compile(String expression) { diff --git a/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionTermsSetQueryTests.java b/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionTermsSetQueryTests.java index af7fc580f8a65..a1d6df80715be 100644 --- a/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionTermsSetQueryTests.java +++ b/modules/lang-expression/src/test/java/org/opensearch/script/expression/ExpressionTermsSetQueryTests.java @@ -32,11 +32,8 @@ package org.opensearch.script.expression; -import java.io.IOException; -import java.text.ParseException; -import java.util.Collections; -import org.opensearch.index.fielddata.LeafNumericFieldData; import org.opensearch.index.fielddata.IndexNumericFieldData; +import org.opensearch.index.fielddata.LeafNumericFieldData; import org.opensearch.index.fielddata.SortedNumericDoubleValues; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.NumberFieldMapper.NumberFieldType; @@ -46,8 +43,12 @@ import org.opensearch.search.lookup.SearchLookup; import org.opensearch.test.OpenSearchTestCase; -import static org.mockito.Mockito.anyInt; +import java.io.IOException; +import java.text.ParseException; +import java.util.Collections; + import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyInt; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -76,7 +77,7 @@ public void setUp() throws Exception { when(fieldData.load(any())).thenReturn(atomicFieldData); service = new ExpressionScriptEngine(); - lookup = new SearchLookup(mapperService, (ignored, lookup) -> fieldData); + lookup = new SearchLookup(mapperService, (ignored, lookup) -> fieldData, SearchLookup.UNKNOWN_SHARD_ID); } private TermsSetQueryScript.LeafFactory compile(String expression) { diff --git a/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/MultiSearchTemplateIT.java b/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/MultiSearchTemplateIT.java index fbb8ebdec384c..f7abc220e75d8 100644 --- a/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/MultiSearchTemplateIT.java +++ b/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/MultiSearchTemplateIT.java @@ -32,13 +32,15 @@ package org.opensearch.script.mustache; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchRequest; -import org.opensearch.common.Strings; +import org.opensearch.common.settings.Settings; import org.opensearch.index.IndexNotFoundException; import org.opensearch.plugins.Plugin; import org.opensearch.script.ScriptType; -import org.opensearch.test.OpenSearchIntegTestCase; 
+import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -47,6 +49,7 @@ import java.util.Map; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.equalTo; @@ -54,7 +57,19 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.core.Is.is; -public class MultiSearchTemplateIT extends OpenSearchIntegTestCase { +public class MultiSearchTemplateIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public MultiSearchTemplateIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } @Override protected Collection<Class<? extends Plugin>> nodePlugins() { @@ -72,15 +87,14 @@ public void testBasic() throws Exception { } indexRandom(true, indexRequestBuilders); - final String template = Strings.toString( - jsonBuilder().startObject() - .startObject("query") - .startObject("{{query_type}}") - .field("{{field_name}}", "{{field_value}}") - .endObject() - .endObject() - .endObject() - ); + final String template = jsonBuilder().startObject() + .startObject("query") + .startObject("{{query_type}}") + .field("{{field_name}}", "{{field_value}}") + .endObject() + .endObject() + .endObject() + .toString(); MultiSearchTemplateRequest multiRequest = new MultiSearchTemplateRequest(); diff --git a/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/SearchTemplateIT.java b/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/SearchTemplateIT.java index d0a0b7bcd3e3e..fb3a26ca153da 100644 --- a/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/SearchTemplateIT.java +++ b/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/SearchTemplateIT.java @@ -35,9 +35,9 @@ import org.opensearch.action.admin.cluster.storedscripts.GetStoredScriptResponse; import org.opensearch.action.bulk.BulkRequestBuilder; import org.opensearch.action.search.SearchRequest; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.plugins.Plugin; import org.opensearch.script.ScriptType; import org.opensearch.test.OpenSearchSingleNodeTestCase; @@ -177,7 +177,7 @@ public void testIndexedTemplateClient() throws Exception { + " }" + "}" ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); @@ -185,11 +185,11 @@ public void testIndexedTemplateClient() throws Exception { assertNotNull(getResponse.getSource()); BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); - bulkRequestBuilder.add(client().prepareIndex("test").setId("1").setSource("{\"theField\":\"foo\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("2").setSource("{\"theField\":\"foo 
2\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("3").setSource("{\"theField\":\"foo 3\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("4").setSource("{\"theField\":\"foo 4\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("5").setSource("{\"theField\":\"bar\"}", XContentType.JSON)); + bulkRequestBuilder.add(client().prepareIndex("test").setId("1").setSource("{\"theField\":\"foo\"}", MediaTypeRegistry.JSON)); + bulkRequestBuilder.add(client().prepareIndex("test").setId("2").setSource("{\"theField\":\"foo 2\"}", MediaTypeRegistry.JSON)); + bulkRequestBuilder.add(client().prepareIndex("test").setId("3").setSource("{\"theField\":\"foo 3\"}", MediaTypeRegistry.JSON)); + bulkRequestBuilder.add(client().prepareIndex("test").setId("4").setSource("{\"theField\":\"foo 4\"}", MediaTypeRegistry.JSON)); + bulkRequestBuilder.add(client().prepareIndex("test").setId("5").setSource("{\"theField\":\"bar\"}", MediaTypeRegistry.JSON)); bulkRequestBuilder.get(); client().admin().indices().prepareRefresh().get(); @@ -224,16 +224,22 @@ public void testIndexedTemplate() throws Exception { + " }" + "}"; - assertAcked(client().admin().cluster().preparePutStoredScript().setId("1a").setContent(new BytesArray(script), XContentType.JSON)); - assertAcked(client().admin().cluster().preparePutStoredScript().setId("2").setContent(new BytesArray(script), XContentType.JSON)); - assertAcked(client().admin().cluster().preparePutStoredScript().setId("3").setContent(new BytesArray(script), XContentType.JSON)); + assertAcked( + client().admin().cluster().preparePutStoredScript().setId("1a").setContent(new BytesArray(script), MediaTypeRegistry.JSON) + ); + assertAcked( + client().admin().cluster().preparePutStoredScript().setId("2").setContent(new BytesArray(script), MediaTypeRegistry.JSON) + ); + assertAcked( + client().admin().cluster().preparePutStoredScript().setId("3").setContent(new BytesArray(script), MediaTypeRegistry.JSON) + ); BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); - bulkRequestBuilder.add(client().prepareIndex("test").setId("1").setSource("{\"theField\":\"foo\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("2").setSource("{\"theField\":\"foo 2\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("3").setSource("{\"theField\":\"foo 3\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("4").setSource("{\"theField\":\"foo 4\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("5").setSource("{\"theField\":\"bar\"}", XContentType.JSON)); + bulkRequestBuilder.add(client().prepareIndex("test").setId("1").setSource("{\"theField\":\"foo\"}", MediaTypeRegistry.JSON)); + bulkRequestBuilder.add(client().prepareIndex("test").setId("2").setSource("{\"theField\":\"foo 2\"}", MediaTypeRegistry.JSON)); + bulkRequestBuilder.add(client().prepareIndex("test").setId("3").setSource("{\"theField\":\"foo 3\"}", MediaTypeRegistry.JSON)); + bulkRequestBuilder.add(client().prepareIndex("test").setId("4").setSource("{\"theField\":\"foo 4\"}", MediaTypeRegistry.JSON)); + bulkRequestBuilder.add(client().prepareIndex("test").setId("5").setSource("{\"theField\":\"bar\"}", MediaTypeRegistry.JSON)); bulkRequestBuilder.get(); client().admin().indices().prepareRefresh().get(); @@ -295,7 +301,7 @@ public void testIndexedTemplateOverwrite() throws Exception { .cluster() 
.preparePutStoredScript() .setId("git01") - .setContent(new BytesArray(query.replace("{{slop}}", Integer.toString(-1))), XContentType.JSON) + .setContent(new BytesArray(query.replace("{{slop}}", Integer.toString(-1))), MediaTypeRegistry.JSON) ); GetStoredScriptResponse getResponse = client().admin().cluster().prepareGetStoredScript("git01").get(); @@ -319,7 +325,7 @@ public void testIndexedTemplateOverwrite() throws Exception { .cluster() .preparePutStoredScript() .setId("git01") - .setContent(new BytesArray(query.replace("{{slop}}", Integer.toString(0))), XContentType.JSON) + .setContent(new BytesArray(query.replace("{{slop}}", Integer.toString(0))), MediaTypeRegistry.JSON) ); SearchTemplateResponse searchResponse = new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest("testindex")) @@ -349,14 +355,14 @@ public void testIndexedTemplateWithArray() throws Exception { + " }\n" + "}"; assertAcked( - client().admin().cluster().preparePutStoredScript().setId("4").setContent(new BytesArray(multiQuery), XContentType.JSON) + client().admin().cluster().preparePutStoredScript().setId("4").setContent(new BytesArray(multiQuery), MediaTypeRegistry.JSON) ); BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); - bulkRequestBuilder.add(client().prepareIndex("test").setId("1").setSource("{\"theField\":\"foo\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("2").setSource("{\"theField\":\"foo 2\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("3").setSource("{\"theField\":\"foo 3\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("4").setSource("{\"theField\":\"foo 4\"}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("test").setId("5").setSource("{\"theField\":\"bar\"}", XContentType.JSON)); + bulkRequestBuilder.add(client().prepareIndex("test").setId("1").setSource("{\"theField\":\"foo\"}", MediaTypeRegistry.JSON)); + bulkRequestBuilder.add(client().prepareIndex("test").setId("2").setSource("{\"theField\":\"foo 2\"}", MediaTypeRegistry.JSON)); + bulkRequestBuilder.add(client().prepareIndex("test").setId("3").setSource("{\"theField\":\"foo 3\"}", MediaTypeRegistry.JSON)); + bulkRequestBuilder.add(client().prepareIndex("test").setId("4").setSource("{\"theField\":\"foo 4\"}", MediaTypeRegistry.JSON)); + bulkRequestBuilder.add(client().prepareIndex("test").setId("5").setSource("{\"theField\":\"bar\"}", MediaTypeRegistry.JSON)); bulkRequestBuilder.get(); client().admin().indices().prepareRefresh().get(); diff --git a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/CustomMustacheFactory.java b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/CustomMustacheFactory.java index a4be60d2b6900..c8d28575fef47 100644 --- a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/CustomMustacheFactory.java +++ b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/CustomMustacheFactory.java @@ -33,6 +33,7 @@ package org.opensearch.script.mustache; import com.fasterxml.jackson.core.io.JsonStringEncoder; + import com.github.mustachejava.Code; import com.github.mustachejava.DefaultMustacheFactory; import com.github.mustachejava.DefaultMustacheVisitor; @@ -44,9 +45,8 @@ import com.github.mustachejava.codes.IterableCode; import com.github.mustachejava.codes.WriteCode; -import org.opensearch.common.Strings; +import org.opensearch.core.xcontent.MediaTypeRegistry; import 
org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentType; import java.io.IOException; import java.io.StringWriter; @@ -215,7 +215,7 @@ protected Function<String, String> createFunction(Object resolved) { if (resolved == null) { return null; } - try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { + try (XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder()) { if (resolved instanceof Iterable) { builder.startArray(); for (Object o : (Iterable<?>) resolved) { @@ -228,7 +228,7 @@ protected Function<String, String> createFunction(Object resolved) { // Do not handle as JSON return oh.stringify(resolved); } - return Strings.toString(builder); + return builder.toString(); } catch (IOException e) { throw new MustacheException("Failed to convert object to JSON", e); } diff --git a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/CustomReflectionObjectHandler.java b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/CustomReflectionObjectHandler.java index 57451a027c5d7..0adac121d77e3 100644 --- a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/CustomReflectionObjectHandler.java +++ b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/CustomReflectionObjectHandler.java @@ -33,16 +33,17 @@ package org.opensearch.script.mustache; import com.github.mustachejava.reflect.ReflectionObjectHandler; -import org.opensearch.common.util.CollectionUtils; + import org.opensearch.common.util.iterable.Iterables; +import org.opensearch.core.common.util.CollectionUtils; import java.lang.reflect.Array; import java.util.AbstractMap; import java.util.Collection; -import java.util.Set; +import java.util.HashMap; import java.util.Iterator; import java.util.Map; -import java.util.HashMap; +import java.util.Set; final class CustomReflectionObjectHandler extends ReflectionObjectHandler { diff --git a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MultiSearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MultiSearchTemplateResponse.java index f31e5be078a28..ead93158b421c 100644 --- a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MultiSearchTemplateResponse.java +++ b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MultiSearchTemplateResponse.java @@ -33,15 +33,15 @@ package org.opensearch.script.mustache; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionResponse; import org.opensearch.action.search.MultiSearchResponse; import org.opensearch.common.Nullable; -import org.opensearch.common.Strings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; @@ -200,6 +200,6 @@ public static MultiSearchTemplateResponse fromXContext(XContentParser parser) { @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, 
this); } } diff --git a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheModulePlugin.java b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheModulePlugin.java index 03be9d7efb2db..6b33ac3b6be08 100644 --- a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheModulePlugin.java +++ b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheModulePlugin.java @@ -33,13 +33,13 @@ package org.opensearch.script.mustache; import org.opensearch.action.ActionRequest; -import org.opensearch.action.ActionResponse; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsFilter; +import org.opensearch.core.action.ActionResponse; import org.opensearch.plugins.ActionPlugin; import org.opensearch.plugins.Plugin; import org.opensearch.plugins.ScriptPlugin; @@ -65,6 +65,7 @@ public ScriptEngine getScriptEngine(Settings settings, Collection<ScriptContext< public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() { return Arrays.asList( new ActionHandler<>(SearchTemplateAction.INSTANCE, TransportSearchTemplateAction.class), + new ActionHandler<>(RenderSearchTemplateAction.INSTANCE, TransportRenderSearchTemplateAction.class), new ActionHandler<>(MultiSearchTemplateAction.INSTANCE, TransportMultiSearchTemplateAction.class) ); } diff --git a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheScriptEngine.java b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheScriptEngine.java index e94dac2a3bfad..842353fdba336 100644 --- a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheScriptEngine.java +++ b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheScriptEngine.java @@ -34,6 +34,7 @@ import com.github.mustachejava.Mustache; import com.github.mustachejava.MustacheException; import com.github.mustachejava.MustacheFactory; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -58,7 +59,7 @@ /** * Main entry point handling template registration, compilation and * execution. - * + * <p> * Template handling is based on Mustache. Template handling is a two step * process: First compile the string representing the template, the resulting * {@link Mustache} object can then be re-used for subsequent executions. @@ -127,6 +128,7 @@ private class MustacheExecutableScript extends TemplateScript { this.params = params; } + @SuppressWarnings("removal") @Override public String execute() { final StringWriter writer = new StringWriter(); diff --git a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/RenderSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/RenderSearchTemplateAction.java new file mode 100644 index 0000000000000..1feb916c4ce73 --- /dev/null +++ b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/RenderSearchTemplateAction.java @@ -0,0 +1,21 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.script.mustache; + +import org.opensearch.action.ActionType; + +public class RenderSearchTemplateAction extends ActionType<SearchTemplateResponse> { + + public static final RenderSearchTemplateAction INSTANCE = new RenderSearchTemplateAction(); + public static final String NAME = "indices:data/read/search/template/render"; + + private RenderSearchTemplateAction() { + super(NAME, SearchTemplateResponse::new); + } +} diff --git a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/RestRenderSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/RestRenderSearchTemplateAction.java index 7a94fc45837d9..9ffa2c94cb56f 100644 --- a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/RestRenderSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/RestRenderSearchTemplateAction.java @@ -81,6 +81,6 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client renderRequest.setScript(id); } - return channel -> client.execute(SearchTemplateAction.INSTANCE, renderRequest, new RestToXContentListener<>(channel)); + return channel -> client.execute(RenderSearchTemplateAction.INSTANCE, renderRequest, new RestToXContentListener<>(channel)); } } diff --git a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/SearchTemplateRequest.java b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/SearchTemplateRequest.java index c963ea7ba7da9..d02c5f1efa591 100644 --- a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/SearchTemplateRequest.java +++ b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/SearchTemplateRequest.java @@ -35,16 +35,17 @@ import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.CompositeIndicesRequest; +import org.opensearch.action.IndicesRequest; import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.support.IndicesOptions; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; -import org.opensearch.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.script.ScriptType; @@ -57,7 +58,7 @@ /** * A request to execute a search based on a search template. 
*/ -public class SearchTemplateRequest extends ActionRequest implements CompositeIndicesRequest, ToXContentObject { +public class SearchTemplateRequest extends ActionRequest implements IndicesRequest.Replaceable, CompositeIndicesRequest, ToXContentObject { private SearchRequest request; private boolean simulate = false; @@ -207,8 +208,8 @@ public ActionRequestValidationException validate() { request.setScriptType(ScriptType.INLINE); if (parser.currentToken() == XContentParser.Token.START_OBJECT) { // convert the template to json which is the only supported XContentType (see CustomMustacheFactory#createEncoder) - try (XContentBuilder builder = XContentFactory.jsonBuilder()) { - request.setScript(Strings.toString(builder.copyCurrentStructure(parser))); + try (XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder()) { + request.setScript(builder.copyCurrentStructure(parser).toString()); } catch (IOException e) { throw new ParsingException(parser.getTokenLocation(), "Could not parse inline template", e); } @@ -255,4 +256,28 @@ public void writeTo(StreamOutput out) throws IOException { out.writeMap(scriptParams); } } + + @Override + public String[] indices() { + if (request == null) { + return new String[0]; + } + return request.indices(); + } + + @Override + public IndicesOptions indicesOptions() { + if (request == null) { + return SearchRequest.DEFAULT_INDICES_OPTIONS; + } + return request.indicesOptions(); + } + + @Override + public IndicesRequest indices(String... indices) { + if (request == null) { + return new SearchRequest(new String[0]).indices(indices); + } + return request.indices(indices); + } } diff --git a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/SearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/SearchTemplateResponse.java index 6e56ecf3950bb..9cb6ac127786a 100644 --- a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/SearchTemplateResponse.java +++ b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/SearchTemplateResponse.java @@ -32,19 +32,18 @@ package org.opensearch.script.mustache; -import org.opensearch.action.ActionResponse; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.xcontent.StatusToXContentObject; import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.StatusToXContentObject; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.rest.RestStatus; import java.io.IOException; import java.io.InputStream; @@ -104,11 +103,11 @@ public static SearchTemplateResponse fromXContent(XContentParser parser) throws if (contentAsMap.containsKey(TEMPLATE_OUTPUT_FIELD.getPreferredName())) { Object source = contentAsMap.get(TEMPLATE_OUTPUT_FIELD.getPreferredName()); - XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON).value(source); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON).value(source); 
searchTemplateResponse.setSource(BytesReference.bytes(builder)); } else { MediaType contentType = parser.contentType(); - XContentBuilder builder = XContentFactory.contentBuilder(contentType).map(contentAsMap); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(contentType).map(contentAsMap); XContentParser searchResponseParser = contentType.xContent() .createParser(parser.getXContentRegistry(), parser.getDeprecationHandler(), BytesReference.bytes(builder).streamInput()); @@ -126,7 +125,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); // we can assume the template is always json as we convert it before compiling it try (InputStream stream = source.streamInput()) { - builder.rawField(TEMPLATE_OUTPUT_FIELD.getPreferredName(), stream, XContentType.JSON); + builder.rawField(TEMPLATE_OUTPUT_FIELD.getPreferredName(), stream, MediaTypeRegistry.JSON); } builder.endObject(); } diff --git a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/TransportMultiSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/TransportMultiSearchTemplateAction.java index 529af483497e1..b17b48ec1cbf8 100644 --- a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/TransportMultiSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/TransportMultiSearchTemplateAction.java @@ -32,7 +32,6 @@ package org.opensearch.script.mustache; -import org.opensearch.action.ActionListener; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.MultiSearchResponse; import org.opensearch.action.search.SearchRequest; @@ -40,6 +39,7 @@ import org.opensearch.action.support.HandledTransportAction; import org.opensearch.client.node.NodeClient; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.script.ScriptService; import org.opensearch.tasks.Task; diff --git a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/TransportRenderSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/TransportRenderSearchTemplateAction.java new file mode 100644 index 0000000000000..993d77ffaa75c --- /dev/null +++ b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/TransportRenderSearchTemplateAction.java @@ -0,0 +1,30 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.script.mustache; + +import org.opensearch.action.support.ActionFilters; +import org.opensearch.client.node.NodeClient; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.script.ScriptService; +import org.opensearch.transport.TransportService; + +public class TransportRenderSearchTemplateAction extends TransportSearchTemplateAction { + + @Inject + public TransportRenderSearchTemplateAction( + TransportService transportService, + ActionFilters actionFilters, + ScriptService scriptService, + NamedXContentRegistry xContentRegistry, + NodeClient client + ) { + super(RenderSearchTemplateAction.NAME, transportService, actionFilters, scriptService, xContentRegistry, client); + } +} diff --git a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/TransportSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/TransportSearchTemplateAction.java index bbda8d15d9d41..d75cc0337b66c 100644 --- a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/TransportSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/TransportSearchTemplateAction.java @@ -32,19 +32,18 @@ package org.opensearch.script.mustache; -import org.opensearch.action.ActionListener; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.HandledTransportAction; import org.opensearch.client.node.NodeClient; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.inject.Inject; import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.rest.action.search.RestSearchAction; import org.opensearch.script.Script; import org.opensearch.script.ScriptService; @@ -62,9 +61,9 @@ public class TransportSearchTemplateAction extends HandledTransportAction<Search private static final String TEMPLATE_LANG = MustacheScriptEngine.NAME; - private final ScriptService scriptService; - private final NamedXContentRegistry xContentRegistry; - private final NodeClient client; + protected final ScriptService scriptService; + protected final NamedXContentRegistry xContentRegistry; + protected final NodeClient client; @Inject public TransportSearchTemplateAction( @@ -80,6 +79,20 @@ public TransportSearchTemplateAction( this.client = client; } + public TransportSearchTemplateAction( + String actionName, + TransportService transportService, + ActionFilters actionFilters, + ScriptService scriptService, + NamedXContentRegistry xContentRegistry, + NodeClient client + ) { + super(actionName, transportService, actionFilters, SearchTemplateRequest::new); + this.scriptService = scriptService; + this.xContentRegistry = xContentRegistry; + this.client = client; + } + @Override protected void doExecute(Task task, SearchTemplateRequest request, ActionListener<SearchTemplateResponse> listener) { final SearchTemplateResponse response = new SearchTemplateResponse(); @@ -132,7 +145,7 @@ static SearchRequest 
convert( } try ( - XContentParser parser = XContentFactory.xContent(XContentType.JSON) + XContentParser parser = MediaTypeRegistry.JSON.xContent() .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, source) ) { SearchSourceBuilder builder = SearchSourceBuilder.searchSource(); diff --git a/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/MultiSearchTemplateRequestTests.java b/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/MultiSearchTemplateRequestTests.java index 311f14cb8d80d..7499d86f70b2b 100644 --- a/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/MultiSearchTemplateRequestTests.java +++ b/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/MultiSearchTemplateRequestTests.java @@ -34,7 +34,7 @@ import org.opensearch.action.search.SearchRequest; import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.rest.RestRequest; import org.opensearch.script.ScriptType; import org.opensearch.search.Scroll; @@ -56,7 +56,7 @@ public class MultiSearchTemplateRequestTests extends OpenSearchTestCase { public void testParseRequest() throws Exception { byte[] data = StreamsUtils.copyToBytesFromClasspath("/org/opensearch/script/mustache/simple-msearch-template.json"); - RestRequest restRequest = new FakeRestRequest.Builder(xContentRegistry()).withContent(new BytesArray(data), XContentType.JSON) + RestRequest restRequest = new FakeRestRequest.Builder(xContentRegistry()).withContent(new BytesArray(data), MediaTypeRegistry.JSON) .build(); MultiSearchTemplateRequest request = RestMultiSearchTemplateAction.parseRequest(restRequest, true); @@ -92,8 +92,10 @@ public void testParseRequest() throws Exception { public void testParseWithCarriageReturn() throws Exception { final String content = "{\"index\":[\"test0\", \"test1\"], \"request_cache\": true}\r\n" + "{\"source\": {\"query\" : {\"match_{{template}}\" :{}}}, \"params\": {\"template\": \"all\" } }\r\n"; - RestRequest restRequest = new FakeRestRequest.Builder(xContentRegistry()).withContent(new BytesArray(content), XContentType.JSON) - .build(); + RestRequest restRequest = new FakeRestRequest.Builder(xContentRegistry()).withContent( + new BytesArray(content), + MediaTypeRegistry.JSON + ).build(); MultiSearchTemplateRequest request = RestMultiSearchTemplateAction.parseRequest(restRequest, true); @@ -144,8 +146,10 @@ public void testMultiSearchTemplateToJson() throws Exception { String serialized = toJsonString(multiSearchTemplateRequest); // Deserialize the request - RestRequest restRequest = new FakeRestRequest.Builder(xContentRegistry()).withContent(new BytesArray(serialized), XContentType.JSON) - .build(); + RestRequest restRequest = new FakeRestRequest.Builder(xContentRegistry()).withContent( + new BytesArray(serialized), + MediaTypeRegistry.JSON + ).build(); MultiSearchTemplateRequest deser = RestMultiSearchTemplateAction.parseRequest(restRequest, true); // For object equality purposes need to set the search requests' source to non-null @@ -163,7 +167,7 @@ public void testMultiSearchTemplateToJson() throws Exception { } protected String toJsonString(MultiSearchTemplateRequest multiSearchTemplateRequest) throws IOException { - byte[] bytes = MultiSearchTemplateRequest.writeMultiLineFormat(multiSearchTemplateRequest, XContentType.JSON.xContent()); + byte[] bytes = MultiSearchTemplateRequest.writeMultiLineFormat(multiSearchTemplateRequest, 
MediaTypeRegistry.JSON.xContent()); return new String(bytes, StandardCharsets.UTF_8); } diff --git a/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/MustacheScriptEngineTests.java b/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/MustacheScriptEngineTests.java index 4dc0562586a51..fbb7d09709a91 100644 --- a/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/MustacheScriptEngineTests.java +++ b/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/MustacheScriptEngineTests.java @@ -33,10 +33,10 @@ import com.github.mustachejava.MustacheFactory; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.script.TemplateScript; +import org.opensearch.core.xcontent.XContentParser; import org.opensearch.script.Script; +import org.opensearch.script.TemplateScript; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; @@ -200,7 +200,7 @@ private String getChars() { /** * From https://www.ietf.org/rfc/rfc4627.txt: - * + * <p> * All Unicode characters may be placed within the * quotation marks except for the characters that must be escaped: * quotation mark, reverse solidus, and the control characters (U+0000 diff --git a/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/MustacheTests.java b/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/MustacheTests.java index a5c9a03590321..93e4c2ce928b0 100644 --- a/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/MustacheTests.java +++ b/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/MustacheTests.java @@ -31,9 +31,9 @@ package org.opensearch.script.mustache; +import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.script.ScriptEngine; import org.opensearch.script.ScriptException; import org.opensearch.script.TemplateScript; diff --git a/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/SearchTemplateRequestTests.java b/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/SearchTemplateRequestTests.java index 72443d1323b44..71ce616fd5d94 100644 --- a/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/SearchTemplateRequestTests.java +++ b/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/SearchTemplateRequestTests.java @@ -32,6 +32,7 @@ package org.opensearch.script.mustache; +import org.opensearch.action.search.SearchRequest; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.script.ScriptType; import org.opensearch.search.RandomSearchRequestGenerator; @@ -110,4 +111,19 @@ public static SearchTemplateRequest createRandomRequest() { request.setRequest(RandomSearchRequestGenerator.randomSearchRequest(SearchSourceBuilder::searchSource)); return request; } + + public void testSimulatedSearchTemplateRequest() { + SearchTemplateRequest request = new SearchTemplateRequest(); + request.setSimulate(true); + + assertEquals(0, request.indices().length); + assertEquals(SearchRequest.DEFAULT_INDICES_OPTIONS, request.indicesOptions()); + assertEquals(2, request.indices("index1", "index2").indices().length); + + SearchTemplateRequest randomRequest = createRandomRequest(); + int expectedIndicesLength = randomRequest.indices().length; + randomRequest.setSimulate(true); + + 
assertEquals(expectedIndicesLength, randomRequest.indices().length); + } } diff --git a/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/SearchTemplateRequestXContentTests.java b/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/SearchTemplateRequestXContentTests.java index 664e87da0a7d8..08d7841306161 100644 --- a/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/SearchTemplateRequestXContentTests.java +++ b/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/SearchTemplateRequestXContentTests.java @@ -32,14 +32,14 @@ package org.opensearch.script.mustache; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.script.ScriptType; import org.opensearch.test.AbstractXContentTestCase; @@ -101,7 +101,7 @@ public void testToXContentWithInlineTemplate() throws IOException { request.setScriptParams(scriptParams); XContentType contentType = randomFrom(XContentType.values()); - XContentBuilder expectedRequest = XContentFactory.contentBuilder(contentType) + XContentBuilder expectedRequest = MediaTypeRegistry.contentBuilder(contentType) .startObject() .field("source", "{\"query\": { \"match\" : { \"{{my_field}}\" : \"{{my_value}}\" } } }") .startObject("params") @@ -112,7 +112,7 @@ public void testToXContentWithInlineTemplate() throws IOException { .field("profile", true) .endObject(); - XContentBuilder actualRequest = XContentFactory.contentBuilder(contentType); + XContentBuilder actualRequest = MediaTypeRegistry.contentBuilder(contentType); request.toXContent(actualRequest, ToXContent.EMPTY_PARAMS); assertToXContentEquivalent(BytesReference.bytes(expectedRequest), BytesReference.bytes(actualRequest), contentType); @@ -131,7 +131,7 @@ public void testToXContentWithStoredTemplate() throws IOException { request.setScriptParams(params); XContentType contentType = randomFrom(XContentType.values()); - XContentBuilder expectedRequest = XContentFactory.contentBuilder(contentType) + XContentBuilder expectedRequest = MediaTypeRegistry.contentBuilder(contentType) .startObject() .field("id", "match_template") .startObject("params") @@ -142,7 +142,7 @@ public void testToXContentWithStoredTemplate() throws IOException { .field("profile", false) .endObject(); - XContentBuilder actualRequest = XContentFactory.contentBuilder(contentType); + XContentBuilder actualRequest = MediaTypeRegistry.contentBuilder(contentType); request.toXContent(actualRequest, ToXContent.EMPTY_PARAMS); assertToXContentEquivalent(BytesReference.bytes(expectedRequest), BytesReference.bytes(actualRequest), contentType); diff --git a/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/SearchTemplateResponseTests.java b/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/SearchTemplateResponseTests.java index 804a72561c10e..c2685e45ecb6b 100644 --- a/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/SearchTemplateResponseTests.java +++ 
b/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/SearchTemplateResponseTests.java @@ -35,12 +35,13 @@ import org.apache.lucene.search.TotalHits; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.ShardSearchFailure; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; import org.opensearch.search.internal.InternalSearchResponse; @@ -129,7 +130,7 @@ protected void assertEqualInstances(SearchTemplateResponse expectedInstance, Sea assertEquals(expectedSource == null, newSource == null); if (expectedSource != null) { try { - assertToXContentEquivalent(expectedSource, newSource, XContentType.JSON); + assertToXContentEquivalent(expectedSource, newSource, MediaTypeRegistry.JSON); } catch (IOException e) { throw new RuntimeException(e); } @@ -164,7 +165,7 @@ public void testSourceToXContent() throws IOException { response.setSource(BytesReference.bytes(source)); XContentType contentType = randomFrom(XContentType.values()); - XContentBuilder expectedResponse = XContentFactory.contentBuilder(contentType) + XContentBuilder expectedResponse = MediaTypeRegistry.contentBuilder(contentType) .startObject() .startObject("template_output") .startObject("query") @@ -175,7 +176,7 @@ public void testSourceToXContent() throws IOException { .endObject() .endObject(); - XContentBuilder actualResponse = XContentFactory.contentBuilder(contentType); + XContentBuilder actualResponse = MediaTypeRegistry.contentBuilder(contentType); response.toXContent(actualResponse, ToXContent.EMPTY_PARAMS); assertToXContentEquivalent(BytesReference.bytes(expectedResponse), BytesReference.bytes(actualResponse), contentType); @@ -210,7 +211,7 @@ public void testSearchResponseToXContent() throws IOException { response.setResponse(searchResponse); XContentType contentType = randomFrom(XContentType.values()); - XContentBuilder expectedResponse = XContentFactory.contentBuilder(contentType) + XContentBuilder expectedResponse = MediaTypeRegistry.contentBuilder(contentType) .startObject() .field("took", 0) .field("timed_out", false) @@ -235,7 +236,7 @@ public void testSearchResponseToXContent() throws IOException { .endObject() .endObject(); - XContentBuilder actualResponse = XContentFactory.contentBuilder(contentType); + XContentBuilder actualResponse = MediaTypeRegistry.contentBuilder(contentType); response.toXContent(actualResponse, ToXContent.EMPTY_PARAMS); assertToXContentEquivalent(BytesReference.bytes(expectedResponse), BytesReference.bytes(actualResponse), contentType); diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index d7af8621c478a..fb51a0bb7f157 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -33,7 +33,6 @@ import com.github.jengelman.gradle.plugins.shadow.ShadowBasePlugin apply plugin: 'opensearch.validate-rest-spec' apply plugin: 'opensearch.yaml-rest-test' -apply plugin: 'com.github.johnrengelman.shadow' opensearchplugin { description 'An easy, safe and fast scripting language for 
OpenSearch' @@ -62,30 +61,6 @@ dependencies { api project('spi') } -test { - doFirst { - test.classpath -= project.files(project.tasks.named('shadowJar')) - test.classpath -= project.configurations.getByName(ShadowBasePlugin.CONFIGURATION_NAME) - test.classpath += project.extensions.getByType(SourceSetContainer).getByName(SourceSet.MAIN_SOURCE_SET_NAME).runtimeClasspath - } -} - -shadowJar { - archiveClassifier.set('') - relocate 'org.objectweb', 'org.opensearch.repackage.org.objectweb' - dependencies { - include(dependency("org.ow2.asm:asm:${versions.asm}")) - include(dependency("org.ow2.asm:asm-util:${versions.asm}")) - include(dependency("org.ow2.asm:asm-tree:${versions.asm}")) - include(dependency("org.ow2.asm:asm-commons:${versions.asm}")) - include(dependency("org.ow2.asm:asm-analysis:${versions.asm}")) - } -} - -tasks.validateNebulaPom.dependsOn tasks.generatePomFileForShadowPublication -tasks.validateShadowPom.dependsOn tasks.generatePomFileForNebulaPublication -tasks.withType(AbstractPublishToMaven)*.dependsOn "generatePomFileForShadowPublication", "generatePomFileForNebulaPublication" - tasks.named("dependencyLicenses").configure { mapping from: /asm-.*/, to: 'asm' } diff --git a/modules/lang-painless/licenses/antlr4-runtime-4.11.1.jar.sha1 b/modules/lang-painless/licenses/antlr4-runtime-4.11.1.jar.sha1 deleted file mode 100644 index f1b328a6de624..0000000000000 --- a/modules/lang-painless/licenses/antlr4-runtime-4.11.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -069214c1de1960040729702eb58deac8827135e7 \ No newline at end of file diff --git a/modules/lang-painless/licenses/antlr4-runtime-4.13.1.jar.sha1 b/modules/lang-painless/licenses/antlr4-runtime-4.13.1.jar.sha1 new file mode 100644 index 0000000000000..e50b9bb646727 --- /dev/null +++ b/modules/lang-painless/licenses/antlr4-runtime-4.13.1.jar.sha1 @@ -0,0 +1 @@ +17125bae1d965624e265ef49552f6465a2bfa307 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-9.5.jar.sha1 b/modules/lang-painless/licenses/asm-9.5.jar.sha1 deleted file mode 100644 index ea4aa3581dc87..0000000000000 --- a/modules/lang-painless/licenses/asm-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dc6ea1875f4d64fbc85e1691c95b96a3d8569c90 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-9.6.jar.sha1 new file mode 100644 index 0000000000000..2d9e6a9d3cfd6 --- /dev/null +++ b/modules/lang-painless/licenses/asm-9.6.jar.sha1 @@ -0,0 +1 @@ +aa205cf0a06dbd8e04ece91c0b37c3f5d567546a \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-analysis-9.5.jar.sha1 b/modules/lang-painless/licenses/asm-analysis-9.5.jar.sha1 deleted file mode 100644 index 9e87d3ce7d719..0000000000000 --- a/modules/lang-painless/licenses/asm-analysis-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -490bacc77de7cbc0be1a30bb3471072d705be4a4 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-analysis-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-analysis-9.6.jar.sha1 new file mode 100644 index 0000000000000..fa42ea1198165 --- /dev/null +++ b/modules/lang-painless/licenses/asm-analysis-9.6.jar.sha1 @@ -0,0 +1 @@ +9ce6c7b174bd997fc2552dff47964546bd7a5ec3 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-commons-9.5.jar.sha1 b/modules/lang-painless/licenses/asm-commons-9.5.jar.sha1 deleted file mode 100644 index 5be792660c19f..0000000000000 --- a/modules/lang-painless/licenses/asm-commons-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-19ab5b5800a3910d30d3a3e64fdb00fd0cb42de0 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-commons-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-commons-9.6.jar.sha1 new file mode 100644 index 0000000000000..a0814f495771f --- /dev/null +++ b/modules/lang-painless/licenses/asm-commons-9.6.jar.sha1 @@ -0,0 +1 @@ +f1a9e5508eff490744144565c47326c8648be309 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-tree-9.5.jar.sha1 b/modules/lang-painless/licenses/asm-tree-9.5.jar.sha1 deleted file mode 100644 index fb42db6a9d15c..0000000000000 --- a/modules/lang-painless/licenses/asm-tree-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fd33c8b6373abaa675be407082fdfda35021254a \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-tree-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-tree-9.6.jar.sha1 new file mode 100644 index 0000000000000..101eb03b4b736 --- /dev/null +++ b/modules/lang-painless/licenses/asm-tree-9.6.jar.sha1 @@ -0,0 +1 @@ +c0cdda9d211e965d2a4448aa3fd86110f2f8c2de \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-util-9.5.jar.sha1 b/modules/lang-painless/licenses/asm-util-9.5.jar.sha1 deleted file mode 100644 index 5fffbfe655deb..0000000000000 --- a/modules/lang-painless/licenses/asm-util-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -64b5a1fc8c1b15ed2efd6a063e976bc8d3dc5ffe \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-util-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-util-9.6.jar.sha1 new file mode 100644 index 0000000000000..1f42ac62dc69c --- /dev/null +++ b/modules/lang-painless/licenses/asm-util-9.6.jar.sha1 @@ -0,0 +1 @@ +f77caf84eb93786a749b2baa40865b9613e3eaee \ No newline at end of file diff --git a/modules/lang-painless/spi/build.gradle b/modules/lang-painless/spi/build.gradle index 59a77870b4987..32556f907fdc0 100644 --- a/modules/lang-painless/spi/build.gradle +++ b/modules/lang-painless/spi/build.gradle @@ -33,7 +33,7 @@ apply plugin: 'opensearch.publish' base { group = 'org.opensearch.plugin' - archivesBaseName = 'opensearch-scripting-painless-spi' + archivesName = 'opensearch-scripting-painless-spi' } dependencies { diff --git a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/Allowlist.java b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/Allowlist.java index 56ade63efa5e2..265263b41ca89 100644 --- a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/Allowlist.java +++ b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/Allowlist.java @@ -42,7 +42,7 @@ * Allowlist contains data structures designed to be used to generate an allowlist of Java classes, * constructors, methods, and fields that can be used within a Painless script at both compile-time * and run-time. - * + * <p> * A Allowlist consists of several pieces with {@link AllowlistClass}s as the top level. Each * {@link AllowlistClass} will contain zero-to-many {@link AllowlistConstructor}s, {@link AllowlistMethod}s, and * {@link AllowlistField}s which are what will be available with a Painless script. 
See each individual diff --git a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistClass.java b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistClass.java index 17e6814addf3b..67f5a07846c53 100644 --- a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistClass.java +++ b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistClass.java @@ -45,12 +45,12 @@ * classes. Though, since multiple allowlists may be combined into a single allowlist for a * specific context, as long as multiple classes representing the same Java class have the same * class name and have legal constructor/method overloading they can be merged together. - * + * <p> * Classes in Painless allow for arity overloading for constructors and methods. Arity overloading * means that multiple constructors are allowed for a single class as long as they have a different * number of parameters, and multiples methods with the same name are allowed for a single class * as long as they have the same return type and a different number of parameters. - * + * <p> * Classes will automatically extend other allowlisted classes if the Java class they represent is a * subclass of other classes including Java interfaces. */ diff --git a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistLoader.java b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistLoader.java index 71265f82acacc..f18a7fb3ba1a9 100644 --- a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistLoader.java +++ b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistLoader.java @@ -67,11 +67,11 @@ public static Allowlist loadFromResourceFiles(Class<?> resource, String... filep * is the path of a single text file. The {@link Class}'s {@link ClassLoader} will be used to lookup the Java * reflection objects for each individual {@link Class}, {@link Constructor}, {@link Method}, and {@link Field} * specified as part of the allowlist in the text file. - * + * <p> * A single pass is made through each file to collect all the information about each class, constructor, method, * and field. Most validation will be done at a later point after all allowlists have been gathered and their * merging takes place. - * + * <p> * A painless type name is one of the following: * <ul> * <li> def - The Painless dynamic type which is automatically included without a need to be @@ -129,13 +129,13 @@ public static Allowlist loadFromResourceFiles(Class<?> resource, String... filep * be appropriately parsed and handled. Painless complex types must be specified with the * fully-qualified Java class name. Method argument types, method return types, and field types * must be specified with Painless type names (def, fully-qualified, or short) as described earlier. - * + * <p> * The following example is used to create a single allowlist text file: * - * {@code + * <pre> * # primitive types * - * class int -> int { + * class int -> int { * } * * # complex types @@ -161,7 +161,7 @@ public static Allowlist loadFromResourceFiles(Class<?> resource, String... filep * int value1 * def value2 * } - * } + * </pre> */ public static Allowlist loadFromResourceFiles(Class<?> resource, Map<String, AllowlistAnnotationParser> parsers, String... 
filepaths) { List<AllowlistClass> allowlistClasses = new ArrayList<>(); @@ -513,6 +513,7 @@ public static Allowlist loadFromResourceFiles(Class<?> resource, Map<String, All } } + @SuppressWarnings("removal") ClassLoader loader = AccessController.doPrivileged((PrivilegedAction<ClassLoader>) resource::getClassLoader); return new Allowlist(loader, allowlistClasses, allowlistStatics, allowlistClassBindings, Collections.emptyList()); diff --git a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistMethod.java b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistMethod.java index 8bb0231ff3f4f..9fcaec3dbf7b6 100644 --- a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistMethod.java +++ b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/AllowlistMethod.java @@ -45,7 +45,7 @@ * are using the '.' operator on an existing class variable/field. Painless classes may have multiple * methods with the same name as long as they comply with arity overloading described in * {@link AllowlistClass}. - * + * <p> * Classes may also have additional methods that are not part of the Java class the class represents - * these are known as augmented methods. An augmented method can be added to a class as a part of any * Java class as long as the method is static and the first parameter of the method is the Java class diff --git a/modules/lang-painless/src/doc/java/org/opensearch/painless/ContextDocGenerator.java b/modules/lang-painless/src/doc/java/org/opensearch/painless/ContextDocGenerator.java index 3fca92a216b26..c31f279d712f5 100644 --- a/modules/lang-painless/src/doc/java/org/opensearch/painless/ContextDocGenerator.java +++ b/modules/lang-painless/src/doc/java/org/opensearch/painless/ContextDocGenerator.java @@ -34,9 +34,9 @@ import org.opensearch.common.SuppressForbidden; import org.opensearch.common.io.PathUtils; -import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.xcontent.XContentParser; import org.opensearch.painless.action.PainlessContextClassBindingInfo; import org.opensearch.painless.action.PainlessContextClassInfo; import org.opensearch.painless.action.PainlessContextConstructorInfo; diff --git a/modules/lang-painless/src/main/antlr/PainlessLexer.g4 b/modules/lang-painless/src/main/antlr/PainlessLexer.g4 index 21b03b85d8edd..69b789dd2aa25 100644 --- a/modules/lang-painless/src/main/antlr/PainlessLexer.g4 +++ b/modules/lang-painless/src/main/antlr/PainlessLexer.g4 @@ -25,7 +25,7 @@ protected abstract boolean isSlashRegex(); } WS: [ \t\n\r]+ -> skip; -COMMENT: ( '//' .*? [\n\r] | '/*' .*? '*/' ) -> skip; +COMMENT: ( '//' ~[\n\r]* | '/*' .*? 
'*/' ) -> skip; LBRACK: '{'; RBRACK: '}'; diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/AnalyzerCaster.java b/modules/lang-painless/src/main/java/org/opensearch/painless/AnalyzerCaster.java index d830ef2ab6290..9aae14e0a4a52 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/AnalyzerCaster.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/AnalyzerCaster.java @@ -35,9 +35,7 @@ import org.opensearch.painless.lookup.PainlessCast; import org.opensearch.painless.lookup.PainlessLookupUtility; import org.opensearch.painless.lookup.def; -import org.opensearch.script.JodaCompatibleZonedDateTime; -import java.time.ZonedDateTime; import java.util.Objects; /** @@ -87,19 +85,11 @@ public static PainlessCast getLegalCast(Location location, Class<?> actual, Clas return PainlessCast.originalTypetoTargetType(def.class, Float.class, explicit); } else if (expected == Double.class) { return PainlessCast.originalTypetoTargetType(def.class, Double.class, explicit); - // TODO: remove this when the transition from Joda to Java datetimes is completed - } else if (expected == ZonedDateTime.class) { - return PainlessCast.originalTypetoTargetType(def.class, ZonedDateTime.class, explicit); } } else if (actual == String.class) { if (expected == char.class && explicit) { return PainlessCast.originalTypetoTargetType(String.class, char.class, true); } - // TODO: remove this when the transition from Joda to Java datetimes is completed - } else if (actual == JodaCompatibleZonedDateTime.class) { - if (expected == ZonedDateTime.class) { - return PainlessCast.originalTypetoTargetType(JodaCompatibleZonedDateTime.class, ZonedDateTime.class, explicit); - } } else if (actual == boolean.class) { if (expected == def.class) { return PainlessCast.boxOriginalType(Boolean.class, def.class, explicit, boolean.class); diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/Compiler.java b/modules/lang-painless/src/main/java/org/opensearch/painless/Compiler.java index 35c676653fdc3..c19d4f361b2b6 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/Compiler.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/Compiler.java @@ -73,9 +73,7 @@ final class Compiler { */ private static final CodeSource CODESOURCE; - /** - * Setup the code privileges. - */ + /* Setup the code privileges. */ static { try { // Setup the code privileges. 
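The PainlessLexer.g4 change above is small but behaviorally meaningful: the old COMMENT alternative ('//' .*? [\n\r]) only closed a line comment on an explicit newline character, so a '//' comment on the last line of a script with no trailing newline could not be skipped, while the new '~[\n\r]*' form consumes to end of line or end of input. A minimal stand-in illustration using java.util.regex rather than the generated ANTLR lexer (the pattern strings are analogies to the two grammar rules, not code from this change):

    import java.util.regex.Pattern;

    public class LineCommentRuleSketch {
        public static void main(String[] args) {
            // Old rule analogue: '//' .*? [\n\r] -- a newline is required to close the comment.
            Pattern oldRule = Pattern.compile("//.*?[\\n\\r]");
            // New rule analogue: '//' ~[\n\r]* -- stops at end of line or end of input.
            Pattern newRule = Pattern.compile("//[^\\n\\r]*");

            String script = "int x = 1; // comment with no trailing newline";
            System.out.println(oldRule.matcher(script).find()); // false: the comment never terminates
            System.out.println(newRule.matcher(script).find()); // true: matches through end of input
        }
    }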
diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/Def.java b/modules/lang-painless/src/main/java/org/opensearch/painless/Def.java index de6fd5ebc0177..3320686466762 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/Def.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/Def.java @@ -36,13 +36,11 @@ import org.opensearch.painless.lookup.PainlessLookupUtility; import org.opensearch.painless.lookup.PainlessMethod; import org.opensearch.painless.symbol.FunctionTable; -import org.opensearch.script.JodaCompatibleZonedDateTime; import java.lang.invoke.CallSite; import java.lang.invoke.MethodHandle; import java.lang.invoke.MethodHandles; import java.lang.invoke.MethodType; -import java.time.ZonedDateTime; import java.util.BitSet; import java.util.Collections; import java.util.Iterator; @@ -1506,15 +1504,6 @@ public static String defToStringExplicit(final Object value) { } } - // TODO: remove this when the transition from Joda to Java datetimes is completed - public static ZonedDateTime defToZonedDateTime(final Object value) { - if (value instanceof JodaCompatibleZonedDateTime) { - return ((JodaCompatibleZonedDateTime) value).getZonedDateTime(); - } - - return (ZonedDateTime) value; - } - /** * "Normalizes" the index into a {@code Map} by making no change to the index. */ diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/LambdaBootstrap.java b/modules/lang-painless/src/main/java/org/opensearch/painless/LambdaBootstrap.java index d0af4651d2d3b..2bf70882a501b 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/LambdaBootstrap.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/LambdaBootstrap.java @@ -501,6 +501,7 @@ private static void endLambdaClass(ClassWriter cw) { * Defines the {@link Class} for the lambda class using the same {@link Compiler.Loader} * that originally defined the class for the Painless script. 
*/ + @SuppressWarnings("removal") private static Class<?> createLambdaClass(Compiler.Loader loader, ClassWriter cw, Type lambdaClassType) { byte[] classBytes = cw.toByteArray(); diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/MethodWriter.java b/modules/lang-painless/src/main/java/org/opensearch/painless/MethodWriter.java index 028acdb7d87c9..5ea09e59591f1 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/MethodWriter.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/MethodWriter.java @@ -35,7 +35,6 @@ import org.opensearch.painless.lookup.PainlessCast; import org.opensearch.painless.lookup.PainlessMethod; import org.opensearch.painless.lookup.def; -import org.opensearch.script.JodaCompatibleZonedDateTime; import org.objectweb.asm.ClassVisitor; import org.objectweb.asm.Label; import org.objectweb.asm.Opcodes; @@ -44,7 +43,6 @@ import org.objectweb.asm.commons.Method; import java.lang.reflect.Modifier; -import java.time.ZonedDateTime; import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Arrays; @@ -86,10 +84,8 @@ import static org.opensearch.painless.WriterConstants.DEF_TO_P_SHORT_IMPLICIT; import static org.opensearch.painless.WriterConstants.DEF_TO_STRING_EXPLICIT; import static org.opensearch.painless.WriterConstants.DEF_TO_STRING_IMPLICIT; -import static org.opensearch.painless.WriterConstants.DEF_TO_ZONEDDATETIME; import static org.opensearch.painless.WriterConstants.DEF_UTIL_TYPE; import static org.opensearch.painless.WriterConstants.INDY_STRING_CONCAT_BOOTSTRAP_HANDLE; -import static org.opensearch.painless.WriterConstants.JCZDT_TO_ZONEDDATETIME; import static org.opensearch.painless.WriterConstants.LAMBDA_BOOTSTRAP_HANDLE; import static org.opensearch.painless.WriterConstants.MAX_INDY_STRING_CONCAT_ARGS; import static org.opensearch.painless.WriterConstants.PAINLESS_ERROR_TYPE; @@ -181,9 +177,6 @@ public void writeCast(PainlessCast cast) { invokeStatic(UTILITY_TYPE, CHAR_TO_STRING); } else if (cast.originalType == String.class && cast.targetType == char.class) { invokeStatic(UTILITY_TYPE, STRING_TO_CHAR); - // TODO: remove this when the transition from Joda to Java datetimes is completed - } else if (cast.originalType == JodaCompatibleZonedDateTime.class && cast.targetType == ZonedDateTime.class) { - invokeStatic(UTILITY_TYPE, JCZDT_TO_ZONEDDATETIME); } else if (cast.unboxOriginalType != null && cast.boxTargetType != null) { unbox(getType(cast.unboxOriginalType)); writeCast(cast.unboxOriginalType, cast.boxTargetType); @@ -219,8 +212,6 @@ public void writeCast(PainlessCast cast) { else if (cast.targetType == Float.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_B_FLOAT_EXPLICIT); else if (cast.targetType == Double.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_B_DOUBLE_EXPLICIT); else if (cast.targetType == String.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_STRING_EXPLICIT); - // TODO: remove this when the transition from Joda to Java datetimes is completed - else if (cast.targetType == ZonedDateTime.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_ZONEDDATETIME); else { writeCast(cast.originalType, cast.targetType); } @@ -242,8 +233,6 @@ public void writeCast(PainlessCast cast) { else if (cast.targetType == Float.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_B_FLOAT_IMPLICIT); else if (cast.targetType == Double.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_B_DOUBLE_IMPLICIT); else if (cast.targetType == String.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_STRING_IMPLICIT); - // TODO: remove this when the transition from Joda 
to Java datetimes is completed - else if (cast.targetType == ZonedDateTime.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_ZONEDDATETIME); else { writeCast(cast.originalType, cast.targetType); } diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessModulePlugin.java b/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessModulePlugin.java index c7b9c75570899..c7638b3c41c63 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessModulePlugin.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessModulePlugin.java @@ -33,26 +33,26 @@ package org.opensearch.painless; import org.opensearch.action.ActionRequest; -import org.opensearch.action.ActionResponse; import org.opensearch.client.Client; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.SetOnce; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsFilter; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; import org.opensearch.painless.action.PainlessContextAction; import org.opensearch.painless.action.PainlessExecuteAction; -import org.opensearch.painless.spi.PainlessExtension; import org.opensearch.painless.spi.Allowlist; import org.opensearch.painless.spi.AllowlistLoader; +import org.opensearch.painless.spi.PainlessExtension; import org.opensearch.plugins.ActionPlugin; import org.opensearch.plugins.ExtensiblePlugin; import org.opensearch.plugins.Plugin; diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessScriptEngine.java b/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessScriptEngine.java index e9edfb73c740c..257687bfb98c5 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessScriptEngine.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessScriptEngine.java @@ -67,6 +67,7 @@ /** * Implementation of a ScriptEngine for the Painless language. */ +@SuppressWarnings("removal") public final class PainlessScriptEngine implements ScriptEngine { /** diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/Utility.java b/modules/lang-painless/src/main/java/org/opensearch/painless/Utility.java index c9a9419fd4821..1f6f203032933 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/Utility.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/Utility.java @@ -32,10 +32,6 @@ package org.opensearch.painless; -import org.opensearch.script.JodaCompatibleZonedDateTime; - -import java.time.ZonedDateTime; - /** * A set of methods for non-native boxing and non-native * exact math operations used at both compile-time and runtime. 
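The following hunk deletes Utility.JCZDTToZonedDateTime, the counterpart of the Def.defToZonedDateTime and MethodWriter cast removals above; all of these existed only to unwrap the transitional JodaCompatibleZonedDateTime wrapper. With the wrapper gone, a def-typed datetime value is already a java.time.ZonedDateTime and the generic cast path suffices. A small sketch of the before and after (the Object value here is a hypothetical stand-in for a def-typed script value, not code from this change):

    import java.time.ZonedDateTime;

    public class DefDateTimeCastSketch {
        public static void main(String[] args) {
            Object value = ZonedDateTime.now(); // hypothetical stand-in for a def-typed value

            // Before: the removed shim first checked for the Joda wrapper:
            //   if (value instanceof JodaCompatibleZonedDateTime) {
            //       return ((JodaCompatibleZonedDateTime) value).getZonedDateTime();
            //   }
            // After: the plain cast that was always the fallback is all that remains.
            ZonedDateTime zdt = (ZonedDateTime) value;
            System.out.println(zdt.getZone());
        }
    }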
@@ -62,10 +58,5 @@ public static char StringTochar(final String value) { return value.charAt(0); } - // TODO: remove this when the transition from Joda to Java datetimes is completed - public static ZonedDateTime JCZDTToZonedDateTime(final JodaCompatibleZonedDateTime jczdt) { - return jczdt.getZonedDateTime(); - } - private Utility() {} } diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/WriterConstants.java b/modules/lang-painless/src/main/java/org/opensearch/painless/WriterConstants.java index 530b4d2607931..1dbcde91a7df4 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/WriterConstants.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/WriterConstants.java @@ -32,7 +32,6 @@ package org.opensearch.painless; -import org.opensearch.script.JodaCompatibleZonedDateTime; import org.objectweb.asm.Handle; import org.objectweb.asm.Opcodes; import org.objectweb.asm.Type; @@ -42,7 +41,6 @@ import java.lang.invoke.MethodHandle; import java.lang.invoke.MethodHandles; import java.lang.invoke.MethodType; -import java.time.ZonedDateTime; import java.util.ArrayList; import java.util.Collection; import java.util.Iterator; @@ -82,13 +80,6 @@ public final class WriterConstants { public static final Method STRING_TO_CHAR = getAsmMethod(char.class, "StringTochar", String.class); public static final Method CHAR_TO_STRING = getAsmMethod(String.class, "charToString", char.class); - // TODO: remove this when the transition from Joda to Java datetimes is completed - public static final Method JCZDT_TO_ZONEDDATETIME = getAsmMethod( - ZonedDateTime.class, - "JCZDTToZonedDateTime", - JodaCompatibleZonedDateTime.class - ); - /** * A Method instance for {@linkplain Pattern}. This isn't available from PainlessLookup because we intentionally don't add it * there so that the script can't create regexes without this syntax. 
Essentially, our static regex syntax has a monopoly on building @@ -157,9 +148,6 @@ public final class WriterConstants { public static final Method DEF_TO_STRING_IMPLICIT = getAsmMethod(String.class, "defToStringImplicit", Object.class); public static final Method DEF_TO_STRING_EXPLICIT = getAsmMethod(String.class, "defToStringExplicit", Object.class); - // TODO: remove this when the transition from Joda to Java datetimes is completed - public static final Method DEF_TO_ZONEDDATETIME = getAsmMethod(ZonedDateTime.class, "defToZonedDateTime", Object.class); - /** invokedynamic bootstrap for lambda expression/method references */ public static final MethodType LAMBDA_BOOTSTRAP_TYPE = MethodType.methodType( CallSite.class, diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/action/PainlessContextAction.java b/modules/lang-painless/src/main/java/org/opensearch/painless/action/PainlessContextAction.java index db1ef8f7a30fb..a26cb79a9717d 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/action/PainlessContextAction.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/action/PainlessContextAction.java @@ -32,16 +32,16 @@ package org.opensearch.painless.action; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.ActionResponse; import org.opensearch.action.ActionType; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.HandledTransportAction; import org.opensearch.client.node.NodeClient; -import org.opensearch.core.ParseField; import org.opensearch.common.inject.Inject; +import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/action/PainlessExecuteAction.java b/modules/lang-painless/src/main/java/org/opensearch/painless/action/PainlessExecuteAction.java index 9a2c8c1f0aa55..67b298eee7973 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/action/PainlessExecuteAction.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/action/PainlessExecuteAction.java @@ -45,7 +45,6 @@ import org.apache.lucene.store.ByteBuffersDirectory; import org.apache.lucene.store.Directory; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.ActionResponse; import org.opensearch.action.ActionType; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.IndicesOptions; @@ -58,28 +57,29 @@ import org.opensearch.cluster.routing.ShardsIterator; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.CheckedBiFunction; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionResponse; +import 
org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.index.Index; import org.opensearch.index.IndexService; import org.opensearch.index.mapper.ParsedDocument; import org.opensearch.index.mapper.SourceToParse; import org.opensearch.index.query.AbstractQueryBuilder; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryShardContext; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.IndicesService; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; @@ -558,7 +558,11 @@ static Response innerShardOperation(Request request, ScriptService scriptService } else if (scriptContext == ScoreScript.CONTEXT) { return prepareRamIndex(request, (context, leafReaderContext) -> { ScoreScript.Factory factory = scriptService.compile(request.script, ScoreScript.CONTEXT); - ScoreScript.LeafFactory leafFactory = factory.newFactory(request.getScript().getParams(), context.lookup()); + ScoreScript.LeafFactory leafFactory = factory.newFactory( + request.getScript().getParams(), + context.lookup(), + context.searcher() + ); ScoreScript scoreScript = leafFactory.newInstance(leafReaderContext); scoreScript.setDocument(0); diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/EnhancedPainlessLexer.java b/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/EnhancedPainlessLexer.java index 805b8a8a45bdc..5cc28e060c7cd 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/EnhancedPainlessLexer.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/EnhancedPainlessLexer.java @@ -36,6 +36,7 @@ import org.antlr.v4.runtime.LexerNoViableAltException; import org.antlr.v4.runtime.Token; import org.antlr.v4.runtime.misc.Interval; + import org.opensearch.painless.Location; /** diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessLexer.java b/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessLexer.java index 3a8dac72e7a65..6e3448e5eea77 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessLexer.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessLexer.java @@ -33,23 +33,22 @@ package org.opensearch.painless.antlr; -import org.antlr.v4.runtime.Lexer; import org.antlr.v4.runtime.CharStream; +import org.antlr.v4.runtime.Lexer; +import org.antlr.v4.runtime.RuleContext; import org.antlr.v4.runtime.RuntimeMetaData; import org.antlr.v4.runtime.Vocabulary; import org.antlr.v4.runtime.VocabularyImpl; -import org.antlr.v4.runtime.RuleContext; import org.antlr.v4.runtime.atn.ATN; import org.antlr.v4.runtime.atn.ATNDeserializer; import org.antlr.v4.runtime.atn.LexerATNSimulator; import org.antlr.v4.runtime.atn.PredictionContextCache; - import org.antlr.v4.runtime.dfa.DFA; -@SuppressWarnings({ "all", "warnings", "unchecked", "unused", "cast", 
"CheckReturnValue" }) +@SuppressWarnings({ "all", "warnings", "unchecked", "unused", "cast", "CheckReturnValue", "this-escape" }) abstract class PainlessLexer extends Lexer { static { - RuntimeMetaData.checkVersion("4.11.1", RuntimeMetaData.VERSION); + RuntimeMetaData.checkVersion("4.13.1", RuntimeMetaData.VERSION); } protected static final DFA[] _decisionToDFA; @@ -436,7 +435,7 @@ private boolean REGEX_sempred(RuleContext _localctx, int predIndex) { return true; } - public static final String _serializedATN = "\u0004\u0000U\u0278\u0006\uffff\uffff\u0006\uffff\uffff\u0002\u0000\u0007" + public static final String _serializedATN = "\u0004\u0000U\u0277\u0006\uffff\uffff\u0006\uffff\uffff\u0002\u0000\u0007" + "\u0000\u0002\u0001\u0007\u0001\u0002\u0002\u0007\u0002\u0002\u0003\u0007" + "\u0003\u0002\u0004\u0007\u0004\u0002\u0005\u0007\u0005\u0002\u0006\u0007" + "\u0006\u0002\u0007\u0007\u0007\u0002\b\u0007\b\u0002\t\u0007\t\u0002\n" @@ -460,381 +459,381 @@ private boolean REGEX_sempred(RuleContext _localctx, int predIndex) { + "R\u0007R\u0002S\u0007S\u0002T\u0007T\u0001\u0000\u0004\u0000\u00ae\b\u0000" + "\u000b\u0000\f\u0000\u00af\u0001\u0000\u0001\u0000\u0001\u0001\u0001\u0001" + "\u0001\u0001\u0001\u0001\u0005\u0001\u00b8\b\u0001\n\u0001\f\u0001\u00bb" - + "\t\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0005" - + "\u0001\u00c2\b\u0001\n\u0001\f\u0001\u00c5\t\u0001\u0001\u0001\u0001\u0001" - + "\u0003\u0001\u00c9\b\u0001\u0001\u0001\u0001\u0001\u0001\u0002\u0001\u0002" - + "\u0001\u0003\u0001\u0003\u0001\u0004\u0001\u0004\u0001\u0005\u0001\u0005" - + "\u0001\u0006\u0001\u0006\u0001\u0007\u0001\u0007\u0001\b\u0001\b\u0001" - + "\b\u0001\b\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001\n\u0001\n\u0001" - + "\u000b\u0001\u000b\u0001\f\u0001\f\u0001\f\u0001\r\u0001\r\u0001\r\u0001" - + "\u000e\u0001\u000e\u0001\u000e\u0001\u000e\u0001\u000e\u0001\u000f\u0001" - + "\u000f\u0001\u000f\u0001\u000f\u0001\u000f\u0001\u000f\u0001\u0010\u0001" - + "\u0010\u0001\u0010\u0001\u0011\u0001\u0011\u0001\u0011\u0001\u0011\u0001" - + "\u0012\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0012\u0001" - + "\u0012\u0001\u0012\u0001\u0012\u0001\u0013\u0001\u0013\u0001\u0013\u0001" - + "\u0013\u0001\u0013\u0001\u0013\u0001\u0014\u0001\u0014\u0001\u0014\u0001" - + "\u0014\u0001\u0014\u0001\u0014\u0001\u0014\u0001\u0015\u0001\u0015\u0001" - + "\u0015\u0001\u0015\u0001\u0016\u0001\u0016\u0001\u0016\u0001\u0016\u0001" - + "\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001" - + "\u0018\u0001\u0018\u0001\u0018\u0001\u0018\u0001\u0018\u0001\u0018\u0001" - + "\u0019\u0001\u0019\u0001\u0019\u0001\u0019\u0001\u0019\u0001\u001a\u0001" - + "\u001a\u0001\u001a\u0001\u001a\u0001\u001a\u0001\u001a\u0001\u001a\u0001" - + "\u001a\u0001\u001a\u0001\u001a\u0001\u001a\u0001\u001b\u0001\u001b\u0001" - + "\u001c\u0001\u001c\u0001\u001d\u0001\u001d\u0001\u001e\u0001\u001e\u0001" - + "\u001e\u0001\u001f\u0001\u001f\u0001 \u0001 \u0001!\u0001!\u0001\"\u0001" - + "\"\u0001\"\u0001#\u0001#\u0001#\u0001$\u0001$\u0001$\u0001$\u0001%\u0001" - + "%\u0001&\u0001&\u0001&\u0001\'\u0001\'\u0001(\u0001(\u0001(\u0001)\u0001" - + ")\u0001)\u0001*\u0001*\u0001*\u0001*\u0001+\u0001+\u0001+\u0001,\u0001" - + ",\u0001,\u0001,\u0001-\u0001-\u0001.\u0001.\u0001/\u0001/\u00010\u0001" - + "0\u00010\u00011\u00011\u00011\u00012\u00012\u00013\u00013\u00014\u0001" - + "4\u00014\u00015\u00015\u00015\u00016\u00016\u00016\u00017\u00017\u0001" - + 
"7\u00018\u00018\u00018\u00018\u00019\u00019\u00019\u0001:\u0001:\u0001" - + ":\u0001;\u0001;\u0001<\u0001<\u0001<\u0001=\u0001=\u0001=\u0001>\u0001" - + ">\u0001>\u0001?\u0001?\u0001?\u0001@\u0001@\u0001@\u0001A\u0001A\u0001" - + "A\u0001B\u0001B\u0001B\u0001C\u0001C\u0001C\u0001D\u0001D\u0001D\u0001" - + "D\u0001E\u0001E\u0001E\u0001E\u0001F\u0001F\u0001F\u0001F\u0001F\u0001" - + "G\u0001G\u0004G\u01b8\bG\u000bG\fG\u01b9\u0001G\u0003G\u01bd\bG\u0001" - + "H\u0001H\u0001H\u0004H\u01c2\bH\u000bH\fH\u01c3\u0001H\u0003H\u01c7\b" - + "H\u0001I\u0001I\u0001I\u0005I\u01cc\bI\nI\fI\u01cf\tI\u0003I\u01d1\bI" - + "\u0001I\u0003I\u01d4\bI\u0001J\u0001J\u0001J\u0005J\u01d9\bJ\nJ\fJ\u01dc" - + "\tJ\u0003J\u01de\bJ\u0001J\u0001J\u0004J\u01e2\bJ\u000bJ\fJ\u01e3\u0003" - + "J\u01e6\bJ\u0001J\u0001J\u0003J\u01ea\bJ\u0001J\u0004J\u01ed\bJ\u000b" - + "J\fJ\u01ee\u0003J\u01f1\bJ\u0001J\u0003J\u01f4\bJ\u0001K\u0001K\u0001" - + "K\u0001K\u0001K\u0001K\u0005K\u01fc\bK\nK\fK\u01ff\tK\u0001K\u0001K\u0001" - + "K\u0001K\u0001K\u0001K\u0001K\u0005K\u0208\bK\nK\fK\u020b\tK\u0001K\u0003" - + "K\u020e\bK\u0001L\u0001L\u0001L\u0001L\u0004L\u0214\bL\u000bL\fL\u0215" - + "\u0001L\u0001L\u0005L\u021a\bL\nL\fL\u021d\tL\u0001L\u0001L\u0001M\u0001" - + "M\u0001M\u0001M\u0001M\u0001N\u0001N\u0001N\u0001N\u0001N\u0001N\u0001" - + "O\u0001O\u0001O\u0001O\u0001O\u0001P\u0001P\u0001P\u0001P\u0001P\u0001" - + "P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001" + + "\t\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0005\u0001\u00c1" + + "\b\u0001\n\u0001\f\u0001\u00c4\t\u0001\u0001\u0001\u0001\u0001\u0003\u0001" + + "\u00c8\b\u0001\u0001\u0001\u0001\u0001\u0001\u0002\u0001\u0002\u0001\u0003" + + "\u0001\u0003\u0001\u0004\u0001\u0004\u0001\u0005\u0001\u0005\u0001\u0006" + + "\u0001\u0006\u0001\u0007\u0001\u0007\u0001\b\u0001\b\u0001\b\u0001\b\u0001" + + "\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001\n\u0001\n\u0001\u000b\u0001\u000b" + + "\u0001\f\u0001\f\u0001\f\u0001\r\u0001\r\u0001\r\u0001\u000e\u0001\u000e" + + "\u0001\u000e\u0001\u000e\u0001\u000e\u0001\u000f\u0001\u000f\u0001\u000f" + + "\u0001\u000f\u0001\u000f\u0001\u000f\u0001\u0010\u0001\u0010\u0001\u0010" + + "\u0001\u0011\u0001\u0011\u0001\u0011\u0001\u0011\u0001\u0012\u0001\u0012" + + "\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0012" + + "\u0001\u0012\u0001\u0013\u0001\u0013\u0001\u0013\u0001\u0013\u0001\u0013" + + "\u0001\u0013\u0001\u0014\u0001\u0014\u0001\u0014\u0001\u0014\u0001\u0014" + + "\u0001\u0014\u0001\u0014\u0001\u0015\u0001\u0015\u0001\u0015\u0001\u0015" + + "\u0001\u0016\u0001\u0016\u0001\u0016\u0001\u0016\u0001\u0017\u0001\u0017" + + "\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0018\u0001\u0018" + + "\u0001\u0018\u0001\u0018\u0001\u0018\u0001\u0018\u0001\u0019\u0001\u0019" + + "\u0001\u0019\u0001\u0019\u0001\u0019\u0001\u001a\u0001\u001a\u0001\u001a" + + "\u0001\u001a\u0001\u001a\u0001\u001a\u0001\u001a\u0001\u001a\u0001\u001a" + + "\u0001\u001a\u0001\u001a\u0001\u001b\u0001\u001b\u0001\u001c\u0001\u001c" + + "\u0001\u001d\u0001\u001d\u0001\u001e\u0001\u001e\u0001\u001e\u0001\u001f" + + "\u0001\u001f\u0001 \u0001 \u0001!\u0001!\u0001\"\u0001\"\u0001\"\u0001" + + "#\u0001#\u0001#\u0001$\u0001$\u0001$\u0001$\u0001%\u0001%\u0001&\u0001" + + "&\u0001&\u0001\'\u0001\'\u0001(\u0001(\u0001(\u0001)\u0001)\u0001)\u0001" + + "*\u0001*\u0001*\u0001*\u0001+\u0001+\u0001+\u0001,\u0001,\u0001,\u0001" + + ",\u0001-\u0001-\u0001.\u0001.\u0001/\u0001/\u00010\u00010\u00010\u0001" + + 
"1\u00011\u00011\u00012\u00012\u00013\u00013\u00014\u00014\u00014\u0001" + + "5\u00015\u00015\u00016\u00016\u00016\u00017\u00017\u00017\u00018\u0001" + + "8\u00018\u00018\u00019\u00019\u00019\u0001:\u0001:\u0001:\u0001;\u0001" + + ";\u0001<\u0001<\u0001<\u0001=\u0001=\u0001=\u0001>\u0001>\u0001>\u0001" + + "?\u0001?\u0001?\u0001@\u0001@\u0001@\u0001A\u0001A\u0001A\u0001B\u0001" + + "B\u0001B\u0001C\u0001C\u0001C\u0001D\u0001D\u0001D\u0001D\u0001E\u0001" + + "E\u0001E\u0001E\u0001F\u0001F\u0001F\u0001F\u0001F\u0001G\u0001G\u0004" + + "G\u01b7\bG\u000bG\fG\u01b8\u0001G\u0003G\u01bc\bG\u0001H\u0001H\u0001" + + "H\u0004H\u01c1\bH\u000bH\fH\u01c2\u0001H\u0003H\u01c6\bH\u0001I\u0001" + + "I\u0001I\u0005I\u01cb\bI\nI\fI\u01ce\tI\u0003I\u01d0\bI\u0001I\u0003I" + + "\u01d3\bI\u0001J\u0001J\u0001J\u0005J\u01d8\bJ\nJ\fJ\u01db\tJ\u0003J\u01dd" + + "\bJ\u0001J\u0001J\u0004J\u01e1\bJ\u000bJ\fJ\u01e2\u0003J\u01e5\bJ\u0001" + + "J\u0001J\u0003J\u01e9\bJ\u0001J\u0004J\u01ec\bJ\u000bJ\fJ\u01ed\u0003" + + "J\u01f0\bJ\u0001J\u0003J\u01f3\bJ\u0001K\u0001K\u0001K\u0001K\u0001K\u0001" + + "K\u0005K\u01fb\bK\nK\fK\u01fe\tK\u0001K\u0001K\u0001K\u0001K\u0001K\u0001" + + "K\u0001K\u0005K\u0207\bK\nK\fK\u020a\tK\u0001K\u0003K\u020d\bK\u0001L" + + "\u0001L\u0001L\u0001L\u0004L\u0213\bL\u000bL\fL\u0214\u0001L\u0001L\u0005" + + "L\u0219\bL\nL\fL\u021c\tL\u0001L\u0001L\u0001M\u0001M\u0001M\u0001M\u0001" + + "M\u0001N\u0001N\u0001N\u0001N\u0001N\u0001N\u0001O\u0001O\u0001O\u0001" + + "O\u0001O\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001" + "P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001" + "P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001" - + "P\u0001P\u0001P\u0003P\u0257\bP\u0001Q\u0001Q\u0001Q\u0001Q\u0001R\u0001" - + "R\u0005R\u025f\bR\nR\fR\u0262\tR\u0001S\u0001S\u0001S\u0005S\u0267\bS" - + "\nS\fS\u026a\tS\u0003S\u026c\bS\u0001S\u0001S\u0001T\u0001T\u0005T\u0272" - + "\bT\nT\fT\u0275\tT\u0001T\u0001T\u0005\u00b9\u00c3\u01fd\u0209\u0215\u0000" - + "U\u0002\u0001\u0004\u0002\u0006\u0003\b\u0004\n\u0005\f\u0006\u000e\u0007" - + "\u0010\b\u0012\t\u0014\n\u0016\u000b\u0018\f\u001a\r\u001c\u000e\u001e" - + "\u000f \u0010\"\u0011$\u0012&\u0013(\u0014*\u0015,\u0016.\u00170\u0018" - + "2\u00194\u001a6\u001b8\u001c:\u001d<\u001e>\u001f@ B!D\"F#H$J%L&N\'P(" - + "R)T*V+X,Z-\\.^/`0b1d2f3h4j5l6n7p8r9t:v;x<z=|>~?\u0080@\u0082A\u0084B\u0086" - + "C\u0088D\u008aE\u008cF\u008eG\u0090H\u0092I\u0094J\u0096K\u0098L\u009a" - + "M\u009cN\u009eO\u00a0P\u00a2Q\u00a4R\u00a6S\u00a8T\u00aaU\u0002\u0000" - + "\u0001\u0013\u0003\u0000\t\n\r\r \u0002\u0000\n\n\r\r\u0001\u000007\u0002" - + "\u0000LLll\u0002\u0000XXxx\u0003\u000009AFaf\u0001\u000019\u0001\u0000" - + "09\u0006\u0000DDFFLLddffll\u0002\u0000EEee\u0002\u0000++--\u0004\u0000" - + "DDFFddff\u0002\u0000\"\"\\\\\u0002\u0000\'\'\\\\\u0001\u0000\n\n\u0002" - + "\u0000\n\n//\u0007\u0000UUcciilmssuuxx\u0003\u0000AZ__az\u0004\u00000" - + "9AZ__az\u029e\u0000\u0002\u0001\u0000\u0000\u0000\u0000\u0004\u0001\u0000" - + "\u0000\u0000\u0000\u0006\u0001\u0000\u0000\u0000\u0000\b\u0001\u0000\u0000" - + "\u0000\u0000\n\u0001\u0000\u0000\u0000\u0000\f\u0001\u0000\u0000\u0000" - + "\u0000\u000e\u0001\u0000\u0000\u0000\u0000\u0010\u0001\u0000\u0000\u0000" - + "\u0000\u0012\u0001\u0000\u0000\u0000\u0000\u0014\u0001\u0000\u0000\u0000" - + "\u0000\u0016\u0001\u0000\u0000\u0000\u0000\u0018\u0001\u0000\u0000\u0000" - + "\u0000\u001a\u0001\u0000\u0000\u0000\u0000\u001c\u0001\u0000\u0000\u0000" - + "\u0000\u001e\u0001\u0000\u0000\u0000\u0000 
\u0001\u0000\u0000\u0000\u0000" - + "\"\u0001\u0000\u0000\u0000\u0000$\u0001\u0000\u0000\u0000\u0000&\u0001" - + "\u0000\u0000\u0000\u0000(\u0001\u0000\u0000\u0000\u0000*\u0001\u0000\u0000" - + "\u0000\u0000,\u0001\u0000\u0000\u0000\u0000.\u0001\u0000\u0000\u0000\u0000" - + "0\u0001\u0000\u0000\u0000\u00002\u0001\u0000\u0000\u0000\u00004\u0001" - + "\u0000\u0000\u0000\u00006\u0001\u0000\u0000\u0000\u00008\u0001\u0000\u0000" - + "\u0000\u0000:\u0001\u0000\u0000\u0000\u0000<\u0001\u0000\u0000\u0000\u0000" - + ">\u0001\u0000\u0000\u0000\u0000@\u0001\u0000\u0000\u0000\u0000B\u0001" - + "\u0000\u0000\u0000\u0000D\u0001\u0000\u0000\u0000\u0000F\u0001\u0000\u0000" - + "\u0000\u0000H\u0001\u0000\u0000\u0000\u0000J\u0001\u0000\u0000\u0000\u0000" - + "L\u0001\u0000\u0000\u0000\u0000N\u0001\u0000\u0000\u0000\u0000P\u0001" - + "\u0000\u0000\u0000\u0000R\u0001\u0000\u0000\u0000\u0000T\u0001\u0000\u0000" - + "\u0000\u0000V\u0001\u0000\u0000\u0000\u0000X\u0001\u0000\u0000\u0000\u0000" - + "Z\u0001\u0000\u0000\u0000\u0000\\\u0001\u0000\u0000\u0000\u0000^\u0001" - + "\u0000\u0000\u0000\u0000`\u0001\u0000\u0000\u0000\u0000b\u0001\u0000\u0000" - + "\u0000\u0000d\u0001\u0000\u0000\u0000\u0000f\u0001\u0000\u0000\u0000\u0000" - + "h\u0001\u0000\u0000\u0000\u0000j\u0001\u0000\u0000\u0000\u0000l\u0001" - + "\u0000\u0000\u0000\u0000n\u0001\u0000\u0000\u0000\u0000p\u0001\u0000\u0000" - + "\u0000\u0000r\u0001\u0000\u0000\u0000\u0000t\u0001\u0000\u0000\u0000\u0000" - + "v\u0001\u0000\u0000\u0000\u0000x\u0001\u0000\u0000\u0000\u0000z\u0001" - + "\u0000\u0000\u0000\u0000|\u0001\u0000\u0000\u0000\u0000~\u0001\u0000\u0000" - + "\u0000\u0000\u0080\u0001\u0000\u0000\u0000\u0000\u0082\u0001\u0000\u0000" - + "\u0000\u0000\u0084\u0001\u0000\u0000\u0000\u0000\u0086\u0001\u0000\u0000" - + "\u0000\u0000\u0088\u0001\u0000\u0000\u0000\u0000\u008a\u0001\u0000\u0000" - + "\u0000\u0000\u008c\u0001\u0000\u0000\u0000\u0000\u008e\u0001\u0000\u0000" - + "\u0000\u0000\u0090\u0001\u0000\u0000\u0000\u0000\u0092\u0001\u0000\u0000" - + "\u0000\u0000\u0094\u0001\u0000\u0000\u0000\u0000\u0096\u0001\u0000\u0000" - + "\u0000\u0000\u0098\u0001\u0000\u0000\u0000\u0000\u009a\u0001\u0000\u0000" - + "\u0000\u0000\u009c\u0001\u0000\u0000\u0000\u0000\u009e\u0001\u0000\u0000" - + "\u0000\u0000\u00a0\u0001\u0000\u0000\u0000\u0000\u00a2\u0001\u0000\u0000" - + "\u0000\u0000\u00a4\u0001\u0000\u0000\u0000\u0000\u00a6\u0001\u0000\u0000" - + "\u0000\u0001\u00a8\u0001\u0000\u0000\u0000\u0001\u00aa\u0001\u0000\u0000" - + "\u0000\u0002\u00ad\u0001\u0000\u0000\u0000\u0004\u00c8\u0001\u0000\u0000" - + "\u0000\u0006\u00cc\u0001\u0000\u0000\u0000\b\u00ce\u0001\u0000\u0000\u0000" - + "\n\u00d0\u0001\u0000\u0000\u0000\f\u00d2\u0001\u0000\u0000\u0000\u000e" - + "\u00d4\u0001\u0000\u0000\u0000\u0010\u00d6\u0001\u0000\u0000\u0000\u0012" - + "\u00d8\u0001\u0000\u0000\u0000\u0014\u00dc\u0001\u0000\u0000\u0000\u0016" - + "\u00e1\u0001\u0000\u0000\u0000\u0018\u00e3\u0001\u0000\u0000\u0000\u001a" - + "\u00e5\u0001\u0000\u0000\u0000\u001c\u00e8\u0001\u0000\u0000\u0000\u001e" - + "\u00eb\u0001\u0000\u0000\u0000 \u00f0\u0001\u0000\u0000\u0000\"\u00f6" - + "\u0001\u0000\u0000\u0000$\u00f9\u0001\u0000\u0000\u0000&\u00fd\u0001\u0000" - + "\u0000\u0000(\u0106\u0001\u0000\u0000\u0000*\u010c\u0001\u0000\u0000\u0000" - + ",\u0113\u0001\u0000\u0000\u0000.\u0117\u0001\u0000\u0000\u00000\u011b" - + "\u0001\u0000\u0000\u00002\u0121\u0001\u0000\u0000\u00004\u0127\u0001\u0000" - + "\u0000\u00006\u012c\u0001\u0000\u0000\u00008\u0137\u0001\u0000\u0000\u0000" - + 
":\u0139\u0001\u0000\u0000\u0000<\u013b\u0001\u0000\u0000\u0000>\u013d" - + "\u0001\u0000\u0000\u0000@\u0140\u0001\u0000\u0000\u0000B\u0142\u0001\u0000" - + "\u0000\u0000D\u0144\u0001\u0000\u0000\u0000F\u0146\u0001\u0000\u0000\u0000" - + "H\u0149\u0001\u0000\u0000\u0000J\u014c\u0001\u0000\u0000\u0000L\u0150" - + "\u0001\u0000\u0000\u0000N\u0152\u0001\u0000\u0000\u0000P\u0155\u0001\u0000" - + "\u0000\u0000R\u0157\u0001\u0000\u0000\u0000T\u015a\u0001\u0000\u0000\u0000" - + "V\u015d\u0001\u0000\u0000\u0000X\u0161\u0001\u0000\u0000\u0000Z\u0164" - + "\u0001\u0000\u0000\u0000\\\u0168\u0001\u0000\u0000\u0000^\u016a\u0001" - + "\u0000\u0000\u0000`\u016c\u0001\u0000\u0000\u0000b\u016e\u0001\u0000\u0000" - + "\u0000d\u0171\u0001\u0000\u0000\u0000f\u0174\u0001\u0000\u0000\u0000h" - + "\u0176\u0001\u0000\u0000\u0000j\u0178\u0001\u0000\u0000\u0000l\u017b\u0001" - + "\u0000\u0000\u0000n\u017e\u0001\u0000\u0000\u0000p\u0181\u0001\u0000\u0000" - + "\u0000r\u0184\u0001\u0000\u0000\u0000t\u0188\u0001\u0000\u0000\u0000v" - + "\u018b\u0001\u0000\u0000\u0000x\u018e\u0001\u0000\u0000\u0000z\u0190\u0001" - + "\u0000\u0000\u0000|\u0193\u0001\u0000\u0000\u0000~\u0196\u0001\u0000\u0000" - + "\u0000\u0080\u0199\u0001\u0000\u0000\u0000\u0082\u019c\u0001\u0000\u0000" - + "\u0000\u0084\u019f\u0001\u0000\u0000\u0000\u0086\u01a2\u0001\u0000\u0000" - + "\u0000\u0088\u01a5\u0001\u0000\u0000\u0000\u008a\u01a8\u0001\u0000\u0000" - + "\u0000\u008c\u01ac\u0001\u0000\u0000\u0000\u008e\u01b0\u0001\u0000\u0000" - + "\u0000\u0090\u01b5\u0001\u0000\u0000\u0000\u0092\u01be\u0001\u0000\u0000" - + "\u0000\u0094\u01d0\u0001\u0000\u0000\u0000\u0096\u01dd\u0001\u0000\u0000" - + "\u0000\u0098\u020d\u0001\u0000\u0000\u0000\u009a\u020f\u0001\u0000\u0000" - + "\u0000\u009c\u0220\u0001\u0000\u0000\u0000\u009e\u0225\u0001\u0000\u0000" - + "\u0000\u00a0\u022b\u0001\u0000\u0000\u0000\u00a2\u0256\u0001\u0000\u0000" - + "\u0000\u00a4\u0258\u0001\u0000\u0000\u0000\u00a6\u025c\u0001\u0000\u0000" - + "\u0000\u00a8\u026b\u0001\u0000\u0000\u0000\u00aa\u026f\u0001\u0000\u0000" - + "\u0000\u00ac\u00ae\u0007\u0000\u0000\u0000\u00ad\u00ac\u0001\u0000\u0000" - + "\u0000\u00ae\u00af\u0001\u0000\u0000\u0000\u00af\u00ad\u0001\u0000\u0000" - + "\u0000\u00af\u00b0\u0001\u0000\u0000\u0000\u00b0\u00b1\u0001\u0000\u0000" - + "\u0000\u00b1\u00b2\u0006\u0000\u0000\u0000\u00b2\u0003\u0001\u0000\u0000" - + "\u0000\u00b3\u00b4\u0005/\u0000\u0000\u00b4\u00b5\u0005/\u0000\u0000\u00b5" - + "\u00b9\u0001\u0000\u0000\u0000\u00b6\u00b8\t\u0000\u0000\u0000\u00b7\u00b6" - + "\u0001\u0000\u0000\u0000\u00b8\u00bb\u0001\u0000\u0000\u0000\u00b9\u00ba" - + "\u0001\u0000\u0000\u0000\u00b9\u00b7\u0001\u0000\u0000\u0000\u00ba\u00bc" - + "\u0001\u0000\u0000\u0000\u00bb\u00b9\u0001\u0000\u0000\u0000\u00bc\u00c9" - + "\u0007\u0001\u0000\u0000\u00bd\u00be\u0005/\u0000\u0000\u00be\u00bf\u0005" - + "*\u0000\u0000\u00bf\u00c3\u0001\u0000\u0000\u0000\u00c0\u00c2\t\u0000" - + "\u0000\u0000\u00c1\u00c0\u0001\u0000\u0000\u0000\u00c2\u00c5\u0001\u0000" - + "\u0000\u0000\u00c3\u00c4\u0001\u0000\u0000\u0000\u00c3\u00c1\u0001\u0000" - + "\u0000\u0000\u00c4\u00c6\u0001\u0000\u0000\u0000\u00c5\u00c3\u0001\u0000" - + "\u0000\u0000\u00c6\u00c7\u0005*\u0000\u0000\u00c7\u00c9\u0005/\u0000\u0000" - + "\u00c8\u00b3\u0001\u0000\u0000\u0000\u00c8\u00bd\u0001\u0000\u0000\u0000" - + "\u00c9\u00ca\u0001\u0000\u0000\u0000\u00ca\u00cb\u0006\u0001\u0000\u0000" - + "\u00cb\u0005\u0001\u0000\u0000\u0000\u00cc\u00cd\u0005{\u0000\u0000\u00cd" - + "\u0007\u0001\u0000\u0000\u0000\u00ce\u00cf\u0005}\u0000\u0000\u00cf\t" - 
+ "\u0001\u0000\u0000\u0000\u00d0\u00d1\u0005[\u0000\u0000\u00d1\u000b\u0001" - + "\u0000\u0000\u0000\u00d2\u00d3\u0005]\u0000\u0000\u00d3\r\u0001\u0000" - + "\u0000\u0000\u00d4\u00d5\u0005(\u0000\u0000\u00d5\u000f\u0001\u0000\u0000" - + "\u0000\u00d6\u00d7\u0005)\u0000\u0000\u00d7\u0011\u0001\u0000\u0000\u0000" - + "\u00d8\u00d9\u0005.\u0000\u0000\u00d9\u00da\u0001\u0000\u0000\u0000\u00da" - + "\u00db\u0006\b\u0001\u0000\u00db\u0013\u0001\u0000\u0000\u0000\u00dc\u00dd" - + "\u0005?\u0000\u0000\u00dd\u00de\u0005.\u0000\u0000\u00de\u00df\u0001\u0000" - + "\u0000\u0000\u00df\u00e0\u0006\t\u0001\u0000\u00e0\u0015\u0001\u0000\u0000" - + "\u0000\u00e1\u00e2\u0005,\u0000\u0000\u00e2\u0017\u0001\u0000\u0000\u0000" - + "\u00e3\u00e4\u0005;\u0000\u0000\u00e4\u0019\u0001\u0000\u0000\u0000\u00e5" - + "\u00e6\u0005i\u0000\u0000\u00e6\u00e7\u0005f\u0000\u0000\u00e7\u001b\u0001" - + "\u0000\u0000\u0000\u00e8\u00e9\u0005i\u0000\u0000\u00e9\u00ea\u0005n\u0000" - + "\u0000\u00ea\u001d\u0001\u0000\u0000\u0000\u00eb\u00ec\u0005e\u0000\u0000" - + "\u00ec\u00ed\u0005l\u0000\u0000\u00ed\u00ee\u0005s\u0000\u0000\u00ee\u00ef" - + "\u0005e\u0000\u0000\u00ef\u001f\u0001\u0000\u0000\u0000\u00f0\u00f1\u0005" - + "w\u0000\u0000\u00f1\u00f2\u0005h\u0000\u0000\u00f2\u00f3\u0005i\u0000" - + "\u0000\u00f3\u00f4\u0005l\u0000\u0000\u00f4\u00f5\u0005e\u0000\u0000\u00f5" - + "!\u0001\u0000\u0000\u0000\u00f6\u00f7\u0005d\u0000\u0000\u00f7\u00f8\u0005" - + "o\u0000\u0000\u00f8#\u0001\u0000\u0000\u0000\u00f9\u00fa\u0005f\u0000" - + "\u0000\u00fa\u00fb\u0005o\u0000\u0000\u00fb\u00fc\u0005r\u0000\u0000\u00fc" - + "%\u0001\u0000\u0000\u0000\u00fd\u00fe\u0005c\u0000\u0000\u00fe\u00ff\u0005" - + "o\u0000\u0000\u00ff\u0100\u0005n\u0000\u0000\u0100\u0101\u0005t\u0000" - + "\u0000\u0101\u0102\u0005i\u0000\u0000\u0102\u0103\u0005n\u0000\u0000\u0103" - + "\u0104\u0005u\u0000\u0000\u0104\u0105\u0005e\u0000\u0000\u0105\'\u0001" - + "\u0000\u0000\u0000\u0106\u0107\u0005b\u0000\u0000\u0107\u0108\u0005r\u0000" - + "\u0000\u0108\u0109\u0005e\u0000\u0000\u0109\u010a\u0005a\u0000\u0000\u010a" - + "\u010b\u0005k\u0000\u0000\u010b)\u0001\u0000\u0000\u0000\u010c\u010d\u0005" - + "r\u0000\u0000\u010d\u010e\u0005e\u0000\u0000\u010e\u010f\u0005t\u0000" - + "\u0000\u010f\u0110\u0005u\u0000\u0000\u0110\u0111\u0005r\u0000\u0000\u0111" - + "\u0112\u0005n\u0000\u0000\u0112+\u0001\u0000\u0000\u0000\u0113\u0114\u0005" - + "n\u0000\u0000\u0114\u0115\u0005e\u0000\u0000\u0115\u0116\u0005w\u0000" - + "\u0000\u0116-\u0001\u0000\u0000\u0000\u0117\u0118\u0005t\u0000\u0000\u0118" - + "\u0119\u0005r\u0000\u0000\u0119\u011a\u0005y\u0000\u0000\u011a/\u0001" - + "\u0000\u0000\u0000\u011b\u011c\u0005c\u0000\u0000\u011c\u011d\u0005a\u0000" - + "\u0000\u011d\u011e\u0005t\u0000\u0000\u011e\u011f\u0005c\u0000\u0000\u011f" - + "\u0120\u0005h\u0000\u0000\u01201\u0001\u0000\u0000\u0000\u0121\u0122\u0005" - + "t\u0000\u0000\u0122\u0123\u0005h\u0000\u0000\u0123\u0124\u0005r\u0000" - + "\u0000\u0124\u0125\u0005o\u0000\u0000\u0125\u0126\u0005w\u0000\u0000\u0126" - + "3\u0001\u0000\u0000\u0000\u0127\u0128\u0005t\u0000\u0000\u0128\u0129\u0005" - + "h\u0000\u0000\u0129\u012a\u0005i\u0000\u0000\u012a\u012b\u0005s\u0000" - + "\u0000\u012b5\u0001\u0000\u0000\u0000\u012c\u012d\u0005i\u0000\u0000\u012d" - + "\u012e\u0005n\u0000\u0000\u012e\u012f\u0005s\u0000\u0000\u012f\u0130\u0005" - + "t\u0000\u0000\u0130\u0131\u0005a\u0000\u0000\u0131\u0132\u0005n\u0000" - + "\u0000\u0132\u0133\u0005c\u0000\u0000\u0133\u0134\u0005e\u0000\u0000\u0134" - + 
"\u0135\u0005o\u0000\u0000\u0135\u0136\u0005f\u0000\u0000\u01367\u0001" - + "\u0000\u0000\u0000\u0137\u0138\u0005!\u0000\u0000\u01389\u0001\u0000\u0000" - + "\u0000\u0139\u013a\u0005~\u0000\u0000\u013a;\u0001\u0000\u0000\u0000\u013b" - + "\u013c\u0005*\u0000\u0000\u013c=\u0001\u0000\u0000\u0000\u013d\u013e\u0005" - + "/\u0000\u0000\u013e\u013f\u0004\u001e\u0000\u0000\u013f?\u0001\u0000\u0000" - + "\u0000\u0140\u0141\u0005%\u0000\u0000\u0141A\u0001\u0000\u0000\u0000\u0142" - + "\u0143\u0005+\u0000\u0000\u0143C\u0001\u0000\u0000\u0000\u0144\u0145\u0005" - + "-\u0000\u0000\u0145E\u0001\u0000\u0000\u0000\u0146\u0147\u0005<\u0000" - + "\u0000\u0147\u0148\u0005<\u0000\u0000\u0148G\u0001\u0000\u0000\u0000\u0149" - + "\u014a\u0005>\u0000\u0000\u014a\u014b\u0005>\u0000\u0000\u014bI\u0001" - + "\u0000\u0000\u0000\u014c\u014d\u0005>\u0000\u0000\u014d\u014e\u0005>\u0000" - + "\u0000\u014e\u014f\u0005>\u0000\u0000\u014fK\u0001\u0000\u0000\u0000\u0150" - + "\u0151\u0005<\u0000\u0000\u0151M\u0001\u0000\u0000\u0000\u0152\u0153\u0005" - + "<\u0000\u0000\u0153\u0154\u0005=\u0000\u0000\u0154O\u0001\u0000\u0000" - + "\u0000\u0155\u0156\u0005>\u0000\u0000\u0156Q\u0001\u0000\u0000\u0000\u0157" - + "\u0158\u0005>\u0000\u0000\u0158\u0159\u0005=\u0000\u0000\u0159S\u0001" - + "\u0000\u0000\u0000\u015a\u015b\u0005=\u0000\u0000\u015b\u015c\u0005=\u0000" - + "\u0000\u015cU\u0001\u0000\u0000\u0000\u015d\u015e\u0005=\u0000\u0000\u015e" - + "\u015f\u0005=\u0000\u0000\u015f\u0160\u0005=\u0000\u0000\u0160W\u0001" - + "\u0000\u0000\u0000\u0161\u0162\u0005!\u0000\u0000\u0162\u0163\u0005=\u0000" - + "\u0000\u0163Y\u0001\u0000\u0000\u0000\u0164\u0165\u0005!\u0000\u0000\u0165" - + "\u0166\u0005=\u0000\u0000\u0166\u0167\u0005=\u0000\u0000\u0167[\u0001" - + "\u0000\u0000\u0000\u0168\u0169\u0005&\u0000\u0000\u0169]\u0001\u0000\u0000" - + "\u0000\u016a\u016b\u0005^\u0000\u0000\u016b_\u0001\u0000\u0000\u0000\u016c" - + "\u016d\u0005|\u0000\u0000\u016da\u0001\u0000\u0000\u0000\u016e\u016f\u0005" - + "&\u0000\u0000\u016f\u0170\u0005&\u0000\u0000\u0170c\u0001\u0000\u0000" - + "\u0000\u0171\u0172\u0005|\u0000\u0000\u0172\u0173\u0005|\u0000\u0000\u0173" - + "e\u0001\u0000\u0000\u0000\u0174\u0175\u0005?\u0000\u0000\u0175g\u0001" - + "\u0000\u0000\u0000\u0176\u0177\u0005:\u0000\u0000\u0177i\u0001\u0000\u0000" - + "\u0000\u0178\u0179\u0005?\u0000\u0000\u0179\u017a\u0005:\u0000\u0000\u017a" - + "k\u0001\u0000\u0000\u0000\u017b\u017c\u0005:\u0000\u0000\u017c\u017d\u0005" - + ":\u0000\u0000\u017dm\u0001\u0000\u0000\u0000\u017e\u017f\u0005-\u0000" - + "\u0000\u017f\u0180\u0005>\u0000\u0000\u0180o\u0001\u0000\u0000\u0000\u0181" - + "\u0182\u0005=\u0000\u0000\u0182\u0183\u0005~\u0000\u0000\u0183q\u0001" - + "\u0000\u0000\u0000\u0184\u0185\u0005=\u0000\u0000\u0185\u0186\u0005=\u0000" - + "\u0000\u0186\u0187\u0005~\u0000\u0000\u0187s\u0001\u0000\u0000\u0000\u0188" - + "\u0189\u0005+\u0000\u0000\u0189\u018a\u0005+\u0000\u0000\u018au\u0001" - + "\u0000\u0000\u0000\u018b\u018c\u0005-\u0000\u0000\u018c\u018d\u0005-\u0000" - + "\u0000\u018dw\u0001\u0000\u0000\u0000\u018e\u018f\u0005=\u0000\u0000\u018f" - + "y\u0001\u0000\u0000\u0000\u0190\u0191\u0005+\u0000\u0000\u0191\u0192\u0005" - + "=\u0000\u0000\u0192{\u0001\u0000\u0000\u0000\u0193\u0194\u0005-\u0000" - + "\u0000\u0194\u0195\u0005=\u0000\u0000\u0195}\u0001\u0000\u0000\u0000\u0196" - + "\u0197\u0005*\u0000\u0000\u0197\u0198\u0005=\u0000\u0000\u0198\u007f\u0001" - + "\u0000\u0000\u0000\u0199\u019a\u0005/\u0000\u0000\u019a\u019b\u0005=\u0000" - + 
"\u0000\u019b\u0081\u0001\u0000\u0000\u0000\u019c\u019d\u0005%\u0000\u0000" - + "\u019d\u019e\u0005=\u0000\u0000\u019e\u0083\u0001\u0000\u0000\u0000\u019f" - + "\u01a0\u0005&\u0000\u0000\u01a0\u01a1\u0005=\u0000\u0000\u01a1\u0085\u0001" - + "\u0000\u0000\u0000\u01a2\u01a3\u0005^\u0000\u0000\u01a3\u01a4\u0005=\u0000" - + "\u0000\u01a4\u0087\u0001\u0000\u0000\u0000\u01a5\u01a6\u0005|\u0000\u0000" - + "\u01a6\u01a7\u0005=\u0000\u0000\u01a7\u0089\u0001\u0000\u0000\u0000\u01a8" - + "\u01a9\u0005<\u0000\u0000\u01a9\u01aa\u0005<\u0000\u0000\u01aa\u01ab\u0005" - + "=\u0000\u0000\u01ab\u008b\u0001\u0000\u0000\u0000\u01ac\u01ad\u0005>\u0000" - + "\u0000\u01ad\u01ae\u0005>\u0000\u0000\u01ae\u01af\u0005=\u0000\u0000\u01af" - + "\u008d\u0001\u0000\u0000\u0000\u01b0\u01b1\u0005>\u0000\u0000\u01b1\u01b2" - + "\u0005>\u0000\u0000\u01b2\u01b3\u0005>\u0000\u0000\u01b3\u01b4\u0005=" - + "\u0000\u0000\u01b4\u008f\u0001\u0000\u0000\u0000\u01b5\u01b7\u00050\u0000" - + "\u0000\u01b6\u01b8\u0007\u0002\u0000\u0000\u01b7\u01b6\u0001\u0000\u0000" - + "\u0000\u01b8\u01b9\u0001\u0000\u0000\u0000\u01b9\u01b7\u0001\u0000\u0000" - + "\u0000\u01b9\u01ba\u0001\u0000\u0000\u0000\u01ba\u01bc\u0001\u0000\u0000" - + "\u0000\u01bb\u01bd\u0007\u0003\u0000\u0000\u01bc\u01bb\u0001\u0000\u0000" - + "\u0000\u01bc\u01bd\u0001\u0000\u0000\u0000\u01bd\u0091\u0001\u0000\u0000" - + "\u0000\u01be\u01bf\u00050\u0000\u0000\u01bf\u01c1\u0007\u0004\u0000\u0000" - + "\u01c0\u01c2\u0007\u0005\u0000\u0000\u01c1\u01c0\u0001\u0000\u0000\u0000" - + "\u01c2\u01c3\u0001\u0000\u0000\u0000\u01c3\u01c1\u0001\u0000\u0000\u0000" - + "\u01c3\u01c4\u0001\u0000\u0000\u0000\u01c4\u01c6\u0001\u0000\u0000\u0000" - + "\u01c5\u01c7\u0007\u0003\u0000\u0000\u01c6\u01c5\u0001\u0000\u0000\u0000" - + "\u01c6\u01c7\u0001\u0000\u0000\u0000\u01c7\u0093\u0001\u0000\u0000\u0000" - + "\u01c8\u01d1\u00050\u0000\u0000\u01c9\u01cd\u0007\u0006\u0000\u0000\u01ca" - + "\u01cc\u0007\u0007\u0000\u0000\u01cb\u01ca\u0001\u0000\u0000\u0000\u01cc" - + "\u01cf\u0001\u0000\u0000\u0000\u01cd\u01cb\u0001\u0000\u0000\u0000\u01cd" - + "\u01ce\u0001\u0000\u0000\u0000\u01ce\u01d1\u0001\u0000\u0000\u0000\u01cf" - + "\u01cd\u0001\u0000\u0000\u0000\u01d0\u01c8\u0001\u0000\u0000\u0000\u01d0" - + "\u01c9\u0001\u0000\u0000\u0000\u01d1\u01d3\u0001\u0000\u0000\u0000\u01d2" - + "\u01d4\u0007\b\u0000\u0000\u01d3\u01d2\u0001\u0000\u0000\u0000\u01d3\u01d4" - + "\u0001\u0000\u0000\u0000\u01d4\u0095\u0001\u0000\u0000\u0000\u01d5\u01de" - + "\u00050\u0000\u0000\u01d6\u01da\u0007\u0006\u0000\u0000\u01d7\u01d9\u0007" - + "\u0007\u0000\u0000\u01d8\u01d7\u0001\u0000\u0000\u0000\u01d9\u01dc\u0001" - + "\u0000\u0000\u0000\u01da\u01d8\u0001\u0000\u0000\u0000\u01da\u01db\u0001" - + "\u0000\u0000\u0000\u01db\u01de\u0001\u0000\u0000\u0000\u01dc\u01da\u0001" - + "\u0000\u0000\u0000\u01dd\u01d5\u0001\u0000\u0000\u0000\u01dd\u01d6\u0001" - + "\u0000\u0000\u0000\u01de\u01e5\u0001\u0000\u0000\u0000\u01df\u01e1\u0003" - + "\u0012\b\u0000\u01e0\u01e2\u0007\u0007\u0000\u0000\u01e1\u01e0\u0001\u0000" - + "\u0000\u0000\u01e2\u01e3\u0001\u0000\u0000\u0000\u01e3\u01e1\u0001\u0000" - + "\u0000\u0000\u01e3\u01e4\u0001\u0000\u0000\u0000\u01e4\u01e6\u0001\u0000" - + "\u0000\u0000\u01e5\u01df\u0001\u0000\u0000\u0000\u01e5\u01e6\u0001\u0000" - + "\u0000\u0000\u01e6\u01f0\u0001\u0000\u0000\u0000\u01e7\u01e9\u0007\t\u0000" - + "\u0000\u01e8\u01ea\u0007\n\u0000\u0000\u01e9\u01e8\u0001\u0000\u0000\u0000" - + "\u01e9\u01ea\u0001\u0000\u0000\u0000\u01ea\u01ec\u0001\u0000\u0000\u0000" - + 
"\u01eb\u01ed\u0007\u0007\u0000\u0000\u01ec\u01eb\u0001\u0000\u0000\u0000" - + "\u01ed\u01ee\u0001\u0000\u0000\u0000\u01ee\u01ec\u0001\u0000\u0000\u0000" - + "\u01ee\u01ef\u0001\u0000\u0000\u0000\u01ef\u01f1\u0001\u0000\u0000\u0000" - + "\u01f0\u01e7\u0001\u0000\u0000\u0000\u01f0\u01f1\u0001\u0000\u0000\u0000" - + "\u01f1\u01f3\u0001\u0000\u0000\u0000\u01f2\u01f4\u0007\u000b\u0000\u0000" - + "\u01f3\u01f2\u0001\u0000\u0000\u0000\u01f3\u01f4\u0001\u0000\u0000\u0000" - + "\u01f4\u0097\u0001\u0000\u0000\u0000\u01f5\u01fd\u0005\"\u0000\u0000\u01f6" - + "\u01f7\u0005\\\u0000\u0000\u01f7\u01fc\u0005\"\u0000\u0000\u01f8\u01f9" - + "\u0005\\\u0000\u0000\u01f9\u01fc\u0005\\\u0000\u0000\u01fa\u01fc\b\f\u0000" - + "\u0000\u01fb\u01f6\u0001\u0000\u0000\u0000\u01fb\u01f8\u0001\u0000\u0000" - + "\u0000\u01fb\u01fa\u0001\u0000\u0000\u0000\u01fc\u01ff\u0001\u0000\u0000" - + "\u0000\u01fd\u01fe\u0001\u0000\u0000\u0000\u01fd\u01fb\u0001\u0000\u0000" - + "\u0000\u01fe\u0200\u0001\u0000\u0000\u0000\u01ff\u01fd\u0001\u0000\u0000" - + "\u0000\u0200\u020e\u0005\"\u0000\u0000\u0201\u0209\u0005\'\u0000\u0000" - + "\u0202\u0203\u0005\\\u0000\u0000\u0203\u0208\u0005\'\u0000\u0000\u0204" - + "\u0205\u0005\\\u0000\u0000\u0205\u0208\u0005\\\u0000\u0000\u0206\u0208" - + "\b\r\u0000\u0000\u0207\u0202\u0001\u0000\u0000\u0000\u0207\u0204\u0001" - + "\u0000\u0000\u0000\u0207\u0206\u0001\u0000\u0000\u0000\u0208\u020b\u0001" - + "\u0000\u0000\u0000\u0209\u020a\u0001\u0000\u0000\u0000\u0209\u0207\u0001" - + "\u0000\u0000\u0000\u020a\u020c\u0001\u0000\u0000\u0000\u020b\u0209\u0001" - + "\u0000\u0000\u0000\u020c\u020e\u0005\'\u0000\u0000\u020d\u01f5\u0001\u0000" - + "\u0000\u0000\u020d\u0201\u0001\u0000\u0000\u0000\u020e\u0099\u0001\u0000" - + "\u0000\u0000\u020f\u0213\u0005/\u0000\u0000\u0210\u0211\u0005\\\u0000" - + "\u0000\u0211\u0214\b\u000e\u0000\u0000\u0212\u0214\b\u000f\u0000\u0000" - + "\u0213\u0210\u0001\u0000\u0000\u0000\u0213\u0212\u0001\u0000\u0000\u0000" - + "\u0214\u0215\u0001\u0000\u0000\u0000\u0215\u0216\u0001\u0000\u0000\u0000" - + "\u0215\u0213\u0001\u0000\u0000\u0000\u0216\u0217\u0001\u0000\u0000\u0000" - + "\u0217\u021b\u0005/\u0000\u0000\u0218\u021a\u0007\u0010\u0000\u0000\u0219" - + "\u0218\u0001\u0000\u0000\u0000\u021a\u021d\u0001\u0000\u0000\u0000\u021b" - + "\u0219\u0001\u0000\u0000\u0000\u021b\u021c\u0001\u0000\u0000\u0000\u021c" - + "\u021e\u0001\u0000\u0000\u0000\u021d\u021b\u0001\u0000\u0000\u0000\u021e" - + "\u021f\u0004L\u0001\u0000\u021f\u009b\u0001\u0000\u0000\u0000\u0220\u0221" - + "\u0005t\u0000\u0000\u0221\u0222\u0005r\u0000\u0000\u0222\u0223\u0005u" - + "\u0000\u0000\u0223\u0224\u0005e\u0000\u0000\u0224\u009d\u0001\u0000\u0000" - + "\u0000\u0225\u0226\u0005f\u0000\u0000\u0226\u0227\u0005a\u0000\u0000\u0227" - + "\u0228\u0005l\u0000\u0000\u0228\u0229\u0005s\u0000\u0000\u0229\u022a\u0005" - + "e\u0000\u0000\u022a\u009f\u0001\u0000\u0000\u0000\u022b\u022c\u0005n\u0000" - + "\u0000\u022c\u022d\u0005u\u0000\u0000\u022d\u022e\u0005l\u0000\u0000\u022e" - + "\u022f\u0005l\u0000\u0000\u022f\u00a1\u0001\u0000\u0000\u0000\u0230\u0231" - + "\u0005b\u0000\u0000\u0231\u0232\u0005o\u0000\u0000\u0232\u0233\u0005o" - + "\u0000\u0000\u0233\u0234\u0005l\u0000\u0000\u0234\u0235\u0005e\u0000\u0000" - + "\u0235\u0236\u0005a\u0000\u0000\u0236\u0257\u0005n\u0000\u0000\u0237\u0238" - + "\u0005b\u0000\u0000\u0238\u0239\u0005y\u0000\u0000\u0239\u023a\u0005t" - + "\u0000\u0000\u023a\u0257\u0005e\u0000\u0000\u023b\u023c\u0005s\u0000\u0000" - + "\u023c\u023d\u0005h\u0000\u0000\u023d\u023e\u0005o\u0000\u0000\u023e\u023f" - 
+ "\u0005r\u0000\u0000\u023f\u0257\u0005t\u0000\u0000\u0240\u0241\u0005c" - + "\u0000\u0000\u0241\u0242\u0005h\u0000\u0000\u0242\u0243\u0005a\u0000\u0000" - + "\u0243\u0257\u0005r\u0000\u0000\u0244\u0245\u0005i\u0000\u0000\u0245\u0246" - + "\u0005n\u0000\u0000\u0246\u0257\u0005t\u0000\u0000\u0247\u0248\u0005l" - + "\u0000\u0000\u0248\u0249\u0005o\u0000\u0000\u0249\u024a\u0005n\u0000\u0000" - + "\u024a\u0257\u0005g\u0000\u0000\u024b\u024c\u0005f\u0000\u0000\u024c\u024d" - + "\u0005l\u0000\u0000\u024d\u024e\u0005o\u0000\u0000\u024e\u024f\u0005a" - + "\u0000\u0000\u024f\u0257\u0005t\u0000\u0000\u0250\u0251\u0005d\u0000\u0000" - + "\u0251\u0252\u0005o\u0000\u0000\u0252\u0253\u0005u\u0000\u0000\u0253\u0254" - + "\u0005b\u0000\u0000\u0254\u0255\u0005l\u0000\u0000\u0255\u0257\u0005e" - + "\u0000\u0000\u0256\u0230\u0001\u0000\u0000\u0000\u0256\u0237\u0001\u0000" - + "\u0000\u0000\u0256\u023b\u0001\u0000\u0000\u0000\u0256\u0240\u0001\u0000" - + "\u0000\u0000\u0256\u0244\u0001\u0000\u0000\u0000\u0256\u0247\u0001\u0000" - + "\u0000\u0000\u0256\u024b\u0001\u0000\u0000\u0000\u0256\u0250\u0001\u0000" - + "\u0000\u0000\u0257\u00a3\u0001\u0000\u0000\u0000\u0258\u0259\u0005d\u0000" - + "\u0000\u0259\u025a\u0005e\u0000\u0000\u025a\u025b\u0005f\u0000\u0000\u025b" - + "\u00a5\u0001\u0000\u0000\u0000\u025c\u0260\u0007\u0011\u0000\u0000\u025d" - + "\u025f\u0007\u0012\u0000\u0000\u025e\u025d\u0001\u0000\u0000\u0000\u025f" - + "\u0262\u0001\u0000\u0000\u0000\u0260\u025e\u0001\u0000\u0000\u0000\u0260" - + "\u0261\u0001\u0000\u0000\u0000\u0261\u00a7\u0001\u0000\u0000\u0000\u0262" - + "\u0260\u0001\u0000\u0000\u0000\u0263\u026c\u00050\u0000\u0000\u0264\u0268" - + "\u0007\u0006\u0000\u0000\u0265\u0267\u0007\u0007\u0000\u0000\u0266\u0265" - + "\u0001\u0000\u0000\u0000\u0267\u026a\u0001\u0000\u0000\u0000\u0268\u0266" - + "\u0001\u0000\u0000\u0000\u0268\u0269\u0001\u0000\u0000\u0000\u0269\u026c" - + "\u0001\u0000\u0000\u0000\u026a\u0268\u0001\u0000\u0000\u0000\u026b\u0263" - + "\u0001\u0000\u0000\u0000\u026b\u0264\u0001\u0000\u0000\u0000\u026c\u026d" - + "\u0001\u0000\u0000\u0000\u026d\u026e\u0006S\u0002\u0000\u026e\u00a9\u0001" - + "\u0000\u0000\u0000\u026f\u0273\u0007\u0011\u0000\u0000\u0270\u0272\u0007" - + "\u0012\u0000\u0000\u0271\u0270\u0001\u0000\u0000\u0000\u0272\u0275\u0001" - + "\u0000\u0000\u0000\u0273\u0271\u0001\u0000\u0000\u0000\u0273\u0274\u0001" - + "\u0000\u0000\u0000\u0274\u0276\u0001\u0000\u0000\u0000\u0275\u0273\u0001" - + "\u0000\u0000\u0000\u0276\u0277\u0006T\u0002\u0000\u0277\u00ab\u0001\u0000" - + "\u0000\u0000\"\u0000\u0001\u00af\u00b9\u00c3\u00c8\u01b9\u01bc\u01c3\u01c6" - + "\u01cd\u01d0\u01d3\u01da\u01dd\u01e3\u01e5\u01e9\u01ee\u01f0\u01f3\u01fb" - + "\u01fd\u0207\u0209\u020d\u0213\u0215\u021b\u0256\u0260\u0268\u026b\u0273" - + "\u0003\u0006\u0000\u0000\u0002\u0001\u0000\u0002\u0000\u0000"; + + "P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0003" + + "P\u0256\bP\u0001Q\u0001Q\u0001Q\u0001Q\u0001R\u0001R\u0005R\u025e\bR\n" + + "R\fR\u0261\tR\u0001S\u0001S\u0001S\u0005S\u0266\bS\nS\fS\u0269\tS\u0003" + + "S\u026b\bS\u0001S\u0001S\u0001T\u0001T\u0005T\u0271\bT\nT\fT\u0274\tT" + + "\u0001T\u0001T\u0004\u00c2\u01fc\u0208\u0214\u0000U\u0002\u0001\u0004" + + "\u0002\u0006\u0003\b\u0004\n\u0005\f\u0006\u000e\u0007\u0010\b\u0012\t" + + "\u0014\n\u0016\u000b\u0018\f\u001a\r\u001c\u000e\u001e\u000f \u0010\"" + + "\u0011$\u0012&\u0013(\u0014*\u0015,\u0016.\u00170\u00182\u00194\u001a" + + "6\u001b8\u001c:\u001d<\u001e>\u001f@ B!D\"F#H$J%L&N\'P(R)T*V+X,Z-\\.^" + + 
"/`0b1d2f3h4j5l6n7p8r9t:v;x<z=|>~?\u0080@\u0082A\u0084B\u0086C\u0088D\u008a" + + "E\u008cF\u008eG\u0090H\u0092I\u0094J\u0096K\u0098L\u009aM\u009cN\u009e" + + "O\u00a0P\u00a2Q\u00a4R\u00a6S\u00a8T\u00aaU\u0002\u0000\u0001\u0013\u0003" + + "\u0000\t\n\r\r \u0002\u0000\n\n\r\r\u0001\u000007\u0002\u0000LLll\u0002" + + "\u0000XXxx\u0003\u000009AFaf\u0001\u000019\u0001\u000009\u0006\u0000D" + + "DFFLLddffll\u0002\u0000EEee\u0002\u0000++--\u0004\u0000DDFFddff\u0002" + + "\u0000\"\"\\\\\u0002\u0000\'\'\\\\\u0001\u0000\n\n\u0002\u0000\n\n//\u0007" + + "\u0000UUcciilmssuuxx\u0003\u0000AZ__az\u0004\u000009AZ__az\u029d\u0000" + + "\u0002\u0001\u0000\u0000\u0000\u0000\u0004\u0001\u0000\u0000\u0000\u0000" + + "\u0006\u0001\u0000\u0000\u0000\u0000\b\u0001\u0000\u0000\u0000\u0000\n" + + "\u0001\u0000\u0000\u0000\u0000\f\u0001\u0000\u0000\u0000\u0000\u000e\u0001" + + "\u0000\u0000\u0000\u0000\u0010\u0001\u0000\u0000\u0000\u0000\u0012\u0001" + + "\u0000\u0000\u0000\u0000\u0014\u0001\u0000\u0000\u0000\u0000\u0016\u0001" + + "\u0000\u0000\u0000\u0000\u0018\u0001\u0000\u0000\u0000\u0000\u001a\u0001" + + "\u0000\u0000\u0000\u0000\u001c\u0001\u0000\u0000\u0000\u0000\u001e\u0001" + + "\u0000\u0000\u0000\u0000 \u0001\u0000\u0000\u0000\u0000\"\u0001\u0000" + + "\u0000\u0000\u0000$\u0001\u0000\u0000\u0000\u0000&\u0001\u0000\u0000\u0000" + + "\u0000(\u0001\u0000\u0000\u0000\u0000*\u0001\u0000\u0000\u0000\u0000," + + "\u0001\u0000\u0000\u0000\u0000.\u0001\u0000\u0000\u0000\u00000\u0001\u0000" + + "\u0000\u0000\u00002\u0001\u0000\u0000\u0000\u00004\u0001\u0000\u0000\u0000" + + "\u00006\u0001\u0000\u0000\u0000\u00008\u0001\u0000\u0000\u0000\u0000:" + + "\u0001\u0000\u0000\u0000\u0000<\u0001\u0000\u0000\u0000\u0000>\u0001\u0000" + + "\u0000\u0000\u0000@\u0001\u0000\u0000\u0000\u0000B\u0001\u0000\u0000\u0000" + + "\u0000D\u0001\u0000\u0000\u0000\u0000F\u0001\u0000\u0000\u0000\u0000H" + + "\u0001\u0000\u0000\u0000\u0000J\u0001\u0000\u0000\u0000\u0000L\u0001\u0000" + + "\u0000\u0000\u0000N\u0001\u0000\u0000\u0000\u0000P\u0001\u0000\u0000\u0000" + + "\u0000R\u0001\u0000\u0000\u0000\u0000T\u0001\u0000\u0000\u0000\u0000V" + + "\u0001\u0000\u0000\u0000\u0000X\u0001\u0000\u0000\u0000\u0000Z\u0001\u0000" + + "\u0000\u0000\u0000\\\u0001\u0000\u0000\u0000\u0000^\u0001\u0000\u0000" + + "\u0000\u0000`\u0001\u0000\u0000\u0000\u0000b\u0001\u0000\u0000\u0000\u0000" + + "d\u0001\u0000\u0000\u0000\u0000f\u0001\u0000\u0000\u0000\u0000h\u0001" + + "\u0000\u0000\u0000\u0000j\u0001\u0000\u0000\u0000\u0000l\u0001\u0000\u0000" + + "\u0000\u0000n\u0001\u0000\u0000\u0000\u0000p\u0001\u0000\u0000\u0000\u0000" + + "r\u0001\u0000\u0000\u0000\u0000t\u0001\u0000\u0000\u0000\u0000v\u0001" + + "\u0000\u0000\u0000\u0000x\u0001\u0000\u0000\u0000\u0000z\u0001\u0000\u0000" + + "\u0000\u0000|\u0001\u0000\u0000\u0000\u0000~\u0001\u0000\u0000\u0000\u0000" + + "\u0080\u0001\u0000\u0000\u0000\u0000\u0082\u0001\u0000\u0000\u0000\u0000" + + "\u0084\u0001\u0000\u0000\u0000\u0000\u0086\u0001\u0000\u0000\u0000\u0000" + + "\u0088\u0001\u0000\u0000\u0000\u0000\u008a\u0001\u0000\u0000\u0000\u0000" + + "\u008c\u0001\u0000\u0000\u0000\u0000\u008e\u0001\u0000\u0000\u0000\u0000" + + "\u0090\u0001\u0000\u0000\u0000\u0000\u0092\u0001\u0000\u0000\u0000\u0000" + + "\u0094\u0001\u0000\u0000\u0000\u0000\u0096\u0001\u0000\u0000\u0000\u0000" + + "\u0098\u0001\u0000\u0000\u0000\u0000\u009a\u0001\u0000\u0000\u0000\u0000" + + "\u009c\u0001\u0000\u0000\u0000\u0000\u009e\u0001\u0000\u0000\u0000\u0000" + + "\u00a0\u0001\u0000\u0000\u0000\u0000\u00a2\u0001\u0000\u0000\u0000\u0000" + + 
"\u00a4\u0001\u0000\u0000\u0000\u0000\u00a6\u0001\u0000\u0000\u0000\u0001" + + "\u00a8\u0001\u0000\u0000\u0000\u0001\u00aa\u0001\u0000\u0000\u0000\u0002" + + "\u00ad\u0001\u0000\u0000\u0000\u0004\u00c7\u0001\u0000\u0000\u0000\u0006" + + "\u00cb\u0001\u0000\u0000\u0000\b\u00cd\u0001\u0000\u0000\u0000\n\u00cf" + + "\u0001\u0000\u0000\u0000\f\u00d1\u0001\u0000\u0000\u0000\u000e\u00d3\u0001" + + "\u0000\u0000\u0000\u0010\u00d5\u0001\u0000\u0000\u0000\u0012\u00d7\u0001" + + "\u0000\u0000\u0000\u0014\u00db\u0001\u0000\u0000\u0000\u0016\u00e0\u0001" + + "\u0000\u0000\u0000\u0018\u00e2\u0001\u0000\u0000\u0000\u001a\u00e4\u0001" + + "\u0000\u0000\u0000\u001c\u00e7\u0001\u0000\u0000\u0000\u001e\u00ea\u0001" + + "\u0000\u0000\u0000 \u00ef\u0001\u0000\u0000\u0000\"\u00f5\u0001\u0000" + + "\u0000\u0000$\u00f8\u0001\u0000\u0000\u0000&\u00fc\u0001\u0000\u0000\u0000" + + "(\u0105\u0001\u0000\u0000\u0000*\u010b\u0001\u0000\u0000\u0000,\u0112" + + "\u0001\u0000\u0000\u0000.\u0116\u0001\u0000\u0000\u00000\u011a\u0001\u0000" + + "\u0000\u00002\u0120\u0001\u0000\u0000\u00004\u0126\u0001\u0000\u0000\u0000" + + "6\u012b\u0001\u0000\u0000\u00008\u0136\u0001\u0000\u0000\u0000:\u0138" + + "\u0001\u0000\u0000\u0000<\u013a\u0001\u0000\u0000\u0000>\u013c\u0001\u0000" + + "\u0000\u0000@\u013f\u0001\u0000\u0000\u0000B\u0141\u0001\u0000\u0000\u0000" + + "D\u0143\u0001\u0000\u0000\u0000F\u0145\u0001\u0000\u0000\u0000H\u0148" + + "\u0001\u0000\u0000\u0000J\u014b\u0001\u0000\u0000\u0000L\u014f\u0001\u0000" + + "\u0000\u0000N\u0151\u0001\u0000\u0000\u0000P\u0154\u0001\u0000\u0000\u0000" + + "R\u0156\u0001\u0000\u0000\u0000T\u0159\u0001\u0000\u0000\u0000V\u015c" + + "\u0001\u0000\u0000\u0000X\u0160\u0001\u0000\u0000\u0000Z\u0163\u0001\u0000" + + "\u0000\u0000\\\u0167\u0001\u0000\u0000\u0000^\u0169\u0001\u0000\u0000" + + "\u0000`\u016b\u0001\u0000\u0000\u0000b\u016d\u0001\u0000\u0000\u0000d" + + "\u0170\u0001\u0000\u0000\u0000f\u0173\u0001\u0000\u0000\u0000h\u0175\u0001" + + "\u0000\u0000\u0000j\u0177\u0001\u0000\u0000\u0000l\u017a\u0001\u0000\u0000" + + "\u0000n\u017d\u0001\u0000\u0000\u0000p\u0180\u0001\u0000\u0000\u0000r" + + "\u0183\u0001\u0000\u0000\u0000t\u0187\u0001\u0000\u0000\u0000v\u018a\u0001" + + "\u0000\u0000\u0000x\u018d\u0001\u0000\u0000\u0000z\u018f\u0001\u0000\u0000" + + "\u0000|\u0192\u0001\u0000\u0000\u0000~\u0195\u0001\u0000\u0000\u0000\u0080" + + "\u0198\u0001\u0000\u0000\u0000\u0082\u019b\u0001\u0000\u0000\u0000\u0084" + + "\u019e\u0001\u0000\u0000\u0000\u0086\u01a1\u0001\u0000\u0000\u0000\u0088" + + "\u01a4\u0001\u0000\u0000\u0000\u008a\u01a7\u0001\u0000\u0000\u0000\u008c" + + "\u01ab\u0001\u0000\u0000\u0000\u008e\u01af\u0001\u0000\u0000\u0000\u0090" + + "\u01b4\u0001\u0000\u0000\u0000\u0092\u01bd\u0001\u0000\u0000\u0000\u0094" + + "\u01cf\u0001\u0000\u0000\u0000\u0096\u01dc\u0001\u0000\u0000\u0000\u0098" + + "\u020c\u0001\u0000\u0000\u0000\u009a\u020e\u0001\u0000\u0000\u0000\u009c" + + "\u021f\u0001\u0000\u0000\u0000\u009e\u0224\u0001\u0000\u0000\u0000\u00a0" + + "\u022a\u0001\u0000\u0000\u0000\u00a2\u0255\u0001\u0000\u0000\u0000\u00a4" + + "\u0257\u0001\u0000\u0000\u0000\u00a6\u025b\u0001\u0000\u0000\u0000\u00a8" + + "\u026a\u0001\u0000\u0000\u0000\u00aa\u026e\u0001\u0000\u0000\u0000\u00ac" + + "\u00ae\u0007\u0000\u0000\u0000\u00ad\u00ac\u0001\u0000\u0000\u0000\u00ae" + + "\u00af\u0001\u0000\u0000\u0000\u00af\u00ad\u0001\u0000\u0000\u0000\u00af" + + "\u00b0\u0001\u0000\u0000\u0000\u00b0\u00b1\u0001\u0000\u0000\u0000\u00b1" + + "\u00b2\u0006\u0000\u0000\u0000\u00b2\u0003\u0001\u0000\u0000\u0000\u00b3" + 
+ "\u00b4\u0005/\u0000\u0000\u00b4\u00b5\u0005/\u0000\u0000\u00b5\u00b9\u0001" + + "\u0000\u0000\u0000\u00b6\u00b8\b\u0001\u0000\u0000\u00b7\u00b6\u0001\u0000" + + "\u0000\u0000\u00b8\u00bb\u0001\u0000\u0000\u0000\u00b9\u00b7\u0001\u0000" + + "\u0000\u0000\u00b9\u00ba\u0001\u0000\u0000\u0000\u00ba\u00c8\u0001\u0000" + + "\u0000\u0000\u00bb\u00b9\u0001\u0000\u0000\u0000\u00bc\u00bd\u0005/\u0000" + + "\u0000\u00bd\u00be\u0005*\u0000\u0000\u00be\u00c2\u0001\u0000\u0000\u0000" + + "\u00bf\u00c1\t\u0000\u0000\u0000\u00c0\u00bf\u0001\u0000\u0000\u0000\u00c1" + + "\u00c4\u0001\u0000\u0000\u0000\u00c2\u00c3\u0001\u0000\u0000\u0000\u00c2" + + "\u00c0\u0001\u0000\u0000\u0000\u00c3\u00c5\u0001\u0000\u0000\u0000\u00c4" + + "\u00c2\u0001\u0000\u0000\u0000\u00c5\u00c6\u0005*\u0000\u0000\u00c6\u00c8" + + "\u0005/\u0000\u0000\u00c7\u00b3\u0001\u0000\u0000\u0000\u00c7\u00bc\u0001" + + "\u0000\u0000\u0000\u00c8\u00c9\u0001\u0000\u0000\u0000\u00c9\u00ca\u0006" + + "\u0001\u0000\u0000\u00ca\u0005\u0001\u0000\u0000\u0000\u00cb\u00cc\u0005" + + "{\u0000\u0000\u00cc\u0007\u0001\u0000\u0000\u0000\u00cd\u00ce\u0005}\u0000" + + "\u0000\u00ce\t\u0001\u0000\u0000\u0000\u00cf\u00d0\u0005[\u0000\u0000" + + "\u00d0\u000b\u0001\u0000\u0000\u0000\u00d1\u00d2\u0005]\u0000\u0000\u00d2" + + "\r\u0001\u0000\u0000\u0000\u00d3\u00d4\u0005(\u0000\u0000\u00d4\u000f" + + "\u0001\u0000\u0000\u0000\u00d5\u00d6\u0005)\u0000\u0000\u00d6\u0011\u0001" + + "\u0000\u0000\u0000\u00d7\u00d8\u0005.\u0000\u0000\u00d8\u00d9\u0001\u0000" + + "\u0000\u0000\u00d9\u00da\u0006\b\u0001\u0000\u00da\u0013\u0001\u0000\u0000" + + "\u0000\u00db\u00dc\u0005?\u0000\u0000\u00dc\u00dd\u0005.\u0000\u0000\u00dd" + + "\u00de\u0001\u0000\u0000\u0000\u00de\u00df\u0006\t\u0001\u0000\u00df\u0015" + + "\u0001\u0000\u0000\u0000\u00e0\u00e1\u0005,\u0000\u0000\u00e1\u0017\u0001" + + "\u0000\u0000\u0000\u00e2\u00e3\u0005;\u0000\u0000\u00e3\u0019\u0001\u0000" + + "\u0000\u0000\u00e4\u00e5\u0005i\u0000\u0000\u00e5\u00e6\u0005f\u0000\u0000" + + "\u00e6\u001b\u0001\u0000\u0000\u0000\u00e7\u00e8\u0005i\u0000\u0000\u00e8" + + "\u00e9\u0005n\u0000\u0000\u00e9\u001d\u0001\u0000\u0000\u0000\u00ea\u00eb" + + "\u0005e\u0000\u0000\u00eb\u00ec\u0005l\u0000\u0000\u00ec\u00ed\u0005s" + + "\u0000\u0000\u00ed\u00ee\u0005e\u0000\u0000\u00ee\u001f\u0001\u0000\u0000" + + "\u0000\u00ef\u00f0\u0005w\u0000\u0000\u00f0\u00f1\u0005h\u0000\u0000\u00f1" + + "\u00f2\u0005i\u0000\u0000\u00f2\u00f3\u0005l\u0000\u0000\u00f3\u00f4\u0005" + + "e\u0000\u0000\u00f4!\u0001\u0000\u0000\u0000\u00f5\u00f6\u0005d\u0000" + + "\u0000\u00f6\u00f7\u0005o\u0000\u0000\u00f7#\u0001\u0000\u0000\u0000\u00f8" + + "\u00f9\u0005f\u0000\u0000\u00f9\u00fa\u0005o\u0000\u0000\u00fa\u00fb\u0005" + + "r\u0000\u0000\u00fb%\u0001\u0000\u0000\u0000\u00fc\u00fd\u0005c\u0000" + + "\u0000\u00fd\u00fe\u0005o\u0000\u0000\u00fe\u00ff\u0005n\u0000\u0000\u00ff" + + "\u0100\u0005t\u0000\u0000\u0100\u0101\u0005i\u0000\u0000\u0101\u0102\u0005" + + "n\u0000\u0000\u0102\u0103\u0005u\u0000\u0000\u0103\u0104\u0005e\u0000" + + "\u0000\u0104\'\u0001\u0000\u0000\u0000\u0105\u0106\u0005b\u0000\u0000" + + "\u0106\u0107\u0005r\u0000\u0000\u0107\u0108\u0005e\u0000\u0000\u0108\u0109" + + "\u0005a\u0000\u0000\u0109\u010a\u0005k\u0000\u0000\u010a)\u0001\u0000" + + "\u0000\u0000\u010b\u010c\u0005r\u0000\u0000\u010c\u010d\u0005e\u0000\u0000" + + "\u010d\u010e\u0005t\u0000\u0000\u010e\u010f\u0005u\u0000\u0000\u010f\u0110" + + "\u0005r\u0000\u0000\u0110\u0111\u0005n\u0000\u0000\u0111+\u0001\u0000" + + 
"\u0000\u0000\u0112\u0113\u0005n\u0000\u0000\u0113\u0114\u0005e\u0000\u0000" + + "\u0114\u0115\u0005w\u0000\u0000\u0115-\u0001\u0000\u0000\u0000\u0116\u0117" + + "\u0005t\u0000\u0000\u0117\u0118\u0005r\u0000\u0000\u0118\u0119\u0005y" + + "\u0000\u0000\u0119/\u0001\u0000\u0000\u0000\u011a\u011b\u0005c\u0000\u0000" + + "\u011b\u011c\u0005a\u0000\u0000\u011c\u011d\u0005t\u0000\u0000\u011d\u011e" + + "\u0005c\u0000\u0000\u011e\u011f\u0005h\u0000\u0000\u011f1\u0001\u0000" + + "\u0000\u0000\u0120\u0121\u0005t\u0000\u0000\u0121\u0122\u0005h\u0000\u0000" + + "\u0122\u0123\u0005r\u0000\u0000\u0123\u0124\u0005o\u0000\u0000\u0124\u0125" + + "\u0005w\u0000\u0000\u01253\u0001\u0000\u0000\u0000\u0126\u0127\u0005t" + + "\u0000\u0000\u0127\u0128\u0005h\u0000\u0000\u0128\u0129\u0005i\u0000\u0000" + + "\u0129\u012a\u0005s\u0000\u0000\u012a5\u0001\u0000\u0000\u0000\u012b\u012c" + + "\u0005i\u0000\u0000\u012c\u012d\u0005n\u0000\u0000\u012d\u012e\u0005s" + + "\u0000\u0000\u012e\u012f\u0005t\u0000\u0000\u012f\u0130\u0005a\u0000\u0000" + + "\u0130\u0131\u0005n\u0000\u0000\u0131\u0132\u0005c\u0000\u0000\u0132\u0133" + + "\u0005e\u0000\u0000\u0133\u0134\u0005o\u0000\u0000\u0134\u0135\u0005f" + + "\u0000\u0000\u01357\u0001\u0000\u0000\u0000\u0136\u0137\u0005!\u0000\u0000" + + "\u01379\u0001\u0000\u0000\u0000\u0138\u0139\u0005~\u0000\u0000\u0139;" + + "\u0001\u0000\u0000\u0000\u013a\u013b\u0005*\u0000\u0000\u013b=\u0001\u0000" + + "\u0000\u0000\u013c\u013d\u0005/\u0000\u0000\u013d\u013e\u0004\u001e\u0000" + + "\u0000\u013e?\u0001\u0000\u0000\u0000\u013f\u0140\u0005%\u0000\u0000\u0140" + + "A\u0001\u0000\u0000\u0000\u0141\u0142\u0005+\u0000\u0000\u0142C\u0001" + + "\u0000\u0000\u0000\u0143\u0144\u0005-\u0000\u0000\u0144E\u0001\u0000\u0000" + + "\u0000\u0145\u0146\u0005<\u0000\u0000\u0146\u0147\u0005<\u0000\u0000\u0147" + + "G\u0001\u0000\u0000\u0000\u0148\u0149\u0005>\u0000\u0000\u0149\u014a\u0005" + + ">\u0000\u0000\u014aI\u0001\u0000\u0000\u0000\u014b\u014c\u0005>\u0000" + + "\u0000\u014c\u014d\u0005>\u0000\u0000\u014d\u014e\u0005>\u0000\u0000\u014e" + + "K\u0001\u0000\u0000\u0000\u014f\u0150\u0005<\u0000\u0000\u0150M\u0001" + + "\u0000\u0000\u0000\u0151\u0152\u0005<\u0000\u0000\u0152\u0153\u0005=\u0000" + + "\u0000\u0153O\u0001\u0000\u0000\u0000\u0154\u0155\u0005>\u0000\u0000\u0155" + + "Q\u0001\u0000\u0000\u0000\u0156\u0157\u0005>\u0000\u0000\u0157\u0158\u0005" + + "=\u0000\u0000\u0158S\u0001\u0000\u0000\u0000\u0159\u015a\u0005=\u0000" + + "\u0000\u015a\u015b\u0005=\u0000\u0000\u015bU\u0001\u0000\u0000\u0000\u015c" + + "\u015d\u0005=\u0000\u0000\u015d\u015e\u0005=\u0000\u0000\u015e\u015f\u0005" + + "=\u0000\u0000\u015fW\u0001\u0000\u0000\u0000\u0160\u0161\u0005!\u0000" + + "\u0000\u0161\u0162\u0005=\u0000\u0000\u0162Y\u0001\u0000\u0000\u0000\u0163" + + "\u0164\u0005!\u0000\u0000\u0164\u0165\u0005=\u0000\u0000\u0165\u0166\u0005" + + "=\u0000\u0000\u0166[\u0001\u0000\u0000\u0000\u0167\u0168\u0005&\u0000" + + "\u0000\u0168]\u0001\u0000\u0000\u0000\u0169\u016a\u0005^\u0000\u0000\u016a" + + "_\u0001\u0000\u0000\u0000\u016b\u016c\u0005|\u0000\u0000\u016ca\u0001" + + "\u0000\u0000\u0000\u016d\u016e\u0005&\u0000\u0000\u016e\u016f\u0005&\u0000" + + "\u0000\u016fc\u0001\u0000\u0000\u0000\u0170\u0171\u0005|\u0000\u0000\u0171" + + "\u0172\u0005|\u0000\u0000\u0172e\u0001\u0000\u0000\u0000\u0173\u0174\u0005" + + "?\u0000\u0000\u0174g\u0001\u0000\u0000\u0000\u0175\u0176\u0005:\u0000" + + "\u0000\u0176i\u0001\u0000\u0000\u0000\u0177\u0178\u0005?\u0000\u0000\u0178" + + 
"\u0179\u0005:\u0000\u0000\u0179k\u0001\u0000\u0000\u0000\u017a\u017b\u0005" + + ":\u0000\u0000\u017b\u017c\u0005:\u0000\u0000\u017cm\u0001\u0000\u0000" + + "\u0000\u017d\u017e\u0005-\u0000\u0000\u017e\u017f\u0005>\u0000\u0000\u017f" + + "o\u0001\u0000\u0000\u0000\u0180\u0181\u0005=\u0000\u0000\u0181\u0182\u0005" + + "~\u0000\u0000\u0182q\u0001\u0000\u0000\u0000\u0183\u0184\u0005=\u0000" + + "\u0000\u0184\u0185\u0005=\u0000\u0000\u0185\u0186\u0005~\u0000\u0000\u0186" + + "s\u0001\u0000\u0000\u0000\u0187\u0188\u0005+\u0000\u0000\u0188\u0189\u0005" + + "+\u0000\u0000\u0189u\u0001\u0000\u0000\u0000\u018a\u018b\u0005-\u0000" + + "\u0000\u018b\u018c\u0005-\u0000\u0000\u018cw\u0001\u0000\u0000\u0000\u018d" + + "\u018e\u0005=\u0000\u0000\u018ey\u0001\u0000\u0000\u0000\u018f\u0190\u0005" + + "+\u0000\u0000\u0190\u0191\u0005=\u0000\u0000\u0191{\u0001\u0000\u0000" + + "\u0000\u0192\u0193\u0005-\u0000\u0000\u0193\u0194\u0005=\u0000\u0000\u0194" + + "}\u0001\u0000\u0000\u0000\u0195\u0196\u0005*\u0000\u0000\u0196\u0197\u0005" + + "=\u0000\u0000\u0197\u007f\u0001\u0000\u0000\u0000\u0198\u0199\u0005/\u0000" + + "\u0000\u0199\u019a\u0005=\u0000\u0000\u019a\u0081\u0001\u0000\u0000\u0000" + + "\u019b\u019c\u0005%\u0000\u0000\u019c\u019d\u0005=\u0000\u0000\u019d\u0083" + + "\u0001\u0000\u0000\u0000\u019e\u019f\u0005&\u0000\u0000\u019f\u01a0\u0005" + + "=\u0000\u0000\u01a0\u0085\u0001\u0000\u0000\u0000\u01a1\u01a2\u0005^\u0000" + + "\u0000\u01a2\u01a3\u0005=\u0000\u0000\u01a3\u0087\u0001\u0000\u0000\u0000" + + "\u01a4\u01a5\u0005|\u0000\u0000\u01a5\u01a6\u0005=\u0000\u0000\u01a6\u0089" + + "\u0001\u0000\u0000\u0000\u01a7\u01a8\u0005<\u0000\u0000\u01a8\u01a9\u0005" + + "<\u0000\u0000\u01a9\u01aa\u0005=\u0000\u0000\u01aa\u008b\u0001\u0000\u0000" + + "\u0000\u01ab\u01ac\u0005>\u0000\u0000\u01ac\u01ad\u0005>\u0000\u0000\u01ad" + + "\u01ae\u0005=\u0000\u0000\u01ae\u008d\u0001\u0000\u0000\u0000\u01af\u01b0" + + "\u0005>\u0000\u0000\u01b0\u01b1\u0005>\u0000\u0000\u01b1\u01b2\u0005>" + + "\u0000\u0000\u01b2\u01b3\u0005=\u0000\u0000\u01b3\u008f\u0001\u0000\u0000" + + "\u0000\u01b4\u01b6\u00050\u0000\u0000\u01b5\u01b7\u0007\u0002\u0000\u0000" + + "\u01b6\u01b5\u0001\u0000\u0000\u0000\u01b7\u01b8\u0001\u0000\u0000\u0000" + + "\u01b8\u01b6\u0001\u0000\u0000\u0000\u01b8\u01b9\u0001\u0000\u0000\u0000" + + "\u01b9\u01bb\u0001\u0000\u0000\u0000\u01ba\u01bc\u0007\u0003\u0000\u0000" + + "\u01bb\u01ba\u0001\u0000\u0000\u0000\u01bb\u01bc\u0001\u0000\u0000\u0000" + + "\u01bc\u0091\u0001\u0000\u0000\u0000\u01bd\u01be\u00050\u0000\u0000\u01be" + + "\u01c0\u0007\u0004\u0000\u0000\u01bf\u01c1\u0007\u0005\u0000\u0000\u01c0" + + "\u01bf\u0001\u0000\u0000\u0000\u01c1\u01c2\u0001\u0000\u0000\u0000\u01c2" + + "\u01c0\u0001\u0000\u0000\u0000\u01c2\u01c3\u0001\u0000\u0000\u0000\u01c3" + + "\u01c5\u0001\u0000\u0000\u0000\u01c4\u01c6\u0007\u0003\u0000\u0000\u01c5" + + "\u01c4\u0001\u0000\u0000\u0000\u01c5\u01c6\u0001\u0000\u0000\u0000\u01c6" + + "\u0093\u0001\u0000\u0000\u0000\u01c7\u01d0\u00050\u0000\u0000\u01c8\u01cc" + + "\u0007\u0006\u0000\u0000\u01c9\u01cb\u0007\u0007\u0000\u0000\u01ca\u01c9" + + "\u0001\u0000\u0000\u0000\u01cb\u01ce\u0001\u0000\u0000\u0000\u01cc\u01ca" + + "\u0001\u0000\u0000\u0000\u01cc\u01cd\u0001\u0000\u0000\u0000\u01cd\u01d0" + + "\u0001\u0000\u0000\u0000\u01ce\u01cc\u0001\u0000\u0000\u0000\u01cf\u01c7" + + "\u0001\u0000\u0000\u0000\u01cf\u01c8\u0001\u0000\u0000\u0000\u01d0\u01d2" + + "\u0001\u0000\u0000\u0000\u01d1\u01d3\u0007\b\u0000\u0000\u01d2\u01d1\u0001" + + 
"\u0000\u0000\u0000\u01d2\u01d3\u0001\u0000\u0000\u0000\u01d3\u0095\u0001" + + "\u0000\u0000\u0000\u01d4\u01dd\u00050\u0000\u0000\u01d5\u01d9\u0007\u0006" + + "\u0000\u0000\u01d6\u01d8\u0007\u0007\u0000\u0000\u01d7\u01d6\u0001\u0000" + + "\u0000\u0000\u01d8\u01db\u0001\u0000\u0000\u0000\u01d9\u01d7\u0001\u0000" + + "\u0000\u0000\u01d9\u01da\u0001\u0000\u0000\u0000\u01da\u01dd\u0001\u0000" + + "\u0000\u0000\u01db\u01d9\u0001\u0000\u0000\u0000\u01dc\u01d4\u0001\u0000" + + "\u0000\u0000\u01dc\u01d5\u0001\u0000\u0000\u0000\u01dd\u01e4\u0001\u0000" + + "\u0000\u0000\u01de\u01e0\u0003\u0012\b\u0000\u01df\u01e1\u0007\u0007\u0000" + + "\u0000\u01e0\u01df\u0001\u0000\u0000\u0000\u01e1\u01e2\u0001\u0000\u0000" + + "\u0000\u01e2\u01e0\u0001\u0000\u0000\u0000\u01e2\u01e3\u0001\u0000\u0000" + + "\u0000\u01e3\u01e5\u0001\u0000\u0000\u0000\u01e4\u01de\u0001\u0000\u0000" + + "\u0000\u01e4\u01e5\u0001\u0000\u0000\u0000\u01e5\u01ef\u0001\u0000\u0000" + + "\u0000\u01e6\u01e8\u0007\t\u0000\u0000\u01e7\u01e9\u0007\n\u0000\u0000" + + "\u01e8\u01e7\u0001\u0000\u0000\u0000\u01e8\u01e9\u0001\u0000\u0000\u0000" + + "\u01e9\u01eb\u0001\u0000\u0000\u0000\u01ea\u01ec\u0007\u0007\u0000\u0000" + + "\u01eb\u01ea\u0001\u0000\u0000\u0000\u01ec\u01ed\u0001\u0000\u0000\u0000" + + "\u01ed\u01eb\u0001\u0000\u0000\u0000\u01ed\u01ee\u0001\u0000\u0000\u0000" + + "\u01ee\u01f0\u0001\u0000\u0000\u0000\u01ef\u01e6\u0001\u0000\u0000\u0000" + + "\u01ef\u01f0\u0001\u0000\u0000\u0000\u01f0\u01f2\u0001\u0000\u0000\u0000" + + "\u01f1\u01f3\u0007\u000b\u0000\u0000\u01f2\u01f1\u0001\u0000\u0000\u0000" + + "\u01f2\u01f3\u0001\u0000\u0000\u0000\u01f3\u0097\u0001\u0000\u0000\u0000" + + "\u01f4\u01fc\u0005\"\u0000\u0000\u01f5\u01f6\u0005\\\u0000\u0000\u01f6" + + "\u01fb\u0005\"\u0000\u0000\u01f7\u01f8\u0005\\\u0000\u0000\u01f8\u01fb" + + "\u0005\\\u0000\u0000\u01f9\u01fb\b\f\u0000\u0000\u01fa\u01f5\u0001\u0000" + + "\u0000\u0000\u01fa\u01f7\u0001\u0000\u0000\u0000\u01fa\u01f9\u0001\u0000" + + "\u0000\u0000\u01fb\u01fe\u0001\u0000\u0000\u0000\u01fc\u01fd\u0001\u0000" + + "\u0000\u0000\u01fc\u01fa\u0001\u0000\u0000\u0000\u01fd\u01ff\u0001\u0000" + + "\u0000\u0000\u01fe\u01fc\u0001\u0000\u0000\u0000\u01ff\u020d\u0005\"\u0000" + + "\u0000\u0200\u0208\u0005\'\u0000\u0000\u0201\u0202\u0005\\\u0000\u0000" + + "\u0202\u0207\u0005\'\u0000\u0000\u0203\u0204\u0005\\\u0000\u0000\u0204" + + "\u0207\u0005\\\u0000\u0000\u0205\u0207\b\r\u0000\u0000\u0206\u0201\u0001" + + "\u0000\u0000\u0000\u0206\u0203\u0001\u0000\u0000\u0000\u0206\u0205\u0001" + + "\u0000\u0000\u0000\u0207\u020a\u0001\u0000\u0000\u0000\u0208\u0209\u0001" + + "\u0000\u0000\u0000\u0208\u0206\u0001\u0000\u0000\u0000\u0209\u020b\u0001" + + "\u0000\u0000\u0000\u020a\u0208\u0001\u0000\u0000\u0000\u020b\u020d\u0005" + + "\'\u0000\u0000\u020c\u01f4\u0001\u0000\u0000\u0000\u020c\u0200\u0001\u0000" + + "\u0000\u0000\u020d\u0099\u0001\u0000\u0000\u0000\u020e\u0212\u0005/\u0000" + + "\u0000\u020f\u0210\u0005\\\u0000\u0000\u0210\u0213\b\u000e\u0000\u0000" + + "\u0211\u0213\b\u000f\u0000\u0000\u0212\u020f\u0001\u0000\u0000\u0000\u0212" + + "\u0211\u0001\u0000\u0000\u0000\u0213\u0214\u0001\u0000\u0000\u0000\u0214" + + "\u0215\u0001\u0000\u0000\u0000\u0214\u0212\u0001\u0000\u0000\u0000\u0215" + + "\u0216\u0001\u0000\u0000\u0000\u0216\u021a\u0005/\u0000\u0000\u0217\u0219" + + "\u0007\u0010\u0000\u0000\u0218\u0217\u0001\u0000\u0000\u0000\u0219\u021c" + + "\u0001\u0000\u0000\u0000\u021a\u0218\u0001\u0000\u0000\u0000\u021a\u021b" + + "\u0001\u0000\u0000\u0000\u021b\u021d\u0001\u0000\u0000\u0000\u021c\u021a" + + 
"\u0001\u0000\u0000\u0000\u021d\u021e\u0004L\u0001\u0000\u021e\u009b\u0001" + + "\u0000\u0000\u0000\u021f\u0220\u0005t\u0000\u0000\u0220\u0221\u0005r\u0000" + + "\u0000\u0221\u0222\u0005u\u0000\u0000\u0222\u0223\u0005e\u0000\u0000\u0223" + + "\u009d\u0001\u0000\u0000\u0000\u0224\u0225\u0005f\u0000\u0000\u0225\u0226" + + "\u0005a\u0000\u0000\u0226\u0227\u0005l\u0000\u0000\u0227\u0228\u0005s" + + "\u0000\u0000\u0228\u0229\u0005e\u0000\u0000\u0229\u009f\u0001\u0000\u0000" + + "\u0000\u022a\u022b\u0005n\u0000\u0000\u022b\u022c\u0005u\u0000\u0000\u022c" + + "\u022d\u0005l\u0000\u0000\u022d\u022e\u0005l\u0000\u0000\u022e\u00a1\u0001" + + "\u0000\u0000\u0000\u022f\u0230\u0005b\u0000\u0000\u0230\u0231\u0005o\u0000" + + "\u0000\u0231\u0232\u0005o\u0000\u0000\u0232\u0233\u0005l\u0000\u0000\u0233" + + "\u0234\u0005e\u0000\u0000\u0234\u0235\u0005a\u0000\u0000\u0235\u0256\u0005" + + "n\u0000\u0000\u0236\u0237\u0005b\u0000\u0000\u0237\u0238\u0005y\u0000" + + "\u0000\u0238\u0239\u0005t\u0000\u0000\u0239\u0256\u0005e\u0000\u0000\u023a" + + "\u023b\u0005s\u0000\u0000\u023b\u023c\u0005h\u0000\u0000\u023c\u023d\u0005" + + "o\u0000\u0000\u023d\u023e\u0005r\u0000\u0000\u023e\u0256\u0005t\u0000" + + "\u0000\u023f\u0240\u0005c\u0000\u0000\u0240\u0241\u0005h\u0000\u0000\u0241" + + "\u0242\u0005a\u0000\u0000\u0242\u0256\u0005r\u0000\u0000\u0243\u0244\u0005" + + "i\u0000\u0000\u0244\u0245\u0005n\u0000\u0000\u0245\u0256\u0005t\u0000" + + "\u0000\u0246\u0247\u0005l\u0000\u0000\u0247\u0248\u0005o\u0000\u0000\u0248" + + "\u0249\u0005n\u0000\u0000\u0249\u0256\u0005g\u0000\u0000\u024a\u024b\u0005" + + "f\u0000\u0000\u024b\u024c\u0005l\u0000\u0000\u024c\u024d\u0005o\u0000" + + "\u0000\u024d\u024e\u0005a\u0000\u0000\u024e\u0256\u0005t\u0000\u0000\u024f" + + "\u0250\u0005d\u0000\u0000\u0250\u0251\u0005o\u0000\u0000\u0251\u0252\u0005" + + "u\u0000\u0000\u0252\u0253\u0005b\u0000\u0000\u0253\u0254\u0005l\u0000" + + "\u0000\u0254\u0256\u0005e\u0000\u0000\u0255\u022f\u0001\u0000\u0000\u0000" + + "\u0255\u0236\u0001\u0000\u0000\u0000\u0255\u023a\u0001\u0000\u0000\u0000" + + "\u0255\u023f\u0001\u0000\u0000\u0000\u0255\u0243\u0001\u0000\u0000\u0000" + + "\u0255\u0246\u0001\u0000\u0000\u0000\u0255\u024a\u0001\u0000\u0000\u0000" + + "\u0255\u024f\u0001\u0000\u0000\u0000\u0256\u00a3\u0001\u0000\u0000\u0000" + + "\u0257\u0258\u0005d\u0000\u0000\u0258\u0259\u0005e\u0000\u0000\u0259\u025a" + + "\u0005f\u0000\u0000\u025a\u00a5\u0001\u0000\u0000\u0000\u025b\u025f\u0007" + + "\u0011\u0000\u0000\u025c\u025e\u0007\u0012\u0000\u0000\u025d\u025c\u0001" + + "\u0000\u0000\u0000\u025e\u0261\u0001\u0000\u0000\u0000\u025f\u025d\u0001" + + "\u0000\u0000\u0000\u025f\u0260\u0001\u0000\u0000\u0000\u0260\u00a7\u0001" + + "\u0000\u0000\u0000\u0261\u025f\u0001\u0000\u0000\u0000\u0262\u026b\u0005" + + "0\u0000\u0000\u0263\u0267\u0007\u0006\u0000\u0000\u0264\u0266\u0007\u0007" + + "\u0000\u0000\u0265\u0264\u0001\u0000\u0000\u0000\u0266\u0269\u0001\u0000" + + "\u0000\u0000\u0267\u0265\u0001\u0000\u0000\u0000\u0267\u0268\u0001\u0000" + + "\u0000\u0000\u0268\u026b\u0001\u0000\u0000\u0000\u0269\u0267\u0001\u0000" + + "\u0000\u0000\u026a\u0262\u0001\u0000\u0000\u0000\u026a\u0263\u0001\u0000" + + "\u0000\u0000\u026b\u026c\u0001\u0000\u0000\u0000\u026c\u026d\u0006S\u0002" + + "\u0000\u026d\u00a9\u0001\u0000\u0000\u0000\u026e\u0272\u0007\u0011\u0000" + + "\u0000\u026f\u0271\u0007\u0012\u0000\u0000\u0270\u026f\u0001\u0000\u0000" + + "\u0000\u0271\u0274\u0001\u0000\u0000\u0000\u0272\u0270\u0001\u0000\u0000" + + 
"\u0000\u0272\u0273\u0001\u0000\u0000\u0000\u0273\u0275\u0001\u0000\u0000" + + "\u0000\u0274\u0272\u0001\u0000\u0000\u0000\u0275\u0276\u0006T\u0002\u0000" + + "\u0276\u00ab\u0001\u0000\u0000\u0000\"\u0000\u0001\u00af\u00b9\u00c2\u00c7" + + "\u01b8\u01bb\u01c2\u01c5\u01cc\u01cf\u01d2\u01d9\u01dc\u01e2\u01e4\u01e8" + + "\u01ed\u01ef\u01f2\u01fa\u01fc\u0206\u0208\u020c\u0212\u0214\u021a\u0255" + + "\u025f\u0267\u026a\u0272\u0003\u0006\u0000\u0000\u0002\u0001\u0000\u0002" + + "\u0000\u0000"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { _decisionToDFA = new DFA[_ATN.getNumberOfDecisions()]; diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessParser.java b/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessParser.java index 52928eb6c7623..7ad5d113637c8 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessParser.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessParser.java @@ -32,31 +32,31 @@ */ package org.opensearch.painless.antlr; -import org.antlr.v4.runtime.dfa.DFA; -import org.antlr.v4.runtime.atn.PredictionContextCache; -import org.antlr.v4.runtime.atn.ParserATNSimulator; -import org.antlr.v4.runtime.atn.ATN; -import org.antlr.v4.runtime.atn.ATNDeserializer; -import org.antlr.v4.runtime.dfa.DFA; +import org.antlr.v4.runtime.FailedPredicateException; +import org.antlr.v4.runtime.NoViableAltException; import org.antlr.v4.runtime.Parser; +import org.antlr.v4.runtime.ParserRuleContext; +import org.antlr.v4.runtime.RecognitionException; +import org.antlr.v4.runtime.RuleContext; import org.antlr.v4.runtime.RuntimeMetaData; +import org.antlr.v4.runtime.Token; +import org.antlr.v4.runtime.TokenStream; import org.antlr.v4.runtime.Vocabulary; import org.antlr.v4.runtime.VocabularyImpl; -import org.antlr.v4.runtime.RecognitionException; -import org.antlr.v4.runtime.TokenStream; -import org.antlr.v4.runtime.ParserRuleContext; -import org.antlr.v4.runtime.Token; -import org.antlr.v4.runtime.NoViableAltException; -import org.antlr.v4.runtime.FailedPredicateException; -import org.antlr.v4.runtime.RuleContext; -import org.antlr.v4.runtime.tree.TerminalNode; +import org.antlr.v4.runtime.atn.ATN; +import org.antlr.v4.runtime.atn.ATNDeserializer; +import org.antlr.v4.runtime.atn.ParserATNSimulator; +import org.antlr.v4.runtime.atn.PredictionContextCache; +import org.antlr.v4.runtime.dfa.DFA; import org.antlr.v4.runtime.tree.ParseTreeVisitor; +import org.antlr.v4.runtime.tree.TerminalNode; + import java.util.List; @SuppressWarnings({ "all", "warnings", "unchecked", "unused", "cast", "CheckReturnValue" }) class PainlessParser extends Parser { static { - RuntimeMetaData.checkVersion("4.11.1", RuntimeMetaData.VERSION); + RuntimeMetaData.checkVersion("4.13.1", RuntimeMetaData.VERSION); } protected static final DFA[] _decisionToDFA; @@ -337,7 +337,7 @@ public Vocabulary getVocabulary() { @Override public String getGrammarFileName() { - return "java-escape"; + return "PainlessParser.g4"; } @Override @@ -425,8 +425,8 @@ public final SourceContext source() throws RecognitionException { setState(87); _errHandler.sync(this); _la = _input.LA(1); - while (((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155080519840L) != 0 - || (((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 4095L) != 0) { + while ((((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155080519840L) != 0) + || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 4095L) != 0)) 
{ { { setState(84); @@ -571,7 +571,7 @@ public final ParametersContext parameters() throws RecognitionException { setState(109); _errHandler.sync(this); _la = _input.LA(1); - if ((((_la - 81)) & ~0x3f) == 0 && ((1L << (_la - 81)) & 7L) != 0) { + if (((((_la - 81)) & ~0x3f) == 0 && ((1L << (_la - 81)) & 7L) != 0)) { { setState(98); decltype(); @@ -1088,8 +1088,8 @@ public final RstatementContext rstatement() throws RecognitionException { setState(140); _errHandler.sync(this); _la = _input.LA(1); - if (((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155034439840L) != 0 - || (((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 4095L) != 0) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155034439840L) != 0) + || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 4095L) != 0)) { { setState(139); initializer(); @@ -1101,8 +1101,8 @@ public final RstatementContext rstatement() throws RecognitionException { setState(144); _errHandler.sync(this); _la = _input.LA(1); - if (((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155034439840L) != 0 - || (((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 2559L) != 0) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155034439840L) != 0) + || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 2559L) != 0)) { { setState(143); expression(); @@ -1114,8 +1114,8 @@ public final RstatementContext rstatement() throws RecognitionException { setState(148); _errHandler.sync(this); _la = _input.LA(1); - if (((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155034439840L) != 0 - || (((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 2559L) != 0) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155034439840L) != 0) + || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 2559L) != 0)) { { setState(147); afterthought(); @@ -1470,8 +1470,8 @@ public final DstatementContext dstatement() throws RecognitionException { setState(193); _errHandler.sync(this); _la = _input.LA(1); - if (((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155034439840L) != 0 - || (((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 2559L) != 0) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155034439840L) != 0) + || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 2559L) != 0)) { { setState(192); expression(); @@ -1661,8 +1661,8 @@ public final BlockContext block() throws RecognitionException { setState(212); _errHandler.sync(this); _la = _input.LA(1); - if (((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155071795360L) != 0 - || (((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 4095L) != 0) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155071795360L) != 0) + || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 4095L) != 0)) { { setState(211); dstatement(); @@ -2491,7 +2491,7 @@ private NoncondexpressionContext noncondexpression(int _p) throws RecognitionExc if (!(precpred(_ctx, 13))) throw new FailedPredicateException(this, "precpred(_ctx, 13)"); setState(269); _la = _input.LA(1); - if (!(((_la) & ~0x3f) == 0 && ((1L << _la) & 7516192768L) != 0)) { + if (!((((_la) & ~0x3f) == 0 && ((1L << _la) & 7516192768L) != 0))) { _errHandler.recoverInline(this); } else { if (_input.LA(1) == Token.EOF) matchedEOF = true; @@ -2545,7 +2545,7 @@ private NoncondexpressionContext noncondexpression(int _p) throws RecognitionExc if (!(precpred(_ctx, 10))) throw new FailedPredicateException(this, "precpred(_ctx, 10)"); setState(278); _la = _input.LA(1); - if (!(((_la) & ~0x3f) == 0 && ((1L << _la) & 240518168576L) != 0)) { + if (!((((_la) & ~0x3f) == 0 && ((1L << _la) & 240518168576L) != 
0))) { _errHandler.recoverInline(this); } else { if (_input.LA(1) == Token.EOF) matchedEOF = true; @@ -2563,7 +2563,7 @@ private NoncondexpressionContext noncondexpression(int _p) throws RecognitionExc if (!(precpred(_ctx, 9))) throw new FailedPredicateException(this, "precpred(_ctx, 9)"); setState(281); _la = _input.LA(1); - if (!(((_la) & ~0x3f) == 0 && ((1L << _la) & 4123168604160L) != 0)) { + if (!((((_la) & ~0x3f) == 0 && ((1L << _la) & 4123168604160L) != 0))) { _errHandler.recoverInline(this); } else { if (_input.LA(1) == Token.EOF) matchedEOF = true; @@ -2581,7 +2581,7 @@ private NoncondexpressionContext noncondexpression(int _p) throws RecognitionExc if (!(precpred(_ctx, 7))) throw new FailedPredicateException(this, "precpred(_ctx, 7)"); setState(284); _la = _input.LA(1); - if (!(((_la) & ~0x3f) == 0 && ((1L << _la) & 65970697666560L) != 0)) { + if (!((((_la) & ~0x3f) == 0 && ((1L << _la) & 65970697666560L) != 0))) { _errHandler.recoverInline(this); } else { if (_input.LA(1) == Token.EOF) matchedEOF = true; @@ -2861,7 +2861,7 @@ public final ExpressionContext expression() throws RecognitionException { noncondexpression(0); setState(320); _la = _input.LA(1); - if (!((((_la - 60)) & ~0x3f) == 0 && ((1L << (_la - 60)) & 4095L) != 0)) { + if (!(((((_la - 60)) & ~0x3f) == 0 && ((1L << (_la - 60)) & 4095L) != 0))) { _errHandler.recoverInline(this); } else { if (_input.LA(1) == Token.EOF) matchedEOF = true; @@ -3938,7 +3938,7 @@ public final PrimaryContext primary() throws RecognitionException { enterOuterAlt(_localctx, 2); { setState(400); _la = _input.LA(1); - if (!((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 15L) != 0)) { + if (!(((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 15L) != 0))) { _errHandler.recoverInline(this); } else { if (_input.LA(1) == Token.EOF) matchedEOF = true; @@ -4564,8 +4564,8 @@ public final ArrayinitializerContext arrayinitializer() throws RecognitionExcept setState(469); _errHandler.sync(this); _la = _input.LA(1); - if (((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155034439840L) != 0 - || (((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 2559L) != 0) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155034439840L) != 0) + || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 2559L) != 0)) { { setState(461); expression(); @@ -4923,8 +4923,8 @@ public final ArgumentsContext arguments() throws RecognitionException { setState(524); _errHandler.sync(this); _la = _input.LA(1); - if (((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155101548704L) != 0 - || (((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 4095L) != 0) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & 864691155101548704L) != 0) + || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & 4095L) != 0)) { { setState(516); argument(); @@ -5104,7 +5104,7 @@ public final LambdaContext lambda() throws RecognitionException { setState(543); _errHandler.sync(this); _la = _input.LA(1); - if ((((_la - 81)) & ~0x3f) == 0 && ((1L << (_la - 81)) & 7L) != 0) { + if (((((_la - 81)) & ~0x3f) == 0 && ((1L << (_la - 81)) & 7L) != 0)) { { setState(535); lamtype(); diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/ParserErrorStrategy.java b/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/ParserErrorStrategy.java index 7ddf9cd54fe83..6928b28a746f6 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/ParserErrorStrategy.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/ParserErrorStrategy.java @@ -38,6 
+38,7 @@ import org.antlr.v4.runtime.Parser; import org.antlr.v4.runtime.RecognitionException; import org.antlr.v4.runtime.Token; + import org.opensearch.painless.Location; /** diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/Walker.java b/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/Walker.java index c03b4199ce8d9..eeb1101c321f9 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/Walker.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/Walker.java @@ -41,6 +41,7 @@ import org.antlr.v4.runtime.Recognizer; import org.antlr.v4.runtime.atn.PredictionMode; import org.antlr.v4.runtime.tree.TerminalNode; + import org.opensearch.painless.CompilerSettings; import org.opensearch.painless.Location; import org.opensearch.painless.Operation; diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/api/Augmentation.java b/modules/lang-painless/src/main/java/org/opensearch/painless/api/Augmentation.java index 821fbc45c42e3..5a105e8c03e02 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/api/Augmentation.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/api/Augmentation.java @@ -35,6 +35,8 @@ import org.opensearch.common.hash.MessageDigests; import java.nio.charset.StandardCharsets; +import java.time.DayOfWeek; +import java.time.ZonedDateTime; import java.util.ArrayList; import java.util.Base64; import java.util.Collection; @@ -721,4 +723,8 @@ public static Matcher matcher(Pattern receiver, int limitFactor, CharSequence in } return receiver.matcher(new LimitedCharSequence(input, receiver, limitFactor)); } + + public static DayOfWeek getDayOfWeekEnum(ZonedDateTime receiver) { + return receiver.getDayOfWeek(); + } } diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/api/Json.java b/modules/lang-painless/src/main/java/org/opensearch/painless/api/Json.java index a862cd3bf5516..e3dd1b50736fa 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/api/Json.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/api/Json.java @@ -32,11 +32,11 @@ package org.opensearch.painless.api; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; import java.io.IOException; import java.io.OutputStream; diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/api/LimitedCharSequence.java b/modules/lang-painless/src/main/java/org/opensearch/painless/api/LimitedCharSequence.java index 8084420295280..c8a28158ad8db 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/api/LimitedCharSequence.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/api/LimitedCharSequence.java @@ -32,8 +32,8 @@ package org.opensearch.painless.api; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.breaker.CircuitBreakingException; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.CircuitBreakingException; import org.opensearch.painless.CompilerSettings; import java.util.regex.Pattern; diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessClass.java 
b/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessClass.java index efa2d51524557..fdf7df94252b6 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessClass.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessClass.java @@ -32,7 +32,7 @@ package org.opensearch.painless.lookup; -import org.opensearch.common.util.CollectionUtils; +import org.opensearch.core.common.util.CollectionUtils; import java.lang.invoke.MethodHandle; import java.util.Map; diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookup.java b/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookup.java index 1249a9cffecb2..9a3b8bf9e2eee 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookup.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookup.java @@ -32,7 +32,7 @@ package org.opensearch.painless.lookup; -import org.opensearch.common.util.CollectionUtils; +import org.opensearch.core.common.util.CollectionUtils; import java.lang.invoke.MethodHandle; import java.util.Map; diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupBuilder.java b/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupBuilder.java index e79eda975f417..e155a890c03d1 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupBuilder.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupBuilder.java @@ -2189,6 +2189,7 @@ private void generateBridgeMethod(PainlessClassBuilder painlessClassBuilder, Pai bridgeClassWriter.visitEnd(); try { + @SuppressWarnings("removal") BridgeLoader bridgeLoader = AccessController.doPrivileged(new PrivilegedAction<BridgeLoader>() { @Override public BridgeLoader run() { diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupUtility.java b/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupUtility.java index cae425ad1fe3b..3164f5e6388c7 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupUtility.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupUtility.java @@ -42,13 +42,13 @@ /** * PainlessLookupUtility contains methods shared by {@link PainlessLookupBuilder}, {@link PainlessLookup}, and other classes within * Painless for conversion between type names and types along with some other various utility methods. - * + * <p> * The following terminology is used for variable names throughout the lookup package: - * + * <p> * A class is a set of methods and fields under a specific class name. A type is either a class or an array under a specific type name. * Note the distinction between class versus type is class means that no array classes will be be represented whereas type allows array * classes to be represented. The set of available classes will always be a subset of the available types. - * + * <p> * Under ambiguous circumstances most variable names are prefixed with asm, java, or painless. If the variable value is the same for asm, * java, and painless, no prefix is used. Target is used as a prefix to represent if a constructor, method, or field is being * called/accessed on that specific class. 
Parameter is often a postfix used to represent if a type is used as a parameter to a diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/phase/DefaultConstantFoldingOptimizationPhase.java b/modules/lang-painless/src/main/java/org/opensearch/painless/phase/DefaultConstantFoldingOptimizationPhase.java index 8b55e70e8ea7c..06607b639e07c 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/phase/DefaultConstantFoldingOptimizationPhase.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/phase/DefaultConstantFoldingOptimizationPhase.java @@ -34,8 +34,8 @@ import org.opensearch.painless.AnalyzerCaster; import org.opensearch.painless.Operation; -import org.opensearch.painless.ir.BinaryMathNode; import org.opensearch.painless.ir.BinaryImplNode; +import org.opensearch.painless.ir.BinaryMathNode; import org.opensearch.painless.ir.BooleanNode; import org.opensearch.painless.ir.CastNode; import org.opensearch.painless.ir.ComparisonNode; diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/phase/IRTreeBaseVisitor.java b/modules/lang-painless/src/main/java/org/opensearch/painless/phase/IRTreeBaseVisitor.java index 0b947decd7dad..deff3201a2fd5 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/phase/IRTreeBaseVisitor.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/phase/IRTreeBaseVisitor.java @@ -32,8 +32,8 @@ package org.opensearch.painless.phase; -import org.opensearch.painless.ir.BinaryMathNode; import org.opensearch.painless.ir.BinaryImplNode; +import org.opensearch.painless.ir.BinaryMathNode; import org.opensearch.painless.ir.BlockNode; import org.opensearch.painless.ir.BooleanNode; import org.opensearch.painless.ir.BreakNode; diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/phase/IRTreeVisitor.java b/modules/lang-painless/src/main/java/org/opensearch/painless/phase/IRTreeVisitor.java index babb9b14bcfb1..17bd1889a11bc 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/phase/IRTreeVisitor.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/phase/IRTreeVisitor.java @@ -32,8 +32,8 @@ package org.opensearch.painless.phase; -import org.opensearch.painless.ir.BinaryMathNode; import org.opensearch.painless.ir.BinaryImplNode; +import org.opensearch.painless.ir.BinaryMathNode; import org.opensearch.painless.ir.BlockNode; import org.opensearch.painless.ir.BooleanNode; import org.opensearch.painless.ir.BreakNode; diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/phase/PainlessSemanticAnalysisPhase.java b/modules/lang-painless/src/main/java/org/opensearch/painless/phase/PainlessSemanticAnalysisPhase.java index 04165f44ba212..8a05d6742af97 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/phase/PainlessSemanticAnalysisPhase.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/phase/PainlessSemanticAnalysisPhase.java @@ -126,9 +126,9 @@ public void visitFunction(SFunction userFunctionNode, ScriptScope scriptScope) { /** * Visits an expression that is also considered a statement. - * + * <p> * If the statement is a return from the execute method, performs return value conversion. - * + * <p> * Checks: control flow, type validation */ @Override @@ -168,9 +168,9 @@ public void visitExpression(SExpression userExpressionNode, SemanticScope semant /** * Visits a return statement and casts the value to the return type if possible. 
- * + * <p> * If the statement is a return from the execute method, performs return value conversion. - * + * <p> * Checks: type validation */ @Override diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/symbol/SemanticScope.java b/modules/lang-painless/src/main/java/org/opensearch/painless/symbol/SemanticScope.java index e27530d745e8f..5ac802038afa6 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/symbol/SemanticScope.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/symbol/SemanticScope.java @@ -49,7 +49,7 @@ * Tracks information within a scope required for compilation during the * semantic phase in the user tree. There are three types of scopes - * {@link FunctionScope}, {@link LambdaScope}, and {@link BlockScope}. - * + * <p> * Scopes are stacked as they are created during the user tree's semantic * phase with each scope beyond the top-level containing a reference to * its parent. As a scope is no longer necessary, it's dropped automatically diff --git a/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/java.time.txt b/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/java.time.txt index 38c6e8a4f575e..6a97fd4038e94 100644 --- a/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/java.time.txt +++ b/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/java.time.txt @@ -485,6 +485,7 @@ class java.time.YearMonth { class java.time.ZonedDateTime { int getDayOfMonth() DayOfWeek getDayOfWeek() + DayOfWeek org.opensearch.painless.api.Augmentation getDayOfWeekEnum() int getDayOfYear() int getHour() LocalDate toLocalDate() diff --git a/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.score.txt b/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.score.txt index cca7e07a95388..9bce617099c6f 100644 --- a/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.score.txt +++ b/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.score.txt @@ -23,6 +23,9 @@ class org.opensearch.script.ScoreScript @no_import { } static_import { + int termFreq(org.opensearch.script.ScoreScript, String, String) bound_to org.opensearch.script.ScoreScriptUtils$TermFreq + long totalTermFreq(org.opensearch.script.ScoreScript, String, String) bound_to org.opensearch.script.ScoreScriptUtils$TotalTermFreq + long sumTotalTermFreq(org.opensearch.script.ScoreScript, String) bound_to org.opensearch.script.ScoreScriptUtils$SumTotalTermFreq double saturation(double, double) from_class org.opensearch.script.ScoreScriptUtils double sigmoid(double, double, double) from_class org.opensearch.script.ScoreScriptUtils double randomScore(org.opensearch.script.ScoreScript, int, String) bound_to org.opensearch.script.ScoreScriptUtils$RandomScoreField @@ -33,8 +36,8 @@ static_import { double decayNumericLinear(double, double, double, double, double)bound_to org.opensearch.script.ScoreScriptUtils$DecayNumericLinear double decayNumericExp(double, double, double, double, double) bound_to org.opensearch.script.ScoreScriptUtils$DecayNumericExp double decayNumericGauss(double, double, double, double, double) bound_to org.opensearch.script.ScoreScriptUtils$DecayNumericGauss - double decayDateLinear(String, String, String, double, JodaCompatibleZonedDateTime) bound_to org.opensearch.script.ScoreScriptUtils$DecayDateLinear - double decayDateExp(String, String, String, double, 
JodaCompatibleZonedDateTime) bound_to org.opensearch.script.ScoreScriptUtils$DecayDateExp - double decayDateGauss(String, String, String, double, JodaCompatibleZonedDateTime) bound_to org.opensearch.script.ScoreScriptUtils$DecayDateGauss + double decayDateLinear(String, String, String, double, ZonedDateTime) bound_to org.opensearch.script.ScoreScriptUtils$DecayDateLinear + double decayDateExp(String, String, String, double, ZonedDateTime) bound_to org.opensearch.script.ScoreScriptUtils$DecayDateExp + double decayDateGauss(String, String, String, double, ZonedDateTime) bound_to org.opensearch.script.ScoreScriptUtils$DecayDateGauss } diff --git a/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.txt b/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.txt index dccf29b629ce8..b91d9bb6115d4 100644 --- a/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.txt +++ b/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.txt @@ -79,62 +79,9 @@ class org.opensearch.index.fielddata.ScriptDocValues$UnsignedLongs { BigInteger getValue() } -class org.opensearch.script.JodaCompatibleZonedDateTime { - ##### ZonedDateTime methods - int getDayOfMonth() - int getDayOfYear() - int getHour() - LocalDate toLocalDate() - LocalDateTime toLocalDateTime() - int getMinute() - Month getMonth() - int getMonthValue() - int getNano() - int getSecond() - int getYear() - ZonedDateTime minus(TemporalAmount) - ZonedDateTime minus(long,TemporalUnit) - ZonedDateTime minusYears(long) - ZonedDateTime minusMonths(long) - ZonedDateTime minusWeeks(long) - ZonedDateTime minusDays(long) - ZonedDateTime minusHours(long) - ZonedDateTime minusMinutes(long) - ZonedDateTime minusSeconds(long) - ZonedDateTime minusNanos(long) - ZonedDateTime plus(TemporalAmount) - ZonedDateTime plus(long,TemporalUnit) - ZonedDateTime plusDays(long) - ZonedDateTime plusHours(long) - ZonedDateTime plusMinutes(long) - ZonedDateTime plusMonths(long) - ZonedDateTime plusNanos(long) - ZonedDateTime plusSeconds(long) - ZonedDateTime plusWeeks(long) - ZonedDateTime plusYears(long) - OffsetDateTime toOffsetDateTime() - ZonedDateTime truncatedTo(TemporalUnit) - ZonedDateTime with(TemporalAdjuster) - ZonedDateTime with(TemporalField,long) - ZonedDateTime withDayOfMonth(int) - ZonedDateTime withDayOfYear(int) - ZonedDateTime withEarlierOffsetAtOverlap() - ZonedDateTime withFixedOffsetZone() - ZonedDateTime withHour(int) - ZonedDateTime withLaterOffsetAtOverlap() - ZonedDateTime withMinute(int) - ZonedDateTime withMonth(int) - ZonedDateTime withNano(int) - ZonedDateTime withSecond(int) - ZonedDateTime withYear(int) - ZonedDateTime withZoneSameLocal(ZoneId) - ZonedDateTime withZoneSameInstant(ZoneId) - DayOfWeek getDayOfWeekEnum() -} - class org.opensearch.index.fielddata.ScriptDocValues$Dates { - JodaCompatibleZonedDateTime get(int) - JodaCompatibleZonedDateTime getValue() + ZonedDateTime get(int) + ZonedDateTime getValue() } class org.opensearch.index.fielddata.ScriptDocValues$Doubles { diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/AllowlistLoaderTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/AllowlistLoaderTests.java index f378eff383a80..58268ae61cfb1 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/AllowlistLoaderTests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/AllowlistLoaderTests.java @@ -36,9 +36,9 @@ import 
org.opensearch.painless.spi.AllowlistClass; import org.opensearch.painless.spi.AllowlistLoader; import org.opensearch.painless.spi.AllowlistMethod; +import org.opensearch.painless.spi.annotation.AllowlistAnnotationParser; import org.opensearch.painless.spi.annotation.DeprecatedAnnotation; import org.opensearch.painless.spi.annotation.NoImportAnnotation; -import org.opensearch.painless.spi.annotation.AllowlistAnnotationParser; import java.util.HashMap; import java.util.Map; diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/AugmentationTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/AugmentationTests.java index 8740e1fc8f954..249ffe07e3985 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/AugmentationTests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/AugmentationTests.java @@ -32,12 +32,12 @@ package org.opensearch.painless; -import org.junit.AfterClass; -import org.junit.BeforeClass; import org.opensearch.common.settings.Settings; import org.opensearch.painless.spi.Allowlist; import org.opensearch.painless.spi.AllowlistLoader; import org.opensearch.script.ScriptContext; +import org.junit.AfterClass; +import org.junit.BeforeClass; import java.util.ArrayList; import java.util.Arrays; diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/BaseClassTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/BaseClassTests.java index 29bf5df28db82..6166fea23c268 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/BaseClassTests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/BaseClassTests.java @@ -32,11 +32,11 @@ package org.opensearch.painless; -import org.junit.AfterClass; -import org.junit.BeforeClass; import org.opensearch.common.settings.Settings; import org.opensearch.painless.spi.Allowlist; import org.opensearch.script.ScriptContext; +import org.junit.AfterClass; +import org.junit.BeforeClass; import java.util.Collections; import java.util.HashMap; diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/BasicAPITests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/BasicAPITests.java index c4cd7503bbb2e..31c2525029d06 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/BasicAPITests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/BasicAPITests.java @@ -199,19 +199,6 @@ public void testStatic() { assertEquals(15.5f, exec("staticAddFloatsTest(6.5f, 9.0f)")); } - // TODO: remove this when the transition from Joda to Java datetimes is completed - public void testJCZDTToZonedDateTime() { - assertEquals( - 0L, - exec( - "Instant instant = Instant.ofEpochMilli(434931330000L);" - + "JodaCompatibleZonedDateTime d = new JodaCompatibleZonedDateTime(instant, ZoneId.of('Z'));" - + "ZonedDateTime t = d;" - + "return ChronoUnit.MILLIS.between(d, t);" - ) - ); - } - public void testRandomUUID() { assertTrue( Pattern.compile("\\p{XDigit}{8}(-\\p{XDigit}{4}){3}-\\p{XDigit}{12}") diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/BasicStatementTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/BasicStatementTests.java index a2ddec10e3c7d..e55d73d7c0951 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/BasicStatementTests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/BasicStatementTests.java @@ -32,11 +32,11 @@ package org.opensearch.painless; -import 
org.junit.AfterClass; -import org.junit.BeforeClass; import org.opensearch.common.settings.Settings; import org.opensearch.painless.spi.Allowlist; import org.opensearch.script.ScriptContext; +import org.junit.AfterClass; +import org.junit.BeforeClass; import java.util.ArrayList; import java.util.Collections; diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/BindingsTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/BindingsTests.java index f4435bc865923..51ca26e7fe892 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/BindingsTests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/BindingsTests.java @@ -32,13 +32,13 @@ package org.opensearch.painless; -import org.junit.AfterClass; -import org.junit.BeforeClass; import org.opensearch.common.settings.Settings; import org.opensearch.painless.spi.Allowlist; import org.opensearch.painless.spi.AllowlistInstanceBinding; import org.opensearch.painless.spi.AllowlistLoader; import org.opensearch.script.ScriptContext; +import org.junit.AfterClass; +import org.junit.BeforeClass; import java.util.ArrayList; import java.util.Collections; diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/CommentTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/CommentTests.java new file mode 100644 index 0000000000000..dbba3226ba300 --- /dev/null +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/CommentTests.java @@ -0,0 +1,51 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.painless; + +public class CommentTests extends ScriptTestCase { + + public void testSingleLineComments() { + assertEquals(5, exec("// comment\n return 5")); + assertEquals(5, exec("// comment\r return 5")); + assertEquals(5, exec("return 5 // comment no newline or return char")); + } + + public void testOpenCloseComments() { + assertEquals(5, exec("/* single-line comment */ return 5")); + assertEquals(5, exec("/* multi-line \n */ return 5")); + assertEquals(5, exec("/* multi-line \r */ return 5")); + assertEquals(5, exec("/* multi-line \n\n\r\r */ return 5")); + assertEquals(5, exec("def five = 5; /* multi-line \r */ return five")); + assertEquals(5, exec("return 5 /* multi-line ignored code */")); + } +} diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/DefCastTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/DefCastTests.java index df8ffd2be4437..a267ef1701e2c 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/DefCastTests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/DefCastTests.java @@ -709,20 +709,4 @@ public void testConstFoldingDefCast() { assertFalse((boolean) exec("def chr = (char)10L; return (chr > (byte)10);")); assertFalse((boolean) exec("def chr = (char)10L; return (chr > (double)(byte)(char)10);")); } - - // TODO: remove this when the transition from Joda to Java datetimes is completed - public void testdefToZonedDateTime() { - assertEquals( - 0L, - exec( - "Instant instant = Instant.ofEpochMilli(434931330000L);" - + "def d = new JodaCompatibleZonedDateTime(instant, ZoneId.of('Z'));" - + "def x = new HashMap(); x.put('dt', d);" - + "ZonedDateTime t = x['dt'];" - + "def y = t;" - + "t = y;" - + "return ChronoUnit.MILLIS.between(d, t);" - ) - ); - } } diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/DocFieldsPhaseTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/DocFieldsPhaseTests.java index 3418bcf01e19f..691e84176dce3 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/DocFieldsPhaseTests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/DocFieldsPhaseTests.java @@ -48,6 +48,7 @@ public class DocFieldsPhaseTests extends ScriptTestCase { PainlessLookup lookup = PainlessLookupBuilder.buildFromAllowlists(Allowlist.BASE_ALLOWLISTS); + @SuppressWarnings("removal") ScriptScope compile(String script) { Compiler compiler = new Compiler( MockDocTestScript.CONTEXT.instanceClazz, diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/FactoryTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/FactoryTests.java index b4e322e12bc45..6d1e0a1634086 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/FactoryTests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/FactoryTests.java @@ -32,14 +32,14 @@ package org.opensearch.painless; -import org.junit.AfterClass; -import org.junit.BeforeClass; import org.opensearch.common.settings.Settings; import org.opensearch.painless.spi.Allowlist; import org.opensearch.script.ScriptContext; import org.opensearch.script.ScriptException; import org.opensearch.script.ScriptFactory; import org.opensearch.script.TemplateScript; +import org.junit.AfterClass; +import org.junit.BeforeClass; import java.util.Arrays; import java.util.Collections; diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/RegexLimit2Tests.java 
b/modules/lang-painless/src/test/java/org/opensearch/painless/RegexLimit2Tests.java index 1d08fdac9c58c..b860c783a5c0d 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/RegexLimit2Tests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/RegexLimit2Tests.java @@ -32,9 +32,9 @@ package org.opensearch.painless; +import org.opensearch.common.settings.Settings; import org.junit.AfterClass; import org.junit.BeforeClass; -import org.opensearch.common.settings.Settings; public class RegexLimit2Tests extends ScriptTestCase { // This regex has backtracking due to .*? diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/RegexLimitTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/RegexLimitTests.java index c3233bc0d924a..8a965e715f810 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/RegexLimitTests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/RegexLimitTests.java @@ -32,10 +32,10 @@ package org.opensearch.painless; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.breaker.CircuitBreakingException; import org.junit.AfterClass; import org.junit.BeforeClass; -import org.opensearch.common.breaker.CircuitBreakingException; -import org.opensearch.common.settings.Settings; import java.util.Collections; diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/RegexTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/RegexTests.java index 8c1f545efcf7a..628e15b9bb544 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/RegexTests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/RegexTests.java @@ -32,10 +32,10 @@ package org.opensearch.painless; -import org.junit.AfterClass; -import org.junit.BeforeClass; import org.opensearch.common.settings.Settings; import org.opensearch.script.ScriptException; +import org.junit.AfterClass; +import org.junit.BeforeClass; import java.nio.CharBuffer; import java.util.Arrays; diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/ScriptTestCase.java b/modules/lang-painless/src/test/java/org/opensearch/painless/ScriptTestCase.java index 1f8824113df92..cc967efc3e850 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/ScriptTestCase.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/ScriptTestCase.java @@ -32,8 +32,6 @@ package org.opensearch.painless; -import junit.framework.AssertionFailedError; - import org.opensearch.common.settings.Settings; import org.opensearch.painless.antlr.Walker; import org.opensearch.painless.spi.Allowlist; @@ -48,8 +46,10 @@ import java.util.List; import java.util.Map; -import static org.hamcrest.Matchers.hasSize; +import junit.framework.AssertionFailedError; + import static org.opensearch.painless.action.PainlessExecuteAction.PainlessTestScript; +import static org.hamcrest.Matchers.hasSize; /** * Base test case for scripting unit tests. 
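The RegexLimit test hunks just above mostly reorder imports and repoint CircuitBreakingException at its new org.opensearch.core.common.breaker package. The mechanism those tests exercise is the LimitedCharSequence wrapper that appeared earlier in this diff: Augmentation.matcher hands the regex engine a wrapped input so that runaway backtracking fails fast instead of hanging. Below is a minimal standalone sketch of that counting idea, not the Painless implementation; the budget formula (limitFactor times input length) and the plain RuntimeException are assumptions made to keep it self-contained.

```java
import java.util.regex.Pattern;

// Minimal sketch (not the Painless implementation) of the idea behind
// LimitedCharSequence: cap how many characters the regex engine may read so
// catastrophic backtracking fails fast instead of hanging the node.
final class CountingCharSequence implements CharSequence {
    private final CharSequence wrapped;
    private final int limit; // assumed budget: limitFactor * input length
    private int reads;

    CountingCharSequence(CharSequence wrapped, int limitFactor) {
        this.wrapped = wrapped;
        this.limit = limitFactor * wrapped.length();
    }

    @Override public int length() { return wrapped.length(); }

    @Override public char charAt(int index) {
        if (++reads > limit) {
            // Painless throws CircuitBreakingException here; a plain
            // RuntimeException keeps this sketch dependency-free.
            throw new RuntimeException("regex exceeded " + limit + " character reads");
        }
        return wrapped.charAt(index);
    }

    @Override public CharSequence subSequence(int start, int end) {
        return wrapped.subSequence(start, end);
    }

    public static void main(String[] args) {
        Pattern evil = Pattern.compile("(a+)+b"); // classic backtracking bomb
        CharSequence input = new CountingCharSequence("aaaaaaaaaaaaaaaaaaaaaaaaaaaa", 16);
        try {
            evil.matcher(input).matches();
        } catch (RuntimeException e) {
            System.out.println("tripped: " + e.getMessage());
        }
    }
}
```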
diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/ScriptedMetricAggContextsTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/ScriptedMetricAggContextsTests.java index e9d800b39f0ab..72ab00c4f15cb 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/ScriptedMetricAggContextsTests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/ScriptedMetricAggContextsTests.java @@ -35,8 +35,6 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.memory.MemoryIndex; import org.apache.lucene.search.Scorable; -import org.junit.AfterClass; -import org.junit.BeforeClass; import org.opensearch.common.settings.Settings; import org.opensearch.painless.spi.Allowlist; import org.opensearch.script.ScriptContext; @@ -44,6 +42,8 @@ import org.opensearch.search.lookup.LeafSearchLookup; import org.opensearch.search.lookup.SearchLookup; import org.opensearch.search.lookup.SourceLookup; +import org.junit.AfterClass; +import org.junit.BeforeClass; import java.io.IOException; import java.util.ArrayList; diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/SimilarityScriptTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/SimilarityScriptTests.java index ac8f45bb598ba..3b3f77d982450 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/SimilarityScriptTests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/SimilarityScriptTests.java @@ -49,14 +49,14 @@ import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.ByteBuffersDirectory; import org.apache.lucene.store.Directory; -import org.junit.AfterClass; -import org.junit.BeforeClass; import org.opensearch.common.settings.Settings; import org.opensearch.index.similarity.ScriptedSimilarity; import org.opensearch.painless.spi.Allowlist; import org.opensearch.script.ScriptContext; import org.opensearch.script.SimilarityScript; import org.opensearch.script.SimilarityWeightScript; +import org.junit.AfterClass; +import org.junit.BeforeClass; import java.io.IOException; import java.util.Collections; diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/WhenThingsGoWrongTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/WhenThingsGoWrongTests.java index 3f12bf57a0e33..0d498e16154c8 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/WhenThingsGoWrongTests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/WhenThingsGoWrongTests.java @@ -32,13 +32,14 @@ package org.opensearch.painless; -import junit.framework.AssertionFailedError; import org.apache.lucene.util.Constants; import org.opensearch.script.ScriptException; import java.lang.invoke.WrongMethodTypeException; import java.util.Collections; +import junit.framework.AssertionFailedError; + import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; import static org.hamcrest.Matchers.instanceOf; diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/action/PainlessExecuteApiTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/action/PainlessExecuteApiTests.java index e80cc2d23e290..d1ab998c314b0 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/action/PainlessExecuteApiTests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/action/PainlessExecuteApiTests.java @@ -31,9 +31,9 @@ package org.opensearch.painless.action; -import 
org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexService; import org.opensearch.index.query.MatchQueryBuilder; import org.opensearch.painless.PainlessModulePlugin; @@ -92,13 +92,13 @@ public void testFilterExecutionContext() throws IOException { IndexService indexService = createIndex("index", Settings.EMPTY, "doc", "field", "type=long"); Request.ContextSetup contextSetup = new Request.ContextSetup("index", new BytesArray("{\"field\": 3}"), null); - contextSetup.setXContentType(XContentType.JSON); + contextSetup.setXContentType(MediaTypeRegistry.JSON); Request request = new Request(new Script("doc['field'].value >= 3"), "filter", contextSetup); Response response = innerShardOperation(request, scriptService, indexService); assertThat(response.getResult(), equalTo(true)); contextSetup = new Request.ContextSetup("index", new BytesArray("{\"field\": 3}"), null); - contextSetup.setXContentType(XContentType.JSON); + contextSetup.setXContentType(MediaTypeRegistry.JSON); request = new Request( new Script(ScriptType.INLINE, "painless", "doc['field'].value >= params.max", singletonMap("max", 3)), "filter", @@ -108,7 +108,7 @@ public void testFilterExecutionContext() throws IOException { assertThat(response.getResult(), equalTo(true)); contextSetup = new Request.ContextSetup("index", new BytesArray("{\"field\": 2}"), null); - contextSetup.setXContentType(XContentType.JSON); + contextSetup.setXContentType(MediaTypeRegistry.JSON); request = new Request( new Script(ScriptType.INLINE, "painless", "doc['field'].value >= params.max", singletonMap("max", 3)), "filter", @@ -127,7 +127,7 @@ public void testScoreExecutionContext() throws IOException { new BytesArray("{\"rank\": 4.0, \"text\": \"quick brown fox\"}"), new MatchQueryBuilder("text", "fox") ); - contextSetup.setXContentType(XContentType.JSON); + contextSetup.setXContentType(MediaTypeRegistry.JSON); Request request = new Request( new Script( ScriptType.INLINE, diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/action/PainlessExecuteRequestTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/action/PainlessExecuteRequestTests.java index 8204f9db0b8d6..661c12a111eb7 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/action/PainlessExecuteRequestTests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/action/PainlessExecuteRequestTests.java @@ -31,17 +31,17 @@ package org.opensearch.painless.action; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.query.MatchAllQueryBuilder; import 
org.opensearch.index.query.QueryBuilder; import org.opensearch.painless.action.PainlessExecuteAction.Request.ContextSetup; diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/api/LimitedCharSequenceTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/api/LimitedCharSequenceTests.java index a7787f4bc3c29..4117eb331197f 100644 --- a/modules/lang-painless/src/test/java/org/opensearch/painless/api/LimitedCharSequenceTests.java +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/api/LimitedCharSequenceTests.java @@ -32,7 +32,7 @@ package org.opensearch.painless.api; -import org.opensearch.common.breaker.CircuitBreakingException; +import org.opensearch.core.common.breaker.CircuitBreakingException; import org.opensearch.test.OpenSearchTestCase; import java.util.regex.Pattern; diff --git a/modules/lang-painless/src/test/resources/org/opensearch/painless/spi/org.opensearch.painless.test b/modules/lang-painless/src/test/resources/org/opensearch/painless/spi/org.opensearch.painless.test index 5345f7fab8794..cb4cd70c809b1 100644 --- a/modules/lang-painless/src/test/resources/org/opensearch/painless/spi/org.opensearch.painless.test +++ b/modules/lang-painless/src/test/resources/org/opensearch/painless/spi/org.opensearch.painless.test @@ -1,10 +1,5 @@ # allowlist for tests -# TODO: remove this when the transition from Joda to Java datetimes is completed -class org.opensearch.script.JodaCompatibleZonedDateTime { - (Instant, ZoneId) -} - # for unit tests only class org.opensearch.painless.api.Json { def load(String) diff --git a/modules/lang-painless/src/yamlRestTest/java/org/opensearch/painless/LangPainlessClientYamlTestSuiteIT.java b/modules/lang-painless/src/yamlRestTest/java/org/opensearch/painless/LangPainlessClientYamlTestSuiteIT.java index 520c656723177..f83a8929d979d 100644 --- a/modules/lang-painless/src/yamlRestTest/java/org/opensearch/painless/LangPainlessClientYamlTestSuiteIT.java +++ b/modules/lang-painless/src/yamlRestTest/java/org/opensearch/painless/LangPainlessClientYamlTestSuiteIT.java @@ -34,6 +34,7 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.test.rest.yaml.ClientYamlTestCandidate; import org.opensearch.test.rest.yaml.OpenSearchClientYamlSuiteTestCase; diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/120_script_score_term_frequency.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/120_script_score_term_frequency.yml new file mode 100644 index 0000000000000..b3ff66251938d --- /dev/null +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/120_script_score_term_frequency.yml @@ -0,0 +1,95 @@ +--- +setup: + - skip: + version: " - 2.9.99" + reason: "termFreq functions for script_score was introduced in 2.10.0" + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + mappings: + properties: + f1: + type: keyword + f2: + type: text + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "test", "_id": "doc1"}}' + - '{"f1": "v0", "f2": "v1"}' + - '{"index": {"_index": "test", "_id": "doc2"}}' + - '{"f2": "v2"}' + +--- +"Script score function using the termFreq function": + - do: + search: + index: test + rest_total_hits_as_int: true + body: + query: + function_score: + query: + match_all: {} + script_score: + script: + source: "termFreq(params.field, params.term)" + params: + field: "f1" 
+ term: "v0" + - match: { hits.total: 2 } + - match: { hits.hits.0._id: "doc1" } + - match: { hits.hits.1._id: "doc2" } + - match: { hits.hits.0._score: 1.0 } + - match: { hits.hits.1._score: 0.0 } + +--- +"Script score function using the totalTermFreq function": + - do: + search: + index: test + rest_total_hits_as_int: true + body: + query: + function_score: + query: + match_all: {} + script_score: + script: + source: "if (doc[params.field].size() == 0) return params.default_value; else { return totalTermFreq(params.field, params.term); }" + params: + default_value: 0.5 + field: "f1" + term: "v0" + - match: { hits.total: 2 } + - match: { hits.hits.0._id: "doc1" } + - match: { hits.hits.1._id: "doc2" } + - match: { hits.hits.0._score: 1.0 } + - match: { hits.hits.1._score: 0.5 } + +--- +"Script score function using the sumTotalTermFreq function": + - do: + search: + index: test + rest_total_hits_as_int: true + body: + query: + function_score: + query: + match_all: {} + script_score: + script: + source: "if (doc[params.field].size() == 0) return params.default_value; else { return sumTotalTermFreq(params.field); }" + params: + default_value: 0.5 + field: "f1" + - match: { hits.total: 2 } + - match: { hits.hits.0._id: "doc1" } + - match: { hits.hits.1._id: "doc2" } + - match: { hits.hits.0._score: 1.0 } + - match: { hits.hits.1._score: 0.5 } diff --git a/modules/mapper-extras/src/javaRestTest/java/org/opensearch/index/mapper/TokenCountFieldMapperIntegrationIT.java b/modules/mapper-extras/src/javaRestTest/java/org/opensearch/index/mapper/TokenCountFieldMapperIntegrationIT.java index e25344a4bb4e3..bd0795f07139b 100644 --- a/modules/mapper-extras/src/javaRestTest/java/org/opensearch/index/mapper/TokenCountFieldMapperIntegrationIT.java +++ b/modules/mapper-extras/src/javaRestTest/java/org/opensearch/index/mapper/TokenCountFieldMapperIntegrationIT.java @@ -34,6 +34,7 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.OpenSearchException; import org.opensearch.action.DocWriteResponse; import org.opensearch.action.bulk.BulkResponse; diff --git a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/ScaledFloatFieldMapper.java b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/ScaledFloatFieldMapper.java index 40c2ab13fc9ca..400d867296e5f 100644 --- a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/ScaledFloatFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/ScaledFloatFieldMapper.java @@ -33,6 +33,7 @@ package org.opensearch.index.mapper; import com.fasterxml.jackson.core.JsonParseException; + import org.apache.lucene.document.Field; import org.apache.lucene.index.DocValues; import org.apache.lucene.index.LeafReaderContext; @@ -44,9 +45,9 @@ import org.opensearch.common.Explicit; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.xcontent.XContentParser.Token; -import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.index.fielddata.FieldData; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexNumericFieldData; @@ -198,9 +199,9 @@ public String typeName() { @Override public Query termQuery(Object value, QueryShardContext context) { - failIfNotIndexed(); 
+ failIfNotIndexedAndNoDocValues(); long scaledValue = Math.round(scale(value)); - Query query = NumberFieldMapper.NumberType.LONG.termQuery(name(), scaledValue); + Query query = NumberFieldMapper.NumberType.LONG.termQuery(name(), scaledValue, hasDocValues(), isSearchable()); if (boost() != 1f) { query = new BoostQuery(query, boost()); } @@ -209,13 +210,18 @@ public Query termQuery(Object value, QueryShardContext context) { @Override public Query termsQuery(List<?> values, QueryShardContext context) { - failIfNotIndexed(); + failIfNotIndexedAndNoDocValues(); List<Long> scaledValues = new ArrayList<>(values.size()); for (Object value : values) { long scaledValue = Math.round(scale(value)); scaledValues.add(scaledValue); } - Query query = NumberFieldMapper.NumberType.LONG.termsQuery(name(), Collections.unmodifiableList(scaledValues)); + Query query = NumberFieldMapper.NumberType.LONG.termsQuery( + name(), + Collections.unmodifiableList(scaledValues), + hasDocValues(), + isSearchable() + ); if (boost() != 1f) { query = new BoostQuery(query, boost()); } @@ -224,7 +230,7 @@ public Query termsQuery(List<?> values, QueryShardContext context) { @Override public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) { - failIfNotIndexed(); + failIfNotIndexedAndNoDocValues(); Long lo = null; if (lowerTerm != null) { double dValue = scale(lowerTerm); @@ -241,7 +247,7 @@ public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower } hi = Math.round(Math.floor(dValue)); } - Query query = NumberFieldMapper.NumberType.LONG.rangeQuery(name(), lo, hi, true, true, hasDocValues(), context); + Query query = NumberFieldMapper.NumberType.LONG.rangeQuery(name(), lo, hi, true, true, hasDocValues(), isSearchable(), context); if (boost() != 1f) { query = new BoostQuery(query, boost()); } diff --git a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java index 397a7b48b472a..366e848416328 100644 --- a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java @@ -82,7 +82,7 @@ /** * Mapper for a text field that optimizes itself for as-you-type completion by indexing its content into subfields. 
Each subfield * modifies the analysis chain of the root field to index terms the user would create as they type out the value in the root field - * + * <p> * The structure of these fields is * * <pre> diff --git a/modules/mapper-extras/src/main/java/org/opensearch/index/query/RankFeatureQueryBuilder.java b/modules/mapper-extras/src/main/java/org/opensearch/index/query/RankFeatureQueryBuilder.java index 3b0384ca7d62f..13591d0782ea2 100644 --- a/modules/mapper-extras/src/main/java/org/opensearch/index/query/RankFeatureQueryBuilder.java +++ b/modules/mapper-extras/src/main/java/org/opensearch/index/query/RankFeatureQueryBuilder.java @@ -41,10 +41,10 @@ import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.RankFeatureFieldMapper.RankFeatureFieldType; import org.opensearch.index.mapper.RankFeatureMetaFieldMapper; import org.opensearch.index.mapper.RankFeaturesFieldMapper.RankFeaturesFieldType; -import org.opensearch.index.mapper.MappedFieldType; import java.io.IOException; import java.util.Arrays; diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/BWCTemplateTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/BWCTemplateTests.java index d498116efc108..6b14a1d930287 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/BWCTemplateTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/BWCTemplateTests.java @@ -32,7 +32,7 @@ package org.opensearch.index.mapper; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchSingleNodeTestCase; @@ -54,9 +54,9 @@ public void testBeatsTemplatesBWC() throws Exception { byte[] metricBeat = copyToBytesFromClasspath("/org/opensearch/index/mapper/metricbeat-6.0.template.json"); byte[] packetBeat = copyToBytesFromClasspath("/org/opensearch/index/mapper/packetbeat-6.0.template.json"); byte[] fileBeat = copyToBytesFromClasspath("/org/opensearch/index/mapper/filebeat-6.0.template.json"); - client().admin().indices().preparePutTemplate("metricbeat").setSource(metricBeat, XContentType.JSON).get(); - client().admin().indices().preparePutTemplate("packetbeat").setSource(packetBeat, XContentType.JSON).get(); - client().admin().indices().preparePutTemplate("filebeat").setSource(fileBeat, XContentType.JSON).get(); + client().admin().indices().preparePutTemplate("metricbeat").setSource(metricBeat, MediaTypeRegistry.JSON).get(); + client().admin().indices().preparePutTemplate("packetbeat").setSource(packetBeat, MediaTypeRegistry.JSON).get(); + client().admin().indices().preparePutTemplate("filebeat").setSource(fileBeat, MediaTypeRegistry.JSON).get(); client().prepareIndex("metricbeat-foo").setId("1").setSource("message", "foo").get(); client().prepareIndex("packetbeat-foo").setId("1").setSource("message", "foo").get(); diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeatureFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeatureFieldMapperTests.java index 6412059075e5c..fee9471444c19 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeatureFieldMapperTests.java +++ 
b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeatureFieldMapperTests.java @@ -38,7 +38,6 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; -import org.opensearch.common.Strings; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.plugins.Plugin; @@ -91,7 +90,7 @@ protected void minimalMapping(XContentBuilder b) throws IOException { public void testDefaults() throws Exception { DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); - assertEquals(Strings.toString(fieldMapping(this::minimalMapping)), mapper.mappingSource().toString()); + assertEquals(fieldMapping(this::minimalMapping).toString(), mapper.mappingSource().toString()); ParsedDocument doc1 = mapper.parse(source(b -> b.field("field", 10))); IndexableField[] fields = doc1.rootDoc().getFields("_feature"); diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeatureMetaFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeatureMetaFieldMapperTests.java index 63b1b4760b6fe..6cfdbcc581aad 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeatureMetaFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeatureMetaFieldMapperTests.java @@ -32,11 +32,9 @@ package org.opensearch.index.mapper; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.compress.CompressedXContent; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexService; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchSingleNodeTestCase; @@ -61,18 +59,17 @@ protected Collection<Class<? extends Plugin>> getPlugins() { } public void testBasics() throws Exception { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("properties") - .startObject("field") - .field("type", "rank_feature") - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = MediaTypeRegistry.JSON.contentBuilder() + .startObject() + .startObject("type") + .startObject("properties") + .startObject("field") + .field("type", "rank_feature") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); @@ -85,13 +82,15 @@ public void testBasics() throws Exception { * and parsing of a document fails if the document contains these meta-fields. 
*/ public void testDocumentParsingFailsOnMetaField() throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc").endObject().endObject()); + String mapping = MediaTypeRegistry.JSON.contentBuilder().startObject().startObject("_doc").endObject().endObject().toString(); DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); String rfMetaField = RankFeatureMetaFieldMapper.CONTENT_TYPE; - BytesReference bytes = BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field(rfMetaField, 0).endObject()); + BytesReference bytes = BytesReference.bytes( + MediaTypeRegistry.JSON.contentBuilder().startObject().field(rfMetaField, 0).endObject() + ); MapperParsingException e = expectThrows( MapperParsingException.class, - () -> mapper.parse(new SourceToParse("test", "1", bytes, XContentType.JSON)) + () -> mapper.parse(new SourceToParse("test", "1", bytes, MediaTypeRegistry.JSON)) ); assertTrue( e.getCause().getMessage().contains("Field [" + rfMetaField + "] is a metadata field and cannot be added inside a document.") diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeaturesFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeaturesFieldMapperTests.java index 6c844bae73da4..b95572835e612 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeaturesFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/RankFeaturesFieldMapperTests.java @@ -34,7 +34,6 @@ import org.apache.lucene.document.FeatureField; import org.apache.lucene.index.IndexableField; -import org.opensearch.common.Strings; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.plugins.Plugin; import org.hamcrest.Matchers; @@ -79,7 +78,7 @@ protected boolean supportsMeta() { public void testDefaults() throws Exception { DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); - assertEquals(Strings.toString(fieldMapping(this::minimalMapping)), mapper.mappingSource().toString()); + assertEquals(fieldMapping(this::minimalMapping).toString(), mapper.mappingSource().toString()); ParsedDocument doc1 = mapper.parse(source(this::writeField)); diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldMapperTests.java index 390f10c0684bd..45507867f6691 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldMapperTests.java @@ -34,11 +34,10 @@ import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexableField; -import org.opensearch.common.Strings; +import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.plugins.Plugin; import java.io.IOException; @@ -95,7 +94,7 @@ public void testExistsQueryDocValuesDisabled() throws IOException { public void testDefaults() throws Exception { XContentBuilder mapping = fieldMapping(b -> b.field("type", "scaled_float").field("scaling_factor", 10.0)); DocumentMapper 
mapper = createDocumentMapper(mapping); - assertEquals(Strings.toString(mapping), mapper.mappingSource().toString()); + assertEquals(mapping.toString(), mapper.mappingSource().toString()); ParsedDocument doc = mapper.parse(source(b -> b.field("field", 123))); IndexableField[] fields = doc.rootDoc().getFields("field"); @@ -136,7 +135,7 @@ public void testNotIndexed() throws Exception { "test", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("field", 123).endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ); @@ -157,7 +156,7 @@ public void testNoDocValues() throws Exception { "test", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("field", 123).endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ); @@ -178,7 +177,7 @@ public void testStore() throws Exception { "test", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("field", 123).endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ); @@ -201,7 +200,7 @@ public void testCoerce() throws Exception { "test", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("field", "123").endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ); IndexableField[] fields = doc.rootDoc().getFields("field"); @@ -220,7 +219,7 @@ public void testCoerce() throws Exception { "test", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("field", "123").endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ); MapperParsingException e = expectThrows(MapperParsingException.class, runnable); @@ -244,7 +243,7 @@ private void doTestIgnoreMalformed(Object value, String exceptionMessageContains "test", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("field", value).endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ); MapperParsingException e = expectThrows(MapperParsingException.class, runnable); @@ -258,7 +257,7 @@ private void doTestIgnoreMalformed(Object value, String exceptionMessageContains "test", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("field", value).endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ); @@ -273,7 +272,7 @@ public void testNullValue() throws IOException { "test", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().nullField("field").endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ); assertArrayEquals(new IndexableField[0], doc.rootDoc().getFields("field")); @@ -286,7 +285,7 @@ public void testNullValue() throws IOException { "test", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().nullField("field").endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ); IndexableField[] fields = doc.rootDoc().getFields("field"); diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldTypeTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldTypeTests.java index be12c49321b87..a653edbd05992 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldTypeTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldTypeTests.java @@ -34,11 +34,13 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.DoublePoint; +import org.apache.lucene.document.LongField; import org.apache.lucene.document.LongPoint; import org.apache.lucene.document.SortedNumericDocValuesField; import 
org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; @@ -63,7 +65,9 @@ public void testTermQuery() { ); double value = (randomDouble() * 2 - 1) * 10000; long scaledValue = Math.round(value * ft.getScalingFactor()); - assertEquals(LongPoint.newExactQuery("scaled_float", scaledValue), ft.termQuery(value, null)); + Query dvQuery = SortedNumericDocValuesField.newSlowExactQuery("scaled_float", scaledValue); + Query query = new IndexOrDocValuesQuery(LongPoint.newExactQuery("scaled_float", scaledValue), dvQuery); + assertEquals(query, ft.termQuery(value, null)); } public void testTermsQuery() { @@ -75,7 +79,7 @@ public void testTermsQuery() { long scaledValue1 = Math.round(value1 * ft.getScalingFactor()); double value2 = (randomDouble() * 2 - 1) * 10000; long scaledValue2 = Math.round(value2 * ft.getScalingFactor()); - assertEquals(LongPoint.newSetQuery("scaled_float", scaledValue1, scaledValue2), ft.termsQuery(Arrays.asList(value1, value2), null)); + assertEquals(LongField.newSetQuery("scaled_float", scaledValue1, scaledValue2), ft.termsQuery(Arrays.asList(value1, value2), null)); } public void testRangeQuery() throws IOException { @@ -112,7 +116,16 @@ public void testRangeQuery() throws IOException { Double u = randomBoolean() ? null : (randomDouble() * 2 - 1) * 10000; boolean includeLower = randomBoolean(); boolean includeUpper = randomBoolean(); - Query doubleQ = NumberFieldMapper.NumberType.DOUBLE.rangeQuery("double", l, u, includeLower, includeUpper, false, MOCK_QSC); + Query doubleQ = NumberFieldMapper.NumberType.DOUBLE.rangeQuery( + "double", + l, + u, + includeLower, + includeUpper, + false, + true, + MOCK_QSC + ); Query scaledFloatQ = ft.rangeQuery(l, u, includeLower, includeUpper, MOCK_QSC); assertEquals(searcher.count(doubleQ), searcher.count(scaledFloatQ)); } @@ -122,35 +135,40 @@ public void testRangeQuery() throws IOException { public void testRoundsUpperBoundCorrectly() { ScaledFloatFieldMapper.ScaledFloatFieldType ft = new ScaledFloatFieldMapper.ScaledFloatFieldType("scaled_float", 100); Query scaledFloatQ = ft.rangeQuery(null, 0.1, true, false, MOCK_QSC); - assertEquals("scaled_float:[-9223372036854775808 TO 9]", scaledFloatQ.toString()); + assertEquals("scaled_float:[-9223372036854775808 TO 9]", getQueryString(scaledFloatQ)); scaledFloatQ = ft.rangeQuery(null, 0.1, true, true, MOCK_QSC); - assertEquals("scaled_float:[-9223372036854775808 TO 10]", scaledFloatQ.toString()); + assertEquals("scaled_float:[-9223372036854775808 TO 10]", getQueryString(scaledFloatQ)); scaledFloatQ = ft.rangeQuery(null, 0.095, true, false, MOCK_QSC); - assertEquals("scaled_float:[-9223372036854775808 TO 9]", scaledFloatQ.toString()); + assertEquals("scaled_float:[-9223372036854775808 TO 9]", getQueryString(scaledFloatQ)); scaledFloatQ = ft.rangeQuery(null, 0.095, true, true, MOCK_QSC); - assertEquals("scaled_float:[-9223372036854775808 TO 9]", scaledFloatQ.toString()); + assertEquals("scaled_float:[-9223372036854775808 TO 9]", getQueryString(scaledFloatQ)); scaledFloatQ = ft.rangeQuery(null, 0.105, true, false, MOCK_QSC); - assertEquals("scaled_float:[-9223372036854775808 TO 10]", scaledFloatQ.toString()); + assertEquals("scaled_float:[-9223372036854775808 TO 10]", getQueryString(scaledFloatQ)); scaledFloatQ = ft.rangeQuery(null, 0.105, 
true, true, MOCK_QSC); - assertEquals("scaled_float:[-9223372036854775808 TO 10]", scaledFloatQ.toString()); + assertEquals("scaled_float:[-9223372036854775808 TO 10]", getQueryString(scaledFloatQ)); scaledFloatQ = ft.rangeQuery(null, 79.99, true, true, MOCK_QSC); - assertEquals("scaled_float:[-9223372036854775808 TO 7999]", scaledFloatQ.toString()); + assertEquals("scaled_float:[-9223372036854775808 TO 7999]", getQueryString(scaledFloatQ)); } public void testRoundsLowerBoundCorrectly() { ScaledFloatFieldMapper.ScaledFloatFieldType ft = new ScaledFloatFieldMapper.ScaledFloatFieldType("scaled_float", 100); Query scaledFloatQ = ft.rangeQuery(-0.1, null, false, true, MOCK_QSC); - assertEquals("scaled_float:[-9 TO 9223372036854775807]", scaledFloatQ.toString()); + assertEquals("scaled_float:[-9 TO 9223372036854775807]", getQueryString(scaledFloatQ)); scaledFloatQ = ft.rangeQuery(-0.1, null, true, true, MOCK_QSC); - assertEquals("scaled_float:[-10 TO 9223372036854775807]", scaledFloatQ.toString()); + assertEquals("scaled_float:[-10 TO 9223372036854775807]", getQueryString(scaledFloatQ)); scaledFloatQ = ft.rangeQuery(-0.095, null, false, true, MOCK_QSC); - assertEquals("scaled_float:[-9 TO 9223372036854775807]", scaledFloatQ.toString()); + assertEquals("scaled_float:[-9 TO 9223372036854775807]", getQueryString(scaledFloatQ)); scaledFloatQ = ft.rangeQuery(-0.095, null, true, true, MOCK_QSC); - assertEquals("scaled_float:[-9 TO 9223372036854775807]", scaledFloatQ.toString()); + assertEquals("scaled_float:[-9 TO 9223372036854775807]", getQueryString(scaledFloatQ)); scaledFloatQ = ft.rangeQuery(-0.105, null, false, true, MOCK_QSC); - assertEquals("scaled_float:[-10 TO 9223372036854775807]", scaledFloatQ.toString()); + assertEquals("scaled_float:[-10 TO 9223372036854775807]", getQueryString(scaledFloatQ)); scaledFloatQ = ft.rangeQuery(-0.105, null, true, true, MOCK_QSC); - assertEquals("scaled_float:[-10 TO 9223372036854775807]", scaledFloatQ.toString()); + assertEquals("scaled_float:[-10 TO 9223372036854775807]", getQueryString(scaledFloatQ)); + } + + private String getQueryString(Query query) { + assertTrue(query instanceof IndexOrDocValuesQuery); + return ((IndexOrDocValuesQuery) query).getIndexQuery().toString(); } public void testValueForSearch() { diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java index 5e67aaa2ed246..b5f687ce34d4b 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java @@ -50,9 +50,9 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.SynonymQuery; import org.apache.lucene.search.TermQuery; -import org.opensearch.common.Strings; import org.opensearch.common.lucene.search.MultiPhrasePrefixQuery; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.Strings; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexSettings; import org.opensearch.index.analysis.AnalyzerScope; @@ -600,7 +600,7 @@ public void testAnalyzerSerialization() throws IOException { b.field("type", "search_as_you_type"); b.field("analyzer", "simple"); })); - String serialized = Strings.toString(XContentType.JSON, ms.documentMapper()); + String 
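The expectation changes above reflect a query-planning optimization: scaled_float exact and range matches are now issued as an IndexOrDocValuesQuery, which pairs the points-based query with a doc-values query and lets Lucene choose, per segment, whichever is cheaper to execute; the new getQueryString helper unwraps the index-side query so the string assertions still hold. A minimal sketch of the construction, assuming only Lucene on the classpath (the class and method names are illustrative):

import org.apache.lucene.document.LongPoint;
import org.apache.lucene.document.SortedNumericDocValuesField;
import org.apache.lucene.search.IndexOrDocValuesQuery;
import org.apache.lucene.search.Query;

public class IndexOrDocValuesSketch {
    // Mirrors what testTermQuery now expects for a scaled_float exact match:
    // the double is scaled to a long, then both execution strategies are offered.
    static Query scaledFloatExactQuery(String field, double value, double scalingFactor) {
        long scaled = Math.round(value * scalingFactor);
        Query pointQuery = LongPoint.newExactQuery(field, scaled);                    // BKD tree
        Query dvQuery = SortedNumericDocValuesField.newSlowExactQuery(field, scaled); // doc values
        return new IndexOrDocValuesQuery(pointQuery, dvQuery);
    }
}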
serialized = Strings.toString(MediaTypeRegistry.JSON, ms.documentMapper()); assertEquals( serialized, "{\"_doc\":{\"properties\":{\"field\":" @@ -608,7 +608,7 @@ public void testAnalyzerSerialization() throws IOException { ); merge(ms, mapping(b -> {})); - assertEquals(serialized, Strings.toString(XContentType.JSON, ms.documentMapper())); + assertEquals(serialized, Strings.toString(MediaTypeRegistry.JSON, ms.documentMapper())); } private void documentParsingTestCase(Collection<String> values) throws IOException { diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldTypeTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldTypeTests.java index 3b70455960966..62b906d7442f6 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldTypeTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldTypeTests.java @@ -52,8 +52,8 @@ import java.util.List; import static java.util.Arrays.asList; -import static org.apache.lucene.search.MultiTermQuery.CONSTANT_SCORE_REWRITE; import static org.hamcrest.Matchers.equalTo; +import static org.apache.lucene.search.MultiTermQuery.CONSTANT_SCORE_REWRITE; public class SearchAsYouTypeFieldTypeTests extends FieldTypeTestCase { diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/TokenCountFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/TokenCountFieldMapperTests.java index 548de34505816..7790ed12c60f0 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/TokenCountFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/TokenCountFieldMapperTests.java @@ -33,12 +33,12 @@ package org.opensearch.index.mapper; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.tests.analysis.CannedTokenStream; -import org.apache.lucene.tests.analysis.MockTokenizer; -import org.apache.lucene.tests.analysis.Token; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.core.KeywordAnalyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; +import org.apache.lucene.tests.analysis.CannedTokenStream; +import org.apache.lucene.tests.analysis.MockTokenizer; +import org.apache.lucene.tests.analysis.Token; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexSettings; import org.opensearch.index.analysis.AnalyzerScope; diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/query/RankFeatureQueryBuilderTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/query/RankFeatureQueryBuilderTests.java index f57aac8a244b7..a8d672c025af0 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/query/RankFeatureQueryBuilderTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/query/RankFeatureQueryBuilderTests.java @@ -36,7 +36,6 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest; -import org.opensearch.common.Strings; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.index.mapper.MapperExtrasModulePlugin; import org.opensearch.index.mapper.MapperService; @@ -61,16 +60,14 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws mapperService.merge( "_doc", new CompressedXContent( - Strings.toString( - 
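Related to the same cleanup, the serialization helper used here moved from org.opensearch.common.Strings to org.opensearch.core.common.Strings, and the overload takes a MediaType rather than an XContentType, with MediaTypeRegistry.JSON as the drop-in argument. A small sketch of the replacement call (the wrapper class and method are illustrative):

import org.opensearch.core.common.Strings;
import org.opensearch.core.xcontent.MediaTypeRegistry;
import org.opensearch.core.xcontent.ToXContent;

public final class ToXContentJson {
    // Serializes any ToXContent (e.g. a DocumentMapper) to a JSON string,
    // as testAnalyzerSerialization does above.
    static String toJson(ToXContent content) {
        return Strings.toString(MediaTypeRegistry.JSON, content);
    }

    private ToXContentJson() {}
}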
PutMappingRequest.simpleMapping( - "my_feature_field", - "type=rank_feature", - "my_negative_feature_field", - "type=rank_feature,positive_score_impact=false", - "my_feature_vector_field", - "type=rank_features" - ) - ) + PutMappingRequest.simpleMapping( + "my_feature_field", + "type=rank_feature", + "my_negative_feature_field", + "type=rank_feature,positive_score_impact=false", + "my_feature_vector_field", + "type=rank_features" + ).toString() ), MapperService.MergeReason.MAPPING_UPDATE ); diff --git a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/search-as-you-type/20_highlighting_field_match_only_text.yml b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/search-as-you-type/20_highlighting_field_match_only_text.yml new file mode 100644 index 0000000000000..1d6a938675e39 --- /dev/null +++ b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/search-as-you-type/20_highlighting_field_match_only_text.yml @@ -0,0 +1,201 @@ +setup: + - skip: + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" + + - do: + indices.create: + index: test + body: + settings: + number_of_replicas: 0 + mappings: + properties: + a_field: + type: search_as_you_type + analyzer: simple + max_shingle_size: 4 + text_field: + type: match_only_text + analyzer: simple + + - do: + index: + index: test + id: 1 + body: + a_field: "quick brown fox jump lazy dog" + text_field: "quick brown fox jump lazy dog" + + - do: + indices.refresh: {} + +--- +"phrase query": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase: + a_field: "brown" + highlight: + fields: + a_field: + type: unified + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0._source.text_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0.highlight.a_field.0: "quick <em>brown</em> fox jump lazy dog" } + +--- +"bool prefix query": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + a_field: "brown fo" + highlight: + fields: + a_field: + type: unified + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0._source.text_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0.highlight.a_field.0: "quick <em>brown</em> fox jump lazy dog" } + +--- +"multi match bool prefix query 1 complete term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fo" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + highlight: + fields: + a_field: + type: unified + a_field._2gram: + type: unified + a_field._3gram: + type: unified + a_field._4gram: + type: unified + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0._source.text_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0.highlight.a_field: ["quick <em>brown</em> fox jump lazy dog"] } + - match: { hits.hits.0.highlight.a_field\._2gram: null } + - match: { hits.hits.0.highlight.a_field\._3gram: null } + - match: { hits.hits.0.highlight.a_field\._4gram: null } + +--- +"multi match bool prefix query 2 complete term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox ju" + type: "bool_prefix" + fields: [ "a_field", 
"a_field._2gram", "a_field._3gram", "a_field._4gram" ] + highlight: + fields: + a_field: + type: unified + a_field._2gram: + type: unified + a_field._3gram: + type: unified + a_field._4gram: + type: unified + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0._source.text_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0.highlight.a_field: ["quick <em>brown</em> <em>fox</em> jump lazy dog"] } + - match: { hits.hits.0.highlight.a_field\._2gram: ["quick <em>brown fox</em> jump lazy dog"] } + - match: { hits.hits.0.highlight.a_field\._3gram: null } + - match: { hits.hits.0.highlight.a_field\._4gram: null } + +--- +"multi match bool prefix query 3 complete term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox jump la" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + highlight: + fields: + a_field: + type: unified + a_field._2gram: + type: unified + a_field._3gram: + type: unified + a_field._4gram: + type: unified + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0._source.text_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0.highlight.a_field: ["quick <em>brown</em> <em>fox</em> <em>jump</em> lazy dog"] } + - match: { hits.hits.0.highlight.a_field\._2gram: ["quick <em>brown fox jump</em> lazy dog"] } + - match: { hits.hits.0.highlight.a_field\._3gram: ["quick <em>brown fox jump</em> lazy dog"] } + - match: { hits.hits.0.highlight.a_field\._4gram: null } + +--- +"multi match bool prefix query 4 complete term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox jump lazy d" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + highlight: + fields: + a_field: + type: unified + a_field._2gram: + type: unified + a_field._3gram: + type: unified + a_field._4gram: + type: unified + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0._source.text_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0.highlight.a_field: ["quick <em>brown</em> <em>fox</em> <em>jump</em> <em>lazy</em> dog"] } + - match: { hits.hits.0.highlight.a_field\._2gram: ["quick <em>brown fox jump lazy</em> dog"] } + - match: { hits.hits.0.highlight.a_field\._3gram: ["quick <em>brown fox jump lazy</em> dog"] } + - match: { hits.hits.0.highlight.a_field\._4gram: ["quick <em>brown fox jump lazy</em> dog"] } diff --git a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/AbstractParentChildTestCase.java b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/AbstractParentChildTestCase.java index 5566c688aefbf..e049edf843069 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/AbstractParentChildTestCase.java +++ b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/AbstractParentChildTestCase.java @@ -32,7 +32,14 @@ package org.opensearch.join.aggregations; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.action.search.SearchRequestBuilder; +import 
org.opensearch.action.search.SearchResponse; +import org.opensearch.client.Requests; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.join.query.ParentChildTestCase; +import org.junit.Before; import java.util.ArrayList; import java.util.HashMap; @@ -41,9 +48,8 @@ import java.util.Map; import java.util.Set; -import org.opensearch.action.index.IndexRequestBuilder; -import org.opensearch.join.query.ParentChildTestCase; -import org.junit.Before; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; /** * Small base test-class which combines stuff used for Children and Parent aggregation tests @@ -52,6 +58,10 @@ public abstract class AbstractParentChildTestCase extends ParentChildTestCase { protected final Map<String, Control> categoryToControl = new HashMap<>(); protected final Map<String, ParentControl> articleToControl = new HashMap<>(); + public AbstractParentChildTestCase(Settings dynamicSettings) { + super(dynamicSettings); + } + @Before public void setupCluster() throws Exception { assertAcked( @@ -154,4 +164,38 @@ private ParentControl(String category) { this.category = category; } } + + // Test when there is 1 child document and 1 parent document per segment. + public void testSparseSegments() throws InterruptedException { + assertAcked( + prepareCreate("sparse").setMapping( + addFieldMappings( + buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "article", "comment"), + "commenter", + "keyword", + "category", + "keyword" + ) + ) + .setSettings( + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + ) + ); + + List<IndexRequestBuilder> requests = new ArrayList<>(); + requests.add(createIndexRequest("sparse", "article", "article-0", null, "category", List.of("0"))); + indexRandom(true, false, requests); + client().admin().indices().refresh(Requests.refreshRequest("sparse")).actionGet(); + requests = new ArrayList<>(); + requests.add(createIndexRequest("sparse", "comment", "comment-0", "article-0", "commenter", "0")); + indexRandom(true, false, requests); + + SearchResponse searchResponse = getSearchRequest().get(); + assertSearchResponse(searchResponse); + validateSpareSegmentsSearchResponse(searchResponse); + } + + abstract SearchRequestBuilder getSearchRequest(); + + abstract void validateSpareSegmentsSearchResponse(SearchResponse searchResponse); } diff --git a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ChildrenIT.java b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ChildrenIT.java index 72c502c616ff8..ab74463382aaa 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ChildrenIT.java +++ b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ChildrenIT.java @@ -31,8 +31,11 @@ package org.opensearch.join.aggregations; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.search.join.ScoreMode; import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.update.UpdateResponse; import org.opensearch.client.Requests; @@ -47,14 +50,18 @@ import org.opensearch.search.sort.SortOrder; import 
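The new testSparseSegments fixture above forces the parent and the child into different Lucene segments by indexing them in two separate passes with a refresh in between; the per-suite getSearchRequest and validateSpareSegmentsSearchResponse hooks then assert that the join aggregations still resolve the relationship across segment boundaries. The segment layout it relies on can be reproduced at the Lucene level; a minimal sketch, assuming only Lucene on the classpath (all names illustrative):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class SparseSegmentsSketch {
    public static void main(String[] args) throws Exception {
        try (Directory dir = new ByteBuffersDirectory();
             IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
            Document parent = new Document();
            parent.add(new StringField("join_field", "article", Field.Store.NO));
            writer.addDocument(parent);
            writer.flush(); // segment 1: the parent only
            Document child = new Document();
            child.add(new StringField("join_field", "comment", Field.Store.NO));
            writer.addDocument(child);
            writer.flush(); // segment 2: the child only
            try (DirectoryReader reader = DirectoryReader.open(writer)) {
                System.out.println("segments: " + reader.leaves().size()); // 2
            }
        }
    }
}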
java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Set; +import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.matchQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; import static org.opensearch.join.aggregations.JoinAggregationBuilders.children; import static org.opensearch.join.query.JoinQueryBuilders.hasChildQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.search.aggregations.AggregationBuilders.terms; import static org.opensearch.search.aggregations.AggregationBuilders.topHits; @@ -69,6 +76,18 @@ public class ChildrenIT extends AbstractParentChildTestCase { + public ChildrenIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + public void testChildrenAggs() throws Exception { SearchResponse searchResponse = client().prepareSearch("test") .setQuery(matchQuery("randomized", true)) @@ -407,4 +426,18 @@ public void testPostCollectAllLeafReaders() throws Exception { children = parents.getBuckets().get(0).getAggregations().get("child_docs"); assertThat(children.getDocCount(), equalTo(2L)); } + + @Override + SearchRequestBuilder getSearchRequest() { + return client().prepareSearch("sparse") + .setSize(10000) + .setQuery(matchAllQuery()) + .addAggregation(children("to_comment", "comment").subAggregation(terms("commenters").field("commenter").size(10000))); + } + + @Override + void validateSpareSegmentsSearchResponse(SearchResponse searchResponse) { + Children children = searchResponse.getAggregations().get("to_comment"); + assertEquals(children.getDocCount(), 1); + } } diff --git a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ParentIT.java b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ParentIT.java index 351b0beec481b..4a6157e388777 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ParentIT.java +++ b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ParentIT.java @@ -32,12 +32,17 @@ package org.opensearch.join.aggregations; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.search.aggregations.Aggregation; import org.opensearch.search.aggregations.bucket.MultiBucketsAggregation; import org.opensearch.search.aggregations.bucket.terms.Terms; +import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -47,8 +52,10 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.matchQuery; import static org.opensearch.join.aggregations.JoinAggregationBuilders.parent; +import static 
org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.terms; import static org.opensearch.search.aggregations.AggregationBuilders.topHits; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; @@ -56,6 +63,18 @@ public class ParentIT extends AbstractParentChildTestCase { + public ParentIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + public void testSimpleParentAgg() throws Exception { final SearchRequestBuilder searchRequest = client().prepareSearch("test") .setSize(10000) @@ -264,4 +283,18 @@ public void testTermsParentAggTerms() throws Exception { } } } + + @Override + SearchRequestBuilder getSearchRequest() { + return client().prepareSearch("sparse") + .setSize(10000) + .setQuery(matchAllQuery()) + .addAggregation(parent("to_article", "comment").subAggregation(terms("category").field("category").size(10000))); + } + + @Override + void validateSpareSegmentsSearchResponse(SearchResponse searchResponse) { + Parent parentAgg = searchResponse.getAggregations().get("to_article"); + assertEquals(parentAgg.getDocCount(), 1); + } } diff --git a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ChildQuerySearchIT.java b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ChildQuerySearchIT.java index 6fdcd7f5488f4..99527c3273c4b 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ChildQuerySearchIT.java +++ b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ChildQuerySearchIT.java @@ -31,6 +31,8 @@ package org.opensearch.join.query; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.search.join.ScoreMode; import org.opensearch.action.explain.ExplainResponse; import org.opensearch.action.index.IndexRequestBuilder; @@ -42,6 +44,7 @@ import org.opensearch.common.lucene.search.function.FunctionScoreQuery; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.rest.RestStatus; import org.opensearch.index.query.BoolQueryBuilder; import org.opensearch.index.query.IdsQueryBuilder; import org.opensearch.index.query.InnerHitBuilder; @@ -50,7 +53,6 @@ import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.query.functionscore.FunctionScoreQueryBuilder; -import org.opensearch.core.rest.RestStatus; import org.opensearch.search.SearchHit; import org.opensearch.search.aggregations.AggregationBuilders; import org.opensearch.search.aggregations.bucket.filter.Filter; @@ -65,6 +67,8 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -87,6 +91,7 @@ import static org.opensearch.join.query.JoinQueryBuilders.hasChildQuery; import static org.opensearch.join.query.JoinQueryBuilders.hasParentQuery; import static org.opensearch.join.query.JoinQueryBuilders.parentId; +import static 
org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; @@ -100,6 +105,18 @@ public class ChildQuerySearchIT extends ParentChildTestCase { + public ChildQuerySearchIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + public void testMultiLevelChild() throws Exception { assertAcked( prepareCreate("test").setMapping( diff --git a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/InnerHitsIT.java b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/InnerHitsIT.java index ffcc9cf38545f..4b5470d17c100 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/InnerHitsIT.java +++ b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/InnerHitsIT.java @@ -32,11 +32,14 @@ package org.opensearch.join.query; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.util.ArrayUtil; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.index.IndexSettings; import org.opensearch.index.query.BoolQueryBuilder; import org.opensearch.index.query.InnerHitBuilder; @@ -54,6 +57,7 @@ import org.opensearch.search.sort.SortOrder; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; @@ -73,6 +77,7 @@ import static org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; import static org.opensearch.join.query.JoinQueryBuilders.hasChildQuery; import static org.opensearch.join.query.JoinQueryBuilders.hasParentQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; @@ -87,6 +92,18 @@ public class InnerHitsIT extends ParentChildTestCase { + public InnerHitsIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override protected Collection<Class<? extends Plugin>> nodePlugins() { ArrayList<Class<? 
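The constructor-plus-@ParametersFactory boilerplate repeated across ChildrenIT, ParentIT, ChildQuerySearchIT, and InnerHitsIT is one pattern: each suite now runs its entire test matrix twice, once with the cluster-level concurrent segment search setting disabled and once enabled. Sketched once for reference (the suite name is illustrative):

import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;

import org.opensearch.common.settings.Settings;

import java.util.Arrays;
import java.util.Collection;

import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;

public class ExampleJoinIT extends ParentChildTestCase {

    public ExampleJoinIT(Settings settings) {
        super(settings); // handed through to the parameterized base class
    }

    // Each Object[] becomes one constructor invocation, so every test method
    // executes under both values of the setting.
    @ParametersFactory
    public static Collection<Object[]> parameters() {
        return Arrays.asList(
            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
        );
    }
}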
extends Plugin>> plugins = new ArrayList<>(super.nodePlugins()); diff --git a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ParentChildTestCase.java b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ParentChildTestCase.java index 34a6af6ee3639..9c0f96cf382a6 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ParentChildTestCase.java +++ b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ParentChildTestCase.java @@ -32,16 +32,16 @@ package org.opensearch.join.query; import org.opensearch.action.index.IndexRequestBuilder; -import org.opensearch.common.Strings; import org.opensearch.common.settings.Settings; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexModule; import org.opensearch.join.ParentJoinModulePlugin; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; @@ -51,7 +51,11 @@ import java.util.Map; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE) -public abstract class ParentChildTestCase extends OpenSearchIntegTestCase { +public abstract class ParentChildTestCase extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public ParentChildTestCase(Settings staticSettings) { + super(staticSettings); + } @Override protected boolean ignoreExternalCluster() { @@ -85,7 +89,7 @@ protected IndexRequestBuilder createIndexRequest(String index, String type, Stri protected IndexRequestBuilder createIndexRequest(String index, String type, String id, String parentId, XContentBuilder builder) throws IOException { - Map<String, Object> source = XContentHelper.convertToMap(JsonXContent.jsonXContent, Strings.toString(builder), false); + Map<String, Object> source = XContentHelper.convertToMap(JsonXContent.jsonXContent, builder.toString(), false); return createIndexRequest(index, type, id, parentId, source); } diff --git a/modules/parent-join/src/main/java/org/opensearch/join/aggregations/ChildrenAggregatorFactory.java b/modules/parent-join/src/main/java/org/opensearch/join/aggregations/ChildrenAggregatorFactory.java index 793b35111cfe2..bbca89fc56820 100644 --- a/modules/parent-join/src/main/java/org/opensearch/join/aggregations/ChildrenAggregatorFactory.java +++ b/modules/parent-join/src/main/java/org/opensearch/join/aggregations/ChildrenAggregatorFactory.java @@ -118,4 +118,9 @@ public String getStatsSubtype() { // Child Aggregation is registered in non-standard way, so it might return child's values type return OTHER_SUBTYPE; } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/modules/parent-join/src/main/java/org/opensearch/join/aggregations/ParentAggregatorFactory.java b/modules/parent-join/src/main/java/org/opensearch/join/aggregations/ParentAggregatorFactory.java index 40c07c8f53e20..9a21cd1db3200 100644 --- a/modules/parent-join/src/main/java/org/opensearch/join/aggregations/ParentAggregatorFactory.java +++ b/modules/parent-join/src/main/java/org/opensearch/join/aggregations/ParentAggregatorFactory.java @@ -118,4 
+118,10 @@ public String getStatsSubtype() { // Parent Aggregation is registered in non-standard way return OTHER_SUBTYPE; } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + // See https://github.com/opensearch-project/OpenSearch/issues/9316 + return false; + } } diff --git a/modules/parent-join/src/main/java/org/opensearch/join/aggregations/ParentJoinAggregator.java b/modules/parent-join/src/main/java/org/opensearch/join/aggregations/ParentJoinAggregator.java index 0b0ff2b0a382e..4e1016a596874 100644 --- a/modules/parent-join/src/main/java/org/opensearch/join/aggregations/ParentJoinAggregator.java +++ b/modules/parent-join/src/main/java/org/opensearch/join/aggregations/ParentJoinAggregator.java @@ -41,11 +41,11 @@ import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.BitArray; -import org.opensearch.common.lease.Releasable; -import org.opensearch.common.lease.Releasables; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; import org.opensearch.search.aggregations.CardinalityUpperBound; diff --git a/modules/parent-join/src/main/java/org/opensearch/join/mapper/ParentJoinFieldMapper.java b/modules/parent-join/src/main/java/org/opensearch/join/mapper/ParentJoinFieldMapper.java index 2052b229e7a2d..a229d050f3b1d 100644 --- a/modules/parent-join/src/main/java/org/opensearch/join/mapper/ParentJoinFieldMapper.java +++ b/modules/parent-join/src/main/java/org/opensearch/join/mapper/ParentJoinFieldMapper.java @@ -38,9 +38,9 @@ import org.apache.lucene.index.IndexOptions; import org.apache.lucene.util.BytesRef; import org.opensearch.common.lucene.Lucene; +import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.index.IndexSettings; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; diff --git a/modules/parent-join/src/main/java/org/opensearch/join/query/HasChildQueryBuilder.java b/modules/parent-join/src/main/java/org/opensearch/join/query/HasChildQueryBuilder.java index 4f26a15031dd4..e930780613ed6 100644 --- a/modules/parent-join/src/main/java/org/opensearch/join/query/HasChildQueryBuilder.java +++ b/modules/parent-join/src/main/java/org/opensearch/join/query/HasChildQueryBuilder.java @@ -42,12 +42,12 @@ import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.search.similarities.Similarity; import org.opensearch.OpenSearchException; +import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.common.lucene.search.Queries; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.logging.DeprecationLogger; -import org.opensearch.common.lucene.search.Queries; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.fielddata.IndexOrdinalsFieldData; @@ -373,7 +373,7 @@ 
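The two overrides above are the per-aggregation opt-in mechanism for concurrent segment search: an aggregator factory advertises whether its collection logic is safe to run across segment slices in parallel. ChildrenAggregatorFactory opts in, while ParentAggregatorFactory deliberately stays out pending the linked issue. A toy model of the flag (this class hierarchy is illustrative, not OpenSearch code, and the conservative false default is an assumption):

// Toy model of the opt-in switch; names mirror the real AggregatorFactory method.
abstract class FactoryModel {
    // Assumed conservative default: factories must explicitly opt in.
    protected boolean supportsConcurrentSegmentSearch() {
        return false;
    }
}

class ChildrenFactoryModel extends FactoryModel {
    @Override
    protected boolean supportsConcurrentSegmentSearch() {
        return true; // opt in only once the collect path is verified thread-safe per slice
    }
}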
protected Query doToQuery(QueryShardContext context) throws IOException { * A query that rewrites into another query using * {@link JoinUtil#createJoinQuery(String, Query, Query, IndexSearcher, ScoreMode, OrdinalMap, int, int)} * that executes the actual join. - * + * <p> * This query is exclusively used by the {@link HasChildQueryBuilder} and {@link HasParentQueryBuilder} to get access * to the {@link DirectoryReader} used by the current search in order to retrieve the {@link OrdinalMap}. * The {@link OrdinalMap} is required by {@link JoinUtil} to execute the join. @@ -416,11 +416,12 @@ public void visit(QueryVisitor visitor) { } @Override - public Query rewrite(IndexReader reader) throws IOException { - Query rewritten = super.rewrite(reader); + public Query rewrite(IndexSearcher searcher) throws IOException { + Query rewritten = super.rewrite(searcher); if (rewritten != this) { return rewritten; } + IndexReader reader = searcher.getIndexReader(); if (reader instanceof DirectoryReader) { IndexSearcher indexSearcher = new IndexSearcher(reader); indexSearcher.setQueryCache(null); diff --git a/modules/parent-join/src/main/java/org/opensearch/join/query/HasParentQueryBuilder.java b/modules/parent-join/src/main/java/org/opensearch/join/query/HasParentQueryBuilder.java index 97809af772979..d296a7b0141ff 100644 --- a/modules/parent-join/src/main/java/org/opensearch/join/query/HasParentQueryBuilder.java +++ b/modules/parent-join/src/main/java/org/opensearch/join/query/HasParentQueryBuilder.java @@ -35,11 +35,11 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.join.ScoreMode; import org.opensearch.OpenSearchException; +import org.opensearch.common.lucene.search.Queries; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.lucene.search.Queries; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; diff --git a/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ChildrenToParentAggregatorTests.java b/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ChildrenToParentAggregatorTests.java index 1d0f6ebdd8349..cdc2764ae1ac9 100644 --- a/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ChildrenToParentAggregatorTests.java +++ b/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ChildrenToParentAggregatorTests.java @@ -38,12 +38,12 @@ import org.apache.lucene.document.StringField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; @@ -51,6 +51,7 @@ import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; import org.opensearch.common.settings.Settings; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.mapper.ContentPath; import 
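The LateParsingQuery change above tracks a Lucene API migration: Query.rewrite(IndexReader) was superseded by Query.rewrite(IndexSearcher), so queries that need the reader now fetch it from the searcher. A minimal compilable sketch of the new shape, assuming Lucene 9.7+ on the classpath (the query class is illustrative):

import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryVisitor;

public class RewriteSketchQuery extends Query {

    @Override
    public Query rewrite(IndexSearcher searcher) throws IOException {
        // The reader is no longer a method parameter; it now comes off the searcher,
        // which is how LateParsingQuery reaches its DirectoryReader check above.
        IndexReader reader = searcher.getIndexReader();
        System.out.println("rewriting against " + reader.leaves().size() + " segment(s)");
        return this; // no actual rewriting in this sketch
    }

    @Override
    public String toString(String field) {
        return "RewriteSketchQuery";
    }

    @Override
    public void visit(QueryVisitor visitor) {
        visitor.visitLeaf(this);
    }

    @Override
    public boolean equals(Object obj) {
        return obj == this;
    }

    @Override
    public int hashCode() {
        return System.identityHashCode(this);
    }
}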
org.opensearch.index.mapper.DocumentMapper; import org.opensearch.index.mapper.IdFieldMapper; @@ -60,7 +61,6 @@ import org.opensearch.index.mapper.MappingLookup; import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.index.mapper.Uid; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.join.ParentJoinModulePlugin; import org.opensearch.join.mapper.MetaJoinFieldMapper; import org.opensearch.join.mapper.ParentJoinFieldMapper; diff --git a/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ParentTests.java b/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ParentTests.java index 4f08e004ea208..6d3b955926ef2 100644 --- a/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ParentTests.java +++ b/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ParentTests.java @@ -32,14 +32,14 @@ package org.opensearch.join.aggregations; -import java.util.Arrays; -import java.util.Collection; - import org.opensearch.join.ParentJoinModulePlugin; import org.opensearch.plugins.Plugin; import org.opensearch.search.aggregations.BaseAggregationTestCase; import org.opensearch.test.TestGeoShapeFieldMapperPlugin; +import java.util.Arrays; +import java.util.Collection; + public class ParentTests extends BaseAggregationTestCase<ParentAggregationBuilder> { @Override diff --git a/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ParentToChildrenAggregatorTests.java b/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ParentToChildrenAggregatorTests.java index 1e09d75dc7197..8a0857d287717 100644 --- a/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ParentToChildrenAggregatorTests.java +++ b/modules/parent-join/src/test/java/org/opensearch/join/aggregations/ParentToChildrenAggregatorTests.java @@ -39,12 +39,12 @@ import org.apache.lucene.document.StringField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; @@ -52,16 +52,16 @@ import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; import org.opensearch.common.settings.Settings; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.mapper.ContentPath; -import org.opensearch.index.mapper.MappingLookup; import org.opensearch.index.mapper.DocumentMapper; import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.Mapper; import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.MappingLookup; import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.index.mapper.Uid; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.join.ParentJoinModulePlugin; import org.opensearch.join.mapper.MetaJoinFieldMapper; import org.opensearch.join.mapper.ParentJoinFieldMapper; diff --git a/modules/parent-join/src/test/java/org/opensearch/join/mapper/ParentJoinFieldMapperTests.java 
b/modules/parent-join/src/test/java/org/opensearch/join/mapper/ParentJoinFieldMapperTests.java index cd8f18b679750..ed6a8259d6e90 100644 --- a/modules/parent-join/src/test/java/org/opensearch/join/mapper/ParentJoinFieldMapperTests.java +++ b/modules/parent-join/src/test/java/org/opensearch/join/mapper/ParentJoinFieldMapperTests.java @@ -32,11 +32,10 @@ package org.opensearch.join.mapper; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexService; import org.opensearch.index.mapper.DocumentMapper; import org.opensearch.index.mapper.MapperException; @@ -60,19 +59,18 @@ protected Collection<Class<? extends Plugin>> getPlugins() { } public void testSingleLevel() throws Exception { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("join_field") - .field("type", "join") - .startObject("relations") - .field("parent", "child") - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("join_field") + .field("type", "join") + .startObject("relations") + .field("parent", "child") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); IndexService service = createIndex("test"); DocumentMapper docMapper = service.mapperService() .merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); @@ -80,7 +78,12 @@ public void testSingleLevel() throws Exception { // Doc without join ParsedDocument doc = docMapper.parse( - new SourceToParse("test", "0", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().endObject()), XContentType.JSON) + new SourceToParse( + "test", + "0", + BytesReference.bytes(MediaTypeRegistry.JSON.contentBuilder().startObject().endObject()), + MediaTypeRegistry.JSON + ) ); assertNull(doc.rootDoc().getBinaryValue("join_field")); @@ -90,7 +93,7 @@ public void testSingleLevel() throws Exception { "test", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("join_field", "parent").endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ); assertEquals("1", doc.rootDoc().getBinaryValue("join_field#parent").utf8ToString()); @@ -110,7 +113,7 @@ public void testSingleLevel() throws Exception { .endObject() .endObject() ), - XContentType.JSON, + MediaTypeRegistry.JSON, "1" ) ); @@ -125,7 +128,7 @@ public void testSingleLevel() throws Exception { "test", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("join_field", "unknown").endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ) ); @@ -133,19 +136,18 @@ public void testSingleLevel() throws Exception { } public void testParentIdSpecifiedAsNumber() throws Exception { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("join_field") - .field("type", "join") - .startObject("relations") - .field("parent", "child") - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("join_field") + 
.field("type", "join") + .startObject("relations") + .field("parent", "child") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); IndexService service = createIndex("test"); DocumentMapper docMapper = service.mapperService() .merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); @@ -162,7 +164,7 @@ public void testParentIdSpecifiedAsNumber() throws Exception { .endObject() .endObject() ), - XContentType.JSON, + MediaTypeRegistry.JSON, "1" ) ); @@ -181,7 +183,7 @@ public void testParentIdSpecifiedAsNumber() throws Exception { .endObject() .endObject() ), - XContentType.JSON, + MediaTypeRegistry.JSON, "1" ) ); @@ -190,20 +192,19 @@ public void testParentIdSpecifiedAsNumber() throws Exception { } public void testMultipleLevels() throws Exception { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("join_field") - .field("type", "join") - .startObject("relations") - .field("parent", "child") - .field("child", "grand_child") - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("join_field") + .field("type", "join") + .startObject("relations") + .field("parent", "child") + .field("child", "grand_child") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); IndexService service = createIndex("test"); DocumentMapper docMapper = service.mapperService() .merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); @@ -211,7 +212,12 @@ public void testMultipleLevels() throws Exception { // Doc without join ParsedDocument doc = docMapper.parse( - new SourceToParse("test", "0", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().endObject()), XContentType.JSON) + new SourceToParse( + "test", + "0", + BytesReference.bytes(XContentFactory.jsonBuilder().startObject().endObject()), + MediaTypeRegistry.JSON + ) ); assertNull(doc.rootDoc().getBinaryValue("join_field")); @@ -221,7 +227,7 @@ public void testMultipleLevels() throws Exception { "test", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("join_field", "parent").endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ); assertEquals("1", doc.rootDoc().getBinaryValue("join_field#parent").utf8ToString()); @@ -241,7 +247,7 @@ public void testMultipleLevels() throws Exception { .endObject() .endObject() ), - XContentType.JSON, + MediaTypeRegistry.JSON, "1" ) ); @@ -257,7 +263,7 @@ public void testMultipleLevels() throws Exception { "test", "2", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("join_field", "child").endObject()), - XContentType.JSON, + MediaTypeRegistry.JSON, "1" ) ) @@ -280,7 +286,7 @@ public void testMultipleLevels() throws Exception { .endObject() .endObject() ), - XContentType.JSON + MediaTypeRegistry.JSON ) ) ); @@ -300,7 +306,7 @@ public void testMultipleLevels() throws Exception { .endObject() .endObject() ), - XContentType.JSON, + MediaTypeRegistry.JSON, "1" ) ); @@ -315,7 +321,7 @@ public void testMultipleLevels() throws Exception { "test", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("join_field", "unknown").endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ) ); @@ -323,39 +329,37 @@ public void testMultipleLevels() throws Exception { } public void testUpdateRelations() throws Exception { - String mapping = 
Strings.toString( - XContentFactory.jsonBuilder() + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("join_field") + .field("type", "join") + .startObject("relations") + .field("parent", "child") + .array("child", "grand_child1", "grand_child2") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); + IndexService indexService = createIndex("test"); + DocumentMapper docMapper = indexService.mapperService() + .merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); + assertTrue(docMapper.mappers().getMapper("join_field") == ParentJoinFieldMapper.getMapper(indexService.mapperService())); + + { + final String updateMapping = XContentFactory.jsonBuilder() .startObject() .startObject("properties") .startObject("join_field") .field("type", "join") .startObject("relations") - .field("parent", "child") .array("child", "grand_child1", "grand_child2") .endObject() .endObject() .endObject() .endObject() - ); - IndexService indexService = createIndex("test"); - DocumentMapper docMapper = indexService.mapperService() - .merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); - assertTrue(docMapper.mappers().getMapper("join_field") == ParentJoinFieldMapper.getMapper(indexService.mapperService())); - - { - final String updateMapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("join_field") - .field("type", "join") - .startObject("relations") - .array("child", "grand_child1", "grand_child2") - .endObject() - .endObject() - .endObject() - .endObject() - ); + .toString(); IllegalArgumentException exc = expectThrows( IllegalArgumentException.class, () -> indexService.mapperService() @@ -365,20 +369,19 @@ public void testUpdateRelations() throws Exception { } { - final String updateMapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("join_field") - .field("type", "join") - .startObject("relations") - .field("parent", "child") - .field("child", "grand_child1") - .endObject() - .endObject() - .endObject() - .endObject() - ); + final String updateMapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("join_field") + .field("type", "join") + .startObject("relations") + .field("parent", "child") + .field("child", "grand_child1") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); IllegalArgumentException exc = expectThrows( IllegalArgumentException.class, () -> indexService.mapperService() @@ -388,21 +391,20 @@ public void testUpdateRelations() throws Exception { } { - final String updateMapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("join_field") - .field("type", "join") - .startObject("relations") - .field("uber_parent", "parent") - .field("parent", "child") - .array("child", "grand_child1", "grand_child2") - .endObject() - .endObject() - .endObject() - .endObject() - ); + final String updateMapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("join_field") + .field("type", "join") + .startObject("relations") + .field("uber_parent", "parent") + .field("parent", "child") + .array("child", "grand_child1", "grand_child2") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); IllegalArgumentException exc = expectThrows( 
IllegalArgumentException.class, () -> indexService.mapperService() @@ -412,21 +414,20 @@ public void testUpdateRelations() throws Exception { } { - final String updateMapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("join_field") - .field("type", "join") - .startObject("relations") - .field("parent", "child") - .array("child", "grand_child1", "grand_child2") - .field("grand_child2", "grand_grand_child") - .endObject() - .endObject() - .endObject() - .endObject() - ); + final String updateMapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("join_field") + .field("type", "join") + .startObject("relations") + .field("parent", "child") + .array("child", "grand_child1", "grand_child2") + .field("grand_child2", "grand_grand_child") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); IllegalArgumentException exc = expectThrows( IllegalArgumentException.class, () -> indexService.mapperService() @@ -436,20 +437,19 @@ public void testUpdateRelations() throws Exception { } { - final String updateMapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("join_field") - .field("type", "join") - .startObject("relations") - .array("parent", "child", "child2") - .array("child", "grand_child1", "grand_child2") - .endObject() - .endObject() - .endObject() - .endObject() - ); + final String updateMapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("join_field") + .field("type", "join") + .startObject("relations") + .array("parent", "child", "child2") + .array("child", "grand_child1", "grand_child2") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); docMapper = indexService.mapperService() .merge("type", new CompressedXContent(updateMapping), MapperService.MergeReason.MAPPING_UPDATE); ParentJoinFieldMapper mapper = ParentJoinFieldMapper.getMapper(indexService.mapperService()); @@ -462,21 +462,20 @@ public void testUpdateRelations() throws Exception { } { - final String updateMapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("join_field") - .field("type", "join") - .startObject("relations") - .array("parent", "child", "child2") - .array("child", "grand_child1", "grand_child2") - .array("other", "child_other1", "child_other2") - .endObject() - .endObject() - .endObject() - .endObject() - ); + final String updateMapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("join_field") + .field("type", "join") + .startObject("relations") + .array("parent", "child", "child2") + .array("child", "grand_child1", "grand_child2") + .array("other", "child_other1", "child_other2") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); docMapper = indexService.mapperService() .merge("type", new CompressedXContent(updateMapping), MapperService.MergeReason.MAPPING_UPDATE); ParentJoinFieldMapper mapper = ParentJoinFieldMapper.getMapper(indexService.mapperService()); @@ -492,23 +491,22 @@ public void testUpdateRelations() throws Exception { } public void testInvalidJoinFieldInsideObject() throws Exception { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("object") - .startObject("properties") - .startObject("join_field") - .field("type", 
"join") - .startObject("relations") - .field("parent", "child") - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("object") + .startObject("properties") + .startObject("join_field") + .field("type", "join") + .startObject("relations") + .field("parent", "child") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); IndexService indexService = createIndex("test"); MapperParsingException exc = expectThrows( MapperParsingException.class, @@ -521,24 +519,23 @@ public void testInvalidJoinFieldInsideObject() throws Exception { } public void testInvalidJoinFieldInsideMultiFields() throws Exception { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("number") - .field("type", "integer") - .startObject("fields") - .startObject("join_field") - .field("type", "join") - .startObject("relations") - .field("parent", "child") - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("number") + .field("type", "integer") + .startObject("fields") + .startObject("join_field") + .field("type", "join") + .startObject("relations") + .field("parent", "child") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); IndexService indexService = createIndex("test"); MapperParsingException exc = expectThrows( MapperParsingException.class, @@ -553,26 +550,25 @@ public void testInvalidJoinFieldInsideMultiFields() throws Exception { public void testMultipleJoinFields() throws Exception { IndexService indexService = createIndex("test"); { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("join_field") - .field("type", "join") - .startObject("relations") - .field("parent", "child") - .field("child", "grand_child") - .endObject() - .endObject() - .startObject("another_join_field") - .field("type", "join") - .startObject("relations") - .field("product", "item") - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("join_field") + .field("type", "join") + .startObject("relations") + .field("parent", "child") + .field("child", "grand_child") + .endObject() + .endObject() + .startObject("another_join_field") + .field("type", "join") + .startObject("relations") + .field("product", "item") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); MapperParsingException exc = expectThrows( MapperParsingException.class, () -> indexService.mapperService().merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE) @@ -581,43 +577,7 @@ public void testMultipleJoinFields() throws Exception { } { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("join_field") - .field("type", "join") - .startObject("relations") - .field("parent", "child") - .field("child", "grand_child") - .endObject() - .endObject() - .endObject() - .endObject() - ); - indexService.mapperService().merge("type", new CompressedXContent(mapping), 
MapperService.MergeReason.MAPPING_UPDATE); - String updateMapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("another_join_field") - .field("type", "join") - .endObject() - .endObject() - .endObject() - ); - MapperParsingException exc = expectThrows( - MapperParsingException.class, - () -> indexService.mapperService() - .merge("type", new CompressedXContent(updateMapping), MapperService.MergeReason.MAPPING_UPDATE) - ); - assertThat(exc.getMessage(), containsString("Field [_parent_join] is defined more than once")); - } - } - - public void testEagerGlobalOrdinals() throws Exception { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() + String mapping = XContentFactory.jsonBuilder() .startObject() .startObject("properties") .startObject("join_field") @@ -629,7 +589,40 @@ public void testEagerGlobalOrdinals() throws Exception { .endObject() .endObject() .endObject() - ); + .toString(); + indexService.mapperService().merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); + String updateMapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("another_join_field") + .field("type", "join") + .endObject() + .endObject() + .endObject() + .toString(); + MapperParsingException exc = expectThrows( + MapperParsingException.class, + () -> indexService.mapperService() + .merge("type", new CompressedXContent(updateMapping), MapperService.MergeReason.MAPPING_UPDATE) + ); + assertThat(exc.getMessage(), containsString("Field [_parent_join] is defined more than once")); + } + } + + public void testEagerGlobalOrdinals() throws Exception { + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("join_field") + .field("type", "join") + .startObject("relations") + .field("parent", "child") + .field("child", "grand_child") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); IndexService service = createIndex("test"); DocumentMapper docMapper = service.mapperService() .merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); @@ -640,21 +633,20 @@ public void testEagerGlobalOrdinals() throws Exception { assertNotNull(service.mapperService().fieldType("join_field#child")); assertTrue(service.mapperService().fieldType("join_field#child").eagerGlobalOrdinals()); - mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("join_field") - .field("type", "join") - .field("eager_global_ordinals", false) - .startObject("relations") - .field("parent", "child") - .field("child", "grand_child") - .endObject() - .endObject() - .endObject() - .endObject() - ); + mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("join_field") + .field("type", "join") + .field("eager_global_ordinals", false) + .startObject("relations") + .field("parent", "child") + .field("child", "grand_child") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); service.mapperService().merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); assertFalse(service.mapperService().fieldType("join_field").eagerGlobalOrdinals()); assertNotNull(service.mapperService().fieldType("join_field#parent")); diff --git a/modules/parent-join/src/test/java/org/opensearch/join/query/HasChildQueryBuilderTests.java 
b/modules/parent-join/src/test/java/org/opensearch/join/query/HasChildQueryBuilderTests.java index 6610b103509b0..96220c247d909 100644 --- a/modules/parent-join/src/test/java/org/opensearch/join/query/HasChildQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/opensearch/join/query/HasChildQueryBuilderTests.java @@ -46,7 +46,6 @@ import org.apache.lucene.search.similarities.Similarity; import org.opensearch.OpenSearchException; import org.opensearch.Version; -import org.opensearch.common.Strings; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.XContentBuilder; @@ -141,7 +140,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws .endObject() .endObject(); - mapperService.merge(TYPE, new CompressedXContent(Strings.toString(mapping)), MapperService.MergeReason.MAPPING_UPDATE); + mapperService.merge(TYPE, new CompressedXContent(mapping.toString()), MapperService.MergeReason.MAPPING_UPDATE); } /** diff --git a/modules/parent-join/src/test/java/org/opensearch/join/query/HasParentQueryBuilderTests.java b/modules/parent-join/src/test/java/org/opensearch/join/query/HasParentQueryBuilderTests.java index 9921b6b040901..63af6873e14af 100644 --- a/modules/parent-join/src/test/java/org/opensearch/join/query/HasParentQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/opensearch/join/query/HasParentQueryBuilderTests.java @@ -37,7 +37,6 @@ import org.apache.lucene.search.join.ScoreMode; import org.opensearch.OpenSearchException; import org.opensearch.Version; -import org.opensearch.common.Strings; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.mapper.MapperService; @@ -122,7 +121,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws .endObject() .endObject(); - mapperService.merge(TYPE, new CompressedXContent(Strings.toString(mapping)), MapperService.MergeReason.MAPPING_UPDATE); + mapperService.merge(TYPE, new CompressedXContent(mapping.toString()), MapperService.MergeReason.MAPPING_UPDATE); } /** diff --git a/modules/parent-join/src/test/java/org/opensearch/join/query/ParentIdQueryBuilderTests.java b/modules/parent-join/src/test/java/org/opensearch/join/query/ParentIdQueryBuilderTests.java index 88da6a6953d1e..ec555448fd218 100644 --- a/modules/parent-join/src/test/java/org/opensearch/join/query/ParentIdQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/opensearch/join/query/ParentIdQueryBuilderTests.java @@ -39,7 +39,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.opensearch.OpenSearchException; -import org.opensearch.common.Strings; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.mapper.MapperService; @@ -111,7 +110,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws .endObject() .endObject(); - mapperService.merge(TYPE, new CompressedXContent(Strings.toString(mapping)), MapperService.MergeReason.MAPPING_UPDATE); + mapperService.merge(TYPE, new CompressedXContent(mapping.toString()), MapperService.MergeReason.MAPPING_UPDATE); } @Override diff --git a/modules/percolator/src/internalClusterTest/java/org/opensearch/percolator/PercolatorQuerySearchIT.java 
b/modules/percolator/src/internalClusterTest/java/org/opensearch/percolator/PercolatorQuerySearchIT.java index 278a8c1cc0ebc..01436404e8a85 100644 --- a/modules/percolator/src/internalClusterTest/java/org/opensearch/percolator/PercolatorQuerySearchIT.java +++ b/modules/percolator/src/internalClusterTest/java/org/opensearch/percolator/PercolatorQuerySearchIT.java @@ -31,19 +31,22 @@ package org.opensearch.percolator; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.search.join.ScoreMode; import org.opensearch.OpenSearchException; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.search.MultiSearchResponse; import org.opensearch.action.search.SearchResponse; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.DistanceUnit; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.geo.GeoModulePlugin; import org.opensearch.index.mapper.MapperParsingException; import org.opensearch.index.query.MatchPhraseQueryBuilder; @@ -53,7 +56,8 @@ import org.opensearch.plugins.Plugin; import org.opensearch.search.fetch.subphase.highlight.HighlightBuilder; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; + import java.io.IOException; import java.util.Arrays; import java.util.Collection; @@ -75,6 +79,7 @@ import static org.opensearch.index.query.QueryBuilders.spanNotQuery; import static org.opensearch.index.query.QueryBuilders.spanTermQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchHits; @@ -84,7 +89,19 @@ import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.core.IsNull.notNullValue; -public class PercolatorQuerySearchIT extends OpenSearchIntegTestCase { +public class PercolatorQuerySearchIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public PercolatorQuerySearchIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } @Override protected boolean addMockGeoShapeFieldMapper() { @@ -125,14 +142,16 @@ public void testPercolatorQuery() throws Exception { BytesReference source = BytesReference.bytes(jsonBuilder().startObject().endObject()); logger.info("percolating empty doc"); - SearchResponse response = client().prepareSearch().setQuery(new 
PercolateQueryBuilder("query", source, XContentType.JSON)).get(); + SearchResponse response = client().prepareSearch() + .setQuery(new PercolateQueryBuilder("query", source, MediaTypeRegistry.JSON)) + .get(); assertHitCount(response, 1); assertThat(response.getHits().getAt(0).getId(), equalTo("1")); source = BytesReference.bytes(jsonBuilder().startObject().field("field1", "value").endObject()); logger.info("percolating doc with 1 field"); response = client().prepareSearch() - .setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)) + .setQuery(new PercolateQueryBuilder("query", source, MediaTypeRegistry.JSON)) .addSort("id", SortOrder.ASC) .get(); assertHitCount(response, 2); @@ -144,7 +163,7 @@ public void testPercolatorQuery() throws Exception { source = BytesReference.bytes(jsonBuilder().startObject().field("field1", "value").field("field2", "value").endObject()); logger.info("percolating doc with 2 fields"); response = client().prepareSearch() - .setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)) + .setQuery(new PercolateQueryBuilder("query", source, MediaTypeRegistry.JSON)) .addSort("id", SortOrder.ASC) .get(); assertHitCount(response, 3); @@ -164,7 +183,7 @@ public void testPercolatorQuery() throws Exception { BytesReference.bytes(jsonBuilder().startObject().field("field1", "value").endObject()), BytesReference.bytes(jsonBuilder().startObject().field("field1", "value").field("field2", "value").endObject()) ), - XContentType.JSON + MediaTypeRegistry.JSON ) ) .addSort("id", SortOrder.ASC) @@ -267,44 +286,46 @@ public void testPercolatorRangeQueries() throws Exception { // Test long range: BytesReference source = BytesReference.bytes(jsonBuilder().startObject().field("field1", 12).endObject()); - SearchResponse response = client().prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); + SearchResponse response = client().prepareSearch() + .setQuery(new PercolateQueryBuilder("query", source, MediaTypeRegistry.JSON)) + .get(); logger.info("response={}", response); assertHitCount(response, 2); assertThat(response.getHits().getAt(0).getId(), equalTo("3")); assertThat(response.getHits().getAt(1).getId(), equalTo("1")); source = BytesReference.bytes(jsonBuilder().startObject().field("field1", 11).endObject()); - response = client().prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); + response = client().prepareSearch().setQuery(new PercolateQueryBuilder("query", source, MediaTypeRegistry.JSON)).get(); assertHitCount(response, 1); assertThat(response.getHits().getAt(0).getId(), equalTo("1")); // Test double range: source = BytesReference.bytes(jsonBuilder().startObject().field("field2", 12).endObject()); - response = client().prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); + response = client().prepareSearch().setQuery(new PercolateQueryBuilder("query", source, MediaTypeRegistry.JSON)).get(); assertHitCount(response, 2); assertThat(response.getHits().getAt(0).getId(), equalTo("6")); assertThat(response.getHits().getAt(1).getId(), equalTo("4")); source = BytesReference.bytes(jsonBuilder().startObject().field("field2", 11).endObject()); - response = client().prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); + response = client().prepareSearch().setQuery(new PercolateQueryBuilder("query", source, MediaTypeRegistry.JSON)).get(); assertHitCount(response, 1); 
assertThat(response.getHits().getAt(0).getId(), equalTo("4")); // Test IP range: source = BytesReference.bytes(jsonBuilder().startObject().field("field3", "192.168.1.5").endObject()); - response = client().prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); + response = client().prepareSearch().setQuery(new PercolateQueryBuilder("query", source, MediaTypeRegistry.JSON)).get(); assertHitCount(response, 2); assertThat(response.getHits().getAt(0).getId(), equalTo("9")); assertThat(response.getHits().getAt(1).getId(), equalTo("7")); source = BytesReference.bytes(jsonBuilder().startObject().field("field3", "192.168.1.4").endObject()); - response = client().prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); + response = client().prepareSearch().setQuery(new PercolateQueryBuilder("query", source, MediaTypeRegistry.JSON)).get(); assertHitCount(response, 1); assertThat(response.getHits().getAt(0).getId(), equalTo("7")); // Test date range: source = BytesReference.bytes(jsonBuilder().startObject().field("field4", "2016-05-15").endObject()); - response = client().prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get(); + response = client().prepareSearch().setQuery(new PercolateQueryBuilder("query", source, MediaTypeRegistry.JSON)).get(); assertHitCount(response, 1); assertThat(response.getHits().getAt(0).getId(), equalTo("10")); } @@ -355,7 +376,7 @@ public void testPercolatorGeoQueries() throws Exception { jsonBuilder().startObject().startObject("field1").field("lat", 52.20).field("lon", 4.51).endObject().endObject() ); SearchResponse response = client().prepareSearch() - .setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)) + .setQuery(new PercolateQueryBuilder("query", source, MediaTypeRegistry.JSON)) .addSort("id", SortOrder.ASC) .get(); assertHitCount(response, 3); @@ -390,9 +411,9 @@ public void testPercolatorQueryExistingDocument() throws Exception { ) .get(); - client().prepareIndex("test").setId("4").setSource("{\"id\": \"4\"}", XContentType.JSON).get(); - client().prepareIndex("test").setId("5").setSource(XContentType.JSON, "id", "5", "field1", "value").get(); - client().prepareIndex("test").setId("6").setSource(XContentType.JSON, "id", "6", "field1", "value", "field2", "value").get(); + client().prepareIndex("test").setId("4").setSource("{\"id\": \"4\"}", MediaTypeRegistry.JSON).get(); + client().prepareIndex("test").setId("5").setSource(MediaTypeRegistry.JSON, "id", "5", "field1", "value").get(); + client().prepareIndex("test").setId("6").setSource(MediaTypeRegistry.JSON, "id", "6", "field1", "value", "field2", "value").get(); client().admin().indices().prepareRefresh().get(); logger.info("percolating empty doc"); @@ -432,7 +453,7 @@ public void testPercolatorQueryExistingDocumentSourceDisabled() throws Exception client().prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject()).get(); - client().prepareIndex("test").setId("2").setSource("{}", XContentType.JSON).get(); + client().prepareIndex("test").setId("2").setSource("{}", MediaTypeRegistry.JSON).get(); client().admin().indices().prepareRefresh().get(); logger.info("percolating empty doc with source disabled"); @@ -528,7 +549,7 @@ public void testPercolatorSpecificQueries() throws Exception { .endObject() ); SearchResponse response = client().prepareSearch() - .setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)) + 
.setQuery(new PercolateQueryBuilder("query", source, MediaTypeRegistry.JSON)) .addSort("id", SortOrder.ASC) .get(); assertHitCount(response, 4); @@ -586,7 +607,7 @@ public void testPercolatorQueryWithHighlighting() throws Exception { jsonBuilder().startObject().field("field1", "The quick brown fox jumps over the lazy dog").endObject() ); SearchResponse searchResponse = client().prepareSearch() - .setQuery(new PercolateQueryBuilder("query", document, XContentType.JSON)) + .setQuery(new PercolateQueryBuilder("query", document, MediaTypeRegistry.JSON)) .highlighter(new HighlightBuilder().field("field1")) .addSort("id", SortOrder.ASC) .get(); @@ -619,8 +640,8 @@ public void testPercolatorQueryWithHighlighting() throws Exception { BytesReference document2 = BytesReference.bytes(jsonBuilder().startObject().field("field1", "over the lazy dog").endObject()); searchResponse = client().prepareSearch() .setQuery( - boolQuery().should(new PercolateQueryBuilder("query", document1, XContentType.JSON).setName("query1")) - .should(new PercolateQueryBuilder("query", document2, XContentType.JSON).setName("query2")) + boolQuery().should(new PercolateQueryBuilder("query", document1, MediaTypeRegistry.JSON).setName("query1")) + .should(new PercolateQueryBuilder("query", document2, MediaTypeRegistry.JSON).setName("query2")) ) .highlighter(new HighlightBuilder().field("field1")) .addSort("id", SortOrder.ASC) @@ -659,7 +680,7 @@ public void testPercolatorQueryWithHighlighting() throws Exception { BytesReference.bytes(jsonBuilder().startObject().field("field1", "jumps").endObject()), BytesReference.bytes(jsonBuilder().startObject().field("field1", "brown fox").endObject()) ), - XContentType.JSON + MediaTypeRegistry.JSON ) ) .highlighter(new HighlightBuilder().field("field1")) @@ -712,7 +733,7 @@ public void testPercolatorQueryWithHighlighting() throws Exception { BytesReference.bytes(jsonBuilder().startObject().field("field1", "dog").endObject()), BytesReference.bytes(jsonBuilder().startObject().field("field1", "fox").endObject()) ), - XContentType.JSON + MediaTypeRegistry.JSON ).setName("query1") ) .should( @@ -722,7 +743,7 @@ public void testPercolatorQueryWithHighlighting() throws Exception { BytesReference.bytes(jsonBuilder().startObject().field("field1", "jumps").endObject()), BytesReference.bytes(jsonBuilder().startObject().field("field1", "brown fox").endObject()) ), - XContentType.JSON + MediaTypeRegistry.JSON ).setName("query2") ) ) @@ -811,7 +832,7 @@ public void testTakePositionOffsetGapIntoAccount() throws Exception { client().admin().indices().prepareRefresh().get(); SearchResponse response = client().prepareSearch() - .setQuery(new PercolateQueryBuilder("query", new BytesArray("{\"field\" : [\"brown\", \"fox\"]}"), XContentType.JSON)) + .setQuery(new PercolateQueryBuilder("query", new BytesArray("{\"field\" : [\"brown\", \"fox\"]}"), MediaTypeRegistry.JSON)) .get(); assertHitCount(response, 1); assertThat(response.getHits().getAt(0).getId(), equalTo("2")); @@ -899,7 +920,7 @@ public void testWithMultiplePercolatorFields() throws Exception { BytesReference source = BytesReference.bytes(jsonBuilder().startObject().field("field", "value").endObject()); SearchResponse response = client().prepareSearch() - .setQuery(new PercolateQueryBuilder(queryFieldName, source, XContentType.JSON)) + .setQuery(new PercolateQueryBuilder(queryFieldName, source, MediaTypeRegistry.JSON)) .setIndices("test1") .get(); assertHitCount(response, 1); @@ -907,7 +928,7 @@ public void testWithMultiplePercolatorFields() throws 
Exception { assertThat(response.getHits().getAt(0).getIndex(), equalTo("test1")); response = client().prepareSearch() - .setQuery(new PercolateQueryBuilder("object_field." + queryFieldName, source, XContentType.JSON)) + .setQuery(new PercolateQueryBuilder("object_field." + queryFieldName, source, MediaTypeRegistry.JSON)) .setIndices("test2") .get(); assertHitCount(response, 1); @@ -1012,7 +1033,7 @@ public void testPercolateQueryWithNestedDocuments() throws Exception { .endArray() .endObject() ), - XContentType.JSON + MediaTypeRegistry.JSON ) ) .addSort("id", SortOrder.ASC) @@ -1039,7 +1060,7 @@ public void testPercolateQueryWithNestedDocuments() throws Exception { .endArray() .endObject() ), - XContentType.JSON + MediaTypeRegistry.JSON ) ) .addSort("id", SortOrder.ASC) @@ -1052,7 +1073,7 @@ public void testPercolateQueryWithNestedDocuments() throws Exception { new PercolateQueryBuilder( "query", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("companyname", "notstark").endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ) .addSort("id", SortOrder.ASC) @@ -1105,7 +1126,7 @@ public void testPercolateQueryWithNestedDocuments() throws Exception { .endObject() ) ), - XContentType.JSON + MediaTypeRegistry.JSON ) ) .addSort("id", SortOrder.ASC) @@ -1158,7 +1179,7 @@ public void testPercolatorQueryViaMultiSearch() throws Exception { new PercolateQueryBuilder( "query", BytesReference.bytes(jsonBuilder().startObject().field("field1", "b").endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ) ) @@ -1178,7 +1199,7 @@ public void testPercolatorQueryViaMultiSearch() throws Exception { new PercolateQueryBuilder( "query", BytesReference.bytes(jsonBuilder().startObject().field("field1", "b c").endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ) ) @@ -1188,7 +1209,7 @@ public void testPercolatorQueryViaMultiSearch() throws Exception { new PercolateQueryBuilder( "query", BytesReference.bytes(jsonBuilder().startObject().field("field1", "d").endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ) ) @@ -1248,7 +1269,7 @@ public void testDisallowExpensiveQueries() throws IOException { // Execute with search.allow_expensive_queries = null => default value = false => success BytesReference source = BytesReference.bytes(jsonBuilder().startObject().field("field1", "value").endObject()); SearchResponse response = client().prepareSearch() - .setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)) + .setQuery(new PercolateQueryBuilder("query", source, MediaTypeRegistry.JSON)) .get(); assertHitCount(response, 1); assertThat(response.getHits().getAt(0).getId(), equalTo("1")); @@ -1261,7 +1282,7 @@ public void testDisallowExpensiveQueries() throws IOException { OpenSearchException e = expectThrows( OpenSearchException.class, - () -> client().prepareSearch().setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)).get() + () -> client().prepareSearch().setQuery(new PercolateQueryBuilder("query", source, MediaTypeRegistry.JSON)).get() ); assertEquals( "[percolate] queries cannot be executed when 'search.allow_expensive_queries' is set to false.", @@ -1273,7 +1294,7 @@ public void testDisallowExpensiveQueries() throws IOException { updateSettingsRequest.persistentSettings(Settings.builder().put("search.allow_expensive_queries", true)); assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); - response = client().prepareSearch().setQuery(new PercolateQueryBuilder("query", source, 
XContentType.JSON)).get(); + response = client().prepareSearch().setQuery(new PercolateQueryBuilder("query", source, MediaTypeRegistry.JSON)).get(); assertHitCount(response, 1); assertThat(response.getHits().getAt(0).getId(), equalTo("1")); assertThat(response.getHits().getAt(0).getFields().get("_percolator_document_slot").getValue(), equalTo(0)); @@ -1307,7 +1328,7 @@ public void testWrappedWithConstantScore() throws Exception { new PercolateQueryBuilder( "q", BytesReference.bytes(jsonBuilder().startObject().field("d", "2020-02-01T15:00:00.000+11:00").endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ) .get(); @@ -1318,7 +1339,7 @@ public void testWrappedWithConstantScore() throws Exception { new PercolateQueryBuilder( "q", BytesReference.bytes(jsonBuilder().startObject().field("d", "2020-02-01T15:00:00.000+11:00").endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ) .addSort("_doc", SortOrder.ASC) @@ -1331,7 +1352,7 @@ public void testWrappedWithConstantScore() throws Exception { new PercolateQueryBuilder( "q", BytesReference.bytes(jsonBuilder().startObject().field("d", "2020-02-01T15:00:00.000+11:00").endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ) ) diff --git a/modules/percolator/src/main/java/org/opensearch/percolator/PercolateQuery.java b/modules/percolator/src/main/java/org/opensearch/percolator/PercolateQuery.java index 86d30f009e709..99907b48d4fdd 100644 --- a/modules/percolator/src/main/java/org/opensearch/percolator/PercolateQuery.java +++ b/modules/percolator/src/main/java/org/opensearch/percolator/PercolateQuery.java @@ -32,8 +32,9 @@ package org.opensearch.percolator; -import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.BooleanClause.Occur; +import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.IndexSearcher; @@ -45,13 +46,11 @@ import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TwoPhaseIterator; import org.apache.lucene.search.Weight; -import org.apache.lucene.search.BooleanClause.Occur; -import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.Bits; import org.opensearch.common.CheckedFunction; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.lucene.Lucene; +import org.opensearch.core.common.bytes.BytesReference; import java.io.IOException; import java.util.List; @@ -89,8 +88,8 @@ final class PercolateQuery extends Query implements Accountable { } @Override - public Query rewrite(IndexReader reader) throws IOException { - Query rewritten = candidateMatchesQuery.rewrite(reader); + public Query rewrite(IndexSearcher searcher) throws IOException { + Query rewritten = candidateMatchesQuery.rewrite(searcher); if (rewritten != candidateMatchesQuery) { return new PercolateQuery( name, diff --git a/modules/percolator/src/main/java/org/opensearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/opensearch/percolator/PercolateQueryBuilder.java index d5b61d5c5a517..6933bfbef4666 100644 --- a/modules/percolator/src/main/java/org/opensearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/opensearch/percolator/PercolateQueryBuilder.java @@ -57,24 +57,28 @@ import org.opensearch.OpenSearchException; import org.opensearch.ResourceNotFoundException; import org.opensearch.Version; 
-import org.opensearch.action.ActionListener; import org.opensearch.action.get.GetRequest; import org.opensearch.common.SetOnce; +import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.InputStreamStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.LoggingDeprecationHandler; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.core.ParseField; +import org.opensearch.core.indices.breaker.CircuitBreakerService; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.analysis.FieldNameAnalyzer; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexFieldDataCache; @@ -90,8 +94,6 @@ import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.query.QueryShardException; import org.opensearch.index.query.Rewriteable; -import org.opensearch.indices.breaker.CircuitBreakerService; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; import java.io.ByteArrayInputStream; import java.io.IOException; @@ -123,7 +125,7 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder<PercolateQueryBu private final String field; private String name; private final List<BytesReference> documents; - private final XContentType documentXContentType; + private final MediaType documentXContentType; private final String indexedDocumentIndex; private final String indexedDocumentId; @@ -137,10 +139,10 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder<PercolateQueryBu * * @param field The field that contains the percolator query * @param document The binary blob containing document to percolate - * @param documentXContentType The content type of the binary blob containing the document to percolate + * @param documentMediaType The content type of the binary blob containing the document to percolate */ - public PercolateQueryBuilder(String field, BytesReference document, XContentType documentXContentType) { - this(field, Collections.singletonList(document), documentXContentType); + public PercolateQueryBuilder(String field, BytesReference document, MediaType documentMediaType) { + this(field, Collections.singletonList(document), documentMediaType); } /** @@ -150,7 +152,7 @@ public PercolateQueryBuilder(String field, BytesReference document, XContentType * @param documents The binary blob containing document to percolate * @param documentXContentType The content type of the binary blob containing the document to 
percolate */ - public PercolateQueryBuilder(String field, List<BytesReference> documents, XContentType documentXContentType) { + public PercolateQueryBuilder(String field, List<BytesReference> documents, MediaType documentXContentType) { if (field == null) { throw new IllegalArgumentException("[field] is a required argument"); } @@ -252,7 +254,11 @@ protected PercolateQueryBuilder(String field, Supplier<BytesReference> documentS } documents = in.readList(StreamInput::readBytesReference); if (documents.isEmpty() == false) { - documentXContentType = in.readEnum(XContentType.class); + if (in.getVersion().onOrAfter(Version.V_2_10_0)) { + documentXContentType = in.readMediaType(); + } else { + documentXContentType = in.readEnum(XContentType.class); + } } else { documentXContentType = null; } @@ -298,7 +304,11 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeBytesReference(document); } if (documents.isEmpty() == false) { - out.writeEnum(documentXContentType); + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { + documentXContentType.writeTo(out); + } else { + out.writeEnum((XContentType) documentXContentType); + } } } @@ -359,9 +369,9 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep if (indexedDocId != null) { return new PercolateQueryBuilder(field, indexedDocIndex, indexedDocId, indexDocRouting, indexDocPreference, indexedDocVersion); } else if (document != null) { - return new PercolateQueryBuilder(field, Collections.singletonList(document), XContentType.JSON); + return new PercolateQueryBuilder(field, Collections.singletonList(document), MediaTypeRegistry.JSON); } else { - return new PercolateQueryBuilder(field, documents, XContentType.JSON); + return new PercolateQueryBuilder(field, documents, MediaTypeRegistry.JSON); } }); static { @@ -432,7 +442,7 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryShardContext) { PercolateQueryBuilder rewritten = new PercolateQueryBuilder( field, Collections.singletonList(source), - XContentHelper.xContentType(source) + MediaTypeRegistry.xContentType(source) ); if (name != null) { rewritten.setName(name); @@ -560,7 +570,7 @@ public List<BytesReference> getDocuments() { } // pkg-private for testing - XContentType getXContentType() { + MediaType getXContentType() { return documentXContentType; } diff --git a/modules/percolator/src/main/java/org/opensearch/percolator/PercolatorFieldMapper.java b/modules/percolator/src/main/java/org/opensearch/percolator/PercolatorFieldMapper.java index a2fb9e56fd9f0..e30ce218ed5ff 100644 --- a/modules/percolator/src/main/java/org/opensearch/percolator/PercolatorFieldMapper.java +++ b/modules/percolator/src/main/java/org/opensearch/percolator/PercolatorFieldMapper.java @@ -56,14 +56,14 @@ import org.apache.lucene.util.BytesRefBuilder; import org.opensearch.Version; import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.core.common.ParsingException; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; import org.opensearch.common.hash.MurmurHash3; -import org.opensearch.core.common.io.stream.OutputStreamStreamOutput; import org.opensearch.common.lucene.search.Queries; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.OutputStreamStreamOutput; import 
org.opensearch.core.xcontent.XContentLocation; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.mapper.BinaryFieldMapper; diff --git a/modules/percolator/src/main/java/org/opensearch/percolator/PercolatorHighlightSubFetchPhase.java b/modules/percolator/src/main/java/org/opensearch/percolator/PercolatorHighlightSubFetchPhase.java index 91c0d2fad44b4..78e5980e88d5c 100644 --- a/modules/percolator/src/main/java/org/opensearch/percolator/PercolatorHighlightSubFetchPhase.java +++ b/modules/percolator/src/main/java/org/opensearch/percolator/PercolatorHighlightSubFetchPhase.java @@ -36,8 +36,8 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryVisitor; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.document.DocumentField; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.search.SearchHit; import org.opensearch.search.fetch.FetchContext; import org.opensearch.search.fetch.FetchSubPhase; diff --git a/modules/percolator/src/main/java/org/opensearch/percolator/PercolatorMatchedSlotSubFetchPhase.java b/modules/percolator/src/main/java/org/opensearch/percolator/PercolatorMatchedSlotSubFetchPhase.java index a157a20f5f2c4..0884d534849da 100644 --- a/modules/percolator/src/main/java/org/opensearch/percolator/PercolatorMatchedSlotSubFetchPhase.java +++ b/modules/percolator/src/main/java/org/opensearch/percolator/PercolatorMatchedSlotSubFetchPhase.java @@ -57,8 +57,8 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; -import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS; import static org.opensearch.percolator.PercolatorHighlightSubFetchPhase.locatePercolatorQuery; +import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS; /** * Adds a special field to a percolator query hit to indicate which documents matched with the percolator query. 
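The `doWriteTo`/stream-constructor hunks in `PercolateQueryBuilder` above gate the new `MediaType` wire format on the remote node's version, so mixed-version clusters keep interoperating during a rolling upgrade. A sketch of that pattern, lifted from the hunks; the `MediaTypeWire` helper class is illustrative:

```java
import org.opensearch.Version;
import org.opensearch.common.xcontent.XContentType;
import org.opensearch.core.common.io.stream.StreamInput;
import org.opensearch.core.common.io.stream.StreamOutput;
import org.opensearch.core.xcontent.MediaType;

import java.io.IOException;

final class MediaTypeWire {

    static MediaType read(StreamInput in) throws IOException {
        return in.getVersion().onOrAfter(Version.V_2_10_0)
            ? in.readMediaType()                // 2.10+: generic MediaType representation
            : in.readEnum(XContentType.class);  // pre-2.10: legacy enum ordinal
    }

    static void write(StreamOutput out, MediaType type) throws IOException {
        if (out.getVersion().onOrAfter(Version.V_2_10_0)) {
            type.writeTo(out);
        } else {
            // Safe downcast: pre-2.10 nodes only ever produced XContentType values.
            out.writeEnum((XContentType) type);
        }
    }
}
```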
diff --git a/modules/percolator/src/test/java/org/opensearch/percolator/CandidateQueryTests.java b/modules/percolator/src/test/java/org/opensearch/percolator/CandidateQueryTests.java index 3b953fcfe65e1..143b0942deb75 100644 --- a/modules/percolator/src/test/java/org/opensearch/percolator/CandidateQueryTests.java +++ b/modules/percolator/src/test/java/org/opensearch/percolator/CandidateQueryTests.java @@ -94,14 +94,13 @@ import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.CheckedFunction; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.compress.CompressedXContent; -import org.opensearch.common.lucene.search.function.FunctionScoreQuery; import org.opensearch.common.geo.ShapeRelation; +import org.opensearch.common.lucene.search.function.FunctionScoreQuery; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.DocumentMapper; @@ -112,9 +111,9 @@ import org.opensearch.index.query.QueryShardContext; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchSingleNodeTestCase; +import org.opensearch.test.VersionUtils; import org.junit.After; import org.junit.Before; -import org.opensearch.test.VersionUtils; import java.io.IOException; import java.util.ArrayList; @@ -163,51 +162,49 @@ public void init() throws Exception { indexService = createIndex(indexName, Settings.EMPTY); mapperService = indexService.mapperService(); - String mapper = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("properties") - .startObject("int_field") - .field("type", "integer") - .endObject() - .startObject("long_field") - .field("type", "long") - .endObject() - .startObject("half_float_field") - .field("type", "half_float") - .endObject() - .startObject("float_field") - .field("type", "float") - .endObject() - .startObject("double_field") - .field("type", "double") - .endObject() - .startObject("ip_field") - .field("type", "ip") - .endObject() - .startObject("field") - .field("type", "keyword") - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapper = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("properties") + .startObject("int_field") + .field("type", "integer") + .endObject() + .startObject("long_field") + .field("type", "long") + .endObject() + .startObject("half_float_field") + .field("type", "half_float") + .endObject() + .startObject("float_field") + .field("type", "float") + .endObject() + .startObject("double_field") + .field("type", "double") + .endObject() + .startObject("ip_field") + .field("type", "ip") + .endObject() + .startObject("field") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); documentMapper = mapperService.merge("type", new CompressedXContent(mapper), MapperService.MergeReason.MAPPING_UPDATE); String queryField = "query_field"; - String percolatorMapper = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("properties") - .startObject(queryField) - .field("type", "percolator") - 
.endObject() - .endObject() - .endObject() - .endObject() - ); + String percolatorMapper = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("properties") + .startObject(queryField) + .field("type", "percolator") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); mapperService.merge("type", new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE); fieldMapper = (PercolatorFieldMapper) mapperService.documentMapper().mappers().getMapper(queryField); fieldType = (PercolatorFieldMapper.PercolatorFieldType) fieldMapper.fieldType(); @@ -1275,7 +1272,7 @@ private CustomQuery(Term term) { } @Override - public Query rewrite(IndexReader reader) throws IOException { + public Query rewrite(IndexSearcher searcher) throws IOException { return new TermQuery(term); } diff --git a/modules/percolator/src/test/java/org/opensearch/percolator/PercolateQueryBuilderTests.java b/modules/percolator/src/test/java/org/opensearch/percolator/PercolateQueryBuilderTests.java index 34ed195cd0f23..c3f16a2903700 100644 --- a/modules/percolator/src/test/java/org/opensearch/percolator/PercolateQueryBuilderTests.java +++ b/modules/percolator/src/test/java/org/opensearch/percolator/PercolateQueryBuilderTests.java @@ -38,15 +38,14 @@ import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest; import org.opensearch.action.get.GetRequest; import org.opensearch.action.get.GetResponse; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.lucene.uid.Versions; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.get.GetResult; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.QueryBuilder; @@ -109,15 +108,13 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws mapperService.merge( docType, new CompressedXContent( - Strings.toString( - PutMappingRequest.simpleMapping(queryField, "type=percolator", aliasField, "type=alias,path=" + queryField) - ) + PutMappingRequest.simpleMapping(queryField, "type=percolator", aliasField, "type=alias,path=" + queryField).toString() ), MapperService.MergeReason.MAPPING_UPDATE ); mapperService.merge( docType, - new CompressedXContent(Strings.toString(PutMappingRequest.simpleMapping(TEXT_FIELD_NAME, "type=text"))), + new CompressedXContent(PutMappingRequest.simpleMapping(TEXT_FIELD_NAME, "type=text").toString()), MapperService.MergeReason.MAPPING_UPDATE ); } @@ -155,7 +152,7 @@ private PercolateQueryBuilder doCreateTestQueryBuilder(boolean indexedDocument) indexedDocumentVersion ); } else { - queryBuilder = new PercolateQueryBuilder(queryField, documentSource, XContentType.JSON); + queryBuilder = new PercolateQueryBuilder(queryField, documentSource, MediaTypeRegistry.JSON); } if (randomBoolean()) { queryBuilder.setName(randomAlphaOfLength(4)); @@ -224,7 +221,7 @@ public void testMustRewrite() throws IOException { IllegalStateException e = 
expectThrows(IllegalStateException.class, () -> pqb.toQuery(createShardContext())); assertThat(e.getMessage(), equalTo("query builder must be rewritten first")); QueryBuilder rewrite = rewriteAndFetch(pqb, createShardContext()); - PercolateQueryBuilder geoShapeQueryBuilder = new PercolateQueryBuilder(pqb.getField(), documentSource, XContentType.JSON); + PercolateQueryBuilder geoShapeQueryBuilder = new PercolateQueryBuilder(pqb.getField(), documentSource, MediaTypeRegistry.JSON); assertEquals(geoShapeQueryBuilder, rewrite); } @@ -246,13 +243,13 @@ protected Set<String> getObjectsHoldingArbitraryContent() { public void testRequiredParameters() { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { - new PercolateQueryBuilder(null, new BytesArray("{}"), XContentType.JSON); + new PercolateQueryBuilder(null, new BytesArray("{}"), MediaTypeRegistry.JSON); }); assertThat(e.getMessage(), equalTo("[field] is a required argument")); e = expectThrows( IllegalArgumentException.class, - () -> new PercolateQueryBuilder("_field", (List<BytesReference>) null, XContentType.JSON) + () -> new PercolateQueryBuilder("_field", (List<BytesReference>) null, MediaTypeRegistry.JSON) ); assertThat(e.getMessage(), equalTo("[document] is a required argument")); diff --git a/modules/percolator/src/test/java/org/opensearch/percolator/PercolateWithNestedQueryBuilderTests.java b/modules/percolator/src/test/java/org/opensearch/percolator/PercolateWithNestedQueryBuilderTests.java index 748b79d70af07..a5682928863cc 100644 --- a/modules/percolator/src/test/java/org/opensearch/percolator/PercolateWithNestedQueryBuilderTests.java +++ b/modules/percolator/src/test/java/org/opensearch/percolator/PercolateWithNestedQueryBuilderTests.java @@ -33,10 +33,9 @@ package org.opensearch.percolator; import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.compress.CompressedXContent; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryShardContext; @@ -50,7 +49,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws super.initializeAdditionalMappings(mapperService); mapperService.merge( "_doc", - new CompressedXContent(Strings.toString(PutMappingRequest.simpleMapping("some_nested_object", "type=nested"))), + new CompressedXContent(PutMappingRequest.simpleMapping("some_nested_object", "type=nested").toString()), MapperService.MergeReason.MAPPING_UPDATE ); } @@ -58,7 +57,11 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws public void testDetectsNestedDocuments() throws IOException { QueryShardContext shardContext = createShardContext(); - PercolateQueryBuilder builder = new PercolateQueryBuilder(queryField, new BytesArray("{ \"foo\": \"bar\" }"), XContentType.JSON); + PercolateQueryBuilder builder = new PercolateQueryBuilder( + queryField, + new BytesArray("{ \"foo\": \"bar\" }"), + MediaTypeRegistry.JSON + ); QueryBuilder rewrittenBuilder = rewriteAndFetch(builder, shardContext); PercolateQuery query = (PercolateQuery) rewrittenBuilder.toQuery(shardContext); assertFalse(query.excludesNestedDocs()); @@ -66,7 +69,7 @@ public void testDetectsNestedDocuments() throws 
IOException { builder = new PercolateQueryBuilder( queryField, new BytesArray("{ \"foo\": \"bar\", \"some_nested_object\": [ { \"baz\": 42 } ] }"), - XContentType.JSON + MediaTypeRegistry.JSON ); rewrittenBuilder = rewriteAndFetch(builder, shardContext); query = (PercolateQuery) rewrittenBuilder.toQuery(shardContext); diff --git a/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorFieldMapperTests.java b/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorFieldMapperTests.java index 77a4718b1d755..ea04f21be4cee 100644 --- a/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorFieldMapperTests.java +++ b/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorFieldMapperTests.java @@ -56,21 +56,20 @@ import org.opensearch.Version; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.hash.MurmurHash3; +import org.opensearch.common.network.InetAddresses; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.InputStreamStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.network.InetAddresses; -import org.opensearch.common.settings.Settings; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.DocumentMapper; @@ -101,8 +100,8 @@ import org.opensearch.plugins.Plugin; import org.opensearch.script.MockScriptPlugin; import org.opensearch.script.Script; -import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.junit.Before; import java.io.ByteArrayInputStream; @@ -163,67 +162,65 @@ public void init() throws Exception { indexService = createIndex("test"); mapperService = indexService.mapperService(); - String mapper = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("field") - .field("type", "text") - .endObject() - .startObject("field1") - .field("type", "text") - .endObject() - .startObject("field2") - .field("type", "text") - .endObject() - .startObject("_field3") - .field("type", "text") - .endObject() - .startObject("field4") - .field("type", "text") - .endObject() - .startObject("number_field1") - .field("type", "integer") - .endObject() - .startObject("number_field2") - .field("type", "long") - .endObject() - .startObject("number_field3") - .field("type", "long") - .endObject() - .startObject("number_field4") - .field("type", "half_float") - .endObject() - .startObject("number_field5") - .field("type", "float") - .endObject() - 
.startObject("number_field6") - .field("type", "double") - .endObject() - .startObject("number_field7") - .field("type", "ip") - .endObject() - .startObject("date_field") - .field("type", "date") - .endObject() - .endObject() - .endObject() - ); + String mapper = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("field") + .field("type", "text") + .endObject() + .startObject("field1") + .field("type", "text") + .endObject() + .startObject("field2") + .field("type", "text") + .endObject() + .startObject("_field3") + .field("type", "text") + .endObject() + .startObject("field4") + .field("type", "text") + .endObject() + .startObject("number_field1") + .field("type", "integer") + .endObject() + .startObject("number_field2") + .field("type", "long") + .endObject() + .startObject("number_field3") + .field("type", "long") + .endObject() + .startObject("number_field4") + .field("type", "half_float") + .endObject() + .startObject("number_field5") + .field("type", "float") + .endObject() + .startObject("number_field6") + .field("type", "double") + .endObject() + .startObject("number_field7") + .field("type", "ip") + .endObject() + .startObject("date_field") + .field("type", "date") + .endObject() + .endObject() + .endObject() + .toString(); mapperService.merge(MapperService.SINGLE_MAPPING_NAME, new CompressedXContent(mapper), MapperService.MergeReason.MAPPING_UPDATE); } private void addQueryFieldMappings() throws Exception { fieldName = randomAlphaOfLength(4); - String percolatorMapper = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject(fieldName) - .field("type", "percolator") - .endObject() - .endObject() - .endObject() - ); + String percolatorMapper = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject(fieldName) + .field("type", "percolator") + .endObject() + .endObject() + .endObject() + .toString(); mapperService.merge( MapperService.SINGLE_MAPPING_NAME, new CompressedXContent(percolatorMapper), @@ -560,7 +557,7 @@ public void testPercolatorFieldMapper() throws Exception { "test", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field(fieldName, queryBuilder).endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ); @@ -580,7 +577,7 @@ public void testPercolatorFieldMapper() throws Exception { "test", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field(fieldName, queryBuilder).endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ); assertThat(doc.rootDoc().getFields(fieldType.extractionResultField.name()).length, equalTo(1)); @@ -597,7 +594,7 @@ public void testPercolatorFieldMapper() throws Exception { "test", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field(fieldName, queryBuilder).endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ); assertThat(doc.rootDoc().getFields(fieldType.extractionResultField.name()).length, equalTo(1)); @@ -625,7 +622,7 @@ public void testStoringQueries() throws Exception { "test", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field(fieldName, query).endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ); BytesRef qbSource = doc.rootDoc().getFields(fieldType.queryBuilderField.name())[0].binaryValue(); @@ -643,7 +640,7 @@ public void testQueryWithRewrite() throws Exception { "test", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field(fieldName, queryBuilder).endObject()), - 
XContentType.JSON + MediaTypeRegistry.JSON ) ); BytesRef qbSource = doc.rootDoc().getFields(fieldType.queryBuilderField.name())[0].binaryValue(); @@ -669,7 +666,7 @@ public void testPercolatorFieldMapperUnMappedField() throws Exception { BytesReference.bytes( XContentFactory.jsonBuilder().startObject().field(fieldName, termQuery("unmapped_field", "value")).endObject() ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); }); @@ -685,7 +682,7 @@ public void testPercolatorFieldMapper_noQuery() throws Exception { "test", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ); assertThat(doc.rootDoc().getFields(fieldType.queryBuilderField.name()).length, equalTo(0)); @@ -697,7 +694,7 @@ public void testPercolatorFieldMapper_noQuery() throws Exception { "test", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().nullField(fieldName).endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ); } catch (MapperParsingException e) { @@ -710,17 +707,16 @@ public void testAllowNoAdditionalSettings() throws Exception { IndexService indexService = createIndex("test1", Settings.EMPTY); MapperService mapperService = indexService.mapperService(); - String percolatorMapper = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject(fieldName) - .field("type", "percolator") - .field("index", "no") - .endObject() - .endObject() - .endObject() - ); + String percolatorMapper = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject(fieldName) + .field("type", "percolator") + .field("index", "no") + .endObject() + .endObject() + .endObject() + .toString(); MapperParsingException e = expectThrows( MapperParsingException.class, () -> mapperService.merge( @@ -735,21 +731,20 @@ public void testAllowNoAdditionalSettings() throws Exception { // multiple percolator fields are allowed in the mapping, but only one field can be used at index time. 
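The recurring substitution in the percolator hunks above and below swaps the JSON constant from org.opensearch.common.xcontent.XContentType for the one on org.opensearch.core.xcontent.MediaTypeRegistry, consistent with the import moves in this diff that relocate x-content and bytes abstractions into the org.opensearch.core packages. A minimal sketch of the resulting call shape, reusing the PercolateQueryBuilder constructor exactly as it appears in these hunks; the wrapper class, method name, and inline document body are illustrative assumptions, not code from the patch:

    import org.opensearch.core.common.bytes.BytesArray;
    import org.opensearch.core.xcontent.MediaTypeRegistry;
    import org.opensearch.percolator.PercolateQueryBuilder;

    class MediaTypeMigrationSketch {
        // Builds a percolate query over an inline JSON document.
        static PercolateQueryBuilder jsonPercolateQuery() {
            return new PercolateQueryBuilder(
                "query",                                     // name of the percolator field
                new BytesArray("{ \"field1\": \"value\" }"), // document to percolate (arbitrary example)
                MediaTypeRegistry.JSON                       // previously XContentType.JSON
            );
        }
    }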
public void testMultiplePercolatorFields() throws Exception { String typeName = MapperService.SINGLE_MAPPING_NAME; - String percolatorMapper = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject(typeName) - .startObject("properties") - .startObject("query_field1") - .field("type", "percolator") - .endObject() - .startObject("query_field2") - .field("type", "percolator") - .endObject() - .endObject() - .endObject() - .endObject() - ); + String percolatorMapper = XContentFactory.jsonBuilder() + .startObject() + .startObject(typeName) + .startObject("properties") + .startObject("query_field1") + .field("type", "percolator") + .endObject() + .startObject("query_field2") + .field("type", "percolator") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); mapperService.merge(typeName, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE); QueryBuilder queryBuilder = matchQuery("field", "value"); @@ -761,7 +756,7 @@ public void testMultiplePercolatorFields() throws Exception { BytesReference.bytes( jsonBuilder().startObject().field("query_field1", queryBuilder).field("query_field2", queryBuilder).endObject() ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); assertThat(doc.rootDoc().getFields().size(), equalTo(16)); // also includes all other meta fields @@ -775,23 +770,22 @@ public void testMultiplePercolatorFields() throws Exception { // percolator field can be nested under an object field, but only one query can be specified per document public void testNestedPercolatorField() throws Exception { String typeName = MapperService.SINGLE_MAPPING_NAME; - String percolatorMapper = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject(typeName) - .startObject("properties") - .startObject("object_field") - .field("type", "object") - .startObject("properties") - .startObject("query_field") - .field("type", "percolator") - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - ); + String percolatorMapper = XContentFactory.jsonBuilder() + .startObject() + .startObject(typeName) + .startObject("properties") + .startObject("object_field") + .field("type", "object") + .startObject("properties") + .startObject("query_field") + .field("type", "percolator") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); mapperService.merge(typeName, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE); QueryBuilder queryBuilder = matchQuery("field", "value"); @@ -803,7 +797,7 @@ public void testNestedPercolatorField() throws Exception { BytesReference.bytes( jsonBuilder().startObject().startObject("object_field").field("query_field", queryBuilder).endObject().endObject() ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); assertThat(doc.rootDoc().getFields().size(), equalTo(12)); // also includes all other meta fields @@ -828,7 +822,7 @@ public void testNestedPercolatorField() throws Exception { .endArray() .endObject() ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); assertThat(doc.rootDoc().getFields().size(), equalTo(12)); // also includes all other meta fields @@ -853,7 +847,7 @@ public void testNestedPercolatorField() throws Exception { .endArray() .endObject() ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); }); @@ -907,18 +901,17 @@ private void assertQueryBuilder(BytesRef actual, QueryBuilder expected) throws I } public void testEmptyName() throws Exception { - String mapping = 
Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type1") - .startObject("properties") - .startObject("") - .field("type", "percolator") - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type1") + .startObject("properties") + .startObject("") + .field("type", "percolator") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); DocumentMapperParser parser = mapperService.documentMapperParser(); IllegalArgumentException e = expectThrows( @@ -951,10 +944,10 @@ public void testImplicitlySetDefaultScriptLang() throws Exception { BytesReference.bytes( XContentFactory.jsonBuilder() .startObject() - .rawField(fieldName, new BytesArray(Strings.toString(query)).streamInput(), query.contentType()) + .rawField(fieldName, new BytesArray(query.toString()).streamInput(), query.contentType()) .endObject() ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); BytesRef querySource = doc.rootDoc().getFields(fieldType.queryBuilderField.name())[0].binaryValue(); @@ -998,10 +991,10 @@ public void testImplicitlySetDefaultScriptLang() throws Exception { BytesReference.bytes( XContentFactory.jsonBuilder() .startObject() - .rawField(fieldName, new BytesArray(Strings.toString(query)).streamInput(), query.contentType()) + .rawField(fieldName, new BytesArray(query.toString()).streamInput(), query.contentType()) .endObject() ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); querySource = doc.rootDoc().getFields(fieldType.queryBuilderField.name())[0].binaryValue(); @@ -1090,7 +1083,7 @@ public void testDuplicatedClauses() throws Exception { "test", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field(fieldName, qb).endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ); @@ -1115,7 +1108,7 @@ public void testDuplicatedClauses() throws Exception { "test", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field(fieldName, qb).endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ); @@ -1143,7 +1136,7 @@ public void testDuplicatedClauses() throws Exception { "test", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field(fieldName, qb).endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ); diff --git a/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorHighlightSubFetchPhaseTests.java b/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorHighlightSubFetchPhaseTests.java index 08c48e430119a..18ab11864397f 100644 --- a/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorHighlightSubFetchPhaseTests.java +++ b/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorHighlightSubFetchPhaseTests.java @@ -38,17 +38,18 @@ import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.lucene.search.function.FunctionScoreQuery; import org.opensearch.common.lucene.search.function.RandomScoreFunction; +import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.search.fetch.FetchContext; import org.opensearch.search.fetch.subphase.highlight.SearchHighlightContext; import org.opensearch.test.OpenSearchTestCase; -import org.mockito.Mockito; import java.util.Arrays; import java.util.Collections; +import org.mockito.Mockito; + import static 
java.util.Collections.emptyMap; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; diff --git a/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java b/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java index efa5a7a3d5095..ffb1764d7f3d6 100644 --- a/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java +++ b/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java @@ -36,7 +36,6 @@ import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.index.memory.MemoryIndex; import org.apache.lucene.search.MatchAllDocsQuery; @@ -46,6 +45,7 @@ import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TotalHits; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.FixedBitSet; import org.opensearch.index.mapper.SeqNoFieldMapper; import org.opensearch.search.SearchHit; diff --git a/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorQuerySearchTests.java b/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorQuerySearchTests.java index 7f0437a8a29aa..97e80c66e3f4e 100644 --- a/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorQuerySearchTests.java +++ b/modules/percolator/src/test/java/org/opensearch/percolator/PercolatorQuerySearchTests.java @@ -35,11 +35,11 @@ import org.apache.lucene.search.join.ScoreMode; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.WriteRequest; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.settings.Settings; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexService; import org.opensearch.index.cache.bitset.BitsetFilterCache; import org.opensearch.index.engine.Engine; @@ -115,7 +115,7 @@ public void testPercolateScriptQuery() throws IOException { new PercolateQueryBuilder( "query", BytesReference.bytes(jsonBuilder().startObject().field("field1", "b").endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ) .get(); @@ -188,7 +188,7 @@ public void testPercolateQueryWithNestedDocuments_doNotLeakBitsetCacheEntries() .endArray() .endObject() ), - XContentType.JSON + MediaTypeRegistry.JSON ) ) .addSort("_doc", SortOrder.ASC) @@ -269,7 +269,7 @@ public void testPercolateQueryWithNestedDocuments_doLeakFieldDataCacheEntries() doc.endObject(); for (int i = 0; i < 32; i++) { SearchResponse response = client().prepareSearch() - .setQuery(new PercolateQueryBuilder("query", BytesReference.bytes(doc), XContentType.JSON)) + .setQuery(new PercolateQueryBuilder("query", BytesReference.bytes(doc), MediaTypeRegistry.JSON)) .addSort("_doc", SortOrder.ASC) .get(); assertHitCount(response, 1); @@ -293,7 +293,7 @@ public void testMapUnmappedFieldAsText() throws IOException { new PercolateQueryBuilder( "query", 
BytesReference.bytes(jsonBuilder().startObject().field("field1", "value").endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ) .get(); @@ -348,13 +348,13 @@ public void testRangeQueriesWithNow() throws Exception { BytesReference source = BytesReference.bytes( jsonBuilder().startObject().field("field1", "value").field("field2", currentTime[0]).endObject() ); - QueryBuilder queryBuilder = new PercolateQueryBuilder("query", source, XContentType.JSON); + QueryBuilder queryBuilder = new PercolateQueryBuilder("query", source, MediaTypeRegistry.JSON); Query query = queryBuilder.toQuery(queryShardContext); assertThat(searcher.count(query), equalTo(3)); currentTime[0] = currentTime[0] + 10800000; // + 3 hours source = BytesReference.bytes(jsonBuilder().startObject().field("field1", "value").field("field2", currentTime[0]).endObject()); - queryBuilder = new PercolateQueryBuilder("query", source, XContentType.JSON); + queryBuilder = new PercolateQueryBuilder("query", source, MediaTypeRegistry.JSON); query = queryBuilder.toQuery(queryShardContext); assertThat(searcher.count(query), equalTo(3)); } diff --git a/modules/percolator/src/test/java/org/opensearch/percolator/QueryAnalyzerTests.java b/modules/percolator/src/test/java/org/opensearch/percolator/QueryAnalyzerTests.java index 509f483bcd253..9699fb741a678 100644 --- a/modules/percolator/src/test/java/org/opensearch/percolator/QueryAnalyzerTests.java +++ b/modules/percolator/src/test/java/org/opensearch/percolator/QueryAnalyzerTests.java @@ -51,6 +51,7 @@ import org.apache.lucene.queries.spans.SpanTermQuery; import org.apache.lucene.sandbox.document.HalfFloatPoint; import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.ConstantScoreQuery; @@ -65,7 +66,6 @@ import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermRangeQuery; -import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.join.QueryBitSetProducer; import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.util.BytesRef; diff --git a/modules/percolator/src/test/java/org/opensearch/percolator/QueryBuilderStoreTests.java b/modules/percolator/src/test/java/org/opensearch/percolator/QueryBuilderStoreTests.java index b93b0427e1f67..340d359f85523 100644 --- a/modules/percolator/src/test/java/org/opensearch/percolator/QueryBuilderStoreTests.java +++ b/modules/percolator/src/test/java/org/opensearch/percolator/QueryBuilderStoreTests.java @@ -41,12 +41,11 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; -import org.mockito.Mockito; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.CheckedFunction; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.index.fielddata.plain.BytesBinaryIndexFieldData; import org.opensearch.index.mapper.BinaryFieldMapper; @@ -63,6 +62,8 @@ import java.io.IOException; import java.util.Collections; +import org.mockito.Mockito; + import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; diff --git 
a/modules/rank-eval/src/internalClusterTest/java/org/opensearch/index/rankeval/RankEvalRequestIT.java b/modules/rank-eval/src/internalClusterTest/java/org/opensearch/index/rankeval/RankEvalRequestIT.java index 6eb974c77a5f3..488c2e33648e7 100644 --- a/modules/rank-eval/src/internalClusterTest/java/org/opensearch/index/rankeval/RankEvalRequestIT.java +++ b/modules/rank-eval/src/internalClusterTest/java/org/opensearch/index/rankeval/RankEvalRequestIT.java @@ -32,10 +32,13 @@ package org.opensearch.index.rankeval; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.OpenSearchException; import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.support.IndicesOptions; +import org.opensearch.common.settings.Settings; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.index.query.QueryBuilders; @@ -43,7 +46,7 @@ import org.opensearch.indices.IndexClosedException; import org.opensearch.plugins.Plugin; import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.junit.Before; import java.util.ArrayList; @@ -54,15 +57,28 @@ import java.util.Set; import static org.opensearch.index.rankeval.EvaluationMetric.filterUnratedDocuments; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.instanceOf; -public class RankEvalRequestIT extends OpenSearchIntegTestCase { +public class RankEvalRequestIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String TEST_INDEX = "test"; private static final String INDEX_ALIAS = "alias0"; private static final int RELEVANT_RATING_1 = 1; + public RankEvalRequestIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Arrays.asList(RankEvalModulePlugin.class); diff --git a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/PrecisionAtK.java b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/PrecisionAtK.java index a10b957f57a6b..531db06ef3cfb 100644 --- a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/PrecisionAtK.java +++ b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/PrecisionAtK.java @@ -40,13 +40,13 @@ import org.opensearch.core.xcontent.XContentParser; import org.opensearch.search.SearchHit; +import javax.naming.directory.SearchResult; + import java.io.IOException; import java.util.List; import java.util.Objects; import java.util.OptionalInt; -import javax.naming.directory.SearchResult; - import static org.opensearch.core.xcontent.ConstructingObjectParser.constructorArg; import static org.opensearch.core.xcontent.ConstructingObjectParser.optionalConstructorArg; import static org.opensearch.index.rankeval.EvaluationMetric.joinHitsWithRatings; diff --git a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalModulePlugin.java b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalModulePlugin.java index 3dbc07e5cf342..cfabb582e2003 100644 --- a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalModulePlugin.java +++ b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalModulePlugin.java @@ -33,14 +33,14 @@ package org.opensearch.index.rankeval; import org.opensearch.action.ActionRequest; -import org.opensearch.action.ActionResponse; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.node.DiscoveryNodes; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsFilter; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry.Entry; import org.opensearch.plugins.ActionPlugin; import org.opensearch.plugins.Plugin; diff --git a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalRequest.java b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalRequest.java index ba640a96d2642..8767c6766387d 100644 --- a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalRequest.java +++ b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalRequest.java @@ -38,9 +38,9 @@ import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchType; import org.opensearch.action.support.IndicesOptions; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import java.io.IOException; import java.util.Arrays; diff --git a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalRequestBuilder.java b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalRequestBuilder.java index 84fc45527ec27..f3fdb486bd070 100644 --- a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalRequestBuilder.java +++ b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalRequestBuilder.java @@ -32,8 +32,8 
@@ package org.opensearch.index.rankeval; -import org.opensearch.action.ActionType; import org.opensearch.action.ActionRequestBuilder; +import org.opensearch.action.ActionType; import org.opensearch.client.OpenSearchClient; public class RankEvalRequestBuilder extends ActionRequestBuilder<RankEvalRequest, RankEvalResponse> { diff --git a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalResponse.java b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalResponse.java index 4dbc348fe458e..880032ede01a2 100644 --- a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalResponse.java +++ b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalResponse.java @@ -33,18 +33,18 @@ package org.opensearch.index.rankeval; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionResponse; -import org.opensearch.core.ParseField; -import org.opensearch.common.Strings; import org.opensearch.common.collect.Tuple; +import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.xcontent.XContentParserUtils; -import org.opensearch.common.xcontent.XContentType; import java.io.IOException; import java.util.Collections; @@ -106,7 +106,7 @@ public Map<String, Exception> getFailures() { @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } @Override diff --git a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalSpec.java b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalSpec.java index 9585e79e69cf1..229782b5fb5d1 100644 --- a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalSpec.java +++ b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RankEvalSpec.java @@ -34,16 +34,16 @@ import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; -import org.opensearch.common.Strings; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.xcontent.XContentParserUtils; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.script.Script; import java.io.IOException; @@ -250,7 +250,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } @Override diff --git a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RatedDocument.java 
b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RatedDocument.java index 02ac9182c4f35..afb454025dd01 100644 --- a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RatedDocument.java +++ b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RatedDocument.java @@ -32,13 +32,13 @@ package org.opensearch.index.rankeval; +import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -129,7 +129,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public String toString() { - return org.opensearch.common.Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } @Override diff --git a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RatedRequest.java b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RatedRequest.java index c5f899cbefdf6..bb05da0682aed 100644 --- a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RatedRequest.java +++ b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RatedRequest.java @@ -33,13 +33,13 @@ package org.opensearch.index.rankeval; import org.opensearch.common.Nullable; -import org.opensearch.common.Strings; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -341,7 +341,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } @Override diff --git a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RecallAtK.java b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RecallAtK.java index fdbdead7575a7..90c1f4951269e 100644 --- a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RecallAtK.java +++ b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/RecallAtK.java @@ -40,13 +40,13 @@ import org.opensearch.core.xcontent.XContentParser; import org.opensearch.search.SearchHit; +import javax.naming.directory.SearchResult; + import java.io.IOException; import java.util.List; import java.util.Objects; import java.util.OptionalInt; -import javax.naming.directory.SearchResult; - import static org.opensearch.core.xcontent.ConstructingObjectParser.constructorArg; import static org.opensearch.core.xcontent.ConstructingObjectParser.optionalConstructorArg; import static 
org.opensearch.index.rankeval.EvaluationMetric.joinHitsWithRatings; diff --git a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/TransportRankEvalAction.java b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/TransportRankEvalAction.java index 688a0787c95b8..8e72c6ef06849 100644 --- a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/TransportRankEvalAction.java +++ b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/TransportRankEvalAction.java @@ -32,7 +32,6 @@ package org.opensearch.index.rankeval; -import org.opensearch.action.ActionListener; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.MultiSearchResponse; import org.opensearch.action.search.MultiSearchResponse.Item; @@ -40,13 +39,14 @@ import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.HandledTransportAction; import org.opensearch.client.Client; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.script.Script; import org.opensearch.script.ScriptService; import org.opensearch.script.TemplateScript; @@ -71,10 +71,10 @@ * supplied query parameters) against a set of possible search requests (read: * search specifications, expressed as query/search request templates) and * compares the result against a set of annotated documents per search intent. - * + * <p> * If any documents are returned that haven't been annotated the document id of * those is returned per search intent. - * + * <p> * The resulting search quality is computed in terms of precision at n and * returned for each search specification for the full set of search intents as * averaged precision at n. 
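The javadoc in the hunk above describes the rank-eval flow: templated search requests are executed against an index, the hits are compared with human-annotated documents per search intent, and the quality is reported as precision at n averaged over all intents. A minimal sketch of that precision arithmetic, assuming the convention that unrated hits are skipped rather than counted as irrelevant (the module's actual default may differ); the RatedHit record and RELEVANT_THRESHOLD name are hypothetical, not the module's PrecisionAtK API:

    import java.util.List;

    public final class PrecisionAtKSketch {
        // A search hit paired with a human rating; -1 marks "unrated".
        record RatedHit(String docId, int rating) {}

        static final int RELEVANT_THRESHOLD = 1; // ratings >= 1 count as relevant

        // precision@k = relevant rated hits in the top k / rated hits in the top k
        static double precisionAtK(List<RatedHit> topHits, int k) {
            int rated = 0;
            int relevant = 0;
            for (RatedHit hit : topHits.subList(0, Math.min(k, topHits.size()))) {
                if (hit.rating() < 0) {
                    continue; // unrated document: excluded from the denominator
                }
                rated++;
                if (hit.rating() >= RELEVANT_THRESHOLD) {
                    relevant++;
                }
            }
            return rated == 0 ? 0.0 : (double) relevant / rated;
        }

        public static void main(String[] args) {
            List<RatedHit> top4 = List.of(
                new RatedHit("d1", 3), new RatedHit("d2", 0),
                new RatedHit("d3", -1), new RatedHit("d4", 1)
            );
            System.out.println(precisionAtK(top4, 4)); // 2 relevant of 3 rated -> 0.666...
        }
    }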
@@ -126,7 +126,7 @@ protected void doExecute(Task task, RankEvalRequest request, ActionListener<Rank namedXContentRegistry, LoggingDeprecationHandler.INSTANCE, new BytesArray(resolvedRequest), - XContentType.JSON + MediaTypeRegistry.JSON ) ) { evaluationRequest = SearchSourceBuilder.fromXContent(subParser, false); diff --git a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/DiscountedCumulativeGainTests.java b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/DiscountedCumulativeGainTests.java index ea001de0ee7c6..d96e3212e05a2 100644 --- a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/DiscountedCumulativeGainTests.java +++ b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/DiscountedCumulativeGainTests.java @@ -33,17 +33,17 @@ package org.opensearch.index.rankeval; import org.opensearch.action.OriginalIndices; -import org.opensearch.common.Strings; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchShardTarget; import org.opensearch.test.OpenSearchTestCase; @@ -67,7 +67,7 @@ public class DiscountedCumulativeGainTests extends OpenSearchTestCase { /** * Assuming the docs are ranked in the following order: - * + * <p> * rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) * ------------------------------------------------------------------------------------------- * 1 | 3 | 7.0 | 1.0 | 7.0 | 7.0 |  @@ -76,7 +76,7 @@ public class DiscountedCumulativeGainTests extends OpenSearchTestCase { * 4 | 0 | 0.0 | 2.321928094887362 | 0.0 * 5 | 1 | 1.0 | 2.584962500721156 | 0.38685280723454163 * 6 | 2 | 3.0 | 2.807354922057604 | 1.0686215613240666 - * + * <p> * dcg = 13.84826362927298 (sum of last column) */ public void testDCGAt() { @@ -91,20 +91,20 @@ public void testDCGAt() { DiscountedCumulativeGain dcg = new DiscountedCumulativeGain(); assertEquals(EXPECTED_DCG, dcg.evaluate("id", hits, rated).metricScore(), DELTA); - /** - * Check with normalization: to get the maximal possible dcg, sort documents by - * relevance in descending order - * - * rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) - * --------------------------------------------------------------------------------------- - * 1 | 3 | 7.0 | 1.0  | 7.0 - * 2 | 3 | 7.0 | 1.5849625007211563 | 4.416508275000202 - * 3 | 2 | 3.0 | 2.0  | 1.5 - * 4 | 2 | 3.0 | 2.321928094887362 | 1.2920296742201793 - * 5 | 1 | 1.0 | 2.584962500721156  | 0.38685280723454163 - * 6 | 0 | 0.0 | 2.807354922057604  | 0.0 - * - * idcg = 14.595390756454922 (sum of last column) + /* + Check with normalization: to get the maximal possible dcg, sort documents by + relevance in descending order + + rank | relevance 
| 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) + --------------------------------------------------------------------------------------- + 1 | 3 | 7.0 | 1.0  | 7.0 + 2 | 3 | 7.0 | 1.5849625007211563 | 4.416508275000202 + 3 | 2 | 3.0 | 2.0  | 1.5 + 4 | 2 | 3.0 | 2.321928094887362 | 1.2920296742201793 + 5 | 1 | 1.0 | 2.584962500721156  | 0.38685280723454163 + 6 | 0 | 0.0 | 2.807354922057604  | 0.0 + + idcg = 14.595390756454922 (sum of last column) */ dcg = new DiscountedCumulativeGain(true, null, 10); assertEquals(EXPECTED_NDCG, dcg.evaluate("id", hits, rated).metricScore(), DELTA); @@ -113,7 +113,7 @@ public void testDCGAt() { /** * This tests metric when some documents in the search result don't have a * rating provided by the user. - * + * <p> * rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) * ------------------------------------------------------------------------------------------- * 1 | 3 | 7.0 | 1.0 | 7.0 2 |  @@ -122,7 +122,7 @@ public void testDCGAt() { * 4 | n/a | n/a | n/a | n/a * 5 | 1 | 1.0 | 2.584962500721156 | 0.38685280723454163 * 6 | n/a | n/a | n/a | n/a - * + * <p> * dcg = 12.779642067948913 (sum of last column) */ public void testDCGAtSixMissingRatings() { @@ -143,20 +143,20 @@ public void testDCGAtSixMissingRatings() { assertEquals(12.779642067948913, result.metricScore(), DELTA); assertEquals(2, filterUnratedDocuments(result.getHitsAndRatings()).size()); - /** - * Check with normalization: to get the maximal possible dcg, sort documents by - * relevance in descending order - * - * rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) - * ---------------------------------------------------------------------------------------- - * 1 | 3 | 7.0 | 1.0  | 7.0 - * 2 | 3 | 7.0 | 1.5849625007211563 | 4.416508275000202 - * 3 | 2 | 3.0 | 2.0  | 1.5 - * 4 | 1 | 1.0 | 2.321928094887362   | 0.43067655807339 - * 5 | n.a | n.a | n.a.  | n.a. - * 6 | n.a | n.a | n.a  | n.a - * - * idcg = 13.347184833073591 (sum of last column) + /* + Check with normalization: to get the maximal possible dcg, sort documents by + relevance in descending order + + rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) + ---------------------------------------------------------------------------------------- + 1 | 3 | 7.0 | 1.0  | 7.0 + 2 | 3 | 7.0 | 1.5849625007211563 | 4.416508275000202 + 3 | 2 | 3.0 | 2.0  | 1.5 + 4 | 1 | 1.0 | 2.321928094887362   | 0.43067655807339 + 5 | n.a | n.a | n.a.  | n.a. 
+ 6 | n.a | n.a | n.a  | n.a + + idcg = 13.347184833073591 (sum of last column) */ dcg = new DiscountedCumulativeGain(true, null, 10); assertEquals(12.779642067948913 / 13.347184833073591, dcg.evaluate("id", hits, rated).metricScore(), DELTA); @@ -166,7 +166,7 @@ public void testDCGAtSixMissingRatings() { * This tests that normalization works as expected when there are more rated * documents than search hits because we restrict DCG to be calculated at the * fourth position - * + * <p> * rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) * ------------------------------------------------------------------------------------------- * 1 | 3 | 7.0 | 1.0 | 7.0 2 |  @@ -176,7 +176,7 @@ public void testDCGAtSixMissingRatings() { * ----------------------------------------------------------------- * 5 | 1 | 1.0 | 2.584962500721156 | 0.38685280723454163 * 6 | n/a | n/a | n/a | n/a - * + * <p> * dcg = 12.392789260714371 (sum of last column until position 4) */ public void testDCGAtFourMoreRatings() { @@ -200,21 +200,21 @@ public void testDCGAtFourMoreRatings() { assertEquals(12.392789260714371, result.metricScore(), DELTA); assertEquals(1, filterUnratedDocuments(result.getHitsAndRatings()).size()); - /** - * Check with normalization: to get the maximal possible dcg, sort documents by - * relevance in descending order - * - * rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) - * --------------------------------------------------------------------------------------- - * 1 | 3 | 7.0 | 1.0  | 7.0 - * 2 | 3 | 7.0 | 1.5849625007211563 | 4.416508275000202 - * 3 | 2 | 3.0 | 2.0  | 1.5 - * 4 | 1 | 1.0 | 2.321928094887362   | 0.43067655807339 - * --------------------------------------------------------------------------------------- - * 5 | n.a | n.a | n.a.  | n.a. - * 6 | n.a | n.a | n.a  | n.a - * - * idcg = 13.347184833073591 (sum of last column) + /* + Check with normalization: to get the maximal possible dcg, sort documents by + relevance in descending order + + rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) + --------------------------------------------------------------------------------------- + 1 | 3 | 7.0 | 1.0  | 7.0 + 2 | 3 | 7.0 | 1.5849625007211563 | 4.416508275000202 + 3 | 2 | 3.0 | 2.0  | 1.5 + 4 | 1 | 1.0 | 2.321928094887362   | 0.43067655807339 + --------------------------------------------------------------------------------------- + 5 | n.a | n.a | n.a.  | n.a. 
+ 6 | n.a | n.a | n.a  | n.a + + idcg = 13.347184833073591 (sum of last column) */ dcg = new DiscountedCumulativeGain(true, null, 10); assertEquals(12.392789260714371 / 13.347184833073591, dcg.evaluate("id", hits, ratedDocs).metricScore(), DELTA); @@ -274,7 +274,7 @@ public static DiscountedCumulativeGain createTestItem() { public void testXContentRoundtrip() throws IOException { DiscountedCumulativeGain testItem = createTestItem(); - XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); XContentBuilder shuffled = shuffleXContent(testItem.toXContent(builder, ToXContent.EMPTY_PARAMS)); try (XContentParser itemParser = createParser(shuffled)) { itemParser.nextToken(); @@ -323,12 +323,12 @@ public void testMetricDetails() { + ",\"unrated_docs\":" + unratedDocs + "}}", - Strings.toString(XContentType.JSON, detail) + Strings.toString(MediaTypeRegistry.JSON, detail) ); } else { assertEquals( "{\"dcg\":{\"dcg\":" + dcg + ",\"unrated_docs\":" + unratedDocs + "}}", - Strings.toString(XContentType.JSON, detail) + Strings.toString(MediaTypeRegistry.JSON, detail) ); } } diff --git a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/EvalQueryQualityTests.java b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/EvalQueryQualityTests.java index 2b99e41b8267c..935083a66b7fe 100644 --- a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/EvalQueryQualityTests.java +++ b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/EvalQueryQualityTests.java @@ -33,13 +33,13 @@ package org.opensearch.index.rankeval; import org.opensearch.action.OriginalIndices; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.search.SearchShardTarget; import org.opensearch.test.OpenSearchTestCase; @@ -48,7 +48,7 @@ import java.util.List; import java.util.function.Predicate; -import static org.opensearch.common.xcontent.XContentHelper.toXContent; +import static org.opensearch.core.xcontent.XContentHelper.toXContent; import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; import static org.opensearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; import static org.opensearch.test.XContentTestUtils.insertRandomFields; diff --git a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/ExpectedReciprocalRankTests.java b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/ExpectedReciprocalRankTests.java index 014f52faa9d57..d10c6c285e0d7 100644 --- a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/ExpectedReciprocalRankTests.java +++ b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/ExpectedReciprocalRankTests.java @@ -33,16 +33,16 @@ package org.opensearch.index.rankeval; import org.opensearch.action.OriginalIndices; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesReference; import 
org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchShardTarget; import org.opensearch.test.OpenSearchTestCase; @@ -166,7 +166,7 @@ public static ExpectedReciprocalRank createTestItem() { public void testXContentRoundtrip() throws IOException { ExpectedReciprocalRank testItem = createTestItem(); - XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); XContentBuilder shuffled = shuffleXContent(testItem.toXContent(builder, ToXContent.EMPTY_PARAMS)); try (XContentParser itemParser = createParser(shuffled)) { itemParser.nextToken(); diff --git a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/MeanReciprocalRankTests.java b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/MeanReciprocalRankTests.java index 3df79acfa6ce1..2304cf74d307f 100644 --- a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/MeanReciprocalRankTests.java +++ b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/MeanReciprocalRankTests.java @@ -33,16 +33,16 @@ package org.opensearch.index.rankeval; import org.opensearch.action.OriginalIndices; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchShardTarget; import org.opensearch.test.OpenSearchTestCase; @@ -183,7 +183,7 @@ public void testNoResults() throws Exception { public void testXContentRoundtrip() throws IOException { MeanReciprocalRank testItem = createTestItem(); - XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); XContentBuilder shuffled = shuffleXContent(testItem.toXContent(builder, ToXContent.EMPTY_PARAMS)); try (XContentParser itemParser = createParser(shuffled)) { itemParser.nextToken(); diff --git a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/PrecisionAtKTests.java b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/PrecisionAtKTests.java index 3317a2d2f00f1..16e74d928c2b4 100644 --- 
a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/PrecisionAtKTests.java +++ b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/PrecisionAtKTests.java @@ -33,16 +33,16 @@ package org.opensearch.index.rankeval; import org.opensearch.action.OriginalIndices; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchShardTarget; import org.opensearch.test.OpenSearchTestCase; @@ -199,7 +199,7 @@ public static PrecisionAtK createTestItem() { public void testXContentRoundtrip() throws IOException { PrecisionAtK testItem = createTestItem(); - XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); XContentBuilder shuffled = shuffleXContent(testItem.toXContent(builder, ToXContent.EMPTY_PARAMS)); try (XContentParser itemParser = createParser(shuffled)) { itemParser.nextToken(); diff --git a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalRequestTests.java b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalRequestTests.java index a13b1d0511e68..283542a07ba07 100644 --- a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalRequestTests.java +++ b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalRequestTests.java @@ -34,9 +34,9 @@ import org.opensearch.action.search.SearchType; import org.opensearch.action.support.IndicesOptions; +import org.opensearch.common.util.ArrayUtils; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.Writeable.Reader; -import org.opensearch.common.util.ArrayUtils; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.test.AbstractWireSerializingTestCase; import org.junit.AfterClass; diff --git a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalResponseTests.java b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalResponseTests.java index 7c0590566bba9..db792130ca016 100644 --- a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalResponseTests.java +++ b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalResponseTests.java @@ -38,19 +38,19 @@ import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.coordination.NoClusterManagerBlockService; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.ParsingException; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.breaker.CircuitBreakingException; 
+import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.CircuitBreakingException; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentLocation; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchParseException; import org.opensearch.search.SearchShardTarget; @@ -67,7 +67,7 @@ import java.util.function.Predicate; import static java.util.Collections.singleton; -import static org.opensearch.common.xcontent.XContentHelper.toXContent; +import static org.opensearch.core.xcontent.XContentHelper.toXContent; import static org.opensearch.test.TestSearchContext.SHARD_TARGET; import static org.opensearch.test.XContentTestUtils.insertRandomFields; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertToXContentEquivalent; @@ -177,7 +177,7 @@ public void testToXContent() throws IOException { Collections.singletonMap("coffee_query", coffeeQueryQuality), Collections.singletonMap("beer_query", new ParsingException(new XContentLocation(0, 0), "someMsg")) ); - XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); String xContent = BytesReference.bytes(response.toXContent(builder, ToXContent.EMPTY_PARAMS)).utf8ToString(); assertEquals( ("{" diff --git a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalSpecTests.java b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalSpecTests.java index c42c0722e0fae..7457e26aacdf0 100644 --- a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalSpecTests.java +++ b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RankEvalSpecTests.java @@ -32,16 +32,15 @@ package org.opensearch.index.rankeval; -import org.opensearch.common.Strings; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.rankeval.RankEvalSpec.ScriptWithId; @@ -103,7 +102,7 @@ static RankEvalSpec createTestItem() { builder.startObject(); builder.field("field", randomAlphaOfLengthBetween(1, 5)); builder.endObject(); - script = Strings.toString(builder); + script = builder.toString(); } catch (IOException e) { // this shouldn't happen in tests, 
re-throw just not to swallow it throw new RuntimeException(e); diff --git a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RatedDocumentTests.java b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RatedDocumentTests.java index 988784b6e57a3..390412674e05e 100644 --- a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RatedDocumentTests.java +++ b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RatedDocumentTests.java @@ -32,14 +32,14 @@ package org.opensearch.index.rankeval; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -57,7 +57,7 @@ public static RatedDocument createRatedDocument() { public void testXContentParsing() throws IOException { RatedDocument testItem = createRatedDocument(); - XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); XContentBuilder shuffled = shuffleXContent(testItem.toXContent(builder, ToXContent.EMPTY_PARAMS)); try (XContentParser itemParser = createParser(shuffled)) { RatedDocument parsedItem = RatedDocument.fromXContent(itemParser); diff --git a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RatedRequestsTests.java b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RatedRequestsTests.java index ac2401f30e6f0..588be3d94c2c6 100644 --- a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RatedRequestsTests.java +++ b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RatedRequestsTests.java @@ -32,17 +32,17 @@ package org.opensearch.index.rankeval; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.common.settings.Settings; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.index.query.QueryBuilder; import org.opensearch.search.SearchModule; @@ -134,7 +134,7 @@ public static RatedRequest createTestItem(boolean forceRequest) { public void testXContentRoundtrip() throws IOException { RatedRequest testItem = createTestItem(randomBoolean()); - XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder builder = 
MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); XContentBuilder shuffled = shuffleXContent(testItem.toXContent(builder, ToXContent.EMPTY_PARAMS)); try (XContentParser itemParser = createParser(shuffled)) { itemParser.nextToken(); diff --git a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RatedSearchHitTests.java b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RatedSearchHitTests.java index 791d3d03cd939..69cf329401918 100644 --- a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RatedSearchHitTests.java +++ b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RatedSearchHitTests.java @@ -32,11 +32,11 @@ package org.opensearch.index.rankeval; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.search.SearchHit; import org.opensearch.test.OpenSearchTestCase; diff --git a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RecallAtKTests.java b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RecallAtKTests.java index 85e024f6bb1e9..e89d0bcb20eef 100644 --- a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RecallAtKTests.java +++ b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/RecallAtKTests.java @@ -33,16 +33,16 @@ package org.opensearch.index.rankeval; import org.opensearch.action.OriginalIndices; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchShardTarget; import org.opensearch.test.OpenSearchTestCase; @@ -185,7 +185,7 @@ public static RecallAtK createTestItem() { public void testXContentRoundtrip() throws IOException { RecallAtK testItem = createTestItem(); - XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); XContentBuilder shuffled = shuffleXContent(testItem.toXContent(builder, ToXContent.EMPTY_PARAMS)); try (XContentParser itemParser = createParser(shuffled)) { itemParser.nextToken(); diff --git a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/TransportRankEvalActionTests.java b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/TransportRankEvalActionTests.java index 6fc491058af86..bd42289015bf4 100644 --- a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/TransportRankEvalActionTests.java +++ b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/TransportRankEvalActionTests.java @@ -32,7 +32,6 @@ 
package org.opensearch.index.rankeval; -import org.opensearch.action.ActionListener; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.MultiSearchResponse; import org.opensearch.action.search.SearchType; @@ -40,6 +39,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.client.node.NodeClient; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; import org.opensearch.script.ScriptService; diff --git a/modules/reindex/src/internalClusterTest/java/org/opensearch/client/documentation/ReindexDocumentationIT.java b/modules/reindex/src/internalClusterTest/java/org/opensearch/client/documentation/ReindexDocumentationIT.java index 3a19be57cf731..40d888f4c6346 100644 --- a/modules/reindex/src/internalClusterTest/java/org/opensearch/client/documentation/ReindexDocumentationIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/opensearch/client/documentation/ReindexDocumentationIT.java @@ -32,10 +32,12 @@ package org.opensearch.client.documentation; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.node.tasks.get.GetTaskResponse; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.opensearch.client.Client; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.tasks.TaskId; import org.opensearch.index.IndexModule; import org.opensearch.index.engine.Engine; import org.opensearch.index.query.QueryBuilders; @@ -53,12 +55,10 @@ import org.opensearch.index.reindex.UpdateByQueryAction; import org.opensearch.index.reindex.UpdateByQueryRequestBuilder; import org.opensearch.index.shard.IndexingOperationListener; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.plugins.Plugin; import org.opensearch.script.Script; import org.opensearch.script.ScriptType; import org.opensearch.search.sort.SortOrder; -import org.opensearch.tasks.TaskId; import org.opensearch.tasks.TaskInfo; import org.opensearch.test.OpenSearchIntegTestCase; import org.hamcrest.Matcher; diff --git a/modules/reindex/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecReindexIT.java b/modules/reindex/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecReindexIT.java deleted file mode 100644 index 87f3c68d8af76..0000000000000 --- a/modules/reindex/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecReindexIT.java +++ /dev/null @@ -1,189 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.index.codec; - -import org.opensearch.action.admin.indices.flush.FlushResponse; -import org.opensearch.action.admin.indices.refresh.RefreshResponse; -import org.opensearch.action.admin.indices.segments.IndicesSegmentsRequest; -import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; -import org.opensearch.action.support.ActiveShardCount; -import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.settings.Settings; -import org.opensearch.index.engine.Segment; -import org.opensearch.index.reindex.BulkByScrollResponse; -import org.opensearch.index.reindex.ReindexAction; -import org.opensearch.index.reindex.ReindexRequestBuilder; -import org.opensearch.index.reindex.ReindexTestCase; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.ExecutionException; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - -import static java.util.stream.Collectors.toList; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_BLOCKS_METADATA; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_BLOCKS_READ; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_BLOCKS_WRITE; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_READ_ONLY; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; - -public class MultiCodecReindexIT extends ReindexTestCase { - - public void testReindexingMultipleCodecs() throws InterruptedException, ExecutionException { - internalCluster().ensureAtLeastNumDataNodes(1); - Map<String, String> codecMap = Map.of( - "best_compression", - "BEST_COMPRESSION", - "zstd_no_dict", - "ZSTD_NO_DICT", - "zstd", - "ZSTD", - "default", - "BEST_SPEED" - ); - - for (Map.Entry<String, String> codec : codecMap.entrySet()) { - assertReindexingWithMultipleCodecs(codec.getKey(), codec.getValue(), codecMap); - } - - } - - private void assertReindexingWithMultipleCodecs(String destCodec, String destCodecMode, Map<String, String> codecMap) - throws ExecutionException, InterruptedException { - - final String index = "test-index" + destCodec; - final String destIndex = "dest-index" + destCodec; - - // creating source index - createIndex( - index, - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put("index.codec", "default") - .put("index.merge.policy.max_merged_segment", "1b") - .build() - ); - ensureGreen(index); - - final int nbDocs = randomIntBetween(2, 5); - - // indexing with all 4 codecs - for (Map.Entry<String, String> codec : codecMap.entrySet()) { - useCodec(index, codec.getKey()); - ingestDocs(index, nbDocs); - } - - assertTrue( - getSegments(index).stream() - .flatMap(s -> s.getAttributes().values().stream()) - .collect(Collectors.toSet()) - .containsAll(codecMap.values()) - ); - - // creating destination index with destination codec - createIndex( - destIndex, - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put("index.codec", destCodec) - .build() - ); - - BulkByScrollResponse bulkResponse = new ReindexRequestBuilder(client(), ReindexAction.INSTANCE).source(index) - .destination(destIndex) - .refresh(true) - 
.waitForActiveShards(ActiveShardCount.ONE) - .get(); - - assertEquals(codecMap.size() * nbDocs, bulkResponse.getCreated()); - assertEquals(codecMap.size() * nbDocs, bulkResponse.getTotal()); - assertEquals(0, bulkResponse.getDeleted()); - assertEquals(0, bulkResponse.getNoops()); - assertEquals(0, bulkResponse.getVersionConflicts()); - assertEquals(1, bulkResponse.getBatches()); - assertTrue(bulkResponse.getTook().getMillis() > 0); - assertEquals(0, bulkResponse.getBulkFailures().size()); - assertEquals(0, bulkResponse.getSearchFailures().size()); - assertTrue(getSegments(destIndex).stream().allMatch(segment -> segment.attributes.containsValue(destCodecMode))); - } - - private void useCodec(String index, String codec) throws ExecutionException, InterruptedException { - assertAcked(client().admin().indices().prepareClose(index)); - - assertAcked( - client().admin() - .indices() - .updateSettings(new UpdateSettingsRequest(index).settings(Settings.builder().put("index.codec", codec))) - .get() - ); - - assertAcked(client().admin().indices().prepareOpen(index)); - } - - private void flushAndRefreshIndex(String index) { - - // Request is not blocked - for (String blockSetting : Arrays.asList( - SETTING_BLOCKS_READ, - SETTING_BLOCKS_WRITE, - SETTING_READ_ONLY, - SETTING_BLOCKS_METADATA, - SETTING_READ_ONLY_ALLOW_DELETE - )) { - try { - enableIndexBlock(index, blockSetting); - // flush - FlushResponse flushResponse = client().admin().indices().prepareFlush(index).setForce(true).execute().actionGet(); - assertNoFailures(flushResponse); - - // refresh - RefreshResponse refreshResponse = client().admin().indices().prepareRefresh(index).execute().actionGet(); - assertNoFailures(refreshResponse); - } finally { - disableIndexBlock(index, blockSetting); - } - } - } - - private void ingestDocs(String index, int nbDocs) throws InterruptedException { - - indexRandom( - randomBoolean(), - false, - randomBoolean(), - IntStream.range(0, nbDocs) - .mapToObj(i -> client().prepareIndex(index).setId(UUID.randomUUID().toString()).setSource("num", i)) - .collect(toList()) - ); - flushAndRefreshIndex(index); - } - - private ArrayList<Segment> getSegments(String index) { - - return new ArrayList<>( - client().admin() - .indices() - .segments(new IndicesSegmentsRequest(index)) - .actionGet() - .getIndices() - .get(index) - .getShards() - .get(0) - .getShards()[0].getSegments() - ); - } - -} diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java index 6170c1adabbea..6ed486fbdb33b 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java @@ -32,12 +32,9 @@ package org.opensearch.index.reindex; -import java.util.Optional; - import org.apache.hc.core5.http.HttpRequestInterceptor; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.action.DocWriteRequest; import org.opensearch.action.DocWriteResponse; import org.opensearch.action.admin.indices.refresh.RefreshRequest; @@ -53,9 +50,10 @@ import org.opensearch.action.support.TransportAction; import org.opensearch.client.ParentTaskAssigningClient; import org.opensearch.common.Nullable; -import org.opensearch.common.unit.ByteSizeValue; import 
org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.index.VersionType; import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.IndexFieldMapper; @@ -80,6 +78,7 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; @@ -91,8 +90,8 @@ import static java.util.Collections.unmodifiableList; import static org.opensearch.action.bulk.BackoffPolicy.exponentialBackoff; import static org.opensearch.common.unit.TimeValue.timeValueNanos; -import static org.opensearch.index.reindex.AbstractBulkByScrollRequest.MAX_DOCS_ALL_MATCHES; import static org.opensearch.core.rest.RestStatus.CONFLICT; +import static org.opensearch.index.reindex.AbstractBulkByScrollRequest.MAX_DOCS_ALL_MATCHES; import static org.opensearch.search.sort.SortBuilders.fieldSort; /** @@ -207,7 +206,7 @@ public abstract class AbstractAsyncBulkByScrollAction< /** * Build the {@link BiFunction} to apply to all {@link RequestWrapper}. - * + * <p> * Public for testings.... */ public BiFunction<RequestWrapper<?>, ScrollableHitSource.Hit, RequestWrapper<?>> buildScriptApplier() { diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractBulkByQueryRestHandler.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractBulkByQueryRestHandler.java index 6f36fafd852ef..96e15a3899383 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractBulkByQueryRestHandler.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractBulkByQueryRestHandler.java @@ -34,11 +34,11 @@ import org.opensearch.action.ActionType; import org.opensearch.action.search.SearchRequest; +import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.search.RestSearchAction; @@ -106,7 +106,7 @@ private XContentParser extractRequestSpecificFields(RestRequest restRequest, Map } try ( XContentParser parser = restRequest.contentOrSourceParamParser(); - XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType()) + XContentBuilder builder = MediaTypeRegistry.contentBuilder(parser.contentType()) ) { Map<String, Object> body = parser.map(); diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/AsyncDeleteByQueryAction.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/AsyncDeleteByQueryAction.java index 1a9ce16acc255..fefd7fd280082 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/AsyncDeleteByQueryAction.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/AsyncDeleteByQueryAction.java @@ -33,9 +33,9 @@ package org.opensearch.index.reindex; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; import org.opensearch.action.delete.DeleteRequest; import 
org.opensearch.client.ParentTaskAssigningClient; +import org.opensearch.core.action.ActionListener; import org.opensearch.script.ScriptService; import org.opensearch.threadpool.ThreadPool; diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/BulkByScrollParallelizationHelper.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/BulkByScrollParallelizationHelper.java index fcdee900b4ca7..7534de1408bcc 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/BulkByScrollParallelizationHelper.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/BulkByScrollParallelizationHelper.java @@ -32,18 +32,18 @@ package org.opensearch.index.reindex; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionType; import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsRequest; import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsResponse; import org.opensearch.action.search.SearchRequest; import org.opensearch.client.Client; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.Index; +import org.opensearch.core.tasks.TaskId; import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.slice.SliceBuilder; -import org.opensearch.tasks.TaskId; import java.util.Arrays; import java.util.Collections; @@ -63,14 +63,14 @@ private BulkByScrollParallelizationHelper() {} /** * Takes an action created by a {@link BulkByScrollTask} and runs it with regard to whether the request is sliced or not. - * + * <p> * If the request is not sliced (i.e. the number of slices is 1), the worker action in the given {@link Runnable} will be started on * the local node. If the request is sliced (i.e. the number of slices is more than 1), then a subrequest will be created for each * slice and sent. - * + * <p> * If slices are set as {@code "auto"}, this class will resolve that to a specific number based on characteristics of the source * indices. A request with {@code "auto"} slices may end up being sliced or unsliced. - * + * <p> * This method is equivalent to calling {@link #initTaskState} followed by {@link #executeSlicedAction} */ static <Request extends AbstractBulkByScrollRequest<Request>> void startSlicedAction( @@ -98,11 +98,11 @@ public void onFailure(Exception e) { /** * Takes an action and a {@link BulkByScrollTask} and runs it with regard to whether this task is a * leader or worker. - * + * <p> * If this task is a worker, the worker action in the given {@link Runnable} will be started on the local * node. If the task is a leader (i.e. the number of slices is more than 1), then a subrequest will be * created for each slice and sent. - * + * <p> * This method can only be called after the task state is initialized {@link #initTaskState}. */ static <Request extends AbstractBulkByScrollRequest<Request>> void executeSlicedAction( @@ -125,7 +125,7 @@ static <Request extends AbstractBulkByScrollRequest<Request>> void executeSliced /** * Takes a {@link BulkByScrollTask} and ensures that its initial task state (leader or worker) is set. - * + * <p> * If slices are set as {@code "auto"}, this method will resolve that to a specific number based on * characteristics of the source indices. A request with {@code "auto"} slices may end up being sliced or * unsliced. This method does not execute the action. 
In order to execute the action see diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/BulkIndexByScrollResponseContentListener.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/BulkIndexByScrollResponseContentListener.java index 1ed61c200ce28..b9367d352fdb1 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/BulkIndexByScrollResponseContentListener.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/BulkIndexByScrollResponseContentListener.java @@ -33,13 +33,13 @@ package org.opensearch.index.reindex; import org.opensearch.action.bulk.BulkItemResponse.Failure; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.reindex.ScrollableHitSource.SearchFailure; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestChannel; import org.opensearch.rest.RestResponse; -import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.action.RestBuilderListener; import java.util.Map; diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexModulePlugin.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexModulePlugin.java index d89e3d677a2a5..aa48da4cb2421 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexModulePlugin.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexModulePlugin.java @@ -32,28 +32,26 @@ package org.opensearch.index.reindex; -import java.util.Optional; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.index.reindex.spi.RemoteReindexExtension; -import org.opensearch.plugins.ExtensiblePlugin; -import org.opensearch.watcher.ResourceWatcherService; import org.opensearch.action.ActionRequest; -import org.opensearch.action.ActionResponse; import org.opensearch.client.Client; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsFilter; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; +import org.opensearch.index.reindex.spi.RemoteReindexExtension; import org.opensearch.plugins.ActionPlugin; +import org.opensearch.plugins.ExtensiblePlugin; import org.opensearch.plugins.Plugin; import org.opensearch.repositories.RepositoriesService; import org.opensearch.rest.RestController; @@ -61,12 +59,14 @@ import org.opensearch.script.ScriptService; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.watcher.ResourceWatcherService; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.Optional; import java.util.function.Supplier; import static java.util.Collections.singletonList; @@ -132,6 +132,8 @@ public List<Setting<?>> getSettings() { final 
List<Setting<?>> settings = new ArrayList<>(); settings.add(TransportReindexAction.REMOTE_CLUSTER_WHITELIST); settings.add(TransportReindexAction.REMOTE_CLUSTER_ALLOWLIST); + settings.add(TransportReindexAction.REMOTE_REINDEX_RETRY_INITIAL_BACKOFF); + settings.add(TransportReindexAction.REMOTE_REINDEX_RETRY_MAX_COUNT); settings.addAll(ReindexSslConfig.getSettings()); return settings; } diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexSslConfig.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexSslConfig.java index 10b94efe4d868..1ac500af590bb 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexSslConfig.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexSslConfig.java @@ -39,12 +39,12 @@ import org.apache.hc.core5.http.nio.ssl.TlsStrategy; import org.apache.hc.core5.reactor.ssl.TlsDetails; import org.opensearch.common.settings.SecureSetting; -import org.opensearch.core.common.settings.SecureString; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.ssl.SslConfiguration; import org.opensearch.common.ssl.SslConfigurationKeys; import org.opensearch.common.ssl.SslConfigurationLoader; +import org.opensearch.core.common.settings.SecureString; import org.opensearch.env.Environment; import org.opensearch.watcher.FileChangesListener; import org.opensearch.watcher.FileWatcher; diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/Reindexer.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/Reindexer.java index cb07e593f8155..c553effc65ab5 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/Reindexer.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/Reindexer.java @@ -32,8 +32,6 @@ package org.opensearch.index.reindex; -import java.util.Optional; - import org.apache.hc.client5.http.auth.AuthScope; import org.apache.hc.client5.http.auth.UsernamePasswordCredentials; import org.apache.hc.client5.http.impl.auth.BasicCredentialsProvider; @@ -47,7 +45,6 @@ import org.apache.hc.core5.util.Timeout; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; import org.opensearch.action.DocWriteRequest; import org.opensearch.action.bulk.BackoffPolicy; import org.opensearch.action.bulk.BulkItemResponse; @@ -57,9 +54,11 @@ import org.opensearch.client.RestClient; import org.opensearch.client.RestClientBuilder; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.common.logging.Loggers; import org.opensearch.common.lucene.uid.Versions; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -80,6 +79,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiFunction; @@ -142,7 +142,8 @@ public void execute(BulkByScrollTask task, ReindexRequest request, ActionListene ParentTaskAssigningClient assigningClient = new ParentTaskAssigningClient(client, clusterService.localNode(), task); AsyncIndexBySearchAction searchAction = new 
AsyncIndexBySearchAction( task, - logger, + // Added a prefix-based logger (destination index) to distinguish multiple reindex jobs for easier debugging. + Loggers.getLogger(Reindexer.class, String.valueOf(request.getDestination().index())), assigningClient, threadPool, scriptService, diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/RestRethrottleAction.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/RestRethrottleAction.java index 89864cf56c71d..925cada3e40b5 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/RestRethrottleAction.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/RestRethrottleAction.java @@ -34,9 +34,9 @@ import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.core.tasks.TaskId; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; -import org.opensearch.tasks.TaskId; import java.util.List; import java.util.function.Supplier; diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/TransportDeleteByQueryAction.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/TransportDeleteByQueryAction.java index 50dbc972061a5..299626bd7fd22 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/TransportDeleteByQueryAction.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/TransportDeleteByQueryAction.java @@ -32,13 +32,13 @@ package org.opensearch.index.reindex; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.HandledTransportAction; import org.opensearch.client.Client; import org.opensearch.client.ParentTaskAssigningClient; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.script.ScriptService; import org.opensearch.tasks.Task; diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/TransportReindexAction.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/TransportReindexAction.java index c84d103a2ef6f..c9a970a4118b3 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/TransportReindexAction.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/TransportReindexAction.java @@ -32,8 +32,6 @@ package org.opensearch.index.reindex; -import java.util.Optional; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.AutoCreateIndex; import org.opensearch.action.support.HandledTransportAction; @@ -44,6 +42,8 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; import org.opensearch.index.reindex.spi.RemoteReindexExtension; import org.opensearch.script.ScriptService; import org.opensearch.tasks.Task; @@ -51,6 +51,7 @@ import org.opensearch.transport.TransportService; import java.util.List; +import java.util.Optional; import java.util.function.Function; import static java.util.Collections.emptyList; @@ -71,11 +72,32 @@ public class TransportReindexAction extends HandledTransportAction<ReindexReques Function.identity(), Property.NodeScope ); + + public static final
Setting<TimeValue> REMOTE_REINDEX_RETRY_INITIAL_BACKOFF = Setting.timeSetting( + "reindex.remote.retry.initial_backoff", + TimeValue.timeValueMillis(500), + TimeValue.timeValueMillis(50), + TimeValue.timeValueMillis(5000), + Property.Dynamic, + Property.NodeScope + ); + + public static final Setting<Integer> REMOTE_REINDEX_RETRY_MAX_COUNT = Setting.intSetting( + "reindex.remote.retry.max_count", + 15, + 1, + 100, + Property.Dynamic, + Property.NodeScope + ); + public static Optional<RemoteReindexExtension> remoteExtension = Optional.empty(); private final ReindexValidator reindexValidator; private final Reindexer reindexer; + private final ClusterService clusterService; + @Inject public TransportReindexAction( Settings settings, @@ -92,10 +114,16 @@ public TransportReindexAction( super(ReindexAction.NAME, transportService, actionFilters, ReindexRequest::new); this.reindexValidator = new ReindexValidator(settings, clusterService, indexNameExpressionResolver, autoCreateIndex); this.reindexer = new Reindexer(clusterService, client, threadPool, scriptService, sslConfig, remoteExtension); + this.clusterService = clusterService; } @Override protected void doExecute(Task task, ReindexRequest request, ActionListener<BulkByScrollResponse> listener) { + if (request.getRemoteInfo() != null) { + request.setMaxRetries(clusterService.getClusterSettings().get(REMOTE_REINDEX_RETRY_MAX_COUNT)); + request.setRetryBackoffInitialTime(clusterService.getClusterSettings().get(REMOTE_REINDEX_RETRY_INITIAL_BACKOFF)); + } + reindexValidator.initialValidation(request); BulkByScrollTask bulkByScrollTask = (BulkByScrollTask) task; reindexer.initTask(bulkByScrollTask, request, new ActionListener<Void>() { diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/TransportRethrottleAction.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/TransportRethrottleAction.java index 2ee869ce2b465..21ae8fd722629 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/TransportRethrottleAction.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/TransportRethrottleAction.java @@ -33,7 +33,6 @@ package org.opensearch.index.reindex; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; import org.opensearch.action.FailedNodeException; import org.opensearch.action.TaskOperationFailure; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; @@ -42,7 +41,8 @@ import org.opensearch.client.Client; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; -import org.opensearch.tasks.TaskId; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.tasks.TaskId; import org.opensearch.tasks.TaskInfo; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/TransportUpdateByQueryAction.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/TransportUpdateByQueryAction.java index 1ea9ec5fb7beb..1af9b62d72fd4 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/TransportUpdateByQueryAction.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/TransportUpdateByQueryAction.java @@ -33,7 +33,6 @@ package org.opensearch.index.reindex; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.support.ActionFilters; 
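The two reindex.remote.retry.* settings registered in TransportReindexAction above are declared Property.Dynamic, so the retry budget for remote reindex can be tuned on a live cluster without a restart. A minimal sketch of such an update, assuming the standard cluster-settings admin client; the helper class and the chosen values are illustrative only, kept within the declared 50ms to 5s and 1 to 100 bounds:

    import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
    import org.opensearch.client.Client;
    import org.opensearch.common.settings.Settings;

    final class RemoteReindexRetryTuning {
        // Hypothetical helper: widen the remote-reindex retry policy at runtime.
        static void relaxRetries(Client client) {
            ClusterUpdateSettingsRequest update = new ClusterUpdateSettingsRequest().transientSettings(
                Settings.builder()
                    .put("reindex.remote.retry.initial_backoff", "1s") // default is 500ms
                    .put("reindex.remote.retry.max_count", 30) // default is 15
                    .build()
            );
            client.admin().cluster().updateSettings(update).actionGet();
        }
    }

doExecute then reads the effective values from clusterService.getClusterSettings() for every request that carries remote info, as the hunk above shows.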
import org.opensearch.action.support.HandledTransportAction; @@ -42,6 +41,7 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.IndexFieldMapper; diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteRequestBuilders.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteRequestBuilders.java index 511c44ae3c2b6..7bf24e4b67ec4 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteRequestBuilders.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteRequestBuilders.java @@ -38,16 +38,15 @@ import org.opensearch.Version; import org.opensearch.action.search.SearchRequest; import org.opensearch.client.Request; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.search.sort.FieldSortBuilder; import org.opensearch.search.sort.SortBuilder; @@ -181,7 +180,7 @@ static Request initialSearch(SearchRequest searchRequest, BytesReference query, } entity.endObject(); - request.setJsonEntity(Strings.toString(entity)); + request.setJsonEntity(entity.toString()); } catch (IOException e) { throw new OpenSearchException("unexpected error building entity", e); } @@ -246,7 +245,7 @@ static Request scroll(String scroll, TimeValue keepAlive, Version remoteVersion) try (XContentBuilder entity = JsonXContent.contentBuilder()) { entity.startObject().field("scroll_id", scroll).endObject(); - request.setJsonEntity(Strings.toString(entity)); + request.setJsonEntity(entity.toString()); } catch (IOException e) { throw new OpenSearchException("failed to build scroll entity", e); } @@ -263,7 +262,7 @@ static Request clearScroll(String scroll, Version remoteVersion) { } try (XContentBuilder entity = JsonXContent.contentBuilder()) { entity.startObject().array("scroll_id", scroll).endObject(); - request.setJsonEntity(Strings.toString(entity)); + request.setJsonEntity(entity.toString()); } catch (IOException e) { throw new OpenSearchException("failed to build clear scroll entity", e); } diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteResponseParsers.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteResponseParsers.java index aaca04641d76a..981a22d4e7945 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteResponseParsers.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteResponseParsers.java @@ -35,10 +35,10 @@ import org.apache.lucene.search.TotalHits; import org.opensearch.LegacyESVersion; import org.opensearch.Version; -import 
org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.common.collect.Tuple; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; -import org.opensearch.common.collect.Tuple; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.MediaType; diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSource.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSource.java index 3c305c2a8239d..accaa28283abd 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSource.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSource.java @@ -50,22 +50,25 @@ import org.opensearch.client.ResponseListener; import org.opensearch.client.RestClient; import org.opensearch.common.Nullable; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.reindex.RejectAwareActionListener; +import org.opensearch.index.reindex.RetryListener; import org.opensearch.index.reindex.ScrollableHitSource; -import org.opensearch.core.rest.RestStatus; import org.opensearch.threadpool.ThreadPool; import java.io.IOException; import java.io.InputStream; +import java.net.ConnectException; +import java.util.Arrays; import java.util.function.BiFunction; import java.util.function.Consumer; @@ -99,21 +102,29 @@ public RemoteScrollableHitSource( @Override protected void doStart(RejectAwareActionListener<Response> searchListener) { - lookupRemoteVersion(RejectAwareActionListener.withResponseHandler(searchListener, version -> { + logger.info("Starting remote reindex for {}", Arrays.toString(searchRequest.indices())); + lookupRemoteVersion(RejectAwareActionListener.wrap(version -> { remoteVersion = version; - execute( + logger.trace("Starting initial search"); + executeWithRetries( RemoteRequestBuilders.initialSearch(searchRequest, query, remoteVersion), RESPONSE_PARSER, RejectAwareActionListener.withResponseHandler(searchListener, r -> onStartResponse(searchListener, r)) ); - })); + // Skipping searchListener::onRejection (used for retries) for the remote source, as retries are configured at the + // request (scroll) level. + }, searchListener::onFailure, searchListener::onFailure)); } void lookupRemoteVersion(RejectAwareActionListener<Version> listener) { + logger.trace("Checking version for remote domain"); + // We skip retries for the first call to the remote cluster so that we fail fast and respond back immediately + // instead of retrying for a longer duration.
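+ // Concretely, a failure on this root "GET /" call lands on searchListener::onFailure whether it arrives via
+ // listener.onFailure or via listener.onRejection (for a 429, 5xx, or ConnectException), because both the
+ // failure and rejection arms of the wrap(...) in doStart point there; only the initial search and the
+ // subsequent scroll requests go through executeWithRetries.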
execute(new Request("GET", ""), MAIN_ACTION_PARSER, listener); } private void onStartResponse(RejectAwareActionListener<Response> searchListener, Response response) { + logger.trace("On initial search response"); if (Strings.hasLength(response.getScrollId()) && response.getHits().isEmpty()) { logger.debug("First response looks like a scan response. Jumping right to the second. scroll=[{}]", response.getScrollId()); doStartNextScroll(response.getScrollId(), timeValueMillis(0), searchListener); @@ -124,12 +135,14 @@ private void onStartResponse(RejectAwareActionListener<Response> searchListener, @Override protected void doStartNextScroll(String scrollId, TimeValue extraKeepAlive, RejectAwareActionListener<Response> searchListener) { + logger.trace("Starting next scroll call"); TimeValue keepAlive = timeValueNanos(searchRequest.scroll().keepAlive().nanos() + extraKeepAlive.nanos()); - execute(RemoteRequestBuilders.scroll(scrollId, keepAlive, remoteVersion), RESPONSE_PARSER, searchListener); + executeWithRetries(RemoteRequestBuilders.scroll(scrollId, keepAlive, remoteVersion), RESPONSE_PARSER, searchListener); } @Override protected void clearScroll(String scrollId, Runnable onCompletion) { + logger.debug("Clearing the scrollID {}", scrollId); client.performRequestAsync(RemoteRequestBuilders.clearScroll(scrollId, remoteVersion), new ResponseListener() { @Override public void onSuccess(org.opensearch.client.Response response) { @@ -180,17 +193,31 @@ protected void cleanup(Runnable onCompletion) { }); } + private void executeWithRetries( + Request request, + BiFunction<XContentParser, MediaType, Response> parser, + RejectAwareActionListener<Response> childListener + ) { + execute(request, parser, new RetryListener(logger, threadPool, backoffPolicy, r -> { + logger.debug("Retrying execute request {}", request.getEndpoint()); + countSearchRetry.run(); + execute(request, parser, r); + }, childListener)); + } + private <T> void execute( Request request, BiFunction<XContentParser, MediaType, T> parser, RejectAwareActionListener<? 
super T> listener ) { + logger.trace("Executing http request to remote cluster {}", request.getEndpoint()); // Preserve the thread context so headers survive after the call java.util.function.Supplier<ThreadContext.StoredContext> contextSupplier = threadPool.getThreadContext().newRestorableContext(true); try { client.performRequestAsync(request, new ResponseListener() { @Override public void onSuccess(org.opensearch.client.Response response) { + logger.trace("Successfully got response from the remote"); // Restore the thread context to get the precious headers try (ThreadContext.StoredContext ctx = contextSupplier.get()) { assert ctx != null; // eliminates compiler warning @@ -205,7 +232,7 @@ public void onSuccess(org.opensearch.client.Response response) { } if (mediaType == null) { try { - logger.debug("Response didn't include Content-Type: " + bodyMessage(response.getEntity())); + logger.error("Response didn't include Content-Type: " + bodyMessage(response.getEntity())); throw new OpenSearchException( "Response didn't include supported Content-Type, remote is likely not an OpenSearch instance" ); @@ -237,22 +264,28 @@ public void onSuccess(org.opensearch.client.Response response) { public void onFailure(Exception e) { try (ThreadContext.StoredContext ctx = contextSupplier.get()) { assert ctx != null; // eliminates compiler warning + logger.debug("Received response failure {}", e.getMessage()); if (e instanceof ResponseException) { ResponseException re = (ResponseException) e; int statusCode = re.getResponse().getStatusLine().getStatusCode(); e = wrapExceptionToPreserveStatus(statusCode, re.getResponse().getEntity(), re); - if (RestStatus.TOO_MANY_REQUESTS.getStatus() == statusCode) { + // retry all 5xx & 429s. + if (RestStatus.TOO_MANY_REQUESTS.getStatus() == statusCode + || statusCode >= RestStatus.INTERNAL_SERVER_ERROR.getStatus()) { listener.onRejection(e); return; } + } else if (e instanceof ConnectException) { + listener.onRejection(e); + return; } else if (e instanceof ContentTooLongException) { e = new IllegalArgumentException( "Remote responded with a chunk that was too large. Use a smaller batch size.", e ); } - listener.onFailure(e); } + listener.onFailure(e); } }); } catch (Exception e) { diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/spi/ReindexRestInterceptorProvider.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/spi/ReindexRestInterceptorProvider.java index 0646c9b5d8705..d716c85fc47cf 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/spi/ReindexRestInterceptorProvider.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/spi/ReindexRestInterceptorProvider.java @@ -5,12 +5,12 @@ package org.opensearch.index.reindex.spi; -import java.util.Optional; - import org.apache.hc.core5.http.HttpRequestInterceptor; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.index.reindex.ReindexRequest; +import java.util.Optional; + public interface ReindexRestInterceptorProvider { /** * @param request Reindex request. 
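The onFailure handler above draws the retry boundary for remote reindex: HTTP 429, every 5xx response, and connection-level failures are funneled to listener.onRejection so that the RetryListener installed by executeWithRetries can re-issue the request under the configured backoff policy, while anything else fails the task immediately (ContentTooLongException is deliberately terminal and is rewrapped with a hint to use a smaller batch size). A self-contained sketch of that classification, for illustration only; isRetryable is not a method in this codebase:

    import java.net.ConnectException;

    import org.opensearch.client.ResponseException;
    import org.opensearch.core.rest.RestStatus;

    final class RemoteFailures {
        // Mirrors the decision in RemoteScrollableHitSource#execute: HTTP 429, all 5xx
        // responses, and connect-level failures are treated as transient and retried.
        static boolean isRetryable(Exception e) {
            if (e instanceof ResponseException) {
                int statusCode = ((ResponseException) e).getResponse().getStatusLine().getStatusCode();
                return statusCode == RestStatus.TOO_MANY_REQUESTS.getStatus()
                    || statusCode >= RestStatus.INTERNAL_SERVER_ERROR.getStatus();
            }
            return e instanceof ConnectException;
        }
    }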
diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/spi/RemoteReindexExtension.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/spi/RemoteReindexExtension.java index 22d560b19c699..f98fd00eeb85a 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/spi/RemoteReindexExtension.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/spi/RemoteReindexExtension.java @@ -5,7 +5,7 @@ package org.opensearch.index.reindex.spi; -import org.opensearch.action.ActionListener; +import org.opensearch.core.action.ActionListener; import org.opensearch.index.reindex.BulkByScrollResponse; import org.opensearch.index.reindex.ReindexModulePlugin; import org.opensearch.index.reindex.ReindexRequest; diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollActionScriptTestCase.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollActionScriptTestCase.java index 671faef6c5545..1aa1e7e13ea97 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollActionScriptTestCase.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollActionScriptTestCase.java @@ -32,7 +32,6 @@ package org.opensearch.index.reindex; -import org.junit.Before; import org.opensearch.action.ActionRequest; import org.opensearch.action.delete.DeleteRequest; import org.opensearch.action.index.IndexRequest; @@ -40,6 +39,7 @@ import org.opensearch.index.reindex.AbstractAsyncBulkByScrollAction.RequestWrapper; import org.opensearch.script.ScriptService; import org.opensearch.script.UpdateScript; +import org.junit.Before; import java.util.Collections; import java.util.Map; diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/AsyncBulkByScrollActionTests.java index deb78edf2f01e..8ddc1ff778982 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/AsyncBulkByScrollActionTests.java @@ -33,13 +33,11 @@ package org.opensearch.index.reindex; import org.apache.lucene.search.TotalHits; -import org.opensearch.OpenSearchException; import org.opensearch.ExceptionsHelper; +import org.opensearch.OpenSearchException; import org.opensearch.Version; -import org.opensearch.action.ActionType; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; -import org.opensearch.action.ActionResponse; +import org.opensearch.action.ActionType; import org.opensearch.action.DocWriteRequest; import org.opensearch.action.DocWriteResponse; import org.opensearch.action.DocWriteResponse.Result; @@ -71,23 +69,25 @@ import org.opensearch.client.ParentTaskAssigningClient; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.CheckedConsumer; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.index.Index; +import 
org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.engine.VersionConflictEngineException; import org.opensearch.index.reindex.ScrollableHitSource.Hit; import org.opensearch.index.reindex.ScrollableHitSource.SearchFailure; -import org.opensearch.core.index.shard.ShardId; -import org.opensearch.core.rest.RestStatus; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; import org.opensearch.search.internal.InternalSearchResponse; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; import org.opensearch.tasks.TaskManager; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.client.NoOpClient; @@ -121,7 +121,6 @@ import static java.util.Collections.singleton; import static java.util.Collections.singletonList; import static java.util.Collections.synchronizedSet; -import static org.apache.lucene.tests.util.TestUtil.randomSimpleString; import static org.opensearch.action.bulk.BackoffPolicy.constantBackoff; import static org.opensearch.common.unit.TimeValue.timeValueMillis; import static org.opensearch.common.unit.TimeValue.timeValueSeconds; @@ -135,6 +134,7 @@ import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.apache.lucene.tests.util.TestUtil.randomSimpleString; public class AsyncBulkByScrollActionTests extends OpenSearchTestCase { private MyMockClient client; @@ -448,7 +448,7 @@ protected AbstractAsyncBulkByScrollAction.RequestWrapper<?> buildRequest(Hit doc } }; ScrollableHitSource.BasicHit hit = new ScrollableHitSource.BasicHit("index", "id", 0); - hit.setSource(new BytesArray("{}"), XContentType.JSON); + hit.setSource(new BytesArray("{}"), MediaTypeRegistry.JSON); ScrollableHitSource.Response response = new ScrollableHitSource.Response(false, emptyList(), 1, singletonList(hit), null); simulateScrollResponse(action, System.nanoTime(), 0, response); ExecutionException e = expectThrows(ExecutionException.class, () -> listener.get()); diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/CancelTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/CancelTests.java index 26fcfd226371f..1ad5b385d5676 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/CancelTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/CancelTests.java @@ -35,22 +35,22 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionFuture; import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.opensearch.action.ingest.DeletePipelineRequest; +import org.opensearch.common.action.ActionFuture; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.tasks.TaskCancelledException; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexModule; import org.opensearch.index.engine.Engine; import org.opensearch.index.engine.Engine.Operation.Origin; import 
org.opensearch.index.query.QueryBuilders; import org.opensearch.index.shard.IndexingOperationListener; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.ingest.IngestTestPlugin; import org.opensearch.plugins.Plugin; -import org.opensearch.tasks.TaskCancelledException; import org.opensearch.tasks.TaskInfo; import org.hamcrest.Matcher; import org.junit.Before; @@ -263,7 +263,7 @@ public void testUpdateByQueryCancel() throws Exception { + " } ]\n" + "}" ); - assertAcked(client().admin().cluster().preparePutPipeline("set-processed", pipeline, XContentType.JSON).get()); + assertAcked(client().admin().cluster().preparePutPipeline("set-processed", pipeline, MediaTypeRegistry.JSON).get()); testCancel(UpdateByQueryAction.NAME, updateByQuery().setPipeline("set-processed").source(INDEX), (response, total, modified) -> { assertThat(response, matcher().updated(modified).reasonCancelled(equalTo("by user request"))); @@ -307,7 +307,7 @@ public void testUpdateByQueryCancelWithWorkers() throws Exception { + " } ]\n" + "}" ); - assertAcked(client().admin().cluster().preparePutPipeline("set-processed", pipeline, XContentType.JSON).get()); + assertAcked(client().admin().cluster().preparePutPipeline("set-processed", pipeline, MediaTypeRegistry.JSON).get()); testCancel( UpdateByQueryAction.NAME, diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/ClientScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/ClientScrollableHitSourceTests.java index 46113971d537a..9a6eb7e7fc2ae 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/ClientScrollableHitSourceTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/ClientScrollableHitSourceTests.java @@ -33,9 +33,7 @@ package org.opensearch.index.reindex; import org.apache.lucene.search.TotalHits; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; -import org.opensearch.action.ActionResponse; import org.opensearch.action.ActionType; import org.opensearch.action.bulk.BackoffPolicy; import org.opensearch.action.search.SearchAction; @@ -45,14 +43,16 @@ import org.opensearch.action.search.SearchScrollRequest; import org.opensearch.client.ParentTaskAssigningClient; import org.opensearch.client.support.AbstractClient; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.core.tasks.TaskId; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; import org.opensearch.search.internal.InternalSearchResponse; -import org.opensearch.tasks.TaskId; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -69,9 +69,9 @@ import java.util.stream.IntStream; import static java.util.Collections.emptyMap; -import static org.apache.lucene.tests.util.TestUtil.randomSimpleString; import static org.opensearch.common.unit.TimeValue.timeValueSeconds; import static org.hamcrest.Matchers.instanceOf; +import static org.apache.lucene.tests.util.TestUtil.randomSimpleString; public class ClientScrollableHitSourceTests extends OpenSearchTestCase { diff --git 
a/modules/reindex/src/test/java/org/opensearch/index/reindex/MultiCodecReindexTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/MultiCodecReindexTests.java new file mode 100644 index 0000000000000..53a0545fd2ff7 --- /dev/null +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/MultiCodecReindexTests.java @@ -0,0 +1,160 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.reindex; + +import org.opensearch.action.admin.indices.segments.IndicesSegmentsRequest; +import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.opensearch.action.support.ActiveShardCount; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.MergePolicyProvider; +import org.opensearch.index.engine.Segment; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.InternalSettingsPlugin; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static java.util.stream.Collectors.toList; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +public class MultiCodecReindexTests extends ReindexTestCase { + final static Map<String, String> codecMap = Map.of( + "best_compression", + "BEST_COMPRESSION", + "zlib", + "BEST_COMPRESSION", + "default", + "BEST_SPEED", + "lz4", + "BEST_SPEED" + ); + final static String[] codecChoices = codecMap.keySet().toArray(String[]::new); + + @Override + protected Collection<Class<? 
extends Plugin>> nodePlugins() { + return List.of(InternalSettingsPlugin.class, ReindexModulePlugin.class); + } + + public void testReindexingMultipleCodecs() throws InterruptedException, ExecutionException { + for (Map.Entry<String, String> candidate : codecMap.entrySet()) { + final int nbDocs = randomIntBetween(2, 5); + + final String destCodec = candidate.getKey(); + final String destCodecMode = candidate.getValue(); + + final String index = "test-index-" + destCodec; + final String destIndex = "dest-index-" + destCodec; + + // create source index + createIndex( + index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.codec", randomFrom(codecChoices)) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) + .build() + ); + ensureGreen(index); + + // index using all codecs + for (String codec : codecMap.keySet()) { + useCodec(index, codec); + ingestDocs(index, nbDocs); + } + + assertTrue( + getSegments(index).stream() + .flatMap(s -> s.getAttributes().values().stream()) + .collect(Collectors.toSet()) + .containsAll(codecMap.values()) + ); + + // create destination index with destination codec + createIndex( + destIndex, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.codec", destCodec) + .build() + ); + ensureGreen(destIndex); + + // perform reindex + BulkByScrollResponse response = reindex().source(index) + .destination(destIndex) + .refresh(true) + .waitForActiveShards(ActiveShardCount.ONE) + .get(); + final int expectedResponseSize = codecMap.size() * nbDocs; + + // assertions + assertEquals(0, response.getNoops()); + assertEquals(1, response.getBatches()); + assertEquals(0, response.getDeleted()); + assertEquals(0, response.getVersionConflicts()); + assertEquals(0, response.getBulkFailures().size()); + assertEquals(0, response.getSearchFailures().size()); + + assertEquals(expectedResponseSize, response.getTotal()); + assertEquals(expectedResponseSize, response.getCreated()); + + assertTrue(response.getTook().getMillis() > 0); + assertTrue(getSegments(destIndex).stream().allMatch(segment -> segment.attributes.containsValue(destCodecMode))); + } + } + + private void useCodec(String index, String codec) throws ExecutionException, InterruptedException { + assertAcked(client().admin().indices().prepareClose(index).setWaitForActiveShards(1)); + + assertAcked( + client().admin() + .indices() + .updateSettings(new UpdateSettingsRequest(index).settings(Settings.builder().put("index.codec", codec))) + .get() + ); + + assertAcked(client().admin().indices().prepareOpen(index).setWaitForActiveShards(1)); + } + + private void ingestDocs(String index, int nbDocs) throws InterruptedException { + indexRandom( + randomBoolean(), + false, + randomBoolean(), + IntStream.range(0, nbDocs) + .mapToObj(i -> client().prepareIndex(index).setId(UUID.randomUUID().toString()).setSource("num", i)) + .collect(toList()) + ); + + flushAndRefresh(index); + } + + private ArrayList<Segment> getSegments(String index) { + return new ArrayList<>( + client().admin() + .indices() + .segments(new IndicesSegmentsRequest(index)) + .actionGet() + .getIndices() + .get(index) + .getShards() + .get(0) + .getShards()[0].getSegments() + ); + } +} diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexFromRemoteBuildRestClientTests.java 
b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexFromRemoteBuildRestClientTests.java index 0355bfcab02de..2e14df4628283 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexFromRemoteBuildRestClientTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexFromRemoteBuildRestClientTests.java @@ -34,9 +34,9 @@ import org.opensearch.client.RestClient; import org.opensearch.client.RestClientBuilderTestCase; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; import org.opensearch.env.TestEnvironment; import org.opensearch.index.query.MatchAllQueryBuilder; diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexFromRemoteWithAuthTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexFromRemoteWithAuthTests.java index 97f43b9439408..0d3cf208aabfb 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexFromRemoteWithAuthTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexFromRemoteWithAuthTests.java @@ -34,9 +34,7 @@ import org.opensearch.OpenSearchSecurityException; import org.opensearch.OpenSearchStatusException; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; -import org.opensearch.action.ActionResponse; import org.opensearch.action.admin.cluster.node.info.NodeInfo; import org.opensearch.action.search.SearchAction; import org.opensearch.action.support.ActionFilter; @@ -46,12 +44,15 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.SetOnce; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.network.NetworkModule; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; @@ -60,7 +61,6 @@ import org.opensearch.plugins.Plugin; import org.opensearch.repositories.RepositoriesService; import org.opensearch.rest.RestHeaderDefinition; -import org.opensearch.core.rest.RestStatus; import org.opensearch.script.ScriptService; import org.opensearch.tasks.Task; import org.opensearch.test.OpenSearchSingleNodeTestCase; diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexMetadataTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexMetadataTests.java index 291325a3d8a5b..82619f1c3959b 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexMetadataTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexMetadataTests.java @@ -32,8 +32,8 @@ package org.opensearch.index.reindex; -import org.opensearch.index.reindex.ScrollableHitSource.Hit; 
import org.opensearch.action.index.IndexRequest; +import org.opensearch.index.reindex.ScrollableHitSource.Hit; /** * Index-by-search test for ttl, timestamp, and routing. diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexRestClientSslTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexRestClientSslTests.java index 4bde5f353f09e..1123ae4623300 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexRestClientSslTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexRestClientSslTests.java @@ -36,15 +36,16 @@ import com.sun.net.httpserver.HttpsExchange; import com.sun.net.httpserver.HttpsParameters; import com.sun.net.httpserver.HttpsServer; + import org.opensearch.client.Request; import org.opensearch.client.Response; import org.opensearch.client.RestClient; import org.opensearch.common.SuppressForbidden; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Settings; import org.opensearch.common.ssl.PemKeyConfig; import org.opensearch.common.ssl.PemTrustConfig; +import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.env.Environment; import org.opensearch.env.TestEnvironment; import org.opensearch.test.OpenSearchTestCase; @@ -60,6 +61,7 @@ import javax.net.ssl.TrustManager; import javax.net.ssl.X509ExtendedKeyManager; import javax.net.ssl.X509ExtendedTrustManager; + import java.io.IOException; import java.net.InetSocketAddress; import java.nio.file.Path; diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexScriptTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexScriptTests.java index 85f0c3c24abee..1700a25330463 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexScriptTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexScriptTests.java @@ -35,10 +35,11 @@ import org.opensearch.action.index.IndexRequest; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.script.ScriptService; -import org.mockito.Mockito; import java.util.Map; +import org.mockito.Mockito; + import static org.hamcrest.Matchers.containsString; /** diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexSourceTargetValidationTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexSourceTargetValidationTests.java index a554c25a47f65..026017298fc5b 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexSourceTargetValidationTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexSourceTargetValidationTests.java @@ -44,12 +44,12 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.Nullable; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.indices.SystemIndices; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.indices.SystemIndices; import org.opensearch.test.OpenSearchTestCase; import java.util.HashMap; diff --git 
a/modules/reindex/src/test/java/org/opensearch/index/reindex/RestReindexActionTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/RestReindexActionTests.java index 651ccd47df7bd..19859fea86bd6 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/RestReindexActionTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/RestReindexActionTests.java @@ -32,12 +32,12 @@ package org.opensearch.index.reindex; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.test.rest.FakeRestRequest; import org.opensearch.test.rest.RestActionTestCase; import org.junit.Before; @@ -88,14 +88,14 @@ public void testPipelineQueryParameterIsError() throws IOException { public void testSetScrollTimeout() throws IOException { { FakeRestRequest.Builder requestBuilder = new FakeRestRequest.Builder(xContentRegistry()); - requestBuilder.withContent(new BytesArray("{}"), XContentType.JSON); + requestBuilder.withContent(new BytesArray("{}"), MediaTypeRegistry.JSON); ReindexRequest request = action.buildRequest(requestBuilder.build(), new NamedWriteableRegistry(Collections.emptyList())); assertEquals(AbstractBulkByScrollRequest.DEFAULT_SCROLL_TIMEOUT, request.getScrollTime()); } { FakeRestRequest.Builder requestBuilder = new FakeRestRequest.Builder(xContentRegistry()); requestBuilder.withParams(singletonMap("scroll", "10m")); - requestBuilder.withContent(new BytesArray("{}"), XContentType.JSON); + requestBuilder.withContent(new BytesArray("{}"), MediaTypeRegistry.JSON); ReindexRequest request = action.buildRequest(requestBuilder.build(), new NamedWriteableRegistry(Collections.emptyList())); assertEquals("10m", request.getScrollTime().toString()); } diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/RethrottleTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/RethrottleTests.java index 6bedd59515e45..c48f55a4ab08d 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/RethrottleTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/RethrottleTests.java @@ -32,14 +32,14 @@ package org.opensearch.index.reindex; -import org.opensearch.OpenSearchException; import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionFuture; +import org.opensearch.OpenSearchException; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.opensearch.action.admin.cluster.node.tasks.list.TaskGroup; import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.common.action.ActionFuture; +import org.opensearch.core.tasks.TaskId; import org.opensearch.index.query.QueryBuilders; -import org.opensearch.tasks.TaskId; import java.util.ArrayList; import java.util.List; diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/RetryTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/RetryTests.java index e239018e0ce31..b2362d243d647 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/RetryTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/RetryTests.java @@ -32,7 
+32,6 @@ package org.opensearch.index.reindex; -import org.opensearch.action.ActionFuture; import org.opensearch.action.admin.cluster.node.info.NodeInfo; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.opensearch.action.bulk.BackoffPolicy; @@ -40,9 +39,10 @@ import org.opensearch.action.bulk.BulkResponse; import org.opensearch.action.bulk.Retry; import org.opensearch.client.Client; -import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.http.HttpInfo; import org.opensearch.index.query.QueryBuilders; diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/RoundTripTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/RoundTripTests.java index 4508bf59d6879..1d10990dc9964 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/RoundTripTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/RoundTripTests.java @@ -33,16 +33,16 @@ package org.opensearch.index.reindex; import org.opensearch.Version; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.lucene.uid.Versions; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.lucene.uid.Versions; -import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.tasks.TaskId; import org.opensearch.script.Script; import org.opensearch.script.ScriptType; -import org.opensearch.tasks.TaskId; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -50,8 +50,8 @@ import java.util.HashMap; import java.util.Map; -import static org.apache.lucene.tests.util.TestUtil.randomSimpleString; import static org.opensearch.common.unit.TimeValue.parseTimeValue; +import static org.apache.lucene.tests.util.TestUtil.randomSimpleString; /** * Round trip tests for all {@link Writeable} things declared in this plugin. 
diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/TransportRethrottleActionTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/TransportRethrottleActionTests.java index 6456aa0af9aac..44b95244c9507 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/TransportRethrottleActionTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/TransportRethrottleActionTests.java @@ -32,31 +32,32 @@ package org.opensearch.index.reindex; -import org.opensearch.action.ActionListener; import org.opensearch.action.FailedNodeException; import org.opensearch.action.TaskOperationFailure; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.opensearch.client.Client; -import org.opensearch.tasks.TaskId; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.tasks.TaskId; import org.opensearch.tasks.TaskInfo; import org.opensearch.test.OpenSearchTestCase; import org.hamcrest.Matcher; import org.junit.Before; -import org.mockito.ArgumentCaptor; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.function.Consumer; +import org.mockito.ArgumentCaptor; + import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; import static org.opensearch.common.unit.TimeValue.timeValueMillis; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.theInstance; -import static org.mockito.Mockito.eq; import static org.mockito.Mockito.atMost; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/UpdateByQueryWhileModifyingTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/UpdateByQueryWhileModifyingTests.java index f203625dcfc44..b0fe9e55d3ba2 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/UpdateByQueryWhileModifyingTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/UpdateByQueryWhileModifyingTests.java @@ -40,10 +40,10 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; -import static org.apache.lucene.tests.util.TestUtil.randomSimpleString; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; +import static org.apache.lucene.tests.util.TestUtil.randomSimpleString; /** * Mutates a document while update-by-query-ing it and asserts that the mutation diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteRequestBuildersTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteRequestBuildersTests.java index 54e455d9e676e..ff24bd44f8ef3 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteRequestBuildersTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteRequestBuildersTests.java @@ -37,10 +37,10 @@ import org.opensearch.Version; import org.opensearch.action.search.SearchRequest; import org.opensearch.client.Request; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.Streams; import org.opensearch.common.unit.TimeValue; +import 
org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.test.OpenSearchTestCase; diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSourceTests.java index ebbd2da776ace..8aa66fc3cfd8c 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSourceTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSourceTests.java @@ -32,31 +32,6 @@ package org.opensearch.index.reindex.remote; -import org.opensearch.LegacyESVersion; -import org.opensearch.OpenSearchStatusException; -import org.opensearch.Version; -import org.opensearch.action.bulk.BackoffPolicy; -import org.opensearch.action.search.SearchRequest; -import org.opensearch.client.RestClient; -import org.opensearch.client.http.HttpUriRequestProducer; -import org.opensearch.client.nio.HeapBufferedAsyncResponseConsumer; -import org.opensearch.core.common.ParsingException; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.util.FileSystemUtils; -import org.opensearch.common.io.Streams; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.concurrent.OpenSearchExecutors; -import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; -import org.opensearch.index.reindex.RejectAwareActionListener; -import org.opensearch.index.reindex.ScrollableHitSource; -import org.opensearch.index.reindex.ScrollableHitSource.Response; -import org.opensearch.core.rest.RestStatus; -import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.threadpool.TestThreadPool; -import org.opensearch.threadpool.ThreadPool; import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder; import org.apache.hc.core5.concurrent.FutureCallback; @@ -67,9 +42,12 @@ import org.apache.hc.core5.http.ContentType; import org.apache.hc.core5.http.HttpEntity; import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ProtocolVersion; import org.apache.hc.core5.http.io.entity.InputStreamEntity; import org.apache.hc.core5.http.io.entity.StringEntity; import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.message.RequestLine; +import org.apache.hc.core5.http.message.StatusLine; import org.apache.hc.core5.http.nio.AsyncPushConsumer; import org.apache.hc.core5.http.nio.AsyncRequestProducer; import org.apache.hc.core5.http.nio.AsyncResponseConsumer; @@ -77,12 +55,39 @@ import org.apache.hc.core5.http.protocol.HttpContext; import org.apache.hc.core5.io.CloseMode; import org.apache.hc.core5.reactor.IOReactorStatus; +import org.opensearch.LegacyESVersion; +import org.opensearch.OpenSearchStatusException; +import org.opensearch.Version; +import org.opensearch.action.bulk.BackoffPolicy; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.client.ResponseException; +import org.opensearch.client.RestClient; +import org.opensearch.client.http.HttpUriRequestProducer; +import org.opensearch.client.nio.HeapBufferedAsyncResponseConsumer; +import 
org.opensearch.common.io.Streams; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.util.FileSystemUtils; +import org.opensearch.index.reindex.RejectAwareActionListener; +import org.opensearch.index.reindex.ScrollableHitSource; +import org.opensearch.index.reindex.ScrollableHitSource.Response; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; import org.junit.After; import org.junit.Before; import java.io.IOException; import java.io.InputStreamReader; import java.io.UncheckedIOException; +import java.net.ConnectException; import java.net.URL; import java.nio.charset.StandardCharsets; import java.util.Queue; @@ -90,10 +95,13 @@ import java.util.concurrent.Future; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; import java.util.stream.Stream; +import org.mockito.Mockito; + import static org.opensearch.common.unit.TimeValue.timeValueMillis; import static org.opensearch.common.unit.TimeValue.timeValueMinutes; import static org.hamcrest.Matchers.empty; @@ -515,7 +523,7 @@ public void testInvalidJsonThinksRemoteIsNotES() throws IOException { Exception e = expectThrows(RuntimeException.class, () -> sourceWithMockedRemoteCall("some_text.txt").start()); assertEquals( "Error parsing the response, remote is likely not an OpenSearch instance", - e.getCause().getCause().getCause().getMessage() + e.getCause().getCause().getCause().getCause().getMessage() ); } @@ -524,7 +532,7 @@ public void testUnexpectedJsonThinksRemoteIsNotES() throws IOException { Exception e = expectThrows(RuntimeException.class, () -> sourceWithMockedRemoteCall("main/2_3_3.json").start()); assertEquals( "Error parsing the response, remote is likely not an OpenSearch instance", - e.getCause().getCause().getCause().getMessage() + e.getCause().getCause().getCause().getCause().getMessage() ); } @@ -702,4 +710,105 @@ private static ClassicHttpRequest getRequest(AsyncRequestProducer requestProduce assertThat(requestProducer, instanceOf(HttpUriRequestProducer.class)); return ((HttpUriRequestProducer) requestProducer).getRequest(); } + + RemoteScrollableHitSource createRemoteSourceWithFailure( + boolean shouldMockRemoteVersion, + Exception failure, + AtomicInteger invocationCount + ) { + CloseableHttpAsyncClient httpClient = new CloseableHttpAsyncClient() { + + @Override + public void close() throws IOException {} + + @Override + public void close(CloseMode closeMode) {} + + @Override + public void start() {} + + @Override + public void register(String hostname, String uriPattern, Supplier<AsyncPushConsumer> supplier) {} + + @Override + public void initiateShutdown() {} + + @Override + public IOReactorStatus getStatus() { + return null; + } + + @Override + protected <T> Future<T> doExecute( + HttpHost target, + AsyncRequestProducer requestProducer, + AsyncResponseConsumer<T> 
responseConsumer, + HandlerFactory<AsyncPushConsumer> pushHandlerFactory, + HttpContext context, + FutureCallback<T> callback + ) { + invocationCount.getAndIncrement(); + callback.failed(failure); + return null; + } + + @Override + public void awaitShutdown(org.apache.hc.core5.util.TimeValue waitTime) throws InterruptedException {} + }; + return sourceWithMockedClient(shouldMockRemoteVersion, httpClient); + } + + void verifyRetries(boolean shouldMockRemoteVersion, Exception failureResponse, boolean expectedToRetry) { + retriesAllowed = 5; + AtomicInteger invocations = new AtomicInteger(); + invocations.set(0); + RemoteScrollableHitSource source = createRemoteSourceWithFailure(shouldMockRemoteVersion, failureResponse, invocations); + + Throwable e = expectThrows(RuntimeException.class, source::start); + int expectedInvocations = 0; + if (shouldMockRemoteVersion) { + expectedInvocations += 1; // first search + if (expectedToRetry) expectedInvocations += retriesAllowed; + } else { + expectedInvocations = 1; // the first should fail and not trigger any retry. + } + + assertEquals(expectedInvocations, invocations.get()); + + // Unwrap some artifacts from the test + while (e.getMessage().equals("failed")) { + e = e.getCause(); + } + // There is an additional wrapper for ResponseException. + if (failureResponse instanceof ResponseException) { + e = e.getCause(); + } + + assertSame(failureResponse, e); + } + + ResponseException withResponseCode(int statusCode, String errorMsg) throws IOException { + org.opensearch.client.Response mockResponse = Mockito.mock(org.opensearch.client.Response.class); + Mockito.when(mockResponse.getEntity()).thenReturn(new StringEntity(errorMsg, ContentType.TEXT_PLAIN)); + Mockito.when(mockResponse.getStatusLine()).thenReturn(new StatusLine(new BasicClassicHttpResponse(statusCode, errorMsg))); + Mockito.when(mockResponse.getRequestLine()).thenReturn(new RequestLine("GET", "/", new ProtocolVersion("https", 1, 1))); + return new ResponseException(mockResponse); + } + + public void testRetryOnCallFailure() throws Exception { + // First call succeeds. Search calls failing with 5xxs and 429s should be retried but not 400s. + verifyRetries(true, withResponseCode(500, "Internal Server Error"), true); + verifyRetries(true, withResponseCode(429, "Too many requests"), true); + verifyRetries(true, withResponseCode(400, "Client Error"), false); + + // First call succeeds. Search calls that fail with exceptions other than ResponseException + verifyRetries(true, new ConnectException("blah"), true); // should retry connect exceptions.
+ verifyRetries(true, new RuntimeException("foobar"), false); + + // First call(remote version lookup) failed and no retries expected + verifyRetries(false, withResponseCode(500, "Internal Server Error"), false); + verifyRetries(false, withResponseCode(429, "Too many requests"), false); + verifyRetries(false, withResponseCode(400, "Client Error"), false); + verifyRetries(false, new ConnectException("blah"), false); + } } diff --git a/modules/repository-url/src/internalClusterTest/java/org/opensearch/repositories/url/URLSnapshotRestoreIT.java b/modules/repository-url/src/internalClusterTest/java/org/opensearch/repositories/url/URLSnapshotRestoreIT.java index 1bf461d67862b..9c61bca316a56 100644 --- a/modules/repository-url/src/internalClusterTest/java/org/opensearch/repositories/url/URLSnapshotRestoreIT.java +++ b/modules/repository-url/src/internalClusterTest/java/org/opensearch/repositories/url/URLSnapshotRestoreIT.java @@ -38,7 +38,7 @@ import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.plugin.repository.url.URLRepositoryModulePlugin; import org.opensearch.plugins.Plugin; import org.opensearch.repositories.fs.FsRepository; diff --git a/modules/repository-url/src/main/java/org/opensearch/common/blobstore/url/URLBlobContainer.java b/modules/repository-url/src/main/java/org/opensearch/common/blobstore/url/URLBlobContainer.java index b13a4d5a39a5b..02e858cb8d1f2 100644 --- a/modules/repository-url/src/main/java/org/opensearch/common/blobstore/url/URLBlobContainer.java +++ b/modules/repository-url/src/main/java/org/opensearch/common/blobstore/url/URLBlobContainer.java @@ -157,6 +157,7 @@ public void writeBlobAtomic(String blobName, InputStream inputStream, long blobS throw new UnsupportedOperationException("URL repository doesn't support this operation"); } + @SuppressWarnings("removal") @SuppressForbidden(reason = "We call connect in doPrivileged and provide SocketPermission") private static InputStream getInputStream(URL url) throws IOException { try { diff --git a/modules/repository-url/src/main/java/org/opensearch/common/blobstore/url/URLBlobStore.java b/modules/repository-url/src/main/java/org/opensearch/common/blobstore/url/URLBlobStore.java index fbfbf5e006fee..0fad0cbe21033 100644 --- a/modules/repository-url/src/main/java/org/opensearch/common/blobstore/url/URLBlobStore.java +++ b/modules/repository-url/src/main/java/org/opensearch/common/blobstore/url/URLBlobStore.java @@ -37,8 +37,8 @@ import org.opensearch.common.blobstore.BlobStore; import org.opensearch.common.blobstore.BlobStoreException; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import java.net.MalformedURLException; import java.net.URL; diff --git a/modules/repository-url/src/main/java/org/opensearch/repositories/url/URLRepository.java b/modules/repository-url/src/main/java/org/opensearch/repositories/url/URLRepository.java index 9e9d94c8e8fc0..4c8d8aab4532b 100644 --- a/modules/repository-url/src/main/java/org/opensearch/repositories/url/URLRepository.java +++ b/modules/repository-url/src/main/java/org/opensearch/repositories/url/URLRepository.java @@ -113,7 +113,7 @@ public URLRepository( 
ClusterService clusterService, RecoverySettings recoverySettings ) { - super(metadata, false, namedXContentRegistry, clusterService, recoverySettings); + super(metadata, namedXContentRegistry, clusterService, recoverySettings); if (URL_SETTING.exists(metadata.settings()) == false && REPOSITORIES_URL_SETTING.exists(environment.settings()) == false) { throw new RepositoryException(metadata.name(), "missing url"); diff --git a/modules/repository-url/src/test/java/org/opensearch/common/blobstore/url/URLBlobStoreTests.java b/modules/repository-url/src/test/java/org/opensearch/common/blobstore/url/URLBlobStoreTests.java index 90b75f50b16ad..0b62304270052 100644 --- a/modules/repository-url/src/test/java/org/opensearch/common/blobstore/url/URLBlobStoreTests.java +++ b/modules/repository-url/src/test/java/org/opensearch/common/blobstore/url/URLBlobStoreTests.java @@ -33,6 +33,7 @@ package org.opensearch.common.blobstore.url; import com.sun.net.httpserver.HttpServer; + import org.opensearch.common.SuppressForbidden; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobPath; diff --git a/modules/repository-url/src/test/java/org/opensearch/repositories/url/URLFixture.java b/modules/repository-url/src/test/java/org/opensearch/repositories/url/URLFixture.java index f9f2ecaca486d..b2dab1314f66d 100644 --- a/modules/repository-url/src/test/java/org/opensearch/repositories/url/URLFixture.java +++ b/modules/repository-url/src/test/java/org/opensearch/repositories/url/URLFixture.java @@ -31,9 +31,9 @@ package org.opensearch.repositories.url; -import org.opensearch.test.fixture.AbstractHttpFixture; import org.opensearch.common.SuppressForbidden; import org.opensearch.core.rest.RestStatus; +import org.opensearch.test.fixture.AbstractHttpFixture; import java.io.IOException; import java.nio.file.Files; diff --git a/modules/repository-url/src/yamlRestTest/java/org/opensearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java b/modules/repository-url/src/yamlRestTest/java/org/opensearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java index 705cbafd1bd3a..6536d474f5abc 100644 --- a/modules/repository-url/src/yamlRestTest/java/org/opensearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java +++ b/modules/repository-url/src/yamlRestTest/java/org/opensearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java @@ -34,21 +34,21 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; import org.opensearch.client.Request; import org.opensearch.client.Response; -import org.opensearch.common.Strings; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.support.XContentMapValues; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.repositories.fs.FsRepository; -import org.opensearch.core.rest.RestStatus; import org.opensearch.test.rest.yaml.ClientYamlTestCandidate; import org.opensearch.test.rest.yaml.OpenSearchClientYamlSuiteTestCase; -import org.apache.hc.core5.http.ContentType; -import org.apache.hc.core5.http.HttpEntity; -import 
org.apache.hc.core5.http.io.entity.StringEntity; import org.junit.Before; import java.io.IOException; @@ -144,7 +144,7 @@ private static HttpEntity buildRepositorySettings(final String type, final Setti builder.endObject(); } builder.endObject(); - return new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); + return new StringEntity(builder.toString(), ContentType.APPLICATION_JSON); } } } diff --git a/modules/search-pipeline-common/README.md b/modules/search-pipeline-common/README.md new file mode 100644 index 0000000000000..70615d846987b --- /dev/null +++ b/modules/search-pipeline-common/README.md @@ -0,0 +1,202 @@ +- [Search Pipelines](#search-pipelines) + - [Architecture](#architecture) + - [Search Processors](#search-processors) + - [Creating a Search Processor](#creating-a-search-processor) + - [Creating a Search Pipeline](#creating-a-search-pipeline) + +# Search Pipelines + +This README briefly covers the two types of search processors, explains how you can use them to create search pipelines, and walks through the creation of a new processor. + +## Architecture + +Search pipelines allow cluster operators to create and reuse [components](#search-processors) to transform search queries and results. + +With search pipelines, the operator can combine multiple [search processors](#search-processors) to create a transform which acts on the search request and/or search response. + +Search pipelines offer numerous benefits: + +1. search processors living in OpenSearch can be used by _all_ calling applications; +2. search pipeline operations occur inside the OpenSearch cluster, so large results can be processed before returning to the calling application\*; +3. search processors can be distributed in plugins to be shared with other OpenSearch users; +4. search pipelines only need to be modified once (and without changing or redeploying any calling applications) to have a change occur on all incoming queries\*\*; +5. search pipelines support standard APIs for accessing metrics and disaster recovery. + +*Within a cluster, results are passed using a more efficient, but version-specific binary protocol. You can pass result information back to a coordinator, allow it to post-process (e.g. rerank or collapse), and finally truncate it before sending it to the client over the less efficient but flexible JSON API. + +**For example, the `FilterQueryRequestProcessor` could be used to exclude search results immediately, without needing to make a code change in the application layer and deploy the change across your fleet. + +## Search Processors + +You can create many search pipelines by combining search processors in various orders. There are two types of search processors: + +1. search request processors which transform a request _before_ it is executed; +2. search response processors which transform the output of a request _after_ it is executed. + +You can find all existing search processors registered in `SearchPipelineCommonModulePlugin.java` and described on the documentation website. + +### Creating a search processor + +New search processors can be created in two different ways. + +Generally, a search processor can be created in your own `SearchPipelinePlugin`. This method is best when you are creating a unique search +processor for your niche application. This method should also be used when your processor relies on an outside service.
To get started creating a search processor in a `SearchPipelinePlugin`, you can use the [plugin template](https://github.com/opensearch-project/opensearch-plugin-template-java). + +Alternatively, if you think your processor may be valuable to _all_ OpenSearch users, you can follow these steps: + +1. Create a new class in `org.opensearch.search.pipeline.common`. This class will hold your new processor, and its name should indicate whether it is a request or response processor. For example, a response processor which deletes a target field could be called `DeleteFieldResponseProcessor`. + +2. Make the class extend the generic `AbstractProcessor` class as well as implement either the `SearchRequestProcessor` or `SearchResponseProcessor` interface, depending on what type of processor it is. In the `DeleteFieldResponseProcessor` example, this would look like: + +```public class DeleteFieldResponseProcessor extends AbstractProcessor implements SearchResponseProcessor``` + +3. Create the main functionality of your processor and implement the methods required by the implemented interface. This will be `SearchRequest processRequest(SearchRequest request) throws Exception;` for a search request processor or `SearchResponse processResponse(SearchRequest request, SearchResponse response) throws Exception;` for a search response processor. + +For the example `DeleteFieldResponseProcessor`, this will look like: + +``` +@Override +public SearchResponse processResponse(SearchRequest request, SearchResponse response) throws Exception { + + boolean foundField = false; + SearchHit[] hits = response.getHits().getHits(); + for (SearchHit hit : hits) { + + // Process each hit as desired + + if (hit.hasSource()) { + // Change hit source if needed + Map<String, Object> sourceAsMap = hit.getSourceAsMap(); + if (sourceAsMap.containsKey(field)) { + foundField = true; + // Handle source as map + } + } + } + + if (!foundField && !ignoreMissing) { + // Handle error scenarios + } + + return response; +} +``` + +4. Create a factory to parse processor-specific JSON configurations. These are used for constructing a processor instance. + +In the `DeleteFieldResponseProcessor`, this would look something like: + +``` +public static final class Factory implements Processor.Factory<SearchResponseProcessor> { + + /** + * Constructor for factory + */ + Factory() {} + + @Override + public DeleteFieldResponseProcessor create( + Map<String, Processor.Factory<SearchResponseProcessor>> processorFactories, + String tag, + String description, + boolean ignoreFailure, + Map<String, Object> config, + PipelineContext pipelineContext + ) throws Exception { + String field = ConfigurationUtils.readStringProperty(TYPE, tag, config, "field"); + boolean ignoreMissing = ConfigurationUtils.readBooleanProperty(TYPE, tag, config, "ignore_missing", false); + return new DeleteFieldResponseProcessor(tag, description, ignoreFailure, field, ignoreMissing); + } +} +``` + +In this example, we provide specific configurations for which field should be deleted and whether the processor should ignore attempts to remove a non-existent field. + +5. Add the new search processor to the `SearchPipelineCommonModulePlugin` getter for the corresponding processor type.
+ +For the `DeleteFieldResponseProcessor`, you would modify the response processor getter to have: + +``` +@Override +public Map<String, Processor.Factory<SearchResponseProcessor>> getResponseProcessors(Parameters parameters) { + return Map.of( + RenameFieldResponseProcessor.TYPE, + new RenameFieldResponseProcessor.Factory(), + DeleteFieldResponseProcessor.TYPE, + new DeleteFieldResponseProcessor.Factory() + ); +} +``` + +6. After creating a search processor, the processor is ready to be tested in a search pipeline. + +To test your new search processor, you can make use of the test [`SearchPipelineCommonYamlTestSuiteIT`](src/yamlRestTest/java/org/opensearch/search/pipeline/common). + +Following the format of the YAML files in [`rest-api-spec.test.search_pipeline`](src/yamlRestTest/resources/rest-api-spec/test/search_pipeline), you should be able to create your own YAML test file to exercise your new processor. + +To run the tests, from the root of the OpenSearch repository, you can run `./gradlew :modules:search-pipeline-common:yamlRestTest`. + +7. Finally, the processor is ready to be used in a cluster. + +To use the new processor, make sure the cluster is reloaded and that the new processor is accessible. + +The new processor should appear when calling `GET /_nodes/search_pipelines`. + +If the new processor is listed in the response, it is available for use in a search pipeline. + +## Creating a Search Pipeline + +To create a search pipeline, you must create an ordered list of search processors in the OpenSearch cluster. + +An example creation request is: + +``` +PUT /_search/pipeline/my_pipeline +{ + "request_processors": [ + { + "filter_query" : { + "tag" : "tag1", + "description" : "This processor is going to restrict to publicly visible documents", + "query" : { + "term": { + "visibility": "public" + } + } + } + } + ], + "response_processors": [ + { + "rename_field": { + "field": "message", + "target_field": "notification" + } + } + ] +} +``` + +Alternatively, if you want to use just the `DeleteFieldResponseProcessor` created before, you would use: + +``` +PUT /_search/pipeline/my_pipeline2 + +{ + "response_processors": [ + { + "delete_field": { + "field": "message" + } + } + ] +} +``` + +## Running a search request using a search pipeline + +To run a search request using a search pipeline, you first need to create the pipeline using the request format shown above. + +After that is completed, you can run a request using the format: `POST /myindex/_search?search_pipeline=<pipeline_name>`. + +In the example of the `DeleteFieldResponseProcessor`, this would be called with `POST /myindex/_search?search_pipeline=my_pipeline2`.
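+## Example: a complete response processor
+
+To tie the steps above together, here is a minimal, illustrative sketch of the hypothetical `DeleteFieldResponseProcessor` used as the running example in this README. It is not code that ships in this module, and some choices are assumptions made for brevity; in particular, mutating the map from `getSourceAsMap()` and re-serializing it through `JsonXContent` onto the hit via `sourceRef` is just one simple way to rewrite a hit's source.
+
+```
+package org.opensearch.search.pipeline.common;
+
+import org.opensearch.action.search.SearchRequest;
+import org.opensearch.action.search.SearchResponse;
+import org.opensearch.common.xcontent.json.JsonXContent;
+import org.opensearch.core.common.bytes.BytesReference;
+import org.opensearch.core.xcontent.XContentBuilder;
+import org.opensearch.ingest.ConfigurationUtils;
+import org.opensearch.search.SearchHit;
+import org.opensearch.search.pipeline.AbstractProcessor;
+import org.opensearch.search.pipeline.Processor;
+import org.opensearch.search.pipeline.SearchResponseProcessor;
+
+import java.util.Map;
+
+/**
+ * Illustrative sketch only: removes a field from the source of each returned hit.
+ */
+public class DeleteFieldResponseProcessor extends AbstractProcessor implements SearchResponseProcessor {
+
+    public static final String TYPE = "delete_field";
+
+    private final String field;
+    private final boolean ignoreMissing;
+
+    DeleteFieldResponseProcessor(String tag, String description, boolean ignoreFailure, String field, boolean ignoreMissing) {
+        super(tag, description, ignoreFailure);
+        this.field = field;
+        this.ignoreMissing = ignoreMissing;
+    }
+
+    @Override
+    public String getType() {
+        return TYPE;
+    }
+
+    @Override
+    public SearchResponse processResponse(SearchRequest request, SearchResponse response) throws Exception {
+        for (SearchHit hit : response.getHits().getHits()) {
+            if (hit.hasSource() == false) {
+                continue;
+            }
+            Map<String, Object> sourceAsMap = hit.getSourceAsMap();
+            if (sourceAsMap.containsKey(field) == false) {
+                if (ignoreMissing == false) {
+                    throw new IllegalArgumentException("field [" + field + "] not present in document [" + hit.getId() + "]");
+                }
+                continue;
+            }
+            // Remove the field, then write the modified source back onto the hit
+            sourceAsMap.remove(field);
+            XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent).map(sourceAsMap);
+            hit.sourceRef(BytesReference.bytes(builder));
+        }
+        return response;
+    }
+
+    static final class Factory implements Processor.Factory<SearchResponseProcessor> {
+        @Override
+        public DeleteFieldResponseProcessor create(
+            Map<String, Processor.Factory<SearchResponseProcessor>> processorFactories,
+            String tag,
+            String description,
+            boolean ignoreFailure,
+            Map<String, Object> config,
+            PipelineContext pipelineContext
+        ) throws Exception {
+            String field = ConfigurationUtils.readStringProperty(TYPE, tag, config, "field");
+            boolean ignoreMissing = ConfigurationUtils.readBooleanProperty(TYPE, tag, config, "ignore_missing", false);
+            return new DeleteFieldResponseProcessor(tag, description, ignoreFailure, field, ignoreMissing);
+        }
+    }
+}
+```
+
+A processor like this would still need to be registered in `SearchPipelineCommonModulePlugin` (step 5) and exercised by a YAML REST test (step 6) before it could back the `delete_field` pipeline example shown above.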
diff --git a/modules/search-pipeline-common/src/internalClusterTest/java/org/opensearch/search/pipeline/common/SearchPipelineCommonIT.java b/modules/search-pipeline-common/src/internalClusterTest/java/org/opensearch/search/pipeline/common/SearchPipelineCommonIT.java index faa0859e8e33f..b8b0798812df1 100644 --- a/modules/search-pipeline-common/src/internalClusterTest/java/org/opensearch/search/pipeline/common/SearchPipelineCommonIT.java +++ b/modules/search-pipeline-common/src/internalClusterTest/java/org/opensearch/search/pipeline/common/SearchPipelineCommonIT.java @@ -19,10 +19,10 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.plugins.Plugin; -import org.opensearch.core.rest.RestStatus; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.test.OpenSearchIntegTestCase; @@ -58,7 +58,7 @@ public void testFilterQuery() { + "]" + "}" ), - XContentType.JSON + MediaTypeRegistry.JSON ); AcknowledgedResponse ackRsp = client().admin().cluster().putSearchPipeline(putSearchPipelineRequest).actionGet(); assertTrue(ackRsp.isAcknowledged()); diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/BasicMap.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/BasicMap.java new file mode 100644 index 0000000000000..6ddc22420416b --- /dev/null +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/BasicMap.java @@ -0,0 +1,126 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline.common; + +import java.util.Collection; +import java.util.Map; +import java.util.Set; +import java.util.function.BiConsumer; +import java.util.function.BiFunction; +import java.util.function.Function; + +/** + * Helper for map abstractions passed to scripting processors. Throws {@link UnsupportedOperationException} for almost + * all methods. Subclasses just need to implement get and put. + */ +abstract class BasicMap implements Map<String, Object> { + + /** + * No-args constructor. + */ + protected BasicMap() {} + + private static final String UNSUPPORTED_OP_ERR = " Method not supported in Search pipeline script"; + + @Override + public boolean isEmpty() { + throw new UnsupportedOperationException("isEmpty" + UNSUPPORTED_OP_ERR); + } + + public int size() { + throw new UnsupportedOperationException("size" + UNSUPPORTED_OP_ERR); + } + + public boolean containsKey(Object key) { + return get(key) != null; + } + + public boolean containsValue(Object value) { + throw new UnsupportedOperationException("containsValue" + UNSUPPORTED_OP_ERR); + } + + public Object remove(Object key) { + throw new UnsupportedOperationException("remove" + UNSUPPORTED_OP_ERR); + } + + public void putAll(Map<? 
extends String, ?> m) { + throw new UnsupportedOperationException("putAll" + UNSUPPORTED_OP_ERR); + } + + public void clear() { + throw new UnsupportedOperationException("clear" + UNSUPPORTED_OP_ERR); + } + + public Set<String> keySet() { + throw new UnsupportedOperationException("keySet" + UNSUPPORTED_OP_ERR); + } + + public Collection<Object> values() { + throw new UnsupportedOperationException("values" + UNSUPPORTED_OP_ERR); + } + + public Set<Map.Entry<String, Object>> entrySet() { + throw new UnsupportedOperationException("entrySet" + UNSUPPORTED_OP_ERR); + } + + @Override + public Object getOrDefault(Object key, Object defaultValue) { + throw new UnsupportedOperationException("getOrDefault" + UNSUPPORTED_OP_ERR); + } + + @Override + public void forEach(BiConsumer<? super String, ? super Object> action) { + throw new UnsupportedOperationException("forEach" + UNSUPPORTED_OP_ERR); + } + + @Override + public void replaceAll(BiFunction<? super String, ? super Object, ?> function) { + throw new UnsupportedOperationException("replaceAll" + UNSUPPORTED_OP_ERR); + } + + @Override + public Object putIfAbsent(String key, Object value) { + throw new UnsupportedOperationException("putIfAbsent" + UNSUPPORTED_OP_ERR); + } + + @Override + public boolean remove(Object key, Object value) { + throw new UnsupportedOperationException("remove" + UNSUPPORTED_OP_ERR); + } + + @Override + public boolean replace(String key, Object oldValue, Object newValue) { + throw new UnsupportedOperationException("replace" + UNSUPPORTED_OP_ERR); + } + + @Override + public Object replace(String key, Object value) { + throw new UnsupportedOperationException("replace" + UNSUPPORTED_OP_ERR); + } + + @Override + public Object computeIfAbsent(String key, Function<? super String, ?> mappingFunction) { + throw new UnsupportedOperationException("computeIfAbsent" + UNSUPPORTED_OP_ERR); + } + + @Override + public Object computeIfPresent(String key, BiFunction<? super String, ? super Object, ?> remappingFunction) { + throw new UnsupportedOperationException("computeIfPresent" + UNSUPPORTED_OP_ERR); + } + + @Override + public Object compute(String key, BiFunction<? super String, ? super Object, ?> remappingFunction) { + throw new UnsupportedOperationException("compute" + UNSUPPORTED_OP_ERR); + } + + @Override + public Object merge(String key, Object value, BiFunction<? super Object, ? super Object, ?> remappingFunction) { + throw new UnsupportedOperationException("merge" + UNSUPPORTED_OP_ERR); + } +} diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/CollapseResponseProcessor.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/CollapseResponseProcessor.java new file mode 100644 index 0000000000000..3e6c4fef6a559 --- /dev/null +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/CollapseResponseProcessor.java @@ -0,0 +1,122 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.pipeline.common; + +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.document.DocumentField; +import org.opensearch.ingest.ConfigurationUtils; +import org.opensearch.search.SearchHit; +import org.opensearch.search.SearchHits; +import org.opensearch.search.pipeline.AbstractProcessor; +import org.opensearch.search.pipeline.Processor; +import org.opensearch.search.pipeline.SearchResponseProcessor; +import org.opensearch.search.pipeline.common.helpers.SearchResponseUtil; + +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * A simple implementation of field collapsing on search responses. Note that this is not going to work as well as + * field collapsing at the shard level, as implemented with the "collapse" parameter in a search request. Mostly + * just using this to demo the oversample / truncate_hits processors. + */ +public class CollapseResponseProcessor extends AbstractProcessor implements SearchResponseProcessor { + /** + * Key to reference this processor type from a search pipeline. + */ + public static final String TYPE = "collapse"; + static final String COLLAPSE_FIELD = "field"; + private final String collapseField; + + private CollapseResponseProcessor(String tag, String description, boolean ignoreFailure, String collapseField) { + super(tag, description, ignoreFailure); + this.collapseField = Objects.requireNonNull(collapseField); + } + + @Override + public String getType() { + return TYPE; + } + + @Override + public SearchResponse processResponse(SearchRequest request, SearchResponse response) { + + if (response.getHits() != null) { + if (response.getHits().getCollapseField() != null) { + throw new IllegalStateException( + "Cannot collapse on " + collapseField + ". Results already collapsed on " + response.getHits().getCollapseField() + ); + } + Map<String, SearchHit> collapsedHits = new LinkedHashMap<>(); + List<Object> collapseValues = new ArrayList<>(); + for (SearchHit hit : response.getHits()) { + Object fieldValue = null; + DocumentField docField = hit.getFields().get(collapseField); + if (docField != null) { + if (docField.getValues().size() > 1) { + throw new IllegalStateException( + "Failed to collapse " + hit.getId() + ": doc has multiple values for field " + collapseField + ); + } + fieldValue = docField.getValues().get(0); + } else if (hit.getSourceAsMap() != null) { + fieldValue = hit.getSourceAsMap().get(collapseField); + } + String fieldValueString; + if (fieldValue == null) { + fieldValueString = "__missing__"; + } else { + fieldValueString = fieldValue.toString(); + } + + // Results are already sorted by sort criterion. Only keep the first hit for each field. 
+ if (collapsedHits.containsKey(fieldValueString) == false) { + collapsedHits.put(fieldValueString, hit); + collapseValues.add(fieldValue); + } + } + SearchHit[] newHits = new SearchHit[collapsedHits.size()]; + int i = 0; + for (SearchHit collapsedHit : collapsedHits.values()) { + newHits[i++] = collapsedHit; + } + SearchHits searchHits = new SearchHits( + newHits, + response.getHits().getTotalHits(), + response.getHits().getMaxScore(), + response.getHits().getSortFields(), + collapseField, + collapseValues.toArray() + ); + return SearchResponseUtil.replaceHits(searchHits, response); + } + return response; + } + + static class Factory implements Processor.Factory<SearchResponseProcessor> { + + @Override + public CollapseResponseProcessor create( + Map<String, Processor.Factory<SearchResponseProcessor>> processorFactories, + String tag, + String description, + boolean ignoreFailure, + Map<String, Object> config, + PipelineContext pipelineContext + ) { + String collapseField = ConfigurationUtils.readStringProperty(TYPE, tag, config, COLLAPSE_FIELD); + return new CollapseResponseProcessor(tag, description, ignoreFailure, collapseField); + } + } + +} diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/FilterQueryRequestProcessor.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/FilterQueryRequestProcessor.java index eab8bc95bd668..3d04d6d5ed1e3 100644 --- a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/FilterQueryRequestProcessor.java +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/FilterQueryRequestProcessor.java @@ -9,10 +9,10 @@ package org.opensearch.search.pipeline.common; import org.opensearch.action.search.SearchRequest; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.xcontent.LoggingDeprecationHandler; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -20,8 +20,8 @@ import org.opensearch.index.query.QueryBuilder; import org.opensearch.ingest.ConfigurationUtils; import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.search.pipeline.Processor; import org.opensearch.search.pipeline.AbstractProcessor; +import org.opensearch.search.pipeline.Processor; import org.opensearch.search.pipeline.SearchRequestProcessor; import java.io.InputStream; @@ -114,7 +114,7 @@ public FilterQueryRequestProcessor create( try ( XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent).map(query); InputStream stream = BytesReference.bytes(builder).streamInput(); - XContentParser parser = XContentType.JSON.xContent() + XContentParser parser = MediaTypeRegistry.JSON.xContent() .createParser(namedXContentRegistry, LoggingDeprecationHandler.INSTANCE, stream) ) { return new FilterQueryRequestProcessor(tag, description, ignoreFailure, parseInnerQueryBuilder(parser)); diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/OversampleRequestProcessor.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/OversampleRequestProcessor.java new file mode 100644 index 
0000000000000..182cf6ba79504 --- /dev/null +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/OversampleRequestProcessor.java @@ -0,0 +1,83 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline.common; + +import org.opensearch.action.search.SearchRequest; +import org.opensearch.ingest.ConfigurationUtils; +import org.opensearch.search.SearchService; +import org.opensearch.search.pipeline.AbstractProcessor; +import org.opensearch.search.pipeline.PipelineProcessingContext; +import org.opensearch.search.pipeline.Processor; +import org.opensearch.search.pipeline.SearchRequestProcessor; +import org.opensearch.search.pipeline.StatefulSearchRequestProcessor; +import org.opensearch.search.pipeline.common.helpers.ContextUtils; + +import java.util.Map; + +import static org.opensearch.search.pipeline.common.helpers.ContextUtils.applyContextPrefix; + +/** + * Multiplies the "size" parameter on the {@link SearchRequest} by the given scaling factor, storing the original value + * in the request context as "original_size". + */ +public class OversampleRequestProcessor extends AbstractProcessor implements StatefulSearchRequestProcessor { + + /** + * Key to reference this processor type from a search pipeline. + */ + public static final String TYPE = "oversample"; + static final String SAMPLE_FACTOR = "sample_factor"; + static final String ORIGINAL_SIZE = "original_size"; + private final double sampleFactor; + private final String contextPrefix; + + private OversampleRequestProcessor(String tag, String description, boolean ignoreFailure, double sampleFactor, String contextPrefix) { + super(tag, description, ignoreFailure); + this.sampleFactor = sampleFactor; + this.contextPrefix = contextPrefix; + } + + @Override + public SearchRequest processRequest(SearchRequest request, PipelineProcessingContext requestContext) { + if (request.source() != null) { + int originalSize = request.source().size(); + if (originalSize == -1) { + originalSize = SearchService.DEFAULT_SIZE; + } + requestContext.setAttribute(applyContextPrefix(contextPrefix, ORIGINAL_SIZE), originalSize); + int newSize = (int) Math.ceil(originalSize * sampleFactor); + request.source().size(newSize); + } + return request; + } + + @Override + public String getType() { + return TYPE; + } + + static class Factory implements Processor.Factory<SearchRequestProcessor> { + @Override + public OversampleRequestProcessor create( + Map<String, Processor.Factory<SearchRequestProcessor>> processorFactories, + String tag, + String description, + boolean ignoreFailure, + Map<String, Object> config, + PipelineContext pipelineContext + ) { + double sampleFactor = ConfigurationUtils.readDoubleProperty(TYPE, tag, config, SAMPLE_FACTOR); + if (sampleFactor < 1.0) { + throw ConfigurationUtils.newConfigurationException(TYPE, tag, SAMPLE_FACTOR, "Value must be >= 1.0"); + } + String contextPrefix = ConfigurationUtils.readOptionalStringProperty(TYPE, tag, config, ContextUtils.CONTEXT_PREFIX_PARAMETER); + return new OversampleRequestProcessor(tag, description, ignoreFailure, sampleFactor, contextPrefix); + } + } +} diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/RenameFieldResponseProcessor.java 
b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/RenameFieldResponseProcessor.java index f959bccc93c20..212fe844d97a1 100644 --- a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/RenameFieldResponseProcessor.java +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/RenameFieldResponseProcessor.java @@ -10,16 +10,16 @@ import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchResponse; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; import org.opensearch.common.document.DocumentField; import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.ingest.ConfigurationUtils; import org.opensearch.search.SearchHit; -import org.opensearch.search.pipeline.Processor; import org.opensearch.search.pipeline.AbstractProcessor; +import org.opensearch.search.pipeline.Processor; import org.opensearch.search.pipeline.SearchRequestProcessor; import org.opensearch.search.pipeline.SearchResponseProcessor; diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/ScriptRequestProcessor.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/ScriptRequestProcessor.java index 3849d2f905490..a4052d0892ee6 100644 --- a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/ScriptRequestProcessor.java +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/ScriptRequestProcessor.java @@ -9,25 +9,24 @@ package org.opensearch.search.pipeline.common; import org.opensearch.action.search.SearchRequest; - import org.opensearch.common.Nullable; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; - import org.opensearch.script.Script; import org.opensearch.script.ScriptException; import org.opensearch.script.ScriptService; import org.opensearch.script.ScriptType; import org.opensearch.script.SearchScript; -import org.opensearch.search.pipeline.Processor; import org.opensearch.search.pipeline.AbstractProcessor; +import org.opensearch.search.pipeline.PipelineProcessingContext; +import org.opensearch.search.pipeline.Processor; import org.opensearch.search.pipeline.SearchRequestProcessor; -import org.opensearch.search.pipeline.common.helpers.SearchRequestMap; +import org.opensearch.search.pipeline.StatefulSearchRequestProcessor; import java.io.InputStream; import java.util.HashMap; @@ -40,7 +39,7 @@ * Processor that evaluates a script with a search request in its context * and then returns the modified search request. 
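+ * The script is also handed a {@code request_context} map backed by the {@link PipelineProcessingContext}, letting it read and write request-scoped attributes.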
*/ -public final class ScriptRequestProcessor extends AbstractProcessor implements SearchRequestProcessor { +public final class ScriptRequestProcessor extends AbstractProcessor implements StatefulSearchRequestProcessor { /** * Key to reference this processor type from a search pipeline. */ @@ -74,15 +73,8 @@ public final class ScriptRequestProcessor extends AbstractProcessor implements S this.scriptService = scriptService; } - /** - * Executes the script with the search request in context. - * - * @param request The search request passed into the script context. - * @return The modified search request. - * @throws Exception if an error occurs while processing the request. - */ @Override - public SearchRequest processRequest(SearchRequest request) throws Exception { + public SearchRequest processRequest(SearchRequest request, PipelineProcessingContext requestContext) throws Exception { // assert request is not null and source is not null if (request == null || request.source() == null) { throw new IllegalArgumentException("search request must not be null"); @@ -95,10 +87,33 @@ public SearchRequest processRequest(SearchRequest request) throws Exception { searchScript = precompiledSearchScript; } // execute the script with the search request in context - searchScript.execute(Map.of("_source", new SearchRequestMap(request))); + searchScript.execute(Map.of("_source", new SearchRequestMap(request), "request_context", new RequestContextMap(requestContext))); return request; } + private static class RequestContextMap extends BasicMap { + private final PipelineProcessingContext pipelinedRequestContext; + + private RequestContextMap(PipelineProcessingContext pipelinedRequestContext) { + this.pipelinedRequestContext = pipelinedRequestContext; + } + + @Override + public Object get(Object key) { + if (key instanceof String) { + return pipelinedRequestContext.getAttribute(key.toString()); + } + return null; + } + + @Override + public Object put(String key, Object value) { + Object originalValue = get(key); + pipelinedRequestContext.setAttribute(key, value); + return originalValue; + } + } + /** * Returns the type of the processor. 
* @@ -163,7 +178,7 @@ public ScriptRequestProcessor create( try ( XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent).map(scriptConfig); InputStream stream = BytesReference.bytes(builder).streamInput(); - XContentParser parser = XContentType.JSON.xContent() + XContentParser parser = MediaTypeRegistry.JSON.xContent() .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream) ) { Script script = Script.parse(parser); diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java index 49681b80fdead..5378a6721efb2 100644 --- a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java @@ -38,12 +38,21 @@ public Map<String, Processor.Factory<SearchRequestProcessor>> getRequestProcesso FilterQueryRequestProcessor.TYPE, new FilterQueryRequestProcessor.Factory(parameters.namedXContentRegistry), ScriptRequestProcessor.TYPE, - new ScriptRequestProcessor.Factory(parameters.scriptService) + new ScriptRequestProcessor.Factory(parameters.scriptService), + OversampleRequestProcessor.TYPE, + new OversampleRequestProcessor.Factory() ); } @Override public Map<String, Processor.Factory<SearchResponseProcessor>> getResponseProcessors(Parameters parameters) { - return Map.of(RenameFieldResponseProcessor.TYPE, new RenameFieldResponseProcessor.Factory()); + return Map.of( + RenameFieldResponseProcessor.TYPE, + new RenameFieldResponseProcessor.Factory(), + TruncateHitsResponseProcessor.TYPE, + new TruncateHitsResponseProcessor.Factory(), + CollapseResponseProcessor.TYPE, + new CollapseResponseProcessor.Factory() + ); } } diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchRequestMap.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchRequestMap.java new file mode 100644 index 0000000000000..c6430b96dcbed --- /dev/null +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchRequestMap.java @@ -0,0 +1,140 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline.common; + +import org.opensearch.action.search.SearchRequest; +import org.opensearch.search.builder.SearchSourceBuilder; + +import java.util.Map; + +/** + * A custom implementation of {@link Map} that provides access to the properties of a {@link SearchRequest}'s + * {@link SearchSourceBuilder}. The class allows retrieving and modifying specific properties of the search request. + */ +class SearchRequestMap extends BasicMap implements Map<String, Object> { + + private final SearchSourceBuilder source; + + /** + * Constructs a new instance of the {@link SearchRequestMap} with the provided {@link SearchRequest}. + * + * @param searchRequest The SearchRequest containing the SearchSourceBuilder to be accessed. + */ + public SearchRequestMap(SearchRequest searchRequest) { + source = searchRequest.source(); + } + + /** + * Checks if the SearchSourceBuilder is empty. 
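+ * A request with no source at all is treated as empty.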
+ * + * @return {@code true} if the SearchSourceBuilder is empty, {@code false} otherwise. + */ + @Override + public boolean isEmpty() { + return source == null; + } + + /** + * Retrieves the value associated with the specified property from the SearchSourceBuilder. + * + * @param key The SearchSourceBuilder property whose value is to be retrieved. + * @return The value associated with the specified property or null if the property has not been initialized. + * @throws IllegalArgumentException if the property name is not a String. + * @throws SearchRequestMapProcessingException if the property is not supported. + */ + @Override + public Object get(Object key) { + if (!(key instanceof String)) { + throw new IllegalArgumentException("key must be a String"); + } + // This is the explicit implementation of fetch value from source + switch ((String) key) { + case "from": + return source.from(); + case "size": + return source.size(); + case "explain": + return source.explain(); + case "version": + return source.version(); + case "seq_no_primary_term": + return source.seqNoAndPrimaryTerm(); + case "track_scores": + return source.trackScores(); + case "track_total_hits": + return source.trackTotalHitsUpTo(); + case "min_score": + return source.minScore(); + case "terminate_after": + return source.terminateAfter(); + case "profile": + return source.profile(); + default: + throw new SearchRequestMapProcessingException("Unsupported key: " + key); + } + } + + /** + * Sets the value for the specified property in the SearchSourceBuilder. + * + * @param key The property whose value is to be set. + * @param value The value to be set for the specified property. + * @return The original value associated with the property, or null if none existed. + * @throws IllegalArgumentException if the property is not a String. + * @throws SearchRequestMapProcessingException if the property is not supported or an error occurs during the setting. + */ + @Override + public Object put(String key, Object value) { + Object originalValue = get(key); + try { + switch (key) { + case "from": + source.from((Integer) value); + break; + case "size": + source.size((Integer) value); + break; + case "explain": + source.explain((Boolean) value); + break; + case "version": + source.version((Boolean) value); + break; + case "seq_no_primary_term": + source.seqNoAndPrimaryTerm((Boolean) value); + break; + case "track_scores": + source.trackScores((Boolean) value); + break; + case "track_total_hits": + source.trackTotalHitsUpTo((Integer) value); + break; + case "min_score": + source.minScore((Float) value); + break; + case "terminate_after": + source.terminateAfter((Integer) value); + break; + case "profile": + source.profile((Boolean) value); + break; + case "stats": // Not modifying stats, sorts, docvalue_fields, etc. 
as they require more complex handling + case "sort": + case "timeout": + case "docvalue_fields": + case "indices_boost": + default: + throw new SearchRequestMapProcessingException("Unsupported SearchRequest source property: " + key); + } + } catch (Exception e) { + throw new SearchRequestMapProcessingException("Error while setting value for SearchRequest source property: " + key, e); + } + return originalValue; + } +} diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/SearchRequestMapProcessingException.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchRequestMapProcessingException.java similarity index 76% rename from modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/SearchRequestMapProcessingException.java rename to modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchRequestMapProcessingException.java index cb1e45a20b624..2f00d0f82c2f1 100644 --- a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/SearchRequestMapProcessingException.java +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchRequestMapProcessingException.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.search.pipeline.common.helpers; +package org.opensearch.search.pipeline.common; import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchWrapperException; @@ -14,12 +14,12 @@ /** * An exception that indicates an error occurred while processing a {@link SearchRequestMap}. */ -public class SearchRequestMapProcessingException extends OpenSearchException implements OpenSearchWrapperException { +class SearchRequestMapProcessingException extends OpenSearchException implements OpenSearchWrapperException { /** * Constructs a new SearchRequestMapProcessingException with the specified message. * - * @param msg The error message. + * @param msg The error message. * @param args Arguments to substitute in the error message. */ public SearchRequestMapProcessingException(String msg, Object... args) { @@ -29,9 +29,9 @@ public SearchRequestMapProcessingException(String msg, Object... args) { /** * Constructs a new SearchRequestMapProcessingException with the specified message and cause. * - * @param msg The error message. + * @param msg The error message. * @param cause The cause of the exception. - * @param args Arguments to substitute in the error message. + * @param args Arguments to substitute in the error message. */ public SearchRequestMapProcessingException(String msg, Throwable cause, Object... args) { super(msg, cause, args); diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/TruncateHitsResponseProcessor.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/TruncateHitsResponseProcessor.java new file mode 100644 index 0000000000000..e3413bf41720f --- /dev/null +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/TruncateHitsResponseProcessor.java @@ -0,0 +1,96 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.pipeline.common; + +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.ingest.ConfigurationUtils; +import org.opensearch.search.SearchHit; +import org.opensearch.search.pipeline.AbstractProcessor; +import org.opensearch.search.pipeline.PipelineProcessingContext; +import org.opensearch.search.pipeline.Processor; +import org.opensearch.search.pipeline.SearchResponseProcessor; +import org.opensearch.search.pipeline.StatefulSearchResponseProcessor; +import org.opensearch.search.pipeline.common.helpers.ContextUtils; +import org.opensearch.search.pipeline.common.helpers.SearchResponseUtil; + +import java.util.Map; + +import static org.opensearch.search.pipeline.common.helpers.ContextUtils.applyContextPrefix; + +/** + * Truncates the returned search hits from the {@link SearchResponse}. If no target size is specified in the pipeline, then + * we try using the "original_size" value from the request context, which may have been set by {@link OversampleRequestProcessor}. + */ +public class TruncateHitsResponseProcessor extends AbstractProcessor implements StatefulSearchResponseProcessor { + /** + * Key to reference this processor type from a search pipeline. + */ + public static final String TYPE = "truncate_hits"; + static final String TARGET_SIZE = "target_size"; + private final int targetSize; + private final String contextPrefix; + + @Override + public String getType() { + return TYPE; + } + + private TruncateHitsResponseProcessor(String tag, String description, boolean ignoreFailure, int targetSize, String contextPrefix) { + super(tag, description, ignoreFailure); + this.targetSize = targetSize; + this.contextPrefix = contextPrefix; + } + + @Override + public SearchResponse processResponse(SearchRequest request, SearchResponse response, PipelineProcessingContext requestContext) { + int size; + if (targetSize < 0) { // No value specified in processor config. Use context value instead. + String key = applyContextPrefix(contextPrefix, OversampleRequestProcessor.ORIGINAL_SIZE); + Object o = requestContext.getAttribute(key); + if (o == null) { + throw new IllegalStateException("Must specify " + TARGET_SIZE + " unless an earlier processor set " + key); + } + size = (int) o; + } else { + size = targetSize; + } + if (response.getHits() != null && response.getHits().getHits().length > size) { + SearchHit[] newHits = new SearchHit[size]; + System.arraycopy(response.getHits().getHits(), 0, newHits, 0, size); + return SearchResponseUtil.replaceHits(newHits, response); + } + return response; + } + + static class Factory implements Processor.Factory<SearchResponseProcessor> { + @Override + public TruncateHitsResponseProcessor create( + Map<String, Processor.Factory<SearchResponseProcessor>> processorFactories, + String tag, + String description, + boolean ignoreFailure, + Map<String, Object> config, + PipelineContext pipelineContext + ) { + Integer targetSize = ConfigurationUtils.readIntProperty(TYPE, tag, config, TARGET_SIZE, null); + if (targetSize == null) { + // Use -1 as an "unset" marker to avoid repeated unboxing of an Integer. + targetSize = -1; + } else { + // Explicitly set values must be >= 0. 
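+ // (A target_size of 0 is allowed and truncates away all hits.)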
+ if (targetSize < 0) { + throw ConfigurationUtils.newConfigurationException(TYPE, tag, TARGET_SIZE, "Value must be >= 0"); + } + } + String contextPrefix = ConfigurationUtils.readOptionalStringProperty(TYPE, tag, config, ContextUtils.CONTEXT_PREFIX_PARAMETER); + return new TruncateHitsResponseProcessor(tag, description, ignoreFailure, targetSize, contextPrefix); + } + } +} diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/ContextUtils.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/ContextUtils.java new file mode 100644 index 0000000000000..9697da85dbecf --- /dev/null +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/ContextUtils.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline.common.helpers; + +/** + * Helpers for working with request-scoped context. + */ +public final class ContextUtils { + private ContextUtils() {} + + /** + * Parameter that can be passed to a stateful processor to avoid collisions between contextual variables by + * prefixing them with distinct qualifiers. + */ + public static final String CONTEXT_PREFIX_PARAMETER = "context_prefix"; + + /** + * Replaces a "global" variable name with one scoped to a given context prefix (unless prefix is null or empty). + * @param contextPrefix the prefix qualifier for the variable + * @param variableName the generic "global" form of the context variable + * @return the variableName prefixed with contextPrefix followed by ".", or just variableName if contextPrefix is null or empty + */ + public static String applyContextPrefix(String contextPrefix, String variableName) { + String contextVariable; + if (contextPrefix != null && contextPrefix.isEmpty() == false) { + contextVariable = contextPrefix + "." + variableName; + } else { + contextVariable = variableName; + } + return contextVariable; + } +} diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/SearchRequestMap.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/SearchRequestMap.java deleted file mode 100644 index 7af3ac66be146..0000000000000 --- a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/SearchRequestMap.java +++ /dev/null @@ -1,395 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.search.pipeline.common.helpers; - -import org.opensearch.action.search.SearchRequest; -import org.opensearch.search.builder.SearchSourceBuilder; - -import java.util.Collection; -import java.util.Map; -import java.util.Set; -import java.util.function.BiConsumer; -import java.util.function.BiFunction; -import java.util.function.Function; - -/** - * A custom implementation of {@link Map} that provides access to the properties of a {@link SearchRequest}'s - * {@link SearchSourceBuilder}. The class allows retrieving and modifying specific properties of the search request. 
- */ -public class SearchRequestMap implements Map<String, Object> { - private static final String UNSUPPORTED_OP_ERR = " Method not supported in Search pipeline script"; - - private final SearchSourceBuilder source; - - /** - * Constructs a new instance of the {@link SearchRequestMap} with the provided {@link SearchRequest}. - * - * @param searchRequest The SearchRequest containing the SearchSourceBuilder to be accessed. - */ - public SearchRequestMap(SearchRequest searchRequest) { - source = searchRequest.source(); - } - - /** - * Retrieves the number of properties in the SearchSourceBuilder. - * - * @return The number of properties in the SearchSourceBuilder. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public int size() { - throw new UnsupportedOperationException("size" + UNSUPPORTED_OP_ERR); - } - - /** - * Checks if the SearchSourceBuilder is empty. - * - * @return {@code true} if the SearchSourceBuilder is empty, {@code false} otherwise. - */ - @Override - public boolean isEmpty() { - return source == null; - } - - /** - * Checks if the SearchSourceBuilder contains the specified property. - * - * @param key The property to check for. - * @return {@code true} if the SearchSourceBuilder contains the specified property, {@code false} otherwise. - */ - @Override - public boolean containsKey(Object key) { - return get(key) != null; - } - - /** - * Checks if the SearchSourceBuilder contains the specified value. - * - * @param value The value to check for. - * @return {@code true} if the SearchSourceBuilder contains the specified value, {@code false} otherwise. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public boolean containsValue(Object value) { - throw new UnsupportedOperationException("containsValue" + UNSUPPORTED_OP_ERR); - } - - /** - * Retrieves the value associated with the specified property from the SearchSourceBuilder. - * - * @param key The SearchSourceBuilder property whose value is to be retrieved. - * @return The value associated with the specified property or null if the property has not been initialized. - * @throws IllegalArgumentException if the property name is not a String. - * @throws SearchRequestMapProcessingException if the property is not supported. - */ - @Override - public Object get(Object key) { - if (!(key instanceof String)) { - throw new IllegalArgumentException("key must be a String"); - } - // This is the explicit implementation of fetch value from source - switch ((String) key) { - case "from": - return source.from(); - case "size": - return source.size(); - case "explain": - return source.explain(); - case "version": - return source.version(); - case "seq_no_primary_term": - return source.seqNoAndPrimaryTerm(); - case "track_scores": - return source.trackScores(); - case "track_total_hits": - return source.trackTotalHitsUpTo(); - case "min_score": - return source.minScore(); - case "terminate_after": - return source.terminateAfter(); - case "profile": - return source.profile(); - default: - throw new SearchRequestMapProcessingException("Unsupported key: " + key); - } - } - - /** - * Sets the value for the specified property in the SearchSourceBuilder. - * - * @param key The property whose value is to be set. - * @param value The value to be set for the specified property. - * @return The original value associated with the property, or null if none existed. - * @throws IllegalArgumentException if the property is not a String. 
- * @throws SearchRequestMapProcessingException if the property is not supported or an error occurs during the setting. - */ - @Override - public Object put(String key, Object value) { - Object originalValue = get(key); - try { - switch (key) { - case "from": - source.from((Integer) value); - break; - case "size": - source.size((Integer) value); - break; - case "explain": - source.explain((Boolean) value); - break; - case "version": - source.version((Boolean) value); - break; - case "seq_no_primary_term": - source.seqNoAndPrimaryTerm((Boolean) value); - break; - case "track_scores": - source.trackScores((Boolean) value); - break; - case "track_total_hits": - source.trackTotalHitsUpTo((Integer) value); - break; - case "min_score": - source.minScore((Float) value); - break; - case "terminate_after": - source.terminateAfter((Integer) value); - break; - case "profile": - source.profile((Boolean) value); - break; - case "stats": // Not modifying stats, sorts, docvalue_fields, etc. as they require more complex handling - case "sort": - case "timeout": - case "docvalue_fields": - case "indices_boost": - default: - throw new SearchRequestMapProcessingException("Unsupported SearchRequest source property: " + key); - } - } catch (Exception e) { - throw new SearchRequestMapProcessingException("Error while setting value for SearchRequest source property: " + key, e); - } - return originalValue; - } - - /** - * Removes the specified property from the SearchSourceBuilder. - * - * @param key The name of the property that will be removed. - * @return The value associated with the property before it was removed, or null if the property was not found. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public Object remove(Object key) { - throw new UnsupportedOperationException("remove" + UNSUPPORTED_OP_ERR); - } - - /** - * Sets all the properties from the specified map to the SearchSourceBuilder. - * - * @param m The map containing the properties to be set. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public void putAll(Map<? extends String, ?> m) { - throw new UnsupportedOperationException("putAll" + UNSUPPORTED_OP_ERR); - } - - /** - * Removes all properties from the SearchSourceBuilder. - * - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public void clear() { - throw new UnsupportedOperationException("clear" + UNSUPPORTED_OP_ERR); - } - - /** - * Returns a set view of the property names in the SearchSourceBuilder. - * - * @return A set view of the property names in the SearchSourceBuilder. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public Set<String> keySet() { - throw new UnsupportedOperationException("keySet" + UNSUPPORTED_OP_ERR); - } - - /** - * Returns a collection view of the property values in the SearchSourceBuilder. - * - * @return A collection view of the property values in the SearchSourceBuilder. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public Collection<Object> values() { - throw new UnsupportedOperationException("values" + UNSUPPORTED_OP_ERR); - } - - /** - * Returns a set view of the properties in the SearchSourceBuilder. - * - * @return A set view of the properties in the SearchSourceBuilder. - * @throws UnsupportedOperationException always, as the method is not supported. 
- */ - @Override - public Set<Entry<String, Object>> entrySet() { - throw new UnsupportedOperationException("entrySet" + UNSUPPORTED_OP_ERR); - } - - /** - * Returns the value to which the specified property has, or the defaultValue if the property is not present in the - * SearchSourceBuilder. - * - * @param key The property whose associated value is to be returned. - * @param defaultValue The default value to be returned if the property is not present. - * @return The value to which the specified property has, or the defaultValue if the property is not present. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public Object getOrDefault(Object key, Object defaultValue) { - throw new UnsupportedOperationException("getOrDefault" + UNSUPPORTED_OP_ERR); - } - - /** - * Performs the given action for each property in the SearchSourceBuilder until all properties have been processed or the - * action throws an exception - * - * @param action The action to be performed for each property. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public void forEach(BiConsumer<? super String, ? super Object> action) { - throw new UnsupportedOperationException("forEach" + UNSUPPORTED_OP_ERR); - } - - /** - * Replaces each property's value with the result of invoking the given function on that property until all properties have - * been processed or the function throws an exception. - * - * @param function The function to apply to each property. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public void replaceAll(BiFunction<? super String, ? super Object, ?> function) { - throw new UnsupportedOperationException("replaceAll" + UNSUPPORTED_OP_ERR); - } - - /** - * If the specified property is not already associated with a value, associates it with the given value and returns null, - * else returns the current value. - * - * @param key The property whose value is to be set if absent. - * @param value The value to be associated with the specified property. - * @return The current value associated with the property, or null if the property is not present. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public Object putIfAbsent(String key, Object value) { - throw new UnsupportedOperationException("putIfAbsent" + UNSUPPORTED_OP_ERR); - } - - /** - * Removes the property only if it has the given value. - * - * @param key The property to be removed. - * @param value The value expected to be associated with the property. - * @return {@code true} if the entry was removed, {@code false} otherwise. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public boolean remove(Object key, Object value) { - throw new UnsupportedOperationException("remove" + UNSUPPORTED_OP_ERR); - } - - /** - * Replaces the specified property only if it has the given value. - * - * @param key The property to be replaced. - * @param oldValue The value expected to be associated with the property. - * @param newValue The value to be associated with the property. - * @return {@code true} if the property was replaced, {@code false} otherwise. - * @throws UnsupportedOperationException always, as the method is not supported. 
- */ - @Override - public boolean replace(String key, Object oldValue, Object newValue) { - throw new UnsupportedOperationException("replace" + UNSUPPORTED_OP_ERR); - } - - /** - * Replaces the specified property only if it has the given value. - * - * @param key The property to be replaced. - * @param value The value to be associated with the property. - * @return The previous value associated with the property, or null if the property was not found. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public Object replace(String key, Object value) { - throw new UnsupportedOperationException("replace" + UNSUPPORTED_OP_ERR); - } - - /** - * The computed value associated with the property, or null if the property is not present. - * - * @param key The property whose value is to be computed if absent. - * @param mappingFunction The function to compute a value based on the property. - * @return The computed value associated with the property, or null if the property is not present. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public Object computeIfAbsent(String key, Function<? super String, ?> mappingFunction) { - throw new UnsupportedOperationException("computeIfAbsent" + UNSUPPORTED_OP_ERR); - } - - /** - * If the value for the specified property is present, attempts to compute a new mapping given the property and its current - * mapped value. - * - * @param key The property for which the mapping is to be computed. - * @param remappingFunction The function to compute a new mapping. - * @return The new value associated with the property, or null if the property is not present. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public Object computeIfPresent(String key, BiFunction<? super String, ? super Object, ?> remappingFunction) { - throw new UnsupportedOperationException("computeIfPresent" + UNSUPPORTED_OP_ERR); - } - - /** - * If the value for the specified property is present, attempts to compute a new mapping given the property and its current - * mapped value, or removes the property if the computed value is null. - * - * @param key The property for which the mapping is to be computed. - * @param remappingFunction The function to compute a new mapping. - * @return The new value associated with the property, or null if the property is not present. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public Object compute(String key, BiFunction<? super String, ? super Object, ?> remappingFunction) { - throw new UnsupportedOperationException("compute" + UNSUPPORTED_OP_ERR); - } - - /** - * If the specified property is not already associated with a value or is associated with null, associates it with the - * given non-null value. Otherwise, replaces the associated value with the results of applying the given - * remapping function to the current and new values. - * - * @param key The property for which the mapping is to be merged. - * @param value The non-null value to be merged with the existing value. - * @param remappingFunction The function to merge the existing and new values. - * @return The new value associated with the property, or null if the property is not present. - * @throws UnsupportedOperationException always, as the method is not supported. - */ - @Override - public Object merge(String key, Object value, BiFunction<? super Object, ? 
super Object, ?> remappingFunction) { - throw new UnsupportedOperationException("merge" + UNSUPPORTED_OP_ERR); - } -} diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/SearchResponseUtil.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/SearchResponseUtil.java new file mode 100644 index 0000000000000..0710548c6429f --- /dev/null +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/helpers/SearchResponseUtil.java @@ -0,0 +1,93 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline.common.helpers; + +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.search.SearchResponseSections; +import org.opensearch.search.SearchHit; +import org.opensearch.search.SearchHits; +import org.opensearch.search.aggregations.InternalAggregations; +import org.opensearch.search.internal.InternalSearchResponse; +import org.opensearch.search.profile.SearchProfileShardResults; + +/** + * Helper methods for manipulating {@link SearchResponse}. + */ +public final class SearchResponseUtil { + private SearchResponseUtil() { + + } + + /** + * Construct a new {@link SearchResponse} based on an existing one, replacing just the {@link SearchHits}. + * @param newHits new {@link SearchHits} + * @param response the existing search response + * @return a new search response where the {@link SearchHits} has been replaced + */ + public static SearchResponse replaceHits(SearchHits newHits, SearchResponse response) { + SearchResponseSections searchResponseSections; + if (response.getAggregations() == null || response.getAggregations() instanceof InternalAggregations) { + // We either have no aggregations, or we have Writeable InternalAggregations. + // Either way, we can produce a Writeable InternalSearchResponse. + searchResponseSections = new InternalSearchResponse( + newHits, + (InternalAggregations) response.getAggregations(), + response.getSuggest(), + new SearchProfileShardResults(response.getProfileResults()), + response.isTimedOut(), + response.isTerminatedEarly(), + response.getNumReducePhases() + ); + } else { + // We have non-Writeable Aggregations, so the whole SearchResponseSections is non-Writeable. + searchResponseSections = new SearchResponseSections( + newHits, + response.getAggregations(), + response.getSuggest(), + response.isTimedOut(), + response.isTerminatedEarly(), + new SearchProfileShardResults(response.getProfileResults()), + response.getNumReducePhases() + ); + } + + return new SearchResponse( + searchResponseSections, + response.getScrollId(), + response.getTotalShards(), + response.getSuccessfulShards(), + response.getSkippedShards(), + response.getTook().millis(), + response.getShardFailures(), + response.getClusters(), + response.pointInTimeId() + ); + } + + /** + * Convenience method when only replacing the {@link SearchHit} array within the {@link SearchHits} in a {@link SearchResponse}. + * @param newHits the new array of {@link SearchHit} elements. + * @param response the search response to update + * @return a {@link SearchResponse} where the underlying array of {@link SearchHit} within the {@link SearchHits} has been replaced. 
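+ * Total hits, max score, sort fields, and collapse metadata are carried over from the original response unchanged.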
+ */ + public static SearchResponse replaceHits(SearchHit[] newHits, SearchResponse response) { + if (response.getHits() == null) { + throw new IllegalStateException("Response must have hits"); + } + SearchHits searchHits = new SearchHits( + newHits, + response.getHits().getTotalHits(), + response.getHits().getMaxScore(), + response.getHits().getSortFields(), + response.getHits().getCollapseField(), + response.getHits().getCollapseValues() + ); + return replaceHits(searchHits, response); + } +} diff --git a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/CollapseResponseProcessorTests.java b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/CollapseResponseProcessorTests.java new file mode 100644 index 0000000000000..cda011f24fea1 --- /dev/null +++ b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/CollapseResponseProcessorTests.java @@ -0,0 +1,86 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline.common; + +import org.apache.lucene.search.TotalHits; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.document.DocumentField; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.search.SearchHit; +import org.opensearch.search.SearchHits; +import org.opensearch.search.internal.InternalSearchResponse; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class CollapseResponseProcessorTests extends OpenSearchTestCase { + public void testWithDocumentFields() { + testProcessor(true); + } + + public void testWithSourceField() { + testProcessor(false); + } + + private void testProcessor(boolean includeDocField) { + Map<String, Object> config = new HashMap<>(Map.of(CollapseResponseProcessor.COLLAPSE_FIELD, "groupid")); + CollapseResponseProcessor processor = new CollapseResponseProcessor.Factory().create( + Collections.emptyMap(), + null, + null, + false, + config, + null + ); + int numHits = randomIntBetween(1, 100); + SearchResponse inputResponse = generateResponse(numHits, includeDocField); + + SearchResponse processedResponse = processor.processResponse(new SearchRequest(), inputResponse); + if (numHits % 2 == 0) { + assertEquals(numHits / 2, processedResponse.getHits().getHits().length); + } else { + assertEquals(numHits / 2 + 1, processedResponse.getHits().getHits().length); + } + for (SearchHit collapsedHit : processedResponse.getHits()) { + assertEquals(0, collapsedHit.docId() % 2); + } + assertEquals("groupid", processedResponse.getHits().getCollapseField()); + assertEquals(processedResponse.getHits().getHits().length, processedResponse.getHits().getCollapseValues().length); + for (int i = 0; i < processedResponse.getHits().getHits().length; i++) { + assertEquals(i, processedResponse.getHits().getCollapseValues()[i]); + } + } + + private static SearchResponse generateResponse(int numHits, boolean includeDocField) { + SearchHit[] hitsArray = new SearchHit[numHits]; + for (int i = 0; i < numHits; i++) { + Map<String, DocumentField> docFields; + int groupValue = i / 2; + if (includeDocField) { + docFields = Map.of("groupid", new DocumentField("groupid", List.of(groupValue))); + 
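+ // Note: hits 2k and 2k+1 share group id k, so only even doc ids should survive collapsing.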
} else { + docFields = Collections.emptyMap(); + } + SearchHit hit = new SearchHit(i, Integer.toString(i), docFields, Collections.emptyMap()); + hit.sourceRef(new BytesArray("{\"groupid\": " + groupValue + "}")); + hitsArray[i] = hit; + } + SearchHits searchHits = new SearchHits( + hitsArray, + new TotalHits(Math.max(numHits, 1000), TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO), + 1.0f + ); + InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, null, null, null, false, false, 0); + return new SearchResponse(internalSearchResponse, null, 1, 1, 0, 10, null, null); + } +} diff --git a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/OversampleRequestProcessorTests.java b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/OversampleRequestProcessorTests.java new file mode 100644 index 0000000000000..96e99dff9cc03 --- /dev/null +++ b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/OversampleRequestProcessorTests.java @@ -0,0 +1,62 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline.common; + +import org.opensearch.action.search.SearchRequest; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.search.pipeline.PipelineProcessingContext; +import org.opensearch.search.pipeline.common.helpers.ContextUtils; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +public class OversampleRequestProcessorTests extends OpenSearchTestCase { + + public void testEmptySource() { + OversampleRequestProcessor.Factory factory = new OversampleRequestProcessor.Factory(); + Map<String, Object> config = new HashMap<>(Map.of(OversampleRequestProcessor.SAMPLE_FACTOR, 3.0)); + OversampleRequestProcessor processor = factory.create(Collections.emptyMap(), null, null, false, config, null); + + SearchRequest request = new SearchRequest(); + PipelineProcessingContext context = new PipelineProcessingContext(); + SearchRequest transformedRequest = processor.processRequest(request, context); + assertEquals(request, transformedRequest); + assertNull(context.getAttribute("original_size")); + } + + public void testBasicBehavior() { + OversampleRequestProcessor.Factory factory = new OversampleRequestProcessor.Factory(); + Map<String, Object> config = new HashMap<>(Map.of(OversampleRequestProcessor.SAMPLE_FACTOR, 3.0)); + OversampleRequestProcessor processor = factory.create(Collections.emptyMap(), null, null, false, config, null); + + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().size(10); + SearchRequest request = new SearchRequest().source(sourceBuilder); + PipelineProcessingContext context = new PipelineProcessingContext(); + SearchRequest transformedRequest = processor.processRequest(request, context); + assertEquals(30, transformedRequest.source().size()); + assertEquals(10, context.getAttribute("original_size")); + } + + public void testContextPrefix() { + OversampleRequestProcessor.Factory factory = new OversampleRequestProcessor.Factory(); + Map<String, Object> config = new HashMap<>( + Map.of(OversampleRequestProcessor.SAMPLE_FACTOR, 3.0, ContextUtils.CONTEXT_PREFIX_PARAMETER, "foo") + ); + OversampleRequestProcessor processor = factory.create(Collections.emptyMap(), 
null, null, false, config, null); + + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().size(10); + SearchRequest request = new SearchRequest().source(sourceBuilder); + PipelineProcessingContext context = new PipelineProcessingContext(); + SearchRequest transformedRequest = processor.processRequest(request, context); + assertEquals(30, transformedRequest.source().size()); + assertEquals(10, context.getAttribute("foo.original_size")); + } +} diff --git a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/RenameFieldResponseProcessorTests.java b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/RenameFieldResponseProcessorTests.java index f50d29ff3be6c..b051a8123b354 100644 --- a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/RenameFieldResponseProcessorTests.java +++ b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/RenameFieldResponseProcessorTests.java @@ -13,8 +13,8 @@ import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchResponseSections; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.document.DocumentField; +import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.ingest.RandomDocumentPicks; diff --git a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/ScriptRequestProcessorTests.java b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/ScriptRequestProcessorTests.java index df383e778c7ba..b372b220b71ac 100644 --- a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/ScriptRequestProcessorTests.java +++ b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/ScriptRequestProcessorTests.java @@ -8,25 +8,23 @@ package org.opensearch.search.pipeline.common; -import org.junit.Before; import org.opensearch.action.search.SearchRequest; -import org.opensearch.common.unit.TimeValue; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; import org.opensearch.script.MockScriptEngine; import org.opensearch.script.Script; import org.opensearch.script.ScriptModule; import org.opensearch.script.ScriptService; -import org.opensearch.script.SearchScript; import org.opensearch.script.ScriptType; +import org.opensearch.script.SearchScript; import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.search.pipeline.common.helpers.SearchRequestMap; +import org.opensearch.search.pipeline.PipelineProcessingContext; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; import java.util.Collections; -import java.util.Map; import java.util.HashMap; - -import static org.hamcrest.core.Is.is; +import java.util.Map; import java.util.concurrent.TimeUnit; public class ScriptRequestProcessorTests extends OpenSearchTestCase { @@ -87,7 +85,7 @@ public void testScriptingWithoutPrecompiledScriptFactory() throws Exception { searchRequest.source(createSearchSourceBuilder()); assertNotNull(searchRequest); - processor.processRequest(searchRequest); + processor.processRequest(searchRequest, new PipelineProcessingContext()); assertSearchRequest(searchRequest); } @@ -104,7 +102,7 @@ public void testScriptingWithPrecompiledIngestScript() 
throws Exception { searchRequest.source(createSearchSourceBuilder()); assertNotNull(searchRequest); - processor.processRequest(searchRequest); + processor.processRequest(searchRequest, new PipelineProcessingContext()); assertSearchRequest(searchRequest); } @@ -124,15 +122,15 @@ private SearchSourceBuilder createSearchSourceBuilder() { } private void assertSearchRequest(SearchRequest searchRequest) { - assertThat(searchRequest.source().from(), is(20)); - assertThat(searchRequest.source().size(), is(30)); - assertThat(searchRequest.source().explain(), is(false)); - assertThat(searchRequest.source().version(), is(false)); - assertThat(searchRequest.source().seqNoAndPrimaryTerm(), is(false)); - assertThat(searchRequest.source().trackScores(), is(false)); - assertThat(searchRequest.source().trackTotalHitsUpTo(), is(4)); - assertThat(searchRequest.source().minScore(), is(2.0f)); - assertThat(searchRequest.source().timeout(), is(new TimeValue(60, TimeUnit.SECONDS))); - assertThat(searchRequest.source().terminateAfter(), is(6)); + assertEquals(20, searchRequest.source().from()); + assertEquals(30, searchRequest.source().size()); + assertFalse(searchRequest.source().explain()); + assertFalse(searchRequest.source().version()); + assertFalse(searchRequest.source().seqNoAndPrimaryTerm()); + assertFalse(searchRequest.source().trackScores()); + assertEquals(4, searchRequest.source().trackTotalHitsUpTo().intValue()); + assertEquals(2.0f, searchRequest.source().minScore(), 0.0001); + assertEquals(new TimeValue(60, TimeUnit.SECONDS), searchRequest.source().timeout()); + assertEquals(6, searchRequest.source().terminateAfter()); } } diff --git a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/helpers/SearchRequestMapTests.java b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchRequestMapTests.java similarity index 99% rename from modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/helpers/SearchRequestMapTests.java rename to modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchRequestMapTests.java index 5572f28335e1c..c982ada7b5ea5 100644 --- a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/helpers/SearchRequestMapTests.java +++ b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchRequestMapTests.java @@ -5,7 +5,7 @@ * this file be licensed under the Apache-2.0 license or a * compatible open source license. */ -package org.opensearch.search.pipeline.common.helpers; +package org.opensearch.search.pipeline.common; import org.opensearch.action.search.SearchRequest; import org.opensearch.search.builder.SearchSourceBuilder; diff --git a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/TruncateHitsResponseProcessorTests.java b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/TruncateHitsResponseProcessorTests.java new file mode 100644 index 0000000000000..7615225c7f77e --- /dev/null +++ b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/TruncateHitsResponseProcessorTests.java @@ -0,0 +1,91 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.pipeline.common; + +import org.apache.lucene.search.TotalHits; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.search.SearchHit; +import org.opensearch.search.SearchHits; +import org.opensearch.search.internal.InternalSearchResponse; +import org.opensearch.search.pipeline.PipelineProcessingContext; +import org.opensearch.search.pipeline.common.helpers.ContextUtils; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +public class TruncateHitsResponseProcessorTests extends OpenSearchTestCase { + + public void testBasicBehavior() { + int targetSize = randomInt(50); + TruncateHitsResponseProcessor.Factory factory = new TruncateHitsResponseProcessor.Factory(); + Map<String, Object> config = new HashMap<>(Map.of(TruncateHitsResponseProcessor.TARGET_SIZE, targetSize)); + TruncateHitsResponseProcessor processor = factory.create(Collections.emptyMap(), null, null, false, config, null); + + int numHits = randomInt(100); + SearchResponse response = constructResponse(numHits); + SearchResponse transformedResponse = processor.processResponse(new SearchRequest(), response, new PipelineProcessingContext()); + assertEquals(Math.min(targetSize, numHits), transformedResponse.getHits().getHits().length); + } + + public void testTargetSizePassedViaContext() { + TruncateHitsResponseProcessor.Factory factory = new TruncateHitsResponseProcessor.Factory(); + TruncateHitsResponseProcessor processor = factory.create(Collections.emptyMap(), null, null, false, Collections.emptyMap(), null); + + int targetSize = randomInt(50); + int numHits = randomInt(100); + SearchResponse response = constructResponse(numHits); + PipelineProcessingContext requestContext = new PipelineProcessingContext(); + requestContext.setAttribute("original_size", targetSize); + SearchResponse transformedResponse = processor.processResponse(new SearchRequest(), response, requestContext); + assertEquals(Math.min(targetSize, numHits), transformedResponse.getHits().getHits().length); + } + + public void testTargetSizePassedViaContextWithPrefix() { + TruncateHitsResponseProcessor.Factory factory = new TruncateHitsResponseProcessor.Factory(); + Map<String, Object> config = new HashMap<>(Map.of(ContextUtils.CONTEXT_PREFIX_PARAMETER, "foo")); + TruncateHitsResponseProcessor processor = factory.create(Collections.emptyMap(), null, null, false, config, null); + + int targetSize = randomInt(50); + int numHits = randomInt(100); + SearchResponse response = constructResponse(numHits); + PipelineProcessingContext requestContext = new PipelineProcessingContext(); + requestContext.setAttribute("foo.original_size", targetSize); + SearchResponse transformedResponse = processor.processResponse(new SearchRequest(), response, requestContext); + assertEquals(Math.min(targetSize, numHits), transformedResponse.getHits().getHits().length); + } + + public void testTargetSizeMissing() { + TruncateHitsResponseProcessor.Factory factory = new TruncateHitsResponseProcessor.Factory(); + TruncateHitsResponseProcessor processor = factory.create(Collections.emptyMap(), null, null, false, Collections.emptyMap(), null); + + int numHits = randomInt(100); + SearchResponse response = constructResponse(numHits); + assertThrows( + IllegalStateException.class, + () -> processor.processResponse(new SearchRequest(), response, new PipelineProcessingContext()) + ); + } + + private static 
SearchResponse constructResponse(int numHits) { + SearchHit[] hitsArray = new SearchHit[numHits]; + for (int i = 0; i < numHits; i++) { + hitsArray[i] = new SearchHit(i, Integer.toString(i), Collections.emptyMap(), Collections.emptyMap()); + } + SearchHits searchHits = new SearchHits( + hitsArray, + new TotalHits(Math.max(numHits, 1000), TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO), + 1.0f + ); + InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, null, null, null, false, false, 0); + return new SearchResponse(internalSearchResponse, null, 1, 1, 0, 10, null, null); + } +} diff --git a/modules/search-pipeline-common/src/yamlRestTest/java/org/opensearch/search/pipeline/common/SearchPipelineCommonYamlTestSuiteIT.java b/modules/search-pipeline-common/src/yamlRestTest/java/org/opensearch/search/pipeline/common/SearchPipelineCommonYamlTestSuiteIT.java index cd1cbbc995f8b..d4d53b158420a 100644 --- a/modules/search-pipeline-common/src/yamlRestTest/java/org/opensearch/search/pipeline/common/SearchPipelineCommonYamlTestSuiteIT.java +++ b/modules/search-pipeline-common/src/yamlRestTest/java/org/opensearch/search/pipeline/common/SearchPipelineCommonYamlTestSuiteIT.java @@ -10,6 +10,7 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.test.rest.yaml.ClientYamlTestCandidate; import org.opensearch.test.rest.yaml.OpenSearchClientYamlSuiteTestCase; diff --git a/modules/search-pipeline-common/src/yamlRestTest/resources/rest-api-spec/test/search_pipeline/60_oversample_truncate.yml b/modules/search-pipeline-common/src/yamlRestTest/resources/rest-api-spec/test/search_pipeline/60_oversample_truncate.yml new file mode 100644 index 0000000000000..1f9e95084322d --- /dev/null +++ b/modules/search-pipeline-common/src/yamlRestTest/resources/rest-api-spec/test/search_pipeline/60_oversample_truncate.yml @@ -0,0 +1,105 @@ +--- +teardown: + - do: + search_pipeline.delete: + id: "my_pipeline" + ignore: 404 + +--- +"Test state propagating from oversample to truncate_hits processor": + - do: + search_pipeline.put: + id: "my_pipeline" + body: > + { + "description": "_description", + "request_processors": [ + { + "oversample" : { + "sample_factor" : 2 + } + } + ], + "response_processors": [ + { + "collapse" : { + "field" : "group_id" + } + }, + { + "truncate_hits" : {} + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + body: { + "group_id": "a", + "popularity" : 1 + } + - do: + index: + index: test + id: 2 + body: { + "group_id": "a", + "popularity" : 2 + } + - do: + index: + index: test + id: 3 + body: { + "group_id": "b", + "popularity" : 3 + } + - do: + index: + index: test + id: 4 + body: { + "group_id": "b", + "popularity" : 4 + } + - do: + indices.refresh: + index: test + + - do: + search: + body: { + "query" : { + "function_score" : { + "field_value_factor" : { + "field" : "popularity" + } + } + }, + "size" : 2 + } + - match: { hits.total.value: 4 } + - length: { hits.hits: 2 } + - match: { hits.hits.0._id: "4" } + - match: { hits.hits.1._id: "3" } + + - do: + search: + search_pipeline: my_pipeline + body: { + "query" : { + "function_score" : { + "field_value_factor" : { + "field" : "popularity" + } + } + }, + "size" : 2 + } + - match: { hits.total.value: 4 } + - length: { hits.hits: 2 } + - match: { hits.hits.0._id: "4" } + - match: { hits.hits.1._id: "2" } diff --git 
a/modules/search-pipeline-common/src/yamlRestTest/resources/rest-api-spec/test/search_pipeline/70_script_truncate.yml b/modules/search-pipeline-common/src/yamlRestTest/resources/rest-api-spec/test/search_pipeline/70_script_truncate.yml new file mode 100644 index 0000000000000..9c9f6747e9bdc --- /dev/null +++ b/modules/search-pipeline-common/src/yamlRestTest/resources/rest-api-spec/test/search_pipeline/70_script_truncate.yml @@ -0,0 +1,70 @@ +--- +teardown: + - do: + search_pipeline.delete: + id: "my_pipeline" + ignore: 404 + +--- +"Test state propagating from script request to truncate_hits processor": + - do: + search_pipeline.put: + id: "my_pipeline" + body: > + { + "description": "_description", + "request_processors": [ + { + "script" : { + "source" : "ctx.request_context['foo.original_size'] = 2" + } + } + ], + "response_processors": [ + { + "truncate_hits" : { + "context_prefix" : "foo" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + body: {} + - do: + index: + index: test + id: 2 + body: {} + - do: + index: + index: test + id: 3 + body: {} + - do: + index: + index: test + id: 4 + body: {} + - do: + indices.refresh: + index: test + + - do: + search: + body: { + } + - match: { hits.total.value: 4 } + - length: { hits.hits: 4 } + + - do: + search: + search_pipeline: my_pipeline + body: { + } + - match: { hits.total.value: 4 } + - length: { hits.hits: 2 } diff --git a/modules/systemd/src/main/java/org/opensearch/systemd/Libsystemd.java b/modules/systemd/src/main/java/org/opensearch/systemd/Libsystemd.java index 93e2e28718d51..05c6222d3d89a 100644 --- a/modules/systemd/src/main/java/org/opensearch/systemd/Libsystemd.java +++ b/modules/systemd/src/main/java/org/opensearch/systemd/Libsystemd.java @@ -40,6 +40,7 @@ /** * Provides access to the native method sd_notify from libsystemd. 
*/ +@SuppressWarnings("removal") class Libsystemd { static { diff --git a/modules/systemd/src/main/java/org/opensearch/systemd/SystemdModulePlugin.java b/modules/systemd/src/main/java/org/opensearch/systemd/SystemdModulePlugin.java index eb04e69810b9c..6e291027fa35f 100644 --- a/modules/systemd/src/main/java/org/opensearch/systemd/SystemdModulePlugin.java +++ b/modules/systemd/src/main/java/org/opensearch/systemd/SystemdModulePlugin.java @@ -38,8 +38,8 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.SetOnce; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..beb44fc0f4cf9 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +8509a72b8a5a2d33d611e99254aed39765c3ad82 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.94.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.94.Final.jar.sha1 deleted file mode 100644 index 05b1c2a4d614e..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -eec248b26f16e888688e5bb37b7eeda76b78d2f7 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..4c74bb06fd83b --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +0a1d32debf2ed07c5852ab5b2904c43adb76c39e \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.94.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.94.Final.jar.sha1 deleted file mode 100644 index baa7e25f1ac49..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c70ef20ca338558147887df60f46341bc47f6900 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..38eb2e5bad80a --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +04d8e9e51b7254bd26a42fe17bdcae32e4c6ebb3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.94.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.94.Final.jar.sha1 deleted file mode 100644 index 8c018be2565e5..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9e5404764092c1f6305ad5719078f46ab228d587 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..5b3d3311edc9f --- /dev/null +++ 
b/modules/transport-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +3885ffe7dd05c9773df70c61009f34a5a8a383ec \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.94.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.94.Final.jar.sha1 deleted file mode 100644 index e73026b412972..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http2-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f651595784d6cca4cbca6a8ad74c48fceed6cea8 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..bbe91c6ccfb1d --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +4f17a547530d64becd7179507b25f4154bcfba57 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.94.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.94.Final.jar.sha1 deleted file mode 100644 index b787338551ede..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ad4ecf779ebc794cd351f57792f56ea01387b868 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..ba27b38632622 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +d4c6b05f4d9aca117981297fb7f02953102ebb5e \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.94.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.94.Final.jar.sha1 deleted file mode 100644 index b08e85ba7adf8..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cd9121ce24d6d3f2898946d04b0ef3ec548b00b4 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..3bc0f7b3fed09 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +dfee84308341a42131dd0f8ac0e1e02d627c19f3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.94.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.94.Final.jar.sha1 deleted file mode 100644 index 4c9e4dda2b852..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e96f649e8e9dcb29a1f8e95328b99c9eb6cf76c2 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..19419999300dd --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +d6a105c621b47d1410e0e09419d7209d2d46e914 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.94.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.94.Final.jar.sha1 deleted file mode 100644 index ed7760b8e15d1..0000000000000 --- 
a/modules/transport-netty4/licenses/netty-transport-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ec783a737f96991a87b1d5794e2f9eb2024d708a \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..407ecaffdad30 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +4d61d4959741109b3eccd7337f11fc89fa90a74a \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.94.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.94.Final.jar.sha1 deleted file mode 100644 index 43bc960a347a1..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3fa5f9d04b6b782d869d6e0657d896eeadca5866 \ No newline at end of file diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HeaderVerifierIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HeaderVerifierIT.java new file mode 100644 index 0000000000000..c39567a005fd1 --- /dev/null +++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HeaderVerifierIT.java @@ -0,0 +1,75 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.http.netty4; + +import org.opensearch.OpenSearchNetty4IntegTestCase; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; +import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import org.opensearch.transport.Netty4BlockingPlugin; + +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +import io.netty.buffer.ByteBufUtil; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpVersion; +import io.netty.handler.codec.http2.HttpConversionUtil; +import io.netty.util.ReferenceCounted; + +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.equalTo; +import static io.netty.handler.codec.http.HttpHeaderNames.HOST; + +@ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numDataNodes = 1) +public class Netty4HeaderVerifierIT extends OpenSearchNetty4IntegTestCase { + + @Override + protected boolean addMockHttpTransport() { + return false; // enable http + } + + @Override + protected Collection<Class<? 
extends Plugin>> nodePlugins() { + return Collections.singletonList(Netty4BlockingPlugin.class); + } + + public void testThatNettyHttpServerRequestBlockedWithHeaderVerifier() throws Exception { + HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class); + TransportAddress[] boundAddresses = httpServerTransport.boundAddress().boundAddresses(); + TransportAddress transportAddress = randomFrom(boundAddresses); + + final FullHttpRequest blockedRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); + blockedRequest.headers().add("blockme", "Not Allowed"); + blockedRequest.headers().add(HOST, "localhost"); + blockedRequest.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), "http"); + + final List<FullHttpResponse> responses = new ArrayList<>(); + try (Netty4HttpClient nettyHttpClient = Netty4HttpClient.http2()) { + try { + FullHttpResponse blockedResponse = nettyHttpClient.send(transportAddress.address(), blockedRequest); + responses.add(blockedResponse); + String blockedResponseContent = new String(ByteBufUtil.getBytes(blockedResponse.content()), StandardCharsets.UTF_8); + assertThat(blockedResponseContent, containsString("Hit header_verifier")); + assertThat(blockedResponse.status().code(), equalTo(401)); + } finally { + responses.forEach(ReferenceCounted::release); + } + } + } + +} diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4Http2IT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4Http2IT.java index c066f3edf6900..eba2c5ce1e094 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4Http2IT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4Http2IT.java @@ -8,12 +8,9 @@ package org.opensearch.http.netty4; -import io.netty.handler.codec.http.FullHttpResponse; -import io.netty.handler.codec.http.HttpResponseStatus; -import io.netty.util.ReferenceCounted; import org.opensearch.OpenSearchNetty4IntegTestCase; import org.opensearch.common.collect.Tuple; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.http.HttpServerTransport; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; @@ -23,6 +20,10 @@ import java.util.Locale; import java.util.stream.IntStream; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.util.ReferenceCounted; + import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.hasSize; diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java index db76c0b145840..826d4a7e5d61e 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java @@ -32,15 +32,12 @@ package org.opensearch.http.netty4; -import io.netty.handler.codec.http.FullHttpResponse; -import io.netty.handler.codec.http.HttpResponseStatus; -import 
io.netty.util.ReferenceCounted; import org.opensearch.OpenSearchNetty4IntegTestCase; import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.http.HttpServerTransport; import org.opensearch.indices.breaker.HierarchyCircuitBreakerService; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; @@ -50,13 +47,17 @@ import java.util.Collection; import java.util.List; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.util.ReferenceCounted; + import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; /** * This test checks that in-flight requests are limited on HTTP level and that requests that are excluded from limiting can pass. - * + * <p> * As the same setting is also used to limit in-flight requests on transport level, we avoid transport messages by forcing * a single node "cluster". */ diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4PipeliningIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4PipeliningIT.java index 96193b0ecb954..42595fcddfa69 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4PipeliningIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4PipeliningIT.java @@ -32,10 +32,8 @@ package org.opensearch.http.netty4; -import io.netty.handler.codec.http.FullHttpResponse; -import io.netty.util.ReferenceCounted; import org.opensearch.OpenSearchNetty4IntegTestCase; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.http.HttpServerTransport; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; @@ -43,6 +41,9 @@ import java.util.Collection; import java.util.Locale; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.util.ReferenceCounted; + import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/rest/discovery/Zen2RestApiIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/rest/discovery/Zen2RestApiIT.java index fbac1f1c52e95..fd00d9d00d7fd 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/rest/discovery/Zen2RestApiIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/rest/discovery/Zen2RestApiIT.java @@ -32,6 +32,7 @@ package org.opensearch.rest.discovery; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.OpenSearchNetty4IntegTestCase; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.client.Client; @@ -46,9 +47,8 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.http.HttpServerTransport; -import org.opensearch.test.OpenSearchIntegTestCase; import 
org.opensearch.test.InternalTestCluster; -import org.apache.hc.core5.http.HttpHost; +import org.opensearch.test.OpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.io.IOException; diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/Netty4BlockingPlugin.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/Netty4BlockingPlugin.java new file mode 100644 index 0000000000000..d5fe49952add3 --- /dev/null +++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/Netty4BlockingPlugin.java @@ -0,0 +1,127 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.transport; + +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.BigArrays; +import org.opensearch.common.util.PageCacheRecycler; +import org.opensearch.core.indices.breaker.CircuitBreakerService; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.http.netty4.Netty4HttpServerTransport; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.threadpool.ThreadPool; + +import java.nio.charset.StandardCharsets; +import java.util.Collections; +import java.util.Map; +import java.util.function.Supplier; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelFutureListener; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; +import io.netty.channel.SimpleChannelInboundHandler; +import io.netty.handler.codec.http.DefaultFullHttpResponse; +import io.netty.handler.codec.http.DefaultHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpRequest; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.util.ReferenceCountUtil; + +public class Netty4BlockingPlugin extends Netty4ModulePlugin { + + public class Netty4BlockingHttpServerTransport extends Netty4HttpServerTransport { + + public Netty4BlockingHttpServerTransport( + Settings settings, + NetworkService networkService, + BigArrays bigArrays, + ThreadPool threadPool, + NamedXContentRegistry xContentRegistry, + Dispatcher dispatcher, + ClusterSettings clusterSettings, + SharedGroupFactory sharedGroupFactory, + Tracer tracer + ) { + super( + settings, + networkService, + bigArrays, + threadPool, + xContentRegistry, + dispatcher, + clusterSettings, + sharedGroupFactory, + tracer + ); + } + + @Override + protected ChannelInboundHandlerAdapter createHeaderVerifier() { + return new ExampleBlockingNetty4HeaderVerifier(); + } + } + + @Override + public Map<String, Supplier<HttpServerTransport>> getHttpTransports( + Settings settings, + ThreadPool threadPool, + BigArrays bigArrays, + PageCacheRecycler pageCacheRecycler, + CircuitBreakerService circuitBreakerService, + NamedXContentRegistry xContentRegistry, + NetworkService networkService, + HttpServerTransport.Dispatcher dispatcher, + ClusterSettings clusterSettings, + Tracer tracer + ) { + return Collections.singletonMap( + NETTY_HTTP_TRANSPORT_NAME, + () -> new Netty4BlockingHttpServerTransport( + settings, + networkService, + bigArrays, + threadPool, + 
xContentRegistry, + dispatcher, + clusterSettings, + getSharedGroupFactory(settings), + tracer + ) + ); + } + + /** POC for how an external header verifier would be implemented */ + public class ExampleBlockingNetty4HeaderVerifier extends SimpleChannelInboundHandler<DefaultHttpRequest> { + + @Override + public void channelRead0(ChannelHandlerContext ctx, DefaultHttpRequest msg) throws Exception { + ReferenceCountUtil.retain(msg); + if (isBlocked(msg)) { + ByteBuf buf = Unpooled.copiedBuffer("Hit header_verifier".getBytes(StandardCharsets.UTF_8)); + final FullHttpResponse response = new DefaultFullHttpResponse(msg.protocolVersion(), HttpResponseStatus.UNAUTHORIZED, buf); + ctx.writeAndFlush(response).addListener(ChannelFutureListener.CLOSE); + ReferenceCountUtil.release(msg); + } else { + // Lets the request pass to the next channel handler + ctx.fireChannelRead(msg); + } + } + + private boolean isBlocked(HttpRequest request) { + final boolean shouldBlock = request.headers().contains("blockme"); + + return shouldBlock; + } + } +} diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/netty4/Netty4TransportMultiPortIntegrationIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/netty4/Netty4TransportMultiPortIntegrationIT.java index 3ff3938d23f65..4004d3d1a029d 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/netty4/Netty4TransportMultiPortIntegrationIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/netty4/Netty4TransportMultiPortIntegrationIT.java @@ -36,8 +36,8 @@ import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; import org.opensearch.common.network.NetworkAddress; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; import org.opensearch.test.junit.annotations.Network; diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/netty4/Netty4TransportPublishAddressIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/netty4/Netty4TransportPublishAddressIT.java index e6604abf126da..4722cdb66be18 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/netty4/Netty4TransportPublishAddressIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/netty4/Netty4TransportPublishAddressIT.java @@ -38,8 +38,8 @@ import org.opensearch.common.network.NetworkModule; import org.opensearch.common.network.NetworkUtils; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.transport.Netty4ModulePlugin; import org.opensearch.transport.TransportInfo; diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/netty4/OpenSearchLoggingHandlerIT.java 
b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/netty4/OpenSearchLoggingHandlerIT.java index ea3f21dd0ed3b..b8369acdf9dc6 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/netty4/OpenSearchLoggingHandlerIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/netty4/OpenSearchLoggingHandlerIT.java @@ -36,9 +36,9 @@ import org.apache.logging.log4j.LogManager; import org.opensearch.OpenSearchNetty4IntegTestCase; import org.opensearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.MockLogAppender; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.junit.annotations.TestLogging; import org.opensearch.transport.TcpTransport; import org.opensearch.transport.TransportLogger; diff --git a/modules/transport-netty4/src/javaRestTest/java/org/opensearch/rest/Netty4BadRequestIT.java b/modules/transport-netty4/src/javaRestTest/java/org/opensearch/rest/Netty4BadRequestIT.java index 2584b768707cd..f7e1c6106cf5a 100644 --- a/modules/transport-netty4/src/javaRestTest/java/org/opensearch/rest/Netty4BadRequestIT.java +++ b/modules/transport-netty4/src/javaRestTest/java/org/opensearch/rest/Netty4BadRequestIT.java @@ -38,7 +38,7 @@ import org.opensearch.client.ResponseException; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.http.HttpTransportSettings; import org.opensearch.test.rest.OpenSearchRestTestCase; import org.opensearch.test.rest.yaml.ObjectPath; diff --git a/modules/transport-netty4/src/javaRestTest/java/org/opensearch/rest/Netty4HeadBodyIsEmptyIT.java b/modules/transport-netty4/src/javaRestTest/java/org/opensearch/rest/Netty4HeadBodyIsEmptyIT.java index 4b962401387b7..b4b15c22258de 100644 --- a/modules/transport-netty4/src/javaRestTest/java/org/opensearch/rest/Netty4HeadBodyIsEmptyIT.java +++ b/modules/transport-netty4/src/javaRestTest/java/org/opensearch/rest/Netty4HeadBodyIsEmptyIT.java @@ -34,7 +34,6 @@ import org.opensearch.client.Request; import org.opensearch.client.Response; -import org.opensearch.common.Strings; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.test.rest.OpenSearchRestTestCase; import org.hamcrest.Matcher; @@ -68,7 +67,7 @@ private void createTestDoc(final String indexName) throws IOException { } builder.endObject(); Request request = new Request("PUT", "/" + indexName + "/_doc/" + "1"); - request.setJsonEntity(Strings.toString(builder)); + request.setJsonEntity(builder.toString()); client().performRequest(request); } } @@ -109,7 +108,7 @@ public void testAliasExists() throws IOException { builder.endObject(); Request request = new Request("POST", "/_aliases"); - request.setJsonEntity(Strings.toString(builder)); + request.setJsonEntity(builder.toString()); client().performRequest(request); headTestCase("/_alias/test_alias", emptyMap(), greaterThan(0)); headTestCase("/test/_alias/test_alias", emptyMap(), greaterThan(0)); @@ -136,7 +135,7 @@ public void testTemplateExists() throws IOException { builder.endObject(); Request request = new Request("PUT", "/_template/template"); - request.setJsonEntity(Strings.toString(builder)); + request.setJsonEntity(builder.toString()); client().performRequest(request); 
headTestCase("/_template/template", emptyMap(), greaterThan(0)); } @@ -163,7 +162,7 @@ public void testGetSourceAction() throws IOException { builder.endObject(); Request request = new Request("PUT", "/test-no-source"); - request.setJsonEntity(Strings.toString(builder)); + request.setJsonEntity(builder.toString()); client().performRequest(request); createTestDoc("test-no-source"); headTestCase("/test-no-source/_source/1", emptyMap(), NOT_FOUND.getStatus(), greaterThan(0)); diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java index 2dd7aaf41986f..75d30aa9797c0 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java @@ -32,19 +32,21 @@ package org.opensearch.http.netty4; -import io.netty.channel.Channel; -import io.netty.channel.ChannelPipeline; - -import org.opensearch.action.ActionListener; import org.opensearch.common.Nullable; import org.opensearch.common.concurrent.CompletableContext; +import org.opensearch.core.action.ActionListener; import org.opensearch.http.HttpChannel; import org.opensearch.http.HttpResponse; import org.opensearch.transport.netty4.Netty4TcpChannel; import java.net.InetSocketAddress; +import java.util.Optional; + +import io.netty.channel.Channel; +import io.netty.channel.ChannelPipeline; public class Netty4HttpChannel implements HttpChannel { + private static final String CHANNEL_PROPERTY = "channel"; private final Channel channel; private final CompletableContext<Void> closeContext = new CompletableContext<>(); @@ -98,6 +100,26 @@ public Channel getNettyChannel() { return channel; } + @SuppressWarnings("unchecked") + @Override + public <T> Optional<T> get(String name, Class<T> clazz) { + if (CHANNEL_PROPERTY.equalsIgnoreCase(name) && clazz.isAssignableFrom(Channel.class)) { + return (Optional<T>) Optional.of(getNettyChannel()); + } + + Object handler = getNettyChannel().pipeline().get(name); + + if (handler == null && inboundPipeline() != null) { + handler = inboundPipeline().get(name); + } + + if (handler != null && clazz.isInstance(handler) == true) { + return Optional.of((T) handler); + } + + return Optional.empty(); + } + @Override public String toString() { return "Netty4HttpChannel{" + "localAddress=" + getLocalAddress() + ", remoteAddress=" + getRemoteAddress() + '}'; diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpPipeliningHandler.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpPipeliningHandler.java index af975518e087e..1200dcaf9e0f8 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpPipeliningHandler.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpPipeliningHandler.java @@ -32,9 +32,6 @@ package org.opensearch.http.netty4; -import io.netty.channel.ChannelDuplexHandler; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelPromise; import org.apache.logging.log4j.Logger; import org.opensearch.common.collect.Tuple; import org.opensearch.http.HttpPipelinedRequest; @@ -44,6 +41,10 @@ import java.nio.channels.ClosedChannelException; import java.util.List; +import io.netty.channel.ChannelDuplexHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelPromise; + /** * Implements 
HTTP pipelining ordering, ensuring that responses are completely served in the same order as their corresponding requests. */ diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpRequest.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpRequest.java index c502cb9882b55..3c96affb7adf7 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpRequest.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpRequest.java @@ -32,21 +32,10 @@ package org.opensearch.http.netty4; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.Unpooled; -import io.netty.handler.codec.http.DefaultFullHttpRequest; -import io.netty.handler.codec.http.DefaultHttpHeaders; -import io.netty.handler.codec.http.FullHttpRequest; -import io.netty.handler.codec.http.HttpHeaderNames; -import io.netty.handler.codec.http.HttpHeaders; -import io.netty.handler.codec.http.HttpMethod; -import io.netty.handler.codec.http.cookie.Cookie; -import io.netty.handler.codec.http.cookie.ServerCookieDecoder; -import io.netty.handler.codec.http.cookie.ServerCookieEncoder; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.rest.RestStatus; import org.opensearch.http.HttpRequest; import org.opensearch.rest.RestRequest; -import org.opensearch.core.rest.RestStatus; import org.opensearch.transport.netty4.Netty4Utils; import java.util.AbstractMap; @@ -58,6 +47,18 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.DefaultHttpHeaders; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpHeaders; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.cookie.Cookie; +import io.netty.handler.codec.http.cookie.ServerCookieDecoder; +import io.netty.handler.codec.http.cookie.ServerCookieEncoder; + public class Netty4HttpRequest implements HttpRequest { private final FullHttpRequest request; @@ -257,7 +258,7 @@ public FullHttpRequest nettyRequest() { /** * A wrapper of {@link HttpHeaders} that implements a map to prevent copying unnecessarily. This class does not support modifications * and due to the underlying implementation, it performs case insensitive lookups of key to values. - * + * <p> * It is important to note that this implementation does have some downsides in that each invocation of the * {@link #values()} and {@link #entrySet()} methods will perform a copy of the values in the HttpHeaders rather than returning a * view of the underlying values. 
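The typed get accessor added to Netty4HttpChannel above exposes either the raw Netty Channel (under the reserved, case-insensitive property name "channel") or any named handler from the channel's pipelines as an Optional. A minimal usage sketch, not part of this patch; it assumes a Netty4HttpChannel instance is already in hand, and the handler name "decoder_compress" matches the pipeline names installed elsewhere in this change:

    import java.util.Optional;

    import io.netty.channel.Channel;
    import io.netty.handler.codec.http.HttpContentDecompressor;
    import org.opensearch.http.netty4.Netty4HttpChannel;

    final class ChannelAccessorSketch {
        // Sketch only: exercises Netty4HttpChannel#get(String, Class) from this patch.
        static void inspect(Netty4HttpChannel httpChannel) {
            // "channel" is matched case-insensitively and yields the raw Netty channel.
            Optional<Channel> raw = httpChannel.get("channel", Channel.class);
            raw.ifPresent(c -> System.out.println("remote address: " + c.remoteAddress()));

            // Any other name is looked up in the channel pipeline, falling back to the
            // inbound pipeline when present; type mismatches yield Optional.empty().
            Optional<HttpContentDecompressor> decompressor = httpChannel.get("decoder_compress", HttpContentDecompressor.class);
            decompressor.ifPresent(h -> System.out.println("decompressor installed"));
        }
    }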
diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpRequestCreator.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpRequestCreator.java index 52d202f45a728..f97c2059e03d5 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpRequestCreator.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpRequestCreator.java @@ -32,13 +32,14 @@ package org.opensearch.http.netty4; +import org.opensearch.ExceptionsHelper; + +import java.util.List; + import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; import io.netty.handler.codec.MessageToMessageDecoder; import io.netty.handler.codec.http.FullHttpRequest; -import org.opensearch.ExceptionsHelper; - -import java.util.List; @ChannelHandler.Sharable class Netty4HttpRequestCreator extends MessageToMessageDecoder<FullHttpRequest> { diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpRequestHandler.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpRequestHandler.java index caa80587a6e56..1f7aaf17d2191 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpRequestHandler.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpRequestHandler.java @@ -32,11 +32,12 @@ package org.opensearch.http.netty4; +import org.opensearch.ExceptionsHelper; +import org.opensearch.http.HttpPipelinedRequest; + import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.SimpleChannelInboundHandler; -import org.opensearch.ExceptionsHelper; -import org.opensearch.http.HttpPipelinedRequest; @ChannelHandler.Sharable class Netty4HttpRequestHandler extends SimpleChannelInboundHandler<HttpPipelinedRequest> { diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpResponse.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpResponse.java index c1fd97022d1af..83284230be049 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpResponse.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpResponse.java @@ -32,14 +32,15 @@ package org.opensearch.http.netty4; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.http.HttpResponse; +import org.opensearch.transport.netty4.Netty4Utils; + import io.netty.handler.codec.http.DefaultFullHttpResponse; import io.netty.handler.codec.http.HttpHeaders; import io.netty.handler.codec.http.HttpResponseStatus; import io.netty.handler.codec.http.HttpVersion; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.http.HttpResponse; -import org.opensearch.core.rest.RestStatus; -import org.opensearch.transport.netty4.Netty4Utils; public class Netty4HttpResponse extends DefaultFullHttpResponse implements HttpResponse { diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpResponseCreator.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpResponseCreator.java index 00c3049162270..e2f04f76e6af8 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpResponseCreator.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpResponseCreator.java @@ -32,6 +32,11 @@ 
package org.opensearch.http.netty4; +import org.opensearch.common.Booleans; +import org.opensearch.transport.NettyAllocator; + +import java.util.List; + import io.netty.buffer.ByteBuf; import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; @@ -41,10 +46,6 @@ import io.netty.handler.codec.http.DefaultHttpResponse; import io.netty.handler.codec.http.DefaultLastHttpContent; import io.netty.handler.codec.http.HttpResponse; -import org.opensearch.common.Booleans; -import org.opensearch.transport.NettyAllocator; - -import java.util.List; /** * Split up large responses to prevent batch compression {@link JdkZlibEncoder} down the pipeline. diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerChannel.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerChannel.java index 560b7b565fa40..7b8858174e555 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerChannel.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerChannel.java @@ -32,14 +32,15 @@ package org.opensearch.http.netty4; -import io.netty.channel.Channel; -import org.opensearch.action.ActionListener; import org.opensearch.common.concurrent.CompletableContext; +import org.opensearch.core.action.ActionListener; import org.opensearch.http.HttpServerChannel; import org.opensearch.transport.netty4.Netty4TcpChannel; import java.net.InetSocketAddress; +import io.netty.channel.Channel; + public class Netty4HttpServerChannel implements HttpServerChannel { private final Channel channel; diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java index 124bc02527bd1..4970c42163ac3 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java @@ -32,6 +32,37 @@ package org.opensearch.http.netty4; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.ExceptionsHelper; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Setting.Property; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.BigArrays; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.common.util.net.NetUtils; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.http.AbstractHttpServerTransport; +import org.opensearch.http.HttpChannel; +import org.opensearch.http.HttpHandlingSettings; +import org.opensearch.http.HttpReadTimeoutException; +import org.opensearch.http.HttpServerChannel; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.NettyAllocator; +import org.opensearch.transport.NettyByteBufSizer; +import org.opensearch.transport.SharedGroupFactory; +import org.opensearch.transport.netty4.Netty4Utils; + +import java.net.InetSocketAddress; +import java.net.SocketOption; +import 
java.util.concurrent.TimeUnit; + import io.netty.bootstrap.ServerBootstrap; import io.netty.channel.Channel; import io.netty.channel.ChannelFuture; @@ -70,36 +101,6 @@ import io.netty.util.AttributeKey; import io.netty.util.ReferenceCountUtil; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.ExceptionsHelper; -import org.opensearch.common.network.NetworkService; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Setting; -import org.opensearch.common.settings.Setting.Property; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.common.util.BigArrays; -import org.opensearch.common.util.concurrent.OpenSearchExecutors; -import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.common.util.io.IOUtils; -import org.opensearch.common.util.net.NetUtils; -import org.opensearch.http.AbstractHttpServerTransport; -import org.opensearch.http.HttpChannel; -import org.opensearch.http.HttpHandlingSettings; -import org.opensearch.http.HttpReadTimeoutException; -import org.opensearch.http.HttpServerChannel; -import org.opensearch.threadpool.ThreadPool; -import org.opensearch.transport.NettyAllocator; -import org.opensearch.transport.NettyByteBufSizer; -import org.opensearch.transport.SharedGroupFactory; -import org.opensearch.transport.netty4.Netty4Utils; - -import java.net.InetSocketAddress; -import java.net.SocketOption; -import java.util.concurrent.TimeUnit; - import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE; import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH; import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE; @@ -115,6 +116,9 @@ import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_SEND_BUFFER_SIZE; import static org.opensearch.http.HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS; +/** + * The HTTP transport implementation based on Netty 4.
+ */ public class Netty4HttpServerTransport extends AbstractHttpServerTransport { private static final Logger logger = LogManager.getLogger(Netty4HttpServerTransport.class); @@ -183,6 +187,17 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport { private volatile ServerBootstrap serverBootstrap; private volatile SharedGroupFactory.SharedGroup sharedGroup; + /** + * Creates a new HTTP transport implementation based on Netty 4 + * @param settings settings + * @param networkService network service + * @param bigArrays big array allocator + * @param threadPool thread pool instance + * @param xContentRegistry XContent registry instance + * @param dispatcher dispatcher instance + * @param clusterSettings cluster settings + * @param sharedGroupFactory shared group factory + */ public Netty4HttpServerTransport( Settings settings, NetworkService networkService, @@ -191,9 +206,10 @@ public Netty4HttpServerTransport( NamedXContentRegistry xContentRegistry, Dispatcher dispatcher, ClusterSettings clusterSettings, - SharedGroupFactory sharedGroupFactory + SharedGroupFactory sharedGroupFactory, + Tracer tracer ) { - super(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher, clusterSettings); + super(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher, clusterSettings, tracer); Netty4Utils.setAvailableProcessors(OpenSearchExecutors.NODE_PROCESSORS_SETTING.get(settings)); NettyAllocator.logAllocatorDescriptionIfNeeded(); this.sharedGroupFactory = sharedGroupFactory; @@ -332,7 +348,7 @@ public ChannelHandler configureServerChannelHandler() { return new HttpChannelHandler(this, handlingSettings); } - protected static final AttributeKey<Netty4HttpChannel> HTTP_CHANNEL_KEY = AttributeKey.newInstance("opensearch-http-channel"); + public static final AttributeKey<Netty4HttpChannel> HTTP_CHANNEL_KEY = AttributeKey.newInstance("opensearch-http-channel"); protected static final AttributeKey<Netty4HttpServerChannel> HTTP_SERVER_CHANNEL_KEY = AttributeKey.newInstance( "opensearch-http-server-channel" ); @@ -417,8 +433,8 @@ protected void channelRead0(ChannelHandlerContext ctx, HttpMessage msg) throws E // If this handler is hit then no upgrade has been attempted and the client is just talking HTTP final ChannelPipeline pipeline = ctx.pipeline(); pipeline.addAfter(ctx.name(), "handler", getRequestHandler()); - pipeline.replace(this, "decoder_compress", new HttpContentDecompressor()); - + pipeline.replace(this, "header_verifier", transport.createHeaderVerifier()); + pipeline.addAfter("header_verifier", "decoder_compress", transport.createDecompressor()); pipeline.addAfter("decoder_compress", "aggregator", aggregator); if (handlingSettings.isCompression()) { pipeline.addAfter( @@ -444,7 +460,8 @@ protected void configureDefaultHttpPipeline(ChannelPipeline pipeline) { ); decoder.setCumulator(ByteToMessageDecoder.COMPOSITE_CUMULATOR); pipeline.addLast("decoder", decoder); - pipeline.addLast("decoder_compress", new HttpContentDecompressor()); + pipeline.addLast("header_verifier", transport.createHeaderVerifier()); + pipeline.addLast("decoder_compress", transport.createDecompressor()); pipeline.addLast("encoder", new HttpResponseEncoder()); final HttpObjectAggregator aggregator = new HttpObjectAggregator(handlingSettings.getMaxContentLength()); aggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents); @@ -485,13 +502,13 @@ protected void initChannel(Channel childChannel) throws Exception { final HttpObjectAggregator 
aggregator = new HttpObjectAggregator(handlingSettings.getMaxContentLength()); aggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents); - childChannel.pipeline() .addLast(new LoggingHandler(LogLevel.DEBUG)) .addLast(new Http2StreamFrameToHttpObjectCodec(true)) .addLast("byte_buf_sizer", byteBufSizer) .addLast("read_timeout", new ReadTimeoutHandler(transport.readTimeoutMillis, TimeUnit.MILLISECONDS)) - .addLast("decoder_decompress", new HttpContentDecompressor()); + .addLast("header_verifier", transport.createHeaderVerifier()) + .addLast("decoder_decompress", transport.createDecompressor()); if (handlingSettings.isCompression()) { childChannel.pipeline() @@ -529,4 +546,21 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { } } } + + /** + * Extension point that allows a NetworkPlugin to extend the netty pipeline and inspect headers after request decoding. + */ + protected ChannelInboundHandlerAdapter createHeaderVerifier() { + // pass-through + return new ChannelInboundHandlerAdapter(); + } + + /** + * Extension point that allows a NetworkPlugin to override the default netty HttpContentDecompressor and supply a custom decompressor. + * + * Used, for instance, to conditionally decompress depending on the outcome of header verification. + */ + protected ChannelInboundHandlerAdapter createDecompressor() { + return new HttpContentDecompressor(); + } } diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/CopyBytesServerSocketChannel.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/CopyBytesServerSocketChannel.java index c98cfbe711821..07dedb744ff11 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/CopyBytesServerSocketChannel.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/CopyBytesServerSocketChannel.java @@ -46,14 +46,15 @@ package org.opensearch.transport; -import io.netty.channel.socket.nio.NioServerSocketChannel; -import io.netty.util.internal.SocketUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import java.nio.channels.SocketChannel; import java.util.List; +import io.netty.channel.socket.nio.NioServerSocketChannel; +import io.netty.util.internal.SocketUtils; + /** * This class is adapted from {@link NioServerSocketChannel} class in the Netty project. It overrides the * channel read messages behavior to ensure that a {@link CopyBytesSocketChannel} socket channel is created.
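[Editor's note] A minimal sketch of how a downstream transport might use the createHeaderVerifier()/createDecompressor() extension points added above. The subclass name, the X-Example-Token header, and the reject-by-closing policy are all hypothetical illustrations, not part of this change; only the two @Override methods correspond to the real extension points.

    import org.opensearch.common.network.NetworkService;
    import org.opensearch.common.settings.ClusterSettings;
    import org.opensearch.common.settings.Settings;
    import org.opensearch.common.util.BigArrays;
    import org.opensearch.core.xcontent.NamedXContentRegistry;
    import org.opensearch.http.HttpServerTransport;
    import org.opensearch.http.netty4.Netty4HttpServerTransport;
    import org.opensearch.telemetry.tracing.Tracer;
    import org.opensearch.threadpool.ThreadPool;
    import org.opensearch.transport.SharedGroupFactory;

    import io.netty.channel.ChannelHandlerContext;
    import io.netty.channel.ChannelInboundHandlerAdapter;
    import io.netty.handler.codec.http.HttpContentDecompressor;
    import io.netty.handler.codec.http.HttpRequest;
    import io.netty.util.ReferenceCountUtil;

    public class HeaderVerifyingHttpServerTransport extends Netty4HttpServerTransport {

        public HeaderVerifyingHttpServerTransport(
            Settings settings,
            NetworkService networkService,
            BigArrays bigArrays,
            ThreadPool threadPool,
            NamedXContentRegistry xContentRegistry,
            HttpServerTransport.Dispatcher dispatcher,
            ClusterSettings clusterSettings,
            SharedGroupFactory sharedGroupFactory,
            Tracer tracer
        ) {
            super(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher, clusterSettings, sharedGroupFactory, tracer);
        }

        @Override
        protected ChannelInboundHandlerAdapter createHeaderVerifier() {
            return new ChannelInboundHandlerAdapter() {
                @Override
                public void channelRead(ChannelHandlerContext ctx, Object msg) {
                    // Runs after "decoder", so request headers are already decoded here
                    if (msg instanceof HttpRequest && ((HttpRequest) msg).headers().contains("X-Example-Token") == false) {
                        ReferenceCountUtil.release(msg); // drop the rejected message
                        ctx.close();
                        return;
                    }
                    ctx.fireChannelRead(msg); // hand everything else on to "decoder_compress"
                }
            };
        }

        @Override
        protected ChannelInboundHandlerAdapter createDecompressor() {
            // Could consult state recorded by the verifier; here the default decompressor is kept
            return new HttpContentDecompressor();
        }
    }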
diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/CopyBytesSocketChannel.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/CopyBytesSocketChannel.java index 9a5459a5ab572..4bab91565d3ad 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/CopyBytesSocketChannel.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/CopyBytesSocketChannel.java @@ -46,25 +46,26 @@ package org.opensearch.transport; -import io.netty.buffer.ByteBuf; -import io.netty.channel.Channel; -import io.netty.channel.ChannelOutboundBuffer; -import io.netty.channel.RecvByteBufAllocator; -import io.netty.channel.socket.nio.NioSocketChannel; import org.opensearch.common.SuppressForbidden; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.channels.SocketChannel; +import io.netty.buffer.ByteBuf; +import io.netty.channel.Channel; +import io.netty.channel.ChannelOutboundBuffer; +import io.netty.channel.RecvByteBufAllocator; +import io.netty.channel.socket.nio.NioSocketChannel; + import static io.netty.channel.internal.ChannelUtils.MAX_BYTES_PER_GATHERING_WRITE_ATTEMPTED_LOW_THRESHOLD; /** * This class is adapted from {@link NioSocketChannel} class in the Netty project. It overrides the channel * read/write behavior to ensure that the bytes are always copied to a thread-local direct bytes buffer. This * happens BEFORE the call to the Java {@link SocketChannel} is issued. - * + * <p> * The purpose of this class is to allow the disabling of netty direct buffer pooling while allowing us to * control how bytes end up being copied to direct memory. If we simply disabled netty pooling, we would rely * on the JDK's internal thread local buffer pooling. 
Instead, this class allows us to create a one thread diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java index d7f2f6eb6acbb..2bc795d11ed5d 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java @@ -34,7 +34,6 @@ import org.opensearch.Version; import org.opensearch.common.SetOnce; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.network.NetworkModule; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.ClusterSettings; @@ -42,12 +41,14 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.PageCacheRecycler; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.http.HttpServerTransport; import org.opensearch.http.netty4.Netty4HttpServerTransport; -import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.plugins.NetworkPlugin; import org.opensearch.plugins.Plugin; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.netty4.Netty4Transport; @@ -95,7 +96,8 @@ public Map<String, Supplier<Transport>> getTransports( PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, - NetworkService networkService + NetworkService networkService, + Tracer tracer ) { return Collections.singletonMap( NETTY_TRANSPORT_NAME, @@ -107,7 +109,8 @@ public Map<String, Supplier<Transport>> getTransports( pageCacheRecycler, namedWriteableRegistry, circuitBreakerService, - getSharedGroupFactory(settings) + getSharedGroupFactory(settings), + tracer ) ); } @@ -122,7 +125,8 @@ public Map<String, Supplier<HttpServerTransport>> getHttpTransports( NamedXContentRegistry xContentRegistry, NetworkService networkService, HttpServerTransport.Dispatcher dispatcher, - ClusterSettings clusterSettings + ClusterSettings clusterSettings, + Tracer tracer ) { return Collections.singletonMap( NETTY_HTTP_TRANSPORT_NAME, @@ -134,12 +138,13 @@ public Map<String, Supplier<HttpServerTransport>> getHttpTransports( xContentRegistry, dispatcher, clusterSettings, - getSharedGroupFactory(settings) + getSharedGroupFactory(settings), + tracer ) ); } - private SharedGroupFactory getSharedGroupFactory(Settings settings) { + SharedGroupFactory getSharedGroupFactory(Settings settings) { SharedGroupFactory groupFactory = this.groupFactory.get(); if (groupFactory != null) { assert groupFactory.getSettings().equals(settings) : "Different settings than originally provided"; diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4NioServerSocketChannel.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4NioServerSocketChannel.java index 8a8b1da6ef5dd..e7626b6ddc57a 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4NioServerSocketChannel.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4NioServerSocketChannel.java @@ -8,17 +8,17 @@ package org.opensearch.transport; +import 
java.nio.channels.ServerSocketChannel; +import java.nio.channels.SocketChannel; +import java.nio.channels.spi.SelectorProvider; +import java.util.List; + import io.netty.channel.socket.InternetProtocolFamily; import io.netty.channel.socket.nio.NioServerSocketChannel; import io.netty.util.internal.SocketUtils; import io.netty.util.internal.logging.InternalLogger; import io.netty.util.internal.logging.InternalLoggerFactory; -import java.nio.channels.ServerSocketChannel; -import java.nio.channels.SocketChannel; -import java.nio.channels.spi.SelectorProvider; -import java.util.List; - public class Netty4NioServerSocketChannel extends NioServerSocketChannel { private static final InternalLogger logger = InternalLoggerFactory.getInstance(Netty4NioServerSocketChannel.class); diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4NioSocketChannel.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4NioSocketChannel.java index 9fe5d3731eb7d..cca099b4810b3 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4NioSocketChannel.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4NioSocketChannel.java @@ -32,11 +32,11 @@ package org.opensearch.transport; +import java.nio.channels.SocketChannel; + import io.netty.channel.Channel; import io.netty.channel.socket.nio.NioSocketChannel; -import java.nio.channels.SocketChannel; - /** * Helper class to expose {@link #javaChannel()} method */ diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/NettyAllocator.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/NettyAllocator.java index f2f6538d305d9..ff901476c162d 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/NettyAllocator.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/NettyAllocator.java @@ -32,6 +32,14 @@ package org.opensearch.transport; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.Booleans; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.monitor.jvm.JvmInfo; + +import java.util.concurrent.atomic.AtomicBoolean; + import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufAllocator; import io.netty.buffer.CompositeByteBuf; @@ -39,13 +47,6 @@ import io.netty.buffer.UnpooledByteBufAllocator; import io.netty.channel.Channel; import io.netty.channel.ServerChannel; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.common.Booleans; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.monitor.jvm.JvmInfo; - -import java.util.concurrent.atomic.AtomicBoolean; public class NettyAllocator { diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/NettyByteBufSizer.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/NettyByteBufSizer.java index 857e07f6feca2..54824bc160747 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/NettyByteBufSizer.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/NettyByteBufSizer.java @@ -32,13 +32,13 @@ package org.opensearch.transport; +import java.util.List; + import io.netty.buffer.ByteBuf; import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; import io.netty.handler.codec.MessageToMessageDecoder; -import java.util.List; - @ChannelHandler.Sharable public class NettyByteBufSizer 
extends MessageToMessageDecoder<ByteBuf> { diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/SharedGroupFactory.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/SharedGroupFactory.java index d70f6bc83cc73..454293442572c 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/SharedGroupFactory.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/SharedGroupFactory.java @@ -32,9 +32,6 @@ package org.opensearch.transport; -import io.netty.channel.EventLoopGroup; -import io.netty.channel.nio.NioEventLoopGroup; -import io.netty.util.concurrent.Future; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.common.settings.Settings; @@ -46,6 +43,10 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.util.concurrent.Future; + import static org.opensearch.common.util.concurrent.OpenSearchExecutors.daemonThreadFactory; /** diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4MessageChannelHandler.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4MessageChannelHandler.java index 592ea45016bf3..7b9999ce5b20e 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4MessageChannelHandler.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4MessageChannelHandler.java @@ -32,18 +32,12 @@ package org.opensearch.transport.netty4; -import io.netty.buffer.ByteBuf; -import io.netty.channel.Channel; -import io.netty.channel.ChannelDuplexHandler; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelPromise; -import org.opensearch.OpenSearchException; import org.opensearch.ExceptionsHelper; -import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.OpenSearchException; import org.opensearch.common.bytes.ReleasableBytesReference; -import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.common.lease.Releasables; +import org.opensearch.common.util.PageCacheRecycler; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.InboundPipeline; import org.opensearch.transport.Transport; @@ -53,6 +47,13 @@ import java.util.ArrayDeque; import java.util.Queue; +import io.netty.buffer.ByteBuf; +import io.netty.channel.Channel; +import io.netty.channel.ChannelDuplexHandler; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelPromise; + /** * A handler (must be the last one!) that does size based frame decoding and forwards the actual message * to the relevant action. 
diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4TcpChannel.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4TcpChannel.java index 46cbf60d45116..79a5bf9e95121 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4TcpChannel.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4TcpChannel.java @@ -32,18 +32,20 @@ package org.opensearch.transport.netty4; -import io.netty.channel.Channel; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelPromise; import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionListener; import org.opensearch.common.Nullable; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.concurrent.CompletableContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.transport.TcpChannel; import org.opensearch.transport.TransportException; import java.net.InetSocketAddress; +import java.util.Optional; + +import io.netty.channel.Channel; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelPromise; public class Netty4TcpChannel implements TcpChannel { @@ -163,6 +165,18 @@ public void sendMessage(BytesReference reference, ActionListener<Void> listener) } } + @SuppressWarnings("unchecked") + @Override + public <T> Optional<T> get(String name, Class<T> clazz) { + final Object handler = getNettyChannel().pipeline().get(name); + + if (handler != null && clazz.isInstance(handler) == true) { + return Optional.of((T) handler); + } + + return Optional.empty(); + } + public Channel getNettyChannel() { return channel; } diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4TcpServerChannel.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4TcpServerChannel.java index 6131d7fe2acb9..8910e2b51374e 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4TcpServerChannel.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4TcpServerChannel.java @@ -32,13 +32,14 @@ package org.opensearch.transport.netty4; -import io.netty.channel.Channel; -import org.opensearch.action.ActionListener; import org.opensearch.common.concurrent.CompletableContext; +import org.opensearch.core.action.ActionListener; import org.opensearch.transport.TcpServerChannel; import java.net.InetSocketAddress; +import io.netty.channel.Channel; + public class Netty4TcpServerChannel implements TcpServerChannel { private final Channel channel; diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4Transport.java index 561cac2facbff..e76a227630dc1 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4Transport.java @@ -31,20 +31,6 @@ package org.opensearch.transport.netty4; -import io.netty.bootstrap.Bootstrap; -import io.netty.bootstrap.ServerBootstrap; -import io.netty.channel.AdaptiveRecvByteBufAllocator; -import io.netty.channel.Channel; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelHandler; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelInboundHandlerAdapter; 
-import io.netty.channel.ChannelInitializer; -import io.netty.channel.ChannelOption; -import io.netty.channel.FixedRecvByteBufAllocator; -import io.netty.channel.RecvByteBufAllocator; -import io.netty.channel.socket.nio.NioChannelOption; -import io.netty.util.AttributeKey; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -52,18 +38,19 @@ import org.opensearch.Version; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.SuppressForbidden; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.net.NetUtils; -import org.opensearch.common.lease.Releasables; -import org.opensearch.indices.breaker.CircuitBreakerService; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.indices.breaker.CircuitBreakerService; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Netty4NioSocketChannel; import org.opensearch.transport.NettyAllocator; @@ -77,6 +64,21 @@ import java.net.SocketOption; import java.util.Map; +import io.netty.bootstrap.Bootstrap; +import io.netty.bootstrap.ServerBootstrap; +import io.netty.channel.AdaptiveRecvByteBufAllocator; +import io.netty.channel.Channel; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelOption; +import io.netty.channel.FixedRecvByteBufAllocator; +import io.netty.channel.RecvByteBufAllocator; +import io.netty.channel.socket.nio.NioChannelOption; +import io.netty.util.AttributeKey; + import static org.opensearch.common.settings.Setting.byteSizeSetting; import static org.opensearch.common.settings.Setting.intSetting; import static org.opensearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; @@ -130,9 +132,10 @@ public Netty4Transport( PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService, - SharedGroupFactory sharedGroupFactory + SharedGroupFactory sharedGroupFactory, + Tracer tracer ) { - super(settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService); + super(settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService, tracer); Netty4Utils.setAvailableProcessors(OpenSearchExecutors.NODE_PROCESSORS_SETTING.get(settings)); NettyAllocator.logAllocatorDescriptionIfNeeded(); this.sharedGroupFactory = sharedGroupFactory; diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4Utils.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4Utils.java index 
10fd1cf4366c4..11fc74a2afa7f 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4Utils.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4Utils.java @@ -32,10 +32,6 @@ package org.opensearch.transport.netty4; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.CompositeByteBuf; -import io.netty.buffer.Unpooled; -import io.netty.util.NettyRuntime; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; import org.opensearch.common.Booleans; @@ -49,6 +45,11 @@ import java.util.Locale; import java.util.concurrent.atomic.AtomicBoolean; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.CompositeByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.util.NettyRuntime; + public class Netty4Utils { private static final AtomicBoolean isAvailableProcessorsSet = new AtomicBoolean(); diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4BadRequestTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4BadRequestTests.java index ef014aa39367b..03990c173d547 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4BadRequestTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4BadRequestTests.java @@ -32,23 +32,22 @@ package org.opensearch.http.netty4; -import io.netty.handler.codec.http.FullHttpResponse; -import io.netty.util.ReferenceCounted; import org.opensearch.OpenSearchException; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.util.MockBigArrays; import org.opensearch.common.util.MockPageCacheRecycler; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.rest.RestStatus; import org.opensearch.http.HttpServerTransport; import org.opensearch.http.HttpTransportSettings; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestChannel; import org.opensearch.rest.RestRequest; -import org.opensearch.core.rest.RestStatus; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -61,6 +60,9 @@ import java.util.Collection; import java.util.Collections; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.util.ReferenceCounted; + import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; @@ -111,7 +113,8 @@ public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, xContentRegistry(), dispatcher, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - new SharedGroupFactory(Settings.EMPTY) + new SharedGroupFactory(Settings.EMPTY), + NoopTracer.INSTANCE ) ) { httpServerTransport.start(); diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpChannelTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpChannelTests.java new file mode 100644 index 
0000000000000..c49166a51c24a --- /dev/null +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpChannelTests.java @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.http.netty4; + +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.transport.Netty4NioSocketChannel; +import org.junit.Before; + +import java.util.Optional; + +import io.netty.channel.Channel; +import io.netty.channel.ChannelOutboundInvoker; +import io.netty.channel.ServerChannel; + +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.sameInstance; + +public class Netty4HttpChannelTests extends OpenSearchTestCase { + private Netty4HttpChannel netty4HttpChannel; + private Channel channel; + + @Before + @Override + public void setUp() throws Exception { + super.setUp(); + channel = new Netty4NioSocketChannel(); + netty4HttpChannel = new Netty4HttpChannel(channel); + } + + public void testChannelAttributeMatchesChannel() { + final Optional<Channel> channelOpt = netty4HttpChannel.get("channel", Channel.class); + assertThat(channelOpt.isPresent(), is(true)); + assertThat(channelOpt.get(), sameInstance(channel)); + } + + public void testChannelAttributeMatchesChannelOutboundInvoker() { + final Optional<ChannelOutboundInvoker> channelOpt = netty4HttpChannel.get("channel", ChannelOutboundInvoker.class); + assertThat(channelOpt.isPresent(), is(true)); + assertThat(channelOpt.get(), sameInstance(channel)); + } + + public void testChannelAttributeIsEmpty() { + final Optional<ServerChannel> channelOpt = netty4HttpChannel.get("channel", ServerChannel.class); + assertThat(channelOpt.isEmpty(), is(true)); + } +} diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpClient.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpClient.java index cad2e50327023..1c381e8000f6b 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpClient.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpClient.java @@ -32,6 +32,23 @@ package org.opensearch.http.netty4; +import org.opensearch.common.collect.Tuple; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.tasks.Task; +import org.opensearch.transport.NettyAllocator; + +import java.io.Closeable; +import java.net.SocketAddress; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.function.BiFunction; + import io.netty.bootstrap.Bootstrap; import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; @@ -71,23 +88,6 @@ import io.netty.handler.codec.http2.InboundHttp2ToHttpAdapterBuilder; import io.netty.util.AttributeKey; -import org.opensearch.common.collect.Tuple; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.tasks.Task; -import org.opensearch.transport.NettyAllocator; - -import java.io.Closeable; -import java.net.SocketAddress; -import java.nio.charset.StandardCharsets; -import java.util.ArrayList; -import java.util.Collection; -import 
java.util.Collections; -import java.util.List; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import java.util.function.BiFunction; - import static io.netty.handler.codec.http.HttpHeaderNames.HOST; import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; import static org.junit.Assert.fail; diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpPipeliningHandlerTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpPipeliningHandlerTests.java index 88dac2b2c6cdd..99d576bed01c7 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpPipeliningHandlerTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpPipeliningHandlerTests.java @@ -32,22 +32,13 @@ package org.opensearch.http.netty4; -import io.netty.buffer.ByteBufUtil; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelPromise; -import io.netty.channel.SimpleChannelInboundHandler; -import io.netty.channel.embedded.EmbeddedChannel; -import io.netty.handler.codec.http.DefaultFullHttpRequest; -import io.netty.handler.codec.http.FullHttpResponse; -import io.netty.handler.codec.http.HttpMethod; -import io.netty.handler.codec.http.QueryStringDecoder; import org.opensearch.common.Randomness; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.rest.RestStatus; import org.opensearch.http.HttpPipelinedRequest; import org.opensearch.http.HttpPipelinedResponse; import org.opensearch.http.HttpResponse; -import org.opensearch.core.rest.RestStatus; import org.opensearch.test.OpenSearchTestCase; import org.junit.After; @@ -64,9 +55,19 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; +import io.netty.buffer.ByteBufUtil; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelPromise; +import io.netty.channel.SimpleChannelInboundHandler; +import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.QueryStringDecoder; + +import static org.hamcrest.core.Is.is; import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_LENGTH; import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; -import static org.hamcrest.core.Is.is; public class Netty4HttpPipeliningHandlerTests extends OpenSearchTestCase { @@ -78,7 +79,7 @@ public class Netty4HttpPipeliningHandlerTests extends OpenSearchTestCase { @After public void tearDown() throws Exception { waitingRequests.keySet().forEach(this::finishRequest); - shutdownExecutorService(); + shutdownExecutorServices(); super.tearDown(); } @@ -87,7 +88,7 @@ private CountDownLatch finishRequest(String url) { return finishingRequests.get(url); } - private void shutdownExecutorService() throws InterruptedException { + private void shutdownExecutorServices() throws InterruptedException { if (!handlerService.isShutdown()) { handlerService.shutdown(); handlerService.awaitTermination(10, TimeUnit.SECONDS); diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerPipeliningTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerPipeliningTests.java index adf4d59a0c139..af868a3a3cb88 100644 --- 
a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerPipeliningTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerPipeliningTests.java @@ -32,28 +32,20 @@ package org.opensearch.http.netty4; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.Unpooled; -import io.netty.channel.Channel; -import io.netty.channel.ChannelHandler; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelPromise; -import io.netty.channel.SimpleChannelInboundHandler; -import io.netty.handler.codec.http.FullHttpResponse; -import io.netty.util.ReferenceCounted; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.util.MockBigArrays; import org.opensearch.common.util.MockPageCacheRecycler; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.rest.RestStatus; import org.opensearch.http.HttpPipelinedRequest; import org.opensearch.http.HttpResponse; import org.opensearch.http.HttpServerTransport; import org.opensearch.http.NullDispatcher; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; -import org.opensearch.core.rest.RestStatus; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -69,6 +61,16 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelPromise; +import io.netty.channel.SimpleChannelInboundHandler; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.util.ReferenceCounted; + import static org.hamcrest.Matchers.contains; /** @@ -134,7 +136,8 @@ class CustomNettyHttpServerTransport extends Netty4HttpServerTransport { xContentRegistry(), new NullDispatcher(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - new SharedGroupFactory(settings) + new SharedGroupFactory(settings), + NoopTracer.INSTANCE ); } diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java index 5a43057b1b7d1..d892918decfb5 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java @@ -32,59 +32,36 @@ package org.opensearch.http.netty4; -import io.netty.bootstrap.Bootstrap; -import io.netty.buffer.ByteBufAllocator; -import io.netty.buffer.ByteBufUtil; -import io.netty.buffer.PoolArenaMetric; -import io.netty.buffer.PooledByteBufAllocator; -import io.netty.buffer.PooledByteBufAllocatorMetric; -import io.netty.buffer.Unpooled; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelHandlerAdapter; -import io.netty.channel.ChannelInitializer; 
-import io.netty.channel.ChannelOption; -import io.netty.channel.nio.NioEventLoopGroup; -import io.netty.channel.socket.SocketChannel; -import io.netty.channel.socket.nio.NioSocketChannel; -import io.netty.handler.codec.TooLongFrameException; -import io.netty.handler.codec.http.DefaultFullHttpRequest; -import io.netty.handler.codec.http.FullHttpRequest; -import io.netty.handler.codec.http.FullHttpResponse; -import io.netty.handler.codec.http.HttpHeaderNames; -import io.netty.handler.codec.http.HttpHeaderValues; -import io.netty.handler.codec.http.HttpMethod; -import io.netty.handler.codec.http.HttpResponseStatus; -import io.netty.handler.codec.http.HttpUtil; -import io.netty.handler.codec.http.HttpVersion; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.OpenSearchException; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.network.NetworkAddress; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.MockBigArrays; import org.opensearch.common.util.MockPageCacheRecycler; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.http.BindHttpException; import org.opensearch.http.CorsHandler; import org.opensearch.http.HttpServerTransport; import org.opensearch.http.HttpTransportSettings; import org.opensearch.http.NullDispatcher; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestChannel; import org.opensearch.rest.RestRequest; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.rest.FakeRestRequest; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; -import org.opensearch.transport.SharedGroupFactory; import org.opensearch.transport.NettyAllocator; +import org.opensearch.transport.SharedGroupFactory; import org.junit.After; import org.junit.Before; @@ -96,10 +73,35 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; -import static org.opensearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN; -import static org.opensearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; +import io.netty.bootstrap.Bootstrap; +import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.ByteBufUtil; +import io.netty.buffer.PoolArenaMetric; +import io.netty.buffer.PooledByteBufAllocator; +import io.netty.buffer.PooledByteBufAllocatorMetric; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelHandlerAdapter; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelOption; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.SocketChannel; +import io.netty.channel.socket.nio.NioSocketChannel; +import io.netty.handler.codec.TooLongFrameException; +import 
io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpHeaderValues; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.HttpUtil; +import io.netty.handler.codec.http.HttpVersion; + import static org.opensearch.core.rest.RestStatus.BAD_REQUEST; import static org.opensearch.core.rest.RestStatus.OK; +import static org.opensearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN; +import static org.opensearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -197,7 +199,8 @@ public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, xContentRegistry(), dispatcher, clusterSettings, - new SharedGroupFactory(settings) + new SharedGroupFactory(settings), + NoopTracer.INSTANCE ) ) { transport.start(); @@ -246,7 +249,8 @@ public void testBindUnavailableAddress() { xContentRegistry(), new NullDispatcher(), clusterSettings, - new SharedGroupFactory(Settings.EMPTY) + new SharedGroupFactory(Settings.EMPTY), + NoopTracer.INSTANCE ) ) { transport.start(); @@ -264,7 +268,8 @@ public void testBindUnavailableAddress() { xContentRegistry(), new NullDispatcher(), clusterSettings, - new SharedGroupFactory(settings) + new SharedGroupFactory(settings), + NoopTracer.INSTANCE ) ) { BindHttpException bindHttpException = expectThrows(BindHttpException.class, otherTransport::start); @@ -316,7 +321,8 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th xContentRegistry(), dispatcher, clusterSettings, - new SharedGroupFactory(settings) + new SharedGroupFactory(settings), + NoopTracer.INSTANCE ) ) { transport.start(); @@ -378,7 +384,8 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th xContentRegistry(), dispatcher, clusterSettings, - new SharedGroupFactory(Settings.EMPTY) + new SharedGroupFactory(Settings.EMPTY), + NoopTracer.INSTANCE ) ) { transport.start(); @@ -447,7 +454,8 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th xContentRegistry(), dispatcher, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - new SharedGroupFactory(settings) + new SharedGroupFactory(settings), + NoopTracer.INSTANCE ) ) { transport.start(); @@ -520,7 +528,8 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th xContentRegistry(), dispatcher, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - new SharedGroupFactory(settings) + new SharedGroupFactory(settings), + NoopTracer.INSTANCE ) ) { transport.start(); diff --git a/modules/transport-netty4/src/test/java/org/opensearch/transport/CopyBytesSocketChannelTests.java b/modules/transport-netty4/src/test/java/org/opensearch/transport/CopyBytesSocketChannelTests.java index 7cb4225f02293..2ace9b111a64b 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/transport/CopyBytesSocketChannelTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/transport/CopyBytesSocketChannelTests.java @@ -31,18 +31,6 @@ package org.opensearch.transport; -import io.netty.bootstrap.Bootstrap; -import io.netty.bootstrap.ServerBootstrap; -import 
io.netty.buffer.ByteBuf; -import io.netty.buffer.Unpooled; -import io.netty.buffer.UnpooledByteBufAllocator; -import io.netty.channel.Channel; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelInitializer; -import io.netty.channel.ChannelOption; -import io.netty.channel.SimpleChannelInboundHandler; -import io.netty.channel.nio.NioEventLoopGroup; import org.opensearch.common.SuppressForbidden; import org.opensearch.test.OpenSearchTestCase; @@ -57,6 +45,19 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import io.netty.bootstrap.Bootstrap; +import io.netty.bootstrap.ServerBootstrap; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.buffer.UnpooledByteBufAllocator; +import io.netty.channel.Channel; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelOption; +import io.netty.channel.SimpleChannelInboundHandler; +import io.netty.channel.nio.NioEventLoopGroup; + public class CopyBytesSocketChannelTests extends OpenSearchTestCase { private final UnpooledByteBufAllocator alloc = new UnpooledByteBufAllocator(false); diff --git a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java index f80d7f41b5f55..c92ccba82835f 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java @@ -33,13 +33,14 @@ package org.opensearch.transport.netty4; import org.opensearch.Version; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.util.MockPageCacheRecycler; import org.opensearch.common.util.PageCacheRecycler; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.SharedGroupFactory; @@ -86,7 +87,8 @@ public void startThreadPool() { recycler, new NamedWriteableRegistry(Collections.emptyList()), new NoneCircuitBreakerService(), - new SharedGroupFactory(settings) + new SharedGroupFactory(settings), + NoopTracer.INSTANCE ); nettyTransport.start(); diff --git a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/Netty4UtilsTests.java b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/Netty4UtilsTests.java index d3fa8ea56ffe7..296f9aa3901c6 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/Netty4UtilsTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/Netty4UtilsTests.java @@ -32,22 +32,23 @@ package org.opensearch.transport.netty4; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.CompositeByteBuf; 
-import io.netty.buffer.Unpooled; import org.apache.lucene.util.BytesRef; -import org.opensearch.common.breaker.CircuitBreaker; import org.opensearch.common.bytes.AbstractBytesReferenceTestCase; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.stream.ReleasableBytesStreamOutput; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.PageCacheRecycler; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.CompositeByteBuf; +import io.netty.buffer.Unpooled; + public class Netty4UtilsTests extends OpenSearchTestCase { private static final int PAGE_SIZE = PageCacheRecycler.BYTE_PAGE_SIZE; diff --git a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/NettyTransportMultiPortTests.java b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/NettyTransportMultiPortTests.java index 5d7841df4bf33..7cca00db68559 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/NettyTransportMultiPortTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/NettyTransportMultiPortTests.java @@ -32,14 +32,15 @@ package org.opensearch.transport.netty4; import org.opensearch.Version; -import org.opensearch.common.component.Lifecycle; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.lifecycle.Lifecycle; import org.opensearch.common.network.NetworkService; import org.opensearch.common.network.NetworkUtils; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.MockPageCacheRecycler; import org.opensearch.common.util.PageCacheRecycler; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -141,7 +142,8 @@ private TcpTransport startTransport(Settings settings, ThreadPool threadPool) { recycler, new NamedWriteableRegistry(Collections.emptyList()), new NoneCircuitBreakerService(), - new SharedGroupFactory(settings) + new SharedGroupFactory(settings), + NoopTracer.INSTANCE ); transport.start(); diff --git a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/SimpleNetty4TransportTests.java b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/SimpleNetty4TransportTests.java index 619f473b8bef2..710b3ff6bd0ca 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/SimpleNetty4TransportTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/SimpleNetty4TransportTests.java @@ -33,17 +33,18 @@ package org.opensearch.transport.netty4; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.node.DiscoveryNode; -import 
org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.common.util.io.IOUtils; import org.opensearch.common.util.net.NetUtils; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.transport.MockTransportService; import org.opensearch.test.transport.StubbableTransport; import org.opensearch.transport.AbstractSimpleTransportTestCase; @@ -82,7 +83,8 @@ protected Transport build(Settings settings, final Version version, ClusterSetti PageCacheRecycler.NON_RECYCLING_INSTANCE, namedWriteableRegistry, new NoneCircuitBreakerService(), - new SharedGroupFactory(settings) + new SharedGroupFactory(settings), + NoopTracer.INSTANCE ) { @Override diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0-snapshot-8a555eb.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..0643f16dc1052 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +44a4e095d7e047a9452d81b224905b72c830f8ae \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0-snapshot-4373c3b.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 8a3332c950b6d..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fde64e3b23bc9a0849b9897febfe9f13c5113143 \ No newline at end of file diff --git a/plugins/analysis-icu/src/internalClusterTest/java/org/opensearch/index/mapper/ICUCollationKeywordFieldMapperIT.java b/plugins/analysis-icu/src/internalClusterTest/java/org/opensearch/index/mapper/ICUCollationKeywordFieldMapperIT.java index 4f596034bfece..9ef539caf24f9 100644 --- a/plugins/analysis-icu/src/internalClusterTest/java/org/opensearch/index/mapper/ICUCollationKeywordFieldMapperIT.java +++ b/plugins/analysis-icu/src/internalClusterTest/java/org/opensearch/index/mapper/ICUCollationKeywordFieldMapperIT.java @@ -31,19 +31,13 @@ package org.opensearch.index.mapper; -import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertOrderedSearchHits; - import com.ibm.icu.text.Collator; import com.ibm.icu.text.RuleBasedCollator; import com.ibm.icu.util.ULocale; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchResponse; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentType; import 
org.opensearch.index.query.QueryBuilders; import org.opensearch.plugin.analysis.icu.AnalysisICUPlugin; import org.opensearch.plugins.Plugin; @@ -56,6 +50,12 @@ import java.util.Collection; import java.util.Collections; +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertOrderedSearchHits; + public class ICUCollationKeywordFieldMapperIT extends OpenSearchIntegTestCase { @Override @@ -91,8 +91,12 @@ public void testBasicUsage() throws Exception { // both values should collate to same value indexRandom( true, - client().prepareIndex(index).setId("1").setSource("{\"id\":\"1\",\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON), - client().prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON) + client().prepareIndex(index) + .setId("1") + .setSource("{\"id\":\"1\",\"collate\":\"" + equivalent[0] + "\"}", MediaTypeRegistry.JSON), + client().prepareIndex(index) + .setId("2") + .setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[1] + "\"}", MediaTypeRegistry.JSON) ); // searching for either of the terms should return both results since they collate to the same value @@ -134,8 +138,10 @@ public void testMultipleValues() throws Exception { true, client().prepareIndex(index) .setId("1") - .setSource("{\"id\":\"1\", \"collate\":[\"" + equivalent[0] + "\", \"" + equivalent[1] + "\"]}", XContentType.JSON), - client().prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[2] + "\"}", XContentType.JSON) + .setSource("{\"id\":\"1\", \"collate\":[\"" + equivalent[0] + "\", \"" + equivalent[1] + "\"]}", MediaTypeRegistry.JSON), + client().prepareIndex(index) + .setId("2") + .setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[2] + "\"}", MediaTypeRegistry.JSON) ); // using sort mode = max, values B and C will be used for the sort @@ -195,8 +201,12 @@ public void testNormalization() throws Exception { indexRandom( true, - client().prepareIndex(index).setId("1").setSource("{\"id\":\"1\",\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON), - client().prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON) + client().prepareIndex(index) + .setId("1") + .setSource("{\"id\":\"1\",\"collate\":\"" + equivalent[0] + "\"}", MediaTypeRegistry.JSON), + client().prepareIndex(index) + .setId("2") + .setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[1] + "\"}", MediaTypeRegistry.JSON) ); // searching for either of the terms should return both results since they collate to the same value @@ -240,8 +250,12 @@ public void testSecondaryStrength() throws Exception { indexRandom( true, - client().prepareIndex(index).setId("1").setSource("{\"id\":\"1\",\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON), - client().prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON) + client().prepareIndex(index) + .setId("1") + .setSource("{\"id\":\"1\",\"collate\":\"" + equivalent[0] + "\"}", MediaTypeRegistry.JSON), + client().prepareIndex(index) + .setId("2") + .setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[1] + "\"}", MediaTypeRegistry.JSON) ); SearchRequest 
request = new SearchRequest().indices(index) @@ -285,8 +299,12 @@ public void testIgnorePunctuation() throws Exception { indexRandom( true, - client().prepareIndex(index).setId("1").setSource("{\"id\":\"1\",\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON), - client().prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON) + client().prepareIndex(index) + .setId("1") + .setSource("{\"id\":\"1\",\"collate\":\"" + equivalent[0] + "\"}", MediaTypeRegistry.JSON), + client().prepareIndex(index) + .setId("2") + .setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[1] + "\"}", MediaTypeRegistry.JSON) ); SearchRequest request = new SearchRequest().indices(index) @@ -330,9 +348,9 @@ public void testIgnoreWhitespace() throws Exception { indexRandom( true, - client().prepareIndex(index).setId("1").setSource("{\"id\":\"1\",\"collate\":\"foo bar\"}", XContentType.JSON), - client().prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"foobar\"}", XContentType.JSON), - client().prepareIndex(index).setId("3").setSource("{\"id\":\"3\",\"collate\":\"foo-bar\"}", XContentType.JSON) + client().prepareIndex(index).setId("1").setSource("{\"id\":\"1\",\"collate\":\"foo bar\"}", MediaTypeRegistry.JSON), + client().prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"foobar\"}", MediaTypeRegistry.JSON), + client().prepareIndex(index).setId("3").setSource("{\"id\":\"3\",\"collate\":\"foo-bar\"}", MediaTypeRegistry.JSON) ); SearchRequest request = new SearchRequest().indices(index) @@ -372,8 +390,8 @@ public void testNumerics() throws Exception { indexRandom( true, - client().prepareIndex(index).setId("1").setSource("{\"collate\":\"foobar-10\"}", XContentType.JSON), - client().prepareIndex(index).setId("2").setSource("{\"collate\":\"foobar-9\"}", XContentType.JSON) + client().prepareIndex(index).setId("1").setSource("{\"collate\":\"foobar-10\"}", MediaTypeRegistry.JSON), + client().prepareIndex(index).setId("2").setSource("{\"collate\":\"foobar-9\"}", MediaTypeRegistry.JSON) ); SearchRequest request = new SearchRequest().indices(index) @@ -411,10 +429,10 @@ public void testIgnoreAccentsButNotCase() throws Exception { indexRandom( true, - client().prepareIndex(index).setId("1").setSource("{\"id\":\"1\",\"collate\":\"résumé\"}", XContentType.JSON), - client().prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"Resume\"}", XContentType.JSON), - client().prepareIndex(index).setId("3").setSource("{\"id\":\"3\",\"collate\":\"resume\"}", XContentType.JSON), - client().prepareIndex(index).setId("4").setSource("{\"id\":\"4\",\"collate\":\"Résumé\"}", XContentType.JSON) + client().prepareIndex(index).setId("1").setSource("{\"id\":\"1\",\"collate\":\"résumé\"}", MediaTypeRegistry.JSON), + client().prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"Resume\"}", MediaTypeRegistry.JSON), + client().prepareIndex(index).setId("3").setSource("{\"id\":\"3\",\"collate\":\"resume\"}", MediaTypeRegistry.JSON), + client().prepareIndex(index).setId("4").setSource("{\"id\":\"4\",\"collate\":\"Résumé\"}", MediaTypeRegistry.JSON) ); SearchRequest request = new SearchRequest().indices(index) @@ -449,8 +467,8 @@ public void testUpperCaseFirst() throws Exception { indexRandom( true, - client().prepareIndex(index).setId("1").setSource("{\"collate\":\"resume\"}", XContentType.JSON), - client().prepareIndex(index).setId("2").setSource("{\"collate\":\"Resume\"}", XContentType.JSON) + 
client().prepareIndex(index).setId("1").setSource("{\"collate\":\"resume\"}", MediaTypeRegistry.JSON), + client().prepareIndex(index).setId("2").setSource("{\"collate\":\"Resume\"}", MediaTypeRegistry.JSON) ); SearchRequest request = new SearchRequest().indices(index) @@ -497,8 +515,12 @@ public void testCustomRules() throws Exception { indexRandom( true, - client().prepareIndex(index).setId("1").setSource("{\"id\":\"1\",\"collate\":\"" + equivalent[0] + "\"}", XContentType.JSON), - client().prepareIndex(index).setId("2").setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[1] + "\"}", XContentType.JSON) + client().prepareIndex(index) + .setId("1") + .setSource("{\"id\":\"1\",\"collate\":\"" + equivalent[0] + "\"}", MediaTypeRegistry.JSON), + client().prepareIndex(index) + .setId("2") + .setSource("{\"id\":\"2\",\"collate\":\"" + equivalent[1] + "\"}", MediaTypeRegistry.JSON) ); SearchRequest request = new SearchRequest().indices(index) diff --git a/plugins/analysis-icu/src/main/java/org/opensearch/index/analysis/ICUCollationKeyFilter.java b/plugins/analysis-icu/src/main/java/org/opensearch/index/analysis/ICUCollationKeyFilter.java index d7e097ce79798..35e0ed7651547 100644 --- a/plugins/analysis-icu/src/main/java/org/opensearch/index/analysis/ICUCollationKeyFilter.java +++ b/plugins/analysis-icu/src/main/java/org/opensearch/index/analysis/ICUCollationKeyFilter.java @@ -34,8 +34,8 @@ import com.ibm.icu.text.RawCollationKey; import org.apache.lucene.analysis.TokenFilter; import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.icu.ICUCollationDocValuesField; +import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import java.io.IOException; diff --git a/plugins/analysis-icu/src/main/java/org/opensearch/index/analysis/IcuCollationTokenFilterFactory.java b/plugins/analysis-icu/src/main/java/org/opensearch/index/analysis/IcuCollationTokenFilterFactory.java index ca29492addcfe..38a7a54005f3f 100644 --- a/plugins/analysis-icu/src/main/java/org/opensearch/index/analysis/IcuCollationTokenFilterFactory.java +++ b/plugins/analysis-icu/src/main/java/org/opensearch/index/analysis/IcuCollationTokenFilterFactory.java @@ -32,11 +32,9 @@ package org.opensearch.index.analysis; -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.InvalidPathException; - +import com.ibm.icu.text.Collator; +import com.ibm.icu.text.RuleBasedCollator; +import com.ibm.icu.util.ULocale; import org.apache.logging.log4j.LogManager; import org.apache.lucene.analysis.TokenStream; import org.opensearch.common.io.Streams; @@ -44,9 +42,10 @@ import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; -import com.ibm.icu.text.Collator; -import com.ibm.icu.text.RuleBasedCollator; -import com.ibm.icu.util.ULocale; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.InvalidPathException; /** * An ICU based collation token filter. 
There are two ways to configure collation: diff --git a/plugins/analysis-icu/src/main/java/org/opensearch/index/analysis/IcuFoldingTokenFilterFactory.java b/plugins/analysis-icu/src/main/java/org/opensearch/index/analysis/IcuFoldingTokenFilterFactory.java index 605d5065edec9..d0c809fba1f86 100644 --- a/plugins/analysis-icu/src/main/java/org/opensearch/index/analysis/IcuFoldingTokenFilterFactory.java +++ b/plugins/analysis-icu/src/main/java/org/opensearch/index/analysis/IcuFoldingTokenFilterFactory.java @@ -33,7 +33,6 @@ package org.opensearch.index.analysis; import com.ibm.icu.text.Normalizer2; - import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.icu.ICUFoldingFilter; import org.opensearch.common.settings.Settings; diff --git a/plugins/analysis-icu/src/main/java/org/opensearch/index/analysis/IcuNormalizerCharFilterFactory.java b/plugins/analysis-icu/src/main/java/org/opensearch/index/analysis/IcuNormalizerCharFilterFactory.java index 754c52ad5dfe6..e6e21232d17f8 100644 --- a/plugins/analysis-icu/src/main/java/org/opensearch/index/analysis/IcuNormalizerCharFilterFactory.java +++ b/plugins/analysis-icu/src/main/java/org/opensearch/index/analysis/IcuNormalizerCharFilterFactory.java @@ -33,7 +33,6 @@ package org.opensearch.index.analysis; import com.ibm.icu.text.Normalizer2; - import org.apache.lucene.analysis.icu.ICUNormalizer2CharFilter; import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; diff --git a/plugins/analysis-icu/src/main/java/org/opensearch/index/mapper/ICUCollationKeywordFieldMapper.java b/plugins/analysis-icu/src/main/java/org/opensearch/index/mapper/ICUCollationKeywordFieldMapper.java index 7725eda7f1cde..bd9c164811093 100644 --- a/plugins/analysis-icu/src/main/java/org/opensearch/index/mapper/ICUCollationKeywordFieldMapper.java +++ b/plugins/analysis-icu/src/main/java/org/opensearch/index/mapper/ICUCollationKeywordFieldMapper.java @@ -44,12 +44,12 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.opensearch.common.Nullable; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.unit.Fuzziness; +import org.opensearch.common.xcontent.support.XContentMapValues; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.index.analysis.IndexableBinaryStringTools; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; diff --git a/plugins/analysis-icu/src/main/java/org/opensearch/plugin/analysis/icu/AnalysisICUPlugin.java b/plugins/analysis-icu/src/main/java/org/opensearch/plugin/analysis/icu/AnalysisICUPlugin.java index 686ea4fba10f6..7a61d1400c217 100644 --- a/plugins/analysis-icu/src/main/java/org/opensearch/plugin/analysis/icu/AnalysisICUPlugin.java +++ b/plugins/analysis-icu/src/main/java/org/opensearch/plugin/analysis/icu/AnalysisICUPlugin.java @@ -32,8 +32,6 @@ package org.opensearch.plugin.analysis.icu; -import static java.util.Collections.singletonMap; - import org.apache.lucene.analysis.Analyzer; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.index.analysis.AnalyzerProvider; @@ -60,6 +58,8 @@ import java.util.List; import java.util.Map; +import static java.util.Collections.singletonMap; 
+ public class AnalysisICUPlugin extends Plugin implements AnalysisPlugin, MapperPlugin { @Override public Map<String, AnalysisProvider<CharFilterFactory>> getCharFilters() { diff --git a/plugins/analysis-icu/src/test/java/org/opensearch/index/analysis/IcuTokenizerFactoryTests.java b/plugins/analysis-icu/src/test/java/org/opensearch/index/analysis/IcuTokenizerFactoryTests.java index 25ff505cddab8..6062aaced9dea 100644 --- a/plugins/analysis-icu/src/test/java/org/opensearch/index/analysis/IcuTokenizerFactoryTests.java +++ b/plugins/analysis-icu/src/test/java/org/opensearch/index/analysis/IcuTokenizerFactoryTests.java @@ -36,8 +36,8 @@ import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.env.Environment; import org.opensearch.core.index.Index; +import org.opensearch.env.Environment; import org.opensearch.plugin.analysis.icu.AnalysisICUPlugin; import org.opensearch.test.OpenSearchTestCase; diff --git a/plugins/analysis-icu/src/test/java/org/opensearch/index/analysis/IndexableBinaryStringToolsTests.java b/plugins/analysis-icu/src/test/java/org/opensearch/index/analysis/IndexableBinaryStringToolsTests.java index 63e21b2f7903b..d11a09069b71c 100644 --- a/plugins/analysis-icu/src/test/java/org/opensearch/index/analysis/IndexableBinaryStringToolsTests.java +++ b/plugins/analysis-icu/src/test/java/org/opensearch/index/analysis/IndexableBinaryStringToolsTests.java @@ -34,9 +34,10 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope; import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; -import org.apache.lucene.util.ArrayUtil; + import org.apache.lucene.tests.util.LuceneTestCase; import org.apache.lucene.tests.util.TimeUnits; +import org.apache.lucene.util.ArrayUtil; import org.opensearch.test.junit.listeners.ReproduceInfoPrinter; import org.junit.BeforeClass; diff --git a/plugins/analysis-icu/src/test/java/org/opensearch/index/mapper/CollationFieldTypeTests.java b/plugins/analysis-icu/src/test/java/org/opensearch/index/mapper/CollationFieldTypeTests.java index 8dcec874d3771..1e08000117f61 100644 --- a/plugins/analysis-icu/src/test/java/org/opensearch/index/mapper/CollationFieldTypeTests.java +++ b/plugins/analysis-icu/src/test/java/org/opensearch/index/mapper/CollationFieldTypeTests.java @@ -32,6 +32,7 @@ package org.opensearch.index.mapper; import com.carrotsearch.randomizedtesting.generators.RandomStrings; + import com.ibm.icu.text.Collator; import com.ibm.icu.text.RawCollationKey; import com.ibm.icu.util.ULocale; diff --git a/plugins/analysis-icu/src/test/java/org/opensearch/index/mapper/ICUCollationKeywordFieldMapperTests.java b/plugins/analysis-icu/src/test/java/org/opensearch/index/mapper/ICUCollationKeywordFieldMapperTests.java index 37cb73e21b5d4..0a2f48f4215cb 100644 --- a/plugins/analysis-icu/src/test/java/org/opensearch/index/mapper/ICUCollationKeywordFieldMapperTests.java +++ b/plugins/analysis-icu/src/test/java/org/opensearch/index/mapper/ICUCollationKeywordFieldMapperTests.java @@ -39,7 +39,6 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.IndexableFieldType; import org.apache.lucene.util.BytesRef; -import org.opensearch.common.Strings; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.plugin.analysis.icu.AnalysisICUPlugin; import org.opensearch.plugins.Plugin; @@ -96,7 +95,7 @@ protected void 
writeFieldValue(XContentBuilder builder) throws IOException { public void testDefaults() throws Exception { DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); - assertEquals(Strings.toString(fieldMapping(this::minimalMapping)), mapper.mappingSource().toString()); + assertEquals(fieldMapping(this::minimalMapping).toString(), mapper.mappingSource().toString()); ParsedDocument doc = mapper.parse(source(b -> b.field("field", "1234"))); IndexableField[] fields = doc.rootDoc().getFields("field"); diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0-snapshot-8a555eb.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..3f4d49a78791b --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +7f57fe7322e6d3a9b4edcc3da0b1ee0791a814ec \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0-snapshot-4373c3b.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 33c2afacf2395..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b01a791705fa01fce48dd02ea79fa8045de8dd5e \ No newline at end of file diff --git a/plugins/analysis-kuromoji/src/main/java/org/opensearch/index/analysis/KuromojiCompletionAnalyzerProvider.java b/plugins/analysis-kuromoji/src/main/java/org/opensearch/index/analysis/KuromojiCompletionAnalyzerProvider.java new file mode 100644 index 0000000000000..314daab1801a6 --- /dev/null +++ b/plugins/analysis-kuromoji/src/main/java/org/opensearch/index/analysis/KuromojiCompletionAnalyzerProvider.java @@ -0,0 +1,34 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.analysis; + +import org.apache.lucene.analysis.ja.JapaneseCompletionAnalyzer; +import org.apache.lucene.analysis.ja.JapaneseCompletionFilter; +import org.apache.lucene.analysis.ja.dict.UserDictionary; +import org.opensearch.common.settings.Settings; +import org.opensearch.env.Environment; +import org.opensearch.index.IndexSettings; + +public class KuromojiCompletionAnalyzerProvider extends AbstractIndexAnalyzerProvider<JapaneseCompletionAnalyzer> { + + private final JapaneseCompletionAnalyzer analyzer; + + public KuromojiCompletionAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + super(indexSettings, name, settings); + final JapaneseCompletionFilter.Mode mode = KuromojiCompletionFilterFactory.getMode(settings); + final UserDictionary userDictionary = KuromojiTokenizerFactory.getUserDictionary(env, settings); + analyzer = new JapaneseCompletionAnalyzer(userDictionary, mode); + } + + @Override + public JapaneseCompletionAnalyzer get() { + return this.analyzer; + } + +} diff --git a/plugins/analysis-kuromoji/src/main/java/org/opensearch/index/analysis/KuromojiCompletionFilterFactory.java b/plugins/analysis-kuromoji/src/main/java/org/opensearch/index/analysis/KuromojiCompletionFilterFactory.java new file mode 100644 index 0000000000000..1459c19de46db --- /dev/null +++ b/plugins/analysis-kuromoji/src/main/java/org/opensearch/index/analysis/KuromojiCompletionFilterFactory.java @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.analysis; + +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.ja.JapaneseCompletionFilter; +import org.apache.lucene.analysis.ja.JapaneseCompletionFilter.Mode; +import org.opensearch.common.settings.Settings; +import org.opensearch.env.Environment; +import org.opensearch.index.IndexSettings; + +public class KuromojiCompletionFilterFactory extends AbstractTokenFilterFactory { + private final Mode mode; + + public KuromojiCompletionFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + super(indexSettings, name, settings); + this.mode = getMode(settings); + } + + public static Mode getMode(Settings settings) { + String modeSetting = settings.get("mode", null); + if (modeSetting != null) { + if ("index".equalsIgnoreCase(modeSetting)) { + return Mode.INDEX; + } else if ("query".equalsIgnoreCase(modeSetting)) { + return Mode.QUERY; + } + } + return Mode.INDEX; + } + + @Override + public TokenStream create(TokenStream tokenStream) { + return new JapaneseCompletionFilter(tokenStream, mode); + } +} diff --git a/plugins/analysis-kuromoji/src/main/java/org/opensearch/index/analysis/KuromojiTokenizerFactory.java b/plugins/analysis-kuromoji/src/main/java/org/opensearch/index/analysis/KuromojiTokenizerFactory.java index 2939711f6f7e1..76b109932e642 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/opensearch/index/analysis/KuromojiTokenizerFactory.java +++ b/plugins/analysis-kuromoji/src/main/java/org/opensearch/index/analysis/KuromojiTokenizerFactory.java @@ -38,7 +38,7 @@ import org.apache.lucene.analysis.ja.JapaneseTokenizer; import org.apache.lucene.analysis.ja.JapaneseTokenizer.Mode; import org.apache.lucene.analysis.ja.dict.UserDictionary; -import org.apache.lucene.analysis.ja.util.CSVUtil; +import 
org.apache.lucene.analysis.util.CSVUtil; import org.opensearch.OpenSearchException; import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; diff --git a/plugins/analysis-kuromoji/src/main/java/org/opensearch/plugin/analysis/kuromoji/AnalysisKuromojiPlugin.java b/plugins/analysis-kuromoji/src/main/java/org/opensearch/plugin/analysis/kuromoji/AnalysisKuromojiPlugin.java index 76d3df8c2e76c..c429e8e4dd830 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/opensearch/plugin/analysis/kuromoji/AnalysisKuromojiPlugin.java +++ b/plugins/analysis-kuromoji/src/main/java/org/opensearch/plugin/analysis/kuromoji/AnalysisKuromojiPlugin.java @@ -38,6 +38,8 @@ import org.opensearch.index.analysis.JapaneseStopTokenFilterFactory; import org.opensearch.index.analysis.KuromojiAnalyzerProvider; import org.opensearch.index.analysis.KuromojiBaseFormFilterFactory; +import org.opensearch.index.analysis.KuromojiCompletionAnalyzerProvider; +import org.opensearch.index.analysis.KuromojiCompletionFilterFactory; import org.opensearch.index.analysis.KuromojiIterationMarkCharFilterFactory; import org.opensearch.index.analysis.KuromojiKatakanaStemmerFactory; import org.opensearch.index.analysis.KuromojiNumberFilterFactory; @@ -70,6 +72,7 @@ public Map<String, AnalysisProvider<TokenFilterFactory>> getTokenFilters() { extra.put("kuromoji_stemmer", KuromojiKatakanaStemmerFactory::new); extra.put("ja_stop", JapaneseStopTokenFilterFactory::new); extra.put("kuromoji_number", KuromojiNumberFilterFactory::new); + extra.put("kuromoji_completion", KuromojiCompletionFilterFactory::new); return extra; } @@ -80,6 +83,9 @@ public Map<String, AnalysisProvider<TokenizerFactory>> getTokenizers() { @Override public Map<String, AnalysisProvider<AnalyzerProvider<? extends Analyzer>>> getAnalyzers() { - return singletonMap("kuromoji", KuromojiAnalyzerProvider::new); + Map<String, AnalysisProvider<AnalyzerProvider<? 
extends Analyzer>>> extra = new HashMap<>(); + extra.put("kuromoji", KuromojiAnalyzerProvider::new); + extra.put("kuromoji_completion", KuromojiCompletionAnalyzerProvider::new); + return extra; } } diff --git a/plugins/analysis-kuromoji/src/test/java/org/opensearch/index/analysis/AnalysisKuromojiFactoryTests.java b/plugins/analysis-kuromoji/src/test/java/org/opensearch/index/analysis/AnalysisKuromojiFactoryTests.java index a76406d4dc925..b6b953f9ba417 100644 --- a/plugins/analysis-kuromoji/src/test/java/org/opensearch/index/analysis/AnalysisKuromojiFactoryTests.java +++ b/plugins/analysis-kuromoji/src/test/java/org/opensearch/index/analysis/AnalysisKuromojiFactoryTests.java @@ -59,6 +59,7 @@ protected Map<String, Class<?>> getTokenFilters() { filters.put("japanesereadingform", KuromojiReadingFormFilterFactory.class); filters.put("japanesekatakanastem", KuromojiKatakanaStemmerFactory.class); filters.put("japanesenumber", KuromojiNumberFilterFactory.class); + filters.put("japanesecompletion", KuromojiCompletionFilterFactory.class); return filters; } diff --git a/plugins/analysis-kuromoji/src/test/java/org/opensearch/index/analysis/KuromojiAnalysisTests.java b/plugins/analysis-kuromoji/src/test/java/org/opensearch/index/analysis/KuromojiAnalysisTests.java index 62386c2bd2e3d..ec18041f451fc 100644 --- a/plugins/analysis-kuromoji/src/test/java/org/opensearch/index/analysis/KuromojiAnalysisTests.java +++ b/plugins/analysis-kuromoji/src/test/java/org/opensearch/index/analysis/KuromojiAnalysisTests.java @@ -36,13 +36,14 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.ja.JapaneseAnalyzer; +import org.apache.lucene.analysis.ja.JapaneseCompletionAnalyzer; import org.apache.lucene.analysis.ja.JapaneseTokenizer; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.env.Environment; import org.opensearch.core.index.Index; +import org.opensearch.env.Environment; import org.opensearch.plugin.analysis.kuromoji.AnalysisKuromojiPlugin; import org.opensearch.test.OpenSearchTestCase; @@ -53,12 +54,12 @@ import java.nio.file.Files; import java.nio.file.Path; -import static org.apache.lucene.tests.analysis.BaseTokenStreamTestCase.assertTokenStreamContents; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; +import static org.apache.lucene.tests.analysis.BaseTokenStreamTestCase.assertTokenStreamContents; public class KuromojiAnalysisTests extends OpenSearchTestCase { public void testDefaultsKuromojiAnalysis() throws IOException { @@ -85,6 +86,15 @@ public void testDefaultsKuromojiAnalysis() throws IOException { filterFactory = analysis.tokenFilter.get("kuromoji_number"); assertThat(filterFactory, instanceOf(KuromojiNumberFilterFactory.class)); + filterFactory = analysis.tokenFilter.get("kuromoji_completion"); + assertThat(filterFactory, instanceOf(KuromojiCompletionFilterFactory.class)); + + filterFactory = analysis.tokenFilter.get("kuromoji_completion_index"); + assertThat(filterFactory, instanceOf(KuromojiCompletionFilterFactory.class)); + + filterFactory = analysis.tokenFilter.get("kuromoji_completion_query"); + assertThat(filterFactory, 
instanceOf(KuromojiCompletionFilterFactory.class)); + IndexAnalyzers indexAnalyzers = analysis.indexAnalyzers; NamedAnalyzer analyzer = indexAnalyzers.get("kuromoji"); assertThat(analyzer.analyzer(), instanceOf(JapaneseAnalyzer.class)); @@ -93,6 +103,15 @@ public void testDefaultsKuromojiAnalysis() throws IOException { assertThat(analyzer.analyzer(), instanceOf(CustomAnalyzer.class)); assertThat(analyzer.analyzer().tokenStream(null, new StringReader("")), instanceOf(JapaneseTokenizer.class)); + analyzer = indexAnalyzers.get("kuromoji_completion"); + assertThat(analyzer.analyzer(), instanceOf(JapaneseCompletionAnalyzer.class)); + + analyzer = indexAnalyzers.get("kuromoji_completion_index"); + assertThat(analyzer.analyzer(), instanceOf(JapaneseCompletionAnalyzer.class)); + + analyzer = indexAnalyzers.get("kuromoji_completion_query"); + assertThat(analyzer.analyzer(), instanceOf(JapaneseCompletionAnalyzer.class)); + CharFilterFactory charFilterFactory = analysis.charFilter.get("kuromoji_iteration_mark"); assertThat(charFilterFactory, instanceOf(KuromojiIterationMarkCharFilterFactory.class)); @@ -199,6 +218,32 @@ public void testKatakanaStemFilter() throws IOException { assertSimpleTSOutput(tokenFilter.create(tokenizer), expected_tokens_katakana); } + public void testJapaneseCompletionFilter() throws IOException { + TestAnalysis analysis = createTestAnalysis(); + + String source = "寿司がおいしいね"; + String[] expected_tokens = new String[] { "寿司", "susi", "sushi", "が", "ga", "おいしい", "oisii", "oishii", "ね", "ne" }; + + // mode = INDEX(default) + Tokenizer tokenizer = new JapaneseTokenizer(null, true, JapaneseTokenizer.Mode.SEARCH); + tokenizer.setReader(new StringReader(source)); + TokenFilterFactory tokenFilter = analysis.tokenFilter.get("kuromoji_completion"); + assertSimpleTSOutput(tokenFilter.create(tokenizer), expected_tokens); + + // mode = INDEX + tokenizer = new JapaneseTokenizer(null, true, JapaneseTokenizer.Mode.SEARCH); + tokenizer.setReader(new StringReader(source)); + tokenFilter = analysis.tokenFilter.get("kuromoji_completion_index"); + assertSimpleTSOutput(tokenFilter.create(tokenizer), expected_tokens); + + // mode = QUERY + tokenizer = new JapaneseTokenizer(null, true, JapaneseTokenizer.Mode.SEARCH); + tokenizer.setReader(new StringReader(source)); + tokenFilter = analysis.tokenFilter.get("kuromoji_completion_query"); + expected_tokens = new String[] { "寿司", "susi", "sushi", "がおいしいね", "gaoisiine", "gaoishiine" }; + assertSimpleTSOutput(tokenFilter.create(tokenizer), expected_tokens); + } + public void testIterationMarkCharFilter() throws IOException { TestAnalysis analysis = createTestAnalysis(); // test only kanji @@ -414,6 +459,30 @@ public void testDiscardCompoundToken() throws Exception { assertSimpleTSOutput(tokenizer, expected); } + public void testJapaneseCompletionAnalyzer() throws Exception { + TestAnalysis analysis = createTestAnalysis(); + IndexAnalyzers indexAnalyzers = analysis.indexAnalyzers; + NamedAnalyzer analyzer = indexAnalyzers.get("kuromoji_completion"); + + // mode = INDEX(default) + try (TokenStream stream = analyzer.tokenStream("", "寿司がおいしいね")) { + assertTokenStreamContents(stream, new String[] { "寿司", "susi", "sushi", "が", "ga", "おいしい", "oisii", "oishii", "ね", "ne" }); + } + + // mode = INDEX + analyzer = indexAnalyzers.get("kuromoji_completion_index"); + try (TokenStream stream = analyzer.tokenStream("", "寿司がおいしいね")) { + assertTokenStreamContents(stream, new String[] { "寿司", "susi", "sushi", "が", "ga", "おいしい", "oisii", "oishii", "ね", "ne" }); + } + + // mode 
= QUERY + analyzer = indexAnalyzers.get("kuromoji_completion_query"); + try (TokenStream stream = analyzer.tokenStream("", "寿司がおいしいね")) { + assertTokenStreamContents(stream, new String[] { "寿司", "susi", "sushi", "がおいしいね", "gaoisiine", "gaoishiine" }); + } + + } + private TestAnalysis createTestAnalysis(Settings analysisSettings) throws IOException { InputStream dict = KuromojiAnalysisTests.class.getResourceAsStream("user_dict.txt"); Path home = createTempDir(); diff --git a/plugins/analysis-kuromoji/src/test/resources/org/opensearch/index/analysis/kuromoji_analysis.json b/plugins/analysis-kuromoji/src/test/resources/org/opensearch/index/analysis/kuromoji_analysis.json index a55947f53e34b..3e952b51e4ece 100644 --- a/plugins/analysis-kuromoji/src/test/resources/org/opensearch/index/analysis/kuromoji_analysis.json +++ b/plugins/analysis-kuromoji/src/test/resources/org/opensearch/index/analysis/kuromoji_analysis.json @@ -17,6 +17,14 @@ "ja_stop" : { "type": "ja_stop", "stopwords": ["_japanese_", "スピード"] + }, + "kuromoji_completion_index" : { + "type" : "kuromoji_completion", + "mode" : "index" + }, + "kuromoji_completion_query" : { + "type" : "kuromoji_completion", + "mode" : "query" } }, @@ -70,6 +78,14 @@ "my_analyzer" : { "type" : "custom", "tokenizer" : "kuromoji_tokenizer" + }, + "kuromoji_completion_index" : { + "type" : "kuromoji_completion", + "mode" : "index" + }, + "kuromoji_completion_query" : { + "type" : "kuromoji_completion", + "mode" : "query" } } diff --git a/plugins/analysis-kuromoji/src/yamlRestTest/resources/rest-api-spec/test/analysis_kuromoji/10_basic.yml b/plugins/analysis-kuromoji/src/yamlRestTest/resources/rest-api-spec/test/analysis_kuromoji/10_basic.yml index 1cca2b728e0aa..3363591ded5ca 100644 --- a/plugins/analysis-kuromoji/src/yamlRestTest/resources/rest-api-spec/test/analysis_kuromoji/10_basic.yml +++ b/plugins/analysis-kuromoji/src/yamlRestTest/resources/rest-api-spec/test/analysis_kuromoji/10_basic.yml @@ -16,6 +16,24 @@ - match: { tokens.5.token: 飲む } - match: { tokens.6.token: 行く } --- +"Completion Analyzer": + - do: + indices.analyze: + body: + text: 寿司がおいしいね + analyzer: kuromoji_completion + - length: { tokens: 10 } + - match: { tokens.0.token: "寿司" } + - match: { tokens.1.token: "susi" } + - match: { tokens.2.token: "sushi" } + - match: { tokens.3.token: "が" } + - match: { tokens.4.token: "ga" } + - match: { tokens.5.token: "おいしい" } + - match: { tokens.6.token: "oisii" } + - match: { tokens.7.token: "oishii" } + - match: { tokens.8.token: "ね" } + - match: { tokens.9.token: "ne" } +--- "Tokenizer": - do: indices.analyze: @@ -57,3 +75,15 @@ filter: [kuromoji_stemmer] - length: { tokens: 1 } - match: { tokens.0.token: サーバ } +--- +"Completion filter": + - do: + indices.analyze: + body: + text: 寿司 + tokenizer: kuromoji_tokenizer + filter: [kuromoji_completion] + - length: { tokens: 3 } + - match: { tokens.0.token: "寿司" } + - match: { tokens.1.token: "susi" } + - match: { tokens.2.token: "sushi" } diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0-snapshot-8a555eb.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..1f110011ca9c6 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +9929da235100f8df323cfed165b8111fb2840093 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0-snapshot-4373c3b.jar.sha1 
b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 1e7986dafa11e..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -43d19320b1b9cd18638b1602fa87d5f21ee043bc \ No newline at end of file diff --git a/plugins/analysis-nori/src/main/java/org/opensearch/index/analysis/NoriAnalyzerProvider.java b/plugins/analysis-nori/src/main/java/org/opensearch/index/analysis/NoriAnalyzerProvider.java index e3b1cef6aee8a..d3c452c1f2e69 100644 --- a/plugins/analysis-nori/src/main/java/org/opensearch/index/analysis/NoriAnalyzerProvider.java +++ b/plugins/analysis-nori/src/main/java/org/opensearch/index/analysis/NoriAnalyzerProvider.java @@ -32,17 +32,18 @@ package org.opensearch.index.analysis; -import java.util.List; -import java.util.Set; import org.apache.lucene.analysis.ko.KoreanAnalyzer; import org.apache.lucene.analysis.ko.KoreanPartOfSpeechStopFilter; import org.apache.lucene.analysis.ko.KoreanTokenizer; -import org.apache.lucene.analysis.ko.dict.UserDictionary; import org.apache.lucene.analysis.ko.POS; +import org.apache.lucene.analysis.ko.dict.UserDictionary; import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; +import java.util.List; +import java.util.Set; + import static org.opensearch.index.analysis.NoriPartOfSpeechStopFilterFactory.resolvePOSList; public class NoriAnalyzerProvider extends AbstractIndexAnalyzerProvider<KoreanAnalyzer> { diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0-snapshot-8a555eb.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..8d6bf9fa0fa1b --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +8564c86d880c6ce002250002e2fd0936cbfff61d \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0-snapshot-4373c3b.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 14880d9c2d243..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9244dc232f175010b480d4d88e13945c17a0b28b \ No newline at end of file diff --git a/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/HaasePhonetik.java b/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/HaasePhonetik.java index c80d32228feeb..b875fab7d4006 100644 --- a/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/HaasePhonetik.java +++ b/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/HaasePhonetik.java @@ -35,13 +35,13 @@ /** * Geänderter Algorithmus aus der Matching Toolbox von Rainer Schnell * Java-Programmierung von Jörg Reiher - * + * <p> * Die Kölner Phonetik wurde für den Einsatz in Namensdatenbanken wie * der Verwaltung eines Krankenhauses durch Martin Haase (Institut für * Sprachwissenschaft, Universität zu Köln) und Kai Heitmann (Insitut für * medizinische Statistik, Informatik und Epidemiologie, Köln) überarbeitet. * M. Haase und K. Heitmann. Die Erweiterte Kölner Phonetik. 526, 2000. 
- * + * <p> * nach: Martin Wilz, Aspekte der Kodierung phonetischer Ähnlichkeiten * in deutschen Eigennamen, Magisterarbeit. * http://www.uni-koeln.de/phil-fak/phonetik/Lehre/MA-Arbeiten/magister_wilz.pdf diff --git a/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/KoelnerPhonetik.java b/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/KoelnerPhonetik.java index 01056b0983936..4bcc10ff73b0a 100644 --- a/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/KoelnerPhonetik.java +++ b/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/KoelnerPhonetik.java @@ -32,6 +32,9 @@ package org.opensearch.index.analysis.phonetic; +import org.apache.commons.codec.EncoderException; +import org.apache.commons.codec.StringEncoder; + import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; @@ -41,18 +44,15 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -import org.apache.commons.codec.EncoderException; -import org.apache.commons.codec.StringEncoder; - /** * Kölner Phonetik - * + * <p> * H.J. Postel, Die Kölner Phonetik. Ein Verfahren zu Identifizierung * von Personennamen auf der Grundlage der Gestaltanalyse. IBM-Nachrichten 19 (1969), 925-931 - * + * <p> * Algorithmus aus der Matching Toolbox von Rainer Schnell * Java-Programmierung von Jörg Reiher - * + * <p> * mit Änderungen von Jörg Prante * */ diff --git a/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/Nysiis.java b/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/Nysiis.java index 818dbba85e2de..c3237114c65d5 100644 --- a/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/Nysiis.java +++ b/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/Nysiis.java @@ -40,7 +40,7 @@ /** * * Taken from commons-codec trunk (unreleased yet) - * + * <p> * Encodes a string into a NYSIIS value. NYSIIS is an encoding used to relate * similar names, but can also be used as a general purpose scheme to find word * with similar phonemes. 
diff --git a/plugins/analysis-phonetic/src/test/java/org/opensearch/index/analysis/SimplePhoneticAnalysisTests.java b/plugins/analysis-phonetic/src/test/java/org/opensearch/index/analysis/SimplePhoneticAnalysisTests.java index b8a28e4b6f36d..d14aeae41bd63 100644 --- a/plugins/analysis-phonetic/src/test/java/org/opensearch/index/analysis/SimplePhoneticAnalysisTests.java +++ b/plugins/analysis-phonetic/src/test/java/org/opensearch/index/analysis/SimplePhoneticAnalysisTests.java @@ -32,10 +32,10 @@ package org.opensearch.index.analysis; -import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.WhitespaceTokenizer; import org.apache.lucene.analysis.phonetic.DaitchMokotoffSoundexFilter; +import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0-snapshot-8a555eb.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..cbe4aec98fae4 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +7e71777cfb5beb4ffd5b03030576d2f062eef13c \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0-snapshot-4373c3b.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index edc4de3fffe28..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3101a4f79820c1ca3dfb8f49b74c5fb5b32940e1 \ No newline at end of file diff --git a/plugins/analysis-smartcn/src/main/java/org/opensearch/index/analysis/SmartChineseStopTokenFilterFactory.java b/plugins/analysis-smartcn/src/main/java/org/opensearch/index/analysis/SmartChineseStopTokenFilterFactory.java index c7449ce87111d..6aa251ceb65e8 100644 --- a/plugins/analysis-smartcn/src/main/java/org/opensearch/index/analysis/SmartChineseStopTokenFilterFactory.java +++ b/plugins/analysis-smartcn/src/main/java/org/opensearch/index/analysis/SmartChineseStopTokenFilterFactory.java @@ -40,6 +40,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; + import java.util.Map; import java.util.Set; diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0-snapshot-8a555eb.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..e1c7aecc104d0 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +a597265bd6fb0a7e954e948a295d31507dd73cce \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0-snapshot-4373c3b.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 54c310277b09b..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f12b2a22cd5ebcd84f40a40e78fdd4e268b3b26d \ No newline at end of file diff --git 
a/plugins/analysis-stempel/src/test/java/org/opensearch/index/analysis/AnalysisPolishFactoryTests.java b/plugins/analysis-stempel/src/test/java/org/opensearch/index/analysis/AnalysisPolishFactoryTests.java index da6699fc1e95e..567f5644acbf8 100644 --- a/plugins/analysis-stempel/src/test/java/org/opensearch/index/analysis/AnalysisPolishFactoryTests.java +++ b/plugins/analysis-stempel/src/test/java/org/opensearch/index/analysis/AnalysisPolishFactoryTests.java @@ -33,9 +33,9 @@ package org.opensearch.index.analysis; import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase; import org.apache.lucene.tests.analysis.MockTokenizer; -import org.apache.lucene.analysis.Tokenizer; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.UUIDs; diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0-snapshot-8a555eb.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..eefa2809f3540 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +c9e534845bb08985d7fa21e2e71a14bc68c46089 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0-snapshot-4373c3b.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 358db9ea3f0f5..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7dbf5cc3dff93cc1ffe45d79b129859590d001dd \ No newline at end of file diff --git a/plugins/cache-ehcache/build.gradle b/plugins/cache-ehcache/build.gradle new file mode 100644 index 0000000000000..65e7daaaacf26 --- /dev/null +++ b/plugins/cache-ehcache/build.gradle @@ -0,0 +1,97 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +import org.apache.tools.ant.taskdefs.condition.Os +import org.opensearch.gradle.Architecture +import org.opensearch.gradle.OS +import org.opensearch.gradle.info.BuildParams + +apply plugin: 'opensearch.internal-cluster-test' + +opensearchplugin { + description 'Ehcache based cache implementation.' 
+ classname 'org.opensearch.cache.EhcacheCachePlugin' +} + +versions << [ + 'ehcache' : '3.10.8' +] + +dependencies { + api "org.ehcache:ehcache:${versions.ehcache}" +} + +thirdPartyAudit { + ignoreViolations( + 'org.ehcache.impl.internal.concurrent.ConcurrentHashMap', + 'org.ehcache.impl.internal.concurrent.ConcurrentHashMap$CounterCell', + 'org.ehcache.impl.internal.concurrent.ConcurrentHashMap$TreeBin', + 'org.ehcache.impl.internal.concurrent.ThreadLocalRandomUtil', + 'org.ehcache.sizeof.impl.UnsafeSizeOf' + ) + + ignoreMissingClasses( + 'javax.cache.Cache', + 'javax.cache.Cache$Entry', + 'javax.cache.CacheException', + 'javax.cache.CacheManager', + 'javax.cache.configuration.CacheEntryListenerConfiguration', + 'javax.cache.configuration.CompleteConfiguration', + 'javax.cache.configuration.Configuration', + 'javax.cache.configuration.Factory', + 'javax.cache.configuration.OptionalFeature', + 'javax.cache.event.CacheEntryCreatedListener', + 'javax.cache.event.CacheEntryEvent', + 'javax.cache.event.CacheEntryEventFilter', + 'javax.cache.event.CacheEntryExpiredListener', + 'javax.cache.event.CacheEntryListener', + 'javax.cache.event.CacheEntryRemovedListener', + 'javax.cache.event.CacheEntryUpdatedListener', + 'javax.cache.event.EventType', + 'javax.cache.expiry.Duration', + 'javax.cache.expiry.EternalExpiryPolicy', + 'javax.cache.expiry.ExpiryPolicy', + 'javax.cache.integration.CacheLoader', + 'javax.cache.integration.CacheLoaderException', + 'javax.cache.integration.CacheWriter', + 'javax.cache.integration.CacheWriterException', + 'javax.cache.integration.CompletionListener', + 'javax.cache.management.CacheMXBean', + 'javax.cache.management.CacheStatisticsMXBean', + 'javax.cache.processor.EntryProcessor', + 'javax.cache.processor.EntryProcessorResult', + 'javax.cache.processor.MutableEntry', + 'javax.cache.spi.CachingProvider', + 'javax.xml.bind.JAXBContext', + 'javax.xml.bind.JAXBElement', + 'javax.xml.bind.Marshaller', + 'javax.xml.bind.Unmarshaller', + 'javax.xml.bind.annotation.XmlElement', + 'javax.xml.bind.annotation.XmlRootElement', + 'javax.xml.bind.annotation.XmlSchema', + 'javax.xml.bind.annotation.adapters.XmlAdapter', + 'org.osgi.framework.BundleActivator', + 'org.osgi.framework.BundleContext', + 'org.osgi.framework.ServiceReference', + 'org.slf4j.Logger', + 'org.slf4j.LoggerFactory', + 'org.slf4j.Marker', + 'org.slf4j.event.Level' + ) +} + +tasks.named("bundlePlugin").configure { + from('config/cache-ehcache') { + into 'config' + } +} + +test { + // TODO: Adding permission in plugin-security.policy doesn't seem to work. + systemProperty 'tests.security.manager', 'false' +} diff --git a/plugins/cache-ehcache/licenses/ehcache-3.10.8.jar.sha1 b/plugins/cache-ehcache/licenses/ehcache-3.10.8.jar.sha1 new file mode 100644 index 0000000000000..dee07e9238ebf --- /dev/null +++ b/plugins/cache-ehcache/licenses/ehcache-3.10.8.jar.sha1 @@ -0,0 +1 @@ +f0d50ede46609db78413ca7f4250d348a597b101 \ No newline at end of file diff --git a/plugins/cache-ehcache/licenses/ehcache-LICENSE.txt b/plugins/cache-ehcache/licenses/ehcache-LICENSE.txt new file mode 100644 index 0000000000000..8dada3edaf50d --- /dev/null +++ b/plugins/cache-ehcache/licenses/ehcache-LICENSE.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
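The appendix that closes the bundled Apache-2.0 text is itself a short how-to: copy the boilerplate notice and replace the bracketed fields. Applied as a Java file header, with deliberately hypothetical year and owner values, it would read:

/*
 * Copyright 2024 Jane Doe
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */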
diff --git a/plugins/cache-ehcache/licenses/ehcache-NOTICE.txt b/plugins/cache-ehcache/licenses/ehcache-NOTICE.txt new file mode 100644 index 0000000000000..1dbd38242cc98 --- /dev/null +++ b/plugins/cache-ehcache/licenses/ehcache-NOTICE.txt @@ -0,0 +1,5 @@ +Ehcache V3 +Copyright 2014-2023 Terracotta, Inc. + +The product includes software from the Apache Commons Lang project, +under the Apache License 2.0 (see: org.ehcache.impl.internal.classes.commonslang) diff --git a/plugins/cache-ehcache/src/internalClusterTest/java/org/opensearch/cache/EhcacheDiskCacheIT.java b/plugins/cache-ehcache/src/internalClusterTest/java/org/opensearch/cache/EhcacheDiskCacheIT.java new file mode 100644 index 0000000000000..c68455463ee3d --- /dev/null +++ b/plugins/cache-ehcache/src/internalClusterTest/java/org/opensearch/cache/EhcacheDiskCacheIT.java @@ -0,0 +1,48 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cache; + +import org.opensearch.action.admin.cluster.node.info.NodeInfo; +import org.opensearch.action.admin.cluster.node.info.NodesInfoRequest; +import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.opensearch.action.admin.cluster.node.info.PluginsAndModules; +import org.opensearch.plugins.Plugin; +import org.opensearch.plugins.PluginInfo; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.Assert; + +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +public class EhcacheDiskCacheIT extends OpenSearchIntegTestCase { + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Arrays.asList(EhcacheCachePlugin.class); + } + + public void testPluginsAreInstalled() { + NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(); + nodesInfoRequest.addMetric(NodesInfoRequest.Metric.PLUGINS.metricName()); + NodesInfoResponse nodesInfoResponse = OpenSearchIntegTestCase.client().admin().cluster().nodesInfo(nodesInfoRequest).actionGet(); + List<PluginInfo> pluginInfos = nodesInfoResponse.getNodes() + .stream() + .flatMap( + (Function<NodeInfo, Stream<PluginInfo>>) nodeInfo -> nodeInfo.getInfo(PluginsAndModules.class).getPluginInfos().stream() + ) + .collect(Collectors.toList()); + Assert.assertTrue( + pluginInfos.stream().anyMatch(pluginInfo -> pluginInfo.getName().equals("org.opensearch.cache.EhcacheCachePlugin")) + ); + } +} diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheCachePlugin.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheCachePlugin.java new file mode 100644 index 0000000000000..ceda96e4a7d7d --- /dev/null +++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheCachePlugin.java @@ -0,0 +1,56 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cache; + +import org.opensearch.cache.store.disk.EhcacheDiskCache; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.settings.Setting; +import org.opensearch.plugins.CachePlugin; +import org.opensearch.plugins.Plugin; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static org.opensearch.cache.EhcacheDiskCacheSettings.CACHE_TYPE_MAP; + +/** + * Ehcache based cache plugin. + */ +public class EhcacheCachePlugin extends Plugin implements CachePlugin { + + private static final String EHCACHE_CACHE_PLUGIN = "EhcachePlugin"; + + /** + * Default constructor to avoid javadoc related failures. + */ + public EhcacheCachePlugin() {} + + @Override + public Map<String, ICache.Factory> getCacheFactoryMap() { + return Map.of(EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME, new EhcacheDiskCache.EhcacheDiskCacheFactory()); + } + + @Override + public List<Setting<?>> getSettings() { + List<Setting<?>> settingList = new ArrayList<>(); + for (Map.Entry<CacheType, Map<String, Setting<?>>> entry : CACHE_TYPE_MAP.entrySet()) { + for (Map.Entry<String, Setting<?>> entry1 : entry.getValue().entrySet()) { + settingList.add(entry1.getValue()); + } + } + return settingList; + } + + @Override + public String getName() { + return EHCACHE_CACHE_PLUGIN; + } +} diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheDiskCacheSettings.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheDiskCacheSettings.java new file mode 100644 index 0000000000000..837fd6b268ce6 --- /dev/null +++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheDiskCacheSettings.java @@ -0,0 +1,222 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cache; + +import org.opensearch.cache.store.disk.EhcacheDiskCache; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.unit.TimeValue; + +import java.util.HashMap; +import java.util.Map; + +import static org.opensearch.common.settings.Setting.Property.NodeScope; + +/** + * Settings related to ehcache disk cache. + */ +public class EhcacheDiskCacheSettings { + + /** + * Ehcache disk write minimum threads for its pool. + * + * Setting pattern: {cache_type}.ehcache_disk.min_threads + */ + + public static final Setting.AffixSetting<Integer> DISK_WRITE_MINIMUM_THREADS_SETTING = Setting.suffixKeySetting( + EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ".min_threads", + (key) -> Setting.intSetting(key, 2, 1, 5, NodeScope) + ); + + /** + * Ehcache disk write maximum threads for its pool. + * + * Setting pattern: {cache_type}.ehcache_disk.max_threads + */ + public static final Setting.AffixSetting<Integer> DISK_WRITE_MAXIMUM_THREADS_SETTING = Setting.suffixKeySetting( + EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ".max_threads", + (key) -> Setting.intSetting(key, 2, 1, 20, NodeScope) + ); + + /** + * Not to be confused with the number of disk segments; this is different. Defines + * the number of distinct write queues created for the disk store, where a group of segments share a write queue.
This is + * implemented with ehcache using a partitioned thread pool executor. By default all segments share a single write + * queue, i.e. write concurrency is 1. Check OffHeapDiskStoreConfiguration and DiskWriteThreadPool. + * + * Default is 1 within ehcache. + */ + public static final Setting.AffixSetting<Integer> DISK_WRITE_CONCURRENCY_SETTING = Setting.suffixKeySetting( + EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ".concurrency", + (key) -> Setting.intSetting(key, 1, 1, 3, NodeScope) + ); + + /** + * Defines how many segments the disk cache is separated into. A higher number achieves greater concurrency but + * will hold that many file pointers. + * + * Default value is 16 within Ehcache. + */ + public static final Setting.AffixSetting<Integer> DISK_SEGMENTS_SETTING = Setting.suffixKeySetting( + EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ".segments", + (key) -> Setting.intSetting(key, 16, 1, 32, NodeScope) + ); + + /** + * Storage path for disk cache. + */ + public static final Setting.AffixSetting<String> DISK_STORAGE_PATH_SETTING = Setting.suffixKeySetting( + EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ".storage.path", + (key) -> Setting.simpleString(key, "", NodeScope) + ); + + /** + * Disk cache alias. + */ + public static final Setting.AffixSetting<String> DISK_CACHE_ALIAS_SETTING = Setting.suffixKeySetting( + EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ".alias", + (key) -> Setting.simpleString(key, "", NodeScope) + ); + + /** + * Disk cache expire after access setting. + */ + public static final Setting.AffixSetting<TimeValue> DISK_CACHE_EXPIRE_AFTER_ACCESS_SETTING = Setting.suffixKeySetting( + EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ".expire_after_access", + (key) -> Setting.positiveTimeSetting(key, TimeValue.MAX_VALUE, NodeScope) + ); + + /** + * Disk cache max size setting. + */ + public static final Setting.AffixSetting<Long> DISK_CACHE_MAX_SIZE_IN_BYTES_SETTING = Setting.suffixKeySetting( + EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ".max_size_in_bytes", + (key) -> Setting.longSetting(key, 1073741824L, NodeScope) + ); + + /** + * Disk cache listener mode setting. + */ + public static final Setting.AffixSetting<Boolean> DISK_CACHE_LISTENER_MODE_SYNC_SETTING = Setting.suffixKeySetting( + EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ".is_event_listener_sync", + (key) -> Setting.boolSetting(key, false, NodeScope) + ); + + /** + * Key for disk segment. + */ + public static final String DISK_SEGMENT_KEY = "disk_segment"; + /** + * Key for max size. + */ + public static final String DISK_MAX_SIZE_IN_BYTES_KEY = "max_size_in_bytes"; + /** + * Key for expire after access. + */ + public static final String DISK_CACHE_EXPIRE_AFTER_ACCESS_KEY = "disk_cache_expire_after_access_key"; + /** + * Key for cache alias. + */ + public static final String DISK_CACHE_ALIAS_KEY = "disk_cache_alias"; + /** + * Key for disk segments. + */ + public static final String DISK_SEGMENTS_KEY = "disk_segments"; + /** + * Key for disk write concurrency. + */ + public static final String DISK_WRITE_CONCURRENCY_KEY = "disk_write_concurrency"; + /** + * Key for max threads. + */ + public static final String DISK_WRITE_MAXIMUM_THREADS_KEY = "disk_write_max_threads"; + /** + * Key for min threads. + */ + public static final String DISK_WRITE_MIN_THREADS_KEY = "disk_write_min_threads"; + /** + * Key for storage path.
+ */ + public static final String DISK_STORAGE_PATH_KEY = "disk_storage_path"; + /** + * Key for listener mode. + */ + public static final String DISK_LISTENER_MODE_SYNC_KEY = "disk_listener_mode"; + + /** + * Map of key to setting. + */ + private static final Map<String, Setting.AffixSetting<?>> KEY_SETTING_MAP = Map.of( + DISK_SEGMENT_KEY, + DISK_SEGMENTS_SETTING, + DISK_CACHE_EXPIRE_AFTER_ACCESS_KEY, + DISK_CACHE_EXPIRE_AFTER_ACCESS_SETTING, + DISK_CACHE_ALIAS_KEY, + DISK_CACHE_ALIAS_SETTING, + DISK_WRITE_CONCURRENCY_KEY, + DISK_WRITE_CONCURRENCY_SETTING, + DISK_WRITE_MAXIMUM_THREADS_KEY, + DISK_WRITE_MAXIMUM_THREADS_SETTING, + DISK_WRITE_MIN_THREADS_KEY, + DISK_WRITE_MINIMUM_THREADS_SETTING, + DISK_STORAGE_PATH_KEY, + DISK_STORAGE_PATH_SETTING, + DISK_MAX_SIZE_IN_BYTES_KEY, + DISK_CACHE_MAX_SIZE_IN_BYTES_SETTING, + DISK_LISTENER_MODE_SYNC_KEY, + DISK_CACHE_LISTENER_MODE_SYNC_SETTING + ); + + /** + * Map to store desired settings for a cache type. + */ + public static final Map<CacheType, Map<String, Setting<?>>> CACHE_TYPE_MAP = getCacheTypeMap(); + + /** + * Forms the concrete settings for each cache type and returns the resulting map. + * @return map of cacheType and associated settings. + */ + private static final Map<CacheType, Map<String, Setting<?>>> getCacheTypeMap() { + Map<CacheType, Map<String, Setting<?>>> cacheTypeMap = new HashMap<>(); + for (CacheType cacheType : CacheType.values()) { + Map<String, Setting<?>> settingMap = new HashMap<>(); + for (Map.Entry<String, Setting.AffixSetting<?>> entry : KEY_SETTING_MAP.entrySet()) { + settingMap.put(entry.getKey(), entry.getValue().getConcreteSettingForNamespace(cacheType.getSettingPrefix())); + } + cacheTypeMap.put(cacheType, settingMap); + } + return cacheTypeMap; + } + + /** + * Fetches the setting map for the given cache type. + * @param cacheType cache type + * @return settings + */ + public static final Map<String, Setting<?>> getSettingListForCacheType(CacheType cacheType) { + Map<String, Setting<?>> cacheTypeSettings = CACHE_TYPE_MAP.get(cacheType); + if (cacheTypeSettings == null) { + throw new IllegalArgumentException( + "No settings exist for cache store name: " + + EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + + " associated with " + + "cache type: " + + cacheType + ); + } + return cacheTypeSettings; + } + + /** + * Default constructor. Added to fix javadocs. + */ + public EhcacheDiskCacheSettings() {} +} diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/package-info.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/package-info.java new file mode 100644 index 0000000000000..f9be1c3dbf826 --- /dev/null +++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +/** Base package for cache plugin */ +package org.opensearch.cache; diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java new file mode 100644 index 0000000000000..ddfd5b838e927 --- /dev/null +++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java @@ -0,0 +1,597 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cache.store.disk; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.OpenSearchException; +import org.opensearch.cache.EhcacheDiskCacheSettings; +import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.cache.LoadAwareCacheLoader; +import org.opensearch.common.cache.RemovalListener; +import org.opensearch.common.cache.RemovalNotification; +import org.opensearch.common.cache.RemovalReason; +import org.opensearch.common.cache.store.builders.ICacheBuilder; +import org.opensearch.common.cache.store.config.CacheConfig; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.metrics.CounterMetric; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; + +import java.io.File; +import java.time.Duration; +import java.util.Iterator; +import java.util.Map; +import java.util.NoSuchElementException; +import java.util.Objects; +import java.util.UUID; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutionException; +import java.util.function.BiFunction; +import java.util.function.Supplier; + +import org.ehcache.Cache; +import org.ehcache.CachePersistenceException; +import org.ehcache.PersistentCacheManager; +import org.ehcache.config.builders.CacheConfigurationBuilder; +import org.ehcache.config.builders.CacheEventListenerConfigurationBuilder; +import org.ehcache.config.builders.CacheManagerBuilder; +import org.ehcache.config.builders.PooledExecutionServiceConfigurationBuilder; +import org.ehcache.config.builders.ResourcePoolsBuilder; +import org.ehcache.config.units.MemoryUnit; +import org.ehcache.event.CacheEvent; +import org.ehcache.event.CacheEventListener; +import org.ehcache.event.EventType; +import org.ehcache.expiry.ExpiryPolicy; +import org.ehcache.impl.config.store.disk.OffHeapDiskStoreConfiguration; +import org.ehcache.spi.loaderwriter.CacheLoadingException; +import org.ehcache.spi.loaderwriter.CacheWritingException; + +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_CACHE_ALIAS_KEY; +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_CACHE_EXPIRE_AFTER_ACCESS_KEY; +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_LISTENER_MODE_SYNC_KEY; +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_MAX_SIZE_IN_BYTES_KEY; +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_SEGMENT_KEY; +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_STORAGE_PATH_KEY; +import static 
org.opensearch.cache.EhcacheDiskCacheSettings.DISK_WRITE_CONCURRENCY_KEY; +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_WRITE_MAXIMUM_THREADS_KEY; +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_WRITE_MIN_THREADS_KEY; + +/** + * This variant of disk cache uses Ehcache underneath. + * @param <K> Type of key. + * @param <V> Type of value. + * + * @opensearch.experimental + * + */ +@ExperimentalApi +public class EhcacheDiskCache<K, V> implements ICache<K, V> { + + private static final Logger logger = LogManager.getLogger(EhcacheDiskCache.class); + + // Unique id associated with this cache. + private final static String UNIQUE_ID = UUID.randomUUID().toString(); + private final static String THREAD_POOL_ALIAS_PREFIX = "ehcachePool"; + private final static int MINIMUM_MAX_SIZE_IN_BYTES = 1024 * 100; // 100KB + + // A Cache manager can create many caches. + private final PersistentCacheManager cacheManager; + + // Disk cache + private Cache<K, V> cache; + private final long maxWeightInBytes; + private final String storagePath; + private final Class<K> keyType; + private final Class<V> valueType; + private final TimeValue expireAfterAccess; + private final EhCacheEventListener<K, V> ehCacheEventListener; + private final String threadPoolAlias; + private final Settings settings; + private final RemovalListener<K, V> removalListener; + private final CacheType cacheType; + private final String diskCacheAlias; + // TODO: Move count to stats once those changes are ready. + private final CounterMetric entries = new CounterMetric(); + + /** + * Used in computeIfAbsent to synchronize loading of a given key. This is needed as ehcache doesn't provide a + * computeIfAbsent method. + */ + Map<K, CompletableFuture<Tuple<K, V>>> completableFutureMap = new ConcurrentHashMap<>(); + + private EhcacheDiskCache(Builder<K, V> builder) { + this.keyType = Objects.requireNonNull(builder.keyType, "Key type shouldn't be null"); + this.valueType = Objects.requireNonNull(builder.valueType, "Value type shouldn't be null"); + this.expireAfterAccess = Objects.requireNonNull(builder.getExpireAfterAcess(), "ExpireAfterAccess value shouldn't " + "be null"); + this.maxWeightInBytes = builder.getMaxWeightInBytes(); + if (this.maxWeightInBytes <= MINIMUM_MAX_SIZE_IN_BYTES) { + throw new IllegalArgumentException("Ehcache Disk tier cache size should be greater than " + MINIMUM_MAX_SIZE_IN_BYTES); + } + this.cacheType = Objects.requireNonNull(builder.cacheType, "Cache type shouldn't be null"); + if (builder.diskCacheAlias == null || builder.diskCacheAlias.isBlank()) { + this.diskCacheAlias = "ehcacheDiskCache#" + this.cacheType; + } else { + this.diskCacheAlias = builder.diskCacheAlias; + } + this.storagePath = builder.storagePath; + if (this.storagePath == null || this.storagePath.isBlank()) { + throw new IllegalArgumentException("Storage path shouldn't be null or empty"); + } + if (builder.threadPoolAlias == null || builder.threadPoolAlias.isBlank()) { + this.threadPoolAlias = THREAD_POOL_ALIAS_PREFIX + "DiskWrite#" + UNIQUE_ID; + } else { + this.threadPoolAlias = builder.threadPoolAlias; + } + this.settings = Objects.requireNonNull(builder.getSettings(), "Settings objects shouldn't be null"); + this.cacheManager = buildCacheManager(); + Objects.requireNonNull(builder.getRemovalListener(), "Removal listener can't be null"); + this.removalListener = builder.getRemovalListener(); + this.ehCacheEventListener = new EhCacheEventListener<K, V>(builder.getRemovalListener()); + this.cache = 
buildCache(Duration.ofMillis(expireAfterAccess.getMillis()), builder); + } + + private Cache<K, V> buildCache(Duration expireAfterAccess, Builder<K, V> builder) { + try { + return this.cacheManager.createCache( + this.diskCacheAlias, + CacheConfigurationBuilder.newCacheConfigurationBuilder( + this.keyType, + this.valueType, + ResourcePoolsBuilder.newResourcePoolsBuilder().disk(maxWeightInBytes, MemoryUnit.B) + ).withExpiry(new ExpiryPolicy<>() { + @Override + public Duration getExpiryForCreation(K key, V value) { + return INFINITE; + } + + @Override + public Duration getExpiryForAccess(K key, Supplier<? extends V> value) { + return expireAfterAccess; + } + + @Override + public Duration getExpiryForUpdate(K key, Supplier<? extends V> oldValue, V newValue) { + return INFINITE; + } + }) + .withService(getListenerConfiguration(builder)) + .withService( + new OffHeapDiskStoreConfiguration( + this.threadPoolAlias, + (Integer) EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType) + .get(DISK_WRITE_CONCURRENCY_KEY) + .get(settings), + (Integer) EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType).get(DISK_SEGMENT_KEY).get(settings) + ) + ) + ); + } catch (IllegalArgumentException ex) { + logger.error("Ehcache disk cache initialization failed due to illegal argument: {}", ex.getMessage()); + throw ex; + } catch (IllegalStateException ex) { + logger.error("Ehcache disk cache initialization failed: {}", ex.getMessage()); + throw ex; + } + } + + private CacheEventListenerConfigurationBuilder getListenerConfiguration(Builder<K, V> builder) { + CacheEventListenerConfigurationBuilder configurationBuilder = CacheEventListenerConfigurationBuilder.newEventListenerConfiguration( + this.ehCacheEventListener, + EventType.EVICTED, + EventType.EXPIRED, + EventType.REMOVED, + EventType.UPDATED, + EventType.CREATED + ).unordered(); + if (builder.isEventListenerModeSync) { + return configurationBuilder.synchronous(); + } else { + return configurationBuilder.asynchronous(); + } + } + + // Package private for testing + Map<K, CompletableFuture<Tuple<K, V>>> getCompletableFutureMap() { + return completableFutureMap; + } + + @SuppressForbidden(reason = "Ehcache uses File.io") + private PersistentCacheManager buildCacheManager() { + // In case we use multiple ehCaches, we can define this cache manager at a global level. + return CacheManagerBuilder.newCacheManagerBuilder() + .with(CacheManagerBuilder.persistence(new File(storagePath))) + + .using( + PooledExecutionServiceConfigurationBuilder.newPooledExecutionServiceConfigurationBuilder() + .defaultPool(THREAD_POOL_ALIAS_PREFIX + "Default#" + UNIQUE_ID, 1, 3) // Default pool used for other tasks + // like event listeners + .pool( + this.threadPoolAlias, + (Integer) EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType) + .get(DISK_WRITE_MIN_THREADS_KEY) + .get(settings), + (Integer) EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType) + .get(DISK_WRITE_MAXIMUM_THREADS_KEY) + .get(settings) + ) + .build() + ) + .build(true); + } + + @Override + public V get(K key) { + if (key == null) { + throw new IllegalArgumentException("Key passed to ehcache disk cache was null."); + } + V value; + try { + value = cache.get(key); + } catch (CacheLoadingException ex) { + throw new OpenSearchException("Exception occurred while trying to fetch item from ehcache disk cache"); + } + return value; + } + + /** + * Puts the item into cache. + * @param key Type of key. + * @param value Type of value. 
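+ * Note: an underlying ehcache write failure ({@link CacheWritingException}) is rethrown wrapped in an {@link OpenSearchException}.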
+ */ + @Override + public void put(K key, V value) { + try { + cache.put(key, value); + } catch (CacheWritingException ex) { + throw new OpenSearchException("Exception occurred while putting item into ehcache disk cache"); + } + } + + /** + * Computes the value using the loader in case the key is not present, otherwise fetches it. + * @param key Type of key + * @param loader loader to load the value in case key is missing + * @return value + * @throws Exception when either internal get or put calls fail. + */ + @Override + public V computeIfAbsent(K key, LoadAwareCacheLoader<K, V> loader) throws Exception { + // Ehcache doesn't provide any computeIfAbsent function. It exposes putIfAbsent, but that works differently and is + // not performant in case there are multiple concurrent requests for the same key. Below is our own custom + // implementation of computeIfAbsent on top of ehcache, inspired by the OpenSearch Cache implementation. + V value = cache.get(key); + if (value == null) { + value = compute(key, loader); + } + return value; + } + + private V compute(K key, LoadAwareCacheLoader<K, V> loader) throws Exception { + // A future that returns a pair of key/value. + CompletableFuture<Tuple<K, V>> completableFuture = new CompletableFuture<>(); + // Only one of the threads will succeed in putting a future into the map for the same key. + // The rest will fetch the existing future. + CompletableFuture<Tuple<K, V>> future = completableFutureMap.putIfAbsent(key, completableFuture); + // Handler to handle results post processing. Takes a tuple<key, value> or exception as an input and returns + // the value. Also, before returning the value, puts it in the cache. + BiFunction<Tuple<K, V>, Throwable, V> handler = (pair, ex) -> { + V value = null; + if (pair != null) { + cache.put(pair.v1(), pair.v2()); + value = pair.v2(); // Returning a value itself assuming that a next get should return the same. Should + // be safe to assume if we got no exception and reached here. + } + completableFutureMap.remove(key); // Remove key from map as not needed anymore. + return value; + }; + CompletableFuture<V> completableValue; + if (future == null) { + future = completableFuture; + completableValue = future.handle(handler); + V value; + try { + value = loader.load(key); + } catch (Exception ex) { + future.completeExceptionally(ex); + throw new ExecutionException(ex); + } + if (value == null) { + NullPointerException npe = new NullPointerException("loader returned a null value"); + future.completeExceptionally(npe); + throw new ExecutionException(npe); + } else { + future.complete(new Tuple<>(key, value)); + } + + } else { + completableValue = future.handle(handler); + } + V value; + try { + value = completableValue.get(); + if (future.isCompletedExceptionally()) { + future.get(); // call get to force the exception to be thrown for other concurrent callers + throw new IllegalStateException("Future completed exceptionally but no error thrown"); + } + } catch (InterruptedException ex) { + throw new IllegalStateException(ex); + } + return value; + } + + /** + * Invalidate the item. + * @param key key to be invalidated. + */ + @Override + public void invalidate(K key) { + try { + cache.remove(key); + } catch (CacheWritingException ex) { + throw new RuntimeException(ex); + } + + } + + @Override + public void invalidateAll() {} + + /** + * Provides a way to iterate over disk cache keys.
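+ * A minimal usage sketch (assuming a cache with String keys): {@code for (String key : cache.keys()) { ... }}. Each call to iterator() wraps a fresh ehcache iterator.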
+ * @return Iterable + */ + @Override + public Iterable<K> keys() { + return () -> new EhCacheKeyIterator<>(cache.iterator()); + } + + /** + * Gives the current count of keys in disk cache. + * @return current count of keys + */ + @Override + public long count() { + return entries.count(); + } + + @Override + public void refresh() { + // TODO: ehcache doesn't provide a way to refresh a cache. + } + + @Override + public void close() { + cacheManager.removeCache(this.diskCacheAlias); + cacheManager.close(); + try { + cacheManager.destroyCache(this.diskCacheAlias); + } catch (CachePersistenceException e) { + throw new OpenSearchException("Exception occurred while destroying ehcache and associated data", e); + } + } + + /** + * This iterator wraps ehCache iterator and only iterates over its keys. + * @param <K> Type of key + */ + class EhCacheKeyIterator<K> implements Iterator<K> { + + Iterator<Cache.Entry<K, V>> iterator; + + EhCacheKeyIterator(Iterator<Cache.Entry<K, V>> iterator) { + this.iterator = iterator; + } + + @Override + public boolean hasNext() { + return iterator.hasNext(); + } + + @Override + public K next() { + if (!hasNext()) { + throw new NoSuchElementException(); + } + return iterator.next().getKey(); + } + } + + /** + * Wrapper over Ehcache original listener to listen to desired events and notify desired subscribers. + * @param <K> Type of key + * @param <V> Type of value + */ + class EhCacheEventListener<K, V> implements CacheEventListener<K, V> { + + private final RemovalListener<K, V> removalListener; + + EhCacheEventListener(RemovalListener<K, V> removalListener) { + this.removalListener = removalListener; + } + + @Override + public void onEvent(CacheEvent<? extends K, ? extends V> event) { + switch (event.getType()) { + case CREATED: + entries.inc(); + // this.eventListener.onCached(event.getKey(), event.getNewValue(), CacheStoreType.DISK); + assert event.getOldValue() == null; + break; + case EVICTED: + this.removalListener.onRemoval(new RemovalNotification<>(event.getKey(), event.getOldValue(), RemovalReason.EVICTED)); + entries.dec(); + assert event.getNewValue() == null; + break; + case REMOVED: + entries.dec(); + this.removalListener.onRemoval(new RemovalNotification<>(event.getKey(), event.getOldValue(), RemovalReason.EXPLICIT)); + assert event.getNewValue() == null; + break; + case EXPIRED: + this.removalListener.onRemoval( + new RemovalNotification<>(event.getKey(), event.getOldValue(), RemovalReason.INVALIDATED) + ); + entries.dec(); + assert event.getNewValue() == null; + break; + case UPDATED: + break; + default: + break; + } + } + } + + /** + * Factory to create an ehcache disk cache. + */ + public static class EhcacheDiskCacheFactory implements ICache.Factory { + + /** + * Ehcache disk cache name. + */ + public static final String EHCACHE_DISK_CACHE_NAME = "ehcache_disk"; + + /** + * Default constructor. 
+ */ + public EhcacheDiskCacheFactory() {} + + @Override + public <K, V> ICache<K, V> create(CacheConfig<K, V> config, CacheType cacheType, Map<String, Factory> cacheFactories) { + Map<String, Setting<?>> settingList = EhcacheDiskCacheSettings.getSettingListForCacheType(cacheType); + Settings settings = config.getSettings(); + return new Builder<K, V>().setStoragePath((String) settingList.get(DISK_STORAGE_PATH_KEY).get(settings)) + .setDiskCacheAlias((String) settingList.get(DISK_CACHE_ALIAS_KEY).get(settings)) + .setIsEventListenerModeSync((Boolean) settingList.get(DISK_LISTENER_MODE_SYNC_KEY).get(settings)) + .setCacheType(cacheType) + .setKeyType(config.getKeyType()) + .setValueType(config.getValueType()) + .setRemovalListener(config.getRemovalListener()) + .setExpireAfterAccess((TimeValue) settingList.get(DISK_CACHE_EXPIRE_AFTER_ACCESS_KEY).get(settings)) + .setMaximumWeightInBytes((Long) settingList.get(DISK_MAX_SIZE_IN_BYTES_KEY).get(settings)) + .setSettings(settings) + .build(); + } + + @Override + public String getCacheName() { + return EHCACHE_DISK_CACHE_NAME; + } + } + + /** + * Builder object to build Ehcache disk tier. + * @param <K> Type of key + * @param <V> Type of value + */ + public static class Builder<K, V> extends ICacheBuilder<K, V> { + + private CacheType cacheType; + private String storagePath; + + private String threadPoolAlias; + + private String diskCacheAlias; + + // Provides capability to make the ehcache event listener run in sync mode. Also used for testing. + private boolean isEventListenerModeSync; + + private Class<K> keyType; + + private Class<V> valueType; + + /** + * Default constructor. Added to fix javadocs. + */ + public Builder() {} + + /** + * Sets the desired cache type. + * @param cacheType cache type + * @return builder + */ + public Builder<K, V> setCacheType(CacheType cacheType) { + this.cacheType = cacheType; + return this; + } + + /** + * Sets the class type of key. + * @param keyType type of key + * @return builder + */ + public Builder<K, V> setKeyType(Class<K> keyType) { + this.keyType = keyType; + return this; + } + + /** + * Sets the class type of value. + * @param valueType type of value + * @return builder + */ + public Builder<K, V> setValueType(Class<V> valueType) { + this.valueType = valueType; + return this; + } + + /** + * Desired storage path for disk cache. + * @param storagePath path for disk cache + * @return builder + */ + public Builder<K, V> setStoragePath(String storagePath) { + this.storagePath = storagePath; + return this; + } + + /** + * Sets the thread pool alias for the cache. + * @param threadPoolAlias alias + * @return builder + */ + public Builder<K, V> setThreadPoolAlias(String threadPoolAlias) { + this.threadPoolAlias = threadPoolAlias; + return this; + } + + /** + * Sets the disk cache alias. + * @param diskCacheAlias disk cache alias + * @return builder + */ + public Builder<K, V> setDiskCacheAlias(String diskCacheAlias) { + this.diskCacheAlias = diskCacheAlias; + return this; + } + + /** + * Determines whether event listener is triggered async/sync.
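+ * Sync mode makes removal and count bookkeeping deterministic, which is mainly useful for tests that assert exact entry counts.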
+ * @param isEventListenerModeSync mode sync + * @return builder + */ + public Builder<K, V> setIsEventListenerModeSync(boolean isEventListenerModeSync) { + this.isEventListenerModeSync = isEventListenerModeSync; + return this; + } + + @Override + public EhcacheDiskCache<K, V> build() { + return new EhcacheDiskCache<>(this); + } + } +} diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/package-info.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/package-info.java new file mode 100644 index 0000000000000..79f8eec2f3f4c --- /dev/null +++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/package-info.java @@ -0,0 +1,11 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +/** + * Base package for disk cache related stuff. + */ +package org.opensearch.cache.store.disk; diff --git a/plugins/cache-ehcache/src/main/plugin-metadata/plugin-security.policy b/plugins/cache-ehcache/src/main/plugin-metadata/plugin-security.policy new file mode 100644 index 0000000000000..40007eea62dba --- /dev/null +++ b/plugins/cache-ehcache/src/main/plugin-metadata/plugin-security.policy @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +grant { + permission java.lang.RuntimePermission "accessClassInPackage.sun.misc"; + permission java.lang.RuntimePermission "createClassLoader"; +}; + diff --git a/plugins/cache-ehcache/src/test/java/org/opensearch/cache/EhcachePluginTests.java b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/EhcachePluginTests.java new file mode 100644 index 0000000000000..538a45456ddc3 --- /dev/null +++ b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/EhcachePluginTests.java @@ -0,0 +1,26 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cache; + +import org.opensearch.cache.store.disk.EhcacheDiskCache; +import org.opensearch.common.cache.ICache; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Map; + +public class EhcachePluginTests extends OpenSearchTestCase { + + private EhcacheCachePlugin ehcacheCachePlugin = new EhcacheCachePlugin(); + + public void testGetCacheStoreTypeMap() { + Map<String, ICache.Factory> factoryMap = ehcacheCachePlugin.getCacheFactoryMap(); + assertNotNull(factoryMap); + assertNotNull(factoryMap.get(EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME)); + } +} diff --git a/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java new file mode 100644 index 0000000000000..d5f5fbb9293bc --- /dev/null +++ b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java @@ -0,0 +1,505 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cache.store.disk; + +import org.opensearch.cache.EhcacheDiskCacheSettings; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.cache.LoadAwareCacheLoader; +import org.opensearch.common.cache.RemovalListener; +import org.opensearch.common.cache.RemovalNotification; +import org.opensearch.common.cache.store.config.CacheConfig; +import org.opensearch.common.metrics.CounterMetric; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.env.NodeEnvironment; +import org.opensearch.test.OpenSearchSingleNodeTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Phaser; + +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_LISTENER_MODE_SYNC_KEY; +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_MAX_SIZE_IN_BYTES_KEY; +import static org.opensearch.cache.EhcacheDiskCacheSettings.DISK_STORAGE_PATH_KEY; +import static org.hamcrest.CoreMatchers.instanceOf; + +public class EhCacheDiskCacheTests extends OpenSearchSingleNodeTestCase { + + private static final int CACHE_SIZE_IN_BYTES = 1024 * 101; + + public void testBasicGetAndPut() throws IOException { + Settings settings = Settings.builder().build(); + MockRemovalListener<String, String> removalListener = new MockRemovalListener<>(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + ICache<String, String> ehcacheTest = new EhcacheDiskCache.Builder<String, String>().setThreadPoolAlias("ehcacheTest") + .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") + .setIsEventListenerModeSync(true) + .setKeyType(String.class) + .setValueType(String.class) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .build(); + int randomKeys = randomIntBetween(10, 100); + Map<String, String> keyValueMap = new HashMap<>(); + for (int i = 0; i < randomKeys; i++) { + keyValueMap.put(UUID.randomUUID().toString(), UUID.randomUUID().toString()); + } + for (Map.Entry<String, String> entry : keyValueMap.entrySet()) { + ehcacheTest.put(entry.getKey(), entry.getValue()); + } + for (Map.Entry<String, String> entry : keyValueMap.entrySet()) { + String value = ehcacheTest.get(entry.getKey()); + assertEquals(entry.getValue(), value); + } + assertEquals(randomKeys, ehcacheTest.count()); + + // Validate misses + int expectedNumberOfMisses = randomIntBetween(10, 200); + for (int i = 0; i < expectedNumberOfMisses; i++) { + ehcacheTest.get(UUID.randomUUID().toString()); + } + + ehcacheTest.close(); + } + } + + public void testBasicGetAndPutUsingFactory() throws IOException { + MockRemovalListener<String, String> removalListener = new MockRemovalListener<>(); + try (NodeEnvironment env = newNodeEnvironment(Settings.EMPTY)) { + ICache.Factory ehcacheFactory = new EhcacheDiskCache.EhcacheDiskCacheFactory(); + ICache<String, String> ehcacheTest = ehcacheFactory.create( + new CacheConfig.Builder<String, String>().setValueType(String.class) + .setKeyType(String.class) + .setRemovalListener(removalListener) + .setSettings( + 
Settings.builder() + .put( + EhcacheDiskCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(DISK_MAX_SIZE_IN_BYTES_KEY) + .getKey(), + CACHE_SIZE_IN_BYTES + ) + .put( + EhcacheDiskCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(DISK_STORAGE_PATH_KEY) + .getKey(), + env.nodePaths()[0].indicesPath.toString() + "/request_cache" + ) + .put( + EhcacheDiskCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(DISK_LISTENER_MODE_SYNC_KEY) + .getKey(), + true + ) + .build() + ) + .build(), + CacheType.INDICES_REQUEST_CACHE, + Map.of() + ); + int randomKeys = randomIntBetween(10, 100); + Map<String, String> keyValueMap = new HashMap<>(); + for (int i = 0; i < randomKeys; i++) { + keyValueMap.put(UUID.randomUUID().toString(), UUID.randomUUID().toString()); + } + for (Map.Entry<String, String> entry : keyValueMap.entrySet()) { + ehcacheTest.put(entry.getKey(), entry.getValue()); + } + for (Map.Entry<String, String> entry : keyValueMap.entrySet()) { + String value = ehcacheTest.get(entry.getKey()); + assertEquals(entry.getValue(), value); + } + assertEquals(randomKeys, ehcacheTest.count()); + + // Validate misses + int expectedNumberOfMisses = randomIntBetween(10, 200); + for (int i = 0; i < expectedNumberOfMisses; i++) { + ehcacheTest.get(UUID.randomUUID().toString()); + } + + ehcacheTest.close(); + } + } + + public void testConcurrentPut() throws Exception { + Settings settings = Settings.builder().build(); + MockRemovalListener<String, String> removalListener = new MockRemovalListener<>(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + ICache<String, String> ehcacheTest = new EhcacheDiskCache.Builder<String, String>().setDiskCacheAlias("test1") + .setThreadPoolAlias("ehcacheTest") + .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") + .setIsEventListenerModeSync(true) // For accurate count + .setKeyType(String.class) + .setValueType(String.class) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .build(); + int randomKeys = randomIntBetween(20, 100); + Thread[] threads = new Thread[randomKeys]; + Phaser phaser = new Phaser(randomKeys + 1); + CountDownLatch countDownLatch = new CountDownLatch(randomKeys); + Map<String, String> keyValueMap = new HashMap<>(); + int j = 0; + for (int i = 0; i < randomKeys; i++) { + keyValueMap.put(UUID.randomUUID().toString(), UUID.randomUUID().toString()); + } + for (Map.Entry<String, String> entry : keyValueMap.entrySet()) { + threads[j] = new Thread(() -> { + phaser.arriveAndAwaitAdvance(); + ehcacheTest.put(entry.getKey(), entry.getValue()); + countDownLatch.countDown(); + }); + threads[j].start(); + j++; + } + phaser.arriveAndAwaitAdvance(); // Will trigger parallel puts above. 
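+ // Rendezvous: the Phaser was created with randomKeys + 1 parties, so the main thread's arrival above is what releases all worker threads to run their puts at the same time.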
+ countDownLatch.await(); // Wait for all threads to finish + for (Map.Entry<String, String> entry : keyValueMap.entrySet()) { + String value = ehcacheTest.get(entry.getKey()); + assertEquals(entry.getValue(), value); + } + assertEquals(randomKeys, ehcacheTest.count()); + ehcacheTest.close(); + } + } + + public void testEhcacheParallelGets() throws Exception { + Settings settings = Settings.builder().build(); + MockRemovalListener<String, String> removalListener = new MockRemovalListener<>(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + ICache<String, String> ehcacheTest = new EhcacheDiskCache.Builder<String, String>().setDiskCacheAlias("test1") + .setThreadPoolAlias("ehcacheTest") + .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") + .setIsEventListenerModeSync(true) // For accurate count + .setKeyType(String.class) + .setValueType(String.class) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .build(); + int randomKeys = randomIntBetween(20, 100); + Thread[] threads = new Thread[randomKeys]; + Phaser phaser = new Phaser(randomKeys + 1); + CountDownLatch countDownLatch = new CountDownLatch(randomKeys); + Map<String, String> keyValueMap = new HashMap<>(); + int j = 0; + for (int i = 0; i < randomKeys; i++) { + keyValueMap.put(UUID.randomUUID().toString(), UUID.randomUUID().toString()); + } + for (Map.Entry<String, String> entry : keyValueMap.entrySet()) { + ehcacheTest.put(entry.getKey(), entry.getValue()); + } + assertEquals(keyValueMap.size(), ehcacheTest.count()); + for (Map.Entry<String, String> entry : keyValueMap.entrySet()) { + threads[j] = new Thread(() -> { + phaser.arriveAndAwaitAdvance(); + assertEquals(entry.getValue(), ehcacheTest.get(entry.getKey())); + countDownLatch.countDown(); + }); + threads[j].start(); + j++; + } + phaser.arriveAndAwaitAdvance(); // Will trigger parallel gets above.
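+ // Same rendezvous pattern as in testConcurrentPut, except the worker threads issue concurrent gets against pre-populated keys.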
+ countDownLatch.await(); // Wait for all threads to finish + ehcacheTest.close(); + } + } + + public void testEhcacheKeyIterator() throws Exception { + Settings settings = Settings.builder().build(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + ICache<String, String> ehcacheTest = new EhcacheDiskCache.Builder<String, String>().setDiskCacheAlias("test1") + .setThreadPoolAlias("ehcacheTest") + .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") + .setIsEventListenerModeSync(true) + .setKeyType(String.class) + .setValueType(String.class) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(new MockRemovalListener<>()) + .build(); + + int randomKeys = randomIntBetween(2, 100); + Map<String, String> keyValueMap = new HashMap<>(); + for (int i = 0; i < randomKeys; i++) { + keyValueMap.put(UUID.randomUUID().toString(), UUID.randomUUID().toString()); + } + for (Map.Entry<String, String> entry : keyValueMap.entrySet()) { + ehcacheTest.put(entry.getKey(), entry.getValue()); + } + Iterator<String> keys = ehcacheTest.keys().iterator(); + int keysCount = 0; + while (keys.hasNext()) { + String key = keys.next(); + keysCount++; + assertNotNull(ehcacheTest.get(key)); + } + assertEquals(keysCount, randomKeys); + ehcacheTest.close(); + } + } + + public void testEvictions() throws Exception { + Settings settings = Settings.builder().build(); + MockRemovalListener<String, String> removalListener = new MockRemovalListener<>(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + ICache<String, String> ehcacheTest = new EhcacheDiskCache.Builder<String, String>().setDiskCacheAlias("test1") + .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") + .setIsEventListenerModeSync(true) + .setThreadPoolAlias("ehcacheTest") + .setKeyType(String.class) + .setValueType(String.class) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .build(); + + // Generate a string with 100 characters + String value = generateRandomString(100); + + // Trying to generate more than 100kb to cause evictions. 
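+ // Rough math: 1000 values of 100 characters each are ~100KB of value data alone; with keys and per-entry overhead this exceeds the 101KB (1024 * 101 bytes) cache size configured above, forcing evictions.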
+ for (int i = 0; i < 1000; i++) { + String key = "Key" + i; + ehcacheTest.put(key, value); + } + assertEquals(660, removalListener.evictionMetric.count()); + ehcacheTest.close(); + } + } + + public void testComputeIfAbsentConcurrently() throws Exception { + Settings settings = Settings.builder().build(); + MockRemovalListener<String, String> removalListener = new MockRemovalListener<>(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + ICache<String, String> ehcacheTest = new EhcacheDiskCache.Builder<String, String>().setDiskCacheAlias("test1") + .setIsEventListenerModeSync(true) + .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") + .setThreadPoolAlias("ehcacheTest") + .setKeyType(String.class) + .setValueType(String.class) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .build(); + + int numberOfRequest = 2; // randomIntBetween(200, 400); + String key = UUID.randomUUID().toString(); + String value = "dummy"; + Thread[] threads = new Thread[numberOfRequest]; + Phaser phaser = new Phaser(numberOfRequest + 1); + CountDownLatch countDownLatch = new CountDownLatch(numberOfRequest); + + List<LoadAwareCacheLoader<String, String>> loadAwareCacheLoaderList = new CopyOnWriteArrayList<>(); + + // Try to hit different requests with the same key concurrently. Verify value is only loaded once. + for (int i = 0; i < numberOfRequest; i++) { + threads[i] = new Thread(() -> { + LoadAwareCacheLoader<String, String> loadAwareCacheLoader = new LoadAwareCacheLoader<>() { + boolean isLoaded; + + @Override + public boolean isLoaded() { + return isLoaded; + } + + @Override + public String load(String key) { + isLoaded = true; + return value; + } + }; + loadAwareCacheLoaderList.add(loadAwareCacheLoader); + phaser.arriveAndAwaitAdvance(); + try { + assertEquals(value, ehcacheTest.computeIfAbsent(key, loadAwareCacheLoader)); + } catch (Exception e) { + throw new RuntimeException(e); + } + countDownLatch.countDown(); + }); + threads[i].start(); + } + phaser.arriveAndAwaitAdvance(); + countDownLatch.await(); + int numberOfTimesValueLoaded = 0; + for (int i = 0; i < numberOfRequest; i++) { + if (loadAwareCacheLoaderList.get(i).isLoaded()) { + numberOfTimesValueLoaded++; + } + } + assertEquals(1, numberOfTimesValueLoaded); + assertEquals(0, ((EhcacheDiskCache) ehcacheTest).getCompletableFutureMap().size()); + assertEquals(1, ehcacheTest.count()); + ehcacheTest.close(); + } + } + + public void testComputeIfAbsentConcurrentlyAndThrowsException() throws Exception { + Settings settings = Settings.builder().build(); + MockRemovalListener<String, String> removalListener = new MockRemovalListener<>(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + ICache<String, String> ehcacheTest = new EhcacheDiskCache.Builder<String, String>().setDiskCacheAlias("test1") + .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") + .setIsEventListenerModeSync(true) + .setThreadPoolAlias("ehcacheTest") + .setKeyType(String.class) + .setValueType(String.class) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .build(); + + int numberOfRequest = randomIntBetween(200, 400); + String key = UUID.randomUUID().toString(); + Thread[] threads = new
Thread[numberOfRequest]; + Phaser phaser = new Phaser(numberOfRequest + 1); + CountDownLatch countDownLatch = new CountDownLatch(numberOfRequest); + + List<LoadAwareCacheLoader<String, String>> loadAwareCacheLoaderList = new CopyOnWriteArrayList<>(); + + // Try to hit different requests with the same key concurrently. Loader throws exception. + for (int i = 0; i < numberOfRequest; i++) { + threads[i] = new Thread(() -> { + LoadAwareCacheLoader<String, String> loadAwareCacheLoader = new LoadAwareCacheLoader<>() { + boolean isLoaded; + + @Override + public boolean isLoaded() { + return isLoaded; + } + + @Override + public String load(String key) throws Exception { + isLoaded = true; + throw new RuntimeException("Exception"); + } + }; + loadAwareCacheLoaderList.add(loadAwareCacheLoader); + phaser.arriveAndAwaitAdvance(); + assertThrows(ExecutionException.class, () -> ehcacheTest.computeIfAbsent(key, loadAwareCacheLoader)); + countDownLatch.countDown(); + }); + threads[i].start(); + } + phaser.arriveAndAwaitAdvance(); + countDownLatch.await(); + + assertEquals(0, ((EhcacheDiskCache) ehcacheTest).getCompletableFutureMap().size()); + ehcacheTest.close(); + } + } + + public void testComputeIfAbsentWithNullValueLoading() throws Exception { + Settings settings = Settings.builder().build(); + MockRemovalListener<String, String> removalListener = new MockRemovalListener<>(); + try (NodeEnvironment env = newNodeEnvironment(settings)) { + ICache<String, String> ehcacheTest = new EhcacheDiskCache.Builder<String, String>().setDiskCacheAlias("test1") + .setThreadPoolAlias("ehcacheTest") + .setIsEventListenerModeSync(true) + .setStoragePath(env.nodePaths()[0].indicesPath.toString() + "/request_cache") + .setKeyType(String.class) + .setValueType(String.class) + .setCacheType(CacheType.INDICES_REQUEST_CACHE) + .setSettings(settings) + .setExpireAfterAccess(TimeValue.MAX_VALUE) + .setMaximumWeightInBytes(CACHE_SIZE_IN_BYTES) + .setRemovalListener(removalListener) + .build(); + + int numberOfRequest = randomIntBetween(200, 400); + String key = UUID.randomUUID().toString(); + Thread[] threads = new Thread[numberOfRequest]; + Phaser phaser = new Phaser(numberOfRequest + 1); + CountDownLatch countDownLatch = new CountDownLatch(numberOfRequest); + + List<LoadAwareCacheLoader<String, String>> loadAwareCacheLoaderList = new CopyOnWriteArrayList<>(); + + // Try to hit different requests with the same key concurrently. Loader returns null.
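+ // A null value from the loader is treated as an error: compute() completes the shared future exceptionally with a NullPointerException, which callers observe wrapped in an ExecutionException.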
+ for (int i = 0; i < numberOfRequest; i++) { + threads[i] = new Thread(() -> { + LoadAwareCacheLoader<String, String> loadAwareCacheLoader = new LoadAwareCacheLoader<>() { + boolean isLoaded; + + @Override + public boolean isLoaded() { + return isLoaded; + } + + @Override + public String load(String key) throws Exception { + isLoaded = true; + return null; + } + }; + loadAwareCacheLoaderList.add(loadAwareCacheLoader); + phaser.arriveAndAwaitAdvance(); + try { + ehcacheTest.computeIfAbsent(key, loadAwareCacheLoader); + } catch (Exception ex) { + assertThat(ex.getCause(), instanceOf(NullPointerException.class)); + } + assertThrows(ExecutionException.class, () -> ehcacheTest.computeIfAbsent(key, loadAwareCacheLoader)); + countDownLatch.countDown(); + }); + threads[i].start(); + } + phaser.arriveAndAwaitAdvance(); + countDownLatch.await(); + + assertEquals(0, ((EhcacheDiskCache) ehcacheTest).getCompletableFutureMap().size()); + ehcacheTest.close(); + } + } + + private static String generateRandomString(int length) { + String characters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"; + StringBuilder randomString = new StringBuilder(length); + + for (int i = 0; i < length; i++) { + int index = (int) (randomDouble() * characters.length()); + randomString.append(characters.charAt(index)); + } + + return randomString.toString(); + } + + static class MockRemovalListener<K, V> implements RemovalListener<K, V> { + + CounterMetric evictionMetric = new CounterMetric(); + + @Override + public void onRemoval(RemovalNotification<K, V> notification) { + evictionMetric.inc(); + } + } +} diff --git a/plugins/crypto-kms/build.gradle b/plugins/crypto-kms/build.gradle new file mode 100644 index 0000000000000..c4a8609b6df48 --- /dev/null +++ b/plugins/crypto-kms/build.gradle @@ -0,0 +1,74 @@ +import org.opensearch.gradle.info.BuildParams + +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +apply plugin: 'opensearch.build' +apply plugin: 'opensearch.publish' +apply plugin: 'opensearch.yaml-rest-test' + +opensearchplugin { + description 'AWS KMS plugin to provide crypto keys' + classname 'org.opensearch.crypto.kms.CryptoKmsPlugin' +} + +ext { + // Do not fail on `javadoc` warning (ANTLR generated code) + failOnJavadocWarning = false +} + +dependencies { + api "software.amazon.awssdk:sdk-core:${versions.aws}" + api "software.amazon.awssdk:aws-core:${versions.aws}" + api "software.amazon.awssdk:utils:${versions.aws}" + api "software.amazon.awssdk:auth:${versions.aws}" + api "software.amazon.awssdk:kms:${versions.aws}" + api "software.amazon.awssdk:http-client-spi:${versions.aws}" + api "software.amazon.awssdk:apache-client:${versions.aws}" + api "software.amazon.awssdk:regions:${versions.aws}" + api "software.amazon.awssdk:profiles:${versions.aws}" + api "software.amazon.awssdk:endpoints-spi:${versions.aws}" + api "software.amazon.awssdk:annotations:${versions.aws}" + api "software.amazon.awssdk:metrics-spi:${versions.aws}" + api "software.amazon.awssdk:json-utils:${versions.aws}" + api "software.amazon.awssdk:protocol-core:${versions.aws}" + api "software.amazon.awssdk:aws-query-protocol:${versions.aws}" + api "software.amazon.awssdk:aws-json-protocol:${versions.aws}" + api "software.amazon.awssdk:third-party-jackson-core:${versions.aws}" + api "org.apache.httpcomponents:httpclient:${versions.httpclient}" + api "org.apache.httpcomponents:httpcore:${versions.httpcore}" + api "commons-logging:commons-logging:${versions.commonslogging}" + api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" + api "org.slf4j:slf4j-api:${versions.slf4j}" + api "commons-codec:commons-codec:${versions.commonscodec}" + api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" + api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" + api "org.reactivestreams:reactive-streams:${versions.reactivestreams}" +} + +//testClusters.all { +// module ':modules:crypto' +//} + +tasks.named("dependencyLicenses").configure { + mapping from: /jackson-.*/, to: 'jackson' + mapping from: /jaxb-.*/, to: 'jaxb' + mapping from: /netty-.*/, to: 'netty' +} + +bundlePlugin { + from('config/crypto-kms') { + into 'config' + } +} + +thirdPartyAudit.enabled = false +testingConventions.enabled = false diff --git a/plugins/crypto-kms/config/crypto-kms/log4j2.properties b/plugins/crypto-kms/config/crypto-kms/log4j2.properties new file mode 100644 index 0000000000000..285ac4a1d1376 --- /dev/null +++ b/plugins/crypto-kms/config/crypto-kms/log4j2.properties @@ -0,0 +1,22 @@ +# +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details. 
+# + +logger.com_amazonaws.name = software.amazon.awssdk +logger.com_amazonaws.level = warn + +logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.name = software.amazon.awssdk.jmx.SdkMBeanRegistrySupport +logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.level = error + +logger.com_amazonaws_metrics_AwsSdkMetrics.name = software.amazon.awssdk.metrics.AwsSdkMetrics +logger.com_amazonaws_metrics_AwsSdkMetrics.level = error + +logger.com_amazonaws_auth_profile_internal_BasicProfileConfigFileLoader.name = software.amazon.awssdk.auth.profile.internal.BasicProfileConfigFileLoader +logger.com_amazonaws_auth_profile_internal_BasicProfileConfigFileLoader.level = error diff --git a/plugins/crypto-kms/licenses/annotations-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/annotations-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..8d30ad649916b --- /dev/null +++ b/plugins/crypto-kms/licenses/annotations-2.20.86.jar.sha1 @@ -0,0 +1 @@ +7e785e9ecb1230e52e9daa713335f38809ddcb74 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/grpc-context-LICENSE.txt b/plugins/crypto-kms/licenses/annotations-LICENSE.txt similarity index 100% rename from plugins/repository-gcs/licenses/grpc-context-LICENSE.txt rename to plugins/crypto-kms/licenses/annotations-LICENSE.txt diff --git a/plugins/crypto-kms/licenses/annotations-NOTICE.txt b/plugins/crypto-kms/licenses/annotations-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/annotations-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/crypto-kms/licenses/apache-client-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/apache-client-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..e7ae36581925c --- /dev/null +++ b/plugins/crypto-kms/licenses/apache-client-2.20.86.jar.sha1 @@ -0,0 +1 @@ +af31c4d3abec23b73061c6965364a6e3abbcc01a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-logs-LICENSE.txt b/plugins/crypto-kms/licenses/apache-client-LICENSE.txt similarity index 100% rename from plugins/telemetry-otel/licenses/opentelemetry-api-logs-LICENSE.txt rename to plugins/crypto-kms/licenses/apache-client-LICENSE.txt diff --git a/plugins/crypto-kms/licenses/apache-client-NOTICE.txt b/plugins/crypto-kms/licenses/apache-client-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/apache-client-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). 
+ + diff --git a/plugins/crypto-kms/licenses/auth-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/auth-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..e4c1b29cea894 --- /dev/null +++ b/plugins/crypto-kms/licenses/auth-2.20.86.jar.sha1 @@ -0,0 +1 @@ +f2da82f33776ce4814a3ab53b5ccb82a5d135936 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/auth-LICENSE.txt b/plugins/crypto-kms/licenses/auth-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/auth-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/crypto-kms/licenses/auth-NOTICE.txt b/plugins/crypto-kms/licenses/auth-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/auth-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/crypto-kms/licenses/aws-core-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/aws-core-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..d42a15c4da413 --- /dev/null +++ b/plugins/crypto-kms/licenses/aws-core-2.20.86.jar.sha1 @@ -0,0 +1 @@ +ea126147c3d17a8b3075e3122ec9c2b94fe1f6d5 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/aws-core-LICENSE.txt b/plugins/crypto-kms/licenses/aws-core-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/aws-core-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/crypto-kms/licenses/aws-core-NOTICE.txt b/plugins/crypto-kms/licenses/aws-core-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/aws-core-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/crypto-kms/licenses/aws-json-protocol-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/aws-json-protocol-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..ee08d240fbfba --- /dev/null +++ b/plugins/crypto-kms/licenses/aws-json-protocol-2.20.86.jar.sha1 @@ -0,0 +1 @@ +8b9d09c1aa9d3f2119267f0b6549ae1810512c7b \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/aws-json-protocol-LICENSE.txt b/plugins/crypto-kms/licenses/aws-json-protocol-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/aws-json-protocol-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/crypto-kms/licenses/aws-json-protocol-NOTICE.txt b/plugins/crypto-kms/licenses/aws-json-protocol-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/aws-json-protocol-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). 
+ + diff --git a/plugins/crypto-kms/licenses/aws-query-protocol-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/aws-query-protocol-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..9b19f570d56fb --- /dev/null +++ b/plugins/crypto-kms/licenses/aws-query-protocol-2.20.86.jar.sha1 @@ -0,0 +1 @@ +e001792ec1a681f5bc6ee4157d572173416304ad \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/aws-query-protocol-LICENSE.txt b/plugins/crypto-kms/licenses/aws-query-protocol-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/aws-query-protocol-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/crypto-kms/licenses/aws-query-protocol-NOTICE.txt b/plugins/crypto-kms/licenses/aws-query-protocol-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/aws-query-protocol-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/crypto-kms/licenses/commons-codec-1.15.jar.sha1 b/plugins/crypto-kms/licenses/commons-codec-1.15.jar.sha1 new file mode 100644 index 0000000000000..62d99837b87e1 --- /dev/null +++ b/plugins/crypto-kms/licenses/commons-codec-1.15.jar.sha1 @@ -0,0 +1 @@ +49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/commons-codec-LICENSE.txt b/plugins/crypto-kms/licenses/commons-codec-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/commons-codec-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/crypto-kms/licenses/commons-codec-NOTICE.txt b/plugins/crypto-kms/licenses/commons-codec-NOTICE.txt new file mode 100644 index 0000000000000..56916449bbe10 --- /dev/null +++ b/plugins/crypto-kms/licenses/commons-codec-NOTICE.txt @@ -0,0 +1,17 @@ +Apache Commons Codec +Copyright 2002-2015 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +src/test/org/apache/commons/codec/language/DoubleMetaphoneTest.java +contains test data from http://aspell.net/test/orig/batch0.tab. +Copyright (C) 2002 Kevin Atkinson (kevina@gnu.org) + +=============================================================================== + +The content of package org.apache.commons.codec.language.bm has been translated +from the original php source code available at http://stevemorse.org/phoneticinfo.htm +with permission from the original authors. +Original source copyright: +Copyright (c) 2008 Alexander Beider & Stephen P. Morse. diff --git a/plugins/crypto-kms/licenses/commons-logging-1.2.jar.sha1 b/plugins/crypto-kms/licenses/commons-logging-1.2.jar.sha1 new file mode 100644 index 0000000000000..f40f0242448e8 --- /dev/null +++ b/plugins/crypto-kms/licenses/commons-logging-1.2.jar.sha1 @@ -0,0 +1 @@ +4bfc12adfe4842bf07b657f0369c4cb522955686 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/commons-logging-LICENSE.txt b/plugins/crypto-kms/licenses/commons-logging-LICENSE.txt new file mode 100644 index 0000000000000..57bc88a15a0ee --- /dev/null +++ b/plugins/crypto-kms/licenses/commons-logging-LICENSE.txt @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
diff --git a/plugins/crypto-kms/licenses/commons-logging-NOTICE.txt b/plugins/crypto-kms/licenses/commons-logging-NOTICE.txt
new file mode 100644
index 0000000000000..72eb32a902458
--- /dev/null
+++ b/plugins/crypto-kms/licenses/commons-logging-NOTICE.txt
@@ -0,0 +1,5 @@
+Apache Commons Logging
+Copyright 2003-2014 The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
diff --git a/plugins/crypto-kms/licenses/endpoints-spi-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/endpoints-spi-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..16f9db1fd6327 --- /dev/null +++ b/plugins/crypto-kms/licenses/endpoints-spi-2.20.86.jar.sha1 @@ -0,0 +1 @@ +2b9075dd0ed32da97f95229f55c01425353e8cba \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/endpoints-spi-LICENSE.txt b/plugins/crypto-kms/licenses/endpoints-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/endpoints-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/crypto-kms/licenses/endpoints-spi-NOTICE.txt b/plugins/crypto-kms/licenses/endpoints-spi-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/endpoints-spi-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/crypto-kms/licenses/http-client-spi-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/http-client-spi-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..0662e15b1f3e6 --- /dev/null +++ b/plugins/crypto-kms/licenses/http-client-spi-2.20.86.jar.sha1 @@ -0,0 +1 @@ +776bfc86fabc6e8c792ea4650a281d0bec5e9708 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/http-client-spi-LICENSE.txt b/plugins/crypto-kms/licenses/http-client-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/http-client-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/crypto-kms/licenses/http-client-spi-NOTICE.txt b/plugins/crypto-kms/licenses/http-client-spi-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/http-client-spi-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/crypto-kms/licenses/httpclient-4.5.14.jar.sha1 b/plugins/crypto-kms/licenses/httpclient-4.5.14.jar.sha1 new file mode 100644 index 0000000000000..66e05851c2e3c --- /dev/null +++ b/plugins/crypto-kms/licenses/httpclient-4.5.14.jar.sha1 @@ -0,0 +1 @@ +1194890e6f56ec29177673f2f12d0b8e627dec98 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/httpclient-LICENSE.txt b/plugins/crypto-kms/licenses/httpclient-LICENSE.txt new file mode 100644 index 0000000000000..32f01eda18fe9 --- /dev/null +++ b/plugins/crypto-kms/licenses/httpclient-LICENSE.txt @@ -0,0 +1,558 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +========================================================================= + +This project includes Public Suffix List copied from +<https://publicsuffix.org/list/effective_tld_names.dat> +licensed under the terms of the Mozilla Public License, v. 2.0 + +Full license text: <http://mozilla.org/MPL/2.0/> + +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. 
"Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. 
+ +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. 
Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/plugins/crypto-kms/licenses/httpclient-NOTICE.txt b/plugins/crypto-kms/licenses/httpclient-NOTICE.txt new file mode 100644 index 0000000000000..4f6058178b201 --- /dev/null +++ b/plugins/crypto-kms/licenses/httpclient-NOTICE.txt @@ -0,0 +1,5 @@ +Apache HttpComponents Client +Copyright 1999-2015 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). diff --git a/plugins/crypto-kms/licenses/httpcore-4.4.16.jar.sha1 b/plugins/crypto-kms/licenses/httpcore-4.4.16.jar.sha1 new file mode 100644 index 0000000000000..172110694b5bd --- /dev/null +++ b/plugins/crypto-kms/licenses/httpcore-4.4.16.jar.sha1 @@ -0,0 +1 @@ +51cf043c87253c9f58b539c9f7e44c8894223850 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/httpcore-LICENSE.txt b/plugins/crypto-kms/licenses/httpcore-LICENSE.txt new file mode 100644 index 0000000000000..72819a9f06f2a --- /dev/null +++ b/plugins/crypto-kms/licenses/httpcore-LICENSE.txt @@ -0,0 +1,241 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +========================================================================= + +This project contains annotations in the package org.apache.http.annotation +which are derived from JCIP-ANNOTATIONS +Copyright (c) 2005 Brian Goetz and Tim Peierls. +See http://www.jcip.net and the Creative Commons Attribution License +(http://creativecommons.org/licenses/by/2.5) +Full text: http://creativecommons.org/licenses/by/2.5/legalcode + +License + +THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED. + +BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE TO BE BOUND BY THE TERMS OF THIS LICENSE. THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND CONDITIONS. + +1. Definitions + + "Collective Work" means a work, such as a periodical issue, anthology or encyclopedia, in which the Work in its entirety in unmodified form, along with a number of other contributions, constituting separate and independent works in themselves, are assembled into a collective whole. A work that constitutes a Collective Work will not be considered a Derivative Work (as defined below) for the purposes of this License. + "Derivative Work" means a work based upon the Work or upon the Work and other pre-existing works, such as a translation, musical arrangement, dramatization, fictionalization, motion picture version, sound recording, art reproduction, abridgment, condensation, or any other form in which the Work may be recast, transformed, or adapted, except that a work that constitutes a Collective Work will not be considered a Derivative Work for the purpose of this License. For the avoidance of doubt, where the Work is a musical composition or sound recording, the synchronization of the Work in timed-relation with a moving image ("synching") will be considered a Derivative Work for the purpose of this License. 
+ "Licensor" means the individual or entity that offers the Work under the terms of this License. + "Original Author" means the individual or entity who created the Work. + "Work" means the copyrightable work of authorship offered under the terms of this License. + "You" means an individual or entity exercising rights under this License who has not previously violated the terms of this License with respect to the Work, or who has received express permission from the Licensor to exercise rights under this License despite a previous violation. + +2. Fair Use Rights. Nothing in this license is intended to reduce, limit, or restrict any rights arising from fair use, first sale or other limitations on the exclusive rights of the copyright owner under copyright law or other applicable laws. + +3. License Grant. Subject to the terms and conditions of this License, Licensor hereby grants You a worldwide, royalty-free, non-exclusive, perpetual (for the duration of the applicable copyright) license to exercise the rights in the Work as stated below: + + to reproduce the Work, to incorporate the Work into one or more Collective Works, and to reproduce the Work as incorporated in the Collective Works; + to create and reproduce Derivative Works; + to distribute copies or phonorecords of, display publicly, perform publicly, and perform publicly by means of a digital audio transmission the Work including as incorporated in Collective Works; + to distribute copies or phonorecords of, display publicly, perform publicly, and perform publicly by means of a digital audio transmission Derivative Works. + + For the avoidance of doubt, where the work is a musical composition: + Performance Royalties Under Blanket Licenses. Licensor waives the exclusive right to collect, whether individually or via a performance rights society (e.g. ASCAP, BMI, SESAC), royalties for the public performance or public digital performance (e.g. webcast) of the Work. + Mechanical Rights and Statutory Royalties. Licensor waives the exclusive right to collect, whether individually or via a music rights agency or designated agent (e.g. Harry Fox Agency), royalties for any phonorecord You create from the Work ("cover version") and distribute, subject to the compulsory license created by 17 USC Section 115 of the US Copyright Act (or the equivalent in other jurisdictions). + Webcasting Rights and Statutory Royalties. For the avoidance of doubt, where the Work is a sound recording, Licensor waives the exclusive right to collect, whether individually or via a performance-rights society (e.g. SoundExchange), royalties for the public digital performance (e.g. webcast) of the Work, subject to the compulsory license created by 17 USC Section 114 of the US Copyright Act (or the equivalent in other jurisdictions). + +The above rights may be exercised in all media and formats whether now known or hereafter devised. The above rights include the right to make such modifications as are technically necessary to exercise the rights in other media and formats. All rights not expressly granted by Licensor are hereby reserved. + +4. 
Restrictions.The license granted in Section 3 above is expressly made subject to and limited by the following restrictions: + + You may distribute, publicly display, publicly perform, or publicly digitally perform the Work only under the terms of this License, and You must include a copy of, or the Uniform Resource Identifier for, this License with every copy or phonorecord of the Work You distribute, publicly display, publicly perform, or publicly digitally perform. You may not offer or impose any terms on the Work that alter or restrict the terms of this License or the recipients' exercise of the rights granted hereunder. You may not sublicense the Work. You must keep intact all notices that refer to this License and to the disclaimer of warranties. You may not distribute, publicly display, publicly perform, or publicly digitally perform the Work with any technological measures that control access or use of the Work in a manner inconsistent with the terms of this License Agreement. The above applies to the Work as incorporated in a Collective Work, but this does not require the Collective Work apart from the Work itself to be made subject to the terms of this License. If You create a Collective Work, upon notice from any Licensor You must, to the extent practicable, remove from the Collective Work any credit as required by clause 4(b), as requested. If You create a Derivative Work, upon notice from any Licensor You must, to the extent practicable, remove from the Derivative Work any credit as required by clause 4(b), as requested. + If you distribute, publicly display, publicly perform, or publicly digitally perform the Work or any Derivative Works or Collective Works, You must keep intact all copyright notices for the Work and provide, reasonable to the medium or means You are utilizing: (i) the name of the Original Author (or pseudonym, if applicable) if supplied, and/or (ii) if the Original Author and/or Licensor designate another party or parties (e.g. a sponsor institute, publishing entity, journal) for attribution in Licensor's copyright notice, terms of service or by other reasonable means, the name of such party or parties; the title of the Work if supplied; to the extent reasonably practicable, the Uniform Resource Identifier, if any, that Licensor specifies to be associated with the Work, unless such URI does not refer to the copyright notice or licensing information for the Work; and in the case of a Derivative Work, a credit identifying the use of the Work in the Derivative Work (e.g., "French translation of the Work by Original Author," or "Screenplay based on original Work by Original Author"). Such credit may be implemented in any reasonable manner; provided, however, that in the case of a Derivative Work or Collective Work, at a minimum such credit will appear where any other comparable authorship credit appears and in a manner at least as prominent as such other comparable authorship credit. + +5. Representations, Warranties and Disclaimer + +UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR OFFERS THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTIBILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS, WHETHER OR NOT DISCOVERABLE. 
SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU. + +6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE LAW, IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +7. Termination + + This License and the rights granted hereunder will terminate automatically upon any breach by You of the terms of this License. Individuals or entities who have received Derivative Works or Collective Works from You under this License, however, will not have their licenses terminated provided such individuals or entities remain in full compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8 will survive any termination of this License. + Subject to the above terms and conditions, the license granted here is perpetual (for the duration of the applicable copyright in the Work). Notwithstanding the above, Licensor reserves the right to release the Work under different license terms or to stop distributing the Work at any time; provided, however that any such election will not serve to withdraw this License (or any other license that has been, or is required to be, granted under the terms of this License), and this License will continue in full force and effect unless terminated as stated above. + +8. Miscellaneous + + Each time You distribute or publicly digitally perform the Work or a Collective Work, the Licensor offers to the recipient a license to the Work on the same terms and conditions as the license granted to You under this License. + Each time You distribute or publicly digitally perform a Derivative Work, Licensor offers to the recipient a license to the original Work on the same terms and conditions as the license granted to You under this License. + If any provision of this License is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this License, and without further action by the parties to this agreement, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. + No term or provision of this License shall be deemed waived and no breach consented to unless such waiver or consent shall be in writing and signed by the party to be charged with such waiver or consent. + This License constitutes the entire agreement between the parties with respect to the Work licensed here. There are no understandings, agreements or representations with respect to the Work not specified here. Licensor shall not be bound by any additional provisions that may appear in any communication from You. This License may not be modified without the mutual written agreement of the Licensor and You. diff --git a/plugins/crypto-kms/licenses/httpcore-NOTICE.txt b/plugins/crypto-kms/licenses/httpcore-NOTICE.txt new file mode 100644 index 0000000000000..c0be50a505ec1 --- /dev/null +++ b/plugins/crypto-kms/licenses/httpcore-NOTICE.txt @@ -0,0 +1,8 @@ +Apache HttpComponents Core +Copyright 2005-2014 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +This project contains annotations derived from JCIP-ANNOTATIONS +Copyright (c) 2005 Brian Goetz and Tim Peierls. 
See http://www.jcip.net diff --git a/plugins/crypto-kms/licenses/jackson-LICENSE b/plugins/crypto-kms/licenses/jackson-LICENSE new file mode 100644 index 0000000000000..f5f45d26a49d6 --- /dev/null +++ b/plugins/crypto-kms/licenses/jackson-LICENSE @@ -0,0 +1,8 @@ +This copy of Jackson JSON processor streaming parser/generator is licensed under the +Apache (Software) License, version 2.0 ("the License"). +See the License for details about distribution rights, and the +specific rights regarding derivate works. + +You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 diff --git a/plugins/crypto-kms/licenses/jackson-NOTICE b/plugins/crypto-kms/licenses/jackson-NOTICE new file mode 100644 index 0000000000000..4c976b7b4cc58 --- /dev/null +++ b/plugins/crypto-kms/licenses/jackson-NOTICE @@ -0,0 +1,20 @@ +# Jackson JSON processor + +Jackson is a high-performance, Free/Open Source JSON processing library. +It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has +been in development since 2007. +It is currently developed by a community of developers, as well as supported +commercially by FasterXML.com. + +## Licensing + +Jackson core and extension components may licensed under different licenses. +To find the details that apply to this artifact see the accompanying LICENSE file. +For more information, including possible other licensing options, contact +FasterXML.com (http://fasterxml.com). + +## Credits + +A list of contributors may be found from CREDITS file, which is included +in some artifacts (usually source distributions); but is always available +from the source code management (SCM) system project uses. diff --git a/plugins/crypto-kms/licenses/jackson-annotations-2.16.2.jar.sha1 b/plugins/crypto-kms/licenses/jackson-annotations-2.16.2.jar.sha1 new file mode 100644 index 0000000000000..d7dfc5ff83706 --- /dev/null +++ b/plugins/crypto-kms/licenses/jackson-annotations-2.16.2.jar.sha1 @@ -0,0 +1 @@ +dfcd11c847ea7276aa073c25f5fe8ee361748d7f \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/jackson-databind-2.16.2.jar.sha1 b/plugins/crypto-kms/licenses/jackson-databind-2.16.2.jar.sha1 new file mode 100644 index 0000000000000..86998b4558461 --- /dev/null +++ b/plugins/crypto-kms/licenses/jackson-databind-2.16.2.jar.sha1 @@ -0,0 +1 @@ +7fda67535b54d74eebf6157682b835c847410932 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/json-utils-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/json-utils-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..7011f8c3e6c78 --- /dev/null +++ b/plugins/crypto-kms/licenses/json-utils-2.20.86.jar.sha1 @@ -0,0 +1 @@ +5dd418ad48e3bfd8c3fa05ff29a955b91c1af666 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/json-utils-LICENSE.txt b/plugins/crypto-kms/licenses/json-utils-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/json-utils-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/crypto-kms/licenses/json-utils-NOTICE.txt b/plugins/crypto-kms/licenses/json-utils-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/json-utils-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/crypto-kms/licenses/kms-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/kms-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..32c4e9f432898 --- /dev/null +++ b/plugins/crypto-kms/licenses/kms-2.20.86.jar.sha1 @@ -0,0 +1 @@ +6a81c2f14acaa7b9dcdc80c715d6e44d815a818a \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/kms-LICENSE.txt b/plugins/crypto-kms/licenses/kms-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/kms-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/crypto-kms/licenses/kms-NOTICE.txt b/plugins/crypto-kms/licenses/kms-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/kms-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). 
+ + diff --git a/plugins/crypto-kms/licenses/log4j-1.2-api-2.21.0.jar.sha1 b/plugins/crypto-kms/licenses/log4j-1.2-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..39d9177cb2fac --- /dev/null +++ b/plugins/crypto-kms/licenses/log4j-1.2-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +12bad3819a9570807f3c97315930699584c12152 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/log4j-LICENSE.txt b/plugins/crypto-kms/licenses/log4j-LICENSE.txt new file mode 100644 index 0000000000000..6279e5206de13 --- /dev/null +++ b/plugins/crypto-kms/licenses/log4j-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 1999-2005 The Apache Software Foundation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/crypto-kms/licenses/log4j-NOTICE.txt b/plugins/crypto-kms/licenses/log4j-NOTICE.txt new file mode 100644 index 0000000000000..0375732360047 --- /dev/null +++ b/plugins/crypto-kms/licenses/log4j-NOTICE.txt @@ -0,0 +1,5 @@ +Apache log4j +Copyright 2007 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/metrics-spi-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/metrics-spi-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..bbd88bb9e1b0c --- /dev/null +++ b/plugins/crypto-kms/licenses/metrics-spi-2.20.86.jar.sha1 @@ -0,0 +1 @@ +74a65d0f8decd0b3057fb500ca5409ff5778752a \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/metrics-spi-LICENSE.txt b/plugins/crypto-kms/licenses/metrics-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/metrics-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/crypto-kms/licenses/metrics-spi-NOTICE.txt b/plugins/crypto-kms/licenses/metrics-spi-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/metrics-spi-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). 
+ + diff --git a/plugins/crypto-kms/licenses/profiles-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/profiles-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..425ce9b92f9f2 --- /dev/null +++ b/plugins/crypto-kms/licenses/profiles-2.20.86.jar.sha1 @@ -0,0 +1 @@ +27a8f4aa488d1d3ef947865ee0190f16d10a3cc7 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/profiles-LICENSE.txt b/plugins/crypto-kms/licenses/profiles-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/profiles-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/crypto-kms/licenses/profiles-NOTICE.txt b/plugins/crypto-kms/licenses/profiles-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/profiles-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/crypto-kms/licenses/protocol-core-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/protocol-core-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..8de58699d8d82 --- /dev/null +++ b/plugins/crypto-kms/licenses/protocol-core-2.20.86.jar.sha1 @@ -0,0 +1 @@ +bd85984ac6327a50d20e7957ecebf4fa3ad7766b \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/protocol-core-LICENSE.txt b/plugins/crypto-kms/licenses/protocol-core-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/protocol-core-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/crypto-kms/licenses/protocol-core-NOTICE.txt b/plugins/crypto-kms/licenses/protocol-core-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/protocol-core-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/crypto-kms/licenses/regions-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/regions-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..266bc76ad6f77 --- /dev/null +++ b/plugins/crypto-kms/licenses/regions-2.20.86.jar.sha1 @@ -0,0 +1 @@ +04fd460ce1c633986ecef1b4218d3e7067a7087d \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/regions-LICENSE.txt b/plugins/crypto-kms/licenses/regions-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/regions-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/crypto-kms/licenses/regions-NOTICE.txt b/plugins/crypto-kms/licenses/regions-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/regions-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). 
+ + diff --git a/plugins/crypto-kms/licenses/sdk-core-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/sdk-core-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..9eca40e6b9a9a --- /dev/null +++ b/plugins/crypto-kms/licenses/sdk-core-2.20.86.jar.sha1 @@ -0,0 +1 @@ +1b9df9ca5e4918fab05db3b703b2873e83104c30 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/sdk-core-LICENSE.txt b/plugins/crypto-kms/licenses/sdk-core-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/sdk-core-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/crypto-kms/licenses/sdk-core-NOTICE.txt b/plugins/crypto-kms/licenses/sdk-core-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/sdk-core-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/crypto-kms/licenses/slf4j-api-1.7.36.jar.sha1 b/plugins/crypto-kms/licenses/slf4j-api-1.7.36.jar.sha1 new file mode 100644 index 0000000000000..77b9917528382 --- /dev/null +++ b/plugins/crypto-kms/licenses/slf4j-api-1.7.36.jar.sha1 @@ -0,0 +1 @@ +6c62681a2f655b49963a5983b8b0950a6120ae14 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/slf4j-api-LICENSE.txt b/plugins/crypto-kms/licenses/slf4j-api-LICENSE.txt new file mode 100644 index 0000000000000..2be7689435062 --- /dev/null +++ b/plugins/crypto-kms/licenses/slf4j-api-LICENSE.txt @@ -0,0 +1,21 @@ +Copyright (c) 2004-2022 QOS.ch +All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/reactive-streams-NOTICE.txt b/plugins/crypto-kms/licenses/slf4j-api-NOTICE.txt similarity index 100% rename from plugins/discovery-ec2/licenses/reactive-streams-NOTICE.txt rename to plugins/crypto-kms/licenses/slf4j-api-NOTICE.txt diff --git a/plugins/crypto-kms/licenses/third-party-jackson-core-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/third-party-jackson-core-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..c9c3d4dc53505 --- /dev/null +++ b/plugins/crypto-kms/licenses/third-party-jackson-core-2.20.86.jar.sha1 @@ -0,0 +1 @@ +5ad6e7f7d52d8a5390b2daf2fd8ffcab97fe3102 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/third-party-jackson-core-LICENSE.txt b/plugins/crypto-kms/licenses/third-party-jackson-core-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/third-party-jackson-core-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/crypto-kms/licenses/third-party-jackson-core-NOTICE.txt b/plugins/crypto-kms/licenses/third-party-jackson-core-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/third-party-jackson-core-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/crypto-kms/licenses/utils-2.20.86.jar.sha1 b/plugins/crypto-kms/licenses/utils-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..b91a3b3047570 --- /dev/null +++ b/plugins/crypto-kms/licenses/utils-2.20.86.jar.sha1 @@ -0,0 +1 @@ +7a61f8b3c54ecf3dc785830d4f482f19ca52bc57 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/utils-LICENSE.txt b/plugins/crypto-kms/licenses/utils-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/utils-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/crypto-kms/licenses/utils-NOTICE.txt b/plugins/crypto-kms/licenses/utils-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/utils-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). 
+Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/AmazonKmsClientReference.java b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/AmazonKmsClientReference.java new file mode 100644 index 0000000000000..8cd07f681f9cd --- /dev/null +++ b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/AmazonKmsClientReference.java @@ -0,0 +1,24 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.crypto.kms; + +import software.amazon.awssdk.services.kms.KmsClient; + +import org.opensearch.common.concurrent.RefCountedReleasable; + +/** + * Handles the shutdown of the wrapped {@link KmsClient} using reference + * counting. + */ +public class AmazonKmsClientReference extends RefCountedReleasable<KmsClient> { + + AmazonKmsClientReference(KmsClient client) { + super("AWS_KMS_CLIENT", client, client::close); + } +} diff --git a/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/CredentialProviderFactory.java b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/CredentialProviderFactory.java new file mode 100644 index 0000000000000..87a10f1408b8d --- /dev/null +++ b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/CredentialProviderFactory.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.crypto.kms; + +import software.amazon.awssdk.auth.credentials.AwsCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.auth.credentials.ContainerCredentialsProvider; +import software.amazon.awssdk.auth.credentials.InstanceProfileCredentialsProvider; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.core.SdkSystemSetting; + +import java.util.function.Supplier; + +/** + * Creates credential providers based on the provided configuration. + */ +public class CredentialProviderFactory { + + /** + * Credential provider for EC2/ECS container + */ + static class PrivilegedInstanceProfileCredentialsProvider implements AwsCredentialsProvider { + private final AwsCredentialsProvider credentials; + + private PrivilegedInstanceProfileCredentialsProvider() { + this.credentials = initializeProvider(); + } + + private AwsCredentialsProvider initializeProvider() { + if (SdkSystemSetting.AWS_CONTAINER_CREDENTIALS_RELATIVE_URI.getStringValue().isPresent() + || SdkSystemSetting.AWS_CONTAINER_CREDENTIALS_FULL_URI.getStringValue().isPresent()) { + + return ContainerCredentialsProvider.builder().asyncCredentialUpdateEnabled(true).build(); + } + // InstanceProfileCredentialsProvider as last item of chain + return InstanceProfileCredentialsProvider.builder().asyncCredentialUpdateEnabled(true).build(); + } + + @Override + public AwsCredentials resolveCredentials() { + return SocketAccess.doPrivileged(credentials::resolveCredentials); + } + } + + /** + * Creates a credential provider based on the provided configuration. 
+ * @param staticCredsSupplier Static credentials are used in case the supplier returns a non-null instance. + * @return Credential provider instance. + */ + public AwsCredentialsProvider createAwsCredentialsProvider(Supplier<AwsCredentials> staticCredsSupplier) { + AwsCredentials awsCredentials = staticCredsSupplier.get(); + if (awsCredentials != null) { + return StaticCredentialsProvider.create(awsCredentials); + } + + // Add other credential providers here + return new PrivilegedInstanceProfileCredentialsProvider(); + } +} diff --git a/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/CryptoKmsPlugin.java b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/CryptoKmsPlugin.java new file mode 100644 index 0000000000000..f2ea9e37a0c09 --- /dev/null +++ b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/CryptoKmsPlugin.java @@ -0,0 +1,87 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.crypto.kms; + +import org.opensearch.SpecialPermission; +import org.opensearch.cluster.metadata.CryptoMetadata; +import org.opensearch.common.crypto.MasterKeyProvider; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.plugins.CryptoKeyProviderPlugin; +import org.opensearch.plugins.Plugin; +import org.opensearch.plugins.ReloadablePlugin; + +import java.util.Arrays; +import java.util.List; + +/** + * AWS KMS based crypto key provider plugin. + */ +public class CryptoKmsPlugin extends Plugin implements CryptoKeyProviderPlugin, ReloadablePlugin { + private static final String PROVIDER_NAME = "aws-kms"; + + static { + SpecialPermission.check(); + } + + private final Settings settings; + // protected for testing + protected final KmsService kmsService; + + public CryptoKmsPlugin(Settings settings) { + this(settings, new KmsService()); + } + + protected CryptoKmsPlugin(Settings settings, KmsService kmsService) { + this.settings = settings; + this.kmsService = kmsService; + // eagerly load client settings when secure settings are accessible + reload(settings); + } + + @Override + public MasterKeyProvider createKeyProvider(CryptoMetadata cryptoMetadata) { + return kmsService.createMasterKeyProvider(cryptoMetadata); + } + + @Override + public String type() { + return PROVIDER_NAME; + } + + @Override + public List<Setting<?>> getSettings() { + return Arrays.asList( + KmsClientSettings.ACCESS_KEY_SETTING, + KmsClientSettings.SECRET_KEY_SETTING, + KmsClientSettings.SESSION_TOKEN_SETTING, + KmsClientSettings.ENDPOINT_SETTING, + KmsClientSettings.REGION_SETTING, + KmsClientSettings.PROXY_HOST_SETTING, + KmsClientSettings.PROXY_PORT_SETTING, + KmsClientSettings.PROXY_USERNAME_SETTING, + KmsClientSettings.PROXY_PASSWORD_SETTING, + KmsClientSettings.READ_TIMEOUT_SETTING, + KmsService.ENC_CTX_SETTING, + KmsService.KEY_ARN_SETTING + ); + } + + @Override + public void reload(Settings settings) { + // secure settings should be readable + final KmsClientSettings clientSettings = KmsClientSettings.getClientSettings(settings); + kmsService.refreshAndClearCache(clientSettings); + } + + @Override + public void close() { + kmsService.close(); + } +}
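The plugin resolves client settings eagerly in its constructor via reload(settings). A minimal sketch of that wiring under assumed test-style setup (imports elided; MockSecureSettings is the OpenSearch test-framework stand-in for values that would normally live in opensearch.keystore, and all values are placeholders):

    MockSecureSettings secureSettings = new MockSecureSettings();
    secureSettings.setString("kms.access_key", "<access-key>"); // placeholder
    secureSettings.setString("kms.secret_key", "<secret-key>"); // placeholder
    Settings nodeSettings = Settings.builder()
        .put("kms.region", "us-east-1") // illustrative region
        .setSecureSettings(secureSettings)
        .build();
    // The constructor invokes reload(nodeSettings), which parses the kms.* settings
    // into KmsClientSettings and primes KmsService with them.
    CryptoKmsPlugin plugin = new CryptoKmsPlugin(nodeSettings);

The KmsClientSettings class that follows is what reload(...) delegates to for that parsing.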
diff --git a/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/KmsClientSettings.java b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/KmsClientSettings.java new file mode 100644 index 0000000000000..187a80a6355f7 --- /dev/null +++ b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/KmsClientSettings.java @@ -0,0 +1,258 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.crypto.kms; + +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentials; +import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.settings.SecureSetting; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Setting.Property; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.settings.SettingsException; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.settings.SecureString; + +import java.util.Locale; +import java.util.Objects; + +/** + * A container for settings used to create a kms client. + */ +public class KmsClientSettings { + + /** The access key (i.e. login id) for connecting to kms. */ + static final Setting<SecureString> ACCESS_KEY_SETTING = SecureSetting.secureString("kms.access_key", null); + + /** The secret key (i.e. password) for connecting to kms. */ + static final Setting<SecureString> SECRET_KEY_SETTING = SecureSetting.secureString("kms.secret_key", null); + + /** The session token for connecting to kms. */ + static final Setting<SecureString> SESSION_TOKEN_SETTING = SecureSetting.secureString("kms.session_token", null); + + /** The host name of a proxy to connect to kms through. */ + static final Setting<String> PROXY_HOST_SETTING = Setting.simpleString("kms.proxy.host", Property.NodeScope); + + /** The port of a proxy to connect to kms through. */ + static final Setting<Integer> PROXY_PORT_SETTING = Setting.intSetting("kms.proxy.port", 80, 0, 1 << 16, Property.NodeScope); + + /** An override for the kms endpoint to connect to. */ + static final Setting<String> ENDPOINT_SETTING = new Setting<>("kms.endpoint", "", s -> s.toLowerCase(Locale.ROOT), Property.NodeScope); + + /** An override for the scoping region for authentication. */ + static final Setting<String> REGION_SETTING = new Setting<>("kms.region", "", s -> s.toLowerCase(Locale.ROOT), Property.NodeScope); + + /** The username of a proxy to connect to kms through. */ + static final Setting<SecureString> PROXY_USERNAME_SETTING = SecureSetting.secureString("kms.proxy.username", null); + + /** The password of a proxy to connect to kms through. */ + static final Setting<SecureString> PROXY_PASSWORD_SETTING = SecureSetting.secureString("kms.proxy.password", null); + + /** The socket timeout for connecting to kms. */ + static final Setting<TimeValue> READ_TIMEOUT_SETTING = Setting.timeSetting( + "kms.read_timeout", + TimeValue.timeValueMillis(50_000), + Property.NodeScope + ); + + private static final Logger logger = LogManager.getLogger(KmsClientSettings.class); + + /** Credentials to authenticate with kms. */ + final AwsCredentials credentials; + + /** + * The kms endpoint the client should talk to, or empty string to use the + * default. + */ + final String endpoint; + + /** + * The kms signing region. + */ + final String region; + + /** An optional proxy host that requests to kms should be made through.
*/ + final String proxyHost; + + /** The port number the proxy host should be connected on. */ + final int proxyPort; + + // these should be "secure" yet the API for the kms client only takes String, so + // storing them as SecureString here won't really help with anything + /** An optional username for the proxy host, for basic authentication. */ + final String proxyUsername; + + /** An optional password for the proxy host, for basic authentication. */ + final String proxyPassword; + + /** The read timeout for the kms client. */ + final int readTimeoutMillis; + + protected KmsClientSettings( + AwsCredentials credentials, + String endpoint, + String region, + String proxyHost, + int proxyPort, + String proxyUsername, + String proxyPassword, + int readTimeoutMillis + ) { + this.credentials = credentials; + this.endpoint = endpoint; + this.region = region; + this.proxyHost = proxyHost; + this.proxyPort = proxyPort; + this.proxyUsername = proxyUsername; + this.proxyPassword = proxyPassword; + this.readTimeoutMillis = readTimeoutMillis; + } + + static AwsCredentials loadCredentials(Settings settings) { + try ( + SecureString key = ACCESS_KEY_SETTING.get(settings); + SecureString secret = SECRET_KEY_SETTING.get(settings); + SecureString sessionToken = SESSION_TOKEN_SETTING.get(settings) + ) { + if (key.length() == 0 && secret.length() == 0) { + if (sessionToken.length() > 0) { + throw new SettingsException( + "Setting [{}] is set but [{}] and [{}] are not", + SESSION_TOKEN_SETTING.getKey(), + ACCESS_KEY_SETTING.getKey(), + SECRET_KEY_SETTING.getKey() + ); + } + + logger.debug("Using either environment variables, system properties or instance profile credentials"); + return null; + } else { + if (key.length() == 0) { + throw new SettingsException( + "Setting [{}] is set but [{}] is not", + SECRET_KEY_SETTING.getKey(), + ACCESS_KEY_SETTING.getKey() + ); + } + if (secret.length() == 0) { + throw new SettingsException( + "Setting [{}] is set but [{}] is not", + ACCESS_KEY_SETTING.getKey(), + SECRET_KEY_SETTING.getKey() + ); + } + + final AwsCredentials credentials; + if (sessionToken.length() == 0) { + logger.debug("Using basic key/secret credentials"); + credentials = AwsBasicCredentials.create(key.toString(), secret.toString()); + } else { + logger.debug("Using basic session credentials"); + credentials = AwsSessionCredentials.create(key.toString(), secret.toString(), sessionToken.toString()); + } + return credentials; + } + } + } + + /** Parse settings for a single client. */ + static KmsClientSettings getClientSettings(Settings settings) { + final AwsCredentials credentials = loadCredentials(settings); + try ( + SecureString proxyUsername = PROXY_USERNAME_SETTING.get(settings); + SecureString proxyPassword = PROXY_PASSWORD_SETTING.get(settings) + ) { + return new KmsClientSettings( + credentials, + ENDPOINT_SETTING.get(settings), + REGION_SETTING.get(settings), + PROXY_HOST_SETTING.get(settings), + PROXY_PORT_SETTING.get(settings), + proxyUsername.toString(), + proxyPassword.toString(), + (int) READ_TIMEOUT_SETTING.get(settings).millis() + ); + } + }
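The sketch below summarizes the three credential outcomes loadCredentials(...) can produce (a fragment under assumed imports; nodeSettings is a placeholder Settings instance):

    // 1. Neither kms.access_key nor kms.secret_key set -> returns null, so the
    //    SDK default chain (env vars, system properties, instance profile) applies.
    // 2. Access key + secret key set                   -> AwsBasicCredentials.
    // 3. Access key + secret key + kms.session_token   -> AwsSessionCredentials.
    // Supplying only one of key/secret, or a session token alone, fails fast
    // with a SettingsException.
    AwsCredentials credentials = KmsClientSettings.loadCredentials(nodeSettings);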
+ + KmsClientSettings getMetadataSettings(Settings settings) { + AwsCredentials newCredentials = loadCredentials(settings); + newCredentials = newCredentials == null ? this.credentials : newCredentials; + final Settings normalizedSettings = Settings.builder().put(settings).normalizePrefix("kms.").build(); + + String newProxyUsername = this.proxyUsername, newProxyPassword = this.proxyPassword; + if (PROXY_USERNAME_SETTING.exists(normalizedSettings)) { + try (SecureString proxyUsername = PROXY_USERNAME_SETTING.get(normalizedSettings)) { + newProxyUsername = proxyUsername.toString(); + } + } + if (PROXY_PASSWORD_SETTING.exists(normalizedSettings)) { + try (SecureString proxyPassword = PROXY_PASSWORD_SETTING.get(normalizedSettings)) { + newProxyPassword = proxyPassword.toString(); + } + } + + String newEndpoint = getCryptoMetadataSettingOrExisting(ENDPOINT_SETTING, normalizedSettings, this.endpoint); + String newRegion = getCryptoMetadataSettingOrExisting(REGION_SETTING, normalizedSettings, this.region); + String newProxyHost = getCryptoMetadataSettingOrExisting(PROXY_HOST_SETTING, normalizedSettings, this.proxyHost); + int newProxyPort = getCryptoMetadataSettingOrExisting(PROXY_PORT_SETTING, normalizedSettings, this.proxyPort); + TimeValue newReadTimeout = getCryptoMetadataSettingOrExisting( + READ_TIMEOUT_SETTING, + normalizedSettings, + TimeValue.timeValueMillis(this.readTimeoutMillis) + ); + + return new KmsClientSettings( + newCredentials, + newEndpoint, + newRegion, + newProxyHost, + newProxyPort, + newProxyUsername, + newProxyPassword, + (int) newReadTimeout.millis() + ); + } + + private static <T> T getCryptoMetadataSettingOrExisting(Setting<T> setting, Settings normalizedSettings, T defaultValue) { + if (setting.exists(normalizedSettings)) { + return setting.get(normalizedSettings); + } + return defaultValue; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final KmsClientSettings that = (KmsClientSettings) o; + return readTimeoutMillis == that.readTimeoutMillis + && Objects.equals(credentials, that.credentials) + && Objects.equals(endpoint, that.endpoint) + && Objects.equals(region, that.region) + && Objects.equals(proxyHost, that.proxyHost) + && Objects.equals(proxyPort, that.proxyPort) + && Objects.equals(proxyUsername, that.proxyUsername) + && Objects.equals(proxyPassword, that.proxyPassword); + } + + @Override + public int hashCode() { + return Objects.hash(readTimeoutMillis, credentials, endpoint, region, proxyHost, proxyPort, proxyUsername, proxyPassword); + } +} diff --git a/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/KmsMasterKeyProvider.java b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/KmsMasterKeyProvider.java new file mode 100644 index 0000000000000..19e000628a9c8 --- /dev/null +++ b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/KmsMasterKeyProvider.java @@ -0,0 +1,83 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.crypto.kms; + +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.services.kms.model.DataKeySpec; +import software.amazon.awssdk.services.kms.model.DecryptRequest; +import software.amazon.awssdk.services.kms.model.DecryptResponse; +import software.amazon.awssdk.services.kms.model.GenerateDataKeyRequest; +import software.amazon.awssdk.services.kms.model.GenerateDataKeyResponse; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.crypto.DataKeyPair; +import org.opensearch.common.crypto.MasterKeyProvider; + +import java.util.Map; +import java.util.function.Supplier; + +public class KmsMasterKeyProvider implements MasterKeyProvider { + private final Map<String, String> encryptionContext; + private final String keyArn; + private final Supplier<AmazonKmsClientReference> clientReferenceSupplier; + + private static final Logger logger = LogManager.getLogger(KmsMasterKeyProvider.class); + + public KmsMasterKeyProvider( + Map<String, String> encryptionContext, + String keyArn, + Supplier<AmazonKmsClientReference> clientReferenceSupplier + ) { + this.encryptionContext = encryptionContext; + this.keyArn = keyArn; + this.clientReferenceSupplier = clientReferenceSupplier; + } + + @Override + public DataKeyPair generateDataPair() { + logger.info("Generating new data key pair"); + try (AmazonKmsClientReference clientReference = clientReferenceSupplier.get()) { + GenerateDataKeyRequest request = GenerateDataKeyRequest.builder() + .encryptionContext(encryptionContext) + // Currently only 32 byte data key is supported. To add support for other key sizes add key providers + // in org.opensearch.encryption.CryptoManagerFactory.createCryptoProvider. + .keySpec(DataKeySpec.AES_256) + .keyId(keyArn) + .build(); + GenerateDataKeyResponse dataKeyPair = SocketAccess.doPrivileged(() -> clientReference.get().generateDataKey(request)); + return new DataKeyPair(dataKeyPair.plaintext().asByteArray(), dataKeyPair.ciphertextBlob().asByteArray()); + } + } + + @Override + public byte[] decryptKey(byte[] encryptedKey) { + try (AmazonKmsClientReference clientReference = clientReferenceSupplier.get()) { + DecryptRequest decryptRequest = DecryptRequest.builder() + .ciphertextBlob(SdkBytes.fromByteArray(encryptedKey)) + .encryptionContext(encryptionContext) + .build(); + DecryptResponse decryptResponse = SocketAccess.doPrivileged(() -> clientReference.get().decrypt(decryptRequest)); + return decryptResponse.plaintext().asByteArray(); + } + } + + @Override + public String getKeyId() { + return keyArn; + } + + @Override + public Map<String, String> getEncryptionContext() { + return encryptionContext; + } + + @Override + public void close() {} +} diff --git a/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/KmsService.java b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/KmsService.java new file mode 100644 index 0000000000000..108c88bd3bf80 --- /dev/null +++ b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/KmsService.java @@ -0,0 +1,272 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.crypto.kms; + +import software.amazon.awssdk.auth.credentials.AwsCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; +import software.amazon.awssdk.core.exception.SdkException; +import software.amazon.awssdk.core.retry.RetryPolicy; +import software.amazon.awssdk.http.apache.ApacheHttpClient; +import software.amazon.awssdk.http.apache.ProxyConfiguration; +import software.amazon.awssdk.profiles.ProfileFileSystemSetting; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.kms.KmsClient; +import software.amazon.awssdk.services.kms.KmsClientBuilder; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.cluster.metadata.CryptoMetadata; +import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.collect.MapBuilder; +import org.opensearch.common.crypto.MasterKeyProvider; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.Strings; + +import java.io.Closeable; +import java.net.URI; +import java.net.URISyntaxException; +import java.time.Duration; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; +import java.util.stream.Collectors; + +import static java.util.Collections.emptyMap; + +/** + * Service class which exposes APIs for communication with AWS KMS. + */ +public class KmsService implements Closeable { + + private static final Logger logger = LogManager.getLogger(KmsService.class); + private final CredentialProviderFactory credentialProviderFactory; + + static final Setting<String> ENC_CTX_SETTING = Setting.simpleString("kms.encryption_context", Setting.Property.NodeScope); + + static final Setting<String> KEY_ARN_SETTING = Setting.simpleString("kms.key_arn", Setting.Property.NodeScope); + + private volatile Map<KmsClientSettings, AmazonKmsClientReference> clientsCache = emptyMap(); + + /** + * Client settings calculated from static configuration and settings in the keystore. 
+ */ + private volatile KmsClientSettings staticClientSettings; + + /** + * Client settings derived from those in {@link #staticClientSettings} by combining them with crypto settings + */ + private volatile Map<Settings, KmsClientSettings> derivedClientSettings; + + public KmsService() { + credentialProviderFactory = new CredentialProviderFactory(); + } + + private KmsClient buildClient(KmsClientSettings clientSettings) { + SocketAccess.doPrivilegedVoid(KmsService::setDefaultAwsProfilePath); + final AwsCredentialsProvider awsCredentialsProvider = buildCredentials(clientSettings); + final ClientOverrideConfiguration overrideConfiguration = buildOverrideConfiguration(); + final ProxyConfiguration proxyConfiguration = SocketAccess.doPrivileged(() -> buildProxyConfiguration(clientSettings)); + return buildClient( + awsCredentialsProvider, + proxyConfiguration, + overrideConfiguration, + clientSettings.endpoint, + clientSettings.region, + clientSettings.readTimeoutMillis + ); + } + + // proxy for testing + protected KmsClient buildClient( + AwsCredentialsProvider awsCredentialsProvider, + ProxyConfiguration proxyConfiguration, + ClientOverrideConfiguration overrideConfiguration, + String endpoint, + String region, + long readTimeoutMillis + ) { + ApacheHttpClient.Builder clientBuilder = ApacheHttpClient.builder() + .proxyConfiguration(proxyConfiguration) + .socketTimeout(Duration.ofMillis(readTimeoutMillis)); + + KmsClientBuilder builder = KmsClient.builder() + .region(Region.of(region)) + .overrideConfiguration(overrideConfiguration) + .httpClientBuilder(clientBuilder) + .credentialsProvider(awsCredentialsProvider); + + if (Strings.hasText(endpoint)) { + logger.debug("using explicit kms endpoint [{}]", endpoint); + builder.endpointOverride(URI.create(endpoint)); + } + + if (Strings.hasText(region)) { + logger.debug("using explicit kms region [{}]", region); + builder.region(Region.of(region)); + } + + return SocketAccess.doPrivileged(builder::build); + } + + ProxyConfiguration buildProxyConfiguration(KmsClientSettings clientSettings) { + if (Strings.hasText(clientSettings.proxyHost)) { + try { + return ProxyConfiguration.builder() + .endpoint(new URI("https", null, clientSettings.proxyHost, clientSettings.proxyPort, null, null, null)) + .username(clientSettings.proxyUsername) + .password(clientSettings.proxyPassword) + .build(); + } catch (URISyntaxException e) { + throw SdkException.create("Invalid proxy URL", e); + } + } else { + return ProxyConfiguration.builder().build(); + } + } + + ClientOverrideConfiguration buildOverrideConfiguration() { + return ClientOverrideConfiguration.builder().retryPolicy(buildRetryPolicy()).build(); + } + + // pkg private for tests + RetryPolicy buildRetryPolicy() { + // Increase the number of retries in case of 5xx API responses. + // Note that AWS SDK v2 introduced a concept of TokenBucketRetryCondition, which effectively limits retries for + // APIs that have been failing continuously. It allocates tokens (default is 500), which means that once 500 + // retries fail for any API on a bucket, new retries will only be allowed once some retries are rejected. 
+ // https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/core/retry/conditions/TokenBucketRetryCondition.html + RetryPolicy.Builder retryPolicy = RetryPolicy.builder().numRetries(10); + return retryPolicy.build(); + } + + AwsCredentialsProvider buildCredentials(KmsClientSettings clientSettings) { + final AwsCredentials credentials = clientSettings.credentials; + return credentialProviderFactory.createAwsCredentialsProvider(() -> credentials); + } + + public AmazonKmsClientReference client(CryptoMetadata cryptoMetadata) { + final KmsClientSettings clientSettings = settings(cryptoMetadata); + { + final AmazonKmsClientReference clientReference = clientsCache.get(clientSettings); + if (clientReference != null && clientReference.tryIncRef()) { + return clientReference; + } + } + synchronized (this) { + final AmazonKmsClientReference existing = clientsCache.get(clientSettings); + if (existing != null && existing.tryIncRef()) { + return existing; + } + final AmazonKmsClientReference clientReference = new AmazonKmsClientReference( + SocketAccess.doPrivileged(() -> buildClient(clientSettings)) + ); + clientReference.incRef(); + clientsCache = MapBuilder.newMapBuilder(clientsCache).put(clientSettings, clientReference).immutableMap(); + return clientReference; + } + } + + /** + * Either fetches {@link KmsClientSettings} for a given {@link CryptoMetadata} from cached settings or creates them + * by overriding static client settings from {@link #staticClientSettings} with settings found in the crypto metadata. + * @param cryptoMetadata Crypto Metadata + * @return KmsClientSettings + */ + KmsClientSettings settings(CryptoMetadata cryptoMetadata) { + final Settings settings = cryptoMetadata.settings(); + { + final KmsClientSettings existing = derivedClientSettings.get(settings); + if (existing != null) { + return existing; + } + } + synchronized (this) { + final KmsClientSettings existing = derivedClientSettings.get(settings); + if (existing != null) { + return existing; + } + final KmsClientSettings newSettings = staticClientSettings.getMetadataSettings(settings); + derivedClientSettings = MapBuilder.newMapBuilder(derivedClientSettings).put(settings, newSettings).immutableMap(); + return newSettings; + } + } + + /** + * Refreshes the settings for the AmazonKMS client. The new client will be built + * using these new settings. The old client is usable until released. On release it + * will be destroyed instead of being returned to the cache. + */ + public void refreshAndClearCache(KmsClientSettings clientSettings) { + // shut down all unused clients + // others will shut down on their respective release + releaseCachedClients(); + this.staticClientSettings = clientSettings; + derivedClientSettings = emptyMap(); + } + + private synchronized void releaseCachedClients() { + // the clients will shut down once they are no longer in use + for (final AmazonKmsClientReference clientReference : clientsCache.values()) { + clientReference.decRef(); + } + + // clear previously cached clients; they will be rebuilt lazily + clientsCache = emptyMap(); + derivedClientSettings = emptyMap(); + } + + @Override + public void close() { + releaseCachedClients(); + }
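This refresh contract is what CryptoKmsPlugin.reload(...) drives. A short sketch of the sequence (fragment; newNodeSettings is a placeholder for settings that carry refreshed keystore values):

    // Parse the refreshed settings and swap them in. Holders of an existing
    // AmazonKmsClientReference keep a working client until they release it;
    // the next call to client(cryptoMetadata) lazily builds a client against
    // the new settings.
    KmsClientSettings refreshed = KmsClientSettings.getClientSettings(newNodeSettings);
    kmsService.refreshAndClearCache(refreshed);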
+    @SuppressForbidden(reason = "Prevent AWS SDK v2 from using ~/.aws/config and ~/.aws/credentials.")
+    static void setDefaultAwsProfilePath() {
+        if (ProfileFileSystemSetting.AWS_SHARED_CREDENTIALS_FILE.getStringValue().isEmpty()) {
+            logger.info("setting aws.sharedCredentialsFile={}", System.getProperty("opensearch.path.conf"));
+            System.setProperty(ProfileFileSystemSetting.AWS_SHARED_CREDENTIALS_FILE.property(), System.getProperty("opensearch.path.conf"));
+        }
+        if (ProfileFileSystemSetting.AWS_CONFIG_FILE.getStringValue().isEmpty()) {
+            logger.info("setting aws.configFile={}", System.getProperty("opensearch.path.conf"));
+            System.setProperty(ProfileFileSystemSetting.AWS_CONFIG_FILE.property(), System.getProperty("opensearch.path.conf"));
+        }
+    }
+
+    public MasterKeyProvider createMasterKeyProvider(CryptoMetadata cryptoMetadata) {
+        Settings cryptoSettings = Settings.builder().put(cryptoMetadata.settings()).normalizePrefix("kms.").build();
+        String keyArn = KEY_ARN_SETTING.get(cryptoSettings);
+        if (!Strings.hasText(keyArn)) {
+            throw new IllegalArgumentException("Missing key_arn setting");
+        }
+
+        String kmsEncCtx = ENC_CTX_SETTING.get(cryptoSettings);
+        Map<String, String> encCtx;
+        if (Strings.hasText(kmsEncCtx)) {
+            try {
+                encCtx = Arrays.stream(kmsEncCtx.split(","))
+                    .map(s -> s.split("="))
+                    .collect(Collectors.toMap(e -> e[0].trim(), e -> e[1].trim()));
+            } catch (Exception ex) {
+                throw new IllegalArgumentException(ENC_CTX_SETTING.getKey() + " format should be: Name1=Value1, Name2=Value2");
+            }
+        } else {
+            encCtx = new HashMap<>();
+        }
+
+        // Verify that client creation succeeds, in order to detect any failure early.
+        try (AmazonKmsClientReference clientReference = client(cryptoMetadata)) {
+            clientReference.get();
+        }
+
+        return new KmsMasterKeyProvider(encCtx, keyArn, () -> client(cryptoMetadata));
+    }
+}
diff --git a/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/SocketAccess.java b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/SocketAccess.java
new file mode 100644
index 0000000000000..f3d0f278c7ce7
--- /dev/null
+++ b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/SocketAccess.java
@@ -0,0 +1,39 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.crypto.kms;
+
+import org.opensearch.SpecialPermission;
+
+import java.net.SocketPermission;
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+
+/**
+ * This plugin uses AWS libraries to connect to AWS services. For these remote calls the plugin needs
+ * {@link SocketPermission} 'connect' to establish connections. This class wraps the operations requiring access in
+ * {@link AccessController#doPrivileged(PrivilegedAction)} blocks.
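+ * <p>
+ * Illustrative usage, mirroring the call sites in {@code KmsService} (a sketch, not a normative API):
+ * <pre>
+ * SocketAccess.doPrivilegedVoid(KmsService::setDefaultAwsProfilePath);
+ * KmsClient client = SocketAccess.doPrivileged(builder::build);
+ * </pre>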
+ */
+@SuppressWarnings("removal")
+public final class SocketAccess {
+
+    private SocketAccess() {}
+
+    public static <T> T doPrivileged(PrivilegedAction<T> operation) {
+        SpecialPermission.check();
+        return AccessController.doPrivileged(operation);
+    }
+
+    public static void doPrivilegedVoid(Runnable action) {
+        SpecialPermission.check();
+        AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
+            action.run();
+            return null;
+        });
+    }
+}
diff --git a/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/package-info.java b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/package-info.java
new file mode 100644
index 0000000000000..787adc32d8941
--- /dev/null
+++ b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * Crypto plugin for encryption and decryption use cases.
+ */
+package org.opensearch.crypto.kms;
diff --git a/plugins/crypto-kms/src/main/plugin-metadata/plugin-security.policy b/plugins/crypto-kms/src/main/plugin-metadata/plugin-security.policy
new file mode 100644
index 0000000000000..46fb79010f9ef
--- /dev/null
+++ b/plugins/crypto-kms/src/main/plugin-metadata/plugin-security.policy
@@ -0,0 +1,44 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+grant {
+  // needed because of problems in ClientConfiguration
+  // TODO: get these fixed in aws sdk
+  permission java.lang.RuntimePermission "accessDeclaredMembers";
+  permission java.lang.RuntimePermission "getClassLoader";
+  permission java.lang.RuntimePermission "setContextClassLoader";
+  // Needed because of problems in kms client
+  // When no region is set on a kms client instance, the
+  // AWS SDK loads all known partitions from a JSON file and
+  // uses Jackson's ObjectMapper for that: this one, in
+  // version 2.5.3 with the default binding options, tries
+  // to suppress access checks of ctor/field/method and thus
+  // requires this special permission. AWS must be fixed to
+  // use Jackson correctly and have the correct modifiers
+  // on bound classes.
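+  // Illustration of the failure mode (assumed, based on the note above): while deserializing the
+  // bundled partitions JSON, Jackson calls setAccessible(true) on non-public constructors/fields;
+  // under the security manager that call is checked against ReflectPermission "suppressAccessChecks",
+  // so without the grant below client construction would fail with an AccessControlException.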
+  // TODO: get these fixed in aws sdk
+  // See https://github.com/aws/aws-sdk-java/issues/766
+  permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
+
+  permission java.lang.RuntimePermission "getClassLoader";
+
+  // kms client opens socket connections to kms
+  permission java.net.SocketPermission "*", "connect,resolve";
+
+  // kms client sets Authenticator for proxy username/password
+  permission java.net.NetPermission "setDefaultAuthenticator";
+
+  permission java.util.PropertyPermission "aws.sharedCredentialsFile", "read,write";
+  permission java.util.PropertyPermission "aws.configFile", "read,write";
+  permission java.util.PropertyPermission "aws.region", "read,write";
+  permission java.util.PropertyPermission "aws.accessKeyId", "read,write";
+  permission java.util.PropertyPermission "aws.secretAccessKey", "read,write";
+  permission java.util.PropertyPermission "opensearch.path.conf", "read,write";
+
+  permission java.io.FilePermission "config", "read";
+};
diff --git a/plugins/crypto-kms/src/test/java/org/opensearch/crypto/kms/AbstractAwsTestCase.java b/plugins/crypto-kms/src/test/java/org/opensearch/crypto/kms/AbstractAwsTestCase.java
new file mode 100644
index 0000000000000..3fe49f9d3b523
--- /dev/null
+++ b/plugins/crypto-kms/src/test/java/org/opensearch/crypto/kms/AbstractAwsTestCase.java
@@ -0,0 +1,72 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.crypto.kms;
+
+import software.amazon.awssdk.profiles.ProfileFileSystemSetting;
+
+import org.opensearch.common.SuppressForbidden;
+import org.opensearch.common.io.PathUtils;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.nio.file.Path;
+
+public abstract class AbstractAwsTestCase extends OpenSearchTestCase {
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        setUpAwsProfile();
+    }
+
+    @Override
+    public void tearDown() throws Exception {
+        resetAwsProfile();
+        super.tearDown();
+    }
+
+    private Path configPath() {
+        return PathUtils.get("config");
+    }
+
+    private String previousOpenSearchPathConf;
+    private String awsRegion;
+    private String awsAccessKeyId;
+    private String awsSecretAccessKey;
+    private String awsSharedCredentialsFile;
+    private String awsConfigFile;
+
+    @SuppressForbidden(reason = "set predictable aws defaults")
+    private void setUpAwsProfile() throws Exception {
+        previousOpenSearchPathConf = SocketAccess.doPrivileged(() -> System.setProperty("opensearch.path.conf", configPath().toString()));
+        awsRegion = SocketAccess.doPrivileged(() -> System.setProperty("aws.region", "us-west-2"));
+        awsAccessKeyId = SocketAccess.doPrivileged(() -> System.setProperty("aws.accessKeyId", "aws-access-key-id"));
+        awsSecretAccessKey = SocketAccess.doPrivileged(() -> System.setProperty("aws.secretAccessKey", "aws-secret-access-key"));
+        awsSharedCredentialsFile = System.getProperty(ProfileFileSystemSetting.AWS_SHARED_CREDENTIALS_FILE.property());
+        awsConfigFile = System.getProperty(ProfileFileSystemSetting.AWS_CONFIG_FILE.property());
+        SocketAccess.doPrivilegedVoid(KmsService::setDefaultAwsProfilePath);
+    }
+
+    @SuppressForbidden(reason = "reset aws settings")
+    private void resetAwsProfile() throws Exception {
+        resetPropertyValue("opensearch.path.conf", previousOpenSearchPathConf);
+        resetPropertyValue("aws.region", awsRegion);
+        resetPropertyValue("aws.accessKeyId", awsAccessKeyId);
+
resetPropertyValue("aws.secretAccessKey", awsSecretAccessKey); + resetPropertyValue(ProfileFileSystemSetting.AWS_SHARED_CREDENTIALS_FILE.property(), awsSharedCredentialsFile); + resetPropertyValue(ProfileFileSystemSetting.AWS_CONFIG_FILE.property(), awsConfigFile); + } + + @SuppressForbidden(reason = "reset aws settings") + private void resetPropertyValue(String key, String value) { + if (value != null) { + SocketAccess.doPrivileged(() -> System.setProperty(key, value)); + } else { + SocketAccess.doPrivileged(() -> System.clearProperty(key)); + } + } +} diff --git a/plugins/crypto-kms/src/test/java/org/opensearch/crypto/kms/CryptoKmsClientSettingsTests.java b/plugins/crypto-kms/src/test/java/org/opensearch/crypto/kms/CryptoKmsClientSettingsTests.java new file mode 100644 index 0000000000000..842d85faaa677 --- /dev/null +++ b/plugins/crypto-kms/src/test/java/org/opensearch/crypto/kms/CryptoKmsClientSettingsTests.java @@ -0,0 +1,164 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.crypto.kms; + +import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; +import software.amazon.awssdk.regions.Region; + +import org.opensearch.common.settings.MockSecureSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.settings.SettingsException; +import org.opensearch.common.unit.TimeValue; + +import java.io.IOException; + +public class CryptoKmsClientSettingsTests extends AbstractAwsTestCase { + + public void testNondefaultClientCreatedBySettingItsSettings() { + final KmsClientSettings settings = KmsClientSettings.getClientSettings( + Settings.builder().put("kms.endpoint", "custom_endpoint").build() + ); + + assertEquals(settings.endpoint, "custom_endpoint"); + // Check if defaults are still present + assertNotNull(settings.proxyHost); + } + + public void testRejectionOfLoneAccessKey() throws IOException { + try (final MockSecureSettings secureSettings = new MockSecureSettings()) { + secureSettings.setString("kms.access_key", "aws_secret"); + final SettingsException e = expectThrows( + SettingsException.class, + () -> KmsClientSettings.getClientSettings(Settings.builder().setSecureSettings(secureSettings).build()) + ); + assertTrue(e.getMessage().contains("Setting [kms.access_key] is set but [kms.secret_key] is not")); + } + } + + public void testRejectionOfLoneSecretKey() throws IOException { + try (final MockSecureSettings secureSettings = new MockSecureSettings()) { + secureSettings.setString("kms.secret_key", "aws_key"); + final SettingsException e = expectThrows( + SettingsException.class, + () -> KmsClientSettings.getClientSettings(Settings.builder().setSecureSettings(secureSettings).build()) + ); + assertTrue(e.getMessage().contains("Setting [kms.secret_key] is set but [kms.access_key] is not")); + } + } + + public void testRejectionOfLoneSessionToken() throws IOException { + try (final MockSecureSettings secureSettings = new MockSecureSettings()) { + secureSettings.setString("kms.session_token", "aws_session_token"); + final SettingsException e = expectThrows( + SettingsException.class, + () -> KmsClientSettings.getClientSettings(Settings.builder().setSecureSettings(secureSettings).build()) + ); + assertTrue(e.getMessage().contains("Setting [kms.session_token] is set but [kms.access_key] and [kms.secret_key] are not")); + } + } + + public void testDefaultEndpoint() 
{ + KmsClientSettings baseSettings = KmsClientSettings.getClientSettings(Settings.EMPTY); + assertEquals(baseSettings.endpoint, ""); + } + + public void testDefaultRegion() { + final Settings settings = Settings.builder().build(); + KmsClientSettings baseSettings = KmsClientSettings.getClientSettings(settings); + assertEquals(baseSettings.region, ""); + } + + public void testSpecificRegion() { + final Settings settings = Settings.builder().put(KmsClientSettings.REGION_SETTING.getKey(), "us-west-2").build(); + KmsClientSettings baseSettings = KmsClientSettings.getClientSettings(settings); + assertEquals(baseSettings.region, Region.US_WEST_2.toString()); + } + + public void testSpecificEndpoint() { + final Settings settings = Settings.builder().put(KmsClientSettings.ENDPOINT_SETTING.getKey(), "kms.endpoint").build(); + KmsClientSettings baseSettings = KmsClientSettings.getClientSettings(settings); + assertEquals(baseSettings.endpoint, "kms.endpoint"); + } + + public void testOverrideWithPrefixedMetadataSettings() { + overrideWithMetadataSettings("kms."); + } + + public void testOverrideWithNoPrefixMetadataSettings() { + overrideWithMetadataSettings(""); + } + + public void overrideWithMetadataSettings(String prefix) { + final MockSecureSettings secureSettings = new MockSecureSettings(); + String accessKey = "access_key", secretKey = "secret_key", sessionToken = "session_token"; + secureSettings.setString("kms.access_key", accessKey); + secureSettings.setString("kms.secret_key", secretKey); + secureSettings.setString("kms.session_token", sessionToken); + final KmsClientSettings baseSettings = KmsClientSettings.getClientSettings( + Settings.builder().setSecureSettings(secureSettings).build() + ); + + { + final KmsClientSettings refinedSettings = baseSettings.getMetadataSettings(Settings.EMPTY); + assertEquals(refinedSettings, baseSettings); + } + + { + final String endpoint = "some.host"; + final KmsClientSettings refinedSettings = baseSettings.getMetadataSettings( + Settings.builder().put(prefix + "endpoint", endpoint).build() + ); + assertEquals(refinedSettings.endpoint, endpoint); + validateCredsAreStillSame(refinedSettings, accessKey, secretKey, sessionToken); + } + + { + String region = "eu-west-1"; + final KmsClientSettings refinedSettings = baseSettings.getMetadataSettings( + Settings.builder().put(prefix + "region", region).build() + ); + assertEquals(refinedSettings.region, region); + validateCredsAreStillSame(refinedSettings, accessKey, secretKey, sessionToken); + } + + { + String proxyHost = "proxy-host"; + final KmsClientSettings refinedSettings = baseSettings.getMetadataSettings( + Settings.builder().put(prefix + "proxy.host", proxyHost).build() + ); + assertEquals(refinedSettings.proxyHost, proxyHost); + validateCredsAreStillSame(refinedSettings, accessKey, secretKey, sessionToken); + } + + { + int proxyPort = 70; + final KmsClientSettings refinedSettings = baseSettings.getMetadataSettings( + Settings.builder().put(prefix + "proxy.port", proxyPort).build() + ); + assertEquals(refinedSettings.proxyPort, proxyPort); + validateCredsAreStillSame(refinedSettings, accessKey, secretKey, sessionToken); + } + + { + TimeValue readTimeout = TimeValue.timeValueMillis(5000); + final KmsClientSettings refinedSettings = baseSettings.getMetadataSettings( + Settings.builder().put(prefix + "read_timeout", readTimeout).build() + ); + assertEquals(refinedSettings.readTimeoutMillis, readTimeout.getMillis()); + validateCredsAreStillSame(refinedSettings, accessKey, secretKey, sessionToken); + } + } + + 
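+    // Illustrative recap of the precedence exercised above (hypothetical values): node-level keystore
+    // entries such as kms.access_key stay in effect, while any endpoint/region/proxy/read_timeout found
+    // in the crypto metadata (with or without the "kms." prefix) overrides the node-level default.
+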
private void validateCredsAreStillSame(KmsClientSettings refinedSettings, String accessKey, String secretKey, String sessionToken) { + AwsSessionCredentials credentials = (AwsSessionCredentials) refinedSettings.credentials; + assertEquals(credentials.accessKeyId(), accessKey); + assertEquals(credentials.secretAccessKey(), secretKey); + assertEquals(credentials.sessionToken(), sessionToken); + } +} diff --git a/plugins/crypto-kms/src/test/java/org/opensearch/crypto/kms/KmsServiceTests.java b/plugins/crypto-kms/src/test/java/org/opensearch/crypto/kms/KmsServiceTests.java new file mode 100644 index 0000000000000..1424cce473592 --- /dev/null +++ b/plugins/crypto-kms/src/test/java/org/opensearch/crypto/kms/KmsServiceTests.java @@ -0,0 +1,255 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.crypto.kms; + +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; +import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; +import software.amazon.awssdk.core.retry.RetryPolicy; +import software.amazon.awssdk.http.apache.ProxyConfiguration; +import software.amazon.awssdk.services.kms.KmsClient; + +import org.opensearch.cluster.metadata.CryptoMetadata; +import org.opensearch.common.settings.MockSecureSettings; +import org.opensearch.common.settings.Settings; + +public class KmsServiceTests extends AbstractAwsTestCase { + private final CryptoMetadata cryptoMetadata = new CryptoMetadata("kp1", "kp2", Settings.EMPTY); + + public void testAWSDefaultConfiguration() { + try (KmsService kmsService = new KmsService()) { + // proxy configuration + final ProxyConfiguration proxyConfiguration = kmsService.buildProxyConfiguration( + KmsClientSettings.getClientSettings(Settings.EMPTY) + ); + + assertNull(proxyConfiguration.scheme()); + assertNull(proxyConfiguration.host()); + assertEquals(proxyConfiguration.port(), 0); + assertNull(proxyConfiguration.username()); + assertNull(proxyConfiguration.password()); + + // retry policy + RetryPolicy retryPolicyConfiguration = SocketAccess.doPrivileged(kmsService::buildRetryPolicy); + + assertEquals(retryPolicyConfiguration.numRetries().intValue(), 10); + + ClientOverrideConfiguration clientOverrideConfiguration = SocketAccess.doPrivileged(kmsService::buildOverrideConfiguration); + assertTrue(clientOverrideConfiguration.retryPolicy().isPresent()); + assertEquals(clientOverrideConfiguration.retryPolicy().get().numRetries().intValue(), 10); + } + } + + public void testAWSConfigurationWithAwsSettings() { + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("kms.proxy.username", "aws_proxy_username"); + secureSettings.setString("kms.proxy.password", "aws_proxy_password"); + + final Settings settings = Settings.builder() + // NOTE: a host cannot contain the _ character when parsed by URI, hence aws-proxy-host and not aws_proxy_host + .put("kms.proxy.host", "aws-proxy-host") + .put("kms.proxy.port", 8080) + .put("kms.read_timeout", "10s") + .setSecureSettings(secureSettings) + .build(); + + try (KmsService kmsService = new KmsService()) { + // proxy configuration + final ProxyConfiguration proxyConfiguration = 
SocketAccess.doPrivileged( + () -> kmsService.buildProxyConfiguration(KmsClientSettings.getClientSettings(settings)) + ); + + assertEquals(proxyConfiguration.host(), "aws-proxy-host"); + assertEquals(proxyConfiguration.port(), 8080); + assertEquals(proxyConfiguration.username(), "aws_proxy_username"); + assertEquals(proxyConfiguration.password(), "aws_proxy_password"); + + // retry policy + RetryPolicy retryPolicyConfiguration = SocketAccess.doPrivileged(kmsService::buildRetryPolicy); + assertEquals(retryPolicyConfiguration.numRetries().intValue(), 10); + + ClientOverrideConfiguration clientOverrideConfiguration = SocketAccess.doPrivileged(kmsService::buildOverrideConfiguration); + assertTrue(clientOverrideConfiguration.retryPolicy().isPresent()); + assertEquals(clientOverrideConfiguration.retryPolicy().get().numRetries().intValue(), 10); + } + } + + public void testClientSettingsReInit() { + final MockSecureSettings mockSecure1 = new MockSecureSettings(); + mockSecure1.setString(KmsClientSettings.ACCESS_KEY_SETTING.getKey(), "kms_access_1"); + mockSecure1.setString(KmsClientSettings.SECRET_KEY_SETTING.getKey(), "kms_secret_1"); + final boolean mockSecure1HasSessionToken = randomBoolean(); + if (mockSecure1HasSessionToken) { + mockSecure1.setString(KmsClientSettings.SESSION_TOKEN_SETTING.getKey(), "kms_session_token_1"); + } + mockSecure1.setString(KmsClientSettings.PROXY_USERNAME_SETTING.getKey(), "proxy_username_1"); + mockSecure1.setString(KmsClientSettings.PROXY_PASSWORD_SETTING.getKey(), "proxy_password_1"); + final Settings settings1 = Settings.builder() + .put(KmsClientSettings.PROXY_HOST_SETTING.getKey(), "proxy-host-1") + .put(KmsClientSettings.PROXY_PORT_SETTING.getKey(), 881) + .put(KmsClientSettings.REGION_SETTING.getKey(), "kms_region") + .put(KmsClientSettings.ENDPOINT_SETTING.getKey(), "kms_endpoint_1") + .setSecureSettings(mockSecure1) + .build(); + final MockSecureSettings mockSecure2 = new MockSecureSettings(); + mockSecure2.setString(KmsClientSettings.ACCESS_KEY_SETTING.getKey(), "kms_access_2"); + mockSecure2.setString(KmsClientSettings.SECRET_KEY_SETTING.getKey(), "kms_secret_2"); + final boolean mockSecure2HasSessionToken = randomBoolean(); + if (mockSecure2HasSessionToken) { + mockSecure2.setString(KmsClientSettings.SESSION_TOKEN_SETTING.getKey(), "kms_session_token_2"); + } + mockSecure2.setString(KmsClientSettings.PROXY_USERNAME_SETTING.getKey(), "proxy_username_2"); + mockSecure2.setString(KmsClientSettings.PROXY_PASSWORD_SETTING.getKey(), "proxy_password_2"); + final Settings settings2 = Settings.builder() + .put(KmsClientSettings.PROXY_HOST_SETTING.getKey(), "proxy-host-2") + .put(KmsClientSettings.PROXY_PORT_SETTING.getKey(), 882) + .put(KmsClientSettings.REGION_SETTING.getKey(), "kms_region") + .put(KmsClientSettings.ENDPOINT_SETTING.getKey(), "kms_endpoint_2") + .setSecureSettings(mockSecure2) + .build(); + try (CryptoKmsPluginMockTest plugin = new CryptoKmsPluginMockTest(settings1)) { + try (AmazonKmsClientReference clientReference = plugin.kmsService.client(cryptoMetadata)) { + { + final MockKmsClientTest mockKmsClientTest = (MockKmsClientTest) clientReference.get(); + assertEquals(mockKmsClientTest.endpoint, "kms_endpoint_1"); + + final AwsCredentials credentials = mockKmsClientTest.credentials.resolveCredentials(); + assertEquals(credentials.accessKeyId(), "kms_access_1"); + assertEquals(credentials.secretAccessKey(), "kms_secret_1"); + if (mockSecure1HasSessionToken) { + assertTrue(credentials instanceof AwsSessionCredentials); + 
assertEquals(((AwsSessionCredentials) credentials).sessionToken(), "kms_session_token_1"); + } else { + assertTrue(credentials instanceof AwsBasicCredentials); + } + + assertEquals( + mockKmsClientTest.proxyConfiguration.toString(), + "ProxyConfiguration(endpoint=https://proxy-host-1:881, username=proxy_username_1, preemptiveBasicAuthenticationEnabled=false)" + ); + assertEquals(mockKmsClientTest.proxyConfiguration.host(), "proxy-host-1"); + assertEquals(mockKmsClientTest.proxyConfiguration.port(), 881); + assertEquals(mockKmsClientTest.proxyConfiguration.username(), "proxy_username_1"); + assertEquals(mockKmsClientTest.proxyConfiguration.password(), "proxy_password_1"); + } + // reload secure settings2 + plugin.reload(settings2); + // client is not released, it is still using the old settings + { + final MockKmsClientTest mockKmsClientTest = (MockKmsClientTest) clientReference.get(); + assertEquals(mockKmsClientTest.endpoint, "kms_endpoint_1"); + + final AwsCredentials credentials = ((MockKmsClientTest) clientReference.get()).credentials.resolveCredentials(); + if (mockSecure1HasSessionToken) { + assertTrue(credentials instanceof AwsSessionCredentials); + assertEquals(((AwsSessionCredentials) credentials).sessionToken(), "kms_session_token_1"); + } else { + assertTrue(credentials instanceof AwsBasicCredentials); + } + + assertEquals( + mockKmsClientTest.proxyConfiguration.toString(), + "ProxyConfiguration(endpoint=https://proxy-host-1:881, username=proxy_username_1, preemptiveBasicAuthenticationEnabled=false)" + ); + assertEquals(mockKmsClientTest.proxyConfiguration.host(), "proxy-host-1"); + assertEquals(mockKmsClientTest.proxyConfiguration.port(), 881); + assertEquals(mockKmsClientTest.proxyConfiguration.username(), "proxy_username_1"); + assertEquals(mockKmsClientTest.proxyConfiguration.password(), "proxy_password_1"); + } + } + try (AmazonKmsClientReference clientReference = plugin.kmsService.client(cryptoMetadata)) { + final MockKmsClientTest mockKmsClientTest = (MockKmsClientTest) clientReference.get(); + assertEquals(mockKmsClientTest.endpoint, "kms_endpoint_2"); + + final AwsCredentials credentials = ((MockKmsClientTest) clientReference.get()).credentials.resolveCredentials(); + assertEquals(credentials.accessKeyId(), "kms_access_2"); + assertEquals(credentials.secretAccessKey(), "kms_secret_2"); + if (mockSecure2HasSessionToken) { + assertTrue(credentials instanceof AwsSessionCredentials); + assertEquals(((AwsSessionCredentials) credentials).sessionToken(), "kms_session_token_2"); + } else { + assertTrue(credentials instanceof AwsBasicCredentials); + } + + assertEquals( + mockKmsClientTest.proxyConfiguration.toString(), + "ProxyConfiguration(endpoint=https://proxy-host-2:882, username=proxy_username_2, preemptiveBasicAuthenticationEnabled=false)" + ); + assertEquals(mockKmsClientTest.proxyConfiguration.host(), "proxy-host-2"); + assertEquals(mockKmsClientTest.proxyConfiguration.port(), 882); + assertEquals(mockKmsClientTest.proxyConfiguration.username(), "proxy_username_2"); + assertEquals(mockKmsClientTest.proxyConfiguration.password(), "proxy_password_2"); + } + } + } + + static class CryptoKmsPluginMockTest extends CryptoKmsPlugin { + + CryptoKmsPluginMockTest(Settings settings) { + super(settings, new KmsService() { + @Override + protected KmsClient buildClient( + AwsCredentialsProvider credentials, + ProxyConfiguration proxyConfiguration, + ClientOverrideConfiguration overrideConfiguration, + String endpoint, + String region, + long readTimeoutMillis + ) { + return new 
MockKmsClientTest( + credentials, + proxyConfiguration, + overrideConfiguration, + endpoint, + region, + readTimeoutMillis + ); + } + }); + } + } + + static class MockKmsClientTest implements KmsClient { + + String endpoint; + final String region; + final AwsCredentialsProvider credentials; + final ClientOverrideConfiguration clientOverrideConfiguration; + final ProxyConfiguration proxyConfiguration; + final long readTimeoutMillis; + + MockKmsClientTest( + AwsCredentialsProvider credentials, + ProxyConfiguration proxyConfiguration, + ClientOverrideConfiguration clientOverrideConfiguration, + String endpoint, + String region, + long readTimeoutMillis + ) { + this.credentials = credentials; + this.proxyConfiguration = proxyConfiguration; + this.clientOverrideConfiguration = clientOverrideConfiguration; + this.endpoint = endpoint; + this.region = region; + this.readTimeoutMillis = readTimeoutMillis; + } + + @Override + public String serviceName() { + return "kms"; + } + + @Override + public void close() { + // ignore + } + } +} diff --git a/plugins/crypto-kms/src/yamlRestTest/java/org/opensearch/kms/CloudAwsClientYamlTestSuiteIT.java b/plugins/crypto-kms/src/yamlRestTest/java/org/opensearch/kms/CloudAwsClientYamlTestSuiteIT.java new file mode 100644 index 0000000000000..87e6691e40855 --- /dev/null +++ b/plugins/crypto-kms/src/yamlRestTest/java/org/opensearch/kms/CloudAwsClientYamlTestSuiteIT.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.kms; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.test.rest.yaml.ClientYamlTestCandidate; +import org.opensearch.test.rest.yaml.OpenSearchClientYamlSuiteTestCase; + +public class CloudAwsClientYamlTestSuiteIT extends OpenSearchClientYamlSuiteTestCase { + + public CloudAwsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable<Object[]> parameters() throws Exception { + return OpenSearchClientYamlSuiteTestCase.createParameters(); + } +} diff --git a/plugins/crypto-kms/src/yamlRestTest/resources/rest-api-spec/test/kms/10_basic.yml b/plugins/crypto-kms/src/yamlRestTest/resources/rest-api-spec/test/kms/10_basic.yml new file mode 100644 index 0000000000000..3d6c3056a6975 --- /dev/null +++ b/plugins/crypto-kms/src/yamlRestTest/resources/rest-api-spec/test/kms/10_basic.yml @@ -0,0 +1,16 @@ +# Integration tests for KMS component +# +"KMS loaded": + - skip: + reason: "contains is a newly added assertion" + features: contains + - do: + cluster.state: {} + + # Get cluster-manager node id + - set: { cluster_manager_node: cluster_manager } + + - do: + nodes.info: {} + + - contains: { nodes.$cluster_manager.plugins: { name: crypto-kms } } diff --git a/plugins/discovery-azure-classic/build.gradle b/plugins/discovery-azure-classic/build.gradle index 1fed446016647..c3d70e9c64968 100644 --- a/plugins/discovery-azure-classic/build.gradle +++ b/plugins/discovery-azure-classic/build.gradle @@ -53,7 +53,7 @@ dependencies { api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" api "commons-codec:commons-codec:${versions.commonscodec}" api "commons-lang:commons-lang:2.6" - api "commons-io:commons-io:2.13.0" + api "commons-io:commons-io:2.15.1" api 
'javax.mail:mail:1.4.7' api 'javax.inject:javax.inject:1' api "com.sun.jersey:jersey-client:${versions.jersey}" diff --git a/plugins/discovery-azure-classic/licenses/commons-io-2.13.0.jar.sha1 b/plugins/discovery-azure-classic/licenses/commons-io-2.13.0.jar.sha1 deleted file mode 100644 index c165136eb5822..0000000000000 --- a/plugins/discovery-azure-classic/licenses/commons-io-2.13.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8bb2bc9b4df17e2411533a0708a69f983bf5e83b \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/commons-io-2.15.1.jar.sha1 b/plugins/discovery-azure-classic/licenses/commons-io-2.15.1.jar.sha1 new file mode 100644 index 0000000000000..47c5d13812a36 --- /dev/null +++ b/plugins/discovery-azure-classic/licenses/commons-io-2.15.1.jar.sha1 @@ -0,0 +1 @@ +f11560da189ab563a5c8e351941415430e9304ea \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/commons-logging-LICENSE.txt b/plugins/discovery-azure-classic/licenses/commons-logging-LICENSE.txt index d645695673349..57bc88a15a0ee 100644 --- a/plugins/discovery-azure-classic/licenses/commons-logging-LICENSE.txt +++ b/plugins/discovery-azure-classic/licenses/commons-logging-LICENSE.txt @@ -1,4 +1,3 @@ - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -200,3 +199,4 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + diff --git a/plugins/discovery-azure-classic/licenses/commons-logging-NOTICE.txt b/plugins/discovery-azure-classic/licenses/commons-logging-NOTICE.txt index d3d6e140ce4f3..72eb32a902458 100644 --- a/plugins/discovery-azure-classic/licenses/commons-logging-NOTICE.txt +++ b/plugins/discovery-azure-classic/licenses/commons-logging-NOTICE.txt @@ -1,5 +1,5 @@ -Apache Commons Logging -Copyright 2003-2014 The Apache Software Foundation +Apache Commons CLI +Copyright 2001-2009 The Apache Software Foundation -This product includes software developed at +This product includes software developed by The Apache Software Foundation (http://www.apache.org/). 
diff --git a/plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.20.0.jar.sha1 b/plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.20.0.jar.sha1 deleted file mode 100644 index 9829576d38ce0..0000000000000 --- a/plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -689151374756cb809cb029f2501015bdc7733179 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.21.0.jar.sha1 b/plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..39d9177cb2fac --- /dev/null +++ b/plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +12bad3819a9570807f3c97315930699584c12152 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/src/internalClusterTest/java/org/opensearch/cloud/azure/classic/AbstractAzureComputeServiceTestCase.java b/plugins/discovery-azure-classic/src/internalClusterTest/java/org/opensearch/cloud/azure/classic/AbstractAzureComputeServiceTestCase.java index f95f358532bac..9c4b577df3e5f 100644 --- a/plugins/discovery-azure-classic/src/internalClusterTest/java/org/opensearch/cloud/azure/classic/AbstractAzureComputeServiceTestCase.java +++ b/plugins/discovery-azure-classic/src/internalClusterTest/java/org/opensearch/cloud/azure/classic/AbstractAzureComputeServiceTestCase.java @@ -39,7 +39,6 @@ import com.microsoft.windowsazure.management.compute.models.RoleInstance; import com.microsoft.windowsazure.management.compute.models.RoleInstancePowerState; import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; - import org.opensearch.cloud.azure.classic.management.AzureComputeService; import org.opensearch.cloud.azure.classic.management.AzureComputeService.Discovery; import org.opensearch.cloud.azure.classic.management.AzureComputeService.Management; @@ -47,13 +46,12 @@ import org.opensearch.common.network.NetworkAddress; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.discovery.azure.classic.AzureSeedHostsProvider; import org.opensearch.plugin.discovery.azure.classic.AzureDiscoveryPlugin; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.transport.TransportService; - import org.junit.After; import java.util.ArrayList; @@ -63,7 +61,7 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; -import static org.opensearch.common.util.CollectionUtils.newSingletonArrayList; +import static org.opensearch.core.common.util.CollectionUtils.newSingletonArrayList; import static org.opensearch.discovery.DiscoveryModule.DISCOVERY_SEED_PROVIDERS_SETTING; public abstract class AbstractAzureComputeServiceTestCase extends OpenSearchIntegTestCase { diff --git a/plugins/discovery-azure-classic/src/internalClusterTest/java/org/opensearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java b/plugins/discovery-azure-classic/src/internalClusterTest/java/org/opensearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java index 2615460c53602..a4b733ec7d894 100644 --- a/plugins/discovery-azure-classic/src/internalClusterTest/java/org/opensearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java +++ 
b/plugins/discovery-azure-classic/src/internalClusterTest/java/org/opensearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java @@ -32,17 +32,18 @@ package org.opensearch.discovery.azure.classic; -import com.microsoft.windowsazure.management.compute.models.DeploymentSlot; -import com.microsoft.windowsazure.management.compute.models.DeploymentStatus; import com.sun.net.httpserver.Headers; import com.sun.net.httpserver.HttpsConfigurator; import com.sun.net.httpserver.HttpsServer; + +import com.microsoft.windowsazure.management.compute.models.DeploymentSlot; +import com.microsoft.windowsazure.management.compute.models.DeploymentStatus; import org.apache.logging.log4j.LogManager; import org.opensearch.cloud.azure.classic.management.AzureComputeService; import org.opensearch.common.SuppressForbidden; -import org.opensearch.core.util.FileSystemUtils; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.core.util.FileSystemUtils; import org.opensearch.discovery.DiscoveryModule; import org.opensearch.env.Environment; import org.opensearch.node.Node; @@ -62,6 +63,7 @@ import javax.xml.stream.XMLOutputFactory; import javax.xml.stream.XMLStreamException; import javax.xml.stream.XMLStreamWriter; + import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -294,6 +296,7 @@ private static SSLContext getSSLContext() throws Exception { * The {@link HttpsServer} in the JDK has issues with TLSv1.3 when running in a JDK prior to * 12.0.1 so we pin to TLSv1.2 when running on an earlier JDK */ + @SuppressWarnings("removal") private static String getProtocol() { if (Runtime.version().compareTo(Version.parse("12")) < 0) { return "TLSv1.2"; diff --git a/plugins/discovery-azure-classic/src/main/java/org/opensearch/cloud/azure/classic/management/AzureComputeServiceImpl.java b/plugins/discovery-azure-classic/src/main/java/org/opensearch/cloud/azure/classic/management/AzureComputeServiceImpl.java index 9dbf08a3e1a01..6e21feca7f5fb 100644 --- a/plugins/discovery-azure-classic/src/main/java/org/opensearch/cloud/azure/classic/management/AzureComputeServiceImpl.java +++ b/plugins/discovery-azure-classic/src/main/java/org/opensearch/cloud/azure/classic/management/AzureComputeServiceImpl.java @@ -32,12 +32,6 @@ package org.opensearch.cloud.azure.classic.management; -import java.io.IOException; -import java.security.AccessController; -import java.security.PrivilegedActionException; -import java.security.PrivilegedExceptionAction; -import java.util.ServiceLoader; - import com.microsoft.windowsazure.Configuration; import com.microsoft.windowsazure.core.Builder; import com.microsoft.windowsazure.core.DefaultBuilder; @@ -51,11 +45,17 @@ import org.opensearch.OpenSearchException; import org.opensearch.SpecialPermission; import org.opensearch.cloud.azure.classic.AzureServiceRemoteException; -import org.opensearch.common.component.AbstractLifecycleComponent; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.Strings; +import java.io.IOException; +import java.security.AccessController; +import java.security.PrivilegedActionException; +import java.security.PrivilegedExceptionAction; +import java.util.ServiceLoader; + public class AzureComputeServiceImpl extends AbstractLifecycleComponent implements AzureComputeService { private static final Logger logger = 
LogManager.getLogger(AzureComputeServiceImpl.class); @@ -112,6 +112,7 @@ private static String getRequiredSetting(Settings settings, Setting<String> sett return value; } + @SuppressWarnings("removal") @Override public HostedServiceGetDetailedResponse getServiceDetails() { SpecialPermission.check(); diff --git a/plugins/discovery-azure-classic/src/main/java/org/opensearch/discovery/azure/classic/AzureSeedHostsProvider.java b/plugins/discovery-azure-classic/src/main/java/org/opensearch/discovery/azure/classic/AzureSeedHostsProvider.java index e2bc180876a17..9d5958fa2d40d 100644 --- a/plugins/discovery-azure-classic/src/main/java/org/opensearch/discovery/azure/classic/AzureSeedHostsProvider.java +++ b/plugins/discovery-azure-classic/src/main/java/org/opensearch/discovery/azure/classic/AzureSeedHostsProvider.java @@ -37,7 +37,6 @@ import com.microsoft.windowsazure.management.compute.models.HostedServiceGetDetailedResponse; import com.microsoft.windowsazure.management.compute.models.InstanceEndpoint; import com.microsoft.windowsazure.management.compute.models.RoleInstance; - import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.cloud.azure.classic.AzureServiceDisableException; @@ -48,9 +47,9 @@ import org.opensearch.common.network.NetworkAddress; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.discovery.SeedHostsProvider; import org.opensearch.transport.TransportService; diff --git a/plugins/discovery-ec2/licenses/annotations-2.20.55.jar.sha1 b/plugins/discovery-ec2/licenses/annotations-2.20.55.jar.sha1 deleted file mode 100644 index 5a626eeb5725b..0000000000000 --- a/plugins/discovery-ec2/licenses/annotations-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -330e9d0e5f2401fffba5afe30f3740f400e8308d \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/annotations-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/annotations-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..8d30ad649916b --- /dev/null +++ b/plugins/discovery-ec2/licenses/annotations-2.20.86.jar.sha1 @@ -0,0 +1 @@ +7e785e9ecb1230e52e9daa713335f38809ddcb74 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/apache-client-2.20.55.jar.sha1 b/plugins/discovery-ec2/licenses/apache-client-2.20.55.jar.sha1 deleted file mode 100644 index 3ee96bb6e4076..0000000000000 --- a/plugins/discovery-ec2/licenses/apache-client-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5c149885667d41a306769505cfa481cfddf6f113 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/apache-client-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/apache-client-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..e7ae36581925c --- /dev/null +++ b/plugins/discovery-ec2/licenses/apache-client-2.20.86.jar.sha1 @@ -0,0 +1 @@ +af31c4d3abec23b73061c6965364a6e3abbcc01a \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/auth-2.20.55.jar.sha1 b/plugins/discovery-ec2/licenses/auth-2.20.55.jar.sha1 deleted file mode 100644 index 010464bdf9fd1..0000000000000 --- a/plugins/discovery-ec2/licenses/auth-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e21f00a8a2096d5044f3eff176944256e01a175e \ No newline at end of file diff --git 
a/plugins/discovery-ec2/licenses/auth-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/auth-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..e4c1b29cea894 --- /dev/null +++ b/plugins/discovery-ec2/licenses/auth-2.20.86.jar.sha1 @@ -0,0 +1 @@ +f2da82f33776ce4814a3ab53b5ccb82a5d135936 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-core-2.20.55.jar.sha1 b/plugins/discovery-ec2/licenses/aws-core-2.20.55.jar.sha1 deleted file mode 100644 index 4b4ee1db864a8..0000000000000 --- a/plugins/discovery-ec2/licenses/aws-core-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -734427c2cece98a8cb90871b78d2311e4a7ef746 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-core-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/aws-core-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..d42a15c4da413 --- /dev/null +++ b/plugins/discovery-ec2/licenses/aws-core-2.20.86.jar.sha1 @@ -0,0 +1 @@ +ea126147c3d17a8b3075e3122ec9c2b94fe1f6d5 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-json-protocol-2.20.55.jar.sha1 b/plugins/discovery-ec2/licenses/aws-json-protocol-2.20.55.jar.sha1 deleted file mode 100644 index 45a88305c1928..0000000000000 --- a/plugins/discovery-ec2/licenses/aws-json-protocol-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a52731c86b974aefa5bbb1c545f407811a0163b1 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-json-protocol-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/aws-json-protocol-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..ee08d240fbfba --- /dev/null +++ b/plugins/discovery-ec2/licenses/aws-json-protocol-2.20.86.jar.sha1 @@ -0,0 +1 @@ +8b9d09c1aa9d3f2119267f0b6549ae1810512c7b \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-query-protocol-2.20.55.jar.sha1 b/plugins/discovery-ec2/licenses/aws-query-protocol-2.20.55.jar.sha1 deleted file mode 100644 index ba5f43378730c..0000000000000 --- a/plugins/discovery-ec2/licenses/aws-query-protocol-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ac116215cc85366f0bdffee53b4c21e7a7fe03ef \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-query-protocol-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/aws-query-protocol-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..9b19f570d56fb --- /dev/null +++ b/plugins/discovery-ec2/licenses/aws-query-protocol-2.20.86.jar.sha1 @@ -0,0 +1 @@ +e001792ec1a681f5bc6ee4157d572173416304ad \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/ec2-2.20.55.jar.sha1 b/plugins/discovery-ec2/licenses/ec2-2.20.55.jar.sha1 deleted file mode 100644 index f123343bfe27e..0000000000000 --- a/plugins/discovery-ec2/licenses/ec2-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c095e527442835130b18387da6b1d01f365a6dbf \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/ec2-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/ec2-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..18c43cfc7516d --- /dev/null +++ b/plugins/discovery-ec2/licenses/ec2-2.20.86.jar.sha1 @@ -0,0 +1 @@ +3522a0829622a9c80152e6e2528bb79166f0b709 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/endpoints-spi-2.20.55.jar.sha1 b/plugins/discovery-ec2/licenses/endpoints-spi-2.20.55.jar.sha1 deleted file mode 100644 index 5bc0e31166c77..0000000000000 --- a/plugins/discovery-ec2/licenses/endpoints-spi-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -085f82038ee86a7d6cd568fe7edd842978d92de3 \ No newline at end of 
file diff --git a/plugins/discovery-ec2/licenses/endpoints-spi-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/endpoints-spi-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..16f9db1fd6327 --- /dev/null +++ b/plugins/discovery-ec2/licenses/endpoints-spi-2.20.86.jar.sha1 @@ -0,0 +1 @@ +2b9075dd0ed32da97f95229f55c01425353e8cba \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/http-client-spi-2.20.55.jar.sha1 b/plugins/discovery-ec2/licenses/http-client-spi-2.20.55.jar.sha1 deleted file mode 100644 index 523cf43dcb2e9..0000000000000 --- a/plugins/discovery-ec2/licenses/http-client-spi-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -34f9b10c1a46038a0ceebdd750ba3a413a862ceb \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/http-client-spi-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/http-client-spi-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..0662e15b1f3e6 --- /dev/null +++ b/plugins/discovery-ec2/licenses/http-client-spi-2.20.86.jar.sha1 @@ -0,0 +1 @@ +776bfc86fabc6e8c792ea4650a281d0bec5e9708 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-annotations-2.15.2.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-annotations-2.15.2.jar.sha1 deleted file mode 100644 index f63416ddb8ceb..0000000000000 --- a/plugins/discovery-ec2/licenses/jackson-annotations-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4724a65ac8e8d156a24898d50fd5dbd3642870b8 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-annotations-2.16.2.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-annotations-2.16.2.jar.sha1 new file mode 100644 index 0000000000000..d7dfc5ff83706 --- /dev/null +++ b/plugins/discovery-ec2/licenses/jackson-annotations-2.16.2.jar.sha1 @@ -0,0 +1 @@ +dfcd11c847ea7276aa073c25f5fe8ee361748d7f \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.15.2.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.15.2.jar.sha1 deleted file mode 100644 index f16d80af8dce6..0000000000000 --- a/plugins/discovery-ec2/licenses/jackson-databind-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9353b021f10c307c00328f52090de2bdb4b6ff9c \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.16.2.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.16.2.jar.sha1 new file mode 100644 index 0000000000000..86998b4558461 --- /dev/null +++ b/plugins/discovery-ec2/licenses/jackson-databind-2.16.2.jar.sha1 @@ -0,0 +1 @@ +7fda67535b54d74eebf6157682b835c847410932 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/json-utils-2.20.55.jar.sha1 b/plugins/discovery-ec2/licenses/json-utils-2.20.55.jar.sha1 deleted file mode 100644 index a19b00e62f8b5..0000000000000 --- a/plugins/discovery-ec2/licenses/json-utils-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cd6710900e3190eac4c4496ae529ce08680dd320 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/json-utils-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/json-utils-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..7011f8c3e6c78 --- /dev/null +++ b/plugins/discovery-ec2/licenses/json-utils-2.20.86.jar.sha1 @@ -0,0 +1 @@ +5dd418ad48e3bfd8c3fa05ff29a955b91c1af666 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/log4j-1.2-api-2.20.0.jar.sha1 b/plugins/discovery-ec2/licenses/log4j-1.2-api-2.20.0.jar.sha1 deleted file mode 100644 index 9829576d38ce0..0000000000000 --- 
a/plugins/discovery-ec2/licenses/log4j-1.2-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -689151374756cb809cb029f2501015bdc7733179 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/log4j-1.2-api-2.21.0.jar.sha1 b/plugins/discovery-ec2/licenses/log4j-1.2-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..39d9177cb2fac --- /dev/null +++ b/plugins/discovery-ec2/licenses/log4j-1.2-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +12bad3819a9570807f3c97315930699584c12152 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/metrics-spi-2.20.55.jar.sha1 b/plugins/discovery-ec2/licenses/metrics-spi-2.20.55.jar.sha1 deleted file mode 100644 index db6701d87892a..0000000000000 --- a/plugins/discovery-ec2/licenses/metrics-spi-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8a0eae705b27465516f3b09cc9918e40963d534d \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/metrics-spi-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/metrics-spi-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..bbd88bb9e1b0c --- /dev/null +++ b/plugins/discovery-ec2/licenses/metrics-spi-2.20.86.jar.sha1 @@ -0,0 +1 @@ +74a65d0f8decd0b3057fb500ca5409ff5778752a \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/profiles-2.20.55.jar.sha1 b/plugins/discovery-ec2/licenses/profiles-2.20.55.jar.sha1 deleted file mode 100644 index b7104cf0939e6..0000000000000 --- a/plugins/discovery-ec2/licenses/profiles-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -959aad08b2f24057bf286c761b49e3af31a0a623 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/profiles-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/profiles-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..425ce9b92f9f2 --- /dev/null +++ b/plugins/discovery-ec2/licenses/profiles-2.20.86.jar.sha1 @@ -0,0 +1 @@ +27a8f4aa488d1d3ef947865ee0190f16d10a3cc7 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/protocol-core-2.20.55.jar.sha1 b/plugins/discovery-ec2/licenses/protocol-core-2.20.55.jar.sha1 deleted file mode 100644 index 4dee45f4d9dd3..0000000000000 --- a/plugins/discovery-ec2/licenses/protocol-core-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0935e3ab32962a890f1d13bf39ba2167d9d692f9 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/protocol-core-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/protocol-core-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..8de58699d8d82 --- /dev/null +++ b/plugins/discovery-ec2/licenses/protocol-core-2.20.86.jar.sha1 @@ -0,0 +1 @@ +bd85984ac6327a50d20e7957ecebf4fa3ad7766b \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/regions-2.20.55.jar.sha1 b/plugins/discovery-ec2/licenses/regions-2.20.55.jar.sha1 deleted file mode 100644 index 993fc2f97de62..0000000000000 --- a/plugins/discovery-ec2/licenses/regions-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a117c19b4a30e902f4f1cc4bef6b5c10cc9aef31 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/regions-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/regions-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..266bc76ad6f77 --- /dev/null +++ b/plugins/discovery-ec2/licenses/regions-2.20.86.jar.sha1 @@ -0,0 +1 @@ +04fd460ce1c633986ecef1b4218d3e7067a7087d \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/sdk-core-2.20.55.jar.sha1 b/plugins/discovery-ec2/licenses/sdk-core-2.20.55.jar.sha1 deleted file mode 100644 index 5f12be9c08c5b..0000000000000 --- 
a/plugins/discovery-ec2/licenses/sdk-core-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8f2347feaf2575560ca89a2caa8d0243dbeb17a9 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/sdk-core-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/sdk-core-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..9eca40e6b9a9a --- /dev/null +++ b/plugins/discovery-ec2/licenses/sdk-core-2.20.86.jar.sha1 @@ -0,0 +1 @@ +1b9df9ca5e4918fab05db3b703b2873e83104c30 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/third-party-jackson-core-2.20.55.jar.sha1 b/plugins/discovery-ec2/licenses/third-party-jackson-core-2.20.55.jar.sha1 deleted file mode 100644 index e7eebbb98f1fe..0000000000000 --- a/plugins/discovery-ec2/licenses/third-party-jackson-core-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -956912f26056fc7d46b2db566362fe5f7a8c0e14 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/third-party-jackson-core-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/third-party-jackson-core-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..c9c3d4dc53505 --- /dev/null +++ b/plugins/discovery-ec2/licenses/third-party-jackson-core-2.20.86.jar.sha1 @@ -0,0 +1 @@ +5ad6e7f7d52d8a5390b2daf2fd8ffcab97fe3102 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/utils-2.20.55.jar.sha1 b/plugins/discovery-ec2/licenses/utils-2.20.55.jar.sha1 deleted file mode 100644 index fc4cde604e33c..0000000000000 --- a/plugins/discovery-ec2/licenses/utils-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d3e1bbbc19795eadbeb4dd963a94647576644097 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/utils-2.20.86.jar.sha1 b/plugins/discovery-ec2/licenses/utils-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..b91a3b3047570 --- /dev/null +++ b/plugins/discovery-ec2/licenses/utils-2.20.86.jar.sha1 @@ -0,0 +1 @@ +7a61f8b3c54ecf3dc785830d4f482f19ca52bc57 \ No newline at end of file diff --git a/plugins/discovery-ec2/qa/amazon-ec2/src/yamlRestTest/java/org/opensearch/discovery/ec2/AmazonEC2DiscoveryClientYamlTestSuiteIT.java b/plugins/discovery-ec2/qa/amazon-ec2/src/yamlRestTest/java/org/opensearch/discovery/ec2/AmazonEC2DiscoveryClientYamlTestSuiteIT.java index a0f4984751a7b..6e86148f208bd 100644 --- a/plugins/discovery-ec2/qa/amazon-ec2/src/yamlRestTest/java/org/opensearch/discovery/ec2/AmazonEC2DiscoveryClientYamlTestSuiteIT.java +++ b/plugins/discovery-ec2/qa/amazon-ec2/src/yamlRestTest/java/org/opensearch/discovery/ec2/AmazonEC2DiscoveryClientYamlTestSuiteIT.java @@ -34,6 +34,7 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.test.rest.yaml.ClientYamlTestCandidate; import org.opensearch.test.rest.yaml.OpenSearchClientYamlSuiteTestCase; diff --git a/plugins/discovery-ec2/qa/amazon-ec2/src/yamlRestTest/java/org/opensearch/discovery/ec2/AmazonEC2Fixture.java b/plugins/discovery-ec2/qa/amazon-ec2/src/yamlRestTest/java/org/opensearch/discovery/ec2/AmazonEC2Fixture.java index 6aba3c8fa9241..8dc9db69b674f 100644 --- a/plugins/discovery-ec2/qa/amazon-ec2/src/yamlRestTest/java/org/opensearch/discovery/ec2/AmazonEC2Fixture.java +++ b/plugins/discovery-ec2/qa/amazon-ec2/src/yamlRestTest/java/org/opensearch/discovery/ec2/AmazonEC2Fixture.java @@ -32,12 +32,12 @@ package org.opensearch.discovery.ec2; import software.amazon.awssdk.utils.DateUtils; + import org.apache.http.NameValuePair; import org.apache.http.client.methods.HttpGet; 
import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; import org.apache.http.client.utils.URLEncodedUtils; - import org.opensearch.common.Booleans; import org.opensearch.common.SuppressForbidden; import org.opensearch.core.rest.RestStatus; @@ -46,6 +46,7 @@ import javax.xml.XMLConstants; import javax.xml.stream.XMLOutputFactory; import javax.xml.stream.XMLStreamWriter; + import java.io.IOException; import java.io.StringWriter; import java.nio.file.Files; diff --git a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AmazonEc2ClientReference.java b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AmazonEc2ClientReference.java index 7d1004122c13b..1c5ffbfb38ec3 100644 --- a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AmazonEc2ClientReference.java +++ b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AmazonEc2ClientReference.java @@ -32,9 +32,10 @@ package org.opensearch.discovery.ec2; -import org.opensearch.common.concurrent.RefCountedReleasable; import software.amazon.awssdk.services.ec2.Ec2Client; +import org.opensearch.common.concurrent.RefCountedReleasable; + /** * Handles the shutdown of the wrapped {@link Ec2Client} using reference * counting. diff --git a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AwsEc2SeedHostsProvider.java b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AwsEc2SeedHostsProvider.java index 4afdff7d2c272..fb46b82065fd1 100644 --- a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AwsEc2SeedHostsProvider.java +++ b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AwsEc2SeedHostsProvider.java @@ -35,11 +35,10 @@ import software.amazon.awssdk.core.exception.SdkException; import software.amazon.awssdk.services.ec2.model.DescribeInstancesRequest; import software.amazon.awssdk.services.ec2.model.DescribeInstancesResponse; -import software.amazon.awssdk.services.ec2.model.Instance; -import software.amazon.awssdk.services.ec2.model.Reservation; import software.amazon.awssdk.services.ec2.model.Filter; - import software.amazon.awssdk.services.ec2.model.GroupIdentifier; +import software.amazon.awssdk.services.ec2.model.Instance; +import software.amazon.awssdk.services.ec2.model.Reservation; import software.amazon.awssdk.services.ec2.model.Tag; import org.apache.logging.log4j.LogManager; @@ -47,9 +46,9 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.SingleObjectCache; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.discovery.SeedHostsProvider; import org.opensearch.transport.TransportService; diff --git a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AwsEc2ServiceImpl.java b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AwsEc2ServiceImpl.java index 2a142b841b713..a2e920761b655 100644 --- a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AwsEc2ServiceImpl.java +++ b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AwsEc2ServiceImpl.java @@ -32,30 +32,31 @@ package org.opensearch.discovery.ec2; -import java.net.URI; -import java.net.URISyntaxException; import software.amazon.awssdk.auth.credentials.AwsCredentials; import 
software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.OpenSearchException; -import org.opensearch.common.util.LazyInitializable; -import org.opensearch.core.common.Strings; -import org.opensearch.common.SuppressForbidden; import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; import software.amazon.awssdk.core.exception.SdkException; +import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.http.apache.ApacheHttpClient; import software.amazon.awssdk.http.apache.ProxyConfiguration; +import software.amazon.awssdk.profiles.ProfileFileSystemSetting; +import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.ec2.Ec2Client; import software.amazon.awssdk.services.ec2.Ec2ClientBuilder; -import software.amazon.awssdk.core.retry.RetryPolicy; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.OpenSearchException; +import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.util.LazyInitializable; +import org.opensearch.core.common.Strings; + +import java.net.URI; +import java.net.URISyntaxException; import java.time.Duration; import java.util.concurrent.atomic.AtomicReference; -import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; -import software.amazon.awssdk.profiles.ProfileFileSystemSetting; -import software.amazon.awssdk.regions.Region; class AwsEc2ServiceImpl implements AwsEc2Service { private static final Logger logger = LogManager.getLogger(AwsEc2ServiceImpl.class); @@ -98,7 +99,7 @@ protected Ec2Client buildClient( if (Strings.hasText(endpoint)) { logger.debug("using explicit ec2 endpoint [{}]", endpoint); - builder.endpointOverride(URI.create(endpoint)); + builder.endpointOverride(URI.create(getFullEndpoint(endpoint))); } if (Strings.hasText(region)) { @@ -109,6 +110,19 @@ protected Ec2Client buildClient( return SocketAccess.doPrivileged(builder::build); } + protected String getFullEndpoint(String endpoint) { + if (!Strings.hasText(endpoint)) { + return null; + } + if (endpoint.startsWith("http://") || endpoint.startsWith("https://")) { + return endpoint; + } + + // if no scheme is provided, default to https + logger.debug("no scheme found in endpoint [{}], defaulting to https", endpoint); + return "https://" + endpoint; + } + static ProxyConfiguration buildProxyConfiguration(Logger logger, Ec2ClientSettings clientSettings) { if (Strings.hasText(clientSettings.proxyHost)) { try { diff --git a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/Ec2ClientSettings.java b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/Ec2ClientSettings.java index 14d39cf762bed..8c010bbcdec3a 100644 --- a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/Ec2ClientSettings.java +++ b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/Ec2ClientSettings.java @@ -32,20 +32,21 @@ package org.opensearch.discovery.ec2; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentials; +import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; +import software.amazon.awssdk.core.Protocol; + import org.apache.logging.log4j.LogManager; import 
org.apache.logging.log4j.Logger; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.SecureSetting; -import org.opensearch.core.common.settings.SecureString; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsException; import org.opensearch.common.unit.TimeValue; -import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; -import software.amazon.awssdk.auth.credentials.AwsCredentials; -import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; -import software.amazon.awssdk.core.Protocol; +import org.opensearch.core.common.settings.SecureString; import java.util.Locale; diff --git a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/Ec2DiscoveryPlugin.java b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/Ec2DiscoveryPlugin.java index e1979d9af8352..eb02e99582f93 100644 --- a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/Ec2DiscoveryPlugin.java +++ b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/Ec2DiscoveryPlugin.java @@ -32,6 +32,8 @@ package org.opensearch.discovery.ec2; +import software.amazon.awssdk.core.SdkSystemSetting; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.SpecialPermission; @@ -45,7 +47,6 @@ import org.opensearch.plugins.Plugin; import org.opensearch.plugins.ReloadablePlugin; import org.opensearch.transport.TransportService; -import software.amazon.awssdk.core.SdkSystemSetting; import java.io.BufferedReader; import java.io.IOException; diff --git a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/Ec2NameResolver.java b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/Ec2NameResolver.java index 3f7a8e1e03353..7efaf41bc3133 100644 --- a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/Ec2NameResolver.java +++ b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/Ec2NameResolver.java @@ -32,12 +32,13 @@ package org.opensearch.discovery.ec2; +import software.amazon.awssdk.core.SdkSystemSetting; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.network.NetworkService.CustomNameResolver; import org.opensearch.common.util.io.IOUtils; -import software.amazon.awssdk.core.SdkSystemSetting; import java.io.BufferedReader; import java.io.IOException; diff --git a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/SocketAccess.java b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/SocketAccess.java index c6605002c4462..0125ae4d19c3e 100644 --- a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/SocketAccess.java +++ b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/SocketAccess.java @@ -46,6 +46,7 @@ * {@link SocketPermission} 'connect' to establish connections. This class wraps the operations requiring access in * {@link AccessController#doPrivileged(PrivilegedAction)} blocks. 
*/ +@SuppressWarnings("removal") final class SocketAccess { private SocketAccess() {} diff --git a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/AbstractEc2DiscoveryTestCase.java b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/AbstractEc2DiscoveryTestCase.java index b7f8d3a119ff7..5250f8d88855e 100644 --- a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/AbstractEc2DiscoveryTestCase.java +++ b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/AbstractEc2DiscoveryTestCase.java @@ -8,11 +8,13 @@ package org.opensearch.discovery.ec2; -import org.opensearch.test.OpenSearchTestCase; +import software.amazon.awssdk.profiles.ProfileFileSystemSetting; + import org.opensearch.common.SuppressForbidden; import org.opensearch.common.io.PathUtils; +import org.opensearch.test.OpenSearchTestCase; + import java.nio.file.Path; -import software.amazon.awssdk.profiles.ProfileFileSystemSetting; public abstract class AbstractEc2DiscoveryTestCase extends OpenSearchTestCase { @Override diff --git a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/AbstractEc2MockAPITestCase.java b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/AbstractEc2MockAPITestCase.java index d7ac70199e11d..595c51bdb1f47 100644 --- a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/AbstractEc2MockAPITestCase.java +++ b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/AbstractEc2MockAPITestCase.java @@ -31,9 +31,11 @@ package org.opensearch.discovery.ec2; +import com.sun.net.httpserver.HttpServer; + import software.amazon.awssdk.services.ec2.model.Instance; import software.amazon.awssdk.services.ec2.model.Tag; -import com.sun.net.httpserver.HttpServer; + import org.opensearch.common.SuppressForbidden; import org.opensearch.common.network.InetAddresses; import org.opensearch.common.network.NetworkService; diff --git a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/AwsEc2ServiceImplTests.java b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/AwsEc2ServiceImplTests.java index fb6273dd9a223..3164abe456515 100644 --- a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/AwsEc2ServiceImplTests.java +++ b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/AwsEc2ServiceImplTests.java @@ -35,14 +35,15 @@ import software.amazon.awssdk.auth.credentials.AwsCredentials; import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; -import org.opensearch.common.settings.MockSecureSettings; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.settings.SettingsException; import software.amazon.awssdk.core.Protocol; import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; import software.amazon.awssdk.core.retry.RetryPolicy; import software.amazon.awssdk.http.apache.ProxyConfiguration; +import org.opensearch.common.settings.MockSecureSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.settings.SettingsException; + import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; @@ -201,4 +202,32 @@ public void testAWSConfigurationWithAwsSettings() { assertTrue(clientOverrideConfiguration.retryPolicy().isPresent()); assertThat(clientOverrideConfiguration.retryPolicy().get().numRetries(), is(10)); } + + public void testGetFullEndpointWithScheme() { + final Settings settings = 
Settings.builder().put("discovery.ec2.endpoint", "http://ec2.us-west-2.amazonaws.com").build(); + Ec2ClientSettings clientSettings = Ec2ClientSettings.getClientSettings(settings); + + AwsEc2ServiceImpl awsEc2ServiceImpl = new AwsEc2ServiceImpl(); + + String endpoint = awsEc2ServiceImpl.getFullEndpoint(clientSettings.endpoint); + assertEquals("http://ec2.us-west-2.amazonaws.com", endpoint); + + assertEquals("http://httpserver.example.com", awsEc2ServiceImpl.getFullEndpoint("http://httpserver.example.com")); + + assertEquals("https://httpserver.example.com", awsEc2ServiceImpl.getFullEndpoint("https://httpserver.example.com")); + } + + public void testGetFullEndpointWithoutScheme() { + final Settings settings = Settings.builder().put("discovery.ec2.endpoint", "ec2.us-west-2.amazonaws.com").build(); + Ec2ClientSettings clientSettings = Ec2ClientSettings.getClientSettings(settings); + + AwsEc2ServiceImpl awsEc2ServiceImpl = new AwsEc2ServiceImpl(); + + String endpoint = awsEc2ServiceImpl.getFullEndpoint(clientSettings.endpoint); + assertEquals("https://ec2.us-west-2.amazonaws.com", endpoint); + + assertEquals("https://httpserver.example.com", awsEc2ServiceImpl.getFullEndpoint("httpserver.example.com")); + + assertNull(awsEc2ServiceImpl.getFullEndpoint("")); + } } diff --git a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryPluginTests.java b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryPluginTests.java index 35df2bdbfdfb8..bde508a0afe96 100644 --- a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryPluginTests.java +++ b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryPluginTests.java @@ -32,14 +32,15 @@ package org.opensearch.discovery.ec2; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; import software.amazon.awssdk.auth.credentials.AwsCredentials; import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; -import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; import software.amazon.awssdk.http.apache.ProxyConfiguration; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.ec2.Ec2Client; + import org.opensearch.common.settings.MockSecureSettings; import org.opensearch.common.settings.Settings; import org.opensearch.node.Node; diff --git a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryTests.java b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryTests.java index 861926a9e67c9..02e1ff40f7ed6 100644 --- a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryTests.java +++ b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryTests.java @@ -36,6 +36,7 @@ import software.amazon.awssdk.services.ec2.model.InstanceState; import software.amazon.awssdk.services.ec2.model.InstanceStateName; import software.amazon.awssdk.services.ec2.model.Tag; + import org.apache.http.HttpStatus; import org.apache.http.NameValuePair; import org.apache.http.client.utils.URLEncodedUtils; @@ -44,9 +45,10 @@ import org.opensearch.common.io.Streams; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.util.PageCacheRecycler; -import 
org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.transport.MockTransportService; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportService; @@ -90,7 +92,8 @@ protected MockTransportService createTransportService() { new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, writableRegistry(), - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ) { @Override public TransportAddress[] addressesFromString(String address) { @@ -98,7 +101,14 @@ public TransportAddress[] addressesFromString(String address) { return new TransportAddress[] { poorMansDNS.getOrDefault(address, buildNewFakeTransportAddress()) }; } }; - return new MockTransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, null); + return new MockTransportService( + Settings.EMPTY, + transport, + threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + null, + NoopTracer.INSTANCE + ); } protected List<TransportAddress> buildDynamicHosts(Settings nodeSettings, int nodes) { diff --git a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2NetworkTests.java b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2NetworkTests.java index 47846cada48f6..9518fac442111 100644 --- a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2NetworkTests.java +++ b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2NetworkTests.java @@ -39,7 +39,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.common.Strings; import org.opensearch.core.rest.RestStatus; - import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -90,6 +89,7 @@ public static void startHttp() throws Exception { httpServer.start(); } + @SuppressWarnings("removal") @Before public void setup() { // redirect EC2 metadata service to httpServer @@ -117,6 +117,7 @@ public void testNetworkHostEc2() throws IOException { /** * Test for network.host: _ec2_ */ + @SuppressWarnings("removal") public void testNetworkHostUnableToResolveEc2() { // redirect EC2 metadata service to unknown location AccessController.doPrivileged( diff --git a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2RetriesTests.java b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2RetriesTests.java index cc65e92b1485a..ce097667f9c4b 100644 --- a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2RetriesTests.java +++ b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2RetriesTests.java @@ -33,19 +33,21 @@ package org.opensearch.discovery.ec2; import software.amazon.awssdk.services.ec2.model.Instance; + import org.apache.http.HttpStatus; import org.apache.http.NameValuePair; import org.apache.http.client.utils.URLEncodedUtils; import org.opensearch.Version; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.io.Streams; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.util.PageCacheRecycler; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import 
org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.discovery.SeedHostsProvider; import org.opensearch.discovery.SeedHostsResolver; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.transport.MockTransportService; import org.opensearch.transport.TransportService; import org.opensearch.transport.nio.MockNioTransport; @@ -75,11 +77,13 @@ protected MockTransportService createTransportService() { networkService, PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ), threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - null + null, + NoopTracer.INSTANCE ); } diff --git a/plugins/discovery-gce/build.gradle b/plugins/discovery-gce/build.gradle index c8b52d3afcd45..85efcc43fd65a 100644 --- a/plugins/discovery-gce/build.gradle +++ b/plugins/discovery-gce/build.gradle @@ -24,7 +24,7 @@ versions << [ dependencies { api "com.google.apis:google-api-services-compute:v1-rev160-${versions.google}" api "com.google.api-client:google-api-client:${versions.google}" - api "com.google.oauth-client:google-oauth-client:1.34.1" + api "com.google.oauth-client:google-oauth-client:1.35.0" api "com.google.http-client:google-http-client:${versions.google}" api "com.google.http-client:google-http-client-jackson2:${versions.google}" api 'com.google.code.findbugs:jsr305:3.0.2' diff --git a/plugins/discovery-gce/licenses/commons-logging-LICENSE.txt b/plugins/discovery-gce/licenses/commons-logging-LICENSE.txt index d645695673349..57bc88a15a0ee 100644 --- a/plugins/discovery-gce/licenses/commons-logging-LICENSE.txt +++ b/plugins/discovery-gce/licenses/commons-logging-LICENSE.txt @@ -1,4 +1,3 @@ - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -200,3 +199,4 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + diff --git a/plugins/discovery-gce/licenses/commons-logging-NOTICE.txt b/plugins/discovery-gce/licenses/commons-logging-NOTICE.txt index d3d6e140ce4f3..72eb32a902458 100644 --- a/plugins/discovery-gce/licenses/commons-logging-NOTICE.txt +++ b/plugins/discovery-gce/licenses/commons-logging-NOTICE.txt @@ -1,5 +1,5 @@ -Apache Commons Logging -Copyright 2003-2014 The Apache Software Foundation +Apache Commons CLI +Copyright 2001-2009 The Apache Software Foundation -This product includes software developed at +This product includes software developed by The Apache Software Foundation (http://www.apache.org/). 
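
A note on the discovery-ec2 change earlier in this diff: the new AwsEc2ServiceImpl#getFullEndpoint helper leaves an explicitly schemed discovery.ec2.endpoint untouched and prepends https:// when no scheme is given, which is exactly what the new tests in AwsEc2ServiceImplTests assert. The following is a minimal standalone sketch of that behavior, not part of the patch: the EndpointSchemeDemo class and its main harness are illustrative only, and the null/isEmpty guard stands in for the Strings.hasText check used in the plugin.

// Minimal, self-contained sketch of the scheme-defaulting behavior; not part of the patch.
public final class EndpointSchemeDemo {

    // Mirrors AwsEc2ServiceImpl#getFullEndpoint: keep an explicit scheme, otherwise default to https.
    static String getFullEndpoint(String endpoint) {
        if (endpoint == null || endpoint.isEmpty()) {
            return null; // no endpoint configured (the plugin uses Strings.hasText here)
        }
        if (endpoint.startsWith("http://") || endpoint.startsWith("https://")) {
            return endpoint; // an explicit scheme is preserved as-is
        }
        return "https://" + endpoint; // no scheme supplied, so default to https
    }

    public static void main(String[] args) {
        // Matches the expectations in testGetFullEndpointWithScheme / testGetFullEndpointWithoutScheme:
        System.out.println(getFullEndpoint("ec2.us-west-2.amazonaws.com")); // https://ec2.us-west-2.amazonaws.com
        System.out.println(getFullEndpoint("http://localhost:8080"));       // http://localhost:8080
    }
}
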
diff --git a/plugins/discovery-gce/licenses/google-oauth-client-1.34.1.jar.sha1 b/plugins/discovery-gce/licenses/google-oauth-client-1.34.1.jar.sha1 deleted file mode 100644 index a8434bd380761..0000000000000 --- a/plugins/discovery-gce/licenses/google-oauth-client-1.34.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4a4f88c5e13143f882268c98239fb85c3b2c6cb2 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-oauth-client-1.35.0.jar.sha1 b/plugins/discovery-gce/licenses/google-oauth-client-1.35.0.jar.sha1 new file mode 100644 index 0000000000000..a52e79088c7ca --- /dev/null +++ b/plugins/discovery-gce/licenses/google-oauth-client-1.35.0.jar.sha1 @@ -0,0 +1 @@ +2f52003156e40ba8be5f349a2716a77428896e69 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/log4j-1.2-api-2.20.0.jar.sha1 b/plugins/discovery-gce/licenses/log4j-1.2-api-2.20.0.jar.sha1 deleted file mode 100644 index 9829576d38ce0..0000000000000 --- a/plugins/discovery-gce/licenses/log4j-1.2-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -689151374756cb809cb029f2501015bdc7733179 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/log4j-1.2-api-2.21.0.jar.sha1 b/plugins/discovery-gce/licenses/log4j-1.2-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..39d9177cb2fac --- /dev/null +++ b/plugins/discovery-gce/licenses/log4j-1.2-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +12bad3819a9570807f3c97315930699584c12152 \ No newline at end of file diff --git a/plugins/discovery-gce/qa/gce/src/yamlRestTest/java/org/opensearch/cloud/gce/GCEDiscoveryClientYamlTestSuiteIT.java b/plugins/discovery-gce/qa/gce/src/yamlRestTest/java/org/opensearch/cloud/gce/GCEDiscoveryClientYamlTestSuiteIT.java index 8b05c700a9650..adb7c46665560 100644 --- a/plugins/discovery-gce/qa/gce/src/yamlRestTest/java/org/opensearch/cloud/gce/GCEDiscoveryClientYamlTestSuiteIT.java +++ b/plugins/discovery-gce/qa/gce/src/yamlRestTest/java/org/opensearch/cloud/gce/GCEDiscoveryClientYamlTestSuiteIT.java @@ -34,6 +34,7 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.test.rest.yaml.ClientYamlTestCandidate; import org.opensearch.test.rest.yaml.OpenSearchClientYamlSuiteTestCase; diff --git a/plugins/discovery-gce/qa/gce/src/yamlRestTest/java/org/opensearch/cloud/gce/GCEFixture.java b/plugins/discovery-gce/qa/gce/src/yamlRestTest/java/org/opensearch/cloud/gce/GCEFixture.java index f588316fdc32f..f5a231ef869f5 100644 --- a/plugins/discovery-gce/qa/gce/src/yamlRestTest/java/org/opensearch/cloud/gce/GCEFixture.java +++ b/plugins/discovery-gce/qa/gce/src/yamlRestTest/java/org/opensearch/cloud/gce/GCEFixture.java @@ -32,8 +32,6 @@ package org.opensearch.cloud.gce; import org.apache.http.client.methods.HttpGet; - -import org.opensearch.common.Strings; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.collect.MapBuilder; import org.opensearch.common.path.PathTrie; @@ -139,13 +137,12 @@ private PathTrie<RequestHandler> defaultHandlers() { handlers.insert( nonAuthPath(HttpGet.METHOD_NAME, "/computeMetadata/v1/instance/service-accounts/default/token"), request -> jsonValue.apply( - Strings.toString( - jsonBuilder().startObject() - .field("access_token", TOKEN) - .field("expires_in", TimeUnit.HOURS.toSeconds(1)) - .field("token_type", TOKEN_TYPE) - .endObject() - ) + jsonBuilder().startObject() + .field("access_token", TOKEN) + .field("expires_in", TimeUnit.HOURS.toSeconds(1)) + .field("token_type", 
TOKEN_TYPE) + .endObject() + .toString() ) ); @@ -179,9 +176,7 @@ private PathTrie<RequestHandler> defaultHandlers() { ); } - final String json = Strings.toString( - jsonBuilder().startObject().field("id", "test-instances").field("items", items).endObject() - ); + final String json = jsonBuilder().startObject().field("id", "test-instances").field("items", items).endObject().toString(); final byte[] responseAsBytes = json.getBytes(StandardCharsets.UTF_8); final Map<String, String> headers = new HashMap<>(JSON_CONTENT_TYPE); @@ -213,29 +208,28 @@ protected Response handle(final Request request) throws IOException { } private static Response newError(final RestStatus status, final String code, final String message) throws IOException { - final String response = Strings.toString( - jsonBuilder().startObject() - .field( - "error", - MapBuilder.<String, Object>newMapBuilder() - .put( - "errors", - Collections.singletonList( - MapBuilder.<String, Object>newMapBuilder() - .put("domain", "global") - .put("reason", "required") - .put("message", message) - .put("locationType", "header") - .put("location", code) - .immutableMap() - ) + final String response = jsonBuilder().startObject() + .field( + "error", + MapBuilder.<String, Object>newMapBuilder() + .put( + "errors", + Collections.singletonList( + MapBuilder.<String, Object>newMapBuilder() + .put("domain", "global") + .put("reason", "required") + .put("message", message) + .put("locationType", "header") + .put("location", code) + .immutableMap() ) - .put("code", status.getStatus()) - .put("message", message) - .immutableMap() - ) - .endObject() - ); + ) + .put("code", status.getStatus()) + .put("message", message) + .immutableMap() + ) + .endObject() + .toString(); return new Response(status.getStatus(), JSON_CONTENT_TYPE, response.getBytes(UTF_8)); } diff --git a/plugins/discovery-gce/src/internalClusterTest/java/org/opensearch/discovery/gce/GceDiscoverTests.java b/plugins/discovery-gce/src/internalClusterTest/java/org/opensearch/discovery/gce/GceDiscoverTests.java index 4f83c39dc4ef0..e97a4650ca8ae 100644 --- a/plugins/discovery-gce/src/internalClusterTest/java/org/opensearch/discovery/gce/GceDiscoverTests.java +++ b/plugins/discovery-gce/src/internalClusterTest/java/org/opensearch/discovery/gce/GceDiscoverTests.java @@ -35,7 +35,6 @@ import com.google.api.services.compute.model.Instance; import com.google.api.services.compute.model.NetworkInterface; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; - import org.opensearch.cloud.gce.GceInstancesService; import org.opensearch.cloud.gce.util.Access; import org.opensearch.cluster.node.DiscoveryNode; @@ -44,7 +43,6 @@ import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.transport.TransportService; - import org.junit.After; import java.io.IOException; diff --git a/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/GceInstancesServiceImpl.java b/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/GceInstancesServiceImpl.java index f25faaf415140..46cc1c8eab537 100644 --- a/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/GceInstancesServiceImpl.java +++ b/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/GceInstancesServiceImpl.java @@ -46,7 +46,6 @@ import com.google.api.services.compute.Compute; import com.google.api.services.compute.model.Instance; import com.google.api.services.compute.model.InstanceList; - import org.apache.logging.log4j.LogManager; import 
org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; diff --git a/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/GceMetadataService.java b/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/GceMetadataService.java index 4873cb6dcbf7a..ef73f741ad20c 100644 --- a/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/GceMetadataService.java +++ b/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/GceMetadataService.java @@ -32,12 +32,6 @@ package org.opensearch.cloud.gce; -import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; -import java.security.GeneralSecurityException; -import java.util.function.Function; - import com.google.api.client.googleapis.javanet.GoogleNetHttpTransport; import com.google.api.client.http.GenericUrl; import com.google.api.client.http.HttpHeaders; @@ -46,10 +40,16 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.cloud.gce.util.Access; -import org.opensearch.common.component.AbstractLifecycleComponent; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.security.GeneralSecurityException; +import java.util.function.Function; + public class GceMetadataService extends AbstractLifecycleComponent { private static final Logger logger = LogManager.getLogger(GceMetadataService.class); diff --git a/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/util/Access.java b/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/util/Access.java index 1401f7ca26ce6..c46bfedbd8507 100644 --- a/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/util/Access.java +++ b/plugins/discovery-gce/src/main/java/org/opensearch/cloud/gce/util/Access.java @@ -48,6 +48,7 @@ * {@code connect}. This class wraps the operations requiring access in * {@link AccessController#doPrivileged(PrivilegedAction)} blocks. 
*/ +@SuppressWarnings("removal") public final class Access { private Access() {} diff --git a/plugins/discovery-gce/src/main/java/org/opensearch/discovery/gce/GceSeedHostsProvider.java b/plugins/discovery-gce/src/main/java/org/opensearch/discovery/gce/GceSeedHostsProvider.java index dfd60f52730a6..5958c07e244ad 100644 --- a/plugins/discovery-gce/src/main/java/org/opensearch/discovery/gce/GceSeedHostsProvider.java +++ b/plugins/discovery-gce/src/main/java/org/opensearch/discovery/gce/GceSeedHostsProvider.java @@ -35,7 +35,6 @@ import com.google.api.services.compute.model.AccessConfig; import com.google.api.services.compute.model.Instance; import com.google.api.services.compute.model.NetworkInterface; - import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -46,9 +45,9 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.discovery.SeedHostsProvider; import org.opensearch.transport.TransportService; diff --git a/plugins/discovery-gce/src/main/java/org/opensearch/discovery/gce/RetryHttpInitializerWrapper.java b/plugins/discovery-gce/src/main/java/org/opensearch/discovery/gce/RetryHttpInitializerWrapper.java index 26b8215bed7ff..6e5372cad0a4b 100644 --- a/plugins/discovery-gce/src/main/java/org/opensearch/discovery/gce/RetryHttpInitializerWrapper.java +++ b/plugins/discovery-gce/src/main/java/org/opensearch/discovery/gce/RetryHttpInitializerWrapper.java @@ -32,8 +32,6 @@ package org.opensearch.discovery.gce; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; import com.google.api.client.auth.oauth2.Credential; import com.google.api.client.googleapis.testing.auth.oauth2.MockGoogleCredential; import com.google.api.client.http.HttpBackOffIOExceptionHandler; @@ -44,6 +42,8 @@ import com.google.api.client.http.HttpUnsuccessfulResponseHandler; import com.google.api.client.util.ExponentialBackOff; import com.google.api.client.util.Sleeper; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.opensearch.cloud.gce.util.Access; import org.opensearch.common.unit.TimeValue; diff --git a/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceDiscoveryTests.java b/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceDiscoveryTests.java index 2ca1234bb8a04..2208c78bef67a 100644 --- a/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceDiscoveryTests.java +++ b/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceDiscoveryTests.java @@ -37,7 +37,8 @@ import org.opensearch.cloud.gce.GceMetadataService; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; @@ -65,7 +66,6 @@ * compute/v1/projects/[project-id]/zones/[zone] * * By default, project-id is the test method name, lowercase and 
missing the "test" prefix. - * * For example, if you create a test `myNewAwesomeTest` with following settings: * * Settings nodeSettings = Settings.builder() @@ -74,7 +74,6 @@ * .build(); * * You need to create a file under `src/test/resources/org/opensearch/discovery/gce/` named: - * * compute/v1/projects/mynewawesometest/zones/europe-west1-b/instances.json * */ @@ -109,7 +108,7 @@ public void setProjectName() { @Before public void createTransportService() { - transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null); + transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, NoopTracer.INSTANCE); } @After diff --git a/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceMockUtils.java b/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceMockUtils.java index 6d6028eca713a..f39ab200da8ed 100644 --- a/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceMockUtils.java +++ b/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceMockUtils.java @@ -41,9 +41,9 @@ import com.google.api.client.testing.http.MockLowLevelHttpResponse; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.opensearch.common.io.Streams; import org.opensearch.core.common.Strings; import org.opensearch.core.util.FileSystemUtils; -import org.opensearch.common.io.Streams; import java.io.IOException; import java.io.InputStream; diff --git a/plugins/events-correlation-engine/src/internalClusterTest/java/org/opensearch/plugin/correlation/EventsCorrelationPluginTransportIT.java b/plugins/events-correlation-engine/src/internalClusterTest/java/org/opensearch/plugin/correlation/EventsCorrelationPluginTransportIT.java index 86ccd925551fd..028848a91213e 100644 --- a/plugins/events-correlation-engine/src/internalClusterTest/java/org/opensearch/plugin/correlation/EventsCorrelationPluginTransportIT.java +++ b/plugins/events-correlation-engine/src/internalClusterTest/java/org/opensearch/plugin/correlation/EventsCorrelationPluginTransportIT.java @@ -9,13 +9,13 @@ package org.opensearch.plugin.correlation; import org.apache.lucene.search.join.ScoreMode; -import org.junit.Assert; import org.opensearch.action.admin.cluster.node.info.NodeInfo; import org.opensearch.action.admin.cluster.node.info.NodesInfoRequest; import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; import org.opensearch.action.admin.cluster.node.info.PluginsAndModules; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchResponse; +import org.opensearch.core.rest.RestStatus; import org.opensearch.index.query.NestedQueryBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.plugin.correlation.rules.action.IndexCorrelationRuleAction; @@ -26,9 +26,9 @@ import org.opensearch.plugins.Plugin; import org.opensearch.plugins.PluginInfo; import org.opensearch.rest.RestRequest; -import org.opensearch.core.rest.RestStatus; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.Assert; import java.util.Arrays; import java.util.Collection; diff --git a/plugins/events-correlation-engine/src/javaRestTest/java/org/opensearch/plugin/correlation/CorrelationVectorsEngineIT.java b/plugins/events-correlation-engine/src/javaRestTest/java/org/opensearch/plugin/correlation/CorrelationVectorsEngineIT.java index dd83dfa84dbdb..414fe1948f053 100644 --- 
a/plugins/events-correlation-engine/src/javaRestTest/java/org/opensearch/plugin/correlation/CorrelationVectorsEngineIT.java +++ b/plugins/events-correlation-engine/src/javaRestTest/java/org/opensearch/plugin/correlation/CorrelationVectorsEngineIT.java @@ -11,21 +11,21 @@ import org.apache.hc.core5.http.Header; import org.apache.hc.core5.http.HttpEntity; import org.apache.lucene.index.VectorSimilarityFunction; -import org.junit.Assert; import org.opensearch.client.Request; import org.opensearch.client.RequestOptions; import org.opensearch.client.Response; import org.opensearch.client.ResponseException; import org.opensearch.client.RestClient; import org.opensearch.client.WarningsHandler; -import org.opensearch.common.Strings; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.Strings; import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexSettings; import org.opensearch.test.rest.OpenSearchRestTestCase; +import org.junit.Assert; import java.io.IOException; import java.util.Arrays; @@ -94,7 +94,7 @@ public void testQuery() throws IOException { .endObject() .endObject(); - String mapping = Strings.toString(builder); + String mapping = builder.toString(); createTestIndexWithMappingJson(client(), INDEX_NAME, mapping, getCorrelationDefaultIndexSettings()); for (int idx = 0; idx < TEST_VECTORS.length; ++idx) { @@ -209,7 +209,7 @@ public void testQueryWithWrongMapping() throws IOException { .endObject() .endObject(); - String mapping = Strings.toString(builder); + String mapping = builder.toString(); Exception ex = assertThrows(ResponseException.class, () -> { createTestIndexWithMappingJson(client(), INDEX_NAME, mapping, getCorrelationDefaultIndexSettings()); }); @@ -225,7 +225,7 @@ public void testQueryWithWrongMapping() throws IOException { private String createTestIndexWithMappingJson(RestClient client, String index, String mapping, Settings settings) throws IOException { Request request = new Request("PUT", "/" + index); - String entity = "{\"settings\": " + Strings.toString(XContentType.JSON, settings); + String entity = "{\"settings\": " + Strings.toString(MediaTypeRegistry.JSON, settings); if (mapping != null) { entity = entity + ",\"mappings\" : " + mapping; } @@ -253,7 +253,7 @@ private void addCorrelationDoc(String index, String docId, List<String> fieldNam } builder.endObject(); - request.setJsonEntity(Strings.toString(builder)); + request.setJsonEntity(builder.toString()); Response response = client().performRequest(request); assertEquals(request.getEndpoint() + ": failed", RestStatus.CREATED, RestStatus.fromCode(response.getStatusLine().getStatusCode())); } diff --git a/plugins/events-correlation-engine/src/javaRestTest/java/org/opensearch/plugin/correlation/EventsCorrelationPluginRestIT.java b/plugins/events-correlation-engine/src/javaRestTest/java/org/opensearch/plugin/correlation/EventsCorrelationPluginRestIT.java index 252849e4a1a36..3791a5cdf5db0 100644 --- a/plugins/events-correlation-engine/src/javaRestTest/java/org/opensearch/plugin/correlation/EventsCorrelationPluginRestIT.java +++ b/plugins/events-correlation-engine/src/javaRestTest/java/org/opensearch/plugin/correlation/EventsCorrelationPluginRestIT.java @@ -8,7 +8,6 @@ package org.opensearch.plugin.correlation; -import org.junit.Assert; import 
org.opensearch.action.search.SearchResponse; import org.opensearch.client.Request; import org.opensearch.client.Response; @@ -16,6 +15,7 @@ import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.test.rest.OpenSearchRestTestCase; +import org.junit.Assert; import java.io.IOException; import java.util.List; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/EventsCorrelationPlugin.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/EventsCorrelationPlugin.java index ea05302733ff1..9637042974d03 100644 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/EventsCorrelationPlugin.java +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/EventsCorrelationPlugin.java @@ -9,17 +9,17 @@ package org.opensearch.plugin.correlation; import org.opensearch.action.ActionRequest; -import org.opensearch.action.ActionResponse; import org.opensearch.client.Client; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsFilter; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/BasePerFieldCorrelationVectorsFormat.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/BasePerFieldCorrelationVectorsFormat.java index 7763b1e42d63e..00b55eb75995c 100644 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/BasePerFieldCorrelationVectorsFormat.java +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/BasePerFieldCorrelationVectorsFormat.java @@ -27,12 +27,12 @@ public abstract class BasePerFieldCorrelationVectorsFormat extends PerFieldKnnVectorsFormat { /** * the hyper-parameters for constructing HNSW graphs. - * https://lucene.apache.org/core/9_4_0/core/org/apache/lucene/util/hnsw/HnswGraph.html + * <a href="https://lucene.apache.org/core/9_4_0/core/org/apache/lucene/util/hnsw/HnswGraph.html">HnswGraph.html</a> */ public static final String METHOD_PARAMETER_M = "m"; /** * the hyper-parameters for constructing HNSW graphs. 
- * https://lucene.apache.org/core/9_4_0/core/org/apache/lucene/util/hnsw/HnswGraph.html + * <a href="https://lucene.apache.org/core/9_4_0/core/org/apache/lucene/util/hnsw/HnswGraph.html">HnswGraph.html</a> */ public static final String METHOD_PARAMETER_EF_CONSTRUCTION = "ef_construction"; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/CorrelationCodecVersion.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/CorrelationCodecVersion.java index 5e2cb8bfbc03a..3fcc995fb4199 100644 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/CorrelationCodecVersion.java +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/CorrelationCodecVersion.java @@ -9,10 +9,10 @@ package org.opensearch.plugin.correlation.core.index.codec; import org.apache.lucene.codecs.Codec; -import org.apache.lucene.codecs.lucene95.Lucene95Codec; +import org.apache.lucene.codecs.lucene99.Lucene99Codec; import org.opensearch.index.mapper.MapperService; -import org.opensearch.plugin.correlation.core.index.codec.correlation950.CorrelationCodec; -import org.opensearch.plugin.correlation.core.index.codec.correlation950.PerFieldCorrelationVectorsFormat; +import org.opensearch.plugin.correlation.core.index.codec.correlation990.CorrelationCodec; +import org.opensearch.plugin.correlation.core.index.codec.correlation990.PerFieldCorrelationVectorsFormat; import java.util.Optional; import java.util.function.BiFunction; @@ -24,15 +24,15 @@ * @opensearch.internal */ public enum CorrelationCodecVersion { - V_9_5_0( + V_9_9_0( "CorrelationCodec", - new Lucene95Codec(), + new Lucene99Codec(), new PerFieldCorrelationVectorsFormat(Optional.empty()), (userCodec, mapperService) -> new CorrelationCodec(userCodec, new PerFieldCorrelationVectorsFormat(Optional.of(mapperService))), CorrelationCodec::new ); - private static final CorrelationCodecVersion CURRENT = V_9_5_0; + private static final CorrelationCodecVersion CURRENT = V_9_9_0; private final String codecName; private final Codec defaultCodecDelegate; private final PerFieldCorrelationVectorsFormat perFieldCorrelationVectorsFormat; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/package-info.java deleted file mode 100644 index b4dad34d2718e..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -/** - * custom Lucene9.5 codec package for events-correlation-engine - */ -package org.opensearch.plugin.correlation.core.index.codec.correlation950; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/CorrelationCodec.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/CorrelationCodec.java similarity index 97% rename from plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/CorrelationCodec.java rename to plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/CorrelationCodec.java index f91ba429fbea9..022972e2e06c3 100644 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/CorrelationCodec.java +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/CorrelationCodec.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.plugin.correlation.core.index.codec.correlation950; +package org.opensearch.plugin.correlation.core.index.codec.correlation990; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.FilterCodec; @@ -19,7 +19,7 @@ * @opensearch.internal */ public class CorrelationCodec extends FilterCodec { - private static final CorrelationCodecVersion VERSION = CorrelationCodecVersion.V_9_5_0; + private static final CorrelationCodecVersion VERSION = CorrelationCodecVersion.V_9_9_0; private final PerFieldCorrelationVectorsFormat perFieldCorrelationVectorsFormat; /** diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/PerFieldCorrelationVectorsFormat.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/PerFieldCorrelationVectorsFormat.java similarity index 77% rename from plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/PerFieldCorrelationVectorsFormat.java rename to plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/PerFieldCorrelationVectorsFormat.java index f6862ecc17736..89cc0b614a1a5 100644 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/PerFieldCorrelationVectorsFormat.java +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/PerFieldCorrelationVectorsFormat.java @@ -6,9 +6,9 @@ * compatible open source license. 
*/ -package org.opensearch.plugin.correlation.core.index.codec.correlation950; +package org.opensearch.plugin.correlation.core.index.codec.correlation990; -import org.apache.lucene.codecs.lucene95.Lucene95HnswVectorsFormat; +import org.apache.lucene.codecs.lucene99.Lucene99HnswVectorsFormat; import org.opensearch.index.mapper.MapperService; import org.opensearch.plugin.correlation.core.index.codec.BasePerFieldCorrelationVectorsFormat; @@ -26,10 +26,10 @@ public class PerFieldCorrelationVectorsFormat extends BasePerFieldCorrelationVec public PerFieldCorrelationVectorsFormat(final Optional<MapperService> mapperService) { super( mapperService, - Lucene95HnswVectorsFormat.DEFAULT_MAX_CONN, - Lucene95HnswVectorsFormat.DEFAULT_BEAM_WIDTH, - Lucene95HnswVectorsFormat::new, - Lucene95HnswVectorsFormat::new + Lucene99HnswVectorsFormat.DEFAULT_MAX_CONN, + Lucene99HnswVectorsFormat.DEFAULT_BEAM_WIDTH, + Lucene99HnswVectorsFormat::new, + Lucene99HnswVectorsFormat::new ); } } diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/package-info.java new file mode 100644 index 0000000000000..fc2a9de58a73a --- /dev/null +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * custom Lucene9.9 codec package for events-correlation-engine + */ +package org.opensearch.plugin.correlation.core.index.codec.correlation990; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapper.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapper.java index a1918f3c954d0..18c9dd222e2cf 100644 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapper.java +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapper.java @@ -8,6 +8,7 @@ package org.opensearch.plugin.correlation.core.index.mapper; +import org.apache.lucene.codecs.KnnVectorsFormat; import org.apache.lucene.document.FieldType; import org.apache.lucene.document.KnnFloatVectorField; import org.apache.lucene.document.StoredField; @@ -23,8 +24,6 @@ import java.util.Locale; import java.util.Optional; -import static org.apache.lucene.index.FloatVectorValues.MAX_DIMENSIONS; - /** * Field mapper for the correlation vector type * * @opensearch.internal */ public class CorrelationVectorFieldMapper extends VectorFieldMapper { - private static final int LUCENE_MAX_DIMENSION = MAX_DIMENSIONS; + private static final int LUCENE_MAX_DIMENSION = KnnVectorsFormat.DEFAULT_MAX_DIMENSIONS; private final FieldType vectorFieldType; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilder.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilder.java index f43f8d0e63755..806ac0389b5f3 100644 --- 
a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilder.java +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilder.java @@ -11,11 +11,11 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.search.Query; +import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.ParseField; -import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.mapper.MappedFieldType; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/IndexCorrelationRuleResponse.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/IndexCorrelationRuleResponse.java index 8ba975a6b9b35..8102e6585825e 100644 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/IndexCorrelationRuleResponse.java +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/IndexCorrelationRuleResponse.java @@ -8,14 +8,14 @@ package org.opensearch.plugin.correlation.rules.action; -import org.opensearch.action.ActionResponse; +import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.ParseField; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.plugin.correlation.rules.model.CorrelationRule; -import org.opensearch.core.rest.RestStatus; import java.io.IOException; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/CorrelationQuery.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/CorrelationQuery.java index 0c90e08da9cbc..3797e0c7043dc 100644 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/CorrelationQuery.java +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/CorrelationQuery.java @@ -10,10 +10,10 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/CorrelationRule.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/CorrelationRule.java index 56cb198484f56..6978d7248e199 100644 --- 
a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/CorrelationRule.java +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/CorrelationRule.java @@ -10,10 +10,10 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/resthandler/RestIndexCorrelationRuleAction.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/resthandler/RestIndexCorrelationRuleAction.java index ee29352d3e0f0..3b2b7eb02ae5f 100644 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/resthandler/RestIndexCorrelationRuleAction.java +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/resthandler/RestIndexCorrelationRuleAction.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.client.node.NodeClient; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.plugin.correlation.EventsCorrelationPlugin; @@ -23,7 +24,6 @@ import org.opensearch.rest.RestChannel; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; -import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.action.RestResponseListener; import java.io.IOException; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/transport/TransportIndexCorrelationRuleAction.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/transport/TransportIndexCorrelationRuleAction.java index 5fb31f1f352cb..7b4fb670c4aee 100644 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/transport/TransportIndexCorrelationRuleAction.java +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/transport/TransportIndexCorrelationRuleAction.java @@ -11,7 +11,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.OpenSearchStatusException; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.create.CreateIndexResponse; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.index.IndexResponse; @@ -24,6 +23,8 @@ import org.opensearch.common.inject.Inject; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.plugin.correlation.rules.action.IndexCorrelationRuleAction; import org.opensearch.plugin.correlation.rules.action.IndexCorrelationRuleRequest; @@ -32,7 +33,6 @@ import org.opensearch.plugin.correlation.utils.CorrelationRuleIndices; import 
org.opensearch.plugin.correlation.utils.IndexUtils; import org.opensearch.rest.RestRequest; -import org.opensearch.core.rest.RestStatus; import org.opensearch.tasks.Task; import org.opensearch.transport.TransportService; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/CorrelationRuleIndices.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/CorrelationRuleIndices.java index a38ebd58cb655..3656bd413733a 100644 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/CorrelationRuleIndices.java +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/CorrelationRuleIndices.java @@ -10,13 +10,13 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.create.CreateIndexRequest; import org.opensearch.action.admin.indices.create.CreateIndexResponse; import org.opensearch.client.Client; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; import org.opensearch.plugin.correlation.rules.model.CorrelationRule; import java.io.IOException; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/IndexUtils.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/IndexUtils.java index a240fa17d1d22..362be3d2932e3 100644 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/IndexUtils.java +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/IndexUtils.java @@ -8,14 +8,14 @@ package org.opensearch.plugin.correlation.utils; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.IndicesAdminClient; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.xcontent.LoggingDeprecationHandler; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; @@ -80,7 +80,7 @@ public static Boolean shouldUpdateIndex(IndexMetadata index, String mapping) thr * @throws IOException IOException */ public static Integer getSchemaVersion(String mapping) throws IOException { - XContentParser xcp = XContentType.JSON.xContent() + XContentParser xcp = MediaTypeRegistry.JSON.xContent() .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, mapping); while (!xcp.isClosed()) { @@ -129,7 +129,7 @@ public static void updateIndexMapping( ) throws IOException { if (clusterState.metadata().indices().containsKey(index)) { if (shouldUpdateIndex(clusterState.metadata().index(index), mapping)) { - PutMappingRequest putMappingRequest = new PutMappingRequest(index).source(mapping, XContentType.JSON); + PutMappingRequest putMappingRequest = new PutMappingRequest(index).source(mapping, MediaTypeRegistry.JSON); client.putMapping(putMappingRequest, actionListener); } else { 
actionListener.onResponse(new AcknowledgedResponse(true)); diff --git a/plugins/events-correlation-engine/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec b/plugins/events-correlation-engine/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec index 598a3b6af73c2..013c17e4a9736 100644 --- a/plugins/events-correlation-engine/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec +++ b/plugins/events-correlation-engine/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec @@ -1 +1 @@ -org.opensearch.plugin.correlation.core.index.codec.correlation950.CorrelationCodec +org.opensearch.plugin.correlation.core.index.codec.correlation990.CorrelationCodec diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/EventsCorrelationPluginTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/EventsCorrelationPluginTests.java index 2207d5a8a41c4..005ffa2097b03 100644 --- a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/EventsCorrelationPluginTests.java +++ b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/EventsCorrelationPluginTests.java @@ -8,8 +8,8 @@ package org.opensearch.plugin.correlation; -import org.junit.Assert; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Assert; public class EventsCorrelationPluginTests extends OpenSearchTestCase { diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/CorrelationParamsContextTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/CorrelationParamsContextTests.java index 1e61cd1de549d..19ce3b33514d8 100644 --- a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/CorrelationParamsContextTests.java +++ b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/CorrelationParamsContextTests.java @@ -9,7 +9,6 @@ package org.opensearch.plugin.correlation.core.index; import org.apache.lucene.index.VectorSimilarityFunction; -import org.junit.Assert; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; @@ -18,6 +17,7 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.mapper.MapperParsingException; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Assert; import java.io.IOException; import java.util.HashMap; diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/VectorFieldTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/VectorFieldTests.java index 861870ab02aa1..32c71dcd37196 100644 --- a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/VectorFieldTests.java +++ b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/VectorFieldTests.java @@ -9,13 +9,13 @@ package org.opensearch.plugin.correlation.core.index; import org.apache.lucene.document.FieldType; -import org.junit.Assert; import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; import org.opensearch.common.Randomness; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.BytesStreamInput; 
import org.opensearch.test.OpenSearchTestCase; +import org.junit.Assert; import java.io.IOException; import java.nio.charset.StandardCharsets; diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/CorrelationCodecTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/CorrelationCodecTests.java similarity index 96% rename from plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/CorrelationCodecTests.java rename to plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/CorrelationCodecTests.java index ac859773f6350..7223b450a136c 100644 --- a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/CorrelationCodecTests.java +++ b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/CorrelationCodecTests.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.plugin.correlation.core.index.codec.correlation950; +package org.opensearch.plugin.correlation.core.index.codec.correlation990; import org.apache.lucene.codecs.Codec; import org.apache.lucene.document.Document; @@ -30,14 +30,14 @@ import java.util.Optional; import java.util.function.Function; +import static org.opensearch.plugin.correlation.core.index.codec.BasePerFieldCorrelationVectorsFormat.METHOD_PARAMETER_EF_CONSTRUCTION; +import static org.opensearch.plugin.correlation.core.index.codec.BasePerFieldCorrelationVectorsFormat.METHOD_PARAMETER_M; +import static org.opensearch.plugin.correlation.core.index.codec.CorrelationCodecVersion.V_9_9_0; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import static org.mockito.Mockito.spy; -import static org.opensearch.plugin.correlation.core.index.codec.BasePerFieldCorrelationVectorsFormat.METHOD_PARAMETER_EF_CONSTRUCTION; -import static org.opensearch.plugin.correlation.core.index.codec.BasePerFieldCorrelationVectorsFormat.METHOD_PARAMETER_M; -import static org.opensearch.plugin.correlation.core.index.codec.CorrelationCodecVersion.V_9_5_0; /** * Unit tests for custom correlation codec @@ -51,11 +51,12 @@ public class CorrelationCodecTests extends OpenSearchTestCase { * test correlation vector index * @throws Exception Exception */ + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8329") public void testCorrelationVectorIndex() throws Exception { Function<MapperService, PerFieldCorrelationVectorsFormat> perFieldCorrelationVectorsProvider = mapperService -> new PerFieldCorrelationVectorsFormat(Optional.of(mapperService)); Function<PerFieldCorrelationVectorsFormat, Codec> correlationCodecProvider = (correlationVectorsFormat -> new CorrelationCodec( - V_9_5_0.getDefaultCodecDelegate(), + V_9_9_0.getDefaultCodecDelegate(), correlationVectorsFormat )); testCorrelationVectorIndex(correlationCodecProvider, perFieldCorrelationVectorsProvider); diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapperTests.java 
b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapperTests.java index 19dff08b63d5f..674f35069a742 100644 --- a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapperTests.java +++ b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapperTests.java @@ -12,8 +12,6 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.VectorSimilarityFunction; import org.apache.lucene.search.FieldExistsQuery; -import org.junit.Assert; -import org.mockito.Mockito; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Explicit; @@ -35,6 +33,7 @@ import org.opensearch.plugin.correlation.core.index.CorrelationParamsContext; import org.opensearch.search.lookup.SearchLookup; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Assert; import java.io.IOException; import java.util.Arrays; @@ -43,6 +42,8 @@ import java.util.Map; import java.util.Optional; +import org.mockito.Mockito; + import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -190,7 +191,7 @@ public void testTypeParser_parse_fromCorrelationParamsContext_InvalidVectorSimil /** * test parseCreateField in CorrelationVectorFieldMapper - * @throws IOException + * @throws IOException ioexception */ public void testCorrelationVectorFieldMapper_parseCreateField() throws IOException { String fieldName = "test-field-name"; diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilderTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilderTests.java index f23a4f25302b1..3e567d0c04e53 100644 --- a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilderTests.java +++ b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilderTests.java @@ -9,21 +9,20 @@ package org.opensearch.plugin.correlation.core.index.query; import org.apache.lucene.search.KnnFloatVectorQuery; -import org.junit.Assert; import org.opensearch.Version; import org.opensearch.cluster.ClusterModule; -import org.opensearch.common.Strings; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.index.Index; import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; @@ -32,6 +31,7 @@ import org.opensearch.plugin.correlation.core.index.mapper.VectorFieldMapper; 
import org.opensearch.plugins.SearchPlugin; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Assert; import java.io.IOException; import java.util.List; @@ -114,7 +114,7 @@ public void testFromXContentFromString() throws IOException { XContentParser contentParser = createParser(JsonXContent.jsonXContent, correlationQuery); contentParser.nextToken(); CorrelationQueryBuilder actualBuilder = CorrelationQueryBuilder.parse(contentParser); - Assert.assertEquals(correlationQuery.replace("\n", "").replace(" ", ""), Strings.toString(XContentType.JSON, actualBuilder)); + Assert.assertEquals(correlationQuery.replace("\n", "").replace(" ", ""), Strings.toString(MediaTypeRegistry.JSON, actualBuilder)); } /** @@ -203,7 +203,7 @@ public void testDoToQueryInvalidFieldType() { /** * test serialization of Correlation Query Builder - * @throws Exception + * @throws Exception throws an IOException if serialization fails */ public void testSerialization() throws Exception { assertSerialization(Optional.empty()); diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/settings/EventsCorrelationSettingsTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/settings/EventsCorrelationSettingsTests.java index faec6057b04c6..45cb47b05b5c2 100644 --- a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/settings/EventsCorrelationSettingsTests.java +++ b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/settings/EventsCorrelationSettingsTests.java @@ -8,13 +8,13 @@ package org.opensearch.plugin.correlation.settings; -import org.junit.Assert; -import org.junit.Before; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.plugin.correlation.EventsCorrelationPlugin; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Assert; +import org.junit.Before; import java.util.List; import java.util.concurrent.TimeUnit; diff --git a/plugins/examples/custom-settings/src/main/java/org/opensearch/example/customsettings/ExampleCustomSettingsConfig.java b/plugins/examples/custom-settings/src/main/java/org/opensearch/example/customsettings/ExampleCustomSettingsConfig.java index 19efdc0982ddb..d3ab54775d8e9 100644 --- a/plugins/examples/custom-settings/src/main/java/org/opensearch/example/customsettings/ExampleCustomSettingsConfig.java +++ b/plugins/examples/custom-settings/src/main/java/org/opensearch/example/customsettings/ExampleCustomSettingsConfig.java @@ -33,10 +33,10 @@ import org.opensearch.OpenSearchException; import org.opensearch.common.settings.SecureSetting; -import org.opensearch.core.common.settings.SecureString; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.settings.SecureString; import org.opensearch.env.Environment; import java.io.IOException; diff --git a/plugins/examples/custom-settings/src/yamlRestTest/java/org/opensearch/example/customsettings/ExampleCustomSettingsClientYamlTestSuiteIT.java b/plugins/examples/custom-settings/src/yamlRestTest/java/org/opensearch/example/customsettings/ExampleCustomSettingsClientYamlTestSuiteIT.java index 2e6da40553401..2f163aa5aabf5 100644 ---
a/plugins/examples/custom-settings/src/yamlRestTest/java/org/opensearch/example/customsettings/ExampleCustomSettingsClientYamlTestSuiteIT.java +++ b/plugins/examples/custom-settings/src/yamlRestTest/java/org/opensearch/example/customsettings/ExampleCustomSettingsClientYamlTestSuiteIT.java @@ -33,6 +33,7 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.test.rest.yaml.ClientYamlTestCandidate; import org.opensearch.test.rest.yaml.OpenSearchClientYamlSuiteTestCase; diff --git a/plugins/examples/custom-significance-heuristic/src/yamlRestTest/java/org/opensearch/example/customsigheuristic/CustomSignificanceHeuristicClientYamlTestSuiteIT.java b/plugins/examples/custom-significance-heuristic/src/yamlRestTest/java/org/opensearch/example/customsigheuristic/CustomSignificanceHeuristicClientYamlTestSuiteIT.java index 881905fbceade..69ba949f60271 100644 --- a/plugins/examples/custom-significance-heuristic/src/yamlRestTest/java/org/opensearch/example/customsigheuristic/CustomSignificanceHeuristicClientYamlTestSuiteIT.java +++ b/plugins/examples/custom-significance-heuristic/src/yamlRestTest/java/org/opensearch/example/customsigheuristic/CustomSignificanceHeuristicClientYamlTestSuiteIT.java @@ -34,6 +34,7 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.test.rest.yaml.ClientYamlTestCandidate; import org.opensearch.test.rest.yaml.OpenSearchClientYamlSuiteTestCase; diff --git a/plugins/examples/custom-suggester/src/main/java/org/opensearch/example/customsuggester/CustomSuggestion.java b/plugins/examples/custom-suggester/src/main/java/org/opensearch/example/customsuggester/CustomSuggestion.java index 9255d3a2f299e..c8e9d8cfc0a12 100644 --- a/plugins/examples/custom-suggester/src/main/java/org/opensearch/example/customsuggester/CustomSuggestion.java +++ b/plugins/examples/custom-suggester/src/main/java/org/opensearch/example/customsuggester/CustomSuggestion.java @@ -53,7 +53,6 @@ public class CustomSuggestion extends Suggest.Suggestion<CustomSuggestion.Entry> /** * An integer representing the type of the suggestion formerly used for internal serialization over the network. - * * This class is now serialized as a NamedWriteable and this value only remains for backwards compatibility */ public static final int TYPE = 999; @@ -106,7 +105,7 @@ public int getWriteableType() { /** * A meaningless value used to test that plugin suggesters can add fields to their Suggestion types - * + * <p> * This can't be serialized to xcontent because Suggestions appear in xcontent as an array of entries, so there is no place * to add a custom field. 
But we can still use a custom field internally and use it to define a Suggestion's behavior * diff --git a/plugins/examples/custom-suggester/src/main/java/org/opensearch/example/customsuggester/CustomSuggestionBuilder.java b/plugins/examples/custom-suggester/src/main/java/org/opensearch/example/customsuggester/CustomSuggestionBuilder.java index 733568590037a..65803d7bbd93c 100644 --- a/plugins/examples/custom-suggester/src/main/java/org/opensearch/example/customsuggester/CustomSuggestionBuilder.java +++ b/plugins/examples/custom-suggester/src/main/java/org/opensearch/example/customsuggester/CustomSuggestionBuilder.java @@ -32,11 +32,11 @@ package org.opensearch.example.customsuggester; +import org.opensearch.common.lucene.BytesRefs; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.lucene.BytesRefs; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.query.QueryShardContext; diff --git a/plugins/examples/custom-suggester/src/yamlRestTest/java/org/opensearch/example/customsuggester/CustomSuggesterClientYamlTestSuiteIT.java b/plugins/examples/custom-suggester/src/yamlRestTest/java/org/opensearch/example/customsuggester/CustomSuggesterClientYamlTestSuiteIT.java index b6237a7a9f855..b924a003c4e1d 100644 --- a/plugins/examples/custom-suggester/src/yamlRestTest/java/org/opensearch/example/customsuggester/CustomSuggesterClientYamlTestSuiteIT.java +++ b/plugins/examples/custom-suggester/src/yamlRestTest/java/org/opensearch/example/customsuggester/CustomSuggesterClientYamlTestSuiteIT.java @@ -34,6 +34,7 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.test.rest.yaml.ClientYamlTestCandidate; import org.opensearch.test.rest.yaml.OpenSearchClientYamlSuiteTestCase; diff --git a/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleAllowlistExtension.java b/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleAllowlistExtension.java index d4af0daddf2b4..dd5650c02accd 100644 --- a/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleAllowlistExtension.java +++ b/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleAllowlistExtension.java @@ -32,10 +32,10 @@ package org.opensearch.example.painlessallowlist; -import org.opensearch.painless.spi.PainlessExtension; import org.opensearch.painless.spi.Allowlist; import org.opensearch.painless.spi.AllowlistInstanceBinding; import org.opensearch.painless.spi.AllowlistLoader; +import org.opensearch.painless.spi.PainlessExtension; import org.opensearch.painless.spi.annotation.AllowlistAnnotationParser; import org.opensearch.script.FieldScript; import org.opensearch.script.ScriptContext; diff --git a/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleAllowlistedClass.java b/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleAllowlistedClass.java index ca58d97377478..ee04741eb5a03 100644 --- a/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleAllowlistedClass.java +++ 
b/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleAllowlistedClass.java @@ -34,7 +34,7 @@ /** * An example of a class to be allowlisted for use by painless scripts - * + * <p> * Each of the members and methods below are allowlisted for use in search scripts. * See <a href="file:example_allowlist.txt">example_allowlist.txt</a>. */ diff --git a/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleAllowlistedInstance.java b/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleAllowlistedInstance.java index 0833f1f0c6659..35385e98187ea 100644 --- a/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleAllowlistedInstance.java +++ b/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleAllowlistedInstance.java @@ -34,7 +34,7 @@ /** * An example of an instance to be allowlisted for use by painless scripts. - * + * <p> * Each of the members and methods below are allowlisted for use in search scripts but only from this instance. */ public class ExampleAllowlistedInstance { diff --git a/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExamplePainlessAnnotation.java b/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExamplePainlessAnnotation.java index 428a0cc688ad3..ac42c2b1cf9bc 100644 --- a/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExamplePainlessAnnotation.java +++ b/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExamplePainlessAnnotation.java @@ -34,7 +34,7 @@ /** * An example of an annotation to be allowlisted for use by painless scripts - * + * <p> * The annotation below is allowlisted for use in search scripts. * See <a href="file:example_allowlist.txt">example_allowlist.txt</a>. */ diff --git a/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleStaticMethodClass.java b/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleStaticMethodClass.java index 25443977a0f66..2f6d1ad9349bc 100644 --- a/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleStaticMethodClass.java +++ b/plugins/examples/painless-allowlist/src/main/java/org/opensearch/example/painlessallowlist/ExampleStaticMethodClass.java @@ -34,7 +34,7 @@ /** * An example of a class with static methods to be allowlisted for use by painless scripts - * + * <p> * The method below is allowlisted for use in search scripts. * See <a href="file:example_allowlist.txt">example_allowlist.txt</a>. 
*/ diff --git a/plugins/examples/painless-allowlist/src/yamlRestTest/java/org/opensearch/example/painlessallowlist/PainlessAllowlistClientYamlTestSuiteIT.java b/plugins/examples/painless-allowlist/src/yamlRestTest/java/org/opensearch/example/painlessallowlist/PainlessAllowlistClientYamlTestSuiteIT.java index 346d91bb27f1b..572e16e5a1bbb 100644 --- a/plugins/examples/painless-allowlist/src/yamlRestTest/java/org/opensearch/example/painlessallowlist/PainlessAllowlistClientYamlTestSuiteIT.java +++ b/plugins/examples/painless-allowlist/src/yamlRestTest/java/org/opensearch/example/painlessallowlist/PainlessAllowlistClientYamlTestSuiteIT.java @@ -34,6 +34,7 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.test.rest.yaml.ClientYamlTestCandidate; import org.opensearch.test.rest.yaml.OpenSearchClientYamlSuiteTestCase; diff --git a/plugins/examples/rescore/src/yamlRestTest/java/org/opensearch/example/rescore/ExampleRescoreClientYamlTestSuiteIT.java b/plugins/examples/rescore/src/yamlRestTest/java/org/opensearch/example/rescore/ExampleRescoreClientYamlTestSuiteIT.java index aec77cffdc9e9..7e870c7d5cb97 100644 --- a/plugins/examples/rescore/src/yamlRestTest/java/org/opensearch/example/rescore/ExampleRescoreClientYamlTestSuiteIT.java +++ b/plugins/examples/rescore/src/yamlRestTest/java/org/opensearch/example/rescore/ExampleRescoreClientYamlTestSuiteIT.java @@ -34,6 +34,7 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.test.rest.yaml.ClientYamlTestCandidate; import org.opensearch.test.rest.yaml.OpenSearchClientYamlSuiteTestCase; diff --git a/plugins/examples/rest-handler/src/yamlRestTest/java/org/opensearch/example/resthandler/ExampleRestHandlerClientYamlTestSuiteIT.java b/plugins/examples/rest-handler/src/yamlRestTest/java/org/opensearch/example/resthandler/ExampleRestHandlerClientYamlTestSuiteIT.java index 4fe87c089d09f..6e374ec7e70db 100644 --- a/plugins/examples/rest-handler/src/yamlRestTest/java/org/opensearch/example/resthandler/ExampleRestHandlerClientYamlTestSuiteIT.java +++ b/plugins/examples/rest-handler/src/yamlRestTest/java/org/opensearch/example/resthandler/ExampleRestHandlerClientYamlTestSuiteIT.java @@ -33,6 +33,7 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.test.rest.yaml.ClientYamlTestCandidate; import org.opensearch.test.rest.yaml.OpenSearchClientYamlSuiteTestCase; diff --git a/plugins/examples/script-expert-scoring/src/main/java/org/opensearch/example/expertscript/ExpertScriptPlugin.java b/plugins/examples/script-expert-scoring/src/main/java/org/opensearch/example/expertscript/ExpertScriptPlugin.java index e7615d9ad7204..07c2d4d6435d7 100644 --- a/plugins/examples/script-expert-scoring/src/main/java/org/opensearch/example/expertscript/ExpertScriptPlugin.java +++ b/plugins/examples/script-expert-scoring/src/main/java/org/opensearch/example/expertscript/ExpertScriptPlugin.java @@ -35,6 +35,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.index.Term; +import org.apache.lucene.search.IndexSearcher; import org.opensearch.common.settings.Settings; import org.opensearch.plugins.Plugin; import org.opensearch.plugins.ScriptPlugin; @@ -120,20 +121,22 @@ public boolean 
isResultDeterministic() { @Override public LeafFactory newFactory( Map<String, Object> params, - SearchLookup lookup + SearchLookup lookup, + IndexSearcher indexSearcher ) { - return new PureDfLeafFactory(params, lookup); + return new PureDfLeafFactory(params, lookup, indexSearcher); } } private static class PureDfLeafFactory implements LeafFactory { private final Map<String, Object> params; private final SearchLookup lookup; + private final IndexSearcher indexSearcher; private final String field; private final String term; private PureDfLeafFactory( - Map<String, Object> params, SearchLookup lookup) { + Map<String, Object> params, SearchLookup lookup, IndexSearcher indexSearcher) { if (params.containsKey("field") == false) { throw new IllegalArgumentException( "Missing parameter [field]"); @@ -144,6 +147,7 @@ private PureDfLeafFactory( } this.params = params; this.lookup = lookup; + this.indexSearcher = indexSearcher; field = params.get("field").toString(); term = params.get("term").toString(); } @@ -163,7 +167,7 @@ public ScoreScript newInstance(LeafReaderContext context) * the field and/or term don't exist in this segment, * so always return 0 */ - return new ScoreScript(params, lookup, context) { + return new ScoreScript(params, lookup, indexSearcher, context) { @Override public double execute( ExplanationHolder explanation @@ -172,7 +176,7 @@ public double execute( } }; } - return new ScoreScript(params, lookup, context) { + return new ScoreScript(params, lookup, indexSearcher, context) { int currentDocid = -1; @Override public void setDocument(int docid) { diff --git a/plugins/examples/script-expert-scoring/src/yamlRestTest/java/org/opensearch/example/expertscript/ExpertScriptClientYamlTestSuiteIT.java b/plugins/examples/script-expert-scoring/src/yamlRestTest/java/org/opensearch/example/expertscript/ExpertScriptClientYamlTestSuiteIT.java index 294b1de4e7f99..b104f612c35c0 100644 --- a/plugins/examples/script-expert-scoring/src/yamlRestTest/java/org/opensearch/example/expertscript/ExpertScriptClientYamlTestSuiteIT.java +++ b/plugins/examples/script-expert-scoring/src/yamlRestTest/java/org/opensearch/example/expertscript/ExpertScriptClientYamlTestSuiteIT.java @@ -34,6 +34,7 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.test.rest.yaml.ClientYamlTestCandidate; import org.opensearch.test.rest.yaml.OpenSearchClientYamlSuiteTestCase; diff --git a/plugins/identity-shiro/build.gradle b/plugins/identity-shiro/build.gradle index baa3464d0a98e..222443efcb214 100644 --- a/plugins/identity-shiro/build.gradle +++ b/plugins/identity-shiro/build.gradle @@ -17,7 +17,7 @@ opensearchplugin { } dependencies { - implementation 'org.apache.shiro:shiro-core:1.11.0' + implementation 'org.apache.shiro:shiro-core:1.13.0' // Needed for shiro implementation "org.slf4j:slf4j-api:${versions.slf4j}" @@ -28,7 +28,7 @@ dependencies { implementation 'org.passay:passay:1.6.3' - implementation "org.bouncycastle:bcprov-jdk15to18:${versions.bouncycastle}" + implementation "org.bouncycastle:bcprov-jdk18on:${versions.bouncycastle}" testImplementation project(path: ':modules:transport-netty4') // for http testImplementation project(path: ':plugins:transport-nio') // for http diff --git a/plugins/identity-shiro/licenses/bcprov-jdk15to18-1.75.jar.sha1 b/plugins/identity-shiro/licenses/bcprov-jdk15to18-1.75.jar.sha1 deleted file mode 100644 index 9911bb75f9209..0000000000000 --- 
a/plugins/identity-shiro/licenses/bcprov-jdk15to18-1.75.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -df22e1b6a9f6b218913f5b68dd16641344397fe0 \ No newline at end of file diff --git a/plugins/identity-shiro/licenses/bcprov-jdk18on-1.77.jar.sha1 b/plugins/identity-shiro/licenses/bcprov-jdk18on-1.77.jar.sha1 new file mode 100644 index 0000000000000..3e780df9559a9 --- /dev/null +++ b/plugins/identity-shiro/licenses/bcprov-jdk18on-1.77.jar.sha1 @@ -0,0 +1 @@ +2cc971b6c20949c1ff98d1a4bc741ee848a09523 \ No newline at end of file diff --git a/plugins/identity-shiro/licenses/bcprov-jdk15to18-LICENSE.txt b/plugins/identity-shiro/licenses/bcprov-jdk18on-LICENSE.txt similarity index 100% rename from plugins/identity-shiro/licenses/bcprov-jdk15to18-LICENSE.txt rename to plugins/identity-shiro/licenses/bcprov-jdk18on-LICENSE.txt diff --git a/plugins/identity-shiro/licenses/bcprov-jdk15to18-NOTICE.txt b/plugins/identity-shiro/licenses/bcprov-jdk18on-NOTICE.txt similarity index 100% rename from plugins/identity-shiro/licenses/bcprov-jdk15to18-NOTICE.txt rename to plugins/identity-shiro/licenses/bcprov-jdk18on-NOTICE.txt diff --git a/plugins/identity-shiro/licenses/commons-logging-LICENSE.txt b/plugins/identity-shiro/licenses/commons-logging-LICENSE.txt index d645695673349..57bc88a15a0ee 100644 --- a/plugins/identity-shiro/licenses/commons-logging-LICENSE.txt +++ b/plugins/identity-shiro/licenses/commons-logging-LICENSE.txt @@ -1,4 +1,3 @@ - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -200,3 +199,4 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + diff --git a/plugins/identity-shiro/licenses/commons-logging-NOTICE.txt b/plugins/identity-shiro/licenses/commons-logging-NOTICE.txt index 1a45218353e87..72eb32a902458 100644 --- a/plugins/identity-shiro/licenses/commons-logging-NOTICE.txt +++ b/plugins/identity-shiro/licenses/commons-logging-NOTICE.txt @@ -1,5 +1,5 @@ -Apache Commons Logging -Copyright 2003-2016 The Apache Software Foundation +Apache Commons CLI +Copyright 2001-2009 The Apache Software Foundation -This product includes software developed at +This product includes software developed by The Apache Software Foundation (http://www.apache.org/). diff --git a/plugins/identity-shiro/licenses/shiro-core-1.11.0.jar.sha1 b/plugins/identity-shiro/licenses/shiro-core-1.11.0.jar.sha1 deleted file mode 100644 index 67c33e15ec689..0000000000000 --- a/plugins/identity-shiro/licenses/shiro-core-1.11.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -033a70c87e91968a299f1ee00f4e95050312346d \ No newline at end of file diff --git a/plugins/identity-shiro/licenses/shiro-core-1.13.0.jar.sha1 b/plugins/identity-shiro/licenses/shiro-core-1.13.0.jar.sha1 new file mode 100644 index 0000000000000..25bd4d9acd166 --- /dev/null +++ b/plugins/identity-shiro/licenses/shiro-core-1.13.0.jar.sha1 @@ -0,0 +1 @@ +7e542e3d614b197bf10005e98e19f9f19cb943e7 \ No newline at end of file diff --git a/plugins/identity-shiro/licenses/slf4j-api-LICENSE.txt b/plugins/identity-shiro/licenses/slf4j-api-LICENSE.txt index 8fda22f4d72f6..2be7689435062 100644 --- a/plugins/identity-shiro/licenses/slf4j-api-LICENSE.txt +++ b/plugins/identity-shiro/licenses/slf4j-api-LICENSE.txt @@ -1,4 +1,4 @@ -Copyright (c) 2004-2014 QOS.ch +Copyright (c) 2004-2022 QOS.ch All rights reserved. 
Permission is hereby granted, free of charge, to any person obtaining @@ -18,4 +18,4 @@ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroIdentityPlugin.java b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroIdentityPlugin.java index c3c08b1359aaa..77cab13880c27 100644 --- a/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroIdentityPlugin.java +++ b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroIdentityPlugin.java @@ -8,15 +8,15 @@ package org.opensearch.identity.shiro; -import org.opensearch.identity.Subject; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.shiro.SecurityUtils; +import org.apache.shiro.mgt.SecurityManager; +import org.opensearch.common.settings.Settings; +import org.opensearch.identity.Subject; import org.opensearch.identity.tokens.TokenManager; import org.opensearch.plugins.IdentityPlugin; -import org.opensearch.common.settings.Settings; import org.opensearch.plugins.Plugin; -import org.apache.shiro.SecurityUtils; -import org.apache.shiro.mgt.SecurityManager; /** * Identity implementation with Shiro diff --git a/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroSubject.java b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroSubject.java index 89a801d1aab76..e55204593621c 100644 --- a/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroSubject.java +++ b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroSubject.java @@ -8,12 +8,12 @@ package org.opensearch.identity.shiro; -import java.security.Principal; -import java.util.Objects; - import org.opensearch.identity.Subject; import org.opensearch.identity.tokens.AuthToken; +import java.security.Principal; +import java.util.Objects; + /** * Subject backed by Shiro * diff --git a/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroTokenManager.java b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroTokenManager.java index 110095a5cd4ef..a14215aa7655b 100644 --- a/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroTokenManager.java +++ b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroTokenManager.java @@ -8,26 +8,30 @@ package org.opensearch.identity.shiro; -import java.util.Arrays; -import java.util.Base64; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Random; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.shiro.SecurityUtils; import org.apache.shiro.authc.AuthenticationToken; import org.apache.shiro.authc.UsernamePasswordToken; import org.opensearch.common.Randomness; import org.opensearch.identity.IdentityService; +import org.opensearch.identity.Subject; import org.opensearch.identity.tokens.AuthToken; import org.opensearch.identity.tokens.BasicAuthToken; +import org.opensearch.identity.tokens.OnBehalfOfClaims; import 
org.opensearch.identity.tokens.TokenManager; + +import java.util.Arrays; +import java.util.Base64; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Random; + import org.passay.CharacterRule; import org.passay.EnglishCharacterData; import org.passay.PasswordGenerator; + import static java.nio.charset.StandardCharsets.UTF_8; /** @@ -51,15 +55,16 @@ public Optional<AuthenticationToken> translateAuthToken(org.opensearch.identity. final BasicAuthToken basicAuthToken = (BasicAuthToken) authenticationToken; return Optional.of(new UsernamePasswordToken(basicAuthToken.getUser(), basicAuthToken.getPassword())); } - return Optional.empty(); } @Override - public AuthToken issueToken(String audience) { + public AuthToken issueOnBehalfOfToken(Subject subject, OnBehalfOfClaims claims) { String password = generatePassword(); - final byte[] rawEncoded = Base64.getEncoder().encode((audience + ":" + password).getBytes(UTF_8)); + // Make a new ShiroSubject audience as name + final byte[] rawEncoded = Base64.getUrlEncoder().encode((claims.getAudience() + ":" + password).getBytes(UTF_8)); + final String usernamePassword = new String(rawEncoded, UTF_8); final String header = "Basic " + usernamePassword; BasicAuthToken token = new BasicAuthToken(header); @@ -68,13 +73,17 @@ public AuthToken issueToken(String audience) { return token; } - public boolean validateToken(AuthToken token) { - if (token instanceof BasicAuthToken) { - final BasicAuthToken basicAuthToken = (BasicAuthToken) token; - return basicAuthToken.getUser().equals(SecurityUtils.getSubject().toString()) - && basicAuthToken.getPassword().equals(shiroTokenPasswordMap.get(basicAuthToken)); - } - return false; + @Override + public AuthToken issueServiceAccountToken(String audience) { + + String password = generatePassword(); + final byte[] rawEncoded = Base64.getUrlEncoder().withoutPadding().encode((audience + ":" + password).getBytes(UTF_8)); // Make a new + final String usernamePassword = new String(rawEncoded, UTF_8); + final String header = "Basic " + usernamePassword; + + BasicAuthToken token = new BasicAuthToken(header); + shiroTokenPasswordMap.put(token, password); + return token; } public String getTokenInfo(AuthToken token) { diff --git a/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/realm/OpenSearchRealm.java b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/realm/OpenSearchRealm.java index e005d7c3360cf..ef405a5637ae7 100644 --- a/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/realm/OpenSearchRealm.java +++ b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/realm/OpenSearchRealm.java @@ -17,12 +17,10 @@ import org.apache.shiro.authc.UsernamePasswordToken; import org.apache.shiro.authc.pam.UnsupportedTokenException; import org.apache.shiro.realm.AuthenticatingRealm; -import org.apache.shiro.authc.UsernamePasswordToken; - import org.opensearch.identity.NamedPrincipal; -import java.util.Objects; import java.util.Map; +import java.util.Objects; import java.util.Optional; /** diff --git a/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/AuthTokenHandlerTests.java b/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/AuthTokenHandlerTests.java index 540fed368aeda..f99484083e2fb 100644 --- a/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/AuthTokenHandlerTests.java +++ 
b/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/AuthTokenHandlerTests.java @@ -8,21 +8,27 @@ package org.opensearch.identity.shiro; -import java.util.Optional; import org.apache.shiro.authc.AuthenticationToken; import org.apache.shiro.authc.UsernamePasswordToken; -import org.junit.Before; +import org.opensearch.identity.Subject; +import org.opensearch.identity.noop.NoopSubject; import org.opensearch.identity.noop.NoopTokenManager; import org.opensearch.identity.tokens.AuthToken; import org.opensearch.identity.tokens.BasicAuthToken; import org.opensearch.identity.tokens.BearerAuthToken; +import org.opensearch.identity.tokens.OnBehalfOfClaims; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.util.Optional; + import org.passay.CharacterCharacteristicsRule; import org.passay.CharacterRule; import org.passay.EnglishCharacterData; import org.passay.LengthRule; import org.passay.PasswordData; import org.passay.PasswordValidator; + import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; @@ -31,16 +37,15 @@ public class AuthTokenHandlerTests extends OpenSearchTestCase { private ShiroTokenManager shiroAuthTokenHandler; - private NoopTokenManager noopTokenManager; @Before public void testSetup() { shiroAuthTokenHandler = new ShiroTokenManager(); - noopTokenManager = new NoopTokenManager(); } public void testShouldExtractBasicAuthTokenSuccessfully() { final BasicAuthToken authToken = new BasicAuthToken("Basic YWRtaW46YWRtaW4="); // admin:admin + assertEquals(authToken.asAuthHeaderValue(), "YWRtaW46YWRtaW4="); final AuthenticationToken translatedToken = shiroAuthTokenHandler.translateAuthToken(authToken).get(); assertThat(translatedToken, is(instanceOf(UsernamePasswordToken.class))); @@ -95,18 +100,13 @@ public void testShouldFailGetTokenInfo() { assertThrows(UnsupportedAuthenticationToken.class, () -> shiroAuthTokenHandler.getTokenInfo(bearerAuthToken)); } - public void testShouldFailValidateToken() { - final BearerAuthToken bearerAuthToken = new BearerAuthToken("header.payload.signature"); - assertFalse(shiroAuthTokenHandler.validateToken(bearerAuthToken)); - } - public void testShoudPassMapLookupWithToken() { final BasicAuthToken authToken = new BasicAuthToken("Basic dGVzdDp0ZTpzdA=="); shiroAuthTokenHandler.getShiroTokenPasswordMap().put(authToken, "te:st"); assertTrue(authToken.getPassword().equals(shiroAuthTokenHandler.getShiroTokenPasswordMap().get(authToken))); } - public void testShouldPassThrougbResetToken(AuthToken token) { + public void testShouldPassThroughResetToken() { final BearerAuthToken bearerAuthToken = new BearerAuthToken("header.payload.signature"); shiroAuthTokenHandler.resetToken(bearerAuthToken); } @@ -121,6 +121,7 @@ public void testVerifyBearerTokenObject() { assertEquals(testGoodToken.getPayload(), "payload"); assertEquals(testGoodToken.getSignature(), "signature"); assertEquals(testGoodToken.toString(), "Bearer auth token with header=header, payload=payload, signature=signature"); + assertEquals(testGoodToken.asAuthHeaderValue(), "header.payload.signature"); } public void testGeneratedPasswordContents() { @@ -144,4 +145,35 @@ public void testGeneratedPasswordContents() { validator.validate(data); } + public void testIssueOnBehalfOfTokenFromClaims() { + Subject subject = new NoopSubject(); + OnBehalfOfClaims claims = new OnBehalfOfClaims("test", "test"); + BasicAuthToken authToken = (BasicAuthToken) 
shiroAuthTokenHandler.issueOnBehalfOfToken(subject, claims); + assertTrue(authToken instanceof BasicAuthToken); + UsernamePasswordToken translatedToken = (UsernamePasswordToken) shiroAuthTokenHandler.translateAuthToken(authToken).get(); + assertEquals(authToken.getPassword(), new String(translatedToken.getPassword())); + assertTrue(shiroAuthTokenHandler.getShiroTokenPasswordMap().containsKey(authToken)); + assertEquals(shiroAuthTokenHandler.getShiroTokenPasswordMap().get(authToken), new String(translatedToken.getPassword())); + } + + public void testTokenNoopIssuance() { + NoopTokenManager tokenManager = new NoopTokenManager(); + OnBehalfOfClaims claims = new OnBehalfOfClaims("test", "test"); + Subject subject = new NoopSubject(); + AuthToken token = tokenManager.issueOnBehalfOfToken(subject, claims); + assertTrue(token instanceof AuthToken); + AuthToken serviceAccountToken = tokenManager.issueServiceAccountToken("test"); + assertTrue(serviceAccountToken instanceof AuthToken); + assertEquals(serviceAccountToken.asAuthHeaderValue(), "noopToken"); + } + + public void testShouldSucceedIssueServiceAccountToken() { + String audience = "testExtensionName"; + BasicAuthToken authToken = (BasicAuthToken) shiroAuthTokenHandler.issueServiceAccountToken(audience); + assertTrue(authToken instanceof BasicAuthToken); + UsernamePasswordToken translatedToken = (UsernamePasswordToken) shiroAuthTokenHandler.translateAuthToken(authToken).get(); + assertEquals(authToken.getPassword(), new String(translatedToken.getPassword())); + assertTrue(shiroAuthTokenHandler.getShiroTokenPasswordMap().containsKey(authToken)); + assertEquals(shiroAuthTokenHandler.getShiroTokenPasswordMap().get(authToken), new String(translatedToken.getPassword())); + } } diff --git a/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/ShiroIdentityPluginTests.java b/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/ShiroIdentityPluginTests.java index f06dff7eea382..626cd44d13ec8 100644 --- a/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/ShiroIdentityPluginTests.java +++ b/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/ShiroIdentityPluginTests.java @@ -8,12 +8,14 @@ package org.opensearch.identity.shiro; -import java.util.List; import org.opensearch.OpenSearchException; import org.opensearch.common.settings.Settings; import org.opensearch.identity.IdentityService; import org.opensearch.plugins.IdentityPlugin; import org.opensearch.test.OpenSearchTestCase; + +import java.util.List; + import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; diff --git a/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/ShiroSubjectTests.java b/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/ShiroSubjectTests.java index 930945e9a2d8d..ca896e1475120 100644 --- a/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/ShiroSubjectTests.java +++ b/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/ShiroSubjectTests.java @@ -9,18 +9,17 @@ package org.opensearch.identity.shiro; import org.opensearch.test.OpenSearchTestCase; - -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; -import static org.mockito.Mockito.verify; -import org.junit.Before; import org.junit.After; +import org.junit.Before; import java.security.Principal; -import static org.mockito.Mockito.verifyNoMoreInteractions; -import static 
org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; public class ShiroSubjectTests extends OpenSearchTestCase { diff --git a/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/realm/BCryptPasswordMatcherTests.java b/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/realm/BCryptPasswordMatcherTests.java index 37c8c4c220b58..91e88ed1bf701 100644 --- a/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/realm/BCryptPasswordMatcherTests.java +++ b/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/realm/BCryptPasswordMatcherTests.java @@ -8,14 +8,13 @@ package org.opensearch.identity.shiro.realm; +import org.apache.shiro.authc.AuthenticationInfo; +import org.apache.shiro.authc.UsernamePasswordToken; import org.opensearch.test.OpenSearchTestCase; +import static org.hamcrest.Matchers.equalTo; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import static org.hamcrest.Matchers.equalTo; - -import org.apache.shiro.authc.UsernamePasswordToken; -import org.apache.shiro.authc.AuthenticationInfo; public class BCryptPasswordMatcherTests extends OpenSearchTestCase { diff --git a/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/realm/OpenSearchRealmTests.java b/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/realm/OpenSearchRealmTests.java index d6129c238408a..db96a6d91a38e 100644 --- a/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/realm/OpenSearchRealmTests.java +++ b/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/realm/OpenSearchRealmTests.java @@ -11,8 +11,8 @@ import org.apache.shiro.authc.AuthenticationInfo; import org.apache.shiro.authc.IncorrectCredentialsException; import org.apache.shiro.authc.UsernamePasswordToken; -import org.junit.Before; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; public class OpenSearchRealmTests extends OpenSearchTestCase { diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 62651216c8144..6da34c4c9caf2 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -57,7 +57,7 @@ dependencies { runtimeOnly "com.google.guava:guava:${versions.guava}" // Other dependencies api 'org.tukaani:xz:1.9' - api 'commons-io:commons-io:2.13.0' + api 'commons-io:commons-io:2.15.1' api "org.slf4j:slf4j-api:${versions.slf4j}" // character set detection @@ -71,29 +71,29 @@ dependencies { api "org.apache.pdfbox:fontbox:${versions.pdfbox}" api "org.apache.pdfbox:jempbox:1.8.17" api "commons-logging:commons-logging:${versions.commonslogging}" - api "org.bouncycastle:bcmail-jdk15to18:${versions.bouncycastle}" - api "org.bouncycastle:bcprov-jdk15to18:${versions.bouncycastle}" - api "org.bouncycastle:bcpkix-jdk15to18:${versions.bouncycastle}" + api "org.bouncycastle:bcmail-jdk18on:${versions.bouncycastle}" + api "org.bouncycastle:bcprov-jdk18on:${versions.bouncycastle}" + api "org.bouncycastle:bcpkix-jdk18on:${versions.bouncycastle}" // OpenOffice api "org.apache.poi:poi-ooxml:${versions.poi}" api "org.apache.poi:poi:${versions.poi}" api "org.apache.poi:poi-ooxml-lite:${versions.poi}" api "commons-codec:commons-codec:${versions.commonscodec}" - api 
'org.apache.xmlbeans:xmlbeans:5.1.1' + api 'org.apache.xmlbeans:xmlbeans:5.2.0' api 'org.apache.commons:commons-collections4:4.4' // MS Office api "org.apache.poi:poi-scratchpad:${versions.poi}" // Apple iWork - api 'org.apache.commons:commons-compress:1.23.0' + api "org.apache.commons:commons-compress:${versions.commonscompress}" // Outlook documents api "org.apache.james:apache-mime4j-core:${versions.mime4j}" api "org.apache.james:apache-mime4j-dom:${versions.mime4j}" // EPUB books - api 'org.apache.commons:commons-lang3:3.12.0' + api "org.apache.commons:commons-lang3:${versions.commonslang}" // Microsoft Word files with visio diagrams api 'org.apache.commons:commons-math3:3.6.1' // POIs dependency - api 'com.zaxxer:SparseBitSet:1.2' + api 'com.zaxxer:SparseBitSet:1.3' } restResources { diff --git a/plugins/ingest-attachment/licenses/SparseBitSet-1.2.jar.sha1 b/plugins/ingest-attachment/licenses/SparseBitSet-1.2.jar.sha1 deleted file mode 100644 index 5f1d015b87ac7..0000000000000 --- a/plugins/ingest-attachment/licenses/SparseBitSet-1.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8467c813d442837fcaeddbc42cf5c5359fab4933 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/SparseBitSet-1.3.jar.sha1 b/plugins/ingest-attachment/licenses/SparseBitSet-1.3.jar.sha1 new file mode 100644 index 0000000000000..2803db7c91e30 --- /dev/null +++ b/plugins/ingest-attachment/licenses/SparseBitSet-1.3.jar.sha1 @@ -0,0 +1 @@ +533eac055afe3d5f614ea95e333afd6c2bde8f26 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcmail-jdk15to18-1.75.jar.sha1 b/plugins/ingest-attachment/licenses/bcmail-jdk15to18-1.75.jar.sha1 deleted file mode 100644 index e6840a9b02b38..0000000000000 --- a/plugins/ingest-attachment/licenses/bcmail-jdk15to18-1.75.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b316bcd094e3917b1ece93a6edbab93f8315fb3b \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcmail-jdk18on-1.77.jar.sha1 b/plugins/ingest-attachment/licenses/bcmail-jdk18on-1.77.jar.sha1 new file mode 100644 index 0000000000000..f71659316b8cd --- /dev/null +++ b/plugins/ingest-attachment/licenses/bcmail-jdk18on-1.77.jar.sha1 @@ -0,0 +1 @@ +f2bb8aa55dc901ee8b8aae7d1007c03592d65e03 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcmail-jdk15to18-LICENSE.txt b/plugins/ingest-attachment/licenses/bcmail-jdk18on-LICENSE.txt similarity index 100% rename from plugins/ingest-attachment/licenses/bcmail-jdk15to18-LICENSE.txt rename to plugins/ingest-attachment/licenses/bcmail-jdk18on-LICENSE.txt diff --git a/plugins/ingest-attachment/licenses/bcmail-jdk15to18-NOTICE.txt b/plugins/ingest-attachment/licenses/bcmail-jdk18on-NOTICE.txt similarity index 100% rename from plugins/ingest-attachment/licenses/bcmail-jdk15to18-NOTICE.txt rename to plugins/ingest-attachment/licenses/bcmail-jdk18on-NOTICE.txt diff --git a/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-1.75.jar.sha1 b/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-1.75.jar.sha1 deleted file mode 100644 index 9181b1c3ab1b6..0000000000000 --- a/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-1.75.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f16e5252ad7a46d5eaf255231b0a5da307599082 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcpkix-jdk18on-1.77.jar.sha1 b/plugins/ingest-attachment/licenses/bcpkix-jdk18on-1.77.jar.sha1 new file mode 100644 index 0000000000000..05a8b2d5729bd --- /dev/null +++ b/plugins/ingest-attachment/licenses/bcpkix-jdk18on-1.77.jar.sha1 @@ -0,0 +1 
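The jdk15to18 to jdk18on BouncyCastle migration above, together with the checksum swaps that follow, is a coordinate rename plus a bump to 1.77; the provider class itself keeps its package and name. A hedged sketch of standard provider registration, shown only to make that point (none of this code is part of the diff):

import java.security.Security;

import org.bouncycastle.jce.provider.BouncyCastleProvider;

// The provider class is identical in bcprov-jdk18on and bcprov-jdk15to18,
// so existing call sites need no source changes, only the dependency bump.
public class RegisterBouncyCastle {
    public static void main(String[] args) {
        Security.addProvider(new BouncyCastleProvider());
        System.out.println(Security.getProvider(BouncyCastleProvider.PROVIDER_NAME) != null); // true if registered
    }
}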
@@ +ed953791ba0229747dd0fd9911e3d76a462acfd3 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-LICENSE.txt b/plugins/ingest-attachment/licenses/bcpkix-jdk18on-LICENSE.txt similarity index 100% rename from plugins/ingest-attachment/licenses/bcpkix-jdk15to18-LICENSE.txt rename to plugins/ingest-attachment/licenses/bcpkix-jdk18on-LICENSE.txt diff --git a/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-NOTICE.txt b/plugins/ingest-attachment/licenses/bcpkix-jdk18on-NOTICE.txt similarity index 100% rename from plugins/ingest-attachment/licenses/bcpkix-jdk15to18-NOTICE.txt rename to plugins/ingest-attachment/licenses/bcpkix-jdk18on-NOTICE.txt diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk15to18-1.75.jar.sha1 b/plugins/ingest-attachment/licenses/bcprov-jdk15to18-1.75.jar.sha1 deleted file mode 100644 index 9911bb75f9209..0000000000000 --- a/plugins/ingest-attachment/licenses/bcprov-jdk15to18-1.75.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -df22e1b6a9f6b218913f5b68dd16641344397fe0 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk15to18-LICENSE.txt b/plugins/ingest-attachment/licenses/bcprov-jdk15to18-LICENSE.txt deleted file mode 100644 index e1fc4a1506db5..0000000000000 --- a/plugins/ingest-attachment/licenses/bcprov-jdk15to18-LICENSE.txt +++ /dev/null @@ -1,23 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2000 - 2013 The Legion of the Bouncy Castle Inc. - (http://www.bouncycastle.org) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk18on-1.77.jar.sha1 b/plugins/ingest-attachment/licenses/bcprov-jdk18on-1.77.jar.sha1 new file mode 100644 index 0000000000000..3e780df9559a9 --- /dev/null +++ b/plugins/ingest-attachment/licenses/bcprov-jdk18on-1.77.jar.sha1 @@ -0,0 +1 @@ +2cc971b6c20949c1ff98d1a4bc741ee848a09523 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk18on-LICENSE.txt b/plugins/ingest-attachment/licenses/bcprov-jdk18on-LICENSE.txt new file mode 100644 index 0000000000000..9f27bafe96885 --- /dev/null +++ b/plugins/ingest-attachment/licenses/bcprov-jdk18on-LICENSE.txt @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2000 - 2013 The Legion of the Bouncy Castle Inc. 
+ (http://www.bouncycastle.org) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk15to18-NOTICE.txt b/plugins/ingest-attachment/licenses/bcprov-jdk18on-NOTICE.txt similarity index 100% rename from plugins/ingest-attachment/licenses/bcprov-jdk15to18-NOTICE.txt rename to plugins/ingest-attachment/licenses/bcprov-jdk18on-NOTICE.txt diff --git a/plugins/ingest-attachment/licenses/commons-compress-1.23.0.jar.sha1 b/plugins/ingest-attachment/licenses/commons-compress-1.23.0.jar.sha1 deleted file mode 100644 index 48dba88409c17..0000000000000 --- a/plugins/ingest-attachment/licenses/commons-compress-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4af2060ea9b0c8b74f1854c6cafe4d43cfc161fc \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-compress-1.24.0.jar.sha1 b/plugins/ingest-attachment/licenses/commons-compress-1.24.0.jar.sha1 new file mode 100644 index 0000000000000..23999d1bfbde4 --- /dev/null +++ b/plugins/ingest-attachment/licenses/commons-compress-1.24.0.jar.sha1 @@ -0,0 +1 @@ +b4b1b5a3d9573b2970fddab236102c0a4d27d35e \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-io-2.13.0.jar.sha1 b/plugins/ingest-attachment/licenses/commons-io-2.13.0.jar.sha1 deleted file mode 100644 index c165136eb5822..0000000000000 --- a/plugins/ingest-attachment/licenses/commons-io-2.13.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8bb2bc9b4df17e2411533a0708a69f983bf5e83b \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-io-2.15.1.jar.sha1 b/plugins/ingest-attachment/licenses/commons-io-2.15.1.jar.sha1 new file mode 100644 index 0000000000000..47c5d13812a36 --- /dev/null +++ b/plugins/ingest-attachment/licenses/commons-io-2.15.1.jar.sha1 @@ -0,0 +1 @@ +f11560da189ab563a5c8e351941415430e9304ea \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-lang3-3.12.0.jar.sha1 b/plugins/ingest-attachment/licenses/commons-lang3-3.12.0.jar.sha1 deleted file mode 100644 index 9273d8c01aaba..0000000000000 --- a/plugins/ingest-attachment/licenses/commons-lang3-3.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c6842c86792ff03b9f1d1fe2aab8dc23aa6c6f0e \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-lang3-3.13.0.jar.sha1 b/plugins/ingest-attachment/licenses/commons-lang3-3.13.0.jar.sha1 new file mode 100644 index 0000000000000..d0c2f2486ee1f --- /dev/null +++ b/plugins/ingest-attachment/licenses/commons-lang3-3.13.0.jar.sha1 @@ -0,0 +1 @@ 
+b7263237aa89c1f99b327197c41d0669707a462e \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-logging-LICENSE.txt b/plugins/ingest-attachment/licenses/commons-logging-LICENSE.txt index d645695673349..57bc88a15a0ee 100644 --- a/plugins/ingest-attachment/licenses/commons-logging-LICENSE.txt +++ b/plugins/ingest-attachment/licenses/commons-logging-LICENSE.txt @@ -1,4 +1,3 @@ - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -200,3 +199,4 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + diff --git a/plugins/ingest-attachment/licenses/commons-logging-NOTICE.txt b/plugins/ingest-attachment/licenses/commons-logging-NOTICE.txt index d3d6e140ce4f3..72eb32a902458 100644 --- a/plugins/ingest-attachment/licenses/commons-logging-NOTICE.txt +++ b/plugins/ingest-attachment/licenses/commons-logging-NOTICE.txt @@ -1,5 +1,5 @@ -Apache Commons Logging -Copyright 2003-2014 The Apache Software Foundation +Apache Commons CLI +Copyright 2001-2009 The Apache Software Foundation -This product includes software developed at +This product includes software developed by The Apache Software Foundation (http://www.apache.org/). diff --git a/plugins/ingest-attachment/licenses/slf4j-api-LICENSE.txt b/plugins/ingest-attachment/licenses/slf4j-api-LICENSE.txt index 52055e61de46f..2be7689435062 100644 --- a/plugins/ingest-attachment/licenses/slf4j-api-LICENSE.txt +++ b/plugins/ingest-attachment/licenses/slf4j-api-LICENSE.txt @@ -1,21 +1,21 @@ -Copyright (c) 2004-2014 QOS.ch -All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +Copyright (c) 2004-2022 QOS.ch +All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/xmlbeans-5.1.1.jar.sha1 b/plugins/ingest-attachment/licenses/xmlbeans-5.1.1.jar.sha1 deleted file mode 100644 index 4d1d2ad0807e7..0000000000000 --- a/plugins/ingest-attachment/licenses/xmlbeans-5.1.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -48a369df0eccb509d46203104e4df9cb00f0f68b \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/xmlbeans-5.2.0.jar.sha1 b/plugins/ingest-attachment/licenses/xmlbeans-5.2.0.jar.sha1 new file mode 100644 index 0000000000000..f34274d593697 --- /dev/null +++ b/plugins/ingest-attachment/licenses/xmlbeans-5.2.0.jar.sha1 @@ -0,0 +1 @@ +6198ac997b3f234f2b5393fa415f78fac2e06510 \ No newline at end of file diff --git a/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/AttachmentProcessor.java b/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/AttachmentProcessor.java index 3f7b341edc409..54d2e0c7832e6 100644 --- a/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/AttachmentProcessor.java +++ b/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/AttachmentProcessor.java @@ -37,7 +37,6 @@ import org.apache.tika.language.detect.LanguageResult; import org.apache.tika.metadata.Metadata; import org.apache.tika.metadata.TikaCoreProperties; - import org.opensearch.OpenSearchParseException; import org.opensearch.core.common.Strings; import org.opensearch.ingest.AbstractProcessor; diff --git a/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/IngestAttachmentPlugin.java b/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/IngestAttachmentPlugin.java index a7e51b21c3c1a..a1d1b07e73f5d 100644 --- a/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/IngestAttachmentPlugin.java +++ b/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/IngestAttachmentPlugin.java @@ -32,13 +32,13 @@ package org.opensearch.ingest.attachment; -import java.util.Collections; -import java.util.Map; - import org.opensearch.ingest.Processor; import org.opensearch.plugins.IngestPlugin; import org.opensearch.plugins.Plugin; +import java.util.Collections; +import java.util.Map; + public class IngestAttachmentPlugin extends Plugin implements IngestPlugin { @Override diff --git a/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/TikaImpl.java b/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/TikaImpl.java index ce7ceb5e3d776..fe783e5ddb675 100644 --- a/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/TikaImpl.java +++ b/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/TikaImpl.java @@ -112,6 +112,7 @@ final class TikaImpl { /** * parses with tika, throwing any exception hit while parsing the document */ + @SuppressWarnings("removal") static String parse(final byte content[], final Metadata metadata, final int limit) throws TikaException, IOException { // check that its not unprivileged code like a script SpecialPermission.check(); @@ -136,6 +137,7 @@ static String parse(final byte content[], final Metadata metadata, final int lim // apply additional containment for parsers, this is 
intersected with the current permissions // its hairy, but worth it so we don't have some XML flaw reading random crap from the FS + @SuppressWarnings("removal") private static final AccessControlContext RESTRICTED_CONTEXT = new AccessControlContext( new ProtectionDomain[] { new ProtectionDomain(null, getRestrictedPermissions()) } ); diff --git a/plugins/ingest-attachment/src/test/java/org/opensearch/ingest/attachment/AttachmentProcessorTests.java b/plugins/ingest-attachment/src/test/java/org/opensearch/ingest/attachment/AttachmentProcessorTests.java index 2fd29d5bfdeae..17d73a74b6073 100644 --- a/plugins/ingest-attachment/src/test/java/org/opensearch/ingest/attachment/AttachmentProcessorTests.java +++ b/plugins/ingest-attachment/src/test/java/org/opensearch/ingest/attachment/AttachmentProcessorTests.java @@ -33,13 +33,11 @@ package org.opensearch.ingest.attachment; import org.apache.commons.io.IOUtils; - import org.opensearch.OpenSearchParseException; import org.opensearch.ingest.IngestDocument; import org.opensearch.ingest.Processor; import org.opensearch.ingest.RandomDocumentPicks; import org.opensearch.test.OpenSearchTestCase; - import org.junit.Before; import java.io.InputStream; diff --git a/plugins/mapper-annotated-text/src/internalClusterTest/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldMapperTests.java b/plugins/mapper-annotated-text/src/internalClusterTest/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldMapperTests.java index d03fcd47fe991..9bfd5669e731b 100644 --- a/plugins/mapper-annotated-text/src/internalClusterTest/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldMapperTests.java +++ b/plugins/mapper-annotated-text/src/internalClusterTest/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldMapperTests.java @@ -47,10 +47,9 @@ import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.util.BytesRef; -import org.opensearch.common.Strings; +import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.index.IndexSettings; import org.opensearch.index.analysis.AnalyzerScope; import org.opensearch.index.analysis.CharFilterFactory; @@ -396,92 +395,87 @@ public void testPositionIncrementGap() throws IOException { } public void testSearchAnalyzerSerialization() throws IOException { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("_doc") - .startObject("properties") - .startObject("field") - .field("type", "annotated_text") - .field("analyzer", "standard") - .field("search_analyzer", "keyword") - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("field") + .field("type", "annotated_text") + .field("analyzer", "standard") + .field("search_analyzer", "keyword") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); DocumentMapper mapper = createDocumentMapper("_doc", mapping); assertEquals(mapping, mapper.mappingSource().toString()); // special case: default index analyzer - mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("_doc") - .startObject("properties") - .startObject("field") - .field("type", "annotated_text") - .field("analyzer", "default") - 
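For context on the two @SuppressWarnings("removal") annotations added to TikaImpl above: JDK 17 deprecated the SecurityManager machinery for removal (JEP 411), so code that still builds an AccessControlContext or calls AccessController now triggers a "removal" lint that must be suppressed explicitly. A minimal sketch of the pattern, independent of Tika:

import java.security.AccessController;
import java.security.PrivilegedAction;

final class PrivilegedRead {
    // AccessController is deprecated-for-removal since JDK 17; the annotation
    // silences the "removal" warning at this one call site.
    @SuppressWarnings("removal")
    static String javaVersion() {
        return AccessController.doPrivileged((PrivilegedAction<String>) () -> System.getProperty("java.version"));
    }

    public static void main(String[] args) {
        System.out.println(javaVersion());
    }
}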
.field("search_analyzer", "keyword") - .endObject() - .endObject() - .endObject() - .endObject() - ); + mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("field") + .field("type", "annotated_text") + .field("analyzer", "default") + .field("search_analyzer", "keyword") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); mapper = createDocumentMapper("_doc", mapping); assertEquals(mapping, mapper.mappingSource().toString()); - mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("_doc") - .startObject("properties") - .startObject("field") - .field("type", "annotated_text") - .field("analyzer", "keyword") - .endObject() - .endObject() - .endObject() - .endObject() - ); + mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("field") + .field("type", "annotated_text") + .field("analyzer", "keyword") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); mapper = createDocumentMapper("_doc", mapping); assertEquals(mapping, mapper.mappingSource().toString()); // special case: default search analyzer - mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("_doc") - .startObject("properties") - .startObject("field") - .field("type", "annotated_text") - .field("analyzer", "keyword") - .field("search_analyzer", "default") - .endObject() - .endObject() - .endObject() - .endObject() - ); + mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("field") + .field("type", "annotated_text") + .field("analyzer", "keyword") + .field("search_analyzer", "default") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); mapper = createDocumentMapper("_doc", mapping); assertEquals(mapping, mapper.mappingSource().toString()); - mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("_doc") - .startObject("properties") - .startObject("field") - .field("type", "annotated_text") - .field("analyzer", "keyword") - .endObject() - .endObject() - .endObject() - .endObject() - ); + mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("field") + .field("type", "annotated_text") + .field("analyzer", "keyword") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); mapper = createDocumentMapper("_doc", mapping); XContentBuilder builder = XContentFactory.jsonBuilder(); @@ -489,48 +483,46 @@ public void testSearchAnalyzerSerialization() throws IOException { mapper.toXContent(builder, new ToXContent.MapParams(Collections.singletonMap("include_defaults", "true"))); builder.endObject(); - String mappingString = Strings.toString(builder); + String mappingString = builder.toString(); assertTrue(mappingString.contains("analyzer")); assertTrue(mappingString.contains("search_analyzer")); assertTrue(mappingString.contains("search_quote_analyzer")); } public void testSearchQuoteAnalyzerSerialization() throws IOException { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("_doc") - .startObject("properties") - .startObject("field") - .field("type", "annotated_text") - .field("analyzer", "standard") - .field("search_analyzer", "standard") - .field("search_quote_analyzer", "keyword") - .endObject() - .endObject() - 
.endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("field") + .field("type", "annotated_text") + .field("analyzer", "standard") + .field("search_analyzer", "standard") + .field("search_quote_analyzer", "keyword") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); DocumentMapper mapper = createDocumentMapper("_doc", mapping); assertEquals(mapping, mapper.mappingSource().toString()); // special case: default index/search analyzer - mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("_doc") - .startObject("properties") - .startObject("field") - .field("type", "annotated_text") - .field("analyzer", "default") - .field("search_analyzer", "default") - .field("search_quote_analyzer", "keyword") - .endObject() - .endObject() - .endObject() - .endObject() - ); + mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("field") + .field("type", "annotated_text") + .field("analyzer", "default") + .field("search_analyzer", "default") + .field("search_quote_analyzer", "keyword") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); mapper = createDocumentMapper("_doc", mapping); assertEquals(mapping, mapper.mappingSource().toString()); diff --git a/plugins/mapper-annotated-text/src/main/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java b/plugins/mapper-annotated-text/src/main/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java index e377a7c030f37..952cff96860f2 100644 --- a/plugins/mapper-annotated-text/src/main/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java +++ b/plugins/mapper-annotated-text/src/main/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java @@ -44,7 +44,6 @@ import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; import org.apache.lucene.index.IndexOptions; - import org.opensearch.OpenSearchParseException; import org.opensearch.index.analysis.AnalyzerScope; import org.opensearch.index.analysis.IndexAnalyzers; @@ -74,9 +73,9 @@ import java.util.regex.Pattern; /** A {@link FieldMapper} for full-text fields with annotation markup e.g. 
- * + * <p> * "New mayor is [John Smith](type=person&value=John%20Smith) " - * + * <p> * A special Analyzer wraps the default choice of analyzer in order * to strip the text field of annotation markup and inject the related * entity annotation tokens as supplementary tokens at the relevant points diff --git a/plugins/mapper-annotated-text/src/test/java/org/opensearch/search/fetch/subphase/highlight/AnnotatedTextHighlighterTests.java b/plugins/mapper-annotated-text/src/test/java/org/opensearch/search/fetch/subphase/highlight/AnnotatedTextHighlighterTests.java index 7e6cf47fa92fd..34851cf9bcbae 100644 --- a/plugins/mapper-annotated-text/src/test/java/org/opensearch/search/fetch/subphase/highlight/AnnotatedTextHighlighterTests.java +++ b/plugins/mapper-annotated-text/src/test/java/org/opensearch/search/fetch/subphase/highlight/AnnotatedTextHighlighterTests.java @@ -67,8 +67,8 @@ import java.util.ArrayList; import java.util.Locale; -import static org.apache.lucene.search.uhighlight.CustomUnifiedHighlighter.MULTIVAL_SEP_CHAR; import static org.hamcrest.CoreMatchers.equalTo; +import static org.apache.lucene.search.uhighlight.CustomUnifiedHighlighter.MULTIVAL_SEP_CHAR; public class AnnotatedTextHighlighterTests extends OpenSearchTestCase { diff --git a/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingIT.java b/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingIT.java index 2c307ef4cc015..51e0979324623 100644 --- a/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingIT.java +++ b/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingIT.java @@ -34,8 +34,8 @@ import org.opensearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.opensearch.action.get.GetResponse; import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.plugin.mapper.MapperSizePlugin; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; @@ -123,7 +123,7 @@ private void assertSizeMappingEnabled(String index, boolean enabled) throws IOEx public void testBasic() throws Exception { assertAcked(prepareCreate("test").setMapping("_size", "enabled=true")); final String source = "{\"f\":10}"; - indexRandom(true, client().prepareIndex("test").setId("1").setSource(source, XContentType.JSON)); + indexRandom(true, client().prepareIndex("test").setId("1").setSource(source, MediaTypeRegistry.JSON)); GetResponse getResponse = client().prepareGet("test", "1").setStoredFields("_size").get(); assertNotNull(getResponse.getField("_size")); assertEquals(source.length(), (int) getResponse.getField("_size").getValue()); diff --git a/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingTests.java b/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingTests.java index 834c8a448d3d5..e7e8d92cee65a 100644 --- a/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingTests.java +++ b/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingTests.java @@ -32,29 +32,27 @@ package org.opensearch.index.mapper.size; -import java.util.Collection; - -import org.opensearch.common.Strings; -import 
org.opensearch.core.common.bytes.BytesReference; +import org.apache.lucene.index.IndexableField; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexService; -import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.DocumentMapper; +import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.ParsedDocument; import org.opensearch.index.mapper.SourceToParse; import org.opensearch.plugin.mapper.MapperSizePlugin; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchSingleNodeTestCase; + +import java.util.Collection; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; -import org.apache.lucene.index.IndexableField; - public class SizeMappingTests extends OpenSearchSingleNodeTestCase { @Override protected Collection<Class<? extends Plugin>> getPlugins() { @@ -66,7 +64,7 @@ public void testSizeEnabled() throws Exception { DocumentMapper docMapper = service.mapperService().documentMapper(); BytesReference source = BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("field", "value").endObject()); - ParsedDocument doc = docMapper.parse(new SourceToParse("test", "1", source, XContentType.JSON)); + ParsedDocument doc = docMapper.parse(new SourceToParse("test", "1", source, MediaTypeRegistry.JSON)); boolean stored = false; boolean points = false; @@ -83,7 +81,7 @@ public void testSizeDisabled() throws Exception { DocumentMapper docMapper = service.mapperService().documentMapper(); BytesReference source = BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("field", "value").endObject()); - ParsedDocument doc = docMapper.parse(new SourceToParse("test", "1", source, XContentType.JSON)); + ParsedDocument doc = docMapper.parse(new SourceToParse("test", "1", source, MediaTypeRegistry.JSON)); assertThat(doc.rootDoc().getField("_size"), nullValue()); } @@ -93,7 +91,7 @@ public void testSizeNotSet() throws Exception { DocumentMapper docMapper = service.mapperService().documentMapper(); BytesReference source = BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("field", "value").endObject()); - ParsedDocument doc = docMapper.parse(new SourceToParse("test", "1", source, XContentType.JSON)); + ParsedDocument doc = docMapper.parse(new SourceToParse("test", "1", source, MediaTypeRegistry.JSON)); assertThat(doc.rootDoc().getField("_size"), nullValue()); } @@ -103,16 +101,15 @@ public void testThatDisablingWorksWhenMerging() throws Exception { DocumentMapper docMapper = service.mapperService().documentMapper(); assertThat(docMapper.metadataMapper(SizeFieldMapper.class).enabled(), is(true)); - String disabledMapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("_size") - .field("enabled", false) - .endObject() - .endObject() - .endObject() - ); + String disabledMapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("_size") + .field("enabled", false) + .endObject() + .endObject() + .endObject() + .toString(); docMapper = service.mapperService() 
.merge("type", new CompressedXContent(disabledMapping), MapperService.MergeReason.MAPPING_UPDATE); diff --git a/plugins/query-insights/build.gradle b/plugins/query-insights/build.gradle new file mode 100644 index 0000000000000..eabbd395bd3bd --- /dev/null +++ b/plugins/query-insights/build.gradle @@ -0,0 +1,18 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +opensearchplugin { + description 'OpenSearch Query Insights Plugin.' + classname 'org.opensearch.plugin.insights.QueryInsightsPlugin' +} + +dependencies { +} diff --git a/plugins/query-insights/src/internalClusterTest/java/org/opensearch/plugin/insights/QueryInsightsPluginTransportIT.java b/plugins/query-insights/src/internalClusterTest/java/org/opensearch/plugin/insights/QueryInsightsPluginTransportIT.java new file mode 100644 index 0000000000000..04e715444f50a --- /dev/null +++ b/plugins/query-insights/src/internalClusterTest/java/org/opensearch/plugin/insights/QueryInsightsPluginTransportIT.java @@ -0,0 +1,274 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights; + +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.action.admin.cluster.node.info.NodeInfo; +import org.opensearch.action.admin.cluster.node.info.NodesInfoRequest; +import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.opensearch.action.admin.cluster.node.info.PluginsAndModules; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.action.index.IndexResponse; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesAction; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesRequest; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesResponse; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.plugins.Plugin; +import org.opensearch.plugins.PluginInfo; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.Assert; + +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED; +import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE; +import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +/** + * Transport Action tests for Query Insights Plugin + */ + +@OpenSearchIntegTestCase.ClusterScope(numDataNodes = 0, scope = OpenSearchIntegTestCase.Scope.TEST) +public class QueryInsightsPluginTransportIT extends OpenSearchIntegTestCase { + + private final int TOTAL_NUMBER_OF_NODES = 
2; + private final int TOTAL_SEARCH_REQUESTS = 5; + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Arrays.asList(QueryInsightsPlugin.class); + } + + /** + * Test Query Insights Plugin is installed + */ + public void testQueryInsightPluginInstalled() { + NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(); + nodesInfoRequest.addMetric(NodesInfoRequest.Metric.PLUGINS.metricName()); + NodesInfoResponse nodesInfoResponse = OpenSearchIntegTestCase.client().admin().cluster().nodesInfo(nodesInfoRequest).actionGet(); + List<PluginInfo> pluginInfos = nodesInfoResponse.getNodes() + .stream() + .flatMap( + (Function<NodeInfo, Stream<PluginInfo>>) nodeInfo -> nodeInfo.getInfo(PluginsAndModules.class).getPluginInfos().stream() + ) + .collect(Collectors.toList()); + Assert.assertTrue( + pluginInfos.stream().anyMatch(pluginInfo -> pluginInfo.getName().equals("org.opensearch.plugin.insights.QueryInsightsPlugin")) + ); + } + + /** + * Test get top queries when feature disabled + */ + public void testGetTopQueriesWhenFeatureDisabled() { + TopQueriesRequest request = new TopQueriesRequest(MetricType.LATENCY); + TopQueriesResponse response = OpenSearchIntegTestCase.client().execute(TopQueriesAction.INSTANCE, request).actionGet(); + Assert.assertNotEquals(0, response.failures().size()); + Assert.assertEquals( + "Cannot get top n queries for [latency] when it is not enabled.", + response.failures().get(0).getCause().getCause().getMessage() + ); + } + + /** + * Test updating top query records when the feature is disabled and then enabled + */ + public void testUpdateRecordWhenFeatureDisabledThenEnabled() throws ExecutionException, InterruptedException { + Settings commonSettings = Settings.builder().put(TOP_N_LATENCY_QUERIES_ENABLED.getKey(), "false").build(); + + logger.info("--> starting nodes for query insight testing"); + List<String> nodes = internalCluster().startNodes(TOTAL_NUMBER_OF_NODES, Settings.builder().put(commonSettings).build()); + + logger.info("--> waiting for nodes to form a cluster"); + ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet(); + assertFalse(health.isTimedOut()); + + assertAcked( + prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 2)) + ); + ensureStableCluster(2); + logger.info("--> creating indices for query insight testing"); + for (int i = 0; i < 5; i++) { + IndexResponse response = client().prepareIndex("test_" + i).setId("" + i).setSource("field_" + i, "value_" + i).get(); + assertEquals("CREATED", response.status().toString()); + } + // making search requests to get top queries + for (int i = 0; i < TOTAL_SEARCH_REQUESTS; i++) { + SearchResponse searchResponse = internalCluster().client(randomFrom(nodes)) + .prepareSearch() + .setQuery(QueryBuilders.matchAllQuery()) + .get(); + assertEquals(searchResponse.getFailedShards(), 0); + } + + TopQueriesRequest request = new TopQueriesRequest(MetricType.LATENCY); + TopQueriesResponse response = OpenSearchIntegTestCase.client().execute(TopQueriesAction.INSTANCE, request).actionGet(); + Assert.assertNotEquals(0, response.failures().size()); + Assert.assertEquals( + "Cannot get top n queries for [latency] when it is not enabled.", + response.failures().get(0).getCause().getCause().getMessage() + ); + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest().persistentSettings( + Settings.builder().put(TOP_N_LATENCY_QUERIES_ENABLED.getKey(), "true").build()
+ ); + assertAcked(internalCluster().client().admin().cluster().updateSettings(updateSettingsRequest).get()); + TopQueriesRequest request2 = new TopQueriesRequest(MetricType.LATENCY); + TopQueriesResponse response2 = OpenSearchIntegTestCase.client().execute(TopQueriesAction.INSTANCE, request2).actionGet(); + Assert.assertEquals(0, response2.failures().size()); + Assert.assertEquals(TOTAL_NUMBER_OF_NODES, response2.getNodes().size()); + for (int i = 0; i < TOTAL_NUMBER_OF_NODES; i++) { + Assert.assertEquals(0, response2.getNodes().get(i).getTopQueriesRecord().size()); + } + + internalCluster().stopAllNodes(); + } + + /** + * Test get top queries when feature enabled + */ + public void testGetTopQueriesWhenFeatureEnabled() throws InterruptedException { + Settings commonSettings = Settings.builder() + .put(TOP_N_LATENCY_QUERIES_ENABLED.getKey(), "true") + .put(TOP_N_LATENCY_QUERIES_SIZE.getKey(), "100") + .put(TOP_N_LATENCY_QUERIES_WINDOW_SIZE.getKey(), "600s") + .build(); + + logger.info("--> starting nodes for query insight testing"); + List<String> nodes = internalCluster().startNodes(TOTAL_NUMBER_OF_NODES, Settings.builder().put(commonSettings).build()); + + logger.info("--> waiting for nodes to form a cluster"); + ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet(); + assertFalse(health.isTimedOut()); + + assertAcked( + prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 2)) + ); + ensureStableCluster(2); + logger.info("--> creating indices for query insight testing"); + for (int i = 0; i < 5; i++) { + IndexResponse response = client().prepareIndex("test_" + i).setId("" + i).setSource("field_" + i, "value_" + i).get(); + assertEquals("CREATED", response.status().toString()); + } + // making search requests to get top queries + for (int i = 0; i < TOTAL_SEARCH_REQUESTS; i++) { + SearchResponse searchResponse = internalCluster().client(randomFrom(nodes)) + .prepareSearch() + .setQuery(QueryBuilders.matchAllQuery()) + .get(); + assertEquals(searchResponse.getFailedShards(), 0); + } + // Sleep to wait for queue drained to top queries store + Thread.sleep(6000); + TopQueriesRequest request = new TopQueriesRequest(MetricType.LATENCY); + TopQueriesResponse response = OpenSearchIntegTestCase.client().execute(TopQueriesAction.INSTANCE, request).actionGet(); + Assert.assertEquals(0, response.failures().size()); + Assert.assertEquals(TOTAL_NUMBER_OF_NODES, response.getNodes().size()); + Assert.assertEquals(TOTAL_SEARCH_REQUESTS, response.getNodes().stream().mapToInt(o -> o.getTopQueriesRecord().size()).sum()); + + internalCluster().stopAllNodes(); + } + + /** + * Test get top queries with small top n size + */ + public void testGetTopQueriesWithSmallTopN() throws InterruptedException { + Settings commonSettings = Settings.builder() + .put(TOP_N_LATENCY_QUERIES_ENABLED.getKey(), "true") + .put(TOP_N_LATENCY_QUERIES_SIZE.getKey(), "1") + .put(TOP_N_LATENCY_QUERIES_WINDOW_SIZE.getKey(), "600s") + .build(); + + logger.info("--> starting nodes for query insight testing"); + List<String> nodes = internalCluster().startNodes(TOTAL_NUMBER_OF_NODES, Settings.builder().put(commonSettings).build()); + + logger.info("--> waiting for nodes to form a cluster"); + ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet(); + assertFalse(health.isTimedOut()); + + assertAcked( + 
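The fixed Thread.sleep(6000) above exists because the listener queues records and a background job drains them into the top-queries store; six seconds is assumed to exceed the drain interval. A hedged sketch of a polling alternative that would drop into the same test method, built only from calls the test already makes (a design suggestion, not what the PR does):

// Poll until every record is visible or a deadline passes, instead of sleeping a fixed 6 s.
long deadline = System.nanoTime() + java.util.concurrent.TimeUnit.SECONDS.toNanos(10);
int seen = 0;
while (seen < TOTAL_SEARCH_REQUESTS && System.nanoTime() < deadline) {
    Thread.sleep(500); // short back-off between polls
    TopQueriesResponse polled = OpenSearchIntegTestCase.client()
        .execute(TopQueriesAction.INSTANCE, new TopQueriesRequest(MetricType.LATENCY))
        .actionGet();
    seen = polled.getNodes().stream().mapToInt(o -> o.getTopQueriesRecord().size()).sum();
}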
prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 2)) + ); + ensureStableCluster(2); + logger.info("--> creating indices for query insight testing"); + for (int i = 0; i < 5; i++) { + IndexResponse response = client().prepareIndex("test_" + i).setId("" + i).setSource("field_" + i, "value_" + i).get(); + assertEquals("CREATED", response.status().toString()); + } + // making search requests to get top queries + for (int i = 0; i < TOTAL_SEARCH_REQUESTS; i++) { + SearchResponse searchResponse = internalCluster().client(randomFrom(nodes)) + .prepareSearch() + .setQuery(QueryBuilders.matchAllQuery()) + .get(); + assertEquals(searchResponse.getFailedShards(), 0); + } + Thread.sleep(6000); + TopQueriesRequest request = new TopQueriesRequest(MetricType.LATENCY); + TopQueriesResponse response = OpenSearchIntegTestCase.client().execute(TopQueriesAction.INSTANCE, request).actionGet(); + Assert.assertEquals(0, response.failures().size()); + Assert.assertEquals(TOTAL_NUMBER_OF_NODES, response.getNodes().size()); + Assert.assertEquals(2, response.getNodes().stream().mapToInt(o -> o.getTopQueriesRecord().size()).sum()); + + internalCluster().stopAllNodes(); + } + + /** + * Test get top queries with small window size + */ + public void testGetTopQueriesWithSmallWindowSize() throws InterruptedException { + Settings commonSettings = Settings.builder() + .put(TOP_N_LATENCY_QUERIES_ENABLED.getKey(), "true") + .put(TOP_N_LATENCY_QUERIES_SIZE.getKey(), "100") + .put(TOP_N_LATENCY_QUERIES_WINDOW_SIZE.getKey(), "1m") + .build(); + + logger.info("--> starting nodes for query insight testing"); + List<String> nodes = internalCluster().startNodes(TOTAL_NUMBER_OF_NODES, Settings.builder().put(commonSettings).build()); + + logger.info("--> waiting for nodes to form a cluster"); + ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet(); + assertFalse(health.isTimedOut()); + + assertAcked( + prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 2)) + ); + ensureStableCluster(2); + logger.info("--> creating indices for query insight testing"); + for (int i = 0; i < 5; i++) { + IndexResponse response = client().prepareIndex("test_" + i).setId("" + i).setSource("field_" + i, "value_" + i).get(); + assertEquals("CREATED", response.status().toString()); + } + // making search requests to get top queries + for (int i = 0; i < TOTAL_SEARCH_REQUESTS; i++) { + SearchResponse searchResponse = internalCluster().client(randomFrom(nodes)) + .prepareSearch() + .setQuery(QueryBuilders.matchAllQuery()) + .get(); + assertEquals(searchResponse.getFailedShards(), 0); + } + + TopQueriesRequest request = new TopQueriesRequest(MetricType.LATENCY); + TopQueriesResponse response = OpenSearchIntegTestCase.client().execute(TopQueriesAction.INSTANCE, request).actionGet(); + Assert.assertEquals(0, response.failures().size()); + Assert.assertEquals(TOTAL_NUMBER_OF_NODES, response.getNodes().size()); + Thread.sleep(6000); + internalCluster().stopAllNodes(); + } +} diff --git a/plugins/query-insights/src/javaRestTest/java/org/opensearch/plugin/insights/TopQueriesRestIT.java b/plugins/query-insights/src/javaRestTest/java/org/opensearch/plugin/insights/TopQueriesRestIT.java new file mode 100644 index 0000000000000..57dea6ad8d5ff --- /dev/null +++ b/plugins/query-insights/src/javaRestTest/java/org/opensearch/plugin/insights/TopQueriesRestIT.java @@ -0,0 +1,107 @@ 
+/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights; + +import org.opensearch.client.Request; +import org.opensearch.client.Response; +import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.test.rest.OpenSearchRestTestCase; +import org.junit.Assert; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.List; +import java.util.Map; + +/** + * Rest Action tests for Query Insights + */ +public class TopQueriesRestIT extends OpenSearchRestTestCase { + + /** + * test Query Insights is installed + * @throws IOException IOException + */ + @SuppressWarnings("unchecked") + public void testQueryInsightsPluginInstalled() throws IOException { + Request request = new Request("GET", "/_cat/plugins?s=component&h=name,component,version,description&format=json"); + Response response = client().performRequest(request); + List<Object> pluginsList = JsonXContent.jsonXContent.createParser( + NamedXContentRegistry.EMPTY, + LoggingDeprecationHandler.INSTANCE, + response.getEntity().getContent() + ).list(); + Assert.assertTrue( + pluginsList.stream().map(o -> (Map<String, Object>) o).anyMatch(plugin -> plugin.get("component").equals("query-insights")) + ); + } + + /** + * test enabling top queries + * @throws IOException IOException + */ + public void testTopQueriesResponses() throws IOException { + // Enable Top N Queries feature + Request request = new Request("PUT", "/_cluster/settings"); + request.setJsonEntity(defaultTopQueriesSettings()); + Response response = client().performRequest(request); + + Assert.assertEquals(200, response.getStatusLine().getStatusCode()); + + // Create documents for search + request = new Request("POST", "/my-index-0/_doc"); + request.setJsonEntity(createDocumentsBody()); + response = client().performRequest(request); + + Assert.assertEquals(201, response.getStatusLine().getStatusCode()); + + // Do Search + request = new Request("GET", "/my-index-0/_search?size=20&pretty"); + request.setJsonEntity(searchBody()); + response = client().performRequest(request); + Assert.assertEquals(200, response.getStatusLine().getStatusCode()); + response = client().performRequest(request); + Assert.assertEquals(200, response.getStatusLine().getStatusCode()); + + // Get Top Queries + request = new Request("GET", "/_insights/top_queries?pretty"); + response = client().performRequest(request); + + Assert.assertEquals(200, response.getStatusLine().getStatusCode()); + String top_requests = new String(response.getEntity().getContent().readAllBytes(), StandardCharsets.UTF_8); + Assert.assertTrue(top_requests.contains("top_queries")); + Assert.assertEquals(2, top_requests.split("searchType", -1).length - 1); + } + + private String defaultTopQueriesSettings() { + return "{\n" + + " \"persistent\" : {\n" + + " \"search.top_n_queries.latency.enabled\" : \"true\",\n" + + " \"search.top_n_queries.latency.window_size\" : \"600s\",\n" + + " \"search.top_n_queries.latency.top_n_size\" : 5\n" + + " }\n" + + "}"; + } + + private String createDocumentsBody() { + return "{\n" + + " \"@timestamp\": \"2099-11-15T13:12:00\",\n" + + " \"message\": \"this is document 1\",\n" + + " \"user\": {\n" + + " \"id\": \"cyji\"\n" + + " }\n" + + "}"; 
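One non-obvious idiom in the REST test above: top_requests.split("searchType", -1).length - 1 counts occurrences of "searchType" in the response body, and the -1 limit keeps trailing empty strings so the count stays exact even when the token ends the string. In isolation:

public class SplitCountDemo {
    public static void main(String[] args) {
        String body = "a searchType b searchType c";
        // with a negative limit, parts = occurrences + 1 (no trailing trimming)
        System.out.println(body.split("searchType", -1).length - 1); // 2
    }
}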
+ } + + private String searchBody() { + return "{}"; + } +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/QueryInsightsPlugin.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/QueryInsightsPlugin.java new file mode 100644 index 0000000000000..4d7e0d486068a --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/QueryInsightsPlugin.java @@ -0,0 +1,116 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights; + +import org.opensearch.action.ActionRequest; +import org.opensearch.client.Client; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.IndexScopedSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.settings.SettingsFilter; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.env.Environment; +import org.opensearch.env.NodeEnvironment; +import org.opensearch.plugin.insights.core.listener.QueryInsightsListener; +import org.opensearch.plugin.insights.core.service.QueryInsightsService; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesAction; +import org.opensearch.plugin.insights.rules.resthandler.top_queries.RestTopQueriesAction; +import org.opensearch.plugin.insights.rules.transport.top_queries.TransportTopQueriesAction; +import org.opensearch.plugin.insights.settings.QueryInsightsSettings; +import org.opensearch.plugins.ActionPlugin; +import org.opensearch.plugins.Plugin; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.rest.RestController; +import org.opensearch.rest.RestHandler; +import org.opensearch.script.ScriptService; +import org.opensearch.threadpool.ExecutorBuilder; +import org.opensearch.threadpool.ScalingExecutorBuilder; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.watcher.ResourceWatcherService; + +import java.util.Collection; +import java.util.List; +import java.util.function.Supplier; + +/** + * Plugin class for Query Insights. 
+ */ +public class QueryInsightsPlugin extends Plugin implements ActionPlugin { + /** + * Default constructor + */ + public QueryInsightsPlugin() {} + + @Override + public Collection<Object> createComponents( + final Client client, + final ClusterService clusterService, + final ThreadPool threadPool, + final ResourceWatcherService resourceWatcherService, + final ScriptService scriptService, + final NamedXContentRegistry xContentRegistry, + final Environment environment, + final NodeEnvironment nodeEnvironment, + final NamedWriteableRegistry namedWriteableRegistry, + final IndexNameExpressionResolver indexNameExpressionResolver, + final Supplier<RepositoriesService> repositoriesServiceSupplier + ) { + // create top n queries service + final QueryInsightsService queryInsightsService = new QueryInsightsService(threadPool); + return List.of(queryInsightsService, new QueryInsightsListener(clusterService, queryInsightsService)); + } + + @Override + public List<ExecutorBuilder<?>> getExecutorBuilders(final Settings settings) { + return List.of( + new ScalingExecutorBuilder( + QueryInsightsSettings.QUERY_INSIGHTS_EXECUTOR, + 1, + Math.min((OpenSearchExecutors.allocatedProcessors(settings) + 1) / 2, QueryInsightsSettings.MAX_THREAD_COUNT), + TimeValue.timeValueMinutes(5) + ) + ); + } + + @Override + public List<RestHandler> getRestHandlers( + final Settings settings, + final RestController restController, + final ClusterSettings clusterSettings, + final IndexScopedSettings indexScopedSettings, + final SettingsFilter settingsFilter, + final IndexNameExpressionResolver indexNameExpressionResolver, + final Supplier<DiscoveryNodes> nodesInCluster + ) { + return List.of(new RestTopQueriesAction()); + } + + @Override + public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() { + return List.of(new ActionPlugin.ActionHandler<>(TopQueriesAction.INSTANCE, TransportTopQueriesAction.class)); + } + + @Override + public List<Setting<?>> getSettings() { + return List.of( + // Settings for top N queries + QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED, + QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE, + QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE + ); + } +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListener.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListener.java new file mode 100644 index 0000000000000..9ec8673147c38 --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListener.java @@ -0,0 +1,147 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
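The scaling-executor bounds registered in getExecutorBuilders above come from (allocatedProcessors + 1) / 2, capped by QueryInsightsSettings.MAX_THREAD_COUNT. A worked example with assumed inputs; the cap of 5 is a stand-in, not the constant's real value:

int allocatedProcessors = 8; // OpenSearchExecutors.allocatedProcessors(settings) on an 8-CPU node
int maxThreadCount = 5;      // stand-in for QueryInsightsSettings.MAX_THREAD_COUNT
int maxThreads = Math.min((allocatedProcessors + 1) / 2, maxThreadCount); // (8 + 1) / 2 = 4
// The pool therefore scales between 1 and 4 threads, and idle threads are
// reclaimed after the 5-minute keep-alive passed to the builder.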
+ */ + +package org.opensearch.plugin.insights.core.listener; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.search.SearchPhaseContext; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchRequestContext; +import org.opensearch.action.search.SearchRequestOperationsListener; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.plugin.insights.core.service.QueryInsightsService; +import org.opensearch.plugin.insights.rules.model.Attribute; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED; +import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE; +import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE; + +/** + * The listener for query insights services. + * It forwards query-related data to the appropriate query insights stores, + * either for each request or for each phase. + * + * @opensearch.internal + */ +public final class QueryInsightsListener extends SearchRequestOperationsListener { + private static final ToXContent.Params FORMAT_PARAMS = new ToXContent.MapParams(Collections.singletonMap("pretty", "false")); + + private static final Logger log = LogManager.getLogger(QueryInsightsListener.class); + + private final QueryInsightsService queryInsightsService; + + /** + * Constructor for QueryInsightsListener + * + * @param clusterService The Node's cluster service. 
+     * @param queryInsightsService The QueryInsightsService associated with this listener
+     */
+    @Inject
+    public QueryInsightsListener(final ClusterService clusterService, final QueryInsightsService queryInsightsService) {
+        this.queryInsightsService = queryInsightsService;
+        clusterService.getClusterSettings()
+            .addSettingsUpdateConsumer(TOP_N_LATENCY_QUERIES_ENABLED, v -> this.setEnableTopQueries(MetricType.LATENCY, v));
+        clusterService.getClusterSettings()
+            .addSettingsUpdateConsumer(
+                TOP_N_LATENCY_QUERIES_SIZE,
+                v -> this.queryInsightsService.getTopQueriesService(MetricType.LATENCY).setTopNSize(v),
+                v -> this.queryInsightsService.getTopQueriesService(MetricType.LATENCY).validateTopNSize(v)
+            );
+        clusterService.getClusterSettings()
+            .addSettingsUpdateConsumer(
+                TOP_N_LATENCY_QUERIES_WINDOW_SIZE,
+                v -> this.queryInsightsService.getTopQueriesService(MetricType.LATENCY).setWindowSize(v),
+                v -> this.queryInsightsService.getTopQueriesService(MetricType.LATENCY).validateWindowSize(v)
+            );
+        this.setEnableTopQueries(MetricType.LATENCY, clusterService.getClusterSettings().get(TOP_N_LATENCY_QUERIES_ENABLED));
+        this.queryInsightsService.getTopQueriesService(MetricType.LATENCY)
+            .setTopNSize(clusterService.getClusterSettings().get(TOP_N_LATENCY_QUERIES_SIZE));
+        this.queryInsightsService.getTopQueriesService(MetricType.LATENCY)
+            .setWindowSize(clusterService.getClusterSettings().get(TOP_N_LATENCY_QUERIES_WINDOW_SIZE));
+    }
+
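The constructor above registers each dynamic setting with both an update consumer and a validator, so an invalid value is rejected before the consumer ever sees it. A minimal, framework-free sketch of that validate-then-apply contract (the `SettingSketch` class and its names are hypothetical, for illustration only):

```java
import java.util.function.Consumer;

// Hypothetical sketch: validate a candidate value first, then apply it,
// mirroring the consumer-plus-validator registration used above.
public final class SettingSketch {
    private int topN = 3;

    void update(final int candidate, final Consumer<Integer> consumer, final Consumer<Integer> validator) {
        validator.accept(candidate); // throws IllegalArgumentException for invalid values
        consumer.accept(candidate);  // only reached for valid values
    }

    public static void main(String[] args) {
        final SettingSketch sketch = new SettingSketch();
        sketch.update(10, v -> sketch.topN = v, v -> {
            if (v > 100) {
                throw new IllegalArgumentException("top N size should be smaller than 100");
            }
        });
        System.out.println(sketch.topN); // prints 10
    }
}
```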
+    /**
+     * Enable or disable top queries insights collection for {@link MetricType}.
+     * This function will enable or disable the corresponding listeners
+     * and query insights services.
+     *
+     * @param metricType {@link MetricType}
+     * @param enabled boolean
+     */
+    public void setEnableTopQueries(final MetricType metricType, final boolean enabled) {
+        boolean isAllMetricsDisabled = !queryInsightsService.isEnabled();
+        this.queryInsightsService.enableCollection(metricType, enabled);
+        if (!enabled) {
+            // disable QueryInsightsListener only if all metrics collections are disabled now.
+            if (!queryInsightsService.isEnabled()) {
+                super.setEnabled(false);
+                this.queryInsightsService.stop();
+            }
+        } else {
+            super.setEnabled(true);
+            // restart the query insights service only if all metrics collections were disabled before this update.
+            if (isAllMetricsDisabled) {
+                this.queryInsightsService.stop();
+                this.queryInsightsService.start();
+            }
+        }
+    }
+
+    @Override
+    public boolean isEnabled() {
+        return super.isEnabled();
+    }
+
+    @Override
+    public void onPhaseStart(SearchPhaseContext context) {}
+
+    @Override
+    public void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {}
+
+    @Override
+    public void onPhaseFailure(SearchPhaseContext context, Throwable cause) {}
+
+    @Override
+    public void onRequestStart(SearchRequestContext searchRequestContext) {}
+
+    @Override
+    public void onRequestEnd(final SearchPhaseContext context, final SearchRequestContext searchRequestContext) {
+        final SearchRequest request = context.getRequest();
+        try {
+            Map<MetricType, Number> measurements = new HashMap<>();
+            if (queryInsightsService.isCollectionEnabled(MetricType.LATENCY)) {
+                measurements.put(
+                    MetricType.LATENCY,
+                    TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - searchRequestContext.getAbsoluteStartNanos())
+                );
+            }
+            Map<Attribute, Object> attributes = new HashMap<>();
+            attributes.put(Attribute.SEARCH_TYPE, request.searchType().toString().toLowerCase(Locale.ROOT));
+            attributes.put(Attribute.SOURCE, request.source().toString(FORMAT_PARAMS));
+            attributes.put(Attribute.TOTAL_SHARDS, context.getNumShards());
+            attributes.put(Attribute.INDICES, request.indices());
+            attributes.put(Attribute.PHASE_LATENCY_MAP, searchRequestContext.phaseTookMap());
+            SearchQueryRecord record = new SearchQueryRecord(request.getOrCreateAbsoluteStartMillis(), measurements, attributes);
+            queryInsightsService.addRecord(record);
+        } catch (Exception e) {
+            log.error("failed to ingest query insights data", e);
+        }
+    }
+}
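`onRequestEnd` above derives the request latency from the absolute start time in nanoseconds and splits the result into a measurements map and an attributes map. A hedged, self-contained sketch of that record-building step (the `RecordSketch` class and its string keys are hypothetical stand-ins for the plugin's typed `MetricType`/`Attribute` maps):

```java
import java.util.Map;
import java.util.concurrent.TimeUnit;

// Hypothetical sketch: measure end-to-end latency from an absolute start
// timestamp and attach request metadata, mirroring the measurements/attributes
// split used by SearchQueryRecord.
public final class RecordSketch {
    public static void main(String[] args) throws InterruptedException {
        final long absoluteStartNanos = System.nanoTime();
        Thread.sleep(25); // stand-in for the search request executing
        final long latencyMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - absoluteStartNanos);
        final Map<String, Number> measurements = Map.of("latency", latencyMillis);
        final Map<String, Object> attributes = Map.of("search_type", "query_then_fetch", "total_shards", 5);
        System.out.println(measurements + " " + attributes);
    }
}
```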
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/package-info.java
new file mode 100644
index 0000000000000..3cb9cacf7fd1c
--- /dev/null
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/listener/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * Listeners for Query Insights
+ */
+package org.opensearch.plugin.insights.core.listener;
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/QueryInsightsService.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/QueryInsightsService.java
new file mode 100644
index 0000000000000..525ca0d4a3d33
--- /dev/null
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/QueryInsightsService.java
@@ -0,0 +1,180 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.insights.core.service;
+
+import org.opensearch.common.inject.Inject;
+import org.opensearch.common.lifecycle.AbstractLifecycleComponent;
+import org.opensearch.plugin.insights.rules.model.MetricType;
+import org.opensearch.plugin.insights.rules.model.SearchQueryRecord;
+import org.opensearch.plugin.insights.settings.QueryInsightsSettings;
+import org.opensearch.threadpool.Scheduler;
+import org.opensearch.threadpool.ThreadPool;
+
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.LinkedBlockingQueue;
+
+/**
+ * Service responsible for gathering, analyzing, storing and exporting
+ * information related to search queries
+ *
+ * @opensearch.internal
+ */
+public class QueryInsightsService extends AbstractLifecycleComponent {
+    /**
+     * The internal OpenSearch thread pool that executes async processing and exporting tasks
+     */
+    private final ThreadPool threadPool;
+
+    /**
+     * Services to capture top n queries for different metric types
+     */
+    private final Map<MetricType, TopQueriesService> topQueriesServices;
+
+    /**
+     * Flags for enabling insight data collection for different metric types
+     */
+    private final Map<MetricType, Boolean> enableCollect;
+
+    /**
+     * The internal thread-safe queue to ingest the search query data and subsequently forward to processors
+     */
+    private final LinkedBlockingQueue<SearchQueryRecord> queryRecordsQueue;
+
+    /**
+     * Holds a reference to the delayed operation {@link Scheduler.Cancellable} so it can be cancelled when
+     * the service is closed concurrently.
+     */
+    protected volatile Scheduler.Cancellable scheduledFuture;
+
+    /**
+     * Constructor of the QueryInsightsService
+     *
+     * @param threadPool The OpenSearch thread pool to run async tasks
+     */
+    @Inject
+    public QueryInsightsService(final ThreadPool threadPool) {
+        enableCollect = new HashMap<>();
+        queryRecordsQueue = new LinkedBlockingQueue<>(QueryInsightsSettings.QUERY_RECORD_QUEUE_CAPACITY);
+        topQueriesServices = new HashMap<>();
+        for (MetricType metricType : MetricType.allMetricTypes()) {
+            enableCollect.put(metricType, false);
+            topQueriesServices.put(metricType, new TopQueriesService(metricType));
+        }
+        this.threadPool = threadPool;
+    }
+
+    /**
+     * Ingest the query data into in-memory stores
+     *
+     * @param record the record to ingest
+     * @return true if the record was queued for processing, false if it was dropped
+     */
+    public boolean addRecord(final SearchQueryRecord record) {
+        boolean shouldAdd = false;
+        for (Map.Entry<MetricType, TopQueriesService> entry : topQueriesServices.entrySet()) {
+            if (!enableCollect.get(entry.getKey())) {
+                continue;
+            }
+            List<SearchQueryRecord> currentSnapshot = entry.getValue().getTopQueriesCurrentSnapshot();
+            // add only if the top N store is not full yet, or the incoming record is larger than the
+            // smallest stored record (the current Nth) for this metric
+            if (currentSnapshot.size() < entry.getValue().getTopNSize()
+                || SearchQueryRecord.compare(record, currentSnapshot.get(0), entry.getKey()) > 0) {
+                shouldAdd = true;
+                break;
+            }
+        }
+        if (shouldAdd) {
+            return queryRecordsQueue.offer(record);
+        }
+        return false;
+    }
+
+    /**
+     * Drain the queryRecordsQueue into internal stores and services
+     */
+    public void drainRecords() {
+        final List<SearchQueryRecord> records = new ArrayList<>();
+        queryRecordsQueue.drainTo(records);
+        records.sort(Comparator.comparingLong(SearchQueryRecord::getTimestamp));
+        for (MetricType metricType : MetricType.allMetricTypes()) {
+            if (enableCollect.get(metricType)) {
+                // ingest the records into the top queries service for this metric type
+                topQueriesServices.get(metricType).consumeRecords(records);
+            }
+        }
+    }
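`addRecord` and `drainRecords` together form a bounded producer/consumer pipeline: producers `offer` into a capacity-limited queue (dropping on overflow rather than blocking the search path), and a scheduled task drains it in batches. A minimal, framework-free sketch of the same pattern using only the JDK (`DrainSketch` is a hypothetical name):

```java
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Hypothetical sketch of the ingest/drain pattern above.
public final class DrainSketch {
    public static void main(String[] args) throws InterruptedException {
        final LinkedBlockingQueue<Long> queue = new LinkedBlockingQueue<>(1000);
        final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        scheduler.scheduleWithFixedDelay(() -> {
            final List<Long> batch = new ArrayList<>();
            queue.drainTo(batch);            // non-blocking bulk removal
            if (!batch.isEmpty()) {
                System.out.println("drained " + batch.size() + " records");
            }
        }, 0, 5, TimeUnit.SECONDS);
        for (long i = 0; i < 10; i++) {
            queue.offer(i);                  // returns false instead of blocking when full
        }
        Thread.sleep(1000);
        scheduler.shutdownNow();
    }
}
```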
+
+    /**
+     * Get the top queries service based on metricType
+     * @param metricType {@link MetricType}
+     * @return {@link TopQueriesService}
+     */
+    public TopQueriesService getTopQueriesService(final MetricType metricType) {
+        return topQueriesServices.get(metricType);
+    }
+
+    /**
+     * Set flag to enable or disable Query Insights data collection
+     *
+     * @param metricType {@link MetricType}
+     * @param enable Flag to enable or disable Query Insights data collection
+     */
+    public void enableCollection(final MetricType metricType, final boolean enable) {
+        this.enableCollect.put(metricType, enable);
+        this.topQueriesServices.get(metricType).setEnabled(enable);
+    }
+
+    /**
+     * Check whether the Query Insights data collection is enabled for a MetricType
+     *
+     * @param metricType {@link MetricType}
+     * @return true if the Query Insights data collection is enabled
+     */
+    public boolean isCollectionEnabled(final MetricType metricType) {
+        return this.enableCollect.get(metricType);
+    }
+
+    /**
+     * Check whether the query insights service is enabled
+     *
+     * @return true if the query insights service is enabled for at least one metric type
+     */
+    public boolean isEnabled() {
+        for (MetricType t : MetricType.allMetricTypes()) {
+            if (isCollectionEnabled(t)) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    @Override
+    protected void doStart() {
+        if (isEnabled()) {
+            scheduledFuture = threadPool.scheduleWithFixedDelay(
+                this::drainRecords,
+                QueryInsightsSettings.QUERY_RECORD_QUEUE_DRAIN_INTERVAL,
+                QueryInsightsSettings.QUERY_INSIGHTS_EXECUTOR
+            );
+        }
+    }
+
+    @Override
+    protected void doStop() {
+        if (scheduledFuture != null) {
+            scheduledFuture.cancel();
+        }
+    }
+
+    @Override
+    protected void doClose() {}
+}
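`doStart` schedules the periodic drain only when at least one collection is enabled, and `doStop` cancels it through a volatile handle so a concurrent stop always sees the latest task. A hedged, JDK-only sketch of that lifecycle pattern (`LifecycleSketch` is a hypothetical name):

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

// Hypothetical sketch: keep a volatile handle to the scheduled drain task so
// a concurrent stop() can always cancel it.
public final class LifecycleSketch {
    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    private volatile ScheduledFuture<?> scheduledFuture;

    void start(final boolean enabled) {
        if (enabled) {
            scheduledFuture = scheduler.scheduleWithFixedDelay(() -> System.out.println("drain"), 0, 5, TimeUnit.SECONDS);
        }
    }

    void stop() {
        final ScheduledFuture<?> future = scheduledFuture;
        if (future != null) {
            future.cancel(false);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        final LifecycleSketch sketch = new LifecycleSketch();
        sketch.start(true);
        Thread.sleep(200);
        sketch.stop();
        sketch.scheduler.shutdownNow();
    }
}
```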
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/TopQueriesService.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/TopQueriesService.java
new file mode 100644
index 0000000000000..d2c30cbdf98e7
--- /dev/null
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/TopQueriesService.java
@@ -0,0 +1,282 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.insights.core.service;
+
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.plugin.insights.rules.model.MetricType;
+import org.opensearch.plugin.insights.rules.model.SearchQueryRecord;
+import org.opensearch.plugin.insights.settings.QueryInsightsSettings;
+
+import java.time.Instant;
+import java.time.LocalDateTime;
+import java.time.ZoneId;
+import java.time.ZoneOffset;
+import java.time.temporal.ChronoUnit;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
+import java.util.PriorityQueue;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.stream.Collectors;
+
+/**
+ * Service responsible for gathering and storing top N queries
+ * with high latency or resource usage
+ *
+ * @opensearch.internal
+ */
+public class TopQueriesService {
+    private boolean enabled;
+    /**
+     * The metric type to measure top n queries
+     */
+    private final MetricType metricType;
+    private int topNSize;
+    /**
+     * The window size to keep the top n queries
+     */
+    private TimeValue windowSize;
+    /**
+     * The current window start timestamp
+     */
+    private long windowStart;
+    /**
+     * The internal store that holds the top n queries insight data. The priority queue itself
+     * is not thread-safe; it is only mutated by the scheduled drain task, while readers consume
+     * the atomic snapshots below.
+     */
+    private final PriorityQueue<SearchQueryRecord> topQueriesStore;
+
+    /**
+     * The AtomicReference of a snapshot of the current window top queries for getters to consume
+     */
+    private final AtomicReference<List<SearchQueryRecord>> topQueriesCurrentSnapshot;
+
+    /**
+     * The AtomicReference of a snapshot of the last window top queries for getters to consume
+     */
+    private final AtomicReference<List<SearchQueryRecord>> topQueriesHistorySnapshot;
+
+    TopQueriesService(final MetricType metricType) {
+        this.enabled = false;
+        this.metricType = metricType;
+        this.topNSize = QueryInsightsSettings.DEFAULT_TOP_N_SIZE;
+        this.windowSize = QueryInsightsSettings.DEFAULT_WINDOW_SIZE;
+        this.windowStart = -1L;
+        // a min-heap ordered by the metric, so the head is always the smallest record in the store
+        topQueriesStore = new PriorityQueue<>(topNSize, (a, b) -> SearchQueryRecord.compare(a, b, metricType));
+        topQueriesCurrentSnapshot = new AtomicReference<>(new ArrayList<>());
+        topQueriesHistorySnapshot = new AtomicReference<>(new ArrayList<>());
+    }
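`topQueriesStore` is a min-heap ordered by the metric, which makes fixed-size top-N maintenance cheap: once the store exceeds N entries, polling the head always evicts the smallest record (see `addToTopNStore` later in this file). A small self-contained sketch of the idiom with plain numbers (`TopNSketch` is a hypothetical name):

```java
import java.util.ArrayList;
import java.util.List;
import java.util.PriorityQueue;

// Hypothetical sketch of the fixed-size top-N idiom used by topQueriesStore.
public final class TopNSketch {
    public static void main(String[] args) {
        final int topN = 3;
        final PriorityQueue<Long> store = new PriorityQueue<>(Long::compare);
        for (long latency : new long[] { 120, 45, 300, 80, 210, 15 }) {
            store.add(latency);
            while (store.size() > topN) {
                store.poll();                 // drop the smallest, keep the N largest
            }
        }
        final List<Long> snapshot = new ArrayList<>(store);
        snapshot.sort(Long::compare);
        System.out.println(snapshot);         // prints [120, 210, 300]
    }
}
```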
+
+    /**
+     * Set the top N size for the top queries service.
+     *
+     * @param topNSize the top N size to set
+     */
+    public void setTopNSize(final int topNSize) {
+        this.topNSize = topNSize;
+    }
+
+    /**
+     * Get the current configured top n size
+     *
+     * @return top n size
+     */
+    public int getTopNSize() {
+        return topNSize;
+    }
+
+    /**
+     * Validate the top N size based on the internal constraints
+     *
+     * @param size the wanted top N size
+     */
+    public void validateTopNSize(final int size) {
+        if (size > QueryInsightsSettings.MAX_N_SIZE) {
+            throw new IllegalArgumentException(
+                "Top N size setting for ["
+                    + metricType
+                    + "]"
+                    + " should be smaller than max top N size ["
+                    + QueryInsightsSettings.MAX_N_SIZE
+                    + "], was ("
+                    + size
+                    + " > "
+                    + QueryInsightsSettings.MAX_N_SIZE
+                    + ")"
+            );
+        }
+    }
+
+    /**
+     * Set enable flag for the service
+     * @param enabled boolean
+     */
+    public void setEnabled(final boolean enabled) {
+        this.enabled = enabled;
+    }
+
+    /**
+     * Set the window size for the top N queries service
+     *
+     * @param windowSize window size to set
+     */
+    public void setWindowSize(final TimeValue windowSize) {
+        this.windowSize = windowSize;
+        // reset the window start time since the window size has changed
+        this.windowStart = -1L;
+    }
+
+    /**
+     * Validate if the window size is valid, based on internal constraints.
+     *
+     * @param windowSize the window size to validate
+     */
+    public void validateWindowSize(final TimeValue windowSize) {
+        if (windowSize.compareTo(QueryInsightsSettings.MAX_WINDOW_SIZE) > 0
+            || windowSize.compareTo(QueryInsightsSettings.MIN_WINDOW_SIZE) < 0) {
+            throw new IllegalArgumentException(
+                "Window size setting for ["
+                    + metricType
+                    + "]"
+                    + " should be between ["
+                    + QueryInsightsSettings.MIN_WINDOW_SIZE
+                    + ","
+                    + QueryInsightsSettings.MAX_WINDOW_SIZE
+                    + "], was ("
+                    + windowSize
+                    + ")"
+            );
+        }
+        if (!(QueryInsightsSettings.VALID_WINDOW_SIZES_IN_MINUTES.contains(windowSize) || windowSize.getMinutes() % 60 == 0)) {
+            throw new IllegalArgumentException(
+                "Window size setting for ["
+                    + metricType
+                    + "]"
+                    + " should be a multiple of 1 hour, or one of "
+                    + QueryInsightsSettings.VALID_WINDOW_SIZES_IN_MINUTES
+                    + ", was ("
+                    + windowSize
+                    + ")"
+            );
+        }
+    }
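The window sizes accepted by the validators above are what `calculateWindowStart` (defined later in this file) aligns against: it truncates the timestamp to the hour and then steps forward in whole windows until the current time falls inside the last step. A hedged, self-contained sketch of that alignment (`WindowSketch` is a hypothetical name):

```java
import java.time.Instant;
import java.time.LocalDateTime;
import java.time.ZoneOffset;
import java.time.temporal.ChronoUnit;

// Hypothetical sketch of the window-start alignment used by calculateWindowStart.
public final class WindowSketch {
    static long windowStartMillis(final long timestampMillis, final long windowMinutes) {
        final LocalDateTime now = LocalDateTime.ofInstant(Instant.ofEpochMilli(timestampMillis), ZoneOffset.UTC);
        LocalDateTime start = now.truncatedTo(ChronoUnit.HOURS);
        while (!start.plusMinutes(windowMinutes).isAfter(now)) {
            start = start.plusMinutes(windowMinutes);
        }
        return start.toInstant(ZoneOffset.UTC).toEpochMilli();
    }

    public static void main(String[] args) {
        // 10:37 UTC with a 10-minute window aligns to the 10:30 UTC window start.
        final long t = Instant.parse("2024-01-01T10:37:00Z").toEpochMilli();
        System.out.println(Instant.ofEpochMilli(windowStartMillis(t, 10))); // 2024-01-01T10:30:00Z
    }
}
```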
+
+    /**
+     * Get all top queries records that are in the current top n queries store.
+     * Optionally include top N records from the last window.
+     *
+     * The records are returned sorted by the metric in descending order.
+     *
+     * @param includeLastWindow if the top N queries from the last window should be included
+     * @return List of the records that are in the query insight store
+     * @throws IllegalArgumentException if query insight is disabled in the cluster
+     */
+    public List<SearchQueryRecord> getTopQueriesRecords(final boolean includeLastWindow) throws IllegalArgumentException {
+        if (!enabled) {
+            throw new IllegalArgumentException(
+                String.format(Locale.ROOT, "Cannot get top n queries for [%s] when it is not enabled.", metricType.toString())
+            );
+        }
+        // read from window snapshots
+        final List<SearchQueryRecord> queries = new ArrayList<>(topQueriesCurrentSnapshot.get());
+        if (includeLastWindow) {
+            queries.addAll(topQueriesHistorySnapshot.get());
+        }
+        return queries.stream()
+            .sorted((a, b) -> SearchQueryRecord.compare(a, b, metricType) * -1)
+            .collect(Collectors.toList());
+    }
+
+    /**
+     * Consume records to top queries stores
+     *
+     * @param records a list of {@link SearchQueryRecord}
+     */
+    void consumeRecords(final List<SearchQueryRecord> records) {
+        final long currentWindowStart = calculateWindowStart(System.currentTimeMillis());
+        List<SearchQueryRecord> recordsInLastWindow = new ArrayList<>();
+        List<SearchQueryRecord> recordsInThisWindow = new ArrayList<>();
+        for (SearchQueryRecord record : records) {
+            // skip the records that do not have the corresponding measurement
+            if (!record.getMeasurements().containsKey(metricType)) {
+                continue;
+            }
+            if (record.getTimestamp() < currentWindowStart) {
+                recordsInLastWindow.add(record);
+            } else {
+                recordsInThisWindow.add(record);
+            }
+        }
+        // add records in last window, if there are any, to the top n store
+        addToTopNStore(recordsInLastWindow);
+        // rotate window and reset window start if necessary
+        rotateWindowIfNecessary(currentWindowStart);
+        // add records in current window, if there are any, to the top n store
+        addToTopNStore(recordsInThisWindow);
+        // update the current window snapshot for getters to consume
+        final List<SearchQueryRecord> newSnapShot = new ArrayList<>(topQueriesStore);
+        newSnapShot.sort((a, b) -> SearchQueryRecord.compare(a, b, metricType));
+        topQueriesCurrentSnapshot.set(newSnapShot);
+    }
+
+    private void addToTopNStore(final List<SearchQueryRecord> records) {
+        topQueriesStore.addAll(records);
+        // evict the smallest records from the min-heap to keep the store at a fixed size of top N
+        while (topQueriesStore.size() > topNSize) {
+            topQueriesStore.poll();
+        }
+    }
+
+    /**
+     * Reset the current window and rotate the data to the history snapshot for top n queries.
+     * This function is invoked at most once per consumeRecords call.
+     *
+     * @param newWindowStart the new windowStart to set to
+     */
+    private void rotateWindowIfNecessary(final long newWindowStart) {
+        // reset window if the current window is outdated
+        if (windowStart < newWindowStart) {
+            final List<SearchQueryRecord> history = new ArrayList<>();
+            // rotate the current window to the history store only if the data belongs to the last window
+            if (windowStart == newWindowStart - windowSize.getMillis()) {
+                history.addAll(topQueriesStore);
+            }
+            topQueriesHistorySnapshot.set(history);
+            topQueriesStore.clear();
+            topQueriesCurrentSnapshot.set(new ArrayList<>());
+            windowStart = newWindowStart;
+        }
+    }
+
+    /**
+     * Calculate the window start for the given timestamp
+     *
+     * @param timestamp the given timestamp to calculate window start
+     * @return the start timestamp of the window that contains the given timestamp
+     */
+    private long calculateWindowStart(final long timestamp) {
+        final LocalDateTime currentTime =
LocalDateTime.ofInstant(Instant.ofEpochMilli(timestamp), ZoneId.of("UTC")); + LocalDateTime windowStartTime = currentTime.truncatedTo(ChronoUnit.HOURS); + while (!windowStartTime.plusMinutes(windowSize.getMinutes()).isAfter(currentTime)) { + windowStartTime = windowStartTime.plusMinutes(windowSize.getMinutes()); + } + return windowStartTime.toInstant(ZoneOffset.UTC).getEpochSecond() * 1000; + } + + /** + * Get the current top queries snapshot from the AtomicReference. + * + * @return a list of {@link SearchQueryRecord} + */ + public List<SearchQueryRecord> getTopQueriesCurrentSnapshot() { + return topQueriesCurrentSnapshot.get(); + } +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/package-info.java new file mode 100644 index 0000000000000..5068f28234f6d --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Service Classes for Query Insights + */ +package org.opensearch.plugin.insights.core.service; diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/package-info.java new file mode 100644 index 0000000000000..04d1f9bfff7e1 --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Base Package of Query Insights + */ +package org.opensearch.plugin.insights; diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/package-info.java new file mode 100644 index 0000000000000..9b6b5856f7d27 --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Transport Actions, Requests and Responses for Query Insights + */ +package org.opensearch.plugin.insights.rules.action; diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueries.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueries.java new file mode 100644 index 0000000000000..26cff82aae52e --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueries.java @@ -0,0 +1,77 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.insights.rules.action.top_queries; + +import org.opensearch.action.support.nodes.BaseNodeResponse; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; + +import java.io.IOException; +import java.util.List; + +/** + * Holds all top queries records by resource usage or latency on a node + * Mainly used in the top N queries node response workflow. + * + * @opensearch.internal + */ +public class TopQueries extends BaseNodeResponse implements ToXContentObject { + /** The store to keep the top queries records */ + private final List<SearchQueryRecord> topQueriesRecords; + + /** + * Create the TopQueries Object from StreamInput + * @param in A {@link StreamInput} object. + * @throws IOException IOException + */ + public TopQueries(final StreamInput in) throws IOException { + super(in); + topQueriesRecords = in.readList(SearchQueryRecord::new); + } + + /** + * Create the TopQueries Object + * @param node A node that is part of the cluster. + * @param searchQueryRecords A list of SearchQueryRecord associated in this TopQueries. + */ + public TopQueries(final DiscoveryNode node, final List<SearchQueryRecord> searchQueryRecords) { + super(node); + topQueriesRecords = searchQueryRecords; + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + if (topQueriesRecords != null) { + for (SearchQueryRecord record : topQueriesRecords) { + record.toXContent(builder, params); + } + } + return builder; + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(out); + out.writeList(topQueriesRecords); + + } + + /** + * Get all top queries records + * + * @return the top queries records in this node response + */ + public List<SearchQueryRecord> getTopQueriesRecord() { + return topQueriesRecords; + } +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesAction.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesAction.java new file mode 100644 index 0000000000000..b8ed69fa5692b --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesAction.java @@ -0,0 +1,32 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.rules.action.top_queries; + +import org.opensearch.action.ActionType; + +/** + * Transport action for cluster/node level top queries information. + * + * @opensearch.internal + */ +public class TopQueriesAction extends ActionType<TopQueriesResponse> { + + /** + * The TopQueriesAction Instance. 
+ */ + public static final TopQueriesAction INSTANCE = new TopQueriesAction(); + /** + * The name of this Action + */ + public static final String NAME = "cluster:admin/opensearch/insights/top_queries"; + + private TopQueriesAction() { + super(NAME, TopQueriesResponse::new); + } +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesRequest.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesRequest.java new file mode 100644 index 0000000000000..3bdff2c403161 --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesRequest.java @@ -0,0 +1,62 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.rules.action.top_queries; + +import org.opensearch.action.support.nodes.BaseNodesRequest; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.plugin.insights.rules.model.MetricType; + +import java.io.IOException; + +/** + * A request to get cluster/node level top queries information. + * + * @opensearch.internal + */ +public class TopQueriesRequest extends BaseNodesRequest<TopQueriesRequest> { + + final MetricType metricType; + + /** + * Constructor for TopQueriesRequest + * + * @param in A {@link StreamInput} object. + * @throws IOException if the stream cannot be deserialized. + */ + public TopQueriesRequest(final StreamInput in) throws IOException { + super(in); + this.metricType = MetricType.readFromStream(in); + } + + /** + * Get top queries from nodes based on the nodes ids specified. + * If none are passed, cluster level top queries will be returned. + * + * @param metricType {@link MetricType} + * @param nodesIds the nodeIds specified in the request + */ + public TopQueriesRequest(final MetricType metricType, final String... nodesIds) { + super(nodesIds); + this.metricType = metricType; + } + + /** + * Get the type of requested metrics + */ + public MetricType getMetricType() { + return metricType; + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(metricType.toString()); + } +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesResponse.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesResponse.java new file mode 100644 index 0000000000000..2e66bb7f77baf --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesResponse.java @@ -0,0 +1,143 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.plugin.insights.rules.action.top_queries;
+
+import org.opensearch.action.FailedNodeException;
+import org.opensearch.action.support.nodes.BaseNodesResponse;
+import org.opensearch.cluster.ClusterName;
+import org.opensearch.common.xcontent.XContentFactory;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.xcontent.ToXContentFragment;
+import org.opensearch.core.xcontent.XContentBuilder;
+import org.opensearch.plugin.insights.rules.model.Attribute;
+import org.opensearch.plugin.insights.rules.model.MetricType;
+import org.opensearch.plugin.insights.rules.model.SearchQueryRecord;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * Transport response for cluster/node level top queries information.
+ *
+ * @opensearch.internal
+ */
+public class TopQueriesResponse extends BaseNodesResponse<TopQueries> implements ToXContentFragment {
+
+    private static final String CLUSTER_LEVEL_RESULTS_KEY = "top_queries";
+    private final MetricType metricType;
+    private final int topNSize;
+
+    /**
+     * Constructor for TopQueriesResponse.
+     *
+     * @param in A {@link StreamInput} object.
+     * @throws IOException if the stream cannot be deserialized.
+     */
+    public TopQueriesResponse(final StreamInput in) throws IOException {
+        super(in);
+        topNSize = in.readInt();
+        metricType = in.readEnum(MetricType.class);
+    }
+
+    /**
+     * Constructor for TopQueriesResponse
+     *
+     * @param clusterName The current cluster name
+     * @param nodes A list that contains top queries results from all nodes
+     * @param failures A list that contains FailedNodeException
+     * @param topNSize The top N size to return to the user
+     * @param metricType the {@link MetricType} to be returned in this response
+     */
+    public TopQueriesResponse(
+        final ClusterName clusterName,
+        final List<TopQueries> nodes,
+        final List<FailedNodeException> failures,
+        final int topNSize,
+        final MetricType metricType
+    ) {
+        super(clusterName, nodes, failures);
+        this.topNSize = topNSize;
+        this.metricType = metricType;
+    }
+
+    @Override
+    protected List<TopQueries> readNodesFrom(final StreamInput in) throws IOException {
+        return in.readList(TopQueries::new);
+    }
+
+    @Override
+    protected void writeNodesTo(final StreamOutput out, final List<TopQueries> nodes) throws IOException {
+        out.writeList(nodes);
+    }
+
+    @Override
+    public void writeTo(final StreamOutput out) throws IOException {
+        // write the extra fields after the base response, so that the order and the
+        // integer width match the reads in the StreamInput constructor above
+        super.writeTo(out);
+        out.writeInt(topNSize);
+        out.writeEnum(metricType);
+    }
+
+    @Override
+    public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException {
+        final List<TopQueries> results = getNodes();
+        postProcess(results);
+        builder.startObject();
+        toClusterLevelResult(builder, params, results);
+        return builder.endObject();
+    }
+
+    @Override
+    public String toString() {
+        try {
+            final XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
+            // toXContent already wraps the content in a single JSON object
+            this.toXContent(builder, EMPTY_PARAMS);
+            return builder.toString();
+        } catch (IOException e) {
+            return "{ \"error\" : \"" + e.getMessage() + "\"}";
+        }
+    }
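`toClusterLevelResult` below merges the per-node top-N lists into a single cluster-wide top N by flattening, sorting descending on the metric, and truncating to N. A self-contained sketch of that merge with plain numbers (`MergeSketch` is a hypothetical name):

```java
import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;

// Hypothetical sketch of the per-node merge step in toClusterLevelResult.
public final class MergeSketch {
    public static void main(String[] args) {
        final int topN = 3;
        final List<List<Long>> perNode = List.of(List.of(300L, 120L), List.of(210L, 80L), List.of(150L));
        final List<Long> clusterTopN = perNode.stream()
            .flatMap(List::stream)
            .sorted(Comparator.reverseOrder())
            .limit(topN)
            .collect(Collectors.toList());
        System.out.println(clusterTopN); // prints [300, 210, 150]
    }
}
```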
+
+    /**
+     * Post process the top queries results to add customized attributes
+     *
+     * @param results the top queries results
+     */
+    private void postProcess(final List<TopQueries> results) {
+        for (TopQueries topQueries : results) {
+            final String nodeId = topQueries.getNode().getId();
+            for (SearchQueryRecord record : topQueries.getTopQueriesRecord()) {
+                record.addAttribute(Attribute.NODE_ID, nodeId);
+            }
+        }
+    }
+
+    /**
+     * Merge top n queries results from nodes into cluster level results in XContent format.
+     *
+     * @param builder XContent builder
+     * @param params serialization parameters
+     * @param results top queries results from all nodes
+     * @throws IOException if an error occurs
+     */
+    private void toClusterLevelResult(final XContentBuilder builder, final Params params, final List<TopQueries> results)
+        throws IOException {
+        final List<SearchQueryRecord> allRecords = results.stream()
+            .map(TopQueries::getTopQueriesRecord)
+            .flatMap(Collection::stream)
+            .sorted((a, b) -> SearchQueryRecord.compare(a, b, metricType) * -1)
+            .limit(topNSize)
+            .collect(Collectors.toList());
+        builder.startArray(CLUSTER_LEVEL_RESULTS_KEY);
+        for (SearchQueryRecord record : allRecords) {
+            record.toXContent(builder, params);
+        }
+        builder.endArray();
+    }
+
+}
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/package-info.java
new file mode 100644
index 0000000000000..3cc7900e5ce7d
--- /dev/null
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/action/top_queries/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * Transport Actions, Requests and Responses for Top N Queries
+ */
+package org.opensearch.plugin.insights.rules.action.top_queries;
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/Attribute.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/Attribute.java
new file mode 100644
index 0000000000000..c1d17edf9ff14
--- /dev/null
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/Attribute.java
@@ -0,0 +1,74 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.plugin.insights.rules.model; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.Locale; + +/** + * Valid attributes for a search query record + * + * @opensearch.internal + */ +public enum Attribute { + /** + * The search query type + */ + SEARCH_TYPE, + /** + * The search query source + */ + SOURCE, + /** + * Total shards queried + */ + TOTAL_SHARDS, + /** + * The indices involved + */ + INDICES, + /** + * The per phase level latency map for a search query + */ + PHASE_LATENCY_MAP, + /** + * The node id for this request + */ + NODE_ID; + + /** + * Read an Attribute from a StreamInput + * + * @param in the StreamInput to read from + * @return Attribute + * @throws IOException IOException + */ + static Attribute readFromStream(final StreamInput in) throws IOException { + return Attribute.valueOf(in.readString().toUpperCase(Locale.ROOT)); + } + + /** + * Write Attribute to a StreamOutput + * + * @param out the StreamOutput to write + * @param attribute the Attribute to write + * @throws IOException IOException + */ + static void writeTo(final StreamOutput out, final Attribute attribute) throws IOException { + out.writeString(attribute.toString()); + } + + @Override + public String toString() { + return this.name().toLowerCase(Locale.ROOT); + } +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/MetricType.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/MetricType.java new file mode 100644 index 0000000000000..cdd090fbf4804 --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/MetricType.java @@ -0,0 +1,121 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.plugin.insights.rules.model;
+
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.Locale;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+/**
+ * Valid metric types for a search query record
+ *
+ * @opensearch.internal
+ */
+public enum MetricType implements Comparator<Number> {
+    /**
+     * Latency metric type
+     */
+    LATENCY,
+    /**
+     * CPU usage metric type
+     */
+    CPU,
+    /**
+     * JVM heap usage metric type
+     */
+    JVM;
+
+    /**
+     * Read a MetricType from a StreamInput
+     *
+     * @param in the StreamInput to read from
+     * @return MetricType
+     * @throws IOException IOException
+     */
+    public static MetricType readFromStream(final StreamInput in) throws IOException {
+        return fromString(in.readString());
+    }
+
+    /**
+     * Create MetricType from String
+     *
+     * @param metricType the String representation of MetricType
+     * @return MetricType
+     */
+    public static MetricType fromString(final String metricType) {
+        return MetricType.valueOf(metricType.toUpperCase(Locale.ROOT));
+    }
+
+    /**
+     * Write MetricType to a StreamOutput
+     *
+     * @param out the StreamOutput to write
+     * @param metricType the MetricType to write
+     * @throws IOException IOException
+     */
+    static void writeTo(final StreamOutput out, final MetricType metricType) throws IOException {
+        out.writeString(metricType.toString());
+    }
+
+    @Override
+    public String toString() {
+        return this.name().toLowerCase(Locale.ROOT);
+    }
+
+    /**
+     * Get all valid metric types
+     *
+     * @return a set that contains all valid metric types
+     */
+    public static Set<MetricType> allMetricTypes() {
+        return Arrays.stream(values()).collect(Collectors.toSet());
+    }
+
+    /**
+     * Compare two numbers based on the metric type
+     *
+     * @param a the first Number to be compared.
+     * @param b the second Number to be compared.
+     * @return a negative integer, zero, or a positive integer as the first argument is less than, equal to, or greater than the second
+     */
+    public int compare(final Number a, final Number b) {
+        switch (this) {
+            case LATENCY:
+                return Long.compare(a.longValue(), b.longValue());
+            case JVM:
+            case CPU:
+                return Double.compare(a.doubleValue(), b.doubleValue());
+        }
+        return -1;
+    }
+
+    /**
+     * Parse a value with the correct type based on MetricType
+     *
+     * @param o the generic object to parse
+     * @return {@link Number}
+     */
+    Number parseValue(final Object o) {
+        switch (this) {
+            case LATENCY:
+                return (Long) o;
+            case JVM:
+            case CPU:
+                return (Double) o;
+            default:
+                return (Number) o;
+        }
+    }
+}
diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecord.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecord.java
new file mode 100644
index 0000000000000..060711edb5580
--- /dev/null
+++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecord.java
@@ -0,0 +1,176 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.insights.rules.model;
+
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.common.io.stream.Writeable;
+import org.opensearch.core.xcontent.ToXContent;
+import org.opensearch.core.xcontent.ToXContentObject;
+import org.opensearch.core.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * SearchQueryRecord represents a minimal atomic record stored in the Query Insight Framework,
+ * which contains extensive information related to a search query.
+ *
+ * @opensearch.internal
+ */
+public class SearchQueryRecord implements ToXContentObject, Writeable {
+    private final long timestamp;
+    private final Map<MetricType, Number> measurements;
+    private final Map<Attribute, Object> attributes;
+
+    /**
+     * Constructor of SearchQueryRecord
+     *
+     * @param in the StreamInput to read the SearchQueryRecord from
+     * @throws IOException if the stream cannot be deserialized
+     * @throws ClassCastException if a measurement value has an unexpected type
+     */
+    public SearchQueryRecord(final StreamInput in) throws IOException, ClassCastException {
+        this.timestamp = in.readLong();
+        measurements = new HashMap<>();
+        in.readMap(MetricType::readFromStream, StreamInput::readGenericValue)
+            .forEach((metricType, o) -> measurements.put(metricType, metricType.parseValue(o)));
+        this.attributes = in.readMap(Attribute::readFromStream, StreamInput::readGenericValue);
+    }
+
+    /**
+     * Constructor of SearchQueryRecord
+     *
+     * @param timestamp The timestamp of the query.
+     * @param measurements A map of {@link MetricType} to the measured values associated with this query
+     * @param attributes A map of {@link Attribute} metadata associated with this query
+     */
+    public SearchQueryRecord(final long timestamp, Map<MetricType, Number> measurements, final Map<Attribute, Object> attributes) {
+        if (measurements == null) {
+            throw new IllegalArgumentException("Measurements cannot be null");
+        }
+        this.measurements = measurements;
+        this.attributes = attributes;
+        this.timestamp = timestamp;
+    }
+
+    /**
+     * Returns the observation time of the metric.
+     *
+     * @return the observation time in milliseconds
+     */
+    public long getTimestamp() {
+        return timestamp;
+    }
+
+    /**
+     * Returns the measurement associated with the specified name.
+     *
+     * @param name the name of the measurement
+     * @return the measurement object, or null if not found
+     */
+    public Number getMeasurement(final MetricType name) {
+        return measurements.get(name);
+    }
+
+    /**
+     * Returns a map of all the measurements associated with the metric.
+     *
+     * @return a map of measurement names to measurement objects
+     */
+    public Map<MetricType, Number> getMeasurements() {
+        return measurements;
+    }
+
+    /**
+     * Returns a map of the attributes associated with the metric.
+ * + * @return a map of attribute keys to attribute values + */ + public Map<Attribute, Object> getAttributes() { + return attributes; + } + + /** + * Add an attribute to this record + * + * @param attribute attribute to add + * @param value the value associated with the attribute + */ + public void addAttribute(final Attribute attribute, final Object value) { + attributes.put(attribute, value); + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final ToXContent.Params params) throws IOException { + builder.startObject(); + builder.field("timestamp", timestamp); + for (Map.Entry<Attribute, Object> entry : attributes.entrySet()) { + builder.field(entry.getKey().toString(), entry.getValue()); + } + for (Map.Entry<MetricType, Number> entry : measurements.entrySet()) { + builder.field(entry.getKey().toString(), entry.getValue()); + } + return builder.endObject(); + } + + /** + * Write a SearchQueryRecord to a StreamOutput + * + * @param out the StreamOutput to write + * @throws IOException IOException + */ + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeLong(timestamp); + out.writeMap(measurements, (stream, metricType) -> MetricType.writeTo(out, metricType), StreamOutput::writeGenericValue); + out.writeMap(attributes, (stream, attribute) -> Attribute.writeTo(out, attribute), StreamOutput::writeGenericValue); + } + + /** + * Compare two SearchQueryRecord, based on the given MetricType + * + * @param a the first SearchQueryRecord to compare + * @param b the second SearchQueryRecord to compare + * @param metricType the MetricType to compare on + * @return 0 if the first SearchQueryRecord is numerically equal to the second SearchQueryRecord; + * -1 if the first SearchQueryRecord is numerically less than the second SearchQueryRecord; + * 1 if the first SearchQueryRecord is numerically greater than the second SearchQueryRecord. + */ + public static int compare(final SearchQueryRecord a, final SearchQueryRecord b, final MetricType metricType) { + return metricType.compare(a.getMeasurement(metricType), b.getMeasurement(metricType)); + } + + /** + * Check if a SearchQueryRecord is deep equal to another record + * + * @param o the other SearchQueryRecord record + * @return true if two records are deep equal, false otherwise. + */ + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (!(o instanceof SearchQueryRecord)) { + return false; + } + final SearchQueryRecord other = (SearchQueryRecord) o; + return timestamp == other.getTimestamp() + && measurements.equals(other.getMeasurements()) + && attributes.size() == other.getAttributes().size(); + } + + @Override + public int hashCode() { + return Objects.hash(timestamp, measurements, attributes); + } +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/package-info.java new file mode 100644 index 0000000000000..c59ec1550f54b --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/model/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** + * Data Models for Query Insight Records + */ +package org.opensearch.plugin.insights.rules.model; diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/package-info.java new file mode 100644 index 0000000000000..3787f05f65552 --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Rest Handlers for Query Insights + */ +package org.opensearch.plugin.insights.rules.resthandler; diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/RestTopQueriesAction.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/RestTopQueriesAction.java new file mode 100644 index 0000000000000..6aa511c626ab1 --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/RestTopQueriesAction.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.rules.resthandler.top_queries; + +import org.opensearch.client.node.NodeClient; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.Strings; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesAction; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesRequest; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesResponse; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.BytesRestResponse; +import org.opensearch.rest.RestChannel; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.RestResponse; +import org.opensearch.rest.action.RestResponseListener; + +import java.util.List; +import java.util.Locale; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.opensearch.plugin.insights.settings.QueryInsightsSettings.TOP_QUERIES_BASE_URI; +import static org.opensearch.rest.RestRequest.Method.GET; + +/** + * Rest action to get Top N queries by certain metric type + * + * @opensearch.api + */ +public class RestTopQueriesAction extends BaseRestHandler { + /** The metric types that are allowed in top N queries */ + static final Set<String> ALLOWED_METRICS = MetricType.allMetricTypes().stream().map(MetricType::toString).collect(Collectors.toSet()); + + /** + * Constructor for RestTopQueriesAction + */ + public RestTopQueriesAction() {} + + @Override + public List<Route> routes() { + return List.of( + new Route(GET, TOP_QUERIES_BASE_URI), + new Route(GET, String.format(Locale.ROOT, "%s/{nodeId}", TOP_QUERIES_BASE_URI)) + ); + } + + @Override + public String getName() { + return "query_insights_top_queries_action"; + } + + @Override + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) { + 
final TopQueriesRequest topQueriesRequest = prepareRequest(request); + topQueriesRequest.timeout(request.param("timeout")); + + return channel -> client.execute(TopQueriesAction.INSTANCE, topQueriesRequest, topQueriesResponse(channel)); + } + + static TopQueriesRequest prepareRequest(final RestRequest request) { + final String[] nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId")); + final String metricType = request.param("type", MetricType.LATENCY.toString()); + if (!ALLOWED_METRICS.contains(metricType)) { + throw new IllegalArgumentException( + String.format(Locale.ROOT, "request [%s] contains invalid metric type [%s]", request.path(), metricType) + ); + } + return new TopQueriesRequest(MetricType.fromString(metricType), nodesIds); + } + + @Override + protected Set<String> responseParams() { + return Settings.FORMAT_PARAMS; + } + + @Override + public boolean canTripCircuitBreaker() { + return false; + } + + private RestResponseListener<TopQueriesResponse> topQueriesResponse(final RestChannel channel) { + return new RestResponseListener<>(channel) { + @Override + public RestResponse buildResponse(final TopQueriesResponse response) throws Exception { + return new BytesRestResponse(RestStatus.OK, response.toXContent(channel.newBuilder(), ToXContent.EMPTY_PARAMS)); + } + }; + } +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/package-info.java new file mode 100644 index 0000000000000..087cf7d765f8c --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Rest Handlers for Top N Queries + */ +package org.opensearch.plugin.insights.rules.resthandler.top_queries; diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/package-info.java new file mode 100644 index 0000000000000..f3a1c70b9af57 --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Transport Actions for Query Insights. + */ +package org.opensearch.plugin.insights.rules.transport; diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesAction.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesAction.java new file mode 100644 index 0000000000000..ddf614211bc41 --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesAction.java @@ -0,0 +1,155 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.plugin.insights.rules.transport.top_queries;
+
+import org.opensearch.OpenSearchException;
+import org.opensearch.action.FailedNodeException;
+import org.opensearch.action.support.ActionFilters;
+import org.opensearch.action.support.nodes.TransportNodesAction;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.inject.Inject;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.plugin.insights.core.service.QueryInsightsService;
+import org.opensearch.plugin.insights.rules.action.top_queries.TopQueries;
+import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesAction;
+import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesRequest;
+import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesResponse;
+import org.opensearch.plugin.insights.rules.model.MetricType;
+import org.opensearch.plugin.insights.settings.QueryInsightsSettings;
+import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.TransportRequest;
+import org.opensearch.transport.TransportService;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Locale;
+
+/**
+ * Transport action for cluster/node level top queries information.
+ *
+ * @opensearch.internal
+ */
+public class TransportTopQueriesAction extends TransportNodesAction<
+    TopQueriesRequest,
+    TopQueriesResponse,
+    TransportTopQueriesAction.NodeRequest,
+    TopQueries> {
+
+    private final QueryInsightsService queryInsightsService;
+
+    /**
+     * Create the TransportTopQueriesAction Object
+     *
+     * @param threadPool The OpenSearch thread pool to run async tasks
+     * @param clusterService The clusterService of this node
+     * @param transportService The TransportService of this node
+     * @param queryInsightsService The QueryInsightsService associated with this transport action
+     * @param actionFilters the action filters
+     */
+    @Inject
+    public TransportTopQueriesAction(
+        final ThreadPool threadPool,
+        final ClusterService clusterService,
+        final TransportService transportService,
+        final QueryInsightsService queryInsightsService,
+        final ActionFilters actionFilters
+    ) {
+        super(
+            TopQueriesAction.NAME,
+            threadPool,
+            clusterService,
+            transportService,
+            actionFilters,
+            TopQueriesRequest::new,
+            NodeRequest::new,
+            ThreadPool.Names.GENERIC,
+            TopQueries.class
+        );
+        this.queryInsightsService = queryInsightsService;
+    }
+
+    @Override
+    protected TopQueriesResponse newResponse(
+        final TopQueriesRequest topQueriesRequest,
+        final List<TopQueries> responses,
+        final List<FailedNodeException> failures
+    ) {
+        if (topQueriesRequest.getMetricType() == MetricType.LATENCY) {
+            return new TopQueriesResponse(
+                clusterService.getClusterName(),
+                responses,
+                failures,
+                clusterService.getClusterSettings().get(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE),
+                MetricType.LATENCY
+            );
+        } else {
+            throw new OpenSearchException(String.format(Locale.ROOT, "invalid metric type %s", topQueriesRequest.getMetricType()));
+        }
+    }
+
+    @Override
+    protected NodeRequest newNodeRequest(final TopQueriesRequest request) {
+        return new NodeRequest(request);
+    }
+
+    @Override
+    protected TopQueries newNodeResponse(final StreamInput in) throws IOException {
+        return new TopQueries(in);
+    }
+
+    @Override
+    protected TopQueries nodeOperation(final NodeRequest nodeRequest) {
+        final TopQueriesRequest request = nodeRequest.request;
+        if (request.getMetricType() ==
MetricType.LATENCY) { + return new TopQueries( + clusterService.localNode(), + queryInsightsService.getTopQueriesService(MetricType.LATENCY).getTopQueriesRecords(true) + ); + } else { + throw new OpenSearchException(String.format(Locale.ROOT, "invalid metric type %s", request.getMetricType())); + } + + } + + /** + * Inner Node Top Queries Request + * + * @opensearch.internal + */ + public static class NodeRequest extends TransportRequest { + + final TopQueriesRequest request; + + /** + * Create the NodeRequest object from StreamInput + * + * @param in the StreamInput to read the object + * @throws IOException IOException + */ + public NodeRequest(StreamInput in) throws IOException { + super(in); + request = new TopQueriesRequest(in); + } + + /** + * Create the NodeRequest object from a TopQueriesRequest + * @param request the TopQueriesRequest object + */ + public NodeRequest(final TopQueriesRequest request) { + this.request = request; + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(out); + request.writeTo(out); + } + } +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/top_queries/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/top_queries/package-info.java new file mode 100644 index 0000000000000..54da0980deff8 --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/rules/transport/top_queries/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Transport Actions for Top N Queries. + */ +package org.opensearch.plugin.insights.rules.transport.top_queries; diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/settings/QueryInsightsSettings.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/settings/QueryInsightsSettings.java new file mode 100644 index 0000000000000..52cc1fbde790f --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/settings/QueryInsightsSettings.java @@ -0,0 +1,116 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.plugin.insights.settings; + +import org.opensearch.common.settings.Setting; +import org.opensearch.common.unit.TimeValue; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +/** + * Settings for Query Insights Plugin + * + * @opensearch.api + * @opensearch.experimental + */ +public class QueryInsightsSettings { + /** + * Name of the query insights executor + */ + public static final String QUERY_INSIGHTS_EXECUTOR = "query_insights_executor"; + /** + * Max number of threads + */ + public static final int MAX_THREAD_COUNT = 5; + /** + * Max number of requests for the consumer to collect at one time + */ + public static final int QUERY_RECORD_QUEUE_CAPACITY = 1000; + /** + * Time interval at which the record queue consumer runs + */ + public static final TimeValue QUERY_RECORD_QUEUE_DRAIN_INTERVAL = new TimeValue(5, TimeUnit.SECONDS); + /** + * Maximum window size + */ + public static final TimeValue MAX_WINDOW_SIZE = new TimeValue(1, TimeUnit.DAYS); + /** + * Minimum window size + */ + public static final TimeValue MIN_WINDOW_SIZE = new TimeValue(1, TimeUnit.MINUTES); + /** + * Valid window sizes + */ + public static final Set<TimeValue> VALID_WINDOW_SIZES_IN_MINUTES = new HashSet<>( + Arrays.asList( + new TimeValue(1, TimeUnit.MINUTES), + new TimeValue(5, TimeUnit.MINUTES), + new TimeValue(10, TimeUnit.MINUTES), + new TimeValue(30, TimeUnit.MINUTES) + ) + ); + + /** Maximum N size for top N queries */ + public static final int MAX_N_SIZE = 100; + /** Default window size to keep the top N queries with latency data in the query insights store */ + public static final TimeValue DEFAULT_WINDOW_SIZE = new TimeValue(60, TimeUnit.SECONDS); + /** Default top N size to keep the data in the query insights store */ + public static final int DEFAULT_TOP_N_SIZE = 3; + /** + * Query Insights base URI + */ + public static final String PLUGINS_BASE_URI = "/_insights"; + + /** + * Settings for Top Queries + */ + public static final String TOP_QUERIES_BASE_URI = PLUGINS_BASE_URI + "/top_queries"; + /** Settings prefix for the top N queries feature */ + public static final String TOP_N_QUERIES_SETTING_PREFIX = "search.insights.top_queries"; + /** Settings prefix for the top N queries by latency feature */ + public static final String TOP_N_LATENCY_QUERIES_PREFIX = TOP_N_QUERIES_SETTING_PREFIX + ".latency"; + /** + * Boolean setting for enabling top queries by latency. + */ + public static final Setting<Boolean> TOP_N_LATENCY_QUERIES_ENABLED = Setting.boolSetting( + TOP_N_LATENCY_QUERIES_PREFIX + ".enabled", + false, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Int setting to define the top n size for top queries by latency. + */ + public static final Setting<Integer> TOP_N_LATENCY_QUERIES_SIZE = Setting.intSetting( + TOP_N_LATENCY_QUERIES_PREFIX + ".top_n_size", + DEFAULT_TOP_N_SIZE, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Time setting to define the window size for top queries by latency.
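+ * For example, with the default 60 second window and the default top N size of 3, each node keeps at most its three slowest queries per one-minute window. An illustrative dynamic update (sample request only, not part of this change): + * PUT _cluster/settings { "persistent": { "search.insights.top_queries.latency.window_size": "10m" } }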
+ */ + public static final Setting<TimeValue> TOP_N_LATENCY_QUERIES_WINDOW_SIZE = Setting.positiveTimeSetting( + TOP_N_LATENCY_QUERIES_PREFIX + ".window_size", + DEFAULT_WINDOW_SIZE, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + /** + * Default constructor + */ + public QueryInsightsSettings() {} +} diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/settings/package-info.java b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/settings/package-info.java new file mode 100644 index 0000000000000..f3152bbf966cb --- /dev/null +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/settings/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Settings for Query Insights Plugin + */ +package org.opensearch.plugin.insights.settings; diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsPluginTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsPluginTests.java new file mode 100644 index 0000000000000..273b69e483e8c --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsPluginTests.java @@ -0,0 +1,107 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights; + +import org.opensearch.action.ActionRequest; +import org.opensearch.client.Client; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.plugin.insights.core.listener.QueryInsightsListener; +import org.opensearch.plugin.insights.core.service.QueryInsightsService; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesAction; +import org.opensearch.plugin.insights.rules.resthandler.top_queries.RestTopQueriesAction; +import org.opensearch.plugin.insights.settings.QueryInsightsSettings; +import org.opensearch.plugins.ActionPlugin; +import org.opensearch.rest.RestHandler; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ExecutorBuilder; +import org.opensearch.threadpool.ScalingExecutorBuilder; +import org.opensearch.threadpool.ThreadPool; +import org.junit.Before; + +import java.util.Arrays; +import java.util.List; + +import static org.mockito.Mockito.mock; + +public class QueryInsightsPluginTests extends OpenSearchTestCase { + + private QueryInsightsPlugin queryInsightsPlugin; + + private final Client client = mock(Client.class); + private ClusterService clusterService; + private final ThreadPool threadPool = mock(ThreadPool.class); + + @Before + public void setup() { + queryInsightsPlugin = new QueryInsightsPlugin(); + Settings.Builder settingsBuilder = Settings.builder(); + Settings settings = settingsBuilder.build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED); + clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE); + 
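/* all three dynamic settings must be registered before the test ClusterService below is constructed */ +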
clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE); + + clusterService = new ClusterService(settings, clusterSettings, threadPool); + + } + + public void testGetSettings() { + assertEquals( + Arrays.asList( + QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED, + QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE, + QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE + ), + queryInsightsPlugin.getSettings() + ); + } + + public void testCreateComponent() { + List<Object> components = (List<Object>) queryInsightsPlugin.createComponents( + client, + clusterService, + threadPool, + null, + null, + null, + null, + null, + null, + null, + null + ); + assertEquals(2, components.size()); + assertTrue(components.get(0) instanceof QueryInsightsService); + assertTrue(components.get(1) instanceof QueryInsightsListener); + } + + public void testGetExecutorBuilders() { + Settings.Builder settingsBuilder = Settings.builder(); + Settings settings = settingsBuilder.build(); + List<ExecutorBuilder<?>> executorBuilders = queryInsightsPlugin.getExecutorBuilders(settings); + assertEquals(1, executorBuilders.size()); + assertTrue(executorBuilders.get(0) instanceof ScalingExecutorBuilder); + } + + public void testGetRestHandlers() { + List<RestHandler> components = queryInsightsPlugin.getRestHandlers(Settings.EMPTY, null, null, null, null, null, null); + assertEquals(1, components.size()); + assertTrue(components.get(0) instanceof RestTopQueriesAction); + } + + public void testGetActions() { + List<ActionPlugin.ActionHandler<? extends ActionRequest, ? extends ActionResponse>> components = queryInsightsPlugin.getActions(); + assertEquals(1, components.size()); + assertTrue(components.get(0).getAction() instanceof TopQueriesAction); + } + +} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsTestUtils.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsTestUtils.java new file mode 100644 index 0000000000000..870ef5b9c8be9 --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/QueryInsightsTestUtils.java @@ -0,0 +1,189 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.insights; + +import org.opensearch.action.search.SearchType; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.util.Maps; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueries; +import org.opensearch.plugin.insights.rules.model.Attribute; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; +import org.opensearch.test.VersionUtils; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.test.OpenSearchTestCase.buildNewFakeTransportAddress; +import static org.opensearch.test.OpenSearchTestCase.random; +import static org.opensearch.test.OpenSearchTestCase.randomAlphaOfLengthBetween; +import static org.opensearch.test.OpenSearchTestCase.randomArray; +import static org.opensearch.test.OpenSearchTestCase.randomDouble; +import static org.opensearch.test.OpenSearchTestCase.randomIntBetween; +import static org.opensearch.test.OpenSearchTestCase.randomLong; +import static org.opensearch.test.OpenSearchTestCase.randomLongBetween; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +public final class QueryInsightsTestUtils { + + public QueryInsightsTestUtils() {} + + public static List<SearchQueryRecord> generateQueryInsightRecords(int count) { + return generateQueryInsightRecords(count, count, System.currentTimeMillis(), 0); + } + + /** + * Creates a List of random Query Insight Records for testing purposes + */ + public static List<SearchQueryRecord> generateQueryInsightRecords(int lower, int upper, long startTimeStamp, long interval) { + List<SearchQueryRecord> records = new ArrayList<>(); + int countOfRecords = randomIntBetween(lower, upper); + long timestamp = startTimeStamp; + for (int i = 0; i < countOfRecords; ++i) { + Map<MetricType, Number> measurements = Map.of( + MetricType.LATENCY, + randomLongBetween(1000, 10000), + MetricType.CPU, + randomDouble(), + MetricType.JVM, + randomDouble() + ); + + Map<String, Long> phaseLatencyMap = new HashMap<>(); + int countOfPhases = randomIntBetween(2, 5); + for (int j = 0; j < countOfPhases; ++j) { + phaseLatencyMap.put(randomAlphaOfLengthBetween(5, 10), randomLong()); + } + Map<Attribute, Object> attributes = new HashMap<>(); + attributes.put(Attribute.SEARCH_TYPE, SearchType.QUERY_THEN_FETCH.toString().toLowerCase(Locale.ROOT)); + attributes.put(Attribute.SOURCE, "{\"size\":20}"); + attributes.put(Attribute.TOTAL_SHARDS, randomIntBetween(1, 100)); + attributes.put(Attribute.INDICES, randomArray(1, 3, Object[]::new, () -> randomAlphaOfLengthBetween(5, 10))); + attributes.put(Attribute.PHASE_LATENCY_MAP, phaseLatencyMap); + + records.add(new SearchQueryRecord(timestamp, measurements, attributes)); + timestamp += interval; + } + return records; + } + + public static TopQueries createRandomTopQueries() { + DiscoveryNode node = new DiscoveryNode( + "node_for_top_queries_test", + buildNewFakeTransportAddress(), + emptyMap(), + emptySet(), +
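/* a randomly chosen node version; these fixtures only feed serialization and rendering tests */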
VersionUtils.randomVersion(random()) + ); + List<SearchQueryRecord> records = generateQueryInsightRecords(10); + + return new TopQueries(node, records); + } + + public static TopQueries createFixedTopQueries() { + DiscoveryNode node = new DiscoveryNode( + "node_for_top_queries_test", + buildNewFakeTransportAddress(), + emptyMap(), + emptySet(), + VersionUtils.randomVersion(random()) + ); + List<SearchQueryRecord> records = new ArrayList<>(); + records.add(createFixedSearchQueryRecord()); + + return new TopQueries(node, records); + } + + public static SearchQueryRecord createFixedSearchQueryRecord() { + long timestamp = 1706574180000L; + Map<MetricType, Number> measurements = Map.of(MetricType.LATENCY, 1L); + + Map<String, Long> phaseLatencyMap = new HashMap<>(); + Map<Attribute, Object> attributes = new HashMap<>(); + attributes.put(Attribute.SEARCH_TYPE, SearchType.QUERY_THEN_FETCH.toString().toLowerCase(Locale.ROOT)); + + return new SearchQueryRecord(timestamp, measurements, attributes); + } + + public static void compareJson(ToXContent param1, ToXContent param2) throws IOException { + if (param1 == null || param2 == null) { + assertNull(param1); + assertNull(param2); + return; + } + + ToXContent.Params params = ToXContent.EMPTY_PARAMS; + XContentBuilder param1Builder = jsonBuilder(); + param1.toXContent(param1Builder, params); + + XContentBuilder param2Builder = jsonBuilder(); + param2.toXContent(param2Builder, params); + + assertEquals(param1Builder.toString(), param2Builder.toString()); + } + + @SuppressWarnings("unchecked") + public static boolean checkRecordsEquals(List<SearchQueryRecord> records1, List<SearchQueryRecord> records2) { + if (records1.size() != records2.size()) { + return false; + } + for (int i = 0; i < records1.size(); i++) { + if (!records1.get(i).equals(records2.get(i))) { + return false; + } + Map<Attribute, Object> attributes1 = records1.get(i).getAttributes(); + Map<Attribute, Object> attributes2 = records2.get(i).getAttributes(); + for (Map.Entry<Attribute, Object> entry : attributes1.entrySet()) { + Attribute attribute = entry.getKey(); + Object value = entry.getValue(); + if (!attributes2.containsKey(attribute)) { + return false; + } + if (value instanceof Object[] && !Arrays.deepEquals((Object[]) value, (Object[]) attributes2.get(attribute))) { + return false; + } else if (value instanceof Map + && !Maps.deepEquals((Map<Object, Object>) value, (Map<Object, Object>) attributes2.get(attribute))) { + return false; + } + } + } + return true; + } + + public static boolean checkRecordsEqualsWithoutOrder( + List<SearchQueryRecord> records1, + List<SearchQueryRecord> records2, + MetricType metricType + ) { + Set<SearchQueryRecord> set2 = new TreeSet<>((a, b) -> SearchQueryRecord.compare(a, b, metricType)); + set2.addAll(records2); + if (records1.size() != records2.size()) { + return false; + } + for (int i = 0; i < records1.size(); i++) { + if (!set2.contains(records1.get(i))) { + return false; + } + } + return true; + } +} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListenerTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListenerTests.java new file mode 100644 index 0000000000000..f340950017a5c --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/listener/QueryInsightsListenerTests.java @@ -0,0 +1,161 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions 
made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.core.listener; + +import org.opensearch.action.search.SearchPhaseContext; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchRequestContext; +import org.opensearch.action.search.SearchType; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.plugin.insights.core.service.QueryInsightsService; +import org.opensearch.plugin.insights.core.service.TopQueriesService; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.plugin.insights.settings.QueryInsightsSettings; +import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.opensearch.search.aggregations.support.ValueType; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Phaser; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +/** + * Unit Tests for {@link QueryInsightsListener}. + */ +public class QueryInsightsListenerTests extends OpenSearchTestCase { + private final SearchRequestContext searchRequestContext = mock(SearchRequestContext.class); + private final SearchPhaseContext searchPhaseContext = mock(SearchPhaseContext.class); + private final SearchRequest searchRequest = mock(SearchRequest.class); + private final QueryInsightsService queryInsightsService = mock(QueryInsightsService.class); + private final TopQueriesService topQueriesService = mock(TopQueriesService.class); + private ClusterService clusterService; + + @Before + public void setup() { + Settings.Builder settingsBuilder = Settings.builder(); + Settings settings = settingsBuilder.build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED); + clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE); + clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE); + clusterService = new ClusterService(settings, clusterSettings, null); + when(queryInsightsService.isCollectionEnabled(MetricType.LATENCY)).thenReturn(true); + when(queryInsightsService.getTopQueriesService(MetricType.LATENCY)).thenReturn(topQueriesService); + } + + public void testOnRequestEnd() throws InterruptedException { + Long timestamp = System.currentTimeMillis() - 100L; + SearchType searchType = SearchType.QUERY_THEN_FETCH; + + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + searchSourceBuilder.aggregation(new TermsAggregationBuilder("agg1").userValueTypeHint(ValueType.STRING).field("type.keyword")); + searchSourceBuilder.size(0); + + String[] indices = new String[] { "index-1", "index-2" }; + + Map<String, Long> phaseLatencyMap = new HashMap<>(); + phaseLatencyMap.put("expand", 0L); + phaseLatencyMap.put("query", 20L); + phaseLatencyMap.put("fetch", 1L); + + int numberOfShards = 
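/* arbitrary shard count, returned below by the mocked SearchPhaseContext#getNumShards */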
10; + + QueryInsightsListener queryInsightsListener = new QueryInsightsListener(clusterService, queryInsightsService); + + when(searchRequest.getOrCreateAbsoluteStartMillis()).thenReturn(timestamp); + when(searchRequest.searchType()).thenReturn(searchType); + when(searchRequest.source()).thenReturn(searchSourceBuilder); + when(searchRequest.indices()).thenReturn(indices); + when(searchRequestContext.phaseTookMap()).thenReturn(phaseLatencyMap); + when(searchPhaseContext.getRequest()).thenReturn(searchRequest); + when(searchPhaseContext.getNumShards()).thenReturn(numberOfShards); + + queryInsightsListener.onRequestEnd(searchPhaseContext, searchRequestContext); + + verify(queryInsightsService, times(1)).addRecord(any()); + } + + public void testConcurrentOnRequestEnd() throws InterruptedException { + Long timestamp = System.currentTimeMillis() - 100L; + SearchType searchType = SearchType.QUERY_THEN_FETCH; + + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + searchSourceBuilder.aggregation(new TermsAggregationBuilder("agg1").userValueTypeHint(ValueType.STRING).field("type.keyword")); + searchSourceBuilder.size(0); + + String[] indices = new String[] { "index-1", "index-2" }; + + Map<String, Long> phaseLatencyMap = new HashMap<>(); + phaseLatencyMap.put("expand", 0L); + phaseLatencyMap.put("query", 20L); + phaseLatencyMap.put("fetch", 1L); + + int numberOfShards = 10; + + final List<QueryInsightsListener> searchListenersList = new ArrayList<>(); + + when(searchRequest.getOrCreateAbsoluteStartMillis()).thenReturn(timestamp); + when(searchRequest.searchType()).thenReturn(searchType); + when(searchRequest.source()).thenReturn(searchSourceBuilder); + when(searchRequest.indices()).thenReturn(indices); + when(searchRequestContext.phaseTookMap()).thenReturn(phaseLatencyMap); + when(searchPhaseContext.getRequest()).thenReturn(searchRequest); + when(searchPhaseContext.getNumShards()).thenReturn(numberOfShards); + + int numRequests = 50; + Thread[] threads = new Thread[numRequests]; + Phaser phaser = new Phaser(numRequests + 1); + CountDownLatch countDownLatch = new CountDownLatch(numRequests); + + for (int i = 0; i < numRequests; i++) { + searchListenersList.add(new QueryInsightsListener(clusterService, queryInsightsService)); + } + + for (int i = 0; i < numRequests; i++) { + int finalI = i; + threads[i] = new Thread(() -> { + phaser.arriveAndAwaitAdvance(); + QueryInsightsListener thisListener = searchListenersList.get(finalI); + thisListener.onRequestEnd(searchPhaseContext, searchRequestContext); + countDownLatch.countDown(); + }); + threads[i].start(); + } + phaser.arriveAndAwaitAdvance(); + countDownLatch.await(); + + verify(queryInsightsService, times(numRequests)).addRecord(any()); + } + + public void testSetEnabled() { + when(queryInsightsService.isCollectionEnabled(MetricType.LATENCY)).thenReturn(true); + QueryInsightsListener queryInsightsListener = new QueryInsightsListener(clusterService, queryInsightsService); + queryInsightsListener.setEnableTopQueries(MetricType.LATENCY, true); + assertTrue(queryInsightsListener.isEnabled()); + + when(queryInsightsService.isCollectionEnabled(MetricType.LATENCY)).thenReturn(false); + when(queryInsightsService.isCollectionEnabled(MetricType.CPU)).thenReturn(false); + when(queryInsightsService.isCollectionEnabled(MetricType.JVM)).thenReturn(false); + queryInsightsListener.setEnableTopQueries(MetricType.LATENCY, false); + assertFalse(queryInsightsListener.isEnabled()); + } +} diff --git 
a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/QueryInsightsServiceTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/QueryInsightsServiceTests.java new file mode 100644 index 0000000000000..c29b48b9690d1 --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/QueryInsightsServiceTests.java @@ -0,0 +1,49 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.core.service; + +import org.opensearch.plugin.insights.QueryInsightsTestUtils; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; +import org.opensearch.plugin.insights.settings.QueryInsightsSettings; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.junit.Before; + +import static org.mockito.Mockito.mock; + +/** + * Unit Tests for {@link QueryInsightsService}. + */ +public class QueryInsightsServiceTests extends OpenSearchTestCase { + private final ThreadPool threadPool = mock(ThreadPool.class); + private QueryInsightsService queryInsightsService; + + @Before + public void setup() { + queryInsightsService = new QueryInsightsService(threadPool); + queryInsightsService.enableCollection(MetricType.LATENCY, true); + queryInsightsService.enableCollection(MetricType.CPU, true); + queryInsightsService.enableCollection(MetricType.JVM, true); + } + + public void testAddRecordToLimitAndDrain() { + SearchQueryRecord record = QueryInsightsTestUtils.generateQueryInsightRecords(1, 1, System.currentTimeMillis(), 0).get(0); + for (int i = 0; i < QueryInsightsSettings.QUERY_RECORD_QUEUE_CAPACITY; i++) { + assertTrue(queryInsightsService.addRecord(record)); + } + // exceed capacity + assertFalse(queryInsightsService.addRecord(record)); + queryInsightsService.drainRecords(); + assertEquals( + QueryInsightsSettings.DEFAULT_TOP_N_SIZE, + queryInsightsService.getTopQueriesService(MetricType.LATENCY).getTopQueriesRecords(false).size() + ); + } +} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/TopQueriesServiceTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/TopQueriesServiceTests.java new file mode 100644 index 0000000000000..060df84a89485 --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/TopQueriesServiceTests.java @@ -0,0 +1,102 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.core.service; + +import org.opensearch.cluster.coordination.DeterministicTaskQueue; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.plugin.insights.QueryInsightsTestUtils; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.plugin.insights.rules.model.SearchQueryRecord; +import org.opensearch.plugin.insights.settings.QueryInsightsSettings; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +/** + * Unit Tests for {@link TopQueriesService}.
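+ * The service under test is created in {@code setup()} with an effectively unbounded top N size and window, so each case can tighten one dimension at a time.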
+ */ +public class TopQueriesServiceTests extends OpenSearchTestCase { + private TopQueriesService topQueriesService; + + @Before + public void setup() { + topQueriesService = new TopQueriesService(MetricType.LATENCY); + topQueriesService.setTopNSize(Integer.MAX_VALUE); + topQueriesService.setWindowSize(new TimeValue(Long.MAX_VALUE)); + topQueriesService.setEnabled(true); + } + + public void testIngestQueryDataWithLargeWindow() { + final List<SearchQueryRecord> records = QueryInsightsTestUtils.generateQueryInsightRecords(10); + topQueriesService.consumeRecords(records); + assertTrue( + QueryInsightsTestUtils.checkRecordsEqualsWithoutOrder( + topQueriesService.getTopQueriesRecords(false), + records, + MetricType.LATENCY + ) + ); + } + + public void testRollingWindows() { + List<SearchQueryRecord> records; + // Create 5 records at Now - 10 minutes to make sure they belong to the last window + records = QueryInsightsTestUtils.generateQueryInsightRecords(5, 5, System.currentTimeMillis() - 1000 * 60 * 10, 0); + topQueriesService.setWindowSize(TimeValue.timeValueMinutes(10)); + topQueriesService.consumeRecords(records); + assertEquals(0, topQueriesService.getTopQueriesRecords(true).size()); + + // Create 10 records at now + 1 minute, to make sure they belong to the current window + records = QueryInsightsTestUtils.generateQueryInsightRecords(10, 10, System.currentTimeMillis() + 1000 * 60, 0); + topQueriesService.setWindowSize(TimeValue.timeValueMinutes(10)); + topQueriesService.consumeRecords(records); + assertEquals(10, topQueriesService.getTopQueriesRecords(true).size()); + } + + public void testSmallNSize() { + final List<SearchQueryRecord> records = QueryInsightsTestUtils.generateQueryInsightRecords(10); + topQueriesService.setTopNSize(1); + topQueriesService.consumeRecords(records); + assertEquals(1, topQueriesService.getTopQueriesRecords(false).size()); + } + + public void testValidateTopNSize() { + assertThrows(IllegalArgumentException.class, () -> { topQueriesService.validateTopNSize(QueryInsightsSettings.MAX_N_SIZE + 1); }); + } + + public void testGetTopQueriesWhenNotEnabled() { + topQueriesService.setEnabled(false); + assertThrows(IllegalArgumentException.class, () -> { topQueriesService.getTopQueriesRecords(false); }); + } + + public void testValidateWindowSize() { + assertThrows(IllegalArgumentException.class, () -> { + topQueriesService.validateWindowSize(new TimeValue(QueryInsightsSettings.MAX_WINDOW_SIZE.getSeconds() + 1, TimeUnit.SECONDS)); + }); + assertThrows(IllegalArgumentException.class, () -> { + topQueriesService.validateWindowSize(new TimeValue(QueryInsightsSettings.MIN_WINDOW_SIZE.getSeconds() - 1, TimeUnit.SECONDS)); + }); + assertThrows(IllegalArgumentException.class, () -> { topQueriesService.validateWindowSize(new TimeValue(2, TimeUnit.DAYS)); }); + assertThrows(IllegalArgumentException.class, () -> { topQueriesService.validateWindowSize(new TimeValue(7, TimeUnit.MINUTES)); }); + } + + private static void runUntilTimeoutOrFinish(DeterministicTaskQueue deterministicTaskQueue, long duration) { + final long endTime = deterministicTaskQueue.getCurrentTimeMillis() + duration; + while (deterministicTaskQueue.getCurrentTimeMillis() < endTime + && (deterministicTaskQueue.hasRunnableTasks() || deterministicTaskQueue.hasDeferredTasks())) { + if (deterministicTaskQueue.hasDeferredTasks() && randomBoolean()) { + deterministicTaskQueue.advanceTime(); + } else if (deterministicTaskQueue.hasRunnableTasks()) { + deterministicTaskQueue.runRandomTask(); + } + } + } +} diff --git 
a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesRequestTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesRequestTests.java new file mode 100644 index 0000000000000..619fd4b33a3dc --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesRequestTests.java @@ -0,0 +1,43 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.rules.action.top_queries; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.test.OpenSearchTestCase; + +/** + * Granular tests for the {@link TopQueriesRequest} class. + */ +public class TopQueriesRequestTests extends OpenSearchTestCase { + + /** + * Check that we can set the metric type + */ + public void testSetMetricType() throws Exception { + TopQueriesRequest request = new TopQueriesRequest(MetricType.LATENCY, randomAlphaOfLength(5)); + TopQueriesRequest deserializedRequest = roundTripRequest(request); + assertEquals(request.getMetricType(), deserializedRequest.getMetricType()); + } + + /** + * Serialize and deserialize a request. + * @param request A request to serialize. + * @return The deserialized, "round-tripped" request. + */ + private static TopQueriesRequest roundTripRequest(TopQueriesRequest request) throws Exception { + try (BytesStreamOutput out = new BytesStreamOutput()) { + request.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + return new TopQueriesRequest(in); + } + } + } +} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesResponseTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesResponseTests.java new file mode 100644 index 0000000000000..eeee50d3da703 --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesResponseTests.java @@ -0,0 +1,71 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.rules.action.top_queries; + +import org.opensearch.cluster.ClusterName; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.plugin.insights.QueryInsightsTestUtils; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +/** + * Granular tests for the {@link TopQueriesResponse} class. 
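+ * Covers wire serialization round trips as well as the JSON body rendered for the REST layer.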
+ */ +public class TopQueriesResponseTests extends OpenSearchTestCase { + + /** + * Check serialization and deserialization + */ + public void testSerialize() throws Exception { + TopQueries topQueries = QueryInsightsTestUtils.createRandomTopQueries(); + ClusterName clusterName = new ClusterName("test-cluster"); + TopQueriesResponse response = new TopQueriesResponse(clusterName, List.of(topQueries), new ArrayList<>(), 10, MetricType.LATENCY); + TopQueriesResponse deserializedResponse = roundTripResponse(response); + assertEquals(response.toString(), deserializedResponse.toString()); + } + + public void testToXContent() throws IOException { + char[] expectedXcontent = + "{\"top_queries\":[{\"timestamp\":1706574180000,\"node_id\":\"node_for_top_queries_test\",\"search_type\":\"query_then_fetch\",\"latency\":1}]}" + .toCharArray(); + TopQueries topQueries = QueryInsightsTestUtils.createFixedTopQueries(); + ClusterName clusterName = new ClusterName("test-cluster"); + TopQueriesResponse response = new TopQueriesResponse(clusterName, List.of(topQueries), new ArrayList<>(), 10, MetricType.LATENCY); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); + char[] xContent = BytesReference.bytes(response.toXContent(builder, ToXContent.EMPTY_PARAMS)).utf8ToString().toCharArray(); + Arrays.sort(expectedXcontent); + Arrays.sort(xContent); + + assertEquals(Arrays.hashCode(expectedXcontent), Arrays.hashCode(xContent)); + } + + /** + * Serialize and deserialize a TopQueriesResponse. + * @param response A response to serialize. + * @return The deserialized, "round-tripped" response. + */ + private static TopQueriesResponse roundTripResponse(TopQueriesResponse response) throws Exception { + try (BytesStreamOutput out = new BytesStreamOutput()) { + response.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + return new TopQueriesResponse(in); + } + } + } +} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesTests.java new file mode 100644 index 0000000000000..7db08b53ad1df --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/action/top_queries/TopQueriesTests.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.rules.action.top_queries; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.plugin.insights.QueryInsightsTestUtils; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +/** + * Tests for {@link TopQueries}. 
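+ * A TopQueries instance pairs a node with the SearchQueryRecord list collected on that node, so the test compares the records after a stream round trip.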
+ */ +public class TopQueriesTests extends OpenSearchTestCase { + + public void testTopQueries() throws IOException { + TopQueries topQueries = QueryInsightsTestUtils.createRandomTopQueries(); + try (BytesStreamOutput out = new BytesStreamOutput()) { + topQueries.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + TopQueries readTopQueries = new TopQueries(in); + assertTrue( + QueryInsightsTestUtils.checkRecordsEquals(topQueries.getTopQueriesRecord(), readTopQueries.getTopQueriesRecord()) + ); + } + } + } +} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecordTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecordTests.java new file mode 100644 index 0000000000000..793d5878e2300 --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/model/SearchQueryRecordTests.java @@ -0,0 +1,71 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.rules.model; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.plugin.insights.QueryInsightsTestUtils; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +/** + * Granular tests for the {@link SearchQueryRecord} class. + */ +public class SearchQueryRecordTests extends OpenSearchTestCase { + + /** + * Check that the serialization, deserialization and equals functions work as expected + */ + public void testSerializationAndEquals() throws Exception { + List<SearchQueryRecord> records = QueryInsightsTestUtils.generateQueryInsightRecords(10); + List<SearchQueryRecord> copiedRecords = new ArrayList<>(); + for (SearchQueryRecord record : records) { + copiedRecords.add(roundTripRecord(record)); + } + assertTrue(QueryInsightsTestUtils.checkRecordsEquals(records, copiedRecords)); + + } + + public void testAllMetricTypes() { + Set<MetricType> allMetrics = MetricType.allMetricTypes(); + Set<MetricType> expected = new HashSet<>(Arrays.asList(MetricType.LATENCY, MetricType.CPU, MetricType.JVM)); + assertEquals(expected, allMetrics); + } + + public void testCompare() { + SearchQueryRecord record1 = QueryInsightsTestUtils.createFixedSearchQueryRecord(); + SearchQueryRecord record2 = QueryInsightsTestUtils.createFixedSearchQueryRecord(); + assertEquals(0, SearchQueryRecord.compare(record1, record2, MetricType.LATENCY)); + } + + public void testEqual() { + SearchQueryRecord record1 = QueryInsightsTestUtils.createFixedSearchQueryRecord(); + SearchQueryRecord record2 = QueryInsightsTestUtils.createFixedSearchQueryRecord(); + assertEquals(record1, record2); + } + + /** + * Serialize and deserialize a SearchQueryRecord. + * @param record A SearchQueryRecord to serialize. + * @return The deserialized, "round-tripped" record.
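+ * @throws Exception if writing to or reading from the intermediate byte stream fails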
+ */ + private static SearchQueryRecord roundTripRecord(SearchQueryRecord record) throws Exception { + try (BytesStreamOutput out = new BytesStreamOutput()) { + record.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + return new SearchQueryRecord(in); + } + } + } +} diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/RestTopQueriesActionTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/RestTopQueriesActionTests.java new file mode 100644 index 0000000000000..ac19fa2a7348f --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/resthandler/top_queries/RestTopQueriesActionTests.java @@ -0,0 +1,70 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.rules.resthandler.top_queries; + +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesRequest; +import org.opensearch.rest.RestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.rest.FakeRestRequest; + +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +import static org.opensearch.plugin.insights.rules.resthandler.top_queries.RestTopQueriesAction.ALLOWED_METRICS; + +public class RestTopQueriesActionTests extends OpenSearchTestCase { + + public void testEmptyNodeIdsValidType() { + Map<String, String> params = new HashMap<>(); + params.put("type", randomFrom(ALLOWED_METRICS)); + RestRequest restRequest = buildRestRequest(params); + TopQueriesRequest actual = RestTopQueriesAction.prepareRequest(restRequest); + assertEquals(0, actual.nodesIds().length); + } + + public void testNodeIdsValid() { + Map<String, String> params = new HashMap<>(); + params.put("type", randomFrom(ALLOWED_METRICS)); + String[] nodes = randomArray(1, 10, String[]::new, () -> randomAlphaOfLengthBetween(5, 10)); + params.put("nodeId", String.join(",", nodes)); + + RestRequest restRequest = buildRestRequest(params); + TopQueriesRequest actual = RestTopQueriesAction.prepareRequest(restRequest); + assertArrayEquals(nodes, actual.nodesIds()); + } + + public void testInValidType() { + Map<String, String> params = new HashMap<>(); + params.put("type", randomAlphaOfLengthBetween(5, 10).toUpperCase(Locale.ROOT)); + + RestRequest restRequest = buildRestRequest(params); + Exception exception = assertThrows(IllegalArgumentException.class, () -> { RestTopQueriesAction.prepareRequest(restRequest); }); + assertEquals( + String.format(Locale.ROOT, "request [/_insights/top_queries] contains invalid metric type [%s]", params.get("type")), + exception.getMessage() + ); + } + + public void testGetRoutes() { + RestTopQueriesAction action = new RestTopQueriesAction(); + List<RestHandler.Route> routes = action.routes(); + assertEquals(2, routes.size()); + assertEquals("query_insights_top_queries_action", action.getName()); + } + + private FakeRestRequest buildRestRequest(Map<String, String> params) { + return new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.GET) + .withPath("/_insights/top_queries") + .withParams(params) + .build(); + } +} diff --git 
a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesActionTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesActionTests.java new file mode 100644 index 0000000000000..a5f36b6e8cce0 --- /dev/null +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/rules/transport/top_queries/TransportTopQueriesActionTests.java @@ -0,0 +1,84 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.insights.rules.transport.top_queries; + +import org.opensearch.action.support.ActionFilters; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.plugin.insights.core.service.QueryInsightsService; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesRequest; +import org.opensearch.plugin.insights.rules.action.top_queries.TopQueriesResponse; +import org.opensearch.plugin.insights.rules.model.MetricType; +import org.opensearch.plugin.insights.settings.QueryInsightsSettings; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; +import org.junit.Before; + +import java.util.List; + +import static org.mockito.Mockito.mock; + +public class TransportTopQueriesActionTests extends OpenSearchTestCase { + + private final ThreadPool threadPool = mock(ThreadPool.class); + + private final Settings.Builder settingsBuilder = Settings.builder(); + private final Settings settings = settingsBuilder.build(); + private final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + private final ClusterService clusterService = new ClusterService(settings, clusterSettings, threadPool); + private final TransportService transportService = mock(TransportService.class); + private final QueryInsightsService topQueriesByLatencyService = mock(QueryInsightsService.class); + private final ActionFilters actionFilters = mock(ActionFilters.class); + private final TransportTopQueriesAction transportTopQueriesAction = new TransportTopQueriesAction( + threadPool, + clusterService, + transportService, + topQueriesByLatencyService, + actionFilters + ); + private final DummyParentAction dummyParentAction = new DummyParentAction( + threadPool, + clusterService, + transportService, + topQueriesByLatencyService, + actionFilters + ); + + class DummyParentAction extends TransportTopQueriesAction { + public DummyParentAction( + ThreadPool threadPool, + ClusterService clusterService, + TransportService transportService, + QueryInsightsService topQueriesByLatencyService, + ActionFilters actionFilters + ) { + super(threadPool, clusterService, transportService, topQueriesByLatencyService, actionFilters); + } + + public TopQueriesResponse createNewResponse() { + TopQueriesRequest request = new TopQueriesRequest(MetricType.LATENCY); + return newResponse(request, List.of(), List.of()); + } + } + + @Before + public void setup() { + clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_ENABLED); + clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_SIZE); + 
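/* newResponse() resolves TOP_N_LATENCY_QUERIES_SIZE from these cluster settings, so the registrations here are required */ +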
clusterSettings.registerSetting(QueryInsightsSettings.TOP_N_LATENCY_QUERIES_WINDOW_SIZE); + } + + public void testNewResponse() { + TopQueriesResponse response = dummyParentAction.createNewResponse(); + assertNotNull(response); + } + +} diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 9ec1b4ee50569..31db767b2c68e 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -44,7 +44,7 @@ opensearchplugin { } dependencies { - api 'com.azure:azure-core:1.39.0' + api 'com.azure:azure-core:1.47.0' api 'com.azure:azure-json:1.0.1' api 'com.azure:azure-storage-common:12.21.2' api 'com.azure:azure-core-http-netty:1.12.8' @@ -55,22 +55,19 @@ dependencies { api "io.netty:netty-resolver-dns:${versions.netty}" api "io.netty:netty-transport-native-unix-common:${versions.netty}" implementation project(':modules:transport-netty4') - api 'com.azure:azure-storage-blob:12.22.3' - api 'org.reactivestreams:reactive-streams:1.0.4' - api 'io.projectreactor:reactor-core:3.5.6' - api 'io.projectreactor.netty:reactor-netty:1.1.8' - api 'io.projectreactor.netty:reactor-netty-core:1.1.8' - api 'io.projectreactor.netty:reactor-netty-http:1.1.8' + api 'com.azure:azure-storage-blob:12.23.0' + api "io.projectreactor.netty:reactor-netty-core:${versions.reactor_netty}" + api "io.projectreactor.netty:reactor-netty-http:${versions.reactor_netty}" api "org.slf4j:slf4j-api:${versions.slf4j}" api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api "com.fasterxml.jackson.datatype:jackson-datatype-jsr310:${versions.jackson}" api "com.fasterxml.jackson.dataformat:jackson-dataformat-xml:${versions.jackson}" api "com.fasterxml.jackson.module:jackson-module-jaxb-annotations:${versions.jackson}" - api 'org.codehaus.woodstox:stax2-api:4.2.1' + api 'org.codehaus.woodstox:stax2-api:4.2.2' implementation "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}" runtimeOnly "com.google.guava:guava:${versions.guava}" - api 'org.apache.commons:commons-lang3:3.12.0' + api "org.apache.commons:commons-lang3:${versions.commonslang}" testImplementation project(':test:fixtures:azure-fixture') } @@ -101,10 +98,6 @@ thirdPartyAudit { 'com.azure.storage.internal.avro.implementation.schema.AvroSchema', 'com.ctc.wstx.shaded.msv_core.driver.textui.Driver', 'io.micrometer.context.ContextAccessor', - 'io.micrometer.context.ContextRegistry', - 'io.micrometer.context.ContextSnapshot', - 'io.micrometer.context.ContextSnapshot$Scope', - 'io.micrometer.core.instrument.Clock', 'io.micrometer.core.instrument.Counter', 'io.micrometer.core.instrument.Counter$Builder', 'io.micrometer.core.instrument.DistributionSummary', @@ -114,14 +107,10 @@ thirdPartyAudit { 'io.micrometer.core.instrument.Meter', 'io.micrometer.core.instrument.MeterRegistry', 'io.micrometer.core.instrument.Metrics', - 'io.micrometer.core.instrument.Tag', - 'io.micrometer.core.instrument.Tags', 'io.micrometer.core.instrument.Timer', 'io.micrometer.core.instrument.Timer$Builder', 'io.micrometer.core.instrument.Timer$Sample', - 'io.micrometer.core.instrument.binder.jvm.ExecutorServiceMetrics', 'io.micrometer.core.instrument.composite.CompositeMeterRegistry', - 'io.micrometer.core.instrument.search.Search', 'io.netty.channel.epoll.Epoll', 'io.netty.channel.epoll.EpollDatagramChannel', 'io.netty.channel.epoll.EpollServerSocketChannel', @@ -168,9 +157,6 @@ thirdPartyAudit { 'org.slf4j.impl.StaticLoggerBinder', 
'org.slf4j.impl.StaticMDCBinder', 'org.slf4j.impl.StaticMarkerBinder', - 'reactor.blockhound.BlockHound$Builder', - 'reactor.blockhound.integration.BlockHoundIntegration', - 'io.micrometer.context.ThreadLocalAccessor', 'io.micrometer.common.KeyValue', 'io.micrometer.common.KeyValues', 'io.micrometer.common.docs.KeyName', @@ -190,6 +176,7 @@ thirdPartyAudit { 'io.micrometer.tracing.handler.PropagatingSenderTracingObservationHandler', 'io.micrometer.tracing.propagation.Propagator', 'io.micrometer.core.instrument.observation.MeterObservationHandler', + 'io.micrometer.core.instrument.Tags', 'io.micrometer.observation.ObservationHandler', 'io.micrometer.observation.ObservationRegistry', 'io.micrometer.observation.ObservationRegistry$ObservationConfig', @@ -210,8 +197,7 @@ thirdPartyAudit { 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1', 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray', 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator', - 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1', - 'reactor.core.publisher.Traces$SharedSecretsCallSiteSupplierFactory$TracingException' + 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1' ) } diff --git a/plugins/repository-azure/licenses/azure-core-1.39.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.39.0.jar.sha1 deleted file mode 100644 index c91498a464b3d..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -39765fb88a90174628b31ddf6ff9f8d63462e080 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-1.47.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.47.0.jar.sha1 new file mode 100644 index 0000000000000..42e35aacc63b1 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-1.47.0.jar.sha1 @@ -0,0 +1 @@ +6b300175826f0bb0916fca2fa5f70885b716e93f \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-storage-blob-12.22.3.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-blob-12.22.3.jar.sha1 deleted file mode 100644 index f6c3cc6e579fa..0000000000000 --- a/plugins/repository-azure/licenses/azure-storage-blob-12.22.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0df12462c2eac3beaf25d283f707a0560853228b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-storage-blob-12.23.0.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-blob-12.23.0.jar.sha1 new file mode 100644 index 0000000000000..5f32d64b00918 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-storage-blob-12.23.0.jar.sha1 @@ -0,0 +1 @@ +3eeb49d5109e812343fb436e4bbb2eecac8fe386 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/commons-lang3-3.12.0.jar.sha1 b/plugins/repository-azure/licenses/commons-lang3-3.12.0.jar.sha1 deleted file mode 100644 index 9273d8c01aaba..0000000000000 --- a/plugins/repository-azure/licenses/commons-lang3-3.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c6842c86792ff03b9f1d1fe2aab8dc23aa6c6f0e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/commons-lang3-3.13.0.jar.sha1 b/plugins/repository-azure/licenses/commons-lang3-3.13.0.jar.sha1 new file mode 100644 index 0000000000000..d0c2f2486ee1f --- /dev/null +++ b/plugins/repository-azure/licenses/commons-lang3-3.13.0.jar.sha1 @@ -0,0 +1 @@ +b7263237aa89c1f99b327197c41d0669707a462e \ No newline at end of file diff 
--git a/plugins/repository-azure/licenses/commons-lang3-NOTICE.txt b/plugins/repository-azure/licenses/commons-lang3-NOTICE.txt index 078282451b679..13a3140897472 100644 --- a/plugins/repository-azure/licenses/commons-lang3-NOTICE.txt +++ b/plugins/repository-azure/licenses/commons-lang3-NOTICE.txt @@ -1,8 +1,5 @@ Apache Commons Lang -Copyright 2001-2014 The Apache Software Foundation +Copyright 2001-2019 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (http://www.apache.org/). - -This product includes software from the Spring Framework, -under the Apache License 2.0 (see: StringUtils.containsWhitespace()) diff --git a/plugins/repository-azure/licenses/jackson-annotations-2.15.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-annotations-2.15.2.jar.sha1 deleted file mode 100644 index f63416ddb8ceb..0000000000000 --- a/plugins/repository-azure/licenses/jackson-annotations-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4724a65ac8e8d156a24898d50fd5dbd3642870b8 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-annotations-2.16.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-annotations-2.16.2.jar.sha1 new file mode 100644 index 0000000000000..d7dfc5ff83706 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-annotations-2.16.2.jar.sha1 @@ -0,0 +1 @@ +dfcd11c847ea7276aa073c25f5fe8ee361748d7f \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-databind-2.15.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.15.2.jar.sha1 deleted file mode 100644 index f16d80af8dce6..0000000000000 --- a/plugins/repository-azure/licenses/jackson-databind-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9353b021f10c307c00328f52090de2bdb4b6ff9c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-databind-2.16.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.16.2.jar.sha1 new file mode 100644 index 0000000000000..86998b4558461 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-databind-2.16.2.jar.sha1 @@ -0,0 +1 @@ +7fda67535b54d74eebf6157682b835c847410932 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.15.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.15.2.jar.sha1 deleted file mode 100644 index 18c388b84f333..0000000000000 --- a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e7e9038dee5c1adb1ebd07d3669e0e1182ac5b60 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.16.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.16.2.jar.sha1 new file mode 100644 index 0000000000000..4f353d6166121 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.16.2.jar.sha1 @@ -0,0 +1 @@ +796c3141d3bbcf67dc06751695dca116b2838a73 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.15.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.15.2.jar.sha1 deleted file mode 100644 index 6aa4f9b99c274..0000000000000 --- a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -30d16ec2aef6d8094c5e2dce1d95034ca8b6cb42 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.16.2.jar.sha1 
b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.16.2.jar.sha1 new file mode 100644 index 0000000000000..94266f5d40e62 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.16.2.jar.sha1 @@ -0,0 +1 @@ +58e86108e4b1b1e893e7a69b1bbca880acfca143 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.15.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.15.2.jar.sha1 deleted file mode 100644 index 80da08928f855..0000000000000 --- a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6a22fd1c7b0f9788e81eea32c11dc8c1ba421f18 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.16.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.16.2.jar.sha1 new file mode 100644 index 0000000000000..7fa3014c053a1 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.16.2.jar.sha1 @@ -0,0 +1 @@ +d1274db656edefe242fbd26d3266f7b4abb6f57b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..d823de7ffadd4 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +945e8ad5ab7ec4f11fb0257d2594af0cfae1d4b7 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.94.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.94.Final.jar.sha1 deleted file mode 100644 index 670bd4c98a044..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9180660dc8479e1594b60b02fc27404af0ea43a6 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..5b3d3311edc9f --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +3885ffe7dd05c9773df70c61009f34a5a8a383ec \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.94.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.94.Final.jar.sha1 deleted file mode 100644 index e73026b412972..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f651595784d6cca4cbca6a8ad74c48fceed6cea8 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..114d77a1bb95f --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +3b7070e9acfe262bb0bd936c4051116631796b3b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.94.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.94.Final.jar.sha1 deleted file mode 100644 index de2c4d00aef09..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b9192c7cda295d75f236a13a0b1f5a008f05d516 \ No newline at end of file diff 
--git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..5a4bde479eb38 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +ebc495e9b2bc2c9ab60a264b40f62dc0671d9f6e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.94.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.94.Final.jar.sha1 deleted file mode 100644 index a2db8bece8f6f..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -26ba9d30b8f7b095155b9ac63378d6d9386d85c3 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..a62cb0fefcc40 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +d655d09e972dee46f580dbcf41c0d1356aea9e1b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.94.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.94.Final.jar.sha1 deleted file mode 100644 index 2fa927b3b77ba..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -25bbe90e10685ce63c32bd0db56574cffffa28de \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..407ecaffdad30 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +4d61d4959741109b3eccd7337f11fc89fa90a74a \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.94.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.94.Final.jar.sha1 deleted file mode 100644 index 43bc960a347a1..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3fa5f9d04b6b782d869d6e0657d896eeadca5866 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactive-streams-1.0.4.jar.sha1 b/plugins/repository-azure/licenses/reactive-streams-1.0.4.jar.sha1 deleted file mode 100644 index 45a80e3f7e361..0000000000000 --- a/plugins/repository-azure/licenses/reactive-streams-1.0.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3864a1320d97d7b045f729a326e1e077661f31b7 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactive-streams-LICENSE.txt b/plugins/repository-azure/licenses/reactive-streams-LICENSE.txt deleted file mode 100644 index 1e3c7e7c77495..0000000000000 --- a/plugins/repository-azure/licenses/reactive-streams-LICENSE.txt +++ /dev/null @@ -1,21 +0,0 @@ -MIT No Attribution - -Copyright 2014 Reactive Streams - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, 
and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-core-3.5.6.jar.sha1 b/plugins/repository-azure/licenses/reactor-core-3.5.6.jar.sha1 deleted file mode 100644 index ad9b7263e7b38..0000000000000 --- a/plugins/repository-azure/licenses/reactor-core-3.5.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -027fdc551537b349389176a23a192f11a7a3d7de \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-1.1.8.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-1.1.8.jar.sha1 deleted file mode 100644 index 6b6bf1903b16c..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-1.1.8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d53a9d7d0395285f4c81664494fcd61477626e32 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.17.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.17.jar.sha1 new file mode 100644 index 0000000000000..3d631bc904f24 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-core-1.1.17.jar.sha1 @@ -0,0 +1 @@ +319b1d41f28e92b31b7ca0f19183337f5539bb44 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.8.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.8.jar.sha1 deleted file mode 100644 index 707631f4dfe0c..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-core-1.1.8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -48999c4ae27cdcee5eaff9dfd150a8b64624f0f5 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.17.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.17.jar.sha1 new file mode 100644 index 0000000000000..9ceef6959744b --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-http-1.1.17.jar.sha1 @@ -0,0 +1 @@ +9ed949dcd050ef30d9eeedd53d95d1dce20ce832 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.8.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.8.jar.sha1 deleted file mode 100644 index 5092608c90eba..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-http-1.1.8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -696ea25658295e49906c6aad13fa70acbdeb2359 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/slf4j-api-LICENSE.txt b/plugins/repository-azure/licenses/slf4j-api-LICENSE.txt index 8fda22f4d72f6..2be7689435062 100644 --- a/plugins/repository-azure/licenses/slf4j-api-LICENSE.txt +++ b/plugins/repository-azure/licenses/slf4j-api-LICENSE.txt @@ -1,4 +1,4 @@ -Copyright (c) 2004-2014 QOS.ch +Copyright (c) 2004-2022 QOS.ch All rights reserved. 
Permission is hereby granted, free of charge, to any person obtaining @@ -18,4 +18,4 @@ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/plugins/repository-azure/licenses/stax2-api-4.2.1.jar.sha1 b/plugins/repository-azure/licenses/stax2-api-4.2.1.jar.sha1 deleted file mode 100644 index 2c12704cdc560..0000000000000 --- a/plugins/repository-azure/licenses/stax2-api-4.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a3f7325c52240418c2ba257b103c3c550e140c83 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/stax2-api-4.2.2.jar.sha1 b/plugins/repository-azure/licenses/stax2-api-4.2.2.jar.sha1 new file mode 100644 index 0000000000000..b15a7ead0d016 --- /dev/null +++ b/plugins/repository-azure/licenses/stax2-api-4.2.2.jar.sha1 @@ -0,0 +1 @@ +b0d746cadea928e5264f2ea294ea9a1bf815bbde \ No newline at end of file diff --git a/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureBlobStoreRepositoryTests.java b/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureBlobStoreRepositoryTests.java index 67e79addfedc5..1ba16422c9214 100644 --- a/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureBlobStoreRepositoryTests.java +++ b/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureBlobStoreRepositoryTests.java @@ -31,25 +31,25 @@ package org.opensearch.repositories.azure; -import com.azure.storage.blob.models.ParallelTransferOptions; -import com.azure.storage.common.implementation.Constants; -import com.azure.storage.common.policy.RequestRetryOptions; -import com.azure.storage.common.policy.RetryPolicyType; import com.sun.net.httpserver.Headers; import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; -import fixture.azure.AzureHttpHandler; -import reactor.core.scheduler.Schedulers; -import org.junit.AfterClass; +import com.azure.storage.blob.models.ParallelTransferOptions; +import com.azure.storage.common.implementation.Constants; +import com.azure.storage.common.policy.RequestRetryOptions; +import com.azure.storage.common.policy.RetryPolicyType; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.regex.Regex; import org.opensearch.common.settings.MockSecureSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.rest.RestStatus; import org.opensearch.plugins.Plugin; import org.opensearch.repositories.blobstore.OpenSearchMockAPIBasedRepositoryIntegTestCase; -import org.opensearch.core.rest.RestStatus; +import org.junit.AfterClass; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -59,6 +59,9 @@ import java.util.Map; import java.util.regex.Pattern; +import fixture.azure.AzureHttpHandler; +import reactor.core.scheduler.Schedulers; + @SuppressForbidden(reason = "this test uses a HttpServer to emulate an Azure endpoint") public class 
AzureBlobStoreRepositoryTests extends OpenSearchMockAPIBasedRepositoryIntegTestCase { @AfterClass @@ -153,7 +156,7 @@ private static class AzureBlobStoreHttpHandler extends AzureHttpHandler implemen /** * HTTP handler that injects random Azure service errors - * + * <p> * Note: it is not a good idea to allow this handler to simulate too many errors as it would * slow down the test suite. */ @@ -187,6 +190,7 @@ protected String requestUniqueId(final HttpExchange exchange) { @SuppressForbidden(reason = "this test uses a HttpServer to emulate an Azure endpoint") private static class AzureHTTPStatsCollectorHandler extends HttpStatsCollectorHandler { + private static final Logger testLogger = LogManager.getLogger(AzureHTTPStatsCollectorHandler.class); private static final Pattern listPattern = Pattern.compile("GET /[a-zA-Z0-9]+\\??.+"); private static final Pattern getPattern = Pattern.compile("GET /[^?/]+/[^?/]+\\??.*"); @@ -196,6 +200,7 @@ private AzureHTTPStatsCollectorHandler(HttpHandler delegate) { @Override protected void maybeTrack(String request, Headers headers) { + testLogger.info(request, headers); if (getPattern.matcher(request).matches()) { trackRequest("GetBlob"); } else if (Regex.simpleMatch("HEAD /*/*", request)) { diff --git a/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java b/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java index 1dc411a5a27fb..176e60a667aef 100644 --- a/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java +++ b/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java @@ -32,15 +32,10 @@ package org.opensearch.repositories.azure; -import org.opensearch.core.common.Strings; -import reactor.core.scheduler.Schedulers; - import com.azure.core.util.Context; import com.azure.storage.blob.BlobContainerClient; import com.azure.storage.blob.BlobServiceClient; import com.azure.storage.blob.models.BlobStorageException; - -import org.junit.AfterClass; import org.opensearch.action.ActionRunnable; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.master.AcknowledgedResponse; @@ -48,14 +43,18 @@ import org.opensearch.common.settings.MockSecureSettings; import org.opensearch.common.settings.SecureSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.Strings; import org.opensearch.plugins.Plugin; import org.opensearch.repositories.AbstractThirdPartyRepositoryTestCase; import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.junit.AfterClass; import java.net.HttpURLConnection; import java.util.Collection; import java.util.function.Supplier; +import reactor.core.scheduler.Schedulers; + import static org.hamcrest.Matchers.blankOrNullString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureBlobContainer.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureBlobContainer.java index d6fa72221f408..2b4654d220061 100644 --- a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureBlobContainer.java +++ b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureBlobContainer.java @@ 
-35,10 +35,8 @@ import com.azure.storage.blob.models.BlobStorageException; import com.azure.storage.blob.specialized.BlobInputStream; import com.azure.storage.common.implementation.Constants; - import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.action.support.GroupedActionListener; import org.opensearch.action.support.PlainActionFuture; @@ -48,6 +46,7 @@ import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.DeleteResult; import org.opensearch.common.blobstore.support.AbstractBlobContainer; +import org.opensearch.core.action.ActionListener; import org.opensearch.threadpool.ThreadPool; import java.io.FileInputStream; diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureBlobStore.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureBlobStore.java index 060ffdda79196..e76a6bdd16764 100644 --- a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureBlobStore.java +++ b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureBlobStore.java @@ -50,7 +50,6 @@ import com.azure.storage.blob.models.ListBlobsOptions; import com.azure.storage.blob.options.BlobParallelUploadOptions; import com.azure.storage.common.implementation.Constants; - import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.core.util.Throwables; diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepository.java index 2677604ecb622..47a5536a6cd8a 100644 --- a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepository.java +++ b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepository.java @@ -41,12 +41,14 @@ import org.opensearch.common.blobstore.BlobStore; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.repositories.blobstore.MeteredBlobStoreRepository; +import java.util.ArrayList; +import java.util.List; import java.util.Locale; import java.util.Map; import java.util.function.Function; @@ -114,14 +116,7 @@ public AzureRepository( final ClusterService clusterService, final RecoverySettings recoverySettings ) { - super( - metadata, - COMPRESS_SETTING.get(metadata.settings()), - namedXContentRegistry, - clusterService, - recoverySettings, - buildLocation(metadata) - ); + super(metadata, namedXContentRegistry, clusterService, recoverySettings, buildLocation(metadata)); this.chunkSize = Repository.CHUNK_SIZE_SETTING.get(metadata.settings()); this.storageService = storageService; @@ -192,4 +187,13 @@ protected ByteSizeValue chunkSize() { public boolean isReadOnly() { return readonly; } + + @Override + public List<Setting<?>> getRestrictedSystemRepositorySettings() { + List<Setting<?>> restrictedSettings = new ArrayList<>(); + restrictedSettings.addAll(super.getRestrictedSystemRepositorySettings()); + restrictedSettings.add(Repository.BASE_PATH_SETTING); + 
restrictedSettings.add(Repository.LOCATION_MODE_SETTING); + return restrictedSettings; + } } diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java index c518cc2716db6..74edd4f3eb23c 100644 --- a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java @@ -32,11 +32,6 @@ package org.opensearch.repositories.azure; -import io.netty.channel.EventLoopGroup; -import io.netty.channel.nio.NioEventLoopGroup; -import io.netty.util.concurrent.Future; -import reactor.core.publisher.Mono; - import com.azure.core.http.HttpPipelineCallContext; import com.azure.core.http.HttpPipelineNextPolicy; import com.azure.core.http.HttpPipelinePosition; @@ -56,14 +51,13 @@ import com.azure.storage.common.implementation.connectionstring.StorageEndpoint; import com.azure.storage.common.policy.RequestRetryOptions; import com.azure.storage.common.policy.RetryPolicyType; - import org.opensearch.common.collect.MapBuilder; import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsException; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import java.net.Authenticator; import java.net.PasswordAuthentication; @@ -79,6 +73,11 @@ import java.util.function.BiConsumer; import java.util.function.Supplier; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.util.concurrent.Future; +import reactor.core.publisher.Mono; + import static java.util.Collections.emptyMap; public class AzureStorageService implements AutoCloseable { @@ -213,8 +212,8 @@ protected PasswordAuthentication getPasswordAuthentication() { /** * The location mode is not there in v12 APIs anymore but it is possible to mimic its semantics using - * retry options and combination of primary / secondary endpoints. Refer to migration guide for mode details: - * https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/storage/azure-storage-blob/migrationGuides/V8_V12.md#miscellaneous + * retry options and combination of primary / secondary endpoints. Refer to the + * <a href="https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/storage/azure-storage-blob/migrationGuides/V8_V12.md#miscellaneous">migration guide</a> for more details. */ private BlobServiceClientBuilder applyLocationMode(final BlobServiceClientBuilder builder, final AzureStorageSettings settings) { final StorageConnectionString storageConnectionString = StorageConnectionString.create(settings.getConnectString(), logger); @@ -336,8 +335,8 @@ private void closeInternally(ClientState state) { } /** - * Implements HTTP pipeline policy to collect statistics on API calls. See please: - * https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/storage/azure-storage-blob/migrationGuides/V8_V12.md#miscellaneous + * Implements HTTP pipeline policy to collect statistics on API calls.
See the + <a href="https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/storage/azure-storage-blob/migrationGuides/V8_V12.md#miscellaneous">migration guide</a> */ private static class HttpStatsPolicy implements HttpPipelinePolicy { private final BiConsumer<HttpRequest, HttpResponse> statsCollector; @@ -390,6 +389,7 @@ private static class NioThreadFactory implements ThreadFactory { private final AtomicInteger threadNumber = new AtomicInteger(1); private final String namePrefix; + @SuppressWarnings("removal") NioThreadFactory() { SecurityManager s = System.getSecurityManager(); group = (s != null) ? s.getThreadGroup() : Thread.currentThread().getThreadGroup(); diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageSettings.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageSettings.java index 1bc0e955715f1..e73ded679cf2b 100644 --- a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageSettings.java +++ b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageSettings.java @@ -35,7 +35,6 @@ import org.opensearch.common.Nullable; import org.opensearch.common.collect.MapBuilder; import org.opensearch.common.settings.SecureSetting; -import org.opensearch.core.common.settings.SecureString; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.AffixSetting; import org.opensearch.common.settings.Setting.Property; @@ -43,6 +42,7 @@ import org.opensearch.common.settings.SettingsException; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.settings.SecureString; import java.net.InetAddress; import java.net.UnknownHostException; diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/SocketAccess.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/SocketAccess.java index 0fbe9797f726f..a206c3b883870 100644 --- a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/SocketAccess.java +++ b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/SocketAccess.java @@ -49,6 +49,7 @@ * {@link SocketPermission} 'connect' to establish connections. This class wraps the operations requiring access in * {@link AccessController#doPrivileged(PrivilegedAction)} blocks.
*/ +@SuppressWarnings("removal") public final class SocketAccess { private SocketAccess() {} diff --git a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java index 8b68ccebf8c53..71ffd0fd959f1 100644 --- a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java +++ b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java @@ -31,21 +31,17 @@ package org.opensearch.repositories.azure; +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpServer; + import com.azure.storage.blob.BlobClient; import com.azure.storage.blob.models.ParallelTransferOptions; import com.azure.storage.common.policy.RequestRetryOptions; import com.azure.storage.common.policy.RetryPolicyType; -import com.sun.net.httpserver.HttpExchange; -import com.sun.net.httpserver.HttpServer; -import fixture.azure.AzureHttpHandler; -import org.opensearch.core.common.Strings; -import reactor.core.scheduler.Schedulers; - import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobPath; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.Streams; import org.opensearch.common.lucene.store.ByteArrayIndexInput; @@ -53,9 +49,11 @@ import org.opensearch.common.network.InetAddresses; import org.opensearch.common.settings.MockSecureSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.CountDown; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.RestUtils; import org.opensearch.test.OpenSearchTestCase; @@ -88,6 +86,9 @@ import java.util.regex.Pattern; import java.util.stream.Collectors; +import fixture.azure.AzureHttpHandler; +import reactor.core.scheduler.Schedulers; + import static java.nio.charset.StandardCharsets.UTF_8; import static org.opensearch.repositories.azure.AzureRepository.Repository.CONTAINER_SETTING; import static org.opensearch.repositories.azure.AzureStorageSettings.ACCOUNT_SETTING; diff --git a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureRepositorySettingsTests.java b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureRepositorySettingsTests.java index 24a226290985a..3356e5174592a 100644 --- a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureRepositorySettingsTests.java +++ b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureRepositorySettingsTests.java @@ -32,19 +32,23 @@ package org.opensearch.repositories.azure; -import reactor.core.scheduler.Schedulers; - -import org.junit.AfterClass; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import 
org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.blobstore.BlobStoreTestUtil; import org.opensearch.test.OpenSearchTestCase; +import org.junit.AfterClass; + +import java.util.List; + +import reactor.core.scheduler.Schedulers; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; @@ -179,4 +183,21 @@ public void testChunkSize() { ); } + public void testSystemRepositoryDefault() { + assertThat(azureRepository(Settings.EMPTY).isSystemRepository(), is(false)); + } + + public void testSystemRepositoryOn() { + assertThat(azureRepository(Settings.builder().put("system_repository", true).build()).isSystemRepository(), is(true)); + } + + public void testRestrictedSettingsDefault() { + List<Setting<?>> restrictedSettings = azureRepository(Settings.EMPTY).getRestrictedSystemRepositorySettings(); + assertThat(restrictedSettings.size(), is(5)); + assertTrue(restrictedSettings.contains(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING)); + assertTrue(restrictedSettings.contains(BlobStoreRepository.READONLY_SETTING)); + assertTrue(restrictedSettings.contains(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY)); + assertTrue(restrictedSettings.contains(AzureRepository.Repository.BASE_PATH_SETTING)); + assertTrue(restrictedSettings.contains(AzureRepository.Repository.LOCATION_MODE_SETTING)); + } } diff --git a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java index 1ddf8bb1e94e1..bb0eafc7d1d4a 100644 --- a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java +++ b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java @@ -32,13 +32,9 @@ package org.opensearch.repositories.azure; -import reactor.core.scheduler.Schedulers; - import com.azure.core.http.policy.HttpPipelinePolicy; import com.azure.storage.blob.BlobServiceClient; import com.azure.storage.common.policy.RequestRetryPolicy; - -import org.junit.AfterClass; import org.opensearch.common.settings.MockSecureSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsException; @@ -46,6 +42,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.Strings; import org.opensearch.test.OpenSearchTestCase; +import org.junit.AfterClass; import java.io.IOException; import java.io.UncheckedIOException; @@ -60,6 +57,8 @@ import java.util.Collections; import java.util.Map; +import reactor.core.scheduler.Schedulers; + import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.emptyString; import static org.hamcrest.Matchers.equalTo; @@ -452,7 +451,8 @@ private static RequestRetryPolicy requestRetryOptions(BlobServiceClient client) } /** - * Extract the blob name from a URI like https://myservice.azure.net/container/path/to/myfile + * Extract the blob name from a URI like: + * {@code https://myservice.azure.net/container/path/to/myfile } * It should remove the container part (first part of the path) and gives path/to/myfile *
@param uri URI to parse * @return The blob name relative to the container diff --git a/plugins/repository-azure/src/yamlRestTest/java/org/opensearch/repositories/azure/RepositoryAzureClientYamlTestSuiteIT.java b/plugins/repository-azure/src/yamlRestTest/java/org/opensearch/repositories/azure/RepositoryAzureClientYamlTestSuiteIT.java index 7ce28855417c3..31ecd60602aa7 100644 --- a/plugins/repository-azure/src/yamlRestTest/java/org/opensearch/repositories/azure/RepositoryAzureClientYamlTestSuiteIT.java +++ b/plugins/repository-azure/src/yamlRestTest/java/org/opensearch/repositories/azure/RepositoryAzureClientYamlTestSuiteIT.java @@ -34,6 +34,7 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.test.rest.yaml.ClientYamlTestCandidate; import org.opensearch.test.rest.yaml.OpenSearchClientYamlSuiteTestCase; diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index fd2f713dd33cd..0ddcf0f6dddca 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -53,32 +53,32 @@ versions << [ dependencies { api 'com.google.api:api-common:1.8.1' - api 'com.google.api:gax:2.27.0' - api 'com.google.api:gax-httpjson:0.103.1' + api 'com.google.api:gax:2.35.0' + api 'com.google.api:gax-httpjson:2.42.0' api 'com.google.apis:google-api-services-storage:v1-rev20230617-2.0.0' api 'com.google.api-client:google-api-client:2.2.0' - api 'com.google.api.grpc:proto-google-common-protos:2.10.0' + api 'com.google.api.grpc:proto-google-common-protos:2.33.0' api 'com.google.api.grpc:proto-google-iam-v1:0.12.0' api "com.google.auth:google-auth-library-credentials:${versions.google_auth}" api "com.google.auth:google-auth-library-oauth2-http:${versions.google_auth}" - api 'com.google.cloud:google-cloud-core:2.5.10' - api 'com.google.cloud:google-cloud-core-http:2.21.0' + api 'com.google.cloud:google-cloud-core:2.30.0' + api 'com.google.cloud:google-cloud-core-http:2.23.0' api 'com.google.cloud:google-cloud-storage:1.113.1' - api 'com.google.code.gson:gson:2.9.0' + api 'com.google.code.gson:gson:2.10.1' runtimeOnly "com.google.guava:guava:${versions.guava}" api 'com.google.guava:failureaccess:1.0.1' - api 'com.google.http-client:google-http-client:1.43.2' - api 'com.google.http-client:google-http-client-appengine:1.43.2' + api 'com.google.http-client:google-http-client:1.43.3' + api 'com.google.http-client:google-http-client-appengine:1.43.3' api 'com.google.http-client:google-http-client-gson:1.43.3' - api 'com.google.http-client:google-http-client-jackson2:1.43.2' + api 'com.google.http-client:google-http-client-jackson2:1.44.1' api 'com.google.oauth-client:google-oauth-client:1.34.1' @@ -86,7 +86,7 @@ dependencies { api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" api "commons-codec:commons-codec:${versions.commonscodec}" api 'org.threeten:threetenbp:1.4.4' - api 'io.grpc:grpc-context:1.56.1' + api 'io.grpc:grpc-api:1.57.2' api 'io.opencensus:opencensus-api:0.31.1' api 'io.opencensus:opencensus-contrib-http-util:0.31.1' @@ -148,6 +148,7 @@ thirdPartyAudit { 'com.google.appengine.api.urlfetch.HTTPResponse', 'com.google.appengine.api.urlfetch.URLFetchService', 'com.google.appengine.api.urlfetch.URLFetchServiceFactory', + 'com.google.auth.oauth2.GdchCredentials', 'com.google.protobuf.util.JsonFormat', 'com.google.protobuf.util.JsonFormat$Parser', 'com.google.protobuf.util.JsonFormat$Printer', @@ -206,6 +207,9 @@ thirdPartyAudit { 
'javax.jms.Message', 'javax.servlet.ServletContextEvent', 'javax.servlet.ServletContextListener', + // Bump for gax 2.42.0 + 'com.google.api.gax.rpc.EndpointContext', + 'com.google.api.gax.rpc.RequestMutator' ) } diff --git a/plugins/repository-gcs/licenses/commons-logging-LICENSE.txt b/plugins/repository-gcs/licenses/commons-logging-LICENSE.txt index 57bc88a15a0ee..d645695673349 100644 --- a/plugins/repository-gcs/licenses/commons-logging-LICENSE.txt +++ b/plugins/repository-gcs/licenses/commons-logging-LICENSE.txt @@ -1,3 +1,4 @@ + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -199,4 +200,3 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - diff --git a/plugins/repository-gcs/licenses/commons-logging-NOTICE.txt b/plugins/repository-gcs/licenses/commons-logging-NOTICE.txt index 72eb32a902458..d3d6e140ce4f3 100644 --- a/plugins/repository-gcs/licenses/commons-logging-NOTICE.txt +++ b/plugins/repository-gcs/licenses/commons-logging-NOTICE.txt @@ -1,5 +1,5 @@ -Apache Commons CLI -Copyright 2001-2009 The Apache Software Foundation +Apache Commons Logging +Copyright 2003-2014 The Apache Software Foundation -This product includes software developed by +This product includes software developed at The Apache Software Foundation (http://www.apache.org/). diff --git a/plugins/repository-gcs/licenses/gax-2.27.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-2.27.0.jar.sha1 deleted file mode 100644 index 1813a3aa94404..0000000000000 --- a/plugins/repository-gcs/licenses/gax-2.27.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -04a27757c9240da71f896be39f47aaa6e23ef989 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gax-2.35.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-2.35.0.jar.sha1 new file mode 100644 index 0000000000000..778922c637dc1 --- /dev/null +++ b/plugins/repository-gcs/licenses/gax-2.35.0.jar.sha1 @@ -0,0 +1 @@ +98d52034cfa6d1b881e16f418894afcfacd89b7a \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gax-httpjson-0.103.1.jar.sha1 b/plugins/repository-gcs/licenses/gax-httpjson-0.103.1.jar.sha1 deleted file mode 100644 index 11315004e233d..0000000000000 --- a/plugins/repository-gcs/licenses/gax-httpjson-0.103.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -041d99172fda933bc879bdfd8de9420c5c34107e \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gax-httpjson-2.42.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-httpjson-2.42.0.jar.sha1 new file mode 100644 index 0000000000000..672506572ed4d --- /dev/null +++ b/plugins/repository-gcs/licenses/gax-httpjson-2.42.0.jar.sha1 @@ -0,0 +1 @@ +4db06bc31c2fb34b0490362e8666c20fdc1fb3f2 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-core-2.30.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-2.30.0.jar.sha1 new file mode 100644 index 0000000000000..10f8f90df108f --- /dev/null +++ b/plugins/repository-gcs/licenses/google-cloud-core-2.30.0.jar.sha1 @@ -0,0 +1 @@ +b48ea27cbdccd5f225d8a35ea28e2cd01c25918b \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-core-2.5.10.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-2.5.10.jar.sha1 deleted file mode 100644 index 34c3dc6805500..0000000000000 --- a/plugins/repository-gcs/licenses/google-cloud-core-2.5.10.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4d979bfe28551eb78cddae9282833ede147a9331 \ No newline at end 
of file diff --git a/plugins/repository-gcs/licenses/google-cloud-core-http-2.21.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-http-2.21.0.jar.sha1 deleted file mode 100644 index 2ef0a9bf9b33e..0000000000000 --- a/plugins/repository-gcs/licenses/google-cloud-core-http-2.21.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -07da4710ccdbcfee253672c0b9e00e7370626c26 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-core-http-2.23.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-http-2.23.0.jar.sha1 new file mode 100644 index 0000000000000..9db3cbcbec35b --- /dev/null +++ b/plugins/repository-gcs/licenses/google-cloud-core-http-2.23.0.jar.sha1 @@ -0,0 +1 @@ +9913d0806fcfbfbc4a775f29865126ed8465464b \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-1.43.2.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-1.43.2.jar.sha1 deleted file mode 100644 index a576a74c62542..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-1.43.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2520469ebd8c0675f0d2aeafd2da665228320fcf \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-1.43.3.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-1.43.3.jar.sha1 new file mode 100644 index 0000000000000..800467de8bdf3 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-1.43.3.jar.sha1 @@ -0,0 +1 @@ +a758b82e55a2f5f681e289c5ed384d3dbda6f3cd \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-appengine-1.43.2.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-appengine-1.43.2.jar.sha1 deleted file mode 100644 index d8a9dba20070b..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-appengine-1.43.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9fb548c5264227813fd83991b94a705b0841c15f \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-appengine-1.43.3.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-appengine-1.43.3.jar.sha1 new file mode 100644 index 0000000000000..4adcca6a55902 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-appengine-1.43.3.jar.sha1 @@ -0,0 +1 @@ +09d6cbdde6ea3469a67601a811b4e83de3e68a79 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.43.2.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.43.2.jar.sha1 deleted file mode 100644 index 7b606a07651ed..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.43.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5e52a9967ebd8246fc4cca64df5f03608db5ac6e \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.44.1.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.44.1.jar.sha1 new file mode 100644 index 0000000000000..4472ffbbebe1c --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.44.1.jar.sha1 @@ -0,0 +1 @@ +3f1947de0fd9eb250af16abe6103c11e68d11635 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/grpc-api-1.57.2.jar.sha1 b/plugins/repository-gcs/licenses/grpc-api-1.57.2.jar.sha1 new file mode 100644 index 0000000000000..8b320fdd2f9cc --- /dev/null +++ b/plugins/repository-gcs/licenses/grpc-api-1.57.2.jar.sha1 @@ -0,0 +1 @@ +c71a006b81ddae7bc4b7cb1d2da78c1b173761f4 \ No newline at end of file diff --git 
a/plugins/repository-gcs/licenses/grpc-api-LICENSE.txt b/plugins/repository-gcs/licenses/grpc-api-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/repository-gcs/licenses/grpc-api-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/repository-azure/licenses/reactive-streams-NOTICE.txt b/plugins/repository-gcs/licenses/grpc-api-NOTICE.txt similarity index 100% rename from plugins/repository-azure/licenses/reactive-streams-NOTICE.txt rename to plugins/repository-gcs/licenses/grpc-api-NOTICE.txt diff --git a/plugins/repository-gcs/licenses/grpc-context-1.56.1.jar.sha1 b/plugins/repository-gcs/licenses/grpc-context-1.56.1.jar.sha1 deleted file mode 100644 index dbc1ea470dadd..0000000000000 --- a/plugins/repository-gcs/licenses/grpc-context-1.56.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3681b1caf41af1da0c4a3ffec47ab4a3d907c190 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gson-2.10.1.jar.sha1 b/plugins/repository-gcs/licenses/gson-2.10.1.jar.sha1 new file mode 100644 index 0000000000000..9810309d1013a --- /dev/null +++ b/plugins/repository-gcs/licenses/gson-2.10.1.jar.sha1 @@ -0,0 +1 @@ +b3add478d4382b78ea20b1671390a858002feb6c \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gson-2.9.0.jar.sha1 b/plugins/repository-gcs/licenses/gson-2.9.0.jar.sha1 deleted file mode 100644 index 8e9626b0c949b..0000000000000 --- a/plugins/repository-gcs/licenses/gson-2.9.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8a1167e089096758b49f9b34066ef98b2f4b37aa \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/log4j-1.2-api-2.20.0.jar.sha1 b/plugins/repository-gcs/licenses/log4j-1.2-api-2.20.0.jar.sha1 deleted file mode 100644 index 9829576d38ce0..0000000000000 --- a/plugins/repository-gcs/licenses/log4j-1.2-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -689151374756cb809cb029f2501015bdc7733179 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/log4j-1.2-api-2.21.0.jar.sha1 b/plugins/repository-gcs/licenses/log4j-1.2-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..39d9177cb2fac --- /dev/null +++ b/plugins/repository-gcs/licenses/log4j-1.2-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +12bad3819a9570807f3c97315930699584c12152 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-2.10.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-2.10.0.jar.sha1 deleted file mode 100644 index bf97707836c70..0000000000000 --- a/plugins/repository-gcs/licenses/proto-google-common-protos-2.10.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cf5ac081c05682b0eba6659dee55352fde5852e1 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-2.33.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-2.33.0.jar.sha1 new file mode 100644 index 0000000000000..746e4e99fd881 --- /dev/null +++ b/plugins/repository-gcs/licenses/proto-google-common-protos-2.33.0.jar.sha1 @@ -0,0 +1 @@ +644e11df1cec6d38a63a9a06a701e48c398b87d0 \ No newline at end of file diff --git a/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index 
f0f0fb7681c1a..d223f7989c688 100644 --- a/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ b/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -32,14 +32,13 @@ package org.opensearch.repositories.gcs; -import com.google.api.gax.retrying.RetrySettings; -import com.google.cloud.http.HttpTransportOptions; -import com.google.cloud.storage.StorageOptions; import com.sun.net.httpserver.Headers; import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; -import fixture.gcs.FakeOAuth2HttpHandler; -import fixture.gcs.GoogleCloudStorageHttpHandler; + +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.http.HttpTransportOptions; +import com.google.cloud.storage.StorageOptions; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.opensearch.action.ActionRunnable; @@ -50,13 +49,13 @@ import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.io.Streams; import org.opensearch.common.regex.Regex; import org.opensearch.common.settings.MockSecureSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; import org.opensearch.indices.recovery.RecoverySettings; @@ -66,8 +65,6 @@ import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.blobstore.OpenSearchMockAPIBasedRepositoryIntegTestCase; -import org.threeten.bp.Duration; - import java.io.IOException; import java.io.InputStream; import java.util.Arrays; @@ -78,6 +75,10 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; +import fixture.gcs.FakeOAuth2HttpHandler; +import fixture.gcs.GoogleCloudStorageHttpHandler; +import org.threeten.bp.Duration; + import static org.opensearch.repositories.gcs.GoogleCloudStorageClientSettings.CREDENTIALS_FILE_SETTING; import static org.opensearch.repositories.gcs.GoogleCloudStorageClientSettings.ENDPOINT_SETTING; import static org.opensearch.repositories.gcs.GoogleCloudStorageClientSettings.TOKEN_URI_SETTING; @@ -297,7 +298,7 @@ private static class GoogleCloudStorageBlobStoreHttpHandler extends GoogleCloudS /** * HTTP handler that injects random Google Cloud Storage service errors - * + * <p> * Note: it is not a good idea to allow this handler to simulate too many errors as it would * slow down the test suite. 
*/ diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleApplicationDefaultCredentials.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleApplicationDefaultCredentials.java new file mode 100644 index 0000000000000..5002ab9a2e704 --- /dev/null +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleApplicationDefaultCredentials.java @@ -0,0 +1,33 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.repositories.gcs; + +import com.google.auth.oauth2.GoogleCredentials; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import java.io.IOException; + +/** + * This class facilitates fetching Application Default Credentials; + * see <a href="https://cloud.google.com/docs/authentication/application-default-credentials">How Application Default Credentials works</a> + */ +public class GoogleApplicationDefaultCredentials { + private static final Logger logger = LogManager.getLogger(GoogleApplicationDefaultCredentials.class); + + public GoogleCredentials get() { + GoogleCredentials credentials = null; + try { + credentials = SocketAccess.doPrivilegedIOException(GoogleCredentials::getApplicationDefault); + } catch (IOException e) { + logger.error("Failed to retrieve \"Application Default Credentials\"", e); + } + return credentials; + } +} diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStore.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStore.java index 226aec437fc0f..f5c20003ea7b6 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStore.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStore.java @@ -55,8 +55,8 @@ import org.opensearch.common.blobstore.support.PlainBlobMetadata; import org.opensearch.common.collect.MapBuilder; import org.opensearch.common.io.Streams; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import java.io.ByteArrayInputStream; import java.io.IOException; diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageClientSettings.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageClientSettings.java index 4325708596870..7463bd4ff26fe 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageClientSettings.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageClientSettings.java @@ -33,14 +33,13 @@ import com.google.api.services.storage.StorageScopes; import com.google.auth.oauth2.ServiceAccountCredentials; - import org.opensearch.common.settings.SecureSetting; -import org.opensearch.core.common.settings.SecureString; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsException; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.settings.SecureString; import java.io.IOException;
import java.io.InputStream; diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRepository.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRepository.java index a743ac72bdb8b..f6d078868b875 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRepository.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRepository.java @@ -38,14 +38,16 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.settings.Setting; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.repositories.RepositoryException; import org.opensearch.repositories.blobstore.MeteredBlobStoreRepository; +import java.util.ArrayList; +import java.util.List; import java.util.Map; import java.util.function.Function; @@ -92,14 +94,7 @@ class GoogleCloudStorageRepository extends MeteredBlobStoreRepository { final ClusterService clusterService, final RecoverySettings recoverySettings ) { - super( - metadata, - getSetting(COMPRESS_SETTING, metadata), - namedXContentRegistry, - clusterService, - recoverySettings, - buildLocation(metadata) - ); + super(metadata, namedXContentRegistry, clusterService, recoverySettings, buildLocation(metadata)); this.storageService = storageService; String basePath = BASE_PATH.get(metadata.settings()); @@ -138,6 +133,15 @@ protected ByteSizeValue chunkSize() { return chunkSize; } + @Override + public List<Setting<?>> getRestrictedSystemRepositorySettings() { + List<Setting<?>> restrictedSettings = new ArrayList<>(); + restrictedSettings.addAll(super.getRestrictedSystemRepositorySettings()); + restrictedSettings.add(BUCKET); + restrictedSettings.add(BASE_PATH); + return restrictedSettings; + } + /** * Get a given setting from the repository settings, throwing a {@link RepositoryException} if the setting does not exist or is empty. 
*/ diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java index e15b37f209c5f..620f8e98d5f20 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java @@ -106,6 +106,7 @@ class GoogleCloudStorageRetryingInputStream extends InputStream { currentStream = openStream(); } + @SuppressWarnings("removal") @SuppressForbidden(reason = "need access to storage client") private static com.google.api.services.storage.Storage getStorage(Storage client) { return AccessController.doPrivileged((PrivilegedAction<com.google.api.services.storage.Storage>) () -> { diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java index 7211eac59ad4e..83a4146c99b99 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java @@ -36,12 +36,12 @@ import com.google.api.client.http.HttpRequestInitializer; import com.google.api.client.http.HttpTransport; import com.google.api.client.http.javanet.NetHttpTransport; +import com.google.auth.oauth2.GoogleCredentials; import com.google.auth.oauth2.ServiceAccountCredentials; import com.google.cloud.ServiceOptions; import com.google.cloud.http.HttpTransportOptions; import com.google.cloud.storage.Storage; import com.google.cloud.storage.StorageOptions; - import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -71,6 +71,16 @@ public class GoogleCloudStorageService { */ private volatile Map<String, Storage> clientCache = emptyMap(); + final private GoogleApplicationDefaultCredentials googleApplicationDefaultCredentials; + + public GoogleCloudStorageService() { + this.googleApplicationDefaultCredentials = new GoogleApplicationDefaultCredentials(); + } + + public GoogleCloudStorageService(GoogleApplicationDefaultCredentials googleApplicationDefaultCredentials) { + this.googleApplicationDefaultCredentials = googleApplicationDefaultCredentials; + } + /** * Refreshes the client settings and clears the client cache. Subsequent calls to * {@code GoogleCloudStorageService#client} will return new clients constructed @@ -214,10 +224,11 @@ StorageOptions createStorageOptions( storageOptionsBuilder.setProjectId(clientSettings.getProjectId()); } if (clientSettings.getCredential() == null) { - logger.warn( - "\"Application Default Credentials\" are not supported out of the box." - + " Additional file system permissions have to be granted to the plugin." 
- ); + logger.info("\"Application Default Credentials\" will be used"); + final GoogleCredentials credentials = googleApplicationDefaultCredentials.get(); + if (credentials != null) { + storageOptionsBuilder.setCredentials(credentials); + } } else { ServiceAccountCredentials serviceAccountCredentials = clientSettings.getCredential(); // override token server URI @@ -229,7 +240,7 @@ } storageOptionsBuilder.setCredentials(serviceAccountCredentials); } - return storageOptionsBuilder.build(); + return SocketAccess.doPrivilegedException(() -> storageOptionsBuilder.build()); } /** diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/SocketAccess.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/SocketAccess.java index 197e772df30d5..f8c451749480b 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/SocketAccess.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/SocketAccess.java @@ -32,6 +32,7 @@ package org.opensearch.repositories.gcs; +import org.apache.logging.log4j.core.util.Throwables; import org.opensearch.SpecialPermission; import org.opensearch.common.CheckedRunnable; @@ -47,6 +48,7 @@ * needs {@link SocketPermission} 'connect' to establish connections. This class wraps the operations requiring access * in {@link AccessController#doPrivileged(PrivilegedAction)} blocks. */ +@SuppressWarnings("removal") final class SocketAccess { private SocketAccess() {} @@ -71,4 +73,16 @@ public static void doPrivilegedVoidIOException(CheckedRunnable<IOException> acti throw (IOException) e.getCause(); } } + + public static <T> T doPrivilegedException(PrivilegedExceptionAction<T> operation) { + SpecialPermission.check(); + try { + return AccessController.doPrivileged(operation); + } catch (PrivilegedActionException e) { + Throwables.rethrow(e.getCause()); + assert false : "always throws"; + return null; + } + } + } diff --git a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java index 488376d36cdc4..23c006c9d2ce6 100644 --- a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java +++ b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java @@ -31,21 +31,18 @@ package org.opensearch.repositories.gcs; +import com.sun.net.httpserver.HttpHandler; + import com.google.api.gax.retrying.RetrySettings; import com.google.cloud.http.HttpTransportOptions; import com.google.cloud.storage.StorageException; import com.google.cloud.storage.StorageOptions; -import com.sun.net.httpserver.HttpHandler; -import fixture.gcs.FakeOAuth2HttpHandler; - import org.apache.hc.core5.http.HttpStatus; import org.opensearch.common.Nullable; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.UUIDs; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobPath; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.Streams; import org.opensearch.common.lucene.store.ByteArrayIndexInput; @@ -53,15 +50,16 @@ import org.opensearch.common.network.InetAddresses; import
org.opensearch.common.settings.MockSecureSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.CountDown; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.rest.RestStatus; import org.opensearch.repositories.blobstore.AbstractBlobContainerRetriesTestCase; import org.opensearch.repositories.blobstore.OpenSearchMockAPIBasedRepositoryIntegTestCase; -import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.RestUtils; -import org.threeten.bp.Duration; import java.io.IOException; import java.io.InputStream; @@ -77,10 +75,9 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; -import static fixture.gcs.GoogleCloudStorageHttpHandler.getContentRangeEnd; -import static fixture.gcs.GoogleCloudStorageHttpHandler.getContentRangeLimit; -import static fixture.gcs.GoogleCloudStorageHttpHandler.getContentRangeStart; -import static fixture.gcs.GoogleCloudStorageHttpHandler.parseMultipartRequestBody; +import fixture.gcs.FakeOAuth2HttpHandler; +import org.threeten.bp.Duration; + import static java.nio.charset.StandardCharsets.UTF_8; import static org.opensearch.repositories.blobstore.OpenSearchBlobStoreRepositoryIntegTestCase.randomBytes; import static org.opensearch.repositories.gcs.GoogleCloudStorageClientSettings.CREDENTIALS_FILE_SETTING; @@ -95,6 +92,10 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; +import static fixture.gcs.GoogleCloudStorageHttpHandler.getContentRangeEnd; +import static fixture.gcs.GoogleCloudStorageHttpHandler.getContentRangeLimit; +import static fixture.gcs.GoogleCloudStorageHttpHandler.getContentRangeStart; +import static fixture.gcs.GoogleCloudStorageHttpHandler.parseMultipartRequestBody; @SuppressForbidden(reason = "use a http server") public class GoogleCloudStorageBlobContainerRetriesTests extends AbstractBlobContainerRetriesTestCase { diff --git a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java index 183793049fb8e..1c4e0e4a5a400 100644 --- a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java +++ b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java @@ -50,11 +50,11 @@ import static org.hamcrest.Matchers.instanceOf; import static org.mockito.Mockito.any; -import static org.mockito.Mockito.eq; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; diff --git a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageClientSettingsTests.java b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageClientSettingsTests.java index abf63e5525d4d..dc12456c641ae 100644 --- 
a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageClientSettingsTests.java +++ b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageClientSettingsTests.java @@ -33,7 +33,6 @@ import com.google.api.services.storage.StorageScopes; import com.google.auth.oauth2.ServiceAccountCredentials; - import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.MockSecureSettings; import org.opensearch.common.settings.Setting; diff --git a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageServiceTests.java b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageServiceTests.java index fc6cce9b8ae17..58e412684ed5a 100644 --- a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageServiceTests.java +++ b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageServiceTests.java @@ -33,39 +33,49 @@ package org.opensearch.repositories.gcs; import com.google.auth.Credentials; +import com.google.auth.oauth2.GoogleCredentials; import com.google.cloud.http.HttpTransportOptions; import com.google.cloud.storage.Storage; -import org.hamcrest.Matchers; -import org.opensearch.core.common.bytes.BytesReference; +import com.google.cloud.storage.StorageOptions; import org.opensearch.common.settings.MockSecureSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.test.OpenSearchTestCase; +import org.hamcrest.MatcherAssert; +import org.hamcrest.Matchers; +import java.io.IOException; +import java.net.Proxy; +import java.net.URI; +import java.net.URISyntaxException; import java.security.KeyPair; import java.security.KeyPairGenerator; import java.util.Base64; import java.util.Locale; import java.util.UUID; +import org.mockito.Mockito; + +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; public class GoogleCloudStorageServiceTests extends OpenSearchTestCase { + final TimeValue connectTimeValue = TimeValue.timeValueNanos(randomIntBetween(0, 2000000)); + final TimeValue readTimeValue = TimeValue.timeValueNanos(randomIntBetween(0, 2000000)); + final String applicationName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); + final String endpoint = randomFrom("http://", "https://") + + randomFrom("www.opensearch.org", "www.googleapis.com", "localhost/api", "google.com/oauth") + + ":" + + randomIntBetween(1, 65535); + final String projectIdName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); + public void testClientInitializer() throws Exception { final String clientName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); - final TimeValue connectTimeValue = TimeValue.timeValueNanos(randomIntBetween(0, 2000000)); - final TimeValue readTimeValue = TimeValue.timeValueNanos(randomIntBetween(0, 2000000)); - final String applicationName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); - final String endpoint = randomFrom("http://", "https://") - + randomFrom("www.opensearch.org", "www.googleapis.com", 
"localhost/api", "google.com/oauth") - + ":" - + randomIntBetween(1, 65535); - final String projectIdName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); final Settings settings = Settings.builder() .put( GoogleCloudStorageClientSettings.CONNECT_TIMEOUT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), @@ -82,31 +92,35 @@ public void testClientInitializer() throws Exception { .put(GoogleCloudStorageClientSettings.ENDPOINT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), endpoint) .put(GoogleCloudStorageClientSettings.PROJECT_ID_SETTING.getConcreteSettingForNamespace(clientName).getKey(), projectIdName) .build(); - final GoogleCloudStorageService service = new GoogleCloudStorageService(); + GoogleCredentials mockGoogleCredentials = Mockito.mock(GoogleCredentials.class); + GoogleApplicationDefaultCredentials mockDefaultCredentials = Mockito.mock(GoogleApplicationDefaultCredentials.class); + Mockito.when(mockDefaultCredentials.get()).thenReturn(mockGoogleCredentials); + + final GoogleCloudStorageService service = new GoogleCloudStorageService(mockDefaultCredentials); service.refreshAndClearCache(GoogleCloudStorageClientSettings.load(settings)); GoogleCloudStorageOperationsStats statsCollector = new GoogleCloudStorageOperationsStats("bucket"); final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> service.client("another_client", "repo", statsCollector) ); - assertThat(e.getMessage(), Matchers.startsWith("Unknown client name")); + MatcherAssert.assertThat(e.getMessage(), Matchers.startsWith("Unknown client name")); assertSettingDeprecationsAndWarnings( new Setting<?>[] { GoogleCloudStorageClientSettings.APPLICATION_NAME_SETTING.getConcreteSettingForNamespace(clientName) } ); final Storage storage = service.client(clientName, "repo", statsCollector); - assertThat(storage.getOptions().getApplicationName(), Matchers.containsString(applicationName)); - assertThat(storage.getOptions().getHost(), Matchers.is(endpoint)); - assertThat(storage.getOptions().getProjectId(), Matchers.is(projectIdName)); - assertThat(storage.getOptions().getTransportOptions(), Matchers.instanceOf(HttpTransportOptions.class)); - assertThat( + MatcherAssert.assertThat(storage.getOptions().getApplicationName(), Matchers.containsString(applicationName)); + MatcherAssert.assertThat(storage.getOptions().getHost(), Matchers.is(endpoint)); + MatcherAssert.assertThat(storage.getOptions().getProjectId(), Matchers.is(projectIdName)); + MatcherAssert.assertThat(storage.getOptions().getTransportOptions(), Matchers.instanceOf(HttpTransportOptions.class)); + MatcherAssert.assertThat( ((HttpTransportOptions) storage.getOptions().getTransportOptions()).getConnectTimeout(), Matchers.is((int) connectTimeValue.millis()) ); - assertThat( + MatcherAssert.assertThat( ((HttpTransportOptions) storage.getOptions().getTransportOptions()).getReadTimeout(), Matchers.is((int) readTimeValue.millis()) ); - assertThat(storage.getOptions().getCredentials(), Matchers.nullValue(Credentials.class)); + MatcherAssert.assertThat(storage.getOptions().getCredentials(), Matchers.instanceOf(Credentials.class)); } public void testReinitClientSettings() throws Exception { @@ -122,33 +136,33 @@ public void testReinitClientSettings() throws Exception { final GoogleCloudStorageService storageService = plugin.storageService; GoogleCloudStorageOperationsStats statsCollector = new GoogleCloudStorageOperationsStats("bucket"); final Storage client11 = storageService.client("gcs1", "repo1", 
statsCollector); - assertThat(client11.getOptions().getProjectId(), equalTo("project_gcs11")); + MatcherAssert.assertThat(client11.getOptions().getProjectId(), equalTo("project_gcs11")); final Storage client12 = storageService.client("gcs2", "repo2", statsCollector); - assertThat(client12.getOptions().getProjectId(), equalTo("project_gcs12")); + MatcherAssert.assertThat(client12.getOptions().getProjectId(), equalTo("project_gcs12")); // client 3 is missing final IllegalArgumentException e1 = expectThrows( IllegalArgumentException.class, () -> storageService.client("gcs3", "repo3", statsCollector) ); - assertThat(e1.getMessage(), containsString("Unknown client name [gcs3].")); + MatcherAssert.assertThat(e1.getMessage(), containsString("Unknown client name [gcs3].")); // update client settings plugin.reload(settings2); // old client 1 not changed - assertThat(client11.getOptions().getProjectId(), equalTo("project_gcs11")); + MatcherAssert.assertThat(client11.getOptions().getProjectId(), equalTo("project_gcs11")); // new client 1 is changed final Storage client21 = storageService.client("gcs1", "repo1", statsCollector); - assertThat(client21.getOptions().getProjectId(), equalTo("project_gcs21")); + MatcherAssert.assertThat(client21.getOptions().getProjectId(), equalTo("project_gcs21")); // old client 2 not changed - assertThat(client12.getOptions().getProjectId(), equalTo("project_gcs12")); + MatcherAssert.assertThat(client12.getOptions().getProjectId(), equalTo("project_gcs12")); // new client2 is gone final IllegalArgumentException e2 = expectThrows( IllegalArgumentException.class, () -> storageService.client("gcs2", "repo2", statsCollector) ); - assertThat(e2.getMessage(), containsString("Unknown client name [gcs2].")); + MatcherAssert.assertThat(e2.getMessage(), containsString("Unknown client name [gcs2].")); // client 3 emerged final Storage client23 = storageService.client("gcs3", "repo3", statsCollector); - assertThat(client23.getOptions().getProjectId(), equalTo("project_gcs23")); + MatcherAssert.assertThat(client23.getOptions().getProjectId(), equalTo("project_gcs23")); } } @@ -193,4 +207,72 @@ public void testToTimeout() { assertEquals(-1, GoogleCloudStorageService.toTimeout(TimeValue.ZERO).intValue()); assertEquals(0, GoogleCloudStorageService.toTimeout(TimeValue.MINUS_ONE).intValue()); } + + /** + * The following method tests the Google Application Default Credentials flow instead of + * using a service account file. + * Mocking is used here because GoogleCredentials.getApplicationDefault is a static method, + * and to avoid setting environment variables that would later fall back to GCE.
+ * @throws Exception if the mocked client set-up fails + */ + public void testApplicationDefaultCredential() throws Exception { + GoogleCloudStorageClientSettings settings = getGCSClientSettingsWithoutCredentials(); + GoogleCredentials mockGoogleCredentials = Mockito.mock(GoogleCredentials.class); + HttpTransportOptions mockHttpTransportOptions = Mockito.mock(HttpTransportOptions.class); + GoogleApplicationDefaultCredentials mockDefaultCredentials = Mockito.mock(GoogleApplicationDefaultCredentials.class); + Mockito.when(mockDefaultCredentials.get()).thenReturn(mockGoogleCredentials); + + GoogleCloudStorageService service = new GoogleCloudStorageService(mockDefaultCredentials); + StorageOptions storageOptions = service.createStorageOptions(settings, mockHttpTransportOptions); + assertNotNull(storageOptions); + assertEquals(storageOptions.getCredentials().toString(), mockGoogleCredentials.toString()); + } + + /** + * Application Default Credentials resolution throws an exception when no environment + * variables are provided and Google Compute Engine is not running. + * @throws Exception if the client set-up fails + */ + public void testApplicationDefaultCredentialsWhenNoSettingProvided() throws Exception { + GoogleCloudStorageClientSettings settings = getGCSClientSettingsWithoutCredentials(); + HttpTransportOptions mockHttpTransportOptions = Mockito.mock(HttpTransportOptions.class); + GoogleCloudStorageService service = new GoogleCloudStorageService(); + StorageOptions storageOptions = service.createStorageOptions(settings, mockHttpTransportOptions); + + Exception exception = assertThrows(IOException.class, GoogleCredentials::getApplicationDefault); + assertNotNull(storageOptions); + assertNull(storageOptions.getCredentials()); + MatcherAssert.assertThat(exception.getMessage(), containsString("The Application Default Credentials are not available")); + } + + /** + * Application Default Credentials resolution throws an IOException when it is + * used outside of the GoogleCloudStorageService. + */ + public void testDefaultCredentialsThrowsExceptionWithoutGCStorageService() { + GoogleApplicationDefaultCredentials googleApplicationDefaultCredentials = new GoogleApplicationDefaultCredentials(); + GoogleCredentials credentials = googleApplicationDefaultCredentials.get(); + assertNull(credentials); + Exception exception = assertThrows(IOException.class, GoogleCredentials::getApplicationDefault); + MatcherAssert.assertThat(exception.getMessage(), containsString("The Application Default Credentials are not available")); + } + + /** + * Helper method that provides GCS client settings without credentials. + * @return GoogleCloudStorageClientSettings + * @throws URISyntaxException if the URI cannot be parsed + */ + private GoogleCloudStorageClientSettings getGCSClientSettingsWithoutCredentials() throws URISyntaxException { + return new GoogleCloudStorageClientSettings( + null, + endpoint, + projectIdName, + connectTimeValue, + readTimeValue, + applicationName, + new URI(""), + new ProxySettings(Proxy.Type.DIRECT, null, 0, null, null) + ); + } + } diff --git a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/TestUtils.java b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/TestUtils.java index bc5a542972e83..648955c079b3e 100644 --- a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/TestUtils.java +++ b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/TestUtils.java @@ -31,8 +31,8 @@ package org.opensearch.repositories.gcs; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.common.xcontent.XContentType; import java.io.ByteArrayOutputStream; import java.security.KeyPairGenerator; @@ -54,7 +54,7 @@ static byte[] createServiceAccount(final Random random) { final String privateKey = Base64.getEncoder().encodeToString(keyPairGenerator.generateKeyPair().getPrivate().getEncoded()); final ByteArrayOutputStream out = new ByteArrayOutputStream(); - try (XContentBuilder builder = new XContentBuilder(XContentType.JSON.xContent(), out)) { + try (XContentBuilder builder = new XContentBuilder(MediaTypeRegistry.JSON.xContent(), out)) { builder.startObject(); { builder.field("type", "service_account"); diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 6626bfccc6662..36843e3bc8700 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -66,17 +66,17 @@ dependencies { } api 'org.apache.htrace:htrace-core4:4.2.0-incubating' api "org.apache.logging.log4j:log4j-core:${versions.log4j}" - api 'org.apache.avro:avro:1.11.2' + api 'org.apache.avro:avro:1.11.3' api 'com.google.code.gson:gson:2.10.1' runtimeOnly "com.google.guava:guava:${versions.guava}" api "commons-logging:commons-logging:${versions.commonslogging}" - api 'commons-cli:commons-cli:1.5.0' + api 'commons-cli:commons-cli:1.6.0' api "commons-codec:commons-codec:${versions.commonscodec}" api 'commons-collections:commons-collections:3.2.2' - api 'org.apache.commons:commons-compress:1.23.0' + api "org.apache.commons:commons-compress:${versions.commonscompress}" api 'org.apache.commons:commons-configuration2:2.9.0' - api 'commons-io:commons-io:2.13.0' - api 'org.apache.commons:commons-lang3:3.12.0' + api 'commons-io:commons-io:2.15.1' + api 'org.apache.commons:commons-lang3:3.14.0' implementation 'com.google.re2j:re2j:1.7' api 'javax.servlet:servlet-api:2.5' api "org.slf4j:slf4j-api:${versions.slf4j}" @@ -84,7 +84,7 @@ dependencies { api 'net.minidev:json-smart:2.5.0' api "io.netty:netty-all:${versions.netty}" implementation "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}" - implementation 'org.codehaus.woodstox:stax2-api:4.2.1' + implementation 'org.codehaus.woodstox:stax2-api:4.2.2' hdfsFixture project(':test:fixtures:hdfs-fixture') // Set the keytab files in the classpath so that we can access them from test code without the security manager diff --git a/plugins/repository-hdfs/licenses/avro-1.11.2.jar.sha1 b/plugins/repository-hdfs/licenses/avro-1.11.2.jar.sha1 deleted file mode 100644 index ce1a894e0ce6d..0000000000000 --- a/plugins/repository-hdfs/licenses/avro-1.11.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -97e62e8be2b37e849f1bdb5a4f08121d47cc9806 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/avro-1.11.3.jar.sha1 b/plugins/repository-hdfs/licenses/avro-1.11.3.jar.sha1 new file mode 100644 index 0000000000000..fb43ecbcf22c9 --- /dev/null +++ b/plugins/repository-hdfs/licenses/avro-1.11.3.jar.sha1 @@ -0,0 +1 @@ +02b463409b373bff9ece09f54a43d42da5cea55a \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-cli-1.5.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-cli-1.5.0.jar.sha1 deleted file mode 100644 index 8f9e064eda2d0..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-cli-1.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dc98be5d5390230684a092589d70ea76a147925c \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-cli-1.6.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-cli-1.6.0.jar.sha1 new file mode 
100644 index 0000000000000..bb94eda6814ea --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-cli-1.6.0.jar.sha1 @@ -0,0 +1 @@ +38166a23afb5bd5520f739b87b3be87f7f0fb96d \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-compress-1.23.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-compress-1.23.0.jar.sha1 deleted file mode 100644 index 48dba88409c17..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-compress-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4af2060ea9b0c8b74f1854c6cafe4d43cfc161fc \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-compress-1.24.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-compress-1.24.0.jar.sha1 new file mode 100644 index 0000000000000..23999d1bfbde4 --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-compress-1.24.0.jar.sha1 @@ -0,0 +1 @@ +b4b1b5a3d9573b2970fddab236102c0a4d27d35e \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-io-2.13.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-io-2.13.0.jar.sha1 deleted file mode 100644 index c165136eb5822..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-io-2.13.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8bb2bc9b4df17e2411533a0708a69f983bf5e83b \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-io-2.15.1.jar.sha1 b/plugins/repository-hdfs/licenses/commons-io-2.15.1.jar.sha1 new file mode 100644 index 0000000000000..47c5d13812a36 --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-io-2.15.1.jar.sha1 @@ -0,0 +1 @@ +f11560da189ab563a5c8e351941415430e9304ea \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-lang3-3.12.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-lang3-3.12.0.jar.sha1 deleted file mode 100644 index 9273d8c01aaba..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-lang3-3.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c6842c86792ff03b9f1d1fe2aab8dc23aa6c6f0e \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-lang3-3.14.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-lang3-3.14.0.jar.sha1 new file mode 100644 index 0000000000000..d783e07e40902 --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-lang3-3.14.0.jar.sha1 @@ -0,0 +1 @@ +1ed471194b02f2c6cb734a0cd6f6f107c673afae \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-lang3-NOTICE.txt b/plugins/repository-hdfs/licenses/commons-lang3-NOTICE.txt index 8dfa22157abc3..13a3140897472 100644 --- a/plugins/repository-hdfs/licenses/commons-lang3-NOTICE.txt +++ b/plugins/repository-hdfs/licenses/commons-lang3-NOTICE.txt @@ -1,9 +1,5 @@ Apache Commons Lang -Copyright 2001-2015 The Apache Software Foundation +Copyright 2001-2019 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (http://www.apache.org/). - -This product includes software from the Spring Framework, -under the Apache License 2.0 (see: StringUtils.containsWhitespace()) - diff --git a/plugins/repository-hdfs/licenses/commons-logging-NOTICE.txt b/plugins/repository-hdfs/licenses/commons-logging-NOTICE.txt index 556bd03951d4b..d3d6e140ce4f3 100644 --- a/plugins/repository-hdfs/licenses/commons-logging-NOTICE.txt +++ b/plugins/repository-hdfs/licenses/commons-logging-NOTICE.txt @@ -3,4 +3,3 @@ Copyright 2003-2014 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (http://www.apache.org/). 
- diff --git a/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.20.0.jar.sha1 b/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.20.0.jar.sha1 deleted file mode 100644 index 800a4aa87ba0e..0000000000000 --- a/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7ab4f082fd162f60afcaf2b8744a3d959feab3e8 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.21.0.jar.sha1 b/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..0e22f98daa61c --- /dev/null +++ b/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.21.0.jar.sha1 @@ -0,0 +1 @@ +911fdb5b1a1df36719c579ecc6f2957b88bce1ab \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.107.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..0e3595fecb0d2 --- /dev/null +++ b/plugins/repository-hdfs/licenses/netty-all-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +3019703b67413ef3d6150da1f49753f4010507ce \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.94.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.94.Final.jar.sha1 deleted file mode 100644 index 6766770f61e78..0000000000000 --- a/plugins/repository-hdfs/licenses/netty-all-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2a7df0424eed81818157f22613f36b72487ceb34 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/slf4j-api-LICENSE.txt b/plugins/repository-hdfs/licenses/slf4j-api-LICENSE.txt index 8fda22f4d72f6..2be7689435062 100644 --- a/plugins/repository-hdfs/licenses/slf4j-api-LICENSE.txt +++ b/plugins/repository-hdfs/licenses/slf4j-api-LICENSE.txt @@ -1,4 +1,4 @@ -Copyright (c) 2004-2014 QOS.ch +Copyright (c) 2004-2022 QOS.ch All rights reserved. Permission is hereby granted, free of charge, to any person obtaining @@ -18,4 +18,4 @@ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/stax2-api-4.2.1.jar.sha1 b/plugins/repository-hdfs/licenses/stax2-api-4.2.1.jar.sha1 deleted file mode 100644 index 2c12704cdc560..0000000000000 --- a/plugins/repository-hdfs/licenses/stax2-api-4.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a3f7325c52240418c2ba257b103c3c550e140c83 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/stax2-api-4.2.2.jar.sha1 b/plugins/repository-hdfs/licenses/stax2-api-4.2.2.jar.sha1 new file mode 100644 index 0000000000000..b15a7ead0d016 --- /dev/null +++ b/plugins/repository-hdfs/licenses/stax2-api-4.2.2.jar.sha1 @@ -0,0 +1 @@ +b0d746cadea928e5264f2ea294ea9a1bf815bbde \ No newline at end of file diff --git a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsBlobContainer.java b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsBlobContainer.java index dcbd52d311230..669190f4e2490 100644 --- a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsBlobContainer.java +++ b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsBlobContainer.java @@ -32,6 +32,7 @@ package org.opensearch.repositories.hdfs; import org.apache.hadoop.fs.CreateFlag; +import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileStatus; @@ -46,6 +47,7 @@ import org.opensearch.common.blobstore.fs.FsBlobContainer; import org.opensearch.common.blobstore.support.AbstractBlobContainer; import org.opensearch.common.blobstore.support.PlainBlobMetadata; +import org.opensearch.common.io.Streams; import org.opensearch.repositories.hdfs.HdfsBlobStore.Operation; import java.io.FileNotFoundException; @@ -125,8 +127,23 @@ public InputStream readBlob(String blobName) throws IOException { } @Override - public InputStream readBlob(String blobName, long position, long length) { - throw new UnsupportedOperationException(); + public InputStream readBlob(String blobName, long position, long length) throws IOException { + return store.execute(fileContext -> { + final FSDataInputStream stream; + try { + stream = fileContext.open(new Path(path, blobName), bufferSize); + } catch (FileNotFoundException fnfe) { + throw new NoSuchFileException("[" + blobName + "] blob not found"); + } + // Seek to the desired start position, closing the stream if any error occurs + try { + stream.seek(position); + } catch (Exception e) { + stream.close(); + throw e; + } + return Streams.limitStream(new HDFSPrivilegedInputSteam(stream, securityContext), length); + }); } @Override diff --git a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsPlugin.java b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsPlugin.java index f45b9fddcdf70..af49cd3c579e6 100644 --- a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsPlugin.java +++ b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsPlugin.java @@ -31,14 +31,6 @@ package org.opensearch.repositories.hdfs; -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.security.AccessController; -import java.security.PrivilegedAction; -import java.util.Collections; -import java.util.Map; - import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB; import org.apache.hadoop.security.KerberosInfo; import org.apache.hadoop.security.SecurityUtil; @@ -52,6 +44,15 
@@ import org.opensearch.plugins.RepositoryPlugin; import org.opensearch.repositories.Repository; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.Collections; +import java.util.Map; + +@SuppressWarnings("removal") public final class HdfsPlugin extends Plugin implements RepositoryPlugin { // initialize some problematic classes with elevated privileges diff --git a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsRepository.java b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsRepository.java index 88c58942e9bbf..4b38e62b2525a 100644 --- a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsRepository.java +++ b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsRepository.java @@ -48,8 +48,8 @@ import org.opensearch.common.SuppressForbidden; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; import org.opensearch.indices.recovery.RecoverySettings; @@ -83,7 +83,7 @@ public HdfsRepository( final ClusterService clusterService, final RecoverySettings recoverySettings ) { - super(metadata, COMPRESS_SETTING.get(metadata.settings()), namedXContentRegistry, clusterService, recoverySettings); + super(metadata, namedXContentRegistry, clusterService, recoverySettings); this.environment = environment; this.chunkSize = metadata.settings().getAsBytesSize("chunk_size", null); @@ -254,6 +254,7 @@ private static String getHostName() { } } + @SuppressWarnings("removal") @Override protected HdfsBlobStore createBlobStore() { // initialize our blobstore using elevated privileges. diff --git a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsSecurityContext.java b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsSecurityContext.java index 03abb94e1263c..5a27eb937ff9c 100644 --- a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsSecurityContext.java +++ b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsSecurityContext.java @@ -31,6 +31,14 @@ package org.opensearch.repositories.hdfs; +import org.apache.hadoop.security.UserGroupInformation; +import org.opensearch.SpecialPermission; +import org.opensearch.env.Environment; + +import javax.security.auth.AuthPermission; +import javax.security.auth.PrivateCredentialPermission; +import javax.security.auth.kerberos.ServicePermission; + import java.io.IOException; import java.io.UncheckedIOException; import java.lang.reflect.ReflectPermission; @@ -42,20 +50,14 @@ import java.security.PrivilegedActionException; import java.security.PrivilegedExceptionAction; import java.util.Arrays; -import javax.security.auth.AuthPermission; -import javax.security.auth.PrivateCredentialPermission; -import javax.security.auth.kerberos.ServicePermission; - -import org.apache.hadoop.security.UserGroupInformation; -import org.opensearch.SpecialPermission; -import org.opensearch.env.Environment; /** * Oversees all the security specific logic for the HDFS Repository plugin. 
- * + * <p> * Keeps track of the current user for a given repository, as well as which * permissions to grant the blob store restricted execution methods. */ +@SuppressWarnings("removal") class HdfsSecurityContext { private static final Permission[] SIMPLE_AUTH_PERMISSIONS; diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java index d0b63f17e3887..89ba8d51cf7f7 100644 --- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java +++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java @@ -62,6 +62,7 @@ /** * Integration test that runs against an HA-Enabled HDFS instance */ +@SuppressWarnings("removal") public class HaHdfsFailoverTestSuiteIT extends OpenSearchRestTestCase { public void testHAFailoverWithRepository() throws Exception { diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreContainerTests.java b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreContainerTests.java index 0ad61906a2104..5f7454df4ecfc 100644 --- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreContainerTests.java +++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreContainerTests.java @@ -33,6 +33,7 @@ package org.opensearch.repositories.hdfs; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.AbstractFileSystem; import org.apache.hadoop.fs.FileContext; @@ -63,6 +64,7 @@ @ThreadLeakFilters(filters = { HdfsClientThreadLeakFilter.class }) public class HdfsBlobStoreContainerTests extends OpenSearchTestCase { + @SuppressWarnings("removal") private FileContext createTestContext() { FileContext fileContext; try { diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java index 9196a8f2b0558..0df39636b8ffa 100644 --- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java +++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java @@ -32,6 +32,7 @@ package org.opensearch.repositories.hdfs; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + import org.opensearch.common.settings.Settings; import org.opensearch.plugins.Plugin; import org.opensearch.repositories.blobstore.OpenSearchBlobStoreRepositoryIntegTestCase; diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsClientThreadLeakFilter.java b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsClientThreadLeakFilter.java index 2758bd020e979..856cdf1eb565e 100644 --- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsClientThreadLeakFilter.java +++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsClientThreadLeakFilter.java @@ -42,7 +42,7 @@ * thread leaks out of the client and is picked up by the test framework. This thread filter is meant * to ignore the offending thread until a version of Hadoop is released that addresses the incorrect * interrupt handling. 
- * + * <p> * In Hadoop 3.3.6, the org.apache.hadoop.fs.statistics.impl.EvaluatingStatisticsMap uses ForkJoinPool * to perform statistics calculation, leaving dangling workers. * diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsRepositoryTests.java b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsRepositoryTests.java index 4e12de7cce212..ab10691240649 100644 --- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsRepositoryTests.java +++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsRepositoryTests.java @@ -32,8 +32,8 @@ package org.opensearch.repositories.hdfs; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; -import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse; +import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.common.settings.MockSecureSettings; import org.opensearch.common.settings.SecureSettings; diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsTests.java b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsTests.java index d46d0b2092d2a..ce456f26af3a4 100644 --- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsTests.java +++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsTests.java @@ -32,9 +32,9 @@ package org.opensearch.repositories.hdfs; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; - import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.cluster.ClusterState; diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/TestingFs.java b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/TestingFs.java index 8fca4f9afd771..faa8d1d891c1f 100644 --- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/TestingFs.java +++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/TestingFs.java @@ -43,11 +43,11 @@ import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; +import java.nio.file.Files; +import java.nio.file.NoSuchFileException; import java.nio.file.Path; import java.nio.file.attribute.BasicFileAttributes; import java.nio.file.spi.FileSystemProvider; -import java.nio.file.Files; -import java.nio.file.NoSuchFileException; /** * Extends LFS to improve some operations to keep the security permissions at diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 44fd45b265e82..560d12d14395d 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -70,7 +70,6 @@ dependencies { api "software.amazon.awssdk:sts:${versions.aws}" api "software.amazon.awssdk:netty-nio-client:${versions.aws}" - api "org.reactivestreams:reactive-streams:${versions.reactivestreams}" api "org.apache.httpcomponents:httpclient:${versions.httpclient}" api "org.apache.httpcomponents:httpcore:${versions.httpcore}" api "commons-logging:commons-logging:${versions.commonslogging}" diff --git a/plugins/repository-s3/licenses/annotations-2.20.55.jar.sha1 
b/plugins/repository-s3/licenses/annotations-2.20.55.jar.sha1 deleted file mode 100644 index 5a626eeb5725b..0000000000000 --- a/plugins/repository-s3/licenses/annotations-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -330e9d0e5f2401fffba5afe30f3740f400e8308d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/annotations-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/annotations-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..8d30ad649916b --- /dev/null +++ b/plugins/repository-s3/licenses/annotations-2.20.86.jar.sha1 @@ -0,0 +1 @@ +7e785e9ecb1230e52e9daa713335f38809ddcb74 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/apache-client-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/apache-client-2.20.55.jar.sha1 deleted file mode 100644 index 3ee96bb6e4076..0000000000000 --- a/plugins/repository-s3/licenses/apache-client-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5c149885667d41a306769505cfa481cfddf6f113 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/apache-client-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/apache-client-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..e7ae36581925c --- /dev/null +++ b/plugins/repository-s3/licenses/apache-client-2.20.86.jar.sha1 @@ -0,0 +1 @@ +af31c4d3abec23b73061c6965364a6e3abbcc01a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/auth-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/auth-2.20.55.jar.sha1 deleted file mode 100644 index 010464bdf9fd1..0000000000000 --- a/plugins/repository-s3/licenses/auth-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e21f00a8a2096d5044f3eff176944256e01a175e \ No newline at end of file diff --git a/plugins/repository-s3/licenses/auth-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/auth-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..e4c1b29cea894 --- /dev/null +++ b/plugins/repository-s3/licenses/auth-2.20.86.jar.sha1 @@ -0,0 +1 @@ +f2da82f33776ce4814a3ab53b5ccb82a5d135936 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-core-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/aws-core-2.20.55.jar.sha1 deleted file mode 100644 index 4b4ee1db864a8..0000000000000 --- a/plugins/repository-s3/licenses/aws-core-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -734427c2cece98a8cb90871b78d2311e4a7ef746 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-core-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/aws-core-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..d42a15c4da413 --- /dev/null +++ b/plugins/repository-s3/licenses/aws-core-2.20.86.jar.sha1 @@ -0,0 +1 @@ +ea126147c3d17a8b3075e3122ec9c2b94fe1f6d5 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-json-protocol-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/aws-json-protocol-2.20.55.jar.sha1 deleted file mode 100644 index 45a88305c1928..0000000000000 --- a/plugins/repository-s3/licenses/aws-json-protocol-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a52731c86b974aefa5bbb1c545f407811a0163b1 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-json-protocol-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/aws-json-protocol-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..ee08d240fbfba --- /dev/null +++ b/plugins/repository-s3/licenses/aws-json-protocol-2.20.86.jar.sha1 @@ -0,0 +1 @@ +8b9d09c1aa9d3f2119267f0b6549ae1810512c7b \ No newline at end of file diff --git 
a/plugins/repository-s3/licenses/aws-query-protocol-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/aws-query-protocol-2.20.55.jar.sha1 deleted file mode 100644 index ba5f43378730c..0000000000000 --- a/plugins/repository-s3/licenses/aws-query-protocol-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ac116215cc85366f0bdffee53b4c21e7a7fe03ef \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-query-protocol-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/aws-query-protocol-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..9b19f570d56fb --- /dev/null +++ b/plugins/repository-s3/licenses/aws-query-protocol-2.20.86.jar.sha1 @@ -0,0 +1 @@ +e001792ec1a681f5bc6ee4157d572173416304ad \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-xml-protocol-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/aws-xml-protocol-2.20.55.jar.sha1 deleted file mode 100644 index fc65ee07c40c6..0000000000000 --- a/plugins/repository-s3/licenses/aws-xml-protocol-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6a3b5f607ece38536f17d869b82c669c6339f9ad \ No newline at end of file diff --git a/plugins/repository-s3/licenses/aws-xml-protocol-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/aws-xml-protocol-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..50940d73f4f7b --- /dev/null +++ b/plugins/repository-s3/licenses/aws-xml-protocol-2.20.86.jar.sha1 @@ -0,0 +1 @@ +b78a1182a9cf3cccf416cc5a441d08174b08682d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/commons-logging-LICENSE.txt b/plugins/repository-s3/licenses/commons-logging-LICENSE.txt index 57bc88a15a0ee..d645695673349 100644 --- a/plugins/repository-s3/licenses/commons-logging-LICENSE.txt +++ b/plugins/repository-s3/licenses/commons-logging-LICENSE.txt @@ -1,3 +1,4 @@ + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -199,4 +200,3 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - diff --git a/plugins/repository-s3/licenses/commons-logging-NOTICE.txt b/plugins/repository-s3/licenses/commons-logging-NOTICE.txt index 72eb32a902458..d3d6e140ce4f3 100644 --- a/plugins/repository-s3/licenses/commons-logging-NOTICE.txt +++ b/plugins/repository-s3/licenses/commons-logging-NOTICE.txt @@ -1,5 +1,5 @@ -Apache Commons CLI -Copyright 2001-2009 The Apache Software Foundation +Apache Commons Logging +Copyright 2003-2014 The Apache Software Foundation -This product includes software developed by +This product includes software developed at The Apache Software Foundation (http://www.apache.org/). 
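For orientation amid the checksum churn in this stretch of the diff: every `*.jar.sha1` file holds the hex-encoded SHA-1 digest of its companion artifact, so bumping the AWS SDK from 2.20.55 to 2.20.86 means deleting one marker file and adding another per jar. A standalone sketch of how such a digest is computed (hypothetical helper; the build's own dependency-license checks do the real verification):

```java
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.MessageDigest;

public final class Sha1Check {
    /** Recomputes the hex SHA-1 of a jar for comparison with its .sha1 marker file. */
    public static String sha1Hex(Path jar) throws Exception {
        MessageDigest md = MessageDigest.getInstance("SHA-1");
        try (InputStream in = Files.newInputStream(jar)) {
            byte[] buf = new byte[8192];
            for (int n = in.read(buf); n != -1; n = in.read(buf)) {
                md.update(buf, 0, n); // feed the digest incrementally
            }
        }
        StringBuilder hex = new StringBuilder();
        for (byte b : md.digest()) {
            hex.append(String.format("%02x", b)); // two lowercase hex digits per byte
        }
        return hex.toString();
    }
}
```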
diff --git a/plugins/repository-s3/licenses/endpoints-spi-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/endpoints-spi-2.20.55.jar.sha1 deleted file mode 100644 index 5bc0e31166c77..0000000000000 --- a/plugins/repository-s3/licenses/endpoints-spi-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -085f82038ee86a7d6cd568fe7edd842978d92de3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/endpoints-spi-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/endpoints-spi-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..16f9db1fd6327 --- /dev/null +++ b/plugins/repository-s3/licenses/endpoints-spi-2.20.86.jar.sha1 @@ -0,0 +1 @@ +2b9075dd0ed32da97f95229f55c01425353e8cba \ No newline at end of file diff --git a/plugins/repository-s3/licenses/http-client-spi-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/http-client-spi-2.20.55.jar.sha1 deleted file mode 100644 index 523cf43dcb2e9..0000000000000 --- a/plugins/repository-s3/licenses/http-client-spi-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -34f9b10c1a46038a0ceebdd750ba3a413a862ceb \ No newline at end of file diff --git a/plugins/repository-s3/licenses/http-client-spi-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/http-client-spi-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..0662e15b1f3e6 --- /dev/null +++ b/plugins/repository-s3/licenses/http-client-spi-2.20.86.jar.sha1 @@ -0,0 +1 @@ +776bfc86fabc6e8c792ea4650a281d0bec5e9708 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-annotations-2.15.2.jar.sha1 b/plugins/repository-s3/licenses/jackson-annotations-2.15.2.jar.sha1 deleted file mode 100644 index f63416ddb8ceb..0000000000000 --- a/plugins/repository-s3/licenses/jackson-annotations-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4724a65ac8e8d156a24898d50fd5dbd3642870b8 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-annotations-2.16.2.jar.sha1 b/plugins/repository-s3/licenses/jackson-annotations-2.16.2.jar.sha1 new file mode 100644 index 0000000000000..d7dfc5ff83706 --- /dev/null +++ b/plugins/repository-s3/licenses/jackson-annotations-2.16.2.jar.sha1 @@ -0,0 +1 @@ +dfcd11c847ea7276aa073c25f5fe8ee361748d7f \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-databind-2.15.2.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.15.2.jar.sha1 deleted file mode 100644 index f16d80af8dce6..0000000000000 --- a/plugins/repository-s3/licenses/jackson-databind-2.15.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9353b021f10c307c00328f52090de2bdb4b6ff9c \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-databind-2.16.2.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.16.2.jar.sha1 new file mode 100644 index 0000000000000..86998b4558461 --- /dev/null +++ b/plugins/repository-s3/licenses/jackson-databind-2.16.2.jar.sha1 @@ -0,0 +1 @@ +7fda67535b54d74eebf6157682b835c847410932 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/json-utils-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/json-utils-2.20.55.jar.sha1 deleted file mode 100644 index a19b00e62f8b5..0000000000000 --- a/plugins/repository-s3/licenses/json-utils-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cd6710900e3190eac4c4496ae529ce08680dd320 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/json-utils-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/json-utils-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..7011f8c3e6c78 --- /dev/null +++ 
b/plugins/repository-s3/licenses/json-utils-2.20.86.jar.sha1 @@ -0,0 +1 @@ +5dd418ad48e3bfd8c3fa05ff29a955b91c1af666 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/log4j-1.2-api-2.20.0.jar.sha1 b/plugins/repository-s3/licenses/log4j-1.2-api-2.20.0.jar.sha1 deleted file mode 100644 index 9829576d38ce0..0000000000000 --- a/plugins/repository-s3/licenses/log4j-1.2-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -689151374756cb809cb029f2501015bdc7733179 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/log4j-1.2-api-2.21.0.jar.sha1 b/plugins/repository-s3/licenses/log4j-1.2-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..39d9177cb2fac --- /dev/null +++ b/plugins/repository-s3/licenses/log4j-1.2-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +12bad3819a9570807f3c97315930699584c12152 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/metrics-spi-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/metrics-spi-2.20.55.jar.sha1 deleted file mode 100644 index db6701d87892a..0000000000000 --- a/plugins/repository-s3/licenses/metrics-spi-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8a0eae705b27465516f3b09cc9918e40963d534d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/metrics-spi-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/metrics-spi-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..bbd88bb9e1b0c --- /dev/null +++ b/plugins/repository-s3/licenses/metrics-spi-2.20.86.jar.sha1 @@ -0,0 +1 @@ +74a65d0f8decd0b3057fb500ca5409ff5778752a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..beb44fc0f4cf9 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-buffer-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +8509a72b8a5a2d33d611e99254aed39765c3ad82 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.94.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.94.Final.jar.sha1 deleted file mode 100644 index 05b1c2a4d614e..0000000000000 --- a/plugins/repository-s3/licenses/netty-buffer-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -eec248b26f16e888688e5bb37b7eeda76b78d2f7 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..4c74bb06fd83b --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +0a1d32debf2ed07c5852ab5b2904c43adb76c39e \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.94.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.94.Final.jar.sha1 deleted file mode 100644 index baa7e25f1ac49..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c70ef20ca338558147887df60f46341bc47f6900 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..38eb2e5bad80a --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +04d8e9e51b7254bd26a42fe17bdcae32e4c6ebb3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.94.Final.jar.sha1 
b/plugins/repository-s3/licenses/netty-codec-http-4.1.94.Final.jar.sha1 deleted file mode 100644 index 8c018be2565e5..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9e5404764092c1f6305ad5719078f46ab228d587 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..5b3d3311edc9f --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +3885ffe7dd05c9773df70c61009f34a5a8a383ec \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.94.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.94.Final.jar.sha1 deleted file mode 100644 index e73026b412972..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http2-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f651595784d6cca4cbca6a8ad74c48fceed6cea8 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..bbe91c6ccfb1d --- /dev/null +++ b/plugins/repository-s3/licenses/netty-common-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +4f17a547530d64becd7179507b25f4154bcfba57 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.94.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.94.Final.jar.sha1 deleted file mode 100644 index b787338551ede..0000000000000 --- a/plugins/repository-s3/licenses/netty-common-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ad4ecf779ebc794cd351f57792f56ea01387b868 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..ba27b38632622 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-handler-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +d4c6b05f4d9aca117981297fb7f02953102ebb5e \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.94.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.94.Final.jar.sha1 deleted file mode 100644 index b08e85ba7adf8..0000000000000 --- a/plugins/repository-s3/licenses/netty-handler-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cd9121ce24d6d3f2898946d04b0ef3ec548b00b4 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-nio-client-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/netty-nio-client-2.20.55.jar.sha1 deleted file mode 100644 index 489f18e0bceaa..0000000000000 --- a/plugins/repository-s3/licenses/netty-nio-client-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4c269571ad2fb19851ebd7c7856aa2975fe0bab3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-nio-client-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/netty-nio-client-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..4ae8b2ec5a23c --- /dev/null +++ b/plugins/repository-s3/licenses/netty-nio-client-2.20.86.jar.sha1 @@ -0,0 +1 @@ +29195a65eeea36cf1960d1939bca6586d5842dad \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..3bc0f7b3fed09 
--- /dev/null +++ b/plugins/repository-s3/licenses/netty-resolver-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +dfee84308341a42131dd0f8ac0e1e02d627c19f3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.94.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.94.Final.jar.sha1 deleted file mode 100644 index 4c9e4dda2b852..0000000000000 --- a/plugins/repository-s3/licenses/netty-resolver-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e96f649e8e9dcb29a1f8e95328b99c9eb6cf76c2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..19419999300dd --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +d6a105c621b47d1410e0e09419d7209d2d46e914 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.94.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.94.Final.jar.sha1 deleted file mode 100644 index ed7760b8e15d1..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ec783a737f96991a87b1d5794e2f9eb2024d708a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..6b9a35acb2c20 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +9234407d6a46745599735765c4d3755c7fc84162 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.94.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.94.Final.jar.sha1 deleted file mode 100644 index 72a392ea2917d..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -240e36cd5c2ffaf655913f8857f2d58b26394679 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..407ecaffdad30 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +4d61d4959741109b3eccd7337f11fc89fa90a74a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.94.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.94.Final.jar.sha1 deleted file mode 100644 index 43bc960a347a1..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3fa5f9d04b6b782d869d6e0657d896eeadca5866 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/profiles-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/profiles-2.20.55.jar.sha1 deleted file mode 100644 index b7104cf0939e6..0000000000000 --- a/plugins/repository-s3/licenses/profiles-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -959aad08b2f24057bf286c761b49e3af31a0a623 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/profiles-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/profiles-2.20.86.jar.sha1 new file 
mode 100644 index 0000000000000..425ce9b92f9f2 --- /dev/null +++ b/plugins/repository-s3/licenses/profiles-2.20.86.jar.sha1 @@ -0,0 +1 @@ +27a8f4aa488d1d3ef947865ee0190f16d10a3cc7 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/protocol-core-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/protocol-core-2.20.55.jar.sha1 deleted file mode 100644 index 4dee45f4d9dd3..0000000000000 --- a/plugins/repository-s3/licenses/protocol-core-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0935e3ab32962a890f1d13bf39ba2167d9d692f9 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/protocol-core-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/protocol-core-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..8de58699d8d82 --- /dev/null +++ b/plugins/repository-s3/licenses/protocol-core-2.20.86.jar.sha1 @@ -0,0 +1 @@ +bd85984ac6327a50d20e7957ecebf4fa3ad7766b \ No newline at end of file diff --git a/plugins/repository-s3/licenses/reactive-streams-1.0.4.jar.sha1 b/plugins/repository-s3/licenses/reactive-streams-1.0.4.jar.sha1 deleted file mode 100644 index 45a80e3f7e361..0000000000000 --- a/plugins/repository-s3/licenses/reactive-streams-1.0.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3864a1320d97d7b045f729a326e1e077661f31b7 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/reactive-streams-LICENSE.txt b/plugins/repository-s3/licenses/reactive-streams-LICENSE.txt deleted file mode 100644 index 1e3c7e7c77495..0000000000000 --- a/plugins/repository-s3/licenses/reactive-streams-LICENSE.txt +++ /dev/null @@ -1,21 +0,0 @@ -MIT No Attribution - -Copyright 2014 Reactive Streams - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
\ No newline at end of file diff --git a/plugins/repository-s3/licenses/regions-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/regions-2.20.55.jar.sha1 deleted file mode 100644 index 993fc2f97de62..0000000000000 --- a/plugins/repository-s3/licenses/regions-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a117c19b4a30e902f4f1cc4bef6b5c10cc9aef31 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/regions-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/regions-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..266bc76ad6f77 --- /dev/null +++ b/plugins/repository-s3/licenses/regions-2.20.86.jar.sha1 @@ -0,0 +1 @@ +04fd460ce1c633986ecef1b4218d3e7067a7087d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/s3-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/s3-2.20.55.jar.sha1 deleted file mode 100644 index b7f3157995aa6..0000000000000 --- a/plugins/repository-s3/licenses/s3-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -69e7df4c7c170867dc246c0205c5e0b6099e8a6f \ No newline at end of file diff --git a/plugins/repository-s3/licenses/s3-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/s3-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..7125793759db5 --- /dev/null +++ b/plugins/repository-s3/licenses/s3-2.20.86.jar.sha1 @@ -0,0 +1 @@ +6a37f591abd11a3f848f091f1724825741daaeb2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/sdk-core-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/sdk-core-2.20.55.jar.sha1 deleted file mode 100644 index 5f12be9c08c5b..0000000000000 --- a/plugins/repository-s3/licenses/sdk-core-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8f2347feaf2575560ca89a2caa8d0243dbeb17a9 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/sdk-core-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/sdk-core-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..9eca40e6b9a9a --- /dev/null +++ b/plugins/repository-s3/licenses/sdk-core-2.20.86.jar.sha1 @@ -0,0 +1 @@ +1b9df9ca5e4918fab05db3b703b2873e83104c30 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/signer-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/signer-2.20.55.jar.sha1 deleted file mode 100644 index ec53fa0db623e..0000000000000 --- a/plugins/repository-s3/licenses/signer-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a44e55775ae429931287f81a634eeb67bd607a9f \ No newline at end of file diff --git a/plugins/repository-s3/licenses/signer-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/signer-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..cb73b19e14fcf --- /dev/null +++ b/plugins/repository-s3/licenses/signer-2.20.86.jar.sha1 @@ -0,0 +1 @@ +52b92753b944d3e1b8c6814bc9d6c93119ca6421 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/slf4j-api-LICENSE.txt b/plugins/repository-s3/licenses/slf4j-api-LICENSE.txt index 8fda22f4d72f6..2be7689435062 100644 --- a/plugins/repository-s3/licenses/slf4j-api-LICENSE.txt +++ b/plugins/repository-s3/licenses/slf4j-api-LICENSE.txt @@ -1,4 +1,4 @@ -Copyright (c) 2004-2014 QOS.ch +Copyright (c) 2004-2022 QOS.ch All rights reserved. Permission is hereby granted, free of charge, to any person obtaining @@ -18,4 +18,4 @@ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/plugins/repository-s3/licenses/sts-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/sts-2.20.55.jar.sha1 deleted file mode 100644 index 9f4bbdd0f22ad..0000000000000 --- a/plugins/repository-s3/licenses/sts-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -adc350996b6f8481a32c8e73598138fc32826584 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/sts-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/sts-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..1f40b6dcd8417 --- /dev/null +++ b/plugins/repository-s3/licenses/sts-2.20.86.jar.sha1 @@ -0,0 +1 @@ +7415d850a4aea10935f84766065dd76a3d327a54 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/third-party-jackson-core-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/third-party-jackson-core-2.20.55.jar.sha1 deleted file mode 100644 index e7eebbb98f1fe..0000000000000 --- a/plugins/repository-s3/licenses/third-party-jackson-core-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -956912f26056fc7d46b2db566362fe5f7a8c0e14 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/third-party-jackson-core-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/third-party-jackson-core-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..c9c3d4dc53505 --- /dev/null +++ b/plugins/repository-s3/licenses/third-party-jackson-core-2.20.86.jar.sha1 @@ -0,0 +1 @@ +5ad6e7f7d52d8a5390b2daf2fd8ffcab97fe3102 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/utils-2.20.55.jar.sha1 b/plugins/repository-s3/licenses/utils-2.20.55.jar.sha1 deleted file mode 100644 index fc4cde604e33c..0000000000000 --- a/plugins/repository-s3/licenses/utils-2.20.55.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d3e1bbbc19795eadbeb4dd963a94647576644097 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/utils-2.20.86.jar.sha1 b/plugins/repository-s3/licenses/utils-2.20.86.jar.sha1 new file mode 100644 index 0000000000000..b91a3b3047570 --- /dev/null +++ b/plugins/repository-s3/licenses/utils-2.20.86.jar.sha1 @@ -0,0 +1 @@ +7a61f8b3c54ecf3dc785830d4f482f19ca52bc57 \ No newline at end of file diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java index 3070c654a96ee..da2c6e8c1b0ee 100644 --- a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -34,7 +34,11 @@ import com.sun.net.httpserver.Headers; import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; -import fixture.s3.S3HttpHandler; + +import software.amazon.awssdk.core.internal.http.pipeline.stages.ApplyTransactionIdStage; + +import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.service.ClusterService; import 
org.opensearch.common.SuppressForbidden; @@ -45,27 +49,39 @@ import org.opensearch.common.settings.MockSecureSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.plugins.Plugin; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.Repository; +import org.opensearch.repositories.RepositoryMissingException; +import org.opensearch.repositories.RepositoryStats; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.blobstore.OpenSearchMockAPIBasedRepositoryIntegTestCase; import org.opensearch.repositories.s3.utils.AwsRequestSigner; import org.opensearch.snapshots.mockstore.BlobStoreWrapper; +import org.opensearch.test.BackgroundIndexer; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.threadpool.ThreadPool; -import software.amazon.awssdk.core.internal.http.pipeline.stages.ApplyTransactionIdStage; import java.io.IOException; import java.nio.file.Path; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; +import java.util.stream.StreamSupport; + +import fixture.s3.S3HttpHandler; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; @SuppressForbidden(reason = "this test uses a HttpServer to emulate an S3 endpoint") // Need to set up a new cluster for each test because cluster settings use randomized authentication settings @@ -149,6 +165,67 @@ protected Settings nodeSettings(int nodeOrdinal) { return builder.build(); } + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/10735") + @Override + public void testRequestStats() throws Exception { + final String repository = createRepository(randomName()); + final String index = "index-no-merges"; + createIndex( + index, + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build() + ); + + final long nbDocs = randomLongBetween(10_000L, 20_000L); + try (BackgroundIndexer indexer = new BackgroundIndexer(index, "_doc", client(), (int) nbDocs)) { + waitForDocs(nbDocs, indexer); + } + + flushAndRefresh(index); + ForceMergeResponse forceMerge = client().admin().indices().prepareForceMerge(index).setFlush(true).setMaxNumSegments(1).get(); + assertThat(forceMerge.getSuccessfulShards(), equalTo(1)); + assertHitCount(client().prepareSearch(index).setSize(0).setTrackTotalHits(true).get(), nbDocs); + + final String snapshot = "snapshot"; + assertSuccessfulSnapshot( + client().admin().cluster().prepareCreateSnapshot(repository, snapshot).setWaitForCompletion(true).setIndices(index) + ); + + assertAcked(client().admin().indices().prepareDelete(index)); + + assertSuccessfulRestore(client().admin().cluster().prepareRestoreSnapshot(repository, snapshot).setWaitForCompletion(true)); + ensureGreen(index); + assertHitCount(client().prepareSearch(index).setSize(0).setTrackTotalHits(true).get(), nbDocs); + + 
assertAcked(client().admin().cluster().prepareDeleteSnapshot(repository, snapshot).get()); + + final RepositoryStats repositoryStats = StreamSupport.stream( + internalCluster().getInstances(RepositoriesService.class).spliterator(), + false + ).map(repositoriesService -> { + try { + return repositoriesService.repository(repository); + } catch (RepositoryMissingException e) { + return null; + } + }).filter(Objects::nonNull).map(Repository::stats).reduce(RepositoryStats::merge).get(); + + Map<BlobStore.Metric, Map<String, Long>> extendedStats = repositoryStats.extendedStats; + Map<String, Long> aggregatedStats = new HashMap<>(); + extendedStats.forEach((k, v) -> { + if (k == BlobStore.Metric.RETRY_COUNT || k == BlobStore.Metric.REQUEST_SUCCESS || k == BlobStore.Metric.REQUEST_FAILURE) { + for (Map.Entry<String, Long> entry : v.entrySet()) { + aggregatedStats.merge(entry.getKey(), entry.getValue(), Math::addExact); + } + } + + }); + final Map<String, Long> mockCalls = getMockRequestCounts(); + + String assertionErrorMsg = String.format("SDK sent [%s] calls and handler measured [%s] calls", aggregatedStats, mockCalls); + + assertEquals(assertionErrorMsg, mockCalls, aggregatedStats); + } + /** * S3RepositoryPlugin that allows to disable chunked encoding and to set a low threshold between single upload and multipart upload. */ @@ -172,7 +249,7 @@ protected S3Repository createRepository( ClusterService clusterService, RecoverySettings recoverySettings ) { - return new S3Repository(metadata, registry, service, clusterService, recoverySettings, null, null, null, null, false) { + return new S3Repository(metadata, registry, service, clusterService, recoverySettings, null, null, null, null, null, false) { @Override public BlobStore blobStore() { @@ -222,7 +299,7 @@ private void validateAuthHeader(HttpExchange exchange) { /** * HTTP handler that injects random S3 service errors - * + * <p> * Note: it is not a good idea to allow this handler to simulate too many errors as it would * slow down the test suite. 
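That caveat about simulating too many errors exists because each injected failure sends the SDK client through a full retry-and-backoff cycle before the test can proceed. A minimal sketch of a fault-injecting wrapper in this spirit, using only the JDK `com.sun.net.httpserver` types this test already imports; the 1-in-10 rate and class name are assumptions for illustration:

```java
import com.sun.net.httpserver.HttpExchange;
import com.sun.net.httpserver.HttpHandler;

import java.io.IOException;
import java.util.concurrent.ThreadLocalRandom;

// Wraps a real handler and occasionally answers with a transient S3-style error.
final class RandomErrorHandler implements HttpHandler {
    private final HttpHandler delegate;

    RandomErrorHandler(HttpHandler delegate) {
        this.delegate = delegate;
    }

    @Override
    public void handle(HttpExchange exchange) throws IOException {
        if (ThreadLocalRandom.current().nextInt(10) == 0) {
            exchange.sendResponseHeaders(500, -1); // -1: no response body
            exchange.close();
            return; // the SDK client is expected to retry this request
        }
        delegate.handle(exchange); // otherwise behave like the real endpoint
    }
}
```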
*/ @@ -260,6 +337,8 @@ public void maybeTrack(final String request, Headers requestHeaders) { trackRequest("PutMultipartObject"); } else if (Regex.simpleMatch("PUT /*/*", request)) { trackRequest("PutObject"); + } else if (Regex.simpleMatch("POST /*?delete*", request)) { + trackRequest("DeleteObjects"); } } diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java index c8b1670bfdd83..f7a84864a8569 100644 --- a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java +++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java @@ -31,7 +31,8 @@ package org.opensearch.repositories.s3; -import org.junit.Before; +import software.amazon.awssdk.services.s3.model.StorageClass; + import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.blobstore.BlobMetadata; @@ -42,7 +43,7 @@ import org.opensearch.plugins.Plugin; import org.opensearch.repositories.AbstractThirdPartyRepositoryTestCase; import org.opensearch.repositories.blobstore.BlobStoreRepository; -import software.amazon.awssdk.services.s3.model.StorageClass; +import org.junit.Before; import java.util.Collection; import java.util.Map; diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3Reference.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3Reference.java index 258b00bde75f0..45170ea1ad209 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3Reference.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3Reference.java @@ -8,10 +8,11 @@ package org.opensearch.repositories.s3; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.common.concurrent.RefCountedReleasable; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; import java.io.Closeable; import java.io.IOException; @@ -28,6 +29,7 @@ public class AmazonAsyncS3Reference extends RefCountedReleasable<AmazonAsyncS3Wi super("AWS_S3_CLIENT", client, () -> { client.client().close(); client.priorityClient().close(); + client.urgentClient().close(); AwsCredentialsProvider credentials = client.credentials(); if (credentials instanceof Closeable) { try { diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3WithCredentials.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3WithCredentials.java index 15f104f51a067..f8a313b55d945 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3WithCredentials.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3WithCredentials.java @@ -8,26 +8,30 @@ package org.opensearch.repositories.s3; -import org.opensearch.common.Nullable; import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; import software.amazon.awssdk.services.s3.S3AsyncClient; +import org.opensearch.common.Nullable; + /** * The holder of the AmazonS3 and AWSCredentialsProvider */ final class AmazonAsyncS3WithCredentials { private final S3AsyncClient 
client; private final S3AsyncClient priorityClient; + private final S3AsyncClient urgentClient; private final AwsCredentialsProvider credentials; private AmazonAsyncS3WithCredentials( final S3AsyncClient client, final S3AsyncClient priorityClient, + final S3AsyncClient urgentClient, @Nullable final AwsCredentialsProvider credentials ) { this.client = client; this.credentials = credentials; this.priorityClient = priorityClient; + this.urgentClient = urgentClient; } S3AsyncClient client() { @@ -38,6 +42,10 @@ S3AsyncClient priorityClient() { return priorityClient; } + S3AsyncClient urgentClient() { + return urgentClient; + } + AwsCredentialsProvider credentials() { return credentials; } @@ -45,8 +53,9 @@ AwsCredentialsProvider credentials() { static AmazonAsyncS3WithCredentials create( final S3AsyncClient client, final S3AsyncClient priorityClient, + final S3AsyncClient urgentClient, @Nullable final AwsCredentialsProvider credentials ) { - return new AmazonAsyncS3WithCredentials(client, priorityClient, credentials); + return new AmazonAsyncS3WithCredentials(client, priorityClient, urgentClient, credentials); } } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/ProxySettings.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/ProxySettings.java index 8f7d566cac758..94327872bdd72 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/ProxySettings.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/ProxySettings.java @@ -8,10 +8,11 @@ package org.opensearch.repositories.s3; -import org.opensearch.core.common.Strings; +import software.amazon.awssdk.core.exception.SdkException; + import org.opensearch.common.settings.SettingsException; +import org.opensearch.core.common.Strings; import org.opensearch.repositories.s3.utils.Protocol; -import software.amazon.awssdk.core.exception.SdkException; import java.net.InetAddress; import java.net.InetSocketAddress; diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java index 653034ee9afde..d691cad9c9d03 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java @@ -8,17 +8,6 @@ package org.opensearch.repositories.s3; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.cluster.metadata.RepositoryMetadata; -import org.opensearch.common.Nullable; -import org.opensearch.common.SuppressForbidden; -import org.opensearch.common.collect.MapBuilder; -import org.opensearch.common.settings.Settings; -import org.opensearch.core.common.Strings; -import org.opensearch.repositories.s3.S3ClientSettings.IrsaCredentials; -import org.opensearch.repositories.s3.async.AsyncExecutorContainer; -import org.opensearch.repositories.s3.async.AsyncTransferEventLoopGroup; import software.amazon.awssdk.auth.credentials.AwsCredentials; import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; import software.amazon.awssdk.auth.credentials.ContainerCredentialsProvider; @@ -45,6 +34,18 @@ import software.amazon.awssdk.services.sts.auth.StsWebIdentityTokenFileCredentialsProvider; import software.amazon.awssdk.services.sts.model.AssumeRoleRequest; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import 
org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.common.Nullable; +import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.collect.MapBuilder; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.Strings; +import org.opensearch.repositories.s3.S3ClientSettings.IrsaCredentials; +import org.opensearch.repositories.s3.async.AsyncExecutorContainer; +import org.opensearch.repositories.s3.async.AsyncTransferEventLoopGroup; + import java.io.Closeable; import java.io.IOException; import java.net.URI; @@ -102,6 +103,7 @@ public synchronized void refreshAndClearCache(Map<String, S3ClientSettings> clie */ public AmazonAsyncS3Reference client( RepositoryMetadata repositoryMetadata, + AsyncExecutorContainer urgentExecutorBuilder, AsyncExecutorContainer priorityExecutorBuilder, AsyncExecutorContainer normalExecutorBuilder ) { @@ -118,7 +120,7 @@ public AmazonAsyncS3Reference client( return existing; } final AmazonAsyncS3Reference clientReference = new AmazonAsyncS3Reference( - buildClient(clientSettings, priorityExecutorBuilder, normalExecutorBuilder) + buildClient(clientSettings, urgentExecutorBuilder, priorityExecutorBuilder, normalExecutorBuilder) ); clientReference.incRef(); clientsCache = MapBuilder.newMapBuilder(clientsCache).put(clientSettings, clientReference).immutableMap(); @@ -164,6 +166,7 @@ S3ClientSettings settings(RepositoryMetadata repositoryMetadata) { // proxy for testing synchronized AmazonAsyncS3WithCredentials buildClient( final S3ClientSettings clientSettings, + AsyncExecutorContainer urgentExecutorBuilder, AsyncExecutorContainer priorityExecutorBuilder, AsyncExecutorContainer normalExecutorBuilder ) { @@ -194,6 +197,17 @@ synchronized AmazonAsyncS3WithCredentials buildClient( builder.forcePathStyle(true); } + builder.httpClient(buildHttpClient(clientSettings, urgentExecutorBuilder.getAsyncTransferEventLoopGroup())); + builder.asyncConfiguration( + ClientAsyncConfiguration.builder() + .advancedOption( + SdkAdvancedAsyncClientOption.FUTURE_COMPLETION_EXECUTOR, + urgentExecutorBuilder.getFutureCompletionExecutor() + ) + .build() + ); + final S3AsyncClient urgentClient = SocketAccess.doPrivileged(builder::build); + builder.httpClient(buildHttpClient(clientSettings, priorityExecutorBuilder.getAsyncTransferEventLoopGroup())); builder.asyncConfiguration( ClientAsyncConfiguration.builder() @@ -216,7 +230,7 @@ synchronized AmazonAsyncS3WithCredentials buildClient( ); final S3AsyncClient client = SocketAccess.doPrivileged(builder::build); - return AmazonAsyncS3WithCredentials.create(client, priorityClient, credentials); + return AmazonAsyncS3WithCredentials.create(client, priorityClient, urgentClient, credentials); } static ClientOverrideConfiguration buildOverrideConfiguration(final S3ClientSettings clientSettings) { @@ -360,7 +374,7 @@ private static IrsaCredentials buildFromEnvironment(IrsaCredentials defaults) { return new IrsaCredentials(webIdentityTokenFile, roleArn, roleSessionName); } - private synchronized void releaseCachedClients() { + public synchronized void releaseCachedClients() { // the clients will shutdown when they will not be used anymore for (final AmazonAsyncS3Reference clientReference : clientsCache.values()) { clientReference.decRef(); diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java index 81a902a6992d8..25f361b40636e 100644 --- 
a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java @@ -32,29 +32,11 @@ package org.opensearch.repositories.s3; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionListener; -import org.opensearch.common.Nullable; -import org.opensearch.common.SetOnce; -import org.opensearch.common.StreamContext; -import org.opensearch.common.blobstore.BlobContainer; -import org.opensearch.common.blobstore.BlobMetadata; -import org.opensearch.common.blobstore.BlobPath; -import org.opensearch.common.blobstore.BlobStoreException; -import org.opensearch.common.blobstore.DeleteResult; -import org.opensearch.common.blobstore.VerifyingMultiStreamBlobContainer; -import org.opensearch.common.blobstore.stream.write.WriteContext; -import org.opensearch.common.blobstore.stream.write.WritePriority; -import org.opensearch.common.blobstore.support.AbstractBlobContainer; -import org.opensearch.common.blobstore.support.PlainBlobMetadata; -import org.opensearch.common.collect.Tuple; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.core.async.AsyncResponseTransformer; import software.amazon.awssdk.core.exception.SdkException; import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.services.s3.S3AsyncClient; import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.CommonPrefix; import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; @@ -64,10 +46,15 @@ import software.amazon.awssdk.services.s3.model.Delete; import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest; import software.amazon.awssdk.services.s3.model.DeleteObjectsResponse; +import software.amazon.awssdk.services.s3.model.GetObjectAttributesRequest; +import software.amazon.awssdk.services.s3.model.GetObjectAttributesResponse; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; import software.amazon.awssdk.services.s3.model.HeadObjectRequest; import software.amazon.awssdk.services.s3.model.ListObjectsV2Request; import software.amazon.awssdk.services.s3.model.ListObjectsV2Response; import software.amazon.awssdk.services.s3.model.NoSuchKeyException; +import software.amazon.awssdk.services.s3.model.ObjectAttributes; import software.amazon.awssdk.services.s3.model.ObjectIdentifier; import software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.services.s3.model.S3Error; @@ -75,10 +62,36 @@ import software.amazon.awssdk.services.s3.model.UploadPartRequest; import software.amazon.awssdk.services.s3.model.UploadPartResponse; import software.amazon.awssdk.services.s3.paginators.ListObjectsV2Iterable; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.ExceptionsHelper; +import org.opensearch.common.Nullable; +import org.opensearch.common.SetOnce; +import org.opensearch.common.StreamContext; +import org.opensearch.common.annotation.ExperimentalApi; +import 
org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobMetadata; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.blobstore.BlobStoreException; +import org.opensearch.common.blobstore.DeleteResult; +import org.opensearch.common.blobstore.stream.read.ReadContext; +import org.opensearch.common.blobstore.stream.write.WriteContext; +import org.opensearch.common.blobstore.stream.write.WritePriority; +import org.opensearch.common.blobstore.support.AbstractBlobContainer; +import org.opensearch.common.blobstore.support.PlainBlobMetadata; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.io.InputStreamContainer; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.repositories.s3.async.UploadRequest; -import software.amazon.awssdk.services.s3.S3AsyncClient; +import org.opensearch.repositories.s3.utils.HttpRangeUtils; +import java.io.BufferedInputStream; import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; @@ -97,17 +110,10 @@ import static org.opensearch.repositories.s3.S3Repository.MAX_FILE_SIZE_USING_MULTIPART; import static org.opensearch.repositories.s3.S3Repository.MIN_PART_SIZE_USING_MULTIPART; -class S3BlobContainer extends AbstractBlobContainer implements VerifyingMultiStreamBlobContainer { +class S3BlobContainer extends AbstractBlobContainer implements AsyncMultiStreamBlobContainer { private static final Logger logger = LogManager.getLogger(S3BlobContainer.class); - /** - * Maximum number of deletes in a {@link DeleteObjectsRequest}. - * - * @see <a href="https://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html">S3 Documentation</a>. 
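The javadoc being removed here documented S3's hard cap of 1,000 keys per `DeleteObjectsRequest`; the hunks further down swap the fixed `MAX_BULK_DELETES` constant for a configurable `blobStore.getBulkDeletesSize()`, but the batching itself remains plain fixed-size partitioning. A standalone sketch of that pattern (hypothetical helper name):

```java
import java.util.ArrayList;
import java.util.List;

final class BulkPartitioner {
    /** Splits keys into batches of at most bulkSize, one batch per delete request. */
    static List<List<String>> partition(Iterable<String> keys, int bulkSize) {
        List<List<String>> batches = new ArrayList<>();
        List<String> current = new ArrayList<>();
        for (String key : keys) {
            current.add(key);
            if (current.size() == bulkSize) {
                batches.add(current);      // flush a full batch
                current = new ArrayList<>();
            }
        }
        if (!current.isEmpty()) {
            batches.add(current);          // trailing partial batch
        }
        return batches;
    }
}
```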
- */ - private static final int MAX_BULK_DELETES = 1000; - private final S3BlobStore blobStore; private final String keyPath; @@ -183,18 +189,51 @@ public void asyncBlobUpload(WriteContext writeContext, ActionListener<Void> comp writeContext.getWritePriority(), writeContext.getUploadFinalizer(), writeContext.doRemoteDataIntegrityCheck(), - writeContext.getExpectedChecksum() + writeContext.getExpectedChecksum(), + blobStore.isUploadRetryEnabled() ); try { - long partSize = blobStore.getAsyncTransferManager().calculateOptimalPartSize(writeContext.getFileSize()); + if (uploadRequest.getContentLength() > ByteSizeUnit.GB.toBytes(10) && blobStore.isRedirectLargeUploads()) { + StreamContext streamContext = SocketAccess.doPrivileged( + () -> writeContext.getStreamProvider(uploadRequest.getContentLength()) + ); + InputStreamContainer inputStream = streamContext.provideStream(0); + try { + executeMultipartUpload( + blobStore, + uploadRequest.getKey(), + inputStream.getInputStream(), + uploadRequest.getContentLength() + ); + completionListener.onResponse(null); + } catch (Exception ex) { + logger.error( + () -> new ParameterizedMessage( + "Failed to upload large file {} of size {} ", + uploadRequest.getKey(), + uploadRequest.getContentLength() + ), + ex + ); + completionListener.onFailure(ex); + } + return; + } + long partSize = blobStore.getAsyncTransferManager() + .calculateOptimalPartSize(writeContext.getFileSize(), writeContext.getWritePriority(), blobStore.isUploadRetryEnabled()); StreamContext streamContext = SocketAccess.doPrivileged(() -> writeContext.getStreamProvider(partSize)); try (AmazonAsyncS3Reference amazonS3Reference = SocketAccess.doPrivileged(blobStore::asyncClientReference)) { - S3AsyncClient s3AsyncClient = writeContext.getWritePriority() == WritePriority.HIGH - ? amazonS3Reference.get().priorityClient() - : amazonS3Reference.get().client(); + S3AsyncClient s3AsyncClient; + if (writeContext.getWritePriority() == WritePriority.URGENT) { + s3AsyncClient = amazonS3Reference.get().urgentClient(); + } else if (writeContext.getWritePriority() == WritePriority.HIGH) { + s3AsyncClient = amazonS3Reference.get().priorityClient(); + } else { + s3AsyncClient = amazonS3Reference.get().client(); + } CompletableFuture<Void> completableFuture = blobStore.getAsyncTransferManager() - .uploadObject(s3AsyncClient, uploadRequest, streamContext); + .uploadObject(s3AsyncClient, uploadRequest, streamContext, blobStore.getStatsMetricPublisher()); completableFuture.whenComplete((response, throwable) -> { if (throwable == null) { completionListener.onResponse(response); @@ -210,6 +249,56 @@ public void asyncBlobUpload(WriteContext writeContext, ActionListener<Void> comp } } + @ExperimentalApi + @Override + public void readBlobAsync(String blobName, ActionListener<ReadContext> listener) { + try (AmazonAsyncS3Reference amazonS3Reference = SocketAccess.doPrivileged(blobStore::asyncClientReference)) { + final S3AsyncClient s3AsyncClient = amazonS3Reference.get().client(); + final String bucketName = blobStore.bucket(); + final String blobKey = buildKey(blobName); + + final CompletableFuture<GetObjectAttributesResponse> blobMetadataFuture = getBlobMetadata(s3AsyncClient, bucketName, blobKey); + + blobMetadataFuture.whenComplete((blobMetadata, throwable) -> { + if (throwable != null) { + Exception ex = throwable.getCause() instanceof Exception + ? 
(Exception) throwable.getCause() + : new Exception(throwable.getCause()); + listener.onFailure(ex); + return; + } + + try { + final List<ReadContext.StreamPartCreator> blobPartInputStreamFutures = new ArrayList<>(); + final long blobSize = blobMetadata.objectSize(); + final Integer numberOfParts = blobMetadata.objectParts() == null ? null : blobMetadata.objectParts().totalPartsCount(); + final String blobChecksum = blobMetadata.checksum() == null ? null : blobMetadata.checksum().checksumCRC32(); + + if (numberOfParts == null) { + blobPartInputStreamFutures.add(() -> getBlobPartInputStreamContainer(s3AsyncClient, bucketName, blobKey, null)); + } else { + // S3 multipart files use 1 to n indexing + for (int partNumber = 1; partNumber <= numberOfParts; partNumber++) { + final int innerPartNumber = partNumber; + blobPartInputStreamFutures.add( + () -> getBlobPartInputStreamContainer(s3AsyncClient, bucketName, blobKey, innerPartNumber) + ); + } + } + listener.onResponse(new ReadContext(blobSize, blobPartInputStreamFutures, blobChecksum)); + } catch (Exception ex) { + listener.onFailure(ex); + } + }); + } catch (Exception ex) { + listener.onFailure(SdkException.create("Error occurred while fetching blob parts from the repository", ex)); + } + } + + public boolean remoteIntegrityCheckSupported() { + return true; + } + // package private for testing long getLargeBlobThresholdInBytes() { return blobStore.bufferSizeInBytes(); @@ -277,12 +366,12 @@ private void doDeleteBlobs(List<String> blobNames, boolean relative) throws IOEx outstanding = new HashSet<>(blobNames); } try (AmazonS3Reference clientReference = blobStore.clientReference()) { - // S3 API only allows 1k blobs per delete so we split up the given blobs into requests of max. 1k deletes + // S3 API allows 1k blobs per delete so we split up the given blobs into requests of bulk size deletes final List<DeleteObjectsRequest> deleteRequests = new ArrayList<>(); final List<String> partition = new ArrayList<>(); for (String key : outstanding) { partition.add(key); - if (partition.size() == MAX_BULK_DELETES) { + if (partition.size() == blobStore.getBulkDeletesSize()) { deleteRequests.add(bulkDelete(blobStore.bucket(), partition)); partition.clear(); } @@ -329,7 +418,7 @@ private void doDeleteBlobs(List<String> blobNames, boolean relative) throws IOEx assert outstanding.isEmpty(); } - private static DeleteObjectsRequest bulkDelete(String bucket, List<String> blobs) { + private DeleteObjectsRequest bulkDelete(String bucket, List<String> blobs) { return DeleteObjectsRequest.builder() .bucket(bucket) .delete( @@ -338,21 +427,18 @@ private static DeleteObjectsRequest bulkDelete(String bucket, List<String> blobs .quiet(true) .build() ) + .overrideConfiguration(o -> o.addMetricPublisher(blobStore.getStatsMetricPublisher().deleteObjectsMetricPublisher)) .build(); } @Override - public void listBlobsByPrefixInSortedOrder( - String blobNamePrefix, - int limit, - BlobNameSortOrder blobNameSortOrder, - ActionListener<List<BlobMetadata>> listener - ) { + public List<BlobMetadata> listBlobsByPrefixInSortedOrder(String blobNamePrefix, int limit, BlobNameSortOrder blobNameSortOrder) + throws IOException { // As AWS S3 returns list of keys in Lexicographic order, we don't have to fetch all the keys in order to sort them // We fetch only keys as per the given limit to optimize the fetch. If provided sort order is not Lexicographic, // we fall-back to default implementation of fetching all the keys and sorting them. 
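`readBlobAsync`, added above, never returns the `ReadContext` directly; the caller receives it through an `ActionListener`, and the context carries the blob size, one stream supplier per S3 part (exactly one for single-part objects), and an optional CRC32 checksum. A hedged caller-side sketch; the blob name and handler bodies are illustrative, and `ActionListener.wrap` is assumed to be the usual OpenSearch response/failure pairing helper:

```java
import org.opensearch.common.blobstore.stream.read.ReadContext;
import org.opensearch.core.action.ActionListener;

final class ReadBlobExample {
    // Kicks off an asynchronous, part-wise read of one blob.
    static void readAsync(S3BlobContainer container) {
        container.readBlobAsync("example-blob", ActionListener.wrap(
            // Success: the ReadContext's part suppliers can now be drained in order.
            readContext -> System.out.println("read context ready for example-blob"),
            // Failure: surfaces metadata-fetch errors, e.g. a missing key.
            e -> System.err.println("async read failed: " + e)
        ));
    }
}
```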
if (blobNameSortOrder != BlobNameSortOrder.LEXICOGRAPHIC) { - super.listBlobsByPrefixInSortedOrder(blobNamePrefix, limit, blobNameSortOrder, listener); + return super.listBlobsByPrefixInSortedOrder(blobNamePrefix, limit, blobNameSortOrder); } else { if (limit < 0) { throw new IllegalArgumentException("limit should not be a negative value"); @@ -363,9 +449,9 @@ public void listBlobsByPrefixInSortedOrder( .flatMap(listing -> listing.contents().stream()) .map(s3Object -> new PlainBlobMetadata(s3Object.key().substring(keyPath.length()), s3Object.size())) .collect(Collectors.toList()); - listener.onResponse(blobs.subList(0, Math.min(limit, blobs.size()))); + return blobs.subList(0, Math.min(limit, blobs.size())); } catch (final Exception e) { - listener.onFailure(new IOException("Exception when listing blobs by prefix [" + prefix + "]", e)); + throw new IOException("Exception when listing blobs by prefix [" + prefix + "]", e); } } } @@ -428,7 +514,7 @@ private static List<ListObjectsV2Response> executeListing( for (ListObjectsV2Response listObjectsV2Response : listObjectsIterable) { results.add(listObjectsV2Response); totalObjects += listObjectsV2Response.contents().size(); - if (limit != -1 && totalObjects > limit) { + if (limit != -1 && totalObjects >= limit) { break; } } @@ -480,8 +566,14 @@ void executeSingleUpload(final S3BlobStore blobStore, final String blobName, fin PutObjectRequest putObjectRequest = putObjectRequestBuilder.build(); try (AmazonS3Reference clientReference = blobStore.clientReference()) { + final InputStream requestInputStream; + if (blobStore.isUploadRetryEnabled()) { + requestInputStream = new BufferedInputStream(input, (int) (blobSize + 1)); + } else { + requestInputStream = input; + } SocketAccess.doPrivilegedVoid( - () -> clientReference.get().putObject(putObjectRequest, RequestBody.fromInputStream(input, blobSize)) + () -> clientReference.get().putObject(putObjectRequest, RequestBody.fromInputStream(requestInputStream, blobSize)) ); } catch (final SdkException e) { throw new IOException("Unable to upload object [" + blobName + "] using a single upload", e); @@ -521,6 +613,13 @@ void executeMultipartUpload(final S3BlobStore blobStore, final String blobName, createMultipartUploadRequestBuilder.serverSideEncryption(ServerSideEncryption.AES256); } + final InputStream requestInputStream; + if (blobStore.isUploadRetryEnabled()) { + requestInputStream = new BufferedInputStream(input, (int) (partSize + 1)); + } else { + requestInputStream = input; + } + CreateMultipartUploadRequest createMultipartUploadRequest = createMultipartUploadRequestBuilder.build(); try (AmazonS3Reference clientReference = blobStore.clientReference()) { uploadId.set( @@ -544,10 +643,9 @@ void executeMultipartUpload(final S3BlobStore blobStore, final String blobName, .build(); bytesCount += uploadPartRequest.contentLength(); - final UploadPartResponse uploadResponse = SocketAccess.doPrivileged( () -> clientReference.get() - .uploadPart(uploadPartRequest, RequestBody.fromInputStream(input, uploadPartRequest.contentLength())) + .uploadPart(uploadPartRequest, RequestBody.fromInputStream(requestInputStream, uploadPartRequest.contentLength())) ); parts.add(CompletedPart.builder().partNumber(uploadPartRequest.partNumber()).eTag(uploadResponse.eTag()).build()); } @@ -626,4 +724,71 @@ static Tuple<Long, Long> numberOfMultiparts(final long totalSize, final long par return Tuple.tuple(parts + 1, remaining); } } + + /** + * Fetches a part of the blob from the S3 bucket and transforms it to an {@link 
InputStreamContainer}, which holds + * the stream and its related metadata. + * @param s3AsyncClient Async client to be utilized to fetch the object part + * @param bucketName Name of the S3 bucket + * @param blobKey Identifier of the blob for which the parts will be fetched + * @param partNumber Optional part number for the blob to be retrieved + * @return A future of {@link InputStreamContainer} containing the stream and stream metadata. + */ + CompletableFuture<InputStreamContainer> getBlobPartInputStreamContainer( + S3AsyncClient s3AsyncClient, + String bucketName, + String blobKey, + @Nullable Integer partNumber + ) { + final boolean isMultipartObject = partNumber != null; + final GetObjectRequest.Builder getObjectRequestBuilder = GetObjectRequest.builder().bucket(bucketName).key(blobKey); + + if (isMultipartObject) { + getObjectRequestBuilder.partNumber(partNumber); + } + + return SocketAccess.doPrivileged( + () -> s3AsyncClient.getObject(getObjectRequestBuilder.build(), AsyncResponseTransformer.toBlockingInputStream()) + .thenApply(response -> transformResponseToInputStreamContainer(response, isMultipartObject)) + ); + } + + /** + * Transforms the stream response object from S3 into an {@link InputStreamContainer} + * @param streamResponse Response stream object from S3 + * @param isMultipartObject Flag to denote a multipart object response + * @return {@link InputStreamContainer} containing the stream and stream metadata + */ + // Package-Private for testing. + static InputStreamContainer transformResponseToInputStreamContainer( + ResponseInputStream<GetObjectResponse> streamResponse, + boolean isMultipartObject + ) { + final GetObjectResponse getObjectResponse = streamResponse.response(); + final String contentRange = getObjectResponse.contentRange(); + final Long contentLength = getObjectResponse.contentLength(); + if ((isMultipartObject && contentRange == null) || contentLength == null) { + throw SdkException.builder().message("Failed to fetch required metadata for blob part").build(); + } + final long offset = isMultipartObject ? HttpRangeUtils.getStartOffsetFromRangeHeader(getObjectResponse.contentRange()) : 0L; + return new InputStreamContainer(streamResponse, getObjectResponse.contentLength(), offset); + } + + /** + * Retrieves the metadata like checksum, object size and parts for the provided blob within the S3 bucket. 
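transformResponseToInputStreamContainer above derives the part's offset from the Content-Range header via HttpRangeUtils.getStartOffsetFromRangeHeader, whose body is not part of this diff. Assuming the standard header form "bytes <start>-<end>/<total>", a sketch of what such a parser does:

final class ContentRangeSketch {
    // Illustrative only, e.g. startOffsetOf("bytes 1024-2047/4096") == 1024L.
    static long startOffsetOf(final String contentRange) {
        if (contentRange == null || contentRange.startsWith("bytes ") == false) {
            throw new IllegalArgumentException("Unexpected Content-Range: " + contentRange);
        }
        final String range = contentRange.substring("bytes ".length());
        return Long.parseLong(range.substring(0, range.indexOf('-')));
    }
}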
+ * @param s3AsyncClient Async client to be utilized to fetch the metadata + * @param bucketName Name of the S3 bucket + * @param blobName Identifier of the blob for which the metadata will be fetched + * @return A future containing the metadata within {@link GetObjectAttributesResponse} + */ + CompletableFuture<GetObjectAttributesResponse> getBlobMetadata(S3AsyncClient s3AsyncClient, String bucketName, String blobName) { + // Fetch blob metadata - part info, size, checksum + final GetObjectAttributesRequest getObjectAttributesRequest = GetObjectAttributesRequest.builder() + .bucket(bucketName) + .key(blobName) + .objectAttributes(ObjectAttributes.CHECKSUM, ObjectAttributes.OBJECT_SIZE, ObjectAttributes.OBJECT_PARTS) + .build(); + + return SocketAccess.doPrivileged(() -> s3AsyncClient.getObjectAttributes(getObjectAttributesRequest)); + } } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java index 30040e182cbc9..fc70fbb0db00e 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java @@ -32,6 +32,9 @@ package org.opensearch.repositories.s3; +import software.amazon.awssdk.services.s3.model.ObjectCannedACL; +import software.amazon.awssdk.services.s3.model.StorageClass; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.cluster.metadata.RepositoryMetadata; @@ -39,16 +42,25 @@ import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; import org.opensearch.common.blobstore.BlobStoreException; -import org.opensearch.common.unit.ByteSizeValue; -import software.amazon.awssdk.services.s3.model.ObjectCannedACL; -import software.amazon.awssdk.services.s3.model.StorageClass; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.repositories.s3.async.AsyncExecutorContainer; import org.opensearch.repositories.s3.async.AsyncTransferManager; import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; import java.util.Locale; import java.util.Map; +import static org.opensearch.repositories.s3.S3Repository.BUCKET_SETTING; +import static org.opensearch.repositories.s3.S3Repository.BUFFER_SIZE_SETTING; +import static org.opensearch.repositories.s3.S3Repository.BULK_DELETE_SIZE; +import static org.opensearch.repositories.s3.S3Repository.CANNED_ACL_SETTING; +import static org.opensearch.repositories.s3.S3Repository.REDIRECT_LARGE_S3_UPLOAD; +import static org.opensearch.repositories.s3.S3Repository.SERVER_SIDE_ENCRYPTION_SETTING; +import static org.opensearch.repositories.s3.S3Repository.STORAGE_CLASS_SETTING; +import static org.opensearch.repositories.s3.S3Repository.UPLOAD_RETRY_ENABLED; + class S3BlobStore implements BlobStore { private static final Logger logger = LogManager.getLogger(S3BlobStore.class); @@ -57,21 +69,28 @@ class S3BlobStore implements BlobStore { private final S3AsyncService s3AsyncService; - private final String bucket; + private volatile String bucket; - private final ByteSizeValue bufferSize; + private volatile ByteSizeValue bufferSize; - private final boolean serverSideEncryption; + private volatile boolean redirectLargeUploads; - private final ObjectCannedACL cannedACL; + private volatile boolean uploadRetryEnabled; - private final StorageClass storageClass; + private volatile boolean 
serverSideEncryption; - private final RepositoryMetadata repositoryMetadata; + private volatile ObjectCannedACL cannedACL; + + private volatile StorageClass storageClass; + + private volatile int bulkDeletesSize; + + private volatile RepositoryMetadata repositoryMetadata; private final StatsMetricPublisher statsMetricPublisher = new StatsMetricPublisher(); private final AsyncTransferManager asyncTransferManager; + private final AsyncExecutorContainer urgentExecutorBuilder; private final AsyncExecutorContainer priorityExecutorBuilder; private final AsyncExecutorContainer normalExecutorBuilder; private final boolean multipartUploadEnabled; @@ -85,8 +104,10 @@ class S3BlobStore implements BlobStore { ByteSizeValue bufferSize, String cannedACL, String storageClass, + int bulkDeletesSize, RepositoryMetadata repositoryMetadata, AsyncTransferManager asyncTransferManager, + AsyncExecutorContainer urgentExecutorBuilder, AsyncExecutorContainer priorityExecutorBuilder, AsyncExecutorContainer normalExecutorBuilder ) { @@ -98,14 +119,28 @@ class S3BlobStore implements BlobStore { this.bufferSize = bufferSize; this.cannedACL = initCannedACL(cannedACL); this.storageClass = initStorageClass(storageClass); + this.bulkDeletesSize = bulkDeletesSize; this.repositoryMetadata = repositoryMetadata; this.asyncTransferManager = asyncTransferManager; this.normalExecutorBuilder = normalExecutorBuilder; this.priorityExecutorBuilder = priorityExecutorBuilder; + this.urgentExecutorBuilder = urgentExecutorBuilder; + // Settings to initialize blobstore with. + this.redirectLargeUploads = REDIRECT_LARGE_S3_UPLOAD.get(repositoryMetadata.settings()); + this.uploadRetryEnabled = UPLOAD_RETRY_ENABLED.get(repositoryMetadata.settings()); } - public boolean isMultipartUploadEnabled() { - return multipartUploadEnabled; + @Override + public void reload(RepositoryMetadata repositoryMetadata) { + this.repositoryMetadata = repositoryMetadata; + this.bucket = BUCKET_SETTING.get(repositoryMetadata.settings()); + this.serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(repositoryMetadata.settings()); + this.bufferSize = BUFFER_SIZE_SETTING.get(repositoryMetadata.settings()); + this.cannedACL = initCannedACL(CANNED_ACL_SETTING.get(repositoryMetadata.settings())); + this.storageClass = initStorageClass(STORAGE_CLASS_SETTING.get(repositoryMetadata.settings())); + this.bulkDeletesSize = BULK_DELETE_SIZE.get(repositoryMetadata.settings()); + this.redirectLargeUploads = REDIRECT_LARGE_S3_UPLOAD.get(repositoryMetadata.settings()); + this.uploadRetryEnabled = UPLOAD_RETRY_ENABLED.get(repositoryMetadata.settings()); } @Override @@ -118,13 +153,21 @@ public AmazonS3Reference clientReference() { } public AmazonAsyncS3Reference asyncClientReference() { - return s3AsyncService.client(repositoryMetadata, priorityExecutorBuilder, normalExecutorBuilder); + return s3AsyncService.client(repositoryMetadata, urgentExecutorBuilder, priorityExecutorBuilder, normalExecutorBuilder); } int getMaxRetries() { return service.settings(repositoryMetadata).maxRetries; } + public boolean isRedirectLargeUploads() { + return redirectLargeUploads; + } + + public boolean isUploadRetryEnabled() { + return uploadRetryEnabled; + } + public String bucket() { return bucket; } @@ -137,6 +180,10 @@ public long bufferSizeInBytes() { return bufferSize.getBytes(); } + public int getBulkDeletesSize() { + return bulkDeletesSize; + } + @Override public BlobContainer blobContainer(BlobPath path) { return new S3BlobContainer(path, this); @@ -157,6 +204,16 @@ public Map<String, 
Long> stats() { return statsMetricPublisher.getStats().toMap(); } + @Override + public Map<Metric, Map<String, Long>> extendedStats() { + if (statsMetricPublisher.getExtendedStats() == null || statsMetricPublisher.getExtendedStats().isEmpty()) { + return Collections.emptyMap(); + } + Map<Metric, Map<String, Long>> extendedStats = new HashMap<>(); + statsMetricPublisher.getExtendedStats().forEach((k, v) -> extendedStats.put(k, v.toMap())); + return extendedStats; + } + public ObjectCannedACL getCannedACL() { return cannedACL; } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java index 8097629ed0773..e44f408e6dd12 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java @@ -32,21 +32,22 @@ package org.opensearch.repositories.s3; -import org.opensearch.core.common.Strings; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentials; +import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; + import org.opensearch.common.SuppressForbidden; import org.opensearch.common.io.PathUtils; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.SecureSetting; -import org.opensearch.core.common.settings.SecureString; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsException; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.settings.SecureString; import org.opensearch.repositories.s3.utils.Protocol; -import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; -import software.amazon.awssdk.auth.credentials.AwsCredentials; -import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; import java.net.InetAddress; import java.net.UnknownHostException; @@ -176,7 +177,7 @@ final class S3ClientSettings { static final Setting.AffixSetting<TimeValue> REQUEST_TIMEOUT_SETTING = Setting.affixKeySetting( PREFIX, "request_timeout", - key -> Setting.timeSetting(key, TimeValue.timeValueMinutes(2), Property.NodeScope) + key -> Setting.timeSetting(key, TimeValue.timeValueMinutes(5), Property.NodeScope) ); /** The connection timeout for connecting to s3. */ @@ -197,14 +198,20 @@ final class S3ClientSettings { static final Setting.AffixSetting<Integer> MAX_CONNECTIONS_SETTING = Setting.affixKeySetting( PREFIX, "max_connections", - key -> Setting.intSetting(key, 100, Property.NodeScope) + key -> Setting.intSetting(key, 500, Property.NodeScope) + ); + + static final Setting.AffixSetting<Integer> MAX_SYNC_CONNECTIONS_SETTING = Setting.affixKeySetting( + PREFIX, + "max_sync_connections", + key -> Setting.intSetting(key, 500, Property.NodeScope) ); /** Connection acquisition timeout for new connections to S3. */ static final Setting.AffixSetting<TimeValue> CONNECTION_ACQUISITION_TIMEOUT = Setting.affixKeySetting( PREFIX, "connection_acquisition_timeout", - key -> Setting.timeSetting(key, TimeValue.timeValueMinutes(2), Property.NodeScope) + key -> Setting.timeSetting(key, TimeValue.timeValueMinutes(15), Property.NodeScope) ); /** The maximum pending connections to S3. 
*/ @@ -283,10 +290,13 @@ final class S3ClientSettings { /** The connection TTL for the s3 client */ final int connectionTTLMillis; - /** The max number of connections for the s3 client */ + /** The max number of connections for the s3 async client */ final int maxConnections; - /** The connnection acquisition timeout for the s3 async client */ + /** The max number of connections for the s3 sync client */ + final int maxSyncConnections; + + /** The connection acquisition timeout for the s3 sync and async client */ final int connectionAcquisitionTimeoutMillis; /** The number of retries to use for the s3 client. */ @@ -317,6 +327,7 @@ private S3ClientSettings( int connectionTimeoutMillis, int connectionTTLMillis, int maxConnections, + int maxSyncConnections, int connectionAcquisitionTimeoutMillis, int maxRetries, boolean throttleRetries, @@ -335,6 +346,7 @@ private S3ClientSettings( this.connectionTimeoutMillis = connectionTimeoutMillis; this.connectionTTLMillis = connectionTTLMillis; this.maxConnections = maxConnections; + this.maxSyncConnections = maxSyncConnections; this.connectionAcquisitionTimeoutMillis = connectionAcquisitionTimeoutMillis; this.maxRetries = maxRetries; this.throttleRetries = throttleRetries; @@ -385,6 +397,9 @@ S3ClientSettings refine(Settings repositorySettings) { ).millis() ); final int newMaxConnections = Math.toIntExact(getRepoSettingOrDefault(MAX_CONNECTIONS_SETTING, normalizedSettings, maxConnections)); + final int newMaxSyncConnections = Math.toIntExact( + getRepoSettingOrDefault(MAX_SYNC_CONNECTIONS_SETTING, normalizedSettings, maxSyncConnections) + ); final int newMaxRetries = getRepoSettingOrDefault(MAX_RETRIES_SETTING, normalizedSettings, maxRetries); final boolean newThrottleRetries = getRepoSettingOrDefault(USE_THROTTLE_RETRIES_SETTING, normalizedSettings, throttleRetries); final boolean newPathStyleAccess = getRepoSettingOrDefault(USE_PATH_STYLE_ACCESS, normalizedSettings, pathStyleAccess); @@ -432,6 +447,7 @@ S3ClientSettings refine(Settings repositorySettings) { newConnectionTimeoutMillis, newConnectionTTLMillis, newMaxConnections, + newMaxSyncConnections, newConnectionAcquisitionTimeoutMillis, newMaxRetries, newThrottleRetries, @@ -445,7 +461,7 @@ /** * Load all client settings from the given settings. - * + * <p> * Note this will always at least return a client named "default".
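The max_connections/max_sync_connections split above exists because the sync (Apache) and async (Netty) HTTP clients keep separate connection pools. A hedged sketch of how these limits map onto the AWS SDK v2 builders, using the new defaults; the async mapping is an assumption, since only the sync builder change appears later in this patch:

import java.time.Duration;
import software.amazon.awssdk.http.apache.ApacheHttpClient;
import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient;

final class ConnectionPoolSketch {
    static void configure() {
        // Sync pool: s3.client.default.max_sync_connections (default 500).
        ApacheHttpClient.Builder sync = ApacheHttpClient.builder()
            .maxConnections(500)
            .connectionAcquisitionTimeout(Duration.ofMinutes(15)); // new default
        // Async concurrency: s3.client.default.max_connections (default 500).
        NettyNioAsyncHttpClient.Builder async = NettyNioAsyncHttpClient.builder().maxConcurrency(500);
    }
}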
*/ static Map<String, S3ClientSettings> load(final Settings settings, final Path configPath) { @@ -562,6 +578,7 @@ static S3ClientSettings getClientSettings(final Settings settings, final String Math.toIntExact(getConfigValue(settings, clientName, CONNECTION_TIMEOUT_SETTING).millis()), Math.toIntExact(getConfigValue(settings, clientName, CONNECTION_TTL_SETTING).millis()), Math.toIntExact(getConfigValue(settings, clientName, MAX_CONNECTIONS_SETTING)), + Math.toIntExact(getConfigValue(settings, clientName, MAX_SYNC_CONNECTIONS_SETTING)), Math.toIntExact(getConfigValue(settings, clientName, CONNECTION_ACQUISITION_TIMEOUT).millis()), getConfigValue(settings, clientName, MAX_RETRIES_SETTING), getConfigValue(settings, clientName, USE_THROTTLE_RETRIES_SETTING), diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java index d42bfc0be7e4f..f7772a57c9afd 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java @@ -32,23 +32,28 @@ package org.opensearch.repositories.s3; +import software.amazon.awssdk.services.s3.model.ObjectCannedACL; +import software.amazon.awssdk.services.s3.model.StorageClass; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; +import org.opensearch.common.blobstore.BlobStoreException; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.SecureSetting; -import org.opensearch.core.common.settings.SecureString; import org.opensearch.common.settings.Setting; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.settings.SecureString; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.monitor.jvm.JvmInfo; @@ -62,7 +67,11 @@ import org.opensearch.snapshots.SnapshotInfo; import org.opensearch.threadpool.Scheduler; +import java.nio.file.Path; +import java.util.ArrayList; import java.util.Collection; +import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; @@ -77,7 +86,6 @@ * <dt>{@code concurrent_streams}</dt><dd>Number of concurrent read/write streams (per repository on each node). Defaults to 5.</dd> * <dt>{@code chunk_size}</dt> * <dd>Large files can be divided into chunks. This parameter specifies the chunk size. Defaults to not chunked.</dd> - * <dt>{@code compress}</dt><dd>If set to true metadata files will be stored compressed.
Defaults to false.</dd> * </dl> */ class S3Repository extends MeteredBlobStoreRepository { @@ -139,6 +147,20 @@ class S3Repository extends MeteredBlobStoreRepository { */ static final ByteSizeValue MAX_FILE_SIZE_USING_MULTIPART = new ByteSizeValue(5, ByteSizeUnit.TB); + /** + * Whether large uploads need to be redirected to the slower sync s3 client. + */ + static final Setting<Boolean> REDIRECT_LARGE_S3_UPLOAD = Setting.boolSetting( + "redirect_large_s3_upload", + true, + Setting.Property.NodeScope + ); + + /** + * Whether retries on upload are enabled. This setting wraps the input stream with a buffered stream to enable retries. + */ + static final Setting<Boolean> UPLOAD_RETRY_ENABLED = Setting.boolSetting("s3_upload_retry_enabled", true, Setting.Property.NodeScope); + /** * Minimum threshold below which the chunk is uploaded using a single request. Beyond this threshold, * the S3 repository will use the AWS Multipart Upload API to split the chunk into several parts, each of buffer_size length, and @@ -182,6 +204,13 @@ class S3Repository extends MeteredBlobStoreRepository { new ByteSizeValue(5, ByteSizeUnit.TB) ); + /** + * Maximum number of deletes in a DeleteObjectsRequest. + * + * @see <a href="https://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html">S3 Documentation</a>. + */ + static final Setting<Integer> BULK_DELETE_SIZE = Setting.intSetting("bulk_delete_size", 1000, 1, 1000); + /** * Sets the S3 storage class type for the backup files. Values may be standard, reduced_redundancy, * standard_ia, onezone_ia and intelligent_tiering. Defaults to standard. @@ -203,31 +232,30 @@ class S3Repository extends MeteredBlobStoreRepository { private final S3Service service; - private final String bucket; - - private final ByteSizeValue bufferSize; - - private final ByteSizeValue chunkSize; + private volatile String bucket; - private final BlobPath basePath; + private volatile ByteSizeValue bufferSize; - private final boolean serverSideEncryption; + private volatile ByteSizeValue chunkSize; - private final String storageClass; + private volatile BlobPath basePath; - private final String cannedACL; + private volatile boolean serverSideEncryption; - private final RepositoryMetadata repositoryMetadata; + private volatile String storageClass; + private volatile String cannedACL; private final AsyncTransferManager asyncUploadUtils; private final S3AsyncService s3AsyncService; private final boolean multipartUploadEnabled; + private final AsyncExecutorContainer urgentExecutorBuilder; private final AsyncExecutorContainer priorityExecutorBuilder; private final AsyncExecutorContainer normalExecutorBuilder; + private final Path pluginConfigPath; - /** - * Constructs an s3 backed repository - */ + private volatile int bulkDeletesSize; + + // Used by test classes S3Repository( final RepositoryMetadata metadata, final NamedXContentRegistry namedXContentRegistry, @@ -235,82 +263,57 @@ class S3Repository extends MeteredBlobStoreRepository { final ClusterService clusterService, final RecoverySettings recoverySettings, final AsyncTransferManager asyncUploadUtils, + final AsyncExecutorContainer urgentExecutorBuilder, final AsyncExecutorContainer priorityExecutorBuilder, final AsyncExecutorContainer normalExecutorBuilder, final S3AsyncService s3AsyncService, final boolean multipartUploadEnabled ) { - super( + this( metadata, - COMPRESS_SETTING.get(metadata.settings()), namedXContentRegistry, + service, clusterService, recoverySettings, - buildLocation(metadata) + asyncUploadUtils, + urgentExecutorBuilder, +
priorityExecutorBuilder, + normalExecutorBuilder, + s3AsyncService, + multipartUploadEnabled, + Path.of("") ); + } + + /** + * Constructs an s3 backed repository + */ + S3Repository( + final RepositoryMetadata metadata, + final NamedXContentRegistry namedXContentRegistry, + final S3Service service, + final ClusterService clusterService, + final RecoverySettings recoverySettings, + final AsyncTransferManager asyncUploadUtils, + final AsyncExecutorContainer urgentExecutorBuilder, + final AsyncExecutorContainer priorityExecutorBuilder, + final AsyncExecutorContainer normalExecutorBuilder, + final S3AsyncService s3AsyncService, + final boolean multipartUploadEnabled, + Path pluginConfigPath + ) { + super(metadata, namedXContentRegistry, clusterService, recoverySettings, buildLocation(metadata)); this.service = service; this.s3AsyncService = s3AsyncService; this.multipartUploadEnabled = multipartUploadEnabled; - - this.repositoryMetadata = metadata; + this.pluginConfigPath = pluginConfigPath; this.asyncUploadUtils = asyncUploadUtils; + this.urgentExecutorBuilder = urgentExecutorBuilder; this.priorityExecutorBuilder = priorityExecutorBuilder; this.normalExecutorBuilder = normalExecutorBuilder; - // Parse and validate the user's S3 Storage Class setting - this.bucket = BUCKET_SETTING.get(metadata.settings()); - if (bucket == null) { - throw new RepositoryException(metadata.name(), "No bucket defined for s3 repository"); - } - - this.bufferSize = BUFFER_SIZE_SETTING.get(metadata.settings()); - this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings()); - - // We make sure that chunkSize is bigger or equal than/to bufferSize - if (this.chunkSize.getBytes() < bufferSize.getBytes()) { - throw new RepositoryException( - metadata.name(), - CHUNK_SIZE_SETTING.getKey() - + " (" - + this.chunkSize - + ") can't be lower than " - + BUFFER_SIZE_SETTING.getKey() - + " (" - + bufferSize - + ")." - ); - } - - final String basePath = BASE_PATH_SETTING.get(metadata.settings()); - if (Strings.hasLength(basePath)) { - this.basePath = new BlobPath().add(basePath); - } else { - this.basePath = BlobPath.cleanPath(); - } - - this.serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings()); - - this.storageClass = STORAGE_CLASS_SETTING.get(metadata.settings()); - this.cannedACL = CANNED_ACL_SETTING.get(metadata.settings()); - - if (S3ClientSettings.checkDeprecatedCredentials(metadata.settings())) { - // provided repository settings - deprecationLogger.deprecate( - "s3_repository_secret_settings", - "Using s3 access/secret key from repository settings. Instead " - + "store these in named clients and the opensearch keystore for secure settings." 
- ); - } - - logger.debug( - "using bucket [{}], chunk_size [{}], server_side_encryption [{}], buffer_size [{}], cannedACL [{}], storageClass [{}]", - bucket, - chunkSize, - serverSideEncryption, - bufferSize, - cannedACL, - storageClass - ); + validateRepositoryMetadata(metadata); + readRepositoryMetadata(); } private static Map<String, String> buildLocation(RepositoryMetadata metadata) { @@ -365,14 +368,16 @@ protected S3BlobStore createBlobStore() { bufferSize, cannedACL, storageClass, - repositoryMetadata, + bulkDeletesSize, + metadata, asyncUploadUtils, + urgentExecutorBuilder, priorityExecutorBuilder, normalExecutorBuilder ); } - // only use for testing + // only use for testing (S3RepositoryTests) @Override protected BlobStore getBlobStore() { return super.getBlobStore(); @@ -383,11 +388,144 @@ public BlobPath basePath() { return basePath; } + @Override + public boolean isReloadable() { + return true; + } + + @Override + public void reload(RepositoryMetadata newRepositoryMetadata) { + if (isReloadable() == false) { + return; + } + + // Reload configs for S3Repository + super.reload(newRepositoryMetadata); + readRepositoryMetadata(); + + // Reload configs for S3RepositoryPlugin + service.settings(metadata); + service.releaseCachedClients(); + s3AsyncService.settings(metadata); + s3AsyncService.releaseCachedClients(); + + // Reload configs for S3BlobStore + BlobStore blobStore = getBlobStore(); + blobStore.reload(metadata); + } + + /** + * Reloads the values derived from the Repository Metadata + */ + private void readRepositoryMetadata() { + this.bucket = BUCKET_SETTING.get(metadata.settings()); + this.bufferSize = BUFFER_SIZE_SETTING.get(metadata.settings()); + this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings()); + final String basePath = BASE_PATH_SETTING.get(metadata.settings()); + if (Strings.hasLength(basePath)) { + this.basePath = new BlobPath().add(basePath); + } else { + this.basePath = BlobPath.cleanPath(); + } + + this.serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings()); + this.storageClass = STORAGE_CLASS_SETTING.get(metadata.settings()); + this.cannedACL = CANNED_ACL_SETTING.get(metadata.settings()); + this.bulkDeletesSize = BULK_DELETE_SIZE.get(metadata.settings()); + if (S3ClientSettings.checkDeprecatedCredentials(metadata.settings())) { + // provided repository settings + deprecationLogger.deprecate( + "s3_repository_secret_settings", + "Using s3 access/secret key from repository settings. Instead " + + "store these in named clients and the opensearch keystore for secure settings." 
+ ); + } + + logger.debug( + "using bucket [{}], chunk_size [{}], server_side_encryption [{}], buffer_size [{}], cannedACL [{}], storageClass [{}]", + bucket, + chunkSize, + serverSideEncryption, + bufferSize, + cannedACL, + storageClass + ); + } + + @Override + public void validateMetadata(RepositoryMetadata newRepositoryMetadata) { + super.validateMetadata(newRepositoryMetadata); + validateRepositoryMetadata(newRepositoryMetadata); + } + + private void validateRepositoryMetadata(RepositoryMetadata newRepositoryMetadata) { + Settings settings = newRepositoryMetadata.settings(); + if (BUCKET_SETTING.get(settings) == null) { + throw new RepositoryException(newRepositoryMetadata.name(), "No bucket defined for s3 repository"); + } + + // We make sure that chunkSize is greater than or equal to bufferSize + if (CHUNK_SIZE_SETTING.get(settings).getBytes() < BUFFER_SIZE_SETTING.get(settings).getBytes()) { + throw new RepositoryException( + newRepositoryMetadata.name(), + CHUNK_SIZE_SETTING.getKey() + + " (" + + CHUNK_SIZE_SETTING.get(settings) + + ") can't be lower than " + + BUFFER_SIZE_SETTING.getKey() + + " (" + + BUFFER_SIZE_SETTING.get(settings) + + ")." + ); + } + + validateStorageClass(STORAGE_CLASS_SETTING.get(settings)); + validateCannedACL(CANNED_ACL_SETTING.get(settings)); + } + + private static void validateStorageClass(String storageClassStringValue) { + if ((storageClassStringValue == null) || storageClassStringValue.equals("")) { + return; + } + + final StorageClass storageClass = StorageClass.fromValue(storageClassStringValue.toUpperCase(Locale.ENGLISH)); + if (storageClass.equals(StorageClass.GLACIER)) { + throw new BlobStoreException("Glacier storage class is not supported"); + } + + if (storageClass == StorageClass.UNKNOWN_TO_SDK_VERSION) { + throw new BlobStoreException("`" + storageClassStringValue + "` is not a valid S3 Storage Class."); + } + } + + private static void validateCannedACL(String cannedACLStringValue) { + if ((cannedACLStringValue == null) || cannedACLStringValue.equals("")) { + return; + } + + for (final ObjectCannedACL cur : ObjectCannedACL.values()) { + if (cur.toString().equalsIgnoreCase(cannedACLStringValue)) { + return; + } + } + + throw new BlobStoreException("cannedACL is not valid: [" + cannedACLStringValue + "]"); + } + @Override protected ByteSizeValue chunkSize() { return chunkSize; } + @Override + public List<Setting<?>> getRestrictedSystemRepositorySettings() { + List<Setting<?>> restrictedSettings = new ArrayList<>(); + restrictedSettings.addAll(super.getRestrictedSystemRepositorySettings()); + restrictedSettings.add(BUCKET_SETTING); + restrictedSettings.add(BASE_PATH_SETTING); + return restrictedSettings; + } + @Override protected void doClose() { final Scheduler.Cancellable cancellable = finalizationFuture.getAndSet(null); diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java index 30f792346f9be..e7d2a4d024e60 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java @@ -36,10 +36,11 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import
org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; @@ -55,6 +56,7 @@ import org.opensearch.script.ScriptService; import org.opensearch.threadpool.ExecutorBuilder; import org.opensearch.threadpool.FixedExecutorBuilder; +import org.opensearch.threadpool.ScalingExecutorBuilder; import org.opensearch.threadpool.ThreadPool; import org.opensearch.watcher.ResourceWatcherService; @@ -73,6 +75,9 @@ * A plugin to add a repository type that writes to and from the AWS S3. */ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin, ReloadablePlugin { + + private static final String URGENT_FUTURE_COMPLETION = "urgent_future_completion"; + private static final String URGENT_STREAM_READER = "urgent_stream_reader"; private static final String PRIORITY_FUTURE_COMPLETION = "priority_future_completion"; private static final String PRIORITY_STREAM_READER = "priority_stream_reader"; private static final String FUTURE_COMPLETION = "future_completion"; @@ -83,6 +88,7 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin, Relo private final Path configPath; + private AsyncExecutorContainer urgentExecutorBuilder; private AsyncExecutorContainer priorityExecutorBuilder; private AsyncExecutorContainer normalExecutorBuilder; @@ -93,17 +99,34 @@ public S3RepositoryPlugin(final Settings settings, final Path configPath) { @Override public List<ExecutorBuilder<?>> getExecutorBuilders(Settings settings) { List<ExecutorBuilder<?>> executorBuilders = new ArrayList<>(); + int halfProc = halfNumberOfProcessors(allocatedProcessors(settings)); + executorBuilders.add( + new FixedExecutorBuilder(settings, URGENT_FUTURE_COMPLETION, urgentPoolCount(settings), 10_000, URGENT_FUTURE_COMPLETION) + ); + executorBuilders.add(new ScalingExecutorBuilder(URGENT_STREAM_READER, 1, halfProc, TimeValue.timeValueMinutes(5))); + executorBuilders.add( + new ScalingExecutorBuilder(PRIORITY_FUTURE_COMPLETION, 1, allocatedProcessors(settings), TimeValue.timeValueMinutes(5)) + ); + executorBuilders.add(new ScalingExecutorBuilder(PRIORITY_STREAM_READER, 1, halfProc, TimeValue.timeValueMinutes(5))); + executorBuilders.add( - new FixedExecutorBuilder(settings, PRIORITY_FUTURE_COMPLETION, priorityPoolCount(settings), 10_000, PRIORITY_FUTURE_COMPLETION) + new ScalingExecutorBuilder(FUTURE_COMPLETION, 1, allocatedProcessors(settings), TimeValue.timeValueMinutes(5)) ); executorBuilders.add( - new FixedExecutorBuilder(settings, PRIORITY_STREAM_READER, priorityPoolCount(settings), 10_000, PRIORITY_STREAM_READER) + new ScalingExecutorBuilder( + STREAM_READER, + allocatedProcessors(settings), + 4 * allocatedProcessors(settings), + TimeValue.timeValueMinutes(5) + ) ); - executorBuilders.add(new FixedExecutorBuilder(settings, FUTURE_COMPLETION, normalPoolCount(settings), 10_000, FUTURE_COMPLETION)); - executorBuilders.add(new FixedExecutorBuilder(settings, STREAM_READER, normalPoolCount(settings), 10_000, STREAM_READER)); return executorBuilders; } + static int halfNumberOfProcessors(int numberOfProcessors) { + return (numberOfProcessors + 1) / 2; + } + S3RepositoryPlugin(final Settings settings, final Path configPath, final S3Service service, final S3AsyncService 
s3AsyncService) { this.service = Objects.requireNonNull(service, "S3 service must not be null"); this.configPath = configPath; @@ -122,6 +145,10 @@ private static int allocatedProcessors(Settings settings) { return OpenSearchExecutors.allocatedProcessors(settings); } + private static int urgentPoolCount(Settings settings) { + return boundedBy((allocatedProcessors(settings) + 7) / 8, 1, 2); + } + private static int priorityPoolCount(Settings settings) { return boundedBy((allocatedProcessors(settings) + 1) / 2, 2, 4); } @@ -144,8 +171,14 @@ public Collection<Object> createComponents( final IndexNameExpressionResolver expressionResolver, final Supplier<RepositoriesService> repositoriesServiceSupplier ) { + int urgentEventLoopThreads = urgentPoolCount(clusterService.getSettings()); int priorityEventLoopThreads = priorityPoolCount(clusterService.getSettings()); int normalEventLoopThreads = normalPoolCount(clusterService.getSettings()); + this.urgentExecutorBuilder = new AsyncExecutorContainer( + threadPool.executor(URGENT_FUTURE_COMPLETION), + threadPool.executor(URGENT_STREAM_READER), + new AsyncTransferEventLoopGroup(urgentEventLoopThreads) + ); this.priorityExecutorBuilder = new AsyncExecutorContainer( threadPool.executor(PRIORITY_FUTURE_COMPLETION), threadPool.executor(PRIORITY_STREAM_READER), @@ -170,7 +203,8 @@ protected S3Repository createRepository( AsyncTransferManager asyncUploadUtils = new AsyncTransferManager( S3Repository.PARALLEL_MULTIPART_UPLOAD_MINIMUM_PART_SIZE_SETTING.get(clusterService.getSettings()).getBytes(), normalExecutorBuilder.getStreamReader(), - priorityExecutorBuilder.getStreamReader() + priorityExecutorBuilder.getStreamReader(), + urgentExecutorBuilder.getStreamReader() ); return new S3Repository( metadata, @@ -179,10 +213,12 @@ protected S3Repository createRepository( clusterService, recoverySettings, asyncUploadUtils, + urgentExecutorBuilder, priorityExecutorBuilder, normalExecutorBuilder, s3AsyncService, - S3Repository.PARALLEL_MULTIPART_UPLOAD_ENABLED_SETTING.get(clusterService.getSettings()) + S3Repository.PARALLEL_MULTIPART_UPLOAD_ENABLED_SETTING.get(clusterService.getSettings()), + configPath ); } @@ -225,7 +261,9 @@ public List<Setting<?>> getSettings() { S3ClientSettings.IDENTITY_TOKEN_FILE_SETTING, S3ClientSettings.ROLE_SESSION_NAME_SETTING, S3Repository.PARALLEL_MULTIPART_UPLOAD_MINIMUM_PART_SIZE_SETTING, - S3Repository.PARALLEL_MULTIPART_UPLOAD_ENABLED_SETTING + S3Repository.PARALLEL_MULTIPART_UPLOAD_ENABLED_SETTING, + S3Repository.REDIRECT_LARGE_S3_UPLOAD, + S3Repository.UPLOAD_RETRY_ENABLED ); } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java index e4c0089fdbe69..d7e47e0ab1bcc 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java @@ -31,18 +31,18 @@ package org.opensearch.repositories.s3; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.common.collect.Tuple; -import org.opensearch.common.util.io.IOUtils; -import org.opensearch.repositories.s3.utils.HttpRangeUtils; import software.amazon.awssdk.core.ResponseInputStream; import software.amazon.awssdk.core.exception.SdkException; import 
software.amazon.awssdk.services.s3.model.GetObjectRequest; import software.amazon.awssdk.services.s3.model.GetObjectResponse; import software.amazon.awssdk.services.s3.model.S3Exception; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.repositories.s3.utils.HttpRangeUtils; + import java.io.IOException; import java.io.InputStream; import java.nio.file.NoSuchFileException; @@ -54,7 +54,7 @@ * Wrapper around an S3 object that will retry the {@link GetObjectRequest} if the download fails part-way through, resuming from where * the failure occurred. This should be handled by the SDK but it isn't today. This should be revisited in the future (e.g. before removing * the {@code LegacyESVersion#V_7_0_0} version constant) and removed when the SDK handles retries itself. - * + * <p> * See https://github.com/aws/aws-sdk-java/issues/856 for the related SDK issue */ class S3RetryingInputStream extends InputStream { @@ -119,7 +119,7 @@ private void openStream() throws IOException { ); this.currentStreamLastOffset = Math.addExact( Math.addExact(start, currentOffset), - getStreamLength(getObjectResponseInputStream.response()) + getObjectResponseInputStream.response().contentLength() ); this.currentStream = getObjectResponseInputStream; this.isStreamAborted.set(false); @@ -133,29 +133,6 @@ private void openStream() throws IOException { } } - private long getStreamLength(final GetObjectResponse getObjectResponse) { - try { - // Returns the content range of the object if response contains the Content-Range header. - if (getObjectResponse.contentRange() != null) { - final Tuple<Long, Long> s3ResponseRange = HttpRangeUtils.fromHttpRangeHeader(getObjectResponse.contentRange()); - assert s3ResponseRange.v2() >= s3ResponseRange.v1() : s3ResponseRange.v2() + " vs " + s3ResponseRange.v1(); - assert s3ResponseRange.v1() == start + currentOffset : "Content-Range start value [" - + s3ResponseRange.v1() - + "] exceeds start [" - + start - + "] + current offset [" - + currentOffset - + ']'; - assert s3ResponseRange.v2() == end : "Content-Range end value [" + s3ResponseRange.v2() + "] exceeds end [" + end + ']'; - return s3ResponseRange.v2() - s3ResponseRange.v1() + 1L; - } - return getObjectResponse.contentLength(); - } catch (Exception e) { - assert false : e; - return Long.MAX_VALUE - 1L; // assume a large stream so that the underlying stream is aborted on closing, unless eof is reached - } - } - @Override public int read() throws IOException { ensureOpen(); diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java index c13e5b76b9269..fe81da31432f4 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java @@ -32,20 +32,6 @@ package org.opensearch.repositories.s3; -import org.apache.http.conn.ssl.DefaultHostnameVerifier; -import org.apache.http.conn.ssl.SSLConnectionSocketFactory; -import org.apache.http.protocol.HttpContext; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.cluster.metadata.RepositoryMetadata; -import org.opensearch.common.Nullable; -import org.opensearch.common.SuppressForbidden; -import org.opensearch.common.collect.MapBuilder; 
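The S3RetryingInputStream hunk above inlines the stream-length computation, but the resume behavior it documents is unchanged: on failure, the object is reopened with a Range request starting at the first undelivered byte. A sketch of that arithmetic with hypothetical values (GetObjectRequest.Builder.range is the real SDK v2 API):

import software.amazon.awssdk.services.s3.model.GetObjectRequest;

final class ResumeSketch {
    static GetObjectRequest resume(final String bucket, final String key, final long start, final long currentOffset) {
        // Reopen from where the failed attempt stopped; an open-ended
        // range reads to the end of the object.
        final String range = "bytes=" + (start + currentOffset) + "-";
        return GetObjectRequest.builder().bucket(bucket).key(key).range(range).build();
    }
}

currentStreamLastOffset then becomes start + currentOffset plus the contentLength of the reopened stream, which is exactly the expression the hunk now uses.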
-import org.opensearch.common.settings.Settings; -import org.opensearch.core.common.Strings; -import org.opensearch.repositories.s3.S3ClientSettings.IrsaCredentials; -import org.opensearch.repositories.s3.utils.Protocol; -import org.opensearch.repositories.s3.utils.AwsRequestSigner; import software.amazon.awssdk.auth.credentials.AwsCredentials; import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; import software.amazon.awssdk.auth.credentials.ContainerCredentialsProvider; @@ -72,7 +58,23 @@ import software.amazon.awssdk.services.sts.auth.StsWebIdentityTokenFileCredentialsProvider; import software.amazon.awssdk.services.sts.model.AssumeRoleRequest; +import org.apache.http.conn.ssl.DefaultHostnameVerifier; +import org.apache.http.conn.ssl.SSLConnectionSocketFactory; +import org.apache.http.protocol.HttpContext; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.common.Nullable; +import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.collect.MapBuilder; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.Strings; +import org.opensearch.repositories.s3.S3ClientSettings.IrsaCredentials; +import org.opensearch.repositories.s3.utils.AwsRequestSigner; +import org.opensearch.repositories.s3.utils.Protocol; + import javax.net.ssl.SSLContext; + import java.io.Closeable; import java.io.IOException; import java.net.Authenticator; @@ -88,6 +90,7 @@ import java.security.SecureRandom; import java.time.Duration; import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; import static java.util.Collections.emptyMap; @@ -98,7 +101,7 @@ class S3Service implements Closeable { private static final String DEFAULT_S3_ENDPOINT = "s3.amazonaws.com"; - private volatile Map<S3ClientSettings, AmazonS3Reference> clientsCache = emptyMap(); + private volatile Map<S3ClientSettings, AmazonS3Reference> clientsCache = new ConcurrentHashMap<>(); /** * Client settings calculated from static configuration and settings in the keystore. @@ -109,7 +112,7 @@ class S3Service implements Closeable { * Client settings derived from those in {@link #staticClientSettings} by combining them with settings * in the {@link RepositoryMetadata}. 
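Replacing the immutable clientsCache/derivedClientSettings maps with ConcurrentHashMap lets the reload path repopulate entries while readers concurrently resolve clients. A hedged sketch of the read-through pattern this enables (the real S3Service additionally reference-counts each AmazonS3Reference):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

final class ClientCacheSketch<K, V> {
    private final Map<K, V> cache = new ConcurrentHashMap<>();

    V getOrCompute(final K key, final Function<K, V> factory) {
        // computeIfAbsent is atomic per key, so concurrent callers never
        // build two clients for the same settings object.
        return cache.computeIfAbsent(key, factory);
    }

    void invalidateAll() {
        cache.clear(); // reload path; the real code also releases references
    }
}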
*/ - private volatile Map<Settings, S3ClientSettings> derivedClientSettings = emptyMap(); + private volatile Map<Settings, S3ClientSettings> derivedClientSettings = new ConcurrentHashMap<>(); S3Service(final Path configPath) { staticClientSettings = MapBuilder.<String, S3ClientSettings>newMapBuilder() @@ -276,6 +279,8 @@ protected PasswordAuthentication getPasswordAuthentication() { } clientBuilder.socketTimeout(Duration.ofMillis(clientSettings.readTimeoutMillis)); + clientBuilder.maxConnections(clientSettings.maxSyncConnections); + clientBuilder.connectionAcquisitionTimeout(Duration.ofMillis(clientSettings.connectionAcquisitionTimeoutMillis)); return clientBuilder; } @@ -435,7 +440,7 @@ private static IrsaCredentials buildFromEnviroment(IrsaCredentials defaults) { return new IrsaCredentials(webIdentityTokenFile, roleArn, roleSessionName); } - private synchronized void releaseCachedClients() { + public synchronized void releaseCachedClients() { // the clients will shutdown when they will not be used anymore for (final AmazonS3Reference clientReference : clientsCache.values()) { clientReference.decRef(); diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/SocketAccess.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/SocketAccess.java index 4888764dbc720..f88aa46e61806 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/SocketAccess.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/SocketAccess.java @@ -46,6 +46,7 @@ * {@link SocketPermission} 'connect' to establish connections. This class wraps the operations requiring access in * {@link AccessController#doPrivileged(PrivilegedAction)} blocks. */ +@SuppressWarnings("removal") public final class SocketAccess { private SocketAccess() {} diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/StatsMetricPublisher.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/StatsMetricPublisher.java index cad0037f99249..8d2772d42ebca 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/StatsMetricPublisher.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/StatsMetricPublisher.java @@ -8,30 +8,87 @@ package org.opensearch.repositories.s3; -import software.amazon.awssdk.http.HttpMetric; import software.amazon.awssdk.metrics.MetricCollection; import software.amazon.awssdk.metrics.MetricPublisher; +import software.amazon.awssdk.metrics.MetricRecord; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.blobstore.BlobStore; + +import java.time.Duration; import java.util.HashMap; import java.util.Map; import java.util.concurrent.atomic.AtomicLong; public class StatsMetricPublisher { + private static final Logger LOGGER = LogManager.getLogger(StatsMetricPublisher.class); private final Stats stats = new Stats(); + private final Map<BlobStore.Metric, Stats> extendedStats = new HashMap<>() { + { + put(BlobStore.Metric.REQUEST_LATENCY, new Stats()); + put(BlobStore.Metric.REQUEST_SUCCESS, new Stats()); + put(BlobStore.Metric.REQUEST_FAILURE, new Stats()); + put(BlobStore.Metric.RETRY_COUNT, new Stats()); + } + }; + public MetricPublisher listObjectsMetricPublisher = new MetricPublisher() { @Override public void publish(MetricCollection metricCollection) { - stats.listCount.addAndGet( - metricCollection.children() - .stream() - .filter( - metricRecords -> 
metricRecords.name().equals("ApiCallAttempt") - && !metricRecords.metricValues(HttpMetric.HTTP_STATUS_CODE).isEmpty() - ) - .count() - ); + LOGGER.debug(() -> "List objects request metrics: " + metricCollection); + for (MetricRecord<?> metricRecord : metricCollection) { + switch (metricRecord.metric().name()) { + case "ApiCallDuration": + extendedStats.get(BlobStore.Metric.REQUEST_LATENCY).listMetrics.addAndGet( + ((Duration) metricRecord.value()).toMillis() + ); + break; + case "RetryCount": + extendedStats.get(BlobStore.Metric.RETRY_COUNT).listMetrics.addAndGet(((Integer) metricRecord.value())); + break; + case "ApiCallSuccessful": + if ((Boolean) metricRecord.value()) { + extendedStats.get(BlobStore.Metric.REQUEST_SUCCESS).listMetrics.addAndGet(1); + } else { + extendedStats.get(BlobStore.Metric.REQUEST_FAILURE).listMetrics.addAndGet(1); + } + stats.listMetrics.addAndGet(1); + break; + } + } + } + + @Override + public void close() {} + }; + + public MetricPublisher deleteObjectsMetricPublisher = new MetricPublisher() { + @Override + public void publish(MetricCollection metricCollection) { + LOGGER.debug(() -> "Delete objects request metrics: " + metricCollection); + for (MetricRecord<?> metricRecord : metricCollection) { + switch (metricRecord.metric().name()) { + case "ApiCallDuration": + extendedStats.get(BlobStore.Metric.REQUEST_LATENCY).deleteMetrics.addAndGet( + ((Duration) metricRecord.value()).toMillis() + ); + break; + case "RetryCount": + extendedStats.get(BlobStore.Metric.RETRY_COUNT).deleteMetrics.addAndGet(((Integer) metricRecord.value())); + break; + case "ApiCallSuccessful": + if ((Boolean) metricRecord.value()) { + extendedStats.get(BlobStore.Metric.REQUEST_SUCCESS).deleteMetrics.addAndGet(1); + } else { + extendedStats.get(BlobStore.Metric.REQUEST_FAILURE).deleteMetrics.addAndGet(1); + } + stats.deleteMetrics.addAndGet(1); + break; + } + } } @Override @@ -41,15 +98,27 @@ public void close() {} public MetricPublisher getObjectMetricPublisher = new MetricPublisher() { @Override public void publish(MetricCollection metricCollection) { - stats.getCount.addAndGet( - metricCollection.children() - .stream() - .filter( - metricRecords -> metricRecords.name().equals("ApiCallAttempt") - && !metricRecords.metricValues(HttpMetric.HTTP_STATUS_CODE).isEmpty() - ) - .count() - ); + LOGGER.debug(() -> "Get object request metrics: " + metricCollection); + for (MetricRecord<?> metricRecord : metricCollection) { + switch (metricRecord.metric().name()) { + case "ApiCallDuration": + extendedStats.get(BlobStore.Metric.REQUEST_LATENCY).getMetrics.addAndGet( + ((Duration) metricRecord.value()).toMillis() + ); + break; + case "RetryCount": + extendedStats.get(BlobStore.Metric.RETRY_COUNT).getMetrics.addAndGet(((Integer) metricRecord.value())); + break; + case "ApiCallSuccessful": + if ((Boolean) metricRecord.value()) { + extendedStats.get(BlobStore.Metric.REQUEST_SUCCESS).getMetrics.addAndGet(1); + } else { + extendedStats.get(BlobStore.Metric.REQUEST_FAILURE).getMetrics.addAndGet(1); + } + stats.getMetrics.addAndGet(1); + break; + } + } } @Override @@ -59,15 +128,27 @@ public void close() {} public MetricPublisher putObjectMetricPublisher = new MetricPublisher() { @Override public void publish(MetricCollection metricCollection) { - stats.putCount.addAndGet( - metricCollection.children() - .stream() - .filter( - metricRecords -> metricRecords.name().equals("ApiCallAttempt") - && !metricRecords.metricValues(HttpMetric.HTTP_STATUS_CODE).isEmpty() - ) - .count() - ); + LOGGER.debug(() -> "Put 
object request metrics: " + metricCollection); + for (MetricRecord<?> metricRecord : metricCollection) { + switch (metricRecord.metric().name()) { + case "ApiCallDuration": + extendedStats.get(BlobStore.Metric.REQUEST_LATENCY).putMetrics.addAndGet( + ((Duration) metricRecord.value()).toMillis() + ); + break; + case "RetryCount": + extendedStats.get(BlobStore.Metric.RETRY_COUNT).putMetrics.addAndGet(((Integer) metricRecord.value())); + break; + case "ApiCallSuccessful": + if ((Boolean) metricRecord.value()) { + extendedStats.get(BlobStore.Metric.REQUEST_SUCCESS).putMetrics.addAndGet(1); + } else { + extendedStats.get(BlobStore.Metric.REQUEST_FAILURE).putMetrics.addAndGet(1); + } + stats.putMetrics.addAndGet(1); + break; + } + } } @Override @@ -77,15 +158,27 @@ public void close() {} public MetricPublisher multipartUploadMetricCollector = new MetricPublisher() { @Override public void publish(MetricCollection metricCollection) { - stats.postCount.addAndGet( - metricCollection.children() - .stream() - .filter( - metricRecords -> metricRecords.name().equals("ApiCallAttempt") - && !metricRecords.metricValues(HttpMetric.HTTP_STATUS_CODE).isEmpty() - ) - .count() - ); + LOGGER.debug(() -> "Multi-part request metrics: " + metricCollection); + for (MetricRecord<?> metricRecord : metricCollection) { + switch (metricRecord.metric().name()) { + case "ApiCallDuration": + extendedStats.get(BlobStore.Metric.REQUEST_LATENCY).multiPartPutMetrics.addAndGet( + ((Duration) metricRecord.value()).toMillis() + ); + break; + case "RetryCount": + extendedStats.get(BlobStore.Metric.RETRY_COUNT).multiPartPutMetrics.addAndGet(((Integer) metricRecord.value())); + break; + case "ApiCallSuccessful": + if ((Boolean) metricRecord.value()) { + extendedStats.get(BlobStore.Metric.REQUEST_SUCCESS).multiPartPutMetrics.addAndGet(1); + } else { + extendedStats.get(BlobStore.Metric.REQUEST_FAILURE).multiPartPutMetrics.addAndGet(1); + } + stats.multiPartPutMetrics.addAndGet(1); + break; + } + } } @Override @@ -96,22 +189,29 @@ public Stats getStats() { return stats; } + public Map<BlobStore.Metric, Stats> getExtendedStats() { + return extendedStats; + } + static class Stats { - final AtomicLong listCount = new AtomicLong(); + final AtomicLong listMetrics = new AtomicLong(); + + final AtomicLong getMetrics = new AtomicLong(); - final AtomicLong getCount = new AtomicLong(); + final AtomicLong putMetrics = new AtomicLong(); - final AtomicLong putCount = new AtomicLong(); + final AtomicLong deleteMetrics = new AtomicLong(); - final AtomicLong postCount = new AtomicLong(); + final AtomicLong multiPartPutMetrics = new AtomicLong(); Map<String, Long> toMap() { final Map<String, Long> results = new HashMap<>(); - results.put("GetObject", getCount.get()); - results.put("ListObjects", listCount.get()); - results.put("PutObject", putCount.get()); - results.put("PutMultipartObject", postCount.get()); + results.put("GetObject", getMetrics.get()); + results.put("ListObjects", listMetrics.get()); + results.put("PutObject", putMetrics.get()); + results.put("DeleteObjects", deleteMetrics.get()); + results.put("PutMultipartObject", multiPartPutMetrics.get()); return results; } } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java index b6af91a08ac2b..b4c4ed0ecaa75 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java +++ 
b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java @@ -8,14 +8,6 @@ package org.opensearch.repositories.s3.async; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.common.StreamContext; -import org.opensearch.common.blobstore.stream.write.WritePriority; -import org.opensearch.common.io.InputStreamContainer; -import org.opensearch.repositories.s3.SocketAccess; -import org.opensearch.repositories.s3.io.CheckedContainer; import software.amazon.awssdk.core.async.AsyncRequestBody; import software.amazon.awssdk.services.s3.S3AsyncClient; import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; @@ -25,7 +17,19 @@ import software.amazon.awssdk.services.s3.model.UploadPartResponse; import software.amazon.awssdk.utils.CompletableFutureUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.common.StreamContext; +import org.opensearch.common.blobstore.stream.write.WritePriority; +import org.opensearch.common.io.InputStreamContainer; +import org.opensearch.repositories.s3.SocketAccess; +import org.opensearch.repositories.s3.StatsMetricPublisher; +import org.opensearch.repositories.s3.io.CheckedContainer; + +import java.io.BufferedInputStream; import java.io.IOException; +import java.io.InputStream; import java.util.ArrayList; import java.util.List; import java.util.concurrent.CompletableFuture; @@ -44,11 +48,13 @@ public class AsyncPartsHandler { * @param s3AsyncClient S3 client to use for upload * @param executorService Thread pool for regular upload * @param priorityExecutorService Thread pool for priority uploads + * @param urgentExecutorService Thread pool for urgent uploads * @param uploadRequest request for upload * @param streamContext Stream context used in supplying individual file parts * @param uploadId Upload Id against which multi-part is being performed * @param completedParts Reference of completed parts * @param inputStreamContainers Checksum containers + * @param statsMetricPublisher sdk metric publisher * @return list of completable futures * @throws IOException thrown in case of an IO error */ @@ -56,11 +62,14 @@ public static List<CompletableFuture<CompletedPart>> uploadParts( S3AsyncClient s3AsyncClient, ExecutorService executorService, ExecutorService priorityExecutorService, + ExecutorService urgentExecutorService, UploadRequest uploadRequest, StreamContext streamContext, String uploadId, AtomicReferenceArray<CompletedPart> completedParts, - AtomicReferenceArray<CheckedContainer> inputStreamContainers + AtomicReferenceArray<CheckedContainer> inputStreamContainers, + StatsMetricPublisher statsMetricPublisher, + boolean uploadRetryEnabled ) throws IOException { List<CompletableFuture<CompletedPart>> futures = new ArrayList<>(); for (int partIdx = 0; partIdx < streamContext.getNumberOfParts(); partIdx++) { @@ -71,6 +80,7 @@ public static List<CompletableFuture<CompletedPart>> uploadParts( .partNumber(partIdx + 1) .key(uploadRequest.getKey()) .uploadId(uploadId) + .overrideConfiguration(o -> o.addMetricPublisher(statsMetricPublisher.multipartUploadMetricCollector)) .contentLength(inputStreamContainer.getContentLength()); if (uploadRequest.doRemoteDataIntegrityCheck()) { uploadPartRequestBuilder.checksumAlgorithm(ChecksumAlgorithm.CRC32); @@ -79,12 +89,14 @@ public static 
List<CompletableFuture<CompletedPart>> uploadParts( s3AsyncClient, executorService, priorityExecutorService, + urgentExecutorService, completedParts, inputStreamContainers, futures, uploadPartRequestBuilder.build(), inputStreamContainer, - uploadRequest + uploadRequest, + uploadRetryEnabled ); } @@ -121,42 +133,78 @@ public static void cleanUpParts(S3AsyncClient s3AsyncClient, UploadRequest uploa })); } + public static InputStream maybeRetryInputStream( + InputStream inputStream, + WritePriority writePriority, + boolean uploadRetryEnabled, + long contentLength + ) { + if (uploadRetryEnabled == true && (writePriority == WritePriority.HIGH || writePriority == WritePriority.URGENT)) { + return new BufferedInputStream(inputStream, (int) (contentLength + 1)); + } + return inputStream; + } + private static void uploadPart( S3AsyncClient s3AsyncClient, ExecutorService executorService, ExecutorService priorityExecutorService, + ExecutorService urgentExecutorService, AtomicReferenceArray<CompletedPart> completedParts, AtomicReferenceArray<CheckedContainer> inputStreamContainers, List<CompletableFuture<CompletedPart>> futures, UploadPartRequest uploadPartRequest, InputStreamContainer inputStreamContainer, - UploadRequest uploadRequest + UploadRequest uploadRequest, + boolean uploadRetryEnabled ) { Integer partNumber = uploadPartRequest.partNumber(); - ExecutorService streamReadExecutor = uploadRequest.getWritePriority() == WritePriority.HIGH - ? priorityExecutorService - : executorService; + ExecutorService streamReadExecutor; + if (uploadRequest.getWritePriority() == WritePriority.URGENT) { + streamReadExecutor = urgentExecutorService; + } else if (uploadRequest.getWritePriority() == WritePriority.HIGH) { + streamReadExecutor = priorityExecutorService; + } else { + streamReadExecutor = executorService; + } + + InputStream inputStream = maybeRetryInputStream( + inputStreamContainer.getInputStream(), + uploadRequest.getWritePriority(), + uploadRetryEnabled, + uploadPartRequest.contentLength() + ); CompletableFuture<UploadPartResponse> uploadPartResponseFuture = SocketAccess.doPrivileged( () -> s3AsyncClient.uploadPart( uploadPartRequest, - AsyncRequestBody.fromInputStream( - inputStreamContainer.getInputStream(), - inputStreamContainer.getContentLength(), - streamReadExecutor - ) + AsyncRequestBody.fromInputStream(inputStream, inputStreamContainer.getContentLength(), streamReadExecutor) ) ); - CompletableFuture<CompletedPart> convertFuture = uploadPartResponseFuture.thenApply( - uploadPartResponse -> convertUploadPartResponse( - completedParts, - inputStreamContainers, - uploadPartResponse, - partNumber, - uploadRequest.doRemoteDataIntegrityCheck() - ) - ); + CompletableFuture<CompletedPart> convertFuture = uploadPartResponseFuture.whenComplete((resp, throwable) -> { + try { + inputStream.close(); + } catch (IOException ex) { + log.error( + () -> new ParameterizedMessage( + "Failed to close stream while uploading a part of idx {} and file {}.", + uploadPartRequest.partNumber(), + uploadPartRequest.key() + ), + ex + ); + } + }) + .thenApply( + uploadPartResponse -> convertUploadPartResponse( + completedParts, + inputStreamContainers, + uploadPartResponse, + partNumber, + uploadRequest.doRemoteDataIntegrityCheck() + ) + ); futures.add(convertFuture); CompletableFutureUtils.forwardExceptionTo(convertFuture, uploadPartResponseFuture); diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferEventLoopGroup.java 
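The maybeRetryInputStream change above is what makes HIGH and URGENT priority uploads replayable: wrapping the part stream in a BufferedInputStream sized one byte larger than the part gives the SDK a mark/reset-capable body, so a retried request can rewind and resend the same bytes, at the cost of holding the whole part on heap. A small demonstration of the mechanism, using an illustrative one-shot source in place of a real network-backed stream:

import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;

public final class ReplayablePartDemo {
    public static void main(String[] args) throws IOException {
        byte[] part = "part-payload".getBytes();
        // Simulate a source that cannot rewind on its own, as a network stream cannot.
        InputStream oneShot = new FilterInputStream(new ByteArrayInputStream(part)) {
            @Override
            public boolean markSupported() {
                return false;
            }
        };
        // contentLength + 1 guarantees the whole part fits in the buffer between mark and reset.
        InputStream replayable = new BufferedInputStream(oneShot, part.length + 1);

        replayable.mark(part.length + 1);                 // remember the start of the part
        byte[] firstAttempt = replayable.readAllBytes();  // a failed attempt drains the stream
        replayable.reset();                               // the retry rewinds to the mark...
        byte[] secondAttempt = replayable.readAllBytes(); // ...and replays identical bytes

        System.out.println(firstAttempt.length == secondAttempt.length); // true
    }
}

This buffering is also why, as a later hunk shows, retry-enabled uploads cap the part size: the retained copy must stay small enough to keep on heap.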
b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferEventLoopGroup.java index 381a9671d669a..7a99989796444 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferEventLoopGroup.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferEventLoopGroup.java @@ -8,11 +8,6 @@ package org.opensearch.repositories.s3.async; -import io.netty.channel.EventLoopGroup; -import io.netty.channel.epoll.Epoll; -import io.netty.channel.epoll.EpollEventLoopGroup; -import io.netty.channel.nio.NioEventLoopGroup; -import io.netty.util.concurrent.Future; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -22,6 +17,12 @@ import java.io.Closeable; import java.util.concurrent.TimeUnit; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.epoll.Epoll; +import io.netty.channel.epoll.EpollEventLoopGroup; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.util.concurrent.Future; + /** * AsyncTransferEventLoopGroup is an encapsulation for netty {@link EventLoopGroup} */ diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java index 5b43ae84c51dc..2259780c95276 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java @@ -8,19 +8,6 @@ package org.opensearch.repositories.s3.async; -import com.jcraft.jzlib.JZlib; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.ExceptionsHelper; -import org.opensearch.common.StreamContext; -import org.opensearch.common.blobstore.exception.CorruptFileException; -import org.opensearch.common.blobstore.stream.write.WritePriority; -import org.opensearch.common.io.InputStreamContainer; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.util.ByteUtils; -import org.opensearch.repositories.s3.io.CheckedContainer; -import org.opensearch.repositories.s3.SocketAccess; import software.amazon.awssdk.core.async.AsyncRequestBody; import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.http.HttpStatusCode; @@ -37,7 +24,23 @@ import software.amazon.awssdk.services.s3.model.S3Exception; import software.amazon.awssdk.utils.CompletableFutureUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.ExceptionsHelper; +import org.opensearch.common.StreamContext; +import org.opensearch.common.blobstore.exception.CorruptFileException; +import org.opensearch.common.blobstore.stream.write.WritePriority; +import org.opensearch.common.io.InputStreamContainer; +import org.opensearch.common.util.ByteUtils; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.repositories.s3.SocketAccess; +import org.opensearch.repositories.s3.StatsMetricPublisher; +import org.opensearch.repositories.s3.io.CheckedContainer; + import java.io.IOException; +import java.io.InputStream; import java.util.Arrays; 
import java.util.Base64; import java.util.List; @@ -49,6 +52,8 @@ import java.util.function.Supplier; import java.util.stream.IntStream; +import com.jcraft.jzlib.JZlib; + /** * A helper class that automatically uses multipart upload based on the size of the source object */ @@ -56,6 +61,7 @@ public final class AsyncTransferManager { private static final Logger log = LogManager.getLogger(AsyncTransferManager.class); private final ExecutorService executorService; private final ExecutorService priorityExecutorService; + private final ExecutorService urgentExecutorService; private final long minimumPartSize; /** @@ -70,10 +76,16 @@ public final class AsyncTransferManager { * @param executorService The stream reader {@link ExecutorService} for normal priority uploads * @param priorityExecutorService The stream read {@link ExecutorService} for high priority uploads */ - public AsyncTransferManager(long minimumPartSize, ExecutorService executorService, ExecutorService priorityExecutorService) { + public AsyncTransferManager( + long minimumPartSize, + ExecutorService executorService, + ExecutorService priorityExecutorService, + ExecutorService urgentExecutorService + ) { this.executorService = executorService; this.priorityExecutorService = priorityExecutorService; this.minimumPartSize = minimumPartSize; + this.urgentExecutorService = urgentExecutorService; } /** @@ -84,16 +96,21 @@ public AsyncTransferManager(long minimumPartSize, ExecutorService executorServic * @param streamContext The {@link StreamContext} to supply streams during upload * @return A {@link CompletableFuture} to listen for upload completion */ - public CompletableFuture<Void> uploadObject(S3AsyncClient s3AsyncClient, UploadRequest uploadRequest, StreamContext streamContext) { + public CompletableFuture<Void> uploadObject( + S3AsyncClient s3AsyncClient, + UploadRequest uploadRequest, + StreamContext streamContext, + StatsMetricPublisher statsMetricPublisher + ) { CompletableFuture<Void> returnFuture = new CompletableFuture<>(); try { if (streamContext.getNumberOfParts() == 1) { log.debug(() -> "Starting the upload as a single upload part request"); - uploadInOneChunk(s3AsyncClient, uploadRequest, streamContext.provideStream(0), returnFuture); + uploadInOneChunk(s3AsyncClient, uploadRequest, streamContext.provideStream(0), returnFuture, statsMetricPublisher); } else { log.debug(() -> "Starting the upload as multipart upload request"); - uploadInParts(s3AsyncClient, uploadRequest, streamContext, returnFuture); + uploadInParts(s3AsyncClient, uploadRequest, streamContext, returnFuture, statsMetricPublisher); } } catch (Throwable throwable) { returnFuture.completeExceptionally(throwable); @@ -106,12 +123,14 @@ private void uploadInParts( S3AsyncClient s3AsyncClient, UploadRequest uploadRequest, StreamContext streamContext, - CompletableFuture<Void> returnFuture + CompletableFuture<Void> returnFuture, + StatsMetricPublisher statsMetricPublisher ) { CreateMultipartUploadRequest.Builder createMultipartUploadRequestBuilder = CreateMultipartUploadRequest.builder() .bucket(uploadRequest.getBucket()) - .key(uploadRequest.getKey()); + .key(uploadRequest.getKey()) + .overrideConfiguration(o -> o.addMetricPublisher(statsMetricPublisher.multipartUploadMetricCollector)); if (uploadRequest.doRemoteDataIntegrityCheck()) { createMultipartUploadRequestBuilder.checksumAlgorithm(ChecksumAlgorithm.CRC32); } @@ -127,7 +146,14 @@ private void uploadInParts( handleException(returnFuture, () -> "Failed to initiate multipart upload", throwable); } else { 
log.debug(() -> "Initiated new multipart upload, uploadId: " + createMultipartUploadResponse.uploadId()); - doUploadInParts(s3AsyncClient, uploadRequest, streamContext, returnFuture, createMultipartUploadResponse.uploadId()); + doUploadInParts( + s3AsyncClient, + uploadRequest, + streamContext, + returnFuture, + createMultipartUploadResponse.uploadId(), + statsMetricPublisher + ); } }); } @@ -137,7 +163,8 @@ private void doUploadInParts( UploadRequest uploadRequest, StreamContext streamContext, CompletableFuture<Void> returnFuture, - String uploadId + String uploadId, + StatsMetricPublisher statsMetricPublisher ) { // The list of completed parts must be sorted @@ -150,11 +177,14 @@ private void doUploadInParts( s3AsyncClient, executorService, priorityExecutorService, + urgentExecutorService, uploadRequest, streamContext, uploadId, completedParts, - inputStreamContainers + inputStreamContainers, + statsMetricPublisher, + uploadRequest.isUploadRetryEnabled() ); } catch (Exception ex) { try { @@ -178,7 +208,7 @@ private void doUploadInParts( } return null; }) - .thenCompose(ignore -> completeMultipartUpload(s3AsyncClient, uploadRequest, uploadId, completedParts)) + .thenCompose(ignore -> completeMultipartUpload(s3AsyncClient, uploadRequest, uploadId, completedParts, statsMetricPublisher)) .handle(handleExceptionOrResponse(s3AsyncClient, uploadRequest, returnFuture, uploadId)) .exceptionally(throwable -> { handleException(returnFuture, () -> "Unexpected exception occurred", throwable); @@ -225,7 +255,8 @@ private CompletableFuture<CompleteMultipartUploadResponse> completeMultipartUplo S3AsyncClient s3AsyncClient, UploadRequest uploadRequest, String uploadId, - AtomicReferenceArray<CompletedPart> completedParts + AtomicReferenceArray<CompletedPart> completedParts, + StatsMetricPublisher statsMetricPublisher ) { log.debug(() -> new ParameterizedMessage("Sending completeMultipartUploadRequest, uploadId: {}", uploadId)); @@ -234,6 +265,7 @@ private CompletableFuture<CompleteMultipartUploadResponse> completeMultipartUplo .bucket(uploadRequest.getBucket()) .key(uploadRequest.getKey()) .uploadId(uploadId) + .overrideConfiguration(o -> o.addMetricPublisher(statsMetricPublisher.multipartUploadMetricCollector)) .multipartUpload(CompletedMultipartUpload.builder().parts(parts).build()) .build(); @@ -271,10 +303,13 @@ private static void handleException(CompletableFuture<Void> returnFuture, Suppli /** * Calculates the optimal part size of each part request if the upload operation is carried out as multipart upload. 
*/ - public long calculateOptimalPartSize(long contentLengthOfSource) { + public long calculateOptimalPartSize(long contentLengthOfSource, WritePriority writePriority, boolean uploadRetryEnabled) { if (contentLengthOfSource < ByteSizeUnit.MB.toBytes(5)) { return contentLengthOfSource; } + if (uploadRetryEnabled && (writePriority == WritePriority.HIGH || writePriority == WritePriority.URGENT)) { + return new ByteSizeValue(5, ByteSizeUnit.MB).getBytes(); + } double optimalPartSize = contentLengthOfSource / (double) MAX_UPLOAD_PARTS; optimalPartSize = Math.ceil(optimalPartSize); return (long) Math.max(optimalPartSize, minimumPartSize); @@ -284,28 +319,46 @@ private void uploadInOneChunk( S3AsyncClient s3AsyncClient, UploadRequest uploadRequest, InputStreamContainer inputStreamContainer, - CompletableFuture<Void> returnFuture + CompletableFuture<Void> returnFuture, + StatsMetricPublisher statsMetricPublisher ) { PutObjectRequest.Builder putObjectRequestBuilder = PutObjectRequest.builder() .bucket(uploadRequest.getBucket()) .key(uploadRequest.getKey()) - .contentLength(uploadRequest.getContentLength()); + .contentLength(uploadRequest.getContentLength()) + .overrideConfiguration(o -> o.addMetricPublisher(statsMetricPublisher.putObjectMetricPublisher)); if (uploadRequest.doRemoteDataIntegrityCheck()) { putObjectRequestBuilder.checksumAlgorithm(ChecksumAlgorithm.CRC32); putObjectRequestBuilder.checksumCRC32(base64StringFromLong(uploadRequest.getExpectedChecksum())); } - ExecutorService streamReadExecutor = uploadRequest.getWritePriority() == WritePriority.HIGH - ? priorityExecutorService - : executorService; + ExecutorService streamReadExecutor; + if (uploadRequest.getWritePriority() == WritePriority.URGENT) { + streamReadExecutor = urgentExecutorService; + } else if (uploadRequest.getWritePriority() == WritePriority.HIGH) { + streamReadExecutor = priorityExecutorService; + } else { + streamReadExecutor = executorService; + } + + InputStream inputStream = AsyncPartsHandler.maybeRetryInputStream( + inputStreamContainer.getInputStream(), + uploadRequest.getWritePriority(), + uploadRequest.isUploadRetryEnabled(), + uploadRequest.getContentLength() + ); CompletableFuture<Void> putObjectFuture = SocketAccess.doPrivileged( () -> s3AsyncClient.putObject( putObjectRequestBuilder.build(), - AsyncRequestBody.fromInputStream( - inputStreamContainer.getInputStream(), - inputStreamContainer.getContentLength(), - streamReadExecutor - ) + AsyncRequestBody.fromInputStream(inputStream, inputStreamContainer.getContentLength(), streamReadExecutor) ).handle((resp, throwable) -> { + try { + inputStream.close(); + } catch (IOException e) { + log.error( + () -> new ParameterizedMessage("Failed to close stream while uploading single file {}.", uploadRequest.getKey()), + e + ); + } if (throwable != null) { Throwable unwrappedThrowable = ExceptionsHelper.unwrap(throwable, S3Exception.class); if (unwrappedThrowable != null) { diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/UploadRequest.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/UploadRequest.java index 3804c8417eb9f..a5304dc4a97d6 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/UploadRequest.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/UploadRequest.java @@ -25,6 +25,8 @@ public class UploadRequest { private final boolean doRemoteDataIntegrityCheck; private final Long expectedChecksum; + private boolean uploadRetryEnabled; 
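Taken together, the sizing logic above follows three rules: sources under 5 MB upload as a single part; retry-enabled HIGH or URGENT uploads pin parts at 5 MB so each buffered retry copy stays small; everything else is split evenly across at most MAX_UPLOAD_PARTS parts, floored at the configured minimum part size. A worked example under assumed values (16 MB minimum part size and a 10,000-part ceiling; both numbers are illustrative, not read from this diff):

public final class PartSizeExample {
    public static void main(String[] args) {
        final long mb = 1024L * 1024L;
        final long minimumPartSize = 16 * mb; // assumed repository setting
        final int maxUploadParts = 10_000;    // assumed part-count ceiling

        // An assumed 1 TiB source divides into ~105 MiB parts, because the even
        // split across 10,000 parts exceeds the 16 MB floor.
        long contentLength = 1024L * 1024L * mb;
        long optimalPartSize = (long) Math.ceil(contentLength / (double) maxUploadParts);
        long partSize = Math.max(optimalPartSize, minimumPartSize);
        System.out.println(partSize); // 109951163 bytes

        // By the first rule a 3 MB source would return 3 MB unchanged, and by the
        // second a retryable HIGH upload above 5 MB would use fixed 5 MB parts.
    }
}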
+ /** * Construct a new UploadRequest object * @@ -43,7 +45,8 @@ public UploadRequest( WritePriority writePriority, CheckedConsumer<Boolean, IOException> uploadFinalizer, boolean doRemoteDataIntegrityCheck, - Long expectedChecksum + Long expectedChecksum, + boolean uploadRetryEnabled ) { this.bucket = bucket; this.key = key; @@ -52,6 +55,7 @@ public UploadRequest( this.uploadFinalizer = uploadFinalizer; this.doRemoteDataIntegrityCheck = doRemoteDataIntegrityCheck; this.expectedChecksum = expectedChecksum; + this.uploadRetryEnabled = uploadRetryEnabled; } public String getBucket() { @@ -81,4 +85,8 @@ public boolean doRemoteDataIntegrityCheck() { public Long getExpectedChecksum() { return expectedChecksum; } + + public boolean isUploadRetryEnabled() { + return uploadRetryEnabled; + } } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/utils/HttpRangeUtils.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/utils/HttpRangeUtils.java index aafa0c2650c77..2e2fc9b86a45b 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/utils/HttpRangeUtils.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/utils/HttpRangeUtils.java @@ -8,24 +8,36 @@ package org.opensearch.repositories.s3.utils; -import org.opensearch.common.collect.Tuple; import software.amazon.awssdk.core.exception.SdkException; import java.util.regex.Matcher; import java.util.regex.Pattern; -public class HttpRangeUtils { +public final class HttpRangeUtils { + private static final Pattern RANGE_PATTERN = Pattern.compile("^bytes\\s+(\\d+)-\\d+[/\\d*]+$"); - private static final Pattern RANGE_PATTERN = Pattern.compile("^bytes=([0-9]+)-([0-9]+)$"); - - public static Tuple<Long, Long> fromHttpRangeHeader(String headerValue) { + /** + * Parses the Content-Range header value to determine the start offset of the HTTP response. + * Validates the value against the RFC 9110 grammar for the content range string.
+ * Sample values: "bytes 0-10/200", "bytes 0-10/*" + * <a href="https://www.rfc-editor.org/rfc/rfc9110.html#name-content-range">Details here</a> + * @param headerValue Header content range string value from the HTTP response + * @return Start (Offset) value of the HTTP response + */ + public static Long getStartOffsetFromRangeHeader(String headerValue) { Matcher matcher = RANGE_PATTERN.matcher(headerValue); if (!matcher.find()) { throw SdkException.create("Regex match for Content-Range header {" + headerValue + "} failed", new RuntimeException()); } - return new Tuple<>(Long.parseLong(matcher.group(1)), Long.parseLong(matcher.group(2))); + return Long.parseLong(matcher.group(1)); } + /** + * Provides a byte range string per <a href="https://www.rfc-editor.org/rfc/rfc9110.html#name-byte-ranges">RFC 9110</a> + * @param start start position (inclusive) + * @param end end position (inclusive) + * @return A 'bytes=start-end' string + */ public static String toHttpRangeHeader(long start, long end) { return "bytes=" + start + "-" + end; } diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/AbstractS3RepositoryTestCase.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/AbstractS3RepositoryTestCase.java index 7a62e6decfc8f..aae86c4f93587 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/AbstractS3RepositoryTestCase.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/AbstractS3RepositoryTestCase.java @@ -8,11 +8,13 @@ package org.opensearch.repositories.s3; -import org.opensearch.test.OpenSearchTestCase; +import software.amazon.awssdk.profiles.ProfileFileSystemSetting; + import org.opensearch.common.SuppressForbidden; import org.opensearch.common.io.PathUtils; +import org.opensearch.test.OpenSearchTestCase; + import java.nio.file.Path; -import software.amazon.awssdk.profiles.ProfileFileSystemSetting; public abstract class AbstractS3RepositoryTestCase extends OpenSearchTestCase { @Override diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/AwsS3ServiceImplTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/AwsS3ServiceImplTests.java index c9e7f58ba0778..b80b857644f2a 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/AwsS3ServiceImplTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/AwsS3ServiceImplTests.java @@ -32,28 +32,29 @@ package org.opensearch.repositories.s3; -import org.junit.Before; -import org.opensearch.common.SuppressForbidden; -import org.opensearch.common.settings.MockSecureSettings; -import org.opensearch.common.settings.Settings; -import org.opensearch.repositories.s3.utils.Protocol; import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; import software.amazon.awssdk.core.retry.backoff.BackoffStrategy; import software.amazon.awssdk.http.apache.ProxyConfiguration; +import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.settings.MockSecureSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.repositories.s3.utils.Protocol; +import org.junit.Before; + import java.io.Closeable; import java.io.IOException; import java.util.HashMap; import java.util.Locale; import java.util.Map; +import static 
org.opensearch.repositories.s3.S3ClientSettings.PROTOCOL_SETTING; +import static org.opensearch.repositories.s3.S3ClientSettings.PROXY_TYPE_SETTING; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; -import static org.opensearch.repositories.s3.S3ClientSettings.PROTOCOL_SETTING; -import static org.opensearch.repositories.s3.S3ClientSettings.PROXY_TYPE_SETTING; public class AwsS3ServiceImplTests extends AbstractS3RepositoryTestCase { diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java index 46e589f7fa41f..f84d953baae8e 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java @@ -32,6 +32,11 @@ package org.opensearch.repositories.s3; +import software.amazon.awssdk.auth.credentials.AwsCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.services.s3.DelegatingS3Client; +import software.amazon.awssdk.services.s3.S3Client; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.client.node.NodeClient; @@ -52,10 +57,6 @@ import org.opensearch.rest.action.admin.cluster.RestGetRepositoriesAction; import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.test.rest.FakeRestRequest; -import software.amazon.awssdk.auth.credentials.AwsCredentials; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; -import software.amazon.awssdk.services.s3.DelegatingS3Client; -import software.amazon.awssdk.services.s3.S3Client; import java.nio.file.Path; import java.security.AccessController; @@ -65,15 +66,16 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; +import static org.opensearch.repositories.s3.S3ClientSettings.ACCESS_KEY_SETTING; +import static org.opensearch.repositories.s3.S3ClientSettings.SECRET_KEY_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; -import static org.opensearch.repositories.s3.S3ClientSettings.ACCESS_KEY_SETTING; -import static org.opensearch.repositories.s3.S3ClientSettings.SECRET_KEY_SETTING; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +@SuppressWarnings("removal") @SuppressForbidden(reason = "test requires to set a System property to allow insecure settings when running in IDE") public class RepositoryCredentialsTests extends OpenSearchSingleNodeTestCase implements ConfigPathSupport { @@ -301,7 +303,7 @@ protected S3Repository createRepository( ClusterService clusterService, RecoverySettings recoverySettings ) { - return new S3Repository(metadata, registry, service, clusterService, recoverySettings, null, null, null, null, false) { + return new S3Repository(metadata, registry, service, clusterService, recoverySettings, null, null, null, null, null, false) { @Override protected void assertSnapshotOrGenericThread() { // eliminate thread name check as we create repo manually on test/main 
threads diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3AsyncServiceTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3AsyncServiceTests.java index a401ba06728d7..de9ad46bb222d 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3AsyncServiceTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3AsyncServiceTests.java @@ -8,7 +8,6 @@ package org.opensearch.repositories.s3; -import org.junit.Before; import org.opensearch.cli.SuppressForbidden; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.common.settings.MockSecureSettings; @@ -16,6 +15,7 @@ import org.opensearch.repositories.s3.async.AsyncExecutorContainer; import org.opensearch.repositories.s3.async.AsyncTransferEventLoopGroup; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; import java.util.Map; import java.util.concurrent.Executors; @@ -44,12 +44,12 @@ public void testCachedClientsAreReleased() { final S3ClientSettings otherClientSettings = s3AsyncService.settings(metadata2); assertSame(clientSettings, otherClientSettings); final AmazonAsyncS3Reference reference = SocketAccess.doPrivileged( - () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer) + () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer, asyncExecutorContainer) ); reference.close(); s3AsyncService.close(); final AmazonAsyncS3Reference referenceReloaded = SocketAccess.doPrivileged( - () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer) + () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer, asyncExecutorContainer) ); assertNotSame(referenceReloaded, reference); referenceReloaded.close(); @@ -79,12 +79,12 @@ public void testCachedClientsWithCredentialsAreReleased() { final S3ClientSettings otherClientSettings = s3AsyncService.settings(metadata2); assertSame(clientSettings, otherClientSettings); final AmazonAsyncS3Reference reference = SocketAccess.doPrivileged( - () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer) + () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer, asyncExecutorContainer) ); reference.close(); s3AsyncService.close(); final AmazonAsyncS3Reference referenceReloaded = SocketAccess.doPrivileged( - () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer) + () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer, asyncExecutorContainer) ); assertNotSame(referenceReloaded, reference); referenceReloaded.close(); diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java index 10137f0475177..8c7e196d7c812 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java @@ -8,11 +8,27 @@ package org.opensearch.repositories.s3; +import software.amazon.awssdk.core.async.AsyncRequestBody; +import software.amazon.awssdk.core.exception.SdkException; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.S3Client; 
+import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.AbortMultipartUploadResponse; +import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadResponse; +import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; +import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse; +import software.amazon.awssdk.services.s3.model.DeleteObjectRequest; +import software.amazon.awssdk.services.s3.model.DeleteObjectResponse; +import software.amazon.awssdk.services.s3.model.ObjectCannedACL; +import software.amazon.awssdk.services.s3.model.PutObjectRequest; +import software.amazon.awssdk.services.s3.model.PutObjectResponse; +import software.amazon.awssdk.services.s3.model.StorageClass; +import software.amazon.awssdk.services.s3.model.UploadPartRequest; +import software.amazon.awssdk.services.s3.model.UploadPartResponse; + import org.apache.lucene.store.IndexInput; -import org.junit.After; -import org.junit.Before; -import org.mockito.invocation.InvocationOnMock; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.common.CheckedTriFunction; import org.opensearch.common.StreamContext; @@ -24,26 +40,16 @@ import org.opensearch.common.io.InputStreamContainer; import org.opensearch.common.lucene.store.ByteArrayIndexInput; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.repositories.s3.async.AsyncExecutorContainer; -import org.opensearch.repositories.s3.async.AsyncTransferManager; import org.opensearch.repositories.s3.async.AsyncTransferEventLoopGroup; +import org.opensearch.repositories.s3.async.AsyncTransferManager; import org.opensearch.test.OpenSearchTestCase; -import software.amazon.awssdk.core.async.AsyncRequestBody; -import software.amazon.awssdk.services.s3.S3AsyncClient; -import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; -import software.amazon.awssdk.services.s3.model.AbortMultipartUploadResponse; -import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; -import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadResponse; -import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; -import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse; -import software.amazon.awssdk.services.s3.model.DeleteObjectRequest; -import software.amazon.awssdk.services.s3.model.DeleteObjectResponse; -import software.amazon.awssdk.services.s3.model.PutObjectRequest; -import software.amazon.awssdk.services.s3.model.PutObjectResponse; -import software.amazon.awssdk.services.s3.model.UploadPartRequest; -import software.amazon.awssdk.services.s3.model.UploadPartResponse; +import org.junit.After; +import org.junit.Before; import java.io.IOException; import java.io.InputStream; @@ -61,12 +67,21 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.IntStream; + +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import static 
org.opensearch.repositories.s3.S3Repository.BULK_DELETE_SIZE; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class S3BlobContainerMockClientTests extends OpenSearchTestCase implements ConfigPathSupport { @@ -263,10 +278,11 @@ public void verifySingleChunkUploadCallCount(boolean finalizeUploadFailure) { @Override public AmazonAsyncS3Reference client( RepositoryMetadata repositoryMetadata, + AsyncExecutorContainer urgentExecutorBuilder, AsyncExecutorContainer priorityExecutorBuilder, AsyncExecutorContainer normalExecutorBuilder ) { - return new AmazonAsyncS3Reference(AmazonAsyncS3WithCredentials.create(asyncClient, asyncClient, null)); + return new AmazonAsyncS3Reference(AmazonAsyncS3WithCredentials.create(asyncClient, asyncClient, asyncClient, null)); } } @@ -385,13 +401,16 @@ private S3BlobStore createBlobStore() { S3Repository.BUFFER_SIZE_SETTING.getDefault(Settings.EMPTY), S3Repository.CANNED_ACL_SETTING.getDefault(Settings.EMPTY), S3Repository.STORAGE_CLASS_SETTING.getDefault(Settings.EMPTY), + BULK_DELETE_SIZE.get(Settings.EMPTY), repositoryMetadata, new AsyncTransferManager( S3Repository.PARALLEL_MULTIPART_UPLOAD_MINIMUM_PART_SIZE_SETTING.getDefault(Settings.EMPTY).getBytes(), asyncExecutorContainer.getStreamReader(), + asyncExecutorContainer.getStreamReader(), asyncExecutorContainer.getStreamReader() ), asyncExecutorContainer, + asyncExecutorContainer, asyncExecutorContainer ); } @@ -509,7 +528,7 @@ public InputStreamContainer apply(Integer partNo, Long size, Long position) thro } }, partSize, calculateLastPartSize(blobSize, partSize), calculateNumberOfParts(blobSize, partSize)); } - }, blobSize, false, WritePriority.HIGH, uploadSuccess -> { + }, blobSize, false, WritePriority.NORMAL, uploadSuccess -> { assertTrue(uploadSuccess); if (throwExceptionOnFinalizeUpload) { throw new RuntimeException(); @@ -539,4 +558,115 @@ private long calculateLastPartSize(long totalSize, long partSize) { private int calculateNumberOfParts(long contentLength, long partSize) { return (int) ((contentLength % partSize) == 0 ? 
contentLength / partSize : (contentLength / partSize) + 1); } + + public void testFailureWhenLargeFileRedirected() throws IOException, ExecutionException, InterruptedException { + testLargeFilesRedirectedToSlowSyncClient(true); + } + + public void testLargeFileRedirected() throws IOException, ExecutionException, InterruptedException { + testLargeFilesRedirectedToSlowSyncClient(false); + } + + private void testLargeFilesRedirectedToSlowSyncClient(boolean expectException) throws IOException, InterruptedException { + final ByteSizeValue partSize = new ByteSizeValue(1024, ByteSizeUnit.MB); + + int numberOfParts = 20; + final long lastPartSize = new ByteSizeValue(20, ByteSizeUnit.MB).getBytes(); + final long blobSize = ((numberOfParts - 1) * partSize.getBytes()) + lastPartSize; + CountDownLatch countDownLatch = new CountDownLatch(1); + AtomicReference<Exception> exceptionRef = new AtomicReference<>(); + ActionListener<Void> completionListener = ActionListener.wrap(resp -> { countDownLatch.countDown(); }, ex -> { + exceptionRef.set(ex); + countDownLatch.countDown(); + }); + + final String bucketName = randomAlphaOfLengthBetween(1, 10); + + final BlobPath blobPath = new BlobPath(); + if (randomBoolean()) { + IntStream.of(randomIntBetween(1, 5)).forEach(value -> blobPath.add("path_" + value)); + } + + final long bufferSize = ByteSizeUnit.MB.toBytes(randomIntBetween(5, 1024)); + + final S3BlobStore blobStore = mock(S3BlobStore.class); + when(blobStore.bucket()).thenReturn(bucketName); + when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); + when(blobStore.bufferSizeInBytes()).thenReturn(bufferSize); + + final boolean serverSideEncryption = randomBoolean(); + when(blobStore.serverSideEncryption()).thenReturn(serverSideEncryption); + + final StorageClass storageClass = randomFrom(StorageClass.values()); + when(blobStore.getStorageClass()).thenReturn(storageClass); + when(blobStore.isRedirectLargeUploads()).thenReturn(true); + + final ObjectCannedACL cannedAccessControlList = randomBoolean() ? 
randomFrom(ObjectCannedACL.values()) : null; + if (cannedAccessControlList != null) { + when(blobStore.getCannedACL()).thenReturn(cannedAccessControlList); + } + + final S3Client client = mock(S3Client.class); + final AmazonS3Reference clientReference = Mockito.spy(new AmazonS3Reference(client)); + doNothing().when(clientReference).close(); + when(blobStore.clientReference()).thenReturn(clientReference); + final CreateMultipartUploadResponse createMultipartUploadResponse = CreateMultipartUploadResponse.builder() + .uploadId(randomAlphaOfLength(10)) + .build(); + when(client.createMultipartUpload(any(CreateMultipartUploadRequest.class))).thenReturn(createMultipartUploadResponse); + if (expectException) { + when(client.uploadPart(any(UploadPartRequest.class), any(RequestBody.class))).thenThrow( + SdkException.create("Expected upload part request to fail", new RuntimeException()) + ); + } else { + when(client.uploadPart(any(UploadPartRequest.class), any(RequestBody.class))).thenReturn(UploadPartResponse.builder().build()); + } + + // Stub successful completion and abort responses; the failure path, when enabled, comes from uploadPart above + when(client.completeMultipartUpload(any(CompleteMultipartUploadRequest.class))).thenReturn( + CompleteMultipartUploadResponse.builder().build() + ); + when(client.abortMultipartUpload(any(AbortMultipartUploadRequest.class))).thenReturn( + AbortMultipartUploadResponse.builder().build() + ); + + List<InputStream> openInputStreams = new ArrayList<>(); + final S3BlobContainer s3BlobContainer = Mockito.spy(new S3BlobContainer(blobPath, blobStore)); + s3BlobContainer.asyncBlobUpload(new WriteContext("write_large_blob", new StreamContextSupplier() { + @Override + public StreamContext supplyStreamContext(long partSize) { + return new StreamContext(new CheckedTriFunction<Integer, Long, Long, InputStreamContainer, IOException>() { + @Override + public InputStreamContainer apply(Integer partNo, Long size, Long position) throws IOException { + InputStream inputStream = new OffsetRangeIndexInputStream(new ZeroIndexInput("desc", blobSize), size, position); + openInputStreams.add(inputStream); + return new InputStreamContainer(inputStream, size, position); + } + }, partSize, calculateLastPartSize(blobSize, partSize), calculateNumberOfParts(blobSize, partSize)); + } + }, blobSize, false, WritePriority.HIGH, uploadSuccess -> { assertTrue(uploadSuccess); }, false, null), completionListener); + + assertTrue(countDownLatch.await(5000, TimeUnit.SECONDS)); + if (expectException) { + assertNotNull(exceptionRef.get()); + } else { + assertNull(exceptionRef.get()); + } + verify(s3BlobContainer, times(1)).executeMultipartUpload(any(S3BlobStore.class), anyString(), any(InputStream.class), anyLong()); + + if (expectException) { + verify(client, times(1)).abortMultipartUpload(any(AbortMultipartUploadRequest.class)); + } else { + verify(client, times(0)).abortMultipartUpload(any(AbortMultipartUploadRequest.class)); + } + + openInputStreams.forEach(inputStream -> { + try { + inputStream.close(); + } catch (IOException ex) { + logger.error("Error closing input stream"); + } + }); + } + } diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java index 1a1fb123aa5ea..ceab06bd051e9 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java @@ -31,24 +31,23 
@@ package org.opensearch.repositories.s3; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.core.io.SdkDigestInputStream; +import software.amazon.awssdk.utils.internal.Base16; + import org.apache.http.HttpStatus; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.common.CheckedTriFunction; import org.opensearch.common.Nullable; import org.opensearch.common.StreamContext; import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobPath; -import org.opensearch.common.blobstore.VerifyingMultiStreamBlobContainer; import org.opensearch.common.blobstore.stream.write.StreamContextSupplier; import org.opensearch.common.blobstore.stream.write.WriteContext; import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.common.blobstore.transfer.stream.OffsetRangeIndexInputStream; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.hash.MessageDigests; import org.opensearch.common.io.InputStreamContainer; import org.opensearch.common.io.Streams; @@ -57,19 +56,21 @@ import org.opensearch.common.network.InetAddresses; import org.opensearch.common.settings.MockSecureSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.CountDown; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.repositories.blobstore.AbstractBlobContainerRetriesTestCase; import org.opensearch.repositories.blobstore.ZeroInputStream; import org.opensearch.repositories.s3.async.AsyncExecutorContainer; -import org.opensearch.repositories.s3.async.AsyncTransferManager; import org.opensearch.repositories.s3.async.AsyncTransferEventLoopGroup; -import software.amazon.awssdk.core.exception.SdkClientException; -import software.amazon.awssdk.core.io.SdkDigestInputStream; -import software.amazon.awssdk.utils.internal.Base16; +import org.opensearch.repositories.s3.async.AsyncTransferManager; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; import java.io.ByteArrayInputStream; import java.io.FilterInputStream; @@ -89,16 +90,17 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import static org.opensearch.repositories.s3.S3ClientSettings.DISABLE_CHUNKED_ENCODING; +import static org.opensearch.repositories.s3.S3ClientSettings.ENDPOINT_SETTING; +import static org.opensearch.repositories.s3.S3ClientSettings.MAX_RETRIES_SETTING; +import static org.opensearch.repositories.s3.S3ClientSettings.READ_TIMEOUT_SETTING; +import static org.opensearch.repositories.s3.S3ClientSettings.REGION; +import static org.opensearch.repositories.s3.S3Repository.BULK_DELETE_SIZE; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; 
import static org.hamcrest.Matchers.is; -import static org.opensearch.repositories.s3.S3ClientSettings.DISABLE_CHUNKED_ENCODING; -import static org.opensearch.repositories.s3.S3ClientSettings.ENDPOINT_SETTING; -import static org.opensearch.repositories.s3.S3ClientSettings.REGION; -import static org.opensearch.repositories.s3.S3ClientSettings.MAX_RETRIES_SETTING; -import static org.opensearch.repositories.s3.S3ClientSettings.READ_TIMEOUT_SETTING; /** * This class tests how a {@link S3BlobContainer} and its underlying AWS S3 client are retrying requests when reading or writing blobs. @@ -160,7 +162,7 @@ protected Class<? extends Exception> unresponsiveExceptionType() { } @Override - protected VerifyingMultiStreamBlobContainer createBlobContainer( + protected AsyncMultiStreamBlobContainer createBlobContainer( final @Nullable Integer maxRetries, final @Nullable TimeValue readTimeout, final @Nullable Boolean disableChunkedEncoding, @@ -214,13 +216,16 @@ protected VerifyingMultiStreamBlobContainer createBlobContainer( bufferSize == null ? S3Repository.BUFFER_SIZE_SETTING.getDefault(Settings.EMPTY) : bufferSize, S3Repository.CANNED_ACL_SETTING.getDefault(Settings.EMPTY), S3Repository.STORAGE_CLASS_SETTING.getDefault(Settings.EMPTY), + BULK_DELETE_SIZE.get(Settings.EMPTY), repositoryMetadata, new AsyncTransferManager( S3Repository.PARALLEL_MULTIPART_UPLOAD_MINIMUM_PART_SIZE_SETTING.getDefault(Settings.EMPTY).getBytes(), asyncExecutorContainer.getStreamReader(), + asyncExecutorContainer.getStreamReader(), asyncExecutorContainer.getStreamReader() ), asyncExecutorContainer, + asyncExecutorContainer, asyncExecutorContainer ) ) { @@ -319,7 +324,7 @@ public void testWriteBlobByStreamsWithRetries() throws Exception { } }); - final VerifyingMultiStreamBlobContainer blobContainer = createBlobContainer(maxRetries, null, true, null); + final AsyncMultiStreamBlobContainer blobContainer = createBlobContainer(maxRetries, null, true, null); List<InputStream> openInputStreams = new ArrayList<>(); CountDownLatch countDownLatch = new CountDownLatch(1); AtomicReference<Exception> exceptionRef = new AtomicReference<>(); diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java index a2a7ca8d8bdd5..2b45e9cfe2d4b 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java @@ -32,21 +32,15 @@ package org.opensearch.repositories.s3; -import org.mockito.ArgumentCaptor; -import org.opensearch.action.ActionListener; -import org.opensearch.common.blobstore.BlobContainer; -import org.opensearch.common.blobstore.BlobMetadata; -import org.opensearch.common.blobstore.BlobPath; -import org.opensearch.common.blobstore.BlobStoreException; -import org.opensearch.common.blobstore.DeleteResult; -import org.opensearch.common.collect.Tuple; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.test.OpenSearchTestCase; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.core.async.AsyncResponseTransformer; import software.amazon.awssdk.core.exception.SdkException; import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.services.s3.S3AsyncClient; import software.amazon.awssdk.services.s3.S3Client; import 
software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.AbortMultipartUploadResponse; +import software.amazon.awssdk.services.s3.model.Checksum; import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest; import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadResponse; import software.amazon.awssdk.services.s3.model.CompletedPart; @@ -54,6 +48,11 @@ import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse; import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest; import software.amazon.awssdk.services.s3.model.DeleteObjectsResponse; +import software.amazon.awssdk.services.s3.model.GetObjectAttributesParts; +import software.amazon.awssdk.services.s3.model.GetObjectAttributesRequest; +import software.amazon.awssdk.services.s3.model.GetObjectAttributesResponse; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; import software.amazon.awssdk.services.s3.model.HeadObjectRequest; import software.amazon.awssdk.services.s3.model.HeadObjectResponse; import software.amazon.awssdk.services.s3.model.ListObjectsV2Request; @@ -71,6 +70,19 @@ import software.amazon.awssdk.services.s3.model.UploadPartResponse; import software.amazon.awssdk.services.s3.paginators.ListObjectsV2Iterable; +import org.opensearch.action.LatchedActionListener; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobMetadata; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.blobstore.BlobStoreException; +import org.opensearch.common.blobstore.DeleteResult; +import org.opensearch.common.blobstore.stream.read.ReadContext; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.io.InputStreamContainer; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.test.OpenSearchTestCase; + import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; @@ -85,18 +97,24 @@ import java.util.NoSuchElementException; import java.util.Set; import java.util.UUID; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import java.util.stream.IntStream; +import org.mockito.ArgumentCaptor; +import org.mockito.ArgumentMatchers; + import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -import static org.mockito.Mockito.when; -import static org.mockito.Mockito.mock; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class S3BlobStoreContainerTests extends OpenSearchTestCase { @@ -258,10 +276,12 @@ public void testDelete() throws IOException { final String bucketName = randomAlphaOfLengthBetween(1, 10); final BlobPath blobPath = new BlobPath(); + int bulkDeleteSize = 5; final S3BlobStore blobStore = mock(S3BlobStore.class); when(blobStore.bucket()).thenReturn(bucketName); when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); + 
when(blobStore.getBulkDeletesSize()).thenReturn(bulkDeleteSize); final S3Client client = mock(S3Client.class); doAnswer(invocation -> new AmazonS3Reference(client)).when(blobStore).clientReference(); @@ -279,8 +299,11 @@ public void testDelete() throws IOException { when(client.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listObjectsV2Iterable); final List<String> keysDeleted = new ArrayList<>(); + AtomicInteger deleteCount = new AtomicInteger(); doAnswer(invocation -> { DeleteObjectsRequest deleteObjectsRequest = invocation.getArgument(0); + deleteCount.getAndIncrement(); + logger.info("Delete batch size is {}", deleteObjectsRequest.delete().objects().size()); keysDeleted.addAll(deleteObjectsRequest.delete().objects().stream().map(ObjectIdentifier::key).collect(Collectors.toList())); return DeleteObjectsResponse.builder().build(); }).when(client).deleteObjects(any(DeleteObjectsRequest.class)); @@ -293,6 +316,8 @@ public void testDelete() throws IOException { // keysDeleted will have blobPath also assertEquals(listObjectsV2ResponseIterator.getKeysListed().size(), keysDeleted.size() - 1); assertTrue(keysDeleted.contains(blobPath.buildAsString())); + // keysDeleted includes blobPath; deletes are issued in batches of bulkDeleteSize + assertEquals((int) Math.ceil(((double) keysDeleted.size() + 1) / bulkDeleteSize), deleteCount.get()); keysDeleted.remove(blobPath.buildAsString()); assertEquals(new HashSet<>(listObjectsV2ResponseIterator.getKeysListed()), new HashSet<>(keysDeleted)); } @@ -891,6 +916,15 @@ public void testListBlobsByPrefixInLexicographicOrderWithLimitLessThanPageSize() testListBlobsByPrefixInLexicographicOrder(2, 1, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); } + /** + * Test the boundary value at page size to ensure + * no unnecessary call is made to S3 to fetch the next page.
+ * @throws IOException + */ + public void testListBlobsByPrefixInLexicographicOrderWithLimitEqualToPageSize() throws IOException { + testListBlobsByPrefixInLexicographicOrder(5, 1, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); + } + public void testListBlobsByPrefixInLexicographicOrderWithLimitGreaterThanPageSize() throws IOException { testListBlobsByPrefixInLexicographicOrder(8, 2, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); } @@ -898,4 +932,418 @@ public void testListBlobsByPrefixInLexicographicOrderWithLimitGreaterThanPageSiz public void testListBlobsByPrefixInLexicographicOrderWithLimitGreaterThanNumberOfRecords() throws IOException { testListBlobsByPrefixInLexicographicOrder(12, 2, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); } + + public void testReadBlobAsyncMultiPart() throws Exception { + final String bucketName = randomAlphaOfLengthBetween(1, 10); + final String blobName = randomAlphaOfLengthBetween(1, 10); + final String checksum = randomAlphaOfLength(10); + + final long objectSize = 100L; + final int objectPartCount = 10; + final int partSize = 10; + + final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); + final AmazonAsyncS3Reference amazonAsyncS3Reference = new AmazonAsyncS3Reference( + AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null) + ); + + final S3BlobStore blobStore = mock(S3BlobStore.class); + final BlobPath blobPath = new BlobPath(); + + when(blobStore.bucket()).thenReturn(bucketName); + when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); + when(blobStore.serverSideEncryption()).thenReturn(false); + when(blobStore.asyncClientReference()).thenReturn(amazonAsyncS3Reference); + + CompletableFuture<GetObjectAttributesResponse> getObjectAttributesResponseCompletableFuture = new CompletableFuture<>(); + getObjectAttributesResponseCompletableFuture.complete( + GetObjectAttributesResponse.builder() + .checksum(Checksum.builder().checksumCRC32(checksum).build()) + .objectSize(objectSize) + .objectParts(GetObjectAttributesParts.builder().totalPartsCount(objectPartCount).build()) + .build() + ); + when(s3AsyncClient.getObjectAttributes(any(GetObjectAttributesRequest.class))).thenReturn( + getObjectAttributesResponseCompletableFuture + ); + + mockObjectPartResponse(s3AsyncClient, bucketName, blobName, objectPartCount, partSize, objectSize); + + CountDownLatch countDownLatch = new CountDownLatch(1); + CountingCompletionListener<ReadContext> readContextActionListener = new CountingCompletionListener<>(); + LatchedActionListener<ReadContext> listener = new LatchedActionListener<>(readContextActionListener, countDownLatch); + + final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); + blobContainer.readBlobAsync(blobName, listener); + countDownLatch.await(); + + assertEquals(1, readContextActionListener.getResponseCount()); + assertEquals(0, readContextActionListener.getFailureCount()); + ReadContext readContext = readContextActionListener.getResponse(); + assertEquals(objectPartCount, readContext.getNumberOfParts()); + assertEquals(checksum, readContext.getBlobChecksum()); + assertEquals(objectSize, readContext.getBlobSize()); + + for (int partNumber = 1; partNumber < objectPartCount; partNumber++) { + InputStreamContainer inputStreamContainer = readContext.getPartStreams().get(partNumber).get().join(); + final int offset = partNumber * partSize; + assertEquals(partSize, inputStreamContainer.getContentLength()); + assertEquals(offset, inputStreamContainer.getOffset()); + 
+            assertEquals(partSize, inputStreamContainer.getInputStream().readAllBytes().length);
+        }
+    }
+
+    public void testReadBlobAsyncSinglePart() throws Exception {
+        final String bucketName = randomAlphaOfLengthBetween(1, 10);
+        final String blobName = randomAlphaOfLengthBetween(1, 10);
+        final String checksum = randomAlphaOfLength(10);
+
+        final int objectSize = 100;
+
+        final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class);
+        final AmazonAsyncS3Reference amazonAsyncS3Reference = new AmazonAsyncS3Reference(
+            AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null)
+        );
+        final S3BlobStore blobStore = mock(S3BlobStore.class);
+        final BlobPath blobPath = new BlobPath();
+
+        when(blobStore.bucket()).thenReturn(bucketName);
+        when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher());
+        when(blobStore.serverSideEncryption()).thenReturn(false);
+        when(blobStore.asyncClientReference()).thenReturn(amazonAsyncS3Reference);
+
+        CompletableFuture<GetObjectAttributesResponse> getObjectAttributesResponseCompletableFuture = new CompletableFuture<>();
+        getObjectAttributesResponseCompletableFuture.complete(
+            GetObjectAttributesResponse.builder()
+                .checksum(Checksum.builder().checksumCRC32(checksum).build())
+                .objectSize((long) objectSize)
+                .build()
+        );
+        when(s3AsyncClient.getObjectAttributes(any(GetObjectAttributesRequest.class))).thenReturn(
+            getObjectAttributesResponseCompletableFuture
+        );
+
+        mockObjectResponse(s3AsyncClient, bucketName, blobName, objectSize);
+
+        CountDownLatch countDownLatch = new CountDownLatch(1);
+        CountingCompletionListener<ReadContext> readContextActionListener = new CountingCompletionListener<>();
+        LatchedActionListener<ReadContext> listener = new LatchedActionListener<>(readContextActionListener, countDownLatch);
+
+        final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
+        blobContainer.readBlobAsync(blobName, listener);
+        countDownLatch.await();
+
+        assertEquals(1, readContextActionListener.getResponseCount());
+        assertEquals(0, readContextActionListener.getFailureCount());
+        ReadContext readContext = readContextActionListener.getResponse();
+        assertEquals(1, readContext.getNumberOfParts());
+        assertEquals(checksum, readContext.getBlobChecksum());
+        assertEquals(objectSize, readContext.getBlobSize());
+
+        InputStreamContainer inputStreamContainer = readContext.getPartStreams().stream().findFirst().get().get().join();
+        assertEquals(objectSize, inputStreamContainer.getContentLength());
+        assertEquals(0, inputStreamContainer.getOffset());
+        assertEquals(objectSize, inputStreamContainer.getInputStream().readAllBytes().length);
+    }
+
+    public void testReadBlobAsyncFailure() throws Exception {
+        final String bucketName = randomAlphaOfLengthBetween(1, 10);
+        final String blobName = randomAlphaOfLengthBetween(1, 10);
+
+        final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class);
+        final AmazonAsyncS3Reference amazonAsyncS3Reference = new AmazonAsyncS3Reference(
+            AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null)
+        );
+
+        final S3BlobStore blobStore = mock(S3BlobStore.class);
+        final BlobPath blobPath = new BlobPath();
+
+        when(blobStore.bucket()).thenReturn(bucketName);
+        when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher());
+        when(blobStore.serverSideEncryption()).thenReturn(false);
+        when(blobStore.asyncClientReference()).thenReturn(amazonAsyncS3Reference);
+
+        // The stub throws synchronously, so the listener should observe exactly one failure and no responses
+        when(s3AsyncClient.getObjectAttributes(any(GetObjectAttributesRequest.class))).thenThrow(new RuntimeException());
+
+        CountDownLatch countDownLatch = new CountDownLatch(1);
+        CountingCompletionListener<ReadContext> readContextActionListener = new CountingCompletionListener<>();
+        LatchedActionListener<ReadContext> listener = new LatchedActionListener<>(readContextActionListener, countDownLatch);
+
+        final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
+        blobContainer.readBlobAsync(blobName, listener);
+        countDownLatch.await();
+
+        assertEquals(0, readContextActionListener.getResponseCount());
+        assertEquals(1, readContextActionListener.getFailureCount());
+    }
+
+    public void testReadBlobAsyncOnCompleteFailureMissingData() throws Exception {
+        final String bucketName = randomAlphaOfLengthBetween(1, 10);
+        final String blobName = randomAlphaOfLengthBetween(1, 10);
+        final int objectPartCount = 10;
+
+        final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class);
+        final AmazonAsyncS3Reference amazonAsyncS3Reference = new AmazonAsyncS3Reference(
+            AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null)
+        );
+
+        final S3BlobStore blobStore = mock(S3BlobStore.class);
+        final BlobPath blobPath = new BlobPath();
+
+        when(blobStore.bucket()).thenReturn(bucketName);
+        when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher());
+        when(blobStore.serverSideEncryption()).thenReturn(false);
+        when(blobStore.asyncClientReference()).thenReturn(amazonAsyncS3Reference);
+
+        // The attributes response is missing both the checksum and the object size, so completion should fail
+        CompletableFuture<GetObjectAttributesResponse> getObjectAttributesResponseCompletableFuture = new CompletableFuture<>();
+        getObjectAttributesResponseCompletableFuture.complete(
+            GetObjectAttributesResponse.builder()
+                .checksum(Checksum.builder().build())
+                .objectSize(null)
+                .objectParts(GetObjectAttributesParts.builder().totalPartsCount(objectPartCount).build())
+                .build()
+        );
+        when(s3AsyncClient.getObjectAttributes(any(GetObjectAttributesRequest.class))).thenReturn(
+            getObjectAttributesResponseCompletableFuture
+        );
+
+        CountDownLatch countDownLatch = new CountDownLatch(1);
+        CountingCompletionListener<ReadContext> readContextActionListener = new CountingCompletionListener<>();
+        LatchedActionListener<ReadContext> listener = new LatchedActionListener<>(readContextActionListener, countDownLatch);
+
+        final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
+        blobContainer.readBlobAsync(blobName, listener);
+        countDownLatch.await();
+
+        assertEquals(0, readContextActionListener.getResponseCount());
+        assertEquals(1, readContextActionListener.getFailureCount());
+    }
+
+    public void testGetBlobMetadata() throws Exception {
+        final String checksum = randomAlphaOfLengthBetween(1, 10);
+        final long objectSize = 100L;
+        final int objectPartCount = 10;
+        final String blobName = randomAlphaOfLengthBetween(1, 10);
+        final String bucketName = randomAlphaOfLengthBetween(1, 10);
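+        // getBlobMetadata should hand the canned GetObjectAttributes response below back to the caller unchanged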
+
+        final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class);
+        final S3BlobStore blobStore = mock(S3BlobStore.class);
+        final BlobPath blobPath = new BlobPath();
+        when(blobStore.bucket()).thenReturn(bucketName);
+        when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher());
+        when(blobStore.serverSideEncryption()).thenReturn(false);
+        final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
+
+        CompletableFuture<GetObjectAttributesResponse> getObjectAttributesResponseCompletableFuture = new CompletableFuture<>();
+        getObjectAttributesResponseCompletableFuture.complete(
+            GetObjectAttributesResponse.builder()
+                .checksum(Checksum.builder().checksumCRC32(checksum).build())
+                .objectSize(objectSize)
+                .objectParts(GetObjectAttributesParts.builder().totalPartsCount(objectPartCount).build())
+                .build()
+        );
+        when(s3AsyncClient.getObjectAttributes(any(GetObjectAttributesRequest.class))).thenReturn(
+            getObjectAttributesResponseCompletableFuture
+        );
+
+        CompletableFuture<GetObjectAttributesResponse> responseFuture = blobContainer.getBlobMetadata(s3AsyncClient, bucketName, blobName);
+        GetObjectAttributesResponse objectAttributesResponse = responseFuture.get();
+
+        assertEquals(checksum, objectAttributesResponse.checksum().checksumCRC32());
+        assertEquals(Long.valueOf(objectSize), objectAttributesResponse.objectSize());
+        assertEquals(Integer.valueOf(objectPartCount), objectAttributesResponse.objectParts().totalPartsCount());
+    }
+
+    public void testGetBlobPartInputStream() throws Exception {
+        final String blobName = randomAlphaOfLengthBetween(1, 10);
+        final String bucketName = randomAlphaOfLengthBetween(1, 10);
+        final long contentLength = 10L;
+        // Content-Range end offsets are inclusive, so ten bytes starting at offset 10 span 10-19
+        final String contentRange = "bytes 10-19/100";
+        final InputStream inputStream = ResponseInputStream.nullInputStream();
+
+        final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class);
+        final S3BlobStore blobStore = mock(S3BlobStore.class);
+        final BlobPath blobPath = new BlobPath();
+        when(blobStore.bucket()).thenReturn(bucketName);
+        when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher());
+        when(blobStore.serverSideEncryption()).thenReturn(false);
+        final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
+
+        GetObjectResponse getObjectResponse = GetObjectResponse.builder().contentLength(contentLength).contentRange(contentRange).build();
+
+        CompletableFuture<ResponseInputStream<GetObjectResponse>> getObjectPartResponse = new CompletableFuture<>();
+        ResponseInputStream<GetObjectResponse> responseInputStream = new ResponseInputStream<>(getObjectResponse, inputStream);
+        getObjectPartResponse.complete(responseInputStream);
+
+        when(
+            s3AsyncClient.getObject(
+                any(GetObjectRequest.class),
+                ArgumentMatchers.<AsyncResponseTransformer<GetObjectResponse, ResponseInputStream<GetObjectResponse>>>any()
+            )
+        ).thenReturn(getObjectPartResponse);
+
+        // Multipart request: the part offset is derived from the Content-Range header
+        InputStreamContainer inputStreamContainer = blobContainer.getBlobPartInputStreamContainer(s3AsyncClient, bucketName, blobName, 0)
+            .get();
+
+        assertEquals(10, inputStreamContainer.getOffset());
+        assertEquals(contentLength, inputStreamContainer.getContentLength());
+        assertEquals(inputStream.available(), inputStreamContainer.getInputStream().available());
+
+        // Single-part request: the offset defaults to 0
+        inputStreamContainer = blobContainer.getBlobPartInputStreamContainer(s3AsyncClient, bucketName, blobName, null).get();
+
+        assertEquals(0, inputStreamContainer.getOffset());
+        assertEquals(contentLength, inputStreamContainer.getContentLength());
+        assertEquals(inputStream.available(), inputStreamContainer.getInputStream().available());
+    }
+
+    public void testTransformResponseToInputStreamContainer() throws Exception {
+        // Ten bytes at offsets 0-9 of a 100-byte object; Content-Range end offsets are inclusive
+        final String contentRange = "bytes 0-9/100";
+        final long contentLength = 10L;
+        final InputStream inputStream = ResponseInputStream.nullInputStream();
+
+        GetObjectResponse getObjectResponse = GetObjectResponse.builder().contentLength(contentLength).build();
+
+        // Exception when content range absent for multipart object
+        ResponseInputStream<GetObjectResponse> responseInputStreamNoRange = new ResponseInputStream<>(getObjectResponse, inputStream);
+        assertThrows(SdkException.class, () -> S3BlobContainer.transformResponseToInputStreamContainer(responseInputStreamNoRange, true));
+
+        // No exception when content range absent for single part object
+        ResponseInputStream<GetObjectResponse> responseInputStreamNoRangeSinglePart = new ResponseInputStream<>(
+            getObjectResponse,
+            inputStream
+        );
+        InputStreamContainer inputStreamContainer = S3BlobContainer.transformResponseToInputStreamContainer(
+            responseInputStreamNoRangeSinglePart,
+            false
+        );
+        assertEquals(contentLength, inputStreamContainer.getContentLength());
+        assertEquals(0, inputStreamContainer.getOffset());
+
+        // Exception when length is absent
+        getObjectResponse = GetObjectResponse.builder().contentRange(contentRange).build();
+        ResponseInputStream<GetObjectResponse> responseInputStreamNoContentLength = new ResponseInputStream<>(
+            getObjectResponse,
+            inputStream
+        );
+        assertThrows(
+            SdkException.class,
+            () -> S3BlobContainer.transformResponseToInputStreamContainer(responseInputStreamNoContentLength, true)
+        );
+
+        // No exception when range and length both are present
+        getObjectResponse = GetObjectResponse.builder().contentRange(contentRange).contentLength(contentLength).build();
+        ResponseInputStream<GetObjectResponse> responseInputStream = new ResponseInputStream<>(getObjectResponse, inputStream);
+        inputStreamContainer = S3BlobContainer.transformResponseToInputStreamContainer(responseInputStream, true);
+        assertEquals(contentLength, inputStreamContainer.getContentLength());
+        assertEquals(0, inputStreamContainer.getOffset());
+        assertEquals(inputStream.available(), inputStreamContainer.getInputStream().available());
+    }
+
+    private void mockObjectResponse(S3AsyncClient s3AsyncClient, String bucketName, String blobName, int objectSize) {
+
+        final InputStream inputStream = new ByteArrayInputStream(randomByteArrayOfLength(objectSize));
+
+        GetObjectResponse getObjectResponse = GetObjectResponse.builder().contentLength((long) objectSize).build();
+
+        CompletableFuture<ResponseInputStream<GetObjectResponse>> getObjectPartResponse = new CompletableFuture<>();
+        ResponseInputStream<GetObjectResponse> responseInputStream = new ResponseInputStream<>(getObjectResponse, inputStream);
+        getObjectPartResponse.complete(responseInputStream);
+
+        GetObjectRequest getObjectRequest = GetObjectRequest.builder().bucket(bucketName).key(blobName).build();
+
+        when(
+            s3AsyncClient.getObject(
+                eq(getObjectRequest),
+                ArgumentMatchers.<AsyncResponseTransformer<GetObjectResponse, ResponseInputStream<GetObjectResponse>>>any()
+            )
+        ).thenReturn(getObjectPartResponse);
+
+    }
+
+    private void mockObjectPartResponse(
+        S3AsyncClient s3AsyncClient,
+        String bucketName,
+        String blobName,
+        int totalNumberOfParts,
+        int partSize,
+        long objectSize
+    ) {
+        for (int partNumber = 1; partNumber <= totalNumberOfParts; partNumber++) {
+            final int start = (partNumber - 1) * partSize;
+            // Content-Range end offsets are inclusive, so the last byte of this part is start + partSize - 1
+            final int end = partNumber * partSize - 1;
+            final String contentRange = "bytes " + start + "-" + end + "/" + objectSize;
+            final InputStream inputStream = new ByteArrayInputStream(randomByteArrayOfLength(partSize));
+
+            GetObjectResponse getObjectResponse = GetObjectResponse.builder()
+                .contentLength((long) partSize)
+                .contentRange(contentRange)
+                .build();
+
+            CompletableFuture<ResponseInputStream<GetObjectResponse>> getObjectPartResponse = new CompletableFuture<>();
+            ResponseInputStream<GetObjectResponse> responseInputStream = new ResponseInputStream<>(getObjectResponse, inputStream);
+            getObjectPartResponse.complete(responseInputStream);
+
+            GetObjectRequest getObjectRequest = GetObjectRequest.builder().bucket(bucketName).key(blobName).partNumber(partNumber).build();
+
+            when(
+                s3AsyncClient.getObject(
+                    eq(getObjectRequest),
+                    ArgumentMatchers.<AsyncResponseTransformer<GetObjectResponse, ResponseInputStream<GetObjectResponse>>>any()
+                )
+            ).thenReturn(getObjectPartResponse);
+        }
+    }
+
+    private static class CountingCompletionListener<T> implements ActionListener<T> {
+        private int responseCount;
+        private int failureCount;
+        private T response;
+        private Exception exception;
+
+        @Override
+        public void onResponse(T response) {
+            this.response = response;
+            responseCount++;
+        }
+
+        @Override
+        public void onFailure(Exception e) {
+            exception = e;
+            failureCount++;
+        }
+
+        public int getResponseCount() {
+            return responseCount;
+        }
+
+        public int getFailureCount() {
+            return failureCount;
+        }
+
+        public T getResponse() {
+            return response;
+        }
+
+        public Exception getException() {
+            return exception;
+        }
+    }
 }
diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ClientSettingsTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ClientSettingsTests.java index 1edf8d53c1e73..b47749553aeba 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ClientSettingsTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ClientSettingsTests.java @@ -32,11 +32,6 @@ package org.opensearch.repositories.s3;
-import org.opensearch.common.settings.MockSecureSettings;
-import org.opensearch.common.settings.Settings;
-import org.opensearch.common.settings.SettingsException;
-import org.opensearch.repositories.s3.utils.AwsRequestSigner;
-import org.opensearch.repositories.s3.utils.Protocol;
 import software.amazon.awssdk.auth.credentials.AwsCredentials;
 import software.amazon.awssdk.auth.credentials.AwsSessionCredentials;
 import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration;
@@ -45,6 +40,12 @@ import software.amazon.awssdk.regions.Region;
 import software.amazon.awssdk.services.s3.S3Client;
+import org.opensearch.common.settings.MockSecureSettings;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.settings.SettingsException;
+import org.opensearch.repositories.s3.utils.AwsRequestSigner;
+import org.opensearch.repositories.s3.utils.Protocol;
+
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.util.Locale;
@@ -69,10 +70,12 @@ public void testThereIsADefaultClientByDefault() {
         assertThat(defaultSettings.protocol, is(Protocol.HTTPS));
         assertThat(defaultSettings.proxySettings, is(ProxySettings.NO_PROXY_SETTINGS));
         assertThat(defaultSettings.readTimeoutMillis, is(50
* 1000)); - assertThat(defaultSettings.requestTimeoutMillis, is(120 * 1000)); + assertThat(defaultSettings.requestTimeoutMillis, is(5 * 60 * 1000)); assertThat(defaultSettings.connectionTimeoutMillis, is(10 * 1000)); assertThat(defaultSettings.connectionTTLMillis, is(5 * 1000)); - assertThat(defaultSettings.maxConnections, is(100)); + assertThat(defaultSettings.maxConnections, is(500)); + assertThat(defaultSettings.maxSyncConnections, is(500)); + assertThat(defaultSettings.connectionAcquisitionTimeoutMillis, is(15 * 60 * 1000)); assertThat(defaultSettings.maxRetries, is(3)); assertThat(defaultSettings.throttleRetries, is(true)); } diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java index 84d56c7ae2854..6fec535ae6301 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java @@ -32,20 +32,24 @@ package org.opensearch.repositories.s3; -import org.hamcrest.Matchers; +import software.amazon.awssdk.services.s3.S3Client; + import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.repositories.RepositoryException; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.blobstore.BlobStoreTestUtil; import org.opensearch.test.OpenSearchTestCase; -import software.amazon.awssdk.services.s3.S3Client; +import org.hamcrest.Matchers; import java.nio.file.Path; +import java.util.List; import java.util.Map; import static org.hamcrest.Matchers.containsString; @@ -121,7 +125,8 @@ public void testBasePathSetting() { } public void testDefaultBufferSize() { - final RepositoryMetadata metadata = new RepositoryMetadata("dummy-repo", "mock", Settings.EMPTY); + Settings settings = Settings.builder().build(); + final RepositoryMetadata metadata = new RepositoryMetadata("dummy-repo", "mock", settings); try (S3Repository s3repo = createS3Repo(metadata)) { assertThat(s3repo.getBlobStore(), is(nullValue())); s3repo.start(); @@ -132,6 +137,26 @@ public void testDefaultBufferSize() { } } + public void testIsReloadable() { + final RepositoryMetadata metadata = new RepositoryMetadata("dummy-repo", "mock", Settings.EMPTY); + try (S3Repository s3repo = createS3Repo(metadata)) { + assertTrue(s3repo.isReloadable()); + } + } + + public void testRestrictedSettingsDefault() { + final RepositoryMetadata metadata = new RepositoryMetadata("dummy-repo", "mock", Settings.EMPTY); + try (S3Repository s3repo = createS3Repo(metadata)) { + List<Setting<?>> restrictedSettings = s3repo.getRestrictedSystemRepositorySettings(); + assertThat(restrictedSettings.size(), is(5)); + assertTrue(restrictedSettings.contains(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING)); + assertTrue(restrictedSettings.contains(BlobStoreRepository.READONLY_SETTING)); + 
assertTrue(restrictedSettings.contains(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY)); + assertTrue(restrictedSettings.contains(S3Repository.BUCKET_SETTING)); + assertTrue(restrictedSettings.contains(S3Repository.BASE_PATH_SETTING)); + } + } + private S3Repository createS3Repo(RepositoryMetadata metadata) { return new S3Repository( metadata, @@ -143,6 +168,7 @@ private S3Repository createS3Repo(RepositoryMetadata metadata) { null, null, null, + null, false ) { @Override diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RetryingInputStreamTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RetryingInputStreamTests.java index a62035bde8307..b38d5119b4108 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RetryingInputStreamTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RetryingInputStreamTests.java @@ -32,14 +32,14 @@ package org.opensearch.repositories.s3; -import org.opensearch.common.io.Streams; -import org.opensearch.repositories.s3.utils.HttpRangeUtils; -import org.opensearch.test.OpenSearchTestCase; import software.amazon.awssdk.core.ResponseInputStream; import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.model.GetObjectRequest; import software.amazon.awssdk.services.s3.model.GetObjectResponse; +import org.opensearch.common.io.Streams; +import org.opensearch.test.OpenSearchTestCase; + import java.io.ByteArrayInputStream; import java.io.IOException; import java.util.Arrays; @@ -103,11 +103,11 @@ public void testRangeInputStreamIsAborted() throws IOException { } private S3RetryingInputStream createInputStream(final byte[] data, final Long start, final Long length) throws IOException { - long end = Math.addExact(start, length - 1); + final long end = Math.addExact(start, length - 1); final S3Client client = mock(S3Client.class); when(client.getObject(any(GetObjectRequest.class))).thenReturn( new ResponseInputStream<>( - GetObjectResponse.builder().contentLength(length).contentRange(HttpRangeUtils.toHttpRangeHeader(start, end)).build(), + GetObjectResponse.builder().contentLength(length).build(), new ByteArrayInputStream(data, Math.toIntExact(start), Math.toIntExact(length)) ) ); diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java index 596291a1d94fb..b753b847df869 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java @@ -8,15 +8,6 @@ package org.opensearch.repositories.s3.async; -import org.junit.Before; -import org.opensearch.ExceptionsHelper; -import org.opensearch.common.StreamContext; -import org.opensearch.common.blobstore.exception.CorruptFileException; -import org.opensearch.common.blobstore.stream.write.WritePriority; -import org.opensearch.common.io.InputStreamContainer; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.repositories.blobstore.ZeroInputStream; -import org.opensearch.test.OpenSearchTestCase; import software.amazon.awssdk.awscore.exception.AwsErrorDetails; import software.amazon.awssdk.core.async.AsyncRequestBody; import software.amazon.awssdk.http.HttpStatusCode; @@ -35,9 +26,25 @@ import 
software.amazon.awssdk.services.s3.model.UploadPartRequest; import software.amazon.awssdk.services.s3.model.UploadPartResponse; +import org.opensearch.ExceptionsHelper; +import org.opensearch.common.StreamContext; +import org.opensearch.common.blobstore.exception.CorruptFileException; +import org.opensearch.common.blobstore.stream.write.WritePriority; +import org.opensearch.common.io.InputStreamContainer; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.repositories.blobstore.ZeroInputStream; +import org.opensearch.repositories.s3.StatsMetricPublisher; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicReference; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; @@ -57,6 +64,7 @@ public void setUp() throws Exception { asyncTransferManager = new AsyncTransferManager( ByteSizeUnit.MB.toBytes(5), Executors.newSingleThreadExecutor(), + Executors.newSingleThreadExecutor(), Executors.newSingleThreadExecutor() ); super.setUp(); @@ -69,17 +77,17 @@ public void testOneChunkUpload() { putObjectResponseCompletableFuture ); + AtomicReference<InputStream> streamRef = new AtomicReference<>(); CompletableFuture<Void> resultFuture = asyncTransferManager.uploadObject( s3AsyncClient, new UploadRequest("bucket", "key", ByteSizeUnit.MB.toBytes(1), WritePriority.HIGH, uploadSuccess -> { // do nothing - }, false, null), - new StreamContext( - (partIdx, partSize, position) -> new InputStreamContainer(new ZeroInputStream(partSize), partSize, position), - ByteSizeUnit.MB.toBytes(1), - ByteSizeUnit.MB.toBytes(1), - 1 - ) + }, false, null, true), + new StreamContext((partIdx, partSize, position) -> { + streamRef.set(new ZeroInputStream(partSize)); + return new InputStreamContainer(streamRef.get(), partSize, position); + }, ByteSizeUnit.MB.toBytes(1), ByteSizeUnit.MB.toBytes(1), 1), + new StatsMetricPublisher() ); try { @@ -89,6 +97,14 @@ public void testOneChunkUpload() { } verify(s3AsyncClient, times(1)).putObject(any(PutObjectRequest.class), any(AsyncRequestBody.class)); + + boolean closeError = false; + try { + streamRef.get().available(); + } catch (IOException e) { + closeError = e.getMessage().equals("Stream closed"); + } + assertTrue("InputStream was still open after upload", closeError); } public void testOneChunkUploadCorruption() { @@ -111,13 +127,14 @@ public void testOneChunkUploadCorruption() { s3AsyncClient, new UploadRequest("bucket", "key", ByteSizeUnit.MB.toBytes(1), WritePriority.HIGH, uploadSuccess -> { // do nothing - }, false, null), + }, false, null, true), new StreamContext( (partIdx, partSize, position) -> new InputStreamContainer(new ZeroInputStream(partSize), partSize, position), ByteSizeUnit.MB.toBytes(1), ByteSizeUnit.MB.toBytes(1), 1 - ) + ), + new StatsMetricPublisher() ); try { @@ -158,17 +175,18 @@ public void testMultipartUpload() { abortMultipartUploadResponseCompletableFuture ); + List<InputStream> streams = new ArrayList<>(); CompletableFuture<Void> resultFuture = asyncTransferManager.uploadObject( s3AsyncClient, new UploadRequest("bucket", "key", ByteSizeUnit.MB.toBytes(5), WritePriority.HIGH, uploadSuccess -> { // do nothing - }, true, 3376132981L), - new StreamContext( - (partIdx, partSize, position) -> new 
InputStreamContainer(new ZeroInputStream(partSize), partSize, position), - ByteSizeUnit.MB.toBytes(1), - ByteSizeUnit.MB.toBytes(1), - 5 - ) + }, true, 3376132981L, true), + new StreamContext((partIdx, partSize, position) -> { + InputStream stream = new ZeroInputStream(partSize); + streams.add(stream); + return new InputStreamContainer(stream, partSize, position); + }, ByteSizeUnit.MB.toBytes(1), ByteSizeUnit.MB.toBytes(1), 5), + new StatsMetricPublisher() ); try { @@ -177,6 +195,16 @@ public void testMultipartUpload() { fail("did not expect resultFuture to fail"); } + streams.forEach(stream -> { + boolean closeError = false; + try { + stream.available(); + } catch (IOException e) { + closeError = e.getMessage().equals("Stream closed"); + } + assertTrue("InputStream was still open after upload", closeError); + }); + verify(s3AsyncClient, times(1)).createMultipartUpload(any(CreateMultipartUploadRequest.class)); verify(s3AsyncClient, times(5)).uploadPart(any(UploadPartRequest.class), any(AsyncRequestBody.class)); verify(s3AsyncClient, times(1)).completeMultipartUpload(any(CompleteMultipartUploadRequest.class)); @@ -212,13 +240,14 @@ public void testMultipartUploadCorruption() { s3AsyncClient, new UploadRequest("bucket", "key", ByteSizeUnit.MB.toBytes(5), WritePriority.HIGH, uploadSuccess -> { // do nothing - }, true, 0L), + }, true, 0L, true), new StreamContext( (partIdx, partSize, position) -> new InputStreamContainer(new ZeroInputStream(partSize), partSize, position), ByteSizeUnit.MB.toBytes(1), ByteSizeUnit.MB.toBytes(1), 5 - ) + ), + new StatsMetricPublisher() ); try { diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/utils/HttpRangeUtilsTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/utils/HttpRangeUtilsTests.java new file mode 100644 index 0000000000000..9a4267c5266e5 --- /dev/null +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/utils/HttpRangeUtilsTests.java @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.repositories.s3.utils; + +import software.amazon.awssdk.core.exception.SdkException; + +import org.opensearch.test.OpenSearchTestCase; + +public final class HttpRangeUtilsTests extends OpenSearchTestCase { + + public void testFromHttpRangeHeader() { + String headerValue = "bytes 0-10/200"; + Long offset = HttpRangeUtils.getStartOffsetFromRangeHeader(headerValue); + assertEquals(0L, offset.longValue()); + + headerValue = "bytes 0-10/*"; + offset = HttpRangeUtils.getStartOffsetFromRangeHeader(headerValue); + assertEquals(0L, offset.longValue()); + + final String invalidHeaderValue = "bytes */*"; + assertThrows(SdkException.class, () -> HttpRangeUtils.getStartOffsetFromRangeHeader(invalidHeaderValue)); + } +} diff --git a/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbMMapDirectoryTests.java b/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbMMapDirectoryTests.java index 547f2f36a7643..e1655cc5e0784 100644 --- a/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbMMapDirectoryTests.java +++ b/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbMMapDirectoryTests.java @@ -32,11 +32,12 @@ package org.opensearch.index.store; -import java.io.IOException; -import java.nio.file.Path; import org.apache.lucene.store.Directory; import org.apache.lucene.store.MMapDirectory; +import java.io.IOException; +import java.nio.file.Path; + public class SmbMMapDirectoryTests extends OpenSearchBaseDirectoryTestCase { @Override @@ -46,9 +47,9 @@ protected Directory getDirectory(Path file) throws IOException { @Override public void testCreateOutputForExistingFile() throws IOException { - /** - * This test is disabled because {@link SmbDirectoryWrapper} opens existing file - * with an explicit StandardOpenOption.TRUNCATE_EXISTING option. + /* + This test is disabled because {@link SmbDirectoryWrapper} opens existing file + with an explicit StandardOpenOption.TRUNCATE_EXISTING option. */ } } diff --git a/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbNIOFSDirectoryTests.java b/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbNIOFSDirectoryTests.java index 7390759029dfc..6f821147c3079 100644 --- a/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbNIOFSDirectoryTests.java +++ b/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbNIOFSDirectoryTests.java @@ -26,9 +26,9 @@ protected Directory getDirectory(Path file) throws IOException { @Override public void testCreateOutputForExistingFile() throws IOException { - /** - * This test is disabled because {@link SmbDirectoryWrapper} opens existing file - * with an explicit StandardOpenOption.TRUNCATE_EXISTING option. + /* + This test is disabled because {@link SmbDirectoryWrapper} opens existing file + with an explicit StandardOpenOption.TRUNCATE_EXISTING option. */ } } diff --git a/plugins/telemetry-otel/build.gradle b/plugins/telemetry-otel/build.gradle index 2c275388cce38..735cbd92b691a 100644 --- a/plugins/telemetry-otel/build.gradle +++ b/plugins/telemetry-otel/build.gradle @@ -11,10 +11,12 @@ import org.opensearch.gradle.Architecture import org.opensearch.gradle.OS import org.opensearch.gradle.info.BuildParams +apply plugin: 'opensearch.internal-cluster-test' + opensearchplugin { description 'Opentelemetry based telemetry implementation.' 
classname 'org.opensearch.telemetry.OTelTelemetryPlugin'
-  hasClientJar = true
+  hasClientJar = false
 }
 dependencies {
@@ -26,9 +28,17 @@ dependencies {
   api "io.opentelemetry:opentelemetry-sdk-trace:${versions.opentelemetry}"
   api "io.opentelemetry:opentelemetry-sdk-metrics:${versions.opentelemetry}"
   api "io.opentelemetry:opentelemetry-exporter-logging:${versions.opentelemetry}"
-  api "io.opentelemetry:opentelemetry-semconv:${versions.opentelemetry}-alpha"
-  api "io.opentelemetry:opentelemetry-sdk-logs:${versions.opentelemetry}-alpha"
-  api "io.opentelemetry:opentelemetry-api-logs:${versions.opentelemetry}-alpha"
+  api "io.opentelemetry.semconv:opentelemetry-semconv:${versions.opentelemetrysemconv}"
+  api "io.opentelemetry:opentelemetry-sdk-logs:${versions.opentelemetry}"
+  api "io.opentelemetry:opentelemetry-exporter-otlp:${versions.opentelemetry}"
+  api "io.opentelemetry:opentelemetry-exporter-common:${versions.opentelemetry}"
+  api "io.opentelemetry:opentelemetry-exporter-otlp-common:${versions.opentelemetry}"
+  runtimeOnly "org.jetbrains.kotlin:kotlin-stdlib:${versions.kotlin}"
+  runtimeOnly "com.squareup.okhttp3:okhttp:4.11.0"
+  runtimeOnly "com.squareup.okio:okio-jvm:3.5.0"
+  runtimeOnly "io.opentelemetry:opentelemetry-exporter-sender-okhttp:${versions.opentelemetry}"
+  api "io.opentelemetry:opentelemetry-extension-incubator:${versions.opentelemetry}-alpha"
+  testImplementation "io.opentelemetry:opentelemetry-sdk-testing:${versions.opentelemetry}"
 }
@@ -42,16 +52,43 @@ thirdPartyAudit {
   )
   ignoreMissingClasses(
+    'android.net.http.X509TrustManagerExtensions',
+    'android.net.ssl.SSLSockets',
+    'android.os.Build$VERSION',
+    'android.security.NetworkSecurityPolicy',
+    'android.util.Log',
+    'com.google.common.io.ByteStreams',
+    'com.google.common.util.concurrent.ListenableFuture',
+    'io.grpc.CallOptions',
+    'io.grpc.Channel',
+    'io.grpc.Drainable',
+    'io.grpc.KnownLength',
+    'io.grpc.ManagedChannel',
+    'io.grpc.MethodDescriptor',
+    'io.grpc.MethodDescriptor$Builder',
+    'io.grpc.MethodDescriptor$Marshaller',
+    'io.grpc.MethodDescriptor$MethodType',
+    'io.grpc.stub.AbstractFutureStub',
+    'io.grpc.stub.AbstractStub',
+    'io.grpc.stub.ClientCalls',
+    'org.bouncycastle.jsse.BCSSLParameters',
+    'org.bouncycastle.jsse.BCSSLSocket',
+    'org.conscrypt.Conscrypt',
+    'org.conscrypt.Conscrypt$Version',
+    'org.conscrypt.ConscryptHostnameVerifier',
+    'org.openjsse.javax.net.ssl.SSLParameters',
+    'org.openjsse.javax.net.ssl.SSLSocket',
+    'io.opentelemetry.api.events.EventBuilder',
     'io.opentelemetry.api.events.EventEmitter',
     'io.opentelemetry.api.events.EventEmitterBuilder',
     'io.opentelemetry.api.events.EventEmitterProvider',
-    'io.opentelemetry.extension.incubator.metrics.ExtendedDoubleHistogramBuilder',
-    'io.opentelemetry.extension.incubator.metrics.ExtendedLongHistogramBuilder',
-    'io.opentelemetry.extension.incubator.metrics.HistogramAdviceConfigurer',
     'io.opentelemetry.sdk.autoconfigure.spi.ConfigProperties',
     'io.opentelemetry.sdk.autoconfigure.spi.logs.ConfigurableLogRecordExporterProvider',
     'io.opentelemetry.sdk.autoconfigure.spi.metrics.ConfigurableMetricExporterProvider',
-    'io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider'
+    'io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider',
+    'kotlin.io.path.PathsKt',
+    'io.opentelemetry.sdk.autoconfigure.spi.internal.AutoConfigureListener'
   )
}
diff --git a/plugins/telemetry-otel/config/telemetry-otel/log4j2.properties
b/plugins/telemetry-otel/config/telemetry-otel/log4j2.properties index 544f42bd5513b..8dec1119eec66 100644 --- a/plugins/telemetry-otel/config/telemetry-otel/log4j2.properties +++ b/plugins/telemetry-otel/config/telemetry-otel/log4j2.properties @@ -25,3 +25,23 @@ logger.exporter.name = io.opentelemetry.exporter.logging.LoggingSpanExporter logger.exporter.level = INFO logger.exporter.appenderRef.tracing.ref = tracing logger.exporter.additivity = false + + +appender.metrics.type = RollingFile +appender.metrics.name = metrics +appender.metrics.fileName = ${sys:opensearch.logs.base_path}${sys:file.separator}${sys:opensearch.logs.cluster_name}_otel_metrics.log +appender.metrics.filePermissions = rw-r----- +appender.metrics.layout.type = PatternLayout +appender.metrics.layout.pattern = %m%n +appender.metrics.filePattern = ${sys:opensearch.logs.base_path}${sys:file.separator}${sys:opensearch.logs.cluster_name}_otel_metrics-%i.log.gz +appender.metrics.policies.type = Policies +appender.metrics.policies.size.type = SizeBasedTriggeringPolicy +appender.metrics.policies.size.size = 1GB +appender.metrics.strategy.type = DefaultRolloverStrategy +appender.metrics.strategy.max = 4 + + +logger.metrics_exporter.name = io.opentelemetry.exporter.logging.LoggingMetricExporter +logger.metrics_exporter.level = INFO +logger.metrics_exporter.appenderRef.tracing.ref = metrics +logger.metrics_exporter.additivity = false diff --git a/plugins/telemetry-otel/licenses/kotlin-stdlib-1.7.10.jar.sha1 b/plugins/telemetry-otel/licenses/kotlin-stdlib-1.7.10.jar.sha1 new file mode 100644 index 0000000000000..4d119fbf4df70 --- /dev/null +++ b/plugins/telemetry-otel/licenses/kotlin-stdlib-1.7.10.jar.sha1 @@ -0,0 +1 @@ +d2abf9e77736acc4450dc4a3f707fa2c10f5099d \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/kotlin-stdlib-LICENSE.txt b/plugins/telemetry-otel/licenses/kotlin-stdlib-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/kotlin-stdlib-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/repository-gcs/licenses/grpc-context-NOTICE.txt b/plugins/telemetry-otel/licenses/kotlin-stdlib-NOTICE.txt similarity index 100% rename from plugins/repository-gcs/licenses/grpc-context-NOTICE.txt rename to plugins/telemetry-otel/licenses/kotlin-stdlib-NOTICE.txt diff --git a/plugins/telemetry-otel/licenses/okhttp-4.11.0.jar.sha1 b/plugins/telemetry-otel/licenses/okhttp-4.11.0.jar.sha1 new file mode 100644 index 0000000000000..1fc0db6615cb5 --- /dev/null +++ b/plugins/telemetry-otel/licenses/okhttp-4.11.0.jar.sha1 @@ -0,0 +1 @@ +436932d695b2c43f2c86b8111c596179cd133d56 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/okhttp-LICENSE.txt b/plugins/telemetry-otel/licenses/okhttp-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/okhttp-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/repository-s3/licenses/reactive-streams-NOTICE.txt b/plugins/telemetry-otel/licenses/okhttp-NOTICE.txt similarity index 100% rename from plugins/repository-s3/licenses/reactive-streams-NOTICE.txt rename to plugins/telemetry-otel/licenses/okhttp-NOTICE.txt diff --git a/plugins/telemetry-otel/licenses/okio-jvm-3.5.0.jar.sha1 b/plugins/telemetry-otel/licenses/okio-jvm-3.5.0.jar.sha1 new file mode 100644 index 0000000000000..7b19d32d872fa --- /dev/null +++ b/plugins/telemetry-otel/licenses/okio-jvm-3.5.0.jar.sha1 @@ -0,0 +1 @@ +d6a0bc7343210eff7dd5cfdd6eb9b5f0036638ce \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/okio-jvm-LICENSE.txt b/plugins/telemetry-otel/licenses/okio-jvm-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/okio-jvm-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-logs-NOTICE.txt b/plugins/telemetry-otel/licenses/okio-jvm-NOTICE.txt similarity index 100% rename from plugins/telemetry-otel/licenses/opentelemetry-api-logs-NOTICE.txt rename to plugins/telemetry-otel/licenses/okio-jvm-NOTICE.txt diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.26.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.26.0.jar.sha1 deleted file mode 100644 index da3abcc8f70d2..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-api-1.26.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7ee1ccca95155e4640094ba8dfbd0bb8c1709c83 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..b577500d71e1d --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-1.36.0.jar.sha1 @@ -0,0 +1 @@ +59470f4aa3a9207f21936461b8fdcb36d46455ab \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-logs-1.26.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-logs-1.26.0-alpha.jar.sha1 deleted file mode 100644 index 2c233d785dcb2..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-api-logs-1.26.0-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1b0b6c1a20da0f841634d4f736e331aa4871a4db \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.26.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.26.0.jar.sha1 deleted file mode 100644 index 01d9fd732249b..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-context-1.26.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -42991f523a7a10761213e2f11633c67c8beaed88 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..d3156577248d5 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-context-1.36.0.jar.sha1 @@ -0,0 +1 @@ +8850bc4c65d0fd22ff987b4683206ec4e69f2689 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..f176b21d12dc4 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.36.0.jar.sha1 @@ -0,0 +1 @@ +8d1cb823ab18fa871a1549e7c522bf28f2b3d8fe \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/protobuf-java-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-NOTICE.txt similarity index 100% rename from protobuf-java-NOTICE.txt rename to plugins/telemetry-otel/licenses/opentelemetry-exporter-common-NOTICE.txt diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.26.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.26.0.jar.sha1 deleted file mode 100644 index ef07e4cb81e34..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.26.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1b932170774da5e766440fa058d879f68fe2c5dd \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..cd25e0ab9f294 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.36.0.jar.sha1 @@ -0,0 +1 @@ +bc045cae89ff6f18071760f6e4659dd880e88a1b \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..fabb394f9c2e0 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.36.0.jar.sha1 @@ -0,0 +1 @@ +5ee49902ba884d6c3e48499a9311a624396d9630 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..378ba4d43dcd1 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.36.0.jar.sha1 @@ -0,0 +1 @@ +2706e3b883d2bcd1a6b3e0bb4118ffbd7820550b \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..a3d7e15e1a624 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.36.0.jar.sha1 @@ -0,0 +1 @@ +dcc924787b559278697b74dbc5bb6d046b236ef6 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-LICENSE.txt new file mode 100644 index 0000000000000..6b0b1270ff0ca --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-LICENSE.txt @@ -0,0 +1,203 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.36.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.36.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..71ab3e184db9e --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.36.0-alpha.jar.sha1 @@ -0,0 +1 @@ +d58f7c669e371f6ff61b705770af9a3c1f31df52 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.26.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.26.0.jar.sha1 deleted file mode 100644 index dc9946de3b160..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.26.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -79a86f258ede8625627e8fbdff07d1149c88a8e6 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..c9a75d1b4350a --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.36.0.jar.sha1 @@ -0,0 +1 @@ +4056d1b562b4da7720817d8af15d1d3ccdf4b776 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.26.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.26.0.jar.sha1 deleted file mode 100644 index 2bd3e60a1faf6..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.26.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b42359d2232f8d802d55153be5330b1d9e21ee15 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..c31584f59c0d8 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.36.0.jar.sha1 @@ -0,0 +1 @@ +11d6f8c7b029efcb5c6c449cadef155b781afb78 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.26.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.26.0-alpha.jar.sha1 deleted file mode 100644 index 90bb8202c4c9d..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.26.0-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a8abeaee240291cce9067f07569f151d11a6275a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.36.0.jar.sha1 new file mode 100644 index 
0000000000000..a134bb06ec635 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.36.0.jar.sha1 @@ -0,0 +1 @@ +98e94479db1e68c4779efc44bf6b4fca83e98b54 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.26.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.26.0.jar.sha1 deleted file mode 100644 index 62396a603423f..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.26.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8c4af22d7d92a3a79714be3f79724b0ab774ba9e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..d146241f52f29 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.36.0.jar.sha1 @@ -0,0 +1 @@ +4f8f5d30c3eeede7b2260d979d9f403cfa381c3d \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.26.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.26.0.jar.sha1 deleted file mode 100644 index 0fcebee353105..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.26.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fcc5785b2cf2be897f31b927e24b53e46e377388 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.36.0.jar.sha1 new file mode 100644 index 0000000000000..802761e38846c --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.36.0.jar.sha1 @@ -0,0 +1 @@ +e3068cbaedfac6a28c6483923982b2efb861d3f4 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.23.1-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.23.1-alpha.jar.sha1 new file mode 100644 index 0000000000000..e730c83af905e --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.23.1-alpha.jar.sha1 @@ -0,0 +1 @@ +218e361772670212a46be5940010222d68e66f2a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 deleted file mode 100644 index 47c7ece8c9f6c..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1f4f963673f8209208f868666cd43e79b9a2dd15 \ No newline at end of file diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/IntegrationTestOTelTelemetryPlugin.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/IntegrationTestOTelTelemetryPlugin.java new file mode 100644 index 0000000000000..45caf8bf5f60b --- /dev/null +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/IntegrationTestOTelTelemetryPlugin.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry; + +import org.opensearch.common.settings.Settings; + +import java.util.Optional; + +import io.opentelemetry.api.GlobalOpenTelemetry; + +/** + * Telemetry plugin used for Integration tests. 
+*/ +public class IntegrationTestOTelTelemetryPlugin extends OTelTelemetryPlugin { + /** + * Creates the IntegrationTestOTelTelemetryPlugin. + * @param settings cluster settings + */ + public IntegrationTestOTelTelemetryPlugin(Settings settings) { + super(settings); + } + + /** + * Overrides the getTelemetry() method of the OTel plugin class so that only one instance of the global + * OpenTelemetry is created; resetForTest() sets the global OpenTelemetry back to null first. + * @param telemetrySettings telemetry settings + * @return the telemetry instance + */ + @Override + public Optional<Telemetry> getTelemetry(TelemetrySettings telemetrySettings) { + GlobalOpenTelemetry.resetForTest(); + return super.getTelemetry(telemetrySettings); + } +} diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/InMemorySingletonMetricsExporter.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/InMemorySingletonMetricsExporter.java new file mode 100644 index 0000000000000..74fc872cb30e3 --- /dev/null +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/InMemorySingletonMetricsExporter.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import java.util.Collection; +import java.util.List; + +import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.sdk.metrics.InstrumentType; +import io.opentelemetry.sdk.metrics.data.AggregationTemporality; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.export.MetricExporter; +import io.opentelemetry.sdk.testing.exporter.InMemoryMetricExporter; + +public class InMemorySingletonMetricsExporter implements MetricExporter { + + public static final InMemorySingletonMetricsExporter INSTANCE = new InMemorySingletonMetricsExporter(InMemoryMetricExporter.create()); + + private static InMemoryMetricExporter delegate; + + public static InMemorySingletonMetricsExporter create() { + return INSTANCE; + } + + private InMemorySingletonMetricsExporter(InMemoryMetricExporter delegate) { + InMemorySingletonMetricsExporter.delegate = delegate; + } + + @Override + public CompletableResultCode export(Collection<MetricData> metrics) { + return delegate.export(metrics); + } + + @Override + public CompletableResultCode flush() { + return delegate.flush(); + } + + @Override + public CompletableResultCode shutdown() { + return delegate.shutdown(); + } + + public List<MetricData> getFinishedMetricItems() { + return delegate.getFinishedMetricItems(); + } + + /** + * Clears the state.
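+ * The delegate is a single process-wide instance shared by every node in the internal test cluster, so tests + * call this between runs to keep metric data recorded by one test case from leaking into the next.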
+ */ + public void reset() { + delegate.reset(); + } + + @Override + public AggregationTemporality getAggregationTemporality(InstrumentType instrumentType) { + return delegate.getAggregationTemporality(instrumentType); + } +} diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsDisabledSanityIT.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsDisabledSanityIT.java new file mode 100644 index 0000000000000..e77e69d121036 --- /dev/null +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsDisabledSanityIT.java @@ -0,0 +1,66 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.plugins.Plugin; +import org.opensearch.telemetry.IntegrationTestOTelTelemetryPlugin; +import org.opensearch.telemetry.OTelTelemetrySettings; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.telemetry.metrics.noop.NoopCounter; +import org.opensearch.telemetry.metrics.noop.NoopHistogram; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, minNumDataNodes = 1) +public class TelemetryMetricsDisabledSanityIT extends OpenSearchIntegTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(TelemetrySettings.METRICS_FEATURE_ENABLED_SETTING.getKey(), false) + .put( + OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.getKey(), + "org.opensearch.telemetry.metrics.InMemorySingletonMetricsExporter" + ) + .put(TelemetrySettings.METRICS_PUBLISH_INTERVAL_SETTING.getKey(), TimeValue.timeValueSeconds(1)) + .build(); + } + + @Override + protected Collection<Class<? 
extends Plugin>> nodePlugins() { + return Arrays.asList(IntegrationTestOTelTelemetryPlugin.class); + } + + @Override + protected boolean addMockTelemetryPlugin() { + return false; + } + + public void testSanityChecksWhenMetricsDisabled() throws Exception { + MetricsRegistry metricsRegistry = internalCluster().getInstance(MetricsRegistry.class); + + Counter counter = metricsRegistry.createCounter("test-counter", "test", "1"); + counter.add(1.0); + + Histogram histogram = metricsRegistry.createHistogram("test-histogram", "test", "1"); + + Thread.sleep(2000); + + assertTrue(metricsRegistry instanceof NoopMetricsRegistry); + assertTrue(counter instanceof NoopCounter); + assertTrue(histogram instanceof NoopHistogram); + } + +} diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsEnabledSanityIT.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsEnabledSanityIT.java new file mode 100644 index 0000000000000..1b8f694709a9c --- /dev/null +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsEnabledSanityIT.java @@ -0,0 +1,125 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.plugins.Plugin; +import org.opensearch.telemetry.IntegrationTestOTelTelemetryPlugin; +import org.opensearch.telemetry.OTelTelemetrySettings; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.After; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.stream.Collectors; + +import io.opentelemetry.sdk.metrics.data.DoublePointData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableExponentialHistogramPointData; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, minNumDataNodes = 1) +public class TelemetryMetricsEnabledSanityIT extends OpenSearchIntegTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(TelemetrySettings.METRICS_FEATURE_ENABLED_SETTING.getKey(), true) + .put( + OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.getKey(), + "org.opensearch.telemetry.metrics.InMemorySingletonMetricsExporter" + ) + .put(TelemetrySettings.METRICS_PUBLISH_INTERVAL_SETTING.getKey(), TimeValue.timeValueSeconds(1)) + .build(); + } + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Arrays.asList(IntegrationTestOTelTelemetryPlugin.class); + } + + @Override + protected boolean addMockTelemetryPlugin() { + return false; + } + + public void testCounter() throws Exception { + MetricsRegistry metricsRegistry = internalCluster().getInstance(MetricsRegistry.class); + InMemorySingletonMetricsExporter.INSTANCE.reset(); + + Counter counter = metricsRegistry.createCounter("test-counter", "test", "1"); + counter.add(1.0); + // Sleep for about 2s to wait for metrics to be published. 
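+ // METRICS_PUBLISH_INTERVAL_SETTING is set to 1s above, so a 2s sleep should cover at least one export cycle + // of the periodic metric reader; a polling helper such as assertBusy would likely be less flaky than a fixed sleep.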
+ Thread.sleep(2000); + + InMemorySingletonMetricsExporter exporter = InMemorySingletonMetricsExporter.INSTANCE; + double value = ((DoublePointData) ((ArrayList) exporter.getFinishedMetricItems() + .stream() + .filter(a -> a.getName().equals("test-counter")) + .collect(Collectors.toList()) + .get(0) + .getDoubleSumData() + .getPoints()).get(0)).getValue(); + assertEquals(1.0, value, 0.0); + } + + public void testUpDownCounter() throws Exception { + + MetricsRegistry metricsRegistry = internalCluster().getInstance(MetricsRegistry.class); + InMemorySingletonMetricsExporter.INSTANCE.reset(); + + Counter counter = metricsRegistry.createUpDownCounter("test-up-down-counter", "test", "1"); + counter.add(1.0); + counter.add(-2.0); + // Sleep for about 2s to wait for metrics to be published. + Thread.sleep(2000); + + InMemorySingletonMetricsExporter exporter = InMemorySingletonMetricsExporter.INSTANCE; + double value = ((DoublePointData) ((ArrayList) exporter.getFinishedMetricItems() + .stream() + .filter(a -> a.getName().equals("test-up-down-counter")) + .collect(Collectors.toList()) + .get(0) + .getDoubleSumData() + .getPoints()).get(0)).getValue(); + assertEquals(-1.0, value, 0.0); + } + + public void testHistogram() throws Exception { + MetricsRegistry metricsRegistry = internalCluster().getInstance(MetricsRegistry.class); + InMemorySingletonMetricsExporter.INSTANCE.reset(); + + Histogram histogram = metricsRegistry.createHistogram("test-histogram", "test", "ms"); + histogram.record(2.0); + histogram.record(1.0); + histogram.record(3.0); + // Sleep for about 2s to wait for metrics to be published. + Thread.sleep(2000); + + InMemorySingletonMetricsExporter exporter = InMemorySingletonMetricsExporter.INSTANCE; + ImmutableExponentialHistogramPointData histogramPointData = ((ImmutableExponentialHistogramPointData) ((ArrayList) exporter + .getFinishedMetricItems() + .stream() + .filter(a -> a.getName().contains("test-histogram")) + .collect(Collectors.toList()) + .get(0) + .getExponentialHistogramData() + .getPoints()).get(0)); + assertEquals(1.0, histogramPointData.getSum(), 6.0); + assertEquals(1.0, histogramPointData.getMax(), 3.0); + assertEquals(1.0, histogramPointData.getMin(), 1.0); + } + + @After + public void reset() { + InMemorySingletonMetricsExporter.INSTANCE.reset(); + } +} diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/InMemorySingletonSpanExporter.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/InMemorySingletonSpanExporter.java new file mode 100644 index 0000000000000..6dd451ea37465 --- /dev/null +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/InMemorySingletonSpanExporter.java @@ -0,0 +1,93 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.tracing; + +import org.opensearch.test.telemetry.tracing.MockSpanData; + +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.sdk.testing.exporter.InMemorySpanExporter; +import io.opentelemetry.sdk.trace.data.SpanData; +import io.opentelemetry.sdk.trace.export.SpanExporter; + +public class InMemorySingletonSpanExporter implements SpanExporter { + + public static final InMemorySingletonSpanExporter INSTANCE = new InMemorySingletonSpanExporter(InMemorySpanExporter.create()); + + private static InMemorySpanExporter delegate; + + public static InMemorySingletonSpanExporter create() { + return INSTANCE; + } + + private InMemorySingletonSpanExporter(InMemorySpanExporter delegate) { + InMemorySingletonSpanExporter.delegate = delegate; + } + + @Override + public CompletableResultCode export(Collection<SpanData> spans) { + return delegate.export(spans); + } + + @Override + public CompletableResultCode flush() { + return delegate.flush(); + } + + @Override + public CompletableResultCode shutdown() { + return delegate.shutdown(); + } + + public List<MockSpanData> getFinishedSpanItems() { + return convertSpanDataListToMockSpanDataList(delegate.getFinishedSpanItems()); + } + + private List<MockSpanData> convertSpanDataListToMockSpanDataList(List<SpanData> spanDataList) { + List<MockSpanData> mockSpanDataList = spanDataList.stream() + .map( + spanData -> new MockSpanData( + spanData.getSpanId(), + spanData.getParentSpanId(), + spanData.getTraceId(), + spanData.getStartEpochNanos(), + spanData.getEndEpochNanos(), + spanData.hasEnded(), + spanData.getName(), + getAttributes(spanData) + ) + ) + .collect(Collectors.toList()); + return mockSpanDataList; + } + + private Map<String, Object> getAttributes(SpanData spanData) { + if (spanData.getAttributes() != null) { + return spanData.getAttributes() + .asMap() + .entrySet() + .stream() + .collect(Collectors.toMap(e -> e.getKey().getKey(), e -> e.getValue())); + } else { + return Collections.emptyMap(); + } + } + + /** + * Clears the state. + */ + public void reset() { + delegate.reset(); + } +} diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerDisabledSanityIT.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerDisabledSanityIT.java new file mode 100644 index 0000000000000..45ed140e1be94 --- /dev/null +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerDisabledSanityIT.java @@ -0,0 +1,79 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.tracing; + +import org.opensearch.client.Client; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.plugins.Plugin; +import org.opensearch.telemetry.IntegrationTestOTelTelemetryPlugin; +import org.opensearch.telemetry.OTelTelemetrySettings; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; + +import static org.opensearch.index.query.QueryBuilders.queryStringQuery; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, supportsDedicatedMasters = false, minNumDataNodes = 2) +public class TelemetryTracerDisabledSanityIT extends OpenSearchIntegTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put( + OTelTelemetrySettings.OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING.getKey(), + "org.opensearch.telemetry.tracing.InMemorySingletonSpanExporter" + ) + .put(OTelTelemetrySettings.TRACER_EXPORTER_DELAY_SETTING.getKey(), TimeValue.timeValueSeconds(1)) + .build(); + } + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Arrays.asList(IntegrationTestOTelTelemetryPlugin.class); + } + + @Override + protected boolean addMockTelemetryPlugin() { + return false; + } + + public void testSanityCheckWhenTracingDisabled() throws Exception { + Client client = client(); + // DISABLE TRACING + client.admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), false)) + .get(); + + // Create index and ingest data + String indexName = "test-index-11"; + Settings basicSettings = Settings.builder().put("number_of_shards", 3).put("number_of_replicas", 1).build(); + createIndex(indexName, basicSettings); + indexRandom(true, client.prepareIndex(indexName).setId("1").setSource("field1", "the fox jumps in the well")); + + ensureGreen(); + refresh(); + InMemorySingletonSpanExporter exporter = InMemorySingletonSpanExporter.INSTANCE; + exporter.reset(); + + // Make the search call. + client.prepareSearch().setQuery(queryStringQuery("fox")).get(); + + // Sleep for about 3s to wait for traces to be published (the exporter delay is 1s). + Thread.sleep(3000); + + assertTrue(exporter.getFinishedSpanItems().isEmpty()); + } + +} diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerEnabledSanityIT.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerEnabledSanityIT.java new file mode 100644 index 0000000000000..156dc344d1ae2 --- /dev/null +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerEnabledSanityIT.java @@ -0,0 +1,107 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.telemetry.tracing; + +import org.opensearch.client.Client; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.plugins.Plugin; +import org.opensearch.telemetry.IntegrationTestOTelTelemetryPlugin; +import org.opensearch.telemetry.OTelTelemetrySettings; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.telemetry.tracing.attributes.Attributes; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.telemetry.tracing.TelemetryValidators; +import org.opensearch.test.telemetry.tracing.validators.AllSpansAreEndedProperly; +import org.opensearch.test.telemetry.tracing.validators.AllSpansHaveUniqueId; +import org.opensearch.test.telemetry.tracing.validators.NumberOfTraceIDsEqualToRequests; +import org.opensearch.test.telemetry.tracing.validators.TotalRootSpansEqualToRequests; + +import java.util.Arrays; +import java.util.Collection; + +import static org.opensearch.index.query.QueryBuilders.queryStringQuery; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, minNumDataNodes = 2) +public class TelemetryTracerEnabledSanityIT extends OpenSearchIntegTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put( + OTelTelemetrySettings.OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING.getKey(), + "org.opensearch.telemetry.tracing.InMemorySingletonSpanExporter" + ) + .put(OTelTelemetrySettings.TRACER_EXPORTER_DELAY_SETTING.getKey(), TimeValue.timeValueSeconds(1)) + .put(TelemetrySettings.TRACER_SAMPLER_PROBABILITY.getKey(), 1.0d) + .build(); + } + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Arrays.asList(IntegrationTestOTelTelemetryPlugin.class); + } + + @Override + protected boolean addMockTelemetryPlugin() { + return false; + } + + public void testSanityChecksWhenTracingEnabled() throws Exception { + Client client = internalCluster().clusterManagerClient(); + // ENABLE TRACING + updateTelemetrySetting(client, true); + + // Create index and ingest data + String indexName = "test-index-11"; + Settings basicSettings = Settings.builder() + .put("number_of_shards", 2) + .put("number_of_replicas", 0) + .put("index.routing.allocation.total_shards_per_node", 1) + .build(); + createIndex(indexName, basicSettings); + + indexRandom(false, client.prepareIndex(indexName).setId("1").setSource("field1", "the fox jumps in the well")); + indexRandom(false, client.prepareIndex(indexName).setId("2").setSource("field2", "another fox did the same.")); + + ensureGreen(); + refresh(); + + // Make the search calls; the explicit searchType and preFilterShardSize make the query path predictable across runs. + client.prepareSearch().setSearchType("dfs_query_then_fetch").setPreFilterShardSize(2).setQuery(queryStringQuery("fox")).get(); + client.prepareSearch().setSearchType("dfs_query_then_fetch").setPreFilterShardSize(2).setQuery(queryStringQuery("jumps")).get(); + + // Sleep for about 3s to wait for traces to be published (the exporter delay is 1s).
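+ // The 1s value comes from TRACER_EXPORTER_DELAY_SETTING above: the batch span processor flushes on that + // schedule, so roughly three times the delay leaves headroom for spans from both search calls to be exported.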
+ Thread.sleep(3000); + + TelemetryValidators validators = new TelemetryValidators( + Arrays.asList( + new AllSpansAreEndedProperly(), + new AllSpansHaveUniqueId(), + new NumberOfTraceIDsEqualToRequests(Attributes.create().addAttribute("action", "indices:data/read/search[phase/query]")), + new TotalRootSpansEqualToRequests() + ) + ); + + // Please see https://github.com/opensearch-project/OpenSearch/issues/10291: until the local transport is + // instrumented, only the inter-node transport actions are captured. + InMemorySingletonSpanExporter exporter = InMemorySingletonSpanExporter.INSTANCE; + validators.validate(exporter.getFinishedSpanItems(), 4); + } + + private static void updateTelemetrySetting(Client client, boolean value) { + client.admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), value)) + .get(); + } + +} diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelAttributesConverter.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelAttributesConverter.java new file mode 100644 index 0000000000000..98d265e92ba3c --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelAttributesConverter.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry; + +import org.opensearch.telemetry.metrics.tags.Tags; + +import java.util.Locale; + +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.common.AttributesBuilder; + +/** + * Converts {@link org.opensearch.telemetry.tracing.attributes.Attributes} to OTel {@link Attributes} + */ +public final class OTelAttributesConverter { + + /** + * Constructor. + */ + private OTelAttributesConverter() {} + + /** + * Attribute converter. + * @param attributes attributes + * @return OTel attributes. + */ + public static Attributes convert(org.opensearch.telemetry.tracing.attributes.Attributes attributes) { + AttributesBuilder attributesBuilder = Attributes.builder(); + if (attributes != null) { + attributes.getAttributesMap().forEach((x, y) -> addSpanAttribute(x, y, attributesBuilder)); + } + return attributesBuilder.build(); + } + + private static void addSpanAttribute(String key, Object value, AttributesBuilder attributesBuilder) { + if (value instanceof Boolean) { + attributesBuilder.put(key, (Boolean) value); + } else if (value instanceof Long) { + attributesBuilder.put(key, (Long) value); + } else if (value instanceof Double) { + attributesBuilder.put(key, (Double) value); + } else if (value instanceof String) { + attributesBuilder.put(key, (String) value); + } else { + throw new IllegalArgumentException(String.format(Locale.ROOT, "Span attribute value %s type not supported", value)); + } + } + + /** + * Attribute converter. + * @param tags tags + * @return OTel attributes.
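+ * For example, a hypothetical {@code Tags.create().addTag("status", "ok")} (assuming {@code Tags} mirrors the + * {@code Attributes.create().addAttribute(...)} builder used elsewhere in this change) converts to an OTel + * {@code Attributes} instance holding a single string entry.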
+ */ + public static Attributes convert(Tags tags) { + AttributesBuilder attributesBuilder = Attributes.builder(); + if (tags != null) { + tags.getTagsMap().forEach((x, y) -> addSpanAttribute(x, y, attributesBuilder)); + } + return attributesBuilder.build(); + } +} diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java index a1ca3adf4d2a2..000fd09d43c18 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java @@ -8,28 +8,36 @@ package org.opensearch.telemetry; +import org.opensearch.common.concurrent.RefCountedReleasable; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.plugins.Plugin; import org.opensearch.plugins.TelemetryPlugin; -import org.opensearch.telemetry.metrics.MetricsTelemetry; import org.opensearch.telemetry.tracing.OTelResourceProvider; import org.opensearch.telemetry.tracing.OTelTelemetry; -import org.opensearch.telemetry.tracing.OTelTracingTelemetry; import java.util.Arrays; import java.util.List; import java.util.Optional; +import io.opentelemetry.sdk.OpenTelemetrySdk; + /** * Telemetry plugin based on Otel */ public class OTelTelemetryPlugin extends Plugin implements TelemetryPlugin { + /** + * Instrumentation scope name. + */ + public static final String INSTRUMENTATION_SCOPE_NAME = "org.opensearch.telemetry"; + static final String OTEL_TRACER_NAME = "otel"; private final Settings settings; + private RefCountedReleasable<OpenTelemetrySdk> refCountedOpenTelemetry; + /** * Creates Otel plugin * @param settings cluster settings @@ -44,13 +52,25 @@ public List<Setting<?>> getSettings() { OTelTelemetrySettings.TRACER_EXPORTER_BATCH_SIZE_SETTING, OTelTelemetrySettings.TRACER_EXPORTER_DELAY_SETTING, OTelTelemetrySettings.TRACER_EXPORTER_MAX_QUEUE_SIZE_SETTING, - OTelTelemetrySettings.OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING + OTelTelemetrySettings.OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING, + OTelTelemetrySettings.OTEL_TRACER_SPAN_SAMPLER_CLASS_SETTINGS, + OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING, + OTelTelemetrySettings.TRACER_SAMPLER_ACTION_PROBABILITY ); } @Override - public Optional<Telemetry> getTelemetry(TelemetrySettings settings) { - return Optional.of(telemetry()); + public Optional<Telemetry> getTelemetry(TelemetrySettings telemetrySettings) { + initializeOpenTelemetrySdk(telemetrySettings); + return Optional.of(telemetry(telemetrySettings)); + } + + private void initializeOpenTelemetrySdk(TelemetrySettings telemetrySettings) { + if (refCountedOpenTelemetry != null) { + return; + } + OpenTelemetrySdk openTelemetrySdk = OTelResourceProvider.get(telemetrySettings, settings); + refCountedOpenTelemetry = new RefCountedReleasable<>("openTelemetry", openTelemetrySdk, openTelemetrySdk::close); } @Override @@ -58,9 +78,15 @@ public String getName() { return OTEL_TRACER_NAME; } - private Telemetry telemetry() { - return new OTelTelemetry(new OTelTracingTelemetry(OTelResourceProvider.get(settings)), new MetricsTelemetry() { - }); + private Telemetry telemetry(TelemetrySettings telemetrySettings) { + return new OTelTelemetry(refCountedOpenTelemetry); + } + + @Override + public void close() { + if (refCountedOpenTelemetry != null) { + refCountedOpenTelemetry.close(); + } } } diff --git 
a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetrySettings.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetrySettings.java index 2df13e2cd5612..95ce6918fcb70 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetrySettings.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetrySettings.java @@ -8,15 +8,26 @@ package org.opensearch.telemetry; -import io.opentelemetry.exporter.logging.LoggingSpanExporter; -import io.opentelemetry.sdk.trace.export.SpanExporter; -import java.security.AccessController; -import java.security.PrivilegedActionException; -import java.security.PrivilegedExceptionAction; import org.opensearch.SpecialPermission; import org.opensearch.common.settings.Setting; import org.opensearch.common.unit.TimeValue; +import org.opensearch.telemetry.metrics.exporter.OTelMetricsExporterFactory; import org.opensearch.telemetry.tracing.exporter.OTelSpanExporterFactory; +import org.opensearch.telemetry.tracing.sampler.OTelSamplerFactory; +import org.opensearch.telemetry.tracing.sampler.ProbabilisticSampler; +import org.opensearch.telemetry.tracing.sampler.ProbabilisticTransportActionSampler; + +import java.security.AccessController; +import java.security.PrivilegedActionException; +import java.security.PrivilegedExceptionAction; +import java.util.Arrays; +import java.util.List; + +import io.opentelemetry.exporter.logging.LoggingMetricExporter; +import io.opentelemetry.exporter.logging.LoggingSpanExporter; +import io.opentelemetry.sdk.metrics.export.MetricExporter; +import io.opentelemetry.sdk.trace.export.SpanExporter; +import io.opentelemetry.sdk.trace.samplers.Sampler; /** * OTel specific telemetry settings. @@ -61,7 +72,7 @@ private OTelTelemetrySettings() {} /** * Span Exporter type setting. */ - @SuppressWarnings("unchecked") + @SuppressWarnings({ "unchecked", "removal" }) public static final Setting<Class<SpanExporter>> OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING = new Setting<>( "telemetry.otel.tracer.span.exporter.class", LoggingSpanExporter.class.getName(), @@ -81,4 +92,64 @@ private OTelTelemetrySettings() {} Setting.Property.NodeScope, Setting.Property.Final ); + + /** + * Metrics Exporter type setting. + */ + @SuppressWarnings({ "unchecked", "removal" }) + public static final Setting<Class<MetricExporter>> OTEL_METRICS_EXPORTER_CLASS_SETTING = new Setting<>( + "telemetry.otel.metrics.exporter.class", + LoggingMetricExporter.class.getName(), + className -> { + // Check we ourselves are not being called by unprivileged code. + SpecialPermission.check(); + + try { + return AccessController.doPrivileged((PrivilegedExceptionAction<Class<MetricExporter>>) () -> { + final ClassLoader loader = OTelMetricsExporterFactory.class.getClassLoader(); + return (Class<MetricExporter>) loader.loadClass(className); + }); + } catch (PrivilegedActionException ex) { + throw new IllegalStateException("Unable to load metric exporter class: " + className, ex.getCause()); + } + }, + Setting.Property.NodeScope, + Setting.Property.Final + ); + + /** + * Ordered list of sampler classes setting. + */ + @SuppressWarnings("unchecked") + public static final Setting<List<Class<Sampler>>> OTEL_TRACER_SPAN_SAMPLER_CLASS_SETTINGS = Setting.listSetting( + "telemetry.otel.tracer.span.sampler.classes", + Arrays.asList(ProbabilisticTransportActionSampler.class.getName(), ProbabilisticSampler.class.getName()), + sampler -> { + // Check we ourselves are not being called by unprivileged code.
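+ // SpecialPermission.check() throws for callers that lack OpenSearch's special permission; the doPrivileged + // block below then loads the sampler class with the plugin's own privileges, mirroring the pattern used + // for the exporter settings above.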
+ SpecialPermission.check(); + try { + return AccessController.doPrivileged((PrivilegedExceptionAction<Class<Sampler>>) () -> { + final ClassLoader loader = OTelSamplerFactory.class.getClassLoader(); + return (Class<Sampler>) loader.loadClass(sampler); + }); + } catch (PrivilegedActionException ex) { + throw new IllegalStateException("Unable to load sampler class: " + sampler, ex.getCause()); + } + }, + Setting.Property.NodeScope, + Setting.Property.Final + ); + + /** + * Probability of action based sampler + */ + public static final Setting<Double> TRACER_SAMPLER_ACTION_PROBABILITY = Setting.doubleSetting( + "telemetry.tracer.action.sampler.probability", + 0.001d, + 0.000d, + 1.00d, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + } diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelCounter.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelCounter.java new file mode 100644 index 0000000000000..b72f63e027243 --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelCounter.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.telemetry.OTelAttributesConverter; +import org.opensearch.telemetry.metrics.tags.Tags; + +import io.opentelemetry.api.metrics.DoubleCounter; + +/** + * OTel Counter + */ +class OTelCounter implements Counter { + + private final DoubleCounter otelDoubleCounter; + + /** + * Constructor + * @param otelDoubleCounter delegate counter. + */ + public OTelCounter(DoubleCounter otelDoubleCounter) { + this.otelDoubleCounter = otelDoubleCounter; + } + + @Override + public void add(double value) { + otelDoubleCounter.add(value); + } + + @Override + public void add(double value, Tags tags) { + otelDoubleCounter.add(value, OTelAttributesConverter.convert(tags)); + } +} diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelHistogram.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelHistogram.java new file mode 100644 index 0000000000000..73bb0d8adff62 --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelHistogram.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.telemetry.OTelAttributesConverter; +import org.opensearch.telemetry.metrics.tags.Tags; + +import io.opentelemetry.api.metrics.DoubleHistogram; + +/** + * OTel aware implementation {@link Histogram} + */ +class OTelHistogram implements Histogram { + + private final DoubleHistogram otelDoubleHistogram; + + /** + * Constructor + * @param otelDoubleCounter delegate counter. 
+ */ + public OTelHistogram(DoubleHistogram otelDoubleCounter) { + this.otelDoubleHistogram = otelDoubleCounter; + } + + @Override + public void record(double value) { + otelDoubleHistogram.record(value); + } + + @Override + public void record(double value, Tags tags) { + otelDoubleHistogram.record(value, OTelAttributesConverter.convert(tags)); + } +} diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java new file mode 100644 index 0000000000000..82ae2cdd198b2 --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java @@ -0,0 +1,94 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.common.concurrent.RefCountedReleasable; +import org.opensearch.telemetry.OTelTelemetryPlugin; + +import java.io.Closeable; +import java.io.IOException; +import java.security.AccessController; +import java.security.PrivilegedAction; + +import io.opentelemetry.api.metrics.DoubleCounter; +import io.opentelemetry.api.metrics.DoubleHistogram; +import io.opentelemetry.api.metrics.DoubleUpDownCounter; +import io.opentelemetry.api.metrics.Meter; +import io.opentelemetry.api.metrics.MeterProvider; +import io.opentelemetry.sdk.OpenTelemetrySdk; + +/** + * OTel implementation for {@link MetricsTelemetry} + */ +public class OTelMetricsTelemetry<T extends MeterProvider & Closeable> implements MetricsTelemetry { + private final RefCountedReleasable<OpenTelemetrySdk> refCountedOpenTelemetry; + private final Meter otelMeter; + private final T meterProvider; + + /** + * Creates OTel based {@link MetricsTelemetry}. + * @param openTelemetry open telemetry. + * @param meterProvider {@link MeterProvider} instance + */ + public OTelMetricsTelemetry(RefCountedReleasable<OpenTelemetrySdk> openTelemetry, T meterProvider) { + this.refCountedOpenTelemetry = openTelemetry; + this.refCountedOpenTelemetry.incRef(); + this.meterProvider = meterProvider; + this.otelMeter = meterProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME); + } + + @SuppressWarnings("removal") + @Override + public Counter createCounter(String name, String description, String unit) { + DoubleCounter doubleCounter = AccessController.doPrivileged( + (PrivilegedAction<DoubleCounter>) () -> otelMeter.counterBuilder(name) + .setUnit(unit) + .setDescription(description) + .ofDoubles() + .build() + ); + return new OTelCounter(doubleCounter); + } + + @SuppressWarnings("removal") + @Override + public Counter createUpDownCounter(String name, String description, String unit) { + DoubleUpDownCounter doubleUpDownCounter = AccessController.doPrivileged( + (PrivilegedAction<DoubleUpDownCounter>) () -> otelMeter.upDownCounterBuilder(name) + .setUnit(unit) + .setDescription(description) + .ofDoubles() + .build() + ); + return new OTelUpDownCounter(doubleUpDownCounter); + } + + /** + * Creates the OTel histogram. In {@link org.opensearch.telemetry.tracing.OTelResourceProvider} + * we can configure the bucketing/aggregation strategy through a view. The default strategy configured + * is the {@link io.opentelemetry.sdk.metrics.internal.view.Base2ExponentialHistogramAggregation}. + * @param name name of the histogram.
+ * @param description any description about the metric. + * @param unit unit of the metric. + * @return histogram + */ + @Override + public Histogram createHistogram(String name, String description, String unit) { + DoubleHistogram doubleHistogram = AccessController.doPrivileged( + (PrivilegedAction<DoubleHistogram>) () -> otelMeter.histogramBuilder(name).setUnit(unit).setDescription(description).build() + ); + return new OTelHistogram(doubleHistogram); + } + + @Override + public void close() throws IOException { + meterProvider.close(); + refCountedOpenTelemetry.close(); + } +} diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelUpDownCounter.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelUpDownCounter.java new file mode 100644 index 0000000000000..2f40881996f7e --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelUpDownCounter.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.telemetry.OTelAttributesConverter; +import org.opensearch.telemetry.metrics.tags.Tags; + +import io.opentelemetry.api.metrics.DoubleUpDownCounter; + +/** + * OTel UpDownCounter + */ +public class OTelUpDownCounter implements Counter { + + private final DoubleUpDownCounter doubleUpDownCounter; + + /** + * Constructor + * @param doubleUpDownCounter delegate counter. + */ + public OTelUpDownCounter(DoubleUpDownCounter doubleUpDownCounter) { + this.doubleUpDownCounter = doubleUpDownCounter; + } + + @Override + public void add(double value) { + doubleUpDownCounter.add(value); + } + + @Override + public void add(double value, Tags tags) { + doubleUpDownCounter.add(value, OTelAttributesConverter.convert(tags)); + } +} diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/exporter/OTelMetricsExporterFactory.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/exporter/OTelMetricsExporterFactory.java new file mode 100644 index 0000000000000..9c548044484fd --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/exporter/OTelMetricsExporterFactory.java @@ -0,0 +1,91 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics.exporter; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.SpecialPermission; +import org.opensearch.common.settings.Settings; +import org.opensearch.telemetry.OTelTelemetrySettings; + +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; +import java.lang.reflect.Method; +import java.security.AccessController; +import java.security.PrivilegedActionException; +import java.security.PrivilegedExceptionAction; + +import io.opentelemetry.sdk.metrics.export.MetricExporter; + +/** + * Factory class to create the {@link MetricExporter} instance. + */ +public class OTelMetricsExporterFactory { + + private static final Logger logger = LogManager.getLogger(OTelMetricsExporterFactory.class); + + /** + * Base constructor.
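+ * Private so the class is used only through its static {@code create} factory method; it is a stateless utility.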
+ */ + private OTelMetricsExporterFactory() { + + } + + /** + * Creates the {@link MetricExporter} instance based on the OTEL_METRICS_EXPORTER_CLASS_SETTING value. + * It expects the MetricExporter implementation to expose either a static create factory method or a static + * getDefault method to instantiate the MetricExporter. + * @param settings settings. + * @return MetricExporter instance. + */ + public static MetricExporter create(Settings settings) { + Class<MetricExporter> metricExporterProviderClass = OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.get(settings); + MetricExporter metricExporter = instantiateExporter(metricExporterProviderClass); + logger.info("Successfully instantiated the Metrics MetricExporter class {}", metricExporterProviderClass); + return metricExporter; + } + + @SuppressWarnings("removal") + private static MetricExporter instantiateExporter(Class<MetricExporter> exporterProviderClass) { + try { + // Check we ourselves are not being called by unprivileged code. + SpecialPermission.check(); + return AccessController.doPrivileged((PrivilegedExceptionAction<MetricExporter>) () -> { + String methodName = "create"; + String getDefaultMethod = "getDefault"; + for (Method m : exporterProviderClass.getMethods()) { + if (m.getName().equals(getDefaultMethod)) { + methodName = getDefaultMethod; + break; + } + } + try { + return (MetricExporter) MethodHandles.publicLookup() + .findStatic(exporterProviderClass, methodName, MethodType.methodType(exporterProviderClass)) + .asType(MethodType.methodType(MetricExporter.class)) + .invokeExact(); + } catch (Throwable e) { + if (e.getCause() instanceof NoSuchMethodException) { + throw new IllegalStateException("No create factory method exists in [" + exporterProviderClass.getName() + "]"); + } else { + throw new IllegalStateException( + "MetricExporter instantiation failed for class [" + exporterProviderClass.getName() + "]", + e.getCause() + ); + } + } + }); + } catch (PrivilegedActionException ex) { + throw new IllegalStateException( + "MetricExporter instantiation failed for class [" + exporterProviderClass.getName() + "]", + ex.getCause() + ); + } + } +} diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/exporter/package-info.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/exporter/package-info.java new file mode 100644 index 0000000000000..b48ec3e2336c4 --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/exporter/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains classes needed for exporting metrics. + */ +package org.opensearch.telemetry.metrics.exporter; diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/package-info.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/package-info.java new file mode 100644 index 0000000000000..803c159eb201a --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains classes needed for metrics.
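+ * <p>A minimal usage sketch (instrument names below are illustrative, not part of the API): + * <pre>{@code + * Counter requests = metricsTelemetry.createCounter("http.requests", "number of requests", "1"); + * requests.add(1.0, Tags.create().addTag("status", "200")); + * Histogram latency = metricsTelemetry.createHistogram("http.latency", "request latency", "ms"); + * latency.record(42.0); + * }</pre>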
+ */ +package org.opensearch.telemetry.metrics; diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java index 4d3605ae03993..475fc09d04bff 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java @@ -8,21 +8,33 @@ package org.opensearch.telemetry.tracing; -import io.opentelemetry.api.OpenTelemetry; +import org.opensearch.common.settings.Settings; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.telemetry.metrics.exporter.OTelMetricsExporterFactory; +import org.opensearch.telemetry.tracing.exporter.OTelSpanExporterFactory; +import org.opensearch.telemetry.tracing.sampler.OTelSamplerFactory; +import org.opensearch.telemetry.tracing.sampler.RequestSampler; + +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.concurrent.TimeUnit; + import io.opentelemetry.api.common.Attributes; import io.opentelemetry.api.trace.propagation.W3CTraceContextPropagator; import io.opentelemetry.context.propagation.ContextPropagators; import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.InstrumentSelector; +import io.opentelemetry.sdk.metrics.InstrumentType; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.View; +import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader; +import io.opentelemetry.sdk.metrics.internal.view.Base2ExponentialHistogramAggregation; import io.opentelemetry.sdk.resources.Resource; import io.opentelemetry.sdk.trace.SdkTracerProvider; import io.opentelemetry.sdk.trace.export.BatchSpanProcessor; import io.opentelemetry.sdk.trace.export.SpanExporter; import io.opentelemetry.sdk.trace.samplers.Sampler; -import io.opentelemetry.semconv.resource.attributes.ResourceAttributes; -import org.opensearch.common.settings.Settings; - -import java.util.concurrent.TimeUnit; -import org.opensearch.telemetry.tracing.exporter.OTelSpanExporterFactory; +import io.opentelemetry.semconv.ResourceAttributes; import static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_EXPORTER_BATCH_SIZE_SETTING; import static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_EXPORTER_DELAY_SETTING; @@ -32,19 +44,24 @@ * This class encapsulates all OpenTelemetry related resources */ public final class OTelResourceProvider { + private OTelResourceProvider() {} /** * Creates OpenTelemetry instance with default configuration + * @param telemetrySettings telemetry settings * @param settings cluster settings - * @return OpenTelemetry instance + * @return OpenTelemetrySdk instance */ - public static OpenTelemetry get(Settings settings) { - return get( - settings, - OTelSpanExporterFactory.create(settings), - ContextPropagators.create(W3CTraceContextPropagator.getInstance()), - Sampler.alwaysOn() + @SuppressWarnings("removal") + public static OpenTelemetrySdk get(TelemetrySettings telemetrySettings, Settings settings) { + return AccessController.doPrivileged( + (PrivilegedAction<OpenTelemetrySdk>) () -> get( + settings, + OTelSpanExporterFactory.create(settings), + ContextPropagators.create(W3CTraceContextPropagator.getInstance()), + Sampler.parentBased(new RequestSampler(OTelSamplerFactory.create(telemetrySettings, settings))) + ) ); } @@ -54,17 +71,50 @@ public static 
OpenTelemetry get(Settings settings) { * @param spanExporter span exporter instance * @param contextPropagators context propagator instance * @param sampler sampler instance - * @return Opentelemetry instance + * @return OpenTelemetrySdk instance */ - public static OpenTelemetry get(Settings settings, SpanExporter spanExporter, ContextPropagators contextPropagators, Sampler sampler) { + public static OpenTelemetrySdk get( + Settings settings, + SpanExporter spanExporter, + ContextPropagators contextPropagators, + Sampler sampler + ) { Resource resource = Resource.create(Attributes.of(ResourceAttributes.SERVICE_NAME, "OpenSearch")); - SdkTracerProvider sdkTracerProvider = SdkTracerProvider.builder() + SdkTracerProvider sdkTracerProvider = createSdkTracerProvider(settings, spanExporter, sampler, resource); + SdkMeterProvider sdkMeterProvider = createSdkMetricProvider(settings, resource); + return OpenTelemetrySdk.builder() + .setTracerProvider(sdkTracerProvider) + .setMeterProvider(sdkMeterProvider) + .setPropagators(contextPropagators) + .buildAndRegisterGlobal(); + } + + private static SdkMeterProvider createSdkMetricProvider(Settings settings, Resource resource) { + return SdkMeterProvider.builder() + .setResource(resource) + .registerMetricReader( + PeriodicMetricReader.builder(OTelMetricsExporterFactory.create(settings)) + .setInterval(TelemetrySettings.METRICS_PUBLISH_INTERVAL_SETTING.get(settings).getSeconds(), TimeUnit.SECONDS) + .build() + ) + .registerView( + InstrumentSelector.builder().setType(InstrumentType.HISTOGRAM).build(), + View.builder().setAggregation(Base2ExponentialHistogramAggregation.getDefault()).build() + ) + .build(); + } + + private static SdkTracerProvider createSdkTracerProvider( + Settings settings, + SpanExporter spanExporter, + Sampler sampler, + Resource resource + ) { + return SdkTracerProvider.builder() .addSpanProcessor(spanProcessor(settings, spanExporter)) .setResource(resource) .setSampler(sampler) .build(); - - return OpenTelemetrySdk.builder().setTracerProvider(sdkTracerProvider).setPropagators(contextPropagators).buildAndRegisterGlobal(); } private static BatchSpanProcessor spanProcessor(Settings settings, SpanExporter spanExporter) { diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpan.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpan.java index ba63df4ae47a1..fc917968579e1 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpan.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpan.java @@ -19,6 +19,12 @@ class OTelSpan extends AbstractSpan { private final Span delegateSpan; + /** + * Constructor + * @param spanName span name + * @param span the delegate span + * @param parentSpan the parent span + */ public OTelSpan(String spanName, Span span, org.opensearch.telemetry.tracing.Span parentSpan) { super(spanName, parentSpan); this.delegateSpan = span; @@ -51,7 +57,9 @@ public void addAttribute(String key, Boolean value) { @Override public void setError(Exception exception) { - delegateSpan.setStatus(StatusCode.ERROR, exception.getMessage()); + if (exception != null) { + delegateSpan.setStatus(StatusCode.ERROR, exception.getMessage()); + } } @Override diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpanKindConverter.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpanKindConverter.java new file mode 100644 index 
0000000000000..4edb837082126 --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpanKindConverter.java @@ -0,0 +1,43 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +import io.opentelemetry.api.trace.SpanKind; + +/** + * Converts {@link org.opensearch.telemetry.tracing.SpanKind} to OTel {@link SpanKind} + */ +final class OTelSpanKindConverter { + + /** + * Constructor. + */ + private OTelSpanKindConverter() {} + + /** + * SpanKind converter. + * @param spanKind span kind. + * @return otel span kind. + */ + static SpanKind convert(org.opensearch.telemetry.tracing.SpanKind spanKind) { + if (spanKind == null) { + return SpanKind.INTERNAL; + } else { + switch (spanKind) { + case CLIENT: + return SpanKind.CLIENT; + case SERVER: + return SpanKind.SERVER; + case INTERNAL: + default: + return SpanKind.INTERNAL; + } + } + } +} diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTelemetry.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTelemetry.java index 282fabd43346b..0c697d2cc5e8c 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTelemetry.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTelemetry.java @@ -8,34 +8,35 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.common.concurrent.RefCountedReleasable; import org.opensearch.telemetry.Telemetry; import org.opensearch.telemetry.metrics.MetricsTelemetry; +import org.opensearch.telemetry.metrics.OTelMetricsTelemetry; + +import io.opentelemetry.sdk.OpenTelemetrySdk; /** * Otel implementation of Telemetry */ public class OTelTelemetry implements Telemetry { - private final TracingTelemetry tracingTelemetry; - private final MetricsTelemetry metricsTelemetry; + private final RefCountedReleasable<OpenTelemetrySdk> refCountedOpenTelemetry; /** * Creates Telemetry instance - * @param tracingTelemetry tracing telemetry - * @param metricsTelemetry metrics telemetry + * @param refCountedOpenTelemetry open telemetry.
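+ * <p>Illustrative wiring (a sketch; in the plugin the SDK instance comes from {@code OTelResourceProvider}): + * <pre>{@code + * OpenTelemetrySdk sdk = OTelResourceProvider.get(telemetrySettings, settings); + * Telemetry telemetry = new OTelTelemetry(new RefCountedReleasable<>("telemetry", sdk, sdk::close)); + * }</pre>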
*/ - public OTelTelemetry(TracingTelemetry tracingTelemetry, MetricsTelemetry metricsTelemetry) { - this.tracingTelemetry = tracingTelemetry; - this.metricsTelemetry = metricsTelemetry; + public OTelTelemetry(RefCountedReleasable<OpenTelemetrySdk> refCountedOpenTelemetry) { + this.refCountedOpenTelemetry = refCountedOpenTelemetry; } @Override public TracingTelemetry getTracingTelemetry() { - return tracingTelemetry; + return new OTelTracingTelemetry<>(refCountedOpenTelemetry, refCountedOpenTelemetry.get().getSdkTracerProvider()); } @Override public MetricsTelemetry getMetricsTelemetry() { - return metricsTelemetry; + return new OTelMetricsTelemetry<>(refCountedOpenTelemetry, refCountedOpenTelemetry.get().getSdkMeterProvider()); } } diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagator.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagator.java index 15609b39b6b94..0fb05a08c27bb 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagator.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagator.java @@ -8,14 +8,19 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.core.common.Strings; + +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.Optional; +import java.util.function.BiConsumer; + import io.opentelemetry.api.OpenTelemetry; import io.opentelemetry.context.Context; import io.opentelemetry.context.propagation.TextMapGetter; import io.opentelemetry.context.propagation.TextMapSetter; -import java.util.Map; -import java.util.function.BiConsumer; - /** * Otel implementation of TracingContextPropagator */ @@ -32,8 +37,12 @@ public OTelTracingContextPropagator(OpenTelemetry openTelemetry) { } @Override - public Span extract(Map<String, String> props) { + public Optional<Span> extract(Map<String, String> props) { Context context = openTelemetry.getPropagators().getTextMapPropagator().extract(Context.current(), props, TEXT_MAP_GETTER); + return Optional.ofNullable(getPropagatedSpan(context)); + } + + private static OTelPropagatedSpan getPropagatedSpan(Context context) { if (context != null) { io.opentelemetry.api.trace.Span span = io.opentelemetry.api.trace.Span.fromContext(context); return new OTelPropagatedSpan(span); @@ -41,6 +50,12 @@ public Span extract(Map<String, String> props) { return null; } + @Override + public Optional<Span> extractFromHeaders(Map<String, Collection<String>> headers) { + Context context = openTelemetry.getPropagators().getTextMapPropagator().extract(Context.current(), headers, HEADER_TEXT_MAP_GETTER); + return Optional.ofNullable(getPropagatedSpan(context)); + } + @Override public void inject(Span currentSpan, BiConsumer<String, String> setter) { openTelemetry.getPropagators().getTextMapPropagator().inject(context((OTelSpan) currentSpan), setter, TEXT_MAP_SETTER); @@ -72,4 +87,23 @@ public String get(Map<String, String> headers, String key) { } }; + private static final TextMapGetter<Map<String, Collection<String>>> HEADER_TEXT_MAP_GETTER = new TextMapGetter<>() { + @Override + public Iterable<String> keys(Map<String, Collection<String>> headers) { + if (headers != null) { + return headers.keySet(); + } else { + return Collections.emptySet(); + } + } + + @Override + public String get(Map<String, Collection<String>> headers, String key) { + if (headers != null && headers.containsKey(key)) { + 
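+ // Multi-valued headers are flattened into a single comma-delimited string before extraction.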
return Strings.collectionToCommaDelimitedString(headers.get(key)); + } + return null; + } + }; + } diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingTelemetry.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingTelemetry.java index 8a0034e098461..af39617a8c744 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingTelemetry.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingTelemetry.java @@ -8,61 +8,76 @@ package org.opensearch.telemetry.tracing; -import io.opentelemetry.api.OpenTelemetry; -import io.opentelemetry.context.Context; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; +import org.opensearch.common.concurrent.RefCountedReleasable; +import org.opensearch.telemetry.OTelAttributesConverter; +import org.opensearch.telemetry.OTelTelemetryPlugin; import java.io.Closeable; import java.io.IOException; +import io.opentelemetry.api.trace.TracerProvider; +import io.opentelemetry.context.Context; +import io.opentelemetry.sdk.OpenTelemetrySdk; + /** * OTel based Telemetry provider */ -public class OTelTracingTelemetry implements TracingTelemetry { - - private static final Logger logger = LogManager.getLogger(OTelTracingTelemetry.class); - - private final OpenTelemetry openTelemetry; +public class OTelTracingTelemetry<T extends TracerProvider & Closeable> implements TracingTelemetry { + private final RefCountedReleasable<OpenTelemetrySdk> refCountedOpenTelemetry; + private final T tracerProvider; private final io.opentelemetry.api.trace.Tracer otelTracer; /** - * Creates OTel based Telemetry - * @param openTelemetry OpenTelemetry instance + * Creates OTel based {@link TracingTelemetry} + * @param refCountedOpenTelemetry OpenTelemetry instance + * @param tracerProvider {@link TracerProvider} instance. 
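+ * <p>The constructor takes a reference on the shared SDK via {@code incRef()}; {@link #close()} releases it.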
*/ - public OTelTracingTelemetry(OpenTelemetry openTelemetry) { - this.openTelemetry = openTelemetry; - this.otelTracer = openTelemetry.getTracer("os-tracer"); - + public OTelTracingTelemetry(RefCountedReleasable<OpenTelemetrySdk> refCountedOpenTelemetry, T tracerProvider) { + this.refCountedOpenTelemetry = refCountedOpenTelemetry; + this.refCountedOpenTelemetry.incRef(); + this.tracerProvider = tracerProvider; + this.otelTracer = tracerProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME); } @Override - public void close() { - try { - ((Closeable) openTelemetry).close(); - } catch (IOException e) { - logger.warn("Error while closing Opentelemetry", e); - } + public void close() throws IOException { + tracerProvider.close(); + refCountedOpenTelemetry.close(); } @Override - public Span createSpan(String spanName, Span parentSpan) { - return createOtelSpan(spanName, parentSpan); + public Span createSpan(SpanCreationContext spanCreationContext, Span parentSpan) { + return createOtelSpan(spanCreationContext, parentSpan); } @Override public TracingContextPropagator getContextPropagator() { - return new OTelTracingContextPropagator(openTelemetry); + return new OTelTracingContextPropagator(refCountedOpenTelemetry.get()); } - private Span createOtelSpan(String spanName, Span parentSpan) { - io.opentelemetry.api.trace.Span otelSpan = otelSpan(spanName, parentSpan); - return new OTelSpan(spanName, otelSpan, parentSpan); + private Span createOtelSpan(SpanCreationContext spanCreationContext, Span parentSpan) { + io.opentelemetry.api.trace.Span otelSpan = otelSpan( + spanCreationContext.getSpanName(), + parentSpan, + OTelAttributesConverter.convert(spanCreationContext.getAttributes()), + OTelSpanKindConverter.convert(spanCreationContext.getSpanKind()) + ); + Span newSpan = new OTelSpan(spanCreationContext.getSpanName(), otelSpan, parentSpan); + return newSpan; } - io.opentelemetry.api.trace.Span otelSpan(String spanName, Span parentOTelSpan) { + io.opentelemetry.api.trace.Span otelSpan( + String spanName, + Span parentOTelSpan, + io.opentelemetry.api.common.Attributes attributes, + io.opentelemetry.api.trace.SpanKind spanKind + ) { return parentOTelSpan == null || !(parentOTelSpan instanceof OTelSpan) - ? otelTracer.spanBuilder(spanName).startSpan() - : otelTracer.spanBuilder(spanName).setParent(Context.current().with(((OTelSpan) parentOTelSpan).getDelegateSpan())).startSpan(); + ? 
otelTracer.spanBuilder(spanName).setAllAttributes(attributes).setSpanKind(spanKind).startSpan() + : otelTracer.spanBuilder(spanName) + .setParent(Context.current().with(((OTelSpan) parentOTelSpan).getDelegateSpan())) + .setAllAttributes(attributes) + .setSpanKind(spanKind) + .startSpan(); + } } diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/exporter/OTelSpanExporterFactory.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/exporter/OTelSpanExporterFactory.java index c73de4370465f..e9d7e78882c7d 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/exporter/OTelSpanExporterFactory.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/exporter/OTelSpanExporterFactory.java @@ -8,18 +8,21 @@ package org.opensearch.telemetry.tracing.exporter; -import io.opentelemetry.sdk.trace.export.SpanExporter; -import java.lang.invoke.MethodHandles; -import java.lang.invoke.MethodType; -import java.security.AccessController; -import java.security.PrivilegedActionException; -import java.security.PrivilegedExceptionAction; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.SpecialPermission; import org.opensearch.common.settings.Settings; import org.opensearch.telemetry.OTelTelemetrySettings; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; +import java.lang.reflect.Method; +import java.security.AccessController; +import java.security.PrivilegedActionException; +import java.security.PrivilegedExceptionAction; + +import io.opentelemetry.sdk.trace.export.SpanExporter; + /** * Factory class to create the {@link SpanExporter} instance. */ @@ -48,14 +51,23 @@ public static SpanExporter create(Settings settings) { return spanExporter; } + @SuppressWarnings("removal") private static SpanExporter instantiateSpanExporter(Class<SpanExporter> spanExporterProviderClass) { try { // Check we ourselves are not being called by unprivileged code. SpecialPermission.check(); return AccessController.doPrivileged((PrivilegedExceptionAction<SpanExporter>) () -> { + String methodName = "create"; + String getDefaultMethod = "getDefault"; + for (Method m : spanExporterProviderClass.getMethods()) { + if (m.getName().equals(getDefaultMethod)) { + methodName = getDefaultMethod; + break; + } + } try { return (SpanExporter) MethodHandles.publicLookup() - .findStatic(spanExporterProviderClass, "create", MethodType.methodType(spanExporterProviderClass)) + .findStatic(spanExporterProviderClass, methodName, MethodType.methodType(spanExporterProviderClass)) .asType(MethodType.methodType(SpanExporter.class)) .invokeExact(); } catch (Throwable e) { diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/OTelSamplerFactory.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/OTelSamplerFactory.java new file mode 100644 index 0000000000000..b9d5c07a40cd8 --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/OTelSamplerFactory.java @@ -0,0 +1,96 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.telemetry.tracing.sampler; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.SpecialPermission; +import org.opensearch.common.settings.Settings; +import org.opensearch.telemetry.OTelTelemetrySettings; +import org.opensearch.telemetry.TelemetrySettings; + +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; +import java.security.AccessController; +import java.security.PrivilegedExceptionAction; +import java.util.List; +import java.util.ListIterator; + +import io.opentelemetry.sdk.trace.samplers.Sampler; + +/** + * Factory class to create the configured {@link Sampler} instance + */ +public class OTelSamplerFactory { + + /** + * Logger instance for logging messages related to the OTelSamplerFactory. + */ + private static final Logger logger = LogManager.getLogger(OTelSamplerFactory.class); + + /** + * Base constructor. + */ + private OTelSamplerFactory() { + + } + + /** + * Creates the {@link Sampler} chain based on the OTEL_TRACER_SPAN_SAMPLER_CLASS_SETTINGS value. + * + * @param telemetrySettings TelemetrySettings. + * @param settings the settings + * @return the head of the sampler chain. + */ + public static Sampler create(TelemetrySettings telemetrySettings, Settings settings) { + List<Class<Sampler>> samplerClassList = OTelTelemetrySettings.OTEL_TRACER_SPAN_SAMPLER_CLASS_SETTINGS.get(settings); + ListIterator<Class<Sampler>> li = samplerClassList.listIterator(samplerClassList.size()); + + Sampler fallbackSampler = null; + + // Iterate the sampler classes in reverse order to build the chain of samplers + while (li.hasPrevious()) { + Class<Sampler> samplerClass = li.previous(); + fallbackSampler = instantiateSampler(samplerClass, telemetrySettings, settings, fallbackSampler); + } + + return fallbackSampler; + } + + private static Sampler instantiateSampler( + Class<Sampler> samplerClass, + TelemetrySettings telemetrySettings, + Settings settings, + Sampler fallbackSampler + ) { + try { + // Check we ourselves are not being called by unprivileged code.
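+ // Samplers are instantiated reflectively inside a privileged block so the plugin security policy applies.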
+ SpecialPermission.check(); + + return AccessController.doPrivileged((PrivilegedExceptionAction<Sampler>) () -> { + try { + // Define the method type which receives TelemetrySettings, Settings & Sampler as arguments + MethodType methodType = MethodType.methodType(Sampler.class, TelemetrySettings.class, Settings.class, Sampler.class); + + return (Sampler) MethodHandles.publicLookup() + .findStatic(samplerClass, "create", methodType) + .invokeExact(telemetrySettings, settings, fallbackSampler); + } catch (Throwable e) { + if (e.getCause() instanceof NoSuchMethodException) { + throw new IllegalStateException("No create method exists in [" + samplerClass + "]", e.getCause()); + } else { + throw new IllegalStateException("Sampler instantiation failed for class [" + samplerClass + "]", e.getCause()); + } + } + }); + } catch (Exception e) { + throw new IllegalStateException("Sampler instantiation failed for class [" + samplerClass + "]", e.getCause()); + } + } +} diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticSampler.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticSampler.java new file mode 100644 index 0000000000000..d7fe92b1f3495 --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticSampler.java @@ -0,0 +1,102 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing.sampler; + +import org.opensearch.common.settings.Settings; +import org.opensearch.telemetry.TelemetrySettings; + +import java.util.List; +import java.util.Objects; + +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.trace.SpanKind; +import io.opentelemetry.context.Context; +import io.opentelemetry.sdk.trace.data.LinkData; +import io.opentelemetry.sdk.trace.samplers.Sampler; +import io.opentelemetry.sdk.trace.samplers.SamplingDecision; +import io.opentelemetry.sdk.trace.samplers.SamplingResult; + +/** + * ProbabilisticSampler implements a probability sampling strategy based on the configured sampling ratio. + */ +public class ProbabilisticSampler implements Sampler { + private Sampler defaultSampler; + private final TelemetrySettings telemetrySettings; + private final Settings settings; + private final Sampler fallbackSampler; + + private double samplingRatio; + + /** + * Constructor + * + * @param telemetrySettings Telemetry settings. + * @param settings the settings + * @param fallbackSampler the fallback sampler + */ + private ProbabilisticSampler(TelemetrySettings telemetrySettings, Settings settings, Sampler fallbackSampler) { + this.telemetrySettings = Objects.requireNonNull(telemetrySettings); + this.settings = Objects.requireNonNull(settings); + this.samplingRatio = telemetrySettings.getSamplingProbability(); + this.defaultSampler = Sampler.traceIdRatioBased(samplingRatio); + this.fallbackSampler = fallbackSampler; + } + + /** + * Create probabilistic sampler.
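+ * <p>Illustrative chain wiring (a sketch mirroring {@code OTelSamplerFactory.create}; the actual nesting order comes from the configured sampler class list): + * <pre>{@code + * Sampler chain = ProbabilisticTransportActionSampler.create(telemetrySettings, settings, + *     ProbabilisticSampler.create(telemetrySettings, settings, null)); + * }</pre>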
+ * + * @param telemetrySettings the telemetry settings + * @param settings the settings + * @param fallbackSampler the fallback sampler + * @return the probabilistic sampler + */ + public static Sampler create(TelemetrySettings telemetrySettings, Settings settings, Sampler fallbackSampler) { + return new ProbabilisticSampler(telemetrySettings, settings, fallbackSampler); + } + + private boolean isSamplingRatioChanged(double newSamplingRatio) { + return Double.compare(this.samplingRatio, newSamplingRatio) != 0; + } + + double getSamplingRatio() { + return samplingRatio; + } + + @Override + public SamplingResult shouldSample( + Context parentContext, + String traceId, + String name, + SpanKind spanKind, + Attributes attributes, + List<LinkData> parentLinks + ) { + double newSamplingRatio = telemetrySettings.getSamplingProbability(); + if (isSamplingRatioChanged(newSamplingRatio)) { + synchronized (this) { + this.samplingRatio = newSamplingRatio; + defaultSampler = Sampler.traceIdRatioBased(samplingRatio); + } + } + final SamplingResult result = defaultSampler.shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks); + if (result.getDecision() != SamplingDecision.DROP && fallbackSampler != null) { + return fallbackSampler.shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks); + } else { + return result; + } + } + + @Override + public String getDescription() { + return "Probabilistic Sampler"; + } + + @Override + public String toString() { + return getDescription(); + } +} diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticTransportActionSampler.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticTransportActionSampler.java new file mode 100644 index 0000000000000..93a8edaaaa760 --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticTransportActionSampler.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.tracing.sampler; + +import org.opensearch.common.settings.Settings; +import org.opensearch.telemetry.OTelTelemetrySettings; +import org.opensearch.telemetry.TelemetrySettings; + +import java.util.List; +import java.util.Objects; + +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.trace.SpanKind; +import io.opentelemetry.context.Context; +import io.opentelemetry.sdk.trace.data.LinkData; +import io.opentelemetry.sdk.trace.samplers.Sampler; +import io.opentelemetry.sdk.trace.samplers.SamplingDecision; +import io.opentelemetry.sdk.trace.samplers.SamplingResult; + +import static org.opensearch.telemetry.tracing.AttributeNames.TRANSPORT_ACTION; + +/** + * ProbabilisticTransportActionSampler samples requests carrying a transport action based on the configured probability + */ +public class ProbabilisticTransportActionSampler implements Sampler { + + private final Sampler fallbackSampler; + private Sampler actionSampler; + private final TelemetrySettings telemetrySettings; + private final Settings settings; + private double actionSamplingRatio; + + /** + * Creates the ProbabilisticTransportActionSampler + * @param telemetrySettings TelemetrySettings + * @param settings the settings + * @param fallbackSampler the fallback sampler + */ + private ProbabilisticTransportActionSampler(TelemetrySettings telemetrySettings, Settings settings, Sampler fallbackSampler) { + this.telemetrySettings = Objects.requireNonNull(telemetrySettings); + this.settings = Objects.requireNonNull(settings); + this.actionSamplingRatio = OTelTelemetrySettings.TRACER_SAMPLER_ACTION_PROBABILITY.get(settings); + this.actionSampler = Sampler.traceIdRatioBased(actionSamplingRatio); + this.fallbackSampler = fallbackSampler; + } + + /** + * Create probabilistic transport action sampler.
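+ * <p>As implemented in {@code shouldSample}: spans carrying a {@code TRANSPORT_ACTION} attribute are first gated + * by the action probability and, if not dropped, by the fallback sampler; spans without the attribute go straight + * to the fallback sampler, or are dropped when none is configured.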
+ * + * @param telemetrySettings the telemetry settings + * @param settings the settings + * @param fallbackSampler the fallback sampler + * @return the probabilistic transport action sampler + */ + public static Sampler create(TelemetrySettings telemetrySettings, Settings settings, Sampler fallbackSampler) { + return new ProbabilisticTransportActionSampler(telemetrySettings, settings, fallbackSampler); + } + + @Override + public SamplingResult shouldSample( + Context parentContext, + String traceId, + String name, + SpanKind spanKind, + Attributes attributes, + List<LinkData> parentLinks + ) { + final String action = attributes.get(AttributeKey.stringKey(TRANSPORT_ACTION)); + if (action != null) { + final SamplingResult result = actionSampler.shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks); + if (result.getDecision() != SamplingDecision.DROP && fallbackSampler != null) { + return fallbackSampler.shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks); + } + return result; + } + if (fallbackSampler != null) return fallbackSampler.shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks); + + return SamplingResult.drop(); + } + + double getSamplingRatio() { + return actionSamplingRatio; + } + + @Override + public String getDescription() { + return "Transport Action Sampler"; + } + + @Override + public String toString() { + return getDescription(); + } +} diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/RequestSampler.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/RequestSampler.java new file mode 100644 index 0000000000000..87c2849173aff --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/RequestSampler.java @@ -0,0 +1,66 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing.sampler; + +import java.util.List; + +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.trace.SpanKind; +import io.opentelemetry.context.Context; +import io.opentelemetry.sdk.trace.data.LinkData; +import io.opentelemetry.sdk.trace.samplers.Sampler; +import io.opentelemetry.sdk.trace.samplers.SamplingResult; + +import static org.opensearch.telemetry.tracing.AttributeNames.TRACE; + +/** + * RequestSampler makes a head-based sampling decision driven by the request's trace attribute + */ +public class RequestSampler implements Sampler { + private final Sampler fallbackSampler; + + /** + * Creates a request sampler which honors an explicit trace attribute and otherwise delegates to the fallback sampler + * @param fallbackSampler the fallback Sampler + */ + public RequestSampler(Sampler fallbackSampler) { + this.fallbackSampler = fallbackSampler; + } + + @Override + public SamplingResult shouldSample( + Context parentContext, + String traceId, + String name, + SpanKind spanKind, + Attributes attributes, + List<LinkData> parentLinks + ) { + final String trace = attributes.get(AttributeKey.stringKey(TRACE)); + + if (trace != null) { + return Boolean.parseBoolean(trace) ? 
SamplingResult.recordAndSample() : SamplingResult.drop(); + } + if (fallbackSampler != null) { + return fallbackSampler.shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks); + } + return SamplingResult.recordAndSample(); + } + + @Override + public String getDescription() { + return "Request Sampler"; + } + + @Override + public String toString() { + return getDescription(); + } +} diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/package-info.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/package-info.java new file mode 100644 index 0000000000000..6534b33f6177c --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains classes needed for sampling. + */ +package org.opensearch.telemetry.tracing.sampler; diff --git a/plugins/telemetry-otel/src/main/plugin-metadata/plugin-security.policy b/plugins/telemetry-otel/src/main/plugin-metadata/plugin-security.policy index 4480cbb2bab4b..9d529ed5a2a56 100644 --- a/plugins/telemetry-otel/src/main/plugin-metadata/plugin-security.policy +++ b/plugins/telemetry-otel/src/main/plugin-metadata/plugin-security.policy @@ -9,6 +9,9 @@ grant { permission java.lang.RuntimePermission "getClassLoader"; permission java.lang.RuntimePermission "accessDeclaredMembers"; + permission java.net.NetPermission "getProxySelector"; + permission java.net.SocketPermission "*", "connect,resolve"; + permission java.util.PropertyPermission "*", "read,write"; }; diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/OTelTelemetryPluginTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/OTelTelemetryPluginTests.java index d57ae554a462d..4a1301588dad2 100644 --- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/OTelTelemetryPluginTests.java +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/OTelTelemetryPluginTests.java @@ -8,63 +8,81 @@ package org.opensearch.telemetry; -import org.junit.After; -import org.junit.Before; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; +import org.opensearch.telemetry.metrics.MetricsTelemetry; +import org.opensearch.telemetry.metrics.OTelMetricsTelemetry; import org.opensearch.telemetry.tracing.OTelTracingTelemetry; import org.opensearch.telemetry.tracing.TracingTelemetry; import org.opensearch.test.OpenSearchTestCase; +import org.junit.After; +import org.junit.Before; +import java.io.IOException; import java.util.Arrays; import java.util.HashSet; import java.util.List; -import java.util.Set; import java.util.Optional; +import java.util.Set; import static org.opensearch.telemetry.OTelTelemetryPlugin.OTEL_TRACER_NAME; +import static org.opensearch.telemetry.OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING; +import static org.opensearch.telemetry.OTelTelemetrySettings.OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING; +import static org.opensearch.telemetry.OTelTelemetrySettings.OTEL_TRACER_SPAN_SAMPLER_CLASS_SETTINGS; import static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_EXPORTER_BATCH_SIZE_SETTING; import 
static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_EXPORTER_DELAY_SETTING; import static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_EXPORTER_MAX_QUEUE_SIZE_SETTING; -import static org.opensearch.telemetry.OTelTelemetrySettings.OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING; +import static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_SAMPLER_ACTION_PROBABILITY; +import static org.opensearch.telemetry.TelemetrySettings.TRACER_ENABLED_SETTING; +import static org.opensearch.telemetry.TelemetrySettings.TRACER_SAMPLER_PROBABILITY; public class OTelTelemetryPluginTests extends OpenSearchTestCase { - private OTelTelemetryPlugin oTelTracerModulePlugin; + private OTelTelemetryPlugin oTelTelemetryPlugin; private Optional<Telemetry> telemetry; private TracingTelemetry tracingTelemetry; + private MetricsTelemetry metricsTelemetry; + @Before public void setup() { // TRACER_EXPORTER_DELAY_SETTING should always be less than 10 seconds because // io.opentelemetry.sdk.OpenTelemetrySdk.close waits only for 10 seconds for shutdown to complete. Settings settings = Settings.builder().put(TRACER_EXPORTER_DELAY_SETTING.getKey(), "1s").build(); - oTelTracerModulePlugin = new OTelTelemetryPlugin(settings); - telemetry = oTelTracerModulePlugin.getTelemetry(null); + oTelTelemetryPlugin = new OTelTelemetryPlugin(settings); + telemetry = oTelTelemetryPlugin.getTelemetry( + new TelemetrySettings(Settings.EMPTY, new ClusterSettings(settings, Set.of(TRACER_ENABLED_SETTING, TRACER_SAMPLER_PROBABILITY))) + ); tracingTelemetry = telemetry.get().getTracingTelemetry(); + metricsTelemetry = telemetry.get().getMetricsTelemetry(); } public void testGetTelemetry() { Set<Setting<?>> allTracerSettings = new HashSet<>(); ClusterSettings.FEATURE_FLAGGED_CLUSTER_SETTINGS.get(List.of(FeatureFlags.TELEMETRY)).stream().forEach((allTracerSettings::add)); - assertEquals(OTEL_TRACER_NAME, oTelTracerModulePlugin.getName()); + assertEquals(OTEL_TRACER_NAME, oTelTelemetryPlugin.getName()); assertTrue(tracingTelemetry instanceof OTelTracingTelemetry); + assertTrue(metricsTelemetry instanceof OTelMetricsTelemetry); assertEquals( Arrays.asList( TRACER_EXPORTER_BATCH_SIZE_SETTING, TRACER_EXPORTER_DELAY_SETTING, TRACER_EXPORTER_MAX_QUEUE_SIZE_SETTING, - OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING + OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING, + OTEL_TRACER_SPAN_SAMPLER_CLASS_SETTINGS, + OTEL_METRICS_EXPORTER_CLASS_SETTING, + TRACER_SAMPLER_ACTION_PROBABILITY ), - oTelTracerModulePlugin.getSettings() + oTelTelemetryPlugin.getSettings() ); } @After - public void cleanup() { + public void cleanup() throws IOException { tracingTelemetry.close(); + metricsTelemetry.close(); } } diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java new file mode 100644 index 0000000000000..4b39e3d0d607d --- /dev/null +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java @@ -0,0 +1,152 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.common.concurrent.RefCountedReleasable; +import org.opensearch.telemetry.OTelAttributesConverter; +import org.opensearch.telemetry.OTelTelemetryPlugin; +import org.opensearch.telemetry.metrics.tags.Tags; +import org.opensearch.test.OpenSearchTestCase; + +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.metrics.DoubleCounter; +import io.opentelemetry.api.metrics.DoubleCounterBuilder; +import io.opentelemetry.api.metrics.DoubleHistogram; +import io.opentelemetry.api.metrics.DoubleHistogramBuilder; +import io.opentelemetry.api.metrics.DoubleUpDownCounter; +import io.opentelemetry.api.metrics.DoubleUpDownCounterBuilder; +import io.opentelemetry.api.metrics.LongCounterBuilder; +import io.opentelemetry.api.metrics.LongUpDownCounterBuilder; +import io.opentelemetry.api.metrics.Meter; +import io.opentelemetry.api.metrics.MeterProvider; +import org.mockito.Mockito; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class OTelMetricsTelemetryTests extends OpenSearchTestCase { + + @SuppressWarnings({ "rawtypes", "unchecked" }) + public void testCounter() { + String counterName = "test-counter"; + String description = "test"; + String unit = "1"; + Meter mockMeter = mock(Meter.class); + OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); + DoubleCounter mockOTelDoubleCounter = mock(DoubleCounter.class); + LongCounterBuilder mockOTelLongCounterBuilder = mock(LongCounterBuilder.class); + DoubleCounterBuilder mockOTelDoubleCounterBuilder = mock(DoubleCounterBuilder.class); + MeterProvider meterProvider = mock(MeterProvider.class); + when(meterProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockMeter); + MetricsTelemetry metricsTelemetry = new OTelMetricsTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + meterProvider + ); + when(mockMeter.counterBuilder(counterName)).thenReturn(mockOTelLongCounterBuilder); + when(mockOTelLongCounterBuilder.setDescription(description)).thenReturn(mockOTelLongCounterBuilder); + when(mockOTelLongCounterBuilder.setUnit(unit)).thenReturn(mockOTelLongCounterBuilder); + when(mockOTelLongCounterBuilder.ofDoubles()).thenReturn(mockOTelDoubleCounterBuilder); + when(mockOTelDoubleCounterBuilder.build()).thenReturn(mockOTelDoubleCounter); + + Counter counter = metricsTelemetry.createCounter(counterName, description, unit); + counter.add(1.0); + verify(mockOTelDoubleCounter).add(1.0); + Tags tags = Tags.create().addTag("test", "test"); + counter.add(2.0, tags); + verify(mockOTelDoubleCounter).add(2.0, OTelAttributesConverter.convert(tags)); + } + + @SuppressWarnings({ "rawtypes", "unchecked" }) + public void testCounterNegativeValue() { + String counterName = "test-counter"; + String description = "test"; + String unit = "1"; + OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); + Meter mockMeter = mock(Meter.class); + DoubleCounter mockOTelDoubleCounter = mock(DoubleCounter.class); + LongCounterBuilder mockOTelLongCounterBuilder = mock(LongCounterBuilder.class); + DoubleCounterBuilder mockOTelDoubleCounterBuilder = mock(DoubleCounterBuilder.class); + + MeterProvider meterProvider = mock(MeterProvider.class); + when(meterProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockMeter); + MetricsTelemetry metricsTelemetry = new OTelMetricsTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () 
-> {}), + meterProvider + ); + when(mockMeter.counterBuilder(counterName)).thenReturn(mockOTelLongCounterBuilder); + when(mockOTelLongCounterBuilder.setDescription(description)).thenReturn(mockOTelLongCounterBuilder); + when(mockOTelLongCounterBuilder.setUnit(unit)).thenReturn(mockOTelLongCounterBuilder); + when(mockOTelLongCounterBuilder.ofDoubles()).thenReturn(mockOTelDoubleCounterBuilder); + when(mockOTelDoubleCounterBuilder.build()).thenReturn(mockOTelDoubleCounter); + + Counter counter = metricsTelemetry.createCounter(counterName, description, unit); + counter.add(-1.0); + verify(mockOTelDoubleCounter).add(-1.0); + } + + @SuppressWarnings({ "rawtypes", "unchecked" }) + public void testUpDownCounter() { + String counterName = "test-counter"; + String description = "test"; + String unit = "1"; + OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); + Meter mockMeter = mock(Meter.class); + DoubleUpDownCounter mockOTelUpDownDoubleCounter = mock(DoubleUpDownCounter.class); + LongUpDownCounterBuilder mockOTelLongUpDownCounterBuilder = mock(LongUpDownCounterBuilder.class); + DoubleUpDownCounterBuilder mockOTelDoubleUpDownCounterBuilder = mock(DoubleUpDownCounterBuilder.class); + + MeterProvider meterProvider = mock(MeterProvider.class); + when(meterProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockMeter); + MetricsTelemetry metricsTelemetry = new OTelMetricsTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + meterProvider + ); + when(mockMeter.upDownCounterBuilder(counterName)).thenReturn(mockOTelLongUpDownCounterBuilder); + when(mockOTelLongUpDownCounterBuilder.setDescription(description)).thenReturn(mockOTelLongUpDownCounterBuilder); + when(mockOTelLongUpDownCounterBuilder.setUnit(unit)).thenReturn(mockOTelLongUpDownCounterBuilder); + when(mockOTelLongUpDownCounterBuilder.ofDoubles()).thenReturn(mockOTelDoubleUpDownCounterBuilder); + when(mockOTelDoubleUpDownCounterBuilder.build()).thenReturn(mockOTelUpDownDoubleCounter); + + Counter counter = metricsTelemetry.createUpDownCounter(counterName, description, unit); + counter.add(1.0); + verify(mockOTelUpDownDoubleCounter).add(1.0); + Tags tags = Tags.create().addTag("test", "test"); + counter.add(-2.0, tags); + verify(mockOTelUpDownDoubleCounter).add((-2.0), OTelAttributesConverter.convert(tags)); + } + + @SuppressWarnings({ "rawtypes", "unchecked" }) + public void testHistogram() { + String histogramName = "test-histogram"; + String description = "test"; + String unit = "1"; + Meter mockMeter = mock(Meter.class); + OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); + DoubleHistogram mockOTelDoubleHistogram = mock(DoubleHistogram.class); + DoubleHistogramBuilder mockOTelDoubleHistogramBuilder = mock(DoubleHistogramBuilder.class); + MeterProvider meterProvider = mock(MeterProvider.class); + when(meterProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockMeter); + MetricsTelemetry metricsTelemetry = new OTelMetricsTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + meterProvider + ); + when(mockMeter.histogramBuilder(Mockito.contains(histogramName))).thenReturn(mockOTelDoubleHistogramBuilder); + when(mockOTelDoubleHistogramBuilder.setDescription(description)).thenReturn(mockOTelDoubleHistogramBuilder); + when(mockOTelDoubleHistogramBuilder.setUnit(unit)).thenReturn(mockOTelDoubleHistogramBuilder); + when(mockOTelDoubleHistogramBuilder.build()).thenReturn(mockOTelDoubleHistogram); + + Histogram histogram = 
metricsTelemetry.createHistogram(histogramName, description, unit); + histogram.record(1.0); + verify(mockOTelDoubleHistogram).record(1.0); + Tags tags = Tags.create().addTag("test", "test"); + histogram.record(2.0, tags); + verify(mockOTelDoubleHistogram).record(2.0, OTelAttributesConverter.convert(tags)); + } +} diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/exporter/DummyMetricExporter.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/exporter/DummyMetricExporter.java new file mode 100644 index 0000000000000..65c52911dbef9 --- /dev/null +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/exporter/DummyMetricExporter.java @@ -0,0 +1,39 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics.exporter; + +import java.util.Collection; + +import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.sdk.metrics.InstrumentType; +import io.opentelemetry.sdk.metrics.data.AggregationTemporality; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.export.MetricExporter; + +public class DummyMetricExporter implements MetricExporter { + @Override + public CompletableResultCode export(Collection<MetricData> metrics) { + return null; + } + + @Override + public CompletableResultCode flush() { + return null; + } + + @Override + public CompletableResultCode shutdown() { + return null; + } + + @Override + public AggregationTemporality getAggregationTemporality(InstrumentType instrumentType) { + return null; + } +} diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/exporter/OTelMetricsExporterFactoryTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/exporter/OTelMetricsExporterFactoryTests.java new file mode 100644 index 0000000000000..e68da030bfb52 --- /dev/null +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/exporter/OTelMetricsExporterFactoryTests.java @@ -0,0 +1,78 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.metrics.exporter; + +import org.opensearch.common.settings.Settings; +import org.opensearch.telemetry.OTelTelemetrySettings; +import org.opensearch.test.OpenSearchTestCase; + +import io.opentelemetry.exporter.logging.LoggingMetricExporter; +import io.opentelemetry.exporter.otlp.metrics.OtlpGrpcMetricExporter; +import io.opentelemetry.sdk.metrics.export.MetricExporter; + +public class OTelMetricsExporterFactoryTests extends OpenSearchTestCase { + + public void testMetricsExporterDefault() { + Settings settings = Settings.builder().build(); + MetricExporter metricExporter = OTelMetricsExporterFactory.create(settings); + assertTrue(metricExporter instanceof LoggingMetricExporter); + } + + public void testMetricsExporterLogging() { + Settings settings = Settings.builder() + .put( + OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.getKey(), + "io.opentelemetry.exporter.logging.LoggingMetricExporter" + ) + .build(); + MetricExporter metricExporter = OTelMetricsExporterFactory.create(settings); + assertTrue(metricExporter instanceof LoggingMetricExporter); + } + + public void testMetricExporterInvalid() { + Settings settings = Settings.builder().put(OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.getKey(), "abc").build(); + assertThrows(IllegalArgumentException.class, () -> OTelMetricsExporterFactory.create(settings)); + } + + public void testMetricExporterNoCreateFactoryMethod() { + Settings settings = Settings.builder() + .put( + OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.getKey(), + "org.opensearch.telemetry.metrics.exporter.DummyMetricExporter" + ) + .build(); + IllegalStateException exception = assertThrows(IllegalStateException.class, () -> OTelMetricsExporterFactory.create(settings)); + assertEquals( + "MetricExporter instantiation failed for class [org.opensearch.telemetry.metrics.exporter.DummyMetricExporter]", + exception.getMessage() + ); + } + + public void testMetricExporterNonMetricExporterClass() { + Settings settings = Settings.builder() + .put(OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.getKey(), "java.lang.String") + .build(); + IllegalStateException exception = assertThrows(IllegalStateException.class, () -> OTelMetricsExporterFactory.create(settings)); + assertEquals("MetricExporter instantiation failed for class [java.lang.String]", exception.getMessage()); + assertTrue(exception.getCause() instanceof NoSuchMethodError); + + } + + public void testMetricExporterGetDefaultMethod() { + Settings settings = Settings.builder() + .put( + OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.getKey(), + "io.opentelemetry.exporter.otlp.metrics.OtlpGrpcMetricExporter" + ) + .build(); + + assertTrue(OTelMetricsExporterFactory.create(settings) instanceof OtlpGrpcMetricExporter); + } + +} diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelAttributesConverterTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelAttributesConverterTests.java new file mode 100644 index 0000000000000..ee67384d01759 --- /dev/null +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelAttributesConverterTests.java @@ -0,0 +1,60 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.tracing; + +import org.opensearch.telemetry.OTelAttributesConverter; +import org.opensearch.telemetry.metrics.tags.Tags; +import org.opensearch.telemetry.tracing.attributes.Attributes; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Map; + +import io.opentelemetry.api.common.AttributeType; +import io.opentelemetry.api.internal.InternalAttributeKeyImpl; + +public class OTelAttributesConverterTests extends OpenSearchTestCase { + + public void testConverterNullAttributes() { + io.opentelemetry.api.common.Attributes otelAttributes = OTelAttributesConverter.convert((Attributes) null); + assertEquals(0, otelAttributes.size()); + } + + public void testConverterEmptyAttributes() { + Attributes attributes = Attributes.EMPTY; + io.opentelemetry.api.common.Attributes otelAttributes = OTelAttributesConverter.convert(attributes); + assertEquals(0, otelAttributes.size()); + } + + public void testConverterSingleAttributes() { + Attributes attributes = Attributes.create().addAttribute("key1", "value"); + io.opentelemetry.api.common.Attributes otelAttributes = OTelAttributesConverter.convert(attributes); + assertEquals(1, otelAttributes.size()); + assertEquals("value", otelAttributes.get(InternalAttributeKeyImpl.create("key1", AttributeType.STRING))); + } + + public void testConverterMultipleAttributes() { + Attributes attributes = Attributes.create() + .addAttribute("key1", 1L) + .addAttribute("key2", 1.0) + .addAttribute("key3", true) + .addAttribute("key4", "value4"); + Map<String, ?> attributeMap = attributes.getAttributesMap(); + io.opentelemetry.api.common.Attributes otelAttributes = OTelAttributesConverter.convert(attributes); + assertEquals(4, otelAttributes.size()); + otelAttributes.asMap().forEach((x, y) -> assertEquals(attributeMap.get(x.getKey()), y)); + } + + public void testConverterMultipleTags() { + Tags tags = Tags.create().addTag("key1", 1L).addTag("key2", 1.0).addTag("key3", true).addTag("key4", "value4"); + Map<String, ?> tagsMap = tags.getTagsMap(); + io.opentelemetry.api.common.Attributes otelAttributes = OTelAttributesConverter.convert(tags); + assertEquals(4, otelAttributes.size()); + otelAttributes.asMap().forEach((x, y) -> assertEquals(tagsMap.get(x.getKey()), y)); + } +} diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelSpanKindConverterTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelSpanKindConverterTests.java new file mode 100644 index 0000000000000..d07e32d00a92a --- /dev/null +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelSpanKindConverterTests.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.telemetry.tracing; + +import org.opensearch.test.OpenSearchTestCase; + +import io.opentelemetry.api.trace.SpanKind; + +public class OTelSpanKindConverterTests extends OpenSearchTestCase { + + public void testSpanKindNullConverterNull() { + assertEquals(SpanKind.INTERNAL, OTelSpanKindConverter.convert(null)); + } + + public void testSpanKindConverter() { + assertEquals(SpanKind.INTERNAL, OTelSpanKindConverter.convert(org.opensearch.telemetry.tracing.SpanKind.INTERNAL)); + assertEquals(SpanKind.CLIENT, OTelSpanKindConverter.convert(org.opensearch.telemetry.tracing.SpanKind.CLIENT)); + assertEquals(SpanKind.SERVER, OTelSpanKindConverter.convert(org.opensearch.telemetry.tracing.SpanKind.SERVER)); + } + +} diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelSpanTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelSpanTests.java index ac849e620673a..fc92ab36908e1 100644 --- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelSpanTests.java +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelSpanTests.java @@ -8,14 +8,15 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.test.OpenSearchTestCase; + import io.opentelemetry.api.trace.Span; import io.opentelemetry.api.trace.SpanContext; import io.opentelemetry.api.trace.TraceFlags; import io.opentelemetry.api.trace.TraceState; -import org.opensearch.test.OpenSearchTestCase; -import static org.mockito.Mockito.verify; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class OTelSpanTests extends OpenSearchTestCase { diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagatorTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagatorTests.java index 1f76b0b9def18..d865a329104c1 100644 --- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagatorTests.java +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagatorTests.java @@ -8,17 +8,21 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; + import io.opentelemetry.api.OpenTelemetry; import io.opentelemetry.api.trace.Span; import io.opentelemetry.api.trace.SpanContext; import io.opentelemetry.api.trace.TraceFlags; import io.opentelemetry.api.trace.TraceState; import io.opentelemetry.api.trace.propagation.W3CTraceContextPropagator; +import io.opentelemetry.context.Context; import io.opentelemetry.context.propagation.ContextPropagators; -import org.opensearch.test.OpenSearchTestCase; - -import java.util.HashMap; -import java.util.Map; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -47,8 +51,39 @@ public void testExtractTracerContextFromHeader() { OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); when(mockOpenTelemetry.getPropagators()).thenReturn(ContextPropagators.create(W3CTraceContextPropagator.getInstance())); TracingContextPropagator tracingContextPropagator = new OTelTracingContextPropagator(mockOpenTelemetry); - org.opensearch.telemetry.tracing.Span span = tracingContextPropagator.extract(requestHeaders); + org.opensearch.telemetry.tracing.Span span = 
tracingContextPropagator.extract(requestHeaders).orElse(null); + assertEquals(TRACE_ID, span.getTraceId()); + assertEquals(SPAN_ID, span.getSpanId()); + } + + public void testExtractTracerContextFromHttpHeader() { + Map<String, Collection<String>> requestHeaders = new HashMap<>(); + requestHeaders.put("traceparent", Arrays.asList("00-" + TRACE_ID + "-" + SPAN_ID + "-00")); + OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); + when(mockOpenTelemetry.getPropagators()).thenReturn(ContextPropagators.create(W3CTraceContextPropagator.getInstance())); + TracingContextPropagator tracingContextPropagator = new OTelTracingContextPropagator(mockOpenTelemetry); + org.opensearch.telemetry.tracing.Span span = tracingContextPropagator.extractFromHeaders(requestHeaders).get(); assertEquals(TRACE_ID, span.getTraceId()); assertEquals(SPAN_ID, span.getSpanId()); } + + public void testExtractTracerContextFromHttpHeaderNull() { + OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); + when(mockOpenTelemetry.getPropagators()).thenReturn(ContextPropagators.create(W3CTraceContextPropagator.getInstance())); + TracingContextPropagator tracingContextPropagator = new OTelTracingContextPropagator(mockOpenTelemetry); + org.opensearch.telemetry.tracing.Span span = tracingContextPropagator.extractFromHeaders(null).get(); + org.opensearch.telemetry.tracing.Span propagatedSpan = new OTelPropagatedSpan(Span.fromContext(Context.root())); + assertEquals(propagatedSpan.getTraceId(), span.getTraceId()); + assertEquals(propagatedSpan.getSpanId(), span.getSpanId()); + } + + public void testExtractTracerContextFromHttpHeaderEmpty() { + OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); + when(mockOpenTelemetry.getPropagators()).thenReturn(ContextPropagators.create(W3CTraceContextPropagator.getInstance())); + TracingContextPropagator tracingContextPropagator = new OTelTracingContextPropagator(mockOpenTelemetry); + org.opensearch.telemetry.tracing.Span span = tracingContextPropagator.extractFromHeaders(new HashMap<>()).get(); + org.opensearch.telemetry.tracing.Span propagatedSpan = new OTelPropagatedSpan(Span.fromContext(Context.root())); + assertEquals(propagatedSpan.getTraceId(), span.getTraceId()); + assertEquals(propagatedSpan.getSpanId(), span.getSpanId()); + } } diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingTelemetryTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingTelemetryTests.java index 7dec7824b9790..1f0c2f674e655 100644 --- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingTelemetryTests.java +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingTelemetryTests.java @@ -8,10 +8,16 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.common.concurrent.RefCountedReleasable; +import org.opensearch.telemetry.OTelTelemetryPlugin; +import org.opensearch.telemetry.tracing.attributes.Attributes; +import org.opensearch.test.OpenSearchTestCase; + import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.AttributesBuilder; import io.opentelemetry.api.trace.SpanBuilder; import io.opentelemetry.api.trace.Tracer; -import org.opensearch.test.OpenSearchTestCase; +import io.opentelemetry.api.trace.TracerProvider; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; @@ -20,47 +26,119 @@ import static org.mockito.Mockito.when; public class OTelTracingTelemetryTests extends 
OpenSearchTestCase { - + @SuppressWarnings({ "rawtypes", "unchecked" }) public void testCreateSpanWithoutParent() { OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); Tracer mockTracer = mock(Tracer.class); - when(mockOpenTelemetry.getTracer("os-tracer")).thenReturn(mockTracer); + TracerProvider mockTracerProvider = mock(TracerProvider.class); + when(mockTracerProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockTracer); SpanBuilder mockSpanBuilder = mock(SpanBuilder.class); when(mockTracer.spanBuilder("span_name")).thenReturn(mockSpanBuilder); + when(mockSpanBuilder.setAllAttributes(any(io.opentelemetry.api.common.Attributes.class))).thenReturn(mockSpanBuilder); when(mockSpanBuilder.startSpan()).thenReturn(mock(io.opentelemetry.api.trace.Span.class)); - - TracingTelemetry tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry); - Span span = tracingTelemetry.createSpan("span_name", null); - + when(mockSpanBuilder.setSpanKind(any(io.opentelemetry.api.trace.SpanKind.class))).thenReturn(mockSpanBuilder); + Attributes attributes = Attributes.create().addAttribute("name", "value"); + TracingTelemetry tracingTelemetry = new OTelTracingTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + mockTracerProvider + ); + Span span = tracingTelemetry.createSpan(SpanCreationContext.internal().name("span_name").attributes(attributes), null); verify(mockSpanBuilder, never()).setParent(any()); + verify(mockSpanBuilder).setAllAttributes(createAttribute(attributes)); assertNull(span.getParentSpan()); } + @SuppressWarnings({ "rawtypes", "unchecked" }) public void testCreateSpanWithParent() { OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); Tracer mockTracer = mock(Tracer.class); - when(mockOpenTelemetry.getTracer("os-tracer")).thenReturn(mockTracer); + TracerProvider mockTracerProvider = mock(TracerProvider.class); + when(mockTracerProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockTracer); SpanBuilder mockSpanBuilder = mock(SpanBuilder.class); when(mockTracer.spanBuilder("span_name")).thenReturn(mockSpanBuilder); when(mockSpanBuilder.setParent(any())).thenReturn(mockSpanBuilder); + when(mockSpanBuilder.setAllAttributes(any(io.opentelemetry.api.common.Attributes.class))).thenReturn(mockSpanBuilder); when(mockSpanBuilder.startSpan()).thenReturn(mock(io.opentelemetry.api.trace.Span.class)); + when(mockSpanBuilder.setSpanKind(any(io.opentelemetry.api.trace.SpanKind.class))).thenReturn(mockSpanBuilder); Span parentSpan = new OTelSpan("parent_span", mock(io.opentelemetry.api.trace.Span.class), null); - TracingTelemetry tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry); - Span span = tracingTelemetry.createSpan("span_name", parentSpan); + TracingTelemetry tracingTelemetry = new OTelTracingTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + mockTracerProvider + ); + Attributes attributes = Attributes.create().addAttribute("name", 1L); + Span span = tracingTelemetry.createSpan(SpanCreationContext.internal().name("span_name").attributes(attributes), parentSpan); verify(mockSpanBuilder).setParent(any()); + verify(mockSpanBuilder).setAllAttributes(createAttributeLong(attributes)); assertNotNull(span.getParentSpan()); + assertEquals("parent_span", span.getParentSpan().getSpanName()); } + @SuppressWarnings({ "rawtypes", "unchecked" }) + public void testCreateSpanWithParentWithMultipleAttributes() { + OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); + Tracer
mockTracer = mock(Tracer.class); + TracerProvider mockTracerProvider = mock(TracerProvider.class); + when(mockTracerProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockTracer); + SpanBuilder mockSpanBuilder = mock(SpanBuilder.class); + when(mockTracer.spanBuilder("span_name")).thenReturn(mockSpanBuilder); + when(mockSpanBuilder.setParent(any())).thenReturn(mockSpanBuilder); + when(mockSpanBuilder.setAllAttributes(any(io.opentelemetry.api.common.Attributes.class))).thenReturn(mockSpanBuilder); + when(mockSpanBuilder.startSpan()).thenReturn(mock(io.opentelemetry.api.trace.Span.class)); + when(mockSpanBuilder.setSpanKind(any(io.opentelemetry.api.trace.SpanKind.class))).thenReturn(mockSpanBuilder); + + Span parentSpan = new OTelSpan("parent_span", mock(io.opentelemetry.api.trace.Span.class), null); + + TracingTelemetry tracingTelemetry = new OTelTracingTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + mockTracerProvider + ); + Attributes attributes = Attributes.create() + .addAttribute("key1", 1L) + .addAttribute("key2", 2.0) + .addAttribute("key3", true) + .addAttribute("key4", "key4"); + Span span = tracingTelemetry.createSpan(SpanCreationContext.internal().name("span_name").attributes(attributes), parentSpan); + + io.opentelemetry.api.common.Attributes otelAttributes = io.opentelemetry.api.common.Attributes.builder() + .put("key1", 1L) + .put("key2", 2.0) + .put("key3", true) + .put("key4", "key4") + .build(); + verify(mockSpanBuilder).setParent(any()); + verify(mockSpanBuilder).setAllAttributes(otelAttributes); + assertNotNull(span.getParentSpan()); + + assertEquals("parent_span", span.getParentSpan().getSpanName()); + } + + private io.opentelemetry.api.common.Attributes createAttribute(Attributes attributes) { + AttributesBuilder attributesBuilder = io.opentelemetry.api.common.Attributes.builder(); + attributes.getAttributesMap().forEach((x, y) -> attributesBuilder.put(x, (String) y)); + return attributesBuilder.build(); + } + + private io.opentelemetry.api.common.Attributes createAttributeLong(Attributes attributes) { + AttributesBuilder attributesBuilder = io.opentelemetry.api.common.Attributes.builder(); + attributes.getAttributesMap().forEach((x, y) -> attributesBuilder.put(x, (Long) y)); + return attributesBuilder.build(); + } + + @SuppressWarnings({ "rawtypes", "unchecked" }) public void testGetContextPropagator() { OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); Tracer mockTracer = mock(Tracer.class); - when(mockOpenTelemetry.getTracer("os-tracer")).thenReturn(mockTracer); + TracerProvider mockTracerProvider = mock(TracerProvider.class); + when(mockTracerProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockTracer); - TracingTelemetry tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry); + TracingTelemetry tracingTelemetry = new OTelTracingTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + mockTracerProvider + ); assertTrue(tracingTelemetry.getContextPropagator() instanceof OTelTracingContextPropagator); } diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/exporter/DummySpanExporter.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/exporter/DummySpanExporter.java index 3f250b5aa481f..8a47350a7c3bb 100644 --- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/exporter/DummySpanExporter.java +++ 
b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/exporter/DummySpanExporter.java @@ -8,10 +8,11 @@ package org.opensearch.telemetry.tracing.exporter; +import java.util.Collection; + import io.opentelemetry.sdk.common.CompletableResultCode; import io.opentelemetry.sdk.trace.data.SpanData; import io.opentelemetry.sdk.trace.export.SpanExporter; -import java.util.Collection; public class DummySpanExporter implements SpanExporter { @Override diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/exporter/DummySpanExporterWithGetDefault.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/exporter/DummySpanExporterWithGetDefault.java new file mode 100644 index 0000000000000..225cfa6ab2d1a --- /dev/null +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/exporter/DummySpanExporterWithGetDefault.java @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing.exporter; + +import java.util.Collection; + +import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.sdk.trace.data.SpanData; +import io.opentelemetry.sdk.trace.export.SpanExporter; + +public class DummySpanExporterWithGetDefault implements SpanExporter { + + public static DummySpanExporterWithGetDefault getDefault() { + return new DummySpanExporterWithGetDefault(); + } + + @Override + public CompletableResultCode export(Collection<SpanData> spans) { + return null; + } + + @Override + public CompletableResultCode flush() { + return null; + } + + @Override + public CompletableResultCode shutdown() { + return null; + } +} diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/exporter/OTelSpanExporterFactoryTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/exporter/OTelSpanExporterFactoryTests.java index 80cba425ed163..d71aef9366e21 100644 --- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/exporter/OTelSpanExporterFactoryTests.java +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/exporter/OTelSpanExporterFactoryTests.java @@ -8,12 +8,13 @@ package org.opensearch.telemetry.tracing.exporter; -import io.opentelemetry.exporter.logging.LoggingSpanExporter; -import io.opentelemetry.sdk.trace.export.SpanExporter; import org.opensearch.common.settings.Settings; import org.opensearch.telemetry.OTelTelemetrySettings; import org.opensearch.test.OpenSearchTestCase; +import io.opentelemetry.exporter.logging.LoggingSpanExporter; +import io.opentelemetry.sdk.trace.export.SpanExporter; + public class OTelSpanExporterFactoryTests extends OpenSearchTestCase { public void testSpanExporterDefault() { @@ -62,4 +63,15 @@ public void testSpanExporterNonSpanExporterClass() { } + public void testSpanExporterGetDefaultMethod() { + Settings settings = Settings.builder() + .put( + OTelTelemetrySettings.OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING.getKey(), + "org.opensearch.telemetry.tracing.exporter.DummySpanExporterWithGetDefault" + ) + .build(); + + assertTrue(OTelSpanExporterFactory.create(settings) instanceof DummySpanExporterWithGetDefault); + } + } diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/OTelSamplerFactoryTests.java 
b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/OTelSamplerFactoryTests.java new file mode 100644 index 0000000000000..39ccf299dfdc4 --- /dev/null +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/OTelSamplerFactoryTests.java @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.telemetry.tracing.sampler; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.telemetry.OTelTelemetrySettings; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Set; + +import io.opentelemetry.sdk.trace.samplers.Sampler; + +import static org.opensearch.telemetry.TelemetrySettings.TRACER_ENABLED_SETTING; +import static org.opensearch.telemetry.TelemetrySettings.TRACER_SAMPLER_PROBABILITY; + +public class OTelSamplerFactoryTests extends OpenSearchTestCase { + + public void testDefaultCreate() { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, Set.of(TRACER_SAMPLER_PROBABILITY, TRACER_ENABLED_SETTING)); + TelemetrySettings telemetrySettings = new TelemetrySettings(Settings.EMPTY, clusterSettings); + Sampler sampler = OTelSamplerFactory.create(telemetrySettings, Settings.EMPTY); + assertEquals(sampler.getClass(), ProbabilisticTransportActionSampler.class); + } + + public void testCreateWithSingleSampler() { + Settings settings = Settings.builder() + .put(OTelTelemetrySettings.OTEL_TRACER_SPAN_SAMPLER_CLASS_SETTINGS.getKey(), ProbabilisticSampler.class.getName()) + .build(); + + ClusterSettings clusterSettings = new ClusterSettings(settings, Set.of(TRACER_SAMPLER_PROBABILITY, TRACER_ENABLED_SETTING)); + TelemetrySettings telemetrySettings = new TelemetrySettings(settings, clusterSettings); + Sampler sampler = OTelSamplerFactory.create(telemetrySettings, settings); + assertTrue(sampler instanceof ProbabilisticSampler); + } +} diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticSamplerTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticSamplerTests.java new file mode 100644 index 0000000000000..a094cd0119f5e --- /dev/null +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticSamplerTests.java @@ -0,0 +1,68 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.tracing.sampler; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Set; + +import io.opentelemetry.api.trace.SpanKind; +import io.opentelemetry.context.Context; +import io.opentelemetry.sdk.trace.samplers.Sampler; + +import static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_EXPORTER_DELAY_SETTING; +import static org.opensearch.telemetry.TelemetrySettings.TRACER_ENABLED_SETTING; +import static org.opensearch.telemetry.TelemetrySettings.TRACER_SAMPLER_PROBABILITY; +import static org.mockito.Mockito.mock; + +public class ProbabilisticSamplerTests extends OpenSearchTestCase { + + // Creating a ProbabilisticSampler with null settings must fail fast + public void testProbabilisticSamplerWithNullSettings() { + // Verify that creation throws NullPointerException when given null settings + assertThrows(NullPointerException.class, () -> { ProbabilisticSampler.create(null, null, null); }); + } + + public void testDefaultGetSampler() { + Settings settings = Settings.builder().put(TRACER_EXPORTER_DELAY_SETTING.getKey(), "1s").build(); + TelemetrySettings telemetrySettings = new TelemetrySettings( + Settings.EMPTY, + new ClusterSettings(settings, Set.of(TRACER_SAMPLER_PROBABILITY, TRACER_ENABLED_SETTING)) + ); + + // Probabilistic Sampler + Sampler probabilisticSampler = ProbabilisticSampler.create(telemetrySettings, Settings.EMPTY, null); + + assertEquals(0.01, ((ProbabilisticSampler) probabilisticSampler).getSamplingRatio(), 0.0d); + } + + public void testGetSamplerWithUpdatedSamplingRatio() { + Settings settings = Settings.builder().put(TRACER_EXPORTER_DELAY_SETTING.getKey(), "1s").build(); + TelemetrySettings telemetrySettings = new TelemetrySettings( + Settings.EMPTY, + new ClusterSettings(settings, Set.of(TRACER_SAMPLER_PROBABILITY, TRACER_ENABLED_SETTING)) + ); + + // Probabilistic Sampler + Sampler probabilisticSampler = ProbabilisticSampler.create(telemetrySettings, Settings.EMPTY, null); + + assertEquals(0.01d, ((ProbabilisticSampler) probabilisticSampler).getSamplingRatio(), 0.0d); + + telemetrySettings.setSamplingProbability(0.02); + + // Need to call shouldSample() to update the value of samplingRatio + probabilisticSampler.shouldSample(mock(Context.class), "00000000000000000000000000000000", "", SpanKind.INTERNAL, null, null); + + // getSamplingRatio() now reflects the probability refreshed by shouldSample() + assertEquals(0.02, ((ProbabilisticSampler) probabilisticSampler).getSamplingRatio(), 0.0d); + } +} diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticTransportActionSamplerTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticTransportActionSamplerTests.java new file mode 100644 index 0000000000000..261b0252fef60 --- /dev/null +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticTransportActionSamplerTests.java @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.telemetry.tracing.sampler; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Collections; +import java.util.Set; + +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.trace.SpanKind; +import io.opentelemetry.context.Context; +import io.opentelemetry.sdk.trace.samplers.Sampler; +import io.opentelemetry.sdk.trace.samplers.SamplingResult; + +import static org.opensearch.telemetry.TelemetrySettings.TRACER_ENABLED_SETTING; +import static org.opensearch.telemetry.TelemetrySettings.TRACER_SAMPLER_PROBABILITY; +import static org.opensearch.telemetry.tracing.AttributeNames.TRANSPORT_ACTION; +import static org.mockito.Mockito.mock; + +public class ProbabilisticTransportActionSamplerTests extends OpenSearchTestCase { + + public void testGetSamplerWithActionSamplingRatio() { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, Set.of(TRACER_SAMPLER_PROBABILITY, TRACER_ENABLED_SETTING)); + + TelemetrySettings telemetrySettings = new TelemetrySettings(Settings.EMPTY, clusterSettings); + + // ProbabilisticTransportActionSampler + Sampler probabilisticTransportActionSampler = ProbabilisticTransportActionSampler.create(telemetrySettings, Settings.EMPTY, null); + + SamplingResult result = probabilisticTransportActionSampler.shouldSample( + mock(Context.class), + "00000000000000000000000000000000", + "spanName", + SpanKind.INTERNAL, + Attributes.builder().put(TRANSPORT_ACTION, "dummy_action").build(), + Collections.emptyList() + ); + // Verify that the sampler returned SamplingResult.recordAndSample(): the all-zero trace ID always falls below the sampling threshold, so this action is sampled + assertEquals(SamplingResult.recordAndSample(), result); + assertEquals(0.001, ((ProbabilisticTransportActionSampler) probabilisticTransportActionSampler).getSamplingRatio(), 0.0d); + } +} diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/RequestSamplerTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/RequestSamplerTests.java new file mode 100644 index 0000000000000..da234ca13dc9d --- /dev/null +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/RequestSamplerTests.java @@ -0,0 +1,139 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.telemetry.tracing.sampler; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.util.Collections; +import java.util.Set; + +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.trace.SpanKind; +import io.opentelemetry.context.Context; +import io.opentelemetry.sdk.trace.samplers.Sampler; +import io.opentelemetry.sdk.trace.samplers.SamplingResult; + +import static org.opensearch.telemetry.TelemetrySettings.TRACER_ENABLED_SETTING; +import static org.opensearch.telemetry.TelemetrySettings.TRACER_SAMPLER_PROBABILITY; +import static org.opensearch.telemetry.tracing.AttributeNames.TRANSPORT_ACTION; +import static org.mockito.Mockito.mock; + +public class RequestSamplerTests extends OpenSearchTestCase { + private ClusterSettings clusterSettings; + private TelemetrySettings telemetrySettings; + private RequestSampler requestSampler; + private Context parentContext; + + @Before + public void init() { + clusterSettings = new ClusterSettings(Settings.EMPTY, Set.of(TRACER_SAMPLER_PROBABILITY, TRACER_ENABLED_SETTING)); + telemetrySettings = new TelemetrySettings(Settings.EMPTY, clusterSettings); + Sampler fallbackSampler = OTelSamplerFactory.create(telemetrySettings, Settings.EMPTY); + requestSampler = new RequestSampler(fallbackSampler); + parentContext = mock(Context.class); + } + + public void testShouldSampleWithTraceAttributeAsTrue() { + Attributes attributes = Attributes.of(AttributeKey.stringKey("trace"), "true"); + + SamplingResult result = requestSampler.shouldSample( + parentContext, + "traceId", + "spanName", + SpanKind.INTERNAL, + attributes, + Collections.emptyList() + ); + assertEquals(SamplingResult.recordAndSample(), result); + } + + public void testShouldSampleWithTraceAttributeAsFalse() { + Attributes attributes = Attributes.of(AttributeKey.stringKey("trace"), "false"); + + SamplingResult result = requestSampler.shouldSample( + parentContext, + "traceId", + "spanName", + SpanKind.INTERNAL, + attributes, + Collections.emptyList() + ); + assertEquals(SamplingResult.drop(), result); + } + + public void testShouldSampleForProbabilisticSampler() { + clusterSettings.applySettings( + Settings.builder() + .put("telemetry.tracer.sampler.probability", "1.0") + .put("telemetry.otel.tracer.span.sampler.classes", "org.opensearch.telemetry.tracing.sampler.ProbabilisticSampler") + .build() + ); + + Attributes attributes = Attributes.builder().build(); + + SamplingResult result = requestSampler.shouldSample( + parentContext, + "00000000000000000000000000000000", + "spanName", + SpanKind.INTERNAL, + attributes, + Collections.emptyList() + ); + + // Verify that request is sampled + assertEquals(SamplingResult.recordAndSample(), result); + + clusterSettings.applySettings(Settings.builder().put("telemetry.tracer.sampler.probability", "0.0").build()); + result = requestSampler.shouldSample( + parentContext, + "00000000000000000000000000000000", + "spanName", + SpanKind.INTERNAL, + attributes, + Collections.emptyList() + ); + assertEquals(SamplingResult.drop(), result); + + } + + public void testShouldSampleForProbabilisticTransportActionSampler() { + clusterSettings.applySettings( + Settings.builder() + .put( + "telemetry.otel.tracer.span.sampler.classes", + 
"org.opensearch.telemetry.tracing.sampler.ProbabilisticTransportActionSampler" + ) + .build() + ); + clusterSettings.applySettings(Settings.builder().put("telemetry.tracer.action.sampler.probability", "1.0").build()); + + // Create a mock Context and Attributes with dummy action + Context parentContext = mock(Context.class); + Attributes attributes = Attributes.builder().put(TRANSPORT_ACTION, "dummy_action").build(); + + // Calling shouldSample to update samplingRatio + SamplingResult result = requestSampler.shouldSample( + parentContext, + "00000000000000000000000000000000", + "spanName", + SpanKind.INTERNAL, + attributes, + Collections.emptyList() + ); + + // Verify that request is sampled + assertEquals(SamplingResult.recordAndSample(), result); + } + +} diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..beb44fc0f4cf9 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +8509a72b8a5a2d33d611e99254aed39765c3ad82 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.94.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.94.Final.jar.sha1 deleted file mode 100644 index 05b1c2a4d614e..0000000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -eec248b26f16e888688e5bb37b7eeda76b78d2f7 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..4c74bb06fd83b --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +0a1d32debf2ed07c5852ab5b2904c43adb76c39e \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.94.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.94.Final.jar.sha1 deleted file mode 100644 index baa7e25f1ac49..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c70ef20ca338558147887df60f46341bc47f6900 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..38eb2e5bad80a --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +04d8e9e51b7254bd26a42fe17bdcae32e4c6ebb3 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.94.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.94.Final.jar.sha1 deleted file mode 100644 index 8c018be2565e5..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9e5404764092c1f6305ad5719078f46ab228d587 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..bbe91c6ccfb1d --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +4f17a547530d64becd7179507b25f4154bcfba57 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.94.Final.jar.sha1 
b/plugins/transport-nio/licenses/netty-common-4.1.94.Final.jar.sha1 deleted file mode 100644 index b787338551ede..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ad4ecf779ebc794cd351f57792f56ea01387b868 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..ba27b38632622 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +d4c6b05f4d9aca117981297fb7f02953102ebb5e \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.94.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.94.Final.jar.sha1 deleted file mode 100644 index b08e85ba7adf8..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cd9121ce24d6d3f2898946d04b0ef3ec548b00b4 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..3bc0f7b3fed09 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +dfee84308341a42131dd0f8ac0e1e02d627c19f3 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.94.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.94.Final.jar.sha1 deleted file mode 100644 index 4c9e4dda2b852..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e96f649e8e9dcb29a1f8e95328b99c9eb6cf76c2 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..19419999300dd --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +d6a105c621b47d1410e0e09419d7209d2d46e914 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.94.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.94.Final.jar.sha1 deleted file mode 100644 index ed7760b8e15d1..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ec783a737f96991a87b1d5794e2f9eb2024d708a \ No newline at end of file diff --git a/plugins/transport-nio/src/internalClusterTest/java/org/opensearch/http/nio/NioPipeliningIT.java b/plugins/transport-nio/src/internalClusterTest/java/org/opensearch/http/nio/NioPipeliningIT.java index ac06bf03ed8cd..4f26e8ae65259 100644 --- a/plugins/transport-nio/src/internalClusterTest/java/org/opensearch/http/nio/NioPipeliningIT.java +++ b/plugins/transport-nio/src/internalClusterTest/java/org/opensearch/http/nio/NioPipeliningIT.java @@ -32,9 +32,8 @@ package org.opensearch.http.nio; -import io.netty.handler.codec.http.FullHttpResponse; import org.opensearch.NioIntegTestCase; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.http.HttpServerTransport; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; @@ -42,6 +41,8 @@ import java.util.Collection; import java.util.Locale; 
+import io.netty.handler.codec.http.FullHttpResponse; + import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; diff --git a/plugins/transport-nio/src/internalClusterTest/java/org/opensearch/transport/nio/NioTransportLoggingIT.java b/plugins/transport-nio/src/internalClusterTest/java/org/opensearch/transport/nio/NioTransportLoggingIT.java index 25de433e3489f..2be7730cff9e9 100644 --- a/plugins/transport-nio/src/internalClusterTest/java/org/opensearch/transport/nio/NioTransportLoggingIT.java +++ b/plugins/transport-nio/src/internalClusterTest/java/org/opensearch/transport/nio/NioTransportLoggingIT.java @@ -34,12 +34,11 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; - import org.opensearch.NioIntegTestCase; import org.opensearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.MockLogAppender; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.junit.annotations.TestLogging; import org.opensearch.transport.TcpTransport; import org.opensearch.transport.TransportLogger; diff --git a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/ByteBufUtils.java b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/ByteBufUtils.java index f0129e4760c60..d6665607af5d3 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/ByteBufUtils.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/ByteBufUtils.java @@ -31,9 +31,6 @@ package org.opensearch.http.nio; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.CompositeByteBuf; -import io.netty.buffer.Unpooled; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; import org.opensearch.core.common.bytes.BytesArray; @@ -44,6 +41,10 @@ import java.util.ArrayList; import java.util.List; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.CompositeByteBuf; +import io.netty.buffer.Unpooled; + class ByteBufUtils { /** diff --git a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/HttpReadWriteHandler.java b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/HttpReadWriteHandler.java index 561695c06effe..d44515f3dc727 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/HttpReadWriteHandler.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/HttpReadWriteHandler.java @@ -32,13 +32,6 @@ package org.opensearch.http.nio; -import io.netty.channel.ChannelHandler; -import io.netty.handler.codec.ByteToMessageDecoder; -import io.netty.handler.codec.http.HttpContentCompressor; -import io.netty.handler.codec.http.HttpContentDecompressor; -import io.netty.handler.codec.http.HttpObjectAggregator; -import io.netty.handler.codec.http.HttpRequestDecoder; -import io.netty.handler.codec.http.HttpResponseEncoder; import org.opensearch.common.unit.TimeValue; import org.opensearch.http.HttpHandlingSettings; import org.opensearch.http.HttpPipelinedRequest; @@ -58,6 +51,14 @@ import java.util.function.BiConsumer; import java.util.function.LongSupplier; +import io.netty.channel.ChannelHandler; +import io.netty.handler.codec.ByteToMessageDecoder; +import io.netty.handler.codec.http.HttpContentCompressor; +import io.netty.handler.codec.http.HttpContentDecompressor; +import io.netty.handler.codec.http.HttpObjectAggregator; +import io.netty.handler.codec.http.HttpRequestDecoder; +import 
io.netty.handler.codec.http.HttpResponseEncoder; + public class HttpReadWriteHandler implements NioChannelHandler { private final NettyAdaptor adaptor; diff --git a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NettyAdaptor.java b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NettyAdaptor.java index 9cb224aa8decf..0b7f4ee7646d1 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NettyAdaptor.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NettyAdaptor.java @@ -32,14 +32,6 @@ package org.opensearch.http.nio; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.Unpooled; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelHandler; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelOutboundHandlerAdapter; -import io.netty.channel.ChannelPromise; -import io.netty.channel.embedded.EmbeddedChannel; import org.opensearch.ExceptionsHelper; import org.opensearch.nio.FlushOperation; import org.opensearch.nio.Page; @@ -49,6 +41,15 @@ import java.util.LinkedList; import java.util.function.BiConsumer; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelOutboundHandlerAdapter; +import io.netty.channel.ChannelPromise; +import io.netty.channel.embedded.EmbeddedChannel; + class NettyAdaptor { private final EmbeddedChannel nettyChannel; diff --git a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NettyListener.java b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NettyListener.java index 80b46ec99f69c..e939dbc07e471 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NettyListener.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NettyListener.java @@ -32,10 +32,6 @@ package org.opensearch.http.nio; -import io.netty.channel.Channel; -import io.netty.channel.ChannelPromise; -import io.netty.util.concurrent.Future; -import io.netty.util.concurrent.GenericFutureListener; import org.opensearch.ExceptionsHelper; import org.opensearch.common.util.concurrent.FutureUtils; @@ -44,6 +40,11 @@ import java.util.concurrent.TimeoutException; import java.util.function.BiConsumer; +import io.netty.channel.Channel; +import io.netty.channel.ChannelPromise; +import io.netty.util.concurrent.Future; +import io.netty.util.concurrent.GenericFutureListener; + /** * This is an {@link BiConsumer} that interfaces with netty code. It wraps a netty promise and will * complete that promise when accept is called. 
It delegates the normal promise methods to the underlying diff --git a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpChannel.java b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpChannel.java index a20bb55458951..6c341b07f433d 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpChannel.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpChannel.java @@ -32,7 +32,7 @@ package org.opensearch.http.nio; -import org.opensearch.action.ActionListener; +import org.opensearch.core.action.ActionListener; import org.opensearch.http.HttpChannel; import org.opensearch.http.HttpResponse; import org.opensearch.nio.NioSocketChannel; diff --git a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpPipeliningHandler.java b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpPipeliningHandler.java index aa173b51f61d1..ed1e95b1b8c69 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpPipeliningHandler.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpPipeliningHandler.java @@ -32,9 +32,6 @@ package org.opensearch.http.nio; -import io.netty.channel.ChannelDuplexHandler; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelPromise; import org.apache.logging.log4j.Logger; import org.opensearch.common.collect.Tuple; import org.opensearch.http.HttpPipelinedRequest; @@ -45,6 +42,10 @@ import java.nio.channels.ClosedChannelException; import java.util.List; +import io.netty.channel.ChannelDuplexHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelPromise; + /** * Implements HTTP pipelining ordering, ensuring that responses are completely served in the same order as their corresponding requests. 
*/ diff --git a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpRequest.java b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpRequest.java index 5007fc9076ca7..5abd6f2710198 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpRequest.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpRequest.java @@ -32,22 +32,10 @@ package org.opensearch.http.nio; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.Unpooled; -import io.netty.handler.codec.http.DefaultFullHttpRequest; -import io.netty.handler.codec.http.DefaultHttpHeaders; -import io.netty.handler.codec.http.FullHttpRequest; -import io.netty.handler.codec.http.HttpHeaderNames; -import io.netty.handler.codec.http.HttpHeaders; -import io.netty.handler.codec.http.HttpMethod; -import io.netty.handler.codec.http.cookie.Cookie; -import io.netty.handler.codec.http.cookie.ServerCookieDecoder; -import io.netty.handler.codec.http.cookie.ServerCookieEncoder; - import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.rest.RestStatus; import org.opensearch.http.HttpRequest; import org.opensearch.rest.RestRequest; -import org.opensearch.core.rest.RestStatus; import java.util.AbstractMap; import java.util.Collection; @@ -58,6 +46,18 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.DefaultHttpHeaders; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpHeaders; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.cookie.Cookie; +import io.netty.handler.codec.http.cookie.ServerCookieDecoder; +import io.netty.handler.codec.http.cookie.ServerCookieEncoder; + public class NioHttpRequest implements HttpRequest { private final FullHttpRequest request; @@ -257,7 +257,7 @@ public FullHttpRequest nettyRequest() { /** * A wrapper of {@link HttpHeaders} that implements a map to prevent copying unnecessarily. This class does not support modifications * and due to the underlying implementation, it performs case insensitive lookups of key to values. - * + * <p> * It is important to note that this implementation does have some downsides in that each invocation of the * {@link #values()} and {@link #entrySet()} methods will perform a copy of the values in the HttpHeaders rather than returning a * view of the underlying values. 
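The javadoc above describes a Map facade over Netty's HttpHeaders: lookups are case-insensitive and copy nothing, while values() and entrySet() materialize a fresh copy on every call. A minimal JDK-only sketch of that shape follows; the class and field names are illustrative, not the plugin's actual inner class:

```java
import java.util.AbstractMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

// Illustrative sketch only: a read-only, case-insensitive Map view over
// header name/value pairs. get() scans the backing pairs without copying;
// entrySet() (and therefore values()) builds a fresh copy on every call.
final class CaseInsensitiveHeaderView extends AbstractMap<String, String> {

    private final List<Map.Entry<String, String>> headers;

    CaseInsensitiveHeaderView(List<Map.Entry<String, String>> headers) {
        this.headers = headers;
    }

    @Override
    public String get(Object key) {
        if (key instanceof String) {
            for (Map.Entry<String, String> header : headers) {
                if (header.getKey().equalsIgnoreCase((String) key)) {
                    return header.getValue();
                }
            }
        }
        return null;
    }

    @Override
    public Set<Map.Entry<String, String>> entrySet() {
        return new HashSet<>(headers); // a copy per call, not a live view
    }
}
```

With this shape, new CaseInsensitiveHeaderView(List.of(Map.entry("Content-Type", "application/json"))).get("content-type") resolves despite the differing case, while each entrySet() call pays the copy the javadoc warns about.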
diff --git a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpRequestCreator.java b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpRequestCreator.java index b75323b017282..27e43f4eef386 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpRequestCreator.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpRequestCreator.java @@ -32,13 +32,14 @@ package org.opensearch.http.nio; +import org.opensearch.ExceptionsHelper; + +import java.util.List; + import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; import io.netty.handler.codec.MessageToMessageDecoder; import io.netty.handler.codec.http.FullHttpRequest; -import org.opensearch.ExceptionsHelper; - -import java.util.List; @ChannelHandler.Sharable class NioHttpRequestCreator extends MessageToMessageDecoder<FullHttpRequest> { diff --git a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpResponse.java b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpResponse.java index 0d3ce72c6646b..c349ee14bc70f 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpResponse.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpResponse.java @@ -32,15 +32,15 @@ package org.opensearch.http.nio; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.http.HttpResponse; + import io.netty.handler.codec.http.DefaultFullHttpResponse; import io.netty.handler.codec.http.HttpHeaders; import io.netty.handler.codec.http.HttpResponseStatus; import io.netty.handler.codec.http.HttpVersion; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.http.HttpResponse; -import org.opensearch.core.rest.RestStatus; - public class NioHttpResponse extends DefaultFullHttpResponse implements HttpResponse { private final HttpHeaders requestHeaders; diff --git a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpResponseCreator.java b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpResponseCreator.java index e63ec57713b99..93822b57ec40f 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpResponseCreator.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpResponseCreator.java @@ -32,6 +32,11 @@ package org.opensearch.http.nio; +import org.opensearch.common.Booleans; +import org.opensearch.monitor.jvm.JvmInfo; + +import java.util.List; + import io.netty.buffer.ByteBuf; import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; @@ -40,10 +45,6 @@ import io.netty.handler.codec.http.DefaultHttpResponse; import io.netty.handler.codec.http.DefaultLastHttpContent; import io.netty.handler.codec.http.HttpResponse; -import org.opensearch.common.Booleans; -import org.opensearch.monitor.jvm.JvmInfo; - -import java.util.List; /** * Split up large responses to prevent batch compression or other CPU intensive operations down the pipeline. 
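The class comment above captures the intent of NioHttpResponseCreator: rather than handing one huge buffer to compression or other CPU-heavy pipeline stages at once, the response body is cut into bounded chunks. A minimal sketch of that splitting idea on plain ByteBuffers; the 128 KiB threshold is an assumed illustrative value, not the plugin's actual chunk size:

```java
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

// Illustrative sketch only: cut a large payload into bounded slices so no
// single downstream step has to process the whole body in one pass. The
// real handler operates on Netty ByteBufs inside the channel pipeline.
final class ResponseSplitSketch {

    static final int SPLIT_THRESHOLD = 128 * 1024; // assumed chunk size

    static List<ByteBuffer> split(ByteBuffer content) {
        List<ByteBuffer> chunks = new ArrayList<>();
        while (content.remaining() > SPLIT_THRESHOLD) {
            ByteBuffer chunk = content.slice();
            chunk.limit(SPLIT_THRESHOLD);                      // bounded slice
            chunks.add(chunk);
            content.position(content.position() + SPLIT_THRESHOLD);
        }
        chunks.add(content.slice());                           // final remainder
        return chunks;
    }
}
```

Bounding each slice keeps per-chunk work and intermediate allocations proportional to the threshold instead of the full response size.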
diff --git a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpServerChannel.java b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpServerChannel.java index 781a4f03da952..2c47bf009f2a3 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpServerChannel.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpServerChannel.java @@ -32,7 +32,7 @@ package org.opensearch.http.nio; -import org.opensearch.action.ActionListener; +import org.opensearch.core.action.ActionListener; import org.opensearch.http.HttpServerChannel; import org.opensearch.nio.NioServerSocketChannel; diff --git a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpServerTransport.java b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpServerTransport.java index 1befc110eb6a5..ecf9ad9f17f87 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpServerTransport.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpServerTransport.java @@ -34,16 +34,15 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; - import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.PageCacheRecycler; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.http.AbstractHttpServerTransport; import org.opensearch.http.HttpChannel; @@ -57,6 +56,7 @@ import org.opensearch.nio.NioSocketChannel; import org.opensearch.nio.ServerChannelContext; import org.opensearch.nio.SocketChannelContext; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.nio.NioGroupFactory; import org.opensearch.transport.nio.PageAllocator; @@ -107,9 +107,10 @@ public NioHttpServerTransport( NamedXContentRegistry xContentRegistry, Dispatcher dispatcher, NioGroupFactory nioGroupFactory, - ClusterSettings clusterSettings + ClusterSettings clusterSettings, + Tracer tracer ) { - super(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher, clusterSettings); + super(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher, clusterSettings, tracer); this.pageAllocator = new PageAllocator(pageCacheRecycler); this.nioGroupFactory = nioGroupFactory; diff --git a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/PagedByteBuf.java b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/PagedByteBuf.java index 4dc9c0935be67..221b2104e9904 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/PagedByteBuf.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/PagedByteBuf.java @@ -32,17 +32,18 @@ package org.opensearch.http.nio; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.CompositeByteBuf; -import io.netty.buffer.Unpooled; -import io.netty.buffer.UnpooledByteBufAllocator; -import io.netty.buffer.UnpooledHeapByteBuf; import org.opensearch.nio.Page; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; 
+import io.netty.buffer.ByteBuf; +import io.netty.buffer.CompositeByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.buffer.UnpooledByteBufAllocator; +import io.netty.buffer.UnpooledHeapByteBuf; + public class PagedByteBuf extends UnpooledHeapByteBuf { private final Runnable releasable; diff --git a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTcpChannel.java b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTcpChannel.java index 48b797840f5ff..3aa02d11dfd75 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTcpChannel.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTcpChannel.java @@ -32,7 +32,7 @@ package org.opensearch.transport.nio; -import org.opensearch.action.ActionListener; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.nio.NioSocketChannel; import org.opensearch.transport.TcpChannel; diff --git a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTcpServerChannel.java b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTcpServerChannel.java index 69218f2f1fcf5..a0fb2ef4cfdf2 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTcpServerChannel.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTcpServerChannel.java @@ -32,7 +32,7 @@ package org.opensearch.transport.nio; -import org.opensearch.action.ActionListener; +import org.opensearch.core.action.ActionListener; import org.opensearch.nio.NioServerSocketChannel; import org.opensearch.transport.TcpServerChannel; diff --git a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransport.java b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransport.java index 1509e0b179bfe..55920bab4efd3 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransport.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransport.java @@ -34,17 +34,16 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; - import org.opensearch.OpenSearchException; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.PageCacheRecycler; -import org.opensearch.indices.breaker.CircuitBreakerService; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.nio.BytesChannelContext; import org.opensearch.nio.ChannelFactory; import org.opensearch.nio.Config; @@ -53,6 +52,7 @@ import org.opensearch.nio.NioSelector; import org.opensearch.nio.NioSocketChannel; import org.opensearch.nio.ServerChannelContext; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TcpTransport; import org.opensearch.transport.TransportSettings; @@ -85,9 +85,10 @@ protected NioTransport( PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService, - NioGroupFactory groupFactory + 
NioGroupFactory groupFactory, + Tracer tracer ) { - super(settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService); + super(settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService, tracer); this.pageAllocator = new PageAllocator(pageCacheRecycler); this.groupFactory = groupFactory; } diff --git a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransportPlugin.java b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransportPlugin.java index 67598aec154fa..d4be876867651 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransportPlugin.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransportPlugin.java @@ -36,7 +36,6 @@ import org.apache.logging.log4j.Logger; import org.opensearch.Version; import org.opensearch.common.SetOnce; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; @@ -44,12 +43,14 @@ import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.http.HttpServerTransport; import org.opensearch.http.nio.NioHttpServerTransport; -import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.plugins.NetworkPlugin; import org.opensearch.plugins.Plugin; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; @@ -90,7 +91,8 @@ public Map<String, Supplier<Transport>> getTransports( PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, - NetworkService networkService + NetworkService networkService, + Tracer tracer ) { return Collections.singletonMap( NIO_TRANSPORT_NAME, @@ -102,7 +104,8 @@ public Map<String, Supplier<Transport>> getTransports( pageCacheRecycler, namedWriteableRegistry, circuitBreakerService, - getNioGroupFactory(settings) + getNioGroupFactory(settings), + tracer ) ); } @@ -117,7 +120,8 @@ public Map<String, Supplier<HttpServerTransport>> getHttpTransports( NamedXContentRegistry xContentRegistry, NetworkService networkService, HttpServerTransport.Dispatcher dispatcher, - ClusterSettings clusterSettings + ClusterSettings clusterSettings, + Tracer tracer ) { return Collections.singletonMap( NIO_HTTP_TRANSPORT_NAME, @@ -130,7 +134,8 @@ public Map<String, Supplier<HttpServerTransport>> getHttpTransports( xContentRegistry, dispatcher, getNioGroupFactory(settings), - clusterSettings + clusterSettings, + tracer ) ); } diff --git a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/TcpReadWriteHandler.java b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/TcpReadWriteHandler.java index 29ef19a2aec87..0c90deed6411c 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/TcpReadWriteHandler.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/TcpReadWriteHandler.java @@ -32,14 +32,14 @@ package org.opensearch.transport.nio; -import 
org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.core.common.bytes.CompositeBytesReference; import org.opensearch.common.bytes.ReleasableBytesReference; -import org.opensearch.common.util.PageCacheRecycler; -import org.opensearch.common.util.io.IOUtils; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; +import org.opensearch.common.util.PageCacheRecycler; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.bytes.CompositeBytesReference; import org.opensearch.nio.BytesWriteHandler; import org.opensearch.nio.InboundChannelBuffer; import org.opensearch.nio.Page; diff --git a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/HttpReadWriteHandlerTests.java b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/HttpReadWriteHandlerTests.java index c606a4818a324..a3f7a7822cd40 100644 --- a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/HttpReadWriteHandlerTests.java +++ b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/HttpReadWriteHandlerTests.java @@ -32,25 +32,11 @@ package org.opensearch.http.nio; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.Unpooled; -import io.netty.channel.ChannelPromise; -import io.netty.channel.embedded.EmbeddedChannel; -import io.netty.handler.codec.http.DefaultFullHttpRequest; -import io.netty.handler.codec.http.FullHttpResponse; -import io.netty.handler.codec.http.HttpHeaderNames; -import io.netty.handler.codec.http.HttpMethod; -import io.netty.handler.codec.http.HttpObjectAggregator; -import io.netty.handler.codec.http.HttpRequestEncoder; -import io.netty.handler.codec.http.HttpResponseDecoder; -import io.netty.handler.codec.http.HttpResponseStatus; -import io.netty.handler.codec.http.HttpUtil; -import io.netty.handler.codec.http.HttpVersion; - -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.rest.RestStatus; import org.opensearch.http.CorsHandler; import org.opensearch.http.HttpChannel; import org.opensearch.http.HttpHandlingSettings; @@ -63,11 +49,8 @@ import org.opensearch.nio.SocketChannelContext; import org.opensearch.nio.TaskScheduler; import org.opensearch.rest.RestRequest; -import org.opensearch.core.rest.RestStatus; import org.opensearch.test.OpenSearchTestCase; - import org.junit.Before; -import org.mockito.ArgumentCaptor; import java.io.IOException; import java.nio.ByteBuffer; @@ -76,12 +59,28 @@ import java.util.List; import java.util.function.BiConsumer; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelPromise; +import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpObjectAggregator; +import io.netty.handler.codec.http.HttpRequestEncoder; +import io.netty.handler.codec.http.HttpResponseDecoder; +import io.netty.handler.codec.http.HttpResponseStatus; +import 
io.netty.handler.codec.http.HttpUtil; +import io.netty.handler.codec.http.HttpVersion; +import org.mockito.ArgumentCaptor; + import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH; import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_READ_TIMEOUT; import static org.mockito.Mockito.any; -import static org.mockito.Mockito.eq; import static org.mockito.Mockito.atLeastOnce; import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; diff --git a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NettyAdaptorTests.java b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NettyAdaptorTests.java index 9ba27802822ea..21634725d5279 100644 --- a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NettyAdaptorTests.java +++ b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NettyAdaptorTests.java @@ -32,13 +32,6 @@ package org.opensearch.http.nio; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.Unpooled; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelInboundHandlerAdapter; -import io.netty.channel.ChannelOutboundHandlerAdapter; -import io.netty.channel.ChannelPromise; -import io.netty.channel.SimpleChannelInboundHandler; import org.opensearch.nio.FlushOperation; import org.opensearch.test.OpenSearchTestCase; @@ -47,6 +40,14 @@ import java.util.Locale; import java.util.concurrent.atomic.AtomicBoolean; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; +import io.netty.channel.ChannelOutboundHandlerAdapter; +import io.netty.channel.ChannelPromise; +import io.netty.channel.SimpleChannelInboundHandler; + public class NettyAdaptorTests extends OpenSearchTestCase { public void testBasicRead() { diff --git a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpClient.java b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpClient.java index edaee15507df9..45e51c6855f79 100644 --- a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpClient.java +++ b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpClient.java @@ -32,28 +32,15 @@ package org.opensearch.http.nio; -import io.netty.buffer.Unpooled; -import io.netty.channel.ChannelHandler; -import io.netty.handler.codec.http.DefaultFullHttpRequest; -import io.netty.handler.codec.http.DefaultFullHttpResponse; -import io.netty.handler.codec.http.FullHttpRequest; -import io.netty.handler.codec.http.FullHttpResponse; -import io.netty.handler.codec.http.HttpContentDecompressor; -import io.netty.handler.codec.http.HttpMethod; -import io.netty.handler.codec.http.HttpObjectAggregator; -import io.netty.handler.codec.http.HttpRequest; -import io.netty.handler.codec.http.HttpRequestEncoder; -import io.netty.handler.codec.http.HttpResponse; -import io.netty.handler.codec.http.HttpResponseDecoder; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.util.io.IOUtils; 
+import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.nio.BytesChannelContext; import org.opensearch.nio.ChannelFactory; import org.opensearch.nio.Config; @@ -83,9 +70,23 @@ import java.util.function.BiConsumer; import java.util.function.Consumer; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelHandler; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.DefaultFullHttpResponse; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpContentDecompressor; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpObjectAggregator; +import io.netty.handler.codec.http.HttpRequest; +import io.netty.handler.codec.http.HttpRequestEncoder; +import io.netty.handler.codec.http.HttpResponse; +import io.netty.handler.codec.http.HttpResponseDecoder; + +import static org.opensearch.common.util.concurrent.OpenSearchExecutors.daemonThreadFactory; import static io.netty.handler.codec.http.HttpHeaderNames.HOST; import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; -import static org.opensearch.common.util.concurrent.OpenSearchExecutors.daemonThreadFactory; import static org.junit.Assert.fail; /** diff --git a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpPipeliningHandlerTests.java b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpPipeliningHandlerTests.java index 87b2234664909..d0c0406bd7774 100644 --- a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpPipeliningHandlerTests.java +++ b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpPipeliningHandlerTests.java @@ -32,26 +32,15 @@ package org.opensearch.http.nio; -import io.netty.buffer.ByteBufUtil; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelPromise; -import io.netty.channel.SimpleChannelInboundHandler; -import io.netty.channel.embedded.EmbeddedChannel; -import io.netty.handler.codec.http.DefaultFullHttpRequest; -import io.netty.handler.codec.http.FullHttpResponse; -import io.netty.handler.codec.http.HttpMethod; -import io.netty.handler.codec.http.QueryStringDecoder; - import org.opensearch.common.Randomness; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.rest.RestStatus; import org.opensearch.http.HttpPipelinedRequest; import org.opensearch.http.HttpPipelinedResponse; import org.opensearch.http.HttpRequest; import org.opensearch.http.HttpResponse; -import org.opensearch.core.rest.RestStatus; import org.opensearch.test.OpenSearchTestCase; - import org.junit.After; import java.nio.channels.ClosedChannelException; @@ -67,9 +56,19 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; +import io.netty.buffer.ByteBufUtil; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelPromise; +import io.netty.channel.SimpleChannelInboundHandler; +import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.QueryStringDecoder; + +import static org.hamcrest.core.Is.is; import static 
io.netty.handler.codec.http.HttpHeaderNames.CONTENT_LENGTH; import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; -import static org.hamcrest.core.Is.is; public class NioHttpPipeliningHandlerTests extends OpenSearchTestCase { @@ -81,7 +80,7 @@ public class NioHttpPipeliningHandlerTests extends OpenSearchTestCase { @After public void cleanup() throws Exception { waitingRequests.keySet().forEach(this::finishRequest); - shutdownExecutorService(); + shutdownExecutorServices(); } private CountDownLatch finishRequest(String url) { @@ -89,7 +88,7 @@ private CountDownLatch finishRequest(String url) { return finishingRequests.get(url); } - private void shutdownExecutorService() throws InterruptedException { + private void shutdownExecutorServices() throws InterruptedException { if (!handlerService.isShutdown()) { handlerService.shutdown(); handlerService.awaitTermination(10, TimeUnit.SECONDS); diff --git a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpServerTransportTests.java b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpServerTransportTests.java index c69fe23002dfe..09594673de5b2 100644 --- a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpServerTransportTests.java +++ b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpServerTransportTests.java @@ -32,49 +32,36 @@ package org.opensearch.http.nio; -import io.netty.buffer.ByteBufUtil; -import io.netty.buffer.Unpooled; -import io.netty.handler.codec.TooLongFrameException; -import io.netty.handler.codec.http.DefaultFullHttpRequest; -import io.netty.handler.codec.http.FullHttpRequest; -import io.netty.handler.codec.http.FullHttpResponse; -import io.netty.handler.codec.http.HttpHeaderNames; -import io.netty.handler.codec.http.HttpHeaderValues; -import io.netty.handler.codec.http.HttpMethod; -import io.netty.handler.codec.http.HttpResponseStatus; -import io.netty.handler.codec.http.HttpUtil; -import io.netty.handler.codec.http.HttpVersion; import org.apache.logging.log4j.message.ParameterizedMessage; - import org.opensearch.OpenSearchException; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.network.NetworkAddress; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.MockBigArrays; import org.opensearch.common.util.MockPageCacheRecycler; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.http.BindHttpException; import org.opensearch.http.CorsHandler; import org.opensearch.http.HttpServerTransport; import org.opensearch.http.HttpTransportSettings; import org.opensearch.http.NullDispatcher; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.nio.NioSocketChannel; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestChannel; import org.opensearch.rest.RestRequest; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import 
org.opensearch.test.rest.FakeRestRequest; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.nio.NioGroupFactory; - import org.junit.After; import org.junit.Before; @@ -86,10 +73,23 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; -import static org.opensearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN; -import static org.opensearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; +import io.netty.buffer.ByteBufUtil; +import io.netty.buffer.Unpooled; +import io.netty.handler.codec.TooLongFrameException; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpHeaderValues; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.HttpUtil; +import io.netty.handler.codec.http.HttpVersion; + import static org.opensearch.core.rest.RestStatus.BAD_REQUEST; import static org.opensearch.core.rest.RestStatus.OK; +import static org.opensearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN; +import static org.opensearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -187,7 +187,8 @@ public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, xContentRegistry(), dispatcher, new NioGroupFactory(settings, logger), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + NoopTracer.INSTANCE ) ) { transport.start(); @@ -237,7 +238,8 @@ public void testBindUnavailableAddress() { xContentRegistry(), new NullDispatcher(), new NioGroupFactory(Settings.EMPTY, logger), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + NoopTracer.INSTANCE ) ) { transport.start(); @@ -256,7 +258,8 @@ public void testBindUnavailableAddress() { xContentRegistry(), new NullDispatcher(), new NioGroupFactory(Settings.EMPTY, logger), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + NoopTracer.INSTANCE ) ) { BindHttpException bindHttpException = expectThrows(BindHttpException.class, () -> otherTransport.start()); @@ -299,7 +302,8 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th xContentRegistry(), dispatcher, new NioGroupFactory(settings, logger), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + NoopTracer.INSTANCE ) ) { transport.start(); @@ -373,7 +377,8 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th xContentRegistry(), dispatcher, new NioGroupFactory(Settings.EMPTY, logger), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + NoopTracer.INSTANCE ) ) { transport.start(); @@ -439,7 +444,8 @@ public void dispatchBadRequest(final 
RestChannel channel, final ThreadContext th xContentRegistry(), dispatcher, new NioGroupFactory(settings, logger), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + NoopTracer.INSTANCE ) ) { transport.start(); @@ -501,7 +507,8 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th xContentRegistry(), dispatcher, new NioGroupFactory(settings, logger), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + NoopTracer.INSTANCE ) ) { transport.start(); diff --git a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/PagedByteBufTests.java b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/PagedByteBufTests.java index de53cdad104df..c540c1854e509 100644 --- a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/PagedByteBufTests.java +++ b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/PagedByteBufTests.java @@ -32,7 +32,6 @@ package org.opensearch.http.nio; -import io.netty.buffer.ByteBuf; import org.opensearch.nio.Page; import org.opensearch.test.OpenSearchTestCase; @@ -40,6 +39,8 @@ import java.util.ArrayList; import java.util.concurrent.atomic.AtomicInteger; +import io.netty.buffer.ByteBuf; + public class PagedByteBufTests extends OpenSearchTestCase { public void testReleasingPage() { diff --git a/plugins/transport-nio/src/test/java/org/opensearch/transport/nio/SimpleNioTransportTests.java b/plugins/transport-nio/src/test/java/org/opensearch/transport/nio/SimpleNioTransportTests.java index 4b06c4e15bce7..f5d1c618f5ace 100644 --- a/plugins/transport-nio/src/test/java/org/opensearch/transport/nio/SimpleNioTransportTests.java +++ b/plugins/transport-nio/src/test/java/org/opensearch/transport/nio/SimpleNioTransportTests.java @@ -33,17 +33,18 @@ package org.opensearch.transport.nio; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.util.MockPageCacheRecycler; import org.opensearch.common.util.io.IOUtils; import org.opensearch.common.util.net.NetUtils; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.transport.MockTransportService; import org.opensearch.test.transport.StubbableTransport; import org.opensearch.transport.AbstractSimpleTransportTestCase; @@ -81,7 +82,8 @@ protected Transport build(Settings settings, final Version version, ClusterSetti new MockPageCacheRecycler(settings), namedWriteableRegistry, new NoneCircuitBreakerService(), - new NioGroupFactory(settings, logger) + new NioGroupFactory(settings, logger), + NoopTracer.INSTANCE ) { @Override diff --git a/plugins/transport-reactor-netty4/build.gradle b/plugins/transport-reactor-netty4/build.gradle new file mode 
100644 index 0000000000000..7d7eb330b4a55 --- /dev/null +++ b/plugins/transport-reactor-netty4/build.gradle @@ -0,0 +1,264 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +import org.opensearch.gradle.info.BuildParams +import org.opensearch.gradle.test.RestIntegTestTask +import org.opensearch.gradle.test.TestTask +import org.opensearch.gradle.test.rest.JavaRestTestPlugin +import org.opensearch.gradle.test.InternalClusterTestPlugin + +apply plugin: 'opensearch.yaml-rest-test' +apply plugin: 'opensearch.java-rest-test' +apply plugin: 'opensearch.internal-cluster-test' + +// The transport-reactor-netty4 plugin is published to maven +apply plugin: 'opensearch.publish' + +opensearchplugin { + description 'Reactor Netty 4 based transport implementation' + classname 'org.opensearch.transport.reactor.ReactorNetty4Plugin' + hasClientJar = true +} + +dependencies { + // network stack + api "io.netty:netty-buffer:${versions.netty}" + api "io.netty:netty-codec:${versions.netty}" + api "io.netty:netty-codec-dns:${versions.netty}" + api "io.netty:netty-codec-http:${versions.netty}" + api "io.netty:netty-codec-http2:${versions.netty}" + api "io.netty:netty-common:${versions.netty}" + api "io.netty:netty-handler:${versions.netty}" + api "io.netty:netty-resolver-dns:${versions.netty}" + api "io.netty:netty-resolver:${versions.netty}" + api "io.netty:netty-transport:${versions.netty}" + api "io.netty:netty-transport-native-unix-common:${versions.netty}" + + api "io.projectreactor.netty:reactor-netty-http:${versions.reactor_netty}" + api "io.projectreactor.netty:reactor-netty-core:${versions.reactor_netty}" + + testImplementation "org.apache.logging.log4j:log4j-slf4j-impl:${versions.log4j}" + testImplementation "io.projectreactor:reactor-test:${versions.reactor}" + testImplementation project(":modules:transport-netty4") +} + +restResources { + restApi { + includeCore '_common', 'cluster', 'nodes' + } +} + +tasks.named("dependencyLicenses").configure { + mapping from: /netty-.*/, to: 'netty' + mapping from: /reactor-.*/, to: 'reactor' +} + +// TODO: Remove that once we have a complete test suite +testingConventions.enabled = false + +test { + /* + * We have to disable setting the number of available processors as tests in the same JVM randomize processors and will step on each + * other if we allow them to set the number of available processors as it's set-once in Netty. 
+ */ + systemProperty 'opensearch.set.netty.runtime.available.processors', 'false' +} + +internalClusterTest { + systemProperty 'opensearch.set.netty.runtime.available.processors', 'false' +} + +javaRestTest { + systemProperty 'opensearch.set.netty.runtime.available.processors', 'false' +} + +thirdPartyAudit { + ignoreMissingClasses( + 'com.aayushatharva.brotli4j.Brotli4jLoader', + 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Status', + 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Wrapper', + 'com.aayushatharva.brotli4j.encoder.BrotliEncoderChannel', + 'com.aayushatharva.brotli4j.encoder.Encoder$Mode', + 'com.aayushatharva.brotli4j.encoder.Encoder$Parameters', + // classes are missing + + // from io.netty.logging.CommonsLoggerFactory (netty) + 'org.apache.commons.logging.Log', + 'org.apache.commons.logging.LogFactory', + + // from Log4j (deliberate, Netty will fallback to Log4j 2) + 'org.apache.log4j.Level', + 'org.apache.log4j.Logger', + + // from io.netty.handler.ssl.OpenSslEngine (netty) + 'io.netty.internal.tcnative.Buffer', + 'io.netty.internal.tcnative.CertificateCompressionAlgo', + 'io.netty.internal.tcnative.Library', + 'io.netty.internal.tcnative.SSL', + 'io.netty.internal.tcnative.SSLContext', + 'io.netty.internal.tcnative.SSLPrivateKeyMethod', + + // from io.netty.handler.ssl.util.BouncyCastleSelfSignedCertGenerator (netty) + 'org.bouncycastle.cert.X509v3CertificateBuilder', + 'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter', + 'org.bouncycastle.operator.jcajce.JcaContentSignerBuilder', + 'org.bouncycastle.openssl.PEMEncryptedKeyPair', + 'org.bouncycastle.openssl.PEMParser', + 'org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter', + 'org.bouncycastle.openssl.jcajce.JceOpenSSLPKCS8DecryptorProviderBuilder', + 'org.bouncycastle.openssl.jcajce.JcePEMDecryptorProviderBuilder', + 'org.bouncycastle.pkcs.PKCS8EncryptedPrivateKeyInfo', + + // from io.netty.handler.ssl.JettyNpnSslEngine (netty) + 'org.eclipse.jetty.npn.NextProtoNego$ClientProvider', + 'org.eclipse.jetty.npn.NextProtoNego$ServerProvider', + 'org.eclipse.jetty.npn.NextProtoNego', + + // from io.netty.handler.codec.marshalling.ChannelBufferByteInput (netty) + 'org.jboss.marshalling.ByteInput', + + // from io.netty.handler.codec.marshalling.ChannelBufferByteOutput (netty) + 'org.jboss.marshalling.ByteOutput', + + // from io.netty.handler.codec.marshalling.CompatibleMarshallingEncoder (netty) + 'org.jboss.marshalling.Marshaller', + + // from io.netty.handler.codec.marshalling.ContextBoundUnmarshallerProvider (netty) + 'org.jboss.marshalling.MarshallerFactory', + 'org.jboss.marshalling.MarshallingConfiguration', + 'org.jboss.marshalling.Unmarshaller', + + // from io.netty.util.internal.logging.InternalLoggerFactory (netty) - it's optional + 'org.slf4j.helpers.FormattingTuple', + 'org.slf4j.helpers.MessageFormatter', + 'org.slf4j.Logger', + 'org.slf4j.LoggerFactory', + 'org.slf4j.spi.LocationAwareLogger', + + 'com.google.protobuf.nano.CodedOutputByteBufferNano', + 'com.google.protobuf.nano.MessageNano', + 'com.ning.compress.BufferRecycler', + 'com.ning.compress.lzf.ChunkDecoder', + 'com.ning.compress.lzf.ChunkEncoder', + 'com.ning.compress.lzf.LZFChunk', + 'com.ning.compress.lzf.LZFEncoder', + 'com.ning.compress.lzf.util.ChunkDecoderFactory', + 'com.ning.compress.lzf.util.ChunkEncoderFactory', + 'lzma.sdk.lzma.Encoder', + 'net.jpountz.lz4.LZ4Compressor', + 'net.jpountz.lz4.LZ4Factory', + 'net.jpountz.lz4.LZ4FastDecompressor', + 'net.jpountz.xxhash.XXHash32', + 'net.jpountz.xxhash.XXHashFactory', + 
'io.netty.internal.tcnative.AsyncSSLPrivateKeyMethod', + 'io.netty.internal.tcnative.AsyncTask', + 'io.netty.internal.tcnative.CertificateCallback', + 'io.netty.internal.tcnative.CertificateVerifier', + 'io.netty.internal.tcnative.ResultCallback', + 'io.netty.internal.tcnative.SessionTicketKey', + 'io.netty.internal.tcnative.SniHostNameMatcher', + 'io.netty.internal.tcnative.SSL', + 'io.netty.internal.tcnative.SSLSession', + 'io.netty.internal.tcnative.SSLSessionCache', + 'io.netty.channel.epoll.Epoll', + 'io.netty.channel.epoll.EpollDatagramChannel', + 'io.netty.channel.epoll.EpollServerSocketChannel', + 'io.netty.channel.epoll.EpollSocketChannel', + 'io.netty.channel.kqueue.KQueue', + 'io.netty.channel.kqueue.KQueueDatagramChannel', + 'io.netty.channel.kqueue.KQueueServerSocketChannel', + 'io.netty.channel.kqueue.KQueueSocketChannel', + 'io.netty.handler.codec.haproxy.HAProxyMessage', + 'io.netty.handler.codec.haproxy.HAProxyMessageDecoder', + 'io.netty.handler.proxy.ProxyHandler', + 'io.netty.incubator.channel.uring.IOUring', + 'io.netty.incubator.channel.uring.IOUringDatagramChannel', + 'io.netty.incubator.channel.uring.IOUringServerSocketChannel', + 'io.netty.incubator.channel.uring.IOUringSocketChannel', + + 'org.eclipse.jetty.alpn.ALPN$ClientProvider', + 'org.eclipse.jetty.alpn.ALPN$ServerProvider', + 'org.eclipse.jetty.alpn.ALPN', + + 'org.conscrypt.AllocatedBuffer', + 'org.conscrypt.BufferAllocator', + 'org.conscrypt.Conscrypt', + 'org.conscrypt.HandshakeListener', + + 'reactor.blockhound.BlockHound$Builder', + 'reactor.blockhound.integration.BlockHoundIntegration', + + 'io.micrometer.common.KeyValue', + 'io.micrometer.common.KeyValues', + 'io.micrometer.common.docs.KeyName', + 'io.micrometer.context.ContextAccessor', + 'io.micrometer.core.instrument.Counter', + 'io.micrometer.core.instrument.Counter$Builder', + 'io.micrometer.core.instrument.DistributionSummary', + 'io.micrometer.core.instrument.DistributionSummary$Builder', + 'io.micrometer.core.instrument.Gauge', + 'io.micrometer.core.instrument.Gauge$Builder', + 'io.micrometer.core.instrument.Meter', + 'io.micrometer.core.instrument.Meter$Type', + 'io.micrometer.core.instrument.MeterRegistry', + 'io.micrometer.core.instrument.Metrics', + 'io.micrometer.core.instrument.Tags', + 'io.micrometer.core.instrument.Timer', + 'io.micrometer.core.instrument.Timer$Builder', + 'io.micrometer.core.instrument.Timer$Sample', + 'io.micrometer.core.instrument.composite.CompositeMeterRegistry', + 'io.micrometer.core.instrument.docs.MeterDocumentation', + 'io.micrometer.core.instrument.observation.MeterObservationHandler', + 'io.micrometer.observation.Observation', + 'io.micrometer.observation.Observation$Context', + 'io.micrometer.observation.ObservationHandler', + 'io.micrometer.observation.ObservationRegistry', + 'io.micrometer.observation.ObservationRegistry$ObservationConfig', + 'io.micrometer.observation.docs.ObservationDocumentation', + 'io.micrometer.observation.transport.ReceiverContext', + 'io.micrometer.observation.transport.RequestReplyReceiverContext', + 'io.micrometer.observation.transport.RequestReplySenderContext', + 'io.micrometer.observation.transport.SenderContext', + 'io.micrometer.tracing.Span', + 'io.micrometer.tracing.Tracer', + 'io.micrometer.tracing.docs.SpanDocumentation', + 'io.micrometer.tracing.handler.DefaultTracingObservationHandler', + 'io.micrometer.tracing.handler.PropagatingReceiverTracingObservationHandler', + 'io.micrometer.tracing.handler.PropagatingSenderTracingObservationHandler', + 
'io.micrometer.tracing.propagation.Propagator' + ) + + ignoreViolations( + 'io.netty.util.internal.PlatformDependent0', + 'io.netty.util.internal.PlatformDependent0$1', + 'io.netty.util.internal.PlatformDependent0$2', + 'io.netty.util.internal.PlatformDependent0$3', + 'io.netty.util.internal.PlatformDependent0$4', + 'io.netty.util.internal.PlatformDependent0$6', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueConsumerNodeRef', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueProducerNodeRef', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueColdProducerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueProducerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.LinkedQueueNode', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$1', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$2', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$3', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$4', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$5' + ) +} diff --git a/plugins/transport-reactor-netty4/licenses/netty-LICENSE.txt b/plugins/transport-reactor-netty4/licenses/netty-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/transport-reactor-netty4/licenses/netty-NOTICE.txt b/plugins/transport-reactor-netty4/licenses/netty-NOTICE.txt new file mode 100644 index 0000000000000..5bbf91a14de23 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-NOTICE.txt @@ -0,0 +1,116 @@ + + The Netty Project + ================= + +Please visit the Netty web site for more information: + + * http://netty.io/ + +Copyright 2011 The Netty Project + +The Netty Project licenses this file to you under the Apache License, +version 2.0 (the "License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +License for the specific language governing permissions and limitations +under the License. 
+ +Also, please refer to each LICENSE.<component>.txt file, which is located in +the 'license' directory of the distribution file, for the license terms of the +components that this product depends on. + +------------------------------------------------------------------------------- +This product contains the extensions to Java Collections Framework which has +been derived from the works by JSR-166 EG, Doug Lea, and Jason T. Greene: + + * LICENSE: + * license/LICENSE.jsr166y.txt (Public Domain) + * HOMEPAGE: + * http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/ + * http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/ + +This product contains a modified version of Robert Harder's Public Domain +Base64 Encoder and Decoder, which can be obtained at: + + * LICENSE: + * license/LICENSE.base64.txt (Public Domain) + * HOMEPAGE: + * http://iharder.sourceforge.net/current/java/base64/ + +This product contains a modified version of 'JZlib', a re-implementation of +zlib in pure Java, which can be obtained at: + + * LICENSE: + * license/LICENSE.jzlib.txt (BSD Style License) + * HOMEPAGE: + * http://www.jcraft.com/jzlib/ + +This product contains a modified version of 'Webbit', a Java event based +WebSocket and HTTP server: + + * LICENSE: + * license/LICENSE.webbit.txt (BSD License) + * HOMEPAGE: + * https://github.com/joewalnes/webbit + +This product optionally depends on 'Protocol Buffers', Google's data +interchange format, which can be obtained at: + + * LICENSE: + * license/LICENSE.protobuf.txt (New BSD License) + * HOMEPAGE: + * http://code.google.com/p/protobuf/ + +This product optionally depends on 'Bouncy Castle Crypto APIs' to generate +a temporary self-signed X.509 certificate when the JVM does not provide the +equivalent functionality. 
It can be obtained at: + + * LICENSE: + * license/LICENSE.bouncycastle.txt (MIT License) + * HOMEPAGE: + * http://www.bouncycastle.org/ + +This product optionally depends on 'SLF4J', a simple logging facade for Java, +which can be obtained at: + + * LICENSE: + * license/LICENSE.slf4j.txt (MIT License) + * HOMEPAGE: + * http://www.slf4j.org/ + +This product optionally depends on 'Apache Commons Logging', a logging +framework, which can be obtained at: + + * LICENSE: + * license/LICENSE.commons-logging.txt (Apache License 2.0) + * HOMEPAGE: + * http://commons.apache.org/logging/ + +This product optionally depends on 'Apache Log4J', a logging framework, +which can be obtained at: + + * LICENSE: + * license/LICENSE.log4j.txt (Apache License 2.0) + * HOMEPAGE: + * http://logging.apache.org/log4j/ + +This product optionally depends on 'JBoss Logging', a logging framework, +which can be obtained at: + + * LICENSE: + * license/LICENSE.jboss-logging.txt (GNU LGPL 2.1) + * HOMEPAGE: + * http://anonsvn.jboss.org/repos/common/common-logging-spi/ + +This product optionally depends on 'Apache Felix', an open source OSGi +framework implementation, which can be obtained at: + + * LICENSE: + * license/LICENSE.felix.txt (Apache License 2.0) + * HOMEPAGE: + * http://felix.apache.org/ diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..beb44fc0f4cf9 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +8509a72b8a5a2d33d611e99254aed39765c3ad82 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..4c74bb06fd83b --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +0a1d32debf2ed07c5852ab5b2904c43adb76c39e \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..d823de7ffadd4 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +945e8ad5ab7ec4f11fb0257d2594af0cfae1d4b7 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..38eb2e5bad80a --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +04d8e9e51b7254bd26a42fe17bdcae32e4c6ebb3 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..5b3d3311edc9f --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +3885ffe7dd05c9773df70c61009f34a5a8a383ec \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 new file mode 
100644 index 0000000000000..bbe91c6ccfb1d --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +4f17a547530d64becd7179507b25f4154bcfba57 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..ba27b38632622 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +d4c6b05f4d9aca117981297fb7f02953102ebb5e \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..3bc0f7b3fed09 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +dfee84308341a42131dd0f8ac0e1e02d627c19f3 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..a62cb0fefcc40 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +d655d09e972dee46f580dbcf41c0d1356aea9e1b \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..19419999300dd --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +d6a105c621b47d1410e0e09419d7209d2d46e914 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 new file mode 100644 index 0000000000000..407ecaffdad30 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 @@ -0,0 +1 @@ +4d61d4959741109b3eccd7337f11fc89fa90a74a \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-LICENSE.txt b/plugins/transport-reactor-netty4/licenses/reactor-LICENSE.txt new file mode 100644 index 0000000000000..e5583c184e67a --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-LICENSE.txt @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/transport-reactor-netty4/licenses/reactor-NOTICE.txt b/plugins/transport-reactor-netty4/licenses/reactor-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.17.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.17.jar.sha1 new file mode 100644 index 0000000000000..3d631bc904f24 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.17.jar.sha1 @@ -0,0 +1 @@ +319b1d41f28e92b31b7ca0f19183337f5539bb44 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.17.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.17.jar.sha1 new file mode 100644 index 0000000000000..9ceef6959744b --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.17.jar.sha1 @@ -0,0 +1 @@ +9ed949dcd050ef30d9eeedd53d95d1dce20ce832 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/OpenSearchReactorNetty4IntegTestCase.java b/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/OpenSearchReactorNetty4IntegTestCase.java new file mode 100644 index 0000000000000..abbd50bf1b235 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/OpenSearchReactorNetty4IntegTestCase.java @@ -0,0 +1,73 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch; + +import org.opensearch.common.network.NetworkModule; +import org.opensearch.common.settings.Settings; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.transport.Netty4ModulePlugin; +import org.opensearch.transport.reactor.ReactorNetty4Plugin; +import org.opensearch.transport.reactor.netty4.ReactorNetty4Transport; + +import java.util.Collection; +import java.util.List; + +public abstract class OpenSearchReactorNetty4IntegTestCase extends OpenSearchIntegTestCase { + + @Override + protected boolean ignoreExternalCluster() { + return true; + } + + @Override + protected boolean addMockTransportService() { + return false; + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal)); + // randomize netty settings + if (randomBoolean()) { + builder.put(ReactorNetty4Transport.SETTING_WORKER_COUNT.getKey(), random().nextInt(3) + 1); + } + builder.put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4ModulePlugin.NETTY_TRANSPORT_NAME); + builder.put(NetworkModule.HTTP_TYPE_KEY, ReactorNetty4Plugin.REACTOR_NETTY_HTTP_TRANSPORT_NAME); + return builder.build(); + } + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return List.of(ReactorNetty4Plugin.class, Netty4ModulePlugin.class); + } +} diff --git a/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpRequestSizeLimitIT.java b/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpRequestSizeLimitIT.java new file mode 100644 index 0000000000000..833d60375a2bd --- /dev/null +++ b/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpRequestSizeLimitIT.java @@ -0,0 +1,159 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */
+
+package org.opensearch.http.reactor.netty4;
+
+import org.opensearch.OpenSearchReactorNetty4IntegTestCase;
+import org.opensearch.common.collect.Tuple;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.core.common.transport.TransportAddress;
+import org.opensearch.core.common.unit.ByteSizeUnit;
+import org.opensearch.core.common.unit.ByteSizeValue;
+import org.opensearch.http.HttpServerTransport;
+import org.opensearch.indices.breaker.HierarchyCircuitBreakerService;
+import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope;
+import org.opensearch.test.OpenSearchIntegTestCase.Scope;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+import io.netty.handler.codec.http.FullHttpResponse;
+import io.netty.handler.codec.http.HttpResponseStatus;
+import io.netty.util.ReferenceCounted;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.hasSize;
+
+/**
+ * This test checks that in-flight requests are limited at the HTTP level and that requests that are excluded from limiting can pass.
+ *
+ * As the same setting is also used to limit in-flight requests at the transport level, we avoid transport messages by forcing
+ * a single-node "cluster".
+ */
+@ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numClientNodes = 0, numDataNodes = 1)
+public class ReactorNetty4HttpRequestSizeLimitIT extends OpenSearchReactorNetty4IntegTestCase {
+
+    private static final ByteSizeValue LIMIT = new ByteSizeValue(2, ByteSizeUnit.KB);
+
+    @Override
+    protected boolean addMockHttpTransport() {
+        return false; // enable http
+    }
+
+    @Override
+    protected Settings nodeSettings(int nodeOrdinal) {
+        return Settings.builder()
+            .put(super.nodeSettings(nodeOrdinal))
+            .put(HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), LIMIT)
+            .build();
+    }
+
+    public void testLimitsInFlightRequests() throws Exception {
+        ensureGreen();
+
+        // we use the limit size as a (very) rough indication of how many requests we should send to hit the limit
+        int numRequests = LIMIT.bytesAsInt() / 100;
+
+        StringBuilder bulkRequest = new StringBuilder();
+        for (int i = 0; i < numRequests; i++) {
+            bulkRequest.append("{\"index\": {}}");
+            bulkRequest.append(System.lineSeparator());
+            bulkRequest.append("{ \"field\" : \"value\" }");
+            bulkRequest.append(System.lineSeparator());
+        }
+
+        List<Tuple<String, CharSequence>> requests = new ArrayList<>();
+        for (int i = 0; i < 150; i++) {
+            requests.add(Tuple.tuple("/index/_bulk", bulkRequest));
+        }
+
+        HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class);
+        TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses());
+
+        try (ReactorHttpClient nettyHttpClient = ReactorHttpClient.create(false)) {
+            final Collection<FullHttpResponse> singleResponse = nettyHttpClient.post(transportAddress.address(), requests.subList(0, 1));
+            try {
+                assertThat(singleResponse, hasSize(1));
+                assertAtLeastOnceExpectedStatus(singleResponse, HttpResponseStatus.OK);
+
+                final Collection<FullHttpResponse> multipleResponses = nettyHttpClient.post(transportAddress.address(), requests);
+                try {
+                    assertThat(multipleResponses, hasSize(requests.size()));
+                    assertAtLeastOnceExpectedStatus(multipleResponses, HttpResponseStatus.TOO_MANY_REQUESTS);
+                } finally {
+                    multipleResponses.forEach(ReferenceCounted::release);
+                }
+            } finally {
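+                // Netty responses are reference-counted; each one must be released so its pooled buffers can be reclaimed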
singleResponse.forEach(ReferenceCounted::release); + } + } + } + + public void testDoesNotLimitExcludedRequests() throws Exception { + ensureGreen(); + + List<Tuple<String, CharSequence>> requestUris = new ArrayList<>(); + for (int i = 0; i < 1500; i++) { + requestUris.add(Tuple.tuple("/_cluster/settings", "{ \"transient\": {\"search.default_search_timeout\": \"40s\" } }")); + } + + HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class); + TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses()); + + try (ReactorHttpClient nettyHttpClient = ReactorHttpClient.create(false)) { + final Collection<FullHttpResponse> responses = nettyHttpClient.put(transportAddress.address(), requestUris); + try { + assertThat(responses, hasSize(requestUris.size())); + assertAllInExpectedStatus(responses, HttpResponseStatus.OK); + } finally { + responses.forEach(ReferenceCounted::release); + } + } + } + + private void assertAtLeastOnceExpectedStatus(Collection<FullHttpResponse> responses, HttpResponseStatus expectedStatus) { + long countExpectedStatus = responses.stream().filter(r -> r.status().equals(expectedStatus)).count(); + assertThat("Expected at least one request with status [" + expectedStatus + "]", countExpectedStatus, greaterThan(0L)); + } + + private void assertAllInExpectedStatus(Collection<FullHttpResponse> responses, HttpResponseStatus expectedStatus) { + long countUnexpectedStatus = responses.stream().filter(r -> r.status().equals(expectedStatus) == false).count(); + assertThat( + "Expected all requests with status [" + expectedStatus + "] but [" + countUnexpectedStatus + "] requests had a different one", + countUnexpectedStatus, + equalTo(0L) + ); + } + +} diff --git a/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/http/reactor/netty4/ReactorNetty4PipeliningIT.java b/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/http/reactor/netty4/ReactorNetty4PipeliningIT.java new file mode 100644 index 0000000000000..c0e43de06f6ff --- /dev/null +++ b/plugins/transport-reactor-netty4/src/internalClusterTest/java/org/opensearch/http/reactor/netty4/ReactorNetty4PipeliningIT.java @@ -0,0 +1,68 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.OpenSearchReactorNetty4IntegTestCase; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; +import org.opensearch.test.OpenSearchIntegTestCase.Scope; + +import java.util.Collection; +import java.util.Locale; + +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.util.ReferenceCounted; + +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; + +@ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numDataNodes = 1) +public class ReactorNetty4PipeliningIT extends OpenSearchReactorNetty4IntegTestCase { + + @Override + protected boolean addMockHttpTransport() { + return false; // enable http + } + + public void testThatNettyHttpServerSupportsPipelining() throws Exception { + String[] requests = new String[] { "/", "/_nodes/stats", "/", "/_cluster/state", "/" }; + + HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class); + TransportAddress[] boundAddresses = httpServerTransport.boundAddress().boundAddresses(); + TransportAddress transportAddress = randomFrom(boundAddresses); + + try (ReactorHttpClient client = ReactorHttpClient.create()) { + Collection<FullHttpResponse> responses = client.get(transportAddress.address(), true, requests); + try { + assertThat(responses, hasSize(5)); + + Collection<String> opaqueIds = ReactorHttpClient.returnOpaqueIds(responses); + assertOpaqueIdsInOrder(opaqueIds); + } finally { + responses.forEach(ReferenceCounted::release); + } + } + } + + private void assertOpaqueIdsInOrder(Collection<String> opaqueIds) { + // check if opaque ids are monotonically increasing + int i = 0; + String msg = String.format(Locale.ROOT, "Expected list of opaque ids to be monotonically increasing, got [%s]", opaqueIds); + for (String opaqueId : opaqueIds) { + assertThat(msg, opaqueId, is(String.valueOf(i++))); + } + } + +} diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/HttpConversionUtil.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/HttpConversionUtil.java new file mode 100644 index 0000000000000..bd75227dabd08 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/HttpConversionUtil.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.rest.RestRequest; + +import io.netty.handler.codec.http.HttpMethod; + +final class HttpConversionUtil { + private HttpConversionUtil() {} + + /** + * Converts {@link HttpMethod} to {@link RestRequest.Method} + * @param method {@link HttpMethod} method + * @return corresponding {@link RestRequest.Method} + * @throws IllegalArgumentException if HTTP method is not supported + */ + public static RestRequest.Method convertMethod(HttpMethod method) { + if (method == HttpMethod.GET) { + return RestRequest.Method.GET; + } else if (method == HttpMethod.POST) { + return RestRequest.Method.POST; + } else if (method == HttpMethod.PUT) { + return RestRequest.Method.PUT; + } else if (method == HttpMethod.DELETE) { + return RestRequest.Method.DELETE; + } else if (method == HttpMethod.HEAD) { + return RestRequest.Method.HEAD; + } else if (method == HttpMethod.OPTIONS) { + return RestRequest.Method.OPTIONS; + } else if (method == HttpMethod.PATCH) { + return RestRequest.Method.PATCH; + } else if (method == HttpMethod.TRACE) { + return RestRequest.Method.TRACE; + } else if (method == HttpMethod.CONNECT) { + return RestRequest.Method.CONNECT; + } else { + throw new IllegalArgumentException("Unexpected http method: " + method); + } + } +} diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingHttpChannel.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingHttpChannel.java new file mode 100644 index 0000000000000..98b359319ff1b --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingHttpChannel.java @@ -0,0 +1,76 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.common.concurrent.CompletableContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.http.HttpChannel; +import org.opensearch.http.HttpResponse; +import org.opensearch.transport.reactor.netty4.Netty4Utils; + +import java.net.InetSocketAddress; +import java.util.concurrent.atomic.AtomicBoolean; + +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpContent; +import reactor.core.publisher.FluxSink; +import reactor.netty.http.server.HttpServerRequest; +import reactor.netty.http.server.HttpServerResponse; + +class NonStreamingHttpChannel implements HttpChannel { + private final HttpServerRequest request; + private final HttpServerResponse response; + private final CompletableContext<Void> closeContext = new CompletableContext<>(); + private final FluxSink<HttpContent> emitter; + + NonStreamingHttpChannel(HttpServerRequest request, HttpServerResponse response, FluxSink<HttpContent> emitter) { + this.request = request; + this.response = response; + this.emitter = emitter; + this.request.withConnection(connection -> Netty4Utils.addListener(connection.channel().closeFuture(), closeContext)); + } + + @Override + public boolean isOpen() { + final AtomicBoolean isOpen = new AtomicBoolean(); + request.withConnection(connection -> isOpen.set(connection.channel().isOpen())); + return isOpen.get(); + } + + @Override + public void close() { + request.withConnection(connection -> connection.channel().close()); + } + + @Override + public void addCloseListener(ActionListener<Void> listener) { + closeContext.addListener(ActionListener.toBiConsumer(listener)); + } + + @Override + public void sendResponse(HttpResponse response, ActionListener<Void> listener) { + emitter.next(createResponse(response)); + listener.onResponse(null); + emitter.complete(); + } + + @Override + public InetSocketAddress getRemoteAddress() { + return (InetSocketAddress) response.remoteAddress(); + } + + @Override + public InetSocketAddress getLocalAddress() { + return (InetSocketAddress) response.hostAddress(); + } + + FullHttpResponse createResponse(HttpResponse response) { + return (FullHttpResponse) response; + } +} diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingRequestConsumer.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingRequestConsumer.java new file mode 100644 index 0000000000000..d43e23e800e65 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/NonStreamingRequestConsumer.java @@ -0,0 +1,104 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.http.AbstractHttpServerTransport; +import org.opensearch.http.HttpRequest; + +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Consumer; + +import io.netty.buffer.CompositeByteBuf; +import io.netty.handler.codec.http.HttpContent; +import io.netty.handler.codec.http.LastHttpContent; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; +import reactor.core.Disposable; +import reactor.core.publisher.Flux; +import reactor.core.publisher.FluxSink; +import reactor.netty.http.server.HttpServerRequest; +import reactor.netty.http.server.HttpServerResponse; + +class NonStreamingRequestConsumer<T extends HttpContent> implements Consumer<T>, Publisher<HttpContent>, Disposable { + private final HttpServerRequest request; + private final HttpServerResponse response; + private final CompositeByteBuf content; + private final Publisher<HttpContent> publisher; + private final AbstractHttpServerTransport transport; + private final AtomicBoolean disposed = new AtomicBoolean(false); + private volatile FluxSink<HttpContent> emitter; + + NonStreamingRequestConsumer( + AbstractHttpServerTransport transport, + HttpServerRequest request, + HttpServerResponse response, + int maxCompositeBufferComponents + ) { + this.transport = transport; + this.request = request; + this.response = response; + this.content = response.alloc().compositeBuffer(maxCompositeBufferComponents); + this.publisher = Flux.create(emitter -> register(emitter)); + } + + private void register(FluxSink<HttpContent> emitter) { + this.emitter = emitter.onDispose(this).onCancel(this); + } + + @Override + public void accept(T message) { + try { + if (message instanceof LastHttpContent) { + process(message, emitter); + } else if (message instanceof HttpContent) { + process(message, emitter); + } + } catch (Throwable ex) { + emitter.error(ex); + } + } + + public void process(HttpContent in, FluxSink<HttpContent> emitter) { + // Consume request body in full before dispatching it + content.addComponent(true, in.content().retain()); + + if (in instanceof LastHttpContent) { + final NonStreamingHttpChannel channel = new NonStreamingHttpChannel(request, response, emitter); + final HttpRequest r = createRequest(request, content); + + try { + transport.incomingRequest(r, channel); + } catch (Exception ex) { + emitter.error(ex); + transport.onException(channel, ex); + } finally { + r.release(); + if (disposed.compareAndSet(false, true)) { + this.content.release(); + } + } + } + } + + HttpRequest createRequest(HttpServerRequest request, CompositeByteBuf content) { + return new ReactorNetty4HttpRequest(request, content.retain()); + } + + @Override + public void subscribe(Subscriber<? 
super HttpContent> s) { + publisher.subscribe(s); + } + + @Override + public void dispose() { + if (disposed.compareAndSet(false, true)) { + this.content.release(); + } + } +} diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpRequest.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpRequest.java new file mode 100644 index 0000000000000..4406c555a5b04 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpRequest.java @@ -0,0 +1,272 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.http.HttpRequest; +import org.opensearch.rest.RestRequest; +import org.opensearch.transport.reactor.netty4.Netty4Utils; + +import java.util.AbstractMap; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.handler.codec.http.DefaultHttpHeaders; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpHeaders; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.cookie.Cookie; +import io.netty.handler.codec.http.cookie.ServerCookieDecoder; +import io.netty.handler.codec.http.cookie.ServerCookieEncoder; +import reactor.netty.http.server.HttpServerRequest; + +class ReactorNetty4HttpRequest implements HttpRequest { + private final String protocol; + private final HttpMethod method; + private final String uri; + private final ByteBuf content; + private final HttpHeadersMap headers; + private final AtomicBoolean released; + private final Exception inboundException; + private final boolean pooled; + + ReactorNetty4HttpRequest(HttpServerRequest request, ByteBuf content) { + this(request, new HttpHeadersMap(request.requestHeaders()), new AtomicBoolean(false), true, content); + } + + ReactorNetty4HttpRequest(HttpServerRequest request, ByteBuf content, Exception inboundException) { + this( + request.protocol(), + request.method(), + request.uri(), + new HttpHeadersMap(request.requestHeaders()), + new AtomicBoolean(false), + true, + content, + inboundException + ); + } + + private ReactorNetty4HttpRequest( + HttpServerRequest request, + HttpHeadersMap headers, + AtomicBoolean released, + boolean pooled, + ByteBuf content + ) { + this(request.protocol(), request.method(), request.uri(), headers, released, pooled, content, null); + } + + private ReactorNetty4HttpRequest( + String protocol, + HttpMethod method, + String uri, + HttpHeadersMap headers, + AtomicBoolean released, + boolean pooled, + ByteBuf content, + Exception inboundException + ) { + + this.protocol = protocol; + this.method = method; + this.uri = uri; + this.headers = headers; + this.content = content; + this.pooled = pooled; + this.released = released; + this.inboundException = inboundException; + } + + @Override + public RestRequest.Method method() { + return HttpConversionUtil.convertMethod(method); + } + + @Override + public String 
uri() { + return uri; + } + + @Override + public BytesReference content() { + assert released.get() == false; + return Netty4Utils.toBytesReference(content); + } + + @Override + public void release() { + if (pooled && released.compareAndSet(false, true)) { + content.release(); + } + } + + @Override + public HttpRequest releaseAndCopy() { + assert released.get() == false; + if (pooled == false) { + return this; + } + try { + final ByteBuf copiedContent = Unpooled.copiedBuffer(content); + return new ReactorNetty4HttpRequest(protocol, method, uri, headers, new AtomicBoolean(false), false, copiedContent, null); + } finally { + release(); + } + } + + @Override + public final Map<String, List<String>> getHeaders() { + return headers; + } + + @Override + public List<String> strictCookies() { + String cookieString = headers.httpHeaders.get(HttpHeaderNames.COOKIE); + if (cookieString != null) { + Set<Cookie> cookies = ServerCookieDecoder.STRICT.decode(cookieString); + if (!cookies.isEmpty()) { + return ServerCookieEncoder.STRICT.encode(cookies); + } + } + return Collections.emptyList(); + } + + @Override + public HttpVersion protocolVersion() { + if (protocol.equals(io.netty.handler.codec.http.HttpVersion.HTTP_1_0.toString())) { + return HttpRequest.HttpVersion.HTTP_1_0; + } else if (protocol.equals(io.netty.handler.codec.http.HttpVersion.HTTP_1_1.toString())) { + return HttpRequest.HttpVersion.HTTP_1_1; + } else { + throw new IllegalArgumentException("Unexpected http protocol version: " + protocol); + } + } + + @Override + public HttpRequest removeHeader(String header) { + HttpHeaders headersWithoutContentTypeHeader = new DefaultHttpHeaders(); + headersWithoutContentTypeHeader.add(headers.httpHeaders); + headersWithoutContentTypeHeader.remove(header); + + return new ReactorNetty4HttpRequest( + protocol, + method, + uri, + new HttpHeadersMap(headersWithoutContentTypeHeader), + released, + pooled, + content, + null + ); + } + + @Override + public ReactorNetty4HttpResponse createResponse(RestStatus status, BytesReference content) { + return new ReactorNetty4HttpResponse( + headers.httpHeaders, + io.netty.handler.codec.http.HttpVersion.valueOf(protocol), + status, + content + ); + } + + @Override + public Exception getInboundException() { + return inboundException; + } + + /** + * A wrapper of {@link HttpHeaders} that implements a map to prevent copying unnecessarily. This class does not support modifications + * and due to the underlying implementation, it performs case insensitive lookups of key to values. + * + * It is important to note that this implementation does have some downsides in that each invocation of the + * {@link #values()} and {@link #entrySet()} methods will perform a copy of the values in the HttpHeaders rather than returning a + * view of the underlying values. 
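+     *
+     * For example, {@code get("Content-Type")} and {@code get("content-type")} resolve to the same header values.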
+ */ + private static class HttpHeadersMap implements Map<String, List<String>> { + + private final HttpHeaders httpHeaders; + + private HttpHeadersMap(HttpHeaders httpHeaders) { + this.httpHeaders = httpHeaders; + } + + @Override + public int size() { + return httpHeaders.size(); + } + + @Override + public boolean isEmpty() { + return httpHeaders.isEmpty(); + } + + @Override + public boolean containsKey(Object key) { + return key instanceof String && httpHeaders.contains((String) key); + } + + @Override + public boolean containsValue(Object value) { + return value instanceof List && httpHeaders.names().stream().map(httpHeaders::getAll).anyMatch(value::equals); + } + + @Override + public List<String> get(Object key) { + return key instanceof String ? httpHeaders.getAll((String) key) : null; + } + + @Override + public List<String> put(String key, List<String> value) { + throw new UnsupportedOperationException("modifications are not supported"); + } + + @Override + public List<String> remove(Object key) { + throw new UnsupportedOperationException("modifications are not supported"); + } + + @Override + public void putAll(Map<? extends String, ? extends List<String>> m) { + throw new UnsupportedOperationException("modifications are not supported"); + } + + @Override + public void clear() { + throw new UnsupportedOperationException("modifications are not supported"); + } + + @Override + public Set<String> keySet() { + return httpHeaders.names(); + } + + @Override + public Collection<List<String>> values() { + return httpHeaders.names().stream().map(k -> Collections.unmodifiableList(httpHeaders.getAll(k))).collect(Collectors.toList()); + } + + @Override + public Set<Entry<String, List<String>>> entrySet() { + return httpHeaders.names() + .stream() + .map(k -> new AbstractMap.SimpleImmutableEntry<>(k, httpHeaders.getAll(k))) + .collect(Collectors.toSet()); + } + } +} diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpResponse.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpResponse.java new file mode 100644 index 0000000000000..c45ad54b668a3 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpResponse.java @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.http.HttpResponse; +import org.opensearch.transport.reactor.netty4.Netty4Utils; + +import io.netty.handler.codec.http.DefaultFullHttpResponse; +import io.netty.handler.codec.http.HttpHeaders; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.HttpVersion; + +class ReactorNetty4HttpResponse extends DefaultFullHttpResponse implements HttpResponse { + private final HttpHeaders requestHeaders; + + ReactorNetty4HttpResponse(HttpHeaders requestHeaders, HttpVersion version, RestStatus status, BytesReference content) { + super(version, HttpResponseStatus.valueOf(status.getStatus()), Netty4Utils.toByteBuf(content)); + this.requestHeaders = requestHeaders; + } + + @Override + public void addHeader(String name, String value) { + headers().add(name, value); + } + + @Override + public boolean containsHeader(String name) { + return headers().contains(name); + } + + public HttpHeaders requestHeaders() { + return requestHeaders; + } +} diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerChannel.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerChannel.java new file mode 100644 index 0000000000000..84360bf028ba9 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerChannel.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.common.concurrent.CompletableContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.http.HttpServerChannel; +import org.opensearch.transport.reactor.netty4.Netty4Utils; + +import java.net.InetSocketAddress; + +import io.netty.channel.Channel; + +class ReactorNetty4HttpServerChannel implements HttpServerChannel { + private final Channel channel; + private final CompletableContext<Void> closeContext = new CompletableContext<>(); + + ReactorNetty4HttpServerChannel(Channel channel) { + this.channel = channel; + Netty4Utils.addListener(this.channel.closeFuture(), closeContext); + } + + @Override + public InetSocketAddress getLocalAddress() { + return (InetSocketAddress) channel.localAddress(); + } + + @Override + public void addCloseListener(ActionListener<Void> listener) { + closeContext.addListener(ActionListener.toBiConsumer(listener)); + } + + @Override + public boolean isOpen() { + return channel.isOpen(); + } + + @Override + public void close() { + channel.close(); + } + + @Override + public String toString() { + return "ReactorNetty4HttpChannel{localAddress=" + getLocalAddress() + "}"; + } +} diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java new file mode 100644 index 0000000000000..d4a5a9ad83af6 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java @@ -0,0 +1,313 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.http.reactor.netty4;
+
+import org.opensearch.common.network.NetworkService;
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Setting.Property;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.BigArrays;
+import org.opensearch.common.util.concurrent.OpenSearchExecutors;
+import org.opensearch.common.util.io.IOUtils;
+import org.opensearch.common.util.net.NetUtils;
+import org.opensearch.core.common.unit.ByteSizeValue;
+import org.opensearch.core.xcontent.NamedXContentRegistry;
+import org.opensearch.http.AbstractHttpServerTransport;
+import org.opensearch.http.HttpChannel;
+import org.opensearch.http.HttpReadTimeoutException;
+import org.opensearch.http.HttpServerChannel;
+import org.opensearch.telemetry.tracing.Tracer;
+import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.reactor.SharedGroupFactory;
+import org.opensearch.transport.reactor.netty4.Netty4Utils;
+
+import java.net.InetSocketAddress;
+import java.net.SocketOption;
+import java.time.Duration;
+
+import io.netty.channel.ChannelOption;
+import io.netty.channel.socket.nio.NioChannelOption;
+import io.netty.handler.codec.http.DefaultLastHttpContent;
+import io.netty.handler.codec.http.FullHttpResponse;
+import io.netty.handler.codec.http.HttpContent;
+import io.netty.handler.timeout.ReadTimeoutException;
+import org.reactivestreams.Publisher;
+import reactor.core.publisher.Mono;
+import reactor.core.scheduler.Scheduler;
+import reactor.core.scheduler.Schedulers;
+import reactor.netty.DisposableServer;
+import reactor.netty.http.HttpProtocol;
+import reactor.netty.http.server.HttpServer;
+import reactor.netty.http.server.HttpServerRequest;
+import reactor.netty.http.server.HttpServerResponse;
+
+import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_CONNECT_TIMEOUT;
+import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE;
+import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH;
+import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE;
+import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH;
+import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_READ_TIMEOUT;
+import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_KEEP_ALIVE;
+import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_KEEP_COUNT;
+import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_KEEP_IDLE;
+import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_KEEP_INTERVAL;
+import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_NO_DELAY;
+import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE;
+import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_REUSE_ADDRESS;
+import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_TCP_SEND_BUFFER_SIZE;
+
+/**
+ * The HTTP transport implementation based on Reactor Netty (see {@link HttpServer}).
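+ * The server handles both HTTP/1.1 and HTTP/2 cleartext (h2c) connections (see {@link HttpProtocol#HTTP11} and {@link HttpProtocol#H2C}).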
+ */
+public class ReactorNetty4HttpServerTransport extends AbstractHttpServerTransport {
+    private static final String SETTING_KEY_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = "http.netty.max_composite_buffer_components";
+    private static final ByteSizeValue MTU = new ByteSizeValue(Long.parseLong(System.getProperty("opensearch.net.mtu", "1500")));
+
+    /**
+     * The number of Reactor Netty HTTP workers
+     */
+    public static final Setting<Integer> SETTING_HTTP_WORKER_COUNT = Setting.intSetting("http.netty.worker_count", 0, Property.NodeScope);
+
+    /**
+     * The maximum number of composite components for request accumulation
+     */
+    public static Setting<Integer> SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = new Setting<>(
+        SETTING_KEY_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS,
+        (s) -> {
+            ByteSizeValue maxContentLength = SETTING_HTTP_MAX_CONTENT_LENGTH.get(s);
+            /*
+             * Netty accumulates buffers containing data from all incoming network packets that make up one HTTP request in an instance of
+             * io.netty.buffer.CompositeByteBuf (think of it as a buffer of buffers). Once its capacity is reached, the buffer will iterate
+             * over its individual entries and put them into larger buffers (see io.netty.buffer.CompositeByteBuf#consolidateIfNeeded()
+             * for implementation details). We want to avoid that consolidation because it creates additional garbage on the heap and also
+             * increases the application's native memory footprint (as direct byte buffers hold their contents off-heap).
+             *
+             * With this setting we control the CompositeByteBuf's capacity (which is by default 1024, see
+             * io.netty.handler.codec.MessageAggregator#DEFAULT_MAX_COMPOSITEBUFFER_COMPONENTS). To determine a proper default capacity for
+             * that buffer, we need to consider that the upper bound for the size of HTTP requests is determined by `maxContentLength`. The
+             * number of buffers that are needed depends on how often Netty reads network packets, which in turn depends on the network
+             * type (MTU). We assume here that OpenSearch receives HTTP requests via an Ethernet connection which has an MTU of 1500 bytes.
+             *
+             * Note that we are *not* pre-allocating any memory based on this setting but rather determining the CompositeByteBuf's capacity.
+             * The tradeoff is between fewer (but larger) buffers that are contained in the CompositeByteBuf and more (but smaller) buffers.
+             * With the default max content length of 100MB and an MTU of 1500 bytes we would allow 69905 entries.
+             */
+            long maxBufferComponentsEstimate = Math.round((double) (maxContentLength.getBytes() / MTU.getBytes()));
+            // clamp the value to the allowed range; Netty's CompositeByteBuf implementation does not allow fewer than two components
+            long maxBufferComponents = Math.max(2, Math.min(maxBufferComponentsEstimate, Integer.MAX_VALUE));
+            return String.valueOf(maxBufferComponents);
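+            // For the defaults above: 100MB = 104,857,600 bytes, and 104,857,600 / 1,500 rounds to 69,905 components,
+            // which already falls inside the clamped range [2, Integer.MAX_VALUE].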
+ }, + s -> Setting.parseInt(s, 2, Integer.MAX_VALUE, SETTING_KEY_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS), + Property.NodeScope + ); + + private final SharedGroupFactory sharedGroupFactory; + private final int readTimeoutMillis; + private final int connectTimeoutMillis; + private final int maxCompositeBufferComponents; + private final ByteSizeValue maxInitialLineLength; + private final ByteSizeValue maxHeaderSize; + private final ByteSizeValue maxChunkSize; + private volatile SharedGroupFactory.SharedGroup sharedGroup; + private volatile DisposableServer disposableServer; + private volatile Scheduler scheduler; + + /** + * Creates new HTTP transport implementations based on Reactor Netty (see please {@link HttpServer}). + * @param settings settings + * @param networkService network service + * @param bigArrays big array allocator + * @param threadPool thread pool instance + * @param xContentRegistry XContent registry instance + * @param dispatcher dispatcher instance + * @param clusterSettings cluster settings + * @param sharedGroupFactory shared group factory + * @param tracer tracer instance + */ + public ReactorNetty4HttpServerTransport( + Settings settings, + NetworkService networkService, + BigArrays bigArrays, + ThreadPool threadPool, + NamedXContentRegistry xContentRegistry, + Dispatcher dispatcher, + ClusterSettings clusterSettings, + SharedGroupFactory sharedGroupFactory, + Tracer tracer + ) { + super(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher, clusterSettings, tracer); + Netty4Utils.setAvailableProcessors(OpenSearchExecutors.NODE_PROCESSORS_SETTING.get(settings)); + this.readTimeoutMillis = Math.toIntExact(SETTING_HTTP_READ_TIMEOUT.get(settings).getMillis()); + this.connectTimeoutMillis = Math.toIntExact(SETTING_HTTP_CONNECT_TIMEOUT.get(settings).getMillis()); + this.sharedGroupFactory = sharedGroupFactory; + this.maxCompositeBufferComponents = SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS.get(settings); + this.maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.get(settings); + this.maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings); + this.maxInitialLineLength = SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings); + } + + /** + * Binds the transport engine to the socket address + * @param socketAddress socket address to bind to + */ + @Override + protected HttpServerChannel bind(InetSocketAddress socketAddress) throws Exception { + final HttpServer server = configureChannelOptions( + HttpServer.create() + .httpFormDecoder(builder -> builder.scheduler(scheduler)) + .idleTimeout(Duration.ofMillis(connectTimeoutMillis)) + .readTimeout(Duration.ofMillis(readTimeoutMillis)) + .runOn(sharedGroup.getLowLevelGroup()) + .bindAddress(() -> socketAddress) + .compress(true) + .httpRequestDecoder( + spec -> spec.maxChunkSize(maxChunkSize.bytesAsInt()) + .maxHeaderSize(maxHeaderSize.bytesAsInt()) + .maxInitialLineLength(maxInitialLineLength.bytesAsInt()) + ) + .protocol(HttpProtocol.HTTP11, HttpProtocol.H2C) + .handle((req, res) -> incomingRequest(req, res)) + ); + + disposableServer = server.bindNow(); + return new ReactorNetty4HttpServerChannel(disposableServer.channel()); + } + + private HttpServer configureChannelOptions(final HttpServer server1) { + HttpServer configured = server1.childOption(ChannelOption.TCP_NODELAY, SETTING_HTTP_TCP_NO_DELAY.get(settings)) + .childOption(ChannelOption.SO_KEEPALIVE, SETTING_HTTP_TCP_KEEP_ALIVE.get(settings)); + + if (SETTING_HTTP_TCP_KEEP_ALIVE.get(settings)) { + // Netty logs a warning if it can't set the 
+            if (IOUtils.LINUX || IOUtils.MAC_OS_X) {
+                if (SETTING_HTTP_TCP_KEEP_IDLE.get(settings) >= 0) {
+                    final SocketOption<Integer> keepIdleOption = NetUtils.getTcpKeepIdleSocketOptionOrNull();
+                    if (keepIdleOption != null) {
+                        configured = configured.childOption(NioChannelOption.of(keepIdleOption), SETTING_HTTP_TCP_KEEP_IDLE.get(settings));
+                    }
+                }
+                if (SETTING_HTTP_TCP_KEEP_INTERVAL.get(settings) >= 0) {
+                    final SocketOption<Integer> keepIntervalOption = NetUtils.getTcpKeepIntervalSocketOptionOrNull();
+                    if (keepIntervalOption != null) {
+                        configured = configured.childOption(
+                            NioChannelOption.of(keepIntervalOption),
+                            SETTING_HTTP_TCP_KEEP_INTERVAL.get(settings)
+                        );
+                    }
+                }
+                if (SETTING_HTTP_TCP_KEEP_COUNT.get(settings) >= 0) {
+                    final SocketOption<Integer> keepCountOption = NetUtils.getTcpKeepCountSocketOptionOrNull();
+                    if (keepCountOption != null) {
+                        configured = configured.childOption(
+                            NioChannelOption.of(keepCountOption),
+                            SETTING_HTTP_TCP_KEEP_COUNT.get(settings)
+                        );
+                    }
+                }
+            }
+        }
+
+        final ByteSizeValue tcpSendBufferSize = SETTING_HTTP_TCP_SEND_BUFFER_SIZE.get(settings);
+        if (tcpSendBufferSize.getBytes() > 0) {
+            configured = configured.childOption(ChannelOption.SO_SNDBUF, Math.toIntExact(tcpSendBufferSize.getBytes()));
+        }
+
+        final ByteSizeValue tcpReceiveBufferSize = SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE.get(settings);
+        if (tcpReceiveBufferSize.getBytes() > 0) {
+            configured = configured.childOption(ChannelOption.SO_RCVBUF, Math.toIntExact(tcpReceiveBufferSize.getBytes()));
+        }
+
+        final boolean reuseAddress = SETTING_HTTP_TCP_REUSE_ADDRESS.get(settings);
+        configured = configured.option(ChannelOption.SO_REUSEADDR, reuseAddress);
+        configured = configured.childOption(ChannelOption.SO_REUSEADDR, reuseAddress);
+
+        return configured;
+    }
+
+    /**
+     * Handles an incoming Reactor Netty request
+     * @param request request instance
+     * @param response response instance
+     * @return response publisher
+     */
+    protected Publisher<Void> incomingRequest(HttpServerRequest request, HttpServerResponse response) {
+        final NonStreamingRequestConsumer<HttpContent> consumer = new NonStreamingRequestConsumer<>(
+            this,
+            request,
+            response,
+            maxCompositeBufferComponents
+        );
+
+        request.receiveContent().switchIfEmpty(Mono.just(DefaultLastHttpContent.EMPTY_LAST_CONTENT)).subscribe(consumer);
+
+        return Mono.from(consumer).flatMap(hc -> {
+            final FullHttpResponse r = (FullHttpResponse) hc;
+            response.status(r.status());
+            response.trailerHeaders(c -> r.trailingHeaders().forEach(h -> c.add(h.getKey(), h.getValue())));
+            response.chunkedTransfer(false);
+            response.compression(true);
+            r.headers().forEach(h -> response.addHeader(h.getKey(), h.getValue()));
+            return Mono.from(response.sendObject(r.content()));
+        });
+    }
+
+    /**
+     * Called to tear down internal resources
+     */
+    @Override
+    protected void stopInternal() {
+        if (sharedGroup != null) {
+            sharedGroup.shutdown();
+            sharedGroup = null;
+        }
+
+        if (scheduler != null) {
+            scheduler.dispose();
+            scheduler = null;
+        }
+
+        if (disposableServer != null) {
+            disposableServer.disposeNow();
+            disposableServer = null;
+        }
+    }
+
+    /**
+     * Starts the transport
+     */
+    @Override
+    protected void doStart() {
+        boolean success = false;
+        try {
+            scheduler = Schedulers.newBoundedElastic(
+                Schedulers.DEFAULT_BOUNDED_ELASTIC_SIZE,
+                Schedulers.DEFAULT_BOUNDED_ELASTIC_QUEUESIZE,
+                "http-form-decoder"
+            );
+            sharedGroup = sharedGroupFactory.getHttpGroup();
+            bindServer();
+            success = true;
+        } finally {
+            if (success == false) {
+                doStop(); // otherwise we leak threads since we never moved to started
+            }
+        }
+    }
+
+    @Override
+    public void onException(HttpChannel channel, Exception cause) {
+        if (cause instanceof ReadTimeoutException) {
+            super.onException(channel, new HttpReadTimeoutException(readTimeoutMillis, cause));
+        } else {
+            super.onException(channel, cause);
+        }
+    }
+}
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/package-info.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/package-info.java
new file mode 100644
index 0000000000000..b5ecb0b62f79d
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * The new HTTP transport implementations based on Reactor Netty.
+ */
+package org.opensearch.http.reactor.netty4;
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/ReactorNetty4Plugin.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/ReactorNetty4Plugin.java
new file mode 100644
index 0000000000000..dc310c3793109
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/ReactorNetty4Plugin.java
@@ -0,0 +1,109 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+package org.opensearch.transport.reactor;
+
+import org.opensearch.common.SetOnce;
+import org.opensearch.common.network.NetworkService;
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.BigArrays;
+import org.opensearch.common.util.PageCacheRecycler;
+import org.opensearch.core.indices.breaker.CircuitBreakerService;
+import org.opensearch.core.xcontent.NamedXContentRegistry;
+import org.opensearch.http.HttpServerTransport;
+import org.opensearch.http.reactor.netty4.ReactorNetty4HttpServerTransport;
+import org.opensearch.plugins.NetworkPlugin;
+import org.opensearch.plugins.Plugin;
+import org.opensearch.telemetry.tracing.Tracer;
+import org.opensearch.threadpool.ThreadPool;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Supplier;
+
+/**
+ * The experimental network plugin that introduces new transport implementations based on Reactor Netty.
+ */
+public class ReactorNetty4Plugin extends Plugin implements NetworkPlugin {
+    /**
+     * The name of the new experimental HTTP transport implementation based on Reactor Netty.
+     */
+    public static final String REACTOR_NETTY_HTTP_TRANSPORT_NAME = "reactor-netty4";
+
+    private final SetOnce<SharedGroupFactory> groupFactory = new SetOnce<>();
+
+    /**
+     * Default constructor
+     */
+    public ReactorNetty4Plugin() {}
+
+    /**
+     * Returns a list of additional {@link Setting} definitions for this plugin.
+ */
+ @Override
+ public List<Setting<?>> getSettings() {
+ return Arrays.asList(/* no setting registered since we're picking the ones from the Netty 4 transport */);
+ }
+
+ /**
+ * Returns a map of {@link HttpServerTransport} suppliers.
+ * See {@link org.opensearch.common.network.NetworkModule#HTTP_TYPE_SETTING} to configure a specific implementation.
+ * @param settings settings
+ * @param networkService network service
+ * @param bigArrays big array allocator
+ * @param pageCacheRecycler page cache recycler instance
+ * @param circuitBreakerService circuit breaker service instance
+ * @param threadPool thread pool instance
+ * @param xContentRegistry XContent registry instance
+ * @param dispatcher dispatcher instance
+ * @param clusterSettings cluster settings
+ * @param tracer tracer instance
+ */
+ @Override
+ public Map<String, Supplier<HttpServerTransport>> getHttpTransports(
+ Settings settings,
+ ThreadPool threadPool,
+ BigArrays bigArrays,
+ PageCacheRecycler pageCacheRecycler,
+ CircuitBreakerService circuitBreakerService,
+ NamedXContentRegistry xContentRegistry,
+ NetworkService networkService,
+ HttpServerTransport.Dispatcher dispatcher,
+ ClusterSettings clusterSettings,
+ Tracer tracer
+ ) {
+ return Collections.singletonMap(
+ REACTOR_NETTY_HTTP_TRANSPORT_NAME,
+ () -> new ReactorNetty4HttpServerTransport(
+ settings,
+ networkService,
+ bigArrays,
+ threadPool,
+ xContentRegistry,
+ dispatcher,
+ clusterSettings,
+ getSharedGroupFactory(settings),
+ tracer
+ )
+ );
+ }
+
+ private SharedGroupFactory getSharedGroupFactory(Settings settings) {
+ final SharedGroupFactory groupFactory = this.groupFactory.get();
+ if (groupFactory != null) {
+ assert groupFactory.getSettings().equals(settings) : "Different settings than originally provided";
+ return groupFactory;
+ } else {
+ this.groupFactory.set(new SharedGroupFactory(settings));
+ return this.groupFactory.get();
+ }
+ }
+}
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/SharedGroupFactory.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/SharedGroupFactory.java
new file mode 100644
index 0000000000000..ab7de33c8e673
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/SharedGroupFactory.java
@@ -0,0 +1,164 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/*
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+package org.opensearch.transport.reactor;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.concurrent.AbstractRefCounted;
+import org.opensearch.http.HttpServerTransport;
+import org.opensearch.http.reactor.netty4.ReactorNetty4HttpServerTransport;
+import org.opensearch.transport.TcpTransport;
+import org.opensearch.transport.reactor.netty4.ReactorNetty4Transport;
+
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import io.netty.channel.EventLoopGroup;
+import io.netty.channel.nio.NioEventLoopGroup;
+import io.netty.util.concurrent.Future;
+
+import static org.opensearch.common.util.concurrent.OpenSearchExecutors.daemonThreadFactory;
+
+/**
+ * Creates and returns {@link io.netty.channel.EventLoopGroup} instances.
It will return a shared group for + * both {@link #getHttpGroup()} and {@link #getTransportGroup()} if + * {@link org.opensearch.http.reactor.netty4.ReactorNetty4HttpServerTransport#SETTING_HTTP_WORKER_COUNT} is configured to be 0. + * If that setting is not 0, then it will return a different group in the {@link #getHttpGroup()} call. + */ +public final class SharedGroupFactory { + + private static final Logger logger = LogManager.getLogger(SharedGroupFactory.class); + + private final Settings settings; + private final int workerCount; + private final int httpWorkerCount; + + private RefCountedGroup genericGroup; + private SharedGroup dedicatedHttpGroup; + + /** + * Creates new shared group factory instance from settings + * @param settings settings + */ + public SharedGroupFactory(Settings settings) { + this.settings = settings; + this.workerCount = ReactorNetty4Transport.SETTING_WORKER_COUNT.get(settings); + this.httpWorkerCount = ReactorNetty4HttpServerTransport.SETTING_HTTP_WORKER_COUNT.get(settings); + } + + Settings getSettings() { + return settings; + } + + /** + * Gets the number of configured transport workers + * @return the number of configured transport workers + */ + public int getTransportWorkerCount() { + return workerCount; + } + + /** + * Gets transport shared group + * @return transport shared group + */ + public synchronized SharedGroup getTransportGroup() { + return getGenericGroup(); + } + + /** + * Gets HTTP transport shared group + * @return HTTP transport shared group + */ + public synchronized SharedGroup getHttpGroup() { + if (httpWorkerCount == 0) { + return getGenericGroup(); + } else { + if (dedicatedHttpGroup == null) { + NioEventLoopGroup eventLoopGroup = new NioEventLoopGroup( + httpWorkerCount, + daemonThreadFactory(settings, HttpServerTransport.HTTP_SERVER_WORKER_THREAD_NAME_PREFIX) + ); + dedicatedHttpGroup = new SharedGroup(new RefCountedGroup(eventLoopGroup)); + } + return dedicatedHttpGroup; + } + } + + private SharedGroup getGenericGroup() { + if (genericGroup == null) { + EventLoopGroup eventLoopGroup = new NioEventLoopGroup( + workerCount, + daemonThreadFactory(settings, TcpTransport.TRANSPORT_WORKER_THREAD_NAME_PREFIX) + ); + this.genericGroup = new RefCountedGroup(eventLoopGroup); + } else { + genericGroup.incRef(); + } + return new SharedGroup(genericGroup); + } + + private static class RefCountedGroup extends AbstractRefCounted { + + public static final String NAME = "ref-counted-event-loop-group"; + private final EventLoopGroup eventLoopGroup; + + private RefCountedGroup(EventLoopGroup eventLoopGroup) { + super(NAME); + this.eventLoopGroup = eventLoopGroup; + } + + @Override + protected void closeInternal() { + Future<?> shutdownFuture = eventLoopGroup.shutdownGracefully(0, 5, TimeUnit.SECONDS); + shutdownFuture.awaitUninterruptibly(); + if (shutdownFuture.isSuccess() == false) { + logger.warn("Error closing netty event loop group", shutdownFuture.cause()); + } + } + } + + /** + * Wraps the {@link RefCountedGroup}. Calls {@link RefCountedGroup#decRef()} on close. After close, + * this wrapped instance can no longer be used. 
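+ * <p>A minimal lifecycle sketch, using only the types defined in this file:
+ * <pre>{@code
+ * SharedGroupFactory factory = new SharedGroupFactory(settings);
+ * SharedGroupFactory.SharedGroup group = factory.getHttpGroup();
+ * try {
+ *     EventLoopGroup eventLoopGroup = group.getLowLevelGroup();
+ *     // ... bind server channels against the event loop group ...
+ * } finally {
+ *     group.shutdown(); // decrements the reference count exactly once
+ * }
+ * }</pre>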
+ */
+ public static class SharedGroup {
+
+ private final RefCountedGroup refCountedGroup;
+
+ private final AtomicBoolean isOpen = new AtomicBoolean(true);
+
+ private SharedGroup(RefCountedGroup refCountedGroup) {
+ this.refCountedGroup = refCountedGroup;
+ }
+
+ /**
+ * Gets Netty's {@link EventLoopGroup} instance
+ * @return Netty's {@link EventLoopGroup} instance
+ */
+ public EventLoopGroup getLowLevelGroup() {
+ return refCountedGroup.eventLoopGroup;
+ }
+
+ /**
+ * Decreases the reference count of the underlying {@link EventLoopGroup} instance
+ */
+ public void shutdown() {
+ if (isOpen.compareAndSet(true, false)) {
+ refCountedGroup.decRef();
+ }
+ }
+ }
+}
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/Netty4Utils.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/Netty4Utils.java
new file mode 100644
index 0000000000000..8ec432b7dd5cd
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/Netty4Utils.java
@@ -0,0 +1,142 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+package org.opensearch.transport.reactor.netty4;
+
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefIterator;
+import org.opensearch.ExceptionsHelper;
+import org.opensearch.common.Booleans;
+import org.opensearch.common.concurrent.CompletableContext;
+import org.opensearch.core.common.bytes.BytesArray;
+import org.opensearch.core.common.bytes.BytesReference;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.CompositeByteBuf;
+import io.netty.buffer.Unpooled;
+import io.netty.channel.ChannelFuture;
+import io.netty.util.NettyRuntime;
+
+/**
+ * Shameless copy of Netty4Utils from transport-netty4 module
+ */
+public final class Netty4Utils {
+ private static final AtomicBoolean isAvailableProcessorsSet = new AtomicBoolean();
+
+ /**
+ * Utility class
+ */
+ private Netty4Utils() {}
+
+ /**
+ * Set the number of available processors that Netty uses for sizing various resources (e.g., thread pools).
+ *
+ * @param availableProcessors the number of available processors
+ * @throws IllegalStateException if available processors was set previously and the specified value does not match the already-set value
+ */
+ public static void setAvailableProcessors(final int availableProcessors) {
+ // we set this to false in tests to avoid tests that randomly set processors from stepping on each other
+ final boolean set = Booleans.parseBoolean(System.getProperty("opensearch.set.netty.runtime.available.processors", "true"));
+ if (!set) {
+ return;
+ }
+
+ /*
+ * This can be invoked twice, once from Netty4Transport and another time from Netty4HttpServerTransport; however,
+ * Netty4Runtime#availableProcessors forbids setting the number of processors twice so we prevent double invocation here.
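+ * For example (an illustrative sketch of this method's contract):
+ *
+ *   Netty4Utils.setAvailableProcessors(4); // first call: value is handed to NettyRuntime
+ *   Netty4Utils.setAvailableProcessors(4); // repeated call with a matching value: no-op
+ *   Netty4Utils.setAvailableProcessors(8); // mismatching value: throws IllegalStateException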
+ */
+ if (isAvailableProcessorsSet.compareAndSet(false, true)) {
+ NettyRuntime.setAvailableProcessors(availableProcessors);
+ } else if (availableProcessors != NettyRuntime.availableProcessors()) {
+ /*
+ * We have previously set the available processors, yet now we are either trying to set a different value, or there is
+ * a bug in Netty and our previous value did not take effect; either way, bail.
+ */
+ final String message = String.format(
+ Locale.ROOT,
+ "available processors value [%d] did not match current value [%d]",
+ availableProcessors,
+ NettyRuntime.availableProcessors()
+ );
+ throw new IllegalStateException(message);
+ }
+ }
+
+ /**
+ * Turns the given BytesReference into a ByteBuf. Note: the returned ByteBuf will reference the internal
+ * pages of the BytesReference. Don't release the BytesReference before the ByteBuf goes out of scope.
+ * @param reference reference to convert
+ */
+ public static ByteBuf toByteBuf(final BytesReference reference) {
+ if (reference.length() == 0) {
+ return Unpooled.EMPTY_BUFFER;
+ }
+ final BytesRefIterator iterator = reference.iterator();
+ // usually we have one, two, or three components from the header, the message, and a buffer
+ final List<ByteBuf> buffers = new ArrayList<>(3);
+ try {
+ BytesRef slice;
+ while ((slice = iterator.next()) != null) {
+ buffers.add(Unpooled.wrappedBuffer(slice.bytes, slice.offset, slice.length));
+ }
+
+ if (buffers.size() == 1) {
+ return buffers.get(0);
+ } else {
+ CompositeByteBuf composite = Unpooled.compositeBuffer(buffers.size());
+ composite.addComponents(true, buffers);
+ return composite;
+ }
+ } catch (IOException ex) {
+ throw new AssertionError("no IO happens here", ex);
+ }
+ }
+
+ /**
+ * Wraps the given ByteBuf with a BytesReference
+ * @param buffer buffer to convert
+ */
+ public static BytesReference toBytesReference(final ByteBuf buffer) {
+ final int readableBytes = buffer.readableBytes();
+ if (readableBytes == 0) {
+ return BytesArray.EMPTY;
+ } else if (buffer.hasArray()) {
+ return new BytesArray(buffer.array(), buffer.arrayOffset() + buffer.readerIndex(), readableBytes);
+ } else {
+ final ByteBuffer[] byteBuffers = buffer.nioBuffers();
+ return BytesReference.fromByteBuffers(byteBuffers);
+ }
+ }
+
+ /**
+ * Add completion listener to ChannelFuture
+ * @param channelFuture ChannelFuture to add listener to
+ * @param context completion listener context
+ */
+ public static void addListener(ChannelFuture channelFuture, CompletableContext<Void> context) {
+ channelFuture.addListener(f -> {
+ if (f.isSuccess()) {
+ context.complete(null);
+ } else {
+ Throwable cause = f.cause();
+ if (cause instanceof Error) {
+ ExceptionsHelper.maybeDieOnAnotherThread(cause);
+ context.completeExceptionally(new Exception(cause));
+ } else {
+ context.completeExceptionally((Exception) cause);
+ }
+ }
+ });
+ }
+}
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/ReactorNetty4Transport.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/ReactorNetty4Transport.java
new file mode 100644
index 0000000000000..b3e92f58c540a
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/ReactorNetty4Transport.java
@@ -0,0 +1,35 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.transport.reactor.netty4;
+
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Setting.Property;
+import org.opensearch.common.util.concurrent.OpenSearchExecutors;
+
+import reactor.netty.tcp.TcpServer;
+
+/**
+ * The transport implementations based on Reactor Netty (please see {@link TcpServer}).
+ */
+public class ReactorNetty4Transport {
+ /**
+ * The number of Netty workers
+ */
+ public static final Setting<Integer> SETTING_WORKER_COUNT = new Setting<>(
+ "transport.netty.worker_count",
+ (s) -> Integer.toString(OpenSearchExecutors.allocatedProcessors(s)),
+ (s) -> Setting.parseInt(s, 1, "transport.netty.worker_count"),
+ Property.NodeScope
+ );
+
+ /**
+ * Default constructor
+ */
+ public ReactorNetty4Transport() {}
+}
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/package-info.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/package-info.java
new file mode 100644
index 0000000000000..921bca104c6fe
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/netty4/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * The new transport implementations based on Reactor Netty.
+ */
+package org.opensearch.transport.reactor.netty4;
diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/package-info.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/package-info.java
new file mode 100644
index 0000000000000..2f36ebb7f11f8
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * The experimental network plugin that introduces new transport implementations based on Reactor Netty.
+ */
+package org.opensearch.transport.reactor;
diff --git a/plugins/transport-reactor-netty4/src/main/plugin-metadata/plugin-security.policy b/plugins/transport-reactor-netty4/src/main/plugin-metadata/plugin-security.policy
new file mode 100644
index 0000000000000..4f2dcde995338
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/main/plugin-metadata/plugin-security.policy
@@ -0,0 +1,24 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +grant codeBase "${codebase.netty-common}" { + // for reading the system-wide configuration for the backlog of established sockets + permission java.io.FilePermission "/proc/sys/net/core/somaxconn", "read"; + + // netty makes and accepts socket connections + permission java.net.SocketPermission "*", "accept,connect"; + + // Netty sets custom classloader for some of its internal threads + permission java.lang.RuntimePermission "*", "setContextClassLoader"; +}; + +grant codeBase "${codebase.netty-transport}" { + // Netty NioEventLoop wants to change this, because of https://bugs.openjdk.java.net/browse/JDK-6427854 + // the bug says it only happened rarely, and that its fixed, but apparently it still happens rarely! + permission java.util.PropertyPermission "sun.nio.ch.bugLevel", "write"; +}; diff --git a/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorHttpClient.java b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorHttpClient.java new file mode 100644 index 0000000000000..443ecd0f40ead --- /dev/null +++ b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorHttpClient.java @@ -0,0 +1,208 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.common.collect.Tuple; +import org.opensearch.tasks.Task; + +import java.io.Closeable; +import java.net.InetSocketAddress; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.DefaultFullHttpResponse; +import io.netty.handler.codec.http.EmptyHttpHeaders; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpContent; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpResponse; +import io.netty.handler.codec.http.HttpVersion; +import io.netty.handler.codec.http2.HttpConversionUtil; +import io.netty.resolver.DefaultAddressResolverGroup; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +import reactor.core.publisher.ParallelFlux; +import reactor.netty.http.client.HttpClient; + +import static io.netty.handler.codec.http.HttpHeaderNames.HOST; +import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; + +/** + * Tiny helper to send http requests over netty. 
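+ * <p>A sketch of the typical interaction, mirroring how the tests below use it:
+ * <pre>{@code
+ * try (ReactorHttpClient client = ReactorHttpClient.create()) {
+ *     List<FullHttpResponse> responses = client.get(transportAddress.address(), "/_cluster/settings");
+ *     // the caller releases the reference-counted Netty responses
+ *     responses.forEach(ReferenceCounted::release);
+ * }
+ * }</pre>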
+ */ +class ReactorHttpClient implements Closeable { + private final boolean compression; + + static Collection<String> returnHttpResponseBodies(Collection<FullHttpResponse> responses) { + List<String> list = new ArrayList<>(responses.size()); + for (FullHttpResponse response : responses) { + list.add(response.content().toString(StandardCharsets.UTF_8)); + } + return list; + } + + static Collection<String> returnOpaqueIds(Collection<FullHttpResponse> responses) { + List<String> list = new ArrayList<>(responses.size()); + for (HttpResponse response : responses) { + list.add(response.headers().get(Task.X_OPAQUE_ID)); + } + return list; + } + + ReactorHttpClient(boolean compression) { + this.compression = compression; + } + + static ReactorHttpClient create() { + return create(true); + } + + static ReactorHttpClient create(boolean compression) { + return new ReactorHttpClient(compression); + } + + public List<FullHttpResponse> get(InetSocketAddress remoteAddress, String... uris) throws InterruptedException { + return get(remoteAddress, false, uris); + } + + public List<FullHttpResponse> get(InetSocketAddress remoteAddress, boolean ordered, String... uris) throws InterruptedException { + final List<FullHttpRequest> requests = new ArrayList<>(uris.length); + + for (int i = 0; i < uris.length; i++) { + final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HTTP_1_1, HttpMethod.GET, uris[i]); + httpRequest.headers().add(HOST, "localhost"); + httpRequest.headers().add("X-Opaque-ID", String.valueOf(i)); + httpRequest.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), "http"); + requests.add(httpRequest); + } + + return sendRequests(remoteAddress, requests, ordered); + } + + public final Collection<FullHttpResponse> post(InetSocketAddress remoteAddress, List<Tuple<String, CharSequence>> urisAndBodies) + throws InterruptedException { + return processRequestsWithBody(HttpMethod.POST, remoteAddress, urisAndBodies); + } + + public final FullHttpResponse send(InetSocketAddress remoteAddress, FullHttpRequest httpRequest) throws InterruptedException { + final List<FullHttpResponse> responses = sendRequests(remoteAddress, Collections.singleton(httpRequest), false); + assert responses.size() == 1 : "expected 1 and only 1 http response"; + return responses.get(0); + } + + public final FullHttpResponse send(InetSocketAddress remoteAddress, FullHttpRequest httpRequest, HttpContent content) + throws InterruptedException { + final List<FullHttpResponse> responses = sendRequests( + remoteAddress, + Collections.singleton( + new DefaultFullHttpRequest( + httpRequest.protocolVersion(), + httpRequest.method(), + httpRequest.uri(), + content.content(), + httpRequest.headers(), + httpRequest.trailingHeaders() + ) + ), + false + ); + assert responses.size() == 1 : "expected 1 and only 1 http response"; + return responses.get(0); + } + + public final Collection<FullHttpResponse> put(InetSocketAddress remoteAddress, List<Tuple<String, CharSequence>> urisAndBodies) + throws InterruptedException { + return processRequestsWithBody(HttpMethod.PUT, remoteAddress, urisAndBodies); + } + + private List<FullHttpResponse> processRequestsWithBody( + HttpMethod method, + InetSocketAddress remoteAddress, + List<Tuple<String, CharSequence>> urisAndBodies + ) throws InterruptedException { + List<FullHttpRequest> requests = new ArrayList<>(urisAndBodies.size()); + for (int i = 0; i < urisAndBodies.size(); ++i) { + final Tuple<String, CharSequence> uriAndBody = urisAndBodies.get(i); + ByteBuf content = 
Unpooled.copiedBuffer(uriAndBody.v2(), StandardCharsets.UTF_8);
+ FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, method, uriAndBody.v1(), content);
+ request.headers().add(HttpHeaderNames.HOST, "localhost");
+ request.headers().add(HttpHeaderNames.CONTENT_LENGTH, content.readableBytes());
+ request.headers().add(HttpHeaderNames.CONTENT_TYPE, "application/json");
+ request.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), "http");
+ request.headers().add("X-Opaque-ID", String.valueOf(i));
+ requests.add(request);
+ }
+ return sendRequests(remoteAddress, requests, false);
+ }
+
+ private List<FullHttpResponse> sendRequests(
+ final InetSocketAddress remoteAddress,
+ final Collection<FullHttpRequest> requests,
+ boolean ordered
+ ) {
+ final NioEventLoopGroup eventLoopGroup = new NioEventLoopGroup(1);
+ try {
+ final HttpClient client = HttpClient.newConnection()
+ .resolver(DefaultAddressResolverGroup.INSTANCE)
+ .runOn(eventLoopGroup)
+ .host(remoteAddress.getHostString())
+ .port(remoteAddress.getPort())
+ .compress(compression);
+
+ @SuppressWarnings("unchecked")
+ final Mono<FullHttpResponse>[] monos = requests.stream()
+ .map(
+ request -> client.headers(h -> h.add(request.headers()))
+ .baseUrl(request.getUri())
+ .request(request.method())
+ .send(Mono.fromSupplier(() -> request.content()))
+ .responseSingle(
+ (r, body) -> body.switchIfEmpty(Mono.just(Unpooled.EMPTY_BUFFER))
+ .map(
+ b -> new DefaultFullHttpResponse(
+ r.version(),
+ r.status(),
+ b.retain(),
+ r.responseHeaders(),
+ EmptyHttpHeaders.INSTANCE
+ )
+ )
+ )
+ )
+ .toArray(Mono[]::new);
+
+ if (ordered == false) {
+ return ParallelFlux.from(monos).sequential().collectList().block();
+ } else {
+ return Flux.concat(monos).flatMapSequential(r -> Mono.just(r)).collectList().block();
+ }
+ } finally {
+ eventLoopGroup.shutdownGracefully().awaitUninterruptibly();
+ }
+ }
+
+ @Override
+ public void close() {
+
+ }
+}
diff --git a/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4BadRequestTests.java b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4BadRequestTests.java
new file mode 100644
index 0000000000000..00ca378a4e46b
--- /dev/null
+++ b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4BadRequestTests.java
@@ -0,0 +1,122 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.http.reactor.netty4; + +import org.opensearch.OpenSearchException; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.MockBigArrays; +import org.opensearch.common.util.MockPageCacheRecycler; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.http.HttpTransportSettings; +import org.opensearch.rest.BytesRestResponse; +import org.opensearch.rest.RestChannel; +import org.opensearch.rest.RestRequest; +import org.opensearch.telemetry.tracing.noop.NoopTracer; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.reactor.SharedGroupFactory; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.util.ReferenceCounted; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +public class ReactorNetty4BadRequestTests extends OpenSearchTestCase { + + private NetworkService networkService; + private MockBigArrays bigArrays; + private ThreadPool threadPool; + + @Before + public void setup() throws Exception { + networkService = new NetworkService(Collections.emptyList()); + bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + threadPool = new TestThreadPool("test"); + } + + @After + public void shutdown() throws Exception { + terminate(threadPool); + } + + public void testBadParameterEncoding() throws Exception { + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + @Override + public void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext threadContext) { + fail(); + } + + @Override + public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, Throwable cause) { + try { + final Exception e = cause instanceof Exception ? 
(Exception) cause : new OpenSearchException(cause); + channel.sendResponse(new BytesRestResponse(channel, RestStatus.BAD_REQUEST, e)); + } catch (final IOException e) { + throw new UncheckedIOException(e); + } + } + }; + + Settings settings = Settings.builder().put(HttpTransportSettings.SETTING_HTTP_PORT.getKey(), getPortRange()).build(); + try ( + HttpServerTransport httpServerTransport = new ReactorNetty4HttpServerTransport( + settings, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + dispatcher, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + new SharedGroupFactory(Settings.EMPTY), + NoopTracer.INSTANCE + ) + ) { + httpServerTransport.start(); + final TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses()); + + try (ReactorHttpClient nettyHttpClient = ReactorHttpClient.create()) { + final List<FullHttpResponse> responses = nettyHttpClient.get(transportAddress.address(), "/_cluster/settings?pretty=%"); + + try { + assertThat(responses, hasSize(1)); + final FullHttpResponse response = responses.get(0); + assertThat(response.status().code(), equalTo(400)); + final Collection<String> responseBodies = ReactorHttpClient.returnHttpResponseBodies(responses); + assertThat(responseBodies, hasSize(1)); + final String body = responseBodies.iterator().next(); + assertThat(body, containsString("\"type\":\"bad_parameter_exception\"")); + assertThat( + body, + containsString("\"reason\":\"java.lang.IllegalArgumentException: partial escape sequence at end of string: %/\"") + ); + } finally { + responses.forEach(ReferenceCounted::release); + } + } + } + } + +} diff --git a/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransportTests.java b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransportTests.java new file mode 100644 index 0000000000000..15a5b04c802a4 --- /dev/null +++ b/plugins/transport-reactor-netty4/src/test/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransportTests.java @@ -0,0 +1,579 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.http.reactor.netty4; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.common.network.NetworkAddress; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.MockBigArrays; +import org.opensearch.common.util.MockPageCacheRecycler; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.http.BindHttpException; +import org.opensearch.http.CorsHandler; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.http.HttpTransportSettings; +import org.opensearch.http.NullDispatcher; +import org.opensearch.rest.BytesRestResponse; +import org.opensearch.rest.RestChannel; +import org.opensearch.rest.RestRequest; +import org.opensearch.telemetry.tracing.noop.NoopTracer; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.rest.FakeRestRequest; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.NettyAllocator; +import org.opensearch.transport.reactor.SharedGroupFactory; +import org.junit.After; +import org.junit.Before; + +import java.nio.charset.StandardCharsets; +import java.util.Collections; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import io.netty.bootstrap.Bootstrap; +import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.ByteBufUtil; +import io.netty.buffer.PoolArenaMetric; +import io.netty.buffer.PooledByteBufAllocator; +import io.netty.buffer.PooledByteBufAllocatorMetric; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelHandlerAdapter; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelOption; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.SocketChannel; +import io.netty.channel.socket.nio.NioSocketChannel; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.DefaultHttpContent; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpContent; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpHeaderValues; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.HttpUtil; +import io.netty.handler.codec.http.HttpVersion; + +import static org.opensearch.core.rest.RestStatus.OK; +import static org.opensearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN; +import static org.opensearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +/** + * Tests for the {@link ReactorNetty4HttpServerTransport} class. 
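+ * <p>Most tests here follow the same shape; as a sketch (fields as initialized in {@code setup()}):
+ * <pre>{@code
+ * try (ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport(
+ *     settings, networkService, bigArrays, threadPool, xContentRegistry(), dispatcher,
+ *     clusterSettings, new SharedGroupFactory(settings), NoopTracer.INSTANCE)) {
+ *     transport.start();
+ *     TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
+ *     // drive remoteAddress with ReactorHttpClient and assert on the responses
+ * }
+ * }</pre>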
+ */
+public class ReactorNetty4HttpServerTransportTests extends OpenSearchTestCase {
+
+ private NetworkService networkService;
+ private ThreadPool threadPool;
+ private MockBigArrays bigArrays;
+ private ClusterSettings clusterSettings;
+
+ @Before
+ public void setup() throws Exception {
+ networkService = new NetworkService(Collections.emptyList());
+ threadPool = new TestThreadPool("test");
+ bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
+ clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+ }
+
+ @After
+ public void shutdown() throws Exception {
+ if (threadPool != null) {
+ threadPool.shutdownNow();
+ }
+ threadPool = null;
+ networkService = null;
+ bigArrays = null;
+ clusterSettings = null;
+ }
+
+ /**
+ * Test that {@link ReactorNetty4HttpServerTransport} supports the "Expect: 100-continue" HTTP header
+ * @throws InterruptedException if the client communication with the server is interrupted
+ */
+ public void testExpectContinueHeader() throws InterruptedException {
+ final Settings settings = createSettings();
+ final int contentLength = randomIntBetween(1, HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.get(settings).bytesAsInt());
+ runExpectHeaderTest(settings, HttpHeaderValues.CONTINUE.toString(), contentLength, HttpResponseStatus.CONTINUE);
+ }
+
+ /**
+ * Test that {@link ReactorNetty4HttpServerTransport} responds with a 413 status to a
+ * 100-continue expectation whose content-length is too large.
+ * @throws InterruptedException if the client communication with the server is interrupted
+ */
+ public void testExpectContinueHeaderContentLengthTooLong() throws InterruptedException {
+ final String key = HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.getKey();
+ final int maxContentLength = randomIntBetween(1, 104857600);
+ final Settings settings = createBuilderWithPort().put(key, maxContentLength + "b").build();
+ final int contentLength = randomIntBetween(maxContentLength + 1, Integer.MAX_VALUE);
+ runExpectHeaderTest(settings, HttpHeaderValues.CONTINUE.toString(), contentLength, HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE);
+ }
+
+ /**
+ * Test that {@link ReactorNetty4HttpServerTransport} responds to an unsupported expectation with a 417 status.
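+ * <p>Sketch of the request this test sends through {@code runExpectHeaderTest} below:
+ * <pre>{@code
+ * FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/");
+ * request.headers().set(HttpHeaderNames.EXPECT, "chocolate=yummy"); // not a recognized expectation
+ * HttpUtil.setContentLength(request, 0);
+ * // the transport is expected to answer with 417 Expectation Failed
+ * }</pre>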
+ * @throws InterruptedException if the client communication with the server is interrupted + */ + public void testExpectUnsupportedExpectation() throws InterruptedException { + Settings settings = createSettings(); + runExpectHeaderTest(settings, "chocolate=yummy", 0, HttpResponseStatus.EXPECTATION_FAILED); + } + + private void runExpectHeaderTest( + final Settings settings, + final String expectation, + final int contentLength, + final HttpResponseStatus expectedStatus + ) throws InterruptedException { + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + @Override + public void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext threadContext) { + channel.sendResponse(new BytesRestResponse(OK, BytesRestResponse.TEXT_CONTENT_TYPE, new BytesArray("done"))); + } + + @Override + public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, Throwable cause) { + logger.error( + new ParameterizedMessage("--> Unexpected bad request [{}]", FakeRestRequest.requestToString(channel.request())), + cause + ); + throw new AssertionError(); + } + }; + try ( + ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport( + settings, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + dispatcher, + clusterSettings, + new SharedGroupFactory(settings), + NoopTracer.INSTANCE + ) + ) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + try (ReactorHttpClient client = ReactorHttpClient.create()) { + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/"); + request.headers().set(HttpHeaderNames.EXPECT, expectation); + HttpUtil.setContentLength(request, contentLength); + + // Reactor Netty 4 does not expose 100 CONTINUE response but instead just asks for content + final HttpContent continuationRequest = new DefaultHttpContent(Unpooled.EMPTY_BUFFER); + final FullHttpResponse continuationResponse = client.send(remoteAddress.address(), request, continuationRequest); + try { + assertThat(continuationResponse.status(), is(HttpResponseStatus.OK)); + assertThat(new String(ByteBufUtil.getBytes(continuationResponse.content()), StandardCharsets.UTF_8), is("done")); + } finally { + continuationResponse.release(); + } + } + } + } + + public void testBindUnavailableAddress() { + Settings initialSettings = createSettings(); + try ( + ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport( + initialSettings, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + new NullDispatcher(), + clusterSettings, + new SharedGroupFactory(Settings.EMPTY), + NoopTracer.INSTANCE + ) + ) { + transport.start(); + TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + Settings settings = Settings.builder() + .put("http.port", remoteAddress.getPort()) + .put("network.host", remoteAddress.getAddress()) + .build(); + try ( + ReactorNetty4HttpServerTransport otherTransport = new ReactorNetty4HttpServerTransport( + settings, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + new NullDispatcher(), + clusterSettings, + new SharedGroupFactory(settings), + NoopTracer.INSTANCE + ) + ) { + BindHttpException bindHttpException = expectThrows(BindHttpException.class, otherTransport::start); + assertEquals("Failed to bind to " + NetworkAddress.format(remoteAddress.address()), bindHttpException.getMessage()); + } + } + } + + public void 
testBadRequest() throws InterruptedException {
+ final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() {
+ @Override
+ public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) {
+ logger.error("--> Unexpected successful request [{}]", FakeRestRequest.requestToString(request));
+ throw new AssertionError();
+ }
+
+ @Override
+ public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) {
+ logger.error("--> Unexpected bad request");
+ throw new AssertionError(cause);
+ }
+ };
+
+ final Settings settings;
+ final int maxInitialLineLength;
+ final Setting<ByteSizeValue> httpMaxInitialLineLengthSetting = HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH;
+ if (randomBoolean()) {
+ maxInitialLineLength = httpMaxInitialLineLengthSetting.getDefault(Settings.EMPTY).bytesAsInt();
+ settings = createSettings();
+ } else {
+ maxInitialLineLength = randomIntBetween(1, 8192);
+ settings = createBuilderWithPort().put(httpMaxInitialLineLengthSetting.getKey(), maxInitialLineLength + "b").build();
+ }
+
+ try (
+ ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport(
+ settings,
+ networkService,
+ bigArrays,
+ threadPool,
+ xContentRegistry(),
+ dispatcher,
+ clusterSettings,
+ new SharedGroupFactory(settings),
+ NoopTracer.INSTANCE
+ )
+ ) {
+ transport.start();
+ final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
+
+ try (ReactorHttpClient client = ReactorHttpClient.create()) {
+ final String url = "/" + randomAlphaOfLength(maxInitialLineLength);
+ final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url);
+
+ final FullHttpResponse response = client.send(remoteAddress.address(), request);
+ try {
+ assertThat(response.status(), equalTo(HttpResponseStatus.REQUEST_URI_TOO_LONG));
+ assertThat(response.content().array().length, equalTo(0));
+ } finally {
+ response.release();
+ }
+ }
+ }
+ }
+
+ public void testDispatchFailed() throws InterruptedException {
+ final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() {
+ @Override
+ public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) {
+ throw new RuntimeException("Bad things happen");
+ }
+
+ @Override
+ public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) {
+ logger.error("--> Unexpected bad request");
+ throw new AssertionError(cause);
+ }
+ };
+
+ final Settings settings = createSettings();
+ try (
+ ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport(
+ settings,
+ networkService,
+ bigArrays,
+ threadPool,
+ xContentRegistry(),
+ dispatcher,
+ clusterSettings,
+ new SharedGroupFactory(settings),
+ NoopTracer.INSTANCE
+ )
+ ) {
+ transport.start();
+ final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
+
+ try (ReactorHttpClient client = ReactorHttpClient.create()) {
+ final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/");
+
+ final FullHttpResponse response = client.send(remoteAddress.address(), request);
+ try {
+ assertThat(response.status(), equalTo(HttpResponseStatus.INTERNAL_SERVER_ERROR));
+ assertThat(response.content().array().length, equalTo(0));
+ } finally {
+ response.release();
+ }
+ }
+ }
+ }
+
+ public void testLargeCompressedResponse() throws InterruptedException {
+ final String responseString = randomAlphaOfLength(4 * 1024 * 1024);
+ final String url = "/thing/";
+ final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() {
+
+ @Override
+ public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) {
+ if (url.equals(request.uri())) {
+ channel.sendResponse(new BytesRestResponse(OK, responseString));
+ } else {
+ logger.error("--> Unexpected successful uri [{}]", request.uri());
+ throw new AssertionError();
+ }
+ }
+
+ @Override
+ public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) {
+ logger.error(
+ new ParameterizedMessage("--> Unexpected bad request [{}]", FakeRestRequest.requestToString(channel.request())),
+ cause
+ );
+ throw new AssertionError();
+ }
+
+ };
+
+ try (
+ ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport(
+ Settings.EMPTY,
+ networkService,
+ bigArrays,
+ threadPool,
+ xContentRegistry(),
+ dispatcher,
+ clusterSettings,
+ new SharedGroupFactory(Settings.EMPTY),
+ NoopTracer.INSTANCE
+ )
+ ) {
+ transport.start();
+ final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
+
+ try (ReactorHttpClient client = ReactorHttpClient.create()) {
+ DefaultFullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url);
+ request.headers().add(HttpHeaderNames.ACCEPT_ENCODING, randomFrom("deflate", "gzip"));
+ long numOfHugeAllocations = getHugeAllocationCount();
+ final FullHttpResponse response = client.send(remoteAddress.address(), request);
+ try {
+ assertThat(getHugeAllocationCount(), equalTo(numOfHugeAllocations));
+ assertThat(response.status(), equalTo(HttpResponseStatus.OK));
+ byte[] bytes = new byte[response.content().readableBytes()];
+ response.content().readBytes(bytes);
+ assertThat(new String(bytes, StandardCharsets.UTF_8), equalTo(responseString));
+ } finally {
+ response.release();
+ }
+ }
+ }
+ }
+
+ private long getHugeAllocationCount() {
+ long numOfHugeAllocations = 0;
+ ByteBufAllocator allocator = NettyAllocator.getAllocator();
+ assert allocator instanceof NettyAllocator.NoDirectBuffers;
+ ByteBufAllocator delegate = ((NettyAllocator.NoDirectBuffers) allocator).getDelegate();
+ if (delegate instanceof PooledByteBufAllocator) {
+ PooledByteBufAllocatorMetric metric = ((PooledByteBufAllocator) delegate).metric();
+ numOfHugeAllocations = metric.heapArenas().stream().mapToLong(PoolArenaMetric::numHugeAllocations).sum();
+ }
+ return numOfHugeAllocations;
+ }
+
+ public void testCorsRequest() throws InterruptedException {
+ final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() {
+
+ @Override
+ public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) {
+ logger.error("--> Unexpected successful request [{}]", FakeRestRequest.requestToString(request));
+ throw new AssertionError();
+ }
+
+ @Override
+ public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) {
+ logger.error(
+ new ParameterizedMessage("--> Unexpected bad request [{}]", FakeRestRequest.requestToString(channel.request())),
+ cause
+ );
+ throw new AssertionError();
+ }
+
+ };
+
+ final Settings settings = createBuilderWithPort().put(SETTING_CORS_ENABLED.getKey(), true)
+
.put(SETTING_CORS_ALLOW_ORIGIN.getKey(), "test-cors.org") + .build(); + + try ( + ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport( + settings, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + dispatcher, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + new SharedGroupFactory(settings), + NoopTracer.INSTANCE + ) + ) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + + // Test pre-flight request + try (ReactorHttpClient client = ReactorHttpClient.create()) { + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.OPTIONS, "/"); + request.headers().add(CorsHandler.ORIGIN, "test-cors.org"); + request.headers().add(CorsHandler.ACCESS_CONTROL_REQUEST_METHOD, "POST"); + + final FullHttpResponse response = client.send(remoteAddress.address(), request); + try { + assertThat(response.status(), equalTo(HttpResponseStatus.OK)); + assertThat(response.headers().get(CorsHandler.ACCESS_CONTROL_ALLOW_ORIGIN), equalTo("test-cors.org")); + assertThat(response.headers().get(CorsHandler.VARY), equalTo(CorsHandler.ORIGIN)); + assertTrue(response.headers().contains(CorsHandler.DATE)); + } finally { + response.release(); + } + } + + // Test short-circuited request + try (ReactorHttpClient client = ReactorHttpClient.create()) { + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); + request.headers().add(CorsHandler.ORIGIN, "google.com"); + + final FullHttpResponse response = client.send(remoteAddress.address(), request); + try { + assertThat(response.status(), equalTo(HttpResponseStatus.FORBIDDEN)); + } finally { + response.release(); + } + } + } + } + + public void testConnectTimeout() throws Exception { + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + + @Override + public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) { + logger.error("--> Unexpected successful request [{}]", FakeRestRequest.requestToString(request)); + throw new AssertionError("Should not have received a dispatched request"); + } + + @Override + public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) { + logger.error( + new ParameterizedMessage("--> Unexpected bad request [{}]", FakeRestRequest.requestToString(channel.request())), + cause + ); + throw new AssertionError("Should not have received a dispatched request"); + } + + }; + + Settings settings = createBuilderWithPort().put( + HttpTransportSettings.SETTING_HTTP_CONNECT_TIMEOUT.getKey(), + new TimeValue(randomIntBetween(100, 300)) + ).build(); + + NioEventLoopGroup group = new NioEventLoopGroup(); + try ( + ReactorNetty4HttpServerTransport transport = new ReactorNetty4HttpServerTransport( + settings, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + dispatcher, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + new SharedGroupFactory(settings), + NoopTracer.INSTANCE + ) + ) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + + final CountDownLatch channelClosedLatch = new CountDownLatch(1); + + final Bootstrap clientBootstrap = new Bootstrap().option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator()) + .channel(NioSocketChannel.class) + .handler(new 
ChannelInitializer<SocketChannel>() { + + @Override + protected void initChannel(SocketChannel ch) { + ch.pipeline().addLast(new ChannelHandlerAdapter() { + }); + + } + }) + .group(group); + ChannelFuture connect = clientBootstrap.connect(remoteAddress.address()); + connect.channel().closeFuture().addListener(future -> channelClosedLatch.countDown()); + + assertTrue("Channel should be closed due to read timeout", channelClosedLatch.await(1, TimeUnit.MINUTES)); + + } finally { + group.shutdownGracefully().await(); + } + } + + private Settings createSettings() { + return createBuilderWithPort().build(); + } + + private Settings.Builder createBuilderWithPort() { + return Settings.builder().put(HttpTransportSettings.SETTING_HTTP_PORT.getKey(), getPortRange()); + } +} diff --git a/qa/ccs-unavailable-clusters/src/test/java/org/opensearch/search/CrossClusterSearchUnavailableClusterIT.java b/qa/ccs-unavailable-clusters/src/test/java/org/opensearch/search/CrossClusterSearchUnavailableClusterIT.java index 07cd901449a18..ea20d8000f640 100644 --- a/qa/ccs-unavailable-clusters/src/test/java/org/opensearch/search/CrossClusterSearchUnavailableClusterIT.java +++ b/qa/ccs-unavailable-clusters/src/test/java/org/opensearch/search/CrossClusterSearchUnavailableClusterIT.java @@ -61,12 +61,13 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; -import org.opensearch.common.Strings; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.search.aggregations.InternalAggregations; import org.opensearch.search.internal.InternalSearchResponse; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.rest.OpenSearchRestTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; @@ -118,7 +119,7 @@ private static MockTransportService startTransport( boolean success = false; final Settings s = Settings.builder().put("node.name", id).build(); ClusterName clusterName = ClusterName.CLUSTER_NAME_SETTING.get(s); - MockTransportService newService = MockTransportService.createNewService(s, version, threadPool, null); + MockTransportService newService = MockTransportService.createNewService(s, version, threadPool, NoopTracer.INSTANCE); try { newService.registerRequestHandler(ClusterSearchShardsAction.NAME, ThreadPool.Names.SAME, ClusterSearchShardsRequest::new, (request, channel, task) -> { @@ -341,7 +342,7 @@ private static HttpEntity buildUpdateSettingsRequestBody(Map<String, Object> set builder.endObject(); } builder.endObject(); - requestBody = Strings.toString(builder); + requestBody = builder.toString(); } return new StringEntity(requestBody, ContentType.APPLICATION_JSON); } diff --git a/qa/evil-tests/src/test/java/org/opensearch/bootstrap/OpenSearchPolicyUnitTests.java b/qa/evil-tests/src/test/java/org/opensearch/bootstrap/OpenSearchPolicyUnitTests.java index db532f9a1c503..4ea23e415c994 100644 --- a/qa/evil-tests/src/test/java/org/opensearch/bootstrap/OpenSearchPolicyUnitTests.java +++ b/qa/evil-tests/src/test/java/org/opensearch/bootstrap/OpenSearchPolicyUnitTests.java @@ -50,6 +50,7 @@ * Unit tests for OpenSearchPolicy: these cannot run with security manager, * we don't allow messing with the policy */ +@SuppressWarnings("removal") public class OpenSearchPolicyUnitTests 
extends OpenSearchTestCase { /** * Test policy with null codesource. diff --git a/qa/evil-tests/src/test/java/org/opensearch/bootstrap/SystemCallFilterTests.java b/qa/evil-tests/src/test/java/org/opensearch/bootstrap/SystemCallFilterTests.java index 56d6c72705a78..99c9ee7e96d01 100644 --- a/qa/evil-tests/src/test/java/org/opensearch/bootstrap/SystemCallFilterTests.java +++ b/qa/evil-tests/src/test/java/org/opensearch/bootstrap/SystemCallFilterTests.java @@ -41,6 +41,7 @@ public class SystemCallFilterTests extends OpenSearchTestCase { /** command to try to run in tests */ static final String EXECUTABLE = Constants.WINDOWS ? "calc" : "ls"; + @SuppressWarnings("removal") @Override public void setUp() throws Exception { super.setUp(); diff --git a/qa/evil-tests/src/test/java/org/opensearch/plugins/PluginSecurityTests.java b/qa/evil-tests/src/test/java/org/opensearch/plugins/PluginSecurityTests.java index 430df1f899708..04eae95f6fe12 100644 --- a/qa/evil-tests/src/test/java/org/opensearch/plugins/PluginSecurityTests.java +++ b/qa/evil-tests/src/test/java/org/opensearch/plugins/PluginSecurityTests.java @@ -41,6 +41,7 @@ import static org.hamcrest.Matchers.containsInAnyOrder; /** Tests plugin manager security check */ +@SuppressWarnings("removal") public class PluginSecurityTests extends OpenSearchTestCase { /** Test that we can parse the set of permissions correctly for a simple policy */ diff --git a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java index 5fef24e75d8b7..cbd13357fedd9 100644 --- a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java @@ -41,11 +41,12 @@ import org.opensearch.cluster.metadata.MetadataIndexStateService; import org.opensearch.common.Booleans; import org.opensearch.common.CheckedFunction; -import org.opensearch.common.Strings; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.common.xcontent.support.XContentMapValues; +import org.opensearch.core.common.Strings; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexSettings; import org.opensearch.test.NotEqualMessageBuilder; @@ -149,7 +150,7 @@ public void testSearch() throws Exception { mappingsAndSettings.endObject(); Request createIndex = new Request("PUT", "/" + index); - createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); + createIndex.setJsonEntity(mappingsAndSettings.toString()); createIndex.setOptions(allowTypesRemovalWarnings()); client().performRequest(createIndex); @@ -208,7 +209,7 @@ public void testNewReplicasWork() throws Exception { mappingsAndSettings.endObject(); Request createIndex = new Request("PUT", "/" + index); - createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); + createIndex.setJsonEntity(mappingsAndSettings.toString()); client().performRequest(createIndex); int numDocs = randomIntBetween(2000, 3000); @@ -257,7 +258,7 @@ public void testClusterState() throws Exception { } mappingsAndSettings.endObject(); Request createTemplate = new Request("PUT", "/_template/template_1"); - createTemplate.setJsonEntity(Strings.toString(mappingsAndSettings)); + 
createTemplate.setJsonEntity(mappingsAndSettings.toString()); client().performRequest(createTemplate); client().performRequest(new Request("PUT", "/" + index)); } @@ -315,7 +316,7 @@ public void testShrink() throws IOException, NumberFormatException, ParseExcepti mappingsAndSettings.endObject(); Request createIndex = new Request("PUT", "/" + index); - createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); + createIndex.setJsonEntity(mappingsAndSettings.toString()); client().performRequest(createIndex); numDocs = randomIntBetween(512, 1024); @@ -384,7 +385,7 @@ public void testShrinkAfterUpgrade() throws IOException, ParseException { mappingsAndSettings.endObject(); Request createIndex = new Request("PUT", "/" + index); - createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); + createIndex.setJsonEntity(mappingsAndSettings.toString()); client().performRequest(createIndex); numDocs = randomIntBetween(512, 1024); @@ -854,7 +855,7 @@ public void testSnapshotRestore() throws IOException, ParseException { } templateBuilder.endObject().endObject(); Request createTemplateRequest = new Request("PUT", "/_template/test_template"); - createTemplateRequest.setJsonEntity(Strings.toString(templateBuilder)); + createTemplateRequest.setJsonEntity(templateBuilder.toString()); client().performRequest(createTemplateRequest); @@ -870,7 +871,7 @@ public void testSnapshotRestore() throws IOException, ParseException { } repoConfig.endObject(); Request createRepoRequest = new Request("PUT", "/_snapshot/repo"); - createRepoRequest.setJsonEntity(Strings.toString(repoConfig)); + createRepoRequest.setJsonEntity(repoConfig.toString()); client().performRequest(createRepoRequest); } @@ -897,7 +898,7 @@ public void testHistoryUUIDIsAdded() throws Exception { } mappingsAndSettings.endObject(); Request createIndex = new Request("PUT", "/" + index); - createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); + createIndex.setJsonEntity(mappingsAndSettings.toString()); client().performRequest(createIndex); } else { ensureGreenLongWait(index); @@ -940,11 +941,11 @@ public void testSoftDeletes() throws Exception { } mappingsAndSettings.endObject(); Request createIndex = new Request("PUT", "/" + index); - createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); + createIndex.setJsonEntity(mappingsAndSettings.toString()); client().performRequest(createIndex); int numDocs = between(10, 100); for (int i = 0; i < numDocs; i++) { - String doc = Strings.toString(JsonXContent.contentBuilder().startObject().field("field", "v1").endObject()); + String doc = JsonXContent.contentBuilder().startObject().field("field", "v1").endObject().toString(); Request request = new Request("POST", "/" + index + "/_doc/" + i); request.setJsonEntity(doc); client().performRequest(request); @@ -955,7 +956,7 @@ public void testSoftDeletes() throws Exception { assertTotalHits(liveDocs, entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search")))); for (int i = 0; i < numDocs; i++) { if (randomBoolean()) { - String doc = Strings.toString(JsonXContent.contentBuilder().startObject().field("field", "v2").endObject()); + String doc = JsonXContent.contentBuilder().startObject().field("field", "v2").endObject().toString(); Request request = new Request("POST", "/" + index + "/_doc/" + i); request.setJsonEntity(doc); client().performRequest(request); @@ -989,7 +990,7 @@ public void testClosedIndices() throws Exception { numDocs = between(1, 100); for (int i = 0; i < numDocs; i++) { final Request request = 
new Request("POST", "/" + index + "/" + type + "/" + i); - request.setJsonEntity(Strings.toString(JsonXContent.contentBuilder().startObject().field("field", "v1").endObject())); + request.setJsonEntity(JsonXContent.contentBuilder().startObject().field("field", "v1").endObject().toString()); assertOK(client().performRequest(request)); if (rarely()) { refresh(); @@ -1083,7 +1084,7 @@ private void checkSnapshot(final String snapshotName, final int count, final Ver restoreCommand.endObject(); Request restoreRequest = new Request("POST", "/_snapshot/repo/" + snapshotName + "/_restore"); restoreRequest.addParameter("wait_for_completion", "true"); - restoreRequest.setJsonEntity(Strings.toString(restoreCommand)); + restoreRequest.setJsonEntity(restoreCommand.toString()); client().performRequest(restoreRequest); // Make sure search finds all documents @@ -1158,7 +1159,7 @@ private void indexRandomDocuments( for (int i = 0; i < count; i++) { logger.debug("Indexing document [{}]", i); Request createDocument = new Request("POST", "/" + index + "/_doc/" + (specifyId ? i : "")); - createDocument.setJsonEntity(Strings.toString(docSupplier.apply(i))); + createDocument.setJsonEntity(docSupplier.apply(i).toString()); client().performRequest(createDocument); if (rarely()) { refreshAllIndices(); @@ -1175,7 +1176,7 @@ private void indexRandomDocuments( private void indexDocument(String id) throws IOException { final Request indexRequest = new Request("POST", "/" + index + "/" + type + "/" + id); - indexRequest.setJsonEntity(Strings.toString(JsonXContent.contentBuilder().startObject().field("f", "v").endObject())); + indexRequest.setJsonEntity(JsonXContent.contentBuilder().startObject().field("f", "v").endObject().toString()); assertOK(client().performRequest(indexRequest)); } @@ -1190,7 +1191,7 @@ private void saveInfoDocument(String id, String value) throws IOException { // Only create the first version so we know how many documents are created when the index is first created Request request = new Request("PUT", "/info/" + type + "/" + id); request.addParameter("op_type", "create"); - request.setJsonEntity(Strings.toString(infoDoc)); + request.setJsonEntity(infoDoc.toString()); client().performRequest(request); } @@ -1255,7 +1256,7 @@ public void testPeerRecoveryRetentionLeases() throws Exception { settings.endObject(); Request createIndex = new Request("PUT", "/" + index); - createIndex.setJsonEntity(Strings.toString(settings)); + createIndex.setJsonEntity(settings.toString()); client().performRequest(createIndex); } ensureGreen(index); @@ -1365,7 +1366,7 @@ public void testResize() throws Exception { if (randomBoolean()) { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true); } - shrinkRequest.setJsonEntity("{\"settings\":" + Strings.toString(XContentType.JSON, settings.build()) + "}"); + shrinkRequest.setJsonEntity("{\"settings\":" + Strings.toString(MediaTypeRegistry.JSON, settings.build()) + "}"); client().performRequest(shrinkRequest); ensureGreenLongWait(target); assertNumHits(target, numDocs + moreDocs, 1); @@ -1377,7 +1378,7 @@ public void testResize() throws Exception { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true); } Request splitRequest = new Request("PUT", "/" + index + "/_split/" + target); - splitRequest.setJsonEntity("{\"settings\":" + Strings.toString(XContentType.JSON, settings.build()) + "}"); + splitRequest.setJsonEntity("{\"settings\":" + Strings.toString(MediaTypeRegistry.JSON, settings.build()) + "}"); client().performRequest(splitRequest); 
ensureGreenLongWait(target); assertNumHits(target, numDocs + moreDocs, 6); @@ -1485,7 +1486,7 @@ public void testEnableSoftDeletesOnRestore() throws Exception { } repoConfig.endObject(); Request createRepoRequest = new Request("PUT", "/_snapshot/repo"); - createRepoRequest.setJsonEntity(Strings.toString(repoConfig)); + createRepoRequest.setJsonEntity(repoConfig.toString()); client().performRequest(createRepoRequest); // create snapshot Request createSnapshot = new Request("PUT", "/_snapshot/repo/" + snapshot); @@ -1507,7 +1508,7 @@ restoreCommand.endObject(); Request restoreRequest = new Request("POST", "/_snapshot/repo/" + snapshot + "/_restore"); restoreRequest.addParameter("wait_for_completion", "true"); - restoreRequest.setJsonEntity(Strings.toString(restoreCommand)); + restoreRequest.setJsonEntity(restoreCommand.toString()); client().performRequest(restoreRequest); ensureGreen(restoredIndex); int numDocs = countOfIndexedRandomDocuments(); @@ -1539,7 +1540,7 @@ public void testForbidDisableSoftDeletesOnRestore() throws Exception { } repoConfig.endObject(); Request createRepoRequest = new Request("PUT", "/_snapshot/repo"); - createRepoRequest.setJsonEntity(Strings.toString(repoConfig)); + createRepoRequest.setJsonEntity(repoConfig.toString()); client().performRequest(createRepoRequest); // create snapshot Request createSnapshot = new Request("PUT", "/_snapshot/repo/" + snapshot); @@ -1560,7 +1561,7 @@ restoreCommand.endObject(); Request restoreRequest = new Request("POST", "/_snapshot/repo/" + snapshot + "/_restore"); restoreRequest.addParameter("wait_for_completion", "true"); - restoreRequest.setJsonEntity(Strings.toString(restoreCommand)); + restoreRequest.setJsonEntity(restoreCommand.toString()); final ResponseException error = expectThrows(ResponseException.class, () -> client().performRequest(restoreRequest)); assertThat(error.getMessage(), containsString("cannot disable setting [index.soft_deletes.enabled] on restore")); } } diff --git a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/PluginInfoIT.java b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/PluginInfoIT.java new file mode 100644 index 0000000000000..d4e7017aab8c2 --- /dev/null +++ b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/PluginInfoIT.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.upgrades; + +import org.opensearch.client.Request; +import org.opensearch.client.Response; +import org.opensearch.test.rest.yaml.ObjectPath; + +import java.util.Map; + +public class PluginInfoIT extends AbstractFullClusterRestartTestCase { + public void testPluginInfoSerialization() throws Exception { + // Ensure all nodes are able to come up, validate with GET _nodes. + Response response = client().performRequest(new Request("GET", "_nodes")); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + final Map<String, Object> nodeMap = objectPath.evaluate("nodes"); + // Any issue in PluginInfo serialization logic will result in connection failures + // and hence a reduced number of nodes.
+ assertEquals(2, nodeMap.keySet().size()); + } +} diff --git a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java index 724ac9883efaa..2675e9b62de35 100644 --- a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java +++ b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java @@ -36,7 +36,6 @@ import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.client.Request; import org.opensearch.client.Response; -import org.opensearch.common.Strings; import org.opensearch.core.common.io.stream.InputStreamStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; @@ -79,7 +78,7 @@ * PercolatorFieldMapper#createQueryBuilderField(...) method). Using the query builders writable contract. This test * does best effort verifying that we don't break bwc for query builders between the first previous major version and * the latest current major release. - * + * <p> * The queries to test are specified in json format, which turns out to work because we tend break here rarely. If the * json format of a query being tested here then feel free to change this. */ @@ -199,7 +198,7 @@ public void testQueryBuilderBWC() throws Exception { mappingsAndSettings.endObject(); Request request = new Request("PUT", "/" + index); request.setOptions(allowTypesRemovalWarnings()); - request.setJsonEntity(Strings.toString(mappingsAndSettings)); + request.setJsonEntity(mappingsAndSettings.toString()); Response rsp = client().performRequest(request); assertEquals(200, rsp.getStatusLine().getStatusCode()); diff --git a/qa/logging-config/src/test/java/org/opensearch/qa/custom_logging/CustomLoggingConfigIT.java b/qa/logging-config/src/test/java/org/opensearch/qa/custom_logging/CustomLoggingConfigIT.java index 73c546b80d431..c2f799d7d48d2 100644 --- a/qa/logging-config/src/test/java/org/opensearch/qa/custom_logging/CustomLoggingConfigIT.java +++ b/qa/logging-config/src/test/java/org/opensearch/qa/custom_logging/CustomLoggingConfigIT.java @@ -62,6 +62,7 @@ public void testSuccessfulStartupWithCustomConfig() throws Exception { }); } + @SuppressWarnings("removal") private List<String> readAllLines(Path logFile) { return AccessController.doPrivileged((PrivilegedAction<List<String>>) () -> { try { diff --git a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java index f9810e027bb1e..13c2daeec37af 100644 --- a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java +++ b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java @@ -42,10 +42,10 @@ import org.opensearch.client.ResponseException; import org.opensearch.client.RestClient; import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.Strings; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.common.xcontent.support.XContentMapValues; +import org.opensearch.core.common.Strings; import org.opensearch.index.seqno.SeqNoStats; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.core.rest.RestStatus; @@ -67,13 +67,14 @@ public class IndexingIT extends OpenSearchRestTestCase { protected static final Version UPGRADE_FROM_VERSION = 
Version.fromString(System.getProperty("tests.upgrade_from_version")); + private static final String TEST_MAPPING = createTestMapping(); private int indexDocs(String index, final int idStart, final int numDocs) throws IOException { for (int i = 0; i < numDocs; i++) { final int id = idStart + i; Request request = new Request("PUT", index + "/_doc/" + id); - request.setJsonEntity("{\"test\": \"test_" + randomAlphaOfLength(2) + "\"}"); + request.setJsonEntity("{\"test\": \"test_" + randomAlphaOfLength(2) + "\", \"sortfield\": \""+ randomIntBetween(0, numDocs) + "\"}"); assertOK(client().performRequest(request)); } return numDocs; @@ -129,9 +130,10 @@ public void testIndexingWithPrimaryOnBwcNodes() throws Exception { .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .putList("index.sort.field", "sortfield") .put("index.routing.allocation.include._name", bwcNames); final String index = "test-index"; - createIndex(index, settings.build()); + createIndex(index, settings.build(), TEST_MAPPING); ensureNoInitializingShards(); // wait for all other shard activity to finish int docCount = 200; @@ -178,9 +180,10 @@ public void testIndexingWithReplicaOnBwcNodes() throws Exception { .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .putList("index.sort.field", "sortfield") .put("index.routing.allocation.exclude._name", bwcNames); final String index = "test-index"; - createIndex(index, settings.build()); + createIndex(index, settings.build(), TEST_MAPPING); ensureNoInitializingShards(); // wait for all other shard activity to finish printClusterRouting(); @@ -214,11 +217,12 @@ public void testIndexVersionPropagation() throws Exception { Settings.Builder settings = Settings.builder() .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2) + .putList("index.sort.field", "sortfield") .put("index.routing.allocation.include._name", bwcNames); final String index = "indexversionprop"; final int minUpdates = 5; final int maxUpdates = 10; - createIndex(index, settings.build()); + createIndex(index, settings.build(), TEST_MAPPING); try (RestClient newNodeClient = buildClient(restClientSettings(), nodes.getNewNodes().stream().map(Node::getPublishAddress).toArray(HttpHost[]::new))) { @@ -300,10 +304,11 @@ public void testSeqNoCheckpoints() throws Exception { Settings.Builder settings = Settings.builder() .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2) + .putList("index.sort.field", "sortfield") .put("index.routing.allocation.include._name", bwcNames); final String index = "test"; - createIndex(index, settings.build()); + createIndex(index, settings.build(), TEST_MAPPING); try (RestClient newNodeClient = buildClient(restClientSettings(), nodes.getNewNodes().stream().map(Node::getPublishAddress).toArray(HttpHost[]::new))) { int numDocs = 0; @@ -365,15 +370,14 @@ public void testUpdateSnapshotStatus() throws Exception { // Create the repository before taking the snapshot. 
Request request = new Request("PUT", "/_snapshot/repo"); - request.setJsonEntity(Strings - .toString(JsonXContent.contentBuilder() + request.setJsonEntity(JsonXContent.contentBuilder() .startObject() .field("type", "fs") .startObject("settings") .field("compress", randomBoolean()) .field("location", System.getProperty("tests.path.repo")) .endObject() - .endObject())); + .endObject().toString()); assertOK(client().performRequest(request)); @@ -383,10 +387,11 @@ public void testUpdateSnapshotStatus() throws Exception { Settings.Builder settings = Settings.builder() .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), between(5, 10)) .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) + .putList("index.sort.field", "sortfield") .put("index.routing.allocation.include._name", bwcNames); final String index = "test-snapshot-index"; - createIndex(index, settings.build()); + createIndex(index, settings.build(), TEST_MAPPING); indexDocs(index, 0, between(50, 100)); ensureGreen(index); assertOK(client().performRequest(new Request("POST", index + "/_refresh"))); @@ -420,7 +425,8 @@ public void testSyncedFlushTransition() throws Exception { createIndex(index, Settings.builder() .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), numShards) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numOfReplicas) - .put("index.routing.allocation.include._name", newNodes).build()); + .putList("index.sort.field", "sortfield") + .put("index.routing.allocation.include._name", newNodes).build(), TEST_MAPPING); ensureGreen(index); indexDocs(index, randomIntBetween(0, 100), between(1, 100)); try (RestClient oldNodeClient = buildClient(restClientSettings(), @@ -665,4 +671,15 @@ public String toString() { '}'; } } + + private static String createTestMapping() { + return " \"properties\": {\n" + + " \"test\": {\n" + + " \"type\": \"text\"\n" + + " },\n" + + " \"sortfield\": {\n" + + " \"type\": \"integer\"\n" + + " }\n" + + " }"; + } } diff --git a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/PluginInfoIT.java b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/PluginInfoIT.java new file mode 100644 index 0000000000000..47e454a7549cb --- /dev/null +++ b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/PluginInfoIT.java @@ -0,0 +1,26 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.backwards; + +import org.opensearch.client.Request; +import org.opensearch.client.Response; +import org.opensearch.test.rest.OpenSearchRestTestCase; +import org.opensearch.test.rest.yaml.ObjectPath; + +import java.util.Map; + +public class PluginInfoIT extends OpenSearchRestTestCase { + public void testPluginInfoSerialization() throws Exception { + // Ensure all nodes are able to come up, validate with GET _nodes. 
+ Response response = client().performRequest(new Request("GET", "_nodes")); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + final Map<String, Object> nodeMap = objectPath.evaluate("nodes"); + assertEquals(4, nodeMap.keySet().size()); + } +} diff --git a/qa/multi-cluster-search/src/test/java/org/opensearch/search/CCSDuelIT.java b/qa/multi-cluster-search/src/test/java/org/opensearch/search/CCSDuelIT.java index 5f52250c7fb0c..3dc2444d8a16e 100644 --- a/qa/multi-cluster-search/src/test/java/org/opensearch/search/CCSDuelIT.java +++ b/qa/multi-cluster-search/src/test/java/org/opensearch/search/CCSDuelIT.java @@ -36,7 +36,7 @@ import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.tests.util.TimeUnits; -import org.opensearch.action.ActionListener; +import org.opensearch.core.action.ActionListener; import org.opensearch.action.LatchedActionListener; import org.opensearch.action.admin.indices.refresh.RefreshRequest; import org.opensearch.action.admin.indices.refresh.RefreshResponse; @@ -53,12 +53,12 @@ import org.opensearch.client.RestHighLevelClient; import org.opensearch.client.indices.CreateIndexRequest; import org.opensearch.client.indices.CreateIndexResponse; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentHelper; import org.opensearch.index.query.InnerHitBuilder; import org.opensearch.index.query.MatchQueryBuilder; import org.opensearch.index.query.QueryBuilders; @@ -198,7 +198,7 @@ private static void indexDocuments(String idPrefix) throws IOException, Interrup createIndexRequest.mapping("{\"properties\":{" + "\"id\":{\"type\":\"keyword\"}," + "\"suggest\":{\"type\":\"completion\"}," + - "\"join\":{\"type\":\"join\", \"relations\": {\"question\":\"answer\"}}}}", XContentType.JSON); + "\"join\":{\"type\":\"join\", \"relations\": {\"question\":\"answer\"}}}}", MediaTypeRegistry.JSON); CreateIndexResponse createIndexResponse = restHighLevelClient.indices().create(createIndexRequest, RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); @@ -255,7 +255,7 @@ private static IndexRequest buildIndexRequest(String id, String type, String que if (questionId != null) { joinField.put("parent", questionId); } - indexRequest.source(XContentType.JSON, + indexRequest.source(MediaTypeRegistry.JSON, "id", id, "type", type, "votes", randomIntBetween(0, 30), @@ -726,7 +726,7 @@ public void testCompletionSuggester() throws Exception { sourceBuilder.suggest(suggestBuilder); duelSearch(searchRequest, response -> { assertMultiClusterSearchResponse(response); - assertEquals(Strings.toString(XContentType.JSON, response, true, true), 3, response.getSuggest().size()); + assertEquals(Strings.toString(MediaTypeRegistry.JSON, response, true, true), 3, response.getSuggest().size()); assertThat(response.getSuggest().getSuggestion("python").getEntries().size(), greaterThan(0)); assertThat(response.getSuggest().getSuggestion("java").getEntries().size(), greaterThan(0)); assertThat(response.getSuggest().getSuggestion("ruby").getEntries().size(), 
greaterThan(0)); @@ -827,8 +827,8 @@ private static void assertAggs(SearchResponse response) { @SuppressWarnings("unchecked") private static Map<String, Object> responseToMap(SearchResponse response) throws IOException { - BytesReference bytesReference = XContentHelper.toXContent(response, XContentType.JSON, false); - Map<String, Object> responseMap = XContentHelper.convertToMap(bytesReference, false, XContentType.JSON).v2(); + BytesReference bytesReference = XContentHelper.toXContent(response, MediaTypeRegistry.JSON, false); + Map<String, Object> responseMap = org.opensearch.common.xcontent.XContentHelper.convertToMap(bytesReference, false, MediaTypeRegistry.JSON).v2(); assertNotNull(responseMap.put("took", -1)); responseMap.remove("num_reduce_phases"); Map<String, Object> profile = (Map<String, Object>)responseMap.get("profile"); diff --git a/qa/no-bootstrap-tests/src/test/java/org/opensearch/bootstrap/SpawnerNoBootstrapTests.java b/qa/no-bootstrap-tests/src/test/java/org/opensearch/bootstrap/SpawnerNoBootstrapTests.java index c3c332aecfd4c..8ca90791f649e 100644 --- a/qa/no-bootstrap-tests/src/test/java/org/opensearch/bootstrap/SpawnerNoBootstrapTests.java +++ b/qa/no-bootstrap-tests/src/test/java/org/opensearch/bootstrap/SpawnerNoBootstrapTests.java @@ -65,7 +65,7 @@ /** * Create a simple "daemon controller", put it in the right place and check that it runs. - * + * <p> * Extends LuceneTestCase rather than OpenSearchTestCase as OpenSearchTestCase installs a system call filter, and * that prevents the Spawner class from doing its job. Also needs to run in a separate JVM to other * tests that extend OpenSearchTestCase for the same reason. diff --git a/qa/os/build.gradle b/qa/os/build.gradle index 66c6525439dac..082ed5277575a 100644 --- a/qa/os/build.gradle +++ b/qa/os/build.gradle @@ -70,6 +70,11 @@ tasks.dependenciesInfo.enabled = false tasks.thirdPartyAudit.ignoreMissingClasses() +tasks.thirdPartyAudit.ignoreViolations( + 'org.apache.logging.log4j.core.util.internal.UnsafeUtil', + 'org.apache.logging.log4j.core.util.internal.UnsafeUtil$1' +) + tasks.register('destructivePackagingTest') { dependsOn 'destructiveDistroTest' } diff --git a/qa/os/src/test/java/org/opensearch/packaging/test/DebMetadataTests.java b/qa/os/src/test/java/org/opensearch/packaging/test/DebMetadataTests.java index 299f23e83db41..0db5aec7b1d0e 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/test/DebMetadataTests.java +++ b/qa/os/src/test/java/org/opensearch/packaging/test/DebMetadataTests.java @@ -32,7 +32,6 @@ package org.opensearch.packaging.test; -import junit.framework.TestCase; import org.opensearch.packaging.util.Distribution; import org.opensearch.packaging.util.FileUtils; import org.opensearch.packaging.util.Shell; @@ -40,6 +39,8 @@ import java.util.regex.Pattern; +import junit.framework.TestCase; + import static org.opensearch.packaging.util.FileUtils.getDistributionFile; import static org.junit.Assume.assumeTrue; diff --git a/qa/os/src/test/java/org/opensearch/packaging/test/DockerTests.java b/qa/os/src/test/java/org/opensearch/packaging/test/DockerTests.java index 725e430b3634c..be9741e660223 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/test/DockerTests.java +++ b/qa/os/src/test/java/org/opensearch/packaging/test/DockerTests.java @@ -33,6 +33,7 @@ package org.opensearch.packaging.test; import com.fasterxml.jackson.databind.JsonNode; + import org.opensearch.packaging.util.Installation; import org.opensearch.packaging.util.Platforms; import org.opensearch.packaging.util.ServerUtils; @@ 
-43,9 +44,13 @@ import org.junit.BeforeClass; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Arrays; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -85,11 +90,6 @@ import static org.junit.Assume.assumeFalse; import static org.junit.Assume.assumeTrue; -import java.nio.charset.StandardCharsets; -import java.nio.file.Paths; -import java.util.Arrays; -import java.util.HashSet; - public class DockerTests extends PackagingTestCase { private Path tempDir; diff --git a/qa/os/src/test/java/org/opensearch/packaging/test/KeystoreManagementTests.java b/qa/os/src/test/java/org/opensearch/packaging/test/KeystoreManagementTests.java index fee3b0bc501d9..4970a874ffb1a 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/test/KeystoreManagementTests.java +++ b/qa/os/src/test/java/org/opensearch/packaging/test/KeystoreManagementTests.java @@ -52,7 +52,6 @@ import java.util.List; import java.util.Map; -import static com.carrotsearch.randomizedtesting.RandomizedTest.randomBoolean; import static java.util.Collections.singletonList; import static org.opensearch.packaging.util.Archives.ARCHIVE_OWNER; import static org.opensearch.packaging.util.Archives.installArchive; @@ -76,6 +75,7 @@ import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.startsWith; +import static com.carrotsearch.randomizedtesting.RandomizedTest.randomBoolean; import static org.junit.Assume.assumeThat; import static org.junit.Assume.assumeTrue; diff --git a/qa/os/src/test/java/org/opensearch/packaging/test/PackagingTestCase.java b/qa/os/src/test/java/org/opensearch/packaging/test/PackagingTestCase.java index 7551bddc3226c..4bb3877fc04a8 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/test/PackagingTestCase.java +++ b/qa/os/src/test/java/org/opensearch/packaging/test/PackagingTestCase.java @@ -39,6 +39,7 @@ import com.carrotsearch.randomizedtesting.annotations.TestGroup; import com.carrotsearch.randomizedtesting.annotations.TestMethodProviders; import com.carrotsearch.randomizedtesting.annotations.Timeout; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.common.CheckedConsumer; @@ -440,7 +441,7 @@ public static Path createTempDir(String prefix) throws IOException { /** * Run the given action with a temporary copy of the config directory. - * + * <p> * Files under the path passed to the action may be modified as necessary for the * test to execute, and running OpenSearch with {@link #startOpenSearch()} will * use the temporary directory. 
diff --git a/qa/os/src/test/java/org/opensearch/packaging/test/RpmMetadataTests.java b/qa/os/src/test/java/org/opensearch/packaging/test/RpmMetadataTests.java index 5eb8ffe8817ee..f95a79efa51ef 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/test/RpmMetadataTests.java +++ b/qa/os/src/test/java/org/opensearch/packaging/test/RpmMetadataTests.java @@ -32,7 +32,6 @@ package org.opensearch.packaging.test; -import junit.framework.TestCase; import org.opensearch.packaging.util.Distribution; import org.opensearch.packaging.util.Platforms; import org.opensearch.packaging.util.Shell; @@ -40,6 +39,8 @@ import java.util.regex.Pattern; +import junit.framework.TestCase; + import static org.opensearch.packaging.util.FileUtils.getDistributionFile; import static org.junit.Assume.assumeTrue; diff --git a/qa/os/src/test/java/org/opensearch/packaging/test/WindowsServiceTests.java b/qa/os/src/test/java/org/opensearch/packaging/test/WindowsServiceTests.java index 57ea853e735a9..f4bb9d70354a0 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/test/WindowsServiceTests.java +++ b/qa/os/src/test/java/org/opensearch/packaging/test/WindowsServiceTests.java @@ -32,7 +32,6 @@ package org.opensearch.packaging.test; -import junit.framework.TestCase; import org.opensearch.packaging.util.FileUtils; import org.opensearch.packaging.util.Platforms; import org.opensearch.packaging.util.ServerUtils; @@ -46,13 +45,15 @@ import java.nio.file.Path; import java.util.Arrays; -import static com.carrotsearch.randomizedtesting.RandomizedTest.assumeTrue; +import junit.framework.TestCase; + import static org.opensearch.packaging.util.Archives.installArchive; import static org.opensearch.packaging.util.Archives.verifyArchiveInstallation; import static org.opensearch.packaging.util.FileUtils.append; import static org.opensearch.packaging.util.FileUtils.mv; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; +import static com.carrotsearch.randomizedtesting.RandomizedTest.assumeTrue; public class WindowsServiceTests extends PackagingTestCase { diff --git a/qa/os/src/test/java/org/opensearch/packaging/util/Docker.java b/qa/os/src/test/java/org/opensearch/packaging/util/Docker.java index ef5b1e590635a..6517062c0ca50 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/util/Docker.java +++ b/qa/os/src/test/java/org/opensearch/packaging/util/Docker.java @@ -34,6 +34,7 @@ import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.http.client.fluent.Request; diff --git a/qa/os/src/test/java/org/opensearch/packaging/util/FileMatcher.java b/qa/os/src/test/java/org/opensearch/packaging/util/FileMatcher.java index 7904d1a046916..958de24848178 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/util/FileMatcher.java +++ b/qa/os/src/test/java/org/opensearch/packaging/util/FileMatcher.java @@ -51,7 +51,7 @@ /** * Asserts that a file at a path matches its status as Directory/File, and its owner. If on a posix system, also matches the permission * set is what we expect. 
- * + * <p> * This class saves information about its failed matches in instance variables and so instances should not be reused */ public class FileMatcher extends TypeSafeMatcher<Path> { diff --git a/qa/os/src/test/java/org/opensearch/packaging/util/Installation.java b/qa/os/src/test/java/org/opensearch/packaging/util/Installation.java index 25cefa948ff10..26af39d66cad3 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/util/Installation.java +++ b/qa/os/src/test/java/org/opensearch/packaging/util/Installation.java @@ -137,7 +137,7 @@ public static Installation ofContainer(Shell sh, Distribution distribution) { /** * Returns the user that owns this installation. - * + * <p> * For packages this is root, and for archives it is the user doing the installation. */ public String getOwner() { diff --git a/qa/os/src/test/java/org/opensearch/packaging/util/Packages.java b/qa/os/src/test/java/org/opensearch/packaging/util/Packages.java index b80ae422bda9a..e9ebf28042b46 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/util/Packages.java +++ b/qa/os/src/test/java/org/opensearch/packaging/util/Packages.java @@ -194,11 +194,11 @@ private static void verifyInstallation(Installation opensearch, Distribution dis // we shell out here because java's posix file permission view doesn't support special modes assertThat(opensearch.config, file(Directory, "root", "opensearch", p750)); - assertThat(sh.run("find \"" + opensearch.config + "\" -maxdepth 0 -printf \"%m\"").stdout, containsString("2750")); + assertThat(sh.run("find \"" + opensearch.config + "\" -maxdepth 0 -printf \"%m\"").stdout, containsString("750")); final Path jvmOptionsDirectory = opensearch.config.resolve("jvm.options.d"); assertThat(jvmOptionsDirectory, file(Directory, "root", "opensearch", p750)); - assertThat(sh.run("find \"" + jvmOptionsDirectory + "\" -maxdepth 0 -printf \"%m\"").stdout, containsString("2750")); + assertThat(sh.run("find \"" + jvmOptionsDirectory + "\" -maxdepth 0 -printf \"%m\"").stdout, containsString("750")); Stream.of("opensearch.keystore", "opensearch.yml", "jvm.options", "log4j2.properties") .forEach(configFile -> assertThat(opensearch.config(configFile), file(File, "root", "opensearch", p660))); diff --git a/qa/os/src/test/java/org/opensearch/packaging/util/ServerUtils.java b/qa/os/src/test/java/org/opensearch/packaging/util/ServerUtils.java index d92feec21daaf..42eac9fdf4961 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/util/ServerUtils.java +++ b/qa/os/src/test/java/org/opensearch/packaging/util/ServerUtils.java @@ -52,6 +52,7 @@ import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.SSLContext; import javax.net.ssl.TrustManagerFactory; + import java.io.IOException; import java.io.InputStream; import java.nio.file.Files; diff --git a/qa/remote-clusters/src/test/java/org/opensearch/cluster/remote/test/RemoteClustersIT.java b/qa/remote-clusters/src/test/java/org/opensearch/cluster/remote/test/RemoteClustersIT.java index dbea8db1a12fa..c38fcc468c673 100644 --- a/qa/remote-clusters/src/test/java/org/opensearch/cluster/remote/test/RemoteClustersIT.java +++ b/qa/remote-clusters/src/test/java/org/opensearch/cluster/remote/test/RemoteClustersIT.java @@ -42,11 +42,13 @@ import org.opensearch.client.cluster.RemoteInfoRequest; import org.opensearch.client.indices.CreateIndexRequest; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentFactory; import org.junit.After; import org.junit.Before; import 
java.io.IOException; +import java.util.concurrent.TimeUnit; public class RemoteClustersIT extends AbstractMultiClusterRemoteTestCase { @@ -112,7 +114,7 @@ public void testSniffModeConnectionFails() throws IOException { assertFalse(rci.isConnected()); } - public void testHAProxyModeConnectionWorks() throws IOException { + public void testHAProxyModeConnectionWorks() throws Exception { String proxyAddress = "haproxy:9600"; logger.info("Configuring remote cluster [{}]", proxyAddress); ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest().persistentSettings(Settings.builder() @@ -121,12 +123,14 @@ public void testHAProxyModeConnectionWorks() throws IOException { .build()); assertTrue(cluster1Client().cluster().putSettings(request, RequestOptions.DEFAULT).isAcknowledged()); - RemoteConnectionInfo rci = cluster1Client().cluster().remoteInfo(new RemoteInfoRequest(), RequestOptions.DEFAULT).getInfos().get(0); - logger.info("Connection info: {}", rci); - if (!rci.isConnected()) { - logger.info("Cluster health: {}", cluster1Client().cluster().health(new ClusterHealthRequest(), RequestOptions.DEFAULT)); - } - assertTrue(rci.isConnected()); + assertBusy(() -> { + RemoteConnectionInfo rci = cluster1Client().cluster().remoteInfo(new RemoteInfoRequest(), RequestOptions.DEFAULT).getInfos().get(0); + logger.info("Connection info: {}", rci); + if (!rci.isConnected()) { + logger.info("Cluster health: {}", cluster1Client().cluster().health(new ClusterHealthRequest(), RequestOptions.DEFAULT)); + } + assertTrue(rci.isConnected()); + }, 10, TimeUnit.SECONDS); assertEquals(2L, cluster1Client().search( new SearchRequest("haproxynosn:test2"), RequestOptions.DEFAULT).getHits().getTotalHits().value); diff --git a/qa/rolling-upgrade/build.gradle b/qa/rolling-upgrade/build.gradle index 3dff452be855f..777377f04e8b9 100644 --- a/qa/rolling-upgrade/build.gradle +++ b/qa/rolling-upgrade/build.gradle @@ -62,6 +62,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.wireCompatible) { setting 'repositories.url.allowed_urls', 'http://snapshot.test*' setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" setting 'http.content_type.required', 'true' + systemProperty 'opensearch.experimental.optimize_doc_id_lookup.fuzzy_set.enabled', 'true' } } diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java index b60ee09d39048..8e8734b5d62b3 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java @@ -40,6 +40,7 @@ import org.opensearch.common.Booleans; import org.opensearch.common.io.Streams; import org.opensearch.common.settings.Settings; +import org.opensearch.index.IndexSettings; import org.opensearch.index.codec.CodecService; import org.opensearch.index.engine.EngineConfig; import org.opensearch.indices.replication.common.ReplicationType; @@ -48,12 +49,14 @@ import java.io.IOException; import java.net.URISyntaxException; import java.nio.charset.StandardCharsets; +import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; import static org.opensearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; import static org.opensearch.rest.action.search.RestSearchAction.TOTAL_HITS_AS_INT_PARAM; +import static org.opensearch.test.OpenSearchIntegTestCase.CODECS; /** * Basic test that indexed documents survive the rolling 
restart. See @@ -95,22 +98,33 @@ private void waitForSearchableDocs(String index, int shardCount, int replicaCoun // Verify segment store assertBusy(() -> { - /** - * Use default tabular output and sort response based on shard,segment,primaryOrReplica columns to allow line by - * line parsing where records related to a segment (e.g. _0) are chunked together with first record belonging - * to primary while remaining *replicaCount* records belongs to replica copies - * */ + /* + Use default tabular output and sort response based on shard,segment,primaryOrReplica columns to allow line by + line parsing where records related to a segment (e.g. _0) are chunked together with first record belonging + to primary while remaining *replicaCount* records belong to replica copies + */ Request segrepStatsRequest = new Request("GET", "/_cat/segments/" + index + "?s=shard,segment,primaryOrReplica"); segrepStatsRequest.addParameter("h", "index,shard,primaryOrReplica,segment,docs.count"); Response segrepStatsResponse = client().performRequest(segrepStatsRequest); - logger.info("--> _cat/segments response\n {}", EntityUtils.toString(segrepStatsResponse.getEntity())); List<String> responseList = Streams.readAllLines(segrepStatsResponse.getEntity().getContent()); - for (int segmentsIndex=0; segmentsIndex < responseList.size();) { - String[] primaryRow = responseList.get(segmentsIndex++).split(" +"); + logger.info("--> _cat/segments response\n {}", responseList.toString().replace(',', '\n')); + // Filter out rows with zero doc count + List<String> filteredList = new ArrayList<>(); + for(String row: responseList) { + String count = row.split(" +")[4]; + if (count.equals("0") == false) { + filteredList.add(row); + } + } + // Ensure there is a result for replica copies before processing the result. This results in a retry when there + // are not enough rows, instead of failing with IndexOutOfBoundsException + assertEquals(0, filteredList.size() % (replicaCount + 1)); + for (int segmentsIndex=0; segmentsIndex < filteredList.size();) { + String[] primaryRow = filteredList.get(segmentsIndex++).split(" +"); String shardId = primaryRow[0] + primaryRow[1]; assertTrue(primaryRow[2].equals("p")); for(int replicaIndex = 1; replicaIndex <= replicaCount; replicaIndex++) { - String[] replicaRow = responseList.get(segmentsIndex).split(" +"); + String[] replicaRow = filteredList.get(segmentsIndex).split(" +"); String replicaShardId = replicaRow[0] + replicaRow[1]; // When segment has 0 doc count, not all replica copies posses that segment. Skip to next segment if (replicaRow[2].equals("p")) { @@ -154,7 +168,7 @@ private void verifySegmentStats(String indexName) throws Exception { }, 1, TimeUnit.MINUTES); } - public void testIndexing() throws IOException, ParseException { + public void testIndexing() throws Exception { switch (CLUSTER_TYPE) { case OLD: break; @@ -245,9 +259,9 @@ public void testIndexing() throws IOException, ParseException { * This test verifies that during rolling upgrades the segment replication does not break when replica shards can * be running on older codec versions.
* - * @throws Exception + * @throws Exception if index creation fails + * @throws UnsupportedOperationException if cluster type is unknown */ - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8322") public void testIndexingWithSegRep() throws Exception { if (UPGRADE_FROM_VERSION.before(Version.V_2_4_0)) { logger.info("--> Skip test for version {} where segment replication feature is not available", UPGRADE_FROM_VERSION); return; } @@ -267,7 +281,11 @@ public void testIndexingWithSegRep() throws Exception { .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put( EngineConfig.INDEX_CODEC_SETTING.getKey(), - randomFrom(CodecService.DEFAULT_CODEC, CodecService.BEST_COMPRESSION_CODEC, CodecService.LUCENE_DEFAULT_CODEC) + randomFrom(new ArrayList<>(CODECS) { + { + add(CodecService.LUCENE_DEFAULT_CODEC); + } + }) ) .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms"); createIndex(indexName, settings.build()); @@ -326,6 +344,88 @@ public void testIndexingWithSegRep() throws Exception { } } + public void testIndexingWithFuzzyFilterPostings() throws Exception { + if (UPGRADE_FROM_VERSION.onOrBefore(Version.V_2_11_1)) { + logger.info("--> Skip test for version {} where fuzzy filter postings format feature is not available", UPGRADE_FROM_VERSION); + return; + } + final String indexName = "test-index-fuzzy-set"; + final int shardCount = 3; + final int replicaCount = 1; + logger.info("--> Case {}", CLUSTER_TYPE); + printClusterNodes(); + logger.info("--> _cat/shards before test execution \n{}", EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/shards?v")).getEntity())); + switch (CLUSTER_TYPE) { + case OLD: + Settings.Builder settings = Settings.builder() + .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), shardCount) + .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), replicaCount) + .put( + EngineConfig.INDEX_CODEC_SETTING.getKey(), + randomFrom(new ArrayList<>(CODECS) { + { + add(CodecService.LUCENE_DEFAULT_CODEC); + } + }) + ) + .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms"); + createIndex(indexName, settings.build()); + waitForClusterHealthWithNoShardMigration(indexName, "green"); + bulk(indexName, "_OLD", 5); + break; + case MIXED: + waitForClusterHealthWithNoShardMigration(indexName, "yellow"); + break; + case UPGRADED: + Settings.Builder settingsBuilder = Settings.builder() + .put(IndexSettings.INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING.getKey(), true); + updateIndexSettings(indexName, settingsBuilder); + waitForClusterHealthWithNoShardMigration(indexName, "green"); + break; + default: + throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]"); + } + + int expectedCount; + switch (CLUSTER_TYPE) { + case OLD: + expectedCount = 5; + break; + case MIXED: + if (Booleans.parseBoolean(System.getProperty("tests.first_round"))) { + expectedCount = 5; + } else { + expectedCount = 10; + } + break; + case UPGRADED: + expectedCount = 15; + break; + default: + throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]"); + } + + assertCount(indexName, expectedCount); + + if (CLUSTER_TYPE != ClusterType.OLD) { + bulk(indexName, "_" + CLUSTER_TYPE, 5); + logger.info("--> Index one doc (to be deleted next) and verify doc count"); + Request toBeDeleted = new Request("PUT", "/" + indexName + "/_doc/to_be_deleted"); + toBeDeleted.addParameter("refresh", "true"); + toBeDeleted.setJsonEntity("{\"f1\": \"delete-me\"}"); + client().performRequest(toBeDeleted);
+ assertCount(indexName, expectedCount + 6); + + logger.info("--> Delete previously added doc and verify doc count"); + Request delete = new Request("DELETE", "/" + indexName + "/_doc/to_be_deleted"); + delete.addParameter("refresh", "true"); + client().performRequest(delete); + assertCount(indexName, expectedCount + 5); + + //forceMergeAndVerify(indexName, shardCount * (1 + replicaCount)); + } + } + public void testAutoIdWithOpTypeCreate() throws IOException { final String indexName = "auto_id_and_op_type_create_index"; StringBuilder b = new StringBuilder(); @@ -382,12 +482,14 @@ private void bulk(String index, String valueSuffix, int count) throws IOExceptio client().performRequest(bulk); } - private void assertCount(String index, int count) throws IOException, ParseException { - Request searchTestIndexRequest = new Request("POST", "/" + index + "/_search"); - searchTestIndexRequest.addParameter(TOTAL_HITS_AS_INT_PARAM, "true"); - searchTestIndexRequest.addParameter("filter_path", "hits.total"); - Response searchTestIndexResponse = client().performRequest(searchTestIndexRequest); - assertEquals("{\"hits\":{\"total\":" + count + "}}", + private void assertCount(String index, int count) throws Exception { + assertBusy(() -> { + Request searchTestIndexRequest = new Request("POST", "/" + index + "/_search"); + searchTestIndexRequest.addParameter(TOTAL_HITS_AS_INT_PARAM, "true"); + searchTestIndexRequest.addParameter("filter_path", "hits.total"); + Response searchTestIndexResponse = client().performRequest(searchTestIndexRequest); + assertEquals("{\"hits\":{\"total\":" + count + "}}", EntityUtils.toString(searchTestIndexResponse.getEntity(), StandardCharsets.UTF_8)); + }, 30, TimeUnit.SECONDS); } } diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java index 3dd9f371f06fd..6c7cea5e3af93 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java @@ -40,11 +40,12 @@ import org.opensearch.cluster.metadata.MetadataIndexStateService; import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.opensearch.common.Booleans; -import org.opensearch.common.Strings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.support.XContentMapValues; +import org.opensearch.core.common.Strings; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.MapperService; import org.opensearch.core.rest.RestStatus; @@ -733,7 +734,7 @@ public void testSoftDeletesDisabledWarning() throws Exception { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), softDeletesEnabled); } Request request = new Request("PUT", "/" + indexName); - request.setJsonEntity("{\"settings\": " + Strings.toString(XContentType.JSON, settings.build()) + "}"); + request.setJsonEntity("{\"settings\": " + Strings.toString(MediaTypeRegistry.JSON, settings.build()) + "}"); if (softDeletesEnabled == false) { expectSoftDeletesWarning(request, indexName); } diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_geoshape.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_geoshape.yml new file mode 100644 index 
0000000000000..e669016cad98a --- /dev/null +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_geoshape.yml @@ -0,0 +1,16 @@ +--- +"Insert Document with geoshape field": + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "geo_shape_index_old", "_id":191}}' + - '{"name": "NEMO Science Museum","location": {"type": "envelope","coordinates": [ [100.0, 1.0], [101.0, 0.0] ]}}' + - '{"index": {"_index": "geo_shape_index_old", "_id":219}}' + - '{"name": "NEMO Science Museum","location": {"type": "envelope","coordinates": [ [100.0, 1.0], [106.0, 0.0] ]}}' + + - do: + search: + rest_total_hits_as_int: true + index: geo_shape_index_old + - match: { hits.total: 2 } diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_geoshape.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_geoshape.yml new file mode 100644 index 0000000000000..30a39447905c0 --- /dev/null +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_geoshape.yml @@ -0,0 +1,28 @@ +--- +"Create index with Geoshape field": + - do: + indices.create: + index: geo_shape_index_old + body: + settings: + index: + number_of_replicas: 2 + mappings: + "properties": + "location": + "type": "geo_shape" + + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "geo_shape_index_old", "_id":191}}' + - '{"name": "NEMO Science Museum","location": {"type": "envelope","coordinates": [ [100.0, 1.0], [101.0, 0.0] ]}}' + - '{"index": {"_index": "geo_shape_index_old", "_id":219}}' + - '{"name": "NEMO Science Museum","location": {"type": "envelope","coordinates": [ [100.0, 1.0], [106.0, 0.0] ]}}' + + - do: + search: + rest_total_hits_as_int: true + index: geo_shape_index_old + - match: { hits.total: 2 } diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_geoshape.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_geoshape.yml new file mode 100644 index 0000000000000..4c7b12a7f1909 --- /dev/null +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_geoshape.yml @@ -0,0 +1,61 @@ +--- +"Validate we are able to index documents after upgrade": + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "geo_shape_index_old", "_id":191}}' + - '{"name": "NEMO Science Museum","location": {"type": "envelope","coordinates": [ [100.0, 1.0], [101.0, 0.0] ]}}' + - '{"index": {"_index": "geo_shape_index_old", "_id":219}}' + - '{"name": "NEMO Science Museum","location": {"type": "envelope","coordinates": [ [100.0, 1.0], [106.0, 0.0] ]}}' + + - do: + search: + rest_total_hits_as_int: true + index: geo_shape_index_old + - match: { hits.total: 2 } + + +--- +"Create index with Geoshape field in new cluster": + - do: + indices.create: + index: geo_shape_index_new + body: + settings: + index: + number_of_replicas: 2 + mappings: + "properties": + "location": + "type": "geo_shape" + + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "geo_shape_index_new", "_id":191}}' + - '{"name": "NEMO Science Museum","location": {"type": "envelope","coordinates": [ [100.0, 1.0], [101.0, 0.0] ]}}' + - '{"index": {"_index": "geo_shape_index_new", "_id":219}}' + - '{"name": "NEMO Science Museum","location": {"type": "envelope","coordinates": [ [100.0, 1.0], [106.0, 0.0] ]}}' + + - do: + search: + rest_total_hits_as_int: true + index: geo_shape_index_new + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: 
geo_shape_index_new + body: + aggregations: + myaggregation: + geo_bounds: + field: "location" + - match: { hits.total: 2 } + - match: { aggregations.myaggregation.bounds.top_left.lat: 0.9999999823048711 } + - match: { aggregations.myaggregation.bounds.top_left.lon: 99.99999999068677 } + - match: { aggregations.myaggregation.bounds.bottom_right.lat: 0.0 } + - match: { aggregations.myaggregation.bounds.bottom_right.lon: 105.99999996833503 } diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/SearchRestCancellationIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/SearchRestCancellationIT.java index 4a898d816bbf4..8e7fee1e4f523 100644 --- a/qa/smoke-test-http/src/test/java/org/opensearch/http/SearchRestCancellationIT.java +++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/SearchRestCancellationIT.java @@ -48,8 +48,9 @@ import org.opensearch.client.Response; import org.opensearch.client.ResponseListener; import org.opensearch.common.SetOnce; -import org.opensearch.common.Strings; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.Strings; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.plugins.Plugin; import org.opensearch.plugins.PluginsService; import org.opensearch.script.MockScriptPlugin; @@ -59,7 +60,7 @@ import org.opensearch.search.lookup.LeafFieldsLookup; import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; +import org.opensearch.core.tasks.TaskId; import org.opensearch.tasks.TaskInfo; import org.opensearch.tasks.TaskManager; import org.opensearch.transport.TransportService; @@ -83,7 +84,6 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.instanceOf; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.awaitLatch; public class SearchRestCancellationIT extends HttpSmokeTestCase { @@ -99,12 +99,12 @@ public void testAutomaticCancellationDuringQueryPhase() throws Exception { Request searchRequest = new Request("GET", "/test/_search"); SearchSourceBuilder searchSource = new SearchSourceBuilder().query(scriptQuery( new Script(ScriptType.INLINE, "mockscript", ScriptedBlockPlugin.SCRIPT_NAME, Collections.emptyMap()))); - searchRequest.setJsonEntity(Strings.toString(XContentType.JSON, searchSource)); + searchRequest.setJsonEntity(Strings.toString(MediaTypeRegistry.JSON, searchSource)); verifyCancellationDuringQueryPhase(SearchAction.NAME, searchRequest); } public void testAutomaticCancellationMultiSearchDuringQueryPhase() throws Exception { - XContentType contentType = XContentType.JSON; + MediaType contentType = MediaTypeRegistry.JSON; MultiSearchRequest multiSearchRequest = new MultiSearchRequest().add(new SearchRequest("test") .source(new SearchSourceBuilder().scriptField("test_field", new Script(ScriptType.INLINE, "mockscript", ScriptedBlockPlugin.SCRIPT_NAME, Collections.emptyMap())))); @@ -148,12 +148,12 @@ public void testAutomaticCancellationDuringFetchPhase() throws Exception { Request searchRequest = new Request("GET", "/test/_search"); SearchSourceBuilder searchSource = new SearchSourceBuilder().scriptField("test_field", new Script(ScriptType.INLINE, "mockscript", ScriptedBlockPlugin.SCRIPT_NAME, Collections.emptyMap())); - searchRequest.setJsonEntity(Strings.toString(XContentType.JSON, searchSource)); + 
searchRequest.setJsonEntity(Strings.toString(MediaTypeRegistry.JSON, searchSource)); verifyCancellationDuringFetchPhase(SearchAction.NAME, searchRequest); } public void testAutomaticCancellationMultiSearchDuringFetchPhase() throws Exception { - XContentType contentType = XContentType.JSON; + MediaType contentType = MediaTypeRegistry.JSON; MultiSearchRequest multiSearchRequest = new MultiSearchRequest().add(new SearchRequest("test") .source(new SearchSourceBuilder().scriptField("test_field", new Script(ScriptType.INLINE, "mockscript", ScriptedBlockPlugin.SCRIPT_NAME, Collections.emptyMap())))); @@ -299,7 +299,7 @@ public Map<String, Function<Map<String, Object>, Object>> pluginScripts() { } } - private static ContentType createContentType(final XContentType xContentType) { - return ContentType.create(xContentType.mediaTypeWithoutParameters(), (Charset) null); + private static ContentType createContentType(final MediaType mediaType) { + return ContentType.create(mediaType.mediaTypeWithoutParameters(), (Charset) null); } } diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/10_pipeline_with_mustache_templates.yml b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/10_pipeline_with_mustache_templates.yml index e6a2a3d52e116..c043015281a9a 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/10_pipeline_with_mustache_templates.yml +++ b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/10_pipeline_with_mustache_templates.yml @@ -208,7 +208,7 @@ id: 1 - length: { _source: 2 } - match: { _source.do_nothing: "foo" } - - match: { _source.error: "processor first_processor [remove]: field [field_to_remove] not present as part of path [field_to_remove]" } + - match: { _source.error: "processor first_processor [remove]: field [field_to_remove] doesn't exist" } --- "Test rolling up json object arrays": diff --git a/qa/translog-policy/src/test/java/org/opensearch/upgrades/TranslogPolicyIT.java b/qa/translog-policy/src/test/java/org/opensearch/upgrades/TranslogPolicyIT.java index 0dc62b160ff3f..5f0f468898c47 100644 --- a/qa/translog-policy/src/test/java/org/opensearch/upgrades/TranslogPolicyIT.java +++ b/qa/translog-policy/src/test/java/org/opensearch/upgrades/TranslogPolicyIT.java @@ -35,7 +35,7 @@ import org.opensearch.LegacyESVersion; import org.opensearch.client.Request; import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.Strings; +import org.opensearch.core.common.Strings; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.index.IndexSettings; diff --git a/qa/unconfigured-node-name/src/test/java/org/opensearch/unconfigured_node_name/JsonLogsFormatAndParseIT.java b/qa/unconfigured-node-name/src/test/java/org/opensearch/unconfigured_node_name/JsonLogsFormatAndParseIT.java index d14c834405f32..8a6e5d62112c8 100644 --- a/qa/unconfigured-node-name/src/test/java/org/opensearch/unconfigured_node_name/JsonLogsFormatAndParseIT.java +++ b/qa/unconfigured-node-name/src/test/java/org/opensearch/unconfigured_node_name/JsonLogsFormatAndParseIT.java @@ -61,6 +61,7 @@ protected Matcher<String> nodeNameMatcher() { return equalTo(HOSTNAME); } + @SuppressWarnings("removal") @Override protected BufferedReader openReader(Path logFile) { return AccessController.doPrivileged((PrivilegedAction<BufferedReader>) () -> { diff --git 
a/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientEmployeeResource.java b/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientEmployeeResource.java index 432b40367c978..d2bc0a81768d7 100644 --- a/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientEmployeeResource.java +++ b/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientEmployeeResource.java @@ -32,15 +32,6 @@ package org.opensearch.wildfly.transport; -import jakarta.inject.Inject; -import jakarta.ws.rs.Consumes; -import jakarta.ws.rs.GET; -import jakarta.ws.rs.PUT; -import jakarta.ws.rs.Path; -import jakarta.ws.rs.PathParam; -import jakarta.ws.rs.Produces; -import jakarta.ws.rs.core.MediaType; -import jakarta.ws.rs.core.Response; import org.opensearch.action.get.GetRequest; import org.opensearch.action.get.GetResponse; import org.opensearch.action.index.IndexRequest; @@ -57,6 +48,16 @@ import java.util.Map; import java.util.Objects; +import jakarta.inject.Inject; +import jakarta.ws.rs.Consumes; +import jakarta.ws.rs.GET; +import jakarta.ws.rs.PUT; +import jakarta.ws.rs.Path; +import jakarta.ws.rs.PathParam; +import jakarta.ws.rs.Produces; +import jakarta.ws.rs.core.MediaType; +import jakarta.ws.rs.core.Response; + import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; @Path("/employees") diff --git a/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientProducer.java b/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientProducer.java index 490ecd214c3f3..74d321ef4a7c5 100644 --- a/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientProducer.java +++ b/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientProducer.java @@ -32,7 +32,6 @@ package org.opensearch.wildfly.transport; -import jakarta.enterprise.inject.Produces; import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.RestClient; import org.opensearch.client.RestHighLevelClient; @@ -42,6 +41,8 @@ import java.net.URISyntaxException; import java.nio.file.Path; +import jakarta.enterprise.inject.Produces; + @SuppressWarnings("unused") public final class RestHighLevelClientProducer { diff --git a/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelJacksonJsonProvider.java b/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelJacksonJsonProvider.java index 7989f0351daef..7f27e69813971 100644 --- a/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelJacksonJsonProvider.java +++ b/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelJacksonJsonProvider.java @@ -32,9 +32,8 @@ package org.opensearch.wildfly.transport; -import org.jboss.resteasy.plugins.providers.jackson.ResteasyJackson2Provider; - import jakarta.ws.rs.ext.Provider; +import org.jboss.resteasy.plugins.providers.jackson.ResteasyJackson2Provider; @Provider public class RestHighLevelJacksonJsonProvider extends ResteasyJackson2Provider { diff --git a/qa/wildfly/src/test/java/org/opensearch/wildfly/WildflyIT.java b/qa/wildfly/src/test/java/org/opensearch/wildfly/WildflyIT.java index fdb5438a9be26..faba7c4ac6b21 100644 --- a/qa/wildfly/src/test/java/org/opensearch/wildfly/WildflyIT.java +++ b/qa/wildfly/src/test/java/org/opensearch/wildfly/WildflyIT.java @@ -46,10 +46,10 @@ import org.apache.lucene.tests.util.LuceneTestCase; import org.apache.lucene.tests.util.TestRuleLimitSysouts; import 
org.opensearch.cluster.ClusterModule; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; import java.io.IOException; import java.net.URI; diff --git a/release-notes/opensearch.release-notes-1.3.12.md b/release-notes/opensearch.release-notes-1.3.12.md new file mode 100644 index 0000000000000..88551642e3c6d --- /dev/null +++ b/release-notes/opensearch.release-notes-1.3.12.md @@ -0,0 +1,8 @@ +## 2023-08-09 Version 1.3.12 Release Notes + +### Upgrades +- Upgrade `org.bouncycastle:bcprov-jdk15on` to `org.bouncycastle:bcprov-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) +- Upgrade `org.bouncycastle:bcmail-jdk15on` to `org.bouncycastle:bcmail-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) +- Upgrade `org.bouncycastle:bcpkix-jdk15on` to `org.bouncycastle:bcpkix-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) +- Upgrade `netty` from 4.1.94.Final to 4.1.96.Final ([#9030](https://github.com/opensearch-project/OpenSearch/pull/9030)) +- Upgrade bundled OpenJDK (July 2023 Patch releases) ([#8872](https://github.com/opensearch-project/OpenSearch/pull/8872)) diff --git a/release-notes/opensearch.release-notes-1.3.13.md b/release-notes/opensearch.release-notes-1.3.13.md new file mode 100644 index 0000000000000..3ece2c8f91984 --- /dev/null +++ b/release-notes/opensearch.release-notes-1.3.13.md @@ -0,0 +1,7 @@ +## 2023-09-14 Version 1.3.13 Release Notes + +### Upgrades +- Bump `netty` from 4.1.96.Final to 4.1.97.Final ([#9553](https://github.com/opensearch-project/OpenSearch/pull/9553)) +- Bump `org.xerial.snappy:snappy-java` from 1.1.8.2 to 1.1.10.3 ([#9252](https://github.com/opensearch-project/OpenSearch/pull/9252)) +- Bump `com.squareup.okhttp3:okhttp` from 4.9.3 to 4.11.0 ([#9252](https://github.com/opensearch-project/OpenSearch/pull/9252)) +- Bump `com.squareup.okio:okio` from 2.8.0 to 3.5.0 ([#9252](https://github.com/opensearch-project/OpenSearch/pull/9252)) diff --git a/release-notes/opensearch.release-notes-1.3.14.md b/release-notes/opensearch.release-notes-1.3.14.md new file mode 100644 index 0000000000000..319f5a79781c7 --- /dev/null +++ b/release-notes/opensearch.release-notes-1.3.14.md @@ -0,0 +1,18 @@ +## 2023-12-12 Version 1.3.14 Release Notes + +### Upgrades +- Bump asm from 9.5 to 9.6 ([#10302](https://github.com/opensearch-project/OpenSearch/pull/10302)) +- Bump netty from 4.1.97.Final to 4.1.99.Final ([#10303](https://github.com/opensearch-project/OpenSearch/pull/10303)) +- Bump `netty` from 4.1.99.Final to 4.1.100.Final ([#10564](https://github.com/opensearch-project/OpenSearch/pull/10564)) +- Bump `netty` from 4.1.100.Final to 4.1.101.Final ([#11294](https://github.com/opensearch-project/OpenSearch/pull/11294)) +- Bump `org.apache.zookeeper:zookeeper` from 3.8.0 to 3.8.3 ([#11476](https://github.com/opensearch-project/OpenSearch/pull/11476)) +- Bump `org.bouncycastle:bc-fips` from 1.0.2.3 to 1.0.2.4 ([#10297](https://github.com/opensearch-project/OpenSearch/pull/10297)) +- Bump `org.apache.avro:avro` from 1.10.2 to 1.11.3 ([#11502](https://github.com/opensearch-project/OpenSearch/pull/11502)) +- Bump `jetty` from 9.4.51.v20230217 to 9.4.52.v20230823 ([#11501](https://github.com/opensearch-project/OpenSearch/pull/11501))
+- Bump `io.projectreactor:reactor-core` from 3.4.23 to 3.4.34 and reactor-netty from 1.0.24 to 1.0.39 ([#11500](https://github.com/opensearch-project/OpenSearch/pull/11500)) +- Bump `logback-core` and `logback-classic` to 1.2.13 ([#11521](https://github.com/opensearch-project/OpenSearch/pull/11521)) +- Bumps `jetty` version from 9.4.52.v20230823 to 9.4.53.v20231009 ([#11539](https://github.com/opensearch-project/OpenSearch/pull/11539)) + +### Bug Fixes +- Use iterative approach to evaluate Regex.simpleMatch ([#11060](https://github.com/opensearch-project/OpenSearch/pull/11060)) +- Improve compressed request handling ([#10261](https://github.com/opensearch-project/OpenSearch/pull/10261)) diff --git a/release-notes/opensearch.release-notes-1.3.15.md b/release-notes/opensearch.release-notes-1.3.15.md new file mode 100644 index 0000000000000..a5b446ad1ec49 --- /dev/null +++ b/release-notes/opensearch.release-notes-1.3.15.md @@ -0,0 +1,5 @@ +## 2024-03-01 Version 1.3.15 Release Notes + +### Upgrades +- Bump `netty` from 4.1.100.Final to 4.1.107.Final ([#11294](https://github.com/opensearch-project/OpenSearch/pull/11294), [#11775](https://github.com/opensearch-project/OpenSearch/pull/11775), [#12034](https://github.com/opensearch-project/OpenSearch/pull/12034), [#12372](https://github.com/opensearch-project/OpenSearch/pull/12372)) +- Bump `org.apache.kerby:kerb-admin` from 1.0.1 to 2.0.3 ([#12194](https://github.com/opensearch-project/OpenSearch/pull/12194)) diff --git a/release-notes/opensearch.release-notes-2.10.0.md b/release-notes/opensearch.release-notes-2.10.0.md new file mode 100644 index 0000000000000..9d5f75d61ee2a --- /dev/null +++ b/release-notes/opensearch.release-notes-2.10.0.md @@ -0,0 +1,136 @@ +## 2023-09-08 Version 2.10.0 Release Notes + +## [2.10] + +### Added +- Add server version as REST response header ([#6583](https://github.com/opensearch-project/OpenSearch/issues/6583)) +- Start replication checkpointTimers on primary before segments upload to remote store. ([#8221](https://github.com/opensearch-project/OpenSearch/pull/8221)) +- Introduce new static cluster setting to control slice computation for concurrent segment search.
([#8847](https://github.com/opensearch-project/OpenSearch/pull/8884)) +- Add configuration for file cache size to max remote data ratio to prevent oversubscription of file cache ([#8606](https://github.com/opensearch-project/OpenSearch/pull/8606)) +- Disallow compression level to be set for default and best_compression index codecs ([#8737](https://github.com/opensearch-project/OpenSearch/pull/8737)) +- [distribution/archives] [Linux] [x64] Provide the variant of the distributions bundled with JRE ([#8195](https://github.com/opensearch-project/OpenSearch/pull/8195)) +- Prioritize replica shard movement during shard relocation ([#8875](https://github.com/opensearch-project/OpenSearch/pull/8875)) +- Introducing Default and Best Compression codecs as their algorithm name ([#9123](https://github.com/opensearch-project/OpenSearch/pull/9123)) +- Make SearchTemplateRequest implement IndicesRequest.Replaceable ([#9122](https://github.com/opensearch-project/OpenSearch/pull/9122)) +- [BWC and API enforcement] Define the initial set of annotations, their meaning and relations between them ([#9223](https://github.com/opensearch-project/OpenSearch/pull/9223)) +- [Remote Store] Add Segment download stats to remotestore stats API ([#8718](https://github.com/opensearch-project/OpenSearch/pull/8718)) +- [Remote Store] Add remote segment transfer stats on NodesStats API ([#9168](https://github.com/opensearch-project/OpenSearch/pull/9168) [#9393](https://github.com/opensearch-project/OpenSearch/pull/9393) [#9454](https://github.com/opensearch-project/OpenSearch/pull/9454)) +- [Segment Replication] Support realtime reads for GET requests ([#9212](https://github.com/opensearch-project/OpenSearch/pull/9212)) +- Allow test clusters to run with TLS ([#8900](https://github.com/opensearch-project/OpenSearch/pull/8900)) +- Add jdk.incubator.vector module support for JDK 20+ ([#8601](https://github.com/opensearch-project/OpenSearch/pull/8601)) +- [Feature] Expose term frequency in Painless script score context ([#9081](https://github.com/opensearch-project/OpenSearch/pull/9081)) +- Add support for reading partial files to HDFS repository ([#9513](https://github.com/opensearch-project/OpenSearch/issues/9513)) +- [Remote Store] Rate limiter integration for remote store uploads and downloads ([#9448](https://github.com/opensearch-project/OpenSearch/pull/9448)) +- [BWC and API enforcement] Decorate the existing APIs with proper annotations (part 1) ([#9520](https://github.com/opensearch-project/OpenSearch/pull/9520)) +- Add support for extensions to search responses using SearchExtBuilder ([#9379](https://github.com/opensearch-project/OpenSearch/pull/9379)) +- [Remote State] Create service to publish cluster state to remote store ([#9160](https://github.com/opensearch-project/OpenSearch/pull/9160)) +- Core crypto library to perform encryption and decryption of source content ([#8466](https://github.com/opensearch-project/OpenSearch/pull/8466)) +- Expose DelimitedTermFrequencyTokenFilter to allow providing term frequencies along with terms ([#9479](https://github.com/opensearch-project/OpenSearch/pull/9479)) +- APIs for performing async blob reads and async downloads from the repository using multiple streams ([#9592](https://github.com/opensearch-project/OpenSearch/issues/9592)) +- Add concurrent segment search related metrics to node and index stats ([#9622](https://github.com/opensearch-project/OpenSearch/issues/9622)) +- Add average concurrency metric for concurrent segment search
([#9670](https://github.com/opensearch-project/OpenSearch/issues/9670)) +- Introduce cluster default remote translog buffer interval setting ([#9584](https://github.com/opensearch-project/OpenSearch/pull/9584)) +- Added encryption-sdk lib to provide encryption and decryption capabilities ([#8466](https://github.com/opensearch-project/OpenSearch/pull/8466) [#9289](https://github.com/opensearch-project/OpenSearch/pull/9289)) +- [Segment Replication] Adding segment replication statistics rolled up at index, node and cluster level ([#9709](https://github.com/opensearch-project/OpenSearch/pull/9709)) +- Added crypto-kms plugin to provide AWS KMS based key providers for encryption/decryption. ([#8465](https://github.com/opensearch-project/OpenSearch/pull/8465)) +- [Remote state] Integrate remote cluster state in publish/commit flow ([#9665](https://github.com/opensearch-project/OpenSearch/pull/9665)) +- [Remote Store] Changes to introduce repository registration during bootstrap via node attributes. ([#9105](https://github.com/opensearch-project/OpenSearch/pull/9105)) +- [Remote state] Auto restore index metadata from last known cluster state ([#9831](https://github.com/opensearch-project/OpenSearch/pull/9831)) + +### Dependencies +- Bump `org.apache.logging.log4j:log4j-core` from 2.17.1 to 2.20.0 ([#8307](https://github.com/opensearch-project/OpenSearch/pull/8307)) +- Bump `io.grpc:grpc-context` from 1.46.0 to 1.57.1 ([#8726](https://github.com/opensearch-project/OpenSearch/pull/8726), [#9145](https://github.com/opensearch-project/OpenSearch/pull/9145)) +- Bump `com.netflix.nebula:gradle-info-plugin` from 12.1.5 to 12.1.6 ([#8724](https://github.com/opensearch-project/OpenSearch/pull/8724)) +- Bump `commons-codec:commons-codec` from 1.15 to 1.16.0 ([#8725](https://github.com/opensearch-project/OpenSearch/pull/8725)) +- Bump `org.apache.zookeeper:zookeeper` from 3.8.1 to 3.9.0 ([#8844](https://github.com/opensearch-project/OpenSearch/pull/8844), [#9146](https://github.com/opensearch-project/OpenSearch/pull/9146)) +- Bump `org.gradle.test-retry` from 1.5.3 to 1.5.4 ([#8842](https://github.com/opensearch-project/OpenSearch/pull/8842)) +- Bump `com.netflix.nebula.ospackage-base` from 11.3.0 to 11.4.0 ([#8838](https://github.com/opensearch-project/OpenSearch/pull/8838)) +- Bump `com.google.http-client:google-http-client-gson` from 1.43.2 to 1.43.3 ([#8840](https://github.com/opensearch-project/OpenSearch/pull/8840)) +- OpenJDK Update (July 2023 Patch releases) ([#8869](https://github.com/opensearch-project/OpenSearch/pull/8869)) +- Bump `hadoop` libraries from 3.3.4 to 3.3.6 ([#6995](https://github.com/opensearch-project/OpenSearch/pull/6995)) +- Bump `com.gradle.enterprise` from 3.13.3 to 3.14.1 ([#8996](https://github.com/opensearch-project/OpenSearch/pull/8996)) +- Bump `org.apache.commons:commons-lang3` from 3.12.0 to 3.13.0 ([#8995](https://github.com/opensearch-project/OpenSearch/pull/8995)) +- Bump `com.google.cloud:google-cloud-core-http` from 2.21.0 to 2.21.1 ([#8999](https://github.com/opensearch-project/OpenSearch/pull/8999)) +- Bump `com.maxmind.geoip2:geoip2` from 4.0.1 to 4.1.0 ([#8998](https://github.com/opensearch-project/OpenSearch/pull/8998)) +- Bump `org.apache.commons:commons-lang3` from 3.12.0 to 3.13.0 in /plugins/repository-hdfs ([#8997](https://github.com/opensearch-project/OpenSearch/pull/8997)) +- Bump `netty` from 4.1.94.Final to 4.1.96.Final ([#9030](https://github.com/opensearch-project/OpenSearch/pull/9030)) +- Bump `com.google.jimfs:jimfs` from 1.2 to 1.3.0 
([#9080](https://github.com/opensearch-project/OpenSearch/pull/9080)) +- Bump `io.projectreactor.netty:reactor-netty-http` from 1.1.8 to 1.1.9 ([#9147](https://github.com/opensearch-project/OpenSearch/pull/9147)) +- Bump `org.apache.maven:maven-model` from 3.9.3 to 3.9.4 ([#9148](https://github.com/opensearch-project/OpenSearch/pull/9148)) +- Bump `com.azure:azure-storage-blob` from 12.22.3 to 12.23.0 ([#9231](https://github.com/opensearch-project/OpenSearch/pull/9231)) +- Bump `com.diffplug.spotless` from 6.19.0 to 6.20.0 ([#9227](https://github.com/opensearch-project/OpenSearch/pull/9227)) +- Bump `org.xerial.snappy:snappy-java` from 1.1.8.2 to 1.1.10.3 ([#9252](https://github.com/opensearch-project/OpenSearch/pull/9252)) +- Bump `com.squareup.okhttp3:okhttp` from 4.9.3 to 4.11.0 ([#9252](https://github.com/opensearch-project/OpenSearch/pull/9252)) +- Bump `com.squareup.okio:okio` from 2.8.0 to 3.5.0 ([#9252](https://github.com/opensearch-project/OpenSearch/pull/9252)) +- Bump `com.google.code.gson:gson` from 2.9.0 to 2.10.1 ([#9230](https://github.com/opensearch-project/OpenSearch/pull/9230)) +- Bump `lycheeverse/lychee-action` from 1.2.0 to 1.8.0 ([#9228](https://github.com/opensearch-project/OpenSearch/pull/9228)) +- Bump `snakeyaml` from 2.0 to 2.1 ([#9269](https://github.com/opensearch-project/OpenSearch/pull/9269)) +- Bump `aws-actions/configure-aws-credentials` from 1 to 2 ([#9302](https://github.com/opensearch-project/OpenSearch/pull/9302)) +- Bump `com.github.luben:zstd-jni` from 1.5.5-3 to 1.5.5-5 ([#9431](https://github.com/opensearch-project/OpenSearch/pull/9431)) +- Bump `netty` from 4.1.96.Final to 4.1.97.Final ([#9553](https://github.com/opensearch-project/OpenSearch/pull/9553)) +- Bump `io.grpc:grpc-api` from 1.57.1 to 1.57.2 ([#9578](https://github.com/opensearch-project/OpenSearch/pull/9578)) +- Add Encryption SDK dependencies ([#8466](https://github.com/opensearch-project/OpenSearch/pull/8466)) + +### Changed +- Default to mmapfs within hybridfs ([#8508](https://github.com/opensearch-project/OpenSearch/pull/8508)) +- Perform aggregation postCollection in ContextIndexSearcher after searching leaves ([#8303](https://github.com/opensearch-project/OpenSearch/pull/8303)) +- Make Span exporter configurable ([#8620](https://github.com/opensearch-project/OpenSearch/issues/8620)) +- [Refactor] StreamIO from common to core.common namespace in core lib ([#8157](https://github.com/opensearch-project/OpenSearch/pull/8157)) +- [Refactor] Remaining HPPC to java.util collections ([#8730](https://github.com/opensearch-project/OpenSearch/pull/8730)) +- Remote Segment Store Repository setting moved from `index.remote_store.repository` to `index.remote_store.segment.repository` and `cluster.remote_store.repository` to `cluster.remote_store.segment.repository` respectively for Index and Cluster level settings ([#8719](https://github.com/opensearch-project/OpenSearch/pull/8719)) +- Change InternalSignificantTerms to sum shard-level superset counts only in final reduce ([#8735](https://github.com/opensearch-project/OpenSearch/pull/8735)) +- Exclude 'benchmarks' from codecov report ([#8805](https://github.com/opensearch-project/OpenSearch/pull/8805)) +- Create separate SourceLookup instance per segment slice in SignificantTextAggregatorFactory ([#8807](https://github.com/opensearch-project/OpenSearch/pull/8807)) +- Replace the deprecated
IndexReader APIs with new storedFields() & termVectors() ([#7792](https://github.com/opensearch-project/OpenSearch/pull/7792)) +- [Remote Store] Add support to restore only unassigned shards of an index ([#8792](https://github.com/opensearch-project/OpenSearch/pull/8792)) +- Add safeguard limits for file cache during node level allocation ([#8208](https://github.com/opensearch-project/OpenSearch/pull/8208)) +- Performance improvements for BytesRefHash ([#8788](https://github.com/opensearch-project/OpenSearch/pull/8788)) +- Add support for aggregation profiler with concurrent aggregation ([#8801](https://github.com/opensearch-project/OpenSearch/pull/8801)) +- [Remove] Deprecated Fractional ByteSizeValue support ([#9005](https://github.com/opensearch-project/OpenSearch/pull/9005)) +- [Remote Store] Restrict user override for remote store index level settings ([#8812](https://github.com/opensearch-project/OpenSearch/pull/8812)) +- [Refactor] MediaTypeParser to MediaTypeParserRegistry ([#8636](https://github.com/opensearch-project/OpenSearch/pull/8636)) +- Make MultiBucketConsumerService thread safe to use across slices during search ([#9047](https://github.com/opensearch-project/OpenSearch/pull/9047)) +- Removed blocking wait in TransportGetSnapshotsAction which was exhausting generic threadpool ([#8377](https://github.com/opensearch-project/OpenSearch/pull/8377)) +- Adds support for tracing runnable scenarios ([#8831](https://github.com/opensearch-project/OpenSearch/pull/8831)) +- Change shard_size and shard_min_doc_count evaluation to happen in shard level reduce phase ([#9085](https://github.com/opensearch-project/OpenSearch/pull/9085)) +- Add attributes to startSpan methods ([#9199](https://github.com/opensearch-project/OpenSearch/pull/9199)) +- [Refactor] Task foundation classes to core library - pt 1 ([#9082](https://github.com/opensearch-project/OpenSearch/pull/9082)) +- Add support for wrapping CollectorManager with profiling during concurrent execution ([#9129](https://github.com/opensearch-project/OpenSearch/pull/9129)) +- Add base class for parameterizing the search based tests ([#9083](https://github.com/opensearch-project/OpenSearch/pull/9083)) +- Rethrow OpenSearch exception for non-concurrent path while using concurrent search ([#9177](https://github.com/opensearch-project/OpenSearch/pull/9177)) +- Improve performance of encoding composite keys in multi-term aggregations ([#9412](https://github.com/opensearch-project/OpenSearch/pull/9412)) +- Refactor Compressors from CompressorFactory to CompressorRegistry for extensibility ([#9262](https://github.com/opensearch-project/OpenSearch/pull/9262)) +- Fix sort related ITs for concurrent search ([#9466](https://github.com/opensearch-project/OpenSearch/pull/9466)) +- [Remote Store] Implicitly use replication type SEGMENT for remote store clusters ([#9264](https://github.com/opensearch-project/OpenSearch/pull/9264)) +- Add support to use trace propagated from client ([#9506](https://github.com/opensearch-project/OpenSearch/pull/9506)) +- Separate request-based and settings-based concurrent segment search controls and introduce AggregatorFactory method to determine concurrent search support
([#9469](https://github.com/opensearch-project/OpenSearch/pull/9469)) +- Redefine telemetry context restoration and propagation ([#9617](https://github.com/opensearch-project/OpenSearch/pull/9617)) +- Use non-concurrent path for sort request on timeseries index and field ([#9562](https://github.com/opensearch-project/OpenSearch/pull/9562)) +- Added sampler based on `Blanket Probabilistic Sampling rate` and `Override for on demand` ([#9621](https://github.com/opensearch-project/OpenSearch/issues/9621)) +- Decouple replication lag from logic to fail stale replicas ([#9507](https://github.com/opensearch-project/OpenSearch/pull/9507)) +- Improve performance of rounding dates in date_histogram aggregation ([#9727](https://github.com/opensearch-project/OpenSearch/pull/9727)) +- [Remote Store] Add support for Remote Translog Store stats in `_remotestore/stats/` API ([#9263](https://github.com/opensearch-project/OpenSearch/pull/9263)) +- Removing the vec file extension from INDEX_STORE_HYBRID_NIO_EXTENSIONS, to ensure no performance degradation for vector search via Lucene Engine ([#9528](https://github.com/opensearch-project/OpenSearch/pull/9528)) +- Clean up unreferenced files on segment merge failure ([#9503](https://github.com/opensearch-project/OpenSearch/pull/9503)) +- Move zstd compression codec to external custom-codecs repository ([#9422](https://github.com/opensearch-project/OpenSearch/issues/9422)) +- [Remote Store] Add support for Remote Translog Store upload stats in `_nodes/stats/` API ([#8908](https://github.com/opensearch-project/OpenSearch/pull/8908)) +- [Remote Store] Removing feature flag to mark feature GA ([#9761](https://github.com/opensearch-project/OpenSearch/pull/9761)) + +### Removed +- Remove provision to create Remote Indices without Remote Translog Store ([#8719](https://github.com/opensearch-project/OpenSearch/pull/8719)) + +### Fixed +- Fix flaky ResourceAwareTasksTests.testBasicTaskResourceTracking test ([#8993](https://github.com/opensearch-project/OpenSearch/pull/8993)) +- Fix null_pointer_exception when creating or updating ingest pipeline ([#9259](https://github.com/opensearch-project/OpenSearch/pull/9259)) +- Fix memory leak when using Zstd Dictionary ([#9403](https://github.com/opensearch-project/OpenSearch/pull/9403)) +- Fix condition to remove index create block ([#9437](https://github.com/opensearch-project/OpenSearch/pull/9437)) +- Add support to clear archived index setting ([#9019](https://github.com/opensearch-project/OpenSearch/pull/9019)) +- Fix range reads in repository-s3 ([#9512](https://github.com/opensearch-project/OpenSearch/issues/9512)) +- [Segment Replication] Fixed bug where replica shard temporarily serves stale data during an engine reset ([#9495](https://github.com/opensearch-project/OpenSearch/pull/9495)) +- Disable shard/segment level search_after short cutting if track_total_hits != false ([#9683](https://github.com/opensearch-project/OpenSearch/pull/9683)) +- [Segment Replication] Fixed bug where bytes behind metric is not accurate ([#9686](https://github.com/opensearch-project/OpenSearch/pull/9686)) diff --git a/release-notes/opensearch.release-notes-2.11.0.md b/release-notes/opensearch.release-notes-2.11.0.md new file mode 100644
index 0000000000000..040cc053469ed --- /dev/null +++ b/release-notes/opensearch.release-notes-2.11.0.md @@ -0,0 +1,68 @@ +## 2023-10-12 Version 2.11.0 Release Notes + +## [2.11] + +### Added +- Add coordinator level stats for search latency ([#8386](https://github.com/opensearch-project/OpenSearch/issues/8386)) +- Add metrics for thread_pool task wait time ([#9681](https://github.com/opensearch-project/OpenSearch/pull/9681)) +- Add parallel file download support for remote store based replication ([#8596](https://github.com/opensearch-project/OpenSearch/pull/8596)) +- Async blob read support for S3 plugin ([#9694](https://github.com/opensearch-project/OpenSearch/pull/9694)) +- [Telemetry-Otel] Added support for OtlpGrpcSpanExporter exporter ([#9666](https://github.com/opensearch-project/OpenSearch/pull/9666)) +- Async blob read support for encrypted containers ([#10131](https://github.com/opensearch-project/OpenSearch/pull/10131)) +- Implement Visitor Design pattern in QueryBuilder to enable the capability to traverse through the complex QueryBuilder tree. ([#10110](https://github.com/opensearch-project/OpenSearch/pull/10110)) +- Add capability to restrict async durability mode for remote indexes ([#10189](https://github.com/opensearch-project/OpenSearch/pull/10189)) +- Add Doc Status Counter for Indexing Engine ([#4562](https://github.com/opensearch-project/OpenSearch/issues/4562)) +- Add unreferenced file cleanup count to merge stats ([#10204](https://github.com/opensearch-project/OpenSearch/pull/10204)) +- Configurable merge policy for index with an option to choose from LogByteSize and Tiered merge policy ([#9992](https://github.com/opensearch-project/OpenSearch/pull/9992)) +- [Remote Store] Add support to restrict creation & deletion of system repository and mutation of immutable settings of system repository ([#9839](https://github.com/opensearch-project/OpenSearch/pull/9839)) +- Improve compressed request handling ([#10261](https://github.com/opensearch-project/OpenSearch/pull/10261)) + +### Dependencies +- Bump JNA version from 5.5 to 5.13 ([#9963](https://github.com/opensearch-project/OpenSearch/pull/9963)) +- Bump `peter-evans/create-or-update-comment` from 2 to 3 ([#9575](https://github.com/opensearch-project/OpenSearch/pull/9575)) +- Bump `actions/checkout` from 2 to 4 ([#9968](https://github.com/opensearch-project/OpenSearch/pull/9968)) +- Bump OpenTelemetry from 1.26.0 to 1.30.1 ([#9950](https://github.com/opensearch-project/OpenSearch/pull/9950)) +- Bump `org.apache.commons:commons-compress` from 1.23.0 to 1.24.0 ([#9973](https://github.com/opensearch-project/OpenSearch/pull/9973), [#9972](https://github.com/opensearch-project/OpenSearch/pull/9972)) +- Bump `com.google.cloud:google-cloud-core-http` from 2.21.1 to 2.23.0 ([#9971](https://github.com/opensearch-project/OpenSearch/pull/9971)) +- Bump `mockito` from 5.4.0 to 5.5.0 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022)) +- Bump `bytebuddy` from 1.14.3 to 1.14.7 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022)) +- Bump `com.zaxxer:SparseBitSet` from 1.2 to 1.3 ([#10098](https://github.com/opensearch-project/OpenSearch/pull/10098)) +- Bump `tibdex/github-app-token` from 1.5.0 to 2.1.0 ([#10125](https://github.com/opensearch-project/OpenSearch/pull/10125)) +- Bump `org.wiremock:wiremock-standalone` from 2.35.0 to 3.1.0 ([#9752](https://github.com/opensearch-project/OpenSearch/pull/9752)) +- Bump `org.eclipse.jgit` from 6.5.0 to 6.7.0
([#10147](https://github.com/opensearch-project/OpenSearch/pull/10147)) +- Bump `codecov/codecov-action` from 2 to 3 ([#10209](https://github.com/opensearch-project/OpenSearch/pull/10209)) +- Bump `com.google.http-client:google-http-client-jackson2` from 1.43.2 to 1.43.3 ([#10126](https://github.com/opensearch-project/OpenSearch/pull/10126)) +- Bump `org.xerial.snappy:snappy-java` from 1.1.10.3 to 1.1.10.5 ([#10206](https://github.com/opensearch-project/OpenSearch/pull/10206), [#10299](https://github.com/opensearch-project/OpenSearch/pull/10299)) +- Bump `org.bouncycastle:bcpkix-jdk15to18` from 1.75 to 1.76 ([#10219](https://github.com/opensearch-project/OpenSearch/pull/10219)) +- Bump `org.bouncycastle:bcprov-jdk15to18` from 1.75 to 1.76 ([#10219](https://github.com/opensearch-project/OpenSearch/pull/10219)) +- Bump `org.bouncycastle:bcmail-jdk15to18` from 1.75 to 1.76 ([#10219](https://github.com/opensearch-project/OpenSearch/pull/10219)) +- Bump asm from 9.5 to 9.6 ([#10302](https://github.com/opensearch-project/OpenSearch/pull/10302)) +- Bump netty from 4.1.97.Final to 4.1.99.Final ([#10303](https://github.com/opensearch-project/OpenSearch/pull/10303)) +- Bump `peter-evans/create-pull-request` from 3 to 5 ([#10301](https://github.com/opensearch-project/OpenSearch/pull/10301)) +- Bump `org.apache.avro:avro` from 1.11.2 to 1.11.3 ([#10210](https://github.com/opensearch-project/OpenSearch/pull/10210)) +- Bump `netty` from 4.1.99.Final to 4.1.100.Final ([#10564](https://github.com/opensearch-project/OpenSearch/pull/10564)) + +### Changed
- Add instrumentation in rest and network layer. ([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415)) +- Allow parameterization of tests with OpenSearchIntegTestCase.SuiteScopeTestCase annotation ([#9916](https://github.com/opensearch-project/OpenSearch/pull/9916)) +- Add instrumentation in transport service. ([#10042](https://github.com/opensearch-project/OpenSearch/pull/10042)) +- [Tracing Framework] Add support for SpanKind. ([#10122](https://github.com/opensearch-project/OpenSearch/pull/10122)) +- Pass parent filter to inner query in nested query ([#10246](https://github.com/opensearch-project/OpenSearch/pull/10246)) +- Disable concurrent segment search when terminate_after is used ([#10200](https://github.com/opensearch-project/OpenSearch/pull/10200)) +- Add instrumentation in Inbound Handler. ([#10143](https://github.com/opensearch-project/OpenSearch/pull/10143)) +- Enable remote segment upload backpressure by default ([#10356](https://github.com/opensearch-project/OpenSearch/pull/10356)) +- [Remote Store] Add support to reload repository metadata inplace ([#9569](https://github.com/opensearch-project/OpenSearch/pull/9569)) +- [Metrics Framework] Add Metrics framework.
([#10241](https://github.com/opensearch-project/OpenSearch/pull/10241)) +- Updating the separator for RemoteStoreLockManager since underscore is allowed in base64UUID url charset ([#10379](https://github.com/opensearch-project/OpenSearch/pull/10379)) + +### Removed +- Remove spurious SGID bit on directories ([#9447](https://github.com/opensearch-project/OpenSearch/pull/9447)) + +### Fixed +- Fix ignore_missing parameter has no effect when using template snippet in rename ingest processor ([#9725](https://github.com/opensearch-project/OpenSearch/pull/9725)) +- Fix broken backward compatibility from 2.7 for IndexSorted field indices ([#10045](https://github.com/opensearch-project/OpenSearch/pull/10045)) +- Fix concurrent search NPE when track_total_hits, terminate_after and size=0 are used ([#10082](https://github.com/opensearch-project/OpenSearch/pull/10082)) +- Fix remove ingest processor not handling ignore_missing parameter correctly ([#10089](https://github.com/opensearch-project/OpenSearch/pull/10089)) +- Fix registration and initialization of multiple extensions ([#10256](https://github.com/opensearch-project/OpenSearch/pull/10256)) +- Fix circular dependency in Settings initialization ([#10194](https://github.com/opensearch-project/OpenSearch/pull/10194)) +- Fix Segment Replication ShardLockObtainFailedException bug during index corruption ([#10370](https://github.com/opensearch-project/OpenSearch/pull/10370)) diff --git a/release-notes/opensearch.release-notes-2.11.1.md b/release-notes/opensearch.release-notes-2.11.1.md new file mode 100644 index 0000000000000..06613558de177 --- /dev/null +++ b/release-notes/opensearch.release-notes-2.11.1.md @@ -0,0 +1,10 @@ +## 2023-11-20 Version 2.11.1 Release Notes + +## [2.11.1] + +### Changed +- Use iterative approach to evaluate Regex.simpleMatch ([#11060](https://github.com/opensearch-project/OpenSearch/pull/11060)) + +### Fixed +- [BUG] Disable sort optimization for HALF_FLOAT ([#10999](https://github.com/opensearch-project/OpenSearch/pull/10999)) +- Adding version condition while adding geoshape doc values to the index, to ensure backward compatibility ([#11095](https://github.com/opensearch-project/OpenSearch/pull/11095)) diff --git a/release-notes/opensearch.release-notes-2.12.0.md b/release-notes/opensearch.release-notes-2.12.0.md new file mode 100644 index 0000000000000..49955c1f969f0 --- /dev/null +++ b/release-notes/opensearch.release-notes-2.12.0.md @@ -0,0 +1,175 @@ +## 2024-02-09 Version 2.12.0 Release Notes + +## [2.12.0] +### Added +- [Admission control] Add Resource usage collector service and resource usage tracker ([#9890](https://github.com/opensearch-project/OpenSearch/pull/9890)) +- [Admission control] Add enhancements to FS stats to include read/write time, queue size and IO time ([#10541](https://github.com/opensearch-project/OpenSearch/pull/10541)) +- [Remote cluster state] Change file names for remote cluster state ([#10557](https://github.com/opensearch-project/OpenSearch/pull/10557)) +- [Search Pipelines] Add request-scoped state shared between processors (and three new processors) ([#9405](https://github.com/opensearch-project/OpenSearch/pull/9405)) +- Per request phase latency ([#10351](https://github.com/opensearch-project/OpenSearch/issues/10351)) +- [Remote Store] Add repository stats for remote store ([#10567](https://github.com/opensearch-project/OpenSearch/pull/10567)) +- [Remote cluster state] Upload global metadata in cluster state to remote
store ([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404)) +- [Remote cluster state] Download functionality of global metadata from remote store ([#10535](https://github.com/opensearch-project/OpenSearch/pull/10535)) +- [Remote cluster state] Restore global metadata from remote store when local state is lost after quorum loss ([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404)) +- [Remote cluster state] Make index and global metadata upload timeout dynamic cluster settings ([#10814](https://github.com/opensearch-project/OpenSearch/pull/10814)) +- Add search query categorizer ([#10255](https://github.com/opensearch-project/OpenSearch/pull/10255)) +- Add cluster state stats ([#10670](https://github.com/opensearch-project/OpenSearch/pull/10670)) +- Remove ingest processor supports excluding fields ([#10967](https://github.com/opensearch-project/OpenSearch/pull/10967), [#11983](https://github.com/opensearch-project/OpenSearch/pull/11983)) +- [Tiered caching] Enabling serialization for IndicesRequestCache key object ([#10275](https://github.com/opensearch-project/OpenSearch/pull/10275)) +- [Tiered caching] Defining interfaces, listeners and extending IndicesRequestCache with Tiered cache support ([#10753](https://github.com/opensearch-project/OpenSearch/pull/10753)) +- [Remote cluster state] Restore cluster state version during remote state auto restore ([#10853](https://github.com/opensearch-project/OpenSearch/pull/10853)) +- Update the indexRandom function to create more segments for concurrent search tests ([#10247](https://github.com/opensearch-project/OpenSearch/pull/10247)) +- Add support for query profiler with concurrent aggregation ([#9248](https://github.com/opensearch-project/OpenSearch/pull/9248)) +- Introduce ConcurrentQueryProfiler to profile query using concurrent segment search path and support concurrency during rewrite and create weight ([#10352](https://github.com/opensearch-project/OpenSearch/pull/10352)) +- Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679)) +- Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618)) +- [Streaming Indexing] Introduce new experimental server HTTP transport based on Netty 4 and Project Reactor (Reactor Netty) ([#9672](https://github.com/opensearch-project/OpenSearch/pull/9672)) +- Enable must_exist parameter for update aliases API ([#11210](https://github.com/opensearch-project/OpenSearch/pull/11210)) +- Add back half_float BKD based sort query optimization ([#11024](https://github.com/opensearch-project/OpenSearch/pull/11024)) +- Request level coordinator slow logs ([#10650](https://github.com/opensearch-project/OpenSearch/pull/10650)) +- Add template snippets support for field and target_field in KV ingest processor ([#10040](https://github.com/opensearch-project/OpenSearch/pull/10040)) +- Allowing pipeline processors to access index mapping info by passing ingest service ref as part of the processor factory parameters ([#10307](https://github.com/opensearch-project/OpenSearch/pull/10307)) +- Add experimental SIMD implementation of B-tree to round down dates ([#11194](https://github.com/opensearch-project/OpenSearch/issues/11194)) +- Make number of segment metadata files in remote segment store configurable ([#11329](https://github.com/opensearch-project/OpenSearch/pull/11329))
+- Allow changing number of replicas of searchable snapshot index ([#11317](https://github.com/opensearch-project/OpenSearch/pull/11317)) +- Adding slf4j license header to LoggerMessageFormat.java ([#11069](https://github.com/opensearch-project/OpenSearch/pull/11069)) +- [BWC and API enforcement] Introduce checks for enforcing the API restrictions ([#11175](https://github.com/opensearch-project/OpenSearch/pull/11175)) +- Maintainer approval check ([#11378](https://github.com/opensearch-project/OpenSearch/pull/11378)) +- Create separate transport action for render search template action ([#11170](https://github.com/opensearch-project/OpenSearch/pull/11170)) +- Add additional handling in SearchTemplateRequest when simulate is set to true ([#11591](https://github.com/opensearch-project/OpenSearch/pull/11591)) +- Introduce cluster level setting `cluster.index.restrict.replication.type` to prevent replication type setting override during index creations ([#11583](https://github.com/opensearch-project/OpenSearch/pull/11583)) +- Add match_only_text field that is optimized for storage by trading off positional queries performance ([#6836](https://github.com/opensearch-project/OpenSearch/pull/11039)) +- Add copy ingest processor ([#11870](https://github.com/opensearch-project/OpenSearch/pull/11870)) +- Introduce new feature flag "WRITEABLE_REMOTE_INDEX" to gate the writeable remote index functionality ([#11717](https://github.com/opensearch-project/OpenSearch/pull/11170)) +- Bump OpenTelemetry from 1.32.0 to 1.34.1 ([#11891](https://github.com/opensearch-project/OpenSearch/pull/11891)) +- Add remove_by_pattern ingest processor ([#11920](https://github.com/opensearch-project/OpenSearch/pull/11920)) +- Support index level allocation filtering for searchable snapshot index ([#11563](https://github.com/opensearch-project/OpenSearch/pull/11563)) +- Add `org.opensearch.rest.MethodHandlers` and `RestController#getAllHandlers` ([#11876](https://github.com/opensearch-project/OpenSearch/pull/11876)) +- New DateTime format for RFC3339 compatible date fields ([#11465](https://github.com/opensearch-project/OpenSearch/pull/11465)) +- Add community_id ingest processor ([#12121](https://github.com/opensearch-project/OpenSearch/pull/12121)) +- Add support for Google Application Default Credentials in repository-gcs ([#8394](https://github.com/opensearch-project/OpenSearch/pull/8394)) +- Remove concurrent segment search feature flag for GA launch ([#12074](https://github.com/opensearch-project/OpenSearch/pull/12074)) +- Enable Fuzzy codec for doc id fields using a bloom filter ([#11022](https://github.com/opensearch-project/OpenSearch/pull/11022)) +- [Metrics Framework] Adds support for Histogram metric ([#12062](https://github.com/opensearch-project/OpenSearch/pull/12062)) + +### Dependencies
- Bumps jetty version to 9.4.52.v20230823 to fix GMS-2023-1857 ([#9822](https://github.com/opensearch-project/OpenSearch/pull/9822)) +- Bump Lucene from 9.7.0 to 9.8.0 ([#10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) +- Bump `commons-io:commons-io` from 2.13.0 to 2.15.1 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294), [#11001](https://github.com/opensearch-project/OpenSearch/pull/11001), [#11002](https://github.com/opensearch-project/OpenSearch/pull/11002), [#11446](https://github.com/opensearch-project/OpenSearch/pull/11446), [#11554](https://github.com/opensearch-project/OpenSearch/pull/11554), [#11560](https://github.com/opensearch-project/OpenSearch/pull/11560),
[#11796](https://github.com/opensearch-project/OpenSearch/pull/11796)) +- Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298)) +- Bump `com.netflix.nebula.ospackage-base` from 11.4.0 to 11.8.0 ([#10295](https://github.com/opensearch-project/OpenSearch/pull/10295), [#11630](https://github.com/opensearch-project/OpenSearch/pull/11630), [#12167](https://github.com/opensearch-project/OpenSearch/pull/12167)) +- Bump `org.apache.zookeeper:zookeeper` from 3.9.0 to 3.9.1 ([#10506](https://github.com/opensearch-project/OpenSearch/pull/10506)) +- Bump `de.thetaphi:forbiddenapis` from 3.5.1 to 3.6 ([#10508](https://github.com/opensearch-project/OpenSearch/pull/10508)) +- Bump `org.codehaus.woodstox:stax2-api` from 4.2.1 to 4.2.2 ([#10639](https://github.com/opensearch-project/OpenSearch/pull/10639)) +- Bump `org.bouncycastle:bc-fips` from 1.0.2.3 to 1.0.2.4 ([#10297](https://github.com/opensearch-project/OpenSearch/pull/10297)) +- Bump `com.google.http-client:google-http-client` from 1.43.2 to 1.43.3 ([#10635](https://github.com/opensearch-project/OpenSearch/pull/10635)) +- Bump `com.squareup.okio:okio` from 3.5.0 to 3.7.0 ([#10637](https://github.com/opensearch-project/OpenSearch/pull/10637), [#11632](https://github.com/opensearch-project/OpenSearch/pull/11632)) +- Bump `org.apache.logging.log4j:log4j-core` from 2.20.0 to 2.22.1 ([#10858](https://github.com/opensearch-project/OpenSearch/pull/10858), [#11000](https://github.com/opensearch-project/OpenSearch/pull/11000), [#11270](https://github.com/opensearch-project/OpenSearch/pull/11270), [#11695](https://github.com/opensearch-project/OpenSearch/pull/11695)) +- Bump `aws-actions/configure-aws-credentials` from 2 to 4 ([#10504](https://github.com/opensearch-project/OpenSearch/pull/10504)) +- Bump `stefanzweifel/git-auto-commit-action` from 4 to 5 ([#11171](https://github.com/opensearch-project/OpenSearch/pull/11171)) +- Bump `actions/github-script` from 6 to 7.0.1 ([#11271](https://github.com/opensearch-project/OpenSearch/pull/11271), [#12166](https://github.com/opensearch-project/OpenSearch/pull/12166)) +- Bump `jackson` and `jackson_databind` from 2.15.2 to 2.16.0 ([#11273](https://github.com/opensearch-project/OpenSearch/pull/11273)) +- Bump `netty` from 4.1.100.Final to 4.1.106.Final ([#11294](https://github.com/opensearch-project/OpenSearch/pull/11294), [#11775](https://github.com/opensearch-project/OpenSearch/pull/11775), [#12034](https://github.com/opensearch-project/OpenSearch/pull/12034)) +- Bump `com.avast.gradle:gradle-docker-compose-plugin` from 0.16.12 to 0.17.6 ([#10163](https://github.com/opensearch-project/OpenSearch/pull/10163), [#11692](https://github.com/opensearch-project/OpenSearch/pull/11692)) +- Bump `com.squareup.okhttp3:okhttp` from 4.11.0 to 4.12.0 ([#10861](https://github.com/opensearch-project/OpenSearch/pull/10861)) +- Bump `org.apache.commons:commons-text` from 1.10.0 to 1.11.0 ([#11344](https://github.com/opensearch-project/OpenSearch/pull/11344)) +- Bump `reactor-netty-core` from 1.1.12 to 1.1.15 ([#11350](https://github.com/opensearch-project/OpenSearch/pull/11350)), ([#12042](https://github.com/opensearch-project/OpenSearch/pull/12042)) +- Bump `com.gradle.enterprise` from 3.14.1 to 3.16.2 ([#11339](https://github.com/opensearch-project/OpenSearch/pull/11339), [#11629](https://github.com/opensearch-project/OpenSearch/pull/11629),
[#12056](https://github.com/opensearch-project/OpenSearch/pull/12056)) +- Bump `actions/setup-java` from 3 to 4 ([#11447](https://github.com/opensearch-project/OpenSearch/pull/11447)) +- Bump `commons-net:commons-net` from 3.9.0 to 3.10.0 ([#11450](https://github.com/opensearch-project/OpenSearch/pull/11450)) +- Bump `org.apache.maven:maven-model` from 3.9.4 to 3.9.6 ([#11445](https://github.com/opensearch-project/OpenSearch/pull/11445)) +- Bump `org.apache.xmlbeans:xmlbeans` from 5.1.1 to 5.2.0 ([#11448](https://github.com/opensearch-project/OpenSearch/pull/11448)) +- Bump `logback-core` and `logback-classic` to 1.2.13 ([#11521](https://github.com/opensearch-project/OpenSearch/pull/11521)) +- Bumps `jetty` version from 9.4.52.v20230823 to 9.4.53.v20231009 ([#11539](https://github.com/opensearch-project/OpenSearch/pull/11539)) +- Bump `org.wiremock:wiremock-standalone` from 3.1.0 to 3.3.1 ([#11555](https://github.com/opensearch-project/OpenSearch/pull/11555)) +- Bump `org.apache.commons:commons-compress` from 1.24.0 to 1.25.0 ([#11556](https://github.com/opensearch-project/OpenSearch/pull/11556)) +- Bump `actions/stale` from 8 to 9 ([#11557](https://github.com/opensearch-project/OpenSearch/pull/11557)) +- Bump `com.netflix.nebula:nebula-publishing-plugin` from 20.3.0 to 21.0.0 ([#11671](https://github.com/opensearch-project/OpenSearch/pull/11671)) +- Bump `commons-cli:commons-cli` from 1.5.0 to 1.6.0 ([#10996](https://github.com/opensearch-project/OpenSearch/pull/10996)) +- Bump `com.maxmind.geoip2:geoip2` from 4.1.0 to 4.2.0 ([#11559](https://github.com/opensearch-project/OpenSearch/pull/11559)) +- Bump `org.apache.commons:commons-lang3` from 3.13.0 to 3.14.0 ([#11691](https://github.com/opensearch-project/OpenSearch/pull/11691)) +- Bump `com.maxmind.db:maxmind-db` from 3.0.0 to 3.1.0 ([#11693](https://github.com/opensearch-project/OpenSearch/pull/11693)) +- Bump `net.java.dev.jna:jna` from 5.13.0 to 5.14.0 ([#11798](https://github.com/opensearch-project/OpenSearch/pull/11798)) +- Bump `lycheeverse/lychee-action` from 1.8.0 to 1.9.1 ([#11795](https://github.com/opensearch-project/OpenSearch/pull/11795), [#11887](https://github.com/opensearch-project/OpenSearch/pull/11887)) +- Bump `Lucene` from 9.8.0 to 9.9.2 ([#11421](https://github.com/opensearch-project/OpenSearch/pull/11421)), ([#12063](https://github.com/opensearch-project/OpenSearch/pull/12063)) +- Bump `com.networknt:json-schema-validator` from 1.0.86 to 1.2.0 ([#11886](https://github.com/opensearch-project/OpenSearch/pull/11886), [#11963](https://github.com/opensearch-project/OpenSearch/pull/11963)) +- Bump `com.google.api:gax-httpjson` from 0.103.1 to 2.42.0 ([#11794](https://github.com/opensearch-project/OpenSearch/pull/11794), [#12165](https://github.com/opensearch-project/OpenSearch/pull/12165)) +- Bump `com.google.oauth-client:google-oauth-client` from 1.34.1 to 1.35.0 ([#11960](https://github.com/opensearch-project/OpenSearch/pull/11960)) +- Bump `com.diffplug.spotless` from 6.23.2 to 6.25.0 ([#11962](https://github.com/opensearch-project/OpenSearch/pull/11962), [#12055](https://github.com/opensearch-project/OpenSearch/pull/12055)) +- Bump `com.google.cloud:google-cloud-core` from 2.5.10 to 2.30.0 ([#11961](https://github.com/opensearch-project/OpenSearch/pull/11961)) +- Bump `reactor-core` from 3.5.11 to 3.5.14 ([#12042](https://github.com/opensearch-project/OpenSearch/pull/12042)) +- Bump `org.apache.shiro:shiro-core` from 1.11.0 to 1.13.0 ([#12200](https://github.com/opensearch-project/OpenSearch/pull/12200)) +- Bump 
`com.google.http-client:google-http-client-jackson2` from 1.43.3 to 1.44.1 ([#12059](https://github.com/opensearch-project/OpenSearch/pull/12059)) +- Bump `peter-evans/create-issue-from-file` from 4 to 5 ([#12057](https://github.com/opensearch-project/OpenSearch/pull/12057)) +- Bump `org.gradle.test-retry` from 1.5.4 to 1.5.8 ([#12168](https://github.com/opensearch-project/OpenSearch/pull/12168)) +- Bump `org.apache.kerby:kerb-admin` from 1.0.1 to 2.0.3 ([#12194](https://github.com/opensearch-project/OpenSearch/pull/12194)) + +### Changed +- Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) +- Force merge with `only_expunge_deletes` honors max segment size ([#10036](https://github.com/opensearch-project/OpenSearch/pull/10036)) +- Add the means to extract the contextual properties from HttpChannel, TcpChannel and TransportChannel without excessive typecasting ([#10562](https://github.com/opensearch-project/OpenSearch/pull/10562)), ([#11751](https://github.com/opensearch-project/OpenSearch/pull/11751)) +- Introduce new dynamic cluster setting to control slice computation for concurrent segment search ([#9107](https://github.com/opensearch-project/OpenSearch/pull/9107)) +- Search pipelines now support asynchronous request and response processors to avoid blocking on a transport thread ([#10598](https://github.com/opensearch-project/OpenSearch/pull/10598)) +- [Remote Store] Add Remote Store backpressure rejection stats to `_nodes/stats` ([#10524](https://github.com/opensearch-project/OpenSearch/pull/10524)) +- [BUG] Fix java.lang.SecurityException in repository-gcs plugin ([#10642](https://github.com/opensearch-project/OpenSearch/pull/10642)) +- Add telemetry tracer/metric enable flag and integ test. ([#10395](https://github.com/opensearch-project/OpenSearch/pull/10395)) +- Performance improvement for Datetime field caching ([#4558](https://github.com/opensearch-project/OpenSearch/issues/4558)) +- Add instrumentation for indexing in transport bulk action and transport shard bulk action.
([#10273](https://github.com/opensearch-project/OpenSearch/pull/10273)) +- Disallow removing some metadata fields by remove ingest processor ([#10895](https://github.com/opensearch-project/OpenSearch/pull/10895), [#11607](https://github.com/opensearch-project/OpenSearch/pull/11607)) +- Performance improvement for MultiTerm Queries on Keyword fields ([#7057](https://github.com/opensearch-project/OpenSearch/issues/7057)) +- Refactor common parts from the Rounding class into a separate 'round' package ([#11023](https://github.com/opensearch-project/OpenSearch/issues/11023)) +- Performance improvement for date histogram aggregations without sub-aggregations ([#11083](https://github.com/opensearch-project/OpenSearch/pull/11083)) +- Apply the fast filter optimization to composite aggregation of date histogram source ([#11505](https://github.com/opensearch-project/OpenSearch/pull/11505)) +- Disable concurrent aggs for Diversified Sampler and Sampler aggs ([#11087](https://github.com/opensearch-project/OpenSearch/issues/11087)) +- Made leader/follower check timeout setting dynamic ([#10528](https://github.com/opensearch-project/OpenSearch/pull/10528)) +- Improved performance of numeric exact-match queries ([#11209](https://github.com/opensearch-project/OpenSearch/pull/11209)) +- Change error message when per shard document limit is breached ([#11312](https://github.com/opensearch-project/OpenSearch/pull/11312)) +- Improve boolean parsing performance ([#11308](https://github.com/opensearch-project/OpenSearch/pull/11308)) +- Interpret byte array as primitive using VarHandles ([#11362](https://github.com/opensearch-project/OpenSearch/pull/11362)) +- Automatically add scheme to discovery.ec2.endpoint ([#11512](https://github.com/opensearch-project/OpenSearch/pull/11512)) +- Restore support for Java 8 for RestClient ([#11562](https://github.com/opensearch-project/OpenSearch/pull/11562)) +- Add deleted doc count in _cat/shards ([#11678](https://github.com/opensearch-project/OpenSearch/pull/11678)) +- Capture information for additional query types and aggregation types ([#11582](https://github.com/opensearch-project/OpenSearch/pull/11582)) +- Use slice_size == shard_size heuristic in terms aggs for concurrent segment search and properly calculate the doc_count_error ([#11732](https://github.com/opensearch-project/OpenSearch/pull/11732)) +- Added Support for dynamically adding SearchRequestOperationsListeners with SearchRequestOperationsCompositeListenerFactory ([#11526](https://github.com/opensearch-project/OpenSearch/pull/11526)) +- Ensure Jackson default maximums introduced in 2.16.0 do not conflict with OpenSearch settings ([#11890](https://github.com/opensearch-project/OpenSearch/pull/11890)) +- Extract cluster management for integration tests into JUnit test rule out of OpenSearchIntegTestCase ([#11877](https://github.com/opensearch-project/OpenSearch/pull/11877)), ([#12000](https://github.com/opensearch-project/OpenSearch/pull/12000)) +- Workaround for https://bugs.openjdk.org/browse/JDK-8323659 regression, introduced in JDK-21.0.2 ([#11968](https://github.com/opensearch-project/OpenSearch/pull/11968)) +- Updates IpField to be searchable when only `doc_values` are enabled ([#11508](https://github.com/opensearch-project/OpenSearch/pull/11508)) +- [Query Insights] Query Insights Framework which currently supports retrieving the most time-consuming queries within the last configured time window ([#11903](https://github.com/opensearch-project/OpenSearch/pull/11903)) +- [Query Insights] Implement Top N
Queries feature to collect and gather information about high latency queries in a window ([#11904](https://github.com/opensearch-project/OpenSearch/pull/11904)) +- Add override support for sampling based on action ([#9621](https://github.com/opensearch-project/OpenSearch/issues/9621)) +- Added custom sampler support based on transport action in request ([#9621](https://github.com/opensearch-project/OpenSearch/issues/9621)) +- Disable concurrent search for composite aggregation ([#12375](https://github.com/opensearch-project/OpenSearch/pull/12375)) + +### Removed +- Remove deprecated classes for Rounding ([#10956](https://github.com/opensearch-project/OpenSearch/issues/10956)) + +### Fixed +- Fix failure in dissect ingest processor parsing empty brackets ([#9255](https://github.com/opensearch-project/OpenSearch/pull/9255)) +- Fix `class_cast_exception` when passing int to `_version` and other metadata fields in ingest simulate API ([#10101](https://github.com/opensearch-project/OpenSearch/pull/10101)) +- Fix Segment Replication ShardLockObtainFailedException bug during index corruption ([#10370](https://github.com/opensearch-project/OpenSearch/pull/10370)) +- Fix some test methods in SimulatePipelineRequestParsingTests that were never run and fix test failure ([#10496](https://github.com/opensearch-project/OpenSearch/pull/10496)) +- Fix passing wrong parameter when calling newConfigurationException() in DotExpanderProcessor ([#10737](https://github.com/opensearch-project/OpenSearch/pull/10737)) +- Delegating CachingWeightWrapper#count to internal weight object ([#10543](https://github.com/opensearch-project/OpenSearch/pull/10543)) +- Fix per-request latency not tracked for the last phase ([#10934](https://github.com/opensearch-project/OpenSearch/pull/10934)) +- Fix SuggestSearch.testSkipDuplicates by forcing refresh when indexing its test documents ([#11068](https://github.com/opensearch-project/OpenSearch/pull/11068)) +- [BUG] Fix the thread context that is not properly cleared and messes up the traces ([#10873](https://github.com/opensearch-project/OpenSearch/pull/10873)) +- Handle canMatchSearchAfter for frozen context scenario ([#11249](https://github.com/opensearch-project/OpenSearch/pull/11249)) +- Fix the issue with DefaultSpanScope restoring wrong span in the TracerContextStorage upon detach ([#11316](https://github.com/opensearch-project/OpenSearch/issues/11316)) +- Remove shadowJar from `lang-painless` module publication ([#11369](https://github.com/opensearch-project/OpenSearch/issues/11369)) +- Fix remote shards balancer and remove unused variables ([#11167](https://github.com/opensearch-project/OpenSearch/pull/11167)) +- Fix parsing of flat object fields with dots in keys ([#11425](https://github.com/opensearch-project/OpenSearch/pull/11425)) +- Fix bug where replication lag grows post primary relocation ([#11238](https://github.com/opensearch-project/OpenSearch/pull/11238)) +- Fix noop_update_total metric in indexing stats not being updated by bulk API ([#11485](https://github.com/opensearch-project/OpenSearch/pull/11485), [#11917](https://github.com/opensearch-project/OpenSearch/pull/11917)) +- Fix for stuck update action in a bulk with `retry_on_conflict` property ([#11152](https://github.com/opensearch-project/OpenSearch/issues/11152)) +- Fix template setting override for replication type ([#11417](https://github.com/opensearch-project/OpenSearch/pull/11417)) +- Fix automatic addition of protocol broken in #11512 ([#11609](https://github.com/opensearch-project/OpenSearch/pull/11609)) +- Fix
issue when calling Delete PIT endpoint and no PITs exist ([#11711](https://github.com/opensearch-project/OpenSearch/pull/11711)) +- Fix tracing context propagation for local transport instrumentation ([#11490](https://github.com/opensearch-project/OpenSearch/pull/11490)) +- Fix parsing of single-line comments in `lang-painless` ([#11815](https://github.com/opensearch-project/OpenSearch/issues/11815)) +- Fix memory leak issue in ReorganizingLongHash ([#11953](https://github.com/opensearch-project/OpenSearch/issues/11953)) +- Prevent setting remote_snapshot store type on index creation ([#11867](https://github.com/opensearch-project/OpenSearch/pull/11867)) +- [BUG] Fix remote shards balancer when filtering throttled nodes ([#11724](https://github.com/opensearch-project/OpenSearch/pull/11724)) +- Add advance(int) for numeric values in order to allow point-based optimization to kick in ([#12089](https://github.com/opensearch-project/OpenSearch/pull/12089)) diff --git a/release-notes/opensearch.release-notes-2.9.0.md b/release-notes/opensearch.release-notes-2.9.0.md index 76d2d9f4593d6..3588d6883469a 100644 --- a/release-notes/opensearch.release-notes-2.9.0.md +++ b/release-notes/opensearch.release-notes-2.9.0.md @@ -87,7 +87,6 @@ - [Search Pipelines] Pass pipeline creation context to processor factories ([#8164](https://github.com/opensearch-project/OpenSearch/pull/8164)) - Enabling compression levels for zstd and zstd_no_dict ([#8312](https://github.com/opensearch-project/OpenSearch/pull/8312)) - Optimize Metadata build() to skip redundant computations as part of ClusterState build ([#7853](https://github.com/opensearch-project/OpenSearch/pull/7853)) -- Add safeguard limits for file cache during node level allocation ([#8208](https://github.com/opensearch-project/OpenSearch/pull/8208)) - Move span actions to Scope ([#8411](https://github.com/opensearch-project/OpenSearch/pull/8411)) - Add wrapper tracer implementation ([#8565](https://github.com/opensearch-project/OpenSearch/pull/8565)) - Improved performance of parsing floating point numbers ([#7909](https://github.com/opensearch-project/OpenSearch/pull/7909)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.forcemerge.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.forcemerge.json index 02fbcc36dfe64..986bce55f41e5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.forcemerge.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.forcemerge.json @@ -63,6 +63,10 @@ "wait_for_completion": { "type" : "boolean", "description" : "If false, the request will return a task immediately and the operation will run in background. Defaults to true." + }, + "primary_only": { + "type" : "boolean", + "description" : "Specify whether the operation should only be performed on primary shards. Defaults to false."
} } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json index e0fbeeb83ffc4..e78d49a67a98a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json @@ -229,6 +229,11 @@ "search_pipeline": { "type": "string", "description": "The search pipeline to use to execute this request" + }, + "include_named_queries_score":{ + "type": "boolean", + "description":"Indicates whether hit.matched_queries should be rendered as a map that includes the name of the matched query associated with its score (true) or as an array containing the names of the matched queries (false)", + "default":false } }, "body":{ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml index 189215b6562a3..29fbf55417961 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml @@ -1,14 +1,107 @@ --- "Help": - skip: - version: " - 2.3.99" + version: " - 2.11.99" + reason: deleted docs and concurrent search are added in 2.12.0 + features: node_selector + - do: + cat.shards: + help: true + node_selector: + version: "2.12.0 - " + + - match: + $body: | + /^ index .+ \n + shard .+ \n + prirep .+ \n + state .+ \n + docs .+ \n + store .+ \n + ip .+ \n + id .+ \n + node .+ \n + sync_id .+ \n + unassigned.reason .+ \n + unassigned.at .+ \n + unassigned.for .+ \n + unassigned.details .+ \n + recoverysource.type .+ \n + completion.size .+ \n + fielddata.memory_size .+ \n + fielddata.evictions .+ \n + query_cache.memory_size .+ \n + query_cache.evictions .+ \n + flush.total .+ \n + flush.total_time .+ \n + get.current .+ \n + get.time .+ \n + get.total .+ \n + get.exists_time .+ \n + get.exists_total .+ \n + get.missing_time .+ \n + get.missing_total .+ \n + indexing.delete_current .+ \n + indexing.delete_time .+ \n + indexing.delete_total .+ \n + indexing.index_current .+ \n + indexing.index_time .+ \n + indexing.index_total .+ \n + indexing.index_failed .+ \n + merges.current .+ \n + merges.current_docs .+ \n + merges.current_size .+ \n + merges.total .+ \n + merges.total_docs .+ \n + merges.total_size .+ \n + merges.total_time .+ \n + refresh.total .+ \n + refresh.time .+ \n + refresh.external_total .+ \n + refresh.external_time .+ \n + refresh.listeners .+ \n + search.fetch_current .+ \n + search.fetch_time .+ \n + search.fetch_total .+ \n + search.open_contexts .+ \n + search.query_current .+ \n + search.query_time .+ \n + search.query_total .+ \n + search.concurrent_query_current .+ \n + search.concurrent_query_time .+ \n + search.concurrent_query_total .+ \n + search.concurrent_avg_slice_count .+ \n + search.scroll_current .+ \n + search.scroll_time .+ \n + search.scroll_total .+ \n + search.point_in_time_current .+ \n + search.point_in_time_time .+ \n + search.point_in_time_total .+ \n + segments.count .+ \n + segments.memory .+ \n + segments.index_writer_memory .+ \n + segments.version_map_memory .+ \n + segments.fixed_bitset_memory .+ \n + seq_no.max .+ \n + seq_no.local_checkpoint .+ \n + seq_no.global_checkpoint .+ \n + warmer.current .+ \n + warmer.total .+ \n + warmer.total_time .+ \n + path.data .+ \n + path.state .+ \n + docs.deleted .+ \n + $/ +--- +"Help from 2.4.0 to 2.11.0": + - skip: + version: " - 2.3.99, 2.12.0 - " + reason:
point in time stats were added in 2.4.0 features: node_selector - do: cat.shards: help: true node_selector: - version: "2.4.0 - " + version: "2.4.0 - 2.11.99" - match: $body: | diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml index 1ce8468cb51f9..39c8040993f2a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml @@ -1,3 +1,57 @@ +"Test cat thread_pool total_wait_time output": + - skip: + version: " - 2.10.99" + reason: thread_pool total_wait_time stats were introduced in V_2.11.0 + + - do: + cat.thread_pool: {} + + - match: + $body: | + / #node_name name active queue rejected + ^ (\S+ \s+ \S+ \s+ \d+ \s+ \d+ \s+ \d+ \n)+ $/ + + - do: + cat.thread_pool: + thread_pool_patterns: search,search_throttled,generic + h: name,total_wait_time,twt + v: true + + - match: + $body: | + /^ name \s+ total_wait_time \s+ twt \n + (generic \s+ -1 \s+ -1 \n + search \s+ \d*\.*\d*\D+ \s+ \d*\.*\d*\D+ \n + search_throttled \s+ \d*\.*\d*\D+ \s+ \d*\.*\d*\D+ \n)+ $/ + +--- +"Test cat thread_pool total_wait_time output with concurrent search thread_pool": + - skip: + version: " - 2.11.99" + reason: index_search thread_pool was introduced in V_2.12.0 + + - do: + cat.thread_pool: {} + + - match: + $body: | + / #node_name name active queue rejected + ^ (\S+ \s+ \S+ \s+ \d+ \s+ \d+ \s+ \d+ \n)+ $/ + + - do: + cat.thread_pool: + thread_pool_patterns: search,search_throttled,index_searcher,generic + h: name,total_wait_time,twt + v: true + + - match: + $body: | + /^ name \s+ total_wait_time \s+ twt \n + (generic \s+ -1 \s+ -1 \n + index_searcher \s+ \d*\.*\d*\D+ \s+ \d*\.*\d*\D+ \n + search \s+ \d*\.*\d*\D+ \s+ \d*\.*\d*\D+ \n + search_throttled \s+ \d*\.*\d*\D+ \s+ \d*\.*\d*\D+ \n)+ $/ + --- "Test cat thread_pool output": - skip: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/10_basic.yml index d62c4c8882b13..39fb1604d9596 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/10_basic.yml @@ -27,3 +27,23 @@ index: test max_num_segments: 10 only_expunge_deletes: true + +--- +"Test primary_only parameter": + - skip: + version: " - 2.99.99" + reason: "primary_only is available in 3.0+" + + - do: + indices.create: + index: test + body: + settings: + index.number_of_shards: 2 + index.number_of_replicas: 1 + + - do: + indices.forcemerge: + index: test + primary_only: true + - match: { _shards.total: 2 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/20_wait_for_completion.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/20_wait_for_completion.yml index 9561ecd89fdad..efa239547e84a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/20_wait_for_completion.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.forcemerge/20_wait_for_completion.yml @@ -25,7 +25,7 @@ wait_for_completion: true task_id: $taskId - match: { task.action: "indices:admin/forcemerge" } - - match: { task.description: "Force-merge indices [test_index], maxSegments[1], onlyExpungeDeletes[false], flush[true]" } + - match: { task.description: "Force-merge 
indices [test_index], maxSegments[1], onlyExpungeDeletes[false], flush[true], primaryOnly[false]" } # .tasks index is created when the force-merge operation completes, so we should delete .tasks index finally, # if not, the .tasks index may introduce unexpected warnings and then cause other test cases to fail. diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/20_missing_field.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/20_missing_field.yml index a36f807e63e0e..a65908b238013 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/20_missing_field.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_field_mapping/20_missing_field.yml @@ -1,9 +1,5 @@ --- "Return empty object if field doesn't exist, but index does": - - skip: - version: "all" - reason: "AwaitsFix https://github.com/opensearch-project/OpenSearch/issues/2440" - - do: indices.create: index: test_index @@ -18,7 +14,5 @@ indices.get_field_mapping: index: test_index fields: not_existent - ignore: 404 # ignore 404 failures for now - # see: https://github.com/opensearch-project/OpenSearch/issues/2440 - match: { 'test_index.mappings': {}} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/40_max_shard_size.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/40_max_shard_size.yml index 32ac11097d3dc..bac2898ccea1c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/40_max_shard_size.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/40_max_shard_size.yml @@ -4,8 +4,8 @@ # number_of_shards for the target index. - skip: - version: " - 2.99.99" - reason: "only available in 3.0+" + version: " - 2.4.99" + reason: "max_shard_size was introduced in 2.5.0" features: allowed_warnings - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml index b9089689b0cf1..3b7ea15164e9f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml @@ -156,3 +156,23 @@ query: {"range": { "rank": { "from": 0 } } } track_total_hits: false size: 3 + +--- +"Index Sort half float": + - do: + catch: bad_request + indices.create: + index: test + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + index.sort.field: rank + mappings: + properties: + rank: + type: half_float + + # This should fail with 400 as half_float is not supported for index sort + - match: { status: 400 } + - match: { error.type: illegal_argument_exception } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/50_noop_update.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/50_noop_update.yml new file mode 100644 index 0000000000000..2192873623715 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/50_noop_update.yml @@ -0,0 +1,55 @@ +--- +setup: + + - do: + indices.create: + index: test1 + wait_for_active_shards: all + body: + settings: + index.number_of_shards: 1 + index.number_of_replicas: 1 + + - do: + index: + index: test1 + id: 1 + body: { "bar": "bar" } + + - do: + indices.refresh: {} + +# Related issue: https://github.com/opensearch-project/OpenSearch/issues/9857 +--- +"Test noop_update_total metric can be updated by both update
API and bulk API": + - skip: + version: " - 2.11.99" + reason: "fixed in 2.12.0" + + - do: + update: + index: test1 + id: 1 + body: { "doc": { "bar": "bar" } } + + - do: + indices.stats: + index: test1 + metric: indexing + + - match: { indices.test1.primaries.indexing.noop_update_total: 1 } + - match: { indices.test1.total.indexing.noop_update_total: 1 } + + - do: + bulk: + body: | + {"update": {"_id": "1", "_index": "test1"}} + {"doc": {"bar": "bar"}} + + - do: + indices.stats: + index: test1 + metric: indexing + + - match: { indices.test1.primaries.indexing.noop_update_total: 2 } + - match: { indices.test1.total.indexing.noop_update_total: 2 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/40_remove_with_must_exist.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/40_remove_with_must_exist.yml new file mode 100644 index 0000000000000..b9457f0290897 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/40_remove_with_must_exist.yml @@ -0,0 +1,141 @@ +--- +"Throw aliases missing exception when removing non-existing alias with setting must_exist to true": + - skip: + version: " - 2.11.99" + reason: "introduced in 2.12.0" + + - do: + indices.create: + index: test_index + + - do: + indices.exists_alias: + name: test_alias + + - is_false: '' + + - do: + catch: /aliases \[test_alias\] missing/ + indices.update_aliases: + body: + actions: + - remove: + index: test_index + alias: test_alias + must_exist: true + + - do: + catch: /aliases \[testAlias\*\] missing/ + indices.update_aliases: + body: + actions: + - remove: + index: test_index + aliases: [ testAlias* ] + must_exist: true + + - do: + catch: /\[aliases\] can't be empty/ + indices.update_aliases: + body: + actions: + - remove: + index: test_index + aliases: [] + must_exist: true + +--- +"Throw aliases missing exception when all of the specified aliases are non-existing": + - skip: + version: " - 2.11.99" + reason: "introduced in 2.12.0" + + - do: + indices.create: + index: test_index + + - do: + indices.exists_alias: + name: test_alias + + - is_false: '' + + - do: + catch: /aliases \[test\_alias\] missing/ + indices.update_aliases: + body: + actions: + - remove: + index: test_index + alias: test_alias + + - do: + catch: /aliases \[test\_alias\*\] missing/ + indices.update_aliases: + body: + actions: + - remove: + indices: [ test_index ] + aliases: [ test_alias* ] + +--- +"Remove successfully when some specified aliases are non-existing": + - skip: + version: " - 2.11.99" + reason: "introduced in 2.12.0" + + - do: + indices.create: + index: test_index + + - do: + indices.exists_alias: + name: test_alias + + - is_false: '' + + - do: + indices.update_aliases: + body: + actions: + - add: + indices: [ test_index ] + aliases: [ test_alias ] + + - do: + indices.update_aliases: + body: + actions: + - remove: + index: test_index + aliases: [test_alias, test_alias1, test_alias2] + must_exist: false + + - match: { acknowledged: true } + +--- +"Remove silently when all of the specified aliases are non-existing and must_exist is false": + - skip: + version: " - 2.11.99" + reason: "introduced in 2.12.0" + + - do: + indices.create: + index: test_index + + - do: + indices.exists_alias: + name: test_alias + + - is_false: '' + + - do: + indices.update_aliases: + body: + actions: + - remove: + index: test_index + aliases: [test_alias, test_alias1, test_alias2] + must_exist: false + + - match: { acknowledged: true } diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/20_query_string_field_match_only_text.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/20_query_string_field_match_only_text.yml new file mode 100644 index 0000000000000..044ae5dd6a94d --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/20_query_string_field_match_only_text.yml @@ -0,0 +1,53 @@ +--- +"validate_query with query_string parameters": + - skip: + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" + - do: + indices.create: + index: test + body: + mappings: + properties: + field: + type: match_only_text + number: + type: integer + + - do: + indices.validate_query: + index: test + q: bar + df: field + + - is_true: valid + + - do: + indices.validate_query: + index: test + q: field:foo field:xyz + + - is_true: valid + + - do: + indices.validate_query: + index: test + q: field:foo field:xyz + default_operator: AND + + - is_true: valid + + - do: + indices.validate_query: + index: test + q: field:BA* + + - is_true: valid + + - do: + indices.validate_query: + index: test + q: number:foo + lenient: true + + - is_true: valid diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml index 1f1f42890355e..784c7b52b18b4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml @@ -138,6 +138,35 @@ - is_false: nodes.$node_id.indices.translog - is_false: nodes.$node_id.indices.recovery +--- +"Metric - indexing doc_status": + - skip: + version: " - 2.10.99" + reason: "Doc Status Stats were introduced in v2.11.0" + - do: + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id + + - do: + nodes.stats: { metric: indices, index_metric: indexing } + + - is_false: nodes.$node_id.indices.docs + - is_false: nodes.$node_id.indices.store + - is_true: nodes.$node_id.indices.indexing + - is_true: nodes.$node_id.indices.indexing.doc_status + - is_false: nodes.$node_id.indices.get + - is_false: nodes.$node_id.indices.search + - is_false: nodes.$node_id.indices.merges + - is_false: nodes.$node_id.indices.refresh + - is_false: nodes.$node_id.indices.flush + - is_false: nodes.$node_id.indices.warmer + - is_false: nodes.$node_id.indices.query_cache + - is_false: nodes.$node_id.indices.fielddata + - is_false: nodes.$node_id.indices.completion + - is_false: nodes.$node_id.indices.segments + - is_false: nodes.$node_id.indices.translog + - is_false: nodes.$node_id.indices.recovery --- "Metric - recovery": diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml index 3b16cdb13a22f..e7da9a0bc454c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml @@ -598,7 +598,6 @@ setup: - match: { aggregations.histo.buckets.0.doc_count: 2 } - match: { profile.shards.0.aggregations.0.type: DateHistogramAggregator } - match: { profile.shards.0.aggregations.0.description: histo } - - match: { profile.shards.0.aggregations.0.breakdown.collect_count: 4 } - match: { profile.shards.0.aggregations.0.debug.total_buckets: 3 } --- diff 
--git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml index 09278690f5d05..2808be8cd7045 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml @@ -509,6 +509,134 @@ setup: - match: { aggregations.1.2.buckets.1.key.nested: 1000 } - match: { aggregations.1.2.buckets.1.doc_count: 1 } +--- +"Composite aggregation with filtered nested parent": + - skip: + version: " - 2.99.99" + reason: fixed in 3.0.0 + - do: + search: + rest_total_hits_as_int: true + index: test + body: + aggregations: + 1: + nested: + path: nested + aggs: + 2: + filter: + range: + nested.nested_long: + gt: 0 + lt: 100 + aggs: + 3: + composite: + sources: [ + "nested": { + "terms": { + "field": "nested.nested_long" + } + } + ] + + - match: {hits.total: 6} + - length: { aggregations.1.2.3.buckets: 2 } + - match: { aggregations.1.2.3.buckets.0.key.nested: 10 } + - match: { aggregations.1.2.3.buckets.0.doc_count: 2 } + - match: { aggregations.1.2.3.buckets.1.key.nested: 20 } + - match: { aggregations.1.2.3.buckets.1.doc_count: 2 } + - do: + search: + rest_total_hits_as_int: true + index: test + body: + aggregations: + 1: + nested: + path: nested + aggs: + 2: + filter: + range: + nested.nested_long: + gt: 0 + lt: 100 + aggs: + 3: + composite: + after: { "nested": 10 } + sources: [ + "nested": { + "terms": { + "field": "nested.nested_long" + } + } + ] + - match: {hits.total: 6} + - length: { aggregations.1.2.3.buckets: 1 } + - match: { aggregations.1.2.3.buckets.0.key.nested: 20 } + - match: { aggregations.1.2.3.buckets.0.doc_count: 2 } + +--- +"Composite aggregation with filtered reverse nested parent": + - skip: + version: " - 2.99.99" + reason: fixed in 3.0.0 + - do: + search: + rest_total_hits_as_int: true + index: test + body: + aggregations: + 1: + nested: + path: nested + aggs: + 2: + filter: + range: + nested.nested_long: + gt: 0 + lt: 20 + aggs: + 3: + reverse_nested: {} + aggs: + 4: + composite: + sources: [ + { + "long": { + "terms": { + "field": "long" + } + } + }, + { + "kw": { + "terms": { + "field": "keyword" + } + } + } + ] + - match: {hits.total: 6} + - length: { aggregations.1.2.3.4.buckets: 4 } + - match: { aggregations.1.2.3.4.buckets.0.key.long: 0 } + - match: { aggregations.1.2.3.4.buckets.0.key.kw: "bar" } + - match: { aggregations.1.2.3.4.buckets.0.doc_count: 1 } + - match: { aggregations.1.2.3.4.buckets.1.key.long: 10 } + - match: { aggregations.1.2.3.4.buckets.1.key.kw: "foo" } + - match: { aggregations.1.2.3.4.buckets.1.doc_count: 1 } + - match: { aggregations.1.2.3.4.buckets.2.key.long: 20 } + - match: { aggregations.1.2.3.4.buckets.2.key.kw: "foo" } + - match: { aggregations.1.2.3.4.buckets.2.doc_count: 1 } + - match: { aggregations.1.2.3.4.buckets.3.key.long: 100 } + - match: { aggregations.1.2.3.4.buckets.3.key.kw: "bar" } + - match: { aggregations.1.2.3.4.buckets.3.doc_count: 1 } + --- "Composite aggregation with unmapped field": - skip: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/30_sig_terms_field_match_only_text.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/30_sig_terms_field_match_only_text.yml new file mode 100644 index 0000000000000..d1cc6c8295bd9 --- /dev/null +++ 
b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/30_sig_terms_field_match_only_text.yml @@ -0,0 +1,76 @@ +--- +"Default index": + - skip: + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" + - do: + indices.create: + index: goodbad + body: + settings: + number_of_shards: "1" + mappings: + properties: + text: + type: match_only_text + fielddata: true + class: + type: keyword + + - do: + index: + index: goodbad + id: 1 + body: { text: "good", class: "good" } + - do: + index: + index: goodbad + id: 2 + body: { text: "good", class: "good" } + - do: + index: + index: goodbad + id: 3 + body: { text: "bad", class: "bad" } + - do: + index: + index: goodbad + id: 4 + body: { text: "bad", class: "bad" } + - do: + index: + index: goodbad + id: 5 + body: { text: "good bad", class: "good" } + - do: + index: + index: goodbad + id: 6 + body: { text: "good bad", class: "bad" } + - do: + index: + index: goodbad + id: 7 + body: { text: "bad", class: "bad" } + + + + - do: + indices.refresh: + index: [goodbad] + + - do: + search: + rest_total_hits_as_int: true + index: goodbad + + - match: {hits.total: 7} + + - do: + search: + rest_total_hits_as_int: true + index: goodbad + body: {"aggs": {"class": {"terms": {"field": "class"},"aggs": {"sig_terms": {"significant_terms": {"field": "text"}}}}}} + + - match: {aggregations.class.buckets.0.sig_terms.buckets.0.key: "bad"} + - match: {aggregations.class.buckets.1.sig_terms.buckets.0.key: "good"} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/90_sig_text_field_match_only_text.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/90_sig_text_field_match_only_text.yml new file mode 100644 index 0000000000000..e21c4fb946d85 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/90_sig_text_field_match_only_text.yml @@ -0,0 +1,155 @@ +--- +"Default index": + - skip: + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" + - do: + indices.create: + index: goodbad + body: + settings: + number_of_shards: "1" + mappings: + properties: + text: + type: match_only_text + fielddata: false + class: + type: keyword + + - do: + index: + index: goodbad + id: 1 + body: { text: "good", class: "good" } + - do: + index: + index: goodbad + id: 2 + body: { text: "good", class: "good" } + - do: + index: + index: goodbad + id: 3 + body: { text: "bad", class: "bad" } + - do: + index: + index: goodbad + id: 4 + body: { text: "bad", class: "bad" } + - do: + index: + index: goodbad + id: 5 + body: { text: "good bad", class: "good" } + - do: + index: + index: goodbad + id: 6 + body: { text: "good bad", class: "bad" } + - do: + index: + index: goodbad + id: 7 + body: { text: "bad", class: "bad" } + + + + - do: + indices.refresh: + index: [goodbad] + + - do: + search: + rest_total_hits_as_int: true + index: goodbad + + - match: {hits.total: 7} + + - do: + search: + rest_total_hits_as_int: true + index: goodbad + body: {"aggs": {"class": {"terms": {"field": "class"},"aggs": {"sig_text": {"significant_text": {"field": "text"}}}}}} + + - match: {aggregations.class.buckets.0.sig_text.buckets.0.key: "bad"} + - match: {aggregations.class.buckets.1.sig_text.buckets.0.key: "good"} + +--- +"Dedup noise": + - skip: + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" + - do: + indices.create: + index: goodbad + body: + settings: + number_of_shards: "1" + mappings: + properties: + text: + type: match_only_text + fielddata: false + class: + 
type: keyword + + - do: + index: + index: goodbad + id: 1 + body: { text: "good noisewords1 g1 g2 g3 g4 g5 g6", class: "good" } + - do: + index: + index: goodbad + id: 2 + body: { text: "good noisewords2 g1 g2 g3 g4 g5 g6", class: "good" } + - do: + index: + index: goodbad + id: 3 + body: { text: "bad noisewords3 b1 b2 b3 b4 b5 b6", class: "bad" } + - do: + index: + index: goodbad + id: 4 + body: { text: "bad noisewords4 b1 b2 b3 b4 b5 b6", class: "bad" } + - do: + index: + index: goodbad + id: 5 + body: { text: "good bad noisewords5 gb1 gb2 gb3 gb4 gb5 gb6", class: "good" } + - do: + index: + index: goodbad + id: 6 + body: { text: "good bad noisewords6 gb1 gb2 gb3 gb4 gb5 gb6", class: "bad" } + - do: + index: + index: goodbad + id: 7 + body: { text: "bad noisewords7 b1 b2 b3 b4 b5 b6", class: "bad" } + + + + - do: + indices.refresh: + index: [goodbad] + + - do: + search: + rest_total_hits_as_int: true + index: goodbad + + - match: {hits.total: 7} + + - do: + search: + rest_total_hits_as_int: true + index: goodbad + body: {"aggs": {"class": {"terms": {"field": "class"},"aggs": {"sig_text": {"significant_text": {"field": "text", "filter_duplicate_text": true}}}}}} + + - match: {aggregations.class.buckets.0.sig_text.buckets.0.key: "bad"} + - length: { aggregations.class.buckets.0.sig_text.buckets: 1 } + - match: {aggregations.class.buckets.1.sig_text.buckets.0.key: "good"} + - length: { aggregations.class.buckets.1.sig_text.buckets: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml index a18ac45e62175..4ee905972d106 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml @@ -78,3 +78,15 @@ setup: index: test1 body: {"query" : {"match" : {"field2" : "fox"}}, "highlight" : {"type" : "plain", "fields" : {"field2" : {}}}} - match: { error.root_cause.0.type: "illegal_argument_exception" } + +--- +"Plain highlighter on a field WITHOUT OFFSETS using max_analyzer_offset should SUCCEED": + - skip: + version: " - 2.1.99" + reason: the max_analyzer_offset parameter is only supported starting with version 2.2 + - do: + search: + rest_total_hits_as_int: true + index: test1 + body: {"query" : {"match" : {"field1" : "quick"}}, "highlight" : {"type" : "plain", "fields" : {"field1" : {"max_analyzer_offset": 10}}}} + - match: {hits.hits.0.highlight.field1.0: "The <em>quick</em> "} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/20_highlighting_field_match_only_text.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/20_highlighting_field_match_only_text.yml new file mode 100644 index 0000000000000..9e60d69bfedd7 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/20_highlighting_field_match_only_text.yml @@ -0,0 +1,137 @@ +setup: + - skip: + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" + - do: + indices.create: + index: test + body: + mappings: + _source: + excludes: ["nested.stored_only"] + properties: + nested: + type: nested + properties: + field: + type: text + fields: + vectors: + type: text + term_vector: "with_positions_offsets" + postings: + type: text + index_options: "offsets" + stored: + type: match_only_text + store: true + stored_only: + type: match_only_text + store: true
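+ # Editor's note (added commentary, not part of the original test; the behavioral claim is an
+ # assumption based on how match_only_text is described elsewhere in this changelog):
+ # match_only_text trades positional data for a smaller index, so the unified highlighter
+ # cases below are expected to re-analyze each field value from _source, or from stored
+ # fields for the store: true copies.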
+ - do: + index: + index: test + id: 1 + refresh: true + body: + nested: + field : "The quick brown fox is brown." + stored : "The quick brown fox is brown." + stored_only : "The quick brown fox is brown." + +--- +"Unified highlighter": + - do: + search: + index: test + body: + query: + nested: + path: "nested" + query: + multi_match: + query: "quick brown fox" + fields: [ "nested.field", "nested.field.vectors", "nested.field.postings" ] + inner_hits: + highlight: + type: "unified" + fields: + nested.field: {} + nested.field.vectors: {} + nested.field.postings: {} + + - match: { hits.hits.0.inner_hits.nested.hits.hits.0.highlight.nested\.field.0: "The <em>quick</em> <em>brown</em> <em>fox</em> is <em>brown</em>." } + - match: { hits.hits.0.inner_hits.nested.hits.hits.0.highlight.nested\.field\.vectors.0: "The <em>quick</em> <em>brown</em> <em>fox</em> is <em>brown</em>." } + - match: { hits.hits.0.inner_hits.nested.hits.hits.0.highlight.nested\.field\.postings.0: "The <em>quick</em> <em>brown</em> <em>fox</em> is <em>brown</em>." } + +--- +"Unified highlighter with stored fields": + - do: + search: + index: test + body: + query: + nested: + path: "nested" + query: + multi_match: + query: "quick brown fox" + fields: [ "nested.stored", "nested.stored_only" ] + inner_hits: + highlight: + type: "unified" + fields: + nested.stored: {} + nested.stored_only: {} + + - match: { hits.hits.0.inner_hits.nested.hits.hits.0.highlight.nested\.stored.0: "The <em>quick</em> <em>brown</em> <em>fox</em> is <em>brown</em>." } + - match: { hits.hits.0.inner_hits.nested.hits.hits.0.highlight.nested\.stored_only.0: "The <em>quick</em> <em>brown</em> <em>fox</em> is <em>brown</em>." } + +--- +"Unified highlighter with stored fields and disabled source": + - do: + indices.create: + index: disabled_source + body: + mappings: + _source: + enabled: false + properties: + nested: + type: nested + properties: + field: + type: match_only_text + stored_only: + type: match_only_text + store: true + - do: + index: + index: disabled_source + id: 1 + refresh: true + body: + nested: + field: "The quick brown fox is brown." + stored_only: "The quick brown fox is brown." 
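+ # Editor's note (added commentary): with _source disabled on this index, only the
+ # store: true field can be highlighted; the assertions after the search below verify
+ # that nested.field yields no highlight while nested.stored_only still does.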
+ + - do: + search: + index: disabled_source + body: + query: + nested: + path: "nested" + query: + multi_match: + query: "quick brown fox" + fields: ["nested.field", "nested.stored_only"] + inner_hits: + highlight: + type: "unified" + fields: + nested.field: {} + nested.stored_only: {} + + - is_false: hits.hits.0.inner_hits.nested.hits.hits.0.highlight.nested\.field + - match: { hits.hits.0.inner_hits.nested.hits.hits.0.highlight.nested\.stored_only.0: "The <em>quick</em> <em>brown</em> <em>fox</em> is <em>brown</em>."} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/160_exists_query_match_only_text.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/160_exists_query_match_only_text.yml new file mode 100644 index 0000000000000..69c639a8f506a --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/160_exists_query_match_only_text.yml @@ -0,0 +1,119 @@ +setup: + - skip: + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" + features: ["headers"] + + - do: + indices.create: + index: test + body: + mappings: + dynamic: false + properties: + match_only_text: + type: match_only_text + + - do: + headers: + Content-Type: application/json + index: + index: "test" + id: 1 + body: + match_only_text: "foo bar" + + - do: + headers: + Content-Type: application/json + index: + index: "test" + id: 2 + body: + match_only_text: "foo bar" + + - do: + headers: + Content-Type: application/json + index: + index: "test" + id: 3 + routing: "route_me" + body: + match_only_text: "foo bar" + + - do: + index: + index: "test" + id: 4 + body: {} + + - do: + indices.create: + index: test-unmapped + body: + mappings: + dynamic: false + properties: + unrelated: + type: keyword + + - do: + index: + index: "test-unmapped" + id: 1 + body: + unrelated: "foo" + + - do: + indices.create: + index: test-empty + body: + mappings: + dynamic: false + properties: + match_only_text: + type: match_only_text + + - do: + indices.refresh: + index: [test, test-unmapped, test-empty] + +--- +"Test exists query on mapped match_only_text field": + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + exists: + field: match_only_text + + - match: {hits.total: 3} + +--- +"Test exists query on unmapped match_only_text field": + - do: + search: + rest_total_hits_as_int: true + index: test-unmapped + body: + query: + exists: + field: match_only_text + + - match: {hits.total: 0} + +--- +"Test exists query on match_only_text field in empty index": + - do: + search: + rest_total_hits_as_int: true + index: test-empty + body: + query: + exists: + field: match_only_text + + - match: {hits.total: 0} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_phrase_search_field_match_only_text.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_phrase_search_field_match_only_text.yml new file mode 100644 index 0000000000000..13fd6b3858948 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_phrase_search_field_match_only_text.yml @@ -0,0 +1,67 @@ +--- +"search with indexed phrases": + - skip: + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" + + - do: + indices.create: + index: test + body: + mappings: + properties: + text: + type: match_only_text + + - do: + index: + index: test + id: 1 + body: { text: "peter piper picked a peck of pickled peppers" } + + - do: + indices.refresh: + index: [test] + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + 
query: + match_phrase: + text: + query: "peter piper" + + - match: {hits.total: 1} + + - do: + search: + rest_total_hits_as_int: true + index: test + q: '"peter piper"~1' + df: text + + - match: {hits.total: 1} + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase: + text: "peter piper picked" + + - match: {hits.total: 1} + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase: + text: "piper" + + - match: {hits.total: 1} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml index 1ddba45c97c72..fb5c3268a3c82 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml @@ -164,3 +164,99 @@ setup: - match: { aggregations.date.buckets.1.key: 1540857600000 } - match: { aggregations.date.buckets.1.key_as_string: "2018-10-30T00:00:00.000Z" } - match: { aggregations.date.buckets.1.doc_count: 2 } + + +--- +"date with nested sort now": + - skip: + version: " - 2.11.99" + reason: fixed in 2.12.0 + + # These tests cover the scenario where a nested sort uses now() on a date field. + # For this test, the date field lives inside a nested field and we trigger an asc/desc sort + # on that nested field. A `filter` clause is needed when we sort any nested field; + # here, "gte": "now/h" says to sort the nested date_field only where the + # document has a value greater than the current time now(). + # A nested field sort query doesn't sort documents unless it is qualified through + # the `filter` clause. + # Only adding tests for `gte`, as `lte` would behave the same. + + - do: + indices.create: + index: test + body: + mappings: + properties: + nested_field: + type: nested + properties: + date_field: + type: date + format: date_optional_time + - do: + bulk: + refresh: true + index: test + body: | + {"index":{}} + {"nested_field": [{"date_field": "3023-10-26T12:00:00+09:00"}]} + {"index":{}} + {"nested_field": [{"date_field": "3024-10-26T12:00:00+09:00"}]} + {"index":{}} + {"nested_field": [{"date_field": "3025-10-26T12:00:00+09:00"}]} + {"index":{}} + {"nested_field": [{"date_field": "3026-10-26T12:00:00+09:00"}]} + {"index":{}} + {"nested_field": [{"date_field": "3027-10-26T12:00:00+09:00"}]} + {"index":{}} + {"nested_field": [{"date_field": "2022-10-26T12:00:00+09:00"}]} + {"index":{}} + {"nested_field": [{"date_field": "2023-10-26T12:00:00+09:00"}]} + {"index":{}} + {"nested_field": [{"date_field": "2021-10-26T12:00:00+09:00"}]} + {"index":{}} + {"nested_field": [{"date_field": "2020-10-26T12:00:00+09:00"}]} + {"index":{}} + {"nested_field": [{"date_field": "2019-10-26T12:00:00+09:00"}]} + + # gte: now/h with the desc sort + - do: + search: + index: test + body: + size: 5 + sort: [{ nested_field.date_field: { mode: max, order: desc, nested: { path: nested_field, filter: { bool: { filter : [{ range : { nested_field.date_field: { gte: now/h, time_zone: +09:00} } }] } } } } } ] + - match: {hits.total.value: 10 } + - length: {hits.hits: 5 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.nested_field.0.date_field: "3027-10-26T12:00:00+09:00" } + - match: { hits.hits.0.sort: [33381428400000] } + - match: { hits.hits.1._source.nested_field.0.date_field: "3026-10-26T12:00:00+09:00" } + - match: { hits.hits.1.sort: [ 33349892400000 ] } + - match: {
hits.hits.2._source.nested_field.0.date_field: "3025-10-26T12:00:00+09:00" } + - match: { hits.hits.2.sort: [ 33318356400000 ] } + - match: { hits.hits.3._source.nested_field.0.date_field: "3024-10-26T12:00:00+09:00" } + - match: { hits.hits.3.sort: [ 33286820400000 ] } + - match: { hits.hits.4._source.nested_field.0.date_field: "3023-10-26T12:00:00+09:00" } + - match: { hits.hits.4.sort: [ 33255198000000 ] } + + # gte: now/h with the asc sort + - do: + search: + index: test + body: + size: 5 + sort: [ { nested_field.date_field: { mode: max, order: asc, nested: { path: nested_field, filter: { bool: { filter: [ { range: { nested_field.date_field: { gte: now/h, time_zone: +09:00 } } } ] } } } } } ] + - match: { hits.total.value: 10 } + - length: { hits.hits: 5 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.nested_field.0.date_field: "3023-10-26T12:00:00+09:00" } + - match: { hits.hits.0.sort: [ 33255198000000 ] } + - match: { hits.hits.1._source.nested_field.0.date_field: "3024-10-26T12:00:00+09:00" } + - match: { hits.hits.1.sort: [ 33286820400000 ] } + - match: { hits.hits.2._source.nested_field.0.date_field: "3025-10-26T12:00:00+09:00" } + - match: { hits.hits.2.sort: [ 33318356400000 ] } + - match: { hits.hits.3._source.nested_field.0.date_field: "3026-10-26T12:00:00+09:00" } + - match: { hits.hits.3.sort: [ 33349892400000 ] } + - match: { hits.hits.4._source.nested_field.0.date_field: "3027-10-26T12:00:00+09:00" } + - match: { hits.hits.4.sort: [ 33381428400000 ] } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_mixed.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_mixed.yml index ba2b18eb3b6d0..7385eef051243 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_mixed.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_mixed.yml @@ -20,6 +20,7 @@ properties: counter: type: double + - do: bulk: refresh: true @@ -119,3 +120,87 @@ - match: { status: 400 } - match: { error.type: search_phase_execution_exception } - match: { error.caused_by.reason: "Can't do sort across indices, as a field has [unsigned_long] type in one index, and different type in another index!" 
} + +--- +"search across indices with mixed long and double and float numeric types": + - skip: + version: " - 2.11.0" + reason: half float was broken before 2.11.1 + + - do: + indices.create: + index: test_1 + body: + mappings: + properties: + counter: + type: long + + - do: + indices.create: + index: test_2 + body: + mappings: + properties: + counter: + type: double + + - do: + indices.create: + index: test_3 + body: + mappings: + properties: + counter: + type: half_float + + - do: + bulk: + refresh: true + body: + - index: + _index: test_1 + - counter: 223372036854775800 + - index: + _index: test_2 + - counter: 1223372036854775800.23 + - index: + _index: test_2 + - counter: 184.4 + - index: + _index: test_3 + - counter: 187.4 + - index: + _index: test_3 + - counter: 194.4 + + - do: + search: + index: test_* + rest_total_hits_as_int: true + body: + sort: [{ counter: desc }] + - match: { hits.total: 5 } + - length: { hits.hits: 5 } + - match: { hits.hits.0._index: test_2 } + - match: { hits.hits.0._source.counter: 1223372036854775800.23 } + - match: { hits.hits.0.sort.0: 1223372036854775800.23 } + - match: { hits.hits.1._index: test_1 } + - match: { hits.hits.1._source.counter: 223372036854775800 } + - match: { hits.hits.1.sort.0: 223372036854775800 } + - match: { hits.hits.2._index: test_3 } + - match: { hits.hits.2._source.counter: 194.4 } + + - do: + search: + index: test_* + rest_total_hits_as_int: true + body: + sort: [{ counter: asc }] + - match: { hits.total: 5 } + - length: { hits.hits: 5 } + - match: { hits.hits.0._index: test_2 } + - match: { hits.hits.0._source.counter: 184.4 } + - match: { hits.hits.0.sort.0: 184.4 } + - match: { hits.hits.1._index: test_3 } + - match: { hits.hits.1._source.counter: 187.4 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/310_match_bool_prefix_field_match_only_text.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/310_match_bool_prefix_field_match_only_text.yml new file mode 100644 index 0000000000000..682a7dded1e9b --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/310_match_bool_prefix_field_match_only_text.yml @@ -0,0 +1,282 @@ +setup: + - skip: + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" + + - do: + indices.create: + index: test + body: + mappings: + properties: + my_field1: + type: match_only_text + my_field2: + type: match_only_text + + - do: + index: + index: test + id: 1 + body: + my_field1: "brown fox jump" + my_field2: "xylophone" + + - do: + index: + index: test + id: 2 + body: + my_field1: "brown emu jump" + my_field2: "xylophone" + + - do: + index: + index: test + id: 3 + body: + my_field1: "jumparound" + my_field2: "emu" + + - do: + index: + index: test + id: 4 + body: + my_field1: "dog" + my_field2: "brown fox jump lazy" + + - do: + indices.refresh: {} + +--- +"minimum should match": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + my_field1: + query: "brown fox jump" + minimum_should_match: 3 + + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.0._source.my_field1: "brown fox jump" } + +--- +"analyzer": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + my_field1: + query: "BROWN dog" + analyzer: whitespace # this analyzer doesn't lowercase terms + + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "4" } + - match: { hits.hits.0._source.my_field1: "dog" } + +--- +"operator": + + - do: 
+ search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + my_field1: + query: "brown fox jump" + operator: AND + + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.0._source.my_field1: "brown fox jump" } + +--- +"fuzziness": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + my_field2: + query: "xylophoen foo" + fuzziness: 1 + prefix_length: 1 + max_expansions: 10 + fuzzy_transpositions: true + fuzzy_rewrite: constant_score + + - match: { hits.total: 2 } + - match: { hits.hits.0._source.my_field2: "xylophone" } + - match: { hits.hits.1._source.my_field2: "xylophone" } + +--- +"multi_match single field complete term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox jump" + type: bool_prefix + fields: [ "my_field1" ] + + - match: { hits.total: 3 } + +--- +"multi_match single field partial term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox ju" + type: bool_prefix + fields: [ "my_field1" ] + + - match: { hits.total: 3 } + +--- +"multi_match multiple fields complete term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox jump lazy" + type: bool_prefix + fields: [ "my_field1", "my_field2" ] + + - match: { hits.total: 3 } + +--- +"multi_match multiple fields partial term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox jump laz" + type: bool_prefix + fields: [ "my_field1", "my_field2" ] + + - match: { hits.total: 3 } + +--- +"multi_match multiple fields with analyzer": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "BROWN FOX JUMP dog" + type: bool_prefix + fields: [ "my_field1", "my_field2" ] + analyzer: whitespace # this analyzer doesn't lowercase terms + + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "4" } + - match: { hits.hits.0._source.my_field1: "dog" } + - match: { hits.hits.0._source.my_field2: "brown fox jump lazy" } + +--- +"multi_match multiple fields with minimum_should_match": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox jump la" + type: bool_prefix + fields: [ "my_field1", "my_field2" ] + minimum_should_match: 4 + + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "4" } + - match: { hits.hits.0._source.my_field1: "dog" } + - match: { hits.hits.0._source.my_field2: "brown fox jump lazy" } + +--- +"multi_match multiple fields with fuzziness": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "dob nomatch" + type: bool_prefix + fields: [ "my_field1", "my_field2" ] + fuzziness: 1 + + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "4" } + - match: { hits.hits.0._source.my_field1: "dog" } + - match: { hits.hits.0._source.my_field2: "brown fox jump lazy" } + +--- +"multi_match multiple fields with slop throws exception": + + - do: + catch: /\[slop\] not allowed for type \[bool_prefix\]/ + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown" + type: bool_prefix + fields: [ "my_field1", "my_field2" ] + slop: 1 + +--- +"multi_match multiple fields with cutoff_frequency throws exception": + + - do: + catch: /\[cutoff_frequency\] 
not allowed for type \[bool_prefix\]/ + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown" + type: bool_prefix + fields: [ "my_field1", "my_field2" ] + cutoff_frequency: 0.001 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/320_disallow_queries_field_match_only_text.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/320_disallow_queries_field_match_only_text.yml new file mode 100644 index 0000000000000..00e54e43d6f04 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/320_disallow_queries_field_match_only_text.yml @@ -0,0 +1,141 @@ +--- +setup: + - skip: + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" + + - do: + indices.create: + index: test + body: + mappings: + properties: + text: + type: match_only_text + analyzer: standard + fields: + raw: + type: keyword + nested1: + type: nested + + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "test", "_id": "1"}}' + - '{"text" : "Some like it hot, some like it cold", "nested1": [{"foo": "bar1"}]}' + - '{"index": {"_index": "test", "_id": "2"}}' + - '{"text" : "Its cold outside, theres no kind of atmosphere", "nested1": [{"foo": "bar2"}]}' + - '{"index": {"_index": "test", "_id": "3"}}' + - '{"text" : "Baby its cold there outside", "nested1": [{"foo": "bar3"}]}' + - '{"index": {"_index": "test", "_id": "4"}}' + - '{"text" : "Outside it is cold and wet", "nested1": [{"foo": "bar4"}]}' + +--- +teardown: + + - do: + cluster.put_settings: + body: + transient: + search.allow_expensive_queries: null + +--- +"Test disallow expensive queries": + + ### Check for initial setting = null -> false + - do: + cluster.get_settings: + flat_settings: true + + - is_false: search.allow_expensive_queries + + ### Update setting to false + - do: + cluster.put_settings: + body: + transient: + search.allow_expensive_queries: "false" + flat_settings: true + + - match: {transient: {search.allow_expensive_queries: "false"}} + + ### Prefix + - do: + catch: /\[prefix\] queries cannot be executed when \'search.allow_expensive_queries\' is set to false. 
For optimised prefix queries on text fields please enable \[index_prefixes\]./ + search: + index: test + body: + query: + prefix: + text: + value: out + + ### Fuzzy + - do: + catch: /\[fuzzy\] queries cannot be executed when \'search.allow_expensive_queries\' is set to false./ + search: + index: test + body: + query: + fuzzy: + text: + value: outwide + + ### Regexp + - do: + catch: /\[regexp\] queries cannot be executed when \'search.allow_expensive_queries\' is set to false./ + search: + index: test + body: + query: + regexp: + text: + value: .*ou.*id.* + + ### Wildcard + - do: + catch: /\[wildcard\] queries cannot be executed when \'search.allow_expensive_queries\' is set to false./ + search: + index: test + body: + query: + wildcard: + text: + value: out?ide + + ### Range on text + - do: + catch: /\[range\] queries on \[text\] or \[keyword\] fields cannot be executed when \'search.allow_expensive_queries\' is set to false./ + search: + index: test + body: + query: + range: + text: + gte: "theres" + + ### Range on keyword + - do: + catch: /\[range\] queries on \[text\] or \[keyword\] fields cannot be executed when \'search.allow_expensive_queries\' is set to false./ + search: + index: test + body: + query: + range: + text.raw: + gte : "Outside it is cold and wet" + + ### Nested + - do: + catch: /\[joining\] queries cannot be executed when \'search.allow_expensive_queries\' is set to false./ + search: + index: test + body: + query: + nested: + path: "nested1" + query: + bool: + must: [{"match" : {"nested1.foo" : "bar2"}}] diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml new file mode 100644 index 0000000000000..d5ece1719dc48 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml @@ -0,0 +1,1241 @@ +--- +"search on fields with both index and doc_values enabled": + - do: + indices.create: + index: test-iodvq + body: + mappings: + properties: + some_keyword: + type: keyword + index: true + doc_values: true + byte: + type: byte + index: true + doc_values: true + double: + type: double + index: true + doc_values: true + float: + type: float + index: true + doc_values: true + half_float: + type: half_float + index: true + doc_values: true + integer: + type: integer + index: true + doc_values: true + long: + type: long + index: true + doc_values: true + short: + type: short + index: true + doc_values: true + unsigned_long: + type: unsigned_long + index: true + doc_values: true + ip_field: + type: ip + index: true + doc_values: true + + - do: + bulk: + index: test-iodvq + refresh: true + body: + - '{"index": {"_index": "test-iodvq", "_id": "1" }}' + - '{ "some_keyword": "ingesting some random keyword data", "byte": 120, "double": 100.0, "float": "800.0", "half_float": "400.0", "integer": 1290, "long": 13456, "short": 150, "unsigned_long": 10223372036854775800, "ip_field": "192.168.0.1" }' + - '{ "index": { "_index": "test-iodvq", "_id": "2" }}' + - '{ "some_keyword": "400", "byte": 121, "double": 101.0, "float": "801.0", "half_float": "401.0", "integer": 1291, "long": 13457, "short": 151, "unsigned_long": 10223372036854775801, "ip_field": "192.168.0.2" }' + - '{ "index": { "_index": "test-iodvq", "_id": "3" } }' + - '{ "some_keyword": "5", "byte": 122, "double": 102.0, "float": "802.0", "half_float": "402.0", "integer": 1292, "long": 13458, "short": 152, "unsigned_long": 10223372036854775802, "ip_field": 
"192.168.0.3" }' + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + prefix: + some_keyword: "ing" + + - match: { hits.hits.0._source.some_keyword: "ingesting some random keyword data" } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + range: { + "some_keyword": { + "lt": 500 + } } + + - match: { hits.total: 2 } + + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + term: + half_float: 400.0 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + term: + float: 800.0 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + term: + double: 100.0 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + term: + byte: 120 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + term: + short: 150 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + term: + integer: 1291 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + term: + long: 13456 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + term: + unsigned_long: 10223372036854775800 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + term: + ip_field: "192.168.0.1" + + - match: {hits.total: 1} + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + terms: + half_float: [ 400.0, 401.0 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + terms: + float: [ 800.0, 801.0 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + terms: + byte: [ 120, 121 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + terms: + double: [ 100.0, 101.0 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + terms: + short: [ 150, 151 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + terms: + integer: [ 1290, 1291 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + terms: + long: [ 13456, 13457 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + terms: + unsigned_long: [ 10223372036854775800, 10223372036854775801 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + terms: + ip_field: ["192.168.0.1", "192.168.0.2"] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + range: { + half_float: { + gte: 401.0, + lte: 402.0 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + range: { + float: { + gte: 801.0, + lte: 802.0 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + 
index: test-iodvq + body: + query: + range: { + byte: { + gte: 120, + lte: 121 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + range: { + double: { + gte: 101.0, + lte: 102.0 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + range: { + short: { + gte: 151, + lte: 152 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + range: { + integer: { + gte: 1291, + lte: 1292 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + range: { + long: { + gte: 13457, + lte: 13458 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + range: { + unsigned_long: { + gte: 10223372036854775801, + lte: 10223372036854775802 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + range: + ip_field: + gte: "192.168.0.1" + lte: "192.168.0.2" + + - match: { hits.total: 2 } + +--- +"search on fields with only index enabled": + - do: + indices.create: + index: test-index + body: + mappings: + properties: + some_keyword: + type: keyword + index: true + doc_values: false + byte: + type: byte + index: true + doc_values: false + double: + type: double + index: true + doc_values: false + float: + type: float + index: true + doc_values: false + half_float: + type: half_float + index: true + doc_values: false + integer: + type: integer + index: true + doc_values: false + long: + type: long + index: true + doc_values: false + short: + type: short + index: true + doc_values: false + unsigned_long: + type: unsigned_long + index: true + doc_values: false + ip_field: + type: ip + index: true + doc_values: false + + - do: + bulk: + index: test-index + refresh: true + body: + - '{"index": {"_index": "test-index", "_id": "1" }}' + - '{ "some_keyword": "ingesting some random keyword data", "byte": 120, "double": 100.0, "float": "800.0", "half_float": "400.0", "integer": 1290, "long": 13456, "short": 150, "unsigned_long": 10223372036854775800, "ip_field": "192.168.0.1" }' + - '{ "index": { "_index": "test-index", "_id": "2" }}' + - '{ "some_keyword": "400", "byte": 121, "double": 101.0, "float": "801.0", "half_float": "401.0", "integer": 1291, "long": 13457, "short": 151, "unsigned_long": 10223372036854775801, "ip_field": "192.168.0.2" }' + - '{ "index": { "_index": "test-index", "_id": "3" } }' + - '{ "some_keyword": "5", "byte": 122, "double": 102.0, "float": "802.0", "half_float": "402.0", "integer": 1292, "long": 13458, "short": 152, "unsigned_long": 10223372036854775802, "ip_field": "192.168.0.3" }' + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + prefix: + some_keyword: "ing" + + - match: { hits.hits.0._source.some_keyword: "ingesting some random keyword data" } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + range: { + "some_keyword": { + "lt": 500 + } } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + term: + half_float: 400.0 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + term: + float: 800.0 + + - match: { hits.total: 1 } + + - do: + search: + 
rest_total_hits_as_int: true + index: test-index + body: + query: + term: + double: 100.0 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + term: + byte: 120 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + term: + short: 150 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + term: + integer: 1291 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + term: + long: 13456 + + - match: { hits.total: 1 } + + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + term: + unsigned_long: 10223372036854775800 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + term: + ip_field: "192.168.0.1" + + - match: {hits.total: 1} + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + terms: + half_float: [ 400.0, 401.0 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + terms: + float: [ 800.0, 801.0 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + terms: + byte: [ 120, 121 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + terms: + double: [ 100.0, 101.0 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + terms: + short: [ 150, 151 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + terms: + integer: [ 1290, 1291 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + terms: + long: [ 13456, 13457 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + terms: + unsigned_long: [ 10223372036854775800, 10223372036854775801 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + terms: + ip_field: ["192.168.0.1", "192.168.0.2"] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + range: { + half_float: { + gte: 401.0, + lte: 402.0 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + range: { + float: { + gte: 801.0, + lte: 802.0 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + range: { + byte: { + gte: 120, + lte: 121 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + range: { + double: { + gte: 101.0, + lte: 102.0 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + range: { + short: { + gte: 151, + lte: 152 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + range: { + integer: { + gte: 1291, + lte: 1292 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + 
index: test-index + body: + query: + range: { + long: { + gte: 13457, + lte: 13458 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + range: { + unsigned_long: { + gte: 10223372036854775801, + lte: 10223372036854775802 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + range: + ip_field: + gte: "192.168.0.1" + lte: "192.168.0.2" + + - match: { hits.total: 2 } +--- +"search on fields with only doc_values enabled": + - skip: + features: [ "headers" ] + version: " - 2.99.99" + reason: "searching with only doc_values was added in 3.0.0" + - do: + indices.create: + index: test-doc-values + body: + mappings: + properties: + some_keyword: + type: keyword + index: false + doc_values: true + byte: + type: byte + index: false + doc_values: true + double: + type: double + index: false + doc_values: true + float: + type: float + index: false + doc_values: true + half_float: + type: half_float + index: false + doc_values: true + integer: + type: integer + index: false + doc_values: true + long: + type: long + index: false + doc_values: true + short: + type: short + index: false + doc_values: true + unsigned_long: + type: unsigned_long + index: false + doc_values: true + ip_field: + type: ip + index: false + doc_values: true + + - do: + bulk: + index: test-doc-values + refresh: true + body: + - '{"index": {"_index": "test-doc-values", "_id": "1" }}' + - '{ "some_keyword": "ingesting some random keyword data", "byte": 120, "double": 100.0, "float": "800.0", "half_float": "400.0", "integer": 1290, "long": 13456, "short": 150, "unsigned_long": 10223372036854775800, "ip_field": "192.168.0.1" }' + - '{ "index": { "_index": "test-doc-values", "_id": "2" }}' + - '{ "some_keyword": "400", "byte": 121, "double": 101.0, "float": "801.0", "half_float": "401.0", "integer": 1291, "long": 13457, "short": 151, "unsigned_long": 10223372036854775801, "ip_field": "192.168.0.2" }' + - '{ "index": { "_index": "test-doc-values", "_id": "3" } }' + - '{ "some_keyword": "5", "byte": 122, "double": 102.0, "float": "802.0", "half_float": "402.0", "integer": 1292, "long": 13458, "short": 152, "unsigned_long": 10223372036854775802, "ip_field": "192.168.0.3" }' + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + prefix: + some_keyword: "ing" + + - match: { hits.hits.0._source.some_keyword: "ingesting some random keyword data" } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + range: { + "some_keyword": { + "lt": 500 + } } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + term: + half_float: 400.0 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + term: + float: 800.0 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + term: + double: 100.0 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + term: + byte: 120 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + term: + short: 150 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + term: + integer: 1291 
+ + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + term: + long: 13456 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + term: + unsigned_long: 10223372036854775800 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + term: + ip_field: "192.168.0.3" + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + terms: + half_float: [ 400.0, 401.0 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + terms: + float: [ 800.0, 801.0 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + terms: + byte: [ 120, 121 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + terms: + double: [ 100.0, 101.0 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + terms: + short: [ 150, 151 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + terms: + integer: [ 1290, 1291 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + terms: + long: [ 13456, 13457 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + terms: + unsigned_long: [ 10223372036854775800, 10223372036854775801 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + range: { + half_float: { + gte: 401.0, + lte: 402.0 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + range: { + float: { + gte: 801.0, + lte: 802.0 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + range: { + byte: { + gte: 120, + lte: 121 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + range: { + double: { + gte: 101.0, + lte: 102.0 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + range: { + short: { + gte: 151, + lte: 152 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + range: { + integer: { + gte: 1291, + lte: 1292 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + range: { + long: { + gte: 13457, + lte: 13458 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + range: { + unsigned_long: { + gte: 10223372036854775801, + lte: 10223372036854775802 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + range: + ip_field: + gte: "192.168.0.1" + lte: "192.168.0.2" + + - match: { hits.total: 2 } diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/test/search/350_matched_queries.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/350_matched_queries.yml new file mode 100644 index 0000000000000..08a20df093c01 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/350_matched_queries.yml @@ -0,0 +1,103 @@ +setup: + - skip: + version: " - 2.12.99" + reason: "implemented for versions 2.13.0 and above" + +--- +"matched queries": + - do: + indices.create: + index: test_1 + + - do: + bulk: + refresh: true + body: + - '{ "index" : { "_index" : "test_1", "_id" : "1" } }' + - '{"field" : 1 }' + - '{ "index" : { "_index" : "test_1", "_id" : "2" } }' + - '{"field" : [1, 2] }' + + - do: + search: + index: test_1 + body: + query: + bool: { + should: [ + { + match: { + field: { + query: 1, + _name: match_field_1 + } + } + }, + { + match: { + field: { + query: 2, + _name: match_field_2, + boost: 10 + } + } + } + ] + } + + - match: {hits.total.value: 2} + - length: {hits.hits.0.matched_queries: 2} + - match: {hits.hits.0.matched_queries: [ "match_field_1", "match_field_2" ]} + - length: {hits.hits.1.matched_queries: 1} + - match: {hits.hits.1.matched_queries: [ "match_field_1" ]} + +--- + +"matched queries with scores": + - do: + indices.create: + index: test_1 + + - do: + bulk: + refresh: true + body: + - '{ "index" : { "_index" : "test_1", "_id" : "1" } }' + - '{"field" : 1 }' + - '{ "index" : { "_index" : "test_1", "_id" : "2" } }' + - '{"field" : [1, 2] }' + + - do: + search: + include_named_queries_score: true + index: test_1 + body: + query: + bool: { + should: [ + { + match: { + field: { + query: 1, + _name: match_field_1 + } + } + }, + { + match: { + field: { + query: 2, + _name: match_field_2, + boost: 10 + } + } + } + ] + } + + - match: { hits.total.value: 2 } + - length: { hits.hits.0.matched_queries: 2 } + - match: { hits.hits.0.matched_queries.match_field_1: 1 } + - match: { hits.hits.0.matched_queries.match_field_2: 10 } + - length: { hits.hits.1.matched_queries: 1 } + - match: { hits.hits.1.matched_queries.match_field_1: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml index 55e1566656faf..ebecb63dedbaf 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml @@ -219,7 +219,7 @@ - match: {hits.hits.0._source.timestamp: "2019-10-21 08:30:04.828733" } - match: {hits.hits.0.sort: [1571646604828733000] } - # search_after with the sort + # search_after with the asc sort - do: search: index: test @@ -320,3 +320,468 @@ - length: { hits.hits: 1 } - match: { hits.hits.0._index: test } - match: { hits.hits.0._source.population: null } + +--- +"half float": + - skip: + version: " - 2.11.0" + reason: half_float was broken for 2.11.0 and earlier + + - do: + indices.create: + index: test + body: + mappings: + properties: + population: + type: half_float + - do: + bulk: + refresh: true + index: test + body: | + {"index":{}} + {"population": 184.4} + {"index":{}} + {"population": 194.4} + {"index":{}} + {"population": 144.4} + {"index":{}} + {"population": 174.4} + {"index":{}} + {"population": 164.4} + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + size: 3 + sort: [ { population: desc } ] + - match: { hits.total: 5 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: {
hits.hits.0._source.population: 194.4 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.population: 184.4 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.population: 174.4 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + size: 3 + sort: [ { population: asc } ] + - match: { hits.total: 5 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.population: 144.4 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.population: 164.4 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.population: 174.4 } + + # search_after with the asc sort + - do: + search: + index: test + rest_total_hits_as_int: true + body: + size: 1 + sort: [ { population: asc } ] + search_after: [ 184.375 ] # this is the rounded sort value returned in the sort result + - match: { hits.total: 5 } + - length: { hits.hits: 1 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.population: 194.4 } + + # search_after with the desc sort + - do: + search: + index: test + rest_total_hits_as_int: true + body: + size: 1 + sort: [ { population: desc } ] + search_after: [ 164.375 ] # this is the rounded sort value returned in the sort result + - match: { hits.total: 5 } + - length: { hits.hits: 1 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.population: 144.4 } + + # search_after with the asc sort with missing + - do: + bulk: + refresh: true + index: test + body: | + {"index":{}} + {"population": null} + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 5 + "sort": [ { "population": { "order": "asc", "missing": "_last" } } ] + search_after: [ 200 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 6 } + - length: { hits.hits: 1 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.population: null } + + # search_after with the desc sort with missing + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 5 + "sort": [ { "population": { "order": "desc", "missing": "_last" } } ] + search_after: [ 100 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 6 } + - length: { hits.hits: 1 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.population: null } + +--- +"numeric skipping logic with competitive missing value": + - skip: + version: " - 2.12.99" + reason: newly added test, supported from 3.0.0 + +# This test checks that the skipping logic is not applied when missing values are competitive, +# for all numeric types: int, long, float, half_float, double, unsigned_long. +# We insert 24 documents, some with missing values, and pass a missing value as the search_after +# parameter. The secondary sort field, id, has no missing values. +# If Lucene applied its skipping logic here, it would skip all documents whose primary sort field +# is missing, even though those documents should still be returned, ordered by the secondary sort field id.
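+# For example, with the asc sort below ([ halffloat asc, id asc ]) and search_after [ 200, 0 ], docs 1-20 sort +# before 200 by their halffloat values and are excluded, while docs 21-24 have no halffloat value and, with the +# default missing behavior of _last, sort after 200, so only they should qualify. A skipper that pruned segments +# by indexed halffloat values would wrongly drop exactly these missing-value documents.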
+# This test addresses bugs like https://github.com/opensearch-project/OpenSearch/issues/9537 + + - do: + indices.create: + index: test + body: + mappings: + properties: + halffloat: + type: half_float + long: + type: long + int: + type: integer + float: + type: float + double: + type: double + unsignedlong: + type: unsigned_long + - do: + bulk: + refresh: true + index: test + body: | + {"index":{}} + {"id": 1, "halffloat": 1, "long": 1, "int": 1, "float": 1, "double": 1, "unsignedlong": 1} + {"index":{}} + {"id": 2, "halffloat": 2, "long": 2, "int": 2, "float": 2, "double": 2, "unsignedlong": 2} + {"index":{}} + {"id": 3, "halffloat": 3, "long": 3, "int": 3, "float": 3, "double": 3, "unsignedlong": 3} + {"index":{}} + {"id": 4, "halffloat": 4, "long": 4, "int": 4, "float": 4, "double": 4, "unsignedlong": 4} + {"index":{}} + {"id": 5, "halffloat": 5, "long": 5, "int": 5, "float": 5, "double": 5, "unsignedlong": 5} + {"index":{}} + {"id": 6, "halffloat": 6, "long": 6, "int": 6, "float": 6, "double": 6, "unsignedlong": 6} + {"index":{}} + {"id": 7, "halffloat": 7, "long": 7, "int": 7, "float": 7, "double": 7, "unsignedlong": 7} + {"index":{}} + {"id": 8, "halffloat": 8, "long": 8, "int": 8, "float": 8, "double": 8, "unsignedlong": 8} + {"index":{}} + {"id": 9, "halffloat": 9, "long": 9, "int": 9, "float": 9, "double": 9, "unsignedlong": 9} + {"index":{}} + {"id": 10, "halffloat": 10, "long": 10, "int": 10, "float": 10, "double": 10, "unsignedlong": 10} + {"index":{}} + {"id": 11, "halffloat": 11, "long": 11, "int": 11, "float": 11, "double": 11, "unsignedlong": 11} + {"index":{}} + {"id": 12, "halffloat": 12, "long": 12, "int": 12, "float": 12, "double": 12, "unsignedlong": 12} + {"index":{}} + {"id": 13, "halffloat": 13, "long": 13, "int": 13, "float": 13, "double": 13, "unsignedlong": 13} + {"index":{}} + {"id": 14, "halffloat": 14, "long": 14, "int": 14, "float": 14, "double": 14, "unsignedlong": 14} + {"index":{}} + {"id": 15, "halffloat": 15, "long": 15, "int": 15, "float": 15, "double": 15, "unsignedlong": 15} + {"index":{}} + {"id": 16, "halffloat": 16, "long": 16, "int": 16, "float": 16, "double": 16, "unsignedlong": 16} + {"index":{}} + {"id": 17, "halffloat": 17, "long": 17, "int": 17, "float": 17, "double": 17, "unsignedlong": 17} + {"index":{}} + {"id": 18, "halffloat": 18, "long": 18, "int": 18, "float": 18, "double": 18, "unsignedlong": 18} + {"index":{}} + {"id": 19, "halffloat": 19, "long": 19, "int": 19, "float": 19, "double": 19, "unsignedlong": 19} + {"index":{}} + {"id": 20, "halffloat": 20, "long": 20, "int": 20, "float": 20, "double": 20, "unsignedlong": 20} + {"index":{}} + {"id": 21, "halffloat": null, "long": null, "int": null, "float": null, "double": null, "unsignedlong": null} + {"index":{}} + {"id": 22, "halffloat": null, "long": null, "int": null, "float": null, "double": null, "unsignedlong": null} + {"index":{}} + {"id": 23, "halffloat": null, "long": null, "int": null, "float": null, "double": null, "unsignedlong": null} + {"index":{}} + {"id": 24, "halffloat": null, "long": null, "int": null, "float": null, "double": null, "unsignedlong": null} + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "halffloat": { "order": "asc" } }, { "id": { "order": "asc" } } ] + search_after: [ 200, 0 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.halffloat: null } +
- match: { hits.hits.0._source.id: 21 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.halffloat: null } + - match: { hits.hits.1._source.id: 22 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.halffloat: null } + - match: { hits.hits.2._source.id: 23 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "halffloat": { "order": "desc" } }, { "id": { "order": "desc" } } ] + search_after: [ 0, 25 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.halffloat: null } + - match: { hits.hits.0._source.id: 24 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.halffloat: null } + - match: { hits.hits.1._source.id: 23 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.halffloat: null } + - match: { hits.hits.2._source.id: 22 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "long": { "order": "asc" } }, { "id": { "order": "asc" } } ] + search_after: [ 200, 0 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.long: null } + - match: { hits.hits.0._source.id: 21 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.long: null } + - match: { hits.hits.1._source.id: 22 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.long: null } + - match: { hits.hits.2._source.id: 23 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "long": { "order": "desc" } }, { "id": { "order": "desc" } } ] + search_after: [ 0, 25 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.long: null } + - match: { hits.hits.0._source.id: 24 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.long: null } + - match: { hits.hits.1._source.id: 23 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.long: null } + - match: { hits.hits.2._source.id: 22 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "int": { "order": "asc" } }, { "id": { "order": "asc" } } ] + search_after: [ 200, 0 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.int: null } + - match: { hits.hits.0._source.id: 21 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.int: null } + - match: { hits.hits.1._source.id: 22 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.int: null } + - match: { hits.hits.2._source.id: 23 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "int": { "order": "desc" } }, { "id": { "order": "desc" } } ] + search_after: [ 0, 25 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.int: null } + - match: { hits.hits.0._source.id: 24 
} + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.int: null } + - match: { hits.hits.1._source.id: 23 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.int: null } + - match: { hits.hits.2._source.id: 22 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "float": { "order": "asc" } }, { "id": { "order": "asc" } } ] + search_after: [ 200, 0 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.float: null } + - match: { hits.hits.0._source.id: 21 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.float: null } + - match: { hits.hits.1._source.id: 22 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.float: null } + - match: { hits.hits.2._source.id: 23 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "float": { "order": "desc" } }, { "id": { "order": "desc" } } ] + search_after: [ 0, 25 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.float: null } + - match: { hits.hits.0._source.id: 24 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.float: null } + - match: { hits.hits.1._source.id: 23 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.float: null } + - match: { hits.hits.2._source.id: 22 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "double": { "order": "asc" } }, { "id": { "order": "asc" } } ] + search_after: [ 200, 0 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.double: null } + - match: { hits.hits.0._source.id: 21 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.double: null } + - match: { hits.hits.1._source.id: 22 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.double: null } + - match: { hits.hits.2._source.id: 23 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "double": { "order": "desc" } }, { "id": { "order": "desc" } } ] + search_after: [ 0, 25 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.double: null } + - match: { hits.hits.0._source.id: 24 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.double: null } + - match: { hits.hits.1._source.id: 23 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.double: null } + - match: { hits.hits.2._source.id: 22 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "unsignedlong": { "order": "asc" } }, { "id": { "order": "asc" } } ] + search_after: [ 200, 0 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.unsignedlong: null } + - match: { hits.hits.0._source.id: 21 } + - match: { 
hits.hits.1._index: test } + - match: { hits.hits.1._source.unsignedlong: null } + - match: { hits.hits.1._source.id: 22 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.unsignedlong: null } + - match: { hits.hits.2._source.id: 23 } + + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 3 + "sort": [ { "unsignedlong": { "order": "desc" } }, { "id": { "order": "desc" } } ] + search_after: [ 0, 25 ] # making it out of min/max so only missing value hit is qualified + + - match: { hits.total: 24 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.unsignedlong: null } + - match: { hits.hits.0._source.id: 24 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1._source.unsignedlong: null } + - match: { hits.hits.1._source.id: 23 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._source.unsignedlong: null } + - match: { hits.hits.2._source.id: 22 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/10_basic_field_match_only_field.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/10_basic_field_match_only_field.yml new file mode 100644 index 0000000000000..44adb48c8765e --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/10_basic_field_match_only_field.yml @@ -0,0 +1,92 @@ +--- +"Search shards aliases with and without filters": + - skip: + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" + + - do: + indices.create: + index: test_index + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + field: + type: match_only_text + aliases: + test_alias_no_filter: {} + test_alias_filter_1: + filter: + term: + field : value1 + test_alias_filter_2: + filter: + term: + field : value2 + + - do: + search_shards: + index: test_alias_no_filter + + - length: { shards: 1 } + - match: { shards.0.0.index: test_index } + - is_true: indices.test_index + - is_false: indices.test_index.filter + - match: { indices.test_index.aliases: [test_alias_no_filter]} + + - do: + search_shards: + index: test_alias_filter_1 + + - length: { shards: 1 } + - match: { shards.0.0.index: test_index } + - match: { indices.test_index.aliases: [test_alias_filter_1] } + - match: { indices.test_index.filter.term.field.value: value1 } + - lte: { indices.test_index.filter.term.field.boost: 1.0 } + - gte: { indices.test_index.filter.term.field.boost: 1.0 } + + - do: + search_shards: + index: ["test_alias_filter_1","test_alias_filter_2"] + + - length: { shards: 1 } + - match: { shards.0.0.index: test_index } + - match: { indices.test_index.aliases: [test_alias_filter_1, test_alias_filter_2]} + - length: { indices.test_index.filter.bool.should: 2 } + - lte: { indices.test_index.filter.bool.should.0.term.field.boost: 1.0 } + - gte: { indices.test_index.filter.bool.should.0.term.field.boost: 1.0 } + - lte: { indices.test_index.filter.bool.should.1.term.field.boost: 1.0 } + - gte: { indices.test_index.filter.bool.should.1.term.field.boost: 1.0 } + - match: { indices.test_index.filter.bool.adjust_pure_negative: true} + - lte: { indices.test_index.filter.bool.boost: 1.0 } + - gte: { indices.test_index.filter.bool.boost: 1.0 } + + - do: + search_shards: + index: "test*" + + - length: { shards: 1 } + - match: { shards.0.0.index: test_index } + - match: { indices.test_index.aliases: [test_alias_filter_1, test_alias_filter_2, test_alias_no_filter]} + - is_false: 
indices.test_index.filter + + - do: + search_shards: + index: ["test_alias_filter_1","test_alias_no_filter"] + + - length: { shards: 1 } + - match: { shards.0.0.index: test_index } + - match: { indices.test_index.aliases: [test_alias_filter_1, test_alias_no_filter]} + - is_false: indices.test_index.filter + + - do: + search_shards: + index: ["test_alias_no_filter"] + + - length: { shards: 1 } + - match: { shards.0.0.index: test_index } + - match: { indices.test_index.aliases: [test_alias_no_filter]} + - is_false: indices.test_index.filter diff --git a/rest-api-spec/src/yamlRestTest/java/org/opensearch/test/rest/ClientYamlTestSuiteIT.java b/rest-api-spec/src/yamlRestTest/java/org/opensearch/test/rest/ClientYamlTestSuiteIT.java index 9a1973e9d5aeb..a7f196190c350 100644 --- a/rest-api-spec/src/yamlRestTest/java/org/opensearch/test/rest/ClientYamlTestSuiteIT.java +++ b/rest-api-spec/src/yamlRestTest/java/org/opensearch/test/rest/ClientYamlTestSuiteIT.java @@ -33,8 +33,8 @@ package org.opensearch.test.rest; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; - import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; + import org.apache.lucene.tests.util.TimeUnits; import org.opensearch.test.rest.yaml.ClientYamlTestCandidate; import org.opensearch.test.rest.yaml.OpenSearchClientYamlSuiteTestCase; diff --git a/server/build.gradle b/server/build.gradle index 3fde1b745c546..e36498bf1038b 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -47,7 +47,7 @@ publishing { } base { - archivesBaseName = 'opensearch' + archivesName = 'opensearch' } sourceSets { @@ -57,50 +57,12 @@ sourceSets { } } } -// we want to keep the JDKs in our IDEs set to JDK 8 until minimum JDK is bumped to 11 so we do not include this source set in our IDEs -if (!isEclipse) { - sourceSets { - java11 { - java { - srcDirs = ['src/main/java11'] - } - } - } - - configurations { - java11Implementation.extendsFrom(api) - } - - dependencies { - java11Implementation sourceSets.main.output - } - - compileJava11Java { - sourceCompatibility = JavaVersion.VERSION_11 - targetCompatibility = JavaVersion.VERSION_11 - } - - tasks.named('forbiddenApisJava11').configure { - doFirst { - if (BuildParams.runtimeJavaVersion < JavaVersion.VERSION_11) { - targetCompatibility = JavaVersion.VERSION_11 - } - } - } - - jar { - metaInf { - into 'versions/11' - from sourceSets.java11.output - } - manifest.attributes('Multi-Release': 'true') - } -} dependencies { api project(':libs:opensearch-common') api project(':libs:opensearch-core') + api project(":libs:opensearch-compress") api project(':libs:opensearch-secure-sm') api project(':libs:opensearch-x-content') api project(":libs:opensearch-geo") @@ -146,6 +108,7 @@ dependencies { api "org.apache.logging.log4j:log4j-jul:${versions.log4j}" api "org.apache.logging.log4j:log4j-core:${versions.log4j}", optional annotationProcessor "org.apache.logging.log4j:log4j-core:${versions.log4j}" + annotationProcessor project(':libs:opensearch-common') // jna api "net.java.dev.jna:jna:${versions.jna}" @@ -153,13 +116,14 @@ dependencies { // jcraft api "com.jcraft:jzlib:${versions.jzlib}" + // reactor + api "io.projectreactor:reactor-core:${versions.reactor}" + api "org.reactivestreams:reactive-streams:${versions.reactivestreams}" + // protobuf api "com.google.protobuf:protobuf-java:${versions.protobuf}" api "jakarta.annotation:jakarta.annotation-api:${versions.jakarta_annotation}" - //zstd - api "com.github.luben:zstd-jni:${versions.zstd}" - 
testImplementation(project(":test:framework")) { // tests use the locally compiled version of server exclude group: 'org.opensearch', module: 'server' @@ -176,7 +140,8 @@ tasks.withType(JavaCompile).configureEach { } compileJava { - options.compilerArgs += ['-processor', 'org.apache.logging.log4j.core.config.plugins.processor.PluginProcessor'] + options.compilerArgs += ['-processor', ['org.apache.logging.log4j.core.config.plugins.processor.PluginProcessor', + 'org.opensearch.common.annotation.processor.ApiAnnotationProcessor'].join(','), '-AcontinueOnFailingChecks'] } tasks.named("internalClusterTest").configure { @@ -366,11 +331,15 @@ tasks.named("thirdPartyAudit").configure { 'com.google.protobuf.UnsafeUtil$Android32MemoryAccessor', 'com.google.protobuf.UnsafeUtil$Android64MemoryAccessor', 'com.google.protobuf.UnsafeUtil$JvmMemoryAccessor', - 'com.google.protobuf.UnsafeUtil$MemoryAccessor' + 'com.google.protobuf.UnsafeUtil$MemoryAccessor', + 'org.apache.logging.log4j.core.util.internal.UnsafeUtil', + 'org.apache.logging.log4j.core.util.internal.UnsafeUtil$1', + 'reactor.core.publisher.Traces$SharedSecretsCallSiteSupplierFactory$TracingException' ) } tasks.named("dependencyLicenses").configure { + mapping from: /reactor-.*/, to: 'reactor' mapping from: /lucene-.*/, to: 'lucene' dependencies = project.configurations.runtimeClasspath.fileCollection { it.group.startsWith('org.opensearch') == false || @@ -397,6 +366,7 @@ tasks.named("licenseHeaders").configure { } tasks.test { + environment "node.roles.test", "[]" if (BuildParams.runtimeJavaVersion > JavaVersion.VERSION_1_8) { jvmArgs += ["--add-opens", "java.base/java.nio.file=ALL-UNNAMED"] } diff --git a/server/cli/build.gradle b/server/cli/build.gradle new file mode 100644 index 0000000000000..385d5ff27433e --- /dev/null +++ b/server/cli/build.gradle @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. 
+ +// Please see https://docs.gradle.org/8.5/userguide/upgrading_version_8.html#deprecated_missing_project_directory diff --git a/server/licenses/jna-5.13.0.jar.sha1 b/server/licenses/jna-5.13.0.jar.sha1 new file mode 100644 index 0000000000000..faf2012f0b5c0 --- /dev/null +++ b/server/licenses/jna-5.13.0.jar.sha1 @@ -0,0 +1 @@ +1200e7ebeedbe0d10062093f32925a912020e747 \ No newline at end of file diff --git a/server/licenses/jna-5.5.0.jar.sha1 b/server/licenses/jna-5.5.0.jar.sha1 deleted file mode 100644 index 5621dfc743dd0..0000000000000 --- a/server/licenses/jna-5.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0e0845217c4907822403912ad6828d8e0b256208 diff --git a/server/licenses/log4j-api-2.20.0.jar.sha1 b/server/licenses/log4j-api-2.20.0.jar.sha1 deleted file mode 100644 index 37154d9861ac0..0000000000000 --- a/server/licenses/log4j-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1fe6082e660daf07c689a89c94dc0f49c26b44bb \ No newline at end of file diff --git a/server/licenses/log4j-api-2.21.0.jar.sha1 b/server/licenses/log4j-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..51446052594aa --- /dev/null +++ b/server/licenses/log4j-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +760192f2b69eacf4a4afc78e5a1d7a8de054fcbd \ No newline at end of file diff --git a/server/licenses/log4j-core-2.20.0.jar.sha1 b/server/licenses/log4j-core-2.20.0.jar.sha1 deleted file mode 100644 index 49c972626563b..0000000000000 --- a/server/licenses/log4j-core-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -eb2a9a47b1396e00b5eee1264296729a70565cc0 \ No newline at end of file diff --git a/server/licenses/log4j-core-2.21.0.jar.sha1 b/server/licenses/log4j-core-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..c88e6f7a25ca9 --- /dev/null +++ b/server/licenses/log4j-core-2.21.0.jar.sha1 @@ -0,0 +1 @@ +122e1a9e0603cc9eae07b0846a6ff01f2454bc49 \ No newline at end of file diff --git a/server/licenses/log4j-jul-2.20.0.jar.sha1 b/server/licenses/log4j-jul-2.20.0.jar.sha1 deleted file mode 100644 index a456651e4569e..0000000000000 --- a/server/licenses/log4j-jul-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8170e6118eac1ab332046c179718a0f107f688e1 \ No newline at end of file diff --git a/server/licenses/log4j-jul-2.21.0.jar.sha1 b/server/licenses/log4j-jul-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..480010840abca --- /dev/null +++ b/server/licenses/log4j-jul-2.21.0.jar.sha1 @@ -0,0 +1 @@ +f0da61113f4a47654677e6a98b1e13ca7de2483d \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-analysis-common-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..9cab77f4e7394 --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +6f0cfa07a5e4b36423e398cd1fd51c6825773d9c \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-analysis-common-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index e7c7dc2bbc046..0000000000000 --- a/server/licenses/lucene-analysis-common-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1446b7641743a1082b566179d1bf2960f5a0724b \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-backward-codecs-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..2d5d1a281a0f0 --- /dev/null +++
b/server/licenses/lucene-backward-codecs-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +ab201b997c8449db1ecd2fa88bd42d2f457286fa \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-backward-codecs-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index d0f64519cd6ff..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -127032ea137d2501b24f0e35e5f9a2e1c7864633 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-core-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..25beb34873c0c --- /dev/null +++ b/server/licenses/lucene-core-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +fe1cf5663be8bdb6aa757fd4101bc551684c90fb \ No newline at end of file diff --git a/server/licenses/lucene-core-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-core-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index dc363f2776429..0000000000000 --- a/server/licenses/lucene-core-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -30c3afcf058532d3d2b8820375043000e7f34a9b \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-grouping-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..27d4f0f5874e9 --- /dev/null +++ b/server/licenses/lucene-grouping-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +a988f92842e48195c75a49377432533c9170d93d \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-grouping-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 294beba43f62a..0000000000000 --- a/server/licenses/lucene-grouping-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e6f742efe0ef3b383468fe38f88ab2dd69ed3d2c \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-highlighter-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..2545822f2ac7b --- /dev/null +++ b/server/licenses/lucene-highlighter-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +7c669e2c01565d3bdf175cd61a1e4d0bdfc44311 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-highlighter-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index c2a2ef5b13946..0000000000000 --- a/server/licenses/lucene-highlighter-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3162856444777130dee2c4cabe1bf6d18710ff63 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-join-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..fe44ad772335f --- /dev/null +++ b/server/licenses/lucene-join-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +633a6d44b4cde8e149daa3407e8b8f644eece951 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-join-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 7c6adaaba9cf1..0000000000000 --- a/server/licenses/lucene-join-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5fe8383516eca7300f978ce38042e327b0a57877 \ No newline at end of file diff --git 
a/server/licenses/lucene-memory-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-memory-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..85bfbe066ff56 --- /dev/null +++ b/server/licenses/lucene-memory-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +25390259c9e5592354efbc2f250bb396402016b2 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-memory-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 586702c968a77..0000000000000 --- a/server/licenses/lucene-memory-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b3e77970485be6d2dd59b999bbaa65a2cb993744 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-misc-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..4dba5837b66de --- /dev/null +++ b/server/licenses/lucene-misc-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +056f87a3d30c223b08d2f45fe465ddf11210b85f \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-misc-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 493598eefff5e..0000000000000 --- a/server/licenses/lucene-misc-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -86d667ea2f7fb2142d2acacf801dcea47d014a5e \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-queries-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..b6b8c441eefb1 --- /dev/null +++ b/server/licenses/lucene-queries-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +e640f850b4fb13190be8422fe74c14c9d6603bb5 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-queries-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 1bf937f10d795..0000000000000 --- a/server/licenses/lucene-queries-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -930d004de698f374da8ac5530fd80e241edeba45 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-queryparser-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..d0e77b04db51a --- /dev/null +++ b/server/licenses/lucene-queryparser-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +8eb57762bf408fa51d7511f5e3b917627be61d1d \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-queryparser-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 74458bc93f90b..0000000000000 --- a/server/licenses/lucene-queryparser-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f62882823d5aa9ed4cf0081a8c18f35e21992080 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-sandbox-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..179df9f07a594 --- /dev/null +++ b/server/licenses/lucene-sandbox-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +615b4a90c5402084c2d5916a4c1fadc9d9177782 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-sandbox-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 3231d0e067940..0000000000000 --- 
a/server/licenses/lucene-sandbox-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f1ec1527e283b423b7ff5e12cd8d889e7247199d \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-spatial-extras-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..9b88b24c21b12 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +19b42cdb5f76f63dece3ef5128207ebdd3741d48 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-spatial-extras-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index dd47faf91f206..0000000000000 --- a/server/licenses/lucene-spatial-extras-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -de787c052879893e47d21fa161c93413665d55d7 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-spatial3d-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..789ab1d52ea8c --- /dev/null +++ b/server/licenses/lucene-spatial3d-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +882691fe917e716fe6bcf8c0dd984b153495d015 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-spatial3d-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 2b378438bfb14..0000000000000 --- a/server/licenses/lucene-spatial3d-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7e541ed960a571f5d9a0ecff5c26fd5ca857581e \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.11.0-snapshot-8a555eb.jar.sha1 b/server/licenses/lucene-suggest-9.11.0-snapshot-8a555eb.jar.sha1 new file mode 100644 index 0000000000000..8cfaf60763724 --- /dev/null +++ b/server/licenses/lucene-suggest-9.11.0-snapshot-8a555eb.jar.sha1 @@ -0,0 +1 @@ +ef6d483960f776d5dbdd1009863786ee09ba5707 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-suggest-9.8.0-snapshot-4373c3b.jar.sha1 deleted file mode 100644 index 1e3ed6561e3ef..0000000000000 --- a/server/licenses/lucene-suggest-9.8.0-snapshot-4373c3b.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4b222ef09a5f20896d031a8322f2e69304c16384 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/reactive-streams-1.0.4.jar.sha1 b/server/licenses/reactive-streams-1.0.4.jar.sha1 similarity index 100% rename from plugins/discovery-ec2/licenses/reactive-streams-1.0.4.jar.sha1 rename to server/licenses/reactive-streams-1.0.4.jar.sha1 diff --git a/plugins/discovery-ec2/licenses/reactive-streams-LICENSE.txt b/server/licenses/reactive-streams-LICENSE.txt similarity index 100% rename from plugins/discovery-ec2/licenses/reactive-streams-LICENSE.txt rename to server/licenses/reactive-streams-LICENSE.txt diff --git a/server/licenses/reactive-streams-NOTICE.txt b/server/licenses/reactive-streams-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/server/licenses/reactor-LICENSE.txt b/server/licenses/reactor-LICENSE.txt new file mode 100644 index 0000000000000..e5583c184e67a --- /dev/null +++ b/server/licenses/reactor-LICENSE.txt @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/server/licenses/reactor-NOTICE.txt b/server/licenses/reactor-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/server/licenses/reactor-core-3.5.15.jar.sha1 b/server/licenses/reactor-core-3.5.15.jar.sha1 new file mode 100644 index 0000000000000..02df47ed58b9d --- /dev/null +++ b/server/licenses/reactor-core-3.5.15.jar.sha1 @@ -0,0 +1 @@ +4e07a24c671235a2a806e75e9b8ff23d7d1db3d4 \ No newline at end of file diff --git a/server/licenses/zstd-jni-1.5.5-3.jar.sha1 b/server/licenses/zstd-jni-1.5.5-3.jar.sha1 deleted file mode 100644 index 6d30ba7e2de80..0000000000000 --- a/server/licenses/zstd-jni-1.5.5-3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -488dd9b15c9e8cf87d857f65f5cd6359c2853381 \ No newline at end of file diff --git a/server/src/internalClusterTest/java/org/opensearch/action/IndicesRequestIT.java b/server/src/internalClusterTest/java/org/opensearch/action/IndicesRequestIT.java index 5054039319392..84d833569edcb 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/IndicesRequestIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/IndicesRequestIT.java @@ -94,9 +94,9 @@ import org.opensearch.action.update.UpdateResponse; import org.opensearch.client.Requests; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.index.query.QueryBuilders; import org.opensearch.plugins.NetworkPlugin; import org.opensearch.plugins.Plugin; @@ -112,7 +112,6 @@ import org.opensearch.transport.TransportInterceptor; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestHandler; - import org.junit.After; import org.junit.Before; diff --git a/server/src/internalClusterTest/java/org/opensearch/action/ListenerActionIT.java b/server/src/internalClusterTest/java/org/opensearch/action/ListenerActionIT.java index 1512fa4934ca1..c82ca0a06f4a1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/ListenerActionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/ListenerActionIT.java @@ -36,6 +36,7 @@ import org.opensearch.action.index.IndexResponse; import org.opensearch.client.Client; import org.opensearch.client.Requests; +import org.opensearch.core.action.ActionListener; import org.opensearch.test.OpenSearchIntegTestCase; import java.util.concurrent.CountDownLatch; diff --git a/server/src/internalClusterTest/java/org/opensearch/action/RejectionActionIT.java b/server/src/internalClusterTest/java/org/opensearch/action/RejectionActionIT.java index bda24b48b7f10..e52df4476241b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/RejectionActionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/RejectionActionIT.java @@ -38,6 +38,7 @@ import org.opensearch.action.search.SearchType; import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.index.query.QueryBuilders; import org.opensearch.test.OpenSearchIntegTestCase; diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/ClientTimeoutIT.java 
b/server/src/internalClusterTest/java/org/opensearch/action/admin/ClientTimeoutIT.java index 3b56c07cb10c8..340caa75c61d9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/ClientTimeoutIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/ClientTimeoutIT.java @@ -8,9 +8,9 @@ package org.opensearch.action.admin; -import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; -import org.opensearch.action.admin.cluster.node.info.NodesInfoAction; import org.opensearch.action.admin.cluster.node.info.NodeInfo; +import org.opensearch.action.admin.cluster.node.info.NodesInfoAction; +import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsAction; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -33,10 +33,10 @@ import java.util.Collections; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; -import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.lessThan; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class ClientTimeoutIT extends OpenSearchIntegTestCase { diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/HotThreadsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/HotThreadsIT.java index ab44c95b4f5a6..4c9f49df71257 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/HotThreadsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/HotThreadsIT.java @@ -32,11 +32,11 @@ package org.opensearch.action.admin; import org.apache.lucene.util.Constants; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.node.hotthreads.NodeHotThreads; import org.opensearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequestBuilder; import org.opensearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; import org.opensearch.test.OpenSearchIntegTestCase; import org.hamcrest.Matcher; @@ -60,8 +60,8 @@ public class HotThreadsIT extends OpenSearchIntegTestCase { public void testHotThreadsDontFail() throws ExecutionException, InterruptedException { - /** - * This test just checks if nothing crashes or gets stuck etc. + /* + This test just checks if nothing crashes or gets stuck etc. 
*/ createIndex("test"); final int iters = scaledRandomIntBetween(2, 20); diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/ReloadSecureSettingsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/ReloadSecureSettingsIT.java index e095927efdd01..c81d491719e4b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/ReloadSecureSettingsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/ReloadSecureSettingsIT.java @@ -33,13 +33,13 @@ package org.opensearch.action.admin; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsResponse; import org.opensearch.common.settings.KeyStoreWrapper; import org.opensearch.common.settings.SecureSettings; -import org.opensearch.core.common.settings.SecureString; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.settings.SecureString; import org.opensearch.env.Environment; import org.opensearch.plugins.Plugin; import org.opensearch.plugins.PluginsService; @@ -449,6 +449,7 @@ public void onFailure(Exception e) { } } + @SuppressWarnings("removal") private SecureSettings writeEmptyKeystore(Environment environment, char[] password) throws Exception { final KeyStoreWrapper keyStoreWrapper = KeyStoreWrapper.create(); try { diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java index 4cbcc5b9bb507..a9a6993ff8d64 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java @@ -49,18 +49,17 @@ import org.opensearch.cluster.routing.allocation.NodeAllocationResult; import org.opensearch.cluster.routing.allocation.decider.Decision; import org.opensearch.common.Priority; -import org.opensearch.common.Strings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.set.Sets; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.xcontent.XContentParser.Token; -import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.core.index.shard.ShardId; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; import java.io.IOException; import java.util.Collections; @@ -1275,7 +1274,7 @@ private ClusterAllocationExplanation runExplain(boolean primary, String nodeId, XContentBuilder builder = JsonXContent.contentBuilder(); builder.prettyPrint(); builder.humanReadable(true); - logger.debug("--> explain json output: \n{}", Strings.toString(explanation.toXContent(builder, ToXContent.EMPTY_PARAMS))); + logger.debug("--> explain json output: \n{}", explanation.toXContent(builder, ToXContent.EMPTY_PARAMS).toString()); } return explanation; } diff --git 
a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/AbstractTasksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/AbstractTasksIT.java index 84e7d82d25ab2..44ba585016d8e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/AbstractTasksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/AbstractTasksIT.java @@ -16,12 +16,12 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.Strings; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.core.tasks.resourcetracker.ThreadResourceInfo; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.plugins.Plugin; -import org.opensearch.tasks.TaskId; import org.opensearch.tasks.TaskInfo; -import org.opensearch.tasks.ThreadResourceInfo; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.tasks.MockTaskManager; import org.opensearch.test.transport.MockTransportService; @@ -112,7 +112,7 @@ protected int numberOfEvents(String actionMasks, Function<Tuple<Boolean, TaskInf * Returns all events that satisfy the criteria across all nodes * * @param actionMasks action masks to match - * @return number of events that satisfy the criteria + * @return List of events that satisfy the criteria */ protected List<TaskInfo> findEvents(String actionMasks, Function<Tuple<Boolean, TaskInfo>, Boolean> criteria) { List<TaskInfo> events = new ArrayList<>(); @@ -182,7 +182,7 @@ protected void indexDocumentsWithRefresh(String indexName, int numDocs) { for (int i = 0; i < numDocs; i++) { client().prepareIndex(indexName) .setId("test_id_" + String.valueOf(i)) - .setSource("{\"foo_" + String.valueOf(i) + "\": \"bar_" + String.valueOf(i) + "\"}", XContentType.JSON) + .setSource("{\"foo_" + String.valueOf(i) + "\": \"bar_" + String.valueOf(i) + "\"}", MediaTypeRegistry.JSON) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); } diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksIT.java index 161dee1a96e3c..bdb36b62ada21 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksIT.java @@ -31,13 +31,12 @@ package org.opensearch.action.admin.cluster.node.tasks; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.ExceptionsHelper; import org.opensearch.ResourceNotFoundException; -import org.opensearch.action.ActionFuture; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.ActionResponse; import org.opensearch.action.ActionRunnable; import org.opensearch.action.ActionType; import org.opensearch.action.LatchedActionListener; @@ -50,31 +49,35 @@ import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.SetOnce; +import org.opensearch.common.action.ActionFuture; import 
org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.util.set.Sets; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.tasks.TaskCancelledException; +import org.opensearch.core.tasks.TaskId; import org.opensearch.plugins.ActionPlugin; import org.opensearch.plugins.Plugin; import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskCancelledException; -import org.opensearch.tasks.TaskId; import org.opensearch.tasks.TaskInfo; import org.opensearch.tasks.TaskManager; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; - import org.junit.Before; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashSet; @@ -87,6 +90,7 @@ import java.util.stream.Collectors; import java.util.stream.StreamSupport; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; @@ -94,7 +98,7 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; -public class CancellableTasksIT extends OpenSearchIntegTestCase { +public class CancellableTasksIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { static int idGenerator = 0; static final Map<TestRequest, CountDownLatch> beforeSendLatches = ConcurrentCollections.newConcurrentMap(); @@ -102,6 +106,18 @@ public class CancellableTasksIT extends OpenSearchIntegTestCase { static final Map<TestRequest, CountDownLatch> beforeExecuteLatches = ConcurrentCollections.newConcurrentMap(); static final Map<TestRequest, CountDownLatch> completedLatches = ConcurrentCollections.newConcurrentMap(); + public CancellableTasksIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Before public void resetTestStates() { idGenerator = 0; diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/ConcurrentSearchTasksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/ConcurrentSearchTasksIT.java index 2b2421072e03b..8b3c40c43e2d2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/ConcurrentSearchTasksIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/ConcurrentSearchTasksIT.java @@ -8,7 +8,6 @@ package org.opensearch.action.admin.cluster.node.tasks; -import org.hamcrest.MatcherAssert; import org.opensearch.action.admin.indices.segments.IndicesSegmentsRequest; import org.opensearch.action.search.SearchAction; import org.opensearch.cluster.metadata.IndexMetadata; @@ -16,21 +15,22 @@ import org.opensearch.common.settings.FeatureFlagSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.tasks.resourcetracker.ThreadResourceInfo; import org.opensearch.index.query.QueryBuilders; +import org.opensearch.search.SearchService; import org.opensearch.tasks.TaskInfo; -import org.opensearch.tasks.ThreadResourceInfo; +import org.hamcrest.MatcherAssert; import java.util.List; import java.util.Map; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.notNullValue; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; /** * Integration tests for task management API with Concurrent Segment Search - * + * <p> * The way the test framework bootstraps the test cluster makes it difficult to parameterize the feature flag. * Once concurrent search is moved behind a cluster setting we can parameterize these tests behind the setting. */ @@ -44,6 +44,7 @@ protected Settings nodeSettings(int nodeOrdinal) { .put(super.nodeSettings(nodeOrdinal)) .put("thread_pool.index_searcher.size", INDEX_SEARCHER_THREADS) .put("thread_pool.index_searcher.queue_size", INDEX_SEARCHER_THREADS) + .put(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true) .build(); } @@ -66,13 +67,12 @@ protected Settings featureFlagSettings() { for (Setting builtInFlag : FeatureFlagSettings.BUILT_IN_FEATURE_FLAGS) { featureSettings.put(builtInFlag.getKey(), builtInFlag.getDefaultRaw(Settings.EMPTY)); } - featureSettings.put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, true); return featureSettings.build(); } /** * Tests the number of threads that worked on a search task. - * + * <p> * Currently, we try to control concurrency by creating an index with 7 segments and rely on * the way concurrent search creates leaf slices from segments. Once more concurrency controls are introduced * we should improve this test to use those methods. 
@@ -108,8 +108,9 @@ public void testConcurrentSearchTaskTracking() { assertEquals(mainTaskInfo.getTaskId(), taskInfo.getParentTaskId()); Map<Long, List<ThreadResourceInfo>> threadStats = getThreadStats(SearchAction.NAME + "[*]", taskInfo.getTaskId()); - // Concurrent search forks each slice of 5 segments to different thread - assertEquals((int) Math.ceil(getSegmentCount(INDEX_NAME) / 5.0), threadStats.size()); + // Concurrent search forks each slice of 5 segments to a different thread (please see + // https://github.com/apache/lucene/issues/12498) + assertEquals((int) Math.ceil(getSegmentCount(INDEX_NAME) / 5.0) + 1, threadStats.size()); // assert that all task descriptions have non-zero length MatcherAssert.assertThat(taskInfo.getDescription().length(), greaterThan(0)); diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TaskStorageRetryIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TaskStorageRetryIT.java index e710d3a8438d8..455be343de2c5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TaskStorageRetryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TaskStorageRetryIT.java @@ -36,9 +36,9 @@ import org.opensearch.action.support.PlainListenableActionFuture; import org.opensearch.client.node.NodeClient; import org.opensearch.common.settings.Settings; +import org.opensearch.core.tasks.TaskId; import org.opensearch.plugins.Plugin; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java index 67e52529ae86b..c7d75108883dd 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java @@ -35,8 +35,6 @@ import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchTimeoutException; -import org.opensearch.action.ActionFuture; -import org.opensearch.action.ActionListener; import org.opensearch.action.TaskOperationFailure; import org.opensearch.action.admin.cluster.health.ClusterHealthAction; import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; @@ -56,14 +54,16 @@ import org.opensearch.action.support.WriteRequest; import org.opensearch.action.support.replication.ReplicationResponse; import org.opensearch.action.support.replication.TransportReplicationActionTests; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.collect.Tuple; import org.opensearch.common.regex.Regex; import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; import org.opensearch.tasks.TaskInfo; import org.opensearch.tasks.TaskResult; import org.opensearch.tasks.TaskResultsService; @@ -86,6 +86,12 @@ import static
java.util.Collections.emptyList; import static java.util.Collections.singleton; +import static org.opensearch.common.unit.TimeValue.timeValueMillis; +import static org.opensearch.common.unit.TimeValue.timeValueSeconds; +import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFutureThrows; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; @@ -97,12 +103,6 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.startsWith; -import static org.opensearch.common.unit.TimeValue.timeValueMillis; -import static org.opensearch.common.unit.TimeValue.timeValueSeconds; -import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFutureThrows; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; /** * Integration tests for task management API @@ -287,7 +287,9 @@ public void testTransportBulkTasks() { ensureGreen("test"); // Make sure all shards are allocated to catch replication tasks // ensures the mapping is available on all nodes so we won't retry the request (in case replicas don't have the right mapping). client().admin().indices().preparePutMapping("test").setSource("foo", "type=keyword").get(); - client().prepareBulk().add(client().prepareIndex("test").setId("test_id").setSource("{\"foo\": \"bar\"}", XContentType.JSON)).get(); + client().prepareBulk() + .add(client().prepareIndex("test").setId("test_id").setSource("{\"foo\": \"bar\"}", MediaTypeRegistry.JSON)) + .get(); // the bulk operation should produce one main task List<TaskInfo> topTask = findEvents(BulkAction.NAME, Tuple::v1); @@ -338,7 +340,7 @@ public void testSearchTaskDescriptions() { ensureGreen("test"); // Make sure all shards are allocated to catch replication tasks client().prepareIndex("test") .setId("test_id") - .setSource("{\"foo\": \"bar\"}", XContentType.JSON) + .setSource("{\"foo\": \"bar\"}", MediaTypeRegistry.JSON) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java index aff7c5d9876ac..36fe3748e9d10 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java @@ -46,7 +46,7 @@ /** * This class tests that repository operations (Put, Delete, Verify) are blocked when the cluster is read-only. - * + * <p> * The @NodeScope TEST is needed because this class updates the cluster setting "cluster.blocks.read_only". 
*/ @ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java index 13200ba2ca43e..78fb01b07b6b1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java @@ -37,13 +37,11 @@ import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; - import org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.settings.Settings; import org.opensearch.core.rest.RestStatus; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; - import org.junit.Before; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_BLOCKS_READ; @@ -55,7 +53,7 @@ /** * This class tests that snapshot operations (Create, Delete, Restore) are blocked when the cluster is read-only. - * + * <p> * The @NodeScope TEST is needed because this class updates the cluster setting "cluster.blocks.read_only". */ @ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java index cc1f8169afd3f..085a32593063a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java @@ -49,7 +49,6 @@ import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; - import org.hamcrest.Matchers; import java.io.IOException; diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java index 7c10d52c7a111..83aa744a80599 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java @@ -34,8 +34,8 @@ import org.opensearch.common.settings.Settings; import org.opensearch.gateway.GatewayService; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; import java.util.Arrays; diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/alias/ValidateIndicesAliasesRequestIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/alias/ValidateIndicesAliasesRequestIT.java index daf4769e9fabb..910a9d351d83a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/alias/ValidateIndicesAliasesRequestIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/alias/ValidateIndicesAliasesRequestIT.java @@ -42,6 +42,7 @@ import 
org.opensearch.plugins.ActionPlugin; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchSingleNodeTestCase; + import java.util.Collection; import java.util.Collections; import java.util.List; diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CloneIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CloneIndexIT.java index 98fc6483703c4..0551d19b02b8f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CloneIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CloneIndexIT.java @@ -37,7 +37,7 @@ import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.query.TermsQueryBuilder; import org.opensearch.index.seqno.SeqNoStats; import org.opensearch.test.OpenSearchIntegTestCase; @@ -62,7 +62,7 @@ public void testCreateCloneIndex() { ).get(); final int docs = randomIntBetween(0, 128); for (int i = 0; i < docs; i++) { - client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); + client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); } internalCluster().ensureAtLeastNumDataNodes(2); // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node @@ -122,7 +122,7 @@ public void testCreateCloneIndex() { } for (int i = docs; i < 2 * docs; i++) { - client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); + client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); } flushAndRefresh(); assertHitCount( diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java index e969e06ec56e0..1c182b05fa4a8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.indices.create; -import org.opensearch.action.ActionListener; import org.opensearch.action.UnavailableShardsException; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; import org.opensearch.action.admin.indices.alias.Alias; @@ -49,6 +48,7 @@ import org.opensearch.common.settings.SettingsException; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.action.ActionListener; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.IndexService; import org.opensearch.index.mapper.MapperParsingException; diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java new file mode 100644 index 0000000000000..f50e8fd0a38cf --- /dev/null +++ 
b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java @@ -0,0 +1,192 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.create; + +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +import org.opensearch.Version; +import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.opensearch.action.admin.indices.shrink.ResizeType; +import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; +import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.index.query.TermsQueryBuilder; +import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; +import org.opensearch.test.VersionUtils; + +import java.util.concurrent.ExecutionException; + +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.equalTo; + +public class RemoteCloneIndexIT extends RemoteStoreBaseIntegTestCase { + + @Override + protected boolean forbidPrivateIndexSettings() { + return false; + } + + public void testCreateCloneIndex() { + Version version = VersionUtils.randomIndexCompatibleVersion(random()); + int numPrimaryShards = randomIntBetween(1, 5); + prepareCreate("source").setSettings( + Settings.builder().put(indexSettings()).put("number_of_shards", numPrimaryShards).put("index.version.created", version) + ).get(); + final int docs = randomIntBetween(0, 128); + for (int i = 0; i < docs; i++) { + client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + internalCluster().ensureAtLeastNumDataNodes(2); + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. + ensureGreen(); + // relocate all shards to one node such that we can merge it. 
+ client().admin().indices().prepareUpdateSettings("source").setSettings(Settings.builder().put("index.blocks.write", true)).get(); + ensureGreen(); + + final IndicesStatsResponse sourceStats = client().admin().indices().prepareStats("source").setSegments(true).get(); + + // disable rebalancing to be able to capture the right stats. balancing can move the target primary + // making it hard to pinpoint the source shards. + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none")) + .get(); + try { + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("source", "target") + .setResizeType(ResizeType.CLONE) + .setSettings(Settings.builder().put("index.number_of_replicas", 0).putNull("index.blocks.write").build()) + .get() + ); + ensureGreen(); + + final IndicesStatsResponse targetStats = client().admin().indices().prepareStats("target").get(); + assertThat(targetStats.getIndex("target").getIndexShards().keySet().size(), equalTo(numPrimaryShards)); + + final int size = docs > 0 ? 2 * docs : 1; + assertHitCount(client().prepareSearch("target").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs); + + for (int i = docs; i < 2 * docs; i++) { + client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + flushAndRefresh(); + assertHitCount( + client().prepareSearch("target").setSize(2 * size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), + 2 * docs + ); + assertHitCount(client().prepareSearch("source").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs); + GetSettingsResponse target = client().admin().indices().prepareGetSettings("target").get(); + assertEquals(version, target.getIndexToSettings().get("target").getAsVersion("index.version.created", null)); + } finally { + // clean up + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), (String) null) + ) + .get(); + } + + } + + public void testCreateCloneIndexFailure() throws ExecutionException, InterruptedException { + Version version = VersionUtils.randomIndexCompatibleVersion(random()); + int numPrimaryShards = 1; + prepareCreate("source").setSettings( + Settings.builder().put(indexSettings()).put("number_of_shards", numPrimaryShards).put("index.version.created", version) + ).get(); + final int docs = 2; + for (int i = 0; i < docs; i++) { + client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + internalCluster().ensureAtLeastNumDataNodes(2); + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. + ensureGreen(); + // relocate all shards to one node such that we can merge it. + client().admin().indices().prepareUpdateSettings("source").setSettings(Settings.builder().put("index.blocks.write", true)).get(); + ensureGreen(); + + // disable rebalancing to be able to capture the right stats. balancing can move the target primary + // making it hard to pinpoint the source shards.
+ client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none")) + .get(); + try { + setFailRate(REPOSITORY_NAME, 100); + + client().admin() + .indices() + .prepareResizeIndex("source", "target") + .setResizeType(ResizeType.CLONE) + .setWaitForActiveShards(0) + .setSettings(Settings.builder().put("index.number_of_replicas", 0).putNull("index.blocks.write").build()) + .get(); + + Thread.sleep(2000); + ensureYellow("target"); + + } catch (ExecutionException | InterruptedException e) { + throw new RuntimeException(e); + } finally { + setFailRate(REPOSITORY_NAME, 0); + ensureGreen(); + // clean up + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), (String) null) + ) + .get(); + } + + } + +} diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteShrinkIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteShrinkIndexIT.java new file mode 100644 index 0000000000000..282eb9c6ad95e --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteShrinkIndexIT.java @@ -0,0 +1,545 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.create; + +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.SortedSetSelector; +import org.apache.lucene.search.SortedSetSortField; +import org.apache.lucene.util.Constants; +import org.opensearch.Version; +import org.opensearch.action.admin.cluster.reroute.ClusterRerouteResponse; +import org.opensearch.action.admin.cluster.state.ClusterStateRequest; +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.opensearch.action.admin.indices.stats.CommonStats; +import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; +import org.opensearch.action.admin.indices.stats.ShardStats; +import org.opensearch.action.index.IndexRequest; +import org.opensearch.action.support.ActiveShardCount; +import org.opensearch.client.Client; +import org.opensearch.cluster.ClusterInfoService; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.InternalClusterInfoService; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.Murmur3HashFunction; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.UnassignedInfo; +import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.opensearch.common.Priority; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.index.IndexModule; +import org.opensearch.index.IndexService; +import org.opensearch.index.engine.SegmentsStats; +import org.opensearch.index.query.TermsQueryBuilder; +import 
org.opensearch.index.seqno.SeqNoStats; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; +import org.opensearch.test.VersionUtils; + +import java.util.Arrays; +import java.util.Map; +import java.util.stream.IntStream; + +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; + +public class RemoteShrinkIndexIT extends RemoteStoreBaseIntegTestCase { + @Override + protected boolean forbidPrivateIndexSettings() { + return false; + } + + public Settings indexSettings() { + return Settings.builder() + .put(super.indexSettings()) + .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build(); + } + + public void testCreateShrinkIndexToN() { + + assumeFalse("https://github.com/elastic/elasticsearch/issues/34080", Constants.WINDOWS); + + int[][] possibleShardSplits = new int[][] { { 8, 4, 2 }, { 9, 3, 1 }, { 4, 2, 1 }, { 15, 5, 1 } }; + int[] shardSplits = randomFrom(possibleShardSplits); + assertEquals(shardSplits[0], (shardSplits[0] / shardSplits[1]) * shardSplits[1]); + assertEquals(shardSplits[1], (shardSplits[1] / shardSplits[2]) * shardSplits[2]); + internalCluster().ensureAtLeastNumDataNodes(2); + prepareCreate("source").setSettings(Settings.builder().put(indexSettings()).put("number_of_shards", shardSplits[0])).get(); + for (int i = 0; i < 20; i++) { + client().prepareIndex("source") + .setId(Integer.toString(i)) + .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON) + .get(); + } + final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); + assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); + DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]); + String mergeNode = discoveryNodes[0].getName(); + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. + ensureGreen(); + // relocate all shards to one node such that we can merge it. 
+ client().admin() + .indices() + .prepareUpdateSettings("source") + .setSettings(Settings.builder().put("index.routing.allocation.require._name", mergeNode).put("index.blocks.write", true)) + .get(); + ensureGreen(); + // now merge source down to shardSplits[1] shards + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("source", "first_shrink") + .setSettings( + Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", shardSplits[1]) + .putNull("index.blocks.write") + .build() + ) + .get() + ); + ensureGreen(); + assertHitCount(client().prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + + for (int i = 0; i < 20; i++) { // now update + client().prepareIndex("first_shrink") + .setId(Integer.toString(i)) + .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON) + .get(); + } + flushAndRefresh(); + assertHitCount(client().prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + + // relocate all shards to one node such that we can merge it. + client().admin() + .indices() + .prepareUpdateSettings("first_shrink") + .setSettings(Settings.builder().put("index.routing.allocation.require._name", mergeNode).put("index.blocks.write", true)) + .get(); + ensureGreen(); + // now merge first_shrink further, down to shardSplits[2] shards + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("first_shrink", "second_shrink") + .setSettings( + Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", shardSplits[2]) + .putNull("index.blocks.write") + .putNull("index.routing.allocation.require._name") + .build() + ) + .get() + ); + ensureGreen(); + assertHitCount(client().prepareSearch("second_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + // let it be allocated anywhere; replicas stay at 0 in this remote-store variant + client().admin() + .indices() + .prepareUpdateSettings("second_shrink") + .setSettings(Settings.builder().putNull("index.routing.allocation.include._id").put("index.number_of_replicas", 0)) + .get(); + ensureGreen(); + assertHitCount(client().prepareSearch("second_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + + for (int i = 0; i < 20; i++) { // now update + client().prepareIndex("second_shrink") + .setId(Integer.toString(i)) + .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON) + .get(); + } + flushAndRefresh(); + assertHitCount(client().prepareSearch("second_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + assertHitCount(client().prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + } + + public void testShrinkIndexPrimaryTerm() throws Exception { + int numberOfShards = randomIntBetween(2, 20); + int numberOfTargetShards = randomValueOtherThanMany(n -> numberOfShards % n != 0, () -> randomIntBetween(1, numberOfShards - 1)); + internalCluster().ensureAtLeastNumDataNodes(2); + prepareCreate("source").setSettings(Settings.builder().put(indexSettings()).put("number_of_shards", numberOfShards)).get(); + + final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); +
assertThat(dataNodes.size(), greaterThanOrEqualTo(2)); + final DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]); + final String mergeNode = discoveryNodes[0].getName(); + // This needs more than the default timeout if a large number of shards were created. + ensureGreen(TimeValue.timeValueSeconds(120)); + + // fail random primary shards to force primary terms to increase + final Index source = resolveIndex("source"); + final int iterations = scaledRandomIntBetween(0, 16); + for (int i = 0; i < iterations; i++) { + final String node = randomSubsetOf(1, internalCluster().nodesInclude("source")).get(0); + final IndicesService indexServices = internalCluster().getInstance(IndicesService.class, node); + final IndexService indexShards = indexServices.indexServiceSafe(source); + for (final Integer shardId : indexShards.shardIds()) { + final IndexShard shard = indexShards.getShard(shardId); + if (shard.routingEntry().primary() && randomBoolean()) { + disableAllocation("source"); + shard.failShard("test", new Exception("test")); + // this can not succeed until the shard is failed and a replica is promoted + int id = 0; + while (true) { + // find an ID that routes to the right shard, we will only index to the shard that saw a primary failure + final String s = Integer.toString(id); + final int hash = Math.floorMod(Murmur3HashFunction.hash(s), numberOfShards); + if (hash == shardId) { + final IndexRequest request = new IndexRequest("source").id(s) + .source("{ \"f\": \"" + s + "\"}", MediaTypeRegistry.JSON); + client().index(request).get(); + break; + } else { + id++; + } + } + enableAllocation("source"); + ensureGreen(); + } + } + } + + // relocate all shards to one node such that we can merge it. + final Settings.Builder prepareShrinkSettings = Settings.builder() + .put("index.routing.allocation.require._name", mergeNode) + .put("index.blocks.write", true); + client().admin().indices().prepareUpdateSettings("source").setSettings(prepareShrinkSettings).get(); + ensureGreen(TimeValue.timeValueSeconds(120)); // needs more than the default to relocate many shards + + final IndexMetadata indexMetadata = indexMetadata(client(), "source"); + final long beforeShrinkPrimaryTerm = IntStream.range(0, numberOfShards).mapToLong(indexMetadata::primaryTerm).max().getAsLong(); + + // now merge source into target + final Settings shrinkSettings = Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", numberOfTargetShards) + .build(); + assertAcked(client().admin().indices().prepareResizeIndex("source", "target").setSettings(shrinkSettings).get()); + + ensureGreen(TimeValue.timeValueSeconds(120)); + + final IndexMetadata afterShrinkIndexMetadata = indexMetadata(client(), "target"); + for (int shardId = 0; shardId < numberOfTargetShards; shardId++) { + assertThat(afterShrinkIndexMetadata.primaryTerm(shardId), equalTo(beforeShrinkPrimaryTerm + 1)); + } + } + + private static IndexMetadata indexMetadata(final Client client, final String index) { + final ClusterStateResponse clusterStateResponse = client.admin().cluster().state(new ClusterStateRequest()).actionGet(); + return clusterStateResponse.getState().metadata().index(index); + } + + public void testCreateShrinkIndex() { + internalCluster().ensureAtLeastNumDataNodes(2); + Version version = VersionUtils.randomVersion(random()); + prepareCreate("source").setSettings( + Settings.builder().put(indexSettings()).put("number_of_shards", randomIntBetween(2, 7)).put("index.version.created", version) + 
).get(); + final int docs = randomIntBetween(0, 128); + for (int i = 0; i < docs; i++) { + client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); + assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); + DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]); + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. + ensureGreen(); + // relocate all shards to one node such that we can merge it. + client().admin() + .indices() + .prepareUpdateSettings("source") + .setSettings( + Settings.builder() + .put("index.routing.allocation.require._name", discoveryNodes[0].getName()) + .put("index.blocks.write", true) + ) + .get(); + ensureGreen(); + + final IndicesStatsResponse sourceStats = client().admin().indices().prepareStats("source").setSegments(true).get(); + + // disable rebalancing to be able to capture the right stats. balancing can move the target primary + // making it hard to pin point the source shards. + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none")) + .get(); + + // now merge source into a single shard index + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("source", "target") + .setSettings( + Settings.builder() + .put("index.number_of_replicas", 0) + .putNull("index.blocks.write") + .putNull("index.routing.allocation.require._name") + .build() + ) + .get() + ); + ensureGreen(); + + // resolve true merge node - this is not always the node we required as all shards may be on another node + final ClusterState state = client().admin().cluster().prepareState().get().getState(); + DiscoveryNode mergeNode = state.nodes().get(state.getRoutingTable().index("target").shard(0).primaryShard().currentNodeId()); + logger.info("merge node {}", mergeNode); + + final long maxSeqNo = Arrays.stream(sourceStats.getShards()) + .filter(shard -> shard.getShardRouting().currentNodeId().equals(mergeNode.getId())) + .map(ShardStats::getSeqNoStats) + .mapToLong(SeqNoStats::getMaxSeqNo) + .max() + .getAsLong(); + final long maxUnsafeAutoIdTimestamp = Arrays.stream(sourceStats.getShards()) + .filter(shard -> shard.getShardRouting().currentNodeId().equals(mergeNode.getId())) + .map(ShardStats::getStats) + .map(CommonStats::getSegments) + .mapToLong(SegmentsStats::getMaxUnsafeAutoIdTimestamp) + .max() + .getAsLong(); + + final IndicesStatsResponse targetStats = client().admin().indices().prepareStats("target").get(); + for (final ShardStats shardStats : targetStats.getShards()) { + final SeqNoStats seqNoStats = shardStats.getSeqNoStats(); + final ShardRouting shardRouting = shardStats.getShardRouting(); + assertThat("failed on " + shardRouting, seqNoStats.getMaxSeqNo(), equalTo(maxSeqNo)); + assertThat("failed on " + shardRouting, seqNoStats.getLocalCheckpoint(), equalTo(maxSeqNo)); + assertThat( + "failed on " + shardRouting, + shardStats.getStats().getSegments().getMaxUnsafeAutoIdTimestamp(), + equalTo(maxUnsafeAutoIdTimestamp) + ); + } + + final int size = docs > 0 ? 
2 * docs : 1; + assertHitCount(client().prepareSearch("target").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs); + + for (int i = docs; i < 2 * docs; i++) { + client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + flushAndRefresh(); + assertHitCount(client().prepareSearch("target").setSize(2 * size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 2 * docs); + assertHitCount(client().prepareSearch("source").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs); + GetSettingsResponse target = client().admin().indices().prepareGetSettings("target").get(); + assertEquals(version, target.getIndexToSettings().get("target").getAsVersion("index.version.created", null)); + + // clean up + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), (String) null) + ) + .get(); + } + + /** + * Tests that we can manually recover from a failed allocation due to shards being moved away etc. + */ + public void testCreateShrinkIndexFails() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(2); + prepareCreate("source").setSettings( + Settings.builder().put(indexSettings()).put("number_of_shards", randomIntBetween(2, 7)).put("number_of_replicas", 0) + ).get(); + for (int i = 0; i < 20; i++) { + client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); + assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); + DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]); + String spareNode = discoveryNodes[0].getName(); + String mergeNode = discoveryNodes[1].getName(); + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. + ensureGreen(); + // relocate all shards to one node such that we can merge it. + client().admin() + .indices() + .prepareUpdateSettings("source") + .setSettings(Settings.builder().put("index.routing.allocation.require._name", mergeNode).put("index.blocks.write", true)) + .get(); + ensureGreen(); + + // now merge source into a single shard index + client().admin() + .indices() + .prepareResizeIndex("source", "target") + .setWaitForActiveShards(ActiveShardCount.NONE) + .setSettings( + Settings.builder() + .put("index.routing.allocation.exclude._name", mergeNode) // we manually exclude the merge node to force the allocation to fail + .put("index.number_of_replicas", 0) + .put("index.allocation.max_retries", 1) + .build() + ) + .get(); + client().admin().cluster().prepareHealth("target").setWaitForEvents(Priority.LANGUID).get(); + + // now we move all shards away from the merge node + client().admin() + .indices() + .prepareUpdateSettings("source") + .setSettings(Settings.builder().put("index.routing.allocation.require._name", spareNode).put("index.blocks.write", true)) + .get(); + ensureGreen("source"); + + client().admin() + .indices() + .prepareUpdateSettings("target") // clear the forced allocation failure
+ .setSettings(Settings.builder().putNull("index.routing.allocation.exclude._name")) + .get(); + // wait until it fails + assertBusy(() -> { + ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get(); + RoutingTable routingTables = clusterStateResponse.getState().routingTable(); + assertTrue(routingTables.index("target").shard(0).getShards().get(0).unassigned()); + assertEquals( + UnassignedInfo.Reason.ALLOCATION_FAILED, + routingTables.index("target").shard(0).getShards().get(0).unassignedInfo().getReason() + ); + assertEquals(1, routingTables.index("target").shard(0).getShards().get(0).unassignedInfo().getNumFailedAllocations()); + }); + client().admin() + .indices() + .prepareUpdateSettings("source") // now relocate them all to the right node + .setSettings(Settings.builder().put("index.routing.allocation.require._name", mergeNode)) + .get(); + ensureGreen("source"); + + final InternalClusterInfoService infoService = (InternalClusterInfoService) internalCluster().getInstance( + ClusterInfoService.class, + internalCluster().getClusterManagerName() + ); + infoService.refresh(); + // kick off a retry and wait until it's done! + ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setRetryFailed(true).get(); + long expectedShardSize = clusterRerouteResponse.getState() + .routingTable() + .index("target") + .shard(0) + .getShards() + .get(0) + .getExpectedShardSize(); + // we support the expected shard size in the allocator to sum up over the source index shards + assertTrue("expected shard size must be set but wasn't: " + expectedShardSize, expectedShardSize > 0); + ensureGreen(); + assertHitCount(client().prepareSearch("target").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + } + + public void testCreateShrinkWithIndexSort() throws Exception { + SortField expectedSortField = new SortedSetSortField("id", true, SortedSetSelector.Type.MAX); + expectedSortField.setMissingValue(SortedSetSortField.STRING_FIRST); + Sort expectedIndexSort = new Sort(expectedSortField); + internalCluster().ensureAtLeastNumDataNodes(2); + prepareCreate("source").setSettings( + Settings.builder() + .put(indexSettings()) + .put("sort.field", "id") + .put("sort.order", "desc") + .put("number_of_shards", 8) + .put("number_of_replicas", 0) + ).setMapping("id", "type=keyword,doc_values=true").get(); + for (int i = 0; i < 20; i++) { + client().prepareIndex("source") + .setId(Integer.toString(i)) + .setSource("{\"foo\" : \"bar\", \"id\" : " + i + "}", MediaTypeRegistry.JSON) + .get(); + } + final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); + assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); + DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]); + String mergeNode = discoveryNodes[0].getName(); + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. + ensureGreen(); + + flushAndRefresh(); + assertSortedSegments("source", expectedIndexSort); + + // relocate all shards to one node such that we can merge it. 
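+        // Editor's note (hedged): a shrink carries the source's index sort over to the
+        // target, which is why assertSortedSegments("target", expectedIndexSort) can pass
+        // later without re-declaring sort.field/sort.order on the target. The log line is
+        // an illustrative addition, not part of the original test:
+        logger.info("--> shrinking [source]; expecting target segments sorted on [{}]", expectedSortField.getField());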
+ client().admin() + .indices() + .prepareUpdateSettings("source") + .setSettings(Settings.builder().put("index.routing.allocation.require._name", mergeNode).put("index.blocks.write", true)) + .get(); + ensureGreen(); + + // check that index sort cannot be set on the target index + IllegalArgumentException exc = expectThrows( + IllegalArgumentException.class, + () -> client().admin() + .indices() + .prepareResizeIndex("source", "target") + .setSettings( + Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", "2") + .put("index.sort.field", "foo") + .build() + ) + .get() + ); + assertThat(exc.getMessage(), containsString("can't override index sort when resizing an index")); + + // check that the index sort order of `source` is correctly applied to the `target` + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("source", "target") + .setSettings( + Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", "2") + .putNull("index.blocks.write") + .build() + ) + .get() + ); + ensureGreen(); + flushAndRefresh(); + GetSettingsResponse settingsResponse = client().admin().indices().prepareGetSettings("target").execute().actionGet(); + assertEquals(settingsResponse.getSetting("target", "index.sort.field"), "id"); + assertEquals(settingsResponse.getSetting("target", "index.sort.order"), "desc"); + assertSortedSegments("target", expectedIndexSort); + + // ... and that the index sort is also applied to updates + for (int i = 20; i < 40; i++) { + client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + flushAndRefresh(); + assertSortedSegments("target", expectedIndexSort); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java new file mode 100644 index 0000000000000..dd4252d24f314 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java @@ -0,0 +1,506 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.action.admin.indices.create; + +import org.apache.lucene.search.join.ScoreMode; +import org.apache.lucene.util.Constants; +import org.opensearch.Version; +import org.opensearch.action.admin.cluster.state.ClusterStateRequest; +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.opensearch.action.admin.indices.shrink.ResizeType; +import org.opensearch.action.admin.indices.stats.CommonStats; +import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; +import org.opensearch.action.admin.indices.stats.ShardStats; +import org.opensearch.action.get.GetResponse; +import org.opensearch.action.index.IndexRequest; +import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.client.Client; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.MetadataCreateIndexService; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.Murmur3HashFunction; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.index.IndexModule; +import org.opensearch.index.IndexService; +import org.opensearch.index.engine.SegmentsStats; +import org.opensearch.index.query.TermsQueryBuilder; +import org.opensearch.index.seqno.SeqNoStats; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; +import org.opensearch.test.VersionUtils; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; +import java.util.function.BiFunction; +import java.util.stream.IntStream; + +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.index.query.QueryBuilders.nestedQuery; +import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.equalTo; + +public class RemoteSplitIndexIT extends RemoteStoreBaseIntegTestCase { + + @Override + protected boolean forbidPrivateIndexSettings() { + return false; + } + + public Settings indexSettings() { + return Settings.builder() + .put(super.indexSettings()) + .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build(); + } + + public void testCreateSplitIndexToN() throws IOException { + int[][] possibleShardSplits = new int[][] { { 2, 4, 8 }, { 3, 6, 12 }, { 1, 2, 4 } }; + int[] shardSplits = randomFrom(possibleShardSplits); + splitToN(shardSplits[0], shardSplits[1], shardSplits[2]); + } 
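+    // Editor's note (hedged sketch): a split is only legal when the target shard count is
+    // an integer multiple of the source count (and, ultimately, divides the index's
+    // number_of_routing_shards). A small helper expressing that rule directly; it is an
+    // illustrative addition and is not referenced by the tests in this file:
+    private static boolean isValidSplitFactor(int sourceShards, int targetShards) {
+        return targetShards > sourceShards && targetShards % sourceShards == 0;
+    }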
+ + public void testSplitFromOneToN() { + + assumeFalse("https://github.com/elastic/elasticsearch/issues/34080", Constants.WINDOWS); + + splitToN(1, 5, 10); + client().admin().indices().prepareDelete("*").get(); + int randomSplit = randomIntBetween(2, 6); + splitToN(1, randomSplit, randomSplit * 2); + } + + private void splitToN(int sourceShards, int firstSplitShards, int secondSplitShards) { + + assertEquals(sourceShards, (sourceShards * firstSplitShards) / firstSplitShards); + assertEquals(firstSplitShards, (firstSplitShards * secondSplitShards) / secondSplitShards); + internalCluster().ensureAtLeastNumDataNodes(2); + final boolean useRouting = randomBoolean(); + final boolean useNested = randomBoolean(); + final boolean useMixedRouting = useRouting ? randomBoolean() : false; + CreateIndexRequestBuilder createInitialIndex = prepareCreate("source"); + Settings.Builder settings = Settings.builder().put(indexSettings()).put("number_of_shards", sourceShards); + final boolean useRoutingPartition; + if (randomBoolean()) { + // randomly set the value manually + int routingShards = secondSplitShards * randomIntBetween(1, 10); + settings.put("index.number_of_routing_shards", routingShards); + useRoutingPartition = false; + } else { + useRoutingPartition = randomBoolean(); + } + if (useRouting && useMixedRouting == false && useRoutingPartition) { + int numRoutingShards = MetadataCreateIndexService.calculateNumRoutingShards(secondSplitShards, Version.CURRENT) - 1; + settings.put("index.routing_partition_size", randomIntBetween(1, numRoutingShards)); + if (useNested) { + createInitialIndex.setMapping("_routing", "required=true", "nested1", "type=nested"); + } else { + createInitialIndex.setMapping("_routing", "required=true"); + } + } else if (useNested) { + createInitialIndex.setMapping("nested1", "type=nested"); + } + logger.info("use routing {} use mixed routing {} use nested {}", useRouting, useMixedRouting, useNested); + createInitialIndex.setSettings(settings).get(); + + int numDocs = randomIntBetween(10, 50); + String[] routingValue = new String[numDocs]; + + BiFunction<String, Integer, IndexRequestBuilder> indexFunc = (index, id) -> { + try { + return client().prepareIndex(index) + .setId(Integer.toString(id)) + .setSource( + jsonBuilder().startObject() + .field("foo", "bar") + .field("i", id) + .startArray("nested1") + .startObject() + .field("n_field1", "n_value1_1") + .field("n_field2", "n_value2_1") + .endObject() + .startObject() + .field("n_field1", "n_value1_2") + .field("n_field2", "n_value2_2") + .endObject() + .endArray() + .endObject() + ); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + }; + for (int i = 0; i < numDocs; i++) { + IndexRequestBuilder builder = indexFunc.apply("source", i); + if (useRouting) { + String routing = randomRealisticUnicodeOfCodepointLengthBetween(1, 10); + if (useMixedRouting && randomBoolean()) { + routingValue[i] = null; + } else { + routingValue[i] = routing; + } + builder.setRouting(routingValue[i]); + } + builder.get(); + } + + if (randomBoolean()) { + for (int i = 0; i < numDocs; i++) { // let's introduce some updates / deletes on the index + if (randomBoolean()) { + IndexRequestBuilder builder = indexFunc.apply("source", i); + if (useRouting) { + builder.setRouting(routingValue[i]); + } + builder.get(); + } + } + } + + ensureYellow(); + client().admin().indices().prepareUpdateSettings("source").setSettings(Settings.builder().put("index.blocks.write", true)).get(); + ensureGreen(); + Settings.Builder firstSplitSettingsBuilder 
= Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", firstSplitShards) + .putNull("index.blocks.write"); + if (sourceShards == 1 && useRoutingPartition == false && randomBoolean()) { // try to set it if we have a source index with 1 shard + firstSplitSettingsBuilder.put("index.number_of_routing_shards", secondSplitShards); + } + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("source", "first_split") + .setResizeType(ResizeType.SPLIT) + .setSettings(firstSplitSettingsBuilder.build()) + .get() + ); + ensureGreen(); + assertHitCount(client().prepareSearch("first_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + + for (int i = 0; i < numDocs; i++) { // now update + IndexRequestBuilder builder = indexFunc.apply("first_split", i); + if (useRouting) { + builder.setRouting(routingValue[i]); + } + builder.get(); + } + flushAndRefresh(); + assertHitCount(client().prepareSearch("first_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + for (int i = 0; i < numDocs; i++) { + GetResponse getResponse = client().prepareGet("first_split", Integer.toString(i)).setRouting(routingValue[i]).get(); + assertTrue(getResponse.isExists()); + } + + client().admin() + .indices() + .prepareUpdateSettings("first_split") + .setSettings(Settings.builder().put("index.blocks.write", true)) + .get(); + ensureGreen(); + // now split first_split into a second index + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("first_split", "second_split") + .setResizeType(ResizeType.SPLIT) + .setSettings( + Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", secondSplitShards) + .putNull("index.blocks.write") + .build() + ) + .get() + ); + ensureGreen(); + assertHitCount(client().prepareSearch("second_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + // let it be allocated anywhere; replicas stay at 0 in this remote-store variant + client().admin() + .indices() + .prepareUpdateSettings("second_split") + .setSettings(Settings.builder().put("index.number_of_replicas", 0)) + .get(); + ensureGreen(); + assertHitCount(client().prepareSearch("second_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + + for (int i = 0; i < numDocs; i++) { // now update + IndexRequestBuilder builder = indexFunc.apply("second_split", i); + if (useRouting) { + builder.setRouting(routingValue[i]); + } + builder.get(); + } + flushAndRefresh(); + for (int i = 0; i < numDocs; i++) { + GetResponse getResponse = client().prepareGet("second_split", Integer.toString(i)).setRouting(routingValue[i]).get(); + assertTrue(getResponse.isExists()); + } + assertHitCount(client().prepareSearch("second_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + assertHitCount(client().prepareSearch("first_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + if (useNested) { + assertNested("source", numDocs); + assertNested("first_split", numDocs); + assertNested("second_split", numDocs); + } + assertAllUniqueDocs( + client().prepareSearch("second_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), + numDocs + ); + assertAllUniqueDocs( +
client().prepareSearch("first_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), + numDocs + ); + assertAllUniqueDocs(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + } + + public void assertNested(String index, int numDocs) { + // now, do a nested query + SearchResponse searchResponse = client().prepareSearch(index) + .setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"), ScoreMode.Avg)) + .get(); + assertNoFailures(searchResponse); + assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) numDocs)); + } + + public void assertAllUniqueDocs(SearchResponse response, int numDocs) { + Set<String> ids = new HashSet<>(); + for (int i = 0; i < response.getHits().getHits().length; i++) { + String id = response.getHits().getHits()[i].getId(); + assertTrue("found ID " + id + " more than once", ids.add(id)); + } + assertEquals(numDocs, ids.size()); + } + + public void testSplitIndexPrimaryTerm() throws Exception { + int numberOfTargetShards = randomIntBetween(2, 20); + int numberOfShards = randomValueOtherThanMany(n -> numberOfTargetShards % n != 0, () -> between(1, numberOfTargetShards - 1)); + internalCluster().ensureAtLeastNumDataNodes(2); + prepareCreate("source").setSettings( + Settings.builder() + .put(indexSettings()) + .put("number_of_shards", numberOfShards) + .put("index.number_of_routing_shards", numberOfTargetShards) + ).get(); + ensureGreen(TimeValue.timeValueSeconds(120)); // needs more than the default to allocate many shards + + // fail random primary shards to force primary terms to increase + final Index source = resolveIndex("source"); + final int iterations = scaledRandomIntBetween(0, 16); + for (int i = 0; i < iterations; i++) { + final String node = randomSubsetOf(1, internalCluster().nodesInclude("source")).get(0); + final IndicesService indexServices = internalCluster().getInstance(IndicesService.class, node); + final IndexService indexShards = indexServices.indexServiceSafe(source); + for (final Integer shardId : indexShards.shardIds()) { + final IndexShard shard = indexShards.getShard(shardId); + if (shard.routingEntry().primary() && randomBoolean()) { + disableAllocation("source"); + shard.failShard("test", new Exception("test")); + // this can not succeed until the shard is failed and a replica is promoted + int id = 0; + while (true) { + // find an ID that routes to the right shard, we will only index to the shard that saw a primary failure + final String s = Integer.toString(id); + final int hash = Math.floorMod(Murmur3HashFunction.hash(s), numberOfShards); + if (hash == shardId) { + final IndexRequest request = new IndexRequest("source").id(s) + .source("{ \"f\": \"" + s + "\"}", MediaTypeRegistry.JSON); + client().index(request).get(); + break; + } else { + id++; + } + } + enableAllocation("source"); + ensureGreen(); + } + } + } + + final Settings.Builder prepareSplitSettings = Settings.builder().put("index.blocks.write", true); + client().admin().indices().prepareUpdateSettings("source").setSettings(prepareSplitSettings).get(); + ensureYellow(); + + final IndexMetadata indexMetadata = indexMetadata(client(), "source"); + final long beforeSplitPrimaryTerm = IntStream.range(0, numberOfShards).mapToLong(indexMetadata::primaryTerm).max().getAsLong(); + + // now split source into target + final Settings splitSettings = Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", numberOfTargetShards) + 
.putNull("index.blocks.write") + .build(); + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("source", "target") + .setResizeType(ResizeType.SPLIT) + .setSettings(splitSettings) + .get() + ); + + ensureGreen(TimeValue.timeValueSeconds(120)); // needs more than the default to relocate many shards + + final IndexMetadata aftersplitIndexMetadata = indexMetadata(client(), "target"); + for (int shardId = 0; shardId < numberOfTargetShards; shardId++) { + assertThat(aftersplitIndexMetadata.primaryTerm(shardId), equalTo(beforeSplitPrimaryTerm + 1)); + } + } + + private static IndexMetadata indexMetadata(final Client client, final String index) { + final ClusterStateResponse clusterStateResponse = client.admin().cluster().state(new ClusterStateRequest()).actionGet(); + return clusterStateResponse.getState().metadata().index(index); + } + + public void testCreateSplitIndex() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(2); + Version version = VersionUtils.randomIndexCompatibleVersion(random()); + prepareCreate("source").setSettings( + Settings.builder().put(indexSettings()).put("number_of_shards", 1).put("index.version.created", version) + ).get(); + final int docs = randomIntBetween(0, 128); + for (int i = 0; i < docs; i++) { + client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. + ensureGreen(); + // relocate all shards to one node such that we can merge it. + client().admin().indices().prepareUpdateSettings("source").setSettings(Settings.builder().put("index.blocks.write", true)).get(); + ensureGreen(); + + final IndicesStatsResponse sourceStats = client().admin().indices().prepareStats("source").setSegments(true).get(); + + // disable rebalancing to be able to capture the right stats. balancing can move the target primary + // making it hard to pin point the source shards. 
+ client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none")) + .get(); + try { + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("source", "target") + .setResizeType(ResizeType.SPLIT) + .setSettings( + Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", 2) + .putNull("index.blocks.write") + .build() + ) + .get() + ); + ensureGreen(); + + final ClusterState state = client().admin().cluster().prepareState().get().getState(); + DiscoveryNode mergeNode = state.nodes().get(state.getRoutingTable().index("target").shard(0).primaryShard().currentNodeId()); + logger.info("split node {}", mergeNode); + + final long maxSeqNo = Arrays.stream(sourceStats.getShards()) + .filter(shard -> shard.getShardRouting().currentNodeId().equals(mergeNode.getId())) + .map(ShardStats::getSeqNoStats) + .mapToLong(SeqNoStats::getMaxSeqNo) + .max() + .getAsLong(); + final long maxUnsafeAutoIdTimestamp = Arrays.stream(sourceStats.getShards()) + .filter(shard -> shard.getShardRouting().currentNodeId().equals(mergeNode.getId())) + .map(ShardStats::getStats) + .map(CommonStats::getSegments) + .mapToLong(SegmentsStats::getMaxUnsafeAutoIdTimestamp) + .max() + .getAsLong(); + + final IndicesStatsResponse targetStats = client().admin().indices().prepareStats("target").get(); + for (final ShardStats shardStats : targetStats.getShards()) { + final SeqNoStats seqNoStats = shardStats.getSeqNoStats(); + final ShardRouting shardRouting = shardStats.getShardRouting(); + assertThat("failed on " + shardRouting, seqNoStats.getMaxSeqNo(), equalTo(maxSeqNo)); + assertThat("failed on " + shardRouting, seqNoStats.getLocalCheckpoint(), equalTo(maxSeqNo)); + assertThat( + "failed on " + shardRouting, + shardStats.getStats().getSegments().getMaxUnsafeAutoIdTimestamp(), + equalTo(maxUnsafeAutoIdTimestamp) + ); + } + + final int size = docs > 0 ? 
2 * docs : 1; + assertHitCount(client().prepareSearch("target").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs); + + for (int i = docs; i < 2 * docs; i++) { + client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + flushAndRefresh(); + assertHitCount( + client().prepareSearch("target").setSize(2 * size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), + 2 * docs + ); + assertHitCount(client().prepareSearch("source").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs); + GetSettingsResponse target = client().admin().indices().prepareGetSettings("target").get(); + assertEquals(version, target.getIndexToSettings().get("target").getAsVersion("index.version.created", null)); + } finally { + // clean up + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), (String) null) + ) + .get(); + } + + } + +} diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/ShrinkIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/ShrinkIndexIT.java index 51598d7775623..cafcb73b699fc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/ShrinkIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/ShrinkIndexIT.java @@ -37,7 +37,6 @@ import org.apache.lucene.search.SortedSetSelector; import org.apache.lucene.search.SortedSetSortField; import org.apache.lucene.util.Constants; - import org.opensearch.Version; import org.opensearch.action.admin.cluster.reroute.ClusterRerouteResponse; import org.opensearch.action.admin.cluster.state.ClusterStateRequest; @@ -65,10 +64,10 @@ import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexService; import org.opensearch.index.engine.SegmentsStats; import org.opensearch.index.query.TermsQueryBuilder; @@ -109,7 +108,7 @@ public void testCreateShrinkIndexToN() { for (int i = 0; i < 20; i++) { client().prepareIndex("source") .setId(Integer.toString(i)) - .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON) + .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON) .get(); } final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); @@ -147,7 +146,7 @@ public void testCreateShrinkIndexToN() { for (int i = 0; i < 20; i++) { // now update client().prepareIndex("first_shrink") .setId(Integer.toString(i)) - .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON) + .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON) .get(); } flushAndRefresh(); @@ -190,7 +189,7 @@ public void testCreateShrinkIndexToN() { for (int i = 0; i < 20; i++) { // now update client().prepareIndex("second_shrink") .setId(Integer.toString(i)) - .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON) + .setSource("{\"foo\" : 
\"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON) .get(); } flushAndRefresh(); @@ -232,7 +231,7 @@ public void testShrinkIndexPrimaryTerm() throws Exception { final int hash = Math.floorMod(Murmur3HashFunction.hash(s), numberOfShards); if (hash == shardId) { final IndexRequest request = new IndexRequest("source").id(s) - .source("{ \"f\": \"" + s + "\"}", XContentType.JSON); + .source("{ \"f\": \"" + s + "\"}", MediaTypeRegistry.JSON); client().index(request).get(); break; } else { @@ -283,7 +282,7 @@ public void testCreateShrinkIndex() { ).get(); final int docs = randomIntBetween(0, 128); for (int i = 0; i < docs; i++) { - client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); + client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); } final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); @@ -378,7 +377,7 @@ public void testCreateShrinkIndex() { } for (int i = docs; i < 2 * docs; i++) { - client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); + client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); } flushAndRefresh(); assertHitCount(client().prepareSearch("target").setSize(2 * size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 2 * docs); @@ -405,7 +404,7 @@ public void testCreateShrinkIndexFails() throws Exception { Settings.builder().put(indexSettings()).put("number_of_shards", randomIntBetween(2, 7)).put("number_of_replicas", 0) ).get(); for (int i = 0; i < 20; i++) { - client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); + client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); } final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); @@ -506,7 +505,7 @@ public void testCreateShrinkWithIndexSort() throws Exception { for (int i = 0; i < 20; i++) { client().prepareIndex("source") .setId(Integer.toString(i)) - .setSource("{\"foo\" : \"bar\", \"id\" : " + i + "}", XContentType.JSON) + .setSource("{\"foo\" : \"bar\", \"id\" : " + i + "}", MediaTypeRegistry.JSON) .get(); } final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); @@ -569,7 +568,7 @@ public void testCreateShrinkWithIndexSort() throws Exception { // ... 
and that the index sort is also applied to updates for (int i = 20; i < 40; i++) { - client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); + client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); } flushAndRefresh(); assertSortedSegments("target", expectedIndexSort); @@ -580,7 +579,7 @@ public void testShrinkCommitsMergeOnIdle() throws Exception { Settings.builder().put(indexSettings()).put("index.number_of_replicas", 0).put("number_of_shards", 5) ).get(); for (int i = 0; i < 30; i++) { - client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); + client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); } client().admin().indices().prepareFlush("source").get(); final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); @@ -737,7 +736,7 @@ public void testCreateShrinkIndexWithMaxShardSize() { .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, shardCount) ).get(); for (int i = 0; i < 20; i++) { - client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); + client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); } client().admin().indices().prepareFlush("source").get(); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/SplitIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/SplitIndexIT.java index ea53efe176eaf..c8b151e24ce98 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/SplitIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/SplitIndexIT.java @@ -38,7 +38,6 @@ import org.apache.lucene.search.SortedSetSortField; import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.util.Constants; - import org.opensearch.Version; import org.opensearch.action.admin.cluster.state.ClusterStateRequest; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; @@ -61,8 +60,8 @@ import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexService; import org.opensearch.index.engine.SegmentsStats; import org.opensearch.index.query.TermsQueryBuilder; @@ -347,7 +346,7 @@ public void testSplitIndexPrimaryTerm() throws Exception { final int hash = Math.floorMod(Murmur3HashFunction.hash(s), numberOfShards); if (hash == shardId) { final IndexRequest request = new IndexRequest("source").id(s) - .source("{ \"f\": \"" + s + "\"}", XContentType.JSON); + .source("{ \"f\": \"" + s + "\"}", MediaTypeRegistry.JSON); client().index(request).get(); break; } else { @@ -403,7 +402,7 @@ public void testCreateSplitIndex() throws Exception { ).get(); final int docs = randomIntBetween(0, 128); for (int i = 0; i < docs; i++) { - client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); + client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", 
MediaTypeRegistry.JSON).get(); } // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due @@ -487,7 +486,7 @@ public void testCreateSplitIndex() throws Exception { } for (int i = docs; i < 2 * docs; i++) { - client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); + client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); } flushAndRefresh(); assertHitCount( @@ -526,7 +525,7 @@ public void testCreateSplitWithIndexSort() throws Exception { for (int i = 0; i < 20; i++) { client().prepareIndex("source") .setId(Integer.toString(i)) - .setSource("{\"foo\" : \"bar\", \"id\" : " + i + "}", XContentType.JSON) + .setSource("{\"foo\" : \"bar\", \"id\" : " + i + "}", MediaTypeRegistry.JSON) .get(); } // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node @@ -582,7 +581,7 @@ public void testCreateSplitWithIndexSort() throws Exception { // ... and that the index sort is also applied to updates for (int i = 20; i < 40; i++) { - client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); + client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); } flushAndRefresh(); assertSortedSegments("target", expectedIndexSort); diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamTestCase.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamTestCase.java index 44f66dd4e0f90..82ab5b0118c0e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamTestCase.java @@ -16,19 +16,19 @@ import org.opensearch.cluster.metadata.ComposableIndexTemplate; import org.opensearch.cluster.metadata.DataStream; import org.opensearch.cluster.metadata.Template; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.test.OpenSearchIntegTestCase; import java.util.List; import java.util.stream.Collectors; -import static org.hamcrest.Matchers.is; import static org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import static org.opensearch.test.OpenSearchIntegTestCase.Scope; +import static org.hamcrest.Matchers.is; @ClusterScope(scope = Scope.TEST, numDataNodes = 2) public class DataStreamTestCase extends OpenSearchIntegTestCase { @@ -37,6 +37,7 @@ public AcknowledgedResponse createDataStream(String name) throws Exception { CreateDataStreamAction.Request request = new CreateDataStreamAction.Request(name); AcknowledgedResponse response = client().admin().indices().createDataStream(request).get(); assertThat(response.isAcknowledged(), is(true)); + performRemoteStoreTestAction(); return response; } @@ -67,6 +68,7 @@ public RolloverResponse rolloverDataStream(String name) throws Exception { 
RolloverResponse response = client().admin().indices().rolloverIndex(request).get(); assertThat(response.isAcknowledged(), is(true)); assertThat(response.isRolledOver(), is(true)); + performRemoteStoreTestAction(); return response; } @@ -90,7 +92,7 @@ public AcknowledgedResponse createDataStreamIndexTemplate(String name, List<Stri } public AcknowledgedResponse createIndexTemplate(String name, String jsonContent) throws Exception { - XContentParser parser = XContentHelper.createParser(xContentRegistry(), null, new BytesArray(jsonContent), XContentType.JSON); + XContentParser parser = XContentHelper.createParser(xContentRegistry(), null, new BytesArray(jsonContent), MediaTypeRegistry.JSON); return createIndexTemplate(name, ComposableIndexTemplate.parse(parser)); } @@ -109,5 +111,4 @@ public AcknowledgedResponse deleteIndexTemplate(String name) throws Exception { assertThat(response.isAcknowledged(), is(true)); return response; } - } diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamUsageIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamUsageIT.java index 2d06b594a598c..deabbe44bfc3d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamUsageIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamUsageIT.java @@ -14,8 +14,8 @@ import org.opensearch.action.index.IndexResponse; import org.opensearch.cluster.metadata.DataStream; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; import java.util.Arrays; import java.util.List; @@ -93,13 +93,18 @@ public void assertDataStreamIndexDocuments(String timestampFieldName) throws Exc Exception exception; // Only op_type=create requests should be allowed. - exception = expectThrows(Exception.class, () -> index(new IndexRequest("logs-demo").id("doc-1").source("{}", XContentType.JSON))); + exception = expectThrows( + Exception.class, + () -> index(new IndexRequest("logs-demo").id("doc-1").source("{}", MediaTypeRegistry.JSON)) + ); assertThat(exception.getMessage(), containsString("only write ops with an op_type of create are allowed in data streams")); // Documents must contain a valid timestamp field. 
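+        // Editor's note (hedged sketch): for contrast with the two failure cases in this
+        // method, a request that satisfies both rules (op_type=create plus the stream's
+        // timestamp field) could be built as below; constructing it has no side effects,
+        // and the sample timestamp value is illustrative:
+        final IndexRequest validRequest = new IndexRequest("logs-demo").id("doc-ok")
+            .source("{\"" + timestampFieldName + "\": \"2024-01-01T12:00:00Z\"}", MediaTypeRegistry.JSON)
+            .opType(DocWriteRequest.OpType.CREATE);
+        assertNotNull(validRequest);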
exception = expectThrows( Exception.class, - () -> index(new IndexRequest("logs-demo").id("doc-1").source("{}", XContentType.JSON).opType(DocWriteRequest.OpType.CREATE)) + () -> index( + new IndexRequest("logs-demo").id("doc-1").source("{}", MediaTypeRegistry.JSON).opType(DocWriteRequest.OpType.CREATE) + ) ); assertThat( exception.getMessage(), diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/exists/IndicesExistsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/exists/IndicesExistsIT.java index f2db023a2ac01..b5ab4b5290171 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/exists/IndicesExistsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/exists/IndicesExistsIT.java @@ -36,9 +36,9 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.discovery.ClusterManagerNotDiscoveredException; import org.opensearch.gateway.GatewayService; +import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; -import org.opensearch.test.InternalTestCluster; import java.io.IOException; diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeIT.java index 09af533292e9a..5090af1706d5a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeIT.java @@ -100,6 +100,24 @@ public void testForceMergeUUIDConsistent() throws IOException { assertThat(primaryForceMergeUUID, is(replicaForceMergeUUID)); } + public void testForceMergeOnlyOnPrimaryShards() throws IOException { + internalCluster().ensureAtLeastNumDataNodes(2); + final String index = "test-index"; + createIndex( + index, + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build() + ); + ensureGreen(index); + final ForceMergeResponse forceMergeResponse = client().admin() + .indices() + .prepareForceMerge(index) + .setMaxNumSegments(1) + .setPrimaryOnly(true) + .get(); + assertThat(forceMergeResponse.getFailedShards(), is(0)); + assertThat(forceMergeResponse.getSuccessfulShards(), is(1)); + } + private static String getForceMergeUUID(IndexShard indexShard) throws IOException { try (GatedCloseable<IndexCommit> wrappedIndexCommit = indexShard.acquireLastIndexCommit(true)) { return wrappedIndexCommit.get().getUserData().get(Engine.FORCE_MERGE_UUID_KEY); diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/rollover/RolloverIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/rollover/RolloverIT.java index 7f175289f3a88..d4e07aa4251c3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/rollover/RolloverIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/rollover/RolloverIT.java @@ -35,7 +35,6 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; - import org.opensearch.ResourceAlreadyExistsException; import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse; @@ -47,13 +46,13 @@ 
import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalSettingsPlugin; import org.opensearch.test.MockLogAppender; +import org.opensearch.test.OpenSearchIntegTestCase; import java.time.ZoneOffset; import java.time.ZonedDateTime; @@ -62,10 +61,10 @@ import java.util.List; import java.util.Set; -import static org.hamcrest.Matchers.containsString; import static org.opensearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; import static org.hamcrest.Matchers.is; diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/view/ViewIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/view/ViewIT.java new file mode 100644 index 0000000000000..85c70e098652c --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/view/ViewIT.java @@ -0,0 +1,154 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.cluster.metadata.View; +import org.opensearch.index.IndexNotFoundException; +import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; +import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import org.hamcrest.MatcherAssert; + +import java.util.List; + +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; + +@ClusterScope(scope = Scope.TEST, numDataNodes = 2) +public class ViewIT extends ViewTestBase { + + public void testCreateView() throws Exception { + final String viewName = randomAlphaOfLength(8); + final String indexPattern = randomAlphaOfLength(8); + + logger.info("Testing createView with valid parameters"); + final View view = createView(viewName, indexPattern).getView(); + MatcherAssert.assertThat(view.getName(), is(viewName)); + MatcherAssert.assertThat(view.getTargets().size(), is(1)); + MatcherAssert.assertThat(view.getTargets().first().getIndexPattern(), is(indexPattern)); + + logger.info("Testing createView with existing view name"); + final Exception ex = assertThrows(ViewAlreadyExistsException.class, () -> createView(viewName, randomAlphaOfLength(8))); + MatcherAssert.assertThat(ex.getMessage(), is("View [" + viewName + "] already exists")); + } + + public void testCreateViewTargetsSet() throws Exception { + final String viewName = randomAlphaOfLength(8); + final String indexPattern = "a" + randomAlphaOfLength(8); + final String indexPattern2 = "b" + randomAlphaOfLength(8); + final List<String> targetPatterns = List.of(indexPattern2, indexPattern, indexPattern); + + logger.info("Testing createView with targets that will be reordered and deduplicated"); + final View view = createView(viewName, targetPatterns).getView(); + MatcherAssert.assertThat(view.getName(), is(viewName)); + MatcherAssert.assertThat(view.getTargets().size(), is(2)); + MatcherAssert.assertThat(view.getTargets().first().getIndexPattern(), is(indexPattern)); + MatcherAssert.assertThat(view.getTargets().last().getIndexPattern(), is(indexPattern2)); + } + + public void testGetView() throws Exception { + final String viewName = randomAlphaOfLength(8); + createView(viewName, randomAlphaOfLength(8)); + + final View view = getView(viewName).getView(); + MatcherAssert.assertThat(view.getName(), is(viewName)); + + logger.info("Testing getView with non-existent view"); + final String nonExistentView = "non-existent-" + randomAlphaOfLength(8); + final Exception whenNeverExistedEx = assertThrows(ViewNotFoundException.class, () -> getView(nonExistentView)); + MatcherAssert.assertThat(whenNeverExistedEx.getMessage(), is("View [" + nonExistentView + "] does not exist")); + } + + public void testDeleteView() throws Exception { + final String viewName = randomAlphaOfLength(8); + createView(viewName, randomAlphaOfLength(8)); + + logger.info("Testing deleteView with existing view"); + deleteView(viewName); + final Exception whenDeletedEx = assertThrows(ViewNotFoundException.class, () -> getView(viewName)); + MatcherAssert.assertThat(whenDeletedEx.getMessage(), is("View [" + viewName + "] does not exist")); + + logger.info("Testing deleteView with non-existent view"); + final String nonExistentView = "non-existent-" + randomAlphaOfLength(8); + final Exception whenNeverExistedEx 
= assertThrows(ViewNotFoundException.class, () -> deleteView(nonExistentView)); + MatcherAssert.assertThat(whenNeverExistedEx.getMessage(), is("View [" + nonExistentView + "] does not exist")); + } + + public void testUpdateView() throws Exception { + final String viewName = randomAlphaOfLength(8); + final String originalIndexPattern = randomAlphaOfLength(8); + final View originalView = createView(viewName, originalIndexPattern).getView(); + + logger.info("Testing updateView with existing view"); + final String newDescription = randomAlphaOfLength(20); + final String newIndexPattern = "newPattern-" + originalIndexPattern; + final View updatedView = updateView(viewName, newDescription, newIndexPattern).getView(); + + MatcherAssert.assertThat(updatedView, not(is(originalView))); + MatcherAssert.assertThat(updatedView.getDescription(), is(newDescription)); + MatcherAssert.assertThat(updatedView.getTargets(), hasSize(1)); + MatcherAssert.assertThat(updatedView.getTargets().first().getIndexPattern(), is(newIndexPattern)); + + logger.info("Testing updateView with non-existent view"); + final String nonExistentView = "non-existent-" + randomAlphaOfLength(8); + final Exception whenNeverExistedEx = assertThrows(ViewNotFoundException.class, () -> updateView(nonExistentView, null, "index-*")); + MatcherAssert.assertThat(whenNeverExistedEx.getMessage(), is("View [" + nonExistentView + "] does not exist")); + } + + public void testListViewNames() throws Exception { + logger.info("Testing listViewNames when no views have been created"); + MatcherAssert.assertThat(listViewNames(), is(List.of())); + + final String view1 = "view1"; + final String view2 = "view2"; + createView(view1, "index-1-*"); + createView(view2, "index-2-*"); + + logger.info("Testing listViewNames"); + final List<String> views = listViewNames(); + MatcherAssert.assertThat(views, containsInAnyOrder(view1, view2)); + + logger.info("Testing listViewNames after deleting a view"); + deleteView(view1); + final List<String> viewsAfterDeletion = listViewNames(); + MatcherAssert.assertThat(viewsAfterDeletion, not(contains(view1))); + MatcherAssert.assertThat(viewsAfterDeletion, contains(view2)); + } + + public void testSearchOperations() throws Exception { + final String indexInView1 = "index-1"; + final String indexInView2 = "index-2"; + final String indexNotInView = "another-index-1"; + + final int indexInView1DocCount = createIndexWithDocs(indexInView1); + final int indexInView2DocCount = createIndexWithDocs(indexInView2); + createIndexWithDocs(indexNotInView); + + logger.info("Testing view with no matches"); + createView("no-matches", "this-pattern-will-match-nothing"); + final Exception ex = assertThrows(IndexNotFoundException.class, () -> searchView("no-matches")); + MatcherAssert.assertThat(ex.getMessage(), is("no such index [this-pattern-will-match-nothing]")); + + logger.info("Testing view with exact index match"); + createView("only-index-1", "index-1"); + assertHitCount(searchView("only-index-1"), indexInView1DocCount); + + logger.info("Testing view with wildcard matches"); + createView("both-indices", "index-*"); + assertHitCount(searchView("both-indices"), indexInView1DocCount + indexInView2DocCount); + + logger.info("Testing searchView with non-existent view"); + final String nonExistentView = "non-existent-" + randomAlphaOfLength(8); + final Exception whenNeverExistedEx = assertThrows(ViewNotFoundException.class, () -> searchView(nonExistentView)); + MatcherAssert.assertThat(whenNeverExistedEx.getMessage(), is("View [" + 
nonExistentView + "] does not exist")); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/view/ViewTestBase.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/view/ViewTestBase.java new file mode 100644 index 0000000000000..a44ba0cf7c717 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/view/ViewTestBase.java @@ -0,0 +1,79 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.test.BackgroundIndexer; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.List; +import java.util.stream.Collectors; + +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; + +public abstract class ViewTestBase extends OpenSearchIntegTestCase { + + protected int createIndexWithDocs(final String indexName) throws Exception { + createIndex(indexName); + ensureGreen(indexName); + + final int numOfDocs = scaledRandomIntBetween(0, 200); + try (final BackgroundIndexer indexer = new BackgroundIndexer(indexName, "_doc", client(), numOfDocs)) { + waitForDocs(numOfDocs, indexer); + } + + refresh(indexName); + assertHitCount(client().prepareSearch(indexName).setSize(0).get(), numOfDocs); + return numOfDocs; + } + + protected GetViewAction.Response createView(final String name, final String indexPattern) throws Exception { + return createView(name, List.of(indexPattern)); + } + + protected GetViewAction.Response createView(final String name, final List<String> targets) throws Exception { + final CreateViewAction.Request request = new CreateViewAction.Request( + name, + null, + targets.stream().map(CreateViewAction.Request.Target::new).collect(Collectors.toList()) + ); + return client().admin().indices().createView(request).actionGet(); + } + + protected GetViewAction.Response getView(final String name) { + return client().admin().indices().getView(new GetViewAction.Request(name)).actionGet(); + + } + + protected void deleteView(final String name) { + client().admin().indices().deleteView(new DeleteViewAction.Request(name)).actionGet(); + performRemoteStoreTestAction(); + } + + protected List<String> listViewNames() { + return client().listViewNames(new ListViewNamesAction.Request()).actionGet().getViewNames(); + } + + protected SearchResponse searchView(final String viewName) throws Exception { + final SearchViewAction.Request request = new SearchViewAction.Request(viewName, new SearchRequest()); + final SearchResponse response = client().searchView(request).actionGet(); + return response; + } + + protected GetViewAction.Response updateView(final String name, final String description, final String indexPattern) { + final CreateViewAction.Request request = new CreateViewAction.Request( + name, + description, + List.of(new CreateViewAction.Request.Target(indexPattern)) + ); + final GetViewAction.Response response = client().admin().indices().updateView(request).actionGet(); + return response; + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkIntegrationIT.java index 1c2e8200abb6d..cf83f20244a4b 100644 --- 
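ViewTestBase above captures the calling conventions for the new view APIs; a condensed sketch of the create-then-search flow, reusing the request types from that helper (view name and index pattern are illustrative):

```java
import java.util.List;

import org.opensearch.action.admin.indices.view.CreateViewAction;
import org.opensearch.action.admin.indices.view.SearchViewAction;
import org.opensearch.action.search.SearchRequest;
import org.opensearch.action.search.SearchResponse;

// Sketch only: register a view over a hypothetical "logs-*" pattern...
CreateViewAction.Request createRequest = new CreateViewAction.Request(
    "logs-view",                                             // hypothetical view name
    null,                                                    // optional description
    List.of(new CreateViewAction.Request.Target("logs-*"))   // target index pattern
);
client().admin().indices().createView(createRequest).actionGet();

// ...then search through it; the view resolves its registered patterns.
SearchResponse searchResponse = client().searchView(new SearchViewAction.Request("logs-view", new SearchRequest()))
    .actionGet();
```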
a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkIntegrationIT.java @@ -37,7 +37,6 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.admin.indices.mapping.get.GetMappingsResponse; - import org.opensearch.action.index.IndexRequest; import org.opensearch.action.index.IndexResponse; import org.opensearch.action.ingest.PutPipelineRequest; @@ -46,11 +45,11 @@ import org.opensearch.action.update.UpdateRequest; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.ingest.IngestTestPlugin; import org.opensearch.plugins.Plugin; -import org.opensearch.core.rest.RestStatus; import org.opensearch.test.OpenSearchIntegTestCase; import java.io.IOException; @@ -84,7 +83,7 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { public void testBulkIndexCreatesMapping() throws Exception { String bulkAction = copyToStringFromClasspath("/org/opensearch/action/bulk/bulk-log.json"); BulkRequestBuilder bulkBuilder = client().prepareBulk(); - bulkBuilder.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON); + bulkBuilder.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, MediaTypeRegistry.JSON); bulkBuilder.get(); assertBusy(() -> { GetMappingsResponse mappingsResponse = client().admin().indices().prepareGetMappings().get(); @@ -155,7 +154,7 @@ public void testBulkWithGlobalDefaults() throws Exception { String bulkAction = copyToStringFromClasspath("/org/opensearch/action/bulk/simple-bulk-missing-index-type.json"); { BulkRequestBuilder bulkBuilder = client().prepareBulk(); - bulkBuilder.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON); + bulkBuilder.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, MediaTypeRegistry.JSON); ActionRequestValidationException ex = expectThrows(ActionRequestValidationException.class, bulkBuilder::get); assertThat(ex.validationErrors(), containsInAnyOrder("index is missing", "index is missing", "index is missing")); @@ -165,7 +164,7 @@ public void testBulkWithGlobalDefaults() throws Exception { createSamplePipeline("pipeline"); BulkRequestBuilder bulkBuilder = client().prepareBulk("test").routing("routing").pipeline("pipeline"); - bulkBuilder.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON); + bulkBuilder.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, MediaTypeRegistry.JSON); BulkResponse bulkItemResponses = bulkBuilder.get(); assertFalse(bulkItemResponses.hasFailures()); } @@ -183,7 +182,7 @@ private void createSamplePipeline(String pipelineId) throws IOException, Executi AcknowledgedResponse acknowledgedResponse = client().admin() .cluster() - .putPipeline(new PutPipelineRequest(pipelineId, BytesReference.bytes(pipeline), XContentType.JSON)) + .putPipeline(new PutPipelineRequest(pipelineId, BytesReference.bytes(pipeline), MediaTypeRegistry.JSON)) .get(); assertTrue(acknowledgedResponse.isAcknowledged()); @@ -201,7 +200,7 @@ public void 
testDeleteIndexWhileIndexing() throws Exception { try { IndexResponse response = client().prepareIndex(index) .setId(id) - .setSource(Collections.singletonMap("f" + randomIntBetween(1, 10), randomNonNegativeLong()), XContentType.JSON) + .setSource(Collections.singletonMap("f" + randomIntBetween(1, 10), randomNonNegativeLong()), MediaTypeRegistry.JSON) .get(); assertThat(response.getResult(), is(oneOf(CREATED, UPDATED))); logger.info("--> index id={} seq_no={}", response.getId(), response.getSeqNo()); diff --git a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorClusterSettingsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorClusterSettingsIT.java index 14531787e9903..ea7af48266905 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorClusterSettingsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorClusterSettingsIT.java @@ -33,7 +33,7 @@ package org.opensearch.action.bulk; import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; @@ -50,9 +50,9 @@ public void testBulkProcessorAutoCreateRestrictions() throws Exception { client().admin().cluster().prepareHealth("willwork").setWaitForGreenStatus().execute().actionGet(); BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); - bulkRequestBuilder.add(client().prepareIndex("willwork").setId("1").setSource("{\"foo\":1}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("wontwork").setId("2").setSource("{\"foo\":2}", XContentType.JSON)); - bulkRequestBuilder.add(client().prepareIndex("willwork").setId("3").setSource("{\"foo\":3}", XContentType.JSON)); + bulkRequestBuilder.add(client().prepareIndex("willwork").setId("1").setSource("{\"foo\":1}", MediaTypeRegistry.JSON)); + bulkRequestBuilder.add(client().prepareIndex("wontwork").setId("2").setSource("{\"foo\":2}", MediaTypeRegistry.JSON)); + bulkRequestBuilder.add(client().prepareIndex("willwork").setId("3").setSource("{\"foo\":3}", MediaTypeRegistry.JSON)); BulkResponse br = bulkRequestBuilder.get(); BulkItemResponse[] responses = br.getItems(); assertEquals(3, responses.length); diff --git a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorIT.java b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorIT.java index 850034bc631b1..94202c208ba3d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorIT.java @@ -33,6 +33,7 @@ package org.opensearch.action.bulk; import com.carrotsearch.randomizedtesting.generators.RandomPicks; + import org.opensearch.action.get.MultiGetItemResponse; import org.opensearch.action.get.MultiGetRequestBuilder; import org.opensearch.action.get.MultiGetResponse; @@ -41,9 +42,9 @@ import org.opensearch.client.Requests; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; 
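The change repeated throughout these hunks is mechanical: the JSON media type constant moves from XContentType in the server module to MediaTypeRegistry in the core library. A before/after sketch with a hypothetical index:

```java
// Before this change:
// import org.opensearch.common.xcontent.XContentType;
// client().prepareIndex("idx").setId("1").setSource("{\"foo\":1}", XContentType.JSON).get();

// After: the same constant resolved from the core registry.
import org.opensearch.core.xcontent.MediaTypeRegistry;

client().prepareIndex("idx")     // hypothetical index name
    .setId("1")
    .setSource("{\"foo\":1}", MediaTypeRegistry.JSON)
    .get();
```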
import org.opensearch.test.OpenSearchIntegTestCase; import java.util.Arrays; diff --git a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorRetryIT.java b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorRetryIT.java index d0285f24e22fe..cd6cb0ca3b172 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorRetryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorRetryIT.java @@ -32,12 +32,11 @@ package org.opensearch.action.bulk; import org.opensearch.action.admin.indices.refresh.RefreshRequest; - import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.index.query.QueryBuilders; import org.opensearch.core.rest.RestStatus; +import org.opensearch.index.query.QueryBuilders; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.transport.RemoteTransportException; @@ -195,7 +194,7 @@ private static void indexDocs(BulkProcessor processor, int numDocs) { /** * Internal helper class to correlate backoff states with bulk responses. This is needed to check whether we maxed out the number * of retries but still got rejected (which is perfectly fine and can also happen from time to time under heavy load). - * + * <p> * This implementation relies on an implementation detail in Retry, namely that the bulk listener is notified on the same thread * as the last call to the backoff policy's iterator. The advantage is that this is non-invasive to the rest of the production code. */ diff --git a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkRejectionIT.java b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkRejectionIT.java index 7dde67791a2ec..a41664fe71c24 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkRejectionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkRejectionIT.java @@ -31,15 +31,15 @@ package org.opensearch.action.bulk; -import org.opensearch.action.ActionFuture; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.support.WriteRequest; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.index.IndexService; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; diff --git a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkWithUpdatesIT.java b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkWithUpdatesIT.java index 53afa53de92f3..e27c0c4786da8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkWithUpdatesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkWithUpdatesIT.java @@ -35,6 +35,8 @@ import org.opensearch.action.DocWriteRequest.OpType; import org.opensearch.action.DocWriteResponse; import org.opensearch.action.admin.indices.alias.Alias; +import org.opensearch.action.admin.indices.stats.IndicesStatsRequest; +import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; import org.opensearch.action.delete.DeleteRequest; import org.opensearch.action.get.GetResponse; import 
org.opensearch.action.index.IndexRequest; @@ -46,9 +48,9 @@ import org.opensearch.action.update.UpdateResponse; import org.opensearch.client.Requests; import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.Strings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.Strings; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.VersionType; import org.opensearch.indices.IndexClosedException; import org.opensearch.plugins.Plugin; @@ -56,8 +58,8 @@ import org.opensearch.script.Script; import org.opensearch.script.ScriptException; import org.opensearch.script.ScriptType; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -618,19 +620,19 @@ public void testThatInvalidIndexNamesShouldNotBreakCompleteBulkRequest() { // issue 6630 public void testThatFailedUpdateRequestReturnsCorrectType() throws Exception { BulkResponse indexBulkItemResponse = client().prepareBulk() - .add(new IndexRequest("test").id("3").source("{ \"title\" : \"Great Title of doc 3\" }", XContentType.JSON)) - .add(new IndexRequest("test").id("4").source("{ \"title\" : \"Great Title of doc 4\" }", XContentType.JSON)) - .add(new IndexRequest("test").id("5").source("{ \"title\" : \"Great Title of doc 5\" }", XContentType.JSON)) - .add(new IndexRequest("test").id("6").source("{ \"title\" : \"Great Title of doc 6\" }", XContentType.JSON)) + .add(new IndexRequest("test").id("3").source("{ \"title\" : \"Great Title of doc 3\" }", MediaTypeRegistry.JSON)) + .add(new IndexRequest("test").id("4").source("{ \"title\" : \"Great Title of doc 4\" }", MediaTypeRegistry.JSON)) + .add(new IndexRequest("test").id("5").source("{ \"title\" : \"Great Title of doc 5\" }", MediaTypeRegistry.JSON)) + .add(new IndexRequest("test").id("6").source("{ \"title\" : \"Great Title of doc 6\" }", MediaTypeRegistry.JSON)) .setRefreshPolicy(RefreshPolicy.IMMEDIATE) .get(); assertNoFailures(indexBulkItemResponse); BulkResponse bulkItemResponse = client().prepareBulk() - .add(new IndexRequest("test").id("1").source("{ \"title\" : \"Great Title of doc 1\" }", XContentType.JSON)) - .add(new IndexRequest("test").id("2").source("{ \"title\" : \"Great Title of doc 2\" }", XContentType.JSON)) - .add(new UpdateRequest("test", "3").doc("{ \"date\" : \"2014-01-30T23:59:57\"}", XContentType.JSON)) - .add(new UpdateRequest("test", "4").doc("{ \"date\" : \"2014-13-30T23:59:57\"}", XContentType.JSON)) + .add(new IndexRequest("test").id("1").source("{ \"title\" : \"Great Title of doc 1\" }", MediaTypeRegistry.JSON)) + .add(new IndexRequest("test").id("2").source("{ \"title\" : \"Great Title of doc 2\" }", MediaTypeRegistry.JSON)) + .add(new UpdateRequest("test", "3").doc("{ \"date\" : \"2014-01-30T23:59:57\"}", MediaTypeRegistry.JSON)) + .add(new UpdateRequest("test", "4").doc("{ \"date\" : \"2014-13-30T23:59:57\"}", MediaTypeRegistry.JSON)) .add(new DeleteRequest("test", "5")) .add(new DeleteRequest("test", "6")) .get(); @@ -732,7 +734,17 @@ public void testNoopUpdate() { final BulkItemResponse noopUpdate = bulkResponse.getItems()[0]; assertThat(noopUpdate.getResponse().getResult(), equalTo(DocWriteResponse.Result.NOOP)); - assertThat(Strings.toString(XContentType.JSON, noopUpdate), noopUpdate.getResponse().getShardInfo().getSuccessful(), equalTo(2)); + assertThat( + 
Strings.toString(MediaTypeRegistry.JSON, noopUpdate), + noopUpdate.getResponse().getShardInfo().getSuccessful(), + equalTo(2) + ); + + // verify that the noop update incremented the noop_update_total metric in indexing stats + IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest().indices(indexName).indexing(true); + final IndicesStatsResponse indicesStatsResponse = client().admin().indices().stats(indicesStatsRequest).actionGet(); + assertThat(indicesStatsResponse.getIndex(indexName).getTotal().indexing.getTotal().getNoopUpdateCount(), equalTo(1L)); + assertThat(indicesStatsResponse.getIndex(indexName).getPrimaries().indexing.getTotal().getNoopUpdateCount(), equalTo(1L)); final BulkItemResponse notFoundUpdate = bulkResponse.getItems()[1]; assertNotNull(notFoundUpdate.getFailure()); @@ -740,7 +752,7 @@ public void testNoopUpdate() { final BulkItemResponse notFoundDelete = bulkResponse.getItems()[2]; assertThat(notFoundDelete.getResponse().getResult(), equalTo(DocWriteResponse.Result.NOT_FOUND)); assertThat( - Strings.toString(XContentType.JSON, notFoundDelete), + Strings.toString(MediaTypeRegistry.JSON, notFoundDelete), notFoundDelete.getResponse().getShardInfo().getSuccessful(), equalTo(2) ); diff --git a/server/src/internalClusterTest/java/org/opensearch/action/ingest/AsyncIngestProcessorIT.java b/server/src/internalClusterTest/java/org/opensearch/action/ingest/AsyncIngestProcessorIT.java index 58c18be59fdac..aefabcb9bc14f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/ingest/AsyncIngestProcessorIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/ingest/AsyncIngestProcessorIT.java @@ -42,8 +42,8 @@ import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; import org.opensearch.ingest.AbstractProcessor; @@ -69,7 +69,7 @@ /** * The purpose of this test is to verify that when a processor executes an operation asynchronously that * the expected result is the same as if the same operation happens synchronously. - * + * <p> * In this test two test processor are defined that basically do the same operation, but a single processor * executes asynchronously. The result of the operation should be the same and also the order in which the * bulk responses are returned should be the same as how the corresponding index requests were defined. @@ -84,12 +84,12 @@ protected Collection<Class<? extends Plugin>> getPlugins() { public void testAsyncProcessorImplementation() { // A pipeline with 2 processors: the test async processor and sync test processor.
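The testNoopUpdate additions above assert on the new noop_update_total counter; the same read path in isolation (index name illustrative):

```java
import org.opensearch.action.admin.indices.stats.IndicesStatsRequest;
import org.opensearch.action.admin.indices.stats.IndicesStatsResponse;

// Sketch only: fetch indexing stats for a hypothetical index "idx" and read
// the noop update counter, using the call chain asserted in testNoopUpdate.
IndicesStatsRequest statsRequest = new IndicesStatsRequest().indices("idx").indexing(true);
IndicesStatsResponse stats = client().admin().indices().stats(statsRequest).actionGet();
long noopUpdates = stats.getIndex("idx").getTotal().indexing.getTotal().getNoopUpdateCount();
```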
BytesReference pipelineBody = new BytesArray("{\"processors\": [{\"test-async\": {}, \"test\": {}}]}"); - client().admin().cluster().putPipeline(new PutPipelineRequest("_id", pipelineBody, XContentType.JSON)).actionGet(); + client().admin().cluster().putPipeline(new PutPipelineRequest("_id", pipelineBody, MediaTypeRegistry.JSON)).actionGet(); BulkRequest bulkRequest = new BulkRequest(); int numDocs = randomIntBetween(8, 256); for (int i = 0; i < numDocs; i++) { - bulkRequest.add(new IndexRequest("foobar").id(Integer.toString(i)).source("{}", XContentType.JSON).setPipeline("_id")); + bulkRequest.add(new IndexRequest("foobar").id(Integer.toString(i)).source("{}", MediaTypeRegistry.JSON).setPipeline("_id")); } BulkResponse bulkResponse = client().bulk(bulkRequest).actionGet(); assertThat(bulkResponse.getItems().length, equalTo(numDocs)); diff --git a/server/src/internalClusterTest/java/org/opensearch/action/search/SearchProgressActionListenerIT.java b/server/src/internalClusterTest/java/org/opensearch/action/search/SearchProgressActionListenerIT.java index eb69eaaa9c2e1..4475ee837da4e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/search/SearchProgressActionListenerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/search/SearchProgressActionListenerIT.java @@ -34,9 +34,9 @@ import org.apache.lucene.search.TotalHits; import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsResponse; - import org.opensearch.client.Client; import org.opensearch.client.node.NodeClient; +import org.opensearch.core.tasks.TaskId; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.SearchShardTarget; import org.opensearch.search.aggregations.AggregationBuilders; @@ -44,7 +44,6 @@ import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.sort.FieldSortBuilder; import org.opensearch.search.sort.SortOrder; -import org.opensearch.tasks.TaskId; import org.opensearch.test.OpenSearchSingleNodeTestCase; import java.util.ArrayList; diff --git a/server/src/internalClusterTest/java/org/opensearch/action/search/TransportSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/action/search/TransportSearchIT.java index afa5ac908c137..b1934f901ac65 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/search/TransportSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/search/TransportSearchIT.java @@ -35,9 +35,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.CollectionTerminatedException; import org.apache.lucene.search.ScoreMode; - import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -47,13 +45,15 @@ import org.opensearch.action.support.WriteRequest; import org.opensearch.client.Client; import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AtomicArray; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; +import 
org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexSettings; @@ -64,7 +64,6 @@ import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.plugins.Plugin; import org.opensearch.plugins.SearchPlugin; -import org.opensearch.core.rest.RestStatus; import org.opensearch.search.DocValueFormat; import org.opensearch.search.SearchHit; import org.opensearch.search.aggregations.AbstractAggregationBuilder; @@ -110,8 +109,8 @@ public List<AggregationSpec> getAggregations() { @Override public List<FetchSubPhase> getFetchSubPhases(FetchPhaseConstructionContext context) { - /** - * Set up a fetch sub phase that throws an exception on indices whose name that start with "boom". + /* + Set up a fetch sub phase that throws an exception on indices whose names start with "boom". */ return Collections.singletonList(fetchContext -> new FetchSubPhaseProcessor() { @Override @@ -595,6 +594,11 @@ protected Aggregator createInternal( ) throws IOException { return new TestAggregator(name, parent, searchContext); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } }; } diff --git a/server/src/internalClusterTest/java/org/opensearch/action/support/ActiveShardsObserverIT.java b/server/src/internalClusterTest/java/org/opensearch/action/support/ActiveShardsObserverIT.java index 30f5c21ba6cd7..e19a7483370c2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/support/ActiveShardsObserverIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/support/ActiveShardsObserverIT.java @@ -32,9 +32,9 @@ package org.opensearch.action.support; -import org.opensearch.action.ActionFuture; import org.opensearch.action.admin.indices.create.CreateIndexResponse; import org.opensearch.common.Priority; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.test.OpenSearchIntegTestCase; diff --git a/server/src/internalClusterTest/java/org/opensearch/action/support/WaitActiveShardCountIT.java b/server/src/internalClusterTest/java/org/opensearch/action/support/WaitActiveShardCountIT.java index c82af8bce6e2d..08cffac8aac5d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/support/WaitActiveShardCountIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/support/WaitActiveShardCountIT.java @@ -35,12 +35,11 @@ import org.opensearch.action.UnavailableShardsException; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.indices.create.CreateIndexResponse; - import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.test.OpenSearchIntegTestCase; import static org.opensearch.common.unit.TimeValue.timeValueMillis; @@ -63,11 +62,11 @@ public void testReplicationWaitsForActiveShardCount() throws Exception { assertAcked(createIndexResponse); // indexing, by default, will work (waiting for one shard copy only) -
client().prepareIndex("test").setId("1").setSource(source("1", "test"), XContentType.JSON).execute().actionGet(); + client().prepareIndex("test").setId("1").setSource(source("1", "test"), MediaTypeRegistry.JSON).execute().actionGet(); try { client().prepareIndex("test") .setId("1") - .setSource(source("1", "test"), XContentType.JSON) + .setSource(source("1", "test"), MediaTypeRegistry.JSON) .setWaitForActiveShards(2) // wait for 2 active shard copies .setTimeout(timeValueMillis(100)) .execute() @@ -99,7 +98,7 @@ public void testReplicationWaitsForActiveShardCount() throws Exception { // this should work, since we now have two client().prepareIndex("test") .setId("1") - .setSource(source("1", "test"), XContentType.JSON) + .setSource(source("1", "test"), MediaTypeRegistry.JSON) .setWaitForActiveShards(2) .setTimeout(timeValueSeconds(1)) .execute() @@ -108,7 +107,7 @@ public void testReplicationWaitsForActiveShardCount() throws Exception { try { client().prepareIndex("test") .setId("1") - .setSource(source("1", "test"), XContentType.JSON) + .setSource(source("1", "test"), MediaTypeRegistry.JSON) .setWaitForActiveShards(ActiveShardCount.ALL) .setTimeout(timeValueMillis(100)) .execute() @@ -143,7 +142,7 @@ public void testReplicationWaitsForActiveShardCount() throws Exception { // this should work, since we now have all shards started client().prepareIndex("test") .setId("1") - .setSource(source("1", "test"), XContentType.JSON) + .setSource(source("1", "test"), MediaTypeRegistry.JSON) .setWaitForActiveShards(ActiveShardCount.ALL) .setTimeout(timeValueSeconds(1)) .execute() diff --git a/server/src/internalClusterTest/java/org/opensearch/action/support/replication/TransportReplicationActionRetryOnClosedNodeIT.java b/server/src/internalClusterTest/java/org/opensearch/action/support/replication/TransportReplicationActionRetryOnClosedNodeIT.java index 9f60e65eca297..569e64d795b06 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/support/replication/TransportReplicationActionRetryOnClosedNodeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/support/replication/TransportReplicationActionRetryOnClosedNodeIT.java @@ -32,38 +32,37 @@ package org.opensearch.action.support.replication; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; -import org.opensearch.action.ActionResponse; import org.opensearch.action.ActionType; import org.opensearch.action.support.ActionFilters; import org.opensearch.cluster.action.shard.ShardStateAction; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.index.shard.IndexShard; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; import org.opensearch.plugins.ActionPlugin; import org.opensearch.plugins.NetworkPlugin; import org.opensearch.plugins.Plugin; import 
org.opensearch.plugins.PluginsService; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportInterceptor; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestOptions; -import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; - import org.hamcrest.Matchers; import java.io.IOException; diff --git a/server/src/internalClusterTest/java/org/opensearch/action/termvectors/GetTermVectorsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/termvectors/GetTermVectorsIT.java index e84f40b3813eb..280f574b1baf9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/termvectors/GetTermVectorsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/termvectors/GetTermVectorsIT.java @@ -39,10 +39,10 @@ import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.util.BytesRef; -import org.opensearch.action.ActionFuture; import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsResponse; import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.Strings; @@ -74,6 +74,10 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase { + public GetTermVectorsIT(Settings staticSettings) { + super(staticSettings); + } + @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Collections.singleton(MockKeywordPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/action/termvectors/MultiTermVectorsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/termvectors/MultiTermVectorsIT.java index 91d280a9c4771..3fc3235701f17 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/termvectors/MultiTermVectorsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/termvectors/MultiTermVectorsIT.java @@ -52,6 +52,10 @@ import static org.hamcrest.Matchers.nullValue; public class MultiTermVectorsIT extends AbstractTermVectorsTestCase { + public MultiTermVectorsIT(Settings staticSettings) { + super(staticSettings); + } + public void testDuelESLucene() throws Exception { AbstractTermVectorsTestCase.TestFieldSetting[] testFieldSettings = getFieldSettings(); createIndexBasedOnFieldSettings("test", "alias", testFieldSettings); diff --git a/server/src/internalClusterTest/java/org/opensearch/aliases/IndexAliasesIT.java b/server/src/internalClusterTest/java/org/opensearch/aliases/IndexAliasesIT.java index 0b050fd60f920..f91df19232971 100644 --- a/server/src/internalClusterTest/java/org/opensearch/aliases/IndexAliasesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/aliases/IndexAliasesIT.java @@ -50,7 +50,7 @@ import org.opensearch.common.StopWatch; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.query.TermQueryBuilder; @@ -115,7 +115,7 @@ public void testAliases() throws Exception { logger.info("--> indexing against [alias1], should fail now"); IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, - () -> client().index(indexRequest("alias1").id("1").source(source("2", "test"), XContentType.JSON)).actionGet() + () -> client().index(indexRequest("alias1").id("1").source(source("2", "test"), MediaTypeRegistry.JSON)).actionGet() ); assertThat( exception.getMessage(), @@ -132,7 +132,7 @@ public void testAliases() throws Exception { }); logger.info("--> indexing against [alias1], should work now"); - IndexResponse indexResponse = client().index(indexRequest("alias1").id("1").source(source("1", "test"), XContentType.JSON)) + IndexResponse indexResponse = client().index(indexRequest("alias1").id("1").source(source("1", "test"), MediaTypeRegistry.JSON)) .actionGet(); assertThat(indexResponse.getIndex(), equalTo("test")); @@ -149,7 +149,7 @@ public void testAliases() throws Exception { logger.info("--> indexing against [alias1], should fail now"); exception = expectThrows( IllegalArgumentException.class, - () -> client().index(indexRequest("alias1").id("1").source(source("2", "test"), XContentType.JSON)).actionGet() + () -> client().index(indexRequest("alias1").id("1").source(source("2", "test"), MediaTypeRegistry.JSON)).actionGet() ); assertThat( exception.getMessage(), @@ -177,7 +177,7 @@ public void testAliases() throws Exception { }); logger.info("--> indexing against [alias1], should work now"); - indexResponse = client().index(indexRequest("alias1").id("1").source(source("1", "test"), XContentType.JSON)).actionGet(); + indexResponse = client().index(indexRequest("alias1").id("1").source(source("1", "test"), MediaTypeRegistry.JSON)).actionGet(); 
assertThat(indexResponse.getIndex(), equalTo("test")); assertAliasesVersionIncreases("test_x", () -> { @@ -186,7 +186,7 @@ public void testAliases() throws Exception { }); logger.info("--> indexing against [alias1], should work now"); - indexResponse = client().index(indexRequest("alias1").id("1").source(source("1", "test"), XContentType.JSON)).actionGet(); + indexResponse = client().index(indexRequest("alias1").id("1").source(source("1", "test"), MediaTypeRegistry.JSON)).actionGet(); assertThat(indexResponse.getIndex(), equalTo("test_x")); logger.info("--> deleting against [alias1], should fail now"); @@ -199,7 +199,7 @@ public void testAliases() throws Exception { }); logger.info("--> indexing against [alias1], should work against [test_x]"); - indexResponse = client().index(indexRequest("alias1").id("1").source(source("1", "test"), XContentType.JSON)).actionGet(); + indexResponse = client().index(indexRequest("alias1").id("1").source(source("1", "test"), MediaTypeRegistry.JSON)).actionGet(); assertThat(indexResponse.getIndex(), equalTo("test_x")); } @@ -281,16 +281,18 @@ public void testSearchingFilteringAliasesSingleIndex() throws Exception { logger.info("--> indexing against [test]"); client().index( - indexRequest("test").id("1").source(source("1", "foo test"), XContentType.JSON).setRefreshPolicy(RefreshPolicy.IMMEDIATE) + indexRequest("test").id("1").source(source("1", "foo test"), MediaTypeRegistry.JSON).setRefreshPolicy(RefreshPolicy.IMMEDIATE) ).actionGet(); client().index( - indexRequest("test").id("2").source(source("2", "bar test"), XContentType.JSON).setRefreshPolicy(RefreshPolicy.IMMEDIATE) + indexRequest("test").id("2").source(source("2", "bar test"), MediaTypeRegistry.JSON).setRefreshPolicy(RefreshPolicy.IMMEDIATE) ).actionGet(); client().index( - indexRequest("test").id("3").source(source("3", "baz test"), XContentType.JSON).setRefreshPolicy(RefreshPolicy.IMMEDIATE) + indexRequest("test").id("3").source(source("3", "baz test"), MediaTypeRegistry.JSON).setRefreshPolicy(RefreshPolicy.IMMEDIATE) ).actionGet(); client().index( - indexRequest("test").id("4").source(source("4", "something else"), XContentType.JSON).setRefreshPolicy(RefreshPolicy.IMMEDIATE) + indexRequest("test").id("4") + .source(source("4", "something else"), MediaTypeRegistry.JSON) + .setRefreshPolicy(RefreshPolicy.IMMEDIATE) ).actionGet(); logger.info("--> checking single filtering alias search"); @@ -387,16 +389,16 @@ public void testSearchingFilteringAliasesTwoIndices() throws Exception { ); logger.info("--> indexing against [test1]"); - client().index(indexRequest("test1").id("1").source(source("1", "foo test"), XContentType.JSON)).get(); - client().index(indexRequest("test1").id("2").source(source("2", "bar test"), XContentType.JSON)).get(); - client().index(indexRequest("test1").id("3").source(source("3", "baz test"), XContentType.JSON)).get(); - client().index(indexRequest("test1").id("4").source(source("4", "something else"), XContentType.JSON)).get(); + client().index(indexRequest("test1").id("1").source(source("1", "foo test"), MediaTypeRegistry.JSON)).get(); + client().index(indexRequest("test1").id("2").source(source("2", "bar test"), MediaTypeRegistry.JSON)).get(); + client().index(indexRequest("test1").id("3").source(source("3", "baz test"), MediaTypeRegistry.JSON)).get(); + client().index(indexRequest("test1").id("4").source(source("4", "something else"), MediaTypeRegistry.JSON)).get(); logger.info("--> indexing against [test2]"); - 
client().index(indexRequest("test2").id("5").source(source("5", "foo test"), XContentType.JSON)).get(); - client().index(indexRequest("test2").id("6").source(source("6", "bar test"), XContentType.JSON)).get(); - client().index(indexRequest("test2").id("7").source(source("7", "baz test"), XContentType.JSON)).get(); - client().index(indexRequest("test2").id("8").source(source("8", "something else"), XContentType.JSON)).get(); + client().index(indexRequest("test2").id("5").source(source("5", "foo test"), MediaTypeRegistry.JSON)).get(); + client().index(indexRequest("test2").id("6").source(source("6", "bar test"), MediaTypeRegistry.JSON)).get(); + client().index(indexRequest("test2").id("7").source(source("7", "baz test"), MediaTypeRegistry.JSON)).get(); + client().index(indexRequest("test2").id("8").source(source("8", "something else"), MediaTypeRegistry.JSON)).get(); refresh(); @@ -501,17 +503,17 @@ public void testSearchingFilteringAliasesMultipleIndices() throws Exception { ); logger.info("--> indexing against [test1]"); - client().index(indexRequest("test1").id("11").source(source("11", "foo test1"), XContentType.JSON)).get(); - client().index(indexRequest("test1").id("12").source(source("12", "bar test1"), XContentType.JSON)).get(); - client().index(indexRequest("test1").id("13").source(source("13", "baz test1"), XContentType.JSON)).get(); + client().index(indexRequest("test1").id("11").source(source("11", "foo test1"), MediaTypeRegistry.JSON)).get(); + client().index(indexRequest("test1").id("12").source(source("12", "bar test1"), MediaTypeRegistry.JSON)).get(); + client().index(indexRequest("test1").id("13").source(source("13", "baz test1"), MediaTypeRegistry.JSON)).get(); - client().index(indexRequest("test2").id("21").source(source("21", "foo test2"), XContentType.JSON)).get(); - client().index(indexRequest("test2").id("22").source(source("22", "bar test2"), XContentType.JSON)).get(); - client().index(indexRequest("test2").id("23").source(source("23", "baz test2"), XContentType.JSON)).get(); + client().index(indexRequest("test2").id("21").source(source("21", "foo test2"), MediaTypeRegistry.JSON)).get(); + client().index(indexRequest("test2").id("22").source(source("22", "bar test2"), MediaTypeRegistry.JSON)).get(); + client().index(indexRequest("test2").id("23").source(source("23", "baz test2"), MediaTypeRegistry.JSON)).get(); - client().index(indexRequest("test3").id("31").source(source("31", "foo test3"), XContentType.JSON)).get(); - client().index(indexRequest("test3").id("32").source(source("32", "bar test3"), XContentType.JSON)).get(); - client().index(indexRequest("test3").id("33").source(source("33", "baz test3"), XContentType.JSON)).get(); + client().index(indexRequest("test3").id("31").source(source("31", "foo test3"), MediaTypeRegistry.JSON)).get(); + client().index(indexRequest("test3").id("32").source(source("32", "bar test3"), MediaTypeRegistry.JSON)).get(); + client().index(indexRequest("test3").id("33").source(source("33", "baz test3"), MediaTypeRegistry.JSON)).get(); refresh(); @@ -624,16 +626,16 @@ public void testDeletingByQueryFilteringAliases() throws Exception { ); logger.info("--> indexing against [test1]"); - client().index(indexRequest("test1").id("1").source(source("1", "foo test"), XContentType.JSON)).get(); - client().index(indexRequest("test1").id("2").source(source("2", "bar test"), XContentType.JSON)).get(); - client().index(indexRequest("test1").id("3").source(source("3", "baz test"), XContentType.JSON)).get(); - 
client().index(indexRequest("test1").id("4").source(source("4", "something else"), XContentType.JSON)).get(); + client().index(indexRequest("test1").id("1").source(source("1", "foo test"), MediaTypeRegistry.JSON)).get(); + client().index(indexRequest("test1").id("2").source(source("2", "bar test"), MediaTypeRegistry.JSON)).get(); + client().index(indexRequest("test1").id("3").source(source("3", "baz test"), MediaTypeRegistry.JSON)).get(); + client().index(indexRequest("test1").id("4").source(source("4", "something else"), MediaTypeRegistry.JSON)).get(); logger.info("--> indexing against [test2]"); - client().index(indexRequest("test2").id("5").source(source("5", "foo test"), XContentType.JSON)).get(); - client().index(indexRequest("test2").id("6").source(source("6", "bar test"), XContentType.JSON)).get(); - client().index(indexRequest("test2").id("7").source(source("7", "baz test"), XContentType.JSON)).get(); - client().index(indexRequest("test2").id("8").source(source("8", "something else"), XContentType.JSON)).get(); + client().index(indexRequest("test2").id("5").source(source("5", "foo test"), MediaTypeRegistry.JSON)).get(); + client().index(indexRequest("test2").id("6").source(source("6", "bar test"), MediaTypeRegistry.JSON)).get(); + client().index(indexRequest("test2").id("7").source(source("7", "baz test"), MediaTypeRegistry.JSON)).get(); + client().index(indexRequest("test2").id("8").source(source("8", "something else"), MediaTypeRegistry.JSON)).get(); refresh(); @@ -722,7 +724,7 @@ public void testWaitForAliasCreationMultipleShards() throws Exception { for (int i = 0; i < 10; i++) { final String aliasName = "alias" + i; assertAliasesVersionIncreases("test", () -> assertAcked(admin().indices().prepareAliases().addAlias("test", aliasName))); - client().index(indexRequest(aliasName).id("1").source(source("1", "test"), XContentType.JSON)).get(); + client().index(indexRequest(aliasName).id("1").source(source("1", "test"), MediaTypeRegistry.JSON)).get(); } } @@ -743,7 +745,7 @@ public void testWaitForAliasCreationSingleShard() throws Exception { for (int i = 0; i < 10; i++) { final String aliasName = "alias" + i; assertAliasesVersionIncreases("test", () -> assertAcked(admin().indices().prepareAliases().addAlias("test", aliasName))); - client().index(indexRequest(aliasName).id("1").source(source("1", "test"), XContentType.JSON)).get(); + client().index(indexRequest(aliasName).id("1").source(source("1", "test"), MediaTypeRegistry.JSON)).get(); } } @@ -765,7 +767,7 @@ public void run() { "test", () -> assertAcked(admin().indices().prepareAliases().addAlias("test", aliasName)) ); - client().index(indexRequest(aliasName).id("1").source(source("1", "test"), XContentType.JSON)).actionGet(); + client().index(indexRequest(aliasName).id("1").source(source("1", "test"), MediaTypeRegistry.JSON)).actionGet(); } }); } @@ -1086,7 +1088,7 @@ public void testCreateIndexWithAliasesInSource() throws Exception { + " \"alias4\" : {\"is_hidden\": true}\n" + " }\n" + "}", - XContentType.JSON + MediaTypeRegistry.JSON ) ); @@ -1379,12 +1381,13 @@ public void testIndexingAndQueryingHiddenAliases() throws Exception { ensureGreen(); // Put a couple docs in each index directly - IndexResponse res = client().index(indexRequest(nonWriteIndex).id("1").source(source("1", "nonwrite"), XContentType.JSON)).get(); + IndexResponse res = client().index(indexRequest(nonWriteIndex).id("1").source(source("1", "nonwrite"), MediaTypeRegistry.JSON)) + .get(); assertThat(res.status().getStatus(), equalTo(201)); - res = 
client().index(indexRequest(writeIndex).id("2").source(source("2", "writeindex"), XContentType.JSON)).get(); + res = client().index(indexRequest(writeIndex).id("2").source(source("2", "writeindex"), MediaTypeRegistry.JSON)).get(); assertThat(res.status().getStatus(), equalTo(201)); // And through the alias - res = client().index(indexRequest(alias).id("3").source(source("3", "through alias"), XContentType.JSON)).get(); + res = client().index(indexRequest(alias).id("3").source(source("3", "through alias"), MediaTypeRegistry.JSON)).get(); assertThat(res.status().getStatus(), equalTo(201)); refresh(writeIndex, nonWriteIndex); diff --git a/server/src/internalClusterTest/java/org/opensearch/blocks/CreateIndexBlockIT.java b/server/src/internalClusterTest/java/org/opensearch/blocks/CreateIndexBlockIT.java index d4a4df39bfb2f..3c1f1e83f7481 100644 --- a/server/src/internalClusterTest/java/org/opensearch/blocks/CreateIndexBlockIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/blocks/CreateIndexBlockIT.java @@ -8,10 +8,10 @@ package org.opensearch.blocks; -import org.junit.After; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.settings.Settings; import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.After; import static org.opensearch.test.OpenSearchIntegTestCase.client; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; diff --git a/server/src/internalClusterTest/java/org/opensearch/blocks/SimpleBlocksIT.java b/server/src/internalClusterTest/java/org/opensearch/blocks/SimpleBlocksIT.java index 8ede3e25b2e1a..6275571cc2371 100644 --- a/server/src/internalClusterTest/java/org/opensearch/blocks/SimpleBlocksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/blocks/SimpleBlocksIT.java @@ -38,7 +38,6 @@ import org.opensearch.action.admin.indices.exists.indices.IndicesExistsResponse; import org.opensearch.action.admin.indices.readonly.AddIndexBlockRequestBuilder; import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequestBuilder; - import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.index.IndexResponse; import org.opensearch.action.support.ActiveShardCount; @@ -48,6 +47,7 @@ import org.opensearch.cluster.block.ClusterBlockLevel; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexMetadata.APIBlock; +import org.opensearch.cluster.metadata.ProcessClusterEventTimeoutException; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.settings.Settings; import org.opensearch.index.IndexNotFoundException; @@ -492,10 +492,20 @@ public void testAddBlockWhileDeletingIndices() throws Exception { } catch (InterruptedException e) { throw new AssertionError(e); } - try { - assertAcked(client().admin().indices().prepareDelete(indexToDelete)); - } catch (final Exception e) { - exceptionConsumer.accept(e); + int pendingRetries = 3; + boolean success = false; + while (success == false && pendingRetries-- > 0) { + try { + assertAcked(client().admin().indices().prepareDelete(indexToDelete)); + success = true; + } catch (final Exception e) { + Throwable cause = ExceptionsHelper.unwrapCause(e); + if (cause instanceof ProcessClusterEventTimeoutException && pendingRetries > 0) { + // ignore error & retry + continue; + } + exceptionConsumer.accept(e); + } } })); } diff --git a/server/src/internalClusterTest/java/org/opensearch/broadcast/BroadcastActionsIT.java 
b/server/src/internalClusterTest/java/org/opensearch/broadcast/BroadcastActionsIT.java index 9800c20d0b752..96b8c63f6c2ba 100644 --- a/server/src/internalClusterTest/java/org/opensearch/broadcast/BroadcastActionsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/broadcast/BroadcastActionsIT.java @@ -33,8 +33,8 @@ package org.opensearch.broadcast; import org.opensearch.action.search.SearchResponse; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.test.OpenSearchIntegTestCase; import java.io.IOException; diff --git a/server/src/internalClusterTest/java/org/opensearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java b/server/src/internalClusterTest/java/org/opensearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java index 047584c1f13cb..158ed107b79c3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java @@ -31,14 +31,6 @@ package org.opensearch.bwcompat; -import java.io.IOException; -import java.io.InputStream; -import java.nio.file.DirectoryStream; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.List; - import org.apache.lucene.tests.util.LuceneTestCase; import org.apache.lucene.tests.util.TestUtil; import org.opensearch.ExceptionsHelper; @@ -48,6 +40,14 @@ import org.opensearch.gateway.CorruptStateException; import org.opensearch.test.OpenSearchIntegTestCase; +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.DirectoryStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; + import static org.hamcrest.Matchers.containsString; @LuceneTestCase.SuppressCodecs("*") diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterAwarenessHealthIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterAwarenessHealthIT.java index 232342ae658a1..acb75ddfe3b67 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterAwarenessHealthIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterAwarenessHealthIT.java @@ -17,8 +17,8 @@ import java.util.List; import java.util.Map; -import static org.hamcrest.Matchers.equalTo; import static org.opensearch.test.NodeRoles.onlyRole; +import static org.hamcrest.Matchers.equalTo; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class ClusterAwarenessHealthIT extends OpenSearchIntegTestCase { diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterHealthIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterHealthIT.java index 56f290ef4b50c..d63b87cbee6f7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterHealthIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterHealthIT.java @@ -32,7 +32,6 @@ package org.opensearch.cluster; -import org.opensearch.action.ActionFuture; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.opensearch.action.support.IndicesOptions; @@ -42,10 +41,11 @@ import org.opensearch.cluster.routing.UnassignedInfo; import 
org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Priority; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; import java.util.ArrayList; import java.util.List; diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterInfoServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterInfoServiceIT.java index 508b8e21e42c1..35b8bdf3dafe5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterInfoServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterInfoServiceIT.java @@ -33,7 +33,6 @@ package org.opensearch.cluster; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; import org.opensearch.action.admin.cluster.node.stats.NodesStatsAction; import org.opensearch.action.admin.indices.stats.IndicesStatsAction; @@ -47,29 +46,29 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; import org.opensearch.index.IndexService; import org.opensearch.index.shard.IndexShard; -import org.opensearch.index.store.remote.filecache.FileCacheStats; import org.opensearch.index.store.Store; +import org.opensearch.index.store.remote.filecache.FileCacheStats; import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndexDescriptor; import org.opensearch.plugins.ActionPlugin; import org.opensearch.plugins.Plugin; import org.opensearch.plugins.SystemIndexPlugin; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.transport.TransportService; - import org.hamcrest.Matchers; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; -import java.util.Map; import java.util.Locale; +import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterStateDiffIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterStateDiffIT.java index 147be425f93b3..44ab41fb5a8d3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterStateDiffIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterStateDiffIT.java @@ -54,17 +54,17 @@ import org.opensearch.cluster.routing.TestShardRouting; import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.common.UUIDs; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.set.Sets; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.settings.Settings; 
-import org.opensearch.common.util.set.Sets; -import org.opensearch.gateway.GatewayService; import org.opensearch.core.index.Index; -import org.opensearch.index.query.QueryBuilders; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.gateway.GatewayService; +import org.opensearch.index.query.QueryBuilders; import org.opensearch.snapshots.Snapshot; import org.opensearch.snapshots.SnapshotId; import org.opensearch.snapshots.SnapshotInfoTests; diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java index 3e2a1f1452628..84648eda3d38c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java @@ -48,10 +48,10 @@ import org.opensearch.common.util.set.Sets; import org.opensearch.index.query.QueryBuilders; import org.opensearch.plugins.Plugin; +import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; -import org.opensearch.test.InternalTestCluster; import org.opensearch.test.disruption.NetworkDisruption; import org.opensearch.test.transport.MockTransportService; @@ -317,8 +317,8 @@ public void testThreeNodesNoClusterManagerBlock() throws Exception { ); Settings nonClusterManagerDataPathSettings1 = internalCluster().dataPathSettings(nonClusterManagerNodes.get(0)); Settings nonClusterManagerDataPathSettings2 = internalCluster().dataPathSettings(nonClusterManagerNodes.get(1)); - internalCluster().stopRandomNonClusterManagerNode(); - internalCluster().stopRandomNonClusterManagerNode(); + internalCluster().stopRandomNodeNotCurrentClusterManager(); + internalCluster().stopRandomNodeNotCurrentClusterManager(); logger.info("--> verify that there is no cluster-manager anymore on remaining node"); // spin here to wait till the state is set diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/NoClusterManagerNodeIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/NoClusterManagerNodeIT.java index 7f454ce848f6b..da500fa717202 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/NoClusterManagerNodeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/NoClusterManagerNodeIT.java @@ -50,9 +50,9 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.rest.RestStatus; import org.opensearch.discovery.ClusterManagerNotDiscoveredException; import org.opensearch.plugins.Plugin; -import org.opensearch.core.rest.RestStatus; import org.opensearch.script.Script; import org.opensearch.script.ScriptType; import org.opensearch.test.OpenSearchIntegTestCase; diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/SimpleClusterStateIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/SimpleClusterStateIT.java index 61171b0a817b0..af5900b1cba6c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/SimpleClusterStateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/SimpleClusterStateIT.java @@ -46,15 +46,15 @@ import org.opensearch.cluster.service.ClusterService; import 
org.opensearch.common.Priority; import org.opensearch.common.UUIDs; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.core.common.Strings; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexNotFoundException; @@ -66,7 +66,6 @@ import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.threadpool.ThreadPool; import org.opensearch.watcher.ResourceWatcherService; - import org.junit.Before; import java.io.IOException; @@ -82,9 +81,9 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertIndexTemplateExists; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.is; /** @@ -98,12 +97,17 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.singletonList(PrivateCustomPlugin.class); } + @Override + protected boolean useRandomReplicationStrategy() { + return true; + } + @Before public void indexData() throws Exception { index("foo", "bar", "1", XContentFactory.jsonBuilder().startObject().field("foo", "foo").endObject()); index("fuu", "buu", "1", XContentFactory.jsonBuilder().startObject().field("fuu", "fuu").endObject()); index("baz", "baz", "1", XContentFactory.jsonBuilder().startObject().field("baz", "baz").endObject()); - refresh(); + refreshAndWaitForReplication(); } public void testRoutingTable() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/SimpleDataNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/SimpleDataNodesIT.java index d7adf57593953..4f7fda6b94e36 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/SimpleDataNodesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/SimpleDataNodesIT.java @@ -41,7 +41,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; @@ -50,7 +50,6 @@ import static org.opensearch.common.unit.TimeValue.timeValueSeconds; import static org.opensearch.test.NodeRoles.dataNode; import static org.opensearch.test.NodeRoles.nonDataNode; - import static org.hamcrest.Matchers.equalTo; @ClusterScope(scope = Scope.TEST, numDataNodes = 0) @@ -62,7 +61,7 @@ public void testIndexingBeforeAndAfterDataNodesStart() { internalCluster().startNode(nonDataNode()); 
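Note on the media-type change running through these test hunks: XContentType.JSON (from org.opensearch.common.xcontent) is replaced by MediaTypeRegistry.JSON (from org.opensearch.core.xcontent) as the source content-type argument. A minimal sketch of the new call shape, assuming an OpenSearchIntegTestCase context where client() is available; the index name and document body are illustrative:

import org.opensearch.client.Requests;
import org.opensearch.core.xcontent.MediaTypeRegistry;

// ...inside a test method: index one JSON document, passing the relocated constant
// exactly where XContentType.JSON was passed before.
client().index(
    Requests.indexRequest("test").id("1").source("{\"field\":\"value\"}", MediaTypeRegistry.JSON)
).actionGet();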
client().admin().indices().create(createIndexRequest("test").waitForActiveShards(ActiveShardCount.NONE)).actionGet(); try { - client().index(Requests.indexRequest("test").id("1").source(SOURCE, XContentType.JSON).timeout(timeValueSeconds(1))) + client().index(Requests.indexRequest("test").id("1").source(SOURCE, MediaTypeRegistry.JSON).timeout(timeValueSeconds(1))) .actionGet(); fail("no allocation should happen"); } catch (UnavailableShardsException e) { @@ -85,7 +84,7 @@ public void testIndexingBeforeAndAfterDataNodesStart() { // still no shard should be allocated try { - client().index(Requests.indexRequest("test").id("1").source(SOURCE, XContentType.JSON).timeout(timeValueSeconds(1))) + client().index(Requests.indexRequest("test").id("1").source(SOURCE, MediaTypeRegistry.JSON).timeout(timeValueSeconds(1))) .actionGet(); fail("no allocation should happen"); } catch (UnavailableShardsException e) { @@ -107,7 +106,8 @@ public void testIndexingBeforeAndAfterDataNodesStart() { equalTo(false) ); - IndexResponse indexResponse = client().index(Requests.indexRequest("test").id("1").source(SOURCE, XContentType.JSON)).actionGet(); + IndexResponse indexResponse = client().index(Requests.indexRequest("test").id("1").source(SOURCE, MediaTypeRegistry.JSON)) + .actionGet(); assertThat(indexResponse.getId(), equalTo("1")); } diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/SpecificClusterManagerNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/SpecificClusterManagerNodesIT.java index 3c3d10d5e2bc2..713873bb222e2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/SpecificClusterManagerNodesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/SpecificClusterManagerNodesIT.java @@ -38,10 +38,10 @@ import org.opensearch.common.settings.Settings; import org.opensearch.discovery.ClusterManagerNotDiscoveredException; import org.opensearch.index.query.QueryBuilders; +import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; -import org.opensearch.test.InternalTestCluster; import java.io.IOException; diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/action/shard/ShardStateActionIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/action/shard/ShardStateActionIT.java index e0d070c385def..90eb3aa97a050 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/action/shard/ShardStateActionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/action/shard/ShardStateActionIT.java @@ -32,7 +32,6 @@ package org.opensearch.cluster.action.shard; -import org.opensearch.action.ActionFuture; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.opensearch.cluster.ClusterState; @@ -40,6 +39,7 @@ import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Priority; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/AwarenessAllocationIT.java 
b/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/AwarenessAllocationIT.java index c69718d982f8b..522d63b22a0da 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/AwarenessAllocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/AwarenessAllocationIT.java @@ -364,6 +364,7 @@ public void testAwarenessZonesIncrementalNodes() { assertThat(counts.get(noZoneNode), equalTo(2)); } + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/5908") public void testThreeZoneOneReplicaWithForceZoneValueAndLoadAwareness() throws Exception { int nodeCountPerAZ = 5; int numOfShards = 30; diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/ClusterRerouteIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/ClusterRerouteIT.java index 298aff4a5c853..dbcb030d8a4f7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/ClusterRerouteIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/ClusterRerouteIT.java @@ -58,18 +58,18 @@ import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider.Allocation; import org.opensearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.opensearch.common.Priority; -import org.opensearch.core.util.FileSystemUtils; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.io.IOUtils; -import org.opensearch.env.NodeEnvironment; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.util.FileSystemUtils; +import org.opensearch.env.NodeEnvironment; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.MockLogAppender; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; -import org.opensearch.test.InternalTestCluster; -import org.opensearch.test.MockLogAppender; import java.nio.file.Path; import java.util.Arrays; diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java index 6f2d858e349f0..b33d57ed43189 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java @@ -12,7 +12,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.core.LogEvent; -import org.junit.After; import org.opensearch.OpenSearchTimeoutException; import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateAction; import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateRequest; @@ -49,6 +48,7 @@ import org.opensearch.transport.RemoteTransportException; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportService; +import org.junit.After; import java.util.ArrayList; import java.util.Arrays; diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/RareClusterStateIT.java 
b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/RareClusterStateIT.java index 9c8cb9f15e79c..b3cb15d028090 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/RareClusterStateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/RareClusterStateIT.java @@ -34,11 +34,8 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.Version; - -import org.opensearch.action.ActionFuture; import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestBuilder; -import org.opensearch.action.ActionResponse; import org.opensearch.action.index.IndexResponse; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; @@ -53,10 +50,12 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.discovery.Discovery; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.index.Index; +import org.opensearch.discovery.Discovery; import org.opensearch.index.IndexService; import org.opensearch.index.mapper.DocumentMapper; import org.opensearch.index.mapper.MapperService; @@ -72,10 +71,8 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static org.opensearch.action.DocWriteResponse.Result.CREATED; - import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; - import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasSize; diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java index ebe65ad48f47e..b30eb1f3e3b39 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java @@ -44,6 +44,7 @@ import org.opensearch.env.TestEnvironment; import org.opensearch.gateway.GatewayMetaState; import org.opensearch.gateway.PersistedClusterStateService; +import org.opensearch.gateway.remote.RemoteClusterStateService; import org.opensearch.indices.IndicesService; import org.opensearch.node.Node.DiscoverySettings; import org.opensearch.test.InternalTestCluster; @@ -55,9 +56,6 @@ import java.util.Locale; import java.util.Objects; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.notNullValue; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.opensearch.gateway.DanglingIndicesState.AUTO_IMPORT_DANGLING_INDICES_SETTING; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; @@ -65,6 +63,9 @@ import static org.opensearch.test.NodeRoles.nonClusterManagerNode; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static 
org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) public class UnsafeBootstrapAndDetachCommandIT extends OpenSearchIntegTestCase { @@ -180,6 +181,16 @@ public void testBootstrapNotClusterManagerEligible() { expectThrows(() -> unsafeBootstrap(environment), UnsafeBootstrapClusterManagerCommand.NOT_CLUSTER_MANAGER_NODE_MSG); } + public void testBootstrapRemoteClusterEnabled() { + final Environment environment = TestEnvironment.newEnvironment( + Settings.builder() + .put(internalCluster().getDefaultSettings()) + .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) + .build() + ); + expectThrows(() -> unsafeBootstrap(environment), UnsafeBootstrapClusterManagerCommand.REMOTE_CLUSTER_STATE_ENABLED_NODE); + } + public void testBootstrapNoDataFolder() { final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); expectThrows(() -> unsafeBootstrap(environment), OpenSearchNodeCommand.NO_NODE_FOLDER_FOUND_MSG); diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/ZenDiscoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/ZenDiscoveryIT.java index aaba53dcb2b07..9bddb39e79484 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/ZenDiscoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/ZenDiscoveryIT.java @@ -42,9 +42,9 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.discovery.Discovery; import org.opensearch.discovery.DiscoveryStats; import org.opensearch.test.OpenSearchIntegTestCase; @@ -59,8 +59,8 @@ import java.util.concurrent.TimeoutException; import static org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.DISCOVERY; -import static org.opensearch.test.NodeRoles.dataNode; import static org.opensearch.test.NodeRoles.clusterManagerOnlyNode; +import static org.opensearch.test.NodeRoles.dataNode; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/ClusterIndexRefreshIntervalIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/ClusterIndexRefreshIntervalIT.java new file mode 100644 index 0000000000000..25fa7ae7eb8eb --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/ClusterIndexRefreshIntervalIT.java @@ -0,0 +1,372 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+package org.opensearch.cluster.metadata;
+
+import org.opensearch.action.admin.indices.get.GetIndexRequest;
+import org.opensearch.action.admin.indices.get.GetIndexResponse;
+import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest;
+import org.opensearch.action.admin.indices.template.put.PutIndexTemplateRequest;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.core.index.Index;
+import org.opensearch.index.IndexService;
+import org.opensearch.index.IndexSettings;
+import org.opensearch.indices.IndicesService;
+import org.opensearch.snapshots.AbstractSnapshotIntegTestCase;
+import org.opensearch.test.OpenSearchIntegTestCase;
+import org.junit.Before;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+
+import static org.opensearch.indices.IndicesService.CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING;
+import static org.opensearch.indices.IndicesService.CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING;
+
+@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 2)
+public class ClusterIndexRefreshIntervalIT extends AbstractSnapshotIntegTestCase {
+
+    public static final String INDEX_NAME = "test-index";
+
+    public static final String OTHER_INDEX_NAME = "other-test-index";
+
+    @Override
+    public Settings indexSettings() {
+        return Settings.builder().put(super.indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build();
+    }
+
+    @Override
+    @Before
+    public void setUp() throws Exception {
+        super.setUp();
+        internalCluster().startClusterManagerOnlyNode();
+    }
+
+    static void putIndexTemplate(String refreshInterval) {
+        PutIndexTemplateRequest request = new PutIndexTemplateRequest("my-template"); // template name
+        request.patterns(Arrays.asList("pattern-1", "log-*")); // index patterns this template applies to
+
+        request.settings(
+            Settings.builder()
+                .put("index.number_of_shards", 1)
+                .put("index.number_of_replicas", 1)
+                .put("index.refresh_interval", refreshInterval)
+        );
+        assertTrue(client().admin().indices().putTemplate(request).actionGet().isAcknowledged());
+    }
+
+    public void testIndexTemplateCreationSucceedsWhenNoMinimumRefreshInterval() throws ExecutionException, InterruptedException {
+        String clusterManagerName = internalCluster().getClusterManagerName();
+        List<String> dataNodes = new ArrayList<>(internalCluster().getDataNodeNames());
+        putIndexTemplate("2s");
+
+        // Test index creation using a template with a valid refresh interval
+        String indexName = "log-myindex-1";
+        createIndex(indexName);
+        ensureYellowAndNoInitializingShards(indexName);
+        ensureGreen(indexName);
+        GetIndexResponse getIndexResponse = client(clusterManagerName).admin().indices().getIndex(new GetIndexRequest()).get();
+        IndicesService indicesService = internalCluster().getInstance(IndicesService.class, randomFrom(dataNodes));
+        String uuid = getIndexResponse.getSettings().get(indexName).get(IndexMetadata.SETTING_INDEX_UUID);
+        IndexService indexService = indicesService.indexService(new Index(indexName, uuid));
+        assertEquals(TimeValue.timeValueSeconds(2), indexService.getRefreshTaskInterval());
+    }
+
+    public void testDefaultRefreshIntervalWithUpdateClusterAndIndexSettings() throws Exception {
+        String clusterManagerName = internalCluster().getClusterManagerName();
+        List<String> dataNodes = new ArrayList<>(internalCluster().getDataNodeNames());
+        createIndex(INDEX_NAME);
+        ensureYellowAndNoInitializingShards(INDEX_NAME);
+        ensureGreen(INDEX_NAME);
+        GetIndexResponse getIndexResponse = client(clusterManagerName).admin().indices().getIndex(new GetIndexRequest()).get();
+        IndicesService indicesService = internalCluster().getInstance(IndicesService.class, randomFrom(dataNodes));
+        String uuid = getIndexResponse.getSettings().get(INDEX_NAME).get(IndexMetadata.SETTING_INDEX_UUID);
+        IndexService indexService = indicesService.indexService(new Index(INDEX_NAME, uuid));
+        assertEquals(getDefaultRefreshInterval(), indexService.getRefreshTaskInterval());
+
+        // Update the cluster.default.index.refresh_interval setting to another value and validate the index refresh interval
+        TimeValue refreshInterval = TimeValue.timeValueMillis(randomIntBetween(10, 90) * 1000L);
+        client(clusterManagerName).admin()
+            .cluster()
+            .prepareUpdateSettings()
+            .setTransientSettings(Settings.builder().put(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.getKey(), refreshInterval))
+            .get();
+        assertEquals(refreshInterval, indexService.getRefreshTaskInterval());
+
+        // Updating cluster.minimum.index.refresh_interval to a value greater than the default refresh interval above will fail
+        TimeValue invalidMinimumRefreshInterval = TimeValue.timeValueMillis(refreshInterval.millis() + randomIntBetween(1, 1000));
+        IllegalArgumentException exceptionDuringMinUpdate = assertThrows(
+            IllegalArgumentException.class,
+            () -> client(clusterManagerName).admin()
+                .cluster()
+                .prepareUpdateSettings()
+                .setTransientSettings(
+                    Settings.builder().put(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), invalidMinimumRefreshInterval)
+                )
+                .get()
+        );
+        assertEquals(
+            "cluster minimum index refresh interval ["
+                + invalidMinimumRefreshInterval
+                + "] more than cluster default index refresh interval ["
+                + refreshInterval
+                + "]",
+            exceptionDuringMinUpdate.getMessage()
+        );
+
+        // Updating cluster.minimum.index.refresh_interval to a value below the default will succeed.
+        TimeValue validMinimumRefreshInterval = TimeValue.timeValueMillis(refreshInterval.millis() - randomIntBetween(1, 1000));
+        client(clusterManagerName).admin()
+            .cluster()
+            .prepareUpdateSettings()
+            .setTransientSettings(
+                Settings.builder().put(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), validMinimumRefreshInterval)
+            )
+            .get();
+
+        // Updating the index with an index.refresh_interval below the cluster minimum will fail; the sketch that follows recaps the rule.
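The assertions in this test pin the exact error messages, so the underlying rules are worth stating plainly: the cluster minimum refresh interval may not exceed the cluster default, and a per-index index.refresh_interval may not undercut the cluster minimum. A schematic restatement in plain Java follows; the class and method names are illustrative, and this is not the server's actual validation code:

// Hypothetical helper restating the two checks exercised by these tests.
final class RefreshIntervalRules {

    // Rule 1: the cluster minimum may not exceed the cluster default.
    static void validateClusterSettings(long minimumMillis, long defaultMillis) {
        if (minimumMillis > defaultMillis) {
            throw new IllegalArgumentException(
                "cluster minimum index refresh interval [" + minimumMillis
                    + "ms] more than cluster default index refresh interval [" + defaultMillis + "ms]"
            );
        }
    }

    // Rule 2: a per-index refresh interval may not undercut the cluster minimum.
    static void validateIndexSetting(long refreshMillis, long minimumMillis) {
        if (refreshMillis < minimumMillis) {
            throw new IllegalArgumentException(
                "invalid index.refresh_interval [" + refreshMillis
                    + "ms]: cannot be smaller than cluster.minimum.index.refresh_interval [" + minimumMillis + "ms]"
            );
        }
    }
}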
+ TimeValue invalidRefreshInterval = TimeValue.timeValueMillis(validMinimumRefreshInterval.millis() - randomIntBetween(1, 1000)); + String expectedMessage = "invalid index.refresh_interval [" + + invalidRefreshInterval + + "]: cannot be smaller than cluster.minimum.index.refresh_interval [" + + validMinimumRefreshInterval + + "]"; + + IllegalArgumentException exceptionDuringUpdateSettings = assertThrows( + IllegalArgumentException.class, + () -> client(clusterManagerName).admin() + .indices() + .updateSettings( + new UpdateSettingsRequest(INDEX_NAME).settings( + Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), invalidRefreshInterval) + ) + ) + .actionGet() + ); + assertEquals(expectedMessage, exceptionDuringUpdateSettings.getMessage()); + + // Create another index with invalid index setting index.refresh_interval, this fails. + Settings indexSettings = Settings.builder() + .put(indexSettings()) + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), invalidRefreshInterval) + .build(); + IllegalArgumentException exceptionDuringCreateIndex = assertThrows( + IllegalArgumentException.class, + () -> createIndex(OTHER_INDEX_NAME, indexSettings) + ); + assertEquals(expectedMessage, exceptionDuringCreateIndex.getMessage()); + + // Update with valid index setting index.refresh_interval, this will succeed now. + TimeValue validRefreshInterval = TimeValue.timeValueMillis(validMinimumRefreshInterval.millis() + randomIntBetween(1, 1000)); + client(clusterManagerName).admin() + .indices() + .updateSettings( + new UpdateSettingsRequest(INDEX_NAME).settings( + Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), validRefreshInterval) + ) + ) + .get(); + // verify refresh task interval is updated. + assertEquals(validRefreshInterval, indexService.getRefreshTaskInterval()); + + // Try to create another index with valid index setting index.refresh_interval, this will pass. 
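The createIndex call that follows exercises the same minimum-interval check at index-creation time. Several of the surrounding tests also clear a setting with Settings.Builder#putNull, which records an explicit null so the setting falls back to its default rather than keeping a previous value. A minimal illustration, with the builder contents hypothetical:

// putNull marks index.refresh_interval as explicitly unset, so the index falls
// back to the cluster default (or IndexSettings.DEFAULT_REFRESH_INTERVAL).
Settings reset = Settings.builder()
    .putNull(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey())
    .build();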
+        createIndex(
+            OTHER_INDEX_NAME,
+            Settings.builder().put(indexSettings).put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), validRefreshInterval).build()
+        );
+        getIndexResponse = client(clusterManagerName).admin().indices().getIndex(new GetIndexRequest()).get();
+        String otherUuid = getIndexResponse.getSettings().get(OTHER_INDEX_NAME).get(IndexMetadata.SETTING_INDEX_UUID);
+        assertEquals(validRefreshInterval, indicesService.indexService(new Index(OTHER_INDEX_NAME, otherUuid)).getRefreshTaskInterval());
+
+        // Update the cluster.default.index.refresh_interval & cluster.minimum.index.refresh_interval settings to null
+        client(clusterManagerName).admin()
+            .cluster()
+            .prepareUpdateSettings()
+            .setTransientSettings(
+                Settings.builder()
+                    .putNull(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.getKey())
+                    .putNull(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey())
+            )
+            .get();
+        // verify the index is still using the refresh interval passed in the update settings call
+        assertEquals(validRefreshInterval, indexService.getRefreshTaskInterval());
+
+        // Remove the index setting as well now; this should reset the refresh task interval to the default refresh interval
+        client(clusterManagerName).admin()
+            .indices()
+            .updateSettings(
+                new UpdateSettingsRequest(INDEX_NAME).settings(
+                    Settings.builder().putNull(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey())
+                )
+            )
+            .get();
+        assertEquals(getDefaultRefreshInterval(), indexService.getRefreshTaskInterval());
+    }
+
+    public void testRefreshIntervalDisabled() throws ExecutionException, InterruptedException {
+        TimeValue clusterMinimumRefreshInterval = client().settings()
+            .getAsTime(IndicesService.CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.MINUS_ONE);
+        boolean createIndexSuccess = clusterMinimumRefreshInterval.equals(TimeValue.MINUS_ONE);
+        String clusterManagerName = internalCluster().getClusterManagerName();
+        List<String> dataNodes = new ArrayList<>(internalCluster().getDataNodeNames());
+        Settings settings = Settings.builder()
+            .put(indexSettings())
+            .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), IndexSettings.MINIMUM_REFRESH_INTERVAL)
+            .build();
+        if (createIndexSuccess) {
+            createIndex(INDEX_NAME, settings);
+            ensureYellowAndNoInitializingShards(INDEX_NAME);
+            ensureGreen(INDEX_NAME);
+            GetIndexResponse getIndexResponse = client(clusterManagerName).admin().indices().getIndex(new GetIndexRequest()).get();
+            IndicesService indicesService = internalCluster().getInstance(IndicesService.class, randomFrom(dataNodes));
+            String uuid = getIndexResponse.getSettings().get(INDEX_NAME).get(IndexMetadata.SETTING_INDEX_UUID);
+            IndexService indexService = indicesService.indexService(new Index(INDEX_NAME, uuid));
+            assertEquals(IndexSettings.MINIMUM_REFRESH_INTERVAL, indexService.getRefreshTaskInterval());
+        } else {
+            IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> createIndex(INDEX_NAME, settings));
+            assertEquals(
+                "invalid index.refresh_interval [-1]: cannot be smaller than cluster.minimum.index.refresh_interval ["
+                    + getMinRefreshIntervalForRefreshDisabled()
+                    + "]",
+                exception.getMessage()
+            );
+        }
+    }
+
+    protected TimeValue getMinRefreshIntervalForRefreshDisabled() {
+        throw new RuntimeException("Not expected to be called here; subclasses that disable refresh must override this method");
+    }
+
+    public void testInvalidRefreshInterval() {
+        String invalidRefreshInterval = "-10s";
+        List<String> dataNodes = new ArrayList<>(internalCluster().getDataNodeNames());
+        Settings settings = Settings.builder()
+            .put(indexSettings())
+            .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), invalidRefreshInterval)
+            .build();
+        IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> createIndex(INDEX_NAME, settings));
+        assertEquals(
+            "failed to parse setting [index.refresh_interval] with value ["
+                + invalidRefreshInterval
+                + "] as a time value: negative durations are not supported",
+            exception.getMessage()
+        );
+    }
+
+    public void testCreateIndexWithExplicitNullRefreshInterval() throws ExecutionException, InterruptedException {
+        List<String> dataNodes = new ArrayList<>(internalCluster().getDataNodeNames());
+        Settings indexSettings = Settings.builder()
+            .put(indexSettings())
+            .putNull(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey())
+            .build();
+        createIndex(INDEX_NAME, indexSettings);
+        ensureYellowAndNoInitializingShards(INDEX_NAME);
+        ensureGreen(INDEX_NAME);
+
+        GetIndexResponse getIndexResponse = client(internalCluster().getClusterManagerName()).admin()
+            .indices()
+            .getIndex(new GetIndexRequest())
+            .get();
+        String uuid = getIndexResponse.getSettings().get(INDEX_NAME).get(IndexMetadata.SETTING_INDEX_UUID);
+
+        IndicesService indicesService = internalCluster().getInstance(IndicesService.class, randomFrom(dataNodes));
+        IndexService indexService = indicesService.indexService(new Index(INDEX_NAME, uuid));
+
+        assertEquals(IndexSettings.DEFAULT_REFRESH_INTERVAL, indexService.getRefreshTaskInterval());
+    }
+
+    /**
+     * In this test we create an index with a custom `index.refresh_interval` and later raise
+     * `cluster.minimum.index.refresh_interval` above that value. The existing index should
+     * continue to use the refresh interval it was created with.
+     */
+    public void testClusterMinimumChangeOnIndexWithCustomRefreshInterval() throws ExecutionException, InterruptedException {
+        List<String> dataNodes = new ArrayList<>(internalCluster().getDataNodeNames());
+        TimeValue customRefreshInterval = TimeValue.timeValueSeconds(getDefaultRefreshInterval().getSeconds() + randomIntBetween(1, 5));
+        Settings indexSettings = Settings.builder()
+            .put(indexSettings())
+            .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), customRefreshInterval)
+            .build();
+        createIndex(INDEX_NAME, indexSettings);
+
+        ensureYellowAndNoInitializingShards(INDEX_NAME);
+        ensureGreen(INDEX_NAME);
+
+        GetIndexResponse getIndexResponse = client(internalCluster().getClusterManagerName()).admin()
+            .indices()
+            .getIndex(new GetIndexRequest())
+            .get();
+        String uuid = getIndexResponse.getSettings().get(INDEX_NAME).get(IndexMetadata.SETTING_INDEX_UUID);
+
+        IndicesService indicesService = internalCluster().getInstance(IndicesService.class, randomFrom(dataNodes));
+        IndexService indexService = indicesService.indexService(new Index(INDEX_NAME, uuid));
+
+        assertEquals(customRefreshInterval, indexService.getRefreshTaskInterval());
+
+        // Update cluster.minimum.index.refresh_interval to a valid value higher than the custom refresh interval.
+        // At the same time, because of a degree of randomness in the test, we also update
+        // cluster.default.index.refresh_interval to a valid value to keep the test behaviour deterministic.
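The cluster-level updates in these tests all go through setTransientSettings; transient settings are kept in cluster state but dropped on a full cluster restart, which is acceptable for test isolation. For comparison, a persistent form of the same kind of update is sketched below; the "1s" value is hypothetical:

// Persistent settings survive a full cluster restart, unlike the transient
// settings used throughout these tests.
client().admin()
    .cluster()
    .prepareUpdateSettings()
    .setPersistentSettings(Settings.builder().put(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1s"))
    .get();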
+ TimeValue clusterMinimum = TimeValue.timeValueSeconds(customRefreshInterval.getSeconds() + randomIntBetween(1, 5)); + TimeValue clusterDefault = TimeValue.timeValueSeconds(customRefreshInterval.getSeconds() + 6); + String clusterManagerName = internalCluster().getClusterManagerName(); + client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder() + .put(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.getKey(), clusterDefault) + .put(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), clusterMinimum) + ) + .get(); + + // Validate that the index refresh interval is still the existing one that was used during index creation + assertEquals(customRefreshInterval, indexService.getRefreshTaskInterval()); + + // Update index setting to a value >= current cluster minimum and this should happen successfully. + customRefreshInterval = TimeValue.timeValueSeconds(clusterMinimum.getSeconds() + randomIntBetween(1, 5)); + client(clusterManagerName).admin() + .indices() + .updateSettings( + new UpdateSettingsRequest(INDEX_NAME).settings( + Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), customRefreshInterval) + ) + ) + .get(); + assertEquals(customRefreshInterval, indexService.getRefreshTaskInterval()); + } + + protected TimeValue getDefaultRefreshInterval() { + return IndexSettings.DEFAULT_REFRESH_INTERVAL; + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/ClusterIndexRefreshIntervalWithNodeSettingsIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/ClusterIndexRefreshIntervalWithNodeSettingsIT.java new file mode 100644 index 0000000000000..2817d0e6a5951 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/ClusterIndexRefreshIntervalWithNodeSettingsIT.java @@ -0,0 +1,216 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster.metadata; + +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.opensearch.action.admin.indices.get.GetIndexRequest; +import org.opensearch.action.admin.indices.get.GetIndexResponse; +import org.opensearch.action.admin.indices.template.get.GetIndexTemplatesResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.indices.IndicesService; +import org.opensearch.snapshots.SnapshotInfo; +import org.opensearch.snapshots.SnapshotState; + +import java.util.Locale; +import java.util.concurrent.ExecutionException; + +import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING; +import static org.opensearch.index.IndexSettings.INDEX_REFRESH_INTERVAL_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertIndexTemplateExists; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertIndexTemplateMissing; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +public class ClusterIndexRefreshIntervalWithNodeSettingsIT extends ClusterIndexRefreshIntervalIT { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(IndicesService.CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.getKey(), getDefaultRefreshInterval()) + .put( + IndicesService.CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), + getMinRefreshIntervalForRefreshDisabled().toString() + ) + .build(); + } + + public void testIndexTemplateCreationFailsWithLessThanMinimumRefreshInterval() throws ExecutionException, InterruptedException { + Throwable throwable = assertThrows(IllegalArgumentException.class, () -> putIndexTemplate("0s")); + assertEquals( + throwable.getMessage(), + String.format( + Locale.ROOT, + "invalid index.refresh_interval [%s]: cannot be smaller than cluster.minimum.index.refresh_interval [%s]", + "0s", + getMinRefreshIntervalForRefreshDisabled() + ) + ); + } + + public void testIndexTemplateSnapshotRestoreWithLessThanMinimumRefreshInterval() throws ExecutionException, InterruptedException { + putIndexTemplate("2s"); + createRepository("test-repo", "fs"); + + final SnapshotInfo snapshotInfo = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap") + .setIndices() + .setWaitForCompletion(true) + .execute() + .get() + .getSnapshotInfo(); + assertThat(snapshotInfo.state(), is(SnapshotState.SUCCESS)); + + assertThat(snapshotInfo.totalShards(), equalTo(0)); + assertThat(snapshotInfo.successfulShards(), equalTo(0)); + + assertThat(client().admin().indices().prepareDeleteTemplate("my-template").get().isAcknowledged(), equalTo(true)); + + GetIndexTemplatesResponse getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get(); + assertIndexTemplateMissing(getIndexTemplatesResponse, "my-template"); + + String clusterManagerName = internalCluster().getClusterManagerName(); + client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder() + .put(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.getKey(), "5s") + .put(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), "4s") + ) + .get(); + + logger.info("--> try 
restore cluster state -- should fail"); + Throwable throwable = assertThrows( + IllegalArgumentException.class, + () -> clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true) + .setRestoreGlobalState(true) + .execute() + .actionGet() + ); + assertEquals( + throwable.getMessage(), + "invalid index.refresh_interval [2s]: cannot be smaller than cluster.minimum.index.refresh_interval [4s]" + ); + + client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder() + .put(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.getKey(), "5s") + .put(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1s") + ) + .get(); + + logger.info("--> restore cluster state"); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true) + .setRestoreGlobalState(true) + .execute() + .actionGet(); + assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0)); + + getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get(); + assertIndexTemplateExists(getIndexTemplatesResponse, "my-template"); + + } + + public void testIndexSnapshotRestoreWithLessThanMinimumRefreshInterval() throws ExecutionException, InterruptedException { + createIndex( + "my-index", + Settings.builder() + .put(indexSettings()) + .put(INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(INDEX_REFRESH_INTERVAL_SETTING.getKey(), "2s") + .build() + ); + + createRepository("test-repo", "fs"); + + final SnapshotInfo snapshotInfo = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap") + .setIndices() + .setWaitForCompletion(true) + .execute() + .get() + .getSnapshotInfo(); + assertThat(snapshotInfo.state(), is(SnapshotState.SUCCESS)); + + assertThat(snapshotInfo.totalShards(), equalTo(1)); + assertThat(snapshotInfo.successfulShards(), equalTo(1)); + + GetIndexResponse getIndexResponse = client().admin().indices().getIndex(new GetIndexRequest().indices("my-index")).get(); + assertEquals(1, getIndexResponse.indices().length); + assertEquals("2s", getIndexResponse.getSetting("my-index", INDEX_REFRESH_INTERVAL_SETTING.getKey())); + + assertThat(client().admin().indices().prepareDelete("my-index").get().isAcknowledged(), equalTo(true)); + + getIndexResponse = client().admin().indices().getIndex(new GetIndexRequest()).get(); + assertEquals(getIndexResponse.indices().length, 0); + + String clusterManagerName = internalCluster().getClusterManagerName(); + client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder() + .put(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.getKey(), "5s") + .put(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), "4s") + ) + .get(); + + logger.info("--> try restore cluster state -- should fail"); + Throwable throwable = assertThrows( + IllegalArgumentException.class, + () -> clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true) + .setRestoreGlobalState(true) + .execute() + .actionGet() + ); + assertEquals( + throwable.getMessage(), + "invalid index.refresh_interval [2s]: cannot be smaller than cluster.minimum.index.refresh_interval [4s]" + ); + + client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder() + .put(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.getKey(), "5s") + .put(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1s") + 
) + .get(); + + logger.info("--> try restore cluster state -- should pass"); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true) + .setRestoreGlobalState(true) + .execute() + .actionGet(); + assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(1)); + + getIndexResponse = client().admin().indices().getIndex(new GetIndexRequest().indices("my-index")).get(); + assertEquals(getIndexResponse.indices().length, 1); + } + + @Override + protected TimeValue getMinRefreshIntervalForRefreshDisabled() { + return TimeValue.timeValueSeconds(1); + } + + @Override + protected TimeValue getDefaultRefreshInterval() { + return TimeValue.timeValueSeconds(5); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceIT.java index dd0faa0ea8f61..ba1679d873bf4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceIT.java @@ -32,13 +32,13 @@ package org.opensearch.cluster.metadata; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.opensearch.client.Client; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/node/DiscoveryNodeRoleIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/node/DiscoveryNodeRoleIT.java index 29c2f6e970144..8c0eb891475cc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/node/DiscoveryNodeRoleIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/node/DiscoveryNodeRoleIT.java @@ -38,7 +38,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; - import org.hamcrest.Matcher; import java.util.Collection; @@ -46,13 +45,13 @@ import java.util.List; import java.util.Set; -import static org.hamcrest.Matchers.startsWith; import static org.opensearch.test.NodeRoles.addRoles; import static org.opensearch.test.NodeRoles.onlyRole; import static org.opensearch.test.NodeRoles.removeRoles; import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.startsWith; public class DiscoveryNodeRoleIT extends OpenSearchIntegTestCase { diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/AllocationIdIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/AllocationIdIT.java index 3f0243136d790..82159065bcc8a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/AllocationIdIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/AllocationIdIT.java @@ -33,7 +33,6 @@ package org.opensearch.cluster.routing; import 
org.apache.lucene.store.NIOFSDirectory; - import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplanation; import org.opensearch.action.admin.indices.stats.ShardStats; import org.opensearch.action.index.IndexRequestBuilder; @@ -45,20 +44,20 @@ import org.opensearch.cluster.routing.allocation.ShardAllocationDecision; import org.opensearch.cluster.routing.allocation.command.AllocateStalePrimaryAllocationCommand; import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.index.MockEngineFactoryPlugin; import org.opensearch.index.engine.Engine; import org.opensearch.index.shard.RemoveCorruptedShardDataCommandIT; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.ShardPath; import org.opensearch.index.store.Store; import org.opensearch.indices.IndicesService; import org.opensearch.plugins.Plugin; import org.opensearch.test.DummyShardLock; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalSettingsPlugin; import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; import java.io.IOException; diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/DelayedAllocationIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/DelayedAllocationIT.java index b4b08a4c9ad73..45c72eefa2285 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/DelayedAllocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/DelayedAllocationIT.java @@ -37,8 +37,8 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; import java.util.Collections; import java.util.List; diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/PrimaryAllocationIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/PrimaryAllocationIT.java index 637cc96bdfc44..0dd5f036457ad 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/PrimaryAllocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/PrimaryAllocationIT.java @@ -46,22 +46,22 @@ import org.opensearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand; import org.opensearch.cluster.routing.allocation.command.AllocateStalePrimaryAllocationCommand; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.Strings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.set.Sets; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.Strings; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.gateway.GatewayAllocator; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.engine.Engine; import org.opensearch.index.engine.EngineTestCase; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; -import org.opensearch.core.index.shard.ShardId; import 
org.opensearch.indices.IndicesService; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalSettingsPlugin; import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.disruption.NetworkDisruption; import org.opensearch.test.disruption.NetworkDisruption.TwoPartitions; import org.opensearch.test.transport.MockTransportService; @@ -136,7 +136,7 @@ public void testBulkWeirdScenario() throws Exception { assertThat(bulkResponse.hasFailures(), equalTo(false)); assertThat(bulkResponse.getItems().length, equalTo(2)); - logger.info(Strings.toString(XContentType.JSON, bulkResponse, true, true)); + logger.info(Strings.toString(MediaTypeRegistry.JSON, bulkResponse, true, true)); internalCluster().assertSeqNos(); diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java index c9456b2418610..d6d22c95ee5a2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java @@ -13,20 +13,22 @@ import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingResponse; import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingResponse; import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingResponse; +import org.opensearch.action.admin.cluster.state.ClusterStateRequest; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.settings.Settings; -import org.opensearch.discovery.ClusterManagerNotDiscoveredException; import org.opensearch.core.rest.RestStatus; +import org.opensearch.discovery.ClusterManagerNotDiscoveredException; +import org.opensearch.plugins.Plugin; import org.opensearch.snapshots.mockstore.MockRepository; +import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.disruption.NetworkDisruption; -import org.opensearch.plugins.Plugin; import org.opensearch.test.transport.MockTransportService; import java.io.IOException; import java.util.Arrays; -import java.util.HashSet; import java.util.Collection; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -715,4 +717,144 @@ public void testClusterHealthResponseWithEnsureNodeWeighedInParam() throws Excep assertFalse(nodeLocalHealth.isTimedOut()); assertTrue(nodeLocalHealth.hasDiscoveredClusterManager()); } + + public void testReadWriteWeightedRoutingMetadataOnNodeRestart() throws Exception { + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .build(); + + internalCluster().startClusterManagerOnlyNode(Settings.builder().put(commonSettings).build()); + + logger.info("--> starting 1 node in each of zones 'a', 'b' & 'c'"); + List<String> nodes_in_zone_a = internalCluster().startDataOnlyNodes( + 1, + Settings.builder().put(commonSettings).put("node.attr.zone", "a").build() + ); + List<String> nodes_in_zone_b = internalCluster().startDataOnlyNodes( + 1, + Settings.builder().put(commonSettings).put("node.attr.zone", "b").build() + ); + List<String> nodes_in_zone_c =
internalCluster().startDataOnlyNodes( + 1, + Settings.builder().put(commonSettings).put("node.attr.zone", "c").build() + ); + + logger.info("--> waiting for nodes to form a cluster"); + ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("4").execute().actionGet(); + assertThat(health.isTimedOut(), equalTo(false)); + + ensureGreen(); + + logger.info("--> setting shard routing weights for weighted round robin"); + Map<String, Double> weights = Map.of("a", 1.0, "b", 2.0, "c", 3.0); + WeightedRouting weightedRouting = new WeightedRouting("zone", weights); + + ClusterPutWeightedRoutingResponse response = client().admin() + .cluster() + .prepareWeightedRouting() + .setWeightedRouting(weightedRouting) + .setVersion(-1) + .get(); + assertEquals(response.isAcknowledged(), true); + + ClusterDeleteWeightedRoutingResponse deleteResponse = client().admin().cluster().prepareDeleteWeightedRouting().setVersion(0).get(); + assertTrue(deleteResponse.isAcknowledged()); + + // check weighted routing metadata after node restart, ensure node comes healthy after restart + internalCluster().restartNode(nodes_in_zone_a.get(0), new InternalTestCluster.RestartCallback()); + ensureGreen(); + assertNotNull(internalCluster().clusterService().state().metadata().weightedRoutingMetadata()); + + // make sure restarted node joins the cluster + assertEquals(3, internalCluster().clusterService().state().nodes().getDataNodes().size()); + assertNotNull( + internalCluster().client(nodes_in_zone_a.get(0)) + .admin() + .cluster() + .state(new ClusterStateRequest().local(true)) + .get() + .getState() + .metadata() + .weightedRoutingMetadata() + ); + assertNotNull( + internalCluster().client(nodes_in_zone_b.get(0)) + .admin() + .cluster() + .state(new ClusterStateRequest().local(true)) + .get() + .getState() + .metadata() + .weightedRoutingMetadata() + ); + assertNotNull( + internalCluster().client(nodes_in_zone_c.get(0)) + .admin() + .cluster() + .state(new ClusterStateRequest().local(true)) + .get() + .getState() + .metadata() + .weightedRoutingMetadata() + ); + assertNotNull( + internalCluster().client(internalCluster().getClusterManagerName()) + .admin() + .cluster() + .state(new ClusterStateRequest().local(true)) + .get() + .getState() + .metadata() + .weightedRoutingMetadata() + ); + + internalCluster().restartNode(internalCluster().getClusterManagerName(), new InternalTestCluster.RestartCallback()); + ensureGreen(); + assertNotNull(internalCluster().clusterService().state().metadata().weightedRoutingMetadata()); + + // make sure restarted node joins the cluster + assertEquals(3, internalCluster().clusterService().state().nodes().getDataNodes().size()); + assertNotNull( + internalCluster().client(nodes_in_zone_a.get(0)) + .admin() + .cluster() + .state(new ClusterStateRequest().local(true)) + .get() + .getState() + .metadata() + .weightedRoutingMetadata() + ); + assertNotNull( + internalCluster().client(nodes_in_zone_b.get(0)) + .admin() + .cluster() + .state(new ClusterStateRequest().local(true)) + .get() + .getState() + .metadata() + .weightedRoutingMetadata() + ); + assertNotNull( + internalCluster().client(nodes_in_zone_c.get(0)) + .admin() + .cluster() + .state(new ClusterStateRequest().local(true)) + .get() + .getState() + .metadata() + .weightedRoutingMetadata() + ); + assertNotNull( + internalCluster().client(internalCluster().getClusterManagerName()) + .admin() + .cluster() + .state(new ClusterStateRequest().local(true)) + .get() + .getState() + .metadata() + 
.weightedRoutingMetadata() + ); + + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java index cafce1a194caa..cc8747e5f5666 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java @@ -36,13 +36,11 @@ import org.apache.lucene.tests.mockfile.FilterFileSystemProvider; import org.apache.lucene.tests.mockfile.FilterPath; import org.apache.lucene.util.Constants; - import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.opensearch.action.admin.indices.stats.ShardStats; import org.opensearch.action.index.IndexRequestBuilder; - import org.opensearch.cluster.ClusterInfoService; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.InternalClusterInfoService; @@ -59,8 +57,8 @@ import org.opensearch.common.io.PathUtils; import org.opensearch.common.io.PathUtilsForTesting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexSettings; @@ -71,9 +69,8 @@ import org.opensearch.snapshots.RestoreInfo; import org.opensearch.snapshots.SnapshotInfo; import org.opensearch.snapshots.SnapshotState; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalSettingsPlugin; - +import org.opensearch.test.OpenSearchIntegTestCase; import org.hamcrest.Matcher; import org.junit.After; import org.junit.Before; @@ -88,9 +85,9 @@ import java.nio.file.NotDirectoryException; import java.nio.file.Path; import java.util.Arrays; -import java.util.List; import java.util.Collection; import java.util.HashSet; +import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Set; @@ -248,8 +245,10 @@ public void testIndexCreateBlockIsRemovedWhenAnyNodesNotExceedHighWatermarkWithA (discoveryNode, fsInfoPath) -> setDiskUsage(fsInfoPath, TOTAL_SPACE_BYTES, TOTAL_SPACE_BYTES) ); - // Validate if index create block is removed on the cluster + // Validate if index create block is removed on the cluster. Need to refresh this periodically as well to remove + // the node from high watermark breached list. 
assertBusy(() -> { + clusterInfoService.refresh(); ClusterState state1 = client().admin().cluster().prepareState().setLocal(true).get().getState(); assertFalse(state1.blocks().hasGlobalBlockWithId(Metadata.CLUSTER_CREATE_INDEX_BLOCK.id())); }, 30L, TimeUnit.SECONDS); diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/settings/ClusterSettingsIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/settings/ClusterSettingsIT.java index 79b674b23fd48..541f1048bb246 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/settings/ClusterSettingsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/settings/ClusterSettingsIT.java @@ -42,11 +42,10 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsException; -import org.opensearch.common.unit.ByteSizeUnit; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.test.OpenSearchIntegTestCase; - import org.junit.After; import java.util.Arrays; diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/settings/SettingsFilteringIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/settings/SettingsFilteringIT.java index 6de13ebe457a2..b6fbb21087b4d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/settings/SettingsFilteringIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/settings/SettingsFilteringIT.java @@ -47,10 +47,8 @@ import java.util.List; import static org.opensearch.action.admin.cluster.node.info.NodesInfoRequest.Metric.SETTINGS; - import static org.opensearch.test.OpenSearchIntegTestCase.Scope.SUITE; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; - import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java index 23335f6e82ef1..5eef7074e1dd6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java @@ -33,7 +33,6 @@ package org.opensearch.cluster.shards; import org.opensearch.Version; - import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; @@ -46,8 +45,8 @@ import org.opensearch.common.Priority; import org.opensearch.common.network.NetworkModule; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.indices.ShardLimitValidator; import org.opensearch.snapshots.SnapshotInfo; import org.opensearch.snapshots.SnapshotState; @@ -246,23 +245,22 @@ public void testIndexCreationOverLimitForDotIndexesFail() { assertFalse(clusterState.getMetadata().hasIndex(".test-index")); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/6287") public void testCreateIndexWithMaxClusterShardSetting() { - int dataNodes = 
client().admin().cluster().prepareState().get().getState().getNodes().getDataNodes().size(); - ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); - setMaxShardLimit(dataNodes, shardsPerNodeKey); + int maxAllowedShardsPerNode = client().admin().cluster().prepareState().get().getState().getNodes().getDataNodes().size(); + setMaxShardLimit(maxAllowedShardsPerNode, shardsPerNodeKey); - int maxAllowedShards = dataNodes + 1; - int extraShardCount = maxAllowedShards + 1; + // Always keep the cluster-wide shard limit well above the per-node limit so the cluster limit never rejects this index creation + int maxAllowedShardsPerCluster = maxAllowedShardsPerNode * 1000; + int extraShardCount = 1; // Getting total active shards in the cluster. int currentActiveShards = client().admin().cluster().prepareHealth().get().getActiveShards(); try { - setMaxShardLimit(maxAllowedShards, SETTING_MAX_SHARDS_PER_CLUSTER_KEY); + setMaxShardLimit(maxAllowedShardsPerCluster, SETTING_MAX_SHARDS_PER_CLUSTER_KEY); prepareCreate("test_index_with_cluster_shard_limit").setSettings( Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, extraShardCount).put(SETTING_NUMBER_OF_REPLICAS, 0).build() ).get(); } catch (final IllegalArgumentException ex) { - verifyException(Math.min(maxAllowedShards, dataNodes * dataNodes), currentActiveShards, extraShardCount, ex); + verifyException(maxAllowedShardsPerCluster, currentActiveShards, extraShardCount, ex); } finally { setMaxShardLimit(-1, SETTING_MAX_SHARDS_PER_CLUSTER_KEY); } diff --git a/server/src/internalClusterTest/java/org/opensearch/clustermanager/ClusterManagerTaskThrottlingIT.java b/server/src/internalClusterTest/java/org/opensearch/clustermanager/ClusterManagerTaskThrottlingIT.java index 9817861c88e9a..3b80ef7820e08 100644 --- a/server/src/internalClusterTest/java/org/opensearch/clustermanager/ClusterManagerTaskThrottlingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/clustermanager/ClusterManagerTaskThrottlingIT.java @@ -8,15 +8,15 @@ package org.opensearch.clustermanager; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest; import org.opensearch.cluster.metadata.ProcessClusterEventTimeoutException; import org.opensearch.cluster.service.ClusterManagerThrottlingException; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.transport.TransportService; import org.opensearch.transport.TransportMessageListener; +import org.opensearch.transport.TransportService; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger; diff --git a/server/src/internalClusterTest/java/org/opensearch/common/settings/UpgradeSettingsIT.java b/server/src/internalClusterTest/java/org/opensearch/common/settings/UpgradeSettingsIT.java index 3c8f914ba4f2f..da0ad316db874 100644 --- a/server/src/internalClusterTest/java/org/opensearch/common/settings/UpgradeSettingsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/common/settings/UpgradeSettingsIT.java @@ -37,7 +37,6 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchSingleNodeTestCase; - import org.junit.After; import java.util.Arrays; diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionCleanSettingsIT.java
b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionCleanSettingsIT.java index 7040bfb950663..39a4f2aa82828 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionCleanSettingsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionCleanSettingsIT.java @@ -32,19 +32,20 @@ package org.opensearch.discovery; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.indices.store.IndicesStoreIntegrationIT; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; + import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; @@ -80,7 +81,7 @@ public void testSearchWithRelocationAndSlowClusterStateProcessing() throws Excep final String node_2 = internalCluster().startDataOnlyNode(); List<IndexRequestBuilder> indexRequestBuilderList = new ArrayList<>(); for (int i = 0; i < 100; i++) { - indexRequestBuilderList.add(client().prepareIndex().setIndex("test").setSource("{\"int_field\":1}", XContentType.JSON)); + indexRequestBuilderList.add(client().prepareIndex().setIndex("test").setSource("{\"int_field\":1}", MediaTypeRegistry.JSON)); } indexRandom(true, indexRequestBuilderList); diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java index 184c866aee2db..737b272613a44 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java @@ -34,9 +34,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.CorruptIndexException; - import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; import org.opensearch.action.NoShardAvailableActionException; import org.opensearch.action.get.GetResponse; import org.opensearch.action.index.IndexRequestBuilder; @@ -53,13 +51,14 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ConcurrentCollections; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.VersionType; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; import org.opensearch.indices.IndicesService; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.disruption.NetworkDisruption; import org.opensearch.test.disruption.NetworkDisruption.Bridge; import org.opensearch.test.disruption.NetworkDisruption.TwoPartitions; @@ -173,7 +172,10 @@ public void testAckedIndexing() throws Exception { logger.trace("[{}] 
indexing id [{}] through node [{}] targeting shard [{}]", name, id, node, shard); IndexRequestBuilder indexRequestBuilder = client.prepareIndex("test") .setId(id) - .setSource(Collections.singletonMap(randomFrom(fieldNames), randomNonNegativeLong()), XContentType.JSON) + .setSource( + Collections.singletonMap(randomFrom(fieldNames), randomNonNegativeLong()), + MediaTypeRegistry.JSON + ) .setTimeout(timeout); if (conflictMode == ConflictMode.external) { @@ -459,7 +461,7 @@ public boolean validateClusterForming() { /** * Tests that indices are properly deleted even if there is a cluster-manager transition in between. - * Test for https://github.com/elastic/elasticsearch/issues/11665 + * Test for <a href="https://github.com/elastic/elasticsearch/issues/11665">Elasticsearch issue #11665</a> */ public void testIndicesDeleted() throws Exception { final String idxName = "test"; @@ -515,7 +517,10 @@ public void testRestartNodeWhileIndexing() throws Exception { try { IndexResponse response = client().prepareIndex(index) .setId(id) - .setSource(Collections.singletonMap("f" + randomIntBetween(1, 10), randomNonNegativeLong()), XContentType.JSON) + .setSource( + Collections.singletonMap("f" + randomIntBetween(1, 10), randomNonNegativeLong()), + MediaTypeRegistry.JSON + ) .get(); assertThat(response.getResult(), is(oneOf(CREATED, UPDATED))); logger.info("--> index id={} seq_no={}", response.getId(), response.getSeqNo()); diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java index 7a7cdb5885054..79f6ba6dfa642 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java @@ -39,9 +39,10 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.NoClusterManagerBlockService; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.service.ClusterStateStats; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.disruption.BlockClusterManagerServiceOnClusterManager; import org.opensearch.test.disruption.IntermittentLongGCDisruption; @@ -199,6 +200,8 @@ public void testIsolateClusterManagerAndVerifyClusterStateConsensus() throws Exc } } + ClusterStateStats clusterStateStats = internalCluster().clusterService().getClusterManagerService().getClusterStateStats(); + assertTrue(clusterStateStats.getUpdateFailed() > 0); }); } @@ -326,9 +329,9 @@ public void testMappingTimeout() throws Exception { disruption.startDisrupting(); BulkRequestBuilder bulk = client().prepareBulk(); - bulk.add(client().prepareIndex("test").setId("2").setSource("{ \"f\": 1 }", XContentType.JSON)); - bulk.add(client().prepareIndex("test").setId("3").setSource("{ \"g\": 1 }", XContentType.JSON)); - bulk.add(client().prepareIndex("test").setId("4").setSource("{ \"f\": 1 }", XContentType.JSON)); + bulk.add(client().prepareIndex("test").setId("2").setSource("{ \"f\": 1 }", MediaTypeRegistry.JSON)); + bulk.add(client().prepareIndex("test").setId("3").setSource("{ \"g\": 1 }", MediaTypeRegistry.JSON)); + bulk.add(client().prepareIndex("test").setId("4").setSource("{ \"f\": 1 }", 
MediaTypeRegistry.JSON)); BulkResponse bulkResponse = bulk.get(); assertTrue(bulkResponse.hasFailures()); diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java index a2864b6dfd1da..70124c8c46700 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java @@ -136,7 +136,7 @@ public void testClusterJoinDespiteOfPublishingIssues() throws Exception { // shutting down the nodes, to avoid the leakage check tripping // on the states associated with the commit requests we may have dropped - internalCluster().stopRandomNonClusterManagerNode(); + internalCluster().stopRandomNodeNotCurrentClusterManager(); } public void testClusterFormingWithASlowNode() { diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/DiskDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/DiskDisruptionIT.java index ef00150b7c814..b7aae73056f6f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/DiskDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/DiskDisruptionIT.java @@ -32,6 +32,7 @@ package org.opensearch.discovery; import com.carrotsearch.randomizedtesting.RandomizedTest; + import org.apache.lucene.tests.mockfile.FilterFileSystemProvider; import org.opensearch.action.admin.indices.stats.ShardStats; import org.opensearch.cluster.metadata.IndexMetadata; @@ -41,9 +42,8 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.test.BackgroundIndexer; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalTestCluster; - +import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.After; import org.junit.AfterClass; import org.junit.BeforeClass; diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/SnapshotDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/SnapshotDisruptionIT.java index 691e3ca51eb8c..68eb367108954 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/SnapshotDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/SnapshotDisruptionIT.java @@ -33,7 +33,6 @@ package org.opensearch.discovery; import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionFuture; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; import org.opensearch.action.index.IndexRequestBuilder; @@ -43,6 +42,7 @@ import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.json.JsonXContent; diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/single/SingleNodeDiscoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/single/SingleNodeDiscoveryIT.java index 47df0aeced3cb..1f6c8eac6c391 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/single/SingleNodeDiscoveryIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/discovery/single/SingleNodeDiscoveryIT.java @@ -41,11 +41,11 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Settings; import org.opensearch.node.Node.DiscoverySettings; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.MockHttpTransport; import org.opensearch.test.MockLogAppender; import org.opensearch.test.NodeConfigurationSource; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.transport.RemoteTransportException; import org.opensearch.transport.TransportService; @@ -76,6 +76,7 @@ public void testSingleNodesDoNotDiscoverEachOther() throws IOException, Interrup @Override public Settings nodeSettings(int nodeOrdinal) { return Settings.builder() + .put(featureFlagSettings()) .put("discovery.type", "single-node") .put("transport.type", getTestTransportType()) /* @@ -142,6 +143,7 @@ public boolean innerMatch(final LogEvent event) { @Override public Settings nodeSettings(int nodeOrdinal) { return Settings.builder() + .put(featureFlagSettings()) .put("discovery.type", "zen") .put("transport.type", getTestTransportType()) .put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s") diff --git a/server/src/internalClusterTest/java/org/opensearch/document/DocumentActionsIT.java b/server/src/internalClusterTest/java/org/opensearch/document/DocumentActionsIT.java index 581b352e917f0..0336ccf3f4647 100644 --- a/server/src/internalClusterTest/java/org/opensearch/document/DocumentActionsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/document/DocumentActionsIT.java @@ -43,10 +43,9 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.WriteRequest.RefreshPolicy; import org.opensearch.cluster.health.ClusterHealthStatus; -import org.opensearch.common.Strings; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; @@ -59,7 +58,6 @@ import static org.opensearch.client.Requests.refreshRequest; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; - import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; @@ -117,10 +115,10 @@ public void testIndexActions() throws Exception { for (int i = 0; i < 5; i++) { getResult = client().prepareGet("test", "1").execute().actionGet(); assertThat(getResult.getIndex(), equalTo(getConcreteIndexName())); - assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(Strings.toString(source("1", "test")))); + assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(source("1", "test").toString())); assertThat("cycle(map) #" + i, (String) getResult.getSourceAsMap().get("name"), equalTo("test")); getResult = client().get(getRequest("test").id("1")).actionGet(); - assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(Strings.toString(source("1", "test")))); + assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(source("1", "test").toString())); assertThat(getResult.getIndex(), equalTo(getConcreteIndexName())); } @@ -168,10 +166,10 
@@ public void testIndexActions() throws Exception { for (int i = 0; i < 5; i++) { getResult = client().get(getRequest("test").id("1")).actionGet(); assertThat(getResult.getIndex(), equalTo(getConcreteIndexName())); - assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(Strings.toString(source("1", "test")))); + assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(source("1", "test").toString())); getResult = client().get(getRequest("test").id("2")).actionGet(); String ste1 = getResult.getSourceAsString(); - String ste2 = Strings.toString(source("2", "test2")); + String ste2 = source("2", "test2").toString(); assertThat("cycle #" + i, ste1, equalTo(ste2)); assertThat(getResult.getIndex(), equalTo(getConcreteIndexName())); } @@ -211,7 +209,7 @@ public void testBulk() throws Exception { .add(client().prepareIndex().setIndex("test").setSource(source("3", "test"))) .add(client().prepareIndex().setIndex("test").setCreate(true).setSource(source("4", "test"))) .add(client().prepareDelete().setIndex("test").setId("1")) - .add(client().prepareIndex().setIndex("test").setSource("{ xxx }", XContentType.JSON)) // failure + .add(client().prepareIndex().setIndex("test").setSource("{ xxx }", MediaTypeRegistry.JSON)) // failure .execute() .actionGet(); @@ -258,15 +256,15 @@ public void testBulk() throws Exception { assertThat("cycle #" + i, getResult.isExists(), equalTo(false)); getResult = client().get(getRequest("test").id("2")).actionGet(); - assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(Strings.toString(source("2", "test")))); + assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(source("2", "test").toString())); assertThat(getResult.getIndex(), equalTo(getConcreteIndexName())); getResult = client().get(getRequest("test").id(generatedId3)).actionGet(); - assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(Strings.toString(source("3", "test")))); + assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(source("3", "test").toString())); assertThat(getResult.getIndex(), equalTo(getConcreteIndexName())); getResult = client().get(getRequest("test").id(generatedId4)).actionGet(); - assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(Strings.toString(source("4", "test")))); + assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(source("4", "test").toString())); assertThat(getResult.getIndex(), equalTo(getConcreteIndexName())); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/document/ShardInfoIT.java b/server/src/internalClusterTest/java/org/opensearch/document/ShardInfoIT.java index 5f217548794db..77c0c6edef623 100644 --- a/server/src/internalClusterTest/java/org/opensearch/document/ShardInfoIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/document/ShardInfoIT.java @@ -44,11 +44,10 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.test.OpenSearchIntegTestCase; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; - import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.not; @@ -60,7 +59,7 @@ public class ShardInfoIT extends OpenSearchIntegTestCase { public void testIndexAndDelete() throws Exception { prepareIndex(1); - IndexResponse 
indexResponse = client().prepareIndex("idx").setSource("{}", XContentType.JSON).get(); + IndexResponse indexResponse = client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON).get(); assertShardInfo(indexResponse); DeleteResponse deleteResponse = client().prepareDelete("idx", indexResponse.getId()).get(); assertShardInfo(deleteResponse); @@ -68,7 +67,7 @@ public void testIndexAndDelete() throws Exception { public void testUpdate() throws Exception { prepareIndex(1); - UpdateResponse updateResponse = client().prepareUpdate("idx", "1").setDoc("{}", XContentType.JSON).setDocAsUpsert(true).get(); + UpdateResponse updateResponse = client().prepareUpdate("idx", "1").setDoc("{}", MediaTypeRegistry.JSON).setDocAsUpsert(true).get(); assertShardInfo(updateResponse); } @@ -76,7 +75,7 @@ public void testBulkWithIndexAndDeleteItems() throws Exception { prepareIndex(1); BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); for (int i = 0; i < 10; i++) { - bulkRequestBuilder.add(client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + bulkRequestBuilder.add(client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); } BulkResponse bulkResponse = bulkRequestBuilder.get(); @@ -98,7 +97,9 @@ public void testBulkWithUpdateItems() throws Exception { prepareIndex(1); BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); for (int i = 0; i < 10; i++) { - bulkRequestBuilder.add(client().prepareUpdate("idx", Integer.toString(i)).setDoc("{}", XContentType.JSON).setDocAsUpsert(true)); + bulkRequestBuilder.add( + client().prepareUpdate("idx", Integer.toString(i)).setDoc("{}", MediaTypeRegistry.JSON).setDocAsUpsert(true) + ); } BulkResponse bulkResponse = bulkRequestBuilder.get(); diff --git a/server/src/internalClusterTest/java/org/opensearch/env/NodeEnvironmentIT.java b/server/src/internalClusterTest/java/org/opensearch/env/NodeEnvironmentIT.java index a150bf5c86a59..43f1608210668 100644 --- a/server/src/internalClusterTest/java/org/opensearch/env/NodeEnvironmentIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/env/NodeEnvironmentIT.java @@ -39,9 +39,9 @@ import org.opensearch.common.settings.Settings; import org.opensearch.gateway.PersistedClusterStateService; import org.opensearch.indices.IndicesService; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.NodeRoles; +import org.opensearch.test.OpenSearchIntegTestCase; import java.io.IOException; import java.nio.file.Path; diff --git a/server/src/internalClusterTest/java/org/opensearch/env/NodeRepurposeCommandIT.java b/server/src/internalClusterTest/java/org/opensearch/env/NodeRepurposeCommandIT.java index 7a5c7ac653ab4..420a24d6d3aae 100644 --- a/server/src/internalClusterTest/java/org/opensearch/env/NodeRepurposeCommandIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/env/NodeRepurposeCommandIT.java @@ -37,9 +37,8 @@ import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.common.settings.Settings; import org.opensearch.indices.IndicesService; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.NodeRoles; - +import org.opensearch.test.OpenSearchIntegTestCase; import org.hamcrest.Matcher; import java.util.Arrays; diff --git a/server/src/internalClusterTest/java/org/opensearch/explain/ExplainActionIT.java b/server/src/internalClusterTest/java/org/opensearch/explain/ExplainActionIT.java index 746b010c0f4fd..2949fa34a0795 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/explain/ExplainActionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/explain/ExplainActionIT.java @@ -35,10 +35,10 @@ import org.apache.lucene.search.Explanation; import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.explain.ExplainResponse; -import org.opensearch.core.common.io.stream.InputStreamStreamInput; -import org.opensearch.core.common.io.stream.OutputStreamStreamOutput; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.io.stream.InputStreamStreamInput; +import org.opensearch.core.common.io.stream.OutputStreamStreamOutput; import org.opensearch.index.query.QueryBuilders; import org.opensearch.test.OpenSearchIntegTestCase; diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/GatewayIndexStateIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/GatewayIndexStateIT.java index 017865a1397e8..47ef55bd61290 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/GatewayIndexStateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/GatewayIndexStateIT.java @@ -34,7 +34,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; - import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; @@ -61,10 +60,10 @@ import org.opensearch.index.mapper.MapperParsingException; import org.opensearch.indices.IndexClosedException; import org.opensearch.indices.ShardLimitValidator; +import org.opensearch.test.InternalTestCluster.RestartCallback; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; -import org.opensearch.test.InternalTestCluster.RestartCallback; import java.io.IOException; import java.nio.file.Path; diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/GatewayRecoveryTestUtils.java b/server/src/internalClusterTest/java/org/opensearch/gateway/GatewayRecoveryTestUtils.java new file mode 100644 index 0000000000000..2b6a5b4ee6867 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/GatewayRecoveryTestUtils.java @@ -0,0 +1,77 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.gateway; + +import org.opensearch.action.admin.cluster.state.ClusterStateRequest; +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.env.NodeEnvironment; +import org.opensearch.index.shard.ShardPath; +import org.opensearch.indices.store.ShardAttributes; + +import java.io.IOException; +import java.nio.file.DirectoryStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; + +import static org.opensearch.test.OpenSearchIntegTestCase.client; +import static org.opensearch.test.OpenSearchIntegTestCase.internalCluster; +import static org.opensearch.test.OpenSearchIntegTestCase.resolveIndex; + +public class GatewayRecoveryTestUtils { + + public static DiscoveryNode[] getDiscoveryNodes() throws ExecutionException, InterruptedException { + final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + clusterStateRequest.local(false); + clusterStateRequest.clear().nodes(true).routingTable(true).indices("*"); + ClusterStateResponse clusterStateResponse = client().admin().cluster().state(clusterStateRequest).get(); + final List<DiscoveryNode> nodes = new LinkedList<>(clusterStateResponse.getState().nodes().getDataNodes().values()); + DiscoveryNode[] disNodesArr = new DiscoveryNode[nodes.size()]; + nodes.toArray(disNodesArr); + return disNodesArr; + } + + public static Map<ShardId, ShardAttributes> prepareRequestMap(String[] indices, int primaryShardCount) { + Map<ShardId, ShardAttributes> shardIdShardAttributesMap = new HashMap<>(); + for (String indexName : indices) { + final Index index = resolveIndex(indexName); + final String customDataPath = IndexMetadata.INDEX_DATA_PATH_SETTING.get( + client().admin().indices().prepareGetSettings(indexName).get().getIndexToSettings().get(indexName) + ); + for (int shardIdNum = 0; shardIdNum < primaryShardCount; shardIdNum++) { + final ShardId shardId = new ShardId(index, shardIdNum); + shardIdShardAttributesMap.put(shardId, new ShardAttributes(shardId, customDataPath)); + } + } + return shardIdShardAttributesMap; + } + + public static void corruptShard(String nodeName, ShardId shardId) throws IOException, InterruptedException { + for (Path path : internalCluster().getInstance(NodeEnvironment.class, nodeName).availableShardPaths(shardId)) { + final Path indexPath = path.resolve(ShardPath.INDEX_FOLDER_NAME); + if (Files.exists(indexPath)) { // multi data path might only have one path in use + try (DirectoryStream<Path> stream = Files.newDirectoryStream(indexPath)) { + for (Path item : stream) { + if (item.getFileName().toString().startsWith("segments_")) { + Files.delete(item); + } + } + } + } + } + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/MetadataNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/MetadataNodesIT.java index 5e30c1ad9c08e..681b112428c92 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/MetadataNodesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/MetadataNodesIT.java @@ -37,14 +37,14 @@ import org.opensearch.cluster.coordination.Coordinator; import org.opensearch.cluster.metadata.IndexMetadata; import 
org.opensearch.common.settings.Settings; +import org.opensearch.core.index.Index; import org.opensearch.discovery.Discovery; import org.opensearch.env.NodeEnvironment; -import org.opensearch.core.index.Index; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.InternalTestCluster.RestartCallback; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; -import org.opensearch.test.InternalTestCluster; -import org.opensearch.test.InternalTestCluster.RestartCallback; import java.nio.file.Files; import java.nio.file.Path; diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/QuorumGatewayIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/QuorumGatewayIT.java index 7e983b114450f..612430facdf19 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/QuorumGatewayIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/QuorumGatewayIT.java @@ -35,10 +35,10 @@ import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.client.Client; import org.opensearch.cluster.health.ClusterHealthStatus; +import org.opensearch.test.InternalTestCluster.RestartCallback; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; -import org.opensearch.test.InternalTestCluster.RestartCallback; import java.util.concurrent.TimeUnit; diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoverAfterNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoverAfterNodesIT.java index c18d94e02ab9c..b81bfe6b0a51a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoverAfterNodesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoverAfterNodesIT.java @@ -43,8 +43,8 @@ import java.util.Set; -import static org.opensearch.test.NodeRoles.dataOnlyNode; import static org.opensearch.test.NodeRoles.clusterManagerOnlyNode; +import static org.opensearch.test.NodeRoles.dataOnlyNode; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java index 0b3a689e81b94..6c248a32c9928 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java @@ -32,10 +32,14 @@ package org.opensearch.gateway; +import org.opensearch.Version; import org.opensearch.action.admin.cluster.configuration.AddVotingConfigExclusionsAction; import org.opensearch.action.admin.cluster.configuration.AddVotingConfigExclusionsRequest; import org.opensearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsAction; import org.opensearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsRequest; +import org.opensearch.action.admin.cluster.reroute.ClusterRerouteResponse; +import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsGroup; +import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsResponse; import org.opensearch.action.admin.indices.recovery.RecoveryResponse; import org.opensearch.action.admin.indices.stats.IndexStats; import 
org.opensearch.action.admin.indices.stats.ShardStats; @@ -44,29 +48,32 @@ import org.opensearch.cluster.coordination.ElectionSchedulerFactory; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.Strings; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.env.NodeEnvironment; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.engine.Engine; import org.opensearch.index.query.QueryBuilders; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.ShardPath; import org.opensearch.indices.IndicesService; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; +import org.opensearch.indices.store.ShardAttributes; +import org.opensearch.indices.store.TransportNodesListShardStoreMetadataBatch; +import org.opensearch.indices.store.TransportNodesListShardStoreMetadataHelper; import org.opensearch.plugins.Plugin; +import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.InternalTestCluster.RestartCallback; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; -import org.opensearch.test.InternalSettingsPlugin; -import org.opensearch.test.InternalTestCluster.RestartCallback; import org.opensearch.test.store.MockFSIndexStore; import java.nio.file.DirectoryStream; @@ -80,12 +87,18 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.ExecutionException; import java.util.stream.IntStream; +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; import static org.opensearch.cluster.coordination.ClusterBootstrapService.INITIAL_CLUSTER_MANAGER_NODES_SETTING; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.gateway.GatewayRecoveryTestUtils.corruptShard; +import static org.opensearch.gateway.GatewayRecoveryTestUtils.getDiscoveryNodes; +import static org.opensearch.gateway.GatewayRecoveryTestUtils.prepareRequestMap; import static org.opensearch.gateway.GatewayService.RECOVER_AFTER_NODES_SETTING; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; @@ -110,16 +123,15 @@ public void testOneNodeRecoverFromGateway() throws Exception { internalCluster().startNode(); - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("appAccountIds") - .field("type", "text") - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + 
.startObject("appAccountIds") + .field("type", "text") + .endObject() + .endObject() + .endObject() + .toString(); assertAcked(prepareCreate("test").setMapping(mapping)); client().prepareIndex("test") @@ -204,19 +216,18 @@ private Map<String, long[]> assertAndCapturePrimaryTerms(Map<String, long[]> pre public void testSingleNodeNoFlush() throws Exception { internalCluster().startNode(); - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("field") - .field("type", "text") - .endObject() - .startObject("num") - .field("type", "integer") - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("field") + .field("type", "text") + .endObject() + .startObject("num") + .field("type", "integer") + .endObject() + .endObject() + .endObject() + .toString(); // note: default replica settings are tied to #data nodes-1 which is 0 here. We can do with 1 in this test. int numberOfShards = numberOfShards(); assertAcked( @@ -522,7 +533,7 @@ public void testReuseInFileBasedPeerRecovery() throws Exception { .put("number_of_replicas", 1) // disable merges to keep segments the same - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) // expire retention leases quickly .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms") @@ -737,4 +748,222 @@ public void testMessyElectionsStillMakeClusterGoGreen() throws Exception { internalCluster().fullRestart(); ensureGreen("test"); } + + public void testSingleShardFetchUsingBatchAction() { + String indexName = "test"; + int numOfShards = 1; + prepareIndex(indexName, numOfShards); + Map<ShardId, ShardAttributes> shardIdShardAttributesMap = prepareRequestMap(new String[] { indexName }, numOfShards); + + ClusterSearchShardsResponse searchShardsResponse = client().admin().cluster().prepareSearchShards(indexName).get(); + + TransportNodesListGatewayStartedShardsBatch.NodesGatewayStartedShardsBatch response; + response = ActionTestUtils.executeBlocking( + internalCluster().getInstance(TransportNodesListGatewayStartedShardsBatch.class), + new TransportNodesListGatewayStartedShardsBatch.Request(searchShardsResponse.getNodes(), shardIdShardAttributesMap) + ); + final Index index = resolveIndex(indexName); + final ShardId shardId = new ShardId(index, 0); + TransportNodesListGatewayStartedShardsBatch.NodeGatewayStartedShard nodeGatewayStartedShards = response.getNodesMap() + .get(searchShardsResponse.getNodes()[0].getId()) + .getNodeGatewayStartedShardsBatch() + .get(shardId); + assertNodeGatewayStartedShardsHappyCase(nodeGatewayStartedShards); + } + + public void testShardFetchMultiNodeMultiIndexesUsingBatchAction() { + // start node + internalCluster().startNode(); + String indexName1 = "test1"; + String indexName2 = "test2"; + int numShards = internalCluster().numDataNodes(); + // assign one primary shard each to the data nodes + prepareIndex(indexName1, numShards); + prepareIndex(indexName2, numShards); + Map<ShardId, ShardAttributes> shardIdShardAttributesMap = prepareRequestMap(new String[] { indexName1, indexName2 }, numShards); + ClusterSearchShardsResponse searchShardsResponse = client().admin().cluster().prepareSearchShards(indexName1, indexName2).get(); + assertEquals(internalCluster().numDataNodes(), searchShardsResponse.getNodes().length); + TransportNodesListGatewayStartedShardsBatch.NodesGatewayStartedShardsBatch 
response; + response = ActionTestUtils.executeBlocking( + internalCluster().getInstance(TransportNodesListGatewayStartedShardsBatch.class), + new TransportNodesListGatewayStartedShardsBatch.Request(searchShardsResponse.getNodes(), shardIdShardAttributesMap) + ); + for (ClusterSearchShardsGroup clusterSearchShardsGroup : searchShardsResponse.getGroups()) { + ShardId shardId = clusterSearchShardsGroup.getShardId(); + assertEquals(1, clusterSearchShardsGroup.getShards().length); + String nodeId = clusterSearchShardsGroup.getShards()[0].currentNodeId(); + TransportNodesListGatewayStartedShardsBatch.NodeGatewayStartedShard nodeGatewayStartedShards = response.getNodesMap() + .get(nodeId) + .getNodeGatewayStartedShardsBatch() + .get(shardId); + assertNodeGatewayStartedShardsHappyCase(nodeGatewayStartedShards); + } + } + + public void testShardFetchCorruptedShardsUsingBatchAction() throws Exception { + String indexName = "test"; + int numOfShards = 1; + prepareIndex(indexName, numOfShards); + Map<ShardId, ShardAttributes> shardIdShardAttributesMap = prepareRequestMap(new String[] { indexName }, numOfShards); + ClusterSearchShardsResponse searchShardsResponse = client().admin().cluster().prepareSearchShards(indexName).get(); + final Index index = resolveIndex(indexName); + final ShardId shardId = new ShardId(index, 0); + corruptShard(searchShardsResponse.getNodes()[0].getName(), shardId); + TransportNodesListGatewayStartedShardsBatch.NodesGatewayStartedShardsBatch response; + internalCluster().restartNode(searchShardsResponse.getNodes()[0].getName()); + response = ActionTestUtils.executeBlocking( + internalCluster().getInstance(TransportNodesListGatewayStartedShardsBatch.class), + new TransportNodesListGatewayStartedShardsBatch.Request(getDiscoveryNodes(), shardIdShardAttributesMap) + ); + DiscoveryNode[] discoveryNodes = getDiscoveryNodes(); + TransportNodesListGatewayStartedShardsBatch.NodeGatewayStartedShard nodeGatewayStartedShards = response.getNodesMap() + .get(discoveryNodes[0].getId()) + .getNodeGatewayStartedShardsBatch() + .get(shardId); + assertNotNull(nodeGatewayStartedShards.storeException()); + assertNotNull(nodeGatewayStartedShards.allocationId()); + assertTrue(nodeGatewayStartedShards.primary()); + } + + public void testSingleShardStoreFetchUsingBatchAction() throws ExecutionException, InterruptedException { + String indexName = "test"; + DiscoveryNode[] nodes = getDiscoveryNodes(); + TransportNodesListShardStoreMetadataBatch.NodesStoreFilesMetadataBatch response = prepareAndSendRequest( + new String[] { indexName }, + nodes + ); + Index index = resolveIndex(indexName); + ShardId shardId = new ShardId(index, 0); + TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadata nodeStoreFilesMetadata = response.getNodesMap() + .get(nodes[0].getId()) + .getNodeStoreFilesMetadataBatch() + .get(shardId); + assertNodeStoreFilesMetadataSuccessCase(nodeStoreFilesMetadata, shardId); + } + + public void testShardStoreFetchMultiNodeMultiIndexesUsingBatchAction() throws Exception { + internalCluster().startNodes(2); + String indexName1 = "test1"; + String indexName2 = "test2"; + DiscoveryNode[] nodes = getDiscoveryNodes(); + TransportNodesListShardStoreMetadataBatch.NodesStoreFilesMetadataBatch response = prepareAndSendRequest( + new String[] { indexName1, indexName2 }, + nodes + ); + ClusterSearchShardsResponse searchShardsResponse = client().admin().cluster().prepareSearchShards(indexName1, indexName2).get(); + for (ClusterSearchShardsGroup clusterSearchShardsGroup : 
searchShardsResponse.getGroups()) { + ShardId shardId = clusterSearchShardsGroup.getShardId(); + ShardRouting[] shardRoutings = clusterSearchShardsGroup.getShards(); + assertEquals(2, shardRoutings.length); + for (ShardRouting shardRouting : shardRoutings) { + TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadata nodeStoreFilesMetadata = response.getNodesMap() + .get(shardRouting.currentNodeId()) + .getNodeStoreFilesMetadataBatch() + .get(shardId); + assertNodeStoreFilesMetadataSuccessCase(nodeStoreFilesMetadata, shardId); + } + } + } + + public void testShardStoreFetchNodeNotConnectedUsingBatchAction() { + DiscoveryNode nonExistingNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); + String indexName = "test"; + TransportNodesListShardStoreMetadataBatch.NodesStoreFilesMetadataBatch response = prepareAndSendRequest( + new String[] { indexName }, + new DiscoveryNode[] { nonExistingNode } + ); + assertTrue(response.hasFailures()); + assertEquals(1, response.failures().size()); + assertEquals(nonExistingNode.getId(), response.failures().get(0).nodeId()); + } + + public void testShardStoreFetchCorruptedIndexUsingBatchAction() throws Exception { + internalCluster().startNodes(2); + String index1Name = "test1"; + String index2Name = "test2"; + prepareIndices(new String[] { index1Name, index2Name }, 1, 1); + Map<ShardId, ShardAttributes> shardAttributesMap = prepareRequestMap(new String[] { index1Name, index2Name }, 1); + Index index1 = resolveIndex(index1Name); + ShardId shardId1 = new ShardId(index1, 0); + ClusterSearchShardsResponse searchShardsResponse = client().admin().cluster().prepareSearchShards(index1Name).get(); + assertEquals(2, searchShardsResponse.getNodes().length); + + // corrupt test1 index shards + corruptShard(searchShardsResponse.getNodes()[0].getName(), shardId1); + corruptShard(searchShardsResponse.getNodes()[1].getName(), shardId1); + ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setRetryFailed(false).get(); + DiscoveryNode[] discoveryNodes = getDiscoveryNodes(); + TransportNodesListShardStoreMetadataBatch.NodesStoreFilesMetadataBatch response; + response = ActionTestUtils.executeBlocking( + internalCluster().getInstance(TransportNodesListShardStoreMetadataBatch.class), + new TransportNodesListShardStoreMetadataBatch.Request(shardAttributesMap, discoveryNodes) + ); + Map<ShardId, TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadata> nodeStoreFilesMetadata = response.getNodesMap() + .get(discoveryNodes[0].getId()) + .getNodeStoreFilesMetadataBatch(); + // We don't store exception in case of corrupt index, rather just return an empty response + assertNull(nodeStoreFilesMetadata.get(shardId1).getStoreFileFetchException()); + assertEquals(shardId1, nodeStoreFilesMetadata.get(shardId1).storeFilesMetadata().shardId()); + assertTrue(nodeStoreFilesMetadata.get(shardId1).storeFilesMetadata().isEmpty()); + + Index index2 = resolveIndex(index2Name); + ShardId shardId2 = new ShardId(index2, 0); + assertNodeStoreFilesMetadataSuccessCase(nodeStoreFilesMetadata.get(shardId2), shardId2); + } + + private void prepareIndices(String[] indices, int numberOfPrimaryShards, int numberOfReplicaShards) { + for (String index : indices) { + createIndex( + index, + Settings.builder() + .put(SETTING_NUMBER_OF_SHARDS, numberOfPrimaryShards) + .put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicaShards) + .build() + ); + index(index, "type", "1", Collections.emptyMap()); + flush(index); + 
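// indexing a document and flushing each index gives every shard a non-empty on-disk store,
// which the store-files-metadata success assertions in the tests above rely on; an
// unflushed shard could legitimately come back with an empty StoreFilesMetadata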
} + } + + private TransportNodesListShardStoreMetadataBatch.NodesStoreFilesMetadataBatch prepareAndSendRequest( + String[] indices, + DiscoveryNode[] nodes + ) { + prepareIndices(indices, 1, 1); + Map<ShardId, ShardAttributes> shardAttributesMap = prepareRequestMap(indices, 1); + return ActionTestUtils.executeBlocking( + internalCluster().getInstance(TransportNodesListShardStoreMetadataBatch.class), + new TransportNodesListShardStoreMetadataBatch.Request(shardAttributesMap, nodes) + ); + } + + private void assertNodeStoreFilesMetadataSuccessCase( + TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadata nodeStoreFilesMetadata, + ShardId shardId + ) { + assertNull(nodeStoreFilesMetadata.getStoreFileFetchException()); + TransportNodesListShardStoreMetadataHelper.StoreFilesMetadata storeFileMetadata = nodeStoreFilesMetadata.storeFilesMetadata(); + assertFalse(storeFileMetadata.isEmpty()); + assertEquals(shardId, storeFileMetadata.shardId()); + assertNotNull(storeFileMetadata.peerRecoveryRetentionLeases()); + } + + private void assertNodeGatewayStartedShardsHappyCase( + TransportNodesListGatewayStartedShardsBatch.NodeGatewayStartedShard nodeGatewayStartedShards + ) { + assertNull(nodeGatewayStartedShards.storeException()); + assertNotNull(nodeGatewayStartedShards.allocationId()); + assertTrue(nodeGatewayStartedShards.primary()); + } + + private void prepareIndex(String indexName, int numberOfPrimaryShards) { + createIndex( + indexName, + Settings.builder().put(SETTING_NUMBER_OF_SHARDS, numberOfPrimaryShards).put(SETTING_NUMBER_OF_REPLICAS, 0).build() + ); + index(indexName, "type", "1", Collections.emptyMap()); + flush(indexName); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/ReplicaShardAllocatorIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/ReplicaShardAllocatorIT.java index 9a465c2f9121c..5a429d5f7d910 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/ReplicaShardAllocatorIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/ReplicaShardAllocatorIT.java @@ -38,9 +38,9 @@ import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Priority; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.breaker.CircuitBreakingException; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.CircuitBreakingException; import org.opensearch.core.index.Index; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; @@ -53,9 +53,9 @@ import org.opensearch.indices.recovery.RecoveryCleanFilesRequest; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalSettingsPlugin; import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.transport.TransportService; diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java new file mode 100644 index 0000000000000..dfde1b958882c ---
/dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java @@ -0,0 +1,199 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gateway.remote; + +import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.settings.Settings; +import org.opensearch.discovery.DiscoveryStats; +import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.nio.charset.StandardCharsets; +import java.util.Base64; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RemoteClusterStateServiceIT extends RemoteStoreBaseIntegTestCase { + + private static String INDEX_NAME = "test-index"; + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true).build(); + } + + private void prepareCluster(int numClusterManagerNodes, int numDataOnlyNodes, String indices, int replicaCount, int shardCount) { + internalCluster().startClusterManagerOnlyNodes(numClusterManagerNodes); + internalCluster().startDataOnlyNodes(numDataOnlyNodes); + for (String index : indices.split(",")) { + createIndex(index, remoteStoreIndexSettings(replicaCount, shardCount)); + ensureYellowAndNoInitializingShards(index); + ensureGreen(index); + } + } + + private Map<String, Long> initialTestSetup(int shardCount, int replicaCount, int dataNodeCount, int clusterManagerNodeCount) { + prepareCluster(clusterManagerNodeCount, dataNodeCount, INDEX_NAME, replicaCount, shardCount); + Map<String, Long> indexStats = indexData(1, false, INDEX_NAME); + assertEquals(shardCount * (replicaCount + 1), getNumShards(INDEX_NAME).totalNumShards); + ensureGreen(INDEX_NAME); + return indexStats; + } + + public void testFullClusterRestoreStaleDelete() throws Exception { + int shardCount = randomIntBetween(1, 2); + int replicaCount = 1; + int dataNodeCount = shardCount * (replicaCount + 1); + int clusterManagerNodeCount = 1; + + initialTestSetup(shardCount, replicaCount, dataNodeCount, clusterManagerNodeCount); + setReplicaCount(0); + setReplicaCount(2); + setReplicaCount(0); + setReplicaCount(1); + setReplicaCount(0); + setReplicaCount(1); + setReplicaCount(0); + setReplicaCount(2); + setReplicaCount(0); + + RemoteClusterStateService remoteClusterStateService = internalCluster().getClusterManagerNodeInstance( + RemoteClusterStateService.class + ); + + RepositoriesService repositoriesService = internalCluster().getClusterManagerNodeInstance(RepositoriesService.class); + + BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(REPOSITORY_NAME); + BlobPath 
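/* The path assembled below mirrors how the remote cluster-state repository is keyed; under
   the values visible in this test it resolves to roughly

       <repository-base>/<base64url(cluster-name)>/cluster-state/<cluster-uuid>/manifest/...

   Nine replica-count flips plus the initial publication should write more than ten
   manifests, so the assertEquals(10, ...) below is effectively asserting that stale-manifest
   deletion has trimmed the history back to the retention limit. */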
baseMetadataPath = repository.basePath() + .add( + Base64.getUrlEncoder() + .withoutPadding() + .encodeToString(getClusterState().getClusterName().value().getBytes(StandardCharsets.UTF_8)) + ) + .add("cluster-state") + .add(getClusterState().metadata().clusterUUID()); + + assertEquals(10, repository.blobStore().blobContainer(baseMetadataPath.add("manifest")).listBlobsByPrefix("manifest").size()); + + Map<String, IndexMetadata> indexMetadataMap = remoteClusterStateService.getLatestClusterState( + cluster().getClusterName(), + getClusterState().metadata().clusterUUID() + ).getMetadata().getIndices(); + assertEquals(0, indexMetadataMap.values().stream().findFirst().get().getNumberOfReplicas()); + assertEquals(shardCount, indexMetadataMap.values().stream().findFirst().get().getNumberOfShards()); + } + + public void testRemoteStateStats() { + int shardCount = randomIntBetween(1, 2); + int replicaCount = 1; + int dataNodeCount = shardCount * (replicaCount + 1); + int clusterManagerNodeCount = 1; + prepareCluster(clusterManagerNodeCount, dataNodeCount, INDEX_NAME, replicaCount, shardCount); + String clusterManagerNode = internalCluster().getClusterManagerName(); + String dataNode = internalCluster().getDataNodeNames().stream().collect(Collectors.toList()).get(0); + + // Fetch _nodes/stats + NodesStatsResponse nodesStatsResponse = client().admin() + .cluster() + .prepareNodesStats(clusterManagerNode) + .addMetric(NodesStatsRequest.Metric.DISCOVERY.metricName()) + .get(); + + // assert cluster state stats + assertClusterManagerClusterStateStats(nodesStatsResponse); + + NodesStatsResponse nodesStatsResponseDataNode = client().admin() + .cluster() + .prepareNodesStats(dataNode) + .addMetric(NodesStatsRequest.Metric.DISCOVERY.metricName()) + .get(); + // assert cluster state stats for data node + DiscoveryStats dataNodeDiscoveryStats = nodesStatsResponseDataNode.getNodes().get(0).getDiscoveryStats(); + assertNotNull(dataNodeDiscoveryStats.getClusterStateStats()); + assertEquals(0, dataNodeDiscoveryStats.getClusterStateStats().getUpdateSuccess()); + + // call nodes/stats with nodeId filter + NodesStatsResponse nodesStatsNodeIdFilterResponse = client().admin() + .cluster() + .prepareNodesStats(dataNode) + .addMetric(NodesStatsRequest.Metric.DISCOVERY.metricName()) + .setNodesIds(clusterManagerNode) + .get(); + + assertClusterManagerClusterStateStats(nodesStatsNodeIdFilterResponse); + } + + private void assertClusterManagerClusterStateStats(NodesStatsResponse nodesStatsResponse) { + // assert cluster state stats + DiscoveryStats discoveryStats = nodesStatsResponse.getNodes().get(0).getDiscoveryStats(); + + assertNotNull(discoveryStats.getClusterStateStats()); + assertTrue(discoveryStats.getClusterStateStats().getUpdateSuccess() > 1); + assertEquals(0, discoveryStats.getClusterStateStats().getUpdateFailed()); + assertTrue(discoveryStats.getClusterStateStats().getUpdateTotalTimeInMillis() > 0); + // assert remote state stats + assertTrue(discoveryStats.getClusterStateStats().getPersistenceStats().get(0).getSuccessCount() > 1); + assertEquals(0, discoveryStats.getClusterStateStats().getPersistenceStats().get(0).getFailedCount()); + assertTrue(discoveryStats.getClusterStateStats().getPersistenceStats().get(0).getTotalTimeInMillis() > 0); + } + + public void testRemoteStateStatsFromAllNodes() { + int shardCount = randomIntBetween(1, 5); + int replicaCount = 1; + int dataNodeCount = shardCount * (replicaCount + 1); + int clusterManagerNodeCount = 3; + prepareCluster(clusterManagerNodeCount, dataNodeCount, 
INDEX_NAME, replicaCount, shardCount); + String[] allNodes = internalCluster().getNodeNames(); + // call _nodes/stats/discovery from all the nodes + for (String node : allNodes) { + NodesStatsResponse nodesStatsResponse = client().admin() + .cluster() + .prepareNodesStats(node) + .addMetric(NodesStatsRequest.Metric.DISCOVERY.metricName()) + .get(); + validateNodesStatsResponse(nodesStatsResponse); + } + + // call _nodes/stats/discovery from all the nodes with random nodeId filter + for (String node : allNodes) { + NodesStatsResponse nodesStatsResponse = client().admin() + .cluster() + .prepareNodesStats(node) + .addMetric(NodesStatsRequest.Metric.DISCOVERY.metricName()) + .setNodesIds(allNodes[randomIntBetween(0, allNodes.length - 1)]) + .get(); + validateNodesStatsResponse(nodesStatsResponse); + } + } + + private void validateNodesStatsResponse(NodesStatsResponse nodesStatsResponse) { + // _nodes/stats/discovery must never fail due to any exception + assertFalse(nodesStatsResponse.toString().contains("exception")); + assertNotNull(nodesStatsResponse.getNodes()); + assertNotNull(nodesStatsResponse.getNodes().get(0)); + assertNotNull(nodesStatsResponse.getNodes().get(0).getDiscoveryStats()); + } + + private void setReplicaCount(int replicaCount) { + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(SETTING_NUMBER_OF_REPLICAS, replicaCount)) + .get(); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/get/GetActionIT.java b/server/src/internalClusterTest/java/org/opensearch/get/GetActionIT.java index 997e8e9d5258b..c44b7c7736d21 100644 --- a/server/src/internalClusterTest/java/org/opensearch/get/GetActionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/get/GetActionIT.java @@ -35,7 +35,6 @@ import org.opensearch.action.DocWriteResponse; import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.admin.indices.flush.FlushResponse; - import org.opensearch.action.delete.DeleteResponse; import org.opensearch.action.get.GetRequestBuilder; import org.opensearch.action.get.GetResponse; @@ -43,19 +42,19 @@ import org.opensearch.action.get.MultiGetRequestBuilder; import org.opensearch.action.get.MultiGetResponse; import org.opensearch.action.index.IndexResponse; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.common.Nullable; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.engine.VersionConflictEngineException; import org.opensearch.plugins.Plugin; -import org.opensearch.core.rest.RestStatus; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchIntegTestCase; import java.io.IOException; import java.util.Collection; @@ -288,17 +287,16 @@ public void testSimpleMultiGet() throws Exception { } public void testGetDocWithMultivaluedFields() throws Exception { - String mapping1 = Strings.toString( 
- XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("field") - .field("type", "text") - .field("store", true) - .endObject() - .endObject() - .endObject() - ); + String mapping1 = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("field") + .field("type", "text") + .field("store", true) + .endObject() + .endObject() + .endObject() + .toString(); assertAcked(prepareCreate("test").setMapping(mapping1)); ensureGreen(); @@ -633,7 +631,7 @@ public void testGetFieldsComplexField() throws Exception { logger.info("indexing documents"); - client().prepareIndex("my-index").setId("1").setSource(source, XContentType.JSON).get(); + client().prepareIndex("my-index").setId("1").setSource(source, MediaTypeRegistry.JSON).get(); logger.info("checking real time retrieval"); @@ -692,7 +690,7 @@ public void testUngeneratedFieldsThatAreNeverStored() throws IOException { + " }\n" + " }\n" + "}"; - assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setSource(createIndexSource, XContentType.JSON)); + assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setSource(createIndexSource, MediaTypeRegistry.JSON)); ensureGreen(); String doc = "{\n" + " \"suggest\": {\n" @@ -722,10 +720,10 @@ public void testUngeneratedFieldsThatAreAlwaysStored() throws IOException { + " \"refresh_interval\": \"-1\"\n" + " }\n" + "}"; - assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setSource(createIndexSource, XContentType.JSON)); + assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setSource(createIndexSource, MediaTypeRegistry.JSON)); ensureGreen(); - client().prepareIndex("test").setId("1").setRouting("routingValue").setId("1").setSource("{}", XContentType.JSON).get(); + client().prepareIndex("test").setId("1").setRouting("routingValue").setId("1").setSource("{}", MediaTypeRegistry.JSON).get(); String[] fieldsList = { "_routing" }; // before refresh - document is only in translog @@ -746,10 +744,10 @@ public void testUngeneratedFieldsNotPartOfSourceStored() throws IOException { + " }\n" + "}"; - assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setSource(createIndexSource, XContentType.JSON)); + assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setSource(createIndexSource, MediaTypeRegistry.JSON)); ensureGreen(); String doc = "{\n" + " \"text\": \"some text.\"\n" + "}\n"; - client().prepareIndex("test").setId("1").setSource(doc, XContentType.JSON).setRouting("1").get(); + client().prepareIndex("test").setId("1").setSource(doc, MediaTypeRegistry.JSON).setRouting("1").get(); String[] fieldsList = { "_routing" }; // before refresh - document is only in translog assertGetFieldsAlwaysWorks(indexOrAlias(), "_doc", "1", fieldsList, "1"); @@ -817,7 +815,7 @@ void indexSingleDocumentWithStringFieldsGeneratedFromText(boolean stored, boolea + " }\n" + "}"; - assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setSource(createIndexSource, XContentType.JSON)); + assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setSource(createIndexSource, MediaTypeRegistry.JSON)); ensureGreen(); String doc = "{\n" + " \"text1\": \"some text.\"\n," + " \"text2\": \"more text.\"\n" + "}\n"; index("test", "_doc", "1", doc); diff --git a/server/src/internalClusterTest/java/org/opensearch/index/FinalPipelineIT.java b/server/src/internalClusterTest/java/org/opensearch/index/FinalPipelineIT.java index 22dc1224f6e09..03b8fb5ff7afc 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/index/FinalPipelineIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/FinalPipelineIT.java @@ -45,12 +45,13 @@ import org.opensearch.client.Client; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.common.settings.Settings; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; import org.opensearch.ingest.AbstractProcessor; @@ -60,12 +61,10 @@ import org.opensearch.plugins.IngestPlugin; import org.opensearch.plugins.Plugin; import org.opensearch.repositories.RepositoriesService; -import org.opensearch.core.rest.RestStatus; import org.opensearch.script.ScriptService; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.threadpool.ThreadPool; import org.opensearch.watcher.ResourceWatcherService; - import org.junit.After; import java.io.IOException; @@ -103,7 +102,10 @@ public void testFinalPipelineCantChangeDestination() { createIndex("index", settings); final BytesReference finalPipelineBody = new BytesArray("{\"processors\": [{\"changing_dest\": {}}]}"); - client().admin().cluster().putPipeline(new PutPipelineRequest("final_pipeline", finalPipelineBody, XContentType.JSON)).actionGet(); + client().admin() + .cluster() + .putPipeline(new PutPipelineRequest("final_pipeline", finalPipelineBody, MediaTypeRegistry.JSON)) + .actionGet(); final IllegalStateException e = expectThrows( IllegalStateException.class, @@ -122,11 +124,14 @@ public void testFinalPipelineOfOldDestinationIsNotInvoked() { BytesReference defaultPipelineBody = new BytesArray("{\"processors\": [{\"changing_dest\": {}}]}"); client().admin() .cluster() - .putPipeline(new PutPipelineRequest("default_pipeline", defaultPipelineBody, XContentType.JSON)) + .putPipeline(new PutPipelineRequest("default_pipeline", defaultPipelineBody, MediaTypeRegistry.JSON)) .actionGet(); BytesReference finalPipelineBody = new BytesArray("{\"processors\": [{\"final\": {\"exists\":\"no_such_field\"}}]}"); - client().admin().cluster().putPipeline(new PutPipelineRequest("final_pipeline", finalPipelineBody, XContentType.JSON)).actionGet(); + client().admin() + .cluster() + .putPipeline(new PutPipelineRequest("final_pipeline", finalPipelineBody, MediaTypeRegistry.JSON)) + .actionGet(); IndexResponse indexResponse = client().prepareIndex("index") .setId("1") @@ -149,11 +154,14 @@ public void testFinalPipelineOfNewDestinationIsInvoked() { BytesReference defaultPipelineBody = new BytesArray("{\"processors\": [{\"changing_dest\": {}}]}"); client().admin() .cluster() - .putPipeline(new PutPipelineRequest("default_pipeline", defaultPipelineBody, XContentType.JSON)) + .putPipeline(new PutPipelineRequest("default_pipeline", defaultPipelineBody, MediaTypeRegistry.JSON)) .actionGet(); BytesReference finalPipelineBody = new BytesArray("{\"processors\": [{\"final\": {}}]}"); - client().admin().cluster().putPipeline(new PutPipelineRequest("final_pipeline", finalPipelineBody, XContentType.JSON)).actionGet(); + client().admin() + 
.cluster() + .putPipeline(new PutPipelineRequest("final_pipeline", finalPipelineBody, MediaTypeRegistry.JSON)) + .actionGet(); IndexResponse indexResponse = client().prepareIndex("index") .setId("1") @@ -176,13 +184,13 @@ public void testDefaultPipelineOfNewDestinationIsNotInvoked() { BytesReference defaultPipelineBody = new BytesArray("{\"processors\": [{\"changing_dest\": {}}]}"); client().admin() .cluster() - .putPipeline(new PutPipelineRequest("default_pipeline", defaultPipelineBody, XContentType.JSON)) + .putPipeline(new PutPipelineRequest("default_pipeline", defaultPipelineBody, MediaTypeRegistry.JSON)) .actionGet(); BytesReference targetPipeline = new BytesArray("{\"processors\": [{\"final\": {}}]}"); client().admin() .cluster() - .putPipeline(new PutPipelineRequest("target_default_pipeline", targetPipeline, XContentType.JSON)) + .putPipeline(new PutPipelineRequest("target_default_pipeline", targetPipeline, MediaTypeRegistry.JSON)) .actionGet(); IndexResponse indexResponse = client().prepareIndex("index") @@ -212,10 +220,13 @@ public void testRequestPipelineAndFinalPipeline() { final BytesReference requestPipelineBody = new BytesArray("{\"processors\": [{\"request\": {}}]}"); client().admin() .cluster() - .putPipeline(new PutPipelineRequest("request_pipeline", requestPipelineBody, XContentType.JSON)) + .putPipeline(new PutPipelineRequest("request_pipeline", requestPipelineBody, MediaTypeRegistry.JSON)) .actionGet(); final BytesReference finalPipelineBody = new BytesArray("{\"processors\": [{\"final\": {\"exists\":\"request\"}}]}"); - client().admin().cluster().putPipeline(new PutPipelineRequest("final_pipeline", finalPipelineBody, XContentType.JSON)).actionGet(); + client().admin() + .cluster() + .putPipeline(new PutPipelineRequest("final_pipeline", finalPipelineBody, MediaTypeRegistry.JSON)) + .actionGet(); final Settings settings = Settings.builder().put(IndexSettings.FINAL_PIPELINE.getKey(), "final_pipeline").build(); createIndex("index", settings); final IndexRequestBuilder index = client().prepareIndex("index").setId("1"); @@ -238,10 +249,13 @@ public void testDefaultAndFinalPipeline() { final BytesReference defaultPipelineBody = new BytesArray("{\"processors\": [{\"default\": {}}]}"); client().admin() .cluster() - .putPipeline(new PutPipelineRequest("default_pipeline", defaultPipelineBody, XContentType.JSON)) + .putPipeline(new PutPipelineRequest("default_pipeline", defaultPipelineBody, MediaTypeRegistry.JSON)) .actionGet(); final BytesReference finalPipelineBody = new BytesArray("{\"processors\": [{\"final\": {\"exists\":\"default\"}}]}"); - client().admin().cluster().putPipeline(new PutPipelineRequest("final_pipeline", finalPipelineBody, XContentType.JSON)).actionGet(); + client().admin() + .cluster() + .putPipeline(new PutPipelineRequest("final_pipeline", finalPipelineBody, MediaTypeRegistry.JSON)) + .actionGet(); final Settings settings = Settings.builder() .put(IndexSettings.DEFAULT_PIPELINE.getKey(), "default_pipeline") .put(IndexSettings.FINAL_PIPELINE.getKey(), "final_pipeline") @@ -266,10 +280,13 @@ public void testDefaultAndFinalPipelineFromTemplates() { final BytesReference defaultPipelineBody = new BytesArray("{\"processors\": [{\"default\": {}}]}"); client().admin() .cluster() - .putPipeline(new PutPipelineRequest("default_pipeline", defaultPipelineBody, XContentType.JSON)) + .putPipeline(new PutPipelineRequest("default_pipeline", defaultPipelineBody, MediaTypeRegistry.JSON)) .actionGet(); final BytesReference finalPipelineBody = new BytesArray("{\"processors\": 
[{\"final\": {\"exists\":\"default\"}}]}"); - client().admin().cluster().putPipeline(new PutPipelineRequest("final_pipeline", finalPipelineBody, XContentType.JSON)).actionGet(); + client().admin() + .cluster() + .putPipeline(new PutPipelineRequest("final_pipeline", finalPipelineBody, MediaTypeRegistry.JSON)) + .actionGet(); final int lowOrder = randomIntBetween(0, Integer.MAX_VALUE - 1); final int highOrder = randomIntBetween(lowOrder + 1, Integer.MAX_VALUE); final int finalPipelineOrder; diff --git a/server/src/internalClusterTest/java/org/opensearch/index/IndexRequestBuilderIT.java b/server/src/internalClusterTest/java/org/opensearch/index/IndexRequestBuilderIT.java index 925a1b50fd6a8..57bdaff645838 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/IndexRequestBuilderIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/IndexRequestBuilderIT.java @@ -36,7 +36,7 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.query.QueryBuilders; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; @@ -54,11 +54,11 @@ public void testSetSource() throws InterruptedException, ExecutionException { map.put("test_field", "foobar"); IndexRequestBuilder[] builders = new IndexRequestBuilder[] { client().prepareIndex("test").setSource("test_field", "foobar"), - client().prepareIndex("test").setSource("{\"test_field\" : \"foobar\"}", XContentType.JSON), - client().prepareIndex("test").setSource(new BytesArray("{\"test_field\" : \"foobar\"}"), XContentType.JSON), - client().prepareIndex("test").setSource(new BytesArray("{\"test_field\" : \"foobar\"}"), XContentType.JSON), + client().prepareIndex("test").setSource("{\"test_field\" : \"foobar\"}", MediaTypeRegistry.JSON), + client().prepareIndex("test").setSource(new BytesArray("{\"test_field\" : \"foobar\"}"), MediaTypeRegistry.JSON), + client().prepareIndex("test").setSource(new BytesArray("{\"test_field\" : \"foobar\"}"), MediaTypeRegistry.JSON), client().prepareIndex("test") - .setSource(BytesReference.toBytes(new BytesArray("{\"test_field\" : \"foobar\"}")), XContentType.JSON), + .setSource(BytesReference.toBytes(new BytesArray("{\"test_field\" : \"foobar\"}")), MediaTypeRegistry.JSON), client().prepareIndex("test").setSource(map) }; indexRandom(true, builders); SearchResponse searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.termQuery("test_field", "foobar")).get(); diff --git a/server/src/internalClusterTest/java/org/opensearch/index/IndexSortIT.java b/server/src/internalClusterTest/java/org/opensearch/index/IndexSortIT.java index d547ded8152dd..369c9f9b1a653 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/IndexSortIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/IndexSortIT.java @@ -32,22 +32,39 @@ package org.opensearch.index; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.SortedSetSortField; import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.test.OpenSearchIntegTestCase; +import 
org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.hamcrest.Matchers.containsString; -public class IndexSortIT extends OpenSearchIntegTestCase { +public class IndexSortIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final XContentBuilder TEST_MAPPING = createTestMapping(); + public IndexSortIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + private static XContentBuilder createTestMapping() { try { return jsonBuilder().startObject() diff --git a/server/src/internalClusterTest/java/org/opensearch/index/IndexingPressureIT.java b/server/src/internalClusterTest/java/org/opensearch/index/IndexingPressureIT.java index b3040f832f5fd..766ae502c0f19 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/IndexingPressureIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/IndexingPressureIT.java @@ -31,7 +31,6 @@ package org.opensearch.index; -import org.opensearch.action.ActionFuture; import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; import org.opensearch.action.admin.indices.stats.ShardStats; import org.opensearch.action.bulk.BulkRequest; @@ -43,14 +42,15 @@ import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.UUIDs; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.collect.Tuple; -import org.opensearch.common.settings.Settings; import org.opensearch.common.lease.Releasable; +import org.opensearch.common.settings.Settings; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalSettingsPlugin; import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/internalClusterTest/java/org/opensearch/index/SegmentReplicationPressureIT.java b/server/src/internalClusterTest/java/org/opensearch/index/SegmentReplicationPressureIT.java index 60ff82e617dbd..033ea75b68958 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/SegmentReplicationPressureIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/SegmentReplicationPressureIT.java @@ -12,16 +12,16 @@ import org.opensearch.action.index.IndexRequest; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.UUIDs; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.lease.Releasable; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.core.rest.RestStatus; import 
org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; import org.opensearch.indices.replication.SegmentReplicationBaseIT; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.plugins.Plugin; -import org.opensearch.core.rest.RestStatus; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; @@ -37,7 +37,8 @@ import static java.util.Arrays.asList; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.index.SegmentReplicationPressureService.MAX_INDEXING_CHECKPOINTS; -import static org.opensearch.index.SegmentReplicationPressureService.MAX_REPLICATION_TIME_SETTING; +import static org.opensearch.index.SegmentReplicationPressureService.MAX_REPLICATION_LIMIT_STALE_REPLICA_SETTING; +import static org.opensearch.index.SegmentReplicationPressureService.MAX_REPLICATION_TIME_BACKPRESSURE_SETTING; import static org.opensearch.index.SegmentReplicationPressureService.SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; @@ -52,7 +53,7 @@ protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) .put(SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED.getKey(), true) - .put(MAX_REPLICATION_TIME_SETTING.getKey(), TimeValue.timeValueSeconds(1)) + .put(MAX_REPLICATION_TIME_BACKPRESSURE_SETTING.getKey(), TimeValue.timeValueSeconds(1)) .put(MAX_INDEXING_CHECKPOINTS.getKey(), MAX_CHECKPOINTS_BEHIND) .build(); } @@ -173,10 +174,10 @@ public void testAddReplicaWhileWritesBlocked() throws Exception { .prepareUpdateSettings(INDEX_NAME) .setSettings(Settings.builder().put(SETTING_NUMBER_OF_REPLICAS, 2)) ); - ensureGreen(INDEX_NAME); replicaNodes.add(replica_2); - waitForSearchableDocs(totalDocs.get(), replica_2); } + ensureGreen(INDEX_NAME); + waitForSearchableDocs(totalDocs.get(), replicaNodes); refresh(INDEX_NAME); // wait for the replicas to catch up after block is released. assertReplicaCheckpointUpdated(primaryShard); @@ -223,7 +224,10 @@ public void testBelowReplicaLimit() throws Exception { public void testFailStaleReplica() throws Exception { - Settings settings = Settings.builder().put(MAX_REPLICATION_TIME_SETTING.getKey(), TimeValue.timeValueMillis(500)).build(); + Settings settings = Settings.builder() + .put(MAX_REPLICATION_TIME_BACKPRESSURE_SETTING.getKey(), TimeValue.timeValueMillis(500)) + .put(MAX_REPLICATION_LIMIT_STALE_REPLICA_SETTING.getKey(), TimeValue.timeValueMillis(1000)) + .build(); // Starts a primary and replica node. final String primaryNode = internalCluster().startNode(settings); createIndex(INDEX_NAME); @@ -258,11 +262,13 @@ public void testFailStaleReplica() throws Exception { } public void testWithDocumentReplicationEnabledIndex() throws Exception { - assumeTrue( - "Can't create DocRep index with remote store enabled. Skipping.", - indexSettings().getAsBoolean(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, false) == false + assumeFalse( + "Skipping the test as it's not compatible with segment replication with remote store.
Cannot create DocRep indices with Remote store enabled", + segmentReplicationWithRemoteEnabled() ); - Settings settings = Settings.builder().put(MAX_REPLICATION_TIME_SETTING.getKey(), TimeValue.timeValueMillis(500)).build(); + Settings settings = Settings.builder() + .put(MAX_REPLICATION_TIME_BACKPRESSURE_SETTING.getKey(), TimeValue.timeValueMillis(500)) + .build(); // Starts a primary and replica node. final String primaryNode = internalCluster().startNode(settings); createIndex( @@ -344,7 +350,7 @@ private BulkResponse executeBulkRequest(List<String> nodes, int docsPerBatch) { private int indexUntilCheckpointCount() { int total = 0; for (int i = 0; i < MAX_CHECKPOINTS_BEHIND; i++) { - final int numDocs = randomIntBetween(1, 100); + final int numDocs = randomIntBetween(1, 5); for (int j = 0; j < numDocs; ++j) { indexDoc(); } diff --git a/server/src/internalClusterTest/java/org/opensearch/index/ShardIndexingPressureIT.java b/server/src/internalClusterTest/java/org/opensearch/index/ShardIndexingPressureIT.java index 43d8173103656..69c394d2da133 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/ShardIndexingPressureIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/ShardIndexingPressureIT.java @@ -6,30 +6,30 @@ package org.opensearch.index; import org.apache.lucene.util.RamUsageEstimator; -import org.opensearch.action.ActionFuture; import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; import org.opensearch.action.admin.indices.stats.ShardStats; import org.opensearch.action.bulk.BulkItemRequest; import org.opensearch.action.bulk.BulkRequest; +import org.opensearch.action.bulk.BulkResponse; import org.opensearch.action.bulk.BulkShardRequest; import org.opensearch.action.bulk.TransportShardBulkAction; -import org.opensearch.action.bulk.BulkResponse; import org.opensearch.action.index.IndexRequest; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.UUIDs; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.collect.Tuple; -import org.opensearch.common.settings.Settings; import org.opensearch.common.lease.Releasable; +import org.opensearch.common.settings.Settings; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.IndicesService; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalSettingsPlugin; import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/internalClusterTest/java/org/opensearch/index/ShardIndexingPressureSettingsIT.java b/server/src/internalClusterTest/java/org/opensearch/index/ShardIndexingPressureSettingsIT.java index 707a0f6ab8818..5426f4037294f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/ShardIndexingPressureSettingsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/ShardIndexingPressureSettingsIT.java @@ -6,7 +6,6 @@ package org.opensearch.index; import org.apache.lucene.util.RamUsageEstimator; -import org.opensearch.action.ActionFuture; import 
org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; import org.opensearch.action.admin.indices.stats.ShardStats; @@ -21,17 +20,18 @@ import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.UUIDs; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.collect.Tuple; -import org.opensearch.common.settings.Settings; import org.opensearch.common.lease.Releasable; +import org.opensearch.common.settings.Settings; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.IndicesService; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalSettingsPlugin; import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/internalClusterTest/java/org/opensearch/index/WaitUntilRefreshIT.java b/server/src/internalClusterTest/java/org/opensearch/index/WaitUntilRefreshIT.java index 0927d274b1265..eb1b721cdad25 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/WaitUntilRefreshIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/WaitUntilRefreshIT.java @@ -32,7 +32,6 @@ package org.opensearch.index; -import org.opensearch.action.ActionFuture; import org.opensearch.action.DocWriteResponse; import org.opensearch.action.bulk.BulkItemResponse; import org.opensearch.action.bulk.BulkRequestBuilder; @@ -42,14 +41,14 @@ import org.opensearch.action.support.WriteRequest.RefreshPolicy; import org.opensearch.action.update.UpdateResponse; import org.opensearch.client.Requests; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; -import org.opensearch.plugins.Plugin; import org.opensearch.core.rest.RestStatus; +import org.opensearch.plugins.Plugin; import org.opensearch.script.MockScriptPlugin; import org.opensearch.script.Script; import org.opensearch.script.ScriptType; import org.opensearch.test.OpenSearchIntegTestCase; - import org.junit.Before; import java.util.Collection; diff --git a/server/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecMergeIT.java b/server/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecMergeIT.java deleted file mode 100644 index 2866292e5e2e0..0000000000000 --- a/server/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecMergeIT.java +++ /dev/null @@ -1,175 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.index.codec; - -import org.opensearch.action.admin.indices.flush.FlushResponse; -import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse; -import org.opensearch.action.admin.indices.refresh.RefreshResponse; -import org.opensearch.action.admin.indices.segments.IndicesSegmentsRequest; -import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; -import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.settings.Settings; -import org.opensearch.index.engine.Segment; -import org.opensearch.test.OpenSearchIntegTestCase; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.ExecutionException; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - -import static java.util.stream.Collectors.toList; -import static org.hamcrest.Matchers.is; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_BLOCKS_METADATA; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_BLOCKS_READ; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_BLOCKS_WRITE; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_READ_ONLY; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; - -@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) -public class MultiCodecMergeIT extends OpenSearchIntegTestCase { - - public void testForceMergeMultipleCodecs() throws ExecutionException, InterruptedException { - - Map<String, String> codecMap = Map.of( - "best_compression", - "BEST_COMPRESSION", - "zstd_no_dict", - "ZSTD_NO_DICT", - "zstd", - "ZSTD", - "default", - "BEST_SPEED" - ); - - for (Map.Entry<String, String> codec : codecMap.entrySet()) { - forceMergeMultipleCodecs(codec.getKey(), codec.getValue(), codecMap); - } - - } - - private void forceMergeMultipleCodecs(String finalCodec, String finalCodecMode, Map<String, String> codecMap) throws ExecutionException, - InterruptedException { - - internalCluster().ensureAtLeastNumDataNodes(1); - final String index = "test-index" + finalCodec; - - // creating index - createIndex( - index, - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put("index.codec", "default") - .put("index.merge.policy.max_merged_segment", "1b") - .build() - ); - ensureGreen(index); - // ingesting and asserting segment codec mode for all four codecs - for (Map.Entry<String, String> codec : codecMap.entrySet()) { - useCodec(index, codec.getKey()); - ingestDocs(index); - } - - assertTrue( - getSegments(index).stream() - .flatMap(s -> s.getAttributes().values().stream()) - .collect(Collectors.toSet()) - .containsAll(codecMap.values()) - ); - - // force merge into final codec - useCodec(index, finalCodec); - flushAndRefreshIndex(index); - final ForceMergeResponse forceMergeResponse = client().admin().indices().prepareForceMerge(index).setMaxNumSegments(1).get(); - - assertThat(forceMergeResponse.getFailedShards(), is(0)); - assertThat(forceMergeResponse.getSuccessfulShards(), is(1)); - - flushAndRefreshIndex(index); - - List<Segment> segments = getSegments(index).stream().filter(Segment::isSearch).collect(Collectors.toList()); - assertEquals(1, 
segments.size()); - assertTrue(segments.stream().findFirst().get().attributes.containsValue(finalCodecMode)); - } - - private void useCodec(String index, String codec) throws ExecutionException, InterruptedException { - assertAcked(client().admin().indices().prepareClose(index)); - - assertAcked( - client().admin() - .indices() - .updateSettings(new UpdateSettingsRequest(index).settings(Settings.builder().put("index.codec", codec))) - .get() - ); - - assertAcked(client().admin().indices().prepareOpen(index)); - } - - private void ingestDocs(String index) throws InterruptedException { - ingest(index); - flushAndRefreshIndex(index); - } - - private ArrayList<Segment> getSegments(String index) { - - return new ArrayList<>( - client().admin() - .indices() - .segments(new IndicesSegmentsRequest(index)) - .actionGet() - .getIndices() - .get(index) - .getShards() - .get(0) - .getShards()[0].getSegments() - ); - } - - private void ingest(String index) throws InterruptedException { - - final int nbDocs = randomIntBetween(1, 5); - indexRandom( - randomBoolean(), - false, - randomBoolean(), - IntStream.range(0, nbDocs) - .mapToObj(i -> client().prepareIndex(index).setId(UUID.randomUUID().toString()).setSource("num", i)) - .collect(toList()) - ); - } - - private void flushAndRefreshIndex(String index) { - - // Request is not blocked - for (String blockSetting : Arrays.asList( - SETTING_BLOCKS_READ, - SETTING_BLOCKS_WRITE, - SETTING_READ_ONLY, - SETTING_BLOCKS_METADATA, - SETTING_READ_ONLY_ALLOW_DELETE - )) { - try { - enableIndexBlock(index, blockSetting); - FlushResponse flushResponse = client().admin().indices().prepareFlush(index).setForce(true).execute().actionGet(); - assertNoFailures(flushResponse); - RefreshResponse response = client().admin().indices().prepareRefresh(index).execute().actionGet(); - assertNoFailures(response); - } finally { - disableIndexBlock(index, blockSetting); - } - } - } - -} diff --git a/server/src/internalClusterTest/java/org/opensearch/index/codec/ZstdNotEnabledIT.java b/server/src/internalClusterTest/java/org/opensearch/index/codec/ZstdNotEnabledIT.java new file mode 100644 index 0000000000000..9b1fa77fc9a5a --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/index/codec/ZstdNotEnabledIT.java @@ -0,0 +1,41 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.codec; + +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.List; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) +public class ZstdNotEnabledIT extends OpenSearchIntegTestCase { + + public void testZStdCodecsWithoutPluginInstalled() { + + internalCluster().startNode(); + final String index = "test-index"; + + // creating index with zstd and zstd_no_dict should fail if custom-codecs plugin is not installed + for (String codec : List.of("zstd", "zstd_no_dict")) { + assertThrows( + IllegalArgumentException.class, + () -> createIndex( + index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.codec", codec) + .build() + ) + ); + } + } + +} diff --git a/server/src/internalClusterTest/java/org/opensearch/index/engine/MaxDocsLimitIT.java b/server/src/internalClusterTest/java/org/opensearch/index/engine/MaxDocsLimitIT.java index 2a47e6ce74e58..8321630d34229 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/engine/MaxDocsLimitIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/engine/MaxDocsLimitIT.java @@ -36,16 +36,17 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexSettings; import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.index.translog.Translog; import org.opensearch.plugins.EnginePlugin; import org.opensearch.plugins.Plugin; -import org.opensearch.core.rest.RestStatus; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; - import org.junit.After; import org.junit.Before; @@ -66,6 +67,7 @@ public class MaxDocsLimitIT extends OpenSearchIntegTestCase { private static final AtomicInteger maxDocs = new AtomicInteger(); + private static final ShardId shardId = new ShardId(new Index("test", "_na_"), 0); public static class TestEnginePlugin extends Plugin implements EnginePlugin { @Override @@ -123,7 +125,10 @@ public void testMaxDocsLimit() throws Exception { IllegalArgumentException.class, () -> client().prepareDelete("test", "any-id").get() ); - assertThat(deleteError.getMessage(), containsString("Number of documents in the index can't exceed [" + maxDocs.get() + "]")); + assertThat( + deleteError.getMessage(), + containsString("Number of documents in shard " + shardId + " exceeds the limit of [" + maxDocs.get() + "] documents per shard") + ); client().admin().indices().prepareRefresh("test").get(); SearchResponse searchResponse = client().prepareSearch("test") .setQuery(new MatchAllQueryBuilder()) @@ -204,12 +209,21 @@ static IndexingResult indexDocs(int numRequests, int numThreads) throws Exceptio phaser.arriveAndAwaitAdvance(); while (completedRequests.incrementAndGet() <= numRequests) { try { - final IndexResponse resp = client().prepareIndex("test").setSource("{}", XContentType.JSON).get(); + final IndexResponse resp = client().prepareIndex("test").setSource("{}", MediaTypeRegistry.JSON).get(); 
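// reaching this line means the engine accepted the write while the shard was still under
// maxDocs; once the per-shard limit is crossed the index request throws instead, and the
// catch branch below asserts the new shard-scoped wording of the rejection message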
numSuccess.incrementAndGet(); assertThat(resp.status(), equalTo(RestStatus.CREATED)); } catch (IllegalArgumentException e) { numFailure.incrementAndGet(); - assertThat(e.getMessage(), containsString("Number of documents in the index can't exceed [" + maxDocs.get() + "]")); + assertThat( + e.getMessage(), + containsString( + "Number of documents in shard " + + shardId + + " exceeds the limit of [" + + maxDocs.get() + + "] documents per shard" + ) + ); } } }); diff --git a/server/src/internalClusterTest/java/org/opensearch/index/mapper/CopyToMapperIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/index/mapper/CopyToMapperIntegrationIT.java index f4ccea40e6e3f..0df84261ade63 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/mapper/CopyToMapperIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/mapper/CopyToMapperIntegrationIT.java @@ -33,9 +33,8 @@ package org.opensearch.index.mapper; import org.opensearch.action.search.SearchResponse; -import org.opensearch.common.Strings; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.aggregations.AggregationBuilders; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; @@ -78,16 +77,15 @@ public void testDynamicTemplateCopyTo() throws Exception { } public void testDynamicObjectCopyTo() throws Exception { - String mapping = Strings.toString( - jsonBuilder().startObject() - .startObject("properties") - .startObject("foo") - .field("type", "text") - .field("copy_to", "root.top.child") - .endObject() - .endObject() - .endObject() - ); + String mapping = jsonBuilder().startObject() + .startObject("properties") + .startObject("foo") + .field("type", "text") + .field("copy_to", "root.top.child") + .endObject() + .endObject() + .endObject() + .toString(); assertAcked(client().admin().indices().prepareCreate("test-idx").setMapping(mapping)); client().prepareIndex("test-idx").setId("1").setSource("foo", "bar").get(); client().admin().indices().prepareRefresh("test-idx").execute().actionGet(); diff --git a/server/src/internalClusterTest/java/org/opensearch/index/mapper/DynamicMappingIT.java b/server/src/internalClusterTest/java/org/opensearch/index/mapper/DynamicMappingIT.java index f9b2fb44e24be..2267ef1bb6739 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/mapper/DynamicMappingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/mapper/DynamicMappingIT.java @@ -42,9 +42,8 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalSettingsPlugin; - +import org.opensearch.test.OpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.io.IOException; diff --git a/server/src/internalClusterTest/java/org/opensearch/index/mapper/ExternalValuesMapperIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/index/mapper/ExternalValuesMapperIntegrationIT.java index 138081e56dd63..3734bbbe8aa6c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/mapper/ExternalValuesMapperIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/mapper/ExternalValuesMapperIntegrationIT.java @@ -40,11 +40,12 @@ import 
org.opensearch.plugins.Plugin; import org.opensearch.search.fetch.subphase.highlight.HighlightBuilder; import org.opensearch.test.OpenSearchIntegTestCase; -import org.locationtech.jts.geom.Coordinate; import java.util.Collection; import java.util.Collections; +import org.locationtech.jts.geom.Coordinate; + import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; diff --git a/server/src/internalClusterTest/java/org/opensearch/index/mapper/MultiFieldsIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/index/mapper/MultiFieldsIntegrationIT.java index 9b5eb58e663fc..50440f9527a6a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/mapper/MultiFieldsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/mapper/MultiFieldsIntegrationIT.java @@ -37,9 +37,9 @@ import org.opensearch.cluster.metadata.MappingMetadata; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.unit.DistanceUnit; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.support.XContentMapValues; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.test.OpenSearchIntegTestCase; import java.io.IOException; diff --git a/server/src/internalClusterTest/java/org/opensearch/index/search/MatchPhraseQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/index/search/MatchPhraseQueryIT.java index 6d76ee48a5b95..df423edeca9c1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/search/MatchPhraseQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/search/MatchPhraseQueryIT.java @@ -32,26 +32,44 @@ package org.opensearch.index.search; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; import org.opensearch.index.query.MatchPhraseQueryBuilder; import org.opensearch.index.search.MatchQuery.ZeroTermsQuery; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.junit.Before; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import java.util.concurrent.ExecutionException; import static org.opensearch.index.query.QueryBuilders.matchPhraseQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; -public class MatchPhraseQueryIT extends OpenSearchIntegTestCase { +public class MatchPhraseQueryIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + private static final String INDEX = "test"; + public MatchPhraseQueryIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Before public void setUp() 
throws Exception { super.setUp(); diff --git a/server/src/internalClusterTest/java/org/opensearch/index/seqno/GlobalCheckpointSyncIT.java b/server/src/internalClusterTest/java/org/opensearch/index/seqno/GlobalCheckpointSyncIT.java index ce7cb81dbd2df..9388d7344cf3f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/seqno/GlobalCheckpointSyncIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/seqno/GlobalCheckpointSyncIT.java @@ -37,15 +37,15 @@ import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.translog.Translog; import org.opensearch.indices.IndicesService; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.transport.TransportService; @@ -82,7 +82,7 @@ public void testGlobalCheckpointSyncWithAsyncDurability() throws Exception { for (int j = 0; j < 10; j++) { final String id = Integer.toString(j); - client().prepareIndex("test").setId(id).setSource("{\"foo\": " + id + "}", XContentType.JSON).get(); + client().prepareIndex("test").setId(id).setSource("{\"foo\": " + id + "}", MediaTypeRegistry.JSON).get(); } assertBusy(() -> { @@ -194,7 +194,7 @@ private void runGlobalCheckpointSyncTest( } for (int j = 0; j < numberOfDocuments; j++) { final String id = Integer.toString(index * numberOfDocuments + j); - client().prepareIndex("test").setId(id).setSource("{\"foo\": " + id + "}", XContentType.JSON).get(); + client().prepareIndex("test").setId(id).setSource("{\"foo\": " + id + "}", MediaTypeRegistry.JSON).get(); } try { barrier.await(); @@ -251,7 +251,7 @@ public void testPersistGlobalCheckpoint() throws Exception { } int numDocs = randomIntBetween(1, 20); for (int i = 0; i < numDocs; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get(); + client().prepareIndex("test").setId(Integer.toString(i)).setSource("{}", MediaTypeRegistry.JSON).get(); } ensureGreen("test"); assertBusy(() -> { @@ -281,7 +281,7 @@ public void testPersistLocalCheckpoint() { logger.info("numDocs {}", numDocs); long maxSeqNo = 0; for (int i = 0; i < numDocs; i++) { - maxSeqNo = client().prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get().getSeqNo(); + maxSeqNo = client().prepareIndex("test").setId(Integer.toString(i)).setSource("{}", MediaTypeRegistry.JSON).get().getSeqNo(); logger.info("got {}", maxSeqNo); } for (IndicesService indicesService : internalCluster().getDataNodeInstances(IndicesService.class)) { diff --git a/server/src/internalClusterTest/java/org/opensearch/index/seqno/PeerRecoveryRetentionLeaseCreationIT.java b/server/src/internalClusterTest/java/org/opensearch/index/seqno/PeerRecoveryRetentionLeaseCreationIT.java index 686911f8d4741..83dd409e750b0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/seqno/PeerRecoveryRetentionLeaseCreationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/seqno/PeerRecoveryRetentionLeaseCreationIT.java @@ -38,8 +38,8 @@ import 
org.opensearch.env.NodeEnvironment; import org.opensearch.env.NodeMetadata; import org.opensearch.index.IndexSettings; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; import java.nio.file.Path; diff --git a/server/src/internalClusterTest/java/org/opensearch/index/seqno/RetentionLeaseIT.java b/server/src/internalClusterTest/java/org/opensearch/index/seqno/RetentionLeaseIT.java index e12c5d44df69b..6163edada9f6e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/seqno/RetentionLeaseIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/seqno/RetentionLeaseIT.java @@ -33,7 +33,6 @@ package org.opensearch.index.seqno; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.support.replication.ReplicationResponse; import org.opensearch.cluster.node.DiscoveryNode; @@ -41,10 +40,11 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.IndicesService; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.plugins.Plugin; diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/GlobalCheckpointListenersIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/GlobalCheckpointListenersIT.java index 76ff2f809cb83..d60e852a82ca0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/shard/GlobalCheckpointListenersIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/GlobalCheckpointListenersIT.java @@ -34,7 +34,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexService; import org.opensearch.indices.IndicesService; import org.opensearch.test.OpenSearchSingleNodeTestCase; @@ -88,7 +88,7 @@ public void accept(final long g, final Exception e) { } }, null); - client().prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get(); + client().prepareIndex("test").setId(Integer.toString(i)).setSource("{}", MediaTypeRegistry.JSON).get(); assertBusy(() -> assertThat(globalCheckpoint.get(), equalTo((long) index))); // adding a listener expecting a lower global checkpoint should fire immediately final AtomicLong immediateGlobalCheckpint = new AtomicLong(); diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java index ba9f335cd24d4..c394a1f631690 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java @@ -35,7 +35,6 @@ import org.apache.lucene.store.LockObtainFailedException; import org.opensearch.ExceptionsHelper; import org.opensearch.Version; -import 
org.opensearch.action.ActionListener; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchResponse; @@ -53,21 +52,23 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.CheckedFunction; import org.opensearch.common.CheckedRunnable; -import org.opensearch.common.Strings; import org.opensearch.common.UUIDs; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.indices.breaker.CircuitBreakerService; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; import org.opensearch.env.ShardLock; -import org.opensearch.core.index.Index; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.index.VersionType; @@ -84,16 +85,14 @@ import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.TranslogStats; import org.opensearch.indices.IndicesService; -import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.plugins.Plugin; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.test.DummyShardLock; -import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.test.IndexSettingsModule; import org.opensearch.test.InternalSettingsPlugin; - +import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.junit.Assert; import java.io.IOException; @@ -117,7 +116,6 @@ import java.util.function.Predicate; import java.util.stream.Stream; -import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiLettersOfLength; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; @@ -130,13 +128,13 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; - import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiLettersOfLength; public class IndexShardIT extends OpenSearchSingleNodeTestCase { @@ -176,7 +174,7 @@ public void testLockTryingToDelete() throws Exception { public void testDurableFlagHasEffect() throws Exception 
{ createIndex("test"); ensureGreen(); - client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get(); + client().prepareIndex("test").setId("1").setSource("{}", MediaTypeRegistry.JSON).get(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndexService test = indicesService.indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); @@ -196,7 +194,7 @@ public void testDurableFlagHasEffect() throws Exception { setDurability(shard, Translog.Durability.REQUEST); assertFalse(needsSync.test(translog)); setDurability(shard, Translog.Durability.ASYNC); - client().prepareIndex("test").setId("2").setSource("{}", XContentType.JSON).get(); + client().prepareIndex("test").setId("2").setSource("{}", MediaTypeRegistry.JSON).get(); assertTrue(needsSync.test(translog)); setDurability(shard, Translog.Durability.REQUEST); client().prepareDelete("test", "1").get(); @@ -208,7 +206,7 @@ public void testDurableFlagHasEffect() throws Exception { setDurability(shard, Translog.Durability.REQUEST); assertNoFailures( client().prepareBulk() - .add(client().prepareIndex("test").setId("3").setSource("{}", XContentType.JSON)) + .add(client().prepareIndex("test").setId("3").setSource("{}", MediaTypeRegistry.JSON)) .add(client().prepareDelete("test", "1")) .get() ); @@ -217,7 +215,7 @@ public void testDurableFlagHasEffect() throws Exception { setDurability(shard, Translog.Durability.ASYNC); assertNoFailures( client().prepareBulk() - .add(client().prepareIndex("test").setId("4").setSource("{}", XContentType.JSON)) + .add(client().prepareIndex("test").setId("4").setSource("{}", MediaTypeRegistry.JSON)) .add(client().prepareDelete("test", "3")) .get() ); @@ -255,7 +253,7 @@ public void testIndexDirIsDeletedWhenShardRemoved() throws Exception { Settings idxSettings = Settings.builder().put(IndexMetadata.SETTING_DATA_PATH, idxPath).build(); createIndex("test", idxSettings); ensureGreen("test"); - client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get(); + client().prepareIndex("test").setId("1").setSource("{}", MediaTypeRegistry.JSON).setRefreshPolicy(IMMEDIATE).get(); SearchResponse response = client().prepareSearch("test").get(); assertHitCount(response, 1L); client().admin().indices().prepareDelete("test").get(); @@ -271,7 +269,7 @@ public void testExpectedShardSizeIsPresent() throws InterruptedException { .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)) ); for (int i = 0; i < 50; i++) { - client().prepareIndex("test").setSource("{}", XContentType.JSON).get(); + client().prepareIndex("test").setSource("{}", MediaTypeRegistry.JSON).get(); } ensureGreen("test"); InternalClusterInfoService clusterInfoService = (InternalClusterInfoService) getInstanceFromNode(ClusterInfoService.class); @@ -394,14 +392,14 @@ public void testMaybeFlush() throws Exception { .get(); client().prepareIndex("test") .setId("0") - .setSource("{}", XContentType.JSON) + .setSource("{}", MediaTypeRegistry.JSON) .setRefreshPolicy(randomBoolean() ? 
IMMEDIATE : NONE) .get(); assertFalse(shard.shouldPeriodicallyFlush()); shard.applyIndexOperationOnPrimary( Versions.MATCH_ANY, VersionType.INTERNAL, - new SourceToParse("test", "1", new BytesArray("{}"), XContentType.JSON), + new SourceToParse("test", "1", new BytesArray("{}"), MediaTypeRegistry.JSON), SequenceNumbers.UNASSIGNED_SEQ_NO, 0, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, @@ -413,7 +411,7 @@ public void testMaybeFlush() throws Exception { assertThat(shard.flushStats().getTotal(), equalTo(0L)); client().prepareIndex("test") .setId("2") - .setSource("{}", XContentType.JSON) + .setSource("{}", MediaTypeRegistry.JSON) .setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE) .get(); assertThat(shard.getLastKnownGlobalCheckpoint(), equalTo(2L)); @@ -454,7 +452,7 @@ public void testMaybeFlush() throws Exception { final FlushStats flushStats = shard.flushStats(); logger.info( "--> translog stats [{}] gen [{}] commit_stats [{}] flush_stats [{}/{}]", - Strings.toString(XContentType.JSON, translogStats), + Strings.toString(MediaTypeRegistry.JSON, translogStats), translog.getGeneration().translogFileGeneration, commitStats.getUserData(), flushStats.getPeriodic(), @@ -486,7 +484,7 @@ public void testMaybeRollTranslogGeneration() throws Exception { final Engine.IndexResult result = shard.applyIndexOperationOnPrimary( Versions.MATCH_ANY, VersionType.INTERNAL, - new SourceToParse("test", "1", new BytesArray("{}"), XContentType.JSON), + new SourceToParse("test", "1", new BytesArray("{}"), MediaTypeRegistry.JSON), SequenceNumbers.UNASSIGNED_SEQ_NO, 0, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, @@ -522,7 +520,7 @@ public void testStressMaybeFlushOrRollTranslogGeneration() throws Exception { client().admin().indices().prepareUpdateSettings("test").setSettings(settings).get(); client().prepareIndex("test") .setId("0") - .setSource("{}", XContentType.JSON) + .setSource("{}", MediaTypeRegistry.JSON) .setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE) .get(); assertFalse(shard.shouldPeriodicallyFlush()); @@ -547,7 +545,7 @@ public void testStressMaybeFlushOrRollTranslogGeneration() throws Exception { final CheckedRunnable<Exception> check; if (flush) { final FlushStats initialStats = shard.flushStats(); - client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get(); + client().prepareIndex("test").setId("1").setSource("{}", MediaTypeRegistry.JSON).get(); check = () -> { assertFalse(shard.shouldPeriodicallyFlush()); final FlushStats currentStats = shard.flushStats(); @@ -572,7 +570,7 @@ public void testStressMaybeFlushOrRollTranslogGeneration() throws Exception { }; } else { final long generation = getTranslog(shard).currentFileGeneration(); - client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get(); + client().prepareIndex("test").setId("1").setSource("{}", MediaTypeRegistry.JSON).get(); check = () -> { assertFalse(shard.shouldRollTranslogGeneration()); assertEquals(generation + 1, getTranslog(shard).currentFileGeneration()); @@ -593,7 +591,7 @@ public void testFlushStats() throws Exception { client().admin().indices().prepareUpdateSettings("test").setSettings(settings).get(); final int numDocs = between(10, 100); for (int i = 0; i < numDocs; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get(); + client().prepareIndex("test").setId(Integer.toString(i)).setSource("{}", MediaTypeRegistry.JSON).get(); } // A flush stats may include the new total count but the old period count - assert eventually. 
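// (Assumed FlushStats semantics, consistent with the assertions in this test: getTotal() counts
// every flush, while getPeriodic() counts only flushes triggered by the
// index.translog.flush_threshold_size threshold, so total >= periodic at all times and, after
// the explicit flush further below, strictly greater.)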
assertBusy(() -> { @@ -604,7 +602,7 @@ public void testFlushStats() throws Exception { settings = Settings.builder().put("index.translog.flush_threshold_size", (String) null).build(); client().admin().indices().prepareUpdateSettings("test").setSettings(settings).get(); - client().prepareIndex("test").setId(UUIDs.randomBase64UUID()).setSource("{}", XContentType.JSON).get(); + client().prepareIndex("test").setId(UUIDs.randomBase64UUID()).setSource("{}", MediaTypeRegistry.JSON).get(); client().admin().indices().prepareFlush("test").setForce(randomBoolean()).setWaitIfOngoing(true).get(); final FlushStats flushStats = client().admin().indices().prepareStats("test").clear().setFlush(true).get().getTotal().flush; assertThat(flushStats.getTotal(), greaterThan(flushStats.getPeriodic())); @@ -616,9 +614,9 @@ public void testShardHasMemoryBufferOnTranslogRecover() throws Throwable { IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndexService indexService = indicesService.indexService(resolveIndex("test")); IndexShard shard = indexService.getShardOrNull(0); - client().prepareIndex("test").setId("0").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); + client().prepareIndex("test").setId("0").setSource("{\"foo\" : \"bar\"}", MediaTypeRegistry.JSON).get(); client().prepareDelete("test", "0").get(); - client().prepareIndex("test").setId("1").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get(); + client().prepareIndex("test").setId("1").setSource("{\"foo\" : \"bar\"}", MediaTypeRegistry.JSON).setRefreshPolicy(IMMEDIATE).get(); CheckedFunction<DirectoryReader, DirectoryReader, IOException> wrapper = directoryReader -> directoryReader; shard.close("simon says", false, false); @@ -652,7 +650,15 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Engine.DeleteResul } } }; - final IndexShard newShard = newIndexShard(indexService, shard, wrapper, getInstanceFromNode(CircuitBreakerService.class), listener); + NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class); + final IndexShard newShard = newIndexShard( + indexService, + shard, + wrapper, + getInstanceFromNode(CircuitBreakerService.class), + env.nodeId(), + listener + ); shardRef.set(newShard); recoverShard(newShard); @@ -676,6 +682,7 @@ public static final IndexShard newIndexShard( final IndexShard shard, CheckedFunction<DirectoryReader, DirectoryReader, IOException> wrapper, final CircuitBreakerService cbs, + final String nodeId, final IndexingOperationListener... 
listeners ) throws IOException { ShardRouting initializingShardRouting = getInitializingShardRouting(shard.routingEntry()); @@ -703,6 +710,9 @@ public static final IndexShard newIndexShard( (indexSettings, shardRouting) -> new InternalTranslogFactory(), SegmentReplicationCheckpointPublisher.EMPTY, null, + null, + () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, + nodeId, null ); } @@ -734,7 +744,7 @@ public void testInvalidateIndicesRequestCacheWhenRollbackEngine() throws Excepti final SearchRequest countRequest = new SearchRequest("test").source(new SearchSourceBuilder().size(0)); final long numDocs = between(10, 20); for (int i = 0; i < numDocs; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get(); + client().prepareIndex("test").setId(Integer.toString(i)).setSource("{}", MediaTypeRegistry.JSON).get(); if (randomBoolean()) { shard.refresh("test"); } @@ -756,7 +766,7 @@ public void testInvalidateIndicesRequestCacheWhenRollbackEngine() throws Excepti final long moreDocs = between(10, 20); for (int i = 0; i < moreDocs; i++) { - client().prepareIndex("test").setId(Long.toString(i + numDocs)).setSource("{}", XContentType.JSON).get(); + client().prepareIndex("test").setId(Long.toString(i + numDocs)).setSource("{}", MediaTypeRegistry.JSON).get(); if (randomBoolean()) { shard.refresh("test"); } @@ -787,7 +797,7 @@ public void testShardChangesWithDefaultDocType() throws Exception { int numOps = between(1, 10); for (int i = 0; i < numOps; i++) { if (randomBoolean()) { - client().prepareIndex("index").setId(randomFrom("1", "2")).setSource("{}", XContentType.JSON).get(); + client().prepareIndex("index").setId(randomFrom("1", "2")).setSource("{}", MediaTypeRegistry.JSON).get(); } else { client().prepareDelete("index", randomFrom("1", "2")).get(); } @@ -850,7 +860,7 @@ public void testLimitNumberOfRetainedTranslogFiles() throws Exception { } }; for (int i = 0; i < 100; i++) { - client().prepareIndex(indexName).setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get(); + client().prepareIndex(indexName).setId(Integer.toString(i)).setSource("{}", MediaTypeRegistry.JSON).get(); if (randomInt(100) < 10) { client().admin().indices().prepareFlush(indexName).setWaitIfOngoing(true).get(); checkTranslog.run(); diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandIT.java index b6124ff09d992..b431079476624 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandIT.java @@ -32,6 +32,7 @@ package org.opensearch.index.shard; import com.carrotsearch.randomizedtesting.generators.RandomPicks; + import joptsimple.OptionParser; import joptsimple.OptionSet; import org.apache.lucene.index.IndexWriter; @@ -40,9 +41,7 @@ import org.apache.lucene.store.Lock; import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.store.NativeFSLockFactory; - import org.opensearch.ExceptionsHelper; - import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplanation; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.opensearch.action.admin.indices.flush.FlushRequest; @@ -65,16 +64,16 @@ import org.opensearch.cluster.routing.allocation.command.AllocateStalePrimaryAllocationCommand; 
import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; import org.opensearch.env.TestEnvironment; import org.opensearch.gateway.GatewayMetaState; -import org.opensearch.core.index.Index; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.MockEngineFactoryPlugin; import org.opensearch.index.seqno.SeqNoStats; import org.opensearch.index.translog.TestTranslog; @@ -83,9 +82,9 @@ import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.plugins.Plugin; import org.opensearch.test.CorruptionUtils; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalSettingsPlugin; import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.engine.MockEngineSupport; import org.opensearch.test.transport.MockTransportService; @@ -105,11 +104,10 @@ import java.util.stream.StreamSupport; import static org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.FS; -import static org.opensearch.common.util.CollectionUtils.iterableAsArrayList; +import static org.opensearch.core.common.util.CollectionUtils.iterableAsArrayList; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; - import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -137,7 +135,7 @@ public void testCorruptIndex() throws Exception { Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "-1") .put(MockEngineSupport.DISABLE_FLUSH_ON_CLOSE.getKey(), true) .put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), "checksum") diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/SearchIdleIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/SearchIdleIT.java index 9382960b906e3..43d86b232de77 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/shard/SearchIdleIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/SearchIdleIT.java @@ -32,13 +32,13 @@ package org.opensearch.index.shard; -import org.opensearch.action.ActionListener; import org.opensearch.action.get.GetRequest; import org.opensearch.action.get.MultiGetRequest; import org.opensearch.action.index.IndexResponse; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import 
org.opensearch.test.OpenSearchSingleNodeTestCase; @@ -102,7 +102,7 @@ private void runTestAutomaticRefresh(final IntToLongFunction count) throws Inter int numDocs = scaledRandomIntBetween(25, 100); totalNumDocs.set(numDocs); CountDownLatch indexingDone = new CountDownLatch(numDocs); - client().prepareIndex("test").setId("0").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); + client().prepareIndex("test").setId("0").setSource("{\"foo\" : \"bar\"}", MediaTypeRegistry.JSON).get(); indexingDone.countDown(); // one doc is indexed above blocking IndexShard shard = indexService.getShard(0); boolean hasRefreshed = shard.scheduledRefresh(); @@ -135,7 +135,7 @@ private void runTestAutomaticRefresh(final IntToLongFunction count) throws Inter for (int i = 1; i < numDocs; i++) { client().prepareIndex("test") .setId("" + i) - .setSource("{\"foo\" : \"bar\"}", XContentType.JSON) + .setSource("{\"foo\" : \"bar\"}", MediaTypeRegistry.JSON) .execute(new ActionListener<IndexResponse>() { @Override public void onResponse(IndexResponse indexResponse) { @@ -159,7 +159,7 @@ public void testPendingRefreshWithIntervalChange() throws Exception { IndexService indexService = createIndex("test", builder.build()); assertFalse(indexService.getIndexSettings().isExplicitRefresh()); ensureGreen(); - client().prepareIndex("test").setId("0").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); + client().prepareIndex("test").setId("0").setSource("{\"foo\" : \"bar\"}", MediaTypeRegistry.JSON).get(); IndexShard shard = indexService.getShard(0); assertFalse(shard.scheduledRefresh()); assertTrue(shard.isSearchIdle()); @@ -167,7 +167,7 @@ public void testPendingRefreshWithIntervalChange() throws Exception { client().admin().indices().prepareRefresh().execute(ActionListener.wrap(refreshLatch::countDown));// async on purpose to make sure // it happens concurrently assertHitCount(client().prepareSearch().get(), 1); - client().prepareIndex("test").setId("1").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); + client().prepareIndex("test").setId("1").setSource("{\"foo\" : \"bar\"}", MediaTypeRegistry.JSON).get(); assertFalse(shard.scheduledRefresh()); assertTrue(shard.hasRefreshPending()); @@ -186,7 +186,7 @@ public void testPendingRefreshWithIntervalChange() throws Exception { // We need to ensure a `scheduledRefresh` triggered by the internal refresh setting update is executed before we index a new doc; // otherwise, it will compete to call `Engine#maybeRefresh` with the `scheduledRefresh` that we are going to verify. 
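// (Assumed search-idle contract exercised by these assertions: scheduledRefresh() returns true
// only when it actually refreshed the shard, and a write against a search-idle shard is held as
// a pending refresh, observable via hasRefreshPending(), until a search or a scheduled refresh
// wakes the shard up again.)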
ensureNoPendingScheduledRefresh(indexService.getThreadPool()); - client().prepareIndex("test").setId("2").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); + client().prepareIndex("test").setId("2").setSource("{\"foo\" : \"bar\"}", MediaTypeRegistry.JSON).get(); assertTrue(shard.scheduledRefresh()); assertFalse(shard.hasRefreshPending()); assertTrue(shard.isSearchIdle()); diff --git a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java index d51e4bbff11b5..f46f413f4d23f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java @@ -32,14 +32,13 @@ package org.opensearch.index.store; import com.carrotsearch.randomizedtesting.generators.RandomPicks; + import org.apache.lucene.index.CheckIndex; import org.apache.lucene.index.SegmentCommitInfo; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FSDirectory; import org.apache.lucene.util.BytesRef; - -import org.hamcrest.MatcherAssert; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -61,34 +60,35 @@ import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.opensearch.common.Nullable; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.io.PathUtils; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; -import org.opensearch.env.NodeEnvironment; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; -import org.opensearch.core.index.shard.ShardId; -import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.FileChunkRequest; +import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.monitor.fs.FsInfo; import org.opensearch.plugins.Plugin; import org.opensearch.snapshots.SnapshotState; import org.opensearch.test.CorruptionUtils; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalSettingsPlugin; import org.opensearch.test.MockIndexEventListener; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.store.MockFSIndexStore; import org.opensearch.test.transport.MockTransportService; import org.opensearch.transport.TransportService; +import org.hamcrest.MatcherAssert; import java.io.IOException; import java.io.OutputStream; @@ 
-112,11 +112,10 @@ import java.util.stream.Collectors; import static org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.FS; -import static org.opensearch.common.util.CollectionUtils.iterableAsArrayList; +import static org.opensearch.core.common.util.CollectionUtils.iterableAsArrayList; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAllSuccessful; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; - import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -168,7 +167,7 @@ public void testCorruptFileAndRecover() throws ExecutionException, InterruptedEx Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "1") - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) // no checkindex - we corrupt shards on purpose .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no translog based flush - it might change the .liv / segments.N files @@ -287,7 +286,7 @@ public void testCorruptPrimaryNoReplica() throws ExecutionException, Interrupted prepareCreate("test").setSettings( Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no checkindex - we corrupt shards on // purpose // no translog based flush - it might change the .liv / segments.N files @@ -553,7 +552,7 @@ public void testCorruptFileThenSnapshotAndRestore() throws ExecutionException, I prepareCreate("test").setSettings( Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") // no replicas for this test - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) // no checkindex - we corrupt shards on purpose .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no translog based flush - it might change the .liv / segments.N files @@ -625,7 +624,7 @@ public void testReplicaCorruption() throws Exception { prepareCreate("test").setSettings( Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, cluster().numDataNodes() - 1) - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no checkindex - we corrupt shards on // purpose .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)) // no @@ -711,6 +710,7 @@ public void testPrimaryCorruptionDuringReplicationDoesNotFailReplicaShard() thro final NodeStats primaryNode = dataNodeStats.get(0); final NodeStats replicaNode = dataNodeStats.get(1); + assertAcked( prepareCreate("test").setSettings( Settings.builder() @@ -796,6 +796,17 @@ public void testPrimaryCorruptionDuringReplicationDoesNotFailReplicaShard() thro // Assert the cluster returns to green status because the replica will be promoted to primary ensureGreen(); + + // After Lucene 9.9 check index will flag corruption with old (not the latest) commit points. + // For this test our previous corrupt commit "segments_2" will remain on the primary. 
+ // This asserts this is the case, and then resets check index status. + assertEquals("Check index has a single failure", 1, checkIndexFailures.size()); + assertTrue( + checkIndexFailures.get(0) + .getMessage() + .contains("could not read old (not latest) commit point segments file \"segments_2\" in directory") + ); + resetCheckIndexStatus(); } private int numShards(String... index) { diff --git a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedTranslogIT.java b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedTranslogIT.java index 1dd0f6a3d664e..f749593de13d2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedTranslogIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedTranslogIT.java @@ -38,16 +38,16 @@ import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.index.IndexSettings; import org.opensearch.index.MockEngineFactoryPlugin; import org.opensearch.index.translog.TestTranslog; import org.opensearch.index.translog.TranslogCorruptedException; import org.opensearch.indices.IndicesService; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.engine.MockEngineSupport; import org.opensearch.test.transport.MockTransportService; diff --git a/server/src/internalClusterTest/java/org/opensearch/index/suggest/stats/SuggestStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/index/suggest/stats/SuggestStatsIT.java index 9940b1eb13a52..a1ff2da249d69 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/suggest/stats/SuggestStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/suggest/stats/SuggestStatsIT.java @@ -32,6 +32,8 @@ package org.opensearch.index.suggest.stats; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; @@ -47,12 +49,16 @@ import org.opensearch.search.suggest.phrase.PhraseSuggestionBuilder; import org.opensearch.search.suggest.term.TermSuggestionBuilder; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; +import java.util.Arrays; +import java.util.Collection; import java.util.HashSet; import java.util.Set; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAllSuccessful; import static org.hamcrest.Matchers.equalTo; @@ -61,7 +67,20 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo; @OpenSearchIntegTestCase.ClusterScope(minNumDataNodes = 2) 
-public class SuggestStatsIT extends OpenSearchIntegTestCase { +public class SuggestStatsIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public SuggestStatsIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override protected int numberOfReplicas() { return 0; diff --git a/server/src/internalClusterTest/java/org/opensearch/indexing/IndexActionIT.java b/server/src/internalClusterTest/java/org/opensearch/indexing/IndexActionIT.java index 73d6d9aff7b72..8cb54631b593f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indexing/IndexActionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indexing/IndexActionIT.java @@ -32,19 +32,23 @@ package org.opensearch.indexing; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.DocWriteResponse; import org.opensearch.action.bulk.BulkResponse; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.index.IndexResponse; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.metadata.MetadataCreateIndexService; +import org.opensearch.common.settings.Settings; import org.opensearch.index.VersionType; import org.opensearch.index.mapper.MapperParsingException; import org.opensearch.indices.InvalidIndexNameException; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; import java.util.ArrayList; +import java.util.Collection; import java.util.List; import java.util.Locale; import java.util.Random; @@ -57,7 +61,17 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; -public class IndexActionIT extends OpenSearchIntegTestCase { +public class IndexActionIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public IndexActionIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return replicationSettings; + } + /** * This test tries to simulate load while creating an index and indexing documents * while the index is being created. 
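The SuggestStatsIT and IndexActionIT hunks above, like MatchPhraseQueryIT and IndicesRequestCacheIT elsewhere in this change, apply the same conversion: the test extends ParameterizedStaticSettingsOpenSearchIntegTestCase, accepts its static Settings through the constructor, and publishes a @ParametersFactory so the randomized runner executes the whole suite once per settings combination. A minimal sketch of the pattern follows; FooIT is a placeholder class name, everything else mirrors the hunks above:

```java
import java.util.Arrays;
import java.util.Collection;

import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;

import org.opensearch.common.settings.Settings;
import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;

import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;

public class FooIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase {

    // The randomized runner instantiates the class once per Object[] returned by
    // parameters(), passing the Settings through to the parameterized base class.
    public FooIT(Settings staticSettings) {
        super(staticSettings);
    }

    // Every test method runs twice: with concurrent segment search disabled and enabled.
    @ParametersFactory
    public static Collection<Object[]> parameters() {
        return Arrays.asList(
            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
        );
    }
}
```

IndexActionIT uses the second variant visible above, returning the base class's shared replicationSettings list so the suite is parameterized over replication strategies instead.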
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/DateMathIndexExpressionsIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/DateMathIndexExpressionsIntegrationIT.java index 7236c32697384..2f348e5d6218d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/DateMathIndexExpressionsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/DateMathIndexExpressionsIntegrationIT.java @@ -41,9 +41,8 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.test.OpenSearchIntegTestCase; - import org.joda.time.DateTime; import org.joda.time.DateTimeZone; import org.joda.time.format.DateTimeFormat; @@ -71,9 +70,9 @@ public void testIndexNameDateMathExpressions() { String dateMathExp1 = "<.marvel-{now/d}>"; String dateMathExp2 = "<.marvel-{now/d-1d}>"; String dateMathExp3 = "<.marvel-{now/d-2d}>"; - client().prepareIndex(dateMathExp1).setId("1").setSource("{}", XContentType.JSON).get(); - client().prepareIndex(dateMathExp2).setId("2").setSource("{}", XContentType.JSON).get(); - client().prepareIndex(dateMathExp3).setId("3").setSource("{}", XContentType.JSON).get(); + client().prepareIndex(dateMathExp1).setId("1").setSource("{}", MediaTypeRegistry.JSON).get(); + client().prepareIndex(dateMathExp2).setId("2").setSource("{}", MediaTypeRegistry.JSON).get(); + client().prepareIndex(dateMathExp3).setId("3").setSource("{}", MediaTypeRegistry.JSON).get(); refresh(); SearchResponse searchResponse = client().prepareSearch(dateMathExp1, dateMathExp2, dateMathExp3).get(); @@ -131,9 +130,9 @@ public void testAutoCreateIndexWithDateMathExpression() throws Exception { String dateMathExp1 = "<.marvel-{now/d}>"; String dateMathExp2 = "<.marvel-{now/d-1d}>"; String dateMathExp3 = "<.marvel-{now/d-2d}>"; - client().prepareIndex(dateMathExp1).setId("1").setSource("{}", XContentType.JSON).get(); - client().prepareIndex(dateMathExp2).setId("2").setSource("{}", XContentType.JSON).get(); - client().prepareIndex(dateMathExp3).setId("3").setSource("{}", XContentType.JSON).get(); + client().prepareIndex(dateMathExp1).setId("1").setSource("{}", MediaTypeRegistry.JSON).get(); + client().prepareIndex(dateMathExp2).setId("2").setSource("{}", MediaTypeRegistry.JSON).get(); + client().prepareIndex(dateMathExp3).setId("3").setSource("{}", MediaTypeRegistry.JSON).get(); refresh(); SearchResponse searchResponse = client().prepareSearch(dateMathExp1, dateMathExp2, dateMathExp3).get(); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesLifecycleListenerIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesLifecycleListenerIT.java index e427fb8749e64..e383bbdded31e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesLifecycleListenerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesLifecycleListenerIT.java @@ -45,16 +45,15 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.common.Strings; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; -import org.opensearch.core.index.shard.ShardId; import 
org.opensearch.plugins.Plugin; +import org.opensearch.test.MockIndexEventListener; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; -import org.opensearch.test.MockIndexEventListener; - import org.hamcrest.Matchers; import java.util.Arrays; diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesOptionsIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesOptionsIntegrationIT.java index 4d0399080b814..06d2d2a90de87 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesOptionsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesOptionsIntegrationIT.java @@ -58,8 +58,8 @@ import org.opensearch.core.common.Strings; import org.opensearch.index.IndexNotFoundException; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java index 12fee85288bc2..52b4dad553180 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java @@ -32,6 +32,8 @@ package org.opensearch.indices; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.opensearch.action.search.SearchResponse; @@ -40,13 +42,14 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.cache.request.RequestCacheStats; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; import java.time.ZoneId; @@ -54,8 +57,10 @@ import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; import java.util.Arrays; +import java.util.Collection; import java.util.List; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; import static org.opensearch.search.aggregations.AggregationBuilders.dateRange; import static org.opensearch.search.aggregations.AggregationBuilders.filter; @@ -64,7 +69,25 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; -public class IndicesRequestCacheIT extends OpenSearchIntegTestCase { +public class IndicesRequestCacheIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + public 
IndicesRequestCacheIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }, + new Object[] { Settings.builder().put(FeatureFlags.PLUGGABLE_CACHE, "true").build() }, + new Object[] { Settings.builder().put(FeatureFlags.PLUGGABLE_CACHE, "false").build() } + ); + } + + @Override + protected boolean useRandomReplicationStrategy() { + return true; + } // One of the primary purposes of the query cache is to cache aggs results public void testCacheAggs() throws Exception { @@ -165,7 +188,7 @@ public void testQueryRewrite() throws Exception { // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get(); OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); - refresh(); + refreshAndWaitForReplication(); ensureSearchable("index"); assertCacheState(client, "index", 0, 0); @@ -235,7 +258,7 @@ public void testQueryRewriteMissingValues() throws Exception { // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get(); OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); - refresh(); + refreshAndWaitForReplication(); ensureSearchable("index"); assertCacheState(client, "index", 0, 0); @@ -301,7 +324,7 @@ public void testQueryRewriteDates() throws Exception { // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get(); OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); - refresh(); + refreshAndWaitForReplication(); ensureSearchable("index"); assertCacheState(client, "index", 0, 0); @@ -374,7 +397,7 @@ public void testQueryRewriteDatesWithNow() throws Exception { .setFlush(true) .get(); OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); - refresh(); + refreshAndWaitForReplication(); ensureSearchable("index-1", "index-2", "index-3"); assertCacheState(client, "index-1", 0, 0); @@ -445,7 +468,7 @@ public void testCanCache() throws Exception { // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get(); OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); - refresh(); + refreshAndWaitForReplication(); ensureSearchable("index"); assertCacheState(client, "index", 0, 0); @@ -518,7 +541,7 @@ public void testCanCache() throws Exception { assertCacheState(client, "index", 0, 4); } - public void testCacheWithFilteredAlias() { + public void testCacheWithFilteredAlias() throws InterruptedException { Client client = client(); Settings settings = Settings.builder() .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true) @@ -539,7 +562,9 @@ public void testCacheWithFilteredAlias() { // Force merge the index to ensure there can be 
no background merges during the subsequent searches that would invalidate the cache ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge("index").setFlush(true).get(); OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); - refresh(); + refreshAndWaitForReplication(); + + indexRandomForConcurrentSearch("index"); assertCacheState(client, "index", 0, 0); @@ -613,6 +638,45 @@ public void testProfileDisableCache() throws Exception { } } + public void testCacheWithInvalidation() throws Exception { + Client client = client(); + assertAcked( + client.admin() + .indices() + .prepareCreate("index") + .setMapping("k", "type=keyword") + .setSettings( + Settings.builder() + .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + ) + .get() + ); + indexRandom(true, client.prepareIndex("index").setSource("k", "hello")); + ensureSearchable("index"); + SearchResponse resp = client.prepareSearch("index").setRequestCache(true).setQuery(QueryBuilders.termQuery("k", "hello")).get(); + assertSearchResponse(resp); + OpenSearchAssertions.assertAllSuccessful(resp); + assertThat(resp.getHits().getTotalHits().value, equalTo(1L)); + + assertCacheState(client, "index", 0, 1); + // Index but don't refresh + indexRandom(false, client.prepareIndex("index").setSource("k", "hello2")); + resp = client.prepareSearch("index").setRequestCache(true).setQuery(QueryBuilders.termQuery("k", "hello")).get(); + assertSearchResponse(resp); + // Should expect hit as here as refresh didn't happen + assertCacheState(client, "index", 1, 1); + + // Explicit refresh would invalidate cache + refreshAndWaitForReplication(); + // Hit same query again + resp = client.prepareSearch("index").setRequestCache(true).setQuery(QueryBuilders.termQuery("k", "hello")).get(); + assertSearchResponse(resp); + // Should expect miss as key has changed due to change in IndexReader.CacheKey (due to refresh) + assertCacheState(client, "index", 1, 2); + } + private static void assertCacheState(Client client, String index, long expectedHits, long expectedMisses) { RequestCacheStats requestCacheStats = client.admin() .indices() @@ -627,6 +691,7 @@ private static void assertCacheState(Client client, String index, long expectedH Arrays.asList(expectedHits, expectedMisses, 0L), Arrays.asList(requestCacheStats.getHitCount(), requestCacheStats.getMissCount(), requestCacheStats.getEvictions()) ); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/analyze/AnalyzeActionIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/analyze/AnalyzeActionIT.java index 1d25051eefe44..2e6bc7db6cae7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/analyze/AnalyzeActionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/analyze/AnalyzeActionIT.java @@ -37,9 +37,9 @@ import org.opensearch.action.admin.indices.analyze.AnalyzeRequestBuilder; import org.opensearch.common.settings.Settings; import org.opensearch.plugins.Plugin; +import org.opensearch.test.MockKeywordPlugin; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; -import org.opensearch.test.MockKeywordPlugin; import org.hamcrest.core.IsNull; import java.io.IOException; diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/mapping/ConcurrentDynamicTemplateIT.java 
b/server/src/internalClusterTest/java/org/opensearch/indices/mapping/ConcurrentDynamicTemplateIT.java index e731b0074f04d..f732f5c41dec0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/mapping/ConcurrentDynamicTemplateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/mapping/ConcurrentDynamicTemplateIT.java @@ -32,8 +32,8 @@ package org.opensearch.indices.mapping; -import org.opensearch.action.ActionListener; import org.opensearch.action.index.IndexResponse; +import org.opensearch.core.action.ActionListener; import org.opensearch.index.query.QueryBuilders; import org.opensearch.test.OpenSearchIntegTestCase; diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/mapping/SimpleGetFieldMappingsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/mapping/SimpleGetFieldMappingsIT.java index 30c6a0dc068e5..c6519c1fd851b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/mapping/SimpleGetFieldMappingsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/mapping/SimpleGetFieldMappingsIT.java @@ -34,14 +34,13 @@ import org.opensearch.action.admin.indices.mapping.get.GetFieldMappingsResponse; import org.opensearch.action.admin.indices.mapping.get.GetFieldMappingsResponse.FieldMappingMetadata; -import org.opensearch.common.Strings; -import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; @@ -187,22 +186,22 @@ public void testSimpleGetFieldMappingsWithPretty() throws Exception { .get(); XContentBuilder responseBuilder = XContentFactory.jsonBuilder().prettyPrint(); response.toXContent(responseBuilder, new ToXContent.MapParams(params)); - String responseStrings = Strings.toString(responseBuilder); + String responseStrings = responseBuilder.toString(); XContentBuilder prettyJsonBuilder = XContentFactory.jsonBuilder().prettyPrint(); prettyJsonBuilder.copyCurrentStructure(createParser(JsonXContent.jsonXContent, responseStrings)); - assertThat(responseStrings, equalTo(Strings.toString(prettyJsonBuilder))); + assertThat(responseStrings, equalTo(prettyJsonBuilder.toString())); params.put("pretty", "false"); response = client().admin().indices().prepareGetFieldMappings("index").setFields("field1", "obj.subfield").get(); responseBuilder = XContentFactory.jsonBuilder().prettyPrint().lfAtEnd(); response.toXContent(responseBuilder, new ToXContent.MapParams(params)); - responseStrings = Strings.toString(responseBuilder); + responseStrings = responseBuilder.toString(); prettyJsonBuilder = XContentFactory.jsonBuilder().prettyPrint(); prettyJsonBuilder.copyCurrentStructure(createParser(JsonXContent.jsonXContent, responseStrings)); - assertThat(responseStrings, not(equalTo(Strings.toString(prettyJsonBuilder)))); + assertThat(responseStrings, not(equalTo(prettyJsonBuilder.toString()))); } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/mapping/SimpleGetMappingsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/mapping/SimpleGetMappingsIT.java index
01e5fdb668d8b..4ca808a1e66e6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/mapping/SimpleGetMappingsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/mapping/SimpleGetMappingsIT.java @@ -38,8 +38,8 @@ import org.opensearch.common.Priority; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/mapping/UpdateMappingIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/mapping/UpdateMappingIntegrationIT.java index da3dcdc6b750e..575c1956d0fda 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/mapping/UpdateMappingIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/mapping/UpdateMappingIntegrationIT.java @@ -43,17 +43,16 @@ import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.common.xcontent.support.XContentMapValues; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexService; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; import org.opensearch.indices.IndicesService; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalSettingsPlugin; - +import org.opensearch.test.OpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.util.ArrayList; @@ -153,7 +152,7 @@ public void testUpdateMappingWithoutType() { AcknowledgedResponse putMappingResponse = client().admin() .indices() .preparePutMapping("test") - .setSource("{\"properties\":{\"date\":{\"type\":\"integer\"}}}", XContentType.JSON) + .setSource("{\"properties\":{\"date\":{\"type\":\"integer\"}}}", MediaTypeRegistry.JSON) .execute() .actionGet(); @@ -178,7 +177,7 @@ public void testUpdateMappingWithoutTypeMultiObjects() { AcknowledgedResponse putMappingResponse = client().admin() .indices() .preparePutMapping("test") - .setSource("{\"properties\":{\"date\":{\"type\":\"integer\"}}}", XContentType.JSON) + .setSource("{\"properties\":{\"date\":{\"type\":\"integer\"}}}", MediaTypeRegistry.JSON) .execute() .actionGet(); @@ -207,7 +206,7 @@ public void testUpdateMappingWithConflicts() { .preparePutMapping("test") .setSource( "{\"" + MapperService.SINGLE_MAPPING_NAME + "\":{\"properties\":{\"body\":{\"type\":\"integer\"}}}}", - XContentType.JSON + MediaTypeRegistry.JSON ) .execute() .actionGet(); @@ -230,7 +229,7 @@ public void testUpdateMappingWithNormsConflicts() { .preparePutMapping("test") .setSource( "{\"" + MapperService.SINGLE_MAPPING_NAME + "\":{\"properties\":{\"body\":{\"type\":\"text\", \"norms\": true }}}}", - XContentType.JSON + MediaTypeRegistry.JSON ) .execute() .actionGet(); @@ -256,7 +255,7 @@ public void testUpdateMappingNoChanges() { AcknowledgedResponse putMappingResponse = client().admin() .indices() .preparePutMapping("test") - .setSource("{\"properties\":{\"body\":{\"type\":\"text\"}}}", XContentType.JSON) + .setSource("{\"properties\":{\"body\":{\"type\":\"text\"}}}", MediaTypeRegistry.JSON) .execute() 
.actionGet(); @@ -347,7 +346,7 @@ public void testPutMappingsWithBlocks() { client().admin() .indices() .preparePutMapping("test") - .setSource("{\"properties\":{\"date\":{\"type\":\"integer\"}}}", XContentType.JSON) + .setSource("{\"properties\":{\"date\":{\"type\":\"integer\"}}}", MediaTypeRegistry.JSON) ); } finally { disableIndexBlock("test", block); @@ -361,7 +360,7 @@ public void testPutMappingsWithBlocks() { client().admin() .indices() .preparePutMapping("test") - .setSource("{\"properties\":{\"date\":{\"type\":\"integer\"}}}", XContentType.JSON) + .setSource("{\"properties\":{\"date\":{\"type\":\"integer\"}}}", MediaTypeRegistry.JSON) ); } finally { disableIndexBlock("test", block); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/memory/breaker/CircuitBreakerServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/memory/breaker/CircuitBreakerServiceIT.java index 2ab44f8318617..73e888eea362c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/memory/breaker/CircuitBreakerServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/memory/breaker/CircuitBreakerServiceIT.java @@ -32,6 +32,8 @@ package org.opensearch.indices.memory.breaker; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.ExceptionsHelper; import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -46,25 +48,27 @@ import org.opensearch.client.Requests; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.breaker.CircuitBreakingException; -import org.opensearch.common.breaker.NoopCircuitBreaker; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.indices.breaker.CircuitBreakerStats; -import org.opensearch.indices.breaker.HierarchyCircuitBreakerService; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.CircuitBreakingException; +import org.opensearch.core.common.breaker.NoopCircuitBreaker; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.indices.breaker.CircuitBreakerStats; import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.indices.breaker.HierarchyCircuitBreakerService; +import org.opensearch.search.SearchService; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; - +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.junit.After; import org.junit.Before; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.concurrent.TimeUnit; @@ -73,12 +77,12 @@ import static org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.BREAKER; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static 
org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.cardinality; import static org.opensearch.search.aggregations.AggregationBuilders.terms; import static org.opensearch.test.OpenSearchIntegTestCase.Scope.TEST; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFailures; - import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.Matchers.equalTo; @@ -89,7 +93,27 @@ * Integration tests for InternalCircuitBreakerService */ @ClusterScope(scope = TEST, numClientNodes = 0, maxNumDataNodes = 1) -public class CircuitBreakerServiceIT extends OpenSearchIntegTestCase { +public class CircuitBreakerServiceIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + public CircuitBreakerServiceIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY, randomIntBetween(1, 2)) + .build(); + } + /** Reset all breaker settings back to their defaults */ private void reset() { logger.info("--> resetting breaker settings"); @@ -197,7 +221,7 @@ public void testRamAccountingTermsEnum() throws Exception { prepareCreate("ramtest").setSource( "{\"mappings\": {\"type\": {\"properties\": {\"test\": " + "{\"type\": \"text\",\"fielddata\": true,\"fielddata_frequency_filter\": {\"max\": 10000}}}}}}", - XContentType.JSON + MediaTypeRegistry.JSON ) ); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java index 341c0a965f94e..6d87cafdd4216 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java @@ -35,7 +35,6 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.FilterDirectoryReader; import org.apache.lucene.index.LeafReader; - import org.opensearch.OpenSearchException; import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -43,13 +42,12 @@ import org.opensearch.action.admin.indices.refresh.RefreshResponse; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchRequestBuilder; -import org.opensearch.common.Strings; -import org.opensearch.common.breaker.CircuitBreaker; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentFactory; +import 
org.opensearch.core.common.breaker.CircuitBreaker; import org.opensearch.index.MockEngineFactoryPlugin; import org.opensearch.index.query.QueryBuilders; import org.opensearch.indices.IndicesService; @@ -70,7 +68,6 @@ import static org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.BREAKER; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAllSuccessful; - import static org.hamcrest.Matchers.equalTo; /** @@ -99,22 +96,20 @@ public void testBreakerWithRandomExceptions() throws IOException, InterruptedExc assertThat("Breaker is not set to 0", node.getBreaker().getStats(CircuitBreaker.FIELDDATA).getEstimated(), equalTo(0L)); } - String mapping = Strings // {} - .toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("test-str") - .field("type", "keyword") - .field("doc_values", randomBoolean()) - .endObject() // test-str - .startObject("test-num") - // I don't use randomNumericType() here because I don't want "byte", and I want "float" and "double" - .field("type", randomFrom(Arrays.asList("float", "long", "double", "short", "integer"))) - .endObject() // test-num - .endObject() // properties - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("test-str") + .field("type", "keyword") + .field("doc_values", randomBoolean()) + .endObject() // test-str + .startObject("test-num") + // I don't use randomNumericType() here because I don't want "byte", and I want "float" and "double" + .field("type", randomFrom(Arrays.asList("float", "long", "double", "short", "integer"))) + .endObject() // test-num + .endObject() // properties + .endObject() + .toString(); final double topLevelRate; final double lowLevelRate; if (frequently()) { diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/DanglingIndicesIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/DanglingIndicesIT.java index 79e1df656484e..8fd7961cab3a7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/DanglingIndicesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/DanglingIndicesIT.java @@ -40,11 +40,11 @@ import org.opensearch.action.admin.indices.dangling.list.NodeListDanglingIndicesResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.indices.IndicesService; import org.opensearch.core.rest.RestStatus; +import org.opensearch.indices.IndicesService; +import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; -import org.opensearch.test.InternalTestCluster; import java.util.ArrayList; import java.util.List; diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexPrimaryRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexPrimaryRelocationIT.java index 32a10451a0dd3..9decd17d95eab 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexPrimaryRelocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexPrimaryRelocationIT.java @@ -56,30 +56,26 @@ public class IndexPrimaryRelocationIT extends OpenSearchIntegTestCase { private static final int RELOCATION_COUNT = 15; + public Settings indexSettings() { + return 
Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0).build(); + } + public void testPrimaryRelocationWhileIndexing() throws Exception { internalCluster().ensureAtLeastNumDataNodes(randomIntBetween(2, 3)); - client().admin() - .indices() - .prepareCreate("test") - .setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)) - .setMapping("field", "type=text") - .get(); + client().admin().indices().prepareCreate("test").setSettings(indexSettings()).setMapping("field", "type=text").get(); ensureGreen("test"); AtomicInteger numAutoGenDocs = new AtomicInteger(); final AtomicBoolean finished = new AtomicBoolean(false); - Thread indexingThread = new Thread() { - @Override - public void run() { - while (finished.get() == false && numAutoGenDocs.get() < 10_000) { - IndexResponse indexResponse = client().prepareIndex("test").setId("id").setSource("field", "value").get(); - assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); - DeleteResponse deleteResponse = client().prepareDelete("test", "id").get(); - assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); - client().prepareIndex("test").setSource("auto", true).get(); - numAutoGenDocs.incrementAndGet(); - } + Thread indexingThread = new Thread(() -> { + while (finished.get() == false && numAutoGenDocs.get() < 10_000) { + IndexResponse indexResponse = client().prepareIndex("test").setId("id").setSource("field", "value").get(); + assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); + DeleteResponse deleteResponse = client().prepareDelete("test", "id").get(); + assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); + client().prepareIndex("test").setSource("auto", true).get(); + numAutoGenDocs.incrementAndGet(); } - }; + }); indexingThread.start(); ClusterState initialState = client().admin().cluster().prepareState().get().getState(); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java index c31b5e1f3bc5b..8ce87f37d77cd 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java @@ -34,7 +34,6 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.index.IndexCommit; -import org.hamcrest.Matcher; import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; @@ -73,18 +72,19 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Priority; import org.opensearch.common.SetOnce; -import org.opensearch.common.Strings; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.breaker.CircuitBreakingException; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.CircuitBreakingException; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import 
org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.gateway.ReplicaShardAllocatorIT; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.gateway.ReplicaShardAllocatorIT; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.index.MockEngineFactoryPlugin; @@ -96,7 +96,6 @@ import org.opensearch.index.seqno.RetentionLeases; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreStats; import org.opensearch.indices.IndicesService; @@ -132,6 +131,7 @@ import org.opensearch.transport.TransportRequestHandler; import org.opensearch.transport.TransportRequestOptions; import org.opensearch.transport.TransportService; +import org.hamcrest.Matcher; import java.io.IOException; import java.util.ArrayList; @@ -154,6 +154,11 @@ import static java.util.Collections.singletonMap; import static java.util.stream.Collectors.toList; +import static org.opensearch.action.DocWriteResponse.Result.CREATED; +import static org.opensearch.action.DocWriteResponse.Result.UPDATED; +import static org.opensearch.node.RecoverySettingsChunkSizePlugin.CHUNK_SIZE_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; @@ -164,11 +169,6 @@ import static org.hamcrest.Matchers.isOneOf; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.not; -import static org.opensearch.action.DocWriteResponse.Result.CREATED; -import static org.opensearch.action.DocWriteResponse.Result.UPDATED; -import static org.opensearch.node.RecoverySettingsChunkSizePlugin.CHUNK_SIZE_SETTING; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; @ClusterScope(scope = Scope.TEST, numDataNodes = 0) public class IndexRecoveryIT extends OpenSearchIntegTestCase { @@ -298,7 +298,6 @@ public void testGatewayRecovery() throws Exception { logger.info("--> request recoveries"); RecoveryResponse response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet(); assertThat(response.shardRecoveryStates().size(), equalTo(SHARD_COUNT)); - assertThat(response.shardRecoveryStates().get(INDEX_NAME).size(), equalTo(1)); List<RecoveryState> recoveryStates = response.shardRecoveryStates().get(INDEX_NAME); assertThat(recoveryStates.size(), equalTo(1)); @@ -523,12 +522,12 @@ public void testRerouteRecovery() throws Exception { logger.info("--> waiting for recovery to start both on source and target"); final Index index = resolveIndex(INDEX_NAME); - assertBusy(() -> { + assertBusyWithFixedSleepTime(() -> { IndicesService indicesService = internalCluster().getInstance(IndicesService.class, nodeA); assertThat(indicesService.indexServiceSafe(index).getShard(0).recoveryStats().currentAsSource(), equalTo(1)); indicesService = internalCluster().getInstance(IndicesService.class, nodeB); 
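// nodeB is the relocation target in this test, so it should likewise report exactly one in-flight recovery as target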
assertThat(indicesService.indexServiceSafe(index).getShard(0).recoveryStats().currentAsTarget(), equalTo(1)); - }); + }, TimeValue.timeValueSeconds(10), TimeValue.timeValueMillis(500)); logger.info("--> request recoveries"); RecoveryResponse response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet(); @@ -899,14 +898,14 @@ public void testTransientErrorsDuringRecoveryAreRetried() throws Exception { // is a mix of file chunks and translog ops int threeFourths = (int) (numDocs * 0.75); for (int i = 0; i < threeFourths; i++) { - requests.add(client().prepareIndex(indexName).setSource("{}", XContentType.JSON)); + requests.add(client().prepareIndex(indexName).setSource("{}", MediaTypeRegistry.JSON)); } indexRandom(true, requests); flush(indexName); requests.clear(); for (int i = threeFourths; i < numDocs; i++) { - requests.add(client().prepareIndex(indexName).setSource("{}", XContentType.JSON)); + requests.add(client().prepareIndex(indexName).setSource("{}", MediaTypeRegistry.JSON)); } indexRandom(true, requests); ensureSearchable(indexName); @@ -1098,7 +1097,7 @@ public void testDisconnectsWhileRecovering() throws Exception { List<IndexRequestBuilder> requests = new ArrayList<>(); int numDocs = scaledRandomIntBetween(25, 250); for (int i = 0; i < numDocs; i++) { - requests.add(client().prepareIndex(indexName).setSource("{}", XContentType.JSON)); + requests.add(client().prepareIndex(indexName).setSource("{}", MediaTypeRegistry.JSON)); } indexRandom(true, requests); ensureSearchable(indexName); @@ -1252,7 +1251,7 @@ public void testDisconnectsDuringRecovery() throws Exception { List<IndexRequestBuilder> requests = new ArrayList<>(); int numDocs = scaledRandomIntBetween(25, 250); for (int i = 0; i < numDocs; i++) { - requests.add(client().prepareIndex(indexName).setSource("{}", XContentType.JSON)); + requests.add(client().prepareIndex(indexName).setSource("{}", MediaTypeRegistry.JSON)); } indexRandom(true, requests); ensureSearchable(indexName); @@ -1395,7 +1394,7 @@ public void testHistoryRetention() throws Exception { final List<IndexRequestBuilder> requests = new ArrayList<>(); final int replicatedDocCount = scaledRandomIntBetween(25, 250); while (requests.size() < replicatedDocCount) { - requests.add(client().prepareIndex(indexName).setSource("{}", XContentType.JSON)); + requests.add(client().prepareIndex(indexName).setSource("{}", MediaTypeRegistry.JSON)); } indexRandom(true, requests); if (randomBoolean()) { @@ -1417,7 +1416,7 @@ public void testHistoryRetention() throws Exception { final int numNewDocs = scaledRandomIntBetween(25, 250); for (int i = 0; i < numNewDocs; i++) { - client().prepareIndex(indexName).setSource("{}", XContentType.JSON).setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); + client().prepareIndex(indexName).setSource("{}", MediaTypeRegistry.JSON).setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); } // Flush twice to update the safe commit's local checkpoint assertThat(client().admin().indices().prepareFlush(indexName).setForce(true).execute().get().getFailedShards(), equalTo(0)); @@ -1458,7 +1457,7 @@ public void testDoNotInfinitelyWaitForMapping() { for (int i = 0; i < numDocs; i++) { client().prepareIndex("test") .setId("u" + i) - .setSource(singletonMap("test_field", Integer.toString(i)), XContentType.JSON) + .setSource(singletonMap("test_field", Integer.toString(i)), MediaTypeRegistry.JSON) .get(); } Semaphore recoveryBlocked = new Semaphore(1); @@ -1610,7 +1609,7 @@ public void testRecoverLocallyUpToGlobalCheckpoint() throws Exception { 
throw new AssertionError( "expect an operation-based recovery:" + "retention leases" - + Strings.toString(XContentType.JSON, retentionLeases) + + Strings.toString(MediaTypeRegistry.JSON, retentionLeases) + "]" ); } @@ -2187,7 +2186,10 @@ public void testPeerRecoveryTrimsLocalTranslog() throws Exception { while (stopped.get() == false) { try { IndexResponse response = client().prepareIndex(indexName) - .setSource(Collections.singletonMap("f" + randomIntBetween(1, 10), randomNonNegativeLong()), XContentType.JSON) + .setSource( + Collections.singletonMap("f" + randomIntBetween(1, 10), randomNonNegativeLong()), + MediaTypeRegistry.JSON + ) .get(); assertThat(response.getResult(), isOneOf(CREATED, UPDATED)); } catch (IllegalStateException | OpenSearchException ignored) {} diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/ReplicaToPrimaryPromotionIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/ReplicaToPrimaryPromotionIT.java index 939743355f5f4..3df4ecff5250c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/ReplicaToPrimaryPromotionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/ReplicaToPrimaryPromotionIT.java @@ -38,8 +38,8 @@ import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.test.BackgroundIndexer; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; import java.util.Locale; diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java index b6ea3a094f496..30edea6551067 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java @@ -9,6 +9,7 @@ package org.opensearch.indices.replication; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.OpenSearchAllocationTestCase.ShardAllocations; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.cluster.routing.RoutingNode; @@ -21,6 +22,7 @@ import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.junit.annotations.TestLogging; import java.util.ArrayList; import java.util.List; @@ -31,8 +33,6 @@ import static org.opensearch.cluster.routing.ShardRoutingState.STARTED; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; -import org.opensearch.cluster.OpenSearchAllocationTestCase.ShardAllocations; - @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class SegmentReplicationAllocationIT extends SegmentReplicationBaseIT { @@ -92,7 +92,7 @@ public void testGlobalPrimaryAllocation() throws Exception { /** * This test verifies the happy path where primary shard allocation is balanced when multiple indices are created. - * + * <p> * This test generally passes even without primary shard balance, due to the nature of the allocation algorithm, which * assigns all primary shards first, followed by replica copies.
*/ @@ -129,6 +129,7 @@ public void testPerIndexPrimaryAllocation() throws Exception { * ensures the primary shard distribution is balanced. * */ + @TestLogging(reason = "Enable debug logs from cluster and index replication package", value = "org.opensearch.cluster:DEBUG,org.opensearch.indices.replication:DEBUG") public void testSingleIndexShardAllocation() throws Exception { internalCluster().startClusterManagerOnlyNode(); final int maxReplicaCount = 1; diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java index 64c6ebbb33482..be849452c0f5e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java @@ -8,7 +8,7 @@ package org.opensearch.indices.replication; -import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsResponse; +import org.apache.lucene.index.SegmentInfos; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; @@ -17,26 +17,27 @@ import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.Nullable; -import org.opensearch.common.settings.Settings; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.lease.Releasable; -import org.opensearch.common.util.FeatureFlags; +import org.opensearch.common.settings.Settings; import org.opensearch.core.index.Index; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexService; -import org.opensearch.index.SegmentReplicationPerGroupStats; import org.opensearch.index.SegmentReplicationShardStats; import org.opensearch.index.engine.Engine; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.transport.TransportService; +import java.io.IOException; import java.util.Arrays; import java.util.Collection; import java.util.List; @@ -63,11 +64,6 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { return asList(MockTransportService.TestPlugin.class); } - @Override - protected boolean addMockInternalEngine() { - return false; - } - @Override public Settings indexSettings() { return Settings.builder() @@ -134,24 +130,6 @@ protected void waitForSearchableDocs(long docCount, String... 
nodes) throws Exce waitForSearchableDocs(docCount, Arrays.stream(nodes).collect(Collectors.toList())); } - protected void waitForSegmentReplication(String node) throws Exception { - assertBusy(() -> { - SegmentReplicationStatsResponse segmentReplicationStatsResponse = client(node).admin() - .indices() - .prepareSegmentReplicationStats(INDEX_NAME) - .setDetailed(true) - .execute() - .actionGet(); - final SegmentReplicationPerGroupStats perGroupStats = segmentReplicationStatsResponse.getReplicationStats() - .get(INDEX_NAME) - .get(0); - assertEquals( - perGroupStats.getReplicaStats().stream().findFirst().get().getCurrentReplicationState().getStage(), - SegmentReplicationState.Stage.DONE - ); - }, 1, TimeUnit.MINUTES); - } - protected void verifyStoreContent() throws Exception { assertBusy(() -> { final ClusterState clusterState = getClusterState(); @@ -201,31 +179,20 @@ private IndexShard getIndexShard(ClusterState state, ShardRouting routing, Strin return getIndexShard(state.nodes().get(routing.currentNodeId()).getName(), routing.shardId(), indexName); } - /** - * Fetch IndexShard by shardId, multiple shards per node allowed. - */ - protected IndexShard getIndexShard(String node, ShardId shardId, String indexName) { - final Index index = resolveIndex(indexName); - IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node); - IndexService indexService = indicesService.indexServiceSafe(index); - final Optional<Integer> id = indexService.shardIds().stream().filter(sid -> sid == shardId.id()).findFirst(); - return indexService.getShard(id.get()); - } - /** * Fetch IndexShard, assumes only a single shard per node. */ protected IndexShard getIndexShard(String node, String indexName) { final Index index = resolveIndex(indexName); IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node); - IndexService indexService = indicesService.indexServiceSafe(index); + IndexService indexService = indicesService.indexService(index); + assertNotNull(indexService); final Optional<Integer> shardId = indexService.shardIds().stream().findFirst(); - return indexService.getShard(shardId.get()); + return shardId.map(indexService::getShard).orElse(null); } protected boolean segmentReplicationWithRemoteEnabled() { - return IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.get(indexSettings()).booleanValue() - && "true".equalsIgnoreCase(featureFlagSettings().get(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL)); + return IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.get(indexSettings()).booleanValue(); } protected Releasable blockReplication(List<String> nodes, CountDownLatch latch) { @@ -261,11 +228,21 @@ protected Releasable blockReplication(List<String> nodes, CountDownLatch latch) protected void assertReplicaCheckpointUpdated(IndexShard primaryShard) throws Exception { assertBusy(() -> { - Set<SegmentReplicationShardStats> groupStats = primaryShard.getReplicationStats(); + Set<SegmentReplicationShardStats> groupStats = primaryShard.getReplicationStatsForTrackedReplicas(); assertEquals(primaryShard.indexSettings().getNumberOfReplicas(), groupStats.size()); for (SegmentReplicationShardStats shardStat : groupStats) { assertEquals(0, shardStat.getCheckpointsBehindCount()); } }, 30, TimeUnit.SECONDS); } + + /** + * Returns the latest SIS for a shard but does not incref the segments. 
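+ * The infos are read via getLatestSegmentInfosAndCheckpoint() and the wrapping GatedCloseable is released before returning, so callers must not assume the referenced segment files remain on disk.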
+ */ + protected SegmentInfos getLatestSegmentInfos(IndexShard shard) throws IOException { + final Tuple<GatedCloseable<SegmentInfos>, ReplicationCheckpoint> tuple = shard.getLatestSegmentInfosAndCheckpoint(); + try (final GatedCloseable<SegmentInfos> closeable = tuple.v1()) { + return closeable.get(); + } + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java index a82fd8d845709..f2cb7c9c6bfc8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java @@ -10,7 +10,10 @@ import org.opensearch.action.admin.indices.settings.get.GetSettingsRequest; import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.opensearch.action.admin.indices.shrink.ResizeType; +import org.opensearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; import org.opensearch.core.index.Index; import org.opensearch.index.IndexModule; @@ -18,8 +21,15 @@ import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.OpenSearchIntegTestCase; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Locale; + import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; +import static org.opensearch.indices.IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_SETTING_REPLICATION_TYPE; +import static org.hamcrest.Matchers.hasSize; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class SegmentReplicationClusterSettingIT extends OpenSearchIntegTestCase { @@ -29,6 +39,9 @@ public class SegmentReplicationClusterSettingIT extends OpenSearchIntegTestCase protected static final int SHARD_COUNT = 1; protected static final int REPLICA_COUNT = 1; + protected static final String REPLICATION_MISMATCH_VALIDATION_ERROR = + "Validation Failed: 1: index setting [index.replication.type] is not allowed to be set as [cluster.index.restrict.replication.type=true];"; + @Override public Settings indexSettings() { return Settings.builder() @@ -39,19 +52,6 @@ public Settings indexSettings() { .build(); } - @Override - protected boolean addMockInternalEngine() { - return false; - } - - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put(CLUSTER_SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) - .build(); - } - public void testIndexReplicationSettingOverridesSegRepClusterSetting() throws Exception { Settings settings = Settings.builder().put(CLUSTER_SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build(); final String ANOTHER_INDEX = "test-index"; @@ -123,4 +123,125 @@ public void testIndexReplicationSettingOverridesDocRepClusterSetting() throws Ex assertEquals(indicesService.indexService(anotherIndex).getIndexSettings().isSegRepEnabled(), false); } + public void testReplicationTypesOverrideNotAllowed_IndexAPI() { + // Generate mutually exclusive replication strategies at cluster and index 
level + List<ReplicationType> replicationStrategies = getRandomReplicationTypesAsList(); + ReplicationType clusterLevelReplication = replicationStrategies.get(0); + ReplicationType indexLevelReplication = replicationStrategies.get(1); + Settings nodeSettings = Settings.builder() + .put(CLUSTER_SETTING_REPLICATION_TYPE, clusterLevelReplication) + .put(CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING.getKey(), true) + .build(); + internalCluster().startClusterManagerOnlyNode(nodeSettings); + internalCluster().startDataOnlyNode(nodeSettings); + Settings indexSettings = Settings.builder().put(indexSettings()).put(SETTING_REPLICATION_TYPE, indexLevelReplication).build(); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> createIndex(INDEX_NAME, indexSettings)); + assertEquals(REPLICATION_MISMATCH_VALIDATION_ERROR, exception.getMessage()); + } + + public void testReplicationTypesOverrideNotAllowed_WithTemplates() { + // Generate mutually exclusive replication strategies at cluster and index level + List<ReplicationType> replicationStrategies = getRandomReplicationTypesAsList(); + ReplicationType clusterLevelReplication = replicationStrategies.get(0); + ReplicationType templateReplicationType = replicationStrategies.get(1); + Settings nodeSettings = Settings.builder() + .put(CLUSTER_SETTING_REPLICATION_TYPE, clusterLevelReplication) + .put(CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING.getKey(), true) + .build(); + internalCluster().startClusterManagerOnlyNode(nodeSettings); + internalCluster().startDataOnlyNode(nodeSettings); + internalCluster().startDataOnlyNode(nodeSettings); + logger.info( + "--> Create index with template replication {} and cluster level replication {}", + templateReplicationType, + clusterLevelReplication + ); + // Create index template + client().admin() + .indices() + .preparePutTemplate("template_1") + .setPatterns(Collections.singletonList("test-idx*")) + .setSettings(Settings.builder().put(indexSettings()).put(SETTING_REPLICATION_TYPE, templateReplicationType).build()) + .setOrder(0) + .get(); + + GetIndexTemplatesResponse response = client().admin().indices().prepareGetTemplates().get(); + assertThat(response.getIndexTemplates(), hasSize(1)); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> createIndex(INDEX_NAME)); + assertEquals(REPLICATION_MISMATCH_VALIDATION_ERROR, exception.getMessage()); + } + + public void testReplicationTypesOverrideNotAllowed_WithResizeAction() { + // Generate mutually exclusive replication strategies at cluster and index level + List<ReplicationType> replicationStrategies = getRandomReplicationTypesAsList(); + ReplicationType clusterLevelReplication = replicationStrategies.get(0); + ReplicationType indexLevelReplication = replicationStrategies.get(1); + Settings nodeSettings = Settings.builder() + .put(CLUSTER_SETTING_REPLICATION_TYPE, clusterLevelReplication) + .put(CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING.getKey(), true) + .build(); + internalCluster().startClusterManagerOnlyNode(nodeSettings); + internalCluster().startDataOnlyNode(nodeSettings); + internalCluster().startDataOnlyNode(nodeSettings); + logger.info( + "--> Create index with index level replication {} and cluster level replication {}", + indexLevelReplication, + clusterLevelReplication + ); + + // Define resize action and target shard count. 
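+ // SPLIT doubles the initial shard count, SHRINK collapses it back to SHARD_COUNT, and CLONE keeps it unchanged.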
+ List<Tuple<ResizeType, Integer>> resizeActionsList = new ArrayList<>(); + final int initialShardCount = 2; + resizeActionsList.add(new Tuple<>(ResizeType.SPLIT, 2 * initialShardCount)); + resizeActionsList.add(new Tuple<>(ResizeType.SHRINK, SHARD_COUNT)); + resizeActionsList.add(new Tuple<>(ResizeType.CLONE, initialShardCount)); + + Tuple<ResizeType, Integer> resizeActionTuple = resizeActionsList.get(random().nextInt(resizeActionsList.size())); + final String targetIndexName = resizeActionTuple.v1().name().toLowerCase(Locale.ROOT) + "-target"; + + logger.info("--> Performing resize action {} with shard count {}", resizeActionTuple.v1(), resizeActionTuple.v2()); + + Settings indexSettings = Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, initialShardCount) + .put(SETTING_REPLICATION_TYPE, clusterLevelReplication) + .build(); + createIndex(INDEX_NAME, indexSettings); + + // Block writes + client().admin().indices().prepareUpdateSettings(INDEX_NAME).setSettings(Settings.builder().put("index.blocks.write", true)).get(); + ensureGreen(); + + // Validate resize action fails + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> client().admin() + .indices() + .prepareResizeIndex(INDEX_NAME, targetIndexName) + .setResizeType(resizeActionTuple.v1()) + .setSettings( + Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", resizeActionTuple.v2()) + .putNull("index.blocks.write") + .put(SETTING_REPLICATION_TYPE, indexLevelReplication) + .build() + ) + .get() + ); + assertEquals(REPLICATION_MISMATCH_VALIDATION_ERROR, exception.getMessage()); + } + + /** + * Generate a list of ReplicationType with random ordering + * + * @return List of ReplicationType values + */ + private List<ReplicationType> getRandomReplicationTypesAsList() { + List<ReplicationType> replicationStrategies = List.of(ReplicationType.SEGMENT, ReplicationType.DOCUMENT); + int randomReplicationIndex = random().nextInt(replicationStrategies.size()); + ReplicationType clusterLevelReplication = replicationStrategies.get(randomReplicationIndex); + ReplicationType indexLevelReplication = replicationStrategies.get(1 - randomReplicationIndex); + return List.of(clusterLevelReplication, indexLevelReplication); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationDisruptionIT.java new file mode 100644 index 0000000000000..66b26b5d25cfe --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationDisruptionIT.java @@ -0,0 +1,167 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.replication; + +import org.apache.lucene.tests.util.LuceneTestCase; +import org.opensearch.action.admin.indices.recovery.RecoveryResponse; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.recovery.FileChunkRequest; +import org.opensearch.indices.recovery.RecoveryState; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.transport.MockTransportService; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.TransportService; +import org.junit.Before; + +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; + +/** + * These tests simulate corruption cases during replication. They are skipped on WindowsFS simulation where file renaming + * can fail with an access denied IOException because deletion is not permitted. + */ +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +@LuceneTestCase.SuppressFileSystems("WindowsFS") +public class SegmentReplicationDisruptionIT extends SegmentReplicationBaseIT { + @Before + private void setup() { + internalCluster().startClusterManagerOnlyNode(); + } + + public void testSendCorruptBytesToReplica() throws Exception { + final String primaryNode = internalCluster().startDataOnlyNode(); + createIndex( + INDEX_NAME, + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put("index.refresh_interval", -1) + .build() + ); + ensureYellow(INDEX_NAME); + final String replicaNode = internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + + MockTransportService primaryTransportService = ((MockTransportService) internalCluster().getInstance( + TransportService.class, + primaryNode + )); + CountDownLatch latch = new CountDownLatch(1); + AtomicBoolean failed = new AtomicBoolean(false); + primaryTransportService.addSendBehavior( + internalCluster().getInstance(TransportService.class, replicaNode), + (connection, requestId, action, request, options) -> { + if (action.equals(SegmentReplicationTargetService.Actions.FILE_CHUNK) && failed.getAndSet(true) == false) { + FileChunkRequest req = (FileChunkRequest) request; + TransportRequest corrupt = new FileChunkRequest( + req.recoveryId(), + ((FileChunkRequest) request).requestSeqNo(), + ((FileChunkRequest) request).shardId(), + ((FileChunkRequest) request).metadata(), + ((FileChunkRequest) request).position(), + new BytesArray("test"), + false, + 0, + 0L + ); + connection.sendRequest(requestId, action, corrupt, options); + latch.countDown(); + } else { + connection.sendRequest(requestId, action, request, options); + } + } + ); + for (int i = 0; i < 100; i++) { + client().prepareIndex(INDEX_NAME) + .setId(String.valueOf(i)) + .setSource(jsonBuilder().startObject().field("field", i).endObject()) + .get(); + } + final long originalRecoveryTime = getRecoveryStopTime(replicaNode); + assertNotEquals(originalRecoveryTime, 0); + refresh(INDEX_NAME); + latch.await(); + assertTrue(failed.get()); + waitForNewPeerRecovery(replicaNode, originalRecoveryTime); + // reset checkIndex to ensure our original shard doesn't throw + 
resetCheckIndexStatus(); + waitForSearchableDocs(100, primaryNode, replicaNode); + } + + public void testWipeSegmentBetweenSyncs() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + final String primaryNode = internalCluster().startDataOnlyNode(); + createIndex( + INDEX_NAME, + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put("index.refresh_interval", -1) + .build() + ); + ensureYellow(INDEX_NAME); + final String replicaNode = internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + + for (int i = 0; i < 10; i++) { + client().prepareIndex(INDEX_NAME) + .setId(String.valueOf(i)) + .setSource(jsonBuilder().startObject().field("field", i).endObject()) + .get(); + } + refresh(INDEX_NAME); + ensureGreen(INDEX_NAME); + final long originalRecoveryTime = getRecoveryStopTime(replicaNode); + + final IndexShard indexShard = getIndexShard(replicaNode, INDEX_NAME); + waitForSearchableDocs(INDEX_NAME, 10, List.of(replicaNode)); + indexShard.store().directory().deleteFile("_0.si"); + + for (int i = 11; i < 21; i++) { + client().prepareIndex(INDEX_NAME) + .setId(String.valueOf(i)) + .setSource(jsonBuilder().startObject().field("field", i).endObject()) + .get(); + } + refresh(INDEX_NAME); + waitForNewPeerRecovery(replicaNode, originalRecoveryTime); + resetCheckIndexStatus(); + waitForSearchableDocs(20, primaryNode, replicaNode); + } + + private void waitForNewPeerRecovery(String replicaNode, long originalRecoveryTime) throws Exception { + assertBusy(() -> { + // assert we have a peer recovery after the original + final long time = getRecoveryStopTime(replicaNode); + assertNotEquals(time, 0); + assertNotEquals(originalRecoveryTime, time); + + }, 1, TimeUnit.MINUTES); + } + + private long getRecoveryStopTime(String nodeName) { + final RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries(INDEX_NAME).get(); + final List<RecoveryState> recoveryStates = recoveryResponse.shardRecoveryStates().get(INDEX_NAME); + for (RecoveryState recoveryState : recoveryStates) { + if (recoveryState.getTargetNode().getName().equals(nodeName)) { + return recoveryState.getTimer().stopTime(); + } + } + return 0L; + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java index 3ab1a2a8564c5..70da3b0e38472 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java @@ -9,22 +9,28 @@ package org.opensearch.indices.replication; import com.carrotsearch.randomizedtesting.RandomizedTest; + import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.SortedDocValuesField; import org.apache.lucene.document.StringField; import org.apache.lucene.document.TextField; +import org.apache.lucene.index.Fields; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.StandardDirectoryReader; import org.apache.lucene.tests.util.TestUtil; import org.apache.lucene.util.BytesRef; -import org.junit.Before; -import org.opensearch.action.ActionFuture; +import org.opensearch.action.admin.cluster.stats.ClusterStatsResponse; 
+import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.admin.indices.flush.FlushRequest; +import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.opensearch.action.admin.indices.stats.IndicesStatsRequest; import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; +import org.opensearch.action.get.GetResponse; +import org.opensearch.action.get.MultiGetRequest; +import org.opensearch.action.get.MultiGetResponse; import org.opensearch.action.index.IndexResponse; import org.opensearch.action.search.CreatePitAction; import org.opensearch.action.search.CreatePitRequest; @@ -35,21 +41,29 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchType; import org.opensearch.action.support.WriteRequest; +import org.opensearch.action.termvectors.TermVectorsRequestBuilder; +import org.opensearch.action.termvectors.TermVectorsResponse; import org.opensearch.action.update.UpdateResponse; import org.opensearch.client.Requests; -import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.Preference; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.allocation.command.CancelAllocationCommand; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.action.ActionFuture; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.lease.Releasable; +import org.opensearch.common.util.set.Sets; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexModule; +import org.opensearch.index.ReplicationStats; import org.opensearch.index.SegmentReplicationPerGroupStats; import org.opensearch.index.SegmentReplicationPressureService; import org.opensearch.index.SegmentReplicationShardStats; @@ -58,41 +72,52 @@ import org.opensearch.index.engine.EngineConfig; import org.opensearch.index.engine.NRTReplicationReaderManager; import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.recovery.FileChunkRequest; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.node.NodeClosedException; import org.opensearch.search.SearchService; import org.opensearch.search.builder.PointInTimeBuilder; import org.opensearch.search.internal.PitReaderContext; import org.opensearch.search.sort.SortOrder; -import org.opensearch.node.NodeClosedException; import org.opensearch.test.BackgroundIndexer; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.junit.annotations.TestLogging; import org.opensearch.test.transport.MockTransportService; import org.opensearch.transport.TransportService; +import org.junit.Before; +import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; +import 
java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import static java.util.Arrays.asList; import static org.opensearch.action.search.PitTestsUtil.assertSegments; import static org.opensearch.action.search.SearchContextId.decode; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.index.query.QueryBuilders.boolQuery; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.matchQuery; +import static org.opensearch.index.query.QueryBuilders.rangeQuery; +import static org.opensearch.index.query.QueryBuilders.termQuery; import static org.opensearch.indices.replication.SegmentReplicationTarget.REPLICATION_PREFIX; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAllSuccessful; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchHits; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class SegmentReplicationIT extends SegmentReplicationBaseIT { @@ -102,6 +127,10 @@ private void setup() { internalCluster().startClusterManagerOnlyNode(); } + private static String indexOrAlias() { + return randomBoolean() ? INDEX_NAME : "alias"; + } + public void testPrimaryStopped_ReplicaPromoted() throws Exception { final String primary = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME); @@ -124,8 +153,9 @@ public void testPrimaryStopped_ReplicaPromoted() throws Exception { final ShardRouting replicaShardRouting = getShardRoutingForNodeName(replica); assertNotNull(replicaShardRouting); assertTrue(replicaShardRouting + " should be promoted as a primary", replicaShardRouting.primary()); - refresh(INDEX_NAME); - assertHitCount(client(replica).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 2); + final SearchResponse response = client(replica).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(); + // new primary should have at least the doc count from the first set of segments. + assertTrue(response.getHits().getTotalHits().value >= 1); // assert we can index into the new primary. 
client().prepareIndex(INDEX_NAME).setId("3").setSource("bar", "baz").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); @@ -201,15 +231,16 @@ public void testReplicationAfterPrimaryRefreshAndFlush() throws Exception { final String nodeB = internalCluster().startDataOnlyNode(); final Settings settings = Settings.builder() .put(indexSettings()) - .put( - EngineConfig.INDEX_CODEC_SETTING.getKey(), - randomFrom(CodecService.DEFAULT_CODEC, CodecService.BEST_COMPRESSION_CODEC, CodecService.LUCENE_DEFAULT_CODEC) - ) + .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), randomFrom(new ArrayList<>(CODECS) { + { + add(CodecService.LUCENE_DEFAULT_CODEC); + } + })) .build(); createIndex(INDEX_NAME, settings); ensureGreen(INDEX_NAME); - final int initialDocCount = scaledRandomIntBetween(0, 200); + final int initialDocCount = scaledRandomIntBetween(0, 10); try ( BackgroundIndexer indexer = new BackgroundIndexer( INDEX_NAME, @@ -226,7 +257,7 @@ public void testReplicationAfterPrimaryRefreshAndFlush() throws Exception { refresh(INDEX_NAME); waitForSearchableDocs(initialDocCount, nodeA, nodeB); - final int additionalDocCount = scaledRandomIntBetween(0, 200); + final int additionalDocCount = scaledRandomIntBetween(0, 10); final int expectedHitCount = initialDocCount + additionalDocCount; indexer.start(additionalDocCount); waitForDocs(expectedHitCount, indexer); @@ -245,7 +276,7 @@ public void testIndexReopenClose() throws Exception { createIndex(INDEX_NAME); ensureGreen(INDEX_NAME); - final int initialDocCount = scaledRandomIntBetween(100, 200); + final int initialDocCount = scaledRandomIntBetween(1, 10); try ( BackgroundIndexer indexer = new BackgroundIndexer( INDEX_NAME, @@ -274,14 +305,13 @@ public void testIndexReopenClose() throws Exception { } public void testScrollWithConcurrentIndexAndSearch() throws Exception { - assumeFalse("Skipping the test with Remote store as its flaky.", segmentReplicationWithRemoteEnabled()); final String primary = internalCluster().startDataOnlyNode(); final String replica = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME); ensureGreen(INDEX_NAME); final List<ActionFuture<IndexResponse>> pendingIndexResponses = new ArrayList<>(); final List<ActionFuture<SearchResponse>> pendingSearchResponse = new ArrayList<>(); - final int searchCount = randomIntBetween(10, 20); + final int searchCount = randomIntBetween(1, 2); final WriteRequest.RefreshPolicy refreshPolicy = randomFrom(WriteRequest.RefreshPolicy.values()); for (int i = 0; i < searchCount; i++) { @@ -326,6 +356,7 @@ public void testScrollWithConcurrentIndexAndSearch() throws Exception { waitForSearchableDocs(INDEX_NAME, 2 * searchCount, List.of(primary, replica)); } + @TestLogging(reason = "Getting trace logs from replication package", value = "org.opensearch.indices.replication:TRACE") public void testMultipleShards() throws Exception { Settings indexSettings = Settings.builder() .put(super.indexSettings()) @@ -339,7 +370,7 @@ public void testMultipleShards() throws Exception { createIndex(INDEX_NAME, indexSettings); ensureGreen(INDEX_NAME); - final int initialDocCount = scaledRandomIntBetween(1, 200); + final int initialDocCount = scaledRandomIntBetween(1, 10); try ( BackgroundIndexer indexer = new BackgroundIndexer( INDEX_NAME, @@ -356,7 +387,7 @@ public void testMultipleShards() throws Exception { refresh(INDEX_NAME); waitForSearchableDocs(initialDocCount, nodeA, nodeB); - final int additionalDocCount = scaledRandomIntBetween(0, 200); + final int additionalDocCount = scaledRandomIntBetween(0, 10); 
final int expectedHitCount = initialDocCount + additionalDocCount; indexer.start(additionalDocCount); waitForDocs(expectedHitCount, indexer); @@ -370,13 +401,21 @@ } public void testReplicationAfterForceMerge() throws Exception { + performReplicationAfterForceMerge(false, SHARD_COUNT * (1 + REPLICA_COUNT)); + } + + public void testReplicationAfterForceMergeOnPrimaryShardsOnly() throws Exception { + performReplicationAfterForceMerge(true, SHARD_COUNT); + } + + private void performReplicationAfterForceMerge(boolean primaryOnly, int expectedSuccessfulShards) throws Exception { final String nodeA = internalCluster().startDataOnlyNode(); final String nodeB = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME); ensureGreen(INDEX_NAME); - final int initialDocCount = scaledRandomIntBetween(0, 200); - final int additionalDocCount = scaledRandomIntBetween(0, 200); + final int initialDocCount = scaledRandomIntBetween(0, 10); + final int additionalDocCount = scaledRandomIntBetween(0, 10); final int expectedHitCount = initialDocCount + additionalDocCount; try ( BackgroundIndexer indexer = new BackgroundIndexer( @@ -400,8 +439,16 @@ waitForDocs(expectedHitCount, indexer); waitForSearchableDocs(expectedHitCount, nodeA, nodeB); - // Force a merge here so that the in memory SegmentInfos does not reference old segments on disk. - client().admin().indices().prepareForceMerge(INDEX_NAME).setMaxNumSegments(1).setFlush(false).get(); + // Perform the force merge, optionally restricted to the primary shards only. + final ForceMergeResponse forceMergeResponse = client().admin() + .indices() + .prepareForceMerge(INDEX_NAME) + .setPrimaryOnly(primaryOnly) + .setMaxNumSegments(1) + .setFlush(false) + .get(); + assertThat(forceMergeResponse.getFailedShards(), is(0)); + assertThat(forceMergeResponse.getSuccessfulShards(), is(expectedSuccessfulShards)); refresh(INDEX_NAME); verifyStoreContent(); } @@ -481,7 +528,7 @@ public void testNodeDropWithOngoingReplication() throws Exception { connection.sendRequest(requestId, action, request, options); } ); - final int docCount = scaledRandomIntBetween(10, 200); + final int docCount = scaledRandomIntBetween(1, 10); for (int i = 0; i < docCount; i++) { client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get(); } @@ -542,7 +589,7 @@ public void testCancellation() throws Exception { } ); - final int docCount = scaledRandomIntBetween(0, 200); + final int docCount = scaledRandomIntBetween(0, 10); try ( BackgroundIndexer indexer = new BackgroundIndexer( INDEX_NAME, @@ -564,6 +611,67 @@ assertDocCounts(docCount, primaryNode); } + public void testCancellationDuringGetCheckpointInfo() throws Exception { + cancelDuringReplicaAction(SegmentReplicationSourceService.Actions.GET_CHECKPOINT_INFO); + } + + public void testCancellationDuringGetSegments() throws Exception { + cancelDuringReplicaAction(SegmentReplicationSourceService.Actions.GET_SEGMENT_FILES); + } + + private void cancelDuringReplicaAction(String actionToBlock) throws Exception { + // this test stubs transport calls specific to node-node replication.
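+ // it blocks the chosen replication action on the primary, drops the replica mid-replication, and verifies the target and its store are fully released.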
+ assumeFalse( + "Skipping the test as it's not compatible with segment replication with remote store.", + segmentReplicationWithRemoteEnabled() + ); + final String primaryNode = internalCluster().startDataOnlyNode(); + createIndex(INDEX_NAME, Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build()); + ensureYellow(INDEX_NAME); + + final String replicaNode = internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + final SegmentReplicationTargetService targetService = internalCluster().getInstance( + SegmentReplicationTargetService.class, + replicaNode + ); + final IndexShard replicaShard = getIndexShard(replicaNode, INDEX_NAME); + CountDownLatch startCancellationLatch = new CountDownLatch(1); + CountDownLatch latch = new CountDownLatch(1); + + MockTransportService primaryTransportService = (MockTransportService) internalCluster().getInstance( + TransportService.class, + primaryNode + ); + primaryTransportService.addRequestHandlingBehavior(actionToBlock, (handler, request, channel, task) -> { + logger.info("Blocking action {}", actionToBlock); + try { + startCancellationLatch.countDown(); + latch.await(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + }); + + // index a doc and trigger replication + client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", "bar").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + + // remove the replica and ensure it is cleaned up. + startCancellationLatch.await(); + SegmentReplicationTarget target = targetService.get(replicaShard.shardId()); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)) + ); + assertEquals("Replication not closed: " + target.getId(), 0, target.refCount()); + assertEquals("Store has a positive refCount", 0, replicaShard.store().refCount()); + // stop the replica; this runs additional checks on shutdown to ensure the replica and its store are closed properly + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNode)); + latch.countDown(); + } + public void testStartReplicaAfterPrimaryIndexesDocs() throws Exception { final String primaryNode = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME, Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build()); @@ -602,13 +710,14 @@ verifyStoreContent(); } + @TestLogging(reason = "Getting trace logs from replication package", value = "org.opensearch.indices.replication:TRACE") public void testDeleteOperations()
throws Exception { ensureGreen(INDEX_NAME); Set<String> ids = indexer.getIds(); + assertFalse(ids.isEmpty()); String id = ids.toArray()[0].toString(); client(nodeA).prepareDelete(INDEX_NAME, id).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); @@ -649,19 +759,18 @@ public void testDeleteOperations() throws Exception { * from xlog. */ public void testReplicationPostDeleteAndForceMerge() throws Exception { - assumeFalse("Skipping the test with Remote store as its flaky.", segmentReplicationWithRemoteEnabled()); final String primary = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME); final String replica = internalCluster().startDataOnlyNode(); ensureGreen(INDEX_NAME); - final int initialDocCount = scaledRandomIntBetween(10, 200); + final int initialDocCount = scaledRandomIntBetween(1, 10); for (int i = 0; i < initialDocCount; i++) { client().prepareIndex(INDEX_NAME).setId(String.valueOf(i)).setSource("foo", "bar").get(); } refresh(INDEX_NAME); waitForSearchableDocs(initialDocCount, primary, replica); - final int deletedDocCount = randomIntBetween(10, initialDocCount); + final int deletedDocCount = randomIntBetween(1, initialDocCount); for (int i = 0; i < deletedDocCount; i++) { client(primary).prepareDelete(INDEX_NAME, String.valueOf(i)).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); } @@ -682,7 +791,7 @@ public void testReplicationPostDeleteAndForceMerge() throws Exception { ); // add some docs to the xlog and drop primary. - final int additionalDocs = randomIntBetween(1, 50); + final int additionalDocs = randomIntBetween(1, 5); for (int i = initialDocCount; i < initialDocCount + additionalDocs; i++) { client().prepareIndex(INDEX_NAME).setId(String.valueOf(i)).setSource("foo", "bar").get(); } @@ -713,7 +822,7 @@ public void testUpdateOperations() throws Exception { final String replica = internalCluster().startDataOnlyNode(); ensureGreen(INDEX_NAME); - final int initialDocCount = scaledRandomIntBetween(0, 200); + final int initialDocCount = scaledRandomIntBetween(1, 5); try ( BackgroundIndexer indexer = new BackgroundIndexer( INDEX_NAME, @@ -730,7 +839,7 @@ public void testUpdateOperations() throws Exception { refresh(INDEX_NAME); waitForSearchableDocs(initialDocCount, asList(primary, replica)); - final int additionalDocCount = scaledRandomIntBetween(0, 200); + final int additionalDocCount = scaledRandomIntBetween(0, 5); final int expectedHitCount = initialDocCount + additionalDocCount; indexer.start(additionalDocCount); waitForDocs(expectedHitCount, indexer); @@ -765,7 +874,7 @@ public void testDropPrimaryDuringReplication() throws Exception { final List<String> dataNodes = internalCluster().startDataOnlyNodes(6); ensureGreen(INDEX_NAME); - int initialDocCount = scaledRandomIntBetween(100, 200); + int initialDocCount = scaledRandomIntBetween(5, 10); try ( BackgroundIndexer indexer = new BackgroundIndexer( INDEX_NAME, @@ -799,6 +908,7 @@ public void testDropPrimaryDuringReplication() throws Exception { } } + @TestLogging(reason = "Getting trace logs from replication package", value = "org.opensearch.indices.replication:TRACE") public void testReplicaHasDiffFilesThanPrimary() throws Exception { final String primaryNode = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME, Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build()); @@ -810,7 +920,7 @@ public void testReplicaHasDiffFilesThanPrimary() throws Exception { IndexWriterConfig iwc = 
newIndexWriterConfig().setOpenMode(IndexWriterConfig.OpenMode.APPEND); // create a doc to index - int numDocs = 2 + random().nextInt(100); + int numDocs = 2 + random().nextInt(10); List<Document> docs = new ArrayList<>(); for (int i = 0; i < numDocs; i++) { @@ -839,7 +949,7 @@ public void testReplicaHasDiffFilesThanPrimary() throws Exception { replicaShard.finalizeReplication(segmentInfos); ensureYellow(INDEX_NAME); - final int docCount = scaledRandomIntBetween(10, 200); + final int docCount = scaledRandomIntBetween(10, 20); for (int i = 0; i < docCount; i++) { client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get(); // Refresh, this should trigger round of segment replication @@ -859,7 +969,7 @@ public void testPressureServiceStats() throws Exception { final String replicaNode = internalCluster().startDataOnlyNode(); ensureGreen(INDEX_NAME); - int initialDocCount = scaledRandomIntBetween(100, 200); + int initialDocCount = scaledRandomIntBetween(10, 20); try ( BackgroundIndexer indexer = new BackgroundIndexer( INDEX_NAME, @@ -958,32 +1068,33 @@ private void assertAllocationIdsInReplicaShardStats(Set<String> expected, Set<Se * @throws Exception when issue is encountered */ public void testScrollCreatedOnReplica() throws Exception { - assumeFalse("Skipping the test with Remote store as its flaky.", segmentReplicationWithRemoteEnabled()); // create the cluster with one primary node containing primary shard and replica node containing replica shard final String primary = internalCluster().startDataOnlyNode(); - createIndex(INDEX_NAME); + prepareCreate( + INDEX_NAME, + Settings.builder() + .put(indexSettings()) + // we want to control refreshes + .put("index.refresh_interval", -1) + ).get(); ensureYellowAndNoInitializingShards(INDEX_NAME); final String replica = internalCluster().startDataOnlyNode(); ensureGreen(INDEX_NAME); - // index 100 docs - for (int i = 0; i < 100; i++) { - client().prepareIndex(INDEX_NAME) - .setId(String.valueOf(i)) - .setSource(jsonBuilder().startObject().field("field", i).endObject()) - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .get(); - refresh(INDEX_NAME); - } + client().prepareIndex(INDEX_NAME) + .setId(String.valueOf(0)) + .setSource(jsonBuilder().startObject().field("field", 0).endObject()) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + refresh(INDEX_NAME); + assertBusy( () -> assertEquals( getIndexShard(primary, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion(), getIndexShard(replica, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion() ) ); - final IndexShard replicaShard = getIndexShard(replica, INDEX_NAME); - final SegmentInfos segmentInfos = replicaShard.getLatestSegmentInfosAndCheckpoint().v1().get(); - final Collection<String> snapshottedSegments = segmentInfos.files(false); + // opens a scrolled query before a flush is called. 
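+ // the open scroll must keep a reference to the segments it reads, even after they are merged away and removed from later commits.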
// this is for testing scroll segment consistency between refresh and flush SearchResponse searchResponse = client(replica).prepareSearch() @@ -997,17 +1108,20 @@ public void testScrollCreatedOnReplica() throws Exception { .setScroll(TimeValue.timeValueDays(1)) .get(); - // force call flush - flush(INDEX_NAME); + final IndexShard replicaShard = getIndexShard(replica, INDEX_NAME); + SegmentInfos latestSegmentInfos = getLatestSegmentInfos(replicaShard); + final Set<String> snapshottedSegments = new HashSet<>(latestSegmentInfos.files(false)); + logger.info("Segments {}", snapshottedSegments); - for (int i = 3; i < 50; i++) { - client().prepareDelete(INDEX_NAME, String.valueOf(i)).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + // index more docs and force merge down to 1 segment + for (int i = 1; i < 5; i++) { + client().prepareIndex(INDEX_NAME) + .setId(String.valueOf(i)) + .setSource(jsonBuilder().startObject().field("field", i).endObject()) + .get(); refresh(INDEX_NAME); - if (randomBoolean()) { - client().admin().indices().prepareForceMerge(INDEX_NAME).setMaxNumSegments(1).setFlush(true).get(); - flush(INDEX_NAME); - } } + // create new on-disk segments and copy them out. assertBusy(() -> { assertEquals( getIndexShard(primary, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion(), @@ -1015,13 +1129,19 @@ public void testScrollCreatedOnReplica() throws Exception { ); }); + // force merge and flush. client().admin().indices().prepareForceMerge(INDEX_NAME).setMaxNumSegments(1).setFlush(true).get(); + // wait for replication to complete assertBusy(() -> { assertEquals( getIndexShard(primary, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion(), getIndexShard(replica, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion() ); }); + logger.info("Local segments after force merge and commit {}", getLatestSegmentInfos(replicaShard).files(false)); + List<String> filesBeforeClearScroll = List.of(replicaShard.store().directory().listAll()); + assertTrue("Files should be preserved", filesBeforeClearScroll.containsAll(snapshottedSegments)); + // Test stats logger.info("--> Collect all scroll query hits"); long scrollHits = 0; @@ -1030,15 +1150,23 @@ public void testScrollCreatedOnReplica() throws Exception { searchResponse = client(replica).prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueDays(1)).get(); assertAllSuccessful(searchResponse); } while (searchResponse.getHits().getHits().length > 0); - - List<String> currentFiles = List.of(replicaShard.store().directory().listAll()); - assertTrue("Files should be preserved", currentFiles.containsAll(snapshottedSegments)); + assertEquals(1, scrollHits); client(replica).prepareClearScroll().addScrollId(searchResponse.getScrollId()).get(); - - currentFiles = List.of(replicaShard.store().directory().listAll()); - assertFalse("Files should be cleaned up post scroll clear request", currentFiles.containsAll(snapshottedSegments)); - assertEquals(100, scrollHits); + final Set<String> filesAfterClearScroll = Arrays.stream(replicaShard.store().directory().listAll()).collect(Collectors.toSet()); + // there should be no active readers, snapshots, or on-disk commits containing the snapshotted files, check that they have been + // deleted. 
+ Set<String> latestCommitSegments = new HashSet<>(replicaShard.store().readLastCommittedSegmentsInfo().files(false)); + assertEquals( + "Snapshotted files are no longer part of the latest commit", + Collections.emptySet(), + Sets.intersection(latestCommitSegments, snapshottedSegments) + ); + assertEquals( + "All snapshotted files should be deleted", + Collections.emptySet(), + Sets.intersection(filesAfterClearScroll, snapshottedSegments) + ); } /** @@ -1048,8 +1176,9 @@ public void testScrollCreatedOnReplica() throws Exception { * @throws Exception when issue is encountered */ public void testScrollWithOngoingSegmentReplication() throws Exception { + // this test stubs transport calls specific to node-node replication. assumeFalse( - "Skipping the test as its not compatible with segment replication with remote store yet.", + "Skipping the test as it's not compatible with segment replication with remote store.", segmentReplicationWithRemoteEnabled() ); @@ -1177,28 +1306,13 @@ } public void testPitCreatedOnReplica() throws Exception { - assumeFalse( - "Skipping the test as it is flaky with remote store. Tracking issue https://github.com/opensearch-project/OpenSearch/issues/8850", - segmentReplicationWithRemoteEnabled() - ); final String primary = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME); ensureYellowAndNoInitializingShards(INDEX_NAME); final String replica = internalCluster().startDataOnlyNode(); ensureGreen(INDEX_NAME); - client().prepareIndex(INDEX_NAME) - .setId("1") - .setSource("foo", randomInt()) - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .get(); - refresh(INDEX_NAME); - client().prepareIndex(INDEX_NAME) - .setId("2") - .setSource("foo", randomInt()) - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .get(); - for (int i = 3; i < 100; i++) { + for (int i = 0; i < 10; i++) { client().prepareIndex(INDEX_NAME) .setId(String.valueOf(i)) .setSource("foo", randomInt()) @@ -1248,7 +1362,7 @@ } flush(INDEX_NAME); - for (int i = 101; i < 200; i++) { + for (int i = 11; i < 20; i++) { client().prepareIndex(INDEX_NAME) .setId(String.valueOf(i)) .setSource("foo", randomInt()) @@ -1300,9 +1414,12 @@ // delete the PIT DeletePitRequest deletePITRequest = new DeletePitRequest(pitResponse.getId()); client().execute(DeletePitAction.INSTANCE, deletePITRequest).actionGet(); - - currentFiles = List.of(replicaShard.store().directory().listAll()); - assertFalse("Files should be cleaned up", currentFiles.containsAll(snapshottedSegments)); + assertBusy( + () -> assertFalse( + "Files should be cleaned up", + List.of(replicaShard.store().directory().listAll()).containsAll(snapshottedSegments) + ) + ); } /** @@ -1342,4 +1459,468 @@ public void testPrimaryReceivesDocsDuringReplicaRecovery() throws Exception { ensureGreen(INDEX_NAME); waitForSearchableDocs(2, nodes); } + + public void testIndexWhileRecoveringReplica() throws Exception { + final String primaryNode = internalCluster().startDataOnlyNode(); + assertAcked( + prepareCreate(INDEX_NAME).setMapping( + jsonBuilder().startObject() + .startObject("_routing") + .field("required", true) + .endObject() + .startObject("properties") + .startObject("online") + .field("type", "boolean") + .endObject() + .startObject("ts") + .field("type", "date") + .field("ignore_malformed", false) + .field("format", "epoch_millis") + .endObject() +
.startObject("bs") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + ) + ); + ensureYellow(INDEX_NAME); + final String replicaNode = internalCluster().startDataOnlyNode(); + + client().prepareIndex(INDEX_NAME) + .setId("1") + .setRouting("Y") + .setSource("online", false, "bs", "Y", "ts", System.currentTimeMillis() - 100, "type", "s") + .get(); + client().prepareIndex(INDEX_NAME) + .setId("2") + .setRouting("X") + .setSource("online", true, "bs", "X", "ts", System.currentTimeMillis() - 10000000, "type", "s") + .get(); + client().prepareIndex(INDEX_NAME) + .setId("3") + .setRouting(randomAlphaOfLength(2)) + .setSource("online", false, "ts", System.currentTimeMillis() - 100, "type", "bs") + .get(); + client().prepareIndex(INDEX_NAME) + .setId("4") + .setRouting(randomAlphaOfLength(2)) + .setSource("online", true, "ts", System.currentTimeMillis() - 123123, "type", "bs") + .get(); + refresh(); + ensureGreen(INDEX_NAME); + waitForSearchableDocs(4, primaryNode, replicaNode); + + SearchResponse response = client().prepareSearch(INDEX_NAME) + .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) + .setQuery( + boolQuery().must(termQuery("online", true)) + .must( + boolQuery().should( + boolQuery().must(rangeQuery("ts").lt(System.currentTimeMillis() - (15 * 1000))).must(termQuery("type", "bs")) + ) + .should( + boolQuery().must(rangeQuery("ts").lt(System.currentTimeMillis() - (15 * 1000))).must(termQuery("type", "s")) + ) + ) + ) + .setVersion(true) + .setFrom(0) + .setSize(100) + .setExplain(true) + .get(); + assertNoFailures(response); + } + + public void testRestartPrimary_NoReplicas() throws Exception { + final String primary = internalCluster().startDataOnlyNode(); + createIndex(INDEX_NAME); + ensureYellow(INDEX_NAME); + + assertEquals(getNodeContainingPrimaryShard().getName(), primary); + + client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", "bar").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + if (randomBoolean()) { + flush(INDEX_NAME); + } else { + refresh(INDEX_NAME); + } + + internalCluster().restartNode(primary); + ensureYellow(INDEX_NAME); + assertDocCounts(1, primary); + } + + /** + * Tests whether segment replication supports realtime get requests and reads and parses source from the translog to serve strong reads. 
+ */ + public void testRealtimeGetRequestsSuccessful() { + final String primary = internalCluster().startDataOnlyNode(); + // refresh interval disabled to ensure refresh rate of index (when data is ready for search) doesn't affect realtime get + assertAcked( + prepareCreate(INDEX_NAME).setSettings(Settings.builder().put("index.refresh_interval", -1).put(indexSettings())) + .addAlias(new Alias("alias")) + ); + final String replica = internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + + final String id = routingKeyForShard(INDEX_NAME, 0); + + GetResponse response = client(replica).prepareGet(indexOrAlias(), "1").get(); + assertFalse(response.isExists()); + + // index doc 1 + client().prepareIndex(indexOrAlias()).setId("1").setSource("foo", "bar").get(); + + // non realtime get 1 + response = client().prepareGet(indexOrAlias(), "1").setRealtime(false).get(); + assertFalse(response.isExists()); + + // realtime get 1 + response = client(replica).prepareGet(indexOrAlias(), "1").get(); + assertTrue(response.isExists()); + assertThat(response.getIndex(), equalTo(INDEX_NAME)); + assertThat(response.getSourceAsMap().get("foo").toString(), equalTo("bar")); + + // index doc 2 + client().prepareIndex(indexOrAlias()).setId("2").setSource("foo2", "bar2").setRouting(id).get(); + + // realtime get 2 (with routing) + response = client(replica).prepareGet(indexOrAlias(), "2").setRouting(id).get(); + assertTrue(response.isExists()); + assertThat(response.getIndex(), equalTo(INDEX_NAME)); + assertThat(response.getSourceAsMap().get("foo2").toString(), equalTo("bar2")); + } + + public void testRealtimeGetRequestsUnsuccessful() { + final String primary = internalCluster().startDataOnlyNode(); + assertAcked( + prepareCreate(INDEX_NAME).setSettings( + Settings.builder().put("index.refresh_interval", -1).put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) + ).addAlias(new Alias("alias")) + ); + final String replica = internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + + final String id = routingKeyForShard(INDEX_NAME, 0); + final String routingOtherShard = routingKeyForShard(INDEX_NAME, 1); + + // index doc 1 + client().prepareIndex(indexOrAlias()).setId("1").setSource("foo", "bar").setRouting(id).get(); + + // non realtime get 1 + GetResponse response = client().prepareGet(indexOrAlias(), "1").setRealtime(false).get(); + assertFalse(response.isExists()); + + // realtime get 1 (preference = _replica) + response = client(replica).prepareGet(indexOrAlias(), "1").setPreference(Preference.REPLICA.type()).get(); + assertFalse(response.isExists()); + assertThat(response.getIndex(), equalTo(INDEX_NAME)); + + // realtime get 1 (with routing set) + response = client(replica).prepareGet(INDEX_NAME, "1").setRouting(routingOtherShard).get(); + assertFalse(response.isExists()); + assertThat(response.getIndex(), equalTo(INDEX_NAME)); + } + + /** + * Tests whether segment replication supports realtime MultiGet requests, reading and parsing source from the translog to serve strong reads.
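+ * Mirrors the realtime get coverage above, exercising the multi-get path across two shards, explicit routing, and a missing index.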
+ */ + public void testRealtimeMultiGetRequestsSuccessful() { + final String primary = internalCluster().startDataOnlyNode(); + // refresh interval disabled to ensure refresh rate of index (when data is ready for search) doesn't affect realtime multi get + assertAcked( + prepareCreate(INDEX_NAME).setSettings( + Settings.builder().put("index.refresh_interval", -1).put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) + ).addAlias(new Alias("alias")) + ); + final String replica = internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + + final String id = routingKeyForShard(INDEX_NAME, 0); + + // index doc 1 + client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", "bar").get(); + + // index doc 2 + client().prepareIndex(INDEX_NAME).setId("2").setSource("foo2", "bar2").setRouting(id).get(); + + // multi get non realtime 1 + MultiGetResponse mgetResponse = client().prepareMultiGet() + .add(new MultiGetRequest.Item(INDEX_NAME, "1")) + .add(new MultiGetRequest.Item("nonExistingIndex", "1")) + .setRealtime(false) + .get(); + assertThat(mgetResponse.getResponses().length, is(2)); + + assertThat(mgetResponse.getResponses()[0].getIndex(), is(INDEX_NAME)); + assertFalse(mgetResponse.getResponses()[0].isFailed()); + assertFalse(mgetResponse.getResponses()[0].getResponse().isExists()); + + // multi get realtime 1 + mgetResponse = client(replica).prepareMultiGet() + .add(new MultiGetRequest.Item(INDEX_NAME, "1")) + .add(new MultiGetRequest.Item(INDEX_NAME, "2").routing(id)) + .add(new MultiGetRequest.Item("nonExistingIndex", "1")) + .get(); + + assertThat(mgetResponse.getResponses().length, is(3)); + assertThat(mgetResponse.getResponses()[0].getIndex(), is(INDEX_NAME)); + assertFalse(mgetResponse.getResponses()[0].isFailed()); + assertThat(mgetResponse.getResponses()[0].getResponse().getSourceAsMap().get("foo").toString(), equalTo("bar")); + + assertThat(mgetResponse.getResponses()[1].getIndex(), is(INDEX_NAME)); + assertFalse(mgetResponse.getResponses()[1].isFailed()); + assertThat(mgetResponse.getResponses()[1].getResponse().getSourceAsMap().get("foo2").toString(), equalTo("bar2")); + + assertThat(mgetResponse.getResponses()[2].getIndex(), is("nonExistingIndex")); + assertTrue(mgetResponse.getResponses()[2].isFailed()); + assertThat(mgetResponse.getResponses()[2].getFailure().getMessage(), is("no such index [nonExistingIndex]")); + } + + public void testRealtimeMultiGetRequestsUnsuccessful() { + final String primary = internalCluster().startDataOnlyNode(); + assertAcked( + prepareCreate(INDEX_NAME).setSettings( + Settings.builder().put("index.refresh_interval", -1).put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) + ).addAlias(new Alias("alias")) + ); + final String replica = internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + + final String id = routingKeyForShard(INDEX_NAME, 0); + final String routingOtherShard = routingKeyForShard(INDEX_NAME, 1); + + // index doc 1 + client().prepareIndex(indexOrAlias()).setId("1").setSource("foo", "bar").setRouting(id).get(); + + // realtime multi get 1 (preference = _replica) + MultiGetResponse mgetResponse = client(replica).prepareMultiGet() + .add(new MultiGetRequest.Item(INDEX_NAME, "1")) + .setPreference(Preference.REPLICA.type()) + .add(new MultiGetRequest.Item("nonExistingIndex", "1")) + .get(); + assertThat(mgetResponse.getResponses().length, is(2)); + assertThat(mgetResponse.getResponses()[0].getIndex(), is(INDEX_NAME)); + 
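// the first item comes back as a miss rather than a failure, while the missing index below surfaces as a per-item failure. +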
assertFalse(mgetResponse.getResponses()[0].getResponse().isExists()); + + assertThat(mgetResponse.getResponses()[1].getIndex(), is("nonExistingIndex")); + assertTrue(mgetResponse.getResponses()[1].isFailed()); + + // realtime multi get 1 (routing set) + mgetResponse = client(replica).prepareMultiGet() + .add(new MultiGetRequest.Item(INDEX_NAME, "1").routing(routingOtherShard)) + .add(new MultiGetRequest.Item("nonExistingIndex", "1")) + .get(); + assertThat(mgetResponse.getResponses().length, is(2)); + assertThat(mgetResponse.getResponses()[0].getIndex(), is(INDEX_NAME)); + // expecting a miss since we explicitly route the request to a shard on which the doc doesn't exist + assertFalse(mgetResponse.getResponses()[0].getResponse().isExists()); + assertThat(mgetResponse.getResponses()[1].getIndex(), is("nonExistingIndex")); + assertTrue(mgetResponse.getResponses()[1].isFailed()); + + } + + /** + * Tests whether segment replication supports realtime termvector requests, reading and parsing source from the translog to serve strong reads. + */ + public void testRealtimeTermVectorRequestsSuccessful() throws IOException { + final String primary = internalCluster().startDataOnlyNode(); + XContentBuilder mapping = jsonBuilder().startObject() + .startObject("properties") + .startObject("field") + .field("type", "text") + .field("term_vector", "with_positions_offsets_payloads") + .field("analyzer", "tv_test") + .endObject() + .endObject() + .endObject(); + // refresh interval disabled to ensure refresh rate of index (when data is ready for search) doesn't affect realtime termvectors + assertAcked( + prepareCreate(INDEX_NAME).setMapping(mapping) + .addAlias(new Alias("alias")) + .setSettings( + Settings.builder() + .put(indexSettings()) + .put("index.analysis.analyzer.tv_test.tokenizer", "standard") + .put("index.refresh_interval", -1) + .putList("index.analysis.analyzer.tv_test.filter", "lowercase") + ) + ); + final String replica = internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + final String id = routingKeyForShard(INDEX_NAME, 0); + + TermVectorsResponse response = client(replica).prepareTermVectors(indexOrAlias(), "1").get(); + assertFalse(response.isExists()); + + // index doc 1 + client().prepareIndex(INDEX_NAME) + .setId(Integer.toString(1)) + .setSource(jsonBuilder().startObject().field("field", "the quick brown fox jumps over the lazy dog").endObject()) + .execute() + .actionGet(); + + // non realtime termvectors 1 + response = client().prepareTermVectors(indexOrAlias(), Integer.toString(1)).setRealtime(false).get(); + assertFalse(response.isExists()); + + // realtime termvectors 1 + TermVectorsRequestBuilder resp = client().prepareTermVectors(indexOrAlias(), Integer.toString(1)) + .setPayloads(true) + .setOffsets(true) + .setPositions(true) + .setRealtime(true) + .setSelectedFields(); + response = resp.execute().actionGet(); + assertThat(response.getIndex(), equalTo(INDEX_NAME)); + assertThat("doc id: " + 1 + " doesn't exist but should", response.isExists(), equalTo(true)); + Fields fields = response.getFields(); + assertThat(fields.size(), equalTo(1)); + + // index doc 2 with routing + client().prepareIndex(INDEX_NAME) + .setId(Integer.toString(2)) + .setRouting(id) + .setSource(jsonBuilder().startObject().field("field", "the quick brown fox jumps over the lazy dog").endObject()) + .execute() + .actionGet(); + + // realtime termvectors 2 with routing + resp = client().prepareTermVectors(indexOrAlias(), Integer.toString(2)) + .setPayloads(true) + .setOffsets(true) +
.setPositions(true) + .setRouting(id) + .setSelectedFields(); + response = resp.execute().actionGet(); + assertThat(response.getIndex(), equalTo(INDEX_NAME)); + assertThat("doc id: " + 2 + " doesn't exist but should", response.isExists(), equalTo(true)); + fields = response.getFields(); + assertThat(fields.size(), equalTo(1)); + + } + + public void testRealtimeTermVectorRequestsUnsuccessful() throws IOException { + final String primary = internalCluster().startDataOnlyNode(); + XContentBuilder mapping = jsonBuilder().startObject() + .startObject("properties") + .startObject("field") + .field("type", "text") + .field("term_vector", "with_positions_offsets_payloads") + .field("analyzer", "tv_test") + .endObject() + .endObject() + .endObject(); + // refresh interval disabled to ensure refresh rate of index (when data is ready for search) doesn't affect realtime termvectors + assertAcked( + prepareCreate(INDEX_NAME).setMapping(mapping) + .addAlias(new Alias("alias")) + .setSettings( + Settings.builder() + .put(indexSettings()) + .put("index.analysis.analyzer.tv_test.tokenizer", "standard") + .put("index.refresh_interval", -1) + .putList("index.analysis.analyzer.tv_test.filter", "lowercase") + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) + ) + ); + final String replica = internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + final String id = routingKeyForShard(INDEX_NAME, 0); + final String routingOtherShard = routingKeyForShard(INDEX_NAME, 1); + + // index doc 1 + client().prepareIndex(INDEX_NAME) + .setId(Integer.toString(1)) + .setSource(jsonBuilder().startObject().field("field", "the quick brown fox jumps over the lazy dog").endObject()) + .setRouting(id) + .execute() + .actionGet(); + + // non realtime termvectors 1 + TermVectorsResponse response = client().prepareTermVectors(indexOrAlias(), Integer.toString(1)).setRealtime(false).get(); + assertFalse(response.isExists()); + + // realtime termvectors (preference = _replica) + TermVectorsRequestBuilder resp = client(replica).prepareTermVectors(indexOrAlias(), Integer.toString(1)) + .setPayloads(true) + .setOffsets(true) + .setPositions(true) + .setPreference(Preference.REPLICA.type()) + .setRealtime(true) + .setSelectedFields(); + response = resp.execute().actionGet(); + + assertFalse(response.isExists()); + assertThat(response.getIndex(), equalTo(INDEX_NAME)); + + // realtime termvectors (with routing set) + resp = client(replica).prepareTermVectors(indexOrAlias(), Integer.toString(1)) + .setPayloads(true) + .setOffsets(true) + .setPositions(true) + .setRouting(routingOtherShard) + .setSelectedFields(); + response = resp.execute().actionGet(); + + assertFalse(response.isExists()); + assertThat(response.getIndex(), equalTo(INDEX_NAME)); + + } + + public void testReplicaAlreadyAtCheckpoint() throws Exception { + final List<String> nodes = new ArrayList<>(); + final String primaryNode = internalCluster().startDataOnlyNode(); + nodes.add(primaryNode); + final Settings settings = Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build(); + createIndex(INDEX_NAME, settings); + ensureGreen(INDEX_NAME); + // start a replica node; initially it will be empty with no shard assignment.
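+ // a second replica node is added below so that, once the primary is stopped, one replica is promoted and the other should already be at the same checkpoint.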
+ final String replicaNode = internalCluster().startDataOnlyNode(); + nodes.add(replicaNode); + final String replicaNode2 = internalCluster().startDataOnlyNode(); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2)) + ); + ensureGreen(INDEX_NAME); + + // index a doc. + client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", randomInt()).get(); + refresh(INDEX_NAME); + + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode)); + ensureYellowAndNoInitializingShards(INDEX_NAME); + IndexShard replica_1 = getIndexShard(replicaNode, INDEX_NAME); + IndexShard replica_2 = getIndexShard(replicaNode2, INDEX_NAME); + // wait until a replica is promoted & finishes engine flip, we don't care which one + AtomicReference<IndexShard> primary = new AtomicReference<>(); + assertBusy(() -> { + assertTrue("replica should be promoted as a primary", replica_1.routingEntry().primary() || replica_2.routingEntry().primary()); + primary.set(replica_1.routingEntry().primary() ? replica_1 : replica_2); + }); + + FlushRequest request = new FlushRequest(INDEX_NAME); + request.force(true); + primary.get().flush(request); + + assertBusy(() -> { + assertEquals( + replica_1.getLatestReplicationCheckpoint().getSegmentInfosVersion(), + replica_2.getLatestReplicationCheckpoint().getSegmentInfosVersion() + ); + }); + + assertBusy(() -> { + ClusterStatsResponse clusterStatsResponse = client().admin().cluster().prepareClusterStats().get(); + ReplicationStats replicationStats = clusterStatsResponse.getIndicesStats().getSegments().getReplicationStats(); + assertEquals(0L, replicationStats.maxBytesBehind); + assertEquals(0L, replicationStats.maxReplicationLag); + assertEquals(0L, replicationStats.totalBytesBehind); + }); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java index 3024eeb798b48..a7be63bc61bc2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java @@ -9,7 +9,6 @@ package org.opensearch.indices.replication; import org.opensearch.OpenSearchCorruptionException; -import org.opensearch.action.ActionFuture; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.reroute.ClusterRerouteResponse; import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsResponse; @@ -20,12 +19,14 @@ import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.opensearch.common.Priority; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.index.SegmentReplicationShardStats; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.junit.annotations.TestLogging; import org.opensearch.test.transport.MockTransportService; import org.opensearch.transport.TransportService; @@ -55,6 +56,7 @@ private void createIndex(int replicaCount) { * This test verifies happy path 
when primary shard is relocated newly added node (target) in the cluster. Before * relocation and after relocation documents are indexed and documents are verified */ + @TestLogging(reason = "Getting trace logs from replication,shard and allocation package", value = "org.opensearch.indices.replication:TRACE, org.opensearch.index.shard:TRACE, org.opensearch.cluster.routing.allocation:TRACE") public void testPrimaryRelocation() throws Exception { final String oldPrimary = internalCluster().startNode(); createIndex(1); @@ -217,6 +219,7 @@ public void testPrimaryRelocationWithSegRepFailure() throws Exception { * This test verifies primary recovery behavior with continuous ingestion * */ + @TestLogging(reason = "Enable trace logs from replication and recovery package", value = "org.opensearch.indices.recovery:TRACE,org.opensearch.indices.replication:TRACE") public void testRelocateWhileContinuouslyIndexingAndWaitingForRefresh() throws Exception { final String primary = internalCluster().startNode(); createIndex(1); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationResizeRequestIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationResizeRequestIT.java new file mode 100644 index 0000000000000..69411b2ff640a --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationResizeRequestIT.java @@ -0,0 +1,249 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication; + +import org.opensearch.action.admin.indices.shrink.ResizeType; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.Preference; +import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.index.query.TermsQueryBuilder; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; + +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; + +/** + * This test class verifies Resize Requests (Shrink, Split, Clone) with segment replication as the replication strategy. + */ +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 2) +public class SegmentReplicationResizeRequestIT extends SegmentReplicationBaseIT { + + public void testCreateShrinkIndexThrowsExceptionWhenReplicasBehind() throws Exception { + + // create index with -1 as refresh interval, as we are blocking segrep and want to control refreshes.
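+ // replication is blocked below, so the replicas stay behind (bytes_behind > 0) and the shrink request must be rejected.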
+ prepareCreate("test").setSettings( + Settings.builder() + .put(indexSettings()) + .put("index.refresh_interval", -1) + .put("index.number_of_replicas", 1) + .put("number_of_shards", 2) + ).get(); + + final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); + assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); + DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]); + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. + ensureGreen(); + + // block Segment Replication so that replicas never get the docs from primary + CountDownLatch latch = new CountDownLatch(1); + try (final Releasable ignored = blockReplication(List.of(discoveryNodes[0].getName()), latch)) { + final int docs = 500; + for (int i = 0; i < docs; i++) { + client().prepareIndex("test").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + + // block writes on index before performing shrink operation + client().admin() + .indices() + .prepareUpdateSettings("test") + .setSettings( + Settings.builder() + .put("index.routing.allocation.require._name", discoveryNodes[0].getName()) + .put("index.blocks.write", true) + ) + .get(); + ensureGreen(); + + // Trigger Shrink operation, as replicas don't have any docs it will throw exception that replicas haven't caught up + IllegalStateException exception = assertThrows( + IllegalStateException.class, + () -> client().admin() + .indices() + .prepareResizeIndex("test", "target") + .setResizeType(ResizeType.SHRINK) + .setSettings( + Settings.builder() + .put("index.number_of_replicas", 1) + .putNull("index.blocks.write") + .putNull("index.routing.allocation.require._name") + .build() + ) + .get() + ); + assertEquals( + "Replication still in progress for index [test]. Please wait for replication to complete and retry. " + + "Use the _cat/segment_replication/test api to check if the index is up to date (e.g. bytes_behind == 0).", + exception.getMessage() + ); + + } + + } + + public void testCreateSplitIndexWithSegmentReplicationBlocked() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(3); + + // create index with -1 as refresh interval as we are blocking segrep and we want to control refreshes. + prepareCreate("test").setSettings( + Settings.builder() + .put(indexSettings()) + .put("index.refresh_interval", -1) + .put("index.number_of_replicas", 1) + .put("number_of_shards", 3) + ).get(); + + final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); + assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); + DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]); + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. 
+ ensureGreen(); + + CountDownLatch latch = new CountDownLatch(1); + + // block Segment Replication so that replicas never get the docs from primary + try (final Releasable ignored = blockReplication(List.of(discoveryNodes[0].getName()), latch)) { + final int docs = 500; + for (int i = 0; i < docs; i++) { + client().prepareIndex("test").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + refresh(); + assertBusy(() -> { + assertHitCount( + client().prepareSearch("test") + .setQuery(new TermsQueryBuilder("foo", "bar")) + .setPreference(Preference.PRIMARY.type()) + .get(), + docs + ); + }); + + // block writes on index before performing split operation + client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.blocks.write", true)).get(); + ensureGreen(); + + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none") + ) + .get(); + + // Trigger split operation + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("test", "target") + .setResizeType(ResizeType.SPLIT) + .setSettings( + Settings.builder() + .put("index.number_of_replicas", 1) + .put("index.number_of_shards", 6) + .putNull("index.blocks.write") + .build() + ) + .get() + ); + ensureGreen(); + + // verify that all docs are present in new target index + assertHitCount( + client().prepareSearch("target") + .setQuery(new TermsQueryBuilder("foo", "bar")) + .setPreference(Preference.PRIMARY.type()) + .get(), + docs + ); + } + + } + + public void testCloneIndex() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(3); + + // create the index; segrep is blocked below, so the replicas never receive the docs + prepareCreate("test").setSettings( + Settings.builder().put(indexSettings()).put("index.number_of_replicas", 1).put("number_of_shards", randomIntBetween(1, 5)) + ).get(); + + final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); + assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); + DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]); + // ensure all shards are allocated, otherwise the ensureGreen below might not succeed.
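+ // unlike shrink, the clone below succeeds even though the replicas are still behind, since the target shards recover from the primaries.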
+ ensureGreen(); + + CountDownLatch latch = new CountDownLatch(1); + + // block Segment Replication so that replicas never get the docs from primary + try (final Releasable ignored = blockReplication(List.of(discoveryNodes[0].getName()), latch)) { + final int docs = 500; + for (int i = 0; i < docs; i++) { + client().prepareIndex("test").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + refresh(); + assertBusy(() -> { + assertHitCount( + client().prepareSearch("test") + .setQuery(new TermsQueryBuilder("foo", "bar")) + .setPreference(Preference.PRIMARY.type()) + .get(), + docs + ); + }); + + // block writes on index before performing clone operation + client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.blocks.write", true)).get(); + ensureGreen(); + + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none") + ) + .get(); + + // Trigger clone operation + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("test", "target") + .setResizeType(ResizeType.CLONE) + .setSettings(Settings.builder().put("index.number_of_replicas", 1).putNull("index.blocks.write").build()) + .get() + ); + ensureGreen(); + + // verify that all docs are present in new target index + assertHitCount( + client().prepareSearch("target") + .setQuery(new TermsQueryBuilder("foo", "bar")) + .setPreference(Preference.PRIMARY.type()) + .get(), + docs + ); + } + + } + +} diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationStatsIT.java index 159de1a681f53..89aef6f0be1a6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationStatsIT.java @@ -8,9 +8,15 @@ package org.opensearch.indices.replication; +import org.opensearch.action.admin.cluster.node.stats.NodeStats; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsResponse; +import org.opensearch.action.admin.indices.stats.CommonStatsFlags; +import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.Settings; +import org.opensearch.index.ReplicationStats; import org.opensearch.index.SegmentReplicationPerGroupStats; import org.opensearch.index.SegmentReplicationShardStats; import org.opensearch.index.shard.IndexShard; @@ -262,12 +268,12 @@ public void testMultipleIndices() throws Exception { internalCluster().startClusterManagerOnlyNode(); final String index_2 = "tst-index-2"; List<String> nodes = new ArrayList<>(); - final String primaryNode = internalCluster().startNode(); + final String primaryNode = internalCluster().startDataOnlyNode();
{ refresh(INDEX_NAME, index_2); waitForSearchableDocs(INDEX_NAME, numDocs, nodes); waitForSearchableDocs(index_2, numDocs, nodes); + ensureSearchable(INDEX_NAME, index_2); final IndexShard index_1_primary = getIndexShard(primaryNode, INDEX_NAME); final IndexShard index_2_primary = getIndexShard(primaryNode, index_2); @@ -285,37 +292,39 @@ public void testMultipleIndices() throws Exception { assertTrue(index_1_primary.routingEntry().primary()); assertTrue(index_2_primary.routingEntry().primary()); - // test both indices are returned in the response. - SegmentReplicationStatsResponse segmentReplicationStatsResponse = client().admin() - .indices() - .prepareSegmentReplicationStats() - .execute() - .actionGet(); + assertBusy(() -> { + // test both indices are returned in the response. + SegmentReplicationStatsResponse segmentReplicationStatsResponse = dataNodeClient().admin() + .indices() + .prepareSegmentReplicationStats() + .execute() + .actionGet(); - Map<String, List<SegmentReplicationPerGroupStats>> replicationStats = segmentReplicationStatsResponse.getReplicationStats(); - assertEquals(2, replicationStats.size()); - List<SegmentReplicationPerGroupStats> replicationPerGroupStats = replicationStats.get(INDEX_NAME); - assertEquals(1, replicationPerGroupStats.size()); - SegmentReplicationPerGroupStats perGroupStats = replicationPerGroupStats.get(0); - assertEquals(perGroupStats.getShardId(), index_1_primary.shardId()); - Set<SegmentReplicationShardStats> replicaStats = perGroupStats.getReplicaStats(); - assertEquals(1, replicaStats.size()); - for (SegmentReplicationShardStats replica : replicaStats) { - assertNotNull(replica.getCurrentReplicationState()); - } + Map<String, List<SegmentReplicationPerGroupStats>> replicationStats = segmentReplicationStatsResponse.getReplicationStats(); + assertEquals(2, replicationStats.size()); + List<SegmentReplicationPerGroupStats> replicationPerGroupStats = replicationStats.get(INDEX_NAME); + assertEquals(1, replicationPerGroupStats.size()); + SegmentReplicationPerGroupStats perGroupStats = replicationPerGroupStats.get(0); + assertEquals(perGroupStats.getShardId(), index_1_primary.shardId()); + Set<SegmentReplicationShardStats> replicaStats = perGroupStats.getReplicaStats(); + assertEquals(1, replicaStats.size()); + for (SegmentReplicationShardStats replica : replicaStats) { + assertNotNull(replica.getCurrentReplicationState()); + } - replicationPerGroupStats = replicationStats.get(index_2); - assertEquals(1, replicationPerGroupStats.size()); - perGroupStats = replicationPerGroupStats.get(0); - assertEquals(perGroupStats.getShardId(), index_2_primary.shardId()); - replicaStats = perGroupStats.getReplicaStats(); - assertEquals(1, replicaStats.size()); - for (SegmentReplicationShardStats replica : replicaStats) { - assertNotNull(replica.getCurrentReplicationState()); - } + replicationPerGroupStats = replicationStats.get(index_2); + assertEquals(1, replicationPerGroupStats.size()); + perGroupStats = replicationPerGroupStats.get(0); + assertEquals(perGroupStats.getShardId(), index_2_primary.shardId()); + replicaStats = perGroupStats.getReplicaStats(); + assertEquals(1, replicaStats.size()); + for (SegmentReplicationShardStats replica : replicaStats) { + assertNotNull(replica.getCurrentReplicationState()); + } + }, 30, TimeUnit.SECONDS); // test only single index queried. 
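+ // (the request below is scoped with setIndices(index_2), so only stats for index_2 should be returned)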
- segmentReplicationStatsResponse = client().admin()
+ SegmentReplicationStatsResponse segmentReplicationStatsResponse = dataNodeClient().admin()
.indices()
.prepareSegmentReplicationStats()
.setIndices(index_2)
@@ -357,4 +366,71 @@ public void testQueryAgainstDocRepIndex() { .actionGet(); assertTrue(segmentReplicationStatsResponse.getReplicationStats().isEmpty()); }
+
+ public void testSegmentReplicationNodeAndIndexStats() throws Exception {
+ logger.info("--> start primary node");
+ final String primaryNode = internalCluster().startNode();
+
+ logger.info("--> create index on node: {}", primaryNode);
+ assertAcked(prepareCreate(INDEX_NAME, Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2)));
+
+ ensureYellow();
+ logger.info("--> start first replica node");
+ final String replicaNode1 = internalCluster().startNode();
+
+ logger.info("--> start second replica node");
+ final String replicaNode2 = internalCluster().startNode();
+
+ ensureGreen();
+ CountDownLatch latch = new CountDownLatch(1);
+ // block replication
+ try (final Releasable ignored = blockReplication(List.of(replicaNode1, replicaNode2), latch)) {
+ // index another doc while blocked; it will not get replicated to the replicas.
+ Thread indexingThread = new Thread(() -> {
+ client().prepareIndex(INDEX_NAME).setId("2").setSource("foo2", randomInt()).get();
+ refresh(INDEX_NAME);
+ });
+
+ indexingThread.start();
+ indexingThread.join();
+ latch.await();
+
+ NodesStatsResponse nodesStatsResponse = client().admin()
+ .cluster()
+ .prepareNodesStats()
+ .clear()
+ .setIndices(new CommonStatsFlags(CommonStatsFlags.Flag.Segments))
+ .get();
+
+ for (NodeStats nodeStats : nodesStatsResponse.getNodes()) {
+ ReplicationStats replicationStats = nodeStats.getIndices().getSegments().getReplicationStats();
+ // primary node - should hold replication statistics
+ if (nodeStats.getNode().getName().equals(primaryNode)) {
+ assertTrue(replicationStats.getMaxBytesBehind() > 0);
+ assertTrue(replicationStats.getTotalBytesBehind() > 0);
+ assertTrue(replicationStats.getMaxReplicationLag() > 0);
+ // 2 replicas, so total bytes behind should be double the max
+ assertEquals(replicationStats.getMaxBytesBehind() * 2, replicationStats.getTotalBytesBehind());
+ }
+ // replica nodes - should hold empty replication statistics
+ if (nodeStats.getNode().getName().equals(replicaNode1) || nodeStats.getNode().getName().equals(replicaNode2)) {
+ assertEquals(0, replicationStats.getMaxBytesBehind());
+ assertEquals(0, replicationStats.getTotalBytesBehind());
+ assertEquals(0, replicationStats.getMaxReplicationLag());
+ }
+ }
+ // get replication statistics at index level
+ IndicesStatsResponse stats = client().admin().indices().prepareStats().execute().actionGet();
+
+ // stats should be non-zero when aggregated at the index level
+ ReplicationStats indexReplicationStats = stats.getIndex(INDEX_NAME).getTotal().getSegments().getReplicationStats();
+ assertNotNull(indexReplicationStats);
+ assertTrue(indexReplicationStats.getMaxBytesBehind() > 0);
+ assertTrue(indexReplicationStats.getTotalBytesBehind() > 0);
+ assertTrue(indexReplicationStats.getMaxReplicationLag() > 0);
+ assertEquals(2 * indexReplicationStats.getMaxBytesBehind(), indexReplicationStats.getTotalBytesBehind());
+ }
+
+ }
+ } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationSuiteIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationSuiteIT.java index
9025c1cc79884..8c045c1560dd3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationSuiteIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationSuiteIT.java @@ -8,13 +8,15 @@ package org.opensearch.indices.replication; -import org.junit.Before; +import org.apache.lucene.tests.util.LuceneTestCase; import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.Before; +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9499") @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, minNumDataNodes = 2) public class SegmentReplicationSuiteIT extends SegmentReplicationBaseIT { diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/settings/ArchivedIndexSettingsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/settings/ArchivedIndexSettingsIT.java new file mode 100644 index 0000000000000..8dc343abf8da2 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/indices/settings/ArchivedIndexSettingsIT.java @@ -0,0 +1,147 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.indices.settings;
+
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.plugins.Plugin;
+import org.opensearch.test.InternalTestCluster;
+import org.opensearch.test.OpenSearchIntegTestCase;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import static org.opensearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK;
+import static org.hamcrest.Matchers.startsWith;
+
+@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, supportsDedicatedMasters = false)
+public class ArchivedIndexSettingsIT extends OpenSearchIntegTestCase {
+ private volatile boolean installPlugin;
+
+ public void testArchiveSettings() throws Exception {
+ installPlugin = true;
+ // Set up the cluster with an index containing dummy settings (owned by the dummy plugin)
+ String oldClusterManagerNode = internalCluster().startClusterManagerOnlyNode();
+ String oldDataNode = internalCluster().startDataOnlyNode();
+ assertEquals(2, internalCluster().numDataAndClusterManagerNodes());
+ createIndex("test");
+ ensureYellow();
+ // Add dummy settings
+ client().admin()
+ .indices()
+ .prepareUpdateSettings("test")
+ .setSettings(Settings.builder().put("index.dummy", "foobar").put("index.dummy2", "foobar"))
+ .execute()
+ .actionGet();
+
+ // Remove the dummy plugin and replace the cluster manager node so that the stale plugin settings move to "archived".
+ installPlugin = false;
+ String newClusterManagerNode = internalCluster().startClusterManagerOnlyNode();
+ internalCluster().stopRandomNode(InternalTestCluster.nameFilter(oldClusterManagerNode));
+ internalCluster().restartNode(newClusterManagerNode);
+
+ // Verify that the archived settings exist.
+ assertBusy(() -> {
+ // Verify that cluster state is in recovered state.
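+ // (GatewayService.STATE_NOT_RECOVERED_BLOCK is removed once the new cluster manager completes cluster state recovery)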
+ assertFalse(client().admin().cluster().prepareState().get().getState().blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK));
+ assertTrue(
+ client().admin()
+ .indices()
+ .prepareGetSettings("test")
+ .get()
+ .getIndexToSettings()
+ .get("test")
+ .hasValue("archived.index.dummy")
+ );
+ assertTrue(
+ client().admin()
+ .indices()
+ .prepareGetSettings("test")
+ .get()
+ .getIndexToSettings()
+ .get("test")
+ .hasValue("archived.index.dummy2")
+ );
+ }, 30, TimeUnit.SECONDS);
+
+ // Updating an archived setting should fail on an open index.
+ IllegalArgumentException exception = expectThrows(
+ IllegalArgumentException.class,
+ () -> client().admin()
+ .indices()
+ .prepareUpdateSettings("test")
+ .setSettings(Settings.builder().putNull("archived.index.dummy"))
+ .execute()
+ .actionGet()
+ );
+ assertThat(
+ exception.getMessage(),
+ startsWith("Can't update non dynamic settings [[archived.index.dummy]] for open indices [[test")
+ );
+
+ // Close the index.
+ client().admin().indices().prepareClose("test").get();
+
+ // Remove archived.index.dummy explicitly.
+ assertTrue(
+ client().admin()
+ .indices()
+ .prepareUpdateSettings("test")
+ .setSettings(Settings.builder().putNull("archived.index.dummy"))
+ .execute()
+ .actionGet()
+ .isAcknowledged()
+ );
+
+ // Remove archived.index.dummy2 using wildcard.
+ assertTrue(
+ client().admin()
+ .indices()
+ .prepareUpdateSettings("test")
+ .setSettings(Settings.builder().putNull("archived.*"))
+ .execute()
+ .actionGet()
+ .isAcknowledged()
+ );
+
+ // Verify that archived settings are cleaned up successfully.
+ assertFalse(
+ client().admin().indices().prepareGetSettings("test").get().getIndexToSettings().get("test").hasValue("archived.index.dummy")
+ );
+ assertFalse(
+ client().admin().indices().prepareGetSettings("test").get().getIndexToSettings().get("test").hasValue("archived.index.dummy2")
+ );
+ }
+
+ @Override
+ protected Collection<Class<? extends Plugin>> nodePlugins() {
+ return installPlugin ?
Arrays.asList(DummySettingPlugin.class) : Collections.emptyList(); + } + + public static class DummySettingPlugin extends Plugin { + public static final Setting<String> DUMMY_SETTING = Setting.simpleString( + "index.dummy", + Setting.Property.IndexScope, + Setting.Property.Dynamic + ); + public static final Setting<String> DUMMY_SETTING2 = Setting.simpleString( + "index.dummy2", + Setting.Property.IndexScope, + Setting.Property.Dynamic + ); + + @Override + public List<Setting<?>> getSettings() { + return Arrays.asList(DUMMY_SETTING, DUMMY_SETTING2); + } + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/settings/UpdateNumberOfReplicasIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/settings/UpdateNumberOfReplicasIT.java index 386b28a1a017c..c73168ec6ad17 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/settings/UpdateNumberOfReplicasIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/settings/UpdateNumberOfReplicasIT.java @@ -47,11 +47,11 @@ import java.io.IOException; import java.util.EnumSet; -import static org.hamcrest.Matchers.equalTo; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.equalTo; @OpenSearchIntegTestCase.ClusterScope(minNumDataNodes = 2) public class UpdateNumberOfReplicasIT extends OpenSearchIntegTestCase { diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexDisableCloseAllIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexDisableCloseAllIT.java index 9fa78811017be..3579ec61f0120 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexDisableCloseAllIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexDisableCloseAllIT.java @@ -36,7 +36,6 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.test.OpenSearchIntegTestCase; - import org.junit.After; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java index 28bd5a6ae252d..87e5df8c48981 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java @@ -46,9 +46,9 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.util.set.Sets; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.IndexShard; @@ -56,8 +56,8 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.test.BackgroundIndexer; -import org.opensearch.test.OpenSearchIntegTestCase; import 
org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -287,7 +287,7 @@ public void testCloseWhileDeletingIndices() throws Exception { throw new AssertionError(e); } try { - assertAcked(client().admin().indices().prepareDelete(indexToDelete)); + assertAcked(client().admin().indices().prepareDelete(indexToDelete).setTimeout("60s")); } catch (final Exception e) { assertException(e, indexToDelete); } @@ -301,7 +301,7 @@ public void testCloseWhileDeletingIndices() throws Exception { throw new AssertionError(e); } try { - client().admin().indices().prepareClose(indexToClose).get(); + client().admin().indices().prepareClose(indexToClose).setTimeout("60s").get(); } catch (final Exception e) { assertException(e, indexToClose); } @@ -509,7 +509,7 @@ public Settings onNodeStopped(String nodeName) throws Exception { } /** - * Test for https://github.com/elastic/elasticsearch/issues/47276 which checks that the persisted metadata on a data node does not + * Test for <a href="https://github.com/elastic/elasticsearch/issues/47276">Elasticsearch issue #47276</a> which checks that the persisted metadata on a data node does not * become inconsistent when using replicated closed indices. */ public void testRelocatedClosedIndexIssue() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/state/OpenCloseIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/state/OpenCloseIndexIT.java index fde30f35d1b6d..0bf561c606a2d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/state/OpenCloseIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/state/OpenCloseIndexIT.java @@ -44,12 +44,11 @@ import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.Strings; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.rest.RestStatus; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.query.QueryBuilders; -import org.opensearch.core.rest.RestStatus; import org.opensearch.test.OpenSearchIntegTestCase; import java.io.IOException; @@ -301,16 +300,15 @@ public void testOpenWaitingForActiveShardsFailed() throws Exception { } public void testOpenCloseWithDocs() throws IOException, ExecutionException, InterruptedException { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("test") - .field("type", "keyword") - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("test") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .toString(); assertAcked(client().admin().indices().prepareCreate("test").setMapping(mapping)); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/state/ReopenWhileClosingIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/state/ReopenWhileClosingIT.java index 92eefefab7867..e93bd68dca583 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/state/ReopenWhileClosingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/state/ReopenWhileClosingIT.java @@ -32,16 +32,16 @@ package 
org.opensearch.indices.state; -import org.opensearch.action.ActionFuture; import org.opensearch.action.admin.indices.close.CloseIndexResponse; import org.opensearch.action.admin.indices.close.TransportVerifyShardBeforeCloseAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Glob; +import org.opensearch.common.action.ActionFuture; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.RunOnce; -import org.opensearch.common.lease.Releasable; import org.opensearch.core.common.Strings; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/state/SimpleIndexStateIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/state/SimpleIndexStateIT.java index b75e36efe1f2f..6411598abd938 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/state/SimpleIndexStateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/state/SimpleIndexStateIT.java @@ -34,7 +34,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; - import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; import org.opensearch.action.admin.indices.create.CreateIndexResponse; @@ -49,7 +48,6 @@ import org.opensearch.test.OpenSearchIntegTestCase; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; - import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java index 35f2b99c94625..1d5da9370cce3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java @@ -32,6 +32,8 @@ package org.opensearch.indices.stats; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.tests.util.LuceneTestCase.SuppressCodecs; import org.opensearch.action.DocWriteResponse; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -49,34 +51,40 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.index.IndexResponse; import org.opensearch.action.search.SearchType; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; +import org.opensearch.action.support.WriteRequest; import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.common.UUIDs; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexService; import 
org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; import org.opensearch.index.MergeSchedulerConfig; +import org.opensearch.index.TieredMergePolicyProvider; import org.opensearch.index.VersionType; import org.opensearch.index.cache.query.QueryCacheStats; import org.opensearch.index.engine.VersionConflictEngineException; import org.opensearch.index.query.QueryBuilders; +import org.opensearch.index.remote.RemoteSegmentStats; import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.translog.RemoteTranslogStats; import org.opensearch.index.translog.Translog; import org.opensearch.indices.IndicesQueryCache; import org.opensearch.indices.IndicesRequestCache; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.plugins.Plugin; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.InternalSettingsPlugin; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; -import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; @@ -99,6 +107,8 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAllSuccessful; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; @@ -112,7 +122,19 @@ @ClusterScope(scope = Scope.SUITE, numDataNodes = 2, numClientNodes = 0) @SuppressCodecs("*") // requires custom completion format -public class IndexStatsIT extends OpenSearchIntegTestCase { +public class IndexStatsIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + public IndexStatsIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }, + new Object[] { Settings.builder().put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT).build() } + ); + } @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { @@ -143,7 +165,7 @@ private Settings.Builder settingsBuilder() { return Settings.builder().put(indexSettings()); } - public void testFieldDataStats() { + public void testFieldDataStats() throws InterruptedException { assertAcked( client().admin() .indices() @@ -155,7 +177,8 @@ public void testFieldDataStats() { ensureGreen(); client().prepareIndex("test").setId("1").setSource("field", "value1", "field2", "value1").execute().actionGet(); client().prepareIndex("test").setId("2").setSource("field", "value2", "field2", "value2").execute().actionGet(); - client().admin().indices().prepareRefresh().execute().actionGet(); + refreshAndWaitForReplication(); + indexRandomForConcurrentSearch("test"); NodesStatsResponse nodesStats = client().admin().cluster().prepareNodesStats("data:true").setIndices(true).execute().actionGet(); assertThat( @@ -278,7 +301,8 @@ public void testClearAllCaches() throws Exception { client().admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet(); client().prepareIndex("test").setId("1").setSource("field", "value1").execute().actionGet(); client().prepareIndex("test").setId("2").setSource("field", "value2").execute().actionGet(); - client().admin().indices().prepareRefresh().execute().actionGet(); + refreshAndWaitForReplication(); + indexRandomForConcurrentSearch("test"); NodesStatsResponse nodesStats = client().admin().cluster().prepareNodesStats("data:true").setIndices(true).execute().actionGet(); assertThat( @@ -563,8 +587,8 @@ public void testNonThrottleStats() throws Exception { prepareCreate("test").setSettings( settingsBuilder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") .put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "1") .put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "10000") ) @@ -595,8 +619,8 @@ public void testThrottleStats() throws Exception { prepareCreate("test").setSettings( settingsBuilder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") .put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "1") .put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "1") .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC.name()) @@ -645,7 +669,7 @@ public void testSimpleStats() throws Exception { client().prepareIndex("test1").setId(Integer.toString(1)).setSource("field", "value").execute().actionGet(); client().prepareIndex("test1").setId(Integer.toString(2)).setSource("field", "value").execute().actionGet(); client().prepareIndex("test2").setId(Integer.toString(1)).setSource("field", "value").execute().actionGet(); - refresh(); + refreshAndWaitForReplication(); NumShards test1 = getNumShards("test1"); long test1ExpectedWrites = 2 * 
test1.dataCopies; @@ -660,7 +684,13 @@ public void testSimpleStats() throws Exception { assertThat(stats.getPrimaries().getIndexing().getTotal().getIndexFailedCount(), equalTo(0L)); assertThat(stats.getPrimaries().getIndexing().getTotal().isThrottled(), equalTo(false)); assertThat(stats.getPrimaries().getIndexing().getTotal().getThrottleTime().millis(), equalTo(0L)); - assertThat(stats.getTotal().getIndexing().getTotal().getIndexCount(), equalTo(totalExpectedWrites));
+
+ // This assert should not be done on segrep-enabled indices because it checks the Indexing/Write operations count on
+ // all primary and replica shards, and with segment replication those operations don't happen on replica shards. So we
+ // skip this check for segrep-enabled indices.
+ if (isSegmentReplicationEnabledForIndex("test1") == false && isSegmentReplicationEnabledForIndex("test2") == false) {
+ assertThat(stats.getTotal().getIndexing().getTotal().getIndexCount(), equalTo(totalExpectedWrites));
+ }
assertThat(stats.getTotal().getStore(), notNullValue()); assertThat(stats.getTotal().getMerge(), notNullValue()); assertThat(stats.getTotal().getFlush(), notNullValue()); @@ -803,6 +833,7 @@ public void testMergeStats() { client().admin().indices().prepareForceMerge().setMaxNumSegments(1).execute().actionGet(); stats = client().admin().indices().prepareStats().setMerge(true).execute().actionGet(); + refreshAndWaitForReplication(); assertThat(stats.getTotal().getMerge(), notNullValue()); assertThat(stats.getTotal().getMerge().getTotal(), greaterThan(0L)); } @@ -829,7 +860,7 @@ public void testSegmentsStats() { client().admin().indices().prepareFlush().get(); client().admin().indices().prepareForceMerge().setMaxNumSegments(1).execute().actionGet(); - client().admin().indices().prepareRefresh().get(); + refreshAndWaitForReplication(); stats = client().admin().indices().prepareStats().setSegments(true).get(); assertThat(stats.getTotal().getSegments(), notNullValue()); @@ -847,7 +878,7 @@ public void testAllFlags() throws Exception { client().prepareIndex("test_index").setId(Integer.toString(2)).setSource("field", "value").execute().actionGet(); client().prepareIndex("test_index_2").setId(Integer.toString(1)).setSource("field", "value").execute().actionGet(); - client().admin().indices().prepareRefresh().execute().actionGet(); + refreshAndWaitForReplication(); IndicesStatsRequestBuilder builder = client().admin().indices().prepareStats(); Flag[] values = CommonStatsFlags.Flag.values(); for (Flag flag : values) { @@ -1012,7 +1043,10 @@ public void testCompletionFieldsParam() throws Exception { ); ensureGreen(); - client().prepareIndex("test1").setId(Integer.toString(1)).setSource("{\"bar\":\"bar\",\"baz\":\"baz\"}", XContentType.JSON).get(); + client().prepareIndex("test1")
+ .setId(Integer.toString(1))
+ .setSource("{\"bar\":\"bar\",\"baz\":\"baz\"}", MediaTypeRegistry.JSON)
+ .get();
refresh(); IndicesStatsRequestBuilder builder = client().admin().indices().prepareStats(); @@ -1357,7 +1391,7 @@ public void testConcurrentIndexingAndStatsRequests() throws BrokenBarrierExcepti } while (!stop.get()) { final String id = Integer.toString(idGenerator.incrementAndGet()); - final IndexResponse response = client().prepareIndex("test").setId(id).setSource("{}", XContentType.JSON).get(); + final IndexResponse response = client().prepareIndex("test").setId(id).setSource("{}", MediaTypeRegistry.JSON).get(); assertThat(response.getResult(), equalTo(DocWriteResponse.Result.CREATED)); } }); @@ -1415,6 +1449,51 @@ public
void testConcurrentIndexingAndStatsRequests() throws BrokenBarrierExcepti assertThat(executionFailures.get(), emptyCollectionOf(Exception.class)); }
+
+ public void testZeroRemoteStoreStatsOnNonRemoteStoreIndex() {
+ String indexName = "test-index";
+ createIndex(indexName, Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0).build());
+ ensureGreen(indexName);
+ assertEquals(
+ RestStatus.CREATED,
+ client().prepareIndex(indexName)
+ .setId(UUIDs.randomBase64UUID())
+ .setSource("field", "value1", "field2", "value1")
+ .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
+ .get()
+ .status()
+ );
+ refreshAndWaitForReplication();
+ ShardStats shard = client().admin().indices().prepareStats(indexName).setSegments(true).setTranslog(true).get().getShards()[0];
+ RemoteSegmentStats remoteSegmentStatsFromIndexStats = shard.getStats().getSegments().getRemoteSegmentStats();
+ assertZeroRemoteSegmentStats(remoteSegmentStatsFromIndexStats);
+ RemoteTranslogStats remoteTranslogStatsFromIndexStats = shard.getStats().getTranslog().getRemoteTranslogStats();
+ assertZeroRemoteTranslogStats(remoteTranslogStatsFromIndexStats);
+
+ NodesStatsResponse nodesStatsResponse = client().admin().cluster().prepareNodesStats(primaryNodeName(indexName)).get();
+ RemoteSegmentStats remoteSegmentStatsFromNodesStats = nodesStatsResponse.getNodes()
+ .get(0)
+ .getIndices()
+ .getSegments()
+ .getRemoteSegmentStats();
+ assertZeroRemoteSegmentStats(remoteSegmentStatsFromNodesStats);
+ RemoteTranslogStats remoteTranslogStatsFromNodesStats = nodesStatsResponse.getNodes()
+ .get(0)
+ .getIndices()
+ .getTranslog()
+ .getRemoteTranslogStats();
+ assertZeroRemoteTranslogStats(remoteTranslogStatsFromNodesStats);
+ }
+
+ private void assertZeroRemoteSegmentStats(RemoteSegmentStats remoteSegmentStats) {
+ // Compare with a fresh object, since all values in a newly constructed stats object default to 0
+ assertEquals(new RemoteSegmentStats(), remoteSegmentStats);
+ }
+
+ private void assertZeroRemoteTranslogStats(RemoteTranslogStats remoteTranslogStats) {
+ // Compare with a fresh object, since all values in a newly constructed stats object default to 0
+ assertEquals(new RemoteTranslogStats(), remoteTranslogStats);
+ }
+
/** * Persist the global checkpoint on all shards of the given index into disk. * This makes sure that the persisted global checkpoint on those shards will equal to the in-memory value.
@@ -1431,4 +1510,37 @@ private void persistGlobalCheckpoint(String index) throws Exception { } } }
+
+ public void testSegmentReplicationStats() {
+ String indexName = "test-index";
+ createIndex(
+ indexName,
+ Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 1).build()
+ );
+
+ ensureGreen(indexName);
+
+ IndicesStatsRequestBuilder builder = client().admin().indices().prepareStats();
+ IndicesStatsResponse stats = builder.execute().actionGet();
+
+ // a document replication enabled index should still return a non-null (empty) segment replication stats object
+ assertNotNull(stats.getIndex(indexName).getTotal().getSegments().getReplicationStats());
+
+ indexName = "test-index2";
+ createIndex(
+ indexName,
+ Settings.builder()
+ .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
+ .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)
+ .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT)
+ .build()
+ );
+ ensureGreen(indexName);
+
+ builder = client().admin().indices().prepareStats();
+ stats = builder.execute().actionGet();
+
+ // a segment replication enabled index should return segment replication stats
+ assertNotNull(stats.getIndex(indexName).getTotal().getSegments().getReplicationStats());
+ } } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/store/IndicesStoreIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/store/IndicesStoreIntegrationIT.java index 957aa7fd05a13..0c6631b8d2307 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/store/IndicesStoreIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/store/IndicesStoreIntegrationIT.java @@ -32,6 +32,8 @@ package org.opensearch.indices.store; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.logging.log4j.Logger; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; @@ -52,17 +54,17 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.env.Environment; -import org.opensearch.env.NodeEnvironment; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.env.Environment; +import org.opensearch.env.NodeEnvironment; import org.opensearch.indices.IndicesService; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; -import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.disruption.BlockClusterStateProcessing; import org.opensearch.test.transport.MockTransportService; import org.opensearch.transport.ConnectTransportException; @@ -79,13 +81,22 @@ import java.util.concurrent.TimeUnit; import static java.lang.Thread.sleep; -import static org.opensearch.test.NodeRoles.nonDataNode; import static org.opensearch.test.NodeRoles.nonClusterManagerNode; +import static org.opensearch.test.NodeRoles.nonDataNode; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; @ClusterScope(scope =
Scope.TEST, numDataNodes = 0) -public class IndicesStoreIntegrationIT extends OpenSearchIntegTestCase { +public class IndicesStoreIntegrationIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + public IndicesStoreIntegrationIT(Settings nodeSettings) { + super(nodeSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return remoteStoreSettings; + } + @Override protected Settings nodeSettings(int nodeOrdinal) { // simplify this and only use a single data path return Settings.builder() diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/template/IndexTemplateBlocksIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/template/IndexTemplateBlocksIT.java index 21e3e58d4d091..f2852de865b27 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/template/IndexTemplateBlocksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/template/IndexTemplateBlocksIT.java @@ -34,7 +34,6 @@ import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.admin.indices.template.get.GetIndexTemplatesResponse; - import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/template/SimpleIndexTemplateIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/template/SimpleIndexTemplateIT.java index a6381b4450010..14be51e977745 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/template/SimpleIndexTemplateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/template/SimpleIndexTemplateIT.java @@ -32,7 +32,6 @@ package org.opensearch.indices.template; -import org.junit.After; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.admin.indices.alias.get.GetAliasesResponse; @@ -45,12 +44,12 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.AliasMetadata; import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.core.common.ParsingException; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsException; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.mapper.MapperParsingException; import org.opensearch.index.query.QueryBuilders; import org.opensearch.indices.InvalidAliasNameException; @@ -59,6 +58,7 @@ import org.opensearch.search.SearchHit; import org.opensearch.test.InternalSettingsPlugin; import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.After; import java.io.IOException; import java.util.ArrayList; @@ -69,6 +69,11 @@ import java.util.List; import java.util.Set; +import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static 
org.opensearch.test.hamcrest.OpenSearchAssertions.assertRequestBuilderThrows; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; @@ -79,11 +84,6 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; -import static org.opensearch.index.query.QueryBuilders.termQuery; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertRequestBuilderThrows; public class SimpleIndexTemplateIT extends OpenSearchIntegTestCase { @@ -478,7 +478,7 @@ public void testBrokenMapping() throws Exception { .indices() .preparePutTemplate("template_1") .setPatterns(Collections.singletonList("te*")) - .setMapping("{\"foo\": \"abcde\"}", XContentType.JSON) + .setMapping("{\"foo\": \"abcde\"}", MediaTypeRegistry.JSON) .get() ); assertThat(e.getMessage(), containsString("Failed to parse mapping ")); @@ -591,7 +591,7 @@ public void testIndexTemplateWithAliasesInSource() { + " }\n" + "}" ), - XContentType.JSON + MediaTypeRegistry.JSON ) .get(); @@ -803,8 +803,8 @@ public void testStrictAliasParsingInIndicesCreatedViaTemplates() throws Exceptio .addAlias(new Alias("alias4").filter(termQuery("field", "value"))) .get(); - client().prepareIndex("a1").setId("test").setSource("{}", XContentType.JSON).get(); - BulkResponse response = client().prepareBulk().add(new IndexRequest("a2").id("test").source("{}", XContentType.JSON)).get(); + client().prepareIndex("a1").setId("test").setSource("{}", MediaTypeRegistry.JSON).get(); + BulkResponse response = client().prepareBulk().add(new IndexRequest("a2").id("test").source("{}", MediaTypeRegistry.JSON)).get(); assertThat(response.hasFailures(), is(false)); assertThat(response.getItems()[0].isFailed(), equalTo(false)); assertThat(response.getItems()[0].getIndex(), equalTo("a2")); @@ -819,9 +819,9 @@ public void testStrictAliasParsingInIndicesCreatedViaTemplates() throws Exceptio // So the aliases defined in the index template for this index will not fail // even though the fields in the alias fields don't exist yet and indexing into // an index that doesn't exist yet will succeed - client().prepareIndex("b1").setId("test").setSource("{}", XContentType.JSON).get(); + client().prepareIndex("b1").setId("test").setSource("{}", MediaTypeRegistry.JSON).get(); - response = client().prepareBulk().add(new IndexRequest("b2").id("test").source("{}", XContentType.JSON)).get(); + response = client().prepareBulk().add(new IndexRequest("b2").id("test").source("{}", MediaTypeRegistry.JSON)).get(); assertThat(response.hasFailures(), is(false)); assertThat(response.getItems()[0].isFailed(), equalTo(false)); assertThat(response.getItems()[0].getId(), equalTo("test")); @@ -854,7 +854,7 @@ public void testCombineTemplates() throws Exception { + " }\n" + " }\n" + " }\n", - XContentType.JSON + MediaTypeRegistry.JSON ) .get(); @@ -992,7 +992,7 @@ public void testPartitionedTemplate() throws Exception { .indices() .preparePutTemplate("template_2") .setPatterns(Collections.singletonList("te*")) - .setMapping("{\"_routing\":{\"required\":false}}", XContentType.JSON) + .setMapping("{\"_routing\":{\"required\":false}}", MediaTypeRegistry.JSON) 
.setSettings(Settings.builder().put("index.number_of_shards", "6").put("index.routing_partition_size", "3")) .get() ); diff --git a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java index 49d09cada59b3..9481a6116cdbc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java @@ -32,9 +32,11 @@ package org.opensearch.ingest; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchParseException; -import org.opensearch.ExceptionsHelper; import org.opensearch.action.DocWriteResponse; import org.opensearch.action.bulk.BulkItemResponse; import org.opensearch.action.bulk.BulkRequest; @@ -51,12 +53,13 @@ import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.update.UpdateRequest; import org.opensearch.client.Requests; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -73,7 +76,16 @@ import static org.hamcrest.core.Is.is; @OpenSearchIntegTestCase.ClusterScope(minNumDataNodes = 2) -public class IngestClientIT extends OpenSearchIntegTestCase { +public class IngestClientIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public IngestClientIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return replicationSettings; + } @Override protected Settings nodeSettings(int nodeOrdinal) { @@ -100,7 +112,7 @@ public void testSimulate() throws Exception { .endArray() .endObject() ); - client().admin().cluster().preparePutPipeline("_id", pipelineSource, XContentType.JSON).get(); + client().admin().cluster().preparePutPipeline("_id", pipelineSource, MediaTypeRegistry.JSON).get(); GetPipelineResponse getResponse = client().admin().cluster().prepareGetPipeline("_id").get(); assertThat(getResponse.isFound(), is(true)); assertThat(getResponse.pipelines().size(), equalTo(1)); @@ -122,9 +134,9 @@ public void testSimulate() throws Exception { ); SimulatePipelineResponse response; if (randomBoolean()) { - response = client().admin().cluster().prepareSimulatePipeline(bytes, XContentType.JSON).setId("_id").get(); + response = client().admin().cluster().prepareSimulatePipeline(bytes, MediaTypeRegistry.JSON).setId("_id").get(); } else { - SimulatePipelineRequest request = new SimulatePipelineRequest(bytes, XContentType.JSON); + SimulatePipelineRequest request = new SimulatePipelineRequest(bytes, MediaTypeRegistry.JSON); request.setId("_id"); response = client().admin().cluster().simulatePipeline(request).get(); } @@ -160,7 +172,7 @@ public void testBulkWithIngestFailures() throws Exception { .endArray() .endObject() ); - PutPipelineRequest putPipelineRequest = new PutPipelineRequest("_id", source, XContentType.JSON); + PutPipelineRequest 
putPipelineRequest = new PutPipelineRequest("_id", source, MediaTypeRegistry.JSON); client().admin().cluster().putPipeline(putPipelineRequest).get(); int numRequests = scaledRandomIntBetween(32, 128); @@ -211,7 +223,7 @@ public void testBulkWithUpsert() throws Exception { .endArray() .endObject() ); - PutPipelineRequest putPipelineRequest = new PutPipelineRequest("_id", source, XContentType.JSON); + PutPipelineRequest putPipelineRequest = new PutPipelineRequest("_id", source, MediaTypeRegistry.JSON); client().admin().cluster().putPipeline(putPipelineRequest).get(); BulkRequest bulkRequest = new BulkRequest(); @@ -220,7 +232,7 @@ public void testBulkWithUpsert() throws Exception { bulkRequest.add(indexRequest); UpdateRequest updateRequest = new UpdateRequest("index", "2"); updateRequest.doc("{}", Requests.INDEX_CONTENT_TYPE); - updateRequest.upsert("{\"field1\":\"upserted_val\"}", XContentType.JSON).upsertRequest().setPipeline("_id"); + updateRequest.upsert("{\"field1\":\"upserted_val\"}", MediaTypeRegistry.JSON).upsertRequest().setPipeline("_id"); bulkRequest.add(updateRequest); BulkResponse response = client().bulk(bulkRequest).actionGet(); @@ -246,7 +258,7 @@ public void test() throws Exception { .endArray() .endObject() ); - PutPipelineRequest putPipelineRequest = new PutPipelineRequest("_id", source, XContentType.JSON); + PutPipelineRequest putPipelineRequest = new PutPipelineRequest("_id", source, MediaTypeRegistry.JSON); client().admin().cluster().putPipeline(putPipelineRequest).get(); GetPipelineRequest getPipelineRequest = new GetPipelineRequest("_id"); @@ -290,7 +302,7 @@ public void testPutWithPipelineFactoryError() throws Exception { .endArray() .endObject() ); - PutPipelineRequest putPipelineRequest = new PutPipelineRequest("_id2", source, XContentType.JSON); + PutPipelineRequest putPipelineRequest = new PutPipelineRequest("_id2", source, MediaTypeRegistry.JSON); Exception e = expectThrows( OpenSearchParseException.class, () -> client().admin().cluster().putPipeline(putPipelineRequest).actionGet() @@ -314,7 +326,7 @@ public void testWithDedicatedClusterManager() throws Exception { .endArray() .endObject() ); - PutPipelineRequest putPipelineRequest = new PutPipelineRequest("_id", source, XContentType.JSON); + PutPipelineRequest putPipelineRequest = new PutPipelineRequest("_id", source, MediaTypeRegistry.JSON); client().admin().cluster().putPipeline(putPipelineRequest).get(); BulkItemResponse item = client(clusterManagerOnlyNode).prepareBulk() @@ -340,7 +352,7 @@ public void testPipelineOriginHeader() throws Exception { source.endArray(); } source.endObject(); - PutPipelineRequest putPipelineRequest = new PutPipelineRequest("1", BytesReference.bytes(source), XContentType.JSON); + PutPipelineRequest putPipelineRequest = new PutPipelineRequest("1", BytesReference.bytes(source), MediaTypeRegistry.JSON); client().admin().cluster().putPipeline(putPipelineRequest).get(); } { @@ -357,7 +369,7 @@ public void testPipelineOriginHeader() throws Exception { source.endArray(); } source.endObject(); - PutPipelineRequest putPipelineRequest = new PutPipelineRequest("2", BytesReference.bytes(source), XContentType.JSON); + PutPipelineRequest putPipelineRequest = new PutPipelineRequest("2", BytesReference.bytes(source), MediaTypeRegistry.JSON); client().admin().cluster().putPipeline(putPipelineRequest).get(); } { @@ -373,13 +385,13 @@ public void testPipelineOriginHeader() throws Exception { source.endArray(); } source.endObject(); - PutPipelineRequest putPipelineRequest = new 
PutPipelineRequest("3", BytesReference.bytes(source), XContentType.JSON); + PutPipelineRequest putPipelineRequest = new PutPipelineRequest("3", BytesReference.bytes(source), MediaTypeRegistry.JSON); client().admin().cluster().putPipeline(putPipelineRequest).get(); } Exception e = expectThrows(Exception.class, () -> { IndexRequest indexRequest = new IndexRequest("test"); - indexRequest.source("{}", XContentType.JSON); + indexRequest.source("{}", MediaTypeRegistry.JSON); indexRequest.setPipeline("1"); client().index(indexRequest).get(); }); @@ -413,7 +425,7 @@ public void testPipelineProcessorOnFailure() throws Exception { source.endArray(); } source.endObject(); - PutPipelineRequest putPipelineRequest = new PutPipelineRequest("1", BytesReference.bytes(source), XContentType.JSON); + PutPipelineRequest putPipelineRequest = new PutPipelineRequest("1", BytesReference.bytes(source), MediaTypeRegistry.JSON); client().admin().cluster().putPipeline(putPipelineRequest).get(); } { @@ -430,7 +442,7 @@ public void testPipelineProcessorOnFailure() throws Exception { source.endArray(); } source.endObject(); - PutPipelineRequest putPipelineRequest = new PutPipelineRequest("2", BytesReference.bytes(source), XContentType.JSON); + PutPipelineRequest putPipelineRequest = new PutPipelineRequest("2", BytesReference.bytes(source), MediaTypeRegistry.JSON); client().admin().cluster().putPipeline(putPipelineRequest).get(); } { @@ -446,11 +458,11 @@ public void testPipelineProcessorOnFailure() throws Exception { source.endArray(); } source.endObject(); - PutPipelineRequest putPipelineRequest = new PutPipelineRequest("3", BytesReference.bytes(source), XContentType.JSON); + PutPipelineRequest putPipelineRequest = new PutPipelineRequest("3", BytesReference.bytes(source), MediaTypeRegistry.JSON); client().admin().cluster().putPipeline(putPipelineRequest).get(); } - client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).setPipeline("1").get(); + client().prepareIndex("test").setId("1").setSource("{}", MediaTypeRegistry.JSON).setPipeline("1").get(); Map<String, Object> inserted = client().prepareGet("test", "1").get().getSourceAsMap(); assertThat(inserted.get("readme"), equalTo("pipeline with id [3] is a bad pipeline")); } diff --git a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java index 4b4a0d9d0157c..4c949e11459ab 100644 --- a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java @@ -32,13 +32,17 @@ package org.opensearch.ingest; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.OpenSearchParseException; import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.node.NodeService; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; @@ -52,12 +56,10 @@ import static org.hamcrest.Matchers.notNullValue; 
@OpenSearchIntegTestCase.ClusterScope(numDataNodes = 0, numClientNodes = 0, scope = OpenSearchIntegTestCase.Scope.TEST) -public class IngestProcessorNotInstalledOnAllNodesIT extends OpenSearchIntegTestCase { +public class IngestProcessorNotInstalledOnAllNodesIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { - private final BytesReference pipelineSource; - private volatile boolean installPlugin; - - public IngestProcessorNotInstalledOnAllNodesIT() throws IOException { + public IngestProcessorNotInstalledOnAllNodesIT(Settings settings) throws IOException { + super(settings); pipelineSource = BytesReference.bytes( jsonBuilder().startObject() .startArray("processors") @@ -70,6 +72,14 @@ public IngestProcessorNotInstalledOnAllNodesIT() throws IOException { ); } + @ParametersFactory + public static Collection<Object[]> parameters() { + return replicationSettings; + } + + private final BytesReference pipelineSource; + private volatile boolean installPlugin; + @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return installPlugin ? Arrays.asList(IngestTestPlugin.class) : Collections.emptyList(); @@ -84,7 +94,7 @@ public void testFailPipelineCreation() throws Exception { ensureStableCluster(2, node2); try { - client().admin().cluster().preparePutPipeline("_id", pipelineSource, XContentType.JSON).get(); + client().admin().cluster().preparePutPipeline("_id", pipelineSource, MediaTypeRegistry.JSON).get(); fail("exception expected"); } catch (OpenSearchParseException e) { assertThat(e.getMessage(), containsString("Processor type [test] is not installed on node")); @@ -97,7 +107,7 @@ public void testFailPipelineCreationProcessorNotInstalledOnClusterManagerNode() internalCluster().startNode(); try { - client().admin().cluster().preparePutPipeline("_id", pipelineSource, XContentType.JSON).get(); + client().admin().cluster().preparePutPipeline("_id", pipelineSource, MediaTypeRegistry.JSON).get(); fail("exception expected"); } catch (OpenSearchParseException e) { assertThat(e.getMessage(), equalTo("No processor type exists with name [test]")); @@ -110,7 +120,7 @@ public void testFailStartNode() throws Exception { installPlugin = true; String node1 = internalCluster().startNode(); - AcknowledgedResponse response = client().admin().cluster().preparePutPipeline("_id", pipelineSource, XContentType.JSON).get(); + AcknowledgedResponse response = client().admin().cluster().preparePutPipeline("_id", pipelineSource, MediaTypeRegistry.JSON).get(); assertThat(response.isAcknowledged(), is(true)); Pipeline pipeline = internalCluster().getInstance(NodeService.class, node1).getIngestService().getPipeline("_id"); assertThat(pipeline, notNullValue()); diff --git a/server/src/internalClusterTest/java/org/opensearch/mget/SimpleMgetIT.java b/server/src/internalClusterTest/java/org/opensearch/mget/SimpleMgetIT.java index 7109d8e331ce3..2f0d4959d217b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/mget/SimpleMgetIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/mget/SimpleMgetIT.java @@ -32,6 +32,8 @@ package org.opensearch.mget; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.OpenSearchException; import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.get.MultiGetItemResponse; @@ -39,17 +41,20 @@ import org.opensearch.action.get.MultiGetRequestBuilder; import org.opensearch.action.get.MultiGetResponse; import org.opensearch.cluster.metadata.IndexMetadata; -import 
org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.search.fetch.subphase.FetchSourceContext; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; import java.util.Map; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -57,7 +62,19 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; -public class SimpleMgetIT extends OpenSearchIntegTestCase { +public class SimpleMgetIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public SimpleMgetIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } public void testThatMgetShouldWorkWithOneIndexMissing() throws IOException { createIndex("test"); @@ -159,7 +176,7 @@ public void testThatSourceFilteringIsSupported() throws Exception { .endObject() ); for (int i = 0; i < 100; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource(sourceBytesRef, XContentType.JSON).get(); + client().prepareIndex("test").setId(Integer.toString(i)).setSource(sourceBytesRef, MediaTypeRegistry.JSON).get(); } MultiGetRequestBuilder request = client().prepareMultiGet(); diff --git a/server/src/internalClusterTest/java/org/opensearch/nodesinfo/SimpleNodesInfoIT.java b/server/src/internalClusterTest/java/org/opensearch/nodesinfo/SimpleNodesInfoIT.java index 086badb52a46c..7e066a610b82c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/nodesinfo/SimpleNodesInfoIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/nodesinfo/SimpleNodesInfoIT.java @@ -45,9 +45,7 @@ import java.util.List; import static org.opensearch.action.admin.cluster.node.info.NodesInfoRequest.Metric.INDICES; - import static org.opensearch.client.Requests.nodesInfoRequest; - import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; diff --git a/server/src/internalClusterTest/java/org/opensearch/nodestats/NodeStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/nodestats/NodeStatsIT.java new file mode 100644 index 0000000000000..f270cb1399072 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/nodestats/NodeStatsIT.java @@ -0,0 +1,276 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.nodestats; + +import org.opensearch.ExceptionsHelper; +import org.opensearch.action.DocWriteResponse; +import org.opensearch.action.bulk.BulkItemResponse; +import org.opensearch.action.bulk.BulkRequest; +import org.opensearch.action.bulk.BulkResponse; +import org.opensearch.action.delete.DeleteRequest; +import org.opensearch.action.delete.DeleteResponse; +import org.opensearch.action.index.IndexRequest; +import org.opensearch.action.index.IndexResponse; +import org.opensearch.action.update.UpdateRequest; +import org.opensearch.action.update.UpdateResponse; +import org.opensearch.index.IndexNotFoundException; +import org.opensearch.index.engine.DocumentMissingException; +import org.opensearch.index.engine.VersionConflictEngineException; +import org.opensearch.index.shard.IndexingStats.Stats.DocStatusStats; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; +import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import org.hamcrest.MatcherAssert; + +import java.util.Arrays; +import java.util.Comparator; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; + +import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; + +@ClusterScope(scope = Scope.TEST, numDataNodes = 1, numClientNodes = 0, supportsDedicatedMasters = false) +public class NodeStatsIT extends OpenSearchIntegTestCase { + + private final DocStatusStats expectedDocStatusStats = new DocStatusStats(); + private static final String FIELD = "dummy_field"; + private static final String VALUE = "dummy_value"; + private static final Map<String, Object> SOURCE = singletonMap(FIELD, VALUE); + + public void testNodeIndicesStatsDocStatusStatsIndexBulk() { + { // Testing Index + final String INDEX = "test_index"; + final String ID = "id"; + { // Testing Normal Index + IndexResponse response = client().index(new IndexRequest(INDEX).id(ID).source(SOURCE)).actionGet(); + updateExpectedDocStatusCounter(response); + + MatcherAssert.assertThat(response.getResult(), equalTo(DocWriteResponse.Result.CREATED)); + assertDocStatusStats(); + } + { // Testing Missing Alias + updateExpectedDocStatusCounter( + expectThrows( + IndexNotFoundException.class, + () -> client().index(new IndexRequest(INDEX).id("missing_alias").setRequireAlias(true).source(SOURCE)).actionGet() + ) + ); + assertDocStatusStats(); + } + { + // Test Missing Pipeline: Ingestion failure, not Indexing failure + expectThrows( + IllegalArgumentException.class, + () -> client().index(new IndexRequest(INDEX).id("missing_pipeline").setPipeline("missing").source(SOURCE)).actionGet() + ); + assertDocStatusStats(); + } + { // Testing Version Conflict + final String docId = "version_conflict"; + + updateExpectedDocStatusCounter(client().index(new IndexRequest(INDEX).id(docId).source(SOURCE)).actionGet()); + updateExpectedDocStatusCounter( + expectThrows( + VersionConflictEngineException.class, + () -> client().index(new IndexRequest(INDEX).id(docId).source(SOURCE).setIfSeqNo(1L).setIfPrimaryTerm(99L)) + .actionGet() + ) + ); + assertDocStatusStats(); + } + } + { // Testing Bulk + final String INDEX = "bulk_index"; + + int sizeOfIndexRequests = scaledRandomIntBetween(10, 20); + int sizeOfDeleteRequests = scaledRandomIntBetween(5, sizeOfIndexRequests); + int sizeOfNotFoundRequests = scaledRandomIntBetween(5, sizeOfIndexRequests); + + BulkRequest bulkRequest = new BulkRequest(); + + for 
(int i = 0; i < sizeOfIndexRequests; ++i) { + bulkRequest.add(new IndexRequest(INDEX).id(String.valueOf(i)).source(SOURCE)); + } + + BulkResponse response = client().bulk(bulkRequest).actionGet(); + + MatcherAssert.assertThat(response.hasFailures(), equalTo(false)); + MatcherAssert.assertThat(response.getItems().length, equalTo(sizeOfIndexRequests)); + + for (BulkItemResponse itemResponse : response.getItems()) { + updateExpectedDocStatusCounter(itemResponse.getResponse()); + } + + refresh(INDEX); + bulkRequest.requests().clear(); + + for (int i = 0; i < sizeOfDeleteRequests; ++i) { + bulkRequest.add(new DeleteRequest(INDEX, String.valueOf(i))); + } + for (int i = 0; i < sizeOfNotFoundRequests; ++i) { + bulkRequest.add(new DeleteRequest(INDEX, String.valueOf(25 + i))); + } + + response = client().bulk(bulkRequest).actionGet(); + + MatcherAssert.assertThat(response.hasFailures(), equalTo(false)); + MatcherAssert.assertThat(response.getItems().length, equalTo(sizeOfDeleteRequests + sizeOfNotFoundRequests)); + + for (BulkItemResponse itemResponse : response.getItems()) { + updateExpectedDocStatusCounter(itemResponse.getResponse()); + } + + refresh(INDEX); + assertDocStatusStats(); + } + } + + public void testNodeIndicesStatsDocStatusStatsCreateDeleteUpdate() { + { // Testing Create + final String INDEX = "create_index"; + final String ID = "id"; + { // Testing Creation + IndexResponse response = client().index(new IndexRequest(INDEX).id(ID).source(SOURCE).create(true)).actionGet(); + updateExpectedDocStatusCounter(response); + + MatcherAssert.assertThat(response.getResult(), equalTo(DocWriteResponse.Result.CREATED)); + assertDocStatusStats(); + } + { // Testing Version Conflict + final String docId = "version_conflict"; + + updateExpectedDocStatusCounter(client().index(new IndexRequest(INDEX).id(docId).source(SOURCE)).actionGet()); + updateExpectedDocStatusCounter( + expectThrows( + VersionConflictEngineException.class, + () -> client().index(new IndexRequest(INDEX).id(docId).source(SOURCE).create(true)).actionGet() + ) + ); + assertDocStatusStats(); + } + } + { // Testing Delete + final String INDEX = "delete_index"; + final String ID = "id"; + { // Testing Deletion + IndexResponse response = client().index(new IndexRequest(INDEX).id(ID).source(SOURCE)).actionGet(); + updateExpectedDocStatusCounter(response); + + DeleteResponse deleteResponse = client().delete(new DeleteRequest(INDEX, ID)).actionGet(); + updateExpectedDocStatusCounter(deleteResponse); + + MatcherAssert.assertThat(response.getSeqNo(), greaterThanOrEqualTo(0L)); + MatcherAssert.assertThat(deleteResponse.getResult(), equalTo(DocWriteResponse.Result.DELETED)); + assertDocStatusStats(); + } + { // Testing Non-Existing Doc + updateExpectedDocStatusCounter(client().delete(new DeleteRequest(INDEX, "does_not_exist")).actionGet()); + assertDocStatusStats(); + } + { // Testing Version Conflict + final String docId = "version_conflict"; + + updateExpectedDocStatusCounter(client().index(new IndexRequest(INDEX).id(docId).source(SOURCE)).actionGet()); + updateExpectedDocStatusCounter( + expectThrows( + VersionConflictEngineException.class, + () -> client().delete(new DeleteRequest(INDEX, docId).setIfSeqNo(2L).setIfPrimaryTerm(99L)).actionGet() + ) + ); + + assertDocStatusStats(); + } + } + { // Testing Update + final String INDEX = "update_index"; + final String ID = "id"; + { // Testing Not Found + updateExpectedDocStatusCounter( + expectThrows( + DocumentMissingException.class, + () -> client().update(new UpdateRequest(INDEX, 
ID).doc(SOURCE)).actionGet() + ) + ); + assertDocStatusStats(); + } + { // Testing NoOp Update + updateExpectedDocStatusCounter(client().index(new IndexRequest(INDEX).id(ID).source(SOURCE)).actionGet()); + + UpdateResponse response = client().update(new UpdateRequest(INDEX, ID).doc(SOURCE)).actionGet(); + updateExpectedDocStatusCounter(response); + + MatcherAssert.assertThat(response.getResult(), equalTo(DocWriteResponse.Result.NOOP)); + assertDocStatusStats(); + } + { // Testing Update + final String UPDATED_VALUE = "updated_value"; + UpdateResponse response = client().update(new UpdateRequest(INDEX, ID).doc(singletonMap(FIELD, UPDATED_VALUE))).actionGet(); + updateExpectedDocStatusCounter(response); + + MatcherAssert.assertThat(response.getResult(), equalTo(DocWriteResponse.Result.UPDATED)); + assertDocStatusStats(); + } + { // Testing Missing Alias + updateExpectedDocStatusCounter( + expectThrows( + IndexNotFoundException.class, + () -> client().update(new UpdateRequest(INDEX, ID).setRequireAlias(true).doc(new IndexRequest().source(SOURCE))) + .actionGet() + ) + ); + assertDocStatusStats(); + } + { // Testing Version Conflict + final String docId = "version_conflict"; + + updateExpectedDocStatusCounter(client().index(new IndexRequest(INDEX).id(docId).source(SOURCE)).actionGet()); + updateExpectedDocStatusCounter( + expectThrows( + VersionConflictEngineException.class, + () -> client().update(new UpdateRequest(INDEX, docId).doc(SOURCE).setIfSeqNo(2L).setIfPrimaryTerm(99L)).actionGet() + ) + ); + assertDocStatusStats(); + } + } + } + + private void assertDocStatusStats() { + DocStatusStats docStatusStats = client().admin() + .cluster() + .prepareNodesStats() + .execute() + .actionGet() + .getNodes() + .get(0) + .getIndices() + .getIndexing() + .getTotal() + .getDocStatusStats(); + + assertTrue( + Arrays.equals( + docStatusStats.getDocStatusCounter(), + expectedDocStatusStats.getDocStatusCounter(), + Comparator.comparingLong(AtomicLong::longValue) + ) + ); + } + + private void updateExpectedDocStatusCounter(DocWriteResponse r) { + expectedDocStatusStats.inc(r.status()); + } + + private void updateExpectedDocStatusCounter(Exception e) { + expectedDocStatusStats.inc(ExceptionsHelper.status(e)); + } + +} diff --git a/server/src/internalClusterTest/java/org/opensearch/persistent/PersistentTasksExecutorIT.java b/server/src/internalClusterTest/java/org/opensearch/persistent/PersistentTasksExecutorIT.java index 13d7e838b920a..312b98a8dd918 100644 --- a/server/src/internalClusterTest/java/org/opensearch/persistent/PersistentTasksExecutorIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/persistent/PersistentTasksExecutorIT.java @@ -38,6 +38,7 @@ import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.tasks.TaskId; import org.opensearch.persistent.PersistentTasksCustomMetadata.PersistentTask; import org.opensearch.persistent.PersistentTasksService.WaitForPersistentTaskListener; import org.opensearch.persistent.TestPersistentTasksPlugin.State; @@ -45,7 +46,6 @@ import org.opensearch.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor; import org.opensearch.persistent.TestPersistentTasksPlugin.TestTasksRequestBuilder; import org.opensearch.plugins.Plugin; -import org.opensearch.tasks.TaskId; import org.opensearch.tasks.TaskInfo; import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.After; diff --git 
a/server/src/internalClusterTest/java/org/opensearch/persistent/decider/EnableAssignmentDeciderIT.java b/server/src/internalClusterTest/java/org/opensearch/persistent/decider/EnableAssignmentDeciderIT.java index aa1eeacbadd9d..a749ad54042ab 100644 --- a/server/src/internalClusterTest/java/org/opensearch/persistent/decider/EnableAssignmentDeciderIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/persistent/decider/EnableAssignmentDeciderIT.java @@ -32,11 +32,11 @@ package org.opensearch.persistent.decider; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; import org.opensearch.persistent.PersistentTasksCustomMetadata; import org.opensearch.persistent.PersistentTasksService; import org.opensearch.persistent.TestPersistentTasksPlugin; diff --git a/server/src/internalClusterTest/java/org/opensearch/plugins/PluginsServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/plugins/PluginsServiceIT.java new file mode 100644 index 0000000000000..3cc10b0c0b858 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/plugins/PluginsServiceIT.java @@ -0,0 +1,115 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugins; + +import org.opensearch.Version; +import org.opensearch.common.settings.Settings; +import org.opensearch.env.Environment; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.VersionUtils; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; + +import static org.hamcrest.Matchers.containsString; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class PluginsServiceIT extends OpenSearchIntegTestCase { + + public void testNodeBootstrapWithCompatiblePlugin() throws IOException { + // Prepare the plugins directory and then start a node + Path baseDir = createTempDir(); + Path pluginDir = baseDir.resolve("plugins/dummy-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "dummy desc", + "name", + "dummyPlugin", + "version", + "1.0", + "opensearch.version", + Version.CURRENT.toString(), + "java.version", + System.getProperty("java.specification.version"), + "classname", + "test.DummyPlugin" + ); + try (InputStream jar = PluginsServiceTests.class.getResourceAsStream("dummy-plugin.jar")) { + Files.copy(jar, pluginDir.resolve("dummy-plugin.jar")); + } + internalCluster().startNode(Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), baseDir)); + for (PluginsService pluginsService : internalCluster().getDataNodeInstances(PluginsService.class)) { + // Ensure plugins service was able to load the plugin + assertEquals(1, pluginsService.info().getPluginInfos().stream().filter(info -> info.getName().equals("dummyPlugin")).count()); + } + } + + public void testNodeBootstrapWithRangeCompatiblePlugin() throws IOException { + // Prepare the plugins directory and then start a node + Path baseDir = createTempDir(); + Path pluginDir = baseDir.resolve("plugins/dummy-plugin"); + 
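+ // The descriptor written below swaps the exact opensearch.version pin for a semver-style range:
+ // the "dependencies" entry declares {opensearch:"~<current version>"}, and the assertion at the
+ // end verifies that the plugin loader accepts this range as compatible.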
PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "dummy desc", + "name", + "dummyPlugin", + "version", + "1.0", + "dependencies", + "{opensearch:\"~" + Version.CURRENT + "\"}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "test.DummyPlugin" + ); + try (InputStream jar = PluginsServiceTests.class.getResourceAsStream("dummy-plugin.jar")) { + Files.copy(jar, pluginDir.resolve("dummy-plugin.jar")); + } + internalCluster().startNode(Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), baseDir)); + for (PluginsService pluginsService : internalCluster().getDataNodeInstances(PluginsService.class)) { + // Ensure plugins service was able to load the plugin + assertEquals(1, pluginsService.info().getPluginInfos().stream().filter(info -> info.getName().equals("dummyPlugin")).count()); + } + } + + public void testNodeBootstrapWithInCompatiblePlugin() throws IOException { + // Prepare the plugins directory with an incompatible plugin and attempt to start a node + Path baseDir = createTempDir(); + Path pluginDir = baseDir.resolve("plugins/dummy-plugin"); + String incompatibleRange = "~" + + VersionUtils.getVersion(Version.CURRENT.major, Version.CURRENT.minor, (byte) (Version.CURRENT.revision + 1)); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "dummy desc", + "name", + "dummyPlugin", + "version", + "1.0", + "dependencies", + "{opensearch:\"" + incompatibleRange + "\"}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "test.DummyPlugin" + ); + try (InputStream jar = PluginsServiceTests.class.getResourceAsStream("dummy-plugin.jar")) { + Files.copy(jar, pluginDir.resolve("dummy-plugin.jar")); + } + IllegalArgumentException e = assertThrows( + IllegalArgumentException.class, + () -> internalCluster().startNode(Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), baseDir)) + ); + assertThat(e.getMessage(), containsString("Plugin [dummyPlugin] was built for OpenSearch version ")); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java b/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java index f636185fd4649..0752ab7c9d0f1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java @@ -32,6 +32,8 @@ package org.opensearch.recovery; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.admin.cluster.health.ClusterHealthRequestBuilder; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.indices.recovery.RecoveryResponse; @@ -44,15 +46,27 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.indices.recovery.RecoveryState; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; + +import java.util.Collection; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; @ClusterScope(scope = Scope.TEST, numDataNodes = 0) -public class FullRollingRestartIT extends OpenSearchIntegTestCase { +public class 
FullRollingRestartIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public FullRollingRestartIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return replicationSettings; + } + protected void assertTimeout(ClusterHealthRequestBuilder requestBuilder) { ClusterHealthResponse clusterHealth = requestBuilder.get(); if (clusterHealth.isTimedOut()) { @@ -121,7 +135,7 @@ public void testFullRollingRestart() throws Exception { ); logger.info("--> refreshing and checking data"); - refresh(); + refreshAndWaitForReplication(); for (int i = 0; i < 10; i++) { assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2000L); } @@ -154,7 +168,7 @@ public void testFullRollingRestart() throws Exception { ); logger.info("--> stopped two nodes, verifying data"); - refresh(); + refreshAndWaitForReplication(); for (int i = 0; i < 10; i++) { assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2000L); } @@ -188,7 +202,7 @@ public void testFullRollingRestart() throws Exception { ); logger.info("--> one node left, verifying data"); - refresh(); + refreshAndWaitForReplication(); for (int i = 0; i < 10; i++) { assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2000L); } @@ -198,11 +212,11 @@ public void testNoRebalanceOnRollingRestart() throws Exception { // see https://github.com/elastic/elasticsearch/issues/14387 internalCluster().startClusterManagerOnlyNode(Settings.EMPTY); internalCluster().startDataOnlyNodes(3); - /** - * We start 3 nodes and a dedicated cluster-manager. Restart on of the data-nodes and ensure that we got no relocations. - * Yet we have 6 shards 0 replica so that means if the restarting node comes back both other nodes are subject - * to relocating to the restarting node since all had 2 shards and now one node has nothing allocated. - * We have a fix for this to wait until we have allocated unallocated shards now so this shouldn't happen. + /* + We start 3 nodes and a dedicated cluster-manager. Restart one of the data nodes and ensure that there are no relocations. + Yet we have 6 shards and 0 replicas, so if the restarting node comes back, both other nodes are subject + to relocating to the restarting node, since all had 2 shards and now one node has nothing allocated. + We have a fix that waits until unallocated shards are allocated, so this shouldn't happen.
*/ prepareCreate("test").setSettings( Settings.builder() diff --git a/server/src/internalClusterTest/java/org/opensearch/recovery/RecoveryWhileUnderLoadIT.java b/server/src/internalClusterTest/java/org/opensearch/recovery/RecoveryWhileUnderLoadIT.java index a675eb7c77344..988aeac7da541 100644 --- a/server/src/internalClusterTest/java/org/opensearch/recovery/RecoveryWhileUnderLoadIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/recovery/RecoveryWhileUnderLoadIT.java @@ -32,6 +32,8 @@ package org.opensearch.recovery; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.action.admin.indices.refresh.RefreshResponse; @@ -46,16 +48,16 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.CollectionUtils; +import org.opensearch.core.common.util.CollectionUtils; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.DocsStats; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.translog.Translog; import org.opensearch.plugins.Plugin; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.BackgroundIndexer; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -74,7 +76,17 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoTimeout; -public class RecoveryWhileUnderLoadIT extends OpenSearchIntegTestCase { +public class RecoveryWhileUnderLoadIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public RecoveryWhileUnderLoadIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return replicationSettings; + } + private final Logger logger = LogManager.getLogger(RecoveryWhileUnderLoadIT.class); public static final class RetentionLeaseSyncIntervalSettingPlugin extends Plugin { @@ -150,7 +162,7 @@ public void testRecoverWhileUnderLoadAllocateReplicasTest() throws Exception { logger.info("--> indexing threads stopped"); logger.info("--> refreshing the index"); - refreshAndAssert(); + assertAfterRefreshAndWaitForReplication(); logger.info("--> verifying indexed content"); iterateAssertCount(numberOfShards, 10, indexer.getIds()); } @@ -211,7 +223,7 @@ public void testRecoverWhileUnderLoadAllocateReplicasRelocatePrimariesTest() thr logger.info("--> indexing threads stopped"); logger.info("--> refreshing the index"); - refreshAndAssert(); + assertAfterRefreshAndWaitForReplication(); logger.info("--> verifying indexed content"); iterateAssertCount(numberOfShards, 10, indexer.getIds()); } @@ -325,7 +337,7 @@ public void testRecoverWhileUnderLoadWithReducedAllowedNodes() throws Exception ); logger.info("--> refreshing the index"); - refreshAndAssert(); + assertAfterRefreshAndWaitForReplication(); logger.info("--> verifying indexed content"); iterateAssertCount(numberOfShards, 10, indexer.getIds()); } @@ -375,7 +387,7 @@ public void testRecoverWhileRelocating() throws Exception { ensureGreen(TimeValue.timeValueMinutes(5)); logger.info("--> refreshing the index"); - 
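// With these tests now parameterized over replicationSettings, a plain refresh only covers primaries;
// the helper swapped in below also invokes waitForReplication() so segment-replication replicas catch
// up before hit counts are asserted.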
refreshAndAssert(); + assertAfterRefreshAndWaitForReplication(); logger.info("--> verifying indexed content"); iterateAssertCount(numShards, 10, indexer.getIds()); } @@ -474,10 +486,11 @@ private void logSearchResponse(int numberOfShards, long numberOfDocs, int iterat ); } - private void refreshAndAssert() throws Exception { + private void assertAfterRefreshAndWaitForReplication() throws Exception { assertBusy(() -> { RefreshResponse actionGet = client().admin().indices().prepareRefresh().get(); assertAllSuccessful(actionGet); }, 5, TimeUnit.MINUTES); + waitForReplication(); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/recovery/RelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/recovery/RelocationIT.java index b3821c7896b8e..8d8aea7fc1f3b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/recovery/RelocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/recovery/RelocationIT.java @@ -32,9 +32,10 @@ package org.opensearch.recovery; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.tests.util.English; -import org.opensearch.action.ActionFuture; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.reroute.ClusterRerouteResponse; import org.opensearch.action.admin.indices.stats.ShardStats; @@ -52,9 +53,11 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; import org.opensearch.common.Priority; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; @@ -64,18 +67,17 @@ import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; -import org.opensearch.core.index.shard.ShardId; -import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.FileChunkRequest; +import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.plugins.Plugin; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; import org.opensearch.test.BackgroundIndexer; -import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; -import org.opensearch.test.OpenSearchIntegTestCase.Scope; import org.opensearch.test.InternalSettingsPlugin; import org.opensearch.test.MockIndexEventListener; +import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; +import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.test.transport.StubbableTransport; import org.opensearch.transport.Transport; @@ -114,7 +116,17 @@ import static org.hamcrest.Matchers.startsWith; @ClusterScope(scope = Scope.TEST, numDataNodes = 0) -public class RelocationIT extends OpenSearchIntegTestCase { +public class RelocationIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public RelocationIT(Settings settings) 
{ + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return replicationSettings; + } + private final TimeValue ACCEPTABLE_RELOCATION_TIME = new TimeValue(5, TimeUnit.MINUTES); @Override @@ -158,7 +170,7 @@ public void testSimpleRelocationNoIndexing() { } logger.info("--> verifying count"); - client().admin().indices().prepareRefresh().execute().actionGet(); + refreshAndWaitForReplication(); assertThat(client().prepareSearch("test").setSize(0).execute().actionGet().getHits().getTotalHits().value, equalTo(20L)); logger.info("--> start another node"); @@ -186,7 +198,7 @@ public void testSimpleRelocationNoIndexing() { assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); logger.info("--> verifying count again..."); - client().admin().indices().prepareRefresh().execute().actionGet(); + refreshAndWaitForReplication(); assertThat(client().prepareSearch("test").setSize(0).execute().actionGet().getHits().getTotalHits().value, equalTo(20L)); } @@ -265,7 +277,7 @@ public void testRelocationWhileIndexingRandom() throws Exception { logger.info("--> indexing threads stopped"); logger.info("--> refreshing the index"); - client().admin().indices().prepareRefresh("test").execute().actionGet(); + refreshAndWaitForReplication("test"); logger.info("--> searching the index"); boolean ranOnce = false; for (int i = 0; i < 10; i++) { @@ -374,12 +386,12 @@ public void indexShardStateChanged( List<IndexRequestBuilder> builders1 = new ArrayList<>(); for (int numDocs = randomIntBetween(10, 30); numDocs > 0; numDocs--) { - builders1.add(client().prepareIndex("test").setSource("{}", XContentType.JSON)); + builders1.add(client().prepareIndex("test").setSource("{}", MediaTypeRegistry.JSON)); } List<IndexRequestBuilder> builders2 = new ArrayList<>(); for (int numDocs = randomIntBetween(10, 30); numDocs > 0; numDocs--) { - builders2.add(client().prepareIndex("test").setSource("{}", XContentType.JSON)); + builders2.add(client().prepareIndex("test").setSource("{}", MediaTypeRegistry.JSON)); } logger.info("--> START relocate the shard from {} to {}", nodes[fromNode], nodes[toNode]); @@ -439,7 +451,7 @@ public void testCancellationCleansTempFiles() throws Exception { List<IndexRequestBuilder> requests = new ArrayList<>(); int numDocs = scaledRandomIntBetween(25, 250); for (int i = 0; i < numDocs; i++) { - requests.add(client().prepareIndex(indexName).setSource("{}", XContentType.JSON)); + requests.add(client().prepareIndex(indexName).setSource("{}", MediaTypeRegistry.JSON)); } indexRandom(true, requests); assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("3").setWaitForGreenStatus().get().isTimedOut()); @@ -650,7 +662,7 @@ public void testRelocateWhileWaitingForRefresh() { assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); logger.info("--> verifying count"); - client().admin().indices().prepareRefresh().execute().actionGet(); + refreshAndWaitForReplication(); assertThat(client().prepareSearch("test").setSize(0).execute().actionGet().getHits().getTotalHits().value, equalTo(20L)); } @@ -726,7 +738,7 @@ public void testRelocateWhileContinuouslyIndexingAndWaitingForRefresh() throws E logger.info("--> verifying count"); assertBusy(() -> { - client().admin().indices().prepareRefresh().execute().actionGet(); + refreshAndWaitForReplication(); assertTrue(pendingIndexResponses.stream().allMatch(ActionFuture::isDone)); }, 1, TimeUnit.MINUTES); diff --git a/server/src/internalClusterTest/java/org/opensearch/recovery/SimpleRecoveryIT.java 
b/server/src/internalClusterTest/java/org/opensearch/recovery/SimpleRecoveryIT.java index 4ebb840c600d2..1f5fbae6e58e9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/recovery/SimpleRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/recovery/SimpleRecoveryIT.java @@ -32,22 +32,34 @@ package org.opensearch.recovery; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.admin.indices.flush.FlushResponse; import org.opensearch.action.admin.indices.refresh.RefreshResponse; import org.opensearch.action.get.GetResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; + +import java.util.Collection; import static org.opensearch.client.Requests.flushRequest; import static org.opensearch.client.Requests.getRequest; import static org.opensearch.client.Requests.indexRequest; -import static org.opensearch.client.Requests.refreshRequest; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; - import static org.hamcrest.Matchers.equalTo; -public class SimpleRecoveryIT extends OpenSearchIntegTestCase { +public class SimpleRecoveryIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public SimpleRecoveryIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return replicationSettings; + } + @Override public Settings indexSettings() { return Settings.builder().put(super.indexSettings()).put(recoverySettings()).build(); @@ -67,13 +79,13 @@ public void testSimpleRecovery() throws Exception { NumShards numShards = getNumShards("test"); - client().index(indexRequest("test").id("1").source(source("1", "test"), XContentType.JSON)).actionGet(); + client().index(indexRequest("test").id("1").source(source("1", "test"), MediaTypeRegistry.JSON)).actionGet(); FlushResponse flushResponse = client().admin().indices().flush(flushRequest("test")).actionGet(); assertThat(flushResponse.getTotalShards(), equalTo(numShards.totalNumShards)); assertThat(flushResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries)); assertThat(flushResponse.getFailedShards(), equalTo(0)); - client().index(indexRequest("test").id("2").source(source("2", "test"), XContentType.JSON)).actionGet(); - RefreshResponse refreshResponse = client().admin().indices().refresh(refreshRequest("test")).actionGet(); + client().index(indexRequest("test").id("2").source(source("2", "test"), MediaTypeRegistry.JSON)).actionGet(); + RefreshResponse refreshResponse = refreshAndWaitForReplication("test"); assertThat(refreshResponse.getTotalShards(), equalTo(numShards.totalNumShards)); assertThat(refreshResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries)); assertThat(refreshResponse.getFailedShards(), equalTo(0)); diff --git a/server/src/internalClusterTest/java/org/opensearch/recovery/TruncatedRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/recovery/TruncatedRecoveryIT.java index b5d7bd476059d..bf0533143cf91 100644 --- a/server/src/internalClusterTest/java/org/opensearch/recovery/TruncatedRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/recovery/TruncatedRecoveryIT.java @@ -32,6 +32,8 @@ package org.opensearch.recovery; +import 
com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.tests.util.English; import org.apache.lucene.tests.util.LuceneTestCase.SuppressCodecs; import org.opensearch.action.admin.cluster.node.stats.NodeStats; @@ -39,14 +41,15 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.index.query.QueryBuilders; -import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.FileChunkRequest; +import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.node.RecoverySettingsChunkSizePlugin; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.transport.TransportService; @@ -65,7 +68,16 @@ @OpenSearchIntegTestCase.ClusterScope(numDataNodes = 2, numClientNodes = 0, scope = OpenSearchIntegTestCase.Scope.TEST) @SuppressCodecs("*") // test relies on exact file extensions -public class TruncatedRecoveryIT extends OpenSearchIntegTestCase { +public class TruncatedRecoveryIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public TruncatedRecoveryIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return replicationSettings; + } @Override protected Collection<Class<? extends Plugin>> nodePlugins() { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/DocRepMigrationTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/DocRepMigrationTestCase.java new file mode 100644 index 0000000000000..5240949ff87b9 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/DocRepMigrationTestCase.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.remotemigration; + +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.client.Client; +import org.opensearch.common.settings.Settings; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.List; + +import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) +public class DocRepMigrationTestCase extends MigrationBaseTestCase { + + public void testMixedModeAddDocRep() throws Exception { + internalCluster().setBootstrapClusterManagerNodeIndex(0); + List<String> cmNodes = internalCluster().startNodes(1); + + Client client = internalCluster().client(cmNodes.get(0)); + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings(Settings.builder().put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed")); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + addRemote = false; + internalCluster().startNode(); + String[] allNodes = internalCluster().getNodeNames(); + assertBusy(() -> { assertEquals(client.admin().cluster().prepareClusterStats().get().getNodes().size(), allNodes.length); }); + } + +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java new file mode 100644 index 0000000000000..88d6f6897ee68 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.remotemigration; + +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.nio.file.Path; + +import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; + +public class MigrationBaseTestCase extends OpenSearchIntegTestCase { + protected static final String REPOSITORY_NAME = "test-remote-store-repo"; + protected static final String REPOSITORY_2_NAME = "test-remote-store-repo-2"; + + protected Path segmentRepoPath; + protected Path translogRepoPath; + + boolean addRemote = false; + + protected Settings nodeSettings(int nodeOrdinal) { + if (segmentRepoPath == null || translogRepoPath == null) { + segmentRepoPath = randomRepoPath().toAbsolutePath(); + translogRepoPath = randomRepoPath().toAbsolutePath(); + } + if (addRemote) { + logger.info("Adding remote store node"); + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(remoteStoreClusterSettings(REPOSITORY_NAME, segmentRepoPath, REPOSITORY_2_NAME, translogRepoPath)) + .put("discovery.initial_state_timeout", "500ms") + .build(); + } else { + logger.info("Adding docrep node"); + return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put("discovery.initial_state_timeout", "500ms").build(); + } + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build(); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java new file mode 100644 index 0000000000000..a31d203058565 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java @@ -0,0 +1,74 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.remotemigration; + +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.client.Client; +import org.opensearch.common.settings.Settings; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.List; + +import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; +import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) +public class RemoteStoreMigrationTestCase extends MigrationBaseTestCase { + public void testMixedModeAddRemoteNodes() throws Exception { + internalCluster().setBootstrapClusterManagerNodeIndex(0); + List<String> cmNodes = internalCluster().startNodes(1); + Client client = internalCluster().client(cmNodes.get(0)); + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings(Settings.builder().put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed")); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + // add remote node in mixed mode cluster + addRemote = true; + internalCluster().startNode(); + internalCluster().startNode(); + internalCluster().validateClusterFormed(); + + // assert repo gets registered + GetRepositoriesRequest gr = new GetRepositoriesRequest(new String[] { REPOSITORY_NAME }); + GetRepositoriesResponse getRepositoriesResponse = client.admin().cluster().getRepositories(gr).actionGet(); + assertEquals(1, getRepositoriesResponse.repositories().size()); + + // add docrep node in mixed mode cluster + addRemote = false; + internalCluster().startNode(); + assertBusy(() -> { + assertEquals(client.admin().cluster().prepareClusterStats().get().getNodes().size(), internalCluster().getNodeNames().length); + }); + + // add incompatible remote node in remote mixed cluster + Settings.Builder badSettings = Settings.builder() + .put(remoteStoreClusterSettings(REPOSITORY_NAME, segmentRepoPath, "REPOSITORY_2_NAME", translogRepoPath)) + .put("discovery.initial_state_timeout", "500ms"); + String badNode = internalCluster().startNode(badSettings); + assertTrue(client.admin().cluster().prepareClusterStats().get().getNodes().size() < internalCluster().getNodeNames().length); + internalCluster().stopRandomNode(settings -> settings.get("node.name").equals(badNode)); + } + + public void testMigrationDirections() { + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + // valid migration directions are accepted + updateSettingsRequest.persistentSettings(Settings.builder().put(MIGRATION_DIRECTION_SETTING.getKey(), "docrep")); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + updateSettingsRequest.persistentSettings(Settings.builder().put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store")); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + +
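+ // An unrecognized direction is rejected: MIGRATION_DIRECTION_SETTING accepts only known values
+ // such as "docrep" and "remote_store", so the "random" value below fails with IllegalArgumentException.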
updateSettingsRequest.persistentSettings(Settings.builder().put(MIGRATION_DIRECTION_SETTING.getKey(), "random")); + assertThrows(IllegalArgumentException.class, () -> client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java index 709c027c3f347..8166c0008ed83 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java @@ -8,28 +8,31 @@ package org.opensearch.remotestore; -import org.junit.Before; import org.opensearch.action.index.IndexResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.util.FileSystemUtils; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.snapshots.AbstractSnapshotIntegTestCase; -import org.opensearch.test.FeatureFlagSetter; import java.io.IOException; import java.nio.file.Path; import java.util.Arrays; import java.util.Collections; +import java.util.Locale; import java.util.Set; import java.util.stream.Collectors; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; public abstract class AbstractRemoteStoreMockRepositoryIntegTestCase extends AbstractSnapshotIntegTestCase { @@ -37,18 +40,6 @@ public abstract class AbstractRemoteStoreMockRepositoryIntegTestCase extends Abs protected static final String TRANSLOG_REPOSITORY_NAME = "my-translog-repo-1"; protected static final String INDEX_NAME = "remote-store-test-idx-1"; - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_STORE, "true").build(); - } - - @Before - public void setup() { - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - FeatureFlagSetter.set(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL); - internalCluster().startClusterManagerOnlyNode(); - } - @Override public Settings indexSettings() { return remoteStoreIndexSettings(0); @@ -62,40 +53,86 @@ protected Settings remoteStoreIndexSettings(int numberOfReplicas) { .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numberOfReplicas) .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) .put(IndexMetadata.SETTING_REPLICATION_TYPE, 
ReplicationType.SEGMENT) - .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) - .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, REPOSITORY_NAME) - .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, TRANSLOG_REPOSITORY_NAME) .build(); } - protected void deleteRepo() { - logger.info("--> Deleting the repository={}", REPOSITORY_NAME); - assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME)); - logger.info("--> Deleting the repository={}", TRANSLOG_REPOSITORY_NAME); - assertAcked(clusterAdmin().prepareDeleteRepository(TRANSLOG_REPOSITORY_NAME)); + public Settings buildRemoteStoreNodeAttributes(Path repoLocation, double ioFailureRate, String skipExceptionBlobList, long maxFailure) { + String segmentRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + REPOSITORY_NAME + ); + String translogRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + TRANSLOG_REPOSITORY_NAME + ); + String segmentRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + REPOSITORY_NAME + ); + String translogRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + TRANSLOG_REPOSITORY_NAME + ); + String stateRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + REPOSITORY_NAME + ); + String stateRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + REPOSITORY_NAME + ); + + return Settings.builder() + .put("node.attr." + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, REPOSITORY_NAME) + .put(segmentRepoTypeAttributeKey, "mock") + .put(segmentRepoSettingsAttributeKeyPrefix + "location", repoLocation) + .put(segmentRepoSettingsAttributeKeyPrefix + "random_control_io_exception_rate", ioFailureRate) + .put(segmentRepoSettingsAttributeKeyPrefix + "skip_exception_on_verification_file", true) + .put(segmentRepoSettingsAttributeKeyPrefix + "skip_exception_on_list_blobs", true) + .put(segmentRepoSettingsAttributeKeyPrefix + "skip_exception_on_blobs", skipExceptionBlobList) + .put(segmentRepoSettingsAttributeKeyPrefix + "max_failure_number", maxFailure) + .put("node.attr." + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, TRANSLOG_REPOSITORY_NAME) + .put(translogRepoTypeAttributeKey, "mock") + .put(translogRepoSettingsAttributeKeyPrefix + "location", repoLocation) + .put("node.attr." 
+ REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, REPOSITORY_NAME) + .put(stateRepoTypeAttributeKey, "mock") + .put(stateRepoSettingsAttributeKeyPrefix + "location", repoLocation) + .build(); + } + + protected void cleanupRepo() { + logger.info("--> Cleanup the repository={}", REPOSITORY_NAME); + clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).execute().actionGet(); + logger.info("--> Cleanup the repository={}", TRANSLOG_REPOSITORY_NAME); + clusterAdmin().prepareCleanupRepository(TRANSLOG_REPOSITORY_NAME).execute().actionGet(); } protected String setup(Path repoLocation, double ioFailureRate, String skipExceptionBlobList, long maxFailure) { - logger.info("--> Creating repository={} at the path={}", REPOSITORY_NAME, repoLocation); + return setup(repoLocation, ioFailureRate, skipExceptionBlobList, maxFailure, 0); + } + + protected String setup(Path repoLocation, double ioFailureRate, String skipExceptionBlobList, long maxFailure, int replicaCount) { // The random_control_io_exception_rate setting ensures that 10-25% of all operations to remote store results in /// IOException. skip_exception_on_verification_file & skip_exception_on_list_blobs settings ensures that the // repository creation can happen without failure. - createRepository( - REPOSITORY_NAME, - "mock", - Settings.builder() - .put("location", repoLocation) - .put("random_control_io_exception_rate", ioFailureRate) - .put("skip_exception_on_verification_file", true) - .put("skip_exception_on_list_blobs", true) - // Skipping is required for metadata as it is part of recovery - .put("skip_exception_on_blobs", skipExceptionBlobList) - .put("max_failure_number", maxFailure) - ); - logger.info("--> Creating repository={} at the path={}", TRANSLOG_REPOSITORY_NAME, repoLocation); - createRepository(TRANSLOG_REPOSITORY_NAME, "mock", Settings.builder().put("location", repoLocation)); + Settings.Builder settings = Settings.builder() + .put(buildRemoteStoreNodeAttributes(repoLocation, ioFailureRate, skipExceptionBlobList, maxFailure)); + + if (randomBoolean()) { + settings.put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT); + } + + disableRepoConsistencyCheck("Remote Store Creates System Repository"); - String dataNodeName = internalCluster().startDataOnlyNodes(1).get(0); + internalCluster().startClusterManagerOnlyNode(settings.build()); + String dataNodeName = internalCluster().startDataOnlyNode(settings.build()); + internalCluster().startDataOnlyNodes(replicaCount, settings.build()); createIndex(INDEX_NAME); logger.info("--> Created index={}", INDEX_NAME); ensureYellowAndNoInitializingShards(INDEX_NAME); @@ -128,7 +165,7 @@ private String getLocalSegmentFilename(String remoteFilename) { return remoteFilename.split(RemoteSegmentStoreDirectory.SEGMENT_NAME_UUID_SEPARATOR)[0]; } - private IndexResponse indexSingleDoc() { + protected IndexResponse indexSingleDoc() { return client().prepareIndex(INDEX_NAME) .setId(UUIDs.randomBase64UUID()) .setSource(randomAlphaOfLength(5), randomAlphaOfLength(5)) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java new file mode 100644 index 0000000000000..d29dacb001434 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java @@ -0,0 +1,101 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be 
licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotestore; + +import org.opensearch.action.index.IndexResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.transport.MockTransportService; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; + +public class BaseRemoteStoreRestoreIT extends RemoteStoreBaseIntegTestCase { + static final String INDEX_NAME = "remote-store-test-idx-1"; + static final String INDEX_NAMES = "test-remote-store-1,test-remote-store-2,remote-store-test-index-1,remote-store-test-index-2"; + static final String INDEX_NAMES_WILDCARD = "test-remote-store-*,remote-store-test-index-*"; + static final String TOTAL_OPERATIONS = "total-operations"; + static final String REFRESHED_OR_FLUSHED_OPERATIONS = "refreshed-or-flushed-operations"; + static final String MAX_SEQ_NO_TOTAL = "max-seq-no-total"; + + @Override + public Settings indexSettings() { + return remoteStoreIndexSettings(0); + } + + public Settings indexSettings(int shards, int replicas) { + return remoteStoreIndexSettings(replicas, shards); + } + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Arrays.asList(MockTransportService.TestPlugin.class); + } + + protected void restore(String... indices) { + restore(randomBoolean(), indices); + } + + protected void verifyRestoredData(Map<String, Long> indexStats, String indexName, boolean indexMoreData) throws Exception { + ensureYellowAndNoInitializingShards(indexName); + ensureGreen(indexName); + // This is to ensure that shards that were already assigned will get latest count + refresh(indexName); + assertBusy( + () -> assertHitCount(client().prepareSearch(indexName).setSize(0).get(), indexStats.get(TOTAL_OPERATIONS)), + 30, + TimeUnit.SECONDS + ); + if (indexMoreData == false) return; + + IndexResponse response = indexSingleDoc(indexName); + if (indexStats.containsKey(MAX_SEQ_NO_TOTAL + "-shard-" + response.getShardId().id())) { + assertEquals(indexStats.get(MAX_SEQ_NO_TOTAL + "-shard-" + response.getShardId().id()) + 1, response.getSeqNo()); + } + refresh(indexName); + assertBusy( + () -> assertHitCount(client().prepareSearch(indexName).setSize(0).get(), indexStats.get(TOTAL_OPERATIONS) + 1), + 30, + TimeUnit.SECONDS + ); + } + + protected void verifyRestoredData(Map<String, Long> indexStats, String indexName) throws Exception { + verifyRestoredData(indexStats, indexName, true); + } + + public void prepareCluster(int numClusterManagerNodes, int numDataOnlyNodes, String indices, int replicaCount, int shardCount) { + prepareCluster(numClusterManagerNodes, numDataOnlyNodes, indices, replicaCount, shardCount, Settings.EMPTY); + } + + public void prepareCluster( + int numClusterManagerNodes, + int numDataOnlyNodes, + String indices, + int replicaCount, + int shardCount, + Settings settings + ) { + prepareCluster(numClusterManagerNodes, numDataOnlyNodes, settings); + for (String index : indices.split(",")) { + createIndex(index, remoteStoreIndexSettings(replicaCount, shardCount)); + ensureYellowAndNoInitializingShards(index); + ensureGreen(index); + } + } + + public void prepareCluster(int numClusterManagerNodes, int numDataOnlyNodes, Settings settings) { + internalCluster().startClusterManagerOnlyNodes(numClusterManagerNodes, settings); + 
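+ // Cluster-manager nodes come up first so the data nodes started next can join them;
+ // both node types carry the same remote store attributes from `settings`. A typical
+ // caller would use the index-creating overload, e.g. prepareCluster(1, 2, INDEX_NAME, 1, 1)
+ // (values here are illustrative) for one dedicated cluster manager, two data nodes
+ // and a one-shard, one-replica index.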
internalCluster().startDataOnlyNodes(numDataOnlyNodes, settings); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexClusterDefaultDocRep.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexClusterDefaultDocRep.java deleted file mode 100644 index 2abf4fc50ec69..0000000000000 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexClusterDefaultDocRep.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.remotestore; - -import org.opensearch.action.admin.indices.get.GetIndexRequest; -import org.opensearch.action.admin.indices.get.GetIndexResponse; -import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.settings.Settings; -import org.opensearch.index.IndexSettings; -import org.opensearch.indices.replication.common.ReplicationType; -import org.opensearch.test.OpenSearchIntegTestCase; - -import static org.hamcrest.Matchers.containsString; -import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; - -@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) -public class CreateRemoteIndexClusterDefaultDocRep extends CreateRemoteIndexIT { - - @Override - protected Settings nodeSettings(int nodeOriginal) { - Settings settings = super.nodeSettings(nodeOriginal); - Settings.Builder builder = Settings.builder() - .put(settings) - .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.DOCUMENT); - return builder.build(); - } - - @Override - public void testDefaultRemoteStoreNoUserOverride() throws Exception { - Settings settings = Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .build(); - IllegalArgumentException exc = expectThrows( - IllegalArgumentException.class, - () -> client().admin().indices().prepareCreate("test-idx-1").setSettings(settings).get() - ); - assertThat( - exc.getMessage(), - containsString("Cannot enable [index.remote_store.enabled] when [index.replication.type] is DOCUMENT") - ); - } - - public void testDefaultRemoteStoreNoUserOverrideExceptReplicationTypeSegment() throws Exception { - Settings settings = Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) - .build(); - assertAcked(client().admin().indices().prepareCreate("test-idx-1").setSettings(settings).get()); - GetIndexResponse getIndexResponse = client().admin() - .indices() - .getIndex(new GetIndexRequest().indices("test-idx-1").includeDefaults(true)) - .get(); - Settings indexSettings = getIndexResponse.settings().get("test-idx-1"); - verifyRemoteStoreIndexSettings( - indexSettings, - "true", - "my-segment-repo-1", - "my-translog-repo-1", - ReplicationType.SEGMENT.toString(), - IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL - ); - } -} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexClusterDefaultDocRepIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexClusterDefaultDocRepIT.java new file mode 100644 index 
0000000000000..e1ab101fddf55 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexClusterDefaultDocRepIT.java @@ -0,0 +1,84 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotestore; + +import org.opensearch.action.admin.indices.get.GetIndexRequest; +import org.opensearch.action.admin.indices.get.GetIndexResponse; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.IndexSettings; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.Locale; + +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; +import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.hamcrest.Matchers.containsString; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) +public class CreateRemoteIndexClusterDefaultDocRepIT extends CreateRemoteIndexIT { + + @Override + protected Settings nodeSettings(int nodeOriginal) { + Settings settings = super.nodeSettings(nodeOriginal); + Settings.Builder builder = Settings.builder() + .put(settings) + .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.DOCUMENT); + return builder.build(); + } + + @Override + public void testDefaultRemoteStoreNoUserOverride() throws Exception { + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build(); + IllegalArgumentException exc = expectThrows( + IllegalArgumentException.class, + () -> client().admin().indices().prepareCreate("test-idx-1").setSettings(settings).get() + ); + assertThat( + exc.getMessage(), + containsString( + String.format( + Locale.ROOT, + "To enable %s, %s should be set to %s", + SETTING_REMOTE_STORE_ENABLED, + SETTING_REPLICATION_TYPE, + ReplicationType.SEGMENT + ) + ) + ); + } + + public void testDefaultRemoteStoreNoUserOverrideExceptReplicationTypeSegment() throws Exception { + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build(); + assertAcked(client().admin().indices().prepareCreate("test-idx-1").setSettings(settings).get()); + GetIndexResponse getIndexResponse = client().admin() + .indices() + .getIndex(new GetIndexRequest().indices("test-idx-1").includeDefaults(true)) + .get(); + Settings indexSettings = getIndexResponse.settings().get("test-idx-1"); + verifyRemoteStoreIndexSettings( + indexSettings, + "true", + REPOSITORY_NAME, + REPOSITORY_2_NAME, + ReplicationType.SEGMENT.toString(), + IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL + ); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexIT.java index e52a12f66cff4..d427a4db84ba2 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexIT.java @@ -8,80 +8,32 @@ package org.opensearch.remotestore; -import org.junit.After; -import org.junit.Before; import org.opensearch.action.admin.indices.get.GetIndexRequest; import org.opensearch.action.admin.indices.get.GetIndexResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexSettings; import org.opensearch.indices.replication.common.ReplicationType; -import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.Before; import java.util.Locale; -import static org.hamcrest.Matchers.containsString; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY; -import static org.opensearch.index.IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_REPOSITORY_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_ENABLED_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_SETTING_REPLICATION_TYPE; +import static org.opensearch.index.IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.hamcrest.Matchers.containsString; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) -public class CreateRemoteIndexIT extends OpenSearchIntegTestCase { - - @After - public void teardown() { - assertAcked(clusterAdmin().prepareDeleteRepository("my-segment-repo-1")); - assertAcked(clusterAdmin().prepareDeleteRepository("my-translog-repo-1")); - assertAcked(clusterAdmin().prepareDeleteRepository("my-custom-repo")); - } - - @Override - protected Settings nodeSettings(int nodeOriginal) { - Settings settings = super.nodeSettings(nodeOriginal); - Settings.Builder builder = Settings.builder() - .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT) - .put(CLUSTER_REMOTE_STORE_ENABLED_SETTING.getKey(), true) - .put(CLUSTER_REMOTE_STORE_REPOSITORY_SETTING.getKey(), "my-segment-repo-1") - .put(CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey(), "my-translog-repo-1") - .put(settings); - return builder.build(); - } - - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_STORE, "true").build(); - } +public class CreateRemoteIndexIT extends RemoteStoreBaseIntegTestCase { @Before - public void setup() { - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - internalCluster().startClusterManagerOnlyNode(); - assertAcked( - 
clusterAdmin().preparePutRepository("my-segment-repo-1") - .setType("fs") - .setSettings(Settings.builder().put("location", randomRepoPath().toAbsolutePath())) - ); - assertAcked( - clusterAdmin().preparePutRepository("my-translog-repo-1") - .setType("fs") - .setSettings(Settings.builder().put("location", randomRepoPath().toAbsolutePath())) - ); - assertAcked( - clusterAdmin().preparePutRepository("my-custom-repo") - .setType("fs") - .setSettings(Settings.builder().put("location", randomRepoPath().toAbsolutePath())) - ); + public void setup() throws Exception { + internalCluster().startNodes(2); } public void testDefaultRemoteStoreNoUserOverride() throws Exception { @@ -98,8 +50,8 @@ public void testDefaultRemoteStoreNoUserOverride() throws Exception { verifyRemoteStoreIndexSettings( indexSettings, "true", - "my-segment-repo-1", - "my-translog-repo-1", + REPOSITORY_NAME, + REPOSITORY_2_NAME, ReplicationType.SEGMENT.toString(), IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL ); @@ -111,19 +63,20 @@ public void testRemoteStoreDisabledByUser() throws Exception { .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .put(SETTING_REMOTE_STORE_ENABLED, false) .build(); - assertAcked(client().admin().indices().prepareCreate("test-idx-1").setSettings(settings).get()); - GetIndexResponse getIndexResponse = client().admin() - .indices() - .getIndex(new GetIndexRequest().indices("test-idx-1").includeDefaults(true)) - .get(); - Settings indexSettings = getIndexResponse.settings().get("test-idx-1"); - verifyRemoteStoreIndexSettings( - indexSettings, - "false", - null, - null, - client().settings().get(CLUSTER_SETTING_REPLICATION_TYPE), - IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL + + IllegalArgumentException exc = expectThrows( + IllegalArgumentException.class, + () -> client().admin().indices().prepareCreate("test-idx-1").setSettings(settings).get() + ); + assertThat( + exc.getMessage(), + containsString( + String.format( + Locale.ROOT, + "Validation Failed: 1: private index setting [%s] can not be set explicitly;", + SETTING_REMOTE_STORE_ENABLED + ) + ) ); } @@ -161,8 +114,8 @@ public void testRemoteStoreEnabledByUserWithoutRemoteRepoIllegalArgumentExceptio containsString( String.format( Locale.ROOT, - "Setting %s should be provided with non-empty repository ID", - SETTING_REMOTE_SEGMENT_STORE_REPOSITORY + "Validation Failed: 1: private index setting [%s] can not be set explicitly;", + SETTING_REMOTE_STORE_ENABLED ) ) ); @@ -174,19 +127,21 @@ public void testReplicationTypeDocumentByUser() throws Exception { .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .put(SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT) .build(); - assertAcked(client().admin().indices().prepareCreate("test-idx-1").setSettings(settings).get()); - GetIndexResponse getIndexResponse = client().admin() - .indices() - .getIndex(new GetIndexRequest().indices("test-idx-1").includeDefaults(true)) - .get(); - Settings indexSettings = getIndexResponse.settings().get("test-idx-1"); - verifyRemoteStoreIndexSettings( - indexSettings, - null, - null, - null, - ReplicationType.DOCUMENT.toString(), - IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL + IllegalArgumentException exc = expectThrows( + IllegalArgumentException.class, + () -> client().admin().indices().prepareCreate("test-idx-1").setSettings(settings).get() + ); + assertThat( + exc.getMessage(), + containsString( + String.format( + Locale.ROOT, + "To enable %s, %s should be set to %s", + SETTING_REMOTE_STORE_ENABLED, + SETTING_REPLICATION_TYPE, + 
ReplicationType.SEGMENT + ) + ) ); } @@ -213,7 +168,7 @@ public void testRemoteStoreSegmentRepoWithoutRemoteEnabledAndSegmentReplicationI ); } - public void testRemoteStoreEnabledByUserWithRemoteRepo() throws Exception { + public void testRemoteStoreEnabledByUserWithRemoteRepoIllegalArgumentException() throws Exception { Settings settings = Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) @@ -222,19 +177,20 @@ public void testRemoteStoreEnabledByUserWithRemoteRepo() throws Exception { .put(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, "my-custom-repo") .build(); - assertAcked(client().admin().indices().prepareCreate("test-idx-1").setSettings(settings).get()); - GetIndexResponse getIndexResponse = client().admin() - .indices() - .getIndex(new GetIndexRequest().indices("test-idx-1").includeDefaults(true)) - .get(); - Settings indexSettings = getIndexResponse.settings().get("test-idx-1"); - verifyRemoteStoreIndexSettings( - indexSettings, - "true", - "my-custom-repo", - "my-translog-repo-1", - ReplicationType.SEGMENT.toString(), - IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL + IllegalArgumentException exc = expectThrows( + IllegalArgumentException.class, + () -> client().admin().indices().prepareCreate("test-idx-1").setSettings(settings).get() + ); + assertThat( + exc.getMessage(), + containsString( + String.format( + Locale.ROOT, + "Validation Failed: 1: private index setting [%s] can not be set explicitly;2: private index setting [%s] can not be set explicitly;", + SETTING_REMOTE_STORE_ENABLED, + SETTING_REMOTE_SEGMENT_STORE_REPOSITORY + ) + ) ); } @@ -270,41 +226,21 @@ public void testRemoteStoreOverrideTranslogRepoCorrectly() throws Exception { .put(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, "my-custom-repo") .put(SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, "my-custom-repo") .build(); - assertAcked(client().admin().indices().prepareCreate("test-idx-1").setSettings(settings).get()); - GetIndexResponse getIndexResponse = client().admin() - .indices() - .getIndex(new GetIndexRequest().indices("test-idx-1").includeDefaults(true)) - .get(); - Settings indexSettings = getIndexResponse.settings().get("test-idx-1"); - verifyRemoteStoreIndexSettings( - indexSettings, - "true", - "my-custom-repo", - "my-custom-repo", - ReplicationType.SEGMENT.toString(), - IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL + IllegalArgumentException exc = expectThrows( + IllegalArgumentException.class, + () -> client().admin().indices().prepareCreate("test-idx-1").setSettings(settings).get() ); - } - - public void testRemoteStoreOverrideReplicationTypeIndexSettings() throws Exception { - Settings settings = Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put(SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT) - .build(); - assertAcked(client().admin().indices().prepareCreate("test-idx-1").setSettings(settings).get()); - GetIndexResponse getIndexResponse = client().admin() - .indices() - .getIndex(new GetIndexRequest().indices("test-idx-1").includeDefaults(true)) - .get(); - Settings indexSettings = getIndexResponse.settings().get("test-idx-1"); - verifyRemoteStoreIndexSettings( - indexSettings, - null, - null, - null, - ReplicationType.DOCUMENT.toString(), - IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL + assertThat( + exc.getMessage(), + containsString( + String.format( + Locale.ROOT, + "Validation Failed: 1: private index setting [%s] can not be set 
explicitly;2: private index setting [%s] can not be set explicitly;3: private index setting [%s] can not be set explicitly;", + SETTING_REMOTE_STORE_ENABLED, + SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, + SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY + ) + ) ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/PrimaryTermValidationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/PrimaryTermValidationIT.java index 9d63c9b528314..e14a4062f7775 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/PrimaryTermValidationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/PrimaryTermValidationIT.java @@ -21,13 +21,15 @@ import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.shard.ShardNotFoundException; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.disruption.NetworkDisruption; import org.opensearch.test.transport.MockTransportService; +import org.junit.Before; +import java.nio.file.Path; import java.util.Arrays; import java.util.Collection; import java.util.HashSet; @@ -35,22 +37,28 @@ import java.util.stream.Collectors; import java.util.stream.Stream; -import static org.hamcrest.Matchers.equalTo; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.equalTo; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) - public class PrimaryTermValidationIT extends RemoteStoreBaseIntegTestCase { private static final String INDEX_NAME = "remote-store-test-idx-1"; + protected Path absolutePath; + protected Path absolutePath2; @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(MockTransportService.TestPlugin.class); } + @Before + public void setup() { + absolutePath = randomRepoPath().toAbsolutePath(); + absolutePath2 = randomRepoPath().toAbsolutePath(); + } + public void testPrimaryTermValidation() throws Exception { // Follower checker interval is lower compared to leader checker so that the cluster manager can remove the node // with network partition faster. The follower check retry count is also kept 1. 
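// With the interval, timeout and retry count all tuned down to 1s/1s/1 below, the
// cluster manager detects and removes the partitioned node within a few seconds.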
@@ -61,19 +69,12 @@ public void testPrimaryTermValidation() throws Exception { .put(FollowersChecker.FOLLOWER_CHECK_TIMEOUT_SETTING.getKey(), "1s") .put(FollowersChecker.FOLLOWER_CHECK_INTERVAL_SETTING.getKey(), "1s") .put(FollowersChecker.FOLLOWER_CHECK_RETRY_COUNT_SETTING.getKey(), 1) + .put(remoteStoreClusterSettings(REPOSITORY_NAME, absolutePath, REPOSITORY_2_NAME, absolutePath2)) .build(); internalCluster().startClusterManagerOnlyNode(clusterSettings); - - // Create repository - absolutePath = randomRepoPath().toAbsolutePath(); - assertAcked( - clusterAdmin().preparePutRepository(REPOSITORY_NAME).setType("fs").setSettings(Settings.builder().put("location", absolutePath)) - ); - absolutePath2 = randomRepoPath().toAbsolutePath(); - putRepository(absolutePath2, REPOSITORY_2_NAME); - - // Start data nodes and create index internalCluster().startDataOnlyNodes(2, clusterSettings); + + // Create index createIndex(INDEX_NAME, remoteStoreIndexSettings(1)); ensureYellowAndNoInitializingShards(INDEX_NAME); ensureGreen(INDEX_NAME); @@ -155,6 +156,7 @@ public void testPrimaryTermValidation() throws Exception { // received the following exception. ShardNotFoundException exception = assertThrows(ShardNotFoundException.class, () -> indexSameDoc(primaryNode, INDEX_NAME)); assertTrue(exception.getMessage().contains("no such shard")); + internalCluster().clearDisruptionScheme(); ensureStableCluster(3); ensureGreen(INDEX_NAME); } @@ -162,7 +164,7 @@ public void testPrimaryTermValidation() throws Exception { private IndexResponse indexSameDoc(String nodeName, String indexName) { return client(nodeName).prepareIndex(indexName) .setId(UUIDs.randomBase64UUID()) - .setSource("{\"foo\" : \"bar\"}", XContentType.JSON) + .setSource("{\"foo\" : \"bar\"}", MediaTypeRegistry.JSON) .get(); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java new file mode 100644 index 0000000000000..869032a84c2c2 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.remotestore; + +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.indices.recovery.IndexPrimaryRelocationIT; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.nio.file.Path; + +import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RemoteIndexPrimaryRelocationIT extends IndexPrimaryRelocationIT { + + protected static final String REPOSITORY_NAME = "test-remote-store-repo"; + + protected Path absolutePath; + + protected Settings nodeSettings(int nodeOrdinal) { + if (absolutePath == null) { + absolutePath = randomRepoPath().toAbsolutePath(); + } + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(remoteStoreClusterSettings(REPOSITORY_NAME, absolutePath)) + .build(); + } + + public Settings indexSettings() { + return Settings.builder() + .put(super.indexSettings()) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .build(); + } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9191") + public void testPrimaryRelocationWhileIndexing() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + super.testPrimaryRelocationWhileIndexing(); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java index 4f7961cec22d7..6de61cf203c60 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java @@ -8,48 +8,40 @@ package org.opensearch.remotestore; -import org.hamcrest.Matcher; -import org.hamcrest.Matchers; -import org.junit.After; -import org.junit.Before; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; import org.opensearch.indices.recovery.IndexRecoveryIT; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.OpenSearchIntegTestCase; +import org.hamcrest.Matcher; +import org.hamcrest.Matchers; +import org.junit.After; +import org.junit.Before; import java.nio.file.Path; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteIndexRecoveryIT extends IndexRecoveryIT { - protected static final String REPOSITORY_NAME = "test-remore-store-repo"; + protected static final String REPOSITORY_NAME = "test-remote-store-repo"; - protected Path absolutePath; + protected Path repositoryPath; - @Override - protected Settings featureFlagSettings() { - return Settings.builder() - .put(super.featureFlagSettings()) - .put(FeatureFlags.REMOTE_STORE, "true") - .put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL, "true") - .build(); + @Before + public 
void setup() { + repositoryPath = randomRepoPath().toAbsolutePath(); } - @Before @Override - public void setUp() throws Exception { - super.setUp(); - internalCluster().startClusterManagerOnlyNode(); - absolutePath = randomRepoPath().toAbsolutePath(); - assertAcked( - clusterAdmin().preparePutRepository(REPOSITORY_NAME).setType("fs").setSettings(Settings.builder().put("location", absolutePath)) - ); + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(remoteStoreClusterSettings(REPOSITORY_NAME, repositoryPath)) + .build(); } @Override @@ -57,9 +49,6 @@ public Settings indexSettings() { return Settings.builder() .put(super.indexSettings()) .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) - .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) - .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, REPOSITORY_NAME) - .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, REPOSITORY_NAME) .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "300s") .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .build(); @@ -67,7 +56,7 @@ public Settings indexSettings() { @After public void teardown() { - assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME)); + clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).get(); } @Override @@ -81,7 +70,91 @@ protected int numDocs() { } @Override - protected boolean shouldAssertOngoingRecoveryInRerouteRecovery() { - return false; + public void testUsesFileBasedRecoveryIfRetentionLeaseMissing() { + // Retention lease based tests not applicable for remote store; + } + + @Override + public void testPeerRecoveryTrimsLocalTranslog() { + // Peer recovery usecase not valid for remote enabled indices + } + + @Override + public void testHistoryRetention() { + // History retention not applicable for remote store + } + + @Override + public void testUsesFileBasedRecoveryIfOperationsBasedRecoveryWouldBeUnreasonable() { + // History retention not applicable for remote store + } + + @Override + public void testUsesFileBasedRecoveryIfRetentionLeaseAheadOfGlobalCheckpoint() { + // History retention not applicable for remote store + } + + @Override + public void testRecoverLocallyUpToGlobalCheckpoint() { + // History retention not applicable for remote store + } + + @Override + public void testCancelNewShardRecoveryAndUsesExistingShardCopy() { + // History retention not applicable for remote store + } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8919") + @Override + public void testReservesBytesDuringPeerRecoveryPhaseOne() { + + } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8919") + @Override + public void testAllocateEmptyPrimaryResetsGlobalCheckpoint() { + + } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8919") + @Override + public void testDoesNotCopyOperationsInSafeCommit() { + + } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8919") + @Override + public void testRepeatedRecovery() { + + } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8919") + @Override + public void testDisconnectsWhileRecovering() { + + } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8919") + @Override + public void testTransientErrorsDuringRecoveryAreRetried() { + + } + + @AwaitsFix(bugUrl = 
"https://github.com/opensearch-project/OpenSearch/issues/8919") + @Override + public void testDoNotInfinitelyWaitForMapping() { + + } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8919") + @Override + public void testDisconnectsDuringRecovery() { + + } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8919") + @Override + public void testReplicaRecovery() { + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java new file mode 100644 index 0000000000000..21ce4be9981fb --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -0,0 +1,659 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotestore; + +import org.opensearch.action.DocWriteResponse; +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; +import org.opensearch.action.admin.indices.get.GetIndexRequest; +import org.opensearch.action.admin.indices.get.GetIndexResponse; +import org.opensearch.action.delete.DeleteResponse; +import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.client.Client; +import org.opensearch.client.Requests; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.io.PathUtils; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.index.Index; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.index.IndexService; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.snapshots.AbstractSnapshotIntegTestCase; +import org.opensearch.snapshots.SnapshotInfo; +import org.opensearch.snapshots.SnapshotState; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; +import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RemoteRestoreSnapshotIT extends AbstractSnapshotIntegTestCase { + private static final String BASE_REMOTE_REPO = "test-rs-repo" + 
TEST_REMOTE_STORE_REPO_SUFFIX; + private Path remoteRepoPath; + + @Before + public void setup() { + remoteRepoPath = randomRepoPath().toAbsolutePath(); + } + + @After + public void teardown() { + clusterAdmin().prepareCleanupRepository(BASE_REMOTE_REPO).get(); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(remoteStoreClusterSettings(BASE_REMOTE_REPO, remoteRepoPath)) + .build(); + } + + private Settings.Builder getIndexSettings(int numOfShards, int numOfReplicas) { + Settings.Builder settingsBuilder = Settings.builder() + .put(super.indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numOfShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numOfReplicas) + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "300s"); + return settingsBuilder; + } + + private void indexDocuments(Client client, String indexName, int numOfDocs) { + indexDocuments(client, indexName, 0, numOfDocs); + } + + private void indexDocuments(Client client, String indexName, int fromId, int toId) { + for (int i = fromId; i < toId; i++) { + String id = Integer.toString(i); + client.prepareIndex(indexName).setId(id).setSource("text", "sometext").get(); + } + client.admin().indices().prepareFlush(indexName).get(); + } + + private void assertDocsPresentInIndex(Client client, String indexName, int numOfDocs) { + for (int i = 0; i < numOfDocs; i++) { + String id = Integer.toString(i); + logger.info("checking for index " + indexName + " with docId" + id); + assertTrue("doc with id" + id + " is not present for index " + indexName, client.prepareGet(indexName, id).get().isExists()); + } + } + + public void testRestoreOperationsShallowCopyEnabled() throws IOException, ExecutionException, InterruptedException { + String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); + String primary = internalCluster().startDataOnlyNode(); + String indexName1 = "testindex1"; + String indexName2 = "testindex2"; + String snapshotRepoName = "test-restore-snapshot-repo"; + String snapshotName1 = "test-restore-snapshot1"; + String snapshotName2 = "test-restore-snapshot2"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + String restoredIndexName1 = indexName1 + "-restored"; + String restoredIndexName1Seg = indexName1 + "-restored-seg"; + String restoredIndexName1Doc = indexName1 + "-restored-doc"; + String restoredIndexName2 = indexName2 + "-restored"; + + createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, true)); + + Client client = client(); + Settings indexSettings = getIndexSettings(1, 0).build(); + createIndex(indexName1, indexSettings); + + Settings indexSettings2 = getIndexSettings(1, 0).build(); + createIndex(indexName2, indexSettings2); + + final int numDocsInIndex1 = 5; + final int numDocsInIndex2 = 6; + indexDocuments(client, indexName1, numDocsInIndex1); + indexDocuments(client, indexName2, numDocsInIndex2); + ensureGreen(indexName1, indexName2); + + internalCluster().startDataOnlyNode(); + logger.info("--> snapshot"); + + SnapshotInfo snapshotInfo = createSnapshot(snapshotRepoName, snapshotName1, new ArrayList<>(Arrays.asList(indexName1, indexName2))); + assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); + assertThat(snapshotInfo.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards())); + + updateRepository(snapshotRepoName, "fs", 
getRepositorySettings(absolutePath1, false)); + SnapshotInfo snapshotInfo2 = createSnapshot( + snapshotRepoName, + snapshotName2, + new ArrayList<>(Arrays.asList(indexName1, indexName2)) + ); + assertThat(snapshotInfo2.state(), equalTo(SnapshotState.SUCCESS)); + assertThat(snapshotInfo2.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo2.successfulShards(), equalTo(snapshotInfo2.totalShards())); + + DeleteResponse deleteResponse = client().prepareDelete(indexName1, "0").execute().actionGet(); + assertEquals(deleteResponse.getResult(), DocWriteResponse.Result.DELETED); + indexDocuments(client, indexName1, numDocsInIndex1, numDocsInIndex1 + randomIntBetween(2, 5)); + ensureGreen(indexName1); + + RestoreSnapshotResponse restoreSnapshotResponse1 = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(false) + .setIndices(indexName1) + .setRenamePattern(indexName1) + .setRenameReplacement(restoredIndexName1) + .get(); + RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName2) + .setWaitForCompletion(false) + .setIndices(indexName2) + .setRenamePattern(indexName2) + .setRenameReplacement(restoredIndexName2) + .get(); + assertEquals(restoreSnapshotResponse1.status(), RestStatus.ACCEPTED); + assertEquals(restoreSnapshotResponse2.status(), RestStatus.ACCEPTED); + ensureGreen(restoredIndexName1, restoredIndexName2); + assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1); + assertDocsPresentInIndex(client, restoredIndexName2, numDocsInIndex2); + + // deleting data for restoredIndexName1 and restoring from remote store. + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primary)); + ensureRed(restoredIndexName1); + // Re-initialize client to make sure we are not using client from stopped node. 
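+ // client() returns a client bound to a random node, which could be the stopped one.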
+ client = client(clusterManagerNode); + assertAcked(client.admin().indices().prepareClose(restoredIndexName1)); + client.admin() + .cluster() + .restoreRemoteStore( + new RestoreRemoteStoreRequest().indices(restoredIndexName1).restoreAllShards(true), + PlainActionFuture.newFuture() + ); + ensureYellowAndNoInitializingShards(restoredIndexName1); + ensureGreen(restoredIndexName1); + assertDocsPresentInIndex(client(), restoredIndexName1, numDocsInIndex1); + // indexing some new docs and validating + indexDocuments(client, restoredIndexName1, numDocsInIndex1, numDocsInIndex1 + 2); + ensureGreen(restoredIndexName1); + assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1 + 2); + + // restore index as seg rep enabled with remote store and remote translog disabled + RestoreSnapshotResponse restoreSnapshotResponse3 = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(false) + .setIgnoreIndexSettings(IndexMetadata.SETTING_REMOTE_STORE_ENABLED) + .setIndices(indexName1) + .setRenamePattern(indexName1) + .setRenameReplacement(restoredIndexName1Seg) + .get(); + assertEquals(restoreSnapshotResponse3.status(), RestStatus.ACCEPTED); + ensureGreen(restoredIndexName1Seg); + + GetIndexResponse getIndexResponse = client.admin() + .indices() + .getIndex(new GetIndexRequest().indices(restoredIndexName1Seg).includeDefaults(true)) + .get(); + indexSettings = getIndexResponse.settings().get(restoredIndexName1Seg); + assertNull(indexSettings.get(SETTING_REMOTE_STORE_ENABLED)); + assertNull(indexSettings.get(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, null)); + assertEquals(ReplicationType.SEGMENT.toString(), indexSettings.get(IndexMetadata.SETTING_REPLICATION_TYPE)); + assertDocsPresentInIndex(client, restoredIndexName1Seg, numDocsInIndex1); + // indexing some new docs and validating + indexDocuments(client, restoredIndexName1Seg, numDocsInIndex1, numDocsInIndex1 + 2); + ensureGreen(restoredIndexName1Seg); + assertDocsPresentInIndex(client, restoredIndexName1Seg, numDocsInIndex1 + 2); + + // restore index as doc rep based from shallow copy snapshot + RestoreSnapshotResponse restoreSnapshotResponse4 = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(false) + .setIgnoreIndexSettings(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, IndexMetadata.SETTING_REPLICATION_TYPE) + .setIndices(indexName1) + .setRenamePattern(indexName1) + .setRenameReplacement(restoredIndexName1Doc) + .get(); + assertEquals(restoreSnapshotResponse4.status(), RestStatus.ACCEPTED); + ensureGreen(restoredIndexName1Doc); + + getIndexResponse = client.admin() + .indices() + .getIndex(new GetIndexRequest().indices(restoredIndexName1Doc).includeDefaults(true)) + .get(); + indexSettings = getIndexResponse.settings().get(restoredIndexName1Doc); + assertNull(indexSettings.get(SETTING_REMOTE_STORE_ENABLED)); + assertNull(indexSettings.get(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, null)); + assertNull(indexSettings.get(IndexMetadata.SETTING_REPLICATION_TYPE)); + assertDocsPresentInIndex(client, restoredIndexName1Doc, numDocsInIndex1); + // indexing some new docs and validating + indexDocuments(client, restoredIndexName1Doc, numDocsInIndex1, numDocsInIndex1 + 2); + ensureGreen(restoredIndexName1Doc); + assertDocsPresentInIndex(client, restoredIndexName1Doc, numDocsInIndex1 + 2); + } + + public void testRestoreInSameRemoteStoreEnabledIndex() throws IOException { + String clusterManagerNode = 
internalCluster().startClusterManagerOnlyNode(); + String primary = internalCluster().startDataOnlyNode(); + String indexName1 = "testindex1"; + String indexName2 = "testindex2"; + String snapshotRepoName = "test-restore-snapshot-repo"; + String snapshotName1 = "test-restore-snapshot1"; + String snapshotName2 = "test-restore-snapshot2"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + String restoredIndexName2 = indexName2 + "-restored"; + + boolean enableShallowCopy = randomBoolean(); + createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, enableShallowCopy)); + + Client client = client(); + Settings indexSettings = getIndexSettings(1, 0).build(); + createIndex(indexName1, indexSettings); + + Settings indexSettings2 = getIndexSettings(1, 0).build(); + createIndex(indexName2, indexSettings2); + + final int numDocsInIndex1 = 5; + final int numDocsInIndex2 = 6; + indexDocuments(client, indexName1, numDocsInIndex1); + indexDocuments(client, indexName2, numDocsInIndex2); + ensureGreen(indexName1, indexName2); + + internalCluster().startDataOnlyNode(); + logger.info("--> snapshot"); + SnapshotInfo snapshotInfo1 = createSnapshot( + snapshotRepoName, + snapshotName1, + new ArrayList<>(Arrays.asList(indexName1, indexName2)) + ); + assertThat(snapshotInfo1.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo1.successfulShards(), equalTo(snapshotInfo1.totalShards())); + assertThat(snapshotInfo1.state(), equalTo(SnapshotState.SUCCESS)); + + updateRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, false)); + SnapshotInfo snapshotInfo2 = createSnapshot( + snapshotRepoName, + snapshotName2, + new ArrayList<>(Arrays.asList(indexName1, indexName2)) + ); + assertThat(snapshotInfo2.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo2.successfulShards(), equalTo(snapshotInfo2.totalShards())); + assertThat(snapshotInfo2.state(), equalTo(SnapshotState.SUCCESS)); + + DeleteResponse deleteResponse = client().prepareDelete(indexName1, "0").execute().actionGet(); + assertEquals(deleteResponse.getResult(), DocWriteResponse.Result.DELETED); + indexDocuments(client, indexName1, numDocsInIndex1, numDocsInIndex1 + randomIntBetween(2, 5)); + ensureGreen(indexName1); + + assertAcked(client().admin().indices().prepareClose(indexName1)); + + RestoreSnapshotResponse restoreSnapshotResponse1 = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(false) + .setIndices(indexName1) + .get(); + RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName2) + .setWaitForCompletion(false) + .setIndices(indexName2) + .setRenamePattern(indexName2) + .setRenameReplacement(restoredIndexName2) + .get(); + assertEquals(restoreSnapshotResponse1.status(), RestStatus.ACCEPTED); + assertEquals(restoreSnapshotResponse2.status(), RestStatus.ACCEPTED); + ensureGreen(indexName1, restoredIndexName2); + + assertRemoteSegmentsAndTranslogUploaded(restoredIndexName2); + assertDocsPresentInIndex(client, indexName1, numDocsInIndex1); + assertDocsPresentInIndex(client, restoredIndexName2, numDocsInIndex2); + // indexing some new docs and validating + indexDocuments(client, indexName1, numDocsInIndex1, numDocsInIndex1 + 2); + ensureGreen(indexName1); + assertDocsPresentInIndex(client, indexName1, numDocsInIndex1 + 2); + + // deleting data for indexName1 and restoring from remote store.
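+ // The index has no replicas, so stopping the node that holds the primary leaves it
+ // red; the remote store restore below then recovers the shard from the uploaded
+ // segments and translog rather than from the snapshot.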
+ internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primary)); + ensureRed(indexName1); + // Re-initialize client to make sure we are not using client from stopped node. + client = client(clusterManagerNode); + assertAcked(client.admin().indices().prepareClose(indexName1)); + client.admin() + .cluster() + .restoreRemoteStore(new RestoreRemoteStoreRequest().indices(indexName1).restoreAllShards(true), PlainActionFuture.newFuture()); + ensureYellowAndNoInitializingShards(indexName1); + ensureGreen(indexName1); + assertDocsPresentInIndex(client(), indexName1, numDocsInIndex1); + // indexing some new docs and validating + indexDocuments(client, indexName1, numDocsInIndex1 + 2, numDocsInIndex1 + 4); + ensureGreen(indexName1); + assertDocsPresentInIndex(client, indexName1, numDocsInIndex1 + 4); + } + + void assertRemoteSegmentsAndTranslogUploaded(String idx) throws IOException { + String indexUUID = client().admin().indices().prepareGetSettings(idx).get().getSetting(idx, IndexMetadata.SETTING_INDEX_UUID); + + Path remoteTranslogMetadataPath = Path.of(String.valueOf(remoteRepoPath), indexUUID, "/0/translog/metadata"); + Path remoteTranslogDataPath = Path.of(String.valueOf(remoteRepoPath), indexUUID, "/0/translog/data"); + Path segmentMetadataPath = Path.of(String.valueOf(remoteRepoPath), indexUUID, "/0/segments/metadata"); + Path segmentDataPath = Path.of(String.valueOf(remoteRepoPath), indexUUID, "/0/segments/data"); + + try ( + Stream<Path> translogMetadata = Files.list(remoteTranslogMetadataPath); + Stream<Path> translogData = Files.list(remoteTranslogDataPath); + Stream<Path> segmentMetadata = Files.list(segmentMetadataPath); + Stream<Path> segmentData = Files.list(segmentDataPath); + + ) { + assertTrue(translogData.count() > 0); + assertTrue(translogMetadata.count() > 0); + assertTrue(segmentMetadata.count() > 0); + assertTrue(segmentData.count() > 0); + } + + } + + public void testRemoteRestoreIndexRestoredFromSnapshot() throws IOException, ExecutionException, InterruptedException { + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNodes(2); + + String indexName1 = "testindex1"; + String snapshotRepoName = "test-restore-snapshot-repo"; + String snapshotName1 = "test-restore-snapshot1"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + + createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, true)); + + Settings indexSettings = getIndexSettings(1, 0).build(); + createIndex(indexName1, indexSettings); + + final int numDocsInIndex1 = randomIntBetween(20, 30); + indexDocuments(client(), indexName1, numDocsInIndex1); + flushAndRefresh(indexName1); + ensureGreen(indexName1); + + logger.info("--> snapshot"); + SnapshotInfo snapshotInfo1 = createSnapshot(snapshotRepoName, snapshotName1, new ArrayList<>(Arrays.asList(indexName1))); + assertThat(snapshotInfo1.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo1.successfulShards(), equalTo(snapshotInfo1.totalShards())); + assertThat(snapshotInfo1.state(), equalTo(SnapshotState.SUCCESS)); + + assertAcked(client().admin().indices().delete(new DeleteIndexRequest(indexName1)).get()); + assertFalse(indexExists(indexName1)); + + RestoreSnapshotResponse restoreSnapshotResponse1 = client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(false) + .setIndices(indexName1) + .get(); + + assertEquals(restoreSnapshotResponse1.status(), RestStatus.ACCEPTED); + 
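+ // ACCEPTED only means the restore was started (wait_for_completion is false); the
+ // ensureGreen call below waits for the restored shards to actually become active.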
ensureGreen(indexName1); + assertDocsPresentInIndex(client(), indexName1, numDocsInIndex1); + + assertRemoteSegmentsAndTranslogUploaded(indexName1); + + // Clear the local data before stopping the node. This will make sure that remote translog is empty. + IndexShard indexShard = getIndexShard(primaryNodeName(indexName1), indexName1); + try (Stream<Path> files = Files.list(indexShard.shardPath().resolveTranslog())) { + IOUtils.deleteFilesIgnoringExceptions(files.collect(Collectors.toList())); + } + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(indexName1))); + + ensureRed(indexName1); + + client().admin() + .cluster() + .restoreRemoteStore(new RestoreRemoteStoreRequest().indices(indexName1).restoreAllShards(false), PlainActionFuture.newFuture()); + + ensureGreen(indexName1); + assertDocsPresentInIndex(client(), indexName1, numDocsInIndex1); + } + + protected IndexShard getIndexShard(String node, String indexName) { + final Index index = resolveIndex(indexName); + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node); + IndexService indexService = indicesService.indexService(index); + assertNotNull(indexService); + final Optional<Integer> shardId = indexService.shardIds().stream().findFirst(); + return shardId.map(indexService::getShard).orElse(null); + } + + public void testRestoreShallowCopySnapshotWithDifferentRepo() throws IOException { + String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); + String primary = internalCluster().startDataOnlyNode(); + String indexName1 = "testindex1"; + String indexName2 = "testindex2"; + String snapshotRepoName = "test-restore-snapshot-repo"; + String remoteStoreRepo2Name = "test-rs-repo-2" + TEST_REMOTE_STORE_REPO_SUFFIX; + String snapshotName1 = "test-restore-snapshot1"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + Path absolutePath3 = randomRepoPath().toAbsolutePath(); + String restoredIndexName1 = indexName1 + "-restored"; + + createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, false)); + createRepository(remoteStoreRepo2Name, "fs", absolutePath3); + + Client client = client(); + Settings indexSettings = getIndexSettings(1, 0).build(); + createIndex(indexName1, indexSettings); + + Settings indexSettings2 = getIndexSettings(1, 0).build(); + createIndex(indexName2, indexSettings2); + + final int numDocsInIndex1 = 5; + final int numDocsInIndex2 = 6; + indexDocuments(client, indexName1, numDocsInIndex1); + indexDocuments(client, indexName2, numDocsInIndex2); + ensureGreen(indexName1, indexName2); + + internalCluster().startDataOnlyNode(); + + logger.info("--> snapshot"); + SnapshotInfo snapshotInfo1 = createSnapshot( + snapshotRepoName, + snapshotName1, + new ArrayList<>(Arrays.asList(indexName1, indexName2)) + ); + assertThat(snapshotInfo1.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo1.successfulShards(), equalTo(snapshotInfo1.totalShards())); + assertThat(snapshotInfo1.state(), equalTo(SnapshotState.SUCCESS)); + + Settings remoteStoreIndexSettings = Settings.builder() + .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, remoteStoreRepo2Name) + .build(); + // restore index as a remote store index with different remote store repo + RestoreSnapshotResponse restoreSnapshotResponse = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(false) + .setIndexSettings(remoteStoreIndexSettings) + .setIndices(indexName1) + 
.setRenamePattern(indexName1) + .setRenameReplacement(restoredIndexName1) + .get(); + assertEquals(restoreSnapshotResponse.status(), RestStatus.ACCEPTED); + ensureGreen(restoredIndexName1); + assertDocsPresentInIndex(client(), restoredIndexName1, numDocsInIndex1); + + // deleting data for restoredIndexName1 and restoring from remote store. + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primary)); + // Re-initialize client to make sure we are not using client from stopped node. + client = client(clusterManagerNode); + assertAcked(client.admin().indices().prepareClose(restoredIndexName1)); + client.admin() + .cluster() + .restoreRemoteStore( + new RestoreRemoteStoreRequest().indices(restoredIndexName1).restoreAllShards(true), + PlainActionFuture.newFuture() + ); + ensureYellowAndNoInitializingShards(restoredIndexName1); + ensureGreen(restoredIndexName1); + // indexing some new docs and validating + assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1); + indexDocuments(client, restoredIndexName1, numDocsInIndex1, numDocsInIndex1 + 2); + ensureGreen(restoredIndexName1); + assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1 + 2); + } + + public void testRestoreShallowSnapshotRepository() throws ExecutionException, InterruptedException { + String indexName1 = "testindex1"; + String snapshotRepoName = "test-restore-snapshot-repo"; + String remoteStoreRepoNameUpdated = "test-rs-repo-updated" + TEST_REMOTE_STORE_REPO_SUFFIX; + String snapshotName1 = "test-restore-snapshot1"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + Path absolutePath2 = randomRepoPath().toAbsolutePath(); + String[] pathTokens = absolutePath1.toString().split("/"); + String basePath = pathTokens[pathTokens.length - 1]; + Arrays.copyOf(pathTokens, pathTokens.length - 1); + Path location = PathUtils.get(String.join("/", pathTokens)); + pathTokens = absolutePath2.toString().split("/"); + String basePath2 = pathTokens[pathTokens.length - 1]; + Arrays.copyOf(pathTokens, pathTokens.length - 1); + Path location2 = PathUtils.get(String.join("/", pathTokens)); + logger.info("Path 1 [{}]", absolutePath1); + logger.info("Path 2 [{}]", absolutePath2); + String restoredIndexName1 = indexName1 + "-restored"; + + createRepository(snapshotRepoName, "fs", getRepositorySettings(location, basePath, true)); + + Client client = client(); + Settings indexSettings = Settings.builder() + .put(super.indexSettings()) + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "300s") + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build(); + createIndex(indexName1, indexSettings); + + int numDocsInIndex1 = randomIntBetween(2, 5); + indexDocuments(client, indexName1, numDocsInIndex1); + + ensureGreen(indexName1); + + logger.info("--> snapshot"); + SnapshotInfo snapshotInfo1 = createSnapshot(snapshotRepoName, snapshotName1, new ArrayList<>(List.of(indexName1))); + assertThat(snapshotInfo1.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo1.successfulShards(), equalTo(snapshotInfo1.totalShards())); + assertThat(snapshotInfo1.state(), equalTo(SnapshotState.SUCCESS)); + + client().admin().indices().close(Requests.closeIndexRequest(indexName1)).get(); + createRepository(remoteStoreRepoNameUpdated, "fs", remoteRepoPath); + RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, 
snapshotName1) + .setWaitForCompletion(true) + .setIndices(indexName1) + .setRenamePattern(indexName1) + .setRenameReplacement(restoredIndexName1) + .setSourceRemoteStoreRepository(remoteStoreRepoNameUpdated) + .get(); + + assertTrue(restoreSnapshotResponse2.getRestoreInfo().failedShards() == 0); + ensureGreen(restoredIndexName1); + assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1); + + // indexing some new docs and validating + indexDocuments(client, restoredIndexName1, numDocsInIndex1, numDocsInIndex1 + 2); + ensureGreen(restoredIndexName1); + assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1 + 2); + } + + public void testRestoreShallowSnapshotIndexAfterSnapshot() throws ExecutionException, InterruptedException { + String indexName1 = "testindex1"; + String snapshotRepoName = "test-restore-snapshot-repo"; + String remoteStoreRepoNameUpdated = "test-rs-repo-updated" + TEST_REMOTE_STORE_REPO_SUFFIX; + String snapshotName1 = "test-restore-snapshot1"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + Path absolutePath2 = randomRepoPath().toAbsolutePath(); + String[] pathTokens = absolutePath1.toString().split("/"); + String basePath = pathTokens[pathTokens.length - 1]; + Arrays.copyOf(pathTokens, pathTokens.length - 1); + Path location = PathUtils.get(String.join("/", pathTokens)); + pathTokens = absolutePath2.toString().split("/"); + String basePath2 = pathTokens[pathTokens.length - 1]; + Arrays.copyOf(pathTokens, pathTokens.length - 1); + Path location2 = PathUtils.get(String.join("/", pathTokens)); + logger.info("Path 1 [{}]", absolutePath1); + logger.info("Path 2 [{}]", absolutePath2); + String restoredIndexName1 = indexName1 + "-restored"; + + createRepository(snapshotRepoName, "fs", getRepositorySettings(location, basePath, true)); + + Client client = client(); + Settings indexSettings = Settings.builder() + .put(super.indexSettings()) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build(); + createIndex(indexName1, indexSettings); + + int numDocsInIndex1 = randomIntBetween(2, 5); + indexDocuments(client, indexName1, numDocsInIndex1); + + ensureGreen(indexName1); + + logger.info("--> snapshot"); + SnapshotInfo snapshotInfo1 = createSnapshot(snapshotRepoName, snapshotName1, new ArrayList<>(List.of(indexName1))); + assertThat(snapshotInfo1.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo1.successfulShards(), equalTo(snapshotInfo1.totalShards())); + assertThat(snapshotInfo1.state(), equalTo(SnapshotState.SUCCESS)); + + int extraNumDocsInIndex1 = randomIntBetween(20, 50); + indexDocuments(client, indexName1, extraNumDocsInIndex1); + refresh(indexName1); + + client().admin().indices().close(Requests.closeIndexRequest(indexName1)).get(); + createRepository(remoteStoreRepoNameUpdated, "fs", remoteRepoPath); + RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(true) + .setIndices(indexName1) + .setRenamePattern(indexName1) + .setRenameReplacement(restoredIndexName1) + .setSourceRemoteStoreRepository(remoteStoreRepoNameUpdated) + .get(); + + assertTrue(restoreSnapshotResponse2.getRestoreInfo().failedShards() == 0); + ensureGreen(restoredIndexName1); + assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1); + + // indexing some new docs and validating + indexDocuments(client, 
restoredIndexName1, numDocsInIndex1, numDocsInIndex1 + 2); + ensureGreen(restoredIndexName1); + assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1 + 2); + } + +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java new file mode 100644 index 0000000000000..6b6a96dc42a84 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java @@ -0,0 +1,262 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotestore; + +import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStats; +import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsResponse; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.opensearch.action.admin.indices.flush.FlushResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.UncategorizedExecutionException; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.index.IndexService; +import org.opensearch.index.IndexServiceTestUtils; +import org.opensearch.index.remote.RemoteSegmentTransferTracker; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.IndicesService; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.snapshots.mockstore.MockRepository; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.nio.file.Path; +import java.util.Arrays; +import java.util.List; +import java.util.Locale; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import static org.opensearch.index.remote.RemoteStorePressureSettings.MIN_CONSECUTIVE_FAILURES_LIMIT; +import static org.opensearch.index.remote.RemoteStorePressureSettings.REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RemoteStoreBackpressureAndResiliencyIT extends AbstractRemoteStoreMockRepositoryIntegTestCase { + public void testWritesRejectedDueToConsecutiveFailureBreach() throws Exception { + // Here the doc size of the request remains the same throughout the test. After initial indexing, all remote store interactions + // fail leading to consecutive failure limit getting exceeded and leading to rejections. + validateBackpressure(ByteSizeUnit.KB.toIntBytes(1), 10, ByteSizeUnit.KB.toIntBytes(1), 15, "failure_streak_count"); + } + + public void testWritesRejectedDueToBytesLagBreach() throws Exception { + // Initially indexing happens with doc size of 2 bytes, then all remote store interactions start failing. Now, the + // indexing happens with doc size of 1KB leading to bytes lag limit getting exceeded and leading to rejections.
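+ // A rough sketch of the arithmetic, assuming bytes lag counts segment bytes refreshed locally but not yet uploaded to the + // remote store: once uploads fail, each ~1KB doc adds roughly 1KB of lag while the earlier 2-byte docs kept the lag + // negligible, so the dynamically derived bytes lag limit is breached and the primary starts rejecting writes.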
+ validateBackpressure(ByteSizeUnit.BYTES.toIntBytes(2), 30, ByteSizeUnit.KB.toIntBytes(1), 15, "bytes_lag"); + } + + public void testWritesRejectedDueToTimeLagBreach() throws Exception { + // Initially indexing happens with doc size of 1KB, then all remote store interactions start failing. Now, the + // indexing happens with doc size of 1 byte leading to time lag limit getting exceeded and leading to rejections. + validateBackpressure(ByteSizeUnit.KB.toIntBytes(1), 20, ByteSizeUnit.BYTES.toIntBytes(1), 3, "time_lag"); + } + + private void validateBackpressure( + int initialDocSize, + int initialDocsToIndex, + int onFailureDocSize, + int onFailureDocsToIndex, + String breachMode + ) throws Exception { + Path location = randomRepoPath().toAbsolutePath(); + String dataNodeName = setup(location, 0d, "metadata", Long.MAX_VALUE); + + Settings request = Settings.builder() + .put(REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED.getKey(), true) + .put(MIN_CONSECUTIVE_FAILURES_LIMIT.getKey(), 10) + .build(); + ClusterUpdateSettingsResponse clusterUpdateResponse = client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(request) + .get(); + assertEquals(clusterUpdateResponse.getPersistentSettings().get(REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED.getKey()), "true"); + assertEquals(clusterUpdateResponse.getPersistentSettings().get(MIN_CONSECUTIVE_FAILURES_LIMIT.getKey()), "10"); + + logger.info("--> Indexing data"); + + String jsonString = generateString(initialDocSize); + BytesReference initialSource = new BytesArray(jsonString); + indexDocAndRefresh(initialSource, initialDocsToIndex); + + ((MockRepository) internalCluster().getInstance(RepositoriesService.class, dataNodeName).repository(REPOSITORY_NAME)) + .setRandomControlIOExceptionRate(1d); + + jsonString = generateString(onFailureDocSize); + BytesReference onFailureSource = new BytesArray(jsonString); + OpenSearchRejectedExecutionException ex = assertThrows( + OpenSearchRejectedExecutionException.class, + () -> indexDocAndRefresh(onFailureSource, onFailureDocsToIndex) + ); + assertTrue(ex.getMessage().contains("rejected execution on primary shard")); + assertTrue(ex.getMessage().contains(breachMode)); + + RemoteSegmentTransferTracker.Stats stats = stats(); + assertTrue(stats.bytesLag > 0); + assertTrue(stats.refreshTimeLagMs > 0); + assertTrue(stats.localRefreshNumber - stats.remoteRefreshNumber > 0); + assertTrue(stats.rejectionCount > 0); + + ((MockRepository) internalCluster().getInstance(RepositoriesService.class, dataNodeName).repository(REPOSITORY_NAME)) + .setRandomControlIOExceptionRate(0d); + + assertBusy(() -> { + RemoteSegmentTransferTracker.Stats finalStats = stats(); + assertEquals(0, finalStats.bytesLag); + assertEquals(0, finalStats.refreshTimeLagMs); + assertEquals(0, finalStats.localRefreshNumber - finalStats.remoteRefreshNumber); + }, 30, TimeUnit.SECONDS); + + long rejectionCount = stats.rejectionCount; + stats = stats(); + indexDocAndRefresh(initialSource, initialDocsToIndex); + assertEquals(rejectionCount, stats.rejectionCount); + cleanupRepo(); + } + + private RemoteSegmentTransferTracker.Stats stats() { + String shardId = "0"; + RemoteStoreStatsResponse response = client().admin().cluster().prepareRemoteStoreStats(INDEX_NAME, shardId).get(); + final String indexShardId = String.format(Locale.ROOT, "[%s][%s]", INDEX_NAME, shardId); + List<RemoteStoreStats> matches = Arrays.stream(response.getRemoteStoreStats()) + .filter(stat -> indexShardId.equals(stat.getSegmentStats().shardId.toString())) + 
.collect(Collectors.toList()); + assertEquals(1, matches.size()); + return matches.get(0).getSegmentStats(); + } + + private void indexDocAndRefresh(BytesReference source, int iterations) throws InterruptedException { + for (int i = 0; i < iterations; i++) { + client().prepareIndex(INDEX_NAME).setSource(source, MediaTypeRegistry.JSON).get(); + refresh(INDEX_NAME); + } + Thread.sleep(250); + client().prepareIndex(INDEX_NAME).setSource(source, MediaTypeRegistry.JSON).get(); + } + + /** + * Generates a string of the given sizeInBytes + * + * @param sizeInBytes size of the string + * @return the generated string + */ + private String generateString(int sizeInBytes) { + StringBuilder sb = new StringBuilder(); + sb.append("{"); + int i = 0; + // Based on local tests, 1 char occupies 1 byte + while (sb.length() < sizeInBytes) { + String key = "field" + i; + String value = "value" + i; + sb.append("\"").append(key).append("\":\"").append(value).append("\","); + i++; + } + if (sb.length() > 1 && sb.charAt(sb.length() - 1) == ',') { + sb.setLength(sb.length() - 1); + } + sb.append("}"); + return sb.toString(); + } + + /** + * Verifies the fix for <a href="https://github.com/opensearch-project/OpenSearch/issues/10398">Github#10398</a> + */ + public void testAsyncTrimTaskSucceeds() { + Path location = randomRepoPath().toAbsolutePath(); + String dataNodeName = setup(location, 0d, "metadata", Long.MAX_VALUE); + + logger.info("Increasing the frequency of async trim task to ensure it runs in background while indexing"); + IndexService indexService = internalCluster().getInstance(IndicesService.class, dataNodeName).iterator().next(); + IndexServiceTestUtils.setTrimTranslogTaskInterval(indexService, TimeValue.timeValueMillis(100)); + + logger.info("--> Indexing data"); + indexData(randomIntBetween(2, 5), true); + logger.info("--> Indexing succeeded"); + + MockRepository translogRepo = (MockRepository) internalCluster().getInstance(RepositoriesService.class, dataNodeName) + .repository(TRANSLOG_REPOSITORY_NAME); + logger.info("--> Failing all remote store interaction"); + translogRepo.setRandomControlIOExceptionRate(1d); + + for (int i = 0; i < randomIntBetween(5, 10); i++) { + UncategorizedExecutionException exception = assertThrows(UncategorizedExecutionException.class, this::indexSingleDoc); + assertEquals("Failed execution", exception.getMessage()); + } + + translogRepo.setRandomControlIOExceptionRate(0d); + indexSingleDoc(); + logger.info("Indexed single doc successfully"); + } + + /** + * Verifies the fix for <a href="https://github.com/opensearch-project/OpenSearch/issues/10400">Github#10400</a> + */ + public void testSkipLoadGlobalCheckpointToReplicationTracker() { + Path location = randomRepoPath().toAbsolutePath(); + String dataNodeName = setup(location, 0d, "metadata", Long.MAX_VALUE); + + logger.info("--> Indexing data"); + indexData(randomIntBetween(1, 2), true); + logger.info("--> Indexing succeeded"); + + IndexService indexService = internalCluster().getInstance(IndicesService.class, dataNodeName).iterator().next(); + IndexShard indexShard = indexService.getShard(0); + indexShard.failShard("failing shard", null); + + ensureRed(INDEX_NAME); + + MockRepository translogRepo = (MockRepository) internalCluster().getInstance(RepositoriesService.class, dataNodeName) + .repository(TRANSLOG_REPOSITORY_NAME); + logger.info("--> Failing all remote store interaction"); + translogRepo.setRandomControlIOExceptionRate(1d); + client().admin().cluster().prepareReroute().setRetryFailed(true).get();
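+ // Note: a reroute with setRetryFailed(true) resets the allocation retry counter (index.allocation.max_retries), giving the + // failed shard fresh recovery attempts; recovery still cannot succeed while the mock repository keeps throwing IOExceptions.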
+ // Cluster still stays red as the remote interactions are still failing + ensureRed(INDEX_NAME); + + logger.info("Retrying to allocate failed shards"); + client().admin().cluster().prepareReroute().setRetryFailed(true).get(); + // Cluster still stays red as the remote interactions are still failing + ensureRed(INDEX_NAME); + + logger.info("Stop failing all remote store interactions"); + translogRepo.setRandomControlIOExceptionRate(0d); + client().admin().cluster().prepareReroute().setRetryFailed(true).get(); + ensureGreen(INDEX_NAME); + } + + public void testFlushDuringRemoteUploadFailures() { + Path location = randomRepoPath().toAbsolutePath(); + String dataNodeName = setup(location, 0d, "metadata", Long.MAX_VALUE); + + logger.info("--> Indexing data"); + indexData(randomIntBetween(1, 2), true); + logger.info("--> Indexing succeeded"); + ensureGreen(INDEX_NAME); + + MockRepository translogRepo = (MockRepository) internalCluster().getInstance(RepositoriesService.class, dataNodeName) + .repository(TRANSLOG_REPOSITORY_NAME); + logger.info("--> Failing all remote store interaction"); + translogRepo.setRandomControlIOExceptionRate(1d); + + Exception ex = assertThrows(UncategorizedExecutionException.class, () -> indexSingleDoc()); + assertEquals("Failed execution", ex.getMessage()); + + FlushResponse flushResponse = client().admin().indices().prepareFlush(INDEX_NAME).setForce(true).execute().actionGet(); + assertEquals(1, flushResponse.getFailedShards()); + ensureGreen(INDEX_NAME); + + logger.info("--> Stop failing all remote store interactions"); + translogRepo.setRandomControlIOExceptionRate(0d); + flushResponse = client().admin().indices().prepareFlush(INDEX_NAME).setForce(true).execute().actionGet(); + assertEquals(1, flushResponse.getSuccessfulShards()); + assertEquals(0, flushResponse.getFailedShards()); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureIT.java deleted file mode 100644 index 3fe7f3d553a1b..0000000000000 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureIT.java +++ /dev/null @@ -1,159 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license.
- */ - -package org.opensearch.remotestore; - -import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStats; -import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsResponse; -import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; -import org.opensearch.index.remote.RemoteRefreshSegmentTracker; -import org.opensearch.repositories.RepositoriesService; -import org.opensearch.snapshots.mockstore.MockRepository; -import org.opensearch.test.OpenSearchIntegTestCase; - -import java.nio.file.Path; -import java.util.Arrays; -import java.util.List; -import java.util.Locale; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; - -import static org.opensearch.index.remote.RemoteRefreshSegmentPressureSettings.MIN_CONSECUTIVE_FAILURES_LIMIT; -import static org.opensearch.index.remote.RemoteRefreshSegmentPressureSettings.REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED; - -@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) -public class RemoteStoreBackpressureIT extends AbstractRemoteStoreMockRepositoryIntegTestCase { - public void testWritesRejectedDueToConsecutiveFailureBreach() throws Exception { - // Here the doc size of the request remains same throughout the test. After initial indexing, all remote store interactions - // fail leading to consecutive failure limit getting exceeded and leading to rejections. - validateBackpressure(ByteSizeUnit.KB.toIntBytes(1), 10, ByteSizeUnit.KB.toIntBytes(1), 15, "failure_streak_count"); - } - - public void testWritesRejectedDueToBytesLagBreach() throws Exception { - // Initially indexing happens with doc size of 2 bytes, then all remote store interactions start failing. Now, the - // indexing happens with doc size of 1KB leading to bytes lag limit getting exceeded and leading to rejections. - validateBackpressure(ByteSizeUnit.BYTES.toIntBytes(2), 30, ByteSizeUnit.KB.toIntBytes(1), 15, "bytes_lag"); - } - - public void testWritesRejectedDueToTimeLagBreach() throws Exception { - // Initially indexing happens with doc size of 1KB, then all remote store interactions start failing. Now, the - // indexing happens with doc size of 1 byte leading to time lag limit getting exceeded and leading to rejections. 
- validateBackpressure(ByteSizeUnit.KB.toIntBytes(1), 20, ByteSizeUnit.BYTES.toIntBytes(1), 15, "time_lag"); - } - - private void validateBackpressure( - int initialDocSize, - int initialDocsToIndex, - int onFailureDocSize, - int onFailureDocsToIndex, - String breachMode - ) throws Exception { - Path location = randomRepoPath().toAbsolutePath(); - String dataNodeName = setup(location, 0d, "metadata", Long.MAX_VALUE); - - Settings request = Settings.builder() - .put(REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED.getKey(), true) - .put(MIN_CONSECUTIVE_FAILURES_LIMIT.getKey(), 10) - .build(); - ClusterUpdateSettingsResponse clusterUpdateResponse = client().admin() - .cluster() - .prepareUpdateSettings() - .setPersistentSettings(request) - .get(); - assertEquals(clusterUpdateResponse.getPersistentSettings().get(REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED.getKey()), "true"); - assertEquals(clusterUpdateResponse.getPersistentSettings().get(MIN_CONSECUTIVE_FAILURES_LIMIT.getKey()), "10"); - - logger.info("--> Indexing data"); - - String jsonString = generateString(initialDocSize); - BytesReference initialSource = new BytesArray(jsonString); - indexDocAndRefresh(initialSource, initialDocsToIndex); - - ((MockRepository) internalCluster().getInstance(RepositoriesService.class, dataNodeName).repository(REPOSITORY_NAME)) - .setRandomControlIOExceptionRate(1d); - - jsonString = generateString(onFailureDocSize); - BytesReference onFailureSource = new BytesArray(jsonString); - OpenSearchRejectedExecutionException ex = assertThrows( - OpenSearchRejectedExecutionException.class, - () -> indexDocAndRefresh(onFailureSource, onFailureDocsToIndex) - ); - assertTrue(ex.getMessage().contains("rejected execution on primary shard")); - assertTrue(ex.getMessage().contains(breachMode)); - - RemoteRefreshSegmentTracker.Stats stats = stats(); - assertTrue(stats.bytesLag > 0); - assertTrue(stats.refreshTimeLagMs > 0); - assertTrue(stats.localRefreshNumber - stats.remoteRefreshNumber > 0); - assertTrue(stats.rejectionCount > 0); - - ((MockRepository) internalCluster().getInstance(RepositoriesService.class, dataNodeName).repository(REPOSITORY_NAME)) - .setRandomControlIOExceptionRate(0d); - - assertBusy(() -> { - RemoteRefreshSegmentTracker.Stats finalStats = stats(); - assertEquals(0, finalStats.bytesLag); - assertEquals(0, finalStats.refreshTimeLagMs); - assertEquals(0, finalStats.localRefreshNumber - finalStats.remoteRefreshNumber); - }, 30, TimeUnit.SECONDS); - - long rejectionCount = stats.rejectionCount; - stats = stats(); - indexDocAndRefresh(initialSource, initialDocsToIndex); - assertEquals(rejectionCount, stats.rejectionCount); - deleteRepo(); - } - - private RemoteRefreshSegmentTracker.Stats stats() { - String shardId = "0"; - RemoteStoreStatsResponse response = client().admin().cluster().prepareRemoteStoreStats(INDEX_NAME, shardId).get(); - final String indexShardId = String.format(Locale.ROOT, "[%s][%s]", INDEX_NAME, shardId); - List<RemoteStoreStats> matches = Arrays.stream(response.getShards()) - .filter(stat -> indexShardId.equals(stat.getStats().shardId.toString())) - .collect(Collectors.toList()); - assertEquals(1, matches.size()); - return matches.get(0).getStats(); - } - - private void indexDocAndRefresh(BytesReference source, int iterations) { - for (int i = 0; i < iterations; i++) { - client().prepareIndex(INDEX_NAME).setSource(source, XContentType.JSON).get(); - refresh(INDEX_NAME); - } - } - - /** - * Generates string of given sizeInBytes - * - * @param sizeInBytes size of the string - * @return the 
generated string - */ - private String generateString(int sizeInBytes) { - StringBuilder sb = new StringBuilder(); - sb.append("{"); - int i = 0; - // Based on local tests, 1 char is occupying 1 byte - while (sb.length() < sizeInBytes) { - String key = "field" + i; - String value = "value" + i; - sb.append("\"").append(key).append("\":\"").append(value).append("\","); - i++; - } - if (sb.length() > 1 && sb.charAt(sb.length() - 1) == ',') { - sb.setLength(sb.length() - 1); - } - sb.append("}"); - return sb.toString(); - } -} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java index 2887fbc56106c..3899c8a80f442 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java @@ -8,17 +8,39 @@ package org.opensearch.remotestore; -import org.junit.After; +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; +import org.opensearch.action.admin.indices.get.GetIndexRequest; +import org.opensearch.action.admin.indices.get.GetIndexResponse; +import org.opensearch.action.bulk.BulkItemResponse; +import org.opensearch.action.bulk.BulkRequest; +import org.opensearch.action.bulk.BulkResponse; +import org.opensearch.action.index.IndexRequest; +import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.index.IndexResponse; +import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.action.support.WriteRequest; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.RepositoriesMetadata; +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.index.Index; import org.opensearch.index.IndexModule; +import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.repositories.fs.ReloadableFsRepository; import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.After; import java.io.IOException; import java.nio.file.FileVisitResult; @@ -26,18 +48,32 @@ import java.nio.file.Path; import java.nio.file.SimpleFileVisitor; import java.nio.file.attribute.BasicFileAttributes; +import java.util.HashMap; import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; +import static 
org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.repositories.fs.ReloadableFsRepository.REPOSITORIES_FAILRATE_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; public class RemoteStoreBaseIntegTestCase extends OpenSearchIntegTestCase { - protected static final String REPOSITORY_NAME = "test-remore-store-repo"; - protected static final String REPOSITORY_2_NAME = "test-remore-store-repo-2"; + protected static final String REPOSITORY_NAME = "test-remote-store-repo"; + protected static final String REPOSITORY_2_NAME = "test-remote-store-repo-2"; protected static final int SHARD_COUNT = 1; - protected static final int REPLICA_COUNT = 1; - protected Path absolutePath; - protected Path absolutePath2; + protected static int REPLICA_COUNT = 1; + protected static final String TOTAL_OPERATIONS = "total-operations"; + protected static final String REFRESHED_OR_FLUSHED_OPERATIONS = "refreshed-or-flushed-operations"; + protected static final String MAX_SEQ_NO_TOTAL = "max-seq-no-total"; + protected static final String MAX_SEQ_NO_REFRESHED_OR_FLUSHED = "max-seq-no-refreshed-or-flushed"; + + protected Path segmentRepoPath; + protected Path translogRepoPath; + protected boolean clusterSettingsSuppliedByTest = false; private final List<String> documentKeys = List.of( randomAlphaOfLength(5), randomAlphaOfLength(5), @@ -46,39 +82,115 @@ public class RemoteStoreBaseIntegTestCase extends OpenSearchIntegTestCase { randomAlphaOfLength(5) ); - @Override - protected boolean addMockInternalEngine() { - return false; + protected Map<String, Long> indexData(int numberOfIterations, boolean invokeFlush, String index) { + return indexData(numberOfIterations, invokeFlush, false, index); + } + + protected Map<String, Long> indexData(int numberOfIterations, boolean invokeFlush, boolean emptyTranslog, String index) { + long totalOperations = 0; + long refreshedOrFlushedOperations = 0; + long maxSeqNo = -1; + long maxSeqNoRefreshedOrFlushed = -1; + int shardId = 0; + Map<String, Long> indexingStats = new HashMap<>(); + for (int i = 0; i < numberOfIterations; i++) { + if (invokeFlush) { + flushAndRefresh(index); + } else { + refresh(index); + } + + // skip indexing if last iteration as we don't want to have any data in remote translog + if (emptyTranslog && i == numberOfIterations - 1) { + continue; + } + maxSeqNoRefreshedOrFlushed = maxSeqNo; + indexingStats.put(MAX_SEQ_NO_REFRESHED_OR_FLUSHED + "-shard-" + shardId, maxSeqNoRefreshedOrFlushed); + refreshedOrFlushedOperations = totalOperations; + int numberOfOperations = randomIntBetween(20, 50); + int numberOfBulk = randomIntBetween(1, 5); + for (int j = 0; j < numberOfBulk; j++) { + BulkResponse res = indexBulk(index, numberOfOperations); + for (BulkItemResponse singleResp : res.getItems()) { + indexingStats.put( + MAX_SEQ_NO_TOTAL + "-shard-" + singleResp.getResponse().getShardId().id(), + singleResp.getResponse().getSeqNo() + ); + maxSeqNo = singleResp.getResponse().getSeqNo(); + } + totalOperations += numberOfOperations; + } + } + + indexingStats.put(TOTAL_OPERATIONS, totalOperations); + indexingStats.put(REFRESHED_OR_FLUSHED_OPERATIONS, refreshedOrFlushedOperations); + indexingStats.put(MAX_SEQ_NO_TOTAL, maxSeqNo); + indexingStats.put(MAX_SEQ_NO_REFRESHED_OR_FLUSHED, maxSeqNoRefreshedOrFlushed); + return indexingStats; } @Override - protected Settings featureFlagSettings() { - return Settings.builder() - .put(super.featureFlagSettings()) -
.put(FeatureFlags.REMOTE_STORE, "true") - .put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL, "true") - .build(); + protected Settings nodeSettings(int nodeOrdinal) { + if (segmentRepoPath == null || translogRepoPath == null) { + segmentRepoPath = randomRepoPath().toAbsolutePath(); + translogRepoPath = randomRepoPath().toAbsolutePath(); + } + if (clusterSettingsSuppliedByTest) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)).build(); + } else { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(remoteStoreClusterSettings(REPOSITORY_NAME, segmentRepoPath, REPOSITORY_2_NAME, translogRepoPath)) + .build(); + } + } + + protected void setFailRate(String repoName, int value) throws ExecutionException, InterruptedException { + GetRepositoriesRequest gr = new GetRepositoriesRequest(new String[] { repoName }); + GetRepositoriesResponse res = client().admin().cluster().getRepositories(gr).get(); + RepositoryMetadata rmd = res.repositories().get(0); + Settings.Builder settings = Settings.builder() + .put("location", rmd.settings().get("location")) + .put(REPOSITORIES_FAILRATE_SETTING.getKey(), value); + assertAcked( + client().admin().cluster().preparePutRepository(repoName).setType(ReloadableFsRepository.TYPE).setSettings(settings).get() + ); } public Settings indexSettings() { return defaultIndexSettings(); } - IndexResponse indexSingleDoc(String indexName) { - return client().prepareIndex(indexName) + protected IndexResponse indexSingleDoc(String indexName) { + return indexSingleDoc(indexName, false); + } + + protected IndexResponse indexSingleDoc(String indexName, boolean forceRefresh) { + IndexRequestBuilder indexRequestBuilder = client().prepareIndex(indexName) .setId(UUIDs.randomBase64UUID()) - .setSource(documentKeys.get(randomIntBetween(0, documentKeys.size() - 1)), randomAlphaOfLength(5)) - .get(); + .setSource(documentKeys.get(randomIntBetween(0, documentKeys.size() - 1)), randomAlphaOfLength(5)); + if (forceRefresh) { + indexRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + } + return indexRequestBuilder.get(); + } + + protected BulkResponse indexBulk(String indexName, int numDocs) { + BulkRequest bulkRequest = new BulkRequest(); + for (int i = 0; i < numDocs; i++) { + final IndexRequest request = client().prepareIndex(indexName) + .setId(UUIDs.randomBase64UUID()) + .setSource(documentKeys.get(randomIntBetween(0, documentKeys.size() - 1)), randomAlphaOfLength(5)) + .request(); + bulkRequest.add(request); + } + return client().bulk(bulkRequest).actionGet(); } private Settings defaultIndexSettings() { - boolean sameRepoForRSSAndRTS = randomBoolean(); return Settings.builder() .put(super.indexSettings()) .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) - .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) - .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, REPOSITORY_NAME) - .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, sameRepoForRSSAndRTS ? 
REPOSITORY_NAME : REPOSITORY_2_NAME) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, SHARD_COUNT) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, REPLICA_COUNT) .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "300s") @@ -98,36 +210,70 @@ protected Settings remoteStoreIndexSettings(int numberOfReplicas) { return remoteStoreIndexSettings(numberOfReplicas, 1); } - protected Settings remoteStoreIndexSettings(int numberOfReplicas, long totalFieldLimit) { + protected Settings remoteStoreIndexSettings(int numberOfReplicas, long totalFieldLimit, int refresh) { return Settings.builder() .put(remoteStoreIndexSettings(numberOfReplicas)) .put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), totalFieldLimit) + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), String.valueOf(refresh)) .build(); } - protected void putRepository(Path path) { - putRepository(path, REPOSITORY_NAME); + @After + public void teardown() { + clusterSettingsSuppliedByTest = false; + assertRemoteStoreRepositoryOnAllNodes(REPOSITORY_NAME); + assertRemoteStoreRepositoryOnAllNodes(REPOSITORY_2_NAME); + clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).get(); + clusterAdmin().prepareCleanupRepository(REPOSITORY_2_NAME).get(); } - protected void putRepository(Path path, String repoName) { - assertAcked(clusterAdmin().preparePutRepository(repoName).setType("fs").setSettings(Settings.builder().put("location", path))); - } + public RepositoryMetadata buildRepositoryMetadata(DiscoveryNode node, String name) { + Map<String, String> nodeAttributes = node.getAttributes(); + String type = nodeAttributes.get(String.format(Locale.getDefault(), REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, name)); + + String settingsAttributeKeyPrefix = String.format(Locale.getDefault(), REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, name); + Map<String, String> settingsMap = node.getAttributes() + .keySet() + .stream() + .filter(key -> key.startsWith(settingsAttributeKeyPrefix)) + .collect(Collectors.toMap(key -> key.replace(settingsAttributeKeyPrefix, ""), key -> node.getAttributes().get(key))); + + Settings.Builder settings = Settings.builder(); + settingsMap.entrySet().forEach(entry -> settings.put(entry.getKey(), entry.getValue())); + settings.put(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING.getKey(), true); - protected void setupRepo() { - internalCluster().startClusterManagerOnlyNode(); - absolutePath = randomRepoPath().toAbsolutePath(); - putRepository(absolutePath); - absolutePath2 = randomRepoPath().toAbsolutePath(); - putRepository(absolutePath2, REPOSITORY_2_NAME); + return new RepositoryMetadata(name, type, settings.build()); } - @After - public void teardown() { - assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME)); - assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_2_NAME)); + public void assertRemoteStoreRepositoryOnAllNodes(String repositoryName) { + RepositoriesMetadata repositories = internalCluster().getInstance(ClusterService.class, internalCluster().getNodeNames()[0]) + .state() + .metadata() + .custom(RepositoriesMetadata.TYPE); + RepositoryMetadata actualRepository = repositories.repository(repositoryName); + + final RepositoriesService repositoriesService = internalCluster().getClusterManagerNodeInstance(RepositoriesService.class); + final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(repositoryName); + + for (String nodeName : internalCluster().getNodeNames()) { + ClusterService clusterService = 
internalCluster().getInstance(ClusterService.class, nodeName); + DiscoveryNode node = clusterService.localNode(); + RepositoryMetadata expectedRepository = buildRepositoryMetadata(node, repositoryName); + + // Validate that all the restricted settings are intact on all the nodes. + repository.getRestrictedSystemRepositorySettings() + .stream() + .forEach( + setting -> assertEquals( + String.format(Locale.ROOT, "Restricted Settings mismatch [%s]", setting.getKey()), + setting.get(actualRepository.settings()), + setting.get(expectedRepository.settings()) + ) + ); + } + } - public int getFileCount(Path path) throws Exception { + public static int getFileCount(Path path) throws Exception { final AtomicInteger filesExisting = new AtomicInteger(0); Files.walkFileTree(path, new SimpleFileVisitor<>() { @Override @@ -145,4 +291,24 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) { return filesExisting.get(); } + protected IndexShard getIndexShard(String dataNode, String indexName) throws ExecutionException, InterruptedException { + String clusterManagerName = internalCluster().getClusterManagerName(); + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, dataNode); + GetIndexResponse getIndexResponse = client(clusterManagerName).admin().indices().getIndex(new GetIndexRequest()).get(); + String uuid = getIndexResponse.getSettings().get(indexName).get(IndexMetadata.SETTING_INDEX_UUID); + IndexService indexService = indicesService.indexService(new Index(indexName, uuid)); + return indexService.getShard(0); + } + + protected void restore(boolean restoreAllShards, String... indices) { + if (restoreAllShards) { + assertAcked(client().admin().indices().prepareClose(indices)); + } + client().admin() + .cluster() + .restoreRemoteStore( + new RestoreRemoteStoreRequest().indices(indices).restoreAllShards(restoreAllShards), + PlainActionFuture.newFuture() + ); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java new file mode 100644 index 0000000000000..3f90732f1f13d --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java @@ -0,0 +1,472 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.remotestore; + +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.action.admin.indices.datastream.DataStreamRolloverIT; +import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.opensearch.action.admin.indices.template.put.PutIndexTemplateRequest; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.IndexTemplateMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.RepositoriesMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.gateway.remote.ClusterMetadataManifest; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedIndexMetadata; +import org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.ExecutionException; + +import static org.opensearch.cluster.coordination.ClusterBootstrapService.INITIAL_CLUSTER_MANAGER_NODES_SETTING; +import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_READ_ONLY_SETTING; +import static org.opensearch.cluster.metadata.Metadata.CLUSTER_READ_ONLY_BLOCK; +import static org.opensearch.cluster.metadata.Metadata.SETTING_READ_ONLY_SETTING; +import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; +import static org.opensearch.indices.ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE; +import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RemoteStoreClusterStateRestoreIT extends BaseRemoteStoreRestoreIT { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true).build(); + } + + private void addNewNodes(int dataNodeCount, int clusterManagerNodeCount) { + internalCluster().startNodes(dataNodeCount + clusterManagerNodeCount); + } + + private Map<String, Long> initialTestSetup(int shardCount, int replicaCount, int dataNodeCount, int clusterManagerNodeCount) { + prepareCluster(clusterManagerNodeCount, dataNodeCount, INDEX_NAME, replicaCount, shardCount); + Map<String, Long> indexStats = indexData(1, false, INDEX_NAME); + assertEquals(shardCount * (replicaCount + 1), getNumShards(INDEX_NAME).totalNumShards); + ensureGreen(INDEX_NAME); + return indexStats; + } + + private void resetCluster(int dataNodeCount, int clusterManagerNodeCount) { + internalCluster().stopAllNodes(); + internalCluster().startClusterManagerOnlyNodes(clusterManagerNodeCount); + internalCluster().startDataOnlyNodes(dataNodeCount); + } + + protected void verifyRedIndicesAndTriggerRestore(Map<String, Long> indexStats, String indexName, boolean indexMoreDocs) + throws Exception { + ensureRed(indexName); + restore(false, indexName); + verifyRestoredData(indexStats, indexName, 
indexMoreDocs); + } + + public void testFullClusterRestore() throws Exception { + int shardCount = randomIntBetween(1, 2); + int replicaCount = 1; + int dataNodeCount = shardCount * (replicaCount + 1); + int clusterManagerNodeCount = 1; + + // Step - 1 index some data to generate files in remote directory + Map<String, Long> indexStats = initialTestSetup(shardCount, replicaCount, dataNodeCount, 1); + String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + long prevClusterStateVersion = clusterService().state().version(); + + // Step - 2 Replace all nodes in the cluster with new nodes. This ensures new cluster state doesn't have previous index metadata + resetCluster(dataNodeCount, clusterManagerNodeCount); + + String newClusterUUID = clusterService().state().metadata().clusterUUID(); + assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. cluster uuid is same"; + + // Step - 3 validate cluster state restored + long newClusterStateVersion = clusterService().state().version(); + assert prevClusterStateVersion < newClusterStateVersion : String.format( + Locale.ROOT, + "ClusterState version is not restored. previousClusterVersion: [%s] is greater than current [%s]", + prevClusterStateVersion, + newClusterStateVersion + ); + validateMetadata(List.of(INDEX_NAME)); + verifyRedIndicesAndTriggerRestore(indexStats, INDEX_NAME, true); + + } + + /** + * This test scenario covers the case where right after remote state restore and persisting it to disk via LucenePersistedState, full cluster restarts. + * This is a special case for remote state as at this point cluster uuid in the restored state is still ClusterState.UNKNOWN_UUID as we persist it to disk. + * After restart the local disk state will be read but should be again overridden with remote state. + * + * 1. Form a cluster and index a few docs + * 2. Replace all nodes to remove all local disk state + * 3. Start cluster manager node without correct seeding to ensure local disk state is written with cluster uuid ClusterState.UNKNOWN_UUID but with remote restored Metadata + * 4. Restart the cluster manager node with correct seeding. + * 5. After restart the cluster manager picks up the local disk state which has the same Metadata as remote but cluster uuid is still ClusterState.UNKNOWN_UUID + * 6. The cluster manager will try to restore from remote again. + * 7. Metadata loaded from local disk state will be overridden with remote Metadata and no conflict should arise. + * 8. Add data nodes to recover index data + * 9. Verify Metadata and index data is restored. + */ + public void testFullClusterRestoreDoesntFailWithConflictingLocalState() throws Exception { + int shardCount = randomIntBetween(1, 2); + int replicaCount = 1; + int dataNodeCount = shardCount * (replicaCount + 1); + int clusterManagerNodeCount = 1; + + // index some data to generate files in remote directory + Map<String, Long> indexStats = initialTestSetup(shardCount, replicaCount, dataNodeCount, 1); + String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + long prevClusterStateVersion = clusterService().state().version(); + + // stop all nodes + internalCluster().stopAllNodes(); + + // start a cluster manager node with no cluster manager seeding.
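+ // ("Seeding" here refers to the cluster.initial_cluster_manager_nodes bootstrap list; the empty putList below clears it.)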
+ // This should fail with IllegalStateException as cluster manager fails to form without any initial seed + assertThrows( + IllegalStateException.class, + () -> internalCluster().startClusterManagerOnlyNodes( + clusterManagerNodeCount, + Settings.builder() + .putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey()) // disable seeding during bootstrapping + .build() + ) + ); + + // verify cluster manager not elected + String newClusterUUID = clusterService().state().metadata().clusterUUID(); + assert Objects.equals(newClusterUUID, ClusterState.UNKNOWN_UUID) + : "Disabling Cluster manager seeding failed. cluster uuid is not unknown"; + + // restart cluster manager with correct seed + internalCluster().fullRestart(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) { + return Settings.builder() + .putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey(), nodeName) // Seed with correct Cluster Manager node + .build(); + } + }); + + // validate new cluster state formed + newClusterUUID = clusterService().state().metadata().clusterUUID(); + assert !Objects.equals(newClusterUUID, ClusterState.UNKNOWN_UUID) : "cluster restart not successful. cluster uuid is still unknown"; + assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. cluster uuid is same"; + + long newClusterStateVersion = clusterService().state().version(); + assert prevClusterStateVersion < newClusterStateVersion : String.format( + Locale.ROOT, + "ClusterState version is not restored. previousClusterVersion: [%s] is greater than current [%s]", + prevClusterStateVersion, + newClusterStateVersion + ); + validateMetadata(List.of(INDEX_NAME)); + + // start data nodes to trigger index data recovery + internalCluster().startDataOnlyNodes(dataNodeCount); + verifyRedIndicesAndTriggerRestore(indexStats, INDEX_NAME, true); + } + + public void testFullClusterRestoreMultipleIndices() throws Exception { + int shardCount = randomIntBetween(1, 2); + int replicaCount = 1; + int dataNodeCount = shardCount * (replicaCount + 1); + int clusterManagerNodeCount = 1; + + // Step - 1 index some data to generate files in remote directory + Map<String, Long> indexStats = initialTestSetup(shardCount, replicaCount, dataNodeCount, clusterManagerNodeCount); + + String secondIndexName = INDEX_NAME + "-2"; + createIndex(secondIndexName, remoteStoreIndexSettings(replicaCount, shardCount + 1)); + Map<String, Long> indexStats2 = indexData(1, false, secondIndexName); + assertEquals((shardCount + 1) * (replicaCount + 1), getNumShards(secondIndexName).totalNumShards); + ensureGreen(secondIndexName); + updateIndexBlock(true, secondIndexName); + + String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + long prevClusterStateVersion = clusterService().state().version(); + + // Step - 2 Replace all nodes in the cluster with new nodes. This ensures new cluster state doesn't have previous index metadata + resetCluster(dataNodeCount, clusterManagerNodeCount); + + String newClusterUUID = clusterService().state().metadata().clusterUUID(); + assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. cluster uuid is same"; + + // Step - 3 validate cluster state restored + long newClusterStateVersion = clusterService().state().version(); + assert prevClusterStateVersion < newClusterStateVersion : String.format( + Locale.ROOT, + "ClusterState version is not restored. 
previousClusterVersion: [%s] is greater than current [%s]", + prevClusterStateVersion, + newClusterStateVersion + ); + validateMetadata(List.of(INDEX_NAME, secondIndexName)); + verifyRedIndicesAndTriggerRestore(indexStats, INDEX_NAME, false); + verifyRedIndicesAndTriggerRestore(indexStats2, secondIndexName, false); + assertTrue(INDEX_READ_ONLY_SETTING.get(clusterService().state().metadata().index(secondIndexName).getSettings())); + assertThrows(ClusterBlockException.class, () -> indexSingleDoc(secondIndexName)); + // Test is complete + + // Remove the block to ensure proper cleanup + updateIndexBlock(false, secondIndexName); + } + + public void testFullClusterRestoreManifestFilePointsToInvalidIndexMetadataPathThrowsException() throws Exception { + int shardCount = randomIntBetween(1, 2); + int replicaCount = 1; + int dataNodeCount = shardCount * (replicaCount + 1); + int clusterManagerNodeCount = 1; + + // Step - 1 index some data to generate files in remote directory + initialTestSetup(shardCount, replicaCount, dataNodeCount, clusterManagerNodeCount); + + String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + String clusterName = clusterService().state().getClusterName().value(); + + // Step - 2 Replace all nodes in the cluster with new nodes. This ensures new cluster state doesn't have previous index metadata + internalCluster().stopAllNodes(); + // Step - 3 Delete index metadata file in remote + try { + Files.move( + segmentRepoPath.resolve( + RemoteClusterStateService.encodeString(clusterName) + "/cluster-state/" + prevClusterUUID + "/index" + ), + segmentRepoPath.resolve("cluster-state/") + ); + } catch (IOException e) { + throw new RuntimeException(e); + } + assertThrows(IllegalStateException.class, () -> addNewNodes(dataNodeCount, clusterManagerNodeCount)); + // Test is complete + + // Starting a node without remote state to ensure test cleanup + internalCluster().startNode(Settings.builder().put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), false).build()); + } + + public void testRemoteStateFullRestart() throws Exception { + int shardCount = randomIntBetween(1, 2); + int replicaCount = 1; + int dataNodeCount = shardCount * (replicaCount + 1); + int clusterManagerNodeCount = 3; + + Map<String, Long> indexStats = initialTestSetup(shardCount, replicaCount, dataNodeCount, clusterManagerNodeCount); + String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + long prevClusterStateVersion = clusterService().state().version(); + // Delete manifest files in remote + try { + Files.move( + segmentRepoPath.resolve( + RemoteClusterStateService.encodeString(clusterService().state().getClusterName().value()) + + "/cluster-state/" + + prevClusterUUID + + "/manifest" + ), + segmentRepoPath.resolve("cluster-state/") + ); + } catch (IOException e) { + throw new RuntimeException(e); + } + internalCluster().fullRestart(); + ensureGreen(INDEX_NAME); + String newClusterUUID = clusterService().state().metadata().clusterUUID(); + assert Objects.equals(newClusterUUID, prevClusterUUID) : "Full restart not successful. cluster uuid has changed"; + + long newClusterStateVersion = clusterService().state().version(); + assert prevClusterStateVersion < newClusterStateVersion : String.format( + Locale.ROOT, + "ClusterState version is not restored.
previousClusterVersion: [%s] is greater than current [%s]", + prevClusterStateVersion, + newClusterStateVersion + ); + validateCurrentMetadata(); + verifyRedIndicesAndTriggerRestore(indexStats, INDEX_NAME, true); + } + + private void validateMetadata(List<String> indexNames) { + assertEquals(clusterService().state().metadata().indices().size(), indexNames.size()); + for (String indexName : indexNames) { + assertTrue(clusterService().state().metadata().hasIndex(indexName)); + } + } + + private void validateCurrentMetadata() throws Exception { + RemoteClusterStateService remoteClusterStateService = internalCluster().getInstance( + RemoteClusterStateService.class, + internalCluster().getClusterManagerName() + ); + assertBusy(() -> { + ClusterMetadataManifest manifest; + try { + manifest = remoteClusterStateService.getLatestClusterMetadataManifest( + getClusterState().getClusterName().value(), + getClusterState().metadata().clusterUUID() + ).get(); + } catch (IllegalStateException e) { + // AssertionError helps us use assertBusy and retry validation if failed due to a race condition. + throw new AssertionError("Error while validating latest cluster metadata", e); + } + ClusterState clusterState = getClusterState(); + Metadata currentMetadata = clusterState.metadata(); + assertEquals(currentMetadata.indices().size(), manifest.getIndices().size()); + assertEquals(currentMetadata.coordinationMetadata().term(), manifest.getClusterTerm()); + assertEquals(clusterState.version(), manifest.getStateVersion()); + assertEquals(clusterState.stateUUID(), manifest.getStateUUID()); + assertEquals(currentMetadata.clusterUUIDCommitted(), manifest.isClusterUUIDCommitted()); + for (UploadedIndexMetadata uploadedIndexMetadata : manifest.getIndices()) { + IndexMetadata currentIndexMetadata = currentMetadata.index(uploadedIndexMetadata.getIndexName()); + assertEquals(currentIndexMetadata.getIndex().getUUID(), uploadedIndexMetadata.getIndexUUID()); + } + }); + } + + public void testDataStreamPostRemoteStateRestore() throws Exception { + new DataStreamRolloverIT() { + protected boolean triggerRemoteStateRestore() { + return true; + } + }.testDataStreamRollover(); + } + + public void testFullClusterRestoreGlobalMetadata() throws Exception { + int shardCount = randomIntBetween(1, 2); + int replicaCount = 1; + int dataNodeCount = shardCount * (replicaCount + 1); + int clusterManagerNodeCount = 1; + + // Step - 1 index some data to generate files in remote directory + Map<String, Long> indexStats = initialTestSetup(shardCount, replicaCount, dataNodeCount, 1); + String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + long prevClusterStateVersion = clusterService().state().version(); + + // Create global metadata - register a custom repo + Path repoPath = registerCustomRepository(); + + // Create global metadata - persistent settings + updatePersistentSettings(Settings.builder().put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), 34).build()); + + // Create global metadata - index template + putIndexTemplate(); + + // Create global metadata - Put cluster block + addClusterLevelReadOnlyBlock(); + + // Step - 2 Replace all nodes in the cluster with new nodes. This ensures new cluster state doesn't have previous index metadata + resetCluster(dataNodeCount, clusterManagerNodeCount); + + String newClusterUUID = clusterService().state().metadata().clusterUUID(); + assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. 
cluster uuid is same";
+
+        // Step - 3 validate cluster state restored
+        long newClusterStateVersion = clusterService().state().version();
+        assert prevClusterStateVersion < newClusterStateVersion : String.format(
+            Locale.ROOT,
+            "ClusterState version is not restored. previousClusterVersion: [%s] is greater than current [%s]",
+            prevClusterStateVersion,
+            newClusterStateVersion
+        );
+
+        validateCurrentMetadata();
+        assertEquals(Integer.valueOf(34), SETTING_CLUSTER_MAX_SHARDS_PER_NODE.get(clusterService().state().metadata().settings()));
+        assertEquals(true, SETTING_READ_ONLY_SETTING.get(clusterService().state().metadata().settings()));
+        assertTrue(clusterService().state().blocks().hasGlobalBlock(CLUSTER_READ_ONLY_BLOCK));
+        // Remove the cluster read-only block to ensure proper cleanup
+        updatePersistentSettings(Settings.builder().put(SETTING_READ_ONLY_SETTING.getKey(), false).build());
+        assertFalse(clusterService().state().blocks().hasGlobalBlock(CLUSTER_READ_ONLY_BLOCK));
+
+        verifyRedIndicesAndTriggerRestore(indexStats, INDEX_NAME, false);
+
+        // validate global metadata restored
+        verifyRestoredRepositories(repoPath);
+        verifyRestoredIndexTemplate();
+    }
+
+    private Path registerCustomRepository() {
+        Path path = randomRepoPath();
+        assertAcked(
+            client().admin()
+                .cluster()
+                .preparePutRepository("custom-repo")
+                .setType("fs")
+                .setSettings(Settings.builder().put("location", path).put("compress", false))
+                .get()
+        );
+        return path;
+    }
+
+    private void verifyRestoredRepositories(Path repoPath) {
+        RepositoriesMetadata repositoriesMetadata = clusterService().state().metadata().custom(RepositoriesMetadata.TYPE);
+        assertEquals(3, repositoriesMetadata.repositories().size()); // includes the two remote store repositories as well
+        assertTrue(SYSTEM_REPOSITORY_SETTING.get(repositoriesMetadata.repository(REPOSITORY_NAME).settings()));
+        assertTrue(SYSTEM_REPOSITORY_SETTING.get(repositoriesMetadata.repository(REPOSITORY_2_NAME).settings()));
+        assertEquals("fs", repositoriesMetadata.repository("custom-repo").type());
+        assertEquals(
+            Settings.builder().put("location", repoPath).put("compress", false).build(),
+            repositoriesMetadata.repository("custom-repo").settings()
+        );
+
+        // repo cleanup post verification
+        clusterAdmin().prepareDeleteRepository("custom-repo").get();
+    }
+
+    private void addClusterLevelReadOnlyBlock() throws InterruptedException, ExecutionException {
+        updatePersistentSettings(Settings.builder().put(SETTING_READ_ONLY_SETTING.getKey(), true).build());
+        assertTrue(clusterService().state().blocks().hasGlobalBlock(CLUSTER_READ_ONLY_BLOCK));
+    }
+
+    private void updatePersistentSettings(Settings settings) throws ExecutionException, InterruptedException {
+        ClusterUpdateSettingsRequest resetRequest = new ClusterUpdateSettingsRequest();
+        resetRequest.persistentSettings(settings);
+        assertAcked(client().admin().cluster().updateSettings(resetRequest).get());
+    }
+
+    private void verifyRestoredIndexTemplate() {
+        Map<String, IndexTemplateMetadata> indexTemplateMetadataMap = clusterService().state().metadata().templates();
+        assertEquals(1, indexTemplateMetadataMap.size());
+        assertEquals(Arrays.asList("pattern-1", "log-*"), indexTemplateMetadataMap.get("my-template").patterns());
+        assertEquals(
+            Settings.builder()
+                .put("index.number_of_shards", 3)
+                .put("index.number_of_replicas", 1)
+                .build(),
+            indexTemplateMetadataMap.get("my-template").settings()
+        );
+    }
+
+    private static void putIndexTemplate() {
+        PutIndexTemplateRequest request = new PutIndexTemplateRequest("my-template");
+        request.patterns(Arrays.asList("pattern-1", "log-*"));
+
+        request.settings(
+            Settings.builder()
+                .put("index.number_of_shards", 3)
+                .put("index.number_of_replicas", 1)
+        );
+        assertTrue(client().admin().indices().putTemplate(request).actionGet().isAcknowledged());
+    }
+
+    private static void updateIndexBlock(boolean value, String secondIndexName) throws InterruptedException, ExecutionException {
+        assertAcked(
+            client().admin()
+                .indices()
+                .updateSettings(
+                    new UpdateSettingsRequest(Settings.builder().put(INDEX_READ_ONLY_SETTING.getKey(), value).build(), secondIndexName)
+                )
+                .get()
+        );
+    }
+}
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreForceMergeIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreForceMergeIT.java
index 4d5648c74ba5c..0bcde4b44c734 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreForceMergeIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreForceMergeIT.java
@@ -8,7 +8,6 @@
 
 package org.opensearch.remotestore;
 
-import org.junit.Before;
 import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest;
 import org.opensearch.action.index.IndexResponse;
 import org.opensearch.action.support.PlainActionFuture;
@@ -29,7 +28,7 @@
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;
 
-@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 3)
+@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
 public class RemoteStoreForceMergeIT extends RemoteStoreBaseIntegTestCase {
 
     private static final String INDEX_NAME = "remote-store-test-idx-1";
@@ -41,11 +40,6 @@ protected Collection<Class<? extends Plugin>> nodePlugins() {
         return Arrays.asList(MockTransportService.TestPlugin.class);
     }
 
-    @Before
-    public void setup() {
-        setupRepo();
-    }
-
     @Override
     public Settings indexSettings() {
         return remoteStoreIndexSettings(0);
@@ -97,6 +91,7 @@ private void verifyRestoredData(Map<String, Long> indexStats, long deletedDocs)
 
     private void testRestoreWithMergeFlow(int numberOfIterations, boolean invokeFlush, boolean flushAfterMerge, long deletedDocs)
         throws IOException {
+        internalCluster().startNodes(3);
         createIndex(INDEX_NAME, remoteStoreIndexSettings(0));
         ensureYellowAndNoInitializingShards(INDEX_NAME);
         ensureGreen(INDEX_NAME);
@@ -127,6 +122,7 @@ private void testRestoreWithMergeFlow(int numberOfIterations, boolean invokeFlus
     // The following integ tests use randomBoolean to control the number of integ tests. If we used separate
    // values for each of the flags, the number of integ tests would become 16 in comparison to the current 2.
    // We have run all 16 tests locally and they pass.
+ @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9294") public void testRestoreForceMergeSingleIteration() throws IOException { boolean invokeFLush = randomBoolean(); boolean flushAfterMerge = randomBoolean(); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index 693c4113f8f3b..e1997fea3433a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -8,501 +8,79 @@ package org.opensearch.remotestore; -import org.hamcrest.MatcherAssert; -import org.junit.Before; -import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; -import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreResponse; +import org.opensearch.OpenSearchException; +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; +import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.admin.indices.recovery.RecoveryResponse; +import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.opensearch.action.index.IndexResponse; -import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.RecoverySource; +import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; +import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; -import org.opensearch.index.shard.RemoteStoreRefreshListener; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.BufferedAsyncIOProcessor; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.IndexShardClosedException; +import org.opensearch.index.translog.Translog.Durability; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.recovery.PeerRecoveryTargetService; +import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.plugins.Plugin; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; +import org.opensearch.transport.TransportService; +import org.hamcrest.MatcherAssert; import java.io.IOException; +import java.nio.file.Files; import java.nio.file.Path; import java.util.Arrays; import java.util.Collection; -import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; +import java.util.stream.Stream; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.oneOf; -import static org.hamcrest.Matchers.comparesEqualTo; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; 
+import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.comparesEqualTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.oneOf; -@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 0) +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteStoreIT extends RemoteStoreBaseIntegTestCase { - private static final String INDEX_NAME = "remote-store-test-idx-1"; - private static final String INDEX_NAMES = "test-remote-store-1,test-remote-store-2,remote-store-test-index-1,remote-store-test-index-2"; - private static final String INDEX_NAMES_WILDCARD = "test-remote-store-*,remote-store-test-index-*"; - private static final String TOTAL_OPERATIONS = "total-operations"; - private static final String REFRESHED_OR_FLUSHED_OPERATIONS = "refreshed-or-flushed-operations"; - private static final String MAX_SEQ_NO_TOTAL = "max-seq-no-total"; - private static final String MAX_SEQ_NO_REFRESHED_OR_FLUSHED = "max-seq-no-refreshed-or-flushed"; + protected final String INDEX_NAME = "remote-store-test-idx-1"; @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(MockTransportService.TestPlugin.class); } - @Before - public void setup() { - setupRepo(); - } - @Override public Settings indexSettings() { return remoteStoreIndexSettings(0); } - private Map<String, Long> indexData(int numberOfIterations, boolean invokeFlush, String index) { - long totalOperations = 0; - long refreshedOrFlushedOperations = 0; - long maxSeqNo = -1; - long maxSeqNoRefreshedOrFlushed = -1; - int shardId = 0; - Map<String, Long> indexingStats = new HashMap<>(); - for (int i = 0; i < numberOfIterations; i++) { - if (invokeFlush) { - flush(index); - } else { - refresh(index); - } - maxSeqNoRefreshedOrFlushed = maxSeqNo; - indexingStats.put(MAX_SEQ_NO_REFRESHED_OR_FLUSHED + "-shard-" + shardId, maxSeqNoRefreshedOrFlushed); - refreshedOrFlushedOperations = totalOperations; - int numberOfOperations = randomIntBetween(20, 50); - for (int j = 0; j < numberOfOperations; j++) { - IndexResponse response = indexSingleDoc(index); - maxSeqNo = response.getSeqNo(); - shardId = response.getShardId().id(); - indexingStats.put(MAX_SEQ_NO_TOTAL + "-shard-" + shardId, maxSeqNo); - } - totalOperations += numberOfOperations; - } - - indexingStats.put(TOTAL_OPERATIONS, totalOperations); - indexingStats.put(REFRESHED_OR_FLUSHED_OPERATIONS, refreshedOrFlushedOperations); - indexingStats.put(MAX_SEQ_NO_TOTAL, maxSeqNo); - indexingStats.put(MAX_SEQ_NO_REFRESHED_OR_FLUSHED, maxSeqNoRefreshedOrFlushed); - return indexingStats; - } - - private void verifyRestoredData(Map<String, Long> indexStats, boolean checkTotal, String indexName) { - // This is required to get updated number from already active shards which were not restored - refresh(indexName); - String statsGranularity = checkTotal ? TOTAL_OPERATIONS : REFRESHED_OR_FLUSHED_OPERATIONS; - String maxSeqNoGranularity = checkTotal ? 
MAX_SEQ_NO_TOTAL : MAX_SEQ_NO_REFRESHED_OR_FLUSHED; - ensureYellowAndNoInitializingShards(indexName); - ensureGreen(indexName); - assertHitCount(client().prepareSearch(indexName).setSize(0).get(), indexStats.get(statsGranularity)); - IndexResponse response = indexSingleDoc(indexName); - assertEquals(indexStats.get(maxSeqNoGranularity + "-shard-" + response.getShardId().id()) + 1, response.getSeqNo()); - refresh(indexName); - assertHitCount(client().prepareSearch(indexName).setSize(0).get(), indexStats.get(statsGranularity) + 1); - } - - private void prepareCluster(int numClusterManagerNodes, int numDataOnlyNodes, String indices, int replicaCount, int shardCount) { - internalCluster().startClusterManagerOnlyNodes(numClusterManagerNodes); - internalCluster().startDataOnlyNodes(numDataOnlyNodes); - for (String index : indices.split(",")) { - createIndex(index, remoteStoreIndexSettings(replicaCount, shardCount)); - ensureYellowAndNoInitializingShards(index); - ensureGreen(index); - } - } - - private void restore(String... indices) { - boolean restoreAllShards = randomBoolean(); - if (restoreAllShards) { - assertAcked(client().admin().indices().prepareClose(indices)); - } - client().admin() - .cluster() - .restoreRemoteStore( - new RestoreRemoteStoreRequest().indices(indices).restoreAllShards(restoreAllShards), - PlainActionFuture.newFuture() - ); - } - - private void restoreAndVerify(int shardCount, int replicaCount, Map<String, Long> indexStats) { - restore(INDEX_NAME); - ensureGreen(INDEX_NAME); - // This is required to get updated number from already active shards which were not restored - assertEquals(shardCount * (1 + replicaCount), getNumShards(INDEX_NAME).totalNumShards); - assertEquals(replicaCount, getNumShards(INDEX_NAME).numReplicas); - verifyRestoredData(indexStats, true, INDEX_NAME); - } - - /** - * Helper function to test restoring an index with no replication from remote store. Only primary node is dropped. - * @param numberOfIterations Number of times a refresh/flush should be invoked, followed by indexing some data. - * @param invokeFlush If true, a flush is invoked. Otherwise, a refresh is invoked. - * @throws IOException IO Exception. - */ - private void testRestoreFlow(int numberOfIterations, boolean invokeFlush, int shardCount) throws IOException { - prepareCluster(0, 3, INDEX_NAME, 0, shardCount); - Map<String, Long> indexStats = indexData(numberOfIterations, invokeFlush, INDEX_NAME); - assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards); - - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(INDEX_NAME))); - ensureRed(INDEX_NAME); - - restoreAndVerify(shardCount, 0, indexStats); - } - - /** - * Helper function to test restoring an index having replicas from remote store when all the nodes housing the primary/replica drop. - * @param numberOfIterations Number of times a refresh/flush should be invoked, followed by indexing some data. - * @param invokeFlush If true, a flush is invoked. Otherwise, a refresh is invoked. - * @throws IOException IO Exception. 
- */ - private void testRestoreFlowBothPrimaryReplicasDown(int numberOfIterations, boolean invokeFlush, int shardCount) throws IOException { - prepareCluster(1, 2, INDEX_NAME, 1, shardCount); - Map<String, Long> indexStats = indexData(numberOfIterations, invokeFlush, INDEX_NAME); - assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards); - - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNodeName(INDEX_NAME))); - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(INDEX_NAME))); - ensureRed(INDEX_NAME); - internalCluster().startDataOnlyNodes(2); - - restoreAndVerify(shardCount, 1, indexStats); - } - - /** - * Helper function to test restoring multiple indices from remote store when all the nodes housing the primary/replica drop. - * @param numberOfIterations Number of times a refresh/flush should be invoked, followed by indexing some data. - * @param invokeFlush If true, a flush is invoked. Otherwise, a refresh is invoked. - * @throws IOException IO Exception. - */ - private void testRestoreFlowMultipleIndices(int numberOfIterations, boolean invokeFlush, int shardCount) throws IOException { - prepareCluster(1, 3, INDEX_NAMES, 1, shardCount); - String[] indices = INDEX_NAMES.split(","); - Map<String, Map<String, Long>> indicesStats = new HashMap<>(); - for (String index : indices) { - Map<String, Long> indexStats = indexData(numberOfIterations, invokeFlush, index); - indicesStats.put(index, indexStats); - assertEquals(shardCount, getNumShards(index).totalNumShards); - } - - for (String index : indices) { - ClusterHealthStatus indexHealth = ensureRed(index); - if (ClusterHealthStatus.RED.equals(indexHealth)) { - continue; - } - - if (ClusterHealthStatus.GREEN.equals(indexHealth)) { - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNodeName(index))); - } - - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(index))); - } - - ensureRed(indices); - internalCluster().startDataOnlyNodes(3); - - boolean restoreAllShards = randomBoolean(); - if (restoreAllShards) { - assertAcked(client().admin().indices().prepareClose(indices)); - } - client().admin() - .cluster() - .restoreRemoteStore( - new RestoreRemoteStoreRequest().indices(INDEX_NAMES_WILDCARD.split(",")).restoreAllShards(restoreAllShards), - PlainActionFuture.newFuture() - ); - ensureGreen(indices); - for (String index : indices) { - assertEquals(shardCount, getNumShards(index).totalNumShards); - verifyRestoredData(indicesStats.get(index), true, index); - } - } - - public void testRestoreFlowAllShardsNoRedIndex() throws InterruptedException { - int shardCount = randomIntBetween(1, 5); - prepareCluster(0, 3, INDEX_NAME, 0, shardCount); - indexData(randomIntBetween(2, 5), true, INDEX_NAME); - assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards); - - PlainActionFuture<RestoreRemoteStoreResponse> future = PlainActionFuture.newFuture(); - client().admin().cluster().restoreRemoteStore(new RestoreRemoteStoreRequest().indices(INDEX_NAME).restoreAllShards(true), future); - try { - future.get(); - } catch (ExecutionException e) { - // If the request goes to co-ordinator, e.getCause() can be RemoteTransportException - assertTrue(e.getCause() instanceof IllegalStateException || e.getCause().getCause() instanceof IllegalStateException); - } - } - - public void testRestoreFlowNoRedIndex() { - int shardCount = randomIntBetween(1, 5); - prepareCluster(0, 3, INDEX_NAME, 0, shardCount); - Map<String, Long> indexStats = 
indexData(randomIntBetween(2, 5), true, INDEX_NAME); - assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards); - - client().admin() - .cluster() - .restoreRemoteStore(new RestoreRemoteStoreRequest().indices(INDEX_NAME).restoreAllShards(false), PlainActionFuture.newFuture()); - - ensureGreen(INDEX_NAME); - assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards); - verifyRestoredData(indexStats, true, INDEX_NAME); - } - - /** - * Simulates all data restored using Remote Translog Store. - * @throws IOException IO Exception. - */ - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/6188") - public void testRemoteTranslogRestoreWithNoDataPostCommit() throws IOException { - testRestoreFlow(1, true, randomIntBetween(1, 5)); - } - - /** - * Simulates all data restored using Remote Translog Store. - * @throws IOException IO Exception. - */ - public void testRemoteTranslogRestoreWithNoDataPostRefresh() throws IOException { - testRestoreFlow(1, false, randomIntBetween(1, 5)); - } - - /** - * Simulates refreshed data restored using Remote Segment Store - * and unrefreshed data restored using Remote Translog Store. - * @throws IOException IO Exception. - */ - public void testRemoteTranslogRestoreWithRefreshedData() throws IOException { - testRestoreFlow(randomIntBetween(2, 5), false, randomIntBetween(1, 5)); - } - - /** - * Simulates refreshed data restored using Remote Segment Store - * and unrefreshed data restored using Remote Translog Store. - * @throws IOException IO Exception. - */ - public void testRemoteTranslogRestoreWithCommittedData() throws IOException { - testRestoreFlow(randomIntBetween(2, 5), true, randomIntBetween(1, 5)); - } - - /** - * Simulates all data restored using Remote Translog Store. - * @throws IOException IO Exception. - */ - // @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/6188") - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8479") - public void testRTSRestoreWithNoDataPostCommitPrimaryReplicaDown() throws IOException { - testRestoreFlowBothPrimaryReplicasDown(1, true, randomIntBetween(1, 5)); - } - - /** - * Simulates all data restored using Remote Translog Store. - * @throws IOException IO Exception. - */ - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8479") - public void testRTSRestoreWithNoDataPostRefreshPrimaryReplicaDown() throws IOException { - testRestoreFlowBothPrimaryReplicasDown(1, false, randomIntBetween(1, 5)); - } - - /** - * Simulates refreshed data restored using Remote Segment Store - * and unrefreshed data restored using Remote Translog Store. - * @throws IOException IO Exception. - */ - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8479") - public void testRTSRestoreWithRefreshedDataPrimaryReplicaDown() throws IOException { - testRestoreFlowBothPrimaryReplicasDown(randomIntBetween(2, 5), false, randomIntBetween(1, 5)); - } - - /** - * Simulates refreshed data restored using Remote Segment Store - * and unrefreshed data restored using Remote Translog Store. - * @throws IOException IO Exception. 
- */ - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8479") - public void testRTSRestoreWithCommittedDataPrimaryReplicaDown() throws IOException { - testRestoreFlowBothPrimaryReplicasDown(randomIntBetween(2, 5), true, randomIntBetween(1, 5)); - } - - /** - * Simulates refreshed data restored using Remote Segment Store - * and unrefreshed data restored using Remote Translog Store - * for multiple indices matching a wildcard name pattern. - * @throws IOException IO Exception. - */ - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8480") - public void testRTSRestoreWithCommittedDataMultipleIndicesPatterns() throws IOException { - testRestoreFlowMultipleIndices(2, true, randomIntBetween(1, 5)); - } - - /** - * Simulates refreshed data restored using Remote Segment Store - * and unrefreshed data restored using Remote Translog Store, - * with all remote-enabled red indices considered for the restore by default. - * @throws IOException IO Exception. - */ - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8480") - public void testRTSRestoreWithCommittedDataDefaultAllIndices() throws IOException { - int shardCount = randomIntBetween(1, 5); - prepareCluster(1, 3, INDEX_NAMES, 1, shardCount); - String[] indices = INDEX_NAMES.split(","); - Map<String, Map<String, Long>> indicesStats = new HashMap<>(); - for (String index : indices) { - Map<String, Long> indexStats = indexData(2, true, index); - indicesStats.put(index, indexStats); - assertEquals(shardCount, getNumShards(index).totalNumShards); - } - - for (String index : indices) { - if (ClusterHealthStatus.RED.equals(ensureRed(index))) { - continue; - } - - if (ClusterHealthStatus.GREEN.equals(ensureRed(index))) { - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNodeName(index))); - } - - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(index))); - } - - ensureRed(indices); - internalCluster().startDataOnlyNodes(3); - - restore(indices); - ensureGreen(indices); - - for (String index : indices) { - assertEquals(shardCount, getNumShards(index).totalNumShards); - verifyRestoredData(indicesStats.get(index), true, index); - } - } - - /** - * Simulates refreshed data restored using Remote Segment Store - * and unrefreshed data restored using Remote Translog Store, - * with only some of the remote-enabled red indices requested for the restore. - * @throws IOException IO Exception. 
- */ - public void testRTSRestoreWithCommittedDataNotAllRedRemoteIndices() throws IOException { - int shardCount = randomIntBetween(1, 5); - prepareCluster(1, 3, INDEX_NAMES, 0, shardCount); - String[] indices = INDEX_NAMES.split(","); - Map<String, Map<String, Long>> indicesStats = new HashMap<>(); - for (String index : indices) { - Map<String, Long> indexStats = indexData(2, true, index); - indicesStats.put(index, indexStats); - assertEquals(shardCount, getNumShards(index).totalNumShards); - } - - for (String index : indices) { - if (ClusterHealthStatus.RED.equals(ensureRed(index))) { - continue; - } - - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(index))); - } - - ensureRed(indices); - internalCluster().startDataOnlyNodes(3); - - boolean restoreAllShards = randomBoolean(); - if (restoreAllShards) { - assertAcked(client().admin().indices().prepareClose(indices[0], indices[1])); - } - client().admin() - .cluster() - .restoreRemoteStore( - new RestoreRemoteStoreRequest().indices(indices[0], indices[1]).restoreAllShards(restoreAllShards), - PlainActionFuture.newFuture() - ); - ensureGreen(indices[0], indices[1]); - assertEquals(shardCount, getNumShards(indices[0]).totalNumShards); - verifyRestoredData(indicesStats.get(indices[0]), true, indices[0]); - assertEquals(shardCount, getNumShards(indices[1]).totalNumShards); - verifyRestoredData(indicesStats.get(indices[1]), true, indices[1]); - ensureRed(indices[2], indices[3]); - } - - /** - * Simulates refreshed data restored using Remote Segment Store - * and unrefreshed data restored using Remote Translog Store, - * with all remote-enabled red indices being considered for the restore - * except those matching the specified exclusion pattern. - * @throws IOException IO Exception. 
- */ - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8480") - public void testRTSRestoreWithCommittedDataExcludeIndicesPatterns() throws IOException { - int shardCount = randomIntBetween(1, 5); - prepareCluster(1, 3, INDEX_NAMES, 1, shardCount); - String[] indices = INDEX_NAMES.split(","); - Map<String, Map<String, Long>> indicesStats = new HashMap<>(); - for (String index : indices) { - Map<String, Long> indexStats = indexData(2, true, index); - indicesStats.put(index, indexStats); - assertEquals(shardCount, getNumShards(index).totalNumShards); - } - - for (String index : indices) { - if (ClusterHealthStatus.RED.equals(ensureRed(index))) { - continue; - } - - if (ClusterHealthStatus.GREEN.equals(ensureRed(index))) { - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNodeName(index))); - } - - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(index))); - } - - ensureRed(indices); - internalCluster().startDataOnlyNodes(3); - - boolean restoreAllShards = randomBoolean(); - if (restoreAllShards) { - assertAcked(client().admin().indices().prepareClose(indices[0], indices[1])); - } - client().admin() - .cluster() - .restoreRemoteStore( - new RestoreRemoteStoreRequest().indices("*", "-remote-store-test-index-*").restoreAllShards(restoreAllShards), - PlainActionFuture.newFuture() - ); - ensureGreen(indices[0], indices[1]); - assertEquals(shardCount, getNumShards(indices[0]).totalNumShards); - verifyRestoredData(indicesStats.get(indices[0]), true, indices[0]); - assertEquals(shardCount, getNumShards(indices[1]).totalNumShards); - verifyRestoredData(indicesStats.get(indices[1]), true, indices[1]); - ensureRed(indices[2], indices[3]); - } - - /** - * Simulates no-op restore from remote store, - * when the index has no data. - * @throws IOException IO Exception. 
- */ - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/6188") - public void testRTSRestoreNoData() throws IOException { - testRestoreFlow(0, true, randomIntBetween(1, 5)); - } - - // TODO: Restore flow - index aliases - private void testPeerRecovery(int numberOfIterations, boolean invokeFlush) throws Exception { - internalCluster().startDataOnlyNodes(3); + internalCluster().startNodes(3); createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); ensureYellowAndNoInitializingShards(INDEX_NAME); ensureGreen(INDEX_NAME); @@ -533,12 +111,8 @@ private void testPeerRecovery(int numberOfIterations, boolean invokeFlush) throw .filter(rs -> rs.getRecoverySource().getType() == RecoverySource.Type.PEER) .findFirst(); assertFalse(recoverySource.isEmpty()); - if (numberOfIterations == 1 && invokeFlush) { - // segments_N file is copied to new replica - assertEquals(1, recoverySource.get().getIndex().recoveredFileCount()); - } else { - assertEquals(0, recoverySource.get().getIndex().recoveredFileCount()); - } + // segments_N file is copied to new replica + assertEquals(1, recoverySource.get().getIndex().recoveredFileCount()); IndexResponse response = indexSingleDoc(INDEX_NAME); assertEquals(indexStats.get(MAX_SEQ_NO_TOTAL) + 1, response.getSeqNo()); @@ -558,6 +132,16 @@ public void testPeerRecoveryWithRemoteStoreAndRemoteTranslogFlush() throws Excep testPeerRecovery(randomIntBetween(2, 5), true); } + public void testPeerRecoveryWithLowActivityTimeout() throws Exception { + ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest().persistentSettings( + Settings.builder() + .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "20kb") + .put(RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING.getKey(), "1s") + ); + internalCluster().client().admin().cluster().updateSettings(req).get(); + testPeerRecovery(randomIntBetween(2, 5), true); + } + public void testPeerRecoveryWithRemoteStoreAndRemoteTranslogNoDataRefresh() throws Exception { testPeerRecovery(1, false); } @@ -567,7 +151,7 @@ public void testPeerRecoveryWithRemoteStoreAndRemoteTranslogRefresh() throws Exc } private void verifyRemoteStoreCleanup() throws Exception { - internalCluster().startDataOnlyNodes(3); + internalCluster().startNodes(3); createIndex(INDEX_NAME, remoteStoreIndexSettings(1)); indexData(5, randomBoolean(), INDEX_NAME); @@ -576,7 +160,7 @@ private void verifyRemoteStoreCleanup() throws Exception { .prepareGetSettings(INDEX_NAME) .get() .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); - Path indexPath = Path.of(String.valueOf(absolutePath), indexUUID); + Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID); assertTrue(getFileCount(indexPath) > 0); assertAcked(client().admin().indices().delete(new DeleteIndexRequest(INDEX_NAME)).get()); // Delete is async. 
Give it time to complete.
@@ -587,14 +171,14 @@ private void verifyRemoteStoreCleanup() throws Exception {
         }, 30, TimeUnit.SECONDS);
     }
 
+    @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9327")
     public void testRemoteTranslogCleanup() throws Exception {
         verifyRemoteStoreCleanup();
     }
 
-    @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8658")
     public void testStaleCommitDeletionWithInvokeFlush() throws Exception {
-        internalCluster().startDataOnlyNodes(3);
-        createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l));
+        String dataNode = internalCluster().startNode();
+        createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1));
         int numberOfIterations = randomIntBetween(5, 15);
         indexData(numberOfIterations, true, INDEX_NAME);
         String indexUUID = client().admin()
@@ -602,24 +186,29 @@ public void testStaleCommitDeletionWithInvokeFlush() throws Exception {
             .prepareGetSettings(INDEX_NAME)
             .get()
             .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID);
-        Path indexPath = Path.of(String.valueOf(absolutePath), indexUUID, "/0/segments/metadata");
+        Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/metadata");
+
+        IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME);
+        int lastNMetadataFilesToKeep = indexShard.getRecoverySettings().getMinRemoteSegmentMetadataFiles();
         // Delete is async.
         assertBusy(() -> {
             int actualFileCount = getFileCount(indexPath);
-            if (numberOfIterations <= RemoteStoreRefreshListener.LAST_N_METADATA_FILES_TO_KEEP) {
-                MatcherAssert.assertThat(actualFileCount, is(oneOf(numberOfIterations, numberOfIterations + 1)));
+            if (numberOfIterations <= lastNMetadataFilesToKeep) {
+                MatcherAssert.assertThat(actualFileCount, is(oneOf(numberOfIterations - 1, numberOfIterations, numberOfIterations + 1)));
             } else {
                 // As delete is async, it's possible that the file gets created before the deletion or after
                 // deletion.
-                MatcherAssert.assertThat(actualFileCount, is(oneOf(10, 11)));
+                MatcherAssert.assertThat(
+                    actualFileCount,
+                    is(oneOf(lastNMetadataFilesToKeep - 1, lastNMetadataFilesToKeep, lastNMetadataFilesToKeep + 1))
+                );
             }
         }, 30, TimeUnit.SECONDS);
     }
 
-    @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8658")
     public void testStaleCommitDeletionWithoutInvokeFlush() throws Exception {
-        internalCluster().startDataOnlyNodes(3);
-        createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l));
+        internalCluster().startNode();
+        createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1));
         int numberOfIterations = randomIntBetween(5, 15);
         indexData(numberOfIterations, false, INDEX_NAME);
         String indexUUID = client().admin()
@@ -627,9 +216,590 @@ public void testStaleCommitDeletionWithoutInvokeFlush() throws Exception {
             .prepareGetSettings(INDEX_NAME)
             .get()
             .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID);
-        Path indexPath = Path.of(String.valueOf(absolutePath), indexUUID, "/0/segments/metadata");
+        Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/metadata");
+        int actualFileCount = getFileCount(indexPath);
+        // We also allow (numberOfIterations + 1) as index creation also triggers refresh.
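+        // Note: the (numberOfIterations - 1) option below presumably accounts for the asynchronous
+        // stale-metadata deletion racing with this count; the comment above only explains the
+        // (numberOfIterations + 1) case.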
+        MatcherAssert.assertThat(actualFileCount, is(oneOf(numberOfIterations - 1, numberOfIterations, numberOfIterations + 1)));
+    }
+
+    public void testStaleCommitDeletionWithMinSegmentFiles_3() throws Exception {
+        Settings.Builder settings = Settings.builder()
+            .put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), "3");
+        internalCluster().startNode(settings);
+
+        createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1));
+        int numberOfIterations = randomIntBetween(5, 15);
+        indexData(numberOfIterations, true, INDEX_NAME);
+        String indexUUID = client().admin()
+            .indices()
+            .prepareGetSettings(INDEX_NAME)
+            .get()
+            .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID);
+        Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/metadata");
+        int actualFileCount = getFileCount(indexPath);
+        // With the retention count set to 3, we expect 4 files: the 3 retained metadata files plus 1 from the refresh on index creation.
+        MatcherAssert.assertThat(actualFileCount, is(oneOf(4)));
+    }
+
+    public void testStaleCommitDeletionWithMinSegmentFiles_Disabled() throws Exception {
+        Settings.Builder settings = Settings.builder()
+            .put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), "-1");
+        internalCluster().startNode(settings);
+
+        createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1));
+        int numberOfIterations = randomIntBetween(12, 18);
+        indexData(numberOfIterations, true, INDEX_NAME);
+        String indexUUID = client().admin()
+            .indices()
+            .prepareGetSettings(INDEX_NAME)
+            .get()
+            .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID);
+        Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/metadata");
         int actualFileCount = getFileCount(indexPath);
         // We also allow (numberOfIterations + 1) as index creation also triggers refresh.
-        MatcherAssert.assertThat(actualFileCount, is(oneOf(numberOfIterations, numberOfIterations + 1)));
+        MatcherAssert.assertThat(actualFileCount, is(oneOf(numberOfIterations + 1)));
+    }
+
+    /**
+     * Tests that when the index setting is not passed during index creation, the buffer interval picked up is the cluster
+     * default.
+ */ + public void testDefaultBufferInterval() throws ExecutionException, InterruptedException { + internalCluster().startClusterManagerOnlyNode(); + String clusterManagerName = internalCluster().getClusterManagerName(); + String dataNode = internalCluster().startDataOnlyNodes(1).get(0); + createIndex(INDEX_NAME); + ensureYellowAndNoInitializingShards(INDEX_NAME); + ensureGreen(INDEX_NAME); + assertClusterRemoteBufferInterval(IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, dataNode); + + IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME); + assertTrue(indexShard.getTranslogSyncProcessor() instanceof BufferedAsyncIOProcessor); + assertBufferInterval(IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, indexShard); + + // Next, we change the default buffer interval and the same should reflect in the buffer interval of the index created + TimeValue clusterBufferInterval = TimeValue.timeValueSeconds(randomIntBetween(100, 200)); + client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), clusterBufferInterval)) + .get(); + assertBufferInterval(clusterBufferInterval, indexShard); + clearClusterBufferIntervalSetting(clusterManagerName); + } + + /** + * This tests multiple cases where the index setting is passed during the index creation with multiple combinations + * with and without cluster default. + */ + public void testOverriddenBufferInterval() throws ExecutionException, InterruptedException { + internalCluster().startClusterManagerOnlyNode(); + String clusterManagerName = internalCluster().getClusterManagerName(); + String dataNode = internalCluster().startDataOnlyNodes(1).get(0); + + TimeValue bufferInterval = TimeValue.timeValueSeconds(randomIntBetween(0, 100)); + Settings indexSettings = Settings.builder() + .put(indexSettings()) + .put(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), bufferInterval) + .build(); + createIndex(INDEX_NAME, indexSettings); + ensureYellowAndNoInitializingShards(INDEX_NAME); + ensureGreen(INDEX_NAME); + + IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME); + assertTrue(indexShard.getTranslogSyncProcessor() instanceof BufferedAsyncIOProcessor); + assertBufferInterval(bufferInterval, indexShard); + + // Set the cluster default with a different value, validate that the buffer interval is still the overridden value + TimeValue clusterBufferInterval = TimeValue.timeValueSeconds(randomIntBetween(100, 200)); + client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), clusterBufferInterval)) + .get(); + assertBufferInterval(bufferInterval, indexShard); + + // Set the index setting (index.remote_store.translog.buffer_interval) with a different value and validate that + // the buffer interval is updated + bufferInterval = TimeValue.timeValueSeconds(bufferInterval.seconds() + randomIntBetween(1, 100)); + client(clusterManagerName).admin() + .indices() + .updateSettings( + new UpdateSettingsRequest(INDEX_NAME).settings( + Settings.builder().put(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), bufferInterval) + ) + ) + .get(); + assertBufferInterval(bufferInterval, indexShard); + + // Set the index setting (index.remote_store.translog.buffer_interval) with null and validate the buffer interval + // which will be the cluster default now. 
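+        // Note: putNull(...) below clears the index-level override, so the shard is expected to
+        // fall back to the cluster-level cluster.remote_store.translog.buffer_interval set above.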
+ client(clusterManagerName).admin() + .indices() + .updateSettings( + new UpdateSettingsRequest(INDEX_NAME).settings( + Settings.builder().putNull(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey()) + ) + ) + .get(); + assertBufferInterval(clusterBufferInterval, indexShard); + clearClusterBufferIntervalSetting(clusterManagerName); + } + + /** + * This tests validation which kicks in during index creation failing creation if the value is less than minimum allowed value. + */ + public void testOverriddenBufferIntervalValidation() { + internalCluster().startClusterManagerOnlyNode(); + TimeValue bufferInterval = TimeValue.timeValueSeconds(-1); + Settings indexSettings = Settings.builder() + .put(indexSettings()) + .put(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), bufferInterval) + .build(); + IllegalArgumentException exceptionDuringCreateIndex = assertThrows( + IllegalArgumentException.class, + () -> createIndex(INDEX_NAME, indexSettings) + ); + assertEquals( + "failed to parse value [-1] for setting [index.remote_store.translog.buffer_interval], must be >= [0ms]", + exceptionDuringCreateIndex.getMessage() + ); + } + + /** + * This tests validation of the cluster setting when being set. + */ + public void testClusterBufferIntervalValidation() { + String clusterManagerName = internalCluster().startClusterManagerOnlyNode(); + IllegalArgumentException exception = assertThrows( + IllegalArgumentException.class, + () -> client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), TimeValue.timeValueSeconds(-1)) + ) + .get() + ); + assertEquals( + "failed to parse value [-1] for setting [cluster.remote_store.translog.buffer_interval], must be >= [0ms]", + exception.getMessage() + ); + } + + public void testRequestDurabilityWhenRestrictSettingExplicitFalse() throws ExecutionException, InterruptedException { + // Explicit node settings and request durability + testRestrictSettingFalse(true, Durability.REQUEST); + } + + public void testAsyncDurabilityWhenRestrictSettingExplicitFalse() throws ExecutionException, InterruptedException { + // Explicit node settings and async durability + testRestrictSettingFalse(true, Durability.ASYNC); + } + + public void testRequestDurabilityWhenRestrictSettingImplicitFalse() throws ExecutionException, InterruptedException { + // No node settings and request durability + testRestrictSettingFalse(false, Durability.REQUEST); + } + + public void testAsyncDurabilityWhenRestrictSettingImplicitFalse() throws ExecutionException, InterruptedException { + // No node settings and async durability + testRestrictSettingFalse(false, Durability.ASYNC); + } + + private void testRestrictSettingFalse(boolean setRestrictFalse, Durability durability) throws ExecutionException, InterruptedException { + String clusterManagerName; + if (setRestrictFalse) { + clusterManagerName = internalCluster().startClusterManagerOnlyNode( + Settings.builder().put(IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING.getKey(), false).build() + ); + } else { + clusterManagerName = internalCluster().startClusterManagerOnlyNode(); + } + String dataNode = internalCluster().startDataOnlyNodes(1).get(0); + Settings indexSettings = Settings.builder() + .put(indexSettings()) + .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), durability) + .build(); + createIndex(INDEX_NAME, indexSettings); + IndexShard indexShard = 
getIndexShard(dataNode, INDEX_NAME); + assertEquals(durability, indexShard.indexSettings().getTranslogDurability()); + + durability = randomFrom(Durability.values()); + client(clusterManagerName).admin() + .indices() + .updateSettings( + new UpdateSettingsRequest(INDEX_NAME).settings( + Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), durability) + ) + ) + .get(); + assertEquals(durability, indexShard.indexSettings().getTranslogDurability()); + } + + public void testAsyncDurabilityThrowsExceptionWhenRestrictSettingTrue() throws ExecutionException, InterruptedException { + String expectedExceptionMsg = + "index setting [index.translog.durability=async] is not allowed as cluster setting [cluster.remote_store.index.restrict.async-durability=true]"; + String clusterManagerName = internalCluster().startClusterManagerOnlyNode( + Settings.builder().put(IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING.getKey(), true).build() + ); + String dataNode = internalCluster().startDataOnlyNodes(1).get(0); + + // Case 1 - Test create index fails + Settings indexSettings = Settings.builder() + .put(indexSettings()) + .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Durability.ASYNC) + .build(); + IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> createIndex(INDEX_NAME, indexSettings)); + assertEquals(expectedExceptionMsg, exception.getMessage()); + + // Case 2 - Test update index fails + createIndex(INDEX_NAME); + IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME); + assertEquals(Durability.REQUEST, indexShard.indexSettings().getTranslogDurability()); + exception = assertThrows( + IllegalArgumentException.class, + () -> client(clusterManagerName).admin() + .indices() + .updateSettings(new UpdateSettingsRequest(INDEX_NAME).settings(indexSettings)) + .actionGet() + ); + assertEquals(expectedExceptionMsg, exception.getMessage()); + } + + private void assertClusterRemoteBufferInterval(TimeValue expectedBufferInterval, String dataNode) { + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, dataNode); + assertEquals(expectedBufferInterval, indicesService.getClusterRemoteTranslogBufferInterval()); + } + + private void assertBufferInterval(TimeValue expectedBufferInterval, IndexShard indexShard) { + assertEquals( + expectedBufferInterval, + ((BufferedAsyncIOProcessor<?>) indexShard.getTranslogSyncProcessor()).getBufferIntervalSupplier().get() + ); + } + + private void clearClusterBufferIntervalSetting(String clusterManagerName) { + client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().putNull(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey())) + .get(); + } + + public void testRestoreSnapshotToIndexWithSameNameDifferentUUID() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + List<String> dataNodes = internalCluster().startDataOnlyNodes(2); + + Path absolutePath = randomRepoPath().toAbsolutePath(); + assertAcked( + clusterAdmin().preparePutRepository("test-repo").setType("fs").setSettings(Settings.builder().put("location", absolutePath)) + ); + + logger.info("--> Create index and ingest 50 docs"); + createIndex(INDEX_NAME, remoteStoreIndexSettings(1)); + indexBulk(INDEX_NAME, 50); + flushAndRefresh(INDEX_NAME); + + String originalIndexUUID = client().admin() + .indices() + .prepareGetSettings(INDEX_NAME) + .get() + .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); + 
assertNotNull(originalIndexUUID); + assertNotEquals(IndexMetadata.INDEX_UUID_NA_VALUE, originalIndexUUID); + + ensureGreen(); + + logger.info("--> take a snapshot"); + client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setIndices(INDEX_NAME).setWaitForCompletion(true).get(); + + logger.info("--> wipe all indices"); + cluster().wipeIndices(INDEX_NAME); + + logger.info("--> Create index with the same name, different UUID"); + assertAcked( + prepareCreate(INDEX_NAME).setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 1)) + ); + + ensureGreen(TimeValue.timeValueSeconds(30), INDEX_NAME); + + String newIndexUUID = client().admin() + .indices() + .prepareGetSettings(INDEX_NAME) + .get() + .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); + assertNotNull(newIndexUUID); + assertNotEquals(IndexMetadata.INDEX_UUID_NA_VALUE, newIndexUUID); + assertNotEquals(newIndexUUID, originalIndexUUID); + + logger.info("--> close index"); + client().admin().indices().prepareClose(INDEX_NAME).get(); + + logger.info("--> restore all indices from the snapshot"); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true) + .execute() + .actionGet(); + assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); + + flushAndRefresh(INDEX_NAME); + + ensureGreen(INDEX_NAME); + assertBusy(() -> { + assertHitCount(client(dataNodes.get(0)).prepareSearch(INDEX_NAME).setSize(0).get(), 50); + assertHitCount(client(dataNodes.get(1)).prepareSearch(INDEX_NAME).setSize(0).get(), 50); + }); + } + + public void testNoSearchIdleForAnyReplicaCount() throws ExecutionException, InterruptedException { + internalCluster().startClusterManagerOnlyNode(); + String primaryShardNode = internalCluster().startDataOnlyNodes(1).get(0); + + createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); + ensureGreen(INDEX_NAME); + IndexShard indexShard = getIndexShard(primaryShardNode, INDEX_NAME); + assertFalse(indexShard.isSearchIdleSupported()); + + String replicaShardNode = internalCluster().startDataOnlyNodes(1).get(0); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) + ); + ensureGreen(INDEX_NAME); + assertFalse(indexShard.isSearchIdleSupported()); + + indexShard = getIndexShard(replicaShardNode, INDEX_NAME); + assertFalse(indexShard.isSearchIdleSupported()); + } + + public void testFallbackToNodeToNodeSegmentCopy() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + List<String> dataNodes = internalCluster().startDataOnlyNodes(2); + + // 1. Create index with 0 replica + createIndex(INDEX_NAME, remoteStoreIndexSettings(0, 10000L, -1)); + ensureGreen(INDEX_NAME); + + // 2. Index docs + indexBulk(INDEX_NAME, 50); + flushAndRefresh(INDEX_NAME); + + // 3. Delete data from remote segment store + String indexUUID = client().admin() + .indices() + .prepareGetSettings(INDEX_NAME) + .get() + .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); + Path segmentDataPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/data"); + + try (Stream<Path> files = Files.list(segmentDataPath)) { + files.forEach(p -> { + try { + Files.delete(p); + } catch (IOException e) { + // Ignore + } + }); + } + + // 4. 
Start recovery by changing number of replicas to 1 + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) + ); + + // 5. Ensure green and verify number of docs + ensureGreen(INDEX_NAME); + assertBusy(() -> { + assertHitCount(client(dataNodes.get(0)).prepareSearch(INDEX_NAME).setSize(0).get(), 50); + assertHitCount(client(dataNodes.get(1)).prepareSearch(INDEX_NAME).setSize(0).get(), 50); + }); + } + + public void testNoMultipleWriterDuringPrimaryRelocation() throws ExecutionException, InterruptedException { + // In this test, we trigger a force flush on existing primary while the primary mode on new primary has been + // activated. There was a bug in primary relocation of remote store enabled indexes where the new primary + // starts uploading translog and segments even before the cluster manager has started this shard. With this test, + // we check that we do not overwrite any file on remote store. Here we will also increase the replica count to + // check that there are no duplicate metadata files for translog or upload. + + internalCluster().startClusterManagerOnlyNode(); + String oldPrimary = internalCluster().startDataOnlyNodes(1).get(0); + createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); + ensureGreen(INDEX_NAME); + indexBulk(INDEX_NAME, randomIntBetween(5, 10)); + String newPrimary = internalCluster().startDataOnlyNodes(1).get(0); + ensureStableCluster(3); + + IndexShard oldPrimaryIndexShard = getIndexShard(oldPrimary, INDEX_NAME); + CountDownLatch flushLatch = new CountDownLatch(1); + + MockTransportService mockTargetTransportService = ((MockTransportService) internalCluster().getInstance( + TransportService.class, + oldPrimary + )); + mockTargetTransportService.addSendBehavior((connection, requestId, action, request, options) -> { + if (PeerRecoveryTargetService.Actions.HANDOFF_PRIMARY_CONTEXT.equals(action)) { + flushLatch.countDown(); + } + connection.sendRequest(requestId, action, request, options); + }); + + logger.info("--> relocate the shard"); + client().admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand(INDEX_NAME, 0, oldPrimary, newPrimary)) + .execute() + .actionGet(); + + CountDownLatch flushDone = new CountDownLatch(1); + Thread flushThread = new Thread(() -> { + try { + flushLatch.await(2, TimeUnit.SECONDS); + oldPrimaryIndexShard.flush(new FlushRequest().waitIfOngoing(true).force(true)); + // newPrimaryTranslogRepo.setSleepSeconds(0); + } catch (IndexShardClosedException e) { + // this is fine + } catch (InterruptedException e) { + throw new AssertionError(e); + } finally { + flushDone.countDown(); + } + }); + flushThread.start(); + flushDone.await(5, TimeUnit.SECONDS); + flushThread.join(); + + ClusterHealthResponse clusterHealthResponse = client().admin() + .cluster() + .prepareHealth() + .setWaitForStatus(ClusterHealthStatus.GREEN) + .setWaitForEvents(Priority.LANGUID) + .setWaitForNoRelocatingShards(true) + .setTimeout(TimeValue.timeValueSeconds(5)) + .execute() + .actionGet(); + assertFalse(clusterHealthResponse.isTimedOut()); + + client().admin() + .indices() + .updateSettings( + new UpdateSettingsRequest(INDEX_NAME).settings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) + ) + .get(); + + clusterHealthResponse = client().admin() + .cluster() + .prepareHealth() + .setWaitForStatus(ClusterHealthStatus.GREEN) + .setWaitForEvents(Priority.LANGUID) + .setWaitForNoRelocatingShards(true) + 
.setTimeout(TimeValue.timeValueSeconds(5)) + .execute() + .actionGet(); + assertFalse(clusterHealthResponse.isTimedOut()); + } + + public void testResumeUploadAfterFailedPrimaryRelocation() throws ExecutionException, InterruptedException, IOException { + // In this test, we fail the hand off during the primary relocation. This will undo the drainRefreshes and + // drainSync performed as part of relocation handoff (before performing the handoff transport action). + // We validate the same here by failing the peer recovery and ensuring we can index afterward as well. + + internalCluster().startClusterManagerOnlyNode(); + String oldPrimary = internalCluster().startDataOnlyNodes(1).get(0); + createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); + ensureGreen(INDEX_NAME); + int docs = randomIntBetween(5, 10); + indexBulk(INDEX_NAME, docs); + flushAndRefresh(INDEX_NAME); + assertHitCount(client(oldPrimary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), docs); + String newPrimary = internalCluster().startDataOnlyNodes(1).get(0); + ensureStableCluster(3); + + IndexShard oldPrimaryIndexShard = getIndexShard(oldPrimary, INDEX_NAME); + CountDownLatch handOffLatch = new CountDownLatch(1); + + MockTransportService mockTargetTransportService = ((MockTransportService) internalCluster().getInstance( + TransportService.class, + oldPrimary + )); + mockTargetTransportService.addSendBehavior((connection, requestId, action, request, options) -> { + if (PeerRecoveryTargetService.Actions.HANDOFF_PRIMARY_CONTEXT.equals(action)) { + handOffLatch.countDown(); + throw new OpenSearchException("failing recovery for test purposes"); + } + connection.sendRequest(requestId, action, request, options); + }); + + logger.info("--> relocate the shard"); + client().admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand(INDEX_NAME, 0, oldPrimary, newPrimary)) + .execute() + .actionGet(); + + handOffLatch.await(30, TimeUnit.SECONDS); + + assertTrue(oldPrimaryIndexShard.isStartedPrimary()); + assertEquals(oldPrimary, primaryNodeName(INDEX_NAME)); + assertHitCount(client(oldPrimary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), docs); + + SearchPhaseExecutionException ex = assertThrows( + SearchPhaseExecutionException.class, + () -> client(newPrimary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get() + ); + assertEquals("all shards failed", ex.getMessage()); + + int moreDocs = randomIntBetween(5, 10); + indexBulk(INDEX_NAME, moreDocs); + flushAndRefresh(INDEX_NAME); + int uncommittedOps = randomIntBetween(5, 10); + indexBulk(INDEX_NAME, uncommittedOps); + assertHitCount(client(oldPrimary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), docs + moreDocs); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(INDEX_NAME))); + + restore(true, INDEX_NAME); + ensureGreen(INDEX_NAME); + assertHitCount( + client(newPrimary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), + docs + moreDocs + uncommittedOps + ); + + String newNode = internalCluster().startDataOnlyNodes(1).get(0); + ensureStableCluster(3); + client().admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand(INDEX_NAME, 0, newPrimary, newNode)) + .execute() + .actionGet(); + + ClusterHealthResponse clusterHealthResponse = client().admin() + .cluster() + .prepareHealth() + .setWaitForStatus(ClusterHealthStatus.GREEN) + .setWaitForEvents(Priority.LANGUID) + .setWaitForNoRelocatingShards(true) 
+ .setTimeout(TimeValue.timeValueSeconds(10)) + .execute() + .actionGet(); + assertFalse(clusterHealthResponse.isTimedOut()); + + ex = assertThrows( + SearchPhaseExecutionException.class, + () -> client(newPrimary).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get() + ); + assertEquals("all shards failed", ex.getMessage()); + assertHitCount( + client(newNode).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), + docs + moreDocs + uncommittedOps + ); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java index 4005e6359a2f7..acdb21d072320 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java @@ -22,15 +22,19 @@ import java.util.Set; import java.util.concurrent.TimeUnit; -import static org.opensearch.index.remote.RemoteRefreshSegmentPressureSettings.REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED; +import static org.opensearch.index.remote.RemoteStorePressureSettings.REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteStoreRefreshListenerIT extends AbstractRemoteStoreMockRepositoryIntegTestCase { public void testRemoteRefreshRetryOnFailure() throws Exception { - Path location = randomRepoPath().toAbsolutePath(); setup(location, randomDoubleBetween(0.1, 0.15, true), "metadata", 10L); + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED.getKey(), false)) + .get(); // Here we are having flush/refresh after each iteration of indexing. However, the refresh will not always succeed // due to IOExceptions that are thrown while doing uploadBlobs. @@ -56,7 +60,7 @@ public void testRemoteRefreshRetryOnFailure() throws Exception { logger.info("Local files = {}, Repo files = {}", sortedFilesInLocal, sortedFilesInRepo); assertTrue(filesInRepo.containsAll(filesInLocal)); }, 90, TimeUnit.SECONDS); - deleteRepo(); + cleanupRepo(); } public void testRemoteRefreshSegmentPressureSettingChanged() { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java new file mode 100644 index 0000000000000..ef2dcf3217df6 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java @@ -0,0 +1,184 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.remotestore;
+
+import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesAction;
+import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
+import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
+import org.opensearch.client.Client;
+import org.opensearch.cluster.metadata.RepositoryMetadata;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.common.unit.ByteSizeValue;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
+import org.opensearch.core.xcontent.ToXContent;
+import org.opensearch.core.xcontent.XContentBuilder;
+import org.opensearch.plugins.Plugin;
+import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.test.disruption.NetworkDisruption;
+import org.opensearch.test.transport.MockTransportService;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING;
+
+@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
+public class RemoteStoreRepositoryRegistrationIT extends RemoteStoreBaseIntegTestCase {
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return Arrays.asList(MockTransportService.TestPlugin.class);
+    }
+
+    public void testSingleNodeClusterRepositoryRegistration() throws Exception {
+        internalCluster().startNode();
+    }
+
+    public void testMultiNodeClusterRepositoryRegistration() throws Exception {
+        internalCluster().startNodes(3);
+    }
+
+    public void testMultiNodeClusterRepositoryRegistrationWithMultipleClusterManager() throws Exception {
+        internalCluster().startClusterManagerOnlyNodes(3);
+        internalCluster().startNodes(3);
+    }
+
+    public void testMultiNodeClusterActiveClusterManagerShutDown() throws Exception {
+        internalCluster().startNodes(3);
+        internalCluster().stopCurrentClusterManagerNode();
+        ensureStableCluster(2);
+    }
+
+    public void testMultiNodeClusterActiveClusterManagerRestart() throws Exception {
+        internalCluster().startNodes(3);
+        String clusterManagerNodeName = internalCluster().getClusterManagerName();
+        internalCluster().restartNode(clusterManagerNodeName);
+        ensureStableCluster(3);
+    }
+
+    public void testMultiNodeClusterRandomNodeRestart() throws Exception {
+        internalCluster().startNodes(3);
+        internalCluster().restartRandomDataNode();
+        ensureStableCluster(3);
+    }
+
+    public void testMultiNodeClusterActiveClusterManagerRecoverNetworkIsolation() {
+        internalCluster().startClusterManagerOnlyNodes(3);
+        String dataNode = internalCluster().startNode();
+
+        NetworkDisruption partition = isolateClusterManagerDisruption(NetworkDisruption.DISCONNECT);
+        internalCluster().setDisruptionScheme(partition);
+
+        partition.startDisrupting();
+        ensureStableCluster(3, dataNode);
+        partition.stopDisrupting();
+
+        ensureStableCluster(4);
+
+        internalCluster().clearDisruptionScheme();
+    }
+
+    public void testMultiNodeClusterRandomNodeRecoverNetworkIsolation() {
+        Set<String> nodesInOneSide = internalCluster().startNodes(3).stream().collect(Collectors.toCollection(HashSet::new));
+        Set<String> nodesInAnotherSide = internalCluster().startNodes(3).stream().collect(Collectors.toCollection(HashSet::new));
+        ensureStableCluster(6);
+
+        NetworkDisruption networkDisruption = new
NetworkDisruption( + new NetworkDisruption.TwoPartitions(nodesInOneSide, nodesInAnotherSide), + NetworkDisruption.DISCONNECT + ); + internalCluster().setDisruptionScheme(networkDisruption); + + networkDisruption.startDisrupting(); + ensureStableCluster(3, nodesInOneSide.stream().findAny().get()); + networkDisruption.stopDisrupting(); + + ensureStableCluster(6); + + internalCluster().clearDisruptionScheme(); + } + + public void testMultiNodeClusterRandomNodeRecoverNetworkIsolationPostNonRestrictedSettingsUpdate() { + Set<String> nodesInOneSide = internalCluster().startNodes(3).stream().collect(Collectors.toCollection(HashSet::new)); + Set<String> nodesInAnotherSide = internalCluster().startNodes(3).stream().collect(Collectors.toCollection(HashSet::new)); + ensureStableCluster(6); + + NetworkDisruption networkDisruption = new NetworkDisruption( + new NetworkDisruption.TwoPartitions(nodesInOneSide, nodesInAnotherSide), + NetworkDisruption.DISCONNECT + ); + internalCluster().setDisruptionScheme(networkDisruption); + + networkDisruption.startDisrupting(); + + final Client client = client(nodesInOneSide.iterator().next()); + RepositoryMetadata repositoryMetadata = client.admin() + .cluster() + .prepareGetRepositories(REPOSITORY_NAME) + .get() + .repositories() + .get(0); + Settings.Builder updatedSettings = Settings.builder().put(repositoryMetadata.settings()).put("chunk_size", new ByteSizeValue(20)); + updatedSettings.remove("system_repository"); + + client.admin() + .cluster() + .preparePutRepository(repositoryMetadata.name()) + .setType(repositoryMetadata.type()) + .setSettings(updatedSettings) + .get(); + + ensureStableCluster(3, nodesInOneSide.stream().findAny().get()); + networkDisruption.stopDisrupting(); + + ensureStableCluster(6); + + internalCluster().clearDisruptionScheme(); + } + + public void testNodeRestartPostNonRestrictedSettingsUpdate() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startNodes(3); + + final Client client = client(); + RepositoryMetadata repositoryMetadata = client.admin() + .cluster() + .prepareGetRepositories(REPOSITORY_NAME) + .get() + .repositories() + .get(0); + Settings.Builder updatedSettings = Settings.builder().put(repositoryMetadata.settings()).put("chunk_size", new ByteSizeValue(20)); + updatedSettings.remove("system_repository"); + + client.admin() + .cluster() + .preparePutRepository(repositoryMetadata.name()) + .setType(repositoryMetadata.type()) + .setSettings(updatedSettings) + .get(); + + internalCluster().restartRandomDataNode(); + + ensureStableCluster(4); + } + + public void testSystemRepositorySettingIsHiddenForGetRepositoriesRequest() throws IOException { + GetRepositoriesRequest request = new GetRepositoriesRequest(new String[] { REPOSITORY_NAME }); + GetRepositoriesResponse repositoriesResponse = client().execute(GetRepositoriesAction.INSTANCE, request).actionGet(); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.JSON)); + XContentBuilder xContentBuilder = repositoriesResponse.toXContent(builder, ToXContent.EMPTY_PARAMS); + repositoriesResponse = GetRepositoriesResponse.fromXContent(createParser(xContentBuilder)); + assertEquals(false, SYSTEM_REPOSITORY_SETTING.get(repositoriesResponse.repositories().get(0).settings())); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java new file mode 100644 index 
0000000000000..94acf2b1dbb27 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java @@ -0,0 +1,534 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotestore; + +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreResponse; +import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.cluster.health.ClusterHealthStatus; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.index.Index; +import org.opensearch.index.IndexService; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; +import org.opensearch.indices.IndicesService; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.Repository; +import org.opensearch.repositories.fs.ReloadableFsRepository; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.greaterThan; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 0) +public class RemoteStoreRestoreIT extends BaseRemoteStoreRestoreIT { + + /** + * Simulates all data restored using Remote Translog Store. + * @throws IOException IO Exception. + */ + public void testRemoteTranslogRestoreWithNoDataPostCommit() throws Exception { + testRestoreFlow(1, true, true, randomIntBetween(1, 5)); + } + + /** + * Simulates all data restored using Remote Translog Store. + * @throws IOException IO Exception. + */ + public void testRemoteTranslogRestoreWithNoDataPostRefresh() throws Exception { + testRestoreFlow(1, false, true, randomIntBetween(1, 5)); + } + + /** + * Simulates refreshed data restored using Remote Segment Store + * and unrefreshed data restored using Remote Translog Store. + * @throws IOException IO Exception. + */ + public void testRemoteTranslogRestoreWithRefreshedData() throws Exception { + testRestoreFlow(randomIntBetween(2, 5), false, false, randomIntBetween(1, 5)); + } + + /** + * Simulates refreshed data restored using Remote Segment Store + * and unrefreshed data restored using Remote Translog Store. + * @throws IOException IO Exception. + */ + public void testRemoteTranslogRestoreWithCommittedData() throws Exception { + testRestoreFlow(randomIntBetween(2, 5), true, false, randomIntBetween(1, 5)); + } + + /** + * Simulates all data restored using Remote Translog Store. 
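+     * Both the primary and replica copies are stopped before the restore is attempted.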
+ * @throws IOException IO Exception. + */ + public void testRTSRestoreWithNoDataPostCommitPrimaryReplicaDown() throws Exception { + testRestoreFlowBothPrimaryReplicasDown(1, true, true, randomIntBetween(1, 5)); + } + + /** + * Simulates all data restored using Remote Translog Store. + * @throws IOException IO Exception. + */ + public void testRTSRestoreWithNoDataPostRefreshPrimaryReplicaDown() throws Exception { + testRestoreFlowBothPrimaryReplicasDown(1, false, true, randomIntBetween(1, 5)); + } + + /** + * Simulates refreshed data restored using Remote Segment Store + * and unrefreshed data restored using Remote Translog Store. + * @throws IOException IO Exception. + */ + public void testRTSRestoreWithRefreshedDataPrimaryReplicaDown() throws Exception { + testRestoreFlowBothPrimaryReplicasDown(randomIntBetween(2, 5), false, false, randomIntBetween(1, 5)); + } + + /** + * Simulates refreshed data restored using Remote Segment Store + * and unrefreshed data restored using Remote Translog Store. + * @throws IOException IO Exception. + */ + public void testRTSRestoreWithCommittedDataPrimaryReplicaDown() throws Exception { + testRestoreFlowBothPrimaryReplicasDown(randomIntBetween(2, 5), true, false, randomIntBetween(1, 5)); + } + + private void restoreAndVerify(int shardCount, int replicaCount, Map<String, Long> indexStats) throws Exception { + restore(INDEX_NAME); + ensureGreen(INDEX_NAME); + // This is required to get updated number from already active shards which were not restored + assertEquals(shardCount * (1 + replicaCount), getNumShards(INDEX_NAME).totalNumShards); + assertEquals(replicaCount, getNumShards(INDEX_NAME).numReplicas); + verifyRestoredData(indexStats, INDEX_NAME); + } + + /** + * Helper function to test restoring an index with no replication from remote store. Only primary node is dropped. + * @param numberOfIterations Number of times a refresh/flush should be invoked, followed by indexing some data. + * @param invokeFlush If true, a flush is invoked. Otherwise, a refresh is invoked. + * @throws IOException IO Exception. 
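+     * @param emptyTranslog If true, the scenario leaves no uncommitted operations behind, so the
+     *                      translog is expected to be empty at restore time (inferred from the callers).
+     * @param shardCount Number of primary shards for the test index.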
+ */ + private void testRestoreFlow(int numberOfIterations, boolean invokeFlush, boolean emptyTranslog, int shardCount) throws Exception { + prepareCluster(1, 3, INDEX_NAME, 0, shardCount); + Map<String, Long> indexStats = indexData(numberOfIterations, invokeFlush, emptyTranslog, INDEX_NAME); + assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards); + + assertHitCount(client().prepareSearch(INDEX_NAME).setSize(0).get(), indexStats.get(REFRESHED_OR_FLUSHED_OPERATIONS)); + + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(INDEX_NAME))); + ensureRed(INDEX_NAME); + + restoreAndVerify(shardCount, 0, indexStats); + } + + public void testMultipleWriters() throws Exception { + prepareCluster(1, 2, INDEX_NAME, 1, 1); + Map<String, Long> indexStats = indexData(randomIntBetween(2, 5), true, true, INDEX_NAME); + assertEquals(2, getNumShards(INDEX_NAME).totalNumShards); + + // ensure replica has latest checkpoint + flushAndRefresh(INDEX_NAME); + flushAndRefresh(INDEX_NAME); + + Index indexObj = clusterService().state().metadata().indices().get(INDEX_NAME).getIndex(); + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, primaryNodeName(INDEX_NAME)); + IndexService indexService = indicesService.indexService(indexObj); + IndexShard indexShard = indexService.getShard(0); + RemoteSegmentMetadata remoteSegmentMetadataBeforeFailover = indexShard.getRemoteDirectory().readLatestMetadataFile(); + + // ensure all segments synced to replica + assertBusy( + () -> assertHitCount( + client(primaryNodeName(INDEX_NAME)).prepareSearch(INDEX_NAME).setSize(0).get(), + indexStats.get(TOTAL_OPERATIONS) + ), + 30, + TimeUnit.SECONDS + ); + assertBusy( + () -> assertHitCount( + client(replicaNodeName(INDEX_NAME)).prepareSearch(INDEX_NAME).setSize(0).get(), + indexStats.get(TOTAL_OPERATIONS) + ), + 30, + TimeUnit.SECONDS + ); + + String newPrimaryNodeName = replicaNodeName(INDEX_NAME); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(INDEX_NAME))); + ensureYellow(INDEX_NAME); + + indicesService = internalCluster().getInstance(IndicesService.class, newPrimaryNodeName); + indexService = indicesService.indexService(indexObj); + indexShard = indexService.getShard(0); + IndexShard finalIndexShard = indexShard; + assertBusy(() -> assertTrue(finalIndexShard.isStartedPrimary() && finalIndexShard.isPrimaryMode())); + assertEquals( + finalIndexShard.getLatestSegmentInfosAndCheckpoint().v2().getPrimaryTerm(), + remoteSegmentMetadataBeforeFailover.getPrimaryTerm() + 1 + ); + } + + /** + * Helper function to test restoring an index having replicas from remote store when all the nodes housing the primary/replica drop. + * @param numberOfIterations Number of times a refresh/flush should be invoked, followed by indexing some data. + * @param invokeFlush If true, a flush is invoked. Otherwise, a refresh is invoked. + * @throws IOException IO Exception. 
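+     * @param emptyTranslog If true, the scenario leaves no uncommitted operations behind (inferred from the callers).
+     * @param shardCount Number of primary shards for the test index.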
+ */ + private void testRestoreFlowBothPrimaryReplicasDown(int numberOfIterations, boolean invokeFlush, boolean emptyTranslog, int shardCount) + throws Exception { + prepareCluster(1, 2, INDEX_NAME, 1, shardCount); + Map<String, Long> indexStats = indexData(numberOfIterations, invokeFlush, emptyTranslog, INDEX_NAME); + assertEquals(shardCount * 2, getNumShards(INDEX_NAME).totalNumShards); + + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNodeName(INDEX_NAME))); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(INDEX_NAME))); + ensureRed(INDEX_NAME); + internalCluster().startDataOnlyNodes(2); + + restoreAndVerify(shardCount, 1, indexStats); + } + + /** + * Helper function to test restoring multiple indices from remote store when all the nodes housing the primary/replica drop. + * @param numberOfIterations Number of times a refresh/flush should be invoked, followed by indexing some data. + * @param invokeFlush If true, a flush is invoked. Otherwise, a refresh is invoked. + * @throws IOException IO Exception. + */ + private void testRestoreFlowMultipleIndices(int numberOfIterations, boolean invokeFlush, int shardCount) throws Exception { + prepareCluster(1, 3, INDEX_NAMES, 1, shardCount); + String[] indices = INDEX_NAMES.split(","); + Map<String, Map<String, Long>> indicesStats = new HashMap<>(); + for (String index : indices) { + Map<String, Long> indexStats = indexData(numberOfIterations, invokeFlush, index); + indicesStats.put(index, indexStats); + assertEquals(shardCount * 2, getNumShards(index).totalNumShards); + } + + for (String index : indices) { + ClusterHealthStatus indexHealth = ensureRed(index); + if (ClusterHealthStatus.RED.equals(indexHealth)) { + continue; + } + + if (ClusterHealthStatus.GREEN.equals(indexHealth)) { + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNodeName(index))); + } + + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(index))); + } + + ensureRed(indices); + internalCluster().startDataOnlyNodes(3); + + boolean restoreAllShards = randomBoolean(); + if (restoreAllShards) { + assertAcked(client().admin().indices().prepareClose(indices)); + } + client().admin() + .cluster() + .restoreRemoteStore( + new RestoreRemoteStoreRequest().indices(INDEX_NAMES_WILDCARD.split(",")).restoreAllShards(restoreAllShards), + PlainActionFuture.newFuture() + ); + ensureGreen(indices); + for (String index : indices) { + assertEquals(shardCount * 2, getNumShards(index).totalNumShards); + verifyRestoredData(indicesStats.get(index), index); + } + } + + public void testRestoreFlowAllShardsNoRedIndex() throws InterruptedException { + int shardCount = randomIntBetween(1, 5); + prepareCluster(1, 3, INDEX_NAME, 0, shardCount); + indexData(randomIntBetween(2, 5), true, INDEX_NAME); + assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards); + + PlainActionFuture<RestoreRemoteStoreResponse> future = PlainActionFuture.newFuture(); + client().admin().cluster().restoreRemoteStore(new RestoreRemoteStoreRequest().indices(INDEX_NAME).restoreAllShards(true), future); + try { + future.get(); + } catch (ExecutionException e) { + // If the request goes to co-ordinator, e.getCause() can be RemoteTransportException + assertTrue(e.getCause() instanceof IllegalStateException || e.getCause().getCause() instanceof IllegalStateException); + } + } + + public void testRestoreFlowNoRedIndex() throws Exception { + int shardCount = randomIntBetween(1, 5); + prepareCluster(1, 3, INDEX_NAME, 0, 
shardCount); + Map<String, Long> indexStats = indexData(randomIntBetween(2, 5), true, INDEX_NAME); + assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards); + + client().admin() + .cluster() + .restoreRemoteStore(new RestoreRemoteStoreRequest().indices(INDEX_NAME).restoreAllShards(false), PlainActionFuture.newFuture()); + + ensureGreen(INDEX_NAME); + assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards); + verifyRestoredData(indexStats, INDEX_NAME); + } + + /** + * Simulates refreshed data restored using Remote Segment Store + * and unrefreshed data restored using Remote Translog Store + * for multiple indices matching a wildcard name pattern. + * @throws IOException IO Exception. + */ + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8480") + public void testRTSRestoreWithCommittedDataMultipleIndicesPatterns() throws Exception { + testRestoreFlowMultipleIndices(2, true, randomIntBetween(1, 5)); + } + + /** + * Simulates refreshed data restored using Remote Segment Store + * and unrefreshed data restored using Remote Translog Store, + * with all remote-enabled red indices considered for the restore by default. + * @throws IOException IO Exception. + */ + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8480") + public void testRTSRestoreWithCommittedDataDefaultAllIndices() throws Exception { + int shardCount = randomIntBetween(1, 5); + prepareCluster(1, 3, INDEX_NAMES, 1, shardCount); + String[] indices = INDEX_NAMES.split(","); + Map<String, Map<String, Long>> indicesStats = new HashMap<>(); + for (String index : indices) { + Map<String, Long> indexStats = indexData(2, true, index); + indicesStats.put(index, indexStats); + assertEquals(shardCount, getNumShards(index).totalNumShards); + } + + for (String index : indices) { + if (ClusterHealthStatus.RED.equals(ensureRed(index))) { + continue; + } + + if (ClusterHealthStatus.GREEN.equals(ensureRed(index))) { + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNodeName(index))); + } + + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(index))); + } + + ensureRed(indices); + internalCluster().startDataOnlyNodes(3); + + restore(indices); + ensureGreen(indices); + + for (String index : indices) { + assertEquals(shardCount, getNumShards(index).totalNumShards); + verifyRestoredData(indicesStats.get(index), index); + } + } + + /** + * Simulates refreshed data restored using Remote Segment Store + * and unrefreshed data restored using Remote Translog Store, + * with only some of the remote-enabled red indices requested for the restore. + * @throws IOException IO Exception. 
+ */ + public void testRTSRestoreWithCommittedDataNotAllRedRemoteIndices() throws Exception { + int shardCount = randomIntBetween(1, 5); + prepareCluster(1, 3, INDEX_NAMES, 0, shardCount); + String[] indices = INDEX_NAMES.split(","); + Map<String, Map<String, Long>> indicesStats = new HashMap<>(); + for (String index : indices) { + Map<String, Long> indexStats = indexData(2, true, index); + indicesStats.put(index, indexStats); + assertEquals(shardCount, getNumShards(index).totalNumShards); + } + + for (String index : indices) { + if (ClusterHealthStatus.RED.equals(ensureRed(index))) { + continue; + } + + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(index))); + } + + ensureRed(indices); + internalCluster().startDataOnlyNodes(3); + + boolean restoreAllShards = randomBoolean(); + if (restoreAllShards) { + assertAcked(client().admin().indices().prepareClose(indices[0], indices[1])); + } + client().admin() + .cluster() + .restoreRemoteStore( + new RestoreRemoteStoreRequest().indices(indices[0], indices[1]).restoreAllShards(restoreAllShards), + PlainActionFuture.newFuture() + ); + ensureGreen(indices[0], indices[1]); + assertEquals(shardCount, getNumShards(indices[0]).totalNumShards); + verifyRestoredData(indicesStats.get(indices[0]), indices[0]); + assertEquals(shardCount, getNumShards(indices[1]).totalNumShards); + verifyRestoredData(indicesStats.get(indices[1]), indices[1]); + ensureRed(indices[2], indices[3]); + } + + /** + * Simulates refreshed data restored using Remote Segment Store + * and unrefreshed data restored using Remote Translog Store, + * with all remote-enabled red indices being considered for the restore + * except those matching the specified exclusion pattern. + * @throws IOException IO Exception. + */ + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8480") + public void testRTSRestoreWithCommittedDataExcludeIndicesPatterns() throws Exception { + int shardCount = randomIntBetween(1, 5); + prepareCluster(1, 3, INDEX_NAMES, 1, shardCount); + String[] indices = INDEX_NAMES.split(","); + Map<String, Map<String, Long>> indicesStats = new HashMap<>(); + for (String index : indices) { + Map<String, Long> indexStats = indexData(2, true, index); + indicesStats.put(index, indexStats); + assertEquals(shardCount, getNumShards(index).totalNumShards); + } + + for (String index : indices) { + if (ClusterHealthStatus.RED.equals(ensureRed(index))) { + continue; + } + + if (ClusterHealthStatus.GREEN.equals(ensureRed(index))) { + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNodeName(index))); + } + + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(index))); + } + + ensureRed(indices); + internalCluster().startDataOnlyNodes(3); + + boolean restoreAllShards = randomBoolean(); + if (restoreAllShards) { + assertAcked(client().admin().indices().prepareClose(indices[0], indices[1])); + } + client().admin() + .cluster() + .restoreRemoteStore( + new RestoreRemoteStoreRequest().indices("*", "-remote-store-test-index-*").restoreAllShards(restoreAllShards), + PlainActionFuture.newFuture() + ); + ensureGreen(indices[0], indices[1]); + assertEquals(shardCount, getNumShards(indices[0]).totalNumShards); + verifyRestoredData(indicesStats.get(indices[0]), indices[0]); + assertEquals(shardCount, getNumShards(indices[1]).totalNumShards); + verifyRestoredData(indicesStats.get(indices[1]), indices[1]); + ensureRed(indices[2], indices[3]); + } + + /** + * Simulates no-op restore from remote 
store, + * when the index has no data. + * @throws IOException IO Exception. + */ + public void testRTSRestoreDataOnlyInTranslog() throws Exception { + testRestoreFlow(0, true, false, randomIntBetween(1, 5)); + } + + public void testRateLimitedRemoteDownloads() throws Exception { + clusterSettingsSuppliedByTest = true; + int shardCount = randomIntBetween(1, 3); + Path segmentRepoPath = randomRepoPath(); + Path tlogRepoPath = randomRepoPath(); + prepareCluster( + 1, + 3, + INDEX_NAME, + 0, + shardCount, + buildRemoteStoreNodeAttributes(REPOSITORY_NAME, segmentRepoPath, REPOSITORY_2_NAME, tlogRepoPath, true) + ); + + // validate inplace repository metadata update + ClusterService clusterService = internalCluster().getInstance(ClusterService.class); + DiscoveryNode node = clusterService.localNode(); + String settingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + REPOSITORY_NAME + ); + Map<String, String> settingsMap = node.getAttributes() + .keySet() + .stream() + .filter(key -> key.startsWith(settingsAttributeKeyPrefix)) + .collect(Collectors.toMap(key -> key.replace(settingsAttributeKeyPrefix, ""), key -> node.getAttributes().get(key))); + Settings.Builder settings = Settings.builder(); + settingsMap.entrySet().forEach(entry -> settings.put(entry.getKey(), entry.getValue())); + settings.put("location", segmentRepoPath).put("max_remote_download_bytes_per_sec", 4, ByteSizeUnit.KB); + + assertAcked( + client().admin() + .cluster() + .preparePutRepository(REPOSITORY_NAME) + .setType(ReloadableFsRepository.TYPE) + .setSettings(settings) + .get() + ); + + for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) { + Repository segmentRepo = repositoriesService.repository(REPOSITORY_NAME); + assertEquals("4096b", segmentRepo.getMetadata().settings().get("max_remote_download_bytes_per_sec")); + } + + Map<String, Long> indexStats = indexData(5, false, INDEX_NAME); + assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(INDEX_NAME))); + ensureRed(INDEX_NAME); + restore(INDEX_NAME); + assertBusy(() -> { + long downloadPauseTime = 0L; + for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) { + downloadPauseTime += repositoriesService.repository(REPOSITORY_NAME).getRemoteDownloadThrottleTimeInNanos(); + } + assertThat(downloadPauseTime, greaterThan(TimeValue.timeValueSeconds(randomIntBetween(3, 5)).nanos())); + }, 30, TimeUnit.SECONDS); + // Waiting for extended period for green state so that rate limit does not cause flakiness + ensureGreen(TimeValue.timeValueSeconds(120), INDEX_NAME); + // This is required to get updated number from already active shards which were not restored + assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards); + assertEquals(0, getNumShards(INDEX_NAME).numReplicas); + verifyRestoredData(indexStats, INDEX_NAME); + + // revert repo metadata to pass asserts on repo metadata vs. 
node attrs during teardown + // https://github.com/opensearch-project/OpenSearch/pull/9569#discussion_r1345668700 + settings.remove("max_remote_download_bytes_per_sec"); + assertAcked( + client().admin() + .cluster() + .preparePutRepository(REPOSITORY_NAME) + .setType(ReloadableFsRepository.TYPE) + .setSettings(settings) + .get() + ); + for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) { + Repository segmentRepo = repositoriesService.repository(REPOSITORY_NAME); + assertNull(segmentRepo.getMetadata().settings().get("max_remote_download_bytes_per_sec")); + } + } + + // TODO: Restore flow - index aliases +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsFromNodesStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsFromNodesStatsIT.java new file mode 100644 index 0000000000000..6e796bdae5a4a --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsFromNodesStatsIT.java @@ -0,0 +1,209 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotestore; + +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsResponse; +import org.opensearch.action.admin.indices.stats.CommonStatsFlags; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.remote.RemoteSegmentStats; +import org.opensearch.index.translog.RemoteTranslogStats; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.Before; + +import java.util.concurrent.TimeUnit; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RemoteStoreStatsFromNodesStatsIT extends RemoteStoreBaseIntegTestCase { + private static final String INDEX_NAME = "remote-index-1"; + private static final int DATA_NODE_COUNT = 2; + private static final int CLUSTER_MANAGER_NODE_COUNT = 3; + + @Before + public void setup() { + setupCustomCluster(); + } + + private void setupCustomCluster() { + internalCluster().startClusterManagerOnlyNodes(CLUSTER_MANAGER_NODE_COUNT); + internalCluster().startDataOnlyNodes(DATA_NODE_COUNT); + ensureStableCluster(DATA_NODE_COUNT + CLUSTER_MANAGER_NODE_COUNT); + } + + /** + * - Creates two indices with single primary shard, pinned to a single node. 
+ * - Index documents in both of them and force a refresh for both
+ * - Polls the _remotestore/stats API for individual index level stats
+ * - Adds up requisite fields from the API output, repeats this for the 2nd index
+ * - Polls _nodes/stats and verifies that the total values at the node level add up
+ * to the values captured in the previous step
+ */
+    public void testNodesStatsParityWithOnlyPrimaryShards() {
+        String[] dataNodes = internalCluster().getDataNodeNames().toArray(String[]::new);
+        String randomDataNode = dataNodes[randomIntBetween(0, dataNodes.length - 1)];
+        String firstIndex = INDEX_NAME + "1";
+        String secondIndex = INDEX_NAME + "2";
+
+        // Create first index
+        createIndex(
+            firstIndex,
+            Settings.builder().put(remoteStoreIndexSettings(0, 1)).put("index.routing.allocation.require._name", randomDataNode).build()
+        );
+        ensureGreen(firstIndex);
+        indexSingleDoc(firstIndex, true);
+
+        // Create second index
+        createIndex(
+            secondIndex,
+            Settings.builder().put(remoteStoreIndexSettings(0, 1)).put("index.routing.allocation.require._name", randomDataNode).build()
+        );
+        ensureGreen(secondIndex);
+        indexSingleDoc(secondIndex, true);
+
+        assertNodeStatsParityOnNode(randomDataNode, firstIndex, secondIndex);
+    }
+
+    /**
+     * - Creates two indices with single primary shard and single replica
+     * - Index documents in both of them and force a refresh for both
+     * - Polls the _remotestore/stats API for individual index level stats
+     * - Adds up requisite fields from the API output for both indices
+     * - Polls _nodes/stats and verifies that the total values at the node level add up
+     * to the values captured in the previous step
+     * - Repeats the above 3 steps for the second node
+     */
+    public void testNodesStatsParityWithReplicaShards() throws Exception {
+        String firstIndex = INDEX_NAME + "1";
+        String secondIndex = INDEX_NAME + "2";
+
+        createIndex(firstIndex, Settings.builder().put(remoteStoreIndexSettings(1, 1)).build());
+        ensureGreen(firstIndex);
+        indexSingleDoc(firstIndex, true);
+
+        // Create second index
+        createIndex(secondIndex, Settings.builder().put(remoteStoreIndexSettings(1, 1)).build());
+        ensureGreen(secondIndex);
+        indexSingleDoc(secondIndex, true);
+
+        assertBusy(() -> assertNodeStatsParityAcrossNodes(firstIndex, secondIndex), 15, TimeUnit.SECONDS);
+    }
+
+    /**
+     * Ensures that node stats show 0 values for dedicated cluster manager nodes
+     * since cluster manager nodes do not participate in indexing
+     */
+    public void testZeroRemoteStatsOnNodesStatsForClusterManager() {
+        createIndex(INDEX_NAME, remoteStoreIndexSettings(0));
+        ensureGreen(INDEX_NAME);
+        indexSingleDoc(INDEX_NAME);
+        refresh(INDEX_NAME);
+
+        NodesStatsResponse nodesStatsResponseForClusterManager = client().admin()
+            .cluster()
+            .prepareNodesStats(internalCluster().getClusterManagerName())
+            .setIndices(new CommonStatsFlags().set(CommonStatsFlags.Flag.Segments, true).set(CommonStatsFlags.Flag.Translog, true))
+            .get();
+
+        assertTrue(
+            nodesStatsResponseForClusterManager.getNodes().get(0).getNode().isClusterManagerNode()
+                && !nodesStatsResponseForClusterManager.getNodes().get(0).getNode().isDataNode()
+        );
+        assertZeroRemoteSegmentStats(
+            nodesStatsResponseForClusterManager.getNodes().get(0).getIndices().getSegments().getRemoteSegmentStats()
+        );
+        assertZeroRemoteTranslogStats(
+            nodesStatsResponseForClusterManager.getNodes().get(0).getIndices().getTranslog().getRemoteTranslogStats()
+        );
+
+        NodesStatsResponse nodesStatsResponseForDataNode = client().admin()
+            .cluster()
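+            // Node-level stats for the data node hosting the primary shard; unlike the dedicated
+            // cluster manager above, its remote upload counters should be non-zero.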
+            .prepareNodesStats(primaryNodeName(INDEX_NAME))
+            .setIndices(new CommonStatsFlags().set(CommonStatsFlags.Flag.Segments, true).set(CommonStatsFlags.Flag.Translog, true))
+            .get();
+
+        assertTrue(nodesStatsResponseForDataNode.getNodes().get(0).getNode().isDataNode());
+        RemoteSegmentStats remoteSegmentStats = nodesStatsResponseForDataNode.getNodes()
+            .get(0)
+            .getIndices()
+            .getSegments()
+            .getRemoteSegmentStats();
+        assertTrue(remoteSegmentStats.getUploadBytesStarted() > 0);
+        assertTrue(remoteSegmentStats.getUploadBytesSucceeded() > 0);
+
+        RemoteTranslogStats remoteTranslogStats = nodesStatsResponseForDataNode.getNodes()
+            .get(0)
+            .getIndices()
+            .getTranslog()
+            .getRemoteTranslogStats();
+        assertTrue(remoteTranslogStats.getUploadBytesStarted() > 0);
+        assertTrue(remoteTranslogStats.getUploadBytesSucceeded() > 0);
+    }
+
+    private void assertZeroRemoteSegmentStats(RemoteSegmentStats remoteSegmentStats) {
+        // Compare with a fresh object, in which all values default to 0
+        assertEquals(new RemoteSegmentStats(), remoteSegmentStats);
+    }
+
+    private void assertZeroRemoteTranslogStats(RemoteTranslogStats remoteTranslogStats) {
+        // Compare with a fresh object, in which all values default to 0
+        assertEquals(new RemoteTranslogStats(), remoteTranslogStats);
+    }
+
+    private static void assertNodeStatsParityAcrossNodes(String... indices) {
+        for (String dataNode : internalCluster().getDataNodeNames()) {
+            assertNodeStatsParityOnNode(dataNode, indices);
+        }
+    }
+
+    private static void assertNodeStatsParityOnNode(String dataNode, String... indices) {
+        RemoteSegmentStats remoteSegmentStatsCumulative = new RemoteSegmentStats();
+        RemoteTranslogStats remoteTranslogStatsCumulative = new RemoteTranslogStats();
+        for (String index : indices) {
+            // Fetch _remotestore/stats
+            RemoteStoreStatsResponse remoteStoreStats = client(dataNode).admin()
+                .cluster()
+                .prepareRemoteStoreStats(index, "0")
+                .setLocal(true)
+                .get();
+            remoteSegmentStatsCumulative.add(new RemoteSegmentStats(remoteStoreStats.getRemoteStoreStats()[0].getSegmentStats()));
+            remoteTranslogStatsCumulative.add(new RemoteTranslogStats(remoteStoreStats.getRemoteStoreStats()[0].getTranslogStats()));
+        }
+
+        // Fetch _nodes/stats
+        NodesStatsResponse nodesStatsResponse = client().admin()
+            .cluster()
+            .prepareNodesStats(dataNode)
+            .setIndices(new CommonStatsFlags().set(CommonStatsFlags.Flag.Segments, true).set(CommonStatsFlags.Flag.Translog, true))
+            .get();
+
+        // assert segment stats
+        RemoteSegmentStats remoteSegmentStatsFromNodesStats = nodesStatsResponse.getNodes()
+            .get(0)
+            .getIndices()
+            .getSegments()
+            .getRemoteSegmentStats();
+        assertEquals(remoteSegmentStatsCumulative, remoteSegmentStatsFromNodesStats);
+        // Ensure that total upload time has a non-zero value if there have been segments uploaded from the node
+        if (remoteSegmentStatsCumulative.getUploadBytesStarted() > 0) {
+            assertTrue(remoteSegmentStatsCumulative.getTotalUploadTime() > 0);
+        }
+        // Ensure that total download time has a non-zero value if there have been segments downloaded to the node
+        if (remoteSegmentStatsCumulative.getDownloadBytesStarted() > 0) {
+            assertTrue(remoteSegmentStatsCumulative.getTotalDownloadTime() > 0);
+        }
+
+        // assert translog stats
+        RemoteTranslogStats remoteTranslogStatsFromNodesStats = nodesStatsResponse.getNodes()
+            .get(0)
+            .getIndices()
+            .getTranslog()
+            .getRemoteTranslogStats();
+        assertEquals(remoteTranslogStatsCumulative, remoteTranslogStatsFromNodesStats);
+    }
+}
diff --git
a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java index 76ef153fab963..4a0af206b9d89 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java @@ -8,31 +8,61 @@ package org.opensearch.remotestore; -import org.junit.Before; +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStats; import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsRequestBuilder; import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsResponse; +import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.coordination.FollowersChecker; +import org.opensearch.cluster.coordination.LeaderChecker; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.index.remote.RemoteRefreshSegmentTracker; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.remote.RemoteSegmentTransferTracker; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.disruption.NetworkDisruption; +import org.opensearch.test.junit.annotations.TestLogging; +import org.opensearch.test.transport.MockTransportService; +import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; import java.util.List; import java.util.Locale; +import java.util.Set; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import java.util.stream.Stream; -@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 3) +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteStoreStatsIT extends RemoteStoreBaseIntegTestCase { private static final String INDEX_NAME = "remote-store-test-idx-1"; - @Before + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Arrays.asList(MockTransportService.TestPlugin.class); + } + public void setup() { - setupRepo(); + internalCluster().startNodes(3); } public void testStatsResponseFromAllNodes() { + setup(); // Step 1 - We create cluster, create an index, and then index documents into. We also do multiple refreshes/flushes // during this time frame. This ensures that the segment upload has started. 
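+        // setup() is invoked explicitly at the start of each test here because the suite now
+        // runs with numDataNodes = 0 and the @Before hook has been removed.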
@@ -50,18 +80,58 @@ public void testStatsResponseFromAllNodes() { for (String node : nodes) { RemoteStoreStatsResponse response = client(node).admin().cluster().prepareRemoteStoreStats(INDEX_NAME, shardId).get(); assertTrue(response.getSuccessfulShards() > 0); - assertTrue(response.getShards() != null && response.getShards().length != 0); + assertTrue(response.getRemoteStoreStats() != null && response.getRemoteStoreStats().length != 0); final String indexShardId = String.format(Locale.ROOT, "[%s][%s]", INDEX_NAME, shardId); - List<RemoteStoreStats> matches = Arrays.stream(response.getShards()) - .filter(stat -> indexShardId.equals(stat.getStats().shardId.toString())) + List<RemoteStoreStats> matches = Arrays.stream(response.getRemoteStoreStats()) + .filter(stat -> indexShardId.equals(stat.getSegmentStats().shardId.toString())) .collect(Collectors.toList()); assertEquals(1, matches.size()); - RemoteRefreshSegmentTracker.Stats stats = matches.get(0).getStats(); - assertResponseStats(stats); + + RemoteSegmentTransferTracker.Stats segmentStats = matches.get(0).getSegmentStats(); + validateSegmentUploadStats(segmentStats); + assertEquals(0, segmentStats.directoryFileTransferTrackerStats.transferredBytesStarted); + + RemoteTranslogTransferTracker.Stats translogStats = matches.get(0).getTranslogStats(); + assertNonZeroTranslogUploadStatsNoFailures(translogStats); + assertZeroTranslogDownloadStats(translogStats); + } + + // Step 3 - Enable replicas on the existing indices and ensure that download + // stats are being populated as well + changeReplicaCountAndEnsureGreen(1); + for (String node : nodes) { + RemoteStoreStatsResponse response = client(node).admin().cluster().prepareRemoteStoreStats(INDEX_NAME, shardId).get(); + assertTrue(response.getSuccessfulShards() > 0); + assertTrue(response.getRemoteStoreStats() != null && response.getRemoteStoreStats().length != 0); + final String indexShardId = String.format(Locale.ROOT, "[%s][%s]", INDEX_NAME, shardId); + List<RemoteStoreStats> matches = Arrays.stream(response.getRemoteStoreStats()) + .filter(stat -> indexShardId.equals(stat.getSegmentStats().shardId.toString())) + .collect(Collectors.toList()); + assertEquals(2, matches.size()); + for (RemoteStoreStats stat : matches) { + ShardRouting routing = stat.getShardRouting(); + validateShardRouting(routing); + RemoteSegmentTransferTracker.Stats segmentStats = stat.getSegmentStats(); + RemoteTranslogTransferTracker.Stats translogStats = stat.getTranslogStats(); + if (routing.primary()) { + validateSegmentUploadStats(segmentStats); + assertEquals(0, segmentStats.directoryFileTransferTrackerStats.transferredBytesStarted); + + assertNonZeroTranslogUploadStatsNoFailures(translogStats); + assertZeroTranslogDownloadStats(translogStats); + } else { + validateSegmentDownloadStats(segmentStats); + assertEquals(0, segmentStats.totalUploadsStarted); + + assertZeroTranslogUploadStats(translogStats); + assertZeroTranslogDownloadStats(translogStats); + } + } } } public void testStatsResponseAllShards() { + setup(); // Step 1 - We create cluster, create an index, and then index documents into. We also do multiple refreshes/flushes // during this time frame. This ensures that the segment upload has started. 
@@ -79,13 +149,47 @@ public void testStatsResponseAllShards() { .cluster() .prepareRemoteStoreStats(INDEX_NAME, null); RemoteStoreStatsResponse response = remoteStoreStatsRequestBuilder.get(); - assertTrue(response.getSuccessfulShards() == 3); - assertTrue(response.getShards() != null && response.getShards().length == 3); - RemoteRefreshSegmentTracker.Stats stats = response.getShards()[0].getStats(); - assertResponseStats(stats); + assertEquals(3, response.getSuccessfulShards()); + assertTrue(response.getRemoteStoreStats() != null && response.getRemoteStoreStats().length == 3); + + RemoteSegmentTransferTracker.Stats segmentStats = response.getRemoteStoreStats()[0].getSegmentStats(); + validateSegmentUploadStats(segmentStats); + assertEquals(0, segmentStats.directoryFileTransferTrackerStats.transferredBytesStarted); + + RemoteTranslogTransferTracker.Stats translogStats = response.getRemoteStoreStats()[0].getTranslogStats(); + assertNonZeroTranslogUploadStatsNoFailures(translogStats); + assertZeroTranslogDownloadStats(translogStats); + + // Step 3 - Enable replicas on the existing indices and ensure that download + // stats are being populated as well + changeReplicaCountAndEnsureGreen(1); + response = client(node).admin().cluster().prepareRemoteStoreStats(INDEX_NAME, null).get(); + assertEquals(6, response.getSuccessfulShards()); + assertTrue(response.getRemoteStoreStats() != null && response.getRemoteStoreStats().length == 6); + for (RemoteStoreStats stat : response.getRemoteStoreStats()) { + ShardRouting routing = stat.getShardRouting(); + validateShardRouting(routing); + segmentStats = stat.getSegmentStats(); + translogStats = stat.getTranslogStats(); + if (routing.primary()) { + validateSegmentUploadStats(segmentStats); + assertEquals(0, segmentStats.directoryFileTransferTrackerStats.transferredBytesStarted); + + assertNonZeroTranslogUploadStatsNoFailures(translogStats); + assertZeroTranslogDownloadStats(translogStats); + } else { + validateSegmentDownloadStats(segmentStats); + assertEquals(0, segmentStats.totalUploadsStarted); + + assertZeroTranslogUploadStats(translogStats); + assertZeroTranslogDownloadStats(translogStats); + } + } + } public void testStatsResponseFromLocalNode() { + setup(); // Step 1 - We create cluster, create an index, and then index documents into. We also do multiple refreshes/flushes // during this time frame. This ensures that the segment upload has started. 
@@ -105,29 +209,521 @@ public void testStatsResponseFromLocalNode() {
             .prepareRemoteStoreStats(INDEX_NAME, null);
         remoteStoreStatsRequestBuilder.setLocal(true);
         RemoteStoreStatsResponse response = remoteStoreStatsRequestBuilder.get();
-        assertTrue(response.getSuccessfulShards() == 1);
-        assertTrue(response.getShards() != null && response.getShards().length == 1);
-        RemoteRefreshSegmentTracker.Stats stats = response.getShards()[0].getStats();
-        assertResponseStats(stats);
+        assertEquals(1, response.getSuccessfulShards());
+        assertTrue(response.getRemoteStoreStats() != null && response.getRemoteStoreStats().length == 1);
+        RemoteSegmentTransferTracker.Stats segmentStats = response.getRemoteStoreStats()[0].getSegmentStats();
+        validateSegmentUploadStats(segmentStats);
+        assertEquals(0, segmentStats.directoryFileTransferTrackerStats.transferredBytesStarted);
+
+        RemoteTranslogTransferTracker.Stats translogStats = response.getRemoteStoreStats()[0].getTranslogStats();
+        assertNonZeroTranslogUploadStatsNoFailures(translogStats);
+        assertZeroTranslogDownloadStats(translogStats);
+        }
+        changeReplicaCountAndEnsureGreen(1);
+        for (String node : nodes) {
+            RemoteStoreStatsRequestBuilder remoteStoreStatsRequestBuilder = client(node).admin()
+                .cluster()
+                .prepareRemoteStoreStats(INDEX_NAME, null);
+            remoteStoreStatsRequestBuilder.setLocal(true);
+            RemoteStoreStatsResponse response = remoteStoreStatsRequestBuilder.get();
+            assertTrue(response.getSuccessfulShards() > 0);
+            assertTrue(response.getRemoteStoreStats() != null && response.getRemoteStoreStats().length != 0);
+            for (RemoteStoreStats stat : response.getRemoteStoreStats()) {
+                ShardRouting routing = stat.getShardRouting();
+                validateShardRouting(routing);
+                RemoteSegmentTransferTracker.Stats segmentStats = stat.getSegmentStats();
+                RemoteTranslogTransferTracker.Stats translogStats = stat.getTranslogStats();
+                if (routing.primary()) {
+                    validateSegmentUploadStats(segmentStats);
+                    assertEquals(0, segmentStats.directoryFileTransferTrackerStats.transferredBytesStarted);
+
+                    assertNonZeroTranslogUploadStatsNoFailures(translogStats);
+                    assertZeroTranslogDownloadStats(translogStats);
+                } else {
+                    validateSegmentDownloadStats(segmentStats);
+                    assertEquals(0, segmentStats.totalUploadsStarted);
+
+                    assertZeroTranslogUploadStats(translogStats);
+                    assertZeroTranslogDownloadStats(translogStats);
+                }
+            }
+        }
+    }
+
+    @TestLogging(reason = "Getting trace logs from remote store package", value = "org.opensearch.remotestore:TRACE")
+    public void testDownloadStatsCorrectnessSinglePrimarySingleReplica() throws Exception {
+        setup();
+        // Scenario:
+        // - Create index with single primary and single replica shard
+        // - Disable Refresh Interval for the index
+        // - Index documents
+        // - Trigger refresh and flush
+        // - Assert that download stats == upload stats
+        // - Repeat these steps a random number of times (between 5 and 10)
+
+        // Create index with 1 pri and 1 replica and refresh interval disabled
+        createIndex(
+            INDEX_NAME,
+            Settings.builder().put(remoteStoreIndexSettings(1, 1)).put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1).build()
+        );
+        ensureGreen(INDEX_NAME);
+
+        // Manually invoke a refresh
+        refresh(INDEX_NAME);
+
+        // Get zero state values
+        // Extract and assert zero state primary stats
+        RemoteStoreStatsResponse zeroStateResponse = client().admin().cluster().prepareRemoteStoreStats(INDEX_NAME, "0").get();
+        RemoteSegmentTransferTracker.Stats zeroStatePrimaryStats = Arrays.stream(zeroStateResponse.getRemoteStoreStats())
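+            // Only the primary's tracker feeds the zero-state upload baseline; the replica's
+            // download trackers are asserted separately below.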
.filter(remoteStoreStats -> remoteStoreStats.getShardRouting().primary()) + .collect(Collectors.toList()) + .get(0) + .getSegmentStats(); + logger.info( + "Zero state primary stats: {}ms refresh time lag, {}b bytes lag, {}b upload bytes started and {}b upload bytes failed.", + zeroStatePrimaryStats.refreshTimeLagMs, + zeroStatePrimaryStats.bytesLag, + zeroStatePrimaryStats.uploadBytesStarted, + zeroStatePrimaryStats.uploadBytesFailed + ); + assertTrue( + zeroStatePrimaryStats.totalUploadsStarted == zeroStatePrimaryStats.totalUploadsSucceeded + && zeroStatePrimaryStats.totalUploadsSucceeded == 1 + ); + assertTrue( + zeroStatePrimaryStats.uploadBytesStarted == zeroStatePrimaryStats.uploadBytesSucceeded + && zeroStatePrimaryStats.uploadBytesSucceeded > 0 + ); + assertTrue(zeroStatePrimaryStats.totalUploadsFailed == 0 && zeroStatePrimaryStats.uploadBytesFailed == 0); + + // Extract and assert zero state replica stats + RemoteSegmentTransferTracker.Stats zeroStateReplicaStats = Arrays.stream(zeroStateResponse.getRemoteStoreStats()) + .filter(remoteStoreStats -> !remoteStoreStats.getShardRouting().primary()) + .collect(Collectors.toList()) + .get(0) + .getSegmentStats(); + assertTrue( + zeroStateReplicaStats.directoryFileTransferTrackerStats.transferredBytesStarted == 0 + && zeroStateReplicaStats.directoryFileTransferTrackerStats.transferredBytesSucceeded == 0 + ); + + // Index documents + for (int i = 1; i <= randomIntBetween(5, 10); i++) { + indexSingleDoc(INDEX_NAME); + // Running Flush & Refresh manually + flushAndRefresh(INDEX_NAME); + ensureGreen(INDEX_NAME); + + // Poll for RemoteStore Stats + assertBusy(() -> { + RemoteStoreStatsResponse response = client().admin().cluster().prepareRemoteStoreStats(INDEX_NAME, "0").get(); + // Iterate through the response and extract the relevant segment upload and download stats + List<RemoteStoreStats> primaryStatsList = Arrays.stream(response.getRemoteStoreStats()) + .filter(remoteStoreStats -> remoteStoreStats.getShardRouting().primary()) + .collect(Collectors.toList()); + assertEquals(1, primaryStatsList.size()); + List<RemoteStoreStats> replicaStatsList = Arrays.stream(response.getRemoteStoreStats()) + .filter(remoteStoreStats -> !remoteStoreStats.getShardRouting().primary()) + .collect(Collectors.toList()); + assertEquals(1, replicaStatsList.size()); + RemoteSegmentTransferTracker.Stats primaryStats = primaryStatsList.get(0).getSegmentStats(); + RemoteSegmentTransferTracker.Stats replicaStats = replicaStatsList.get(0).getSegmentStats(); + // Assert Upload syncs - zero state uploads == download syncs + assertTrue(primaryStats.totalUploadsStarted > 0); + assertTrue(primaryStats.totalUploadsSucceeded > 0); + assertTrue( + replicaStats.directoryFileTransferTrackerStats.transferredBytesStarted > 0 + && primaryStats.uploadBytesStarted + - zeroStatePrimaryStats.uploadBytesStarted >= replicaStats.directoryFileTransferTrackerStats.transferredBytesStarted + ); + assertTrue( + replicaStats.directoryFileTransferTrackerStats.transferredBytesSucceeded > 0 + && primaryStats.uploadBytesSucceeded + - zeroStatePrimaryStats.uploadBytesSucceeded >= replicaStats.directoryFileTransferTrackerStats.transferredBytesSucceeded + ); + // Assert zero failures + assertEquals(0, primaryStats.uploadBytesFailed); + assertEquals(0, replicaStats.directoryFileTransferTrackerStats.transferredBytesFailed); + }, 60, TimeUnit.SECONDS); + } + } + + @TestLogging(reason = "Getting trace logs from remote store package", value = "org.opensearch.remotestore:TRACE") + public void 
testDownloadStatsCorrectnessSinglePrimaryMultipleReplicaShards() throws Exception {
+        setup();
+        // Scenario:
+        // - Create index with single primary and N-1 replica shards (N = number of data nodes)
+        // - Disable Refresh Interval for the index
+        // - Index documents
+        // - Trigger refresh and flush
+        // - Assert that download stats == upload stats
+        // - Repeat these steps a random number of times (between 5 and 10)
+
+        // Create index
+        int dataNodeCount = client().admin().cluster().prepareHealth().get().getNumberOfDataNodes();
+        createIndex(
+            INDEX_NAME,
+            Settings.builder()
+                .put(remoteStoreIndexSettings(dataNodeCount - 1, 1))
+                .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1)
+                .build()
+        );
+        ensureGreen(INDEX_NAME);
+
+        // Manually invoke a refresh
+        refresh(INDEX_NAME);
+
+        // Get zero state values
+        // Extract and assert zero state primary stats
+        RemoteStoreStatsResponse zeroStateResponse = client().admin().cluster().prepareRemoteStoreStats(INDEX_NAME, "0").get();
+        RemoteSegmentTransferTracker.Stats zeroStatePrimaryStats = Arrays.stream(zeroStateResponse.getRemoteStoreStats())
+            .filter(remoteStoreStats -> remoteStoreStats.getShardRouting().primary())
+            .collect(Collectors.toList())
+            .get(0)
+            .getSegmentStats();
+        logger.info(
+            "Zero state primary stats: {}ms refresh time lag, {}b bytes lag, {}b upload bytes started and {}b upload bytes failed.",
+            zeroStatePrimaryStats.refreshTimeLagMs,
+            zeroStatePrimaryStats.bytesLag,
+            zeroStatePrimaryStats.uploadBytesStarted,
+            zeroStatePrimaryStats.uploadBytesFailed
+        );
+        assertTrue(
+            zeroStatePrimaryStats.totalUploadsStarted == zeroStatePrimaryStats.totalUploadsSucceeded
+                && zeroStatePrimaryStats.totalUploadsSucceeded == 1
+        );
+        assertTrue(
+            zeroStatePrimaryStats.uploadBytesStarted == zeroStatePrimaryStats.uploadBytesSucceeded
+                && zeroStatePrimaryStats.uploadBytesSucceeded > 0
+        );
+        assertTrue(zeroStatePrimaryStats.totalUploadsFailed == 0 && zeroStatePrimaryStats.uploadBytesFailed == 0);
+
+        // Extract and assert zero state replica stats
+        List<RemoteStoreStats> zeroStateReplicaStats = Arrays.stream(zeroStateResponse.getRemoteStoreStats())
+            .filter(remoteStoreStats -> !remoteStoreStats.getShardRouting().primary())
+            .collect(Collectors.toList());
+        zeroStateReplicaStats.forEach(stats -> {
+            assertTrue(
+                stats.getSegmentStats().directoryFileTransferTrackerStats.transferredBytesStarted == 0
+                    && stats.getSegmentStats().directoryFileTransferTrackerStats.transferredBytesSucceeded == 0
+            );
+        });
+
+        int currentNodesInCluster = client().admin().cluster().prepareHealth().get().getNumberOfDataNodes();
+        for (int i = 0; i < randomIntBetween(5, 10); i++) {
+            indexSingleDoc(INDEX_NAME);
+            // Running Flush & Refresh manually
+            flushAndRefresh(INDEX_NAME);
+
+            assertBusy(() -> {
+                RemoteStoreStatsResponse response = client().admin().cluster().prepareRemoteStoreStats(INDEX_NAME, "0").get();
+                assertEquals(currentNodesInCluster, response.getSuccessfulShards());
+                long uploadsStarted = 0, uploadsSucceeded = 0, uploadsFailed = 0;
+                long uploadBytesStarted = 0, uploadBytesSucceeded = 0, uploadBytesFailed = 0;
+                List<Long> downloadBytesStarted = new ArrayList<>(), downloadBytesSucceeded = new ArrayList<>(), downloadBytesFailed =
+                    new ArrayList<>();
+
+                // Assert that stats for primary shard and replica shard set are equal
+                for (RemoteStoreStats eachStatsObject : response.getRemoteStoreStats()) {
+                    RemoteSegmentTransferTracker.Stats stats = eachStatsObject.getSegmentStats();
+                    if (eachStatsObject.getShardRouting().primary()) {
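+                        // Primary shard: capture upload-side counters to compare against each
+                        // replica's download counters below.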
uploadsStarted = stats.totalUploadsStarted; + uploadsSucceeded = stats.totalUploadsSucceeded; + uploadsFailed = stats.totalUploadsFailed; + uploadBytesStarted = stats.uploadBytesStarted; + uploadBytesSucceeded = stats.uploadBytesSucceeded; + uploadBytesFailed = stats.uploadBytesFailed; + } else { + downloadBytesStarted.add(stats.directoryFileTransferTrackerStats.transferredBytesStarted); + downloadBytesSucceeded.add(stats.directoryFileTransferTrackerStats.transferredBytesSucceeded); + downloadBytesFailed.add(stats.directoryFileTransferTrackerStats.transferredBytesFailed); + } + } + + assertTrue(uploadsStarted > 0); + assertTrue(uploadsSucceeded > 0); + assertEquals(0, uploadsFailed); + assertEquals(0, uploadBytesFailed); + for (int j = 0; j < response.getSuccessfulShards() - 1; j++) { + assertTrue(uploadBytesStarted - zeroStatePrimaryStats.uploadBytesStarted > downloadBytesStarted.get(j)); + assertTrue(uploadBytesSucceeded - zeroStatePrimaryStats.uploadBytesSucceeded > downloadBytesSucceeded.get(j)); + assertEquals(0, (long) downloadBytesFailed.get(j)); + } + }, 60, TimeUnit.SECONDS); + } + } + + public void testStatsOnShardRelocation() { + setup(); + // Scenario: + // - Create index with single primary and single replica shard + // - Index documents + // - Reroute replica shard to one of the remaining nodes + // - Assert that remote store stats reflect the new node ID + + // Create index + createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 1)); + ensureGreen(INDEX_NAME); + // Index docs + indexDocs(); + + // Fetch current set of nodes in the cluster + List<String> currentNodesInCluster = getClusterState().nodes() + .getDataNodes() + .values() + .stream() + .map(DiscoveryNode::getId) + .collect(Collectors.toList()); + DiscoveryNode[] discoveryNodesForIndex = client().admin().cluster().prepareSearchShards(INDEX_NAME).get().getNodes(); + + // Fetch nodes with shard copies of the created index + List<String> nodeIdsWithShardCopies = new ArrayList<>(); + Arrays.stream(discoveryNodesForIndex).forEach(eachNode -> nodeIdsWithShardCopies.add(eachNode.getId())); + + // Fetch nodes which do not have any copies of the index + List<String> nodeIdsWithoutShardCopy = currentNodesInCluster.stream() + .filter(eachNode -> !nodeIdsWithShardCopies.contains(eachNode)) + .collect(Collectors.toList()); + assertEquals(1, nodeIdsWithoutShardCopy.size()); + + // Manually reroute shard to a node which does not have any shard copy at present + ShardRouting replicaShardRouting = getClusterState().routingTable() + .index(INDEX_NAME) + .shard(0) + .assignedShards() + .stream() + .filter(shard -> !shard.primary()) + .collect(Collectors.toList()) + .get(0); + String sourceNode = replicaShardRouting.currentNodeId(); + String destinationNode = nodeIdsWithoutShardCopy.get(0); + relocateShard(0, sourceNode, destinationNode); + RemoteStoreStats[] allShardsStats = client().admin().cluster().prepareRemoteStoreStats(INDEX_NAME, "0").get().getRemoteStoreStats(); + RemoteStoreStats replicaShardStat = Arrays.stream(allShardsStats) + .filter(eachStat -> !eachStat.getShardRouting().primary()) + .collect(Collectors.toList()) + .get(0); + + // Assert that remote store stats reflect the new shard state + assertEquals(ShardRoutingState.STARTED, replicaShardStat.getShardRouting().state()); + assertEquals(destinationNode, replicaShardStat.getShardRouting().currentNodeId()); + } + + public void testStatsOnShardUnassigned() throws IOException { + setup(); + // Scenario: + // - Create index with single primary and two replica shards + // - Index documents + // - Stop one data node + // - Assert: + // a. Total shard count in the response object is equal to the previous node count + // b. 
Successful shard count in the response object is equal to the new node count + createIndex(INDEX_NAME, remoteStoreIndexSettings(2, 1)); + ensureGreen(INDEX_NAME); + indexDocs(); + ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().get(); + int dataNodeCountBeforeStop = clusterHealthResponse.getNumberOfDataNodes(); + int nodeCount = clusterHealthResponse.getNumberOfNodes(); + String nodeToBeStopped = randomBoolean() ? primaryNodeName(INDEX_NAME) : replicaNodeName(INDEX_NAME); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeToBeStopped)); + ensureYellowAndNoInitializingShards(INDEX_NAME); + ensureStableCluster(nodeCount - 1); + RemoteStoreStatsResponse response = client().admin().cluster().prepareRemoteStoreStats(INDEX_NAME, "0").get(); + int dataNodeCountAfterStop = client().admin().cluster().prepareHealth().get().getNumberOfDataNodes(); + assertEquals(dataNodeCountBeforeStop, response.getTotalShards()); + assertEquals(dataNodeCountAfterStop, response.getSuccessfulShards()); + // Indexing docs to ensure that the primary has started + indexSingleDoc(INDEX_NAME); + } + + public void testStatsOnRemoteStoreRestore() throws IOException { + setup(); + // Creating an index with primary shard count == total nodes in cluster and 0 replicas + int dataNodeCount = client().admin().cluster().prepareHealth().get().getNumberOfDataNodes(); + createIndex(INDEX_NAME, remoteStoreIndexSettings(0, dataNodeCount)); + ensureGreen(INDEX_NAME); + + // Index some docs to ensure that segments are uploaded to the remote store + indexDocs(); + refresh(INDEX_NAME); + + // Stop one data node to force the index into a red state + internalCluster().stopRandomDataNode(); + ensureRed(INDEX_NAME); + + // Start another data node to restore the cluster to its previous capacity + internalCluster().startDataOnlyNode(); + + // Restore index from remote store + assertAcked(client().admin().indices().prepareClose(INDEX_NAME)); + client().admin() + .cluster() + .restoreRemoteStore(new RestoreRemoteStoreRequest().indices(INDEX_NAME).restoreAllShards(true), PlainActionFuture.newFuture()); + + // Ensure that the index is green + ensureGreen(INDEX_NAME); + + // Index some more docs to force segment uploads to remote store + indexDocs(); + + RemoteStoreStatsResponse remoteStoreStatsResponse = client().admin().cluster().prepareRemoteStoreStats(INDEX_NAME, "0").get(); + Arrays.stream(remoteStoreStatsResponse.getRemoteStoreStats()).forEach(statObject -> { + RemoteSegmentTransferTracker.Stats segmentStats = statObject.getSegmentStats(); + // Assert that we have both upload and download stats for the index + assertTrue( + segmentStats.totalUploadsStarted > 0 && segmentStats.totalUploadsSucceeded > 0 && segmentStats.totalUploadsFailed == 0 + ); + assertTrue( + segmentStats.directoryFileTransferTrackerStats.transferredBytesStarted > 0 + && segmentStats.directoryFileTransferTrackerStats.transferredBytesSucceeded > 0 + ); + + RemoteTranslogTransferTracker.Stats translogStats = statObject.getTranslogStats(); + assertNonZeroTranslogUploadStatsNoFailures(translogStats); + assertNonZeroTranslogDownloadStats(translogStats); + }); + } + + public void testNonZeroPrimaryStatsOnNewlyCreatedIndexWithZeroDocs() throws Exception { + setup(); + // Create an index with one primary and one replica shard + createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 1)); + ensureGreen(INDEX_NAME); + refresh(INDEX_NAME); + + // Ensure that the index has 0 documents in it + assertEquals(0, 
client().admin().indices().prepareStats(INDEX_NAME).get().getTotal().docs.getCount()); + + // Assert that within 5 seconds the download and upload stats move to non-zero values + assertBusy(() -> { + RemoteStoreStats[] remoteStoreStats = client().admin() + .cluster() + .prepareRemoteStoreStats(INDEX_NAME, "0") + .get() + .getRemoteStoreStats(); + Arrays.stream(remoteStoreStats).forEach(statObject -> { + RemoteSegmentTransferTracker.Stats segmentStats = statObject.getSegmentStats(); + RemoteTranslogTransferTracker.Stats translogStats = statObject.getTranslogStats(); + if (statObject.getShardRouting().primary()) { + assertTrue( + segmentStats.totalUploadsSucceeded == 1 + && segmentStats.totalUploadsStarted == segmentStats.totalUploadsSucceeded + && segmentStats.totalUploadsFailed == 0 + ); + // On primary shard creation, we upload to the remote translog after primary mode is activated. + // This moves the primary shard's upload stats to non-zero. + assertNonZeroTranslogUploadStatsNoFailures(translogStats); + } else { + assertTrue( + segmentStats.directoryFileTransferTrackerStats.transferredBytesStarted == 0 + && segmentStats.directoryFileTransferTrackerStats.transferredBytesSucceeded == 0 + ); + assertZeroTranslogUploadStats(translogStats); + } + assertZeroTranslogDownloadStats(translogStats); + }); + }, 5, TimeUnit.SECONDS); + } + + public void testStatsCorrectnessOnFailover() { + Settings clusterSettings = Settings.builder() + .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "100ms") + .put(LeaderChecker.LEADER_CHECK_INTERVAL_SETTING.getKey(), "500ms") + .put(LeaderChecker.LEADER_CHECK_RETRY_COUNT_SETTING.getKey(), 1) + .put(FollowersChecker.FOLLOWER_CHECK_TIMEOUT_SETTING.getKey(), "100ms") + .put(FollowersChecker.FOLLOWER_CHECK_INTERVAL_SETTING.getKey(), "500ms") + .put(FollowersChecker.FOLLOWER_CHECK_RETRY_COUNT_SETTING.getKey(), 1) + .put(nodeSettings(0)) + .build(); + String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(clusterSettings); + internalCluster().startDataOnlyNodes(2, clusterSettings); + + // Create an index with one primary and one replica shard + createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 1)); + ensureGreen(INDEX_NAME); + + // Index some docs and refresh + indexDocs(); + refresh(INDEX_NAME); + + String primaryNode = primaryNodeName(INDEX_NAME); + String replicaNode = replicaNodeName(INDEX_NAME); + + // Start network disruption - primary node will be isolated + Set<String> nodesInOneSide = Stream.of(clusterManagerNode, replicaNode).collect(Collectors.toCollection(HashSet::new)); + Set<String> nodesInOtherSide = Stream.of(primaryNode).collect(Collectors.toCollection(HashSet::new)); + NetworkDisruption networkDisruption = new NetworkDisruption( + new NetworkDisruption.TwoPartitions(nodesInOneSide, nodesInOtherSide), + NetworkDisruption.DISCONNECT + ); + internalCluster().setDisruptionScheme(networkDisruption); + logger.info("--> network disruption is started"); + networkDisruption.startDisrupting(); + ensureStableCluster(2, clusterManagerNode); + + RemoteStoreStatsResponse response = client(clusterManagerNode).admin().cluster().prepareRemoteStoreStats(INDEX_NAME, "0").get(); + final String indexShardId = String.format(Locale.ROOT, "[%s][%s]", INDEX_NAME, "0"); + List<RemoteStoreStats> matches = Arrays.stream(response.getRemoteStoreStats()) + .filter(stat -> indexShardId.equals(stat.getSegmentStats().shardId.toString())) + .collect(Collectors.toList()); + assertEquals(1, matches.size()); + RemoteSegmentTransferTracker.Stats segmentStats = 
matches.get(0).getSegmentStats(); + assertEquals(0, segmentStats.refreshTimeLagMs); + + networkDisruption.stopDisrupting(); + internalCluster().clearDisruptionScheme(); + ensureStableCluster(3, clusterManagerNode); + ensureGreen(INDEX_NAME); + logger.info("Test completed"); + } + + public void testZeroLagOnCreateIndex() throws InterruptedException { + setup(); + String clusterManagerNode = internalCluster().getClusterManagerName(); + + int numOfShards = randomIntBetween(1, 3); + createIndex(INDEX_NAME, remoteStoreIndexSettings(1, numOfShards)); + ensureGreen(INDEX_NAME); + // Busy-wait until System.nanoTime() advances, so that refresh time lag is computed against a strictly later timestamp + long currentTimeNs = System.nanoTime(); + while (currentTimeNs == System.nanoTime()) { + Thread.sleep(10); + } + + for (int i = 0; i < numOfShards; i++) { + RemoteStoreStatsResponse response = client(clusterManagerNode).admin() + .cluster() + .prepareRemoteStoreStats(INDEX_NAME, String.valueOf(i)) + .get(); + for (RemoteStoreStats remoteStoreStats : response.getRemoteStoreStats()) { + assertEquals(0, remoteStoreStats.getSegmentStats().refreshTimeLagMs); + } } } private void indexDocs() { - // Indexing documents along with refreshes and flushes. for (int i = 0; i < randomIntBetween(5, 10); i++) { if (randomBoolean()) { flush(INDEX_NAME); } else { refresh(INDEX_NAME); } - int numberOfOperations = randomIntBetween(20, 50); - for (int j = 0; j < numberOfOperations; j++) { - indexSingleDoc(INDEX_NAME); - } + int numberOfOperations = randomIntBetween(10, 30); + indexBulk(INDEX_NAME, numberOfOperations); } } - private void assertResponseStats(RemoteRefreshSegmentTracker.Stats stats) { + private void changeReplicaCountAndEnsureGreen(int replicaCount) { + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(SETTING_NUMBER_OF_REPLICAS, replicaCount)) + ); + ensureYellowAndNoInitializingShards(INDEX_NAME); + ensureGreen(INDEX_NAME); + } + + private void relocateShard(int shardId, String sourceNode, String destNode) { + assertAcked(client().admin().cluster().prepareReroute().add(new MoveAllocationCommand(INDEX_NAME, shardId, sourceNode, destNode))); + ensureGreen(INDEX_NAME); + } + + private void validateSegmentUploadStats(RemoteSegmentTransferTracker.Stats stats) { assertEquals(0, stats.refreshTimeLagMs); assertEquals(stats.localRefreshNumber, stats.remoteRefreshNumber); assertTrue(stats.uploadBytesStarted > 0); @@ -143,4 +739,69 @@ private void assertResponseStats(RemoteRefreshSegmentTracker.Stats stats) { assertTrue(stats.uploadBytesPerSecMovingAverage > 0); assertTrue(stats.uploadTimeMovingAverage > 0); } + + private void validateSegmentDownloadStats(RemoteSegmentTransferTracker.Stats stats) { + assertTrue(stats.directoryFileTransferTrackerStats.lastTransferTimestampMs > 0); + assertTrue(stats.directoryFileTransferTrackerStats.transferredBytesStarted > 0); + assertTrue(stats.directoryFileTransferTrackerStats.transferredBytesSucceeded > 0); + assertEquals(0, stats.directoryFileTransferTrackerStats.transferredBytesFailed); + assertTrue(stats.directoryFileTransferTrackerStats.lastSuccessfulTransferInBytes > 0); + assertTrue(stats.directoryFileTransferTrackerStats.transferredBytesMovingAverage > 0); + assertTrue(stats.directoryFileTransferTrackerStats.transferredBytesPerSecMovingAverage > 0); + } + + private void assertNonZeroTranslogUploadStatsNoFailures(RemoteTranslogTransferTracker.Stats stats) { + assertTrue(stats.uploadBytesStarted > 0); + assertTrue(stats.totalUploadsStarted > 0); + assertEquals(0, stats.uploadBytesFailed); + assertEquals(0, 
stats.totalUploadsFailed); + assertTrue(stats.uploadBytesSucceeded > 0); + assertTrue(stats.totalUploadsSucceeded > 0); + assertTrue(stats.totalUploadTimeInMillis > 0); + assertTrue(stats.lastSuccessfulUploadTimestamp > 0); + } + + private void assertZeroTranslogUploadStats(RemoteTranslogTransferTracker.Stats stats) { + assertEquals(0, stats.uploadBytesStarted); + assertEquals(0, stats.totalUploadsStarted); + assertEquals(0, stats.uploadBytesFailed); + assertEquals(0, stats.totalUploadsFailed); + assertEquals(0, stats.uploadBytesSucceeded); + assertEquals(0, stats.totalUploadsSucceeded); + assertEquals(0, stats.totalUploadTimeInMillis); + assertEquals(0, stats.lastSuccessfulUploadTimestamp); + } + + private void assertNonZeroTranslogDownloadStats(RemoteTranslogTransferTracker.Stats stats) { + assertTrue(stats.downloadBytesSucceeded > 0); + assertTrue(stats.totalDownloadsSucceeded > 0); + // TODO: Need to simulate a delay for this assertion to avoid flakiness + // assertTrue(stats.totalDownloadTimeInMillis > 0); + assertTrue(stats.lastSuccessfulDownloadTimestamp > 0); + } + + private void assertZeroTranslogDownloadStats(RemoteTranslogTransferTracker.Stats stats) { + assertEquals(0, stats.downloadBytesSucceeded); + assertEquals(0, stats.totalDownloadsSucceeded); + assertEquals(0, stats.totalDownloadTimeInMillis); + assertEquals(0, stats.lastSuccessfulDownloadTimestamp); + } + + // Validate if the shardRouting obtained from cluster state contains the exact same routing object + // parameters as obtained from the remote store stats API + private void validateShardRouting(ShardRouting routing) { + Stream<ShardRouting> currentRoutingTable = getClusterState().routingTable() + .getIndicesRouting() + .get(INDEX_NAME) + .shard(routing.id()) + .assignedShards() + .stream(); + assertTrue( + currentRoutingTable.anyMatch( + r -> (r.currentNodeId().equals(routing.currentNodeId()) + && r.state().equals(routing.state()) + && r.primary() == routing.primary()) + ) + ); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/ReplicaToPrimaryPromotionIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/ReplicaToPrimaryPromotionIT.java index 6764c50175e61..4e3f01b8f257f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/ReplicaToPrimaryPromotionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/ReplicaToPrimaryPromotionIT.java @@ -9,41 +9,43 @@ package org.opensearch.remotestore; import com.carrotsearch.randomizedtesting.RandomizedTest; -import org.junit.Before; + import org.opensearch.action.admin.indices.close.CloseIndexResponse; +import org.opensearch.action.index.IndexResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.settings.Settings; +import org.opensearch.core.rest.RestStatus; import org.opensearch.test.BackgroundIndexer; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.Before; import java.util.Locale; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; import static 
org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; -@OpenSearchIntegTestCase.ClusterScope(numDataNodes = 0) +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class ReplicaToPrimaryPromotionIT extends RemoteStoreBaseIntegTestCase { private int shard_count = 5; @Before public void setup() { - setupRepo(); + internalCluster().startClusterManagerOnlyNode(); } @Override public Settings indexSettings() { - return Settings.builder() - .put(super.indexSettings()) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, shard_count) - .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, REPOSITORY_NAME) - .build(); + return Settings.builder().put(super.indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, shard_count).build(); } public void testPromoteReplicaToPrimary() throws Exception { @@ -120,4 +122,63 @@ public void testPromoteReplicaToPrimary() throws Exception { refresh(indexName); assertHitCount(client().prepareSearch(indexName).setSize(0).get(), numOfDocs); } + + public void testFailoverWhileIndexing() throws Exception { + internalCluster().startNode(); + internalCluster().startNode(); + final String indexName = randomAlphaOfLength(5).toLowerCase(Locale.ROOT); + shard_count = scaledRandomIntBetween(1, 5); + createIndex(indexName); + ensureGreen(indexName); + int docCount = scaledRandomIntBetween(20, 50); + final int indexDocAfterFailover = scaledRandomIntBetween(20, 50); + AtomicInteger numAutoGenDocs = new AtomicInteger(); + CountDownLatch latch = new CountDownLatch(1); + final AtomicBoolean finished = new AtomicBoolean(false); + Thread indexingThread = new Thread(() -> { + int docsAfterFailover = 0; + while (finished.get() == false && numAutoGenDocs.get() < docCount) { + IndexResponse indexResponse = internalCluster().clusterManagerClient() + .prepareIndex(indexName) + .setSource("field", numAutoGenDocs.get()) + .get(); + + if (indexResponse.status() == RestStatus.CREATED || indexResponse.status() == RestStatus.OK) { + numAutoGenDocs.incrementAndGet(); + if (numAutoGenDocs.get() == docCount / 2) { + if (random().nextInt(3) == 0) { + refresh(indexName); + } else if (random().nextInt(2) == 0) { + flush(indexName); + } + // The main test thread stops a primary's node once this latch is counted down + latch.countDown(); + } else if (numAutoGenDocs.get() > docCount / 2) { + docsAfterFailover++; + if (docsAfterFailover == indexDocAfterFailover) { + finished.set(true); + } + } + } + } + logger.debug("Done indexing"); + }); + indexingThread.start(); + latch.await(); + + ClusterState state = client(internalCluster().getClusterManagerName()).admin().cluster().prepareState().get().getState(); + final int numShards = state.metadata().index(indexName).getNumberOfShards(); + final ShardRouting primaryShard = state.routingTable().index(indexName).shard(randomIntBetween(0, numShards - 1)).primaryShard(); + final DiscoveryNode randomNode = state.nodes().resolveNode(primaryShard.currentNodeId()); + + // stop the random data node, all remaining shards are promoted to primaries + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(randomNode.getName())); + ensureYellowAndNoInitializingShards(indexName); + indexingThread.join(); + refresh(indexName); + assertHitCount( + client(internalCluster().getClusterManagerName()).prepareSearch(indexName).setSize(0).setTrackTotalHits(true).get(), + numAutoGenDocs.get() + 
); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java new file mode 100644 index 0000000000000..3d8d001b17ddf --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java @@ -0,0 +1,196 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotestore; + +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.action.admin.cluster.stats.ClusterStatsResponse; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; +import org.opensearch.common.Priority; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.index.Index; +import org.opensearch.index.IndexService; +import org.opensearch.index.ReplicationStats; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.SegmentReplicationState; +import org.opensearch.indices.replication.SegmentReplicationTarget; +import org.opensearch.indices.replication.SegmentReplicationTargetService; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.disruption.SlowClusterStateProcessing; + +import java.nio.file.Path; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +/** + * This class runs tests with remote store + segRep while blocking file downloads + */ +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class SegmentReplicationUsingRemoteStoreDisruptionIT extends AbstractRemoteStoreMockRepositoryIntegTestCase { + + @Override + public Settings indexSettings() { + return remoteStoreIndexSettings(1); + } + + public void testCancelReplicationWhileSyncingSegments() throws Exception { + Path location = randomRepoPath().toAbsolutePath(); + setup(location, 0d, "metadata", Long.MAX_VALUE, 1); + + final Set<String> dataNodeNames = internalCluster().getDataNodeNames(); + final String replicaNode = getNode(dataNodeNames, false); + final String primaryNode = getNode(dataNodeNames, true); + + SegmentReplicationTargetService targetService = internalCluster().getInstance(SegmentReplicationTargetService.class, replicaNode); + ensureGreen(INDEX_NAME); + blockNodeOnAnySegmentFile(REPOSITORY_NAME, replicaNode); + final IndexShard indexShard = getIndexShard(replicaNode, INDEX_NAME); + indexSingleDoc(); + refresh(INDEX_NAME); + waitForBlock(replicaNode, REPOSITORY_NAME, TimeValue.timeValueSeconds(10)); + SegmentReplicationTarget segmentReplicationTarget = targetService.get(indexShard.shardId()); + assertNotNull(segmentReplicationTarget); + assertEquals(SegmentReplicationState.Stage.GET_FILES, segmentReplicationTarget.state().getStage()); + assertTrue(segmentReplicationTarget.refCount() > 0); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)) 
+ ); + assertNull(targetService.getOngoingEventSegmentReplicationState(indexShard.shardId())); + assertEquals("Target should be closed", 0, segmentReplicationTarget.refCount()); + unblockNode(REPOSITORY_NAME, replicaNode); + cleanupRepo(); + } + + public void testCancelReplicationWhileFetchingMetadata() throws Exception { + Path location = randomRepoPath().toAbsolutePath(); + setup(location, 0d, "metadata", Long.MAX_VALUE, 1); + + final Set<String> dataNodeNames = internalCluster().getDataNodeNames(); + final String replicaNode = getNode(dataNodeNames, false); + + SegmentReplicationTargetService targetService = internalCluster().getInstance(SegmentReplicationTargetService.class, replicaNode); + ensureGreen(INDEX_NAME); + blockNodeOnAnyFiles(REPOSITORY_NAME, replicaNode); + final IndexShard indexShard = getIndexShard(replicaNode, INDEX_NAME); + indexSingleDoc(); + refresh(INDEX_NAME); + waitForBlock(replicaNode, REPOSITORY_NAME, TimeValue.timeValueSeconds(10)); + SegmentReplicationTarget segmentReplicationTarget = targetService.get(indexShard.shardId()); + assertNotNull(segmentReplicationTarget); + assertEquals(SegmentReplicationState.Stage.GET_CHECKPOINT_INFO, segmentReplicationTarget.state().getStage()); + assertTrue(segmentReplicationTarget.refCount() > 0); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)) + ); + assertNull(targetService.get(indexShard.shardId())); + assertEquals("Target should be closed", 0, segmentReplicationTarget.refCount()); + unblockNode(REPOSITORY_NAME, replicaNode); + cleanupRepo(); + } + + public void testUpdateVisibleCheckpointWithLaggingClusterStateUpdates_primaryRelocation() throws Exception { + Path location = randomRepoPath().toAbsolutePath(); + Settings nodeSettings = Settings.builder().put(buildRemoteStoreNodeAttributes(location, 0d, "metadata", Long.MAX_VALUE)).build(); + internalCluster().startClusterManagerOnlyNode(nodeSettings); + internalCluster().startDataOnlyNodes(2, nodeSettings); + final Settings indexSettings = Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build(); + createIndex(INDEX_NAME, indexSettings); + ensureGreen(INDEX_NAME); + final Set<String> dataNodeNames = internalCluster().getDataNodeNames(); + final String replicaNode = getNode(dataNodeNames, false); + final String oldPrimary = getNode(dataNodeNames, true); + + // index a doc. 
+ client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", randomInt()).get(); + refresh(INDEX_NAME); + + logger.info("--> start another node"); + final String newPrimary = internalCluster().startDataOnlyNode(nodeSettings); + ClusterHealthResponse clusterHealthResponse = client().admin() + .cluster() + .prepareHealth() + .setWaitForEvents(Priority.LANGUID) + .setWaitForNodes("4") + .get(); + assertFalse(clusterHealthResponse.isTimedOut()); + + SlowClusterStateProcessing disruption = new SlowClusterStateProcessing(replicaNode, random(), 0, 0, 1000, 2000); + internalCluster().setDisruptionScheme(disruption); + disruption.startDisrupting(); + + // relocate the primary + logger.info("--> relocate the shard"); + client().admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand(INDEX_NAME, 0, oldPrimary, newPrimary)) + .execute() + .actionGet(); + clusterHealthResponse = client().admin() + .cluster() + .prepareHealth() + .setWaitForEvents(Priority.LANGUID) + .setWaitForNoRelocatingShards(true) + .setTimeout(new TimeValue(5, TimeUnit.MINUTES)) + .execute() + .actionGet(); + assertFalse(clusterHealthResponse.isTimedOut()); + + IndexShard newPrimary_shard = getIndexShard(newPrimary, INDEX_NAME); + IndexShard replica = getIndexShard(replicaNode, INDEX_NAME); + assertBusy(() -> { + assertEquals( + newPrimary_shard.getLatestReplicationCheckpoint().getSegmentInfosVersion(), + replica.getLatestReplicationCheckpoint().getSegmentInfosVersion() + ); + }); + + assertBusy(() -> { + ClusterStatsResponse clusterStatsResponse = client().admin().cluster().prepareClusterStats().get(); + ReplicationStats replicationStats = clusterStatsResponse.getIndicesStats().getSegments().getReplicationStats(); + assertEquals(0L, replicationStats.maxBytesBehind); + assertEquals(0L, replicationStats.maxReplicationLag); + assertEquals(0L, replicationStats.totalBytesBehind); + }); + disruption.stopDisrupting(); + disableRepoConsistencyCheck("Remote Store Creates System Repository"); + cleanupRepo(); + } + + private String getNode(Set<String> dataNodeNames, boolean primary) { + assertEquals(2, dataNodeNames.size()); + for (String name : dataNodeNames) { + final IndexShard indexShard = getIndexShard(name, INDEX_NAME); + if (indexShard.routingEntry().primary() == primary) { + return name; + } + } + return null; + } + + private IndexShard getIndexShard(String node, String indexName) { + final Index index = resolveIndex(indexName); + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node); + IndexService indexService = indicesService.indexService(index); + assertNotNull(indexService); + final Optional<Integer> shardId = indexService.shardIds().stream().findFirst(); + return shardId.map(indexService::getShard).orElse(null); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java index f298fac7c894e..23864c35ad154 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java @@ -8,64 +8,47 @@ package org.opensearch.remotestore; -import org.junit.After; -import org.junit.Before; -import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; 
import org.opensearch.indices.replication.SegmentReplicationIT; import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.After; +import org.junit.Before; import java.nio.file.Path; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; /** * This class runs Segment Replication Integ test suite with remote store enabled. - * Setup is similar to SegmentReplicationRemoteStoreIT but this also enables the segment replication using remote store which - * is behind SEGMENT_REPLICATION_EXPERIMENTAL flag. After this is moved out of experimental, we can combine and keep only one - * test suite for Segment and Remote store integration tests. */ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class SegmentReplicationUsingRemoteStoreIT extends SegmentReplicationIT { private static final String REPOSITORY_NAME = "test-remote-store-repo"; + protected Path absolutePath; @Override - public Settings indexSettings() { + protected Settings nodeSettings(int nodeOrdinal) { + if (absolutePath == null) { + absolutePath = randomRepoPath().toAbsolutePath(); + } return Settings.builder() - .put(super.indexSettings()) - .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) - .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, REPOSITORY_NAME) - .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, REPOSITORY_NAME) + .put(super.nodeSettings(nodeOrdinal)) + .put(remoteStoreClusterSettings(REPOSITORY_NAME, absolutePath)) .build(); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder() - .put(super.featureFlagSettings()) - .put(FeatureFlags.REMOTE_STORE, "true") - .put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL, "true") - .build(); + protected boolean segmentReplicationWithRemoteEnabled() { + return true; } @Before public void setup() { internalCluster().startClusterManagerOnlyNode(); - Path absolutePath = randomRepoPath().toAbsolutePath(); - assertAcked( - clusterAdmin().preparePutRepository(REPOSITORY_NAME).setType("fs").setSettings(Settings.builder().put("location", absolutePath)) - ); } @After public void teardown() { - assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME)); - } - - @Override - public void testPressureServiceStats() throws Exception { - super.testPressureServiceStats(); + clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).get(); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java index 0b64680033d84..6cfc76b7e3223 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java @@ -8,60 +8,46 @@ package org.opensearch.remotestore; -import org.junit.After; -import org.junit.Before; -import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.SegmentReplicationPressureIT; -import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.After; +import org.junit.Before; import 
java.nio.file.Path; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; /** * This class executes the SegmentReplicationPressureIT suite with remote store integration enabled. - * Setup is similar to SegmentReplicationPressureIT but this also enables the segment replication using remote store which - * is behind SEGMENT_REPLICATION_EXPERIMENTAL flag. */ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class SegmentReplicationWithRemoteStorePressureIT extends SegmentReplicationPressureIT { private static final String REPOSITORY_NAME = "test-remote-store-repo"; + protected Path absolutePath; @Override - public Settings indexSettings() { - return Settings.builder() - .put(super.indexSettings()) - .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) - .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, REPOSITORY_NAME) - .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, REPOSITORY_NAME) - .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) - .build(); + protected boolean segmentReplicationWithRemoteEnabled() { + return true; } @Override - protected Settings featureFlagSettings() { + protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() - .put(super.featureFlagSettings()) - .put(FeatureFlags.REMOTE_STORE, "true") - .put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL, "true") + .put(super.nodeSettings(nodeOrdinal)) + .put(remoteStoreClusterSettings(REPOSITORY_NAME, absolutePath)) .build(); } @Before public void setup() { + absolutePath = randomRepoPath().toAbsolutePath(); internalCluster().startClusterManagerOnlyNode(); - Path absolutePath = randomRepoPath().toAbsolutePath(); - assertAcked( - clusterAdmin().preparePutRepository(REPOSITORY_NAME).setType("fs").setSettings(Settings.builder().put("location", absolutePath)) - ); } @After public void teardown() { - assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME)); + clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).get(); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartFileCorruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartFileCorruptionIT.java index 529e84d281476..7112b266840ac 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartFileCorruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartFileCorruptionIT.java @@ -8,31 +8,21 @@ package org.opensearch.remotestore.multipart; -import org.junit.After; -import org.junit.Before; -import org.opensearch.action.index.IndexResponse; import org.opensearch.action.support.IndicesOptions; import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexModule; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.plugins.Plugin; -import org.opensearch.remotestore.multipart.mocks.MockFsRepository; +import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; import org.opensearch.remotestore.multipart.mocks.MockFsRepositoryPlugin; -import org.opensearch.test.OpenSearchIntegTestCase; -import java.nio.file.Path; import 
java.util.Collection; import java.util.stream.Collectors; import java.util.stream.Stream; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +public class RemoteStoreMultipartFileCorruptionIT extends RemoteStoreBaseIntegTestCase { -public class RemoteStoreMultipartFileCorruptionIT extends OpenSearchIntegTestCase { - - protected static final String REPOSITORY_NAME = "test-remore-store-repo"; private static final String INDEX_NAME = "remote-store-test-idx-1"; @Override @@ -40,36 +30,6 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { return Stream.concat(super.nodePlugins().stream(), Stream.of(MockFsRepositoryPlugin.class)).collect(Collectors.toList()); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_STORE, "true").build(); - } - - @Before - public void setup() { - internalCluster().startClusterManagerOnlyNode(); - Path absolutePath = randomRepoPath().toAbsolutePath(); - putRepository(absolutePath); - } - - protected void putRepository(Path path) { - assertAcked( - clusterAdmin().preparePutRepository(REPOSITORY_NAME) - .setType(MockFsRepositoryPlugin.TYPE) - .setSettings( - Settings.builder() - .put("location", path) - // custom setting for MockFsRepositoryPlugin - .put(MockFsRepository.TRIGGER_DATA_INTEGRITY_FAILURE.getKey(), true) - ) - ); - } - - @After - public void teardown() { - assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME)); - } - protected Settings remoteStoreIndexSettings() { return Settings.builder() .put(super.indexSettings()) @@ -78,26 +38,16 @@ protected Settings remoteStoreIndexSettings() { .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) - .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) - .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, REPOSITORY_NAME) - .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, REPOSITORY_NAME) .build(); } - private IndexResponse indexSingleDoc() { - return client().prepareIndex(INDEX_NAME) - .setId(UUIDs.randomBase64UUID()) - .setSource(randomAlphaOfLength(5), randomAlphaOfLength(5)) - .get(); - } - public void testLocalFileCorruptionDuringUpload() { internalCluster().startDataOnlyNodes(1); createIndex(INDEX_NAME, remoteStoreIndexSettings()); ensureYellowAndNoInitializingShards(INDEX_NAME); ensureGreen(INDEX_NAME); - indexSingleDoc(); + indexSingleDoc(INDEX_NAME); client().admin() .indices() diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java index a523d5c0f5470..3dfde6f472525 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java @@ -8,31 +8,148 @@ package org.opensearch.remotestore.multipart; +import org.opensearch.client.Client; +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.plugins.Plugin; import org.opensearch.remotestore.RemoteStoreIT; import 
org.opensearch.remotestore.multipart.mocks.MockFsRepositoryPlugin; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.junit.Before; import java.nio.file.Path; import java.util.Collection; +import java.util.Locale; +import java.util.Map; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; public class RemoteStoreMultipartIT extends RemoteStoreIT { + Path repositoryLocation; + boolean compress; + boolean overrideBuildRepositoryMetadata; + @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Stream.concat(super.nodePlugins().stream(), Stream.of(MockFsRepositoryPlugin.class)).collect(Collectors.toList()); } @Override - protected void putRepository(Path path) { + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put( + remoteStoreClusterSettings( + REPOSITORY_NAME, + segmentRepoPath, + MockFsRepositoryPlugin.TYPE, + REPOSITORY_2_NAME, + translogRepoPath, + MockFsRepositoryPlugin.TYPE + ) + ) + .build(); + } + + @Before + public void setup() { + clusterSettingsSuppliedByTest = true; + overrideBuildRepositoryMetadata = false; + repositoryLocation = randomRepoPath(); + compress = randomBoolean(); + } + + @Override + public RepositoryMetadata buildRepositoryMetadata(DiscoveryNode node, String name) { + if (overrideBuildRepositoryMetadata) { + Map<String, String> nodeAttributes = node.getAttributes(); + String type = nodeAttributes.get(String.format(Locale.getDefault(), REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, name)); + + String settingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + name + ); + Map<String, String> settingsMap = node.getAttributes() + .keySet() + .stream() + .filter(key -> key.startsWith(settingsAttributeKeyPrefix)) + .collect(Collectors.toMap(key -> key.replace(settingsAttributeKeyPrefix, ""), key -> node.getAttributes().get(key))); + + Settings.Builder settings = Settings.builder(); + settingsMap.entrySet().forEach(entry -> settings.put(entry.getKey(), entry.getValue())); + settings.put(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING.getKey(), true); + + if (name.equals(REPOSITORY_NAME)) { + settings.put("location", repositoryLocation) + .put("compress", compress) + .put("max_remote_upload_bytes_per_sec", "1kb") + .put("chunk_size", 100, ByteSizeUnit.BYTES); + return new RepositoryMetadata(name, MockFsRepositoryPlugin.TYPE, settings.build()); + } + + return new RepositoryMetadata(name, type, settings.build()); + } else { + return super.buildRepositoryMetadata(node, name); + } + + } + + public void testRateLimitedRemoteUploads() throws Exception { + clusterSettingsSuppliedByTest = true; + overrideBuildRepositoryMetadata = true; + Settings.Builder clusterSettings = Settings.builder() + .put(remoteStoreClusterSettings(REPOSITORY_NAME, repositoryLocation, REPOSITORY_2_NAME, repositoryLocation)); + clusterSettings.put( + String.format(Locale.getDefault(), 
"node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, REPOSITORY_NAME), + MockFsRepositoryPlugin.TYPE + ); + internalCluster().startNode(clusterSettings.build()); + Client client = client(); + logger.info("--> updating repository"); assertAcked( - clusterAdmin().preparePutRepository(REPOSITORY_NAME) + client.admin() + .cluster() + .preparePutRepository(REPOSITORY_NAME) .setType(MockFsRepositoryPlugin.TYPE) - .setSettings(Settings.builder().put("location", path)) + .setSettings( + Settings.builder() + .put("location", repositoryLocation) + .put("compress", compress) + .put("max_remote_upload_bytes_per_sec", "1kb") + .put("chunk_size", 100, ByteSizeUnit.BYTES) + ) ); + + createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); + ensureGreen(); + + logger.info("--> indexing some data"); + for (int i = 0; i < 10; i++) { + index(INDEX_NAME, "_doc", Integer.toString(i), "foo", "bar" + i); + } + refresh(); + // check if throttling is active + assertBusy(() -> { + long uploadPauseTime = 0L; + for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) { + uploadPauseTime += repositoriesService.repository(REPOSITORY_NAME).getRemoteUploadThrottleTimeInNanos(); + } + assertThat(uploadPauseTime, greaterThan(TimeValue.timeValueSeconds(randomIntBetween(5, 10)).nanos())); + }, 30, TimeUnit.SECONDS); + + assertThat(client.prepareSearch(INDEX_NAME).setSize(0).get().getHits().getTotalHits().value, equalTo(10L)); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsAsyncBlobContainer.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsAsyncBlobContainer.java new file mode 100644 index 0000000000000..36987ac2d4991 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsAsyncBlobContainer.java @@ -0,0 +1,149 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.remotestore.multipart.mocks; + +import org.apache.lucene.index.CorruptIndexException; +import org.opensearch.common.StreamContext; +import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.blobstore.fs.FsBlobContainer; +import org.opensearch.common.blobstore.fs.FsBlobStore; +import org.opensearch.common.blobstore.stream.read.ReadContext; +import org.opensearch.common.blobstore.stream.write.WriteContext; +import org.opensearch.common.io.InputStreamContainer; +import org.opensearch.core.action.ActionListener; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; + +public class MockFsAsyncBlobContainer extends FsBlobContainer implements AsyncMultiStreamBlobContainer { + + private static final int TRANSFER_TIMEOUT_MILLIS = 30000; + + private final boolean triggerDataIntegrityFailure; + + public MockFsAsyncBlobContainer(FsBlobStore blobStore, BlobPath blobPath, Path path, boolean triggerDataIntegrityFailure) { + super(blobStore, blobPath, path); + this.triggerDataIntegrityFailure = triggerDataIntegrityFailure; + } + + @Override + public void asyncBlobUpload(WriteContext writeContext, ActionListener<Void> completionListener) throws IOException { + + int nParts = 10; + long partSize = writeContext.getFileSize() / nParts; + StreamContext streamContext = writeContext.getStreamProvider(partSize); + final Path file = path.resolve(writeContext.getFileName()); + byte[] buffer = new byte[(int) writeContext.getFileSize()]; + AtomicLong totalContentRead = new AtomicLong(); + CountDownLatch latch = new CountDownLatch(streamContext.getNumberOfParts()); + for (int partIdx = 0; partIdx < streamContext.getNumberOfParts(); partIdx++) { + int finalPartIdx = partIdx; + Thread thread = new Thread(() -> { + try { + InputStreamContainer inputStreamContainer = streamContext.provideStream(finalPartIdx); + InputStream inputStream = inputStreamContainer.getInputStream(); + long remainingContentLength = inputStreamContainer.getContentLength(); + long offset = partSize * finalPartIdx; + while (remainingContentLength > 0) { + int readContentLength = inputStream.read(buffer, (int) offset, (int) remainingContentLength); + totalContentRead.addAndGet(readContentLength); + remainingContentLength -= readContentLength; + offset += readContentLength; + } + inputStream.close(); + } catch (IOException e) { + completionListener.onFailure(e); + } finally { + latch.countDown(); + } + }); + thread.start(); + } + try { + if (!latch.await(TRANSFER_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS)) { + throw new IOException("Timed out waiting for file transfer to complete for " + writeContext.getFileName()); + } + } catch (InterruptedException e) { + throw new IOException("Await interrupted on CountDownLatch, transfer failed for " + writeContext.getFileName()); + } + try (OutputStream outputStream = Files.newOutputStream(file, StandardOpenOption.CREATE_NEW)) { + outputStream.write(buffer); + } + if (writeContext.getFileSize() != totalContentRead.get()) { + throw new IOException( + "Incorrect content length read for file " + + writeContext.getFileName() + + ", actual 
file size: " + + writeContext.getFileSize() + + ", bytes read: " + + totalContentRead.get() + ); + } + + try { + // bulks need to succeed for segment files to be generated + if (isSegmentFile(writeContext.getFileName()) && triggerDataIntegrityFailure) { + completionListener.onFailure( + new RuntimeException( + new CorruptIndexException( + "Data integrity check failure for file: " + writeContext.getFileName(), + writeContext.getFileName() + ) + ) + ); + } else { + writeContext.getUploadFinalizer().accept(true); + completionListener.onResponse(null); + } + } catch (Exception e) { + completionListener.onFailure(e); + } + + } + + @Override + public void readBlobAsync(String blobName, ActionListener<ReadContext> listener) { + new Thread(() -> { + try { + long contentLength = listBlobs().get(blobName).length(); + long partSize = contentLength / 10; + int numberOfParts = (int) ((contentLength % partSize) == 0 ? contentLength / partSize : (contentLength / partSize) + 1); + List<ReadContext.StreamPartCreator> blobPartStreams = new ArrayList<>(); + for (int partNumber = 0; partNumber < numberOfParts; partNumber++) { + long offset = partNumber * partSize; + InputStreamContainer blobPartStream = new InputStreamContainer(readBlob(blobName, offset, partSize), partSize, offset); + blobPartStreams.add(() -> CompletableFuture.completedFuture(blobPartStream)); + } + ReadContext blobReadContext = new ReadContext(contentLength, blobPartStreams, null); + listener.onResponse(blobReadContext); + } catch (Exception e) { + listener.onFailure(e); + } + }).start(); + } + + public boolean remoteIntegrityCheckSupported() { + return true; + } + + private boolean isSegmentFile(String filename) { + return !filename.endsWith(".tlog") && !filename.endsWith(".ckp"); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsBlobStore.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsBlobStore.java index f1d9fbba84528..77b0cac922014 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsBlobStore.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsBlobStore.java @@ -28,7 +28,7 @@ public MockFsBlobStore(int bufferSizeInBytes, Path path, boolean readonly, boole @Override public BlobContainer blobContainer(BlobPath path) { try { - return new MockFsVerifyingBlobContainer(this, path, buildAndCreate(path), triggerDataIntegrityFailure); + return new MockFsAsyncBlobContainer(this, path, buildAndCreate(path), triggerDataIntegrityFailure); } catch (IOException ex) { throw new OpenSearchException("failed to create blob container", ex); } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsVerifyingBlobContainer.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsVerifyingBlobContainer.java deleted file mode 100644 index 8f2814eb7c4c4..0000000000000 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsVerifyingBlobContainer.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.remotestore.multipart.mocks; - -import org.apache.lucene.index.CorruptIndexException; -import org.opensearch.action.ActionListener; -import org.opensearch.common.blobstore.VerifyingMultiStreamBlobContainer; -import org.opensearch.common.io.InputStreamContainer; -import org.opensearch.common.StreamContext; -import org.opensearch.common.blobstore.BlobPath; -import org.opensearch.common.blobstore.fs.FsBlobContainer; -import org.opensearch.common.blobstore.fs.FsBlobStore; -import org.opensearch.common.blobstore.stream.write.WriteContext; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.StandardOpenOption; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; - -public class MockFsVerifyingBlobContainer extends FsBlobContainer implements VerifyingMultiStreamBlobContainer { - - private static final int TRANSFER_TIMEOUT_MILLIS = 30000; - - private final boolean triggerDataIntegrityFailure; - - public MockFsVerifyingBlobContainer(FsBlobStore blobStore, BlobPath blobPath, Path path, boolean triggerDataIntegrityFailure) { - super(blobStore, blobPath, path); - this.triggerDataIntegrityFailure = triggerDataIntegrityFailure; - } - - @Override - public void asyncBlobUpload(WriteContext writeContext, ActionListener<Void> completionListener) throws IOException { - - int nParts = 10; - long partSize = writeContext.getFileSize() / nParts; - StreamContext streamContext = writeContext.getStreamProvider(partSize); - final Path file = path.resolve(writeContext.getFileName()); - byte[] buffer = new byte[(int) writeContext.getFileSize()]; - AtomicLong totalContentRead = new AtomicLong(); - CountDownLatch latch = new CountDownLatch(streamContext.getNumberOfParts()); - for (int partIdx = 0; partIdx < streamContext.getNumberOfParts(); partIdx++) { - int finalPartIdx = partIdx; - Thread thread = new Thread(() -> { - try { - InputStreamContainer inputStreamContainer = streamContext.provideStream(finalPartIdx); - InputStream inputStream = inputStreamContainer.getInputStream(); - long remainingContentLength = inputStreamContainer.getContentLength(); - long offset = partSize * finalPartIdx; - while (remainingContentLength > 0) { - int readContentLength = inputStream.read(buffer, (int) offset, (int) remainingContentLength); - totalContentRead.addAndGet(readContentLength); - remainingContentLength -= readContentLength; - offset += readContentLength; - } - inputStream.close(); - } catch (IOException e) { - completionListener.onFailure(e); - } finally { - latch.countDown(); - } - }); - thread.start(); - } - try { - if (!latch.await(TRANSFER_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS)) { - throw new IOException("Timed out waiting for file transfer to complete for " + writeContext.getFileName()); - } - } catch (InterruptedException e) { - throw new IOException("Await interrupted on CountDownLatch, transfer failed for " + writeContext.getFileName()); - } - try (OutputStream outputStream = Files.newOutputStream(file, StandardOpenOption.CREATE_NEW)) { - outputStream.write(buffer); - } - if (writeContext.getFileSize() != totalContentRead.get()) { - throw new IOException( - "Incorrect content length read for file " - + writeContext.getFileName() - + ", actual file size: " - + writeContext.getFileSize() - + ", bytes read: " - + totalContentRead.get() - ); - } - - try { - // bulks need to succeed for segment files 
to be generated - if (isSegmentFile(writeContext.getFileName()) && triggerDataIntegrityFailure) { - completionListener.onFailure( - new RuntimeException( - new CorruptIndexException( - "Data integrity check failure for file: " + writeContext.getFileName(), - writeContext.getFileName() - ) - ) - ); - } else { - writeContext.getUploadFinalizer().accept(true); - completionListener.onResponse(null); - } - } catch (Exception e) { - completionListener.onFailure(e); - } - - } - - private boolean isSegmentFile(String filename) { - return !filename.endsWith(".tlog") && !filename.endsWith(".ckp"); - } -} diff --git a/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java index 84178f0255d81..b8415f4b41815 100644 --- a/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java @@ -39,8 +39,8 @@ import org.opensearch.plugins.Plugin; import org.opensearch.repositories.fs.FsRepository; import org.opensearch.snapshots.mockstore.MockRepository; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; import java.util.Collection; import java.util.Collections; @@ -108,4 +108,16 @@ public void testUpdateRepository() { final Repository updatedRepository = repositoriesService.repository(repositoryName); assertThat(updatedRepository, updated ? not(sameInstance(originalRepository)) : sameInstance(originalRepository)); } + + public void testSystemRepositoryCantBeCreated() { + internalCluster(); + final String repositoryName = "test-repo"; + final Client client = client(); + final Settings.Builder repoSettings = Settings.builder().put("system_repository", true).put("location", randomRepoPath()); + + assertThrows( + RepositoryException.class, + () -> client.admin().cluster().preparePutRepository(repositoryName).setType(FsRepository.TYPE).setSettings(repoSettings).get() + ); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/repositories/fs/FsBlobStoreRepositoryIT.java b/server/src/internalClusterTest/java/org/opensearch/repositories/fs/FsBlobStoreRepositoryIT.java index 4e7f2ae486c93..9057ef900efbd 100644 --- a/server/src/internalClusterTest/java/org/opensearch/repositories/fs/FsBlobStoreRepositoryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/repositories/fs/FsBlobStoreRepositoryIT.java @@ -35,11 +35,11 @@ import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.fs.FsBlobStore; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.repositories.blobstore.OpenSearchBlobStoreRepositoryIntegTestCase; import java.io.IOException; diff --git a/server/src/internalClusterTest/java/org/opensearch/routing/AliasRoutingIT.java b/server/src/internalClusterTest/java/org/opensearch/routing/AliasRoutingIT.java index 274133c2c8239..299c2da21c222 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/routing/AliasRoutingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/routing/AliasRoutingIT.java @@ -44,7 +44,6 @@ import org.opensearch.test.OpenSearchIntegTestCase; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; - import static org.hamcrest.Matchers.equalTo; /** diff --git a/server/src/internalClusterTest/java/org/opensearch/routing/PartitionedRoutingIT.java b/server/src/internalClusterTest/java/org/opensearch/routing/PartitionedRoutingIT.java index 3fd0c954398e8..64df858a18c9d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/routing/PartitionedRoutingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/routing/PartitionedRoutingIT.java @@ -38,13 +38,14 @@ import org.opensearch.common.settings.Settings; import org.opensearch.index.query.QueryBuilders; import org.opensearch.test.OpenSearchIntegTestCase; -import org.mockito.internal.util.collections.Sets; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; +import org.mockito.internal.util.collections.Sets; + public class PartitionedRoutingIT extends OpenSearchIntegTestCase { public void testVariousPartitionSizes() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/routing/SimpleRoutingIT.java b/server/src/internalClusterTest/java/org/opensearch/routing/SimpleRoutingIT.java index f43ddfcdfc47b..80e82fa387c96 100644 --- a/server/src/internalClusterTest/java/org/opensearch/routing/SimpleRoutingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/routing/SimpleRoutingIT.java @@ -54,9 +54,9 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.index.query.QueryBuilders; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.rest.RestStatus; +import org.opensearch.index.query.QueryBuilders; import org.opensearch.test.OpenSearchIntegTestCase; import static org.hamcrest.Matchers.containsString; diff --git a/server/src/internalClusterTest/java/org/opensearch/script/ScriptCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/script/ScriptCacheIT.java index f6e6b7994db65..42c257eb79eff 100644 --- a/server/src/internalClusterTest/java/org/opensearch/script/ScriptCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/script/ScriptCacheIT.java @@ -8,22 +8,25 @@ package org.opensearch.script; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.OpenSearchException; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.rest.RestStatus; import org.opensearch.index.MockEngineFactoryPlugin; import org.opensearch.index.mapper.MockFieldFilterPlugin; import org.opensearch.node.NodeMocksPlugin; import org.opensearch.plugins.Plugin; -import org.opensearch.core.rest.RestStatus; import org.opensearch.search.MockSearchService; import org.opensearch.test.MockHttpTransport; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.TestGeoShapeFieldMapperPlugin; import org.opensearch.test.store.MockFSIndexStore; import org.opensearch.test.transport.MockTransportService; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import 
java.util.Collections; import java.util.HashMap; @@ -31,9 +34,21 @@ import java.util.concurrent.ExecutionException; import java.util.function.Function; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.apache.logging.log4j.core.util.Throwables.getRootCause; -public class ScriptCacheIT extends OpenSearchIntegTestCase { +public class ScriptCacheIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + public ScriptCacheIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } protected Settings nodeSettings(int nodeOrdinal) { Settings.Builder builder = Settings.builder() diff --git a/server/src/internalClusterTest/java/org/opensearch/script/StoredScriptsIT.java b/server/src/internalClusterTest/java/org/opensearch/script/StoredScriptsIT.java index 448bbf5e883ec..71b48a8357e80 100644 --- a/server/src/internalClusterTest/java/org/opensearch/script/StoredScriptsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/script/StoredScriptsIT.java @@ -31,9 +31,9 @@ package org.opensearch.script; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; @@ -69,7 +69,7 @@ public void testBasics() { .cluster() .preparePutStoredScript() .setId("foobar") - .setContent(new BytesArray("{\"script\": {\"lang\": \"" + LANG + "\", \"source\": \"1\"} }"), XContentType.JSON) + .setContent(new BytesArray("{\"script\": {\"lang\": \"" + LANG + "\", \"source\": \"1\"} }"), MediaTypeRegistry.JSON) ); String script = client().admin().cluster().prepareGetStoredScript("foobar").get().getSource().getSource(); assertNotNull(script); @@ -81,7 +81,12 @@ public void testBasics() { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> client().admin().cluster().preparePutStoredScript().setId("id#").setContent(new BytesArray("{}"), XContentType.JSON).get() + () -> client().admin() + .cluster() + .preparePutStoredScript() + .setId("id#") + .setContent(new BytesArray("{}"), MediaTypeRegistry.JSON) + .get() ); assertEquals("Validation Failed: 1: id cannot contain '#' for stored script;", e.getMessage()); } @@ -95,7 +100,7 @@ public void testMaxScriptSize() { .setId("foobar") .setContent( new BytesArray("{\"script\": { \"lang\": \"" + LANG + "\"," + " \"source\":\"0123456789abcdef\"} }"), - XContentType.JSON + MediaTypeRegistry.JSON ) .get() ); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/ConcurrentSegmentSearchCancellationIT.java b/server/src/internalClusterTest/java/org/opensearch/search/ConcurrentSegmentSearchCancellationIT.java deleted file mode 100644 index 3c50627e342dd..0000000000000 --- a/server/src/internalClusterTest/java/org/opensearch/search/ConcurrentSegmentSearchCancellationIT.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * 
compatible open source license. - */ - -package org.opensearch.search; - -import org.opensearch.common.settings.FeatureFlagSettings; -import org.opensearch.common.settings.Setting; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; - -public class ConcurrentSegmentSearchCancellationIT extends SearchCancellationIT { - @Override - protected Settings featureFlagSettings() { - Settings.Builder featureSettings = Settings.builder(); - for (Setting builtInFlag : FeatureFlagSettings.BUILT_IN_FEATURE_FLAGS) { - featureSettings.put(builtInFlag.getKey(), builtInFlag.getDefaultRaw(Settings.EMPTY)); - } - featureSettings.put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true"); - return featureSettings.build(); - } -} diff --git a/server/src/internalClusterTest/java/org/opensearch/search/ConcurrentSegmentSearchTimeoutIT.java b/server/src/internalClusterTest/java/org/opensearch/search/ConcurrentSegmentSearchTimeoutIT.java deleted file mode 100644 index c19f762679fb0..0000000000000 --- a/server/src/internalClusterTest/java/org/opensearch/search/ConcurrentSegmentSearchTimeoutIT.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.search; - -import org.opensearch.common.settings.FeatureFlagSettings; -import org.opensearch.common.settings.Setting; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; - -public class ConcurrentSegmentSearchTimeoutIT extends SearchTimeoutIT { - - @Override - protected Settings featureFlagSettings() { - Settings.Builder featureSettings = Settings.builder(); - for (Setting builtInFlag : FeatureFlagSettings.BUILT_IN_FEATURE_FLAGS) { - featureSettings.put(builtInFlag.getKey(), builtInFlag.getDefaultRaw(Settings.EMPTY)); - } - featureSettings.put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true"); - return featureSettings.build(); - } -} diff --git a/server/src/internalClusterTest/java/org/opensearch/search/SearchCancellationIT.java b/server/src/internalClusterTest/java/org/opensearch/search/SearchCancellationIT.java index 9db0ac4590efa..5a19e2b841c08 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/SearchCancellationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/SearchCancellationIT.java @@ -32,11 +32,10 @@ package org.opensearch.search; -import org.apache.logging.log4j.LogManager; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.junit.After; +import org.apache.logging.log4j.LogManager; import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionFuture; import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.opensearch.action.bulk.BulkRequestBuilder; @@ -48,22 +47,26 @@ import org.opensearch.action.search.SearchScrollAction; import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.action.support.WriteRequest; -import org.opensearch.common.Strings; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.Strings; +import org.opensearch.core.tasks.TaskCancelledException; +import 
org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.plugins.Plugin; import org.opensearch.plugins.PluginsService; import org.opensearch.script.MockScriptPlugin; import org.opensearch.script.Script; import org.opensearch.script.ScriptType; import org.opensearch.search.lookup.LeafFieldsLookup; -import org.opensearch.tasks.TaskCancelledException; import org.opensearch.tasks.TaskInfo; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.transport.TransportException; +import org.junit.After; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashSet; @@ -77,6 +80,7 @@ import static org.opensearch.action.search.TransportSearchAction.SEARCH_CANCEL_AFTER_TIME_INTERVAL_SETTING_KEY; import static org.opensearch.index.query.QueryBuilders.scriptQuery; import static org.opensearch.search.SearchCancellationIT.ScriptedBlockPlugin.SCRIPT_NAME; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.SearchService.NO_TIMEOUT; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFailures; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; @@ -86,12 +90,24 @@ import static org.hamcrest.Matchers.notNullValue; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE) -public class SearchCancellationIT extends OpenSearchIntegTestCase { +public class SearchCancellationIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private TimeValue requestCancellationTimeout = TimeValue.timeValueSeconds(1); private TimeValue clusterCancellationTimeout = TimeValue.timeValueMillis(1500); private TimeValue keepAlive = TimeValue.timeValueSeconds(5); + public SearchCancellationIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Collections.singleton(ScriptedBlockPlugin.class); @@ -229,7 +245,7 @@ public void testCancellationDuringQueryPhase() throws Exception { awaitForBlock(plugins); cancelSearch(SearchAction.NAME); disableBlocks(plugins); - logger.info("Segments {}", Strings.toString(XContentType.JSON, client().admin().indices().prepareSegments("test").get())); + logger.info("Segments {}", Strings.toString(MediaTypeRegistry.JSON, client().admin().indices().prepareSegments("test").get())); ensureSearchWasCancelled(searchResponse); } @@ -283,7 +299,7 @@ public void testCancellationDuringFetchPhase() throws Exception { awaitForBlock(plugins); cancelSearch(SearchAction.NAME); disableBlocks(plugins); - logger.info("Segments {}", Strings.toString(XContentType.JSON, client().admin().indices().prepareSegments("test").get())); + logger.info("Segments {}", Strings.toString(MediaTypeRegistry.JSON, client().admin().indices().prepareSegments("test").get())); ensureSearchWasCancelled(searchResponse); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java b/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java index aa8ef3f29c989..ef7da395d2151 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java @@ -32,6 +32,8 @@ package org.opensearch.search; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.OpenSearchException; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; @@ -41,7 +43,9 @@ import org.opensearch.script.Script; import org.opensearch.script.ScriptType; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Map; @@ -50,10 +54,22 @@ import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.opensearch.index.query.QueryBuilders.scriptQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.SearchTimeoutIT.ScriptedTimeoutPlugin.SCRIPT_NAME; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE) -public class SearchTimeoutIT extends OpenSearchIntegTestCase { +public class SearchTimeoutIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + public SearchTimeoutIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { @@ -71,6 +87,7 @@ public void testSimpleTimeout() throws Exception { client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value").get(); } refresh("test"); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setTimeout(new TimeValue(5, TimeUnit.MILLISECONDS)) @@ -82,12 +99,11 @@ public void testSimpleTimeout() throws Exception { } public void testSimpleDoesNotTimeout() throws Exception { - final int numDocs = 10; + final int numDocs = 9; for (int i = 0; i < numDocs; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value").get(); + client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); } - refresh("test"); - + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setTimeout(new TimeValue(10000, TimeUnit.SECONDS)) .setQuery(scriptQuery(new Script(ScriptType.INLINE, "mockscript", SCRIPT_NAME, Collections.emptyMap()))) @@ -100,7 +116,7 @@ public void testSimpleDoesNotTimeout() throws Exception { public void testPartialResultsIntolerantTimeout() throws Exception { client().prepareIndex("test").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - + indexRandomForConcurrentSearch("test"); OpenSearchException ex = expectThrows( OpenSearchException.class, () -> client().prepareSearch("test") diff --git a/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java index 28a4db5ecaf9d..f08d3e871c579 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java @@ -8,7 +8,6 @@ package org.opensearch.search; -import org.junit.Assert; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -17,6 +16,7 @@ import org.opensearch.action.get.MultiGetRequest; import org.opensearch.action.get.MultiGetResponse; import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.action.search.SearchPhaseName; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; @@ -27,11 +27,11 @@ import org.opensearch.cluster.routing.WeightedRoutingStats; import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.rest.RestStatus; import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.search.stats.SearchStats; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.plugins.Plugin; -import org.opensearch.core.rest.RestStatus; import org.opensearch.search.aggregations.Aggregations; import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.snapshots.mockstore.MockRepository; @@ -39,6 +39,7 @@ import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.disruption.NetworkDisruption; import org.opensearch.test.transport.MockTransportService; +import org.junit.Assert; import java.io.IOException; import 
java.util.ArrayList; @@ -56,10 +57,12 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.opensearch.action.search.SearchRequestStats.SEARCH_REQUEST_STATS_ENABLED_KEY; +import static org.opensearch.search.aggregations.AggregationBuilders.terms; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; -import static org.opensearch.search.aggregations.AggregationBuilders.terms; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, minNumDataNodes = 3) public class SearchWeightedRoutingIT extends OpenSearchIntegTestCase { @@ -74,6 +77,7 @@ public void testSearchWithWRRShardRouting() throws IOException { .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "zone" + ".values", "a,b,c") .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), "zone") .put("cluster.routing.weighted.fail_open", false) + .put(SEARCH_REQUEST_STATS_ENABLED_KEY, true) .build(); logger.info("--> starting 6 nodes on different zones"); @@ -180,12 +184,39 @@ public void testSearchWithWRRShardRouting() throws IOException { assertFalse(!hitNodes.contains(nodeId)); } nodeStats = client().admin().cluster().prepareNodesStats().execute().actionGet(); + int num = 0; + int coordNumber = 0; for (NodeStats stat : nodeStats.getNodes()) { SearchStats.Stats searchStats = stat.getIndices().getSearch().getTotal(); + if (searchStats.getRequestStatsLongHolder() + .getRequestStatsHolder() + .get(SearchPhaseName.QUERY.getName()) + .getTimeInMillis() > 0) { + assertThat( + searchStats.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.QUERY.getName()).getTotal(), + greaterThan(0L) + ); + assertThat( + searchStats.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.FETCH.getName()).getTimeInMillis(), + greaterThan(0L) + ); + assertThat( + searchStats.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.FETCH.getName()).getTotal(), + greaterThan(0L) + ); + assertThat( + searchStats.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.EXPAND.getName()).getTotal(), + greaterThan(0L) + ); + coordNumber += 1; + } Assert.assertTrue(searchStats.getQueryCount() > 0L); Assert.assertTrue(searchStats.getFetchCount() > 0L); + num++; } + assertThat(coordNumber, greaterThan(0)); + assertThat(num, greaterThan(0)); } private Map<String, List<String>> setupCluster(int nodeCountPerAZ, Settings commonSettings) { @@ -449,6 +480,7 @@ public void testShardRoutingWithNetworkDisruption_FailOpenDisabled() throws Exce * Assertions are put to make sure such shard search requests are served by data node in zone c. 
* @throws IOException throws exception */ + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/10673") public void testShardRoutingWithNetworkDisruption_FailOpenEnabled() throws Exception { Settings commonSettings = Settings.builder() @@ -470,8 +502,9 @@ public void testShardRoutingWithNetworkDisruption_FailOpenEnabled() throws Excep logger.info("--> creating network partition disruption"); final String clusterManagerNode1 = internalCluster().getClusterManagerName(); - Set<String> nodesInOneSide = Stream.of(clusterManagerNode1, nodeMap.get("b").get(0)).collect(Collectors.toCollection(HashSet::new)); - Set<String> nodesInOtherSide = Stream.of(nodeMap.get("a").get(0)).collect(Collectors.toCollection(HashSet::new)); + Set<String> nodesInOneSide = Stream.of(nodeMap.get("a").get(0)).collect(Collectors.toCollection(HashSet::new)); + Set<String> nodesInOtherSide = Stream.of(clusterManagerNode1, nodeMap.get("b").get(0), nodeMap.get("c").get(0)) + .collect(Collectors.toCollection(HashSet::new)); NetworkDisruption networkDisruption = new NetworkDisruption( new NetworkDisruption.TwoPartitions(nodesInOneSide, nodesInOtherSide), @@ -839,8 +872,7 @@ private void assertSearchInAZ(String az) { SearchStats.Stats searchStats = stat.getIndices().getSearch().getTotal(); if (stat.getNode().isDataNode()) { if (stat.getNode().getId().equals(dataNodeId)) { - Assert.assertTrue(searchStats.getFetchCount() > 0L); - Assert.assertTrue(searchStats.getQueryCount() > 0L); + Assert.assertTrue(searchStats.getFetchCount() > 0L || searchStats.getQueryCount() > 0L); } } } @@ -914,7 +946,6 @@ public void testSearchAggregationWithNetworkDisruption_FailOpenEnabled() throws } logger.info("--> network disruption is stopped"); - networkDisruption.stopDisrupting(); for (int i = 0; i < 50; i++) { try { @@ -931,6 +962,8 @@ public void testSearchAggregationWithNetworkDisruption_FailOpenEnabled() throws fail("search should not fail"); } } + networkDisruption.stopDisrupting(); + assertSearchInAZ("b"); assertSearchInAZ("c"); assertNoSearchInAZ("a"); @@ -946,6 +979,7 @@ public void testSearchAggregationWithNetworkDisruption_FailOpenEnabled() throws * MultiGet with fail open enabled. 
No request failure on network disruption * @throws IOException throws exception */ + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/10755") public void testMultiGetWithNetworkDisruption_FailOpenEnabled() throws Exception { Settings commonSettings = Settings.builder() @@ -1090,6 +1124,7 @@ public void testMultiGetWithNetworkDisruption_FailOpenDisabled() throws Exceptio /** * Assert that preference search with custom string doesn't hit a node in weighed away az */ + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8030") public void testStrictWeightedRoutingWithCustomString() { Settings commonSettings = Settings.builder() .put("cluster.routing.allocation.awareness.attributes", "zone") diff --git a/server/src/internalClusterTest/java/org/opensearch/search/SearchWithRejectionsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/SearchWithRejectionsIT.java index 30e6aa4cd31fc..b7f71b00d802f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/SearchWithRejectionsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/SearchWithRejectionsIT.java @@ -32,21 +32,39 @@ package org.opensearch.search; -import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchType; import org.opensearch.common.settings.Settings; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; +import java.util.Arrays; +import java.util.Collection; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.hamcrest.Matchers.equalTo; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE) -public class SearchWithRejectionsIT extends OpenSearchIntegTestCase { +public class SearchWithRejectionsIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public SearchWithRejectionsIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override public Settings nodeSettings(int nodeOrdinal) { return Settings.builder() diff --git a/server/src/internalClusterTest/java/org/opensearch/search/StressSearchServiceReaperIT.java b/server/src/internalClusterTest/java/org/opensearch/search/StressSearchServiceReaperIT.java index 42e515cca9b6b..7ed3526cabe3f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/StressSearchServiceReaperIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/StressSearchServiceReaperIT.java @@ -31,23 +31,39 @@ package org.opensearch.search; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.tests.util.English; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import 
org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; +import java.util.Arrays; +import java.util.Collection; import java.util.concurrent.ExecutionException; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.OpenSearchIntegTestCase.Scope.SUITE; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; @ClusterScope(scope = SUITE) -public class StressSearchServiceReaperIT extends OpenSearchIntegTestCase { +public class StressSearchServiceReaperIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + public StressSearchServiceReaperIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } @Override protected Settings nodeSettings(int nodeOrdinal) { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java index b73b7722f9728..6059abce53c8b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java @@ -32,10 +32,13 @@ package org.opensearch.search.aggregations; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.OpenSearchException; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.search.aggregations.bucket.terms.IncludeExclude; import org.opensearch.search.aggregations.bucket.terms.RareTermsAggregationBuilder; @@ -45,16 +48,20 @@ import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.opensearch.search.aggregations.bucket.terms.TermsAggregatorFactory; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.terms; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class AggregationsIntegrationIT extends OpenSearchIntegTestCase { +public class AggregationsIntegrationIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { static int numDocs; @@ -63,6 +70,18 @@ 
public class AggregationsIntegrationIT extends OpenSearchIntegTestCase { + LARGE_STRING.length() + "] used in the request has exceeded the allowed maximum"; + public AggregationsIntegrationIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(prepareCreate("index").setMapping("f", "type=keyword").get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/CombiIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/CombiIT.java index d35a560b0986c..1826dd69cd804 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/CombiIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/CombiIT.java @@ -32,20 +32,26 @@ package org.opensearch.search.aggregations; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.missing.Missing; import org.opensearch.search.aggregations.bucket.terms.Terms; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; +import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.missing; import static org.opensearch.search.aggregations.AggregationBuilders.terms; @@ -54,7 +60,19 @@ import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.core.IsNull.notNullValue; -public class CombiIT extends OpenSearchIntegTestCase { +public class CombiIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public CombiIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } /** * Making sure that if there are multiple aggregations, working on the same field, yet require different diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/EquivalenceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/EquivalenceIT.java index 21f833d5430db..302ec3116d187 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/EquivalenceIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/EquivalenceIT.java @@ -32,10 +32,13 @@ package org.opensearch.search.aggregations; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.IndicesOptions; +import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.query.RangeQueryBuilder; @@ -52,11 +55,12 @@ import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.search.aggregations.bucket.terms.TermsAggregatorFactory; import org.opensearch.search.aggregations.metrics.Sum; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.junit.After; import org.junit.Before; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -68,6 +72,7 @@ import java.util.function.Function; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.extendedStats; import static org.opensearch.search.aggregations.AggregationBuilders.filter; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; @@ -88,7 +93,19 @@ * Additional tests that aim at testing more complex aggregation trees on larger random datasets, so that things like * the growth of dynamic arrays is tested. */ -public class EquivalenceIT extends OpenSearchIntegTestCase { +public class EquivalenceIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public EquivalenceIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/FiltersAggsRewriteIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/FiltersAggsRewriteIT.java index e554a3be20528..5926ff9f0cad1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/FiltersAggsRewriteIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/FiltersAggsRewriteIT.java @@ -33,11 +33,11 @@ package org.opensearch.search.aggregations; import org.opensearch.action.search.SearchResponse; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.settings.Settings; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.WrapperQueryBuilder; import org.opensearch.search.aggregations.bucket.filter.FiltersAggregationBuilder; import org.opensearch.search.aggregations.bucket.filter.FiltersAggregator; @@ -59,7 +59,7 @@ public void testWrapperQueryIsRewritten() throws IOException { XContentType xContentType = randomFrom(XContentType.values()); BytesReference bytesReference; - try (XContentBuilder builder = XContentFactory.contentBuilder(xContentType)) { + try (XContentBuilder builder = MediaTypeRegistry.contentBuilder(xContentType)) { builder.startObject(); { builder.startObject("terms"); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MetadataIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MetadataIT.java index f210af7c10fb3..b650855083eed 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MetadataIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MetadataIT.java @@ -32,25 +32,43 @@ package org.opensearch.search.aggregations; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.pipeline.InternalBucketMetricValue; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; +import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.search.aggregations.AggregationBuilders.terms; import static org.opensearch.search.aggregations.PipelineAggregatorBuilders.maxBucket; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; -public class MetadataIT extends OpenSearchIntegTestCase { +public class MetadataIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public 
MetadataIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } public void testMetadataSetOnAggregationResult() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping("name", "type=keyword").get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MissingValueIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MissingValueIT.java index 26bfe59618275..bdd16c7e74dc0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MissingValueIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MissingValueIT.java @@ -32,8 +32,11 @@ package org.opensearch.search.aggregations; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.SearchResponse; import org.opensearch.common.geo.GeoPoint; +import org.opensearch.common.settings.Settings; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.terms.Terms; @@ -43,7 +46,12 @@ import org.opensearch.search.aggregations.metrics.Percentiles; import org.opensearch.search.aggregations.metrics.Stats; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.cardinality; import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; import static org.opensearch.search.aggregations.AggregationBuilders.geoCentroid; @@ -56,7 +64,19 @@ import static org.hamcrest.Matchers.closeTo; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class MissingValueIT extends OpenSearchIntegTestCase { +public class MissingValueIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public MissingValueIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } @Override protected int maximumNumberOfShards() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/AdjacencyMatrixIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/AdjacencyMatrixIT.java index 011ebf8add92a..557ec9a37978d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/AdjacencyMatrixIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/AdjacencyMatrixIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.aggregations.bucket; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.OpenSearchException; import 
org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; @@ -47,15 +49,19 @@ import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.metrics.Avg; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedDynamicSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.adjacencyMatrix; import static org.opensearch.search.aggregations.AggregationBuilders.avg; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; @@ -68,11 +74,23 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class AdjacencyMatrixIT extends OpenSearchIntegTestCase { +public class AdjacencyMatrixIT extends ParameterizedDynamicSettingsOpenSearchIntegTestCase { static int numDocs, numSingleTag1Docs, numSingleTag2Docs, numTag1Docs, numTag2Docs, numMultiTagDocs; static final int MAX_NUM_FILTERS = 3; + public AdjacencyMatrixIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override public void setupSuiteScopeCluster() throws Exception { createIndex("idx"); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/BooleanTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/BooleanTermsIT.java index fc5407c4cade8..9a1efb3336212 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/BooleanTermsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/BooleanTermsIT.java @@ -31,26 +31,46 @@ package org.opensearch.search.aggregations.bucket; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedDynamicSettingsOpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.terms; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.core.IsNull.notNullValue; 
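The BooleanTermsIT hunk that follows applies the same conversion recipe seen throughout this patch: extend ParameterizedStaticSettingsOpenSearchIntegTestCase, add a Settings constructor, and register both values of CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING through @ParametersFactory so every test method runs once with concurrent segment search disabled and once with it enabled. A minimal, self-contained sketch of the recipe (the class name ExampleConcurrentSearchIT and its placeholder test are illustrative, not part of this change):

import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;

import org.opensearch.common.settings.Settings;
import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;

import java.util.Arrays;
import java.util.Collection;

import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;

public class ExampleConcurrentSearchIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase {

    // The randomized-testing runner calls this constructor once per parameter set.
    public ExampleConcurrentSearchIT(Settings staticSettings) {
        super(staticSettings);
    }

    // Each Object[] is one constructor invocation: the whole suite runs twice,
    // once with concurrent segment search off and once with it on.
    @ParametersFactory
    public static Collection<Object[]> parameters() {
        return Arrays.asList(
            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
        );
    }

    public void testPlaceholder() {
        // Test bodies are untouched by the conversion; only the base class,
        // constructor, and parameters() factory are added.
    }
}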
@OpenSearchIntegTestCase.SuiteScopeTestCase -public class BooleanTermsIT extends OpenSearchIntegTestCase { +public class BooleanTermsIT extends ParameterizedDynamicSettingsOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "b_value"; private static final String MULTI_VALUED_FIELD_NAME = "b_values"; static int numSingleTrues, numSingleFalses, numMultiTrues, numMultiFalses; + public BooleanTermsIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override public void setupSuiteScopeCluster() throws Exception { createIndex("idx"); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/CompositeAggIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/CompositeAggIT.java new file mode 100644 index 0000000000000..5a38ba670f1dc --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/CompositeAggIT.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.aggregations.bucket; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.action.search.SearchResponse; +import org.opensearch.cluster.health.ClusterHealthStatus; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; +import org.opensearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; +import org.opensearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder; +import org.opensearch.search.aggregations.metrics.MaxAggregationBuilder; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; + +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; + +@OpenSearchIntegTestCase.SuiteScopeTestCase +public class CompositeAggIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public CompositeAggIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + public void setupSuiteScopeCluster() throws Exception { + assertAcked( + prepareCreate( + "idx", + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + ).setMapping("type", "type=keyword", "num", "type=integer", "score", 
"type=integer") + ); + waitForRelocation(ClusterHealthStatus.GREEN); + + client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "1", "score", "5").get(); + client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "11", "score", "50").get(); + refresh("idx"); + client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "1", "score", "2").get(); + client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "12", "score", "20").get(); + refresh("idx"); + client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "3", "score", "10").get(); + client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "13", "score", "15").get(); + refresh("idx"); + client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "3", "score", "1").get(); + client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "13", "score", "100").get(); + refresh("idx"); + + waitForRelocation(ClusterHealthStatus.GREEN); + refresh(); + } + + public void testCompositeAggWithNoSubAgg() { + SearchResponse rsp = client().prepareSearch("idx") + .addAggregation(new CompositeAggregationBuilder("my_composite", getTestValueSources())) + .get(); + assertSearchResponse(rsp); + } + + public void testCompositeAggWithSubAgg() { + SearchResponse rsp = client().prepareSearch("idx") + .addAggregation( + new CompositeAggregationBuilder("my_composite", getTestValueSources()).subAggregation( + new MaxAggregationBuilder("max").field("score") + ) + ) + .get(); + assertSearchResponse(rsp); + } + + private List<CompositeValuesSourceBuilder<?>> getTestValueSources() { + final List<CompositeValuesSourceBuilder<?>> sources = new ArrayList<>(); + sources.add(new TermsValuesSourceBuilder("keyword_vs").field("type")); + sources.add(new TermsValuesSourceBuilder("num_vs").field("num")); + return sources; + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java index 617c5745c9bba..6a15490cbfe63 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java @@ -31,11 +31,12 @@ package org.opensearch.search.aggregations.bucket; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.OpenSearchException; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; -import org.opensearch.common.Strings; import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.time.DateFormatters; @@ -50,13 +51,14 @@ import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; -import org.opensearch.search.aggregations.bucket.histogram.LongBounds; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.opensearch.search.aggregations.bucket.histogram.InternalDateHistogram; +import org.opensearch.search.aggregations.bucket.histogram.LongBounds; import 
org.opensearch.search.aggregations.metrics.Avg; import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; import org.junit.After; @@ -79,6 +81,7 @@ import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.avg; import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; @@ -94,7 +97,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class DateHistogramIT extends OpenSearchIntegTestCase { +public class DateHistogramIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { static Map<ZonedDateTime, Map<String, Object>> expectedMultiSortBuckets; @@ -102,8 +105,20 @@ private ZonedDateTime date(int month, int day) { return ZonedDateTime.of(2012, month, day, 0, 0, 0, 0, ZoneOffset.UTC); } + public DateHistogramIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + private ZonedDateTime date(String date) { - return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(date)); + return DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date)); } private static String format(ZonedDateTime date, String pattern) { @@ -162,9 +177,9 @@ public void setupSuiteScopeCluster() throws Exception { indexDoc(2, 15, 3), // date: Feb 15, dates: Feb 15, Mar 16 indexDoc(3, 2, 4), // date: Mar 2, dates: Mar 2, Apr 3 indexDoc(3, 15, 5), // date: Mar 15, dates: Mar 15, Apr 16 - indexDoc(3, 23, 6) + indexDoc(3, 23, 6) // date: Mar 23, dates: Mar 23, Apr 24 ) - ); // date: Mar 23, dates: Mar 23, Apr 24 + ); indexRandom(true, builders); ensureSearchable(); } @@ -1309,16 +1324,15 @@ public void testSingleValueFieldWithExtendedBoundsOffset() throws Exception { } public void testSingleValueWithMultipleDateFormatsFromMapping() throws Exception { - String mappingJson = Strings.toString( - jsonBuilder().startObject() - .startObject("properties") - .startObject("date") - .field("type", "date") - .field("format", "strict_date_optional_time||dd-MM-yyyy") - .endObject() - .endObject() - .endObject() - ); + String mappingJson = jsonBuilder().startObject() + .startObject("properties") + .startObject("date") + .field("type", "date") + .field("format", "strict_date_optional_time||dd-MM-yyyy") + .endObject() + .endObject() + .endObject() + .toString(); prepareCreate("idx2").setMapping(mappingJson).get(); IndexRequestBuilder[] reqs = new IndexRequestBuilder[5]; for (int i = 0; i < reqs.length; i++) { @@ -1461,7 +1475,7 @@ public void testExceptionOnNegativeInterval() { /** * https://github.com/elastic/elasticsearch/issues/31760 shows an edge case where an unmapped "date" field in two indices * that are queried simultaneously can lead to the "format" parameter in the aggregation not being preserved correctly. 
- * + * <p> * The error happens when the bucket from the "unmapped" index is received first in the reduce phase, however the case can * be recreated when aggregating about a single index with an unmapped date field and also getting "empty" buckets. */ @@ -1604,8 +1618,8 @@ public void testScriptCaching() throws Exception { .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) .get() ); - String date = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(date(1, 1)); - String date2 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(date(2, 1)); + String date = DateFieldMapper.getDefaultDateTimeFormatter().format(date(1, 1)); + String date2 = DateFieldMapper.getDefaultDateTimeFormatter().format(date(2, 1)); indexRandom( true, client().prepareIndex("cache_test_idx").setId("1").setSource("d", date), @@ -1735,6 +1749,7 @@ public void testScriptCaching() throws Exception { .getMissCount(), equalTo(2L) ); + cluster().wipeIndices("cache_test_idx"); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndKeyDesc() throws Exception { @@ -1852,6 +1867,7 @@ public void testDateNanosHistogram() throws Exception { assertEquals(1, buckets.get(0).getDocCount()); assertEquals(946771200000L, ((ZonedDateTime) buckets.get(1).getKey()).toEpochSecond() * 1000); assertEquals(1, buckets.get(1).getDocCount()); + cluster().wipeIndices("nanos"); } public void testDateKeyFormatting() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramOffsetIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramOffsetIT.java index 19e5bdb8916b8..eea896e01afe1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramOffsetIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramOffsetIT.java @@ -31,24 +31,31 @@ package org.opensearch.search.aggregations.bucket; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.time.DateFormatters; import org.opensearch.index.mapper.DateFieldMapper; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.junit.After; import org.junit.Before; import java.io.IOException; import java.time.ZoneOffset; import java.time.ZonedDateTime; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.core.IsNull.notNullValue; @@ -61,13 +68,25 @@ */ @OpenSearchIntegTestCase.SuiteScopeTestCase @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE) -public class DateHistogramOffsetIT extends OpenSearchIntegTestCase { 
+public class DateHistogramOffsetIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String DATE_FORMAT = "yyyy-MM-dd:hh-mm-ss"; private static final DateFormatter FORMATTER = DateFormatter.forPattern(DATE_FORMAT); + public DateHistogramOffsetIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + private ZonedDateTime date(String date) { - return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(date)); + return DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date)); } @Before diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateRangeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateRangeIT.java index 470ee6a4d2cea..f00b601a54b80 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateRangeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateRangeIT.java @@ -31,6 +31,8 @@ package org.opensearch.search.aggregations.bucket; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.OpenSearchException; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; @@ -46,6 +48,7 @@ import org.opensearch.search.aggregations.bucket.range.Range.Bucket; import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.time.ZoneId; @@ -63,6 +66,7 @@ import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.dateRange; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.sum; @@ -76,7 +80,19 @@ import static org.hamcrest.core.IsNull.nullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class DateRangeIT extends OpenSearchIntegTestCase { +public class DateRangeIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public DateRangeIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } private static IndexRequestBuilder indexDoc(int month, int day, int value) throws Exception { return client().prepareIndex("idx") @@ -1062,6 +1078,7 @@ public void testScriptCaching() throws Exception { .getMissCount(), equalTo(2L) ); + internalCluster().wipeIndices("cache_test_idx"); } /** @@ -1124,6 +1141,7 @@ public void testRangeWithFormatStringValue() throws Exception { .get() ); 
assertThat(e.getDetailedMessage(), containsString("failed to parse date field [1000000] with format [strict_hour_minute_second]")); + internalCluster().wipeIndices(indexName); } /** @@ -1196,6 +1214,7 @@ public void testRangeWithFormatNumericValue() throws Exception { buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2); assertBucket(buckets.get(0), 2L, "1000000-3000000", 1000000L, 3000000L); assertBucket(buckets.get(1), 1L, "3000000-4000000", 3000000L, 4000000L); + internalCluster().wipeIndices(indexName); } private static List<Range.Bucket> checkBuckets(Range dateRange, String expectedAggName, long expectedBucketsSize) { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java index 5b01e7573908c..b62e5f0f7f3b0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java @@ -31,11 +31,14 @@ package org.opensearch.search.aggregations.bucket; -import org.opensearch.action.admin.indices.refresh.RefreshRequest; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchType; +import org.opensearch.action.support.WriteRequest; import org.opensearch.common.settings.Settings; import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.sampler.DiversifiedAggregationBuilder; import org.opensearch.search.aggregations.bucket.sampler.Sampler; import org.opensearch.search.aggregations.bucket.sampler.SamplerAggregator; @@ -43,14 +46,16 @@ import org.opensearch.search.aggregations.bucket.terms.Terms.Bucket; import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.opensearch.search.aggregations.metrics.Max; -import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; +import java.util.Arrays; import java.util.Collection; import java.util.List; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.max; import static org.opensearch.search.aggregations.AggregationBuilders.sampler; import static org.opensearch.search.aggregations.AggregationBuilders.terms; @@ -65,10 +70,22 @@ * Tests the Sampler aggregation */ @OpenSearchIntegTestCase.SuiteScopeTestCase -public class DiversifiedSamplerIT extends OpenSearchIntegTestCase { +public class DiversifiedSamplerIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public static final int NUM_SHARDS = 2; + public DiversifiedSamplerIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { 
Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + public String randomExecutionHint() { return randomBoolean() ? null : randomFrom(SamplerAggregator.ExecutionMode.values()).toString(); } @@ -109,13 +126,14 @@ public void setupSuiteScopeCluster() throws Exception { client().prepareIndex("test") .setId("" + i) .setSource("author", parts[5], "name", parts[2], "genre", parts[8], "price", Float.parseFloat(parts[3])) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); client().prepareIndex("idx_unmapped_author") .setId("" + i) .setSource("name", parts[2], "genre", parts[8], "price", Float.parseFloat(parts[3])) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); } - client().admin().indices().refresh(new RefreshRequest("test")).get(); } public void testIssue10719() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DoubleTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DoubleTermsIT.java index 3093c7490a2a5..ccb4af8386472 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DoubleTermsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DoubleTermsIT.java @@ -48,9 +48,9 @@ import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.search.aggregations.bucket.terms.Terms.Bucket; import org.opensearch.search.aggregations.metrics.Avg; +import org.opensearch.search.aggregations.metrics.ExtendedStats; import org.opensearch.search.aggregations.metrics.Max; import org.opensearch.search.aggregations.metrics.Stats; -import org.opensearch.search.aggregations.metrics.ExtendedStats; import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.test.OpenSearchIntegTestCase; @@ -88,6 +88,10 @@ @OpenSearchIntegTestCase.SuiteScopeTestCase public class DoubleTermsIT extends AbstractTermsTestCase { + public DoubleTermsIT(Settings staticSettings) { + super(staticSettings); + } + @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); @@ -1106,5 +1110,6 @@ public void testScriptCaching() throws Exception { .getMissCount(), equalTo(2L) ); + internalCluster().wipeIndices("cache_test_idx"); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FilterIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FilterIT.java index ef455bf353ce4..2863711d49580 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FilterIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FilterIT.java @@ -31,9 +31,12 @@ package org.opensearch.search.aggregations.bucket; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.OpenSearchException; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.BoolQueryBuilder; import org.opensearch.index.query.QueryBuilder; @@ -42,14 +45,18 @@ import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.metrics.Avg; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.avg; import static org.opensearch.search.aggregations.AggregationBuilders.filter; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; @@ -60,10 +67,22 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class FilterIT extends OpenSearchIntegTestCase { +public class FilterIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { static int numDocs, numTag1Docs; + public FilterIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override public void setupSuiteScopeCluster() throws Exception { createIndex("idx"); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FilterRewriteIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FilterRewriteIT.java new file mode 100644 index 0000000000000..e051265d4b3bc --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FilterRewriteIT.java @@ -0,0 +1,107 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.aggregations.bucket; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.time.DateFormatter; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.opensearch.search.aggregations.bucket.histogram.Histogram; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedDynamicSettingsOpenSearchIntegTestCase; + +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.SuiteScopeTestCase +public class FilterRewriteIT extends ParameterizedDynamicSettingsOpenSearchIntegTestCase { + + // simulate segment level match all + private static final QueryBuilder QUERY = QueryBuilders.termQuery("match", true); + private static final Map<String, Long> expected = new HashMap<>(); + + public FilterRewriteIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected void setupSuiteScopeCluster() throws Exception { + assertAcked(client().admin().indices().prepareCreate("idx").get()); + expected.clear(); + + final int repeat = randomIntBetween(2, 10); + final Set<Long> longTerms = new HashSet<>(); + + for (int i = 0; i < repeat; i++) { + final List<IndexRequestBuilder> indexRequests = new ArrayList<>(); + + long longTerm; + do { + longTerm = randomInt(repeat * 2); + } while (!longTerms.add(longTerm)); + ZonedDateTime time = ZonedDateTime.of(2024, 1, ((int) longTerm) + 1, 0, 0, 0, 0, ZoneOffset.UTC); + String dateTerm = DateFormatter.forPattern("yyyy-MM-dd").format(time); + + final int frequency = randomBoolean() ? 
1 : randomIntBetween(2, 20); + for (int j = 0; j < frequency; j++) { + indexRequests.add( + client().prepareIndex("idx") + .setSource(jsonBuilder().startObject().field("date", dateTerm).field("match", true).endObject()) + ); + } + expected.put(dateTerm + "T00:00:00.000Z", (long) frequency); + + indexRandom(true, false, indexRequests); + } + + ensureSearchable(); + } + + public void testMinDocCountOnDateHistogram() throws Exception { + final SearchResponse allResponse = client().prepareSearch("idx") + .setSize(0) + .setQuery(QUERY) + .addAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY).minDocCount(0)) + .get(); + + final Histogram allHisto = allResponse.getAggregations().get("histo"); + Map<String, Long> results = new HashMap<>(); + allHisto.getBuckets().forEach(bucket -> results.put(bucket.getKeyAsString(), bucket.getDocCount())); + + for (Map.Entry<String, Long> entry : expected.entrySet()) { + assertEquals(entry.getValue(), results.get(entry.getKey())); + } + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FiltersIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FiltersIT.java index 4c5033b957d00..e64877a1d4030 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FiltersIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FiltersIT.java @@ -32,9 +32,12 @@ package org.opensearch.search.aggregations.bucket; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.OpenSearchException; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.BoolQueryBuilder; import org.opensearch.index.query.QueryBuilder; @@ -44,6 +47,7 @@ import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.metrics.Avg; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedDynamicSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.util.ArrayList; @@ -56,6 +60,7 @@ import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.avg; import static org.opensearch.search.aggregations.AggregationBuilders.filters; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; @@ -66,10 +71,22 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class FiltersIT extends OpenSearchIntegTestCase { +public class FiltersIT extends ParameterizedDynamicSettingsOpenSearchIntegTestCase { static int numDocs, numTag1Docs, numTag2Docs, numOtherDocs; + public FiltersIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { 
Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override public void setupSuiteScopeCluster() throws Exception { createIndex("idx"); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GeoDistanceIT.java index 6d99424989fd7..ed0bd3aad5bab 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GeoDistanceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GeoDistanceIT.java @@ -31,6 +31,8 @@ package org.opensearch.search.aggregations.bucket; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.Version; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; @@ -47,17 +49,20 @@ import org.opensearch.search.aggregations.bucket.range.Range.Bucket; import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; import org.hamcrest.Matchers; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.Set; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.geoDistance; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.terms; @@ -70,7 +75,11 @@ import static org.hamcrest.core.IsNull.nullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class GeoDistanceIT extends OpenSearchIntegTestCase { +public class GeoDistanceIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public GeoDistanceIT(Settings staticSettings) { + super(staticSettings); + } @Override protected boolean forbidPrivateIndexSettings() { @@ -79,6 +88,14 @@ protected boolean forbidPrivateIndexSettings() { private Version version = VersionUtils.randomIndexCompatibleVersion(random()); + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + private IndexRequestBuilder indexCity(String idx, String name, String... 
latLons) throws Exception { XContentBuilder source = jsonBuilder().startObject().field("city", name); source.startArray("location"); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GlobalIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GlobalIT.java index 8a97d9c9e75dd..a4aea6096a6e4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GlobalIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GlobalIT.java @@ -31,19 +31,26 @@ package org.opensearch.search.aggregations.bucket; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.OpenSearchException; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.bucket.global.Global; import org.opensearch.search.aggregations.metrics.Stats; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.global; import static org.opensearch.search.aggregations.AggregationBuilders.stats; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; @@ -53,10 +60,22 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class GlobalIT extends OpenSearchIntegTestCase { +public class GlobalIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { static int numDocs; + public GlobalIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override public void setupSuiteScopeCluster() throws Exception { createIndex("idx"); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/HistogramIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/HistogramIT.java index dd495701c3ddb..4abd068d6fe37 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/HistogramIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/HistogramIT.java @@ -31,6 +31,8 @@ package org.opensearch.search.aggregations.bucket; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.OpenSearchException; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; @@ -42,6 +44,7 @@ import org.opensearch.script.Script; import org.opensearch.script.ScriptType; import org.opensearch.search.aggregations.AggregationExecutionException; +import org.opensearch.search.aggregations.BucketOrder; import 
org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.bucket.filter.Filter; import org.opensearch.search.aggregations.bucket.histogram.DoubleBounds; @@ -51,12 +54,13 @@ import org.opensearch.search.aggregations.metrics.Max; import org.opensearch.search.aggregations.metrics.Stats; import org.opensearch.search.aggregations.metrics.Sum; -import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -69,6 +73,7 @@ import static java.util.Collections.emptyMap; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.avg; import static org.opensearch.search.aggregations.AggregationBuilders.filter; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; @@ -85,7 +90,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class HistogramIT extends OpenSearchIntegTestCase { +public class HistogramIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; private static final String MULTI_VALUED_FIELD_NAME = "l_values"; @@ -96,6 +101,18 @@ public class HistogramIT extends OpenSearchIntegTestCase { static long[] valueCounts, valuesCounts; static Map<Long, Map<String, Object>> expectedMultiSortBuckets; + public HistogramIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); @@ -1144,6 +1161,7 @@ public void testDecimalIntervalAndOffset() throws Exception { assertEquals(1, buckets.get(0).getDocCount()); assertEquals(0.05, (double) buckets.get(1).getKey(), 0.01d); assertEquals(1, buckets.get(1).getDocCount()); + internalCluster().wipeIndices("decimal_values"); } /** @@ -1285,6 +1303,7 @@ public void testScriptCaching() throws Exception { .getMissCount(), equalTo(2L) ); + internalCluster().wipeIndices("cache_test_idx"); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndKeyDesc() throws Exception { @@ -1388,6 +1407,7 @@ public void testHardBounds() throws Exception { buckets = histogram.getBuckets(); assertEquals(1, buckets.size()); assertEquals(0.1, (double) buckets.get(0).getKey(), 0.01d); + internalCluster().wipeIndices("test"); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/IpRangeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/IpRangeIT.java index f8f666aaa3c1b..44789ea63f536 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/IpRangeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/IpRangeIT.java @@ -31,9 +31,12 @@ package org.opensearch.search.aggregations.bucket; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.health.ClusterHealthStatus; +import org.opensearch.common.settings.Settings; import org.opensearch.plugins.Plugin; import org.opensearch.script.MockScriptPlugin; import org.opensearch.script.Script; @@ -41,6 +44,7 @@ import org.opensearch.search.aggregations.AggregationBuilders; import org.opensearch.search.aggregations.bucket.range.Range; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -48,13 +52,26 @@ import java.util.Map; import java.util.function.Function; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class IpRangeIT extends OpenSearchIntegTestCase { +public class IpRangeIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public IpRangeIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } public static class DummyScriptPlugin extends MockScriptPlugin { @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/IpTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/IpTermsIT.java index cff51e74fdbd0..4d2da4fa1d14b 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/IpTermsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/IpTermsIT.java @@ -32,6 +32,7 @@ package org.opensearch.search.aggregations.bucket; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.plugins.Plugin; import org.opensearch.script.Script; @@ -50,6 +51,10 @@ public class IpTermsIT extends AbstractTermsTestCase { + public IpTermsIT(Settings staticSettings) { + super(staticSettings); + } + @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/LongTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/LongTermsIT.java index bd0e69ca315ec..49031bfd3fc1d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/LongTermsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/LongTermsIT.java @@ -48,9 +48,9 @@ import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.search.aggregations.bucket.terms.Terms.Bucket; import org.opensearch.search.aggregations.metrics.Avg; +import org.opensearch.search.aggregations.metrics.ExtendedStats; import org.opensearch.search.aggregations.metrics.Max; import org.opensearch.search.aggregations.metrics.Stats; -import org.opensearch.search.aggregations.metrics.ExtendedStats; import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.test.OpenSearchIntegTestCase; @@ -86,6 +86,10 @@ @OpenSearchIntegTestCase.SuiteScopeTestCase public class LongTermsIT extends AbstractTermsTestCase { + public LongTermsIT(Settings staticSettings) { + super(staticSettings); + } + @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); @@ -1054,5 +1058,6 @@ public void testScriptCaching() throws Exception { .getMissCount(), equalTo(2L) ); + internalCluster().wipeIndices("cache_test_idx"); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MinDocCountIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MinDocCountIT.java index 48e2a3d8fa9e1..781d2acc5e2be 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MinDocCountIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MinDocCountIT.java @@ -33,9 +33,11 @@ package org.opensearch.search.aggregations.bucket; import com.carrotsearch.randomizedtesting.generators.RandomStrings; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.index.query.QueryBuilder; @@ -80,6 +82,10 @@ public class MinDocCountIT extends AbstractTermsTestCase { private static final QueryBuilder QUERY = QueryBuilders.termQuery("match", true); private static int cardinality; + public MinDocCountIT(Settings staticSettings) { + super(staticSettings); + } + @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MultiTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MultiTermsIT.java index d4273aee925f7..09133f720f9f7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MultiTermsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MultiTermsIT.java @@ -9,6 +9,7 @@ package org.opensearch.search.aggregations.bucket; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.script.Script; import org.opensearch.script.ScriptType; import org.opensearch.search.aggregations.bucket.terms.BaseStringTermsTestCase; @@ -21,11 +22,11 @@ import java.util.Collections; import static java.util.Arrays.asList; +import static org.opensearch.search.aggregations.AggregationBuilders.multiTerms; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.core.IsNull.notNullValue; -import static org.opensearch.search.aggregations.AggregationBuilders.multiTerms; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; /** * Extend {@link BaseStringTermsTestCase}. @@ -33,6 +34,10 @@ @OpenSearchIntegTestCase.SuiteScopeTestCase public class MultiTermsIT extends BaseStringTermsTestCase { + public MultiTermsIT(Settings staticSettings) { + super(staticSettings); + } + // the main purpose of this test is to make sure we're not allocating 2GB of memory per shard public void testSizeIsZero() { final int minDocCount = randomInt(1); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NaNSortingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NaNSortingIT.java index 53e457ac17a5b..3eb813dcb91ef 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NaNSortingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NaNSortingIT.java @@ -32,23 +32,31 @@ package org.opensearch.search.aggregations.bucket; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.common.util.Comparators; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.aggregations.Aggregation; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; +import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.search.aggregations.metrics.Avg; import org.opensearch.search.aggregations.metrics.AvgAggregationBuilder; import org.opensearch.search.aggregations.metrics.ExtendedStats; import org.opensearch.search.aggregations.metrics.ExtendedStatsAggregationBuilder; -import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.opensearch.test.OpenSearchIntegTestCase; +import 
org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.avg; import static org.opensearch.search.aggregations.AggregationBuilders.extendedStats; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; @@ -58,7 +66,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class NaNSortingIT extends OpenSearchIntegTestCase { +public class NaNSortingIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private enum SubAggregation { AVG("avg") { @@ -130,6 +138,18 @@ public String sortKey() { public abstract double getValue(Aggregation aggregation); } + public NaNSortingIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping("string_value", "type=keyword").get()); @@ -146,6 +166,7 @@ public void setupSuiteScopeCluster() throws Exception { client().prepareIndex("idx").setSource(source.endObject()).get(); } refresh(); + indexRandomForMultipleSlices("idx"); ensureSearchable(); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NestedIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NestedIT.java index ed3edc8c624f8..288d4d2c4e525 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NestedIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NestedIT.java @@ -31,17 +31,18 @@ package org.opensearch.search.aggregations.bucket; -import org.apache.lucene.search.join.ScoreMode; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.apache.lucene.search.join.ScoreMode; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.query.InnerHitBuilder; -import org.opensearch.core.rest.RestStatus; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.bucket.filter.Filter; @@ -55,10 +56,12 @@ import org.opensearch.search.aggregations.metrics.Stats; import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.test.OpenSearchIntegTestCase; - +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.util.ArrayList; +import 
java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; @@ -68,6 +71,7 @@ import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.nestedQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.filter; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.max; @@ -87,12 +91,24 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class NestedIT extends OpenSearchIntegTestCase { +public class NestedIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static int numParents; private static int[] numChildren; private static SubAggCollectionMode aggCollectionMode; + public NestedIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override public void setupSuiteScopeCluster() throws Exception { @@ -202,6 +218,7 @@ public void setupSuiteScopeCluster() throws Exception { ) ); indexRandom(true, builders); + indexRandomForMultipleSlices("idx"); ensureSearchable(); } @@ -332,6 +349,7 @@ public void testNestedAsSubAggregation() throws Exception { } public void testNestNestedAggs() throws Exception { + indexRandomForConcurrentSearch("idx_nested_nested_aggs"); SearchResponse response = client().prepareSearch("idx_nested_nested_aggs") .addAggregation( nested("level1", "nested1").subAggregation( @@ -461,7 +479,7 @@ public void testParentFilterResolvedCorrectly() throws Exception { "{\"dates\": {\"month\": {\"label\": \"2014-11\", \"end\": \"2014-11-30\", \"start\": \"2014-11-01\"}, " + "\"day\": \"2014-11-30\"}, \"comments\": [{\"cid\": 3,\"identifier\": \"29111\"}, {\"cid\": 4,\"tags\": [" + "{\"tid\" :44,\"name\": \"Roles\"}], \"identifier\": \"29101\"}]}", - XContentType.JSON + MediaTypeRegistry.JSON ) ); indexRequests.add( @@ -471,7 +489,7 @@ public void testParentFilterResolvedCorrectly() throws Exception { "{\"dates\": {\"month\": {\"label\": \"2014-12\", \"end\": \"2014-12-31\", \"start\": \"2014-12-01\"}, " + "\"day\": \"2014-12-03\"}, \"comments\": [{\"cid\": 1, \"identifier\": \"29111\"}, {\"cid\": 2,\"tags\": [" + "{\"tid\" : 22, \"name\": \"DataChannels\"}], \"identifier\": \"29101\"}]}", - XContentType.JSON + MediaTypeRegistry.JSON ) ); indexRandom(true, indexRequests); @@ -534,6 +552,7 @@ public void testParentFilterResolvedCorrectly() throws Exception { assertThat(nestedTags.getDocCount(), equalTo(0L)); // This must be 0 tags = nestedTags.getAggregations().get("tag"); assertThat(tags.getBuckets().size(), equalTo(0)); // and this must be empty + internalCluster().wipeIndices("idx2"); } public void testNestedSameDocIdProcessedMultipleTime() throws Exception { @@ -584,6 +603,7 @@ public void testNestedSameDocIdProcessedMultipleTime() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("idx4"); SearchResponse response = 
client().prepareSearch("idx4") .addAggregation( @@ -640,6 +660,7 @@ public void testNestedSameDocIdProcessedMultipleTime() throws Exception { assertThat(propertyId.getBucketByKey("1").getDocCount(), equalTo(1L)); assertThat(propertyId.getBucketByKey("2").getDocCount(), equalTo(1L)); assertThat(propertyId.getBucketByKey("3").getDocCount(), equalTo(1L)); + internalCluster().wipeIndices("idx4"); } public void testFilterAggInsideNestedAgg() throws Exception { @@ -758,6 +779,7 @@ public void testFilterAggInsideNestedAgg() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("classes"); SearchResponse response = client().prepareSearch("classes") .addAggregation( @@ -802,6 +824,7 @@ public void testFilterAggInsideNestedAgg() throws Exception { assertThat(bucket.getDocCount(), equalTo(1L)); numStringParams = bucket.getAggregations().get("num_string_params"); assertThat(numStringParams.getDocCount(), equalTo(0L)); + internalCluster().wipeIndices("classes"); } public void testExtractInnerHitBuildersWithDuplicateHitName() throws Exception { @@ -826,6 +849,7 @@ public void testExtractInnerHitBuildersWithDuplicateHitName() throws Exception { RestStatus.BAD_REQUEST, containsString("[inner_hits] already contains an entry for key [ih1]") ); + internalCluster().wipeIndices("idxduplicatehitnames"); } public void testExtractInnerHitBuildersWithDuplicatePath() throws Exception { @@ -848,5 +872,6 @@ public void testExtractInnerHitBuildersWithDuplicatePath() throws Exception { RestStatus.BAD_REQUEST, containsString("[inner_hits] already contains an entry for key [property]") ); + internalCluster().wipeIndices("idxnullhitnames"); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/RangeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/RangeIT.java index c46d6dcd847e1..50cee4e9ecd92 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/RangeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/RangeIT.java @@ -31,6 +31,8 @@ package org.opensearch.search.aggregations.bucket; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; @@ -48,9 +50,11 @@ import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -60,6 +64,7 @@ import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.range; import static org.opensearch.search.aggregations.AggregationBuilders.sum; @@ -73,13 +78,25 @@ import static org.hamcrest.core.IsNull.nullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class RangeIT extends OpenSearchIntegTestCase { +public class RangeIT extends 
ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; private static final String MULTI_VALUED_FIELD_NAME = "l_values"; static int numDocs; + public RangeIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); @@ -161,6 +178,7 @@ public void setupSuiteScopeCluster() throws Exception { builders.add(client().prepareIndex("new_index").setSource(Collections.emptyMap())); indexRandom(true, builders); + indexRandomForMultipleSlices("idx", "old_index", "new_index"); ensureSearchable(); } @@ -894,6 +912,7 @@ public void testOverlappingRanges() throws Exception { } public void testEmptyAggregation() throws Exception { + indexRandomForConcurrentSearch("empty_bucket_idx"); SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx") .setQuery(matchAllQuery()) .addAggregation( @@ -1061,6 +1080,7 @@ public void testScriptCaching() throws Exception { .getMissCount(), equalTo(2L) ); + internalCluster().wipeIndices("cache_test_idx"); } public void testFieldAlias() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ReverseNestedIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ReverseNestedIT.java index 9659f4d154cb5..3bf9233d3441d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ReverseNestedIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ReverseNestedIT.java @@ -31,22 +31,26 @@ package org.opensearch.search.aggregations.bucket; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; +import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.bucket.filter.Filter; import org.opensearch.search.aggregations.bucket.nested.Nested; import org.opensearch.search.aggregations.bucket.nested.ReverseNested; import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.search.aggregations.metrics.ValueCount; -import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; @@ -55,6 +59,7 @@ import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; 
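The static import added just above is the setting that every suite converted in this change toggles. The conversion itself is mechanical: each test class gains a one-argument constructor that forwards to the parameterized base class, plus a @ParametersFactory that runs the whole suite once with concurrent segment search disabled and once with it enabled. A minimal sketch of that pattern, using a hypothetical ExampleAggIT class (the base class, annotation, and setting key are exactly the ones used throughout this diff):

    import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;

    import org.opensearch.common.settings.Settings;
    import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;

    import java.util.Arrays;
    import java.util.Collection;

    import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;

    // Hypothetical class for illustration; not part of this change.
    public class ExampleAggIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase {

        // The randomized runner passes one Object[] entry per run to this constructor.
        public ExampleAggIT(Settings staticSettings) {
            super(staticSettings);
        }

        // Every test method in the class executes once per returned settings permutation.
        @ParametersFactory
        public static Collection<Object[]> parameters() {
            return Arrays.asList(
                new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
                new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
            );
        }
    }

Suites that apply the setting as a dynamic cluster setting rather than a static one (BooleanTermsIT, FiltersIT, and FilterRewriteIT in this change) extend ParameterizedDynamicSettingsOpenSearchIntegTestCase instead; the constructor and factory shape is identical.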
import static org.opensearch.search.aggregations.AggregationBuilders.count; import static org.opensearch.search.aggregations.AggregationBuilders.filter; import static org.opensearch.search.aggregations.AggregationBuilders.nested; @@ -70,7 +75,19 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class ReverseNestedIT extends OpenSearchIntegTestCase { +public class ReverseNestedIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public ReverseNestedIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } @Override public void setupSuiteScopeCluster() throws Exception { @@ -726,6 +743,7 @@ public void testSameParentDocHavingMultipleBuckets() throws Exception { ValueCount barCount = reverseToBar.getAggregations().get("sku_count"); assertThat(barCount.getValue(), equalTo(2L)); } + internalCluster().wipeIndices("idx3"); } public void testFieldAlias() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SamplerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SamplerIT.java index 0bfeff9297ce8..3decab92acbff 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SamplerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SamplerIT.java @@ -32,24 +32,30 @@ package org.opensearch.search.aggregations.bucket; -import org.opensearch.action.admin.indices.refresh.RefreshRequest; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchType; +import org.opensearch.action.support.WriteRequest; import org.opensearch.common.settings.Settings; import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.sampler.Sampler; -import org.opensearch.search.aggregations.bucket.sampler.SamplerAggregator; import org.opensearch.search.aggregations.bucket.sampler.SamplerAggregationBuilder; +import org.opensearch.search.aggregations.bucket.sampler.SamplerAggregator; import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.search.aggregations.bucket.terms.Terms.Bucket; import org.opensearch.search.aggregations.metrics.Max; -import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.max; import static org.opensearch.search.aggregations.AggregationBuilders.sampler; import static org.opensearch.search.aggregations.AggregationBuilders.terms; @@ -64,7 +70,7 @@ * Tests the Sampler aggregation */ 
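+// Runs each test in this class twice via the @ParametersFactory constructor parameters below: once with concurrent
+// segment search disabled and once enabled.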
@OpenSearchIntegTestCase.SuiteScopeTestCase -public class SamplerIT extends OpenSearchIntegTestCase { +public class SamplerIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { public static final int NUM_SHARDS = 2; @@ -72,6 +78,18 @@ public String randomExecutionHint() { return randomBoolean() ? null : randomFrom(SamplerAggregator.ExecutionMode.values()).toString(); } + public SamplerIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override public void setupSuiteScopeCluster() throws Exception { assertAcked( @@ -108,13 +126,14 @@ public void setupSuiteScopeCluster() throws Exception { client().prepareIndex("test") .setId("" + i) .setSource("author", parts[5], "name", parts[2], "genre", parts[8], "price", Float.parseFloat(parts[3])) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); client().prepareIndex("idx_unmapped_author") .setId("" + i) .setSource("name", parts[2], "genre", parts[8], "price", Float.parseFloat(parts[3])) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); } - client().admin().indices().refresh(new RefreshRequest("test")).get(); } public void testIssue10719() throws Exception { @@ -171,6 +190,23 @@ public void testSimpleSampler() throws Exception { assertThat(maxBooksPerAuthor, equalTo(3L)); } + public void testSimpleSamplerShardSize() throws Exception { + final int SHARD_SIZE = randomIntBetween(1, 3); + SamplerAggregationBuilder sampleAgg = sampler("sample").shardSize(SHARD_SIZE); + sampleAgg.subAggregation(terms("authors").field("author")); + SearchResponse response = client().prepareSearch("test") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setQuery(new TermQueryBuilder("genre", "fantasy")) + .setFrom(0) + .setSize(60) + .addAggregation(sampleAgg) + .get(); + assertSearchResponse(response); + Sampler sample = response.getAggregations().get("sample"); + Terms authors = sample.getAggregations().get("authors"); + assertEquals(SHARD_SIZE * NUM_SHARDS, sample.getDocCount()); + } + public void testUnmappedChildAggNoDiversity() throws Exception { SamplerAggregationBuilder sampleAgg = sampler("sample").shardSize(100); sampleAgg.subAggregation(terms("authors").field("author")); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardReduceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardReduceIT.java index faa6a54394b00..4cab6deb08bb5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardReduceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardReduceIT.java @@ -31,8 +31,11 @@ package org.opensearch.search.aggregations.bucket; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.geometry.utils.Geohash; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; @@ -45,8 +48,13 @@ import org.opensearch.search.aggregations.bucket.range.Range; import 
org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; import static org.opensearch.search.aggregations.AggregationBuilders.dateRange; import static org.opensearch.search.aggregations.AggregationBuilders.filter; @@ -68,7 +76,19 @@ * we can make sure that the reduce is properly propagated by checking that empty buckets were created. */ @OpenSearchIntegTestCase.SuiteScopeTestCase -public class ShardReduceIT extends OpenSearchIntegTestCase { +public class ShardReduceIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public ShardReduceIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } private IndexRequestBuilder indexDoc(String date, int value) throws Exception { return client().prepareIndex("idx") diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardSizeTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardSizeTermsIT.java index 8f3d94c2eacdb..66cce21bcf86f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardSizeTermsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardSizeTermsIT.java @@ -32,9 +32,10 @@ package org.opensearch.search.aggregations.bucket; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; -import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.search.aggregations.BucketOrder; +import org.opensearch.search.aggregations.bucket.terms.Terms; import java.util.HashMap; import java.util.List; @@ -45,6 +46,11 @@ import static org.hamcrest.Matchers.equalTo; public class ShardSizeTermsIT extends ShardSizeTestCase { + + public ShardSizeTermsIT(Settings staticSettings) { + super(staticSettings); + } + public void testNoShardSizeString() throws Exception { createIdx("type=keyword"); @@ -80,6 +86,7 @@ public void testShardSizeEqualsSizeString() throws Exception { terms("keys").field("key") .size(3) .shardSize(3) + .showTermDocCountError(true) .collectMode(randomFrom(SubAggCollectionMode.values())) .order(BucketOrder.count(false)) ) @@ -92,8 +99,11 @@ public void testShardSizeEqualsSizeString() throws Exception { expected.put("1", 8L); expected.put("3", 8L); expected.put("2", 4L); + Long expectedDocCount; for (Terms.Bucket bucket : buckets) { - assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsString()))); + expectedDocCount = expected.get(bucket.getKeyAsString()); + // Doc count can vary when using concurrent segment search. 
See https://github.com/opensearch-project/OpenSearch/issues/11680 + assertTrue((bucket.getDocCount() == expectedDocCount) || bucket.getDocCount() + bucket.getDocCountError() >= expectedDocCount); } } @@ -215,6 +225,7 @@ public void testShardSizeEqualsSizeLong() throws Exception { terms("keys").field("key") .size(3) .shardSize(3) + .showTermDocCountError(true) .collectMode(randomFrom(SubAggCollectionMode.values())) .order(BucketOrder.count(false)) ) @@ -227,8 +238,11 @@ public void testShardSizeEqualsSizeLong() throws Exception { expected.put(1, 8L); expected.put(3, 8L); expected.put(2, 4L); + Long expectedDocCount; for (Terms.Bucket bucket : buckets) { - assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue()))); + expectedDocCount = expected.get(bucket.getKeyAsNumber().intValue()); + // Doc count can vary when using concurrent segment search. See https://github.com/opensearch-project/OpenSearch/issues/11680 + assertTrue((bucket.getDocCount() == expectedDocCount) || bucket.getDocCount() + bucket.getDocCountError() >= expectedDocCount); } } @@ -349,6 +363,7 @@ public void testShardSizeEqualsSizeDouble() throws Exception { terms("keys").field("key") .size(3) .shardSize(3) + .showTermDocCountError(true) .collectMode(randomFrom(SubAggCollectionMode.values())) .order(BucketOrder.count(false)) ) @@ -361,8 +376,11 @@ public void testShardSizeEqualsSizeDouble() throws Exception { expected.put(1, 8L); expected.put(3, 8L); expected.put(2, 4L); + Long expectedDocCount; for (Terms.Bucket bucket : buckets) { - assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue()))); + expectedDocCount = expected.get(bucket.getKeyAsNumber().intValue()); + // Doc count can vary when using concurrent segment search. 
See https://github.com/opensearch-project/OpenSearch/issues/11680 + assertTrue((bucket.getDocCount() == expectedDocCount) || bucket.getDocCount() + bucket.getDocCountError() >= expectedDocCount); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java index 43d49dc0bfd60..f2e9265fa5cf9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java @@ -31,16 +31,17 @@ package org.opensearch.search.aggregations.bucket; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; -import org.opensearch.common.Strings; import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.query.TermQueryBuilder; @@ -63,6 +64,7 @@ import org.opensearch.search.aggregations.bucket.terms.heuristic.ScriptHeuristic; import org.opensearch.search.aggregations.bucket.terms.heuristic.SignificanceHeuristic; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.search.aggregations.bucket.SharedSignificantTermsTestMethods; import java.io.IOException; @@ -79,6 +81,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.filter; import static org.opensearch.search.aggregations.AggregationBuilders.significantTerms; import static org.opensearch.search.aggregations.AggregationBuilders.significantText; @@ -91,12 +94,24 @@ import static org.hamcrest.Matchers.is; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE) -public class SignificantTermsSignificanceScoreIT extends OpenSearchIntegTestCase { +public class SignificantTermsSignificanceScoreIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { static final String INDEX_NAME = "testidx"; static final String TEXT_FIELD = "text"; static final String CLASS_FIELD = "class"; + public SignificantTermsSignificanceScoreIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + 
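+    // TestScriptPlugin (returned below) supplies the mock scripts backing the script-based significance heuristic
+    // tests in this class.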
@Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(TestScriptPlugin.class); @@ -208,7 +223,7 @@ public void testXContentResponse() throws Exception { + "\"score\":0.75," + "\"bg_count\":4" + "}]}}]}}"; - assertThat(Strings.toString(responseBuilder), equalTo(result)); + assertThat(responseBuilder.toString(), equalTo(result)); } @@ -243,7 +258,7 @@ public void testConsistencyWithDifferentShardCounts() throws Exception { public void testPopularTermManyDeletedDocs() throws Exception { String settings = "{\"index.number_of_shards\": 1, \"index.number_of_replicas\": 0}"; assertAcked( - prepareCreate(INDEX_NAME).setSettings(settings, XContentType.JSON) + prepareCreate(INDEX_NAME).setSettings(settings, MediaTypeRegistry.JSON) .setMapping("text", "type=keyword", CLASS_FIELD, "type=keyword") ); String[] cat1v1 = { "constant", "one" }; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java index 3d76b994ebac3..add6b71cb1753 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java @@ -32,23 +32,29 @@ package org.opensearch.search.aggregations.bucket; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; +import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.search.aggregations.bucket.terms.Terms.Bucket; import org.opensearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode; -import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.search.aggregations.AggregationBuilders.terms; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -60,7 +66,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class TermsDocCountErrorIT extends OpenSearchIntegTestCase { +public class TermsDocCountErrorIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String STRING_FIELD_NAME = "s_value"; private static final String LONG_FIELD_NAME = "l_value"; @@ -72,6 +78,18 @@ public static String randomExecutionHint() { private static int numRoutingValues; + public TermsDocCountErrorIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { 
Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
+        );
+    }
+
     @Override
     public void setupSuiteScopeCluster() throws Exception {
         assertAcked(client().admin().indices().prepareCreate("idx").setMapping(STRING_FIELD_NAME, "type=keyword").get());
@@ -202,6 +220,15 @@ public void setupSuiteScopeCluster() throws Exception {

         indexRandom(true, builders);
         ensureSearchable();
+
+        // Force merge each shard down to 1 segment to verify results are the same between concurrent and non-concurrent search
+        // paths; otherwise concurrent segment search introduces additional error during the slice level reduce, and different
+        // buckets, doc_counts, and doc_count_errors may be returned. This test serves to verify that the doc_count_error is the
+        // same between concurrent and non-concurrent search in the 1 slice case. TermsFixedDocCountErrorIT verifies that the
+        // doc count error is correctly calculated for concurrent segment search at the slice level.
+        // See https://github.com/opensearch-project/OpenSearch/issues/11680
+        forceMerge(1);
+        Thread.sleep(5000); // Sleep 5s to give the force merge time to complete
     }

     private void assertDocCountErrorWithinBounds(int size, SearchResponse accurateResponse, SearchResponse testResponse) {
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsFixedDocCountErrorIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsFixedDocCountErrorIT.java
new file mode 100644
index 0000000000000..422af15d2881d
--- /dev/null
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsFixedDocCountErrorIT.java
@@ -0,0 +1,360 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.search.aggregations.bucket;
+
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.opensearch.action.admin.indices.segments.IndicesSegmentResponse;
+import org.opensearch.action.search.SearchResponse;
+import org.opensearch.cluster.metadata.IndexMetadata;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.search.aggregations.bucket.terms.Terms;
+import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;
+
+import java.util.Arrays;
+import java.util.Collection;
+
+import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.opensearch.index.IndexSettings.MINIMUM_REFRESH_INTERVAL;
+import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
+import static org.opensearch.search.aggregations.AggregationBuilders.terms;
+import static org.opensearch.test.OpenSearchIntegTestCase.Scope.TEST;
+import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
+
+@OpenSearchIntegTestCase.ClusterScope(scope = TEST, numClientNodes = 0, maxNumDataNodes = 1, supportsDedicatedMasters = false)
+public class TermsFixedDocCountErrorIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase {
+
+    private static final String STRING_FIELD_NAME = "s_value";
+
+    public TermsFixedDocCountErrorIT(Settings staticSettings) {
+        super(staticSettings);
+    }
+
+    @ParametersFactory
+    public static Collection<Object[]> parameters() {
+        return Arrays.asList(
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() },
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }
+        );
+    }
+
+    public void testSimpleAggErrorMultiShard() throws Exception {
+        // size = 2, shard_size = 2
+        // Shard_1 [A, A, A, A, B, B, C] -> Buckets {"A" : 4, "B" : 2}
+        // Shard_2 [A, B, B, B, C, C] -> Buckets {"B" : 3, "C" : 2}
+        // coordinator -> Buckets {"B" : 5, "A" : 4}
+        // Agg error is 4, the sum of the (shard_size)th bucket doc count from each shard
+        // Bucket "A" error is 2, from the (shard_size)th bucket on shard_2
+        // Bucket "B" error is 0, it's present on both shards
+
+        // With size = 1, shard_size = 1, slice_size = 1:
+        // non-concurrent search
+        // Shard_1 [A, B, C]
+        // Shard_2 [B, C, D]
+        // concurrent search
+        // Shard_1 slice_1 [A, B, C] -> {a : 1} -> {a : 1 -- error: 1}
+        //         slice_2 [B, C, D] -> {b : 1}
+        // Coordinator should return the same doc count error in both cases
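+
+        // In general: each shard returns its top shard_size buckets, a bucket's doc count error is bounded by the
+        // smallest doc count returned from each shard that did not return that bucket, and the aggregation-level
+        // error is the sum of the (shard_size)th bucket's doc count from each shard. The tests below spell out this
+        // arithmetic for concrete shard and slice layouts.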
+
+        assertAcked(
+            prepareCreate("idx_mshard_1").setMapping(STRING_FIELD_NAME, "type=keyword")
+                .setSettings(
+                    Settings.builder()
+                        .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
+                        .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+                        .put("index.refresh_interval", MINIMUM_REFRESH_INTERVAL)
+                )
+        );
+        client().prepareIndex("idx_mshard_1").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get();
+        client().prepareIndex("idx_mshard_1").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get();
+        client().prepareIndex("idx_mshard_1").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get();
+        client().prepareIndex("idx_mshard_1").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get();
+        client().prepareIndex("idx_mshard_1").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "B").endObject()).get();
+
client().prepareIndex("idx_mshard_1").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "B").endObject()).get(); + client().prepareIndex("idx_mshard_1").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "C").endObject()).get(); + refresh("idx_mshard_1"); + + IndicesSegmentResponse segmentResponse = client().admin().indices().prepareSegments("idx_mshard_1").get(); + assertEquals(1, segmentResponse.getIndices().get("idx_mshard_1").getShards().get(0).getShards()[0].getSegments().size()); + + assertAcked( + prepareCreate("idx_mshard_2").setMapping(STRING_FIELD_NAME, "type=keyword") + .setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.refresh_interval", MINIMUM_REFRESH_INTERVAL) + ) + ); + client().prepareIndex("idx_mshard_2").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_mshard_2").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "B").endObject()).get(); + client().prepareIndex("idx_mshard_2").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "B").endObject()).get(); + client().prepareIndex("idx_mshard_2").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "B").endObject()).get(); + client().prepareIndex("idx_mshard_2").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "C").endObject()).get(); + client().prepareIndex("idx_mshard_2").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "C").endObject()).get(); + refresh("idx_mshard_2"); + + segmentResponse = client().admin().indices().prepareSegments("idx_mshard_2").get(); + assertEquals(1, segmentResponse.getIndices().get("idx_mshard_2").getShards().get(0).getShards()[0].getSegments().size()); + + SearchResponse response = client().prepareSearch("idx_mshard_2", "idx_mshard_1") + .setSize(0) + .addAggregation(terms("terms").field(STRING_FIELD_NAME).showTermDocCountError(true).size(2).shardSize(2)) + .get(); + + Terms terms = response.getAggregations().get("terms"); + assertEquals(2, terms.getBuckets().size()); + assertEquals(4, terms.getDocCountError()); + + Terms.Bucket bucket = terms.getBuckets().get(0); // Bucket "B" + assertEquals("B", bucket.getKey().toString()); + assertEquals(5, bucket.getDocCount()); + assertEquals(0, bucket.getDocCountError()); + + bucket = terms.getBuckets().get(1); // Bucket "A" + assertEquals("A", bucket.getKey().toString()); + assertEquals(4, bucket.getDocCount()); + assertEquals(2, bucket.getDocCountError()); + } + + public void testSimpleAggErrorSingleShard() throws Exception { + assertAcked( + prepareCreate("idx_shard_error").setMapping(STRING_FIELD_NAME, "type=keyword") + .setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.refresh_interval", MINIMUM_REFRESH_INTERVAL) + ) + ); + client().prepareIndex("idx_shard_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_shard_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_shard_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_shard_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + 
client().prepareIndex("idx_shard_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_shard_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_shard_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "B").endObject()).get(); + client().prepareIndex("idx_shard_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "B").endObject()).get(); + refresh("idx_shard_error"); + + SearchResponse response = client().prepareSearch("idx_shard_error") + .setSize(0) + .addAggregation(terms("terms").field(STRING_FIELD_NAME).showTermDocCountError(true).size(1).shardSize(2)) + .get(); + + Terms terms = response.getAggregations().get("terms"); + assertEquals(1, terms.getBuckets().size()); + assertEquals(0, terms.getDocCountError()); + + Terms.Bucket bucket = terms.getBuckets().get(0); + assertEquals("A", bucket.getKey().toString()); + assertEquals(6, bucket.getDocCount()); + assertEquals(0, bucket.getDocCountError()); + } + + public void testSliceLevelDocCountErrorSingleShard() throws Exception { + assumeTrue( + "Slice level error is not relevant to non-concurrent search cases", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); + + // Slices are created by sorting segments by doc count in descending order then distributing in round robin fashion. + // Creates 2 segments (and therefore 2 slices since slice_count = 2) as follows: + // 1. [A, A, A, B, B, C] + // 2. [A, B, B, B, C, C] + // Thus we expect the doc count error for A to be 2 as the nth largest bucket on slice 2 has size 2 + + assertAcked( + prepareCreate("idx_slice_error").setMapping(STRING_FIELD_NAME, "type=keyword") + .setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.refresh_interval", MINIMUM_REFRESH_INTERVAL) + ) + ); + client().prepareIndex("idx_slice_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_slice_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_slice_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_slice_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "B").endObject()).get(); + client().prepareIndex("idx_slice_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "B").endObject()).get(); + client().prepareIndex("idx_slice_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "C").endObject()).get(); + refresh("idx_slice_error"); + + client().prepareIndex("idx_slice_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_slice_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "B").endObject()).get(); + client().prepareIndex("idx_slice_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "B").endObject()).get(); + client().prepareIndex("idx_slice_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "B").endObject()).get(); + client().prepareIndex("idx_slice_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "C").endObject()).get(); + 
client().prepareIndex("idx_slice_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "C").endObject()).get(); + refresh("idx_slice_error"); + + IndicesSegmentResponse segmentResponse = client().admin().indices().prepareSegments("idx_slice_error").get(); + assertEquals(2, segmentResponse.getIndices().get("idx_slice_error").getShards().get(0).getShards()[0].getSegments().size()); + + // Confirm that there is no error when shard_size == slice_size > cardinality + SearchResponse response = client().prepareSearch("idx_slice_error") + .setSize(0) + .addAggregation(terms("terms").field(STRING_FIELD_NAME).showTermDocCountError(true).size(1).shardSize(4)) + .get(); + + Terms terms = response.getAggregations().get("terms"); + assertEquals(1, terms.getBuckets().size()); + assertEquals(0, terms.getDocCountError()); + + Terms.Bucket bucket = terms.getBuckets().get(0); // Bucket "B" + assertEquals("B", bucket.getKey().toString()); + assertEquals(5, bucket.getDocCount()); + assertEquals(0, bucket.getDocCountError()); + + response = client().prepareSearch("idx_slice_error") + .setSize(0) + .addAggregation(terms("terms").field(STRING_FIELD_NAME).showTermDocCountError(true).size(2).shardSize(2)) + .get(); + + terms = response.getAggregations().get("terms"); + assertEquals(2, terms.getBuckets().size()); + assertEquals(4, terms.getDocCountError()); + + bucket = terms.getBuckets().get(0); // Bucket "B" + assertEquals("B", bucket.getKey().toString()); + assertEquals(5, bucket.getDocCount()); + assertEquals(0, bucket.getDocCountError()); + + bucket = terms.getBuckets().get(1); // Bucket "A" + assertEquals("A", bucket.getKey().toString()); + assertEquals(3, bucket.getDocCount()); + assertEquals(2, bucket.getDocCountError()); + } + + public void testSliceLevelDocCountErrorMultiShard() throws Exception { + assumeTrue( + "Slice level error is not relevant to non-concurrent search cases", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); + + // Size = 2, shard_size = 2 + // Shard_1 [A, A, A, A, B, B, C, C] + // slice_1 [A, A, A, B, B, C] {"A" : 3, "B" : 2} + // slice_2 [A, C] {"A" : 1, "C" : 1} + // Shard_1 buckets: {"A" : 4 - error: 0, "B" : 2 - error: 1} + // Shard_2 [A, A, B, B, B, C, C, C] + // slice_1 [A, B, B, B, C, C] {"B" : 3, "C" : 2} + // slice_2 [A, C] {"A" : 1, "C" : 1} + // Shard_2 buckets: {"B" : 3 - error: 1, "C" : 3 - error: 0} + // Overall + // {"B" : 5 - error: 2, "A" : 4 - error: 3} Agg error: 6 + + assertAcked( + prepareCreate("idx_mshard_1").setMapping(STRING_FIELD_NAME, "type=keyword") + .setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.refresh_interval", MINIMUM_REFRESH_INTERVAL) + ) + ); + client().prepareIndex("idx_mshard_1").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_mshard_1").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_mshard_1").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_mshard_1").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "B").endObject()).get(); + client().prepareIndex("idx_mshard_1").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "B").endObject()).get(); + client().prepareIndex("idx_mshard_1").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, 
"C").endObject()).get(); + refresh("idx_mshard_1"); + + client().prepareIndex("idx_mshard_1").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_mshard_1").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "C").endObject()).get(); + refresh("idx_mshard_1"); + + IndicesSegmentResponse segmentResponse = client().admin().indices().prepareSegments("idx_mshard_1").get(); + assertEquals(2, segmentResponse.getIndices().get("idx_mshard_1").getShards().get(0).getShards()[0].getSegments().size()); + + SearchResponse response = client().prepareSearch("idx_mshard_1") + .setSize(0) + .addAggregation(terms("terms").field(STRING_FIELD_NAME).showTermDocCountError(true).size(2).shardSize(2)) + .get(); + + Terms terms = response.getAggregations().get("terms"); + assertEquals(2, terms.getBuckets().size()); + assertEquals(3, terms.getDocCountError()); + + Terms.Bucket bucket = terms.getBuckets().get(0); + assertEquals("A", bucket.getKey().toString()); + assertEquals(4, bucket.getDocCount()); + assertEquals(0, bucket.getDocCountError()); + + bucket = terms.getBuckets().get(1); + assertEquals("B", bucket.getKey().toString()); + assertEquals(2, bucket.getDocCount()); + assertEquals(1, bucket.getDocCountError()); + + assertAcked( + prepareCreate("idx_mshard_2").setMapping(STRING_FIELD_NAME, "type=keyword") + .setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.refresh_interval", MINIMUM_REFRESH_INTERVAL) + ) + ); + client().prepareIndex("idx_mshard_2").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_mshard_2").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "B").endObject()).get(); + client().prepareIndex("idx_mshard_2").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "B").endObject()).get(); + client().prepareIndex("idx_mshard_2").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "B").endObject()).get(); + client().prepareIndex("idx_mshard_2").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "C").endObject()).get(); + client().prepareIndex("idx_mshard_2").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "C").endObject()).get(); + refresh("idx_mshard_2"); + + client().prepareIndex("idx_mshard_2").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_mshard_2").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "C").endObject()).get(); + refresh("idx_mshard_2"); + + segmentResponse = client().admin().indices().prepareSegments("idx_mshard_2").get(); + assertEquals(2, segmentResponse.getIndices().get("idx_mshard_2").getShards().get(0).getShards()[0].getSegments().size()); + + response = client().prepareSearch("idx_mshard_2") + .setSize(0) + .addAggregation(terms("terms").field(STRING_FIELD_NAME).showTermDocCountError(true).size(2).shardSize(2)) + .get(); + + terms = response.getAggregations().get("terms"); + assertEquals(2, terms.getBuckets().size()); + assertEquals(3, terms.getDocCountError()); + + bucket = terms.getBuckets().get(0); + assertEquals("B", bucket.getKey().toString()); + assertEquals(3, bucket.getDocCount()); + assertEquals(1, bucket.getDocCountError()); + + bucket = terms.getBuckets().get(1); + assertEquals("C", bucket.getKey().toString()); + assertEquals(3, bucket.getDocCount()); + assertEquals(0, 
bucket.getDocCountError()); + + response = client().prepareSearch("idx_mshard_2", "idx_mshard_1") + .setSize(0) + .addAggregation(terms("terms").field(STRING_FIELD_NAME).showTermDocCountError(true).size(2).shardSize(2)) + .get(); + + terms = response.getAggregations().get("terms"); + assertEquals(2, terms.getBuckets().size()); + assertEquals(6, terms.getDocCountError()); + + bucket = terms.getBuckets().get(0); + assertEquals("B", bucket.getKey().toString()); + assertEquals(5, bucket.getDocCount()); + assertEquals(2, bucket.getDocCountError()); + + bucket = terms.getBuckets().get(1); + assertEquals("A", bucket.getKey().toString()); + assertEquals(4, bucket.getDocCount()); + assertEquals(3, bucket.getDocCountError()); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsShardMinDocCountIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsShardMinDocCountIT.java index 852c3760751b3..1cc250c00dba9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsShardMinDocCountIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsShardMinDocCountIT.java @@ -31,23 +31,28 @@ package org.opensearch.search.aggregations.bucket; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.filter.InternalFilter; import org.opensearch.search.aggregations.bucket.terms.SignificantTerms; import org.opensearch.search.aggregations.bucket.terms.SignificantTermsAggregatorFactory; import org.opensearch.search.aggregations.bucket.terms.Terms; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.filter; import static org.opensearch.search.aggregations.AggregationBuilders.significantTerms; import static org.opensearch.search.aggregations.AggregationBuilders.terms; @@ -55,15 +60,32 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; -public class TermsShardMinDocCountIT extends OpenSearchIntegTestCase { +public class TermsShardMinDocCountIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + private static final String index = "someindex"; + public TermsShardMinDocCountIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), 
true).build() } + ); + } + private static String randomExecutionHint() { return randomBoolean() ? null : randomFrom(SignificantTermsAggregatorFactory.ExecutionMode.values()).toString(); } // see https://github.com/elastic/elasticsearch/issues/5998 public void testShardMinDocCountSignificantTermsTest() throws Exception { + assumeFalse( + "For concurrent segment search shard_min_doc_count is not enforced at the slice level. See https://github.com/opensearch-project/OpenSearch/issues/11847", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); String textMappings; if (randomBoolean()) { textMappings = "type=long"; @@ -124,15 +146,19 @@ private void addTermsDocs(String term, int numInClass, int numNotInClass, List<I String sourceClass = "{\"text\": \"" + term + "\", \"class\":" + "true" + "}"; String sourceNotClass = "{\"text\": \"" + term + "\", \"class\":" + "false" + "}"; for (int i = 0; i < numInClass; i++) { - builders.add(client().prepareIndex(index).setSource(sourceClass, XContentType.JSON)); + builders.add(client().prepareIndex(index).setSource(sourceClass, MediaTypeRegistry.JSON)); } for (int i = 0; i < numNotInClass; i++) { - builders.add(client().prepareIndex(index).setSource(sourceNotClass, XContentType.JSON)); + builders.add(client().prepareIndex(index).setSource(sourceNotClass, MediaTypeRegistry.JSON)); } } // see https://github.com/elastic/elasticsearch/issues/5998 public void testShardMinDocCountTermsTest() throws Exception { + assumeFalse( + "For concurrent segment search shard_min_doc_count is not enforced at the slice level. See https://github.com/opensearch-project/OpenSearch/issues/11847", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); final String[] termTypes = { "text", "long", "integer", "float", "double" }; String termtype = termTypes[randomInt(termTypes.length - 1)]; String termMappings = "type=" + termtype; @@ -165,8 +191,8 @@ public void testShardMinDocCountTermsTest() throws Exception { ) .get(); assertSearchResponse(response); - Terms sigterms = response.getAggregations().get("myTerms"); - assertThat(sigterms.getBuckets().size(), equalTo(0)); + Terms terms = response.getAggregations().get("myTerms"); + assertThat(terms.getBuckets().size(), equalTo(0)); response = client().prepareSearch(index) .addAggregation( @@ -180,15 +206,15 @@ public void testShardMinDocCountTermsTest() throws Exception { ) .get(); assertSearchResponse(response); - sigterms = response.getAggregations().get("myTerms"); - assertThat(sigterms.getBuckets().size(), equalTo(2)); + terms = response.getAggregations().get("myTerms"); + assertThat(terms.getBuckets().size(), equalTo(2)); } private static void addTermsDocs(String term, int numDocs, List<IndexRequestBuilder> builders) { String sourceClass = "{\"text\": \"" + term + "\"}"; for (int i = 0; i < numDocs; i++) { - builders.add(client().prepareIndex(index).setSource(sourceClass, XContentType.JSON)); + builders.add(client().prepareIndex(index).setSource(sourceClass, MediaTypeRegistry.JSON)); } } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/BaseStringTermsTestCase.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/BaseStringTermsTestCase.java index 7775618ba5b13..79aa4a648310a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/BaseStringTermsTestCase.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/BaseStringTermsTestCase.java @@ -8,15 +8,16 @@ package org.opensearch.search.aggregations.bucket.terms; -import org.junit.After; -import org.junit.Before; import org.opensearch.action.index.IndexRequestBuilder; -import org.opensearch.common.Strings; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.Strings; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.plugins.Plugin; import org.opensearch.search.aggregations.AggregationTestScriptsPlugin; import org.opensearch.search.aggregations.bucket.AbstractTermsTestCase; import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.After; +import org.junit.Before; import java.io.IOException; import java.util.ArrayList; @@ -37,6 +38,10 @@ public class BaseStringTermsTestCase extends AbstractTermsTestCase { protected static final String MULTI_VALUED_FIELD_NAME = "s_values"; protected static Map<String, Map<String, Object>> expectedMultiSortBuckets; + public BaseStringTermsTestCase(Settings staticSettings) { + super(staticSettings); + } + @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/StringTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/StringTermsIT.java index fa8e823545b36..edf9cd432dda2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/StringTermsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/StringTermsIT.java @@ -35,9 +35,9 @@ import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.index.mapper.IndexFieldMapper; import org.opensearch.index.query.QueryBuilders; import org.opensearch.script.Script; @@ -79,6 +79,10 @@ @OpenSearchIntegTestCase.SuiteScopeTestCase public class StringTermsIT extends BaseStringTermsTestCase { + public StringTermsIT(Settings staticSettings) { + super(staticSettings); + } + // the main purpose of this test is to make sure we're not allocating 2GB of memory per shard public void testSizeIsZero() { final int minDocCount = randomInt(1); @@ -1127,6 +1131,7 @@ public void testScriptCaching() throws Exception { .getMissCount(), equalTo(2L) ); + internalCluster().wipeIndices("cache_test_idx"); } public void testScriptWithValueType() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java index 147f451c14de8..db4ee3571d141 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.aggregations.metrics; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; 
import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; @@ -45,7 +47,9 @@ import org.opensearch.search.aggregations.bucket.global.Global; import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -55,6 +59,7 @@ import static java.util.Collections.emptyMap; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.cardinality; import static org.opensearch.search.aggregations.AggregationBuilders.global; import static org.opensearch.search.aggregations.AggregationBuilders.terms; @@ -65,7 +70,19 @@ import static org.hamcrest.Matchers.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class CardinalityIT extends OpenSearchIntegTestCase { +public class CardinalityIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public CardinalityIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } @Override protected Collection<Class<? extends Plugin>> nodePlugins() { @@ -615,5 +632,6 @@ public void testScriptCaching() throws Exception { .getMissCount(), equalTo(2L) ); + internalCluster().wipeIndices("cache_test_idx"); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java index be69428453952..8122304ba992c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java @@ -32,23 +32,40 @@ package org.opensearch.search.aggregations.metrics; -import org.opensearch.OpenSearchException; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.ExceptionsHelper; +import org.opensearch.OpenSearchException; import org.opensearch.action.index.IndexRequestBuilder; -import org.opensearch.common.breaker.CircuitBreakingException; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.breaker.CircuitBreakingException; import org.opensearch.indices.breaker.HierarchyCircuitBreakerService; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.BucketOrder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; +import java.util.Arrays; +import java.util.Collection; import java.util.Map; import java.util.stream.IntStream; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static 
org.opensearch.search.aggregations.AggregationBuilders.cardinality; import static org.opensearch.search.aggregations.AggregationBuilders.terms; -public class CardinalityWithRequestBreakerIT extends OpenSearchIntegTestCase { +public class CardinalityWithRequestBreakerIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public CardinalityWithRequestBreakerIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } /** * Test that searches using cardinality aggregations returns all request breaker memory. @@ -76,6 +93,7 @@ public void testRequestBreaker() throws Exception { ) .get(); + indexRandomForConcurrentSearch("test"); try { client().prepareSearch("test") .addAggregation( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ExtendedStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ExtendedStatsIT.java index cd0a649659c6e..4a2c100690de4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ExtendedStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ExtendedStatsIT.java @@ -37,6 +37,7 @@ import org.opensearch.script.Script; import org.opensearch.script.ScriptType; import org.opensearch.search.aggregations.AggregationTestScriptsPlugin; +import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.bucket.filter.Filter; import org.opensearch.search.aggregations.bucket.global.Global; @@ -44,7 +45,6 @@ import org.opensearch.search.aggregations.bucket.missing.Missing; import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.search.aggregations.metrics.ExtendedStats.Bounds; -import org.opensearch.search.aggregations.BucketOrder; import java.util.Collection; import java.util.Collections; @@ -70,6 +70,10 @@ public class ExtendedStatsIT extends AbstractNumericTestCase { + public ExtendedStatsIT(Settings staticSettings) { + super(staticSettings); + } + @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Collections.singleton(AggregationTestScriptsPlugin.class); @@ -995,6 +999,7 @@ public void testScriptCaching() throws Exception { .getMissCount(), equalTo(2L) ); + internalCluster().wipeIndices("cache_test_idx"); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/GeoCentroidIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/GeoCentroidIT.java index ffc31b7cdb7c4..ed87fa6d8f5f6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/GeoCentroidIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/GeoCentroidIT.java @@ -34,6 +34,7 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.geo.GeoPoint; +import org.opensearch.common.settings.Settings; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.bucket.global.Global; import org.opensearch.test.OpenSearchIntegTestCase; @@ -54,6 +55,10 @@ public class GeoCentroidIT extends AbstractGeoTestCase { private static final String aggName = "geoCentroid"; + public GeoCentroidIT(Settings staticSettings) { + super(staticSettings); + } + public void testEmptyAggregation() throws Exception { SearchResponse response = client().prepareSearch(EMPTY_IDX_NAME) .setQuery(matchAllQuery()) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/HDRPercentileRanksIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/HDRPercentileRanksIT.java index 20fc6aaee20c9..ae67f0b1c0b66 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/HDRPercentileRanksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/HDRPercentileRanksIT.java @@ -38,12 +38,12 @@ import org.opensearch.script.Script; import org.opensearch.script.ScriptType; import org.opensearch.search.aggregations.AggregationTestScriptsPlugin; +import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.bucket.filter.Filter; import org.opensearch.search.aggregations.bucket.global.Global; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.terms.Terms; -import org.opensearch.search.aggregations.BucketOrder; import java.util.Arrays; import java.util.Collection; @@ -53,7 +53,7 @@ import java.util.Map; import static java.util.Collections.emptyMap; -import static org.opensearch.common.util.CollectionUtils.iterableAsArrayList; +import static org.opensearch.core.common.util.CollectionUtils.iterableAsArrayList; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; import static org.opensearch.search.aggregations.AggregationBuilders.filter; @@ -72,6 +72,10 @@ public class HDRPercentileRanksIT extends AbstractNumericTestCase { + public HDRPercentileRanksIT(Settings staticSettings) { + super(staticSettings); + } + @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Collections.singleton(AggregationTestScriptsPlugin.class); @@ -716,6 +720,7 @@ public void testScriptCaching() throws Exception { .getMissCount(), equalTo(2L) ); + internalCluster().wipeIndices("cache_test_idx"); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/HDRPercentilesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/HDRPercentilesIT.java index 2660dbe0a88ed..ff1cab85c18e6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/HDRPercentilesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/HDRPercentilesIT.java @@ -34,7 +34,7 @@ import org.apache.logging.log4j.LogManager; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.CollectionUtils; +import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.plugins.Plugin; import org.opensearch.script.Script; import org.opensearch.script.ScriptType; @@ -75,6 +75,10 @@ public class HDRPercentilesIT extends AbstractNumericTestCase { + public HDRPercentilesIT(Settings staticSettings) { + super(staticSettings); + } + @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.singleton(AggregationTestScriptsPlugin.class); @@ -687,6 +691,7 @@ public void testScriptCaching() throws Exception { .getMissCount(), equalTo(2L) ); + internalCluster().wipeIndices("cache_test_idx"); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java index 226b4dbca18d9..0edba475a6401 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java @@ -68,8 +68,8 @@ import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.range; import static org.opensearch.search.aggregations.AggregationBuilders.terms; -import static org.opensearch.search.aggregations.metrics.MedianAbsoluteDeviationAggregatorTests.IsCloseToRelative.closeToRelative; import static org.opensearch.search.aggregations.metrics.MedianAbsoluteDeviationAggregatorTests.ExactMedianAbsoluteDeviation.calculateMAD; +import static org.opensearch.search.aggregations.metrics.MedianAbsoluteDeviationAggregatorTests.IsCloseToRelative.closeToRelative; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; @@ -91,6 +91,10 @@ public class MedianAbsoluteDeviationIT extends AbstractNumericTestCase { private static double singleValueExactMAD; private static double multiValueExactMAD; + public MedianAbsoluteDeviationIT(Settings staticSettings) { + super(staticSettings); + } + @Override public void setupSuiteScopeCluster() throws Exception { final Settings settings = Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0).build(); @@ -643,5 +647,6 @@ public void testScriptCaching() throws Exception { .getMissCount(), equalTo(2L) ); + 
internalCluster().wipeIndices("cache_test_idx"); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ScriptedMetricIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ScriptedMetricIT.java index 2034bbb7e13bc..1725aa7847d72 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ScriptedMetricIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ScriptedMetricIT.java @@ -32,14 +32,16 @@ package org.opensearch.search.aggregations.metrics; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.support.XContentMapValues; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.plugins.Plugin; import org.opensearch.script.MockScriptPlugin; import org.opensearch.script.Script; @@ -53,12 +55,14 @@ import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.junit.Before; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -69,6 +73,7 @@ import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.global; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.scriptedMetric; @@ -87,10 +92,22 @@ @ClusterScope(scope = Scope.SUITE) @OpenSearchIntegTestCase.SuiteScopeTestCase -public class ScriptedMetricIT extends OpenSearchIntegTestCase { +public class ScriptedMetricIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static long numDocs; + public ScriptedMetricIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); @@ -332,7 +349,7 @@ public void setupSuiteScopeCluster() throws Exception { new BytesArray( "{\"script\": {\"lang\": \"" + MockScriptPlugin.NAME + "\"," + " \"source\": \"vars.multiplier = 3\"} }" ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); @@ -345,7 +362,7 @@ public void setupSuiteScopeCluster() throws Exception { new BytesArray( "{\"script\": {\"lang\": \"" + MockScriptPlugin.NAME + "\"," + " \"source\": \"state.list.add(vars.multiplier)\"} }" ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); @@ -361,7 +378,7 @@ public void setupSuiteScopeCluster() throws Exception { + "\"," + " \"source\": \"sum state values as a new aggregation\"} }" ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); @@ -377,7 +394,7 @@ public void setupSuiteScopeCluster() throws Exception { + "\"," + " \"source\": \"sum all states (lists) values as a new aggregation\"} }" ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); @@ -1378,6 +1395,7 @@ public void testScriptCaching() throws Exception { .getMissCount(), equalTo(2L) ); + internalCluster().wipeIndices("cache_test_idx"); } public void testConflictingAggAndScriptParams() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/StatsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/StatsIT.java index debdde8e13fe7..3708e1e6ab21b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/StatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/StatsIT.java @@ -39,12 +39,12 @@ import org.opensearch.script.Script; import org.opensearch.script.ScriptType; import org.opensearch.search.aggregations.AggregationTestScriptsPlugin; +import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.bucket.filter.Filter; import org.opensearch.search.aggregations.bucket.global.Global; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.terms.Terms; -import org.opensearch.search.aggregations.BucketOrder; import java.util.Collection; import java.util.Collections; @@ -66,6 +66,10 @@ import static org.hamcrest.Matchers.sameInstance; public class StatsIT extends AbstractNumericTestCase { + public StatsIT(Settings staticSettings) { + super(staticSettings); + } + @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.singleton(AggregationTestScriptsPlugin.class); @@ -384,5 +388,6 @@ public void testScriptCaching() throws Exception { .getMissCount(), equalTo(2L) ); + internalCluster().wipeIndices("cache_test_idx"); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/SumIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/SumIT.java index fe236f04c19e8..b2aa3438b2306 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/SumIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/SumIT.java @@ -68,6 +68,10 @@ public class SumIT extends AbstractNumericTestCase { + public SumIT(Settings staticSettings) { + super(staticSettings); + } + @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Collections.singleton(MetricAggScriptPlugin.class); @@ -359,6 +363,7 @@ public void testScriptCaching() throws Exception { .getMissCount(), equalTo(2L) ); + internalCluster().wipeIndices("cache_test_idx"); } public void testFieldAlias() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TDigestPercentileRanksIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TDigestPercentileRanksIT.java index adf027222d7d9..4225c027c4d96 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TDigestPercentileRanksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TDigestPercentileRanksIT.java @@ -34,17 +34,17 @@ import org.apache.logging.log4j.LogManager; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.CollectionUtils; +import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.plugins.Plugin; import org.opensearch.script.Script; import org.opensearch.script.ScriptType; import org.opensearch.search.aggregations.AggregationTestScriptsPlugin; +import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.bucket.filter.Filter; import org.opensearch.search.aggregations.bucket.global.Global; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.terms.Terms; -import org.opensearch.search.aggregations.BucketOrder; import java.util.Arrays; import java.util.Collection; @@ -72,6 +72,10 @@ public class TDigestPercentileRanksIT extends AbstractNumericTestCase { + public TDigestPercentileRanksIT(Settings staticSettings) { + super(staticSettings); + } + @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.singleton(AggregationTestScriptsPlugin.class); @@ -626,6 +630,7 @@ public void testScriptCaching() throws Exception { .getMissCount(), equalTo(2L) ); + internalCluster().wipeIndices("cache_test_idx"); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TDigestPercentilesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TDigestPercentilesIT.java index fda15f9b90ea2..974e90fab16e8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TDigestPercentilesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TDigestPercentilesIT.java @@ -34,7 +34,7 @@ import org.apache.logging.log4j.LogManager; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.CollectionUtils; +import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.plugins.Plugin; import org.opensearch.script.Script; import org.opensearch.script.ScriptType; @@ -74,6 +74,10 @@ public class TDigestPercentilesIT extends AbstractNumericTestCase { + public TDigestPercentilesIT(Settings staticSettings) { + super(staticSettings); + } + @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Collections.singleton(AggregationTestScriptsPlugin.class); @@ -597,5 +601,6 @@ public void testScriptCaching() throws Exception { .getMissCount(), equalTo(2L) ); + internalCluster().wipeIndices("cache_test_idx"); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TopHitsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TopHitsIT.java index 96aeccfc03fb1..5d84452998e40 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TopHitsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TopHitsIT.java @@ -31,6 +31,8 @@ package org.opensearch.search.aggregations.metrics; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.search.Explanation; import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.util.ArrayUtil; @@ -67,8 +69,10 @@ import org.opensearch.search.sort.SortBuilders; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Iterator; @@ -83,6 +87,7 @@ import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.matchQuery; import static org.opensearch.index.query.QueryBuilders.nestedQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.global; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.max; @@ -105,11 +110,23 @@ import static org.hamcrest.Matchers.sameInstance; @OpenSearchIntegTestCase.SuiteScopeTestCase() -public class TopHitsIT extends OpenSearchIntegTestCase { +public class TopHitsIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String TERMS_AGGS_FIELD = "terms"; private static final String SORT_FIELD = "sort"; + public TopHitsIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ValueCountIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ValueCountIT.java index 82e667bccc576..4610281c4b8a8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ValueCountIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ValueCountIT.java @@ -31,6 +31,8 @@ package org.opensearch.search.aggregations.metrics; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; import org.opensearch.plugins.Plugin; @@ -42,7 +44,9 @@ import org.opensearch.search.aggregations.bucket.global.Global; import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; @@ -51,6 +55,7 @@ import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.count; import static org.opensearch.search.aggregations.AggregationBuilders.filter; import static org.opensearch.search.aggregations.AggregationBuilders.global; @@ -67,7 +72,20 @@ import static org.hamcrest.Matchers.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class ValueCountIT extends OpenSearchIntegTestCase { +public class ValueCountIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public ValueCountIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override public void setupSuiteScopeCluster() throws Exception { createIndex("idx"); @@ -363,6 +381,7 @@ public void testScriptCaching() throws Exception { .getMissCount(), equalTo(2L) ); + internalCluster().wipeIndices("cache_test_idx"); } public void testOrderByEmptyAggregation() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/AvgBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/AvgBucketIT.java index 6cd16a47e98d2..48fd06bac285b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/AvgBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/AvgBucketIT.java @@ -32,8 +32,11 @@ package org.opensearch.search.aggregations.pipeline; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import 
org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; @@ -42,11 +45,15 @@ import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.search.aggregations.AggregationBuilders.terms; @@ -58,7 +65,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class AvgBucketIT extends OpenSearchIntegTestCase { +public class AvgBucketIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; @@ -69,6 +76,18 @@ public class AvgBucketIT extends OpenSearchIntegTestCase { static int numValueBuckets; static long[] valueCounts; + public AvgBucketIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketScriptIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketScriptIT.java index e8e21d3580e1c..1b22cf2018d96 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketScriptIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketScriptIT.java @@ -32,12 +32,15 @@ package org.opensearch.search.aggregations.pipeline; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.plugins.Plugin; import org.opensearch.script.MockScriptPlugin; import org.opensearch.script.Script; @@ -47,9 +50,11 @@ import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; 
import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -58,6 +63,7 @@ import java.util.function.Function; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.dateRange; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.sum; @@ -69,7 +75,7 @@ import static org.hamcrest.Matchers.nullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class BucketScriptIT extends OpenSearchIntegTestCase { +public class BucketScriptIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String FIELD_1_NAME = "field1"; private static final String FIELD_2_NAME = "field2"; @@ -83,6 +89,18 @@ public class BucketScriptIT extends OpenSearchIntegTestCase { private static int maxNumber; private static long date; + public BucketScriptIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); @@ -551,7 +569,7 @@ public void testStoredScript() { // Script source is not interpreted but it references a pre-defined script from CustomScriptPlugin .setContent( new BytesArray("{ \"script\": {\"lang\": \"" + CustomScriptPlugin.NAME + "\"," + " \"source\": \"my_script\" } }"), - XContentType.JSON + MediaTypeRegistry.JSON ) ); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSelectorIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSelectorIT.java index 8fe8876c7593b..7dca1d0d79b1e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSelectorIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSelectorIT.java @@ -32,11 +32,14 @@ package org.opensearch.search.aggregations.pipeline; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.plugins.Plugin; import org.opensearch.script.MockScriptPlugin; import org.opensearch.script.Script; @@ -46,9 +49,11 @@ import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -57,6 +62,7 @@ 
import java.util.function.Function; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.search.aggregations.PipelineAggregatorBuilders.bucketSelector; @@ -70,7 +76,7 @@ import static org.hamcrest.Matchers.nullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class BucketSelectorIT extends OpenSearchIntegTestCase { +public class BucketSelectorIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String FIELD_1_NAME = "field1"; private static final String FIELD_2_NAME = "field2"; @@ -82,6 +88,18 @@ public class BucketSelectorIT extends OpenSearchIntegTestCase { private static int minNumber; private static int maxNumber; + public BucketSelectorIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); @@ -488,7 +506,7 @@ public void testStoredScript() { + "\", " + "\"source\": \"Double.isNaN(_value0) ? false : (_value0 + _value1 > 100)\" } }" ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSortIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSortIT.java index 231aa2e078de6..ffb607866935b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSortIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSortIT.java @@ -32,9 +32,12 @@ package org.opensearch.search.aggregations.pipeline; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilders; @@ -44,15 +47,18 @@ import org.opensearch.search.sort.FieldSortBuilder; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.time.ZonedDateTime; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.avg; import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; @@ -68,7 +74,7 @@ import 
static org.hamcrest.Matchers.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class BucketSortIT extends OpenSearchIntegTestCase { +public class BucketSortIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String INDEX = "bucket-sort-it-data-index"; private static final String INDEX_WITH_GAPS = "bucket-sort-it-data-index-with-gaps"; @@ -78,6 +84,18 @@ public class BucketSortIT extends OpenSearchIntegTestCase { private static final String VALUE_1_FIELD = "value_1"; private static final String VALUE_2_FIELD = "value_2"; + public BucketSortIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override public void setupSuiteScopeCluster() throws Exception { createIndex(INDEX, INDEX_WITH_GAPS); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DateDerivativeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DateDerivativeIT.java index 2c7890fb7b1cb..8c89c1232ebb3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DateDerivativeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DateDerivativeIT.java @@ -32,8 +32,11 @@ package org.opensearch.search.aggregations.pipeline; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.time.DateFormatters; import org.opensearch.search.aggregations.InternalAggregation; @@ -44,6 +47,7 @@ import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.support.AggregationPath; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.Matcher; import org.junit.After; @@ -55,9 +59,11 @@ import java.time.ZonedDateTime; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.search.aggregations.PipelineAggregatorBuilders.derivative; @@ -69,13 +75,25 @@ import static org.hamcrest.core.IsNull.nullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class DateDerivativeIT extends OpenSearchIntegTestCase { +public class DateDerivativeIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { // some index names used during these tests private static final String IDX_DST_START = "idx_dst_start"; private static final String IDX_DST_END = "idx_dst_end"; private static final String IDX_DST_KATHMANDU = "idx_dst_kathmandu"; + public DateDerivativeIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static 
Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + private ZonedDateTime date(int month, int day) { return ZonedDateTime.of(2012, month, day, 0, 0, 0, 0, ZoneOffset.UTC); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DerivativeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DerivativeIT.java index 406c57d044259..f8def40ec003a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DerivativeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DerivativeIT.java @@ -32,10 +32,13 @@ package org.opensearch.search.aggregations.pipeline; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.ExceptionsHelper; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.aggregations.InternalAggregation; @@ -47,19 +50,23 @@ import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.search.aggregations.support.AggregationPath; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedDynamicSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.aggregations.AggregationBuilders.avg; import static org.opensearch.search.aggregations.AggregationBuilders.filters; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.stats; import static org.opensearch.search.aggregations.AggregationBuilders.sum; -import static org.opensearch.search.aggregations.AggregationBuilders.avg; import static org.opensearch.search.aggregations.PipelineAggregatorBuilders.derivative; import static org.opensearch.search.aggregations.PipelineAggregatorBuilders.movingAvg; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -70,7 +77,7 @@ import static org.hamcrest.core.IsNull.nullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class DerivativeIT extends OpenSearchIntegTestCase { +public class DerivativeIT extends ParameterizedDynamicSettingsOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; @@ -92,6 +99,18 @@ public class DerivativeIT extends OpenSearchIntegTestCase { private static Double[] firstDerivValueCounts_empty_rnd; private static long numDocsEmptyIdx_rnd; + public DerivativeIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new 
Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override public void setupSuiteScopeCluster() throws Exception { createIndex("idx"); @@ -668,6 +687,7 @@ public void testAvgMovavgDerivNPE() throws Exception { } refresh(); + indexRandomForConcurrentSearch("movavg_npe"); SearchResponse response = client().prepareSearch("movavg_npe") .addAggregation( @@ -680,6 +700,7 @@ public void testAvgMovavgDerivNPE() throws Exception { .get(); assertSearchResponse(response); + internalCluster().wipeIndices("movavg_npe"); } private void checkBucketKeyAndDocCount( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java index 85fe794b05fc6..1bd04cc13268f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java @@ -32,10 +32,13 @@ package org.opensearch.search.aggregations.pipeline; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.ExceptionsHelper; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; @@ -45,11 +48,15 @@ import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.search.aggregations.AggregationBuilders.terms; @@ -61,7 +68,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class ExtendedStatsBucketIT extends OpenSearchIntegTestCase { +public class ExtendedStatsBucketIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; @@ -72,6 +79,18 @@ public class ExtendedStatsBucketIT extends OpenSearchIntegTestCase { static int numValueBuckets; static long[] valueCounts; + public ExtendedStatsBucketIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override public 
void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java index bb7aa9514564a..ea6fcbd6a1560 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java @@ -32,13 +32,16 @@ package org.opensearch.search.aggregations.pipeline; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.WriteRequest; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.search.aggregations.AggregationBuilders; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.PipelineAggregatorBuilders; @@ -54,12 +57,16 @@ import org.opensearch.search.aggregations.metrics.SumAggregationBuilder; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.filter; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.sum; @@ -72,7 +79,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class MaxBucketIT extends OpenSearchIntegTestCase { +public class MaxBucketIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; @@ -83,6 +90,18 @@ public class MaxBucketIT extends OpenSearchIntegTestCase { static int numValueBuckets; static long[] valueCounts; + public MaxBucketIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); @@ -519,13 +538,13 @@ public void testNested() throws Exception { /** * https://github.com/elastic/elasticsearch/issues/33514 - * + * <p> * This bug manifests as the 
max_bucket agg ("peak") being added to the response twice, because * the pipeline agg is run twice. This makes invalid JSON and breaks conversion to maps. * The bug was caused by an UnmappedTerms being chosen as the first reduction target. UnmappedTerms * delegated reduction to the first non-unmapped agg, which would reduce and run pipeline aggs. But then * execution returns to the UnmappedTerms and _it_ runs pipelines as well, doubling up on the values. - * + * <p> * Applies to any pipeline agg, not just max. */ public void testFieldIsntWrittenOutTwice() throws Exception { @@ -585,7 +604,8 @@ public void testFieldIsntWrittenOutTwice() throws Exception { groupByLicenseAgg.subAggregation(peakPipelineAggBuilder); SearchResponse response = client().prepareSearch("foo_*").setSize(0).addAggregation(groupByLicenseAgg).get(); - BytesReference bytes = XContentHelper.toXContent(response, XContentType.JSON, false); - XContentHelper.convertToMap(bytes, false, XContentType.JSON); + BytesReference bytes = org.opensearch.core.xcontent.XContentHelper.toXContent(response, MediaTypeRegistry.JSON, false); + XContentHelper.convertToMap(bytes, false, MediaTypeRegistry.JSON); + internalCluster().wipeIndices("foo_*"); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MinBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MinBucketIT.java index 5f7e5e5174254..44d12436382f6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MinBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MinBucketIT.java @@ -32,21 +32,28 @@ package org.opensearch.search.aggregations.pipeline; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; -import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.search.aggregations.bucket.terms.IncludeExclude; +import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.search.aggregations.AggregationBuilders.terms; @@ -58,7 +65,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class MinBucketIT extends OpenSearchIntegTestCase { +public class MinBucketIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; @@ -69,6 +76,18
@@ public class MinBucketIT extends OpenSearchIntegTestCase { static int numValueBuckets; static long[] valueCounts; + public MinBucketIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java index 91390edc7e872..d35b80b7918fe 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.aggregations.pipeline; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.bulk.BulkRequestBuilder; import org.opensearch.action.index.IndexRequestBuilder; @@ -40,13 +42,14 @@ import org.opensearch.action.support.WriteRequest; import org.opensearch.client.Client; import org.opensearch.common.collect.EvictingQueue; +import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.opensearch.search.aggregations.metrics.Avg; import org.opensearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.opensearch.test.OpenSearchIntegTestCase; - +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.util.ArrayList; @@ -58,6 +61,7 @@ import java.util.Map; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.avg; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.max; @@ -72,7 +76,7 @@ import static org.hamcrest.core.IsNull.nullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class MovAvgIT extends OpenSearchIntegTestCase { +public class MovAvgIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String INTERVAL_FIELD = "l_value"; private static final String VALUE_FIELD = "v_value"; private static final String VALUE_FIELD2 = "v_value2"; @@ -128,6 +132,18 @@ public String toString() { } } + public MovAvgIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override public void setupSuiteScopeCluster() 
throws Exception { prepareCreate("idx").setMapping( @@ -1146,7 +1162,7 @@ public void testHoltWintersMinimization() { * the default settings. Which means our mock histo will match the generated result (which it won't * if the minimizer is actually working, since the coefficients will be different and thus generate different * data) - * + * <p> * We can simulate this by setting the window size == size of histo */ public void testMinimizeNotEnoughData() { @@ -1298,6 +1314,7 @@ public void testPredictWithNonEmptyBuckets() throws Exception { .setSource(jsonBuilder().startObject().field(INTERVAL_FIELD, i).field(VALUE_FIELD2, 10).endObject()) ); } + indexRandomForConcurrentSearch("predict_non_empty"); bulkBuilder.get(); ensureSearchable(); @@ -1342,6 +1359,7 @@ public void testPredictWithNonEmptyBuckets() throws Exception { assertThat(movAvgAgg, nullValue()); } } + internalCluster().wipeIndices("predict_non_empty"); } private void assertValidIterators(Iterator expectedBucketIter, Iterator expectedCountsIter, Iterator expectedValuesIter) { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketIT.java index 1da079781dc63..29cb334bfcd00 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketIT.java @@ -32,10 +32,13 @@ package org.opensearch.search.aggregations.pipeline; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.ExceptionsHelper; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.terms.IncludeExclude; @@ -43,14 +46,17 @@ import org.opensearch.search.aggregations.metrics.Percentile; import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.Iterator; import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.search.aggregations.AggregationBuilders.terms; @@ -62,7 +68,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class PercentilesBucketIT extends OpenSearchIntegTestCase { +public class PercentilesBucketIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; private static final double[] PERCENTS = { 0.0, 1.0, 25.0, 50.0, 75.0, 99.0, 100.0 }; @@ -73,6 +79,18 @@ public class PercentilesBucketIT extends OpenSearchIntegTestCase { static int numValueBuckets; static long[] valueCounts; + 
public PercentilesBucketIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SerialDiffIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SerialDiffIT.java index f5a5d025946ec..507bff51f0e39 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SerialDiffIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SerialDiffIT.java @@ -32,22 +32,29 @@ package org.opensearch.search.aggregations.pipeline; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.collect.EvictingQueue; +import org.opensearch.common.settings.Settings; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.opensearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.avg; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.max; @@ -61,7 +68,7 @@ import static org.hamcrest.core.IsNull.nullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class SerialDiffIT extends OpenSearchIntegTestCase { +public class SerialDiffIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String INTERVAL_FIELD = "l_value"; private static final String VALUE_FIELD = "v_value"; @@ -90,6 +97,18 @@ public String toString() { } } + public SerialDiffIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + private ValuesSourceAggregationBuilder<? 
extends ValuesSourceAggregationBuilder<?>> randomMetric(String name, String field) { int rand = randomIntBetween(0, 3); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/StatsBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/StatsBucketIT.java index e9f34f6aa65d9..fbaf799871c8a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/StatsBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/StatsBucketIT.java @@ -32,8 +32,11 @@ package org.opensearch.search.aggregations.pipeline; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; @@ -42,11 +45,15 @@ import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.search.aggregations.AggregationBuilders.terms; @@ -58,10 +65,9 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class StatsBucketIT extends OpenSearchIntegTestCase { +public class StatsBucketIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; - static int numDocs; static int interval; static int minRandomValue; @@ -69,6 +75,18 @@ public class StatsBucketIT extends OpenSearchIntegTestCase { static int numValueBuckets; static long[] valueCounts; + public StatsBucketIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SumBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SumBucketIT.java index 5bd962017c247..a5967124ff921 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SumBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SumBucketIT.java @@ -32,8 +32,11 @@ package org.opensearch.search.aggregations.pipeline; +import 
com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; @@ -42,11 +45,15 @@ import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.search.aggregations.AggregationBuilders.terms; @@ -58,7 +65,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class SumBucketIT extends OpenSearchIntegTestCase { +public class SumBucketIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; @@ -69,6 +76,18 @@ public class SumBucketIT extends OpenSearchIntegTestCase { static int numValueBuckets; static long[] valueCounts; + public SumBucketIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java b/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java index 070a2bd7d6b7c..fb84134120e00 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java @@ -8,13 +8,10 @@ package org.opensearch.search.backpressure; -import org.hamcrest.MatcherAssert; -import org.junit.After; -import org.junit.Before; -import org.opensearch.action.ActionListener; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.ActionResponse; import org.opensearch.action.ActionType; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.opensearch.action.search.SearchShardTask; @@ -22,10 +19,14 @@ import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.HandledTransportAction; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; -import 
org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.tasks.TaskCancelledException; +import org.opensearch.core.tasks.TaskId; import org.opensearch.plugins.ActionPlugin; import org.opensearch.plugins.Plugin; import org.opensearch.search.backpressure.settings.NodeDuressSettings; @@ -34,14 +35,17 @@ import org.opensearch.search.backpressure.settings.SearchTaskSettings; import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskCancelledException; -import org.opensearch.tasks.TaskId; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; +import org.hamcrest.MatcherAssert; +import org.junit.After; +import org.junit.Before; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; @@ -50,16 +54,29 @@ import java.util.concurrent.TimeUnit; import java.util.function.Supplier; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE) -public class SearchBackpressureIT extends OpenSearchIntegTestCase { +public class SearchBackpressureIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final TimeValue TIMEOUT = new TimeValue(10, TimeUnit.SECONDS); private static final int MOVING_AVERAGE_WINDOW_SIZE = 10; + public SearchBackpressureIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override protected Collection<Class<? extends Plugin>> nodePlugins() { final List<Class<? 
extends Plugin>> plugins = new ArrayList<>(super.nodePlugins()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchRedStateIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchRedStateIndexIT.java index 2b4aba029466a..ad1ce0582cfb3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchRedStateIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchRedStateIndexIT.java @@ -32,8 +32,9 @@ package org.opensearch.search.basic; -import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.ClusterState; @@ -44,11 +45,14 @@ import org.opensearch.core.rest.RestStatus; import org.opensearch.search.SearchService; import org.opensearch.test.OpenSearchIntegTestCase; - +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.junit.After; +import java.util.Arrays; +import java.util.Collection; import java.util.List; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -56,7 +60,19 @@ import static org.hamcrest.Matchers.lessThan; @OpenSearchIntegTestCase.ClusterScope(minNumDataNodes = 2) -public class SearchRedStateIndexIT extends OpenSearchIntegTestCase { +public class SearchRedStateIndexIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public SearchRedStateIndexIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } public void testAllowPartialsWithRedState() throws Exception { final int numShards = cluster().numDataNodes() + 2; @@ -131,6 +147,7 @@ private void buildRedIndex(int numShards) throws Exception { client().prepareIndex("test").setId("" + i).setSource("field1", "value1").get(); } refresh(); + indexRandomForConcurrentSearch("test"); internalCluster().stopRandomDataNode(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileCreatingIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileCreatingIndexIT.java index 6099c5342a9d3..681f7081fa2dc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileCreatingIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileCreatingIndexIT.java @@ -32,13 +32,20 @@ package org.opensearch.search.basic; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.admin.indices.refresh.RefreshResponse; import org.opensearch.action.search.SearchResponse; import org.opensearch.client.Client; import org.opensearch.cluster.health.ClusterHealthStatus; +import org.opensearch.common.settings.Settings; import org.opensearch.index.query.QueryBuilders; 
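
Alongside the parameterization, many tests in this patch add an indexRandomForConcurrentSearch(...) call after their refresh() calls, as in the buildRedIndex change above. A condensed sketch of the pattern; my reading of the helper's purpose is an assumption, not something the diff itself states:

    client().prepareIndex("test").setId("1").setSource("field1", "value1").get();
    refresh();
    // Assumption: the helper produces additional segments (for example by
    // indexing and later deleting filler documents) so the concurrent,
    // multi-slice search path has more than one segment to divide among
    // threads; on a single-segment index the concurrent run would be
    // indistinguishable from the sequential one.
    indexRandomForConcurrentSearch("test");
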
-import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -46,7 +53,20 @@ * This test basically verifies that search with a single shard active (cause we indexed to it) and other * shards possibly not active at all (cause they haven't allocated) will still work. */ -public class SearchWhileCreatingIndexIT extends OpenSearchIntegTestCase { +public class SearchWhileCreatingIndexIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public SearchWhileCreatingIndexIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + public void testIndexCausesIndexCreation() throws Exception { searchWhileCreatingIndex(false, 1); // 1 replica in our default... } @@ -80,6 +100,7 @@ private void searchWhileCreatingIndex(boolean createIndex, int numberOfReplicas) } client().prepareIndex("test").setId(id).setSource("field", "test").get(); RefreshResponse refreshResponse = client().admin().indices().prepareRefresh("test").get(); + indexRandomForConcurrentSearch("test"); // at least one shard should be successful when refreshing assertThat(refreshResponse.getSuccessfulShards(), greaterThanOrEqualTo(1)); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileRelocatingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileRelocatingIT.java index 1f1384cc5f72d..f7b8b0df7dca7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileRelocatingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileRelocatingIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.basic; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; @@ -40,20 +42,36 @@ import org.opensearch.common.settings.Settings; import org.opensearch.search.SearchHits; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.atomic.AtomicBoolean; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoTimeout; import static org.opensearch.test.hamcrest.OpenSearchAssertions.formatShardStatus; import static org.hamcrest.Matchers.equalTo; @OpenSearchIntegTestCase.ClusterScope(minNumDataNodes = 2) -public class 
SearchWhileRelocatingIT extends OpenSearchIntegTestCase { +public class SearchWhileRelocatingIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public SearchWhileRelocatingIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } public void testSearchAndRelocateConcurrentlyRandomReplicas() throws Exception { testSearchAndRelocateConcurrently(randomIntBetween(0, 1)); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomExceptionsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomExceptionsIT.java index 0b55ea9119d89..614ec2ebd634a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomExceptionsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomExceptionsIT.java @@ -32,18 +32,18 @@ package org.opensearch.search.basic; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.FilterDirectoryReader; import org.apache.lucene.index.LeafReader; import org.apache.lucene.tests.util.English; - import org.opensearch.OpenSearchException; import org.opensearch.action.DocWriteResponse; import org.opensearch.action.admin.indices.refresh.RefreshResponse; import org.opensearch.action.index.IndexResponse; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; -import org.opensearch.common.Strings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; @@ -55,6 +55,7 @@ import org.opensearch.plugins.Plugin; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.engine.MockEngineSupport; import org.opensearch.test.engine.ThrowingLeafReaderWrapper; @@ -66,9 +67,22 @@ import java.util.Random; import java.util.concurrent.ExecutionException; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; -public class SearchWithRandomExceptionsIT extends OpenSearchIntegTestCase { +public class SearchWithRandomExceptionsIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public SearchWithRandomExceptionsIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { @@ -81,16 +95,15 @@ protected boolean addMockInternalEngine() { } public void testRandomExceptions() throws IOException, InterruptedException, ExecutionException { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("test") - .field("type", "keyword") - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("test") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .toString(); final double lowLevelRate; final double topLevelRate; if (frequently()) { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomIOExceptionsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomIOExceptionsIT.java index b0adc00f37fee..b45b334fc1d1c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomIOExceptionsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomIOExceptionsIT.java @@ -32,8 +32,9 @@ package org.opensearch.search.basic; -import org.apache.lucene.tests.util.English; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.apache.lucene.tests.util.English; import org.opensearch.OpenSearchException; import org.opensearch.action.DocWriteResponse; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; @@ -42,7 +43,6 @@ import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.client.Requests; -import org.opensearch.common.Strings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentFactory; @@ -50,6 +50,7 @@ import org.opensearch.plugins.Plugin; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.store.MockFSDirectoryFactory; import org.opensearch.test.store.MockFSIndexStore; @@ -58,10 +59,23 @@ import java.util.Collection; import java.util.concurrent.ExecutionException; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; -public class SearchWithRandomIOExceptionsIT extends OpenSearchIntegTestCase { +public class SearchWithRandomIOExceptionsIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public SearchWithRandomIOExceptionsIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } @Override protected Collection<Class<? extends Plugin>> nodePlugins() { @@ -69,16 +83,15 @@ protected Collection<Class<? 
extends Plugin>> nodePlugins() { } public void testRandomDirectoryIOExceptions() throws IOException, InterruptedException, ExecutionException { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("test") - .field("type", "keyword") - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("test") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .toString(); final double exceptionRate; final double exceptionOnOpenRate; if (frequently()) { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportSearchFailuresIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportSearchFailuresIT.java index 841821b5bbad6..0e337822ba0e7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportSearchFailuresIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportSearchFailuresIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.basic; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.OpenSearchException; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.indices.refresh.RefreshResponse; @@ -41,23 +43,40 @@ import org.opensearch.client.Requests; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.Priority; +import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.MatchQueryBuilder; import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; import static org.opensearch.client.Requests.clusterHealthRequest; import static org.opensearch.client.Requests.refreshRequest; import static org.opensearch.client.Requests.searchRequest; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -public class TransportSearchFailuresIT extends OpenSearchIntegTestCase { +public class TransportSearchFailuresIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public TransportSearchFailuresIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override protected int maximumNumberOfReplicas() { return 1; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportTwoNodesSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportTwoNodesSearchIT.java index 069559d6d11b1..a82b6f12755ca 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportTwoNodesSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportTwoNodesSearchIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.basic; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.OpenSearchException; import org.opensearch.action.search.MultiSearchResponse; import org.opensearch.action.search.SearchPhaseExecutionException; @@ -51,9 +53,11 @@ import org.opensearch.search.aggregations.bucket.global.Global; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.Set; import java.util.TreeSet; @@ -66,6 +70,7 @@ import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.builder.SearchSourceBuilder.searchSource; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.equalTo; @@ -74,7 +79,19 @@ import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.startsWith; -public class TransportTwoNodesSearchIT extends OpenSearchIntegTestCase { +public class TransportTwoNodesSearchIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public TransportTwoNodesSearchIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } @Override protected int numberOfReplicas() { @@ -105,6 +122,7 @@ private Set<String> prepareData(int numShards) throws Exception { fullExpectedIds.add(Integer.toString(i)); } refresh(); + indexRandomForConcurrentSearch("test"); return fullExpectedIds; } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java index e9ea88b5945a3..13b4abb58b4df 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java @@ -32,14 +32,17 @@ package org.opensearch.search.fetch; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.util.BytesRef; import org.opensearch.action.search.SearchResponse; -import org.opensearch.core.common.ParsingException; import org.opensearch.common.document.DocumentField; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.ParsingException; import 
org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.XContentBuilder; @@ -48,12 +51,13 @@ import org.opensearch.plugins.SearchPlugin; import org.opensearch.search.SearchExtBuilder; import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -64,11 +68,25 @@ import static java.util.Collections.singletonList; import static org.opensearch.client.Requests.indexRequest; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; import static org.hamcrest.CoreMatchers.equalTo; @ClusterScope(scope = Scope.SUITE, supportsDedicatedMasters = false, numDataNodes = 2) -public class FetchSubPhasePluginIT extends OpenSearchIntegTestCase { +public class FetchSubPhasePluginIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public FetchSubPhasePluginIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Collections.singletonList(FetchTermVectorsPlugin.class); @@ -95,6 +113,7 @@ public void testPlugin() throws Exception { .actionGet(); client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch() .setSource(new SearchSourceBuilder().ext(Collections.singletonList(new TermVectorsFetchBuilder("test")))) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/InnerHitsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/InnerHitsIT.java index 5ae17e84de135..b743c00bf4549 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/InnerHitsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/InnerHitsIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.fetch.subphase; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.util.ArrayUtil; import org.opensearch.action.index.IndexRequestBuilder; @@ -54,8 +56,8 @@ import org.opensearch.search.fetch.subphase.highlight.HighlightBuilder; import org.opensearch.search.sort.FieldSortBuilder; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -72,6 +74,7 @@ import static org.opensearch.index.query.QueryBuilders.matchQuery; import static org.opensearch.index.query.QueryBuilders.nestedQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAllSuccessful; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; @@ -84,7 +87,19 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -public class InnerHitsIT extends OpenSearchIntegTestCase { +public class InnerHitsIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public InnerHitsIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { @@ -876,6 +891,7 @@ public void testNestedSource() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("index1"); // the field name (comments.message) used for source filtering should be the same as when using that field for // other features (like in the query dsl or aggs) in order for consistency: @@ -952,6 +968,7 @@ public void testInnerHitsWithIgnoreUnmapped() throws Exception { client().prepareIndex("index1").setId("1").setSource("nested_type", Collections.singletonMap("key", "value")).get(); client().prepareIndex("index2").setId("3").setSource("key", "value").get(); refresh(); + indexRandomForConcurrentSearch("index1", "index2"); SearchResponse response = client().prepareSearch("index1", "index2") .setQuery( @@ -981,6 +998,7 @@ public void testUseMaxDocInsteadOfSize() throws Exception { .setRefreshPolicy(IMMEDIATE) .get(); + indexRandomForConcurrentSearch("index2"); QueryBuilder query = nestedQuery("nested", matchQuery("nested.field", "value1"), ScoreMode.Avg).innerHit( new InnerHitBuilder().setSize(ArrayUtil.MAX_ARRAY_LENGTH - 1) ); @@ -998,6 +1016,7 @@ public void testTooHighResultWindow() throws Exception { ) .setRefreshPolicy(IMMEDIATE) .get(); + indexRandomForConcurrentSearch("index2"); SearchResponse response = client().prepareSearch("index2") .setQuery( nestedQuery("nested", matchQuery("nested.field", "value1"), ScoreMode.Avg).innerHit( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/MatchedQueriesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/MatchedQueriesIT.java index d83f1eb776b20..a1adc6f99b92a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/MatchedQueriesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/MatchedQueriesIT.java @@ -32,16 +32,22 @@ package org.opensearch.search.fetch.subphase; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentHelper; import org.opensearch.index.query.MatchQueryBuilder; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.search.SearchHit; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; import static org.opensearch.index.query.QueryBuilders.boolQuery; import static org.opensearch.index.query.QueryBuilders.constantScoreQuery; @@ -52,11 +58,27 @@ import static org.opensearch.index.query.QueryBuilders.termQuery; import static org.opensearch.index.query.QueryBuilders.termsQuery; import static org.opensearch.index.query.QueryBuilders.wrapperQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasItemInArray; +import static org.hamcrest.Matchers.hasKey; + 
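
The MatchedQueriesIT hunks below also track an API migration: SearchHit.getMatchedQueries(), which exposed only the names of matched named queries as a String[], gives way to getMatchedQueriesAndScores(), a name-to-score map, with getMatchedQueryScore(String) as a per-query accessor; requests opt in to score reporting via setIncludeNamedQueriesScore(true). A condensed before/after, using only calls that appear in these hunks (note that the diff asserts a constant 1f for filter-style matches such as range queries and greaterThan(0f) for scoring matches):

    // Before: names only.
    assertThat(hit.getMatchedQueries(), hasItemInArray("test2"));

    // After: names map to per-query scores once the request opts in.
    SearchResponse searchResponse = client().prepareSearch()
        .setQuery(rangeQuery("number").gte(2).queryName("test2"))
        .setIncludeNamedQueriesScore(true)
        .get();
    SearchHit hit = searchResponse.getHits().getAt(0);
    assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1));
    assertThat(hit.getMatchedQueriesAndScores(), hasKey("test2"));
    assertThat(hit.getMatchedQueryScore("test2"), equalTo(1f));
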
+public class MatchedQueriesIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public MatchedQueriesIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } -public class MatchedQueriesIT extends OpenSearchIntegTestCase { public void testSimpleMatchedQueryFromFilteredQuery() throws Exception { createIndex("test"); ensureGreen(); @@ -65,6 +87,7 @@ public void testSimpleMatchedQueryFromFilteredQuery() throws Exception { client().prepareIndex("test").setId("2").setSource("name", "test2", "number", 2).get(); client().prepareIndex("test").setId("3").setSource("name", "test3", "number", 3).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery( @@ -74,15 +97,18 @@ public void testSimpleMatchedQueryFromFilteredQuery() throws Exception { .should(rangeQuery("number").gte(2).queryName("test2")) ) ) + .setIncludeNamedQueriesScore(true) .get(); assertHitCount(searchResponse, 3L); for (SearchHit hit : searchResponse.getHits()) { if (hit.getId().equals("3") || hit.getId().equals("2")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("test2")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("test2")); + assertThat(hit.getMatchedQueryScore("test2"), equalTo(1f)); } else if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("test1")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("test1")); + assertThat(hit.getMatchedQueryScore("test1"), equalTo(1f)); } else { fail("Unexpected document returned with id " + hit.getId()); } @@ -92,15 +118,18 @@ public void testSimpleMatchedQueryFromFilteredQuery() throws Exception { .setQuery( boolQuery().should(rangeQuery("number").lte(2).queryName("test1")).should(rangeQuery("number").gt(2).queryName("test2")) ) + .setIncludeNamedQueriesScore(true) .get(); assertHitCount(searchResponse, 3L); for (SearchHit hit : searchResponse.getHits()) { if (hit.getId().equals("1") || hit.getId().equals("2")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("test1")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("test1")); + assertThat(hit.getMatchedQueryScore("test1"), equalTo(1f)); } else if (hit.getId().equals("3")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("test2")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("test2")); + assertThat(hit.getMatchedQueryScore("test2"), equalTo(1f)); } else { fail("Unexpected document returned with id " + hit.getId()); } @@ -115,6 +144,7 @@ public void testSimpleMatchedQueryFromTopLevelFilter() throws Exception { client().prepareIndex("test").setId("2").setSource("name", "test").get(); client().prepareIndex("test").setId("3").setSource("name", "test").get(); 
refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) @@ -125,12 +155,15 @@ public void testSimpleMatchedQueryFromTopLevelFilter() throws Exception { assertHitCount(searchResponse, 3L); for (SearchHit hit : searchResponse.getHits()) { if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueries().length, equalTo(2)); - assertThat(hit.getMatchedQueries(), hasItemInArray("name")); - assertThat(hit.getMatchedQueries(), hasItemInArray("title")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); + assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("title")); + assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f)); } else if (hit.getId().equals("2") || hit.getId().equals("3")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("name")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); + assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); } else { fail("Unexpected document returned with id " + hit.getId()); } @@ -146,12 +179,15 @@ public void testSimpleMatchedQueryFromTopLevelFilter() throws Exception { assertHitCount(searchResponse, 3L); for (SearchHit hit : searchResponse.getHits()) { if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueries().length, equalTo(2)); - assertThat(hit.getMatchedQueries(), hasItemInArray("name")); - assertThat(hit.getMatchedQueries(), hasItemInArray("title")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); + assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("title")); + assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f)); } else if (hit.getId().equals("2") || hit.getId().equals("3")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("name")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); + assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); } else { fail("Unexpected document returned with id " + hit.getId()); } @@ -166,6 +202,7 @@ public void testSimpleMatchedQueryFromTopLevelFilterAndFilteredQuery() throws Ex client().prepareIndex("test").setId("2").setSource("name", "test", "title", "title2").get(); client().prepareIndex("test").setId("3").setSource("name", "test", "title", "title3").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(boolQuery().must(matchAllQuery()).filter(termsQuery("title", "title1", "title2", "title3").queryName("title"))) @@ -174,9 +211,11 @@ public void testSimpleMatchedQueryFromTopLevelFilterAndFilteredQuery() throws Ex assertHitCount(searchResponse, 3L); for (SearchHit hit : searchResponse.getHits()) { if (hit.getId().equals("1") || hit.getId().equals("2") || hit.getId().equals("3")) { - assertThat(hit.getMatchedQueries().length, equalTo(2)); - assertThat(hit.getMatchedQueries(), hasItemInArray("name")); - assertThat(hit.getMatchedQueries(), hasItemInArray("title")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(2)); + 
assertThat(hit.getMatchedQueriesAndScores(), hasKey("name")); + assertThat(hit.getMatchedQueryScore("name"), greaterThan(0f)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("title")); + assertThat(hit.getMatchedQueryScore("title"), greaterThan(0f)); } else { fail("Unexpected document returned with id " + hit.getId()); } @@ -198,56 +237,63 @@ public void testSimpleMatchedQueryFromTopLevelFilterAndFilteredQuery() throws Ex } } - public void testRegExpQuerySupportsName() { + public void testRegExpQuerySupportsName() throws InterruptedException { createIndex("test1"); ensureGreen(); client().prepareIndex("test1").setId("1").setSource("title", "title1").get(); refresh(); + indexRandomForConcurrentSearch("test1"); SearchResponse searchResponse = client().prepareSearch() .setQuery(QueryBuilders.regexpQuery("title", "title1").queryName("regex")) + .setIncludeNamedQueriesScore(true) .get(); assertHitCount(searchResponse, 1L); for (SearchHit hit : searchResponse.getHits()) { if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("regex")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("regex")); + assertThat(hit.getMatchedQueryScore("regex"), equalTo(1f)); } else { fail("Unexpected document returned with id " + hit.getId()); } } } - public void testPrefixQuerySupportsName() { + public void testPrefixQuerySupportsName() throws InterruptedException { createIndex("test1"); ensureGreen(); client().prepareIndex("test1").setId("1").setSource("title", "title1").get(); refresh(); + indexRandomForConcurrentSearch("test1"); - SearchResponse searchResponse = client().prepareSearch() + var query = client().prepareSearch() .setQuery(QueryBuilders.prefixQuery("title", "title").queryName("prefix")) - .get(); + .setIncludeNamedQueriesScore(true); + var searchResponse = query.get(); assertHitCount(searchResponse, 1L); for (SearchHit hit : searchResponse.getHits()) { if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("prefix")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("prefix")); + assertThat(hit.getMatchedQueryScore("prefix"), equalTo(1f)); } else { fail("Unexpected document returned with id " + hit.getId()); } } } - public void testFuzzyQuerySupportsName() { + public void testFuzzyQuerySupportsName() throws InterruptedException { createIndex("test1"); ensureGreen(); client().prepareIndex("test1").setId("1").setSource("title", "title1").get(); refresh(); + indexRandomForConcurrentSearch("test1"); SearchResponse searchResponse = client().prepareSearch() .setQuery(QueryBuilders.fuzzyQuery("title", "titel1").queryName("fuzzy")) @@ -256,42 +302,47 @@ public void testFuzzyQuerySupportsName() { for (SearchHit hit : searchResponse.getHits()) { if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("fuzzy")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("fuzzy")); + assertThat(hit.getMatchedQueryScore("fuzzy"), greaterThan(0f)); } else { fail("Unexpected document returned with id " + hit.getId()); } } } - public void testWildcardQuerySupportsName() { + public void testWildcardQuerySupportsName() throws InterruptedException { 
createIndex("test1"); ensureGreen(); client().prepareIndex("test1").setId("1").setSource("title", "title1").get(); refresh(); + indexRandomForConcurrentSearch("test1"); SearchResponse searchResponse = client().prepareSearch() .setQuery(QueryBuilders.wildcardQuery("title", "titl*").queryName("wildcard")) + .setIncludeNamedQueriesScore(true) .get(); assertHitCount(searchResponse, 1L); for (SearchHit hit : searchResponse.getHits()) { if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("wildcard")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("wildcard")); + assertThat(hit.getMatchedQueryScore("wildcard"), equalTo(1f)); } else { fail("Unexpected document returned with id " + hit.getId()); } } } - public void testSpanFirstQuerySupportsName() { + public void testSpanFirstQuerySupportsName() throws InterruptedException { createIndex("test1"); ensureGreen(); client().prepareIndex("test1").setId("1").setSource("title", "title1 title2").get(); refresh(); + indexRandomForConcurrentSearch("test1"); SearchResponse searchResponse = client().prepareSearch() .setQuery(QueryBuilders.spanFirstQuery(QueryBuilders.spanTermQuery("title", "title1"), 10).queryName("span")) @@ -300,8 +351,9 @@ public void testSpanFirstQuerySupportsName() { for (SearchHit hit : searchResponse.getHits()) { if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("span")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("span")); + assertThat(hit.getMatchedQueryScore("span"), greaterThan(0f)); } else { fail("Unexpected document returned with id " + hit.getId()); } @@ -318,6 +370,7 @@ public void testMatchedWithShould() throws Exception { client().prepareIndex("test").setId("1").setSource("content", "Lorem ipsum dolor sit amet").get(); client().prepareIndex("test").setId("2").setSource("content", "consectetur adipisicing elit").get(); refresh(); + indexRandomForConcurrentSearch("test"); // Execute search at least two times to load it in cache int iter = scaledRandomIntBetween(2, 10); @@ -334,11 +387,13 @@ public void testMatchedWithShould() throws Exception { assertHitCount(searchResponse, 2L); for (SearchHit hit : searchResponse.getHits()) { if (hit.getId().equals("1")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("dolor")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("dolor")); + assertThat(hit.getMatchedQueryScore("dolor"), greaterThan(0f)); } else if (hit.getId().equals("2")) { - assertThat(hit.getMatchedQueries().length, equalTo(1)); - assertThat(hit.getMatchedQueries(), hasItemInArray("elit")); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("elit")); + assertThat(hit.getMatchedQueryScore("elit"), greaterThan(0f)); } else { fail("Unexpected document returned with id " + hit.getId()); } @@ -352,16 +407,20 @@ public void testMatchedWithWrapperQuery() throws Exception { client().prepareIndex("test").setId("1").setSource("content", "Lorem ipsum dolor sit amet").get(); refresh(); + indexRandomForConcurrentSearch("test"); MatchQueryBuilder matchQueryBuilder = matchQuery("content", 
"amet").queryName("abc"); - BytesReference matchBytes = XContentHelper.toXContent(matchQueryBuilder, XContentType.JSON, false); + BytesReference matchBytes = XContentHelper.toXContent(matchQueryBuilder, MediaTypeRegistry.JSON, false); TermQueryBuilder termQueryBuilder = termQuery("content", "amet").queryName("abc"); - BytesReference termBytes = XContentHelper.toXContent(termQueryBuilder, XContentType.JSON, false); + BytesReference termBytes = XContentHelper.toXContent(termQueryBuilder, MediaTypeRegistry.JSON, false); QueryBuilder[] queries = new QueryBuilder[] { wrapperQuery(matchBytes), constantScoreQuery(wrapperQuery(termBytes)) }; for (QueryBuilder query : queries) { SearchResponse searchResponse = client().prepareSearch().setQuery(query).get(); assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getMatchedQueries()[0], equalTo("abc")); + SearchHit hit = searchResponse.getHits().getAt(0); + assertThat(hit.getMatchedQueriesAndScores().size(), equalTo(1)); + assertThat(hit.getMatchedQueriesAndScores(), hasKey("abc")); + assertThat(hit.getMatchedQueryScore("abc"), greaterThan(0f)); } } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/CustomHighlighterSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/CustomHighlighterSearchIT.java index 7df5b9b88a69c..66cbf36137551 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/CustomHighlighterSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/CustomHighlighterSearchIT.java @@ -31,12 +31,15 @@ package org.opensearch.search.fetch.subphase.highlight; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.index.query.QueryBuilders; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.junit.Before; import java.io.IOException; @@ -45,6 +48,7 @@ import java.util.HashMap; import java.util.Map; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHighlight; import static org.hamcrest.Matchers.equalTo; @@ -52,7 +56,19 @@ * Integration test for highlighters registered by a plugin. */ @ClusterScope(scope = Scope.SUITE, supportsDedicatedMasters = false, numDataNodes = 1) -public class CustomHighlighterSearchIT extends OpenSearchIntegTestCase { +public class CustomHighlighterSearchIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public CustomHighlighterSearchIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index f2a22b99a86a3..f449a91a57279 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -31,26 +31,29 @@ package org.opensearch.search.fetch.subphase.highlight; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; import com.carrotsearch.randomizedtesting.generators.RandomPicks; + import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.tests.analysis.MockAnalyzer; -import org.apache.lucene.tests.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenFilter; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.search.join.ScoreMode; - +import org.apache.lucene.tests.analysis.MockAnalyzer; +import org.apache.lucene.tests.analysis.MockTokenizer; +import org.apache.lucene.tests.util.TimeUnits; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.WriteRequest; -import org.opensearch.common.Strings; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.Settings.Builder; import org.opensearch.common.time.DateFormatter; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.analysis.AbstractIndexAnalyzerProvider; import org.opensearch.index.analysis.AnalyzerProvider; import org.opensearch.index.analysis.PreConfiguredTokenFilter; @@ -65,17 +68,15 @@ import org.opensearch.indices.analysis.AnalysisModule; import org.opensearch.plugins.AnalysisPlugin; import org.opensearch.plugins.Plugin; -import org.opensearch.core.rest.RestStatus; import org.opensearch.search.SearchHit; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.fetch.subphase.highlight.HighlightBuilder.BoundaryScannerType; import org.opensearch.search.fetch.subphase.highlight.HighlightBuilder.Field; import org.opensearch.search.sort.SortBuilders; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalSettingsPlugin; import org.opensearch.test.MockKeywordPlugin; - +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.Matcher; import org.hamcrest.Matchers; @@ -109,6 +110,7 @@ import static org.opensearch.index.query.QueryBuilders.regexpQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; import static org.opensearch.index.query.QueryBuilders.wildcardQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.builder.SearchSourceBuilder.highlight; import static org.opensearch.search.builder.SearchSourceBuilder.searchSource; import static 
org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -127,16 +129,31 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.startsWith; -public class HighlighterSearchIT extends OpenSearchIntegTestCase { +// Higher timeout to accommodate the large number of tests in this class. See https://github.com/opensearch-project/OpenSearch/issues/12119 +@TimeoutSuite(millis = 35 * TimeUnits.MINUTE) +public class HighlighterSearchIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + // TODO as we move analyzers out of the core we need to move some of these into HighlighterWithAnalyzersTests private static final String[] ALL_TYPES = new String[] { "plain", "fvh", "unified" }; + public HighlighterSearchIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(InternalSettingsPlugin.class, MockKeywordPlugin.class, MockAnalysisPlugin.class); } - public void testHighlightingWithKeywordIgnoreBoundaryScanner() throws IOException { + public void testHighlightingWithKeywordIgnoreBoundaryScanner() throws IOException, InterruptedException { XContentBuilder mappings = jsonBuilder(); mappings.startObject(); mappings.startObject("properties") @@ -158,6 +175,7 @@ public void testHighlightingWithKeywordIgnoreBoundaryScanner() throws IOExceptio .setSource(jsonBuilder().startObject().array("tags", "foo baz", "foo baz", "foo baz", "foo bar").field("sort", 2).endObject()) .get(); refresh(); + indexRandomForConcurrentSearch("test"); for (BoundaryScannerType scanner : BoundaryScannerType.values()) { SearchResponse search = client().prepareSearch() @@ -171,12 +189,13 @@ public void testHighlightingWithKeywordIgnoreBoundaryScanner() throws IOExceptio } } - public void testHighlightingWithStoredKeyword() throws IOException { + public void testHighlightingWithStoredKeyword() throws IOException, InterruptedException { XContentBuilder mappings = jsonBuilder(); mappings.startObject(); mappings.startObject("properties").startObject("text").field("type", "keyword").field("store", true).endObject().endObject(); mappings.endObject(); assertAcked(prepareCreate("test").setMapping(mappings)); + indexRandomForConcurrentSearch("test"); client().prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("text", "foo").endObject()).get(); refresh(); SearchResponse search = client().prepareSearch() @@ -186,7 +205,7 @@ public void testHighlightingWithStoredKeyword() throws IOException { assertHighlight(search, 0, "text", 0, equalTo("<em>foo</em>")); } - public void testHighlightingWithWildcardName() throws IOException { + public void testHighlightingWithWildcardName() throws IOException, InterruptedException { // test the Kibana case with * as fieldname that will try to highlight all fields including meta fields XContentBuilder mappings = jsonBuilder(); mappings.startObject(); @@ -202,6 +221,7 @@ public void testHighlightingWithWildcardName() throws IOException { assertAcked(prepareCreate("test").setMapping(mappings)); client().prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("text", "text").endObject()).get(); refresh(); + 
indexRandomForConcurrentSearch("test"); for (String type : ALL_TYPES) { SearchResponse search = client().prepareSearch() .setQuery(constantScoreQuery(matchQuery("text", "text"))) @@ -211,7 +231,7 @@ public void testHighlightingWithWildcardName() throws IOException { } } - public void testFieldAlias() throws IOException { + public void testFieldAlias() throws IOException, InterruptedException { XContentBuilder mappings = jsonBuilder().startObject() .startObject("properties") .startObject("text") @@ -229,7 +249,7 @@ public void testFieldAlias() throws IOException { client().prepareIndex("test").setId("1").setSource("text", "foo").get(); refresh(); - + indexRandomForConcurrentSearch("test"); for (String type : ALL_TYPES) { HighlightBuilder builder = new HighlightBuilder().field(new Field("alias").highlighterType(type)) .requireFieldMatch(randomBoolean()); @@ -238,7 +258,7 @@ public void testFieldAlias() throws IOException { } } - public void testFieldAliasWithSourceLookup() throws IOException { + public void testFieldAliasWithSourceLookup() throws IOException, InterruptedException { XContentBuilder mappings = jsonBuilder().startObject() .startObject("properties") .startObject("text") @@ -257,7 +277,7 @@ public void testFieldAliasWithSourceLookup() throws IOException { client().prepareIndex("test").setId("1").setSource("text", "foo bar").get(); refresh(); - + indexRandomForConcurrentSearch("test"); for (String type : ALL_TYPES) { HighlightBuilder builder = new HighlightBuilder().field(new Field("alias").highlighterType(type)) .requireFieldMatch(randomBoolean()); @@ -266,7 +286,7 @@ public void testFieldAliasWithSourceLookup() throws IOException { } } - public void testFieldAliasWithWildcardField() throws IOException { + public void testFieldAliasWithWildcardField() throws IOException, InterruptedException { XContentBuilder mappings = jsonBuilder().startObject() .startObject("properties") .startObject("keyword") @@ -282,13 +302,14 @@ public void testFieldAliasWithWildcardField() throws IOException { client().prepareIndex("test").setId("1").setSource("keyword", "foo").get(); refresh(); + indexRandomForConcurrentSearch("test"); HighlightBuilder builder = new HighlightBuilder().field(new Field("al*")).requireFieldMatch(false); SearchResponse search = client().prepareSearch().setQuery(matchQuery("alias", "foo")).highlighter(builder).get(); assertHighlight(search, 0, "alias", 0, equalTo("<em>foo</em>")); } - public void testHighlightingWhenFieldsAreNotStoredThereIsNoSource() throws IOException { + public void testHighlightingWhenFieldsAreNotStoredThereIsNoSource() throws IOException, InterruptedException { XContentBuilder mappings = jsonBuilder(); mappings.startObject(); mappings.startObject("_source") @@ -315,6 +336,7 @@ public void testHighlightingWhenFieldsAreNotStoredThereIsNoSource() throws IOExc .setSource(jsonBuilder().startObject().field("unstored_text", "text").field("text", "text").endObject()) .get(); refresh(); + indexRandomForConcurrentSearch("test"); for (String type : ALL_TYPES) { SearchResponse search = client().prepareSearch() .setQuery(constantScoreQuery(matchQuery("text", "text"))) @@ -331,7 +353,7 @@ public void testHighlightingWhenFieldsAreNotStoredThereIsNoSource() throws IOExc } // see #3486 - public void testHighTermFrequencyDoc() throws IOException { + public void testHighTermFrequencyDoc() throws IOException, InterruptedException { assertAcked(prepareCreate("test").setMapping("name", "type=text,term_vector=with_positions_offsets,store=" + randomBoolean())); StringBuilder 
builder = new StringBuilder(); for (int i = 0; i < 6000; i++) { @@ -339,6 +361,7 @@ public void testHighTermFrequencyDoc() throws IOException { } client().prepareIndex("test").setId("1").setSource("name", builder.toString()).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse search = client().prepareSearch() .setQuery(constantScoreQuery(matchQuery("name", "abc"))) .highlighter(new HighlightBuilder().field("name")) @@ -366,6 +389,7 @@ public void testEnsureNoNegativeOffsets() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse search = client().prepareSearch() .setQuery(matchQuery("long_term", "thisisaverylongwordandmakessurethisfails foo highlighed")) .highlighter(new HighlightBuilder().field("long_term", 18, 1).highlighterType("fvh")) @@ -652,7 +676,7 @@ public void testHighlightIssue1994() throws Exception { assertHighlight(search, 0, "titleTV", 1, 2, equalTo("<em>highlight</em> other text")); } - public void testGlobalHighlightingSettingsOverriddenAtFieldLevel() { + public void testGlobalHighlightingSettingsOverriddenAtFieldLevel() throws InterruptedException { createIndex("test"); ensureGreen(); @@ -665,6 +689,7 @@ public void testGlobalHighlightingSettingsOverriddenAtFieldLevel() { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1 and field2 produces different tags"); SearchSourceBuilder source = searchSource().query(termQuery("field1", "test")) @@ -715,6 +740,7 @@ public void testHighlightingOnWildcardFields() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field*"); SearchSourceBuilder source = searchSource() @@ -764,6 +790,7 @@ public void testForceSourceWithSourceDisabled() throws Exception { .setSource("field1", "The quick brown fox jumps over the lazy dog", "field2", "second field content") .get(); refresh(); + indexRandomForConcurrentSearch("test"); // works using stored field SearchResponse searchResponse = client().prepareSearch("test") @@ -804,6 +831,7 @@ public void testPlainHighlighter() throws Exception { client().prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(termQuery("field1", "test")) @@ -1006,6 +1034,7 @@ public void testFVHManyMatches() throws Exception { String value = new String(new char[1024 * 256 / pattern.length()]).replace("\0", pattern); client().prepareIndex("test").setSource("field1", value).get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1 with default phrase limit"); SearchSourceBuilder source = searchSource().query(termQuery("field1", "t")) @@ -1097,6 +1126,7 @@ private void checkMatchedFieldsCase(boolean requireFieldMatch) throws Exception ); index("test", "type1", "3", "foo", "weird", "bar", "result"); refresh(); + indexRandomForConcurrentSearch("test"); Field fooField = new Field("foo").numOfFragments(1) .order("score") @@ -1389,6 +1419,7 @@ public void testMultiMapperVectorWithStore() throws Exception { ensureGreen(); client().prepareIndex("test").setId("1").setSource("title", "this is a test").get(); refresh(); + indexRandomForConcurrentSearch("test"); // simple search on body with standard analyzer with a simple field query SearchResponse 
search = client().prepareSearch() @@ -1434,6 +1465,7 @@ public void testMultiMapperVectorFromSource() throws Exception { client().prepareIndex("test").setId("1").setSource("title", "this is a test").get(); refresh(); + indexRandomForConcurrentSearch("test"); // simple search on body with standard analyzer with a simple field query SearchResponse search = client().prepareSearch() @@ -1479,6 +1511,7 @@ public void testMultiMapperNoVectorWithStore() throws Exception { ensureGreen(); client().prepareIndex("test").setId("1").setSource("title", "this is a test").get(); refresh(); + indexRandomForConcurrentSearch("test"); // simple search on body with standard analyzer with a simple field query SearchResponse search = client().prepareSearch() @@ -1523,6 +1556,7 @@ public void testMultiMapperNoVectorFromSource() throws Exception { ensureGreen(); client().prepareIndex("test").setId("1").setSource("title", "this is a test").get(); refresh(); + indexRandomForConcurrentSearch("test"); // simple search on body with standard analyzer with a simple field query SearchResponse search = client().prepareSearch() @@ -1552,6 +1586,7 @@ public void testFastVectorHighlighterShouldFailIfNoTermVectors() throws Exceptio .setSource("title", "This is a test for the enabling fast vector highlighter"); } indexRandom(true, indexRequestBuilders); + indexRandomForConcurrentSearch("test"); SearchResponse search = client().prepareSearch() .setQuery(matchPhraseQuery("title", "this is a test")) @@ -1589,6 +1624,7 @@ public void testDisableFastVectorHighlighter() throws Exception { .setSource("title", "This is a test for the workaround for the fast vector highlighting SOLR-3724"); } indexRandom(true, indexRequestBuilders); + indexRandomForConcurrentSearch("test"); SearchResponse search = client().prepareSearch() .setQuery(matchPhraseQuery("title", "test for the workaround")) @@ -1650,6 +1686,7 @@ public void testFSHHighlightAllMvFragments() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("tags", "tag")) @@ -1667,11 +1704,12 @@ public void testFSHHighlightAllMvFragments() throws Exception { ); } - public void testBoostingQuery() { + public void testBoostingQuery() throws InterruptedException { createIndex("test"); ensureGreen(); client().prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query( @@ -1683,11 +1721,12 @@ public void testBoostingQuery() { assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick <x>brown</x> fox jumps over the lazy dog")); } - public void testBoostingQueryTermVector() throws IOException { + public void testBoostingQueryTermVector() throws IOException, InterruptedException { assertAcked(prepareCreate("test").setMapping(type1TermVectorMapping())); ensureGreen(); client().prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query( @@ -1699,12 +1738,13 @@ public void testBoostingQueryTermVector() throws IOException { assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick <x>brown</x> fox jumps over the lazy 
dog")); } - public void testCommonTermsQuery() { + public void testCommonTermsQuery() throws InterruptedException { createIndex("test"); ensureGreen(); client().prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(commonTermsQuery("field2", "quick brown").cutoffFrequency(100)) @@ -1714,12 +1754,13 @@ public void testCommonTermsQuery() { assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <x>quick</x> <x>brown</x> fox jumps over the lazy dog")); } - public void testCommonTermsTermVector() throws IOException { + public void testCommonTermsTermVector() throws IOException, InterruptedException { assertAcked(prepareCreate("test").setMapping(type1TermVectorMapping())); ensureGreen(); client().prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(commonTermsQuery("field2", "quick brown").cutoffFrequency(100)) .highlighter(highlight().field("field2").order("score").preTags("<x>").postTags("</x>")); @@ -1745,6 +1786,7 @@ public void testPlainHighlightDifferentFragmenter() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setQuery(QueryBuilders.matchPhraseQuery("tags", "long tag")) @@ -1797,12 +1839,13 @@ public void testPlainHighlightDifferentFragmenter() throws Exception { ); } - public void testPlainHighlighterMultipleFields() { + public void testPlainHighlighterMultipleFields() throws InterruptedException { createIndex("test"); ensureGreen(); index("test", "type1", "1", "field1", "The <b>quick<b> brown fox", "field2", "The <b>slow<b> brown fox"); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("field1", "fox")) @@ -1815,7 +1858,7 @@ public void testPlainHighlighterMultipleFields() { assertHighlight(response, 0, "field2", 0, 1, equalTo("The <b>slow<b> brown <2>fox</2>")); } - public void testFastVectorHighlighterMultipleFields() { + public void testFastVectorHighlighterMultipleFields() throws InterruptedException { assertAcked( prepareCreate("test").setMapping( "field1", @@ -1828,6 +1871,7 @@ public void testFastVectorHighlighterMultipleFields() { index("test", "type1", "1", "field1", "The <b>quick<b> brown fox", "field2", "The <b>slow<b> brown fox"); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("field1", "fox")) @@ -1845,6 +1889,7 @@ public void testMissingStoredField() throws Exception { ensureGreen(); client().prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("field", "highlight").endObject()).get(); refresh(); + indexRandomForConcurrentSearch("test"); // This query used to fail when the field to highlight was absent SearchResponse response = client().prepareSearch("test") @@ -1885,6 +1930,7 @@ public void testNumericHighlighting() throws Exception { .setSource("text", "opensearch test", "byte", 25, "short", 42, "int", 100, "long", -1, "float", 3.2f, "double", 42.42) .get(); refresh(); + 
indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("text", "test")) @@ -1907,6 +1953,7 @@ public void testResetTwice() throws Exception { ensureGreen(); client().prepareIndex("test").setId("1").setSource("text", "opensearch test").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("text", "test")) @@ -1916,7 +1963,7 @@ public void testResetTwice() throws Exception { assertHitCount(response, 1L); } - public void testHighlightUsesHighlightQuery() throws IOException { + public void testHighlightUsesHighlightQuery() throws IOException, InterruptedException { assertAcked( prepareCreate("test").setMapping( "text", @@ -1927,6 +1974,7 @@ public void testHighlightUsesHighlightQuery() throws IOException { index("test", "type1", "1", "text", "Testing the highlight query feature"); refresh(); + indexRandomForConcurrentSearch("test"); for (String type : ALL_TYPES) { HighlightBuilder.Field field = new HighlightBuilder.Field("text"); @@ -1962,7 +2010,7 @@ private static String randomStoreField() { return ""; } - public void testHighlightNoMatchSize() throws IOException { + public void testHighlightNoMatchSize() throws IOException, InterruptedException { assertAcked( prepareCreate("test").setMapping( "text", @@ -1974,6 +2022,7 @@ public void testHighlightNoMatchSize() throws IOException { String text = "I am pretty long so some of me should get cut off. Second sentence"; index("test", "type1", "1", "text", text); refresh(); + indexRandomForConcurrentSearch("test"); // When you don't set noMatchSize you don't get any results if there isn't anything to highlight. HighlightBuilder.Field field = new HighlightBuilder.Field("text").fragmentSize(21).numOfFragments(1).highlighterType("plain"); @@ -2072,7 +2121,7 @@ public void testHighlightNoMatchSize() throws IOException { assertNotHighlighted(response, 0, "text"); } - public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException { + public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException, InterruptedException { assertAcked( prepareCreate("test").setMapping( "text", @@ -2085,6 +2134,7 @@ public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException { String text2 = "I am short"; index("test", "type1", "1", "text", new String[] { text1, text2 }); refresh(); + indexRandomForConcurrentSearch("test"); // The no match fragment should come from the first value of a multi-valued field HighlightBuilder.Field field = new HighlightBuilder.Field("text").fragmentSize(21) @@ -2167,7 +2217,7 @@ public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException { assertNotHighlighted(response, 0, "text"); } - public void testHighlightNoMatchSizeNumberOfFragments() throws IOException { + public void testHighlightNoMatchSizeNumberOfFragments() throws IOException, InterruptedException { assertAcked( prepareCreate("test").setMapping( "text", @@ -2181,6 +2231,7 @@ public void testHighlightNoMatchSizeNumberOfFragments() throws IOException { String text3 = "This is the fifth sentence"; index("test", "type1", "1", "text", new String[] { text1, text2, text3 }); refresh(); + indexRandomForConcurrentSearch("test"); // The no match fragment should come from the first value of a multi-valued field HighlightBuilder.Field field = new HighlightBuilder.Field("text").fragmentSize(1) @@ -2224,6 +2275,7 @@ public void testPostingsHighlighter() throws 
Exception { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy quick dog") .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(termQuery("field1", "test")) @@ -2301,6 +2353,7 @@ public void testPostingsHighlighterMultipleFields() throws Exception { "The <b>slow<b> brown fox. Second sentence." ); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("field1", "fox")) @@ -2325,6 +2378,7 @@ public void testPostingsHighlighterNumberOfFragments() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(termQuery("field1", "fox")) @@ -2357,6 +2411,7 @@ public void testPostingsHighlighterNumberOfFragments() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); source = searchSource().query(termQuery("field1", "fox")) .highlighter(highlight().field(new Field("field1").numOfFragments(0).preTags("<field1>").postTags("</field1>"))); @@ -2393,7 +2448,7 @@ public void testPostingsHighlighterNumberOfFragments() throws Exception { } } - public void testMultiMatchQueryHighlight() throws IOException { + public void testMultiMatchQueryHighlight() throws IOException, InterruptedException { XContentBuilder mapping = XContentFactory.jsonBuilder() .startObject() .startObject("properties") @@ -2415,6 +2470,7 @@ public void testMultiMatchQueryHighlight() throws IOException { .setSource("field1", "The quick brown fox jumps over", "field2", "The quick brown fox jumps over") .get(); refresh(); + indexRandomForConcurrentSearch("test"); final int iters = scaledRandomIntBetween(20, 30); for (int i = 0; i < iters; i++) { String highlighterType = rarely() ? null : RandomPicks.randomFrom(random(), ALL_TYPES); @@ -2460,6 +2516,7 @@ public void testPostingsHighlighterOrderByScore() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(termQuery("field1", "sentence")) @@ -2546,6 +2603,7 @@ public void testPostingsHighlighterMultiMapperWithStore() throws Exception { ensureGreen(); client().prepareIndex("test").setId("1").setSource("title", "this is a test . 
Second sentence.").get(); refresh(); + indexRandomForConcurrentSearch("test"); // simple search on body with standard analyzer with a simple field query SearchResponse searchResponse = client().prepareSearch() @@ -2604,6 +2662,7 @@ public void testPostingsHighlighterMultiMapperFromSource() throws Exception { client().prepareIndex("test").setId("1").setSource("title", "this is a test").get(); refresh(); + indexRandomForConcurrentSearch("test"); // simple search on body with standard analyzer with a simple field query SearchResponse searchResponse = client().prepareSearch() @@ -2653,13 +2712,14 @@ public void testPostingsHighlighterShouldFailIfNoOffsets() throws Exception { assertNoFailures(search); } - public void testPostingsHighlighterBoostingQuery() throws IOException { + public void testPostingsHighlighterBoostingQuery() throws IOException, InterruptedException { assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); client().prepareIndex("test") .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query( @@ -2670,7 +2730,7 @@ public void testPostingsHighlighterBoostingQuery() throws IOException { assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick <x>brown</x> fox jumps over the lazy dog! Second sentence.")); } - public void testPostingsHighlighterCommonTermsQuery() throws IOException { + public void testPostingsHighlighterCommonTermsQuery() throws IOException, InterruptedException { assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); @@ -2678,6 +2738,7 @@ public void testPostingsHighlighterCommonTermsQuery() throws IOException { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(commonTermsQuery("field2", "quick brown").cutoffFrequency(100)) @@ -2719,6 +2780,7 @@ public void testPostingsHighlighterPrefixQuery() throws Exception { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field2"); SearchSourceBuilder source = searchSource().query(prefixQuery("field2", "qui")).highlighter(highlight().field("field2")); @@ -2741,6 +2803,7 @@ public void testPostingsHighlighterFuzzyQuery() throws Exception { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field2"); SearchSourceBuilder source = searchSource().query(fuzzyQuery("field2", "quck")).highlighter(highlight().field("field2")); @@ -2764,6 +2827,7 @@ public void testPostingsHighlighterRegexpQuery() throws Exception { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! 
Second sentence.") .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field2"); SearchSourceBuilder source = searchSource().query(regexpQuery("field2", "qu[a-l]+k")).highlighter(highlight().field("field2")); @@ -2787,6 +2851,7 @@ public void testPostingsHighlighterWildcardQuery() throws Exception { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field2"); SearchSourceBuilder source = searchSource().query(wildcardQuery("field2", "qui*")).highlighter(highlight().field("field2")); @@ -2821,6 +2886,7 @@ public void testPostingsHighlighterTermRangeQuery() throws Exception { client().prepareIndex("test").setSource("field1", "this is a test", "field2", "aaab").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field2"); SearchSourceBuilder source = searchSource().query(rangeQuery("field2").gte("aaaa").lt("zzzz")) @@ -2838,6 +2904,7 @@ public void testPostingsHighlighterQueryString() throws Exception { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field2"); SearchSourceBuilder source = searchSource().query(queryStringQuery("qui*").defaultField("field2")) @@ -2859,6 +2926,7 @@ public void testPostingsHighlighterRegexpQueryWithinConstantScoreQuery() throws client().prepareIndex("test").setSource("field1", "The photography word will get highlighted").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(constantScoreQuery(regexpQuery("field1", "pho[a-z]+"))) @@ -2873,6 +2941,7 @@ public void testPostingsHighlighterMultiTermQueryMultipleLevels() throws Excepti client().prepareIndex("test").setSource("field1", "The photography word will get highlighted").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query( @@ -2890,6 +2959,7 @@ public void testPostingsHighlighterPrefixQueryWithinBooleanQuery() throws Except client().prepareIndex("test").setSource("field1", "The photography word will get highlighted").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query( @@ -2905,6 +2975,7 @@ public void testPostingsHighlighterQueryStringWithinFilteredQuery() throws Excep client().prepareIndex("test").setSource("field1", "The photography word will get highlighted").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query( @@ -3009,7 +3080,7 @@ public void testFastVectorHighlighterPhraseBoost() throws Exception { * because it doesn't support the concept of terms having a different weight based on position. 
* @param highlighterType highlighter to test */ - private void phraseBoostTestCase(String highlighterType) { + private void phraseBoostTestCase(String highlighterType) throws InterruptedException { ensureGreen(); StringBuilder text = new StringBuilder(); text.append("words words junk junk junk junk junk junk junk junk highlight junk junk junk junk together junk\n"); @@ -3022,6 +3093,7 @@ private void phraseBoostTestCase(String highlighterType) { } index("test", "type1", "1", "field1", text.toString()); refresh(); + indexRandomForConcurrentSearch("test"); // Match queries phraseBoostTestCaseForClauses( @@ -3090,7 +3162,7 @@ private <P extends AbstractQueryBuilder<P>> void phraseBoostTestCaseForClauses( assertHighlight(response, 0, "field1", 0, 1, highlightedMatcher); } - public void testGeoFieldHighlightingWithDifferentHighlighters() throws IOException { + public void testGeoFieldHighlightingWithDifferentHighlighters() throws IOException, InterruptedException { // check that we do not get an exception for geo_point fields in case someone tries to highlight // it accidentally with a wildcard // see https://github.com/elastic/elasticsearch/issues/17537 @@ -3114,6 +3186,7 @@ public void testGeoFieldHighlightingWithDifferentHighlighters() throws IOExcepti .setSource(jsonBuilder().startObject().field("text", "Arbitrary text field which will should not cause a failure").endObject()) .get(); refresh(); + indexRandomForConcurrentSearch("test"); String highlighterType = randomFrom(ALL_TYPES); QueryBuilder query = QueryBuilders.boolQuery() .should( @@ -3131,7 +3204,7 @@ public void testGeoFieldHighlightingWithDifferentHighlighters() throws IOExcepti assertThat(search.getHits().getAt(0).getHighlightFields().get("text").fragments().length, equalTo(1)); } - public void testGeoFieldHighlightingWhenQueryGetsRewritten() throws IOException { + public void testGeoFieldHighlightingWhenQueryGetsRewritten() throws IOException, InterruptedException { // same as above but in this example the query gets rewritten during highlighting // see https://github.com/elastic/elasticsearch/issues/17537#issuecomment-244939633 XContentBuilder mappings = jsonBuilder(); @@ -3158,6 +3231,7 @@ public void testGeoFieldHighlightingWhenQueryGetsRewritten() throws IOException ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); QueryBuilder query = QueryBuilders.functionScoreQuery( QueryBuilders.boolQuery() @@ -3173,7 +3247,7 @@ public void testGeoFieldHighlightingWhenQueryGetsRewritten() throws IOException assertThat(search.getHits().getTotalHits().value, equalTo(1L)); } - public void testKeywordFieldHighlighting() throws IOException { + public void testKeywordFieldHighlighting() throws IOException, InterruptedException { // check that keyword highlighting works XContentBuilder mappings = jsonBuilder(); mappings.startObject(); @@ -3186,6 +3260,7 @@ public void testKeywordFieldHighlighting() throws IOException { .setSource(jsonBuilder().startObject().field("keyword_field", "some text").endObject()) .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse search = client().prepareSearch() .setSource( new SearchSourceBuilder().query(QueryBuilders.matchQuery("keyword_field", "some text")) @@ -3219,6 +3294,7 @@ public void testCopyToFields() throws Exception { .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch() .setQuery(matchQuery("foo_copy", "brown")) .highlighter(new HighlightBuilder().field(new
Field("foo_copy"))) @@ -3231,26 +3307,25 @@ public void testCopyToFields() throws Exception { } public void testACopyFieldWithNestedQuery() throws Exception { - String mapping = Strings.toString( - jsonBuilder().startObject() - .startObject("properties") - .startObject("foo") - .field("type", "nested") - .startObject("properties") - .startObject("text") - .field("type", "text") - .field("copy_to", "foo_text") - .endObject() - .endObject() - .endObject() - .startObject("foo_text") - .field("type", "text") - .field("term_vector", "with_positions_offsets") - .field("store", true) - .endObject() - .endObject() - .endObject() - ); + String mapping = jsonBuilder().startObject() + .startObject("properties") + .startObject("foo") + .field("type", "nested") + .startObject("properties") + .startObject("text") + .field("type", "text") + .field("copy_to", "foo_text") + .endObject() + .endObject() + .endObject() + .startObject("foo_text") + .field("type", "text") + .field("term_vector", "with_positions_offsets") + .field("store", true) + .endObject() + .endObject() + .endObject() + .toString(); prepareCreate("test").setMapping(mapping).get(); client().prepareIndex("test") @@ -3269,7 +3344,7 @@ public void testACopyFieldWithNestedQuery() throws Exception { ) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); - + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(nestedQuery("foo", matchQuery("foo.text", "brown cow"), ScoreMode.None)) .highlighter(new HighlightBuilder().field(new Field("foo_text").highlighterType("fvh")).requireFieldMatch(false)) @@ -3287,6 +3362,7 @@ public void testFunctionScoreQueryHighlight() throws Exception { .setSource(jsonBuilder().startObject().field("text", "brown").endObject()) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(new FunctionScoreQueryBuilder(QueryBuilders.prefixQuery("text", "bro"))) @@ -3304,6 +3380,7 @@ public void testFiltersFunctionScoreQueryHighlight() throws Exception { .setSource(jsonBuilder().startObject().field("text", "brown").field("enable", "yes").endObject()) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); + indexRandomForConcurrentSearch("test"); FunctionScoreQueryBuilder.FilterFunctionBuilder filterBuilder = new FunctionScoreQueryBuilder.FilterFunctionBuilder( QueryBuilders.termQuery("enable", "yes"), new RandomScoreFunctionBuilder() @@ -3361,25 +3438,24 @@ public void testHighlightQueryRewriteDatesWithNow() throws Exception { } public void testWithNestedQuery() throws Exception { - String mapping = Strings.toString( - jsonBuilder().startObject() - .startObject("properties") - .startObject("text") - .field("type", "text") - .field("index_options", "offsets") - .field("term_vector", "with_positions_offsets") - .endObject() - .startObject("foo") - .field("type", "nested") - .startObject("properties") - .startObject("text") - .field("type", "text") - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = jsonBuilder().startObject() + .startObject("properties") + .startObject("text") + .field("type", "text") + .field("index_options", "offsets") + .field("term_vector", "with_positions_offsets") + .endObject() + .startObject("foo") + .field("type", "nested") + .startObject("properties") + .startObject("text") + .field("type", "text") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() 
+ .toString(); prepareCreate("test").setMapping(mapping).get(); client().prepareIndex("test") @@ -3399,6 +3475,7 @@ public void testWithNestedQuery() throws Exception { ) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); + indexRandomForConcurrentSearch("test"); for (String type : new String[] { "unified", "plain" }) { SearchResponse searchResponse = client().prepareSearch() @@ -3456,6 +3533,7 @@ public void testWithNormalizer() throws Exception { .setSource("keyword", "Hello World") .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); + indexRandomForConcurrentSearch("test"); for (String highlighterType : new String[] { "unified", "plain" }) { SearchResponse searchResponse = client().prepareSearch() @@ -3478,6 +3556,7 @@ public void testDisableHighlightIdField() throws Exception { .setSource("keyword", "Hello World") .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); + indexRandomForConcurrentSearch("test"); for (String highlighterType : new String[] { "plain", "unified" }) { SearchResponse searchResponse = client().prepareSearch() diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java index 2d86c630e30e1..4d398f8ca09cc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java @@ -32,15 +32,18 @@ package org.opensearch.search.fieldcaps; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.fieldcaps.FieldCapabilities; import org.opensearch.action.fieldcaps.FieldCapabilitiesResponse; import org.opensearch.action.index.IndexRequestBuilder; -import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.plugins.MapperPlugin; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.junit.Before; import java.util.ArrayList; @@ -52,9 +55,22 @@ import java.util.function.Function; import java.util.function.Predicate; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; -public class FieldCapabilitiesIT extends OpenSearchIntegTestCase { +public class FieldCapabilitiesIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public FieldCapabilitiesIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } @Before public void setUp() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java index 90d0a59f7b58d..906d45ef84b3f 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java @@ -32,27 +32,28 @@ package org.opensearch.search.fields; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.Numbers; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.collect.MapBuilder; import org.opensearch.common.document.DocumentField; import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.time.DateUtils; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.support.XContentMapValues; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.QueryBuilders; import org.opensearch.plugins.Plugin; -import org.opensearch.core.rest.RestStatus; import org.opensearch.script.MockScriptPlugin; import org.opensearch.script.Script; import org.opensearch.script.ScriptType; @@ -60,9 +61,8 @@ import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.lookup.FieldLookup; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalSettingsPlugin; - +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; import org.joda.time.format.DateTimeFormat; @@ -90,6 +90,7 @@ import static org.opensearch.common.util.set.Sets.newHashSet; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFailures; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; @@ -102,7 +103,19 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -public class SearchFieldsIT extends OpenSearchIntegTestCase { +public class SearchFieldsIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public SearchFieldsIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { @@ -201,29 +214,28 @@ static Object docScript(Map<String, Object> vars, String fieldName) { public void testStoredFields() throws Exception { createIndex("test"); - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject(MapperService.SINGLE_MAPPING_NAME) - .startObject("properties") - .startObject("field1") - .field("type", "text") - .field("store", true) - .endObject() - .startObject("field2") - .field("type", "text") - .field("store", false) - .endObject() - .startObject("field3") - .field("type", "text") - .field("store", true) - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject(MapperService.SINGLE_MAPPING_NAME) + .startObject("properties") + .startObject("field1") + .field("type", "text") + .field("store", true) + .endObject() + .startObject("field2") + .field("type", "text") + .field("store", false) + .endObject() + .startObject("field3") + .field("type", "text") + .field("store", true) + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); - client().admin().indices().preparePutMapping().setSource(mapping, XContentType.JSON).get(); + client().admin().indices().preparePutMapping().setSource(mapping, MediaTypeRegistry.JSON).get(); client().prepareIndex("test") .setId("1") @@ -233,6 +245,7 @@ public void testStoredFields() throws Exception { .get(); client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addStoredField("field1").get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); @@ -304,21 +317,20 @@ public void testStoredFields() throws Exception { public void testScriptDocAndFields() throws Exception { createIndex("test"); - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject(MapperService.SINGLE_MAPPING_NAME) - .startObject("properties") - .startObject("num1") - .field("type", "double") - .field("store", true) - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject(MapperService.SINGLE_MAPPING_NAME) + .startObject("properties") + .startObject("num1") + .field("type", "double") + .field("store", true) + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); - client().admin().indices().preparePutMapping().setSource(mapping, XContentType.JSON).get(); + client().admin().indices().preparePutMapping().setSource(mapping, MediaTypeRegistry.JSON).get(); client().prepareIndex("test") .setId("1") @@ -341,6 +353,7 @@ public void testScriptDocAndFields() throws Exception { ) .get(); client().admin().indices().refresh(refreshRequest()).actionGet(); + indexRandomForConcurrentSearch("test"); logger.info("running doc['num1'].value"); SearchResponse response = client().prepareSearch() @@ -406,21 +419,20 @@ public void testScriptDocAndFields() throws Exception { public void testScriptWithUnsignedLong() throws Exception { createIndex("test"); - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject(MapperService.SINGLE_MAPPING_NAME) - .startObject("properties") - .startObject("unsigned_num1") - .field("type", "unsigned_long") - .field("store", true) - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + 
.startObject() + .startObject(MapperService.SINGLE_MAPPING_NAME) + .startObject("properties") + .startObject("unsigned_num1") + .field("type", "unsigned_long") + .field("store", true) + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); - client().admin().indices().preparePutMapping().setSource(mapping, XContentType.JSON).get(); + client().admin().indices().preparePutMapping().setSource(mapping, MediaTypeRegistry.JSON).get(); client().prepareIndex("test") .setId("1") @@ -442,6 +454,7 @@ public void testScriptWithUnsignedLong() throws Exception { ) .get(); client().admin().indices().refresh(refreshRequest()).actionGet(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch() .setQuery(matchAllQuery()) @@ -509,20 +522,19 @@ public void testScriptWithUnsignedLong() throws Exception { public void testScriptFieldWithNanos() throws Exception { createIndex("test"); - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject(MapperService.SINGLE_MAPPING_NAME) - .startObject("properties") - .startObject("date") - .field("type", "date_nanos") - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject(MapperService.SINGLE_MAPPING_NAME) + .startObject("properties") + .startObject("date") + .field("type", "date_nanos") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); - client().admin().indices().preparePutMapping().setSource(mapping, XContentType.JSON).get(); + client().admin().indices().preparePutMapping().setSource(mapping, MediaTypeRegistry.JSON).get(); String date = "2019-01-31T10:00:00.123456789Z"; indexRandom( true, @@ -532,6 +544,7 @@ public void testScriptFieldWithNanos() throws Exception { .setSource(jsonBuilder().startObject().field("date", "1970-01-01T00:00:00.000Z").endObject()), client().prepareIndex("test").setId("2").setSource(jsonBuilder().startObject().field("date", date).endObject()) ); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch() .setQuery(matchAllQuery()) @@ -617,6 +630,7 @@ public void testScriptFieldUsingSource() throws Exception { ) .get(); client().admin().indices().refresh(refreshRequest()).actionGet(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch() .setQuery(matchAllQuery()) @@ -659,6 +673,7 @@ public void testScriptFieldUsingSource() throws Exception { public void testScriptFieldsForNullReturn() throws Exception { client().prepareIndex("test").setId("1").setSource("foo", "bar").setRefreshPolicy("true").get(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch() .setQuery(matchAllQuery()) .addScriptField("test_script_1", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "return null", Collections.emptyMap())) @@ -706,60 +721,59 @@ public void testPartialFields() throws Exception { public void testStoredFieldsWithoutSource() throws Exception { createIndex("test"); - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject(MapperService.SINGLE_MAPPING_NAME) - .startObject("_source") - .field("enabled", false) - .endObject() - .startObject("properties") - .startObject("byte_field") - .field("type", "byte") - .field("store", true) - .endObject() - .startObject("short_field") - .field("type", "short") - .field("store", true) - .endObject() - .startObject("integer_field") - .field("type", 
"integer") - .field("store", true) - .endObject() - .startObject("long_field") - .field("type", "long") - .field("store", true) - .endObject() - .startObject("float_field") - .field("type", "float") - .field("store", true) - .endObject() - .startObject("double_field") - .field("type", "double") - .field("store", true) - .endObject() - .startObject("date_field") - .field("type", "date") - .field("store", true) - .endObject() - .startObject("boolean_field") - .field("type", "boolean") - .field("store", true) - .endObject() - .startObject("binary_field") - .field("type", "binary") - .field("store", true) - .endObject() - .startObject("unsigned_long_field") - .field("type", "unsigned_long") - .field("store", true) - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject(MapperService.SINGLE_MAPPING_NAME) + .startObject("_source") + .field("enabled", false) + .endObject() + .startObject("properties") + .startObject("byte_field") + .field("type", "byte") + .field("store", true) + .endObject() + .startObject("short_field") + .field("type", "short") + .field("store", true) + .endObject() + .startObject("integer_field") + .field("type", "integer") + .field("store", true) + .endObject() + .startObject("long_field") + .field("type", "long") + .field("store", true) + .endObject() + .startObject("float_field") + .field("type", "float") + .field("store", true) + .endObject() + .startObject("double_field") + .field("type", "double") + .field("store", true) + .endObject() + .startObject("date_field") + .field("type", "date") + .field("store", true) + .endObject() + .startObject("boolean_field") + .field("type", "boolean") + .field("store", true) + .endObject() + .startObject("binary_field") + .field("type", "binary") + .field("store", true) + .endObject() + .startObject("unsigned_long_field") + .field("type", "unsigned_long") + .field("store", true) + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); - client().admin().indices().preparePutMapping().setSource(mapping, XContentType.JSON).get(); + client().admin().indices().preparePutMapping().setSource(mapping, MediaTypeRegistry.JSON).get(); ZonedDateTime date = ZonedDateTime.of(2012, 3, 22, 0, 0, 0, 0, ZoneOffset.UTC); client().prepareIndex("test") @@ -781,6 +795,7 @@ public void testStoredFieldsWithoutSource() throws Exception { .get(); client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) @@ -838,6 +853,7 @@ public void testSearchFieldsMetadata() throws Exception { .setSource(jsonBuilder().startObject().field("field1", "value").endObject()) .setRefreshPolicy(IMMEDIATE) .get(); + indexRandomForConcurrentSearch("my-index"); SearchResponse searchResponse = client().prepareSearch("my-index").addStoredField("field1").addStoredField("_routing").get(); @@ -852,6 +868,7 @@ public void testSearchFieldsNonLeafField() throws Exception { .setSource(jsonBuilder().startObject().startObject("field1").field("field2", "value1").endObject().endObject()) .setRefreshPolicy(IMMEDIATE) .get(); + indexRandomForConcurrentSearch("my-index"); assertFailures( client().prepareSearch("my-index").addStoredField("field1"), @@ -917,7 +934,8 @@ public void testGetFieldsComplexField() throws Exception { .endObject() ); - client().prepareIndex("my-index").setId("1").setRefreshPolicy(IMMEDIATE).setSource(source, XContentType.JSON).get(); + 
client().prepareIndex("my-index").setId("1").setRefreshPolicy(IMMEDIATE).setSource(source, MediaTypeRegistry.JSON).get(); + indexRandomForConcurrentSearch("my-index"); String field = "field1.field2.field3.field4"; @@ -944,61 +962,60 @@ public void testSingleValueFieldDatatField() throws ExecutionException, Interrup public void testDocValueFields() throws Exception { createIndex("test"); - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject(MapperService.SINGLE_MAPPING_NAME) - .startObject("_source") - .field("enabled", false) - .endObject() - .startObject("properties") - .startObject("text_field") - .field("type", "text") - .field("fielddata", true) - .endObject() - .startObject("keyword_field") - .field("type", "keyword") - .endObject() - .startObject("byte_field") - .field("type", "byte") - .endObject() - .startObject("short_field") - .field("type", "short") - .endObject() - .startObject("integer_field") - .field("type", "integer") - .endObject() - .startObject("long_field") - .field("type", "long") - .endObject() - .startObject("float_field") - .field("type", "float") - .endObject() - .startObject("double_field") - .field("type", "double") - .endObject() - .startObject("date_field") - .field("type", "date") - .endObject() - .startObject("boolean_field") - .field("type", "boolean") - .endObject() - .startObject("binary_field") - .field("type", "binary") - .field("doc_values", true) // off by default on binary fields - .endObject() - .startObject("ip_field") - .field("type", "ip") - .endObject() - .startObject("flat_object_field") - .field("type", "flat_object") - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject(MapperService.SINGLE_MAPPING_NAME) + .startObject("_source") + .field("enabled", false) + .endObject() + .startObject("properties") + .startObject("text_field") + .field("type", "text") + .field("fielddata", true) + .endObject() + .startObject("keyword_field") + .field("type", "keyword") + .endObject() + .startObject("byte_field") + .field("type", "byte") + .endObject() + .startObject("short_field") + .field("type", "short") + .endObject() + .startObject("integer_field") + .field("type", "integer") + .endObject() + .startObject("long_field") + .field("type", "long") + .endObject() + .startObject("float_field") + .field("type", "float") + .endObject() + .startObject("double_field") + .field("type", "double") + .endObject() + .startObject("date_field") + .field("type", "date") + .endObject() + .startObject("boolean_field") + .field("type", "boolean") + .endObject() + .startObject("binary_field") + .field("type", "binary") + .field("doc_values", true) // off by default on binary fields + .endObject() + .startObject("ip_field") + .field("type", "ip") + .endObject() + .startObject("flat_object_field") + .field("type", "flat_object") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); - client().admin().indices().preparePutMapping().setSource(mapping, XContentType.JSON).get(); + client().admin().indices().preparePutMapping().setSource(mapping, MediaTypeRegistry.JSON).get(); ZonedDateTime date = ZonedDateTime.of(2012, 3, 22, 0, 0, 0, 0, ZoneOffset.UTC); client().prepareIndex("test") @@ -1026,6 +1043,7 @@ public void testDocValueFields() throws Exception { .get(); client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("test"); SearchRequestBuilder builder = client().prepareSearch() 
.setQuery(matchAllQuery()) @@ -1067,9 +1085,6 @@ public void testDocValueFields() throws Exception { ) ) ); - String json = Strings.toString( - XContentFactory.jsonBuilder().startObject().startObject("flat_object_field").field("foo", "bar").endObject().endObject() - ); assertThat(searchResponse.getHits().getAt(0).getFields().get("byte_field").getValue().toString(), equalTo("1")); assertThat(searchResponse.getHits().getAt(0).getFields().get("short_field").getValue().toString(), equalTo("2")); assertThat(searchResponse.getHits().getAt(0).getFields().get("integer_field").getValue(), equalTo((Object) 3L)); @@ -1261,6 +1276,7 @@ public void testScriptFields() throws Exception { ); } indexRandom(true, reqs); + indexRandomForConcurrentSearch("index"); ensureSearchable(); SearchRequestBuilder req = client().prepareSearch("index"); for (String field : Arrays.asList("s", "ms", "l", "ml", "d", "md")) { @@ -1316,6 +1332,7 @@ public void testDocValueFieldsWithFieldAlias() throws Exception { index("test", MapperService.SINGLE_MAPPING_NAME, "1", "text_field", "foo", "date_field", formatter.print(date)); refresh("test"); + indexRandomForConcurrentSearch("test"); SearchRequestBuilder builder = client().prepareSearch() .setQuery(matchAllQuery()) @@ -1377,6 +1394,7 @@ public void testWildcardDocValueFieldsWithFieldAlias() throws Exception { index("test", MapperService.SINGLE_MAPPING_NAME, "1", "text_field", "foo", "date_field", formatter.print(date)); refresh("test"); + indexRandomForConcurrentSearch("test"); SearchRequestBuilder builder = client().prepareSearch() .setQuery(matchAllQuery()) @@ -1430,6 +1448,7 @@ public void testStoredFieldsWithFieldAlias() throws Exception { index("test", MapperService.SINGLE_MAPPING_NAME, "1", "field1", "value1", "field2", "value2"); refresh("test"); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) @@ -1472,6 +1491,7 @@ public void testWildcardStoredFieldsWithFieldAlias() throws Exception { index("test", MapperService.SINGLE_MAPPING_NAME, "1", "field1", "value1", "field2", "value2"); refresh("test"); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addStoredField("field*").get(); assertHitCount(searchResponse, 1L); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/DecayFunctionScoreIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/DecayFunctionScoreIT.java index 0a03fb6404742..0380b3c7ddb89 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/DecayFunctionScoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/DecayFunctionScoreIT.java @@ -32,13 +32,15 @@ package org.opensearch.search.functionscore; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.Version; -import org.opensearch.action.ActionFuture; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchType; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.lucene.search.function.CombineFunction; import org.opensearch.common.lucene.search.function.FunctionScoreQuery; @@ -53,12 +55,14 @@ import 
org.opensearch.search.MultiValueMode; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; import java.time.ZoneOffset; import java.time.ZonedDateTime; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import java.util.Locale; @@ -72,6 +76,7 @@ import static org.opensearch.index.query.functionscore.ScoreFunctionBuilders.exponentialDecayFunction; import static org.opensearch.index.query.functionscore.ScoreFunctionBuilders.gaussDecayFunction; import static org.opensearch.index.query.functionscore.ScoreFunctionBuilders.linearDecayFunction; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.builder.SearchSourceBuilder.searchSource; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; @@ -85,7 +90,19 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThan; -public class DecayFunctionScoreIT extends OpenSearchIntegTestCase { +public class DecayFunctionScoreIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public DecayFunctionScoreIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } @Override protected boolean forbidPrivateIndexSettings() { @@ -375,6 +392,7 @@ public void testBoostModeSettingWorks() throws Exception { ) ); indexRandom(true, false, indexBuilders); // force no dummy docs + indexRandomForConcurrentSearch("test"); // Test Gauss List<Float> lonlat = new ArrayList<>(); @@ -459,6 +477,7 @@ public void testParseGeoPoint() throws Exception { constantScoreQuery(termQuery("test", "value")), ScoreFunctionBuilders.weightFactorFunction(randomIntBetween(1, 10)) ); + indexRandomForConcurrentSearch("test"); GeoPoint point = new GeoPoint(20, 11); ActionFuture<SearchResponse> response = client().search( searchRequest().searchType(SearchType.QUERY_THEN_FETCH) @@ -512,6 +531,7 @@ public void testCombineModes() throws Exception { .setRefreshPolicy(IMMEDIATE) .setSource(jsonBuilder().startObject().field("test", "value value").field("num", 1.0).endObject()) .get(); + indexRandomForConcurrentSearch("test"); FunctionScoreQueryBuilder baseQuery = functionScoreQuery( constantScoreQuery(termQuery("test", "value")), ScoreFunctionBuilders.weightFactorFunction(2) @@ -631,6 +651,7 @@ public void testCombineModesExplain() throws Exception { constantScoreQuery(termQuery("test", "value")).queryName("query1"), ScoreFunctionBuilders.weightFactorFunction(2, "weight1") ); + indexRandomForConcurrentSearch("test"); // decay score should return 0.5 for this function and baseQuery should return 2.0f as it's score ActionFuture<SearchResponse> response = client().search( searchRequest().searchType(SearchType.QUERY_THEN_FETCH) @@ -739,6 +760,7 @@ public void testParseDateMath() throws Exception { ).actionGet(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse sr = client().search( 
searchRequest().source( searchSource().query(functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num1", "now", "2d"))) @@ -794,6 +816,7 @@ public void testValueMissingLin() throws Exception { ).actionGet(); refresh(); + indexRandomForConcurrentSearch("test"); ActionFuture<SearchResponse> response = client().search( searchRequest().searchType(SearchType.QUERY_THEN_FETCH) @@ -870,6 +893,7 @@ public void testDateWithoutOrigin() throws Exception { ).actionGet(); refresh(); + indexRandomForConcurrentSearch("test"); ActionFuture<SearchResponse> response = client().search( searchRequest().searchType(SearchType.QUERY_THEN_FETCH) @@ -951,6 +975,7 @@ public void testManyDocsLin() throws Exception { List<Float> lonlat = new ArrayList<>(); lonlat.add(100f); lonlat.add(110f); + indexRandomForConcurrentSearch("test"); ActionFuture<SearchResponse> response = client().search( searchRequest().searchType(SearchType.QUERY_THEN_FETCH) .source( @@ -1084,6 +1109,7 @@ public void testNoQueryGiven() throws Exception { client().index(indexRequest("test").source(jsonBuilder().startObject().field("test", "value").field("num", 1.0).endObject())) .actionGet(); refresh(); + indexRandomForConcurrentSearch("test"); // so, we indexed a string field, but now we try to score a num field ActionFuture<SearchResponse> response = client().search( searchRequest().searchType(SearchType.QUERY_THEN_FETCH) @@ -1148,6 +1174,7 @@ public void testMultiFieldOptions() throws Exception { ); indexRandom(true, doc1, doc2); + indexRandomForConcurrentSearch("test"); ActionFuture<SearchResponse> response = client().search(searchRequest().source(searchSource().query(baseQuery))); SearchResponse sr = response.actionGet(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/ExplainableScriptIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/ExplainableScriptIT.java index 3651a7354e5de..0573dcfc4863d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/ExplainableScriptIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/ExplainableScriptIT.java @@ -32,8 +32,11 @@ package org.opensearch.search.functionscore; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.IndexSearcher; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchType; @@ -52,9 +55,9 @@ import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; import org.opensearch.search.lookup.SearchLookup; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; import java.io.IOException; @@ -72,13 +75,26 @@ import static org.opensearch.index.query.QueryBuilders.functionScoreQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; import static org.opensearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.builder.SearchSourceBuilder.searchSource; import static 
org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @ClusterScope(scope = Scope.SUITE, supportsDedicatedMasters = false, numDataNodes = 1) -public class ExplainableScriptIT extends OpenSearchIntegTestCase { +public class ExplainableScriptIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public ExplainableScriptIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } public static class ExplainableScriptPlugin extends Plugin implements ScriptPlugin { @Override @@ -93,7 +109,7 @@ public String getType() { public <T> T compile(String scriptName, String scriptSource, ScriptContext<T> context, Map<String, String> params) { assert scriptSource.equals("explainable_script"); assert context == ScoreScript.CONTEXT; - ScoreScript.Factory factory = (params1, lookup) -> new ScoreScript.LeafFactory() { + ScoreScript.Factory factory = (params1, lookup, indexSearcher) -> new ScoreScript.LeafFactory() { @Override public boolean needs_score() { return false; @@ -101,7 +117,7 @@ public boolean needs_score() { @Override public ScoreScript newInstance(LeafReaderContext ctx) throws IOException { - return new MyScript(params1, lookup, ctx); + return new MyScript(params1, lookup, indexSearcher, ctx); } }; return context.factoryClazz.cast(factory); @@ -117,8 +133,8 @@ public Set<ScriptContext<?>> getSupportedContexts() { static class MyScript extends ScoreScript implements ExplainableScoreScript { - MyScript(Map<String, Object> params, SearchLookup lookup, LeafReaderContext leafContext) { - super(params, lookup, leafContext); + MyScript(Map<String, Object> params, SearchLookup lookup, IndexSearcher indexSearcher, LeafReaderContext leafContext) { + super(params, lookup, indexSearcher, leafContext); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreFieldValueIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreFieldValueIT.java index 4e1df591cb245..6956833cf6d62 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreFieldValueIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreFieldValueIT.java @@ -32,30 +32,49 @@ package org.opensearch.search.functionscore; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.lucene.search.function.FieldValueFactorFunction; +import org.opensearch.common.settings.Settings; import org.opensearch.search.SearchHit; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.arrayWithSize; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.functionScoreQuery; import static 
org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.simpleQueryStringQuery; import static org.opensearch.index.query.functionscore.ScoreFunctionBuilders.fieldValueFactorFunction; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFailures; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertOrderedSearchHits; +import static org.hamcrest.Matchers.arrayWithSize; +import static org.hamcrest.Matchers.containsString; /** * Tests for the {@code field_value_factor} function in a function_score query. */ -public class FunctionScoreFieldValueIT extends OpenSearchIntegTestCase { - public void testFieldValueFactor() throws IOException { +public class FunctionScoreFieldValueIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public FunctionScoreFieldValueIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + public void testFieldValueFactor() throws IOException, InterruptedException { assertAcked( prepareCreate("test").setMapping( jsonBuilder().startObject() @@ -74,8 +93,8 @@ public void testFieldValueFactor() throws IOException { client().prepareIndex("test").setId("1").setSource("test", 5, "body", "foo").get(); client().prepareIndex("test").setId("2").setSource("test", 17, "body", "foo").get(); client().prepareIndex("test").setId("3").setSource("body", "bar").get(); - refresh(); + indexRandomForConcurrentSearch("test"); // document 2 scores higher because 17 > 5 SearchResponse response = client().prepareSearch("test") @@ -164,7 +183,7 @@ public void testFieldValueFactor() throws IOException { } } - public void testFieldValueFactorExplain() throws IOException { + public void testFieldValueFactorExplain() throws IOException, InterruptedException { assertAcked( prepareCreate("test").setMapping( jsonBuilder().startObject() @@ -185,6 +204,7 @@ public void testFieldValueFactorExplain() throws IOException { client().prepareIndex("test").setId("3").setSource("body", "bar").get(); refresh(); + indexRandomForConcurrentSearch("test"); // document 2 scores higher because 17 > 5 final String functionName = "func1"; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreIT.java index 3d24933f66d17..4f267f0059291 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreIT.java @@ -32,10 +32,13 @@ package org.opensearch.search.functionscore; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.lucene.search.function.CombineFunction; import org.opensearch.common.lucene.search.function.FunctionScoreQuery; +import org.opensearch.common.settings.Settings; import 
org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.index.query.functionscore.FunctionScoreQueryBuilder.FilterFunctionBuilder; @@ -45,11 +48,12 @@ import org.opensearch.script.ScriptType; import org.opensearch.search.SearchHit; import org.opensearch.search.aggregations.bucket.terms.Terms; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -63,6 +67,7 @@ import static org.opensearch.index.query.QueryBuilders.functionScoreQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; import static org.opensearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.terms; import static org.opensearch.search.builder.SearchSourceBuilder.searchSource; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -72,11 +77,23 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; -public class FunctionScoreIT extends OpenSearchIntegTestCase { +public class FunctionScoreIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { static final String TYPE = "type"; static final String INDEX = "index"; + public FunctionScoreIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); @@ -103,10 +120,11 @@ protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() { } } - public void testScriptScoresNested() throws IOException { + public void testScriptScoresNested() throws IOException, InterruptedException { createIndex(INDEX); index(INDEX, TYPE, "1", jsonBuilder().startObject().field("dummy_field", 1).endObject()); refresh(); + indexRandomForConcurrentSearch(INDEX); Script scriptOne = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "1", Collections.emptyMap()); Script scriptTwo = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "get score value", Collections.emptyMap()); @@ -125,10 +143,11 @@ public void testScriptScoresNested() throws IOException { assertThat(response.getHits().getAt(0).getScore(), equalTo(1.0f)); } - public void testScriptScoresWithAgg() throws IOException { + public void testScriptScoresWithAgg() throws IOException, InterruptedException { createIndex(INDEX); index(INDEX, TYPE, "1", jsonBuilder().startObject().field("dummy_field", 1).endObject()); refresh(); + indexRandomForConcurrentSearch(INDEX); Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "get score value", Collections.emptyMap()); @@ -143,10 +162,11 @@ public void testScriptScoresWithAgg() throws IOException { assertThat(((Terms) response.getAggregations().asMap().get("score_agg")).getBuckets().get(0).getDocCount(), is(1L)); } - public void testScriptScoresWithAggWithExplain() throws IOException { + public void testScriptScoresWithAggWithExplain() throws IOException, InterruptedException { createIndex(INDEX); index(INDEX, TYPE, "1", jsonBuilder().startObject().field("dummy_field", 1).endObject()); refresh(); + indexRandomForConcurrentSearch(INDEX); Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "get score value", Collections.emptyMap()); @@ -172,7 +192,7 @@ public void testScriptScoresWithAggWithExplain() throws IOException { assertThat(((Terms) response.getAggregations().asMap().get("score_agg")).getBuckets().get(0).getDocCount(), is(1L)); } - public void testMinScoreFunctionScoreBasic() throws IOException { + public void testMinScoreFunctionScoreBasic() throws IOException, InterruptedException { float score = randomValueOtherThanMany((f) -> Float.compare(f, 0) < 0, OpenSearchTestCase::randomFloat); float minScore = randomValueOtherThanMany((f) -> Float.compare(f, 0) < 0, OpenSearchTestCase::randomFloat); index( @@ -184,6 +204,7 @@ public void testMinScoreFunctionScoreBasic() throws IOException { .endObject() ); refresh(); + indexRandomForConcurrentSearch(INDEX); ensureYellow(); Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['random_score']", Collections.emptyMap()); @@ -268,6 +289,7 @@ public void testWithEmptyFunctions() throws IOException, ExecutionException, Int assertAcked(prepareCreate("test")); index("test", "testtype", "1", jsonBuilder().startObject().field("text", "test text").endObject()); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse termQuery = client().search(searchRequest().source(searchSource().explain(true).query(termQuery("text", "text")))) .get(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScorePluginIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScorePluginIT.java index a1d3d9f22af04..593f844305743 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScorePluginIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScorePluginIT.java @@ -32,11 +32,14 @@ package org.opensearch.search.functionscore; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.search.Explanation; -import org.opensearch.action.ActionFuture; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchType; import org.opensearch.common.Priority; +import org.opensearch.common.action.ActionFuture; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.index.query.functionscore.DecayFunction; @@ -46,9 +49,9 @@ import org.opensearch.plugins.Plugin; import org.opensearch.plugins.SearchPlugin; import org.opensearch.search.SearchHits; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; import java.io.IOException; @@ -62,11 +65,25 @@ import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.functionScoreQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.builder.SearchSourceBuilder.searchSource; import static org.hamcrest.Matchers.equalTo; @ClusterScope(scope = Scope.SUITE, supportsDedicatedMasters = false, numDataNodes = 1) -public class FunctionScorePluginIT extends OpenSearchIntegTestCase { +public class FunctionScorePluginIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public FunctionScorePluginIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Arrays.asList(CustomDistanceScorePlugin.class); @@ -99,6 +116,7 @@ public void testPlugin() throws Exception { ).actionGet(); client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("test"); DecayFunctionBuilder<?> gfb = new CustomDistanceScoreBuilder("num1", "2013-05-28", "+1d"); ActionFuture<SearchResponse> response = client().search( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/QueryRescorerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/QueryRescorerIT.java index 0e00b66ffa383..5121d5023fd95 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/QueryRescorerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/QueryRescorerIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.functionscore; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.search.Explanation; import org.apache.lucene.tests.util.English; import org.opensearch.action.index.IndexRequestBuilder; @@ -41,8 +43,8 @@ import org.opensearch.common.lucene.search.function.CombineFunction; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.Settings.Builder; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.Operator; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; @@ -52,9 +54,10 @@ import org.opensearch.search.rescore.QueryRescoreMode; import org.opensearch.search.rescore.QueryRescorerBuilder; import org.opensearch.search.sort.SortBuilders; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; +import java.util.Collection; import java.util.Comparator; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; @@ -70,6 +73,7 @@ import static org.opensearch.index.query.QueryBuilders.queryStringQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; import static org.opensearch.index.query.functionscore.ScoreFunctionBuilders.weightFactorFunction; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFirstHit; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFourthHit; @@ -79,6 +83,7 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSecondHit; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertThirdHit; import static org.opensearch.test.hamcrest.OpenSearchAssertions.hasId; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.hasMatchedQueries; import static org.opensearch.test.hamcrest.OpenSearchAssertions.hasScore; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -86,8 +91,21 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; -public class QueryRescorerIT extends OpenSearchIntegTestCase { - public void testEnforceWindowSize() { +public class QueryRescorerIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public 
QueryRescorerIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + public void testEnforceWindowSize() throws InterruptedException { createIndex("test"); // this int iters = scaledRandomIntBetween(10, 20); @@ -95,6 +113,7 @@ public void testEnforceWindowSize() { client().prepareIndex("test").setId(Integer.toString(i)).setSource("f", Integer.toString(i)).get(); } refresh(); + indexRandomForConcurrentSearch("test"); int numShards = getNumShards("test").numPrimaries; for (int j = 0; j < iters; j++) { @@ -146,6 +165,7 @@ public void testRescorePhrase() throws Exception { .setSource("field1", "quick huge brown", "field2", "the quick lazy huge brown fox jumps over the tree") .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR)) .setRescorer( @@ -451,6 +471,7 @@ private static void assertEquivalent(String query, SearchResponse plain, SearchR public void testEquivalence() throws Exception { // no dummy docs since merges can change scores while we run queries. int numDocs = indexRandomNumbers("whitespace", -1, false); + indexRandomForConcurrentSearch("test"); final int iters = scaledRandomIntBetween(50, 100); for (int i = 0; i < iters; i++) { @@ -522,6 +543,7 @@ public void testExplain() throws Exception { .setSource("field1", "quick huge brown", "field2", "the quick lazy huge brown fox jumps over the tree") .get(); refresh(); + indexRandomForConcurrentSearch("test"); { SearchResponse searchResponse = client().prepareSearch() @@ -573,7 +595,7 @@ public void testExplain() throws Exception { SearchResponse searchResponse = client().prepareSearch() .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) - .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR)) + .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR).queryName("hello-world")) .setRescorer(innerRescoreQuery, 5) .setExplain(true) .get(); @@ -581,7 +603,10 @@ public void testExplain() throws Exception { assertFirstHit(searchResponse, hasId("1")); assertSecondHit(searchResponse, hasId("2")); assertThirdHit(searchResponse, hasId("3")); - + final String[] matchedQueries = { "hello-world" }; + assertFirstHit(searchResponse, hasMatchedQueries(matchedQueries)); + assertSecondHit(searchResponse, hasMatchedQueries(matchedQueries)); + assertThirdHit(searchResponse, hasMatchedQueries(matchedQueries)); for (int j = 0; j < 3; j++) { assertThat(searchResponse.getHits().getAt(j).getExplanation().getDescription(), equalTo(descriptionModes[innerMode])); } @@ -793,6 +818,7 @@ public void testFromSize() throws Exception { client().prepareIndex("test").setId("" + i).setSource("text", "hello world").get(); } refresh(); + indexRandomForConcurrentSearch("test"); SearchRequestBuilder request = client().prepareSearch(); request.setQuery(QueryBuilders.termQuery("text", "hello")); @@ -809,6 +835,7 @@ public void testRescorePhaseWithInvalidSort() throws Exception { client().prepareIndex("test").setId("" + i).setSource("number", 0).get(); } refresh(); + indexRandomForConcurrentSearch("test"); Exception exc = expectThrows( 
Exception.class, diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/RandomScoreFunctionIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/RandomScoreFunctionIT.java index 2176b93079d02..f1205ba0f1e93 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/RandomScoreFunctionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/RandomScoreFunctionIT.java @@ -31,8 +31,11 @@ package org.opensearch.search.functionscore; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.util.ArrayUtil; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.index.mapper.SeqNoFieldMapper; import org.opensearch.index.query.functionscore.FunctionScoreQueryBuilder; @@ -43,7 +46,7 @@ import org.opensearch.script.Script; import org.opensearch.script.ScriptType; import org.opensearch.search.SearchHit; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.CoreMatchers; import java.util.Arrays; @@ -60,6 +63,7 @@ import static org.opensearch.index.query.functionscore.ScoreFunctionBuilders.randomFunction; import static org.opensearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction; import static org.opensearch.script.MockScriptPlugin.NAME; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.allOf; @@ -71,7 +75,19 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.nullValue; -public class RandomScoreFunctionIT extends OpenSearchIntegTestCase { +public class RandomScoreFunctionIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public RandomScoreFunctionIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { @@ -113,6 +129,7 @@ public void testConsistentHitsWithSameSeed() throws Exception { } flush(); refresh(); + indexRandomForConcurrentSearch("test"); int outerIters = scaledRandomIntBetween(10, 20); for (int o = 0; o < outerIters; o++) { final int seed = randomInt(); @@ -185,6 +202,7 @@ public void testScoreAccessWithinScript() throws Exception { .get(); } refresh(); + indexRandomForConcurrentSearch("test"); Map<String, Object> params = new HashMap<>(); params.put("factor", randomIntBetween(2, 4)); @@ -276,6 +294,7 @@ public void testSeedReportedInExplain() throws Exception { index("test", "type", "1", jsonBuilder().startObject().endObject()); flush(); refresh(); + indexRandomForConcurrentSearch("test"); int seed = 12345678; @@ -295,6 +314,7 @@ public void testSeedAndNameReportedInExplain() throws Exception { index("test", "type", "1", jsonBuilder().startObject().endObject()); flush(); refresh(); + indexRandomForConcurrentSearch("test"); int seed = 12345678; @@ -346,6 +366,7 @@ public void testScoreRange() throws Exception { } flush(); refresh(); + indexRandomForConcurrentSearch("test"); int iters = scaledRandomIntBetween(10, 20); for (int i = 0; i < iters; ++i) { SearchResponse searchResponse = client().prepareSearch() @@ -368,6 +389,7 @@ public void testSeeds() throws Exception { index("test", "type", "" + i, jsonBuilder().startObject().endObject()); } flushAndRefresh(); + indexRandomForConcurrentSearch("test"); assertNoFailures( client().prepareSearch() diff --git a/server/src/internalClusterTest/java/org/opensearch/search/geo/AbstractGeoBoundingBoxQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/search/geo/AbstractGeoBoundingBoxQueryIT.java index 265e3faa004fc..2f48ea0f64e35 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/geo/AbstractGeoBoundingBoxQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/geo/AbstractGeoBoundingBoxQueryIT.java @@ -36,8 +36,8 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.GeoValidationMethod; import org.opensearch.search.SearchHit; import org.opensearch.test.OpenSearchIntegTestCase; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/geo/AbstractGeoDistanceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/geo/AbstractGeoDistanceIT.java index cf3e362ae0cab..272f07e874fdf 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/geo/AbstractGeoDistanceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/geo/AbstractGeoDistanceIT.java @@ -40,8 +40,8 @@ import org.opensearch.common.geo.GeoUtils; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.DistanceUnit; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.geometry.utils.Geohash; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.index.query.IdsQueryBuilder; @@ -66,11 +66,11 @@ import java.util.Map; import java.util.function.Function; -import static org.hamcrest.Matchers.anyOf; -import static org.hamcrest.Matchers.equalTo; import static 
org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.closeTo; +import static org.hamcrest.Matchers.equalTo; /** base class for testing geo_distance queries on geo_ field types */ abstract class AbstractGeoDistanceIT extends OpenSearchIntegTestCase { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoDistanceQueryGeoPointsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoDistanceQueryGeoPointsIT.java index c20c7b297249f..f171555aed13a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoDistanceQueryGeoPointsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoDistanceQueryGeoPointsIT.java @@ -8,8 +8,8 @@ package org.opensearch.search.geo; -import org.junit.Before; import org.opensearch.core.xcontent.XContentBuilder; +import org.junit.Before; import java.io.IOException; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoDistanceQueryGeoShapesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoDistanceQueryGeoShapesIT.java index 003e56e3e0d6e..26702d8313e4b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoDistanceQueryGeoShapesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoDistanceQueryGeoShapesIT.java @@ -8,8 +8,8 @@ package org.opensearch.search.geo; -import org.junit.Before; import org.opensearch.core.xcontent.XContentBuilder; +import org.junit.Before; import java.io.IOException; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoFilterIT.java b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoFilterIT.java index bb14ed1ea5578..701ff0a94baf2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoFilterIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoFilterIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.geo; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy; @@ -39,7 +41,6 @@ import org.apache.lucene.spatial.query.SpatialArgs; import org.apache.lucene.spatial.query.SpatialOperation; import org.apache.lucene.spatial.query.UnsupportedSpatialOperation; - import org.opensearch.Version; import org.opensearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.opensearch.action.bulk.BulkItemResponse; @@ -47,8 +48,6 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Priority; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.geo.GeoUtils; import org.opensearch.common.geo.builders.CoordinatesBuilder; @@ -57,35 +56,39 @@ import org.opensearch.common.geo.builders.PointBuilder; import org.opensearch.common.geo.builders.PolygonBuilder; import org.opensearch.common.settings.Settings; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.util.io.Streams; +import 
org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.SearchHit; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; - import org.junit.BeforeClass; -import org.locationtech.spatial4j.context.SpatialContext; -import org.locationtech.spatial4j.distance.DistanceUtils; -import org.locationtech.spatial4j.exception.InvalidShapeException; -import org.locationtech.spatial4j.shape.Shape; import java.io.ByteArrayOutputStream; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.Random; import java.util.zip.GZIPInputStream; +import org.locationtech.spatial4j.context.SpatialContext; +import org.locationtech.spatial4j.distance.DistanceUtils; +import org.locationtech.spatial4j.exception.InvalidShapeException; +import org.locationtech.spatial4j.shape.Shape; + import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.geometry.utils.Geohash.addNeighbors; import static org.opensearch.index.query.QueryBuilders.geoBoundingBoxQuery; import static org.opensearch.index.query.QueryBuilders.geoDistanceQuery; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.matchQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFirstHit; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.hasId; @@ -95,7 +98,19 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; -public class GeoFilterIT extends OpenSearchIntegTestCase { +public class GeoFilterIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public GeoFilterIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } @Override protected boolean forbidPrivateIndexSettings() { @@ -211,17 +226,16 @@ public void testShapeRelations() throws Exception { assertTrue("Disjoint relation is not supported", disjointSupport); assertTrue("within relation is not supported", withinSupport); - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("area") - .field("type", "geo_shape") - .field("tree", "geohash") - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("area") + .field("type", "geo_shape") + .field("tree", "geohash") + .endObject() + .endObject() + .endObject() + .toString(); CreateIndexRequestBuilder mappingRequest = client().admin().indices().prepareCreate("shapes").setMapping(mapping); 
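// Create the "shapes" index with its legacy geohash prefix-tree geo_shape mapping; the polygon-with-hole document indexed just below is what the point-in-polygon, disjoint, and within searches are then run against.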
mappingRequest.get(); @@ -244,8 +258,9 @@ public void testShapeRelations() throws Exception { ); BytesReference data = BytesReference.bytes(jsonBuilder().startObject().field("area", polygon).endObject()); - client().prepareIndex("shapes").setId("1").setSource(data, XContentType.JSON).get(); + client().prepareIndex("shapes").setId("1").setSource(data, MediaTypeRegistry.JSON).get(); client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("shapes"); // Point in polygon SearchResponse result = client().prepareSearch() @@ -307,7 +322,7 @@ public void testShapeRelations() throws Exception { ); data = BytesReference.bytes(jsonBuilder().startObject().field("area", inverse).endObject()); - client().prepareIndex("shapes").setId("2").setSource(data, XContentType.JSON).get(); + client().prepareIndex("shapes").setId("2").setSource(data, MediaTypeRegistry.JSON).get(); client().admin().indices().prepareRefresh().get(); // re-check point on polygon hole @@ -346,7 +361,7 @@ public void testShapeRelations() throws Exception { ); data = BytesReference.bytes(jsonBuilder().startObject().field("area", builder).endObject()); - client().prepareIndex("shapes").setId("1").setSource(data, XContentType.JSON).get(); + client().prepareIndex("shapes").setId("1").setSource(data, MediaTypeRegistry.JSON).get(); client().admin().indices().prepareRefresh().get(); // Create a polygon crossing longitude 180 with hole. @@ -359,7 +374,7 @@ public void testShapeRelations() throws Exception { ); data = BytesReference.bytes(jsonBuilder().startObject().field("area", builder).endObject()); - client().prepareIndex("shapes").setId("1").setSource(data, XContentType.JSON).get(); + client().prepareIndex("shapes").setId("1").setSource(data, MediaTypeRegistry.JSON).get(); client().admin().indices().prepareRefresh().get(); result = client().prepareSearch() @@ -407,6 +422,7 @@ public void testBulk() throws Exception { client().admin().indices().prepareCreate("countries").setSettings(settings).setMapping(xContentBuilder).get(); BulkResponse bulk = client().prepareBulk().add(bulkAction, 0, bulkAction.length, null, xContentBuilder.contentType()).get(); + indexRandomForConcurrentSearch("countries"); for (BulkItemResponse item : bulk.getItems()) { assertFalse("unable to index data", item.isFailed()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoPolygonIT.java b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoPolygonIT.java index d0b017732b270..2010a288427b3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoPolygonIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoPolygonIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.geo; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.Version; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.metadata.IndexMetadata; @@ -39,21 +41,37 @@ import org.opensearch.common.settings.Settings; import org.opensearch.search.SearchHit; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.boolQuery; import static 
org.opensearch.index.query.QueryBuilders.geoPolygonQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class GeoPolygonIT extends OpenSearchIntegTestCase { +public class GeoPolygonIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public GeoPolygonIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } @Override protected boolean forbidPrivateIndexSettings() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoShapeIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoShapeIntegrationIT.java index 89eb6038d8110..6dbffa019382d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoShapeIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoShapeIntegrationIT.java @@ -32,31 +32,47 @@ package org.opensearch.search.geo; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.routing.IndexShardRoutingTable; -import org.opensearch.common.Strings; import org.opensearch.common.geo.builders.PointBuilder; import org.opensearch.common.geo.builders.ShapeBuilder; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexService; import org.opensearch.index.mapper.GeoShapeFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.indices.IndicesService; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; import static org.opensearch.index.query.QueryBuilders.geoShapeQuery; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; - import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -public class GeoShapeIntegrationIT extends OpenSearchIntegTestCase { +public class GeoShapeIntegrationIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public GeoShapeIntegrationIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { 
Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } @Override protected Settings nodeSettings(int nodeOrdinal) { @@ -73,32 +89,30 @@ protected Settings nodeSettings(int nodeOrdinal) { */ public void testOrientationPersistence() throws Exception { String idxName = "orientation"; - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("location") - .field("type", "geo_shape") - .field("orientation", "left") - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("location") + .field("type", "geo_shape") + .field("orientation", "left") + .endObject() + .endObject() + .endObject() + .toString(); // create index assertAcked(prepareCreate(idxName).setMapping(mapping)); - mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("location") - .field("type", "geo_shape") - .field("orientation", "right") - .endObject() - .endObject() - .endObject() - ); + mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("location") + .field("type", "geo_shape") + .field("orientation", "right") + .endObject() + .endObject() + .endObject() + .toString(); assertAcked(prepareCreate(idxName + "2").setMapping(mapping)); ensureGreen(idxName, idxName + "2"); @@ -140,44 +154,43 @@ public void testIgnoreMalformed() throws Exception { ensureGreen(); // test self crossing ccw poly not crossing dateline - String polygonGeoJson = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .field("type", "Polygon") - .startArray("coordinates") - .startArray() - .startArray() - .value(176.0) - .value(15.0) - .endArray() - .startArray() - .value(-177.0) - .value(10.0) - .endArray() - .startArray() - .value(-177.0) - .value(-10.0) - .endArray() - .startArray() - .value(176.0) - .value(-15.0) - .endArray() - .startArray() - .value(-177.0) - .value(15.0) - .endArray() - .startArray() - .value(172.0) - .value(0.0) - .endArray() - .startArray() - .value(176.0) - .value(15.0) - .endArray() - .endArray() - .endArray() - .endObject() - ); + String polygonGeoJson = XContentFactory.jsonBuilder() + .startObject() + .field("type", "Polygon") + .startArray("coordinates") + .startArray() + .startArray() + .value(176.0) + .value(15.0) + .endArray() + .startArray() + .value(-177.0) + .value(10.0) + .endArray() + .startArray() + .value(-177.0) + .value(-10.0) + .endArray() + .startArray() + .value(176.0) + .value(-15.0) + .endArray() + .startArray() + .value(-177.0) + .value(15.0) + .endArray() + .startArray() + .value(172.0) + .value(0.0) + .endArray() + .startArray() + .value(176.0) + .value(15.0) + .endArray() + .endArray() + .endArray() + .endObject() + .toString(); indexRandom(true, client().prepareIndex("test").setId("0").setSource("shape", polygonGeoJson)); SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchAllQuery()).get(); @@ -200,7 +213,7 @@ public void testMappingUpdate() throws Exception { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> client().admin().indices().preparePutMapping("test").setSource(update, XContentType.JSON).get() + () -> client().admin().indices().preparePutMapping("test").setSource(update, MediaTypeRegistry.JSON).get() ); assertThat(e.getMessage(), containsString("using [BKD] strategy cannot be merged with")); } 
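The XContentType.JSON to MediaTypeRegistry.JSON swap in the hunk above is the same mechanical substitution this diff applies wherever a media type is handed to setSource or preparePutMapping. A minimal before/after sketch, assuming both constants identify the JSON media type; the index name, id, and source variable are illustrative placeholders:
// Before: media type supplied as the XContentType enum constant.
client().prepareIndex("test").setId("0").setSource(source, XContentType.JSON).get();
// After: media type supplied via MediaTypeRegistry from org.opensearch.core.xcontent.
client().prepareIndex("test").setId("0").setSource(source, MediaTypeRegistry.JSON).get();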
@@ -231,7 +244,7 @@ public void testIndexShapeRouting() throws Exception { + " }\n" + "}"; - indexRandom(true, client().prepareIndex("test").setId("0").setSource(source, XContentType.JSON).setRouting("ABC")); + indexRandom(true, client().prepareIndex("test").setId("0").setSource(source, MediaTypeRegistry.JSON).setRouting("ABC")); SearchResponse searchResponse = client().prepareSearch("test") .setQuery(geoShapeQuery("shape", "0").indexedShapeIndex("test").indexedShapeRouting("ABC")) @@ -267,8 +280,8 @@ public void testIndexPolygonDateLine() throws Exception { String source = "{\n" + " \"shape\" : \"POLYGON((179 0, -179 0, -179 2, 179 2, 179 0))\"" + "}"; - indexRandom(true, client().prepareIndex("quad").setId("0").setSource(source, XContentType.JSON)); - indexRandom(true, client().prepareIndex("vector").setId("0").setSource(source, XContentType.JSON)); + indexRandom(true, client().prepareIndex("quad").setId("0").setSource(source, MediaTypeRegistry.JSON)); + indexRandom(true, client().prepareIndex("vector").setId("0").setSource(source, MediaTypeRegistry.JSON)); try { ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/geo/LegacyGeoShapeIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/search/geo/LegacyGeoShapeIntegrationIT.java index 11f2132bb29de..e9115cf7dfbce 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/geo/LegacyGeoShapeIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/geo/LegacyGeoShapeIntegrationIT.java @@ -32,67 +32,81 @@ package org.opensearch.search.geo; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.OpenSearchException; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.routing.IndexShardRoutingTable; -import org.opensearch.common.Strings; import org.opensearch.common.geo.builders.ShapeBuilder; import org.opensearch.common.settings.Settings; -import org.opensearch.core.xcontent.ToXContent; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.ToXContent; import org.opensearch.geometry.Circle; import org.opensearch.index.IndexService; import org.opensearch.index.mapper.LegacyGeoShapeFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.indices.IndicesService; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; import static org.opensearch.index.query.QueryBuilders.geoShapeQuery; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -public class LegacyGeoShapeIntegrationIT extends OpenSearchIntegTestCase { +public class LegacyGeoShapeIntegrationIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public LegacyGeoShapeIntegrationIT(Settings staticSettings) { + 
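// Forward the per-run static settings, supplied by the @ParametersFactory below, to the parameterized base class.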
super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } /** * Test that orientation parameter correctly persists across cluster restart */ public void testOrientationPersistence() throws Exception { String idxName = "orientation"; - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("location") - .field("type", "geo_shape") - .field("tree", "quadtree") - .field("orientation", "left") - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("location") + .field("type", "geo_shape") + .field("tree", "quadtree") + .field("orientation", "left") + .endObject() + .endObject() + .endObject() + .toString(); // create index assertAcked(prepareCreate(idxName).setMapping(mapping)); - mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("location") - .field("type", "geo_shape") - .field("tree", "quadtree") - .field("orientation", "right") - .endObject() - .endObject() - .endObject() - ); + mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("location") + .field("type", "geo_shape") + .field("tree", "quadtree") + .field("orientation", "right") + .endObject() + .endObject() + .endObject() + .toString(); assertAcked(prepareCreate(idxName + "2").setMapping(mapping)); ensureGreen(idxName, idxName + "2"); @@ -136,44 +150,43 @@ public void testIgnoreMalformed() throws Exception { ensureGreen(); // test self crossing ccw poly not crossing dateline - String polygonGeoJson = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .field("type", "Polygon") - .startArray("coordinates") - .startArray() - .startArray() - .value(176.0) - .value(15.0) - .endArray() - .startArray() - .value(-177.0) - .value(10.0) - .endArray() - .startArray() - .value(-177.0) - .value(-10.0) - .endArray() - .startArray() - .value(176.0) - .value(-15.0) - .endArray() - .startArray() - .value(-177.0) - .value(15.0) - .endArray() - .startArray() - .value(172.0) - .value(0.0) - .endArray() - .startArray() - .value(176.0) - .value(15.0) - .endArray() - .endArray() - .endArray() - .endObject() - ); + String polygonGeoJson = XContentFactory.jsonBuilder() + .startObject() + .field("type", "Polygon") + .startArray("coordinates") + .startArray() + .startArray() + .value(176.0) + .value(15.0) + .endArray() + .startArray() + .value(-177.0) + .value(10.0) + .endArray() + .startArray() + .value(-177.0) + .value(-10.0) + .endArray() + .startArray() + .value(176.0) + .value(-15.0) + .endArray() + .startArray() + .value(-177.0) + .value(15.0) + .endArray() + .startArray() + .value(172.0) + .value(0.0) + .endArray() + .startArray() + .value(176.0) + .value(15.0) + .endArray() + .endArray() + .endArray() + .endObject() + .toString(); indexRandom(true, client().prepareIndex("test").setId("0").setSource("shape", polygonGeoJson)); SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchAllQuery()).get(); @@ -207,7 +220,7 @@ public void testIndexShapeRouting() throws Exception { + " }\n" + "}"; - indexRandom(true, 
client().prepareIndex("test").setId("0").setSource(source, XContentType.JSON).setRouting("ABC")); + indexRandom(true, client().prepareIndex("test").setId("0").setSource(source, MediaTypeRegistry.JSON).setRouting("ABC")); SearchResponse searchResponse = client().prepareSearch("test") .setQuery(geoShapeQuery("shape", "0").indexedShapeIndex("test").indexedShapeRouting("ABC")) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/morelikethis/MoreLikeThisIT.java b/server/src/internalClusterTest/java/org/opensearch/search/morelikethis/MoreLikeThisIT.java index 04d193aaea71a..36fc5de0a5cf7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/morelikethis/MoreLikeThisIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/morelikethis/MoreLikeThisIT.java @@ -32,26 +32,28 @@ package org.opensearch.search.morelikethis; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.RoutingMissingException; import org.opensearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.health.ClusterHealthStatus; -import org.opensearch.common.Strings; import org.opensearch.common.settings.Settings; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.MoreLikeThisQueryBuilder; import org.opensearch.index.query.MoreLikeThisQueryBuilder.Item; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; @@ -64,6 +66,7 @@ import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.moreLikeThisQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; @@ -75,7 +78,19 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; -public class MoreLikeThisIT extends OpenSearchIntegTestCase { +public class MoreLikeThisIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public MoreLikeThisIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { @@ -104,6 +119,7 @@ public void testSimpleMoreLikeThis() throws Exception { client().index(indexRequest("test").id("2").source(jsonBuilder().startObject().field("text", "lucene release").endObject())) .actionGet(); client().admin().indices().refresh(refreshRequest()).actionGet(); + indexRandomForConcurrentSearch("test"); logger.info("Running moreLikeThis"); SearchResponse response = client().prepareSearch() @@ -134,6 +150,7 @@ public void testSimpleMoreLikeThisWithTypes() throws Exception { client().index(indexRequest("test").id("2").source(jsonBuilder().startObject().field("text", "lucene release").endObject())) .actionGet(); client().admin().indices().refresh(refreshRequest()).actionGet(); + indexRandomForConcurrentSearch("test"); logger.info("Running moreLikeThis"); SearchResponse response = client().prepareSearch() @@ -169,6 +186,7 @@ public void testMoreLikeThisForZeroTokensInOneOfTheAnalyzedFields() throws Excep ).actionGet(); client().admin().indices().refresh(refreshRequest()).actionGet(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery( @@ -235,6 +253,7 @@ public void testMoreLikeThisWithAliases() throws Exception { client().index(indexRequest("test").id("4").source(jsonBuilder().startObject().field("text", "opensearch release").endObject())) .actionGet(); client().admin().indices().refresh(refreshRequest()).actionGet(); + indexRandomForConcurrentSearch("test"); logger.info("Running moreLikeThis on index"); SearchResponse response = client().prepareSearch() @@ -270,7 +289,7 @@ public void testMoreLikeThisWithAliasesInLikeDocuments() throws Exception { String indexName = "foo"; String aliasName = "foo_name"; - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("properties").endObject().endObject()); + String mapping = XContentFactory.jsonBuilder().startObject().startObject("properties").endObject().endObject().toString(); client().admin().indices().prepareCreate(indexName).setMapping(mapping).get(); client().admin().indices().prepareAliases().addAlias(indexName, aliasName).get(); @@ -283,6 +302,7 @@ public void testMoreLikeThisWithAliasesInLikeDocuments() throws Exception { client().index(indexRequest(indexName).id("3").source(jsonBuilder().startObject().field("text", "opensearch index").endObject())) .actionGet(); refresh(indexName); + indexRandomForConcurrentSearch(indexName); SearchResponse response = client().prepareSearch() .setQuery(new MoreLikeThisQueryBuilder(null, new Item[] { new Item(aliasName, "1") }).minTermFreq(1).minDocFreq(1)) @@ -292,7 +312,7 @@ public void testMoreLikeThisWithAliasesInLikeDocuments() throws Exception { } public void testMoreLikeThisIssue2197() throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("properties").endObject().endObject()); + String mapping = XContentFactory.jsonBuilder().startObject().startObject("properties").endObject().endObject().toString(); client().admin().indices().prepareCreate("foo").setMapping(mapping).get(); client().prepareIndex("foo") .setId("1") @@ -300,6 +320,7 @@ public void testMoreLikeThisIssue2197() throws Exception { .get(); client().admin().indices().prepareRefresh("foo").get(); assertThat(ensureGreen(), equalTo(ClusterHealthStatus.GREEN)); + indexRandomForConcurrentSearch("foo"); SearchResponse response = client().prepareSearch() .setQuery(new MoreLikeThisQueryBuilder(null, new Item[] { new Item("foo", "1") })) @@ -313,7 
+334,7 @@ public void testMoreLikeThisIssue2197() throws Exception { // Issue #2489 public void testMoreLikeWithCustomRouting() throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("properties").endObject().endObject()); + String mapping = XContentFactory.jsonBuilder().startObject().startObject("properties").endObject().endObject().toString(); client().admin().indices().prepareCreate("foo").setMapping(mapping).get(); ensureGreen(); @@ -323,6 +344,7 @@ public void testMoreLikeWithCustomRouting() throws Exception { .setRouting("2") .get(); client().admin().indices().prepareRefresh("foo").get(); + indexRandomForConcurrentSearch("foo"); SearchResponse response = client().prepareSearch() .setQuery(new MoreLikeThisQueryBuilder(null, new Item[] { new Item("foo", "1").routing("2") })) @@ -333,7 +355,7 @@ public void testMoreLikeWithCustomRouting() throws Exception { // Issue #3039 public void testMoreLikeThisIssueRoutingNotSerialized() throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("properties").endObject().endObject()); + String mapping = XContentFactory.jsonBuilder().startObject().startObject("properties").endObject().endObject().toString(); assertAcked( prepareCreate("foo", 2, Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 2).put(SETTING_NUMBER_OF_REPLICAS, 0)).setMapping( mapping @@ -347,6 +369,7 @@ public void testMoreLikeThisIssueRoutingNotSerialized() throws Exception { .setRouting("4000") .get(); client().admin().indices().prepareRefresh("foo").get(); + indexRandomForConcurrentSearch("foo"); SearchResponse response = client().prepareSearch() .setQuery(new MoreLikeThisQueryBuilder(null, new Item[] { new Item("foo", "1").routing("4000") })) .get(); @@ -380,6 +403,7 @@ public void testNumericField() throws Exception { .get(); refresh(); + indexRandomForConcurrentSearch("test"); // Implicit list of fields -> ignore numeric fields SearchResponse searchResponse = client().prepareSearch() @@ -487,6 +511,7 @@ public void testMoreLikeThisWithFieldAlias() throws Exception { index("test", "_doc", "1", "text", "lucene"); index("test", "_doc", "2", "text", "lucene release"); refresh(); + indexRandomForConcurrentSearch("test"); Item item = new Item("test", "1"); QueryBuilder query = QueryBuilders.moreLikeThisQuery(new String[] { "alias" }, null, new Item[] { item }) @@ -527,6 +552,7 @@ public void testSimpleMoreLikeInclude() throws Exception { .source(jsonBuilder().startObject().field("text", "Lucene has been ported to other programming languages").endObject()) ).actionGet(); client().admin().indices().refresh(refreshRequest()).actionGet(); + indexRandomForConcurrentSearch("test"); logger.info("Running More Like This with include true"); SearchResponse response = client().prepareSearch() @@ -811,11 +837,12 @@ public void testSelectFields() throws IOException, ExecutionException, Interrupt assertHitCount(response, 1); } - public void testWithRouting() throws IOException { + public void testWithRouting() throws IOException, InterruptedException { client().prepareIndex("index").setId("1").setRouting("3").setSource("text", "this is a document").get(); client().prepareIndex("index").setId("2").setRouting("1").setSource("text", "this is another document").get(); client().prepareIndex("index").setId("3").setRouting("4").setSource("text", "this is yet another document").get(); refresh("index"); + indexRandomForConcurrentSearch("index"); Item item = new Item("index", "2").routing("1"); 
MoreLikeThisQueryBuilder moreLikeThisQueryBuilder = new MoreLikeThisQueryBuilder( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/msearch/MultiSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/msearch/MultiSearchIT.java index 8226663abf49e..9f49b7a27cda4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/msearch/MultiSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/msearch/MultiSearchIT.java @@ -32,26 +32,46 @@ package org.opensearch.search.msearch; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.MultiSearchResponse; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.query.QueryBuilders; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFirstHit; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; import static org.opensearch.test.hamcrest.OpenSearchAssertions.hasId; import static org.hamcrest.Matchers.equalTo; -public class MultiSearchIT extends OpenSearchIntegTestCase { +public class MultiSearchIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public MultiSearchIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } - public void testSimpleMultiSearch() { + public void testSimpleMultiSearch() throws InterruptedException { createIndex("test"); ensureGreen(); client().prepareIndex("test").setId("1").setSource("field", "xxx").get(); client().prepareIndex("test").setId("2").setSource("field", "yyy").get(); refresh(); + indexRandomForConcurrentSearch("test"); MultiSearchResponse response = client().prepareMultiSearch() .add(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "xxx"))) .add(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "yyy"))) @@ -69,13 +89,14 @@ public void testSimpleMultiSearch() { assertFirstHit(response.getResponses()[1].getResponse(), hasId("2")); } - public void testSimpleMultiSearchMoreRequests() { + public void testSimpleMultiSearchMoreRequests() throws InterruptedException { createIndex("test"); int numDocs = randomIntBetween(0, 16); for (int i = 0; i < numDocs; i++) { - client().prepareIndex("test").setId(Integer.toString(i)).setSource("{}", XContentType.JSON).get(); + client().prepareIndex("test").setId(Integer.toString(i)).setSource("{}", MediaTypeRegistry.JSON).get(); } refresh(); + indexRandomForConcurrentSearch("test"); int numSearchRequests = randomIntBetween(1, 64); MultiSearchRequest request = new MultiSearchRequest(); diff --git 
a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedExplainIT.java b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedExplainIT.java new file mode 100644 index 0000000000000..a6554271a0bc5 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedExplainIT.java @@ -0,0 +1,115 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.nested; + +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.join.ScoreMode; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.test.OpenSearchIntegTestCase; + +import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.index.query.QueryBuilders.nestedQuery; +import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.equalTo; + +/** + * Creating a separate class with no parameterization to create and index documents in a single + * test run and compare search responses across concurrent and non-concurrent search. For more details, + * refer: https://github.com/opensearch-project/OpenSearch/issues/11413 + */ +public class SimpleNestedExplainIT extends OpenSearchIntegTestCase { + + /* + * Tests the explain output for multiple docs. Concurrent search with multiple slices is tested + * here as call to indexRandomForMultipleSlices is made and compared with explain output for + * non-concurrent search use-case. 
Separate test class is created to test explain for 1 slice + * case in concurrent search, refer {@link SimpleNestedIT#testExplainWithSingleDoc} + * For more details, refer: https://github.com/opensearch-project/OpenSearch/issues/11413 + * */ + public void testExplainMultipleDocs() throws Exception { + assertAcked( + prepareCreate("test").setMapping( + jsonBuilder().startObject() + .startObject("properties") + .startObject("nested1") + .field("type", "nested") + .endObject() + .endObject() + .endObject() + ) + ); + + ensureGreen(); + + client().prepareIndex("test") + .setId("1") + .setSource( + jsonBuilder().startObject() + .field("field1", "value1") + .startArray("nested1") + .startObject() + .field("n_field1", "n_value1") + .endObject() + .startObject() + .field("n_field1", "n_value1") + .endObject() + .endArray() + .endObject() + ) + .setRefreshPolicy(IMMEDIATE) + .get(); + + indexRandomForMultipleSlices("test"); + + // Turn off the concurrent search setting to test search with non-concurrent search + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build()) + .get(); + + SearchResponse nonConSearchResp = client().prepareSearch("test") + .setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1"), ScoreMode.Total)) + .setExplain(true) + .get(); + assertNoFailures(nonConSearchResp); + assertThat(nonConSearchResp.getHits().getTotalHits().value, equalTo(1L)); + Explanation nonConSearchExplain = nonConSearchResp.getHits().getHits()[0].getExplanation(); + assertThat(nonConSearchExplain.getValue(), equalTo(nonConSearchResp.getHits().getHits()[0].getScore())); + + // Turn on the concurrent search setting to test search with concurrent search + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build()) + .get(); + + SearchResponse conSearchResp = client().prepareSearch("test") + .setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1"), ScoreMode.Total)) + .setExplain(true) + .get(); + assertNoFailures(conSearchResp); + assertThat(conSearchResp.getHits().getTotalHits().value, equalTo(1L)); + Explanation conSearchExplain = conSearchResp.getHits().getHits()[0].getExplanation(); + assertThat(conSearchExplain.getValue(), equalTo(conSearchResp.getHits().getHits()[0].getScore())); + + // assert that the explanation for concurrent search should be equal to the non-concurrent search's explanation + assertEquals(nonConSearchExplain, conSearchExplain); + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().putNull(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey()).build()) + .get(); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java index bd67f5f83375e..19e38da1aed05 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.nested; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.search.Explanation; import org.apache.lucene.search.join.ScoreMode; import org.opensearch.action.DocWriteResponse; @@ -45,15 +47,18 @@ import
org.opensearch.action.search.SearchType; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.settings.Settings; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.sort.NestedSortBuilder; import org.opensearch.search.sort.SortBuilders; import org.opensearch.search.sort.SortMode; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; @@ -61,17 +66,30 @@ import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.nestedQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; - import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.startsWith; -public class SimpleNestedIT extends OpenSearchIntegTestCase { +public class SimpleNestedIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public SimpleNestedIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + public void testSimpleNested() throws Exception { assertAcked(prepareCreate("test").setMapping("nested1", "type=nested")); ensureGreen(); @@ -102,6 +120,7 @@ public void testSimpleNested() throws Exception { .get(); waitForRelocation(ClusterHealthStatus.GREEN); + indexRandomForConcurrentSearch("test"); GetResponse getResponse = client().prepareGet("test", "1").get(); assertThat(getResponse.isExists(), equalTo(true)); assertThat(getResponse.getSourceAsBytes(), notNullValue()); @@ -269,6 +288,7 @@ public void testMultiNested() throws Exception { refresh(); // check the numDocs assertDocumentCount("test", 7); + indexRandomForConcurrentSearch("test"); // do some multi nested queries SearchResponse searchResponse = client().prepareSearch("test") @@ -429,7 +449,13 @@ public void testDeleteNestedDocsWithAlias() throws Exception { assertDocumentCount("test", 6); } - public void testExplain() throws Exception { + /* + * Tests the explain output for single doc. Concurrent search with only slice 1 is tested + * here as call to indexRandomForMultipleSlices has implications on the range of child docs + * in the explain output. 
Separate test class is created to test explain for multiple slices + * case in concurrent search, refer {@link SimpleNestedExplainIT} + * */ + public void testExplainWithSingleDoc() throws Exception { assertAcked( prepareCreate("test").setMapping( jsonBuilder().startObject() @@ -543,6 +569,7 @@ public void testSimpleNestedSorting() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setQuery(QueryBuilders.matchAllQuery()) @@ -651,6 +678,7 @@ public void testSimpleNestedSortingWithNestedFilterMissing() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchRequestBuilder searchRequestBuilder = client().prepareSearch("test") .setQuery(QueryBuilders.matchAllQuery()) @@ -783,7 +811,7 @@ public void testNestedSortWithMultiLevelFiltering() throws Exception { + " }\n" + " ]\n" + "}", - XContentType.JSON + MediaTypeRegistry.JSON ) .get(); @@ -835,10 +863,11 @@ public void testNestedSortWithMultiLevelFiltering() throws Exception { + " }\n" + " ]\n" + "}", - XContentType.JSON + MediaTypeRegistry.JSON ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); // access id = 1, read, max value, asc, should use grault and quxx SearchResponse searchResponse = client().prepareSearch() @@ -987,7 +1016,7 @@ public void testLeakingSortValues() throws Exception { + " }\n" + " ]\n" + "}", - XContentType.JSON + MediaTypeRegistry.JSON ) .get(); @@ -1006,11 +1035,12 @@ public void testLeakingSortValues() throws Exception { + " } \n" + " ]\n" + "}", - XContentType.JSON + MediaTypeRegistry.JSON ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(termQuery("_id", 2)) @@ -1191,6 +1221,7 @@ public void testSortNestedWithNestedFilter() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); // Without nested filter SearchResponse searchResponse = client().prepareSearch() @@ -1571,6 +1602,7 @@ public void testNestedSortingWithNestedFilterAsFilter() throws Exception { .get(); assertTrue(indexResponse2.getShardInfo().getSuccessful() > 0); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .addSort(SortBuilders.fieldSort("users.first").setNestedPath("users").order(SortOrder.ASC)) @@ -1603,6 +1635,7 @@ public void testCheckFixedBitSetCache() throws Exception { client().prepareIndex("test").setId("1").setSource("field", "value").get(); refresh(); ensureSearchable("test"); + indexRandomForConcurrentSearch("test"); // No nested mapping yet, there shouldn't be anything in the fixed bit set cache ClusterStatsResponse clusterStatsResponse = client().admin().cluster().prepareClusterStats().get(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java index 6fea62a416818..8ae652082f653 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java @@ -8,13 +8,10 @@ package org.opensearch.search.pit; -import org.junit.After; -import org.junit.Before; -import org.opensearch.action.ActionFuture; +import org.opensearch.ExceptionsHelper; +import org.opensearch.action.LatchedActionListener; import org.opensearch.action.admin.indices.stats.IndicesStatsRequest; import 
org.opensearch.action.admin.indices.stats.IndicesStatsResponse; -import org.opensearch.action.ActionListener; -import org.opensearch.action.LatchedActionListener; import org.opensearch.action.search.CreatePitAction; import org.opensearch.action.search.CreatePitRequest; import org.opensearch.action.search.CreatePitResponse; @@ -22,13 +19,21 @@ import org.opensearch.action.search.DeletePitInfo; import org.opensearch.action.search.DeletePitRequest; import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.action.search.SearchPhaseExecutionException; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.search.ShardSearchFailure; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.search.SearchContextMissingException; import org.opensearch.search.builder.PointInTimeBuilder; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; import java.util.ArrayList; import java.util.List; @@ -38,10 +43,10 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.hamcrest.Matchers.blankOrNullString; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.not; -import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; /** * Multi node integration tests for delete PIT use cases @@ -88,8 +93,8 @@ public void testDeletePit() throws Exception { assertTrue(deletePitInfo.isSuccessful()); } validatePitStats("index", 0, 10); - /** - * Checking deleting the same PIT id again results in succeeded + /* + Checking that deleting the same PIT id again succeeds */ deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); deletePITResponse = deleteExecute.get(); @@ -108,8 +113,8 @@ public void testDeletePitWithValidAndDeletedIds() throws Exception { pitIds.add(pitResponse.getId()); validatePitStats("index", 5, 0); - /** - * Delete Pit #1 + /* + Delete Pit #1 */ DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds); ActionFuture<DeletePitResponse> deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); @@ -123,8 +128,8 @@ public void testDeletePitWithValidAndDeletedIds() throws Exception { pitResponse = execute.get(); pitIds.add(pitResponse.getId()); validatePitStats("index", 5, 5); - /** - * Delete PIT with both Ids #1 (which is deleted) and #2 (which is present) + /* + Delete PIT with both Ids #1 (which is deleted) and #2 (which is present) */ deletePITRequest = new DeletePitRequest(pitIds); deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); @@ -160,9 +165,9 @@ public void testDeleteAllPits() throws Exception { validatePitStats("index1", 5, 0); DeletePitRequest deletePITRequest = new DeletePitRequest("_all"); - /** - * When we invoke delete again, returns success after clearing the remaining readers. Asserting reader context - * not found exceptions don't result in failures ( as deletion in one node is successful ) + /* + When we invoke delete again, returns success after clearing the remaining readers.
Asserting reader context + not found exceptions don't result in failures ( as deletion in one node is successful ) */ ActionFuture<DeletePitResponse> execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); DeletePitResponse deletePITResponse = execute.get(); @@ -202,9 +207,9 @@ public Settings onNodeStopped(String nodeName) throws Exception { }); ensureGreen(); - /** - * When we invoke delete again, returns success after clearing the remaining readers. Asserting reader context - * not found exceptions don't result in failures ( as deletion in one node is successful ) + /* + When we invoke delete again, returns success after clearing the remaining readers. Asserting reader context + not found exceptions don't result in failures ( as deletion in one node is successful ) */ ActionFuture<DeletePitResponse> execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); DeletePitResponse deletePITResponse = execute.get(); @@ -237,9 +242,9 @@ public Settings onNodeStopped(String nodeName) throws Exception { } }); ensureGreen(); - /** - * When we invoke delete again, returns success as all readers are cleared. (Delete all on node which is Up and - * once the node restarts, all active contexts are cleared in the node ) + /* + When we invoke delete again, returns success as all readers are cleared. (Delete all on node which is Up and + once the node restarts, all active contexts are cleared in the node ) */ ActionFuture<DeletePitResponse> execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); DeletePitResponse deletePITResponse = execute.get(); @@ -263,18 +268,23 @@ public void testDeleteWhileSearch() throws Exception { try { latch.await(); for (int j = 0; j < 30; j++) { - client().prepareSearch() + SearchResponse searchResponse = client().prepareSearch() .setSize(2) .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) .execute() .get(); + if (searchResponse.getFailedShards() != 0) { + verifySearchContextMissingException(searchResponse.getShardFailures()); + } } } catch (Exception e) { - /** - * assert for exception once delete pit goes through. throw error in case of any exeption before that. + /* + assert for exception once delete pit goes through. throw error in case of any exception before that.
*/ if (deleted.get() == true) { - if (!e.getMessage().contains("all shards failed")) throw new AssertionError(e); + Throwable t = ExceptionsHelper.unwrapCause(e.getCause()); + assertTrue(e.toString(), t instanceof SearchPhaseExecutionException); + verifySearchContextMissingException(((SearchPhaseExecutionException) t).shardFailures()); return; } throw new AssertionError(e); @@ -283,9 +293,9 @@ public void testDeleteWhileSearch() throws Exception { threads[i].setName("opensearch[node_s_0][search]"); threads[i].start(); } + deleted.set(true); ActionFuture<DeletePitResponse> execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); DeletePitResponse deletePITResponse = execute.get(); - deleted.set(true); for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { assertTrue(pitIds.contains(deletePitInfo.getPitId())); assertTrue(deletePitInfo.isSuccessful()); @@ -296,6 +306,17 @@ public void testDeleteWhileSearch() throws Exception { } } + private void verifySearchContextMissingException(ShardSearchFailure[] failures) { + for (ShardSearchFailure failure : failures) { + Throwable cause = ExceptionsHelper.unwrapCause(failure.getCause()); + if (failure.toString().contains("reader_context is already closed can't increment refCount current count")) { + // this is fine, expected search error when context is already deleted + } else { + assertTrue(failure.toString(), cause instanceof SearchContextMissingException); + } + } + } + public void testtConcurrentDeletes() throws InterruptedException, ExecutionException { CreatePitResponse pitResponse = createPitOnIndex("index"); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java index 117165bf1e4d6..8bea5ef97fbba 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java @@ -8,14 +8,14 @@ package org.opensearch.search.pit; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.opensearch.action.ActionFuture; -import org.opensearch.action.ActionListener; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.LatchedActionListener; import org.opensearch.action.admin.cluster.state.ClusterStateRequest; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.action.admin.indices.flush.FlushRequest; +import org.opensearch.action.admin.indices.stats.IndicesStatsRequest; +import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; import org.opensearch.action.search.CreatePitAction; import org.opensearch.action.search.CreatePitRequest; import org.opensearch.action.search.CreatePitResponse; @@ -29,18 +29,23 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.client.Requests; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; import org.opensearch.search.builder.PointInTimeBuilder; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.threadpool.TestThreadPool; import 
org.opensearch.threadpool.ThreadPool; -import org.opensearch.action.admin.indices.flush.FlushRequest; -import org.opensearch.action.admin.indices.stats.IndicesStatsRequest; -import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.HashSet; import java.util.LinkedList; import java.util.List; @@ -51,16 +56,28 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; -import static org.hamcrest.Matchers.containsString; import static org.opensearch.action.search.PitTestsUtil.assertSegments; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.hamcrest.Matchers.containsString; /** * Multi node integration tests for PIT creation and search operation with PIT ID. */ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 2) -public class PitMultiNodeIT extends OpenSearchIntegTestCase { +public class PitMultiNodeIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + public PitMultiNodeIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } @Before public void setupIndex() throws ExecutionException, InterruptedException { @@ -77,6 +94,7 @@ public void clearIndex() { public void testPit() throws Exception { CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); request.setIndices(new String[] { "index" }); + indexRandomForConcurrentSearch("index"); ActionFuture<CreatePitResponse> execute = client().execute(CreatePitAction.INSTANCE, request); CreatePitResponse pitResponse = execute.get(); SearchResponse searchResponse = client().prepareSearch("index") diff --git a/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java index 55a2a1fdde2b5..bc9eeb528b031 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.preference; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.opensearch.action.search.SearchRequestBuilder; @@ -42,19 +44,22 @@ import org.opensearch.cluster.routing.OperationRouting; import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.Strings; -import org.opensearch.node.Node; import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.node.Node; 
import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; -import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.HashSet; import java.util.Set; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -64,7 +69,19 @@ import static org.hamcrest.Matchers.not; @OpenSearchIntegTestCase.ClusterScope(minNumDataNodes = 2) -public class SearchPreferenceIT extends OpenSearchIntegTestCase { +public class SearchPreferenceIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public SearchPreferenceIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } @Override public Settings nodeSettings(int nodeOrdinal) { @@ -75,7 +92,7 @@ public Settings nodeSettings(int nodeOrdinal) { } // see #2896 - public void testStopOneNodePreferenceWithRedState() throws IOException { + public void testStopOneNodePreferenceWithRedState() throws Exception { assertAcked( prepareCreate("test").setSettings( Settings.builder().put("index.number_of_shards", cluster().numDataNodes() + 2).put("index.number_of_replicas", 0) @@ -86,6 +103,7 @@ public void testStopOneNodePreferenceWithRedState() throws IOException { client().prepareIndex("test").setId("" + i).setSource("field1", "value1").get(); } refresh(); + indexRandomForConcurrentSearch("test"); internalCluster().stopRandomDataNode(); client().admin().cluster().prepareHealth().setWaitForStatus(ClusterHealthStatus.RED).get(); String[] preferences = new String[] { @@ -114,7 +132,7 @@ public void testStopOneNodePreferenceWithRedState() throws IOException { assertThat("_only_local", searchResponse.getFailedShards(), greaterThanOrEqualTo(0)); } - public void testNoPreferenceRandom() { + public void testNoPreferenceRandom() throws Exception { assertAcked( prepareCreate("test").setSettings( // this test needs at least a replica to make sure two consecutive searches go to two different copies of the same data @@ -125,6 +143,7 @@ public void testNoPreferenceRandom() { client().prepareIndex("test").setSource("field1", "value1").get(); refresh(); + indexRandomForConcurrentSearch("test"); final Client client = internalCluster().smartClient(); SearchResponse searchResponse = client.prepareSearch("test").setQuery(matchAllQuery()).get(); @@ -135,12 +154,13 @@ public void testNoPreferenceRandom() { assertThat(firstNodeId, not(equalTo(secondNodeId))); } - public void testSimplePreference() { - client().admin().indices().prepareCreate("test").setSettings("{\"number_of_replicas\": 1}", XContentType.JSON).get(); + public void testSimplePreference() throws InterruptedException { + client().admin().indices().prepareCreate("test").setSettings("{\"number_of_replicas\": 1}", MediaTypeRegistry.JSON).get(); ensureGreen(); 
client().prepareIndex("test").setSource("field1", "value1").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); @@ -177,7 +197,7 @@ public void testThatSpecifyingNonExistingNodesReturnsUsefulError() { } } - public void testNodesOnlyRandom() { + public void testNodesOnlyRandom() throws Exception { assertAcked( prepareCreate("test").setSettings( // this test needs at least a replica to make sure two consecutive searches go to two different copies of the same data @@ -187,6 +207,7 @@ public void testNodesOnlyRandom() { ensureGreen(); client().prepareIndex("test").setSource("field1", "value1").get(); refresh(); + indexRandomForConcurrentSearch("test"); final Client client = internalCluster().smartClient(); // multiple wildchar to cover multi-param usecase @@ -238,7 +259,7 @@ private void assertSearchOnRandomNodes(SearchRequestBuilder request) { assertThat(hitNodes.size(), greaterThan(1)); } - public void testCustomPreferenceUnaffectedByOtherShardMovements() { + public void testCustomPreferenceUnaffectedByOtherShardMovements() throws InterruptedException { /* * Custom preferences can be used to encourage searches to go to a consistent set of shard copies, meaning that other copies' data @@ -257,6 +278,7 @@ public void testCustomPreferenceUnaffectedByOtherShardMovements() { ensureGreen(); client().prepareIndex("test").setSource("field1", "value1").get(); refresh(); + indexRandomForConcurrentSearch("test"); final String customPreference = randomAlphaOfLength(10); @@ -276,6 +298,7 @@ public void testCustomPreferenceUnaffectedByOtherShardMovements() { prepareCreate("test2").setSettings(Settings.builder().put(indexSettings()).put(SETTING_NUMBER_OF_REPLICAS, replicasInNewIndex)) ); ensureGreen(); + indexRandomForConcurrentSearch("test2"); assertSearchesSpecificNode("test", customPreference, nodeId); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/profile/ProfilerSingleNodeNetworkTest.java b/server/src/internalClusterTest/java/org/opensearch/search/profile/ProfilerSingleNodeNetworkTest.java index 12d68c9c38ca1..de7677e3b3708 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/profile/ProfilerSingleNodeNetworkTest.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/profile/ProfilerSingleNodeNetworkTest.java @@ -18,9 +18,9 @@ import java.util.List; import java.util.Map; +import static org.opensearch.search.profile.query.RandomQueryGenerator.randomQueryBuilder; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; -import static org.opensearch.search.profile.query.RandomQueryGenerator.randomQueryBuilder; public class ProfilerSingleNodeNetworkTest extends OpenSearchSingleNodeTestCase { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/profile/aggregation/AggregationProfilerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/profile/aggregation/AggregationProfilerIT.java index 0f08c537d74d8..2f608a0cbe06f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/profile/aggregation/AggregationProfilerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/profile/aggregation/AggregationProfilerIT.java @@ -32,9 +32,11 @@ package org.opensearch.search.profile.aggregation; -import org.hamcrest.core.IsNull; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import 
org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.InternalAggregation; @@ -44,18 +46,22 @@ import org.opensearch.search.aggregations.metrics.Stats; import org.opensearch.search.profile.ProfileResult; import org.opensearch.search.profile.ProfileShardResult; +import org.opensearch.search.profile.query.CollectorResult; import org.opensearch.search.profile.query.QueryProfileShardResult; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; +import org.hamcrest.core.IsNull; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.sameInstance; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.avg; import static org.opensearch.search.aggregations.AggregationBuilders.diversifiedSampler; import static org.opensearch.search.aggregations.AggregationBuilders.global; @@ -65,13 +71,19 @@ import static org.opensearch.search.aggregations.AggregationBuilders.terms; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.sameInstance; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class AggregationProfilerIT extends OpenSearchIntegTestCase { +public class AggregationProfilerIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + private static final String BUILD_LEAF_COLLECTOR = AggregationTimingType.BUILD_LEAF_COLLECTOR.toString(); private static final String COLLECT = AggregationTimingType.COLLECT.toString(); private static final String POST_COLLECTION = AggregationTimingType.POST_COLLECTION.toString(); @@ -90,7 +102,52 @@ public class AggregationProfilerIT extends OpenSearchIntegTestCase { COLLECT + "_count", POST_COLLECTION + "_count", BUILD_AGGREGATION + "_count", - REDUCE + "_count" + REDUCE + "_count", + INITIALIZE + "_start_time", + BUILD_LEAF_COLLECTOR + "_start_time", + COLLECT + "_start_time", + POST_COLLECTION + "_start_time", + BUILD_AGGREGATION + "_start_time", + REDUCE + "_start_time" + ); + + private static final Set<String> CONCURRENT_SEARCH_BREAKDOWN_KEYS = Set.of( + INITIALIZE, + BUILD_LEAF_COLLECTOR, + COLLECT, + POST_COLLECTION, + BUILD_AGGREGATION, + REDUCE, + INITIALIZE + "_count", + BUILD_LEAF_COLLECTOR + "_count", + COLLECT + "_count", + POST_COLLECTION + "_count", + BUILD_AGGREGATION + "_count", + REDUCE + "_count", + "max_" + INITIALIZE, + "max_" + BUILD_LEAF_COLLECTOR, + "max_" + COLLECT, + "max_" + 
POST_COLLECTION, + "max_" + BUILD_AGGREGATION, + "max_" + REDUCE, + "min_" + INITIALIZE, + "min_" + BUILD_LEAF_COLLECTOR, + "min_" + COLLECT, + "min_" + POST_COLLECTION, + "min_" + BUILD_AGGREGATION, + "min_" + REDUCE, + "avg_" + INITIALIZE, + "avg_" + BUILD_LEAF_COLLECTOR, + "avg_" + COLLECT, + "avg_" + POST_COLLECTION, + "avg_" + BUILD_AGGREGATION, + "avg_" + REDUCE, + "max_" + BUILD_LEAF_COLLECTOR + "_count", + "max_" + COLLECT + "_count", + "min_" + BUILD_LEAF_COLLECTOR + "_count", + "min_" + COLLECT + "_count", + "avg_" + BUILD_LEAF_COLLECTOR + "_count", + "avg_" + COLLECT + "_count" ); private static final String TOTAL_BUCKETS = "total_buckets"; @@ -105,6 +162,20 @@ public class AggregationProfilerIT extends OpenSearchIntegTestCase { private static final String TAG_FIELD = "tag"; private static final String STRING_FIELD = "string_field"; private final int numDocs = 5; + private static final String REASON_SEARCH_TOP_HITS = "search_top_hits"; + private static final String REASON_AGGREGATION = "aggregation"; + + public AggregationProfilerIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } @Override protected int numberOfShards() { @@ -169,7 +240,18 @@ public void testSimpleProfile() { assertThat(histoAggResult.getTime(), greaterThan(0L)); Map<String, Long> breakdown = histoAggResult.getTimeBreakdown(); assertThat(breakdown, notNullValue()); - assertThat(breakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + if (histoAggResult.getMaxSliceTime() != null) { + // concurrent segment search enabled + assertThat(breakdown.keySet(), equalTo(CONCURRENT_SEARCH_BREAKDOWN_KEYS)); + for (QueryProfileShardResult collectorResult : profileShardResult.getQueryProfileResults()) { + assertCollectorResultWithConcurrentSearchEnabled(collectorResult, 2); + } + } else { + assertThat(breakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + for (QueryProfileShardResult collectorResult : profileShardResult.getQueryProfileResults()) { + assertCollectorResult(collectorResult, 2); + } + } assertThat(breakdown.get(INITIALIZE), greaterThan(0L)); assertThat(breakdown.get(COLLECT), greaterThan(0L)); assertThat(breakdown.get(BUILD_AGGREGATION).longValue(), greaterThan(0L)); @@ -212,7 +294,18 @@ public void testMultiLevelProfile() { assertThat(histoAggResult.getTime(), greaterThan(0L)); Map<String, Long> histoBreakdown = histoAggResult.getTimeBreakdown(); assertThat(histoBreakdown, notNullValue()); - assertThat(histoBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + if (histoAggResult.getMaxSliceTime() != null) { + // concurrent segment search enabled + assertThat(histoBreakdown.keySet(), equalTo(CONCURRENT_SEARCH_BREAKDOWN_KEYS)); + for (QueryProfileShardResult collectorResult : profileShardResult.getQueryProfileResults()) { + assertCollectorResultWithConcurrentSearchEnabled(collectorResult, 2); + } + } else { + assertThat(histoBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + for (QueryProfileShardResult collectorResult : profileShardResult.getQueryProfileResults()) { + assertCollectorResult(collectorResult, 2); + } + } assertThat(histoBreakdown.get(INITIALIZE), greaterThan(0L)); assertThat(histoBreakdown.get(COLLECT), greaterThan(0L)); assertThat(histoBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); @@ -230,7 
+323,12 @@ public void testMultiLevelProfile() { assertThat(termsAggResult.getTime(), greaterThan(0L)); Map<String, Long> termsBreakdown = termsAggResult.getTimeBreakdown(); assertThat(termsBreakdown, notNullValue()); - assertThat(termsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + if (termsAggResult.getMaxSliceTime() != null) { + // concurrent segment search enabled + assertThat(termsBreakdown.keySet(), equalTo(CONCURRENT_SEARCH_BREAKDOWN_KEYS)); + } else { + assertThat(termsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + } assertThat(termsBreakdown.get(INITIALIZE), greaterThan(0L)); assertThat(termsBreakdown.get(COLLECT), greaterThan(0L)); assertThat(termsBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); @@ -245,7 +343,12 @@ public void testMultiLevelProfile() { assertThat(avgAggResult.getTime(), greaterThan(0L)); Map<String, Long> avgBreakdown = avgAggResult.getTimeBreakdown(); assertThat(avgBreakdown, notNullValue()); - assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + if (avgAggResult.getMaxSliceTime() != null) { + // concurrent segment search enabled + assertThat(avgBreakdown.keySet(), equalTo(CONCURRENT_SEARCH_BREAKDOWN_KEYS)); + } else { + assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + } assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L)); assertThat(avgBreakdown.get(COLLECT), greaterThan(0L)); assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); @@ -298,7 +401,18 @@ public void testMultiLevelProfileBreadthFirst() { assertThat(histoAggResult.getTime(), greaterThan(0L)); Map<String, Long> histoBreakdown = histoAggResult.getTimeBreakdown(); assertThat(histoBreakdown, notNullValue()); - assertThat(histoBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + if (histoAggResult.getMaxSliceTime() != null) { + // concurrent segment search enabled + assertThat(histoBreakdown.keySet(), equalTo(CONCURRENT_SEARCH_BREAKDOWN_KEYS)); + for (QueryProfileShardResult collectorResult : profileShardResult.getQueryProfileResults()) { + assertCollectorResultWithConcurrentSearchEnabled(collectorResult, 2); + } + } else { + assertThat(histoBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + for (QueryProfileShardResult collectorResult : profileShardResult.getQueryProfileResults()) { + assertCollectorResult(collectorResult, 2); + } + } assertThat(histoBreakdown.get(INITIALIZE), greaterThan(0L)); assertThat(histoBreakdown.get(COLLECT), greaterThan(0L)); assertThat(histoBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); @@ -316,7 +430,12 @@ public void testMultiLevelProfileBreadthFirst() { assertThat(termsAggResult.getTime(), greaterThan(0L)); Map<String, Long> termsBreakdown = termsAggResult.getTimeBreakdown(); assertThat(termsBreakdown, notNullValue()); - assertThat(termsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + if (termsAggResult.getMaxSliceTime() != null) { + // concurrent segment search enabled + assertThat(termsBreakdown.keySet(), equalTo(CONCURRENT_SEARCH_BREAKDOWN_KEYS)); + } else { + assertThat(termsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + } assertThat(termsBreakdown.get(INITIALIZE), greaterThan(0L)); assertThat(termsBreakdown.get(COLLECT), greaterThan(0L)); assertThat(termsBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); @@ -331,7 +450,12 @@ public void testMultiLevelProfileBreadthFirst() { assertThat(avgAggResult.getTime(), greaterThan(0L)); Map<String, Long> avgBreakdown = avgAggResult.getTimeBreakdown(); assertThat(avgBreakdown, notNullValue()); - assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + if 
(avgAggResult.getMaxSliceTime() != null) { + // concurrent segment search enabled + assertThat(avgBreakdown.keySet(), equalTo(CONCURRENT_SEARCH_BREAKDOWN_KEYS)); + } else { + assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + } assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L)); assertThat(avgBreakdown.get(COLLECT), greaterThan(0L)); assertThat(avgBreakdown.get(BUILD_AGGREGATION), greaterThan(0L)); @@ -369,7 +493,18 @@ public void testDiversifiedAggProfile() { assertThat(diversifyAggResult.getTime(), greaterThan(0L)); Map<String, Long> diversifyBreakdown = diversifyAggResult.getTimeBreakdown(); assertThat(diversifyBreakdown, notNullValue()); - assertThat(diversifyBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + if (diversifyAggResult.getMaxSliceTime() != null) { + // concurrent segment search enabled + assertThat(diversifyBreakdown.keySet(), equalTo(CONCURRENT_SEARCH_BREAKDOWN_KEYS)); + for (QueryProfileShardResult collectorResult : profileShardResult.getQueryProfileResults()) { + assertCollectorResultWithConcurrentSearchEnabled(collectorResult, 2); + } + } else { + assertThat(diversifyBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + for (QueryProfileShardResult collectorResult : profileShardResult.getQueryProfileResults()) { + assertCollectorResult(collectorResult, 2); + } + } assertThat(diversifyBreakdown.get(INITIALIZE), greaterThan(0L)); assertThat(diversifyBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); assertThat(diversifyBreakdown.get(COLLECT), greaterThan(0L)); @@ -386,7 +521,12 @@ public void testDiversifiedAggProfile() { assertThat(maxAggResult.getTime(), greaterThan(0L)); Map<String, Long> maxBreakdown = maxAggResult.getTimeBreakdown(); assertThat(maxBreakdown, notNullValue()); - assertThat(maxBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + if (maxAggResult.getMaxSliceTime() != null) { + // concurrent segment search enabled + assertThat(maxBreakdown.keySet(), equalTo(CONCURRENT_SEARCH_BREAKDOWN_KEYS)); + } else { + assertThat(maxBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + } assertThat(maxBreakdown.get(INITIALIZE), greaterThan(0L)); assertThat(maxBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); assertThat(maxBreakdown.get(COLLECT), greaterThan(0L)); @@ -439,7 +579,18 @@ public void testComplexProfile() { assertThat(histoAggResult.getTime(), greaterThan(0L)); Map<String, Long> histoBreakdown = histoAggResult.getTimeBreakdown(); assertThat(histoBreakdown, notNullValue()); - assertThat(histoBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + if (histoAggResult.getMaxSliceTime() != null) { + // concurrent segment search enabled + assertThat(histoBreakdown.keySet(), equalTo(CONCURRENT_SEARCH_BREAKDOWN_KEYS)); + for (QueryProfileShardResult collectorResult : profileShardResult.getQueryProfileResults()) { + assertCollectorResultWithConcurrentSearchEnabled(collectorResult, 2); + } + } else { + assertThat(histoBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + for (QueryProfileShardResult collectorResult : profileShardResult.getQueryProfileResults()) { + assertCollectorResult(collectorResult, 2); + } + } assertThat(histoBreakdown.get(INITIALIZE), greaterThan(0L)); assertThat(histoBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); assertThat(histoBreakdown.get(COLLECT), greaterThan(0L)); @@ -462,7 +613,12 @@ public void testComplexProfile() { assertThat(tagsAggResult.getTime(), greaterThan(0L)); Map<String, Long> tagsBreakdown = tagsAggResult.getTimeBreakdown(); assertThat(tagsBreakdown, notNullValue()); - assertThat(tagsBreakdown.keySet(), 
equalTo(BREAKDOWN_KEYS)); + if (tagsAggResult.getMaxSliceTime() != null) { + // concurrent segment search enabled + assertThat(tagsBreakdown.keySet(), equalTo(CONCURRENT_SEARCH_BREAKDOWN_KEYS)); + } else { + assertThat(tagsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + } assertThat(tagsBreakdown.get(INITIALIZE), greaterThan(0L)); assertThat(tagsBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); assertThat(tagsBreakdown.get(COLLECT), greaterThan(0L)); @@ -482,7 +638,12 @@ public void testComplexProfile() { assertThat(avgAggResult.getTime(), greaterThan(0L)); Map<String, Long> avgBreakdown = avgAggResult.getTimeBreakdown(); assertThat(avgBreakdown, notNullValue()); - assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + if (avgAggResult.getMaxSliceTime() != null) { + // concurrent segment search enabled + assertThat(avgBreakdown.keySet(), equalTo(CONCURRENT_SEARCH_BREAKDOWN_KEYS)); + } else { + assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + } assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L)); assertThat(avgBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); assertThat(avgBreakdown.get(COLLECT), greaterThan(0L)); @@ -498,7 +659,12 @@ public void testComplexProfile() { assertThat(maxAggResult.getTime(), greaterThan(0L)); Map<String, Long> maxBreakdown = maxAggResult.getTimeBreakdown(); assertThat(maxBreakdown, notNullValue()); - assertThat(maxBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + if (maxAggResult.getMaxSliceTime() != null) { + // concurrent segment search enabled + assertThat(maxBreakdown.keySet(), equalTo(CONCURRENT_SEARCH_BREAKDOWN_KEYS)); + } else { + assertThat(maxBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + } assertThat(maxBreakdown.get(INITIALIZE), greaterThan(0L)); assertThat(maxBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); assertThat(maxBreakdown.get(COLLECT), greaterThan(0L)); @@ -514,7 +680,12 @@ public void testComplexProfile() { assertThat(stringsAggResult.getTime(), greaterThan(0L)); Map<String, Long> stringsBreakdown = stringsAggResult.getTimeBreakdown(); assertThat(stringsBreakdown, notNullValue()); - assertThat(stringsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + if (stringsAggResult.getMaxSliceTime() != null) { + // concurrent segment search enabled + assertThat(stringsBreakdown.keySet(), equalTo(CONCURRENT_SEARCH_BREAKDOWN_KEYS)); + } else { + assertThat(stringsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + } assertThat(stringsBreakdown.get(INITIALIZE), greaterThan(0L)); assertThat(stringsBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); assertThat(stringsBreakdown.get(COLLECT), greaterThan(0L)); @@ -534,7 +705,12 @@ public void testComplexProfile() { assertThat(avgAggResult.getTime(), greaterThan(0L)); avgBreakdown = avgAggResult.getTimeBreakdown(); assertThat(avgBreakdown, notNullValue()); - assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + if (avgAggResult.getMaxSliceTime() != null) { + // concurrent segment search enabled + assertThat(avgBreakdown.keySet(), equalTo(CONCURRENT_SEARCH_BREAKDOWN_KEYS)); + } else { + assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + } assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L)); assertThat(avgBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); assertThat(avgBreakdown.get(COLLECT), greaterThan(0L)); @@ -550,7 +726,12 @@ public void testComplexProfile() { assertThat(maxAggResult.getTime(), greaterThan(0L)); maxBreakdown = maxAggResult.getTimeBreakdown(); assertThat(maxBreakdown, notNullValue()); - assertThat(maxBreakdown.keySet(), 
equalTo(BREAKDOWN_KEYS)); + if (maxAggResult.getMaxSliceTime() != null) { + // concurrent segment search enabled + assertThat(maxBreakdown.keySet(), equalTo(CONCURRENT_SEARCH_BREAKDOWN_KEYS)); + } else { + assertThat(maxBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + } assertThat(maxBreakdown.get(INITIALIZE), greaterThan(0L)); assertThat(maxBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); assertThat(maxBreakdown.get(COLLECT), greaterThan(0L)); @@ -567,7 +748,12 @@ public void testComplexProfile() { assertThat(tagsAggResult.getTime(), greaterThan(0L)); tagsBreakdown = tagsAggResult.getTimeBreakdown(); assertThat(tagsBreakdown, notNullValue()); - assertThat(tagsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + if (tagsAggResult.getMaxSliceTime() != null) { + // concurrent segment search enabled + assertThat(tagsBreakdown.keySet(), equalTo(CONCURRENT_SEARCH_BREAKDOWN_KEYS)); + } else { + assertThat(tagsBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + } assertThat(tagsBreakdown.get(INITIALIZE), greaterThan(0L)); assertThat(tagsBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); assertThat(tagsBreakdown.get(COLLECT), greaterThan(0L)); @@ -587,7 +773,12 @@ public void testComplexProfile() { assertThat(avgAggResult.getTime(), greaterThan(0L)); avgBreakdown = avgAggResult.getTimeBreakdown(); assertThat(avgBreakdown, notNullValue()); - assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + if (avgAggResult.getMaxSliceTime() != null) { + // concurrent segment search enabled + assertThat(avgBreakdown.keySet(), equalTo(CONCURRENT_SEARCH_BREAKDOWN_KEYS)); + } else { + assertThat(avgBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + } assertThat(avgBreakdown.get(INITIALIZE), greaterThan(0L)); assertThat(avgBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); assertThat(avgBreakdown.get(COLLECT), greaterThan(0L)); @@ -603,7 +794,12 @@ public void testComplexProfile() { assertThat(maxAggResult.getTime(), greaterThan(0L)); maxBreakdown = maxAggResult.getTimeBreakdown(); assertThat(maxBreakdown, notNullValue()); - assertThat(maxBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + if (maxAggResult.getMaxSliceTime() != null) { + // concurrent segment search enabled + assertThat(maxBreakdown.keySet(), equalTo(CONCURRENT_SEARCH_BREAKDOWN_KEYS)); + } else { + assertThat(maxBreakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + } assertThat(maxBreakdown.get(INITIALIZE), greaterThan(0L)); assertThat(maxBreakdown.get(BUILD_LEAF_COLLECTOR), greaterThan(0L)); assertThat(maxBreakdown.get(COLLECT), greaterThan(0L)); @@ -652,7 +848,6 @@ public void testGlobalAggWithStatsSubAggregatorProfile() { .get(); assertSearchResponse(response); - Global global = response.getAggregations().get("global"); assertThat(global, IsNull.notNullValue()); assertThat(global.getName(), equalTo("global")); @@ -700,11 +895,109 @@ public void testGlobalAggWithStatsSubAggregatorProfile() { assertThat(globalAggResult.getTime(), greaterThan(0L)); Map<String, Long> breakdown = globalAggResult.getTimeBreakdown(); assertThat(breakdown, notNullValue()); - assertEquals(BREAKDOWN_KEYS, breakdown.keySet()); + if (globalAggResult.getMaxSliceTime() != null) { + // concurrent segment search enabled + assertEquals(CONCURRENT_SEARCH_BREAKDOWN_KEYS, breakdown.keySet()); + for (QueryProfileShardResult collectorResult : profileShardResult.getQueryProfileResults()) { + assertCollectorResultWithConcurrentSearchEnabled(collectorResult, 0); + } + } else { + assertEquals(BREAKDOWN_KEYS, breakdown.keySet()); + for (QueryProfileShardResult collectorResult : 
profileShardResult.getQueryProfileResults()) { + assertCollectorResult(collectorResult, 0); + } + } assertThat(breakdown.get(INITIALIZE), greaterThan(0L)); assertThat(breakdown.get(COLLECT), greaterThan(0L)); assertThat(breakdown.get(BUILD_AGGREGATION).longValue(), greaterThan(0L)); assertEquals(0, breakdown.get(REDUCE).intValue()); } } + + public void testMultipleAggregationsProfile() { + SearchResponse response = client().prepareSearch("idx") + .setProfile(true) + .addAggregation(histogram("histo_1").field(NUMBER_FIELD).interval(1L)) + .addAggregation(histogram("histo_2").field(NUMBER_FIELD).interval(1L)) + .get(); + assertSearchResponse(response); + Map<String, ProfileShardResult> profileResults = response.getProfileResults(); + assertThat(profileResults, notNullValue()); + assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries)); + for (ProfileShardResult profileShardResult : profileResults.values()) { + assertThat(profileShardResult, notNullValue()); + List<QueryProfileShardResult> queryProfilerResults = profileShardResult.getQueryProfileResults(); + assertThat(queryProfilerResults, notNullValue()); + for (QueryProfileShardResult queryProfilerResult : queryProfilerResults) { + CollectorResult collectorResult = queryProfilerResult.getCollectorResult(); + String reason = collectorResult.getReason(); + assertThat(reason, equalTo("search_multi")); + List<CollectorResult> children = collectorResult.getProfiledChildren(); + assertThat(children.size(), equalTo(2)); + assertThat(children.get(1).getName(), containsString("[histo_1, histo_2]")); + } + AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); + assertThat(aggProfileResults, notNullValue()); + List<ProfileResult> aggProfileResultsList = aggProfileResults.getProfileResults(); + assertThat(aggProfileResultsList, notNullValue()); + assertThat(aggProfileResultsList.size(), equalTo(2)); + for (ProfileResult histoAggResult : aggProfileResultsList) { + assertThat(histoAggResult, notNullValue()); + assertThat(histoAggResult.getQueryName(), equalTo("NumericHistogramAggregator")); + assertThat(histoAggResult.getLuceneDescription(), containsString("histo_")); + assertThat(histoAggResult.getProfiledChildren().size(), equalTo(0)); + assertThat(histoAggResult.getTime(), greaterThan(0L)); + Map<String, Long> breakdown = histoAggResult.getTimeBreakdown(); + assertThat(breakdown, notNullValue()); + if (histoAggResult.getMaxSliceTime() != null) { + // concurrent segment search enabled + assertThat(breakdown.keySet(), equalTo(CONCURRENT_SEARCH_BREAKDOWN_KEYS)); + for (QueryProfileShardResult collectorResult : profileShardResult.getQueryProfileResults()) { + assertCollectorResultWithConcurrentSearchEnabled(collectorResult, 2); + } + } else { + assertThat(breakdown.keySet(), equalTo(BREAKDOWN_KEYS)); + for (QueryProfileShardResult collectorResult : profileShardResult.getQueryProfileResults()) { + assertCollectorResult(collectorResult, 2); + } + } + assertThat(breakdown.get(INITIALIZE), greaterThan(0L)); + assertThat(breakdown.get(COLLECT), greaterThan(0L)); + assertThat(breakdown.get(BUILD_AGGREGATION).longValue(), greaterThan(0L)); + assertThat(breakdown.get(REDUCE), equalTo(0L)); + Map<String, Object> debug = histoAggResult.getDebugInfo(); + assertThat(debug, notNullValue()); + assertThat(debug.keySet(), equalTo(Set.of(TOTAL_BUCKETS))); + assertThat(((Number) debug.get(TOTAL_BUCKETS)).longValue(), greaterThan(0L)); + } + } + } + + private void 
assertCollectorResult(QueryProfileShardResult collectorResult, int expectedChildrenCount) { + long nodeTime = collectorResult.getCollectorResult().getTime(); + assertThat(collectorResult.getCollectorResult().getMaxSliceTime(), equalTo(nodeTime)); + assertThat(collectorResult.getCollectorResult().getMinSliceTime(), equalTo(nodeTime)); + assertThat(collectorResult.getCollectorResult().getAvgSliceTime(), equalTo(nodeTime)); + assertThat(collectorResult.getCollectorResult().getReduceTime(), equalTo(0L)); + assertThat(collectorResult.getCollectorResult().getSliceCount(), equalTo(1)); + assertThat(collectorResult.getCollectorResult().getProfiledChildren().size(), equalTo(expectedChildrenCount)); + if (expectedChildrenCount == 2) { + assertThat(collectorResult.getCollectorResult().getProfiledChildren().get(0).getReason(), equalTo(REASON_SEARCH_TOP_HITS)); + assertThat(collectorResult.getCollectorResult().getProfiledChildren().get(1).getReason(), equalTo(REASON_AGGREGATION)); + } + } + + private void assertCollectorResultWithConcurrentSearchEnabled(QueryProfileShardResult collectorResult, int expectedChildrenCount) { + long nodeTime = collectorResult.getCollectorResult().getTime(); + assertThat(collectorResult.getCollectorResult().getMaxSliceTime(), lessThanOrEqualTo(nodeTime)); + assertThat(collectorResult.getCollectorResult().getMinSliceTime(), lessThanOrEqualTo(nodeTime)); + assertThat(collectorResult.getCollectorResult().getAvgSliceTime(), lessThanOrEqualTo(nodeTime)); + assertThat(collectorResult.getCollectorResult().getReduceTime(), greaterThan(0L)); + assertThat(collectorResult.getCollectorResult().getSliceCount(), greaterThanOrEqualTo(1)); + assertThat(collectorResult.getCollectorResult().getProfiledChildren().size(), equalTo(expectedChildrenCount)); + if (expectedChildrenCount == 2) { + assertThat(collectorResult.getCollectorResult().getProfiledChildren().get(0).getReason(), equalTo(REASON_SEARCH_TOP_HITS)); + assertThat(collectorResult.getCollectorResult().getProfiledChildren().get(1).getReason(), equalTo(REASON_AGGREGATION)); + } + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java index 8601e2b6d6be9..412a94aaf1b3e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java @@ -32,12 +32,14 @@ package org.opensearch.search.profile.query; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.tests.util.English; import org.opensearch.action.index.IndexRequestBuilder; -import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.MultiSearchResponse; -import org.opensearch.action.search.SearchType; import org.opensearch.action.search.SearchRequestBuilder; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.search.SearchType; import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.common.settings.Settings; import org.opensearch.index.query.QueryBuilder; @@ -46,23 +48,44 @@ import org.opensearch.search.profile.ProfileResult; import org.opensearch.search.profile.ProfileShardResult; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedDynamicSettingsOpenSearchIntegTestCase; -import 
java.util.List; import java.util.Arrays; -import java.util.Map; +import java.util.Collection; import java.util.HashSet; +import java.util.List; +import java.util.Map; import java.util.Set; -import static org.hamcrest.Matchers.is; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.profile.query.RandomQueryGenerator.randomQueryBuilder; +import static org.hamcrest.Matchers.emptyOrNullString; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.emptyOrNullString; -import static org.hamcrest.Matchers.equalTo; -import static org.opensearch.search.profile.query.RandomQueryGenerator.randomQueryBuilder; +import static org.hamcrest.Matchers.nullValue; + +public class QueryProfilerIT extends ParameterizedDynamicSettingsOpenSearchIntegTestCase { + private final boolean concurrentSearchEnabled; + private static final String MAX_PREFIX = "max_"; + private static final String MIN_PREFIX = "min_"; + private static final String AVG_PREFIX = "avg_"; + private static final String TIMING_TYPE_COUNT_SUFFIX = "_count"; + + public QueryProfilerIT(Settings settings, boolean concurrentSearchEnabled) { + super(settings); + this.concurrentSearchEnabled = concurrentSearchEnabled; + } -public class QueryProfilerIT extends OpenSearchIntegTestCase { + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build(), false }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build(), true } + ); + } /** * This test simply checks to make sure nothing crashes. 
Test indexes 100-150 documents, @@ -229,6 +252,7 @@ public void testSimpleMatch() throws Exception { assertEquals(result.getLuceneDescription(), "field1:one"); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -271,6 +295,7 @@ public void testBool() throws Exception { assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); assertEquals(result.getProfiledChildren().size(), 2); + assertQueryProfileResult(result); // Check the children List<ProfileResult> children = result.getProfiledChildren(); @@ -282,12 +307,14 @@ public void testBool() throws Exception { assertThat(childProfile.getTime(), greaterThan(0L)); assertNotNull(childProfile.getTimeBreakdown()); assertEquals(childProfile.getProfiledChildren().size(), 0); + assertQueryProfileResult(childProfile); childProfile = children.get(1); assertEquals(childProfile.getQueryName(), "TermQuery"); assertEquals(childProfile.getLuceneDescription(), "field1:two"); assertThat(childProfile.getTime(), greaterThan(0L)); assertNotNull(childProfile.getTimeBreakdown()); + assertQueryProfileResult(childProfile); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -330,6 +357,7 @@ public void testEmptyBool() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -375,6 +403,7 @@ public void testCollapsingBool() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -415,6 +444,90 @@ public void testBoosting() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); + } + + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); + assertThat(result.getTime(), greaterThan(0L)); + } + } + } + + public void testSearchLeafForItsLeavesAndRewriteQuery() throws Exception { + createIndex("test"); + ensureGreen(); + + int numDocs = 122; + IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < numDocs; i++) { + docs[i] = client().prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); + } + + List<String> terms = Arrays.asList("zero", "zero", "one"); + + indexRandom(true, docs); + + refresh(); + + QueryBuilder q = QueryBuilders.boostingQuery( + QueryBuilders.idsQuery().addIds(String.valueOf(randomInt()), String.valueOf(randomInt())), + QueryBuilders.termsQuery("field1", terms) + ).boost(randomFloat()).negativeBoost(randomFloat()); + logger.info("Query: {}", q); + + SearchResponse resp = client().prepareSearch() + .setQuery(q) + .setTrackTotalHits(true) + .setProfile(true) + .setSearchType(SearchType.QUERY_THEN_FETCH) + .get(); + + assertNotNull("Profile response element should not be null", resp.getProfileResults()); + assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); + + for (Map.Entry<String, ProfileShardResult> shardResult : 
resp.getProfileResults().entrySet()) { + assertThat(shardResult.getValue().getNetworkTime().getInboundNetworkTime(), greaterThanOrEqualTo(0L)); + assertThat(shardResult.getValue().getNetworkTime().getOutboundNetworkTime(), greaterThanOrEqualTo(0L)); + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { + List<ProfileResult> results = searchProfiles.getQueryResults(); + for (ProfileResult result : results) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + Map<String, Long> breakdown = result.getTimeBreakdown(); + Long maxSliceTime = result.getMaxSliceTime(); + Long minSliceTime = result.getMinSliceTime(); + Long avgSliceTime = result.getAvgSliceTime(); + if (concurrentSearchEnabled && results.get(0).equals(result)) { + assertNotNull(maxSliceTime); + assertNotNull(minSliceTime); + assertNotNull(avgSliceTime); + assertThat(breakdown.size(), equalTo(66)); + for (QueryTimingType queryTimingType : QueryTimingType.values()) { + if (queryTimingType != QueryTimingType.CREATE_WEIGHT) { + String maxTimingType = MAX_PREFIX + queryTimingType; + String minTimingType = MIN_PREFIX + queryTimingType; + String avgTimingType = AVG_PREFIX + queryTimingType; + assertNotNull(breakdown.get(maxTimingType)); + assertNotNull(breakdown.get(minTimingType)); + assertNotNull(breakdown.get(avgTimingType)); + assertNotNull(breakdown.get(maxTimingType + TIMING_TYPE_COUNT_SUFFIX)); + assertNotNull(breakdown.get(minTimingType + TIMING_TYPE_COUNT_SUFFIX)); + assertNotNull(breakdown.get(avgTimingType + TIMING_TYPE_COUNT_SUFFIX)); + } + } + } else if (concurrentSearchEnabled) { + assertThat(maxSliceTime, equalTo(0L)); + assertThat(minSliceTime, equalTo(0L)); + assertThat(avgSliceTime, equalTo(0L)); + assertThat(breakdown.size(), equalTo(27)); + } else { + assertThat(maxSliceTime, is(nullValue())); + assertThat(minSliceTime, is(nullValue())); + assertThat(avgSliceTime, is(nullValue())); + assertThat(breakdown.size(), equalTo(27)); + } } CollectorResult result = searchProfiles.getCollectorResult(); @@ -455,6 +568,7 @@ public void testDisMaxRange() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -494,6 +608,7 @@ public void testRange() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -547,6 +662,7 @@ public void testPhrase() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -579,4 +695,35 @@ public void testNoProfile() throws Exception { assertThat("Profile response element should be an empty map", resp.getProfileResults().size(), equalTo(0)); } + private void assertQueryProfileResult(ProfileResult result) { + Map<String, Long> breakdown = result.getTimeBreakdown(); + Long maxSliceTime = result.getMaxSliceTime(); + Long minSliceTime = result.getMinSliceTime(); + Long avgSliceTime = result.getAvgSliceTime(); + if (concurrentSearchEnabled) { + assertNotNull(maxSliceTime); + 
assertNotNull(minSliceTime); + assertNotNull(avgSliceTime); + assertThat(breakdown.size(), equalTo(66)); + for (QueryTimingType queryTimingType : QueryTimingType.values()) { + if (queryTimingType != QueryTimingType.CREATE_WEIGHT) { + String maxTimingType = MAX_PREFIX + queryTimingType; + String minTimingType = MIN_PREFIX + queryTimingType; + String avgTimingType = AVG_PREFIX + queryTimingType; + assertNotNull(breakdown.get(maxTimingType)); + assertNotNull(breakdown.get(minTimingType)); + assertNotNull(breakdown.get(avgTimingType)); + assertNotNull(breakdown.get(maxTimingType + TIMING_TYPE_COUNT_SUFFIX)); + assertNotNull(breakdown.get(minTimingType + TIMING_TYPE_COUNT_SUFFIX)); + assertNotNull(breakdown.get(avgTimingType + TIMING_TYPE_COUNT_SUFFIX)); + } + } + } else { + assertThat(maxSliceTime, is(nullValue())); + assertThat(minSliceTime, is(nullValue())); + assertThat(avgSliceTime, is(nullValue())); + assertThat(breakdown.size(), equalTo(27)); + } + } + } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/ExistsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/ExistsIT.java index e1724d496fa91..b95542382e5fc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/ExistsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/ExistsIT.java @@ -32,18 +32,22 @@ package org.opensearch.search.query; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.explain.ExplainResponse; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; -import org.opensearch.common.Strings; -import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.SearchHit; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; @@ -52,11 +56,24 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; -public class ExistsIT extends OpenSearchIntegTestCase { +public class ExistsIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public ExistsIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } // TODO: move this to a unit test somewhere... 
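
The conversion shown for ExistsIT above is the template repeated across every suite in this change: extend the parameterized base class, accept the per-run Settings in the constructor, and declare a @ParametersFactory that yields one run with concurrent segment search disabled and one with it enabled. A minimal self-contained sketch of that pattern follows; the class name and test body are illustrative, not part of the change:

    import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;

    import org.opensearch.common.settings.Settings;
    import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;

    import java.util.Arrays;
    import java.util.Collection;

    import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;

    public class ExampleConcurrentSearchIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase {

        public ExampleConcurrentSearchIT(Settings staticSettings) {
            super(staticSettings); // the base class applies these settings to the test cluster
        }

        @ParametersFactory
        public static Collection<Object[]> parameters() {
            // the runner instantiates the class once per Object[]: first with
            // concurrent segment search off, then with it on
            return Arrays.asList(
                new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
                new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
            );
        }

        public void testRunsUnderBothModes() {
            // every test method in the class now executes under both settings
            createIndex("example");
            ensureGreen("example");
        }
    }
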
public void testEmptyIndex() throws Exception { @@ -140,14 +157,7 @@ public void testExists() throws Exception { assertSearchResponse(resp); try { assertEquals( - String.format( - Locale.ROOT, - "exists(%s, %d) mapping: %s response: %s", - fieldName, - count, - Strings.toString(mapping), - resp - ), + String.format(Locale.ROOT, "exists(%s, %d) mapping: %s response: %s", fieldName, count, mapping.toString(), resp), count, resp.getHits().getTotalHits().value ); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/MultiMatchQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/MultiMatchQueryIT.java index 2e8b45ef7b2d6..392f8b036b7a2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/MultiMatchQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/MultiMatchQueryIT.java @@ -31,6 +31,7 @@ package org.opensearch.search.query; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.opensearch.action.admin.indices.create.CreateIndexRequestBuilder; @@ -39,8 +40,8 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.Fuzziness; import org.opensearch.common.util.set.Sets; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.MatchQueryBuilder; import org.opensearch.index.query.MultiMatchQueryBuilder; import org.opensearch.index.query.Operator; @@ -51,13 +52,13 @@ import org.opensearch.search.SearchHits; import org.opensearch.search.sort.SortBuilders; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.MockKeywordPlugin; - +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.junit.Before; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; @@ -74,6 +75,7 @@ import static org.opensearch.index.query.QueryBuilders.matchQuery; import static org.opensearch.index.query.QueryBuilders.multiMatchQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFirstHit; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; @@ -89,7 +91,19 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThan; -public class MultiMatchQueryIT extends OpenSearchIntegTestCase { +public class MultiMatchQueryIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public MultiMatchQueryIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java index 36de3f7ebaa60..c43a9c23661ea 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java @@ -32,30 +32,35 @@ package org.opensearch.search.query; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.ExceptionsHelper; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.Operator; import org.opensearch.index.query.QueryStringQueryBuilder; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; import org.opensearch.search.SearchModule; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.junit.Before; import org.junit.BeforeClass; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.Set; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.queryStringQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.StreamsUtils.copyToStringFromClasspath; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; @@ -64,10 +69,22 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -public class QueryStringIT extends OpenSearchIntegTestCase { +public class QueryStringIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static int CLUSTER_MAX_CLAUSE_COUNT; + public QueryStringIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @BeforeClass public static void createRandomClusterSetting() { CLUSTER_MAX_CLAUSE_COUNT = randomIntBetween(50, 100); @@ -76,7 +93,7 @@ public static void createRandomClusterSetting() { @Before public void setup() throws Exception { String indexBody = copyToStringFromClasspath("/org/opensearch/search/query/all-query-index.json"); - prepareCreate("test").setSource(indexBody, XContentType.JSON).get(); + prepareCreate("test").setSource(indexBody, MediaTypeRegistry.JSON).get(); ensureGreen("test"); } @@ -161,8 +178,9 @@ public void testWithLotsOfTypes() throws Exception { public void testDocWithAllTypes() throws Exception { List<IndexRequestBuilder> reqs = new ArrayList<>(); String docBody = copyToStringFromClasspath("/org/opensearch/search/query/all-example-document.json"); - 
reqs.add(client().prepareIndex("test").setId("1").setSource(docBody, XContentType.JSON)); + reqs.add(client().prepareIndex("test").setId("1").setSource(docBody, MediaTypeRegistry.JSON)); indexRandom(true, false, reqs); + indexRandomForConcurrentSearch("test"); SearchResponse resp = client().prepareSearch("test").setQuery(queryStringQuery("foo")).get(); assertHits(resp.getHits(), "1"); @@ -202,6 +220,7 @@ public void testKeywordWithWhitespace() throws Exception { reqs.add(client().prepareIndex("test").setId("2").setSource("f1", "bar")); reqs.add(client().prepareIndex("test").setId("3").setSource("f1", "foo bar")); indexRandom(true, false, reqs); + indexRandomForConcurrentSearch("test"); SearchResponse resp = client().prepareSearch("test").setQuery(queryStringQuery("foo")).get(); assertHits(resp.getHits(), "3"); @@ -222,6 +241,7 @@ public void testRegexCaseInsensitivity() throws Exception { indexRequests.add(client().prepareIndex("messages").setId("1").setSource("message", "message: this is a TLS handshake")); indexRequests.add(client().prepareIndex("messages").setId("2").setSource("message", "message: this is a tcp handshake")); indexRandom(true, false, indexRequests); + indexRandomForConcurrentSearch("messages"); SearchResponse response = client().prepareSearch("messages").setQuery(queryStringQuery("/TLS/").defaultField("message")).get(); assertNoFailures(response); @@ -253,12 +273,13 @@ public void testAllFields() throws Exception { String indexBody = copyToStringFromClasspath("/org/opensearch/search/query/all-query-index.json"); Settings.Builder settings = Settings.builder().put("index.query.default_field", "*"); - prepareCreate("test_1").setSource(indexBody, XContentType.JSON).setSettings(settings).get(); + prepareCreate("test_1").setSource(indexBody, MediaTypeRegistry.JSON).setSettings(settings).get(); ensureGreen("test_1"); List<IndexRequestBuilder> reqs = new ArrayList<>(); reqs.add(client().prepareIndex("test_1").setId("1").setSource("f1", "foo", "f2", "eggplant")); indexRandom(true, false, reqs); + indexRandomForConcurrentSearch("test_1"); SearchResponse resp = client().prepareSearch("test_1") .setQuery(queryStringQuery("foo eggplant").defaultOperator(Operator.AND)) @@ -351,6 +372,7 @@ public void testLimitOnExpandedFields() throws Exception { client().prepareIndex("testindex").setId("1").setSource("field_A0", "foo bar baz").get(); refresh(); + indexRandomForConcurrentSearch("testindex"); // single field shouldn't trigger the limit doAssertOneHitForQueryString("field_A0:foo"); @@ -442,6 +464,7 @@ public void testFieldAliasOnDisallowedFieldType() throws Exception { List<IndexRequestBuilder> indexRequests = new ArrayList<>(); indexRequests.add(client().prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one")); indexRandom(true, false, indexRequests); + indexRandomForConcurrentSearch("test"); // The wildcard field matches aliases for both a text and geo_point field. // By default, the geo_point field should be ignored when building the query. 
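
The QueryStringIT hunks above all apply one indexing recipe: index the documents, refresh, then call the indexRandomForConcurrentSearch(...) hook on the index before the first search, so the assertions hold in both parameterized runs. A method-level sketch of that recipe, assuming the class scaffolding shown earlier and QueryStringIT's own assertHits helper; the test name and field values are illustrative, and the hook's exact behavior lives in the test framework:

    public void testQueryStringAfterConcurrentSearchSetup() throws Exception {
        List<IndexRequestBuilder> reqs = new ArrayList<>();
        reqs.add(client().prepareIndex("test").setId("1").setSource("f1", "foo bar"));
        indexRandom(true, false, reqs);
        // hook introduced by this change; invoked before the first search,
        // presumably so the concurrent-search run has segments to slice
        indexRandomForConcurrentSearch("test");

        SearchResponse resp = client().prepareSearch("test").setQuery(queryStringQuery("foo")).get();
        assertHits(resp.getHits(), "1");
    }
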
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/ScriptScoreQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/ScriptScoreQueryIT.java index d736365a6e236..136ddce152f63 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/ScriptScoreQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/ScriptScoreQueryIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.query; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.OpenSearchException; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.search.SearchResponse; @@ -43,8 +45,9 @@ import org.opensearch.script.MockScriptPlugin; import org.opensearch.script.Script; import org.opensearch.script.ScriptType; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -54,6 +57,7 @@ import static org.opensearch.index.query.QueryBuilders.boolQuery; import static org.opensearch.index.query.QueryBuilders.matchQuery; import static org.opensearch.index.query.QueryBuilders.scriptScoreQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFirstHit; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; @@ -62,7 +66,19 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertThirdHit; import static org.opensearch.test.hamcrest.OpenSearchAssertions.hasScore; -public class ScriptScoreQueryIT extends OpenSearchIntegTestCase { +public class ScriptScoreQueryIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public ScriptScoreQueryIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { @@ -87,13 +103,14 @@ protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() { // 1) only matched docs retrieved // 2) score is calculated based on a script with params // 3) min score applied - public void testScriptScore() { + public void testScriptScore() throws Exception { assertAcked(prepareCreate("test-index").setMapping("field1", "type=text", "field2", "type=double")); int docCount = 10; for (int i = 1; i <= docCount; i++) { client().prepareIndex("test-index").setId("" + i).setSource("field1", "text" + (i % 2), "field2", i).get(); } refresh(); + indexRandomForConcurrentSearch("test-index"); Map<String, Object> params = new HashMap<>(); params.put("param1", 0.1); @@ -113,13 +130,14 @@ public void testScriptScore() { assertOrderedSearchHits(resp, "10", "8", "6"); } - public void testScriptScoreBoolQuery() { + public void testScriptScoreBoolQuery() throws Exception { assertAcked(prepareCreate("test-index").setMapping("field1", "type=text", "field2", "type=double")); int docCount = 10; for (int i = 1; i <= docCount; i++) { client().prepareIndex("test-index").setId("" + i).setSource("field1", "text" + i, "field2", i).get(); } refresh(); + indexRandomForConcurrentSearch("test-index"); Map<String, Object> params = new HashMap<>(); params.put("param1", 0.1); @@ -133,7 +151,7 @@ public void testScriptScoreBoolQuery() { } // test that when the internal query is rewritten script_score works well - public void testRewrittenQuery() { + public void testRewrittenQuery() throws Exception { assertAcked( prepareCreate("test-index2").setSettings(Settings.builder().put("index.number_of_shards", 1)) .setMapping("field1", "type=date", "field2", "type=double") @@ -142,6 +160,7 @@ public void testRewrittenQuery() { client().prepareIndex("test-index2").setId("2").setSource("field1", "2019-10-01", "field2", 2).get(); client().prepareIndex("test-index2").setId("3").setSource("field1", "2019-11-01", "field2", 3).get(); refresh(); + indexRandomForConcurrentSearch("test-index2"); RangeQueryBuilder rangeQB = new RangeQueryBuilder("field1").from("2019-01-01"); // the query should be rewritten to from:null Map<String, Object> params = new HashMap<>(); @@ -152,7 +171,7 @@ public void testRewrittenQuery() { assertOrderedSearchHits(resp, "3", "2", "1"); } - public void testDisallowExpensiveQueries() { + public void testDisallowExpensiveQueries() throws Exception { try { assertAcked(prepareCreate("test-index").setMapping("field1", "type=text", "field2", "type=double")); int docCount = 10; @@ -160,6 +179,7 @@ public void testDisallowExpensiveQueries() { client().prepareIndex("test-index").setId("" + i).setSource("field1", "text" + (i % 2), "field2", i).get(); } refresh(); + indexRandomForConcurrentSearch("test-index"); Map<String, Object> params = new HashMap<>(); params.put("param1", 0.1); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java index 0e0f4873297ba..a58db51780826 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java @@ -32,14 +32,15 @@ package org.opensearch.search.query; -import org.apache.lucene.tests.analysis.MockTokenizer; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.analysis.pattern.PatternReplaceCharFilter; import 
org.apache.lucene.index.IndexReader; import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.join.ScoreMode; -import org.apache.lucene.util.AttributeSource; +import org.apache.lucene.tests.analysis.MockTokenizer; import org.apache.lucene.tests.util.English; - +import org.apache.lucene.util.AttributeSource; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; @@ -50,9 +51,10 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.unit.Fuzziness; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.analysis.CharFilterFactory; import org.opensearch.index.analysis.NormalizingCharFilterFactory; import org.opensearch.index.analysis.TokenizerFactory; @@ -73,12 +75,11 @@ import org.opensearch.indices.analysis.AnalysisModule.AnalysisProvider; import org.opensearch.plugins.AnalysisPlugin; import org.opensearch.plugins.Plugin; -import org.opensearch.core.rest.RestStatus; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; import org.opensearch.search.aggregations.AggregationBuilders; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.junit.annotations.TestIssueLogging; import java.io.IOException; @@ -98,7 +99,6 @@ import java.util.regex.Pattern; import static java.util.Collections.singletonMap; -import static org.hamcrest.Matchers.containsInAnyOrder; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; @@ -128,6 +128,7 @@ import static org.opensearch.index.query.QueryBuilders.termsQuery; import static org.opensearch.index.query.QueryBuilders.wildcardQuery; import static org.opensearch.index.query.QueryBuilders.wrapperQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFailures; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFirstHit; @@ -140,11 +141,24 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.hasId; import static org.opensearch.test.hamcrest.OpenSearchAssertions.hasScore; import static org.hamcrest.Matchers.closeTo; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; -public class SearchQueryIT extends OpenSearchIntegTestCase { +public class SearchQueryIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public SearchQueryIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { 
Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } @Override protected Collection<Class<? extends Plugin>> nodePlugins() { @@ -176,7 +190,7 @@ public void testEmptyQueryString() throws ExecutionException, InterruptedExcepti } // see https://github.com/elastic/elasticsearch/issues/3177 - public void testIssue3177() { + public void testIssue3177() throws InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("field1", "value1").get(); client().prepareIndex("test").setId("2").setSource("field1", "value2").get(); @@ -185,6 +199,7 @@ public void testIssue3177() { waitForRelocation(); forceMerge(); refresh(); + indexRandomForConcurrentSearch("test"); assertHitCount( client().prepareSearch() .setQuery(matchAllQuery()) @@ -374,7 +389,19 @@ public void testCommonTermsQuery() throws Exception { assertSecondHit(searchResponse, hasId("2")); assertThirdHit(searchResponse, hasId("3")); - searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the huge fox").lowFreqMinimumShouldMatch("2")).get(); + // cutoff frequency of 1 makes all terms high frequency so the query gets rewritten as a + // conjunction of all terms (the lowFreqMinimumShouldMatch parameter is effectively ignored) + searchResponse = client().prepareSearch() + .setQuery(commonTermsQuery("field1", "the huge fox").cutoffFrequency(1).lowFreqMinimumShouldMatch("2")) + .get(); + assertHitCount(searchResponse, 1L); + assertFirstHit(searchResponse, hasId("2")); + + // cutoff frequency of 100 makes all terms low frequency, so lowFreqMinimumShouldMatch=3 + // means all terms must match + searchResponse = client().prepareSearch() + .setQuery(commonTermsQuery("field1", "the huge fox").cutoffFrequency(100).lowFreqMinimumShouldMatch("3")) + .get(); assertHitCount(searchResponse, 1L); assertFirstHit(searchResponse, hasId("2")); @@ -445,6 +472,7 @@ public void testQueryStringAnalyzedWildcard() throws Exception { client().prepareIndex("test").setId("1").setSource("field1", "value_1", "field2", "value_2").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("value*")).get(); assertHitCount(searchResponse, 1L); @@ -462,11 +490,12 @@ public void testQueryStringAnalyzedWildcard() throws Exception { assertHitCount(searchResponse, 1L); } - public void testLowercaseExpandedTerms() { + public void testLowercaseExpandedTerms() throws InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("field1", "value_1", "field2", "value_2").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("VALUE_3~1")).get(); assertHitCount(searchResponse, 1L); @@ -479,7 +508,7 @@ public void testLowercaseExpandedTerms() { } // Issue #3540 - public void testDateRangeInQueryString() { + public void testDateRangeInQueryString() throws InterruptedException { // the mapping needs to be provided upfront otherwise we are not sure how many failures we get back // as with dynamic mappings some shards might be lacking behind and parse a different query assertAcked(prepareCreate("test").setMapping("past", "type=date", "future", "type=date")); @@ -490,6 +519,7 @@ public void testDateRangeInQueryString() { client().prepareIndex("test").setId("1").setSource("past", 
aMonthAgo, "future", aMonthFromNow).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("past:[now-2M/d TO now/d]")).get(); assertHitCount(searchResponse, 1L); @@ -505,7 +535,7 @@ public void testDateRangeInQueryString() { } // Issue #7880 - public void testDateRangeInQueryStringWithTimeZone_7880() { + public void testDateRangeInQueryStringWithTimeZone_7880() throws InterruptedException { // the mapping needs to be provided upfront otherwise we are not sure how many failures we get back // as with dynamic mappings some shards might be lacking behind and parse a different query assertAcked(prepareCreate("test").setMapping("past", "type=date")); @@ -516,6 +546,7 @@ public void testDateRangeInQueryStringWithTimeZone_7880() { client().prepareIndex("test").setId("1").setSource("past", now).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(queryStringQuery("past:[now-1m/m TO now+1m/m]").timeZone(timeZone.getId())) .get(); @@ -523,7 +554,7 @@ public void testDateRangeInQueryStringWithTimeZone_7880() { } // Issue #10477 - public void testDateRangeInQueryStringWithTimeZone_10477() { + public void testDateRangeInQueryStringWithTimeZone_10477() throws InterruptedException { // the mapping needs to be provided upfront otherwise we are not sure how many failures we get back // as with dynamic mappings some shards might be lacking behind and parse a different query assertAcked(prepareCreate("test").setMapping("past", "type=date")); @@ -532,6 +563,7 @@ public void testDateRangeInQueryStringWithTimeZone_10477() { client().prepareIndex("test").setId("2").setSource("past", "2015-04-06T00:00:00+0000").get(); refresh(); + indexRandomForConcurrentSearch("test"); // Timezone set with dates SearchResponse searchResponse = client().prepareSearch() .setQuery(queryStringQuery("past:[2015-04-06T00:00:00+0200 TO 2015-04-06T23:00:00+0200]")) @@ -705,6 +737,7 @@ public void testPassQueryOrFilterAsJSONString() throws Exception { createIndex("test"); client().prepareIndex("test").setId("1").setSource("field1", "value1_1", "field2", "value2_1").setRefreshPolicy(IMMEDIATE).get(); + indexRandomForConcurrentSearch("test"); WrapperQueryBuilder wrapper = new WrapperQueryBuilder("{ \"term\" : { \"field1\" : \"value1_1\" } }"); assertHitCount(client().prepareSearch().setQuery(wrapper).get(), 1L); @@ -721,6 +754,7 @@ public void testFiltersWithCustomCacheKey() throws Exception { client().prepareIndex("test").setId("1").setSource("field1", "value1").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(termsQuery("field1", "value1"))).get(); assertHitCount(searchResponse, 1L); @@ -762,6 +796,7 @@ public void testMatchQueryFuzzy() throws Exception { client().prepareIndex("test").setId("1").setSource("text", "Unit"), client().prepareIndex("test").setId("2").setSource("text", "Unity") ); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(matchQuery("text", "uniy").fuzziness(Fuzziness.ZERO)).get(); assertHitCount(searchResponse, 0L); @@ -848,11 +883,12 @@ public void testMultiMatchQuery() throws Exception { assertFirstHit(searchResponse, hasId("1")); } - public void testMatchQueryZeroTermsQuery() { + public void testMatchQueryZeroTermsQuery() throws InterruptedException { 
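// Illustrative aside on the signature changes in this file: wherever a test
// gains an indexRandomForConcurrentSearch(...) call, its signature also gains
// "throws InterruptedException" (or widens to "throws Exception"), which
// suggests the helper itself is declared to throw InterruptedException. The
// recurring shape, with a hypothetical test name:
//
//     public void testExample() throws InterruptedException {
//         client().prepareIndex("test").setId("1").setSource("field1", "value1").get();
//         refresh();
//         indexRandomForConcurrentSearch("test"); // may throw InterruptedException
//         // ... assertions ...
//     }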
assertAcked(prepareCreate("test").setMapping("field1", "type=text,analyzer=classic", "field2", "type=text,analyzer=classic")); client().prepareIndex("test").setId("1").setSource("field1", "value1").get(); client().prepareIndex("test").setId("2").setSource("field1", "value2").get(); refresh(); + indexRandomForConcurrentSearch("test"); BoolQueryBuilder boolQuery = boolQuery().must(matchQuery("field1", "a").zeroTermsQuery(MatchQuery.ZeroTermsQuery.NONE)) .must(matchQuery("field1", "value1").zeroTermsQuery(MatchQuery.ZeroTermsQuery.NONE)); @@ -869,11 +905,12 @@ public void testMatchQueryZeroTermsQuery() { assertHitCount(searchResponse, 2L); } - public void testMultiMatchQueryZeroTermsQuery() { + public void testMultiMatchQueryZeroTermsQuery() throws InterruptedException { assertAcked(prepareCreate("test").setMapping("field1", "type=text,analyzer=classic", "field2", "type=text,analyzer=classic")); client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").get(); client().prepareIndex("test").setId("2").setSource("field1", "value3", "field2", "value4").get(); refresh(); + indexRandomForConcurrentSearch("test"); BoolQueryBuilder boolQuery = boolQuery().must( multiMatchQuery("a", "field1", "field2").zeroTermsQuery(MatchQuery.ZeroTermsQuery.NONE) @@ -893,11 +930,12 @@ public void testMultiMatchQueryZeroTermsQuery() { assertHitCount(searchResponse, 2L); } - public void testMultiMatchQueryMinShouldMatch() { + public void testMultiMatchQueryMinShouldMatch() throws InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("field1", new String[] { "value1", "value2", "value3" }).get(); client().prepareIndex("test").setId("2").setSource("field2", "value1").get(); refresh(); + indexRandomForConcurrentSearch("test"); MultiMatchQueryBuilder multiMatchQuery = multiMatchQuery("value1 value2 foo", "field1", "field2"); @@ -939,12 +977,13 @@ public void testMultiMatchQueryMinShouldMatch() { assertHitCount(searchResponse, 0L); } - public void testBoolQueryMinShouldMatchBiggerThanNumberOfShouldClauses() throws IOException { + public void testBoolQueryMinShouldMatchBiggerThanNumberOfShouldClauses() throws IOException, InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("field1", new String[] { "value1", "value2", "value3" }).get(); client().prepareIndex("test").setId("2").setSource("field2", "value1").get(); refresh(); + indexRandomForConcurrentSearch("test"); BoolQueryBuilder boolQuery = boolQuery().must(termQuery("field1", "value1")) .should(boolQuery().should(termQuery("field1", "value1")).should(termQuery("field1", "value2")).minimumShouldMatch(3)); SearchResponse searchResponse = client().prepareSearch().setQuery(boolQuery).get(); @@ -971,12 +1010,13 @@ public void testBoolQueryMinShouldMatchBiggerThanNumberOfShouldClauses() throws assertHitCount(searchResponse, 0L); } - public void testFuzzyQueryString() { + public void testFuzzyQueryString() throws InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("str", "foobar", "date", "2012-02-01", "num", 12).get(); client().prepareIndex("test").setId("2").setSource("str", "fred", "date", "2012-02-05", "num", 20).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("str:foobaz~1")).get(); assertNoFailures(searchResponse); assertHitCount(searchResponse, 1L); @@ -995,6 +1035,7 @@ public void testQuotedQueryStringWithBoost() throws 
InterruptedException { client().prepareIndex("test").setId("2").setSource("important", "nothing important", "less_important", "phrase match") ); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(queryStringQuery("\"phrase match\"").field("important", boost).field("less_important")) .get(); @@ -1007,11 +1048,12 @@ public void testQuotedQueryStringWithBoost() throws InterruptedException { ); } - public void testSpecialRangeSyntaxInQueryString() { + public void testSpecialRangeSyntaxInQueryString() throws InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("str", "foobar", "date", "2012-02-01", "num", 12).get(); client().prepareIndex("test").setId("2").setSource("str", "fred", "date", "2012-02-05", "num", 20).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("num:>19")).get(); assertHitCount(searchResponse, 1L); @@ -1117,6 +1159,7 @@ public void testTermsQuery() throws Exception { public void testTermsLookupFilter() throws Exception { assertAcked(prepareCreate("lookup").setMapping("terms", "type=text", "other", "type=text")); + indexRandomForConcurrentSearch("lookup"); assertAcked( prepareCreate("lookup2").setMapping( jsonBuilder().startObject() @@ -1132,8 +1175,11 @@ public void testTermsLookupFilter() throws Exception { .endObject() ) ); + indexRandomForConcurrentSearch("lookup2"); assertAcked(prepareCreate("lookup3").setMapping("_source", "enabled=false", "terms", "type=text")); + indexRandomForConcurrentSearch("lookup3"); assertAcked(prepareCreate("test").setMapping("term", "type=text")); + indexRandomForConcurrentSearch("test"); indexRandom( true, @@ -1259,6 +1305,7 @@ public void testBasicQueryById() throws Exception { client().prepareIndex("test").setId("3").setSource("field1", "value3").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(idsQuery().addIds("1", "2")).get(); assertHitCount(searchResponse, 2L); assertThat(searchResponse.getHits().getHits().length, equalTo(2)); @@ -1313,6 +1360,7 @@ public void testNumericTermsAndRanges() throws Exception { .setSource("num_byte", 17, "num_short", 17, "num_integer", 17, "num_long", 17, "num_float", 17, "num_double", 17) .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse; logger.info("--> term query on 1"); @@ -1419,6 +1467,7 @@ public void testNumericRangeFilter_2826() throws Exception { client().prepareIndex("test").setId("3").setSource("field1", "test2", "num_long", 3).get(); client().prepareIndex("test").setId("4").setSource("field1", "test2", "num_long", 4).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setPostFilter(boolQuery().should(rangeQuery("num_long").from(1).to(2)).should(rangeQuery("num_long").from(3).to(4))) @@ -1515,7 +1564,7 @@ public void testSimpleSpan() throws IOException, ExecutionException, Interrupted assertHitCount(searchResponse, 3L); } - public void testSpanMultiTermQuery() throws IOException { + public void testSpanMultiTermQuery() throws IOException, InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("description", "foo other anything bar", "count", 1).get(); @@ -1523,6 +1572,7 @@ public void testSpanMultiTermQuery() throws IOException { 
client().prepareIndex("test").setId("3").setSource("description", "foo other", "count", 3).get(); client().prepareIndex("test").setId("4").setSource("description", "fop", "count", 4).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setQuery(spanOrQuery(spanMultiTermQueryBuilder(fuzzyQuery("description", "fop")))) @@ -1554,6 +1604,7 @@ public void testSpanNot() throws IOException, ExecutionException, InterruptedExc client().prepareIndex("test").setId("1").setSource("description", "the quick brown fox jumped over the lazy dog").get(); client().prepareIndex("test").setId("2").setSource("description", "the quick black fox leaped over the sleeping dog").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setQuery( @@ -1592,7 +1643,7 @@ public void testSpanNot() throws IOException, ExecutionException, InterruptedExc assertHitCount(searchResponse, 1L); } - public void testSimpleDFSQuery() throws IOException { + public void testSimpleDFSQuery() throws IOException, InterruptedException { assertAcked( prepareCreate("test").setMapping( jsonBuilder().startObject() @@ -1637,6 +1688,7 @@ public void testSimpleDFSQuery() throws IOException { .setSource("online", true, "ts", System.currentTimeMillis() - 123123, "type", "bs") .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) @@ -1659,8 +1711,9 @@ public void testSimpleDFSQuery() throws IOException { assertNoFailures(response); } - public void testMultiFieldQueryString() { + public void testMultiFieldQueryString() throws InterruptedException { client().prepareIndex("test").setId("1").setSource("field1", "value1", "field2", "value2").setRefreshPolicy(IMMEDIATE).get(); + indexRandomForConcurrentSearch("test"); logger.info("regular"); assertHitCount(client().prepareSearch("test").setQuery(queryStringQuery("value1").field("field1").field("field2")).get(), 1); @@ -1680,11 +1733,12 @@ public void testMultiFieldQueryString() { } // see #3797 - public void testMultiMatchLenientIssue3797() { + public void testMultiMatchLenientIssue3797() throws InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("field1", 123, "field2", "value2").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setQuery(multiMatchQuery("value2", "field2").field("field1", 2).lenient(true)) @@ -1708,6 +1762,7 @@ public void testMinScore() throws ExecutionException, InterruptedException { client().prepareIndex("test").setId("3").setSource("score", 2.0).get(); client().prepareIndex("test").setId("4").setSource("score", 0.5).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setQuery(functionScoreQuery(ScoreFunctionBuilders.fieldValueFactorFunction("score").missing(1.0)).setMinScore(1.5f)) @@ -1717,12 +1772,13 @@ public void testMinScore() throws ExecutionException, InterruptedException { assertSecondHit(searchResponse, hasId("1")); } - public void testQueryStringWithSlopAndFields() { + public void testQueryStringWithSlopAndFields() throws InterruptedException { assertAcked(prepareCreate("test")); client().prepareIndex("test").setId("1").setSource("desc", "one two three", "type", "customer").get(); client().prepareIndex("test").setId("2").setSource("desc", "one two 
three", "type", "product").get(); refresh(); + indexRandomForConcurrentSearch("test"); { SearchResponse searchResponse = client().prepareSearch("test") .setQuery(QueryBuilders.queryStringQuery("\"one two\"").defaultField("desc")) @@ -1789,6 +1845,7 @@ public void testRangeQueryWithTimeZone() throws Exception { .setId("4") .setSource("date", Instant.now().atZone(ZoneOffset.ofHours(1)).toInstant().toEpochMilli(), "num", 4) ); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setQuery(QueryBuilders.rangeQuery("date").from("2014-01-01T00:00:00").to("2014-01-01T00:59:00")) @@ -1896,11 +1953,12 @@ public void testRangeQueryWithLocaleMapping() throws Exception { assertHitCount(searchResponse, 2L); } - public void testSearchEmptyDoc() { - assertAcked(prepareCreate("test").setSettings("{\"index.analysis.analyzer.default.type\":\"keyword\"}", XContentType.JSON)); - client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get(); + public void testSearchEmptyDoc() throws InterruptedException { + assertAcked(prepareCreate("test").setSettings("{\"index.analysis.analyzer.default.type\":\"keyword\"}", MediaTypeRegistry.JSON)); + client().prepareIndex("test").setId("1").setSource("{}", MediaTypeRegistry.JSON).get(); refresh(); + indexRandomForConcurrentSearch("test"); assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1L); } @@ -1928,6 +1986,7 @@ public void testMatchPhrasePrefixQuery() throws ExecutionException, InterruptedE public void testQueryStringParserCache() throws Exception { createIndex("test"); indexRandom(true, false, client().prepareIndex("test").setId("1").setSource("nameTokens", "xyz")); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) @@ -1958,6 +2017,7 @@ public void testRangeQueryRangeFields_24744() throws Exception { .setSource(jsonBuilder().startObject().startObject("int_range").field("gte", 10).field("lte", 20).endObject().endObject()) .get(); refresh(); + indexRandomForConcurrentSearch("test"); RangeQueryBuilder range = new RangeQueryBuilder("int_range").relation("intersects").from(Integer.MIN_VALUE).to(Integer.MAX_VALUE); SearchResponse searchResponse = client().prepareSearch("test").setQuery(range).get(); @@ -1993,6 +2053,7 @@ public void testNestedQueryWithFieldAlias() throws Exception { index("index", "_doc", "1", source); refresh(); + indexRandomForConcurrentSearch("index"); QueryBuilder nestedQuery = QueryBuilders.nestedQuery( "section", @@ -2021,6 +2082,7 @@ public void testFieldAliasesForMetaFields() throws Exception { IndexRequestBuilder indexRequest = client().prepareIndex("test").setId("1").setRouting("custom").setSource("field", "value"); indexRandom(true, false, indexRequest); + indexRandomForConcurrentSearch("test"); client().admin() .cluster() .prepareUpdateSettings() @@ -2053,7 +2115,7 @@ public void testFieldAliasesForMetaFields() throws Exception { /** * Test that wildcard queries on keyword fields get normalized */ - public void testWildcardQueryNormalizationOnKeywordField() { + public void testWildcardQueryNormalizationOnKeywordField() throws InterruptedException { assertAcked( prepareCreate("test").setSettings( Settings.builder() @@ -2064,6 +2126,7 @@ public void testWildcardQueryNormalizationOnKeywordField() { ); client().prepareIndex("test").setId("1").setSource("field1", "Bbb Aaa").get(); refresh(); + indexRandomForConcurrentSearch("test"); { WildcardQueryBuilder 
wildCardQuery = wildcardQuery("field1", "Bb*"); @@ -2079,7 +2142,7 @@ public void testWildcardQueryNormalizationOnKeywordField() { /** * Test that wildcard queries on text fields get normalized */ - public void testWildcardQueryNormalizationOnTextField() { + public void testWildcardQueryNormalizationOnTextField() throws InterruptedException { assertAcked( prepareCreate("test").setSettings( Settings.builder() @@ -2091,6 +2154,7 @@ public void testWildcardQueryNormalizationOnTextField() { ); client().prepareIndex("test").setId("1").setSource("field1", "Bbb Aaa").get(); refresh(); + indexRandomForConcurrentSearch("test"); { // test default case insensitivity: false @@ -2110,10 +2174,11 @@ public void testWildcardQueryNormalizationOnTextField() { } /** tests wildcard case sensitivity */ - public void testWildcardCaseSensitivity() { + public void testWildcardCaseSensitivity() throws InterruptedException { assertAcked(prepareCreate("test").setMapping("field", "type=text")); client().prepareIndex("test").setId("1").setSource("field", "lowercase text").get(); refresh(); + indexRandomForConcurrentSearch("test"); // test case sensitive SearchResponse response = client().prepareSearch("test").setQuery(wildcardQuery("field", "Text").caseInsensitive(false)).get(); @@ -2131,7 +2196,7 @@ public void testWildcardCaseSensitivity() { * Reserved characters should be excluded when the normalization is applied for keyword fields. * See https://github.com/elastic/elasticsearch/issues/46300 for details. */ - public void testWildcardQueryNormalizationKeywordSpecialCharacters() { + public void testWildcardQueryNormalizationKeywordSpecialCharacters() throws InterruptedException { assertAcked( prepareCreate("test").setSettings( Settings.builder() @@ -2143,6 +2208,7 @@ public void testWildcardQueryNormalizationKeywordSpecialCharacters() { ); client().prepareIndex("test").setId("1").setSource("field", "label-1").get(); refresh(); + indexRandomForConcurrentSearch("test"); WildcardQueryBuilder wildCardQuery = wildcardQuery("field", "la*"); SearchResponse searchResponse = client().prepareSearch().setQuery(wildCardQuery).get(); @@ -2193,11 +2259,12 @@ public Map<String, AnalysisProvider<TokenizerFactory>> getTokenizers() { * set for fuzzy queries with "constant_score" rewrite nested inside a `span_multi` query and would cause NPEs due to an unset * {@link AttributeSource}. 
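+ * The query built below (a fuzzy query with the "constant_score" rewrite wrapped in span_multi inside a bool filter) exercises exactly this combination and should complete without throwing.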
*/ - public void testIssueFuzzyInsideSpanMulti() { + public void testIssueFuzzyInsideSpanMulti() throws InterruptedException { createIndex("test"); client().prepareIndex("test").setId("1").setSource("field", "foobarbaz").get(); ensureGreen(); refresh(); + indexRandomForConcurrentSearch("test"); BoolQueryBuilder query = boolQuery().filter(spanMultiTermQueryBuilder(fuzzyQuery("field", "foobarbiz").rewrite("constant_score"))); SearchResponse response = client().prepareSearch("test").setQuery(query).get(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java index 46b81ae2e750d..31678d3f018a1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java @@ -32,20 +32,20 @@ package org.opensearch.search.query; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.analysis.TokenFilter; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; - import org.opensearch.ExceptionsHelper; import org.opensearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; -import org.opensearch.common.Strings; import org.opensearch.common.settings.Settings; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.analysis.PreConfiguredTokenFilter; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.BoolQueryBuilder; @@ -59,12 +59,12 @@ import org.opensearch.search.SearchHits; import org.opensearch.search.SearchModule; import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.test.OpenSearchIntegTestCase; - +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.junit.BeforeClass; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashSet; @@ -78,6 +78,7 @@ import static org.opensearch.index.query.QueryBuilders.queryStringQuery; import static org.opensearch.index.query.QueryBuilders.simpleQueryStringQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.StreamsUtils.copyToStringFromClasspath; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFailures; @@ -93,10 +94,22 @@ /** * Tests for the {@code simple_query_string} query */ -public class SimpleQueryStringIT extends OpenSearchIntegTestCase { +public class SimpleQueryStringIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static int CLUSTER_MAX_CLAUSE_COUNT; + public SimpleQueryStringIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + 
return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @BeforeClass public static void createRandomClusterSetting() { CLUSTER_MAX_CLAUSE_COUNT = randomIntBetween(60, 100); @@ -131,6 +144,7 @@ public void testSimpleQueryString() throws ExecutionException, InterruptedExcept client().prepareIndex("test").setId("5").setSource("body", "quux baz spaghetti"), client().prepareIndex("test").setId("6").setSource("otherbody", "spaghetti") ); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo bar")).get(); assertHitCount(searchResponse, 3L); @@ -180,6 +194,7 @@ public void testSimpleQueryStringMinimumShouldMatch() throws Exception { client().prepareIndex("test").setId("3").setSource("body", "foo bar"), client().prepareIndex("test").setId("4").setSource("body", "foo baz bar") ); + indexRandomForConcurrentSearch("test"); logger.info("--> query 1"); SearchResponse searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo bar").minimumShouldMatch("2")).get(); @@ -216,6 +231,7 @@ public void testSimpleQueryStringMinimumShouldMatch() throws Exception { client().prepareIndex("test").setId("7").setSource("body2", "foo bar", "other", "foo"), client().prepareIndex("test").setId("8").setSource("body2", "foo baz bar", "other", "foo") ); + indexRandomForConcurrentSearch("test"); logger.info("--> query 5"); searchResponse = client().prepareSearch() @@ -237,7 +253,7 @@ public void testSimpleQueryStringMinimumShouldMatch() throws Exception { assertSearchHits(searchResponse, "6", "7", "8"); } - public void testNestedFieldSimpleQueryString() throws IOException { + public void testNestedFieldSimpleQueryString() throws Exception { assertAcked( prepareCreate("test").setMapping( jsonBuilder().startObject() @@ -256,6 +272,7 @@ public void testNestedFieldSimpleQueryString() throws IOException { ); client().prepareIndex("test").setId("1").setSource("body", "foo bar baz").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo bar baz").field("body")).get(); assertHitCount(searchResponse, 1L); @@ -340,6 +357,8 @@ public void testSimpleQueryStringLenient() throws ExecutionException, Interrupte client().prepareIndex("test2").setId("10").setSource("field", 5) ); refresh(); + indexRandomForConcurrentSearch("test1"); + indexRandomForConcurrentSearch("test2"); SearchResponse searchResponse = client().prepareSearch() .setAllowPartialSearchResults(true) @@ -373,17 +392,16 @@ public void testLenientFlagBeingTooLenient() throws Exception { } public void testSimpleQueryStringAnalyzeWildcard() throws ExecutionException, InterruptedException, IOException { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("location") - .field("type", "text") - .field("analyzer", "standard") - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("location") + .field("type", "text") + .field("analyzer", "standard") + .endObject() + .endObject() + .endObject() + .toString(); CreateIndexRequestBuilder mappingRequest = 
client().admin().indices().prepareCreate("test1").setMapping(mapping); mappingRequest.get(); @@ -401,6 +419,7 @@ public void testSimpleQueryStringUsesFieldAnalyzer() throws Exception { client().prepareIndex("test").setId("2").setSource("foo", 234, "bar", "bcd").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("123").field("foo").field("bar")).get(); assertHitCount(searchResponse, 1L); @@ -412,6 +431,7 @@ public void testSimpleQueryStringOnIndexMetaField() throws Exception { client().prepareIndex("test").setId("2").setSource("foo", 234, "bar", "bcd").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("test").field("_index")).get(); assertHitCount(searchResponse, 2L); @@ -420,17 +440,16 @@ public void testSimpleQueryStringOnIndexMetaField() throws Exception { public void testEmptySimpleQueryStringWithAnalysis() throws Exception { // https://github.com/elastic/elasticsearch/issues/18202 - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("body") - .field("type", "text") - .field("analyzer", "stop") - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("body") + .field("type", "text") + .field("analyzer", "stop") + .endObject() + .endObject() + .endObject() + .toString(); CreateIndexRequestBuilder mappingRequest = client().admin().indices().prepareCreate("test1").setMapping(mapping); mappingRequest.get(); @@ -444,7 +463,7 @@ public void testEmptySimpleQueryStringWithAnalysis() throws Exception { public void testBasicAllQuery() throws Exception { String indexBody = copyToStringFromClasspath("/org/opensearch/search/query/all-query-index.json"); - prepareCreate("test").setSource(indexBody, XContentType.JSON).get(); + prepareCreate("test").setSource(indexBody, MediaTypeRegistry.JSON).get(); ensureGreen("test"); List<IndexRequestBuilder> reqs = new ArrayList<>(); @@ -452,6 +471,7 @@ public void testBasicAllQuery() throws Exception { reqs.add(client().prepareIndex("test").setId("2").setSource("f2", "Bar")); reqs.add(client().prepareIndex("test").setId("3").setSource("f3", "foo bar baz")); indexRandom(true, false, reqs); + indexRandomForConcurrentSearch("test"); SearchResponse resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("foo")).get(); assertHitCount(resp, 2L); @@ -468,13 +488,14 @@ public void testBasicAllQuery() throws Exception { public void testWithDate() throws Exception { String indexBody = copyToStringFromClasspath("/org/opensearch/search/query/all-query-index.json"); - prepareCreate("test").setSource(indexBody, XContentType.JSON).get(); + prepareCreate("test").setSource(indexBody, MediaTypeRegistry.JSON).get(); ensureGreen("test"); List<IndexRequestBuilder> reqs = new ArrayList<>(); reqs.add(client().prepareIndex("test").setId("1").setSource("f1", "foo", "f_date", "2015/09/02")); reqs.add(client().prepareIndex("test").setId("2").setSource("f1", "bar", "f_date", "2015/09/01")); indexRandom(true, false, reqs); + indexRandomForConcurrentSearch("test"); SearchResponse resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("foo bar")).get(); assertHits(resp.getHits(), "1", "2"); @@ -495,7 +516,7 @@ public void testWithDate() throws Exception { public void testWithLotsOfTypes() throws 
Exception { String indexBody = copyToStringFromClasspath("/org/opensearch/search/query/all-query-index.json"); - prepareCreate("test").setSource(indexBody, XContentType.JSON).get(); + prepareCreate("test").setSource(indexBody, MediaTypeRegistry.JSON).get(); ensureGreen("test"); List<IndexRequestBuilder> reqs = new ArrayList<>(); @@ -506,6 +527,7 @@ public void testWithLotsOfTypes() throws Exception { client().prepareIndex("test").setId("2").setSource("f1", "bar", "f_date", "2015/09/01", "f_float", "1.8", "f_ip", "127.0.0.2") ); indexRandom(true, false, reqs); + indexRandomForConcurrentSearch("test"); SearchResponse resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("foo bar")).get(); assertHits(resp.getHits(), "1", "2"); @@ -526,13 +548,14 @@ public void testWithLotsOfTypes() throws Exception { public void testDocWithAllTypes() throws Exception { String indexBody = copyToStringFromClasspath("/org/opensearch/search/query/all-query-index.json"); - prepareCreate("test").setSource(indexBody, XContentType.JSON).get(); + prepareCreate("test").setSource(indexBody, MediaTypeRegistry.JSON).get(); ensureGreen("test"); List<IndexRequestBuilder> reqs = new ArrayList<>(); String docBody = copyToStringFromClasspath("/org/opensearch/search/query/all-example-document.json"); - reqs.add(client().prepareIndex("test").setId("1").setSource(docBody, XContentType.JSON)); + reqs.add(client().prepareIndex("test").setId("1").setSource(docBody, MediaTypeRegistry.JSON)); indexRandom(true, false, reqs); + indexRandomForConcurrentSearch("test"); SearchResponse resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("foo")).get(); assertHits(resp.getHits(), "1"); @@ -571,7 +594,7 @@ public void testDocWithAllTypes() throws Exception { public void testKeywordWithWhitespace() throws Exception { String indexBody = copyToStringFromClasspath("/org/opensearch/search/query/all-query-index.json"); - prepareCreate("test").setSource(indexBody, XContentType.JSON).get(); + prepareCreate("test").setSource(indexBody, MediaTypeRegistry.JSON).get(); ensureGreen("test"); List<IndexRequestBuilder> reqs = new ArrayList<>(); @@ -579,6 +602,7 @@ public void testKeywordWithWhitespace() throws Exception { reqs.add(client().prepareIndex("test").setId("2").setSource("f1", "bar")); reqs.add(client().prepareIndex("test").setId("3").setSource("f1", "foo bar")); indexRandom(true, false, reqs); + indexRandomForConcurrentSearch("test"); SearchResponse resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("foo")).get(); assertHits(resp.getHits(), "3"); @@ -591,7 +615,7 @@ public void testKeywordWithWhitespace() throws Exception { public void testAllFieldsWithSpecifiedLeniency() throws IOException { String indexBody = copyToStringFromClasspath("/org/opensearch/search/query/all-query-index.json"); - prepareCreate("test").setSource(indexBody, XContentType.JSON).get(); + prepareCreate("test").setSource(indexBody, MediaTypeRegistry.JSON).get(); ensureGreen("test"); SearchPhaseExecutionException e = expectThrows( @@ -638,7 +662,7 @@ private void doAssertLimitExceededException(String field, int exceedingFieldCoun public void testFieldAlias() throws Exception { String indexBody = copyToStringFromClasspath("/org/opensearch/search/query/all-query-index.json"); - assertAcked(prepareCreate("test").setSource(indexBody, XContentType.JSON)); + assertAcked(prepareCreate("test").setSource(indexBody, MediaTypeRegistry.JSON)); ensureGreen("test"); List<IndexRequestBuilder> indexRequests = new ArrayList<>(); @@ -646,6 
+670,7 @@ public void testFieldAlias() throws Exception { indexRequests.add(client().prepareIndex("test").setId("2").setSource("f3", "value", "f2", "two")); indexRequests.add(client().prepareIndex("test").setId("3").setSource("f3", "another value", "f2", "three")); indexRandom(true, false, indexRequests); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test").setQuery(simpleQueryStringQuery("value").field("f3_alias")).get(); @@ -656,7 +681,7 @@ public void testFieldAlias() throws Exception { public void testFieldAliasWithWildcardField() throws Exception { String indexBody = copyToStringFromClasspath("/org/opensearch/search/query/all-query-index.json"); - assertAcked(prepareCreate("test").setSource(indexBody, XContentType.JSON)); + assertAcked(prepareCreate("test").setSource(indexBody, MediaTypeRegistry.JSON)); ensureGreen("test"); List<IndexRequestBuilder> indexRequests = new ArrayList<>(); @@ -664,6 +689,7 @@ public void testFieldAliasWithWildcardField() throws Exception { indexRequests.add(client().prepareIndex("test").setId("2").setSource("f3", "value", "f2", "two")); indexRequests.add(client().prepareIndex("test").setId("3").setSource("f3", "another value", "f2", "three")); indexRandom(true, false, indexRequests); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test").setQuery(simpleQueryStringQuery("value").field("f3_*")).get(); @@ -674,12 +700,13 @@ public void testFieldAliasWithWildcardField() throws Exception { public void testFieldAliasOnDisallowedFieldType() throws Exception { String indexBody = copyToStringFromClasspath("/org/opensearch/search/query/all-query-index.json"); - assertAcked(prepareCreate("test").setSource(indexBody, XContentType.JSON)); + assertAcked(prepareCreate("test").setSource(indexBody, MediaTypeRegistry.JSON)); ensureGreen("test"); List<IndexRequestBuilder> indexRequests = new ArrayList<>(); indexRequests.add(client().prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one")); indexRandom(true, false, indexRequests); + indexRandomForConcurrentSearch("test"); // The wildcard field matches aliases for both a text and boolean field. // By default, the boolean field should be ignored when building the query. 
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/scriptfilter/ScriptQuerySearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/scriptfilter/ScriptQuerySearchIT.java index 45bc1ffca6886..7dbc61a3ced39 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/scriptfilter/ScriptQuerySearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/scriptfilter/ScriptQuerySearchIT.java @@ -32,12 +32,14 @@ package org.opensearch.search.scriptfilter; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.OpenSearchException; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexModule; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.plugins.Plugin; @@ -45,8 +47,9 @@ import org.opensearch.script.Script; import org.opensearch.script.ScriptType; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; @@ -61,12 +64,24 @@ import static java.util.Collections.emptyMap; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.scriptQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.equalTo; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE) -public class ScriptQuerySearchIT extends OpenSearchIntegTestCase { +public class ScriptQuerySearchIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + public ScriptQuerySearchIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } @Override protected Collection<Class<? 
extends Plugin>> nodePlugins() { @@ -140,6 +155,7 @@ public void testCustomScriptBinaryField() throws Exception { .get(); flush(); refresh(); + indexRandomForConcurrentSearch("my-index"); SearchResponse response = client().prepareSearch() .setQuery( @@ -192,6 +208,7 @@ public void testCustomScriptBoost() throws Exception { .setSource(jsonBuilder().startObject().field("test", "value beck").field("num1", 3.0f).endObject()) .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("running doc['num1'].value > 1"); SearchResponse response = client().prepareSearch() @@ -238,7 +255,7 @@ public void testCustomScriptBoost() throws Exception { assertThat(response.getHits().getAt(2).getFields().get("sNum1").getValues().get(0), equalTo(3.0)); } - public void testDisallowExpensiveQueries() { + public void testDisallowExpensiveQueries() throws InterruptedException { try { assertAcked(prepareCreate("test-index").setMapping("num1", "type=double")); int docCount = 10; @@ -246,6 +263,7 @@ public void testDisallowExpensiveQueries() { client().prepareIndex("test-index").setId("" + i).setSource("num1", i).get(); } refresh(); + indexRandomForConcurrentSearch("test-index"); // Execute with search.allow_expensive_queries = null => default value = true => success Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value > 1", Collections.emptyMap()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/scroll/DuelScrollIT.java b/server/src/internalClusterTest/java/org/opensearch/search/scroll/DuelScrollIT.java index e0a54e9b4fc36..55b3cfeef7419 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/scroll/DuelScrollIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/scroll/DuelScrollIT.java @@ -32,6 +32,7 @@ package org.opensearch.search.scroll; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.opensearch.action.index.IndexRequestBuilder; @@ -45,18 +46,32 @@ import org.opensearch.search.sort.SortBuilder; import org.opensearch.search.sort.SortBuilders; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; +import java.util.Collection; import java.util.HashSet; import java.util.Set; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.equalTo; -public class DuelScrollIT extends OpenSearchIntegTestCase { +public class DuelScrollIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + public DuelScrollIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + public void testDuelQueryThenFetch() throws Exception { TestContext context = create(SearchType.DFS_QUERY_THEN_FETCH, SearchType.QUERY_THEN_FETCH); diff --git 
a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java index 3ad15f852ebc5..35b5a7949b20b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.scroll; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.ExceptionsHelper; import org.opensearch.action.search.ClearScrollResponse; import org.opensearch.action.search.SearchPhaseExecutionException; @@ -41,28 +43,29 @@ import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Priority; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexSettings; import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.query.RangeQueryBuilder; -import org.opensearch.core.rest.RestStatus; import org.opensearch.search.SearchHit; import org.opensearch.search.sort.FieldSortBuilder; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; - import org.junit.After; import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.Map; @@ -71,6 +74,7 @@ import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.queryStringQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; @@ -87,7 +91,19 @@ /** * Tests for scrolling. 
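+ * Each test now runs twice, once with concurrent segment search disabled and once with it enabled, as configured by the parameters factory below.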
*/ -public class SearchScrollIT extends OpenSearchIntegTestCase { +public class SearchScrollIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + public SearchScrollIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @After public void cleanup() throws Exception { assertAcked( @@ -113,6 +129,7 @@ public void testSimpleScrollQueryThenFetch() throws Exception { } client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) @@ -166,6 +183,7 @@ public void testSimpleScrollQueryThenFetchSmallSizeUnevenDistribution() throws E } client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setSearchType(SearchType.QUERY_THEN_FETCH) @@ -234,6 +252,7 @@ public void testScrollAndUpdateIndex() throws Exception { } client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("test"); assertThat(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get().getHits().getTotalHits().value, equalTo(500L)); assertThat( @@ -306,6 +325,7 @@ public void testSimpleScrollQueryThenFetch_clearScrollIds() throws Exception { } client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse1 = client().prepareSearch() .setQuery(matchAllQuery()) @@ -426,6 +446,7 @@ public void testSimpleScrollQueryThenFetchClearAllScrollIds() throws Exception { } client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse1 = client().prepareSearch() .setQuery(matchAllQuery()) @@ -504,6 +525,7 @@ public void testDeepScrollingDoesNotBlowUp() throws Exception { .prepareUpdateSettings("index") .setSettings(Settings.builder().put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), Integer.MAX_VALUE)) .get(); + indexRandomForConcurrentSearch("index"); for (SearchType searchType : SearchType.values()) { SearchRequestBuilder builder = client().prepareSearch("index") @@ -545,6 +567,7 @@ public void testStringSortMissingAscTerminates() throws Exception { ); client().prepareIndex("test").setId("1").setSource("some_field", "test").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .addSort(new FieldSortBuilder("no_field").order(SortOrder.ASC).missing("_last")) @@ -570,12 +593,13 @@ public void testStringSortMissingAscTerminates() throws Exception { assertThat(response.getHits().getHits().length, equalTo(0)); } - public void testCloseAndReopenOrDeleteWithActiveScroll() { + public void testCloseAndReopenOrDeleteWithActiveScroll() throws InterruptedException { createIndex("test"); for (int i = 0; i < 100; i++) { client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", i).get(); } refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) .setSize(35) @@ -661,7 +685,7 @@ public void testScrollInvalidDefaultKeepAlive() throws IOException { assertThat(exc.getMessage(), containsString("was (1m > 30s)")); } - public 
void testInvalidScrollKeepAlive() throws IOException { + public void testInvalidScrollKeepAlive() throws IOException, InterruptedException { createIndex("test"); for (int i = 0; i < 2; i++) { client().prepareIndex("test") @@ -670,6 +694,7 @@ public void testInvalidScrollKeepAlive() throws IOException { .get(); } refresh(); + indexRandomForConcurrentSearch("test"); assertAcked( client().admin() .cluster() @@ -711,7 +736,7 @@ public void testInvalidScrollKeepAlive() throws IOException { * Ensures that we always create and retain search contexts on every target shard for a scroll request * regardless of whether that query can be rewritten to match_no_docs on some target shards or not. */ - public void testScrollRewrittenToMatchNoDocs() { + public void testScrollRewrittenToMatchNoDocs() throws InterruptedException { final int numShards = randomIntBetween(3, 5); assertAcked( client().admin() @@ -724,6 +749,7 @@ public void testScrollRewrittenToMatchNoDocs() { client().prepareIndex("test").setId("2").setSource("created_date", "2020-01-02").get(); client().prepareIndex("test").setId("3").setSource("created_date", "2020-01-03").get(); client().admin().indices().prepareRefresh("test").get(); + indexRandomForConcurrentSearch("test"); SearchResponse resp = null; try { int totalHits = 0; @@ -771,6 +797,7 @@ public void testRestartDataNodesDuringScrollSearch() throws Exception { index("prod", "_doc", "prod-" + i, Collections.emptyMap()); } client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("demo", "prod"); SearchResponse respFromDemoIndex = client().prepareSearch("demo") .setSize(randomIntBetween(1, 10)) .setQuery(new MatchAllQueryBuilder()) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java index c6519cc3a0cb3..38f65c8c2d0da 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java @@ -32,26 +32,44 @@ package org.opensearch.search.scroll; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAllSuccessful; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.lessThan; -@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0) -public class SearchScrollWithFailingNodesIT extends 
OpenSearchIntegTestCase { +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 2, numClientNodes = 0) +public class SearchScrollWithFailingNodesIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + public SearchScrollWithFailingNodesIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override protected int numberOfShards() { return 2; @@ -63,8 +81,6 @@ protected int numberOfReplicas() { } public void testScanScrollWithShardExceptions() throws Exception { - internalCluster().startNode(); - internalCluster().startNode(); assertAcked( prepareCreate("test") // Enforces that only one shard can be allocated to a single node @@ -97,7 +113,7 @@ public void testScanScrollWithShardExceptions() throws Exception { assertThat(numHits, equalTo(100L)); clearScroll("_all"); - internalCluster().stopRandomNonClusterManagerNode(); + internalCluster().stopRandomDataNode(); searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(10).setScroll(TimeValue.timeValueMinutes(1)).get(); assertThat(searchResponse.getSuccessfulShards(), lessThan(searchResponse.getTotalShards())); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java b/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java index 2a662c9dda088..13c510ff21338 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java @@ -32,7 +32,8 @@ package org.opensearch.search.searchafter; -import org.opensearch.action.ActionFuture; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.CreatePitAction; @@ -43,30 +44,46 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.common.UUIDs; +import org.opensearch.common.action.ActionFuture; +import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.SearchHit; import org.opensearch.search.builder.PointInTimeBuilder; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; -import java.util.List; import java.util.ArrayList; -import java.util.Comparator; -import java.util.Collections; import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static 
org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -public class SearchAfterIT extends OpenSearchIntegTestCase { +public class SearchAfterIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String INDEX_NAME = "test"; private static final int NUM_DOCS = 100; + public SearchAfterIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + public void testsShouldFail() throws Exception { assertAcked(client().admin().indices().prepareCreate("test").setMapping("field1", "type=long", "field2", "type=keyword").get()); ensureGreen(); @@ -197,8 +214,8 @@ public void testPitWithSearchAfter() throws Exception { .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) .get(); assertEquals(3, sr.getHits().getHits().length); - /** - * Add new data and assert PIT results remain the same and normal search results gets refreshed + /* + Add new data and assert PIT results remain the same and normal search results get refreshed */ indexRandom(true, client().prepareIndex("test").setId("4").setSource("field1", 102)); sr = client().prepareSearch() diff --git a/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java index 5d7c6d5891b83..98e749aa48cac 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.simple; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchRequestBuilder; @@ -39,19 +41,23 @@ import org.opensearch.action.support.WriteRequest.RefreshPolicy; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.query.ConstantScoreQueryBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.query.TermQueryBuilder; -import org.opensearch.core.rest.RestStatus; import org.opensearch.search.rescore.QueryRescorerBuilder; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; +import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import java.util.concurrent.ExecutionException; @@ -62,14 +68,29 @@ import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import 
static org.opensearch.index.query.QueryBuilders.queryStringQuery; import static org.opensearch.index.query.QueryBuilders.rangeQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFailures; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.apache.lucene.search.TotalHits.Relation.EQUAL_TO; +import static org.apache.lucene.search.TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO; -public class SimpleSearchIT extends OpenSearchIntegTestCase { +public class SimpleSearchIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public SimpleSearchIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() } + ); + } public void testSearchNullIndex() { expectThrows( @@ -258,10 +279,9 @@ public void testSimpleDateRange() throws Exception { assertHitCount(searchResponse, 2L); } - public void testSimpleTerminateAfterCount() throws Exception { + public void doTestSimpleTerminateAfterCountWithSize(int size, int max) throws Exception { prepareCreate("test").setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)).get(); ensureGreen(); - int max = randomIntBetween(3, 29); List<IndexRequestBuilder> docbuilders = new ArrayList<>(max); for (int i = 1; i <= max; i++) { @@ -278,9 +298,20 @@ public void testSimpleTerminateAfterCount() throws Exception { searchResponse = client().prepareSearch("test") .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(max)) .setTerminateAfter(i) + .setSize(size) + .setTrackTotalHits(true) .get(); - assertHitCount(searchResponse, i); + + // Do not expect an exact match as an optimization introduced by https://issues.apache.org/jira/browse/LUCENE-10620 + // can produce a total hit count > terminate_after, but this only kicks in + // when size = 0 which is when TotalHitCountCollector is used. + if (size == 0) { + assertHitCount(searchResponse, i, max); + } else { + assertHitCount(searchResponse, i); + } assertTrue(searchResponse.isTerminatedEarly()); + assertEquals(Math.min(i, size), searchResponse.getHits().getHits().length); } searchResponse = client().prepareSearch("test") @@ -292,6 +323,126 @@ public void testSimpleTerminateAfterCount() throws Exception { assertFalse(searchResponse.isTerminatedEarly()); } + public void testSimpleTerminateAfterCountSize0() throws Exception { + int max = randomIntBetween(3, 29); + doTestSimpleTerminateAfterCountWithSize(0, max); + } + + public void testSimpleTerminateAfterCountRandomSize() throws Exception { + int max = randomIntBetween(3, 29); + doTestSimpleTerminateAfterCountWithSize(randomIntBetween(1, max), max); + } + + /** + * Special cases when size = 0: + * + * If track_total_hits = true: + * Weight#count optimization can cause totalHits in the response to be up to the total doc count regardless of terminate_after.
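+ * For example (illustrative numbers taken from the test below): with 29 matching docs, terminate_after = 5, size = 0, and track_total_hits = true, totalHits may be reported as anything from 5 to 29.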
+ * So, we will have to do a range check, not an equality check. + * + * If track_total_hits != true, but set to a value AND terminate_after is set: + * Again, due to the optimization, any count can be returned. + * Up to terminate_after, relation == EQUAL_TO. + * But if track_total_hits_up_to ≥ terminate_after, relation can be EQ _or_ GTE. + * This ambiguity is due to the fact that totalHits == track_total_hits_up_to + * or totalHits > track_total_hits_up_to and SearchPhaseController sets totalHits = track_total_hits_up_to when returning results + * in which case relation = GTE. + * + * @param size + * @throws Exception + */ + public void doTestSimpleTerminateAfterTrackTotalHitsUpTo(int size) throws Exception { + prepareCreate("test").setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)).get(); + ensureGreen(); + int numDocs = 29; + List<IndexRequestBuilder> docbuilders = new ArrayList<>(numDocs); + + for (int i = 1; i <= numDocs; i++) { + String id = String.valueOf(i); + docbuilders.add(client().prepareIndex("test").setId(id).setSource("field", i)); + } + + indexRandom(true, docbuilders); + ensureGreen(); + refresh(); + + SearchResponse searchResponse; + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) + .setTerminateAfter(10) + .setSize(size) + .setTrackTotalHitsUpTo(5) + .get(); + assertTrue(searchResponse.isTerminatedEarly()); + assertEquals(5, searchResponse.getHits().getTotalHits().value); + assertEquals(GREATER_THAN_OR_EQUAL_TO, searchResponse.getHits().getTotalHits().relation); + + // For size = 0, the following queries terminate early, but hits and relation can vary. + if (size > 0) { + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) + .setTerminateAfter(5) + .setSize(size) + .setTrackTotalHitsUpTo(10) + .get(); + assertTrue(searchResponse.isTerminatedEarly()); + assertEquals(5, searchResponse.getHits().getTotalHits().value); + assertEquals(EQUAL_TO, searchResponse.getHits().getTotalHits().relation); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) + .setTerminateAfter(5) + .setSize(size) + .setTrackTotalHitsUpTo(5) + .get(); + assertTrue(searchResponse.isTerminatedEarly()); + assertEquals(5, searchResponse.getHits().getTotalHits().value); + assertEquals(EQUAL_TO, searchResponse.getHits().getTotalHits().relation); + } + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) + .setTerminateAfter(5) + .setSize(size) + .setTrackTotalHits(true) + .get(); + assertTrue(searchResponse.isTerminatedEarly()); + if (size == 0) { + // Since terminate_after < track_total_hits, we need to do a range check. 
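+ // Concretely (numbers from this test): with terminate_after = 5, track_total_hits = true and 29 matching
+ // docs, a size = 0 request may report totalHits anywhere from 5 (counting stopped early) up to 29
+ // (Weight#count short-circuited a whole segment), so only a range assertion is safe here.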
+ assertHitCount(searchResponse, 5, numDocs); + } else { + assertEquals(5, searchResponse.getHits().getTotalHits().value); + } + assertEquals(EQUAL_TO, searchResponse.getHits().getTotalHits().relation); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) + .setTerminateAfter(numDocs * 2) + .setSize(size) + .setTrackTotalHits(true) + .get(); + assertFalse(searchResponse.isTerminatedEarly()); + assertEquals(numDocs, searchResponse.getHits().getTotalHits().value); + assertEquals(EQUAL_TO, searchResponse.getHits().getTotalHits().relation); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) + .setSize(size) + .setTrackTotalHitsUpTo(5) + .get(); + assertEquals(5, searchResponse.getHits().getTotalHits().value); + assertEquals(GREATER_THAN_OR_EQUAL_TO, searchResponse.getHits().getTotalHits().relation); + } + + public void testSimpleTerminateAfterTrackTotalHitsUpToRandomSize0() throws Exception { + doTestSimpleTerminateAfterTrackTotalHitsUpTo(0); + } + + public void testSimpleTerminateAfterTrackTotalHitsUpToSize() throws Exception { + doTestSimpleTerminateAfterTrackTotalHitsUpTo(randomIntBetween(1, 29)); + } + public void testSimpleIndexSortEarlyTerminate() throws Exception { prepareCreate("test").setSettings( Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0).put("index.sort.field", "rank") @@ -326,7 +477,7 @@ public void testSimpleIndexSortEarlyTerminate() throws Exception { public void testInsaneFromAndSize() throws Exception { createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); assertWindowFails(client().prepareSearch("idx").setFrom(Integer.MAX_VALUE)); assertWindowFails(client().prepareSearch("idx").setSize(Integer.MAX_VALUE)); @@ -334,7 +485,7 @@ public void testInsaneFromAndSize() throws Exception { public void testTooLargeFromAndSize() throws Exception { createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); assertWindowFails(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY))); assertWindowFails(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1)); @@ -347,7 +498,7 @@ public void testTooLargeFromAndSize() throws Exception { public void testLargeFromAndSizeSucceeds() throws Exception { createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) - 10).get(), 1); assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)).get(), 1); @@ -365,7 +516,7 @@ public void testTooLargeFromAndSizeOkBySetting() throws Exception { Settings.builder() .put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 2) ).get(); - indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); 
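+ // With index.max_result_window doubled from its default of 10,000 at index creation, the from/size
+ // values asserted below now fall inside the window and are expected to succeed rather than fail.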
assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)).get(), 1); assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1).get(), 1); @@ -393,7 +544,7 @@ public void testTooLargeFromAndSizeOkByDynamicSetting() throws Exception { ) .get() ); - indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY)).get(), 1); assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) + 1).get(), 1); @@ -408,7 +559,7 @@ public void testTooLargeFromAndSizeOkByDynamicSetting() throws Exception { public void testTooLargeFromAndSizeBackwardsCompatibilityRecommendation() throws Exception { prepareCreate("idx").setSettings(Settings.builder().put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), Integer.MAX_VALUE)).get(); - indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); assertHitCount(client().prepareSearch("idx").setFrom(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10).get(), 1); assertHitCount(client().prepareSearch("idx").setSize(IndexSettings.MAX_RESULT_WINDOW_SETTING.get(Settings.EMPTY) * 10).get(), 1); @@ -423,7 +574,7 @@ public void testTooLargeFromAndSizeBackwardsCompatibilityRecommendation() throws public void testTooLargeRescoreWindow() throws Exception { createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); assertRescoreWindowFails(Integer.MAX_VALUE); assertRescoreWindowFails(IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY) + 1); @@ -433,7 +584,7 @@ public void testTooLargeRescoreOkBySetting() throws Exception { int defaultMaxWindow = IndexSettings.MAX_RESCORE_WINDOW_SETTING.get(Settings.EMPTY); prepareCreate("idx").setSettings(Settings.builder().put(IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey(), defaultMaxWindow * 2)) .get(); - indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); assertHitCount( client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), @@ -450,7 +601,7 @@ public void testTooLargeRescoreOkByResultWindowSetting() throws Exception { defaultMaxWindow * 2 ) ).get(); - indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); assertHitCount( client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), @@ -468,7 +619,7 @@ public void testTooLargeRescoreOkByDynamicSetting() throws Exception { .setSettings(Settings.builder().put(IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey(), defaultMaxWindow * 2)) .get() ); - indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); assertHitCount( 
client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), @@ -489,7 +640,7 @@ public void testTooLargeRescoreOkByDynamicResultWindowSetting() throws Exception ) .get() ); - indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); assertHitCount( client().prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(defaultMaxWindow + 1)).get(), @@ -515,7 +666,7 @@ public void testTermQueryBigInt() throws Exception { client().prepareIndex("idx") .setId("1") - .setSource("{\"field\" : 80315953321748200608 }", XContentType.JSON) + .setSource("{\"field\" : 80315953321748200608 }", MediaTypeRegistry.JSON) .setRefreshPolicy(RefreshPolicy.IMMEDIATE) .get(); @@ -527,9 +678,26 @@ public void testTermQueryBigInt() throws Exception { assertEquals(1, searchResponse.getHits().getTotalHits().value); } + public void testIndexOnlyFloatField() throws IOException { + prepareCreate("idx").setMapping("field", "type=float,doc_values=false").get(); + ensureGreen("idx"); + + IndexRequestBuilder indexRequestBuilder = client().prepareIndex("idx"); + + for (float i = 9000.0F; i < 20000.0F; i++) { + indexRequestBuilder.setId(String.valueOf(i)).setSource("{\"field\":" + i + "}", MediaTypeRegistry.JSON).get(); + } + String queryJson = "{ \"filter\" : { \"terms\" : { \"field\" : [ 10000.0 ] } } }"; + XContentParser parser = createParser(JsonXContent.jsonXContent, queryJson); + parser.nextToken(); + ConstantScoreQueryBuilder query = ConstantScoreQueryBuilder.fromXContent(parser); + SearchResponse searchResponse = client().prepareSearch("idx").setQuery(query).get(); + assertEquals(1, searchResponse.getHits().getTotalHits().value); + } + public void testTooLongRegexInRegexpQuery() throws Exception { createIndex("idx"); - indexRandom(true, client().prepareIndex("idx").setSource("{}", XContentType.JSON)); + indexRandom(true, client().prepareIndex("idx").setSource("{}", MediaTypeRegistry.JSON)); int defaultMaxRegexLength = IndexSettings.MAX_REGEX_LENGTH_SETTING.get(Settings.EMPTY); StringBuilder regexp = new StringBuilder(defaultMaxRegexLength); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/slice/SearchSliceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/slice/SearchSliceIT.java index cc7e620f33216..ea73f9ee1a2be 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/slice/SearchSliceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/slice/SearchSliceIT.java @@ -32,9 +32,9 @@ package org.opensearch.search.slice; -import org.opensearch.action.ActionFuture; -import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.CreatePitAction; import org.opensearch.action.search.CreatePitRequest; @@ -42,51 +42,65 @@ import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; -import org.opensearch.common.Strings; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import 
org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.Scroll; import org.opensearch.search.SearchException; import org.opensearch.search.SearchHit; import org.opensearch.search.builder.PointInTimeBuilder; import org.opensearch.search.sort.SortBuilders; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; -import java.util.List; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.HashSet; +import java.util.List; import java.util.concurrent.ExecutionException; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.startsWith; -public class SearchSliceIT extends OpenSearchIntegTestCase { - private void setupIndex(int numDocs, int numberOfShards) throws IOException, ExecutionException, InterruptedException { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("invalid_random_kw") - .field("type", "keyword") - .field("doc_values", "false") - .endObject() - .startObject("random_int") - .field("type", "integer") - .field("doc_values", "true") - .endObject() - .startObject("invalid_random_int") - .field("type", "integer") - .field("doc_values", "false") - .endObject() - .endObject() - .endObject() +public class SearchSliceIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + public SearchSliceIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } ); + } + + private void setupIndex(int numDocs, int numberOfShards) throws IOException, ExecutionException, InterruptedException { + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("invalid_random_kw") + .field("type", "keyword") + .field("doc_values", "false") + .endObject() + .startObject("random_int") + .field("type", "integer") + .field("doc_values", "true") + .endObject() + .startObject("invalid_random_int") + .field("type", "integer") + .field("doc_values", "false") + .endObject() + .endObject() + .endObject() + .toString(); assertAcked( client().admin() .indices() diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java index 83e732b39103e..e40928f15e8a8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java @@ -32,10 +32,11 @@ package org.opensearch.search.sort; -import org.apache.lucene.util.BytesRef; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.tests.util.TestUtil; +import 
org.apache.lucene.util.BytesRef; import org.apache.lucene.util.UnicodeUtil; - import org.opensearch.OpenSearchException; import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.bulk.BulkRequestBuilder; @@ -46,23 +47,22 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Numbers; import org.opensearch.common.settings.Settings; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.query.functionscore.ScoreFunctionBuilders; import org.opensearch.indices.IndicesService; import org.opensearch.plugins.Plugin; -import org.opensearch.core.rest.RestStatus; import org.opensearch.script.MockScriptPlugin; import org.opensearch.script.Script; import org.opensearch.script.ScriptType; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalSettingsPlugin; - +import org.opensearch.test.ParameterizedDynamicSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.io.IOException; @@ -88,6 +88,7 @@ import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.functionscore.ScoreFunctionBuilders.fieldValueFactorFunction; import static org.opensearch.script.MockScriptPlugin.NAME; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFirstHit; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; @@ -101,13 +102,25 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.oneOf; import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.oneOf; + +public class FieldSortIT extends ParameterizedDynamicSettingsOpenSearchIntegTestCase { + public FieldSortIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } -public class FieldSortIT extends OpenSearchIntegTestCase { public static class CustomScriptPlugin extends MockScriptPlugin { @Override protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() { @@ -135,7 +148,7 @@ protected Collection<Class<? 
extends Plugin>> nodePlugins() { return Arrays.asList(InternalSettingsPlugin.class, CustomScriptPlugin.class); } - public void testIssue8226() { + public void testIssue8226() throws InterruptedException { int numIndices = between(5, 10); final boolean useMapping = randomBoolean(); for (int i = 0; i < numIndices; i++) { @@ -145,10 +158,13 @@ public void testIssue8226() { assertAcked(prepareCreate("test_" + i).addAlias(new Alias("test"))); } if (i > 0) { - client().prepareIndex("test_" + i).setId("" + i).setSource("{\"entry\": " + i + "}", XContentType.JSON).get(); + client().prepareIndex("test_" + i).setId("" + i).setSource("{\"entry\": " + i + "}", MediaTypeRegistry.JSON).get(); } } refresh(); + for (int i = 0; i < numIndices; i++) { + indexRandomForConcurrentSearch("test_" + i); + } // sort DESC SearchResponse searchResponse = client().prepareSearch() .addSort(new FieldSortBuilder("entry").order(SortOrder.DESC).unmappedType(useMapping ? null : "long")) @@ -181,7 +197,8 @@ public void testIssue8226() { public void testIssue6614() throws ExecutionException, InterruptedException { List<IndexRequestBuilder> builders = new ArrayList<>(); boolean strictTimeBasedIndices = randomBoolean(); - final int numIndices = randomIntBetween(2, 25); // at most 25 days in the month + // consider only 15 days of the month to avoid hitting open file limit + final int numIndices = randomIntBetween(2, 15); int docs = 0; for (int i = 0; i < numIndices; i++) { final String indexId = strictTimeBasedIndices ? "idx_" + i : "idx"; @@ -259,6 +276,7 @@ public void testTrackScores() throws Exception { jsonBuilder().startObject().field("id", "2").field("svalue", "bbb").field("ivalue", 200).field("dvalue", 0.2).endObject() ); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addSort("svalue", SortOrder.ASC).get(); @@ -427,6 +445,7 @@ public void testScoreSortDirection() throws Exception { client().prepareIndex("test").setId("3").setSource("field", 0).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), ScoreFunctionBuilders.fieldValueFactorFunction("field"))) @@ -465,6 +484,7 @@ public void testScoreSortDirectionWithFunctionScore() throws Exception { client().prepareIndex("test").setId("3").setSource("field", 0).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setQuery(functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("field"))) @@ -497,9 +517,9 @@ public void testScoreSortDirectionWithFunctionScore() throws Exception { public void testIssue2986() { assertAcked(client().admin().indices().prepareCreate("test").setMapping("field1", "type=keyword").get()); - client().prepareIndex("test").setId("1").setSource("{\"field1\":\"value1\"}", XContentType.JSON).get(); - client().prepareIndex("test").setId("2").setSource("{\"field1\":\"value2\"}", XContentType.JSON).get(); - client().prepareIndex("test").setId("3").setSource("{\"field1\":\"value3\"}", XContentType.JSON).get(); + client().prepareIndex("test").setId("1").setSource("{\"field1\":\"value1\"}", MediaTypeRegistry.JSON).get(); + client().prepareIndex("test").setId("2").setSource("{\"field1\":\"value2\"}", MediaTypeRegistry.JSON).get(); + client().prepareIndex("test").setId("3").setSource("{\"field1\":\"value3\"}", MediaTypeRegistry.JSON).get(); refresh(); SearchResponse result = 
client().prepareSearch("test") .setQuery(matchAllQuery()) @@ -512,7 +532,7 @@ public void testIssue2986() { } } - public void testIssue2991() { + public void testIssue2991() throws InterruptedException { for (int i = 1; i < 4; i++) { try { client().admin().indices().prepareDelete("test").get(); @@ -533,6 +553,7 @@ public void testIssue2991() { client().prepareIndex("test").setId("2").setSource("tag", "beta").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse resp = client().prepareSearch("test") .setSize(2) .setQuery(matchAllQuery()) @@ -586,6 +607,9 @@ public void testSimpleSorts() throws Exception { .startObject("float_value") .field("type", "float") .endObject() + .startObject("half_float_value") + .field("type", "half_float") + .endObject() .startObject("double_value") .field("type", "double") .endObject() @@ -609,6 +633,7 @@ public void testSimpleSorts() throws Exception { .field("long_value", i) .field("unsigned_long_value", UNSIGNED_LONG_BASE.add(BigInteger.valueOf(10000 * i))) .field("float_value", 0.1 * i) + .field("half_float_value", 0.1 * i) .field("double_value", 0.1 * i) .endObject() ); @@ -627,6 +652,7 @@ public void testSimpleSorts() throws Exception { } refresh(); + indexRandomForConcurrentSearch("test"); // STRING int size = 1 + random.nextInt(10); @@ -775,6 +801,28 @@ public void testSimpleSorts() throws Exception { assertThat(searchResponse.toString(), not(containsString("error"))); + // HALF_FLOAT + size = 1 + random.nextInt(10); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("half_float_value", SortOrder.ASC).get(); + + assertHitCount(searchResponse, 10L); + assertThat(searchResponse.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); + } + + assertThat(searchResponse.toString(), not(containsString("error"))); + size = 1 + random.nextInt(10); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("half_float_value", SortOrder.DESC).get(); + + assertHitCount(searchResponse, 10); + assertThat(searchResponse.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(9 - i))); + } + + assertThat(searchResponse.toString(), not(containsString("error"))); + // DOUBLE size = 1 + random.nextInt(10); searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("double_value", SortOrder.ASC).get(); @@ -877,6 +925,7 @@ public void testSortMissingNumbers() throws Exception { flush(); refresh(); + indexRandomForConcurrentSearch("test"); // DOUBLE logger.info("--> sort with no missing (same as missing _last)"); @@ -1038,6 +1087,7 @@ public void testSortMissingNumbersMinMax() throws Exception { flush(); refresh(); + indexRandomForConcurrentSearch("test"); // LONG logger.info("--> sort with no missing (same as missing _last)"); @@ -1157,7 +1207,7 @@ public void testSortMissingNumbersMinMax() throws Exception { assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3")); } - public void testSortMissingStrings() throws IOException { + public void testSortMissingStrings() throws IOException, InterruptedException { assertAcked( prepareCreate("test").setMapping( XContentFactory.jsonBuilder() @@ -1185,6 +1235,7 @@ public void testSortMissingStrings() throws IOException { flush(); refresh(); + indexRandomForConcurrentSearch("test"); // TODO: 
WTF? try { @@ -1311,6 +1362,9 @@ public void testSortMVField() throws Exception { .startObject("float_values") .field("type", "float") .endObject() + .startObject("half_float_values") + .field("type", "float") + .endObject() .startObject("double_values") .field("type", "double") .endObject() @@ -1332,6 +1386,7 @@ public void testSortMVField() throws Exception { .array("short_values", 1, 5, 10, 8) .array("byte_values", 1, 5, 10, 8) .array("float_values", 1f, 5f, 10f, 8f) + .array("half_float_values", 1f, 5f, 10f, 8f) .array("double_values", 1d, 5d, 10d, 8d) .array("string_values", "01", "05", "10", "08") .endObject() @@ -1346,6 +1401,7 @@ public void testSortMVField() throws Exception { .array("short_values", 11, 15, 20, 7) .array("byte_values", 11, 15, 20, 7) .array("float_values", 11f, 15f, 20f, 7f) + .array("half_float_values", 11f, 15f, 20f, 7f) .array("double_values", 11d, 15d, 20d, 7d) .array("string_values", "11", "15", "20", "07") .endObject() @@ -1360,6 +1416,7 @@ public void testSortMVField() throws Exception { .array("short_values", 2, 1, 3, -4) .array("byte_values", 2, 1, 3, -4) .array("float_values", 2f, 1f, 3f, -4f) + .array("half_float_values", 2f, 1f, 3f, -4f) .array("double_values", 2d, 1d, 3d, -4d) .array("string_values", "02", "01", "03", "!4") .endObject() @@ -1367,6 +1424,7 @@ public void testSortMVField() throws Exception { .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) @@ -1566,6 +1624,34 @@ public void testSortMVField() throws Exception { assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).floatValue(), equalTo(3f)); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("half_float_values", SortOrder.ASC).get(); + + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); + assertThat(searchResponse.getHits().getHits().length, equalTo(3)); + + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(3))); + assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).floatValue(), equalTo(-4f)); + + assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).floatValue(), equalTo(1f)); + + assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(2))); + assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).floatValue(), equalTo(7f)); + + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("half_float_values", SortOrder.DESC).get(); + + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); + assertThat(searchResponse.getHits().getHits().length, equalTo(3)); + + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(Integer.toString(2))); + assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).floatValue(), equalTo(20f)); + + assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(Integer.toString(1))); + assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).floatValue(), equalTo(10f)); + + assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(Integer.toString(3))); + assertThat(((Number) searchResponse.getHits().getAt(2).getSortValues()[0]).floatValue(), equalTo(3f)); + searchResponse = 
client().prepareSearch().setQuery(matchAllQuery()).setSize(10).addSort("double_values", SortOrder.ASC).get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); @@ -1623,7 +1709,7 @@ public void testSortMVField() throws Exception { assertThat(searchResponse.getHits().getAt(2).getSortValues()[0], equalTo("03")); } - public void testSortOnRareField() throws IOException { + public void testSortOnRareField() throws IOException, InterruptedException { assertAcked( prepareCreate("test").setMapping( XContentFactory.jsonBuilder() @@ -1643,6 +1729,7 @@ public void testSortOnRareField() throws IOException { .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) .setSize(3) @@ -1738,6 +1825,7 @@ public void testSortMetaField() throws Exception { indexReqs[i] = client().prepareIndex("test").setId(Integer.toString(i)).setSource(); } indexRandom(true, indexReqs); + indexRandomForConcurrentSearch("test"); SortOrder order = randomFrom(SortOrder.values()); SearchResponse searchResponse = client().prepareSearch() @@ -1840,6 +1928,7 @@ public void testNestedSort() throws IOException, InterruptedException, Execution ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); // We sort on nested field SearchResponse searchResponse = client().prepareSearch() @@ -2025,6 +2114,7 @@ public void testFieldAlias() throws Exception { builders.add(client().prepareIndex("old_index").setSource("distance", 50.5)); builders.add(client().prepareIndex("new_index").setSource("route_length_miles", 100.2)); indexRandom(true, true, builders); + indexRandomForConcurrentSearch("old_index", "new_index"); SearchResponse response = client().prepareSearch() .setQuery(matchAllQuery()) @@ -2051,6 +2141,7 @@ public void testFieldAliasesWithMissingValues() throws Exception { builders.add(client().prepareIndex("old_index").setSource(Collections.emptyMap())); builders.add(client().prepareIndex("new_index").setSource("route_length_miles", 100.2)); indexRandom(true, true, builders); + indexRandomForConcurrentSearch("old_index", "new_index"); SearchResponse response = client().prepareSearch() .setQuery(matchAllQuery()) @@ -2120,6 +2211,7 @@ public void testCastDate() throws Exception { builders.add(client().prepareIndex("index_date").setSource("field", "2024-04-11T23:47:17")); builders.add(client().prepareIndex("index_date_nanos").setSource("field", "2024-04-11T23:47:16.854775807Z")); indexRandom(true, true, builders); + indexRandomForConcurrentSearch("index_date", "index_date_nanos"); { SearchResponse response = client().prepareSearch() @@ -2245,7 +2337,7 @@ public void testCastNumericTypeExceptions() throws Exception { } } - public void testLongSortOptimizationCorrectResults() { + public void testLongSortOptimizationCorrectResults() throws InterruptedException { assertAcked( prepareCreate("test1").setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2)) .setMapping("long_field", "type=long") @@ -2259,9 +2351,10 @@ public void testLongSortOptimizationCorrectResults() { bulkBuilder = client().prepareBulk(); } String source = "{\"long_field\":" + randomLong() + "}"; - bulkBuilder.add(client().prepareIndex("test1").setId(Integer.toString(i)).setSource(source, XContentType.JSON)); + bulkBuilder.add(client().prepareIndex("test1").setId(Integer.toString(i)).setSource(source, MediaTypeRegistry.JSON)); } refresh(); + indexRandomForConcurrentSearch("test1"); // *** 1. 
sort DESC on long_field SearchResponse searchResponse = client().prepareSearch() @@ -2291,4 +2384,185 @@ public void testLongSortOptimizationCorrectResults() { } } + public void testSimpleSortsPoints() throws Exception { + final int docs = 100; + + Random random = random(); + assertAcked( + prepareCreate("test").setMapping( + XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("str_value") + .field("type", "keyword") + .endObject() + .startObject("boolean_value") + .field("type", "boolean") + .endObject() + .startObject("byte_value") + .field("type", "byte") + .endObject() + .startObject("short_value") + .field("type", "short") + .endObject() + .startObject("integer_value") + .field("type", "integer") + .endObject() + .startObject("long_value") + .field("type", "long") + .endObject() + .startObject("unsigned_long_value") + .field("type", "unsigned_long") + .endObject() + .startObject("float_value") + .field("type", "float") + .endObject() + .startObject("half_float_value") + .field("type", "half_float") + .endObject() + .startObject("double_value") + .field("type", "double") + .endObject() + .endObject() + .endObject() + ) + ); + ensureGreen(); + BigInteger UNSIGNED_LONG_BASE = Numbers.MAX_UNSIGNED_LONG_VALUE.subtract(BigInteger.valueOf(10000 * docs)); + List<IndexRequestBuilder> builders = new ArrayList<>(); + for (int i = 0; i < docs / 2; i++) { + IndexRequestBuilder builder = client().prepareIndex("test") + .setId(Integer.toString(i)) + .setSource( + jsonBuilder().startObject() + .field("str_value", new String(new char[] { (char) (97 + i), (char) (97 + i) })) + .field("boolean_value", true) + .field("byte_value", i) + .field("short_value", i) + .field("integer_value", i) + .field("long_value", i) + .field("unsigned_long_value", UNSIGNED_LONG_BASE.add(BigInteger.valueOf(10000 * i))) + .field("float_value", 32 * i) + .field("half_float_value", 16 * i) + .field("double_value", 64 * i) + .endObject() + ); + builders.add(builder); + } + + // We keep half of the docs with numeric values and other half without + for (int i = docs / 2; i < docs; i++) { + IndexRequestBuilder builder = client().prepareIndex("test") + .setId(Integer.toString(i)) + .setSource( + jsonBuilder().startObject().field("str_value", new String(new char[] { (char) (97 + i), (char) (97 + i) })).endObject() + ); + builders.add(builder); + } + + int j = 0; + Collections.shuffle(builders, random); + for (IndexRequestBuilder builder : builders) { + builder.get(); + if ((++j % 25) == 0) { + refresh(); + } + + } + refresh(); + indexRandomForConcurrentSearch("test"); + + final int size = 2; + // HALF_FLOAT + SearchResponse searchResponse = client().prepareSearch() + .setQuery(matchAllQuery()) + .setSize(size) + .addSort("half_float_value", SortOrder.ASC) + .get(); + + assertHitCount(searchResponse, docs); + assertThat(searchResponse.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); + } + + assertThat(searchResponse.toString(), not(containsString("error"))); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("half_float_value", SortOrder.DESC).get(); + + assertHitCount(searchResponse, docs); + assertThat(searchResponse.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(docs / 2 - 1 - i))); + } + + 
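+ // Illustrative note: only ids 0 .. docs/2 - 1 carry numeric values; a missing sort value defaults to
+ // sorting last, so the docs without values stay at the bottom for both ASC and DESC, which is why the
+ // top hits in both orders come only from the first half of the ids.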
assertThat(searchResponse.toString(), not(containsString("error"))); + + // FLOAT + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("float_value", SortOrder.ASC).get(); + + assertHitCount(searchResponse, docs); + assertThat(searchResponse.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); + } + + assertThat(searchResponse.toString(), not(containsString("error"))); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("float_value", SortOrder.DESC).get(); + + assertHitCount(searchResponse, docs); + assertThat(searchResponse.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(docs / 2 - 1 - i))); + } + + assertThat(searchResponse.toString(), not(containsString("error"))); + + // DOUBLE + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("double_value", SortOrder.ASC).get(); + + assertHitCount(searchResponse, docs); + assertThat(searchResponse.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); + } + + assertThat(searchResponse.toString(), not(containsString("error"))); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("double_value", SortOrder.DESC).get(); + + assertHitCount(searchResponse, docs); + assertThat(searchResponse.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(docs / 2 - 1 - i))); + } + + assertThat(searchResponse.toString(), not(containsString("error"))); + + // UNSIGNED_LONG + searchResponse = client().prepareSearch() + .setQuery(matchAllQuery()) + .setSize(size) + .addSort("unsigned_long_value", SortOrder.ASC) + .get(); + + assertHitCount(searchResponse, docs); + assertThat(searchResponse.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(i))); + } + + assertThat(searchResponse.toString(), not(containsString("error"))); + searchResponse = client().prepareSearch() + .setQuery(matchAllQuery()) + .setSize(size) + .addSort("unsigned_long_value", SortOrder.DESC) + .get(); + + assertHitCount(searchResponse, docs); + assertThat(searchResponse.getHits().getHits().length, equalTo(size)); + for (int i = 0; i < size; i++) { + assertThat(searchResponse.getHits().getAt(i).getId(), equalTo(Integer.toString(docs / 2 - 1 - i))); + } + + assertThat(searchResponse.toString(), not(containsString("error"))); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceIT.java index 7174262efec1d..492ffce3321e4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceIT.java @@ -32,24 +32,29 @@ package org.opensearch.search.sort; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.Version; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.metadata.IndexMetadata; import 
org.opensearch.common.geo.GeoDistance; import org.opensearch.common.settings.Settings; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.geometry.utils.Geohash; import org.opensearch.index.query.QueryBuilders; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFirstHit; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; @@ -60,7 +65,19 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; -public class GeoDistanceIT extends OpenSearchIntegTestCase { +public class GeoDistanceIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public GeoDistanceIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } @Override protected boolean forbidPrivateIndexSettings() { @@ -169,6 +186,7 @@ public void testDistanceSortingMVFields() throws Exception { .get(); client().admin().indices().prepareRefresh().get(); + indexRandomForConcurrentSearch("test"); // Order: Asc SearchResponse searchResponse = client().prepareSearch("test") @@ -301,6 +319,7 @@ public void testDistanceSortingWithMissingGeoPoint() throws Exception { .get(); refresh(); + indexRandomForConcurrentSearch("test"); // Order: Asc SearchResponse searchResponse = client().prepareSearch("test") @@ -579,7 +598,7 @@ public void testDistanceSortingNestedFields() throws Exception { /** * Issue 3073 */ - public void testGeoDistanceFilter() throws IOException { + public void testGeoDistanceFilter() throws IOException, InterruptedException { Version version = VersionUtils.randomIndexCompatibleVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); double lat = 40.720611; @@ -597,6 +616,7 @@ public void testGeoDistanceFilter() throws IOException { assertAcked(prepareCreate("locations").setSettings(settings).setMapping(mapping)); client().prepareIndex("locations").setId("1").setCreate(true).setSource(source).get(); refresh(); + indexRandomForConcurrentSearch("locations"); client().prepareGet("locations", "1").get(); SearchResponse result = client().prepareSearch("locations") @@ -645,6 +665,7 @@ public void testDistanceSortingWithUnmappedField() throws Exception { .get(); refresh(); + indexRandomForConcurrentSearch("test1", "test2"); // Order: Asc SearchResponse searchResponse = client().prepareSearch("test1", "test2") diff --git 
a/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceSortBuilderIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceSortBuilderIT.java index 5a0ca1d13633e..b6f53936d5939 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceSortBuilderIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceSortBuilderIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.sort; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.Version; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.metadata.IndexMetadata; @@ -42,25 +44,38 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.GeoValidationMethod; import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.concurrent.ExecutionException; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.sort.SortBuilders.fieldSort; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertOrderedSearchHits; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSortValues; import static org.hamcrest.Matchers.closeTo; -public class GeoDistanceSortBuilderIT extends OpenSearchIntegTestCase { +public class GeoDistanceSortBuilderIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + public GeoDistanceSortBuilderIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } private static final String LOCATION_FIELD = "location"; @@ -70,16 +85,16 @@ protected boolean forbidPrivateIndexSettings() { } public void testManyToManyGeoPoints() throws ExecutionException, InterruptedException, IOException { - /** - * | q | d1 | d2 - * | | | - * | | | - * | | | - * |2 o| x | x - * | | | - * |1 o| x | x - * |___________________________ - * 1 2 3 4 5 6 7 + /* + | q | d1 | d2 + | | | + | | | + | | | + |2 o| x | x + | | | + |1 o| x | x + |___________________________ + 1 2 3 4 5 6 7 */ Version version = randomBoolean() ? Version.CURRENT : VersionUtils.randomIndexCompatibleVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); @@ -166,11 +181,10 @@ public void testManyToManyGeoPoints() throws ExecutionException, InterruptedExce } public void testSingeToManyAvgMedian() throws ExecutionException, InterruptedException, IOException { - /** - * q = (0, 0) - * - * d1 = (0, 1), (0, 4), (0, 10); so avg. distance is 5, median distance is 4 - * d2 = (0, 1), (0, 5), (0, 6); so avg. 
distance is 4, median distance is 5 + /* + q = (0, 0) + d1 = (0, 1), (0, 4), (0, 10); so avg. distance is 5, median distance is 4 + d2 = (0, 1), (0, 5), (0, 6); so avg. distance is 4, median distance is 5 */ Version version = randomBoolean() ? Version.CURRENT : VersionUtils.randomIndexCompatibleVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); @@ -235,16 +249,17 @@ protected void createShuffeldJSONArray(XContentBuilder builder, GeoPoint[] point } public void testManyToManyGeoPointsWithDifferentFormats() throws ExecutionException, InterruptedException, IOException { - /** q d1 d2 - * |4 o| x | x - * | | | - * |3 o| x | x - * | | | - * |2 o| x | x - * | | | - * |1 o|x |x - * |______________________ - * 1 2 3 4 5 6 + /* + q d1 d2 + |4 o| x | x + | | | + |3 o| x | x + | | | + |2 o| x | x + | | | + |1 o|x |x + |______________________ + 1 2 3 4 5 6 */ Version version = randomBoolean() ? Version.CURRENT : VersionUtils.randomIndexCompatibleVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/SimpleSortIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/SimpleSortIT.java index 8ff0790e7cb48..cb8b508c4496b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/SimpleSortIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/SimpleSortIT.java @@ -32,12 +32,14 @@ package org.opensearch.search.sort; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.ShardSearchFailure; -import org.opensearch.common.Strings; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.geo.GeoUtils; +import org.opensearch.common.settings.Settings; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.plugins.Plugin; import org.opensearch.script.MockScriptPlugin; @@ -45,8 +47,8 @@ import org.opensearch.script.ScriptType; import org.opensearch.search.SearchHit; import org.opensearch.search.sort.ScriptSortBuilder.ScriptSortType; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; @@ -62,6 +64,7 @@ import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.sort.SortBuilders.scriptSort; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; @@ -71,10 +74,22 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; -public class SimpleSortIT extends OpenSearchIntegTestCase { +public class SimpleSortIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { private static final String DOUBLE_APOSTROPHE = "\u0027\u0027"; + public SimpleSortIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> 
parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(CustomScriptPlugin.class, InternalSettingsPlugin.class); @@ -237,24 +252,23 @@ public void testSimpleSorts() throws Exception { } public void testSortMinValueScript() throws IOException { - String mapping = Strings.toString( - jsonBuilder().startObject() - .startObject("properties") - .startObject("lvalue") - .field("type", "long") - .endObject() - .startObject("dvalue") - .field("type", "double") - .endObject() - .startObject("svalue") - .field("type", "keyword") - .endObject() - .startObject("gvalue") - .field("type", "geo_point") - .endObject() - .endObject() - .endObject() - ); + String mapping = jsonBuilder().startObject() + .startObject("properties") + .startObject("lvalue") + .field("type", "long") + .endObject() + .startObject("dvalue") + .field("type", "double") + .endObject() + .startObject("svalue") + .field("type", "keyword") + .endObject() + .startObject("gvalue") + .field("type", "geo_point") + .endObject() + .endObject() + .endObject() + .toString(); assertAcked(prepareCreate("test").setMapping(mapping)); ensureGreen(); @@ -351,18 +365,17 @@ public void testDocumentsWithNullValue() throws Exception { // TODO: sort shouldn't fail when sort field is mapped dynamically // We have to specify mapping explicitly because by the time search is performed dynamic mapping might not // be propagated to all nodes yet and sort operation fail when the sort field is not defined - String mapping = Strings.toString( - jsonBuilder().startObject() - .startObject("properties") - .startObject("id") - .field("type", "keyword") - .endObject() - .startObject("svalue") - .field("type", "keyword") - .endObject() - .endObject() - .endObject() - ); + String mapping = jsonBuilder().startObject() + .startObject("properties") + .startObject("id") + .field("type", "keyword") + .endObject() + .startObject("svalue") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .toString(); assertAcked(prepareCreate("test").setMapping(mapping)); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java index e9fc1c54ad234..ec891045cb510 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java @@ -8,21 +8,36 @@ package org.opensearch.search.sort; -import static org.hamcrest.Matchers.equalTo; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.plugins.Plugin; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.sort.plugin.CustomSortBuilder; import org.opensearch.search.sort.plugin.CustomSortPlugin; import org.opensearch.test.InternalSettingsPlugin; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; -public 
class SortFromPluginIT extends OpenSearchIntegTestCase { +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.hamcrest.Matchers.equalTo; + +public class SortFromPluginIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + public SortFromPluginIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } @Override protected Collection<Class<? extends Plugin>> nodePlugins() { @@ -38,6 +53,7 @@ public void testPluginSort() throws Exception { client().prepareIndex("test").setId("3").setSource("field", 0).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test").addSort(new CustomSortBuilder("field", SortOrder.ASC)).get(); assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("3")); @@ -59,6 +75,7 @@ public void testPluginSortXContent() throws Exception { client().prepareIndex("test").setId("3").setSource("field", 0).get(); refresh(); + indexRandomForConcurrentSearch("test"); // builder -> json -> builder SearchResponse searchResponse = client().prepareSearch("test") diff --git a/server/src/internalClusterTest/java/org/opensearch/search/source/MetadataFetchingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/source/MetadataFetchingIT.java index 4f6dd89285bee..4c1e47ef8da99 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/source/MetadataFetchingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/source/MetadataFetchingIT.java @@ -31,32 +31,52 @@ package org.opensearch.search.source; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.search.join.ScoreMode; import org.opensearch.ExceptionsHelper; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.index.query.InnerHitBuilder; import org.opensearch.index.query.NestedQueryBuilder; import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.search.SearchException; import org.opensearch.search.SearchHits; import org.opensearch.search.fetch.subphase.FetchSourceContext; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; +import java.util.Arrays; +import java.util.Collection; import java.util.Collections; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -public class MetadataFetchingIT extends OpenSearchIntegTestCase { - public void testSimple() { +public class MetadataFetchingIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public MetadataFetchingIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), 
false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + public void testSimple() throws InterruptedException { assertAcked(prepareCreate("test")); ensureGreen(); client().prepareIndex("test").setId("1").setSource("field", "value").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test").storedFields("_none_").setFetchSource(false).setVersion(true).get(); assertThat(response.getHits().getAt(0).getId(), nullValue()); @@ -68,12 +88,12 @@ public void testSimple() { assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue()); } - public void testInnerHits() { + public void testInnerHits() throws InterruptedException { assertAcked(prepareCreate("test").setMapping("nested", "type=nested")); ensureGreen(); client().prepareIndex("test").setId("1").setSource("field", "value", "nested", Collections.singletonMap("title", "foo")).get(); refresh(); - + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .storedFields("_none_") .setFetchSource(false) @@ -94,12 +114,13 @@ public void testInnerHits() { assertThat(hits.getAt(0).getSourceAsString(), nullValue()); } - public void testWithRouting() { + public void testWithRouting() throws InterruptedException { assertAcked(prepareCreate("test")); ensureGreen(); client().prepareIndex("test").setId("1").setSource("field", "value").setRouting("toto").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test").storedFields("_none_").setFetchSource(false).get(); assertThat(response.getHits().getAt(0).getId(), nullValue()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/source/SourceFetchingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/source/SourceFetchingIT.java index 11223d11ff30d..294657cedcc5c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/source/SourceFetchingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/source/SourceFetchingIT.java @@ -32,20 +32,41 @@ package org.opensearch.search.source; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.SearchResponse; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.common.settings.Settings; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.core.IsEqual.equalTo; -public class SourceFetchingIT extends OpenSearchIntegTestCase { - public void testSourceDefaultBehavior() { +public class SourceFetchingIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public SourceFetchingIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + public void testSourceDefaultBehavior() throws InterruptedException { createIndex("test"); ensureGreen(); index("test", 
"type1", "1", "field", "value"); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test").get(); assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); @@ -58,12 +79,13 @@ public void testSourceDefaultBehavior() { } - public void testSourceFiltering() { + public void testSourceFiltering() throws InterruptedException { createIndex("test"); ensureGreen(); client().prepareIndex("test").setId("1").setSource("field1", "value", "field2", "value2").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test").setFetchSource(false).get(); assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue()); @@ -91,12 +113,13 @@ public void testSourceFiltering() { * Test Case for #5132: Source filtering with wildcards broken when given multiple patterns * https://github.com/elastic/elasticsearch/issues/5132 */ - public void testSourceWithWildcardFiltering() { + public void testSourceWithWildcardFiltering() throws InterruptedException { createIndex("test"); ensureGreen(); client().prepareIndex("test").setId("1").setSource("field", "value").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test").setFetchSource(new String[] { "*.notexisting", "field" }, null).get(); assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/stats/ConcurrentSearchStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/stats/ConcurrentSearchStatsIT.java new file mode 100644 index 0000000000000..f8d2955440bc4 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/search/stats/ConcurrentSearchStatsIT.java @@ -0,0 +1,373 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.stats; + +import org.opensearch.action.admin.cluster.node.stats.NodesStatsAction; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.opensearch.action.admin.indices.stats.IndexStats; +import org.opensearch.action.admin.indices.stats.IndicesStatsRequestBuilder; +import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.IndexModule; +import org.opensearch.index.IndexSettings; +import org.opensearch.indices.IndicesQueryCache; +import org.opensearch.indices.IndicesService; +import org.opensearch.plugins.Plugin; +import org.opensearch.script.MockScriptPlugin; +import org.opensearch.script.Script; +import org.opensearch.script.ScriptType; +import org.opensearch.search.SearchService; +import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.threadpool.ThreadPoolStats; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Locale; +import java.util.Map; +import java.util.function.Function; + +import static org.opensearch.index.query.QueryBuilders.scriptQuery; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.lessThan; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 1, numClientNodes = 0, supportsDedicatedMasters = false) +public class ConcurrentSearchStatsIT extends OpenSearchIntegTestCase { + + private final int SEGMENT_SLICE_COUNT = 4; + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Arrays.asList(ScriptedDelayedPlugin.class, InternalSettingsPlugin.class); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + // Filter/Query cache is cleaned periodically, default is 60s, so make sure it runs often. 
A Thread.sleep for the full 60s would make the test unacceptably slow. + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(IndicesService.INDICES_CACHE_CLEAN_INTERVAL_SETTING.getKey(), "1ms") + .put(IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.getKey(), true) + .put(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY, SEGMENT_SLICE_COUNT) + .put(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true) + .build(); + } + + @Override + public Settings indexSettings() { + return Settings.builder() + .put(super.indexSettings()) + .put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), false) + .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 0) + .build(); + } + + public void testConcurrentQueryCount() throws Exception { + String INDEX_1 = "test-" + randomAlphaOfLength(5).toLowerCase(Locale.ROOT); + String INDEX_2 = "test-" + randomAlphaOfLength(5).toLowerCase(Locale.ROOT); + int NUM_SHARDS = randomIntBetween(1, 5); + createIndex( + INDEX_1, + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, NUM_SHARDS) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + createIndex( + INDEX_2, + Settings.builder() + .put(indexSettings()) + .put("search.concurrent_segment_search.enabled", false) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, NUM_SHARDS) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + + ensureGreen(); + + indexRandom( + false, + true, + client().prepareIndex(INDEX_1).setId("1").setSource("foo", "bar"), + client().prepareIndex(INDEX_1).setId("2").setSource("foo", "baz"), + client().prepareIndex(INDEX_2).setId("1").setSource("foo", "bar"), + client().prepareIndex(INDEX_2).setId("2").setSource("foo", "baz") + ); + + refresh(); + + // Search with custom plugin to ensure that queryTime is significant + client().prepareSearch(INDEX_1, INDEX_2) + .setQuery(scriptQuery(new Script(ScriptType.INLINE, "mockscript", ScriptedDelayedPlugin.SCRIPT_NAME, Collections.emptyMap()))) + .execute() + .actionGet(); + client().prepareSearch(INDEX_1) + .setQuery(scriptQuery(new Script(ScriptType.INLINE, "mockscript", ScriptedDelayedPlugin.SCRIPT_NAME, Collections.emptyMap()))) + .execute() + .actionGet(); + client().prepareSearch(INDEX_2) + .setQuery(scriptQuery(new Script(ScriptType.INLINE, "mockscript", ScriptedDelayedPlugin.SCRIPT_NAME, Collections.emptyMap()))) + .execute() + .actionGet(); + + IndicesStatsRequestBuilder builder = client().admin().indices().prepareStats(); + IndicesStatsResponse stats = builder.execute().actionGet(); + + assertEquals(4 * NUM_SHARDS, stats.getTotal().search.getTotal().getQueryCount()); + assertEquals(2 * NUM_SHARDS, stats.getTotal().search.getTotal().getConcurrentQueryCount()); + assertThat(stats.getTotal().search.getTotal().getQueryTimeInMillis(), greaterThan(0L)); + assertThat(stats.getTotal().search.getTotal().getConcurrentQueryTimeInMillis(), greaterThan(0L)); + assertThat( + stats.getTotal().search.getTotal().getConcurrentQueryTimeInMillis(), + lessThan(stats.getTotal().search.getTotal().getQueryTimeInMillis()) + ); + } + + /** + * Test that average concurrency is correctly calculated across indices on the same node + */ + public void testAvgConcurrencyNodeLevel() throws InterruptedException { + int NUM_SHARDS = 1; + String INDEX_1 = "test-" + randomAlphaOfLength(5).toLowerCase(Locale.ROOT); + String INDEX_2 = "test-" + randomAlphaOfLength(5).toLowerCase(Locale.ROOT); + + // 
Create index test1 with 4 segments + createIndex( + INDEX_1, + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, NUM_SHARDS) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + ensureGreen(); + for (int i = 0; i < 4; i++) { + client().prepareIndex(INDEX_1).setId(Integer.toString(i)).setSource("field", "value" + i).get(); + refresh(); + } + + client().prepareSearch(INDEX_1).execute().actionGet(); + NodesStatsResponse nodesStatsResponse = client().admin().cluster().prepareNodesStats().execute().actionGet(); + + assertEquals(1, nodesStatsResponse.getNodes().size(), 0); + double expectedConcurrency = SEGMENT_SLICE_COUNT; + assertEquals( + SEGMENT_SLICE_COUNT, + nodesStatsResponse.getNodes().get(0).getIndices().getSearch().getTotal().getConcurrentAvgSliceCount(), + 0 + ); + + forceMerge(); + // Sleep to make sure force merge completes + Thread.sleep(1000); + client().prepareSearch(INDEX_1).execute().actionGet(); + + nodesStatsResponse = client().admin().cluster().prepareNodesStats().execute().actionGet(); + + assertEquals(1, nodesStatsResponse.getNodes().size(), 0); + expectedConcurrency = (SEGMENT_SLICE_COUNT + 1) / 2.0; + assertEquals( + expectedConcurrency, + nodesStatsResponse.getNodes().get(0).getIndices().getSearch().getTotal().getConcurrentAvgSliceCount(), + 0 + ); + + // Create second index test2 with 4 segments + createIndex( + INDEX_2, + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, NUM_SHARDS) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + ensureGreen(); + for (int i = 0; i < 4; i++) { + client().prepareIndex(INDEX_2).setId(Integer.toString(i)).setSource("field", "value" + i).get(); + refresh(); + } + + client().prepareSearch(INDEX_2).execute().actionGet(); + nodesStatsResponse = client().admin().cluster().prepareNodesStats().execute().actionGet(); + + assertEquals(1, nodesStatsResponse.getNodes().size(), 0); + expectedConcurrency = (SEGMENT_SLICE_COUNT + 1 + SEGMENT_SLICE_COUNT) / 3.0; + assertEquals( + expectedConcurrency, + nodesStatsResponse.getNodes().get(0).getIndices().getSearch().getTotal().getConcurrentAvgSliceCount(), + 0 + ); + + forceMerge(); + // Sleep to make sure force merge completes + Thread.sleep(1000); + client().prepareSearch(INDEX_2).execute().actionGet(); + nodesStatsResponse = client().admin().cluster().prepareNodesStats().execute().actionGet(); + + assertEquals(1, nodesStatsResponse.getNodes().size(), 0); + expectedConcurrency = (SEGMENT_SLICE_COUNT + 1 + SEGMENT_SLICE_COUNT + 1) / 4.0; + assertEquals( + expectedConcurrency, + nodesStatsResponse.getNodes().get(0).getIndices().getSearch().getTotal().getConcurrentAvgSliceCount(), + 0 + ); + + // Check that non-concurrent search requests do not affect the average concurrency + client().admin() + .indices() + .prepareUpdateSettings(INDEX_1) + .setSettings(Settings.builder().put("search.concurrent_segment_search.enabled", false)) + .execute() + .actionGet(); + client().admin() + .indices() + .prepareUpdateSettings(INDEX_2) + .setSettings(Settings.builder().put("search.concurrent_segment_search.enabled", false)) + .execute() + .actionGet(); + client().prepareSearch(INDEX_1).execute().actionGet(); + client().prepareSearch(INDEX_2).execute().actionGet(); + // Re-fetch the node stats; asserting on the earlier response would make these checks vacuous + nodesStatsResponse = client().admin().cluster().prepareNodesStats().execute().actionGet(); + assertEquals(1, nodesStatsResponse.getNodes().size(), 0); + assertEquals( + expectedConcurrency, + nodesStatsResponse.getNodes().get(0).getIndices().getSearch().getTotal().getConcurrentAvgSliceCount(), + 0 + ); + } + + /** + * Test average
concurrency is correctly calculated across shards of the same index + */ + public void testAvgConcurrencyIndexLevel() throws InterruptedException { + int NUM_SHARDS = 2; + String INDEX = "test-" + randomAlphaOfLength(5).toLowerCase(Locale.ROOT); + createIndex( + INDEX, + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, NUM_SHARDS) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + ensureGreen(); + // Create 4 segments on each shard + for (int i = 0; i < 4; i++) { + client().prepareIndex(INDEX).setId(Integer.toString(i)).setSource("field", "value" + i).setRouting("0").get(); + refresh(); + } + for (int i = 4; i < 8; i++) { + client().prepareIndex(INDEX).setId(Integer.toString(i)).setSource("field", "value" + i).setRouting("1").get(); + refresh(); + } + client().prepareSearch(INDEX).execute().actionGet(); + IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats().execute().actionGet(); + + IndexStats stats = indicesStatsResponse.getIndices().get(INDEX); + assertNotNull(stats); + double expectedConcurrency = (SEGMENT_SLICE_COUNT * NUM_SHARDS) / (double) NUM_SHARDS; + assertEquals(expectedConcurrency, stats.getTotal().getSearch().getTotal().getConcurrentAvgSliceCount(), 0); + + forceMerge(); + // Sleep to make sure force merge completes + Thread.sleep(1000); + client().prepareSearch(INDEX).execute().actionGet(); + + indicesStatsResponse = client().admin().indices().prepareStats().execute().actionGet(); + stats = indicesStatsResponse.getIndices().get(INDEX); + assertNotNull(stats); + expectedConcurrency = (SEGMENT_SLICE_COUNT * NUM_SHARDS + 1 * NUM_SHARDS) / (NUM_SHARDS * 2.0); + assertEquals(expectedConcurrency, stats.getTotal().getSearch().getTotal().getConcurrentAvgSliceCount(), 0); + + // Check that non-concurrent search requests do not affect the average concurrency + client().admin() + .indices() + .prepareUpdateSettings(INDEX) + .setSettings(Settings.builder().put("search.concurrent_segment_search.enabled", false)) + .execute() + .actionGet(); + + client().prepareSearch(INDEX).execute().actionGet(); + + indicesStatsResponse = client().admin().indices().prepareStats().execute().actionGet(); + stats = indicesStatsResponse.getIndices().get(INDEX); + assertNotNull(stats); + assertEquals(expectedConcurrency, stats.getTotal().getSearch().getTotal().getConcurrentAvgSliceCount(), 0); + } + + public void testThreadPoolWaitTime() throws Exception { + int NUM_SHARDS = 1; + String INDEX = "test-" + randomAlphaOfLength(5).toLowerCase(Locale.ROOT); + createIndex( + INDEX, + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, NUM_SHARDS) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + + ensureGreen(); + + for (int i = 0; i < 10; i++) { + client().prepareIndex(INDEX).setId(Integer.toString(i)).setSource("field", "value" + i).get(); + refresh(); + } + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey(), 10)) + .execute() + .actionGet(); + + client().prepareSearch(INDEX).execute().actionGet(); + + NodesStatsRequestBuilder nodesStatsRequestBuilder = new NodesStatsRequestBuilder( + client().admin().cluster(), + NodesStatsAction.INSTANCE + ).setNodesIds().all(); + NodesStatsResponse nodesStatsResponse = nodesStatsRequestBuilder.execute().actionGet(); + ThreadPoolStats threadPoolStats = nodesStatsResponse.getNodes().get(0).getThreadPool(); + + for 
(ThreadPoolStats.Stats stats : threadPoolStats) { + if (stats.getName().equals(ThreadPool.Names.INDEX_SEARCHER)) { + assertThat(stats.getWaitTime().micros(), greaterThan(0L)); + } + } + + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey(), 2)) + .execute() + .actionGet(); + } + + public static class ScriptedDelayedPlugin extends MockScriptPlugin { + static final String SCRIPT_NAME = "search_timeout"; + + @Override + public Map<String, Function<Map<String, Object>, Object>> pluginScripts() { + return Collections.singletonMap(SCRIPT_NAME, params -> { + try { + Thread.sleep(500); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + return true; + }); + } + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/search/stats/SearchStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/stats/SearchStatsIT.java index c72b5d40553b3..99cb3a4e8ca20 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/stats/SearchStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/stats/SearchStatsIT.java @@ -32,9 +32,12 @@ package org.opensearch.search.stats; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; +import org.opensearch.action.search.SearchPhaseName; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.routing.GroupShardsIterator; @@ -50,7 +53,9 @@ import org.opensearch.script.ScriptType; import org.opensearch.search.fetch.subphase.highlight.HighlightBuilder; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashSet; @@ -58,9 +63,11 @@ import java.util.Set; import java.util.function.Function; +import static org.opensearch.action.search.SearchRequestStats.SEARCH_REQUEST_STATS_ENABLED_KEY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAllSuccessful; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; @@ -72,8 +79,20 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -@OpenSearchIntegTestCase.ClusterScope(minNumDataNodes = 2) -public class SearchStatsIT extends OpenSearchIntegTestCase { +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, minNumDataNodes = 2) +public class SearchStatsIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public SearchStatsIT(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { 
Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } @Override protected Collection<Class<? extends Plugin>> nodePlugins() { @@ -103,6 +122,11 @@ public void testSimpleStats() throws Exception { assertThat(numNodes, greaterThanOrEqualTo(2)); final int shardsIdx1 = randomIntBetween(1, 10); // we make sure each node gets at least a single shard... final int shardsIdx2 = Math.max(numNodes - shardsIdx1, randomIntBetween(1, 10)); + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(SEARCH_REQUEST_STATS_ENABLED_KEY, true).build()) + .get(); assertThat(numNodes, lessThanOrEqualTo(shardsIdx1 + shardsIdx2)); assertAcked( prepareCreate("test1").setSettings( @@ -165,20 +189,40 @@ public void testSimpleStats() throws Exception { Set<String> nodeIdsWithIndex = nodeIdsWithIndex("test1", "test2"); int num = 0; + int numOfCoordinators = 0; + for (NodeStats stat : nodeStats.getNodes()) { Stats total = stat.getIndices().getSearch().getTotal(); + if (total.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.QUERY.getName()).getTimeInMillis() > 0) { + assertThat( + total.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.FETCH.getName()).getTimeInMillis(), + greaterThan(0L) + ); + assertEquals( + iters, + total.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.FETCH.getName()).getTotal() + ); + assertEquals( + iters, + total.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.EXPAND.getName()).getTotal() + ); + assertEquals( + iters, + total.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.QUERY.getName()).getTotal() + ); + numOfCoordinators += 1; + } if (nodeIdsWithIndex.contains(stat.getNode().getId())) { assertThat(total.getQueryCount(), greaterThan(0L)); assertThat(total.getQueryTimeInMillis(), greaterThan(0L)); num++; } else { - assertThat(total.getQueryCount(), equalTo(0L)); + assertThat(total.getQueryCount(), greaterThanOrEqualTo(0L)); assertThat(total.getQueryTimeInMillis(), equalTo(0L)); } } - + assertThat(numOfCoordinators, greaterThan(0)); assertThat(num, greaterThan(0)); - } private Set<String> nodeIdsWithIndex(String... 
indices) { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java index 8eb957733944d..c72e128a88045 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java @@ -31,7 +31,9 @@ package org.opensearch.search.suggest; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import com.carrotsearch.randomizedtesting.generators.RandomStrings; + import org.apache.lucene.analysis.TokenStreamToAutomaton; import org.apache.lucene.search.suggest.document.ContextSuggestField; import org.apache.lucene.tests.util.LuceneTestCase.SuppressCodecs; @@ -46,8 +48,8 @@ import org.opensearch.common.FieldMemoryStats; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.Fuzziness; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.mapper.MapperParsingException; import org.opensearch.index.mapper.MapperService; import org.opensearch.plugins.Plugin; @@ -61,8 +63,8 @@ import org.opensearch.search.suggest.completion.context.CategoryContextMapping; import org.opensearch.search.suggest.completion.context.ContextMapping; import org.opensearch.search.suggest.completion.context.GeoContextMapping; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; @@ -76,10 +78,12 @@ import java.util.Set; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.opensearch.action.support.WriteRequest.RefreshPolicy.WAIT_UNTIL; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; -import static org.opensearch.common.util.CollectionUtils.iterableAsArrayList; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.core.common.util.CollectionUtils.iterableAsArrayList; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAllSuccessful; import static org.opensearch.test.hamcrest.OpenSearchAssertions.hasId; @@ -95,7 +99,19 @@ import static org.hamcrest.Matchers.notNullValue; @SuppressCodecs("*") // requires custom completion format -public class CompletionSuggestSearchIT extends OpenSearchIntegTestCase { +public class CompletionSuggestSearchIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + public CompletionSuggestSearchIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + private final String INDEX = RandomStrings.randomAsciiOfLength(random(), 
10).toLowerCase(Locale.ROOT); private final String FIELD = RandomStrings.randomAsciiOfLength(random(), 10).toLowerCase(Locale.ROOT); private final CompletionMappingBuilder completionMappingBuilder = new CompletionMappingBuilder(); @@ -1150,6 +1166,7 @@ public void testSkipDuplicates() throws Exception { createIndexAndMapping(mapping); int numDocs = randomIntBetween(10, 100); int numUnique = randomIntBetween(1, numDocs); + logger.info("Suggestion duplicate parameters: numDocs {} numUnique {}", numDocs, numUnique); List<IndexRequestBuilder> indexRequestBuilders = new ArrayList<>(); int[] weights = new int[numUnique]; Integer[] termIds = new Integer[numUnique]; @@ -1159,8 +1176,10 @@ public void testSkipDuplicates() throws Exception { int weight = randomIntBetween(0, 100); weights[id] = Math.max(weight, weights[id]); String suggestion = "suggestion-" + String.format(Locale.ENGLISH, "%03d", id); + logger.info("Creating {}, doc {}, id {}, weight {}", suggestion, i, id, weight); indexRequestBuilders.add( client().prepareIndex(INDEX) + .setRefreshPolicy(WAIT_UNTIL) .setSource( jsonBuilder().startObject() .startObject(FIELD) @@ -1174,10 +1193,12 @@ public void testSkipDuplicates() throws Exception { indexRandom(true, indexRequestBuilders); Arrays.sort(termIds, Comparator.comparingInt(o -> weights[(int) o]).reversed().thenComparingInt(a -> (int) a)); + logger.info("Expected terms id ordered {}", (Object[]) termIds); String[] expected = new String[numUnique]; for (int i = 0; i < termIds.length; i++) { expected[i] = "suggestion-" + String.format(Locale.ENGLISH, "%03d", termIds[i]); } + logger.info("Expected suggestions field values {}", (Object[]) expected); CompletionSuggestionBuilder completionSuggestionBuilder = SuggestBuilders.completionSuggestion(FIELD) .prefix("sugg") .skipDuplicates(true) @@ -1186,6 +1207,7 @@ public void testSkipDuplicates() throws Exception { SearchResponse searchResponse = client().prepareSearch(INDEX) .suggest(new SuggestBuilder().addSuggestion("suggestions", completionSuggestionBuilder)) .get(); + logger.info("Search Response with Suggestions {}", searchResponse); assertSuggestions(searchResponse, true, "suggestions", expected); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/suggest/ContextCompletionSuggestSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/suggest/ContextCompletionSuggestSearchIT.java index 75bae17d867ff..67523e9fd424a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/suggest/ContextCompletionSuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/suggest/ContextCompletionSuggestSearchIT.java @@ -31,18 +31,19 @@ package org.opensearch.search.suggest; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import com.carrotsearch.randomizedtesting.generators.RandomStrings; -import org.apache.lucene.tests.util.LuceneTestCase.SuppressCodecs; +import org.apache.lucene.tests.util.LuceneTestCase.SuppressCodecs; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.index.IndexResponse; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.Fuzziness; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.rest.RestStatus; import 
org.opensearch.search.suggest.CompletionSuggestSearchIT.CompletionMappingBuilder; import org.opensearch.search.suggest.completion.CompletionSuggestionBuilder; import org.opensearch.search.suggest.completion.context.CategoryContextMapping; @@ -51,11 +52,12 @@ import org.opensearch.search.suggest.completion.context.ContextMapping; import org.opensearch.search.suggest.completion.context.GeoContextMapping; import org.opensearch.search.suggest.completion.context.GeoQueryContext; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashMap; @@ -64,12 +66,24 @@ import java.util.Map; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; import static org.hamcrest.core.IsEqual.equalTo; @SuppressCodecs("*") // requires custom completion format -public class ContextCompletionSuggestSearchIT extends OpenSearchIntegTestCase { +public class ContextCompletionSuggestSearchIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + public ContextCompletionSuggestSearchIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } private final String INDEX = RandomStrings.randomAsciiOfLength(random(), 10).toLowerCase(Locale.ROOT); private final String FIELD = RandomStrings.randomAsciiOfLength(random(), 10).toLowerCase(Locale.ROOT); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/suggest/SuggestSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/suggest/SuggestSearchIT.java index 3ffd6ce66831e..e0afdbc816f5c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/suggest/SuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/suggest/SuggestSearchIT.java @@ -32,16 +32,17 @@ package org.opensearch.search.suggest; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.OpenSearchException; import org.opensearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; -import org.opensearch.common.Strings; import org.opensearch.common.settings.Settings; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexSettings; import org.opensearch.plugins.Plugin; import org.opensearch.plugins.ScriptPlugin; @@ -55,7 +56,7 @@ import org.opensearch.search.suggest.phrase.StupidBackoff; import org.opensearch.search.suggest.term.TermSuggestionBuilder; import 
org.opensearch.search.suggest.term.TermSuggestionBuilder.SuggestMode; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; import java.io.IOException; @@ -74,6 +75,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.index.query.QueryBuilders.matchQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.suggest.SuggestBuilders.phraseSuggestion; import static org.opensearch.search.suggest.SuggestBuilders.termSuggestion; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -93,7 +95,18 @@ * possible these tests should declare for the first request, make the request, modify the configuration for the next request, make that * request, modify again, request again, etc. This makes it very obvious what changes between requests. */ -public class SuggestSearchIT extends OpenSearchIntegTestCase { +public class SuggestSearchIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + public SuggestSearchIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } // see #3196 public void testSuggestAcrossMultipleIndices() throws IOException { @@ -268,6 +281,7 @@ public void testSizeOneShard() throws Exception { index("test", "type1", Integer.toString(i), "text", "abc" + i); } refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse search = client().prepareSearch().setQuery(matchQuery("text", "spellchecker")).get(); assertThat("didn't ask for suggestions but got some", search.getSuggest(), nullValue()); @@ -350,6 +364,7 @@ public void testSimple() throws Exception { index("test", "type1", "3", "text", "abbd"); index("test", "type1", "4", "text", "abcc"); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse search = client().prepareSearch().setQuery(matchQuery("text", "spellcecker")).get(); assertThat("didn't ask for suggestions but got some", search.getSuggest(), nullValue()); @@ -1305,14 +1320,13 @@ public void testPhraseSuggesterCollate() throws InterruptedException, ExecutionE assertSuggestionSize(searchSuggest, 0, 10, "title"); // suggest with collate - String filterString = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("match_phrase") - .field("{{field}}", "{{suggestion}}") - .endObject() - .endObject() - ); + String filterString = XContentFactory.jsonBuilder() + .startObject() + .startObject("match_phrase") + .field("{{field}}", "{{suggestion}}") + .endObject() + .endObject() + .toString(); PhraseSuggestionBuilder filteredQuerySuggest = suggest.collateQuery(filterString); filteredQuerySuggest.collateParams(Collections.singletonMap("field", "title")); searchSuggest = searchSuggest("united states house of representatives elections in washington 2006", "title", filteredQuerySuggest); @@ -1325,9 +1339,13 @@ public void testPhraseSuggesterCollate() throws InterruptedException, ExecutionE NumShards numShards = getNumShards("test"); // 
collate suggest with bad query - String incorrectFilterString = Strings.toString( - XContentFactory.jsonBuilder().startObject().startObject("test").field("title", "{{suggestion}}").endObject().endObject() - ); + String incorrectFilterString = XContentFactory.jsonBuilder() + .startObject() + .startObject("test") + .field("title", "{{suggestion}}") + .endObject() + .endObject() + .toString(); PhraseSuggestionBuilder incorrectFilteredSuggest = suggest.collateQuery(incorrectFilterString); Map<String, SuggestionBuilder<?>> namedSuggestion = new HashMap<>(); namedSuggestion.put("my_title_suggestion", incorrectFilteredSuggest); @@ -1339,9 +1357,13 @@ public void testPhraseSuggesterCollate() throws InterruptedException, ExecutionE } // suggest with collation - String filterStringAsFilter = Strings.toString( - XContentFactory.jsonBuilder().startObject().startObject("match_phrase").field("title", "{{suggestion}}").endObject().endObject() - ); + String filterStringAsFilter = XContentFactory.jsonBuilder() + .startObject() + .startObject("match_phrase") + .field("title", "{{suggestion}}") + .endObject() + .endObject() + .toString(); PhraseSuggestionBuilder filteredFilterSuggest = suggest.collateQuery(filterStringAsFilter); searchSuggest = searchSuggest( @@ -1352,9 +1374,13 @@ public void testPhraseSuggesterCollate() throws InterruptedException, ExecutionE assertSuggestionSize(searchSuggest, 0, 2, "title"); // collate suggest with bad query - String filterStr = Strings.toString( - XContentFactory.jsonBuilder().startObject().startObject("pprefix").field("title", "{{suggestion}}").endObject().endObject() - ); + String filterStr = XContentFactory.jsonBuilder() + .startObject() + .startObject("pprefix") + .field("title", "{{suggestion}}") + .endObject() + .endObject() + .toString(); suggest.collateQuery(filterStr); try { @@ -1365,14 +1391,13 @@ public void testPhraseSuggesterCollate() throws InterruptedException, ExecutionE } // collate script failure due to no additional params - String collateWithParams = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("{{query_type}}") - .field("{{query_field}}", "{{suggestion}}") - .endObject() - .endObject() - ); + String collateWithParams = XContentFactory.jsonBuilder() + .startObject() + .startObject("{{query_type}}") + .field("{{query_field}}", "{{suggestion}}") + .endObject() + .endObject() + .toString(); try { searchSuggest("united states house of representatives elections in washington 2006", numShards.numPrimaries, namedSuggestion); diff --git a/server/src/internalClusterTest/java/org/opensearch/similarity/SimilarityIT.java b/server/src/internalClusterTest/java/org/opensearch/similarity/SimilarityIT.java index 929aac388b678..b89541c647580 100644 --- a/server/src/internalClusterTest/java/org/opensearch/similarity/SimilarityIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/similarity/SimilarityIT.java @@ -32,17 +32,35 @@ package org.opensearch.similarity; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static 
org.opensearch.index.query.QueryBuilders.matchQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; -public class SimilarityIT extends OpenSearchIntegTestCase { +public class SimilarityIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + public SimilarityIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + public void testCustomBM25Similarity() throws Exception { try { client().admin().indices().prepareDelete("test").execute().actionGet(); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/AbortedRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/AbortedRestoreIT.java index 97faacb38bc50..318c7d82380ca 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/AbortedRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/AbortedRestoreIT.java @@ -32,17 +32,16 @@ package org.opensearch.snapshots; -import org.opensearch.action.ActionFuture; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.opensearch.action.admin.indices.recovery.RecoveryResponse; import org.opensearch.action.support.IndicesOptions; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.action.ActionFuture; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPoolStats; - import org.hamcrest.Matcher; import java.util.List; diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java index 3de982f89ac80..83f93ab9ff6b5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java @@ -31,46 +31,42 @@ package org.opensearch.snapshots; -import org.opensearch.action.ActionFuture; +import org.opensearch.action.ActionRunnable; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotIndexStatus; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotStatus; +import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.common.UUIDs; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.index.IndexNotFoundException; +import org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; +import 
org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshots; import org.opensearch.index.snapshots.blobstore.IndexShardSnapshot; +import org.opensearch.index.snapshots.blobstore.SnapshotFiles; +import org.opensearch.repositories.IndexId; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.RepositoryData; +import org.opensearch.repositories.RepositoryShardId; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.snapshots.mockstore.MockRepository; -import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchIntegTestCase; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Collection; import java.util.List; +import java.util.concurrent.ExecutionException; +import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.containsString; - -import org.opensearch.action.ActionRunnable; -import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.common.UUIDs; -import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; -import org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshots; -import org.opensearch.index.snapshots.blobstore.SnapshotFiles; -import org.opensearch.repositories.IndexId; -import org.opensearch.repositories.RepositoryShardId; -import org.opensearch.repositories.blobstore.BlobStoreRepository; - -import java.nio.file.Path; -import java.util.concurrent.ExecutionException; - import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; @@ -161,9 +157,10 @@ public void testCloneSnapshotIndex() throws Exception { public void testCloneShallowSnapshotIndex() throws Exception { disableRepoConsistencyCheck("This test uses remote store repository"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - internalCluster().startClusterManagerOnlyNode(); - internalCluster().startDataOnlyNode(); + final String remoteStoreRepoName = "remote-store-repo-name"; + final Path remoteStoreRepoPath = randomRepoPath(); + internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName, remoteStoreRepoPath)); + internalCluster().startDataOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName, remoteStoreRepoPath)); final String snapshotRepoName = "snapshot-repo-name"; final Path snapshotRepoPath = randomRepoPath(); @@ -173,15 +170,11 @@ public void testCloneShallowSnapshotIndex() throws Exception { final Path shallowSnapshotRepoPath = randomRepoPath(); createRepository(shallowSnapshotRepoName, "fs", snapshotRepoSettingsForShallowCopy(shallowSnapshotRepoPath)); - final Path remoteStoreRepoPath = randomRepoPath(); - final String remoteStoreRepoName = "remote-store-repo-name"; - createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); - final String indexName = "index-1"; createIndexWithRandomDocs(indexName, randomIntBetween(5, 10)); final String remoteStoreEnabledIndexName = "remote-index-1"; - final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepoName); + final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); indexRandomDocs(remoteStoreEnabledIndexName, randomIntBetween(5, 10)); @@ -208,23 +201,25 @@ public void 
testCloneShallowSnapshotIndex() throws Exception { public void testShallowCloneNameAvailability() throws Exception { disableRepoConsistencyCheck("This test uses remote store repository"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - internalCluster().startClusterManagerOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS); - internalCluster().startDataOnlyNode(); + final String remoteStoreRepoName = "remote-store-repo-name"; + final Path remoteStorePath = randomRepoPath().toAbsolutePath(); + internalCluster().startClusterManagerOnlyNode( + Settings.builder() + .put(LARGE_SNAPSHOT_POOL_SETTINGS) + .put(remoteStoreClusterSettings(remoteStoreRepoName, remoteStorePath)) + .build() + ); + internalCluster().startDataOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName, remoteStorePath)); final String shallowSnapshotRepoName = "shallow-snapshot-repo-name"; final Path shallowSnapshotRepoPath = randomRepoPath(); createRepository(shallowSnapshotRepoName, "fs", snapshotRepoSettingsForShallowCopy(shallowSnapshotRepoPath)); - final Path remoteStoreRepoPath = randomRepoPath(); - final String remoteStoreRepoName = "remote-store-repo-name"; - createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); - final String indexName = "index-1"; createIndexWithRandomDocs(indexName, randomIntBetween(5, 10)); final String remoteStoreEnabledIndexName = "remote-index-1"; - final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepoName); + final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); indexRandomDocs(remoteStoreEnabledIndexName, randomIntBetween(5, 10)); @@ -243,23 +238,20 @@ public void testShallowCloneNameAvailability() throws Exception { public void testCloneAfterRepoShallowSettingEnabled() throws Exception { disableRepoConsistencyCheck("This test uses remote store repository"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - internalCluster().startClusterManagerOnlyNode(); - internalCluster().startDataOnlyNode(); + final String remoteStoreRepoName = "remote-store-repo-name"; + final Path remoteStoreRepoPath = randomRepoPath(); + internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName, remoteStoreRepoPath)); + internalCluster().startDataOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName, remoteStoreRepoPath)); final String snapshotRepoName = "snapshot-repo-name"; final Path snapshotRepoPath = randomRepoPath(); createRepository(snapshotRepoName, "fs", snapshotRepoPath); - final Path remoteStoreRepoPath = randomRepoPath(); - final String remoteStoreRepoName = "remote-store-repo-name"; - createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); - final String indexName = "index-1"; createIndexWithRandomDocs(indexName, randomIntBetween(5, 10)); final String remoteStoreEnabledIndexName = "remote-index-1"; - final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepoName); + final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); indexRandomDocs(remoteStoreEnabledIndexName, randomIntBetween(5, 10)); @@ -280,23 +272,20 @@ public void testCloneAfterRepoShallowSettingEnabled() throws Exception { public void testCloneAfterRepoShallowSettingDisabled() throws Exception { disableRepoConsistencyCheck("This test uses remote store repository"); - 
FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - internalCluster().startClusterManagerOnlyNode(); - internalCluster().startDataOnlyNode(); + final String remoteStoreRepoName = "remote-store-repo-name"; + final Path remoteStoreRepoPath = randomRepoPath(); + internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName, remoteStoreRepoPath)); + internalCluster().startDataOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName, remoteStoreRepoPath)); final String snapshotRepoName = "snapshot-repo-name"; final Path snapshotRepoPath = randomRepoPath(); createRepository(snapshotRepoName, "fs", snapshotRepoSettingsForShallowCopy(snapshotRepoPath)); - final Path remoteStoreRepoPath = randomRepoPath(); - final String remoteStoreRepoName = "remote-store-repo-name"; - createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); - final String indexName = "index-1"; createIndexWithRandomDocs(indexName, randomIntBetween(5, 10)); final String remoteStoreEnabledIndexName = "remote-index-1"; - final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepoName); + final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); indexRandomDocs(remoteStoreEnabledIndexName, randomIntBetween(5, 10)); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java index e2c2c1dfe62d8..15e92f6f7204b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java @@ -32,8 +32,6 @@ package org.opensearch.snapshots; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionFuture; -import org.opensearch.action.ActionListener; import org.opensearch.action.StepListener; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; @@ -46,9 +44,11 @@ import org.opensearch.cluster.SnapshotDeletionsInProgress; import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.common.UUIDs; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.UncategorizedExecutionException; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; import org.opensearch.discovery.AbstractDisruptionTestCase; import org.opensearch.plugins.Plugin; @@ -56,8 +56,8 @@ import org.opensearch.repositories.RepositoryException; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.snapshots.mockstore.MockRepository; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.disruption.NetworkDisruption; import org.opensearch.test.transport.MockTransportService; diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/CorruptedBlobStoreRepositoryIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/CorruptedBlobStoreRepositoryIT.java index 483d698f3c9a4..e685aaa52df00 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/snapshots/CorruptedBlobStoreRepositoryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/CorruptedBlobStoreRepositoryIT.java @@ -41,8 +41,8 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.repositories.IndexId; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 85c1a7251d143..7a52c8aa5018e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -33,7 +33,6 @@ package org.opensearch.snapshots; import org.opensearch.Version; -import org.opensearch.action.ActionFuture; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; @@ -54,24 +53,26 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.CheckedFunction; -import org.opensearch.core.common.Strings; -import org.opensearch.core.ParseField; import org.opensearch.common.Priority; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsFilter; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.set.Sets; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.env.Environment; import org.opensearch.index.seqno.RetentionLeaseActions; import org.opensearch.index.seqno.RetentionLeases; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.node.Node; @@ -81,13 +82,12 @@ import org.opensearch.rest.AbstractRestChannel; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; -import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.action.admin.cluster.RestClusterStateAction; import org.opensearch.rest.action.admin.cluster.RestGetRepositoriesAction; import 
org.opensearch.snapshots.mockstore.MockRepository; +import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; -import org.opensearch.test.InternalTestCluster; import org.opensearch.test.TestCustomMetadata; import org.opensearch.test.disruption.BusyClusterManagerServiceDisruption; import org.opensearch.test.disruption.ServiceDisruptionScheme; @@ -1457,6 +1457,13 @@ public void testIndexDeletionDuringSnapshotCreationInQueue() throws Exception { clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap").get(); ensureGreen("test-idx"); + + // Wait for snapshot process to complete to prevent conflict with repository clean up + assertBusy(() -> { + SnapshotInfo snapshotInfo = getSnapshot("test-repo", "test-snap-2"); + assertTrue(snapshotInfo.state().completed()); + assertEquals(SnapshotState.PARTIAL, snapshotInfo.state()); + }, 1, TimeUnit.MINUTES); } private long calculateTotalFilesSize(List<Path> files) { diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java index 2688449294f3d..73feeb84308ab 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java @@ -8,53 +8,56 @@ package org.opensearch.snapshots; -import org.opensearch.action.ActionFuture; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.UUIDs; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; -import org.opensearch.test.FeatureFlagSetter; +import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase; import java.nio.file.Path; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.Locale; +import java.util.concurrent.TimeUnit; import java.util.stream.Stream; -import static org.hamcrest.Matchers.is; +import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.hamcrest.Matchers.comparesEqualTo; +import static org.hamcrest.Matchers.is; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class DeleteSnapshotIT extends AbstractSnapshotIntegTestCase { + private static final String REMOTE_REPO_NAME = "remote-store-repo-name"; + public void testDeleteSnapshot() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - internalCluster().startClusterManagerOnlyNode(); - internalCluster().startDataOnlyNode(); + final Path remoteStoreRepoPath = randomRepoPath(); + internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); + internalCluster().startDataOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); final String snapshotRepoName = "snapshot-repo-name"; final Path 
snapshotRepoPath = randomRepoPath(); createRepository(snapshotRepoName, "fs", snapshotRepoPath); - final Path remoteStoreRepoPath = randomRepoPath(); - final String remoteStoreRepoName = "remote-store-repo-name"; - createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); - final String indexName = "index-1"; createIndexWithRandomDocs(indexName, randomIntBetween(5, 10)); final String remoteStoreEnabledIndexName = "remote-index-1"; - final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepoName); + final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); indexRandomDocs(remoteStoreEnabledIndexName, randomIntBetween(5, 10)); final String snapshot = "snapshot"; createFullSnapshot(snapshotRepoName, snapshot); - assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == 0); + assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME).length == 0); assert (getRepositoryData(snapshotRepoName).getSnapshotIds().size() == 1); assertAcked(startDeleteSnapshot(snapshotRepoName, snapshot).get()); @@ -63,42 +66,38 @@ public void testDeleteSnapshot() throws Exception { public void testDeleteShallowCopySnapshot() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - internalCluster().startClusterManagerOnlyNode(); - internalCluster().startDataOnlyNode(); + final Path remoteStoreRepoPath = randomRepoPath(); + internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); + internalCluster().startDataOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); final String snapshotRepoName = "snapshot-repo-name"; createRepository(snapshotRepoName, "fs", snapshotRepoSettingsForShallowCopy()); - final Path remoteStoreRepoPath = randomRepoPath(); - final String remoteStoreRepoName = "remote-store-repo-name"; - createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); - final String indexName = "index-1"; createIndexWithRandomDocs(indexName, randomIntBetween(5, 10)); final String remoteStoreEnabledIndexName = "remote-index-1"; - final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepoName); + final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); indexRandomDocs(remoteStoreEnabledIndexName, randomIntBetween(5, 10)); final String shallowSnapshot = "shallow-snapshot"; createFullSnapshot(snapshotRepoName, shallowSnapshot); - assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == 1); + assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME).length == 1); assert (getRepositoryData(snapshotRepoName).getSnapshotIds().size() == 1); assertAcked(startDeleteSnapshot(snapshotRepoName, shallowSnapshot).get()); assert (getRepositoryData(snapshotRepoName).getSnapshotIds().size() == 0); - assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == 0); + assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME).length == 0); } // Deleting multiple shallow copy snapshots as part of single delete call with repo having only shallow copy snapshots. 
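// Note: each shallow copy snapshot is expected to hold exactly one lock file per remote-store-backed index, so the assertions below compare the lock-file count in the remote store repository against the number of shallow copy snapshots that still exist.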
+ @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9208") public void testDeleteMultipleShallowCopySnapshotsCase1() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - - internalCluster().startClusterManagerOnlyNode(); - internalCluster().startDataOnlyNode(); + final Path remoteStoreRepoPath = randomRepoPath(); + internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); + internalCluster().startDataOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); final Client clusterManagerClient = internalCluster().clusterManagerClient(); ensureStableCluster(2); @@ -108,12 +107,8 @@ public void testDeleteMultipleShallowCopySnapshotsCase1() throws Exception { final String testIndex = "index-test"; createIndexWithContent(testIndex); - final Path remoteStoreRepoPath = randomRepoPath(); - final String remoteStoreRepoName = "remote-store-repo-name"; - createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); - final String remoteStoreEnabledIndexName = "remote-index-1"; - final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepoName); + final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); indexRandomDocs(remoteStoreEnabledIndexName, randomIntBetween(5, 10)); @@ -122,7 +117,7 @@ public void testDeleteMultipleShallowCopySnapshotsCase1() throws Exception { List<String> shallowCopySnapshots = createNSnapshots(snapshotRepoName, totalShallowCopySnapshotsCount); List<String> snapshotsToBeDeleted = shallowCopySnapshots.subList(0, randomIntBetween(2, totalShallowCopySnapshotsCount)); int tobeDeletedSnapshotsCount = snapshotsToBeDeleted.size(); - assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == totalShallowCopySnapshotsCount); + assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME).length == totalShallowCopySnapshotsCount); assert (getRepositoryData(snapshotRepoName).getSnapshotIds().size() == totalShallowCopySnapshotsCount); // Deleting subset of shallow copy snapshots assertAcked( @@ -132,7 +127,7 @@ public void testDeleteMultipleShallowCopySnapshotsCase1() throws Exception { .get() ); assert (getRepositoryData(snapshotRepoName).getSnapshotIds().size() == totalShallowCopySnapshotsCount - tobeDeletedSnapshotsCount); - assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == totalShallowCopySnapshotsCount + assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME).length == totalShallowCopySnapshotsCount - tobeDeletedSnapshotsCount); } @@ -142,10 +137,9 @@ public void testDeleteMultipleShallowCopySnapshotsCase1() throws Exception { @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8610") public void testDeleteMultipleShallowCopySnapshotsCase2() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - - internalCluster().startClusterManagerOnlyNode(); - final String dataNode = internalCluster().startDataOnlyNode(); + final Path remoteStoreRepoPath = randomRepoPath(); + internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); + final String dataNode 
= internalCluster().startDataOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); ensureStableCluster(2); final String clusterManagerNode = internalCluster().getClusterManagerName(); @@ -155,12 +149,8 @@ public void testDeleteMultipleShallowCopySnapshotsCase2() throws Exception { final String testIndex = "index-test"; createIndexWithContent(testIndex); - final Path remoteStoreRepoPath = randomRepoPath(); - final String remoteStoreRepoName = "remote-store-repo-name"; - createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); - final String remoteStoreEnabledIndexName = "remote-index-1"; - final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepoName); + final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); indexRandomDocs(remoteStoreEnabledIndexName, randomIntBetween(5, 10)); @@ -201,7 +191,7 @@ public void testDeleteMultipleShallowCopySnapshotsCase2() throws Exception { int totalSnapshotsCount = totalFullCopySnapshotsCount + totalShallowCopySnapshotsCount; - assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == totalShallowCopySnapshotsCount); + assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME).length == totalShallowCopySnapshotsCount); assert (getRepositoryData(snapshotRepoName).getSnapshotIds().size() == totalSnapshotsCount); // Deleting subset of shallow copy snapshots assertAcked( @@ -213,7 +203,7 @@ public void testDeleteMultipleShallowCopySnapshotsCase2() throws Exception { totalSnapshotsCount -= tobeDeletedShallowCopySnapshotsCount; totalShallowCopySnapshotsCount -= tobeDeletedShallowCopySnapshotsCount; assert (getRepositoryData(snapshotRepoName).getSnapshotIds().size() == totalSnapshotsCount); - assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == totalShallowCopySnapshotsCount); + assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME).length == totalShallowCopySnapshotsCount); // Deleting subset of full copy snapshots assertAcked( @@ -224,32 +214,28 @@ public void testDeleteMultipleShallowCopySnapshotsCase2() throws Exception { ); totalSnapshotsCount -= tobeDeletedFullCopySnapshotsCount; assert (getRepositoryData(snapshotRepoName).getSnapshotIds().size() == totalSnapshotsCount); - assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == totalShallowCopySnapshotsCount); + assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME).length == totalShallowCopySnapshotsCount); } // Deleting subset of shallow and full copy snapshots as part of single delete call and then deleting all snapshots in the repo. 
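// Note: deleting full copy snapshots is expected to leave the remote-store lock-file count unchanged; only shallow copy snapshot deletions release lock files, and once every snapshot is gone both the repository data and the lock files should be empty.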
@AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8610") public void testDeleteMultipleShallowCopySnapshotsCase3() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - - internalCluster().startClusterManagerOnlyNode(); - internalCluster().startDataOnlyNode(); + final Path remoteStoreRepoPath = randomRepoPath(); + internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); + internalCluster().startDataOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); final Client clusterManagerClient = internalCluster().clusterManagerClient(); ensureStableCluster(2); final String snapshotRepoName = "snapshot-repo-name"; final Path snapshotRepoPath = randomRepoPath(); createRepository(snapshotRepoName, "mock", snapshotRepoSettingsForShallowCopy(snapshotRepoPath)); + final String testIndex = "index-test"; createIndexWithContent(testIndex); - final Path remoteStoreRepoPath = randomRepoPath(); - final String remoteStoreRepoName = "remote-store-repo-name"; - createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); - final String remoteStoreEnabledIndexName = "remote-index-1"; - final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepoName); + final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); indexRandomDocs(remoteStoreEnabledIndexName, randomIntBetween(5, 10)); @@ -268,7 +254,7 @@ public void testDeleteMultipleShallowCopySnapshotsCase3() throws Exception { int totalSnapshotsCount = totalFullCopySnapshotsCount + totalShallowCopySnapshotsCount; - assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == totalShallowCopySnapshotsCount); + assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME).length == totalShallowCopySnapshotsCount); assert (getRepositoryData(snapshotRepoName).getSnapshotIds().size() == totalSnapshotsCount); // Deleting subset of shallow copy snapshots and full copy snapshots assertAcked( @@ -283,12 +269,73 @@ public void testDeleteMultipleShallowCopySnapshotsCase3() throws Exception { totalSnapshotsCount -= (tobeDeletedShallowCopySnapshotsCount + tobeDeletedFullCopySnapshotsCount); totalShallowCopySnapshotsCount -= tobeDeletedShallowCopySnapshotsCount; assert (getRepositoryData(snapshotRepoName).getSnapshotIds().size() == totalSnapshotsCount); - assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == totalShallowCopySnapshotsCount); + assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME).length == totalShallowCopySnapshotsCount); // Deleting all the remaining snapshots assertAcked(clusterManagerClient.admin().cluster().prepareDeleteSnapshot(snapshotRepoName, "*").get()); assert (getRepositoryData(snapshotRepoName).getSnapshotIds().size() == 0); - assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == 0); + assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME).length == 0); + } + + public void testRemoteStoreCleanupForDeletedIndex() throws Exception { + disableRepoConsistencyCheck("Remote store repository is being used in the test"); + final Path remoteStoreRepoPath = randomRepoPath(); + 
internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); + internalCluster().startDataOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); + final Client clusterManagerClient = internalCluster().clusterManagerClient(); + ensureStableCluster(2); + + final String snapshotRepoName = "snapshot-repo-name"; + final Path snapshotRepoPath = randomRepoPath(); + createRepository(snapshotRepoName, "mock", snapshotRepoSettingsForShallowCopy(snapshotRepoPath)); + + final String testIndex = "index-test"; + createIndexWithContent(testIndex); + + final String remoteStoreEnabledIndexName = "remote-index-1"; + final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); + createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); + indexRandomDocs(remoteStoreEnabledIndexName, randomIntBetween(5, 10)); + + String indexUUID = client().admin() + .indices() + .prepareGetSettings(remoteStoreEnabledIndexName) + .get() + .getSetting(remoteStoreEnabledIndexName, IndexMetadata.SETTING_INDEX_UUID); + + logger.info("--> create two remote index shallow snapshots"); + List<String> shallowCopySnapshots = createNSnapshots(snapshotRepoName, 2); + + String[] lockFiles = getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME); + assert (lockFiles.length == 2) : "lock files are " + Arrays.toString(lockFiles); + + // delete remote store index + assertAcked(client().admin().indices().prepareDelete(remoteStoreEnabledIndexName)); + + logger.info("--> delete snapshot 1"); + AcknowledgedResponse deleteSnapshotResponse = clusterManagerClient.admin() + .cluster() + .prepareDeleteSnapshot(snapshotRepoName, shallowCopySnapshots.get(0)) + .get(); + assertAcked(deleteSnapshotResponse); + + lockFiles = getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME, indexUUID); + assert (lockFiles.length == 1) : "lock files are " + Arrays.toString(lockFiles); + + logger.info("--> delete snapshot 2"); + deleteSnapshotResponse = clusterManagerClient.admin() + .cluster() + .prepareDeleteSnapshot(snapshotRepoName, shallowCopySnapshots.get(1)) + .get(); + assertAcked(deleteSnapshotResponse); + + Path indexPath = Path.of(String.valueOf(remoteStoreRepoPath), indexUUID); + // Delete is async. 
Give time for it to finish. + assertBusy(() -> { + try { + assertThat(RemoteStoreBaseIntegTestCase.getFileCount(indexPath), comparesEqualTo(0)); + } catch (Exception e) { + // Swallow exceptions here: the directory may already be gone, in which case cleanup succeeded; assertBusy retries only on AssertionError. + } + }, 30, TimeUnit.SECONDS); } private List<String> createNSnapshots(String repoName, int count) { diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java index dd6d9834f63da..b19ba9f5862a7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java @@ -41,6 +41,7 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; import org.opensearch.indices.recovery.RecoverySettings; @@ -49,7 +50,6 @@ import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; import org.opensearch.repositories.RepositoryData; -import org.opensearch.core.rest.RestStatus; import org.opensearch.snapshots.mockstore.MockRepository; import java.io.IOException; diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/MultiClusterRepoAccessIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/MultiClusterRepoAccessIT.java index 4aabe96bcbe60..1c46e37dea93a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/MultiClusterRepoAccessIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/MultiClusterRepoAccessIT.java @@ -37,11 +37,11 @@ import org.opensearch.env.Environment; import org.opensearch.repositories.RepositoryException; import org.opensearch.snapshots.mockstore.MockRepository; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalSettingsPlugin; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.MockHttpTransport; import org.opensearch.test.NodeConfigurationSource; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.transport.nio.MockNioTransportPlugin; import org.junit.After; diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RemoteIndexSnapshotStatusApiIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RemoteIndexSnapshotStatusApiIT.java new file mode 100644 index 0000000000000..8e2580aba1745 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RemoteIndexSnapshotStatusApiIT.java @@ -0,0 +1,206 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.snapshots; + +import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.opensearch.action.admin.cluster.snapshots.status.SnapshotIndexShardStage; +import org.opensearch.action.admin.cluster.snapshots.status.SnapshotIndexShardStatus; +import org.opensearch.action.admin.cluster.snapshots.status.SnapshotStatus; +import org.opensearch.cluster.SnapshotsInProgress; +import org.opensearch.common.action.ActionFuture; +import org.opensearch.common.settings.Settings; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.junit.Before; + +import java.nio.file.Path; + +import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RemoteIndexSnapshotStatusApiIT extends AbstractSnapshotIntegTestCase { + + protected Path absolutePath; + final String remoteStoreRepoName = "remote-store-repo-name"; + + @Before + public void setup() { + absolutePath = randomRepoPath().toAbsolutePath(); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING.getKey(), 0) // We have tests that check by-timestamp order + .put(remoteStoreClusterSettings(remoteStoreRepoName, absolutePath)) + .build(); + } + + public void testStatusAPICallForShallowCopySnapshot() throws Exception { + disableRepoConsistencyCheck("Remote store repository is being used for the test"); + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNodes(2); + + final String snapshotRepoName = "snapshot-repo-name"; + createRepository(snapshotRepoName, "fs", snapshotRepoSettingsForShallowCopy()); + + final String remoteStoreEnabledIndexName = "remote-index-1"; + final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); + createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); + ensureGreen(); + + logger.info("--> indexing some data"); + for (int i = 0; i < 100; i++) { + index(remoteStoreEnabledIndexName, "_doc", Integer.toString(i), "foo", "bar" + i); + } + refresh(); + + final String snapshot = "snapshot"; + createFullSnapshot(snapshotRepoName, snapshot); + assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == 1); + + final SnapshotStatus snapshotStatus = getSnapshotStatus(snapshotRepoName, snapshot); + assertThat(snapshotStatus.getState(), is(SnapshotsInProgress.State.SUCCESS)); + + // Validating that the incremental file count and incremental file size are zero for shallow copy + final SnapshotIndexShardStatus shallowSnapshotShardState = stateFirstShard(snapshotStatus,
remoteStoreEnabledIndexName); + assertThat(shallowSnapshotShardState.getStage(), is(SnapshotIndexShardStage.DONE)); + assertThat(shallowSnapshotShardState.getStats().getTotalFileCount(), greaterThan(0)); + assertThat(shallowSnapshotShardState.getStats().getTotalSize(), greaterThan(0L)); + assertThat(shallowSnapshotShardState.getStats().getIncrementalFileCount(), is(0)); + assertThat(shallowSnapshotShardState.getStats().getIncrementalSize(), is(0L)); + } + + public void testStatusAPIStatsForBackToBackShallowSnapshot() throws Exception { + disableRepoConsistencyCheck("Remote store repository is being used for the test"); + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNodes(2); + + final String snapshotRepoName = "snapshot-repo-name"; + createRepository(snapshotRepoName, "fs", snapshotRepoSettingsForShallowCopy()); + + final String remoteStoreEnabledIndexName = "remote-index-1"; + final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); + createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); + ensureGreen(); + + logger.info("--> indexing some data"); + for (int i = 0; i < 100; i++) { + index(remoteStoreEnabledIndexName, "_doc", Integer.toString(i), "foo", "bar" + i); + } + refresh(); + + createFullSnapshot(snapshotRepoName, "test-snap-1"); + assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == 1); + + SnapshotStatus snapshotStatus = getSnapshotStatus(snapshotRepoName, "test-snap-1"); + assertThat(snapshotStatus.getState(), is(SnapshotsInProgress.State.SUCCESS)); + + SnapshotIndexShardStatus shallowSnapshotShardState = stateFirstShard(snapshotStatus, remoteStoreEnabledIndexName); + assertThat(shallowSnapshotShardState.getStage(), is(SnapshotIndexShardStage.DONE)); + final int totalFileCount = shallowSnapshotShardState.getStats().getTotalFileCount(); + final long totalSize = shallowSnapshotShardState.getStats().getTotalSize(); + final int incrementalFileCount = shallowSnapshotShardState.getStats().getIncrementalFileCount(); + final long incrementalSize = shallowSnapshotShardState.getStats().getIncrementalSize(); + + createFullSnapshot(snapshotRepoName, "test-snap-2"); + assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == 2); + + snapshotStatus = getSnapshotStatus(snapshotRepoName, "test-snap-2"); + assertThat(snapshotStatus.getState(), is(SnapshotsInProgress.State.SUCCESS)); + shallowSnapshotShardState = stateFirstShard(snapshotStatus, remoteStoreEnabledIndexName); + assertThat(shallowSnapshotShardState.getStats().getTotalFileCount(), equalTo(totalFileCount)); + assertThat(shallowSnapshotShardState.getStats().getTotalSize(), equalTo(totalSize)); + assertThat(shallowSnapshotShardState.getStats().getIncrementalFileCount(), equalTo(incrementalFileCount)); + assertThat(shallowSnapshotShardState.getStats().getIncrementalSize(), equalTo(incrementalSize)); + } + + public void testStatusAPICallInProgressShallowSnapshot() throws Exception { + disableRepoConsistencyCheck("Remote store repository is being used for the test"); + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNodes(2); + + final String snapshotRepoName = "snapshot-repo-name"; + createRepository(snapshotRepoName, "mock", snapshotRepoSettingsForShallowCopy().put("block_on_data", true)); + + final String remoteStoreEnabledIndexName = "remote-index-1"; + final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); + 
createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); + ensureGreen(); + + logger.info("--> indexing some data"); + for (int i = 0; i < 100; i++) { + index(remoteStoreEnabledIndexName, "_doc", Integer.toString(i), "foo", "bar" + i); + } + refresh(); + + logger.info("--> snapshot"); + ActionFuture<CreateSnapshotResponse> createSnapshotResponseActionFuture = startFullSnapshot(snapshotRepoName, "test-snap"); + + logger.info("--> wait for data nodes to get blocked"); + awaitNumberOfSnapshotsInProgress(1); + assertEquals( + SnapshotsInProgress.State.STARTED, + client().admin() + .cluster() + .prepareSnapshotStatus(snapshotRepoName) + .setSnapshots("test-snap") + .get() + .getSnapshots() + .get(0) + .getState() + ); + + logger.info("--> unblock all data nodes"); + unblockAllDataNodes(snapshotRepoName); + + logger.info("--> wait for snapshot to finish"); + createSnapshotResponseActionFuture.actionGet(); + } + + private static SnapshotIndexShardStatus stateFirstShard(SnapshotStatus snapshotStatus, String indexName) { + return snapshotStatus.getIndices().get(indexName).getShards().get(0); + } + + private static SnapshotStatus getSnapshotStatus(String repoName, String snapshotName) { + try { + return client().admin().cluster().prepareSnapshotStatus(repoName).setSnapshots(snapshotName).get().getSnapshots().get(0); + } catch (SnapshotMissingException e) { + throw new AssertionError(e); + } + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java index f6df7cccf96f7..dd40c77ba918d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java @@ -42,9 +42,9 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.cluster.metadata.RepositoryMetadata; -import org.opensearch.core.util.FileSystemUtils; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.util.FileSystemUtils; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.RepositoryException; import org.opensearch.repositories.RepositoryVerificationException; diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoryFilterUserMetadataIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoryFilterUserMetadataIT.java index cae0f59dab36e..0eb37703eb0f1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoryFilterUserMetadataIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoryFilterUserMetadataIT.java @@ -32,13 +32,12 @@ package org.opensearch.snapshots; import org.apache.lucene.index.IndexCommit; - import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; import org.opensearch.index.mapper.MapperService; diff --git 
a/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java index 30a836b41e29e..7117818451e14 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java @@ -32,62 +32,36 @@ package org.opensearch.snapshots; -import org.opensearch.action.ActionFuture; -import org.opensearch.action.DocWriteResponse; -import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; -import org.opensearch.action.admin.indices.get.GetIndexRequest; -import org.opensearch.action.admin.indices.get.GetIndexResponse; import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse; import org.opensearch.action.admin.indices.template.delete.DeleteIndexTemplateRequestBuilder; import org.opensearch.action.admin.indices.template.get.GetIndexTemplatesResponse; -import org.opensearch.action.delete.DeleteResponse; import org.opensearch.action.index.IndexRequestBuilder; -import org.opensearch.action.support.PlainActionFuture; import org.opensearch.client.Client; -import org.opensearch.client.Requests; import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.MappingMetadata; -import org.opensearch.common.io.PathUtils; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.rest.RestStatus; -import org.opensearch.index.IndexSettings; import org.opensearch.indices.InvalidIndexNameException; -import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.repositories.RepositoriesService; -import org.opensearch.test.InternalTestCluster; - -import java.io.IOException; import java.nio.file.Path; import java.util.Collections; import java.util.List; import java.util.Locale; import java.util.Map; -import java.util.Arrays; -import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.stream.IntStream; -import static org.hamcrest.Matchers.allOf; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY; import static org.opensearch.index.IndexSettings.INDEX_REFRESH_INTERVAL_SETTING; import static org.opensearch.index.IndexSettings.INDEX_SOFT_DELETES_SETTING; import 
static org.opensearch.index.query.QueryBuilders.matchQuery; @@ -97,13 +71,16 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertIndexTemplateExists; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertIndexTemplateMissing; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertRequestBuilderThrows; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; public class RestoreSnapshotIT extends AbstractSnapshotIntegTestCase { - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(FeatureFlags.REMOTE_STORE, "true").build(); - } - public void testParallelRestoreOperations() { String indexName1 = "testindex1"; String indexName2 = "testindex2"; @@ -174,494 +151,6 @@ public void testParallelRestoreOperations() { assertThat(client.prepareGet(restoredIndexName2, docId2).get().isExists(), equalTo(true)); } - public void testRestoreRemoteStoreIndicesWithRemoteTranslog() throws IOException, ExecutionException, InterruptedException { - testRestoreOperationsShallowCopyEnabled(); - } - - public void testRestoreOperationsShallowCopyEnabled() throws IOException, ExecutionException, InterruptedException { - String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); - String primary = internalCluster().startDataOnlyNode(); - String indexName1 = "testindex1"; - String indexName2 = "testindex2"; - String snapshotRepoName = "test-restore-snapshot-repo"; - String remoteStoreRepoName = "test-rs-repo" + TEST_REMOTE_STORE_REPO_SUFFIX; - String snapshotName1 = "test-restore-snapshot1"; - String snapshotName2 = "test-restore-snapshot2"; - Path absolutePath1 = randomRepoPath().toAbsolutePath(); - Path absolutePath2 = randomRepoPath().toAbsolutePath(); - logger.info("Snapshot Path [{}]", absolutePath1); - logger.info("Remote Store Repo Path [{}]", absolutePath2); - String restoredIndexName1 = indexName1 + "-restored"; - String restoredIndexName1Seg = indexName1 + "-restored-seg"; - String restoredIndexName1Doc = indexName1 + "-restored-doc"; - String restoredIndexName2 = indexName2 + "-restored"; - - createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, true)); - createRepository(remoteStoreRepoName, "fs", absolutePath2); - - Client client = client(); - Settings indexSettings = getIndexSettings(true, remoteStoreRepoName, 1, 0).build(); - createIndex(indexName1, indexSettings); - - Settings indexSettings2 = getIndexSettings(false, null, 1, 0).build(); - createIndex(indexName2, indexSettings2); - - final int numDocsInIndex1 = 5; - final int numDocsInIndex2 = 6; - indexDocuments(client, indexName1, numDocsInIndex1); - indexDocuments(client, indexName2, numDocsInIndex2); - ensureGreen(indexName1, indexName2); - - internalCluster().startDataOnlyNode(); - logger.info("--> snapshot"); - CreateSnapshotResponse createSnapshotResponse = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepoName, snapshotName1) - .setWaitForCompletion(true) - .setIndices(indexName1, indexName2) - .get(); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); - assertThat( - 
createSnapshotResponse.getSnapshotInfo().successfulShards(), - equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) - ); - assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); - - updateRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, false)); - CreateSnapshotResponse createSnapshotResponse2 = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepoName, snapshotName2) - .setWaitForCompletion(true) - .setIndices(indexName1, indexName2) - .get(); - assertThat(createSnapshotResponse2.getSnapshotInfo().successfulShards(), greaterThan(0)); - assertThat( - createSnapshotResponse2.getSnapshotInfo().successfulShards(), - equalTo(createSnapshotResponse2.getSnapshotInfo().totalShards()) - ); - assertThat(createSnapshotResponse2.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); - - DeleteResponse deleteResponse = client().prepareDelete(indexName1, "0").execute().actionGet(); - assertEquals(deleteResponse.getResult(), DocWriteResponse.Result.DELETED); - indexDocuments(client, indexName1, numDocsInIndex1, numDocsInIndex1 + randomIntBetween(2, 5)); - ensureGreen(indexName1); - - RestoreSnapshotResponse restoreSnapshotResponse1 = client.admin() - .cluster() - .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) - .setWaitForCompletion(false) - .setIndices(indexName1) - .setRenamePattern(indexName1) - .setRenameReplacement(restoredIndexName1) - .get(); - RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin() - .cluster() - .prepareRestoreSnapshot(snapshotRepoName, snapshotName2) - .setWaitForCompletion(false) - .setIndices(indexName2) - .setRenamePattern(indexName2) - .setRenameReplacement(restoredIndexName2) - .get(); - assertEquals(restoreSnapshotResponse1.status(), RestStatus.ACCEPTED); - assertEquals(restoreSnapshotResponse2.status(), RestStatus.ACCEPTED); - ensureGreen(restoredIndexName1, restoredIndexName2); - assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1); - assertDocsPresentInIndex(client, restoredIndexName2, numDocsInIndex2); - - // deleting data for restoredIndexName1 and restoring from remote store. - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primary)); - ensureRed(restoredIndexName1); - // Re-initialize client to make sure we are not using client from stopped node. 
- client = client(clusterManagerNode); - assertAcked(client.admin().indices().prepareClose(restoredIndexName1)); - client.admin() - .cluster() - .restoreRemoteStore( - new RestoreRemoteStoreRequest().indices(restoredIndexName1).restoreAllShards(true), - PlainActionFuture.newFuture() - ); - ensureYellowAndNoInitializingShards(restoredIndexName1); - ensureGreen(restoredIndexName1); - assertDocsPresentInIndex(client(), restoredIndexName1, numDocsInIndex1); - // indexing some new docs and validating - indexDocuments(client, restoredIndexName1, numDocsInIndex1, numDocsInIndex1 + 2); - ensureGreen(restoredIndexName1); - assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1 + 2); - - // restore index as seg rep enabled with remote store and remote translog disabled - RestoreSnapshotResponse restoreSnapshotResponse3 = client.admin() - .cluster() - .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) - .setWaitForCompletion(false) - .setIgnoreIndexSettings(IndexMetadata.SETTING_REMOTE_STORE_ENABLED) - .setIndices(indexName1) - .setRenamePattern(indexName1) - .setRenameReplacement(restoredIndexName1Seg) - .get(); - assertEquals(restoreSnapshotResponse3.status(), RestStatus.ACCEPTED); - ensureGreen(restoredIndexName1Seg); - - GetIndexResponse getIndexResponse = client.admin() - .indices() - .getIndex(new GetIndexRequest().indices(restoredIndexName1Seg).includeDefaults(true)) - .get(); - indexSettings = getIndexResponse.settings().get(restoredIndexName1Seg); - assertNull(indexSettings.get(SETTING_REMOTE_STORE_ENABLED)); - assertNull(indexSettings.get(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, null)); - assertEquals(ReplicationType.SEGMENT.toString(), indexSettings.get(IndexMetadata.SETTING_REPLICATION_TYPE)); - assertDocsPresentInIndex(client, restoredIndexName1Seg, numDocsInIndex1); - // indexing some new docs and validating - indexDocuments(client, restoredIndexName1Seg, numDocsInIndex1, numDocsInIndex1 + 2); - ensureGreen(restoredIndexName1Seg); - assertDocsPresentInIndex(client, restoredIndexName1Seg, numDocsInIndex1 + 2); - - // restore index as doc rep based from shallow copy snapshot - RestoreSnapshotResponse restoreSnapshotResponse4 = client.admin() - .cluster() - .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) - .setWaitForCompletion(false) - .setIgnoreIndexSettings(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, IndexMetadata.SETTING_REPLICATION_TYPE) - .setIndices(indexName1) - .setRenamePattern(indexName1) - .setRenameReplacement(restoredIndexName1Doc) - .get(); - assertEquals(restoreSnapshotResponse4.status(), RestStatus.ACCEPTED); - ensureGreen(restoredIndexName1Doc); - - getIndexResponse = client.admin() - .indices() - .getIndex(new GetIndexRequest().indices(restoredIndexName1Doc).includeDefaults(true)) - .get(); - indexSettings = getIndexResponse.settings().get(restoredIndexName1Doc); - assertNull(indexSettings.get(SETTING_REMOTE_STORE_ENABLED)); - assertNull(indexSettings.get(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, null)); - assertNull(indexSettings.get(IndexMetadata.SETTING_REPLICATION_TYPE)); - assertDocsPresentInIndex(client, restoredIndexName1Doc, numDocsInIndex1); - // indexing some new docs and validating - indexDocuments(client, restoredIndexName1Doc, numDocsInIndex1, numDocsInIndex1 + 2); - ensureGreen(restoredIndexName1Doc); - assertDocsPresentInIndex(client, restoredIndexName1Doc, numDocsInIndex1 + 2); - } - - public void testRestoreInSameRemoteStoreEnabledIndex() throws IOException { - String clusterManagerNode = 
internalCluster().startClusterManagerOnlyNode(); - String primary = internalCluster().startDataOnlyNode(); - String indexName1 = "testindex1"; - String indexName2 = "testindex2"; - String snapshotRepoName = "test-restore-snapshot-repo"; - String remoteStoreRepoName = "test-rs-repo" + TEST_REMOTE_STORE_REPO_SUFFIX; - String snapshotName1 = "test-restore-snapshot1"; - String snapshotName2 = "test-restore-snapshot2"; - Path absolutePath1 = randomRepoPath().toAbsolutePath(); - Path absolutePath2 = randomRepoPath().toAbsolutePath(); - logger.info("Snapshot Path [{}]", absolutePath1); - logger.info("Remote Store Repo Path [{}]", absolutePath2); - String restoredIndexName2 = indexName2 + "-restored"; - - boolean enableShallowCopy = randomBoolean(); - createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, enableShallowCopy)); - createRepository(remoteStoreRepoName, "fs", absolutePath2); - - Client client = client(); - Settings indexSettings = getIndexSettings(true, remoteStoreRepoName, 1, 0).build(); - createIndex(indexName1, indexSettings); - - Settings indexSettings2 = getIndexSettings(false, null, 1, 0).build(); - createIndex(indexName2, indexSettings2); - - final int numDocsInIndex1 = 5; - final int numDocsInIndex2 = 6; - indexDocuments(client, indexName1, numDocsInIndex1); - indexDocuments(client, indexName2, numDocsInIndex2); - ensureGreen(indexName1, indexName2); - - internalCluster().startDataOnlyNode(); - logger.info("--> snapshot"); - CreateSnapshotResponse createSnapshotResponse = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepoName, snapshotName1) - .setWaitForCompletion(true) - .setIndices(indexName1, indexName2) - .get(); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); - assertThat( - createSnapshotResponse.getSnapshotInfo().successfulShards(), - equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) - ); - assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); - - updateRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, false)); - CreateSnapshotResponse createSnapshotResponse2 = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepoName, snapshotName2) - .setWaitForCompletion(true) - .setIndices(indexName1, indexName2) - .get(); - assertThat(createSnapshotResponse2.getSnapshotInfo().successfulShards(), greaterThan(0)); - assertThat( - createSnapshotResponse2.getSnapshotInfo().successfulShards(), - equalTo(createSnapshotResponse2.getSnapshotInfo().totalShards()) - ); - assertThat(createSnapshotResponse2.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); - - DeleteResponse deleteResponse = client().prepareDelete(indexName1, "0").execute().actionGet(); - assertEquals(deleteResponse.getResult(), DocWriteResponse.Result.DELETED); - indexDocuments(client, indexName1, numDocsInIndex1, numDocsInIndex1 + randomIntBetween(2, 5)); - ensureGreen(indexName1); - - assertAcked(client().admin().indices().prepareClose(indexName1)); - - RestoreSnapshotResponse restoreSnapshotResponse1 = client.admin() - .cluster() - .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) - .setWaitForCompletion(false) - .setIndices(indexName1) - .get(); - RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin() - .cluster() - .prepareRestoreSnapshot(snapshotRepoName, snapshotName2) - .setWaitForCompletion(false) - .setIndices(indexName2) - .setRenamePattern(indexName2) - .setRenameReplacement(restoredIndexName2) - .get(); - 
assertEquals(restoreSnapshotResponse1.status(), RestStatus.ACCEPTED); - assertEquals(restoreSnapshotResponse2.status(), RestStatus.ACCEPTED); - ensureGreen(indexName1, restoredIndexName2); - assertDocsPresentInIndex(client, indexName1, numDocsInIndex1); - assertDocsPresentInIndex(client, restoredIndexName2, numDocsInIndex2); - - // deleting data for restoredIndexName1 and restoring from remote store. - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primary)); - ensureRed(indexName1); - // Re-initialize client to make sure we are not using client from stopped node. - client = client(clusterManagerNode); - assertAcked(client.admin().indices().prepareClose(indexName1)); - client.admin() - .cluster() - .restoreRemoteStore(new RestoreRemoteStoreRequest().indices(indexName1).restoreAllShards(true), PlainActionFuture.newFuture()); - ensureYellowAndNoInitializingShards(indexName1); - ensureGreen(indexName1); - assertDocsPresentInIndex(client(), indexName1, numDocsInIndex1); - // indexing some new docs and validating - indexDocuments(client, indexName1, numDocsInIndex1, numDocsInIndex1 + 2); - ensureGreen(indexName1); - assertDocsPresentInIndex(client, indexName1, numDocsInIndex1 + 2); - } - - public void testRestoreShallowCopySnapshotWithDifferentRepo() throws IOException { - String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); - String primary = internalCluster().startDataOnlyNode(); - String indexName1 = "testindex1"; - String indexName2 = "testindex2"; - String snapshotRepoName = "test-restore-snapshot-repo"; - String remoteStoreRepoName = "test-rs-repo" + TEST_REMOTE_STORE_REPO_SUFFIX; - String remoteStoreRepo2Name = "test-rs-repo-2" + TEST_REMOTE_STORE_REPO_SUFFIX; - String snapshotName1 = "test-restore-snapshot1"; - Path absolutePath1 = randomRepoPath().toAbsolutePath(); - Path absolutePath2 = randomRepoPath().toAbsolutePath(); - Path absolutePath3 = randomRepoPath().toAbsolutePath(); - String restoredIndexName1 = indexName1 + "-restored"; - - createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, false)); - createRepository(remoteStoreRepoName, "fs", absolutePath2); - createRepository(remoteStoreRepo2Name, "fs", absolutePath3); - - Client client = client(); - Settings indexSettings = getIndexSettings(true, remoteStoreRepoName, 1, 0).build(); - createIndex(indexName1, indexSettings); - - Settings indexSettings2 = getIndexSettings(false, null, 1, 0).build(); - createIndex(indexName2, indexSettings2); - - final int numDocsInIndex1 = 5; - final int numDocsInIndex2 = 6; - indexDocuments(client, indexName1, numDocsInIndex1); - indexDocuments(client, indexName2, numDocsInIndex2); - ensureGreen(indexName1, indexName2); - - internalCluster().startDataOnlyNode(); - - logger.info("--> snapshot"); - CreateSnapshotResponse createSnapshotResponse = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepoName, snapshotName1) - .setWaitForCompletion(true) - .setIndices(indexName1, indexName2) - .get(); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); - assertThat( - createSnapshotResponse.getSnapshotInfo().successfulShards(), - equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) - ); - assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); - - Settings remoteStoreIndexSettings = Settings.builder() - .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, remoteStoreRepo2Name) - .build(); - // restore index as a remote store index with 
different remote store repo - RestoreSnapshotResponse restoreSnapshotResponse = client.admin() - .cluster() - .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) - .setWaitForCompletion(false) - .setIndexSettings(remoteStoreIndexSettings) - .setIndices(indexName1) - .setRenamePattern(indexName1) - .setRenameReplacement(restoredIndexName1) - .get(); - assertEquals(restoreSnapshotResponse.status(), RestStatus.ACCEPTED); - ensureGreen(restoredIndexName1); - assertDocsPresentInIndex(client(), restoredIndexName1, numDocsInIndex1); - - // deleting data for restoredIndexName1 and restoring from remote store. - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primary)); - // Re-initialize client to make sure we are not using client from stopped node. - client = client(clusterManagerNode); - assertAcked(client.admin().indices().prepareClose(restoredIndexName1)); - client.admin() - .cluster() - .restoreRemoteStore( - new RestoreRemoteStoreRequest().indices(restoredIndexName1).restoreAllShards(true), - PlainActionFuture.newFuture() - ); - ensureYellowAndNoInitializingShards(restoredIndexName1); - ensureGreen(restoredIndexName1); - // indexing some new docs and validating - assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1); - indexDocuments(client, restoredIndexName1, numDocsInIndex1, numDocsInIndex1 + 2); - ensureGreen(restoredIndexName1); - assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1 + 2); - } - - private Settings.Builder getIndexSettings(boolean enableRemoteStore, String remoteStoreRepo, int numOfShards, int numOfReplicas) { - Settings.Builder settingsBuilder = Settings.builder() - .put(super.indexSettings()) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numOfShards) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numOfReplicas); - if (enableRemoteStore) { - settingsBuilder.put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) - .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, remoteStoreRepo) - .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, remoteStoreRepo) - .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "300s") - .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT); - } - return settingsBuilder; - } - - public void testRestoreShallowSnapshotRepositoryOverriden() throws ExecutionException, InterruptedException { - String indexName1 = "testindex1"; - String snapshotRepoName = "test-restore-snapshot-repo"; - String remoteStoreRepoName = "test-rs-repo" + TEST_REMOTE_STORE_REPO_SUFFIX; - String remoteStoreRepoNameUpdated = "test-rs-repo-updated" + TEST_REMOTE_STORE_REPO_SUFFIX; - String snapshotName1 = "test-restore-snapshot1"; - Path absolutePath1 = randomRepoPath().toAbsolutePath(); - Path absolutePath2 = randomRepoPath().toAbsolutePath(); - Path absolutePath3 = randomRepoPath().toAbsolutePath(); - String[] pathTokens = absolutePath1.toString().split("/"); - String basePath = pathTokens[pathTokens.length - 1]; - Arrays.copyOf(pathTokens, pathTokens.length - 1); - Path location = PathUtils.get(String.join("/", pathTokens)); - pathTokens = absolutePath2.toString().split("/"); - String basePath2 = pathTokens[pathTokens.length - 1]; - Arrays.copyOf(pathTokens, pathTokens.length - 1); - Path location2 = PathUtils.get(String.join("/", pathTokens)); - logger.info("Path 1 [{}]", absolutePath1); - logger.info("Path 2 [{}]", absolutePath2); - logger.info("Path 3 [{}]", absolutePath3); - String restoredIndexName1 = indexName1 + "-restored"; - - createRepository(snapshotRepoName, "fs", 
getRepositorySettings(location, basePath, true)); - createRepository(remoteStoreRepoName, "fs", absolutePath3); - - Client client = client(); - Settings indexSettings = Settings.builder() - .put(super.indexSettings()) - .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) - .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, remoteStoreRepoName) - .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, remoteStoreRepoName) - .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "300s") - .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .build(); - createIndex(indexName1, indexSettings); - - int numDocsInIndex1 = randomIntBetween(2, 5); - indexDocuments(client, indexName1, numDocsInIndex1); - - ensureGreen(indexName1); - - logger.info("--> snapshot"); - CreateSnapshotResponse createSnapshotResponse = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepoName, snapshotName1) - .setWaitForCompletion(true) - .setIndices(indexName1) - .get(); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); - assertThat( - createSnapshotResponse.getSnapshotInfo().successfulShards(), - equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) - ); - assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); - - createRepository(remoteStoreRepoName, "fs", absolutePath2); - - RestoreSnapshotResponse restoreSnapshotResponse = client.admin() - .cluster() - .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) - .setWaitForCompletion(true) - .setIndices(indexName1) - .setRenamePattern(indexName1) - .setRenameReplacement(restoredIndexName1) - .get(); - - assertTrue(restoreSnapshotResponse.getRestoreInfo().failedShards() > 0); - - ensureRed(restoredIndexName1); - - client().admin().indices().close(Requests.closeIndexRequest(restoredIndexName1)).get(); - createRepository(remoteStoreRepoNameUpdated, "fs", absolutePath3); - RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin() - .cluster() - .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) - .setWaitForCompletion(true) - .setIndices(indexName1) - .setRenamePattern(indexName1) - .setRenameReplacement(restoredIndexName1) - .setSourceRemoteStoreRepository(remoteStoreRepoNameUpdated) - .get(); - - assertTrue(restoreSnapshotResponse2.getRestoreInfo().failedShards() == 0); - ensureGreen(restoredIndexName1); - assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1); - - // indexing some new docs and validating - indexDocuments(client, restoredIndexName1, numDocsInIndex1, numDocsInIndex1 + 2); - ensureGreen(restoredIndexName1); - assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1 + 2); - } - - private void indexDocuments(Client client, String indexName, int numOfDocs) { - indexDocuments(client, indexName, 0, numOfDocs); - } - - private void indexDocuments(Client client, String indexName, int fromId, int toId) { - for (int i = fromId; i < toId; i++) { - String id = Integer.toString(i); - client.prepareIndex(indexName).setId(id).setSource("text", "sometext").get(); - } - client.admin().indices().prepareFlush(indexName).get(); - } - - private void assertDocsPresentInIndex(Client client, String indexName, int numOfDocs) { - for (int i = 0; i < numOfDocs; i++) { - String id = Integer.toString(i); - logger.info("checking for index " + indexName + " with docId" + id); - assertTrue("doc with id" + 
id + " is not present for index " + indexName, client.prepareGet(indexName, id).get().isExists()); - } - } - public void testParallelRestoreOperationsFromSingleSnapshot() throws Exception { String indexName1 = "testindex1"; String indexName2 = "testindex2"; diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java index 300e1db09b4c5..90bb2b501764e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java @@ -6,7 +6,8 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; import com.carrotsearch.randomizedtesting.generators.RandomPicks; -import org.hamcrest.MatcherAssert; + +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -25,20 +26,27 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.GroupShardsIterator; import org.opensearch.cluster.routing.ShardIterator; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; +import org.opensearch.common.Priority; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexNotFoundException; +import org.opensearch.index.shard.ShardPath; import org.opensearch.index.store.remote.file.CleanerDaemonThreadLeakFilter; import org.opensearch.index.store.remote.filecache.FileCacheStats; import org.opensearch.monitor.fs.FsInfo; import org.opensearch.node.Node; import org.opensearch.repositories.fs.FsRepository; +import org.hamcrest.MatcherAssert; import java.io.IOException; import java.nio.file.Files; @@ -46,17 +54,20 @@ import java.util.Arrays; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.stream.Stream; import java.util.stream.StreamSupport; +import static org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.FS; +import static org.opensearch.core.common.util.CollectionUtils.iterableAsArrayList; import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; -import static org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.FS; -import static org.opensearch.common.util.CollectionUtils.iterableAsArrayList; @ThreadLeakFilters(filters = CleanerDaemonThreadLeakFilter.class) public final class SearchableSnapshotIT extends AbstractSnapshotIntegTestCase { @@ -74,10 +85,10 @@ 
protected Settings.Builder randomRepositorySettings() { return settings; } - private Settings.Builder chunkedRepositorySettings() { + private Settings.Builder chunkedRepositorySettings(long chunkSize) { final Settings.Builder settings = Settings.builder(); settings.put("location", randomRepoPath()).put("compress", randomBoolean()); - settings.put("chunk_size", 2 << 23, ByteSizeUnit.BYTES); + settings.put("chunk_size", chunkSize, ByteSizeUnit.BYTES); return settings; } @@ -183,10 +194,10 @@ public void testSnapshottingSearchableSnapshots() throws Exception { } /** - * Tests a chunked repository scenario for searchable snapshots by creating an index, + * Tests a default 16MiB chunked repository scenario for searchable snapshots by creating an index, * taking a snapshot, restoring it as a searchable snapshot index. */ - public void testCreateSearchableSnapshotWithChunks() throws Exception { + public void testCreateSearchableSnapshotWithDefaultChunks() throws Exception { final int numReplicasIndex = randomIntBetween(1, 4); final String indexName = "test-idx"; final String restoredIndexName = indexName + "-copy"; final String repoName = "test-repo"; final String snapshotName = "test-snap"; final Client client = client(); - Settings.Builder repositorySettings = chunkedRepositorySettings(); + Settings.Builder repositorySettings = chunkedRepositorySettings(2 << 23); + + internalCluster().ensureAtLeastNumSearchAndDataNodes(numReplicasIndex + 1); + createIndexWithDocsAndEnsureGreen(numReplicasIndex, 1000, indexName); + createRepositoryWithSettings(repositorySettings, repoName); + takeSnapshot(client, snapshotName, repoName, indexName); + + deleteIndicesAndEnsureGreen(client, indexName); + restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); + assertRemoteSnapshotIndexSettings(client, restoredIndexName); + + assertDocCount(restoredIndexName, 1000L); + } + + /** + * Tests a small 1000-byte chunked repository scenario for searchable snapshots by creating an index, + * taking a snapshot, restoring it as a searchable snapshot index.
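A quick sanity check on the chunk sizes exercised here: 2 << 23 is 2 * 2^23 = 16,777,216 bytes (16 MiB), while the small-chunk variant below uses 1,000 bytes. A minimal sketch of building such repository settings, mirroring chunkedRepositorySettings above (the helper name and location argument are illustrative):

    import java.nio.file.Path;

    import org.opensearch.common.settings.Settings;
    import org.opensearch.core.common.unit.ByteSizeUnit;

    static Settings.Builder chunkedSettings(long chunkSizeBytes, Path location) {
        return Settings.builder()
            .put("location", location)                              // repository directory
            .put("chunk_size", chunkSizeBytes, ByteSizeUnit.BYTES); // e.g. 2 << 23 (16 MiB) or 1000
    }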
+ */ + public void testCreateSearchableSnapshotWithSmallChunks() throws Exception { + final int numReplicasIndex = randomIntBetween(1, 4); + final String indexName = "test-idx"; + final String restoredIndexName = indexName + "-copy"; + final String repoName = "test-repo"; + final String snapshotName = "test-snap"; + final Client client = client(); + + Settings.Builder repositorySettings = chunkedRepositorySettings(1000); internalCluster().ensureAtLeastNumSearchAndDataNodes(numReplicasIndex + 1); createIndexWithDocsAndEnsureGreen(numReplicasIndex, 1000, indexName); @@ -233,6 +270,62 @@ public void testSearchableSnapshotAllocationForLocalAndRemoteShardsOnSameNode() assertDocCount(indexName, 100L); } + public void testSearchableSnapshotAllocationFilterSettings() throws Exception { + final int numShardsIndex = randomIntBetween(3, 6); + final String indexName = "test-idx"; + final String restoredIndexName = indexName + "-copy"; + final String repoName = "test-repo"; + final String snapshotName = "test-snap"; + final Client client = client(); + + internalCluster().ensureAtLeastNumSearchAndDataNodes(numShardsIndex); + createIndexWithDocsAndEnsureGreen(numShardsIndex, 1, 100, indexName); + createRepositoryWithSettings(null, repoName); + takeSnapshot(client, snapshotName, repoName, indexName); + + restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); + assertRemoteSnapshotIndexSettings(client, restoredIndexName); + final Set<String> searchNodes = StreamSupport.stream(clusterService().state().getNodes().spliterator(), false) + .filter(DiscoveryNode::isSearchNode) + .map(DiscoveryNode::getId) + .collect(Collectors.toSet()); + + for (int i = searchNodes.size(); i > 2; --i) { + String pickedNode = randomFrom(searchNodes); + searchNodes.remove(pickedNode); + assertIndexAssignedToNodeOrNot(restoredIndexName, pickedNode, true); + assertTrue( + client.admin() + .indices() + .prepareUpdateSettings(restoredIndexName) + .setSettings(Settings.builder().put("index.routing.allocation.exclude._id", pickedNode)) + .execute() + .actionGet() + .isAcknowledged() + ); + ClusterHealthResponse clusterHealthResponse = client.admin() + .cluster() + .prepareHealth() + .setWaitForEvents(Priority.LANGUID) + .setWaitForNoRelocatingShards(true) + .setTimeout(new TimeValue(5, TimeUnit.MINUTES)) + .execute() + .actionGet(); + assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); + assertIndexAssignedToNodeOrNot(restoredIndexName, pickedNode, false); + assertIndexAssignedToNodeOrNot(indexName, pickedNode, true); + } + } + + private void assertIndexAssignedToNodeOrNot(String index, String node, boolean assigned) { + final ClusterState state = clusterService().state(); + if (assigned) { + assertTrue(state.getRoutingTable().allShards(index).stream().anyMatch(shard -> shard.currentNodeId().equals(node))); + } else { + assertTrue(state.getRoutingTable().allShards(index).stream().noneMatch(shard -> shard.currentNodeId().equals(node))); + } + } + /** * Tests the functionality of remote shard allocation to * ensure it can handle node drops for failover scenarios and the cluster gets back to a healthy state when @@ -340,11 +433,16 @@ public void testDeleteSearchableSnapshotBackingIndex() throws Exception { } private void createIndexWithDocsAndEnsureGreen(int numReplicasIndex, int numOfDocs, String indexName) throws InterruptedException { + createIndexWithDocsAndEnsureGreen(1, numReplicasIndex, numOfDocs, indexName); + } + + private void createIndexWithDocsAndEnsureGreen(int numShardsIndex, int numReplicasIndex, int 
numOfDocs, String indexName) + throws InterruptedException { createIndex( indexName, Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, Integer.toString(numReplicasIndex)) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicasIndex) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShardsIndex) .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.FS.getSettingsKey()) .build() ); @@ -457,7 +555,9 @@ private void testUpdateIndexSettingsOnlyNotAllowedSettings(String index) { private void testUpdateIndexSettingsOnlyAllowedSettings(String index) { final UpdateSettingsRequestBuilder builder = client().admin().indices().prepareUpdateSettings(index); - builder.setSettings(Map.of("index.max_result_window", 1000, "index.search.slowlog.threshold.query.warn", "10s")); + builder.setSettings( + Map.of("index.max_result_window", 1000, "index.search.slowlog.threshold.query.warn", "10s", "index.number_of_replicas", 0) + ); AcknowledgedResponse settingsResponse = builder.execute().actionGet(); assertThat(settingsResponse, notNullValue()); } @@ -719,6 +819,47 @@ public void testDefaultShardPreference() throws Exception { } } + public void testRestoreSearchableSnapshotWithIndexStoreTypeThrowsException() throws Exception { + final String snapshotName = "test-snap"; + final String repoName = "test-repo"; + final String indexName1 = "test-idx-1"; + final int numReplicasIndex1 = randomIntBetween(1, 4); + final Client client = client(); + + internalCluster().ensureAtLeastNumDataNodes(numReplicasIndex1 + 1); + createIndexWithDocsAndEnsureGreen(numReplicasIndex1, 100, indexName1); + + createRepositoryWithSettings(null, repoName); + takeSnapshot(client, snapshotName, repoName, indexName1); + deleteIndicesAndEnsureGreen(client, indexName1); + + internalCluster().ensureAtLeastNumSearchNodes(numReplicasIndex1 + 1); + + // set "index.store.type" to "remote_snapshot" in index settings of restore API and assert appropriate exception with error message + // is thrown. + final SnapshotRestoreException error = expectThrows( + SnapshotRestoreException.class, + () -> client.admin() + .cluster() + .prepareRestoreSnapshot(repoName, snapshotName) + .setRenamePattern("(.+)") + .setRenameReplacement("$1-copy") + .setIndexSettings( + Settings.builder() + .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), RestoreSnapshotRequest.StorageType.REMOTE_SNAPSHOT) + ) + .setWaitForCompletion(true) + .execute() + .actionGet() + ); + assertThat( + error.getMessage(), + containsString( + "cannot restore remote snapshot with index settings \"index.store.type\" set to \"remote_snapshot\". Instead use \"storage_type\": \"remote_snapshot\" as argument to restore." + ) + ); + } + /** * Asserts the cache folder count to match the number of shards and the number of indices within the cache folder * as provided. 
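Both the allocation-filter test above and the relocation test below lean on the same two-step pattern: change routing (an exclude filter or an explicit reroute), then block until the cluster reports no relocating shards. Extracted as a sketch (the method name is ours; the calls mirror the tests):

    import java.util.concurrent.TimeUnit;

    import org.opensearch.action.admin.cluster.health.ClusterHealthResponse;
    import org.opensearch.client.Client;
    import org.opensearch.common.Priority;
    import org.opensearch.common.settings.Settings;
    import org.opensearch.common.unit.TimeValue;

    static void excludeNodeAndAwaitRelocation(Client client, String index, String nodeId) {
        // Forbid allocation of this index's shards on the given node...
        client.admin()
            .indices()
            .prepareUpdateSettings(index)
            .setSettings(Settings.builder().put("index.routing.allocation.exclude._id", nodeId))
            .execute()
            .actionGet();
        // ...then wait for the resulting relocations to settle.
        ClusterHealthResponse health = client.admin()
            .cluster()
            .prepareHealth()
            .setWaitForEvents(Priority.LANGUID)
            .setWaitForNoRelocatingShards(true)
            .setTimeout(new TimeValue(5, TimeUnit.MINUTES))
            .execute()
            .actionGet();
        assert health.isTimedOut() == false;
    }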
@@ -747,4 +888,75 @@ private void assertCacheDirectoryReplicaAndIndexCount(int numCacheFolderCount, i // Verifies if all the shards (primary and replica) have been deleted assertEquals(numCacheFolderCount, searchNodeFileCachePaths.size()); } + + public void testRelocateSearchableSnapshotIndex() throws Exception { + final String snapshotName = "test-snap"; + final String repoName = "test-repo"; + final String indexName = "test-idx-1"; + final String restoredIndexName = indexName + "-copy"; + final Client client = client(); + + internalCluster().ensureAtLeastNumDataNodes(1); + createIndexWithDocsAndEnsureGreen(0, 100, indexName); + + createRepositoryWithSettings(null, repoName); + takeSnapshot(client, snapshotName, repoName, indexName); + deleteIndicesAndEnsureGreen(client, indexName); + + String searchNode1 = internalCluster().startSearchOnlyNodes(1).get(0); + internalCluster().validateClusterFormed(); + restoreSnapshotAndEnsureGreen(client, snapshotName, repoName); + assertRemoteSnapshotIndexSettings(client, restoredIndexName); + + String searchNode2 = internalCluster().startSearchOnlyNodes(1).get(0); + internalCluster().validateClusterFormed(); + + final Index index = resolveIndex(restoredIndexName); + assertSearchableSnapshotIndexDirectoryExistence(searchNode1, index, true); + assertSearchableSnapshotIndexDirectoryExistence(searchNode2, index, false); + + // relocate the shard from node1 to node2 + client.admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand(restoredIndexName, 0, searchNode1, searchNode2)) + .execute() + .actionGet(); + ClusterHealthResponse clusterHealthResponse = client.admin() + .cluster() + .prepareHealth() + .setWaitForEvents(Priority.LANGUID) + .setWaitForNoRelocatingShards(true) + .setTimeout(new TimeValue(5, TimeUnit.MINUTES)) + .execute() + .actionGet(); + assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); + assertDocCount(restoredIndexName, 100L); + + assertSearchableSnapshotIndexDirectoryExistence(searchNode1, index, false); + assertSearchableSnapshotIndexDirectoryExistence(searchNode2, index, true); + deleteIndicesAndEnsureGreen(client, restoredIndexName); + assertSearchableSnapshotIndexDirectoryExistence(searchNode2, index, false); + } + + private void assertSearchableSnapshotIndexDirectoryExistence(String nodeName, Index index, boolean exists) throws Exception { + final Node node = internalCluster().getInstance(Node.class, nodeName); + final ShardId shardId = new ShardId(index, 0); + final ShardPath shardPath = ShardPath.loadFileCachePath(node.getNodeEnvironment(), shardId); + + assertBusy(() -> { + assertTrue( + "shard state path should " + (exists ? "exist" : "not exist"), + Files.exists(shardPath.getShardStatePath()) == exists + ); + assertTrue("shard cache path should " + (exists ? "exist" : "not exist"), Files.exists(shardPath.getDataPath()) == exists); + }, 30, TimeUnit.SECONDS); + + final Path indexDataPath = node.getNodeEnvironment().fileCacheNodePath().fileCachePath.resolve(index.getUUID()); + final Path indexPath = node.getNodeEnvironment().fileCacheNodePath().indicesPath.resolve(index.getUUID()); + assertBusy(() -> { + assertTrue("index path should " + (exists ? "exist" : "not exist"), Files.exists(indexDataPath) == exists); + assertTrue("index cache path should " + (exists ? 
"exist" : "not exist"), Files.exists(indexPath) == exists); + }, 30, TimeUnit.SECONDS); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java index 0a47b916a3d60..c649c4ab13e7e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java @@ -18,10 +18,10 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.core.index.Index; +import org.opensearch.core.rest.RestStatus; import org.opensearch.index.query.QueryBuilders; import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.common.ReplicationType; -import org.opensearch.core.rest.RestStatus; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; @@ -31,6 +31,7 @@ import java.util.concurrent.TimeUnit; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; +import static org.opensearch.indices.IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_SETTING_REPLICATION_TYPE; import static org.opensearch.indices.replication.SegmentReplicationBaseIT.waitForSearchableDocs; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -47,6 +48,9 @@ public class SegmentReplicationSnapshotIT extends AbstractSnapshotIntegTestCase private static final String REPOSITORY_NAME = "test-segrep-repo"; private static final String SNAPSHOT_NAME = "test-segrep-snapshot"; + protected static final String REPLICATION_MISMATCH_VALIDATION_ERROR = + "Validation Failed: 1: index setting [index.replication.type] is not allowed to be set as [cluster.index.restrict.replication.type=true];"; + public Settings segRepEnableIndexSettings() { return getShardSettings().put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build(); } @@ -70,11 +74,6 @@ public Settings restoreIndexDocRepSettings() { return Settings.builder().put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT).build(); } - @Override - protected boolean addMockInternalEngine() { - return false; - } - public void ingestData(int docCount, String indexName) throws Exception { for (int i = 0; i < docCount; i++) { client().prepareIndex(indexName).setId(Integer.toString(i)).setSource("field", "value" + i).execute().actionGet(); @@ -306,4 +305,63 @@ public void testSnapshotRestoreOnIndexWithSegRepClusterSetting() throws Exceptio IndicesService indicesService = internalCluster().getInstance(IndicesService.class); assertEquals(indicesService.indexService(index).getIndexSettings().isSegRepEnabled(), false); } + + /** + * 1. Create index in DOCUMENT replication type + * 2. Snapshot index + * 3. Add new set of nodes with `cluster.indices.replication.strategy` set to SEGMENT and `cluster.index.restrict.replication.type` + * set to true. + * 4. Perform restore on new set of nodes to validate restored index has `DOCUMENT` replication. + */ + public void testSnapshotRestoreOnRestrictReplicationSetting() throws Exception { + final int documentCount = scaledRandomIntBetween(1, 10); + String originalClusterManagerNode = internalCluster().startClusterManagerOnlyNode(); + + // Starting two nodes with primary and replica shards respectively. 
+ final String primaryNode = internalCluster().startDataOnlyNode(); + prepareCreate( + INDEX_NAME, + Settings.builder() + // we want to override cluster replication setting by passing a index replication setting + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, REPLICA_COUNT) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, SHARD_COUNT) + ).get(); + ensureYellowAndNoInitializingShards(INDEX_NAME); + final String replicaNode = internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + + for (int i = 0; i < documentCount; i++) { + client().prepareIndex(INDEX_NAME).setId(String.valueOf(i)).setSource("foo", "bar").get(); + } + + createSnapshot(); + + // Delete index + assertAcked(client().admin().indices().delete(new DeleteIndexRequest(INDEX_NAME)).get()); + assertFalse("index [" + INDEX_NAME + "] should have been deleted", indexExists(INDEX_NAME)); + + // Start new set of nodes with cluster level replication type setting and restrict replication type setting. + Settings settings = Settings.builder() + .put(CLUSTER_SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING.getKey(), true) + .build(); + + // Start new cluster manager node + String newClusterManagerNode = internalCluster().startClusterManagerOnlyNode(settings); + + // Remove older nodes + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(originalClusterManagerNode)); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode)); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNode)); + + String newPrimaryNode = internalCluster().startDataOnlyNode(settings); + String newReplicaNode = internalCluster().startDataOnlyNode(settings); + + // Perform snapshot restore + logger.info("--> Performing snapshot restore to target index"); + + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> restoreSnapshotWithSettings(null)); + assertEquals(REPLICATION_MISMATCH_VALIDATION_ERROR, exception.getMessage()); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java index 4bba25039d376..91b0aa6438753 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -33,11 +33,9 @@ package org.opensearch.snapshots; import org.apache.lucene.util.BytesRef; - import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; import org.opensearch.Version; -import org.opensearch.action.ActionFuture; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; @@ -65,17 +63,18 @@ import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; import org.opensearch.common.unit.TimeValue; -import org.opensearch.core.util.BytesRefUtils; +import org.opensearch.core.common.unit.ByteSizeUnit; 
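For clarity, the node settings that arm the replication-mismatch validation above come down to two keys; a minimal sketch mirroring the test (the constant name is ours):

    import org.opensearch.common.settings.Settings;
    import org.opensearch.indices.replication.common.ReplicationType;

    import static org.opensearch.indices.IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING;
    import static org.opensearch.indices.IndicesService.CLUSTER_SETTING_REPLICATION_TYPE;

    // Force SEGMENT replication cluster-wide and reject index-level overrides of
    // index.replication.type; restoring a DOCUMENT-replicated index then fails validation.
    static final Settings RESTRICTED_REPLICATION_SETTINGS = Settings.builder()
        .put(CLUSTER_SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT)
        .put(CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING.getKey(), true)
        .build();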
import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.util.BytesRefUtils; import org.opensearch.index.IndexService; import org.opensearch.index.engine.Engine; import org.opensearch.index.engine.EngineTestCase; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.IndicesService; import org.opensearch.repositories.IndexId; import org.opensearch.repositories.RepositoriesService; @@ -1885,7 +1884,7 @@ public void testCannotCreateSnapshotsWithSameName() throws Exception { * This test ensures that when a shard is removed from a node (perhaps due to the node * leaving the cluster, then returning), all snapshotting of that shard is aborted, so * all Store references held onto by the snapshot are released. - * + * <p> * See https://github.com/elastic/elasticsearch/issues/20876 */ public void testSnapshotCanceledOnRemovedShard() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotCustomPluginStateIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotCustomPluginStateIT.java index 85fedead80a85..b2dcab61c05cb 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotCustomPluginStateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotCustomPluginStateIT.java @@ -40,10 +40,10 @@ import org.opensearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import org.opensearch.action.ingest.DeletePipelineRequest; import org.opensearch.action.ingest.GetPipelineResponse; +import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.ingest.IngestTestPlugin; import org.opensearch.plugins.Plugin; import org.opensearch.script.MockScriptEngine; @@ -115,7 +115,7 @@ public void testIncludeGlobalState() throws Exception { .endArray() .endObject() ); - assertAcked(clusterAdmin().preparePutPipeline("barbaz", pipelineSource, XContentType.JSON).get()); + assertAcked(clusterAdmin().preparePutPipeline("barbaz", pipelineSource, MediaTypeRegistry.JSON).get()); } if (testScript) { @@ -125,7 +125,7 @@ public void testIncludeGlobalState() throws Exception { .setId("foobar") .setContent( new BytesArray("{\"script\": { \"lang\": \"" + MockScriptEngine.NAME + "\", \"source\": \"1\"} }"), - XContentType.JSON + MediaTypeRegistry.JSON ) ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java index c22dd90cc930b..c574233d25051 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java @@ -33,7 +33,6 @@ package org.opensearch.snapshots; import org.opensearch.Version; -import org.opensearch.action.ActionFuture; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; @@ -45,12 +44,12 @@ 
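The pipeline and stored-script hunks above swap XContentType.JSON for MediaTypeRegistry.JSON, reflecting the media-type API's move into opensearch-core. A fragment showing the new call shape (pipeline id and body are illustrative):

    import org.opensearch.core.common.bytes.BytesArray;
    import org.opensearch.core.xcontent.MediaTypeRegistry;

    // Register an ingest pipeline, passing the media type via the core registry.
    clusterAdmin().preparePutPipeline(
        "my-pipeline",
        new BytesArray("{\"processors\": []}"),
        MediaTypeRegistry.JSON
    ).get();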
import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; import org.opensearch.client.Client; import org.opensearch.cluster.SnapshotsInProgress; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.threadpool.ThreadPool; @@ -77,7 +76,6 @@ protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) .put(ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING.getKey(), 0) // We have tests that check by-timestamp order - .put(FeatureFlags.REMOTE_STORE, "true") .build(); } @@ -112,7 +110,7 @@ public void testStatusApiConsistency() { assertEquals(snStatus.getStats().getTime(), snapshotInfo.endTime() - snapshotInfo.startTime()); } - public void testStatusAPICallForShallowCopySnapshot() throws Exception { + public void testStatusAPICallForShallowCopySnapshot() { disableRepoConsistencyCheck("Remote store repository is being used for the test"); internalCluster().startClusterManagerOnlyNode(); internalCluster().startDataOnlyNode(); @@ -120,10 +118,6 @@ public void testStatusAPICallForShallowCopySnapshot() throws Exception { final String snapshotRepoName = "snapshot-repo-name"; createRepository(snapshotRepoName, "fs", snapshotRepoSettingsForShallowCopy()); - final Path remoteStoreRepoPath = randomRepoPath(); - final String remoteStoreRepoName = "remote-store-repo-name"; - createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); - final String indexName = "index-1"; createIndex(indexName); ensureGreen(); @@ -133,20 +127,8 @@ public void testStatusAPICallForShallowCopySnapshot() throws Exception { } refresh(); - final String remoteStoreEnabledIndexName = "remote-index-1"; - final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepoName); - createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); - ensureGreen(); - - logger.info("--> indexing some data"); - for (int i = 0; i < 100; i++) { - index(remoteStoreEnabledIndexName, "_doc", Integer.toString(i), "foo", "bar" + i); - } - refresh(); - final String snapshot = "snapshot"; createFullSnapshot(snapshotRepoName, snapshot); - assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == 1); final SnapshotStatus snapshotStatus = getSnapshotStatus(snapshotRepoName, snapshot); assertThat(snapshotStatus.getState(), is(SnapshotsInProgress.State.SUCCESS)); @@ -157,14 +139,6 @@ public void testStatusAPICallForShallowCopySnapshot() throws Exception { assertThat(snapshotShardState.getStats().getTotalSize(), greaterThan(0L)); assertThat(snapshotShardState.getStats().getIncrementalFileCount(), greaterThan(0)); assertThat(snapshotShardState.getStats().getIncrementalSize(), greaterThan(0L)); - - // Validating that the incremental file count and incremental file size is zero for shallow copy - final SnapshotIndexShardStatus shallowSnapshotShardState = stateFirstShard(snapshotStatus, remoteStoreEnabledIndexName); - assertThat(shallowSnapshotShardState.getStage(), is(SnapshotIndexShardStage.DONE)); - assertThat(shallowSnapshotShardState.getStats().getTotalFileCount(), greaterThan(0)); - 
assertThat(shallowSnapshotShardState.getStats().getTotalSize(), greaterThan(0L)); - assertThat(shallowSnapshotShardState.getStats().getIncrementalFileCount(), is(0)); - assertThat(shallowSnapshotShardState.getStats().getIncrementalSize(), is(0L)); } public void testStatusAPICallInProgressSnapshot() throws Exception { @@ -245,63 +219,6 @@ public void testExceptionOnMissingShardLevelSnapBlob() throws IOException { ); } - public void testStatusAPIStatsForBackToBackShallowSnapshot() throws Exception { - disableRepoConsistencyCheck("Remote store repository is being used for the test"); - internalCluster().startClusterManagerOnlyNode(); - internalCluster().startDataOnlyNode(); - - final String snapshotRepoName = "snapshot-repo-name"; - createRepository(snapshotRepoName, "fs", snapshotRepoSettingsForShallowCopy()); - - final Path remoteStoreRepoPath = randomRepoPath(); - final String remoteStoreRepoName = "remote-store-repo-name"; - createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); - - final String indexName = "index-1"; - createIndex(indexName); - ensureGreen(); - logger.info("--> indexing some data"); - for (int i = 0; i < 100; i++) { - index(indexName, "_doc", Integer.toString(i), "foo", "bar" + i); - } - refresh(); - - final String remoteStoreEnabledIndexName = "remote-index-1"; - final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepoName); - createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); - ensureGreen(); - - logger.info("--> indexing some data"); - for (int i = 0; i < 100; i++) { - index(remoteStoreEnabledIndexName, "_doc", Integer.toString(i), "foo", "bar" + i); - } - refresh(); - - createFullSnapshot(snapshotRepoName, "test-snap-1"); - assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == 1); - - SnapshotStatus snapshotStatus = getSnapshotStatus(snapshotRepoName, "test-snap-1"); - assertThat(snapshotStatus.getState(), is(SnapshotsInProgress.State.SUCCESS)); - - SnapshotIndexShardStatus shallowSnapshotShardState = stateFirstShard(snapshotStatus, remoteStoreEnabledIndexName); - assertThat(shallowSnapshotShardState.getStage(), is(SnapshotIndexShardStage.DONE)); - final int totalFileCount = shallowSnapshotShardState.getStats().getTotalFileCount(); - final long totalSize = shallowSnapshotShardState.getStats().getTotalSize(); - final int incrementalFileCount = shallowSnapshotShardState.getStats().getIncrementalFileCount(); - final long incrementalSize = shallowSnapshotShardState.getStats().getIncrementalSize(); - - createFullSnapshot(snapshotRepoName, "test-snap-2"); - assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == 2); - - snapshotStatus = getSnapshotStatus(snapshotRepoName, "test-snap-2"); - assertThat(snapshotStatus.getState(), is(SnapshotsInProgress.State.SUCCESS)); - shallowSnapshotShardState = stateFirstShard(snapshotStatus, remoteStoreEnabledIndexName); - assertThat(shallowSnapshotShardState.getStats().getTotalFileCount(), equalTo(totalFileCount)); - assertThat(shallowSnapshotShardState.getStats().getTotalSize(), equalTo(totalSize)); - assertThat(shallowSnapshotShardState.getStats().getIncrementalFileCount(), equalTo(incrementalFileCount)); - assertThat(shallowSnapshotShardState.getStats().getIncrementalSize(), equalTo(incrementalSize)); - } - public void testGetSnapshotsWithoutIndices() throws Exception { createRepository("test-repo", "fs"); @@ -441,17 +358,12 @@ public void testSnapshotStatusOnFailedSnapshot() 
throws Exception { } public void testStatusAPICallInProgressShallowSnapshot() throws Exception { - disableRepoConsistencyCheck("Remote store repository is being used for the test"); internalCluster().startClusterManagerOnlyNode(); internalCluster().startDataOnlyNode(); final String snapshotRepoName = "snapshot-repo-name"; createRepository(snapshotRepoName, "mock", snapshotRepoSettingsForShallowCopy().put("block_on_data", true)); - final Path remoteStoreRepoPath = randomRepoPath(); - final String remoteStoreRepoName = "remote-store-repo-name"; - createRepository(remoteStoreRepoName, "mock", remoteStoreRepoPath); - final String indexName = "index-1"; createIndex(indexName); ensureGreen(); @@ -461,17 +373,6 @@ public void testStatusAPICallInProgressShallowSnapshot() throws Exception { } refresh(); - final String remoteStoreEnabledIndexName = "remote-index-1"; - final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepoName); - createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); - ensureGreen(); - - logger.info("--> indexing some data"); - for (int i = 0; i < 100; i++) { - index(remoteStoreEnabledIndexName, "_doc", Integer.toString(i), "foo", "bar" + i); - } - refresh(); - logger.info("--> snapshot"); ActionFuture<CreateSnapshotResponse> createSnapshotResponseActionFuture = startFullSnapshot(snapshotRepoName, "test-snap"); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SystemRepositoryIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SystemRepositoryIT.java new file mode 100644 index 0000000000000..28b84655a2cc7 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SystemRepositoryIT.java @@ -0,0 +1,76 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.snapshots; + +import org.opensearch.client.Client; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.repositories.RepositoryException; +import org.opensearch.repositories.fs.ReloadableFsRepository; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.Before; + +import java.nio.file.Path; + +import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class SystemRepositoryIT extends AbstractSnapshotIntegTestCase { + protected Path absolutePath; + final String systemRepoName = "system-repo-name"; + + @Before + public void setup() { + absolutePath = randomRepoPath().toAbsolutePath(); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(remoteStoreClusterSettings(systemRepoName, absolutePath)) + .build(); + } + + public void testRestrictedSettingsCantBeUpdated() { + disableRepoConsistencyCheck("System repository is being used for the test"); + + internalCluster().startNode(); + final Client client = client(); + final Settings.Builder repoSettings = Settings.builder().put("location", randomRepoPath()); + + RepositoryException e = expectThrows( + RepositoryException.class, + () -> client.admin().cluster().preparePutRepository(systemRepoName).setType("mock").setSettings(repoSettings).get() + ); + assertEquals( + e.getMessage(), + "[system-repo-name] trying to modify an unmodifiable attribute type of system " + + "repository from current value [reloadable-fs] to new value [mock]" + ); + } + + public void testSystemRepositoryNonRestrictedSettingsCanBeUpdated() { + disableRepoConsistencyCheck("System repository is being used for the test"); + + internalCluster().startNode(); + final Client client = client(); + final Settings.Builder repoSettings = Settings.builder().put("location", absolutePath).put("chunk_size", new ByteSizeValue(20)); + + assertAcked( + client.admin() + .cluster() + .preparePutRepository(systemRepoName) + .setType(ReloadableFsRepository.TYPE) + .setSettings(repoSettings) + .get() + ); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/update/UpdateIT.java b/server/src/internalClusterTest/java/org/opensearch/update/UpdateIT.java index 437b6c25ef4b6..b46d27bafb2a5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/update/UpdateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/update/UpdateIT.java @@ -33,7 +33,6 @@ package org.opensearch.update; import org.opensearch.OpenSearchTimeoutException; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.DocWriteResponse; import org.opensearch.action.admin.indices.alias.Alias; @@ -49,16 +48,17 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.engine.DocumentMissingException; import org.opensearch.index.engine.VersionConflictEngineException; import 
org.opensearch.plugins.Plugin; -import org.opensearch.core.rest.RestStatus; import org.opensearch.script.MockScriptPlugin; import org.opensearch.script.Script; import org.opensearch.script.ScriptType; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -669,7 +669,7 @@ public void run() { public void testStressUpdateDeleteConcurrency() throws Exception { // We create an index with merging disabled so that deletes don't get merged away - assertAcked(prepareCreate("test").setSettings(Settings.builder().put(MergePolicyConfig.INDEX_MERGE_ENABLED, false))); + assertAcked(prepareCreate("test").setSettings(Settings.builder().put(MergePolicyProvider.INDEX_MERGE_ENABLED, false))); ensureGreen(); Script fieldIncScript = new Script(ScriptType.INLINE, UPDATE_SCRIPTS, FIELD_INC_SCRIPT, Collections.singletonMap("field", "field")); diff --git a/server/src/internalClusterTest/java/org/opensearch/update/UpdateNoopIT.java b/server/src/internalClusterTest/java/org/opensearch/update/UpdateNoopIT.java index 292cd7f97ca58..8d66cf998cd7f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/update/UpdateNoopIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/update/UpdateNoopIT.java @@ -34,9 +34,9 @@ import org.opensearch.action.update.UpdateRequestBuilder; import org.opensearch.action.update.UpdateResponse; +import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.Before; diff --git a/server/src/internalClusterTest/java/org/opensearch/validate/SimpleValidateQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/validate/SimpleValidateQueryIT.java index 30701e3bae7b7..4ac2e1669ca67 100644 --- a/server/src/internalClusterTest/java/org/opensearch/validate/SimpleValidateQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/validate/SimpleValidateQueryIT.java @@ -34,10 +34,10 @@ import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.admin.indices.validate.query.ValidateQueryResponse; import org.opensearch.client.Client; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.Fuzziness; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.MoreLikeThisQueryBuilder.Item; @@ -48,7 +48,6 @@ import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; - import org.hamcrest.Matcher; import java.io.IOException; @@ -271,7 +270,10 @@ public void testExplainDateRangeInQueryString() { long twoMonthsAgo = now.minus(2, ChronoUnit.MONTHS).truncatedTo(ChronoUnit.DAYS).toEpochSecond() * 1000; long rangeEnd = (now.plus(1, ChronoUnit.DAYS).truncatedTo(ChronoUnit.DAYS).toEpochSecond() * 1000) - 1; - assertThat(response.getQueryExplanation().get(0).getExplanation(), equalTo("past:[" + twoMonthsAgo + " TO " + rangeEnd + "]")); + assertThat( + response.getQueryExplanation().get(0).getExplanation(), 
+ containsString("past:[" + twoMonthsAgo + " TO " + rangeEnd + "]") + ); assertThat(response.isValid(), equalTo(true)); } diff --git a/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentDocumentOperationIT.java b/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentDocumentOperationIT.java index 661efbaf9cd01..9c0a1fa26098c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentDocumentOperationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentDocumentOperationIT.java @@ -32,9 +32,9 @@ package org.opensearch.versioning; -import org.opensearch.action.ActionListener; import org.opensearch.action.index.IndexResponse; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; import org.opensearch.test.OpenSearchIntegTestCase; import java.util.Map; diff --git a/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentSeqNoVersioningIT.java b/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentSeqNoVersioningIT.java index ca41a7ddcd76e..7f016caf22149 100644 --- a/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentSeqNoVersioningIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentSeqNoVersioningIT.java @@ -38,15 +38,15 @@ import org.opensearch.cluster.coordination.LinearizabilityChecker; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.SuppressForbidden; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.InputStreamStreamInput; import org.opensearch.core.common.io.stream.NamedWriteable; import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.settings.Settings; import org.opensearch.discovery.AbstractDisruptionTestCase; import org.opensearch.index.engine.VersionConflictEngineException; import org.opensearch.test.OpenSearchIntegTestCase; @@ -93,7 +93,7 @@ * provided the primaryTerm and seqNo still matches. The reason we cannot assume it will not take place after receiving the failure * is that a request can fork into two because of retries on disconnect, and now race against itself. The retry might complete (and do a * dirty or stale read) before the forked off request gets to execute, and that one might still subsequently succeed. - * + * <p> * Such writes are not necessarily fully replicated and can be lost. There is no * guarantee that the previous value did not have the specified primaryTerm and seqNo</li> * <li>CAS writes with other exceptions might or might not have taken place. 
If they have taken place, then after invocation but not diff --git a/server/src/internalClusterTest/java/org/opensearch/versioning/SimpleVersioningIT.java b/server/src/internalClusterTest/java/org/opensearch/versioning/SimpleVersioningIT.java index 5898bba9762ad..8cd7b419f7989 100644 --- a/server/src/internalClusterTest/java/org/opensearch/versioning/SimpleVersioningIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/versioning/SimpleVersioningIT.java @@ -32,7 +32,6 @@ package org.opensearch.versioning; import org.apache.lucene.tests.util.TestUtil; -import org.opensearch.action.ActionResponse; import org.opensearch.action.DocWriteRequest; import org.opensearch.action.DocWriteResponse; import org.opensearch.action.bulk.BulkResponse; @@ -43,6 +42,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionResponse; import org.opensearch.index.VersionType; import org.opensearch.index.engine.VersionConflictEngineException; import org.opensearch.test.OpenSearchIntegTestCase; diff --git a/server/src/main/java/org/apache/lucene/analysis/miscellaneous/DisableGraphAttribute.java b/server/src/main/java/org/apache/lucene/analysis/miscellaneous/DisableGraphAttribute.java index 01058afa6db96..089d4c1dcfac2 100644 --- a/server/src/main/java/org/apache/lucene/analysis/miscellaneous/DisableGraphAttribute.java +++ b/server/src/main/java/org/apache/lucene/analysis/miscellaneous/DisableGraphAttribute.java @@ -33,8 +33,8 @@ package org.apache.lucene.analysis.miscellaneous; import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.util.Attribute; import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute; +import org.apache.lucene.util.Attribute; /** * This attribute can be used to indicate that the {@link PositionLengthAttribute} diff --git a/server/src/main/java/org/apache/lucene/index/OneMergeHelper.java b/server/src/main/java/org/apache/lucene/index/OneMergeHelper.java index 8f67bb87b5c42..0ada02a09d157 100644 --- a/server/src/main/java/org/apache/lucene/index/OneMergeHelper.java +++ b/server/src/main/java/org/apache/lucene/index/OneMergeHelper.java @@ -59,8 +59,8 @@ public static double getMbPerSec(Thread thread, MergePolicy.OneMerge merge) { * Returns total bytes written by this merge. **/ public static long getTotalBytesWritten(Thread thread, MergePolicy.OneMerge merge) throws IOException { - /** - * TODO: The number of bytes written during the merge should be accessible in OneMerge. + /* + TODO: The number of bytes written during the merge should be accessible in OneMerge. 
*/ if (thread instanceof ConcurrentMergeScheduler.MergeThread) { return ((ConcurrentMergeScheduler.MergeThread) thread).rateLimiter.getTotalBytesWritten(); diff --git a/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java b/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java index 3a36a6ff103e0..b47b974b96fed 100644 --- a/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java +++ b/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java @@ -35,13 +35,14 @@ import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.Term; -import org.apache.lucene.index.TermStates; import org.apache.lucene.index.TermState; +import org.apache.lucene.index.TermStates; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.DisjunctionMaxQuery; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryVisitor; import org.apache.lucene.search.TermQuery; @@ -93,16 +94,16 @@ public BlendedTermQuery(Term[] terms, float[] boosts) { } @Override - public Query rewrite(IndexReader reader) throws IOException { - Query rewritten = super.rewrite(reader); + public Query rewrite(IndexSearcher searcher) throws IOException { + Query rewritten = super.rewrite(searcher); if (rewritten != this) { return rewritten; } - IndexReaderContext context = reader.getContext(); + IndexReader reader = searcher.getIndexReader(); TermStates[] ctx = new TermStates[terms.length]; int[] docFreqs = new int[ctx.length]; for (int i = 0; i < terms.length; i++) { - ctx[i] = TermStates.build(context, terms[i], true); + ctx[i] = TermStates.build(searcher, terms[i], true); docFreqs[i] = ctx[i].docFreq(); } diff --git a/server/src/main/java/org/apache/lucene/queryparser/classic/XQueryParser.java b/server/src/main/java/org/apache/lucene/queryparser/classic/XQueryParser.java index 9a8c295d60ec7..2f00ea69207a7 100644 --- a/server/src/main/java/org/apache/lucene/queryparser/classic/XQueryParser.java +++ b/server/src/main/java/org/apache/lucene/queryparser/classic/XQueryParser.java @@ -38,7 +38,7 @@ /** * This class is just a workaround to make {@link QueryParser#handleBareFuzzy(String, Token, String)} accessible by sub-classes. * It is needed for {@link QueryParser}s that need to override the parsing of the slop in a fuzzy query (e.g. word<b>~2</b>, word<b>~</b>). - * + * <p> * TODO: We should maybe rewrite this with the flexible query parser which matches the same syntax with more freedom. 
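The BlendedTermQuery hunk above tracks Lucene 9.x's move of Query#rewrite from IndexReader to IndexSearcher, with TermStates.build taking the searcher directly. The shape of that migration, reduced to a self-contained toy query (class and field names are ours; this is a sketch, not the OpenSearch implementation):

    import java.io.IOException;

    import org.apache.lucene.index.Term;
    import org.apache.lucene.index.TermStates;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.QueryVisitor;
    import org.apache.lucene.search.TermQuery;

    final class SingleTermRewriteExample extends Query {
        private final Term term;

        SingleTermRewriteExample(Term term) {
            this.term = term;
        }

        @Override
        public Query rewrite(IndexSearcher searcher) throws IOException {
            // Term statistics are now built from the searcher, not a reader context.
            TermStates states = TermStates.build(searcher, term, true);
            return new TermQuery(term, states);
        }

        @Override
        public void visit(QueryVisitor visitor) {
            visitor.visitLeaf(this);
        }

        @Override
        public String toString(String field) {
            return "SingleTermRewriteExample(" + term + ")";
        }

        @Override
        public boolean equals(Object other) {
            return sameClassAs(other) && term.equals(((SingleTermRewriteExample) other).term);
        }

        @Override
        public int hashCode() {
            return classHash() ^ term.hashCode();
        }
    }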
*/ public class XQueryParser extends QueryParser { diff --git a/server/src/main/java/org/apache/lucene/search/grouping/CollapseTopFieldDocs.java b/server/src/main/java/org/apache/lucene/search/grouping/CollapseTopFieldDocs.java index fe26c313d72b2..961587113173d 100644 --- a/server/src/main/java/org/apache/lucene/search/grouping/CollapseTopFieldDocs.java +++ b/server/src/main/java/org/apache/lucene/search/grouping/CollapseTopFieldDocs.java @@ -33,13 +33,14 @@ import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.FieldDoc; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TopFieldDocs; import org.apache.lucene.search.TotalHits; import org.apache.lucene.util.PriorityQueue; -import org.opensearch.common.util.CollectionUtils; +import org.opensearch.core.common.util.CollectionUtils; import java.util.ArrayList; import java.util.HashSet; @@ -144,7 +145,7 @@ private static class MergeSortQueue extends PriorityQueue<ShardRef> { reverseMul = new int[sortFields.length]; for (int compIDX = 0; compIDX < sortFields.length; compIDX++) { final SortField sortField = sortFields[compIDX]; - comparators[compIDX] = sortField.getComparator(1, false); + comparators[compIDX] = sortField.getComparator(1, Pruning.NONE); reverseMul[compIDX] = sortField.getReverse() ? -1 : 1; } } diff --git a/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java b/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java index e4c299ba572b1..9ca0491bc29f5 100644 --- a/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java +++ b/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java @@ -50,7 +50,7 @@ * A collector that groups documents based on field values and returns {@link CollapseTopFieldDocs} * output. The collapsing is done in a single pass by selecting only the top sorted document per collapse key. * The value used for the collapse key of each group can be found in {@link CollapseTopFieldDocs#collapseValues}. - * + * <p> * TODO: If the sort is based on score we should propagate the minimum competitive score when <code>orderedGroups</code> is full. * This is safe for collapsing since the group <code>sort</code> is the same as the query sort. */ diff --git a/server/src/main/java/org/apache/lucene/search/uhighlight/BoundedBreakIteratorScanner.java b/server/src/main/java/org/apache/lucene/search/uhighlight/BoundedBreakIteratorScanner.java index 6fde39b16a59a..4edcdea42b53b 100644 --- a/server/src/main/java/org/apache/lucene/search/uhighlight/BoundedBreakIteratorScanner.java +++ b/server/src/main/java/org/apache/lucene/search/uhighlight/BoundedBreakIteratorScanner.java @@ -43,13 +43,13 @@ * If the {@link BreakIterator} cannot find a passage smaller than the maximum length, * a secondary break iterator is used to re-split the passage at the first boundary after * maximum length. - * + * <p> * This is useful to split passages created by {@link BreakIterator}s like `sentence` that * can create big outliers on semi-structured text. - * + * <p> * * WARNING: This break iterator is designed to work with the {@link UnifiedHighlighter}. - * + * <p> * TODO: We should be able to create passages incrementally, starting from the offset of the first match and expanding or not * depending on the offsets of subsequent matches.
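The getComparator change in the CollapseTopFieldDocs hunk above tracks Lucene's replacement of the boolean enableSkipping flag with the Pruning enum. A fragment showing the new call shape (the helper name is ours):

    import org.apache.lucene.search.FieldComparator;
    import org.apache.lucene.search.Pruning;
    import org.apache.lucene.search.SortField;

    static FieldComparator<?> mergeSortComparator(SortField sortField) {
        // Pruning.NONE: merge-sorting already-collected shard results must see
        // every hit, so the comparator is told not to skip non-competitive docs.
        return sortField.getComparator(1, Pruning.NONE);
    }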
This is currently impossible because {@link FieldHighlighter} uses * only the first matching offset to derive the start and end of each passage. diff --git a/server/src/main/java/org/apache/lucene/util/LongHashSet.java b/server/src/main/java/org/apache/lucene/util/LongHashSet.java new file mode 100644 index 0000000000000..a463e8a189585 --- /dev/null +++ b/server/src/main/java/org/apache/lucene/util/LongHashSet.java @@ -0,0 +1,136 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.apache.lucene.util; + +import org.apache.lucene.util.packed.PackedInts; + +import java.util.Arrays; +import java.util.Objects; +import java.util.stream.Collectors; +import java.util.stream.LongStream; + +/** Set of longs, optimized for docvalues usage */ +public final class LongHashSet implements Accountable { + private static final long BASE_RAM_BYTES = RamUsageEstimator.shallowSizeOfInstance(LongHashSet.class); + + private static final long MISSING = Long.MIN_VALUE; + + final long[] table; + final int mask; + final boolean hasMissingValue; + final int size; + /** minimum value in the set, or Long.MAX_VALUE for an empty set */ + public final long minValue; + /** maximum value in the set, or Long.MIN_VALUE for an empty set */ + public final long maxValue; + + /** Construct a set. Values must be in sorted order. */ + public LongHashSet(long[] values) { + int tableSize = Math.toIntExact(values.length * 3L / 2); + tableSize = 1 << PackedInts.bitsRequired(tableSize); // make it a power of 2 + assert tableSize >= values.length * 3L / 2; + table = new long[tableSize]; + Arrays.fill(table, MISSING); + mask = tableSize - 1; + boolean hasMissingValue = false; + int size = 0; + long previousValue = Long.MIN_VALUE; // for assert + for (long value : values) { + if (value == MISSING) { + size += hasMissingValue ? 0 : 1; + hasMissingValue = true; + } else if (add(value)) { + ++size; + } + assert value >= previousValue : "values must be provided in sorted order"; + previousValue = value; + } + this.hasMissingValue = hasMissingValue; + this.size = size; + this.minValue = values.length == 0 ? Long.MAX_VALUE : values[0]; + this.maxValue = values.length == 0 ? Long.MIN_VALUE : values[values.length - 1]; + } + + private boolean add(long l) { + assert l != MISSING; + final int slot = Long.hashCode(l) & mask; + for (int i = slot;; i = (i + 1) & mask) { + if (table[i] == MISSING) { + table[i] = l; + return true; + } else if (table[i] == l) { + // already added + return false; + } + } + } + + /** + * check for membership in the set. + * + * <p>You should use {@link #minValue} and {@link #maxValue} to guide/terminate iteration before + * calling this. 
+ */ + public boolean contains(long l) { + if (l == MISSING) { + return hasMissingValue; + } + final int slot = Long.hashCode(l) & mask; + for (int i = slot;; i = (i + 1) & mask) { + if (table[i] == MISSING) { + return false; + } else if (table[i] == l) { + return true; + } + } + } + + /** returns a stream of all values contained in this set */ + LongStream stream() { + LongStream stream = Arrays.stream(table).filter(v -> v != MISSING); + if (hasMissingValue) { + stream = LongStream.concat(LongStream.of(MISSING), stream); + } + return stream; + } + + @Override + public int hashCode() { + return Objects.hash(size, minValue, maxValue, mask, hasMissingValue, Arrays.hashCode(table)); + } + + @Override + public boolean equals(Object obj) { + if (obj != null && obj instanceof LongHashSet) { + LongHashSet that = (LongHashSet) obj; + return size == that.size + && minValue == that.minValue + && maxValue == that.maxValue + && mask == that.mask + && hasMissingValue == that.hasMissingValue + && Arrays.equals(table, that.table); + } + return false; + } + + @Override + public String toString() { + return stream().mapToObj(String::valueOf).collect(Collectors.joining(", ", "[", "]")); + } + + /** number of elements in the set */ + public int size() { + return size; + } + + @Override + public long ramBytesUsed() { + return BASE_RAM_BYTES + RamUsageEstimator.sizeOfObject(table); + } +} diff --git a/server/src/main/java/org/apache/lucene/util/SPIClassIterator.java b/server/src/main/java/org/apache/lucene/util/SPIClassIterator.java index 1480c9aeeb2d8..1dea54bcf0176 100644 --- a/server/src/main/java/org/apache/lucene/util/SPIClassIterator.java +++ b/server/src/main/java/org/apache/lucene/util/SPIClassIterator.java @@ -30,9 +30,9 @@ package org.apache.lucene.util; +import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; -import java.io.BufferedReader; import java.io.InputStreamReader; import java.net.URL; import java.nio.charset.StandardCharsets; diff --git a/server/src/main/java/org/apache/lucene/util/packed/XPacked64.java b/server/src/main/java/org/apache/lucene/util/packed/XPacked64.java index d811b245606ba..9e9f6d1fd817d 100644 --- a/server/src/main/java/org/apache/lucene/util/packed/XPacked64.java +++ b/server/src/main/java/org/apache/lucene/util/packed/XPacked64.java @@ -29,16 +29,17 @@ */ package org.apache.lucene.util.packed; -import java.io.IOException; -import java.util.Arrays; import org.apache.lucene.store.DataInput; import org.apache.lucene.util.RamUsageEstimator; +import java.io.IOException; +import java.util.Arrays; + /** * Forked from Lucene 8.x; removed in Lucene 9.0 * - * @todo further investigate a better alternative - * + * TODO: further investigate a better alternative + * <p> * Space optimized random access capable array of values with a fixed number of bits/value. Values * are packed contiguously. 
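+ * <p>Layout sketch (illustrative, following the contiguous packing described above): with
+ * {@code bitsPerValue = 21}, value {@code i} occupies bits {@code 21 * i} through
+ * {@code 21 * i + 20} of the backing bit stream and may therefore straddle two adjacent longs.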
* diff --git a/server/src/main/java/org/apache/lucene/util/packed/XPacked64SingleBlock.java b/server/src/main/java/org/apache/lucene/util/packed/XPacked64SingleBlock.java index ef7644c32a843..53cf4ed8e2273 100644 --- a/server/src/main/java/org/apache/lucene/util/packed/XPacked64SingleBlock.java +++ b/server/src/main/java/org/apache/lucene/util/packed/XPacked64SingleBlock.java @@ -16,16 +16,17 @@ */ package org.apache.lucene.util.packed; -import java.io.IOException; -import java.util.Arrays; import org.apache.lucene.store.DataInput; import org.apache.lucene.util.RamUsageEstimator; +import java.io.IOException; +import java.util.Arrays; + /** * Forked from Lucene 8.x; removed in Lucene 9.0 * - * @todo further investigate a better alternative - * + * TODO: further investigate a better alternative + * <p> * This class is similar to {@link Packed64} except that it trades space for speed by ensuring that * a single block needs to be read/written in order to read/write a value. */ diff --git a/server/src/main/java/org/apache/lucene/util/packed/XPackedInts.java b/server/src/main/java/org/apache/lucene/util/packed/XPackedInts.java index 9a277a7b5f2f4..4260d34ead7c9 100644 --- a/server/src/main/java/org/apache/lucene/util/packed/XPackedInts.java +++ b/server/src/main/java/org/apache/lucene/util/packed/XPackedInts.java @@ -16,9 +16,6 @@ */ package org.apache.lucene.util.packed; -import java.io.EOFException; -import java.io.IOException; -import java.util.Arrays; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.store.DataInput; import org.apache.lucene.store.DataOutput; @@ -32,11 +29,15 @@ import org.apache.lucene.util.packed.PackedInts.ReaderIterator; import org.apache.lucene.util.packed.PackedInts.Writer; +import java.io.EOFException; +import java.io.IOException; +import java.util.Arrays; + /** * Forked from Lucene 8.x; removed in Lucene 8.9 - * + * <p> * Todo: further investigate a better alternative - * + * <p> * Simplistic compression for array of unsigned long values. Each value is {@code >= 0} and {@code * <=} a specified maximum value. The values are stored as packed ints, with each value consuming a * fixed number of bits. diff --git a/server/src/main/java/org/opensearch/OpenSearchGenerationException.java b/server/src/main/java/org/opensearch/OpenSearchGenerationException.java index 50b021d41123f..51093390d57e0 100644 --- a/server/src/main/java/org/opensearch/OpenSearchGenerationException.java +++ b/server/src/main/java/org/opensearch/OpenSearchGenerationException.java @@ -32,6 +32,7 @@ package org.opensearch; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import java.io.IOException; @@ -39,8 +40,9 @@ /** * A generic exception indicating failure to generate. 
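+ * <p>For example (illustrative doc note, not from the original patch): this is the exception
+ * surfaced when serializing a document or mapping to XContent fails while building a request.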
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class OpenSearchGenerationException extends OpenSearchException { public OpenSearchGenerationException(String msg) { diff --git a/server/src/main/java/org/opensearch/OpenSearchServerException.java b/server/src/main/java/org/opensearch/OpenSearchServerException.java index d53164427debf..c5a5ce12b238c 100644 --- a/server/src/main/java/org/opensearch/OpenSearchServerException.java +++ b/server/src/main/java/org/opensearch/OpenSearchServerException.java @@ -8,11 +8,11 @@ package org.opensearch; -import org.opensearch.core.index.snapshots.IndexShardSnapshotException; - import static org.opensearch.OpenSearchException.OpenSearchExceptionHandle; import static org.opensearch.OpenSearchException.OpenSearchExceptionHandleRegistry.registerExceptionHandle; import static org.opensearch.OpenSearchException.UNKNOWN_VERSION_ADDED; +import static org.opensearch.Version.V_2_10_0; +import static org.opensearch.Version.V_2_13_0; import static org.opensearch.Version.V_2_1_0; import static org.opensearch.Version.V_2_4_0; import static org.opensearch.Version.V_2_5_0; @@ -288,14 +288,6 @@ public static void registerExceptions() { UNKNOWN_VERSION_ADDED ) ); - registerExceptionHandle( - new OpenSearchExceptionHandle( - org.opensearch.OpenSearchParseException.class, - org.opensearch.OpenSearchParseException::new, - 35, - UNKNOWN_VERSION_ADDED - ) - ); registerExceptionHandle( new OpenSearchExceptionHandle( org.opensearch.search.SearchException.class, @@ -684,7 +676,12 @@ public static void registerExceptions() { ) ); registerExceptionHandle( - new OpenSearchExceptionHandle(IndexShardSnapshotException.class, IndexShardSnapshotException::new, 98, UNKNOWN_VERSION_ADDED) + new OpenSearchExceptionHandle( + org.opensearch.core.index.snapshots.IndexShardSnapshotException.class, + org.opensearch.core.index.snapshots.IndexShardSnapshotException::new, + 98, + UNKNOWN_VERSION_ADDED + ) ); registerExceptionHandle( new OpenSearchExceptionHandle( @@ -915,14 +912,6 @@ public static void registerExceptions() { UNKNOWN_VERSION_ADDED ) ); - registerExceptionHandle( - new OpenSearchExceptionHandle( - org.opensearch.common.breaker.CircuitBreakingException.class, - org.opensearch.common.breaker.CircuitBreakingException::new, - 133, - UNKNOWN_VERSION_ADDED - ) - ); registerExceptionHandle( new OpenSearchExceptionHandle( org.opensearch.transport.NodeNotConnectedException.class, @@ -1003,14 +992,6 @@ public static void registerExceptions() { UNKNOWN_VERSION_ADDED ) ); - registerExceptionHandle( - new OpenSearchExceptionHandle( - org.opensearch.tasks.TaskCancelledException.class, - org.opensearch.tasks.TaskCancelledException::new, - 146, - UNKNOWN_VERSION_ADDED - ) - ); registerExceptionHandle( new OpenSearchExceptionHandle( org.opensearch.env.ShardLockObtainFailedException.class, @@ -1196,6 +1177,30 @@ public static void registerExceptions() { V_2_7_0 ) ); + registerExceptionHandle( + new OpenSearchExceptionHandle( + org.opensearch.crypto.CryptoRegistryException.class, + org.opensearch.crypto.CryptoRegistryException::new, + 171, + V_2_10_0 + ) + ); + registerExceptionHandle( + new OpenSearchExceptionHandle( + org.opensearch.action.admin.indices.view.ViewNotFoundException.class, + org.opensearch.action.admin.indices.view.ViewNotFoundException::new, + 172, + V_2_13_0 + ) + ); + registerExceptionHandle( + new OpenSearchExceptionHandle( + org.opensearch.action.admin.indices.view.ViewAlreadyExistsException.class, + 
org.opensearch.action.admin.indices.view.ViewAlreadyExistsException::new, + 173, + V_2_13_0 + ) + ); registerExceptionHandle( new OpenSearchExceptionHandle( org.opensearch.cluster.block.IndexCreateBlockException.class, diff --git a/server/src/main/java/org/opensearch/SpecialPermission.java b/server/src/main/java/org/opensearch/SpecialPermission.java index 8a694d4543f32..8348f0844acc6 100644 --- a/server/src/main/java/org/opensearch/SpecialPermission.java +++ b/server/src/main/java/org/opensearch/SpecialPermission.java @@ -98,6 +98,7 @@ public SpecialPermission(String name, String actions) { /** * Check that the current stack has {@link SpecialPermission} access according to the {@link SecurityManager}. */ + @SuppressWarnings("removal") public static void check() { SecurityManager sm = System.getSecurityManager(); if (sm != null) { diff --git a/server/src/main/java/org/opensearch/action/ActionListenerResponseHandler.java b/server/src/main/java/org/opensearch/action/ActionListenerResponseHandler.java index 7899324a3301e..86cbb8a6307be 100644 --- a/server/src/main/java/org/opensearch/action/ActionListenerResponseHandler.java +++ b/server/src/main/java/org/opensearch/action/ActionListenerResponseHandler.java @@ -32,12 +32,13 @@ package org.opensearch.action; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.threadpool.ThreadPool; -import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportException; -import org.opensearch.transport.TransportResponse; +import org.opensearch.transport.TransportResponseHandler; import java.io.IOException; import java.util.Objects; diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index 2ce1d4bcd4b02..b19bf9590f43b 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -40,10 +40,10 @@ import org.opensearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsAction; import org.opensearch.action.admin.cluster.configuration.TransportAddVotingConfigExclusionsAction; import org.opensearch.action.admin.cluster.configuration.TransportClearVotingConfigExclusionsAction; -import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateAction; -import org.opensearch.action.admin.cluster.decommission.awareness.get.TransportGetDecommissionStateAction; import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateAction; import org.opensearch.action.admin.cluster.decommission.awareness.delete.TransportDeleteDecommissionStateAction; +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateAction; +import org.opensearch.action.admin.cluster.decommission.awareness.get.TransportGetDecommissionStateAction; import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionAction; import org.opensearch.action.admin.cluster.decommission.awareness.put.TransportDecommissionAction; import org.opensearch.action.admin.cluster.health.ClusterHealthAction; @@ -56,9 +56,7 @@ import org.opensearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsAction; import org.opensearch.action.admin.cluster.node.reload.TransportNodesReloadSecureSettingsAction; import 
org.opensearch.action.admin.cluster.node.stats.NodesStatsAction; -import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsAction; import org.opensearch.action.admin.cluster.node.stats.TransportNodesStatsAction; -import org.opensearch.action.admin.cluster.remotestore.stats.TransportRemoteStoreStatsAction; import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksAction; import org.opensearch.action.admin.cluster.node.tasks.cancel.TransportCancelTasksAction; import org.opensearch.action.admin.cluster.node.tasks.get.GetTaskAction; @@ -71,6 +69,8 @@ import org.opensearch.action.admin.cluster.remote.TransportRemoteInfoAction; import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreAction; import org.opensearch.action.admin.cluster.remotestore.restore.TransportRestoreRemoteStoreAction; +import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsAction; +import org.opensearch.action.admin.cluster.remotestore.stats.TransportRemoteStoreStatsAction; import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryAction; import org.opensearch.action.admin.cluster.repositories.cleanup.TransportCleanupRepositoryAction; import org.opensearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction; @@ -175,11 +175,11 @@ import org.opensearch.action.admin.indices.recovery.TransportRecoveryAction; import org.opensearch.action.admin.indices.refresh.RefreshAction; import org.opensearch.action.admin.indices.refresh.TransportRefreshAction; +import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsAction; +import org.opensearch.action.admin.indices.replication.TransportSegmentReplicationStatsAction; import org.opensearch.action.admin.indices.resolve.ResolveIndexAction; import org.opensearch.action.admin.indices.rollover.RolloverAction; import org.opensearch.action.admin.indices.rollover.TransportRolloverAction; -import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsAction; -import org.opensearch.action.admin.indices.replication.TransportSegmentReplicationStatsAction; import org.opensearch.action.admin.indices.segments.IndicesSegmentsAction; import org.opensearch.action.admin.indices.segments.PitSegmentsAction; import org.opensearch.action.admin.indices.segments.TransportIndicesSegmentsAction; @@ -224,6 +224,12 @@ import org.opensearch.action.admin.indices.upgrade.post.UpgradeSettingsAction; import org.opensearch.action.admin.indices.validate.query.TransportValidateQueryAction; import org.opensearch.action.admin.indices.validate.query.ValidateQueryAction; +import org.opensearch.action.admin.indices.view.CreateViewAction; +import org.opensearch.action.admin.indices.view.DeleteViewAction; +import org.opensearch.action.admin.indices.view.GetViewAction; +import org.opensearch.action.admin.indices.view.ListViewNamesAction; +import org.opensearch.action.admin.indices.view.SearchViewAction; +import org.opensearch.action.admin.indices.view.UpdateViewAction; import org.opensearch.action.bulk.BulkAction; import org.opensearch.action.bulk.TransportBulkAction; import org.opensearch.action.bulk.TransportShardBulkAction; @@ -256,10 +262,10 @@ import org.opensearch.action.search.DeletePitAction; import org.opensearch.action.search.DeleteSearchPipelineAction; import org.opensearch.action.search.DeleteSearchPipelineTransportAction; +import org.opensearch.action.search.GetAllPitsAction; import org.opensearch.action.search.GetSearchPipelineAction; import 
org.opensearch.action.search.GetSearchPipelineTransportAction; import org.opensearch.action.search.MultiSearchAction; -import org.opensearch.action.search.GetAllPitsAction; import org.opensearch.action.search.PutSearchPipelineAction; import org.opensearch.action.search.PutSearchPipelineTransportAction; import org.opensearch.action.search.SearchAction; @@ -294,14 +300,16 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsFilter; import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.extensions.ExtensionsManager; import org.opensearch.extensions.action.ExtensionProxyAction; import org.opensearch.extensions.action.ExtensionProxyTransportAction; import org.opensearch.extensions.rest.RestInitializeExtensionAction; -import org.opensearch.index.seqno.RetentionLeaseActions; +import org.opensearch.extensions.rest.RestSendToExtensionAction; import org.opensearch.identity.IdentityService; +import org.opensearch.index.seqno.RetentionLeaseActions; import org.opensearch.indices.SystemIndices; -import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.persistent.CompletionPersistentTaskAction; import org.opensearch.persistent.RemovePersistentTaskAction; import org.opensearch.persistent.StartPersistentTaskAction; @@ -331,6 +339,7 @@ import org.opensearch.rest.action.admin.cluster.RestClusterStatsAction; import org.opensearch.rest.action.admin.cluster.RestClusterUpdateSettingsAction; import org.opensearch.rest.action.admin.cluster.RestCreateSnapshotAction; +import org.opensearch.rest.action.admin.cluster.RestDecommissionAction; import org.opensearch.rest.action.admin.cluster.RestDeleteDecommissionStateAction; import org.opensearch.rest.action.admin.cluster.RestDeleteRepositoryAction; import org.opensearch.rest.action.admin.cluster.RestDeleteSnapshotAction; @@ -348,7 +357,6 @@ import org.opensearch.rest.action.admin.cluster.RestNodesStatsAction; import org.opensearch.rest.action.admin.cluster.RestNodesUsageAction; import org.opensearch.rest.action.admin.cluster.RestPendingClusterTasksAction; -import org.opensearch.rest.action.admin.cluster.RestDecommissionAction; import org.opensearch.rest.action.admin.cluster.RestPutRepositoryAction; import org.opensearch.rest.action.admin.cluster.RestPutStoredScriptAction; import org.opensearch.rest.action.admin.cluster.RestReloadSecureSettingsAction; @@ -407,16 +415,17 @@ import org.opensearch.rest.action.admin.indices.RestUpgradeAction; import org.opensearch.rest.action.admin.indices.RestUpgradeStatusAction; import org.opensearch.rest.action.admin.indices.RestValidateQueryAction; +import org.opensearch.rest.action.admin.indices.RestViewAction; import org.opensearch.rest.action.cat.AbstractCatAction; import org.opensearch.rest.action.cat.RestAliasAction; import org.opensearch.rest.action.cat.RestAllocationAction; import org.opensearch.rest.action.cat.RestCatAction; import org.opensearch.rest.action.cat.RestCatRecoveryAction; import org.opensearch.rest.action.cat.RestCatSegmentReplicationAction; +import org.opensearch.rest.action.cat.RestClusterManagerAction; import org.opensearch.rest.action.cat.RestFielddataAction; import org.opensearch.rest.action.cat.RestHealthAction; import org.opensearch.rest.action.cat.RestIndicesAction; -import org.opensearch.rest.action.cat.RestClusterManagerAction; import org.opensearch.rest.action.cat.RestNodeAttrsAction; import 
org.opensearch.rest.action.cat.RestNodesAction; import org.opensearch.rest.action.cat.RestPitSegmentsAction; @@ -455,7 +464,6 @@ import org.opensearch.rest.action.search.RestPutSearchPipelineAction; import org.opensearch.rest.action.search.RestSearchAction; import org.opensearch.rest.action.search.RestSearchScrollAction; -import org.opensearch.extensions.rest.RestSendToExtensionAction; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; import org.opensearch.usage.UsageService; @@ -720,6 +728,14 @@ public <Request extends ActionRequest, Response extends ActionResponse> void reg actions.register(ResolveIndexAction.INSTANCE, ResolveIndexAction.TransportAction.class); actions.register(DataStreamsStatsAction.INSTANCE, DataStreamsStatsAction.TransportAction.class); + // Views: + actions.register(CreateViewAction.INSTANCE, CreateViewAction.TransportAction.class); + actions.register(DeleteViewAction.INSTANCE, DeleteViewAction.TransportAction.class); + actions.register(GetViewAction.INSTANCE, GetViewAction.TransportAction.class); + actions.register(UpdateViewAction.INSTANCE, UpdateViewAction.TransportAction.class); + actions.register(ListViewNamesAction.INSTANCE, ListViewNamesAction.TransportAction.class); + actions.register(SearchViewAction.INSTANCE, SearchViewAction.TransportAction.class); + // Persistent tasks: actions.register(StartPersistentTaskAction.INSTANCE, StartPersistentTaskAction.TransportAction.class); actions.register(UpdatePersistentTaskStatusAction.INSTANCE, UpdatePersistentTaskStatusAction.TransportAction.class); @@ -914,6 +930,14 @@ public void initRestHandlers(Supplier<DiscoveryNodes> nodesInCluster) { registerHandler.accept(new RestResolveIndexAction()); registerHandler.accept(new RestDataStreamsStatsAction()); + // View API + registerHandler.accept(new RestViewAction.CreateViewHandler()); + registerHandler.accept(new RestViewAction.DeleteViewHandler()); + registerHandler.accept(new RestViewAction.GetViewHandler()); + registerHandler.accept(new RestViewAction.UpdateViewHandler()); + registerHandler.accept(new RestViewAction.SearchViewHandler()); + registerHandler.accept(new RestViewAction.ListViewNamesHandler()); + // CAT API registerHandler.accept(new RestAllocationAction()); registerHandler.accept(new RestCatSegmentReplicationAction()); @@ -971,12 +995,8 @@ public void initRestHandlers(Supplier<DiscoveryNodes> nodesInCluster) { registerHandler.accept(new RestCatAction(catActions)); registerHandler.accept(new RestDecommissionAction()); registerHandler.accept(new RestGetDecommissionStateAction()); - - // Remote Store APIs - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE)) { - registerHandler.accept(new RestRemoteStoreStatsAction()); - registerHandler.accept(new RestRestoreRemoteStoreAction()); - } + registerHandler.accept(new RestRemoteStoreStatsAction()); + registerHandler.accept(new RestRestoreRemoteStoreAction()); } @Override diff --git a/server/src/main/java/org/opensearch/action/ActionRequestBuilder.java b/server/src/main/java/org/opensearch/action/ActionRequestBuilder.java index 27358a0412468..1e0af8c9f9a73 100644 --- a/server/src/main/java/org/opensearch/action/ActionRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/ActionRequestBuilder.java @@ -33,7 +33,10 @@ package org.opensearch.action; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import 
org.opensearch.core.action.ActionResponse; import java.util.Objects; diff --git a/server/src/main/java/org/opensearch/action/ActionRequestValidationException.java b/server/src/main/java/org/opensearch/action/ActionRequestValidationException.java index ffba4d2eb50c0..c30da41066dbd 100644 --- a/server/src/main/java/org/opensearch/action/ActionRequestValidationException.java +++ b/server/src/main/java/org/opensearch/action/ActionRequestValidationException.java @@ -33,10 +33,12 @@ package org.opensearch.action; import org.opensearch.common.ValidationException; +import org.opensearch.common.annotation.PublicApi; /** * Base exception for an action request validation extendable by plugins * * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ActionRequestValidationException extends ValidationException {} diff --git a/server/src/main/java/org/opensearch/action/ActionRunnable.java b/server/src/main/java/org/opensearch/action/ActionRunnable.java index 2c3f70afda75d..073c2e784a404 100644 --- a/server/src/main/java/org/opensearch/action/ActionRunnable.java +++ b/server/src/main/java/org/opensearch/action/ActionRunnable.java @@ -36,6 +36,7 @@ import org.opensearch.common.CheckedRunnable; import org.opensearch.common.CheckedSupplier; import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.core.action.ActionListener; /** * Base class for {@link Runnable}s that need to call {@link ActionListener#onFailure(Exception)} in case an uncaught diff --git a/server/src/main/java/org/opensearch/action/ActionType.java b/server/src/main/java/org/opensearch/action/ActionType.java index 3fad07ca6fce1..dae931bdd1891 100644 --- a/server/src/main/java/org/opensearch/action/ActionType.java +++ b/server/src/main/java/org/opensearch/action/ActionType.java @@ -32,9 +32,10 @@ package org.opensearch.action; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.transport.TransportRequestOptions; /** diff --git a/server/src/main/java/org/opensearch/action/AliasesRequest.java b/server/src/main/java/org/opensearch/action/AliasesRequest.java index 4c5d5628b1aac..8fe6f49c08af0 100644 --- a/server/src/main/java/org/opensearch/action/AliasesRequest.java +++ b/server/src/main/java/org/opensearch/action/AliasesRequest.java @@ -32,14 +32,17 @@ package org.opensearch.action; +import org.opensearch.common.annotation.PublicApi; + /** * Needs to be implemented by all {@link org.opensearch.action.ActionRequest} subclasses that relate to * one or more indices and one or more aliases. Meant to be used for alias management requests (e.g. add/remove alias, * get aliases) that hold aliases and indices in separate fields. * Allows to retrieve which indices and aliases the action relates to. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface AliasesRequest extends IndicesRequest.Replaceable { /** @@ -54,7 +57,7 @@ public interface AliasesRequest extends IndicesRequest.Replaceable { /** * Replaces current aliases with the provided aliases. - * + * <p> * Sometimes alias expressions need to be resolved to concrete aliases prior to executing the transport action. */ void replaceAliases(String... 
aliases); diff --git a/server/src/main/java/org/opensearch/action/DocWriteRequest.java b/server/src/main/java/org/opensearch/action/DocWriteRequest.java index 65242f4925669..e09c76430defc 100644 --- a/server/src/main/java/org/opensearch/action/DocWriteRequest.java +++ b/server/src/main/java/org/opensearch/action/DocWriteRequest.java @@ -38,11 +38,12 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.update.UpdateRequest; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.lucene.uid.Versions; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.lucene.uid.Versions; -import org.opensearch.index.VersionType; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.VersionType; import java.io.IOException; import java.util.Locale; @@ -55,8 +56,9 @@ * Generic interface to group ActionRequest, which perform writes to a single document * Action requests implementing this can be part of {@link org.opensearch.action.bulk.BulkRequest} * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface DocWriteRequest<T> extends IndicesRequest, Accountable { // Flag set for disallowing index auto creation for an individual write request. @@ -149,7 +151,7 @@ public interface DocWriteRequest<T> extends IndicesRequest, Accountable { /** * If set, only perform this request if the document's last modification was assigned this primary term. - * + * <p> * If the document's last modification was assigned a different term, a * {@link org.opensearch.index.engine.VersionConflictEngineException} will be thrown. */ @@ -169,7 +171,10 @@ public interface DocWriteRequest<T> extends IndicesRequest, Accountable { /** * Requested operation type to perform on the document + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") enum OpType { /** * Index the source. 
If there is an existing document with the id, it will diff --git a/server/src/main/java/org/opensearch/action/DocWriteResponse.java b/server/src/main/java/org/opensearch/action/DocWriteResponse.java index d1e3647bc3496..aada56ed93fd3 100644 --- a/server/src/main/java/org/opensearch/action/DocWriteResponse.java +++ b/server/src/main/java/org/opensearch/action/DocWriteResponse.java @@ -38,18 +38,19 @@ import org.opensearch.action.support.replication.ReplicationResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.xcontent.StatusToXContentObject; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.xcontent.StatusToXContentObject; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.index.Index; import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.seqno.SequenceNumbers; -import org.opensearch.core.index.shard.ShardId; -import org.opensearch.core.rest.RestStatus; import java.io.IOException; import java.io.UnsupportedEncodingException; @@ -64,8 +65,9 @@ /** * A base class for the response of a write operation that involves a single doc * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class DocWriteResponse extends ReplicationResponse implements WriteResponse, StatusToXContentObject { private static final String _SHARDS = "_shards"; @@ -80,7 +82,10 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr /** * An enum that represents the results of CRUD operations, primarily used to communicate the type of * operation that occurred. + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Result implements Writeable { CREATED(0), UPDATED(1), @@ -341,7 +346,7 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t /** * Parse the output of the {@link #innerToXContent(XContentBuilder, Params)} method. - * + * <p> * This method is intended to be called by subclasses and must be called multiple times to parse all the information concerning * {@link DocWriteResponse} objects. It always parses the current token, updates the given parsing context accordingly * if needed and then immediately returns. diff --git a/server/src/main/java/org/opensearch/action/IndicesRequest.java b/server/src/main/java/org/opensearch/action/IndicesRequest.java index 7e4c2f5076cda..f42818595d3cc 100644 --- a/server/src/main/java/org/opensearch/action/IndicesRequest.java +++ b/server/src/main/java/org/opensearch/action/IndicesRequest.java @@ -33,6 +33,7 @@ package org.opensearch.action; import org.opensearch.action.support.IndicesOptions; +import org.opensearch.common.annotation.PublicApi; /** * Needs to be implemented by all {@link org.opensearch.action.ActionRequest} subclasses that relate to @@ -40,8 +41,9 @@ * In case of internal requests originating during the distributed execution of an external request, * they will still return the indices that the original request related to. 
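+ * <p>Illustrative example (not part of the original patch): a shard-level request created while
+ * executing a search still reports the indices of the user's original search request here, not
+ * the single concrete index it runs against.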
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface IndicesRequest { /** diff --git a/server/src/main/java/org/opensearch/action/LatchedActionListener.java b/server/src/main/java/org/opensearch/action/LatchedActionListener.java index 57d5c9282465f..c2400d3b59850 100644 --- a/server/src/main/java/org/opensearch/action/LatchedActionListener.java +++ b/server/src/main/java/org/opensearch/action/LatchedActionListener.java @@ -32,6 +32,8 @@ package org.opensearch.action; +import org.opensearch.core.action.ActionListener; + import java.util.concurrent.CountDownLatch; /** diff --git a/server/src/main/java/org/opensearch/action/ListenableActionFuture.java b/server/src/main/java/org/opensearch/action/ListenableActionFuture.java index 95bf3a2ae1384..1679ec804643e 100644 --- a/server/src/main/java/org/opensearch/action/ListenableActionFuture.java +++ b/server/src/main/java/org/opensearch/action/ListenableActionFuture.java @@ -32,6 +32,9 @@ package org.opensearch.action; +import org.opensearch.common.action.ActionFuture; +import org.opensearch.core.action.ActionListener; + /** * An {@link ActionFuture} that listeners can be added to. * diff --git a/server/src/main/java/org/opensearch/action/OriginalIndices.java b/server/src/main/java/org/opensearch/action/OriginalIndices.java index 1e24c64bc60fc..1c26bf9d416f5 100644 --- a/server/src/main/java/org/opensearch/action/OriginalIndices.java +++ b/server/src/main/java/org/opensearch/action/OriginalIndices.java @@ -33,6 +33,7 @@ package org.opensearch.action; import org.opensearch.action.support.IndicesOptions; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -42,8 +43,9 @@ /** * Used to keep track of original indices within internal (e.g. 
shard level) requests * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class OriginalIndices implements IndicesRequest { // constant to use when original indices are not applicable and will not be serialized across the wire diff --git a/server/src/main/java/org/opensearch/action/RoutingMissingException.java b/server/src/main/java/org/opensearch/action/RoutingMissingException.java index 2395bfb21bf4b..f99e86820bbb2 100644 --- a/server/src/main/java/org/opensearch/action/RoutingMissingException.java +++ b/server/src/main/java/org/opensearch/action/RoutingMissingException.java @@ -35,8 +35,8 @@ import org.opensearch.OpenSearchException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.index.mapper.MapperService; import org.opensearch.core.rest.RestStatus; +import org.opensearch.index.mapper.MapperService; import java.io.IOException; import java.util.Objects; diff --git a/server/src/main/java/org/opensearch/action/StepListener.java b/server/src/main/java/org/opensearch/action/StepListener.java index b99fe20a46ed8..5701d20db90b8 100644 --- a/server/src/main/java/org/opensearch/action/StepListener.java +++ b/server/src/main/java/org/opensearch/action/StepListener.java @@ -33,9 +33,11 @@ package org.opensearch.action; import org.opensearch.common.CheckedConsumer; -import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.FutureUtils; import org.opensearch.common.util.concurrent.ListenableFuture; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.NotifyOnceListener; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; diff --git a/server/src/main/java/org/opensearch/action/TaskOperationFailure.java b/server/src/main/java/org/opensearch/action/TaskOperationFailure.java index 46ad8dc796d09..5948dd3e2b7cb 100644 --- a/server/src/main/java/org/opensearch/action/TaskOperationFailure.java +++ b/server/src/main/java/org/opensearch/action/TaskOperationFailure.java @@ -38,11 +38,11 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.rest.RestStatus; import java.io.IOException; @@ -50,7 +50,7 @@ /** * Information about task operation failures - * + * <p> * The class is final due to serialization limitations * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/action/TransportActionNodeProxy.java b/server/src/main/java/org/opensearch/action/TransportActionNodeProxy.java index a04e89d9a1f8f..1088a3a4f8679 100644 --- a/server/src/main/java/org/opensearch/action/TransportActionNodeProxy.java +++ b/server/src/main/java/org/opensearch/action/TransportActionNodeProxy.java @@ -34,6 +34,8 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; import org.opensearch.transport.TransportRequestOptions; import 
org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java index 625aa91e6ea7f..4a5c3b076faba 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java @@ -35,6 +35,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -48,8 +49,9 @@ /** * A request to explain the allocation of a shard in the cluster * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterAllocationExplainRequest extends ClusterManagerNodeRequest<ClusterAllocationExplainRequest> { private static final ObjectParser<ClusterAllocationExplainRequest, Void> PARSER = new ObjectParser<>("cluster/allocation/explain"); @@ -95,7 +97,7 @@ public ClusterAllocationExplainRequest(StreamInput in) throws IOException { * Create a new allocation explain request. If {@code primary} is false, the first unassigned replica * will be picked for explanation. If no replicas are unassigned, the first assigned replica will * be explained. - * + * <p> * Package private for testing. */ ClusterAllocationExplainRequest(String index, int shard, boolean primary, @Nullable String currentNode) { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestBuilder.java index d85cb3929873d..31781dda04957 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Builder for requests to explain the allocation of a shard in the cluster * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterAllocationExplainRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< ClusterAllocationExplainRequest, ClusterAllocationExplainResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainResponse.java index 5987d718a20ba..17afdd862cf66 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainResponse.java @@ -32,7 +32,8 @@ package org.opensearch.action.admin.cluster.allocation; -import org.opensearch.action.ActionResponse; +import org.opensearch.common.annotation.PublicApi; +import 
org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -41,8 +42,9 @@ /** * Explanation response for a shard in the cluster * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterAllocationExplainResponse extends ActionResponse { private ClusterAllocationExplanation cae; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java index a80e5ff0ea7ab..3c8f07613561d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java @@ -40,12 +40,13 @@ import org.opensearch.cluster.routing.allocation.AllocationDecision; import org.opensearch.cluster.routing.allocation.ShardAllocationDecision; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.index.shard.ShardId; import java.io.IOException; import java.time.Instant; @@ -58,8 +59,9 @@ * or if it is not unassigned, then which nodes it could possibly be relocated to. * It is an immutable class. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ClusterAllocationExplanation implements ToXContentObject, Writeable { private final ShardRouting shardRouting; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java index 30d1e617258c5..d59c7e50c55cc 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java @@ -34,7 +34,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterInfo; @@ -53,6 +52,7 @@ import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.snapshots.SnapshotsInfoService; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java index 5c48f8adf8240..9fcb82b5c4a21 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java @@ -37,11 +37,11 @@ import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfigExclusion; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; import java.io.IOException; import java.util.Arrays; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsResponse.java index 3c511afe1cbff..7855f9643c4ce 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsResponse.java @@ -31,7 +31,7 @@ package org.opensearch.action.admin.cluster.configuration; -import org.opensearch.action.ActionResponse; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContentObject; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsRequest.java index 95c6d2f4afd47..eda0175c90728 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsRequest.java @@ -33,9 +33,9 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.TimeValue; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsResponse.java index afbf7af81363f..eb2ccaa7f5390 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsResponse.java @@ -31,7 +31,7 @@ package org.opensearch.action.admin.cluster.configuration; -import org.opensearch.action.ActionResponse; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContentObject; diff --git 
a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java index 727458e8c0971..f578925e54ce7 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java @@ -35,7 +35,6 @@ import org.apache.logging.log4j.Logger; import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchTimeoutException; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; @@ -49,12 +48,13 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Priority; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool.Names; import org.opensearch.transport.TransportService; @@ -64,8 +64,8 @@ import java.util.function.Predicate; import java.util.stream.Collectors; -import static org.opensearch.action.admin.cluster.configuration.VotingConfigExclusionsHelper.resolveVotingConfigExclusionsAndCheckMaximum; import static org.opensearch.action.admin.cluster.configuration.VotingConfigExclusionsHelper.addExclusionAndGetState; +import static org.opensearch.action.admin.cluster.configuration.VotingConfigExclusionsHelper.resolveVotingConfigExclusionsAndCheckMaximum; /** * Transport endpoint action for adding exclusions to voting config diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java index 83b08626a2fcd..c3c08b9636518 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java @@ -35,7 +35,6 @@ import org.apache.logging.log4j.Logger; import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchTimeoutException; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; @@ -49,8 +48,9 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Priority; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.threadpool.ThreadPool; import 
org.opensearch.threadpool.ThreadPool.Names; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/crypto/CryptoSettings.java b/server/src/main/java/org/opensearch/action/admin/cluster/crypto/CryptoSettings.java new file mode 100644 index 0000000000000..6e4d7b84f204e --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/crypto/CryptoSettings.java @@ -0,0 +1,183 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.crypto; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Map; + +import static org.opensearch.action.ValidateActions.addValidationError; +import static org.opensearch.common.settings.Settings.Builder.EMPTY_SETTINGS; +import static org.opensearch.common.settings.Settings.readSettingsFromStream; +import static org.opensearch.common.settings.Settings.writeSettingsToStream; + +/** + * Crypto settings supplied during a put repository request + * + * @opensearch.api + */ +@PublicApi(since = "1.0.0") +public class CryptoSettings implements Writeable, ToXContentObject { + private String keyProviderName; + private String keyProviderType; + private Settings settings = EMPTY_SETTINGS; + + public CryptoSettings(StreamInput in) throws IOException { + keyProviderName = in.readString(); + keyProviderType = in.readString(); + settings = readSettingsFromStream(in); + } + + public CryptoSettings(String keyProviderName) { + this.keyProviderName = keyProviderName; + } + + /** + * Validate settings supplied in put repository request. + * @return Exception in case validation fails. + */ + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (keyProviderName == null) { + validationException = addValidationError("key_provider_name is missing", validationException); + } + if (keyProviderType == null) { + validationException = addValidationError("key_provider_type is missing", validationException); + } + return validationException; + } + + /** + * Returns key provider name + * @return keyProviderName + */ + public String getKeyProviderName() { + return keyProviderName; + } + + /** + * Returns key provider type + * @return keyProviderType + */ + public String getKeyProviderType() { + return keyProviderType; + } + + /** + * Returns crypto settings + * @return settings + */ + public Settings getSettings() { + return settings; + } + + /** + * Constructs a new crypto settings with provided key provider name. + * @param keyProviderName Name of the key provider + */ + public CryptoSettings keyProviderName(String keyProviderName) { + this.keyProviderName = keyProviderName; + return this; + } + + /** + * Constructs a new crypto settings with provided key provider type. + * @param keyProviderType Type of key provider to be used in encryption. 
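+ * @return this request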
+ */ + public CryptoSettings keyProviderType(String keyProviderType) { + this.keyProviderType = keyProviderType; + return this; + } + + /** + * Sets the encryption settings + * + * @param settings for encryption + * @return this request + */ + public CryptoSettings settings(Settings.Builder settings) { + this.settings = settings.build(); + return this; + } + + /** + * Sets the encryption settings. + * + * @param source encryption settings in json or yaml format + * @param xContentType the content type of the source + * @return this request + */ + public CryptoSettings settings(String source, XContentType xContentType) { + this.settings = Settings.builder().loadFromSource(source, xContentType).build(); + return this; + } + + /** + * Sets the encryption settings. + * + * @param source encryption settings + * @return this request + */ + public CryptoSettings settings(Map<String, Object> source) { + this.settings = Settings.builder().loadFromMap(source).build(); + return this; + } + + /** + * Parses crypto settings definition. + * + * @param cryptoDefinition crypto settings definition + */ + public CryptoSettings(Map<String, Object> cryptoDefinition) { + for (Map.Entry<String, Object> entry : cryptoDefinition.entrySet()) { + if (entry.getKey().equals("key_provider_name")) { + keyProviderName(entry.getValue().toString()); + } else if (entry.getKey().equals("key_provider_type")) { + keyProviderType(entry.getValue().toString()); + } else if (entry.getKey().equals("settings")) { + if (!(entry.getValue() instanceof Map)) { + throw new IllegalArgumentException("Malformed settings section in crypto settings, should include an inner object"); + } + @SuppressWarnings("unchecked") + Map<String, Object> sub = (Map<String, Object>) entry.getValue(); + settings(sub); + } + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(keyProviderName); + out.writeString(keyProviderType); + writeSettingsToStream(settings, out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("key_provider_name", keyProviderName); + builder.field("key_provider_type", keyProviderType); + + builder.startObject("settings"); + settings.toXContent(builder, params); + builder.endObject(); + + builder.endObject(); + return builder; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/crypto/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/crypto/package-info.java new file mode 100644 index 0000000000000..bb9375c20c87e --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/crypto/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Crypto client request and settings handlers. 
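+ *
+ * <p>Illustrative usage of the settings type in this package (provider names and setting keys
+ * below are placeholders, not part of the original patch):
+ * <pre>{@code
+ * CryptoSettings crypto = new CryptoSettings("my-key-provider")
+ *     .keyProviderType("sample-kms-type")
+ *     .settings(Settings.builder().put("some.setting", "some-value"));
+ * }</pre>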
+ */ +package org.opensearch.action.admin.cluster.crypto; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequest.java index 79b7381801da6..8243be21ba487 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequest.java @@ -10,6 +10,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -18,8 +19,9 @@ /** * Request for deleting decommission request. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class DeleteDecommissionStateRequest extends ClusterManagerNodeRequest<DeleteDecommissionStateRequest> { public DeleteDecommissionStateRequest() {} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequestBuilder.java index 08f194c53f18e..94075d6ec860d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequestBuilder.java @@ -10,12 +10,14 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Builder for Delete decommission request. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class DeleteDecommissionStateRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< DeleteDecommissionStateRequest, DeleteDecommissionStateResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateResponse.java index 3d0404c25373b..13b056a019200 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateResponse.java @@ -9,6 +9,7 @@ package org.opensearch.action.admin.cluster.decommission.awareness.delete; import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -17,8 +18,9 @@ /** * Response returned after deletion of decommission request. 
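The hunk above is the first of many identical promotions in this section: the decommission, health, hot-threads, node-info, node-stats, and task classes all flip their javadoc tag from @opensearch.internal to @opensearch.api and gain a @PublicApi annotation recording the release in which the contract became public. The pattern, sketched on a hypothetical placeholder class:

    import org.opensearch.common.annotation.PublicApi;

    /**
     * Placeholder standing in for the request/response classes promoted in this diff.
     *
     * @opensearch.api
     */
    @PublicApi(since = "2.4.0") // earliest release in which this type's contract was public
    class MyPromotedType {
        // No behavioral change: the annotation and javadoc tag only document
        // that the type is now covered by public-API compatibility guarantees.
    }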
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class DeleteDecommissionStateResponse extends AcknowledgedResponse { public DeleteDecommissionStateResponse(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/TransportDeleteDecommissionStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/TransportDeleteDecommissionStateAction.java index 6d52934a8ba3f..8901375a4095a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/TransportDeleteDecommissionStateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/TransportDeleteDecommissionStateAction.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; @@ -20,6 +19,7 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequest.java index 4fe26df19a289..3ecbbfbbc7285 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequest.java @@ -10,9 +10,10 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import java.io.IOException; @@ -21,8 +22,9 @@ /** * Get Decommissioned attribute request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class GetDecommissionStateRequest extends ClusterManagerNodeReadRequest<GetDecommissionStateRequest> { private String attributeName; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestBuilder.java index e766e9c674ff7..13eb375f0d00e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateRequestBuilder.java @@ -10,12 +10,14 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import 
org.opensearch.common.annotation.PublicApi; /** * Get decommission request builder * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class GetDecommissionStateRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< GetDecommissionStateRequest, GetDecommissionStateResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponse.java index 41344abf7b153..9010c0e7d9388 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/GetDecommissionStateResponse.java @@ -9,8 +9,9 @@ package org.opensearch.action.admin.cluster.decommission.awareness.get; import org.opensearch.OpenSearchParseException; -import org.opensearch.action.ActionResponse; import org.opensearch.cluster.decommission.DecommissionStatus; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContentObject; @@ -26,8 +27,9 @@ /** * Response for decommission status * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class GetDecommissionStateResponse extends ActionResponse implements ToXContentObject { private String attributeValue; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/TransportGetDecommissionStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/TransportGetDecommissionStateAction.java index 64744a24b4afd..22feb4d99297a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/TransportGetDecommissionStateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/get/TransportGetDecommissionStateAction.java @@ -8,7 +8,6 @@ package org.opensearch.action.admin.cluster.decommission.awareness.get; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; import org.opensearch.cluster.ClusterState; @@ -18,6 +17,7 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequest.java index cbf201d4ffeec..9070aa5a0dc55 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequest.java @@ -11,10 +11,11 @@ import org.opensearch.action.ActionRequestValidationException; import 
org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.cluster.decommission.DecommissionAttribute; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; import java.io.IOException; @@ -23,8 +24,9 @@ /** * Registers a decommission request with decommission attribute and timeout * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class DecommissionRequest extends ClusterManagerNodeRequest<DecommissionRequest> { public static final TimeValue DEFAULT_NODE_DRAINING_TIMEOUT = TimeValue.timeValueSeconds(120); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestBuilder.java index c3591fff54885..e965110cdb9df 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionRequestBuilder.java @@ -12,13 +12,15 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; /** * Register decommission request builder * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class DecommissionRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< DecommissionRequest, DecommissionResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionResponse.java index 13c1775b005b3..a2401cdf91b07 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionResponse.java @@ -9,6 +9,7 @@ package org.opensearch.action.admin.cluster.decommission.awareness.put; import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContentObject; @@ -18,8 +19,9 @@ /** * Response for decommission request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class DecommissionResponse extends AcknowledgedResponse implements ToXContentObject { public DecommissionResponse(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/TransportDecommissionAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/TransportDecommissionAction.java index f8a145c9063b9..0b1fd380ffdda 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/TransportDecommissionAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/TransportDecommissionAction.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; @@ -20,6 +19,7 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java index e595c2560a07b..ec8b01d853da6 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java @@ -40,9 +40,10 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.Priority; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.TimeValue; import java.io.IOException; import java.util.Objects; @@ -53,8 +54,9 @@ /** * Transport request for requesting cluster health * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterHealthRequest extends ClusterManagerNodeReadRequest<ClusterHealthRequest> implements IndicesRequest.Replaceable { private String[] indices; @@ -351,8 +353,9 @@ public ActionRequestValidationException validate() { /** * The level of the health request. 
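With ClusterHealthRequest and its Level enum public, callers can assemble a health check directly. A rough sketch, assuming the long-standing upstream setters (timeout, waitForEvents, level) are untouched by this diff; the index name is made up:

    import org.opensearch.action.admin.cluster.health.ClusterHealthRequest;
    import org.opensearch.common.Priority;
    import org.opensearch.common.unit.TimeValue;

    class ClusterHealthRequestSketch {
        static ClusterHealthRequest indexLevelHealth() {
            ClusterHealthRequest request = new ClusterHealthRequest("my-index") // hypothetical index
                .timeout(TimeValue.timeValueSeconds(30))  // give up if health is not computed in time
                .waitForEvents(Priority.LANGUID);         // let pending cluster-state tasks drain first
            request.level(ClusterHealthRequest.Level.INDICES); // ask for per-index detail in the response
            return request;
        }
    }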
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Level { CLUSTER, INDICES, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java index cca9d35d8aa6f..a9a3756755265 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java @@ -38,13 +38,15 @@ import org.opensearch.client.OpenSearchClient; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.Priority; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; /** * Builder for requesting cluster health * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterHealthRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< ClusterHealthRequest, ClusterHealthResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java index 714abe86df71e..1a27f161343e8 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java @@ -33,25 +33,26 @@ package org.opensearch.action.admin.cluster.health; import org.opensearch.Version; -import org.opensearch.action.ActionResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.awarenesshealth.ClusterAwarenessHealth; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.cluster.health.ClusterIndexHealth; import org.opensearch.cluster.health.ClusterStateHealth; -import org.opensearch.common.Strings; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.StatusToXContentObject; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.rest.RestStatus; import java.io.IOException; import java.util.HashMap; @@ -67,8 +68,9 @@ /** * Transport response for Cluster Health * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterHealthResponse extends ActionResponse implements StatusToXContentObject { private static final String CLUSTER_NAME = "cluster_name"; private static final String STATUS = "status"; @@ -421,7 +423,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public String toString() { - return 
Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java index d84179fa6bc0a..1cc357a4c20f4 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -35,7 +35,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.IndicesOptions; @@ -58,10 +57,11 @@ import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.CollectionUtils; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.discovery.ClusterManagerNotDiscoveredException; import org.opensearch.discovery.Discovery; import org.opensearch.index.IndexNotFoundException; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java index 29a1c3d243c9d..9e52b90f7bd38 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java @@ -33,9 +33,10 @@ package org.opensearch.action.admin.cluster.node.hotthreads; import org.opensearch.action.support.nodes.BaseNodesRequest; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.TimeValue; import java.io.IOException; import java.util.concurrent.TimeUnit; @@ -43,8 +44,9 @@ /** * Transport request for OpenSearch Hot Threads * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesHotThreadsRequest extends BaseNodesRequest<NodesHotThreadsRequest> { int threads = 3; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java index 3639439dd3fb8..51b455b41115b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java @@ -34,13 +34,15 @@ import org.opensearch.action.support.nodes.NodesOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import 
org.opensearch.common.unit.TimeValue; /** * Builder class for requesting OpenSearch Hot Threads * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesHotThreadsRequestBuilder extends NodesOperationRequestBuilder< NodesHotThreadsRequest, NodesHotThreadsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsResponse.java index 5af9ce50a4bfe..eeddd2deb7ff8 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/NodesHotThreadsResponse.java @@ -35,6 +35,7 @@ import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.nodes.BaseNodesResponse; import org.opensearch.cluster.ClusterName; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -44,8 +45,9 @@ /** * Transport response for OpenSearch Hot Threads * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesHotThreadsResponse extends BaseNodesResponse<NodeHotThreads> { public NodesHotThreadsResponse(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodeInfo.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodeInfo.java index acf40e3a9de3c..544fd1fb6aaf4 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodeInfo.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodeInfo.java @@ -37,16 +37,16 @@ import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Nullable; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.service.ReportingService; import org.opensearch.http.HttpInfo; import org.opensearch.ingest.IngestInfo; import org.opensearch.monitor.jvm.JvmInfo; import org.opensearch.monitor.os.OsInfo; import org.opensearch.monitor.process.ProcessInfo; -import org.opensearch.node.ReportingService; import org.opensearch.search.aggregations.support.AggregationInfo; import org.opensearch.search.pipeline.SearchPipelineInfo; import org.opensearch.threadpool.ThreadPoolInfo; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequest.java index 31cacda7c3487..17b633c533218 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequest.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.cluster.node.info; import org.opensearch.action.support.nodes.BaseNodesRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -47,11 +48,12 @@ /** * A request to 
get node (cluster) level information. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> { - private Set<String> requestedMetrics = Metric.allMetrics(); + private Set<String> requestedMetrics = Metric.defaultMetrics(); /** * Create a new NodeInfoRequest from a {@link StreamInput} object. @@ -71,7 +73,7 @@ public NodesInfoRequest(StreamInput in) throws IOException { */ public NodesInfoRequest(String... nodesIds) { super(nodesIds); - all(); + defaultMetrics(); } /** @@ -83,13 +85,24 @@ public NodesInfoRequest clear() { } /** - * Sets to return all the data. + * Sets to return data for all the metrics. + * See {@link Metric} */ public NodesInfoRequest all() { requestedMetrics.addAll(Metric.allMetrics()); return this; } + /** + * Sets to return data for default metrics only. + * See {@link Metric} + * See {@link Metric#defaultMetrics()}. + */ + public NodesInfoRequest defaultMetrics() { + requestedMetrics.addAll(Metric.defaultMetrics()); + return this; + } + /** * Get the names of requested metrics */ @@ -154,7 +167,7 @@ public void writeTo(StreamOutput out) throws IOException { /** * An enumeration of the "core" sections of metrics that may be requested - * from the nodes information endpoint. Eventually this list list will be + * from the nodes information endpoint. Eventually this list will be * pluggable. */ public enum Metric { @@ -185,8 +198,25 @@ boolean containedIn(Set<String> metricNames) { return metricNames.contains(this.metricName()); } + /** + * Returns all available metrics. + * See {@link Metric} + */ public static Set<String> allMetrics() { return Arrays.stream(values()).map(Metric::metricName).collect(Collectors.toSet()); } + + /** + * Returns the default set of metrics. + * Similar to {@link #allMetrics()} except that the {@link Metric#SEARCH_PIPELINES} metric is not included. + * <br> + * The motivation to define the default set of metrics was to keep the default response + * size at bay. Metrics that are NOT included in the default set were typically introduced later + * and are considered to contain a specific type of information that is not usually useful unless you + * know that you really need it.
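The practical effect of the change above shows up at construction time: a plain NodesInfoRequest now asks for the trimmed default set, and callers who still want everything, search pipelines included, must opt in. A short sketch using only the methods defined in this hunk (node ids are placeholders):

    import org.opensearch.action.admin.cluster.node.info.NodesInfoRequest;

    class NodesInfoRequestSketch {
        // After this change the constructor applies Metric.defaultMetrics(),
        // i.e. allMetrics() minus SEARCH_PIPELINES.
        static NodesInfoRequest trimmedDefault() {
            return new NodesInfoRequest("node-1", "node-2"); // hypothetical node ids
        }

        // Opting back into the pre-change behavior is now explicit.
        static NodesInfoRequest everything() {
            return new NodesInfoRequest("node-1", "node-2").all();
        }

        // Or start empty and re-apply the default set.
        static NodesInfoRequest reset() {
            return new NodesInfoRequest("node-1", "node-2").clear().defaultMetrics();
        }
    }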
+ */ + public static Set<String> defaultMetrics() { + return allMetrics().stream().filter(metric -> !(metric.equals(SEARCH_PIPELINES.metricName()))).collect(Collectors.toSet()); + } } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java index 76ef75b77a1cf..4c3191b017948 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.nodes.NodesOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport action for OpenSearch Node Information * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesInfoRequestBuilder extends NodesOperationRequestBuilder<NodesInfoRequest, NodesInfoResponse, NodesInfoRequestBuilder> { public NodesInfoRequestBuilder(OpenSearchClient client, NodesInfoAction action) { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoResponse.java index 3dcb30a091a94..7ddd70185e8ad 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoResponse.java @@ -36,13 +36,13 @@ import org.opensearch.action.support.nodes.BaseNodesResponse; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.node.DiscoveryNodeRole; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.http.HttpInfo; import org.opensearch.ingest.IngestInfo; import org.opensearch.monitor.jvm.JvmInfo; @@ -60,8 +60,9 @@ /** * Transport response for OpenSearch Node Information * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesInfoResponse extends BaseNodesResponse<NodeInfo> implements ToXContentFragment { public NodesInfoResponse(StreamInput in) throws IOException { @@ -165,7 +166,7 @@ public String toString() { builder.startObject(); toXContent(builder, EMPTY_PARAMS); builder.endObject(); - return Strings.toString(builder); + return builder.toString(); } catch (IOException e) { return "{ \"error\" : \"" + e.getMessage() + "\"}"; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/PluginsAndModules.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/PluginsAndModules.java index 961fa6dac810f..13f7211d48e9a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/PluginsAndModules.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/PluginsAndModules.java @@ -34,8 +34,8 @@ import org.opensearch.core.common.io.stream.StreamInput; import 
org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.service.ReportingService; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.node.ReportingService; import org.opensearch.plugins.PluginInfo; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/liveness/LivenessResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/liveness/LivenessResponse.java index 95b4d0d918578..c164962cadcdc 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/liveness/LivenessResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/liveness/LivenessResponse.java @@ -32,9 +32,9 @@ package org.opensearch.action.admin.cluster.node.liveness; -import org.opensearch.action.ActionResponse; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java index dbfe8378ef394..aab7ea54f87c2 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java @@ -33,17 +33,15 @@ package org.opensearch.action.admin.cluster.node.reload; import org.opensearch.action.support.nodes.BaseNodesRequest; -import org.opensearch.core.common.io.stream.StreamInput; - -import java.io.IOException; - import org.opensearch.common.CharArrays; import org.opensearch.common.Nullable; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.settings.SecureString; +import java.io.IOException; import java.util.Arrays; /** diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java index 2f2162947aeea..d970e4c9e5468 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java @@ -34,13 +34,15 @@ import org.opensearch.action.support.nodes.NodesOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.settings.SecureString; /** * Builder for the reload secure settings nodes request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesReloadSecureSettingsRequestBuilder extends NodesOperationRequestBuilder< NodesReloadSecureSettingsRequest, NodesReloadSecureSettingsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsResponse.java index 
22044f0c69c48..6c250a8daaf3e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsResponse.java @@ -38,12 +38,12 @@ import org.opensearch.action.support.nodes.BaseNodesResponse; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.common.Strings; +import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; + import java.io.IOException; import java.util.List; @@ -99,7 +99,7 @@ public String toString() { builder.startObject(); toXContent(builder, EMPTY_PARAMS); builder.endObject(); - return Strings.toString(builder); + return builder.toString(); } catch (final IOException e) { return "{ \"error\" : \"" + e.getMessage() + "\"}"; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java index f0582daee7261..92b8af0b8aa84 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java @@ -34,20 +34,20 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; -import org.opensearch.OpenSearchException; import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionListener; +import org.opensearch.OpenSearchException; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.nodes.TransportNodesAction; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.common.settings.KeyStoreWrapper; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.settings.KeyStoreWrapper; import org.opensearch.core.common.settings.SecureString; -import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; import org.opensearch.plugins.PluginsService; import org.opensearch.plugins.ReloadablePlugin; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java index 3b6c85ee6e091..8562a7eb37709 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java @@ -41,21 +41,25 @@ import org.opensearch.common.Nullable; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.indices.breaker.AllCircuitBreakerStats; import org.opensearch.core.xcontent.ToXContentFragment; 
import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.discovery.DiscoveryStats; import org.opensearch.http.HttpStats; +import org.opensearch.index.SegmentReplicationRejectionStats; import org.opensearch.index.stats.IndexingPressureStats; import org.opensearch.index.stats.ShardIndexingPressureStats; import org.opensearch.index.store.remote.filecache.FileCacheStats; import org.opensearch.indices.NodeIndicesStats; -import org.opensearch.indices.breaker.AllCircuitBreakerStats; import org.opensearch.ingest.IngestStats; import org.opensearch.monitor.fs.FsInfo; import org.opensearch.monitor.jvm.JvmStats; import org.opensearch.monitor.os.OsStats; import org.opensearch.monitor.process.ProcessStats; import org.opensearch.node.AdaptiveSelectionStats; +import org.opensearch.node.NodesResourceUsageStats; +import org.opensearch.ratelimitting.admissioncontrol.stats.AdmissionControlStats; +import org.opensearch.repositories.RepositoriesStats; import org.opensearch.script.ScriptCacheStats; import org.opensearch.script.ScriptStats; import org.opensearch.search.backpressure.stats.SearchBackpressureStats; @@ -127,6 +131,9 @@ public class NodeStats extends BaseNodeResponse implements ToXContentFragment { @Nullable private SearchBackpressureStats searchBackpressureStats; + @Nullable + private SegmentReplicationRejectionStats segmentReplicationRejectionStats; + @Nullable private ClusterManagerThrottlingStats clusterManagerThrottlingStats; @@ -142,6 +149,15 @@ public class NodeStats extends BaseNodeResponse implements ToXContentFragment { @Nullable private SearchPipelineStats searchPipelineStats; + @Nullable + private NodesResourceUsageStats resourceUsageStats; + + @Nullable + private RepositoriesStats repositoriesStats; + + @Nullable + private AdmissionControlStats admissionControlStats; + public NodeStats(StreamInput in) throws IOException { super(in); timestamp = in.readVLong(); @@ -198,6 +214,26 @@ public NodeStats(StreamInput in) throws IOException { } else { searchPipelineStats = null; } + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + resourceUsageStats = in.readOptionalWriteable(NodesResourceUsageStats::new); + } else { + resourceUsageStats = null; + } + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + segmentReplicationRejectionStats = in.readOptionalWriteable(SegmentReplicationRejectionStats::new); + } else { + segmentReplicationRejectionStats = null; + } + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + repositoriesStats = in.readOptionalWriteable(RepositoriesStats::new); + } else { + repositoriesStats = null; + } + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + admissionControlStats = in.readOptionalWriteable(AdmissionControlStats::new); + } else { + admissionControlStats = null; + } } public NodeStats( @@ -216,6 +252,7 @@ public NodeStats( @Nullable DiscoveryStats discoveryStats, @Nullable IngestStats ingestStats, @Nullable AdaptiveSelectionStats adaptiveSelectionStats, + @Nullable NodesResourceUsageStats resourceUsageStats, @Nullable ScriptCacheStats scriptCacheStats, @Nullable IndexingPressureStats indexingPressureStats, @Nullable ShardIndexingPressureStats shardIndexingPressureStats, @@ -224,7 +261,10 @@ public NodeStats( @Nullable WeightedRoutingStats weightedRoutingStats, @Nullable FileCacheStats fileCacheStats, @Nullable TaskCancellationStats taskCancellationStats, - @Nullable SearchPipelineStats searchPipelineStats + @Nullable SearchPipelineStats searchPipelineStats, + @Nullable SegmentReplicationRejectionStats segmentReplicationRejectionStats, + 
@Nullable RepositoriesStats repositoriesStats, + @Nullable AdmissionControlStats admissionControlStats ) { super(node); this.timestamp = timestamp; @@ -241,6 +281,7 @@ public NodeStats( this.discoveryStats = discoveryStats; this.ingestStats = ingestStats; this.adaptiveSelectionStats = adaptiveSelectionStats; + this.resourceUsageStats = resourceUsageStats; this.scriptCacheStats = scriptCacheStats; this.indexingPressureStats = indexingPressureStats; this.shardIndexingPressureStats = shardIndexingPressureStats; @@ -250,6 +291,9 @@ public NodeStats( this.fileCacheStats = fileCacheStats; this.taskCancellationStats = taskCancellationStats; this.searchPipelineStats = searchPipelineStats; + this.segmentReplicationRejectionStats = segmentReplicationRejectionStats; + this.repositoriesStats = repositoriesStats; + this.admissionControlStats = admissionControlStats; } public long getTimestamp() { @@ -344,6 +388,11 @@ public AdaptiveSelectionStats getAdaptiveSelectionStats() { return adaptiveSelectionStats; } + @Nullable + public NodesResourceUsageStats getResourceUsageStats() { + return resourceUsageStats; + } + @Nullable public ScriptCacheStats getScriptCacheStats() { return scriptCacheStats; @@ -387,6 +436,21 @@ public SearchPipelineStats getSearchPipelineStats() { return searchPipelineStats; } + @Nullable + public SegmentReplicationRejectionStats getSegmentReplicationRejectionStats() { + return segmentReplicationRejectionStats; + } + + @Nullable + public RepositoriesStats getRepositoriesStats() { + return repositoriesStats; + } + + @Nullable + public AdmissionControlStats getAdmissionControlStats() { + return admissionControlStats; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -430,6 +494,18 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_9_0)) { out.writeOptionalWriteable(searchPipelineStats); } + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeOptionalWriteable(resourceUsageStats); + } + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeOptionalWriteable(segmentReplicationRejectionStats); + } + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeOptionalWriteable(repositoriesStats); + } + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeOptionalWriteable(admissionControlStats); + } } @Override @@ -520,7 +596,19 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (getSearchPipelineStats() != null) { getSearchPipelineStats().toXContent(builder, params); } + if (getResourceUsageStats() != null) { + getResourceUsageStats().toXContent(builder, params); + } + if (getSegmentReplicationRejectionStats() != null) { + getSegmentReplicationRejectionStats().toXContent(builder, params); + } + if (getRepositoriesStats() != null) { + getRepositoriesStats().toXContent(builder, params); + } + if (getAdmissionControlStats() != null) { + getAdmissionControlStats().toXContent(builder, params); + } return builder; } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java index 34cf03812d233..1af56f10b95ee 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java @@ -34,8 +34,10 @@ import 
org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.action.support.nodes.BaseNodesRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; + import java.io.IOException; import java.util.Arrays; import java.util.HashSet; @@ -47,8 +49,9 @@ /** * A request to get node (cluster) level stats. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> { private CommonStatsFlags indices = new CommonStatsFlags(); @@ -212,7 +215,11 @@ public enum Metric { WEIGHTED_ROUTING_STATS("weighted_routing"), FILE_CACHE_STATS("file_cache"), TASK_CANCELLATION("task_cancellation"), - SEARCH_PIPELINE("search_pipeline"); + SEARCH_PIPELINE("search_pipeline"), + RESOURCE_USAGE_STATS("resource_usage_stats"), + SEGMENT_REPLICATION_BACKPRESSURE("segment_replication_backpressure"), + REPOSITORIES("repositories"), + ADMISSION_CONTROL("admission_control"); private String metricName; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java index e382278f5ddb8..58149e9a34a34 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java @@ -35,12 +35,14 @@ import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.action.support.nodes.NodesOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport builder for obtaining OpenSearch Node Stats * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesStatsRequestBuilder extends NodesOperationRequestBuilder< NodesStatsRequest, NodesStatsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsResponse.java index 0037d1ea27873..73a938568acc3 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsResponse.java @@ -35,12 +35,12 @@ import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.nodes.BaseNodesResponse; import org.opensearch.cluster.ClusterName; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import java.io.IOException; import java.util.List; @@ -48,8 +48,9 @@ /** * Transport response for obtaining OpenSearch Node Stats * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesStatsResponse extends BaseNodesResponse<NodeStats> implements ToXContentFragment { public NodesStatsResponse(StreamInput in) throws IOException { @@ -92,7 +93,7 @@ public String toString() { 
builder.startObject(); toXContent(builder, EMPTY_PARAMS); builder.endObject(); - return Strings.toString(builder); + return builder.toString(); } catch (IOException e) { return "{ \"error\" : \"" + e.getMessage() + "\"}"; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index 615abbaef845d..1df73d3b4394d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -124,7 +124,11 @@ protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) { NodesStatsRequest.Metric.WEIGHTED_ROUTING_STATS.containedIn(metrics), NodesStatsRequest.Metric.FILE_CACHE_STATS.containedIn(metrics), NodesStatsRequest.Metric.TASK_CANCELLATION.containedIn(metrics), - NodesStatsRequest.Metric.SEARCH_PIPELINE.containedIn(metrics) + NodesStatsRequest.Metric.SEARCH_PIPELINE.containedIn(metrics), + NodesStatsRequest.Metric.RESOURCE_USAGE_STATS.containedIn(metrics), + NodesStatsRequest.Metric.SEGMENT_REPLICATION_BACKPRESSURE.containedIn(metrics), + NodesStatsRequest.Metric.REPOSITORIES.containedIn(metrics), + NodesStatsRequest.Metric.ADMISSION_CONTROL.containedIn(metrics) ); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java index 360765e8f4803..183fb2a236148 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.cluster.node.tasks.cancel; import org.opensearch.action.support.tasks.BaseTasksRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.tasks.CancellableTask; @@ -44,8 +45,9 @@ /** * A request to cancel tasks * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CancelTasksRequest extends BaseTasksRequest<CancelTasksRequest> { public static final String DEFAULT_REASON = "by user request"; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestBuilder.java index ee19e8b104603..ac02dfdf7381f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.tasks.TasksRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Builder for the request to cancel tasks running on the specified nodes * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CancelTasksRequestBuilder extends TasksRequestBuilder<CancelTasksRequest, CancelTasksResponse, CancelTasksRequestBuilder> { public CancelTasksRequestBuilder(OpenSearchClient client, CancelTasksAction action) { diff --git 
a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java index 4741504ddd035..1f86c7c22c2eb 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java @@ -35,10 +35,11 @@ import org.opensearch.OpenSearchException; import org.opensearch.action.TaskOperationFailure; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.tasks.TaskInfo; @@ -49,8 +50,9 @@ /** * Returns the list of tasks that were cancelled * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CancelTasksResponse extends ListTasksResponse { private static final ConstructingObjectParser<CancelTasksResponse, Void> PARSER = setupParser( @@ -81,6 +83,6 @@ public static CancelTasksResponse fromXContent(XContentParser parser) { @Override public String toString() { - return Strings.toString(XContentType.JSON, this, true, true); + return Strings.toString(MediaTypeRegistry.JSON, this, true, true); } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java index 909fb009aa100..634ce1e2d4095 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java @@ -33,13 +33,13 @@ package org.opensearch.action.admin.cluster.node.tasks.cancel; import org.opensearch.ResourceNotFoundException; -import org.opensearch.action.ActionListener; import org.opensearch.action.FailedNodeException; import org.opensearch.action.TaskOperationFailure; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.tasks.TransportTasksAction; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.TaskInfo; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskRequest.java index b504c7e6a39b0..13c6d645d4c3a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskRequest.java @@ -34,10 +34,11 @@ import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.common.annotation.PublicApi; +import 
org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.tasks.TaskId; +import org.opensearch.core.tasks.TaskId; import java.io.IOException; @@ -46,8 +47,9 @@ /** * A request to get node tasks * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetTaskRequest extends ActionRequest { private TaskId taskId = TaskId.EMPTY_TASK_ID; private boolean waitForCompletion = false; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskRequestBuilder.java index 17b2a21b2863b..ea42e1770e7f1 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskRequestBuilder.java @@ -34,14 +34,16 @@ import org.opensearch.action.ActionRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; -import org.opensearch.tasks.TaskId; +import org.opensearch.core.tasks.TaskId; /** * Builder for the request to retrieve the list of tasks running on the specified nodes * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetTaskRequestBuilder extends ActionRequestBuilder<GetTaskRequest, GetTaskResponse> { public GetTaskRequestBuilder(OpenSearchClient client, GetTaskAction action) { super(client, action, new GetTaskRequest()); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskResponse.java index b32e59fc77794..80901373e14d5 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/GetTaskResponse.java @@ -32,11 +32,12 @@ package org.opensearch.action.admin.cluster.node.tasks.get; -import org.opensearch.action.ActionResponse; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.tasks.TaskResult; @@ -48,8 +49,9 @@ /** * Returns the list of tasks currently running on the nodes * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetTaskResponse extends ActionResponse implements ToXContentObject { private final TaskResult task; @@ -85,6 +87,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java index 886c8a6671dd0..ab6451382aa88 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java @@ -32,10 +32,9 @@ package org.opensearch.action.admin.cluster.node.tasks.get; -import org.opensearch.OpenSearchException; import org.opensearch.ExceptionsHelper; +import org.opensearch.OpenSearchException; import org.opensearch.ResourceNotFoundException; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionListenerResponseHandler; import org.opensearch.action.get.GetRequest; import org.opensearch.action.get.GetResponse; @@ -48,13 +47,15 @@ import org.opensearch.common.inject.Inject; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.xcontent.LoggingDeprecationHandler; -import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.IndexNotFoundException; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; import org.opensearch.tasks.TaskInfo; +import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.tasks.TaskResult; import org.opensearch.tasks.TaskResultsService; import org.opensearch.threadpool.ThreadPool; @@ -67,7 +68,7 @@ /** * ActionType to get a single task. If the task isn't running then it'll try to request the status from request index. - * + * <p> * The general flow is: * <ul> * <li>If this isn't being executed on the node to which the requested TaskId belongs then move to that node. 
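A pattern repeated across these files: the `@opensearch.internal` javadoc tag becomes `@opensearch.api`, and the class additionally gains the `@PublicApi` annotation recording the release in which its contract first shipped. A sketch of the resulting shape on a hypothetical class (ExampleRequest is illustrative, not part of this patch):

    import org.opensearch.common.annotation.PublicApi;

    /**
     * A request that is now part of the supported public surface.
     *
     * @opensearch.api
     */
    @PublicApi(since = "1.0.0") // first release that shipped this contract
    public class ExampleRequest {
    }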
@@ -84,6 +85,8 @@ public class TransportGetTaskAction extends HandledTransportAction<GetTaskReques private final Client client; private final NamedXContentRegistry xContentRegistry; + private final TaskResourceTrackingService taskResourceTrackingService; + @Inject public TransportGetTaskAction( ThreadPool threadPool, @@ -91,7 +94,8 @@ public TransportGetTaskAction( ActionFilters actionFilters, ClusterService clusterService, Client client, - NamedXContentRegistry xContentRegistry + NamedXContentRegistry xContentRegistry, + TaskResourceTrackingService taskResourceTrackingService ) { super(GetTaskAction.NAME, transportService, actionFilters, GetTaskRequest::new); this.threadPool = threadPool; @@ -99,6 +103,7 @@ public TransportGetTaskAction( this.transportService = transportService; this.client = new OriginSettingClient(client, GetTaskAction.TASKS_ORIGIN); this.xContentRegistry = xContentRegistry; + this.taskResourceTrackingService = taskResourceTrackingService; } @Override @@ -173,6 +178,7 @@ public void onFailure(Exception e) { } }); } else { + taskResourceTrackingService.refreshResourceStats(runningTask); TaskInfo info = runningTask.taskInfo(clusterService.localNode().getId(), true); listener.onResponse(new GetTaskResponse(new TaskResult(false, info))); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java index 070b93c788ef0..6ee56b0da7884 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.cluster.node.tasks.list; import org.opensearch.action.support.tasks.BaseTasksRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -41,8 +42,9 @@ /** * A request to get node tasks * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ListTasksRequest extends BaseTasksRequest<ListTasksRequest> { private boolean detailed = false; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksRequestBuilder.java index 45beb0dd899b5..a195b98d06e76 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.tasks.TasksRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Builder for the request to retrieve the list of tasks running on the specified nodes * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ListTasksRequestBuilder extends TasksRequestBuilder<ListTasksRequest, ListTasksResponse, ListTasksRequestBuilder> { public ListTasksRequestBuilder(OpenSearchClient client, ListTasksAction action) { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java index 85b165df68cfa..337151a2a9268 
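The TransportGetTaskAction change above injects TaskResourceTrackingService and refreshes a live task's resource usage immediately before its TaskInfo snapshot is built; without that refresh, fetching a still-running task would report stale thread-level resource stats. The relevant step, as it appears in the patch:

    // Pull the latest resource usage into the task before serializing it.
    taskResourceTrackingService.refreshResourceStats(runningTask);
    TaskInfo info = runningTask.taskInfo(clusterService.localNode().getId(), true);
    listener.onResponse(new GetTaskResponse(new TaskResult(false, info)));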
100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java @@ -38,17 +38,18 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; -import org.opensearch.core.ParseField; -import org.opensearch.common.Strings; import org.opensearch.common.TriFunction; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.tasks.TaskId; import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.tasks.TaskId; import org.opensearch.tasks.TaskInfo; import java.io.IOException; @@ -64,8 +65,9 @@ /** * Returns the list of tasks currently running on the nodes * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ListTasksResponse extends BaseTasksResponse implements ToXContentObject { private static final String TASKS = "tasks"; @@ -262,6 +264,6 @@ public static ListTasksResponse fromXContent(XContentParser parser) { @Override public String toString() { - return Strings.toString(XContentType.JSON, this, true, true); + return Strings.toString(MediaTypeRegistry.JSON, this, true, true); } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TaskGroup.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TaskGroup.java index 0ca114ae0ed5c..fb23a41b8dc19 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TaskGroup.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TaskGroup.java @@ -32,6 +32,7 @@ package org.opensearch.action.admin.cluster.node.tasks.list; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.tasks.TaskInfo; @@ -45,8 +46,9 @@ /** * Information about a currently running task and all its subtasks. 
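The toString() rewrites above (CancelTasksResponse, GetTaskResponse, ListTasksResponse) all swap org.opensearch.common.Strings for the core variant and XContentType.JSON for MediaTypeRegistry.JSON; the rendered output is unchanged. The new call shape, where `response` stands in for any of these ToXContent responses:

    import org.opensearch.core.common.Strings;
    import org.opensearch.core.xcontent.MediaTypeRegistry;

    // pretty = true, human = true, matching ListTasksResponse#toString above
    String json = Strings.toString(MediaTypeRegistry.JSON, response, true, true);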
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class TaskGroup implements ToXContentObject { private final TaskInfo task; @@ -65,8 +67,9 @@ public static Builder builder(TaskInfo taskInfo) { /** * Builder for the Task Group * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private TaskInfo taskInfo; private List<Builder> childTasks; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java index aede3fe5b1cc0..1c543e60c46e0 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.node.tasks.list; -import org.opensearch.action.ActionListener; import org.opensearch.action.FailedNodeException; import org.opensearch.action.TaskOperationFailure; import org.opensearch.action.support.ActionFilters; @@ -40,6 +39,7 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskInfo; import org.opensearch.tasks.TaskResourceTrackingService; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequest.java index e31b88ace953f..385f48d5690c1 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequest.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.cluster.node.usage; import org.opensearch.action.support.nodes.BaseNodesRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -41,8 +42,9 @@ /** * Transport request for collecting OpenSearch telemetry * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesUsageRequest extends BaseNodesRequest<NodesUsageRequest> { private boolean restActions; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequestBuilder.java index 7d1823b59dc04..ec1176ac634fa 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageRequestBuilder.java @@ -35,12 +35,14 @@ import org.opensearch.action.ActionType; import org.opensearch.action.support.nodes.NodesOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport builder for collecting OpenSearch telemetry * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesUsageRequestBuilder extends NodesOperationRequestBuilder< NodesUsageRequest, NodesUsageResponse, diff --git 
a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageResponse.java index 59b1f99429cfe..7dc8a318b2cf4 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/NodesUsageResponse.java @@ -35,12 +35,12 @@ import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.nodes.BaseNodesResponse; import org.opensearch.cluster.ClusterName; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import java.io.IOException; import java.util.List; @@ -49,8 +49,9 @@ * The response for the nodes usage api which contains the individual usage * statistics for all nodes queried. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodesUsageResponse extends BaseNodesResponse<NodeUsage> implements ToXContentFragment { public NodesUsageResponse(StreamInput in) throws IOException { @@ -93,7 +94,7 @@ public String toString() { builder.startObject(); toXContent(builder, EMPTY_PARAMS); builder.endObject(); - return Strings.toString(builder); + return builder.toString(); } catch (IOException e) { return "{ \"error\" : \"" + e.getMessage() + "\"}"; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remote/RemoteInfoAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/remote/RemoteInfoAction.java index 55f75a142a53c..7e4911c10c50e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remote/RemoteInfoAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remote/RemoteInfoAction.java @@ -44,7 +44,7 @@ public final class RemoteInfoAction extends ActionType<RemoteInfoResponse> { public static final String NAME = "cluster:monitor/remote/info"; public static final RemoteInfoAction INSTANCE = new RemoteInfoAction(); - public RemoteInfoAction() { + private RemoteInfoAction() { super(NAME, RemoteInfoResponse::new); } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remote/RemoteInfoResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/remote/RemoteInfoResponse.java index 8173da8455434..2f47de4f6a5fe 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remote/RemoteInfoResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remote/RemoteInfoResponse.java @@ -32,12 +32,12 @@ package org.opensearch.action.admin.cluster.remote; -import org.opensearch.action.ActionResponse; -import org.opensearch.transport.RemoteConnectionInfo; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.transport.RemoteConnectionInfo; import java.io.IOException; import java.util.ArrayList; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remote/TransportRemoteInfoAction.java 
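Making the RemoteInfoAction constructor private, as above, enforces the usual ActionType singleton convention: callers must go through the shared INSTANCE rather than allocating fresh instances. The general shape, sketched on a hypothetical action (ExampleAction and ExampleResponse are illustrative):

    public final class ExampleAction extends ActionType<ExampleResponse> {
        public static final String NAME = "cluster:monitor/example";
        public static final ExampleAction INSTANCE = new ExampleAction();

        private ExampleAction() { // only INSTANCE can ever exist
            super(NAME, ExampleResponse::new);
        }
    }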
b/server/src/main/java/org/opensearch/action/admin/cluster/remote/TransportRemoteInfoAction.java index 794d21f1cbc7c..cb7c965069987 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remote/TransportRemoteInfoAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remote/TransportRemoteInfoAction.java @@ -32,11 +32,11 @@ package org.opensearch.action.admin.cluster.remote; -import org.opensearch.action.ActionListener; import org.opensearch.action.search.SearchTransportService; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.HandledTransportAction; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.tasks.Task; import org.opensearch.transport.RemoteClusterService; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreRequest.java index eb1935158c231..afa2058d1deba 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreRequest.java @@ -11,10 +11,11 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.common.Strings; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; @@ -30,8 +31,9 @@ /** * Restore remote store request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.2.0") public class RestoreRemoteStoreRequest extends ClusterManagerNodeRequest<RestoreRemoteStoreRequest> implements ToXContentObject { private String[] indices = Strings.EMPTY_ARRAY; @@ -205,7 +207,7 @@ public int hashCode() { @Override public String toString() { - return org.opensearch.common.Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreResponse.java index 4ff03cb880cf3..3de926c256f46 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreResponse.java @@ -8,16 +8,17 @@ package org.opensearch.action.admin.cluster.remotestore.restore; -import org.opensearch.action.ActionResponse; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.rest.RestStatus; 
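The import churn in these hunks is the ongoing move of foundational types (ActionListener, ActionResponse, Strings, RestStatus, TaskId) from the server module into org.opensearch.core.*; method signatures are unchanged, so downstream code only swaps import statements. A sketch under the new packages, assuming a Client handy as `client` and that the admin-client getTask helper matches its usual signature:

    import org.opensearch.action.admin.cluster.node.tasks.get.GetTaskRequest;
    import org.opensearch.core.action.ActionListener; // was org.opensearch.action.ActionListener
    import org.opensearch.core.tasks.TaskId;          // was org.opensearch.tasks.TaskId

    GetTaskRequest request = new GetTaskRequest().setTaskId(new TaskId("node-1", 42)).setWaitForCompletion(true);
    client.admin().cluster().getTask(request, ActionListener.wrap(
        response -> System.out.println(response.getTask().getTask().getDescription()),
        Throwable::printStackTrace
    ));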
import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.rest.RestStatus; import org.opensearch.snapshots.RestoreInfo; import java.io.IOException; @@ -28,8 +29,9 @@ /** * Contains information about remote store restores * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.2.0") public final class RestoreRemoteStoreResponse extends ActionResponse implements ToXContentObject { @Nullable diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/TransportRestoreRemoteStoreAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/TransportRestoreRemoteStoreAction.java index 613bf078b2c96..2b0e5c74cad53 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/TransportRestoreRemoteStoreAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/TransportRestoreRemoteStoreAction.java @@ -8,7 +8,6 @@ package org.opensearch.action.admin.cluster.remotestore.restore; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreClusterStateListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; @@ -18,8 +17,9 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.snapshots.RestoreService; +import org.opensearch.index.recovery.RemoteStoreRestoreService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -33,14 +33,14 @@ public final class TransportRestoreRemoteStoreAction extends TransportClusterManagerNodeAction< RestoreRemoteStoreRequest, RestoreRemoteStoreResponse> { - private final RestoreService restoreService; + private final RemoteStoreRestoreService restoreService; @Inject public TransportRestoreRemoteStoreAction( TransportService transportService, ClusterService clusterService, ThreadPool threadPool, - RestoreService restoreService, + RemoteStoreRestoreService restoreService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver ) { @@ -84,20 +84,17 @@ protected void clusterManagerOperation( final ClusterState state, final ActionListener<RestoreRemoteStoreResponse> listener ) { - restoreService.restoreFromRemoteStore( - request, - ActionListener.delegateFailure(listener, (delegatedListener, restoreCompletionResponse) -> { - if (restoreCompletionResponse.getRestoreInfo() == null && request.waitForCompletion()) { - RestoreClusterStateListener.createAndRegisterListener( - clusterService, - restoreCompletionResponse, - delegatedListener, - RestoreRemoteStoreResponse::new - ); - } else { - delegatedListener.onResponse(new RestoreRemoteStoreResponse(restoreCompletionResponse.getRestoreInfo())); - } - }) - ); + restoreService.restore(request, ActionListener.delegateFailure(listener, (delegatedListener, restoreCompletionResponse) -> { + if (restoreCompletionResponse.getRestoreInfo() == null && request.waitForCompletion()) { + RestoreClusterStateListener.createAndRegisterListener( + clusterService, + 
restoreCompletionResponse, + delegatedListener, + RestoreRemoteStoreResponse::new + ); + } else { + delegatedListener.onResponse(new RestoreRemoteStoreResponse(restoreCompletionResponse.getRestoreInfo())); + } + })); } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStats.java index 5ac9c1cf5f74c..7bddd0deb373b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStats.java @@ -8,88 +8,241 @@ package org.opensearch.action.admin.cluster.remotestore.stats; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.index.remote.RemoteRefreshSegmentTracker; +import org.opensearch.index.remote.RemoteSegmentTransferTracker; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; import java.io.IOException; /** * Encapsulates all remote store stats * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.8.0") public class RemoteStoreStats implements Writeable, ToXContentFragment { + /** + * Stats related to Remote Segment Store operations + */ + private final RemoteSegmentTransferTracker.Stats remoteSegmentShardStats; + + /** + * Stats related to Remote Translog Store operations + */ + private final RemoteTranslogTransferTracker.Stats remoteTranslogShardStats; + private final ShardRouting shardRouting; + + RemoteStoreStats( + RemoteSegmentTransferTracker.Stats remoteSegmentUploadShardStats, + RemoteTranslogTransferTracker.Stats remoteTranslogShardStats, + ShardRouting shardRouting + ) { + this.remoteSegmentShardStats = remoteSegmentUploadShardStats; + this.remoteTranslogShardStats = remoteTranslogShardStats; + this.shardRouting = shardRouting; + } - private final RemoteRefreshSegmentTracker.Stats remoteSegmentUploadShardStats; + RemoteStoreStats(StreamInput in) throws IOException { + remoteSegmentShardStats = in.readOptionalWriteable(RemoteSegmentTransferTracker.Stats::new); + remoteTranslogShardStats = in.readOptionalWriteable(RemoteTranslogTransferTracker.Stats::new); + this.shardRouting = new ShardRouting(in); + } - public RemoteStoreStats(RemoteRefreshSegmentTracker.Stats remoteSegmentUploadShardStats) { - this.remoteSegmentUploadShardStats = remoteSegmentUploadShardStats; + public RemoteSegmentTransferTracker.Stats getSegmentStats() { + return remoteSegmentShardStats; } - public RemoteStoreStats(StreamInput in) throws IOException { - remoteSegmentUploadShardStats = in.readOptionalWriteable(RemoteRefreshSegmentTracker.Stats::new); + public ShardRouting getShardRouting() { + return shardRouting; } - public RemoteRefreshSegmentTracker.Stats getStats() { - return remoteSegmentUploadShardStats; + public RemoteTranslogTransferTracker.Stats getTranslogStats() { + return remoteTranslogShardStats; } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject() - .field(Fields.SHARD_ID, remoteSegmentUploadShardStats.shardId) - .field(Fields.LOCAL_REFRESH_TIMESTAMP, 
remoteSegmentUploadShardStats.localRefreshClockTimeMs) - .field(Fields.REMOTE_REFRESH_TIMESTAMP, remoteSegmentUploadShardStats.remoteRefreshClockTimeMs) - .field(Fields.REFRESH_TIME_LAG_IN_MILLIS, remoteSegmentUploadShardStats.refreshTimeLagMs) - .field(Fields.REFRESH_LAG, remoteSegmentUploadShardStats.localRefreshNumber - remoteSegmentUploadShardStats.remoteRefreshNumber) - .field(Fields.BYTES_LAG, remoteSegmentUploadShardStats.bytesLag) + builder.startObject(); + buildShardRouting(builder); + + builder.startObject(Fields.SEGMENT); + builder.startObject(SubFields.DOWNLOAD); + // Ensuring that we are not showing 0 metrics to the user + if (remoteSegmentShardStats.directoryFileTransferTrackerStats.transferredBytesStarted != 0) { + buildSegmentDownloadStats(builder); + } + builder.endObject(); // segment.download + builder.startObject(SubFields.UPLOAD); + // Ensuring that we are not showing 0 metrics to the user + if (remoteSegmentShardStats.totalUploadsStarted != 0) { + buildSegmentUploadStats(builder); + } + builder.endObject(); // segment.upload + builder.endObject(); // segment + + builder.startObject(Fields.TRANSLOG); + builder.startObject(SubFields.UPLOAD); + // Ensuring that we are not showing 0 metrics to the user + if (remoteTranslogShardStats.totalUploadsStarted > 0) { + buildTranslogUploadStats(builder); + } + builder.endObject(); // translog.upload + builder.startObject(SubFields.DOWNLOAD); + // Ensuring that we are not showing 0 metrics to the user + if (remoteTranslogShardStats.totalDownloadsSucceeded > 0) { + buildTranslogDownloadStats(builder); + } + builder.endObject(); // translog.download + builder.endObject(); // translog + + return builder.endObject(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalWriteable(remoteSegmentShardStats); + out.writeOptionalWriteable(remoteTranslogShardStats); + shardRouting.writeTo(out); + } - .field(Fields.BACKPRESSURE_REJECTION_COUNT, remoteSegmentUploadShardStats.rejectionCount) - .field(Fields.CONSECUTIVE_FAILURE_COUNT, remoteSegmentUploadShardStats.consecutiveFailuresCount); + private void buildTranslogUploadStats(XContentBuilder builder) throws IOException { + builder.field(UploadStatsFields.LAST_SUCCESSFUL_UPLOAD_TIMESTAMP, remoteTranslogShardStats.lastSuccessfulUploadTimestamp); - builder.startObject(Fields.TOTAL_REMOTE_REFRESH); - builder.field(SubFields.STARTED, remoteSegmentUploadShardStats.totalUploadsStarted) - .field(SubFields.SUCCEEDED, remoteSegmentUploadShardStats.totalUploadsSucceeded) - .field(SubFields.FAILED, remoteSegmentUploadShardStats.totalUploadsFailed); + builder.startObject(UploadStatsFields.TOTAL_UPLOADS); + builder.field(SubFields.STARTED, remoteTranslogShardStats.totalUploadsStarted) + .field(SubFields.FAILED, remoteTranslogShardStats.totalUploadsFailed) + .field(SubFields.SUCCEEDED, remoteTranslogShardStats.totalUploadsSucceeded); builder.endObject(); - builder.startObject(Fields.TOTAL_UPLOADS_IN_BYTES); - builder.field(SubFields.STARTED, remoteSegmentUploadShardStats.uploadBytesStarted) - .field(SubFields.SUCCEEDED, remoteSegmentUploadShardStats.uploadBytesSucceeded) - .field(SubFields.FAILED, remoteSegmentUploadShardStats.uploadBytesFailed); + builder.startObject(UploadStatsFields.TOTAL_UPLOAD_SIZE); + builder.field(SubFields.STARTED_BYTES, remoteTranslogShardStats.uploadBytesStarted) + .field(SubFields.FAILED_BYTES, remoteTranslogShardStats.uploadBytesFailed) + .field(SubFields.SUCCEEDED_BYTES, remoteTranslogShardStats.uploadBytesSucceeded); 
builder.endObject(); - builder.startObject(Fields.REMOTE_REFRESH_SIZE_IN_BYTES); - builder.field(SubFields.LAST_SUCCESSFUL, remoteSegmentUploadShardStats.lastSuccessfulRemoteRefreshBytes); - builder.field(SubFields.MOVING_AVG, remoteSegmentUploadShardStats.uploadBytesMovingAverage); + builder.field(UploadStatsFields.TOTAL_UPLOAD_TIME_IN_MILLIS, remoteTranslogShardStats.totalUploadTimeInMillis); + + builder.startObject(UploadStatsFields.UPLOAD_SIZE_IN_BYTES); + builder.field(SubFields.MOVING_AVG, remoteTranslogShardStats.uploadBytesMovingAverage); + builder.endObject(); + + builder.startObject(UploadStatsFields.UPLOAD_SPEED_IN_BYTES_PER_SEC); + builder.field(SubFields.MOVING_AVG, remoteTranslogShardStats.uploadBytesPerSecMovingAverage); + builder.endObject(); + + builder.startObject(UploadStatsFields.UPLOAD_TIME_IN_MILLIS); + builder.field(SubFields.MOVING_AVG, remoteTranslogShardStats.uploadTimeMovingAverage); + builder.endObject(); + } + + private void buildTranslogDownloadStats(XContentBuilder builder) throws IOException { + builder.field(DownloadStatsFields.LAST_SUCCESSFUL_DOWNLOAD_TIMESTAMP, remoteTranslogShardStats.lastSuccessfulDownloadTimestamp); + + builder.startObject(DownloadStatsFields.TOTAL_DOWNLOADS); + builder.field(SubFields.SUCCEEDED, remoteTranslogShardStats.totalDownloadsSucceeded); builder.endObject(); - builder.startObject(Fields.UPLOAD_LATENCY_IN_BYTES_PER_SEC); - builder.field(SubFields.MOVING_AVG, remoteSegmentUploadShardStats.uploadBytesPerSecMovingAverage); + builder.startObject(DownloadStatsFields.TOTAL_DOWNLOAD_SIZE); + builder.field(SubFields.SUCCEEDED_BYTES, remoteTranslogShardStats.downloadBytesSucceeded); builder.endObject(); - builder.startObject(Fields.REMOTE_REFRESH_LATENCY_IN_MILLIS); - builder.field(SubFields.MOVING_AVG, remoteSegmentUploadShardStats.uploadTimeMovingAverage); + + builder.field(DownloadStatsFields.TOTAL_DOWNLOAD_TIME_IN_MILLIS, remoteTranslogShardStats.totalDownloadTimeInMillis); + + builder.startObject(DownloadStatsFields.DOWNLOAD_SIZE_IN_BYTES); + builder.field(SubFields.MOVING_AVG, remoteTranslogShardStats.downloadBytesMovingAverage); builder.endObject(); + + builder.startObject(DownloadStatsFields.DOWNLOAD_SPEED_IN_BYTES_PER_SEC); + builder.field(SubFields.MOVING_AVG, remoteTranslogShardStats.downloadBytesPerSecMovingAverage); builder.endObject(); - return builder; + builder.startObject(DownloadStatsFields.DOWNLOAD_TIME_IN_MILLIS); + builder.field(SubFields.MOVING_AVG, remoteTranslogShardStats.downloadTimeMovingAverage); + builder.endObject(); } - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeOptionalWriteable(remoteSegmentUploadShardStats); + private void buildSegmentUploadStats(XContentBuilder builder) throws IOException { + builder.field(UploadStatsFields.LOCAL_REFRESH_TIMESTAMP, remoteSegmentShardStats.localRefreshClockTimeMs) + .field(UploadStatsFields.REMOTE_REFRESH_TIMESTAMP, remoteSegmentShardStats.remoteRefreshClockTimeMs) + .field(UploadStatsFields.REFRESH_TIME_LAG_IN_MILLIS, remoteSegmentShardStats.refreshTimeLagMs) + .field(UploadStatsFields.REFRESH_LAG, remoteSegmentShardStats.localRefreshNumber - remoteSegmentShardStats.remoteRefreshNumber) + .field(UploadStatsFields.BYTES_LAG, remoteSegmentShardStats.bytesLag) + .field(UploadStatsFields.BACKPRESSURE_REJECTION_COUNT, remoteSegmentShardStats.rejectionCount) + .field(UploadStatsFields.CONSECUTIVE_FAILURE_COUNT, remoteSegmentShardStats.consecutiveFailuresCount); + builder.startObject(UploadStatsFields.TOTAL_UPLOADS) + 
.field(SubFields.STARTED, remoteSegmentShardStats.totalUploadsStarted) + .field(SubFields.SUCCEEDED, remoteSegmentShardStats.totalUploadsSucceeded) + .field(SubFields.FAILED, remoteSegmentShardStats.totalUploadsFailed); + builder.endObject(); + builder.startObject(UploadStatsFields.TOTAL_UPLOAD_SIZE) + .field(SubFields.STARTED_BYTES, remoteSegmentShardStats.uploadBytesStarted) + .field(SubFields.SUCCEEDED_BYTES, remoteSegmentShardStats.uploadBytesSucceeded) + .field(SubFields.FAILED_BYTES, remoteSegmentShardStats.uploadBytesFailed); + builder.endObject(); + builder.startObject(UploadStatsFields.REMOTE_REFRESH_SIZE_IN_BYTES) + .field(SubFields.LAST_SUCCESSFUL, remoteSegmentShardStats.lastSuccessfulRemoteRefreshBytes) + .field(SubFields.MOVING_AVG, remoteSegmentShardStats.uploadBytesMovingAverage); + builder.endObject(); + builder.startObject(UploadStatsFields.UPLOAD_SPEED_IN_BYTES_PER_SEC) + .field(SubFields.MOVING_AVG, remoteSegmentShardStats.uploadBytesPerSecMovingAverage); + builder.endObject(); + builder.startObject(UploadStatsFields.REMOTE_REFRESH_LATENCY_IN_MILLIS) + .field(SubFields.MOVING_AVG, remoteSegmentShardStats.uploadTimeMovingAverage); + builder.endObject(); + } + + private void buildSegmentDownloadStats(XContentBuilder builder) throws IOException { + builder.field( + DownloadStatsFields.LAST_SYNC_TIMESTAMP, + remoteSegmentShardStats.directoryFileTransferTrackerStats.lastTransferTimestampMs + ); + builder.startObject(DownloadStatsFields.TOTAL_DOWNLOAD_SIZE) + .field(SubFields.STARTED_BYTES, remoteSegmentShardStats.directoryFileTransferTrackerStats.transferredBytesStarted) + .field(SubFields.SUCCEEDED_BYTES, remoteSegmentShardStats.directoryFileTransferTrackerStats.transferredBytesSucceeded) + .field(SubFields.FAILED_BYTES, remoteSegmentShardStats.directoryFileTransferTrackerStats.transferredBytesFailed); + builder.endObject(); + builder.startObject(DownloadStatsFields.DOWNLOAD_SIZE_IN_BYTES) + .field(SubFields.LAST_SUCCESSFUL, remoteSegmentShardStats.directoryFileTransferTrackerStats.lastSuccessfulTransferInBytes) + .field(SubFields.MOVING_AVG, remoteSegmentShardStats.directoryFileTransferTrackerStats.transferredBytesMovingAverage); + builder.endObject(); + builder.startObject(DownloadStatsFields.DOWNLOAD_SPEED_IN_BYTES_PER_SEC) + .field(SubFields.MOVING_AVG, remoteSegmentShardStats.directoryFileTransferTrackerStats.transferredBytesPerSecMovingAverage); + builder.endObject(); + } + + private void buildShardRouting(XContentBuilder builder) throws IOException { + builder.startObject(Fields.ROUTING); + builder.field(RoutingFields.STATE, shardRouting.state()); + builder.field(RoutingFields.PRIMARY, shardRouting.primary()); + builder.field(RoutingFields.NODE_ID, shardRouting.currentNodeId()); + builder.endObject(); } /** * Fields for remote store stats response */ static final class Fields { - static final String SHARD_ID = "shard_id"; + static final String ROUTING = "routing"; + static final String SEGMENT = "segment"; + static final String TRANSLOG = "translog"; + } + static final class RoutingFields { + static final String STATE = "state"; + static final String PRIMARY = "primary"; + static final String NODE_ID = "node"; + } + + /** + * Fields for remote store stats response + */ + public static final class UploadStatsFields { /** * Lag in terms of bytes b/w local and remote store */ @@ -126,46 +279,115 @@ static final class Fields { static final String CONSECUTIVE_FAILURE_COUNT = "consecutive_failure_count"; /** - * Represents the number of remote refreshes + * Represents the 
size of new data to be uploaded as part of a refresh */ - static final String TOTAL_REMOTE_REFRESH = "total_remote_refresh"; + static final String REMOTE_REFRESH_SIZE_IN_BYTES = "remote_refresh_size_in_bytes"; + + /** + * Time taken by a single remote refresh + */ + static final String REMOTE_REFRESH_LATENCY_IN_MILLIS = "remote_refresh_latency_in_millis"; + + /** + * Timestamp of last successful remote store upload + */ + static final String LAST_SUCCESSFUL_UPLOAD_TIMESTAMP = "last_successful_upload_timestamp"; + + /** + * Count of files uploaded to remote store + */ + public static final String TOTAL_UPLOADS = "total_uploads"; /** * Represents the total uploads to remote store in bytes */ - static final String TOTAL_UPLOADS_IN_BYTES = "total_uploads_in_bytes"; + public static final String TOTAL_UPLOAD_SIZE = "total_upload_size"; /** - * Represents the size of new data to be uploaded as part of a refresh + * Total time spent on remote store uploads */ - static final String REMOTE_REFRESH_SIZE_IN_BYTES = "remote_refresh_size_in_bytes"; + static final String TOTAL_UPLOAD_TIME_IN_MILLIS = "total_upload_time_in_millis"; + + /** + * Represents the size of new data to be transferred as part of a remote store upload + */ + static final String UPLOAD_SIZE_IN_BYTES = "upload_size_in_bytes"; /** * Represents the speed of remote store uploads in bytes per sec */ - static final String UPLOAD_LATENCY_IN_BYTES_PER_SEC = "upload_latency_in_bytes_per_sec"; + static final String UPLOAD_SPEED_IN_BYTES_PER_SEC = "upload_speed_in_bytes_per_sec"; /** - * Time taken by a single remote refresh + * Time taken by a remote store upload */ - static final String REMOTE_REFRESH_LATENCY_IN_MILLIS = "remote_refresh_latency_in_millis"; + static final String UPLOAD_TIME_IN_MILLIS = "upload_time_in_millis"; + } + + static final class DownloadStatsFields { + /** + * Epoch timestamp of the last successful download + */ + public static final String LAST_SUCCESSFUL_DOWNLOAD_TIMESTAMP = "last_successful_download_timestamp"; + + /** + * Last successful sync from remote in milliseconds + */ + static final String LAST_SYNC_TIMESTAMP = "last_sync_timestamp"; + + /** + * Count of files downloaded from remote store + */ + public static final String TOTAL_DOWNLOADS = "total_downloads"; + + /** + * Total time spent in downloads from remote store + */ + public static final String TOTAL_DOWNLOAD_TIME_IN_MILLIS = "total_download_time_in_millis"; + + /** + * Total bytes of files downloaded from the remote store + */ + static final String TOTAL_DOWNLOAD_SIZE = "total_download_size"; + + /** + * Average size of a file downloaded from the remote store + */ + static final String DOWNLOAD_SIZE_IN_BYTES = "download_size_in_bytes"; + + /** + * Average speed (in bytes/sec) of a remote store download + */ + static final String DOWNLOAD_SPEED_IN_BYTES_PER_SEC = "download_speed_in_bytes_per_sec"; + + /** + * Average time spent on a remote store download + */ + public static final String DOWNLOAD_TIME_IN_MILLIS = "download_time_in_millis"; } /** - * Reusable sub fields for {@link Fields} + * Reusable sub fields for {@link UploadStatsFields} and {@link DownloadStatsFields} */ - static final class SubFields { - static final String STARTED = "started"; - static final String SUCCEEDED = "succeeded"; - static final String FAILED = "failed"; + public static final class SubFields { + public static final String STARTED = "started"; + public static final String SUCCEEDED = "succeeded"; + public static final String FAILED = "failed"; + + public static final 
String STARTED_BYTES = "started_bytes"; + public static final String SUCCEEDED_BYTES = "succeeded_bytes"; + public static final String FAILED_BYTES = "failed_bytes"; + + static final String DOWNLOAD = "download"; + public static final String UPLOAD = "upload"; /** - * Moving avg over last N values stat for a {@link Fields} + * Moving avg over last N values stat */ static final String MOVING_AVG = "moving_avg"; /** - * Most recent successful attempt stat for a {@link Fields} + * Most recent successful attempt stat */ static final String LAST_SUCCESSFUL = "last_successful"; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsRequest.java index f09cf79c5154c..12c316adc75cc 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsRequest.java @@ -9,6 +9,7 @@ package org.opensearch.action.admin.cluster.remotestore.stats; import org.opensearch.action.support.broadcast.BroadcastRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -17,8 +18,9 @@ /** * Encapsulates all remote store stats * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.8.0") public class RemoteStoreStatsRequest extends BroadcastRequest<RemoteStoreStatsRequest> { private String[] shards; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsRequestBuilder.java index c31e4a1fd6178..4da700d3dc51b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsRequestBuilder.java @@ -10,13 +10,15 @@ import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; /** * Builder for RemoteStoreStatsRequest * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.8.0") public class RemoteStoreStatsRequestBuilder extends BroadcastOperationRequestBuilder< RemoteStoreStatsRequest, RemoteStoreStatsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsResponse.java index 20023e30a271e..cad57d148770b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsResponse.java @@ -8,67 +8,94 @@ package org.opensearch.action.admin.cluster.remotestore.stats; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.broadcast.BroadcastResponse; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; +import 
org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; /** * Remote Store stats response * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.8.0") public class RemoteStoreStatsResponse extends BroadcastResponse { - private final RemoteStoreStats[] shards; + private final RemoteStoreStats[] remoteStoreStats; public RemoteStoreStatsResponse(StreamInput in) throws IOException { super(in); - shards = in.readArray(RemoteStoreStats::new, RemoteStoreStats[]::new); + remoteStoreStats = in.readArray(RemoteStoreStats::new, RemoteStoreStats[]::new); } public RemoteStoreStatsResponse( - RemoteStoreStats[] shards, + RemoteStoreStats[] remoteStoreStats, int totalShards, int successfulShards, int failedShards, List<DefaultShardOperationFailedException> shardFailures ) { super(totalShards, successfulShards, failedShards, shardFailures); - this.shards = shards; + this.remoteStoreStats = remoteStoreStats; } - public RemoteStoreStats[] getShards() { - return this.shards; + public RemoteStoreStats[] getRemoteStoreStats() { + return this.remoteStoreStats; } - public RemoteStoreStats getAt(int position) { - return shards[position]; + public Map<String, Map<Integer, List<RemoteStoreStats>>> groupByIndexAndShards() { + Map<String, Map<Integer, List<RemoteStoreStats>>> indexWiseStats = new HashMap<>(); + for (RemoteStoreStats shardStat : remoteStoreStats) { + indexWiseStats.computeIfAbsent(shardStat.getShardRouting().getIndexName(), k -> new HashMap<>()) + .computeIfAbsent(shardStat.getShardRouting().getId(), k -> new ArrayList<>()) + .add(shardStat); + } + return indexWiseStats; } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeArray(shards); + out.writeArray(remoteStoreStats); } @Override protected void addCustomXContentFields(XContentBuilder builder, Params params) throws IOException { - builder.startArray("stats"); - for (RemoteStoreStats shard : shards) { - shard.toXContent(builder, params); + Map<String, Map<Integer, List<RemoteStoreStats>>> indexWiseStats = groupByIndexAndShards(); + builder.startObject(Fields.INDICES); + for (String indexName : indexWiseStats.keySet()) { + builder.startObject(indexName); + builder.startObject(Fields.SHARDS); + for (int shardId : indexWiseStats.get(indexName).keySet()) { + builder.startArray(Integer.toString(shardId)); + for (RemoteStoreStats shardStat : indexWiseStats.get(indexName).get(shardId)) { + shardStat.toXContent(builder, params); + } + builder.endArray(); + } + builder.endObject(); + builder.endObject(); } - builder.endArray(); + builder.endObject(); } @Override public String toString() { - return Strings.toString(XContentType.JSON, this, true, false); + return Strings.toString(MediaTypeRegistry.JSON, this, true, false); + } + + static final class Fields { + static final String SHARDS = "shards"; + static final String INDICES = "indices"; } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsAction.java index 434abd1207f50..bd8db4a160bf6 
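With the RemoteStoreStatsResponse rewrite above, the response is no longer a flat "stats" array: per-shard entries are grouped index, then shard id, then a list of shard-copy stats (groupByIndexAndShards), and each RemoteStoreStats entry now nests routing, segment upload/download, and translog upload/download sections instead of top-level fields. A sketch of consuming the grouped form, with "my-index" as a placeholder index name:

    import java.util.List;
    import java.util.Map;

    Map<String, Map<Integer, List<RemoteStoreStats>>> byIndex = response.groupByIndexAndShards();
    for (RemoteStoreStats copy : byIndex.get("my-index").get(0)) {
        RemoteSegmentTransferTracker.Stats segment = copy.getSegmentStats();
        RemoteTranslogTransferTracker.Stats translog = copy.getTranslogStats();
        boolean primary = copy.getShardRouting().primary(); // replica copies now report too
    }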
100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsAction.java @@ -9,7 +9,6 @@ package org.opensearch.action.admin.cluster.remotestore.stats; import org.opensearch.action.support.ActionFilters; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; @@ -21,10 +20,12 @@ import org.opensearch.cluster.routing.ShardsIterator; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.index.IndexService; -import org.opensearch.index.remote.RemoteRefreshSegmentPressureService; -import org.opensearch.index.remote.RemoteRefreshSegmentTracker; +import org.opensearch.index.remote.RemoteSegmentTransferTracker; +import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ShardNotFoundException; import org.opensearch.indices.IndicesService; @@ -49,7 +50,8 @@ public class TransportRemoteStoreStatsAction extends TransportBroadcastByNodeAct RemoteStoreStats> { private final IndicesService indicesService; - private final RemoteRefreshSegmentPressureService remoteRefreshSegmentPressureService; + + private final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory; @Inject public TransportRemoteStoreStatsAction( @@ -58,7 +60,7 @@ public TransportRemoteStoreStatsAction( IndicesService indicesService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - RemoteRefreshSegmentPressureService remoteRefreshSegmentPressureService + RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory ) { super( RemoteStoreStatsAction.NAME, @@ -70,7 +72,7 @@ public TransportRemoteStoreStatsAction( ThreadPool.Names.MANAGEMENT ); this.indicesService = indicesService; - this.remoteRefreshSegmentPressureService = remoteRefreshSegmentPressureService; + this.remoteStoreStatsTrackerFactory = remoteStoreStatsTrackerFactory; } /** @@ -95,7 +97,6 @@ protected ShardsIterator shards(ClusterState clusterState, RemoteStoreStatsReque || (shardRouting.currentNodeId() == null || shardRouting.currentNodeId().equals(clusterState.getNodes().getLocalNodeId())) ) - .filter(ShardRouting::primary) .filter( shardRouting -> Boolean.parseBoolean( clusterState.getMetadata().index(shardRouting.index()).getSettings().get(IndexMetadata.SETTING_REMOTE_STORE_ENABLED) @@ -153,11 +154,15 @@ protected RemoteStoreStats shardOperation(RemoteStoreStatsRequest request, Shard throw new ShardNotFoundException(indexShard.shardId()); } - RemoteRefreshSegmentTracker remoteRefreshSegmentTracker = remoteRefreshSegmentPressureService.getRemoteRefreshSegmentTracker( + RemoteSegmentTransferTracker remoteSegmentTransferTracker = remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker( + indexShard.shardId() + ); + assert Objects.nonNull(remoteSegmentTransferTracker); + RemoteTranslogTransferTracker remoteTranslogTransferTracker = remoteStoreStatsTrackerFactory.getRemoteTranslogTransferTracker( 
indexShard.shardId() ); - assert Objects.nonNull(remoteRefreshSegmentTracker); + assert Objects.nonNull(remoteTranslogTransferTracker); - return new RemoteStoreStats(remoteRefreshSegmentTracker.stats()); + return new RemoteStoreStats(remoteSegmentTransferTracker.stats(), remoteTranslogTransferTracker.stats(), indexShard.routingEntry()); } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java index 7e3f4cd95fc72..3e408c6114690 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java @@ -33,6 +33,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -43,8 +44,9 @@ /** * Transport request for cleaning up snapshot repositories * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CleanupRepositoryRequest extends AcknowledgedRequest<CleanupRepositoryRequest> { private String repository; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequestBuilder.java index 95c4fb372572f..34e42b157e627 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.ActionType; import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport builder for cleaning up snapshot repositories * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CleanupRepositoryRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< CleanupRepositoryRequest, CleanupRepositoryResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryResponse.java index 7d9dcd25aca8c..e6790e8cbe708 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryResponse.java @@ -31,8 +31,9 @@ package org.opensearch.action.admin.cluster.repositories.cleanup; -import org.opensearch.action.ActionResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ObjectParser; @@ -46,8 +47,9 @@ /** * Transport response for cleaning up snapshot repositories * - * 
@opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class CleanupRepositoryResponse extends ActionResponse implements ToXContentObject { private static final ObjectParser<CleanupRepositoryResponse, Void> PARSER = new ObjectParser<>( diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java index 2b91c6641211d..774bffa10da4f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java @@ -34,7 +34,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.action.StepListener; import org.opensearch.action.support.ActionFilters; @@ -51,6 +50,7 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.index.store.lockmanager.RemoteStoreLockManagerFactory; import org.opensearch.repositories.RepositoriesService; @@ -67,7 +67,7 @@ /** * Repository cleanup action for repository implementations based on {@link BlobStoreRepository}. - * + * <p> * The steps taken by the repository cleanup operation are as follows: * <ol> * <li>Check that there are no running repository cleanup, snapshot create, or snapshot delete actions diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java index 352a3772e039b..04fdf22bee593 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -46,8 +47,9 @@ * <p> * The unregister repository command just unregisters the repository. No data is getting deleted from the repository. 
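Most files in this patch receive the same two-part edit: the @opensearch.internal Javadoc tag becomes @opensearch.api, and the type gains a @PublicApi annotation whose since value records the first version at which it was treated as public. The resulting shape, using CleanupRepositoryRequest from above:

    /**
     * Transport request for cleaning up snapshot repositories
     *
     * @opensearch.api
     */
    @PublicApi(since = "1.0.0")
    public class CleanupRepositoryRequest extends AcknowledgedRequest<CleanupRepositoryRequest> {
        // body unchanged by this patch
    }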
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeleteRepositoryRequest extends AcknowledgedRequest<DeleteRepositoryRequest> { private String name; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java index ffef8d5b41979..6f5d0495e1c9f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java @@ -35,12 +35,14 @@ import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Builder for unregister repository request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeleteRepositoryRequestBuilder extends AcknowledgedRequestBuilder< DeleteRepositoryRequest, AcknowledgedResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java index 05a95b926f576..3d779befe474e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java @@ -32,16 +32,16 @@ package org.opensearch.action.admin.cluster.repositories.delete; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.repositories.RepositoriesService; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java index fb1f79dab4e39..80a86f1b79209 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java @@ -34,9 +34,10 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import 
org.opensearch.core.common.Strings; import java.io.IOException; @@ -45,8 +46,9 @@ /** * Get repository request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetRepositoriesRequest extends ClusterManagerNodeReadRequest<GetRepositoriesRequest> { private String[] repositories = Strings.EMPTY_ARRAY; @@ -88,7 +90,7 @@ public ActionRequestValidationException validate() { /** * The names of the repositories. * - * @return list of repositories + * @return array of repository names */ public String[] repositories() { return this.repositories; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java index 4b93aff4c25bc..b0c18f952b3df 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java @@ -34,13 +34,15 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.ArrayUtils; /** * Get repository request builder * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetRepositoriesRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< GetRepositoriesRequest, GetRepositoriesResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java index 53a94a8a56773..f467b240aac31 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java @@ -32,9 +32,10 @@ package org.opensearch.action.admin.cluster.repositories.get; -import org.opensearch.action.ActionResponse; import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContentObject; @@ -42,16 +43,17 @@ import org.opensearch.core.xcontent.XContentParser; import java.io.IOException; -import java.util.Collections; import java.util.List; +import java.util.Map; import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; /** * Get repositories response * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetRepositoriesResponse extends ActionResponse implements ToXContentObject { private RepositoriesMetadata repositories; @@ -83,7 +85,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); repositories.toXContent( builder, - new DelegatingMapParams(Collections.singletonMap(RepositoriesMetadata.HIDE_GENERATIONS_PARAM, "true"), params) + new DelegatingMapParams( + Map.of(RepositoriesMetadata.HIDE_GENERATIONS_PARAM, "true", RepositoriesMetadata.HIDE_SYSTEM_REPOSITORY_SETTING, "true"), + params + 
) ); builder.endObject(); return builder; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java index d367f75607d36..c7d784dbc96e7 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.repositories.get; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; import org.opensearch.cluster.ClusterState; @@ -44,8 +43,9 @@ import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.regex.Regex; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.repositories.RepositoryMissingException; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java index b001e9456e78c..3fbd5743b5c8c 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java @@ -32,22 +32,25 @@ package org.opensearch.action.admin.cluster.repositories.put; +import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.admin.cluster.crypto.CryptoSettings; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.settings.Settings; +import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentType; import java.io.IOException; import java.util.Map; import static org.opensearch.action.ValidateActions.addValidationError; +import static org.opensearch.common.settings.Settings.Builder.EMPTY_SETTINGS; import static org.opensearch.common.settings.Settings.readSettingsFromStream; import static org.opensearch.common.settings.Settings.writeSettingsToStream; -import static org.opensearch.common.settings.Settings.Builder.EMPTY_SETTINGS; /** * Register repository request. @@ -55,8 +58,9 @@ * Registers a repository with given name, type and settings. If the repository with the same name already * exists in the cluster, the new repository will replace the existing repository. 
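The GetRepositoriesResponse.toXContent hunk above swaps a singleton hide-generations map for a two-entry map that also hides system repository settings. ToXContent.DelegatingMapParams consults the supplied map first and falls back to the caller's params for everything else, so the response can force its own flags without discarding caller options. A small sketch of that layering, using only classes already imported in the hunk:

    // Force the two hide flags; any other param the caller set still
    // resolves through the delegate.
    ToXContent.Params filtered = new ToXContent.DelegatingMapParams(
        Map.of(
            RepositoriesMetadata.HIDE_GENERATIONS_PARAM, "true",
            RepositoriesMetadata.HIDE_SYSTEM_REPOSITORY_SETTING, "true"
        ),
        params
    );
    repositories.toXContent(builder, filtered);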
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class PutRepositoryRequest extends AcknowledgedRequest<PutRepositoryRequest> implements ToXContentObject { private String name; @@ -67,12 +71,18 @@ public class PutRepositoryRequest extends AcknowledgedRequest<PutRepositoryReque private Settings settings = EMPTY_SETTINGS; + private CryptoSettings cryptoSettings; + public PutRepositoryRequest(StreamInput in) throws IOException { super(in); name = in.readString(); type = in.readString(); settings = readSettingsFromStream(in); verify = in.readBoolean(); + + if (in.getVersion().onOrAfter(Version.V_2_10_0)) { + cryptoSettings = in.readOptionalWriteable(CryptoSettings::new); + } } public PutRepositoryRequest() {} @@ -93,6 +103,9 @@ public ActionRequestValidationException validate() { if (type == null) { validationException = addValidationError("type is missing", validationException); } + if (cryptoSettings != null) { + validationException = cryptoSettings.validate(); + } return validationException; } @@ -164,11 +177,11 @@ public PutRepositoryRequest settings(Settings.Builder settings) { * Sets the repository settings. * * @param source repository settings in json or yaml format - * @param xContentType the content type of the source + * @param mediaType the content type of the source * @return this request */ - public PutRepositoryRequest settings(String source, XContentType xContentType) { - this.settings = Settings.builder().loadFromSource(source, xContentType).build(); + public PutRepositoryRequest settings(String source, final MediaType mediaType) { + this.settings = Settings.builder().loadFromSource(source, mediaType).build(); return this; } @@ -207,6 +220,26 @@ public boolean verify() { return this.verify; } + /** + * Sets the repository crypto settings + * + * @param cryptoSettings repository crypto settings + * @return this request + */ + public PutRepositoryRequest cryptoSettings(CryptoSettings cryptoSettings) { + this.cryptoSettings = cryptoSettings; + return this; + } + + /** + * Returns repository encryption settings + * + * @return repository encryption settings + */ + public CryptoSettings cryptoSettings() { + return cryptoSettings; + } + /** * Parses repository definition. 
* @@ -224,6 +257,14 @@ public PutRepositoryRequest source(Map<String, Object> repositoryDefinition) { @SuppressWarnings("unchecked") Map<String, Object> sub = (Map<String, Object>) entry.getValue(); settings(sub); + } else if (name.equals("crypto_settings")) { + if (!(entry.getValue() instanceof Map)) { + throw new IllegalArgumentException("Malformed crypto_settings section, should include an inner object"); + } + @SuppressWarnings("unchecked") + Map<String, Object> sub = (Map<String, Object>) entry.getValue(); + CryptoSettings cryptoSettings = new CryptoSettings(sub); + cryptoSettings(cryptoSettings); + } } return this; @@ -236,6 +277,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(type); writeSettingsToStream(settings, out); out.writeBoolean(verify); + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { + out.writeOptionalWriteable(cryptoSettings); + } } @Override @@ -249,6 +293,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endObject(); builder.field("verify", verify); + + if (cryptoSettings != null) { + builder.startObject("crypto_settings"); + cryptoSettings.toXContent(builder, params); + builder.endObject(); + } + builder.endObject(); return builder; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java index 6e1b2795b6375..22aa6d7dc7c00 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java @@ -32,9 +32,11 @@ package org.opensearch.action.admin.cluster.repositories.put; +import org.opensearch.action.admin.cluster.crypto.CryptoSettings; import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentType; @@ -43,8 +45,9 @@ /** * Register repository request builder * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class PutRepositoryRequestBuilder extends AcknowledgedRequestBuilder< PutRepositoryRequest, AcknowledgedResponse, @@ -141,4 +144,15 @@ public PutRepositoryRequestBuilder setVerify(boolean verify) { request.verify(verify); return this; } + + /** + * Sets the repository encryption settings + * + * @param cryptoSettings repository crypto settings + * @return this builder + */ + public PutRepositoryRequestBuilder setEncryptionSettings(CryptoSettings cryptoSettings) { + request.cryptoSettings(cryptoSettings); + return this; + } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java index 16f7d6d5700bf..1eadab6b1352e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java @@ -32,16 +32,16 @@ package org.opensearch.action.admin.cluster.repositories.put; -import org.opensearch.action.ActionListener;
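The crypto settings field rides behind a wire-version gate: nodes older than 2.10.0 never receive the new optional writeable, and the read path in the StreamInput constructor mirrors the write path exactly (same gate, same position in the stream). The pattern, condensed from the two PutRepositoryRequest hunks above:

    // Write side: emit the new field only to nodes that know how to read it.
    if (out.getVersion().onOrAfter(Version.V_2_10_0)) {
        out.writeOptionalWriteable(cryptoSettings);
    }

    // Read side: the gate and stream position must match the write side,
    // otherwise mixed-version clusters desynchronize on the wire format.
    if (in.getVersion().onOrAfter(Version.V_2_10_0)) {
        cryptoSettings = in.readOptionalWriteable(CryptoSettings::new);
    }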
import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.repositories.RepositoriesService; import org.opensearch.threadpool.ThreadPool; @@ -100,7 +100,7 @@ protected void clusterManagerOperation( ClusterState state, final ActionListener<AcknowledgedResponse> listener ) { - repositoriesService.registerRepository( + repositoriesService.registerOrUpdateRepository( request, ActionListener.delegateFailure( listener, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java index ec54d7640c1d3..d717d2200902f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.repositories.verify; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; @@ -42,6 +41,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.repositories.RepositoriesService; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java index b84161e716f5d..ae6c92d8625ca 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -44,8 +45,9 @@ /** * Verify repository request. 
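Put together, the new pieces let a caller register an encrypted repository through the builder. A usage sketch: the CryptoSettings(Map) constructor comes from the source() parsing above, but the key-provider fields and repository settings are illustrative assumptions, not values taken from this patch:

    // Hypothetical key provider configuration; the field names are assumptions.
    CryptoSettings cryptoSettings = new CryptoSettings(
        Map.of("key_provider_name", "my-kms", "key_provider_type", "aws-kms")
    );
    AcknowledgedResponse ack = client.admin()
        .cluster()
        .preparePutRepository("my-encrypted-repo")
        .setType("s3")                          // repository type is illustrative
        .setSettings(Settings.builder().put("bucket", "my-bucket"))
        .setEncryptionSettings(cryptoSettings)  // new in this patch
        .get();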
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class VerifyRepositoryRequest extends AcknowledgedRequest<VerifyRepositoryRequest> { private String name; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java index c405fb9bc12cd..023f223700775 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Builder for verify repository request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class VerifyRepositoryRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< VerifyRepositoryRequest, VerifyRepositoryResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java index 8b9142a39e9c9..12d6a4cca5683 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java @@ -32,14 +32,15 @@ package org.opensearch.action.admin.cluster.repositories.verify; -import org.opensearch.action.ActionResponse; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; -import org.opensearch.common.Strings; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; @@ -54,8 +55,9 @@ /** * Verify repository response * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class VerifyRepositoryResponse extends ActionResponse implements ToXContentObject { static final String NODES = "nodes"; @@ -64,8 +66,9 @@ public class VerifyRepositoryResponse extends ActionResponse implements ToXConte /** * Inner Node View * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class NodeView implements Writeable, ToXContentObject { private static final ObjectParser.NamedObjectParser<NodeView, Void> PARSER; static { @@ -196,7 +199,7 @@ public static VerifyRepositoryResponse fromXContent(XContentParser parser) { @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequest.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequest.java index a6addce14787d..1cefaa4866110 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequest.java @@ -36,6 +36,7 @@ import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.cluster.routing.allocation.command.AllocationCommand; import org.opensearch.cluster.routing.allocation.command.AllocationCommands; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -45,8 +46,9 @@ /** * Request to submit cluster reroute allocation commands * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterRerouteRequest extends AcknowledgedRequest<ClusterRerouteRequest> { private AllocationCommands commands = new AllocationCommands(); private boolean dryRun; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java index 01d52cb43320d..fc8310bdf7852 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java @@ -35,12 +35,14 @@ import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.cluster.routing.allocation.command.AllocationCommand; +import org.opensearch.common.annotation.PublicApi; /** * Builder for a cluster reroute request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterRerouteRequestBuilder extends AcknowledgedRequestBuilder< ClusterRerouteRequest, ClusterRerouteResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponse.java index a62029218ca25..ff01888040e82 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponse.java @@ -35,6 +35,7 @@ import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.routing.allocation.RoutingExplanations; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContent; @@ -46,8 +47,9 @@ /** * Response returned after a cluster reroute request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterRerouteResponse extends AcknowledgedResponse implements ToXContentObject { private final ClusterState state; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java index 61f2a6301a2dd..134583a56f489 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java +++ 
b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java @@ -36,7 +36,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionListenerResponseHandler; import org.opensearch.action.admin.indices.shards.IndicesShardStoresAction; import org.opensearch.action.admin.indices.shards.IndicesShardStoresRequest; @@ -59,8 +58,9 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Priority; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterGetSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterGetSettingsRequest.java index e1d5b62c59966..a56e7bbcfbfa4 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterGetSettingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterGetSettingsRequest.java @@ -33,8 +33,8 @@ package org.opensearch.action.admin.cluster.settings; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; import org.opensearch.action.admin.cluster.state.ClusterStateRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; /** * This request is specific to the REST client. 
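ClusterRerouteRequest and its builder join the public API surface here as well. For orientation, a minimal reroute through the builder; the index and node names are illustrative:

    // Ask the cluster manager to move shard 0 of "my-index" from node1 to
    // node2. setDryRun(true) computes the resulting allocation without
    // applying it.
    ClusterRerouteResponse response = client.admin()
        .cluster()
        .prepareReroute()
        .add(new MoveAllocationCommand("my-index", 0, "node1", "node2"))
        .setDryRun(true)
        .get();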
{@link ClusterStateRequest} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterGetSettingsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterGetSettingsResponse.java index 8d29baa82562a..6be255696251f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterGetSettingsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterGetSettingsResponse.java @@ -32,17 +32,17 @@ package org.opensearch.action.admin.cluster.settings; -import org.opensearch.action.ActionResponse; +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.core.ParseField; -import org.opensearch.common.Strings; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.action.admin.cluster.state.ClusterStateResponse; -import org.opensearch.common.xcontent.XContentType; import java.io.IOException; import java.util.Objects; @@ -176,7 +176,7 @@ public int hashCode() { @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java index aaa89ee269fd8..77aee99c2e902 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java @@ -34,29 +34,31 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.settings.Settings; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.settings.Settings; +import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import java.io.IOException; import java.util.Map; import static org.opensearch.action.ValidateActions.addValidationError; +import static org.opensearch.common.settings.Settings.Builder.EMPTY_SETTINGS; import static org.opensearch.common.settings.Settings.readSettingsFromStream; import static org.opensearch.common.settings.Settings.writeSettingsToStream; -import static org.opensearch.common.settings.Settings.Builder.EMPTY_SETTINGS; /** * Request for an update cluster settings action * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterUpdateSettingsRequest extends 
AcknowledgedRequest<ClusterUpdateSettingsRequest> implements ToXContentObject { private static final ParseField PERSISTENT = new ParseField("persistent"); @@ -120,8 +122,8 @@ public ClusterUpdateSettingsRequest transientSettings(Settings.Builder settings) /** * Sets the source containing the transient settings to be updated. They will not survive a full cluster restart */ - public ClusterUpdateSettingsRequest transientSettings(String source, XContentType xContentType) { - this.transientSettings = Settings.builder().loadFromSource(source, xContentType).build(); + public ClusterUpdateSettingsRequest transientSettings(String source, final MediaType mediaType) { + this.transientSettings = Settings.builder().loadFromSource(source, mediaType).build(); return this; } @@ -152,8 +154,8 @@ public ClusterUpdateSettingsRequest persistentSettings(Settings.Builder settings /** * Sets the source containing the persistent settings to be updated. They will get applied cross restarts */ - public ClusterUpdateSettingsRequest persistentSettings(String source, XContentType xContentType) { - this.persistentSettings = Settings.builder().loadFromSource(source, xContentType).build(); + public ClusterUpdateSettingsRequest persistentSettings(String source, final MediaType mediaType) { + this.persistentSettings = Settings.builder().loadFromSource(source, mediaType).build(); return this; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java index 4d08c94f78b6a..53f1f17bbeb50 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java @@ -34,6 +34,7 @@ import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentType; @@ -42,8 +43,9 @@ /** * Builder for a cluster update settings request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterUpdateSettingsRequestBuilder extends AcknowledgedRequestBuilder< ClusterUpdateSettingsRequest, ClusterUpdateSettingsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java index 6daa2a76482dd..2dfdb49736773 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java @@ -33,10 +33,11 @@ package org.opensearch.action.admin.cluster.settings; import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.settings.Settings; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ 
-49,8 +50,9 @@ /** * A response for a cluster update settings action. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterUpdateSettingsResponse extends AcknowledgedResponse { private static final ParseField PERSISTENT = new ParseField("persistent"); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index 542ca612d2b6c..2f3cc77b05550 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -36,7 +36,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.AckedClusterStateUpdateTask; @@ -53,8 +52,9 @@ import org.opensearch.common.Nullable; import org.opensearch.common.Priority; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java index 0ee7e8d67d307..eaafca21e1894 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java @@ -33,20 +33,22 @@ package org.opensearch.action.admin.cluster.shards; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.index.shard.ShardId; import java.io.IOException; /** * Transport action for searching shard groups * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterSearchShardsGroup implements Writeable, ToXContentObject { private final ShardId shardId; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java index e1d88faa10082..62e05ebb37e28 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java @@ -37,9 +37,10 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; import 
org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import java.io.IOException; import java.util.Objects; @@ -47,8 +48,9 @@ /** * Transport request for searching shards * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterSearchShardsRequest extends ClusterManagerNodeReadRequest<ClusterSearchShardsRequest> implements IndicesRequest.Replaceable { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java index 674a2c2c36221..c4f8a29bbcf3d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java @@ -35,12 +35,14 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport request builder for searching shards * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterSearchShardsRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< ClusterSearchShardsRequest, ClusterSearchShardsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java index d43ef8a0c9c61..8ab6455bca4a7 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java @@ -32,8 +32,9 @@ package org.opensearch.action.admin.cluster.shards; -import org.opensearch.action.ActionResponse; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContentObject; @@ -47,8 +48,9 @@ /** * Transport response for searching shards * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterSearchShardsResponse extends ActionResponse implements ToXContentObject { private final ClusterSearchShardsGroup[] groups; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java index 7e20e6a1b31d5..a2a65b6400c97 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.shards; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import 
org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; import org.opensearch.cluster.ClusterState; @@ -45,6 +44,7 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.IndicesService; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequest.java index ad6b035d1f51e..72de28ca7e699 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequest.java @@ -15,11 +15,12 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.cluster.metadata.WeightedRoutingMetadata; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -32,8 +33,9 @@ /** * Request to delete weights for weighted round-robin shard routing policy. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class ClusterDeleteWeightedRoutingRequest extends ClusterManagerNodeRequest<ClusterDeleteWeightedRoutingRequest> { private static final Logger logger = LogManager.getLogger(ClusterDeleteWeightedRoutingRequest.class); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequestBuilder.java index bb34fea589534..e0d4e0ad2abed 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingRequestBuilder.java @@ -10,12 +10,14 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Request builder to delete weights for weighted round-robin shard routing policy. 
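A second change threads through nearly every transport action in this patch: ActionListener moved from org.opensearch.action to org.opensearch.core.action. The interface contract is unchanged, so call sites only swap the import. For orientation (logger assumed in scope):

    import org.opensearch.core.action.ActionListener;

    // Same two callbacks as before the package move: one for the response,
    // one for the failure.
    ActionListener<ClusterSearchShardsResponse> listener = ActionListener.wrap(
        response -> logger.info("search shard groups: {}", response.getGroups().length),
        e -> logger.error("cluster search shards failed", e)
    );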
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class ClusterDeleteWeightedRoutingRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< ClusterDeleteWeightedRoutingRequest, ClusterDeleteWeightedRoutingResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingResponse.java index 2a417e9f4287f..f6a18ae5055ae 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingResponse.java @@ -9,6 +9,7 @@ package org.opensearch.action.admin.cluster.shards.routing.weighted.delete; import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -17,8 +18,9 @@ /** * Response from deleting weights for weighted round-robin search routing policy. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class ClusterDeleteWeightedRoutingResponse extends AcknowledgedResponse { ClusterDeleteWeightedRoutingResponse(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/TransportDeleteWeightedRoutingAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/TransportDeleteWeightedRoutingAction.java index 903fbb13d9f45..cea85ebf588bd 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/TransportDeleteWeightedRoutingAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/TransportDeleteWeightedRoutingAction.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; @@ -20,6 +19,7 @@ import org.opensearch.cluster.routing.WeightedRoutingService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingRequest.java index 7dcec15c750fc..937829de1db00 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingRequest.java @@ -10,6 +10,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; +import 
org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -20,8 +21,9 @@ /** * Request to get weights for weighted round-robin search routing policy. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class ClusterGetWeightedRoutingRequest extends ClusterManagerNodeReadRequest<ClusterGetWeightedRoutingRequest> { String awarenessAttribute; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingRequestBuilder.java index 82f4c1106461d..3cb5e7d3d07b9 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingRequestBuilder.java @@ -10,12 +10,14 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Request builder to get weights for weighted round-robin search routing policy. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class ClusterGetWeightedRoutingRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< ClusterGetWeightedRoutingRequest, ClusterGetWeightedRoutingResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingResponse.java index 91bd9ac2f00d1..b109ecb7de5d9 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/ClusterGetWeightedRoutingResponse.java @@ -9,10 +9,10 @@ package org.opensearch.action.admin.cluster.shards.routing.weighted.get; import org.opensearch.OpenSearchParseException; -import org.opensearch.action.ActionResponse; - import org.opensearch.cluster.metadata.WeightedRoutingMetadata; import org.opensearch.cluster.routing.WeightedRouting; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContentObject; @@ -29,8 +29,9 @@ /** * Response from fetching weights for weighted round-robin search routing policy. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class ClusterGetWeightedRoutingResponse extends ActionResponse implements ToXContentObject { private static final String WEIGHTS = "weights"; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/TransportGetWeightedRoutingAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/TransportGetWeightedRoutingAction.java index e73e662314378..50368d85e0011 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/TransportGetWeightedRoutingAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/get/TransportGetWeightedRoutingAction.java @@ -8,24 +8,21 @@ package org.opensearch.action.admin.cluster.shards.routing.weighted.get; -import org.opensearch.action.ActionListener; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; - import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; - import org.opensearch.cluster.metadata.WeightedRoutingMetadata; import org.opensearch.cluster.routing.WeightedRouting; import org.opensearch.cluster.routing.WeightedRoutingService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; - import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequest.java index bf1f42aedc4bb..c310e28610184 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequest.java @@ -16,11 +16,12 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.cluster.metadata.WeightedRoutingMetadata; import org.opensearch.cluster.routing.WeightedRouting; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -37,8 +38,9 @@ /** * Request to update weights for weighted round-robin shard routing policy. 
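The weighted routing family below is annotated @PublicApi(since = "2.4.0"), matching the release that introduced weighted round-robin search routing. A sketch of assigning zone weights; the prepare helper and the setWeightedRouting(...) setter are assumptions inferred from the builder's imports rather than methods shown in these hunks:

    // Drain search traffic away from zone "c" by giving it zero weight.
    WeightedRouting weights = new WeightedRouting("zone", Map.of("a", 1.0, "b", 1.0, "c", 0.0));
    ClusterPutWeightedRoutingResponse response = client.admin()
        .cluster()
        .preparePutWeightedRouting()    // assumed client helper
        .setWeightedRouting(weights)    // assumed builder setter
        .get();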
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class ClusterPutWeightedRoutingRequest extends ClusterManagerNodeRequest<ClusterPutWeightedRoutingRequest> { private static final Logger logger = LogManager.getLogger(ClusterPutWeightedRoutingRequest.class); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequestBuilder.java index adfb2cf02f6d9..c520b0efd9838 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequestBuilder.java @@ -11,12 +11,14 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.cluster.routing.WeightedRouting; +import org.opensearch.common.annotation.PublicApi; /** * Request builder to update weights for weighted round-robin shard routing policy. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class ClusterPutWeightedRoutingRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< ClusterPutWeightedRoutingRequest, ClusterPutWeightedRoutingResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingResponse.java index cbf10aa74f8a2..4fee2f05a8715 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingResponse.java @@ -9,6 +9,7 @@ package org.opensearch.action.admin.cluster.shards.routing.weighted.put; import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import java.io.IOException; @@ -16,8 +17,9 @@ /** * Response from updating weights for weighted round-robin search routing policy. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class ClusterPutWeightedRoutingResponse extends AcknowledgedResponse { public ClusterPutWeightedRoutingResponse(boolean acknowledged) { super(acknowledged); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/TransportAddWeightedRoutingAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/TransportAddWeightedRoutingAction.java index ed178884faf54..2f94842eaa39b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/TransportAddWeightedRoutingAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/TransportAddWeightedRoutingAction.java @@ -8,7 +8,6 @@ package org.opensearch.action.admin.cluster.shards.routing.weighted.put; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; @@ -19,6 +18,7 @@ import org.opensearch.cluster.routing.WeightedRoutingService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequest.java index fedcfa1f5d9ff..4a05911610137 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequest.java @@ -36,10 +36,11 @@ import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; @@ -50,8 +51,9 @@ /** * Transport request for cloning a snapshot * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CloneSnapshotRequest extends ClusterManagerNodeRequest<CloneSnapshotRequest> implements IndicesRequest.Replaceable, @@ -187,6 +189,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java index 6de90bb4e7a06..839a1b935ad1f 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java @@ -34,16 +34,18 @@ import org.opensearch.action.ActionType; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; /** * Transport request builder for cloning a snapshot * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CloneSnapshotRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< CloneSnapshotRequest, AcknowledgedResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java index 05b3a5d676c5b..54ef372609390 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java @@ -32,16 +32,16 @@ package org.opensearch.action.admin.cluster.snapshots.clone; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.snapshots.SnapshotsService; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java index 5da3f2eb01260..0ad3071a99045 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java @@ -38,16 +38,17 @@ import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.settings.Settings; import 
org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; import java.io.IOException; import java.util.Arrays; @@ -73,11 +74,12 @@ * <li>must not contain hash sign ('#')</li> * <li>must not start with underscore ('_')</li> * <li>must be lowercase</li> - * <li>must not contain invalid file name characters {@link org.opensearch.common.Strings#INVALID_FILENAME_CHARS} </li> + * <li>must not contain invalid file name characters {@link Strings#INVALID_FILENAME_CHARS} </li> * </ul> * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CreateSnapshotRequest extends ClusterManagerNodeRequest<CreateSnapshotRequest> implements IndicesRequest.Replaceable, @@ -261,7 +263,7 @@ public CreateSnapshotRequest indices(List<String> indices) { /** * Returns a list of indices that should be included into the snapshot * - * @return list of indices + * @return array of index names */ @Override public String[] indices() { @@ -316,7 +318,7 @@ public CreateSnapshotRequest partial(boolean partial) { /** * If set to true the operation should wait for the snapshot completion before returning. - * + * <p> * By default, the operation will return as soon as snapshot is initialized. It can be changed by setting this * flag to true. * @@ -387,9 +389,9 @@ public CreateSnapshotRequest settings(String source, MediaType mediaType) { */ public CreateSnapshotRequest settings(Map<String, Object> source) { try { - XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); builder.map(source); - settings(Strings.toString(builder), builder.contentType()); + settings(builder.toString(), builder.contentType()); } catch (IOException e) { throw new OpenSearchGenerationException("Failed to generate [" + source + "]", e); } @@ -446,7 +448,7 @@ public CreateSnapshotRequest source(Map<String, Object> source) { String name = entry.getKey(); if (name.equals("indices")) { if (entry.getValue() instanceof String) { - indices(org.opensearch.core.common.Strings.splitStringByCommaToArray((String) entry.getValue())); + indices(Strings.splitStringByCommaToArray((String) entry.getValue())); } else if (entry.getValue() instanceof List) { indices((List<String>) entry.getValue()); } else { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java index 40d440419819c..c378c416cc973 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java @@ -35,6 +35,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentType; @@ -43,8 +44,9 @@ /** * Create snapshot request builder * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = 
"1.0.0") public class CreateSnapshotRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< CreateSnapshotRequest, CreateSnapshotResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java index bef95c417c038..5d25cf6cacfab 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java @@ -32,16 +32,17 @@ package org.opensearch.action.admin.cluster.snapshots.create; -import org.opensearch.action.ActionResponse; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.rest.RestStatus; import org.opensearch.snapshots.SnapshotInfo; import org.opensearch.snapshots.SnapshotInfo.SnapshotInfoBuilder; @@ -51,8 +52,9 @@ /** * Create snapshot response * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CreateSnapshotResponse extends ActionResponse implements ToXContentObject { private static final ObjectParser<CreateSnapshotResponse, Void> PARSER = new ObjectParser<>( diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java index 79f365bd3a951..bb3bf014f213b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.snapshots.create; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; @@ -41,6 +40,7 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.snapshots.SnapshotsService; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java index d08c3033e7e10..21280381610f7 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequestValidationException; import 
org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -47,8 +48,9 @@ * Delete snapshot request removes snapshots from the repository and cleans up all files that are associated with the snapshots. * All files that are shared with at least one other existing snapshot are left intact. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeleteSnapshotRequest extends ClusterManagerNodeRequest<DeleteSnapshotRequest> { private String repository; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java index f61c58d449a02..f4da1ec0f7785 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java @@ -32,15 +32,17 @@ package org.opensearch.action.admin.cluster.snapshots.delete; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Delete snapshot request builder * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeleteSnapshotRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< DeleteSnapshotRequest, AcknowledgedResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java index 04b2c0b1be1b1..e8462e4d822f2 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java @@ -32,16 +32,16 @@ package org.opensearch.action.admin.cluster.snapshots.delete; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.snapshots.SnapshotsService; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java index 7492b5a434fe0..7e95885c60f93 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java @@ -34,9 +34,10 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import java.io.IOException; @@ -45,8 +46,9 @@ /** * Get snapshot request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetSnapshotsRequest extends ClusterManagerNodeRequest<GetSnapshotsRequest> { public static final String ALL_SNAPSHOTS = "_all"; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java index 3434f1cb47a99..983325aa575d7 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java @@ -34,13 +34,15 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.ArrayUtils; /** * Get snapshots request builder * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetSnapshotsRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< GetSnapshotsRequest, GetSnapshotsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java index 3c96c67f78fd4..6b0e8ba8a372f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java @@ -32,13 +32,14 @@ package org.opensearch.action.admin.cluster.snapshots.get; -import org.opensearch.action.ActionResponse; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; @@ -53,8 +54,9 @@ /** * Get snapshots response * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetSnapshotsResponse extends ActionResponse implements ToXContentObject { @SuppressWarnings("unchecked") @@ -128,6 +130,6 @@ public int hashCode() { @Override public String toString() { - return 
Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java index 11ea4112f6e67..c7fdc59334874 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java @@ -36,9 +36,8 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.util.CollectionUtil; -import org.opensearch.action.ActionListener; +import org.opensearch.action.StepListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.SnapshotsInProgress; @@ -48,8 +47,9 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.regex.Regex; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.repositories.IndexId; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; @@ -138,57 +138,64 @@ protected void clusterManagerOperation( currentSnapshots.add(snapshotInfo); } - final RepositoryData repositoryData; + final StepListener<RepositoryData> repositoryDataListener = new StepListener<>(); if (isCurrentSnapshotsOnly(request.snapshots()) == false) { - repositoryData = PlainActionFuture.get(fut -> repositoriesService.getRepositoryData(repository, fut)); - for (SnapshotId snapshotId : repositoryData.getSnapshotIds()) { - allSnapshotIds.put(snapshotId.getName(), snapshotId); - } + repositoriesService.getRepositoryData(repository, repositoryDataListener); } else { - repositoryData = null; + // Setting repositoryDataListener response to be null if the request has only current snapshot + repositoryDataListener.onResponse(null); } + repositoryDataListener.whenComplete(repositoryData -> { + if (repositoryData != null) { + for (SnapshotId snapshotId : repositoryData.getSnapshotIds()) { + allSnapshotIds.put(snapshotId.getName(), snapshotId); + } + } - final Set<SnapshotId> toResolve = new HashSet<>(); - if (isAllSnapshots(request.snapshots())) { - toResolve.addAll(allSnapshotIds.values()); - } else { - for (String snapshotOrPattern : request.snapshots()) { - if (GetSnapshotsRequest.CURRENT_SNAPSHOT.equalsIgnoreCase(snapshotOrPattern)) { - toResolve.addAll(currentSnapshots.stream().map(SnapshotInfo::snapshotId).collect(Collectors.toList())); - } else if (Regex.isSimpleMatchPattern(snapshotOrPattern) == false) { - if (allSnapshotIds.containsKey(snapshotOrPattern)) { - toResolve.add(allSnapshotIds.get(snapshotOrPattern)); - } else if (request.ignoreUnavailable() == false) { - throw new SnapshotMissingException(repository, snapshotOrPattern); - } - } else { - for (Map.Entry<String, SnapshotId> entry : allSnapshotIds.entrySet()) { - if (Regex.simpleMatch(snapshotOrPattern, entry.getKey())) { - toResolve.add(entry.getValue()); + final Set<SnapshotId> 
toResolve = new HashSet<>(); + if (isAllSnapshots(request.snapshots())) { + toResolve.addAll(allSnapshotIds.values()); + } else { + for (String snapshotOrPattern : request.snapshots()) { + if (GetSnapshotsRequest.CURRENT_SNAPSHOT.equalsIgnoreCase(snapshotOrPattern)) { + toResolve.addAll(currentSnapshots.stream().map(SnapshotInfo::snapshotId).collect(Collectors.toList())); + } else if (Regex.isSimpleMatchPattern(snapshotOrPattern) == false) { + if (allSnapshotIds.containsKey(snapshotOrPattern)) { + toResolve.add(allSnapshotIds.get(snapshotOrPattern)); + } else if (request.ignoreUnavailable() == false) { + throw new SnapshotMissingException(repository, snapshotOrPattern); + } + } else { + for (Map.Entry<String, SnapshotId> entry : allSnapshotIds.entrySet()) { + if (Regex.simpleMatch(snapshotOrPattern, entry.getKey())) { + toResolve.add(entry.getValue()); + } } } } - } - if (toResolve.isEmpty() && request.ignoreUnavailable() == false && isCurrentSnapshotsOnly(request.snapshots()) == false) { - throw new SnapshotMissingException(repository, request.snapshots()[0]); + if (toResolve.isEmpty() + && request.ignoreUnavailable() == false + && isCurrentSnapshotsOnly(request.snapshots()) == false) { + throw new SnapshotMissingException(repository, request.snapshots()[0]); + } } - } - final List<SnapshotInfo> snapshotInfos; - if (request.verbose()) { - snapshotInfos = snapshots(snapshotsInProgress, repository, new ArrayList<>(toResolve), request.ignoreUnavailable()); - } else { - if (repositoryData != null) { - // want non-current snapshots as well, which are found in the repository data - snapshotInfos = buildSimpleSnapshotInfos(toResolve, repositoryData, currentSnapshots); + final List<SnapshotInfo> snapshotInfos; + if (request.verbose()) { + snapshotInfos = snapshots(snapshotsInProgress, repository, new ArrayList<>(toResolve), request.ignoreUnavailable()); } else { - // only want current snapshots - snapshotInfos = currentSnapshots.stream().map(SnapshotInfo::basic).collect(Collectors.toList()); - CollectionUtil.timSort(snapshotInfos); + if (repositoryData != null) { + // want non-current snapshots as well, which are found in the repository data + snapshotInfos = buildSimpleSnapshotInfos(toResolve, repositoryData, currentSnapshots); + } else { + // only want current snapshots + snapshotInfos = currentSnapshots.stream().map(SnapshotInfo::basic).collect(Collectors.toList()); + CollectionUtil.timSort(snapshotInfos); + } } - } - listener.onResponse(new GetSnapshotsResponse(snapshotInfos)); + listener.onResponse(new GetSnapshotsResponse(snapshotInfos)); + }, listener::onFailure); } catch (Exception e) { listener.onFailure(e); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreClusterStateListener.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreClusterStateListener.java index 6fd593fdd3e08..1f6e865b78ffd 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreClusterStateListener.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreClusterStateListener.java @@ -34,12 +34,12 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; -import org.opensearch.action.ActionResponse; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterStateListener; import org.opensearch.cluster.RestoreInProgress; import 
org.opensearch.cluster.service.ClusterService; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.index.shard.ShardId; import org.opensearch.snapshots.RestoreInfo; import org.opensearch.snapshots.RestoreService; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index 840564a4bd7a2..492ef86bb7843 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -37,15 +37,16 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.common.Nullable; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentType; import java.io.IOException; import java.util.ArrayList; @@ -55,23 +56,27 @@ import java.util.Objects; import static org.opensearch.action.ValidateActions.addValidationError; +import static org.opensearch.common.settings.Settings.Builder.EMPTY_SETTINGS; import static org.opensearch.common.settings.Settings.readSettingsFromStream; import static org.opensearch.common.settings.Settings.writeSettingsToStream; -import static org.opensearch.common.settings.Settings.Builder.EMPTY_SETTINGS; import static org.opensearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; /** * Restore snapshot request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RestoreSnapshotRequest extends ClusterManagerNodeRequest<RestoreSnapshotRequest> implements ToXContentObject { private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(RestoreSnapshotRequest.class); /** * Enumeration of possible storage types + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum StorageType { LOCAL("local"), REMOTE_SNAPSHOT("remote_snapshot"); @@ -151,7 +156,7 @@ public RestoreSnapshotRequest(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(Version.V_2_7_0)) { storageType = in.readEnum(StorageType.class); } - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE) && in.getVersion().onOrAfter(Version.V_2_9_0)) { + if (in.getVersion().onOrAfter(Version.V_2_10_0)) { sourceRemoteStoreRepository = in.readOptionalString(); } } @@ -175,7 +180,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_7_0)) { out.writeEnum(storageType); } - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE) && out.getVersion().onOrAfter(Version.V_2_9_0)) { + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { 
out.writeOptionalString(sourceRemoteStoreRepository); } } @@ -497,7 +502,7 @@ public Settings indexSettings() { * this is the snapshot that this request restores. If the client can only identify a snapshot by its name then there is a risk that the * desired snapshot may be deleted and replaced by a new snapshot with the same name which is inconsistent with the original one. This * method lets us fail the restore if the precise snapshot we want is not available. - * + * <p> * This is for internal use only and is not exposed in the REST layer. */ public RestoreSnapshotRequest snapshotUuid(String snapshotUuid) { @@ -517,7 +522,7 @@ public String snapshotUuid() { /** * Sets the storage type for this request. */ - RestoreSnapshotRequest storageType(StorageType storageType) { + public RestoreSnapshotRequest storageType(StorageType storageType) { this.storageType = storageType; return this; } @@ -615,11 +620,6 @@ public RestoreSnapshotRequest source(Map<String, Object> source) { } } else if (name.equals("source_remote_store_repository")) { - if (!FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE)) { - throw new IllegalArgumentException( - "Unsupported parameter " + name + ". Please enable remote store feature flag for this experimental feature" - ); - } if (entry.getValue() instanceof String) { setSourceRemoteStoreRepository((String) entry.getValue()); } else { @@ -670,7 +670,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (storageType != null) { storageType.toXContent(builder); } - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE) && sourceRemoteStoreRepository != null) { + if (sourceRemoteStoreRepository != null) { builder.field("source_remote_store_repository", sourceRemoteStoreRepository); } builder.endObject(); @@ -700,48 +700,29 @@ public boolean equals(Object o) { && Objects.equals(indexSettings, that.indexSettings) && Arrays.equals(ignoreIndexSettings, that.ignoreIndexSettings) && Objects.equals(snapshotUuid, that.snapshotUuid) - && Objects.equals(storageType, that.storageType); - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE)) { - equals = Objects.equals(sourceRemoteStoreRepository, that.sourceRemoteStoreRepository); - } + && Objects.equals(storageType, that.storageType) + && Objects.equals(sourceRemoteStoreRepository, that.sourceRemoteStoreRepository); return equals; } @Override public int hashCode() { int result; - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE)) { - result = Objects.hash( - snapshot, - repository, - indicesOptions, - renamePattern, - renameReplacement, - waitForCompletion, - includeGlobalState, - partial, - includeAliases, - indexSettings, - snapshotUuid, - storageType, - sourceRemoteStoreRepository - ); - } else { - result = Objects.hash( - snapshot, - repository, - indicesOptions, - renamePattern, - renameReplacement, - waitForCompletion, - includeGlobalState, - partial, - includeAliases, - indexSettings, - snapshotUuid, - storageType - ); - } + result = Objects.hash( + snapshot, + repository, + indicesOptions, + renamePattern, + renameReplacement, + waitForCompletion, + includeGlobalState, + partial, + includeAliases, + indexSettings, + snapshotUuid, + storageType, + sourceRemoteStoreRepository + ); result = 31 * result + Arrays.hashCode(indices); result = 31 * result + Arrays.hashCode(ignoreIndexSettings); return result; @@ -749,6 +730,6 @@ public int hashCode() { @Override public String toString() { - return org.opensearch.common.Strings.toString(XContentType.JSON, this); + return 
Strings.toString(MediaTypeRegistry.JSON, this); } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java index d9cca536d1c41..39eaadf3c8de6 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java @@ -35,6 +35,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentType; @@ -44,8 +45,9 @@ /** * Restore snapshot request builder * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RestoreSnapshotRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< RestoreSnapshotRequest, RestoreSnapshotResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java index 2cf1b40b92761..c94645a6deb8f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java @@ -32,17 +32,18 @@ package org.opensearch.action.admin.cluster.snapshots.restore; -import org.opensearch.action.ActionResponse; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.rest.RestStatus; import org.opensearch.snapshots.RestoreInfo; import java.io.IOException; @@ -53,8 +54,9 @@ /** * Contains information about restores snapshot * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RestoreSnapshotResponse extends ActionResponse implements ToXContentObject { @Nullable diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java index ec4d6b8412318..73e3070f7e44b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.snapshots.restore; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import 
org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; @@ -41,6 +40,7 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.snapshots.RestoreService; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java index 6e250962d1210..cd7f4b392c61d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java @@ -32,11 +32,14 @@ package org.opensearch.action.admin.cluster.snapshots.status; +import org.opensearch.common.annotation.PublicApi; + /** * Stage for snapshotting an Index Shard * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum SnapshotIndexShardStage { /** diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java index 070f77f603621..f991e90cb0728 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java @@ -35,17 +35,18 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.action.support.broadcast.BroadcastShardResponse; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.xcontent.XContentParserUtils; -import org.opensearch.core.index.Index; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import java.io.IOException; @@ -56,8 +57,9 @@ /** * Status for snapshotting an Index Shard * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SnapshotIndexShardStatus extends BroadcastShardResponse implements ToXContentFragment { private SnapshotIndexShardStage stage = SnapshotIndexShardStage.INIT; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexStatus.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexStatus.java index 9c2db62c33bd0..402fee76bc663 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexStatus.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexStatus.java 
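Not every hunk in this section is annotation-only. The TransportGetSnapshotsAction change earlier in the diff is behavioral: the blocking PlainActionFuture.get(...) call that fetched RepositoryData is replaced with a StepListener, so repository metadata resolves asynchronously and failures reach the caller through listener::onFailure rather than as an exception on the transport thread. A minimal sketch of that idiom under stated assumptions (fetchRepositoryData and buildResponse are hypothetical stand-ins for the real repository plumbing):

import org.opensearch.action.StepListener;
import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse;
import org.opensearch.core.action.ActionListener;
import org.opensearch.repositories.RepositoryData;

abstract class AsyncResolveSketch {
    // Hypothetical asynchronous producer, e.g. repositoriesService.getRepositoryData(...)
    abstract void fetchRepositoryData(StepListener<RepositoryData> listener);

    // Hypothetical continuation that assembles the response from the fetched data
    abstract GetSnapshotsResponse buildResponse(RepositoryData repositoryData);

    void resolve(boolean currentSnapshotsOnly, ActionListener<GetSnapshotsResponse> listener) {
        final StepListener<RepositoryData> repositoryDataListener = new StepListener<>();
        if (currentSnapshotsOnly) {
            repositoryDataListener.onResponse(null);      // nothing to fetch; complete immediately
        } else {
            fetchRepositoryData(repositoryDataListener);  // kicks off the async fetch
        }
        // The continuation runs once the data arrives; no thread blocks waiting for it.
        repositoryDataListener.whenComplete(
            repositoryData -> listener.onResponse(buildResponse(repositoryData)),
            listener::onFailure
        );
    }
}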
@@ -32,6 +32,7 @@ package org.opensearch.action.admin.cluster.snapshots.status; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.ObjectParser; @@ -54,8 +55,9 @@ /** * Represents snapshot status of all shards in the index * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SnapshotIndexStatus implements Iterable<SnapshotIndexShardStatus>, ToXContentFragment { private final String index; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java index ad514a13312ba..bfd0c23c579bc 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java @@ -32,6 +32,7 @@ package org.opensearch.action.admin.cluster.snapshots.status; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.ToXContent; @@ -47,8 +48,9 @@ /** * Status of a snapshot shards * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SnapshotShardsStats implements ToXContentObject { private int initializingShards; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStats.java index 282585a43183a..f287e94edd0dc 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStats.java @@ -32,26 +32,28 @@ package org.opensearch.action.admin.cluster.snapshots.status; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.xcontent.XContentParserUtils; -import org.opensearch.common.xcontent.XContentType; import java.io.IOException; /** * Stats for snapshots * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SnapshotStats implements Writeable, ToXContentObject { private long startTime; @@ -357,7 +359,7 @@ void add(SnapshotStats stats, boolean updateTimestamps) { time = endTime - startTime; } assert time >= 0 : "Update with [" - + Strings.toString(XContentType.JSON, stats) + + Strings.toString(MediaTypeRegistry.JSON, stats) + "][" + updateTimestamps + "] resulted in negative total time [" diff --git 
a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java index a3b401980b109..e0f380b3ebbe6 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java @@ -35,13 +35,14 @@ import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.cluster.SnapshotsInProgress.State; import org.opensearch.common.Nullable; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; @@ -68,8 +69,9 @@ /** * Status of a snapshot * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SnapshotStatus implements ToXContentObject, Writeable { private final Snapshot snapshot; @@ -206,7 +208,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public String toString() { - return Strings.toString(XContentType.JSON, this, true, false); + return Strings.toString(MediaTypeRegistry.JSON, this, true, false); } /** diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java index 6aeff1d980f0c..061e73f1094b5 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java @@ -34,9 +34,10 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import java.io.IOException; @@ -45,8 +46,9 @@ /** * Get snapshot status request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SnapshotsStatusRequest extends ClusterManagerNodeRequest<SnapshotsStatusRequest> { private String repository = "_all"; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java index 55f156d4a470e..9377eca60e353 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java @@ -34,13 +34,15 @@ import 
org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.ArrayUtils; /** * Snapshots status request builder * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SnapshotsStatusRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< SnapshotsStatusRequest, SnapshotsStatusResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java index bb7bb7eb70b68..df436a587c2ad 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java @@ -32,8 +32,9 @@ package org.opensearch.action.admin.cluster.snapshots.status; -import org.opensearch.action.ActionResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ConstructingObjectParser; @@ -51,8 +52,9 @@ /** * Snapshot status response * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SnapshotsStatusResponse extends ActionResponse implements ToXContentObject { private final List<SnapshotStatus> snapshots; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index 78952805e5b49..7f6c039cf2ecc 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -34,7 +34,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.action.StepListener; import org.opensearch.action.support.ActionFilters; @@ -47,10 +46,11 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.util.CollectionUtils; import org.opensearch.common.util.set.Sets; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.repositories.IndexId; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequest.java index bc6724d3081d6..90a52f7406d57 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequest.java +++ 
b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequest.java @@ -36,18 +36,20 @@ import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; import java.io.IOException; /** * Transport request for obtaining cluster state * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterStateRequest extends ClusterManagerNodeReadRequest<ClusterStateRequest> implements IndicesRequest.Replaceable { public static final TimeValue DEFAULT_WAIT_FOR_NODE_TIMEOUT = TimeValue.timeValueMinutes(1); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestBuilder.java index b9bfeca9f7386..01a49c15fc1ec 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestBuilder.java @@ -35,13 +35,15 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; /** * Transport request builder for obtaining cluster state * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterStateRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< ClusterStateRequest, ClusterStateResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java index ce3b020280b16..d09105b2bd0a0 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java @@ -32,10 +32,11 @@ package org.opensearch.action.admin.cluster.state; -import org.opensearch.action.ActionResponse; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -45,8 +46,9 @@ /** * The response for getting the cluster state. 
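The RestoreSnapshotRequest hunk above also adjusts wire compatibility: sourceRemoteStoreRepository is no longer hidden behind the REMOTE_STORE feature flag, and its serialization gate moves from V_2_9_0 to V_2_10_0, so the field travels only on streams negotiated at 2.10.0 or later. A sketch of this version-gating idiom under those assumptions (the holder class below is hypothetical; only the field name comes from the diff):

import org.opensearch.Version;
import org.opensearch.core.common.io.stream.StreamInput;
import org.opensearch.core.common.io.stream.StreamOutput;

import java.io.IOException;

class VersionGatedRequest {
    String sourceRemoteStoreRepository;

    VersionGatedRequest(StreamInput in) throws IOException {
        if (in.getVersion().onOrAfter(Version.V_2_10_0)) {
            sourceRemoteStoreRepository = in.readOptionalString(); // only present on >= 2.10 streams
        }
    }

    void writeTo(StreamOutput out) throws IOException {
        if (out.getVersion().onOrAfter(Version.V_2_10_0)) {
            out.writeOptionalString(sourceRemoteStoreRepository);  // skipped for older peers
        }
    }
}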
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java
index ce3b020280b16..d09105b2bd0a0 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java
@@ -32,10 +32,11 @@
 
 package org.opensearch.action.admin.cluster.state;
 
-import org.opensearch.action.ActionResponse;
 import org.opensearch.cluster.ClusterName;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.node.DiscoveryNodes;
+import org.opensearch.common.annotation.PublicApi;
+import org.opensearch.core.action.ActionResponse;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -45,8 +46,9 @@
 /**
  * The response for getting the cluster state.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class ClusterStateResponse extends ActionResponse {
 
     private ClusterName clusterName;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java
index e57d2cf1b9803..4aaa7f1950823 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java
@@ -34,7 +34,6 @@
 
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
-import org.opensearch.action.ActionListener;
 import org.opensearch.action.support.ActionFilters;
 import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction;
 import org.opensearch.cluster.ClusterState;
@@ -48,15 +47,16 @@
 import org.opensearch.cluster.routing.RoutingTable;
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.inject.Inject;
-import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.common.unit.TimeValue;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.node.NodeClosedException;
 import org.opensearch.threadpool.ThreadPool;
 import org.opensearch.transport.TransportService;
 
 import java.io.IOException;
-import java.util.function.Predicate;
 import java.util.Map;
+import java.util.function.Predicate;
 
 /**
  * Transport action for obtaining cluster state
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/AnalysisStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/AnalysisStats.java
index 9cdd5bf244ecb..b7054ae99361b 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/AnalysisStats.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/AnalysisStats.java
@@ -35,12 +35,13 @@
 
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.metadata.MappingMetadata;
-import org.opensearch.common.Strings;
+import org.opensearch.common.annotation.PublicApi;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.common.io.stream.Writeable;
-import org.opensearch.common.settings.Settings;
-import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.core.xcontent.ToXContentFragment;
 import org.opensearch.core.xcontent.XContentBuilder;
@@ -61,8 +62,9 @@
 /**
  * Statistics about analysis usage.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public final class AnalysisStats implements ToXContentFragment, Writeable {
 
     /**
@@ -347,6 +349,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
 
     @Override
     public String toString() {
-        return Strings.toString(XContentType.JSON, this, true, true);
+        return Strings.toString(MediaTypeRegistry.JSON, this, true, true);
     }
 }
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIndices.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIndices.java
index 63ac76ae65783..26e554f44fca1 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIndices.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIndices.java
@@ -33,6 +33,7 @@
 package org.opensearch.action.admin.cluster.stats;
 
 import org.opensearch.action.admin.indices.stats.CommonStats;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.xcontent.ToXContentFragment;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.index.cache.query.QueryCacheStats;
@@ -50,8 +51,9 @@
 /**
  * Cluster Stats per index
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class ClusterStatsIndices implements ToXContentFragment {
 
     private int indexCount;
@@ -180,8 +182,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
     /**
      * Inner Shard Stats
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class ShardStats implements ToXContentFragment {
 
         int indices;
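AnalysisStats is the first of several classes here whose toString() switches from the org.opensearch.common Strings/XContentType pair to the relocated org.opensearch.core helpers; MappingStats and AnalyzeAction.Response below get the same treatment. A minimal sketch of the resulting pattern, using a hypothetical fragment type:

// Hedged sketch; ExampleStats is hypothetical, the toString() body mirrors this patch.
import org.opensearch.core.common.Strings;
import org.opensearch.core.xcontent.MediaTypeRegistry;
import org.opensearch.core.xcontent.ToXContentFragment;
import org.opensearch.core.xcontent.XContentBuilder;

public final class ExampleStats implements ToXContentFragment {
    private final long count = 42;

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws java.io.IOException {
        return builder.field("count", count);
    }

    @Override
    public String toString() {
        // last two flags are pretty-print and human-readable, as in AnalysisStats above
        return Strings.toString(MediaTypeRegistry.JSON, this, true, true);
    }
}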
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java
index 699884ca0eab3..b44e9cfc5c74a 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java
@@ -38,13 +38,14 @@
 
 import org.opensearch.action.admin.cluster.node.stats.NodeStats;
 import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.cluster.node.DiscoveryNodeRole;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.metrics.OperationStats;
 import org.opensearch.common.network.NetworkModule;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.transport.TransportAddress;
-import org.opensearch.common.unit.ByteSizeValue;
 import org.opensearch.common.unit.TimeValue;
 import org.opensearch.core.common.Strings;
+import org.opensearch.core.common.transport.TransportAddress;
+import org.opensearch.core.common.unit.ByteSizeValue;
 import org.opensearch.core.xcontent.ToXContentFragment;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.discovery.DiscoveryModule;
@@ -71,8 +72,9 @@
 /**
  * Per Node Cluster Stats
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class ClusterStatsNodes implements ToXContentFragment {
 
     private final Counts counts;
@@ -214,8 +216,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
     /**
      * Inner Counts
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class Counts implements ToXContentFragment {
         static final String COORDINATING_ONLY = "coordinating_only";
@@ -282,8 +285,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
     /**
      * Inner Operating System Stats
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public static class OsStats implements ToXContentFragment {
         final int availableProcessors;
         final int allocatedProcessors;
@@ -395,8 +399,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
     /**
      * Inner Process Stats
      *
-     * @opensearch.internal
+     * @opensearch.api
     */
+    @PublicApi(since = "1.0.0")
     public static class ProcessStats implements ToXContentFragment {
 
         final int count;
@@ -498,8 +503,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
     /**
      * Inner JVM Stats
      *
-     * @opensearch.internal
+     * @opensearch.api
     */
+    @PublicApi(since = "1.0.0")
     public static class JvmStats implements ToXContentFragment {
 
         private final Map<JvmVersion, Integer> versions;
@@ -626,8 +632,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
     /**
      * Inner JVM Version
      *
-     * @opensearch.internal
+     * @opensearch.api
     */
+    @PublicApi(since = "1.0.0")
     public static class JvmVersion {
         String version;
         String vmName;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequest.java
index dc472c10f550b..6a99451c596ed 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequest.java
@@ -33,6 +33,7 @@
 package org.opensearch.action.admin.cluster.stats;
 
 import org.opensearch.action.support.nodes.BaseNodesRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 
@@ -41,8 +42,9 @@
 /**
  * A request to get cluster level stats.
  *
- * @opensearch.internal
+ * @opensearch.api
 */
+@PublicApi(since = "1.0.0")
 public class ClusterStatsRequest extends BaseNodesRequest<ClusterStatsRequest> {
 
     public ClusterStatsRequest(StreamInput in) throws IOException {
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java
index aaf5e3aeffeb8..0dcb03dc26d0e 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java
@@ -34,12 +34,14 @@
 
 import org.opensearch.action.support.nodes.NodesOperationRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 
 /**
  * Transport request builder for obtaining cluster stats
  *
- * @opensearch.internal
+ * @opensearch.api
 */
+@PublicApi(since = "1.0.0")
 public class ClusterStatsRequestBuilder extends NodesOperationRequestBuilder<
     ClusterStatsRequest,
     ClusterStatsResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java
index 1dda39a17babc..cc002b689a2a5 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java
@@ -37,12 +37,12 @@
 
 import org.opensearch.cluster.ClusterName;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.health.ClusterHealthStatus;
-import org.opensearch.common.Strings;
+import org.opensearch.common.annotation.PublicApi;
+import org.opensearch.common.xcontent.XContentFactory;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.xcontent.ToXContentFragment;
 import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.common.xcontent.XContentFactory;
 
 import java.io.IOException;
 import java.util.List;
@@ -51,8 +51,9 @@
 /**
  * Transport response for obtaining cluster stats
  *
- * @opensearch.internal
+ * @opensearch.api
 */
+@PublicApi(since = "1.0.0")
 public class ClusterStatsResponse extends BaseNodesResponse<ClusterStatsNodeResponse> implements ToXContentFragment {
 
     final ClusterStatsNodes nodesStats;
@@ -168,7 +169,7 @@ public String toString() {
             builder.startObject();
             toXContent(builder, EMPTY_PARAMS);
             builder.endObject();
-            return Strings.toString(builder);
+            return builder.toString();
         } catch (IOException e) {
             return "{ \"error\" : \"" + e.getMessage() + "\"}";
         }
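With ClusterStatsRequest, its builder, and ClusterStatsResponse all promoted, the whole cluster-stats round trip is public API, and the response's toString() now renders through XContentBuilder#toString() rather than the removed Strings helper. A hedged usage sketch:

// Hedged usage sketch; `client` is an org.opensearch.client.Client obtained elsewhere.
ClusterStatsResponse stats = client.admin().cluster().prepareClusterStats().get();
System.out.println(stats); // toString() now delegates to XContentBuilder#toString()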
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/IndexFeatureStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/IndexFeatureStats.java
index f833c52493e00..f73c363b2ea60 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/IndexFeatureStats.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/IndexFeatureStats.java
@@ -32,6 +32,7 @@
 
 package org.opensearch.action.admin.cluster.stats;
 
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.common.io.stream.Writeable;
@@ -44,8 +45,9 @@
 /**
  * Statistics about an index feature.
  *
- * @opensearch.internal
+ * @opensearch.api
 */
+@PublicApi(since = "1.0.0")
 public final class IndexFeatureStats implements ToXContent, Writeable {
 
     final String name;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/MappingStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/MappingStats.java
index 90c78f30ea78d..8e6fdb02b1f22 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/MappingStats.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/MappingStats.java
@@ -35,11 +35,12 @@
 
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.metadata.MappingMetadata;
-import org.opensearch.common.Strings;
+import org.opensearch.common.annotation.PublicApi;
+import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.common.io.stream.Writeable;
-import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.core.xcontent.ToXContentFragment;
 import org.opensearch.core.xcontent.XContentBuilder;
 
@@ -58,8 +59,9 @@
 /**
  * Usage statistics about mappings usage.
  *
- * @opensearch.internal
+ * @opensearch.api
 */
+@PublicApi(since = "1.0.0")
 public final class MappingStats implements ToXContentFragment, Writeable {
 
     /**
@@ -131,7 +133,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
 
     @Override
     public String toString() {
-        return Strings.toString(XContentType.JSON, this, true, true);
+        return Strings.toString(MediaTypeRegistry.JSON, this, true, true);
     }
 
     @Override
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java
index 18098bc31432f..9c5dcc9e9de3f 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java
@@ -168,6 +168,10 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq
             false,
             false,
             false,
+            false,
+            false,
+            false,
+            false,
             false
         );
         List<ShardStats> shardsStats = new ArrayList<>();
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java
index 7d92162015950..0bb4f3625ddad 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java
@@ -34,6 +34,7 @@
 
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.support.master.AcknowledgedRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 
@@ -44,8 +45,9 @@
 /**
  * Transport request for deleting stored scripts
  *
- * @opensearch.internal
+ * @opensearch.api
 */
+@PublicApi(since = "1.0.0")
 public class DeleteStoredScriptRequest extends AcknowledgedRequest<DeleteStoredScriptRequest> {
 
     private String id;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java
index 34e0d429f2098..cbadde386d5a9 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java
@@ -35,12 +35,14 @@
 
 import org.opensearch.action.support.master.AcknowledgedRequestBuilder;
 import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 
 /**
  * Transport request builder for deleting stored scripts
  *
- * @opensearch.internal
+ * @opensearch.api
 */
+@PublicApi(since = "1.0.0")
 public class DeleteStoredScriptRequestBuilder extends AcknowledgedRequestBuilder<
     DeleteStoredScriptRequest,
     AcknowledgedResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetScriptContextResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetScriptContextResponse.java
index f269a813dbaa4..b91d636b5ec76 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetScriptContextResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetScriptContextResponse.java
@@ -32,15 +32,15 @@
 
 package org.opensearch.action.admin.cluster.storedscripts;
 
-import org.opensearch.action.ActionResponse;
+import org.opensearch.common.xcontent.StatusToXContentObject;
 import org.opensearch.core.ParseField;
+import org.opensearch.core.action.ActionResponse;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.rest.RestStatus;
 import org.opensearch.core.xcontent.ConstructingObjectParser;
-import org.opensearch.common.xcontent.StatusToXContentObject;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.core.rest.RestStatus;
 import org.opensearch.script.ScriptContextInfo;
 
 import java.io.IOException;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetScriptLanguageResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetScriptLanguageResponse.java
index 192e89e08f25d..eabac2eb94a02 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetScriptLanguageResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetScriptLanguageResponse.java
@@ -32,14 +32,14 @@
 
 package org.opensearch.action.admin.cluster.storedscripts;
 
-import org.opensearch.action.ActionResponse;
+import org.opensearch.common.xcontent.StatusToXContentObject;
+import org.opensearch.core.action.ActionResponse;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.common.io.stream.Writeable;
-import org.opensearch.common.xcontent.StatusToXContentObject;
+import org.opensearch.core.rest.RestStatus;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.core.rest.RestStatus;
 import org.opensearch.script.ScriptLanguagesInfo;
 
 import java.io.IOException;
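The stored-script request/builder pairs follow the same promotion pattern. A hedged sketch of deleting a stored script through the admin client; the prepare* accessor name is assumed, not shown in this patch:

// Hedged usage sketch; the script id is invented.
AcknowledgedResponse ack = client.admin().cluster()
    .prepareDeleteStoredScript()
    .setId("my-stored-script")
    .get();
assert ack.isAcknowledged();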
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequest.java
index 70384b5fb648e..25bc3ecd6b7ee 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequest.java
@@ -34,6 +34,7 @@
 
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 
@@ -44,8 +45,9 @@
 /**
  * Transport request for getting stored script
  *
- * @opensearch.internal
+ * @opensearch.api
 */
+@PublicApi(since = "1.0.0")
 public class GetStoredScriptRequest extends ClusterManagerNodeReadRequest<GetStoredScriptRequest> {
 
     protected String id;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequestBuilder.java
index ae969963be62f..ca0bd32f1f38b 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequestBuilder.java
@@ -34,12 +34,14 @@
 
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 
 /**
  * Transport request builder for getting stored script
  *
- * @opensearch.internal
+ * @opensearch.api
 */
+@PublicApi(since = "1.0.0")
 public class GetStoredScriptRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder<
     GetStoredScriptRequest,
     GetStoredScriptResponse,
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java
index 02bee957e6216..a81faff2abb03 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java
@@ -32,16 +32,17 @@
 
 package org.opensearch.action.admin.cluster.storedscripts;
 
-import org.opensearch.action.ActionResponse;
+import org.opensearch.common.annotation.PublicApi;
+import org.opensearch.common.xcontent.StatusToXContentObject;
 import org.opensearch.core.ParseField;
+import org.opensearch.core.action.ActionResponse;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.rest.RestStatus;
 import org.opensearch.core.xcontent.ConstructingObjectParser;
 import org.opensearch.core.xcontent.ObjectParser;
-import org.opensearch.common.xcontent.StatusToXContentObject;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.core.rest.RestStatus;
 import org.opensearch.script.StoredScriptSource;
 
 import java.io.IOException;
@@ -53,8 +54,9 @@
 /**
  * Transport response for getting stored script
  *
- * @opensearch.internal
+ * @opensearch.api
 */
+@PublicApi(since = "1.0.0")
 public class GetStoredScriptResponse extends ActionResponse implements StatusToXContentObject {
 
     public static final ParseField _ID_PARSE_FIELD = new ParseField("_id");
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java
index 761373a001ffe..8731b18fff338 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java
@@ -35,15 +35,16 @@
 
 import org.opensearch.Version;
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.support.master.AcknowledgedRequest;
+import org.opensearch.common.annotation.PublicApi;
+import org.opensearch.common.xcontent.XContentHelper;
+import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
-import org.opensearch.core.common.Strings;
 import org.opensearch.core.xcontent.MediaType;
 import org.opensearch.core.xcontent.ToXContentFragment;
 import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.common.xcontent.XContentHelper;
-import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.script.StoredScriptSource;
 
 import java.io.IOException;
@@ -54,8 +55,9 @@
 /**
  * Transport request for putting stored script
  *
- * @opensearch.internal
+ * @opensearch.api
 */
+@PublicApi(since = "1.0.0")
 public class PutStoredScriptRequest extends AcknowledgedRequest<PutStoredScriptRequest> implements ToXContentFragment {
 
     private String id;
@@ -68,7 +70,7 @@ public PutStoredScriptRequest(StreamInput in) throws IOException {
         super(in);
         id = in.readOptionalString();
         content = in.readBytesReference();
-        if (in.getVersion().onOrAfter(Version.V_3_0_0)) {
+        if (in.getVersion().onOrAfter(Version.V_2_10_0)) {
             mediaType = in.readMediaType();
         } else {
             mediaType = in.readEnum(XContentType.class);
@@ -140,10 +142,10 @@ public StoredScriptSource source() {
     /**
      * Set the script source and the content type of the bytes.
     */
-    public PutStoredScriptRequest content(BytesReference content, XContentType xContentType) {
+    public PutStoredScriptRequest content(BytesReference content, MediaType mediaType) {
         this.content = content;
-        this.mediaType = Objects.requireNonNull(xContentType);
-        this.source = StoredScriptSource.parse(content, xContentType);
+        this.mediaType = Objects.requireNonNull(mediaType);
+        this.source = StoredScriptSource.parse(content, mediaType);
         return this;
     }
 
@@ -152,7 +154,7 @@ public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
         out.writeOptionalString(id);
         out.writeBytesReference(content);
-        if (out.getVersion().onOrAfter(Version.V_3_0_0)) {
+        if (out.getVersion().onOrAfter(Version.V_2_10_0)) {
             mediaType.writeTo(out);
         } else {
             out.writeEnum((XContentType) mediaType);
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java
index ed46b12d96106..46773177e9a74 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java
@@ -35,14 +35,16 @@
 
 import org.opensearch.action.support.master.AcknowledgedRequestBuilder;
 import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.bytes.BytesReference;
-import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.xcontent.MediaType;
 
 /**
  * Transport request builder for putting stored script
  *
- * @opensearch.internal
+ * @opensearch.api
 */
+@PublicApi(since = "1.0.0")
 public class PutStoredScriptRequestBuilder extends AcknowledgedRequestBuilder<
     PutStoredScriptRequest,
     AcknowledgedResponse,
@@ -60,8 +62,8 @@ public PutStoredScriptRequestBuilder setId(String id) {
     /**
      * Set the source of the script along with the content type of the source
     */
-    public PutStoredScriptRequestBuilder setContent(BytesReference source, XContentType xContentType) {
-        request.content(source, xContentType);
+    public PutStoredScriptRequestBuilder setContent(BytesReference source, MediaType mediaType) {
+        request.content(source, mediaType);
         return this;
     }
 }
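Two distinct changes land in PutStoredScriptRequest: the public content(...) signature widens from the concrete XContentType enum to the MediaType interface, and the wire gate for the generic MediaType serialization drops from V_3_0_0 to V_2_10_0, since 2.10+ nodes already read the new form. A hedged caller-side sketch of the widened builder API (script body and id invented):

// Hedged usage sketch; BytesArray is org.opensearch.core.common.bytes.BytesArray.
BytesArray source = new BytesArray("{\"script\": {\"lang\": \"painless\", \"source\": \"1 + 1\"}}");
AcknowledgedResponse ack = client.admin().cluster()
    .preparePutStoredScript()
    .setId("one-plus-one")
    .setContent(source, MediaTypeRegistry.JSON) // any MediaType now, not only XContentType
    .get();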
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java
index f126a8474a456..b0863939fd04c 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java
@@ -32,10 +32,9 @@
 
 package org.opensearch.action.admin.cluster.storedscripts;
 
-import org.opensearch.action.ActionListener;
 import org.opensearch.action.support.ActionFilters;
-import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction;
+import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.block.ClusterBlockException;
 import org.opensearch.cluster.block.ClusterBlockLevel;
@@ -44,6 +43,7 @@
 import org.opensearch.cluster.service.ClusterManagerTaskThrottler;
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.inject.Inject;
+import org.opensearch.core.action.ActionListener;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.script.ScriptService;
 import org.opensearch.threadpool.ThreadPool;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetScriptContextAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetScriptContextAction.java
index 88184d59932ea..0dd997a47bc7e 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetScriptContextAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetScriptContextAction.java
@@ -31,10 +31,10 @@
 
 package org.opensearch.action.admin.cluster.storedscripts;
 
-import org.opensearch.action.ActionListener;
 import org.opensearch.action.support.ActionFilters;
 import org.opensearch.action.support.HandledTransportAction;
 import org.opensearch.common.inject.Inject;
+import org.opensearch.core.action.ActionListener;
 import org.opensearch.script.ScriptContextInfo;
 import org.opensearch.script.ScriptService;
 import org.opensearch.tasks.Task;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetScriptLanguageAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetScriptLanguageAction.java
index 0ecd6e8cf35d7..f41a22bcd0a4c 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetScriptLanguageAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetScriptLanguageAction.java
@@ -32,10 +32,10 @@
 
 package org.opensearch.action.admin.cluster.storedscripts;
 
-import org.opensearch.action.ActionListener;
 import org.opensearch.action.support.ActionFilters;
 import org.opensearch.action.support.HandledTransportAction;
 import org.opensearch.common.inject.Inject;
+import org.opensearch.core.action.ActionListener;
 import org.opensearch.script.ScriptService;
 import org.opensearch.tasks.Task;
 import org.opensearch.transport.TransportService;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java
index 7b8a2805bf1a6..db1f1edde2812 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java
@@ -32,7 +32,6 @@
 
 package org.opensearch.action.admin.cluster.storedscripts;
 
-import org.opensearch.action.ActionListener;
 import org.opensearch.action.support.ActionFilters;
 import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction;
 import org.opensearch.cluster.ClusterState;
@@ -41,6 +40,7 @@
 import org.opensearch.cluster.metadata.IndexNameExpressionResolver;
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.inject.Inject;
+import org.opensearch.core.action.ActionListener;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.script.ScriptService;
 import org.opensearch.threadpool.ThreadPool;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java
index ac518291bd930..61ee641b4764d 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java
@@ -32,10 +32,9 @@
 
 package org.opensearch.action.admin.cluster.storedscripts;
 
-import org.opensearch.action.ActionListener;
 import org.opensearch.action.support.ActionFilters;
-import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction;
+import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.block.ClusterBlockException;
 import org.opensearch.cluster.block.ClusterBlockLevel;
@@ -44,6 +43,7 @@
 import org.opensearch.cluster.service.ClusterManagerTaskThrottler;
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.inject.Inject;
+import org.opensearch.core.action.ActionListener;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.script.ScriptService;
 import org.opensearch.threadpool.ThreadPool;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java
index 83e8b93b32e0f..16103f02be596 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java
@@ -34,6 +34,7 @@
 
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 
 import java.io.IOException;
@@ -41,8 +42,9 @@
 /**
  * Transport request for getting pending cluster tasks
  *
- * @opensearch.internal
+ * @opensearch.api
 */
+@PublicApi(since = "1.0.0")
 public class PendingClusterTasksRequest extends ClusterManagerNodeReadRequest<PendingClusterTasksRequest> {
 
     public PendingClusterTasksRequest() {}
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java
index b5e77f291a701..c932c2e91f314 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java
@@ -34,12 +34,14 @@
 
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 
 /**
  * Transport request builder for getting pending cluster tasks
  *
- * @opensearch.internal
+ * @opensearch.api
 */
+@PublicApi(since = "1.0.0")
 public class PendingClusterTasksRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder<
     PendingClusterTasksRequest,
     PendingClusterTasksResponse,
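The only substantive edit in the five Transport*Action files above is the relocation of ActionListener from org.opensearch.action to org.opensearch.core.action; call sites keep the same shape. A minimal hedged sketch of a listener built against the new package (the logger and response type are arbitrary):

// Hedged sketch; assumes a log4j2 `logger` in scope.
import org.opensearch.core.action.ActionListener;

ActionListener<AcknowledgedResponse> listener = ActionListener.wrap(
    response -> logger.info("acknowledged: {}", response.isAcknowledged()),
    e -> logger.error("request failed", e)
);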
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java
index f0d75ce77cd8a..9f4568c88b273 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java
@@ -32,8 +32,9 @@
 
 package org.opensearch.action.admin.cluster.tasks;
 
-import org.opensearch.action.ActionResponse;
 import org.opensearch.cluster.service.PendingClusterTask;
+import org.opensearch.common.annotation.PublicApi;
+import org.opensearch.core.action.ActionResponse;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.xcontent.ToXContentObject;
@@ -46,8 +47,9 @@
 /**
  * Transport response for getting pending cluster tasks
  *
- * @opensearch.internal
+ * @opensearch.api
 */
+@PublicApi(since = "1.0.0")
 public class PendingClusterTasksResponse extends ActionResponse implements Iterable<PendingClusterTask>, ToXContentObject {
 
     private final List<PendingClusterTask> pendingTasks;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java
index 0bd761ca811f3..5d5053cc80738 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java
@@ -34,7 +34,6 @@
 
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
-import org.opensearch.action.ActionListener;
 import org.opensearch.action.support.ActionFilters;
 import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction;
 import org.opensearch.cluster.ClusterState;
@@ -43,6 +42,7 @@
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.cluster.service.PendingClusterTask;
 import org.opensearch.common.inject.Inject;
+import org.opensearch.core.action.ActionListener;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.threadpool.ThreadPool;
 import org.opensearch.transport.TransportService;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/Alias.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/Alias.java
index 9118cdd56babd..0b56216790d94 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/alias/Alias.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/Alias.java
@@ -34,18 +34,18 @@
 
 import org.opensearch.OpenSearchGenerationException;
 import org.opensearch.common.Nullable;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.ParseField;
-import org.opensearch.common.Strings;
+import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.bytes.BytesArray;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.common.io.stream.Writeable;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.core.xcontent.ToXContentFragment;
 import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.common.xcontent.XContentFactory;
 import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.index.query.QueryBuilder;
 
 import java.io.IOException;
@@ -55,8 +55,9 @@
 /**
  * Represents an alias, to be associated with an index
  *
- * @opensearch.internal
+ * @opensearch.api
 */
+@PublicApi(since = "1.0.0")
 public class Alias implements Writeable, ToXContentFragment {
 
     private static final ParseField FILTER = new ParseField("filter");
@@ -127,9 +128,9 @@ public Alias filter(Map<String, Object> filter) {
             return this;
         }
         try {
-            XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
+            XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON);
             builder.map(filter);
-            this.filter = Strings.toString(builder);
+            this.filter = builder.toString();
             return this;
         } catch (IOException e) {
             throw new OpenSearchGenerationException("Failed to generate [" + filter + "]", e);
@@ -145,10 +146,10 @@ public Alias filter(QueryBuilder filterBuilder) {
             return this;
         }
         try {
-            XContentBuilder builder = XContentFactory.jsonBuilder();
+            XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder();
             filterBuilder.toXContent(builder, ToXContent.EMPTY_PARAMS);
             builder.close();
-            this.filter = Strings.toString(builder);
+            this.filter = builder.toString();
             return this;
         } catch (IOException e) {
             throw new OpenSearchGenerationException("Failed to build json for alias request", e);
@@ -278,7 +279,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
 
         if (filter != null) {
             try (InputStream stream = new BytesArray(filter).streamInput()) {
-                builder.rawField(FILTER.getPreferredName(), stream, XContentType.JSON);
+                builder.rawField(FILTER.getPreferredName(), stream, MediaTypeRegistry.JSON);
             }
         }
 
@@ -305,7 +306,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
 
     @Override
     public String toString() {
-        return Strings.toString(XContentType.JSON, this);
+        return Strings.toString(MediaTypeRegistry.JSON, this);
     }
 
     @Override
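Alias#filter now builds its JSON through MediaTypeRegistry and reads the result straight off the builder instead of going through Strings.toString(builder). The same pattern in isolation, with an invented filter map:

// Hedged sketch of the builder path used in Alias#filter(Map) above; assumes
// XContentBuilder, MediaTypeRegistry, and java.util.Map are imported as in that file.
XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON);
builder.map(Map.of("term", Map.of("user", "alice")));
String filterJson = builder.toString(); // replaces Strings.toString(builder)
// filterJson -> {"term":{"user":"alice"}}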
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java
index 142cbe6a0ab0b..6ce62dda32d0a 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java
@@ -38,16 +38,17 @@
 
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.action.support.master.AcknowledgedRequest;
 import org.opensearch.cluster.metadata.AliasAction;
+import org.opensearch.common.annotation.PublicApi;
+import org.opensearch.common.xcontent.XContentFactory;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.common.ParsingException;
+import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.bytes.BytesArray;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.common.io.stream.Writeable;
-import org.opensearch.common.xcontent.XContentFactory;
-import org.opensearch.common.xcontent.XContentType;
-import org.opensearch.core.common.Strings;
 import org.opensearch.core.xcontent.ConstructingObjectParser;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.core.xcontent.ObjectParser;
 import org.opensearch.core.xcontent.ObjectParser.ValueType;
 import org.opensearch.core.xcontent.ToXContent;
@@ -72,8 +73,9 @@
 /**
  * A request to add/remove aliases for one or more indices.
  *
- * @opensearch.internal
+ * @opensearch.api
 */
+@PublicApi(since = "1.0.0")
 public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesRequest> implements ToXContentObject {
 
     private List<AliasActions> allAliasActions = new ArrayList<>();
@@ -95,8 +97,9 @@ public IndicesAliasesRequest() {}
     /**
      * Request to take one or more actions on one or more indexes and alias combinations.
      *
-     * @opensearch.internal
+     * @opensearch.api
     */
+    @PublicApi(since = "1.0.0")
     public static class AliasActions implements AliasesRequest, Writeable, ToXContentObject {
 
         private static final ParseField INDEX = new ParseField("index");
@@ -118,8 +121,9 @@ public static class AliasActions implements AliasesRequest, Writeable, ToXConten
         /**
          * The type of request.
          *
-         * @opensearch.internal
+         * @opensearch.api
         */
+        @PublicApi(since = "1.0.0")
         public enum Type {
             ADD((byte) 0, AliasActions.ADD),
             REMOVE((byte) 1, AliasActions.REMOVE),
@@ -216,9 +220,11 @@ private static ObjectParser<AliasActions, Void> parser(String name, Supplier<Ali
             ADD_PARSER.declareField(AliasActions::searchRouting, XContentParser::text, SEARCH_ROUTING, ValueType.INT);
             ADD_PARSER.declareField(AliasActions::writeIndex, XContentParser::booleanValue, IS_WRITE_INDEX, ValueType.BOOLEAN);
             ADD_PARSER.declareField(AliasActions::isHidden, XContentParser::booleanValue, IS_HIDDEN, ValueType.BOOLEAN);
-            ADD_PARSER.declareField(AliasActions::mustExist, XContentParser::booleanValue, MUST_EXIST, ValueType.BOOLEAN);
         }
         private static final ObjectParser<AliasActions, Void> REMOVE_PARSER = parser(REMOVE.getPreferredName(), AliasActions::remove);
+        static {
+            REMOVE_PARSER.declareField(AliasActions::mustExist, XContentParser::booleanValue, MUST_EXIST, ValueType.BOOLEAN);
+        }
         private static final ObjectParser<AliasActions, Void> REMOVE_INDEX_PARSER = parser(
             REMOVE_INDEX.getPreferredName(),
             AliasActions::removeIndex
@@ -428,9 +434,9 @@ public AliasActions filter(Map<String, Object> filter) {
                 return this;
             }
             try {
-                XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
+                XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder();
                 builder.map(filter);
-                this.filter = org.opensearch.common.Strings.toString(builder);
+                this.filter = builder.toString();
                 return this;
             } catch (IOException e) {
                 throw new OpenSearchGenerationException("Failed to generate [" + filter + "]", e);
@@ -446,7 +452,7 @@ public AliasActions filter(QueryBuilder filter) {
                 XContentBuilder builder = XContentFactory.jsonBuilder();
                 filter.toXContent(builder, ToXContent.EMPTY_PARAMS);
                 builder.close();
-                this.filter = org.opensearch.common.Strings.toString(builder);
+                this.filter = builder.toString();
                 return this;
             } catch (IOException e) {
                 throw new OpenSearchGenerationException("Failed to build json for alias request", e);
@@ -532,7 +538,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
             }
             if (false == Strings.isEmpty(filter)) {
                 try (InputStream stream = new BytesArray(filter).streamInput()) {
-                    builder.rawField(FILTER.getPreferredName(), stream, XContentType.JSON);
+                    builder.rawField(FILTER.getPreferredName(), stream, MediaTypeRegistry.JSON);
                 }
             }
             if (false == Strings.isEmpty(routing)) {
@@ -550,6 +556,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
             if (null != isHidden) {
                 builder.field(IS_HIDDEN.getPreferredName(), isHidden);
             }
+            if (null != mustExist) {
+                builder.field(MUST_EXIST.getPreferredName(), mustExist);
+            }
             builder.endObject();
             builder.endObject();
             return builder;
@@ -578,6 +587,8 @@ public String toString() {
                 + searchRouting
                 + ",writeIndex="
                 + writeIndex
+                + ",mustExist="
+                + mustExist
                 + "]";
         }
 
@@ -596,12 +607,13 @@ public boolean equals(Object obj) {
                 && Objects.equals(indexRouting, other.indexRouting)
                 && Objects.equals(searchRouting, other.searchRouting)
                 && Objects.equals(writeIndex, other.writeIndex)
-                && Objects.equals(isHidden, other.isHidden);
+                && Objects.equals(isHidden, other.isHidden)
+                && Objects.equals(mustExist, other.mustExist);
         }
 
         @Override
         public int hashCode() {
-            return Objects.hash(type, indices, aliases, filter, routing, indexRouting, searchRouting, writeIndex, isHidden);
+            return Objects.hash(type, indices, aliases, filter, routing, indexRouting, searchRouting, writeIndex, isHidden, mustExist);
         }
     }
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java
index 13c57cc781925..d262c9cd42ce9 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java
@@ -35,6 +35,7 @@
 
 import org.opensearch.action.support.master.AcknowledgedRequestBuilder;
 import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.index.query.QueryBuilder;
 
 import java.util.Map;
@@ -42,8 +43,9 @@
 /**
  * Builder for request to modify many aliases at once.
  *
- * @opensearch.internal
+ * @opensearch.api
 */
+@PublicApi(since = "1.0.0")
 public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<
     IndicesAliasesRequest,
     AcknowledgedResponse,
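The parser change above moves must_exist from the add action, where it was never meaningful, to the remove action, and the field now participates in toXContent, toString, equals, and hashCode. A hedged sketch of setting it through the transport client (index and alias names invented):

// Hedged usage sketch of the remove action with must_exist.
IndicesAliasesRequest request = new IndicesAliasesRequest();
IndicesAliasesRequest.AliasActions removeAction = IndicesAliasesRequest.AliasActions.remove()
    .index("logs-2024")
    .alias("logs-old");
removeAction.mustExist(true); // fail with AliasesNotFoundException if the alias is absent
request.addAliasAction(removeAction);
AcknowledgedResponse ack = client.admin().indices().aliases(request).actionGet();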
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/TransportIndicesAliasesAction.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/TransportIndicesAliasesAction.java
index bc8cafce98ff3..81cb3102cfcb9 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/alias/TransportIndicesAliasesAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/TransportIndicesAliasesAction.java
@@ -34,11 +34,10 @@
 
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
-import org.opensearch.action.ActionListener;
 import org.opensearch.action.RequestValidators;
 import org.opensearch.action.support.ActionFilters;
-import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction;
+import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.ack.ClusterStateUpdateResponse;
 import org.opensearch.cluster.block.ClusterBlockException;
@@ -51,6 +50,8 @@
 import org.opensearch.cluster.metadata.MetadataIndexAliasesService;
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.inject.Inject;
+import org.opensearch.common.regex.Regex;
+import org.opensearch.core.action.ActionListener;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.index.Index;
 import org.opensearch.rest.action.admin.indices.AliasesNotFoundException;
@@ -59,6 +60,7 @@
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
@@ -220,12 +222,33 @@ private static String[] concreteAliases(IndicesAliasesRequest.AliasActions actio
             // for DELETE we expand the aliases
             String[] indexAsArray = { concreteIndex };
             final Map<String, List<AliasMetadata>> aliasMetadata = metadata.findAliases(action, indexAsArray);
-            List<String> finalAliases = new ArrayList<>();
+            Set<String> finalAliases = new HashSet<>();
             for (final List<AliasMetadata> curAliases : aliasMetadata.values()) {
                 for (AliasMetadata aliasMeta : curAliases) {
                     finalAliases.add(aliasMeta.alias());
                 }
             }
+
+            // must_exist can only be set in the Remove Action in Update aliases API,
+            // we check the value here to make the behavior consistent with Delete aliases API
+            if (action.mustExist() != null) {
+                // if must_exist is false, we should make the remove action execute silently,
+                // so we return the original specified aliases to avoid AliasesNotFoundException
+                if (!action.mustExist()) {
+                    return action.aliases();
+                }
+
+                // if there is any non-existing aliases specified in the request and must_exist is true, throw exception in advance
+                if (finalAliases.isEmpty()) {
+                    throw new AliasesNotFoundException(action.aliases());
+                }
+                String[] nonExistingAliases = Arrays.stream(action.aliases())
+                    .filter(originalAlias -> finalAliases.stream().noneMatch(finalAlias -> Regex.simpleMatch(originalAlias, finalAlias)))
+                    .toArray(String[]::new);
+                if (nonExistingAliases.length != 0) {
+                    throw new AliasesNotFoundException(nonExistingAliases);
+                }
+            }
             return finalAliases.toArray(new String[0]);
         } else {
             // for ADD and REMOVE_INDEX we just return the current aliases
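Note that concreteAliases treats each requested alias as a pattern: it only counts as missing when it simple-matches none of the concrete aliases found on the index. The matching step in isolation, as a self-contained sketch with invented data:

// Hedged sketch of the wildcard check in concreteAliases(); inputs are invented.
import org.opensearch.common.regex.Regex;

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class MustExistCheckSketch {
    public static void main(String[] args) {
        String[] requested = { "logs-*", "metrics-old" };
        Set<String> concrete = new HashSet<>(Arrays.asList("logs-2024", "logs-2025"));
        String[] missing = Arrays.stream(requested)
            .filter(pattern -> concrete.stream().noneMatch(alias -> Regex.simpleMatch(pattern, alias)))
            .toArray(String[]::new);
        // prints [metrics-old]; with must_exist=true this becomes AliasesNotFoundException
        System.out.println(Arrays.toString(missing));
    }
}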
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java
index a21b8e97a6d6e..814a65e2a5bf0 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java
@@ -33,11 +33,11 @@
 package org.opensearch.action.admin.indices.alias.get;
 
 import org.opensearch.action.ActionType;
-import org.opensearch.action.ActionResponse;
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
 import org.opensearch.common.util.ArrayUtils;
+import org.opensearch.core.action.ActionResponse;
 
 /**
  * Base request builder for listing index aliases
@@ -81,7 +81,7 @@ public Builder addIndices(String... indices) {
 
     /**
      * Specifies what type of requested indices to ignore and wildcard indices expressions.
-     *
+     * <p>
      * For example indices that don't exist.
     */
     @SuppressWarnings("unchecked")
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequest.java
index 5833f14c6e0d3..00d754c8fb029 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequest.java
@@ -35,17 +35,19 @@
 
 import org.opensearch.action.AliasesRequest;
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest;
+import org.opensearch.common.annotation.PublicApi;
+import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
-import org.opensearch.core.common.Strings;
 
 import java.io.IOException;
 
 /**
  * Transport request for listing index aliases
  *
- * @opensearch.internal
+ * @opensearch.api
 */
+@PublicApi(since = "1.0.0")
 public class GetAliasesRequest extends ClusterManagerNodeReadRequest<GetAliasesRequest> implements AliasesRequest {
 
     private String[] indices = Strings.EMPTY_ARRAY;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequestBuilder.java
index aecbd689a647c..e9a15e9f9dfb3 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequestBuilder.java
@@ -33,12 +33,14 @@
 package org.opensearch.action.admin.indices.alias.get;
 
 import org.opensearch.client.OpenSearchClient;
+import org.opensearch.common.annotation.PublicApi;
 
 /**
  * Transport request builder for listing index aliases
  *
- * @opensearch.internal
+ * @opensearch.api
 */
+@PublicApi(since = "1.0.0")
 public class GetAliasesRequestBuilder extends BaseAliasesRequestBuilder<GetAliasesResponse, GetAliasesRequestBuilder> {
 
     public GetAliasesRequestBuilder(OpenSearchClient client, GetAliasesAction action, String... aliases) {
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesResponse.java
index 7408500f4f2a0..71cbbe2c6594f 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesResponse.java
@@ -32,8 +32,9 @@
 
 package org.opensearch.action.admin.indices.alias.get;
 
-import org.opensearch.action.ActionResponse;
 import org.opensearch.cluster.metadata.AliasMetadata;
+import org.opensearch.common.annotation.PublicApi;
+import org.opensearch.core.action.ActionResponse;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 
@@ -46,8 +47,9 @@
 /**
  * Transport response for listing index aliases
  *
- * @opensearch.internal
+ * @opensearch.api
 */
+@PublicApi(since = "1.0.0")
 public class GetAliasesResponse extends ActionResponse {
 
     private final Map<String, List<AliasMetadata>> aliases;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/TransportGetAliasesAction.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/TransportGetAliasesAction.java
index 92301e962a55f..3aca9c1976f16 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/TransportGetAliasesAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/TransportGetAliasesAction.java
@@ -31,7 +31,6 @@
 
 package org.opensearch.action.admin.indices.alias.get;
 
-import org.opensearch.action.ActionListener;
 import org.opensearch.action.support.ActionFilters;
 import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction;
 import org.opensearch.cluster.ClusterState;
@@ -42,9 +41,10 @@
 import org.opensearch.cluster.metadata.IndexNameExpressionResolver;
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.inject.Inject;
-import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.common.logging.DeprecationLogger;
 import org.opensearch.common.util.concurrent.ThreadContext;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.indices.SystemIndices;
 import org.opensearch.threadpool.ThreadPool;
 import org.opensearch.transport.TransportService;
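GetAliasesRequest, GetAliasesRequestBuilder, and GetAliasesResponse are promoted as a unit, so the read path is usable end to end. A hedged usage sketch (alias name invented):

// Hedged usage sketch; the response maps index name -> alias metadata, per the field above.
GetAliasesResponse response = client.admin().indices().prepareGetAliases("logs-alias").get();
for (Map.Entry<String, List<AliasMetadata>> entry : response.getAliases().entrySet()) {
    // entry.getKey() is an index holding the alias; the list carries its AliasMetadata
}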
org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.ParseField; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.ToXContentObject; @@ -62,8 +63,9 @@ /** * Transport action for analyzing text * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class AnalyzeAction extends ActionType<AnalyzeAction.Response> { public static final AnalyzeAction INSTANCE = new AnalyzeAction(); @@ -77,8 +79,9 @@ private AnalyzeAction() { * A request to analyze text associated with a specific index. Allows providing * the actual analyzer name to perform the analysis with. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Request extends SingleShardRequest<Request> { private String[] text; @@ -308,8 +311,9 @@ public static Request fromXContent(XContentParser parser, String index) throws I /** * Inner Response * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Response extends ActionResponse implements ToXContentObject { private final DetailAnalyzeResponse detail; @@ -386,7 +390,7 @@ public int hashCode() { @Override public String toString() { - return Strings.toString(XContentType.JSON, this, true, true); + return Strings.toString(MediaTypeRegistry.JSON, this, true, true); } /** @@ -404,8 +408,9 @@ static final class Fields { /** * Inner Analyze Token * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class AnalyzeToken implements Writeable, ToXContentObject { private final String term; private final int startOffset; @@ -542,8 +547,9 @@ public void writeTo(StreamOutput out) throws IOException { /** * Inner Detail Analyze Response * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class DetailAnalyzeResponse implements Writeable, ToXContentFragment { private final boolean customAnalyzer; @@ -709,8 +715,9 @@ public void writeTo(StreamOutput out) throws IOException { /** * Inner Analyze Token List * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class AnalyzeTokenList implements Writeable, ToXContentObject { private final String name; private final AnalyzeToken[] tokens; @@ -783,8 +790,9 @@ public void writeTo(StreamOutput out) throws IOException { /** * Inner character filtered text * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class CharFilteredText implements Writeable, ToXContentObject { private final String name; private final String[] texts; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java index a7f21b2af16fc..b0240a4db82cd 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java @@ -33,14 +33,16 @@ import org.opensearch.action.support.single.shard.SingleShardOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import java.util.Map; /** * Transport request builder for analyzing text * - * @opensearch.internal + *
@opensearch.api */ +@PublicApi(since = "1.0.0") public class AnalyzeRequestBuilder extends SingleShardOperationRequestBuilder< AnalyzeAction.Request, AnalyzeAction.Response, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/analyze/TransportAnalyzeAction.java b/server/src/main/java/org/opensearch/action/admin/indices/analyze/TransportAnalyzeAction.java index 97e2bae33f3e3..8dc55e580b9ec 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/analyze/TransportAnalyzeAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/analyze/TransportAnalyzeAction.java @@ -49,9 +49,10 @@ import org.opensearch.cluster.routing.ShardsIterator; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.index.analysis.AnalysisRegistry; @@ -64,7 +65,6 @@ import org.opensearch.index.analysis.TokenizerFactory; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.StringFieldType; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.IndicesService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java index 786c5a9c86ca7..57266b5aec58f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java @@ -34,17 +34,19 @@ import org.opensearch.Version; import org.opensearch.action.support.broadcast.BroadcastRequest; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import java.io.IOException; /** * Transport request for clearing cache * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClearIndicesCacheRequest extends BroadcastRequest<ClearIndicesCacheRequest> { private boolean queryCache = false; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java index e0513e77a5aa5..074e2ce0b35eb 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport request builder for clearing cache * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClearIndicesCacheRequestBuilder extends 
BroadcastOperationRequestBuilder< ClearIndicesCacheRequest, ClearIndicesCacheResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java index 449993246d808..6fe180d900311 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java @@ -32,8 +32,9 @@ package org.opensearch.action.admin.indices.cache.clear; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.broadcast.BroadcastResponse; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.XContentParser; @@ -45,8 +46,9 @@ /** * The response of a clear cache action. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClearIndicesCacheResponse extends BroadcastResponse { private static final ConstructingObjectParser<ClearIndicesCacheResponse, Void> PARSER = new ConstructingObjectParser<>( diff --git a/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java b/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java index a371047873d11..acc6a6c14c5fd 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java @@ -33,7 +33,6 @@ package org.opensearch.action.admin.indices.cache.clear; import org.opensearch.action.support.ActionFilters; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; @@ -43,6 +42,7 @@ import org.opensearch.cluster.routing.ShardsIterator; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.index.shard.ShardPath; import org.opensearch.indices.IndicesService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java index 8c6dde80d8d97..e785c31c4a0b9 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java @@ -37,9 +37,10 @@ import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.util.CollectionUtils; +import 
org.opensearch.core.common.util.CollectionUtils; import java.io.IOException; @@ -48,8 +49,9 @@ /** * A request to close an index. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CloseIndexRequest extends AcknowledgedRequest<CloseIndexRequest> implements IndicesRequest.Replaceable { private String[] indices; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequestBuilder.java index b3b53a0043c70..92c32c9ace490 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequestBuilder.java @@ -36,12 +36,14 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Builder for close index request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CloseIndexRequestBuilder extends AcknowledgedRequestBuilder<CloseIndexRequest, CloseIndexResponse, CloseIndexRequestBuilder> { public CloseIndexRequestBuilder(OpenSearchClient client, CloseIndexAction action) { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java index 8e58ee92db80a..2e0c5cb5842b4 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java @@ -32,18 +32,19 @@ package org.opensearch.action.admin.indices.close; import org.opensearch.OpenSearchException; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.master.ShardsAcknowledgedResponse; import org.opensearch.common.Nullable; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.util.CollectionUtils; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.util.CollectionUtils; +import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.index.Index; import java.io.IOException; import java.util.List; @@ -54,8 +55,9 @@ /** * Transport response for closing an index * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CloseIndexResponse extends ShardsAcknowledgedResponse { private final List<IndexResult> indices; @@ -93,14 +95,15 @@ protected void addCustomFields(final XContentBuilder builder, final Params param @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } /** * Inner index result * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") 
public static class IndexResult implements Writeable, ToXContentFragment { private final Index index; @@ -192,15 +195,16 @@ public XContentBuilder toXContent(final XContentBuilder builder, final Params pa @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } } /** * Shard Result from Close Index Response * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class ShardResult implements Writeable, ToXContentFragment { private final int id; @@ -251,14 +255,15 @@ public XContentBuilder toXContent(final XContentBuilder builder, final Params pa @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } /** * Inner Failure if something goes wrong * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Failure extends DefaultShardOperationFailedException { private @Nullable String nodeId; @@ -297,7 +302,7 @@ public XContentBuilder innerToXContent(final XContentBuilder builder, final Para @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } static Failure readFailure(final StreamInput in) throws IOException { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/TransportCloseIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/close/TransportCloseIndexAction.java index b4c9ad30e4e84..cde2ef63df02d 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/TransportCloseIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/TransportCloseIndexAction.java @@ -35,7 +35,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.DestructiveOperations; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; @@ -46,11 +45,12 @@ import org.opensearch.cluster.metadata.MetadataIndexStateService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.Index; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java b/server/src/main/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java index b1f3fb913b250..a8a512a5ad05a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java @@ -33,7 +33,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; 
import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.replication.ReplicationOperation; @@ -45,14 +44,15 @@ import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.lease.Releasable; -import org.opensearch.index.shard.IndexShard; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; -import org.opensearch.tasks.TaskId; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/AutoCreateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/create/AutoCreateAction.java index 22e0c8aedf4f5..02dd2de803f58 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/AutoCreateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/AutoCreateAction.java @@ -31,7 +31,6 @@ package org.opensearch.action.admin.indices.create; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionType; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.ActiveShardCount; @@ -42,9 +41,9 @@ import org.opensearch.cluster.ack.ClusterStateUpdateResponse; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; -import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.metadata.ComposableIndexTemplate; import org.opensearch.cluster.metadata.ComposableIndexTemplate.DataStreamTemplate; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.MetadataCreateDataStreamService; import org.opensearch.cluster.metadata.MetadataCreateDataStreamService.CreateDataStreamClusterStateUpdateRequest; @@ -55,6 +54,7 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Priority; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java index 53840b7697e45..01b4cd779c261 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java @@ -42,22 +42,22 @@ import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.master.AcknowledgedRequest; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesArray; -import 
org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.ParseField; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.mapper.MapperService; import java.io.IOException; @@ -81,8 +81,9 @@ * @see org.opensearch.client.Requests#createIndexRequest(String) * @see CreateIndexResponse * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest> implements IndicesRequest { public static final ParseField MAPPINGS = new ParseField("mappings"); @@ -221,7 +222,7 @@ public CreateIndexRequest settings(String source, XContentType xContentType) { /** * The settings to create the index with (using a generic MediaType) */ - private CreateIndexRequest settings(String source, MediaType mediaType) { + public CreateIndexRequest settings(String source, MediaType mediaType) { this.settings = Settings.builder().loadFromSource(source, mediaType).build(); return this; } @@ -230,7 +231,7 @@ private CreateIndexRequest settings(String source, MediaType mediaType) { * Allows to set the settings using a json builder. */ public CreateIndexRequest settings(XContentBuilder builder) { - settings(Strings.toString(builder), builder.contentType()); + settings(builder.toString(), builder.contentType()); return this; } @@ -244,7 +245,7 @@ public CreateIndexRequest settings(Map<String, ?> source) { /** * Set the mapping for this index - * + * <p> * The mapping should be in the form of a JSON string, with an outer _doc key * <pre> * .mapping("{\"_doc\":{\"properties\": ... }}") @@ -270,7 +271,7 @@ public CreateIndexRequest mapping(String source, XContentType xContentType) { /** * Adds mapping that will be added when the index gets created. - * + * <p> * Note that the definition should *not* be nested under a type name. * * @param source The mapping source @@ -297,7 +298,7 @@ private CreateIndexRequest mapping(BytesReference source, XContentType xContentT /** * Adds mapping that will be added when the index gets created. - * + * <p> * Note that the definition should *not* be nested under a type name. 
* * @param source The mapping source @@ -346,7 +347,7 @@ private CreateIndexRequest mapping(String type, Map<String, ?> source) { try { XContentBuilder builder = XContentFactory.jsonBuilder(); builder.map(source); - return mapping(Strings.toString(builder)); + return mapping(builder.toString()); } catch (IOException e) { throw new OpenSearchGenerationException("Failed to generate [" + source + "]", e); } @@ -433,7 +434,7 @@ public CreateIndexRequest source(String source, XContentType xContentType) { /** * Sets the settings and mappings as a single source. - * + * <p> * Note that the mapping definition should *not* be nested under a type name. */ public CreateIndexRequest source(String source, MediaType mediaType) { @@ -459,7 +460,7 @@ public CreateIndexRequest source(byte[] source, XContentType xContentType) { /** * Sets the settings and mappings as a single source. - * + * <p> * Note that the mapping definition should *not* be nested under a type name. */ public CreateIndexRequest source(byte[] source, MediaType mediaType) { @@ -593,4 +594,25 @@ public void writeTo(StreamOutput out) throws IOException { } waitForActiveShards.writeTo(out); } + + @Override + public String toString() { + return "CreateIndexRequest{" + + "cause='" + + cause + + '\'' + + ", index='" + + index + + '\'' + + ", settings=" + + settings + + ", mappings='" + + mappings + + '\'' + + ", aliases=" + + aliases + + ", waitForActiveShards=" + + waitForActiveShards + + '}'; + } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilder.java index a716959614065..b233f45422967 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilder.java @@ -36,19 +36,22 @@ import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; -import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.LoggingDeprecationHandler; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.XContentBuilder; import java.util.Map; /** * Builder for a create index request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CreateIndexRequestBuilder extends AcknowledgedRequestBuilder< CreateIndexRequest, CreateIndexResponse, @@ -97,8 +100,8 @@ public CreateIndexRequestBuilder setSettings(XContentBuilder builder) { /** * The settings to create the index with (either json or yaml format) */ - public CreateIndexRequestBuilder setSettings(String source, XContentType xContentType) { - request.settings(source, xContentType); + public CreateIndexRequestBuilder setSettings(String source, MediaType mediaType) { + request.settings(source, mediaType); return this; } @@ -200,24 +203,24 @@ public CreateIndexRequestBuilder addAlias(Alias alias) { /** * Sets the settings and mappings as a single source. 
*/ - public CreateIndexRequestBuilder setSource(String source, XContentType xContentType) { - request.source(source, xContentType); + public CreateIndexRequestBuilder setSource(String source, MediaType mediaType) { + request.source(source, mediaType); return this; } /** * Sets the settings and mappings as a single source. */ - public CreateIndexRequestBuilder setSource(BytesReference source, XContentType xContentType) { - request.source(source, xContentType); + public CreateIndexRequestBuilder setSource(BytesReference source, MediaType mediaType) { + request.source(source, mediaType); return this; } /** * Sets the settings and mappings as a single source. */ - public CreateIndexRequestBuilder setSource(byte[] source, XContentType xContentType) { - request.source(source, xContentType); + public CreateIndexRequestBuilder setSource(byte[] source, MediaType mediaType) { + request.source(source, mediaType); return this; } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexResponse.java index 1b3ad48402eed..3258ffd8672a1 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.indices.create; import org.opensearch.action.support.master.ShardsAcknowledgedResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -49,8 +50,9 @@ /** * A response for a create index action. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CreateIndexResponse extends ShardsAcknowledgedResponse { private static final ParseField INDEX = new ParseField("index"); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/TransportCreateIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/create/TransportCreateIndexAction.java index c8a51da38662f..b5f822bd45b7e 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/TransportCreateIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/TransportCreateIndexAction.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.indices.create; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; @@ -42,6 +41,7 @@ import org.opensearch.cluster.metadata.MetadataCreateIndexService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexRequest.java index 0443325f82778..c5c03c93785d2 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexRequest.java +++ 
b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -44,8 +45,9 @@ * Represents a request to delete a particular dangling index, specified by its UUID. The {@link #acceptDataLoss} * flag must also be explicitly set to true, or later validation will fail. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeleteDanglingIndexRequest extends AcknowledgedRequest<DeleteDanglingIndexRequest> { private final String indexUUID; private final boolean acceptDataLoss; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java index d5801223855d2..751a872ee7dcd 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java @@ -35,7 +35,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; import org.opensearch.action.FailedNodeException; import org.opensearch.action.admin.indices.dangling.DanglingIndexInfo; import org.opensearch.action.admin.indices.dangling.list.ListDanglingIndicesAction; @@ -43,8 +42,8 @@ import org.opensearch.action.admin.indices.dangling.list.ListDanglingIndicesResponse; import org.opensearch.action.admin.indices.dangling.list.NodeListDanglingIndicesResponse; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.AckedClusterStateUpdateTask; import org.opensearch.cluster.ClusterState; @@ -57,8 +56,9 @@ import org.opensearch.cluster.service.ClusterManagerTaskThrottler; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.Index; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/FindDanglingIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/FindDanglingIndexRequest.java index ae284d79bb085..f853a47b3c2bf 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/FindDanglingIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/FindDanglingIndexRequest.java @@ -33,9 +33,9 @@ package org.opensearch.action.admin.indices.dangling.find; import org.opensearch.action.support.nodes.BaseNodesRequest; 
+import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/NodeFindDanglingIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/NodeFindDanglingIndexRequest.java index 8b0d6df497cc8..9d812f23db5f9 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/NodeFindDanglingIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/NodeFindDanglingIndexRequest.java @@ -32,12 +32,12 @@ package org.opensearch.action.admin.indices.dangling.find; -import java.io.IOException; - import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.transport.TransportRequest; +import java.io.IOException; + /** * Used when querying every node in the cluster for a specific dangling index. * diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/TransportFindDanglingIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/TransportFindDanglingIndexAction.java index 84d044a303a2c..13da03a04acbd 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/TransportFindDanglingIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/TransportFindDanglingIndexAction.java @@ -32,10 +32,6 @@ package org.opensearch.action.admin.indices.dangling.find; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.nodes.TransportNodesAction; @@ -48,6 +44,10 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + /** * Finds a specified dangling index by its UUID, searching across all nodes. * diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexRequest.java index 590f08a82c1d2..2702b6a05c4bb 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -46,8 +47,9 @@ * by its UUID. The {@link #acceptDataLoss} flag must also be * explicitly set to true, or later validation will fail. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ImportDanglingIndexRequest extends AcknowledgedRequest<ImportDanglingIndexRequest> { private final String indexUUID; private final boolean acceptDataLoss; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java index 2010515249371..3f47d1bf083f4 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java @@ -32,17 +32,9 @@ package org.opensearch.action.admin.indices.dangling.import_index; -import static java.util.Collections.singletonList; - -import java.util.ArrayList; -import java.util.Comparator; -import java.util.List; -import java.util.stream.Collectors; - import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; import org.opensearch.action.FailedNodeException; import org.opensearch.action.admin.indices.dangling.find.FindDanglingIndexAction; import org.opensearch.action.admin.indices.dangling.find.FindDanglingIndexRequest; @@ -54,10 +46,18 @@ import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.gateway.LocalAllocateDangledIndices; import org.opensearch.tasks.Task; import org.opensearch.transport.TransportService; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.List; +import java.util.stream.Collectors; + +import static java.util.Collections.singletonList; + /** * Implements the import of a dangling index. When handling an {@link ImportDanglingIndexAction}, * this class first checks that such a dangling index exists. It then calls {@link LocalAllocateDangledIndices} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesRequest.java index a28320f5dbbee..119c4acbf4160 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesRequest.java @@ -33,17 +33,19 @@ package org.opensearch.action.admin.indices.dangling.list; import org.opensearch.action.support.nodes.BaseNodesRequest; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import java.io.IOException; /** * Transport request for listing dangling indices * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ListDanglingIndicesRequest extends BaseNodesRequest<ListDanglingIndicesRequest> { /** * Filter the response by index UUID. Leave as null to find all indices.
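The hunks on either side of this point repeat one promotion recipe: a type documented as @opensearch.internal joins the public surface, the javadoc tag flips to @opensearch.api, and the class gains a PublicApi annotation recording the release in which the contract first shipped. A minimal sketch of that recipe on a hypothetical ExampleRequest (illustrative only, not a class in this change; it assumes nothing about org.opensearch.common.annotation.PublicApi beyond the since element that every usage here passes):

import org.opensearch.common.annotation.PublicApi;

/**
 * Transport request for an example admin operation.
 *
 * @opensearch.api
 */
@PublicApi(since = "1.0.0") // first release in which this type shipped as a supported public contract
public class ExampleRequest {
    // body unchanged by the promotion; only the visibility contract moves
}

Nested types get the same treatment with an indented annotation line, as the AnalyzeAction and CloseIndexResponse hunks above show.
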
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java index f785085da9a03..be63bee6312fe 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java @@ -36,12 +36,13 @@ import org.opensearch.action.admin.indices.dangling.DanglingIndexInfo; import org.opensearch.action.support.nodes.BaseNodesResponse; import org.opensearch.cluster.ClusterName; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.xcontent.StatusToXContentObject; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.StatusToXContentObject; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.XContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.rest.RestStatus; import java.io.IOException; import java.util.ArrayList; @@ -58,8 +59,9 @@ * information for each dangling index is presented under the "dangling_indices" key. If any nodes * in the cluster failed to answer, the details are presented under the "_nodes.failures" key. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ListDanglingIndicesResponse extends BaseNodesResponse<NodeListDanglingIndicesResponse> implements StatusToXContentObject { public ListDanglingIndicesResponse(StreamInput in) throws IOException { @@ -130,7 +132,7 @@ protected void writeNodesTo(StreamOutput out, List<NodeListDanglingIndicesRespon /** * Aggregates dangling index information - * + * <p> * NOTE: visible for testing * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/action/admin/indices/datastream/CreateDataStreamAction.java b/server/src/main/java/org/opensearch/action/admin/indices/datastream/CreateDataStreamAction.java index 2a53eb38d672c..4c1690d25fbd9 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/datastream/CreateDataStreamAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/datastream/CreateDataStreamAction.java @@ -31,16 +31,15 @@ package org.opensearch.action.admin.indices.datastream; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.ActionType; import org.opensearch.action.IndicesRequest; import org.opensearch.action.ValidateActions; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.IndicesOptions; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -48,10 +47,12 @@ import org.opensearch.cluster.metadata.MetadataCreateDataStreamService; import org.opensearch.cluster.metadata.MetadataCreateDataStreamService.CreateDataStreamClusterStateUpdateRequest; import org.opensearch.cluster.service.ClusterService; +import 
org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -61,8 +62,9 @@ /** * Transport action for creating a datastream * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CreateDataStreamAction extends ActionType<AcknowledgedResponse> { public static final CreateDataStreamAction INSTANCE = new CreateDataStreamAction(); @@ -75,8 +77,9 @@ private CreateDataStreamAction() { /** * Request for Creating Data Stream * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Request extends AcknowledgedRequest<Request> implements IndicesRequest { private final String name; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/datastream/DataStreamsStatsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/datastream/DataStreamsStatsAction.java index 0b2375850f1fc..fcb13f4091638 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/datastream/DataStreamsStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/datastream/DataStreamsStatsAction.java @@ -37,7 +37,6 @@ import org.apache.lucene.index.PointValues; import org.opensearch.action.ActionType; import org.opensearch.action.support.ActionFilters; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.broadcast.BroadcastRequest; import org.opensearch.action.support.broadcast.BroadcastResponse; import org.opensearch.action.support.broadcast.node.TransportBroadcastByNodeAction; @@ -52,10 +51,11 @@ import org.opensearch.cluster.routing.ShardsIterator; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamAction.java b/server/src/main/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamAction.java index e5ae7cd582481..6b0aec6a31839 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamAction.java @@ -33,15 +33,14 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.ActionType; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedResponse; import 
org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.block.ClusterBlockException; @@ -54,13 +53,15 @@ import org.opensearch.cluster.service.ClusterManagerTaskThrottler; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Priority; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.regex.Regex; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.CollectionUtils; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.core.index.Index; import org.opensearch.snapshots.SnapshotInProgressException; import org.opensearch.snapshots.SnapshotsService; @@ -78,8 +79,9 @@ /** * Transport action for deleting a datastream * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeleteDataStreamAction extends ActionType<AcknowledgedResponse> { private static final Logger logger = LogManager.getLogger(DeleteDataStreamAction.class); @@ -94,8 +96,9 @@ private DeleteDataStreamAction() { /** * Request for deleting data streams * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Request extends ClusterManagerNodeRequest<Request> implements IndicesRequest.Replaceable { private String[] names; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/datastream/GetDataStreamAction.java b/server/src/main/java/org/opensearch/action/admin/indices/datastream/GetDataStreamAction.java index 543783b3de367..1db4e85887c23 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/datastream/GetDataStreamAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/datastream/GetDataStreamAction.java @@ -33,9 +33,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.ActionResponse; import org.opensearch.action.ActionType; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.ActionFilters; @@ -53,14 +51,17 @@ import org.opensearch.cluster.metadata.MetadataIndexTemplateService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; -import org.opensearch.core.ParseField; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Inject; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.Index; import 
org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.index.Index; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -76,8 +77,9 @@ /** * Transport action for getting a datastream * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetDataStreamAction extends ActionType<GetDataStreamAction.Response> { public static final GetDataStreamAction INSTANCE = new GetDataStreamAction(); @@ -90,8 +92,9 @@ private GetDataStreamAction() { /** * Request for getting data streams * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Request extends ClusterManagerNodeReadRequest<Request> implements IndicesRequest.Replaceable { private String[] names; @@ -156,16 +159,18 @@ public IndicesRequest indices(String... indices) { /** * Response for getting data streams * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Response extends ActionResponse implements ToXContentObject { public static final ParseField DATASTREAMS_FIELD = new ParseField("data_streams"); /** * Data streams information * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class DataStreamInfo extends AbstractDiffable<DataStreamInfo> implements ToXContentObject { public static final ParseField STATUS_FIELD = new ParseField("status"); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequest.java index 35fb6a8748941..5fbefbc6e1591 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequest.java @@ -36,9 +36,10 @@ import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.util.CollectionUtils; +import org.opensearch.core.common.util.CollectionUtils; import java.io.IOException; @@ -47,8 +48,9 @@ /** * A request to delete an index. Best created with {@link org.opensearch.client.Requests#deleteIndexRequest(String)}. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeleteIndexRequest extends AcknowledgedRequest<DeleteIndexRequest> implements IndicesRequest.Replaceable { private String[] indices; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java index 33f6342e94139..6cf0920f8570f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java @@ -36,12 +36,14 @@ import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport request builder for deleting an index * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeleteIndexRequestBuilder extends AcknowledgedRequestBuilder< DeleteIndexRequest, AcknowledgedResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/delete/TransportDeleteIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/delete/TransportDeleteIndexAction.java index a91ca1a7b714c..410a58afc95f1 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/delete/TransportDeleteIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/delete/TransportDeleteIndexAction.java @@ -35,11 +35,10 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.DestructiveOperations; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ack.ClusterStateUpdateResponse; import org.opensearch.cluster.block.ClusterBlockException; @@ -47,6 +46,7 @@ import org.opensearch.cluster.metadata.MetadataDeleteIndexService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.Index; import org.opensearch.tasks.Task; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequest.java index 6d2bea438f3ff..91fde3ec62d7b 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequest.java @@ -36,9 +36,10 @@ import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import 
org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import java.io.IOException; @@ -47,8 +48,9 @@ /** * Transport request for checking if an index exists * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndicesExistsRequest extends ClusterManagerNodeReadRequest<IndicesExistsRequest> implements IndicesRequest.Replaceable { private String[] indices = Strings.EMPTY_ARRAY; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java index 8459bbd8b874e..2e0f28cb7e3f4 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport request builder for checking if an index exists * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndicesExistsRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< IndicesExistsRequest, IndicesExistsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsResponse.java index db1a8620be5aa..a457cca74f897 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsResponse.java @@ -32,7 +32,8 @@ package org.opensearch.action.admin.indices.exists.indices; -import org.opensearch.action.ActionResponse; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -41,8 +42,9 @@ /** * Transport response for checking if an index exists * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndicesExistsResponse extends ActionResponse { private boolean exists; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java index 1469b48d04fc7..428a0eb35513d 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.indices.exists.indices; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; @@ -42,6 +41,7 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import 
org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.index.IndexNotFoundException; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushRequest.java index c8b28efc5f294..f8cf6ab72e038 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.broadcast.BroadcastRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -52,8 +53,9 @@ * @see org.opensearch.client.IndicesAdminClient#flush(FlushRequest) * @see FlushResponse * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class FlushRequest extends BroadcastRequest<FlushRequest> { private boolean force = false; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushRequestBuilder.java index d0cbd1d27fba6..50d7a78c919f1 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport request builder for flushing one or more indices * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class FlushRequestBuilder extends BroadcastOperationRequestBuilder<FlushRequest, FlushResponse, FlushRequestBuilder> { public FlushRequestBuilder(OpenSearchClient client, FlushAction action) { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushResponse.java index 85ca20c30c08b..3881a839a6dcd 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/flush/FlushResponse.java @@ -32,8 +32,9 @@ package org.opensearch.action.admin.indices.flush; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.broadcast.BroadcastResponse; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.XContentParser; @@ -45,8 +46,9 @@ /** * A response to flush action. 
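// Usage sketch for the flush API whose request, builder, and response types are promoted to
// public API in this hunk; "client" is an assumed org.opensearch.client.Client handle and the
// index name is hypothetical. FlushRequest#force and IndicesAdminClient#flush are the methods
// referenced in the FlushRequest javadoc above.
import org.opensearch.action.admin.indices.flush.FlushRequest;
import org.opensearch.action.admin.indices.flush.FlushResponse;
import org.opensearch.client.Client;

class FlushExample {
    static FlushResponse flushIndex(Client client) {
        // force(true) flushes even when no uncommitted changes are pending
        FlushRequest request = new FlushRequest("my-index").force(true);
        return client.admin().indices().flush(request).actionGet();
    }
}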
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class FlushResponse extends BroadcastResponse { private static final ConstructingObjectParser<FlushResponse, Void> PARSER = new ConstructingObjectParser<>("flush", true, arg -> { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/flush/TransportFlushAction.java b/server/src/main/java/org/opensearch/action/admin/indices/flush/TransportFlushAction.java index 5a114a6765dbc..07434c65862b6 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/flush/TransportFlushAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/flush/TransportFlushAction.java @@ -33,12 +33,12 @@ package org.opensearch.action.admin.indices.flush; import org.opensearch.action.support.ActionFilters; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.replication.ReplicationResponse; import org.opensearch.action.support.replication.TransportBroadcastReplicationAction; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.index.shard.ShardId; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/flush/TransportShardFlushAction.java b/server/src/main/java/org/opensearch/action/admin/indices/flush/TransportShardFlushAction.java index c96a55a32aee7..90db53af72257 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/flush/TransportShardFlushAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/flush/TransportShardFlushAction.java @@ -33,18 +33,18 @@ package org.opensearch.action.admin.indices.flush; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.replication.ReplicationResponse; import org.opensearch.action.support.replication.TransportReplicationAction; import org.opensearch.cluster.action.shard.ShardStateAction; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.settings.Settings; -import org.opensearch.index.shard.IndexShard; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java index 89e5a57094a96..bf6ee9ca43755 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequest.java @@ -35,6 +35,7 @@ import org.opensearch.Version; import org.opensearch.action.support.broadcast.BroadcastRequest; import org.opensearch.common.UUIDs; +import org.opensearch.common.annotation.PublicApi; import 
org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.index.engine.Engine; @@ -54,8 +55,9 @@ * @see org.opensearch.client.IndicesAdminClient#forceMerge(ForceMergeRequest) * @see ForceMergeResponse * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ForceMergeRequest extends BroadcastRequest<ForceMergeRequest> { /** @@ -67,11 +69,13 @@ public static final class Defaults { public static final int MAX_NUM_SEGMENTS = -1; public static final boolean ONLY_EXPUNGE_DELETES = false; public static final boolean FLUSH = true; + public static final boolean PRIMARY_ONLY = false; } private int maxNumSegments = Defaults.MAX_NUM_SEGMENTS; private boolean onlyExpungeDeletes = Defaults.ONLY_EXPUNGE_DELETES; private boolean flush = Defaults.FLUSH; + private boolean primaryOnly = Defaults.PRIMARY_ONLY; private static final Version FORCE_MERGE_UUID_VERSION = Version.V_3_0_0; @@ -98,6 +102,9 @@ public ForceMergeRequest(StreamInput in) throws IOException { maxNumSegments = in.readInt(); onlyExpungeDeletes = in.readBoolean(); flush = in.readBoolean(); + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + primaryOnly = in.readBoolean(); + } if (in.getVersion().onOrAfter(FORCE_MERGE_UUID_VERSION)) { forceMergeUUID = in.readString(); } else if ((forceMergeUUID = in.readOptionalString()) == null) { @@ -164,6 +171,21 @@ public ForceMergeRequest flush(boolean flush) { return this; } + /** + * Should the force merge be performed only on primary shards. Defaults to {@code false}. + */ + public boolean primaryOnly() { + return primaryOnly; + } + + /** + * Should the force merge be performed only on primary shards. Defaults to {@code false}. + */ + public ForceMergeRequest primaryOnly(boolean primaryOnly) { + this.primaryOnly = primaryOnly; + return this; + } + /** * Should this task store its result after it has finished? */ @@ -186,6 +208,8 @@ public String getDescription() { + onlyExpungeDeletes + "], flush[" + flush + + "], primaryOnly[" + + primaryOnly + "]"; } @@ -195,6 +219,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeInt(maxNumSegments); out.writeBoolean(onlyExpungeDeletes); out.writeBoolean(flush); + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeBoolean(primaryOnly); + } if (out.getVersion().onOrAfter(FORCE_MERGE_UUID_VERSION)) { out.writeString(forceMergeUUID); } else { @@ -211,6 +238,8 @@ public String toString() { + onlyExpungeDeletes + ", flush=" + flush + + ", primaryOnly=" + + primaryOnly + '}'; } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java index cff05f194cac4..10b9749f16b27 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java @@ -34,6 +34,7 @@ import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * A request to force merge one or more indices. In order to force merge all @@ -42,8 +43,9 @@ * merge down to. By default, will cause the force merge process to merge down * to half the configured number of segments. 
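// The constructor and writeTo hunks above gate the new primaryOnly boolean behind
// Version.V_3_0_0, so nodes speaking an older wire format neither send nor expect the extra
// byte. A sketch of that backward-compatibility pattern (method body only; the enclosing
// request class, its other fields, and imports are elided):
@Override
public void writeTo(StreamOutput out) throws IOException {
    super.writeTo(out);                  // fields known to every supported version go first
    if (out.getVersion().onOrAfter(Version.V_3_0_0)) {
        out.writeBoolean(primaryOnly);   // never emitted toward pre-3.0.0 nodes
    }
}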
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ForceMergeRequestBuilder extends BroadcastOperationRequestBuilder< ForceMergeRequest, ForceMergeResponse, @@ -79,4 +81,12 @@ public ForceMergeRequestBuilder setFlush(boolean flush) { request.flush(flush); return this; } + + /** + * Should the force merge be performed only on primary shards. Defaults to {@code false}. + */ + public ForceMergeRequestBuilder setPrimaryOnly(boolean primaryOnly) { + request.primaryOnly(primaryOnly); + return this; + } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeResponse.java index 4b5825724df84..e6a7fe0025b87 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeResponse.java @@ -32,8 +32,9 @@ package org.opensearch.action.admin.indices.forcemerge; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.broadcast.BroadcastResponse; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.XContentParser; @@ -47,8 +48,9 @@ /** * A response for the force merge action. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ForceMergeResponse extends BroadcastResponse { private static final ConstructingObjectParser<ForceMergeResponse, Void> PARSER = new ConstructingObjectParser<>( diff --git a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/TransportForceMergeAction.java b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/TransportForceMergeAction.java index e3afe420d0a18..b71c75462900a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/TransportForceMergeAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/forcemerge/TransportForceMergeAction.java @@ -33,7 +33,6 @@ package org.opensearch.action.admin.indices.forcemerge; import org.opensearch.action.support.ActionFilters; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; @@ -43,6 +42,7 @@ import org.opensearch.cluster.routing.ShardsIterator; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; @@ -115,11 +115,16 @@ protected EmptyResult shardOperation(ForceMergeRequest request, ShardRouting sha } /** - * The refresh request works against *all* shards. + * The force merge request works against *all* shards by default, but it can be restricted to primary shards only + * by setting primary_only to true. 
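// Usage sketch for the new primary-only force merge; "client" is an assumed
// org.opensearch.client.Client handle and the index name is hypothetical. Only methods added
// in this patch (primaryOnly / setPrimaryOnly) and entry points named in the javadoc above
// (IndicesAdminClient#forceMerge) are used.
import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse;
import org.opensearch.client.Client;

class PrimaryOnlyForceMergeExample {
    static ForceMergeResponse run(Client client) {
        ForceMergeRequest request = new ForceMergeRequest("my-index").maxNumSegments(1).primaryOnly(true);
        // builder equivalent:
        // client.admin().indices().prepareForceMerge("my-index").setMaxNumSegments(1).setPrimaryOnly(true).get();
        return client.admin().indices().forceMerge(request).actionGet();
    }
}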
*/ @Override protected ShardsIterator shards(ClusterState clusterState, ForceMergeRequest request, String[] concreteIndices) { - return clusterState.routingTable().allShards(concreteIndices); + if (request.primaryOnly()) { + return clusterState.routingTable().allShardsSatisfyingPredicate(concreteIndices, ShardRouting::primary); + } else { + return clusterState.routingTable().allShards(concreteIndices); + } } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java index f5a2a213bd720..47c59791edf04 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java @@ -34,23 +34,26 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.master.info.ClusterInfoRequest; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.util.ArrayUtils; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.util.ArrayUtils; import java.io.IOException; /** * A request to retrieve information about an index. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetIndexRequest extends ClusterInfoRequest<GetIndexRequest> { /** * The features to get. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Feature { ALIASES((byte) 0), MAPPINGS((byte) 1), diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java index 3019191e5570e..e97319abe5f98 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.master.info.ClusterInfoRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport request builder to get information about an index. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetIndexRequestBuilder extends ClusterInfoRequestBuilder<GetIndexRequest, GetIndexResponse, GetIndexRequestBuilder> { public GetIndexRequestBuilder(OpenSearchClient client, GetIndexAction action, String... 
indices) { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexResponse.java index df05c3dd665d2..5a237b8d3470f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexResponse.java @@ -33,14 +33,15 @@ package org.opensearch.action.admin.indices.get; import org.opensearch.Version; -import org.opensearch.action.ActionResponse; import org.opensearch.cluster.metadata.AliasMetadata; import org.opensearch.cluster.metadata.MappingMetadata; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.mapper.MapperService; @@ -57,8 +58,9 @@ /** * A response for a get index action. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetIndexResponse extends ActionResponse implements ToXContentObject { private Map<String, MappingMetadata> mappings = Map.of(); @@ -328,7 +330,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/TransportGetIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/get/TransportGetIndexAction.java index 66d810b091c4c..755119401c6b5 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/get/TransportGetIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/get/TransportGetIndexAction.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.indices.get; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.clustermanager.info.TransportClusterInfoAction; import org.opensearch.cluster.ClusterState; @@ -42,10 +41,11 @@ import org.opensearch.cluster.metadata.MappingMetadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsFilter; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.indices.IndicesService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsIndexRequest.java index 08b4f622425bc..c82db7eca3fd9 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsIndexRequest.java @@ -37,9 +37,9 @@ import org.opensearch.action.OriginalIndices; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.single.shard.SingleShardRequest; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java index 811ace0082dfb..f18f973c07959 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java @@ -37,21 +37,23 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import java.io.IOException; import java.util.Arrays; /** * Request the mappings of specific fields - * + * <p> * Note: there is a new class with the same name for the Java HLRC that uses a typeless format. * Any changes done to this class should go to that client class as well. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetFieldMappingsRequest extends ActionRequest implements IndicesRequest.Replaceable { protected boolean local = false; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequestBuilder.java index ebc0c015c5140..d379bfbecafd0 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequestBuilder.java @@ -35,13 +35,15 @@ import org.opensearch.action.ActionRequestBuilder; import org.opensearch.action.support.IndicesOptions; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.ArrayUtils; /** * A helper class to build {@link GetFieldMappingsRequest} objects * - * @opensearch.internal - **/ + * @opensearch.api + */ +@PublicApi(since = "1.0.0") public class GetFieldMappingsRequestBuilder extends ActionRequestBuilder<GetFieldMappingsRequest, GetFieldMappingsResponse> { public GetFieldMappingsRequestBuilder(OpenSearchClient client, GetFieldMappingsAction action, String... 
indices) { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java index a06121b1d448d..86533f14e83e1 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java @@ -33,19 +33,20 @@ package org.opensearch.action.admin.indices.mapping.get; import org.opensearch.Version; -import org.opensearch.action.ActionResponse; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.mapper.Mapper; import org.opensearch.index.mapper.MapperService; @@ -56,17 +57,18 @@ import java.util.Objects; import static java.util.Collections.unmodifiableMap; -import static org.opensearch.core.xcontent.ConstructingObjectParser.optionalConstructorArg; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.core.xcontent.ConstructingObjectParser.optionalConstructorArg; /** * Response object for {@link GetFieldMappingsRequest} API - * + * <p> * Note: there is a new class with the same name for the Java HLRC that uses a typeless format. * Any changes done to this class should go to that client class as well. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetFieldMappingsResponse extends ActionResponse implements ToXContentObject { private static final ParseField MAPPINGS = new ParseField("mappings"); @@ -178,8 +180,9 @@ private void addFieldMappingsToBuilder(XContentBuilder builder, Params params, M /** * Metadata for field mappings for toXContent * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class FieldMappingMetadata implements ToXContentFragment { private static final ParseField FULL_NAME = new ParseField("full_name"); @@ -214,7 +217,7 @@ public String fullName() { /** Returns the mappings as a map. Note that the returned map has a single key which is always the field's {@link Mapper#name}. 
*/ public Map<String, Object> sourceAsMap() { - return XContentHelper.convertToMap(source, true, XContentType.JSON).v2(); + return XContentHelper.convertToMap(source, true, MediaTypeRegistry.JSON).v2(); } // pkg-private for testing @@ -233,7 +236,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("mapping", sourceAsMap()); } else { try (InputStream stream = source.streamInput()) { - builder.rawField(MAPPING.getPreferredName(), stream, XContentType.JSON); + builder.rawField(MAPPING.getPreferredName(), stream, MediaTypeRegistry.JSON); } } return builder; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java index 3988b0dd5a508..cd0ecdb30e5fa 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.master.info.ClusterInfoRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import java.io.IOException; @@ -41,8 +42,9 @@ /** * Transport request to get index mappings. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetMappingsRequest extends ClusterInfoRequest<GetMappingsRequest> { public GetMappingsRequest() {} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java index 85bf8c2ffd9c6..36ca1cb088cb5 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.master.info.ClusterInfoRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport request builder to get index mappings. 
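// The sourceAsMap and toXContent hunks above are instances of this patch's recurring
// migration from org.opensearch.common.xcontent.XContentType to
// org.opensearch.core.xcontent.MediaTypeRegistry. A before/after sketch of a typical call
// site, assuming "source" is a JSON-backed BytesReference:
import java.util.Map;
import org.opensearch.common.xcontent.XContentHelper;
import org.opensearch.core.common.bytes.BytesReference;
import org.opensearch.core.xcontent.MediaTypeRegistry;

class MediaTypeMigrationExample {
    static Map<String, Object> toMap(BytesReference source) {
        // before: XContentHelper.convertToMap(source, true, XContentType.JSON).v2()
        return XContentHelper.convertToMap(source, true, MediaTypeRegistry.JSON).v2();
    }
}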
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetMappingsRequestBuilder extends ClusterInfoRequestBuilder< GetMappingsRequest, GetMappingsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsResponse.java index c4c9094e276d6..56c979c20b6d9 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsResponse.java @@ -33,13 +33,14 @@ package org.opensearch.action.admin.indices.mapping.get; import org.opensearch.Version; -import org.opensearch.action.ActionResponse; import org.opensearch.cluster.metadata.MappingMetadata; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.ParseField; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.mapper.MapperService; @@ -52,8 +53,9 @@ /** * Transport response to get index mappings. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetMappingsResponse extends ActionResponse implements ToXContentFragment { private static final ParseField MAPPINGS = new ParseField("mappings"); @@ -132,7 +134,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java index 93f76f42b2f05..53dbb86233803 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java @@ -32,13 +32,13 @@ package org.opensearch.action.admin.indices.mapping.get; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.HandledTransportAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.tasks.Task; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java index 435034c77b921..64f39644a4d96 100644 --- 
b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java @@ -42,18 +42,18 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.routing.ShardsIterator; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.regex.Regex; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.XContentHelper; import org.opensearch.index.IndexService; -import org.opensearch.index.mapper.MappingLookup; import org.opensearch.index.mapper.DocumentMapper; import org.opensearch.index.mapper.Mapper; -import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.mapper.MappingLookup; import org.opensearch.indices.IndicesService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -220,7 +220,7 @@ private static void addFieldMapper( try { BytesReference bytes = XContentHelper.toXContent( fieldMapper, - XContentType.JSON, + MediaTypeRegistry.JSON, includeDefaults ? includeDefaultsParams : ToXContent.EMPTY_PARAMS, false ); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java index 61b6f67ebdabb..6f07aa4dbf48a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java @@ -34,7 +34,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.clustermanager.info.TransportClusterInfoAction; import org.opensearch.cluster.ClusterState; @@ -42,6 +41,7 @@ import org.opensearch.cluster.metadata.MappingMetadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.indices.IndicesService; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java index 465a44556c081..8122db3278795 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -39,19 +39,20 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.xcontent.XContentFactory; +import 
org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.util.CollectionUtils; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.common.Strings; +import org.opensearch.core.common.util.CollectionUtils; +import org.opensearch.core.index.Index; import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.index.Index; import org.opensearch.index.mapper.MapperService; import java.io.IOException; @@ -75,8 +76,9 @@ * @see org.opensearch.client.IndicesAdminClient#putMapping(PutMappingRequest) * @see AcknowledgedResponse * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> implements IndicesRequest.Replaceable, ToXContentObject { private static final Set<String> RESERVED_FIELDS = Set.of( @@ -210,7 +212,7 @@ public String source() { /** * A specialized simplified mapping source method, takes the form of simple properties definition: * ("field1", "type=string,store=true"). - * + * <p> * Also supports metadata mapping fields such as `_all` and `_parent` as property definition, these metadata * mapping fields will automatically be put on the top level mapping object. */ @@ -250,7 +252,7 @@ public static XContentBuilder simpleMapping(String... source) { builder.startObject(fieldName); String[] s1 = Strings.splitStringByCommaToArray(source[i]); for (String s : s1) { - String[] s2 = org.opensearch.common.Strings.split(s, "="); + String[] s2 = Strings.split(s, "="); if (s2.length != 2) { throw new IllegalArgumentException("malformed " + s); } @@ -270,7 +272,7 @@ public static XContentBuilder simpleMapping(String... source) { builder.startObject(fieldName); String[] s1 = Strings.splitStringByCommaToArray(source[i]); for (String s : s1) { - String[] s2 = org.opensearch.common.Strings.split(s, "="); + String[] s2 = Strings.split(s, "="); if (s2.length != 2) { throw new IllegalArgumentException("malformed " + s); } @@ -298,7 +300,7 @@ public PutMappingRequest source(XContentBuilder mappingBuilder) { */ public PutMappingRequest source(Map<String, ?> mappingSource) { try { - XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); builder.map(mappingSource); return source(BytesReference.bytes(builder), builder.contentType()); } catch (IOException e) { @@ -309,8 +311,8 @@ public PutMappingRequest source(Map<String, ?> mappingSource) { /** * The mapping source definition. 
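// Usage sketch for the simpleMapping(...) helper shown above, which expands
// ("fieldName", "key=value,key=value") argument pairs into a complete mapping body; the index
// name and field definition are hypothetical, and source(XContentBuilder) is the overload
// visible in the surrounding hunks:
import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest;

class SimpleMappingExample {
    static PutMappingRequest keywordField() {
        return new PutMappingRequest("my-index")
            .source(PutMappingRequest.simpleMapping("field1", "type=keyword,store=true"));
    }
}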
*/ - public PutMappingRequest source(String mappingSource, XContentType xContentType) { - return source(new BytesArray(mappingSource), xContentType); + public PutMappingRequest source(String mappingSource, MediaType mediaType) { + return source(new BytesArray(mappingSource), mediaType); } /** @@ -353,7 +355,7 @@ public void writeTo(StreamOutput out) throws IOException { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { if (source != null) { try (InputStream stream = new BytesArray(source).streamInput()) { - builder.rawValue(stream, XContentType.JSON); + builder.rawValue(stream, MediaTypeRegistry.JSON); } } else { builder.startObject().endObject(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java index a1300b5859ce5..d44b243bb0edb 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java @@ -36,17 +36,19 @@ import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.XContentBuilder; import java.util.Map; /** * Builder for a put mapping request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class PutMappingRequestBuilder extends AcknowledgedRequestBuilder< PutMappingRequest, AcknowledgedResponse, @@ -95,8 +97,8 @@ public PutMappingRequestBuilder setSource(Map mappingSource) { /** * The mapping source definition. 
*/ - public PutMappingRequestBuilder setSource(String mappingSource, XContentType xContentType) { - request.source(mappingSource, xContentType); + public PutMappingRequestBuilder setSource(String mappingSource, MediaType mediaType) { + request.source(mappingSource, mediaType); return this; } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportAutoPutMappingAction.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportAutoPutMappingAction.java index 0b66689b869f1..4722c1048014f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportAutoPutMappingAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportAutoPutMappingAction.java @@ -31,10 +31,9 @@ package org.opensearch.action.admin.indices.mapping.put; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -42,6 +41,7 @@ import org.opensearch.cluster.metadata.MetadataMappingService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.Index; import org.opensearch.tasks.Task; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportPutMappingAction.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportPutMappingAction.java index 6c3482da3cac0..ac797914aafd8 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportPutMappingAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportPutMappingAction.java @@ -35,11 +35,10 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.action.RequestValidators; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ack.ClusterStateUpdateResponse; import org.opensearch.cluster.block.ClusterBlockException; @@ -48,6 +47,7 @@ import org.opensearch.cluster.metadata.MetadataMappingService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.Index; import org.opensearch.index.IndexNotFoundException; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequest.java index d42f3699765e7..f48ec1ae6fb71 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequest.java 
+++ b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequest.java @@ -37,9 +37,10 @@ import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.util.CollectionUtils; +import org.opensearch.core.common.util.CollectionUtils; import java.io.IOException; import java.util.Arrays; @@ -49,8 +50,9 @@ /** * A request to open an index. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class OpenIndexRequest extends AcknowledgedRequest<OpenIndexRequest> implements IndicesRequest.Replaceable { private String[] indices; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequestBuilder.java index bf09c3f173491..19770255b0ee1 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequestBuilder.java @@ -36,12 +36,14 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Builder for an open index request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class OpenIndexRequestBuilder extends AcknowledgedRequestBuilder<OpenIndexRequest, OpenIndexResponse, OpenIndexRequestBuilder> { public OpenIndexRequestBuilder(OpenSearchClient client, OpenIndexAction action) { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexResponse.java index bd96a1071c129..78af1abc3ce31 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.indices.open; import org.opensearch.action.support.master.ShardsAcknowledgedResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ConstructingObjectParser; @@ -43,8 +44,9 @@ /** * A response for an open index action. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class OpenIndexResponse extends ShardsAcknowledgedResponse { private static final ConstructingObjectParser<OpenIndexResponse, Void> PARSER = new ConstructingObjectParser<>( diff --git a/server/src/main/java/org/opensearch/action/admin/indices/open/TransportOpenIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/open/TransportOpenIndexAction.java index 018c527f3d759..0243990dce2ff 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/open/TransportOpenIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/open/TransportOpenIndexAction.java @@ -35,7 +35,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.DestructiveOperations; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; @@ -47,6 +46,7 @@ import org.opensearch.cluster.metadata.MetadataIndexStateService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.Index; import org.opensearch.tasks.Task; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequest.java index ca5a17f0a1520..1fb8514cbf48c 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequest.java @@ -37,9 +37,10 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.cluster.metadata.IndexMetadata.APIBlock; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.util.CollectionUtils; +import org.opensearch.core.common.util.CollectionUtils; import java.io.IOException; import java.util.Objects; @@ -49,8 +50,9 @@ /** * A request to add a block to an index. 
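// Usage sketch for adding an index block; "client", the index name, and the choice of
// APIBlock.WRITE are hypothetical, and both the AddIndexBlockRequest(APIBlock, String...)
// constructor (inferred from the "private final APIBlock block" field below) and the
// IndicesAdminClient#addBlock entry point should be read as assumptions, not verbatim API.
import org.opensearch.action.admin.indices.readonly.AddIndexBlockRequest;
import org.opensearch.action.admin.indices.readonly.AddIndexBlockResponse;
import org.opensearch.client.Client;
import org.opensearch.cluster.metadata.IndexMetadata.APIBlock;

class AddIndexBlockExample {
    static AddIndexBlockResponse blockWrites(Client client) {
        AddIndexBlockRequest request = new AddIndexBlockRequest(APIBlock.WRITE, "my-index");
        return client.admin().indices().addBlock(request).actionGet();
    }
}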
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class AddIndexBlockRequest extends AcknowledgedRequest<AddIndexBlockRequest> implements IndicesRequest.Replaceable { private final APIBlock block; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequestBuilder.java index 8322ba19f433e..ebcdf700d3b6e 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequestBuilder.java @@ -36,12 +36,14 @@ import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.cluster.metadata.IndexMetadata.APIBlock; +import org.opensearch.common.annotation.PublicApi; /** * Builder for add index block request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class AddIndexBlockRequestBuilder extends AcknowledgedRequestBuilder< AddIndexBlockRequest, AddIndexBlockResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockResponse.java index 39268d752c8c5..3ab64fa55af8b 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockResponse.java @@ -32,18 +32,19 @@ package org.opensearch.action.admin.indices.readonly; import org.opensearch.OpenSearchException; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.master.ShardsAcknowledgedResponse; import org.opensearch.common.Nullable; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.util.CollectionUtils; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.util.CollectionUtils; +import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.index.Index; import java.io.IOException; import java.util.List; @@ -54,8 +55,9 @@ /** * Transport response for adding an index block. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class AddIndexBlockResponse extends ShardsAcknowledgedResponse { private final List<AddBlockResult> indices; @@ -93,14 +95,15 @@ protected void addCustomFields(final XContentBuilder builder, final Params param @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } /** * Result for adding a block * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class AddBlockResult implements Writeable, ToXContentFragment { private final Index index; @@ -192,15 +195,16 @@ public XContentBuilder toXContent(final XContentBuilder builder, final Params pa @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } } /** * Per shard result for adding a block * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class AddBlockShardResult implements Writeable, ToXContentFragment { private final int id; @@ -252,14 +256,15 @@ public XContentBuilder toXContent(final XContentBuilder builder, final Params pa @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } /** * Contains failure information * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Failure extends DefaultShardOperationFailedException { private @Nullable String nodeId; @@ -298,7 +303,7 @@ public XContentBuilder innerToXContent(final XContentBuilder builder, final Para @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } static Failure readFailure(final StreamInput in) throws IOException { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/readonly/TransportAddIndexBlockAction.java b/server/src/main/java/org/opensearch/action/admin/indices/readonly/TransportAddIndexBlockAction.java index ca1ca2c5f33b9..eb018d16119e0 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/readonly/TransportAddIndexBlockAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/readonly/TransportAddIndexBlockAction.java @@ -35,7 +35,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.DestructiveOperations; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; @@ -46,6 +45,7 @@ import org.opensearch.cluster.metadata.MetadataIndexStateService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.Index; import org.opensearch.tasks.Task; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockAction.java b/server/src/main/java/org/opensearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockAction.java index 3dc459a380bfd..a86475a16a779 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockAction.java @@ -33,7 +33,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionType; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.replication.ReplicationOperation; @@ -45,14 +44,15 @@ import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.lease.Releasable; -import org.opensearch.index.shard.IndexShard; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; -import org.opensearch.tasks.TaskId; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryRequest.java index 5294e573bab7c..aca98a6d9c571 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryRequest.java @@ -34,17 +34,19 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.broadcast.BroadcastRequest; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import java.io.IOException; /** * Request for recovery information * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RecoveryRequest extends BroadcastRequest<RecoveryRequest> { private boolean detailed = false; // Provides extra details in the response diff --git a/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryRequestBuilder.java index 99a1fb430fb28..2f44a5f2df04a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Recovery information request builder. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RecoveryRequestBuilder extends BroadcastOperationRequestBuilder<RecoveryRequest, RecoveryResponse, RecoveryRequestBuilder> { /** diff --git a/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryResponse.java index 7664a73c27fc8..27b6b334ef4b4 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/recovery/RecoveryResponse.java @@ -32,12 +32,13 @@ package org.opensearch.action.admin.indices.recovery; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.broadcast.BroadcastResponse; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.indices.recovery.RecoveryState; @@ -48,8 +49,9 @@ /** * Information regarding the recovery state of indices and their associated shards. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RecoveryResponse extends BroadcastResponse { private final Map<String, List<RecoveryState>> shardRecoveryStates; @@ -120,6 +122,6 @@ public void writeTo(StreamOutput out) throws IOException { @Override public String toString() { - return Strings.toString(XContentType.JSON, this, true, true); + return Strings.toString(MediaTypeRegistry.JSON, this, true, true); } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/recovery/TransportRecoveryAction.java b/server/src/main/java/org/opensearch/action/admin/indices/recovery/TransportRecoveryAction.java index 1f76ee4bc5337..de76ec2d4f5a7 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/recovery/TransportRecoveryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/recovery/TransportRecoveryAction.java @@ -33,7 +33,6 @@ package org.opensearch.action.admin.indices.recovery; import org.opensearch.action.support.ActionFilters; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; @@ -43,6 +42,7 @@ import org.opensearch.cluster.routing.ShardsIterator; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.index.IndexService; import org.opensearch.index.shard.IndexShard; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshRequest.java index c6e230cc66373..c8000cbc40da8 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshRequest.java +++ 
b/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshRequest.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.indices.refresh; import org.opensearch.action.support.broadcast.BroadcastRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import java.io.IOException; @@ -46,8 +47,9 @@ * @see org.opensearch.client.IndicesAdminClient#refresh(RefreshRequest) * @see RefreshResponse * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RefreshRequest extends BroadcastRequest<RefreshRequest> { public RefreshRequest(String... indices) { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshRequestBuilder.java index 5b27ae13f24be..ebafc726bfd39 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshRequestBuilder.java @@ -34,14 +34,16 @@ import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * A refresh request making all operations performed since the last refresh available for search. The (near) real-time * capabilities depend on the index engine used. For example, the internal one requires refresh to be called, but by * default a refresh is scheduled periodically. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RefreshRequestBuilder extends BroadcastOperationRequestBuilder<RefreshRequest, RefreshResponse, RefreshRequestBuilder> { public RefreshRequestBuilder(OpenSearchClient client, RefreshAction action) { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshResponse.java index 8c83986e384c1..30351b8983717 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/refresh/RefreshResponse.java @@ -32,8 +32,9 @@ package org.opensearch.action.admin.indices.refresh; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.broadcast.BroadcastResponse; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.XContentParser; @@ -45,8 +46,9 @@ /** * The response of a refresh action.
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RefreshResponse extends BroadcastResponse { private static final ConstructingObjectParser<RefreshResponse, Void> PARSER = new ConstructingObjectParser<>("refresh", true, arg -> { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/refresh/TransportRefreshAction.java b/server/src/main/java/org/opensearch/action/admin/indices/refresh/TransportRefreshAction.java index ae56160b098a4..e276cbf900ff0 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/refresh/TransportRefreshAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/refresh/TransportRefreshAction.java @@ -34,13 +34,13 @@ import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.ActiveShardCount; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.replication.BasicReplicationRequest; import org.opensearch.action.support.replication.ReplicationResponse; import org.opensearch.action.support.replication.TransportBroadcastReplicationAction; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.index.shard.ShardId; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/refresh/TransportShardRefreshAction.java b/server/src/main/java/org/opensearch/action/admin/indices/refresh/TransportShardRefreshAction.java index 1541753581c95..e17fa35395770 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/refresh/TransportShardRefreshAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/refresh/TransportShardRefreshAction.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.indices.refresh; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.replication.BasicReplicationRequest; import org.opensearch.action.support.replication.ReplicationResponse; @@ -40,8 +39,9 @@ import org.opensearch.cluster.action.shard.ShardStateAction; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequest.java index 9d8caf1d7b20b..9a913c6bcafff 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequest.java @@ -10,17 +10,19 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.broadcast.BroadcastRequest; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import 
org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import java.io.IOException; /** * Request for Segment Replication stats information * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SegmentReplicationStatsRequest extends BroadcastRequest<SegmentReplicationStatsRequest> { private boolean detailed = false; // Provides extra details in the response private boolean activeOnly = false; // Only reports on active segment replication events @@ -89,7 +91,7 @@ public void activeOnly(boolean activeOnly) { /** * Contains the list of shard IDs if shards are passed, empty otherwise. The array is empty by default. * - * @return list of shard id's if shards are passed, empty otherwise + * @return array of shard IDs if shards are passed, empty otherwise */ public String[] shards() { return shards; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequestBuilder.java index 7e68d2ac59f07..9f00bff414cf5 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequestBuilder.java @@ -10,12 +10,14 @@ import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Segment Replication stats information request builder. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SegmentReplicationStatsRequestBuilder extends BroadcastOperationRequestBuilder< SegmentReplicationStatsRequest, SegmentReplicationStatsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsResponse.java index 63899668badca..e65e13a945abd 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsResponse.java @@ -8,12 +8,13 @@ package org.opensearch.action.admin.indices.replication; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.broadcast.BroadcastResponse; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.SegmentReplicationPerGroupStats; @@ -24,8 +25,9 @@ /** * Stats information regarding the Segment Replication state of indices and their associated shards.
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SegmentReplicationStatsResponse extends BroadcastResponse { private final Map<String, List<SegmentReplicationPerGroupStats>> replicationStats; @@ -91,6 +93,6 @@ public void writeTo(StreamOutput out) throws IOException { @Override public String toString() { - return Strings.toString(XContentType.JSON, this, true, true); + return Strings.toString(MediaTypeRegistry.JSON, this, true, true); } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsAction.java index 8d299be0bea8b..1b912518d7e04 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsAction.java @@ -9,7 +9,6 @@ package org.opensearch.action.admin.indices.replication; import org.opensearch.action.support.ActionFilters; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; @@ -19,13 +18,14 @@ import org.opensearch.cluster.routing.ShardsIterator; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexService; import org.opensearch.index.SegmentReplicationPerGroupStats; import org.opensearch.index.SegmentReplicationPressureService; import org.opensearch.index.SegmentReplicationShardStats; import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.SegmentReplicationState; import org.opensearch.indices.replication.SegmentReplicationTargetService; @@ -33,11 +33,11 @@ import org.opensearch.transport.TransportService; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; +import java.util.HashMap; import java.util.List; -import java.util.ArrayList; import java.util.Map; -import java.util.HashMap; import java.util.stream.Collectors; /** diff --git a/server/src/main/java/org/opensearch/action/admin/indices/resolve/ResolveIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/resolve/ResolveIndexAction.java index eb85268961571..e20e4c2d868a8 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/resolve/ResolveIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/resolve/ResolveIndexAction.java @@ -32,10 +32,8 @@ package org.opensearch.action.admin.indices.resolve; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.ActionResponse; import org.opensearch.action.ActionType; import org.opensearch.action.IndicesRequest; import org.opensearch.action.OriginalIndices; @@ -51,13 +49,16 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.service.ClusterService; import 
org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Inject; +import org.opensearch.common.util.concurrent.CountDown; +import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.util.concurrent.CountDown; -import org.opensearch.core.common.Strings; -import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.tasks.Task; @@ -82,8 +83,9 @@ /** * Transport action to resolve an index. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ResolveIndexAction extends ActionType<ResolveIndexAction.Response> { public static final ResolveIndexAction INSTANCE = new ResolveIndexAction(); @@ -96,8 +98,9 @@ private ResolveIndexAction() { /** * Request for resolving an index * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Request extends ActionRequest implements IndicesRequest.Replaceable { public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.strictExpandOpen(); @@ -196,8 +199,9 @@ public String getName() { /** * The resolved index * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class ResolvedIndex extends ResolvedIndexAbstraction implements Writeable, ToXContentObject { static final ParseField ALIASES_FIELD = new ParseField("aliases"); @@ -284,8 +288,9 @@ public int hashCode() { /** * The resolved index alias * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class ResolvedAlias extends ResolvedIndexAbstraction implements Writeable, ToXContentObject { static final ParseField INDICES_FIELD = new ParseField("indices"); @@ -346,8 +351,9 @@ public int hashCode() { /** * The resolved data stream * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class ResolvedDataStream extends ResolvedIndexAbstraction implements Writeable, ToXContentObject { static final ParseField BACKING_INDICES_FIELD = new ParseField("backing_indices"); @@ -418,8 +424,9 @@ public int hashCode() { /** * Response for resolving an index * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Response extends ActionResponse implements ToXContentObject { static final ParseField INDICES_FIELD = new ParseField("indices"); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/Condition.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/Condition.java index 280dc307447b7..4d0b7fc8c13c7 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/Condition.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/Condition.java @@ -33,8 +33,9 @@ package org.opensearch.action.admin.indices.rollover; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.NamedWriteable; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import 
org.opensearch.core.xcontent.ToXContentFragment; import java.util.Objects; @@ -42,8 +43,9 @@ /** * Base class for rollover request conditions * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class Condition<T> implements NamedWriteable, ToXContentFragment { protected T value; @@ -96,8 +98,9 @@ public String name() { /** * Holder for index stats used to evaluate conditions * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Stats { public final long numDocs; public final long indexCreated; @@ -113,8 +116,9 @@ public Stats(long numDocs, long indexCreated, ByteSizeValue indexSize) { /** * Holder for evaluated condition result * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Result { public final Condition<?> condition; public final boolean matched; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/MaxAgeCondition.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/MaxAgeCondition.java index 3752ee6ece82c..da750594e7264 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/MaxAgeCondition.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/MaxAgeCondition.java @@ -32,9 +32,9 @@ package org.opensearch.action.admin.indices.rollover; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.TimeValue; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/MaxSizeCondition.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/MaxSizeCondition.java index f0ab571ea9f75..faa3558420a5c 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/MaxSizeCondition.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/MaxSizeCondition.java @@ -34,8 +34,8 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverInfo.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverInfo.java index f58c842be374b..75c68350e2204 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverInfo.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverInfo.java @@ -34,13 +34,14 @@ import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.Diff; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.ParseField; import 
org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -52,8 +53,9 @@ /** * Class for holding Rollover related information within an index * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RolloverInfo extends AbstractDiffable<RolloverInfo> implements Writeable, ToXContentFragment { public static final ParseField CONDITION_FIELD = new ParseField("met_conditions"); @@ -149,6 +151,6 @@ public boolean equals(Object obj) { @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java index 95a4b6573611d..68c0076bbd302 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java @@ -37,11 +37,12 @@ import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.mapper.MapperService; @@ -54,12 +55,13 @@ /** * Request class to swap index under an alias or increment data stream generation upon satisfying conditions - * + * <p> * Note: there is a new class with the same name for the Java HLRC that uses a typeless format. * Any changes done to this class should also go to that client class. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RolloverRequest extends AcknowledgedRequest<RolloverRequest> implements IndicesRequest { private static final ObjectParser<RolloverRequest, Void> PARSER = new ObjectParser<>("rollover"); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequestBuilder.java index ed598c14acec3..acac7102edbc7 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequestBuilder.java @@ -35,15 +35,17 @@ import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.unit.ByteSizeValue; /** * Transport request to rollover an index. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RolloverRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< RolloverRequest, RolloverResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java index 55ee65d0a4973..b7df35cd480bb 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.indices.rollover; import org.opensearch.action.support.master.ShardsAcknowledgedResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -51,12 +52,13 @@ /** * Response object for {@link RolloverRequest} API - * + * <p> * Note: there is a new class with the same name for the Java HLRC that uses a typeless format. * Any changes done to this class should also go to that client class. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class RolloverResponse extends ShardsAcknowledgedResponse implements ToXContentObject { private static final ParseField NEW_INDEX = new ParseField("new_index"); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/TransportRolloverAction.java index 4ddff1563885a..3b11a3d82d707 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -33,7 +33,6 @@ package org.opensearch.action.admin.indices.rollover; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.stats.IndicesStatsAction; import org.opensearch.action.admin.indices.stats.IndicesStatsRequest; import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; @@ -54,8 +53,9 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.index.shard.DocsStats; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexSegments.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexSegments.java index 7249bc5e9d3ba..4b37da2c99850 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexSegments.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexSegments.java @@ -32,6 +32,8 @@ package org.opensearch.action.admin.indices.segments; +import org.opensearch.common.annotation.PublicApi; + import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; @@ -41,8 +43,9 @@ /** * List of Index Segments * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class 
IndexSegments implements Iterable<IndexShardSegments> { private final String index; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexShardSegments.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexShardSegments.java index a6caf0649fde1..8fdc050511050 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexShardSegments.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexShardSegments.java @@ -32,6 +32,7 @@ package org.opensearch.action.admin.indices.segments; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.index.shard.ShardId; import java.util.Arrays; @@ -40,8 +41,9 @@ /** * List of Index Shard Segments * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexShardSegments implements Iterable<ShardSegments> { private final ShardId shardId; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentResponse.java index 5604b13e7a2a4..648f58dada4f9 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentResponse.java @@ -36,8 +36,9 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.SortedSetSortField; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.broadcast.BroadcastResponse; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.XContentBuilder; @@ -55,8 +56,9 @@ /** * Transport response for retrieving indices segment information * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndicesSegmentResponse extends BroadcastResponse { private final ShardSegments[] shards; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequest.java index 412568cdefc10..aff2b383df08f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequest.java @@ -33,17 +33,19 @@ package org.opensearch.action.admin.indices.segments; import org.opensearch.action.support.broadcast.BroadcastRequest; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import java.io.IOException; /** * Transport request for retrieving indices segment information * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndicesSegmentsRequest extends BroadcastRequest<IndicesSegmentsRequest> { protected boolean verbose = false; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java 
b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java index 4b758e1f4bfb1..579b6d997acd9 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport request builder for retrieving indices segment information * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndicesSegmentsRequestBuilder extends BroadcastOperationRequestBuilder< IndicesSegmentsRequest, IndicesSegmentResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsRequest.java index e66717cadb464..84edec384b68a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsRequest.java @@ -10,9 +10,10 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.broadcast.BroadcastRequest; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.XContentParser; import java.io.IOException; @@ -25,7 +26,10 @@ /** * Transport request for retrieving PITs segment information + * + * @opensearch.api */ +@PublicApi(since = "2.3.0") public class PitSegmentsRequest extends BroadcastRequest<PitSegmentsRequest> { private boolean verbose = false; private final List<String> pitIds = new ArrayList<>(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/ShardSegments.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/ShardSegments.java index 90317542244ff..09adda4d79108 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/segments/ShardSegments.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/ShardSegments.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.indices.segments; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -45,8 +46,9 @@ /** * Collection of shard segments * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ShardSegments implements Writeable, Iterable<Segment> { private final ShardRouting shardRouting; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java index b6dc3ddb19081..6dad4b8e3554f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java @@ -33,7 +33,6 @@ 
package org.opensearch.action.admin.indices.segments; import org.opensearch.action.support.ActionFilters; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; @@ -43,6 +42,7 @@ import org.opensearch.cluster.routing.ShardsIterator; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.index.IndexService; import org.opensearch.index.shard.IndexShard; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/TransportPitSegmentsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/TransportPitSegmentsAction.java index 276551372339f..393c07ba58c5e 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/segments/TransportPitSegmentsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/TransportPitSegmentsAction.java @@ -7,13 +7,11 @@ */ package org.opensearch.action.admin.indices.segments; -import org.opensearch.action.ActionListener; import org.opensearch.action.search.ListPitInfo; import org.opensearch.action.search.PitService; import org.opensearch.action.search.SearchContextId; import org.opensearch.action.search.SearchContextIdForNode; import org.opensearch.action.support.ActionFilters; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; @@ -28,12 +26,14 @@ import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.indices.IndicesService; import org.opensearch.search.SearchService; import org.opensearch.search.internal.PitReaderContext; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequest.java index 9666bcb34831a..547cfa8c3bce3 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequest.java @@ -37,9 +37,10 @@ import org.opensearch.action.ValidateActions; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import 
org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import java.io.IOException; import java.util.Arrays; @@ -48,8 +49,9 @@ /** * Transport request for getting index settings * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetSettingsRequest extends ClusterManagerNodeReadRequest<GetSettingsRequest> implements IndicesRequest.Replaceable { private String[] indices = Strings.EMPTY_ARRAY; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java index 84cd4e8682e93..5ba42c05dccf0 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java @@ -35,13 +35,15 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.ArrayUtils; /** * Transport request builder for getting index settings * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetSettingsRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< GetSettingsRequest, GetSettingsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsResponse.java index 05b06fc2b62c7..695c98684f0f4 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsResponse.java @@ -32,17 +32,17 @@ package org.opensearch.action.admin.indices.settings.get; -import org.opensearch.action.ActionResponse; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.xcontent.XContentParserUtils; -import org.opensearch.common.xcontent.json.JsonXContent; import java.io.ByteArrayOutputStream; import java.io.IOException; @@ -54,8 +55,9 @@ /** * Transport response for getting index settings * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetSettingsResponse extends ActionResponse implements ToXContentObject { private final Map<String, Settings> indexToSettings; @@ -193,7 +194,7 @@ public String toString() { ByteArrayOutputStream baos = new ByteArrayOutputStream(); XContentBuilder builder = new XContentBuilder(JsonXContent.jsonXContent, baos); toXContent(builder, ToXContent.EMPTY_PARAMS, false); - return Strings.toString(builder); + return
builder.toString(); } catch (IOException e) { throw new IllegalStateException(e); // should not be possible here } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/TransportGetSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/TransportGetSettingsAction.java index 22cbcf804f9d2..d8f2180208b18 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/TransportGetSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/TransportGetSettingsAction.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.indices.settings.get; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; import org.opensearch.cluster.ClusterState; @@ -42,12 +41,13 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.regex.Regex; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsFilter; -import org.opensearch.common.util.CollectionUtils; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.core.index.Index; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java index 465c1dd573567..9265c6ae60678 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java @@ -35,10 +35,9 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ack.ClusterStateUpdateResponse; import org.opensearch.cluster.block.ClusterBlockException; @@ -48,6 +47,7 @@ import org.opensearch.cluster.metadata.MetadataUpdateSettingsService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.Index; import org.opensearch.index.IndexModule; @@ -55,8 +55,9 @@ import org.opensearch.transport.TransportService; import java.io.IOException; -import java.util.stream.Stream; +import java.util.Arrays; import java.util.Set; +import java.util.stream.Stream; import static org.opensearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; @@ -77,10 +78,11 @@ public class 
TransportUpdateSettingsAction extends TransportClusterManagerNodeAc "index.max_script_fields", "index.max_terms_count", "index.max_regex_length", - "index.highlight.max_analyzed_offset" + "index.highlight.max_analyzed_offset", + "index.number_of_replicas" ); - private final static String[] ALLOWLIST_REMOTE_SNAPSHOT_SETTINGS_PREFIXES = { "index.search.slowlog" }; + private final static String[] ALLOWLIST_REMOTE_SNAPSHOT_SETTINGS_PREFIXES = { "index.search.slowlog", "index.routing.allocation" }; private final MetadataUpdateSettingsService updateSettingsService; @@ -145,10 +147,10 @@ protected ClusterBlockException checkBlock(UpdateSettingsRequest request, Cluste } } + final String[] requestIndexNames = Arrays.stream(requestIndices).map(Index::getName).toArray(String[]::new); return allowSearchableSnapshotSettingsUpdate ? null - : state.blocks() - .indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndexNames(state, request)); + : state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, requestIndexNames); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequest.java index a7b7e005bce90..45172e313dfcc 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequest.java @@ -36,14 +36,16 @@ import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.master.AcknowledgedRequest; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.settings.Settings; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import java.io.IOException; import java.util.Arrays; @@ -52,15 +54,16 @@ import java.util.Objects; import static org.opensearch.action.ValidateActions.addValidationError; +import static org.opensearch.common.settings.Settings.Builder.EMPTY_SETTINGS; import static org.opensearch.common.settings.Settings.readSettingsFromStream; import static org.opensearch.common.settings.Settings.writeSettingsToStream; -import static org.opensearch.common.settings.Settings.Builder.EMPTY_SETTINGS; /** * Request for an update index settings action * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class UpdateSettingsRequest extends AcknowledgedRequest<UpdateSettingsRequest> implements IndicesRequest.Replaceable, @@ -157,8 +160,8 @@ public UpdateSettingsRequest settings(Settings.Builder settings) { /** * Sets the settings to be updated (either json or yaml format) */ - public UpdateSettingsRequest settings(String source, XContentType xContentType) { - this.settings = Settings.builder().loadFromSource(source, xContentType).build(); + public UpdateSettingsRequest settings(String source, MediaType mediaType) { + this.settings = 
Settings.builder().loadFromSource(source, mediaType).build(); return this; } @@ -221,7 +224,7 @@ public UpdateSettingsRequest fromXContent(XContentParser parser) throws IOExcept @Override public String toString() { - return "indices : " + Arrays.toString(indices) + "," + Strings.toString(XContentType.JSON, this); + return "indices : " + Arrays.toString(indices) + "," + Strings.toString(MediaTypeRegistry.JSON, this); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java index 7501f0c7798de..08d7a240aa007 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java @@ -36,6 +36,7 @@ import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentType; @@ -44,8 +45,9 @@ /** * Builder for an update index settings request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class UpdateSettingsRequestBuilder extends AcknowledgedRequestBuilder< UpdateSettingsRequest, AcknowledgedResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java index bc9633f2bd2db..b986ef3c62e73 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java @@ -37,12 +37,14 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.cluster.health.ClusterHealthStatus; +import org.opensearch.common.annotation.PublicApi; /** * Request builder for {@link IndicesShardStoresRequest} * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndicesShardStoreRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< IndicesShardStoresRequest, IndicesShardStoresResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresRequest.java index ea3de86fa17c0..ea07325c35c3b 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresRequest.java @@ -36,9 +36,10 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; import org.opensearch.cluster.health.ClusterHealthStatus; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import java.io.IOException; import 
java.util.EnumSet; @@ -46,8 +47,9 @@ /** * Request for {@link IndicesShardStoresAction} * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndicesShardStoresRequest extends ClusterManagerNodeReadRequest<IndicesShardStoresRequest> implements IndicesRequest.Replaceable { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java index fed9ee01b385d..c2f373fcbd6a1 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java @@ -33,8 +33,9 @@ package org.opensearch.action.admin.indices.shards; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionResponse; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -53,15 +54,17 @@ * Consists of {@link StoreStatus}s for requested indices grouped by * indices and shard ids and a list of encountered node {@link Failure}s * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndicesShardStoresResponse extends ActionResponse implements ToXContentFragment { /** * Shard store information from a node * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class StoreStatus implements Writeable, ToXContentFragment, Comparable<StoreStatus> { private final DiscoveryNode node; private final String allocationId; @@ -70,7 +73,10 @@ public static class StoreStatus implements Writeable, ToXContentFragment, Compar /** * The status of the shard store with respect to the cluster + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum AllocationStatus { /** @@ -232,8 +238,9 @@ public int compareTo(StoreStatus other) { /** * Single node failure while retrieving shard store information * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Failure extends DefaultShardOperationFailedException { private String nodeId; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java b/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java index c1e6a64efbf2f..04166c88a00ad 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java @@ -34,7 +34,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.CollectionUtil; -import org.opensearch.action.ActionListener; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; @@ -55,12 +54,13 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.collect.Tuple; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; import 
org.opensearch.common.util.concurrent.CountDown; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.gateway.AsyncShardFetch; import org.opensearch.gateway.TransportNodesListGatewayStartedShards; import org.opensearch.gateway.TransportNodesListGatewayStartedShards.NodeGatewayStartedShards; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -195,7 +195,7 @@ void start() { } else { for (Tuple<ShardId, String> shard : shards) { InternalAsyncFetch fetch = new InternalAsyncFetch(logger, "shard_stores", shard.v1(), shard.v2(), listShardStoresInfo); - fetch.fetchData(nodes, Collections.<String>emptySet()); + fetch.fetchData(nodes, Collections.emptyMap()); } } } @@ -223,7 +223,7 @@ protected synchronized void processAsyncFetch( List<FailedNodeException> failures, long fetchingRound ) { - fetchResponses.add(new Response(shardId, responses, failures)); + fetchResponses.add(new Response(shardAttributesMap.keySet().iterator().next(), responses, failures)); if (expectedOps.countDown()) { finish(); } @@ -312,7 +312,7 @@ private boolean shardExistsInNode(final NodeGatewayStartedShards response) { } @Override - protected void reroute(ShardId shardId, String reason) { + protected void reroute(String shardId, String reason) { // no-op } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java index b2e7ed92e608a..a5225f2243876 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java @@ -40,10 +40,11 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.core.ParseField; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; @@ -57,8 +58,9 @@ /** * Request class to shrink an index into a single shard * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ResizeRequest extends AcknowledgedRequest<ResizeRequest> implements IndicesRequest, ToXContentObject { public static final ObjectParser<ResizeRequest, Void> PARSER = new ObjectParser<>("resize_request"); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java index eb05c0a69b78b..f9d90d46b0904 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java @@ -36,14 +36,16 @@ import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import 
org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; /** * Transport request builder for resizing an index * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ResizeRequestBuilder extends AcknowledgedRequestBuilder<ResizeRequest, ResizeResponse, ResizeRequestBuilder> { public ResizeRequestBuilder(OpenSearchClient client, ActionType<ResizeResponse> action) { super(client, action, new ResizeRequest()); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeResponse.java index 1aa09023e3583..a4801f84c9ef9 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeResponse.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.indices.shrink; import org.opensearch.action.admin.indices.create.CreateIndexResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.XContentParser; @@ -42,8 +43,9 @@ /** * A response for a resize index action, either shrink or split index. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ResizeResponse extends CreateIndexResponse { private static final ConstructingObjectParser<ResizeResponse, Void> PARSER = new ConstructingObjectParser<>( diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeType.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeType.java index 6403ed735ae49..91bcc0d62b1c6 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeType.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeType.java @@ -32,11 +32,14 @@ package org.opensearch.action.admin.indices.shrink; +import org.opensearch.common.annotation.PublicApi; + /** * The type of the resize operation * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum ResizeType { SHRINK, SPLIT, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java index 0d31c90a98f56..ca4c16935c2b9 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java @@ -33,7 +33,6 @@ package org.opensearch.action.admin.indices.shrink; import org.apache.lucene.index.IndexWriter; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest; import org.opensearch.action.admin.indices.create.CreateIndexRequest; import org.opensearch.action.admin.indices.stats.IndexShardStats; @@ -49,16 +48,17 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.Settings; +import 
org.opensearch.core.action.ActionListener;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.unit.ByteSizeValue;
+import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.index.IndexNotFoundException;
 import org.opensearch.index.IndexSettings;
 import org.opensearch.index.shard.DocsStats;
-import org.opensearch.core.index.shard.ShardId;
+import org.opensearch.index.store.StoreStats;
 import org.opensearch.threadpool.ThreadPool;
 import org.opensearch.transport.TransportService;
-import org.opensearch.common.unit.ByteSizeValue;
-import org.opensearch.index.store.StoreStats;
 
 import java.io.IOException;
 import java.util.Locale;
@@ -66,6 +66,8 @@
 import java.util.Set;
 import java.util.function.IntFunction;
 
+import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS;
+
 /**
  * Main class to initiate resizing (shrink / split) an index into a new index
  *
@@ -138,25 +140,80 @@ protected void clusterManagerOperation(
         // there is no need to fetch docs stats for split but we keep it simple and do it anyway for simplicity of the code
         final String sourceIndex = indexNameExpressionResolver.resolveDateMathExpression(resizeRequest.getSourceIndex());
         final String targetIndex = indexNameExpressionResolver.resolveDateMathExpression(resizeRequest.getTargetIndexRequest().index());
-        client.admin()
-            .indices()
-            .prepareStats(sourceIndex)
-            .clear()
-            .setDocs(true)
-            .setStore(true)
-            .execute(ActionListener.delegateFailure(listener, (delegatedListener, indicesStatsResponse) -> {
-                CreateIndexClusterStateUpdateRequest updateRequest = prepareCreateIndexRequest(resizeRequest, state, i -> {
-                    IndexShardStats shard = indicesStatsResponse.getIndex(sourceIndex).getIndexShards().get(i);
-                    return shard == null ? null : shard.getPrimary().getDocs();
-                }, indicesStatsResponse.getPrimaries().store, sourceIndex, targetIndex);
-                createIndexService.createIndex(
-                    updateRequest,
-                    ActionListener.map(
-                        delegatedListener,
-                        response -> new ResizeResponse(response.isAcknowledged(), response.isShardsAcknowledged(), updateRequest.index())
-                    )
-                );
-            }));
+
+        IndexMetadata indexMetadata = state.metadata().index(sourceIndex);
+        if (resizeRequest.getResizeType().equals(ResizeType.SHRINK)
+            && state.metadata().isSegmentReplicationEnabled(sourceIndex)
+            && indexMetadata != null
+            && Integer.valueOf(indexMetadata.getSettings().get(SETTING_NUMBER_OF_REPLICAS)) > 0) {
+            client.admin()
+                .indices()
+                .prepareRefresh(sourceIndex)
+                .execute(ActionListener.delegateFailure(listener, (delegatedRefreshListener, refreshResponse) -> {
+                    client.admin()
+                        .indices()
+                        .prepareStats(sourceIndex)
+                        .clear()
+                        .setDocs(true)
+                        .setStore(true)
+                        .setSegments(true)
+                        .execute(ActionListener.delegateFailure(listener, (delegatedIndicesStatsListener, indicesStatsResponse) -> {
+                            CreateIndexClusterStateUpdateRequest updateRequest = prepareCreateIndexRequest(resizeRequest, state, i -> {
+                                IndexShardStats shard = indicesStatsResponse.getIndex(sourceIndex).getIndexShards().get(i);
+                                return shard == null ? null : shard.getPrimary().getDocs();
+                            }, indicesStatsResponse.getPrimaries().store, sourceIndex, targetIndex);
+
+                            if (indicesStatsResponse.getIndex(sourceIndex)
+                                .getTotal()
+                                .getSegments()
+                                .getReplicationStats().maxBytesBehind != 0) {
+                                throw new IllegalStateException(
+                                    "Replication still in progress for index ["
+                                        + sourceIndex
+                                        + "]. Please wait for replication to complete and retry. Use the _cat/segment_replication/"
+                                        + sourceIndex
+                                        + " api to check if the index is up to date (e.g. bytes_behind == 0)."
+                                );
+                            }
+
+                            createIndexService.createIndex(
+                                updateRequest,
+                                ActionListener.map(
+                                    delegatedIndicesStatsListener,
+                                    response -> new ResizeResponse(
+                                        response.isAcknowledged(),
+                                        response.isShardsAcknowledged(),
+                                        updateRequest.index()
+                                    )
+                                )
+                            );
+                        }));
+                }));
+        } else {
+            client.admin()
+                .indices()
+                .prepareStats(sourceIndex)
+                .clear()
+                .setDocs(true)
+                .setStore(true)
+                .execute(ActionListener.delegateFailure(listener, (delegatedListener, indicesStatsResponse) -> {
+                    CreateIndexClusterStateUpdateRequest updateRequest = prepareCreateIndexRequest(resizeRequest, state, i -> {
+                        IndexShardStats shard = indicesStatsResponse.getIndex(sourceIndex).getIndexShards().get(i);
+                        return shard == null ? null : shard.getPrimary().getDocs();
+                    }, indicesStatsResponse.getPrimaries().store, sourceIndex, targetIndex);
+                    createIndexService.createIndex(
+                        updateRequest,
+                        ActionListener.map(
+                            delegatedListener,
+                            response -> new ResizeResponse(
+                                response.isAcknowledged(),
+                                response.isShardsAcknowledged(),
+                                updateRequest.index()
+                            )
+                        )
+                    );
+                }));
+        }
     }
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStats.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStats.java
index 5a3a34e9a2ebe..8bfeb13b253c3 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStats.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStats.java
@@ -34,10 +34,11 @@
 
 import org.apache.lucene.store.AlreadyClosedException;
 import org.opensearch.common.Nullable;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.common.io.stream.Writeable;
-import org.opensearch.common.unit.ByteSizeValue;
+import org.opensearch.core.common.unit.ByteSizeValue;
 import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.core.xcontent.ToXContentFragment;
 import org.opensearch.core.xcontent.XContentBuilder;
@@ -68,8 +69,9 @@
 /**
  * Common Stats for OpenSearch
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class CommonStats implements Writeable, ToXContentFragment {
 
     @Nullable
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java
index 502b90417615f..a7d9f95b80f7b 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java
@@ -33,10 +33,11 @@
 package org.opensearch.action.admin.indices.stats;
 
 import org.opensearch.Version;
+import org.opensearch.common.annotation.PublicApi;
+import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.common.io.stream.Writeable;
-import org.opensearch.core.common.Strings;
 
 import java.io.IOException;
 import java.util.Collections;
@@ -45,8 +46,9 @@
 /**
  * Common Stats Flags for OpenSearch
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public class CommonStatsFlags implements Writeable, Cloneable {
 
     public static final CommonStatsFlags ALL = new CommonStatsFlags().all();
@@ -255,8 +257,9
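The TransportResizeAction hunk above adds a shrink-specific guard for segment-replication-enabled source indices with at least one replica: the source index is refreshed first, stats are fetched with setSegments(true), and the resize is rejected with an IllegalStateException while ReplicationStats.maxBytesBehind is non-zero. A minimal sketch of a shrink request that can trip that guard, assuming a transport client, a logger, and hypothetical index names; the exact client plumbing may differ:

    // Shrink hypothetical index "logs" into "logs-small"; with segment replication
    // enabled and replicas > 0, this now fails fast while replicas lag the primary.
    ResizeRequest request = new ResizeRequest("logs-small", "logs");
    request.setResizeType(ResizeType.SHRINK);
    request.getTargetIndexRequest().settings(Settings.builder().put("index.number_of_shards", 1));
    client.admin().indices().resizeIndex(request, ActionListener.wrap(
        response -> logger.info("shrink acknowledged: {}", response.isAcknowledged()),
        // On rejection, retry once _cat/segment_replication reports bytes_behind == 0.
        e -> logger.warn("shrink rejected, replicas still catching up", e)
    ));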
@@ public CommonStatsFlags clone() { /** * The flags. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Flag { Store("store", 0), Indexing("indexing", 1), diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexShardStats.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexShardStats.java index 1635ce0bf83fc..b0143d9491087 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexShardStats.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexShardStats.java @@ -32,6 +32,7 @@ package org.opensearch.action.admin.indices.stats; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -44,8 +45,9 @@ /** * IndexShardStats for OpenSearch * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexShardStats implements Iterable<ShardStats>, Writeable { private final ShardId shardId; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexStats.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexStats.java index 1c57ca39576b0..09614ea801193 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexStats.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexStats.java @@ -32,6 +32,8 @@ package org.opensearch.action.admin.indices.stats; +import org.opensearch.common.annotation.PublicApi; + import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; @@ -41,8 +43,9 @@ /** * Index Stats for OpenSearch * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexStats implements Iterable<IndexShardStats> { private final String index; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequest.java index 54f3e9b7d1a24..2b64464a76899 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequest.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.indices.stats; import org.opensearch.action.support.broadcast.BroadcastRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -46,8 +47,9 @@ * All the stats to be returned can be cleared using {@link #clear()}, at which point, specific * stats can be enabled. 
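The stats classes above (CommonStats, CommonStatsFlags, IndexShardStats, IndexStats, IndicesStatsRequest) are now annotated @PublicApi(since = "1.0.0"), making the clear-then-enable pattern described in the IndicesStatsRequest javadoc part of the supported surface. A short sketch, assuming a transport client and a hypothetical index name, mirroring the calls TransportResizeAction itself makes above:

    // Clear the default stats set, then request only docs, store and segment stats.
    IndicesStatsResponse stats = client.admin()
        .indices()
        .prepareStats("my-index")
        .clear()
        .setDocs(true)
        .setStore(true)
        .setSegments(true)
        .get();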
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndicesStatsRequest extends BroadcastRequest<IndicesStatsRequest> { private CommonStatsFlags flags = new CommonStatsFlags(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java index c211812b32c48..acc085a96a896 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java @@ -34,6 +34,7 @@ import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; /** @@ -45,8 +46,9 @@ * All the stats to be returned can be cleared using {@link #clear()}, at which point, specific * stats can be enabled. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndicesStatsRequestBuilder extends BroadcastOperationRequestBuilder< IndicesStatsRequest, IndicesStatsResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponse.java index 4014bad06ff9a..6242081cd2371 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponse.java @@ -33,15 +33,16 @@ package org.opensearch.action.admin.indices.stats; import org.opensearch.action.admin.indices.stats.IndexStats.IndexStatsBuilder; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.broadcast.BroadcastResponse; import org.opensearch.cluster.routing.ShardRouting; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; import java.io.IOException; import java.util.HashMap; @@ -54,8 +55,9 @@ /** * Transport response for retrieving indices stats * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndicesStatsResponse extends BroadcastResponse { private ShardStats[] shards; @@ -226,6 +228,6 @@ static final class Fields { @Override public String toString() { - return Strings.toString(XContentType.JSON, this, true, false); + return Strings.toString(MediaTypeRegistry.JSON, this, true, false); } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/ShardStats.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/ShardStats.java index 77562fa19b319..4ed1ce95b7de2 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/ShardStats.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/ShardStats.java @@ -34,6 +34,7 @@ import 
org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -49,8 +50,9 @@ /** * Shard Stats for OpenSearch * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ShardStats implements Writeable, ToXContentFragment { private ShardRouting shardRouting; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/TransportIndicesStatsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/TransportIndicesStatsAction.java index 9bb519c175f9d..2b85b6d5d6b5b 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/TransportIndicesStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/TransportIndicesStatsAction.java @@ -34,7 +34,6 @@ import org.apache.lucene.store.AlreadyClosedException; import org.opensearch.action.support.ActionFilters; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; @@ -44,6 +43,7 @@ import org.opensearch.cluster.routing.ShardsIterator; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.index.IndexService; import org.opensearch.index.engine.CommitStats; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteComponentTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteComponentTemplateAction.java index e719ad9bdd174..1f427a349c2ea 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteComponentTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteComponentTemplateAction.java @@ -34,8 +34,8 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteComposableIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteComposableIndexTemplateAction.java index 7bfec26e924dd..496358cdfd2b1 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteComposableIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteComposableIndexTemplateAction.java @@ -34,8 +34,8 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import 
org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequest.java index 85524bddc56d8..e93a428b0af26 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequest.java @@ -33,6 +33,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -43,8 +44,9 @@ /** * A request to delete an index template. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeleteIndexTemplateRequest extends ClusterManagerNodeRequest<DeleteIndexTemplateRequest> { private String name; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java index 036272ea0d5da..60771cfa453ae 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java @@ -31,15 +31,17 @@ package org.opensearch.action.admin.indices.template.delete; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport request builder for deleting an index template * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeleteIndexTemplateRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< DeleteIndexTemplateRequest, AcknowledgedResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComponentTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComponentTemplateAction.java index 326f1c8ec059c..30cb0cb3e5d00 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComponentTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComponentTemplateAction.java @@ -34,10 +34,9 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import 
org.opensearch.cluster.block.ClusterBlockLevel; @@ -45,6 +44,7 @@ import org.opensearch.cluster.metadata.MetadataIndexTemplateService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComposableIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComposableIndexTemplateAction.java index 3f071c4074074..27ea64809e3a7 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComposableIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComposableIndexTemplateAction.java @@ -34,10 +34,9 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -45,6 +44,7 @@ import org.opensearch.cluster.metadata.MetadataIndexTemplateService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java index 8503d7fea6e51..c9542c7a58810 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java @@ -34,10 +34,9 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -45,6 +44,7 @@ import org.opensearch.cluster.metadata.MetadataIndexTemplateService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git 
a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetComponentTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetComponentTemplateAction.java index ff038bd89a597..51bd63c8473e6 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetComponentTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetComponentTemplateAction.java @@ -33,12 +33,12 @@ package org.opensearch.action.admin.indices.template.get; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.ActionResponse; import org.opensearch.action.ActionType; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; import org.opensearch.cluster.metadata.ComponentTemplate; import org.opensearch.common.Nullable; import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContentObject; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java index 2902bd5b81eba..0fdb08285e8b5 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java @@ -33,12 +33,12 @@ package org.opensearch.action.admin.indices.template.get; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.ActionResponse; import org.opensearch.action.ActionType; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; import org.opensearch.cluster.metadata.ComposableIndexTemplate; import org.opensearch.common.Nullable; import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContentObject; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java index 1ac0f7e2d45a7..fa45efdc53124 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java @@ -33,9 +33,10 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import java.io.IOException; @@ -44,8 +45,9 @@ /** * Request that allows to retrieve index templates * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetIndexTemplatesRequest extends ClusterManagerNodeReadRequest<GetIndexTemplatesRequest> { private String[] names; diff --git 
a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java index 09de1733239fc..f8c02b4c8be08 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java @@ -33,12 +33,14 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Request builder to retrieve one or more Index templates * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetIndexTemplatesRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< GetIndexTemplatesRequest, GetIndexTemplatesResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java index d3be5de84d758..009a73b615e0c 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java @@ -31,8 +31,9 @@ package org.opensearch.action.admin.indices.template.get; -import org.opensearch.action.ActionResponse; import org.opensearch.cluster.metadata.IndexTemplateMetadata; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContent; @@ -49,8 +50,9 @@ /** * Response for retrieving one or more Index templates * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetIndexTemplatesResponse extends ActionResponse implements ToXContentObject { private final List<IndexTemplateMetadata> indexTemplates; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComponentTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComponentTemplateAction.java index 1ad2dc1636d40..e2594cd792cd3 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComponentTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComponentTemplateAction.java @@ -33,7 +33,6 @@ package org.opensearch.action.admin.indices.template.get; import org.opensearch.ResourceNotFoundException; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; import org.opensearch.cluster.ClusterState; @@ -43,8 +42,9 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.regex.Regex; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.threadpool.ThreadPool; import 
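GetIndexTemplatesRequest, its builder, and GetIndexTemplatesResponse are likewise promoted to @opensearch.api above. A minimal lookup sketch, assuming a transport client; the name pattern is hypothetical:

    // Fetch all legacy (v1) index templates whose names match "logs-*".
    GetIndexTemplatesResponse response = client.admin()
        .indices()
        .prepareGetTemplates("logs-*")
        .get();
    for (IndexTemplateMetadata template : response.getIndexTemplates()) {
        System.out.println(template.name());
    }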
org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComposableIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComposableIndexTemplateAction.java index 0bf5d64905a98..b1ef32db7274f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComposableIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComposableIndexTemplateAction.java @@ -33,7 +33,6 @@ package org.opensearch.action.admin.indices.template.get; import org.opensearch.ResourceNotFoundException; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; import org.opensearch.cluster.ClusterState; @@ -43,8 +42,9 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.regex.Regex; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java index c99e7aac09b98..10b4975f7b9d0 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java @@ -31,7 +31,6 @@ package org.opensearch.action.admin.indices.template.get; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; import org.opensearch.cluster.ClusterState; @@ -41,8 +40,9 @@ import org.opensearch.cluster.metadata.IndexTemplateMetadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.regex.Regex; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java index 66fcf0e7cf0ae..c4396f22d7c16 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java @@ -36,9 +36,10 @@ import org.opensearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import 
org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import java.io.IOException; import java.util.Objects; @@ -47,8 +48,9 @@ * Transport Request for handling simulating an index template either by name (looking it up in the * cluster state), or by a provided template configuration * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SimulateIndexTemplateRequest extends ClusterManagerNodeReadRequest<SimulateIndexTemplateRequest> { private String indexName; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java index 76c5a4cbd669c..c6a9d3530a8cc 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java @@ -32,10 +32,11 @@ package org.opensearch.action.admin.indices.template.post; -import org.opensearch.action.ActionResponse; import org.opensearch.cluster.metadata.Template; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContentObject; @@ -50,8 +51,9 @@ /** * Contains the information on what V2 templates would match a given index. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SimulateIndexTemplateResponse extends ActionResponse implements ToXContentObject { private static final ParseField TEMPLATE = new ParseField("template"); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java index 70af69bc77760..c1a02d813ffb2 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java @@ -33,7 +33,6 @@ package org.opensearch.action.admin.indices.template.post; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; import org.opensearch.cluster.ClusterState; @@ -52,8 +51,9 @@ import org.opensearch.common.UUIDs; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.index.mapper.DocumentMapper; import org.opensearch.index.mapper.MapperService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java 
b/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java index d50a8b199230a..6565896fd3db2 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java @@ -32,20 +32,20 @@ package org.opensearch.action.admin.indices.template.post; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; import org.opensearch.cluster.metadata.AliasValidator; -import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.metadata.ComposableIndexTemplate; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.metadata.MetadataIndexTemplateService; import org.opensearch.cluster.metadata.Template; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.indices.IndicesService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComponentTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComponentTemplateAction.java index 62fe778d3f922..d12f99ec345d3 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComponentTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComponentTemplateAction.java @@ -34,13 +34,13 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.metadata.ComponentTemplate; import org.opensearch.common.Nullable; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComposableIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComposableIndexTemplateAction.java index 63c3b492ff705..ed209e18b64ef 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComposableIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComposableIndexTemplateAction.java @@ -36,15 +36,16 @@ import org.opensearch.action.ActionType; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; -import org.opensearch.cluster.metadata.IndexMetadata; +import 
org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.metadata.ComposableIndexTemplate; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Nullable; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.regex.Regex; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; import java.io.IOException; import java.util.Objects; @@ -54,8 +55,9 @@ /** * An action for putting a composable template into the cluster state * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class PutComposableIndexTemplateAction extends ActionType<AcknowledgedResponse> { public static final PutComposableIndexTemplateAction INSTANCE = new PutComposableIndexTemplateAction(); @@ -68,8 +70,9 @@ private PutComposableIndexTemplateAction() { /** * A request for putting a single index template into the cluster state * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Request extends ClusterManagerNodeRequest<Request> implements IndicesRequest { private final String name; @Nullable diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequest.java index 099f7c34ff818..d4e9200508bfa 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequest.java @@ -41,11 +41,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.common.Nullable; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.LoggingDeprecationHandler; @@ -54,6 +50,10 @@ import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.common.xcontent.support.XContentMapValues; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -79,8 +79,9 @@ /** * A request to create an index template. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class PutIndexTemplateRequest extends ClusterManagerNodeRequest<PutIndexTemplateRequest> implements IndicesRequest, @@ -226,8 +227,8 @@ public PutIndexTemplateRequest settings(Settings.Builder settings) { /** * The settings to create the index template with (either json/yaml format). 
*/ - public PutIndexTemplateRequest settings(String source, XContentType xContentType) { - this.settings = Settings.builder().loadFromSource(source, xContentType).build(); + public PutIndexTemplateRequest settings(String source, MediaType mediaType) { + this.settings = Settings.builder().loadFromSource(source, mediaType).build(); return this; } @@ -298,7 +299,7 @@ public PutIndexTemplateRequest mapping(Map<String, Object> source) { try { XContentBuilder builder = XContentFactory.jsonBuilder(); builder.map(source); - mappings = Strings.toString(builder); + mappings = builder.toString(); return this; } catch (IOException e) { throw new OpenSearchGenerationException("Failed to generate [" + source + "]", e); @@ -398,15 +399,15 @@ public PutIndexTemplateRequest source(String templateSource, XContentType xConte /** * The template source definition. */ - public PutIndexTemplateRequest source(byte[] source, XContentType xContentType) { - return source(source, 0, source.length, xContentType); + public PutIndexTemplateRequest source(byte[] source, MediaType mediaType) { + return source(source, 0, source.length, mediaType); } /** * The template source definition. */ - public PutIndexTemplateRequest source(byte[] source, int offset, int length, XContentType xContentType) { - return source(new BytesArray(source, offset, length), xContentType); + public PutIndexTemplateRequest source(byte[] source, int offset, int length, MediaType mediaType) { + return source(new BytesArray(source, offset, length), mediaType); } /** diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java index eae2f9d9c94e0..931d12de574ae 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java @@ -32,13 +32,15 @@ package org.opensearch.action.admin.indices.template.put; import org.opensearch.action.admin.indices.alias.Alias; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; -import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.XContentBuilder; import java.util.List; import java.util.Map; @@ -46,8 +48,9 @@ /** * A request builder for putting an index template into the cluster state * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class PutIndexTemplateRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< PutIndexTemplateRequest, AcknowledgedResponse, @@ -113,8 +116,8 @@ public PutIndexTemplateRequestBuilder setSettings(Settings.Builder settings) { /** * The settings to crete the index template with (either json or yaml format) */ - public PutIndexTemplateRequestBuilder setSettings(String source, XContentType xContentType) { - request.settings(source, xContentType); + public 
PutIndexTemplateRequestBuilder setSettings(String source, MediaType mediaType) { + request.settings(source, mediaType); return this; } @@ -130,10 +133,10 @@ public PutIndexTemplateRequestBuilder setSettings(Map<String, Object> source) { * Adds mapping that will be added when the index template gets created. * * @param source The mapping source - * @param xContentType The type/format of the source + * @param mediaType The type/format of the source */ - public PutIndexTemplateRequestBuilder setMapping(String source, XContentType xContentType) { - request.mapping(source, xContentType); + public PutIndexTemplateRequestBuilder setMapping(String source, MediaType mediaType) { + request.mapping(source, mediaType); return this; } @@ -226,16 +229,16 @@ public PutIndexTemplateRequestBuilder setSource(Map<String, Object> templateSour /** * The template source definition. */ - public PutIndexTemplateRequestBuilder setSource(BytesReference templateSource, XContentType xContentType) { - request.source(templateSource, xContentType); + public PutIndexTemplateRequestBuilder setSource(BytesReference templateSource, MediaType mediaType) { + request.source(templateSource, mediaType); return this; } /** * The template source definition. */ - public PutIndexTemplateRequestBuilder setSource(byte[] templateSource, XContentType xContentType) { - request.source(templateSource, xContentType); + public PutIndexTemplateRequestBuilder setSource(byte[] templateSource, MediaType mediaType) { + request.source(templateSource, mediaType); return this; } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComponentTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComponentTemplateAction.java index e67062123ae16..6eb87bee9ffa7 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComponentTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComponentTemplateAction.java @@ -32,10 +32,9 @@ package org.opensearch.action.admin.indices.template.put; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -46,9 +45,10 @@ import org.opensearch.cluster.metadata.Template; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java index 2cc8dfbefed82..8a31c36d723b4 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java @@ -32,18 +32,18 @@ package org.opensearch.action.admin.indices.template.put; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; -import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.metadata.ComposableIndexTemplate; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.metadata.MetadataIndexTemplateService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java index 826aa888dbacf..4431949c2e42b 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java @@ -34,10 +34,9 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -46,9 +45,10 @@ import org.opensearch.cluster.metadata.MetadataIndexTemplateService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexShardUpgradeStatus.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexShardUpgradeStatus.java index 32cf6d4bfe70d..e760067e9f5be 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexShardUpgradeStatus.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexShardUpgradeStatus.java @@ -32,6 +32,7 @@ package 
org.opensearch.action.admin.indices.upgrade.get; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.index.shard.ShardId; import java.util.Arrays; @@ -40,8 +41,9 @@ /** * Status for an Index Shard Upgrade * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexShardUpgradeStatus implements Iterable<ShardUpgradeStatus> { private final ShardId shardId; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexUpgradeStatus.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexUpgradeStatus.java index 4513a321e2a51..fd8ddc1293aaf 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexUpgradeStatus.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexUpgradeStatus.java @@ -32,6 +32,8 @@ package org.opensearch.action.admin.indices.upgrade.get; +import org.opensearch.common.annotation.PublicApi; + import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; @@ -41,8 +43,9 @@ /** * Status for an Index Upgrade * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexUpgradeStatus implements Iterable<IndexShardUpgradeStatus> { private final String index; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/ShardUpgradeStatus.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/ShardUpgradeStatus.java index 57fb2513faf78..783b44ba6570d 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/ShardUpgradeStatus.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/ShardUpgradeStatus.java @@ -34,6 +34,7 @@ import org.opensearch.action.support.broadcast.BroadcastShardResponse; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -42,8 +43,9 @@ /** * Status for a Shard Upgrade * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ShardUpgradeStatus extends BroadcastShardResponse { private ShardRouting shardRouting; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java index 1d21ed8569e2f..a171355fb2049 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java @@ -34,7 +34,6 @@ import org.opensearch.Version; import org.opensearch.action.support.ActionFilters; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; @@ -44,6 +43,7 @@ import org.opensearch.cluster.routing.ShardsIterator; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.index.IndexService; import org.opensearch.index.engine.Segment; diff --git 
a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusRequest.java index 258010d29e828..2584ab6b370da 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusRequest.java @@ -33,16 +33,18 @@ package org.opensearch.action.admin.indices.upgrade.get; import org.opensearch.action.support.broadcast.BroadcastRequest; -import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; import java.io.IOException; /** * Transport Request for retrieving status of upgrading an Index * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class UpgradeStatusRequest extends BroadcastRequest<UpgradeStatusRequest> { public UpgradeStatusRequest() { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusRequestBuilder.java index c698c38fe12d5..ac5f881c35dc5 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport Request Builder for retrieving status of upgrading an Index * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class UpgradeStatusRequestBuilder extends BroadcastOperationRequestBuilder< UpgradeStatusRequest, UpgradeStatusResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java index 7fe663a347ee3..ba2915ee4ddf1 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java @@ -32,11 +32,12 @@ package org.opensearch.action.admin.indices.upgrade.get; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.broadcast.BroadcastResponse; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.XContentBuilder; import java.io.IOException; @@ -50,8 +51,9 @@ /** * Transport Response for retrieving status of upgrading an Index * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class UpgradeStatusResponse extends BroadcastResponse { private ShardUpgradeStatus[] shards; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java 
b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java index 5b6bd7414b154..2dcd030093a67 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java @@ -33,10 +33,8 @@ package org.opensearch.action.admin.indices.upgrade.post; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.PrimaryMissingActionException; import org.opensearch.action.support.ActionFilters; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.ClusterState; @@ -51,6 +49,8 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.collect.Tuple; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java index a73110c781ba7..286724d78bb63 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java @@ -35,10 +35,9 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ack.ClusterStateUpdateResponse; import org.opensearch.cluster.block.ClusterBlockException; @@ -47,6 +46,7 @@ import org.opensearch.cluster.metadata.MetadataUpdateSettingsService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeRequest.java index 98c307c37ea54..4df02ad7fa8c0 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeRequest.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.indices.upgrade.post; import org.opensearch.action.support.broadcast.BroadcastRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import 
org.opensearch.core.common.io.stream.StreamOutput; @@ -45,8 +46,9 @@ * @see org.opensearch.client.IndicesAdminClient#upgrade(UpgradeRequest) * @see UpgradeResponse * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class UpgradeRequest extends BroadcastRequest<UpgradeRequest> { /** diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeRequestBuilder.java index 8203f9d51b8e4..bf316504dc920 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeRequestBuilder.java @@ -34,13 +34,15 @@ import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * A request to upgrade one or more indices. In order to optimize on all the indices, pass an empty array or * {@code null} for the indices. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class UpgradeRequestBuilder extends BroadcastOperationRequestBuilder<UpgradeRequest, UpgradeResponse, UpgradeRequestBuilder> { public UpgradeRequestBuilder(OpenSearchClient client, UpgradeAction action) { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeResponse.java index f6d2bac786b5f..4bdf41dabba37 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeResponse.java @@ -33,9 +33,10 @@ package org.opensearch.action.admin.indices.upgrade.post; import org.opensearch.Version; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.broadcast.BroadcastResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.XContentBuilder; @@ -47,8 +48,9 @@ /** * A response for the upgrade action. 
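The same promotion pattern repeats across these files: the @opensearch.internal javadoc tag flips to @opensearch.api and the type gains the @PublicApi annotation, whose since value records the version in which the API first shipped. A condensed sketch of the resulting shape (class body elided):

import org.opensearch.common.annotation.PublicApi;

// Marks the type as part of the supported public API surface.
@PublicApi(since = "1.0.0")
public class UpgradeResponse extends BroadcastResponse { /* ... */ }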
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class UpgradeResponse extends BroadcastResponse { private final Map<String, Tuple<Version, String>> versions; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/QueryExplanation.java b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/QueryExplanation.java index 248cab5e40eaf..84aa687af0fe1 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/QueryExplanation.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/QueryExplanation.java @@ -32,6 +32,7 @@ package org.opensearch.action.admin.indices.validate.query; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -50,8 +51,9 @@ /** * Query Explanation * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class QueryExplanation implements Writeable, ToXContentFragment { public static final String INDEX_FIELD = "index"; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java index 9e1e3c6b1e1e7..fb6e78d3ca4fc 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java @@ -36,8 +36,8 @@ import org.opensearch.action.support.broadcast.BroadcastShardRequest; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.index.query.QueryBuilder; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.query.QueryBuilder; import org.opensearch.search.internal.AliasFilter; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/TransportValidateQueryAction.java b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/TransportValidateQueryAction.java index 7919b16aa3af3..2b46fbcb9d105 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/TransportValidateQueryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/TransportValidateQueryAction.java @@ -34,9 +34,7 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.broadcast.BroadcastShardOperationFailedException; import org.opensearch.action.support.broadcast.TransportBroadcastAction; import org.opensearch.cluster.ClusterState; @@ -46,11 +44,13 @@ import org.opensearch.cluster.routing.GroupShardsIterator; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.core.common.ParsingException; import org.opensearch.common.Randomness; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.lease.Releasables; +import org.opensearch.core.action.ActionListener; +import 
org.opensearch.core.action.support.DefaultShardOperationFailedException; +import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.query.ParsedQuery; import org.opensearch.index.query.QueryShardException; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryRequest.java index a3fa560b78d65..94bec696dd2c1 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryRequest.java @@ -37,11 +37,12 @@ import org.opensearch.action.ValidateActions; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.broadcast.BroadcastRequest; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.common.Strings; import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.index.query.QueryBuilder; @@ -53,8 +54,9 @@ * <p> * The request requires the query to be set using {@link #query(QueryBuilder)} * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ValidateQueryRequest extends BroadcastRequest<ValidateQueryRequest> implements ToXContentObject { private QueryBuilder query = new MatchAllQueryBuilder(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java index 6209f41d88be2..de4cf5ae2b904 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java @@ -34,13 +34,15 @@ import org.opensearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.query.QueryBuilder; /** * Transport Request Builder to Validate a Query * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ValidateQueryRequestBuilder extends BroadcastOperationRequestBuilder< ValidateQueryRequest, ValidateQueryResponse, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryResponse.java index 1d73d4821f2b4..791128491f33d 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryResponse.java @@ -32,9 +32,10 @@ package org.opensearch.action.admin.indices.validate.query; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.broadcast.BroadcastResponse; +import org.opensearch.common.annotation.PublicApi; import 
org.opensearch.core.ParseField; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ConstructingObjectParser; @@ -52,8 +53,9 @@ /** * The response of the validate action. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ValidateQueryResponse extends BroadcastResponse { public static final String VALID_FIELD = "valid"; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/view/CreateViewAction.java b/server/src/main/java/org/opensearch/action/admin/indices/view/CreateViewAction.java new file mode 100644 index 0000000000000..9faf25ce10732 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/view/CreateViewAction.java @@ -0,0 +1,279 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.ActionType; +import org.opensearch.action.ValidateActions; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.metadata.View; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.ValidationException; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.common.util.CollectionUtils; +import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +/** Action to create a view */ +@ExperimentalApi +public class CreateViewAction extends ActionType<GetViewAction.Response> { + + private static final int MAX_NAME_LENGTH = 64; + private static final int MAX_DESCRIPTION_LENGTH = 256; + private static final int MAX_TARGET_COUNT = 25; + private static final int MAX_TARGET_INDEX_PATTERN_LENGTH = 64; + + public static final CreateViewAction INSTANCE = new CreateViewAction(); + public static final String NAME = "cluster:admin/views/create"; + + private CreateViewAction() { + super(NAME, GetViewAction.Response::new); + } + + /** + * Request for Creating View + */ + @ExperimentalApi + public static class Request extends ClusterManagerNodeRequest<Request> { + private final String name; + private final String description; + private final List<Target> targets; + + public Request(final String 
name, final String description, final List<Target> targets) { + this.name = name; + this.description = Objects.requireNonNullElse(description, ""); + this.targets = targets; + } + + public Request(final StreamInput in) throws IOException { + super(in); + this.name = in.readString(); + this.description = in.readString(); + this.targets = in.readList(Target::new); + } + + public String getName() { + return name; + } + + public String getDescription() { + return description; + } + + public List<Target> getTargets() { + return new ArrayList<>(targets); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request that = (Request) o; + return name.equals(that.name) && description.equals(that.description) && targets.equals(that.targets); + } + + @Override + public int hashCode() { + return Objects.hash(name, description, targets); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (Strings.isNullOrEmpty(name)) { + validationException = ValidateActions.addValidationError("name cannot be empty or null", validationException); + } + if (name != null && name.length() > MAX_NAME_LENGTH) { + validationException = ValidateActions.addValidationError( + "name must be less than " + MAX_NAME_LENGTH + " characters in length", + validationException + ); + } + if (description != null && description.length() > MAX_DESCRIPTION_LENGTH) { + validationException = ValidateActions.addValidationError( + "description must be less than " + MAX_DESCRIPTION_LENGTH + " characters in length", + validationException + ); + } + if (CollectionUtils.isEmpty(targets)) { + validationException = ValidateActions.addValidationError("targets cannot be empty", validationException); + } else { + if (targets.size() > MAX_TARGET_COUNT) { + validationException = ValidateActions.addValidationError( + "view cannot have more than " + MAX_TARGET_COUNT + " targets", + validationException + ); + } + for (final Target target : targets) { + final var validationMessages = Optional.ofNullable(target.validate()) + .map(ValidationException::validationErrors) + .orElse(List.of()); + for (final String validationMessage : validationMessages) { + validationException = ValidateActions.addValidationError(validationMessage, validationException); + } + } + } + + return validationException; + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(name); + out.writeString(description); + out.writeList(targets); + } + + /** View target representation for create requests */ + @ExperimentalApi + public static class Target implements Writeable { + public final String indexPattern; + + public Target(final String indexPattern) { + this.indexPattern = indexPattern; + } + + public Target(final StreamInput in) throws IOException { + this.indexPattern = in.readString(); + } + + public String getIndexPattern() { + return indexPattern; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Target that = (Target) o; + return indexPattern.equals(that.indexPattern); + } + + @Override + public int hashCode() { + return Objects.hash(indexPattern); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(indexPattern); + } + + public ActionRequestValidationException validate() { + 
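+                // ValidateActions.addValidationError lazily creates the exception on the
+                // first error and appends subsequent messages, so a null result below
+                // signals a valid target.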
ActionRequestValidationException validationException = null; + + if (Strings.isNullOrEmpty(indexPattern)) { + validationException = ValidateActions.addValidationError("index pattern cannot be empty or null", validationException); + } + if (indexPattern != null && indexPattern.length() > MAX_TARGET_INDEX_PATTERN_LENGTH) { + validationException = ValidateActions.addValidationError( + "target index pattern must be less than " + MAX_TARGET_INDEX_PATTERN_LENGTH + " characters in length", + validationException + ); + } + + return validationException; + } + + private static final ConstructingObjectParser<Target, Void> PARSER = new ConstructingObjectParser<>( + "target", + args -> new Target((String) args[0]) + ); + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), View.Target.INDEX_PATTERN_FIELD); + } + + public static Target fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + } + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser<Request, Void> PARSER = new ConstructingObjectParser<>( + "create_view_request", + args -> new Request((String) args[0], (String) args[1], (List<Target>) args[2]) + ); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), View.NAME_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), View.DESCRIPTION_FIELD); + PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), (p, c) -> Target.fromXContent(p), View.TARGETS_FIELD); + } + + public static Request fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + } + + /** + * Transport Action for creating a View + */ + public static class TransportAction extends TransportClusterManagerNodeAction<Request, GetViewAction.Response> { + + private final ViewService viewService; + + @Inject + public TransportAction( + final TransportService transportService, + final ClusterService clusterService, + final ThreadPool threadPool, + final ActionFilters actionFilters, + final IndexNameExpressionResolver indexNameExpressionResolver, + final ViewService viewService + ) { + super(NAME, transportService, clusterService, threadPool, actionFilters, Request::new, indexNameExpressionResolver); + this.viewService = viewService; + } + + @Override + protected String executor() { + return ThreadPool.Names.MANAGEMENT; + } + + @Override + protected GetViewAction.Response read(final StreamInput in) throws IOException { + return new GetViewAction.Response(in); + } + + @Override + protected void clusterManagerOperation( + final Request request, + final ClusterState state, + final ActionListener<GetViewAction.Response> listener + ) throws Exception { + viewService.createView(request, listener); + } + + @Override + protected ClusterBlockException checkBlock(final Request request, final ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/view/DeleteViewAction.java b/server/src/main/java/org/opensearch/action/admin/indices/view/DeleteViewAction.java new file mode 100644 index 0000000000000..abb3c3f4db5f6 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/view/DeleteViewAction.java @@ -0,0 +1,156 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible 
open source license. + */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.ActionType; +import org.opensearch.action.ValidateActions; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.metadata.View; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.Objects; + +/** Action to delete a view */ +@SuppressWarnings("deprecation") +@ExperimentalApi +public class DeleteViewAction extends ActionType<AcknowledgedResponse> { + + public static final DeleteViewAction INSTANCE = new DeleteViewAction(); + public static final String NAME = "cluster:admin/views/delete"; + + public DeleteViewAction() { + super(NAME, AcknowledgedResponse::new); + } + + /** Request for delete view */ + @ExperimentalApi + public static class Request extends ClusterManagerNodeRequest<Request> { + private final String name; + + public Request(final String name) { + this.name = name; + } + + public Request(final StreamInput in) throws IOException { + super(in); + this.name = in.readString(); + } + + public String getName() { + return name; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + final Request that = (Request) o; + return name.equals(that.name); + } + + @Override + public int hashCode() { + return Objects.hash(name); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (Strings.isNullOrEmpty(name)) { + validationException = ValidateActions.addValidationError("name cannot be empty or null", validationException); + } + + return validationException; + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(name); + } + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser<Request, Void> PARSER = new ConstructingObjectParser<>( + "delete_view_request", + args -> new Request((String) args[0]) + ); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), View.NAME_FIELD); + } + + public static Request fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + } + + /** + * Transport Action for deleting a View + */ + public static class TransportAction extends TransportClusterManagerNodeAction<Request, AcknowledgedResponse> { + 
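+        // Runs on the elected cluster manager; the metadata change itself is
+        // delegated to ViewService in clusterManagerOperation below.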
+ private final ViewService viewService; + + @Inject + public TransportAction( + final TransportService transportService, + final ClusterService clusterService, + final ThreadPool threadPool, + final ActionFilters actionFilters, + final IndexNameExpressionResolver indexNameExpressionResolver, + final ViewService viewService + ) { + super(NAME, transportService, clusterService, threadPool, actionFilters, Request::new, indexNameExpressionResolver); + this.viewService = viewService; + } + + @Override + protected String executor() { + return ThreadPool.Names.MANAGEMENT; + } + + @Override + protected AcknowledgedResponse read(final StreamInput in) throws IOException { + return new AcknowledgedResponse(in); + } + + @Override + protected void clusterManagerOperation( + final Request request, + final ClusterState state, + final ActionListener<AcknowledgedResponse> listener + ) throws Exception { + viewService.deleteView(request, listener); + } + + @Override + protected ClusterBlockException checkBlock(final Request request, final ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/view/GetViewAction.java b/server/src/main/java/org/opensearch/action/admin/indices/view/GetViewAction.java new file mode 100644 index 0000000000000..762eea965c8c1 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/view/GetViewAction.java @@ -0,0 +1,214 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.ActionType; +import org.opensearch.action.ValidateActions; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.metadata.View; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.ToXContent.Params; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.Objects; + +/** Action to get a view */ +@ExperimentalApi +public class GetViewAction extends ActionType<GetViewAction.Response> { + + public static final GetViewAction INSTANCE = new GetViewAction(); + public static final String NAME = 
"views:data/read/get"; + + public GetViewAction() { + super(NAME, GetViewAction.Response::new); + } + + /** Request for get view */ + @ExperimentalApi + public static class Request extends ClusterManagerNodeRequest<Request> { + private final String name; + + public Request(final String name) { + this.name = name; + } + + public Request(final StreamInput in) throws IOException { + super(in); + this.name = in.readString(); + } + + public String getName() { + return name; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + final Request that = (Request) o; + return name.equals(that.name); + } + + @Override + public int hashCode() { + return Objects.hash(name); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (Strings.isNullOrEmpty(name)) { + validationException = ValidateActions.addValidationError("name cannot be empty or null", validationException); + } + + return validationException; + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(name); + } + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser<Request, Void> PARSER = new ConstructingObjectParser<>( + "get_view_request", + args -> new Request((String) args[0]) + ); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), View.NAME_FIELD); + } + + public static Request fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + } + + /** Response with a view */ + @ExperimentalApi + public static class Response extends ActionResponse implements ToXContentObject { + + private final View view; + + public Response(final View view) { + this.view = view; + } + + public Response(final StreamInput in) throws IOException { + super(in); + this.view = new View(in); + } + + public View getView() { + return view; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + final Response that = (Response) o; + return getView().equals(that.getView()); + } + + @Override + public int hashCode() { + return Objects.hash(getView()); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + this.view.writeTo(out); + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(); + builder.field("view", view); + builder.endObject(); + return builder; + } + + private static final ConstructingObjectParser<Response, Void> PARSER = new ConstructingObjectParser<>( + "view_response", + args -> new Response((View) args[0]) + ); + static { + PARSER.declareObject(ConstructingObjectParser.constructorArg(), View.PARSER, new ParseField("view")); + } + + public static Response fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + } + + /** + * Transport Action for getting a View + */ + public static class TransportAction extends TransportClusterManagerNodeAction<Request, Response> { + + private final ViewService viewService; + + @Inject + public TransportAction( + final TransportService transportService, + final ClusterService clusterService, + final ThreadPool threadPool, + final ActionFilters actionFilters, + final IndexNameExpressionResolver indexNameExpressionResolver, 
+ final ViewService viewService + ) { + super(NAME, transportService, clusterService, threadPool, actionFilters, Request::new, indexNameExpressionResolver); + this.viewService = viewService; + } + + @Override + protected String executor() { + return ThreadPool.Names.MANAGEMENT; + } + + @Override + protected Response read(final StreamInput in) throws IOException { + return new Response(in); + } + + @Override + protected void clusterManagerOperation(final Request request, final ClusterState state, final ActionListener<Response> listener) + throws Exception { + viewService.getView(request, listener); + } + + @Override + protected ClusterBlockException checkBlock(final Request request, final ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + } + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/view/ListViewNamesAction.java b/server/src/main/java/org/opensearch/action/admin/indices/view/ListViewNamesAction.java new file mode 100644 index 0000000000000..eac0b1d5558ca --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/view/ListViewNamesAction.java @@ -0,0 +1,133 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.action.ActionRequest; +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.ActionType; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.HandledTransportAction; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.tasks.Task; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +/** Action to list view names */ +@ExperimentalApi +public class ListViewNamesAction extends ActionType<ListViewNamesAction.Response> { + + public static final ListViewNamesAction INSTANCE = new ListViewNamesAction(); + public static final String NAME = "views:data/read/list"; + + public ListViewNamesAction() { + super(NAME, ListViewNamesAction.Response::new); + } + + /** Request for list view names */ + @ExperimentalApi + public static class Request extends ActionRequest { + public Request() {} + + public Request(final StreamInput in) {} + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + final Request that = (Request) o; + return true; + } + + @Override + public int hashCode() { + return 1; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + } + + /** Response for list view names */ + @ExperimentalApi + public static class Response extends ActionResponse implements ToXContentObject { + + private final List<String> views; + + public Response(final List<String> views) { + this.views = views; + } + + public Response(final StreamInput in) throws IOException { + views = in.readStringList(); 
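+            // The names are round-tripped as a string collection (see writeTo below).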
+ } + + public List<String> getViewNames() { + return views; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + final Response that = (Response) o; + return views.equals(that.views); + } + + @Override + public int hashCode() { + return Objects.hash(views); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeStringCollection(views); + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(); + builder.field("views", views); + builder.endObject(); + return builder; + } + } + + /** + * Transport Action for listing View names + */ + public static class TransportAction extends HandledTransportAction<Request, Response> { + + private final ViewService viewService; + + @Inject + public TransportAction(final TransportService transportService, final ActionFilters actionFilters, final ViewService viewService) { + super(NAME, transportService, actionFilters, Request::new); + this.viewService = viewService; + } + + @Override + protected void doExecute(Task task, Request request, ActionListener<Response> listener) { + viewService.listViewNames(listener); + } + + } + +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/view/SearchViewAction.java b/server/src/main/java/org/opensearch/action/admin/indices/view/SearchViewAction.java new file mode 100644 index 0000000000000..1e20221242f06 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/view/SearchViewAction.java @@ -0,0 +1,128 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.ActionType; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.HandledTransportAction; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.tasks.Task; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.Objects; +import java.util.function.Function; + +import static org.opensearch.action.ValidateActions.addValidationError; + +/** Action to search a view */ +@ExperimentalApi +public class SearchViewAction extends ActionType<SearchResponse> { + + public static final SearchViewAction INSTANCE = new SearchViewAction(); + public static final String NAME = "views:data/read/search"; + + private SearchViewAction() { + super(NAME, SearchResponse::new); + } + + /** + * Wraps the functionality of search requests and tailors it to what is available + * when searching through views + */ + @ExperimentalApi + public static class Request extends SearchRequest { + + private final String view; + + public Request(final String view, final SearchRequest searchRequest) { + super(searchRequest); + this.view = view; + } + + public Request(final StreamInput in) throws IOException { + super(in); + view = in.readString(); + } + + public String getView() { + return view; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request that = (Request) o; + return view.equals(that.view) && super.equals(that); + } + + @Override + public int hashCode() { + return Objects.hash(view, super.hashCode()); + } + + @Override + public ActionRequestValidationException validate() { + final Function<String, String> unsupported = (String x) -> x + " is not supported when searching views"; + ActionRequestValidationException validationException = super.validate(); + + if (scroll() != null) { + validationException = addValidationError(unsupported.apply("Scroll"), validationException); + } + + // TODO: Filter out any additional search features that are not supported. + // Required before removing @ExperimentalApi annotations. 
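+            // Beyond feature gating, the target view name itself must be present for
+            // the request to be resolvable.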
+ + if (Strings.isNullOrEmpty(view)) { + validationException = addValidationError("View is required", validationException); + } + + return validationException; + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(view); + } + + @Override + public String toString() { + return super.toString().replace("SearchRequest{", "SearchViewAction.Request{view=" + view + ","); + } + } + + /** + * Transport Action for searching a View + */ + public static class TransportAction extends HandledTransportAction<Request, SearchResponse> { + + private final ViewService viewService; + + @Inject + public TransportAction(final TransportService transportService, final ActionFilters actionFilters, final ViewService viewService) { + super(NAME, transportService, actionFilters, Request::new); + this.viewService = viewService; + } + + @Override + protected void doExecute(final Task task, final Request request, final ActionListener<SearchResponse> listener) { + viewService.searchView(request, listener); + } + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/view/UpdateViewAction.java b/server/src/main/java/org/opensearch/action/admin/indices/view/UpdateViewAction.java new file mode 100644 index 0000000000000..9182684c73a0b --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/view/UpdateViewAction.java @@ -0,0 +1,121 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.action.ActionType; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.metadata.View; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.List; + +/** Action to update a view */ +@ExperimentalApi +public class UpdateViewAction extends ActionType<GetViewAction.Response> { + + public static final UpdateViewAction INSTANCE = new UpdateViewAction(); + public static final String NAME = "cluster:admin/views/update"; + + public UpdateViewAction() { + super(NAME, GetViewAction.Response::new); + } + + /** Request for update view */ + @ExperimentalApi + public static class Request { + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser<CreateViewAction.Request, String> PARSER = new ConstructingObjectParser<>( + "create_view_request", + false, + (args, viewName) -> new CreateViewAction.Request(viewName, (String) args[0], (List<CreateViewAction.Request.Target>) args[1]) + ); + + static { + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), 
View.DESCRIPTION_FIELD); + PARSER.declareObjectArray( + ConstructingObjectParser.constructorArg(), + (p, c) -> CreateViewAction.Request.Target.fromXContent(p), + View.TARGETS_FIELD + ); + } + + public static CreateViewAction.Request fromXContent(final XContentParser parser, final String viewName) throws IOException { + return PARSER.parse(parser, viewName); + } + } + + /** + * Transport Action for updating a View + */ + @ExperimentalApi + public static class TransportAction extends TransportClusterManagerNodeAction<CreateViewAction.Request, GetViewAction.Response> { + + private final ViewService viewService; + + @Inject + public TransportAction( + final TransportService transportService, + final ClusterService clusterService, + final ThreadPool threadPool, + final ActionFilters actionFilters, + final IndexNameExpressionResolver indexNameExpressionResolver, + final ViewService viewService + ) { + super( + NAME, + transportService, + clusterService, + threadPool, + actionFilters, + CreateViewAction.Request::new, + indexNameExpressionResolver + ); + this.viewService = viewService; + } + + @Override + protected String executor() { + return ThreadPool.Names.MANAGEMENT; + } + + @Override + protected GetViewAction.Response read(final StreamInput in) throws IOException { + return new GetViewAction.Response(in); + } + + @Override + protected void clusterManagerOperation( + final CreateViewAction.Request request, + final ClusterState state, + final ActionListener<GetViewAction.Response> listener + ) throws Exception { + viewService.updateView(request, listener); + } + + @Override + protected ClusterBlockException checkBlock(final CreateViewAction.Request request, final ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + } + +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/view/ViewAlreadyExistsException.java b/server/src/main/java/org/opensearch/action/admin/indices/view/ViewAlreadyExistsException.java new file mode 100644 index 0000000000000..90a69158286b4 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/view/ViewAlreadyExistsException.java @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.ResourceAlreadyExistsException; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.common.io.stream.StreamInput; + +import java.io.IOException; + +/** Exception thrown when a view already exists */ +@ExperimentalApi +public class ViewAlreadyExistsException extends ResourceAlreadyExistsException { + + public ViewAlreadyExistsException(final String viewName) { + super("View [{}] already exists", viewName); + } + + public ViewAlreadyExistsException(final StreamInput in) throws IOException { + super(in); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/view/ViewNotFoundException.java b/server/src/main/java/org/opensearch/action/admin/indices/view/ViewNotFoundException.java new file mode 100644 index 0000000000000..3a90e6b0bc791 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/view/ViewNotFoundException.java @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.view; + +import org.opensearch.ResourceNotFoundException; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.common.io.stream.StreamInput; + +import java.io.IOException; + +/** Exception thrown when a view is not found */ +@ExperimentalApi +public class ViewNotFoundException extends ResourceNotFoundException { + + public ViewNotFoundException(final String viewName) { + super("View [{}] does not exist", viewName); + } + + public ViewNotFoundException(final StreamInput in) throws IOException { + super(in); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/view/ViewService.java b/server/src/main/java/org/opensearch/action/admin/indices/view/ViewService.java new file mode 100644 index 0000000000000..294f88decba1f --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/view/ViewService.java @@ -0,0 +1,178 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.view; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.search.SearchAction; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.client.node.NodeClient; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.ClusterStateUpdateTask; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.View; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.action.ActionListener; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.TreeSet; +import java.util.function.LongSupplier; +import java.util.stream.Collectors; + +/** Service to interact with views, create, retrieve, update, and delete */ +@ExperimentalApi +public class ViewService { + + private final static Logger LOG = LogManager.getLogger(ViewService.class); + private final ClusterService clusterService; + private final NodeClient client; + private final LongSupplier timeProvider; + + public ViewService(final ClusterService clusterService, final NodeClient client, final LongSupplier timeProvider) { + this.clusterService = clusterService; + this.client = client; + this.timeProvider = Optional.ofNullable(timeProvider).orElse(System::currentTimeMillis); + } + + public void createView(final CreateViewAction.Request request, final ActionListener<GetViewAction.Response> listener) { + final long currentTime = timeProvider.getAsLong(); + + final List<View.Target> targets = request.getTargets() + .stream() + .map(target -> new View.Target(target.getIndexPattern())) + .collect(Collectors.toList()); + final View view = new View(request.getName(), request.getDescription(), currentTime, currentTime, new TreeSet<>(targets)); + + createOrUpdateView(Operation.CreateView, view, listener); + } + + public void updateView(final CreateViewAction.Request request, final ActionListener<GetViewAction.Response> listener) { + final View originalView = getViewOrThrowException(request.getName()); + + final long currentTime = timeProvider.getAsLong(); + final List<View.Target> targets = request.getTargets() + .stream() + .map(target -> new View.Target(target.getIndexPattern())) + .collect(Collectors.toList()); + final View updatedView = new View( + request.getName(), + request.getDescription(), + originalView.getCreatedAt(), + currentTime, + new TreeSet<>(targets) + ); + + createOrUpdateView(Operation.UpdateView, updatedView, listener); + } + + public void deleteView(final DeleteViewAction.Request request, final ActionListener<AcknowledgedResponse> listener) { + getViewOrThrowException(request.getName()); + + clusterService.submitStateUpdateTask("delete_view_task", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(final ClusterState currentState) throws Exception { + return new ClusterState.Builder(clusterService.state()).metadata( + Metadata.builder(currentState.metadata()).removeView(request.getName()) + ).build(); + } + + @Override + public void onFailure(final String source, final Exception e) { + LOG.error("Unable to delete view, from " + source, e); + listener.onFailure(e); + } + + @Override + public void clusterStateProcessed(final String source, final ClusterState oldState, final ClusterState newState) { + 
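+                // The view's removal has already been applied to the cluster state
+                // at this point, so the delete can simply be acknowledged.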
listener.onResponse(new AcknowledgedResponse(true)); + } + }); + } + + public void getView(final GetViewAction.Request request, final ActionListener<GetViewAction.Response> listener) { + final View view = getViewOrThrowException(request.getName()); + + listener.onResponse(new GetViewAction.Response(view)); + } + + public void listViewNames(final ActionListener<ListViewNamesAction.Response> listener) { + final List<String> viewNames = new ArrayList<>( + Optional.ofNullable(clusterService) + .map(ClusterService::state) + .map(ClusterState::metadata) + .map(Metadata::views) + .map(Map::keySet) + .orElseThrow() + ); + + listener.onResponse(new ListViewNamesAction.Response(viewNames)); + } + + public void searchView(final SearchViewAction.Request request, final ActionListener<SearchResponse> listener) { + final View view = getViewOrThrowException(request.getView()); + + final String[] indices = view.getTargets().stream().map(View.Target::getIndexPattern).toArray(String[]::new); + request.indices(indices); + + client.executeLocally(SearchAction.INSTANCE, request, listener); + } + + View getViewOrThrowException(final String viewName) { + return Optional.ofNullable(clusterService) + .map(ClusterService::state) + .map(ClusterState::metadata) + .map(Metadata::views) + .map(views -> views.get(viewName)) + .orElseThrow(() -> new ViewNotFoundException(viewName)); + } + + private enum Operation { + CreateView("create", false), + UpdateView("update", true); + + private final String name; + private final boolean allowOverriding; + + Operation(final String name, final boolean allowOverriding) { + this.name = name; + this.allowOverriding = allowOverriding; + } + } + + private void createOrUpdateView(final Operation operation, final View view, final ActionListener<GetViewAction.Response> listener) { + clusterService.submitStateUpdateTask(operation.name + "_view_task", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(final ClusterState currentState) throws Exception { + if (!operation.allowOverriding && currentState.metadata().views().containsKey(view.getName())) { + throw new ViewAlreadyExistsException(view.getName()); + } + return new ClusterState.Builder(clusterService.state()).metadata(Metadata.builder(currentState.metadata()).put(view)) + .build(); + } + + @Override + public void onFailure(final String source, final Exception e) { + LOG.error("Unable to " + operation.name + " view, from " + source, e); + listener.onFailure(e); + } + + @Override + public void clusterStateProcessed(final String source, final ClusterState oldState, final ClusterState newState) { + final View createdView = newState.getMetadata().views().get(view.getName()); + final GetViewAction.Response response = new GetViewAction.Response(createdView); + listener.onResponse(response); + } + }); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/view/package-info.java b/server/src/main/java/org/opensearch/action/admin/indices/view/package-info.java new file mode 100644 index 0000000000000..db0556b1bf334 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/view/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** View transport handlers. 
*/ +package org.opensearch.action.admin.indices.view; diff --git a/server/src/main/java/org/opensearch/action/bulk/BackoffPolicy.java b/server/src/main/java/org/opensearch/action/bulk/BackoffPolicy.java index 0d6d122e31261..25a2c081f8441 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BackoffPolicy.java +++ b/server/src/main/java/org/opensearch/action/bulk/BackoffPolicy.java @@ -40,9 +40,9 @@ /** * Provides a backoff policy for bulk requests. Whenever a bulk request is rejected due to resource constraints (i.e. the client's internal * thread pool is full), the backoff policy decides how long the bulk processor will wait before the operation is retried internally. - * + * <p> * Notes for implementing custom subclasses: - * + * <p> * The underlying mathematical principle of <code>BackoffPolicy</code> are progressions which can be either finite or infinite although * the latter should not be used for retrying. A progression can be mapped to a <code>java.util.Iterator</code> with the following * semantics: @@ -241,7 +241,7 @@ private static class ExponentialEqualJitterBackoffIterator implements Iterator<T /** * Retry limit to avoids integer overflow issues. * Post this limit, max delay will be returned with Equal Jitter. - * + * <p> * NOTE: If the value is greater than 30, there can be integer overflow * issues during delay calculation. **/ diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkItemRequest.java b/server/src/main/java/org/opensearch/action/bulk/BulkItemRequest.java index 2a85b7abb741a..c0e0a5d8532b6 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkItemRequest.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkItemRequest.java @@ -36,12 +36,12 @@ import org.apache.lucene.util.RamUsageEstimator; import org.opensearch.action.DocWriteRequest; import org.opensearch.common.Nullable; -import org.opensearch.common.Strings; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.MediaTypeRegistry; import java.io.IOException; import java.util.Objects; @@ -114,7 +114,7 @@ public void abort(String index, Exception cause) { setPrimaryResponse(new BulkItemResponse(id, request.opType(), failure)); } else { assert primaryResponse.isFailed() && primaryResponse.getFailure().isAborted() : "response [" - + Strings.toString(XContentType.JSON, primaryResponse) + + Strings.toString(MediaTypeRegistry.JSON, primaryResponse) + "]; cause [" + cause + "]"; diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java b/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java index 26cd318e7a280..81e8940f1b505 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java @@ -41,35 +41,37 @@ import org.opensearch.action.index.IndexResponse; import org.opensearch.action.update.UpdateResponse; import org.opensearch.common.CheckedConsumer; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.xcontent.StatusToXContentObject; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import 
org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.xcontent.StatusToXContentObject; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.ParseField; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.seqno.SequenceNumbers; -import org.opensearch.core.index.shard.ShardId; -import org.opensearch.core.rest.RestStatus; import java.io.IOException; -import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; -import static org.opensearch.core.xcontent.XContentParserUtils.throwUnknownField; import static org.opensearch.core.xcontent.ConstructingObjectParser.constructorArg; import static org.opensearch.core.xcontent.ConstructingObjectParser.optionalConstructorArg; +import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; +import static org.opensearch.core.xcontent.XContentParserUtils.throwUnknownField; /** * Represents a single item response for an action executed as part of the bulk API. Holds the index/type/id * of the relevant action, and if it has failed or not (with the failure message in case it failed). * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class BulkItemResponse implements Writeable, StatusToXContentObject { private static final String _INDEX = "_index"; @@ -179,8 +181,9 @@ public static BulkItemResponse fromXContent(XContentParser parser, int id) throw /** * Represents a failure. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Failure implements Writeable, ToXContentFragment { public static final String INDEX_FIELD = "index"; public static final String ID_FIELD = "id"; @@ -209,7 +212,7 @@ public static class Failure implements Writeable, ToXContentFragment { /** * For write failures before operation was assigned a sequence number. 
- * + * <p> * use @{link {@link #Failure(String, String, Exception, long, long)}} * to record operation sequence no with failure */ @@ -368,7 +371,7 @@ public static Failure fromXContent(XContentParser parser) { @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } } diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkPrimaryExecutionContext.java b/server/src/main/java/org/opensearch/action/bulk/BulkPrimaryExecutionContext.java index 896456089ee3e..4e770f5851bc6 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkPrimaryExecutionContext.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkPrimaryExecutionContext.java @@ -232,6 +232,7 @@ public void resetForExecutionForRetry() { currentItemState = ItemProcessingState.INITIAL; requestToExecute = null; executionResult = null; + retryCounter++; assertInvariants(ItemProcessingState.INITIAL); } diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkProcessor.java b/server/src/main/java/org/opensearch/action/bulk/BulkProcessor.java index 4695b44c4986b..141ec24fc390f 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkProcessor.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkProcessor.java @@ -32,19 +32,20 @@ package org.opensearch.action.bulk; -import org.opensearch.action.ActionListener; import org.opensearch.action.DocWriteRequest; import org.opensearch.action.delete.DeleteRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.client.Client; import org.opensearch.common.Nullable; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.xcontent.MediaType; import org.opensearch.threadpool.Scheduler; import org.opensearch.threadpool.ThreadPool; @@ -184,7 +185,7 @@ public Builder setGlobalPipeline(String globalPipeline) { /** * Sets a custom backoff policy. The backoff policy defines how the bulk processor should handle retries of bulk requests internally * in case they have failed due to resource constraints (i.e. a thread pool was full). - * + * <p> * The default is to back off exponentially. 
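+         * (The default, {@link BackoffPolicy#exponentialBackoff()}, uses a 50ms
+         * initial delay and performs at most 8 retries.)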
* * @see org.opensearch.action.bulk.BackoffPolicy#exponentialBackoff() @@ -457,17 +458,13 @@ public BulkProcessor add(BytesReference data, @Nullable String defaultIndex, XCo /** * Adds the data from the bytes to be processed by the bulk processor */ - public BulkProcessor add( - BytesReference data, - @Nullable String defaultIndex, - @Nullable String defaultPipeline, - XContentType xContentType - ) throws Exception { + public BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultPipeline, MediaType mediaType) + throws Exception { Tuple<BulkRequest, Long> bulkRequestToExecute = null; lock.lock(); try { ensureOpen(); - bulkRequest.add(data, defaultIndex, null, null, defaultPipeline, null, true, xContentType); + bulkRequest.add(data, defaultIndex, null, null, defaultPipeline, null, true, mediaType); bulkRequestToExecute = newBulkRequestIfNeeded(); } finally { lock.unlock(); diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java b/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java index a3294e160659b..47abd0337fcf9 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java @@ -45,12 +45,13 @@ import org.opensearch.action.support.replication.ReplicationRequest; import org.opensearch.action.update.UpdateRequest; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.MediaType; import org.opensearch.search.fetch.subphase.FetchSourceContext; @@ -67,12 +68,13 @@ /** * A bulk request holds an ordered {@link IndexRequest}s, {@link DeleteRequest}s and {@link UpdateRequest}s * and allows to executes it in a single batch. - * + * <p> * Note that we only support refresh on the bulk request not per item. * @see org.opensearch.client.Client#bulk(BulkRequest) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class BulkRequest extends ActionRequest implements CompositeIndicesRequest, WriteRequest<BulkRequest>, Accountable { private static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(BulkRequest.class); @@ -123,7 +125,7 @@ public BulkRequest add(DocWriteRequest<?>... requests) { /** * Add a request to the current BulkRequest. - * + * <p> * Note for internal callers: This method does not respect all global parameters. * Only the global index is applied to the request objects. 
* Global parameters would be respected if the request was serialized for a REST call as it is @@ -347,7 +349,7 @@ public final BulkRequest timeout(TimeValue timeout) { /** * Note for internal callers (NOT high level rest client), * the global parameter setting is ignored when used with: - * + * <p> * - {@link BulkRequest#add(IndexRequest)} * - {@link BulkRequest#add(UpdateRequest)} * - {@link BulkRequest#add(DocWriteRequest)} @@ -364,7 +366,7 @@ public final BulkRequest pipeline(String globalPipeline) { /** * Note for internal callers (NOT high level rest client), * the global parameter setting is ignored when used with: - * + * <p> - {@link BulkRequest#add(IndexRequest)} - {@link BulkRequest#add(UpdateRequest)} - {@link BulkRequest#add(DocWriteRequest)} @@ -404,7 +406,7 @@ public Boolean requireAlias() { /** * Note for internal callers (NOT high level rest client), * the global parameter setting is ignored when used with: - * + * <p> * - {@link BulkRequest#add(IndexRequest)} * - {@link BulkRequest#add(UpdateRequest)} * - {@link BulkRequest#add(DocWriteRequest)} diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkRequestBuilder.java b/server/src/main/java/org/opensearch/action/bulk/BulkRequestBuilder.java index 08eee82a53cf9..a165d186d3878 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkRequestBuilder.java @@ -44,6 +44,7 @@ import org.opensearch.action.update.UpdateRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.xcontent.MediaType; @@ -52,8 +53,9 @@ * A bulk request holds an ordered {@link IndexRequest}s and {@link DeleteRequest}s and allows to executes * it in a single batch. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class BulkRequestBuilder extends ActionRequestBuilder<BulkRequest, BulkResponse> implements WriteRequestBuilder<BulkRequestBuilder> { public BulkRequestBuilder(OpenSearchClient client, BulkAction action, @Nullable String globalIndex) { diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkRequestHandler.java b/server/src/main/java/org/opensearch/action/bulk/BulkRequestHandler.java index 541d29102ecd2..ff40a12cb9087 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkRequestHandler.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkRequestHandler.java @@ -34,7 +34,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; +import org.opensearch.core.action.ActionListener; import org.opensearch.threadpool.Scheduler; import java.util.concurrent.CountDownLatch; diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkRequestParser.java b/server/src/main/java/org/opensearch/action/bulk/BulkRequestParser.java index 3a86ccd578991..3fadfe5f2cd6a 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkRequestParser.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkRequestParser.java @@ -37,16 +37,17 @@ import org.opensearch.action.index.IndexRequest; import org.opensearch.action.update.UpdateRequest; import org.opensearch.common.Nullable; +import org.opensearch.common.lucene.uid.Versions; +import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.ParseField; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.lucene.uid.Versions; -import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContent; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.VersionType; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.search.fetch.subphase.FetchSourceContext; @@ -105,7 +106,7 @@ private static BytesReference sliceTrimmingCarriageReturn( MediaType mediaType ) { final int length; - if (XContentType.JSON == mediaType && bytesReference.get(nextMarker - 1) == (byte) '\r') { + if (MediaTypeRegistry.JSON == mediaType && bytesReference.get(nextMarker - 1) == (byte) '\r') { length = nextMarker - from - 1; } else { length = nextMarker - from; diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkResponse.java b/server/src/main/java/org/opensearch/action/bulk/BulkResponse.java index f013749e380f0..6b70e2acd41d2 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkResponse.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkResponse.java @@ -32,14 +32,15 @@ package org.opensearch.action.bulk; -import org.opensearch.action.ActionResponse; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.StatusToXContentObject; +import 
org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.rest.RestStatus; import java.io.IOException; import java.util.ArrayList; @@ -56,8 +57,9 @@ * bulk requests. Each item holds the index/type/id is operated on, and if it failed or not (with the * failure message). * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class BulkResponse extends ActionResponse implements Iterable<BulkItemResponse>, StatusToXContentObject { private static final String ITEMS = "items"; diff --git a/server/src/main/java/org/opensearch/action/bulk/MappingUpdatePerformer.java b/server/src/main/java/org/opensearch/action/bulk/MappingUpdatePerformer.java index 0f2e4f1b00763..c0b6d7049a0e9 100644 --- a/server/src/main/java/org/opensearch/action/bulk/MappingUpdatePerformer.java +++ b/server/src/main/java/org/opensearch/action/bulk/MappingUpdatePerformer.java @@ -32,9 +32,9 @@ package org.opensearch.action.bulk; -import org.opensearch.action.ActionListener; -import org.opensearch.index.mapper.Mapping; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.mapper.Mapping; /** * Updates the mappings on the cluster manager diff --git a/server/src/main/java/org/opensearch/action/bulk/Retry.java b/server/src/main/java/org/opensearch/action/bulk/Retry.java index 754ee9ba4dddc..338355e214eb8 100644 --- a/server/src/main/java/org/opensearch/action/bulk/Retry.java +++ b/server/src/main/java/org/opensearch/action/bulk/Retry.java @@ -31,11 +31,11 @@ package org.opensearch.action.bulk; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; -import org.opensearch.action.ActionListener; +import org.apache.logging.log4j.Logger; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.rest.RestStatus; import org.opensearch.threadpool.Scheduler; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java index f6ca9022a5bff..4a9b07c12821d 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java @@ -36,11 +36,9 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SparseFixedBitSet; import org.opensearch.ExceptionsHelper; -import org.opensearch.core.Assertions; import org.opensearch.OpenSearchParseException; import org.opensearch.ResourceAlreadyExistsException; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.action.DocWriteRequest; import org.opensearch.action.DocWriteResponse; @@ -69,20 +67,29 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AtomicArray; -import org.opensearch.common.lease.Releasable; 
+import org.opensearch.core.Assertions; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.IndexingPressureService; import org.opensearch.index.VersionType; import org.opensearch.index.seqno.SequenceNumbers; -import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.shard.IndexingStats.Stats.DocStatusStats; import org.opensearch.indices.IndexClosedException; +import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; import org.opensearch.ingest.IngestService; import org.opensearch.node.NodeClosedException; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanBuilder; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.telemetry.tracing.listener.TraceableActionListener; import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool.Names; import org.opensearch.transport.TransportService; @@ -129,7 +136,9 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul private final IndexNameExpressionResolver indexNameExpressionResolver; private static final String DROPPED_ITEM_WITH_AUTO_GENERATED_ID = "auto-generated"; private final IndexingPressureService indexingPressureService; + private final IndicesService indicesService; private final SystemIndices systemIndices; + private final Tracer tracer; @Inject public TransportBulkAction( @@ -143,7 +152,9 @@ public TransportBulkAction( IndexNameExpressionResolver indexNameExpressionResolver, AutoCreateIndex autoCreateIndex, IndexingPressureService indexingPressureService, - SystemIndices systemIndices + IndicesService indicesService, + SystemIndices systemIndices, + Tracer tracer ) { this( threadPool, @@ -156,8 +167,10 @@ public TransportBulkAction( indexNameExpressionResolver, autoCreateIndex, indexingPressureService, + indicesService, systemIndices, - System::nanoTime + System::nanoTime, + tracer ); } @@ -172,8 +185,10 @@ public TransportBulkAction( IndexNameExpressionResolver indexNameExpressionResolver, AutoCreateIndex autoCreateIndex, IndexingPressureService indexingPressureService, + IndicesService indicesService, SystemIndices systemIndices, - LongSupplier relativeTimeProvider + LongSupplier relativeTimeProvider, + Tracer tracer ) { super(BulkAction.NAME, transportService, actionFilters, BulkRequest::new, ThreadPool.Names.SAME); Objects.requireNonNull(relativeTimeProvider); @@ -187,8 +202,10 @@ public TransportBulkAction( this.client = client; this.indexNameExpressionResolver = indexNameExpressionResolver; this.indexingPressureService = indexingPressureService; + this.indicesService = indicesService; this.systemIndices = systemIndices; clusterService.addStateApplier(this.ingestForwarder); + this.tracer = tracer; } /** @@ -595,14 +612,25 @@ protected void doRun() { } if (requestsByShard.isEmpty()) { - listener.onResponse( - new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTimeNanos)) - ); + BulkItemResponse[] response = responses.toArray(new BulkItemResponse[responses.length()]); + long tookMillis = buildTookInMillis(startTimeNanos); + + DocStatusStats stats = new DocStatusStats(); + for (BulkItemResponse itemResponse : response) { + if (itemResponse != null) { + 
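+                        // Record the REST status of every per-item response so the
+                        // aggregated doc status stats are still updated even though
+                        // no shard-level requests were dispatched.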
stats.inc(itemResponse.status()); + } + } + + indicesService.addDocStatusStats(stats); + listener.onResponse(new BulkResponse(response, tookMillis)); return; } final AtomicInteger counter = new AtomicInteger(requestsByShard.size()); + final DocStatusStats docStatusStats = new DocStatusStats(); String nodeId = clusterService.localNode().getId(); + for (Map.Entry<ShardId, List<BulkItemRequest>> entry : requestsByShard.entrySet()) { final ShardId shardId = entry.getKey(); final List<BulkItemRequest> requests = entry.getValue(); @@ -624,47 +652,66 @@ protected void doRun() { bulkShardRequest::ramBytesUsed, isOnlySystem ); - shardBulkAction.execute(bulkShardRequest, ActionListener.runBefore(new ActionListener<BulkShardResponse>() { - @Override - public void onResponse(BulkShardResponse bulkShardResponse) { - for (BulkItemResponse bulkItemResponse : bulkShardResponse.getResponses()) { - // we may have no response if item failed - if (bulkItemResponse.getResponse() != null) { - bulkItemResponse.getResponse().setShardInfo(bulkShardResponse.getShardInfo()); + + final Span span = tracer.startSpan(SpanBuilder.from("bulkShardAction", nodeId, bulkShardRequest)); + try (SpanScope spanScope = tracer.withSpanInScope(span)) { + shardBulkAction.execute( + bulkShardRequest, + TraceableActionListener.create(ActionListener.runBefore(new ActionListener<BulkShardResponse>() { + @Override + public void onResponse(BulkShardResponse bulkShardResponse) { + for (BulkItemResponse bulkItemResponse : bulkShardResponse.getResponses()) { + // we may have no response if item failed + if (bulkItemResponse.getResponse() != null) { + bulkItemResponse.getResponse().setShardInfo(bulkShardResponse.getShardInfo()); + } + + docStatusStats.inc(bulkItemResponse.status()); + responses.set(bulkItemResponse.getItemId(), bulkItemResponse); + } + + if (counter.decrementAndGet() == 0) { + finishHim(); + } } - responses.set(bulkItemResponse.getItemId(), bulkItemResponse); - } - if (counter.decrementAndGet() == 0) { - finishHim(); - } - } - @Override - public void onFailure(Exception e) { - // create failures for all relevant requests - for (BulkItemRequest request : requests) { - final String indexName = concreteIndices.getConcreteIndex(request.index()).getName(); - DocWriteRequest<?> docWriteRequest = request.request(); - responses.set( - request.id(), - new BulkItemResponse( - request.id(), - docWriteRequest.opType(), - new BulkItemResponse.Failure(indexName, docWriteRequest.id(), e) - ) - ); - } - if (counter.decrementAndGet() == 0) { - finishHim(); - } - } + @Override + public void onFailure(Exception e) { + // create failures for all relevant requests + for (BulkItemRequest request : requests) { + final String indexName = concreteIndices.getConcreteIndex(request.index()).getName(); + final DocWriteRequest<?> docWriteRequest = request.request(); + final BulkItemResponse bulkItemResponse = new BulkItemResponse( + request.id(), + docWriteRequest.opType(), + new BulkItemResponse.Failure(indexName, docWriteRequest.id(), e) + ); + + docStatusStats.inc(bulkItemResponse.status()); + responses.set(request.id(), bulkItemResponse); + } - private void finishHim() { - listener.onResponse( - new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTimeNanos)) - ); - } - }, releasable::close)); + if (counter.decrementAndGet() == 0) { + finishHim(); + } + } + + private void finishHim() { + indicesService.addDocStatusStats(docStatusStats); + listener.onResponse( + new BulkResponse( + responses.toArray(new 
BulkItemResponse[responses.length()]), + buildTookInMillis(startTimeNanos) + ) + ); + } + }, releasable::close), span, tracer) + ); + } catch (Exception e) { + span.setError(e); + span.endSpan(); + throw e; + } } bulkRequest = null; // allow memory for bulk request items to be reclaimed before all items have been completed } @@ -766,6 +813,10 @@ void executeBulk( final AtomicArray<BulkItemResponse> responses, Map<String, IndexNotFoundException> indicesThatCannotBeCreated ) { + /* + * We are not wrapping the listener here to capture the response codes for performance benefits. It will + * be saving us an iteration over the responses array + */ new BulkOperation(task, bulkRequest, listener, responses, startTimeNanos, indicesThatCannotBeCreated).run(); } diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java index 140c1320daa91..a7a13afd2597c 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java @@ -37,7 +37,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.MessageSupplier; import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionListenerResponseHandler; import org.opensearch.action.ActionRunnable; import org.opensearch.action.DocWriteRequest; @@ -67,18 +66,20 @@ import org.opensearch.cluster.routing.AllocationId; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.common.lease.Releasable; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.tasks.TaskId; import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.index.IndexingPressureService; @@ -89,17 +90,17 @@ import org.opensearch.index.mapper.MapperException; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.SourceToParse; -import org.opensearch.index.remote.RemoteRefreshSegmentPressureService; +import org.opensearch.index.remote.RemoteStorePressureService; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.ShardNotFoundException; import org.opensearch.index.translog.Translog; import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; import org.opensearch.node.NodeClosedException; +import 
org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool.Names; import org.opensearch.transport.TransportChannel; @@ -137,7 +138,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ private final UpdateHelper updateHelper; private final MappingUpdatedAction mappingUpdatedAction; private final SegmentReplicationPressureService segmentReplicationPressureService; - private final RemoteRefreshSegmentPressureService remoteRefreshSegmentPressureService; + private final RemoteStorePressureService remoteStorePressureService; /** * This action is used for performing primary term validation. With remote translog enabled, the translogs would @@ -161,8 +162,9 @@ public TransportShardBulkAction( ActionFilters actionFilters, IndexingPressureService indexingPressureService, SegmentReplicationPressureService segmentReplicationPressureService, - RemoteRefreshSegmentPressureService remoteRefreshSegmentPressureService, - SystemIndices systemIndices + RemoteStorePressureService remoteStorePressureService, + SystemIndices systemIndices, + Tracer tracer ) { super( settings, @@ -178,12 +180,14 @@ public TransportShardBulkAction( EXECUTOR_NAME_FUNCTION, false, indexingPressureService, - systemIndices + systemIndices, + tracer, + AdmissionControlActionType.INDEXING ); this.updateHelper = updateHelper; this.mappingUpdatedAction = mappingUpdatedAction; this.segmentReplicationPressureService = segmentReplicationPressureService; - this.remoteRefreshSegmentPressureService = remoteRefreshSegmentPressureService; + this.remoteStorePressureService = remoteStorePressureService; this.transportPrimaryTermValidationAction = ACTION_NAME + "[validate_primary_term]"; @@ -539,9 +543,8 @@ protected Releasable checkPrimaryLimits(BulkShardRequest request, boolean rerout } // TODO - While removing remote store flag, this can be encapsulated to single class with common interface for backpressure // service - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE) - && remoteRefreshSegmentPressureService.isSegmentsUploadBackpressureEnabled()) { - remoteRefreshSegmentPressureService.validateSegmentsUploadLag(request.shardId()); + if (remoteStorePressureService.isSegmentsUploadBackpressureEnabled()) { + remoteStorePressureService.validateSegmentsUploadLag(request.shardId()); } } return super.checkPrimaryLimits(request, rerouteWasLocal, localRerouteInitiatedByNodeClient); @@ -590,6 +593,7 @@ static boolean executeBulkItemRequest( context.setRequestToExecute(updateResult.action()); break; case NOOP: + context.getPrimary().noopUpdate(); context.markOperationAsNoOp(updateResult.action()); context.markAsCompleted(context.getExecutionResult()); return true; diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportSingleItemBulkWriteAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportSingleItemBulkWriteAction.java index 707cf62d87beb..9b901dda24c2b 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportSingleItemBulkWriteAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportSingleItemBulkWriteAction.java @@ -32,7 +32,6 @@ package org.opensearch.action.bulk; -import org.opensearch.action.ActionListener; import org.opensearch.action.DocWriteRequest; import org.opensearch.action.DocWriteResponse; import 
org.opensearch.action.support.ActionFilters; @@ -41,6 +40,7 @@ import org.opensearch.action.support.WriteResponse; import org.opensearch.action.support.replication.ReplicatedWriteRequest; import org.opensearch.action.support.replication.ReplicationResponse; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.tasks.Task; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java b/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java index aebff808286ec..d37e049d44720 100644 --- a/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java +++ b/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java @@ -39,13 +39,14 @@ import org.opensearch.action.DocWriteRequest; import org.opensearch.action.support.replication.ReplicatedWriteRequest; import org.opensearch.common.Nullable; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.VersionType; import org.opensearch.index.mapper.MapperService; -import org.opensearch.core.index.shard.ShardId; import java.io.IOException; @@ -64,8 +65,9 @@ * @see org.opensearch.client.Client#delete(DeleteRequest) * @see org.opensearch.client.Requests#deleteRequest(String) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeleteRequest extends ReplicatedWriteRequest<DeleteRequest> implements DocWriteRequest<DeleteRequest>, @@ -205,7 +207,7 @@ public long ifSeqNo() { /** * If set, only perform this delete request if the document was last modification was assigned this primary term. - * + * <p> * If the document last modification was assigned a different term a * {@link org.opensearch.index.engine.VersionConflictEngineException} will be thrown. */ diff --git a/server/src/main/java/org/opensearch/action/delete/DeleteRequestBuilder.java b/server/src/main/java/org/opensearch/action/delete/DeleteRequestBuilder.java index 0436962ce01d2..66aa1c73042fe 100644 --- a/server/src/main/java/org/opensearch/action/delete/DeleteRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/delete/DeleteRequestBuilder.java @@ -36,13 +36,15 @@ import org.opensearch.action.support.replication.ReplicationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.VersionType; /** * A delete document action request builder. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeleteRequestBuilder extends ReplicationRequestBuilder<DeleteRequest, DeleteResponse, DeleteRequestBuilder> implements WriteRequestBuilder<DeleteRequestBuilder> { diff --git a/server/src/main/java/org/opensearch/action/delete/DeleteResponse.java b/server/src/main/java/org/opensearch/action/delete/DeleteResponse.java index 35a111e456db5..c39a787ad763d 100644 --- a/server/src/main/java/org/opensearch/action/delete/DeleteResponse.java +++ b/server/src/main/java/org/opensearch/action/delete/DeleteResponse.java @@ -33,10 +33,11 @@ package org.opensearch.action.delete; import org.opensearch.action.DocWriteResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.XContentParser; import java.io.IOException; @@ -48,8 +49,9 @@ * @see org.opensearch.action.delete.DeleteRequest * @see org.opensearch.client.Client#delete(DeleteRequest) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeleteResponse extends DocWriteResponse { public DeleteResponse(ShardId shardId, StreamInput in) throws IOException { @@ -112,8 +114,9 @@ public static void parseXContentFields(XContentParser parser, Builder context) t * temporarily store the parsed values, then the {@link DocWriteResponse.Builder#build()} method is called to * instantiate the {@link DeleteResponse}. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder extends DocWriteResponse.Builder { @Override diff --git a/server/src/main/java/org/opensearch/action/delete/TransportDeleteAction.java b/server/src/main/java/org/opensearch/action/delete/TransportDeleteAction.java index 039214459ac21..6cbabfec6d763 100644 --- a/server/src/main/java/org/opensearch/action/delete/TransportDeleteAction.java +++ b/server/src/main/java/org/opensearch/action/delete/TransportDeleteAction.java @@ -40,7 +40,7 @@ /** * Performs the delete operation. - * + * <p> * Deprecated use TransportBulkAction with a single item instead * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/action/explain/ExplainRequest.java b/server/src/main/java/org/opensearch/action/explain/ExplainRequest.java index c6792449042dc..dd9c01b04acbb 100644 --- a/server/src/main/java/org/opensearch/action/explain/ExplainRequest.java +++ b/server/src/main/java/org/opensearch/action/explain/ExplainRequest.java @@ -36,10 +36,11 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.ValidateActions; import org.opensearch.action.support.single.shard.SingleShardRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.mapper.MapperService; @@ -54,8 +55,9 @@ /** * Explain request encapsulating the explain query and document identifier to get an explanation for. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ExplainRequest extends SingleShardRequest<ExplainRequest> implements ToXContentObject { private static final ParseField QUERY_FIELD = new ParseField("query"); diff --git a/server/src/main/java/org/opensearch/action/explain/ExplainRequestBuilder.java b/server/src/main/java/org/opensearch/action/explain/ExplainRequestBuilder.java index 3031cb6067469..681b48f7a6593 100644 --- a/server/src/main/java/org/opensearch/action/explain/ExplainRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/explain/ExplainRequestBuilder.java @@ -35,6 +35,7 @@ import org.opensearch.action.support.single.shard.SingleShardOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.index.query.QueryBuilder; import org.opensearch.search.fetch.subphase.FetchSourceContext; @@ -42,8 +43,9 @@ /** * A builder for {@link ExplainRequest}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ExplainRequestBuilder extends SingleShardOperationRequestBuilder<ExplainRequest, ExplainResponse, ExplainRequestBuilder> { ExplainRequestBuilder(OpenSearchClient client, ExplainAction action) { diff --git a/server/src/main/java/org/opensearch/action/explain/ExplainResponse.java b/server/src/main/java/org/opensearch/action/explain/ExplainResponse.java index 6d0fbfd15bee6..80a8634e62d87 100644 --- a/server/src/main/java/org/opensearch/action/explain/ExplainResponse.java +++ b/server/src/main/java/org/opensearch/action/explain/ExplainResponse.java @@ -34,17 +34,18 @@ import org.apache.lucene.search.Explanation; import org.opensearch.Version; -import org.opensearch.action.ActionResponse; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.xcontent.StatusToXContentObject; import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ConstructingObjectParser; -import org.opensearch.common.xcontent.StatusToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.get.GetResult; import org.opensearch.index.mapper.MapperService; -import org.opensearch.core.rest.RestStatus; import java.io.IOException; import java.util.Collection; @@ -56,8 +57,9 @@ /** * Response containing the score explanation. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ExplainResponse extends ActionResponse implements StatusToXContentObject { private static final ParseField _INDEX = new ParseField("_index"); diff --git a/server/src/main/java/org/opensearch/action/explain/TransportExplainAction.java b/server/src/main/java/org/opensearch/action/explain/TransportExplainAction.java index 4fd3ab8dcf389..fb2ccc6ebbf12 100644 --- a/server/src/main/java/org/opensearch/action/explain/TransportExplainAction.java +++ b/server/src/main/java/org/opensearch/action/explain/TransportExplainAction.java @@ -35,7 +35,6 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.Explanation; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; import org.opensearch.action.RoutingMissingException; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.single.shard.TransportSingleShardAction; @@ -44,15 +43,16 @@ import org.opensearch.cluster.routing.ShardIterator; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.lease.Releasables; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexService; import org.opensearch.index.engine.Engine; import org.opensearch.index.get.GetResult; import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.Uid; import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.search.SearchService; import org.opensearch.search.internal.AliasFilter; import org.opensearch.search.internal.SearchContext; diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilities.java b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilities.java index 8053e1f8521cf..20b9789972fff 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilities.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilities.java @@ -32,13 +32,14 @@ package org.opensearch.action.fieldcaps; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -60,8 +61,9 @@ /** * Describes the capabilities of a field optionally merged across multiple indices. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class FieldCapabilities implements Writeable, ToXContentObject { private static final ParseField TYPE_FIELD = new ParseField("type"); @@ -289,7 +291,7 @@ public int hashCode() { @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } /** diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesIndexRequest.java b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesIndexRequest.java index c4dead2a3de25..6aa5e3c5da9a2 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesIndexRequest.java @@ -39,8 +39,8 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.index.query.QueryBuilder; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.query.QueryBuilder; import java.io.IOException; import java.util.Objects; diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java index f3ca0c4b192ed..8e39984afdc33 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java @@ -32,7 +32,7 @@ package org.opensearch.action.fieldcaps; -import org.opensearch.action.ActionResponse; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java index add0377d9be48..71e8db73e4ecc 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java @@ -37,9 +37,10 @@ import org.opensearch.action.IndicesRequest; import org.opensearch.action.ValidateActions; import org.opensearch.action.support.IndicesOptions; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilder; @@ -52,7 +53,10 @@ /** * Transport request for retrieving field capabilities for an explicit list of fields + * + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class FieldCapabilitiesRequest extends ActionRequest implements IndicesRequest.Replaceable, ToXContentObject { public static final String NAME = "field_caps_request"; diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequestBuilder.java b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequestBuilder.java index 70a90b98bdf25..c589d344089f3 100644 --- 
a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequestBuilder.java @@ -34,13 +34,15 @@ import org.opensearch.action.ActionRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.query.QueryBuilder; /** * Transport request builder for retrieving field capabilities * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class FieldCapabilitiesRequestBuilder extends ActionRequestBuilder<FieldCapabilitiesRequest, FieldCapabilitiesResponse> { public FieldCapabilitiesRequestBuilder(OpenSearchClient client, FieldCapabilitiesAction action, String... indices) { super(client, action, new FieldCapabilitiesRequest().indices(indices)); diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesResponse.java b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesResponse.java index 550ef2d412ca4..72fdc75686e3b 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesResponse.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesResponse.java @@ -32,18 +32,19 @@ package org.opensearch.action.fieldcaps; -import org.opensearch.action.ActionResponse; -import org.opensearch.core.ParseField; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; +import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.xcontent.XContentParserUtils; -import org.opensearch.common.xcontent.XContentType; import java.io.IOException; import java.util.Arrays; @@ -57,8 +58,10 @@ /** * Response for {@link FieldCapabilitiesRequest} requests. 
* - * @opensearch.internal + * + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class FieldCapabilitiesResponse extends ActionResponse implements ToXContentObject { private static final ParseField INDICES_FIELD = new ParseField("indices"); private static final ParseField FIELDS_FIELD = new ParseField("fields"); @@ -213,6 +216,6 @@ public int hashCode() { @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } } diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesAction.java b/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesAction.java index 84f13b7d22c92..4c9e10cba52e7 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesAction.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesAction.java @@ -32,7 +32,6 @@ package org.opensearch.action.fieldcaps; -import org.opensearch.action.ActionListener; import org.opensearch.action.OriginalIndices; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.HandledTransportAction; @@ -42,6 +41,7 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; import org.opensearch.common.util.concurrent.CountDown; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java b/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java index acabc12af0e56..10bf4975311d6 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java @@ -35,7 +35,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.action.ActionType; import org.opensearch.action.NoShardAvailableActionException; @@ -55,13 +54,14 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.logging.LoggerMessageFormat; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.ObjectMapper; import org.opensearch.index.query.MatchAllQueryBuilder; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.IndicesService; import org.opensearch.search.SearchService; import org.opensearch.search.builder.SearchSourceBuilder; diff --git a/server/src/main/java/org/opensearch/action/get/GetRequest.java b/server/src/main/java/org/opensearch/action/get/GetRequest.java index 81102609f4c2d..952fa8bdab63a 100644 --- a/server/src/main/java/org/opensearch/action/get/GetRequest.java +++ b/server/src/main/java/org/opensearch/action/get/GetRequest.java @@ -37,10 +37,11 @@ import org.opensearch.action.RealtimeRequest; import 
org.opensearch.action.ValidateActions; import org.opensearch.action.support.single.shard.SingleShardRequest; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.index.VersionType; import org.opensearch.index.mapper.MapperService; import org.opensearch.search.fetch.subphase.FetchSourceContext; @@ -60,8 +61,9 @@ * @see org.opensearch.client.Requests#getRequest(String) * @see org.opensearch.client.Client#get(GetRequest) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetRequest extends SingleShardRequest<GetRequest> implements RealtimeRequest { private String id; diff --git a/server/src/main/java/org/opensearch/action/get/GetRequestBuilder.java b/server/src/main/java/org/opensearch/action/get/GetRequestBuilder.java index 6237cf73f0ca8..f50cbb16186f7 100644 --- a/server/src/main/java/org/opensearch/action/get/GetRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/get/GetRequestBuilder.java @@ -35,6 +35,7 @@ import org.opensearch.action.support.single.shard.SingleShardOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.index.VersionType; import org.opensearch.search.fetch.subphase.FetchSourceContext; @@ -42,8 +43,9 @@ /** * A get document action request builder. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetRequestBuilder extends SingleShardOperationRequestBuilder<GetRequest, GetResponse, GetRequestBuilder> { public GetRequestBuilder(OpenSearchClient client, GetAction action) { diff --git a/server/src/main/java/org/opensearch/action/get/GetResponse.java b/server/src/main/java/org/opensearch/action/get/GetResponse.java index b713dc8a507d1..f7f7241933bd6 100644 --- a/server/src/main/java/org/opensearch/action/get/GetResponse.java +++ b/server/src/main/java/org/opensearch/action/get/GetResponse.java @@ -33,14 +33,15 @@ package org.opensearch.action.get; import org.opensearch.OpenSearchParseException; -import org.opensearch.action.ActionResponse; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.document.DocumentField; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.ParsingException; -import org.opensearch.common.Strings; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.document.DocumentField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -58,8 +59,9 @@ * @see GetRequest * @see org.opensearch.client.Client#get(GetRequest) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetResponse extends ActionResponse implements Iterable<DocumentField>, 
ToXContentObject { GetResult getResult; @@ -238,6 +240,6 @@ public int hashCode() { @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } } diff --git a/server/src/main/java/org/opensearch/action/get/MultiGetItemResponse.java b/server/src/main/java/org/opensearch/action/get/MultiGetItemResponse.java index 19c9b785e7ea2..09b4205ffe521 100644 --- a/server/src/main/java/org/opensearch/action/get/MultiGetItemResponse.java +++ b/server/src/main/java/org/opensearch/action/get/MultiGetItemResponse.java @@ -32,6 +32,7 @@ package org.opensearch.action.get; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -41,8 +42,9 @@ /** * A single multi get response. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MultiGetItemResponse implements Writeable { private final GetResponse response; diff --git a/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java b/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java index e8d0c1b9d320f..9ec41fdca585d 100644 --- a/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java +++ b/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java @@ -42,14 +42,15 @@ import org.opensearch.action.ValidateActions; import org.opensearch.action.support.IndicesOptions; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.lucene.uid.Versions; +import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; -import org.opensearch.common.Strings; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.lucene.uid.Versions; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.ParseField; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -69,8 +70,9 @@ /** * Transport request for a multi get. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MultiGetRequest extends ActionRequest implements Iterable<MultiGetRequest.Item>, @@ -91,8 +93,9 @@ public class MultiGetRequest extends ActionRequest /** * A single get item. 
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Item implements Writeable, IndicesRequest, ToXContentObject { private String index; @@ -261,7 +264,7 @@ public int hashCode() { } public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } } @@ -580,4 +583,19 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + @Override + public String toString() { + return "MultiGetRequest{" + + "preference='" + + preference + + '\'' + + ", realtime=" + + realtime + + ", refresh=" + + refresh + + ", items=" + + items + + '}'; + } + } diff --git a/server/src/main/java/org/opensearch/action/get/MultiGetRequestBuilder.java b/server/src/main/java/org/opensearch/action/get/MultiGetRequestBuilder.java index c317edc07da8b..0b701c8ec11c7 100644 --- a/server/src/main/java/org/opensearch/action/get/MultiGetRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/get/MultiGetRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.ActionRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * A multi get document action request builder. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MultiGetRequestBuilder extends ActionRequestBuilder<MultiGetRequest, MultiGetResponse> { public MultiGetRequestBuilder(OpenSearchClient client, MultiGetAction action) { diff --git a/server/src/main/java/org/opensearch/action/get/MultiGetResponse.java b/server/src/main/java/org/opensearch/action/get/MultiGetResponse.java index 563345caaf1f8..3c3489a065d9a 100644 --- a/server/src/main/java/org/opensearch/action/get/MultiGetResponse.java +++ b/server/src/main/java/org/opensearch/action/get/MultiGetResponse.java @@ -34,11 +34,12 @@ import org.opensearch.OpenSearchException; import org.opensearch.Version; -import org.opensearch.action.ActionResponse; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -55,8 +56,9 @@ /** * Transport response for a multi get. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MultiGetResponse extends ActionResponse implements Iterable<MultiGetItemResponse>, ToXContentObject { private static final ParseField INDEX = new ParseField("_index"); @@ -71,8 +73,9 @@ public class MultiGetResponse extends ActionResponse implements Iterable<MultiGe /** * Represents a failure. 
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Failure implements Writeable, ToXContentObject { private final String index; diff --git a/server/src/main/java/org/opensearch/action/get/MultiGetShardResponse.java b/server/src/main/java/org/opensearch/action/get/MultiGetShardResponse.java index 2425282c727a6..ca020e74c7d93 100644 --- a/server/src/main/java/org/opensearch/action/get/MultiGetShardResponse.java +++ b/server/src/main/java/org/opensearch/action/get/MultiGetShardResponse.java @@ -32,7 +32,7 @@ package org.opensearch.action.get; -import org.opensearch.action.ActionResponse; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/get/TransportGetAction.java b/server/src/main/java/org/opensearch/action/get/TransportGetAction.java index 07546034665b3..00a795c86356f 100644 --- a/server/src/main/java/org/opensearch/action/get/TransportGetAction.java +++ b/server/src/main/java/org/opensearch/action/get/TransportGetAction.java @@ -32,20 +32,22 @@ package org.opensearch.action.get; -import org.opensearch.action.ActionListener; import org.opensearch.action.RoutingMissingException; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.single.shard.TransportSingleShardAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.routing.Preference; import org.opensearch.cluster.routing.ShardIterator; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexService; import org.opensearch.index.get.GetResult; import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.IndicesService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -88,16 +90,30 @@ protected boolean resolveIndex(GetRequest request) { return true; } + /** + * Returns true if GET request should be routed to primary shards, else false. 
+ */ + protected static boolean shouldForcePrimaryRouting(Metadata metadata, boolean realtime, String preference, String indexName) { + return metadata.isSegmentReplicationEnabled(indexName) && realtime && preference == null; + } + @Override protected ShardIterator shards(ClusterState state, InternalRequest request) { + final String preference; + // route realtime GET requests to primary shards when segment replication is enabled, + // iff there are no other preferences/routings enabled for routing to a specific shard + if (shouldForcePrimaryRouting( + state.getMetadata(), + request.request().realtime, + request.request().preference(), + request.concreteIndex() + )) { + preference = Preference.PRIMARY.type(); + } else { + preference = request.request().preference(); + } return clusterService.operationRouting() - .getShards( - clusterService.state(), - request.concreteIndex(), - request.request().id(), - request.request().routing(), - request.request().preference() - ); + .getShards(clusterService.state(), request.concreteIndex(), request.request().id(), request.request().routing(), preference); } @Override diff --git a/server/src/main/java/org/opensearch/action/get/TransportMultiGetAction.java b/server/src/main/java/org/opensearch/action/get/TransportMultiGetAction.java index 6fe73fddc27a6..8bbfef381aea8 100644 --- a/server/src/main/java/org/opensearch/action/get/TransportMultiGetAction.java +++ b/server/src/main/java/org/opensearch/action/get/TransportMultiGetAction.java @@ -32,16 +32,18 @@ package org.opensearch.action.get; -import org.opensearch.action.ActionListener; import org.opensearch.action.RoutingMissingException; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.HandledTransportAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockLevel; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.routing.Preference; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; import org.opensearch.common.util.concurrent.AtomicArray; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; import org.opensearch.tasks.Task; import org.opensearch.transport.TransportService; @@ -75,6 +77,10 @@ public TransportMultiGetAction( this.indexNameExpressionResolver = resolver; } + protected static boolean shouldForcePrimaryRouting(Metadata metadata, boolean realtime, String preference, String indexName) { + return metadata.isSegmentReplicationEnabled(indexName) && realtime && preference == null; + } + @Override protected void doExecute(Task task, final MultiGetRequest request, final ActionListener<MultiGetResponse> listener) { ClusterState clusterState = clusterService.state(); @@ -109,6 +115,9 @@ protected void doExecute(Task task, final MultiGetRequest request, final ActionL MultiGetShardRequest shardRequest = shardRequests.get(shardId); if (shardRequest == null) { + if (shouldForcePrimaryRouting(clusterState.getMetadata(), request.realtime(), request.preference(), concreteSingleIndex)) { + request.preference(Preference.PRIMARY.type()); + } shardRequest = new MultiGetShardRequest(request, shardId.getIndexName(), shardId.getId()); shardRequests.put(shardId, shardRequest); }
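In plain terms, the routing changes above (TransportGetAction and TransportMultiGetAction) force a realtime GET to the primary shard only when the target index uses segment replication and the caller supplied no explicit preference; since replicas under segment replication receive updates only when segment copies arrive, a realtime read served by a replica could miss documents that are still only on the primary. A minimal standalone sketch of that decision table, in plain Java rather than OpenSearch's Metadata type (illustrative names, not the shipped code):

// Sketch only: mirrors the shouldForcePrimaryRouting(...) predicate added above.
public final class PrimaryRoutingSketch {
    static boolean shouldForcePrimaryRouting(boolean segmentReplicationEnabled, boolean realtime, String preference) {
        // Force primary routing only when all three conditions hold.
        return segmentReplicationEnabled && realtime && preference == null;
    }

    public static void main(String[] args) {
        System.out.println(shouldForcePrimaryRouting(true, true, null));     // true: realtime GET, no preference
        System.out.println(shouldForcePrimaryRouting(true, false, null));    // false: non-realtime GET may hit replicas
        System.out.println(shouldForcePrimaryRouting(true, true, "_local")); // false: an explicit preference wins
        System.out.println(shouldForcePrimaryRouting(false, true, null));    // false: document replication needs no override
    }
}

diff --git a/server/src/main/java/org/opensearch/action/get/TransportShardMultiGetAction.java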
b/server/src/main/java/org/opensearch/action/get/TransportShardMultiGetAction.java index b0596ac2fb448..27955098d96cd 100644 --- a/server/src/main/java/org/opensearch/action/get/TransportShardMultiGetAction.java +++ b/server/src/main/java/org/opensearch/action/get/TransportShardMultiGetAction.java @@ -33,7 +33,6 @@ package org.opensearch.action.get; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.TransportActions; import org.opensearch.action.support.single.shard.TransportSingleShardAction; @@ -42,11 +41,12 @@ import org.opensearch.cluster.routing.ShardIterator; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexService; import org.opensearch.index.get.GetResult; import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.IndicesService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/index/IndexRequest.java b/server/src/main/java/org/opensearch/action/index/IndexRequest.java index d686f0b460634..a5958f8b9f499 100644 --- a/server/src/main/java/org/opensearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/opensearch/action/index/IndexRequest.java @@ -48,21 +48,22 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.Nullable; import org.opensearch.common.UUIDs; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lucene.uid.Versions; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.VersionType; import org.opensearch.index.mapper.MapperService; -import org.opensearch.core.index.shard.ShardId; import java.io.IOException; import java.util.Locale; @@ -76,22 +77,23 @@ /** * Index request to index a typed JSON document into a specific index and make it searchable. Best * created using {@link org.opensearch.client.Requests#indexRequest(String)}. - * + * <p> * The index requires the {@link #index()}, {@link #id(String)} and * {@link #source(byte[], MediaType)} to be set. 
- * + * <p> * The source (content to index) can be set in its bytes form using ({@link #source(byte[], MediaType)}), * its string form ({@link #source(String, MediaType)}) or using a {@link XContentBuilder} * ({@link #source(XContentBuilder)}). - * + * <p> * If the {@link #id(String)} is not set, it will be automatically generated. * * @see IndexResponse * @see org.opensearch.client.Requests#indexRequest(String) * @see org.opensearch.client.Client#index(IndexRequest) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implements DocWriteRequest<IndexRequest>, CompositeIndicesRequest { private static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(IndexRequest.class); @@ -159,7 +161,7 @@ public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOExceptio isRetry = in.readBoolean(); autoGeneratedTimestamp = in.readLong(); if (in.readBoolean()) { - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_10_0)) { contentType = in.readMediaType(); } else { contentType = in.readEnum(XContentType.class); @@ -378,7 +380,7 @@ public IndexRequest source(Map<String, ?> source) throws OpenSearchGenerationExc */ public IndexRequest source(Map<String, ?> source, MediaType contentType) throws OpenSearchGenerationException { try { - XContentBuilder builder = XContentFactory.contentBuilder(contentType); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(contentType); builder.map(source); return source(builder); } catch (IOException e) { @@ -388,7 +390,7 @@ public IndexRequest source(Map<String, ?> source, MediaType contentType) throws /** * Sets the document source to index. - * + * <p> * Note, it's preferable to either set it using {@link #source(XContentBuilder)} * or using the {@link #source(byte[], MediaType)}. */ @@ -434,7 +436,7 @@ public IndexRequest source(MediaType mediaType, Object... source) { ); } try { - XContentBuilder builder = XContentFactory.contentBuilder(mediaType); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(mediaType); builder.startObject(); for (int i = 0; i < source.length; i++) { builder.field(source[i++].toString(), source[i]); @@ -591,7 +593,7 @@ public long ifSeqNo() { /** * If set, only perform this indexing request if the document's last modification was assigned this primary term. - * + * <p> * If the document's last modification was assigned a different term, a * {@link org.opensearch.index.engine.VersionConflictEngineException} will be thrown.
*/ @@ -670,7 +672,7 @@ private void writeBody(StreamOutput out) throws IOException { out.writeLong(autoGeneratedTimestamp); if (contentType != null) { out.writeBoolean(true); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { contentType.writeTo(out); } else { out.writeEnum((XContentType) contentType); diff --git a/server/src/main/java/org/opensearch/action/index/IndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/index/IndexRequestBuilder.java index ca25409556398..9d4ad3c32778c 100644 --- a/server/src/main/java/org/opensearch/action/index/IndexRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/index/IndexRequestBuilder.java @@ -37,9 +37,10 @@ import org.opensearch.action.support.replication.ReplicationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.VersionType; import java.util.Map; @@ -47,8 +48,9 @@ /** * An index document action request builder. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexRequestBuilder extends ReplicationRequestBuilder<IndexRequest, IndexResponse, IndexRequestBuilder> implements WriteRequestBuilder<IndexRequestBuilder> { @@ -82,8 +84,8 @@ public IndexRequestBuilder setRouting(String routing) { /** * Sets the source. */ - public IndexRequestBuilder setSource(BytesReference source, XContentType xContentType) { - request.source(source, xContentType); + public IndexRequestBuilder setSource(BytesReference source, MediaType mediaType) { + request.source(source, mediaType); return this; } @@ -102,7 +104,7 @@ public IndexRequestBuilder setSource(Map<String, ?> source) { * * @param source The map to index */ - public IndexRequestBuilder setSource(Map<String, ?> source, XContentType contentType) { + public IndexRequestBuilder setSource(Map<String, ?> source, MediaType contentType) { request.source(source, contentType); return this; } @@ -111,10 +113,10 @@ public IndexRequestBuilder setSource(Map<String, ?> source, XContentType content * Sets the document source to index. * <p> * Note, it's preferable to either set it using {@link #setSource(XContentBuilder)} - * or using the {@link #setSource(byte[], XContentType)}. + * or using the {@link #setSource(byte[], MediaType)}. */ - public IndexRequestBuilder setSource(String source, XContentType xContentType) { - request.source(source, xContentType); + public IndexRequestBuilder setSource(String source, MediaType mediaType) { + request.source(source, mediaType); return this; } @@ -129,8 +131,8 @@ public IndexRequestBuilder setSource(XContentBuilder sourceBuilder) { /** * Sets the document to index in bytes form.
*/ - public IndexRequestBuilder setSource(byte[] source, XContentType xContentType) { - request.source(source, xContentType); + public IndexRequestBuilder setSource(byte[] source, MediaType mediaType) { + request.source(source, mediaType); return this; } @@ -141,10 +143,10 @@ public IndexRequestBuilder setSource(byte[] source, XContentType xContentType) { * @param source The source to index * @param offset The offset in the byte array * @param length The length of the data - * @param xContentType The type/format of the source + * @param mediaType The type/format of the source */ - public IndexRequestBuilder setSource(byte[] source, int offset, int length, XContentType xContentType) { - request.source(source, offset, length, xContentType); + public IndexRequestBuilder setSource(byte[] source, int offset, int length, MediaType mediaType) { + request.source(source, offset, length, mediaType); return this; } @@ -169,8 +171,8 @@ public IndexRequestBuilder setSource(Object... source) { * valid String representation.</b> * </p> */ - public IndexRequestBuilder setSource(XContentType xContentType, Object... source) { - request.source(xContentType, source); + public IndexRequestBuilder setSource(MediaType mediaType, Object... source) { + request.source(mediaType, source); return this; } diff --git a/server/src/main/java/org/opensearch/action/index/IndexResponse.java b/server/src/main/java/org/opensearch/action/index/IndexResponse.java index 12d788323b497..53f832fc12c43 100644 --- a/server/src/main/java/org/opensearch/action/index/IndexResponse.java +++ b/server/src/main/java/org/opensearch/action/index/IndexResponse.java @@ -33,12 +33,13 @@ package org.opensearch.action.index; import org.opensearch.action.DocWriteResponse; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentParser; import java.io.IOException; @@ -50,8 +51,9 @@ * @see IndexRequest * @see org.opensearch.client.Client#index(IndexRequest) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexResponse extends DocWriteResponse { public IndexResponse(ShardId shardId, StreamInput in) throws IOException { @@ -90,7 +92,7 @@ public String toString() { builder.append(",result=").append(getResult().getLowercase()); builder.append(",seqNo=").append(getSeqNo()); builder.append(",primaryTerm=").append(getPrimaryTerm()); - builder.append(",shards=").append(Strings.toString(XContentType.JSON, getShardInfo())); + builder.append(",shards=").append(Strings.toString(MediaTypeRegistry.JSON, getShardInfo())); return builder.append("]").toString(); } @@ -116,8 +118,9 @@ public static void parseXContentFields(XContentParser parser, Builder context) t * temporarily store the parsed values, then the {@link Builder#build()} method is called to * instantiate the {@link IndexResponse}. 
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder extends DocWriteResponse.Builder { @Override public IndexResponse build() { diff --git a/server/src/main/java/org/opensearch/action/index/TransportIndexAction.java b/server/src/main/java/org/opensearch/action/index/TransportIndexAction.java index fe4f80bf0c065..ce32840f6751b 100644 --- a/server/src/main/java/org/opensearch/action/index/TransportIndexAction.java +++ b/server/src/main/java/org/opensearch/action/index/TransportIndexAction.java @@ -40,7 +40,7 @@ /** * Performs the index operation. - * + * <p> * Allows for the following settings: * <ul> * <li><b>autoCreateIndex</b>: When set to {@code true}, will automatically create an index if one does not exist. diff --git a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequest.java b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequest.java index 2f05ce3a25320..b9d916e152c3d 100644 --- a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequest.java +++ b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -43,8 +44,9 @@ /** * transport request to delete a pipeline * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeletePipelineRequest extends AcknowledgedRequest<DeletePipelineRequest> { private String id; diff --git a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequestBuilder.java b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequestBuilder.java index 6a2eb494e8d3f..bc253db85bb0f 100644 --- a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequestBuilder.java @@ -35,12 +35,14 @@ import org.opensearch.action.ActionRequestBuilder; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport request builder to delete a pipeline * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeletePipelineRequestBuilder extends ActionRequestBuilder<DeletePipelineRequest, AcknowledgedResponse> { public DeletePipelineRequestBuilder(OpenSearchClient client, DeletePipelineAction action) { diff --git a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineTransportAction.java b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineTransportAction.java index 86c4c09303c15..fe68f06d0d32e 100644 --- a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineTransportAction.java +++ b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineTransportAction.java @@ -32,15 +32,15 @@ package org.opensearch.action.ingest; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import
org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.ingest.IngestService; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/action/ingest/GetPipelineRequest.java b/server/src/main/java/org/opensearch/action/ingest/GetPipelineRequest.java index bbe56aa0ff9dd..c7266c31a5022 100644 --- a/server/src/main/java/org/opensearch/action/ingest/GetPipelineRequest.java +++ b/server/src/main/java/org/opensearch/action/ingest/GetPipelineRequest.java @@ -34,17 +34,19 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import java.io.IOException; /** * transport request to get a pipeline * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetPipelineRequest extends ClusterManagerNodeReadRequest<GetPipelineRequest> { private String[] ids; diff --git a/server/src/main/java/org/opensearch/action/ingest/GetPipelineRequestBuilder.java b/server/src/main/java/org/opensearch/action/ingest/GetPipelineRequestBuilder.java index bdc13523ffdc6..593ea2156d5e3 100644 --- a/server/src/main/java/org/opensearch/action/ingest/GetPipelineRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/ingest/GetPipelineRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * Transport request builder to get a pipeline * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetPipelineRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< GetPipelineRequest, GetPipelineResponse, diff --git a/server/src/main/java/org/opensearch/action/ingest/GetPipelineResponse.java b/server/src/main/java/org/opensearch/action/ingest/GetPipelineResponse.java index a26fa413b2f5b..0719842bc985f 100644 --- a/server/src/main/java/org/opensearch/action/ingest/GetPipelineResponse.java +++ b/server/src/main/java/org/opensearch/action/ingest/GetPipelineResponse.java @@ -32,18 +32,19 @@ package org.opensearch.action.ingest; -import org.opensearch.action.ActionResponse; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.xcontent.StatusToXContentObject; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.StatusToXContentObject; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; 
import org.opensearch.core.xcontent.XContentParser.Token; import org.opensearch.ingest.PipelineConfiguration; -import org.opensearch.core.rest.RestStatus; import java.io.IOException; import java.util.ArrayList; @@ -57,8 +58,9 @@ /** * transport response for getting a pipeline * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetPipelineResponse extends ActionResponse implements StatusToXContentObject { private List<PipelineConfiguration> pipelines; @@ -172,7 +174,7 @@ public boolean equals(Object other) { @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } @Override diff --git a/server/src/main/java/org/opensearch/action/ingest/GetPipelineTransportAction.java b/server/src/main/java/org/opensearch/action/ingest/GetPipelineTransportAction.java index 1283a68a3ea5f..80333c7346f92 100644 --- a/server/src/main/java/org/opensearch/action/ingest/GetPipelineTransportAction.java +++ b/server/src/main/java/org/opensearch/action/ingest/GetPipelineTransportAction.java @@ -32,7 +32,6 @@ package org.opensearch.action.ingest; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; import org.opensearch.cluster.ClusterState; @@ -41,6 +40,7 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.ingest.IngestService; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/action/ingest/IngestActionForwarder.java b/server/src/main/java/org/opensearch/action/ingest/IngestActionForwarder.java index 830cb8d6fac42..2821f4fd7fadb 100644 --- a/server/src/main/java/org/opensearch/action/ingest/IngestActionForwarder.java +++ b/server/src/main/java/org/opensearch/action/ingest/IngestActionForwarder.java @@ -32,21 +32,21 @@ package org.opensearch.action.ingest; -import org.opensearch.action.ActionType; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionListenerResponseHandler; import org.opensearch.action.ActionRequest; +import org.opensearch.action.ActionType; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterStateApplier; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Randomness; +import org.opensearch.core.action.ActionListener; import org.opensearch.transport.TransportService; import java.util.concurrent.atomic.AtomicInteger; /** * A utility for forwarding ingest requests to ingest nodes in a round-robin fashion. 
- * + * <p> * TODO: move this into IngestService and make index/bulk actions call that * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequest.java b/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequest.java index 7a88f817c70bf..06e89b5f2908b 100644 --- a/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequest.java +++ b/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequest.java @@ -35,6 +35,7 @@ import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; @@ -49,8 +50,9 @@ /** * transport request to put a pipeline * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class PutPipelineRequest extends AcknowledgedRequest<PutPipelineRequest> implements ToXContentObject { private String id; @@ -70,7 +72,7 @@ public PutPipelineRequest(StreamInput in) throws IOException { super(in); id = in.readString(); source = in.readBytesReference(); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_10_0)) { mediaType = in.readMediaType(); } else { mediaType = in.readEnum(XContentType.class); @@ -101,7 +103,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(id); out.writeBytesReference(source); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { mediaType.writeTo(out); } else { out.writeEnum((XContentType) mediaType); diff --git a/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequestBuilder.java b/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequestBuilder.java index d69165b280063..e8d6a4d332319 100644 --- a/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequestBuilder.java @@ -35,14 +35,16 @@ import org.opensearch.action.ActionRequestBuilder; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaType; /** * Transport request builder to put a pipeline * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class PutPipelineRequestBuilder extends ActionRequestBuilder<PutPipelineRequest, AcknowledgedResponse> { public PutPipelineRequestBuilder(OpenSearchClient client, PutPipelineAction action) { @@ -54,8 +56,8 @@ public PutPipelineRequestBuilder( PutPipelineAction action, String id, BytesReference source, - XContentType xContentType + MediaType mediaType ) { - super(client, action, new PutPipelineRequest(id, source, xContentType)); + super(client, action, new PutPipelineRequest(id, source, mediaType)); } }
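The Version.V_3_0_0 to Version.V_2_10_0 changes in PutPipelineRequest above (and in IndexRequest and SimulatePipelineRequest elsewhere in this diff) lower an existing wire-compatibility gate: once the MediaType encoding was backported to 2.10, nodes on 2.10 or later can exchange the new form while older peers still receive the legacy XContentType enum. A rough sketch of the pattern with hypothetical stand-in types (WireOut and the version constant are placeholders, not OpenSearch APIs):

// Hypothetical stand-ins; only the shape of the version gate is the point.
interface WireOut {
    void writeMediaType(Object mediaType); // richer encoding, understood by 2.10+
    void writeEnum(Object legacyEnum);     // legacy enum encoding for older peers
}

final class BwcGateSketch {
    static final int V_2_10_0 = 2_10_0; // placeholder version id

    // The stream is tagged with the receiver's version, so a newer node
    // automatically falls back to the legacy form when talking to an older one.
    static void write(WireOut out, int streamVersion, Object mediaType) {
        if (streamVersion >= V_2_10_0) {
            out.writeMediaType(mediaType);
        } else {
            out.writeEnum(mediaType);
        }
    }
}

The read side (visible in the hunks above) branches on the same version check, so both encodings stay decodable for as long as pre-2.10 nodes can appear in a cluster.

diff --git a/server/src/main/java/org/opensearch/action/ingest/PutPipelineTransportAction.java index f87d832078661..e2d206e8c4f6d 100644 --- a/server/src/main/java/org/opensearch/action/ingest/PutPipelineTransportAction.java +++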
b/server/src/main/java/org/opensearch/action/ingest/PutPipelineTransportAction.java @@ -32,12 +32,11 @@ package org.opensearch.action.ingest; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.node.info.NodeInfo; import org.opensearch.action.admin.cluster.node.info.NodesInfoRequest; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OriginSettingClient; import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.ClusterState; @@ -46,6 +45,7 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.ingest.IngestInfo; import org.opensearch.ingest.IngestService; diff --git a/server/src/main/java/org/opensearch/action/ingest/SimulateDocumentResult.java b/server/src/main/java/org/opensearch/action/ingest/SimulateDocumentResult.java index 98a03272aff42..bc338a57f762d 100644 --- a/server/src/main/java/org/opensearch/action/ingest/SimulateDocumentResult.java +++ b/server/src/main/java/org/opensearch/action/ingest/SimulateDocumentResult.java @@ -31,14 +31,16 @@ package org.opensearch.action.ingest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.xcontent.ToXContentObject; /** * Interface to simulate a document result * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface SimulateDocumentResult extends Writeable, ToXContentObject { } diff --git a/server/src/main/java/org/opensearch/action/ingest/SimulateExecutionService.java b/server/src/main/java/org/opensearch/action/ingest/SimulateExecutionService.java index 82db29c950161..c7c0f21eb0876 100644 --- a/server/src/main/java/org/opensearch/action/ingest/SimulateExecutionService.java +++ b/server/src/main/java/org/opensearch/action/ingest/SimulateExecutionService.java @@ -32,8 +32,8 @@ package org.opensearch.action.ingest; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; +import org.opensearch.core.action.ActionListener; import org.opensearch.ingest.CompoundProcessor; import org.opensearch.ingest.IngestDocument; import org.opensearch.ingest.Pipeline; diff --git a/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequest.java b/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequest.java index 1ac441a1afe64..b51f25d2e62b1 100644 --- a/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequest.java +++ b/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequest.java @@ -35,11 +35,12 @@ import org.opensearch.Version; import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import 
org.opensearch.common.logging.DeprecationLogger; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; @@ -60,8 +61,9 @@ /** * transport request to simulate a pipeline * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SimulatePipelineRequest extends ActionRequest implements ToXContentObject { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(SimulatePipelineRequest.class); @@ -85,7 +87,7 @@ public SimulatePipelineRequest(BytesReference source, MediaType mediaType) { id = in.readOptionalString(); verbose = in.readBoolean(); source = in.readBytesReference(); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_10_0)) { mediaType = in.readMediaType(); } else { mediaType = in.readEnum(XContentType.class); @@ -127,7 +129,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(id); out.writeBoolean(verbose); out.writeBytesReference(source); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { mediaType.writeTo(out); } else { out.writeEnum((XContentType) mediaType); @@ -218,7 +220,12 @@ private static List<IngestDocument> parseDocs(Map<String, Object> config) { String routing = ConfigurationUtils.readOptionalStringOrIntProperty(null, null, dataMap, Metadata.ROUTING.getFieldName()); Long version = null; if (dataMap.containsKey(Metadata.VERSION.getFieldName())) { - version = (Long) ConfigurationUtils.readObject(null, null, dataMap, Metadata.VERSION.getFieldName()); + Object versionFieldValue = ConfigurationUtils.readObject(null, null, dataMap, Metadata.VERSION.getFieldName()); + if (versionFieldValue instanceof Integer || versionFieldValue instanceof Long) { + version = ((Number) versionFieldValue).longValue(); + } else { + throw new IllegalArgumentException("Failed to parse parameter [_version], only int or long is accepted"); + } } VersionType versionType = null; if (dataMap.containsKey(Metadata.VERSION_TYPE.getFieldName())) { @@ -228,12 +235,25 @@ private static List<IngestDocument> parseDocs(Map<String, Object> config) { } IngestDocument ingestDocument = new IngestDocument(index, id, routing, version, versionType, document); if (dataMap.containsKey(Metadata.IF_SEQ_NO.getFieldName())) { - Long ifSeqNo = (Long) ConfigurationUtils.readObject(null, null, dataMap, Metadata.IF_SEQ_NO.getFieldName()); - ingestDocument.setFieldValue(Metadata.IF_SEQ_NO.getFieldName(), ifSeqNo); + Object ifSeqNoFieldValue = ConfigurationUtils.readObject(null, null, dataMap, Metadata.IF_SEQ_NO.getFieldName()); + if (ifSeqNoFieldValue instanceof Integer || ifSeqNoFieldValue instanceof Long) { + ingestDocument.setFieldValue(Metadata.IF_SEQ_NO.getFieldName(), ((Number) ifSeqNoFieldValue).longValue()); + } else { + throw new IllegalArgumentException("Failed to parse parameter [_if_seq_no], only int or long is accepted"); + } } if (dataMap.containsKey(Metadata.IF_PRIMARY_TERM.getFieldName())) { - Long ifPrimaryTerm = (Long) ConfigurationUtils.readObject(null, null, dataMap, Metadata.IF_PRIMARY_TERM.getFieldName()); - ingestDocument.setFieldValue(Metadata.IF_PRIMARY_TERM.getFieldName(), ifPrimaryTerm); + Object ifPrimaryTermFieldValue = ConfigurationUtils.readObject( + null, + null, + dataMap, + Metadata.IF_PRIMARY_TERM.getFieldName() + ); + if (ifPrimaryTermFieldValue 
diff --git a/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequestBuilder.java b/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequestBuilder.java index b2eda0e9485e4..2a5f281a5075c 100644 --- a/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequestBuilder.java @@ -34,14 +34,16 @@ import org.opensearch.action.ActionRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaType; /** * Transport request builder to simulate a pipeline * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SimulatePipelineRequestBuilder extends ActionRequestBuilder<SimulatePipelineRequest, SimulatePipelineResponse> { /** @@ -58,9 +60,9 @@ public SimulatePipelineRequestBuilder( OpenSearchClient client, SimulatePipelineAction action, BytesReference source, - XContentType xContentType + MediaType mediaType ) { - super(client, action, new SimulatePipelineRequest(source, xContentType)); + super(client, action, new SimulatePipelineRequest(source, mediaType)); } /** diff --git a/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineResponse.java b/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineResponse.java index 9e67f791dc141..1fb3a69bd1e16 100644 --- a/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineResponse.java +++ b/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineResponse.java @@ -33,8 +33,9 @@ package org.opensearch.action.ingest; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ConstructingObjectParser; @@ -54,8 +55,9 @@ /** * transport response for simulating a pipeline * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SimulatePipelineResponse extends ActionResponse implements ToXContentObject { private String pipelineId; private boolean verbose; diff --git a/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineTransportAction.java b/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineTransportAction.java index c1dc1be97455e..4753679d370af 100644 --- a/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineTransportAction.java +++ b/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineTransportAction.java @@ -32,12 +32,12 @@ package org.opensearch.action.ingest; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.HandledTransportAction; import org.opensearch.common.inject.Inject; -import 
org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.ingest.IngestService; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/action/ingest/SimulateProcessorResult.java b/server/src/main/java/org/opensearch/action/ingest/SimulateProcessorResult.java index 7b04f96a220bd..f8d0fd618c00f 100644 --- a/server/src/main/java/org/opensearch/action/ingest/SimulateProcessorResult.java +++ b/server/src/main/java/org/opensearch/action/ingest/SimulateProcessorResult.java @@ -32,8 +32,8 @@ package org.opensearch.action.ingest; import org.opensearch.OpenSearchException; -import org.opensearch.core.ParseField; import org.opensearch.common.collect.Tuple; +import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; diff --git a/server/src/main/java/org/opensearch/action/main/MainAction.java b/server/src/main/java/org/opensearch/action/main/MainAction.java index c5cbac824ec83..28a31a92d7f16 100644 --- a/server/src/main/java/org/opensearch/action/main/MainAction.java +++ b/server/src/main/java/org/opensearch/action/main/MainAction.java @@ -44,7 +44,7 @@ public class MainAction extends ActionType<MainResponse> { public static final String NAME = "cluster:monitor/main"; public static final MainAction INSTANCE = new MainAction(); - public MainAction() { + private MainAction() { super(NAME, MainResponse::new); } } diff --git a/server/src/main/java/org/opensearch/action/main/MainResponse.java b/server/src/main/java/org/opensearch/action/main/MainResponse.java index 30994c5cfcbd2..ad19241464848 100644 --- a/server/src/main/java/org/opensearch/action/main/MainResponse.java +++ b/server/src/main/java/org/opensearch/action/main/MainResponse.java @@ -34,9 +34,9 @@ import org.opensearch.Build; import org.opensearch.Version; -import org.opensearch.action.ActionResponse; import org.opensearch.cluster.ClusterName; import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ObjectParser; diff --git a/server/src/main/java/org/opensearch/action/main/TransportMainAction.java b/server/src/main/java/org/opensearch/action/main/TransportMainAction.java index 2916c3dd88d49..b3a075233e754 100644 --- a/server/src/main/java/org/opensearch/action/main/TransportMainAction.java +++ b/server/src/main/java/org/opensearch/action/main/TransportMainAction.java @@ -34,13 +34,13 @@ import org.opensearch.Build; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.HandledTransportAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; import org.opensearch.node.Node; import org.opensearch.tasks.Task; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/resync/TransportResyncReplicationAction.java 
b/server/src/main/java/org/opensearch/action/resync/TransportResyncReplicationAction.java index 879250218405c..9d60706d1f100 100644 --- a/server/src/main/java/org/opensearch/action/resync/TransportResyncReplicationAction.java +++ b/server/src/main/java/org/opensearch/action/resync/TransportResyncReplicationAction.java @@ -32,7 +32,6 @@ package org.opensearch.action.resync; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.replication.ReplicationOperation; import org.opensearch.action.support.replication.ReplicationResponse; @@ -43,8 +42,9 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.index.IndexingPressureService; import org.opensearch.index.engine.Engine; import org.opensearch.index.seqno.SequenceNumbers; @@ -54,6 +54,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool.Names; import org.opensearch.transport.TransportException; @@ -93,7 +94,8 @@ public TransportResyncReplicationAction( ShardStateAction shardStateAction, ActionFilters actionFilters, IndexingPressureService indexingPressureService, - SystemIndices systemIndices + SystemIndices systemIndices, + Tracer tracer ) { super( settings, @@ -109,7 +111,8 @@ public TransportResyncReplicationAction( EXECUTOR_NAME_FUNCTION, true, /* we should never reject resync because of thread pool capacity on primary */ indexingPressureService, - systemIndices + systemIndices, + tracer ); } diff --git a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java index 152e6f56668f2..0520a4a7aecec 100644 --- a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java @@ -37,19 +37,19 @@ import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.NoShardAvailableActionException; -import org.opensearch.core.action.ShardOperationFailedException; import org.opensearch.action.support.TransportActions; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.routing.FailAwareWeightedRouting; import org.opensearch.cluster.routing.GroupShardsIterator; import org.opensearch.common.Nullable; import org.opensearch.common.SetOnce; -import org.opensearch.common.util.concurrent.AbstractRunnable; -import org.opensearch.common.util.concurrent.AtomicArray; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; +import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.common.util.concurrent.AtomicArray; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ShardOperationFailedException; import 
org.opensearch.core.index.shard.ShardId; import org.opensearch.search.SearchPhaseResult; import org.opensearch.search.SearchShardTarget; @@ -58,6 +58,10 @@ import org.opensearch.search.internal.SearchContext; import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.search.pipeline.PipelinedRequest; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanCreationContext; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.transport.Transport; import java.util.ArrayDeque; @@ -68,6 +72,7 @@ import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiFunction; @@ -107,7 +112,6 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten private final AtomicInteger skippedOps = new AtomicInteger(); private final TransportSearchAction.SearchTimeProvider timeProvider; private final SearchResponse.Clusters clusters; - protected final GroupShardsIterator<SearchShardIterator> toSkipShardsIts; protected final GroupShardsIterator<SearchShardIterator> shardsIts; private final int expectedTotalOps; @@ -115,6 +119,11 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten private final int maxConcurrentRequestsPerNode; private final Map<String, PendingExecutions> pendingExecutionsPerNode = new ConcurrentHashMap<>(); private final boolean throttleConcurrentRequests; + private final SearchRequestContext searchRequestContext; + private final Tracer tracer; + + private SearchPhase currentPhase; + private boolean currentPhaseHasLifecycle; private final List<Releasable> releasables = new ArrayList<>(); @@ -135,7 +144,9 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten SearchTask task, SearchPhaseResults<Result> resultConsumer, int maxConcurrentRequestsPerNode, - SearchResponse.Clusters clusters + SearchResponse.Clusters clusters, + SearchRequestContext searchRequestContext, + Tracer tracer ) { super(name); final List<SearchShardIterator> toSkipIterators = new ArrayList<>(); @@ -171,6 +182,8 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten this.indexRoutings = indexRoutings; this.results = resultConsumer; this.clusters = clusters; + this.searchRequestContext = searchRequestContext; + this.tracer = tracer; } @Override @@ -209,11 +222,13 @@ public final void start() { 0, 0, buildTookInMillis(), + searchRequestContext.getPhaseTook(), ShardSearchFailure.EMPTY_ARRAY, clusters, null ) ); + onRequestEnd(searchRequestContext); return; } executePhase(this); @@ -279,6 +294,7 @@ private void performPhaseOnShard(final int shardIndex, final SearchShardIterator Runnable r = () -> { final Thread thread = Thread.currentThread(); try { + final SearchPhase phase = this; executePhaseOnShard(shardIt, shard, new SearchActionListener<Result>(shard, shardIndex) { @Override public void innerOnResponse(Result result) { @@ -292,7 +308,12 @@ public void innerOnResponse(Result result) { @Override public void onFailure(Exception t) { try { - onShardFailure(shardIndex, shard, shardIt, t); + // It only happens when onPhaseDone() is called and executePhaseOnShard() fails hard with an exception. 
+ if (totalOps.get() == expectedTotalOps) { + onPhaseFailure(phase, "The phase has failed", t); + } else { + onShardFailure(shardIndex, shard, shardIt, t); + } } finally { executeNext(pendingExecutions, thread); } @@ -371,6 +392,7 @@ public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPha : OpenSearchException.guessRootCauses(shardSearchFailures[0].getCause())[0]; logger.debug(() -> new ParameterizedMessage("All shards failed for phase: [{}]", getName()), cause); onPhaseFailure(currentPhase, "all shards failed", cause); + } else { Boolean allowPartialResults = request.allowPartialSearchResults(); assert allowPartialResults != null : "SearchRequest missing setting for allowPartialSearchResults"; @@ -419,18 +441,51 @@ public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPha clusterState.version() ); } + onPhaseEnd(searchRequestContext); executePhase(nextPhase); } } + private void onPhaseEnd(SearchRequestContext searchRequestContext) { + if (getCurrentPhase() != null) { + long tookInNanos = System.nanoTime() - getCurrentPhase().getStartTimeInNanos(); + searchRequestContext.updatePhaseTookMap(getCurrentPhase().getName(), TimeUnit.NANOSECONDS.toMillis(tookInNanos)); + } + if (currentPhaseHasLifecycle) { + this.searchRequestContext.getSearchRequestOperationsListener().onPhaseEnd(this, searchRequestContext); + } + } + + private void onPhaseStart(SearchPhase phase) { + setCurrentPhase(phase); + if (currentPhaseHasLifecycle) { + this.searchRequestContext.getSearchRequestOperationsListener().onPhaseStart(this); + } + } + + private void onRequestEnd(SearchRequestContext searchRequestContext) { + this.searchRequestContext.getSearchRequestOperationsListener().onRequestEnd(this, searchRequestContext); + } + private void executePhase(SearchPhase phase) { - try { - phase.run(); + Span phaseSpan = tracer.startSpan(SpanCreationContext.server().name("[phase/" + phase.getName() + "]")); + try (final SpanScope scope = tracer.withSpanInScope(phaseSpan)) { + onPhaseStart(phase); + phase.recordAndRun(); } catch (Exception e) { if (logger.isDebugEnabled()) { logger.debug(new ParameterizedMessage("Failed to execute [{}] while moving to [{}] phase", request, phase.getName()), e); } + + if (currentPhaseHasLifecycle == false) { + phaseSpan.setError(e); + } + onPhaseFailure(phase, "", e); + } finally { + if (currentPhaseHasLifecycle == false) { + phaseSpan.endSpan(); + } } } @@ -603,6 +658,20 @@ private void successfulShardExecution(SearchShardIterator shardsIt) { } } + public SearchPhase getCurrentPhase() { + return currentPhase; + } + + private void setCurrentPhase(SearchPhase phase) { + currentPhase = phase; + // The WrappingSearchAsyncActionPhase (see CanMatchPreFilterSearchPhase for one example) is a special case + // of search phase that wraps SearchAsyncActionPhase as SearchPhase. The AbstractSearchAsyncAction manages its own + // onPhaseStart / onPhaseFailure / onPhaseDone callbacks and the wrapping SearchPhase is being abandoned + // (e.g. its onPhaseEnd callback is never called). To account for that, we do not send any notifications for this + // phase. 
+ currentPhaseHasLifecycle = ((phase instanceof WrappingSearchAsyncActionPhase) == false); + } + @Override public final int getNumShards() { return results.getNumShards(); } @@ -636,6 +705,7 @@ protected final SearchResponse buildSearchResponse( successfulOps.get(), skippedOps.get(), buildTookInMillis(), + searchRequestContext.getPhaseTook(), failures, clusters, searchContextId @@ -668,12 +738,20 @@ public void sendSearchResponse(InternalSearchResponse internalSearchResponse, At searchContextId = null; } } + searchRequestContext.setTotalHits(internalSearchResponse.hits().getTotalHits()); + searchRequestContext.setShardStats(results.getNumShards(), successfulOps.get(), skippedOps.get(), failures.length); + onPhaseEnd(searchRequestContext); + onRequestEnd(searchRequestContext); listener.onResponse(buildSearchResponse(internalSearchResponse, failures, scrollId, searchContextId)); } + setCurrentPhase(null); } @Override public final void onPhaseFailure(SearchPhase phase, String msg, Throwable cause) { + if (currentPhaseHasLifecycle) { + this.searchRequestContext.getSearchRequestOperationsListener().onPhaseFailure(this, cause); + } raisePhaseFailure(new SearchPhaseExecutionException(phase.getName(), msg, cause, buildShardFailures())); }
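The executePhase() change above is the core of the tracing work in this file: each phase now runs inside a telemetry span that is made current for the executing thread, marked with the error on failure, and always ended. A condensed, hedged sketch of that span-per-phase shape, reusing the Tracer API visible in this diff (runPhase and the bare Runnable are simplified stand-ins for the real phase plumbing, and the lifecycle-suppression checks are omitted):

    import org.opensearch.telemetry.tracing.Span;
    import org.opensearch.telemetry.tracing.SpanCreationContext;
    import org.opensearch.telemetry.tracing.SpanScope;
    import org.opensearch.telemetry.tracing.Tracer;

    final class PhaseTracing {
        // Run one unit of work inside a server-side span: scope it to this thread,
        // record a failure on the span, and end the span no matter what.
        static void runPhase(Tracer tracer, String phaseName, Runnable phase) {
            Span phaseSpan = tracer.startSpan(SpanCreationContext.server().name("[phase/" + phaseName + "]"));
            try (SpanScope ignored = tracer.withSpanInScope(phaseSpan)) {
                phase.run();
            } catch (RuntimeException e) {
                phaseSpan.setError(e); // surface the failure on the span before ending it
                throw e;
            } finally {
                phaseSpan.endSpan();
            }
        }
    }
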
diff --git a/server/src/main/java/org/opensearch/action/search/BottomSortValuesCollector.java b/server/src/main/java/org/opensearch/action/search/BottomSortValuesCollector.java index c831c80b6455c..bce8d9fb2b1ca 100644 --- a/server/src/main/java/org/opensearch/action/search/BottomSortValuesCollector.java +++ b/server/src/main/java/org/opensearch/action/search/BottomSortValuesCollector.java @@ -34,6 +34,7 @@ import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.FieldDoc; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TopFieldDocs; import org.opensearch.search.DocValueFormat; @@ -59,7 +60,7 @@ class BottomSortValuesCollector { this.reverseMuls = new int[sortFields.length]; this.sortFields = sortFields; for (int i = 0; i < sortFields.length; i++) { - comparators[i] = sortFields[i].getComparator(1, false); + comparators[i] = sortFields[i].getComparator(1, Pruning.NONE); reverseMuls[i] = sortFields[i].getReverse() ? -1 : 1; } } diff --git a/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java b/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java index c026c72f77f00..952d83b9e4539 100644 --- a/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java +++ b/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java @@ -33,10 +33,10 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.FixedBitSet; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.routing.GroupShardsIterator; import org.opensearch.common.lease.Releasable; +import org.opensearch.core.action.ActionListener; import org.opensearch.search.SearchService.CanMatchResponse; import org.opensearch.search.SearchShardTarget; import org.opensearch.search.builder.SearchSourceBuilder; @@ -44,6 +44,7 @@ import org.opensearch.search.sort.FieldSortBuilder; import org.opensearch.search.sort.MinAndMax; import org.opensearch.search.sort.SortOrder; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.transport.Transport; import java.util.Comparator; @@ -90,7 +91,9 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction<CanMa ClusterState clusterState, SearchTask task, Function<GroupShardsIterator<SearchShardIterator>, SearchPhase> phaseFactory, - SearchResponse.Clusters clusters + SearchResponse.Clusters clusters, + SearchRequestContext searchRequestContext, + Tracer tracer ) { // We set max concurrent shard requests to the number of shards so no throttling happens for can_match requests super( @@ -110,7 +113,9 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction<CanMa task, new CanMatchSearchPhaseResults(shardsIts.size()), shardsIts.size(), - clusters + clusters, + searchRequestContext, + tracer ); this.phaseFactory = phaseFactory; this.shardsIts = shardsIts; diff --git a/server/src/main/java/org/opensearch/action/search/ClearScrollController.java b/server/src/main/java/org/opensearch/action/search/ClearScrollController.java index eb0fa49a94050..2ce94bfd682fe 100644 --- a/server/src/main/java/org/opensearch/action/search/ClearScrollController.java +++ b/server/src/main/java/org/opensearch/action/search/ClearScrollController.java @@ -33,15 +33,15 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.action.StepListener; import org.opensearch.action.support.GroupedActionListener; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.util.concurrent.CountDown; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.Transport; -import org.opensearch.transport.TransportResponse; import java.util.ArrayList; import java.util.Collection; diff --git a/server/src/main/java/org/opensearch/action/search/ClearScrollRequest.java b/server/src/main/java/org/opensearch/action/search/ClearScrollRequest.java index cd9e33634d918..16bf0fc46c4eb 100644 --- a/server/src/main/java/org/opensearch/action/search/ClearScrollRequest.java +++ b/server/src/main/java/org/opensearch/action/search/ClearScrollRequest.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequest; import 
org.opensearch.action.ActionRequestValidationException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContentObject; @@ -50,8 +51,9 @@ /** * Transport request for clearing a search scroll * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClearScrollRequest extends ActionRequest implements ToXContentObject { private List<String> scrollIds; diff --git a/server/src/main/java/org/opensearch/action/search/ClearScrollRequestBuilder.java b/server/src/main/java/org/opensearch/action/search/ClearScrollRequestBuilder.java index 63f64e02a9dd2..c10417e756dd6 100644 --- a/server/src/main/java/org/opensearch/action/search/ClearScrollRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/search/ClearScrollRequestBuilder.java @@ -34,14 +34,16 @@ import org.opensearch.action.ActionRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import java.util.List; /** * Transport request builder for clearing a search scroll * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClearScrollRequestBuilder extends ActionRequestBuilder<ClearScrollRequest, ClearScrollResponse> { public ClearScrollRequestBuilder(OpenSearchClient client, ClearScrollAction action) { diff --git a/server/src/main/java/org/opensearch/action/search/ClearScrollResponse.java b/server/src/main/java/org/opensearch/action/search/ClearScrollResponse.java index b6cd5d5bb7c0e..e2580ccdd0969 100644 --- a/server/src/main/java/org/opensearch/action/search/ClearScrollResponse.java +++ b/server/src/main/java/org/opensearch/action/search/ClearScrollResponse.java @@ -32,28 +32,30 @@ package org.opensearch.action.search; -import org.opensearch.action.ActionResponse; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.xcontent.StatusToXContentObject; import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.ObjectParser; -import org.opensearch.common.xcontent.StatusToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.rest.RestStatus; import java.io.IOException; -import static org.opensearch.core.xcontent.ConstructingObjectParser.constructorArg; import static org.opensearch.core.rest.RestStatus.NOT_FOUND; import static org.opensearch.core.rest.RestStatus.OK; +import static org.opensearch.core.xcontent.ConstructingObjectParser.constructorArg; /** * Transport response for clearing a search scroll * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClearScrollResponse extends ActionResponse implements StatusToXContentObject { private static final ParseField SUCCEEDED = new ParseField("succeeded"); diff --git a/server/src/main/java/org/opensearch/action/search/CreatePitController.java b/server/src/main/java/org/opensearch/action/search/CreatePitController.java index 8d2ea3a03266c..87eb27bdb8255 100644 --- a/server/src/main/java/org/opensearch/action/search/CreatePitController.java +++ 
b/server/src/main/java/org/opensearch/action/search/CreatePitController.java @@ -12,17 +12,17 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; import org.opensearch.action.StepListener; import org.opensearch.action.support.GroupedActionListener; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.settings.Setting; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.index.shard.ShardId; import org.opensearch.search.SearchPhaseResult; import org.opensearch.search.SearchShardTarget; @@ -98,18 +98,18 @@ public void executeCreatePit( task.getParentTaskId(), Collections.emptyMap() ); - /** - * This is needed for cross cluster functionality to work with PITs and current ccsMinimizeRoundTrips is - * not supported for point in time + /* + This is needed for cross cluster functionality to work with PITs and current ccsMinimizeRoundTrips is + not supported for point in time */ searchRequest.setCcsMinimizeRoundtrips(false); - /** - * Phase 1 of create PIT + /* + Phase 1 of create PIT */ executeCreatePit(searchTask, searchRequest, createPitListener); - /** - * Phase 2 of create PIT where we update pit id in pit contexts + /* + Phase 2 of create PIT where we update pit id in pit contexts */ createPitListener.whenComplete( searchResponse -> { executeUpdatePitId(request, searchRequest, searchResponse, updatePitIdListener); }, @@ -167,9 +167,9 @@ void executeUpdatePitId( searchResponse.pointInTimeId() ) ); - /** - * store the create time ( same create time for all PIT contexts across shards ) to be used - * for list PIT api + /* + store the create time ( same create time for all PIT contexts across shards ) to be used + for list PIT api */ final long relativeStartNanos = System.nanoTime(); final TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider( diff --git a/server/src/main/java/org/opensearch/action/search/CreatePitRequest.java b/server/src/main/java/org/opensearch/action/search/CreatePitRequest.java index f496a22caff6d..840d4becda714 100644 --- a/server/src/main/java/org/opensearch/action/search/CreatePitRequest.java +++ b/server/src/main/java/org/opensearch/action/search/CreatePitRequest.java @@ -13,14 +13,15 @@ import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; import org.opensearch.common.Nullable; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.tasks.TaskId; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; import java.io.IOException; import java.util.Map; @@ -30,7 +31,10 @@ /** * A 
request to create a point in time against one or more indices. + * + * @opensearch.api */ +@PublicApi(since = "2.3.0") public class CreatePitRequest extends ActionRequest implements IndicesRequest.Replaceable, ToXContent { // keep alive for pit reader context diff --git a/server/src/main/java/org/opensearch/action/search/CreatePitResponse.java b/server/src/main/java/org/opensearch/action/search/CreatePitResponse.java index 2fb10cfeb727c..410b93afc3e65 100644 --- a/server/src/main/java/org/opensearch/action/search/CreatePitResponse.java +++ b/server/src/main/java/org/opensearch/action/search/CreatePitResponse.java @@ -8,14 +8,15 @@ package org.opensearch.action.search; -import org.opensearch.action.ActionResponse; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.xcontent.StatusToXContentObject; import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.StatusToXContentObject; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.action.RestActions; import java.io.IOException; @@ -26,7 +27,10 @@ /** * Create point in time response with point in time id and shard success / failures + * + * @opensearch.api */ +@PublicApi(since = "2.3.0") public class CreatePitResponse extends ActionResponse implements StatusToXContentObject { private static final ParseField ID = new ParseField("pit_id"); private static final ParseField CREATION_TIME = new ParseField("creation_time"); diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java b/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java index 1e616ab5ca16e..c534b306b1404 100644 --- a/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java +++ b/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java @@ -8,14 +8,15 @@ package org.opensearch.action.search; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.transport.TransportResponse; import java.io.IOException; @@ -23,7 +24,10 @@ /** * This class captures if deletion of pit is successful along with pit id + * + * @opensearch.api */ +@PublicApi(since = "2.3.0") public class DeletePitInfo extends TransportResponse implements Writeable, ToXContent { /** * This will be true if PIT reader contexts are deleted and also if contexts are not found. 
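The PIT request and response classes above all follow the same promotion recipe: the Javadoc tag flips from @opensearch.internal to @opensearch.api, and the class gains a @PublicApi annotation recording the release in which the type first shipped (2.3.0 for the PIT classes, 2.7.0 for search pipelines, 1.0.0 for types that date back to the fork). A sketch of what that promotion looks like on a hypothetical class (FooRequest is illustrative only):

    import org.opensearch.common.annotation.PublicApi;

    /**
     * A hypothetical request type being promoted to the public API surface.
     *
     * @opensearch.api
     */
    @PublicApi(since = "2.3.0") // the release in which this type first shipped
    public class FooRequest {
        // fields, constructors, and stream serialization stay as before; only the
        // annotation and the Javadoc tag change in a promotion like the ones above
    }
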
diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java b/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java index 1a66311cd9a1b..e21a63eef9433 100644 --- a/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java +++ b/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java @@ -11,6 +11,7 @@ import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContent; @@ -27,7 +28,10 @@ /** * Request to delete one or more PIT search contexts based on IDs. + * + * @opensearch.api */ +@PublicApi(since = "2.3.0") public class DeletePitRequest extends ActionRequest implements ToXContentObject { /** diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java b/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java index 0aabd838171ba..469d78ad73bd9 100644 --- a/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java +++ b/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java @@ -8,27 +8,31 @@ package org.opensearch.action.search; -import org.opensearch.action.ActionResponse; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.xcontent.StatusToXContentObject; import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ConstructingObjectParser; -import org.opensearch.common.xcontent.StatusToXContentObject; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.rest.RestStatus; import java.io.IOException; import java.util.ArrayList; import java.util.List; -import static org.opensearch.core.xcontent.ConstructingObjectParser.constructorArg; import static org.opensearch.core.rest.RestStatus.OK; +import static org.opensearch.core.xcontent.ConstructingObjectParser.constructorArg; /** * Response class for delete pits flow which clears the point in time search contexts + * + * @opensearch.api */ +@PublicApi(since = "2.3.0") public class DeletePitResponse extends ActionResponse implements StatusToXContentObject { private final List<DeletePitInfo> deletePitResults; diff --git a/server/src/main/java/org/opensearch/action/search/DeleteSearchPipelineRequest.java b/server/src/main/java/org/opensearch/action/search/DeleteSearchPipelineRequest.java index 2c6ab6437fd2a..0cde81203063c 100644 --- a/server/src/main/java/org/opensearch/action/search/DeleteSearchPipelineRequest.java +++ b/server/src/main/java/org/opensearch/action/search/DeleteSearchPipelineRequest.java @@ -10,6 +10,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -19,8 +20,9 @@ /** * Request to delete a search pipeline * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.7.0") public class 
DeleteSearchPipelineRequest extends AcknowledgedRequest<DeleteSearchPipelineRequest> { private String id; diff --git a/server/src/main/java/org/opensearch/action/search/DeleteSearchPipelineTransportAction.java b/server/src/main/java/org/opensearch/action/search/DeleteSearchPipelineTransportAction.java index 918583b3d510b..ac83a6bb6b765 100644 --- a/server/src/main/java/org/opensearch/action/search/DeleteSearchPipelineTransportAction.java +++ b/server/src/main/java/org/opensearch/action/search/DeleteSearchPipelineTransportAction.java @@ -8,7 +8,6 @@ package org.opensearch.action.search; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.action.support.master.AcknowledgedResponse; @@ -17,6 +16,7 @@ import org.opensearch.cluster.block.ClusterBlockLevel; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.search.pipeline.SearchPipelineService; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/action/search/ExpandSearchPhase.java b/server/src/main/java/org/opensearch/action/search/ExpandSearchPhase.java index 618a5620ce093..e249fb239dae9 100644 --- a/server/src/main/java/org/opensearch/action/search/ExpandSearchPhase.java +++ b/server/src/main/java/org/opensearch/action/search/ExpandSearchPhase.java @@ -32,8 +32,8 @@ package org.opensearch.action.search; -import org.opensearch.action.ActionListener; import org.opensearch.common.util.concurrent.AtomicArray; +import org.opensearch.core.action.ActionListener; import org.opensearch.index.query.BoolQueryBuilder; import org.opensearch.index.query.InnerHitBuilder; import org.opensearch.index.query.QueryBuilder; diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodeResponse.java b/server/src/main/java/org/opensearch/action/search/GetAllPitNodeResponse.java index 9bbc81c0d6a4c..91353240a8156 100644 --- a/server/src/main/java/org/opensearch/action/search/GetAllPitNodeResponse.java +++ b/server/src/main/java/org/opensearch/action/search/GetAllPitNodeResponse.java @@ -11,6 +11,7 @@ import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContentFragment; @@ -22,7 +23,10 @@ /** * Inner node get all pits response + * + * @opensearch.api */ +@PublicApi(since = "2.3.0") public class GetAllPitNodeResponse extends BaseNodeResponse implements ToXContentFragment { /** diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java index 948fe72eae817..336c8139561e9 100644 --- a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java +++ b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java @@ -10,6 +10,7 @@ import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Inject; import 
org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -18,7 +19,10 @@ /** * Request to get all active PIT IDs from all nodes of cluster + * + * @opensearch.api */ +@PublicApi(since = "2.3.0") public class GetAllPitNodesRequest extends BaseNodesRequest<GetAllPitNodesRequest> { @Inject diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java index 055eb84ab3811..8d858a00c409b 100644 --- a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java +++ b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java @@ -11,6 +11,7 @@ import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.nodes.BaseNodesResponse; import org.opensearch.cluster.ClusterName; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -31,7 +32,10 @@ /** * This class transforms active PIT objects from all nodes to unique PIT objects + * + * @opensearch.api */ +@PublicApi(since = "2.3.0") public class GetAllPitNodesResponse extends BaseNodesResponse<GetAllPitNodeResponse> implements ToXContentObject { /** @@ -41,6 +45,12 @@ public class GetAllPitNodesResponse extends BaseNodesResponse<GetAllPitNodeRespo public GetAllPitNodesResponse(StreamInput in) throws IOException { super(in); + Set<String> uniquePitIds = new HashSet<>(); + pitInfos.addAll( + getNodes().stream() + .flatMap(p -> p.getPitInfos().stream().filter(t -> uniquePitIds.add(t.getPitId()))) + .collect(Collectors.toList()) + ); } public GetAllPitNodesResponse(
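The new stream-constructor body above de-duplicates PIT entries across nodes with a stateful filter: Set.add returns false for an ID that has been seen before, so only the first occurrence of each PIT ID survives the flatMap. A self-contained sketch of the idiom (the PitInfo record stands in for ListPitInfo); note that, as in the patch, it relies on the stream staying sequential:

    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;
    import java.util.stream.Collectors;

    final class DedupByKey {
        // Stand-in for ListPitInfo: an ID plus a payload field.
        record PitInfo(String pitId, long creationTime) {}

        public static void main(String[] args) {
            List<List<PitInfo>> perNode = List.of(
                List.of(new PitInfo("a", 1), new PitInfo("b", 2)),
                List.of(new PitInfo("b", 2), new PitInfo("c", 3)) // "b" reported by two nodes
            );

            // Set.add returns true only the first time an ID is seen, so this filter
            // keeps one entry per PIT ID while preserving encounter order.
            Set<String> seen = new HashSet<>();
            List<PitInfo> unique = perNode.stream()
                .flatMap(List::stream)
                .filter(p -> seen.add(p.pitId()))
                .collect(Collectors.toList());

            System.out.println(unique); // [PitInfo[pitId=a, creationTime=1], PitInfo[pitId=b, creationTime=2], PitInfo[pitId=c, creationTime=3]]
        }
    }
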
diff --git a/server/src/main/java/org/opensearch/action/search/GetSearchPipelineRequest.java b/server/src/main/java/org/opensearch/action/search/GetSearchPipelineRequest.java index e9ff2fbf10f79..59e95aa87985a 100644 --- a/server/src/main/java/org/opensearch/action/search/GetSearchPipelineRequest.java +++ b/server/src/main/java/org/opensearch/action/search/GetSearchPipelineRequest.java @@ -10,9 +10,10 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import java.io.IOException; import java.util.Objects; @@ -20,8 +21,9 @@ /** * Request to get search pipelines * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.7.0") public class GetSearchPipelineRequest extends ClusterManagerNodeReadRequest<GetSearchPipelineRequest> { private final String[] ids; diff --git a/server/src/main/java/org/opensearch/action/search/GetSearchPipelineResponse.java b/server/src/main/java/org/opensearch/action/search/GetSearchPipelineResponse.java index 4211839ce6569..0379046c8275d 100644 --- a/server/src/main/java/org/opensearch/action/search/GetSearchPipelineResponse.java +++ b/server/src/main/java/org/opensearch/action/search/GetSearchPipelineResponse.java @@ -8,16 +8,17 @@ package org.opensearch.action.search; -import org.opensearch.action.ActionResponse; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.xcontent.StatusToXContentObject; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.StatusToXContentObject; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.rest.RestStatus; import org.opensearch.search.pipeline.PipelineConfiguration; import java.io.IOException; @@ -32,8 +33,9 @@ /** * transport response for getting a search pipeline * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.7.0") public class GetSearchPipelineResponse extends ActionResponse implements StatusToXContentObject { private final List<PipelineConfiguration> pipelines; @@ -129,7 +131,7 @@ private static Map<String, PipelineConfiguration> toMap(List<PipelineConfigurati @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } @Override diff --git a/server/src/main/java/org/opensearch/action/search/GetSearchPipelineTransportAction.java b/server/src/main/java/org/opensearch/action/search/GetSearchPipelineTransportAction.java index 3b8c872e7a2f5..a7fcb8f1cfbae 100644 --- a/server/src/main/java/org/opensearch/action/search/GetSearchPipelineTransportAction.java +++ b/server/src/main/java/org/opensearch/action/search/GetSearchPipelineTransportAction.java @@ -8,7 +8,6 @@ package org.opensearch.action.search; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; import org.opensearch.cluster.ClusterState; @@ -17,6 +16,7 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.search.pipeline.SearchPipelineService; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/action/search/ListPitInfo.java b/server/src/main/java/org/opensearch/action/search/ListPitInfo.java index e120507f4d47a..7e4ed186dd665 100644 --- a/server/src/main/java/org/opensearch/action/search/ListPitInfo.java +++ b/server/src/main/java/org/opensearch/action/search/ListPitInfo.java @@ -8,6 +8,7 @@ package org.opensearch.action.search; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -17,12 +18,16 @@ import org.opensearch.core.xcontent.XContentBuilder; import java.io.IOException; +import java.util.Objects; import static org.opensearch.core.xcontent.ConstructingObjectParser.constructorArg; /** * This holds information about pit reader context such as pit id and creation time + * + * @opensearch.api */ +@PublicApi(since = "2.3.0") public class ListPitInfo implements ToXContentFragment, Writeable { private final String pitId; private final long creationTime; @@ -80,4 +85,17 @@ public 
XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ListPitInfo that = (ListPitInfo) o; + return pitId.equals(that.pitId) && creationTime == that.creationTime && keepAlive == that.keepAlive; + } + + @Override + public int hashCode() { + return Objects.hash(pitId, creationTime, keepAlive); + } + } diff --git a/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java b/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java index fa8b5b9470723..5b887b48f696e 100644 --- a/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java +++ b/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java @@ -38,11 +38,13 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.IndicesOptions.WildcardStates; import org.opensearch.common.CheckedBiConsumer; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.logging.DeprecationLogger; -import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.core.tasks.TaskId; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContent; @@ -50,7 +52,6 @@ import org.opensearch.core.xcontent.XContentParser; import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; import java.io.ByteArrayOutputStream; import java.io.IOException; @@ -71,8 +72,9 @@ /** * A multi search API request. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MultiSearchRequest extends ActionRequest implements CompositeIndicesRequest { public static final int MAX_CONCURRENT_SEARCH_REQUESTS_DEFAULT = 0; @@ -277,6 +279,8 @@ public static void readMultiLineFormat( } else if ("cancel_after_time_interval".equals(entry.getKey()) || "cancelAfterTimeInterval".equals(entry.getKey())) { searchRequest.setCancelAfterTimeInterval(nodeTimeValue(value, null)); + } else if ("phase_took".equals(entry.getKey())) { + searchRequest.setPhaseTook(nodeBooleanValue(value)); } else { throw new IllegalArgumentException("key [" + entry.getKey() + "] is not supported in the metadata section"); } @@ -374,6 +378,9 @@ public static void writeSearchRequestParams(SearchRequest request, XContentBuild if (request.getCancelAfterTimeInterval() != null) { xContentBuilder.field("cancel_after_time_interval", request.getCancelAfterTimeInterval().getStringRep()); } + if (request.isPhaseTook() != null) { + xContentBuilder.field("phase_took", request.isPhaseTook()); + } xContentBuilder.endObject(); } @@ -391,4 +398,16 @@ public boolean shouldCancelChildrenOnCancellation() { } }; } + + @Override + public String toString() { + return "MultiSearchRequest{" + + "maxConcurrentSearchRequests=" + + maxConcurrentSearchRequests + + ", requests=" + + requests + + ", indicesOptions=" + + indicesOptions + + '}'; + } }
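With the readMultiLineFormat and writeSearchRequestParams changes above, each metadata line of an msearch body can now carry a phase_took flag, which round-trips through SearchRequest.setPhaseTook/isPhaseTook. A hedged sketch of building such a request programmatically (the index patterns are illustrative):

    import org.opensearch.action.search.MultiSearchRequest;
    import org.opensearch.action.search.SearchRequest;

    final class PhaseTookExample {
        // Build an msearch where one sub-request opts in to per-phase took timings.
        static MultiSearchRequest build() {
            SearchRequest timed = new SearchRequest("logs-*");
            timed.setPhaseTook(true); // serialized as "phase_took": true in the metadata line

            MultiSearchRequest msearch = new MultiSearchRequest();
            msearch.add(timed);
            msearch.add(new SearchRequest("metrics-*")); // leaves phase_took unset
            return msearch;
        }
    }
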
diff --git a/server/src/main/java/org/opensearch/action/search/MultiSearchRequestBuilder.java b/server/src/main/java/org/opensearch/action/search/MultiSearchRequestBuilder.java index f9bb90c69d925..3bf078e8ce793 100644 --- a/server/src/main/java/org/opensearch/action/search/MultiSearchRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/search/MultiSearchRequestBuilder.java @@ -35,12 +35,14 @@ import org.opensearch.action.ActionRequestBuilder; import org.opensearch.action.support.IndicesOptions; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * A request builder for multiple search requests. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MultiSearchRequestBuilder extends ActionRequestBuilder<MultiSearchRequest, MultiSearchResponse> { public MultiSearchRequestBuilder(OpenSearchClient client, MultiSearchAction action) { diff --git a/server/src/main/java/org/opensearch/action/search/MultiSearchResponse.java b/server/src/main/java/org/opensearch/action/search/MultiSearchResponse.java index c576d87d85b0f..70bb0b99e69df 100644 --- a/server/src/main/java/org/opensearch/action/search/MultiSearchResponse.java +++ b/server/src/main/java/org/opensearch/action/search/MultiSearchResponse.java @@ -34,16 +34,17 @@ import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionResponse; import org.opensearch.common.Nullable; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -59,8 +60,9 @@ /** * A multi search response. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MultiSearchResponse extends ActionResponse implements Iterable<MultiSearchResponse.Item>, ToXContentObject { private static final ParseField RESPONSES = new ParseField(Fields.RESPONSES); @@ -78,8 +80,9 @@ public class MultiSearchResponse extends ActionResponse implements Iterable<Mult /** * A search response item, holding the actual search response, or an error message if it failed. 
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Item implements Writeable { private final SearchResponse response; private final Exception exception; @@ -255,6 +258,6 @@ static final class Fields { @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } } diff --git a/server/src/main/java/org/opensearch/action/search/ParsedScrollId.java b/server/src/main/java/org/opensearch/action/search/ParsedScrollId.java index 70be9a0b19e08..b723b97b5c413 100644 --- a/server/src/main/java/org/opensearch/action/search/ParsedScrollId.java +++ b/server/src/main/java/org/opensearch/action/search/ParsedScrollId.java @@ -32,13 +32,16 @@ package org.opensearch.action.search; +import org.opensearch.common.annotation.PublicApi; + import java.util.Arrays; /** * Search scroll id that has been parsed * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ParsedScrollId { public static final String QUERY_THEN_FETCH_TYPE = "queryThenFetch"; diff --git a/server/src/main/java/org/opensearch/action/search/PitService.java b/server/src/main/java/org/opensearch/action/search/PitService.java index 336c0f2c49793..b6480ce63f827 100644 --- a/server/src/main/java/org/opensearch/action/search/PitService.java +++ b/server/src/main/java/org/opensearch/action/search/PitService.java @@ -11,15 +11,15 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.action.StepListener; import org.opensearch.action.support.GroupedActionListener; import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportException; @@ -71,6 +71,7 @@ public void deletePitContexts( ) { if (nodeToContextsMap.size() == 0) { listener.onResponse(new DeletePitResponse(Collections.emptyList())); + return; } final Set<String> clusters = nodeToContextsMap.values() .stream() diff --git a/server/src/main/java/org/opensearch/action/search/PutSearchPipelineRequest.java b/server/src/main/java/org/opensearch/action/search/PutSearchPipelineRequest.java index d32aab0c8a561..15b4ea648af29 100644 --- a/server/src/main/java/org/opensearch/action/search/PutSearchPipelineRequest.java +++ b/server/src/main/java/org/opensearch/action/search/PutSearchPipelineRequest.java @@ -11,10 +11,11 @@ import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.ToXContentObject; import 
org.opensearch.core.xcontent.XContentBuilder; @@ -25,8 +26,9 @@ /** * Request to put a search pipeline * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.7.0") public class PutSearchPipelineRequest extends AcknowledgedRequest<PutSearchPipelineRequest> implements ToXContentObject { private String id; private BytesReference source; @@ -47,7 +49,7 @@ public PutSearchPipelineRequest(StreamInput in) throws IOException { super(in); id = in.readString(); source = in.readBytesReference(); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_10_0)) { mediaType = in.readMediaType(); } else { mediaType = in.readEnum(XContentType.class); @@ -76,7 +78,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(id); out.writeBytesReference(source); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { mediaType.writeTo(out); } else { out.writeEnum((XContentType) mediaType); diff --git a/server/src/main/java/org/opensearch/action/search/PutSearchPipelineTransportAction.java b/server/src/main/java/org/opensearch/action/search/PutSearchPipelineTransportAction.java index da4a405c84555..903b7dfce09c0 100644 --- a/server/src/main/java/org/opensearch/action/search/PutSearchPipelineTransportAction.java +++ b/server/src/main/java/org/opensearch/action/search/PutSearchPipelineTransportAction.java @@ -8,7 +8,6 @@ package org.opensearch.action.search; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.node.info.NodeInfo; import org.opensearch.action.admin.cluster.node.info.NodesInfoRequest; import org.opensearch.action.support.ActionFilters; @@ -22,9 +21,10 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.search.pipeline.SearchPipelineService; import org.opensearch.search.pipeline.SearchPipelineInfo; +import org.opensearch.search.pipeline.SearchPipelineService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -82,7 +82,7 @@ protected void clusterManagerOperation( ClusterState state, ActionListener<AcknowledgedResponse> listener ) throws Exception { - NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(); + NodesInfoRequest nodesInfoRequest = new NodesInfoRequest().clear().addMetric(NodesInfoRequest.Metric.SEARCH_PIPELINES.metricName()); client.admin().cluster().nodesInfo(nodesInfoRequest, ActionListener.wrap(nodeInfos -> { Map<DiscoveryNode, SearchPipelineInfo> searchPipelineInfos = new HashMap<>(); for (NodeInfo nodeInfo : nodeInfos.getNodes()) { diff --git a/server/src/main/java/org/opensearch/action/search/QueryPhaseResultConsumer.java b/server/src/main/java/org/opensearch/action/search/QueryPhaseResultConsumer.java index 45c2dc4f29403..f1b06378bd579 100644 --- a/server/src/main/java/org/opensearch/action/search/QueryPhaseResultConsumer.java +++ b/server/src/main/java/org/opensearch/action/search/QueryPhaseResultConsumer.java @@ -35,13 +35,13 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.search.TopDocs; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.breaker.CircuitBreakingException; -import 
org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.common.lucene.search.TopDocsAndMaxScore; -import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; +import org.opensearch.common.lucene.search.TopDocsAndMaxScore; +import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.CircuitBreakingException; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.search.SearchPhaseResult; import org.opensearch.search.SearchShardTarget; import org.opensearch.search.aggregations.InternalAggregation.ReduceContextBuilder; diff --git a/server/src/main/java/org/opensearch/action/search/SearchActionListener.java b/server/src/main/java/org/opensearch/action/search/SearchActionListener.java index 6983eed18b7b3..c032012b0b45d 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchActionListener.java +++ b/server/src/main/java/org/opensearch/action/search/SearchActionListener.java @@ -31,7 +31,7 @@ package org.opensearch.action.search; -import org.opensearch.action.ActionListener; +import org.opensearch.core.action.ActionListener; import org.opensearch.search.SearchPhaseResult; import org.opensearch.search.SearchShardTarget; diff --git a/server/src/main/java/org/opensearch/action/search/SearchContextId.java b/server/src/main/java/org/opensearch/action/search/SearchContextId.java index 8c4c33d115355..41887f09e3fca 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchContextId.java +++ b/server/src/main/java/org/opensearch/action/search/SearchContextId.java @@ -33,14 +33,14 @@ package org.opensearch.action.search; import org.opensearch.Version; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.ByteBufferStreamInput; -import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import org.opensearch.core.index.shard.ShardId; import org.opensearch.search.SearchPhaseResult; import org.opensearch.search.SearchShardTarget; diff --git a/server/src/main/java/org/opensearch/action/search/SearchContextIdForNode.java b/server/src/main/java/org/opensearch/action/search/SearchContextIdForNode.java index 59c0e54bb6cbc..7c702c16030b7 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchContextIdForNode.java +++ b/server/src/main/java/org/opensearch/action/search/SearchContextIdForNode.java @@ -33,6 +33,7 @@ package org.opensearch.action.search; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -43,8 +44,9 @@ /** * Id for a search context per node. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class SearchContextIdForNode implements Writeable { private final String node; private final ShardSearchContextId searchContextId; diff --git a/server/src/main/java/org/opensearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/SearchDfsQueryThenFetchAsyncAction.java index 71a986c0e15f7..87c5dd034a17e 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchDfsQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/SearchDfsQueryThenFetchAsyncAction.java @@ -33,14 +33,15 @@ package org.opensearch.action.search; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.routing.GroupShardsIterator; +import org.opensearch.core.action.ActionListener; import org.opensearch.search.SearchShardTarget; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.dfs.AggregatedDfs; import org.opensearch.search.dfs.DfsSearchResult; import org.opensearch.search.internal.AliasFilter; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.transport.Transport; import java.util.List; @@ -76,10 +77,12 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction final TransportSearchAction.SearchTimeProvider timeProvider, final ClusterState clusterState, final SearchTask task, - SearchResponse.Clusters clusters + SearchResponse.Clusters clusters, + SearchRequestContext searchRequestContext, + final Tracer tracer ) { super( - "dfs", + SearchPhaseName.DFS_PRE_QUERY.getName(), logger, searchTransportService, nodeIdToConnection, @@ -95,7 +98,9 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction task, new ArraySearchPhaseResults<>(shardsIts.size()), request.getMaxConcurrentShardRequests(), - clusters + clusters, + searchRequestContext, + tracer ); this.queryPhaseResultConsumer = queryPhaseResultConsumer; this.searchPhaseController = searchPhaseController; diff --git a/server/src/main/java/org/opensearch/action/search/SearchExecutionStatsCollector.java b/server/src/main/java/org/opensearch/action/search/SearchExecutionStatsCollector.java index 7082e33dfd5c5..842e87b3eb635 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchExecutionStatsCollector.java +++ b/server/src/main/java/org/opensearch/action/search/SearchExecutionStatsCollector.java @@ -32,7 +32,7 @@ package org.opensearch.action.search; -import org.opensearch.action.ActionListener; +import org.opensearch.core.action.ActionListener; import org.opensearch.node.ResponseCollectorService; import org.opensearch.search.SearchPhaseResult; import org.opensearch.search.fetch.QueryFetchSearchResult; diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhase.java b/server/src/main/java/org/opensearch/action/search/SearchPhase.java index 50b0cd8e01c1d..0890e9f5de8d4 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchPhase.java +++ b/server/src/main/java/org/opensearch/action/search/SearchPhase.java @@ -32,6 +32,7 @@ package org.opensearch.action.search; import org.opensearch.common.CheckedRunnable; +import org.opensearch.common.annotation.PublicApi; import java.io.IOException; import java.util.Locale; @@ -40,15 +41,26 @@ /** * Base class for all individual search phases like collecting distributed frequencies, fetching documents, 
querying shards. * - * @opensearch.internal + * @opensearch.api */ -abstract class SearchPhase implements CheckedRunnable<IOException> { +@PublicApi(since = "1.0.0") +public abstract class SearchPhase implements CheckedRunnable<IOException> { private final String name; + private long startTimeInNanos; protected SearchPhase(String name) { this.name = Objects.requireNonNull(name, "name must not be null"); } + public long getStartTimeInNanos() { + return startTimeInNanos; + } + + public void recordAndRun() throws IOException { + this.startTimeInNanos = System.nanoTime(); + run(); + } + /** * Returns the phases name. */ diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java b/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java index 018035f21179b..df451e0745e3c 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java +++ b/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java @@ -34,8 +34,9 @@ import org.apache.logging.log4j.Logger; import org.opensearch.action.OriginalIndices; import org.opensearch.common.Nullable; -import org.opensearch.common.util.concurrent.AtomicArray; +import org.opensearch.common.annotation.InternalApi; import org.opensearch.common.lease.Releasable; +import org.opensearch.common.util.concurrent.AtomicArray; import org.opensearch.search.SearchPhaseResult; import org.opensearch.search.SearchShardTarget; import org.opensearch.search.internal.InternalSearchResponse; @@ -50,6 +51,7 @@ * * @opensearch.internal */ +@InternalApi public interface SearchPhaseContext extends Executor { // TODO maybe we can make this concrete later - for now we just implement this in the base class for all initial phases @@ -73,6 +75,8 @@ public interface SearchPhaseContext extends Executor { */ SearchRequest getRequest(); + SearchPhase getCurrentPhase(); + /** * Builds and sends the final search response back to the user. * diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java b/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java index 512d3295c4cfc..161a103cdf36a 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java @@ -45,9 +45,9 @@ import org.apache.lucene.search.TotalHits; import org.apache.lucene.search.TotalHits.Relation; import org.apache.lucene.search.grouping.CollapseTopFieldDocs; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.lucene.search.TopDocsAndMaxScore; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.search.DocValueFormat; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; @@ -162,7 +162,7 @@ public AggregatedDfs aggregateDfs(Collection<DfsSearchResult> results) { * Returns a score doc array of top N search docs across all shards, followed by top suggest docs for each * named completion suggestion across all shards. If more than one named completion suggestion is specified in the * request, the suggest docs for a named suggestion are ordered by the suggestion name. - * + * <p> * Note: The order of the sorted score docs depends on the shard index in the result array if the merge process needs to disambiguate * the result. 
In order to obtain stable results the shard index (index of the result in the result array) must be the same. * @@ -284,7 +284,7 @@ public List<Integer>[] fillDocIdsToLoad(int numShards, ScoreDoc[] shardDocs) { /** * Enriches search hits and completion suggestion hits from <code>sortedDocs</code> using <code>fetchResultsArr</code>, * merges suggestions, aggregations and profile results - * + * <p> * Expects sortedDocs to have top search docs across all shards, optionally followed by top suggest docs for each named * completion suggestion ordered by suggestion name */ diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhaseExecutionException.java b/server/src/main/java/org/opensearch/action/search/SearchPhaseExecutionException.java index b3ed42824e91a..9ede6b4e3eb1b 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchPhaseExecutionException.java +++ b/server/src/main/java/org/opensearch/action/search/SearchPhaseExecutionException.java @@ -32,14 +32,14 @@ package org.opensearch.action.search; -import org.opensearch.OpenSearchException; import org.opensearch.ExceptionsHelper; +import org.opensearch.OpenSearchException; import org.opensearch.core.action.ShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.util.CollectionUtils; -import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.XContentBuilder; import java.io.IOException; import java.util.ArrayList; diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhaseName.java b/server/src/main/java/org/opensearch/action/search/SearchPhaseName.java index b6f842cf2cce1..8cf92934c8a52 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchPhaseName.java +++ b/server/src/main/java/org/opensearch/action/search/SearchPhaseName.java @@ -8,11 +8,16 @@ package org.opensearch.action.search; +import org.opensearch.common.annotation.PublicApi; + /** * Enum for different Search Phases in OpenSearch - * @opensearch.internal + * + * @opensearch.api */ +@PublicApi(since = "2.9.0") public enum SearchPhaseName { + DFS_PRE_QUERY("dfs_pre_query"), QUERY("query"), FETCH("fetch"), DFS_QUERY("dfs_query"), diff --git a/server/src/main/java/org/opensearch/action/search/SearchProgressActionListener.java b/server/src/main/java/org/opensearch/action/search/SearchProgressActionListener.java index 3f24eea0bfe1e..320bebfa6a9f4 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchProgressActionListener.java +++ b/server/src/main/java/org/opensearch/action/search/SearchProgressActionListener.java @@ -32,7 +32,7 @@ package org.opensearch.action.search; -import org.opensearch.action.ActionListener; +import org.opensearch.core.action.ActionListener; /** * An {@link ActionListener} for search requests that allows tracking the progress of the {@link SearchAction}.
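The SearchProgressActionListener hunk above and the SearchProgressListener change just below promote the search-progress surface to public API. As a hedged sketch of what a consumer could look like: the protected per-shard hooks onQueryResult(int) and onFetchResult(int) are assumed from the wider SearchProgressListener class, which this patch only touches for imports and annotations, so they are not visible in the hunks here.

import java.util.concurrent.atomic.AtomicInteger;

/*
 * Sketch only: counts per-shard query and fetch results via the protected
 * hooks assumed on SearchProgressListener (onQueryResult / onFetchResult
 * are not shown in the hunks above).
 */
class ShardCountingProgressListener extends org.opensearch.action.search.SearchProgressListener {
    private final AtomicInteger queryResults = new AtomicInteger();
    private final AtomicInteger fetchResults = new AtomicInteger();

    @Override
    protected void onQueryResult(int shardIndex) {
        // invoked once for each shard that completes the query phase
        queryResults.incrementAndGet();
    }

    @Override
    protected void onFetchResult(int shardIndex) {
        // invoked once for each shard that completes the fetch phase
        fetchResults.incrementAndGet();
    }

    int completedQueryShards() {
        return queryResults.get();
    }

    int completedFetchShards() {
        return fetchResults.get();
    }
}

With SearchProgressListener annotated @PublicApi, a subclass like this can live outside the server module without relying on internal contracts.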
diff --git a/server/src/main/java/org/opensearch/action/search/SearchProgressListener.java b/server/src/main/java/org/opensearch/action/search/SearchProgressListener.java index ffc64682cb07d..34e8aacbad250 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchProgressListener.java +++ b/server/src/main/java/org/opensearch/action/search/SearchProgressListener.java @@ -38,6 +38,7 @@ import org.apache.lucene.search.TotalHits; import org.opensearch.action.search.SearchResponse.Clusters; import org.opensearch.cluster.routing.GroupShardsIterator; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.search.SearchPhaseResult; import org.opensearch.search.SearchShardTarget; import org.opensearch.search.aggregations.InternalAggregations; @@ -53,6 +54,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class SearchProgressListener { private static final Logger logger = LogManager.getLogger(SearchProgressListener.class); diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryAggregationCategorizer.java b/server/src/main/java/org/opensearch/action/search/SearchQueryAggregationCategorizer.java new file mode 100644 index 0000000000000..607ccf182851b --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/SearchQueryAggregationCategorizer.java @@ -0,0 +1,55 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.search.aggregations.AggregationBuilder; +import org.opensearch.search.aggregations.PipelineAggregationBuilder; +import org.opensearch.telemetry.metrics.tags.Tags; + +import java.util.Collection; + +/** + * Increments the counters related to Aggregation Search Queries. 
+ */ +public class SearchQueryAggregationCategorizer { + + private static final String TYPE_TAG = "type"; + private final SearchQueryCounters searchQueryCounters; + + public SearchQueryAggregationCategorizer(SearchQueryCounters searchQueryCounters) { + this.searchQueryCounters = searchQueryCounters; + } + + public void incrementSearchQueryAggregationCounters(Collection<AggregationBuilder> aggregatorFactories) { + for (AggregationBuilder aggregationBuilder : aggregatorFactories) { + incrementCountersRecursively(aggregationBuilder); + } + } + + private void incrementCountersRecursively(AggregationBuilder aggregationBuilder) { + // Increment counters for the current aggregation + String aggregationType = aggregationBuilder.getType(); + searchQueryCounters.aggCounter.add(1, Tags.create().addTag(TYPE_TAG, aggregationType)); + + // Recursively process sub-aggregations if any + Collection<AggregationBuilder> subAggregations = aggregationBuilder.getSubAggregations(); + if (subAggregations != null && !subAggregations.isEmpty()) { + for (AggregationBuilder subAggregation : subAggregations) { + incrementCountersRecursively(subAggregation); + } + } + + // Process pipeline aggregations + Collection<PipelineAggregationBuilder> pipelineAggregations = aggregationBuilder.getPipelineAggregations(); + for (PipelineAggregationBuilder pipelineAggregation : pipelineAggregations) { + String pipelineAggregationType = pipelineAggregation.getType(); + searchQueryCounters.aggCounter.add(1, Tags.create().addTag(TYPE_TAG, pipelineAggregationType)); + } + } +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizer.java b/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizer.java new file mode 100644 index 0000000000000..ffaae5b08772f --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizer.java @@ -0,0 +1,85 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryBuilderVisitor; +import org.opensearch.index.query.QueryShapeVisitor; +import org.opensearch.search.aggregations.AggregatorFactories; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.search.sort.SortBuilder; +import org.opensearch.telemetry.metrics.MetricsRegistry; +import org.opensearch.telemetry.metrics.tags.Tags; + +import java.util.List; +import java.util.ListIterator; + +/** + * Class to categorize the search queries based on the type and increment the relevant counters. + * Class also logs the query shape. 
+ */ +final class SearchQueryCategorizer { + + private static final Logger log = LogManager.getLogger(SearchQueryCategorizer.class); + + final SearchQueryCounters searchQueryCounters; + + final SearchQueryAggregationCategorizer searchQueryAggregationCategorizer; + + public SearchQueryCategorizer(MetricsRegistry metricsRegistry) { + searchQueryCounters = new SearchQueryCounters(metricsRegistry); + searchQueryAggregationCategorizer = new SearchQueryAggregationCategorizer(searchQueryCounters); + } + + public void categorize(SearchSourceBuilder source) { + QueryBuilder topLevelQueryBuilder = source.query(); + logQueryShape(topLevelQueryBuilder); + incrementQueryTypeCounters(topLevelQueryBuilder); + incrementQueryAggregationCounters(source.aggregations()); + incrementQuerySortCounters(source.sorts()); + } + + private void incrementQuerySortCounters(List<SortBuilder<?>> sorts) { + if (sorts != null && sorts.size() > 0) { + for (ListIterator<SortBuilder<?>> it = sorts.listIterator(); it.hasNext();) { + SortBuilder sortBuilder = it.next(); + String sortOrder = sortBuilder.order().toString(); + searchQueryCounters.sortCounter.add(1, Tags.create().addTag("sort_order", sortOrder)); + } + } + } + + private void incrementQueryAggregationCounters(AggregatorFactories.Builder aggregations) { + if (aggregations == null) { + return; + } + + searchQueryAggregationCategorizer.incrementSearchQueryAggregationCounters(aggregations.getAggregatorFactories()); + } + + private void incrementQueryTypeCounters(QueryBuilder topLevelQueryBuilder) { + if (topLevelQueryBuilder == null) { + return; + } + QueryBuilderVisitor searchQueryVisitor = new SearchQueryCategorizingVisitor(searchQueryCounters); + topLevelQueryBuilder.visit(searchQueryVisitor); + } + + private void logQueryShape(QueryBuilder topLevelQueryBuilder) { + if (topLevelQueryBuilder == null) { + return; + } + QueryShapeVisitor shapeVisitor = new QueryShapeVisitor(); + topLevelQueryBuilder.visit(shapeVisitor); + log.trace("Query shape : {}", shapeVisitor.prettyPrintTree(" ")); + } + +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizingVisitor.java b/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizingVisitor.java new file mode 100644 index 0000000000000..31f83dbef9dc9 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizingVisitor.java @@ -0,0 +1,39 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.apache.lucene.search.BooleanClause; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryBuilderVisitor; + +/** + * Class to visit the query builder tree and also track the level information. + * Increments the counters related to Search Query type. 
+ */ +final class SearchQueryCategorizingVisitor implements QueryBuilderVisitor { + private final int level; + private final SearchQueryCounters searchQueryCounters; + + public SearchQueryCategorizingVisitor(SearchQueryCounters searchQueryCounters) { + this(searchQueryCounters, 0); + } + + private SearchQueryCategorizingVisitor(SearchQueryCounters counters, int level) { + this.searchQueryCounters = counters; + this.level = level; + } + + public void accept(QueryBuilder qb) { + searchQueryCounters.incrementCounter(qb, level); + } + + public QueryBuilderVisitor getChildVisitor(BooleanClause.Occur occur) { + return new SearchQueryCategorizingVisitor(searchQueryCounters, level + 1); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryCounters.java b/server/src/main/java/org/opensearch/action/search/SearchQueryCounters.java new file mode 100644 index 0000000000000..a8a7e352b89dc --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/SearchQueryCounters.java @@ -0,0 +1,70 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.telemetry.metrics.Counter; +import org.opensearch.telemetry.metrics.MetricsRegistry; +import org.opensearch.telemetry.metrics.tags.Tags; + +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +/** + * Class contains all the Counters related to search query types. + */ +final class SearchQueryCounters { + private static final String LEVEL_TAG = "level"; + private static final String UNIT = "1"; + private final MetricsRegistry metricsRegistry; + public final Counter aggCounter; + public final Counter otherQueryCounter; + public final Counter sortCounter; + private final Map<Class<? extends QueryBuilder>, Counter> queryHandlers; + public final ConcurrentHashMap<String, Counter> nameToQueryTypeCounters; + + public SearchQueryCounters(MetricsRegistry metricsRegistry) { + this.metricsRegistry = metricsRegistry; + this.nameToQueryTypeCounters = new ConcurrentHashMap<>(); + this.aggCounter = metricsRegistry.createCounter( + "search.query.type.agg.count", + "Counter for the number of top level agg search queries", + UNIT + ); + this.otherQueryCounter = metricsRegistry.createCounter( + "search.query.type.other.count", + "Counter for the number of top level and nested search queries that do not match any other categories", + UNIT + ); + this.sortCounter = metricsRegistry.createCounter( + "search.query.type.sort.count", + "Counter for the number of top level sort search queries", + UNIT + ); + this.queryHandlers = new HashMap<>(); + + } + + public void incrementCounter(QueryBuilder queryBuilder, int level) { + String uniqueQueryCounterName = queryBuilder.getName(); + + Counter counter = nameToQueryTypeCounters.computeIfAbsent(uniqueQueryCounterName, k -> createQueryCounter(k)); + counter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } + + private Counter createQueryCounter(String counterName) { + Counter counter = metricsRegistry.createCounter( + "search.query.type." 
+ counterName + ".count", + "Counter for the number of top level and nested " + counterName + " search queries", + UNIT + ); + return counter; + } +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java index 1ead14aac6b51..c8ab5fdaf61a1 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -34,15 +34,16 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.search.TopFieldDocs; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.routing.GroupShardsIterator; +import org.opensearch.core.action.ActionListener; import org.opensearch.search.SearchPhaseResult; import org.opensearch.search.SearchShardTarget; import org.opensearch.search.internal.AliasFilter; import org.opensearch.search.internal.SearchContext; import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.search.query.QuerySearchResult; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.transport.Transport; import java.util.Map; @@ -81,10 +82,12 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<SearchPh final TransportSearchAction.SearchTimeProvider timeProvider, ClusterState clusterState, SearchTask task, - SearchResponse.Clusters clusters + SearchResponse.Clusters clusters, + SearchRequestContext searchRequestContext, + final Tracer tracer ) { super( - "query", + SearchPhaseName.QUERY.getName(), logger, searchTransportService, nodeIdToConnection, @@ -100,7 +103,9 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<SearchPh task, resultConsumer, request.getMaxConcurrentShardRequests(), - clusters + clusters, + searchRequestContext, + tracer ); this.topDocsSize = SearchPhaseController.getTopDocsSize(request); this.trackTotalHitsUpTo = request.resolveTrackTotalHitsUpTo(); diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequest.java b/server/src/main/java/org/opensearch/action/search/SearchRequest.java index 43fec9e70ab88..f738c182c06da 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequest.java @@ -38,16 +38,17 @@ import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; import org.opensearch.common.Nullable; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.tasks.TaskId; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.search.Scroll; import org.opensearch.search.builder.PointInTimeBuilder; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.internal.SearchContext; -import org.opensearch.tasks.TaskId; import java.io.IOException; import java.util.Arrays; @@ -69,8 +70,9 @@ * @see org.opensearch.client.Client#search(SearchRequest) * @see SearchResponse * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class 
SearchRequest extends ActionRequest implements IndicesRequest.Replaceable { public static final ToXContent.Params FORMAT_PARAMS = new ToXContent.MapParams(Collections.singletonMap("pretty", "false")); @@ -117,6 +119,8 @@ public class SearchRequest extends ActionRequest implements IndicesRequest.Repla private String pipeline; + private Boolean phaseTook = null; + public SearchRequest() { this.localClusterAlias = null; this.absoluteStartMillis = DEFAULT_ABSOLUTE_START_MILLIS; @@ -209,6 +213,7 @@ private SearchRequest( this.absoluteStartMillis = absoluteStartMillis; this.finalReduce = finalReduce; this.cancelAfterTimeInterval = searchRequest.cancelAfterTimeInterval; + this.phaseTook = searchRequest.phaseTook; } /** @@ -253,6 +258,9 @@ public SearchRequest(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(Version.V_2_7_0)) { pipeline = in.readOptionalString(); } + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + phaseTook = in.readOptionalBoolean(); + } } @Override @@ -284,6 +292,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_7_0)) { out.writeOptionalString(pipeline); } + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeOptionalBoolean(phaseTook); + } } @Override @@ -348,7 +359,7 @@ boolean isFinalReduce() { * request. When created through {@link #subSearchRequest(SearchRequest, String[], String, long, boolean)}, this method returns * the provided current time, otherwise it will return {@link System#currentTimeMillis()}. */ - long getOrCreateAbsoluteStartMillis() { + public long getOrCreateAbsoluteStartMillis() { return absoluteStartMillis == DEFAULT_ABSOLUTE_START_MILLIS ? System.currentTimeMillis() : absoluteStartMillis; } @@ -600,7 +611,7 @@ public void setMaxConcurrentShardRequests(int maxConcurrentShardRequests) { * the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for * instance a shard can not match any documents based on its rewrite method ie. if date filters are mandatory to match but the shard * bounds and the query are disjoint. - * + * <p> * When unspecified, the pre-filter phase is executed if any of these conditions is met: * <ul> * <li>The request targets more than 128 shards</li> @@ -615,13 +626,27 @@ public void setPreFilterShardSize(int preFilterShardSize) { this.preFilterShardSize = preFilterShardSize; } + /** + * Returns value of user-provided phase_took query parameter for this search request. + */ + public Boolean isPhaseTook() { + return phaseTook; + } + + /** + * Sets value of phase_took query param if provided by user. Defaults to <code>null</code>. + */ + public void setPhaseTook(Boolean phaseTook) { + this.phaseTook = phaseTook; + } + /** * Returns a threshold that enforces a pre-filter roundtrip to pre-filter search shards based on query rewriting if the number of shards * the search request expands to exceeds the threshold, or <code>null</code> if the threshold is unspecified. * This filter roundtrip can limit the number of shards significantly if for * instance a shard can not match any documents based on its rewrite method ie. if date filters are mandatory to match but the shard * bounds and the query are disjoint. 
- * + * <p> * When unspecified, the pre-filter phase is executed if any of these conditions is met: * <ul> * <li>The request targets more than 128 shards</li> @@ -719,7 +744,8 @@ public boolean equals(Object o) { && absoluteStartMillis == that.absoluteStartMillis && ccsMinimizeRoundtrips == that.ccsMinimizeRoundtrips && Objects.equals(cancelAfterTimeInterval, that.cancelAfterTimeInterval) - && Objects.equals(pipeline, that.pipeline); + && Objects.equals(pipeline, that.pipeline) + && Objects.equals(phaseTook, that.phaseTook); } @Override @@ -740,7 +766,8 @@ public int hashCode() { localClusterAlias, absoluteStartMillis, ccsMinimizeRoundtrips, - cancelAfterTimeInterval + cancelAfterTimeInterval, + phaseTook ); } @@ -783,6 +810,8 @@ public String toString() { + cancelAfterTimeInterval + ", pipeline=" + pipeline + + ", phaseTook=" + + phaseTook + "}"; } } diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java b/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java index 861e1df0203d7..9dac827e7d518 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java @@ -36,6 +36,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.index.query.QueryBuilder; import org.opensearch.script.Script; @@ -58,8 +59,9 @@ /** * A search action request builder. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, SearchResponse> { public SearchRequestBuilder(OpenSearchClient client, SearchAction action) { @@ -404,6 +406,15 @@ public SearchRequestBuilder setTrackScores(boolean trackScores) { return this; } + /** + * Applies when fetching scores with named queries, and controls if scores will be tracked as well. + * Defaults to {@code false}. + */ + public SearchRequestBuilder setIncludeNamedQueriesScore(boolean includeNamedQueriesScore) { + sourceBuilder().includeNamedQueriesScores(includeNamedQueriesScore); + return this; + } + /** * Indicates if the total hit count for the query should be tracked. Requests will count total hit count accurately * up to 10,000 by default, see {@link #setTrackTotalHitsUpTo(int)} to change this value or set to true/false to always/never @@ -605,7 +616,7 @@ public SearchRequestBuilder setMaxConcurrentShardRequests(int maxConcurrentShard * the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for * instance a shard can not match any documents based on its rewrite method ie. if date filters are mandatory to match but the shard * bounds and the query are disjoint. 
- * + * <p> * When unspecified, the pre-filter phase is executed if any of these conditions is met: * <ul> * <li>The request targets more than 128 shards</li> diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestContext.java b/server/src/main/java/org/opensearch/action/search/SearchRequestContext.java new file mode 100644 index 0000000000000..b8bbde65ca6bc --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestContext.java @@ -0,0 +1,128 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.apache.lucene.search.TotalHits; +import org.opensearch.common.annotation.InternalApi; + +import java.util.EnumMap; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; + +/** + * This class holds request-level context for search queries at the coordinator node + * + * @opensearch.internal + */ +@InternalApi +public class SearchRequestContext { + private final SearchRequestOperationsListener searchRequestOperationsListener; + private long absoluteStartNanos; + private final Map<String, Long> phaseTookMap; + private TotalHits totalHits; + private final EnumMap<ShardStatsFieldNames, Integer> shardStats; + + private final SearchRequest searchRequest; + + SearchRequestContext(final SearchRequestOperationsListener searchRequestOperationsListener, final SearchRequest searchRequest) { + this.searchRequestOperationsListener = searchRequestOperationsListener; + this.absoluteStartNanos = System.nanoTime(); + this.phaseTookMap = new HashMap<>(); + this.shardStats = new EnumMap<>(ShardStatsFieldNames.class); + this.searchRequest = searchRequest; + } + + SearchRequestOperationsListener getSearchRequestOperationsListener() { + return searchRequestOperationsListener; + } + + void updatePhaseTookMap(String phaseName, Long tookTime) { + this.phaseTookMap.put(phaseName, tookTime); + } + + public Map<String, Long> phaseTookMap() { + return phaseTookMap; + } + + SearchResponse.PhaseTook getPhaseTook() { + if (searchRequest != null && searchRequest.isPhaseTook() != null && searchRequest.isPhaseTook()) { + return new SearchResponse.PhaseTook(phaseTookMap); + } else { + return null; + } + } + + /** + * Override absoluteStartNanos set in constructor. 
+ * For testing only + */ + void setAbsoluteStartNanos(long absoluteStartNanos) { + this.absoluteStartNanos = absoluteStartNanos; + } + + /** + * Request start time in nanos + */ + public long getAbsoluteStartNanos() { + return absoluteStartNanos; + } + + void setTotalHits(TotalHits totalHits) { + this.totalHits = totalHits; + } + + public TotalHits totalHits() { + return totalHits; + } + + void setShardStats(int total, int successful, int skipped, int failed) { + this.shardStats.put(ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_TOTAL, total); + this.shardStats.put(ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_SUCCESSFUL, successful); + this.shardStats.put(ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_SKIPPED, skipped); + this.shardStats.put(ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_FAILED, failed); + } + + String formattedShardStats() { + if (shardStats.isEmpty()) { + return ""; + } else { + return String.format( + Locale.ROOT, + "{%s:%s, %s:%s, %s:%s, %s:%s}", + ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_TOTAL.toString(), + shardStats.get(ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_TOTAL), + ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_SUCCESSFUL.toString(), + shardStats.get(ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_SUCCESSFUL), + ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_SKIPPED.toString(), + shardStats.get(ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_SKIPPED), + ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_FAILED.toString(), + shardStats.get(ShardStatsFieldNames.SEARCH_REQUEST_SLOWLOG_SHARD_FAILED) + ); + } + } +} + +enum ShardStatsFieldNames { + SEARCH_REQUEST_SLOWLOG_SHARD_TOTAL("total"), + SEARCH_REQUEST_SLOWLOG_SHARD_SUCCESSFUL("successful"), + SEARCH_REQUEST_SLOWLOG_SHARD_SKIPPED("skipped"), + SEARCH_REQUEST_SLOWLOG_SHARD_FAILED("failed"); + + private final String name; + + ShardStatsFieldNames(String name) { + this.name = name; + } + + @Override + public String toString() { + return this.name; + } +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsCompositeListenerFactory.java b/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsCompositeListenerFactory.java new file mode 100644 index 0000000000000..db487bf945889 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsCompositeListenerFactory.java @@ -0,0 +1,81 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.apache.logging.log4j.Logger; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * SearchRequestOperationsCompositeListenerFactory contains listeners registered to search requests, + * and is responsible for creating the {@link SearchRequestOperationsListener.CompositeListener} + * with all the listeners enabled at cluster-level and request-level. + * + * + * @opensearch.internal + */ +public final class SearchRequestOperationsCompositeListenerFactory { + private final List<SearchRequestOperationsListener> searchRequestListenersList; + + /** + * Create the SearchRequestOperationsCompositeListenerFactory and add multiple {@link SearchRequestOperationsListener} + * to the searchRequestListenersList.
+ * Those enabled listeners will be executed during each search request. + * + * @param listeners Multiple SearchRequestOperationsListener objects to add. + * @throws IllegalArgumentException if any input listener is null. + */ + public SearchRequestOperationsCompositeListenerFactory(final SearchRequestOperationsListener... listeners) { + searchRequestListenersList = new ArrayList<>(); + for (SearchRequestOperationsListener listener : listeners) { + if (listener == null) { + throw new IllegalArgumentException("listener must not be null"); + } + searchRequestListenersList.add(listener); + } + } + + /** + * Get searchRequestListenersList. + * + * @return List of SearchRequestOperationsListener + */ + public List<SearchRequestOperationsListener> getListeners() { + return searchRequestListenersList; + } + + /** + * Create the {@link SearchRequestOperationsListener.CompositeListener} + * with all the listeners enabled at cluster-level and request-level. + * + * @param searchRequest The SearchRequest object used to decide which request-level listeners to add based on states/flags + * @param logger Logger to be attached to the {@link SearchRequestOperationsListener.CompositeListener} + * @param perRequestListeners the per-request listeners that can be optionally added to the returned CompositeListener list. + * @return SearchRequestOperationsListener.CompositeListener + */ + public SearchRequestOperationsListener.CompositeListener buildCompositeListener( + final SearchRequest searchRequest, + final Logger logger, + final SearchRequestOperationsListener... perRequestListeners + ) { + final List<SearchRequestOperationsListener> searchListenersList = Stream.concat( + searchRequestListenersList.stream(), + Arrays.stream(perRequestListeners) + ) + .filter((searchRequestOperationsListener -> searchRequestOperationsListener.isEnabled(searchRequest))) + .collect(Collectors.toList()); + + return new SearchRequestOperationsListener.CompositeListener(searchListenersList, logger); + } + +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java b/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java new file mode 100644 index 0000000000000..53efade174502 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java @@ -0,0 +1,140 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.common.annotation.InternalApi; + +import java.util.List; + +/** + * A listener for search, fetch and context events at the coordinator node level + * + * @opensearch.internal + */ +@InternalApi +public abstract class SearchRequestOperationsListener { + private volatile boolean enabled; + public static final SearchRequestOperationsListener NOOP = new SearchRequestOperationsListener(false) { + @Override + protected void onPhaseStart(SearchPhaseContext context) {} + + @Override + protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} + + @Override + protected void onPhaseFailure(SearchPhaseContext context, Throwable cause) {} + }; + + protected SearchRequestOperationsListener() { + this.enabled = true; + } + + protected SearchRequestOperationsListener(final boolean enabled) { + this.enabled = enabled; + } + + protected abstract void onPhaseStart(SearchPhaseContext context); + + protected abstract void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext); + + protected abstract void onPhaseFailure(SearchPhaseContext context, Throwable cause); + + protected void onRequestStart(SearchRequestContext searchRequestContext) {} + + protected void onRequestEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} + + protected boolean isEnabled(SearchRequest searchRequest) { + return isEnabled(); + } + + protected boolean isEnabled() { + return enabled; + } + + protected void setEnabled(final boolean enabled) { + this.enabled = enabled; + } + + /** + * Holder of Composite Listeners + * + * @opensearch.internal + */ + + static final class CompositeListener extends SearchRequestOperationsListener { + private final List<SearchRequestOperationsListener> listeners; + private final Logger logger; + + CompositeListener(List<SearchRequestOperationsListener> listeners, Logger logger) { + this.listeners = listeners; + this.logger = logger; + } + + @Override + protected void onPhaseStart(SearchPhaseContext context) { + for (SearchRequestOperationsListener listener : listeners) { + try { + listener.onPhaseStart(context); + } catch (Exception e) { + logger.warn(() -> new ParameterizedMessage("onPhaseStart listener [{}] failed", listener), e); + } + } + } + + @Override + protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + for (SearchRequestOperationsListener listener : listeners) { + try { + listener.onPhaseEnd(context, searchRequestContext); + } catch (Exception e) { + logger.warn(() -> new ParameterizedMessage("onPhaseEnd listener [{}] failed", listener), e); + } + } + } + + @Override + protected void onPhaseFailure(SearchPhaseContext context, Throwable cause) { + for (SearchRequestOperationsListener listener : listeners) { + try { + listener.onPhaseFailure(context, cause); + } catch (Exception e) { + logger.warn(() -> new ParameterizedMessage("onPhaseFailure listener [{}] failed", listener), e); + } + } + } + + @Override + protected void onRequestStart(SearchRequestContext searchRequestContext) { + for (SearchRequestOperationsListener listener : listeners) { + try { + listener.onRequestStart(searchRequestContext); + } catch (Exception e) { + logger.warn(() -> new ParameterizedMessage("onRequestStart listener [{}] failed", listener), e); + } + } + } + + @Override + public void 
onRequestEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + for (SearchRequestOperationsListener listener : listeners) { + try { + listener.onRequestEnd(context, searchRequestContext); + } catch (Exception e) { + logger.warn(() -> new ParameterizedMessage("onRequestEnd listener [{}] failed", listener), e); + } + } + } + + public List<SearchRequestOperationsListener> getListeners() { + return listeners; + } + } +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestSlowLog.java b/server/src/main/java/org/opensearch/action/search/SearchRequestSlowLog.java new file mode 100644 index 0000000000000..a9a07c6aca7f4 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestSlowLog.java @@ -0,0 +1,281 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.action.search; + +import com.fasterxml.jackson.core.io.JsonStringEncoder; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.logging.Loggers; +import org.opensearch.common.logging.OpenSearchLogMessage; +import org.opensearch.common.logging.SlowLogLevel; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.tasks.Task; + +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +/** + * The request-level search slow log implementation + * + * @opensearch.internal + */ +public final class SearchRequestSlowLog extends SearchRequestOperationsListener { + private static final Charset UTF_8 = StandardCharsets.UTF_8; + + private long warnThreshold; + private long infoThreshold; + private long debugThreshold; + private long traceThreshold; + private SlowLogLevel level; + + private final Logger logger; + + static final String CLUSTER_SEARCH_REQUEST_SLOWLOG_PREFIX = "cluster.search.request.slowlog"; + + public static final Setting<TimeValue> CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_WARN_SETTING = Setting.timeSetting( + CLUSTER_SEARCH_REQUEST_SLOWLOG_PREFIX + ".threshold.warn", + TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + public static final Setting<TimeValue> CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_INFO_SETTING = Setting.timeSetting( + CLUSTER_SEARCH_REQUEST_SLOWLOG_PREFIX + ".threshold.info", + TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + public static final Setting<TimeValue> CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_DEBUG_SETTING = Setting.timeSetting( + CLUSTER_SEARCH_REQUEST_SLOWLOG_PREFIX + ".threshold.debug", + TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + public static final Setting<TimeValue> CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_TRACE_SETTING = Setting.timeSetting( + CLUSTER_SEARCH_REQUEST_SLOWLOG_PREFIX + ".threshold.trace", + TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + public static final Setting<SlowLogLevel> CLUSTER_SEARCH_REQUEST_SLOWLOG_LEVEL = new Setting<>( + CLUSTER_SEARCH_REQUEST_SLOWLOG_PREFIX + ".level", + SlowLogLevel.TRACE.name(), + SlowLogLevel::parse, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + private static final ToXContent.Params FORMAT_PARAMS = new ToXContent.MapParams(Collections.singletonMap("pretty", "false")); + + public SearchRequestSlowLog(ClusterService clusterService) { + this(clusterService, LogManager.getLogger(CLUSTER_SEARCH_REQUEST_SLOWLOG_PREFIX)); // logger configured in log4j2.properties + } + + SearchRequestSlowLog(ClusterService clusterService, Logger logger) { + this.logger = logger; + Loggers.setLevel(this.logger, SlowLogLevel.TRACE.name()); + + this.setWarnThreshold(clusterService.getClusterSettings().get(CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_WARN_SETTING)); + this.setInfoThreshold(clusterService.getClusterSettings().get(CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_INFO_SETTING)); + 
this.setDebugThreshold(clusterService.getClusterSettings().get(CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_DEBUG_SETTING)); + this.setTraceThreshold(clusterService.getClusterSettings().get(CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_TRACE_SETTING)); + this.setLevel(clusterService.getClusterSettings().get(CLUSTER_SEARCH_REQUEST_SLOWLOG_LEVEL)); + + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_WARN_SETTING, this::setWarnThreshold); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_INFO_SETTING, this::setInfoThreshold); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_DEBUG_SETTING, this::setDebugThreshold); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_TRACE_SETTING, this::setTraceThreshold); + clusterService.getClusterSettings().addSettingsUpdateConsumer(CLUSTER_SEARCH_REQUEST_SLOWLOG_LEVEL, this::setLevel); + } + + @Override + protected void onPhaseStart(SearchPhaseContext context) {} + + @Override + protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} + + @Override + protected void onPhaseFailure(SearchPhaseContext context, Throwable cause) {} + + @Override + protected void onRequestStart(SearchRequestContext searchRequestContext) {} + + @Override + protected void onRequestEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + long tookInNanos = System.nanoTime() - searchRequestContext.getAbsoluteStartNanos(); + + if (warnThreshold >= 0 && tookInNanos > warnThreshold && level.isLevelEnabledFor(SlowLogLevel.WARN)) { + logger.warn(new SearchRequestSlowLogMessage(context, tookInNanos, searchRequestContext)); + } else if (infoThreshold >= 0 && tookInNanos > infoThreshold && level.isLevelEnabledFor(SlowLogLevel.INFO)) { + logger.info(new SearchRequestSlowLogMessage(context, tookInNanos, searchRequestContext)); + } else if (debugThreshold >= 0 && tookInNanos > debugThreshold && level.isLevelEnabledFor(SlowLogLevel.DEBUG)) { + logger.debug(new SearchRequestSlowLogMessage(context, tookInNanos, searchRequestContext)); + } else if (traceThreshold >= 0 && tookInNanos > traceThreshold && level.isLevelEnabledFor(SlowLogLevel.TRACE)) { + logger.trace(new SearchRequestSlowLogMessage(context, tookInNanos, searchRequestContext)); + } + } + + /** + * Search request slow log message + * + * @opensearch.internal + */ + static final class SearchRequestSlowLogMessage extends OpenSearchLogMessage { + + SearchRequestSlowLogMessage(SearchPhaseContext context, long tookInNanos, SearchRequestContext searchRequestContext) { + super(prepareMap(context, tookInNanos, searchRequestContext), message(context, tookInNanos, searchRequestContext)); + } + + private static Map<String, Object> prepareMap( + SearchPhaseContext context, + long tookInNanos, + SearchRequestContext searchRequestContext + ) { + final Map<String, Object> messageFields = new HashMap<>(); + messageFields.put("took", TimeValue.timeValueNanos(tookInNanos)); + messageFields.put("took_millis", TimeUnit.NANOSECONDS.toMillis(tookInNanos)); + messageFields.put("phase_took", searchRequestContext.phaseTookMap().toString()); + if (searchRequestContext.totalHits() != null) { + messageFields.put("total_hits", searchRequestContext.totalHits()); + } else { + messageFields.put("total_hits", "-1"); + } + messageFields.put("search_type", 
context.getRequest().searchType()); + messageFields.put("shards", searchRequestContext.formattedShardStats()); + + if (context.getRequest().source() != null) { + String source = escapeJson(context.getRequest().source().toString(FORMAT_PARAMS)); + messageFields.put("source", source); + } else { + messageFields.put("source", "{}"); + } + + messageFields.put("id", context.getTask().getHeader(Task.X_OPAQUE_ID)); + return messageFields; + } + + // Message will be used in plaintext logs + private static String message(SearchPhaseContext context, long tookInNanos, SearchRequestContext searchRequestContext) { + final StringBuilder sb = new StringBuilder(); + sb.append("took[").append(TimeValue.timeValueNanos(tookInNanos)).append("], "); + sb.append("took_millis[").append(TimeUnit.NANOSECONDS.toMillis(tookInNanos)).append("], "); + sb.append("phase_took_millis[").append(searchRequestContext.phaseTookMap().toString()).append("], "); + if (searchRequestContext.totalHits() != null) { + sb.append("total_hits[").append(searchRequestContext.totalHits()).append("], "); + } else { + sb.append("total_hits[-1], "); // keep the trailing field separator so this branch matches the non-null case + } + sb.append("search_type[").append(context.getRequest().searchType()).append("], "); + sb.append("shards[").append(searchRequestContext.formattedShardStats()).append("], "); + if (context.getRequest().source() != null) { + sb.append("source[").append(context.getRequest().source().toString(FORMAT_PARAMS)).append("], "); + } else { + sb.append("source[], "); + } + if (context.getTask().getHeader(Task.X_OPAQUE_ID) != null) { + sb.append("id[").append(context.getTask().getHeader(Task.X_OPAQUE_ID)).append("]"); + } else { + sb.append("id[]"); + } + return sb.toString(); + } + + private static String escapeJson(String text) { + byte[] sourceEscaped = JsonStringEncoder.getInstance().quoteAsUTF8(text); + return new String(sourceEscaped, UTF_8); + } + } + + void setWarnThreshold(TimeValue warnThreshold) { + this.warnThreshold = warnThreshold.nanos(); + setEnabledIfThresholdExceed(); + } + + void setInfoThreshold(TimeValue infoThreshold) { + this.infoThreshold = infoThreshold.nanos(); + setEnabledIfThresholdExceed(); + } + + void setDebugThreshold(TimeValue debugThreshold) { + this.debugThreshold = debugThreshold.nanos(); + setEnabledIfThresholdExceed(); + } + + void setTraceThreshold(TimeValue traceThreshold) { + this.traceThreshold = traceThreshold.nanos(); + setEnabledIfThresholdExceed(); + } + + void setLevel(SlowLogLevel level) { + this.level = level; + } + + protected long getWarnThreshold() { + return warnThreshold; + } + + protected long getInfoThreshold() { + return infoThreshold; + } + + protected long getDebugThreshold() { + return debugThreshold; + } + + protected long getTraceThreshold() { + return traceThreshold; + } + + SlowLogLevel getLevel() { + return level; + } + + private void setEnabledIfThresholdExceed() { + super.setEnabled(this.warnThreshold >= 0 || this.debugThreshold >= 0 || this.infoThreshold >= 0 || this.traceThreshold >= 0); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java new file mode 100644 index 0000000000000..97ef94055faf7 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java @@ -0,0 +1,89 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.action.search; + +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.metrics.CounterMetric; +import org.opensearch.common.metrics.MeanMetric; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; + +import java.util.EnumMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +/** + * Request level search stats to track coordinator level node search latencies + * + * @opensearch.api + */ +@PublicApi(since = "2.11.0") +public final class SearchRequestStats extends SearchRequestOperationsListener { + Map<SearchPhaseName, StatsHolder> phaseStatsMap = new EnumMap<>(SearchPhaseName.class); + + public static final String SEARCH_REQUEST_STATS_ENABLED_KEY = "search.request_stats_enabled"; + public static final Setting<Boolean> SEARCH_REQUEST_STATS_ENABLED = Setting.boolSetting( + SEARCH_REQUEST_STATS_ENABLED_KEY, + false, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + @Inject + public SearchRequestStats(ClusterSettings clusterSettings) { + this.setEnabled(clusterSettings.get(SEARCH_REQUEST_STATS_ENABLED)); + clusterSettings.addSettingsUpdateConsumer(SEARCH_REQUEST_STATS_ENABLED, this::setEnabled); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + phaseStatsMap.put(searchPhaseName, new StatsHolder()); + } + } + + public long getPhaseCurrent(SearchPhaseName searchPhaseName) { + return phaseStatsMap.get(searchPhaseName).current.count(); + } + + public long getPhaseTotal(SearchPhaseName searchPhaseName) { + return phaseStatsMap.get(searchPhaseName).total.count(); + } + + public long getPhaseMetric(SearchPhaseName searchPhaseName) { + return phaseStatsMap.get(searchPhaseName).timing.sum(); + } + + @Override + protected void onPhaseStart(SearchPhaseContext context) { + phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()).current.inc(); + } + + @Override + protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + StatsHolder phaseStats = phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()); + phaseStats.current.dec(); + phaseStats.total.inc(); + phaseStats.timing.inc(TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - context.getCurrentPhase().getStartTimeInNanos())); + } + + @Override + protected void onPhaseFailure(SearchPhaseContext context, Throwable cause) { + phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()).current.dec(); + } + + /** + * Holder of statistics values + * + * @opensearch.internal + */ + + public static final class StatsHolder { + CounterMetric current = new CounterMetric(); + CounterMetric total = new CounterMetric(); + MeanMetric timing = new MeanMetric(); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchResponse.java b/server/src/main/java/org/opensearch/action/search/SearchResponse.java index c7ab8f0858e7b..899c71e91e3ab 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/opensearch/action/search/SearchResponse.java @@ -33,22 +33,27 @@ package org.opensearch.action.search; import org.apache.lucene.search.TotalHits; -import org.opensearch.action.ActionResponse; +import org.opensearch.Version; import org.opensearch.common.Nullable; -import org.opensearch.common.Strings; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import 
org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.StatusToXContentObject; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.xcontent.XContentParser.Token; -import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.action.RestActions; +import org.opensearch.search.GenericSearchExtBuilder; +import org.opensearch.search.SearchExtBuilder; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; import org.opensearch.search.aggregations.Aggregations; @@ -60,18 +65,22 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.function.Supplier; +import static org.opensearch.action.search.SearchResponseSections.EXT_FIELD; import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; /** * A response of a search request. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SearchResponse extends ActionResponse implements StatusToXContentObject { private static final ParseField SCROLL_ID = new ParseField("_scroll_id"); @@ -90,6 +99,7 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb private final ShardSearchFailure[] shardFailures; private final Clusters clusters; private final long tookInMillis; + private final PhaseTook phaseTook; public SearchResponse(StreamInput in) throws IOException { super(in); @@ -108,6 +118,11 @@ public SearchResponse(StreamInput in) throws IOException { clusters = new Clusters(in); scrollId = in.readOptionalString(); tookInMillis = in.readVLong(); + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + phaseTook = in.readOptionalWriteable(PhaseTook::new); + } else { + phaseTook = null; + } skippedShards = in.readVInt(); pointInTimeId = in.readOptionalString(); } @@ -122,7 +137,7 @@ public SearchResponse( ShardSearchFailure[] shardFailures, Clusters clusters ) { - this(internalResponse, scrollId, totalShards, successfulShards, skippedShards, tookInMillis, shardFailures, clusters, null); + this(internalResponse, scrollId, totalShards, successfulShards, skippedShards, tookInMillis, null, shardFailures, clusters, null); } public SearchResponse( @@ -135,6 +150,32 @@ public SearchResponse( ShardSearchFailure[] shardFailures, Clusters clusters, String pointInTimeId + ) { + this( + internalResponse, + scrollId, + totalShards, + successfulShards, + skippedShards, + tookInMillis, + null, + shardFailures, + clusters, + pointInTimeId + ); + } + + public SearchResponse( + SearchResponseSections internalResponse, + String scrollId, + int totalShards, + int successfulShards, + int skippedShards, + long tookInMillis, + PhaseTook phaseTook, + 
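// may be null: the field is written with writeOptionalWriteable (see writeTo below), and the +        // pre-2.12 constructor overloads above pass null for it +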
ShardSearchFailure[] shardFailures, + Clusters clusters, + String pointInTimeId ) { this.internalResponse = internalResponse; this.scrollId = scrollId; @@ -144,6 +185,7 @@ public SearchResponse( this.successfulShards = successfulShards; this.skippedShards = skippedShards; this.tookInMillis = tookInMillis; + this.phaseTook = phaseTook; this.shardFailures = shardFailures; assert skippedShards <= totalShards : "skipped: " + skippedShards + " total: " + totalShards; assert scrollId == null || pointInTimeId == null : "SearchResponse can't have both scrollId [" @@ -206,6 +248,13 @@ public TimeValue getTook() { return new TimeValue(tookInMillis); } + /** + * How long the request took in each search phase. + */ + public PhaseTook getPhaseTook() { + return phaseTook; + } + /** * The total number of shards the search was executed on. */ @@ -294,6 +343,9 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t builder.field(POINT_IN_TIME_ID.getPreferredName(), pointInTimeId); } builder.field(TOOK.getPreferredName(), tookInMillis); + if (phaseTook != null) { + phaseTook.toXContent(builder, params); + } builder.field(TIMED_OUT.getPreferredName(), isTimedOut()); if (isTerminatedEarly() != null) { builder.field(TERMINATED_EARLY.getPreferredName(), isTerminatedEarly()); @@ -312,6 +364,7 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t ); clusters.toXContent(builder, params); internalResponse.toXContent(builder, params); + return builder; } @@ -332,6 +385,7 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE Boolean terminatedEarly = null; int numReducePhases = 1; long tookInMillis = -1; + PhaseTook phaseTook = null; int successfulShards = -1; int totalShards = -1; int skippedShards = 0; // 0 for BWC @@ -339,6 +393,7 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE String searchContextId = null; List<ShardSearchFailure> failures = new ArrayList<>(); Clusters clusters = Clusters.EMPTY; + List<SearchExtBuilder> extBuilders = new ArrayList<>(); for (Token token = parser.nextToken(); token != Token.END_OBJECT; token = parser.nextToken()) { if (token == Token.FIELD_NAME) { currentFieldName = parser.currentName(); @@ -395,6 +450,24 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE parser.skipChildren(); } } + } else if (PhaseTook.PHASE_TOOK.match(currentFieldName, parser.getDeprecationHandler())) { + Map<String, Long> phaseTookMap = new HashMap<>(); + + while ((token = parser.nextToken()) != Token.END_OBJECT) { + if (token == Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + try { + SearchPhaseName.valueOf(currentFieldName.toUpperCase(Locale.ROOT)); + phaseTookMap.put(currentFieldName, parser.longValue()); + } catch (final IllegalArgumentException ex) { + parser.skipChildren(); + } + } else { + parser.skipChildren(); + } + } + phaseTook = new PhaseTook(phaseTookMap); } else if (Clusters._CLUSTERS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { int successful = -1; int total = -1; @@ -417,6 +490,33 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE } } clusters = new Clusters(total, successful, skipped); + } else if (EXT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + String extSectionName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + extSectionName = 
parser.currentName(); + } else { + SearchExtBuilder searchExtBuilder; + try { + searchExtBuilder = parser.namedObject(SearchExtBuilder.class, extSectionName, null); + if (!searchExtBuilder.getWriteableName().equals(extSectionName)) { + throw new IllegalStateException( + "The parsed [" + + searchExtBuilder.getClass().getName() + + "] object has a " + + "different writeable name compared to the name of the section that it was parsed from: found [" + + searchExtBuilder.getWriteableName() + + "] expected [" + + extSectionName + + "]" + ); + } + } catch (XContentParseException e) { + searchExtBuilder = GenericSearchExtBuilder.fromXContent(parser); + } + extBuilders.add(searchExtBuilder); + } + } } else { parser.skipChildren(); } @@ -429,7 +529,8 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE timedOut, terminatedEarly, profile, - numReducePhases + numReducePhases, + extBuilders ); return new SearchResponse( searchResponseSections, @@ -438,6 +539,7 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE successfulShards, skippedShards, tookInMillis, + phaseTook, failures.toArray(ShardSearchFailure.EMPTY_ARRAY), clusters, searchContextId @@ -457,21 +559,25 @@ public void writeTo(StreamOutput out) throws IOException { clusters.writeTo(out); out.writeOptionalString(scrollId); out.writeVLong(tookInMillis); + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeOptionalWriteable(phaseTook); + } out.writeVInt(skippedShards); out.writeOptionalString(pointInTimeId); } @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } /** * Holds info about the clusters that the search was executed on: how many in total, how many of them were successful * and how many of them were skipped. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Clusters implements ToXContentFragment, Writeable { public static final Clusters EMPTY = new Clusters(0, 0, 0); @@ -570,6 +676,68 @@ public String toString() { } } + /** + * Holds info about how long each search phase of the request took to execute, keyed by the phase name.
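+     * Keys correspond to {@link SearchPhaseName#getName()} values; unrecognized phase names are skipped when a response is parsed back (see innerFromXContent above).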
+ * + * @opensearch.api + */ + @PublicApi(since = "1.0.0") + public static class PhaseTook implements ToXContentFragment, Writeable { + static final ParseField PHASE_TOOK = new ParseField("phase_took"); + private final Map<String, Long> phaseTookMap; + + public PhaseTook(Map<String, Long> phaseTookMap) { + this.phaseTookMap = phaseTookMap; + } + + private PhaseTook(StreamInput in) throws IOException { + this(in.readMap(StreamInput::readString, StreamInput::readLong)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(phaseTookMap, StreamOutput::writeString, StreamOutput::writeLong); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(PHASE_TOOK.getPreferredName()); + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + if (phaseTookMap.containsKey(searchPhaseName.getName())) { + builder.field(searchPhaseName.getName(), phaseTookMap.get(searchPhaseName.getName())); + } else { + builder.field(searchPhaseName.getName(), 0); + } + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + PhaseTook phaseTook = (PhaseTook) o; + + if (phaseTook.phaseTookMap.equals(phaseTookMap)) { + return true; + } else { + return false; + } + } + + @Override + public int hashCode() { + return Objects.hash(phaseTookMap); + } + } + static SearchResponse empty(Supplier<Long> tookInMillisSupplier, Clusters clusters) { SearchHits searchHits = new SearchHits(new SearchHit[0], new TotalHits(0L, TotalHits.Relation.EQUAL_TO), Float.NaN); InternalSearchResponse internalSearchResponse = new InternalSearchResponse( diff --git a/server/src/main/java/org/opensearch/action/search/SearchResponseMerger.java b/server/src/main/java/org/opensearch/action/search/SearchResponseMerger.java index f90e98106f93f..538e7fd54e2c3 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchResponseMerger.java +++ b/server/src/main/java/org/opensearch/action/search/SearchResponseMerger.java @@ -110,7 +110,7 @@ final class SearchResponseMerger { /** * Add a search response to the list of responses to be merged together into one. * Merges currently happen at once when all responses are available and - * {@link #getMergedResponse(SearchResponse.Clusters)} )} is called. + * {@link #getMergedResponse(SearchResponse.Clusters, SearchRequestContext)} is called. * That may change in the future as it's possible to introduce incremental merges as responses come in if necessary. */ void add(SearchResponse searchResponse) { @@ -126,7 +126,7 @@ int numResponses() { * Returns the merged response. To be called once all responses have been added through {@link #add(SearchResponse)} * so that all responses are merged into a single one.
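* The {@link SearchRequestContext} parameter added in this change supplies the per-phase took times that are folded into the merged response via {@code searchRequestContext.getPhaseTook()}.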
*/ - SearchResponse getMergedResponse(SearchResponse.Clusters clusters) { + SearchResponse getMergedResponse(SearchResponse.Clusters clusters, SearchRequestContext searchRequestContext) { // if the search is only across remote clusters, none of them are available, and all of them have skip_unavailable set to true, // we end up calling merge without anything to merge, we just return an empty search response if (searchResponses.size() == 0) { @@ -236,6 +236,7 @@ SearchResponse getMergedResponse(SearchResponse.Clusters clusters) { successfulShards, skippedShards, tookInMillis, + searchRequestContext.getPhaseTook(), shardFailures, clusters, null diff --git a/server/src/main/java/org/opensearch/action/search/SearchResponseSections.java b/server/src/main/java/org/opensearch/action/search/SearchResponseSections.java index 214bc0448b90c..bca2c8a52b691 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchResponseSections.java +++ b/server/src/main/java/org/opensearch/action/search/SearchResponseSections.java @@ -32,9 +32,12 @@ package org.opensearch.action.search; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.search.SearchExtBuilder; import org.opensearch.search.SearchHits; import org.opensearch.search.aggregations.Aggregations; import org.opensearch.search.profile.ProfileShardResult; @@ -42,21 +45,27 @@ import org.opensearch.search.suggest.Suggest; import java.io.IOException; +import java.util.ArrayList; import java.util.Collections; +import java.util.List; import java.util.Map; +import java.util.Objects; /** * Base class that holds the various sections which a search response is * composed of (hits, aggs, suggestions etc.) and allows to retrieve them. - * + * <p> * The reason why this class exists is that the high level REST client uses its own classes * to parse aggregations into, which are not serializable. This is the common part that can be * shared between core and client. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SearchResponseSections implements ToXContentFragment { + public static final ParseField EXT_FIELD = new ParseField("ext"); + protected final SearchHits hits; protected final Aggregations aggregations; protected final Suggest suggest; @@ -64,6 +73,7 @@ public class SearchResponseSections implements ToXContentFragment { protected final boolean timedOut; protected final Boolean terminatedEarly; protected final int numReducePhases; + protected final List<SearchExtBuilder> searchExtBuilders = new ArrayList<>(); public SearchResponseSections( SearchHits hits, @@ -73,6 +83,19 @@ public SearchResponseSections( Boolean terminatedEarly, SearchProfileShardResults profileResults, int numReducePhases + ) { + this(hits, aggregations, suggest, timedOut, terminatedEarly, profileResults, numReducePhases, Collections.emptyList()); + } + + public SearchResponseSections( + SearchHits hits, + Aggregations aggregations, + Suggest suggest, + boolean timedOut, + Boolean terminatedEarly, + SearchProfileShardResults profileResults, + int numReducePhases, + List<SearchExtBuilder> searchExtBuilders ) { this.hits = hits; this.aggregations = aggregations; @@ -81,6 +104,7 @@ public SearchResponseSections( this.timedOut = timedOut; this.terminatedEarly = terminatedEarly; this.numReducePhases = numReducePhases; + this.searchExtBuilders.addAll(Objects.requireNonNull(searchExtBuilders, "searchExtBuilders must not be null")); } public final boolean timedOut() { @@ -135,9 +159,20 @@ public final XContentBuilder toXContent(XContentBuilder builder, Params params) if (profileResults != null) { profileResults.toXContent(builder, params); } + if (!searchExtBuilders.isEmpty()) { + builder.startObject(EXT_FIELD.getPreferredName()); + for (SearchExtBuilder searchExtBuilder : searchExtBuilders) { + searchExtBuilder.toXContent(builder, params); + } + builder.endObject(); + } return builder; } + public List<SearchExtBuilder> getSearchExtBuilders() { + return Collections.unmodifiableList(this.searchExtBuilders); + } + protected void writeTo(StreamOutput out) throws IOException { throw new UnsupportedOperationException(); } diff --git a/server/src/main/java/org/opensearch/action/search/SearchScrollAsyncAction.java b/server/src/main/java/org/opensearch/action/search/SearchScrollAsyncAction.java index 899c7a3c1dabd..7329a03f7e281 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchScrollAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/SearchScrollAsyncAction.java @@ -34,12 +34,12 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.Nullable; import org.opensearch.common.util.concurrent.AtomicArray; import org.opensearch.common.util.concurrent.CountDown; +import org.opensearch.core.action.ActionListener; import org.opensearch.search.SearchPhaseResult; import org.opensearch.search.SearchShardTarget; import org.opensearch.search.internal.InternalScrollSearchRequest; diff --git a/server/src/main/java/org/opensearch/action/search/SearchScrollQueryAndFetchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/SearchScrollQueryAndFetchAsyncAction.java index c1b4f56f7adc3..f7737fe59a975 100644 --- 
a/server/src/main/java/org/opensearch/action/search/SearchScrollQueryAndFetchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/SearchScrollQueryAndFetchAsyncAction.java @@ -33,10 +33,10 @@ package org.opensearch.action.search; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.util.concurrent.AtomicArray; +import org.opensearch.core.action.ActionListener; import org.opensearch.search.fetch.QueryFetchSearchResult; import org.opensearch.search.fetch.ScrollQueryFetchSearchResult; import org.opensearch.search.internal.InternalScrollSearchRequest; diff --git a/server/src/main/java/org/opensearch/action/search/SearchScrollQueryThenFetchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/SearchScrollQueryThenFetchAsyncAction.java index 1fa6460a212f7..87ba32016370f 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchScrollQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/SearchScrollQueryThenFetchAsyncAction.java @@ -34,11 +34,11 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.search.ScoreDoc; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.util.concurrent.AtomicArray; import org.opensearch.common.util.concurrent.CountDown; +import org.opensearch.core.action.ActionListener; import org.opensearch.search.SearchShardTarget; import org.opensearch.search.fetch.FetchSearchResult; import org.opensearch.search.fetch.ShardFetchRequest; diff --git a/server/src/main/java/org/opensearch/action/search/SearchScrollRequest.java b/server/src/main/java/org/opensearch/action/search/SearchScrollRequest.java index ddefb165f00b6..044efdc36d04f 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchScrollRequest.java +++ b/server/src/main/java/org/opensearch/action/search/SearchScrollRequest.java @@ -34,15 +34,16 @@ import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.tasks.TaskId; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.search.Scroll; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; import java.io.IOException; import java.util.Map; @@ -53,8 +54,9 @@ /** * Transport request for a search scroll * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SearchScrollRequest extends ActionRequest implements ToXContentObject { private String scrollId; diff --git a/server/src/main/java/org/opensearch/action/search/SearchScrollRequestBuilder.java b/server/src/main/java/org/opensearch/action/search/SearchScrollRequestBuilder.java index 638c595216631..41b34bd9c6c9e 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchScrollRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/search/SearchScrollRequestBuilder.java @@ -34,14 +34,16 @@ import 
org.opensearch.action.ActionRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.search.Scroll; /** * A search scroll action request builder. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SearchScrollRequestBuilder extends ActionRequestBuilder<SearchScrollRequest, SearchResponse> { public SearchScrollRequestBuilder(OpenSearchClient client, SearchScrollAction action) { diff --git a/server/src/main/java/org/opensearch/action/search/SearchShardIterator.java b/server/src/main/java/org/opensearch/action/search/SearchShardIterator.java index fbd85a3fc0b8f..33d9bf70021a0 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchShardIterator.java +++ b/server/src/main/java/org/opensearch/action/search/SearchShardIterator.java @@ -36,6 +36,7 @@ import org.opensearch.cluster.routing.PlainShardIterator; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.Countable; import org.opensearch.common.util.PlainIterator; @@ -54,8 +55,9 @@ * the cluster alias. * @see OriginalIndices * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class SearchShardIterator implements Comparable<SearchShardIterator>, Countable { private final OriginalIndices originalIndices; diff --git a/server/src/main/java/org/opensearch/action/search/SearchShardTask.java b/server/src/main/java/org/opensearch/action/search/SearchShardTask.java index c94f02395cf38..dfecf4f462c4d 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchShardTask.java +++ b/server/src/main/java/org/opensearch/action/search/SearchShardTask.java @@ -33,11 +33,12 @@ package org.opensearch.action.search; import org.opensearch.common.MemoizedSupplier; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.tasks.TaskId; import org.opensearch.search.fetch.ShardFetchSearchRequest; import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.SearchBackpressureTask; -import org.opensearch.tasks.TaskId; import java.util.Map; import java.util.function.Supplier; @@ -46,8 +47,9 @@ * Task storing information about a currently running search shard request. * See {@link ShardSearchRequest}, {@link ShardFetchSearchRequest}, ... 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SearchShardTask extends CancellableTask implements SearchBackpressureTask { // generating metadata in a lazy way since source can be quite big private final MemoizedSupplier<String> metadataSupplier; diff --git a/server/src/main/java/org/opensearch/action/search/SearchTask.java b/server/src/main/java/org/opensearch/action/search/SearchTask.java index dad6c44da4f10..d3c1043c50cce 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchTask.java +++ b/server/src/main/java/org/opensearch/action/search/SearchTask.java @@ -32,10 +32,11 @@ package org.opensearch.action.search; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.tasks.TaskId; import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.SearchBackpressureTask; -import org.opensearch.tasks.TaskId; import java.util.Map; import java.util.function.Supplier; @@ -45,8 +46,9 @@ /** * Task storing information about a currently running {@link SearchRequest}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SearchTask extends CancellableTask implements SearchBackpressureTask { // generating description in a lazy way since source can be quite big private final Supplier<String> descriptionSupplier; diff --git a/server/src/main/java/org/opensearch/action/search/SearchTransportService.java b/server/src/main/java/org/opensearch/action/search/SearchTransportService.java index 5a280818640ed..64c738f633f2e 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/opensearch/action/search/SearchTransportService.java @@ -32,7 +32,6 @@ package org.opensearch.action.search; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionListenerResponseHandler; import org.opensearch.action.IndicesRequest; import org.opensearch.action.OriginalIndices; @@ -40,10 +39,13 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Nullable; +import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.search.SearchPhaseResult; import org.opensearch.search.SearchService; import org.opensearch.search.dfs.DfsSearchResult; @@ -65,7 +67,6 @@ import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestOptions; -import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -542,6 +543,9 @@ public static void registerRequestHandler(TransportService transportService, Sea transportService.registerRequestHandler( DFS_ACTION_NAME, ThreadPool.Names.SAME, + false, + true, + AdmissionControlActionType.SEARCH, ShardSearchRequest::new, (request, channel, task) -> searchService.executeDfsPhase( request, @@ -556,6 +560,9 @@ public static void registerRequestHandler(TransportService 
transportService, Sea transportService.registerRequestHandler( QUERY_ACTION_NAME, ThreadPool.Names.SAME, + false, + true, + AdmissionControlActionType.SEARCH, ShardSearchRequest::new, (request, channel, task) -> { searchService.executeQueryPhase( @@ -575,6 +582,9 @@ public static void registerRequestHandler(TransportService transportService, Sea transportService.registerRequestHandler( QUERY_ID_ACTION_NAME, ThreadPool.Names.SAME, + false, + true, + AdmissionControlActionType.SEARCH, QuerySearchRequest::new, (request, channel, task) -> { searchService.executeQueryPhase( @@ -633,6 +643,7 @@ public static void registerRequestHandler(TransportService transportService, Sea ThreadPool.Names.SAME, true, true, + AdmissionControlActionType.SEARCH, ShardFetchSearchRequest::new, (request, channel, task) -> { searchService.executeFetchPhase( diff --git a/server/src/main/java/org/opensearch/action/search/SearchType.java b/server/src/main/java/org/opensearch/action/search/SearchType.java index cb86c0d6c1b4a..e549ec598380a 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchType.java +++ b/server/src/main/java/org/opensearch/action/search/SearchType.java @@ -32,11 +32,14 @@ package org.opensearch.action.search; +import org.opensearch.common.annotation.PublicApi; + /** * Search type represent the manner at which the search operation is executed. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum SearchType { /** * Same as {@link #QUERY_THEN_FETCH}, except for an initial scatter phase which goes and computes the distributed diff --git a/server/src/main/java/org/opensearch/action/search/SearchUtils.java b/server/src/main/java/org/opensearch/action/search/SearchUtils.java index 96fcda0d491c9..ad4ac8f2a9eec 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchUtils.java +++ b/server/src/main/java/org/opensearch/action/search/SearchUtils.java @@ -8,10 +8,10 @@ package org.opensearch.action.search; -import org.opensearch.action.ActionListener; import org.opensearch.action.StepListener; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.core.action.ActionListener; import org.opensearch.transport.RemoteClusterService; import java.util.Set; diff --git a/server/src/main/java/org/opensearch/action/search/ShardSearchFailure.java b/server/src/main/java/org/opensearch/action/search/ShardSearchFailure.java index 50cd1dc2e2cd4..ef490844db692 100644 --- a/server/src/main/java/org/opensearch/action/search/ShardSearchFailure.java +++ b/server/src/main/java/org/opensearch/action/search/ShardSearchFailure.java @@ -35,16 +35,17 @@ import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; import org.opensearch.action.OriginalIndices; -import org.opensearch.core.action.ShardOperationFailedException; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.action.ShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; import 
org.opensearch.search.SearchException; import org.opensearch.search.SearchShardTarget; import org.opensearch.transport.RemoteClusterAware; @@ -56,8 +57,9 @@ /** * Represents a failure to search on a specific shard. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ShardSearchFailure extends ShardOperationFailedException { private static final String REASON_FIELD = "reason"; diff --git a/server/src/main/java/org/opensearch/action/search/TransportClearScrollAction.java b/server/src/main/java/org/opensearch/action/search/TransportClearScrollAction.java index a44b80f13975c..e67005eb1fb87 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportClearScrollAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportClearScrollAction.java @@ -32,11 +32,11 @@ package org.opensearch.action.search; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.HandledTransportAction; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.tasks.Task; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/search/TransportCreatePitAction.java b/server/src/main/java/org/opensearch/action/search/TransportCreatePitAction.java index c664eb9629216..baa113997f243 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportCreatePitAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportCreatePitAction.java @@ -9,16 +9,16 @@ package org.opensearch.action.search; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.action.StepListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.HandledTransportAction; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.TimeValue; import org.opensearch.core.index.shard.ShardId; import org.opensearch.search.SearchPhaseResult; import org.opensearch.search.internal.ShardSearchContextId; diff --git a/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java b/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java index 6ad11aa4c6d59..b15a4b66e8870 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java @@ -8,10 +8,10 @@ package org.opensearch.action.search; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.HandledTransportAction; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.tasks.Task; import org.opensearch.transport.TransportService; @@ -76,7 +76,7 @@ private void deletePits(ActionListener<DeletePitResponse> 
listener, DeletePitReq /** * Delete all active PIT reader contexts leveraging list all PITs - * + * <p> * For cross-cluster PITs: * - mixed cluster PITs (PIT comprising local and remote) will be fully deleted. Since there will at least be * one reader context with PIT ID present in local cluster, 'Get all PITs' will retrieve the PIT ID with which diff --git a/server/src/main/java/org/opensearch/action/search/TransportMultiSearchAction.java b/server/src/main/java/org/opensearch/action/search/TransportMultiSearchAction.java index a3ee6e0a37e23..146b4010af4b3 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportMultiSearchAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportMultiSearchAction.java @@ -32,7 +32,6 @@ package org.opensearch.action.search; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.HandledTransportAction; import org.opensearch.client.node.NodeClient; @@ -40,10 +39,11 @@ import org.opensearch.cluster.block.ClusterBlockLevel; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.AtomicArray; import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java index c4af9ffa20194..65cfd35489033 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java @@ -32,7 +32,6 @@ package org.opensearch.action.search; -import org.opensearch.action.ActionListener; import org.opensearch.action.OriginalIndices; import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsGroup; @@ -57,20 +56,22 @@ import org.opensearch.cluster.routing.ShardIterator; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; -import org.opensearch.common.breaker.CircuitBreaker; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AtomicArray; import org.opensearch.common.util.concurrent.CountDown; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.index.Index; -import org.opensearch.index.query.Rewriteable; import org.opensearch.core.index.shard.ShardId; -import org.opensearch.indices.breaker.CircuitBreakerService; +import org.opensearch.core.indices.breaker.CircuitBreakerService; +import
org.opensearch.core.tasks.TaskId; +import org.opensearch.index.query.Rewriteable; import org.opensearch.search.SearchPhaseResult; import org.opensearch.search.SearchService; import org.opensearch.search.SearchShardTarget; @@ -86,7 +87,13 @@ import org.opensearch.search.profile.SearchProfileShardResults; import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; +import org.opensearch.telemetry.metrics.MetricsRegistry; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanBuilder; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.telemetry.tracing.listener.TraceableActionListener; +import org.opensearch.telemetry.tracing.listener.TraceableSearchRequestOperationsListener; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.RemoteClusterAware; import org.opensearch.transport.RemoteClusterService; @@ -135,6 +142,13 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest, Property.NodeScope ); + public static final Setting<Boolean> SEARCH_QUERY_METRICS_ENABLED_SETTING = Setting.boolSetting( + "search.query.metrics.enabled", + false, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + // cluster level setting for timeout based search cancellation. If search request level parameter is present then that will take // precedence over the cluster setting value public static final String SEARCH_CANCEL_AFTER_TIME_INTERVAL_SETTING_KEY = "search.cancel_after_time_interval"; @@ -145,6 +159,14 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest, Setting.Property.NodeScope ); + public static final String SEARCH_PHASE_TOOK_ENABLED_KEY = "search.phase_took_enabled"; + public static final Setting<Boolean> SEARCH_PHASE_TOOK_ENABLED = Setting.boolSetting( + SEARCH_PHASE_TOOK_ENABLED_KEY, + false, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + private final NodeClient client; private final ThreadPool threadPool; private final ClusterService clusterService; @@ -156,6 +178,14 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest, private final NamedWriteableRegistry namedWriteableRegistry; private final CircuitBreaker circuitBreaker; private final SearchPipelineService searchPipelineService; + private final SearchRequestOperationsCompositeListenerFactory searchRequestOperationsCompositeListenerFactory; + private final Tracer tracer; + + private volatile boolean searchQueryMetricsEnabled; + + private final MetricsRegistry metricsRegistry; + + private SearchQueryCategorizer searchQueryCategorizer; @Inject public TransportSearchAction( @@ -170,7 +200,10 @@ public TransportSearchAction( ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, NamedWriteableRegistry namedWriteableRegistry, - SearchPipelineService searchPipelineService + SearchPipelineService searchPipelineService, + MetricsRegistry metricsRegistry, + SearchRequestOperationsCompositeListenerFactory searchRequestOperationsCompositeListenerFactory, + Tracer tracer ) { super(SearchAction.NAME, transportService, actionFilters, (Writeable.Reader<SearchRequest>) SearchRequest::new); this.client = client; @@ -185,6 +218,19 @@ public TransportSearchAction( this.indexNameExpressionResolver = indexNameExpressionResolver; this.namedWriteableRegistry = namedWriteableRegistry; this.searchPipelineService = searchPipelineService; + 
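// wire up the collaborators added in this change: the metrics registry (used by the query +        // categorizer), the composite request-operations listener factory, and the tracer +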
this.metricsRegistry = metricsRegistry; + this.searchQueryMetricsEnabled = clusterService.getClusterSettings().get(SEARCH_QUERY_METRICS_ENABLED_SETTING); + this.searchRequestOperationsCompositeListenerFactory = searchRequestOperationsCompositeListenerFactory; + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(SEARCH_QUERY_METRICS_ENABLED_SETTING, this::setSearchQueryMetricsEnabled); + this.tracer = tracer; + } + + private void setSearchQueryMetricsEnabled(boolean searchQueryMetricsEnabled) { + this.searchQueryMetricsEnabled = searchQueryMetricsEnabled; + if ((this.searchQueryMetricsEnabled == true) && this.searchQueryCategorizer == null) { + this.searchQueryCategorizer = new SearchQueryCategorizer(metricsRegistry); + } } private Map<String, AliasFilter> buildPerIndexAliasFilter( @@ -241,7 +287,6 @@ private Map<String, Float> resolveIndexBoosts(SearchRequest searchRequest, Clust * @opensearch.internal */ static final class SearchTimeProvider { - private final long absoluteStartMillis; private final long relativeStartNanos; private final LongSupplier relativeCurrentNanosProvider; @@ -327,7 +372,8 @@ public AbstractSearchAsyncAction<? extends SearchPhaseResult> asyncSearchAction( ActionListener<SearchResponse> listener, boolean preFilter, ThreadPool threadPool, - SearchResponse.Clusters clusters + SearchResponse.Clusters clusters, + SearchRequestContext searchRequestContext ) { return new AbstractSearchAsyncAction<SearchPhaseResult>( actionName, @@ -346,7 +392,9 @@ public AbstractSearchAsyncAction<? extends SearchPhaseResult> asyncSearchAction( task, new ArraySearchPhaseResults<>(shardsIts.size()), searchRequest.getMaxConcurrentShardRequests(), - clusters + clusters, + searchRequestContext, + tracer ) { @Override protected void executePhaseOnShard( @@ -390,20 +438,72 @@ private void executeRequest( relativeStartNanos, System::nanoTime ); - PipelinedRequest searchRequest; - ActionListener<SearchResponse> listener; - try { - searchRequest = searchPipelineService.resolvePipeline(originalSearchRequest); - listener = ActionListener.wrap( - r -> originalListener.onResponse(searchRequest.transformResponse(r)), - originalListener::onFailure + if (originalSearchRequest.isPhaseTook() == null) { + originalSearchRequest.setPhaseTook(clusterService.getClusterSettings().get(SEARCH_PHASE_TOOK_ENABLED)); + } + + final Span requestSpan = tracer.startSpan(SpanBuilder.from(task, actionName)); + try (final SpanScope spanScope = tracer.withSpanInScope(requestSpan)) { + SearchRequestOperationsListener.CompositeListener requestOperationsListeners; + final ActionListener<SearchResponse> updatedListener = TraceableActionListener.create(originalListener, requestSpan, tracer); + requestOperationsListeners = searchRequestOperationsCompositeListenerFactory.buildCompositeListener( + originalSearchRequest, + logger, + TraceableSearchRequestOperationsListener.create(tracer, requestSpan) ); - } catch (Exception e) { - originalListener.onFailure(e); - return; + SearchRequestContext searchRequestContext = new SearchRequestContext(requestOperationsListeners, originalSearchRequest); + searchRequestContext.getSearchRequestOperationsListener().onRequestStart(searchRequestContext); + + PipelinedRequest searchRequest; + ActionListener<SearchResponse> listener; + try { + searchRequest = searchPipelineService.resolvePipeline(originalSearchRequest); + listener = searchRequest.transformResponseListener(updatedListener); + } catch (Exception e) { + updatedListener.onFailure(e); + return; + } + + 
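// the search pipeline transforms the request first; only the transformed request (sr) is +            // categorized below (when search.query.metrics.enabled is set) and then rewritten +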
ActionListener<SearchRequest> requestTransformListener = ActionListener.wrap(sr -> { + if (searchQueryMetricsEnabled) { + try { + searchQueryCategorizer.categorize(sr.source()); + } catch (Exception e) { + logger.error("Error while trying to categorize the query.", e); + } + } + + ActionListener<SearchSourceBuilder> rewriteListener = buildRewriteListener( + sr, + task, + timeProvider, + searchAsyncActionProvider, + listener, + searchRequestContext + ); + if (sr.source() == null) { + rewriteListener.onResponse(sr.source()); + } else { + Rewriteable.rewriteAndFetch( + sr.source(), + searchService.getRewriteContext(timeProvider::getAbsoluteStartMillis), + rewriteListener + ); + } + }, listener::onFailure); + searchRequest.transformRequest(requestTransformListener); } + } - ActionListener<SearchSourceBuilder> rewriteListener = ActionListener.wrap(source -> { + private ActionListener<SearchSourceBuilder> buildRewriteListener( + SearchRequest searchRequest, + Task task, + SearchTimeProvider timeProvider, + SearchAsyncActionProvider searchAsyncActionProvider, + ActionListener<SearchResponse> listener, + SearchRequestContext searchRequestContext + ) { + return ActionListener.wrap(source -> { if (source != searchRequest.source()) { // only set it if it changed - we don't allow null values to be set but it might be already null. this way we catch // situations when source is rewritten to null due to a bug @@ -433,7 +533,8 @@ private void executeRequest( clusterState, listener, searchContext, - searchAsyncActionProvider + searchAsyncActionProvider, + searchRequestContext ); } else { if (shouldMinimizeRoundtrips(searchRequest)) { @@ -454,8 +555,10 @@ private void executeRequest( clusterState, l, searchContext, - searchAsyncActionProvider - ) + searchAsyncActionProvider, + searchRequestContext + ), + searchRequestContext ); } else { AtomicInteger skippedClusters = new AtomicInteger(0); @@ -504,22 +607,14 @@ private void executeRequest( listener, new SearchResponse.Clusters(totalClusters, successfulClusters, skippedClusters.get()), searchContext, - searchAsyncActionProvider + searchAsyncActionProvider, + searchRequestContext ); }, listener::onFailure) ); } } }, listener::onFailure); - if (searchRequest.source() == null) { - rewriteListener.onResponse(searchRequest.source()); - } else { - Rewriteable.rewriteAndFetch( - searchRequest.source(), - searchService.getRewriteContext(timeProvider::getAbsoluteStartMillis), - rewriteListener - ); - } } static boolean shouldMinimizeRoundtrips(SearchRequest searchRequest) { @@ -551,7 +646,8 @@ static void ccsRemoteReduce( RemoteClusterService remoteClusterService, ThreadPool threadPool, ActionListener<SearchResponse> listener, - BiConsumer<SearchRequest, ActionListener<SearchResponse>> localSearchConsumer + BiConsumer<SearchRequest, ActionListener<SearchResponse>> localSearchConsumer, + SearchRequestContext searchRequestContext ) { if (localIndices == null && remoteIndices.size() == 1) { @@ -593,6 +689,7 @@ public void onResponse(SearchResponse searchResponse) { searchResponse.getSuccessfulShards(), searchResponse.getSkippedShards(), timeProvider.buildTookInMillis(), + searchRequestContext.getPhaseTook(), searchResponse.getShardFailures(), new SearchResponse.Clusters(1, 1, 0), searchResponse.pointInTimeId() @@ -638,7 +735,8 @@ public void onFailure(Exception e) { exceptions, searchResponseMerger, totalClusters, - listener + listener, + searchRequestContext ); Client remoteClusterClient = remoteClusterService.getRemoteClusterClient(threadPool, clusterAlias); 
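// each remote cluster gets its own ccsListener; per-cluster responses and failures are folded into the shared searchResponseMerger before the single merged response is built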
remoteClusterClient.search(ccsSearchRequest, ccsListener); @@ -652,7 +750,8 @@ public void onFailure(Exception e) { exceptions, searchResponseMerger, totalClusters, - listener + listener, + searchRequestContext ); SearchRequest ccsLocalSearchRequest = SearchRequest.subSearchRequest( searchRequest, @@ -747,7 +846,8 @@ private static ActionListener<SearchResponse> createCCSListener( AtomicReference<Exception> exceptions, SearchResponseMerger searchResponseMerger, int totalClusters, - ActionListener<SearchResponse> originalListener + ActionListener<SearchResponse> originalListener, + SearchRequestContext searchRequestContext ) { return new CCSActionListener<SearchResponse, SearchResponse>( clusterAlias, @@ -769,7 +869,7 @@ SearchResponse createFinalResponse() { searchResponseMerger.numResponses(), skippedClusters.get() ); - return searchResponseMerger.getMergedResponse(clusters); + return searchResponseMerger.getMergedResponse(clusters, searchRequestContext); } }; } @@ -782,7 +882,8 @@ private void executeLocalSearch( ClusterState clusterState, ActionListener<SearchResponse> listener, SearchContextId searchContext, - SearchAsyncActionProvider searchAsyncActionProvider + SearchAsyncActionProvider searchAsyncActionProvider, + SearchRequestContext searchRequestContext ) { executeSearch( (SearchTask) task, @@ -796,7 +897,8 @@ private void executeLocalSearch( listener, SearchResponse.Clusters.EMPTY, searchContext, - searchAsyncActionProvider + searchAsyncActionProvider, + searchRequestContext ); } @@ -914,11 +1016,10 @@ private void executeSearch( ActionListener<SearchResponse> listener, SearchResponse.Clusters clusters, @Nullable SearchContextId searchContext, - SearchAsyncActionProvider searchAsyncActionProvider + SearchAsyncActionProvider searchAsyncActionProvider, + SearchRequestContext searchRequestContext ) { - clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ); - // TODO: I think startTime() should become part of ActionRequest and that should be used both for index name // date math expressions and $now in scripts. This way all apis will deal with now in the same way instead // of just for the _search api @@ -968,11 +1069,8 @@ private void executeSearch( indexRoutings = routingMap; } final GroupShardsIterator<SearchShardIterator> shardIterators = mergeShardsIterators(localShardIterators, remoteShardIterators); - failIfOverShardCountLimit(clusterService, shardIterators.size()); - Map<String, Float> concreteIndexBoosts = resolveIndexBoosts(searchRequest, clusterState); - // optimize search type for cases where there is only one shard group to search on if (shardIterators.size() == 1) { // if we only have one group, then we always want Q_T_F, no need for DFS, and no need to do THEN since we hit one shard @@ -1020,7 +1118,8 @@ private void executeSearch( listener, preFilterSearchShards, threadPool, - clusters + clusters, + searchRequestContext ).start(); } @@ -1103,7 +1202,8 @@ AbstractSearchAsyncAction<? extends SearchPhaseResult> asyncSearchAction( ActionListener<SearchResponse> listener, boolean preFilter, ThreadPool threadPool, - SearchResponse.Clusters clusters + SearchResponse.Clusters clusters, + SearchRequestContext searchRequestContext ); } @@ -1121,7 +1221,8 @@ private AbstractSearchAsyncAction<? 
extends SearchPhaseResult> searchAsyncAction ActionListener<SearchResponse> listener, boolean preFilter, ThreadPool threadPool, - SearchResponse.Clusters clusters + SearchResponse.Clusters clusters, + SearchRequestContext searchRequestContext ) { if (preFilter) { return new CanMatchPreFilterSearchPhase( @@ -1138,8 +1239,8 @@ private AbstractSearchAsyncAction<? extends SearchPhaseResult> searchAsyncAction timeProvider, clusterState, task, - (iter) -> { - AbstractSearchAsyncAction<? extends SearchPhaseResult> action = searchAsyncAction( + (iter) -> new WrappingSearchAsyncActionPhase( + searchAsyncAction( task, searchRequest, executor, @@ -1153,16 +1254,13 @@ private AbstractSearchAsyncAction<? extends SearchPhaseResult> searchAsyncAction listener, false, threadPool, - clusters - ); - return new SearchPhase(action.getName()) { - @Override - public void run() { - action.start(); - } - }; - }, - clusters + clusters, + searchRequestContext + ) + ), + clusters, + searchRequestContext, + tracer ); } else { final QueryPhaseResultConsumer queryResultConsumer = searchPhaseController.newSearchPhaseResults( @@ -1192,7 +1290,9 @@ public void run() { timeProvider, clusterState, task, - clusters + clusters, + searchRequestContext, + tracer ); break; case QUERY_THEN_FETCH: @@ -1212,7 +1312,9 @@ public void run() { timeProvider, clusterState, task, - clusters + clusters, + searchRequestContext, + tracer ); break; default: diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchHelper.java b/server/src/main/java/org/opensearch/action/search/TransportSearchHelper.java index 8c127983770b6..5c260e02e7275 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchHelper.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchHelper.java @@ -33,10 +33,10 @@ package org.opensearch.action.search; import org.opensearch.Version; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.core.common.io.stream.BytesStreamInput; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.util.concurrent.AtomicArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.BytesStreamInput; import org.opensearch.search.SearchPhaseResult; import org.opensearch.search.SearchShardTarget; import org.opensearch.search.internal.InternalScrollSearchRequest; diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchScrollAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchScrollAction.java index ea29449582e7c..4713d03c93bac 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchScrollAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchScrollAction.java @@ -32,11 +32,11 @@ package org.opensearch.action.search; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.HandledTransportAction; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.tasks.Task; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/search/UpdatePitContextResponse.java b/server/src/main/java/org/opensearch/action/search/UpdatePitContextResponse.java index da39aed20ef8e..1db8fc48c28bc 100644 --- 
a/server/src/main/java/org/opensearch/action/search/UpdatePitContextResponse.java +++ b/server/src/main/java/org/opensearch/action/search/UpdatePitContextResponse.java @@ -10,7 +10,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/action/search/WrappingSearchAsyncActionPhase.java b/server/src/main/java/org/opensearch/action/search/WrappingSearchAsyncActionPhase.java new file mode 100644 index 0000000000000..3c1ad52a1fe6a --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/WrappingSearchAsyncActionPhase.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.search.SearchPhaseResult; + +/** + * The WrappingSearchAsyncActionPhase (see {@link CanMatchPreFilterSearchPhase} for one example) is a special case + * of search phase that wraps a search async action as a {@link SearchPhase}. The {@link AbstractSearchAsyncAction} manages its own + * onPhaseStart / onPhaseFailure / onPhaseDone callbacks, but wrapping it in a plain SearchPhase causes + * only some of those callbacks to be invoked. The {@link AbstractSearchAsyncAction} therefore treats {@link WrappingSearchAsyncActionPhase} specially. + */ +class WrappingSearchAsyncActionPhase extends SearchPhase { + private final AbstractSearchAsyncAction<? extends SearchPhaseResult> action; + + protected WrappingSearchAsyncActionPhase(AbstractSearchAsyncAction<?
extends SearchPhaseResult> action) { + super(action.getName()); + this.action = action; + } + + @Override + public void run() { + action.start(); + } + + SearchPhase getSearchPhase() { + return action; + } +} diff --git a/server/src/main/java/org/opensearch/action/support/ActionFilter.java b/server/src/main/java/org/opensearch/action/support/ActionFilter.java index 2fe32b120d4a9..e936512004fd2 100644 --- a/server/src/main/java/org/opensearch/action/support/ActionFilter.java +++ b/server/src/main/java/org/opensearch/action/support/ActionFilter.java @@ -32,9 +32,9 @@ package org.opensearch.action.support; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; -import org.opensearch.action.ActionResponse; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; import org.opensearch.tasks.Task; /** diff --git a/server/src/main/java/org/opensearch/action/support/ActionFilterChain.java b/server/src/main/java/org/opensearch/action/support/ActionFilterChain.java index 1eb5682343ffa..1ebe1ee63abf9 100644 --- a/server/src/main/java/org/opensearch/action/support/ActionFilterChain.java +++ b/server/src/main/java/org/opensearch/action/support/ActionFilterChain.java @@ -32,9 +32,9 @@ package org.opensearch.action.support; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; -import org.opensearch.action.ActionResponse; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; import org.opensearch.tasks.Task; /** diff --git a/server/src/main/java/org/opensearch/action/support/ActiveShardCount.java b/server/src/main/java/org/opensearch/action/support/ActiveShardCount.java index 15275ba48fc6e..e91342a7ce4b8 100644 --- a/server/src/main/java/org/opensearch/action/support/ActiveShardCount.java +++ b/server/src/main/java/org/opensearch/action/support/ActiveShardCount.java @@ -36,6 +36,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -48,8 +49,9 @@ * A class whose instances represent a value for counting the number * of active shard copies for a given shard in an index. 
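Note: a rough, self-contained analogue of the wrapper introduced above, using stand-in types rather than the OpenSearch classes: run() delegates to the wrapped action's start(), and an accessor (mirroring getSearchPhase()) lets the framework recover the underlying action so its own phase callbacks can still be driven.

abstract class Phase {
    final String name;

    Phase(String name) {
        this.name = name;
    }

    abstract void run();
}

class AsyncAction extends Phase {
    AsyncAction(String name) {
        super(name);
    }

    @Override
    void run() {
        start();
    }

    void start() {
        System.out.println("phase " + name + " started");
    }
}

class WrappingPhase extends Phase {
    private final AsyncAction action;

    WrappingPhase(AsyncAction action) {
        super(action.name);
        this.action = action;
    }

    @Override
    void run() {
        action.start(); // delegate, mirroring WrappingSearchAsyncActionPhase#run
    }

    AsyncAction unwrap() {
        return action; // mirrors getSearchPhase()
    }
}

public class WrappingPhaseDemo {
    public static void main(String[] args) {
        Phase phase = new WrappingPhase(new AsyncAction("can_match"));
        phase.run(); // prints: phase can_match started
    }
}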
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ActiveShardCount implements Writeable { private static final int ACTIVE_SHARD_COUNT_DEFAULT = -2; diff --git a/server/src/main/java/org/opensearch/action/support/ActiveShardsObserver.java b/server/src/main/java/org/opensearch/action/support/ActiveShardsObserver.java index 7a4ae0e31cf55..29468fe777707 100644 --- a/server/src/main/java/org/opensearch/action/support/ActiveShardsObserver.java +++ b/server/src/main/java/org/opensearch/action/support/ActiveShardsObserver.java @@ -34,11 +34,11 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateObserver; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; import org.opensearch.node.NodeClosedException; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/action/support/AdapterActionFuture.java b/server/src/main/java/org/opensearch/action/support/AdapterActionFuture.java index 93430a049d3a9..cfc867c572a75 100644 --- a/server/src/main/java/org/opensearch/action/support/AdapterActionFuture.java +++ b/server/src/main/java/org/opensearch/action/support/AdapterActionFuture.java @@ -33,12 +33,12 @@ package org.opensearch.action.support; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionFuture; -import org.opensearch.action.ActionListener; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.BaseFuture; import org.opensearch.common.util.concurrent.FutureUtils; import org.opensearch.common.util.concurrent.UncategorizedExecutionException; +import org.opensearch.core.action.ActionListener; import java.util.concurrent.TimeUnit; diff --git a/server/src/main/java/org/opensearch/action/support/ChannelActionListener.java b/server/src/main/java/org/opensearch/action/support/ChannelActionListener.java index 5b0475093d3c2..ac5dfeb4728d5 100644 --- a/server/src/main/java/org/opensearch/action/support/ChannelActionListener.java +++ b/server/src/main/java/org/opensearch/action/support/ChannelActionListener.java @@ -32,10 +32,10 @@ package org.opensearch.action.support; -import org.opensearch.action.ActionListener; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportChannel; import org.opensearch.transport.TransportRequest; -import org.opensearch.transport.TransportResponse; /** * Listener for transport channel actions diff --git a/server/src/main/java/org/opensearch/action/support/ContextPreservingActionListener.java b/server/src/main/java/org/opensearch/action/support/ContextPreservingActionListener.java index 6eadc0b67ffbd..a59c7fb45ca27 100644 --- a/server/src/main/java/org/opensearch/action/support/ContextPreservingActionListener.java +++ b/server/src/main/java/org/opensearch/action/support/ContextPreservingActionListener.java @@ -31,8 +31,8 @@ package org.opensearch.action.support; -import org.opensearch.action.ActionListener; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.ActionListener; import java.util.function.Supplier; diff --git 
a/server/src/main/java/org/opensearch/action/support/GroupedActionListener.java b/server/src/main/java/org/opensearch/action/support/GroupedActionListener.java index 3d15d63289d9e..fb9554cc4c3d2 100644 --- a/server/src/main/java/org/opensearch/action/support/GroupedActionListener.java +++ b/server/src/main/java/org/opensearch/action/support/GroupedActionListener.java @@ -31,9 +31,9 @@ package org.opensearch.action.support; -import org.opensearch.action.ActionListener; import org.opensearch.common.util.concurrent.AtomicArray; import org.opensearch.common.util.concurrent.CountDown; +import org.opensearch.core.action.ActionListener; import java.util.Collection; import java.util.Collections; diff --git a/server/src/main/java/org/opensearch/action/support/HandledTransportAction.java b/server/src/main/java/org/opensearch/action/support/HandledTransportAction.java index 7e030a34cdd91..786d8cfb6fa1d 100644 --- a/server/src/main/java/org/opensearch/action/support/HandledTransportAction.java +++ b/server/src/main/java/org/opensearch/action/support/HandledTransportAction.java @@ -32,7 +32,7 @@ package org.opensearch.action.support; import org.opensearch.action.ActionRequest; -import org.opensearch.action.ActionResponse; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/action/support/IndicesOptions.java b/server/src/main/java/org/opensearch/action/support/IndicesOptions.java index d30ee7e11bdfa..2d9fecddb6f7d 100644 --- a/server/src/main/java/org/opensearch/action/support/IndicesOptions.java +++ b/server/src/main/java/org/opensearch/action/support/IndicesOptions.java @@ -32,6 +32,7 @@ package org.opensearch.action.support; import org.opensearch.OpenSearchParseException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -55,15 +56,17 @@ * Controls how to deal with unavailable concrete indices (closed or missing), how wildcard expressions are expanded * to actual indices (all, closed or open indices) and how to deal with wildcard expressions that resolve to no indices. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndicesOptions implements ToXContentFragment { /** * The wildcard states. 
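Note: with ActiveShardCount promoted to @PublicApi above, downstream code can state write-availability requirements directly. A hedged usage sketch follows; the from() factory and the waitForActiveShards() setter follow the long-standing Elasticsearch-derived surface and should be verified against the OpenSearch version in use.

import java.util.Map;

import org.opensearch.action.index.IndexRequest;
import org.opensearch.action.support.ActiveShardCount;

public class ActiveShardCountExample {
    public static IndexRequest buildRequest() {
        return new IndexRequest("my-index")
            .source(Map.of("field", "value"))
            // Require at least two active shard copies before the write proceeds.
            .waitForActiveShards(ActiveShardCount.from(2));
    }
}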
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum WildcardStates { OPEN, CLOSED, diff --git a/server/src/main/java/org/opensearch/action/support/ListenerTimeouts.java b/server/src/main/java/org/opensearch/action/support/ListenerTimeouts.java index b01320369af6a..6020dc2bfd489 100644 --- a/server/src/main/java/org/opensearch/action/support/ListenerTimeouts.java +++ b/server/src/main/java/org/opensearch/action/support/ListenerTimeouts.java @@ -33,8 +33,8 @@ package org.opensearch.action.support; import org.opensearch.OpenSearchTimeoutException; -import org.opensearch.action.ActionListener; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; import org.opensearch.threadpool.Scheduler; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/action/support/PlainListenableActionFuture.java b/server/src/main/java/org/opensearch/action/support/PlainListenableActionFuture.java index ac5f7d25efb18..1b16249f70754 100644 --- a/server/src/main/java/org/opensearch/action/support/PlainListenableActionFuture.java +++ b/server/src/main/java/org/opensearch/action/support/PlainListenableActionFuture.java @@ -32,8 +32,8 @@ package org.opensearch.action.support; -import org.opensearch.action.ActionListener; import org.opensearch.action.ListenableActionFuture; +import org.opensearch.core.action.ActionListener; import java.util.ArrayList; import java.util.List; diff --git a/server/src/main/java/org/opensearch/action/support/RetryableAction.java b/server/src/main/java/org/opensearch/action/support/RetryableAction.java index cf9c140a54bc8..d555a876f20fb 100644 --- a/server/src/main/java/org/opensearch/action/support/RetryableAction.java +++ b/server/src/main/java/org/opensearch/action/support/RetryableAction.java @@ -34,10 +34,11 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.action.bulk.BackoffPolicy; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.threadpool.Scheduler; import org.opensearch.threadpool.ThreadPool; @@ -52,8 +53,9 @@ * default. The action will be retried with exponentially increasing delay periods until the timeout period * has been reached. 
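Note: IndicesOptions and its WildcardStates are likewise promoted to @PublicApi above. A hedged sketch of consuming that API follows; the fromOptions() factory and the accessor names mirror the Elasticsearch-derived surface and are assumptions worth double-checking.

import org.opensearch.action.support.IndicesOptions;

public class IndicesOptionsExample {
    public static void main(String[] args) {
        // ignoreUnavailable=true, allowNoIndices=true, expand wildcards to
        // open indices but not to closed ones.
        IndicesOptions options = IndicesOptions.fromOptions(true, true, true, false);
        System.out.println(options.ignoreUnavailable());     // true
        System.out.println(options.expandWildcardsOpen());   // true
        System.out.println(options.expandWildcardsClosed()); // false
    }
}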
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class RetryableAction<Response> { private final Logger logger; diff --git a/server/src/main/java/org/opensearch/action/support/ThreadedActionListener.java b/server/src/main/java/org/opensearch/action/support/ThreadedActionListener.java index 17ef64aa901c0..8b51535ef8d87 100644 --- a/server/src/main/java/org/opensearch/action/support/ThreadedActionListener.java +++ b/server/src/main/java/org/opensearch/action/support/ThreadedActionListener.java @@ -34,9 +34,9 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.core.action.ActionListener; import org.opensearch.threadpool.ThreadPool; /** diff --git a/server/src/main/java/org/opensearch/action/support/TimeoutTaskCancellationUtility.java b/server/src/main/java/org/opensearch/action/support/TimeoutTaskCancellationUtility.java index a98df5b832bf5..a317a45eab31f 100644 --- a/server/src/main/java/org/opensearch/action/support/TimeoutTaskCancellationUtility.java +++ b/server/src/main/java/org/opensearch/action/support/TimeoutTaskCancellationUtility.java @@ -11,15 +11,15 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; import org.opensearch.client.OriginSettingClient; import org.opensearch.client.node.NodeClient; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.tasks.TaskId; import org.opensearch.search.SearchService; import org.opensearch.tasks.CancellableTask; -import org.opensearch.tasks.TaskId; import org.opensearch.threadpool.Scheduler; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/action/support/TransportAction.java b/server/src/main/java/org/opensearch/action/support/TransportAction.java index e2f5ecda101b8..72aae210d61ae 100644 --- a/server/src/main/java/org/opensearch/action/support/TransportAction.java +++ b/server/src/main/java/org/opensearch/action/support/TransportAction.java @@ -34,16 +34,16 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.ActionResponse; -import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.tasks.TaskCancelledException; +import org.opensearch.core.tasks.TaskId; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskCancelledException; -import org.opensearch.tasks.TaskId; import org.opensearch.tasks.TaskListener; import org.opensearch.tasks.TaskManager; @@ -81,7 +81,7 @@ private Releasable registerChildNode(TaskId parentTask) { /** * Use this method when the transport action call should 
result in creation of a new task associated with the call. - * + * <p> * This is a typical behavior. */ public final Task execute(Request request, ActionListener<Response> listener) { diff --git a/server/src/main/java/org/opensearch/action/support/WriteRequest.java b/server/src/main/java/org/opensearch/action/support/WriteRequest.java index f462464b99ce8..8d53f7b005d54 100644 --- a/server/src/main/java/org/opensearch/action/support/WriteRequest.java +++ b/server/src/main/java/org/opensearch/action/support/WriteRequest.java @@ -37,6 +37,7 @@ import org.opensearch.action.index.IndexRequest; import org.opensearch.action.support.replication.ReplicatedWriteRequest; import org.opensearch.action.update.UpdateRequest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -78,8 +79,9 @@ default R setRefreshPolicy(String refreshPolicy) { /** * The refresh policy of the request. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") enum RefreshPolicy implements Writeable { /** * Don't refresh after this request. The default. diff --git a/server/src/main/java/org/opensearch/action/support/broadcast/BroadcastOperationRequestBuilder.java b/server/src/main/java/org/opensearch/action/support/broadcast/BroadcastOperationRequestBuilder.java index d732a28c32ea3..8c756c1682913 100644 --- a/server/src/main/java/org/opensearch/action/support/broadcast/BroadcastOperationRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/support/broadcast/BroadcastOperationRequestBuilder.java @@ -32,8 +32,8 @@ package org.opensearch.action.support.broadcast; -import org.opensearch.action.ActionType; import org.opensearch.action.ActionRequestBuilder; +import org.opensearch.action.ActionType; import org.opensearch.action.support.IndicesOptions; import org.opensearch.client.OpenSearchClient; diff --git a/server/src/main/java/org/opensearch/action/support/broadcast/BroadcastRequest.java b/server/src/main/java/org/opensearch/action/support/broadcast/BroadcastRequest.java index ea9a77fc453f5..8a27e032cec5e 100644 --- a/server/src/main/java/org/opensearch/action/support/broadcast/BroadcastRequest.java +++ b/server/src/main/java/org/opensearch/action/support/broadcast/BroadcastRequest.java @@ -36,9 +36,9 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.TimeValue; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/action/support/broadcast/BroadcastResponse.java b/server/src/main/java/org/opensearch/action/support/broadcast/BroadcastResponse.java index 96f7efd05b8ba..071e4d9f36923 100644 --- a/server/src/main/java/org/opensearch/action/support/broadcast/BroadcastResponse.java +++ b/server/src/main/java/org/opensearch/action/support/broadcast/BroadcastResponse.java @@ -32,15 +32,15 @@ package org.opensearch.action.support.broadcast; -import org.opensearch.action.ActionResponse; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionResponse; +import 
org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.action.RestActions; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/action/support/broadcast/BroadcastShardResponse.java b/server/src/main/java/org/opensearch/action/support/broadcast/BroadcastShardResponse.java index 39c524448bc5d..9603f886366f2 100644 --- a/server/src/main/java/org/opensearch/action/support/broadcast/BroadcastShardResponse.java +++ b/server/src/main/java/org/opensearch/action/support/broadcast/BroadcastShardResponse.java @@ -35,7 +35,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.index.shard.ShardId; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/action/support/broadcast/TransportBroadcastAction.java b/server/src/main/java/org/opensearch/action/support/broadcast/TransportBroadcastAction.java index cb77fa7c3ac57..8bf8555194976 100644 --- a/server/src/main/java/org/opensearch/action/support/broadcast/TransportBroadcastAction.java +++ b/server/src/main/java/org/opensearch/action/support/broadcast/TransportBroadcastAction.java @@ -33,7 +33,6 @@ package org.opensearch.action.support.broadcast; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.action.NoShardAvailableActionException; import org.opensearch.action.support.ActionFilters; @@ -50,6 +49,7 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.tasks.Task; diff --git a/server/src/main/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java b/server/src/main/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java index bf71134ab7b88..c08cfb7af0e3d 100644 --- a/server/src/main/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java +++ b/server/src/main/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java @@ -33,12 +33,10 @@ package org.opensearch.action.support.broadcast.node; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.action.FailedNodeException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.NoShardAvailableActionException; import org.opensearch.action.support.ActionFilters; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.HandledTransportAction; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.TransportActions; @@ -53,9 +51,12 @@ import 
org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardsIterator; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.NodeShouldNotConnectException; @@ -63,10 +64,9 @@ import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestHandler; -import org.opensearch.transport.TransportResponse; +import org.opensearch.transport.TransportRequestOptions; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; -import org.opensearch.transport.TransportRequestOptions; import java.io.IOException; import java.util.ArrayList; diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/ClusterManagerNodeOperationRequestBuilder.java b/server/src/main/java/org/opensearch/action/support/clustermanager/ClusterManagerNodeOperationRequestBuilder.java index 4f60a75c5dd22..05c06604725a1 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/ClusterManagerNodeOperationRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/ClusterManagerNodeOperationRequestBuilder.java @@ -32,11 +32,11 @@ package org.opensearch.action.support.clustermanager; -import org.opensearch.action.ActionType; import org.opensearch.action.ActionRequestBuilder; -import org.opensearch.action.ActionResponse; +import org.opensearch.action.ActionType; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionResponse; /** * Base request builder for cluster-manager node operations diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/ClusterManagerNodeReadOperationRequestBuilder.java b/server/src/main/java/org/opensearch/action/support/clustermanager/ClusterManagerNodeReadOperationRequestBuilder.java index b0ac743e6a1dc..c261f44a0cd04 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/ClusterManagerNodeReadOperationRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/ClusterManagerNodeReadOperationRequestBuilder.java @@ -33,8 +33,8 @@ package org.opensearch.action.support.clustermanager; import org.opensearch.action.ActionType; -import org.opensearch.action.ActionResponse; import org.opensearch.client.OpenSearchClient; +import org.opensearch.core.action.ActionResponse; /** * Base request builder for cluster-manager node read operations that can be executed on the local node as well diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/ClusterManagerNodeRequest.java b/server/src/main/java/org/opensearch/action/support/clustermanager/ClusterManagerNodeRequest.java index affcb16327089..a43d6fb0b1e7a 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/ClusterManagerNodeRequest.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/ClusterManagerNodeRequest.java @@ -33,9 +33,9 @@ package 
org.opensearch.action.support.clustermanager; import org.opensearch.action.ActionRequest; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.TimeValue; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java index 13c576bd120c7..536ddcdd402e2 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java @@ -35,17 +35,15 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionListenerResponseHandler; -import org.opensearch.action.ActionResponse; import org.opensearch.action.ActionRunnable; import org.opensearch.action.bulk.BackoffPolicy; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.HandledTransportAction; import org.opensearch.action.support.RetryableAction; +import org.opensearch.cluster.ClusterManagerNodeChangePredicate; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateObserver; -import org.opensearch.cluster.ClusterManagerNodeChangePredicate; import org.opensearch.cluster.NotClusterManagerException; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.coordination.FailedToCommitClusterStateException; @@ -56,9 +54,11 @@ import org.opensearch.cluster.service.ClusterManagerTaskThrottler; import org.opensearch.cluster.service.ClusterManagerThrottlingException; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.TimeValue; import org.opensearch.discovery.ClusterManagerNotDiscoveredException; import org.opensearch.node.NodeClosedException; import org.opensearch.tasks.Task; diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeReadAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeReadAction.java index e957142afd884..d8cd5af992028 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeReadAction.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeReadAction.java @@ -32,10 +32,10 @@ package org.opensearch.action.support.clustermanager; -import org.opensearch.action.ActionResponse; import org.opensearch.action.support.ActionFilters; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git 
a/server/src/main/java/org/opensearch/action/support/clustermanager/info/ClusterInfoRequest.java b/server/src/main/java/org/opensearch/action/support/clustermanager/info/ClusterInfoRequest.java index 843a711dc3d65..5b7da57705ac3 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/info/ClusterInfoRequest.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/info/ClusterInfoRequest.java @@ -36,9 +36,9 @@ import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/info/ClusterInfoRequestBuilder.java b/server/src/main/java/org/opensearch/action/support/clustermanager/info/ClusterInfoRequestBuilder.java index 77c1c3656ce59..f22ff60b83a58 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/info/ClusterInfoRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/info/ClusterInfoRequestBuilder.java @@ -32,11 +32,11 @@ package org.opensearch.action.support.clustermanager.info; import org.opensearch.action.ActionType; -import org.opensearch.action.ActionResponse; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.util.ArrayUtils; +import org.opensearch.core.action.ActionResponse; /** * Transport request builder for cluster information diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/info/TransportClusterInfoAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/info/TransportClusterInfoAction.java index 7390a87de1f85..65f00a4731ab5 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/info/TransportClusterInfoAction.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/info/TransportClusterInfoAction.java @@ -31,8 +31,6 @@ package org.opensearch.action.support.clustermanager.info; -import org.opensearch.action.ActionListener; -import org.opensearch.action.ActionResponse; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; import org.opensearch.cluster.ClusterState; @@ -40,6 +38,8 @@ import org.opensearch.cluster.block.ClusterBlockLevel; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequest.java b/server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequest.java index 1601dfd0c9893..59f238a202788 100644 --- a/server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequest.java +++ b/server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequest.java @@ 
-32,9 +32,9 @@ package org.opensearch.action.support.master; import org.opensearch.cluster.ack.AckedRequest; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.TimeValue; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/action/support/master/AcknowledgedResponse.java b/server/src/main/java/org/opensearch/action/support/master/AcknowledgedResponse.java index 71695269396e1..279ad401f7e56 100644 --- a/server/src/main/java/org/opensearch/action/support/master/AcknowledgedResponse.java +++ b/server/src/main/java/org/opensearch/action/support/master/AcknowledgedResponse.java @@ -31,8 +31,9 @@ package org.opensearch.action.support.master; -import org.opensearch.action.ActionResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ConstructingObjectParser; @@ -49,8 +50,9 @@ /** * A response that indicates that a request has been acknowledged * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class AcknowledgedResponse extends ActionResponse implements ToXContentObject { private static final ParseField ACKNOWLEDGED = new ParseField("acknowledged"); diff --git a/server/src/main/java/org/opensearch/action/support/master/MasterNodeOperationRequestBuilder.java b/server/src/main/java/org/opensearch/action/support/master/MasterNodeOperationRequestBuilder.java index f6d475fc06171..0acbd998a6322 100644 --- a/server/src/main/java/org/opensearch/action/support/master/MasterNodeOperationRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/support/master/MasterNodeOperationRequestBuilder.java @@ -33,9 +33,9 @@ package org.opensearch.action.support.master; import org.opensearch.action.ActionType; -import org.opensearch.action.ActionResponse; import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.core.action.ActionResponse; /** * Base request builder for cluster-manager node operations diff --git a/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadOperationRequestBuilder.java b/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadOperationRequestBuilder.java index ae134bdeca3c2..36a3fc1d2de73 100644 --- a/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadOperationRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadOperationRequestBuilder.java @@ -33,9 +33,9 @@ package org.opensearch.action.support.master; import org.opensearch.action.ActionType; -import org.opensearch.action.ActionResponse; import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.core.action.ActionResponse; /** * Base request builder for cluster-manager node read operations that can be executed on the local node as well diff --git a/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeAction.java b/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeAction.java index 53a597214256d..eec7965bfed02 100644 --- 
a/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeAction.java +++ b/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeAction.java @@ -32,11 +32,11 @@ package org.opensearch.action.support.master; -import org.opensearch.action.ActionResponse; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeReadAction.java b/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeReadAction.java index 8adfba6c8ee02..b95459971737f 100644 --- a/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeReadAction.java +++ b/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeReadAction.java @@ -32,11 +32,11 @@ package org.opensearch.action.support.master; -import org.opensearch.action.ActionResponse; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/support/master/info/ClusterInfoRequestBuilder.java b/server/src/main/java/org/opensearch/action/support/master/info/ClusterInfoRequestBuilder.java index 7052e13625f97..091413c0df6d7 100644 --- a/server/src/main/java/org/opensearch/action/support/master/info/ClusterInfoRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/support/master/info/ClusterInfoRequestBuilder.java @@ -32,8 +32,8 @@ package org.opensearch.action.support.master.info; import org.opensearch.action.ActionType; -import org.opensearch.action.ActionResponse; import org.opensearch.client.OpenSearchClient; +import org.opensearch.core.action.ActionResponse; /** * Transport request builder for cluster information diff --git a/server/src/main/java/org/opensearch/action/support/master/info/TransportClusterInfoAction.java b/server/src/main/java/org/opensearch/action/support/master/info/TransportClusterInfoAction.java index 8ba6653892f88..2653e3a658674 100644 --- a/server/src/main/java/org/opensearch/action/support/master/info/TransportClusterInfoAction.java +++ b/server/src/main/java/org/opensearch/action/support/master/info/TransportClusterInfoAction.java @@ -31,10 +31,10 @@ package org.opensearch.action.support.master.info; -import org.opensearch.action.ActionResponse; import org.opensearch.action.support.ActionFilters; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git 
a/server/src/main/java/org/opensearch/action/support/nodes/BaseNodeResponse.java b/server/src/main/java/org/opensearch/action/support/nodes/BaseNodeResponse.java index 4a94f790c3443..8a4e12567b515 100644 --- a/server/src/main/java/org/opensearch/action/support/nodes/BaseNodeResponse.java +++ b/server/src/main/java/org/opensearch/action/support/nodes/BaseNodeResponse.java @@ -35,7 +35,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/action/support/nodes/BaseNodesRequest.java b/server/src/main/java/org/opensearch/action/support/nodes/BaseNodesRequest.java index 609b58322b990..4d54ce51c923c 100644 --- a/server/src/main/java/org/opensearch/action/support/nodes/BaseNodesRequest.java +++ b/server/src/main/java/org/opensearch/action/support/nodes/BaseNodesRequest.java @@ -36,9 +36,9 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.TimeValue; import java.io.IOException; @@ -53,9 +53,9 @@ public abstract class BaseNodesRequest<Request extends BaseNodesRequest<Request> * the list of nodesIds that will be used to resolve this request and {@link #concreteNodes} * will be populated. Note that if {@link #concreteNodes} is not null, it will be used and nodeIds * will be ignored. - * + * <p> * See {@link DiscoveryNodes#resolveNodes} for a full description of the options. 
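Note: the BaseNodesRequest javadoc above describes a precedence rule: explicitly supplied concreteNodes win, otherwise the node-id patterns are resolved against the cluster. A conceptual sketch of that rule follows (plain Java, not the DiscoveryNodes#resolveNodes implementation; "_all" is the only pattern modeled here).

import java.util.List;
import java.util.stream.Collectors;

public class NodeResolutionSketch {
    static List<String> resolve(List<String> concreteNodes, List<String> nodeIdPatterns, List<String> clusterNodeIds) {
        if (concreteNodes != null) {
            return concreteNodes; // already resolved: the id patterns are ignored
        }
        if (nodeIdPatterns == null || nodeIdPatterns.isEmpty() || nodeIdPatterns.contains("_all")) {
            return clusterNodeIds; // no filter selects every node
        }
        return clusterNodeIds.stream().filter(nodeIdPatterns::contains).collect(Collectors.toList());
    }

    public static void main(String[] args) {
        List<String> cluster = List.of("node-1", "node-2", "node-3");
        System.out.println(resolve(null, List.of("node-2"), cluster));            // [node-2]
        System.out.println(resolve(List.of("node-9"), List.of("_all"), cluster)); // [node-9]
    }
}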
- * + * <p> * TODO: get rid of this and resolve it to concrete nodes in the rest layer **/ private String[] nodesIds; diff --git a/server/src/main/java/org/opensearch/action/support/nodes/BaseNodesResponse.java b/server/src/main/java/org/opensearch/action/support/nodes/BaseNodesResponse.java index bf68b5201c3f8..c51a4f6bb9399 100644 --- a/server/src/main/java/org/opensearch/action/support/nodes/BaseNodesResponse.java +++ b/server/src/main/java/org/opensearch/action/support/nodes/BaseNodesResponse.java @@ -32,9 +32,9 @@ package org.opensearch.action.support.nodes; -import org.opensearch.action.ActionResponse; import org.opensearch.action.FailedNodeException; import org.opensearch.cluster.ClusterName; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/support/nodes/NodesOperationRequestBuilder.java b/server/src/main/java/org/opensearch/action/support/nodes/NodesOperationRequestBuilder.java index 9d5e1b3ab4ed4..ba1e214fe9d19 100644 --- a/server/src/main/java/org/opensearch/action/support/nodes/NodesOperationRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/support/nodes/NodesOperationRequestBuilder.java @@ -32,8 +32,8 @@ package org.opensearch.action.support.nodes; -import org.opensearch.action.ActionType; import org.opensearch.action.ActionRequestBuilder; +import org.opensearch.action.ActionType; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.unit.TimeValue; diff --git a/server/src/main/java/org/opensearch/action/support/nodes/TransportNodesAction.java b/server/src/main/java/org/opensearch/action/support/nodes/TransportNodesAction.java index 3f39d7b786ccb..9a1a28dd70636 100644 --- a/server/src/main/java/org/opensearch/action/support/nodes/TransportNodesAction.java +++ b/server/src/main/java/org/opensearch/action/support/nodes/TransportNodesAction.java @@ -33,7 +33,6 @@ package org.opensearch.action.support.nodes; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; @@ -41,6 +40,7 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.tasks.Task; diff --git a/server/src/main/java/org/opensearch/action/support/replication/FanoutReplicationProxy.java b/server/src/main/java/org/opensearch/action/support/replication/FanoutReplicationProxy.java index 51b95468d6b25..95c4ef1e2b092 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/FanoutReplicationProxy.java +++ b/server/src/main/java/org/opensearch/action/support/replication/FanoutReplicationProxy.java @@ -8,10 +8,10 @@ package org.opensearch.action.support.replication; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.replication.ReplicationOperation.ReplicaResponse; import org.opensearch.action.support.replication.ReplicationOperation.Replicas; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.core.action.ActionListener; import java.util.function.BiConsumer; import 
java.util.function.Consumer; diff --git a/server/src/main/java/org/opensearch/action/support/replication/PendingReplicationActions.java b/server/src/main/java/org/opensearch/action/support/replication/PendingReplicationActions.java index d2c4e7563195f..bb6bfc6fd4773 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/PendingReplicationActions.java +++ b/server/src/main/java/org/opensearch/action/support/replication/PendingReplicationActions.java @@ -33,12 +33,13 @@ package org.opensearch.action.support.replication; import org.opensearch.action.support.RetryableAction; -import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lease.Releasable; -import org.opensearch.index.shard.PrimaryShardClosedException; +import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.IndexShardClosedException; +import org.opensearch.index.shard.PrimaryShardClosedException; import org.opensearch.index.shard.ReplicationGroup; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.threadpool.ThreadPool; import java.util.ArrayList; @@ -51,8 +52,9 @@ /** * Pending Replication Actions * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class PendingReplicationActions implements Consumer<ReplicationGroup>, Releasable { private final Map<String, Set<RetryableAction<?>>> onGoingReplicationActions = ConcurrentCollections.newConcurrentMap(); diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationMode.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationMode.java index f9b85cc4bd7aa..dd3a38c1398ab 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/ReplicationMode.java +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationMode.java @@ -8,11 +8,14 @@ package org.opensearch.action.support.replication; +import org.opensearch.common.annotation.PublicApi; + /** * The type of replication used for inter-node replication. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public enum ReplicationMode { /** * In this mode, a {@code TransportReplicationAction} is fanned out to underlying concerned shard and is replicated logically. 
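Note: ReplicationMode is opened up as @PublicApi(since = "2.4.0") above. A hedged sketch of branching on it follows; only FULL_REPLICATION is visible in this hunk, so the other constant names (PRIMARY_TERM_VALIDATION, NO_REPLICATION) are assumptions drawn from the remote-store replication work and should be confirmed against the enum's actual values.

import org.opensearch.action.support.replication.ReplicationMode;

public class ReplicationModeCheck {
    // Returns true when the operation payload itself is fanned out to replicas.
    static boolean replicatesPayload(ReplicationMode mode) {
        switch (mode) {
            case FULL_REPLICATION:
                return true; // operation is applied logically on each replica
            case PRIMARY_TERM_VALIDATION: // assumed constant: only the primary term is validated
            case NO_REPLICATION:          // assumed constant: nothing is sent to replicas
            default:
                return false;
        }
    }
}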
diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationModeAwareProxy.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationModeAwareProxy.java index 26d3b3c2f64ef..189bc82348a0c 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/ReplicationModeAwareProxy.java +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationModeAwareProxy.java @@ -8,9 +8,9 @@ package org.opensearch.action.support.replication; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.replication.ReplicationOperation.ReplicaResponse; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.core.action.ActionListener; import java.util.Objects; import java.util.function.BiConsumer; diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java index 1affc9202c32b..9f69d41d83f5b 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java @@ -35,9 +35,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; import org.opensearch.ExceptionsHelper; -import org.opensearch.core.Assertions; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; import org.opensearch.action.UnavailableShardsException; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.RetryableAction; @@ -47,15 +45,17 @@ import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.Nullable; -import org.opensearch.common.breaker.CircuitBreakingException; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.Assertions; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.breaker.CircuitBreakingException; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.rest.RestStatus; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.ReplicationGroup; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.node.NodeClosedException; -import org.opensearch.core.rest.RestStatus; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.ConnectTransportException; @@ -479,7 +479,7 @@ public interface Primary< /** * Notifies the primary of a local checkpoint for the given allocation. - * + * <p> * Note: The primary will use this information to advance the global checkpoint if possible. 
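Note: the Primary javadoc above says that reported local checkpoints let the primary advance the global checkpoint. Conceptually the global checkpoint cannot exceed the minimum local checkpoint across in-sync copies; a toy illustration of that invariant follows (plain Java, not OpenSearch code).

import java.util.Map;

public class CheckpointSketch {
    static long globalCheckpoint(Map<String, Long> localCheckpointByAllocationId) {
        // The global checkpoint is bounded by the slowest in-sync copy.
        return localCheckpointByAllocationId.values().stream()
            .mapToLong(Long::longValue)
            .min()
            .orElse(-1L); // analogous to SequenceNumbers.NO_OPS_PERFORMED
    }

    public static void main(String[] args) {
        System.out.println(globalCheckpoint(Map.of("alloc-1", 42L, "alloc-2", 40L))); // 40
    }
}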
* * @param allocationId allocation ID of the shard corresponding to the supplied local checkpoint diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationProxy.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationProxy.java index 20f7b5fc6a586..4812984732134 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/ReplicationProxy.java +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationProxy.java @@ -8,10 +8,10 @@ package org.opensearch.action.support.replication; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.replication.ReplicationOperation.ReplicaResponse; import org.opensearch.action.support.replication.ReplicationOperation.Replicas; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.core.action.ActionListener; import java.util.function.BiConsumer; import java.util.function.Consumer; diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationRequest.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationRequest.java index d5a2d37daa504..92e50f7a476f3 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/ReplicationRequest.java +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationRequest.java @@ -40,12 +40,12 @@ import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.IndicesOptions; import org.opensearch.common.Nullable; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.TimeValue; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.tasks.TaskId; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; import java.io.IOException; import java.util.Map; diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationRequestBuilder.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationRequestBuilder.java index df60e585e7272..920024b366a4c 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/ReplicationRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationRequestBuilder.java @@ -32,12 +32,12 @@ package org.opensearch.action.support.replication; -import org.opensearch.action.ActionType; import org.opensearch.action.ActionRequestBuilder; -import org.opensearch.action.ActionResponse; +import org.opensearch.action.ActionType; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionResponse; /** * Transport request builder for a replication operation diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationResponse.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationResponse.java index 1f0e61a134eb2..67480976c500f 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/ReplicationResponse.java +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationResponse.java @@ -34,18 +34,19 @@ import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionResponse; -import 
org.opensearch.core.action.ShardOperationFailedException; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.action.ShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.index.shard.ShardId; -import org.opensearch.core.rest.RestStatus; import java.io.IOException; import java.util.ArrayList; @@ -57,8 +58,9 @@ /** * Base class for write action responses. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ReplicationResponse extends ActionResponse { public static final ReplicationResponse.ShardInfo.Failure[] EMPTY = new ReplicationResponse.ShardInfo.Failure[0]; @@ -88,8 +90,9 @@ public void setShardInfo(ShardInfo shardInfo) { /** * Holds shard information * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class ShardInfo implements Writeable, ToXContentObject { private static final String TOTAL = "total"; @@ -235,8 +238,9 @@ public String toString() { /** * Holds failure information * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Failure extends ShardOperationFailedException implements ToXContentObject { private static final String _INDEX = "_index"; diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationTask.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationTask.java index 5aa37ec65ff0d..c92d2a6e8108e 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/ReplicationTask.java +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationTask.java @@ -32,13 +32,13 @@ package org.opensearch.action.support.replication; -import org.opensearch.common.Strings; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; import java.io.IOException; import java.util.Map; @@ -114,7 +114,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } // Implements equals and hashcode for testing diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportBroadcastReplicationAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportBroadcastReplicationAction.java index 116b46469df21..e235adbc162fc 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/TransportBroadcastReplicationAction.java +++ 
b/server/src/main/java/org/opensearch/action/support/replication/TransportBroadcastReplicationAction.java @@ -33,9 +33,7 @@ package org.opensearch.action.support.replication; import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.HandledTransportAction; import org.opensearch.action.support.TransportActions; import org.opensearch.action.support.broadcast.BroadcastRequest; @@ -46,8 +44,10 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.util.concurrent.CountDown; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; +import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.index.shard.ShardId; import org.opensearch.tasks.Task; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java index 5b3be4d077f3c..95f998e2d89c2 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java @@ -34,12 +34,9 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; -import org.opensearch.core.Assertions; import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionListenerResponseHandler; -import org.opensearch.action.ActionResponse; import org.opensearch.action.UnavailableShardsException; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.ActiveShardCount; @@ -59,30 +56,34 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; -import org.opensearch.common.lease.Releasable; -import org.opensearch.common.lease.Releasables; +import org.opensearch.core.Assertions; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.tasks.TaskId; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.IndexService; import 
org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardClosedException; import org.opensearch.index.shard.ReplicationGroup; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.ShardNotFoundException; import org.opensearch.index.shard.ShardNotInPrimaryModeException; import org.opensearch.indices.IndexClosedException; import org.opensearch.indices.IndicesService; import org.opensearch.node.NodeClosedException; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.ConnectTransportException; import org.opensearch.transport.TransportChannel; @@ -100,7 +101,7 @@ /** * Base class for requests that should be executed on a primary copy followed by replica copies. * Subclasses can resolve the target shard and provide implementation for primary and replica operations. - * + * <p> * The action samples cluster state on the receiving node to reroute to node with primary copy and on the * primary node to validate request before primary operation followed by sampling state again for resolving * nodes with replica copies to perform replication. @@ -134,6 +135,12 @@ public abstract class TransportReplicationAction< Setting.Property.NodeScope ); + /** + * Making primary and replica actions suffixes as constant + */ + public static final String PRIMARY_ACTION_SUFFIX = "[p]"; + public static final String REPLICA_ACTION_SUFFIX = "[r]"; + protected final ThreadPool threadPool; protected final TransportService transportService; protected final ClusterService clusterService; @@ -195,6 +202,40 @@ protected TransportReplicationAction( String executor, boolean syncGlobalCheckpointAfterOperation, boolean forceExecutionOnPrimary + ) { + this( + settings, + actionName, + transportService, + clusterService, + indicesService, + threadPool, + shardStateAction, + actionFilters, + requestReader, + replicaRequestReader, + executor, + syncGlobalCheckpointAfterOperation, + forceExecutionOnPrimary, + null + ); + } + + protected TransportReplicationAction( + Settings settings, + String actionName, + TransportService transportService, + ClusterService clusterService, + IndicesService indicesService, + ThreadPool threadPool, + ShardStateAction shardStateAction, + ActionFilters actionFilters, + Writeable.Reader<Request> requestReader, + Writeable.Reader<ReplicaRequest> replicaRequestReader, + String executor, + boolean syncGlobalCheckpointAfterOperation, + boolean forceExecutionOnPrimary, + AdmissionControlActionType admissionControlActionType ) { super(actionName, actionFilters, transportService.getTaskManager()); this.threadPool = threadPool; @@ -204,8 +245,8 @@ protected TransportReplicationAction( this.shardStateAction = shardStateAction; this.executor = executor; - this.transportPrimaryAction = actionName + "[p]"; - this.transportReplicaAction = actionName + "[r]"; + this.transportPrimaryAction = actionName + PRIMARY_ACTION_SUFFIX; + this.transportReplicaAction = actionName + REPLICA_ACTION_SUFFIX; this.initialRetryBackoffBound = REPLICATION_INITIAL_RETRY_BACKOFF_BOUND.get(settings); this.retryTimeout = REPLICATION_RETRY_TIMEOUT.get(settings); @@ -213,14 +254,8 @@ protected TransportReplicationAction( transportService.registerRequestHandler(actionName, ThreadPool.Names.SAME, requestReader, this::handleOperationRequest); - 
transportService.registerRequestHandler( - transportPrimaryAction, - executor, - forceExecutionOnPrimary, - true, - in -> new ConcreteShardRequest<>(requestReader, in), - this::handlePrimaryRequest - ); + // This method will register Primary Request Handler Based on AdmissionControlActionType + registerPrimaryRequestHandler(requestReader, admissionControlActionType); // we must never reject on because of thread pool capacity on replicas transportService.registerRequestHandler( @@ -241,6 +276,38 @@ protected TransportReplicationAction( clusterSettings.addSettingsUpdateConsumer(REPLICATION_RETRY_TIMEOUT, (v) -> retryTimeout = v); } + /** + * This method will register handler as based on admissionControlActionType and AdmissionControlHandler will be + * invoked for registered action + * @param requestReader instance of the request reader + * @param admissionControlActionType type of AdmissionControlActionType + */ + private void registerPrimaryRequestHandler( + Writeable.Reader<Request> requestReader, + AdmissionControlActionType admissionControlActionType + ) { + if (admissionControlActionType != null) { + transportService.registerRequestHandler( + transportPrimaryAction, + executor, + forceExecutionOnPrimary, + true, + admissionControlActionType, + in -> new ConcreteShardRequest<>(requestReader, in), + this::handlePrimaryRequest + ); + } else { + transportService.registerRequestHandler( + transportPrimaryAction, + executor, + forceExecutionOnPrimary, + true, + in -> new ConcreteShardRequest<>(requestReader, in), + this::handlePrimaryRequest + ); + } + } + @Override protected void doExecute(Task task, Request request, ActionListener<Response> listener) { assert request.shardId() != null : "request shardId must be set"; @@ -866,7 +933,7 @@ protected IndexShard getIndexShard(final ShardId shardId) { * Responsible for routing and retrying failed operations on the primary. * The actual primary operation is done in {@link ReplicationOperation} on the * node with primary copy. 
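// Constructor fragment, sketched: a subclass opts into the new admission-control path by
// passing an AdmissionControlActionType through the widened constructor; subclasses that
// use the shorter overload implicitly pass null and keep the old registration. MyRequest
// and MyReplicaRequest are hypothetical reader references, the action name a placeholder.
super(
    settings,
    "indices:data/write/example",
    transportService,
    clusterService,
    indicesService,
    threadPool,
    shardStateAction,
    actionFilters,
    MyRequest::new,
    MyReplicaRequest::new,
    ThreadPool.Names.WRITE,
    true,                                // syncGlobalCheckpointAfterOperation
    true,                                // forceExecutionOnPrimary
    AdmissionControlActionType.INDEXING  // gate the [p] handler behind admission control
);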
- * + * <p> * Resolves index and shard id for the request before routing it to target node * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java index a47d986c74c1e..27f9e6dee83de 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java +++ b/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java @@ -34,7 +34,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.TransportActions; @@ -46,19 +45,26 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; -import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.settings.Settings; import org.opensearch.common.lease.Releasable; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexingPressureService; import org.opensearch.index.engine.Engine; import org.opensearch.index.mapper.MapperParsingException; -import org.opensearch.index.shard.PrimaryShardClosedException; import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.shard.PrimaryShardClosedException; import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.Translog.Location; import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanBuilder; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.telemetry.tracing.listener.TraceableActionListener; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -82,6 +88,7 @@ public abstract class TransportWriteAction< protected final SystemIndices systemIndices; private final Function<IndexShard, String> executorFunction; + private final Tracer tracer; protected TransportWriteAction( Settings settings, @@ -97,7 +104,9 @@ protected TransportWriteAction( Function<IndexShard, String> executorFunction, boolean forceExecutionOnPrimary, IndexingPressureService indexingPressureService, - SystemIndices systemIndices + SystemIndices systemIndices, + Tracer tracer, + AdmissionControlActionType admissionControlActionType ) { // We pass ThreadPool.Names.SAME to the super class as we control the dispatching to the // ThreadPool.Names.WRITE/ThreadPool.Names.SYSTEM_WRITE thread pools in this class. 
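// Sketch of the dispatching mentioned in the comment above, under the assumption that a
// concrete subclass supplies the executor function; the lambda is illustrative, not the
// shipped wiring: regular writes land on the WRITE pool, system-index writes on the
// dedicated SYSTEM_WRITE pool.
Function<IndexShard, String> executorFunction = shard -> systemIndices.isSystemIndex(
    shard.shardId().getIndexName()
) ? ThreadPool.Names.SYSTEM_WRITE : ThreadPool.Names.WRITE;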
@@ -114,11 +123,50 @@ protected TransportWriteAction( replicaRequest, ThreadPool.Names.SAME, true, - forceExecutionOnPrimary + forceExecutionOnPrimary, + admissionControlActionType ); this.executorFunction = executorFunction; this.indexingPressureService = indexingPressureService; this.systemIndices = systemIndices; + this.tracer = tracer; + } + + protected TransportWriteAction( + Settings settings, + String actionName, + TransportService transportService, + ClusterService clusterService, + IndicesService indicesService, + ThreadPool threadPool, + ShardStateAction shardStateAction, + ActionFilters actionFilters, + Writeable.Reader<Request> request, + Writeable.Reader<ReplicaRequest> replicaRequest, + Function<IndexShard, String> executorFunction, + boolean forceExecutionOnPrimary, + IndexingPressureService indexingPressureService, + SystemIndices systemIndices, + Tracer tracer + ) { + this( + settings, + actionName, + transportService, + clusterService, + indicesService, + threadPool, + shardStateAction, + actionFilters, + request, + replicaRequest, + executorFunction, + forceExecutionOnPrimary, + indexingPressureService, + systemIndices, + tracer, + null + ); } protected String executor(IndexShard shard) { @@ -220,7 +268,12 @@ protected void shardOperationOnPrimary( threadPool.executor(executor).execute(new ActionRunnable<PrimaryResult<ReplicaRequest, Response>>(listener) { @Override protected void doRun() { - dispatchedShardOperationOnPrimary(request, primary, listener); + Span span = tracer.startSpan( + SpanBuilder.from("dispatchedShardOperationOnPrimary", clusterService.localNode().getId(), request) + ); + try (SpanScope spanScope = tracer.withSpanInScope(span)) { + dispatchedShardOperationOnPrimary(request, primary, TraceableActionListener.create(listener, span, tracer)); + } } @Override @@ -248,7 +301,12 @@ protected void shardOperationOnReplica(ReplicaRequest request, IndexShard replic threadPool.executor(executorFunction.apply(replica)).execute(new ActionRunnable<ReplicaResult>(listener) { @Override protected void doRun() { - dispatchedShardOperationOnReplica(request, replica, listener); + Span span = tracer.startSpan( + SpanBuilder.from("dispatchedShardOperationOnReplica", clusterService.localNode().getId(), request) + ); + try (SpanScope spanScope = tracer.withSpanInScope(span)) { + dispatchedShardOperationOnReplica(request, replica, TraceableActionListener.create(listener, span, tracer)); + } } @Override @@ -266,7 +324,7 @@ protected abstract void dispatchedShardOperationOnReplica( /** * Result of taking the action on the primary. - * + * <p> * NOTE: public for testing * * @opensearch.internal @@ -496,7 +554,7 @@ void run() { * A proxy for <b>write</b> operations that need to be performed on the * replicas, where a failure to execute the operation should fail * the replica shard and/or mark the replica as stale. - * + * <p> * This extends {@code TransportReplicationAction.ReplicasProxy} to do the * failing and stale-ing. 
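// The tracing pattern this patch applies around both dispatched shard operations, as a
// standalone sketch ("doDispatch" is a placeholder): start a span, keep it current for
// the duration of the dispatch, and let a TraceableActionListener end it when the
// wrapped listener completes or fails.
Span span = tracer.startSpan(
    SpanBuilder.from("dispatchedShardOperationOnPrimary", clusterService.localNode().getId(), request)
);
try (SpanScope spanScope = tracer.withSpanInScope(span)) {
    doDispatch(request, TraceableActionListener.create(listener, span, tracer));
}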
* diff --git a/server/src/main/java/org/opensearch/action/support/single/instance/InstanceShardOperationRequest.java b/server/src/main/java/org/opensearch/action/support/single/instance/InstanceShardOperationRequest.java index 7a722ab2a5a35..9422524133dbf 100644 --- a/server/src/main/java/org/opensearch/action/support/single/instance/InstanceShardOperationRequest.java +++ b/server/src/main/java/org/opensearch/action/support/single/instance/InstanceShardOperationRequest.java @@ -38,9 +38,9 @@ import org.opensearch.action.ValidateActions; import org.opensearch.action.support.IndicesOptions; import org.opensearch.common.Nullable; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.TimeValue; import org.opensearch.core.index.shard.ShardId; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java b/server/src/main/java/org/opensearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java index 6fe7b503e4602..30ebf1fb419bf 100644 --- a/server/src/main/java/org/opensearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java @@ -32,11 +32,11 @@ package org.opensearch.action.support.single.instance; -import org.opensearch.action.ActionType; import org.opensearch.action.ActionRequestBuilder; -import org.opensearch.action.ActionResponse; +import org.opensearch.action.ActionType; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionResponse; /** * Request builder for a shard operation diff --git a/server/src/main/java/org/opensearch/action/support/single/instance/TransportInstanceSingleOperationAction.java b/server/src/main/java/org/opensearch/action/support/single/instance/TransportInstanceSingleOperationAction.java index 8014f1b280bf2..21d4ba726e86f 100644 --- a/server/src/main/java/org/opensearch/action/support/single/instance/TransportInstanceSingleOperationAction.java +++ b/server/src/main/java/org/opensearch/action/support/single/instance/TransportInstanceSingleOperationAction.java @@ -32,8 +32,6 @@ package org.opensearch.action.support.single.instance; -import org.opensearch.action.ActionListener; -import org.opensearch.action.ActionResponse; import org.opensearch.action.UnavailableShardsException; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.HandledTransportAction; @@ -47,12 +45,14 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; -import org.opensearch.index.IndexNotFoundException; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.IndexNotFoundException; import org.opensearch.node.NodeClosedException; import org.opensearch.tasks.Task; 
import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/action/support/single/shard/SingleShardOperationRequestBuilder.java b/server/src/main/java/org/opensearch/action/support/single/shard/SingleShardOperationRequestBuilder.java index cf93947cd4afe..2c602cbc2d164 100644 --- a/server/src/main/java/org/opensearch/action/support/single/shard/SingleShardOperationRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/support/single/shard/SingleShardOperationRequestBuilder.java @@ -32,10 +32,10 @@ package org.opensearch.action.support.single.shard; -import org.opensearch.action.ActionType; import org.opensearch.action.ActionRequestBuilder; -import org.opensearch.action.ActionResponse; +import org.opensearch.action.ActionType; import org.opensearch.client.OpenSearchClient; +import org.opensearch.core.action.ActionResponse; /** * Request builder for a single shard operation request diff --git a/server/src/main/java/org/opensearch/action/support/single/shard/SingleShardRequest.java b/server/src/main/java/org/opensearch/action/support/single/shard/SingleShardRequest.java index c474096ff94e4..56b34aea8248d 100644 --- a/server/src/main/java/org/opensearch/action/support/single/shard/SingleShardRequest.java +++ b/server/src/main/java/org/opensearch/action/support/single/shard/SingleShardRequest.java @@ -55,7 +55,7 @@ public abstract class SingleShardRequest<Request extends SingleShardRequest<Requ /** * The concrete index name - * + * <p> * Whether index property is optional depends on the concrete implementation. If index property is required the * concrete implementation should use {@link #validateNonNullIndex()} to check if the index property has been set */ diff --git a/server/src/main/java/org/opensearch/action/support/single/shard/TransportSingleShardAction.java b/server/src/main/java/org/opensearch/action/support/single/shard/TransportSingleShardAction.java index 9a5ce608da472..df91559a2f8cb 100644 --- a/server/src/main/java/org/opensearch/action/support/single/shard/TransportSingleShardAction.java +++ b/server/src/main/java/org/opensearch/action/support/single/shard/TransportSingleShardAction.java @@ -33,8 +33,6 @@ package org.opensearch.action.support.single.shard; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; -import org.opensearch.action.ActionResponse; import org.opensearch.action.ActionRunnable; import org.opensearch.action.NoShardAvailableActionException; import org.opensearch.action.support.ActionFilters; @@ -52,6 +50,8 @@ import org.opensearch.cluster.routing.ShardsIterator; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.common.logging.LoggerMessageFormat; diff --git a/server/src/main/java/org/opensearch/action/support/tasks/BaseTasksRequest.java b/server/src/main/java/org/opensearch/action/support/tasks/BaseTasksRequest.java index 43ece159247bc..c2ae333b17055 100644 --- a/server/src/main/java/org/opensearch/action/support/tasks/BaseTasksRequest.java +++ b/server/src/main/java/org/opensearch/action/support/tasks/BaseTasksRequest.java @@ -34,14 +34,14 @@ import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; -import 
org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.regex.Regex; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.CollectionUtils; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.util.CollectionUtils; +import org.opensearch.core.tasks.TaskId; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; import java.io.IOException; @@ -128,7 +128,7 @@ public final Request setNodes(String... nodes) { /** * Returns the id of the task that should be processed. - * + * <p> * By default tasks with any ids are returned. */ public TaskId getTaskId() { diff --git a/server/src/main/java/org/opensearch/action/support/tasks/BaseTasksResponse.java b/server/src/main/java/org/opensearch/action/support/tasks/BaseTasksResponse.java index 3bd126b452f5c..d7b73361ee249 100644 --- a/server/src/main/java/org/opensearch/action/support/tasks/BaseTasksResponse.java +++ b/server/src/main/java/org/opensearch/action/support/tasks/BaseTasksResponse.java @@ -33,14 +33,14 @@ package org.opensearch.action.support.tasks; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionResponse; import org.opensearch.action.FailedNodeException; import org.opensearch.action.TaskOperationFailure; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.tasks.TaskId; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.tasks.TaskId; import java.io.IOException; import java.util.ArrayList; diff --git a/server/src/main/java/org/opensearch/action/support/tasks/TasksRequestBuilder.java b/server/src/main/java/org/opensearch/action/support/tasks/TasksRequestBuilder.java index a15d008fea475..a8bf264828122 100644 --- a/server/src/main/java/org/opensearch/action/support/tasks/TasksRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/support/tasks/TasksRequestBuilder.java @@ -31,11 +31,11 @@ package org.opensearch.action.support.tasks; -import org.opensearch.action.ActionType; import org.opensearch.action.ActionRequestBuilder; +import org.opensearch.action.ActionType; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.unit.TimeValue; -import org.opensearch.tasks.TaskId; +import org.opensearch.core.tasks.TaskId; /** * Builder for task-based requests diff --git a/server/src/main/java/org/opensearch/action/support/tasks/TransportTasksAction.java b/server/src/main/java/org/opensearch/action/support/tasks/TransportTasksAction.java index bfd207e6f969f..f33d7161660a3 100644 --- a/server/src/main/java/org/opensearch/action/support/tasks/TransportTasksAction.java +++ b/server/src/main/java/org/opensearch/action/support/tasks/TransportTasksAction.java @@ -34,7 +34,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.ResourceNotFoundException; -import org.opensearch.action.ActionListener; import org.opensearch.action.FailedNodeException; import org.opensearch.action.NoSuchNodeException; import org.opensearch.action.TaskOperationFailure; @@ -45,10 +44,12 @@ import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.service.ClusterService; import 
org.opensearch.common.collect.Tuple; +import org.opensearch.common.util.concurrent.AtomicArray; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.util.concurrent.AtomicArray; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.NodeShouldNotConnectException; @@ -57,7 +58,6 @@ import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestHandler; import org.opensearch.transport.TransportRequestOptions; -import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsItemResponse.java b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsItemResponse.java index 80ca1629417ad..8dbf3dd4df512 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsItemResponse.java +++ b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsItemResponse.java @@ -32,6 +32,7 @@ package org.opensearch.action.termvectors; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -41,8 +42,9 @@ /** * A single multi term response. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MultiTermVectorsItemResponse implements Writeable { private final TermVectorsResponse response; diff --git a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsRequest.java b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsRequest.java index c055564c3fcbe..0eef737a54bb3 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsRequest.java +++ b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsRequest.java @@ -39,6 +39,7 @@ import org.opensearch.action.RealtimeRequest; import org.opensearch.action.ValidateActions; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.XContentParser; @@ -54,8 +55,9 @@ /** * A single multi get request. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MultiTermVectorsRequest extends ActionRequest implements Iterable<TermVectorsRequest>, diff --git a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsRequestBuilder.java b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsRequestBuilder.java index 04dfd39112d6e..6bfa402575885 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsRequestBuilder.java @@ -34,12 +34,14 @@ import org.opensearch.action.ActionRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; /** * A single multi get request builder. 
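// Usage sketch for the multi term vectors surface being promoted to @PublicApi, assuming
// a Client is in scope; index and ids are placeholders.
MultiTermVectorsRequest mtvRequest = new MultiTermVectorsRequest();
mtvRequest.add(new TermVectorsRequest("my-index", "1"));
mtvRequest.add(new TermVectorsRequest("my-index", "2"));
client.multiTermVectors(mtvRequest, ActionListener.wrap(response -> {
    for (MultiTermVectorsItemResponse item : response) {
        if (item.isFailed() == false) {
            // item.getResponse() carries the per-document term vectors
        }
    }
}, e -> { /* failure of the whole request */ }));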
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MultiTermVectorsRequestBuilder extends ActionRequestBuilder<MultiTermVectorsRequest, MultiTermVectorsResponse> { public MultiTermVectorsRequestBuilder(OpenSearchClient client, MultiTermVectorsAction action) { diff --git a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsResponse.java b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsResponse.java index cc482b5190e32..8a059829dda0e 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsResponse.java +++ b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsResponse.java @@ -34,7 +34,8 @@ import org.opensearch.OpenSearchException; import org.opensearch.Version; -import org.opensearch.action.ActionResponse; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -48,15 +49,17 @@ /** * A multi get response. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MultiTermVectorsResponse extends ActionResponse implements Iterable<MultiTermVectorsItemResponse>, ToXContentObject { /** * Represents a failure. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Failure implements Writeable { private final String index; private final String id; diff --git a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsShardResponse.java b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsShardResponse.java index 05d949df8db00..674b285a7b3ce 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsShardResponse.java +++ b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsShardResponse.java @@ -32,7 +32,7 @@ package org.opensearch.action.termvectors; -import org.opensearch.action.ActionResponse; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsFilter.java b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsFilter.java index 0e0202777794b..89d2f8567b3cb 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsFilter.java +++ b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsFilter.java @@ -39,6 +39,7 @@ import org.apache.lucene.search.similarities.ClassicSimilarity; import org.apache.lucene.search.similarities.TFIDFSimilarity; import org.apache.lucene.util.BytesRef; +import org.opensearch.common.annotation.PublicApi; import java.io.IOException; import java.util.HashMap; @@ -49,8 +50,9 @@ * Filter the term vector (doc frequency, positions, offsets) for a * document. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class TermVectorsFilter { public static final int DEFAULT_MAX_QUERY_TERMS = 25; public static final int DEFAULT_MIN_TERM_FREQ = 0; @@ -179,8 +181,9 @@ public void setMaxWordLength(int maxWordLength) { /** * Internal score term * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class ScoreTerm { public String field; public String word; diff --git a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java index 71200b05d70ad..a761cabb9599a 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java +++ b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java @@ -40,17 +40,18 @@ import org.opensearch.action.get.MultiGetRequest; import org.opensearch.action.support.single.shard.SingleShardRequest; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.lucene.uid.Versions; +import org.opensearch.common.util.set.Sets; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.ParseField; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.lucene.uid.Versions; -import org.opensearch.common.util.set.Sets; import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.VersionType; import org.opensearch.index.mapper.MapperService; @@ -74,8 +75,9 @@ * Note, the {@link #index()}, and {@link #id(String)} are * required. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> implements RealtimeRequest { private static final ParseField INDEX = new ParseField("_index"); private static final ParseField ID = new ParseField("_id"); @@ -118,8 +120,9 @@ public class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> i /** * Internal filter settings * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class FilterSettings { public Integer maxNumTerms; public Integer minTermFreq; @@ -186,7 +189,7 @@ public TermVectorsRequest() {} if (in.readBoolean()) { doc = in.readBytesReference(); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_10_0)) { mediaType = in.readMediaType(); } else { mediaType = in.readEnum(XContentType.class); @@ -306,7 +309,7 @@ public TermVectorsRequest doc(XContentBuilder documentBuilder) { */ @Deprecated public TermVectorsRequest doc(BytesReference doc, boolean generateRandomId) { - return this.doc(doc, generateRandomId, XContentHelper.xContentType(doc)); + return this.doc(doc, generateRandomId, MediaTypeRegistry.xContentType(doc)); } /** @@ -538,7 +541,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(doc != null); if (doc != null) { out.writeBytesReference(doc); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { mediaType.writeTo(out); } else { out.writeEnum((XContentType) mediaType); @@ -572,8 +575,9 @@ public void writeTo(StreamOutput out) throws IOException { /** * The flags. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Flag { // Do not change the order of these flags we use // the ordinal for encoding! Only append to the end! diff --git a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequestBuilder.java b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequestBuilder.java index 02cfff1a6682b..ce68c06206fef 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequestBuilder.java @@ -34,6 +34,7 @@ import org.opensearch.action.ActionRequestBuilder; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.VersionType; @@ -46,8 +47,9 @@ * Note, the {@code index}, {@code type} and {@code id} are * required. 
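// The wire-compatibility gate adjusted above, restated as a sketch: MediaType
// serialization shipped in 2.10, so the guard drops from V_3_0_0 (the usual placeholder
// while a change lives only on main) to V_2_10_0 after the backport; peers older than
// 2.10 still exchange the legacy XContentType enum.
if (out.getVersion().onOrAfter(Version.V_2_10_0)) {
    mediaType.writeTo(out);                   // self-describing media type
} else {
    out.writeEnum((XContentType) mediaType);  // legacy enum encoding
}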
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class TermVectorsRequestBuilder extends ActionRequestBuilder<TermVectorsRequest, TermVectorsResponse> { public TermVectorsRequestBuilder(OpenSearchClient client, TermVectorsAction action) { diff --git a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsResponse.java b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsResponse.java index 8889d72086f47..7ad27808588ae 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsResponse.java +++ b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsResponse.java @@ -41,13 +41,14 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.CharsRefBuilder; import org.opensearch.Version; -import org.opensearch.action.ActionResponse; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.TimeValue; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.mapper.MapperService; @@ -62,8 +63,9 @@ * Response returning the term vector (doc frequency, positions, offsets) for a * document. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class TermVectorsResponse extends ActionResponse implements ToXContentObject { /** diff --git a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsWriter.java b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsWriter.java index 0e16291131e2a..b60376d6f7e8d 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsWriter.java +++ b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsWriter.java @@ -40,8 +40,8 @@ import org.apache.lucene.util.BytesRef; import org.opensearch.action.termvectors.TermVectorsRequest.Flag; import org.opensearch.common.Nullable; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.bytes.BytesReference; import java.io.IOException; import java.util.ArrayList; diff --git a/server/src/main/java/org/opensearch/action/termvectors/TransportMultiTermVectorsAction.java b/server/src/main/java/org/opensearch/action/termvectors/TransportMultiTermVectorsAction.java index 4f3d0f2a095df..0364f36106cb0 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/TransportMultiTermVectorsAction.java +++ b/server/src/main/java/org/opensearch/action/termvectors/TransportMultiTermVectorsAction.java @@ -32,7 +32,6 @@ package org.opensearch.action.termvectors; -import org.opensearch.action.ActionListener; import org.opensearch.action.RoutingMissingException; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.HandledTransportAction; @@ -42,8 +41,9 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; import org.opensearch.common.util.concurrent.AtomicArray; -import org.opensearch.index.IndexNotFoundException; +import 
org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.IndexNotFoundException; import org.opensearch.tasks.Task; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/termvectors/TransportShardMultiTermsVectorAction.java b/server/src/main/java/org/opensearch/action/termvectors/TransportShardMultiTermsVectorAction.java index 73da857754cfa..a298e267cca37 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/TransportShardMultiTermsVectorAction.java +++ b/server/src/main/java/org/opensearch/action/termvectors/TransportShardMultiTermsVectorAction.java @@ -42,9 +42,9 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexService; import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.termvectors.TermVectorsService; import org.opensearch.indices.IndicesService; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/action/termvectors/TransportTermVectorsAction.java b/server/src/main/java/org/opensearch/action/termvectors/TransportTermVectorsAction.java index af6b0e6b8e251..b7e8a29bd4027 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/TransportTermVectorsAction.java +++ b/server/src/main/java/org/opensearch/action/termvectors/TransportTermVectorsAction.java @@ -32,20 +32,21 @@ package org.opensearch.action.termvectors; -import org.opensearch.action.ActionListener; import org.opensearch.action.RoutingMissingException; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.single.shard.TransportSingleShardAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.routing.GroupShardsIterator; +import org.opensearch.cluster.routing.Preference; import org.opensearch.cluster.routing.ShardIterator; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexService; import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.termvectors.TermVectorsService; import org.opensearch.indices.IndicesService; import org.opensearch.threadpool.ThreadPool; @@ -87,15 +88,24 @@ public TransportTermVectorsAction( @Override protected ShardIterator shards(ClusterState state, InternalRequest request) { + + String preference = request.request().preference; + // For a real time request on a seg rep index, use primary shard as the preferred query shard. 
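// Why the primary, sketched: under segment replication a replica only reflects an
// operation after a segment copy completes, so a real-time read served from a replica
// could miss the newest writes; forcing Preference.PRIMARY (only when the caller set no
// explicit preference) preserves read-after-write semantics. Client-side view, with a
// placeholder index and id:
TermVectorsResponse tv = client.prepareTermVectors("my-index", "1").setRealtime(true).get();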
+ if (request.request().realtime() + && preference == null + && state.getMetadata().isSegmentReplicationEnabled(request.concreteIndex())) { + preference = Preference.PRIMARY.type(); + } + if (request.request().doc() != null && request.request().routing() == null) { // artificial document without routing specified, ignore its "id" and use either random shard or according to preference GroupShardsIterator<ShardIterator> groupShardsIter = clusterService.operationRouting() - .searchShards(state, new String[] { request.concreteIndex() }, null, request.request().preference()); + .searchShards(state, new String[] { request.concreteIndex() }, null, preference); return groupShardsIter.iterator().next(); } return clusterService.operationRouting() - .getShards(state, request.concreteIndex(), request.request().id(), request.request().routing(), request.request().preference()); + .getShards(state, request.concreteIndex(), request.request().id(), request.request().routing(), preference); } @Override diff --git a/server/src/main/java/org/opensearch/action/update/TransportUpdateAction.java b/server/src/main/java/org/opensearch/action/update/TransportUpdateAction.java index 95433bc42c2d3..819112eb497f6 100644 --- a/server/src/main/java/org/opensearch/action/update/TransportUpdateAction.java +++ b/server/src/main/java/org/opensearch/action/update/TransportUpdateAction.java @@ -32,8 +32,8 @@ package org.opensearch.action.update; +import org.opensearch.ExceptionsHelper; import org.opensearch.ResourceAlreadyExistsException; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.action.DocWriteRequest; import org.opensearch.action.RoutingMissingException; @@ -55,18 +55,21 @@ import org.opensearch.cluster.routing.ShardIterator; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; import org.opensearch.common.inject.Inject; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.NotSerializableExceptionWrapper; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaType; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.IndexService; import org.opensearch.index.engine.VersionConflictEngineException; import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.shard.IndexingStats.Stats.DocStatusStats; import org.opensearch.indices.IndicesService; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; @@ -154,10 +157,13 @@ public static void resolveAndValidateRouting(Metadata metadata, String concreteI @Override protected void doExecute(Task task, final UpdateRequest request, final ActionListener<UpdateResponse> listener) { if (request.isRequireAlias() && (clusterService.state().getMetadata().hasAlias(request.index()) == false)) { - throw new IndexNotFoundException( + IndexNotFoundException e = new IndexNotFoundException( "[" + DocWriteRequest.REQUIRE_ALIAS + "] request flag is [true] and [" + request.index() + "] is not an alias", request.index() ); + + 
incDocStatusStats(e); + throw e; } // if we don't have a master, we don't have metadata, that's fine, let it find a cluster-manager using create index API if (autoCreateIndex.shouldAutoCreate(request.index(), clusterService.state())) { @@ -193,7 +199,10 @@ public void onFailure(Exception e) { } private void innerExecute(final Task task, final UpdateRequest request, final ActionListener<UpdateResponse> listener) { - super.doExecute(task, request, listener); + super.doExecute(task, request, ActionListener.wrap(listener::onResponse, e -> { + incDocStatusStats(e); + listener.onFailure(e); + })); } @Override @@ -330,7 +339,13 @@ protected void shardOperation(final UpdateRequest request, final ActionListener< shard.noopUpdate(); } } + + DocStatusStats stats = new DocStatusStats(); + stats.inc(RestStatus.OK); + + indicesService.addDocStatusStats(stats); listener.onResponse(update); + break; default: throw new IllegalStateException("Illegal result " + result.getResponseResult()); @@ -361,4 +376,10 @@ private void handleUpdateFailureWithRetry( } listener.onFailure(cause instanceof Exception ? (Exception) cause : new NotSerializableExceptionWrapper(cause)); } + + private void incDocStatusStats(final Exception e) { + DocStatusStats stats = new DocStatusStats(); + stats.inc(ExceptionsHelper.status(e)); + indicesService.addDocStatusStats(stats); + } } diff --git a/server/src/main/java/org/opensearch/action/update/UpdateHelper.java b/server/src/main/java/org/opensearch/action/update/UpdateHelper.java index a5568a838f21f..19c32f9336df8 100644 --- a/server/src/main/java/org/opensearch/action/update/UpdateHelper.java +++ b/server/src/main/java/org/opensearch/action/update/UpdateHelper.java @@ -42,19 +42,20 @@ import org.opensearch.common.Nullable; import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.VersionType; import org.opensearch.index.engine.DocumentMissingException; import org.opensearch.index.engine.DocumentSourceMissingException; import org.opensearch.index.get.GetResult; import org.opensearch.index.mapper.RoutingFieldMapper; import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.script.Script; import org.opensearch.script.ScriptService; import org.opensearch.script.UpdateScript; @@ -165,7 +166,7 @@ Result prepareUpsert(ShardId shardId, UpdateRequest request, final GetResult get DocWriteResponse.Result.NOOP ); update.setGetResult(getResult); - return new Result(update, DocWriteResponse.Result.NOOP, upsertResult.v2(), XContentType.JSON); + return new Result(update, DocWriteResponse.Result.NOOP, upsertResult.v2(), MediaTypeRegistry.JSON); default: // It's fine to throw an exception here, the leniency is handled/logged by `executeScriptedUpsert` throw new IllegalArgumentException("unknown upsert operation, got: " + upsertResult.v1()); @@ -401,13 +402,13 @@ public static class Result { private final Writeable action; private final 
DocWriteResponse.Result result; private final Map<String, Object> updatedSourceAsMap; - private final XContentType updateSourceContentType; + private final MediaType updateSourceContentType; public Result( Writeable action, DocWriteResponse.Result result, Map<String, Object> updatedSourceAsMap, - XContentType updateSourceContentType + MediaType updateSourceContentType ) { this.action = action; this.result = result; @@ -428,7 +429,7 @@ public Map<String, Object> updatedSourceAsMap() { return updatedSourceAsMap; } - public XContentType updateSourceContentType() { + public MediaType updateSourceContentType() { return updateSourceContentType; } } diff --git a/server/src/main/java/org/opensearch/action/update/UpdateRequest.java b/server/src/main/java/org/opensearch/action/update/UpdateRequest.java index 44454630ff24d..9654bd1c114ba 100644 --- a/server/src/main/java/org/opensearch/action/update/UpdateRequest.java +++ b/server/src/main/java/org/opensearch/action/update/UpdateRequest.java @@ -42,24 +42,24 @@ import org.opensearch.action.support.replication.ReplicationRequest; import org.opensearch.action.support.single.instance.InstanceShardOperationRequest; import org.opensearch.common.Nullable; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.VersionType; import org.opensearch.index.mapper.MapperService; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.script.Script; import org.opensearch.script.ScriptType; import org.opensearch.search.fetch.subphase.FetchSourceContext; @@ -75,8 +75,9 @@ /** * Transport request for updating an index * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest> implements DocWriteRequest<UpdateRequest>, @@ -107,12 +108,12 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest> ); PARSER.declareBoolean(UpdateRequest::scriptedUpsert, SCRIPTED_UPSERT_FIELD); PARSER.declareObject((request, builder) -> request.safeUpsertRequest().source(builder), (parser, context) -> { - XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType()); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(parser.contentType()); builder.copyCurrentStructure(parser); return builder; }, UPSERT_FIELD); PARSER.declareObject((request, builder) -> request.safeDoc().source(builder), (parser, context) -> { - XContentBuilder docBuilder = 
XContentFactory.contentBuilder(parser.contentType()); + XContentBuilder docBuilder = MediaTypeRegistry.contentBuilder(parser.contentType()); docBuilder.copyCurrentStructure(parser); return docBuilder; }, DOC_FIELD); @@ -590,7 +591,7 @@ public long ifSeqNo() { /** * If set, only perform this update request if the document was last modification was assigned this primary term. - * + * <p> * If the document last modification was assigned a different term a * {@link org.opensearch.index.engine.VersionConflictEngineException} will be thrown. */ @@ -663,32 +664,32 @@ public UpdateRequest doc(Map<String, Object> source) { /** * Sets the doc to use for updates when a script is not specified. */ - public UpdateRequest doc(Map<String, Object> source, XContentType contentType) { - safeDoc().source(source, contentType); + public UpdateRequest doc(Map<String, Object> source, MediaType mediaType) { + safeDoc().source(source, mediaType); return this; } /** * Sets the doc to use for updates when a script is not specified. */ - public UpdateRequest doc(String source, XContentType xContentType) { - safeDoc().source(source, xContentType); + public UpdateRequest doc(String source, MediaType mediaType) { + safeDoc().source(source, mediaType); return this; } /** * Sets the doc to use for updates when a script is not specified. */ - public UpdateRequest doc(byte[] source, XContentType xContentType) { - safeDoc().source(source, xContentType); + public UpdateRequest doc(byte[] source, MediaType mediaType) { + safeDoc().source(source, mediaType); return this; } /** * Sets the doc to use for updates when a script is not specified. */ - public UpdateRequest doc(byte[] source, int offset, int length, XContentType xContentType) { - safeDoc().source(source, offset, length, xContentType); + public UpdateRequest doc(byte[] source, int offset, int length, MediaType mediaType) { + safeDoc().source(source, offset, length, mediaType); return this; } @@ -705,8 +706,8 @@ public UpdateRequest doc(Object... source) { * Sets the doc to use for updates when a script is not specified, the doc provided * is a field and value pairs. */ - public UpdateRequest doc(XContentType xContentType, Object... source) { - safeDoc().source(xContentType, source); + public UpdateRequest doc(MediaType mediaType, Object... source) { + safeDoc().source(mediaType, source); return this; } @@ -749,32 +750,32 @@ public UpdateRequest upsert(Map<String, Object> source) { /** * Sets the doc source of the update request to be used when the document does not exists. */ - public UpdateRequest upsert(Map<String, Object> source, XContentType contentType) { - safeUpsertRequest().source(source, contentType); + public UpdateRequest upsert(Map<String, Object> source, MediaType mediaType) { + safeUpsertRequest().source(source, mediaType); return this; } /** * Sets the doc source of the update request to be used when the document does not exists. */ - public UpdateRequest upsert(String source, XContentType xContentType) { - safeUpsertRequest().source(source, xContentType); + public UpdateRequest upsert(String source, MediaType mediaType) { + safeUpsertRequest().source(source, mediaType); return this; } /** * Sets the doc source of the update request to be used when the document does not exists. 
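The TransportUpdateAction hunks at the start of this section route every update outcome into a DocStatusStats bucket keyed by REST status: successes under 200 OK, failures under the status derived from the exception. A condensed sketch of that accounting; the import path of the nested DocStatusStats class is an assumption, everything else mirrors the hunks above:

```java
import org.opensearch.ExceptionsHelper;
import org.opensearch.core.rest.RestStatus;
import org.opensearch.index.shard.IndexingStats.Stats.DocStatusStats; // assumed location of the nested class

class DocStatusAccountingSketch {
    // Mirrors shardOperation above: a completed update counts as 200 OK.
    static DocStatusStats onSuccess() {
        DocStatusStats stats = new DocStatusStats();
        stats.inc(RestStatus.OK);
        return stats;
    }

    // Mirrors the incDocStatusStats(Exception) helper above: failures are bucketed
    // by the status ExceptionsHelper derives from the exception, e.g. CONFLICT
    // for a version conflict.
    static DocStatusStats onFailure(Exception e) {
        DocStatusStats stats = new DocStatusStats();
        stats.inc(ExceptionsHelper.status(e));
        return stats;
    }
}
```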
*/ - public UpdateRequest upsert(byte[] source, XContentType xContentType) { - safeUpsertRequest().source(source, xContentType); + public UpdateRequest upsert(byte[] source, MediaType mediaType) { + safeUpsertRequest().source(source, mediaType); return this; } /** * Sets the doc source of the update request to be used when the document does not exists. */ - public UpdateRequest upsert(byte[] source, int offset, int length, XContentType xContentType) { - safeUpsertRequest().source(source, offset, length, xContentType); + public UpdateRequest upsert(byte[] source, int offset, int length, MediaType mediaType) { + safeUpsertRequest().source(source, offset, length, mediaType); return this; } @@ -791,8 +792,8 @@ public UpdateRequest upsert(Object... source) { * Sets the doc source of the update request to be used when the document does not exists. The doc * includes field and value pairs. */ - public UpdateRequest upsert(XContentType xContentType, Object... source) { - safeUpsertRequest().source(xContentType, source); + public UpdateRequest upsert(MediaType mediaType, Object... source) { + safeUpsertRequest().source(mediaType, source); return this; } diff --git a/server/src/main/java/org/opensearch/action/update/UpdateRequestBuilder.java b/server/src/main/java/org/opensearch/action/update/UpdateRequestBuilder.java index 4fdcac0f6927a..d662381ac5e19 100644 --- a/server/src/main/java/org/opensearch/action/update/UpdateRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/update/UpdateRequestBuilder.java @@ -39,8 +39,9 @@ import org.opensearch.action.support.single.instance.InstanceShardOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.VersionType; import org.opensearch.script.Script; @@ -49,8 +50,9 @@ /** * Transport request builder for updating an index * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<UpdateRequest, UpdateResponse, UpdateRequestBuilder> implements WriteRequestBuilder<UpdateRequestBuilder> { @@ -230,7 +232,7 @@ public UpdateRequestBuilder setDoc(Map<String, Object> source) { /** * Sets the doc to use for updates when a script is not specified. */ - public UpdateRequestBuilder setDoc(Map<String, Object> source, XContentType contentType) { + public UpdateRequestBuilder setDoc(Map<String, Object> source, MediaType contentType) { request.doc(source, contentType); return this; } @@ -238,24 +240,24 @@ public UpdateRequestBuilder setDoc(Map<String, Object> source, XContentType cont /** * Sets the doc to use for updates when a script is not specified. */ - public UpdateRequestBuilder setDoc(String source, XContentType xContentType) { - request.doc(source, xContentType); + public UpdateRequestBuilder setDoc(String source, MediaType mediaType) { + request.doc(source, mediaType); return this; } /** * Sets the doc to use for updates when a script is not specified. */ - public UpdateRequestBuilder setDoc(byte[] source, XContentType xContentType) { - request.doc(source, xContentType); + public UpdateRequestBuilder setDoc(byte[] source, MediaType mediaType) { + request.doc(source, mediaType); return this; } /** * Sets the doc to use for updates when a script is not specified. 
*/ - public UpdateRequestBuilder setDoc(byte[] source, int offset, int length, XContentType xContentType) { - request.doc(source, offset, length, xContentType); + public UpdateRequestBuilder setDoc(byte[] source, int offset, int length, MediaType mediaType) { + request.doc(source, offset, length, mediaType); return this; } @@ -272,8 +274,8 @@ public UpdateRequestBuilder setDoc(Object... source) { * Sets the doc to use for updates when a script is not specified, the doc provided * is a field and value pairs. */ - public UpdateRequestBuilder setDoc(XContentType xContentType, Object... source) { - request.doc(xContentType, source); + public UpdateRequestBuilder setDoc(MediaType mediaType, Object... source) { + request.doc(mediaType, source); return this; } @@ -305,32 +307,32 @@ public UpdateRequestBuilder setUpsert(Map<String, Object> source) { /** * Sets the doc source of the update request to be used when the document does not exists. */ - public UpdateRequestBuilder setUpsert(Map<String, Object> source, XContentType contentType) { - request.upsert(source, contentType); + public UpdateRequestBuilder setUpsert(Map<String, Object> source, MediaType mediaType) { + request.upsert(source, mediaType); return this; } /** * Sets the doc source of the update request to be used when the document does not exists. */ - public UpdateRequestBuilder setUpsert(String source, XContentType xContentType) { - request.upsert(source, xContentType); + public UpdateRequestBuilder setUpsert(String source, MediaType mediaType) { + request.upsert(source, mediaType); return this; } /** * Sets the doc source of the update request to be used when the document does not exists. */ - public UpdateRequestBuilder setUpsert(byte[] source, XContentType xContentType) { - request.upsert(source, xContentType); + public UpdateRequestBuilder setUpsert(byte[] source, MediaType mediaType) { + request.upsert(source, mediaType); return this; } /** * Sets the doc source of the update request to be used when the document does not exists. */ - public UpdateRequestBuilder setUpsert(byte[] source, int offset, int length, XContentType xContentType) { - request.upsert(source, offset, length, xContentType); + public UpdateRequestBuilder setUpsert(byte[] source, int offset, int length, MediaType mediaType) { + request.upsert(source, offset, length, mediaType); return this; } @@ -347,8 +349,8 @@ public UpdateRequestBuilder setUpsert(Object... source) { * Sets the doc source of the update request to be used when the document does not exists. The doc * includes field and value pairs. */ - public UpdateRequestBuilder setUpsert(XContentType xContentType, Object... source) { - request.upsert(xContentType, source); + public UpdateRequestBuilder setUpsert(MediaType mediaType, Object... 
source) { + request.upsert(mediaType, source); return this; } diff --git a/server/src/main/java/org/opensearch/action/update/UpdateResponse.java b/server/src/main/java/org/opensearch/action/update/UpdateResponse.java index 61767abb99cb9..c7ee1742af0f2 100644 --- a/server/src/main/java/org/opensearch/action/update/UpdateResponse.java +++ b/server/src/main/java/org/opensearch/action/update/UpdateResponse.java @@ -33,13 +33,14 @@ package org.opensearch.action.update; import org.opensearch.action.DocWriteResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.get.GetResult; -import org.opensearch.core.index.shard.ShardId; -import org.opensearch.core.rest.RestStatus; import java.io.IOException; @@ -48,8 +49,9 @@ /** * Transport response for updating an index * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class UpdateResponse extends DocWriteResponse { private static final String GET = "get"; @@ -173,8 +175,9 @@ public static void parseXContentFields(XContentParser parser, Builder context) t * temporarily store the parsed values, then the {@link DocWriteResponse.Builder#build()} method is called to * instantiate the {@link UpdateResponse}. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder extends DocWriteResponse.Builder { private GetResult getResult = null; diff --git a/server/src/main/java/org/opensearch/bootstrap/Bootstrap.java b/server/src/main/java/org/opensearch/bootstrap/Bootstrap.java index 2a23b501a8a0e..4e167d10b99fa 100644 --- a/server/src/main/java/org/opensearch/bootstrap/Bootstrap.java +++ b/server/src/main/java/org/opensearch/bootstrap/Bootstrap.java @@ -53,10 +53,10 @@ import org.opensearch.common.network.IfConfig; import org.opensearch.common.settings.KeyStoreWrapper; import org.opensearch.common.settings.SecureSettings; -import org.opensearch.core.common.settings.SecureString; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.settings.SecureString; +import org.opensearch.core.common.transport.BoundTransportAddress; import org.opensearch.env.Environment; import org.opensearch.monitor.jvm.JvmInfo; import org.opensearch.monitor.os.OsProbe; diff --git a/server/src/main/java/org/opensearch/bootstrap/BootstrapCheck.java b/server/src/main/java/org/opensearch/bootstrap/BootstrapCheck.java index 429612ba1b93d..a695486bd084c 100644 --- a/server/src/main/java/org/opensearch/bootstrap/BootstrapCheck.java +++ b/server/src/main/java/org/opensearch/bootstrap/BootstrapCheck.java @@ -32,18 +32,22 @@ package org.opensearch.bootstrap; +import org.opensearch.common.annotation.PublicApi; + import java.util.Objects; /** * Encapsulates a bootstrap check. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface BootstrapCheck { /** * Encapsulate the result of a bootstrap check. 
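The update request, builder, and response hunks above all make the same substitution: the concrete XContentType parameter becomes the MediaType abstraction, with method bodies otherwise unchanged. A minimal sketch of a caller after the migration (index name, id, and document bodies are illustrative):

```java
import org.opensearch.action.update.UpdateRequest;
import org.opensearch.core.xcontent.MediaTypeRegistry;

class UpdateRequestMediaTypeSketch {
    static UpdateRequest buildUpsert() {
        // MediaTypeRegistry.JSON is passed where XContentType.JSON was before.
        return new UpdateRequest("products", "1")               // hypothetical index and id
            .doc("{\"price\": 42}", MediaTypeRegistry.JSON)     // partial document to merge
            .upsert("{\"price\": 42}", MediaTypeRegistry.JSON); // full document when id "1" is absent
    }
}
```

Since XContentType itself implements MediaType, existing callers that still pass XContentType.JSON keep compiling against the widened signatures.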
*/ + @PublicApi(since = "1.0.0") final class BootstrapCheckResult { private final String message; diff --git a/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java b/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java index c27c149947444..485dd43a5999c 100644 --- a/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java +++ b/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java @@ -42,8 +42,8 @@ import org.opensearch.common.SuppressForbidden; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Setting; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.discovery.DiscoveryModule; import org.opensearch.env.Environment; import org.opensearch.index.IndexModule; @@ -73,7 +73,7 @@ /** * We enforce bootstrap checks once a node has the transport protocol bound to a non-loopback interface or if the system property {@code - * opensearch.enforce.bootstrap.checks} is set to {@true}. In this case we assume the node is running in production and + * opensearch.enforce.bootstrap.checks} is set to {@code true}. In this case we assume the node is running in production and * all bootstrap checks must pass. * * @opensearch.internal @@ -718,6 +718,7 @@ public final BootstrapCheckResult check(BootstrapContext context) { return BootstrapCheckResult.success(); } + @SuppressWarnings("removal") boolean isAllPermissionGranted() { final SecurityManager sm = System.getSecurityManager(); assert sm != null; diff --git a/server/src/main/java/org/opensearch/bootstrap/BootstrapContext.java b/server/src/main/java/org/opensearch/bootstrap/BootstrapContext.java index 1cfd8bf6dfc35..a7ffd701d07f0 100644 --- a/server/src/main/java/org/opensearch/bootstrap/BootstrapContext.java +++ b/server/src/main/java/org/opensearch/bootstrap/BootstrapContext.java @@ -32,14 +32,16 @@ package org.opensearch.bootstrap; import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; /** * Context that is passed to every bootstrap check to make decisions on. 
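With BootstrapCheck and its nested BootstrapCheckResult promoted to @PublicApi above, implementing a check becomes a supported extension point. A sketch of a custom check; the heap threshold and the check itself are invented for illustration:

```java
import org.opensearch.bootstrap.BootstrapCheck;
import org.opensearch.bootstrap.BootstrapContext;

class MinimumHeapBootstrapCheck implements BootstrapCheck {
    private static final long MIN_HEAP_BYTES = 512L * 1024 * 1024; // hypothetical threshold

    @Override
    public BootstrapCheckResult check(BootstrapContext context) {
        final long maxHeap = Runtime.getRuntime().maxMemory();
        return maxHeap >= MIN_HEAP_BYTES
            ? BootstrapCheckResult.success()
            : BootstrapCheckResult.failure("max heap [" + maxHeap + "] is below [" + MIN_HEAP_BYTES + "]");
    }
}
```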
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class BootstrapContext { /** * The node's environment diff --git a/server/src/main/java/org/opensearch/bootstrap/BootstrapInfo.java b/server/src/main/java/org/opensearch/bootstrap/BootstrapInfo.java index 0aa965ce46096..52dd5d710eedc 100644 --- a/server/src/main/java/org/opensearch/bootstrap/BootstrapInfo.java +++ b/server/src/main/java/org/opensearch/bootstrap/BootstrapInfo.java @@ -128,6 +128,7 @@ public Object remove(Object key) { /** * Returns a read-only view of all system properties */ + @SuppressWarnings("removal") public static Dictionary<Object, Object> getSystemProperties() { SecurityManager sm = System.getSecurityManager(); if (sm != null) { diff --git a/server/src/main/java/org/opensearch/bootstrap/JNAKernel32Library.java b/server/src/main/java/org/opensearch/bootstrap/JNAKernel32Library.java index 8e556df4b2f9b..91da34fb7216d 100644 --- a/server/src/main/java/org/opensearch/bootstrap/JNAKernel32Library.java +++ b/server/src/main/java/org/opensearch/bootstrap/JNAKernel32Library.java @@ -141,7 +141,7 @@ public boolean callback(long dwCtrlType) { /** * Memory protection constraints - * + * <p> * https://msdn.microsoft.com/en-us/library/windows/desktop/aa366786%28v=vs.85%29.aspx */ public static final int PAGE_NOACCESS = 0x0001; @@ -151,7 +151,7 @@ public boolean callback(long dwCtrlType) { /** * Contains information about a range of pages in the virtual address space of a process. * The VirtualQuery and VirtualQueryEx functions use this structure. - * + * <p> * https://msdn.microsoft.com/en-us/library/windows/desktop/aa366775%28v=vs.85%29.aspx */ public static class MemoryBasicInformation extends Structure { @@ -186,7 +186,7 @@ public SizeT() { /** * Locks the specified region of the process's virtual address space into physical * memory, ensuring that subsequent access to the region will not incur a page fault. - * + * <p> * https://msdn.microsoft.com/en-us/library/windows/desktop/aa366895%28v=vs.85%29.aspx * * @param address A pointer to the base address of the region of pages to be locked. @@ -197,7 +197,7 @@ public SizeT() { /** * Retrieves information about a range of pages within the virtual address space of a specified process. - * + * <p> * https://msdn.microsoft.com/en-us/library/windows/desktop/aa366907%28v=vs.85%29.aspx * * @param handle A handle to the process whose memory information is queried. @@ -210,7 +210,7 @@ public SizeT() { /** * Sets the minimum and maximum working set sizes for the specified process. - * + * <p> * https://msdn.microsoft.com/en-us/library/windows/desktop/ms686234%28v=vs.85%29.aspx * * @param handle A handle to the process whose working set sizes is to be set. @@ -222,7 +222,7 @@ public SizeT() { /** * Retrieves a pseudo handle for the current process. - * + * <p> * https://msdn.microsoft.com/en-us/library/windows/desktop/ms683179%28v=vs.85%29.aspx * * @return a pseudo handle to the current process. @@ -231,7 +231,7 @@ public SizeT() { /** * Closes an open object handle. - * + * <p> * https://msdn.microsoft.com/en-us/library/windows/desktop/ms724211%28v=vs.85%29.aspx * * @param handle A valid handle to an open object. 
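The recurring "- *" to "+ * <p>" hunks in this file, and in several files below, are all one fix: javadoc renders its body as HTML, so a blank comment line does not start a new paragraph, only an explicit <p> does. In miniature:

```java
/**
 * Locks the specified region of the process's virtual address space into physical memory.
 * <p>
 * Without the tag above, this sentence would render as part of the first paragraph,
 * because javadoc ignores the blank comment line between them.
 */
interface ParagraphTagExample {
    void lockMemory();
}
```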
@@ -252,7 +252,7 @@ public SizeT() { /** * Creates or opens a new job object - * + * <p> * https://msdn.microsoft.com/en-us/library/windows/desktop/ms682409%28v=vs.85%29.aspx * * @param jobAttributes security attributes @@ -263,7 +263,7 @@ public SizeT() { /** * Associates a process with an existing job - * + * <p> * https://msdn.microsoft.com/en-us/library/windows/desktop/ms681949%28v=vs.85%29.aspx * * @param job job handle @@ -274,7 +274,7 @@ public SizeT() { /** * Basic limit information for a job object - * + * <p> * https://msdn.microsoft.com/en-us/library/windows/desktop/ms684147%28v=vs.85%29.aspx */ public static class JOBOBJECT_BASIC_LIMIT_INFORMATION extends Structure implements Structure.ByReference { @@ -316,7 +316,7 @@ protected List<String> getFieldOrder() { /** * Get job limit and state information - * + * <p> * https://msdn.microsoft.com/en-us/library/windows/desktop/ms684925%28v=vs.85%29.aspx * * @param job job handle @@ -330,7 +330,7 @@ protected List<String> getFieldOrder() { /** * Set job limit and state information - * + * <p> * https://msdn.microsoft.com/en-us/library/windows/desktop/ms686216%28v=vs.85%29.aspx * * @param job job handle diff --git a/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java b/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java index ab52ae5a43a2a..8eb4f841b9671 100644 --- a/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java +++ b/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java @@ -83,6 +83,7 @@ class OpenSearch extends EnvironmentAwareCommand { /** * Main entry point for starting opensearch */ + @SuppressWarnings("removal") public static void main(final String[] args) throws Exception { overrideDnsCachePolicyProperties(); /* @@ -188,9 +189,9 @@ void init(final boolean daemonize, final Path pidFile, final boolean quiet, Envi /** * Required method that's called by Apache Commons procrun when * running as a service on Windows, when the service is stopped. - * + * <p> * http://commons.apache.org/proper/commons-daemon/procrun.html - * + * <p> * NOTE: If this method is renamed and/or moved, make sure to * update opensearch-service.bat! 
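The @SuppressWarnings("removal") annotations added across these bootstrap classes all target one thing: SecurityManager and its companion APIs are deprecated for removal (JEP 411) as of JDK 17, and this code still has to call them. The pattern in isolation:

```java
class SecurityManagerProbe {
    // System.getSecurityManager() is deprecated for removal since JDK 17;
    // the annotation keeps warnings-as-errors builds quiet while the call remains necessary.
    @SuppressWarnings("removal")
    static boolean securityManagerInstalled() {
        return System.getSecurityManager() != null;
    }
}
```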
*/ diff --git a/server/src/main/java/org/opensearch/bootstrap/OpenSearchPolicy.java b/server/src/main/java/org/opensearch/bootstrap/OpenSearchPolicy.java index 14435db64274c..4571eb35ca93c 100644 --- a/server/src/main/java/org/opensearch/bootstrap/OpenSearchPolicy.java +++ b/server/src/main/java/org/opensearch/bootstrap/OpenSearchPolicy.java @@ -53,6 +53,7 @@ * * @opensearch.internal **/ +@SuppressWarnings("removal") final class OpenSearchPolicy extends Policy { /** template policy file, the one used in tests */ diff --git a/server/src/main/java/org/opensearch/bootstrap/OpenSearchUncaughtExceptionHandler.java b/server/src/main/java/org/opensearch/bootstrap/OpenSearchUncaughtExceptionHandler.java index 2b28260097ce1..5f9a01436b4cb 100644 --- a/server/src/main/java/org/opensearch/bootstrap/OpenSearchUncaughtExceptionHandler.java +++ b/server/src/main/java/org/opensearch/bootstrap/OpenSearchUncaughtExceptionHandler.java @@ -98,6 +98,7 @@ void onNonFatalUncaught(final String threadName, final Throwable t) { Terminal.DEFAULT.flush(); } + @SuppressWarnings("removal") void halt(int status) { AccessController.doPrivileged(new PrivilegedHaltAction(status)); } diff --git a/server/src/main/java/org/opensearch/bootstrap/Security.java b/server/src/main/java/org/opensearch/bootstrap/Security.java index 749c146de4f16..53b1d990f9a0c 100644 --- a/server/src/main/java/org/opensearch/bootstrap/Security.java +++ b/server/src/main/java/org/opensearch/bootstrap/Security.java @@ -66,6 +66,8 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.regex.Matcher; +import java.util.regex.Pattern; import static org.opensearch.bootstrap.FilePermissionUtils.addDirectoryPath; import static org.opensearch.bootstrap.FilePermissionUtils.addSingleFilePath; @@ -119,7 +121,10 @@ * * @opensearch.internal */ +@SuppressWarnings("removal") final class Security { + private static final Pattern CODEBASE_JAR_WITH_CLASSIFIER = Pattern.compile("^(.+)-\\d+\\.\\d+[^-]*.*?[-]?([^-]+)?\\.jar$"); + /** no instantiation */ private Security() {} @@ -230,33 +235,45 @@ static Policy readPolicy(URL policyFile, Map<String, URL> codebases) { try { List<String> propertiesSet = new ArrayList<>(); try { + final Map<Map.Entry<String, URL>, String> jarsWithPossibleClassifiers = new HashMap<>(); // set codebase properties for (Map.Entry<String, URL> codebase : codebases.entrySet()) { - String name = codebase.getKey(); - URL url = codebase.getValue(); + final String name = codebase.getKey(); + final URL url = codebase.getValue(); // We attempt to use a versionless identifier for each codebase. This assumes a specific version // format in the jar filename. While we cannot ensure all jars in all plugins use this format, nonconformity // only means policy grants would need to include the entire jar filename as they always have before. + final Matcher matcher = CODEBASE_JAR_WITH_CLASSIFIER.matcher(name); + if (matcher.matches() && matcher.group(2) != null) { + // There is a JAR that, possibly, has a classifier or SNAPSHOT at the end, examples are: + // - netty-tcnative-boringssl-static-2.0.61.Final-linux-x86_64.jar + // - kafka-server-common-3.6.1-test.jar + // - lucene-core-9.11.0-snapshot-8a555eb.jar + // - zstd-jni-1.5.5-5.jar + jarsWithPossibleClassifiers.put(codebase, matcher.group(2)); + } else { + String property = "codebase." + name; + String aliasProperty = "codebase." 
+ name.replaceFirst("-\\d+\\.\\d+.*\\.jar", ""); + addCodebaseToSystemProperties(propertiesSet, url, property, aliasProperty); + } + } + + // set codebase properties for JARs that might present with classifiers + for (Map.Entry<Map.Entry<String, URL>, String> jarWithPossibleClassifier : jarsWithPossibleClassifiers.entrySet()) { + final Map.Entry<String, URL> codebase = jarWithPossibleClassifier.getKey(); + final String name = codebase.getKey(); + final URL url = codebase.getValue(); + String property = "codebase." + name; String aliasProperty = "codebase." + name.replaceFirst("-\\d+\\.\\d+.*\\.jar", ""); - if (aliasProperty.equals(property) == false) { - propertiesSet.add(aliasProperty); - String previous = System.setProperty(aliasProperty, url.toString()); - if (previous != null) { - throw new IllegalStateException( - "codebase property already set: " + aliasProperty + " -> " + previous + ", cannot set to " + url.toString() - ); - } - } - propertiesSet.add(property); - String previous = System.setProperty(property, url.toString()); - if (previous != null) { - throw new IllegalStateException( - "codebase property already set: " + property + " -> " + previous + ", cannot set to " + url.toString() - ); + if (System.getProperties().containsKey(aliasProperty)) { + aliasProperty = aliasProperty + "@" + jarWithPossibleClassifier.getValue(); } + + addCodebaseToSystemProperties(propertiesSet, url, property, aliasProperty); } + return Policy.getInstance("JavaPolicy", new URIParameter(policyFile.toURI())); } finally { // clear codebase properties @@ -269,6 +286,27 @@ static Policy readPolicy(URL policyFile, Map<String, URL> codebases) { } } + /** adds the codebase to properties and System properties */ + @SuppressForbidden(reason = "accesses System properties to configure codebases") + private static void addCodebaseToSystemProperties(List<String> propertiesSet, final URL url, String property, String aliasProperty) { + if (aliasProperty.equals(property) == false) { + propertiesSet.add(aliasProperty); + String previous = System.setProperty(aliasProperty, url.toString()); + if (previous != null) { + throw new IllegalStateException( + "codebase property already set: " + aliasProperty + " -> " + previous + ", cannot set to " + url.toString() + ); + } + } + propertiesSet.add(property); + String previous = System.setProperty(property, url.toString()); + if (previous != null) { + throw new IllegalStateException( + "codebase property already set: " + property + " -> " + previous + ", cannot set to " + url.toString() + ); + } + } + /** returns dynamic Permissions to configured paths and bind ports */ static Permissions createPermissions(Environment environment) throws IOException { Permissions policy = new Permissions(); diff --git a/server/src/main/java/org/opensearch/client/AdminClient.java b/server/src/main/java/org/opensearch/client/AdminClient.java index 0c6c97b795983..1a5a39be4241a 100644 --- a/server/src/main/java/org/opensearch/client/AdminClient.java +++ b/server/src/main/java/org/opensearch/client/AdminClient.java @@ -32,13 +32,16 @@ package org.opensearch.client; +import org.opensearch.common.annotation.PublicApi; + /** * Administrative actions/operations against the cluster or the indices. 
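The classifier handling added to Security.readPolicy above hinges on whether group 2 of CODEBASE_JAR_WITH_CLASSIFIER matches. A standalone probe of that regex against jar names taken from the diff's own comments, plus one plain jar name (invented) for contrast:

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

class CodebasePatternProbe {
    // Copied verbatim from the Security hunk above.
    private static final Pattern CODEBASE_JAR_WITH_CLASSIFIER = Pattern.compile(
        "^(.+)-\\d+\\.\\d+[^-]*.*?[-]?([^-]+)?\\.jar$"
    );

    public static void main(String[] args) {
        String[] jars = {
            "netty-tcnative-boringssl-static-2.0.61.Final-linux-x86_64.jar", // classifier: x86_64
            "zstd-jni-1.5.5-5.jar",                                          // classifier: 5
            "opensearch-core-2.13.0.jar"                                     // no classifier
        };
        for (String jar : jars) {
            Matcher m = CODEBASE_JAR_WITH_CLASSIFIER.matcher(jar);
            String classifier = (m.matches() && m.group(2) != null) ? m.group(2) : "none";
            System.out.println(jar + " -> " + classifier);
        }
    }
}
```

Only jars with a non-null group 2 take the new classifier-aware aliasing pass; everything else keeps the original versionless-alias behavior.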
* * @see org.opensearch.client.Client#admin() * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface AdminClient { /** diff --git a/server/src/main/java/org/opensearch/client/Client.java b/server/src/main/java/org/opensearch/client/Client.java index 6d9b1f7adb64e..322b435bdf35c 100644 --- a/server/src/main/java/org/opensearch/client/Client.java +++ b/server/src/main/java/org/opensearch/client/Client.java @@ -32,10 +32,10 @@ package org.opensearch.client; -import org.opensearch.action.ActionFuture; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.segments.IndicesSegmentResponse; import org.opensearch.action.admin.indices.segments.PitSegmentsRequest; +import org.opensearch.action.admin.indices.view.ListViewNamesAction; +import org.opensearch.action.admin.indices.view.SearchViewAction; import org.opensearch.action.bulk.BulkRequest; import org.opensearch.action.bulk.BulkRequestBuilder; import org.opensearch.action.bulk.BulkResponse; @@ -84,10 +84,13 @@ import org.opensearch.action.update.UpdateRequestBuilder; import org.opensearch.action.update.UpdateResponse; import org.opensearch.common.Nullable; +import org.opensearch.common.action.ActionFuture; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; -import org.opensearch.common.lease.Releasable; +import org.opensearch.core.action.ActionListener; import java.util.Map; @@ -95,15 +98,16 @@ * A client provides a one stop interface for performing actions/operations against the cluster. * <p> * All operations performed are asynchronous by nature. Each action/operation has two flavors, the first - * simply returns an {@link org.opensearch.action.ActionFuture}, while the second accepts an + * simply returns an {@link ActionFuture}, while the second accepts an * {@link ActionListener}. * <p> * A client can be retrieved from a started {@link org.opensearch.node.Node}. 
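The Client javadoc above describes the two flavors every operation ships in, and the searchView and listViewNames methods added by this diff follow the same contract. A sketch of both flavors using a plain search (the index name is illustrative):

```java
import org.opensearch.action.search.SearchRequest;
import org.opensearch.client.Client;
import org.opensearch.core.action.ActionListener;

class TwoFlavorsSketch {
    static void demo(Client client) {
        SearchRequest request = new SearchRequest("my-index");

        // Flavor 1: ActionFuture, dereferenced synchronously.
        System.out.println("took " + client.search(request).actionGet().getTook());

        // Flavor 2: ActionListener, invoked asynchronously on completion.
        client.search(request, ActionListener.wrap(
            response -> System.out.println("took " + response.getTook()),
            e -> System.err.println("search failed: " + e)
        ));
    }
}
```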
* * @see org.opensearch.node.Node#client() * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface Client extends OpenSearchClient, Releasable { Setting<String> CLIENT_TYPE_SETTING_S = new Setting<>("client.type", "node", (s) -> { @@ -464,6 +468,18 @@ public interface Client extends OpenSearchClient, Releasable { */ void fieldCaps(FieldCapabilitiesRequest request, ActionListener<FieldCapabilitiesResponse> listener); + /** Search a view */ + void searchView(final SearchViewAction.Request request, final ActionListener<SearchResponse> listener); + + /** Search a view */ + ActionFuture<SearchResponse> searchView(final SearchViewAction.Request request); + + /** List all view names */ + void listViewNames(final ListViewNamesAction.Request request, ActionListener<ListViewNamesAction.Response> listener); + + /** List all view names */ + ActionFuture<ListViewNamesAction.Response> listViewNames(final ListViewNamesAction.Request request); + /** * Returns this clients settings */ diff --git a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java index f3c04b23dfd54..05f09c1a6e661 100644 --- a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java +++ b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java @@ -32,8 +32,6 @@ package org.opensearch.client; -import org.opensearch.action.ActionFuture; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainRequest; import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainRequestBuilder; import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainResponse; @@ -59,9 +57,6 @@ import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; -import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsRequest; -import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsRequestBuilder; -import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsResponse; import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequestBuilder; import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; @@ -76,6 +71,9 @@ import org.opensearch.action.admin.cluster.node.usage.NodesUsageResponse; import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreResponse; +import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsRequest; +import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsRequestBuilder; +import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsResponse; import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest; import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequestBuilder; import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse; @@ -158,17 +156,21 @@ import org.opensearch.action.search.GetSearchPipelineResponse; import org.opensearch.action.search.PutSearchPipelineRequest; import 
org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.common.action.ActionFuture; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.tasks.TaskId; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.core.xcontent.MediaType; /** * Administrative actions/operations against indices. * * @see AdminClient#cluster() * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ClusterAdminClient extends OpenSearchClient { /** @@ -669,7 +671,7 @@ public interface ClusterAdminClient extends OpenSearchClient { /** * Stores an ingest pipeline */ - PutPipelineRequestBuilder preparePutPipeline(String id, BytesReference source, XContentType xContentType); + PutPipelineRequestBuilder preparePutPipeline(String id, BytesReference source, MediaType mediaType); /** * Deletes a stored ingest pipeline @@ -719,7 +721,7 @@ public interface ClusterAdminClient extends OpenSearchClient { /** * Simulates an ingest pipeline */ - SimulatePipelineRequestBuilder prepareSimulatePipeline(BytesReference source, XContentType xContentType); + SimulatePipelineRequestBuilder prepareSimulatePipeline(BytesReference source, MediaType mediaType); /** * Explain the allocation of a shard diff --git a/server/src/main/java/org/opensearch/client/FilterClient.java b/server/src/main/java/org/opensearch/client/FilterClient.java index d121fdd77dfeb..2ce7146a794cb 100644 --- a/server/src/main/java/org/opensearch/client/FilterClient.java +++ b/server/src/main/java/org/opensearch/client/FilterClient.java @@ -31,12 +31,12 @@ package org.opensearch.client; -import org.opensearch.action.ActionType; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; -import org.opensearch.action.ActionResponse; +import org.opensearch.action.ActionType; import org.opensearch.client.support.AbstractClient; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; import org.opensearch.threadpool.ThreadPool; /** diff --git a/server/src/main/java/org/opensearch/client/IndicesAdminClient.java b/server/src/main/java/org/opensearch/client/IndicesAdminClient.java index 217902a2600e8..588584cd8a280 100644 --- a/server/src/main/java/org/opensearch/client/IndicesAdminClient.java +++ b/server/src/main/java/org/opensearch/client/IndicesAdminClient.java @@ -32,8 +32,6 @@ package org.opensearch.client; -import org.opensearch.action.ActionFuture; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest; import org.opensearch.action.admin.indices.alias.IndicesAliasesRequestBuilder; import org.opensearch.action.admin.indices.alias.get.GetAliasesRequest; @@ -87,13 +85,13 @@ import org.opensearch.action.admin.indices.refresh.RefreshRequest; import org.opensearch.action.admin.indices.refresh.RefreshRequestBuilder; import org.opensearch.action.admin.indices.refresh.RefreshResponse; +import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsRequest; +import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsRequestBuilder; +import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsResponse; import org.opensearch.action.admin.indices.resolve.ResolveIndexAction; import 
org.opensearch.action.admin.indices.rollover.RolloverRequest; import org.opensearch.action.admin.indices.rollover.RolloverRequestBuilder; import org.opensearch.action.admin.indices.rollover.RolloverResponse; -import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsRequest; -import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsRequestBuilder; -import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsResponse; import org.opensearch.action.admin.indices.segments.IndicesSegmentResponse; import org.opensearch.action.admin.indices.segments.IndicesSegmentsRequest; import org.opensearch.action.admin.indices.segments.IndicesSegmentsRequestBuilder; @@ -127,17 +125,24 @@ import org.opensearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.opensearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder; import org.opensearch.action.admin.indices.validate.query.ValidateQueryResponse; +import org.opensearch.action.admin.indices.view.CreateViewAction; +import org.opensearch.action.admin.indices.view.DeleteViewAction; +import org.opensearch.action.admin.indices.view.GetViewAction; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.metadata.IndexMetadata.APIBlock; import org.opensearch.common.Nullable; +import org.opensearch.common.action.ActionFuture; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.action.ActionListener; /** * Administrative actions/operations against indices. * * @see AdminClient#indices() * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface IndicesAdminClient extends OpenSearchClient { /** @@ -836,4 +841,28 @@ public interface IndicesAdminClient extends OpenSearchClient { * Resolves names and wildcard expressions to indices, aliases, and data streams */ ActionFuture<ResolveIndexAction.Response> resolveIndex(ResolveIndexAction.Request request); + + /** Create a view */ + void createView(CreateViewAction.Request request, ActionListener<GetViewAction.Response> listener); + + /** Create a view */ + ActionFuture<GetViewAction.Response> createView(CreateViewAction.Request request); + + /** Get the details of a view */ + void getView(GetViewAction.Request request, ActionListener<GetViewAction.Response> listener); + + /** Get the details of a view */ + ActionFuture<GetViewAction.Response> getView(GetViewAction.Request request); + + /** Delete a view */ + void deleteView(DeleteViewAction.Request request, ActionListener<AcknowledgedResponse> listener); + + /** Delete a view */ + ActionFuture<AcknowledgedResponse> deleteView(DeleteViewAction.Request request); + + /** Update a view */ + void updateView(CreateViewAction.Request request, ActionListener<GetViewAction.Response> listener); + + /** Update a view */ + ActionFuture<GetViewAction.Response> updateView(CreateViewAction.Request request); } diff --git a/server/src/main/java/org/opensearch/client/OpenSearchClient.java b/server/src/main/java/org/opensearch/client/OpenSearchClient.java index 755aca1f573e0..22b6436c93033 100644 --- a/server/src/main/java/org/opensearch/client/OpenSearchClient.java +++ b/server/src/main/java/org/opensearch/client/OpenSearchClient.java @@ -32,11 +32,11 @@ package org.opensearch.client; -import org.opensearch.action.ActionType; -import org.opensearch.action.ActionFuture; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; -import 
org.opensearch.action.ActionResponse; +import org.opensearch.action.ActionType; +import org.opensearch.common.action.ActionFuture; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; import org.opensearch.threadpool.ThreadPool; /** diff --git a/server/src/main/java/org/opensearch/client/OriginSettingClient.java b/server/src/main/java/org/opensearch/client/OriginSettingClient.java index 41fe9741cc4e7..1b0e08cc489c4 100644 --- a/server/src/main/java/org/opensearch/client/OriginSettingClient.java +++ b/server/src/main/java/org/opensearch/client/OriginSettingClient.java @@ -32,12 +32,12 @@ package org.opensearch.client; -import org.opensearch.action.ActionType; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; -import org.opensearch.action.ActionResponse; +import org.opensearch.action.ActionType; import org.opensearch.action.support.ContextPreservingActionListener; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; import java.util.function.Supplier; diff --git a/server/src/main/java/org/opensearch/client/ParentTaskAssigningClient.java b/server/src/main/java/org/opensearch/client/ParentTaskAssigningClient.java index 58ea99fa96bc3..17fda113b2fdc 100644 --- a/server/src/main/java/org/opensearch/client/ParentTaskAssigningClient.java +++ b/server/src/main/java/org/opensearch/client/ParentTaskAssigningClient.java @@ -32,13 +32,13 @@ package org.opensearch.client; -import org.opensearch.action.ActionType; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; -import org.opensearch.action.ActionResponse; +import org.opensearch.action.ActionType; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.tasks.TaskId; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; /** * A {@linkplain Client} that sets the parent task on all requests that it makes. 
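ParentTaskAssigningClient only changes its TaskId import here (org.opensearch.tasks to org.opensearch.core.tasks); its behavior is untouched. A minimal sketch of what the wrapper is for, with placeholder task coordinates:

```java
import org.opensearch.client.Client;
import org.opensearch.client.ParentTaskAssigningClient;
import org.opensearch.core.tasks.TaskId;

class ParentTaskSketch {
    static Client childRequestClient(Client client) {
        TaskId parentTask = new TaskId("node-id", 42L); // placeholder node id and task id
        // Every request sent through the returned client carries this parent task id,
        // so child requests show up under the parent in the task management APIs.
        return new ParentTaskAssigningClient(client, parentTask);
    }
}
```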
Use this to conveniently implement actions that cause diff --git a/server/src/main/java/org/opensearch/client/Requests.java b/server/src/main/java/org/opensearch/client/Requests.java index cad5bac8acf0d..3607590826007 100644 --- a/server/src/main/java/org/opensearch/client/Requests.java +++ b/server/src/main/java/org/opensearch/client/Requests.java @@ -50,9 +50,9 @@ import org.opensearch.action.admin.cluster.reroute.ClusterRerouteRequest; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsRequest; +import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingRequest; import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingRequest; import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingRequest; -import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingRequest; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; import org.opensearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; @@ -82,6 +82,8 @@ import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchScrollRequest; import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; /** * A handy one stop shop for creating requests (make sure to import static this class). @@ -98,7 +100,7 @@ public class Requests { /** * The default content type to use to generate source documents when indexing. 
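The Requests class advertises static-import usage, and the INDEX_CONTENT_TYPE default declared just below becomes a MediaType constant. A sketch of a caller relying on that default, assuming IndexRequest's source overloads were widened to MediaType in the same migration:

```java
import static org.opensearch.client.Requests.indexRequest;

import org.opensearch.action.index.IndexRequest;
import org.opensearch.client.Requests;

class RequestsSketch {
    static IndexRequest jsonDoc() {
        // INDEX_CONTENT_TYPE is now typed as MediaType (MediaTypeRegistry.JSON by default).
        return indexRequest("products").source("{\"price\": 42}", Requests.INDEX_CONTENT_TYPE);
    }
}
```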
*/ - public static XContentType INDEX_CONTENT_TYPE = XContentType.JSON; + public static MediaType INDEX_CONTENT_TYPE = MediaTypeRegistry.JSON; public static IndexRequest indexRequest() { return new IndexRequest(); diff --git a/server/src/main/java/org/opensearch/client/node/NodeClient.java b/server/src/main/java/org/opensearch/client/node/NodeClient.java index 60551ade09416..6e1bb6ce79349 100644 --- a/server/src/main/java/org/opensearch/client/node/NodeClient.java +++ b/server/src/main/java/org/opensearch/client/node/NodeClient.java @@ -32,17 +32,17 @@ package org.opensearch.client.node; -import org.opensearch.action.ActionType; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionModule.DynamicActionRegistry; import org.opensearch.action.ActionRequest; -import org.opensearch.action.ActionResponse; +import org.opensearch.action.ActionType; import org.opensearch.action.support.TransportAction; import org.opensearch.client.Client; import org.opensearch.client.support.AbstractClient; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskListener; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/client/support/AbstractClient.java index 40489e29ed9b5..6c6049f04231b 100644 --- a/server/src/main/java/org/opensearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/opensearch/client/support/AbstractClient.java @@ -34,10 +34,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionFuture; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; -import org.opensearch.action.ActionResponse; import org.opensearch.action.ActionType; import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainAction; import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainRequest; @@ -73,10 +70,6 @@ import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; -import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsAction; -import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsRequest; -import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsRequestBuilder; -import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsResponse; import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksAction; import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequestBuilder; @@ -96,6 +89,10 @@ import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreAction; import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreResponse; +import 
org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsAction; +import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsRequest; +import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsRequestBuilder; +import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsResponse; import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryAction; import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest; import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequestBuilder; @@ -259,15 +256,15 @@ import org.opensearch.action.admin.indices.refresh.RefreshRequest; import org.opensearch.action.admin.indices.refresh.RefreshRequestBuilder; import org.opensearch.action.admin.indices.refresh.RefreshResponse; +import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsAction; +import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsRequest; +import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsRequestBuilder; +import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsResponse; import org.opensearch.action.admin.indices.resolve.ResolveIndexAction; import org.opensearch.action.admin.indices.rollover.RolloverAction; import org.opensearch.action.admin.indices.rollover.RolloverRequest; import org.opensearch.action.admin.indices.rollover.RolloverRequestBuilder; import org.opensearch.action.admin.indices.rollover.RolloverResponse; -import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsAction; -import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsRequest; -import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsRequestBuilder; -import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsResponse; import org.opensearch.action.admin.indices.segments.IndicesSegmentResponse; import org.opensearch.action.admin.indices.segments.IndicesSegmentsAction; import org.opensearch.action.admin.indices.segments.IndicesSegmentsRequest; @@ -315,6 +312,12 @@ import org.opensearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.opensearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder; import org.opensearch.action.admin.indices.validate.query.ValidateQueryResponse; +import org.opensearch.action.admin.indices.view.CreateViewAction; +import org.opensearch.action.admin.indices.view.DeleteViewAction; +import org.opensearch.action.admin.indices.view.GetViewAction; +import org.opensearch.action.admin.indices.view.ListViewNamesAction; +import org.opensearch.action.admin.indices.view.SearchViewAction; +import org.opensearch.action.admin.indices.view.UpdateViewAction; import org.opensearch.action.bulk.BulkAction; import org.opensearch.action.bulk.BulkRequest; import org.opensearch.action.bulk.BulkRequestBuilder; @@ -371,6 +374,7 @@ import org.opensearch.action.search.DeleteSearchPipelineRequest; import org.opensearch.action.search.GetAllPitNodesRequest; import org.opensearch.action.search.GetAllPitNodesResponse; +import org.opensearch.action.search.GetAllPitsAction; import org.opensearch.action.search.GetSearchPipelineAction; import org.opensearch.action.search.GetSearchPipelineRequest; import org.opensearch.action.search.GetSearchPipelineResponse; @@ -378,7 +382,6 @@ import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.MultiSearchRequestBuilder; 
import org.opensearch.action.search.MultiSearchResponse; -import org.opensearch.action.search.GetAllPitsAction; import org.opensearch.action.search.PutSearchPipelineAction; import org.opensearch.action.search.PutSearchPipelineRequest; import org.opensearch.action.search.SearchAction; @@ -410,11 +413,14 @@ import org.opensearch.client.OpenSearchClient; import org.opensearch.cluster.metadata.IndexMetadata.APIBlock; import org.opensearch.common.Nullable; -import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.tasks.TaskId; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.core.xcontent.MediaType; import org.opensearch.threadpool.ThreadPool; import java.util.Map; @@ -742,6 +748,26 @@ public FieldCapabilitiesRequestBuilder prepareFieldCaps(String... indices) { return new FieldCapabilitiesRequestBuilder(this, FieldCapabilitiesAction.INSTANCE, indices); } + @Override + public void searchView(final SearchViewAction.Request request, final ActionListener<SearchResponse> listener) { + execute(SearchViewAction.INSTANCE, request, listener); + } + + @Override + public ActionFuture<SearchResponse> searchView(final SearchViewAction.Request request) { + return execute(SearchViewAction.INSTANCE, request); + } + + @Override + public void listViewNames(final ListViewNamesAction.Request request, ActionListener<ListViewNamesAction.Response> listener) { + execute(ListViewNamesAction.INSTANCE, request, listener); + } + + @Override + public ActionFuture<ListViewNamesAction.Response> listViewNames(final ListViewNamesAction.Request request) { + return execute(ListViewNamesAction.INSTANCE, request); + } + static class Admin implements AdminClient { private final ClusterAdmin clusterAdmin; @@ -1224,8 +1250,8 @@ public ActionFuture<AcknowledgedResponse> putPipeline(PutPipelineRequest request } @Override - public PutPipelineRequestBuilder preparePutPipeline(String id, BytesReference source, XContentType xContentType) { - return new PutPipelineRequestBuilder(this, PutPipelineAction.INSTANCE, id, source, xContentType); + public PutPipelineRequestBuilder preparePutPipeline(String id, BytesReference source, MediaType mediaType) { + return new PutPipelineRequestBuilder(this, PutPipelineAction.INSTANCE, id, source, mediaType); } @Override @@ -1274,8 +1300,8 @@ public ActionFuture<SimulatePipelineResponse> simulatePipeline(SimulatePipelineR } @Override - public SimulatePipelineRequestBuilder prepareSimulatePipeline(BytesReference source, XContentType xContentType) { - return new SimulatePipelineRequestBuilder(this, SimulatePipelineAction.INSTANCE, source, xContentType); + public SimulatePipelineRequestBuilder prepareSimulatePipeline(BytesReference source, MediaType mediaType) { + return new SimulatePipelineRequestBuilder(this, SimulatePipelineAction.INSTANCE, source, mediaType); } @Override @@ -2070,6 +2096,46 @@ public void resolveIndex(ResolveIndexAction.Request request, ActionListener<Reso public ActionFuture<ResolveIndexAction.Response> resolveIndex(ResolveIndexAction.Request request) { return execute(ResolveIndexAction.INSTANCE, request); } + + @Override + public void createView(CreateViewAction.Request request, 
ActionListener<GetViewAction.Response> listener) { + execute(CreateViewAction.INSTANCE, request, listener); + } + + @Override + public ActionFuture<GetViewAction.Response> createView(CreateViewAction.Request request) { + return execute(CreateViewAction.INSTANCE, request); + } + + /** Gets a view */ + public void getView(GetViewAction.Request request, ActionListener<GetViewAction.Response> listener) { + execute(GetViewAction.INSTANCE, request, listener); + } + + /** Gets a view */ + public ActionFuture<GetViewAction.Response> getView(GetViewAction.Request request) { + return execute(GetViewAction.INSTANCE, request); + } + + /** Delete a view */ + public void deleteView(DeleteViewAction.Request request, ActionListener<AcknowledgedResponse> listener) { + execute(DeleteViewAction.INSTANCE, request, listener); + } + + /** Delete a view */ + public ActionFuture<AcknowledgedResponse> deleteView(DeleteViewAction.Request request) { + return execute(DeleteViewAction.INSTANCE, request); + } + + /** Update a view */ + public void updateView(CreateViewAction.Request request, ActionListener<GetViewAction.Response> listener) { + execute(UpdateViewAction.INSTANCE, request, listener); + } + + /** Update a view */ + public ActionFuture<GetViewAction.Response> updateView(CreateViewAction.Request request) { + return execute(UpdateViewAction.INSTANCE, request); + } } @Override diff --git a/server/src/main/java/org/opensearch/cluster/AckedClusterStateTaskListener.java b/server/src/main/java/org/opensearch/cluster/AckedClusterStateTaskListener.java index 482087be1c8eb..28e1e7c53cb9c 100644 --- a/server/src/main/java/org/opensearch/cluster/AckedClusterStateTaskListener.java +++ b/server/src/main/java/org/opensearch/cluster/AckedClusterStateTaskListener.java @@ -44,7 +44,7 @@ public interface AckedClusterStateTaskListener extends ClusterStateTaskListener /** * Called to determine which nodes the acknowledgement is expected from. - * + * <p> * As this method will be called multiple times to determine the set of acking nodes, * it is crucial for it to return consistent results: Given the same listener instance * and the same node parameter, the method implementation should return the same result. 
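The consistency requirement spelled out above is easiest to honor by deriving the answer from immutable node properties. A sketch of a conforming mustAck; a real AckedClusterStateTaskListener has further methods, only this predicate is shown:

```java
import org.opensearch.cluster.node.DiscoveryNode;

class DataNodeAckPolicy {
    // Same node in, same answer out: node roles are fixed for the lifetime of the
    // node object, so repeated calls while collecting acks can never disagree.
    boolean mustAck(DiscoveryNode discoveryNode) {
        return discoveryNode.isDataNode();
    }
}
```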
diff --git a/server/src/main/java/org/opensearch/cluster/AckedClusterStateUpdateTask.java b/server/src/main/java/org/opensearch/cluster/AckedClusterStateUpdateTask.java index 21c4460964067..dfca0edcfbf5f 100644 --- a/server/src/main/java/org/opensearch/cluster/AckedClusterStateUpdateTask.java +++ b/server/src/main/java/org/opensearch/cluster/AckedClusterStateUpdateTask.java @@ -31,12 +31,12 @@ package org.opensearch.cluster; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.ack.AckedRequest; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Nullable; import org.opensearch.common.Priority; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; /** * An extension interface to {@link ClusterStateUpdateTask} that allows to be notified when diff --git a/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java b/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java index 236da42f94506..904083e96032f 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java @@ -37,8 +37,9 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNodes; -import org.opensearch.gateway.GatewayService; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.index.Index; +import org.opensearch.gateway.GatewayService; import java.util.ArrayList; import java.util.Collections; @@ -52,8 +53,9 @@ /** * An event received by the local node, signaling that the cluster state has changed. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterChangedEvent { private final String source; diff --git a/server/src/main/java/org/opensearch/cluster/ClusterInfo.java b/server/src/main/java/org/opensearch/cluster/ClusterInfo.java index 3793b5094a4cb..4c38d6fd99f5d 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterInfo.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterInfo.java @@ -34,13 +34,14 @@ import org.opensearch.Version; import org.opensearch.cluster.routing.ShardRouting; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.store.remote.filecache.FileCacheStats; import java.io.IOException; @@ -56,8 +57,9 @@ * <code>InternalClusterInfoService.shardIdentifierFromRouting(String)</code> * for the key used in the shardSizes map * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterInfo implements ToXContentFragment, Writeable { private final Map<String, DiskUsage> leastAvailableSpaceUsage; private final Map<String, DiskUsage> mostAvailableSpaceUsage; @@ -110,7 +112,7 @@ public ClusterInfo(StreamInput in) throws IOException { this.shardSizes = Collections.unmodifiableMap(sizeMap); this.routingToDataPath = Collections.unmodifiableMap(routingMap); this.reservedSpace = 
Collections.unmodifiableMap(reservedSpaceMap); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_10_0)) { this.nodeFileCacheStats = in.readMap(StreamInput::readString, FileCacheStats::new); } else { this.nodeFileCacheStats = Map.of(); @@ -124,7 +126,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeMap(this.shardSizes, StreamOutput::writeString, (o, v) -> out.writeLong(v == null ? -1 : v)); out.writeMap(this.routingToDataPath, (o, k) -> k.writeTo(o), StreamOutput::writeString); out.writeMap(this.reservedSpace, (o, v) -> v.writeTo(o), (o, v) -> v.writeTo(o)); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { out.writeMap(this.nodeFileCacheStats, StreamOutput::writeString, (o, v) -> v.writeTo(o)); } } @@ -287,8 +289,9 @@ public void writeTo(StreamOutput out) throws IOException { /** * Represents the total amount of "reserved" space on a particular data path, together with the set of shards considered. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class ReservedSpace implements Writeable { public static final ReservedSpace EMPTY = new ReservedSpace(0, new HashSet<>()); diff --git a/server/src/main/java/org/opensearch/cluster/ClusterModule.java b/server/src/main/java/org/opensearch/cluster/ClusterModule.java index b80fd1d746831..d2f4888ae8971 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterModule.java @@ -49,6 +49,7 @@ import org.opensearch.cluster.metadata.MetadataMappingService; import org.opensearch.cluster.metadata.MetadataUpdateSettingsService; import org.opensearch.cluster.metadata.RepositoriesMetadata; +import org.opensearch.cluster.metadata.ViewMetadata; import org.opensearch.cluster.metadata.WeightedRoutingMetadata; import org.opensearch.cluster.routing.DelayedAllocationService; import org.opensearch.cluster.routing.allocation.AllocationService; @@ -59,8 +60,8 @@ import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; -import org.opensearch.cluster.routing.allocation.decider.ConcurrentRecoveriesAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.ConcurrentRebalanceAllocationDecider; +import org.opensearch.cluster.routing.allocation.decider.ConcurrentRecoveriesAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.DiskThresholdDecider; import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.FilterAllocationDecider; @@ -77,17 +78,17 @@ import org.opensearch.cluster.routing.allocation.decider.TargetPoolAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.core.ParseField; import org.opensearch.common.inject.AbstractModule; -import org.opensearch.core.common.io.stream.NamedWriteable; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry.Entry; -import org.opensearch.core.common.io.stream.Writeable.Reader; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import 
org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.set.Sets; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.io.stream.NamedWriteable; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry.Entry; +import org.opensearch.core.common.io.stream.Writeable.Reader; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.gateway.GatewayAllocator; import org.opensearch.ingest.IngestMetadata; @@ -195,6 +196,7 @@ public static List<Entry> getNamedWriteables() { ComposableIndexTemplateMetadata::readDiffFrom ); registerMetadataCustom(entries, DataStreamMetadata.TYPE, DataStreamMetadata::new, DataStreamMetadata::readDiffFrom); + registerMetadataCustom(entries, ViewMetadata.TYPE, ViewMetadata::new, ViewMetadata::readDiffFrom); registerMetadataCustom(entries, WeightedRoutingMetadata.TYPE, WeightedRoutingMetadata::new, WeightedRoutingMetadata::readDiffFrom); registerMetadataCustom( entries, @@ -292,6 +294,7 @@ public static List<NamedXContentRegistry.Entry> getNamedXWriteables() { DataStreamMetadata::fromXContent ) ); + entries.add(new NamedXContentRegistry.Entry(Metadata.Custom.class, new ParseField(ViewMetadata.TYPE), ViewMetadata::fromXContent)); entries.add( new NamedXContentRegistry.Entry( Metadata.Custom.class, @@ -359,7 +362,7 @@ public static Collection<AllocationDecider> createAllocationDeciders( addAllocationDecider(deciders, new ConcurrentRebalanceAllocationDecider(settings, clusterSettings)); addAllocationDecider(deciders, new ConcurrentRecoveriesAllocationDecider(settings, clusterSettings)); addAllocationDecider(deciders, new EnableAllocationDecider(settings, clusterSettings)); - addAllocationDecider(deciders, new NodeVersionAllocationDecider()); + addAllocationDecider(deciders, new NodeVersionAllocationDecider(settings)); addAllocationDecider(deciders, new SnapshotInProgressAllocationDecider()); addAllocationDecider(deciders, new RestoreInProgressAllocationDecider()); addAllocationDecider(deciders, new FilterAllocationDecider(settings, clusterSettings)); diff --git a/server/src/main/java/org/opensearch/cluster/ClusterName.java b/server/src/main/java/org/opensearch/cluster/ClusterName.java index 440fde284afb8..d6149421c3fd0 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterName.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterName.java @@ -32,11 +32,12 @@ package org.opensearch.cluster; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.settings.Setting; -import org.opensearch.common.settings.Settings; import java.io.IOException; import java.util.Objects; @@ -45,8 +46,9 @@ /** * Cluster Name * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterName implements Writeable { public static final Setting<ClusterName> CLUSTER_NAME_SETTING = new Setting<>("cluster.name", "opensearch", (s) -> { diff --git a/server/src/main/java/org/opensearch/cluster/ClusterSettingsResponse.java b/server/src/main/java/org/opensearch/cluster/ClusterSettingsResponse.java index ee5c8c00dfaf4..3dc764cb5d520 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterSettingsResponse.java +++ 
b/server/src/main/java/org/opensearch/cluster/ClusterSettingsResponse.java @@ -9,10 +9,10 @@ package org.opensearch.cluster; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.settings.Settings; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; import java.util.Objects; diff --git a/server/src/main/java/org/opensearch/cluster/ClusterState.java b/server/src/main/java/org/opensearch/cluster/ClusterState.java index bf4d7977222ab..9e63f961d241d 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterState.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterState.java @@ -49,15 +49,16 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; -import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.VersionedNamedWriteable; -import org.opensearch.common.settings.Settings; -import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.discovery.Discovery; @@ -97,8 +98,9 @@ * throws the {@link IncompatibleClusterStateVersionException}, which causes the publishing mechanism to send * a full version of the cluster state to the node on which this exception was thrown. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterState implements ToXContentFragment, Diffable<ClusterState> { public static final ClusterState EMPTY_STATE = builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).build(); @@ -139,8 +141,9 @@ static <T extends VersionedNamedWriteable & FeatureAware> boolean shouldSerializ /** * Custom cluster state. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface Custom extends NamedDiffable<Custom>, ToXContentFragment, FeatureAware { /** @@ -596,8 +599,9 @@ public static Builder builder(ClusterState state) { /** * Builder for cluster state. 
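Since the hunks above promote ClusterState and its builder to public API, a minimal usage sketch may help (illustrative; currentState is assumed to be an existing ClusterState):

    // Derive a new cluster state from the current one; anything not overridden is carried over.
    ClusterState newState = ClusterState.builder(currentState)
        .incrementVersion()                        // bump the cluster state version
        .blocks(ClusterBlocks.builder().build())   // e.g. install an (empty) block set
        .build();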
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private final ClusterName clusterName; diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateApplier.java b/server/src/main/java/org/opensearch/cluster/ClusterStateApplier.java index 140e6426bb801..d7702729ae884 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateApplier.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateApplier.java @@ -33,13 +33,15 @@ package org.opensearch.cluster; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.annotation.PublicApi; /** * A component that is in charge of applying an incoming cluster state to the node's internal data structures. * The single apply method is called before the cluster state becomes visible via {@link ClusterService#state()}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ClusterStateApplier { /** diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateListener.java b/server/src/main/java/org/opensearch/cluster/ClusterStateListener.java index 01a8e51a3d13e..57fcfcad099ed 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateListener.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateListener.java @@ -32,11 +32,14 @@ package org.opensearch.cluster; +import org.opensearch.common.annotation.PublicApi; + /** * A listener to be notified when a cluster state changes. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ClusterStateListener { /** diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskConfig.java b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskConfig.java index 9a4b708548a7d..149d93a158007 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskConfig.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskConfig.java @@ -33,13 +33,15 @@ import org.opensearch.common.Nullable; import org.opensearch.common.Priority; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; /** * Cluster state update task configuration for timeout and priority * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ClusterStateTaskConfig { /** * The timeout for this cluster state update task configuration. If diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java index 50beeb1f03deb..115816798959e 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java @@ -33,6 +33,7 @@ import org.opensearch.cluster.service.ClusterManagerTaskThrottler; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import java.util.IdentityHashMap; import java.util.List; @@ -41,8 +42,9 @@ /** * Interface that updates the cluster state based on the task * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ClusterStateTaskExecutor<T> { /** * Update the cluster state based on the current state and the given tasks. Return the *same instance* if no state @@ -70,7 +72,7 @@ default boolean runOnlyOnMaster() { /** * Callback invoked after new cluster state is published.
Note that * this method is not invoked if the cluster state was not updated. - * + * <p> * Note that this method will be executed using system context. * * @param clusterChangedEvent the change event for this cluster state change, containing @@ -80,7 +82,7 @@ default void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) {} /** * Builds a concise description of a list of tasks (to be used in logging etc.). - * + * <p> * Note that the tasks given are not necessarily the same as those that will be passed to {@link #execute(ClusterState, List)}, * but are guaranteed to be a subset of them. This method can be called multiple times with different lists before execution. * This allows grouping task descriptions by the submitting source. @@ -103,8 +105,9 @@ default ClusterManagerTaskThrottler.ThrottlingKey getClusterManagerThrottlingKey * Represents the result of a batched execution of cluster state update tasks * @param <T> the type of the cluster state update task * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") class ClusterTasksResult<T> { @Nullable public final ClusterState resultingState; @@ -127,8 +130,9 @@ public static <T> Builder<T> builder() { /** * Builder for cluster state task. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder<T> { private final Map<T, TaskResult> executionResults = new IdentityHashMap<>(); diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java index d6c4abfad7b8d..0cb24bd3f3eab 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java @@ -32,14 +32,16 @@ package org.opensearch.cluster; import org.opensearch.cluster.service.ClusterManagerService; +import org.opensearch.common.annotation.PublicApi; import java.util.List; /** * Interface to implement a cluster state change listener * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ClusterStateTaskListener { /** diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateUpdateTask.java b/server/src/main/java/org/opensearch/cluster/ClusterStateUpdateTask.java index 9225914a931b2..ae6626dd4785d 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateUpdateTask.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateUpdateTask.java @@ -34,6 +34,7 @@ import org.opensearch.common.Nullable; import org.opensearch.common.Priority; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import java.util.List; @@ -41,8 +42,9 @@ /** * A task that can update the cluster state.
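For orientation, a minimal sketch of how such a task is typically submitted (illustrative; clusterService and the source label are assumptions, not part of this diff):

    // execute() computes the next state on the cluster-manager; returning the same
    // instance means "no change". onFailure() observes compute or publish failures.
    clusterService.submitStateUpdateTask("example-source", new ClusterStateUpdateTask() {
        @Override
        public ClusterState execute(ClusterState currentState) {
            return currentState; // no-op: returning the same instance publishes nothing
        }

        @Override
        public void onFailure(String source, Exception e) {
            // the task failed to compute or publish an updated state
        }
    });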
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class ClusterStateUpdateTask implements ClusterStateTaskConfig, diff --git a/server/src/main/java/org/opensearch/cluster/Diff.java b/server/src/main/java/org/opensearch/cluster/Diff.java index c0e8e7038d9b4..77301b7c04b66 100644 --- a/server/src/main/java/org/opensearch/cluster/Diff.java +++ b/server/src/main/java/org/opensearch/cluster/Diff.java @@ -32,13 +32,15 @@ package org.opensearch.cluster; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.Writeable; /** * Represents the difference between states of cluster state parts * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface Diff<T> extends Writeable { /** diff --git a/server/src/main/java/org/opensearch/cluster/DiffableUtils.java b/server/src/main/java/org/opensearch/cluster/DiffableUtils.java index dd2232968114e..a38fc81bebc08 100644 --- a/server/src/main/java/org/opensearch/cluster/DiffableUtils.java +++ b/server/src/main/java/org/opensearch/cluster/DiffableUtils.java @@ -182,7 +182,7 @@ public Map<K, T> apply(Map<K, T> map) { /** * Represents differences between two maps of objects and is used as a base class for different map implementations. - * + * <p> * Implements serialization. How differences are applied is left to subclasses. * * @param <K> the type of map keys @@ -381,9 +381,9 @@ public Integer readKey(StreamInput in) throws IOException { /** * Provides read and write operations to serialize map values. * Reading of values can be made dependent on map key. - * + * <p> * Also provides operations to distinguish whether map values are diffable. - * + * <p> * Should not be implemented directly; instead, implement either * {@link DiffableValueSerializer} or {@link NonDiffableValueSerializer}. * @@ -517,7 +517,7 @@ public Diff<V> readDiff(StreamInput in, K key) throws IOException { /** * Implementation of the ValueSerializer that wraps value and diff readers. - * + * <p> * Note: this implementation ignores the key. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/cluster/DiskUsage.java b/server/src/main/java/org/opensearch/cluster/DiskUsage.java index 961bfce053243..33ed030c58a02 100644 --- a/server/src/main/java/org/opensearch/cluster/DiskUsage.java +++ b/server/src/main/java/org/opensearch/cluster/DiskUsage.java @@ -32,11 +32,12 @@ package org.opensearch.cluster; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -46,8 +47,9 @@ /** * Encapsulation class used to represent the amount of disk used on a node.
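Circling back to the DiffableUtils map-diff contract quoted a few hunks above, a minimal sketch of the round trip (illustrative; IndexMetadata stands in for any Diffable value, and before/after are assumed maps):

    // Compute a serializable diff between two string-keyed maps of Diffable values,
    // then re-apply it on the receiving side to reconstruct the 'after' map.
    Diff<Map<String, IndexMetadata>> diff =
        DiffableUtils.diff(before, after, DiffableUtils.getStringKeySerializer());
    Map<String, IndexMetadata> reconstructed = diff.apply(before); // equals 'after'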
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DiskUsage implements ToXContentFragment, Writeable { final String nodeId; final String nodeName; diff --git a/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java b/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java index 9c12d6bb3e7ea..e381b8f244bf3 100644 --- a/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java +++ b/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java @@ -36,7 +36,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.action.LatchedActionListener; import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; @@ -57,6 +56,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.index.store.StoreStats; import org.opensearch.index.store.remote.filecache.FileCacheStats; @@ -82,7 +82,7 @@ * to 30 seconds). The InternalClusterInfoService only runs on the cluster-manager node. * Listens for changes in the number of data nodes and immediately submits a * ClusterInfoUpdateJob if a node has been added. - * + * <p> * Every time the timer runs, it gathers information about the disk usage and * shard sizes across the cluster. * diff --git a/server/src/main/java/org/opensearch/cluster/LocalNodeClusterManagerListener.java b/server/src/main/java/org/opensearch/cluster/LocalNodeClusterManagerListener.java index c07dcc5daaee6..0b2ed7ef66d43 100644 --- a/server/src/main/java/org/opensearch/cluster/LocalNodeClusterManagerListener.java +++ b/server/src/main/java/org/opensearch/cluster/LocalNodeClusterManagerListener.java @@ -31,12 +31,15 @@ package org.opensearch.cluster; +import org.opensearch.common.annotation.PublicApi; + /** * Enables listening to cluster-manager change events of the local node (when the local node becomes the cluster-manager, and when the local * node ceases being a cluster-manager). * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface LocalNodeClusterManagerListener extends ClusterStateListener { /** diff --git a/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java b/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java index 31c0b294b8004..2487aaf0d7c51 100644 --- a/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java +++ b/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java @@ -31,14 +31,17 @@ package org.opensearch.cluster; +import org.opensearch.common.annotation.DeprecatedApi; + /** * Enables listening to cluster-manager change events of the local node (when the local node becomes the cluster-manager, and when the local * node ceases being a cluster-manager).
* - * @opensearch.internal + * @opensearch.api * @deprecated As of 2.2, replaced by {@link LocalNodeClusterManagerListener} in support of inclusive language */ @Deprecated +@DeprecatedApi(since = "2.2.0") public interface LocalNodeMasterListener extends LocalNodeClusterManagerListener { /** diff --git a/server/src/main/java/org/opensearch/cluster/NamedDiff.java b/server/src/main/java/org/opensearch/cluster/NamedDiff.java index ce971aa723394..e994cfb224386 100644 --- a/server/src/main/java/org/opensearch/cluster/NamedDiff.java +++ b/server/src/main/java/org/opensearch/cluster/NamedDiff.java @@ -33,13 +33,15 @@ package org.opensearch.cluster; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.NamedWriteable; /** * Diff that also supports the NamedWriteable interface * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface NamedDiff<T extends Diffable<T>> extends Diff<T>, NamedWriteable { /** * The minimal version of the recipient this custom object can be sent to diff --git a/server/src/main/java/org/opensearch/cluster/NodeConnectionsService.java b/server/src/main/java/org/opensearch/cluster/NodeConnectionsService.java index 0014d5c61fb2d..1c12c260b3929 100644 --- a/server/src/main/java/org/opensearch/cluster/NodeConnectionsService.java +++ b/server/src/main/java/org/opensearch/cluster/NodeConnectionsService.java @@ -36,7 +36,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.GroupedActionListener; import org.opensearch.action.support.PlainListenableActionFuture; import org.opensearch.cluster.coordination.FollowersChecker; @@ -44,12 +43,14 @@ import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.service.ClusterApplier; import org.opensearch.common.Nullable; -import org.opensearch.common.component.AbstractLifecycleComponent; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Inject; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.core.action.ActionListener; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -81,8 +82,9 @@ * This component does not block on disconnections at all, because a disconnection might need to wait for an ongoing (background) connection * attempt to complete first.
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodeConnectionsService extends AbstractLifecycleComponent { private static final Logger logger = LogManager.getLogger(NodeConnectionsService.class); diff --git a/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java b/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java index c399bd59dbbe1..72a3519aca6f8 100644 --- a/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java @@ -33,11 +33,11 @@ import org.opensearch.LegacyESVersion; import org.opensearch.Version; -import org.opensearch.common.Strings; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.repositories.RepositoryOperation; @@ -110,7 +110,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } @Override diff --git a/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java b/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java index ce7e4f6d918be..769f97373f7b7 100644 --- a/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java @@ -34,12 +34,13 @@ import org.opensearch.Version; import org.opensearch.cluster.ClusterState.Custom; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.snapshots.Snapshot; import java.io.IOException; @@ -54,8 +55,9 @@ /** * Metadata about restore processes that are currently executing * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RestoreInProgress extends AbstractNamedDiffable<Custom> implements Custom, Iterable<RestoreInProgress.Entry> { /** @@ -139,8 +141,9 @@ public RestoreInProgress build() { /** * Restore metadata * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Entry { private final String uuid; private final State state; @@ -189,7 +192,7 @@ public Snapshot snapshot() { /** * Returns the shards that are being restored and their status * - * @return list of shards + * @return map of shard id to shard restore status */ public Map<ShardId, ShardRestoreStatus> shards() { return this.shards; } @@ -238,8 +241,9 @@ public int hashCode() { /** * Represents status of a restored shard * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class ShardRestoreStatus implements Writeable { private State state; private String nodeId; @@ -363,8 +367,9 @@ public int hashCode() { /** * Shard restore process state * - * @opensearch.internal + *
@opensearch.api */ + @PublicApi(since = "1.0.0") public enum State { /** * Initializing state diff --git a/server/src/main/java/org/opensearch/cluster/SnapshotDeletionsInProgress.java b/server/src/main/java/org/opensearch/cluster/SnapshotDeletionsInProgress.java index 1108a595cd5ac..e33245e02f75c 100644 --- a/server/src/main/java/org/opensearch/cluster/SnapshotDeletionsInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/SnapshotDeletionsInProgress.java @@ -35,10 +35,10 @@ import org.opensearch.Version; import org.opensearch.cluster.ClusterState.Custom; import org.opensearch.common.UUIDs; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.TimeValue; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.repositories.RepositoryOperation; import org.opensearch.snapshots.SnapshotId; diff --git a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java index 855f7755419d8..3de23d2490c63 100644 --- a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java @@ -35,15 +35,16 @@ import org.opensearch.Version; import org.opensearch.cluster.ClusterState.Custom; import org.opensearch.common.Nullable; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.repositories.IndexId; import org.opensearch.repositories.RepositoryOperation; import org.opensearch.repositories.RepositoryShardId; @@ -699,7 +700,7 @@ public int hashCode() { @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } @Override @@ -928,8 +929,9 @@ public String toString() { /** * State of the snapshots. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum State { INIT((byte) 0, false), STARTED((byte) 1, false), diff --git a/server/src/main/java/org/opensearch/cluster/TimeoutClusterStateListener.java b/server/src/main/java/org/opensearch/cluster/TimeoutClusterStateListener.java index eb31fa2b7e69d..f0fa5af64d157 100644 --- a/server/src/main/java/org/opensearch/cluster/TimeoutClusterStateListener.java +++ b/server/src/main/java/org/opensearch/cluster/TimeoutClusterStateListener.java @@ -32,13 +32,15 @@ package org.opensearch.cluster; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; /** * An extension of the cluster state listener that allows for timeouts and for post-added notifications.
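A hypothetical implementation sketch of the hooks this interface adds on top of ClusterStateListener (bodies are placeholders):

    TimeoutClusterStateListener listener = new TimeoutClusterStateListener() {
        @Override public void postAdded() { /* now registered; safe to re-check current state */ }
        @Override public void onClose() { /* the owning service is shutting down */ }
        @Override public void onTimeout(TimeValue timeout) { /* waited too long; give up */ }
        @Override public void clusterChanged(ClusterChangedEvent event) { /* observe the change */ }
    };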
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface TimeoutClusterStateListener extends ClusterStateListener { void postAdded(); diff --git a/server/src/main/java/org/opensearch/cluster/ack/ClusterStateUpdateResponse.java b/server/src/main/java/org/opensearch/cluster/ack/ClusterStateUpdateResponse.java index db26496c6f263..837bf8af449ad 100644 --- a/server/src/main/java/org/opensearch/cluster/ack/ClusterStateUpdateResponse.java +++ b/server/src/main/java/org/opensearch/cluster/ack/ClusterStateUpdateResponse.java @@ -32,11 +32,14 @@ package org.opensearch.cluster.ack; +import org.opensearch.common.annotation.PublicApi; + /** * Base response returned after a cluster state update * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterStateUpdateResponse { private final boolean acknowledged; diff --git a/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java b/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java index c589d9bfeeab2..0b569901d0da1 100644 --- a/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java +++ b/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java @@ -33,7 +33,6 @@ package org.opensearch.cluster.action.index; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.mapping.put.AutoPutMappingAction; import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; @@ -48,8 +47,9 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.RunOnce; import org.opensearch.common.util.concurrent.UncategorizedExecutionException; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.mapper.Mapping; import java.util.concurrent.Semaphore; @@ -153,7 +153,7 @@ int blockedThreads() { protected void sendUpdateMapping(Index index, Mapping mappingUpdate, ActionListener<Void> listener) { PutMappingRequest putMappingRequest = new PutMappingRequest(); putMappingRequest.setConcreteIndex(index); - putMappingRequest.source(mappingUpdate.toString(), XContentType.JSON); + putMappingRequest.source(mappingUpdate.toString(), MediaTypeRegistry.JSON); putMappingRequest.clusterManagerNodeTimeout(dynamicMappingUpdateTimeout); putMappingRequest.timeout(TimeValue.ZERO); client.execute( diff --git a/server/src/main/java/org/opensearch/cluster/action/index/NodeMappingRefreshAction.java b/server/src/main/java/org/opensearch/cluster/action/index/NodeMappingRefreshAction.java index e6781fc22e1a7..fe4793171b428 100644 --- a/server/src/main/java/org/opensearch/cluster/action/index/NodeMappingRefreshAction.java +++ b/server/src/main/java/org/opensearch/cluster/action/index/NodeMappingRefreshAction.java @@ -42,13 +42,13 @@ import org.opensearch.common.inject.Inject; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.EmptyTransportResponseHandler; import org.opensearch.transport.TransportChannel; import 
org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestHandler; -import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportService; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/opensearch/cluster/action/shard/ShardStateAction.java index 9d9b6c52f6b25..cb5749a91d448 100644 --- a/server/src/main/java/org/opensearch/cluster/action/shard/ShardStateAction.java +++ b/server/src/main/java/org/opensearch/cluster/action/shard/ShardStateAction.java @@ -35,9 +35,8 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.OpenSearchException; import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionListener; +import org.opensearch.OpenSearchException; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterManagerNodeChangePredicate; import org.opensearch.cluster.ClusterState; @@ -58,11 +57,13 @@ import org.opensearch.common.Nullable; import org.opensearch.common.Priority; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.settings.Setting; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.node.NodeClosedException; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; @@ -74,7 +75,6 @@ import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestDeduplicator; import org.opensearch.transport.TransportRequestHandler; -import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportService; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributeValueHealth.java b/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributeValueHealth.java index 75b73be8fa12e..b4d797a39dd2f 100644 --- a/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributeValueHealth.java +++ b/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributeValueHealth.java @@ -14,11 +14,12 @@ import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.WeightedRouting; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -31,7 +32,10 @@ /** * Cluster Awareness AttributeValue Health information + * + * @opensearch.api */ +@PublicApi(since = "1.0.0") 
public class ClusterAwarenessAttributeValueHealth implements Writeable, ToXContentFragment { private static final String ACTIVE_SHARDS = "active_shards"; @@ -293,7 +297,7 @@ private void setWeightInfo(ClusterState clusterState) { @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } @Override diff --git a/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributesHealth.java b/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributesHealth.java index 340fcfe0d0d31..3de2260d0e8bd 100644 --- a/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributesHealth.java +++ b/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributesHealth.java @@ -11,11 +11,12 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -34,7 +35,9 @@ /** * Cluster Awareness health information * + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterAwarenessAttributesHealth implements Iterable<ClusterAwarenessAttributeValueHealth>, Writeable, ToXContentFragment { private final String awarenessAttributeName; @@ -279,7 +282,7 @@ public static ClusterAwarenessAttributesHealth fromXContent(XContentParser parse @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } @Override diff --git a/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessHealth.java b/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessHealth.java index 85c5ee60e3ba5..841764110626d 100644 --- a/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessHealth.java +++ b/server/src/main/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessHealth.java @@ -11,11 +11,12 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -30,7 +31,10 @@ /** * Cluster state Awareness health information + * + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterAwarenessHealth implements Writeable, ToXContentFragment, 
Iterable<ClusterAwarenessAttributesHealth> { private static final String AWARENESS_ATTRIBUTE = "awareness_attributes"; diff --git a/server/src/main/java/org/opensearch/cluster/block/ClusterBlock.java b/server/src/main/java/org/opensearch/cluster/block/ClusterBlock.java index a839644f74cb3..5fa897c0b1185 100644 --- a/server/src/main/java/org/opensearch/cluster/block/ClusterBlock.java +++ b/server/src/main/java/org/opensearch/cluster/block/ClusterBlock.java @@ -33,12 +33,13 @@ package org.opensearch.cluster.block; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.rest.RestStatus; import java.io.IOException; import java.util.EnumSet; @@ -48,8 +49,9 @@ /** * Blocks the cluster for concurrency * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterBlock implements Writeable, ToXContentFragment { private final int id; diff --git a/server/src/main/java/org/opensearch/cluster/block/ClusterBlockException.java b/server/src/main/java/org/opensearch/cluster/block/ClusterBlockException.java index 4673f075e8439..ea6c0c69c523d 100644 --- a/server/src/main/java/org/opensearch/cluster/block/ClusterBlockException.java +++ b/server/src/main/java/org/opensearch/cluster/block/ClusterBlockException.java @@ -33,6 +33,7 @@ package org.opensearch.cluster.block; import org.opensearch.OpenSearchException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.rest.RestStatus; @@ -49,8 +50,9 @@ /** * Internal exception on obtaining a cluster block * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterBlockException extends OpenSearchException { private final Set<ClusterBlock> blocks; diff --git a/server/src/main/java/org/opensearch/cluster/block/ClusterBlockLevel.java b/server/src/main/java/org/opensearch/cluster/block/ClusterBlockLevel.java index 5ec847e100c86..5d3bf94aedb19 100644 --- a/server/src/main/java/org/opensearch/cluster/block/ClusterBlockLevel.java +++ b/server/src/main/java/org/opensearch/cluster/block/ClusterBlockLevel.java @@ -32,13 +32,16 @@ package org.opensearch.cluster.block; +import org.opensearch.common.annotation.PublicApi; + import java.util.EnumSet; /** * What level to block the cluster * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum ClusterBlockLevel { READ, WRITE, diff --git a/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java b/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java index 33d55e6e747a3..304136166d515 100644 --- a/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java +++ b/server/src/main/java/org/opensearch/cluster/block/ClusterBlocks.java @@ -37,11 +37,12 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.MetadataIndexStateService; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.util.set.Sets; import org.opensearch.core.common.io.stream.StreamInput; import 
org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.util.set.Sets; -import org.opensearch.index.IndexModule; import org.opensearch.core.rest.RestStatus; +import org.opensearch.index.IndexModule; import java.io.IOException; import java.util.Collections; @@ -59,8 +60,9 @@ /** * Represents current cluster level blocks to block dirty operations done against the cluster. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> { public static final ClusterBlocks EMPTY_CLUSTER_BLOCK = new ClusterBlocks(emptySet(), Map.of()); @@ -355,8 +357,9 @@ public static Builder builder() { /** * Builder for cluster blocks. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private final Set<ClusterBlock> global = new HashSet<>(); diff --git a/server/src/main/java/org/opensearch/cluster/block/IndexCreateBlockException.java b/server/src/main/java/org/opensearch/cluster/block/IndexCreateBlockException.java index 729d76c72e99e..3e9fa3e23a12f 100644 --- a/server/src/main/java/org/opensearch/cluster/block/IndexCreateBlockException.java +++ b/server/src/main/java/org/opensearch/cluster/block/IndexCreateBlockException.java @@ -8,6 +8,7 @@ package org.opensearch.cluster.block; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import java.io.IOException; @@ -16,8 +17,9 @@ /** * Internal exception on obtaining an index create block * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexCreateBlockException extends ClusterBlockException { public IndexCreateBlockException(Set<ClusterBlock> globalLevelBlocks) { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelper.java b/server/src/main/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelper.java index 9b51e56dce966..6b0f9b1b7aa2f 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelper.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelper.java @@ -40,9 +40,9 @@ import org.opensearch.common.Nullable; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.gateway.GatewayMetaState; import org.opensearch.monitor.StatusInfo; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java b/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java index 3900155ffc12e..0ef8262a216ee 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java @@ -31,26 +31,28 @@ package org.opensearch.cluster.coordination; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; /** * 
Publishes the cluster state * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ClusterStatePublisher { /** * Publish all the changes to the cluster from the cluster-manager (can only be called by the cluster-manager). The publish * process should apply this state to the cluster-manager as well! - * + * <p> * The publishListener allows waiting for the publication to complete, which can be either successful completion, timing out, or failing. * The method is guaranteed to pass back a {@link FailedToCommitClusterStateException} to the publishListener if the change is not * committed and should be rejected. Any other exception signals that something bad happened but the change is committed. - * + * <p> * The {@link AckListener} allows keeping track of the acks received from nodes, and verifying whether * they updated their own cluster state or not. */ @@ -59,8 +61,9 @@ public interface ClusterStatePublisher { /** * An acknowledgement listener. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") interface AckListener { /** * Should be called when the cluster coordination layer has committed the cluster state (i.e. even if this publication fails, diff --git a/server/src/main/java/org/opensearch/cluster/coordination/CompressedStreamUtils.java b/server/src/main/java/org/opensearch/cluster/coordination/CompressedStreamUtils.java index ab3cb51683c8f..dc7b203eb7c4b 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/CompressedStreamUtils.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/CompressedStreamUtils.java @@ -12,16 +12,16 @@ import org.apache.logging.log4j.Logger; import org.opensearch.Version; import org.opensearch.common.CheckedConsumer; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.compress.Compressor; -import org.opensearch.common.compress.CompressorFactory; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.InputStreamStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.OutputStreamStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.compress.Compressor; +import org.opensearch.core.compress.CompressorRegistry; import org.opensearch.transport.BytesTransportRequest; import java.io.IOException; @@ -37,7 +37,7 @@ public final class CompressedStreamUtils { public static BytesReference createCompressedStream(Version version, CheckedConsumer<StreamOutput, IOException> outputConsumer) throws IOException { final BytesStreamOutput bStream = new BytesStreamOutput(); - try (StreamOutput stream = new OutputStreamStreamOutput(CompressorFactory.defaultCompressor().threadLocalOutputStream(bStream))) { + try (StreamOutput stream = new OutputStreamStreamOutput(CompressorRegistry.defaultCompressor().threadLocalOutputStream(bStream))) { stream.setVersion(version); outputConsumer.accept(stream); } @@ -48,7 +48,7 @@ public static BytesReference createCompressedStream(Version version, CheckedCons public static StreamInput decompressBytes(BytesTransportRequest request, NamedWriteableRegistry namedWriteableRegistry) throws IOException { - final Compressor compressor = CompressorFactory.compressor(request.bytes()); +
final Compressor compressor = CompressorRegistry.compressor(request.bytes()); final StreamInput in; if (compressor != null) { in = new InputStreamStreamInput(compressor.threadLocalInputStream(request.bytes().streamInput())); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationMetadata.java b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationMetadata.java index 520eb0e15754d..53398d6f3f98f 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationMetadata.java @@ -32,11 +32,12 @@ package org.opensearch.cluster.coordination; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.util.set.Sets; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.util.set.Sets; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -55,8 +56,9 @@ /** * Metadata for cluster coordination * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CoordinationMetadata implements Writeable, ToXContentFragment { public static final CoordinationMetadata EMPTY_METADATA = builder().build(); @@ -214,8 +216,9 @@ public String toString() { /** * Builder for coordination metadata. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private long term = 0; private VotingConfiguration lastCommittedConfiguration = VotingConfiguration.EMPTY_CONFIG; @@ -266,8 +269,9 @@ public CoordinationMetadata build() { /** * Excluded nodes from voting config. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class VotingConfigExclusion implements Writeable, ToXContentFragment { public static final String MISSING_VALUE_MARKER = "_absent_"; private final String nodeId; @@ -362,8 +366,9 @@ public String toString() { /** * A collection of persistent node ids, denoting the voting configuration for cluster state changes. 
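A minimal sketch of how this voting configuration is consulted (illustrative; node ids are hypothetical):

    // A three-node voting configuration: any two of the three ids form a quorum.
    VotingConfiguration config = new VotingConfiguration(Set.of("node-1", "node-2", "node-3"));
    boolean quorum = config.hasQuorum(List.of("node-1", "node-2")); // true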
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class VotingConfiguration implements Writeable, ToXContentFragment { public static final VotingConfiguration EMPTY_CONFIG = new VotingConfiguration(Collections.emptySet()); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java index 08cd7d0ab02db..987a3e3ffa7d3 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java @@ -35,8 +35,11 @@ import org.apache.logging.log4j.Logger; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfiguration; +import org.opensearch.cluster.coordination.PersistedStateRegistry.PersistedStateType; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.io.IOUtils; import java.io.Closeable; import java.io.IOException; @@ -49,6 +52,7 @@ import java.util.Set; import static org.opensearch.cluster.coordination.Coordinator.ZEN1_BWC_TERM; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled; /** * The core class of the cluster state coordination algorithm, directly implementing the @@ -64,8 +68,8 @@ public class CoordinationState { private final ElectionStrategy electionStrategy; - // persisted state - private final PersistedState persistedState; + // persisted state registry + private final PersistedStateRegistry persistedStateRegistry; // transient state private VoteCollection joinVotes; @@ -74,12 +78,18 @@ public class CoordinationState { private long lastPublishedVersion; private VotingConfiguration lastPublishedConfiguration; private VoteCollection publishVotes; - - public CoordinationState(DiscoveryNode localNode, PersistedState persistedState, ElectionStrategy electionStrategy) { + private final boolean isRemoteStateEnabled; + + public CoordinationState( + DiscoveryNode localNode, + PersistedStateRegistry persistedStateRegistry, + ElectionStrategy electionStrategy, + Settings settings + ) { this.localNode = localNode; - // persisted state - this.persistedState = persistedState; + // persisted state registry + this.persistedStateRegistry = persistedStateRegistry; this.electionStrategy = electionStrategy; // transient state @@ -87,16 +97,19 @@ public CoordinationState(DiscoveryNode localNode, PersistedState persistedState, this.startedJoinSinceLastReboot = false; this.electionWon = false; this.lastPublishedVersion = 0L; - this.lastPublishedConfiguration = persistedState.getLastAcceptedState().getLastAcceptedConfiguration(); + this.lastPublishedConfiguration = persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL) + .getLastAcceptedState() + .getLastAcceptedConfiguration(); this.publishVotes = new VoteCollection(); + this.isRemoteStateEnabled = isRemoteStoreClusterStateEnabled(settings); } public long getCurrentTerm() { - return persistedState.getCurrentTerm(); + return persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL).getCurrentTerm(); } public ClusterState getLastAcceptedState() { - return persistedState.getLastAcceptedState(); + return persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL).getLastAcceptedState(); } public long getLastAcceptedTerm() { @@ 
-186,7 +199,7 @@ public void setInitialState(ClusterState initialState) { assert initialState.getLastAcceptedConfiguration().isEmpty() == false; assert initialState.getLastCommittedConfiguration().isEmpty() == false; - persistedState.setLastAcceptedState(initialState); + persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL).setLastAcceptedState(initialState); } /** @@ -222,7 +235,7 @@ public Join handleStartJoin(StartJoinRequest startJoinRequest) { logger.debug("handleStartJoin: discarding {}: {}", joinVotes, reason); } - persistedState.setCurrentTerm(startJoinRequest.getTerm()); + persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL).setCurrentTerm(startJoinRequest.getTerm()); assert getCurrentTerm() == startJoinRequest.getTerm(); lastPublishedVersion = 0; lastPublishedConfiguration = getLastAcceptedConfiguration(); @@ -436,7 +449,7 @@ public PublishResponse handlePublishRequest(PublishRequest publishRequest) { clusterState.version(), clusterState.term() ); - persistedState.setLastAcceptedState(clusterState); + persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL).setLastAcceptedState(clusterState); assert getLastAcceptedState() == clusterState; return new PublishResponse(clusterState.term(), clusterState.version()); @@ -490,6 +503,7 @@ public Optional<ApplyCommitRequest> handlePublishResponse(DiscoveryNode sourceNo publishResponse.getVersion(), publishResponse.getTerm() ); + handlePreCommit(); return Optional.of(new ApplyCommitRequest(localNode, publishResponse.getTerm(), publishResponse.getVersion())); } @@ -547,10 +561,36 @@ public void handleCommit(ApplyCommitRequest applyCommit) { applyCommit.getVersion() ); - persistedState.markLastAcceptedStateAsCommitted(); + persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL).markLastAcceptedStateAsCommitted(); assert getLastCommittedConfiguration().equals(getLastAcceptedConfiguration()); } + /** + * This method should be called just before sending the PublishRequest to all cluster nodes. + * @param clusterState The cluster state for which pre publish activities should happen. + */ + public void handlePrePublish(ClusterState clusterState) { + // Publishing the current state to remote store before sending the cluster state to other nodes. + // This is to ensure the remote store is the single source of truth for current state. Even if the current node + // goes down after sending the cluster state to other nodes, we should be able to read the remote state and + // recover the cluster. + if (isRemoteStateEnabled) { + assert persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE) != null : "Remote state has not been initialized"; + persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE).setLastAcceptedState(clusterState); + } + } + + /** + * This method should be called just before sending the ApplyCommitRequest to all cluster nodes. + */ + public void handlePreCommit() { + // Publishing the committed state to remote store before sending apply commit to other nodes. 
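+        // Illustrative ordering sketch (comment only, not additional logic in this change): together with handlePrePublish, the remote store is always written ahead of the cluster-wide step it backs. handlePrePublish(state) writes the state to the remote PersistedState before PublishRequests go out and nodes accept via their local PersistedState; handlePreCommit() marks the remote state committed before ApplyCommitRequests go out and nodes commit via their local PersistedState.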
+ if (isRemoteStateEnabled) { + assert persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE) != null : "Remote state has not been initialized"; + persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE).markLastAcceptedStateAsCommitted(); + } + } + public void invariant() { assert getLastAcceptedTerm() <= getCurrentTerm(); assert electionWon() == isElectionQuorum(joinVotes); @@ -564,7 +604,7 @@ public void invariant() { } public void close() throws IOException { - persistedState.close(); + IOUtils.close(persistedStateRegistry); } /** @@ -598,6 +638,12 @@ public interface PersistedState extends Closeable { */ void setLastAcceptedState(ClusterState clusterState); + /** + * Returns the stats for the persistence layer for {@link CoordinationState}. + * @return PersistedStateStats + */ + PersistedStateStats getStats(); + /** * Marks the last accepted cluster state as committed. * After a successful call to this method, {@link #getLastAcceptedState()} should return the last cluster state that was set, diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index de751d881bc0e..5a07f964f94a4 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -35,7 +35,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; @@ -57,24 +56,27 @@ import org.opensearch.cluster.service.ClusterApplier; import org.opensearch.cluster.service.ClusterApplier.ClusterApplyListener; import org.opensearch.cluster.service.ClusterManagerService; +import org.opensearch.cluster.service.ClusterStateStats; import org.opensearch.common.Booleans; import org.opensearch.common.Nullable; import org.opensearch.common.Priority; import org.opensearch.common.SetOnce; -import org.opensearch.common.Strings; -import org.opensearch.common.component.AbstractLifecycleComponent; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.ListenableFuture; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.common.lease.Releasable; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.transport.TransportResponse.Empty; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.discovery.Discovery; import 
org.opensearch.discovery.DiscoveryModule; import org.opensearch.discovery.DiscoveryStats; @@ -84,9 +86,9 @@ import org.opensearch.discovery.SeedHostsResolver; import org.opensearch.monitor.NodeHealthService; import org.opensearch.monitor.StatusInfo; +import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.threadpool.Scheduler; import org.opensearch.threadpool.ThreadPool.Names; -import org.opensearch.transport.TransportResponse.Empty; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -181,6 +183,8 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery private JoinHelper.JoinAccumulator joinAccumulator; private Optional<CoordinatorPublication> currentPublication = Optional.empty(); private final NodeHealthService nodeHealthService; + private final PersistedStateRegistry persistedStateRegistry; + private final RemoteStoreNodeService remoteStoreNodeService; /** * @param nodeName The name of the node, used to name the {@link java.util.concurrent.ExecutorService} of the {@link SeedHostsResolver}. @@ -201,7 +205,9 @@ public Coordinator( Random random, RerouteService rerouteService, ElectionStrategy electionStrategy, - NodeHealthService nodeHealthService + NodeHealthService nodeHealthService, + PersistedStateRegistry persistedStateRegistry, + RemoteStoreNodeService remoteStoreNodeService ) { this.settings = settings; this.transportService = transportService; @@ -215,6 +221,7 @@ public Coordinator( allocationService, clusterManagerService, transportService, + remoteStoreNodeService, this::getCurrentTerm, this::getStateForClusterManagerService, this::handleJoinRequest, @@ -254,9 +261,10 @@ public Coordinator( this::handlePublishRequest, this::handleApplyCommit ); - this.leaderChecker = new LeaderChecker(settings, transportService, this::onLeaderFailure, nodeHealthService); + this.leaderChecker = new LeaderChecker(settings, clusterSettings, transportService, this::onLeaderFailure, nodeHealthService); this.followersChecker = new FollowersChecker( settings, + clusterSettings, transportService, this::onFollowerCheckRequest, this::removeNode, @@ -286,7 +294,9 @@ public Coordinator( joinHelper::logLastFailedJoinAttempt ); this.nodeHealthService = nodeHealthService; + this.persistedStateRegistry = persistedStateRegistry; this.localNodeCommissioned = true; + this.remoteStoreNodeService = remoteStoreNodeService; } private ClusterFormationState getClusterFormationState() { @@ -820,8 +830,7 @@ boolean publicationInProgress() { @Override protected void doStart() { synchronized (mutex) { - CoordinationState.PersistedState persistedState = persistedStateSupplier.get(); - coordinationState.set(new CoordinationState(getLocalNode(), persistedState, electionStrategy)); + coordinationState.set(new CoordinationState(getLocalNode(), persistedStateRegistry, electionStrategy, settings)); peerFinder.setCurrentTerm(getCurrentTerm()); configuredHostsResolver.start(); final ClusterState lastAcceptedState = coordinationState.get().getLastAcceptedState(); @@ -858,7 +867,16 @@ protected void doStart() { @Override public DiscoveryStats stats() { - return new DiscoveryStats(new PendingClusterStateStats(0, 0, 0), publicationHandler.stats()); + ClusterStateStats clusterStateStats = clusterManagerService.getClusterStateStats(); + ArrayList<PersistedStateStats> stats = new ArrayList<>(); + Stream.of(PersistedStateRegistry.PersistedStateType.values()).forEach(stateType -> { + if (persistedStateRegistry.getPersistedState(stateType) != null + && 
persistedStateRegistry.getPersistedState(stateType).getStats() != null) { + stats.add(persistedStateRegistry.getPersistedState(stateType).getStats()); + } + }); + clusterStateStats.setPersistenceStats(stats); + return new DiscoveryStats(new PendingClusterStateStats(0, 0, 0), publicationHandler.stats(), clusterStateStats); } @Override @@ -1308,6 +1326,7 @@ assert getLocalNode().equals(clusterState.getNodes().get(getLocalNode().getId()) leaderChecker.setCurrentNodes(publishNodes); followersChecker.setCurrentNodes(publishNodes); lagDetector.setTrackedNodes(publishNodes); + coordinationState.get().handlePrePublish(clusterState); publication.start(followersChecker.getFaultyNodes()); } } catch (Exception e) { @@ -1320,20 +1339,24 @@ assert getLocalNode().equals(clusterState.getNodes().get(getLocalNode().getId()) // deserialized from the resulting JSON private boolean assertPreviousStateConsistency(ClusterChangedEvent event) { assert event.previousState() == coordinationState.get().getLastAcceptedState() - || XContentHelper.convertToMap(JsonXContent.jsonXContent, Strings.toString(XContentType.JSON, event.previousState()), false) + || XContentHelper.convertToMap( + JsonXContent.jsonXContent, + Strings.toString(MediaTypeRegistry.JSON, event.previousState()), + false + ) .equals( XContentHelper.convertToMap( JsonXContent.jsonXContent, Strings.toString( - XContentType.JSON, + MediaTypeRegistry.JSON, clusterStateWithNoClusterManagerBlock(coordinationState.get().getLastAcceptedState()) ), false ) - ) : Strings.toString(XContentType.JSON, event.previousState()) + ) : Strings.toString(MediaTypeRegistry.JSON, event.previousState()) + " vs " + Strings.toString( - XContentType.JSON, + MediaTypeRegistry.JSON, clusterStateWithNoClusterManagerBlock(coordinationState.get().getLastAcceptedState()) ); return true; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/ElectionSchedulerFactory.java b/server/src/main/java/org/opensearch/cluster/coordination/ElectionSchedulerFactory.java index fa7927001effc..828db5864d28b 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/ElectionSchedulerFactory.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/ElectionSchedulerFactory.java @@ -36,12 +36,12 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; -import org.opensearch.common.lease.Releasable; import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool.Names; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java b/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java index 08008152cfcd6..70bb0515bb022 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java @@ -38,12 +38,14 @@ import org.opensearch.cluster.coordination.Coordinator.Mode; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; +import 
org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.transport.TransportResponse.Empty; import org.opensearch.monitor.NodeHealthService; import org.opensearch.monitor.StatusInfo; import org.opensearch.threadpool.ThreadPool.Names; @@ -55,7 +57,6 @@ import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestOptions; import org.opensearch.transport.TransportRequestOptions.Type; -import org.opensearch.transport.TransportResponse.Empty; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; @@ -98,7 +99,9 @@ public class FollowersChecker { "cluster.fault_detection.follower_check.timeout", TimeValue.timeValueMillis(10000), TimeValue.timeValueMillis(1), - Setting.Property.NodeScope + TimeValue.timeValueMillis(60000), + Setting.Property.NodeScope, + Setting.Property.Dynamic ); // the number of failed checks that must happen before the follower is considered to have failed. @@ -112,7 +115,7 @@ public class FollowersChecker { private final Settings settings; private final TimeValue followerCheckInterval; - private final TimeValue followerCheckTimeout; + private TimeValue followerCheckTimeout; private final int followerCheckRetryCount; private final BiConsumer<DiscoveryNode, String> onNodeFailure; private final Consumer<FollowerCheckRequest> handleRequestAndUpdateState; @@ -127,6 +130,7 @@ public class FollowersChecker { public FollowersChecker( Settings settings, + ClusterSettings clusterSettings, TransportService transportService, Consumer<FollowerCheckRequest> handleRequestAndUpdateState, BiConsumer<DiscoveryNode, String> onNodeFailure, @@ -141,7 +145,7 @@ public FollowersChecker( followerCheckInterval = FOLLOWER_CHECK_INTERVAL_SETTING.get(settings); followerCheckTimeout = FOLLOWER_CHECK_TIMEOUT_SETTING.get(settings); followerCheckRetryCount = FOLLOWER_CHECK_RETRY_COUNT_SETTING.get(settings); - + clusterSettings.addSettingsUpdateConsumer(FOLLOWER_CHECK_TIMEOUT_SETTING, this::setFollowerCheckTimeout); updateFastResponseState(0, Mode.CANDIDATE); transportService.registerRequestHandler( FOLLOWER_CHECK_ACTION_NAME, @@ -159,6 +163,10 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti }); } + private void setFollowerCheckTimeout(TimeValue followerCheckTimeout) { + this.followerCheckTimeout = followerCheckTimeout; + } + /** * Update the set of known nodes, starting to check any new ones and stopping checking any previously-known-but-now-unknown ones. 
*/ diff --git a/server/src/main/java/org/opensearch/cluster/coordination/InMemoryPersistedState.java b/server/src/main/java/org/opensearch/cluster/coordination/InMemoryPersistedState.java index 67ef82ee7b2e9..b77ede5471534 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/InMemoryPersistedState.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/InMemoryPersistedState.java @@ -65,6 +65,11 @@ public void setLastAcceptedState(ClusterState clusterState) { this.acceptedState = clusterState; } + @Override + public PersistedStateStats getStats() { + return null; + } + @Override public long getCurrentTerm() { return currentTerm; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java index 42f09f95a7f56..9bf6bac07da53 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java @@ -36,7 +36,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionListenerResponseHandler; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateTaskConfig; @@ -50,15 +49,19 @@ import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.service.ClusterManagerService; import org.opensearch.common.Priority; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse.Empty; import org.opensearch.monitor.NodeHealthService; import org.opensearch.monitor.StatusInfo; +import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool.Names; import org.opensearch.transport.BytesTransportRequest; @@ -67,8 +70,6 @@ import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestOptions; -import org.opensearch.transport.TransportResponse; -import org.opensearch.transport.TransportResponse.Empty; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; @@ -135,6 +136,7 @@ public class JoinHelper { AllocationService allocationService, ClusterManagerService clusterManagerService, TransportService transportService, + RemoteStoreNodeService remoteStoreNodeService, LongSupplier currentTermSupplier, Supplier<ClusterState> currentStateSupplier, BiConsumer<JoinRequest, JoinCallback> joinHandler, @@ -152,7 +154,13 @@ public class JoinHelper { this.nodeCommissioned = nodeCommissioned; this.namedWriteableRegistry = namedWriteableRegistry; - this.joinTaskExecutorGenerator = () -> new JoinTaskExecutor(settings, 
allocationService, logger, rerouteService) { + this.joinTaskExecutorGenerator = () -> new JoinTaskExecutor( + settings, + allocationService, + logger, + rerouteService, + remoteStoreNodeService + ) { private final long term = currentTermSupplier.getAsLong(); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java index 626e47108cc63..bc365b9872037 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java @@ -33,7 +33,6 @@ import org.apache.logging.log4j.Logger; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateTaskExecutor; import org.opensearch.cluster.NotClusterManagerException; @@ -41,12 +40,16 @@ import org.opensearch.cluster.decommission.NodeDecommissionedException; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.RerouteService; import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; +import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.persistent.PersistentTasksCustomMetadata; import java.util.ArrayList; @@ -55,12 +58,16 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.function.BiConsumer; import java.util.stream.Collectors; import static org.opensearch.cluster.decommission.DecommissionHelper.nodeCommissioned; import static org.opensearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode.STRICT; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; /** * Main executor for Nodes joining the OpenSearch cluster @@ -74,6 +81,8 @@ public class JoinTaskExecutor implements ClusterStateTaskExecutor<JoinTaskExecut private final Logger logger; private final RerouteService rerouteService; + private final RemoteStoreNodeService remoteStoreNodeService; + /** * Task for the join task executor. 
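* A task is either a node-join for a concrete {@link DiscoveryNode} or one of two internal markers, the become-cluster-manager task and the finish-election task, which the executor recognizes via the isBecomeClusterManagerTask() and isFinishElectionTask() checks below.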
* @@ -125,10 +134,17 @@ public boolean isFinishElectionTask() { private static final String FINISH_ELECTION_TASK_REASON = "_FINISH_ELECTION_"; } - public JoinTaskExecutor(Settings settings, AllocationService allocationService, Logger logger, RerouteService rerouteService) { + public JoinTaskExecutor( + Settings settings, + AllocationService allocationService, + Logger logger, + RerouteService rerouteService, + RemoteStoreNodeService remoteStoreNodeService + ) { this.allocationService = allocationService; this.logger = logger; this.rerouteService = rerouteService; + this.remoteStoreNodeService = remoteStoreNodeService; } @Override @@ -161,6 +177,16 @@ public ClusterTasksResult<Task> execute(ClusterState currentState, List<Task> jo DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(newState.nodes()); + // An optimization is possible here: this runs for every batch of node-join tasks, and the + // computation could be skipped when the cluster state already has the repository information. + Optional<DiscoveryNode> remoteDN = currentNodes.getNodes().values().stream().filter(DiscoveryNode::isRemoteStoreNode).findFirst(); + DiscoveryNode dn = remoteDN.orElseGet(() -> (currentNodes.getNodes().values()).stream().findFirst().get()); + RepositoriesMetadata repositoriesMetadata = remoteStoreNodeService.updateRepositoriesMetadata( + dn, + currentState.getMetadata().custom(RepositoriesMetadata.TYPE) + ); + assert nodesBuilder.isLocalNodeElectedClusterManager(); Version minClusterNodeVersion = newState.nodes().getMinNodeVersion(); @@ -170,17 +196,17 @@ public ClusterTasksResult<Task> execute(ClusterState currentState, List<Task> jo // processing any joins Map<String, String> joiniedNodeNameIds = new HashMap<>(); for (final Task joinTask : joiningNodes) { + final DiscoveryNode node = joinTask.node(); if (joinTask.isBecomeClusterManagerTask() || joinTask.isFinishElectionTask()) { // noop - } else if (currentNodes.nodeExistsWithSameRoles(joinTask.node())) { - logger.debug("received a join request for an existing node [{}]", joinTask.node()); + } else if (currentNodes.nodeExistsWithSameRoles(node)) { + logger.debug("received a join request for an existing node [{}]", node); } else { - final DiscoveryNode node = joinTask.node(); try { if (enforceMajorVersion) { ensureMajorVersionBarrier(node.getVersion(), minClusterNodeVersion); } - ensureNodesCompatibility(node.getVersion(), minClusterNodeVersion, maxClusterNodeVersion); + ensureNodesCompatibility(node, currentNodes, currentState.metadata(), minClusterNodeVersion, maxClusterNodeVersion); // we do this validation quite late to prevent race conditions between nodes joining and importing dangling indices // we have to reject nodes that don't support all indices we have in this cluster ensureIndexCompatibility(node.getVersion(), currentState.getMetadata()); @@ -188,6 +214,16 @@ public ClusterTasksResult<Task> execute(ClusterState currentState, List<Task> jo // would guarantee that a decommissioned node would never be able to join the cluster and ensures correctness ensureNodeCommissioned(node, currentState.metadata()); nodesBuilder.add(node); + + if (remoteDN.isEmpty()) { + // This is hit only in cases where we encounter the first remote node + logger.info("Updating system repository now for remote store"); + repositoriesMetadata = remoteStoreNodeService.updateRepositoriesMetadata( + node, + currentState.getMetadata().custom(RepositoriesMetadata.TYPE) + ); + } + nodesChanged = true; minClusterNodeVersion = Version.min(minClusterNodeVersion,
node.getVersion()); maxClusterNodeVersion = Version.max(maxClusterNodeVersion, node.getVersion()); @@ -232,16 +268,36 @@ public ClusterTasksResult<Task> execute(ClusterState currentState, List<Task> jo .coordinationMetadata(coordMetadataBuilder.build()) .build(); return results.build( - allocationService.adaptAutoExpandReplicas(newState.nodes(nodesBuilder).metadata(newMetadata).build()) + allocationService.adaptAutoExpandReplicas( + newState.nodes(nodesBuilder) + .metadata(updateMetadataWithRepositoriesMetadata(newMetadata, repositoriesMetadata)) + .build() + ) ); } } - return results.build(allocationService.adaptAutoExpandReplicas(newState.nodes(nodesBuilder).build())); + return results.build( + allocationService.adaptAutoExpandReplicas( + newState.nodes(nodesBuilder) + .metadata(updateMetadataWithRepositoriesMetadata(currentState.metadata(), repositoriesMetadata)) + .build() + ) + ); } else { // we must return a new cluster state instance to force publishing. This is important // for the joining node to finalize its join and set us as a cluster-manager - return results.build(newState.build()); + return results.build( + newState.metadata(updateMetadataWithRepositoriesMetadata(currentState.metadata(), repositoriesMetadata)).build() + ); + } + } + + private Metadata updateMetadataWithRepositoriesMetadata(Metadata currentMetadata, RepositoriesMetadata repositoriesMetadata) { + if (repositoriesMetadata == null || repositoriesMetadata.repositories() == null || repositoriesMetadata.repositories().isEmpty()) { + return currentMetadata; + } else { + return Metadata.builder(currentMetadata).putCustom(RepositoriesMetadata.TYPE, repositoriesMetadata).build(); } } @@ -359,16 +415,24 @@ public static void ensureIndexCompatibility(final Version nodeVersion, Metadata /** * ensures that the joining node has a version that's compatible with all current nodes */ - public static void ensureNodesCompatibility(final Version joiningNodeVersion, DiscoveryNodes currentNodes) { + public static void ensureNodesCompatibility(final DiscoveryNode joiningNode, DiscoveryNodes currentNodes, Metadata metadata) { final Version minNodeVersion = currentNodes.getMinNodeVersion(); final Version maxNodeVersion = currentNodes.getMaxNodeVersion(); - ensureNodesCompatibility(joiningNodeVersion, minNodeVersion, maxNodeVersion); + ensureNodesCompatibility(joiningNode, currentNodes, metadata, minNodeVersion, maxNodeVersion); } /** - * ensures that the joining node has a version that's compatible with a given version range + * ensures that the joining node has a version that's compatible with a given version range and that the + * joining node has the attributes required to join a remote store cluster. */ - public static void ensureNodesCompatibility(Version joiningNodeVersion, Version minClusterNodeVersion, Version maxClusterNodeVersion) { + public static void ensureNodesCompatibility( + DiscoveryNode joiningNode, + DiscoveryNodes currentNodes, + Metadata metadata, + Version minClusterNodeVersion, + Version maxClusterNodeVersion + ) { + Version joiningNodeVersion = joiningNode.getVersion(); assert minClusterNodeVersion.onOrBefore(maxClusterNodeVersion) : minClusterNodeVersion + " > " + maxClusterNodeVersion; if (joiningNodeVersion.isCompatible(maxClusterNodeVersion) == false) { throw new IllegalStateException( @@ -390,6 +454,8 @@ public static void ensureNodesCompatibility(Version joiningNodeVersion, Version + "], which is incompatible."
); } + + ensureRemoteStoreNodesCompatibility(joiningNode, currentNodes, metadata); } /** @@ -422,12 +488,75 @@ public static void ensureNodeCommissioned(DiscoveryNode node, Metadata metadata) } } + /** + * This method ensures homogeneity - + * 1. The joining node has to be remote store backed if it is joining a remote store backed cluster. Validates + * the remote store attributes of the joining node against the existing nodes of the cluster. + * 2. The joining node has to be non-remote store backed if it is joining a non-remote store backed cluster. + * Validates that no remote store attributes are present on the joining node, as the existing nodes in the + * cluster don't have remote store attributes. + * <p> + * A remote store backed node is one which holds all the remote store attributes, and a remote store backed + * cluster is one which has only homogeneous remote store backed nodes with the same node attributes. + * <p> + * TODO: When we support moving from a remote store cluster to a non remote store cluster and vice versa, this + * logic will need to be modified. + */ + private static void ensureRemoteStoreNodesCompatibility(DiscoveryNode joiningNode, DiscoveryNodes currentNodes, Metadata metadata) { + List<DiscoveryNode> existingNodes = new ArrayList<>(currentNodes.getNodes().values()); + + assert existingNodes.isEmpty() == false; + + CompatibilityMode remoteStoreCompatibilityMode = REMOTE_STORE_COMPATIBILITY_MODE_SETTING.get(metadata.settings()); + if (STRICT.equals(remoteStoreCompatibilityMode)) { + + DiscoveryNode existingNode = existingNodes.get(0); + if (joiningNode.isRemoteStoreNode()) { + ensureRemoteStoreNodesCompatibility(joiningNode, existingNode); + } else { + if (existingNode.isRemoteStoreNode()) { + throw new IllegalStateException( + "a non remote store node [" + joiningNode + "] is trying to join a remote store cluster" + ); + } + } + } else { + if (remoteStoreCompatibilityMode == CompatibilityMode.MIXED) { + if (joiningNode.isRemoteStoreNode()) { + Optional<DiscoveryNode> remoteDN = existingNodes.stream().filter(DiscoveryNode::isRemoteStoreNode).findFirst(); + remoteDN.ifPresent(discoveryNode -> ensureRemoteStoreNodesCompatibility(joiningNode, discoveryNode)); + } + } + } + } + + private static void ensureRemoteStoreNodesCompatibility(DiscoveryNode joiningNode, DiscoveryNode existingNode) { + if (joiningNode.isRemoteStoreNode()) { + if (existingNode.isRemoteStoreNode()) { + RemoteStoreNodeAttribute joiningRemoteStoreNodeAttribute = new RemoteStoreNodeAttribute(joiningNode); + RemoteStoreNodeAttribute existingRemoteStoreNodeAttribute = new RemoteStoreNodeAttribute(existingNode); + if (existingRemoteStoreNodeAttribute.equals(joiningRemoteStoreNodeAttribute) == false) { + throw new IllegalStateException( + "a remote store node [" + + joiningNode + + "] is trying to join a remote store cluster with incompatible node attributes in " + + "comparison with existing node [" + + existingNode + + "]" + ); + } + } else { + throw new IllegalStateException("a remote store node [" + joiningNode + "] is trying to join a non remote store cluster"); + } + } + } + public static Collection<BiConsumer<DiscoveryNode, ClusterState>> addBuiltInJoinValidators( Collection<BiConsumer<DiscoveryNode, ClusterState>> onJoinValidators ) { final Collection<BiConsumer<DiscoveryNode, ClusterState>> validators = new ArrayList<>(); validators.add((node, state) -> { - ensureNodesCompatibility(node.getVersion(), state.getNodes()); + ensureNodesCompatibility(node, state.getNodes(), state.metadata());
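+            // A descriptive summary of the remote store validation invoked above: in STRICT mode the joining node must mirror the first existing node, i.e. remote joins remote only when both expose equal RemoteStoreNodeAttribute values and non-remote joins non-remote only; in MIXED mode remote and non-remote nodes may coexist, and a joining remote node is validated only against an existing remote node, if one is present.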
ensureIndexCompatibility(node.getVersion(), state.getMetadata()); ensureNodeCommissioned(node, state.getMetadata()); }); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java b/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java index f43abf0080575..8d4373b865f62 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java @@ -39,12 +39,15 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.Nullable; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.lease.Releasable; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse.Empty; import org.opensearch.monitor.NodeHealthService; import org.opensearch.monitor.StatusInfo; import org.opensearch.threadpool.ThreadPool.Names; @@ -56,8 +59,6 @@ import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestOptions; import org.opensearch.transport.TransportRequestOptions.Type; -import org.opensearch.transport.TransportResponse; -import org.opensearch.transport.TransportResponse.Empty; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; @@ -97,7 +98,9 @@ public class LeaderChecker { "cluster.fault_detection.leader_check.timeout", TimeValue.timeValueMillis(10000), TimeValue.timeValueMillis(1), - Setting.Property.NodeScope + TimeValue.timeValueMillis(60000), + Setting.Property.NodeScope, + Setting.Property.Dynamic ); // the number of failed checks that must happen before the leader is considered to have failed. @@ -111,7 +114,7 @@ public class LeaderChecker { private final Settings settings; private final TimeValue leaderCheckInterval; - private final TimeValue leaderCheckTimeout; + private TimeValue leaderCheckTimeout; private final int leaderCheckRetryCount; private final TransportService transportService; private final Consumer<Exception> onLeaderFailure; @@ -123,6 +126,7 @@ public class LeaderChecker { LeaderChecker( final Settings settings, + final ClusterSettings clusterSettings, final TransportService transportService, final Consumer<Exception> onLeaderFailure, NodeHealthService nodeHealthService @@ -134,6 +138,7 @@ public class LeaderChecker { this.transportService = transportService; this.onLeaderFailure = onLeaderFailure; this.nodeHealthService = nodeHealthService; + clusterSettings.addSettingsUpdateConsumer(LEADER_CHECK_TIMEOUT_SETTING, this::setLeaderCheckTimeout); transportService.registerRequestHandler( LEADER_CHECK_ACTION_NAME, @@ -155,6 +160,10 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti }); } + private void setLeaderCheckTimeout(TimeValue leaderCheckTimeout) { + this.leaderCheckTimeout = leaderCheckTimeout; + } + public DiscoveryNode leader() { CheckScheduler checkScheduler = currentChecker.get(); return checkScheduler == null ? 
null : checkScheduler.leader; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/OpenSearchNodeCommand.java b/server/src/main/java/org/opensearch/cluster/coordination/OpenSearchNodeCommand.java index 3f4c4ee6f180a..259d8961a3e78 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/OpenSearchNodeCommand.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/OpenSearchNodeCommand.java @@ -50,10 +50,10 @@ import org.opensearch.cluster.metadata.DataStreamMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.collect.Tuple; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.BigArrays; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PeersResponse.java b/server/src/main/java/org/opensearch/cluster/coordination/PeersResponse.java index b68f689ef63fd..8a70c71d53fdd 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PeersResponse.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PeersResponse.java @@ -35,7 +35,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; import java.util.List; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateRegistry.java b/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateRegistry.java new file mode 100644 index 0000000000000..470ab02a682a8 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateRegistry.java @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster.coordination; + +import org.opensearch.cluster.coordination.CoordinationState.PersistedState; +import org.opensearch.common.util.io.IOUtils; + +import java.io.Closeable; +import java.io.IOException; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +/** + * A class which encapsulates the PersistedState instances present on a node + * + * @opensearch.internal + */ +public class PersistedStateRegistry implements Closeable { + + public PersistedStateRegistry() {} + + /** + * Distinct types of PersistedState which can be present on a node + */ + public enum PersistedStateType { + LOCAL, + REMOTE; + } + + private final Map<PersistedStateType, PersistedState> persistedStates = new ConcurrentHashMap<>(); + + public void addPersistedState(PersistedStateType persistedStateType, PersistedState persistedState) { + PersistedState existingState = this.persistedStates.putIfAbsent(persistedStateType, persistedState); + assert existingState == null : "should only be set once, but already have " + existingState; + } + + public PersistedState getPersistedState(PersistedStateType persistedStateType) { + return this.persistedStates.get(persistedStateType); + } + + @Override + public void close() throws IOException { + IOUtils.close(persistedStates.values()); + } + +} diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateStats.java b/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateStats.java new file mode 100644 index 0000000000000..0b7ed4fee5775 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateStats.java @@ -0,0 +1,134 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.coordination; + +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; + +/** + * Persisted cluster state related stats.
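+ * Tracks the number of successful and failed persistence attempts and the cumulative time taken, along with optional implementation-specific extended fields.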
+ * + * @opensearch.api + */ +@PublicApi(since = "2.12.0") +public class PersistedStateStats implements Writeable, ToXContentObject { + private final String statsName; + private AtomicLong totalTimeInMillis = new AtomicLong(0); + private AtomicLong failedCount = new AtomicLong(0); + private AtomicLong successCount = new AtomicLong(0); + private Map<String, AtomicLong> extendedFields = new HashMap<>(); // keeping minimal extensibility + + public PersistedStateStats(String statsName) { + this.statsName = statsName; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(statsName); + out.writeVLong(successCount.get()); + out.writeVLong(failedCount.get()); + out.writeVLong(totalTimeInMillis.get()); + if (extendedFields.size() > 0) { + out.writeBoolean(true); + out.writeVInt(extendedFields.size()); + for (Map.Entry<String, AtomicLong> extendedField : extendedFields.entrySet()) { + out.writeString(extendedField.getKey()); + out.writeVLong(extendedField.getValue().get()); + } + } else { + out.writeBoolean(false); + } + } + + public PersistedStateStats(StreamInput in) throws IOException { + this.statsName = in.readString(); + this.successCount = new AtomicLong(in.readVLong()); + this.failedCount = new AtomicLong(in.readVLong()); + this.totalTimeInMillis = new AtomicLong(in.readVLong()); + if (in.readBoolean()) { + int extendedFieldsSize = in.readVInt(); + this.extendedFields = new HashMap<>(); + for (int fieldNumber = 0; fieldNumber < extendedFieldsSize; fieldNumber++) { + extendedFields.put(in.readString(), new AtomicLong(in.readVLong())); + } + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(statsName); + builder.field(Fields.SUCCESS_COUNT, getSuccessCount()); + builder.field(Fields.FAILED_COUNT, getFailedCount()); + builder.field(Fields.TOTAL_TIME_IN_MILLIS, getTotalTimeInMillis()); + if (extendedFields.size() > 0) { + for (Map.Entry<String, AtomicLong> extendedField : extendedFields.entrySet()) { + builder.field(extendedField.getKey(), extendedField.getValue().get()); + } + } + builder.endObject(); + return builder; + } + + public void stateFailed() { + failedCount.incrementAndGet(); + } + + public void stateSucceeded() { + successCount.incrementAndGet(); + } + + /** + * Expects user to send time taken in milliseconds. 
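+     * For example (hypothetical usage), record {@code long start = System.currentTimeMillis();} before the upload and call {@code stateTook(System.currentTimeMillis() - start)} once it completes.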
+ * + * @param timeTakenInUpload time taken in uploading the cluster state to remote + */ + public void stateTook(long timeTakenInUpload) { + totalTimeInMillis.addAndGet(timeTakenInUpload); + } + + public long getTotalTimeInMillis() { + return totalTimeInMillis.get(); + } + + public long getFailedCount() { + return failedCount.get(); + } + + public long getSuccessCount() { + return successCount.get(); + } + + protected void addToExtendedFields(String extendedField, AtomicLong extendedFieldValue) { + this.extendedFields.put(extendedField, extendedFieldValue); + } + + public String getStatsName() { + return statsName; + } + + /** + * Fields for parsing and toXContent + * + * @opensearch.internal + */ + static final class Fields { + static final String SUCCESS_COUNT = "success_count"; + static final String TOTAL_TIME_IN_MILLIS = "total_time_in_millis"; + static final String FAILED_COUNT = "failed_count"; + } +} diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PreVoteCollector.java b/server/src/main/java/org/opensearch/cluster/coordination/PreVoteCollector.java index f8e568d3f5a0a..cc4d1ac156c53 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PreVoteCollector.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PreVoteCollector.java @@ -40,8 +40,8 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Nullable; import org.opensearch.common.collect.Tuple; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.lease.Releasable; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.monitor.NodeHealthService; import org.opensearch.monitor.StatusInfo; import org.opensearch.threadpool.ThreadPool.Names; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PreVoteResponse.java b/server/src/main/java/org/opensearch/cluster/coordination/PreVoteResponse.java index c8186441db449..9c683f7de0878 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PreVoteResponse.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PreVoteResponse.java @@ -34,7 +34,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; import java.util.Objects; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Publication.java b/server/src/main/java/org/opensearch/cluster/coordination/Publication.java index 429890e7420de..43801a05dbc24 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Publication.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Publication.java @@ -37,13 +37,13 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.ClusterStatePublisher.AckListener; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportException; -import org.opensearch.transport.TransportResponse; import java.util.ArrayList; import java.util.List; diff --git 
a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java index 60c931a601561..1fdaeead0d28d 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java @@ -36,22 +36,22 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.OpenSearchException; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.Diff; import org.opensearch.cluster.IncompatibleClusterStateVersionException; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.BytesTransportRequest; import org.opensearch.transport.TransportChannel; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequestOptions; -import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PublishWithJoinResponse.java b/server/src/main/java/org/opensearch/cluster/coordination/PublishWithJoinResponse.java index f99ba82be5514..f6350c5558a82 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PublishWithJoinResponse.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PublishWithJoinResponse.java @@ -33,7 +33,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; import java.util.Optional; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java b/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java index 1570a84ab871f..128bd42fd7947 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java @@ -63,7 +63,7 @@ public class Reconfigurator { * and still the cluster would be unavailable. However not reducing the voting configuration size can also hamper resilience: in a * five-node cluster we could lose two nodes and by reducing the voting configuration to the remaining three nodes we could tolerate the * loss of a further node before failing. - * + * <p> * We offer two options: either we auto-shrink the voting configuration as long as it contains more than three nodes, or we don't and we * require the user to control the voting configuration manually using the retirement API. 
The former, default, option, guarantees that * as long as there have been at least three cluster-manager-eligible nodes in the cluster and no more than one of them is currently unavailable, diff --git a/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapClusterManagerCommand.java b/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapClusterManagerCommand.java index 188ea1325e806..168ae5212888f 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapClusterManagerCommand.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapClusterManagerCommand.java @@ -53,6 +53,8 @@ import java.util.Locale; import java.util.Objects; +import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; + /** * Tool to run an unsafe bootstrap * @@ -81,7 +83,11 @@ public class UnsafeBootstrapClusterManagerCommand extends OpenSearchNodeCommand static final Setting<String> UNSAFE_BOOTSTRAP = ClusterService.USER_DEFINED_METADATA.getConcreteSetting( "cluster.metadata.unsafe-bootstrap" ); - + static final String REMOTE_CLUSTER_STATE_ENABLED_NODE = + "Unsafe bootstrap cannot be performed when remote cluster state is enabled. The cluster state in the remote store is considered the source of truth. " + + "In case you still wish to do best effort recovery with unsafe-bootstrap, please disable the " + + REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey() + + ". For more details, please check the OpenSearch documentation."; private OptionSpec<Boolean> applyClusterReadOnlyBlockOption; UnsafeBootstrapClusterManagerCommand() { @@ -101,6 +107,13 @@ protected boolean validateBeforeLock(Terminal terminal, Environment env) { if (clusterManager == false) { throw new OpenSearchException(NOT_CLUSTER_MANAGER_NODE_MSG); } + // During unsafe bootstrap, the node will form a cluster with a new cluster UUID but with the existing metadata. + // This new state will not know about the previous cluster UUIDs, and so we will not be able to construct + // the cluster UUID chain to get the last known cluster UUID to restore from. + // Blocking unsafe-bootstrap below for this reason. + if (REMOTE_CLUSTER_STATE_ENABLED_SETTING.get(settings) == true) { + throw new OpenSearchException(REMOTE_CLUSTER_STATE_ENABLED_NODE); + } return true; } diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttribute.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttribute.java index 3f67870781580..914743299b023 100644 --- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttribute.java +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttribute.java @@ -8,6 +8,7 @@ package org.opensearch.cluster.decommission; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -18,8 +19,9 @@ /** * {@link DecommissionAttribute} encapsulates information about decommissioned node attribute like attribute name, attribute value.
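* For example, an awareness attribute named {@code zone} with a value such as {@code zone-1} (illustrative values) identifies the zone whose nodes are being decommissioned.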
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public final class DecommissionAttribute implements Writeable { private final String attributeName; private final String attributeValue; diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttributeMetadata.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttributeMetadata.java index 8af783bbdc52e..254d70b0422d7 100644 --- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttributeMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionAttributeMetadata.java @@ -14,10 +14,11 @@ import org.opensearch.cluster.NamedDiff; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.Metadata.Custom; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -30,8 +31,9 @@ /** * Contains metadata about decommission attribute * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class DecommissionAttributeMetadata extends AbstractNamedDiffable<Custom> implements Custom { public static final String TYPE = "decommissionedAttribute"; @@ -282,6 +284,6 @@ public static void toXContent( @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } } diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionController.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionController.java index 7a21dae35af94..fec313b4b0b73 100644 --- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionController.java +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionController.java @@ -11,7 +11,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.OpenSearchTimeoutException; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsAction; import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; @@ -27,8 +26,9 @@ import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Priority; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.http.HttpStats; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportException; diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionHelper.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionHelper.java index 6c76fd923aad0..4cd65e4f32453 100644 --- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionHelper.java +++ 
b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionHelper.java @@ -20,8 +20,8 @@ import java.util.Iterator; import java.util.Set; -import static org.opensearch.action.admin.cluster.configuration.VotingConfigExclusionsHelper.resolveVotingConfigExclusionsAndCheckMaximum; import static org.opensearch.action.admin.cluster.configuration.VotingConfigExclusionsHelper.addExclusionAndGetState; +import static org.opensearch.action.admin.cluster.configuration.VotingConfigExclusionsHelper.resolveVotingConfigExclusionsAndCheckMaximum; /** * Static helper utilities to execute decommission diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java index 2e27898dd413c..7ff894afa21a7 100644 --- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java @@ -12,10 +12,9 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.OpenSearchTimeoutException; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateResponse; -import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse; import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequest; +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateObserver; import org.opensearch.cluster.ClusterStateObserver.Listener; @@ -33,6 +32,7 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionStatus.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionStatus.java index 4ca8c3cc4286e..f4f6cbf632ae2 100644 --- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionStatus.java +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionStatus.java @@ -8,9 +8,14 @@ package org.opensearch.cluster.decommission; +import org.opensearch.common.annotation.PublicApi; + /** * An enumeration of the states during decommissioning + * + * @opensearch.api */ +@PublicApi(since = "2.4.0") public enum DecommissionStatus { /** * Decommission process is initiated, and to-be-decommissioned leader is excluded from voting config diff --git a/server/src/main/java/org/opensearch/cluster/health/ClusterHealthStatus.java b/server/src/main/java/org/opensearch/cluster/health/ClusterHealthStatus.java index 5ea482b8b8ffa..06f6b3d57385d 100644 --- a/server/src/main/java/org/opensearch/cluster/health/ClusterHealthStatus.java +++ b/server/src/main/java/org/opensearch/cluster/health/ClusterHealthStatus.java @@ -32,6 +32,7 @@ package org.opensearch.cluster.health; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -41,8 +42,9 @@ /** * Cluster health status * - * @opensearch.internal + 
* @opensearch.api */ +@PublicApi(since = "1.0.0") public enum ClusterHealthStatus implements Writeable { GREEN((byte) 0), YELLOW((byte) 1), diff --git a/server/src/main/java/org/opensearch/cluster/health/ClusterIndexHealth.java b/server/src/main/java/org/opensearch/cluster/health/ClusterIndexHealth.java index 0bb762e3ff744..19c64965e6941 100644 --- a/server/src/main/java/org/opensearch/cluster/health/ClusterIndexHealth.java +++ b/server/src/main/java/org/opensearch/cluster/health/ClusterIndexHealth.java @@ -35,6 +35,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -61,8 +62,9 @@ /** * Cluster Index Health Information * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ClusterIndexHealth implements Iterable<ClusterShardHealth>, Writeable, ToXContentFragment { private static final String STATUS = "status"; private static final String NUMBER_OF_SHARDS = "number_of_shards"; diff --git a/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java b/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java index 00a83c85c17be..1fe88f65248c2 100644 --- a/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java +++ b/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java @@ -37,13 +37,14 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.routing.UnassignedInfo.AllocationStatus; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -52,14 +53,15 @@ import java.util.Locale; import java.util.Objects; -import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; import static org.opensearch.core.xcontent.ConstructingObjectParser.constructorArg; +import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; /** * Cluster shard health information * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ClusterShardHealth implements Writeable, ToXContentFragment { private static final String STATUS = "status"; private static final String ACTIVE_SHARDS = "active_shards"; @@ -219,13 +221,13 @@ public void writeTo(final StreamOutput out) throws IOException { /** * Checks if an inactive primary shard should cause the cluster health to go RED. - * + * <p> * An inactive primary shard in an index should cause the cluster health to be RED to make it visible that some of the existing data is * unavailable. 
In case of index creation, snapshot restore or index shrinking, which are unexceptional events in the cluster lifecycle, * cluster health should not turn RED while primaries are still in the initializing state, but should go to YELLOW instead. * However, in case of exceptional events, for example when the primary shard cannot be assigned to a node or initialization fails at * some point, cluster health should still turn RED. - * + * <p> * NB: this method should *not* be called on active shards or on non-primary shards. */ public static ClusterHealthStatus getInactivePrimaryHealth(final ShardRouting shardRouting) { @@ -275,7 +277,7 @@ public static ClusterShardHealth fromXContent(XContentParser parser) throws IOEx @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } @Override diff --git a/server/src/main/java/org/opensearch/cluster/metadata/AliasAction.java b/server/src/main/java/org/opensearch/cluster/metadata/AliasAction.java index 47c6cad04343e..dac6ecaa13781 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/AliasAction.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/AliasAction.java @@ -35,6 +35,7 @@ import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest; import org.opensearch.common.Nullable; import org.opensearch.core.common.Strings; +import org.opensearch.rest.action.admin.indices.AliasesNotFoundException; /** * Individual operation to perform on the cluster state as part of an {@link IndicesAliasesRequest}. @@ -225,8 +226,8 @@ boolean removeIndex() { @Override boolean apply(NewAliasValidator aliasValidator, Metadata.Builder metadata, IndexMetadata index) { if (false == index.getAliases().containsKey(alias)) { - if (mustExist != null && mustExist.booleanValue()) { - throw new IllegalArgumentException("required alias [" + alias + "] does not exist"); + if (mustExist != null && mustExist) { + throw new AliasesNotFoundException(alias); } return false; } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/AliasMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/AliasMetadata.java index 44f6f2d7313a3..8b3cc3c3cc2cc 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/AliasMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/AliasMetadata.java @@ -36,15 +36,16 @@ import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.Diff; import org.opensearch.common.Nullable; -import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.compress.CompressedXContent; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.util.set.Sets; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -61,8 +62,9 @@ /** * Metadata for index aliases * - *
@opensearch.api */ +@PublicApi(since = "1.0.0") public class AliasMetadata extends AbstractDiffable<AliasMetadata> implements ToXContentFragment { private final String alias; @@ -259,7 +261,7 @@ public static Diff<AliasMetadata> readDiffFrom(StreamInput in) throws IOExceptio @Override public String toString() { - return org.opensearch.common.Strings.toString(XContentType.JSON, this, true, true); + return Strings.toString(MediaTypeRegistry.JSON, this, true, true); } @Override @@ -271,8 +273,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws /** * Builder of alias metadata. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private final String alias; @@ -307,7 +310,7 @@ public Builder filter(String filter) { this.filter = null; return this; } - return filter(XContentHelper.convertToMap(XContentFactory.xContent(filter), filter, true)); + return filter(XContentHelper.convertToMap(MediaTypeRegistry.xContent(filter).xContent(), filter, true)); } public Builder filter(Map<String, Object> filter) { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/AliasValidator.java b/server/src/main/java/org/opensearch/cluster/metadata/AliasValidator.java index 5a019804f5eac..db7f38518b80d 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/AliasValidator.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/AliasValidator.java @@ -34,12 +34,12 @@ import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.common.Nullable; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryShardContext; @@ -88,7 +88,7 @@ public void validateAliasStandalone(Alias alias) { validateAliasStandalone(alias.name(), alias.indexRouting()); if (Strings.hasLength(alias.filter())) { try { - XContentHelper.convertToMap(XContentFactory.xContent(alias.filter()), alias.filter(), false); + XContentHelper.convertToMap(MediaTypeRegistry.xContent(alias.filter()).xContent(), alias.filter(), false); } catch (Exception e) { throw new IllegalArgumentException("failed to parse filter for alias [" + alias.name() + "]", e); } @@ -134,7 +134,8 @@ public void validateAliasFilter( ) { assert queryShardContext != null; try ( - XContentParser parser = XContentFactory.xContent(filter) + XContentParser parser = MediaTypeRegistry.xContent(filter) + .xContent() .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, filter) ) { validateAliasFilter(parser, queryShardContext); @@ -158,7 +159,7 @@ public void validateAliasFilter( try ( InputStream inputStream = filter.streamInput(); - XContentParser parser = XContentFactory.xContentType(inputStream) + XContentParser parser = MediaTypeRegistry.xContentType(inputStream) .xContent() .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, filter.streamInput()) ) { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/ComponentTemplate.java 
b/server/src/main/java/org/opensearch/cluster/metadata/ComponentTemplate.java index 52096422248a5..abc3712ee07e3 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/ComponentTemplate.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/ComponentTemplate.java @@ -35,12 +35,13 @@ import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.Diff; import org.opensearch.common.Nullable; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -55,8 +56,9 @@ * contains a field "foo", it's expected to contain all the necessary settings/mappings/etc for the * "foo" field. These component templates make up the individual pieces composing an index template. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ComponentTemplate extends AbstractDiffable<ComponentTemplate> implements ToXContentObject { private static final ParseField TEMPLATE = new ParseField("template"); private static final ParseField VERSION = new ParseField("version"); @@ -152,7 +154,7 @@ public boolean equals(Object obj) { @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } @Override diff --git a/server/src/main/java/org/opensearch/cluster/metadata/ComponentTemplateMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/ComponentTemplateMetadata.java index ce806b2aa1f12..d19743e643d12 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/ComponentTemplateMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/ComponentTemplateMetadata.java @@ -36,12 +36,12 @@ import org.opensearch.cluster.Diff; import org.opensearch.cluster.DiffableUtils; import org.opensearch.cluster.NamedDiff; -import org.opensearch.common.Strings; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -155,7 +155,7 @@ public boolean equals(Object obj) { @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } /** diff --git a/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java b/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java index 15e5cb5873719..e7f1b97f28842 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplate.java @@ 
-36,13 +36,14 @@ import org.opensearch.cluster.Diff; import org.opensearch.cluster.metadata.DataStream.TimestampField; import org.opensearch.common.Nullable; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -63,8 +64,9 @@ * ids corresponding to component templates that should be composed in order when creating a new * index. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ComposableIndexTemplate extends AbstractDiffable<ComposableIndexTemplate> implements ToXContentObject { private static final ParseField INDEX_PATTERNS = new ParseField("index_patterns"); private static final ParseField TEMPLATE = new ParseField("template"); @@ -278,14 +280,15 @@ public boolean equals(Object obj) { @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } /** * Template for data stream. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class DataStreamTemplate implements Writeable, ToXContentObject { private static final ParseField TIMESTAMP_FIELD_FIELD = new ParseField("timestamp_field"); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplateMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplateMetadata.java index d34416c70dc16..b2e0b2ebdc123 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplateMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/ComposableIndexTemplateMetadata.java @@ -36,12 +36,12 @@ import org.opensearch.cluster.Diff; import org.opensearch.cluster.DiffableUtils; import org.opensearch.cluster.NamedDiff; -import org.opensearch.common.Strings; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.ParseField; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -156,7 +156,7 @@ public boolean equals(Object obj) { @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } /** diff --git a/server/src/main/java/org/opensearch/cluster/metadata/CryptoMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/CryptoMetadata.java new file mode 100644 index 0000000000000..8bb25ba8e8472 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/metadata/CryptoMetadata.java @@ -0,0 +1,169 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The 
OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.metadata; + +import org.opensearch.OpenSearchParseException; +import org.opensearch.action.admin.cluster.crypto.CryptoSettings; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +/** + * Metadata about encryption and decryption + * + * @opensearch.experimental + */ +@ExperimentalApi +public class CryptoMetadata implements Writeable { + static final public String CRYPTO_METADATA_KEY = "crypto_metadata"; + static final public String KEY_PROVIDER_NAME_KEY = "key_provider_name"; + static final public String KEY_PROVIDER_TYPE_KEY = "key_provider_type"; + static final public String SETTINGS_KEY = "settings"; + private final String keyProviderName; + private final String keyProviderType; + private final Settings settings; + + /** + * Constructs new crypto metadata + * + * @param keyProviderName key provider name + * @param keyProviderType key provider type + * @param settings crypto settings + */ + public CryptoMetadata(String keyProviderName, String keyProviderType, Settings settings) { + this.keyProviderName = keyProviderName; + this.keyProviderType = keyProviderType; + this.settings = settings; + } + + /** + * Returns key provider name + * + * @return Key provider name + */ + public String keyProviderName() { + return this.keyProviderName; + } + + /** + * Returns key provider type + * + * @return key provider type + */ + public String keyProviderType() { + return this.keyProviderType; + } + + /** + * Returns crypto settings + * + * @return crypto settings + */ + public Settings settings() { + return this.settings; + } + + public CryptoMetadata(StreamInput in) throws IOException { + keyProviderName = in.readString(); + keyProviderType = in.readString(); + settings = Settings.readSettingsFromStream(in); + } + + public static CryptoMetadata fromRequest(CryptoSettings cryptoSettings) { + if (cryptoSettings == null) { + return null; + } + return new CryptoMetadata(cryptoSettings.getKeyProviderName(), cryptoSettings.getKeyProviderType(), cryptoSettings.getSettings()); + } + + /** + * Writes crypto metadata to stream output + * + * @param out stream output + */ + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(keyProviderName); + out.writeString(keyProviderType); + Settings.writeSettingsToStream(settings, out); + } + + public static CryptoMetadata fromXContent(XContentParser parser) throws IOException { + XContentParser.Token token; + String keyProviderType = null; + Settings settings = null; + String keyProviderName = parser.currentName(); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + String currentFieldName = parser.currentName(); + if (KEY_PROVIDER_NAME_KEY.equals(currentFieldName)) { + if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { + throw new OpenSearchParseException("failed to parse crypto metadata [{}], unknown type", currentFieldName); + } +
keyProviderName = parser.text(); + } else if (KEY_PROVIDER_TYPE_KEY.equals(currentFieldName)) { + if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { + throw new OpenSearchParseException("failed to parse crypto metadata [{}], unknown type", currentFieldName); + } + keyProviderType = parser.text(); + } else if (SETTINGS_KEY.equals(currentFieldName)) { + if (parser.nextToken() != XContentParser.Token.START_OBJECT) { + throw new OpenSearchParseException("failed to parse crypto metadata [{}], unknown type", currentFieldName); + } + settings = Settings.fromXContent(parser); + } else { + throw new OpenSearchParseException("failed to parse crypto metadata, unknown field [{}]", currentFieldName); + } + } else { + throw new OpenSearchParseException("failed to parse crypto metadata"); + } + } + return new CryptoMetadata(keyProviderName, keyProviderType, settings); + } + + public void toXContent(CryptoMetadata cryptoMetadata, XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(CRYPTO_METADATA_KEY); + builder.field(KEY_PROVIDER_NAME_KEY, cryptoMetadata.keyProviderName()); + builder.field(KEY_PROVIDER_TYPE_KEY, cryptoMetadata.keyProviderType()); + builder.startObject(SETTINGS_KEY); + cryptoMetadata.settings().toXContent(builder, params); + builder.endObject(); + builder.endObject(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + CryptoMetadata that = (CryptoMetadata) o; + + if (!keyProviderName.equals(that.keyProviderName)) return false; + if (!keyProviderType.equals(that.keyProviderType)) return false; + return settings.equals(that.settings); + } + + @Override + public int hashCode() { + return Objects.hash(keyProviderName, keyProviderType, settings); + } + + @Override + public String toString() { + return "CryptoMetadata{" + keyProviderName + "}{" + keyProviderType + "}{" + settings + "}"; + } +} diff --git a/server/src/main/java/org/opensearch/cluster/metadata/DataStream.java b/server/src/main/java/org/opensearch/cluster/metadata/DataStream.java index 76e1a0197bc1c..54df245b1b835 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/DataStream.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/DataStream.java @@ -37,15 +37,16 @@ import org.opensearch.OpenSearchException; import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.Diff; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.Index; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.index.Index; import java.io.IOException; import java.util.ArrayList; @@ -59,8 +60,9 @@ /** * Primary DataStream class * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class DataStream extends AbstractDiffable<DataStream> implements ToXContentObject { public static final String BACKING_INDEX_PREFIX = ".ds-"; @@ -258,8 +260,9 @@ /** * A timestamp field.
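For reference, a sketch of the document shape the new CryptoMetadata class round-trips through toXContent/fromXContent; the shape follows the field keys in the class, while the provider name, type, and settings values are placeholders:

import org.opensearch.cluster.metadata.CryptoMetadata;
import org.opensearch.common.settings.Settings;

// Shape written by toXContent (values are placeholders):
// "crypto_metadata": {
//   "key_provider_name": "sample-provider",
//   "key_provider_type": "sample-kms-type",
//   "settings": { ... key-provider specific settings ... }
// }
CryptoMetadata metadata = new CryptoMetadata("sample-provider", "sample-kms-type", Settings.EMPTY);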
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class TimestampField implements Writeable, ToXContentObject { static ParseField NAME_FIELD = new ParseField("name"); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/DataStreamMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/DataStreamMetadata.java index 89fe6e9be2320..dbda17e5b6165 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/DataStreamMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/DataStreamMetadata.java @@ -36,12 +36,12 @@ import org.opensearch.cluster.Diff; import org.opensearch.cluster.DiffableUtils; import org.opensearch.cluster.NamedDiff; -import org.opensearch.common.Strings; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -160,7 +160,7 @@ public boolean equals(Object obj) { @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } /** diff --git a/server/src/main/java/org/opensearch/cluster/metadata/DiffableStringMap.java b/server/src/main/java/org/opensearch/cluster/metadata/DiffableStringMap.java index 8209c7bb56ad8..a8102182576ff 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/DiffableStringMap.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/DiffableStringMap.java @@ -34,6 +34,7 @@ import org.opensearch.cluster.Diff; import org.opensearch.cluster.Diffable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -50,8 +51,9 @@ * This is a {@code Map<String, String>} that implements AbstractDiffable so it * can be used for cluster state purposes * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DiffableStringMap extends AbstractMap<String, String> implements Diffable<DiffableStringMap> { public static final DiffableStringMap EMPTY = new DiffableStringMap(Collections.emptyMap()); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexAbstraction.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexAbstraction.java index 0c316373e484f..0f3ee894a7f63 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexAbstraction.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexAbstraction.java @@ -33,6 +33,7 @@ import org.opensearch.common.Nullable; import org.opensearch.common.SetOnce; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; import org.opensearch.core.common.Strings; @@ -52,8 +53,9 @@ * An index abstraction has a unique name and encapsulates all the {@link IndexMetadata} instances it is pointing to. * Also depending on type it may refer to a single or many concrete indices and may or may not have a write index. 
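A short usage sketch, not part of the changeset, for the now-public IndexAbstraction API; it assumes an available Metadata instance, getIndicesLookup() is the existing name-to-abstraction accessor, and the alias name is a placeholder:

import org.opensearch.cluster.metadata.IndexAbstraction;
import org.opensearch.cluster.metadata.IndexMetadata;
import org.opensearch.cluster.metadata.Metadata;

// Given a Metadata instance, resolve a name (concrete index, alias, or data stream).
IndexAbstraction abstraction = metadata.getIndicesLookup().get("logs-alias"); // placeholder name
if (abstraction != null && abstraction.getType() == IndexAbstraction.Type.ALIAS) {
    IndexMetadata writeIndex = abstraction.getWriteIndex(); // null when the alias has no write index
}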
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface IndexAbstraction { /** @@ -102,7 +104,10 @@ public interface IndexAbstraction { /** * An index abstraction type. + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") enum Type { /** @@ -335,8 +340,9 @@ private boolean isNonEmpty(List<IndexMetadata> idxMetas) { /** * A data stream. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") class DataStream implements IndexAbstraction { private final org.opensearch.cluster.metadata.DataStream dataStream; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java index 5bf4827cbbe46..b4f8b6b188531 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java @@ -35,19 +35,20 @@ import org.opensearch.Version; import org.opensearch.cluster.Diff; import org.opensearch.cluster.NamedDiff; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.time.DateFormatter; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.settings.Setting; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.time.DateFormatter; +import org.opensearch.core.index.Index; import org.opensearch.core.xcontent.ContextParser; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.index.Index; import java.io.IOException; import java.time.Instant; @@ -61,15 +62,16 @@ /** * A collection of tombstones for explicitly marking indices as deleted in the cluster state. - * + * <p> * The cluster state contains a list of index tombstones for indices that have been * deleted in the cluster. Because cluster states are processed asynchronously by * nodes and a node could be removed from the cluster for a period of time, the * tombstones remain in the cluster state for a fixed period of time, after which * they are purged. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class IndexGraveyard implements Metadata.Custom { /** @@ -191,8 +193,9 @@ public static IndexGraveyard.Builder builder(final IndexGraveyard graveyard) { /** * A class to build an IndexGraveyard. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class Builder { private List<Tombstone> tombstones; private int numPurged = -1; @@ -250,7 +253,7 @@ public int getNumPurged() { /** * Purge tombstone entries. Returns the number of entries that were purged. - * + * <p> * Tombstones are purged if the number of tombstones in the list * is greater than the maximum number of tombstones allowed (an input parameter). * Tombstones are purged until the list size equals the maximum allowed. @@ -367,8 +370,9 @@ public String getWriteableName() { /** * An individual tombstone entry for representing a deleted index.
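The purge contract above in one concrete, illustrative calculation; the 500 default is assumed to correspond to the cluster.indices.tombstones.size setting:

// Illustrative numbers only: purging keeps at most `max` tombstones, dropping the oldest.
int max = 500;     // assumed default of cluster.indices.tombstones.size
int current = 503; // tombstones accumulated in the graveyard
int purged = Math.max(0, current - max); // -> 3; getNumPurged() would then report 3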
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class Tombstone implements ToXContentObject, Writeable { private static final String INDEX_KEY = "index"; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index 1ba38daa40566..03784df509ed6 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -32,7 +32,6 @@ package org.opensearch.cluster.metadata; -import org.opensearch.core.Assertions; import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.admin.indices.rollover.RolloverInfo; @@ -45,28 +44,30 @@ import org.opensearch.cluster.node.DiscoveryNodeFilters; import org.opensearch.cluster.routing.allocation.IndexMetadataUpdater; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.MapBuilder; import org.opensearch.common.compress.CompressedXContent; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.Assertions; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.gateway.MetadataStateFormat; -import org.opensearch.core.index.Index; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.seqno.SequenceNumbers; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.replication.common.ReplicationType; -import org.opensearch.core.rest.RestStatus; import java.io.IOException; import java.time.Instant; @@ -82,6 +83,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.function.Function; @@ -95,8 +97,9 @@ /** * Index metadata information * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexMetadata implements Diffable<IndexMetadata>, ToXContentFragment { public static final ClusterBlock INDEX_READ_ONLY_BLOCK = new ClusterBlock( @@ -158,8 +161,9 @@ public class IndexMetadata implements Diffable<IndexMetadata>, ToXContentFragmen /** * The state of the index. 
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum State { OPEN((byte) 0), CLOSE((byte) 1); @@ -285,6 +289,32 @@ public Iterator<Setting<?>> settings() { SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT.toString(), ReplicationType::parseString, + new Setting.Validator<>() { + + @Override + public void validate(final ReplicationType value) {} + + @Override + public void validate(final ReplicationType value, final Map<Setting<?>, Object> settings) { + final Object remoteStoreEnabled = settings.get(INDEX_REMOTE_STORE_ENABLED_SETTING); + if (ReplicationType.SEGMENT.equals(value) == false && Objects.equals(remoteStoreEnabled, true)) { + throw new IllegalArgumentException( + "To enable " + + INDEX_REMOTE_STORE_ENABLED_SETTING.getKey() + + ", " + + INDEX_REPLICATION_TYPE_SETTING.getKey() + + " should be set to " + + ReplicationType.SEGMENT + ); + } + } + + @Override + public Iterator<Setting<?>> settings() { + final List<Setting<?>> settings = List.of(INDEX_REMOTE_STORE_ENABLED_SETTING); + return settings.iterator(); + } + }, Property.IndexScope, Property.Final ); @@ -328,13 +358,14 @@ public Iterator<Setting<?>> settings() { } }, Property.IndexScope, - Property.Final + Property.PrivateIndex, + Property.Dynamic ); /** * Used to specify remote store repository to use for this index. */ - public static final Setting<String> INDEX_REMOTE_STORE_REPOSITORY_SETTING = Setting.simpleString( + public static final Setting<String> INDEX_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING = Setting.simpleString( SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, new Setting.Validator<>() { @@ -345,10 +376,12 @@ public void validate(final String value) {} public void validate(final String value, final Map<Setting<?>, Object> settings) { if (value == null || value.isEmpty()) { throw new IllegalArgumentException( - "Setting " + INDEX_REMOTE_STORE_REPOSITORY_SETTING.getKey() + " should be provided with non-empty repository ID" + "Setting " + + INDEX_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING.getKey() + + " should be provided with non-empty repository ID" ); } else { - validateRemoteStoreSettingEnabled(settings, INDEX_REMOTE_STORE_REPOSITORY_SETTING); + validateRemoteStoreSettingEnabled(settings, INDEX_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING); } } @@ -359,7 +392,8 @@ public Iterator<Setting<?>> settings() { } }, Property.IndexScope, - Property.Final + Property.PrivateIndex, + Property.Dynamic ); private static void validateRemoteStoreSettingEnabled(final Map<Setting<?>, Object> settings, Setting<?> setting) { @@ -409,7 +443,8 @@ public Iterator<Setting<?>> settings() { } }, Property.IndexScope, - Property.Final + Property.PrivateIndex, + Property.Dynamic ); public static final String SETTING_AUTO_EXPAND_REPLICAS = "index.auto_expand_replicas"; @@ -418,8 +453,9 @@ public Iterator<Setting<?>> settings() { /** * Blocks the API. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum APIBlock implements Writeable { READ_ONLY("read_only", INDEX_READ_ONLY_BLOCK), READ("read", INDEX_READ_BLOCK), @@ -747,7 +783,7 @@ public long getAliasesVersion() { /** * The term of the current selected primary. This is a non-negative number incremented when * a primary shard is assigned after a full cluster restart or a replica shard is promoted to a primary. - * + * <p> * Note: since we increment the term every time a shard is assigned, the term for any operational shard (i.e., a shard * that can be indexed into) is larger than 0. See {@link IndexMetadataUpdater#applyChanges}. 
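The new cross-setting validator above is easiest to read with concrete settings. A sketch, not part of the changeset, using the keys defined in this file (index.replication.type and index.remote_store.enabled); the builder itself does not validate, so the invalid combination is rejected only when the index settings pass through validation:

import org.opensearch.common.settings.Settings;

// Accepted: remote store requires segment replication.
Settings valid = Settings.builder()
    .put("index.replication.type", "SEGMENT")
    .put("index.remote_store.enabled", true)
    .build();

// Rejected during index-settings validation with an IllegalArgumentException:
// "To enable index.remote_store.enabled, index.replication.type should be set to SEGMENT"
Settings invalid = Settings.builder()
    .put("index.replication.type", "DOCUMENT")
    .put("index.remote_store.enabled", true)
    .build();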
**/ @@ -1177,8 +1213,9 @@ public static Builder builder(IndexMetadata indexMetadata) { /** * Builder of index metadata. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private String index; @@ -1296,7 +1333,7 @@ public Builder putMapping(String source) throws IOException { putMapping( new MappingMetadata( MapperService.SINGLE_MAPPING_NAME, - XContentHelper.convertToMap(XContentFactory.xContent(source), source, true) + XContentHelper.convertToMap(MediaTypeRegistry.xContent(source).xContent(), source, true) ) ); return this; @@ -1852,7 +1889,7 @@ public static Settings addHumanReadableSettings(Settings settings) { /** * Return the version the index was created from the provided index settings - * + * <p> * This looks for the presence of the {@link Version} object with key {@link IndexMetadata#SETTING_VERSION_CREATED} */ public static Version indexCreated(final Settings indexSettings) { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexNameExpressionResolver.java index 52a76e9dd90f1..9a3b569a7ac3d 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexNameExpressionResolver.java @@ -38,16 +38,17 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.common.Booleans; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.regex.Regex; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.time.DateMathParser; import org.opensearch.common.time.DateUtils; -import org.opensearch.common.util.CollectionUtils; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.set.Sets; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.core.index.Index; import org.opensearch.index.IndexNotFoundException; import org.opensearch.indices.IndexClosedException; @@ -76,8 +77,9 @@ /** * Resolves index name from an expression * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexNameExpressionResolver { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(IndexNameExpressionResolver.class); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java index e430ba5465499..3d532208bcfe2 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java @@ -35,20 +35,20 @@ import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.Diff; import org.opensearch.common.Nullable; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.MapBuilder; import org.opensearch.common.compress.CompressedXContent; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.set.Sets; 
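A one-line usage sketch for the indexCreated helper documented above; indexMetadata stands in for an existing IndexMetadata instance:

import org.opensearch.Version;
import org.opensearch.cluster.metadata.IndexMetadata;

// Reads the Version recorded under index.version.created when the index was created.
Version created = IndexMetadata.indexCreated(indexMetadata.getSettings());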
-import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.mapper.MapperService; import java.io.IOException; @@ -64,8 +64,9 @@ /** * Metadata for Index Templates * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexTemplateMetadata extends AbstractDiffable<IndexTemplateMetadata> { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(IndexTemplateMetadata.class); @@ -263,7 +264,7 @@ public String toString() { builder.startObject(); Builder.toXContentWithTypes(this, builder, ToXContent.EMPTY_PARAMS); builder.endObject(); - return Strings.toString(builder); + return builder.toString(); } catch (IOException e) { throw new UncheckedIOException(e); } @@ -272,8 +273,9 @@ public String toString() { /** * Builder of index template metadata. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private static final Set<String> VALID_FIELDS = Sets.newHashSet( @@ -368,7 +370,7 @@ public IndexTemplateMetadata build() { /** * Serializes the template to xContent, using the legacy format where the mappings are * nested under the type name. - * + * <p> * This method is used for serializing templates before storing them in the cluster metadata, * and also in the REST layer when returning a deprecated typed response. */ @@ -385,7 +387,7 @@ public static void toXContentWithTypes( /** * Serializes the template to xContent, making sure not to nest mappings under the * type name. - * + * <p> * Note that this method should currently only be used for creating REST responses, * and not when directly updating stored templates. Index templates are still stored * in the old, typed format, and have yet to be migrated to be typeless. */ @@ -480,7 +482,7 @@ public static IndexTemplateMetadata fromXContent(XContentParser parser, String t Map<String, Object> mappingSource = MapBuilder.<String, Object>newMapBuilder() .put(mappingType, parser.mapOrdered()) .map(); - builder.putMapping(mappingType, Strings.toString(XContentFactory.jsonBuilder().map(mappingSource))); + builder.putMapping(mappingType, XContentFactory.jsonBuilder().map(mappingSource).toString()); } } } else if ("aliases".equals(currentFieldName)) { @@ -496,7 +498,7 @@ public static IndexTemplateMetadata fromXContent(XContentParser parser, String t Map<String, Object> mapping = parser.mapOrdered(); if (mapping.size() == 1) { String mappingType = mapping.keySet().iterator().next(); - String mappingSource = Strings.toString(XContentFactory.jsonBuilder().map(mapping)); + String mappingSource = XContentFactory.jsonBuilder().map(mapping).toString(); if (mappingSource == null) { // no mapping source present; should this warn?
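The two serialization paths documented above differ only in whether mappings nest under the legacy type name; both shapes are sketched below with illustrative field names:

// toXContentWithTypes (legacy format, as stored in cluster metadata):
//   "mappings": { "_doc": { "properties": { "field1": { "type": "text" } } } }
//
// typeless form (used when building REST responses):
//   "mappings": { "properties": { "field1": { "type": "text" } } }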
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Manifest.java b/server/src/main/java/org/opensearch/cluster/metadata/Manifest.java index 695b8756b543c..79a8f4fb95f51 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Manifest.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Manifest.java @@ -33,6 +33,7 @@ package org.opensearch.cluster.metadata; import org.opensearch.core.ParseField; +import org.opensearch.core.index.Index; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContent; @@ -40,7 +41,6 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.gateway.MetadataStateFormat; -import org.opensearch.core.index.Index; import java.io.IOException; import java.util.Collections; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java index c77d2d6b8b2b3..e8180613c0fa3 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java @@ -35,13 +35,14 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.Diff; -import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.compress.CompressedXContent; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.index.mapper.DocumentMapper; import org.opensearch.index.mapper.MapperService; @@ -56,8 +57,9 @@ /** * Mapping configuration for a type. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MappingMetadata extends AbstractDiffable<MappingMetadata> { public static final MappingMetadata EMPTY_MAPPINGS = new MappingMetadata(MapperService.SINGLE_MAPPING_NAME, Collections.emptyMap()); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java index 3e36a5e65bf07..59dc86ea28ed6 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java @@ -32,8 +32,8 @@ package org.opensearch.cluster.metadata; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.CollectionUtil; import org.opensearch.action.AliasesRequest; import org.opensearch.cluster.ClusterState; @@ -49,24 +49,26 @@ import org.opensearch.cluster.decommission.DecommissionAttributeMetadata; import org.opensearch.common.Nullable; import org.opensearch.common.UUIDs; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.regex.Regex; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.index.Index; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.NamedObjectNotFoundException; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.gateway.MetadataStateFormat; -import org.opensearch.core.index.Index; import org.opensearch.index.IndexNotFoundException; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.plugins.MapperPlugin; -import org.opensearch.core.rest.RestStatus; import java.io.IOException; import java.util.ArrayList; @@ -97,8 +99,9 @@ /** * Metadata information * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Metadata implements Iterable<IndexMetadata>, Diffable<Metadata>, ToXContentFragment { private static final Logger logger = LogManager.getLogger(Metadata.class); @@ -107,11 +110,28 @@ public class Metadata implements Iterable<IndexMetadata>, Diffable<Metadata>, To public static final String UNKNOWN_CLUSTER_UUID = Strings.UNKNOWN_UUID_VALUE; public static final Pattern NUMBER_PATTERN = Pattern.compile("[0-9]+$"); + /** + * Utility to identify whether the given index uses the SEGMENT replication strategy in the established cluster state metadata. + * Note: this method is intended for use by other plugins as well.
+ * + * @param indexName Index name + * @return true if index uses SEGMENT replication, false otherwise + */ + public boolean isSegmentReplicationEnabled(String indexName) { + return Optional.ofNullable(index(indexName)) + .map( + indexMetadata -> ReplicationType.parseString(indexMetadata.getSettings().get(IndexMetadata.SETTING_REPLICATION_TYPE)) + .equals(ReplicationType.SEGMENT) + ) + .orElse(false); + } + /** * Context of the XContent. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum XContentContext { /* Custom metadata should be returned as part of API calls */ API, @@ -149,8 +169,9 @@ public enum XContentContext { /** * Custom metadata. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface Custom extends NamedDiffable<Custom>, ToXContentFragment, ClusterState.FeatureAware { EnumSet<XContentContext> context(); @@ -810,6 +831,10 @@ public Map<String, DataStream> dataStreams() { .orElse(Collections.emptyMap()); } + public Map<String, View> views() { + return Optional.ofNullable((ViewMetadata) this.custom(ViewMetadata.TYPE)).map(ViewMetadata::views).orElse(Collections.emptyMap()); + } + public DecommissionAttributeMetadata decommissionAttributeMetadata() { return custom(DecommissionAttributeMetadata.TYPE); } @@ -905,19 +930,26 @@ public static boolean isGlobalStateEquals(Metadata metadata1, Metadata metadata2 if (!metadata1.coordinationMetadata.equals(metadata2.coordinationMetadata)) { return false; } - if (!metadata1.persistentSettings.equals(metadata2.persistentSettings)) { + if (!metadata1.hashesOfConsistentSettings.equals(metadata2.hashesOfConsistentSettings)) { return false; } - if (!metadata1.hashesOfConsistentSettings.equals(metadata2.hashesOfConsistentSettings)) { + if (!metadata1.clusterUUID.equals(metadata2.clusterUUID)) { return false; } - if (!metadata1.templates.equals(metadata2.templates())) { + if (metadata1.clusterUUIDCommitted != metadata2.clusterUUIDCommitted) { return false; } - if (!metadata1.clusterUUID.equals(metadata2.clusterUUID)) { + return isGlobalResourcesMetadataEquals(metadata1, metadata2); + } + + /** + * Compares Metadata entities persisted in Remote Store. + */ + public static boolean isGlobalResourcesMetadataEquals(Metadata metadata1, Metadata metadata2) { + if (!metadata1.persistentSettings.equals(metadata2.persistentSettings)) { return false; } - if (metadata1.clusterUUIDCommitted != metadata2.clusterUUIDCommitted) { + if (!metadata1.templates.equals(metadata2.templates())) { return false; } // Check if any persistent metadata needs to be saved @@ -1105,8 +1137,9 @@ public static Builder builder(Metadata metadata) { /** * Builder of metadata.
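Two of the additions above target callers outside this class; a combined usage sketch, not part of the changeset, in which clusterService, the index name, and the view name are assumptions of the example:

import org.opensearch.cluster.metadata.Metadata;

Metadata metadata = clusterService.state().metadata();

// New public utility: true only when the index exists and uses SEGMENT replication.
boolean segRep = metadata.isSegmentReplicationEnabled("my-index"); // placeholder name

// New views() accessor: returns an empty map when no ViewMetadata custom is present.
boolean hasView = metadata.views().containsKey("my-view"); // placeholder name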
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private String clusterUUID; @@ -1296,6 +1329,36 @@ public Builder removeDataStream(String name) { return this; } + private Map<String, View> getViews() { + return Optional.ofNullable(customs.get(ViewMetadata.TYPE)) + .map(o -> (ViewMetadata) o) + .map(vmd -> vmd.views()) + .orElse(new HashMap<>()); + } + + public View view(final String viewName) { + return getViews().get(viewName); + } + + public Builder views(final Map<String, View> views) { + this.customs.put(ViewMetadata.TYPE, new ViewMetadata(views)); + return this; + } + + public Builder put(final View view) { + Objects.requireNonNull(view, "view cannot be null"); + final var replacementViews = new HashMap<>(getViews()); + replacementViews.put(view.getName(), view); + return views(replacementViews); + } + + public Builder removeView(final String viewName) { + Objects.requireNonNull(viewName, "viewName cannot be null"); + final var replacementViews = new HashMap<>(getViews()); + replacementViews.remove(viewName); + return views(replacementViews); + } + public Custom getCustom(String type) { return customs.get(type); } @@ -1649,7 +1712,7 @@ private SortedMap<String, IndexAbstraction> buildIndicesLookup() { /** * Validates there isn't any index with a name that would clash with the future backing indices of the existing data streams. - * + * <p> * E.g., if data stream `foo` has backing indices [`.ds-foo-000001`, `.ds-foo-000002`] and the indices lookup contains indices * `.ds-foo-000001`, `.ds-foo-000002` and `.ds-foo-000006` this will throw an IllegalStateException (as attempting to rollover the * `foo` data stream from generation 5 to 6 will not be possible) diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateDataStreamService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateDataStreamService.java index 91ff4f7652879..22ad21f54e556 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateDataStreamService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateDataStreamService.java @@ -35,7 +35,6 @@ import org.apache.logging.log4j.Logger; import org.opensearch.OpenSearchStatusException; import org.opensearch.ResourceAlreadyExistsException; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.ActiveShardsObserver; @@ -50,11 +49,12 @@ import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ObjectPath; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.MetadataFieldMapper; -import org.opensearch.core.rest.RestStatus; import org.opensearch.threadpool.ThreadPool; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index 728bac647d74a..4dde5d0ea013f 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ 
b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -39,7 +39,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.ResourceAlreadyExistsException; import org.opensearch.Version; -import org.opensearch.action.ActionListener; +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest; import org.opensearch.action.admin.indices.shrink.ResizeType; @@ -69,14 +69,17 @@ import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.io.PathUtils; import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; +import org.opensearch.core.index.Index; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; -import org.opensearch.core.index.Index; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.IndexService; @@ -86,12 +89,15 @@ import org.opensearch.index.mapper.MapperService.MergeReason; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.shard.IndexSettingProvider; +import org.opensearch.index.translog.Translog; import org.opensearch.indices.IndexCreationException; import org.opensearch.indices.IndicesService; import org.opensearch.indices.InvalidIndexNameException; import org.opensearch.indices.ShardLimitValidator; import org.opensearch.indices.SystemIndices; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.node.Node; +import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import org.opensearch.threadpool.ThreadPool; import java.io.IOException; @@ -105,7 +111,6 @@ import java.util.List; import java.util.Locale; import java.util.Map; -import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; @@ -120,24 +125,20 @@ import static java.util.stream.Collectors.toList; import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING; import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING; -import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING; -import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_REMOTE_STORE_REPOSITORY_SETTING; -import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_REMOTE_TRANSLOG_REPOSITORY_SETTING; import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_REPLICATION_TYPE_SETTING; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_CREATION_DATE; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_INDEX_UUID; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; -import static 
org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; import static org.opensearch.cluster.metadata.Metadata.DEFAULT_REPLICA_COUNT_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_REPOSITORY_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_ENABLED_SETTING; +import static org.opensearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreAttributePresent; /** * Service responsible for submitting create index requests @@ -262,11 +263,8 @@ public boolean validateDotIndex(String index, @Nullable Boolean isHidden) { * Validate the name for an index or alias against some static rules. */ public static void validateIndexOrAliasName(String index, BiFunction<String, String, ? extends RuntimeException> exceptionCtor) { - if (org.opensearch.common.Strings.validFileName(index) == false) { - throw exceptionCtor.apply( - index, - "must not contain the following characters " + org.opensearch.common.Strings.INVALID_FILENAME_CHARS - ); + if (Strings.validFileName(index) == false) { + throw exceptionCtor.apply(index, "must not contain the following characters " + Strings.INVALID_FILENAME_CHARS); } if (index.isEmpty()) { throw exceptionCtor.apply(index, "must not be empty"); @@ -580,7 +578,8 @@ private ClusterState applyCreateIndexRequestWithV1Templates( settings, indexScopedSettings, shardLimitValidator, - indexSettingProviders + indexSettingProviders, + clusterService.getClusterSettings() ); int routingNumShards = getIndexNumberOfRoutingShards(aggregatedIndexSettings, null); IndexMetadata tmpImd = buildAndValidateTemporaryIndexMetadata(currentState, aggregatedIndexSettings, request, routingNumShards); @@ -644,7 +643,8 @@ private ClusterState applyCreateIndexRequestWithV2Template( settings, indexScopedSettings, shardLimitValidator, - indexSettingProviders + indexSettingProviders, + clusterService.getClusterSettings() ); int routingNumShards = getIndexNumberOfRoutingShards(aggregatedIndexSettings, null); IndexMetadata tmpImd = buildAndValidateTemporaryIndexMetadata(currentState, aggregatedIndexSettings, request, routingNumShards); @@ -724,7 +724,8 @@ private ClusterState applyCreateIndexRequestWithExistingMetadata( settings, indexScopedSettings, shardLimitValidator, - indexSettingProviders + indexSettingProviders, + clusterService.getClusterSettings() ); final int routingNumShards = getIndexNumberOfRoutingShards(aggregatedIndexSettings, sourceMetadata); IndexMetadata tmpImd = buildAndValidateTemporaryIndexMetadata(currentState, aggregatedIndexSettings, request, routingNumShards); @@ -755,7 +756,7 @@ private ClusterState applyCreateIndexRequestWithExistingMetadata( /** * Parses the provided mappings json and the inheritable mappings from the templates (if any) * into a map. 
- * + * <p> * The template mappings are applied in the order they are encountered in the list (clients * should make sure the lower index, closer to the head of the list, templates have the highest * {@link IndexTemplateMetadata#order()}). This merging makes no distinction between field @@ -793,7 +794,7 @@ static Map<String, Object> parseV1Mappings( * Validates and creates the settings for the new index based on the explicitly configured settings via the * {@link CreateIndexClusterStateUpdateRequest}, inherited from templates and, if recovering from another index (ie. split, shrink, * clone), the resize settings. - * + * <p> * The template mappings are applied in the order they are encountered in the list (clients should make sure the lower index, closer * to the head of the list, templates have the highest {@link IndexTemplateMetadata#order()}) * @@ -807,7 +808,8 @@ static Settings aggregateIndexSettings( Settings settings, IndexScopedSettings indexScopedSettings, ShardLimitValidator shardLimitValidator, - Set<IndexSettingProvider> indexSettingProviders + Set<IndexSettingProvider> indexSettingProviders, + ClusterSettings clusterSettings ) { // Create builders for the template and request settings. We transform these into builders // because we may want settings to be "removed" from these prior to being set on the new @@ -816,6 +818,16 @@ static Settings aggregateIndexSettings( final Settings.Builder requestSettings = Settings.builder().put(request.settings()); final Settings.Builder indexSettingsBuilder = Settings.builder(); + + // Store type of `remote_snapshot` is intended to be system-managed for searchable snapshot indexes so a special case is needed here + // to prevent a user specifying this value when creating an index + String storeTypeSetting = request.settings().get(INDEX_STORE_TYPE_SETTING.getKey()); + if (storeTypeSetting != null && storeTypeSetting.equals(RestoreSnapshotRequest.StorageType.REMOTE_SNAPSHOT.toString())) { + throw new IllegalArgumentException( + "cannot create index with index setting \"index.store.type\" set to \"remote_snapshot\". 
Store type can be set to \"remote_snapshot\" only when restoring a remote snapshot by using \"storage_type\": \"remote_snapshot\"" + ); + } + if (sourceMetadata == null) { final Settings.Builder additionalIndexSettings = Settings.builder(); final Settings templateAndRequestSettings = Settings.builder().put(combinedTemplateSettings).put(request.settings()).build(); @@ -891,8 +903,8 @@ static Settings aggregateIndexSettings( indexSettingsBuilder.put(IndexMetadata.SETTING_INDEX_PROVIDED_NAME, request.getProvidedName()); indexSettingsBuilder.put(SETTING_INDEX_UUID, UUIDs.randomBase64UUID()); - updateReplicationStrategy(indexSettingsBuilder, request.settings(), settings); - updateRemoteStoreSettings(indexSettingsBuilder, request.settings(), settings); + updateReplicationStrategy(indexSettingsBuilder, request.settings(), settings, combinedTemplateSettings); + updateRemoteStoreSettings(indexSettingsBuilder, settings); if (sourceMetadata != null) { assert request.resizeType() != null; @@ -907,6 +919,10 @@ static Settings aggregateIndexSettings( ); } + List<String> validationErrors = new ArrayList<>(); + validateIndexReplicationTypeSettings(indexSettingsBuilder.build(), clusterSettings).ifPresent(validationErrors::add); + validateErrors(request.index(), validationErrors); + Settings indexSettings = indexSettingsBuilder.build(); /* * We can not validate settings until we have applied templates, otherwise we do not know the actual settings @@ -922,78 +938,70 @@ static Settings aggregateIndexSettings( } validateTranslogRetentionSettings(indexSettings); validateStoreTypeSettings(indexSettings); + validateRefreshIntervalSettings(request.settings(), clusterSettings); + validateTranslogDurabilitySettings(request.settings(), clusterSettings, settings); return indexSettings; } /** - * Updates index settings to set replication strategy by default based on cluster level settings + * Updates index settings to set replication strategy by default based on cluster level settings or remote store + * node attributes * @param settingsBuilder index settings builder to be updated with relevant settings * @param requestSettings settings passed in during index create request * @param clusterSettings cluster level settings + * @param combinedTemplateSettings combined template settings which satisfy the index */ - private static void updateReplicationStrategy(Settings.Builder settingsBuilder, Settings requestSettings, Settings clusterSettings) { - if (CLUSTER_REPLICATION_TYPE_SETTING.exists(clusterSettings) && INDEX_REPLICATION_TYPE_SETTING.exists(requestSettings) == false) { - settingsBuilder.put(SETTING_REPLICATION_TYPE, CLUSTER_REPLICATION_TYPE_SETTING.get(clusterSettings)); - return; - } - if (INDEX_REPLICATION_TYPE_SETTING.exists(requestSettings) == true) { - settingsBuilder.put(SETTING_REPLICATION_TYPE, INDEX_REPLICATION_TYPE_SETTING.get(requestSettings)); - return; + private static void updateReplicationStrategy( + Settings.Builder settingsBuilder, + Settings requestSettings, + Settings clusterSettings, + Settings combinedTemplateSettings + ) { + // The replication setting is applied in the following order: + // 1. Explicit index creation request parameter + // 2. Template property for replication type + // 3. Defaults to segment if remote store attributes are present on the cluster + // 4. 
Default cluster level setting + + final ReplicationType indexReplicationType; + if (INDEX_REPLICATION_TYPE_SETTING.exists(requestSettings)) { + indexReplicationType = INDEX_REPLICATION_TYPE_SETTING.get(requestSettings); + } else if (INDEX_REPLICATION_TYPE_SETTING.exists(combinedTemplateSettings)) { + indexReplicationType = INDEX_REPLICATION_TYPE_SETTING.get(combinedTemplateSettings); + } else if (CLUSTER_REPLICATION_TYPE_SETTING.exists(clusterSettings)) { + indexReplicationType = CLUSTER_REPLICATION_TYPE_SETTING.get(clusterSettings); + } else if (isRemoteStoreAttributePresent(clusterSettings)) { + indexReplicationType = ReplicationType.SEGMENT; + } else { + indexReplicationType = CLUSTER_REPLICATION_TYPE_SETTING.getDefault(clusterSettings); } - settingsBuilder.put(SETTING_REPLICATION_TYPE, CLUSTER_REPLICATION_TYPE_SETTING.getDefault(clusterSettings)); + settingsBuilder.put(SETTING_REPLICATION_TYPE, indexReplicationType); } /** - * Updates index settings to enable remote store by default based on cluster level settings + * Updates index settings to enable remote store by default based on node attributes * @param settingsBuilder index settings builder to be updated with relevant settings - * @param requestSettings settings passed in during index create request * @param clusterSettings cluster level settings */ - private static void updateRemoteStoreSettings(Settings.Builder settingsBuilder, Settings requestSettings, Settings clusterSettings) { - if (CLUSTER_REMOTE_STORE_ENABLED_SETTING.get(clusterSettings)) { - // Verify if we can create a remote store based index based on user provided settings - if (canCreateRemoteStoreIndex(requestSettings) == false) { - return; - } - - // Verify index has replication type as SEGMENT - if (ReplicationType.DOCUMENT.equals(ReplicationType.parseString(settingsBuilder.get(SETTING_REPLICATION_TYPE)))) { - throw new IllegalArgumentException( - "Cannot enable [" - + SETTING_REMOTE_STORE_ENABLED - + "] when [" - + SETTING_REPLICATION_TYPE - + "] is " - + ReplicationType.DOCUMENT - ); - } - - settingsBuilder.put(SETTING_REMOTE_STORE_ENABLED, true); - String remoteStoreRepo; - if (Objects.equals(requestSettings.get(INDEX_REMOTE_STORE_ENABLED_SETTING.getKey()), "true")) { - remoteStoreRepo = requestSettings.get(INDEX_REMOTE_STORE_REPOSITORY_SETTING.getKey()); - } else { - remoteStoreRepo = CLUSTER_REMOTE_STORE_REPOSITORY_SETTING.get(clusterSettings); - } - settingsBuilder.put(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, remoteStoreRepo) + private static void updateRemoteStoreSettings(Settings.Builder settingsBuilder, Settings clusterSettings) { + if (isRemoteStoreAttributePresent(clusterSettings)) { + settingsBuilder.put(SETTING_REMOTE_STORE_ENABLED, true) + .put( + SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, + clusterSettings.get( + Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY + ) + ) .put( SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, - requestSettings.get( - INDEX_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey(), - CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING.get(clusterSettings) + clusterSettings.get( + Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY ) ); } } - private static boolean canCreateRemoteStoreIndex(Settings requestSettings) { - return (INDEX_REPLICATION_TYPE_SETTING.exists(requestSettings) == false - || INDEX_REPLICATION_TYPE_SETTING.get(requestSettings).equals(ReplicationType.SEGMENT)) - && 
(INDEX_REMOTE_STORE_ENABLED_SETTING.exists(requestSettings) == false - || INDEX_REMOTE_STORE_ENABLED_SETTING.get(requestSettings)); - } - public static void validateStoreTypeSettings(Settings settings) { // deprecate simplefs store type: if (IndexModule.Type.SIMPLEFS.match(IndexModule.INDEX_STORE_TYPE_SETTING.get(settings))) { @@ -1033,7 +1041,7 @@ static int getIndexNumberOfRoutingShards(Settings indexSettings, @Nullable Index /** * Validate and resolve the aliases explicitly set for the index, together with the ones inherited from the specified * templates. - * + * <p> * The template mappings are applied in the order they are encountered in the list (clients should make sure the lower index, closer * to the head of the list, templates have the highest {@link IndexTemplateMetadata#order()}) * @@ -1254,7 +1262,11 @@ private void validate(CreateIndexClusterStateUpdateRequest request, ClusterState public void validateIndexSettings(String indexName, final Settings settings, final boolean forbidPrivateIndexSettings) throws IndexCreationException { List<String> validationErrors = getIndexSettingsValidationErrors(settings, forbidPrivateIndexSettings, indexName); + validateIndexReplicationTypeSettings(settings, clusterService.getClusterSettings()).ifPresent(validationErrors::add); + validateErrors(indexName, validationErrors); + } + private static void validateErrors(String indexName, List<String> validationErrors) { if (validationErrors.isEmpty() == false) { ValidationException validationException = new ValidationException(); validationException.addValidationErrors(validationErrors); @@ -1330,6 +1342,27 @@ private static List<String> validateIndexCustomPath(Settings settings, @Nullable return validationErrors; } + /** + * Validates that {@code index.replication.type} matches the cluster level setting {@code cluster.indices.replication.strategy} + * when {@code cluster.index.restrict.replication.type} is set to true. + * + * @param requestSettings settings passed in during index create request + * @param clusterSettings cluster setting + */ + private static Optional<String> validateIndexReplicationTypeSettings(Settings requestSettings, ClusterSettings clusterSettings) { + if (clusterSettings.get(IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING) + && requestSettings.hasValue(SETTING_REPLICATION_TYPE) + && requestSettings.get(INDEX_REPLICATION_TYPE_SETTING.getKey()) + .equals(clusterSettings.get(CLUSTER_REPLICATION_TYPE_SETTING).name()) == false) { + return Optional.of( + "index setting [index.replication.type] is not allowed to be set as [" + + IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING.getKey() + + "=true]" + ); + } + return Optional.empty(); + } + /** * Validates the settings and mappings for shrinking an index. * @@ -1513,4 +1546,50 @@ public static void validateTranslogRetentionSettings(Settings indexSettings) { } } } + + /** + * Validates {@code index.refresh_interval} is equal to or above the {@code cluster.minimum.index.refresh_interval}. 
+ * + * @param requestSettings settings passed in during index create/update request + * @param clusterSettings cluster setting + */ + public static void validateRefreshIntervalSettings(Settings requestSettings, ClusterSettings clusterSettings) { + if (IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.exists(requestSettings) == false) { + return; + } + TimeValue requestRefreshInterval = IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.get(requestSettings); + TimeValue clusterMinimumRefreshInterval = clusterSettings.get(IndicesService.CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING); + if (requestRefreshInterval.millis() < clusterMinimumRefreshInterval.millis()) { + throw new IllegalArgumentException( + "invalid index.refresh_interval [" + + requestRefreshInterval + + "]: cannot be smaller than cluster.minimum.index.refresh_interval [" + + clusterMinimumRefreshInterval + + "]" + ); + } + } + + /** + * Validates {@code index.translog.durability} is not async if the {@code cluster.remote_store.index.restrict.async-durability} is set to true. + * + * @param requestSettings settings passed in during index create/update request + * @param clusterSettings cluster setting + */ + static void validateTranslogDurabilitySettings(Settings requestSettings, ClusterSettings clusterSettings, Settings settings) { + if (isRemoteStoreAttributePresent(settings) == false + || IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.exists(requestSettings) == false + || clusterSettings.get(IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING) == false) { + return; + } + Translog.Durability durability = IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.get(requestSettings); + if (durability.equals(Translog.Durability.ASYNC)) { + throw new IllegalArgumentException( + "index setting [index.translog.durability=async] is not allowed as cluster setting [" + + IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING.getKey() + + "=true]" + ); + } + + } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataDeleteIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataDeleteIndexService.java index 4ee66a9920898..5352a8a3fb994 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataDeleteIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataDeleteIndexService.java @@ -34,7 +34,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.delete.DeleteIndexClusterStateUpdateRequest; import org.opensearch.cluster.AckedClusterStateUpdateTask; import org.opensearch.cluster.ClusterState; @@ -50,6 +49,7 @@ import org.opensearch.common.inject.Inject; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.set.Sets; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.Index; import org.opensearch.snapshots.RestoreService; import org.opensearch.snapshots.SnapshotInProgressException; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexAliasesService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexAliasesService.java index 19d2f4eccd7b0..96ba3d60ce9a6 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexAliasesService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexAliasesService.java @@ -33,7 +33,6 @@ package org.opensearch.cluster.metadata; import 
org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.alias.IndicesAliasesClusterStateUpdateRequest; import org.opensearch.cluster.AckedClusterStateUpdateTask; import org.opensearch.cluster.ClusterState; @@ -44,9 +43,10 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Priority; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; -import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.IndexService; import org.opensearch.index.mapper.MapperService; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java index dc985f5842224..e30e878f1b31a 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java @@ -37,9 +37,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.OpenSearchException; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; -import org.opensearch.action.NotifyOnceListener; import org.opensearch.action.admin.indices.close.CloseIndexClusterStateUpdateRequest; import org.opensearch.action.admin.indices.close.CloseIndexResponse; import org.opensearch.action.admin.indices.close.CloseIndexResponse.IndexResult; @@ -77,17 +75,19 @@ import org.opensearch.common.util.concurrent.AtomicArray; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.util.concurrent.CountDown; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.NotifyOnceListener; import org.opensearch.core.common.Strings; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.tasks.TaskId; import org.opensearch.index.IndexNotFoundException; import org.opensearch.indices.IndicesService; import org.opensearch.indices.ShardLimitValidator; import org.opensearch.snapshots.RestoreService; import org.opensearch.snapshots.SnapshotInProgressException; import org.opensearch.snapshots.SnapshotsService; -import org.opensearch.tasks.TaskId; import org.opensearch.threadpool.ThreadPool; import java.util.ArrayList; @@ -166,7 +166,7 @@ public MetadataIndexStateService( /** * Closes one or more indices. - * + * <p> * Closing indices is a three-step process: it first adds a write block to every index to close, then waits for the operations on shards * to be terminated and finally closes the indices by moving their state to CLOSE. */ @@ -302,7 +302,7 @@ public TimeValue timeout() { /** * Step 1 - Start closing indices by adding a write block - * + * <p> * This step builds the list of indices to close (the ones explicitly requested that are not in CLOSE state) and adds a unique cluster * block (or reuses an existing one) to every index to close in the cluster state. After the cluster state is published, the shards * should start to reject writing operations and we can proceed with step 2. 
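The rewritten updateReplicationStrategy earlier in this diff resolves the replication type through a fixed precedence chain: explicit request setting, then template setting, then the cluster-level setting, then SEGMENT whenever remote store node attributes are present, and finally the cluster default. A minimal standalone sketch of that chain follows; the enum and resolver here are illustrative stand-ins, not the OpenSearch classes:

import java.util.Optional;

public final class ReplicationTypeResolution {

    enum ReplicationType { DOCUMENT, SEGMENT }

    // The first populated source wins; the remote-store default only applies
    // when no explicit setting exists at any level above it.
    static ReplicationType resolve(
        Optional<ReplicationType> requestSetting,
        Optional<ReplicationType> templateSetting,
        Optional<ReplicationType> clusterSetting,
        boolean remoteStoreAttributesPresent,
        ReplicationType clusterDefault
    ) {
        return requestSetting.or(() -> templateSetting)
            .or(() -> clusterSetting)
            .or(() -> remoteStoreAttributesPresent ? Optional.of(ReplicationType.SEGMENT) : Optional.<ReplicationType>empty())
            .orElse(clusterDefault);
    }

    public static void main(String[] args) {
        // Remote store cluster with nothing set explicitly: SEGMENT wins.
        System.out.println(resolve(Optional.empty(), Optional.empty(), Optional.empty(), true, ReplicationType.DOCUMENT));
        // An explicit request setting overrides every other source.
        System.out.println(resolve(Optional.of(ReplicationType.DOCUMENT), Optional.empty(), Optional.empty(), true, ReplicationType.SEGMENT));
    }
}

Modeling each source as an Optional keeps the precedence order explicit and makes each branch of the original if/else-if chain independently testable.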
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java index 4f12313f951bc..5b03d3f7b19ce 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java @@ -37,10 +37,9 @@ import org.apache.lucene.util.automaton.Automaton; import org.apache.lucene.util.automaton.Operations; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.alias.Alias; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.service.ClusterManagerTaskKeys; @@ -50,7 +49,6 @@ import org.opensearch.common.Priority; import org.opensearch.common.UUIDs; import org.opensearch.common.ValidationException; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.inject.Inject; import org.opensearch.common.logging.HeaderWarning; @@ -60,11 +58,13 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.set.Sets; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.index.Index; import org.opensearch.index.IndexService; import org.opensearch.index.mapper.MapperParsingException; import org.opensearch.index.mapper.MapperService; @@ -93,6 +93,7 @@ import java.util.stream.Collectors; import static org.opensearch.cluster.metadata.MetadataCreateDataStreamService.validateTimestampFieldMapping; +import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateRefreshIntervalSettings; import static org.opensearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.NO_LONGER_ASSIGNED; /** @@ -302,9 +303,11 @@ ClusterState addComponentTemplate( if (stringMappings != null) { Map<String, Object> parsedMappings = MapperService.parseMapping(xContentRegistry, stringMappings); if (parsedMappings.size() > 0) { - stringMappings = org.opensearch.common.Strings.toString( - XContentFactory.jsonBuilder().startObject().field(MapperService.SINGLE_MAPPING_NAME, parsedMappings).endObject() - ); + stringMappings = XContentFactory.jsonBuilder() + .startObject() + .field(MapperService.SINGLE_MAPPING_NAME, parsedMappings) + .endObject() + .toString(); } } @@ -591,9 +594,11 @@ public ClusterState addIndexTemplateV2( if (stringMappings != null) { Map<String, Object> parsedMappings = MapperService.parseMapping(xContentRegistry, stringMappings); if (parsedMappings.size() > 0) { - stringMappings = org.opensearch.common.Strings.toString( - XContentFactory.jsonBuilder().startObject().field(MapperService.SINGLE_MAPPING_NAME, parsedMappings).endObject() - ); + stringMappings = 
XContentFactory.jsonBuilder() + .startObject() + .field(MapperService.SINGLE_MAPPING_NAME, parsedMappings) + .endObject() + .toString(); } } final Template finalTemplate = new Template( @@ -743,7 +748,7 @@ public static Map<String, List<String>> findConflictingV2Templates( /** * Return a map of v2 template names to their index patterns for v2 templates that would overlap * with the given template's index patterns. - * + * <p> * Based on the provided checkPriority and priority parameters this aims to report the overlapping * index templates regardless of the priority (ie. checkPriority == false) or otherwise overlapping * templates with the same priority as the given priority parameter (this is useful when trying to @@ -1151,7 +1156,7 @@ public static List<CompressedXContent> collectMappings(final ClusterState state, Optional.ofNullable(template.getDataStreamTemplate()) .map(ComposableIndexTemplate.DataStreamTemplate::getDataStreamMappingSnippet) .map(mapping -> { - try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { + try (XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder()) { builder.value(mapping); return new CompressedXContent(BytesReference.bytes(builder)); } catch (IOException e) { @@ -1502,12 +1507,9 @@ private void validate(String name, @Nullable Settings settings, List<String> ind if (indexPattern.startsWith("_")) { validationErrors.add("index_pattern [" + indexPattern + "] must not start with '_'"); } - if (org.opensearch.common.Strings.validFileNameExcludingAstrix(indexPattern) == false) { + if (Strings.validFileNameExcludingAstrix(indexPattern) == false) { validationErrors.add( - "index_pattern [" - + indexPattern - + "] must not contain the following characters " - + org.opensearch.common.Strings.INVALID_FILENAME_CHARS + "index_pattern [" + indexPattern + "] must not contain the following characters " + Strings.INVALID_FILENAME_CHARS ); } } @@ -1528,6 +1530,9 @@ private void validate(String name, @Nullable Settings settings, List<String> ind Optional.empty() ); validationErrors.addAll(indexSettingsValidation); + + // validate index refresh interval settings + validateRefreshIntervalSettings(settings, clusterService.getClusterSettings()); } if (indexPatterns.stream().anyMatch(Regex::isMatchAllPattern)) { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataMappingService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataMappingService.java index 845438b2363c3..1406287149e8d 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataMappingService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataMappingService.java @@ -35,7 +35,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest; import org.opensearch.cluster.AckedClusterStateTaskListener; import org.opensearch.cluster.ClusterState; @@ -52,6 +51,7 @@ import org.opensearch.common.inject.Inject; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; import org.opensearch.core.index.Index; import org.opensearch.index.IndexService; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java 
b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java index a5caf3269ef26..7d4c3512ed757 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java @@ -36,7 +36,6 @@ import org.apache.logging.log4j.Logger; import org.opensearch.ExceptionsHelper; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.settings.put.UpdateSettingsClusterStateUpdateRequest; import org.opensearch.action.admin.indices.upgrade.post.UpgradeSettingsClusterStateUpdateRequest; import org.opensearch.cluster.AckedClusterStateUpdateTask; @@ -58,6 +57,7 @@ import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.Index; import org.opensearch.index.IndexSettings; import org.opensearch.indices.IndicesService; @@ -73,6 +73,9 @@ import java.util.Set; import static org.opensearch.action.support.ContextPreservingActionListener.wrapPreservingContext; +import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateRefreshIntervalSettings; +import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateTranslogDurabilitySettings; +import static org.opensearch.common.settings.AbstractScopedSettings.ARCHIVED_SETTINGS_PREFIX; import static org.opensearch.index.IndexSettings.same; /** @@ -125,6 +128,10 @@ public void updateSettings( .put(request.settings()) .normalizePrefix(IndexMetadata.INDEX_SETTING_PREFIX) .build(); + + validateRefreshIntervalSettings(normalizedSettings, clusterService.getClusterSettings()); + validateTranslogDurabilitySettings(normalizedSettings, clusterService.getClusterSettings(), clusterService.getSettings()); + Settings.Builder settingsForClosedIndices = Settings.builder(); Settings.Builder settingsForOpenIndices = Settings.builder(); final Set<String> skippedSettings = new HashSet<>(); @@ -132,12 +139,16 @@ public void updateSettings( indexScopedSettings.validate( normalizedSettings.filter(s -> Regex.isSimpleMatchPattern(s) == false), // don't validate wildcards false, // don't validate dependencies here, we check it below; never allow changing the number of shards + false, + true, // Ignore archived setting. true ); // validate internal or private index settings for (String key : normalizedSettings.keySet()) { Setting setting = indexScopedSettings.get(key); boolean isWildcard = setting == null && Regex.isSimpleMatchPattern(key); + boolean isArchived = key.startsWith(ARCHIVED_SETTINGS_PREFIX); assert setting != null // we already validated the normalized settings + || isArchived || (isWildcard && normalizedSettings.hasValue(key) == false) : "unknown setting: " + key + " isWildcard: " @@ -145,7 +156,8 @@ + " hasValue: " + normalizedSettings.hasValue(key); settingsForClosedIndices.copy(key, normalizedSettings); - if (isWildcard || setting.isDynamic()) { + // Only allow dynamic settings and wildcards for open indices. Skip archived settings. 
+ if (isArchived == false && (isWildcard || setting.isDynamic())) { settingsForOpenIndices.copy(key, normalizedSettings); } else { skippedSettings.add(key); @@ -305,6 +317,8 @@ public ClusterState execute(ClusterState currentState) { Settings finalSettings = indexSettings.build(); indexScopedSettings.validate( finalSettings.filter(k -> indexScopedSettings.isPrivateSetting(k) == false), + true, + false, true ); metadataBuilder.put(IndexMetadata.builder(indexMetadata).settings(finalSettings)); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/ProcessClusterEventTimeoutException.java b/server/src/main/java/org/opensearch/cluster/metadata/ProcessClusterEventTimeoutException.java index e3ef553540093..8f2b9c3bbd36a 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/ProcessClusterEventTimeoutException.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/ProcessClusterEventTimeoutException.java @@ -33,8 +33,8 @@ package org.opensearch.cluster.metadata; import org.opensearch.OpenSearchException; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.rest.RestStatus; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java index e654745e8ccdb..e3689d046193c 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java @@ -38,11 +38,11 @@ import org.opensearch.cluster.NamedDiff; import org.opensearch.cluster.metadata.Metadata.Custom; import org.opensearch.common.Nullable; -import org.opensearch.common.Strings; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -54,6 +54,8 @@ import java.util.EnumSet; import java.util.List; +import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING; + /** * Contains metadata about registered snapshot repositories * @@ -68,6 +70,7 @@ public class RepositoriesMetadata extends AbstractNamedDiffable<Custom> implemen * in {@link org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse}. 
*/ public static final String HIDE_GENERATIONS_PARAM = "hide_generations"; + public static final String HIDE_SYSTEM_REPOSITORY_SETTING = "hide_system_repository_setting"; private final List<RepositoryMetadata> repositories; @@ -208,6 +211,7 @@ public static RepositoriesMetadata fromXContent(XContentParser parser) throws IO Settings settings = Settings.EMPTY; long generation = RepositoryData.UNKNOWN_REPO_GEN; long pendingGeneration = RepositoryData.EMPTY_REPO_GEN; + CryptoMetadata cryptoMetadata = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { String currentFieldName = parser.currentName(); @@ -231,6 +235,11 @@ public static RepositoriesMetadata fromXContent(XContentParser parser) throws IO throw new OpenSearchParseException("failed to parse repository [{}], unknown type", name); } pendingGeneration = parser.longValue(); + } else if (CryptoMetadata.CRYPTO_METADATA_KEY.equals(currentFieldName)) { + if (parser.nextToken() != XContentParser.Token.START_OBJECT) { + throw new OpenSearchParseException("failed to parse repository [{}], unknown type", name); + } + cryptoMetadata = CryptoMetadata.fromXContent(parser); } else { throw new OpenSearchParseException( "failed to parse repository [{}], unknown field [{}]", @@ -245,7 +254,7 @@ public static RepositoriesMetadata fromXContent(XContentParser parser) throws IO if (type == null) { throw new OpenSearchParseException("failed to parse repository [{}], missing repository type", name); } - repository.add(new RepositoryMetadata(name, type, settings, generation, pendingGeneration)); + repository.add(new RepositoryMetadata(name, type, settings, generation, pendingGeneration, cryptoMetadata)); } else { throw new OpenSearchParseException("failed to parse repositories"); } @@ -279,8 +288,15 @@ public EnumSet<Metadata.XContentContext> context() { public static void toXContent(RepositoryMetadata repository, XContentBuilder builder, ToXContent.Params params) throws IOException { builder.startObject(repository.name()); builder.field("type", repository.type()); + if (repository.cryptoMetadata() != null) { + repository.cryptoMetadata().toXContent(repository.cryptoMetadata(), builder, params); + } + Settings settings = repository.settings(); + if (SYSTEM_REPOSITORY_SETTING.get(settings) && params.paramAsBoolean(HIDE_SYSTEM_REPOSITORY_SETTING, false)) { + settings = repository.settings().filter(s -> !s.equals(SYSTEM_REPOSITORY_SETTING.getKey())); + } builder.startObject("settings"); - repository.settings().toXContent(builder, params); + settings.toXContent(builder, params); builder.endObject(); if (params.paramAsBoolean(HIDE_GENERATIONS_PARAM, false) == false) { @@ -292,6 +308,6 @@ public static void toXContent(RepositoryMetadata repository, XContentBuilder bui @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/RepositoryMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/RepositoryMetadata.java index c9ed07cc9c2a8..b9d2a3edf356f 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/RepositoryMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/RepositoryMetadata.java @@ -31,10 +31,12 @@ package org.opensearch.cluster.metadata; +import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.settings.Settings; import 
org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.settings.Settings; import org.opensearch.repositories.RepositoryData; import java.io.IOException; @@ -43,13 +45,15 @@ /** * Metadata about registered repository * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RepositoryMetadata implements Writeable { private final String name; private final String type; private final Settings settings; + private final CryptoMetadata cryptoMetadata; /** * Safe repository generation. @@ -69,14 +73,29 @@ public class RepositoryMetadata implements Writeable { * @param settings repository settings */ public RepositoryMetadata(String name, String type, Settings settings) { - this(name, type, settings, RepositoryData.UNKNOWN_REPO_GEN, RepositoryData.EMPTY_REPO_GEN); + this(name, type, settings, RepositoryData.UNKNOWN_REPO_GEN, RepositoryData.EMPTY_REPO_GEN, null); + } + + public RepositoryMetadata(String name, String type, Settings settings, CryptoMetadata cryptoMetadata) { + this(name, type, settings, RepositoryData.UNKNOWN_REPO_GEN, RepositoryData.EMPTY_REPO_GEN, cryptoMetadata); } public RepositoryMetadata(RepositoryMetadata metadata, long generation, long pendingGeneration) { - this(metadata.name, metadata.type, metadata.settings, generation, pendingGeneration); + this(metadata.name, metadata.type, metadata.settings, generation, pendingGeneration, metadata.cryptoMetadata); } public RepositoryMetadata(String name, String type, Settings settings, long generation, long pendingGeneration) { + this(name, type, settings, generation, pendingGeneration, null); + } + + public RepositoryMetadata( + String name, + String type, + Settings settings, + long generation, + long pendingGeneration, + CryptoMetadata cryptoMetadata + ) { this.name = name; this.type = type; this.settings = settings; @@ -87,6 +106,7 @@ public RepositoryMetadata(String name, String type, Settings settings, long gene + "] must be greater or equal to generation [" + generation + "]"; + this.cryptoMetadata = cryptoMetadata; } /** @@ -116,6 +136,15 @@ public Settings settings() { return this.settings; } + /** + * Returns crypto metadata of repository + * + * @return crypto metadata of repository + */ + public CryptoMetadata cryptoMetadata() { + return this.cryptoMetadata; + } + /** * Returns the safe repository generation. {@link RepositoryData} for this generation is assumed to exist in the repository. * All operations on the repository must be based on the {@link RepositoryData} at this generation. 
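The hunk that follows adds backward-compatible wire serialization for the new optional cryptoMetadata field: it is written and read only when the peer node is on V_2_10_0 or later, so older nodes continue to exchange the pre-crypto layout untouched. Below is a minimal sketch of this version-gating pattern, using plain java.io streams and an int version constant as stand-ins for OpenSearch's StreamInput/StreamOutput and Version (none of this is the actual API):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public final class VersionGatedField {

    static final int V_2_10_0 = 21000; // illustrative stand-in for Version.V_2_10_0

    // Write the optional field only when the receiver understands it.
    static void write(DataOutputStream out, int receiverVersion, String cryptoMetadata) throws IOException {
        out.writeUTF("repo-name");
        if (receiverVersion >= V_2_10_0) {
            out.writeBoolean(cryptoMetadata != null); // presence flag, like writeOptionalWriteable
            if (cryptoMetadata != null) {
                out.writeUTF(cryptoMetadata);
            }
        }
    }

    // Read symmetrically: an older sender never wrote the field, so it stays null.
    static String read(DataInputStream in, int senderVersion) throws IOException {
        String name = in.readUTF();
        String cryptoMetadata = null;
        if (senderVersion >= V_2_10_0 && in.readBoolean()) {
            cryptoMetadata = in.readUTF();
        }
        return name + ", crypto=" + cryptoMetadata;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        write(new DataOutputStream(buffer), V_2_10_0, "kms-key-config");
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(buffer.toByteArray()));
        System.out.println(read(in, V_2_10_0)); // prints: repo-name, crypto=kms-key-config
    }
}

The presence flag mirrors what writeOptionalWriteable puts on the wire, and the symmetric version check on the read side is what keeps mixed-version clusters from misinterpreting each other's bytes.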
@@ -146,6 +175,11 @@ public RepositoryMetadata(StreamInput in) throws IOException { settings = Settings.readSettingsFromStream(in); generation = in.readLong(); pendingGeneration = in.readLong(); + if (in.getVersion().onOrAfter(Version.V_2_10_0)) { + cryptoMetadata = in.readOptionalWriteable(CryptoMetadata::new); + } else { + cryptoMetadata = null; + } } /** @@ -160,6 +194,9 @@ public void writeTo(StreamOutput out) throws IOException { Settings.writeSettingsToStream(settings, out); out.writeLong(generation); out.writeLong(pendingGeneration); + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { + out.writeOptionalWriteable(cryptoMetadata); + } } /** @@ -169,7 +206,10 @@ public void writeTo(StreamOutput out) throws IOException { * @return {@code true} if both instances equal in all fields but the generation fields */ public boolean equalsIgnoreGenerations(RepositoryMetadata other) { - return name.equals(other.name) && type.equals(other.type()) && settings.equals(other.settings()); + return name.equals(other.name) + && type.equals(other.type()) + && settings.equals(other.settings()) + && Objects.equals(cryptoMetadata, other.cryptoMetadata()); } @Override @@ -183,16 +223,21 @@ public boolean equals(Object o) { if (!type.equals(that.type)) return false; if (generation != that.generation) return false; if (pendingGeneration != that.pendingGeneration) return false; - return settings.equals(that.settings); + if (!settings.equals(that.settings)) return false; + return Objects.equals(cryptoMetadata, that.cryptoMetadata); } @Override public int hashCode() { - return Objects.hash(name, type, settings, generation, pendingGeneration); + return Objects.hash(name, type, settings, generation, pendingGeneration, cryptoMetadata); } @Override public String toString() { - return "RepositoryMetadata{" + name + "}{" + type + "}{" + settings + "}{" + generation + "}{" + pendingGeneration + "}"; + String toStr = "RepositoryMetadata{" + name + "}{" + type + "}{" + settings + "}{" + generation + "}{" + pendingGeneration + "}"; + if (cryptoMetadata != null) { + return toStr + "{" + cryptoMetadata + "}"; + } + return toStr; } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Template.java b/server/src/main/java/org/opensearch/cluster/metadata/Template.java index 45d11dd9250e8..bd110c6af8975 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Template.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Template.java @@ -34,19 +34,19 @@ import org.opensearch.cluster.AbstractDiffable; import org.opensearch.common.Nullable; -import org.opensearch.core.ParseField; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.compress.CompressedXContent; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.xcontent.XContentParser; -import 
org.opensearch.common.xcontent.XContentType; import org.opensearch.index.mapper.MapperService; import java.io.IOException; @@ -59,8 +59,9 @@ * it is entirely independent from an index. It's a building block forming part of a regular index * template and a {@link ComponentTemplate}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Template extends AbstractDiffable<Template> implements ToXContentObject { private static final ParseField SETTINGS = new ParseField("settings"); private static final ParseField MAPPINGS = new ParseField("mappings"); @@ -77,7 +78,7 @@ public class Template extends AbstractDiffable<Template> implements ToXContentOb PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> Settings.fromXContent(p), SETTINGS); PARSER.declareObject( ConstructingObjectParser.optionalConstructorArg(), - (p, c) -> new CompressedXContent(Strings.toString(XContentFactory.jsonBuilder().map(p.mapOrdered()))), + (p, c) -> new CompressedXContent(MediaTypeRegistry.JSON.contentBuilder().map(p.mapOrdered()).toString()), MAPPINGS ); PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> { @@ -176,7 +177,7 @@ public boolean equals(Object obj) { @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } @Override @@ -188,8 +189,11 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endObject(); } if (this.mappings != null) { - Map<String, Object> uncompressedMapping = XContentHelper.convertToMap(this.mappings.uncompressed(), true, XContentType.JSON) - .v2(); + Map<String, Object> uncompressedMapping = XContentHelper.convertToMap( + this.mappings.uncompressed(), + true, + MediaTypeRegistry.JSON + ).v2(); if (uncompressedMapping.size() > 0) { builder.field(MAPPINGS.getPreferredName()); builder.map(reduceMapping(uncompressedMapping)); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/TemplateUpgradeService.java b/server/src/main/java/org/opensearch/cluster/metadata/TemplateUpgradeService.java index c8c28c5db67c5..10f458561bffe 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/TemplateUpgradeService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/TemplateUpgradeService.java @@ -36,7 +36,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest; import org.opensearch.action.admin.indices.template.put.PutIndexTemplateRequest; import org.opensearch.action.support.master.AcknowledgedResponse; @@ -46,12 +45,13 @@ import org.opensearch.cluster.ClusterStateListener; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.XContentHelper; import org.opensearch.gateway.GatewayService; import 
org.opensearch.indices.IndexTemplateMissingException; import org.opensearch.plugins.Plugin; @@ -162,7 +162,10 @@ void upgradeTemplates(Map<String, BytesReference> changes, Set<String> deletions } for (Map.Entry<String, BytesReference> change : changes.entrySet()) { - PutIndexTemplateRequest request = new PutIndexTemplateRequest(change.getKey()).source(change.getValue(), XContentType.JSON); + PutIndexTemplateRequest request = new PutIndexTemplateRequest(change.getKey()).source( + change.getValue(), + MediaTypeRegistry.JSON + ); request.clusterManagerNodeTimeout(TimeValue.timeValueMinutes(1)); client.admin().indices().putTemplate(request, new ActionListener<AcknowledgedResponse>() { @Override @@ -269,7 +272,7 @@ private BytesReference toBytesReference(IndexTemplateMetadata templateMetadata) return XContentHelper.toXContent((builder, params) -> { IndexTemplateMetadata.Builder.toInnerXContentWithTypes(templateMetadata, builder, params); return builder; - }, XContentType.JSON, PARAMS, false); + }, MediaTypeRegistry.JSON, PARAMS, false); } catch (IOException ex) { throw new IllegalStateException("Cannot serialize template [" + templateMetadata.getName() + "]", ex); } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/View.java b/server/src/main/java/org/opensearch/cluster/metadata/View.java new file mode 100644 index 0000000000000..1b1639bbca945 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/metadata/View.java @@ -0,0 +1,205 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.metadata; + +import org.opensearch.cluster.AbstractDiffable; +import org.opensearch.cluster.Diff; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.stream.Collectors; + +/** View of data in OpenSearch indices */ +@ExperimentalApi +public class View extends AbstractDiffable<View> implements ToXContentObject { + + private final String name; + private final String description; + private final long createdAt; + private final long modifiedAt; + private final SortedSet<Target> targets; + + public View(final String name, final String description, final Long createdAt, final Long modifiedAt, final Set<Target> targets) { + this.name = Objects.requireNonNull(name, "Name must be provided"); + this.description = description; + this.createdAt = createdAt != null ? createdAt : -1; + this.modifiedAt = modifiedAt != null ? 
modifiedAt : -1; + this.targets = new TreeSet<>(Objects.requireNonNull(targets, "Targets are required on a view")); + } + + public View(final StreamInput in) throws IOException { + this(in.readString(), in.readOptionalString(), in.readZLong(), in.readZLong(), new TreeSet<>(in.readList(Target::new))); + } + + public String getName() { + return name; + } + + public String getDescription() { + return description; + } + + public long getCreatedAt() { + return createdAt; + } + + public long getModifiedAt() { + return modifiedAt; + } + + public SortedSet<Target> getTargets() { + return new TreeSet<>(targets); + } + + public static Diff<View> readDiffFrom(final StreamInput in) throws IOException { + return readDiffFrom(View::new, in); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + View that = (View) o; + return name.equals(that.name) + && Objects.equals(description, that.description) + && createdAt == that.createdAt + && modifiedAt == that.modifiedAt + && targets.equals(that.targets); + } + + @Override + public int hashCode() { + return Objects.hash(name, description, createdAt, modifiedAt, targets); + } + + /** The source of data used to project the view */ + @ExperimentalApi + public static class Target implements Writeable, ToXContentObject, Comparable<Target> { + + private final String indexPattern; + + public Target(final String indexPattern) { + this.indexPattern = Objects.requireNonNull(indexPattern, "IndexPattern is required"); + } + + public Target(final StreamInput in) throws IOException { + this(in.readString()); + } + + public String getIndexPattern() { + return indexPattern; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Target that = (Target) o; + return indexPattern.equals(that.indexPattern); + } + + @Override + public int hashCode() { + return Objects.hash(indexPattern); + } + + public static final ParseField INDEX_PATTERN_FIELD = new ParseField("indexPattern"); + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(); + builder.field(INDEX_PATTERN_FIELD.getPreferredName(), indexPattern); + builder.endObject(); + return builder; + } + + private static final ConstructingObjectParser<Target, Void> PARSER = new ConstructingObjectParser<>( + "target", + args -> new Target((String) args[0]) + ); + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), INDEX_PATTERN_FIELD); + } + + public static Target fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeString(indexPattern); + } + + @Override + public int compareTo(final Target o) { + if (this == o) return 0; + + final Target other = (Target) o; + return this.indexPattern.compareTo(other.indexPattern); + } + } + + public static final ParseField NAME_FIELD = new ParseField("name"); + public static final ParseField DESCRIPTION_FIELD = new ParseField("description"); + public static final ParseField CREATED_AT_FIELD = new ParseField("createdAt"); + public static final ParseField MODIFIED_AT_FIELD = new ParseField("modifiedAt"); + public static final ParseField TARGETS_FIELD = new ParseField("targets"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser<View, Void> PARSER = 
new ConstructingObjectParser<>( + "view", + args -> new View((String) args[0], (String) args[1], (Long) args[2], (Long) args[3], new TreeSet<>((List<Target>) args[4])) + ); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), NAME_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), DESCRIPTION_FIELD); + PARSER.declareLongOrNull(ConstructingObjectParser.optionalConstructorArg(), -1L, CREATED_AT_FIELD); + PARSER.declareLongOrNull(ConstructingObjectParser.optionalConstructorArg(), -1L, MODIFIED_AT_FIELD); + PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), (p, c) -> Target.fromXContent(p), TARGETS_FIELD); + } + + public static View fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(); + builder.field(NAME_FIELD.getPreferredName(), name); + builder.field(DESCRIPTION_FIELD.getPreferredName(), description); + builder.field(CREATED_AT_FIELD.getPreferredName(), createdAt); + builder.field(MODIFIED_AT_FIELD.getPreferredName(), modifiedAt); + builder.field(TARGETS_FIELD.getPreferredName(), targets); + builder.endObject(); + return builder; + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeString(name); + out.writeOptionalString(description); + out.writeZLong(createdAt); + out.writeZLong(modifiedAt); + out.writeList(targets.stream().collect(Collectors.toList())); + } +} diff --git a/server/src/main/java/org/opensearch/cluster/metadata/ViewMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/ViewMetadata.java new file mode 100644 index 0000000000000..a89068078be58 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/metadata/ViewMetadata.java @@ -0,0 +1,187 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster.metadata; + +import org.opensearch.Version; +import org.opensearch.cluster.Diff; +import org.opensearch.cluster.DiffableUtils; +import org.opensearch.cluster.NamedDiff; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; + +import java.io.IOException; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +import static org.opensearch.cluster.metadata.ComposableIndexTemplateMetadata.MINIMMAL_SUPPORTED_VERSION; + +/** View metadata */ +@ExperimentalApi +public class ViewMetadata implements Metadata.Custom { + + public static final String TYPE = "view"; + private static final ParseField VIEW_FIELD = new ParseField("view"); + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser<ViewMetadata, Void> PARSER = new ConstructingObjectParser<>( + TYPE, + false, + a -> new ViewMetadata((Map<String, View>) a[0]) + ); + + static { + PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> { + Map<String, View> views = new HashMap<>(); + while (p.nextToken() != XContentParser.Token.END_OBJECT) { + views.put(p.currentName(), View.fromXContent(p)); + } + return views; + }, VIEW_FIELD); + } + + private final Map<String, View> views; + + public ViewMetadata(final Map<String, View> views) { + this.views = views; + } + + public ViewMetadata(final StreamInput in) throws IOException { + this.views = in.readMap(StreamInput::readString, View::new); + } + + public Map<String, View> views() { + return this.views; + } + + @Override + public Diff<Metadata.Custom> diff(final Metadata.Custom before) { + return new ViewMetadata.ViewMetadataDiff((ViewMetadata) before, this); + } + + public static NamedDiff<Metadata.Custom> readDiffFrom(final StreamInput in) throws IOException { + return new ViewMetadata.ViewMetadataDiff(in); + } + + @Override + public EnumSet<Metadata.XContentContext> context() { + return Metadata.ALL_CONTEXTS; + } + + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public Version getMinimalSupportedVersion() { + return MINIMMAL_SUPPORTED_VERSION; + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeMap(this.views, StreamOutput::writeString, (stream, val) -> val.writeTo(stream)); + } + + public static ViewMetadata fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(VIEW_FIELD.getPreferredName()); + for (Map.Entry<String, View> entry : views.entrySet()) { + builder.field(entry.getKey(), entry.getValue()); + } + builder.endObject(); + return builder; + } + + public static Builder builder() { + return new Builder(); + } + + @Override + public int hashCode() { + return Objects.hash(this.views); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + ViewMetadata other = (ViewMetadata) 
obj; + return Objects.equals(this.views, other.views); + } + + @Override + public String toString() { + return Strings.toString(MediaTypeRegistry.JSON, this); + } + + /** + * Builder of view metadata. + */ + @ExperimentalApi + public static class Builder { + + private final Map<String, View> views = new HashMap<>(); + + public Builder putView(final View view) { + views.put(view.getName(), view); + return this; + } + + public ViewMetadata build() { + return new ViewMetadata(views); + } + } + + /** + * A diff between view metadata. + */ + static class ViewMetadataDiff implements NamedDiff<Metadata.Custom> { + + final Diff<Map<String, View>> viewDiff; + + ViewMetadataDiff(ViewMetadata before, ViewMetadata after) { + this.viewDiff = DiffableUtils.diff(before.views, after.views, DiffableUtils.getStringKeySerializer()); + } + + ViewMetadataDiff(StreamInput in) throws IOException { + this.viewDiff = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), View::new, View::readDiffFrom); + } + + @Override + public Metadata.Custom apply(Metadata.Custom part) { + return new ViewMetadata(viewDiff.apply(((ViewMetadata) part).views)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + viewDiff.writeTo(out); + } + + @Override + public String getWriteableName() { + return TYPE; + } + } +} diff --git a/server/src/main/java/org/opensearch/cluster/metadata/WeightedRoutingMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/WeightedRoutingMetadata.java index 6cfef67f2ceba..b303c3a2034d5 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/WeightedRoutingMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/WeightedRoutingMetadata.java @@ -8,17 +8,16 @@ package org.opensearch.cluster.metadata; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.opensearch.OpenSearchParseException; import org.opensearch.Version; import org.opensearch.cluster.AbstractNamedDiffable; import org.opensearch.cluster.NamedDiff; import org.opensearch.cluster.routing.WeightedRouting; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -27,14 +26,15 @@ import java.util.EnumSet; import java.util.HashMap; import java.util.Map; +import java.util.Objects; /** * Contains metadata for weighted routing * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class WeightedRoutingMetadata extends AbstractNamedDiffable<Metadata.Custom> implements Metadata.Custom { - private static final Logger logger = LogManager.getLogger(WeightedRoutingMetadata.class); public static final String TYPE = "weighted_shard_routing"; public static final String AWARENESS = "awareness"; public static final String VERSION = "_version"; @@ -100,7 +100,7 @@ public static NamedDiff<Metadata.Custom> readDiffFrom(StreamInput in) throws IOE public static WeightedRoutingMetadata fromXContent(XContentParser parser) throws IOException { String attrKey = null; Double attrValue; - String attributeName = null; + String
attributeName = ""; Map<String, Double> weights = new HashMap<>(); WeightedRouting weightedRouting; XContentParser.Token token; @@ -163,12 +163,12 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; WeightedRoutingMetadata that = (WeightedRoutingMetadata) o; - return weightedRouting.equals(that.weightedRouting); + return weightedRouting.equals(that.weightedRouting) && version == that.version; } @Override public int hashCode() { - return weightedRouting.hashCode(); + return Objects.hash(weightedRouting.hashCode(), version); } @Override @@ -192,6 +192,6 @@ public static void toXContent(WeightedRouting weightedRouting, XContentBuilder b @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } } diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java index d6ba0199d193c..5226e9570ac14 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java @@ -34,12 +34,13 @@ import org.opensearch.Version; import org.opensearch.common.UUIDs; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.settings.Setting; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.node.Node; @@ -60,12 +61,14 @@ import java.util.stream.Stream; import static org.opensearch.node.NodeRoleSettings.NODE_ROLES_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX; /** * A discovery node represents a node that is part of the cluster. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DiscoveryNode implements Writeable, ToXContentFragment { static final String COORDINATING_ONLY = "coordinating_only"; @@ -458,6 +461,15 @@ public boolean isSearchNode() { return roles.contains(DiscoveryNodeRole.SEARCH_ROLE); } + /** + * Returns whether the node is a remote store node. + * + * @return true if the node contains remote store node attributes, false otherwise + */ + public boolean isRemoteStoreNode() { + return this.getAttributes().keySet().stream().anyMatch(key -> key.startsWith(REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX)); + } + /** * Returns a set of all the roles that the node has. The roles are returned in sorted order by the role name. * <p> @@ -519,7 +531,13 @@ public String toString() { sb.append('}'); } if (!attributes.isEmpty()) { - sb.append(attributes); + sb.append( + attributes.entrySet() + .stream() + .filter(entry -> !entry.getKey().startsWith(REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX)) // filter remote_store attributes + // from logging to reduce noise. 
+ .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)) + ); } return sb.toString(); } diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeFilters.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeFilters.java index 4fd2905495961..b27e1aa49a803 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeFilters.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeFilters.java @@ -33,12 +33,13 @@ package org.opensearch.cluster.node; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.network.InetAddresses; import org.opensearch.common.network.NetworkAddress; import org.opensearch.common.regex.Regex; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.transport.TransportAddress; import java.util.HashMap; import java.util.Map; @@ -48,15 +49,17 @@ /** * Filters Discovery nodes * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DiscoveryNodeFilters { /** * Operation type. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum OpType { AND, OR diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java index 07d70b2c6c1b2..0d2b08656c38d 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java @@ -35,6 +35,7 @@ import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.Booleans; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; @@ -52,8 +53,9 @@ /** * Represents a node role. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class DiscoveryNodeRole implements Comparable<DiscoveryNodeRole> { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(DiscoveryNodeRole.class); diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java index 060c7c5eb8d1a..2ebcd8096893d 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java @@ -37,12 +37,13 @@ import org.opensearch.cluster.Diff; import org.opensearch.common.Booleans; import org.opensearch.common.Nullable; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.regex.Regex; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.util.set.Sets; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.transport.TransportAddress; import java.io.IOException; import java.util.ArrayList; @@ -62,8 +63,9 @@ * This class holds all {@link DiscoveryNode} in the cluster and provides convenience methods to * access, modify merge / diff discovery nodes. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements Iterable<DiscoveryNode> { public static final DiscoveryNodes EMPTY_NODES = builder().build(); @@ -356,7 +358,7 @@ public DiscoveryNode findByAddress(TransportAddress address) { /** * Returns the version of the node with the oldest version in the cluster that is not a client node - * + * <p> * If there are no non-client nodes, Version.CURRENT will be returned. * * @return the oldest version in the cluster @@ -367,7 +369,7 @@ public Version getSmallestNonClientNodeVersion() { /** * Returns the version of the node with the youngest version in the cluster that is not a client node. - * + * <p> * If there are no non-client nodes, Version.CURRENT will be returned. * * @return the youngest version in the cluster @@ -417,16 +419,16 @@ public DiscoveryNode resolveNode(String node) { /** * Resolves a set of nodes according to the given sequence of node specifications. Implements the logic in various APIs that allow the * user to run the action on a subset of the nodes in the cluster. See [Node specification] in the reference manual for full details. - * + * <p> * Works by tracking the current set of nodes and applying each node specification in sequence. The set starts out empty and each node * specification may either add or remove nodes. For instance: - * + * <p> * - _local, _cluster_manager (_master) and _all respectively add to the subset the local node, the currently-elected cluster_manager, and all the nodes * - node IDs, names, hostnames and IP addresses all add to the subset any nodes which match * - a wildcard-based pattern of the form "attr*:value*" adds to the subset all nodes with a matching attribute with a matching value * - role:true adds to the subset all nodes with a matching role * - role:false removes from the subset all nodes with a matching role. 
- * + * <p> * An empty sequence of node specifications returns all nodes, since the corresponding actions run on all nodes by default. */ public String[] resolveNodes(String... nodes) { @@ -567,8 +569,9 @@ public String toString() { /** * Delta between nodes. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Delta { private final String localNodeId; @@ -735,8 +738,9 @@ public static Builder builder(DiscoveryNodes nodes) { /** * Builder of a map of discovery nodes. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private final Map<String, DiscoveryNode> nodes; @@ -813,7 +817,7 @@ public Builder localNodeId(String localNodeId) { * Checks that a node can be safely added to this node collection. * * @return null if all is OK or an error message explaining why a node can not be added. - * + * <p> * Note: if this method returns a non-null value, calling {@link #add(DiscoveryNode)} will fail with an * exception */ diff --git a/server/src/main/java/org/opensearch/cluster/routing/AllocationId.java b/server/src/main/java/org/opensearch/cluster/routing/AllocationId.java index 0bc434090b719..ade3afa4a500c 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/AllocationId.java +++ b/server/src/main/java/org/opensearch/cluster/routing/AllocationId.java @@ -33,8 +33,9 @@ package org.opensearch.cluster.routing; import org.opensearch.common.Nullable; -import org.opensearch.core.ParseField; import org.opensearch.common.UUIDs; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -55,8 +56,9 @@ * relocationId. Once relocation is done, the new allocation id is set to the relocationId. This is similar * behavior to how ShardRouting#currentNodeId is used. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class AllocationId implements ToXContentObject, Writeable { private static final String ID_KEY = "id"; private static final String RELOCATION_ID_KEY = "relocation_id"; diff --git a/server/src/main/java/org/opensearch/cluster/routing/BatchedRerouteService.java b/server/src/main/java/org/opensearch/cluster/routing/BatchedRerouteService.java index 641fb9abf73e0..73137d5b3e538 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/BatchedRerouteService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/BatchedRerouteService.java @@ -36,13 +36,13 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.NotClusterManagerException; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; import org.opensearch.common.Priority; +import org.opensearch.core.action.ActionListener; import java.util.ArrayList; import java.util.List; diff --git a/server/src/main/java/org/opensearch/cluster/routing/DelayedAllocationService.java b/server/src/main/java/org/opensearch/cluster/routing/DelayedAllocationService.java index 844b78dccc59b..2e200b6f38612 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/DelayedAllocationService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/DelayedAllocationService.java @@ -42,8 +42,8 @@ import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.routing.allocation.RoutingAllocation; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.component.AbstractLifecycleComponent; import org.opensearch.common.inject.Inject; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.threadpool.Scheduler; diff --git a/server/src/main/java/org/opensearch/cluster/routing/GroupShardsIterator.java b/server/src/main/java/org/opensearch/cluster/routing/GroupShardsIterator.java index 8e9371e6a714c..dcb60e79358e3 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/GroupShardsIterator.java +++ b/server/src/main/java/org/opensearch/cluster/routing/GroupShardsIterator.java @@ -33,6 +33,7 @@ package org.opensearch.cluster.routing; import org.apache.lucene.util.CollectionUtil; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.Countable; import java.util.Iterator; @@ -44,8 +45,9 @@ * ShardsIterators are always returned in ascending order independently of their order at construction * time. The incoming iterators are sorted to ensure consistent iteration behavior across Nodes / JVMs. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class GroupShardsIterator<ShardIt extends Comparable<ShardIt> & Countable> implements Iterable<ShardIt> { private final List<ShardIt> iterators; diff --git a/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java index 781ca5bb2255a..faadc3f7583fb 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java +++ b/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java @@ -41,9 +41,10 @@ import org.opensearch.cluster.routing.RecoverySource.ExistingStoreRecoverySource; import org.opensearch.cluster.routing.RecoverySource.LocalShardsRecoverySource; import org.opensearch.cluster.routing.RecoverySource.PeerRecoverySource; -import org.opensearch.cluster.routing.RecoverySource.SnapshotRecoverySource; import org.opensearch.cluster.routing.RecoverySource.RemoteStoreRecoverySource; +import org.opensearch.cluster.routing.RecoverySource.SnapshotRecoverySource; import org.opensearch.common.Randomness; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.index.Index; @@ -75,8 +76,9 @@ * represented as {@link ShardRouting}. * </p> * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> implements Iterable<IndexShardRoutingTable> { private final Index index; @@ -367,8 +369,9 @@ public static Builder builder(Index index) { /** * Builder of a routing table. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private final Index index; @@ -453,7 +456,8 @@ public Builder initializeAsRestore(IndexMetadata indexMetadata, SnapshotRecovery public Builder initializeAsRemoteStoreRestore( IndexMetadata indexMetadata, RemoteStoreRecoverySource recoverySource, - Map<ShardId, ShardRouting> activeInitializingShards + Map<ShardId, IndexShardRoutingTable> indexShardRoutingTableMap, + boolean forceRecoverAllPrimaries ) { final UnassignedInfo unassignedInfo = new UnassignedInfo( UnassignedInfo.Reason.EXISTING_INDEX_RESTORED, @@ -465,11 +469,33 @@ public Builder initializeAsRemoteStoreRestore( } for (int shardNumber = 0; shardNumber < indexMetadata.getNumberOfShards(); shardNumber++) { ShardId shardId = new ShardId(index, shardNumber); + if (indexShardRoutingTableMap.containsKey(shardId) == false) { + throw new IllegalStateException("IndexShardRoutingTable is not present for shardId: " + shardId); + } IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId); - if (activeInitializingShards.containsKey(shardId)) { - indexShardRoutingBuilder.addShard(activeInitializingShards.get(shardId)); - } else { + IndexShardRoutingTable indexShardRoutingTable = indexShardRoutingTableMap.get(shardId); + if (forceRecoverAllPrimaries || indexShardRoutingTable.primaryShard().unassigned()) { + // Primary shard to be recovered from remote store. indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(shardId, true, recoverySource, unassignedInfo)); + // All the replica shards to be recovered from peer recovery. 
+ for (int replicaNumber = 0; replicaNumber < indexMetadata.getNumberOfReplicas(); replicaNumber++) { + indexShardRoutingBuilder.addShard( + ShardRouting.newUnassigned(shardId, false, PeerRecoverySource.INSTANCE, unassignedInfo) + ); + } + } else { + // Primary is either active or initializing. Do not trigger restore. + indexShardRoutingBuilder.addShard(indexShardRoutingTable.primaryShard()); + // Replica, if unassigned, trigger peer recovery else no action. + for (ShardRouting shardRouting : indexShardRoutingTable.replicaShards()) { + if (shardRouting.unassigned()) { + indexShardRoutingBuilder.addShard( + ShardRouting.newUnassigned(shardId, false, PeerRecoverySource.INSTANCE, unassignedInfo) + ); + } else { + indexShardRoutingBuilder.addShard(shardRouting); + } + } } shards.put(shardNumber, indexShardRoutingBuilder.build()); } diff --git a/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java index f9f57eeda4e98..36149d014ea84 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java +++ b/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java @@ -39,10 +39,11 @@ import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.Nullable; import org.opensearch.common.Randomness; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.MapBuilder; +import org.opensearch.common.util.set.Sets; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.util.set.Sets; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.node.ResponseCollectorService; @@ -73,8 +74,9 @@ * referred to as replicas of a shard. Given that, this class encapsulates all * replicas (instances) for a single index shard. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexShardRoutingTable implements Iterable<ShardRouting> { final ShardShuffler shuffler; @@ -463,7 +465,7 @@ private static Map<String, Double> rankNodes( * OpenSearch, however, we do not have that sort of broadcast-to-all behavior. In order to prevent a node that gets a high score and * then never gets any more requests, we must ensure it eventually returns to a more normal score and can be a candidate for serving * requests. - * + * <p> * This adjustment takes the "winning" node's statistics and adds the average of those statistics with each non-winning node. Let's say * the winning node had a queue size of 10 and a non-winning node had a queue of 18. The average queue size is (10 + 18) / 2 = 14 so the * non-winning node will have statistics added for a queue size of 14. This is repeated for the response time and service times as well. 
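The adjustment paragraph in the hunk above is plain averaging. As a minimal, self-contained sketch of the computation for one statistic (this is not the class's actual private implementation, which operates on ResponseCollectorService node statistics), using the queue-size figures from the javadoc:

final class RankAdjustmentSketch {
    // Value the non-winning node records for one statistic: the mean of the
    // winner's and the non-winner's observed values.
    static double adjusted(double winnerStat, double nonWinnerStat) {
        return (winnerStat + nonWinnerStat) / 2.0;
    }

    public static void main(String[] args) {
        // Javadoc example: winning node queue size 10, non-winning node 18.
        System.out.println(adjusted(10, 18)); // 14.0 is recorded against the non-winner
    }
}

Repeating this for response time and service time pulls the non-winner's score back toward normal, so it can become a candidate for future requests.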
@@ -921,8 +923,9 @@ public int shardsMatchingPredicateCount(Predicate<ShardRouting> predicate) { /** * Key for WeightedRouting Shard Iterator * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "2.4.0") public static class WeightedRoutingKey { private final WeightedRouting weightedRouting; diff --git a/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java b/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java index 24e035e3db643..6a95c98815698 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java +++ b/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java @@ -38,13 +38,14 @@ import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.Strings; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexNotFoundException; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.node.ResponseCollectorService; import java.util.ArrayList; @@ -59,8 +60,9 @@ /** * Routes cluster operations * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class OperationRouting { public static final Setting<Boolean> USE_ADAPTIVE_REPLICA_SELECTION_SETTING = Setting.boolSetting( diff --git a/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java b/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java index 5cef46689ffc7..8d407c6aff5b6 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java @@ -35,6 +35,7 @@ import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -49,15 +50,16 @@ /** * Represents the recovery source of a shard. Available recovery types are: - * + * <p> * - {@link EmptyStoreRecoverySource} recovery from an empty store * - {@link ExistingStoreRecoverySource} recovery from an existing store * - {@link PeerRecoverySource} recovery from a primary on another node * - {@link SnapshotRecoverySource} recovery from a snapshot * - {@link LocalShardsRecoverySource} recovery from other shards of another index on the same node * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class RecoverySource implements Writeable, ToXContentObject { @Override @@ -111,8 +113,9 @@ protected void writeAdditionalFields(StreamOutput out) throws IOException { /** * Type of recovery. 
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Type { EMPTY_STORE, EXISTING_STORE, @@ -247,8 +250,9 @@ public String toString() { /** * recovery from a snapshot * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class SnapshotRecoverySource extends RecoverySource { public static final String NO_API_RESTORE_UUID = "_no_api_"; @@ -412,8 +416,9 @@ public int hashCode() { /** * Recovery from remote store * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class RemoteStoreRecoverySource extends RecoverySource { private final String restoreUUID; diff --git a/server/src/main/java/org/opensearch/cluster/routing/RerouteService.java b/server/src/main/java/org/opensearch/cluster/routing/RerouteService.java index 407c3bd760c03..b6db2e7c192fe 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RerouteService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RerouteService.java @@ -31,16 +31,18 @@ package org.opensearch.cluster.routing; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterState; import org.opensearch.common.Priority; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.action.ActionListener; /** * Asynchronously performs a cluster reroute, updating any shard states and rebalancing the cluster if appropriate. * - * @opensearch.internal + * @opensearch.api */ @FunctionalInterface +@PublicApi(since = "1.0.0") public interface RerouteService { /** diff --git a/server/src/main/java/org/opensearch/cluster/routing/RotationShardShuffler.java b/server/src/main/java/org/opensearch/cluster/routing/RotationShardShuffler.java index c49ad09b85344..a15d96e9adc99 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RotationShardShuffler.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RotationShardShuffler.java @@ -32,7 +32,7 @@ package org.opensearch.cluster.routing; -import org.opensearch.common.util.CollectionUtils; +import org.opensearch.core.common.util.CollectionUtils; import java.util.List; import java.util.concurrent.atomic.AtomicInteger; diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingChangesObserver.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingChangesObserver.java index 1ec572f7f8cc4..614cccb069048 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RoutingChangesObserver.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingChangesObserver.java @@ -32,11 +32,14 @@ package org.opensearch.cluster.routing; +import org.opensearch.common.annotation.PublicApi; + /** * Records changes made to {@link RoutingNodes} during an allocation round. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface RoutingChangesObserver { /** * Called when unassigned shard is initialized. Does not include initializing relocation target shards. 
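Since RerouteService in the hunk above is a @FunctionalInterface and is now @PublicApi, a test stub can be expressed as a lambda. A minimal sketch, assuming the interface's single method follows the reroute(String reason, Priority priority, ActionListener<ClusterState> listener) shape suggested by the imports in that hunk; the method declaration itself is outside this diff, so treat the signature as an assumption.

import org.opensearch.cluster.ClusterState;
import org.opensearch.cluster.routing.RerouteService;
import org.opensearch.common.Priority;
import org.opensearch.core.action.ActionListener;

final class NoopReroute {
    // Completes the listener immediately without computing a new cluster state;
    // null is a stand-in used purely for illustration.
    static final RerouteService INSTANCE = (reason, priority, listener) -> listener.onResponse(null);
}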
diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingNode.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingNode.java index e10734eb17e0d..15ec41d5c3fbb 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RoutingNode.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingNode.java @@ -34,6 +34,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; @@ -56,8 +57,9 @@ * A {@link RoutingNode} represents a cluster node associated with a single {@link DiscoveryNode} including all shards * that are hosted on that nodes. Each {@link RoutingNode} has a unique node id that can be used to identify the node. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RoutingNode implements Iterable<ShardRouting> { static class BucketedShards implements Iterable<ShardRouting> { diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java index 5402218664f6f..938a603c459c9 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java @@ -34,7 +34,6 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.CollectionUtil; -import org.opensearch.core.Assertions; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; @@ -43,7 +42,9 @@ import org.opensearch.cluster.routing.allocation.ExistingShardsAllocator; import org.opensearch.common.Nullable; import org.opensearch.common.Randomness; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; +import org.opensearch.core.Assertions; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; @@ -70,7 +71,7 @@ * {@link RoutingNodes} represents a copy the routing information contained in the {@link ClusterState cluster state}. * It can be either initialized as mutable or immutable (see {@link #RoutingNodes(ClusterState, boolean)}), allowing * or disallowing changes to its elements. - * + * <p> * The main methods used to update routing entries are: * <ul> * <li> {@link #initializeShard} initializes an unassigned shard. @@ -79,9 +80,11 @@ * <li> {@link #failShard} fails/cancels an assigned shard. * </ul> * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RoutingNodes implements Iterable<RoutingNode> { + private final Metadata metadata; private final Map<String, RoutingNode> nodesToShards = new HashMap<>(); @@ -107,6 +110,7 @@ public RoutingNodes(ClusterState clusterState) { } public RoutingNodes(ClusterState clusterState, boolean readOnly) { + this.metadata = clusterState.getMetadata(); this.readOnly = readOnly; final RoutingTable routingTable = clusterState.routingTable(); this.nodesPerAttributeNames = Collections.synchronizedMap(new HashMap<>()); @@ -367,10 +371,10 @@ public ShardRouting activePrimary(ShardId shardId) { /** * Returns one active replica shard for the given shard id or <code>null</code> if * no active replica is found. 
- * - * Since replicas could possibly be on nodes with a older version of OpenSearch than - * the primary is, this will return replicas on the highest version of OpenSearch. - * + * <p> + * Since replicas could possibly be on nodes with an older version of OpenSearch than + * the primary is, this will return replicas on the highest version of OpenSearch when document + * replication is enabled. */ public ShardRouting activeReplicaWithHighestVersion(ShardId shardId) { // It's possible for replicaNodeVersion to be null, when disassociating dead nodes @@ -390,6 +394,30 @@ public ShardRouting activeReplicaWithHighestVersion(ShardId shardId) { .orElse(null); } + /** + * Returns one active replica shard for the given shard id or <code>null</code> if + * no active replica is found. + * <p> + * Since replicas could possibly be on nodes with a higher version of OpenSearch than + * the primary is, this will return replicas on the oldest version of OpenSearch when segment + * replication is enabled, to allow the replica to read segments from the primary. + * + */ + public ShardRouting activeReplicaWithOldestVersion(ShardId shardId) { + // It's possible for replicaNodeVersion to be null. Therefore, we need to protect against the version being null + // (meaning the node will be going away). + return assignedShards(shardId).stream() + .filter(shr -> !shr.primary() && shr.active()) + .filter(shr -> node(shr.currentNodeId()) != null) + .min( + Comparator.comparing( + shr -> node(shr.currentNodeId()).node(), + Comparator.nullsFirst(Comparator.comparing(DiscoveryNode::getVersion)) + ) + ) + .orElse(null); + } + /** * Returns <code>true</code> iff all replicas are active for the given shard routing. Otherwise <code>false</code> */ @@ -518,9 +546,9 @@ public Tuple<ShardRouting, ShardRouting> relocateShard( /** * Applies the relevant logic to start an initializing shard. - * + * <p> * Moves the initializing shard to started. If the shard is a relocation target, also removes the relocation source. - * + * <p> * If the started shard is a primary relocation target, this also reinitializes currently initializing replicas as their * recovery source changes * @@ -579,9 +607,9 @@ public ShardRouting startShard(Logger logger, ShardRouting initializingShard, Ro /** * Applies the relevant logic to handle a cancelled or failed shard. - * + * <p> * Moves the shard to unassigned or completely removes the shard (if relocation target). - * + * <p> * - If shard is a primary, this also fails initializing replicas. - If shard is an active primary, this also promotes an active replica to primary (if such a replica exists). - If shard is a relocating primary, this also removes the primary relocation target shard.
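activeReplicaWithHighestVersion and the new activeReplicaWithOldestVersion differ only in which end of the node-version ordering they pick; the failover code later in this file chooses between them based on whether segment replication is enabled for the index. A small, self-contained illustration of the two choices (the version values are made up; only org.opensearch.Version's natural ordering is assumed):

import java.util.Comparator;
import java.util.List;

import org.opensearch.Version;

final class PromotionChoiceSketch {
    public static void main(String[] args) {
        List<Version> replicaNodeVersions = List.of(Version.fromString("2.8.0"), Version.fromString("2.9.0"));

        // Document replication: recovery replays operations, so the newest replica is safest to promote.
        Version docRep = replicaNodeVersions.stream().max(Comparator.naturalOrder()).orElseThrow();
        // Segment replication: the remaining (older) replicas must still be able to read the new
        // primary's segment files, so the oldest replica is promoted instead.
        Version segRep = replicaNodeVersions.stream().min(Comparator.naturalOrder()).orElseThrow();

        System.out.println("docrep promotes " + docRep + ", segrep promotes " + segRep);
    }
}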
@@ -701,30 +729,18 @@ assert node(failedShard.currentNodeId()).getByShardId(failedShard.shardId()) == + " was matched but wasn't removed"; } - public void swapPrimaryWithReplica( - Logger logger, - ShardRouting primaryShard, - ShardRouting replicaShard, - RoutingChangesObserver changes - ) { - assert primaryShard.primary() : "Invalid primary shard provided"; - assert !replicaShard.primary() : "Invalid Replica shard provided"; - - ShardRouting newPrimary = primaryShard.moveActivePrimaryToReplica(); - ShardRouting newReplica = replicaShard.moveActiveReplicaToPrimary(); - updateAssigned(primaryShard, newPrimary); - updateAssigned(replicaShard, newReplica); - logger.info("Swap relocation performed for shard [{}]", newPrimary.shortSummary()); - changes.replicaPromoted(newPrimary); - } - private void unassignPrimaryAndPromoteActiveReplicaIfExists( ShardRouting failedShard, UnassignedInfo unassignedInfo, RoutingChangesObserver routingChangesObserver ) { assert failedShard.primary(); - ShardRouting activeReplica = activeReplicaWithHighestVersion(failedShard.shardId()); + ShardRouting activeReplica; + if (metadata.isSegmentReplicationEnabled(failedShard.getIndexName())) { + activeReplica = activeReplicaWithOldestVersion(failedShard.shardId()); + } else { + activeReplica = activeReplicaWithHighestVersion(failedShard.shardId()); + } if (activeReplica == null) { moveToUnassigned(failedShard, unassignedInfo); } else { @@ -912,8 +928,9 @@ public int size() { /** * Unassigned shard list. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class UnassignedShards implements Iterable<ShardRouting> { private final RoutingNodes nodes; @@ -1013,8 +1030,9 @@ public void ignoreShard(ShardRouting shard, AllocationStatus allocationStatus, R /** * An unassigned iterator. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public class UnassignedIterator implements Iterator<ShardRouting>, ExistingShardsAllocator.UnassignedAllocationHandler { private final ListIterator<ShardRouting> iterator; @@ -1310,100 +1328,131 @@ private void ensureMutable() { } /** - * Creates an iterator over shards interleaving between nodes: The iterator returns the first shard from - * the first node, then the first shard of the second node, etc. until one shard from each node has been returned. - * The iterator then resumes on the first node by returning the second shard and continues until all shards from - * all the nodes have been returned. 
- * @param movePrimaryFirst if true, all primary shards are iterated over before iterating replica for any node - * @return iterator of shard routings + * Returns iterator of shard routings used by {@link #nodeInterleavedShardIterator(ShardMovementStrategy)} + * @param primaryFirst true when ShardMovementStrategy = ShardMovementStrategy.PRIMARY_FIRST, false when it is ShardMovementStrategy.REPLICA_FIRST */ - public Iterator<ShardRouting> nodeInterleavedShardIterator(boolean movePrimaryFirst) { + private Iterator<ShardRouting> buildIteratorForMovementStrategy(boolean primaryFirst) { final Queue<Iterator<ShardRouting>> queue = new ArrayDeque<>(); for (Map.Entry<String, RoutingNode> entry : nodesToShards.entrySet()) { queue.add(entry.getValue().copyShards().iterator()); } - if (movePrimaryFirst) { - return new Iterator<ShardRouting>() { - private Queue<ShardRouting> replicaShards = new ArrayDeque<>(); - private Queue<Iterator<ShardRouting>> replicaIterators = new ArrayDeque<>(); - - public boolean hasNext() { - while (!queue.isEmpty()) { - if (queue.peek().hasNext()) { - return true; - } - queue.poll(); - } - if (!replicaShards.isEmpty()) { + return new Iterator<ShardRouting>() { + private Queue<ShardRouting> shardRoutings = new ArrayDeque<>(); + private Queue<Iterator<ShardRouting>> shardIterators = new ArrayDeque<>(); + + public boolean hasNext() { + while (queue.isEmpty() == false) { + if (queue.peek().hasNext()) { return true; } - while (!replicaIterators.isEmpty()) { - if (replicaIterators.peek().hasNext()) { - return true; - } - replicaIterators.poll(); + queue.poll(); + } + if (!shardRoutings.isEmpty()) { + return true; + } + while (!shardIterators.isEmpty()) { + if (shardIterators.peek().hasNext()) { + return true; } - return false; + shardIterators.poll(); } + return false; + } - public ShardRouting next() { - if (hasNext() == false) { - throw new NoSuchElementException(); - } - while (!queue.isEmpty()) { - Iterator<ShardRouting> iter = queue.poll(); + public ShardRouting next() { + if (hasNext() == false) { + throw new NoSuchElementException(); + } + while (!queue.isEmpty()) { + Iterator<ShardRouting> iter = queue.poll(); + if (primaryFirst) { if (iter.hasNext()) { ShardRouting result = iter.next(); if (result.primary()) { queue.offer(iter); return result; } - replicaShards.offer(result); - replicaIterators.offer(iter); + shardRoutings.offer(result); + shardIterators.offer(iter); + } + } else { + while (iter.hasNext()) { + ShardRouting result = iter.next(); + if (result.primary() == false) { + queue.offer(iter); + return result; + } + shardRoutings.offer(result); + shardIterators.offer(iter); } } - if (!replicaShards.isEmpty()) { - return replicaShards.poll(); - } - Iterator<ShardRouting> replicaIterator = replicaIterators.poll(); - ShardRouting replicaShard = replicaIterator.next(); - replicaIterators.offer(replicaIterator); - - assert !replicaShard.primary(); - return replicaShard; } - - public void remove() { - throw new UnsupportedOperationException(); + if (!shardRoutings.isEmpty()) { + return shardRoutings.poll(); } - }; + Iterator<ShardRouting> replicaIterator = shardIterators.poll(); + ShardRouting replicaShard = replicaIterator.next(); + shardIterators.offer(replicaIterator); + + assert replicaShard.primary() != primaryFirst; + return replicaShard; + } + + public void remove() { + throw new UnsupportedOperationException(); + } + + }; + } + + /** + * Creates an iterator over shards interleaving between nodes: The iterator returns the first shard from + * the first node, then 
the first shard of the second node, etc. until one shard from each node has been returned. + * The iterator then resumes on the first node by returning the second shard and continues until all shards from + * all the nodes have been returned. + * @param shardMovementStrategy if ShardMovementStrategy.PRIMARY_FIRST, all primary shards are iterated over before iterating replica for any node + * if ShardMovementStrategy.REPLICA_FIRST, all replica shards are iterated over before iterating primary for any node + * if ShardMovementStrategy.NO_PREFERENCE, order of replica and primary shards doesn't matter in iteration + * @return iterator of shard routings + */ + public Iterator<ShardRouting> nodeInterleavedShardIterator(ShardMovementStrategy shardMovementStrategy) { + final Queue<Iterator<ShardRouting>> queue = new ArrayDeque<>(); + for (Map.Entry<String, RoutingNode> entry : nodesToShards.entrySet()) { + queue.add(entry.getValue().copyShards().iterator()); + } + if (shardMovementStrategy == ShardMovementStrategy.PRIMARY_FIRST) { + return buildIteratorForMovementStrategy(true); } else { - return new Iterator<ShardRouting>() { - @Override - public boolean hasNext() { - while (!queue.isEmpty()) { - if (queue.peek().hasNext()) { - return true; + if (shardMovementStrategy == ShardMovementStrategy.REPLICA_FIRST) { + return buildIteratorForMovementStrategy(false); + } else { + return new Iterator<ShardRouting>() { + @Override + public boolean hasNext() { + while (!queue.isEmpty()) { + if (queue.peek().hasNext()) { + return true; + } + queue.poll(); } - queue.poll(); + return false; } - return false; - } - @Override - public ShardRouting next() { - if (hasNext() == false) { - throw new NoSuchElementException(); + @Override + public ShardRouting next() { + if (hasNext() == false) { + throw new NoSuchElementException(); + } + Iterator<ShardRouting> iter = queue.poll(); + queue.offer(iter); + return iter.next(); } - Iterator<ShardRouting> iter = queue.poll(); - queue.offer(iter); - return iter.next(); - } - public void remove() { - throw new UnsupportedOperationException(); - } - }; + public void remove() { + throw new UnsupportedOperationException(); + } + }; + } } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java index 7934649a6d3eb..e4095a84be081 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java @@ -37,15 +37,16 @@ import org.opensearch.cluster.DiffableUtils; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; -import org.opensearch.cluster.routing.RecoverySource.SnapshotRecoverySource; import org.opensearch.cluster.routing.RecoverySource.RemoteStoreRecoverySource; +import org.opensearch.cluster.routing.RecoverySource.SnapshotRecoverySource; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.util.iterable.Iterables; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.util.iterable.Iterables; import org.opensearch.core.index.Index; -import org.opensearch.index.IndexNotFoundException; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.shard.ShardNotFoundException; import java.io.IOException; @@ -66,8 +67,9 
@@ * * @see IndexRoutingTable * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<RoutingTable> { public static final RoutingTable EMPTY_ROUTING_TABLE = builder().build(); @@ -295,6 +297,26 @@ public ShardsIterator allShardsIncludingRelocationTargets(String[] indices) { return allShardsSatisfyingPredicate(indices, shardRouting -> true, true); } + /** + * All the shards in the routing table which match the predicate + * @param predicate condition to match + * @return iterator over shards matching the predicate + */ + public ShardsIterator allShardsSatisfyingPredicate(Predicate<ShardRouting> predicate) { + String[] indices = indicesRouting.keySet().toArray(new String[0]); + return allShardsSatisfyingPredicate(indices, predicate, false); + } + + /** + * All the shards for the provided indices which match the predicate + * @param indices the indices whose shards should be returned + * @param predicate condition to match + * @return iterator over shards matching the predicate for the specific indices + */ + public ShardsIterator allShardsSatisfyingPredicate(String[] indices, Predicate<ShardRouting> predicate) { + return allShardsSatisfyingPredicate(indices, predicate, false); + } + private ShardsIterator allShardsSatisfyingPredicate( String[] indices, Predicate<ShardRouting> predicate, @@ -423,8 +445,9 @@ public static Builder builder(RoutingTable routingTable) { /** * Builder for the routing table. Note that build can only be called one time. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private long version; @@ -565,10 +588,11 @@ public Builder addAsFromOpenToClose(IndexMetadata indexMetadata) { public Builder addAsRemoteStoreRestore( IndexMetadata indexMetadata, RemoteStoreRecoverySource recoverySource, - Map<ShardId, ShardRouting> activeInitializingShards + Map<ShardId, IndexShardRoutingTable> indexShardRoutingTableMap, + boolean forceRecoveryPrimary ) { IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetadata.getIndex()) .initializeAsRemoteStoreRestore(indexMetadata, recoverySource, indexShardRoutingTableMap, forceRecoveryPrimary); add(indexRoutingBuilder); return this; } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/ShardIterator.java b/server/src/main/java/org/opensearch/cluster/routing/ShardIterator.java index 623846b6e1dda..95055d2ffe2ad 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/ShardIterator.java +++ b/server/src/main/java/org/opensearch/cluster/routing/ShardIterator.java @@ -32,13 +32,15 @@ package org.opensearch.cluster.routing; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.index.shard.ShardId; /** * Allows to iterate over a set of shard instances (routing) within a shard id group.
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ShardIterator extends ShardsIterator, Comparable<ShardIterator> { /** diff --git a/server/src/main/java/org/opensearch/cluster/routing/ShardMovementStrategy.java b/server/src/main/java/org/opensearch/cluster/routing/ShardMovementStrategy.java new file mode 100644 index 0000000000000..d1ed94087e20d --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/routing/ShardMovementStrategy.java @@ -0,0 +1,59 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.routing; + +import org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; +import org.opensearch.common.annotation.PublicApi; + +import java.util.Locale; + +/** + * ShardMovementStrategy defines the order in which shard movement occurs. + * <p> + * ShardMovementStrategy values or rather their string representation to be used with + * {@link BalancedShardsAllocator#SHARD_MOVEMENT_STRATEGY_SETTING} via cluster settings. + * + * @opensearch.api + */ +@PublicApi(since = "2.9.0") +public enum ShardMovementStrategy { + /** + * default behavior in which order of shard movement doesn't matter. + */ + NO_PREFERENCE, + + /** + * primary shards are moved first + */ + PRIMARY_FIRST, + + /** + * replica shards are moved first + */ + REPLICA_FIRST; + + public static ShardMovementStrategy parse(String strValue) { + if (strValue == null) { + return null; + } else { + strValue = strValue.toUpperCase(Locale.ROOT); + try { + return ShardMovementStrategy.valueOf(strValue); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException("Illegal allocation.shard_movement_strategy value [" + strValue + "]"); + } + } + } + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } + +} diff --git a/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java b/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java index aa893ec735bac..45de045a8fc69 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java +++ b/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java @@ -36,13 +36,14 @@ import org.opensearch.cluster.routing.RecoverySource.PeerRecoverySource; import org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.core.xcontent.ToXContentObject; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; import java.io.IOException; import java.util.Collections; @@ -52,8 +53,9 @@ * {@link ShardRouting} immutably encapsulates information about shard * indexRoutings like id, state, version, etc. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ShardRouting implements Writeable, ToXContentObject { /** diff --git a/server/src/main/java/org/opensearch/cluster/routing/ShardRoutingState.java b/server/src/main/java/org/opensearch/cluster/routing/ShardRoutingState.java index a46464d8727ee..2086159790ba9 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/ShardRoutingState.java +++ b/server/src/main/java/org/opensearch/cluster/routing/ShardRoutingState.java @@ -32,12 +32,15 @@ package org.opensearch.cluster.routing; +import org.opensearch.common.annotation.PublicApi; + /** * Represents the current state of a {@link ShardRouting} as defined by the * cluster. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum ShardRoutingState { /** * The shard is not assigned to any node. diff --git a/server/src/main/java/org/opensearch/cluster/routing/ShardsIterator.java b/server/src/main/java/org/opensearch/cluster/routing/ShardsIterator.java index e715be1676c6f..fc3fefbc3836c 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/ShardsIterator.java +++ b/server/src/main/java/org/opensearch/cluster/routing/ShardsIterator.java @@ -31,6 +31,7 @@ package org.opensearch.cluster.routing; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.Countable; import java.util.List; @@ -38,8 +39,9 @@ /** * Allows to iterate over unrelated shards. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ShardsIterator extends Iterable<ShardRouting>, Countable { /** diff --git a/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java b/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java index a4ecbadc34702..cf6dc9cd7306e 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java +++ b/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java @@ -38,14 +38,15 @@ import org.opensearch.cluster.routing.allocation.RoutingAllocation; import org.opensearch.cluster.routing.allocation.decider.Decision; import org.opensearch.common.Nullable; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -62,8 +63,9 @@ /** * Holds additional information as to why the shard is in unassigned state. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class UnassignedInfo implements ToXContentFragment, Writeable { public static final DateFormatter DATE_TIME_FORMATTER = DateFormatter.forPattern("date_optional_time").withZone(ZoneOffset.UTC); @@ -81,8 +83,9 @@ public final class UnassignedInfo implements ToXContentFragment, Writeable { * Note, ordering of the enum is important, make sure to add new values * at the end and handle version serialization properly. 
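
The same promotion pattern repeats throughout this change: the javadoc tag flips from @opensearch.internal to @opensearch.api and a @PublicApi annotation records the release in which the contract first shipped. Schematically (the class below is invented purely for illustration):

    import org.opensearch.common.annotation.PublicApi;

    /**
     * Illustration only: the annotation pattern applied across these files.
     *
     * @opensearch.api
     */
    @PublicApi(since = "1.0.0")
    public class ExampleRoutingComponent {
        // "since" records when the API first existed, not when the annotation was added
    }
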
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Reason { /** * Unassigned as a result of an API creation of an index. @@ -153,12 +156,13 @@ public enum Reason { /** * Captures the status of an unsuccessful allocation attempt for the shard, * causing it to remain in the unassigned state. - * + * <p> * Note, ordering of the enum is important, make sure to add new values * at the end and handle version serialization properly. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum AllocationStatus implements Writeable { /** * The shard was denied allocation to a node because the allocation deciders all returned a NO decision diff --git a/server/src/main/java/org/opensearch/cluster/routing/WeightedRouting.java b/server/src/main/java/org/opensearch/cluster/routing/WeightedRouting.java index 01471ab664294..468fac08d2946 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/WeightedRouting.java +++ b/server/src/main/java/org/opensearch/cluster/routing/WeightedRouting.java @@ -8,6 +8,7 @@ package org.opensearch.cluster.routing; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -20,8 +21,9 @@ /** * Entity for Weighted Round Robin weights * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.4.0") public class WeightedRouting implements Writeable { private String attributeName; private Map<String, Double> weights; @@ -52,6 +54,7 @@ public boolean isSet() { @Override public void writeTo(StreamOutput out) throws IOException { + out.writeString(attributeName); out.writeGenericValue(weights); } diff --git a/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingService.java b/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingService.java index 419d2343f65cd..c613a630806dd 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingService.java @@ -12,9 +12,8 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.ResourceNotFoundException; -import org.opensearch.action.ActionListener; -import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingRequest; import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingRequest; import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingResponse; import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingRequest; import org.opensearch.cluster.ClusterState; @@ -29,9 +28,9 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Priority; import org.opensearch.common.inject.Inject; - import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; import org.opensearch.threadpool.ThreadPool; import java.util.HashMap; diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocateUnassignedDecision.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocateUnassignedDecision.java index 
627d71522f9ed..4e77ab772f390 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocateUnassignedDecision.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocateUnassignedDecision.java @@ -37,9 +37,10 @@ import org.opensearch.cluster.routing.allocation.decider.Decision; import org.opensearch.cluster.routing.allocation.decider.Decision.Type; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.TimeValue; import org.opensearch.core.xcontent.XContentBuilder; import java.io.IOException; @@ -52,8 +53,9 @@ /** * Represents the allocation decision by an allocator for an unassigned shard. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class AllocateUnassignedDecision extends AbstractAllocationDecision { /** a constant representing a shard decision where no decision was taken */ public static final AllocateUnassignedDecision NOT_TAKEN = new AllocateUnassignedDecision( diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationConstraints.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationConstraints.java index e8ab0738c18da..5375910c57579 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationConstraints.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationConstraints.java @@ -12,8 +12,8 @@ import java.util.Map; import static org.opensearch.cluster.routing.allocation.ConstraintTypes.CLUSTER_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID; -import static org.opensearch.cluster.routing.allocation.ConstraintTypes.INDEX_SHARD_PER_NODE_BREACH_CONSTRAINT_ID; import static org.opensearch.cluster.routing.allocation.ConstraintTypes.INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID; +import static org.opensearch.cluster.routing.allocation.ConstraintTypes.INDEX_SHARD_PER_NODE_BREACH_CONSTRAINT_ID; import static org.opensearch.cluster.routing.allocation.ConstraintTypes.isIndexShardsPerNodeBreached; import static org.opensearch.cluster.routing.allocation.ConstraintTypes.isPerIndexPrimaryShardsPerNodeBreached; import static org.opensearch.cluster.routing.allocation.ConstraintTypes.isPrimaryShardsPerNodeBreached; diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationDecision.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationDecision.java index 3f1c025fce405..6aeb904fed66f 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationDecision.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationDecision.java @@ -34,6 +34,7 @@ import org.opensearch.cluster.routing.UnassignedInfo.AllocationStatus; import org.opensearch.cluster.routing.allocation.decider.Decision; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -45,8 +46,9 @@ * An enum which represents the various decision types that can be taken by the * allocators and deciders for allocating a shard to a node. 
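
One behavioral change a little further up is easy to miss: WeightedRouting#writeTo now serializes attributeName ahead of the weights map, so the wire format gains a field and any reader must consume the two in the same order. A round-trip sketch (version/BWC handling deliberately elided; the reading side is described, not named):

    BytesStreamOutput out = new BytesStreamOutput();
    weightedRouting.writeTo(out);               // writes attributeName, then the generic weights value
    StreamInput in = out.bytes().streamInput();
    // the matching reader must read a String first, then the weights, or deserialization breaks
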
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum AllocationDecision implements Writeable { /** * The shard can be allocated to a node. diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalance.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalance.java index 19601483d5607..6fc0e535ef4dc 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalance.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalance.java @@ -26,7 +26,7 @@ * This {@link AwarenessReplicaBalance} gives total unique values of awareness attributes * It takes in effect only iff cluster.routing.allocation.awareness.attributes and * cluster.routing.allocation.awareness.force.zone.values both are specified. - * + * <p> * This is used in enforcing total copy of shard is a maximum of unique values of awareness attributes * Helps in balancing shards across all awareness attributes and ensuring high availability of data. */ diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java index f209e993518c1..ae2d4a0926194 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java @@ -36,14 +36,14 @@ public class ConstraintTypes { /** * Constraint to control number of shards of an index allocated on a single * node. - * + * <p> * In current weight function implementation, when a node has significantly * fewer shards than other nodes (e.g. during single new node addition or node * replacement), its weight is much less than other nodes. All shard allocations * at this time tend to land on the new node with skewed weight. This breaks * index level balance in the cluster, by creating all shards of the same index * on one node, often resulting in a hotspot on that node. - * + * <p> * This constraint is breached when balancer attempts to allocate more than * average shards per index per node. 
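
A worked instance of the breach condition described above, using invented numbers and a literal reading of "more than average shards per index per node":

    // an index with 10 shards spread across 5 eligible nodes
    float avgShardsPerNodePerIndex = 10f / 5f;      // = 2.0
    int shardsOfThisIndexOnNode = 2;
    // placing one more copy of this index here would exceed the per-node average,
    // so the constraint treats this node as breached for this index
    boolean breached = (shardsOfThisIndexOnNode + 1) > avgShardsPerNodePerIndex;
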
*/ diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitor.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitor.java index 87cc62b2fd481..e6e5046ea28ee 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitor.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitor.java @@ -35,7 +35,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.GroupedActionListener; import org.opensearch.client.Client; import org.opensearch.cluster.ClusterInfo; @@ -53,6 +52,7 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.set.Sets; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; import java.util.ArrayList; @@ -380,9 +380,21 @@ public void onNewInfo(ClusterInfo info) { if ((state.getBlocks().hasGlobalBlockWithId(Metadata.CLUSTER_CREATE_INDEX_BLOCK.id()) == false) && nodes.size() > 0 && nodesOverHighThreshold.size() == nodes.size()) { + logger.warn( + "Putting index create block on cluster as all nodes are breaching high disk watermark. " + + "Number of nodes above high watermark: {}.", + nodesOverHighThreshold.size() + ); setIndexCreateBlock(listener, true); } else if (state.getBlocks().hasGlobalBlockWithId(Metadata.CLUSTER_CREATE_INDEX_BLOCK.id()) - && diskThresholdSettings.isCreateIndexBlockAutoReleaseEnabled()) { + && diskThresholdSettings.isCreateIndexBlockAutoReleaseEnabled() + && nodesOverHighThreshold.size() < nodes.size()) { + logger.warn( + "Removing index create block on cluster as all nodes are no longer breaching high disk watermark. " + + "Number of nodes above high watermark: {}. 
Total number of nodes: {}.", + nodesOverHighThreshold.size(), + nodes.size() + ); setIndexCreateBlock(listener, false); } else { listener.onResponse(null); diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettings.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettings.java index d54236ada6780..21d471c829787 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettings.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettings.java @@ -34,13 +34,13 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.Version; -import org.opensearch.common.Strings; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.RatioValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.unit.ByteSizeValue; import java.util.Arrays; import java.util.Iterator; diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java index ae8d92dae6811..7fc78b05880f3 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java @@ -59,9 +59,9 @@ /** * Observer that tracks changes made to RoutingNodes in order to update the primary terms and in-sync allocation ids in * {@link IndexMetadata} once the allocation round has completed. - * + * <p> * Primary terms are updated on primary initialization or when an active primary fails. - * + * <p> * Allocation ids are added for shards that become active and removed for shards that stop being active. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/MoveDecision.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/MoveDecision.java index fc39f813ca181..13578d1a11472 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/MoveDecision.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/MoveDecision.java @@ -36,6 +36,7 @@ import org.opensearch.cluster.routing.allocation.decider.Decision; import org.opensearch.cluster.routing.allocation.decider.Decision.Type; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.XContentBuilder; @@ -48,8 +49,9 @@ * Represents a decision to move a started shard, either because it is no longer allowed to remain on its current node * or because moving it to another node will form a better cluster balance.
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class MoveDecision extends AbstractAllocationDecision { /** a constant representing no decision taken */ public static final MoveDecision NOT_TAKEN = new MoveDecision(null, null, AllocationDecision.NO_ATTEMPT, null, null, 0); diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/NodeAllocationResult.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/NodeAllocationResult.java index 946e2d5e5f3dc..4163a5fd4c16f 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/NodeAllocationResult.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/NodeAllocationResult.java @@ -36,10 +36,11 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.allocation.decider.Decision; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; @@ -52,8 +53,9 @@ /** * This class represents the shard allocation decision and its explanation for a single node. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodeAllocationResult implements ToXContentObject, Writeable, Comparable<NodeAllocationResult> { private static final Comparator<NodeAllocationResult> nodeResultComparator = Comparator.comparing(NodeAllocationResult::getNodeDecision) @@ -191,8 +193,9 @@ public int compareTo(NodeAllocationResult other) { /** * A class that captures metadata about a shard store on a node. 
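
These decision types are what the cluster allocation explain API surfaces; a consumption sketch (assumes an AllocateUnassignedDecision named allocateDecision produced in explain mode, where per-node results are populated):

    for (NodeAllocationResult result : allocateDecision.getNodeDecisions()) {
        String nodeId = result.getNode().getId();              // the node that was considered
        AllocationDecision verdict = result.getNodeDecision(); // yes / no / throttled, per node
        // the per-decider detail, when recorded, hangs off the node result as well
    }
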
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class ShardStoreInfo implements ToXContentFragment, Writeable { private final boolean inSync; @Nullable diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/RerouteExplanation.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/RerouteExplanation.java index 6c84957d6a788..c07fb0135262b 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/RerouteExplanation.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/RerouteExplanation.java @@ -34,6 +34,7 @@ import org.opensearch.cluster.routing.allocation.command.AllocationCommand; import org.opensearch.cluster.routing.allocation.decider.Decision; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContentObject; @@ -45,8 +46,9 @@ * Class encapsulating the explanation for a single {@link AllocationCommand} * taken from the Deciders * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RerouteExplanation implements ToXContentObject { private AllocationCommand command; diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/RoutingAllocation.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/RoutingAllocation.java index 9e8e399384467..bf2db57128517 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/RoutingAllocation.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/RoutingAllocation.java @@ -43,6 +43,7 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; import org.opensearch.cluster.routing.allocation.decider.Decision; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.index.shard.ShardId; import org.opensearch.snapshots.RestoreService.RestoreInProgressUpdater; import org.opensearch.snapshots.SnapshotShardSizeInfo; @@ -60,8 +61,9 @@ * of shards and holds the {@link AllocationDeciders} which are responsible * for the current routing state. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RoutingAllocation { private final AllocationDeciders deciders; @@ -317,8 +319,9 @@ public void setHasPendingAsyncFetch() { /** * Debug mode. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum DebugMode { /** * debug mode is off diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/RoutingExplanations.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/RoutingExplanations.java index 769212703b48b..490eb76ab8563 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/RoutingExplanations.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/RoutingExplanations.java @@ -33,6 +33,7 @@ package org.opensearch.cluster.routing.allocation; import org.opensearch.cluster.routing.allocation.decider.Decision; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContentFragment; @@ -48,8 +49,9 @@ * Class used to encapsulate a number of {@link RerouteExplanation} * explanations. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RoutingExplanations implements ToXContentFragment { private final List<RerouteExplanation> explanations; diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/ShardAllocationDecision.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/ShardAllocationDecision.java index e1a7fb43827f3..49e8d82c6eb97 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/ShardAllocationDecision.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/ShardAllocationDecision.java @@ -32,6 +32,7 @@ package org.opensearch.cluster.routing.allocation; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -53,8 +54,9 @@ * then both {@link #getAllocateDecision()} and {@link #getMoveDecision()} will return * objects whose {@code isDecisionTaken()} method returns {@code false}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ShardAllocationDecision implements ToXContentFragment, Writeable { public static final ShardAllocationDecision NOT_TAKEN = new ShardAllocationDecision( AllocateUnassignedDecision.NOT_TAKEN, diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 59d7fab59c266..41ace0e7661fe 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -37,6 +37,7 @@ import org.apache.lucene.util.IntroSorter; import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.RoutingNodes; +import org.opensearch.cluster.routing.ShardMovementStrategy; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.routing.UnassignedInfo.AllocationStatus; @@ -107,8 +108,22 @@ public class BalancedShardsAllocator implements ShardsAllocator { "cluster.routing.allocation.move.primary_first", false, Property.Dynamic, + Property.NodeScope, + Property.Deprecated + ); + + /** + * Decides the order in which to move shards off a node when they can no longer stay on it. {@link LocalShardsBalancer#moveShards()} + * Encapsulates behavior of above SHARD_MOVE_PRIMARY_FIRST_SETTING.
+ */ + public static final Setting<ShardMovementStrategy> SHARD_MOVEMENT_STRATEGY_SETTING = new Setting<ShardMovementStrategy>( + "cluster.routing.allocation.shard_movement_strategy", + ShardMovementStrategy.NO_PREFERENCE.toString(), + ShardMovementStrategy::parse, + Property.Dynamic, Property.NodeScope ); + public static final Setting<Float> THRESHOLD_SETTING = Setting.floatSetting( "cluster.routing.allocation.balance.threshold", 1.0f, @@ -131,6 +146,7 @@ public class BalancedShardsAllocator implements ShardsAllocator { ); private volatile boolean movePrimaryFirst; + private volatile ShardMovementStrategy shardMovementStrategy; private volatile boolean preferPrimaryShardBalance; private volatile WeightFunction weightFunction; @@ -145,14 +161,33 @@ public BalancedShardsAllocator(Settings settings, ClusterSettings clusterSetting setWeightFunction(INDEX_BALANCE_FACTOR_SETTING.get(settings), SHARD_BALANCE_FACTOR_SETTING.get(settings)); setThreshold(THRESHOLD_SETTING.get(settings)); setPreferPrimaryShardBalance(PREFER_PRIMARY_SHARD_BALANCE.get(settings)); + setShardMovementStrategy(SHARD_MOVEMENT_STRATEGY_SETTING.get(settings)); clusterSettings.addSettingsUpdateConsumer(PREFER_PRIMARY_SHARD_BALANCE, this::setPreferPrimaryShardBalance); clusterSettings.addSettingsUpdateConsumer(SHARD_MOVE_PRIMARY_FIRST_SETTING, this::setMovePrimaryFirst); + clusterSettings.addSettingsUpdateConsumer(SHARD_MOVEMENT_STRATEGY_SETTING, this::setShardMovementStrategy); clusterSettings.addSettingsUpdateConsumer(INDEX_BALANCE_FACTOR_SETTING, SHARD_BALANCE_FACTOR_SETTING, this::setWeightFunction); clusterSettings.addSettingsUpdateConsumer(THRESHOLD_SETTING, this::setThreshold); } + /** + * Changes in deprecated setting SHARD_MOVE_PRIMARY_FIRST_SETTING affect value of its replacement setting SHARD_MOVEMENT_STRATEGY_SETTING. + */ private void setMovePrimaryFirst(boolean movePrimaryFirst) { this.movePrimaryFirst = movePrimaryFirst; + setShardMovementStrategy(this.shardMovementStrategy); + } + + /** + * Sets the correct Shard movement strategy to use. + * If users are still using deprecated setting `move_primary_first`, we want behavior to remain unchanged. + * In the event of changing ShardMovementStrategy setting from default setting NO_PREFERENCE to either PRIMARY_FIRST or REPLICA_FIRST, we want that + * to have priority over values set in move_primary_first setting. 
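
Taken together, the deprecated flag and its replacement resolve as summarized below (mirroring setShardMovementStrategy, which follows); and since the setting is Dynamic, it can be changed on a live cluster. The Settings usage is purely illustrative:

    // effective strategy for (shard_movement_strategy, move_primary_first):
    //   (NO_PREFERENCE, false) -> NO_PREFERENCE
    //   (NO_PREFERENCE, true)  -> PRIMARY_FIRST   (deprecated flag still honored)
    //   (PRIMARY_FIRST, any)   -> PRIMARY_FIRST   (new setting wins)
    //   (REPLICA_FIRST, any)   -> REPLICA_FIRST   (new setting wins)
    Settings update = Settings.builder()
        .put(BalancedShardsAllocator.SHARD_MOVEMENT_STRATEGY_SETTING.getKey(), ShardMovementStrategy.REPLICA_FIRST.toString())
        .build();
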
+ */ + private void setShardMovementStrategy(ShardMovementStrategy shardMovementStrategy) { + this.shardMovementStrategy = shardMovementStrategy; + if (shardMovementStrategy == ShardMovementStrategy.NO_PREFERENCE && this.movePrimaryFirst) { + this.shardMovementStrategy = ShardMovementStrategy.PRIMARY_FIRST; + } } private void setWeightFunction(float indexBalance, float shardBalanceFactor) { @@ -183,7 +218,7 @@ public void allocate(RoutingAllocation allocation) { final ShardsBalancer localShardsBalancer = new LocalShardsBalancer( logger, allocation, - movePrimaryFirst, + shardMovementStrategy, weightFunction, threshold, preferPrimaryShardBalance @@ -204,7 +239,7 @@ public ShardAllocationDecision decideShardAllocation(final ShardRouting shard, f ShardsBalancer localShardsBalancer = new LocalShardsBalancer( logger, allocation, - movePrimaryFirst, + shardMovementStrategy, weightFunction, threshold, preferPrimaryShardBalance @@ -301,7 +336,7 @@ public boolean getPreferPrimaryBalance() { * </li> * </ul> * <code>weight(node, index) = weight<sub>index</sub>(node, index) + weight<sub>node</sub>(node, index)</code> - * + * <p> * package-private for testing */ static class WeightFunction { @@ -455,12 +490,12 @@ public static class Balancer extends LocalShardsBalancer { public Balancer( Logger logger, RoutingAllocation allocation, - boolean movePrimaryFirst, + ShardMovementStrategy shardMovementStrategy, BalancedShardsAllocator.WeightFunction weight, float threshold, boolean preferPrimaryBalance ) { - super(logger, allocation, movePrimaryFirst, weight, threshold, preferPrimaryBalance); + super(logger, allocation, shardMovementStrategy, weight, threshold, preferPrimaryBalance); } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java index 80b4f720bd104..45f64a5b29b04 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java @@ -16,6 +16,7 @@ import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.RoutingNodes; import org.opensearch.cluster.routing.RoutingPool; +import org.opensearch.cluster.routing.ShardMovementStrategy; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.UnassignedInfo; @@ -57,14 +58,13 @@ public class LocalShardsBalancer extends ShardsBalancer { private final Map<String, BalancedShardsAllocator.ModelNode> nodes; private final RoutingAllocation allocation; private final RoutingNodes routingNodes; - private final boolean movePrimaryFirst; + private final ShardMovementStrategy shardMovementStrategy; private final boolean preferPrimaryBalance; private final BalancedShardsAllocator.WeightFunction weight; private final float threshold; private final Metadata metadata; - private final float avgShardsPerNode; private final float avgPrimaryShardsPerNode; private final BalancedShardsAllocator.NodeSorter sorter; @@ -73,19 +73,17 @@ public class LocalShardsBalancer extends ShardsBalancer { public LocalShardsBalancer( Logger logger, RoutingAllocation allocation, - boolean movePrimaryFirst, + ShardMovementStrategy shardMovementStrategy, BalancedShardsAllocator.WeightFunction weight, float threshold, boolean preferPrimaryBalance ) { this.logger = logger; 
this.allocation = allocation; - this.movePrimaryFirst = movePrimaryFirst; this.weight = weight; this.threshold = threshold; this.routingNodes = allocation.routingNodes(); this.metadata = allocation.metadata(); - avgShardsPerNode = ((float) metadata.getTotalNumberOfShards()) / routingNodes.size(); avgPrimaryShardsPerNode = (float) (StreamSupport.stream(metadata.spliterator(), false) .mapToInt(IndexMetadata::getNumberOfShards) .sum()) / routingNodes.size(); @@ -93,6 +91,7 @@ public LocalShardsBalancer( sorter = newNodeSorter(); inEligibleTargetNode = new HashSet<>(); this.preferPrimaryBalance = preferPrimaryBalance; + this.shardMovementStrategy = shardMovementStrategy; } /** @@ -529,7 +528,7 @@ private void checkAndAddInEligibleTargetNode(RoutingNode targetNode) { /** * Move started shards that can not be allocated to a node anymore - * + * <p> * For each shard to be moved this function executes a move operation * to the minimal eligible node with respect to the * weight function. If a shard is moved the shard will be set to @@ -549,7 +548,7 @@ void moveShards() { checkAndAddInEligibleTargetNode(currentNode.getRoutingNode()); } boolean primariesThrottled = false; - for (Iterator<ShardRouting> it = allocation.routingNodes().nodeInterleavedShardIterator(movePrimaryFirst); it.hasNext();) { + for (Iterator<ShardRouting> it = allocation.routingNodes().nodeInterleavedShardIterator(shardMovementStrategy); it.hasNext();) { // Verify if the cluster concurrent recoveries have been reached. if (allocation.deciders().canMoveAnyShard(allocation).type() != Decision.Type.YES) { logger.info( @@ -573,8 +572,8 @@ void moveShards() { continue; } - // Ensure that replicas don't relocate if primaries are being throttled and primary first is enabled - if (movePrimaryFirst && primariesThrottled && !shardRouting.primary()) { + // Ensure that replicas don't relocate if primaries are being throttled and primary first shard movement strategy is enabled + if ((shardMovementStrategy == ShardMovementStrategy.PRIMARY_FIRST) && primariesThrottled && !shardRouting.primary()) { logger.info( "Cannot move any replica shard in the cluster as movePrimaryFirst is enabled and primary shards" + "are being throttled. Skipping shard iteration" @@ -662,7 +661,6 @@ MoveDecision decideMove(final ShardRouting shardRouting) { RoutingNode targetNode = null; final List<NodeAllocationResult> nodeExplanationMap = explain ? new ArrayList<>() : null; int weightRanking = 0; - int targetNodeProcessed = 0; for (BalancedShardsAllocator.ModelNode currentNode : sorter.modelNodes) { if (currentNode != sourceNode) { RoutingNode target = currentNode.getRoutingNode(); @@ -676,7 +674,6 @@ MoveDecision decideMove(final ShardRouting shardRouting) { continue; } } - targetNodeProcessed++; // don't use canRebalance as we want hard filtering rules to apply. 
See #17698 Decision allocationDecision = allocation.deciders().canAllocate(shardRouting, target, allocation); if (explain) { diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java index 1fadd775ab7b5..a05938c176678 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java @@ -9,6 +9,7 @@ package org.opensearch.cluster.routing.allocation.allocator; import org.apache.logging.log4j.Logger; +import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.RoutingNodes; import org.opensearch.cluster.routing.RoutingPool; @@ -20,7 +21,6 @@ import org.opensearch.cluster.routing.allocation.decider.Decision; import org.opensearch.cluster.routing.allocation.decider.DiskThresholdDecider; import org.opensearch.common.Randomness; -import org.opensearch.cluster.routing.RecoverySource; import java.util.ArrayDeque; import java.util.ArrayList; @@ -43,6 +43,8 @@ public final class RemoteShardsBalancer extends ShardsBalancer { private final Logger logger; private final RoutingAllocation allocation; private final RoutingNodes routingNodes; + // indicates if there are any nodes being throttled for allocating any unassigned shards + private boolean anyNodesThrottled = false; public RemoteShardsBalancer(Logger logger, RoutingAllocation allocation) { this.logger = logger; @@ -84,24 +86,39 @@ void moveShards() { Queue<RoutingNode> excludedNodes = new ArrayDeque<>(); classifyNodesForShardMovement(eligibleNodes, excludedNodes); - if (excludedNodes.isEmpty()) { - logger.debug("No excluded nodes found. 
Returning..."); - return; - } - - while (!eligibleNodes.isEmpty() && !excludedNodes.isEmpty()) { - RoutingNode sourceNode = excludedNodes.poll(); - for (ShardRouting ineligibleShard : sourceNode) { - if (ineligibleShard.started() == false) { + // move shards that cannot remain on eligible nodes + final List<ShardRouting> forceMoveShards = new ArrayList<>(); + eligibleNodes.forEach(sourceNode -> { + for (final ShardRouting shardRouting : sourceNode) { + if (ineligibleForMove(shardRouting)) { continue; } - if (!RoutingPool.REMOTE_CAPABLE.equals(RoutingPool.getShardPool(ineligibleShard, allocation))) { + if (allocation.deciders().canRemain(shardRouting, sourceNode, allocation) == Decision.NO) { + forceMoveShards.add(shardRouting); + } + } + }); + for (final ShardRouting shard : forceMoveShards) { + if (eligibleNodes.isEmpty()) { + logger.trace("there are no eligible nodes available, return"); + return; + } + + tryShardMovementToEligibleNode(eligibleNodes, shard); + } + + // move shards that are currently assigned on excluded nodes + while (eligibleNodes.isEmpty() == false && excludedNodes.isEmpty() == false) { + RoutingNode sourceNode = excludedNodes.poll(); + for (final ShardRouting ineligibleShard : sourceNode) { + if (ineligibleForMove(ineligibleShard)) { continue; } if (eligibleNodes.isEmpty()) { - break; + logger.trace("there are no eligible nodes available, return"); + return; } tryShardMovementToEligibleNode(eligibleNodes, ineligibleShard); @@ -109,6 +126,10 @@ void moveShards() { } } + private boolean ineligibleForMove(ShardRouting shard) { + return shard.started() == false || RoutingPool.REMOTE_CAPABLE.equals(RoutingPool.getShardPool(shard, allocation)) == false; + } + /** * Classifies the nodes into eligible and excluded depending on whether node is able or unable for shard assignment * @param eligibleNodes contains the list of classified nodes eligible to accept shards @@ -145,10 +166,23 @@ private void classifyNodesForShardMovement(Queue<RoutingNode> eligibleNodes, Que * @param shard the ineligible shard to be moved */ private void tryShardMovementToEligibleNode(Queue<RoutingNode> eligibleNodes, ShardRouting shard) { - Set<String> nodesCheckedForShard = new HashSet<>(); - while (!eligibleNodes.isEmpty()) { - RoutingNode targetNode = eligibleNodes.poll(); - Decision currentShardDecision = allocation.deciders().canAllocate(shard, targetNode, allocation); + final Set<String> nodesCheckedForShard = new HashSet<>(); + int numNodesToCheck = eligibleNodes.size(); + while (eligibleNodes.isEmpty() == false) { + assert numNodesToCheck > 0; + final RoutingNode targetNode = eligibleNodes.poll(); + --numNodesToCheck; + // skip the node that the target shard is currently allocated on + if (targetNode.nodeId().equals(shard.currentNodeId())) { + assert nodesCheckedForShard.add(targetNode.nodeId()); + eligibleNodes.offer(targetNode); + if (numNodesToCheck == 0) { + return; + } + continue; + } + + final Decision currentShardDecision = allocation.deciders().canAllocate(shard, targetNode, allocation); if (currentShardDecision.type() == Decision.Type.YES) { if (logger.isDebugEnabled()) { @@ -166,7 +200,7 @@ private void tryShardMovementToEligibleNode(Queue<RoutingNode> eligibleNodes, Sh allocation.changes() ); eligibleNodes.offer(targetNode); - break; + return; } else { if (logger.isTraceEnabled()) { logger.trace( @@ -177,18 +211,19 @@ private void tryShardMovementToEligibleNode(Queue<RoutingNode> eligibleNodes, Sh ); } - Decision nodeLevelDecision = 
allocation.deciders().canAllocateAnyShardToNode(targetNode, allocation); + final Decision nodeLevelDecision = allocation.deciders().canAllocateAnyShardToNode(targetNode, allocation); if (nodeLevelDecision.type() == Decision.Type.YES) { logger.debug("Node: [{}] can still accept shards. Adding it back to the queue.", targetNode.nodeId()); eligibleNodes.offer(targetNode); - nodesCheckedForShard.add(targetNode.nodeId()); + assert nodesCheckedForShard.add(targetNode.nodeId()); } else { logger.debug("Node: [{}] cannot accept any more shards. Removing it from queue.", targetNode.nodeId()); } - // Break out if all nodes in the queue have been checked for this shard - if (eligibleNodes.stream().allMatch(rn -> nodesCheckedForShard.contains(rn.nodeId()))) { - break; + // Break out if all eligible nodes have been examined + if (numNodesToCheck == 0) { + assert eligibleNodes.stream().allMatch(rn -> nodesCheckedForShard.contains(rn.nodeId())); + return; } } } @@ -225,7 +260,7 @@ void balance() { } } - while (!sourceNodes.isEmpty() && !targetNodes.isEmpty()) { + while (sourceNodes.isEmpty() == false && targetNodes.isEmpty() == false) { RoutingNode sourceNode = sourceNodes.poll(); tryRebalanceNode(sourceNode, targetNodes, avgPrimaryPerNode, nodePrimaryShardCount); } @@ -275,11 +310,11 @@ public Map<String, UnassignedIndexShards> groupUnassignedShardsByIndex() { HashMap<String, UnassignedIndexShards> unassignedShardMap = new HashMap<>(); for (ShardRouting shard : routingNodes.unassigned().drain()) { String index = shard.getIndexName(); - if (!RoutingPool.REMOTE_CAPABLE.equals(RoutingPool.getShardPool(shard, allocation))) { + if (RoutingPool.REMOTE_CAPABLE.equals(RoutingPool.getShardPool(shard, allocation)) == false) { routingNodes.unassigned().add(shard); continue; } - if (!unassignedShardMap.containsKey(index)) { + if (unassignedShardMap.containsKey(index) == false) { unassignedShardMap.put(index, new UnassignedIndexShards()); } unassignedShardMap.get(index).addShard(shard); @@ -296,13 +331,15 @@ private void unassignIgnoredRemoteShards(RoutingAllocation routingAllocation) { RoutingNodes.UnassignedShards unassignedShards = routingAllocation.routingNodes().unassigned(); for (ShardRouting shard : unassignedShards.drainIgnored()) { RoutingPool pool = RoutingPool.getShardPool(shard, routingAllocation); - if (pool == RoutingPool.REMOTE_CAPABLE && shard.unassigned() && (shard.primary() || !shard.unassignedInfo().isDelayed())) { + if (pool == RoutingPool.REMOTE_CAPABLE + && shard.unassigned() + && (shard.primary() || shard.unassignedInfo().isDelayed() == false)) { ShardRouting unassignedShard = shard; // Shard when moved to an unassigned state updates the recovery source to be ExistingStoreRecoverySource // Remote shards do not have an existing store to recover from and can be recovered from an empty source // to re-fetch any shard blocks from the repository. 
if (shard.primary()) { - if (!RecoverySource.Type.SNAPSHOT.equals(shard.recoverySource().getType())) { + if (RecoverySource.Type.SNAPSHOT.equals(shard.recoverySource().getType()) == false) { unassignedShard = shard.updateUnassigned(shard.unassignedInfo(), RecoverySource.EmptyStoreRecoverySource.INSTANCE); } } @@ -323,12 +360,16 @@ private void allocateUnassignedReplicas(Queue<RoutingNode> nodeQueue, Map<String } private void ignoreRemainingShards(Map<String, UnassignedIndexShards> unassignedShardMap) { + // If any nodes are throttled during allocation, mark all remaining unassigned shards as THROTTLED + final UnassignedInfo.AllocationStatus status = anyNodesThrottled + ? UnassignedInfo.AllocationStatus.DECIDERS_THROTTLED + : UnassignedInfo.AllocationStatus.DECIDERS_NO; for (UnassignedIndexShards indexShards : unassignedShardMap.values()) { for (ShardRouting shard : indexShards.getPrimaries()) { - routingNodes.unassigned().ignoreShard(shard, UnassignedInfo.AllocationStatus.DECIDERS_NO, allocation.changes()); + routingNodes.unassigned().ignoreShard(shard, status, allocation.changes()); } for (ShardRouting shard : indexShards.getReplicas()) { - routingNodes.unassigned().ignoreShard(shard, UnassignedInfo.AllocationStatus.DECIDERS_NO, allocation.changes()); + routingNodes.unassigned().ignoreShard(shard, status, allocation.changes()); } } } @@ -353,7 +394,7 @@ private void allocateUnassignedShards( } logger.debug("Allocating shards for index: [{}]", index); - while (!shardsToAllocate.isEmpty() && !nodeQueue.isEmpty()) { + while (shardsToAllocate.isEmpty() == false && nodeQueue.isEmpty() == false) { ShardRouting shard = shardsToAllocate.poll(); if (shard.assignedToNode()) { if (logger.isDebugEnabled()) { @@ -389,11 +430,11 @@ private void allocateUnassignedShards( private void tryAllocateUnassignedShard(Queue<RoutingNode> nodeQueue, ShardRouting shard) { boolean allocated = false; boolean throttled = false; - Set<String> nodesCheckedForShard = new HashSet<>(); - while (!nodeQueue.isEmpty()) { + int numNodesToCheck = nodeQueue.size(); + while (nodeQueue.isEmpty() == false) { RoutingNode node = nodeQueue.poll(); + --numNodesToCheck; Decision allocateDecision = allocation.deciders().canAllocate(shard, node, allocation); - nodesCheckedForShard.add(node.nodeId()); if (allocateDecision.type() == Decision.Type.YES) { if (logger.isTraceEnabled()) { logger.trace("Assigned shard [{}] to [{}]", shardShortSummary(shard), node.nodeId()); @@ -406,7 +447,7 @@ private void tryAllocateUnassignedShard(Queue<RoutingNode> nodeQueue, ShardRouti allocation.metadata(), allocation.routingTable() ); - ShardRouting initShard = routingNodes.initializeShard(shard, node.nodeId(), null, shardSize, allocation.changes()); + routingNodes.initializeShard(shard, node.nodeId(), null, shardSize, allocation.changes()); nodeQueue.offer(node); allocated = true; break; @@ -432,6 +473,10 @@ private void tryAllocateUnassignedShard(Queue<RoutingNode> nodeQueue, ShardRouti } nodeQueue.offer(node); } else { + if (nodeLevelDecision.type() == Decision.Type.THROTTLE) { + anyNodesThrottled = true; + } + if (logger.isTraceEnabled()) { logger.trace( "Cannot allocate any shard to node: [{}]. Removing from queue. 
Node level decisions: [{}],[{}]", @@ -443,15 +488,14 @@ } } // Break out if all nodes in the queue have been checked for this shard - if (nodeQueue.stream().allMatch(rn -> nodesCheckedForShard.contains(rn.nodeId()))) { - throttled = true; + if (numNodesToCheck == 0) { break; } } } - if (!allocated) { - UnassignedInfo.AllocationStatus status = throttled + if (allocated == false) { + UnassignedInfo.AllocationStatus status = (throttled || anyNodesThrottled) ? UnassignedInfo.AllocationStatus.DECIDERS_THROTTLED : UnassignedInfo.AllocationStatus.DECIDERS_NO; routingNodes.unassigned().ignoreShard(shard, status, allocation.changes()); @@ -471,14 +515,16 @@ private void tryRebalanceNode( // Try to relocate the valid shards on the sourceNode, one at a time; // until either sourceNode is balanced OR no more active primary shard available OR all the target nodes are exhausted - while (shardsToBalance > 0 && shardIterator.hasNext() && !targetNodes.isEmpty()) { + while (shardsToBalance > 0 && shardIterator.hasNext() && targetNodes.isEmpty() == false) { // Find an active primary shard to relocate ShardRouting shard = shardIterator.next(); - if (!shard.started() || !shard.primary() || !RoutingPool.REMOTE_CAPABLE.equals(RoutingPool.getShardPool(shard, allocation))) { + if (shard.started() == false + || shard.primary() == false + || RoutingPool.REMOTE_CAPABLE.equals(RoutingPool.getShardPool(shard, allocation)) == false) { continue; } - while (!targetNodes.isEmpty()) { + while (targetNodes.isEmpty() == false) { // Find a valid target node that can accommodate the current shard relocation RoutingNode targetNode = targetNodes.poll(); if (primaryCount.get(targetNode.nodeId()) >= avgPrimary) { @@ -486,6 +532,10 @@ continue; } + if (targetNode.getByShardId(shard.shardId()) != null) { + continue; + } + // Try relocate the shard on the target node Decision rebalanceDecision = tryRelocateShard(shard, targetNode); @@ -523,21 +573,10 @@ } /** - * For every primary shard for which this method is invoked, - * swap is attempted with the destination node in case replica shard is present. - * In case replica is not present, relocation of the shard id performed. + * For every primary shard for which this method is invoked, relocation of the shard is performed. */ private Decision tryRelocateShard(ShardRouting shard, RoutingNode destinationNode) { - // Check if there is already a replica for the shard on the destination node. - // Then we can directly swap the replica with the primary shards. - // Invariant: We only allow swap relocation on remote shards.
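
With the swap path removed here, the invariant moves to the caller: tryRebalanceNode now skips any target that already holds a copy of the shard (the new getByShardId check above), which is exactly what the new assert in tryRelocateShard verifies. Condensed:

    // caller (tryRebalanceNode): never offer a target that already holds this shard id
    if (targetNode.getByShardId(shard.shardId()) != null) {
        continue;                                  // pick a different target; no more swapping
    }
    // callee (tryRelocateShard): the invariant can therefore be asserted outright
    assert destinationNode.getByShardId(shard.shardId()) == null;
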
- ShardRouting replicaShard = destinationNode.getByShardId(shard.shardId()); - if (replicaShard != null) { - assert !replicaShard.primary() : "Primary Shard found while expected Replica during shard rebalance"; - return executeSwapShard(shard, replicaShard, allocation); - } - - // Since no replica present on the destinationNode; try relocating the shard to the destination node + assert destinationNode.getByShardId(shard.shardId()) == null; Decision allocationDecision = allocation.deciders().canAllocate(shard, destinationNode, allocation); Decision rebalanceDecision = allocation.deciders().canRebalance(shard, allocation); logger.trace( @@ -567,15 +606,6 @@ private Decision tryRelocateShard(ShardRouting shard, RoutingNode destinationNod return Decision.NO; } - private Decision executeSwapShard(ShardRouting primaryShard, ShardRouting replicaShard, RoutingAllocation allocation) { - if (!replicaShard.started()) { - return new Decision.Single(Decision.Type.NO); - } - - allocation.routingNodes().swapPrimaryWithReplica(logger, primaryShard, replicaShard, allocation.changes()); - return new Decision.Single(Decision.Type.YES); - } - private void failUnattemptedShards() { RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator(); while (unassignedIterator.hasNext()) { diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/ShardsAllocator.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/ShardsAllocator.java index 63d8c656f5049..29e9acca4e6c2 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/ShardsAllocator.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/ShardsAllocator.java @@ -63,12 +63,12 @@ public interface ShardsAllocator { * Returns the decision for where a shard should reside in the cluster. If the shard is unassigned, * then the {@link AllocateUnassignedDecision} will be non-null. If the shard is not in the unassigned * state, then the {@link MoveDecision} will be non-null. - * + * <p> * This method is primarily used by the cluster allocation explain API to provide detailed explanations * for the allocation of a single shard. Implementations of the {@link #allocate(RoutingAllocation)} method * may use the results of this method implementation to decide on allocating shards in the routing table * to the cluster. - * + * <p> * If an implementation of this interface does not support explaining decisions for a single shard through * the cluster explain API, then this method should throw a {@code UnsupportedOperationException}. 
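
For allocators that do support explain, exactly one half of the returned decision is populated; a consumption sketch (assumes shard and allocation are in scope):

    ShardAllocationDecision decision = shardsAllocator.decideShardAllocation(shard, allocation);
    if (decision.getAllocateDecision().isDecisionTaken()) {
        // shard was unassigned: explains where, or why nowhere, it can be placed
    } else if (decision.getMoveDecision().isDecisionTaken()) {
        // shard is assigned: explains whether it must move or should rebalance
    }
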
*/ diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java index f5093ef4d9243..9ea9ca617ac9d 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java @@ -44,10 +44,10 @@ import org.opensearch.cluster.routing.allocation.decider.Decision; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.IndexNotFoundException; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.ShardNotFoundException; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/command/AllocationCommand.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/command/AllocationCommand.java index 38a7d83ccc7b5..bce3191240323 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/command/AllocationCommand.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/command/AllocationCommand.java @@ -35,19 +35,21 @@ import org.opensearch.OpenSearchException; import org.opensearch.cluster.routing.allocation.RerouteExplanation; import org.opensearch.cluster.routing.allocation.RoutingAllocation; -import org.opensearch.core.common.io.stream.NamedWriteable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.network.NetworkModule; +import org.opensearch.core.common.io.stream.NamedWriteable; import org.opensearch.core.xcontent.ToXContentObject; import java.util.Optional; /** * A command to move shards in some way. - * + * <p> * Commands are registered in {@link NetworkModule}. 
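
AllocationCommand implementations are composed through the AllocationCommands container that follows; a sketch (index, shard and node arguments are invented):

    AllocationCommands commands = new AllocationCommands(
        new CancelAllocationCommand("my-index", 0, "node-1", false),
        new MoveAllocationCommand("my-index", 0, "node-1", "node-2")
    );
    // commands.execute(allocation, explain) then applies them in order
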
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface AllocationCommand extends NamedWriteable, ToXContentObject { /** diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/command/AllocationCommands.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/command/AllocationCommands.java index e72fe26def31b..2ee4124e9d909 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/command/AllocationCommands.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/command/AllocationCommands.java @@ -36,10 +36,11 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.cluster.routing.allocation.RoutingAllocation; import org.opensearch.cluster.routing.allocation.RoutingExplanations; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -54,8 +55,9 @@ * A simple {@link AllocationCommand} composite managing several * {@link AllocationCommand} implementations * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class AllocationCommands implements ToXContentFragment { private final List<AllocationCommand> commands = new ArrayList<>(); @@ -217,6 +219,6 @@ public int hashCode() { @Override public String toString() { - return Strings.toString(XContentType.JSON, this, true, true); + return Strings.toString(MediaTypeRegistry.JSON, this, true, true); } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/command/CancelAllocationCommand.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/command/CancelAllocationCommand.java index 9ec38bc015604..a07f3eb9d95e1 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/command/CancelAllocationCommand.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/command/CancelAllocationCommand.java @@ -46,10 +46,10 @@ import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.IndexNotFoundException; -import org.opensearch.core.index.shard.ShardId; import java.io.IOException; import java.util.Locale; diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AllocationDecider.java index 24c3fd7f34e4a..19faacc3a3ae1 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AllocationDecider.java @@ -38,14 +38,16 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.allocation.RoutingAllocation; import org.opensearch.cluster.routing.allocation.decider.Decision.Type; +import org.opensearch.common.annotation.PublicApi; /** 
* {@link AllocationDecider} is an abstract base class that allows to make * dynamic cluster- or index-wide shard allocation decisions on a per-node * basis. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class AllocationDecider { /** * Returns a {@link Decision} whether the given shard routing can be @@ -109,7 +111,7 @@ public Decision canRebalance(RoutingAllocation allocation) { * Returns a {@link Decision} whether the given primary shard can be * forcibly allocated on the given node. This method should only be called * for unassigned primary shards where the node has a shard copy on disk. - * + * <p> * Note: all implementations that override this behavior should take into account * the results of {@link #canAllocate(ShardRouting, RoutingNode, RoutingAllocation)} * before making a decision on force allocation, because force allocation should only diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AllocationDeciders.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AllocationDeciders.java index dca82f75d6fb5..1263efd19ac46 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AllocationDeciders.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AllocationDeciders.java @@ -39,6 +39,7 @@ import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.allocation.RoutingAllocation; +import org.opensearch.common.annotation.PublicApi; import java.util.Collection; import java.util.Collections; @@ -47,8 +48,9 @@ * A composite {@link AllocationDecider} combining the "decision" of multiple * {@link AllocationDecider} implementations into a single allocation decision. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class AllocationDeciders extends AllocationDecider { private static final Logger logger = LogManager.getLogger(AllocationDeciders.class); @@ -255,7 +257,7 @@ public Decision canAllocateAnyShardToNode(RoutingNode node, RoutingAllocation al Decision.Multi ret = new Decision.Multi(); for (AllocationDecider decider : allocations) { Decision decision = decider.canAllocateAnyShardToNode(node, allocation); - if (decision.type().canPremptivelyReturn()) { + if (decision.type().canPreemptivelyReturn()) { if (logger.isTraceEnabled()) { logger.trace("Shard can not be allocated on node [{}] due to [{}]", node.nodeId(), decider.getClass().getSimpleName()); } @@ -277,7 +279,7 @@ public Decision canMoveAway(ShardRouting shardRouting, RoutingAllocation allocat for (AllocationDecider decider : allocations) { Decision decision = decider.canMoveAway(shardRouting, allocation); // short track if a NO is returned. - if (decision.type().canPremptivelyReturn()) { + if (decision.type().canPreemptivelyReturn()) { if (logger.isTraceEnabled()) { logger.trace("Shard [{}] can not be moved away due to [{}]", shardRouting, decider.getClass().getSimpleName()); } @@ -299,7 +301,7 @@ public Decision canMoveAnyShard(RoutingAllocation allocation) { for (AllocationDecider decider : allocations) { Decision decision = decider.canMoveAnyShard(allocation); // short track if a NO is returned. 
- if (decision.type().canPremptivelyReturn()) { + if (decision.type().canPreemptivelyReturn()) { if (allocation.debugDecision() == false) { return decision; } else { diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java index f0b79194af438..5344d95b217a7 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java @@ -32,13 +32,6 @@ package org.opensearch.cluster.routing.allocation.decider; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.function.Function; - import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.ShardRouting; @@ -49,6 +42,13 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.common.Strings; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.Function; + import static java.util.Collections.emptyList; /** diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java index 75c4d2aa3953d..857192d4f3041 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java @@ -32,8 +32,6 @@ package org.opensearch.cluster.routing.allocation.decider; -import java.util.Locale; - import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.cluster.routing.ShardRouting; @@ -43,6 +41,8 @@ import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; +import java.util.Locale; + /** * This {@link AllocationDecider} controls re-balancing operations based on the * cluster wide active shard state. 
This decided can not be configured in diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ConcurrentRecoveriesAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ConcurrentRecoveriesAllocationDecider.java index 817682d8c50f5..62276c470a86b 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ConcurrentRecoveriesAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ConcurrentRecoveriesAllocationDecider.java @@ -13,6 +13,8 @@ package org.opensearch.cluster.routing.allocation.decider; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.allocation.RoutingAllocation; @@ -20,8 +22,6 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; /** * This {@link AllocationDecider} controls the number of currently in-progress diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/Decision.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/Decision.java index 807ab070b82b1..938c457606c79 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/Decision.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/Decision.java @@ -33,6 +33,7 @@ package org.opensearch.cluster.routing.allocation.decider; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -54,8 +55,9 @@ * * @see AllocationDecider * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class Decision implements ToXContent, Writeable { public static final Decision ALWAYS = new Single(Type.YES); @@ -98,8 +100,9 @@ public static Decision readFrom(StreamInput in) throws IOException { * This enumeration defines the * possible types of decisions * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Type implements Writeable { YES(1), THROTTLE(2), @@ -141,7 +144,7 @@ public boolean higherThan(Type other) { return false; } - public boolean canPremptivelyReturn() { + public boolean canPreemptivelyReturn() { return this == THROTTLE || this == NO; } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index 4b69c05807ae4..2c7df6b81e676 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -47,11 +47,11 @@ import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.allocation.DiskThresholdSettings; import org.opensearch.cluster.routing.allocation.RoutingAllocation; -import org.opensearch.common.Strings; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import 
org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.store.remote.filecache.FileCacheStats; @@ -68,28 +68,28 @@ import static org.opensearch.cluster.routing.RoutingPool.getShardPool; import static org.opensearch.cluster.routing.allocation.DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING; import static org.opensearch.cluster.routing.allocation.DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING; -import static org.opensearch.index.store.remote.filecache.FileCache.DATA_TO_FILE_CACHE_SIZE_RATIO; +import static org.opensearch.index.store.remote.filecache.FileCache.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING; /** * The {@link DiskThresholdDecider} checks that the node a shard is potentially * being allocated to has enough disk space. - * + * <p> * It has three configurable settings, all of which can be changed dynamically: - * + * <p> * <code>cluster.routing.allocation.disk.watermark.low</code> is the low disk * watermark. New shards will not allocated to a node with usage higher than this, * although this watermark may be passed by allocating a shard. It defaults to * 0.85 (85.0%). - * + * <p> * <code>cluster.routing.allocation.disk.watermark.high</code> is the high disk * watermark. If a node has usage higher than this, shards are not allowed to * remain on the node. In addition, if allocating a shard to a node causes the * node to pass this watermark, it will not be allowed. It defaults to * 0.90 (90.0%). - * + * <p> * Both watermark settings are expressed in terms of used disk percentage, or * exact byte values for free space (like "500mb") - * + * <p> * <code>cluster.routing.allocation.disk.threshold_enabled</code> is used to * enable or disable this decider. It defaults to true (enabled). * @@ -119,7 +119,7 @@ public DiskThresholdDecider(Settings settings, ClusterSettings clusterSettings) /** * Returns the size of all shards that are currently being relocated to * the node, but may not be finished transferring yet. - * + * <p> * If subtractShardsMovingAway is true then the size of shards moving away is subtracted from the total size of all shards */ public static long sizeOfRelocatingShards( @@ -199,8 +199,8 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing final FileCacheStats fileCacheStats = clusterInfo.getNodeFileCacheStats().getOrDefault(node.nodeId(), null); final long nodeCacheSize = fileCacheStats != null ? 
fileCacheStats.getTotal().getBytes() : 0; final long totalNodeRemoteShardSize = currentNodeRemoteShardSize + shardSize; - - if (totalNodeRemoteShardSize > DATA_TO_FILE_CACHE_SIZE_RATIO * nodeCacheSize) { + final double dataToFileCacheSizeRatio = DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING.get(allocation.metadata().settings()); + if (dataToFileCacheSizeRatio > 0.0f && totalNodeRemoteShardSize > dataToFileCacheSizeRatio * nodeCacheSize) { return allocation.decision( Decision.NO, NAME, diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/EnableAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/EnableAllocationDecider.java index a51546920c6a0..c24346fc0e34e 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/EnableAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/EnableAllocationDecider.java @@ -32,8 +32,6 @@ package org.opensearch.cluster.routing.allocation.decider; -import java.util.Locale; - import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RoutingNode; @@ -44,6 +42,8 @@ import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; +import java.util.Locale; + /** * This allocation decider allows shard allocations / rebalancing via the cluster wide settings * {@link #CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING} / {@link #CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING} and the per index setting diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/NodeLoadAwareAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/NodeLoadAwareAllocationDecider.java index c43fb3be214a9..f26612b5ded8e 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/NodeLoadAwareAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/NodeLoadAwareAllocationDecider.java @@ -17,8 +17,8 @@ import org.opensearch.cluster.routing.allocation.RoutingAllocation; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; -import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.Setting.Property; +import org.opensearch.common.settings.Settings; import java.util.function.BiPredicate; diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java index 19b7494c000de..9344b4c87830d 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java @@ -32,12 +32,18 @@ package org.opensearch.cluster.routing.allocation.decider; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RecoverySource.SnapshotRecoverySource; import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.RoutingNodes; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.allocation.RoutingAllocation; +import org.opensearch.common.settings.Settings; +import 
org.opensearch.indices.replication.common.ReplicationType; + +import java.util.List; +import java.util.stream.Collectors; /** * An allocation decider that prevents relocation or allocation from nodes @@ -52,9 +58,35 @@ public class NodeVersionAllocationDecider extends AllocationDecider { public static final String NAME = "node_version"; + private final ReplicationType replicationType; + + public NodeVersionAllocationDecider(Settings settings) { + replicationType = IndexMetadata.INDEX_REPLICATION_TYPE_SETTING.get(settings); + } + @Override public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { if (shardRouting.primary()) { + if (replicationType == ReplicationType.SEGMENT) { + List<ShardRouting> replicas = allocation.routingNodes() + .assignedShards(shardRouting.shardId()) + .stream() + .filter(shr -> !shr.primary() && shr.active()) + .collect(Collectors.toList()); + for (ShardRouting replica : replicas) { + // can not allocate if target node version > any existing replica version + RoutingNode replicaNode = allocation.routingNodes().node(replica.currentNodeId()); + if (node.node().getVersion().after(replicaNode.node().getVersion())) { + return allocation.decision( + Decision.NO, + NAME, + "When segment replication is enabled, cannot relocate primary shard to a node with version [%s] if it has a replica on older version [%s]", + node.node().getVersion(), + replicaNode.node().getVersion() + ); + } + } + } if (shardRouting.currentNodeId() == null) { if (shardRouting.recoverySource() != null && shardRouting.recoverySource().getType() == RecoverySource.Type.SNAPSHOT) { // restoring from a snapshot - check that the node can handle the version diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java index 1680f2d8cad1d..c2eccdbc6ed26 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java @@ -44,7 +44,7 @@ /** * An allocation decider that prevents multiple instances of the same shard to * be allocated on the same {@code node}. - * + * <p> * The {@link #CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING} setting allows to perform a check to prevent * allocation of multiple instances of the same shard on a single {@code host}, * based on host name and host address. Defaults to `false`, meaning that no diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/TargetPoolAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/TargetPoolAllocationDecider.java index c11f5823cf3a7..76f9f44077ad8 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/TargetPoolAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/TargetPoolAllocationDecider.java @@ -44,7 +44,7 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing return allocation.decision( Decision.NO, NAME, - "Routing pools are incompatible. Shard pool: [%s], Node Pool: [%s]", + "Routing pools are incompatible. 
Shard pool: [%s], node pool: [%s]", shardPool, targetNodePool ); @@ -56,21 +56,21 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing shardRouting, shardPool, node.node(), - DiscoveryNodeRole.DATA_ROLE + DiscoveryNodeRole.DATA_ROLE.roleName() ); return allocation.decision( Decision.NO, NAME, - "Routing pools are incompatible. Shard pool: [{}], Node Pool: [{}] without [{}] role", + "Routing pools are incompatible. Shard pool: [%s], node pool: [%s] without [%s] role", shardPool, targetNodePool, - DiscoveryNodeRole.DATA_ROLE + DiscoveryNodeRole.DATA_ROLE.roleName() ); } return allocation.decision( Decision.YES, NAME, - "Routing pools are compatible. Shard pool: [%s], Node Pool: [%s]", + "Routing pools are compatible. Shard pool: [%s], node pool: [%s]", shardPool, targetNodePool ); @@ -106,7 +106,7 @@ private Decision canAllocateInTargetPool(IndexMetadata indexMetadata, DiscoveryN return allocation.decision( Decision.NO, NAME, - "Routing pools are incompatible. Index pool: [%s], Node Pool: [%s]", + "Routing pools are incompatible. Index pool: [%s], node pool: [%s]", indexPool, targetNodePool ); @@ -118,21 +118,21 @@ private Decision canAllocateInTargetPool(IndexMetadata indexMetadata, DiscoveryN indexMetadata.getIndex().getName(), indexPool, node, - DiscoveryNodeRole.DATA_ROLE + DiscoveryNodeRole.DATA_ROLE.roleName() ); return allocation.decision( Decision.NO, NAME, - "Routing pools are incompatible. Index pool: [{}], Node Pool: [{}] without [{}] role", + "Routing pools are incompatible. Index pool: [%s], node pool: [%s] without [%s] role", indexPool, targetNodePool, - DiscoveryNodeRole.DATA_ROLE + DiscoveryNodeRole.DATA_ROLE.roleName() ); } return allocation.decision( Decision.YES, NAME, - "Routing pools are compatible. Index pool: [%s], Node Pool: [%s]", + "Routing pools are compatible. Index pool: [%s], node pool: [%s]", indexPool, targetNodePool ); diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java index 3a9fdf0ea10cf..26a04de31ce39 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java @@ -323,7 +323,7 @@ private Decision allocateShardCopies( * - the initializing shard routing if we want to assign the initializing shard to this node instead * - the started shard routing in case if we want to check if we can relocate to this node. * - the relocating shard routing if we want to relocate to this node now instead. - * + * <p> * This method returns the corresponding initializing shard that would be allocated to this node. 
*/ private ShardRouting initializingShard(ShardRouting shardRouting, String currentNodeId) { diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterApplier.java b/server/src/main/java/org/opensearch/cluster/service/ClusterApplier.java index 939feb89b6054..5b3f7f1001779 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterApplier.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterApplier.java @@ -33,6 +33,7 @@ package org.opensearch.cluster.service; import org.opensearch.cluster.ClusterState; +import org.opensearch.common.annotation.PublicApi; import java.util.function.Supplier; @@ -59,7 +60,10 @@ public interface ClusterApplier { /** * Listener for results of cluster state application + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") interface ClusterApplyListener { /** * Called on successful cluster state application diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java index 7f1c9f01f7e6f..a55721fb13cdc 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java @@ -51,15 +51,16 @@ import org.opensearch.common.Priority; import org.opensearch.common.StopWatch; import org.opensearch.common.StopWatch.TimingHandle; -import org.opensearch.common.component.AbstractLifecycleComponent; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.OpenSearchExecutors; -import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.threadpool.Scheduler; import org.opensearch.threadpool.ThreadPool; @@ -82,8 +83,9 @@ /** * Service that provides callbacks when cluster state changes * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterApplierService extends AbstractLifecycleComponent implements ClusterApplier { private static final Logger logger = LogManager.getLogger(ClusterApplierService.class); @@ -294,7 +296,7 @@ public void addLocalNodeMasterListener(LocalNodeMasterListener listener) { /** * Adds a cluster state listener that is expected to be removed during a short period of time. * If provided, the listener will be notified once a specific time has elapsed. - * + * <p> * NOTE: the listener is not removed on timeout. This is the responsibility of the caller. 
*/ public void addTimeoutListener(@Nullable final TimeValue timeout, final TimeoutClusterStateListener listener) { diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerService.java index 74b623dd95e6f..e9224596e048d 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerService.java @@ -8,6 +8,7 @@ package org.opensearch.cluster.service; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.threadpool.ThreadPool; @@ -15,8 +16,9 @@ /** * Main Cluster Manager Node Service * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.2.0") public class ClusterManagerService extends MasterService { public ClusterManagerService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { super(settings, clusterSettings, threadPool); diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java index 8da6b1b941f83..827f3a12fbce4 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java @@ -12,6 +12,7 @@ import org.apache.logging.log4j.Logger; import org.opensearch.Version; import org.opensearch.cluster.ClusterStateTaskExecutor; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; @@ -29,7 +30,7 @@ /** * This class does throttling on task submission to cluster manager node, it uses throttling key defined in various executors * as key for throttling. Throttling will be performed over task executor's class level, different task types have different executors class. - * + * <p> * Set specific setting to for setting the threshold of throttling of particular task type. * e.g : Set "cluster_manager.throttling.thresholds.put_mapping" to set throttling limit of "put mapping" tasks, * Set it to default value(-1) to disable the throttling for this task type. @@ -117,9 +118,9 @@ public static TimeValue getMaxDelayForRetry() { * * Register task to cluster service with task key, * * override getClusterManagerThrottlingKey method with above task key in task executor. * * Verify that throttled tasks would be retried from data nodes - * + * <p> * Added retry mechanism in TransportClusterManagerNodeAction, so it would be retried for customer generated tasks. - * + * <p> * If tasks are not getting retried then we can register with false flag, so user won't be able to configure threshold limits for it. */ protected ThrottlingKey registerClusterManagerTask(String taskKey, boolean throttlingEnabled) { @@ -133,7 +134,10 @@ protected ThrottlingKey registerClusterManagerTask(String taskKey, boolean throt /** * Class to store the throttling key for the tasks of cluster manager + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class ThrottlingKey { private String taskThrottlingKey; private boolean throttlingEnabled; @@ -236,7 +240,7 @@ public void onBeginSubmit(List<? 
extends TaskBatcher.BatchedTask> tasks) { * It may start throwing throttling exception to older nodes in cluster. * Older version nodes will not be equipped to handle the throttling exception and * this may result in unexpected behavior where internal tasks would start failing without any retries. - * + * <p> * For every task submission request, it will validate if nodes version is greater or equal to 2.5.0 and set the startThrottling flag. * Once the startThrottling flag is set, it will not perform check for next set of tasks. */ diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerThrottlingStats.java b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerThrottlingStats.java index d9b292a71f625..6a910e5f88536 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerThrottlingStats.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerThrottlingStats.java @@ -8,10 +8,11 @@ package org.opensearch.cluster.service; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.metrics.CounterMetric; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.metrics.CounterMetric; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -23,7 +24,10 @@ /** * Contains stats of Cluster Manager Task Throttling. * It stores the total cumulative count of throttled tasks per task type. + * + * @opensearch.api */ +@PublicApi(since = "2.5.0") public class ClusterManagerThrottlingStats implements ClusterManagerTaskThrottlerListener, Writeable, ToXContentFragment { private Map<String, CounterMetric> throttledTasksCount; diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java index a605c41bdeff8..aa7766979e851 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java @@ -45,7 +45,8 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.OperationRouting; import org.opensearch.cluster.routing.RerouteService; -import org.opensearch.common.component.AbstractLifecycleComponent; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; @@ -60,8 +61,9 @@ /** * Main Cluster Service * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterService extends AbstractLifecycleComponent { private final ClusterManagerService clusterManagerService; diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterStateStats.java b/server/src/main/java/org/opensearch/cluster/service/ClusterStateStats.java new file mode 100644 index 0000000000000..79263e31b8f49 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterStateStats.java @@ -0,0 +1,122 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster.service; + +import org.opensearch.cluster.coordination.PersistedStateStats; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.atomic.AtomicLong; + +/** + * Cluster state related stats. + * + * @opensearch.api + */ +@PublicApi(since = "2.12.0") +public class ClusterStateStats implements Writeable, ToXContentObject { + + private AtomicLong updateSuccess = new AtomicLong(0); + private AtomicLong updateTotalTimeInMillis = new AtomicLong(0); + private AtomicLong updateFailed = new AtomicLong(0); + private List<PersistedStateStats> persistenceStats = new ArrayList<>(); + + public ClusterStateStats() {} + + public long getUpdateSuccess() { + return updateSuccess.get(); + } + + public long getUpdateTotalTimeInMillis() { + return updateTotalTimeInMillis.get(); + } + + public long getUpdateFailed() { + return updateFailed.get(); + } + + public List<PersistedStateStats> getPersistenceStats() { + return persistenceStats; + } + + public void stateUpdated() { + updateSuccess.incrementAndGet(); + } + + public void stateUpdateFailed() { + updateFailed.incrementAndGet(); + } + + public void stateUpdateTook(long stateUpdateTime) { + updateTotalTimeInMillis.addAndGet(stateUpdateTime); + } + + public ClusterStateStats setPersistenceStats(List<PersistedStateStats> persistenceStats) { + this.persistenceStats = persistenceStats; + return this; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(updateSuccess.get()); + out.writeVLong(updateTotalTimeInMillis.get()); + out.writeVLong(updateFailed.get()); + out.writeVInt(persistenceStats.size()); + for (PersistedStateStats stats : persistenceStats) { + stats.writeTo(out); + } + } + + public ClusterStateStats(StreamInput in) throws IOException { + this.updateSuccess = new AtomicLong(in.readVLong()); + this.updateTotalTimeInMillis = new AtomicLong(in.readVLong()); + this.updateFailed = new AtomicLong(in.readVLong()); + int persistedStatsSize = in.readVInt(); + this.persistenceStats = new ArrayList<>(); + for (int statsNumber = 0; statsNumber < persistedStatsSize; statsNumber++) { + PersistedStateStats stats = new PersistedStateStats(in); + this.persistenceStats.add(stats); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(Fields.CLUSTER_STATE_STATS); + builder.startObject(Fields.OVERALL); + builder.field(Fields.UPDATE_COUNT, getUpdateSuccess()); + builder.field(Fields.TOTAL_TIME_IN_MILLIS, getUpdateTotalTimeInMillis()); + builder.field(Fields.FAILED_COUNT, getUpdateFailed()); + builder.endObject(); + for (PersistedStateStats stats : persistenceStats) { + stats.toXContent(builder, params); + } + builder.endObject(); + return builder; + } + + /** + * Fields for parsing and toXContent + * + * @opensearch.internal + */ + static final class Fields { + static final String CLUSTER_STATE_STATS = "cluster_state_stats"; + static final String OVERALL = "overall"; + static final String UPDATE_COUNT = "update_count"; + static final String TOTAL_TIME_IN_MILLIS = "total_time_in_millis"; + static final String FAILED_COUNT = 
"failed_count"; + } +} diff --git a/server/src/main/java/org/opensearch/cluster/service/MasterService.java b/server/src/main/java/org/opensearch/cluster/service/MasterService.java index 790efaef95292..af3e4f8437c43 100644 --- a/server/src/main/java/org/opensearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/MasterService.java @@ -35,7 +35,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.core.Assertions; import org.opensearch.Version; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.AckedClusterStateTaskListener; @@ -55,18 +54,20 @@ import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.common.Nullable; import org.opensearch.common.Priority; -import org.opensearch.common.component.AbstractLifecycleComponent; +import org.opensearch.common.annotation.DeprecatedApi; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.core.common.text.Text; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.CountDown; -import org.opensearch.common.util.concurrent.OpenSearchExecutors; -import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.common.util.concurrent.FutureUtils; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.Assertions; +import org.opensearch.core.common.text.Text; +import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.discovery.Discovery; import org.opensearch.node.Node; import org.opensearch.threadpool.Scheduler; @@ -87,10 +88,11 @@ /** * Main Master Node Service * - * @opensearch.internal + * @opensearch.api * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link ClusterManagerService}. 
*/ @Deprecated +@DeprecatedApi(since = "2.2.0") public class MasterService extends AbstractLifecycleComponent { private static final Logger logger = LogManager.getLogger(MasterService.class); @@ -112,7 +114,9 @@ public class MasterService extends AbstractLifecycleComponent { static final String CLUSTER_MANAGER_UPDATE_THREAD_NAME = "clusterManagerService#updateTask"; - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #CLUSTER_MANAGER_UPDATE_THREAD_NAME} */ + /** + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #CLUSTER_MANAGER_UPDATE_THREAD_NAME} + */ @Deprecated static final String MASTER_UPDATE_THREAD_NAME = "masterService#updateTask"; @@ -130,6 +134,7 @@ public class MasterService extends AbstractLifecycleComponent { private volatile Batcher taskBatcher; protected final ClusterManagerTaskThrottler clusterManagerTaskThrottler; private final ClusterManagerThrottlingStats throttlingStats; + private final ClusterStateStats stateStats; public MasterService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { this.nodeName = Objects.requireNonNull(Node.NODE_NAME_SETTING.get(settings)); @@ -147,6 +152,7 @@ public MasterService(Settings settings, ClusterSettings clusterSettings, ThreadP this::getMinNodeVersion, throttlingStats ); + this.stateStats = new ClusterStateStats(); this.threadPool = threadPool; } @@ -339,7 +345,7 @@ private TimeValue getTimeSince(long startTimeNanos) { return TimeValue.timeValueMillis(TimeValue.nsecToMSec(threadPool.preciseRelativeTimeInNanos() - startTimeNanos)); } - protected void publish(ClusterChangedEvent clusterChangedEvent, TaskOutputs taskOutputs, long startTimeMillis) { + protected void publish(ClusterChangedEvent clusterChangedEvent, TaskOutputs taskOutputs, long startTimeNanos) { final PlainActionFuture<Void> fut = new PlainActionFuture<Void>() { @Override protected boolean blockingAllowed() { @@ -352,8 +358,12 @@ protected boolean blockingAllowed() { try { FutureUtils.get(fut); onPublicationSuccess(clusterChangedEvent, taskOutputs); + final long durationMillis = getTimeSince(startTimeNanos).millis(); + stateStats.stateUpdateTook(durationMillis); + stateStats.stateUpdated(); } catch (Exception e) { - onPublicationFailed(clusterChangedEvent, taskOutputs, startTimeMillis, e); + stateStats.stateUpdateFailed(); + onPublicationFailed(clusterChangedEvent, taskOutputs, startTimeNanos, e); } } @@ -464,7 +474,6 @@ public Builder incrementVersion(ClusterState clusterState) { * @param source the source of the cluster state update task * @param updateTask the full context for the cluster state update * task - * */ public <T extends ClusterStateTaskConfig & ClusterStateTaskExecutor<T> & ClusterStateTaskListener> void submitStateUpdateTask( String source, @@ -490,7 +499,6 @@ public <T extends ClusterStateTaskConfig & ClusterStateTaskExecutor<T> & Cluster * @param listener callback after the cluster state update task * completes * @param <T> the type of the cluster state update task state - * */ public <T> void submitStateUpdateTask( String source, @@ -947,7 +955,7 @@ void onNoLongerClusterManager() { /** * Functionality for register task key to cluster manager node. 
* - * @param taskKey - task key of task + * @param taskKey - task key of task * @param throttlingEnabled - throttling is enabled for task or not i.e does data node perform retries on it or not * @return throttling task key which needs to be passed while submitting task to cluster manager */ @@ -966,7 +974,6 @@ public ClusterManagerTaskThrottler.ThrottlingKey registerClusterManagerTask(Stri * that share the same executor will be executed * batches on this executor * @param <T> the type of the cluster state update task state - * */ public <T> void submitStateUpdateTasks( final String source, @@ -996,4 +1003,8 @@ public <T> void submitStateUpdateTasks( } } + public ClusterStateStats getClusterStateStats() { + return stateStats; + } + } diff --git a/server/src/main/java/org/opensearch/cluster/service/PendingClusterTask.java b/server/src/main/java/org/opensearch/cluster/service/PendingClusterTask.java index 5406fa39e3059..b06c537e7bac5 100644 --- a/server/src/main/java/org/opensearch/cluster/service/PendingClusterTask.java +++ b/server/src/main/java/org/opensearch/cluster/service/PendingClusterTask.java @@ -33,19 +33,21 @@ package org.opensearch.cluster.service; import org.opensearch.common.Priority; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.common.text.Text; -import org.opensearch.common.unit.TimeValue; import java.io.IOException; /** * Represents a task that is pending in the cluster * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class PendingClusterTask implements Writeable { private long insertOrder; diff --git a/server/src/main/java/org/opensearch/cluster/service/TaskBatcher.java b/server/src/main/java/org/opensearch/cluster/service/TaskBatcher.java index 634bd96547fc2..5e58f495a16fb 100644 --- a/server/src/main/java/org/opensearch/cluster/service/TaskBatcher.java +++ b/server/src/main/java/org/opensearch/cluster/service/TaskBatcher.java @@ -36,8 +36,8 @@ import org.opensearch.common.Nullable; import org.opensearch.common.Priority; import org.opensearch.common.unit.TimeValue; -import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor; +import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import java.util.ArrayList; import java.util.Collections; diff --git a/server/src/main/java/org/opensearch/common/AsyncBiFunction.java b/server/src/main/java/org/opensearch/common/AsyncBiFunction.java index 12e6377682723..575e357665964 100644 --- a/server/src/main/java/org/opensearch/common/AsyncBiFunction.java +++ b/server/src/main/java/org/opensearch/common/AsyncBiFunction.java @@ -31,7 +31,7 @@ package org.opensearch.common; -import org.opensearch.action.ActionListener; +import org.opensearch.core.action.ActionListener; /** * A {@link java.util.function.BiFunction}-like interface designed to be used with asynchronous executions. 
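The MasterService hunk above wires the new ClusterStateStats into the publication path: a successful publication records the elapsed time and then bumps the success counter, while a failed publication only bumps the failure counter. Below is a minimal sketch of that accounting contract, using only the ClusterStateStats API added in this diff; the standalone class, the 42 ms duration, and the printed checks are illustrative only and assume the server module is on the classpath.

```java
import org.opensearch.cluster.service.ClusterStateStats;

public class ClusterStateStatsSketch {
    public static void main(String[] args) {
        ClusterStateStats stats = new ClusterStateStats();

        // Successful publication: record the duration first, then the success,
        // mirroring the order used in MasterService#publish above.
        stats.stateUpdateTook(42L); // hypothetical 42 ms publication time
        stats.stateUpdated();

        // Failed publication: only the failure counter moves; no time is recorded.
        stats.stateUpdateFailed();

        System.out.println(stats.getUpdateSuccess());           // 1
        System.out.println(stats.getUpdateFailed());            // 1
        System.out.println(stats.getUpdateTotalTimeInMillis()); // 42
    }
}
```

Note that stateUpdateTook is invoked only on the success path (the duration comes from getTimeSince(startTimeNanos).millis()), so the total_time_in_millis field rendered under cluster_state_stats accumulates publication time for successful updates only.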
diff --git a/server/src/main/java/org/opensearch/common/FieldMemoryStats.java b/server/src/main/java/org/opensearch/common/FieldMemoryStats.java index 1f8a6aba0c883..2e04f39e48c4c 100644 --- a/server/src/main/java/org/opensearch/common/FieldMemoryStats.java +++ b/server/src/main/java/org/opensearch/common/FieldMemoryStats.java @@ -32,10 +32,11 @@ package org.opensearch.common; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.XContentBuilder; import java.io.IOException; @@ -47,8 +48,9 @@ /** * A reusable class to encode {@code field -> memory size} mappings * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class FieldMemoryStats implements Writeable, Iterable<Map.Entry<String, Long>> { private final Map<String, Long> stats; diff --git a/server/src/main/java/org/opensearch/common/Priority.java b/server/src/main/java/org/opensearch/common/Priority.java index 09a751362c945..4f03d6e363550 100644 --- a/server/src/main/java/org/opensearch/common/Priority.java +++ b/server/src/main/java/org/opensearch/common/Priority.java @@ -32,6 +32,7 @@ package org.opensearch.common; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -40,8 +41,9 @@ /** * Priority levels. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum Priority { IMMEDIATE((byte) 0), diff --git a/server/src/main/java/org/opensearch/common/Randomness.java b/server/src/main/java/org/opensearch/common/Randomness.java index 2c60e848b9db9..221bc95c41f31 100644 --- a/server/src/main/java/org/opensearch/common/Randomness.java +++ b/server/src/main/java/org/opensearch/common/Randomness.java @@ -127,7 +127,7 @@ public static Random get() { /** * Provides a secure source of randomness. - * + * <p> * This acts exactly similar to {@link #get()}, but returning a new {@link SecureRandom}. 
*/ public static SecureRandom createSecure() { diff --git a/server/src/main/java/org/opensearch/common/Rounding.java b/server/src/main/java/org/opensearch/common/Rounding.java index cae24ddee1388..6f5f1e4328758 100644 --- a/server/src/main/java/org/opensearch/common/Rounding.java +++ b/server/src/main/java/org/opensearch/common/Rounding.java @@ -37,11 +37,14 @@ import org.opensearch.OpenSearchException; import org.opensearch.common.LocalTimeOffset.Gap; import org.opensearch.common.LocalTimeOffset.Overlap; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.round.Roundable; +import org.opensearch.common.round.RoundableFactory; +import org.opensearch.common.time.DateUtils; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.time.DateUtils; -import org.opensearch.common.unit.TimeValue; import java.io.IOException; import java.time.Instant; @@ -51,6 +54,7 @@ import java.time.OffsetDateTime; import java.time.ZoneId; import java.time.ZoneOffset; +import java.time.format.TextStyle; import java.time.temporal.ChronoField; import java.time.temporal.ChronoUnit; import java.time.temporal.IsoFields; @@ -58,10 +62,10 @@ import java.time.temporal.TemporalQueries; import java.time.zone.ZoneOffsetTransition; import java.time.zone.ZoneRules; -import java.util.Arrays; import java.util.List; import java.util.Locale; import java.util.Objects; +import java.util.OptionalLong; import java.util.concurrent.TimeUnit; /** @@ -75,16 +79,18 @@ * blog for some background reading. Its super interesting and the links are * a comedy gold mine. If you like time zones. Or hate them. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class Rounding implements Writeable { private static final Logger logger = LogManager.getLogger(Rounding.class); /** * A Date Time Unit * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum DateTimeUnit { WEEK_OF_WEEKYEAR((byte) 1, "week", IsoFields.WEEK_OF_WEEK_BASED_YEAR, true, TimeUnit.DAYS.toMillis(7)) { private final long extraLocalOffsetLookup = TimeUnit.DAYS.toMillis(7); @@ -175,7 +181,7 @@ long roundFloor(long utcMillis) { return DateUtils.roundFloor(utcMillis, ratio); } - long extraLocalOffsetLookup() { + public long extraLocalOffsetLookup() { return ratio; } }; @@ -267,8 +273,9 @@ public void writeTo(StreamOutput out) throws IOException { /** * A strategy for rounding milliseconds since epoch. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface Prepared { /** * Rounds the given value. @@ -358,8 +365,9 @@ public static Builder builder(TimeValue interval) { /** * Builder for rounding * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private final DateTimeUnit unit; @@ -412,6 +420,14 @@ public Rounding build() { } private abstract class PreparedRounding implements Prepared { + /** + * The maximum limit up to which array-based prepared rounding is used. + * 128 is a power of two that isn't huge. We might be able to do + * better if the limit was based on the actual type of prepared + * rounding but this'll do for now. 
+ */ + private static final int DEFAULT_ARRAY_ROUNDING_MAX_THRESHOLD = 128; + /** * Attempt to build a {@link Prepared} implementation that relies on pre-calcuated * "round down" points. If there would be more than {@code max} points then return @@ -435,7 +451,36 @@ protected Prepared maybeUseArray(long minUtcMillis, long maxUtcMillis, int max) values = ArrayUtil.grow(values, i + 1); values[i++] = rounded; } - return new ArrayRounding(values, i, this); + return new ArrayRounding(RoundableFactory.create(values, i), this); + } + } + + /** + * ArrayRounding is an implementation of {@link Prepared} which uses + * pre-calculated round-down points to speed up lookups. + */ + private static class ArrayRounding implements Prepared { + private final Roundable roundable; + private final Prepared delegate; + + public ArrayRounding(Roundable roundable, Prepared delegate) { + this.roundable = roundable; + this.delegate = delegate; + } + + @Override + public long round(long utcMillis) { + return roundable.floor(utcMillis); + } + + @Override + public long nextRoundingValue(long utcMillis) { + return delegate.nextRoundingValue(utcMillis); + } + + @Override + public double roundingSize(long utcMillis, DateTimeUnit timeUnit) { + return delegate.roundingSize(utcMillis, timeUnit); } } @@ -521,12 +566,11 @@ private LocalDateTime truncateLocalDateTime(LocalDateTime localDateTime) { @Override public Prepared prepare(long minUtcMillis, long maxUtcMillis) { - /* - * 128 is a power of two that isn't huge. We might be able to do - * better if the limit was based on the actual type of prepared - * rounding but this'll do for now. - */ - return prepareOffsetOrJavaTimeRounding(minUtcMillis, maxUtcMillis).maybeUseArray(minUtcMillis, maxUtcMillis, 128); + return prepareOffsetOrJavaTimeRounding(minUtcMillis, maxUtcMillis).maybeUseArray( + minUtcMillis, + maxUtcMillis, + PreparedRounding.DEFAULT_ARRAY_ROUNDING_MAX_THRESHOLD + ); } private TimeUnitPreparedRounding prepareOffsetOrJavaTimeRounding(long minUtcMillis, long maxUtcMillis) { @@ -1328,41 +1372,38 @@ public static Rounding read(StreamInput in) throws IOException { } /** - * Implementation of {@link Prepared} using pre-calculated "round down" points. - * - * @opensearch.internal + * Extracts the interval value from the {@link Rounding} instance + * @param rounding {@link Rounding} instance + * @return the interval value from the {@link Rounding} instance or {@code OptionalLong.empty()} + * if the interval is not available */ - private static class ArrayRounding implements Prepared { - private final long[] values; - private final int max; - private final Prepared delegate; + public static OptionalLong getInterval(Rounding rounding) { + long interval = 0; - private ArrayRounding(long[] values, int max, Prepared delegate) { - this.values = values; - this.max = max; - this.delegate = delegate; - } - - @Override - public long round(long utcMillis) { - assert values[0] <= utcMillis : "utcMillis must be after " + values[0]; - int idx = Arrays.binarySearch(values, 0, max, utcMillis); - assert idx != -1 : "The insertion point is before the array! 
This should have tripped the assertion above."; - assert -1 - idx <= values.length : "This insertion point is after the end of the array."; - if (idx < 0) { - idx = -2 - idx; + if (rounding instanceof TimeUnitRounding) { + interval = (((TimeUnitRounding) rounding).unit).extraLocalOffsetLookup(); + if (!isUTCTimeZone(((TimeUnitRounding) rounding).timeZone)) { + // Fast filter aggregation cannot be used if it needs time zone rounding + return OptionalLong.empty(); + } + } else if (rounding instanceof TimeIntervalRounding) { + interval = ((TimeIntervalRounding) rounding).interval; + if (!isUTCTimeZone(((TimeIntervalRounding) rounding).timeZone)) { + // Fast filter aggregation cannot be used if it needs time zone rounding + return OptionalLong.empty(); } - return values[idx]; + } else { + return OptionalLong.empty(); } - @Override - public long nextRoundingValue(long utcMillis) { - return delegate.nextRoundingValue(utcMillis); - } + return OptionalLong.of(interval); + } - @Override - public double roundingSize(long utcMillis, DateTimeUnit timeUnit) { - return delegate.roundingSize(utcMillis, timeUnit); - } + /** + * Helper function for checking if the time zone requested for date histogram + * aggregation is utc or not + */ + private static boolean isUTCTimeZone(final ZoneId zoneId) { + return "Z".equals(zoneId.getDisplayName(TextStyle.FULL, Locale.ENGLISH)); } } diff --git a/server/src/main/java/org/opensearch/common/StreamContext.java b/server/src/main/java/org/opensearch/common/StreamContext.java index 32f095f8488b7..47a3d2b8571ea 100644 --- a/server/src/main/java/org/opensearch/common/StreamContext.java +++ b/server/src/main/java/org/opensearch/common/StreamContext.java @@ -44,9 +44,21 @@ public StreamContext( this.numberOfParts = numberOfParts; } + /** + * Copy constructor for overriding class + */ + protected StreamContext(StreamContext streamContext) { + this.streamSupplier = streamContext.streamSupplier; + this.partSize = streamContext.partSize; + this.numberOfParts = streamContext.numberOfParts; + this.lastPartSize = streamContext.lastPartSize; + } + /** * Vendor plugins can use this method to create new streams only when they are required for processing * New streams won't be created till this method is called with the specific <code>partNumber</code> + * It is the responsibility of caller to ensure that stream is properly closed after consumption + * otherwise it can leak resources. * * @param partNumber The index of the part * @return A stream reference to the part requested diff --git a/server/src/main/java/org/opensearch/common/StreamLimiter.java b/server/src/main/java/org/opensearch/common/StreamLimiter.java new file mode 100644 index 0000000000000..ec203a1c30868 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/StreamLimiter.java @@ -0,0 +1,56 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common; + +import org.apache.lucene.store.RateLimiter; + +import java.io.IOException; +import java.util.function.Supplier; + +/** + * The stream limiter that limits the transfer of bytes + * + * @opensearch.internal + */ +public class StreamLimiter { + + private final Supplier<RateLimiter> rateLimiterSupplier; + + private final StreamLimiter.Listener listener; + + private int bytesSinceLastRateLimit; + + public StreamLimiter(Supplier<RateLimiter> rateLimiterSupplier, Listener listener) { + this.rateLimiterSupplier = rateLimiterSupplier; + this.listener = listener; + } + + public void maybePause(int bytes) throws IOException { + bytesSinceLastRateLimit += bytes; + final RateLimiter rateLimiter = rateLimiterSupplier.get(); + if (rateLimiter != null) { + if (bytesSinceLastRateLimit >= rateLimiter.getMinPauseCheckBytes()) { + long pause = rateLimiter.pause(bytesSinceLastRateLimit); + bytesSinceLastRateLimit = 0; + if (pause > 0) { + listener.onPause(pause); + } + } + } + } + + /** + * Internal listener + * + * @opensearch.internal + */ + public interface Listener { + void onPause(long nanos); + } +} diff --git a/server/src/main/java/org/opensearch/common/Strings.java b/server/src/main/java/org/opensearch/common/Strings.java deleted file mode 100644 index 8e92c86836723..0000000000000 --- a/server/src/main/java/org/opensearch/common/Strings.java +++ /dev/null @@ -1,357 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.common; - -import org.apache.lucene.util.BytesRefBuilder; -import org.opensearch.ExceptionsHelper; -import org.opensearch.OpenSearchException; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.util.CollectionUtils; -import org.opensearch.core.xcontent.MediaType; -import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.core.xcontent.XContentBuilder; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Iterator; -import java.util.Set; - -import static java.util.Collections.unmodifiableSet; -import static org.opensearch.common.util.set.Sets.newHashSet; - -/** - * String utility class. 
- * - * @opensearch.internal - */ -public class Strings { - - public static final String[] EMPTY_ARRAY = org.opensearch.core.common.Strings.EMPTY_ARRAY; - - // --------------------------------------------------------------------- - // General convenience methods for working with Strings - // --------------------------------------------------------------------- - - /** - * Check that the given BytesReference is neither <code>null</code> nor of length 0 - * Note: Will return <code>true</code> for a BytesReference that purely consists of whitespace. - * - * @param bytesReference the BytesReference to check (may be <code>null</code>) - * @return <code>true</code> if the BytesReference is not null and has length - * @see org.opensearch.core.common.Strings#hasLength(CharSequence) - */ - public static boolean hasLength(BytesReference bytesReference) { - return (bytesReference != null && bytesReference.length() > 0); - } - - /** - * Test whether the given string matches the given substring - * at the given index. - * - * @param str the original string (or StringBuilder) - * @param index the index in the original string to start matching against - * @param substring the substring to match at the given index - */ - public static boolean substringMatch(CharSequence str, int index, CharSequence substring) { - for (int j = 0; j < substring.length(); j++) { - int i = index + j; - if (i >= str.length() || str.charAt(i) != substring.charAt(j)) { - return false; - } - } - return true; - } - - // --------------------------------------------------------------------- - // Convenience methods for working with formatted Strings - // --------------------------------------------------------------------- - - /** - * Quote the given String with single quotes. - * - * @param str the input String (e.g. "myString") - * @return the quoted String (e.g. "'myString'"), - * or <code>null</code> if the input was <code>null</code> - */ - public static String quote(String str) { - return (str != null ? "'" + str + "'" : null); - } - - public static final Set<Character> INVALID_FILENAME_CHARS = unmodifiableSet( - newHashSet('\\', '/', '*', '?', '"', '<', '>', '|', ' ', ',') - ); - - public static boolean validFileName(String fileName) { - for (int i = 0; i < fileName.length(); i++) { - char c = fileName.charAt(i); - if (INVALID_FILENAME_CHARS.contains(c)) { - return false; - } - } - return true; - } - - public static boolean validFileNameExcludingAstrix(String fileName) { - for (int i = 0; i < fileName.length(); i++) { - char c = fileName.charAt(i); - if (c != '*' && INVALID_FILENAME_CHARS.contains(c)) { - return false; - } - } - return true; - } - - /** - * Split a String at the first occurrence of the delimiter. - * Does not include the delimiter in the result. 
- * - * @param toSplit the string to split - * @param delimiter to split the string up with - * @return a two element array with index 0 being before the delimiter, and - * index 1 being after the delimiter (neither element includes the delimiter); - * or <code>null</code> if the delimiter wasn't found in the given input String - */ - public static String[] split(String toSplit, String delimiter) { - if (org.opensearch.core.common.Strings.hasLength(toSplit) == false - || org.opensearch.core.common.Strings.hasLength(delimiter) == false) { - return null; - } - int offset = toSplit.indexOf(delimiter); - if (offset < 0) { - return null; - } - String beforeDelimiter = toSplit.substring(0, offset); - String afterDelimiter = toSplit.substring(offset + delimiter.length()); - return new String[] { beforeDelimiter, afterDelimiter }; - } - - /** - * Format the double value with a single decimal points, trimming trailing '.0'. - */ - public static String format1Decimals(double value, String suffix) { - String p = String.valueOf(value); - int ix = p.indexOf('.') + 1; - int ex = p.indexOf('E'); - char fraction = p.charAt(ix); - if (fraction == '0') { - if (ex != -1) { - return p.substring(0, ix - 1) + p.substring(ex) + suffix; - } else { - return p.substring(0, ix - 1) + suffix; - } - } else { - if (ex != -1) { - return p.substring(0, ix) + fraction + p.substring(ex) + suffix; - } else { - return p.substring(0, ix) + fraction + suffix; - } - } - } - - private Strings() {} - - public static byte[] toUTF8Bytes(CharSequence charSequence) { - return toUTF8Bytes(charSequence, new BytesRefBuilder()); - } - - public static byte[] toUTF8Bytes(CharSequence charSequence, BytesRefBuilder spare) { - spare.copyChars(charSequence); - return Arrays.copyOf(spare.bytes(), spare.length()); - } - - /** - * Return substring(beginIndex, endIndex) that is impervious to string length. - */ - public static String substring(String s, int beginIndex, int endIndex) { - if (s == null) { - return s; - } - - int realEndIndex = s.length() > 0 ? s.length() - 1 : 0; - - if (endIndex > realEndIndex) { - return s.substring(beginIndex); - } else { - return s.substring(beginIndex, endIndex); - } - } - - /** - * If an array only consists of zero or one element, which is "*" or "_all" return an empty array - * which is usually used as everything - */ - public static boolean isAllOrWildcard(String[] data) { - return CollectionUtils.isEmpty(data) || data.length == 1 && isAllOrWildcard(data[0]); - } - - /** - * Returns `true` if the string is `_all` or `*`. - */ - public static boolean isAllOrWildcard(String data) { - return "_all".equals(data) || "*".equals(data); - } - - /** - * Return a {@link String} that is the json representation of the provided {@link ToXContent}. - * Wraps the output into an anonymous object if needed. The content is not pretty-printed - * nor human readable. - */ - public static String toString(MediaType mediaType, ToXContent toXContent) { - return toString(mediaType, toXContent, false, false); - } - - /** - * Return a {@link String} that is the json representation of the provided {@link ToXContent}. - * Wraps the output into an anonymous object if needed. - * Allows to configure the params. - * The content is not pretty-printed nor human readable. 
- */ - public static String toString(MediaType mediaType, ToXContent toXContent, ToXContent.Params params) { - return toString(mediaType, toXContent, params, false, false); - } - - /** - * Returns a string representation of the builder (only applicable for text based xcontent). - * @param xContentBuilder builder containing an object to converted to a string - */ - public static String toString(XContentBuilder xContentBuilder) { - return BytesReference.bytes(xContentBuilder).utf8ToString(); - } - - /** - * Return a {@link String} that is the json representation of the provided {@link ToXContent}. - * Wraps the output into an anonymous object if needed. Allows to control whether the outputted - * json needs to be pretty printed and human readable. - * - */ - public static String toString(MediaType mediaType, ToXContent toXContent, boolean pretty, boolean human) { - return toString(mediaType, toXContent, ToXContent.EMPTY_PARAMS, pretty, human); - } - - /** - * Return a {@link String} that is the json representation of the provided {@link ToXContent}. - * Wraps the output into an anonymous object if needed. - * Allows to configure the params. - * Allows to control whether the outputted json needs to be pretty printed and human readable. - */ - private static String toString(MediaType mediaType, ToXContent toXContent, ToXContent.Params params, boolean pretty, boolean human) { - try { - XContentBuilder builder = createBuilder(mediaType, pretty, human); - if (toXContent.isFragment()) { - builder.startObject(); - } - toXContent.toXContent(builder, params); - if (toXContent.isFragment()) { - builder.endObject(); - } - return toString(builder); - } catch (IOException e) { - try { - XContentBuilder builder = createBuilder(mediaType, pretty, human); - builder.startObject(); - builder.field("error", "error building toString out of XContent: " + e.getMessage()); - builder.field("stack_trace", ExceptionsHelper.stackTrace(e)); - builder.endObject(); - return toString(builder); - } catch (IOException e2) { - throw new OpenSearchException("cannot generate error message for deserialization", e); - } - } - } - - private static XContentBuilder createBuilder(MediaType mediaType, boolean pretty, boolean human) throws IOException { - XContentBuilder builder = XContentBuilder.builder(mediaType.xContent()); - if (pretty) { - builder.prettyPrint(); - } - if (human) { - builder.humanReadable(true); - } - return builder; - } - - /** - * Truncates string to a length less than length. Backtracks to throw out - * high surrogates. - */ - public static String cleanTruncate(String s, int length) { - if (s == null) { - return s; - } - /* - * Its pretty silly for you to truncate to 0 length but just in case - * someone does this shouldn't break. 
- */ - if (length == 0) { - return ""; - } - if (length >= s.length()) { - return s; - } - if (Character.isHighSurrogate(s.charAt(length - 1))) { - length--; - } - return s.substring(0, length); - } - - public static String padStart(String s, int minimumLength, char c) { - if (s == null) { - throw new NullPointerException("s"); - } - if (s.length() >= minimumLength) { - return s; - } else { - StringBuilder sb = new StringBuilder(minimumLength); - for (int i = s.length(); i < minimumLength; i++) { - sb.append(c); - } - - sb.append(s); - return sb.toString(); - } - } - - public static String toLowercaseAscii(String in) { - StringBuilder out = new StringBuilder(); - Iterator<Integer> iter = in.codePoints().iterator(); - while (iter.hasNext()) { - int codepoint = iter.next(); - if (codepoint > 128) { - out.appendCodePoint(codepoint); - } else { - out.appendCodePoint(Character.toLowerCase(codepoint)); - } - } - return out.toString(); - } -} diff --git a/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamBlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamBlobContainer.java new file mode 100644 index 0000000000000..97f304d776f5c --- /dev/null +++ b/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamBlobContainer.java @@ -0,0 +1,51 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.blobstore; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.blobstore.stream.read.ReadContext; +import org.opensearch.common.blobstore.stream.write.WriteContext; +import org.opensearch.core.action.ActionListener; + +import java.io.IOException; + +/** + * An extension of {@link BlobContainer} that adds {@link AsyncMultiStreamBlobContainer#asyncBlobUpload} to allow + * multipart uploads and performs integrity checks on transferred files + * + * @opensearch.internal + */ +public interface AsyncMultiStreamBlobContainer extends BlobContainer { + + /** + * Reads blob content from multiple streams, each from a specific part of the file, which is provided by the + * StreamContextSupplier in the WriteContext passed to this method. An {@link IOException} is thrown if reading + * any of the input streams fails, or writing to the target blob fails + * + * @param writeContext A WriteContext object encapsulating all information needed to perform the upload + * @param completionListener Listener on which upload events should be published. + * @throws IOException if any of the input streams could not be read, or the target blob could not be written to + */ + void asyncBlobUpload(WriteContext writeContext, ActionListener<Void> completionListener) throws IOException; + + /** + * Creates an async callback of a {@link ReadContext} containing the multipart streams for a specified blob within the container. + * @param blobName The name of the blob for which the {@link ReadContext} needs to be fetched. + * @param listener Async listener for {@link ReadContext} object which serves the input streams and other metadata for the blob + */ + @ExperimentalApi + void readBlobAsync(String blobName, ActionListener<ReadContext> listener); + + /* + * Whether underlying blobContainer can verify integrity of data after transfer.
If true and if expected + * checksum is provided in WriteContext, then the checksum of transferred data is compared with expected checksum + * by underlying blobContainer. In this case, caller doesn't need to ensure integrity of data. + */ + boolean remoteIntegrityCheckSupported(); +} diff --git a/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainer.java new file mode 100644 index 0000000000000..82bc7a0baed50 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainer.java @@ -0,0 +1,174 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.blobstore; + +import org.opensearch.common.StreamContext; +import org.opensearch.common.blobstore.stream.read.ReadContext; +import org.opensearch.common.blobstore.stream.write.WriteContext; +import org.opensearch.common.crypto.CryptoHandler; +import org.opensearch.common.crypto.DecryptedRangedStreamProvider; +import org.opensearch.common.io.InputStreamContainer; +import org.opensearch.core.action.ActionListener; + +import java.io.IOException; +import java.io.InputStream; +import java.util.List; +import java.util.stream.Collectors; + +/** + * EncryptedBlobContainer is an encrypted BlobContainer that is backed by a + * {@link AsyncMultiStreamBlobContainer} + * + * @opensearch.internal + */ +public class AsyncMultiStreamEncryptedBlobContainer<T, U> extends EncryptedBlobContainer<T, U> implements AsyncMultiStreamBlobContainer { + + private final AsyncMultiStreamBlobContainer blobContainer; + private final CryptoHandler<T, U> cryptoHandler; + + public AsyncMultiStreamEncryptedBlobContainer(AsyncMultiStreamBlobContainer blobContainer, CryptoHandler<T, U> cryptoHandler) { + super(blobContainer, cryptoHandler); + this.blobContainer = blobContainer; + this.cryptoHandler = cryptoHandler; + } + + @Override + public void asyncBlobUpload(WriteContext writeContext, ActionListener<Void> completionListener) throws IOException { + EncryptedWriteContext<T, U> encryptedWriteContext = new EncryptedWriteContext<>(writeContext, cryptoHandler); + blobContainer.asyncBlobUpload(encryptedWriteContext, completionListener); + } + + @Override + public void readBlobAsync(String blobName, ActionListener<ReadContext> listener) { + try { + final U cryptoContext = cryptoHandler.loadEncryptionMetadata(getEncryptedHeaderContentSupplier(blobName)); + ActionListener<ReadContext> decryptingCompletionListener = ActionListener.map( + listener, + readContext -> new DecryptedReadContext<>(readContext, cryptoHandler, cryptoContext) + ); + + blobContainer.readBlobAsync(blobName, decryptingCompletionListener); + } catch (Exception e) { + listener.onFailure(e); + } + } + + @Override + public boolean remoteIntegrityCheckSupported() { + return false; + } + + static class EncryptedWriteContext<T, U> extends WriteContext { + + private final T encryptionMetadata; + private final CryptoHandler<T, U> cryptoHandler; + private final long fileSize; + + /** + * Construct a new encrypted WriteContext object + */ + public EncryptedWriteContext(WriteContext writeContext, CryptoHandler<T, U> cryptoHandler) { + super(writeContext); + this.cryptoHandler = cryptoHandler; + this.encryptionMetadata = 
this.cryptoHandler.initEncryptionMetadata(); + this.fileSize = this.cryptoHandler.estimateEncryptedLengthOfEntireContent(encryptionMetadata, writeContext.getFileSize()); + } + + public StreamContext getStreamProvider(long partSize) { + long adjustedPartSize = cryptoHandler.adjustContentSizeForPartialEncryption(encryptionMetadata, partSize); + StreamContext streamContext = super.getStreamProvider(adjustedPartSize); + return new EncryptedStreamContext<>(streamContext, cryptoHandler, encryptionMetadata); + } + + /** + * @return The total size of the encrypted file + */ + public long getFileSize() { + return fileSize; + } + } + + static class EncryptedStreamContext<T, U> extends StreamContext { + + private final CryptoHandler<T, U> cryptoHandler; + private final T encryptionMetadata; + + /** + * Construct a new encrypted StreamContext object + */ + public EncryptedStreamContext(StreamContext streamContext, CryptoHandler<T, U> cryptoHandler, T encryptionMetadata) { + super(streamContext); + this.cryptoHandler = cryptoHandler; + this.encryptionMetadata = encryptionMetadata; + } + + @Override + public InputStreamContainer provideStream(int partNumber) throws IOException { + InputStreamContainer inputStreamContainer = super.provideStream(partNumber); + return cryptoHandler.createEncryptingStreamOfPart(encryptionMetadata, inputStreamContainer, getNumberOfParts(), partNumber); + } + + } + + /** + * DecryptedReadContext decrypts the encrypted {@link ReadContext} by acting as a transformation wrapper around + * the encrypted object + * @param <T> Encryption Metadata / CryptoContext for the {@link CryptoHandler} instance + * @param <U> Parsed Encryption Metadata / CryptoContext for the {@link CryptoHandler} instance + */ + static class DecryptedReadContext<T, U> extends ReadContext { + + private final CryptoHandler<T, U> cryptoHandler; + private final U cryptoContext; + private Long blobSize; + + public DecryptedReadContext(ReadContext readContext, CryptoHandler<T, U> cryptoHandler, U cryptoContext) { + super(readContext); + this.cryptoHandler = cryptoHandler; + this.cryptoContext = cryptoContext; + } + + @Override + public long getBlobSize() { + // initializes the value lazily + if (blobSize == null) { + this.blobSize = this.cryptoHandler.estimateDecryptedLength(cryptoContext, super.getBlobSize()); + } + return this.blobSize; + } + + @Override + public List<StreamPartCreator> getPartStreams() { + return super.getPartStreams().stream() + .map(supplier -> (StreamPartCreator) () -> supplier.get().thenApply(this::decryptInputStreamContainer)) + .collect(Collectors.toUnmodifiableList()); + } + + /** + * Transforms an encrypted {@link InputStreamContainer} to a decrypted instance + * @param inputStreamContainer encrypted input stream container instance + * @return decrypted input stream container instance + */ + private InputStreamContainer decryptInputStreamContainer(InputStreamContainer inputStreamContainer) { + long startOfStream = inputStreamContainer.getOffset(); + long endOfStream = startOfStream + inputStreamContainer.getContentLength() - 1; + DecryptedRangedStreamProvider decryptedStreamProvider = cryptoHandler.createDecryptingStreamOfRange( + cryptoContext, + startOfStream, + endOfStream + ); + + long adjustedPos = decryptedStreamProvider.getAdjustedRange()[0]; + long adjustedLength = decryptedStreamProvider.getAdjustedRange()[1] - adjustedPos + 1; + final InputStream decryptedStream = decryptedStreamProvider.getDecryptedStreamProvider() + .apply(inputStreamContainer.getInputStream()); + return new 
InputStreamContainer(decryptedStream, adjustedLength, adjustedPos); + } + } +} diff --git a/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java index e626824e7e271..2e25a532b5abf 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java +++ b/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java @@ -32,7 +32,7 @@ package org.opensearch.common.blobstore; -import org.opensearch.action.ActionListener; +import org.opensearch.core.action.ActionListener; import java.io.IOException; import java.io.InputStream; @@ -93,10 +93,10 @@ public interface BlobContainer { /** * Provides a hint to clients for a suitable length to use with {@link BlobContainer#readBlob(String, long, long)}. - * + * <p> * Some blob containers have nontrivial costs attached to each readBlob call, so it is a good idea for consumers to speculatively * request more data than they need right now and to re-use this stream for future needs if possible. - * + * <p> * Also, some blob containers return streams that are expensive to close before the stream has been fully consumed, and the cost may * depend on the length of the data that was left unconsumed. For these containers it's best to bound the cost of a partial read by * bounding the length of the data requested. @@ -131,7 +131,7 @@ default long readBlobPreferredLength() { /** * Reads blob content from the input stream and writes it to the container in a new blob with the given name, * using an atomic write operation if the implementation supports it. - * + * <p> * This method assumes the container does not already contain a blob of the same blobName. If a blob by the * same name already exists, the operation will fail and an {@link IOException} will be thrown. 
* @@ -231,11 +231,19 @@ default void listBlobsByPrefixInSortedOrder( throw new IllegalArgumentException("limit should not be a negative value"); } try { - List<BlobMetadata> blobNames = new ArrayList<>(listBlobsByPrefix(blobNamePrefix).values()); - blobNames.sort(blobNameSortOrder.comparator()); - listener.onResponse(blobNames.subList(0, Math.min(blobNames.size(), limit))); + listener.onResponse(listBlobsByPrefixInSortedOrder(blobNamePrefix, limit, blobNameSortOrder)); } catch (Exception e) { listener.onFailure(e); } } + + default List<BlobMetadata> listBlobsByPrefixInSortedOrder(String blobNamePrefix, int limit, BlobNameSortOrder blobNameSortOrder) + throws IOException { + if (limit < 0) { + throw new IllegalArgumentException("limit should not be a negative value"); + } + List<BlobMetadata> blobNames = new ArrayList<>(listBlobsByPrefix(blobNamePrefix).values()); + blobNames.sort(blobNameSortOrder.comparator()); + return blobNames.subList(0, Math.min(blobNames.size(), limit)); + } } diff --git a/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java b/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java index ab40b1e2a082e..0f6646d37f950 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java +++ b/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java @@ -31,6 +31,8 @@ package org.opensearch.common.blobstore; +import org.opensearch.cluster.metadata.RepositoryMetadata; + import java.io.Closeable; import java.util.Collections; import java.util.Map; @@ -47,10 +49,43 @@ public interface BlobStore extends Closeable { */ BlobContainer blobContainer(BlobPath path); /** * Returns statistics on the count of operations that have been performed on this blob store */ default Map<String, Long> stats() { return Collections.emptyMap(); } + + /** + * Returns detailed statistics of operations that have been performed on this blob store + */ + default Map<Metric, Map<String, Long>> extendedStats() { + return Collections.emptyMap(); + } + + /** + * Reload the blob store in place + */ + default void reload(RepositoryMetadata repositoryMetadata) {} + + /** + * Metrics for BlobStore interactions + */ + enum Metric { + REQUEST_SUCCESS("request_success_total"), + REQUEST_FAILURE("request_failures_total"), + REQUEST_LATENCY("request_time_in_millis"), + RETRY_COUNT("request_retry_count_total"); + + private String metricName; + + Metric(String name) { + this.metricName = name; + } + + public String metricName() { + return this.metricName; + } + } + } diff --git a/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobContainer.java new file mode 100644 index 0000000000000..d0933741339d9 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobContainer.java @@ -0,0 +1,194 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
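The listing refactor above moves the validate-sort-truncate logic into a synchronous default that the listener-based variant now simply delegates to. The same shape in a reduced, self-contained form (plain strings and a Comparator stand in for BlobMetadata and BlobNameSortOrder):

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class SortedPrefixListingSketch {
    // Mirrors the new synchronous default: validate the limit, sort a copy
    // of the listing, then truncate to at most `limit` entries.
    static List<String> listInSortedOrder(List<String> blobNames, int limit, Comparator<String> order) {
        if (limit < 0) {
            throw new IllegalArgumentException("limit should not be a negative value");
        }
        List<String> sorted = new ArrayList<>(blobNames);
        sorted.sort(order);
        return sorted.subList(0, Math.min(sorted.size(), limit));
    }

    public static void main(String[] args) {
        List<String> names = List.of("segments_3", "segments_1", "segments_2");
        System.out.println(listInSortedOrder(names, 2, Comparator.naturalOrder())); // [segments_1, segments_2]
    }
}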
+ */ + +package org.opensearch.common.blobstore; + +import org.opensearch.common.CheckedBiConsumer; +import org.opensearch.common.crypto.CryptoHandler; +import org.opensearch.common.crypto.DecryptedRangedStreamProvider; +import org.opensearch.common.crypto.EncryptedHeaderContentSupplier; +import org.opensearch.common.io.InputStreamContainer; +import org.opensearch.core.action.ActionListener; + +import java.io.IOException; +import java.io.InputStream; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * EncryptedBlobContainer is a wrapper around BlobContainer that encrypts the data on the fly. + */ +public class EncryptedBlobContainer<T, U> implements BlobContainer { + + private final BlobContainer blobContainer; + private final CryptoHandler<T, U> cryptoHandler; + + public EncryptedBlobContainer(BlobContainer blobContainer, CryptoHandler<T, U> cryptoHandler) { + this.blobContainer = blobContainer; + this.cryptoHandler = cryptoHandler; + } + + @Override + public BlobPath path() { + return blobContainer.path(); + } + + @Override + public boolean blobExists(String blobName) throws IOException { + return blobContainer.blobExists(blobName); + } + + @Override + public InputStream readBlob(String blobName) throws IOException { + InputStream inputStream = blobContainer.readBlob(blobName); + return cryptoHandler.createDecryptingStream(inputStream); + } + + EncryptedHeaderContentSupplier getEncryptedHeaderContentSupplier(String blobName) { + return (start, end) -> { + byte[] buffer; + int length = (int) (end - start + 1); + try (InputStream inputStream = blobContainer.readBlob(blobName, start, length)) { + buffer = new byte[length]; + inputStream.readNBytes(buffer, (int) start, buffer.length); + } + return buffer; + }; + } + + @Override + public InputStream readBlob(String blobName, long position, long length) throws IOException { + U encryptionMetadata = cryptoHandler.loadEncryptionMetadata(getEncryptedHeaderContentSupplier(blobName)); + DecryptedRangedStreamProvider decryptedStreamProvider = cryptoHandler.createDecryptingStreamOfRange( + encryptionMetadata, + position, + position + length - 1 + ); + long adjustedPos = decryptedStreamProvider.getAdjustedRange()[0]; + long adjustedLength = decryptedStreamProvider.getAdjustedRange()[1] - adjustedPos + 1; + InputStream encryptedStream = blobContainer.readBlob(blobName, adjustedPos, adjustedLength); + return decryptedStreamProvider.getDecryptedStreamProvider().apply(encryptedStream); + } + + @Override + public long readBlobPreferredLength() { + return blobContainer.readBlobPreferredLength(); + } + + private void executeWrite(InputStream inputStream, long blobSize, CheckedBiConsumer<InputStream, Long, IOException> writeConsumer) + throws IOException { + T cryptoContext = cryptoHandler.initEncryptionMetadata(); + InputStreamContainer streamContainer = new InputStreamContainer(inputStream, blobSize, 0); + InputStreamContainer encryptedStream = cryptoHandler.createEncryptingStream(cryptoContext, streamContainer); + long cryptoLength = cryptoHandler.estimateEncryptedLengthOfEntireContent(cryptoContext, blobSize); + writeConsumer.accept(encryptedStream.getInputStream(), cryptoLength); + } + + @Override + public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException { + executeWrite( + inputStream, + blobSize, + (encryptedStream, encryptedLength) -> blobContainer.writeBlob(blobName, encryptedStream, encryptedLength, failIfAlreadyExists) + ); + } + + 
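The ranged readBlob above asks the CryptoHandler for an adjusted (typically wider) ciphertext window, reads that window, and lets the returned provider decrypt and trim it. Here is what such an adjustment can look like for a hypothetical cipher operating on fixed-size blocks; the block size and rounding rule are assumptions, since real CryptoHandler implementations define their own alignment:

public class RangedDecryptSketch {
    // Hypothetical block alignment: widen the requested plaintext range
    // [position, position + length - 1] outward to enclosing block boundaries,
    // matching the two-element adjusted-range convention used above.
    static long[] adjustRange(long position, long length, long blockSize) {
        long start = (position / blockSize) * blockSize;             // round start down
        long endInclusive = position + length - 1;
        long end = ((endInclusive / blockSize) + 1) * blockSize - 1; // round end up
        return new long[] { start, end };
    }

    public static void main(String[] args) {
        long[] adjusted = adjustRange(100, 50, 64);
        long adjustedPos = adjusted[0];
        long adjustedLength = adjusted[1] - adjustedPos + 1;
        // Fetch ciphertext [64, 191], decrypt, then skip back to the caller's [100, 149].
        System.out.println(adjustedPos + ", len=" + adjustedLength); // 64, len=128
    }
}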
@Override + public void writeBlobAtomic(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException { + executeWrite( + inputStream, + blobSize, + (encryptedStream, encryptedLength) -> blobContainer.writeBlobAtomic( + blobName, + encryptedStream, + encryptedLength, + failIfAlreadyExists + ) + ); + } + + @Override + public DeleteResult delete() throws IOException { + return blobContainer.delete(); + } + + @Override + public void deleteBlobsIgnoringIfNotExists(List<String> blobNames) throws IOException { + blobContainer.deleteBlobsIgnoringIfNotExists(blobNames); + } + + @Override + public Map<String, BlobMetadata> listBlobs() throws IOException { + Map<String, BlobMetadata> blobMetadataMap = blobContainer.listBlobs(); + return convertToEncryptedMetadataMap(blobMetadataMap); + } + + @Override + public Map<String, BlobContainer> children() throws IOException { + Map<String, BlobContainer> children = blobContainer.children(); + if (children != null) { + return children.entrySet() + .stream() + .collect(Collectors.toMap(Map.Entry::getKey, entry -> new EncryptedBlobContainer<>(entry.getValue(), cryptoHandler))); + } else { + return null; + } + } + + @Override + public Map<String, BlobMetadata> listBlobsByPrefix(String blobNamePrefix) throws IOException { + Map<String, BlobMetadata> blobMetadataMap = blobContainer.listBlobsByPrefix(blobNamePrefix); + return convertToEncryptedMetadataMap(blobMetadataMap); + } + + private Map<String, BlobMetadata> convertToEncryptedMetadataMap(Map<String, BlobMetadata> blobMetadataMap) { + if (blobMetadataMap == null) { + return null; + } + + return blobMetadataMap.entrySet() + .stream() + .collect( + Collectors.toMap( + Map.Entry::getKey, + entry -> new EncryptedBlobMetadata<>(entry.getValue(), cryptoHandler, getEncryptedHeaderContentSupplier(entry.getKey())) + ) + ); + + } + + @Override + public void listBlobsByPrefixInSortedOrder( + String blobNamePrefix, + int limit, + BlobNameSortOrder blobNameSortOrder, + ActionListener<List<BlobMetadata>> listener + ) { + ActionListener<List<BlobMetadata>> encryptedMetadataListener = ActionListener.delegateFailure( + listener, + (delegatedListener, metadataList) -> { + if (metadataList != null) { + List<BlobMetadata> encryptedMetadata = metadataList.stream() + .map( + blobMetadata -> new EncryptedBlobMetadata<>( + blobMetadata, + cryptoHandler, + getEncryptedHeaderContentSupplier(blobMetadata.name()) + ) + ) + .collect(Collectors.toList()); + delegatedListener.onResponse(encryptedMetadata); + } else { + delegatedListener.onResponse(null); + } + } + ); + blobContainer.listBlobsByPrefixInSortedOrder(blobNamePrefix, limit, blobNameSortOrder, encryptedMetadataListener); + } +} diff --git a/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobMetadata.java b/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobMetadata.java new file mode 100644 index 0000000000000..8917bba806d08 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobMetadata.java @@ -0,0 +1,49 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.blobstore; + +import org.opensearch.common.crypto.CryptoHandler; +import org.opensearch.common.crypto.EncryptedHeaderContentSupplier; + +import java.io.IOException; + +/** + * Adjusts length of encrypted blob to raw length + */ +public class EncryptedBlobMetadata<T, U> implements BlobMetadata { + private final EncryptedHeaderContentSupplier encryptedHeaderContentSupplier; + private final BlobMetadata delegate; + private final CryptoHandler<T, U> cryptoHandler; + + public EncryptedBlobMetadata( + BlobMetadata delegate, + CryptoHandler<T, U> cryptoHandler, + EncryptedHeaderContentSupplier encryptedHeaderContentSupplier + ) { + this.encryptedHeaderContentSupplier = encryptedHeaderContentSupplier; + this.delegate = delegate; + this.cryptoHandler = cryptoHandler; + } + + @Override + public String name() { + return delegate.name(); + } + + @Override + public long length() { + U cryptoContext; + try { + cryptoContext = cryptoHandler.loadEncryptionMetadata(encryptedHeaderContentSupplier); + } catch (IOException ex) { + throw new RuntimeException(ex); + } + return cryptoHandler.estimateDecryptedLength(cryptoContext, delegate.length()); + } +} diff --git a/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobStore.java b/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobStore.java new file mode 100644 index 0000000000000..a18ca8b9d5c39 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobStore.java @@ -0,0 +1,100 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.blobstore; + +import org.opensearch.cluster.metadata.CryptoMetadata; +import org.opensearch.common.crypto.CryptoHandler; +import org.opensearch.crypto.CryptoHandlerRegistry; +import org.opensearch.crypto.CryptoRegistryException; + +import java.io.IOException; +import java.util.Map; + +/** + * The EncryptedBlobStore is a decorator class that wraps an existing BlobStore and provides encryption and decryption + * capabilities for the stored data. It uses a CryptoManager to handle encryption and decryption operations based on + * the provided CryptoMetadata. The EncryptedBlobStore ensures that all data written to and read from the underlying + * BlobStore is encrypted and decrypted transparently. + */ +public class EncryptedBlobStore implements BlobStore { + + private final BlobStore blobStore; + private final CryptoHandler<?, ?> cryptoHandler; + + /** + * Constructs an EncryptedBlobStore that wraps the provided BlobStore with encryption capabilities based on the + * given CryptoMetadata. + * + * @param blobStore The underlying BlobStore to be wrapped and used for storing encrypted data. + * @param cryptoMetadata The CryptoMetadata containing information about the key provider and settings for encryption. + * @throws CryptoRegistryException If the CryptoManager is not found during encrypted BlobStore creation. 
+ */ + public EncryptedBlobStore(BlobStore blobStore, CryptoMetadata cryptoMetadata) { + CryptoHandlerRegistry cryptoHandlerRegistry = CryptoHandlerRegistry.getInstance(); + assert cryptoHandlerRegistry != null : "CryptoManagerRegistry is not initialized"; + this.cryptoHandler = cryptoHandlerRegistry.fetchCryptoHandler(cryptoMetadata); + if (cryptoHandler == null) { + throw new CryptoRegistryException( + cryptoMetadata.keyProviderName(), + cryptoMetadata.keyProviderType(), + "Crypto manager not found during encrypted blob store creation." + ); + } + this.blobStore = blobStore; + } + + /** + * Retrieves a BlobContainer from the underlying BlobStore based on the provided BlobPath. The returned BlobContainer + * is wrapped in an EncryptedBlobContainer to enable transparent encryption and decryption of data. + * + * @param path The BlobPath specifying the location of the BlobContainer. + * @return An EncryptedBlobContainer wrapping the BlobContainer retrieved from the underlying BlobStore. + */ + @Override + public BlobContainer blobContainer(BlobPath path) { + BlobContainer blobContainer = blobStore.blobContainer(path); + if (blobContainer instanceof AsyncMultiStreamBlobContainer) { + return new AsyncMultiStreamEncryptedBlobContainer<>((AsyncMultiStreamBlobContainer) blobContainer, cryptoHandler); + } + return new EncryptedBlobContainer<>(blobContainer, cryptoHandler); + } + + /** + * Retrieves statistics about the BlobStore. Delegates the call to the underlying BlobStore's stats() method. + * + * @return A map containing statistics about the BlobStore. + */ + @Override + public Map<String, Long> stats() { + return blobStore.stats(); + } + + /** + * Retrieves extended statistics about the BlobStore. Delegates the call to the underlying BlobStore's extendedStats() method. + * + * @return A map containing extended statistics about the BlobStore. + */ + @Override + public Map<Metric, Map<String, Long>> extendedStats() { + return blobStore.extendedStats(); + } + + /** + * Closes the EncryptedBlobStore by decrementing the reference count of the CryptoManager and closing the + * underlying BlobStore. This ensures proper cleanup of resources. + * + * @throws IOException If an I/O error occurs while closing the BlobStore. + */ + @Override + public void close() throws IOException { + cryptoHandler.close(); + blobStore.close(); + } + +} diff --git a/server/src/main/java/org/opensearch/common/blobstore/VerifyingMultiStreamBlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/VerifyingMultiStreamBlobContainer.java deleted file mode 100644 index 0dfcc5c50e4b1..0000000000000 --- a/server/src/main/java/org/opensearch/common/blobstore/VerifyingMultiStreamBlobContainer.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
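EncryptedBlobStore is a textbook decorator: construction fails fast if no CryptoHandler is registered, and every container handed out by blobContainer is wrapped so encryption stays transparent to callers. The shape in miniature, with invented Store/Container stand-ins (the string rewrite is a placeholder for real decryption, not the OpenSearch types):

import java.util.function.UnaryOperator;

public class DecoratorSketch {
    interface Container { String read(String name); }
    interface Store { Container container(String path); }

    // Wraps a delegate store so every container it returns is transformed.
    static Store decorate(Store delegate, UnaryOperator<Container> wrap) {
        return path -> wrap.apply(delegate.container(path));
    }

    public static void main(String[] args) {
        Store plain = path -> name -> "ciphertext:" + name;
        Store decryptingView = decorate(plain, c -> name -> c.read(name).replace("ciphertext:", "plaintext:"));
        System.out.println(decryptingView.container("/indices").read("segment_1")); // plaintext:segment_1
    }
}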
- */ - -package org.opensearch.common.blobstore; - -import org.opensearch.action.ActionListener; -import org.opensearch.common.blobstore.stream.write.WriteContext; - -import java.io.IOException; - -/** - * An extension of {@link BlobContainer} that adds {@link VerifyingMultiStreamBlobContainer#asyncBlobUpload} to allow - * multipart uploads and performs integrity checks on transferred files - * - * @opensearch.internal - */ -public interface VerifyingMultiStreamBlobContainer extends BlobContainer { - - /** - * Reads blob content from multiple streams, each from a specific part of the file, which is provided by the - * StreamContextSupplier in the WriteContext passed to this method. An {@link IOException} is thrown if reading - * any of the input streams fails, or writing to the target blob fails - * - * @param writeContext A WriteContext object encapsulating all information needed to perform the upload - * @param completionListener Listener on which upload events should be published. - * @throws IOException if any of the input streams could not be read, or the target blob could not be written to - */ - void asyncBlobUpload(WriteContext writeContext, ActionListener<Void> completionListener) throws IOException; -} diff --git a/server/src/main/java/org/opensearch/common/blobstore/fs/FsBlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/fs/FsBlobContainer.java index 394855671688a..b6644ffd16bab 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/fs/FsBlobContainer.java +++ b/server/src/main/java/org/opensearch/common/blobstore/fs/FsBlobContainer.java @@ -69,7 +69,7 @@ /** * A file system based implementation of {@link org.opensearch.common.blobstore.BlobContainer}. * All blobs in the container are stored on a file system, the location of which is specified by the {@link BlobPath}. - * + * <p> * Note that the methods in this implementation of {@link org.opensearch.common.blobstore.BlobContainer} may * additionally throw a {@link java.lang.SecurityException} if the configured {@link java.lang.SecurityManager} * does not permit read and/or write access to the underlying files. @@ -258,7 +258,7 @@ public static String tempBlobName(final String blobName) { /** * Returns true if the blob is a leftover temporary blob. - * + * <p> * The temporary blobs might be left after failed atomic write operation. */ public static boolean isTempBlobName(final String blobName) { diff --git a/server/src/main/java/org/opensearch/common/blobstore/stream/read/ReadContext.java b/server/src/main/java/org/opensearch/common/blobstore/stream/read/ReadContext.java new file mode 100644 index 0000000000000..1264551401b4c --- /dev/null +++ b/server/src/main/java/org/opensearch/common/blobstore/stream/read/ReadContext.java @@ -0,0 +1,75 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.blobstore.stream.read; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.io.InputStreamContainer; + +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.function.Supplier; + +/** + * ReadContext is used to encapsulate all data needed by <code>BlobContainer#readBlobAsync</code> + * + * @opensearch.experimental + */ +@ExperimentalApi +public class ReadContext { + private final long blobSize; + private final List<StreamPartCreator> asyncPartStreams; + private final String blobChecksum; + + public ReadContext(long blobSize, List<StreamPartCreator> asyncPartStreams, String blobChecksum) { + this.blobSize = blobSize; + this.asyncPartStreams = asyncPartStreams; + this.blobChecksum = blobChecksum; + } + + public ReadContext(ReadContext readContext) { + this.blobSize = readContext.blobSize; + this.asyncPartStreams = readContext.asyncPartStreams; + this.blobChecksum = readContext.blobChecksum; + } + + public String getBlobChecksum() { + return blobChecksum; + } + + public int getNumberOfParts() { + return asyncPartStreams.size(); + } + + public long getBlobSize() { + return blobSize; + } + + public List<StreamPartCreator> getPartStreams() { + return asyncPartStreams; + } + + /** + * Functional interface defining an instance that can create an async action + * to create a part of an object represented as an InputStreamContainer. + * + * @opensearch.experimental + */ + @FunctionalInterface + @ExperimentalApi + public interface StreamPartCreator extends Supplier<CompletableFuture<InputStreamContainer>> { + /** + * Kicks off an async process to start streaming. + * + * @return When the returned future is completed, streaming has + * just begun. Clients must fully consume the resulting stream. + */ + @Override + CompletableFuture<InputStreamContainer> get(); + } +} diff --git a/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/FilePartWriter.java b/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/FilePartWriter.java new file mode 100644 index 0000000000000..1a403200249cd --- /dev/null +++ b/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/FilePartWriter.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.blobstore.stream.read.listener; + +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.common.io.Channels; +import org.opensearch.common.io.InputStreamContainer; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.channels.FileChannel; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.function.UnaryOperator; + +/** + * FilePartWriter transfers the provided stream into the specified file path using a {@link FileChannel} + * instance.
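Each StreamPartCreator above is deliberately lazy: no stream exists until get() is invoked, and the returned future completes once streaming has begun, after which the consumer must drain it. A sketch of assembling a ReadContext over an in-memory blob, assuming the constructors shown in this diff (the fixed part split and the null checksum are illustrative):

import org.opensearch.common.blobstore.stream.read.ReadContext;
import org.opensearch.common.io.InputStreamContainer;

import java.io.ByteArrayInputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;

public class ReadContextSketch {
    static ReadContext inMemoryReadContext(byte[] blob, int partSize) {
        List<ReadContext.StreamPartCreator> parts = new ArrayList<>();
        for (int offset = 0; offset < blob.length; offset += partSize) {
            final int start = offset;
            final int len = Math.min(partSize, blob.length - start);
            // Lazy per-part creator: the stream is only opened when get() runs.
            parts.add(() -> CompletableFuture.supplyAsync(
                () -> new InputStreamContainer(new ByteArrayInputStream(blob, start, len), len, start)
            ));
        }
        return new ReadContext(blob.length, parts, null); // checksum omitted in this sketch
    }
}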
+ */ +@InternalApi +class FilePartWriter { + // 8 MB buffer for transfer + private static final int BUFFER_SIZE = 8 * 1024 * 1024; + + public static void write(Path fileLocation, InputStreamContainer stream, UnaryOperator<InputStream> rateLimiter) throws IOException { + try (FileChannel outputFileChannel = FileChannel.open(fileLocation, StandardOpenOption.WRITE, StandardOpenOption.CREATE)) { + try (InputStream inputStream = rateLimiter.apply(stream.getInputStream())) { + long streamOffset = stream.getOffset(); + final byte[] buffer = new byte[BUFFER_SIZE]; + int bytesRead; + while ((bytesRead = inputStream.read(buffer)) != -1) { + Channels.writeToChannel(buffer, 0, bytesRead, outputFileChannel, streamOffset); + streamOffset += bytesRead; + } + } + } + } +} diff --git a/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListener.java b/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListener.java new file mode 100644 index 0000000000000..c77f2384ace0d --- /dev/null +++ b/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListener.java @@ -0,0 +1,199 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.blobstore.stream.read.listener; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.IOUtils; +import org.opensearch.action.support.GroupedActionListener; +import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.UUIDs; +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.common.blobstore.stream.read.ReadContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.util.Collection; +import java.util.Queue; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.UnaryOperator; + +/** + * ReadContextListener orchestrates the async file fetch from the {@link org.opensearch.common.blobstore.BlobContainer} + * using a {@link ReadContext} callback. On response, it spawns off the download using multiple streams. + */
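FilePartWriter works because every part is written at its own absolute offset, so completions can arrive in any order without coordinating a shared file position. The same idea with plain NIO, substituting FileChannel#write(ByteBuffer, long) for the internal Channels helper:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class PositionalPartWriteSketch {
    // Writes the full stream at the given offset; positional writes do not
    // move the channel's own position, so concurrent parts cannot interleave.
    static void writeAt(FileChannel channel, InputStream in, long offset) throws IOException {
        byte[] buffer = new byte[8 * 1024];
        int read;
        while ((read = in.read(buffer)) != -1) {
            ByteBuffer bb = ByteBuffer.wrap(buffer, 0, read);
            while (bb.hasRemaining()) {
                offset += channel.write(bb, offset);
            }
        }
    }

    public static void main(String[] args) throws IOException {
        Path target = Files.createTempFile("download", ".part");
        try (FileChannel ch = FileChannel.open(target, StandardOpenOption.WRITE, StandardOpenOption.CREATE)) {
            writeAt(ch, new ByteArrayInputStream("world".getBytes()), 5); // later part lands first
            writeAt(ch, new ByteArrayInputStream("hello".getBytes()), 0);
        }
        System.out.println(new String(Files.readAllBytes(target))); // helloworld
    }
}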
+@InternalApi +public class ReadContextListener implements ActionListener<ReadContext> { + private static final Logger logger = LogManager.getLogger(ReadContextListener.class); + private static final String DOWNLOAD_PREFIX = "download."; + private final String blobName; + private final Path fileLocation; + private final String tmpFileName; + private final Path tmpFileLocation; + private final ActionListener<String> completionListener; + private final ThreadPool threadPool; + private final UnaryOperator<InputStream> rateLimiter; + private final int maxConcurrentStreams; + + public ReadContextListener( + String blobName, + Path fileLocation, + ActionListener<String> completionListener, + ThreadPool threadPool, + UnaryOperator<InputStream> rateLimiter, + int maxConcurrentStreams + ) { + this.blobName = blobName; + this.fileLocation = fileLocation; + this.completionListener = completionListener; + this.threadPool = threadPool; + this.rateLimiter = rateLimiter; + this.maxConcurrentStreams = maxConcurrentStreams; + this.tmpFileName = DOWNLOAD_PREFIX + UUIDs.randomBase64UUID() + "." + blobName; + this.tmpFileLocation = fileLocation.getParent().resolve(tmpFileName); + } + + @Override + public void onResponse(ReadContext readContext) { + logger.debug("Received {} parts for blob {}", readContext.getNumberOfParts(), blobName); + final int numParts = readContext.getNumberOfParts(); + final AtomicBoolean anyPartStreamFailed = new AtomicBoolean(false); + final GroupedActionListener<String> groupedListener = new GroupedActionListener<>(getFileCompletionListener(), numParts); + final Queue<ReadContext.StreamPartCreator> queue = new ConcurrentLinkedQueue<>(readContext.getPartStreams()); + final StreamPartProcessor processor = new StreamPartProcessor( + queue, + anyPartStreamFailed, + tmpFileLocation, + groupedListener, + threadPool.executor(ThreadPool.Names.REMOTE_RECOVERY), + rateLimiter + ); + for (int i = 0; i < Math.min(maxConcurrentStreams, queue.size()); i++) { + processor.process(queue.poll()); + } + } + + @SuppressForbidden(reason = "need to fsync once all parts received") + private ActionListener<Collection<String>> getFileCompletionListener() { + return ActionListener.wrap(response -> { + logger.trace("renaming temp file [{}] to [{}]", tmpFileLocation, fileLocation); + try { + IOUtils.fsync(tmpFileLocation, false); + Files.move(tmpFileLocation, fileLocation, StandardCopyOption.ATOMIC_MOVE); + // sync parent dir metadata + IOUtils.fsync(fileLocation.getParent(), true); + completionListener.onResponse(blobName); + } catch (IOException e) { + logger.error("Unable to rename temp file " + tmpFileLocation, e); + completionListener.onFailure(e); + } + }, e -> { + try { + Files.deleteIfExists(tmpFileLocation); + } catch (IOException ex) { + logger.warn("Unable to clean temp file {}", tmpFileLocation); + } + completionListener.onFailure(e); + }); + } + + /* + * For Tests + */ + Path getTmpFileLocation() { + return tmpFileLocation; + } + + @Override + public void onFailure(Exception e) { + completionListener.onFailure(e); + } + + private static class StreamPartProcessor { + private static final RuntimeException CANCELED_PART_EXCEPTION = new RuntimeException( + "Canceled part download due to previous failure" + ); + private final Queue<ReadContext.StreamPartCreator> queue; + private final AtomicBoolean anyPartStreamFailed; + private final Path fileLocation; + private final GroupedActionListener<String> completionListener; + private final Executor executor; + private final UnaryOperator<InputStream>
rateLimiter; + + private StreamPartProcessor( + Queue<ReadContext.StreamPartCreator> queue, + AtomicBoolean anyPartStreamFailed, + Path fileLocation, + GroupedActionListener<String> completionListener, + Executor executor, + UnaryOperator<InputStream> rateLimiter + ) { + this.queue = queue; + this.anyPartStreamFailed = anyPartStreamFailed; + this.fileLocation = fileLocation; + this.completionListener = completionListener; + this.executor = executor; + this.rateLimiter = rateLimiter; + } + + private void process(ReadContext.StreamPartCreator supplier) { + if (supplier == null) { + return; + } + supplier.get().whenCompleteAsync((blobPartStreamContainer, throwable) -> { + if (throwable != null) { + processFailure(throwable instanceof Exception ? (Exception) throwable : new RuntimeException(throwable)); + } else if (anyPartStreamFailed.get()) { + processFailure(CANCELED_PART_EXCEPTION); + } else { + try { + FilePartWriter.write(fileLocation, blobPartStreamContainer, rateLimiter); + completionListener.onResponse(fileLocation.toString()); + + // Upon successfully completing a file part, pull another + // file part off the queue to trigger asynchronous processing + process(queue.poll()); + } catch (Exception e) { + processFailure(e); + } + } + }, executor); + } + + private void processFailure(Exception e) { + if (anyPartStreamFailed.getAndSet(true) == false) { + completionListener.onFailure(e); + + // Drain the queue of pending part downloads. These can be discarded + // since they haven't started any work yet, but the listener must be + // notified for each part. + Object item = queue.poll(); + while (item != null) { + completionListener.onFailure(CANCELED_PART_EXCEPTION); + item = queue.poll(); + } + } else { + completionListener.onFailure(e); + } + try { + Files.deleteIfExists(fileLocation); + } catch (IOException ex) { + // Die silently + logger.info("Failed to delete file {} on stream failure: {}", fileLocation, ex); + } + } + } +} diff --git a/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/package-info.java b/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/package-info.java new file mode 100644 index 0000000000000..fe670fe3eb25c --- /dev/null +++ b/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/package-info.java @@ -0,0 +1,14 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Abstractions for stream based file reads from the blob store. + * Provides listeners for performing the necessary async read operations to perform + * multi stream reads for blobs from the container. + * */ +package org.opensearch.common.blobstore.stream.read.listener; diff --git a/server/src/main/java/org/opensearch/common/blobstore/stream/read/package-info.java b/server/src/main/java/org/opensearch/common/blobstore/stream/read/package-info.java new file mode 100644 index 0000000000000..a9e2ca35c1fa6 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/blobstore/stream/read/package-info.java @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Abstractions for stream based file reads from the blob store. + * Provides support for async reads from the blob container. 
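The StreamPartProcessor above bounds concurrency without a semaphore: at most maxConcurrentStreams workers are seeded, and each completed part pulls exactly one successor off the shared queue. That self-scheduling pattern in isolation (the string-producing tasks and pool size are placeholders):

import java.util.Queue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

public class BoundedQueueSketch {
    static void process(Queue<Supplier<CompletableFuture<String>>> queue, ExecutorService executor) {
        Supplier<CompletableFuture<String>> next = queue.poll();
        if (next == null) {
            return; // queue drained; this worker chain ends
        }
        next.get().whenCompleteAsync((result, throwable) -> {
            if (result != null) {
                System.out.println("finished " + result);
            }
            // One completion triggers one successor, so in-flight work never
            // exceeds the number of initially seeded chains.
            process(queue, executor);
        }, executor);
    }

    public static void main(String[] args) throws InterruptedException {
        Queue<Supplier<CompletableFuture<String>>> queue = new ConcurrentLinkedQueue<>();
        for (int i = 0; i < 10; i++) {
            final int part = i;
            queue.add(() -> CompletableFuture.supplyAsync(() -> "part-" + part));
        }
        ExecutorService executor = Executors.newFixedThreadPool(4);
        int maxConcurrentStreams = 3; // mirrors the seeding loop above
        for (int i = 0; i < Math.min(maxConcurrentStreams, queue.size()); i++) {
            process(queue, executor);
        }
        executor.shutdown();
        executor.awaitTermination(5, TimeUnit.SECONDS);
    }
}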
+ * */ +package org.opensearch.common.blobstore.stream.read; diff --git a/server/src/main/java/org/opensearch/common/blobstore/stream/write/WriteContext.java b/server/src/main/java/org/opensearch/common/blobstore/stream/write/WriteContext.java index ef5e3d1e8c26c..e74462f82400d 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/stream/write/WriteContext.java +++ b/server/src/main/java/org/opensearch/common/blobstore/stream/write/WriteContext.java @@ -61,6 +61,20 @@ public WriteContext( this.expectedChecksum = expectedChecksum; } + /** + * Copy constructor used by overriding class + */ + protected WriteContext(WriteContext writeContext) { + this.fileName = writeContext.fileName; + this.streamContextSupplier = writeContext.streamContextSupplier; + this.fileSize = writeContext.fileSize; + this.failIfAlreadyExists = writeContext.failIfAlreadyExists; + this.writePriority = writeContext.writePriority; + this.uploadFinalizer = writeContext.uploadFinalizer; + this.doRemoteDataIntegrityCheck = writeContext.doRemoteDataIntegrityCheck; + this.expectedChecksum = writeContext.expectedChecksum; + } + /** * @return The file name */ diff --git a/server/src/main/java/org/opensearch/common/blobstore/stream/write/WritePriority.java b/server/src/main/java/org/opensearch/common/blobstore/stream/write/WritePriority.java index b8c0b52f93a3c..3f341c878c3c7 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/stream/write/WritePriority.java +++ b/server/src/main/java/org/opensearch/common/blobstore/stream/write/WritePriority.java @@ -15,5 +15,6 @@ */ public enum WritePriority { NORMAL, - HIGH + HIGH, + URGENT } diff --git a/server/src/main/java/org/opensearch/common/blobstore/transfer/RemoteTransferContainer.java b/server/src/main/java/org/opensearch/common/blobstore/transfer/RemoteTransferContainer.java index ca744efae902d..2047c99d9e13b 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/transfer/RemoteTransferContainer.java +++ b/server/src/main/java/org/opensearch/common/blobstore/transfer/RemoteTransferContainer.java @@ -8,23 +8,31 @@ package org.opensearch.common.blobstore.transfer; -import com.jcraft.jzlib.JZlib; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.index.CorruptIndexException; +import org.apache.lucene.store.IndexInput; import org.opensearch.common.CheckedTriFunction; import org.opensearch.common.SetOnce; import org.opensearch.common.StreamContext; import org.opensearch.common.blobstore.stream.write.WriteContext; import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.common.blobstore.transfer.stream.OffsetRangeInputStream; +import org.opensearch.common.blobstore.transfer.stream.RateLimitingOffsetRangeInputStream; import org.opensearch.common.blobstore.transfer.stream.ResettableCheckedInputStream; import org.opensearch.common.io.InputStreamContainer; +import org.opensearch.common.util.ByteUtils; import java.io.Closeable; import java.io.IOException; import java.io.InputStream; import java.util.Objects; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Supplier; +import java.util.zip.CRC32; + +import com.jcraft.jzlib.JZlib; /** * RemoteTransferContainer is an encapsulation for managing file transfers. 
@@ -38,7 +46,7 @@ public class RemoteTransferContainer implements Closeable { private long lastPartSize; private final long contentLength; - private final SetOnce<InputStream[]> inputStreams = new SetOnce<>(); + private final SetOnce<Supplier<Long>[]> checksumSuppliers = new SetOnce<>(); private final String fileName; private final String remoteFileName; private final boolean failTransferIfFileExists; @@ -46,6 +54,7 @@ public class RemoteTransferContainer implements Closeable { private final long expectedChecksum; private final OffsetRangeInputStreamSupplier offsetRangeInputStreamSupplier; private final boolean isRemoteDataIntegritySupported; + private final AtomicBoolean readBlock = new AtomicBoolean(); private static final Logger log = LogManager.getLogger(RemoteTransferContainer.class); @@ -115,23 +124,24 @@ StreamContext supplyStreamContext(long partSize) { } } + @SuppressWarnings({ "unchecked" }) private StreamContext openMultipartStreams(long partSize) throws IOException { - if (inputStreams.get() != null) { + if (checksumSuppliers.get() != null) { throw new IOException("Multi-part streams are already created."); } this.partSize = partSize; this.lastPartSize = (contentLength % partSize) != 0 ? contentLength % partSize : partSize; this.numberOfParts = (int) ((contentLength % partSize) == 0 ? contentLength / partSize : (contentLength / partSize) + 1); - InputStream[] streams = new InputStream[numberOfParts]; - inputStreams.set(streams); + Supplier<Long>[] suppliers = new Supplier[numberOfParts]; + checksumSuppliers.set(suppliers); return new StreamContext(getTransferPartStreamSupplier(), partSize, lastPartSize, numberOfParts); } private CheckedTriFunction<Integer, Long, Long, InputStreamContainer, IOException> getTransferPartStreamSupplier() { return ((partNo, size, position) -> { - assert inputStreams.get() != null : "expected inputStreams to be initialised"; + assert checksumSuppliers.get() != null : "expected container to be initialised"; return getMultipartStreamSupplier(partNo, size, position).get(); }); } @@ -155,10 +165,21 @@ private LocalStreamSupplier<InputStreamContainer> getMultipartStreamSupplier( return () -> { try { OffsetRangeInputStream offsetRangeInputStream = offsetRangeInputStreamSupplier.get(size, position); - InputStream inputStream = !isRemoteDataIntegrityCheckPossible() - ? 
new ResettableCheckedInputStream(offsetRangeInputStream, fileName) - : offsetRangeInputStream; - Objects.requireNonNull(inputStreams.get())[streamIdx] = inputStream; + if (offsetRangeInputStream instanceof RateLimitingOffsetRangeInputStream) { + RateLimitingOffsetRangeInputStream rangeIndexInputStream = (RateLimitingOffsetRangeInputStream) offsetRangeInputStream; + rangeIndexInputStream.setReadBlock(readBlock); + } + InputStream inputStream; + if (isRemoteDataIntegrityCheckPossible() == false) { + ResettableCheckedInputStream resettableCheckedInputStream = new ResettableCheckedInputStream( + offsetRangeInputStream, + fileName + ); + Objects.requireNonNull(checksumSuppliers.get())[streamIdx] = resettableCheckedInputStream::getChecksum; + inputStream = resettableCheckedInputStream; + } else { + inputStream = offsetRangeInputStream; + } return new InputStreamContainer(inputStream, size, position); } catch (IOException e) { @@ -200,20 +221,14 @@ public long getContentLength() { return contentLength; } - private long getInputStreamChecksum(InputStream inputStream) { - assert inputStream instanceof ResettableCheckedInputStream - : "expected passed inputStream to be instance of ResettableCheckedInputStream"; - return ((ResettableCheckedInputStream) inputStream).getChecksum(); - } - private long getActualChecksum() { - InputStream[] currentInputStreams = Objects.requireNonNull(inputStreams.get()); - long checksum = getInputStreamChecksum(currentInputStreams[0]); - for (int checkSumIdx = 1; checkSumIdx < Objects.requireNonNull(inputStreams.get()).length - 1; checkSumIdx++) { - checksum = JZlib.crc32_combine(checksum, getInputStreamChecksum(currentInputStreams[checkSumIdx]), partSize); + Supplier<Long>[] ckSumSuppliers = Objects.requireNonNull(checksumSuppliers.get()); + long checksum = ckSumSuppliers[0].get(); + for (int checkSumIdx = 1; checkSumIdx < ckSumSuppliers.length - 1; checkSumIdx++) { + checksum = JZlib.crc32_combine(checksum, ckSumSuppliers[checkSumIdx].get(), partSize); } if (numberOfParts > 1) { - checksum = JZlib.crc32_combine(checksum, getInputStreamChecksum(currentInputStreams[numberOfParts - 1]), lastPartSize); + checksum = JZlib.crc32_combine(checksum, ckSumSuppliers[numberOfParts - 1].get(), lastPartSize); } return checksum; @@ -221,26 +236,20 @@ private long getActualChecksum() { @Override public void close() throws IOException { - if (inputStreams.get() == null) { - log.warn("Input streams cannot be closed since they are not yet set for multi stream upload"); - return; - } - - boolean closeStreamException = false; - for (InputStream is : Objects.requireNonNull(inputStreams.get())) { - try { - if (is != null) { - is.close(); - } - } catch (IOException ex) { - closeStreamException = true; - // Attempting to close all streams first before throwing exception. - log.error("Multipart stream failed to close ", ex); - } - } + // Setting a read block on all streams ever created by the container. 
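The checksum plumbing above works because CRC32 composes: getActualChecksum folds the per-part checksums back into the whole-file checksum via JZlib.crc32_combine, using the same part arithmetic as openMultipartStreams, where the last part carries the remainder. A self-contained sketch of that invariant, assuming only that jzlib is on the classpath (the demo class and data are ours):

import java.util.zip.CRC32;

import com.jcraft.jzlib.JZlib;

public final class PartChecksumDemo {

    public static void main(String[] args) {
        byte[] data = new byte[10_000];
        for (int i = 0; i < data.length; i++) {
            data[i] = (byte) i;
        }
        long partSize = 4096;
        // Same arithmetic as openMultipartStreams: the last part carries the remainder.
        int numberOfParts = (int) ((data.length % partSize) == 0 ? data.length / partSize : (data.length / partSize) + 1);
        long lastPartSize = (data.length % partSize) != 0 ? data.length % partSize : partSize;

        // Reference checksum over the whole file.
        CRC32 whole = new CRC32();
        whole.update(data, 0, data.length);

        // Per-part checksums folded together, mirroring getActualChecksum.
        long combined = crcOf(data, 0, (int) Math.min(partSize, data.length));
        for (int part = 1; part < numberOfParts; part++) {
            long len = (part == numberOfParts - 1) ? lastPartSize : partSize;
            combined = JZlib.crc32_combine(combined, crcOf(data, (int) (part * partSize), (int) len), len);
        }
        System.out.println(combined == whole.getValue()); // true for any part split
    }

    private static long crcOf(byte[] data, int off, int len) {
        CRC32 crc = new CRC32();
        crc.update(data, off, len);
        return crc.getValue();
    }
}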
+ readBlock.set(true); + } - if (closeStreamException) { - throw new IOException("Closure of some of the multi-part streams failed."); - } + /** + * Compute final checksum for IndexInput container checksum footer added by {@link CodecUtil} + * @param indexInput IndexInput with checksum in footer + * @param checksumBytesLength length of checksum bytes + * @return final computed checksum of entire indexInput + */ + public static long checksumOfChecksum(IndexInput indexInput, int checksumBytesLength) throws IOException { + long storedChecksum = CodecUtil.retrieveChecksum(indexInput); + CRC32 checksumOfChecksum = new CRC32(); + checksumOfChecksum.update(ByteUtils.toByteArrayBE(storedChecksum)); + return JZlib.crc32_combine(storedChecksum, checksumOfChecksum.getValue(), checksumBytesLength); } } diff --git a/server/src/main/java/org/opensearch/common/blobstore/transfer/stream/OffsetRangeIndexInputStream.java b/server/src/main/java/org/opensearch/common/blobstore/transfer/stream/OffsetRangeIndexInputStream.java index 7518f9ac569b9..520c838ba8a81 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/transfer/stream/OffsetRangeIndexInputStream.java +++ b/server/src/main/java/org/opensearch/common/blobstore/transfer/stream/OffsetRangeIndexInputStream.java @@ -8,10 +8,16 @@ package org.opensearch.common.blobstore.transfer.stream; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.IndexInput; +import org.opensearch.common.concurrent.RefCountedReleasable; import org.opensearch.common.lucene.store.InputStreamIndexInput; +import org.opensearch.common.util.concurrent.RunOnce; import java.io.IOException; +import java.util.concurrent.atomic.AtomicBoolean; /** * OffsetRangeIndexInputStream extends InputStream to read from a specified offset using IndexInput @@ -19,9 +25,12 @@ * @opensearch.internal */ public class OffsetRangeIndexInputStream extends OffsetRangeInputStream { - + private static final Logger logger = LogManager.getLogger(OffsetRangeIndexInputStream.class); private final InputStreamIndexInput inputStreamIndexInput; private final IndexInput indexInput; + private AtomicBoolean readBlock; + private final OffsetRangeRefCount offsetRangeRefCount; + private final RunOnce closeOnce; /** * Construct a new OffsetRangeIndexInputStream object @@ -35,16 +44,68 @@ public OffsetRangeIndexInputStream(IndexInput indexInput, long size, long positi indexInput.seek(position); this.indexInput = indexInput; this.inputStreamIndexInput = new InputStreamIndexInput(indexInput, size); + ClosingStreams closingStreams = new ClosingStreams(inputStreamIndexInput, indexInput); + offsetRangeRefCount = new OffsetRangeRefCount(closingStreams); + closeOnce = new RunOnce(offsetRangeRefCount::decRef); + } + + @Override + public void setReadBlock(AtomicBoolean readBlock) { + this.readBlock = readBlock; } @Override public int read(byte[] b, int off, int len) throws IOException { - return inputStreamIndexInput.read(b, off, len); + // There are two levels of check to ensure that we don't read an already closed stream and + // to not close the stream if it is already being read. + // 1. First check is a coarse-grained check outside reference check which allows us to fail fast if read + // was invoked after the stream was closed. We need a separate atomic boolean closed because we don't want a + // future read to succeed when #close has been invoked even if there are on-going reads. 
On-going reads would + // hold a reference, so the ref count stays above 0 even after close was invoked, and future reads would go + // through without any check on closed. Also, closed has to be set externally because it is shared across all + // streams of the file; the check on closed in this class ensures that no other stream of the same file allows + // subsequent reads. closed is set to true in RemoteTransferContainer#close, which is invoked once we are done + // processing all parts of the file, i.e. when every part completed successfully or any part failed. In the + // successful case a subsequent read would not go through anyway, since all streams would have been fully + // consumed, but in the failure case the SDK can continue to invoke read, which would waste compute and IO. + // 2. In the second check, tryIncRef is invoked, which tries to increment the reference count under lock and + // fails if the reference is already closed. If the stream successfully obtains the reference, the stream will + // not be closed underneath it; ref counting thus ensures the stream isn't closed in between reads. + // + // All these protection mechanisms are required in order to prevent invalid access to streams happening + // from the new S3 async SDK. + ensureReadable(); + try (OffsetRangeRefCount ignored = getStreamReference()) { + return inputStreamIndexInput.read(b, off, len); + } + } + + private OffsetRangeRefCount getStreamReference() { + boolean successIncrement = offsetRangeRefCount.tryIncRef(); + if (successIncrement == false) { + throw alreadyClosed("OffsetRangeIndexInputStream is already unreferenced."); + } + return offsetRangeRefCount; + } + + private void ensureReadable() { + if (readBlock != null && readBlock.get() == true) { + logger.debug("Read attempted on a stream which was read blocked!"); + throw alreadyClosed("Read blocked stream."); + } + } + + AlreadyClosedException alreadyClosed(String msg) { + return new AlreadyClosedException(msg + this); } @Override public int read() throws IOException { - return inputStreamIndexInput.read(); + ensureReadable(); + try (OffsetRangeRefCount ignored = getStreamReference()) { + return inputStreamIndexInput.read(); + } } @Override @@ -67,9 +128,42 @@ public long getFilePointer() throws IOException { return indexInput.getFilePointer(); } + @Override + public String toString() { + return "OffsetRangeIndexInputStream{" + "indexInput=" + indexInput + ", readBlock=" + readBlock + '}'; + } + + private static class ClosingStreams { + private final InputStreamIndexInput inputStreamIndexInput; + private final IndexInput indexInput; + + public ClosingStreams(InputStreamIndexInput inputStreamIndexInput, IndexInput indexInput) { + this.inputStreamIndexInput = inputStreamIndexInput; + this.indexInput = indexInput; + } + } + + private static class OffsetRangeRefCount extends RefCountedReleasable<ClosingStreams> { + private static final Logger logger = LogManager.getLogger(OffsetRangeRefCount.class); + + public OffsetRangeRefCount(ClosingStreams ref) { + super("OffsetRangeRefCount", ref, () -> { + try { + ref.inputStreamIndexInput.close(); + } catch (IOException ex) { + logger.error("Failed to close inputStreamIndexInput", ex); + } + try { + ref.indexInput.close(); + } catch (IOException ex) { + logger.error("Failed to close indexInput", ex); + } + }); + } + } + @Override public void close() throws IOException { - inputStreamIndexInput.close(); - indexInput.close(); + closeOnce.run(); } } diff --git 
a/server/src/main/java/org/opensearch/common/blobstore/transfer/stream/OffsetRangeInputStream.java b/server/src/main/java/org/opensearch/common/blobstore/transfer/stream/OffsetRangeInputStream.java index e8b889db1f3b0..eacb972586a5a 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/transfer/stream/OffsetRangeInputStream.java +++ b/server/src/main/java/org/opensearch/common/blobstore/transfer/stream/OffsetRangeInputStream.java @@ -10,6 +10,7 @@ import java.io.IOException; import java.io.InputStream; +import java.util.concurrent.atomic.AtomicBoolean; /** * OffsetRangeInputStream is an abstract class that extends from {@link InputStream} @@ -19,4 +20,8 @@ */ public abstract class OffsetRangeInputStream extends InputStream { public abstract long getFilePointer() throws IOException; + + public void setReadBlock(AtomicBoolean readBlock) { + // Nothing + } } diff --git a/server/src/main/java/org/opensearch/common/blobstore/transfer/stream/RateLimitingOffsetRangeInputStream.java b/server/src/main/java/org/opensearch/common/blobstore/transfer/stream/RateLimitingOffsetRangeInputStream.java new file mode 100644 index 0000000000000..4a511ca1ac155 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/blobstore/transfer/stream/RateLimitingOffsetRangeInputStream.java @@ -0,0 +1,88 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.blobstore.transfer.stream; + +import org.apache.lucene.store.RateLimiter; +import org.opensearch.common.StreamLimiter; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Supplier; + +/** + * Rate Limits an {@link OffsetRangeInputStream} + * + * @opensearch.internal + */ +public class RateLimitingOffsetRangeInputStream extends OffsetRangeInputStream { + + private final StreamLimiter streamLimiter; + + private final OffsetRangeInputStream delegate; + + /** + * The ctor for RateLimitingOffsetRangeInputStream + * @param delegate the underlying {@link OffsetRangeInputStream} + * @param rateLimiterSupplier the supplier for {@link RateLimiter} + * @param listener the listener to be invoked on rate limits + */ + public RateLimitingOffsetRangeInputStream( + OffsetRangeInputStream delegate, + Supplier<RateLimiter> rateLimiterSupplier, + StreamLimiter.Listener listener + ) { + this.streamLimiter = new StreamLimiter(rateLimiterSupplier, listener); + this.delegate = delegate; + } + + public void setReadBlock(AtomicBoolean readBlock) { + delegate.setReadBlock(readBlock); + } + + @Override + public int read() throws IOException { + int b = delegate.read(); + streamLimiter.maybePause(1); + return b; + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + int n = delegate.read(b, off, len); + if (n > 0) { + streamLimiter.maybePause(n); + } + return n; + } + + @Override + public synchronized void mark(int readlimit) { + delegate.mark(readlimit); + } + + @Override + public boolean markSupported() { + return delegate.markSupported(); + } + + @Override + public long getFilePointer() throws IOException { + return delegate.getFilePointer(); + } + + @Override + public synchronized void reset() throws IOException { + delegate.reset(); + } + + @Override + public void close() throws IOException { + delegate.close(); + } +} diff --git 
a/server/src/main/java/org/opensearch/common/blobstore/transfer/stream/ResettableCheckedInputStream.java b/server/src/main/java/org/opensearch/common/blobstore/transfer/stream/ResettableCheckedInputStream.java index c3e1e815e9eab..2f779b14c48cf 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/transfer/stream/ResettableCheckedInputStream.java +++ b/server/src/main/java/org/opensearch/common/blobstore/transfer/stream/ResettableCheckedInputStream.java @@ -8,11 +8,11 @@ package org.opensearch.common.blobstore.transfer.stream; -import com.jcraft.jzlib.CRC32; - import java.io.FilterInputStream; import java.io.IOException; +import com.jcraft.jzlib.CRC32; + /** * ResettableCheckedInputStream is a modified implementation of {@link java.util.zip.CheckedInputStream} that supports * mark and reset and modifies the file checksum during mark and reset calls. diff --git a/server/src/main/java/org/opensearch/common/breaker/ChildMemoryCircuitBreaker.java b/server/src/main/java/org/opensearch/common/breaker/ChildMemoryCircuitBreaker.java index 923f592c6bc79..c9b498c3ec6fa 100644 --- a/server/src/main/java/org/opensearch/common/breaker/ChildMemoryCircuitBreaker.java +++ b/server/src/main/java/org/opensearch/common/breaker/ChildMemoryCircuitBreaker.java @@ -34,7 +34,9 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.CircuitBreakingException; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.indices.breaker.BreakerSettings; import org.opensearch.indices.breaker.HierarchyCircuitBreakerService; @@ -202,7 +204,7 @@ private long limit(long bytes, String label, double overheadConstant, long memor /** * Add an <b>exact</b> number of bytes, not checking for tripping the * circuit breaker. This bypasses the overheadConstant multiplication. - * + * <p> * Also does not check with the parent breaker to see if the parent limit * has been exceeded. * diff --git a/server/src/main/java/org/opensearch/common/breaker/NoopCircuitBreaker.java b/server/src/main/java/org/opensearch/common/breaker/NoopCircuitBreaker.java deleted file mode 100644 index ddd72280faa4f..0000000000000 --- a/server/src/main/java/org/opensearch/common/breaker/NoopCircuitBreaker.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.common.breaker; - -/** - * A CircuitBreaker that doesn't increment or adjust, and all operations are - * basically noops - * - * @opensearch.internal - */ -public class NoopCircuitBreaker implements CircuitBreaker { - public static final int LIMIT = -1; - - private final String name; - - public NoopCircuitBreaker(String name) { - this.name = name; - } - - @Override - public void circuitBreak(String fieldName, long bytesNeeded) { - // noop - } - - @Override - public double addEstimateBytesAndMaybeBreak(long bytes, String label) throws CircuitBreakingException { - return 0; - } - - @Override - public long addWithoutBreaking(long bytes) { - return 0; - } - - @Override - public long getUsed() { - return 0; - } - - @Override - public long getLimit() { - return LIMIT; - } - - @Override - public double getOverhead() { - return 0; - } - - @Override - public long getTrippedCount() { - return 0; - } - - @Override - public String getName() { - return this.name; - } - - @Override - public Durability getDurability() { - return Durability.PERMANENT; - } - - @Override - public void setLimitAndOverhead(long limit, double overhead) {} -} diff --git a/server/src/main/java/org/opensearch/common/bytes/RecyclingBytesStreamOutput.java b/server/src/main/java/org/opensearch/common/bytes/RecyclingBytesStreamOutput.java index 250ea6092c337..d965899b1fabd 100644 --- a/server/src/main/java/org/opensearch/common/bytes/RecyclingBytesStreamOutput.java +++ b/server/src/main/java/org/opensearch/common/bytes/RecyclingBytesStreamOutput.java @@ -35,14 +35,14 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; import org.opensearch.common.Nullable; +import org.opensearch.common.util.BigArrays; +import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.bytes.CompositeBytesReference; import org.opensearch.core.common.io.stream.BytesStream; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.util.BigArrays; import org.opensearch.core.common.util.ByteArray; -import org.opensearch.common.util.io.IOUtils; import java.io.IOException; import java.util.Objects; diff --git a/server/src/main/java/org/opensearch/common/bytes/ReleasableBytesReference.java b/server/src/main/java/org/opensearch/common/bytes/ReleasableBytesReference.java index e0bc97f282e09..23069c8a377e5 100644 --- a/server/src/main/java/org/opensearch/common/bytes/ReleasableBytesReference.java +++ b/server/src/main/java/org/opensearch/common/bytes/ReleasableBytesReference.java @@ -35,9 +35,9 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; import org.opensearch.common.concurrent.RefCountedReleasable; +import org.opensearch.common.lease.Releasable; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.lease.Releasable; import org.opensearch.core.xcontent.XContentBuilder; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/common/cache/Cache.java b/server/src/main/java/org/opensearch/common/cache/Cache.java index 0ebef1556424b..d8aa4e93735e6 100644 --- a/server/src/main/java/org/opensearch/common/cache/Cache.java +++ b/server/src/main/java/org/opensearch/common/cache/Cache.java @@ -32,6 +32,7 @@ package org.opensearch.common.cache; +import org.opensearch.common.annotation.PublicApi; import 
org.opensearch.common.collect.Tuple; import org.opensearch.common.util.concurrent.ReleasableLock; @@ -80,8 +81,9 @@ * @param <K> The type of the keys * @param <V> The type of the values * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Cache<K, V> { // positive if entries have an expiration @@ -403,7 +405,7 @@ private V get(K key, long now, Consumer<Entry<K, V>> onExpiration) { * If the specified key is not already associated with a value (or is mapped to null), attempts to compute its * value using the given mapping function and enters it into this map unless null. The load method for a given key * will be invoked at most once. - * + * <p> * Use of different {@link CacheLoader} implementations on the same key concurrently may result in only the first * loader function being called and the second will be returned the result provided by the first including any exceptions * thrown during the execution of the first. @@ -422,68 +424,74 @@ public V computeIfAbsent(K key, CacheLoader<K, V> loader) throws ExecutionExcept } }); if (value == null) { - // we need to synchronize loading of a value for a given key; however, holding the segment lock while - // invoking load can lead to deadlock against another thread due to dependent key loading; therefore, we - // need a mechanism to ensure that load is invoked at most once, but we are not invoking load while holding - // the segment lock; to do this, we atomically put a future in the map that can load the value, and then - // get the value from this future on the thread that won the race to place the future into the segment map - CacheSegment<K, V> segment = getCacheSegment(key); - CompletableFuture<Entry<K, V>> future; - CompletableFuture<Entry<K, V>> completableFuture = new CompletableFuture<>(); + value = compute(key, loader); + } + return value; + } - try (ReleasableLock ignored = segment.writeLock.acquire()) { - future = segment.map.putIfAbsent(key, completableFuture); - } + private V compute(K key, CacheLoader<K, V> loader) throws ExecutionException { + long now = now(); + // we need to synchronize loading of a value for a given key; however, holding the segment lock while + // invoking load can lead to deadlock against another thread due to dependent key loading; therefore, we + // need a mechanism to ensure that load is invoked at most once, but we are not invoking load while holding + // the segment lock; to do this, we atomically put a future in the map that can load the value, and then + // get the value from this future on the thread that won the race to place the future into the segment map + CacheSegment<K, V> segment = getCacheSegment(key); + CompletableFuture<Entry<K, V>> future; + CompletableFuture<Entry<K, V>> completableFuture = new CompletableFuture<>(); - BiFunction<? super Entry<K, V>, Throwable, ? 
extends V> handler = (ok, ex) -> { - if (ok != null) { - try (ReleasableLock ignored = lruLock.acquire()) { - promote(ok, now); - } - return ok.value; - } else { - try (ReleasableLock ignored = segment.writeLock.acquire()) { - CompletableFuture<Entry<K, V>> sanity = segment.map.get(key); - if (sanity != null && sanity.isCompletedExceptionally()) { - segment.map.remove(key); - } - } - return null; - } - }; + try (ReleasableLock ignored = segment.writeLock.acquire()) { + future = segment.map.putIfAbsent(key, completableFuture); + } - CompletableFuture<V> completableValue; - if (future == null) { - future = completableFuture; - completableValue = future.handle(handler); - V loaded; - try { - loaded = loader.load(key); - } catch (Exception e) { - future.completeExceptionally(e); - throw new ExecutionException(e); - } - if (loaded == null) { - NullPointerException npe = new NullPointerException("loader returned a null value"); - future.completeExceptionally(npe); - throw new ExecutionException(npe); - } else { - future.complete(new Entry<>(key, loaded, now)); + BiFunction<? super Entry<K, V>, Throwable, ? extends V> handler = (ok, ex) -> { + if (ok != null) { + try (ReleasableLock ignored = lruLock.acquire()) { + promote(ok, now); } + return ok.value; } else { - completableValue = future.handle(handler); + try (ReleasableLock ignored = segment.writeLock.acquire()) { + CompletableFuture<Entry<K, V>> sanity = segment.map.get(key); + if (sanity != null && sanity.isCompletedExceptionally()) { + segment.map.remove(key); + } + } + return null; } + }; + CompletableFuture<V> completableValue; + if (future == null) { + future = completableFuture; + completableValue = future.handle(handler); + V loaded; try { - value = completableValue.get(); - // check to ensure the future hasn't been completed with an exception - if (future.isCompletedExceptionally()) { - future.get(); // call get to force the exception to be thrown for other concurrent callers - throw new IllegalStateException("the future was completed exceptionally but no exception was thrown"); - } - } catch (InterruptedException e) { - throw new IllegalStateException(e); + loaded = loader.load(key); + } catch (Exception e) { + future.completeExceptionally(e); + throw new ExecutionException(e); } + if (loaded == null) { + NullPointerException npe = new NullPointerException("loader returned a null value"); + future.completeExceptionally(npe); + throw new ExecutionException(npe); + } else { + future.complete(new Entry<>(key, loaded, now)); + } + } else { + completableValue = future.handle(handler); + } + V value; + try { + value = completableValue.get(); + // check to ensure the future hasn't been completed with an exception + if (future.isCompletedExceptionally()) { + future.get(); // call get to force the exception to be thrown for other concurrent callers + throw new IllegalStateException("the future was completed exceptionally but no exception was thrown"); + } + } catch (InterruptedException e) { + throw new IllegalStateException(e); } return value; } @@ -732,8 +740,9 @@ public CacheStats stats() { /** * Cache statistics * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class CacheStats { private long hits; private long misses; diff --git a/server/src/main/java/org/opensearch/common/cache/CacheLoader.java b/server/src/main/java/org/opensearch/common/cache/CacheLoader.java index 3c80fe5d66d5d..9fd43a03e7f9d 100644 --- a/server/src/main/java/org/opensearch/common/cache/CacheLoader.java +++ 
b/server/src/main/java/org/opensearch/common/cache/CacheLoader.java @@ -32,12 +32,15 @@ package org.opensearch.common.cache; +import org.opensearch.common.annotation.PublicApi; + /** * An interface for a cache loader. * - * @opensearch.internal + * @opensearch.api */ @FunctionalInterface +@PublicApi(since = "1.0.0") public interface CacheLoader<K, V> { V load(K key) throws Exception; } diff --git a/server/src/main/java/org/opensearch/common/cache/CacheType.java b/server/src/main/java/org/opensearch/common/cache/CacheType.java new file mode 100644 index 0000000000000..c5aeb7cd1fa40 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/CacheType.java @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache; + +import org.opensearch.common.annotation.ExperimentalApi; + +/** + * Cache types available within OpenSearch. + */ +@ExperimentalApi +public enum CacheType { + INDICES_REQUEST_CACHE("indices.requests.cache"); + + private final String settingPrefix; + + CacheType(String settingPrefix) { + this.settingPrefix = settingPrefix; + } + + public String getSettingPrefix() { + return settingPrefix; + } +} diff --git a/server/src/main/java/org/opensearch/common/cache/ICache.java b/server/src/main/java/org/opensearch/common/cache/ICache.java new file mode 100644 index 0000000000000..f7be46a852631 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/ICache.java @@ -0,0 +1,51 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.cache.store.config.CacheConfig; + +import java.io.Closeable; +import java.util.Map; + +/** + * Represents a cache interface. + * @param <K> Type of key. + * @param <V> Type of value. + * + * @opensearch.experimental + */ +@ExperimentalApi +public interface ICache<K, V> extends Closeable { + V get(K key); + + void put(K key, V value); + + V computeIfAbsent(K key, LoadAwareCacheLoader<K, V> loader) throws Exception; + + void invalidate(K key); + + void invalidateAll(); + + Iterable<K> keys(); + + long count(); + + void refresh(); + + /** + * Factory to create objects. + */ + @ExperimentalApi + interface Factory { + <K, V> ICache<K, V> create(CacheConfig<K, V> config, CacheType cacheType, Map<String, Factory> cacheFactories); + + String getCacheName(); + } +} diff --git a/server/src/main/java/org/opensearch/common/cache/LoadAwareCacheLoader.java b/server/src/main/java/org/opensearch/common/cache/LoadAwareCacheLoader.java new file mode 100644 index 0000000000000..aafd46560021b --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/LoadAwareCacheLoader.java @@ -0,0 +1,23 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache; + +import org.opensearch.common.annotation.ExperimentalApi; + +/** + * Extends a cache loader with awareness of whether the data is loaded or not. + * @param <K> Type of key. + * @param <V> Type of value. 
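+ * <p>
+ * A hypothetical implementation sketch; the flag lets callers distinguish a cache hit from a fresh load
+ * ({@code compute} is a stand-in for real loading logic):
+ * <pre>{@code
+ * LoadAwareCacheLoader<String, String> loader = new LoadAwareCacheLoader<>() {
+ *     private boolean loaded;
+ *
+ *     public String load(String key) {
+ *         loaded = true;
+ *         return compute(key);
+ *     }
+ *
+ *     public boolean isLoaded() {
+ *         return loaded;
+ *     }
+ * };
+ * }</pre>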
+ * + * @opensearch.experimental + */ +@ExperimentalApi +public interface LoadAwareCacheLoader<K, V> extends CacheLoader<K, V> { + boolean isLoaded(); +} diff --git a/server/src/main/java/org/opensearch/common/cache/RemovalNotification.java b/server/src/main/java/org/opensearch/common/cache/RemovalNotification.java index 6d355b2122460..42303b8cfdc73 100644 --- a/server/src/main/java/org/opensearch/common/cache/RemovalNotification.java +++ b/server/src/main/java/org/opensearch/common/cache/RemovalNotification.java @@ -32,11 +32,14 @@ package org.opensearch.common.cache; +import org.opensearch.common.annotation.PublicApi; + /** * Notification when an element is removed from the cache * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RemovalNotification<K, V> { private final K key; diff --git a/server/src/main/java/org/opensearch/common/cache/RemovalReason.java b/server/src/main/java/org/opensearch/common/cache/RemovalReason.java index e5d795c093547..514b84a7823ca 100644 --- a/server/src/main/java/org/opensearch/common/cache/RemovalReason.java +++ b/server/src/main/java/org/opensearch/common/cache/RemovalReason.java @@ -8,11 +8,14 @@ package org.opensearch.common.cache; +import org.opensearch.common.annotation.PublicApi; + /** * Reason for notification removal * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum RemovalReason { REPLACED, INVALIDATED, diff --git a/server/src/main/java/org/opensearch/common/cache/module/CacheModule.java b/server/src/main/java/org/opensearch/common/cache/module/CacheModule.java new file mode 100644 index 0000000000000..832a65b573aec --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/module/CacheModule.java @@ -0,0 +1,66 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache.module; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.cache.service.CacheService; +import org.opensearch.common.cache.store.OpenSearchOnHeapCache; +import org.opensearch.common.settings.Settings; +import org.opensearch.plugins.CachePlugin; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Holds all the cache factories and provides a way to fetch them when needed. + */ +@ExperimentalApi +public class CacheModule { + + private final Map<String, ICache.Factory> cacheStoreTypeFactories; + + private final CacheService cacheService; + private final Settings settings; + + public CacheModule(List<CachePlugin> cachePlugins, Settings settings) { + this.cacheStoreTypeFactories = getCacheStoreTypeFactories(cachePlugins); + this.settings = settings; + this.cacheService = new CacheService(cacheStoreTypeFactories, settings); + } + + private static Map<String, ICache.Factory> getCacheStoreTypeFactories(List<CachePlugin> cachePlugins) { + Map<String, ICache.Factory> cacheStoreTypeFactories = new HashMap<>(); + // Add the core OpenSearchOnHeapCache as well. 
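+ // Plugin-supplied factories are merged in below; registering a duplicate store name fails fast
+ // with an IllegalArgumentException.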
+ cacheStoreTypeFactories.put( + OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME, + new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory() + ); + for (CachePlugin cachePlugin : cachePlugins) { + Map<String, ICache.Factory> factoryMap = cachePlugin.getCacheFactoryMap(); + for (Map.Entry<String, ICache.Factory> entry : factoryMap.entrySet()) { + if (cacheStoreTypeFactories.put(entry.getKey(), entry.getValue()) != null) { + throw new IllegalArgumentException("Cache name: " + entry.getKey() + " is already registered"); + } + } + } + return Collections.unmodifiableMap(cacheStoreTypeFactories); + } + + public CacheService getCacheService() { + return this.cacheService; + } + + // Package private for testing. + Map<String, ICache.Factory> getCacheStoreTypeFactories() { + return cacheStoreTypeFactories; + } +} diff --git a/server/src/main/java/org/opensearch/common/cache/module/package-info.java b/server/src/main/java/org/opensearch/common/cache/module/package-info.java new file mode 100644 index 0000000000000..95ed25ca21643 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/module/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base package for cache providers. */ +package org.opensearch.common.cache.module; diff --git a/server/src/main/java/org/opensearch/common/cache/service/CacheService.java b/server/src/main/java/org/opensearch/common/cache/service/CacheService.java new file mode 100644 index 0000000000000..b6710e5e4b424 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/service/CacheService.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache.service; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.cache.settings.CacheSettings; +import org.opensearch.common.cache.store.OpenSearchOnHeapCache; +import org.opensearch.common.cache.store.config.CacheConfig; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; + +import java.util.HashMap; +import java.util.Map; + +/** + * Service responsible for creating caches.
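+ * <p>
+ * A hypothetical usage sketch ({@code Key}/{@code Value} stand in for real types; when the pluggable
+ * cache feature flag is off or no store name is configured, createCache falls back to the built-in
+ * opensearch_onheap store):
+ * <pre>{@code
+ * CacheService cacheService = new CacheModule(cachePlugins, settings).getCacheService();
+ * CacheConfig<Key, Value> config = new CacheConfig.Builder<Key, Value>()
+ *     .setSettings(settings)
+ *     .setWeigher((k, v) -> 1L)
+ *     .setRemovalListener(notification -> {})
+ *     .setMaxSizeInBytes(1024 * 1024)
+ *     .build();
+ * ICache<Key, Value> cache = cacheService.createCache(config, CacheType.INDICES_REQUEST_CACHE);
+ * }</pre>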
+ */ +@ExperimentalApi +public class CacheService { + + private final Map<String, ICache.Factory> cacheStoreTypeFactories; + private final Settings settings; + private Map<CacheType, ICache<?, ?>> cacheTypeMap; + + public CacheService(Map<String, ICache.Factory> cacheStoreTypeFactories, Settings settings) { + this.cacheStoreTypeFactories = cacheStoreTypeFactories; + this.settings = settings; + this.cacheTypeMap = new HashMap<>(); + } + + public Map<CacheType, ICache<?, ?>> getCacheTypeMap() { + return this.cacheTypeMap; + } + + public <K, V> ICache<K, V> createCache(CacheConfig<K, V> config, CacheType cacheType) { + Setting<String> cacheSettingForCacheType = CacheSettings.CACHE_TYPE_STORE_NAME.getConcreteSettingForNamespace( + cacheType.getSettingPrefix() + ); + String storeName = cacheSettingForCacheType.get(settings); + if (!FeatureFlags.PLUGGABLE_CACHE_SETTING.get(settings) || (storeName == null || storeName.isBlank())) { + // Condition 1: In case the feature flag is off, we default to onHeap. + // Condition 2: In case storeName is not explicitly mentioned, we assume the user is looking to use older + // settings, so we again fall back to onHeap to maintain backward compatibility. + // It is guaranteed that this store name is registered, so this is safe. + storeName = OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME; + } + if (!cacheStoreTypeFactories.containsKey(storeName)) { + throw new IllegalArgumentException("No store name: [" + storeName + "] is registered for cache type: " + cacheType); + } + ICache.Factory factory = cacheStoreTypeFactories.get(storeName); + ICache<K, V> iCache = factory.create(config, cacheType, cacheStoreTypeFactories); + cacheTypeMap.put(cacheType, iCache); + return iCache; + } +} diff --git a/server/src/main/java/org/opensearch/common/cache/service/package-info.java b/server/src/main/java/org/opensearch/common/cache/service/package-info.java new file mode 100644 index 0000000000000..5fb87f7613627 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/service/package-info.java @@ -0,0 +1,9 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +/** Package related to cache service **/ +package org.opensearch.common.cache.service; diff --git a/server/src/main/java/org/opensearch/common/cache/settings/CacheSettings.java b/server/src/main/java/org/opensearch/common/cache/settings/CacheSettings.java new file mode 100644 index 0000000000000..43a047f0f22c6 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/settings/CacheSettings.java @@ -0,0 +1,34 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache.settings; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.settings.Setting; + +/** + * Settings related to caches. + */ +@ExperimentalApi +public class CacheSettings { + + /** + * Stores the cache store name for each cache type within OpenSearch. + * Setting pattern: {cache_type}.store.name + * Example: indices.requests.cache.store.name + */ + public static final Setting.AffixSetting<String> CACHE_TYPE_STORE_NAME = Setting.suffixKeySetting( + "store.name", + (key) -> Setting.simpleString(key, "", Setting.Property.NodeScope) + ); + + public static Setting<String> getConcreteStoreNameSettingForCacheType(CacheType cacheType) { + return CACHE_TYPE_STORE_NAME.getConcreteSettingForNamespace(cacheType.getSettingPrefix()); + } +} diff --git a/server/src/main/java/org/opensearch/common/cache/settings/package-info.java b/server/src/main/java/org/opensearch/common/cache/settings/package-info.java new file mode 100644 index 0000000000000..7fa82021c5557 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/settings/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base package for cache settings */ +package org.opensearch.common.cache.settings; diff --git a/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java b/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java new file mode 100644 index 0000000000000..c9bec4ba47def --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java @@ -0,0 +1,154 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache.store; + +import org.opensearch.common.cache.Cache; +import org.opensearch.common.cache.CacheBuilder; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.cache.LoadAwareCacheLoader; +import org.opensearch.common.cache.RemovalListener; +import org.opensearch.common.cache.RemovalNotification; +import org.opensearch.common.cache.settings.CacheSettings; +import org.opensearch.common.cache.store.builders.ICacheBuilder; +import org.opensearch.common.cache.store.config.CacheConfig; +import org.opensearch.common.cache.store.settings.OpenSearchOnHeapCacheSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.common.unit.ByteSizeValue; + +import java.util.Map; + +import static org.opensearch.common.cache.store.settings.OpenSearchOnHeapCacheSettings.EXPIRE_AFTER_ACCESS_KEY; +import static org.opensearch.common.cache.store.settings.OpenSearchOnHeapCacheSettings.MAXIMUM_SIZE_IN_BYTES_KEY; + +/** + * This variant of on-heap cache uses the OpenSearch custom cache implementation. + * @param <K> Type of key. + * @param <V> Type of value.
+ * + * @opensearch.experimental + */ +public class OpenSearchOnHeapCache<K, V> implements ICache<K, V>, RemovalListener<K, V> { + + private final Cache<K, V> cache; + private final RemovalListener<K, V> removalListener; + + public OpenSearchOnHeapCache(Builder<K, V> builder) { + CacheBuilder<K, V> cacheBuilder = CacheBuilder.<K, V>builder() + .setMaximumWeight(builder.getMaxWeightInBytes()) + .weigher(builder.getWeigher()) + .removalListener(this); + if (builder.getExpireAfterAccess() != null) { + cacheBuilder.setExpireAfterAccess(builder.getExpireAfterAccess()); + } + cache = cacheBuilder.build(); + this.removalListener = builder.getRemovalListener(); + } + + @Override + public V get(K key) { + V value = cache.get(key); + return value; + } + + @Override + public void put(K key, V value) { + cache.put(key, value); + } + + @Override + public V computeIfAbsent(K key, LoadAwareCacheLoader<K, V> loader) throws Exception { + V value = cache.computeIfAbsent(key, key1 -> loader.load(key)); + return value; + } + + @Override + public void invalidate(K key) { + cache.invalidate(key); + } + + @Override + public void invalidateAll() { + cache.invalidateAll(); + } + + @Override + public Iterable<K> keys() { + return cache.keys(); + } + + @Override + public long count() { + return cache.count(); + } + + @Override + public void refresh() { + cache.refresh(); + } + + @Override + public void close() {} + + @Override + public void onRemoval(RemovalNotification<K, V> notification) { + this.removalListener.onRemoval(notification); + } + + /** + * Factory to create OpenSearchOnHeapCache instances. + */ + public static class OpenSearchOnHeapCacheFactory implements Factory { + + public static final String NAME = "opensearch_onheap"; + + @Override + public <K, V> ICache<K, V> create(CacheConfig<K, V> config, CacheType cacheType, Map<String, Factory> cacheFactories) { + Map<String, Setting<?>> settingList = OpenSearchOnHeapCacheSettings.getSettingListForCacheType(cacheType); + Settings settings = config.getSettings(); + ICacheBuilder<K, V> builder = new Builder<K, V>().setMaximumWeightInBytes( + ((ByteSizeValue) settingList.get(MAXIMUM_SIZE_IN_BYTES_KEY).get(settings)).getBytes() + ) + .setExpireAfterAccess(((TimeValue) settingList.get(EXPIRE_AFTER_ACCESS_KEY).get(settings))) + .setWeigher(config.getWeigher()) + .setRemovalListener(config.getRemovalListener()); + Setting<String> cacheSettingForCacheType = CacheSettings.CACHE_TYPE_STORE_NAME.getConcreteSettingForNamespace( + cacheType.getSettingPrefix() + ); + String storeName = cacheSettingForCacheType.get(settings); + if (!FeatureFlags.PLUGGABLE_CACHE_SETTING.get(settings) || (storeName == null || storeName.isBlank())) { + // For backward compatibility as the user intent is to use older settings.
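+ // The size and expiry carried on CacheConfig come from the pre-existing cache settings
+ // (e.g. the indices.requests.cache.size style settings), so they take precedence here.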
+ builder.setMaximumWeightInBytes(config.getMaxSizeInBytes()); + builder.setExpireAfterAccess(config.getExpireAfterAccess()); + } + return builder.build(); + } + + @Override + public String getCacheName() { + return NAME; + } + } + + /** + * Builder object + * @param <K> Type of key + * @param <V> Type of value + */ + public static class Builder<K, V> extends ICacheBuilder<K, V> { + + @Override + public ICache<K, V> build() { + return new OpenSearchOnHeapCache<K, V>(this); + } + } +} diff --git a/server/src/main/java/org/opensearch/common/cache/store/builders/ICacheBuilder.java b/server/src/main/java/org/opensearch/common/cache/store/builders/ICacheBuilder.java new file mode 100644 index 0000000000000..7ca9080ec1aa6 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/store/builders/ICacheBuilder.java @@ -0,0 +1,87 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache.store.builders; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.cache.RemovalListener; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; + +import java.util.function.ToLongBiFunction; + +/** + * Builder for a store-aware cache. + * @param <K> Type of key. + * @param <V> Type of value. + * + * @opensearch.experimental + */ +@ExperimentalApi +public abstract class ICacheBuilder<K, V> { + + private long maxWeightInBytes; + + private ToLongBiFunction<K, V> weigher; + + private TimeValue expireAfterAccess; + + private Settings settings; + + private RemovalListener<K, V> removalListener; + + public ICacheBuilder() {} + + public ICacheBuilder<K, V> setMaximumWeightInBytes(long sizeInBytes) { + this.maxWeightInBytes = sizeInBytes; + return this; + } + + public ICacheBuilder<K, V> setWeigher(ToLongBiFunction<K, V> weigher) { + this.weigher = weigher; + return this; + } + + public ICacheBuilder<K, V> setExpireAfterAccess(TimeValue expireAfterAccess) { + this.expireAfterAccess = expireAfterAccess; + return this; + } + + public ICacheBuilder<K, V> setSettings(Settings settings) { + this.settings = settings; + return this; + } + + public ICacheBuilder<K, V> setRemovalListener(RemovalListener<K, V> removalListener) { + this.removalListener = removalListener; + return this; + } + + public long getMaxWeightInBytes() { + return maxWeightInBytes; + } + + public TimeValue getExpireAfterAccess() { + return expireAfterAccess; + } + + public ToLongBiFunction<K, V> getWeigher() { + return weigher; + } + + public RemovalListener<K, V> getRemovalListener() { + return this.removalListener; + } + + public Settings getSettings() { + return settings; + } + + public abstract ICache<K, V> build(); +} diff --git a/server/src/main/java/org/opensearch/common/cache/store/builders/package-info.java b/server/src/main/java/org/opensearch/common/cache/store/builders/package-info.java new file mode 100644 index 0000000000000..ac4590ae3bff7 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/store/builders/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Base package for builders.
+ */ +package org.opensearch.common.cache.store.builders; diff --git a/server/src/main/java/org/opensearch/common/cache/store/config/CacheConfig.java b/server/src/main/java/org/opensearch/common/cache/store/config/CacheConfig.java new file mode 100644 index 0000000000000..fa82e9be72e6e --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/store/config/CacheConfig.java @@ -0,0 +1,155 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache.store.config; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.cache.RemovalListener; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; + +import java.util.function.ToLongBiFunction; + +/** + * Common configurations related to store aware caches. + * + * @opensearch.experimental + */ +@ExperimentalApi +public class CacheConfig<K, V> { + + private final Settings settings; + + /** + * Defines the key type. + */ + private final Class<K> keyType; + + /** + * Defines the value type. + */ + private final Class<V> valueType; + + /** + * Represents a function that calculates the size or weight of a key-value pair. + */ + private final ToLongBiFunction<K, V> weigher; + + private final RemovalListener<K, V> removalListener; + + /** + * Max size in bytes for the cache. This is needed for backward compatibility. + */ + private final long maxSizeInBytes; + + /** + * Defines the expiration time for a cache entry. This is needed for backward compatibility. + */ + private final TimeValue expireAfterAccess; + + private CacheConfig(Builder<K, V> builder) { + this.keyType = builder.keyType; + this.valueType = builder.valueType; + this.settings = builder.settings; + this.removalListener = builder.removalListener; + this.weigher = builder.weigher; + this.maxSizeInBytes = builder.maxSizeInBytes; + this.expireAfterAccess = builder.expireAfterAccess; + } + + public Class<K> getKeyType() { + return keyType; + } + + public Class<V> getValueType() { + return valueType; + } + + public Settings getSettings() { + return settings; + } + + public RemovalListener<K, V> getRemovalListener() { + return removalListener; + } + + public ToLongBiFunction<K, V> getWeigher() { + return weigher; + } + + public Long getMaxSizeInBytes() { + return maxSizeInBytes; + } + + public TimeValue getExpireAfterAccess() { + return expireAfterAccess; + } + + /** + * Builder class to build Cache config related parameters. + * @param <K> Type of key. + * @param <V> Type of value. 
+ */ + public static class Builder<K, V> { + + private Settings settings; + + private Class<K> keyType; + + private Class<V> valueType; + + private RemovalListener<K, V> removalListener; + + private ToLongBiFunction<K, V> weigher; + + private long maxSizeInBytes; + + private TimeValue expireAfterAccess; + + public Builder() {} + + public Builder<K, V> setSettings(Settings settings) { + this.settings = settings; + return this; + } + + public Builder<K, V> setKeyType(Class<K> keyType) { + this.keyType = keyType; + return this; + } + + public Builder<K, V> setValueType(Class<V> valueType) { + this.valueType = valueType; + return this; + } + + public Builder<K, V> setRemovalListener(RemovalListener<K, V> removalListener) { + this.removalListener = removalListener; + return this; + } + + public Builder<K, V> setWeigher(ToLongBiFunction<K, V> weigher) { + this.weigher = weigher; + return this; + } + + public Builder<K, V> setMaxSizeInBytes(long sizeInBytes) { + this.maxSizeInBytes = sizeInBytes; + return this; + } + + public Builder<K, V> setExpireAfterAccess(TimeValue expireAfterAccess) { + this.expireAfterAccess = expireAfterAccess; + return this; + } + + public CacheConfig<K, V> build() { + return new CacheConfig<>(this); + } + } +} diff --git a/server/src/main/java/org/opensearch/common/cache/store/config/package-info.java b/server/src/main/java/org/opensearch/common/cache/store/config/package-info.java new file mode 100644 index 0000000000000..6b662a8af3f9d --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/store/config/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base package for store aware cache config */ +package org.opensearch.common.cache.store.config; diff --git a/server/src/main/java/org/opensearch/common/cache/store/package-info.java b/server/src/main/java/org/opensearch/common/cache/store/package-info.java new file mode 100644 index 0000000000000..edc1ecd7d5e7a --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/store/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base package for store aware caches. */ +package org.opensearch.common.cache.store; diff --git a/server/src/main/java/org/opensearch/common/cache/store/settings/OpenSearchOnHeapCacheSettings.java b/server/src/main/java/org/opensearch/common/cache/store/settings/OpenSearchOnHeapCacheSettings.java new file mode 100644 index 0000000000000..5a2964ad011bf --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/store/settings/OpenSearchOnHeapCacheSettings.java @@ -0,0 +1,84 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.cache.store.settings; + +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.store.OpenSearchOnHeapCache; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.unit.ByteSizeValue; + +import java.util.HashMap; +import java.util.Map; + +import static org.opensearch.common.settings.Setting.Property.NodeScope; + +/** + * Settings for the OpenSearchOnHeap cache store. + */ +public class OpenSearchOnHeapCacheSettings { + + /** + * Setting to define the maximum size for the cache as a percentage of available heap memory. + * + * Setting pattern: {cache_type}.opensearch_onheap.size + */ + public static final Setting.AffixSetting<ByteSizeValue> MAXIMUM_SIZE_IN_BYTES = Setting.suffixKeySetting( + OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME + ".size", + (key) -> Setting.memorySizeSetting(key, "1%", NodeScope) + ); + + /** + * Setting to define the expire-after-access duration. + * + * Setting pattern: {cache_type}.opensearch_onheap.expire + */ + public static final Setting.AffixSetting<TimeValue> EXPIRE_AFTER_ACCESS_SETTING = Setting.suffixKeySetting( + OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME + ".expire", + (key) -> Setting.positiveTimeSetting(key, TimeValue.MAX_VALUE, Setting.Property.NodeScope) + ); + + public static final String MAXIMUM_SIZE_IN_BYTES_KEY = "maximum_size_in_bytes"; + public static final String EXPIRE_AFTER_ACCESS_KEY = "expire_after_access"; + + private static final Map<String, Setting.AffixSetting<?>> KEY_SETTING_MAP = Map.of( + MAXIMUM_SIZE_IN_BYTES_KEY, + MAXIMUM_SIZE_IN_BYTES, + EXPIRE_AFTER_ACCESS_KEY, + EXPIRE_AFTER_ACCESS_SETTING + ); + + public static final Map<CacheType, Map<String, Setting<?>>> CACHE_TYPE_MAP = getCacheTypeMap(); + + private static Map<CacheType, Map<String, Setting<?>>> getCacheTypeMap() { + Map<CacheType, Map<String, Setting<?>>> cacheTypeMap = new HashMap<>(); + for (CacheType cacheType : CacheType.values()) { + Map<String, Setting<?>> settingMap = new HashMap<>(); + for (Map.Entry<String, Setting.AffixSetting<?>> entry : KEY_SETTING_MAP.entrySet()) { + settingMap.put(entry.getKey(), entry.getValue().getConcreteSettingForNamespace(cacheType.getSettingPrefix())); + } + cacheTypeMap.put(cacheType, settingMap); + } + return cacheTypeMap; + } + + public static Map<String, Setting<?>> getSettingListForCacheType(CacheType cacheType) { + Map<String, Setting<?>> cacheTypeSettings = CACHE_TYPE_MAP.get(cacheType); + if (cacheTypeSettings == null) { + throw new IllegalArgumentException( + "No settings exist for cache store name: " + + OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME + + " associated with cache type: " + + cacheType + ); + } + return cacheTypeSettings; + } +} diff --git a/server/src/main/java/org/opensearch/common/cache/store/settings/package-info.java b/server/src/main/java/org/opensearch/common/cache/store/settings/package-info.java new file mode 100644 index 0000000000000..91613876a5f31 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/store/settings/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
*/ + +/** Base package for cache settings */ +package org.opensearch.common.cache.store.settings; diff --git a/server/src/main/java/org/opensearch/common/collect/CopyOnWriteHashMap.java b/server/src/main/java/org/opensearch/common/collect/CopyOnWriteHashMap.java index 5ce77cdc75fe5..de4304f0e1fba 100644 --- a/server/src/main/java/org/opensearch/common/collect/CopyOnWriteHashMap.java +++ b/server/src/main/java/org/opensearch/common/collect/CopyOnWriteHashMap.java @@ -49,15 +49,15 @@ /** * An immutable map whose writes result in a new copy of the map to be created. - * + * <p> * This is essentially a hash array mapped trie: inner nodes use a bitmap in * order to map hashes to slots by counting ones. In case of a collision (two * values having the same 32-bits hash), a leaf node is created which stores * and searches for values sequentially. - * + * <p> * Reads and writes both perform in logarithmic time. Null keys and values are * not supported. - * + * <p> * This structure might need to perform several object creations per write so * it is better suited for work-loads that are not too write-intensive. * @@ -250,7 +250,7 @@ public static <T> T[] insertElement(final T[] array, final T element, final int * and use a bitmap in order to associate hashes to them. For example, if * an inner node contains 5 values, then 5 bits will be set in the bitmap * and the ordinal of the bit set in this bit map will be the slot number. - * + * <p> * As a consequence, the number of slots in an inner node is equal to the * number of one bits in the bitmap. * diff --git a/server/src/main/java/org/opensearch/common/component/package-info.java b/server/src/main/java/org/opensearch/common/component/package-info.java deleted file mode 100644 index 34d034b5a3ffb..0000000000000 --- a/server/src/main/java/org/opensearch/common/component/package-info.java +++ /dev/null @@ -1,10 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/** Base Lifecycle Component package. */ -package org.opensearch.common.component; diff --git a/server/src/main/java/org/opensearch/common/compress/CompressedXContent.java b/server/src/main/java/org/opensearch/common/compress/CompressedXContent.java index 2873e0d39c6ef..23fc6353dbad3 100644 --- a/server/src/main/java/org/opensearch/common/compress/CompressedXContent.java +++ b/server/src/main/java/org/opensearch/common/compress/CompressedXContent.java @@ -32,15 +32,18 @@ package org.opensearch.common.compress; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.io.Streams; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.compress.Compressor; +import org.opensearch.core.compress.CompressorRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import java.io.IOException; import java.io.OutputStream; @@ -55,8 +58,9 @@ * memory.
Note that the compressed string might still sometimes need to be * decompressed in order to perform equality checks or to compute hash codes. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class CompressedXContent { private static int crc32(BytesReference data) { @@ -85,7 +89,7 @@ private CompressedXContent(byte[] compressed, int crc32) { */ public CompressedXContent(ToXContent xcontent, ToXContent.Params params) throws IOException { BytesStreamOutput bStream = new BytesStreamOutput(); - OutputStream compressedStream = CompressorFactory.defaultCompressor().threadLocalOutputStream(bStream); + OutputStream compressedStream = CompressorRegistry.defaultCompressor().threadLocalOutputStream(bStream); CRC32 crc32 = new CRC32(); OutputStream checkedStream = new CheckedOutputStream(compressedStream, crc32); try (XContentBuilder builder = XContentFactory.jsonBuilder(checkedStream)) { @@ -107,20 +111,20 @@ public CompressedXContent(ToXContent xcontent, ToXContent.Params params) throws * that may already be compressed. */ public CompressedXContent(BytesReference data) throws IOException { - Compressor compressor = CompressorFactory.compressor(data); + Compressor compressor = CompressorRegistry.compressor(data); if (compressor != null) { // already compressed... this.bytes = BytesReference.toBytes(data); this.crc32 = crc32(uncompressed()); } else { - this.bytes = BytesReference.toBytes(CompressorFactory.defaultCompressor().compress(data)); + this.bytes = BytesReference.toBytes(CompressorRegistry.defaultCompressor().compress(data)); this.crc32 = crc32(data); } assertConsistent(); } private void assertConsistent() { - assert CompressorFactory.compressor(new BytesArray(bytes)) != null; + assert CompressorRegistry.compressor(new BytesArray(bytes)) != null; assert this.crc32 == crc32(uncompressed()); } @@ -145,7 +149,7 @@ public BytesReference compressedReference() { /** Return the uncompressed bytes. */ public BytesReference uncompressed() { try { - return CompressorFactory.uncompress(new BytesArray(bytes)); + return CompressorRegistry.uncompress(new BytesArray(bytes)); } catch (IOException e) { throw new IllegalStateException("Cannot decompress compressed string", e); } diff --git a/server/src/main/java/org/opensearch/common/compress/CompressorFactory.java b/server/src/main/java/org/opensearch/common/compress/CompressorFactory.java deleted file mode 100644 index 62ec933fe5f37..0000000000000 --- a/server/src/main/java/org/opensearch/common/compress/CompressorFactory.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.common.compress; - -import org.opensearch.common.Nullable; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.core.common.compress.NotXContentException; - -import java.io.IOException; -import java.util.Objects; - -/** - * Factory to create a compressor instance. - * - * @opensearch.internal - */ -public class CompressorFactory { - - public static final Compressor DEFLATE_COMPRESSOR = new DeflateCompressor(); - - public static final Compressor ZSTD_COMPRESSOR = new ZstdCompressor(); - - public static final Compressor NONE_COMPRESSOR = new NoneCompressor(); - - public static boolean isCompressed(BytesReference bytes) { - return compressor(bytes) != null; - } - - public static Compressor defaultCompressor() { - return DEFLATE_COMPRESSOR; - } - - @Nullable - public static Compressor compressor(BytesReference bytes) { - if (DEFLATE_COMPRESSOR.isCompressed(bytes)) { - // bytes should be either detected as compressed or as xcontent, - // if we have bytes that can be either detected as compressed or - // as a xcontent, we have a problem - assert XContentHelper.xContentType(bytes) == null; - return DEFLATE_COMPRESSOR; - } else if (ZSTD_COMPRESSOR.isCompressed(bytes)) { - assert XContentHelper.xContentType(bytes) == null; - return ZSTD_COMPRESSOR; - } - - if (XContentHelper.xContentType(bytes) == null) { - throw new NotXContentException("Compressor detection can only be called on some xcontent bytes or compressed xcontent bytes"); - } - - return null; - } - - /** - * Uncompress the provided data, data can be detected as compressed using {@link #isCompressed(BytesReference)}. - */ - public static BytesReference uncompressIfNeeded(BytesReference bytes) throws IOException { - Compressor compressor = compressor(Objects.requireNonNull(bytes, "the BytesReference must not be null")); - return compressor == null ? bytes : compressor.uncompress(bytes); - } - - /** Decompress the provided {@link BytesReference}. */ - public static BytesReference uncompress(BytesReference bytes) throws IOException { - Compressor compressor = compressor(bytes); - if (compressor == null) { - throw new NotCompressedException(); - } - return compressor.uncompress(bytes); - } -} diff --git a/server/src/main/java/org/opensearch/common/compress/CompressorType.java b/server/src/main/java/org/opensearch/common/compress/CompressorType.java deleted file mode 100644 index 65453cd51848e..0000000000000 --- a/server/src/main/java/org/opensearch/common/compress/CompressorType.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.common.compress; - -/** - * Supported compression types - * - * @opensearch.internal - */ -public enum CompressorType { - - DEFLATE { - @Override - public Compressor compressor() { - return CompressorFactory.DEFLATE_COMPRESSOR; - } - }, - - ZSTD { - @Override - public Compressor compressor() { - return CompressorFactory.ZSTD_COMPRESSOR; - } - }, - - NONE { - @Override - public Compressor compressor() { - return CompressorFactory.NONE_COMPRESSOR; - } - }; - - public abstract Compressor compressor(); -} diff --git a/server/src/main/java/org/opensearch/common/compress/DeflateCompressor.java b/server/src/main/java/org/opensearch/common/compress/DeflateCompressor.java index 54cc3ad9d420e..3ccac1a941741 100644 --- a/server/src/main/java/org/opensearch/common/compress/DeflateCompressor.java +++ b/server/src/main/java/org/opensearch/common/compress/DeflateCompressor.java @@ -32,10 +32,12 @@ package org.opensearch.common.compress; -import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.Assertions; import org.opensearch.common.lease.Releasable; +import org.opensearch.core.Assertions; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.compress.Compressor; import java.io.BufferedInputStream; import java.io.BufferedOutputStream; @@ -52,7 +54,8 @@ /** * {@link Compressor} implementation based on the DEFLATE compression algorithm. * - * @opensearch.internal + * @opensearch.api - registered name requires BWC support + * @opensearch.experimental - class methods might change */ public class DeflateCompressor implements Compressor { @@ -61,6 +64,15 @@ public class DeflateCompressor implements Compressor { // enough so that no stream starting with these bytes could be detected as // a XContent private static final byte[] HEADER = new byte[] { 'D', 'F', 'L', '\0' }; + + /** + * The name to register the compressor by + * + * @opensearch.api - requires BWC support + */ + @PublicApi(since = "2.10.0") + public static String NAME = "DEFLATE"; + // 3 is a good trade-off between speed and compression ratio private static final int LEVEL = 3; // We use buffering on the input and output of in/def-laters in order to diff --git a/server/src/main/java/org/opensearch/common/compress/NoneCompressor.java b/server/src/main/java/org/opensearch/common/compress/NoneCompressor.java deleted file mode 100644 index 775152b0e9388..0000000000000 --- a/server/src/main/java/org/opensearch/common/compress/NoneCompressor.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.common.compress; - -import org.opensearch.core.common.bytes.BytesReference; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; - -/** - * {@link Compressor} no compressor implementation. 
- * - * @opensearch.internal - */ -public class NoneCompressor implements Compressor { - @Override - public boolean isCompressed(BytesReference bytes) { - return false; - } - - @Override - public int headerLength() { - return 0; - } - - @Override - public InputStream threadLocalInputStream(InputStream in) throws IOException { - return in; - } - - @Override - public OutputStream threadLocalOutputStream(OutputStream out) throws IOException { - return out; - } - - @Override - public BytesReference uncompress(BytesReference bytesReference) throws IOException { - return bytesReference; - } - - @Override - public BytesReference compress(BytesReference bytesReference) throws IOException { - return bytesReference; - } - -} diff --git a/server/src/main/java/org/opensearch/common/compress/ZstdCompressor.java b/server/src/main/java/org/opensearch/common/compress/ZstdCompressor.java deleted file mode 100644 index cc3ab57e604b7..0000000000000 --- a/server/src/main/java/org/opensearch/common/compress/ZstdCompressor.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.common.compress; - -import com.github.luben.zstd.RecyclingBufferPool; -import com.github.luben.zstd.ZstdInputStreamNoFinalizer; -import com.github.luben.zstd.ZstdOutputStreamNoFinalizer; -import org.opensearch.core.common.bytes.BytesReference; - -import java.io.BufferedInputStream; -import java.io.BufferedOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.util.Arrays; - -/** - * {@link Compressor} implementation based on the ZSTD compression algorithm. 
- * - * @opensearch.internal - */ -public class ZstdCompressor implements Compressor { - // An arbitrary header that we use to identify compressed streams - // It needs to be different from other compressors and to not be specific - // enough so that no stream starting with these bytes could be detected as - // a XContent - private static final byte[] HEADER = new byte[] { 'Z', 'S', 'T', 'D', '\0' }; - - private static final int LEVEL = 3; - - private static final int BUFFER_SIZE = 4096; - - @Override - public boolean isCompressed(BytesReference bytes) { - if (bytes.length() < HEADER.length) { - return false; - } - for (int i = 0; i < HEADER.length; ++i) { - if (bytes.get(i) != HEADER[i]) { - return false; - } - } - return true; - } - - @Override - public int headerLength() { - return HEADER.length; - } - - @Override - public InputStream threadLocalInputStream(InputStream in) throws IOException { - final byte[] header = in.readNBytes(HEADER.length); - if (Arrays.equals(header, HEADER) == false) { - throw new IllegalArgumentException("Input stream is not compressed with ZSTD!"); - } - return new ZstdInputStreamNoFinalizer(new BufferedInputStream(in, BUFFER_SIZE), RecyclingBufferPool.INSTANCE); - } - - @Override - public OutputStream threadLocalOutputStream(OutputStream out) throws IOException { - out.write(HEADER); - return new ZstdOutputStreamNoFinalizer(new BufferedOutputStream(out, BUFFER_SIZE), RecyclingBufferPool.INSTANCE, LEVEL); - } - - @Override - public BytesReference uncompress(BytesReference bytesReference) throws IOException { - throw new UnsupportedOperationException("ZSTD compression is supported only for snapshotting"); - } - - @Override - public BytesReference compress(BytesReference bytesReference) throws IOException { - throw new UnsupportedOperationException("ZSTD compression is supported only for snapshotting"); - } -} diff --git a/server/src/main/java/org/opensearch/common/compress/spi/ServerCompressorProvider.java b/server/src/main/java/org/opensearch/common/compress/spi/ServerCompressorProvider.java new file mode 100644 index 0000000000000..42036f8d88610 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/compress/spi/ServerCompressorProvider.java @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
*/ + +package org.opensearch.common.compress.spi; + +import org.opensearch.common.compress.DeflateCompressor; +import org.opensearch.core.compress.Compressor; +import org.opensearch.core.compress.spi.CompressorProvider; + +import java.util.AbstractMap.SimpleEntry; +import java.util.List; +import java.util.Map.Entry; + +/** + * Default {@link Compressor} implementations provided by the + * server module + * + * @opensearch.internal + * + * @deprecated This class is deprecated and will be removed when the {@link DeflateCompressor} is moved to the compress + * library as a default compression option + */ +@Deprecated +public class ServerCompressorProvider implements CompressorProvider { + /** Returns the concrete {@link Compressor}s provided by the server module */ + @SuppressWarnings({ "unchecked", "rawtypes" }) + @Override + public List<Entry<String, Compressor>> getCompressors() { + return List.of(new SimpleEntry(DeflateCompressor.NAME, new DeflateCompressor())); + } +} diff --git a/server/src/main/java/org/opensearch/common/compress/spi/package-info.java b/server/src/main/java/org/opensearch/common/compress/spi/package-info.java new file mode 100644 index 0000000000000..a8019b23c7d90 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/compress/spi/package-info.java @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Service Provider Interface for registering the {@link org.opensearch.common.compress.DeflateCompressor} with the + * {@link org.opensearch.core.compress.CompressorRegistry}. + * + * Note: this will be refactored to the {@code :libs:opensearch-compress} library after other dependency classes are + * refactored. + */ +package org.opensearch.common.compress.spi; diff --git a/server/src/main/java/org/opensearch/common/concurrent/GatedCloseable.java b/server/src/main/java/org/opensearch/common/concurrent/GatedCloseable.java index 8bf620ee2cd50..08ebc2f8786c5 100644 --- a/server/src/main/java/org/opensearch/common/concurrent/GatedCloseable.java +++ b/server/src/main/java/org/opensearch/common/concurrent/GatedCloseable.java @@ -14,6 +14,7 @@ package org.opensearch.common.concurrent; import org.opensearch.common.CheckedRunnable; +import org.opensearch.common.annotation.PublicApi; import java.io.Closeable; import java.io.IOException; @@ -23,8 +24,9 @@ * invoked when {@link #close()} is called. The internal {@link OneWayGate} instance ensures * that this is invoked only once. See also {@link AutoCloseableRefCounted} * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GatedCloseable<T> implements Closeable { private final T ref; diff --git a/server/src/main/java/org/opensearch/common/concurrent/RefCountedReleasable.java b/server/src/main/java/org/opensearch/common/concurrent/RefCountedReleasable.java index 77d406a9bb4a3..9ef73a672c1ad 100644 --- a/server/src/main/java/org/opensearch/common/concurrent/RefCountedReleasable.java +++ b/server/src/main/java/org/opensearch/common/concurrent/RefCountedReleasable.java @@ -13,8 +13,8 @@ package org.opensearch.common.concurrent; -import org.opensearch.common.util.concurrent.AbstractRefCounted; import org.opensearch.common.lease.Releasable; +import org.opensearch.common.util.concurrent.AbstractRefCounted; /** * Decorator class that wraps an object reference as a {@link AbstractRefCounted} instance.
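Since GatedCloseable is promoted to @PublicApi in the hunk above, a brief usage sketch may help downstream consumers. This is an illustrative example only, assuming the (T, CheckedRunnable<IOException>) constructor implied by the class's imports and a get() accessor for the wrapped reference; the StringReader resource and the class name are stand-ins, not part of this change:

import java.io.IOException;
import java.io.StringReader;

import org.opensearch.common.concurrent.GatedCloseable;

public class GatedCloseableSketch {
    public static void main(String[] args) throws IOException {
        StringReader reader = new StringReader("example");
        // Wrap the reader; the close action is guarded by the internal OneWayGate.
        GatedCloseable<StringReader> gated = new GatedCloseable<>(reader, reader::close);
        gated.get().read(); // use the wrapped reference
        gated.close();      // runs reader::close and shuts the gate
        gated.close();      // later calls are no-ops, per the Javadoc above
    }
}

Because GatedCloseable implements Closeable, the same pattern also works in a try-with-resources block.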
diff --git a/server/src/main/java/org/opensearch/common/document/DocumentField.java b/server/src/main/java/org/opensearch/common/document/DocumentField.java index 8ce672d4fb3fc..5cdc2bba8be16 100644 --- a/server/src/main/java/org/opensearch/common/document/DocumentField.java +++ b/server/src/main/java/org/opensearch/common/document/DocumentField.java @@ -32,6 +32,7 @@ package org.opensearch.common.document; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -56,8 +57,9 @@ * @see SearchHit * @see GetResult * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DocumentField implements Writeable, ToXContentFragment, Iterable<Object> { private final String name; diff --git a/server/src/main/java/org/opensearch/common/geo/GeoDistance.java b/server/src/main/java/org/opensearch/common/geo/GeoDistance.java index 1653f251f9201..37a03bcd237de 100644 --- a/server/src/main/java/org/opensearch/common/geo/GeoDistance.java +++ b/server/src/main/java/org/opensearch/common/geo/GeoDistance.java @@ -32,10 +32,10 @@ package org.opensearch.common.geo; +import org.opensearch.common.unit.DistanceUnit; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.DistanceUnit; import java.io.IOException; import java.util.Locale; diff --git a/server/src/main/java/org/opensearch/common/geo/GeoJson.java b/server/src/main/java/org/opensearch/common/geo/GeoJson.java index 6d3bec65ed794..a3cbb4c121acd 100644 --- a/server/src/main/java/org/opensearch/common/geo/GeoJson.java +++ b/server/src/main/java/org/opensearch/common/geo/GeoJson.java @@ -34,9 +34,9 @@ import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchParseException; -import org.opensearch.core.ParseField; import org.opensearch.common.geo.parsers.ShapeParser; import org.opensearch.common.unit.DistanceUnit; +import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContent; diff --git a/server/src/main/java/org/opensearch/common/geo/GeoPolygonDecomposer.java b/server/src/main/java/org/opensearch/common/geo/GeoPolygonDecomposer.java index 797b252215d76..1dd35707f0879 100644 --- a/server/src/main/java/org/opensearch/common/geo/GeoPolygonDecomposer.java +++ b/server/src/main/java/org/opensearch/common/geo/GeoPolygonDecomposer.java @@ -37,7 +37,6 @@ import org.opensearch.geometry.MultiPolygon; import org.opensearch.geometry.Point; import org.opensearch.geometry.Polygon; -import org.locationtech.spatial4j.exception.InvalidShapeException; import java.util.ArrayList; import java.util.Arrays; @@ -49,9 +48,11 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; -import static org.apache.lucene.geo.GeoUtils.orient; +import org.locationtech.spatial4j.exception.InvalidShapeException; + import static org.opensearch.common.geo.GeoUtils.normalizeLat; import static org.opensearch.common.geo.GeoUtils.normalizeLon; +import static org.apache.lucene.geo.GeoUtils.orient; /** * Splits polygons by datelines. 
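DocumentField is likewise promoted to @PublicApi in the hunk above, so a short sketch of its accessors may be useful. This is illustrative only, assuming the (String, List<Object>) constructor and the getName()/getValue() accessors the class has long exposed; the field name and values are made up:

import java.util.List;

import org.opensearch.common.document.DocumentField;

public class DocumentFieldSketch {
    public static void main(String[] args) {
        // A field named "tags" carrying two values, as it might appear in a search hit.
        DocumentField field = new DocumentField("tags", List.of("alpha", "beta"));
        String first = field.getValue(); // convenience accessor for the first value
        System.out.println("first value: " + first);
        for (Object value : field) { // DocumentField is Iterable over its values
            System.out.println(field.getName() + " -> " + value);
        }
    }
}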
diff --git a/server/src/main/java/org/opensearch/common/geo/GeoShapeType.java b/server/src/main/java/org/opensearch/common/geo/GeoShapeType.java index b9d8bb22504f9..1622457ba27cc 100644 --- a/server/src/main/java/org/opensearch/common/geo/GeoShapeType.java +++ b/server/src/main/java/org/opensearch/common/geo/GeoShapeType.java @@ -31,7 +31,6 @@ package org.opensearch.common.geo; -import org.locationtech.jts.geom.Coordinate; import org.opensearch.OpenSearchParseException; import org.opensearch.common.geo.builders.CircleBuilder; import org.opensearch.common.geo.builders.CoordinatesBuilder; @@ -46,8 +45,8 @@ import org.opensearch.common.geo.builders.ShapeBuilder; import org.opensearch.common.geo.builders.ShapeBuilder.Orientation; import org.opensearch.common.geo.parsers.CoordinateNode; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry.Entry; import org.opensearch.common.unit.DistanceUnit; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry.Entry; import java.util.ArrayList; import java.util.HashMap; @@ -55,6 +54,8 @@ import java.util.Locale; import java.util.Map; +import org.locationtech.jts.geom.Coordinate; + /** * Enumeration that lists all {@link GeoShapeType}s that can be parsed and indexed * @@ -220,11 +221,11 @@ void validateLinearRing(CoordinateNode coordinates, boolean coerce) { @Override CoordinateNode validate(CoordinateNode coordinates, boolean coerce) { - /** - * Per GeoJSON spec (http://geojson.org/geojson-spec.html#linestring) - * A LinearRing is closed LineString with 4 or more positions. The first and last positions - * are equivalent (they represent equivalent points). Though a LinearRing is not explicitly - * represented as a GeoJSON geometry type, it is referred to in the Polygon geometry type definition. + /* + Per GeoJSON spec (http://geojson.org/geojson-spec.html#linestring) + A LinearRing is closed LineString with 4 or more positions. The first and last positions + are equivalent (they represent equivalent points). Though a LinearRing is not explicitly + represented as a GeoJSON geometry type, it is referred to in the Polygon geometry type definition. */ if (coordinates.children == null || coordinates.children.isEmpty()) { throw new OpenSearchParseException( diff --git a/server/src/main/java/org/opensearch/common/geo/GeoUtils.java b/server/src/main/java/org/opensearch/common/geo/GeoUtils.java index 5003619a7de8e..8c566c4191e4f 100644 --- a/server/src/main/java/org/opensearch/common/geo/GeoUtils.java +++ b/server/src/main/java/org/opensearch/common/geo/GeoUtils.java @@ -38,11 +38,11 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.common.unit.DistanceUnit; import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.support.XContentMapValues; +import org.opensearch.core.xcontent.MapXContentParser; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.xcontent.XContentSubParser; -import org.opensearch.core.xcontent.MapXContentParser; -import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.geometry.ShapeType; import org.opensearch.index.fielddata.FieldData; import org.opensearch.index.fielddata.GeoPointValues; @@ -665,7 +665,7 @@ public static GeoPoint parseFromString(String val) { /** * Parse a precision that can be expressed as an integer or a distance measure like "1km", "10m". 
- * + * <p> * The precision is expressed as a number between 1 and 12 and indicates the length of geohash * used to represent geo points. * @@ -696,7 +696,7 @@ public static int parsePrecision(XContentParser parser) throws IOException, Open /** * Checks that the precision is within range supported by opensearch - between 1 and 12 - * + * <p> * Returns the precision value if it is in the range and throws an IllegalArgumentException if it * is outside the range. */ diff --git a/server/src/main/java/org/opensearch/common/geo/GeometryFormat.java b/server/src/main/java/org/opensearch/common/geo/GeometryFormat.java index 56146fc8197be..93c7f4b93679a 100644 --- a/server/src/main/java/org/opensearch/common/geo/GeometryFormat.java +++ b/server/src/main/java/org/opensearch/common/geo/GeometryFormat.java @@ -63,7 +63,7 @@ public interface GeometryFormat<ParsedFormat> { /** * Serializes the geometry into a standard Java object. - * + * <p> * For example, the GeoJson format returns the geometry as a map, while WKT returns a string. */ Object toXContentAsObject(ParsedFormat geometry); diff --git a/server/src/main/java/org/opensearch/common/geo/GeometryIO.java b/server/src/main/java/org/opensearch/common/geo/GeometryIO.java index 829ecad2aa67d..83f7fd581fac3 100644 --- a/server/src/main/java/org/opensearch/common/geo/GeometryIO.java +++ b/server/src/main/java/org/opensearch/common/geo/GeometryIO.java @@ -32,9 +32,9 @@ package org.opensearch.common.geo; +import org.opensearch.common.unit.DistanceUnit; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.DistanceUnit; import org.opensearch.geometry.Circle; import org.opensearch.geometry.Geometry; import org.opensearch.geometry.GeometryCollection; diff --git a/server/src/main/java/org/opensearch/common/geo/GeometryParser.java b/server/src/main/java/org/opensearch/common/geo/GeometryParser.java index e00deffb3a853..ecd1fa9e3acce 100644 --- a/server/src/main/java/org/opensearch/common/geo/GeometryParser.java +++ b/server/src/main/java/org/opensearch/common/geo/GeometryParser.java @@ -34,9 +34,9 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.core.xcontent.MapXContentParser; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.xcontent.MapXContentParser; import org.opensearch.geometry.Geometry; import org.opensearch.geometry.GeometryCollection; import org.opensearch.geometry.Point; diff --git a/server/src/main/java/org/opensearch/common/geo/ShapeRelation.java b/server/src/main/java/org/opensearch/common/geo/ShapeRelation.java index 0a5a66ef54c9c..d5b761a531e7f 100644 --- a/server/src/main/java/org/opensearch/common/geo/ShapeRelation.java +++ b/server/src/main/java/org/opensearch/common/geo/ShapeRelation.java @@ -33,6 +33,7 @@ package org.opensearch.common.geo; import org.apache.lucene.document.ShapeField.QueryRelation; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -44,8 +45,9 @@ * Enum representing the relationship between a Query / Filter Shape and indexed Shapes * that will be used to determine if a Document should be matched or not * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") 
public enum ShapeRelation implements Writeable { INTERSECTS("intersects"), diff --git a/server/src/main/java/org/opensearch/common/geo/XShapeCollection.java b/server/src/main/java/org/opensearch/common/geo/XShapeCollection.java index e97bf69eae7d5..2957236e9525c 100644 --- a/server/src/main/java/org/opensearch/common/geo/XShapeCollection.java +++ b/server/src/main/java/org/opensearch/common/geo/XShapeCollection.java @@ -32,12 +32,12 @@ package org.opensearch.common.geo; +import java.util.List; + import org.locationtech.spatial4j.context.SpatialContext; import org.locationtech.spatial4j.shape.Shape; import org.locationtech.spatial4j.shape.ShapeCollection; -import java.util.List; - /** * Extends spatial4j ShapeCollection for points_only shape indexing support * diff --git a/server/src/main/java/org/opensearch/common/geo/builders/CircleBuilder.java b/server/src/main/java/org/opensearch/common/geo/builders/CircleBuilder.java index a2565f6faefd1..689ae1f291e51 100644 --- a/server/src/main/java/org/opensearch/common/geo/builders/CircleBuilder.java +++ b/server/src/main/java/org/opensearch/common/geo/builders/CircleBuilder.java @@ -32,21 +32,21 @@ package org.opensearch.common.geo.builders; -import org.opensearch.core.ParseField; import org.opensearch.common.geo.GeoShapeType; import org.opensearch.common.geo.parsers.ShapeParser; -import org.locationtech.spatial4j.shape.Circle; -import org.locationtech.jts.geom.Coordinate; - -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.unit.DistanceUnit; import org.opensearch.common.unit.DistanceUnit.Distance; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.XContentBuilder; import java.io.IOException; import java.util.Objects; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.spatial4j.shape.Circle; + /** * Builds a circle geometry * diff --git a/server/src/main/java/org/opensearch/common/geo/builders/CoordinatesBuilder.java b/server/src/main/java/org/opensearch/common/geo/builders/CoordinatesBuilder.java index 0892e9bd57c8b..ab238230b06ce 100644 --- a/server/src/main/java/org/opensearch/common/geo/builders/CoordinatesBuilder.java +++ b/server/src/main/java/org/opensearch/common/geo/builders/CoordinatesBuilder.java @@ -33,13 +33,14 @@ package org.opensearch.common.geo.builders; import org.opensearch.OpenSearchException; -import org.locationtech.jts.geom.Coordinate; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.List; +import org.locationtech.jts.geom.Coordinate; + /** * A builder for a list of coordinates. 
* Enables chaining of individual coordinates either as long/lat pairs diff --git a/server/src/main/java/org/opensearch/common/geo/builders/EnvelopeBuilder.java b/server/src/main/java/org/opensearch/common/geo/builders/EnvelopeBuilder.java index 008e38039d64b..cc09eed913d6b 100644 --- a/server/src/main/java/org/opensearch/common/geo/builders/EnvelopeBuilder.java +++ b/server/src/main/java/org/opensearch/common/geo/builders/EnvelopeBuilder.java @@ -35,9 +35,6 @@ import org.opensearch.common.geo.GeoShapeType; import org.opensearch.common.geo.parsers.GeoWKTParser; import org.opensearch.common.geo.parsers.ShapeParser; -import org.locationtech.spatial4j.shape.Rectangle; -import org.locationtech.jts.geom.Coordinate; - import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.XContentBuilder; @@ -45,6 +42,9 @@ import java.io.IOException; import java.util.Objects; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.spatial4j.shape.Rectangle; + /** * Builds an envelope geometry * diff --git a/server/src/main/java/org/opensearch/common/geo/builders/GeometryCollectionBuilder.java b/server/src/main/java/org/opensearch/common/geo/builders/GeometryCollectionBuilder.java index 5705f1f21701a..cbfb1a3521116 100644 --- a/server/src/main/java/org/opensearch/common/geo/builders/GeometryCollectionBuilder.java +++ b/server/src/main/java/org/opensearch/common/geo/builders/GeometryCollectionBuilder.java @@ -42,13 +42,14 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.geometry.Geometry; import org.opensearch.geometry.GeometryCollection; -import org.locationtech.spatial4j.shape.Shape; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Objects; +import org.locationtech.spatial4j.shape.Shape; + /** * Builds a geometry collection * diff --git a/server/src/main/java/org/opensearch/common/geo/builders/LineStringBuilder.java b/server/src/main/java/org/opensearch/common/geo/builders/LineStringBuilder.java index 393a426c38ecd..94124e7f71ad1 100644 --- a/server/src/main/java/org/opensearch/common/geo/builders/LineStringBuilder.java +++ b/server/src/main/java/org/opensearch/common/geo/builders/LineStringBuilder.java @@ -37,17 +37,18 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.geometry.Line; -import org.locationtech.jts.geom.Coordinate; -import org.locationtech.jts.geom.Geometry; -import org.locationtech.jts.geom.GeometryFactory; -import org.locationtech.jts.geom.LineString; -import org.locationtech.spatial4j.shape.jts.JtsGeometry; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.Geometry; +import org.locationtech.jts.geom.GeometryFactory; +import org.locationtech.jts.geom.LineString; +import org.locationtech.spatial4j.shape.jts.JtsGeometry; + /** * Builds a line string geometry * diff --git a/server/src/main/java/org/opensearch/common/geo/builders/MultiLineStringBuilder.java b/server/src/main/java/org/opensearch/common/geo/builders/MultiLineStringBuilder.java index 5ce761f0aa64e..e945341a32111 100644 --- a/server/src/main/java/org/opensearch/common/geo/builders/MultiLineStringBuilder.java +++ b/server/src/main/java/org/opensearch/common/geo/builders/MultiLineStringBuilder.java @@ -40,10 +40,6 @@ import 
org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.geometry.Line; import org.opensearch.geometry.MultiLine; -import org.locationtech.jts.geom.Coordinate; -import org.locationtech.jts.geom.Geometry; -import org.locationtech.jts.geom.LineString; -import org.locationtech.spatial4j.shape.jts.JtsGeometry; import java.io.IOException; import java.util.ArrayList; @@ -51,6 +47,11 @@ import java.util.List; import java.util.Objects; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.Geometry; +import org.locationtech.jts.geom.LineString; +import org.locationtech.spatial4j.shape.jts.JtsGeometry; + /** * Builds a multi line string geometry * diff --git a/server/src/main/java/org/opensearch/common/geo/builders/MultiPointBuilder.java b/server/src/main/java/org/opensearch/common/geo/builders/MultiPointBuilder.java index b062791221fa0..0bfa91e013bd6 100644 --- a/server/src/main/java/org/opensearch/common/geo/builders/MultiPointBuilder.java +++ b/server/src/main/java/org/opensearch/common/geo/builders/MultiPointBuilder.java @@ -38,14 +38,15 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.geometry.MultiPoint; -import org.locationtech.jts.geom.Coordinate; -import org.locationtech.spatial4j.shape.Point; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.stream.Collectors; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.spatial4j.shape.Point; + /** * Builds a multi point geometry * diff --git a/server/src/main/java/org/opensearch/common/geo/builders/MultiPolygonBuilder.java b/server/src/main/java/org/opensearch/common/geo/builders/MultiPolygonBuilder.java index 907b590f268f4..102909f93bec4 100644 --- a/server/src/main/java/org/opensearch/common/geo/builders/MultiPolygonBuilder.java +++ b/server/src/main/java/org/opensearch/common/geo/builders/MultiPolygonBuilder.java @@ -41,8 +41,6 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.geometry.MultiPolygon; import org.opensearch.geometry.Polygon; -import org.locationtech.jts.geom.Coordinate; -import org.locationtech.spatial4j.shape.Shape; import java.io.IOException; import java.util.ArrayList; @@ -50,6 +48,9 @@ import java.util.Locale; import java.util.Objects; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.spatial4j.shape.Shape; + /** * Builds a multi polygon geometry * diff --git a/server/src/main/java/org/opensearch/common/geo/builders/PointBuilder.java b/server/src/main/java/org/opensearch/common/geo/builders/PointBuilder.java index bce441ae5d1d8..6f68546fea368 100644 --- a/server/src/main/java/org/opensearch/common/geo/builders/PointBuilder.java +++ b/server/src/main/java/org/opensearch/common/geo/builders/PointBuilder.java @@ -36,11 +36,12 @@ import org.opensearch.common.geo.parsers.ShapeParser; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.xcontent.XContentBuilder; -import org.locationtech.jts.geom.Coordinate; -import org.locationtech.spatial4j.shape.Point; import java.io.IOException; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.spatial4j.shape.Point; + /** * Builds a point geometry * diff --git a/server/src/main/java/org/opensearch/common/geo/builders/PolygonBuilder.java b/server/src/main/java/org/opensearch/common/geo/builders/PolygonBuilder.java index 85cbea3142fca..9e118ab2de3a5 100644 --- 
a/server/src/main/java/org/opensearch/common/geo/builders/PolygonBuilder.java +++ b/server/src/main/java/org/opensearch/common/geo/builders/PolygonBuilder.java @@ -35,18 +35,10 @@ import org.opensearch.common.collect.Tuple; import org.opensearch.common.geo.GeoShapeType; import org.opensearch.common.geo.parsers.ShapeParser; +import org.opensearch.common.util.set.Sets; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.util.set.Sets; import org.opensearch.core.xcontent.XContentBuilder; -import org.locationtech.jts.geom.Coordinate; -import org.locationtech.jts.geom.Geometry; -import org.locationtech.jts.geom.GeometryFactory; -import org.locationtech.jts.geom.LinearRing; -import org.locationtech.jts.geom.MultiPolygon; -import org.locationtech.jts.geom.Polygon; -import org.locationtech.spatial4j.exception.InvalidShapeException; -import org.locationtech.spatial4j.shape.jts.JtsGeometry; import java.io.IOException; import java.util.ArrayList; @@ -59,6 +51,15 @@ import java.util.Objects; import java.util.concurrent.atomic.AtomicBoolean; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.Geometry; +import org.locationtech.jts.geom.GeometryFactory; +import org.locationtech.jts.geom.LinearRing; +import org.locationtech.jts.geom.MultiPolygon; +import org.locationtech.jts.geom.Polygon; +import org.locationtech.spatial4j.exception.InvalidShapeException; +import org.locationtech.spatial4j.shape.jts.JtsGeometry; + import static org.apache.lucene.geo.GeoUtils.orient; /** @@ -176,11 +177,11 @@ public PolygonBuilder close() { } private static void validateLinearRing(LineStringBuilder lineString) { - /** - * Per GeoJSON spec (http://geojson.org/geojson-spec.html#linestring) - * A LinearRing is closed LineString with 4 or more positions. The first and last positions - * are equivalent (they represent equivalent points). Though a LinearRing is not explicitly - * represented as a GeoJSON geometry type, it is referred to in the Polygon geometry type definition. + /* + Per GeoJSON spec (http://geojson.org/geojson-spec.html#linestring) + A LinearRing is closed LineString with 4 or more positions. The first and last positions + are equivalent (they represent equivalent points). Though a LinearRing is not explicitly + represented as a GeoJSON geometry type, it is referred to in the Polygon geometry type definition. 
*/ List<Coordinate> points = lineString.coordinates; if (points.size() < 4) { diff --git a/server/src/main/java/org/opensearch/common/geo/builders/ShapeBuilder.java b/server/src/main/java/org/opensearch/common/geo/builders/ShapeBuilder.java index a24f2541eed8a..b3513a83858df 100644 --- a/server/src/main/java/org/opensearch/common/geo/builders/ShapeBuilder.java +++ b/server/src/main/java/org/opensearch/common/geo/builders/ShapeBuilder.java @@ -32,26 +32,18 @@ package org.opensearch.common.geo.builders; -import org.locationtech.jts.geom.Coordinate; -import org.locationtech.jts.geom.Geometry; -import org.locationtech.jts.geom.GeometryFactory; - -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; -import org.opensearch.core.Assertions; -import org.opensearch.common.Strings; +import org.apache.logging.log4j.Logger; import org.opensearch.common.geo.GeoShapeType; import org.opensearch.common.geo.parsers.GeoWKTParser; +import org.opensearch.core.Assertions; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.NamedWriteable; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; -import org.locationtech.spatial4j.context.jts.JtsSpatialContext; -import org.locationtech.spatial4j.exception.InvalidShapeException; -import org.locationtech.spatial4j.shape.Shape; -import org.locationtech.spatial4j.shape.jts.JtsGeometry; -import org.opensearch.common.xcontent.XContentType; import java.io.IOException; import java.util.ArrayList; @@ -62,6 +54,14 @@ import java.util.Locale; import java.util.Objects; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.Geometry; +import org.locationtech.jts.geom.GeometryFactory; +import org.locationtech.spatial4j.context.jts.JtsSpatialContext; +import org.locationtech.spatial4j.exception.InvalidShapeException; +import org.locationtech.spatial4j.shape.Shape; +import org.locationtech.spatial4j.shape.jts.JtsGeometry; + /** * Basic class for building GeoJSON shapes like Polygons, Linestrings, etc * @@ -521,6 +521,6 @@ public String getWriteableName() { @Override public String toString() { - return Strings.toString(XContentType.JSON, this, true, true); + return Strings.toString(MediaTypeRegistry.JSON, this, true, true); } } diff --git a/server/src/main/java/org/opensearch/common/geo/parsers/CoordinateNode.java b/server/src/main/java/org/opensearch/common/geo/parsers/CoordinateNode.java index 99a4ae8304124..670f0dde76ca5 100644 --- a/server/src/main/java/org/opensearch/common/geo/parsers/CoordinateNode.java +++ b/server/src/main/java/org/opensearch/common/geo/parsers/CoordinateNode.java @@ -31,7 +31,6 @@ package org.opensearch.common.geo.parsers; -import org.locationtech.jts.geom.Coordinate; import org.opensearch.OpenSearchException; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; @@ -39,6 +38,8 @@ import java.io.IOException; import java.util.List; +import org.locationtech.jts.geom.Coordinate; + /** * Node used to represent a tree of coordinates. 
* <p> diff --git a/server/src/main/java/org/opensearch/common/geo/parsers/GeoJsonParser.java b/server/src/main/java/org/opensearch/common/geo/parsers/GeoJsonParser.java index 8ce7cdb8c6fbd..8d473ae6721d2 100644 --- a/server/src/main/java/org/opensearch/common/geo/parsers/GeoJsonParser.java +++ b/server/src/main/java/org/opensearch/common/geo/parsers/GeoJsonParser.java @@ -31,7 +31,6 @@ package org.opensearch.common.geo.parsers; -import org.locationtech.jts.geom.Coordinate; import org.opensearch.OpenSearchParseException; import org.opensearch.common.Explicit; import org.opensearch.common.geo.GeoPoint; @@ -49,9 +48,11 @@ import java.util.ArrayList; import java.util.List; +import org.locationtech.jts.geom.Coordinate; + /** * Parses shape geometry represented in geojson - * + * <p> * complies with geojson specification: https://tools.ietf.org/html/rfc7946 * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/common/geo/parsers/GeoWKTParser.java b/server/src/main/java/org/opensearch/common/geo/parsers/GeoWKTParser.java index e77fa0d66adff..b199da0f3691a 100644 --- a/server/src/main/java/org/opensearch/common/geo/parsers/GeoWKTParser.java +++ b/server/src/main/java/org/opensearch/common/geo/parsers/GeoWKTParser.java @@ -31,7 +31,6 @@ package org.opensearch.common.geo.parsers; -import org.locationtech.jts.geom.Coordinate; import org.opensearch.OpenSearchParseException; import org.opensearch.common.Explicit; import org.opensearch.common.geo.GeoPoint; @@ -55,9 +54,11 @@ import java.io.StringReader; import java.util.List; +import org.locationtech.jts.geom.Coordinate; + /** * Parses shape geometry represented in WKT format - * + * <p> * complies with OGC® document: 12-063r5 and ISO/IEC 13249-3:2016 standard * located at http://docs.opengeospatial.org/is/12-063r5/12-063r5.html * diff --git a/server/src/main/java/org/opensearch/common/geo/parsers/ShapeParser.java b/server/src/main/java/org/opensearch/common/geo/parsers/ShapeParser.java index f225234ad6d49..fcd139bc0db8a 100644 --- a/server/src/main/java/org/opensearch/common/geo/parsers/ShapeParser.java +++ b/server/src/main/java/org/opensearch/common/geo/parsers/ShapeParser.java @@ -32,13 +32,13 @@ package org.opensearch.common.geo.parsers; import org.opensearch.OpenSearchParseException; -import org.opensearch.core.ParseField; import org.opensearch.common.geo.builders.ShapeBuilder; import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.core.ParseField; +import org.opensearch.core.xcontent.MapXContentParser; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContent; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.xcontent.MapXContentParser; import org.opensearch.index.mapper.AbstractGeometryFieldMapper; import org.opensearch.index.mapper.AbstractShapeGeometryFieldMapper; diff --git a/server/src/main/java/org/opensearch/common/hash/MurmurHash3.java b/server/src/main/java/org/opensearch/common/hash/MurmurHash3.java index 8ba0bd7ee1be4..e481ffd460798 100644 --- a/server/src/main/java/org/opensearch/common/hash/MurmurHash3.java +++ b/server/src/main/java/org/opensearch/common/hash/MurmurHash3.java @@ -93,7 +93,7 @@ protected static long fmix(long k) { /** * Compute the hash of the MurmurHash3_x64_128 hashing function. - * + * <p> * Note, this hashing function might be used to persist hashes, so if the way hashes are computed * changes for some reason, it needs to be addressed (like in BloomFilter and MurmurHashField). 
*/ diff --git a/server/src/main/java/org/opensearch/common/inject/Initializer.java b/server/src/main/java/org/opensearch/common/inject/Initializer.java index e806eba6df707..b88b01c03c018 100644 --- a/server/src/main/java/org/opensearch/common/inject/Initializer.java +++ b/server/src/main/java/org/opensearch/common/inject/Initializer.java @@ -68,9 +68,8 @@ class Initializer { /** * Registers an instance for member injection when that step is performed. * - * @param instance an instance that optionally has members to be injected (each annotated with - * @param source the source location that this injection was requested - * @Inject). + * @param instance an instance that optionally has members to be injected (each annotated with {@code @Inject}). + * @param source the source location that this injection was requested */ public <T> Initializable<T> requestInjection(InjectorImpl injector, T instance, Object source, Set<InjectionPoint> injectionPoints) { Objects.requireNonNull(source); diff --git a/server/src/main/java/org/opensearch/common/io/DiskIoBufferPool.java b/server/src/main/java/org/opensearch/common/io/DiskIoBufferPool.java index 80b5dd353703c..e853a6ddc34d2 100644 --- a/server/src/main/java/org/opensearch/common/io/DiskIoBufferPool.java +++ b/server/src/main/java/org/opensearch/common/io/DiskIoBufferPool.java @@ -32,7 +32,7 @@ package org.opensearch.common.io; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.threadpool.ThreadPool; import java.nio.ByteBuffer; diff --git a/server/src/main/java/org/opensearch/common/io/Streams.java b/server/src/main/java/org/opensearch/common/io/Streams.java index b7f62fd59c3ca..f0d8f8b8bedbf 100644 --- a/server/src/main/java/org/opensearch/common/io/Streams.java +++ b/server/src/main/java/org/opensearch/common/io/Streams.java @@ -32,9 +32,9 @@ package org.opensearch.common.io; +import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.BytesStream; -import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.StreamOutput; import java.io.BufferedReader; diff --git a/server/src/main/java/org/opensearch/common/io/VersionedCodecStreamWrapper.java b/server/src/main/java/org/opensearch/common/io/VersionedCodecStreamWrapper.java index 9907e2225c64e..8089d354a2480 100644 --- a/server/src/main/java/org/opensearch/common/io/VersionedCodecStreamWrapper.java +++ b/server/src/main/java/org/opensearch/common/io/VersionedCodecStreamWrapper.java @@ -8,8 +8,6 @@ package org.opensearch.common.io; -import java.io.IOException; - import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -19,6 +17,8 @@ import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; +import java.io.IOException; + /** * Manages versioning and checksum for a stream of content. 
* @param <T> Type of content to be read/written diff --git a/server/src/main/java/org/opensearch/common/io/stream/BytesStreamOutput.java b/server/src/main/java/org/opensearch/common/io/stream/BytesStreamOutput.java index c2b6da3c1756a..bfed1f0883672 100644 --- a/server/src/main/java/org/opensearch/common/io/stream/BytesStreamOutput.java +++ b/server/src/main/java/org/opensearch/common/io/stream/BytesStreamOutput.java @@ -35,13 +35,14 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; import org.opensearch.common.Nullable; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.BigArrays; -import org.opensearch.core.common.util.ByteArray; import org.opensearch.common.util.PageCacheRecycler; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.BytesStream; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.util.ByteArray; import java.io.IOException; @@ -49,8 +50,9 @@ * A @link {@link StreamOutput} that uses {@link BigArrays} to acquire pages of * bytes, which avoids frequent reallocation & copying of the internal data. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class BytesStreamOutput extends BytesStream { protected final BigArrays bigArrays; diff --git a/server/src/main/java/org/opensearch/common/io/stream/DelayableWriteable.java b/server/src/main/java/org/opensearch/common/io/stream/DelayableWriteable.java index 200f3e9225834..f345a5d3a804f 100644 --- a/server/src/main/java/org/opensearch/common/io/stream/DelayableWriteable.java +++ b/server/src/main/java/org/opensearch/common/io/stream/DelayableWriteable.java @@ -35,6 +35,7 @@ import org.apache.lucene.util.Accountable; import org.apache.lucene.util.RamUsageEstimator; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; @@ -62,8 +63,9 @@ * to force their buffering in serialized format by calling * {@link #asSerialized(Reader, NamedWriteableRegistry)}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class DelayableWriteable<T extends Writeable> implements Writeable { /** * Build a {@linkplain DelayableWriteable} that wraps an existing object @@ -151,8 +153,9 @@ private BytesStreamOutput writeToBuffer(Version version) throws IOException { /** * A {@link Writeable} stored in serialized form. 
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Serialized<T extends Writeable> extends DelayableWriteable<T> implements Accountable { private final Writeable.Reader<T> reader; private final Version serializedAtVersion; diff --git a/server/src/main/java/org/opensearch/common/io/stream/ReleasableBytesStreamOutput.java b/server/src/main/java/org/opensearch/common/io/stream/ReleasableBytesStreamOutput.java index 752ae353cb394..0fd8640305326 100644 --- a/server/src/main/java/org/opensearch/common/io/stream/ReleasableBytesStreamOutput.java +++ b/server/src/main/java/org/opensearch/common/io/stream/ReleasableBytesStreamOutput.java @@ -33,10 +33,10 @@ package org.opensearch.common.io.stream; import org.opensearch.common.bytes.ReleasableBytesReference; -import org.opensearch.common.util.BigArrays; -import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; +import org.opensearch.common.util.BigArrays; +import org.opensearch.common.util.PageCacheRecycler; /** * An bytes stream output that allows providing a {@link BigArrays} instance diff --git a/server/src/main/java/org/opensearch/common/io/stream/Streamables.java b/server/src/main/java/org/opensearch/common/io/stream/Streamables.java index 97c19f906c544..f1e5f5f22d527 100644 --- a/server/src/main/java/org/opensearch/common/io/stream/Streamables.java +++ b/server/src/main/java/org/opensearch/common/io/stream/Streamables.java @@ -8,17 +8,10 @@ package org.opensearch.common.io.stream; -import org.joda.time.DateTimeZone; -import org.joda.time.ReadableInstant; import org.opensearch.common.geo.GeoPoint; -import org.opensearch.common.time.DateUtils; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable.WriteableRegistry; -import org.opensearch.script.JodaCompatibleZonedDateTime; - -import java.time.Instant; -import java.time.ZoneId; /** * This utility class registers generic types for streaming over the wire using @@ -47,25 +40,7 @@ public static void registerStreamables() { * Registers writers by class type */ private static void registerWriters() { - /** {@link ReadableInstant} */ - WriteableRegistry.registerWriter(ReadableInstant.class, (o, v) -> { - o.writeByte((byte) 13); - final ReadableInstant instant = (ReadableInstant) v; - o.writeString(instant.getZone().getID()); - o.writeLong(instant.getMillis()); - }); - WriteableRegistry.registerClassAlias(ReadableInstant.class, ReadableInstant.class); - /** {@link JodaCompatibleZonedDateTime} */ - WriteableRegistry.registerWriter(JodaCompatibleZonedDateTime.class, (o, v) -> { - // write the joda compatibility datetime as joda datetime - o.writeByte((byte) 13); - final JodaCompatibleZonedDateTime zonedDateTime = (JodaCompatibleZonedDateTime) v; - String zoneId = zonedDateTime.getZonedDateTime().getZone().getId(); - // joda does not understand "Z" for utc, so we must special case - o.writeString(zoneId.equals("Z") ? 
DateTimeZone.UTC.getID() : zoneId); - o.writeLong(zonedDateTime.toInstant().toEpochMilli()); - }); - /** {@link GeoPoint} */ + /* {@link GeoPoint} */ WriteableRegistry.registerWriter(GeoPoint.class, (o, v) -> { o.writeByte((byte) 22); ((GeoPoint) v).writeTo(o); @@ -78,13 +53,7 @@ private static void registerWriters() { * NOTE: see {@code StreamOutput#WRITERS} for all registered ordinals */ private static void registerReaders() { - /** {@link JodaCompatibleZonedDateTime */ - WriteableRegistry.registerReader(Byte.valueOf((byte) 13), (i) -> { - final ZoneId zoneId = DateUtils.dateTimeZoneToZoneId(DateTimeZone.forID(i.readString())); - long millis = i.readLong(); - return new JodaCompatibleZonedDateTime(Instant.ofEpochMilli(millis), zoneId); - }); - /** {@link GeoPoint} */ + /* {@link GeoPoint} */ WriteableRegistry.registerReader(Byte.valueOf((byte) 22), GeoPoint::new); } } diff --git a/server/src/main/java/org/opensearch/common/joda/Joda.java b/server/src/main/java/org/opensearch/common/joda/Joda.java index 45700661822be..8b466e01b15c7 100644 --- a/server/src/main/java/org/opensearch/common/joda/Joda.java +++ b/server/src/main/java/org/opensearch/common/joda/Joda.java @@ -32,12 +32,13 @@ package org.opensearch.common.joda; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.time.FormatNames; import org.opensearch.common.util.LazyInitializable; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; import org.joda.time.Chronology; import org.joda.time.DateTime; import org.joda.time.DateTimeField; @@ -57,7 +58,6 @@ import org.joda.time.format.DateTimePrinter; import org.joda.time.format.ISODateTimeFormat; import org.joda.time.format.StrictISODateTimeFormat; -import org.opensearch.core.common.Strings; import java.io.IOException; import java.io.Writer; diff --git a/server/src/main/java/org/opensearch/common/joda/JodaDateFormatter.java b/server/src/main/java/org/opensearch/common/joda/JodaDateFormatter.java index 12d48a0b362ce..bf25e5b1b3923 100644 --- a/server/src/main/java/org/opensearch/common/joda/JodaDateFormatter.java +++ b/server/src/main/java/org/opensearch/common/joda/JodaDateFormatter.java @@ -125,6 +125,11 @@ public String pattern() { return pattern; } + @Override + public String printPattern() { + throw new UnsupportedOperationException("JodaDateFormatter does not have a print pattern"); + } + @Override public Locale locale() { return printer.getLocale(); diff --git a/server/src/main/java/org/opensearch/common/joda/JodaDateMathParser.java b/server/src/main/java/org/opensearch/common/joda/JodaDateMathParser.java index 131c9b001f733..ae38e9a6a8073 100644 --- a/server/src/main/java/org/opensearch/common/joda/JodaDateMathParser.java +++ b/server/src/main/java/org/opensearch/common/joda/JodaDateMathParser.java @@ -32,12 +32,12 @@ package org.opensearch.common.joda; -import org.joda.time.DateTimeZone; -import org.joda.time.MutableDateTime; -import org.joda.time.format.DateTimeFormatter; import org.opensearch.OpenSearchParseException; import org.opensearch.common.time.DateMathParser; import org.opensearch.common.time.DateUtils; +import org.joda.time.DateTimeZone; +import org.joda.time.MutableDateTime; +import org.joda.time.format.DateTimeFormatter; import java.time.Instant; import 
java.time.ZoneId; @@ -46,7 +46,7 @@ /** * A parser for date/time formatted text with optional date math. - * + * <p> * The format of the datetime is configurable, and unix timestamps can also be used. Datemath * is appended to a datetime with the following syntax: * <code>||[+-/](\d+)?[yMwdhHms]</code>. diff --git a/server/src/main/java/org/opensearch/common/logging/DeprecatedMessage.java b/server/src/main/java/org/opensearch/common/logging/DeprecatedMessage.java index e5843d9333ab5..25c1ba9675600 100644 --- a/server/src/main/java/org/opensearch/common/logging/DeprecatedMessage.java +++ b/server/src/main/java/org/opensearch/common/logging/DeprecatedMessage.java @@ -32,11 +32,10 @@ package org.opensearch.common.logging; -import java.util.Map; - import org.opensearch.common.collect.MapBuilder; import org.opensearch.core.common.Strings; +import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; diff --git a/server/src/main/java/org/opensearch/common/logging/DeprecationLogger.java b/server/src/main/java/org/opensearch/common/logging/DeprecationLogger.java index 98b4ebbc330da..d4dbb953ffe12 100644 --- a/server/src/main/java/org/opensearch/common/logging/DeprecationLogger.java +++ b/server/src/main/java/org/opensearch/common/logging/DeprecationLogger.java @@ -35,6 +35,7 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.opensearch.common.annotation.PublicApi; /** * A logger that logs deprecation notices. Logger should be initialized with a parent logger which name will be used @@ -50,8 +51,9 @@ * key is combined with the <code>X-Opaque-Id</code> request header value, if supplied, which allows for per-client * message limiting. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DeprecationLogger { /** @@ -108,8 +110,9 @@ public DeprecationLoggerBuilder deprecate(final String key, final String msg, fi /** * The builder for the deprecation logger * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public class DeprecationLoggerBuilder { public DeprecationLoggerBuilder withDeprecation(String key, String msg, Object[] params) { diff --git a/server/src/main/java/org/opensearch/common/logging/JsonThrowablePatternConverter.java b/server/src/main/java/org/opensearch/common/logging/JsonThrowablePatternConverter.java index f3ac7162e242e..ed324e4e62d8f 100644 --- a/server/src/main/java/org/opensearch/common/logging/JsonThrowablePatternConverter.java +++ b/server/src/main/java/org/opensearch/common/logging/JsonThrowablePatternConverter.java @@ -30,6 +30,7 @@ package org.opensearch.common.logging; import com.fasterxml.jackson.core.io.JsonStringEncoder; + import org.apache.logging.log4j.core.LogEvent; import org.apache.logging.log4j.core.config.Configuration; import org.apache.logging.log4j.core.config.plugins.Plugin; @@ -46,7 +47,7 @@ * Outputs the Throwable portion of the LoggingEvent as a Json formatted field with array * "exception": [ "stacktrace", "lines", "as", "array", "elements" ] - * + * <p> * Reusing @link org.apache.logging.log4j.core.pattern.ExtendedThrowablePatternConverter which already converts a Throwable from * LoggingEvent into a multiline string * diff --git a/server/src/main/java/org/opensearch/common/logging/Loggers.java b/server/src/main/java/org/opensearch/common/logging/Loggers.java index 7b19bd4b4be25..9a88afef90867 100644 --- a/server/src/main/java/org/opensearch/common/logging/Loggers.java +++ 
b/server/src/main/java/org/opensearch/common/logging/Loggers.java @@ -46,7 +46,7 @@ import java.util.Map; -import static org.opensearch.common.util.CollectionUtils.asArrayList; +import static org.opensearch.core.common.util.CollectionUtils.asArrayList; /** * A set of utilities around Logging. diff --git a/server/src/main/java/org/opensearch/common/logging/NodeNamePatternConverter.java b/server/src/main/java/org/opensearch/common/logging/NodeNamePatternConverter.java index b0ecbb6f9407f..8e61492c57388 100644 --- a/server/src/main/java/org/opensearch/common/logging/NodeNamePatternConverter.java +++ b/server/src/main/java/org/opensearch/common/logging/NodeNamePatternConverter.java @@ -32,8 +32,6 @@ package org.opensearch.common.logging; -import java.util.Arrays; - import org.apache.logging.log4j.core.LogEvent; import org.apache.logging.log4j.core.config.plugins.Plugin; import org.apache.logging.log4j.core.pattern.ConverterKeys; @@ -41,6 +39,8 @@ import org.apache.logging.log4j.core.pattern.PatternConverter; import org.opensearch.common.SetOnce; +import java.util.Arrays; + /** * Converts {@code %node_name} in log4j patterns into the current node name. * We can't use a system property for this because the node name system diff --git a/server/src/main/java/org/opensearch/index/SlowLogLevel.java b/server/src/main/java/org/opensearch/common/logging/SlowLogLevel.java similarity index 94% rename from server/src/main/java/org/opensearch/index/SlowLogLevel.java rename to server/src/main/java/org/opensearch/common/logging/SlowLogLevel.java index 0a28edd59d491..9f744cceaa14d 100644 --- a/server/src/main/java/org/opensearch/index/SlowLogLevel.java +++ b/server/src/main/java/org/opensearch/common/logging/SlowLogLevel.java @@ -29,7 +29,7 @@ * GitHub history for details. 
*/ -package org.opensearch.index; +package org.opensearch.common.logging; import java.util.Locale; @@ -54,7 +54,7 @@ public static SlowLogLevel parse(String level) { return valueOf(level.toUpperCase(Locale.ROOT)); } - boolean isLevelEnabledFor(SlowLogLevel levelToBeUsed) { + public boolean isLevelEnabledFor(SlowLogLevel levelToBeUsed) { // example: this.info(2) tries to log with levelToBeUsed.warn(3) - should allow return this.specificity <= levelToBeUsed.specificity; } diff --git a/server/src/main/java/org/opensearch/common/lucene/Lucene.java b/server/src/main/java/org/opensearch/common/lucene/Lucene.java index 24cc922f15a30..2c7b6b552b43f 100644 --- a/server/src/main/java/org/opensearch/common/lucene/Lucene.java +++ b/server/src/main/java/org/opensearch/common/lucene/Lucene.java @@ -84,11 +84,11 @@ import org.opensearch.ExceptionsHelper; import org.opensearch.common.Nullable; import org.opensearch.common.SuppressForbidden; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.lucene.search.TopDocsAndMaxScore; import org.opensearch.common.util.iterable.Iterables; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.index.analysis.AnalyzerScope; import org.opensearch.index.analysis.NamedAnalyzer; import org.opensearch.index.fielddata.IndexFieldData; @@ -110,7 +110,7 @@ * @opensearch.internal */ public class Lucene { - public static final String LATEST_CODEC = "Lucene95"; + public static final String LATEST_CODEC = "Lucene99"; public static final String SOFT_DELETES_FIELD = "__soft_deletes"; diff --git a/server/src/main/java/org/opensearch/common/lucene/ShardCoreKeyMap.java b/server/src/main/java/org/opensearch/common/lucene/ShardCoreKeyMap.java index 0ffd633e5a967..17b75ab22f3ed 100644 --- a/server/src/main/java/org/opensearch/common/lucene/ShardCoreKeyMap.java +++ b/server/src/main/java/org/opensearch/common/lucene/ShardCoreKeyMap.java @@ -55,7 +55,7 @@ * mappings as segments that were not known before are added and prevents the * structure from growing indefinitely by registering close listeners on these * segments so that at any time it only tracks live segments. - * + * <p> * NOTE: This is heavy. Avoid using this class unless absolutely required. 
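The SlowLogLevel hunk above both moves the enum to org.opensearch.common.logging and widens isLevelEnabledFor to public. Because the comparison direction is easy to misread, a hedged standalone restatement with illustrative names (not the real class):

// Illustrative enum: a configured level enables any level at least as
// specific as itself, so a logger set to INFO(2) allows WARN(3) but rejects
// DEBUG(1) -- matching the "this.info(2) ... levelToBeUsed.warn(3)" comment above.
enum Level {
    TRACE(0), DEBUG(1), INFO(2), WARN(3);

    private final int specificity;

    Level(int specificity) {
        this.specificity = specificity;
    }

    public boolean isLevelEnabledFor(Level levelToBeUsed) {
        return this.specificity <= levelToBeUsed.specificity;
    }
}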
* * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/common/lucene/index/FreqTermsEnum.java b/server/src/main/java/org/opensearch/common/lucene/index/FreqTermsEnum.java index 906ca6a098c2e..eabdc25f1125a 100644 --- a/server/src/main/java/org/opensearch/common/lucene/index/FreqTermsEnum.java +++ b/server/src/main/java/org/opensearch/common/lucene/index/FreqTermsEnum.java @@ -37,12 +37,12 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.opensearch.common.Nullable; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.BytesRefHash; import org.opensearch.common.util.IntArray; import org.opensearch.common.util.LongArray; -import org.opensearch.common.lease.Releasable; -import org.opensearch.common.lease.Releasables; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/common/lucene/index/OpenSearchDirectoryReader.java b/server/src/main/java/org/opensearch/common/lucene/index/OpenSearchDirectoryReader.java index 77609822d3d90..f9a87b9e74214 100644 --- a/server/src/main/java/org/opensearch/common/lucene/index/OpenSearchDirectoryReader.java +++ b/server/src/main/java/org/opensearch/common/lucene/index/OpenSearchDirectoryReader.java @@ -36,26 +36,33 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReader; import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.index.shard.ShardId; import java.io.IOException; +import java.util.Optional; +import java.util.UUID; /** * A {@link org.apache.lucene.index.FilterDirectoryReader} that exposes * OpenSearch internal per shard / index information like the shard ID. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class OpenSearchDirectoryReader extends FilterDirectoryReader { private final ShardId shardId; private final FilterDirectoryReader.SubReaderWrapper wrapper; + private final DelegatingCacheHelper delegatingCacheHelper; + private OpenSearchDirectoryReader(DirectoryReader in, FilterDirectoryReader.SubReaderWrapper wrapper, ShardId shardId) throws IOException { super(in, wrapper); this.wrapper = wrapper; this.shardId = shardId; + this.delegatingCacheHelper = new DelegatingCacheHelper(in.getReaderCacheHelper()); } /** @@ -68,7 +75,61 @@ public ShardId shardId() { @Override public CacheHelper getReaderCacheHelper() { // safe to delegate since this reader does not alter the index - return in.getReaderCacheHelper(); + return this.delegatingCacheHelper; + } + + public DelegatingCacheHelper getDelegatingCacheHelper() { + return this.delegatingCacheHelper; + } + + /** + * Wraps existing IndexReader cache helper which internally provides a way to wrap CacheKey. 
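The DelegatingCacheHelper javadoc opened here is completed just below; the point of the wrapper is that a reader's cache identity gains a stable string id that can be serialized where IndexReader.CacheKey itself cannot. A hedged usage sketch — the caller class is hypothetical, but the three accessors are exactly the ones added in this hunk:

import org.opensearch.common.lucene.index.OpenSearchDirectoryReader;

// Hypothetical caller: derive a serializable cache identity from the wrapped
// reader instead of holding the IndexReader.CacheKey object itself.
final class CacheIdentity {
    static String cacheId(OpenSearchDirectoryReader reader) {
        return reader.getDelegatingCacheHelper() // wraps in.getReaderCacheHelper()
            .getDelegatingCacheKey()             // CacheKey paired with a random UUID
            .getId();                            // stable for the lifetime of the reader
    }
}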
+ * @opensearch.internal + */ + public class DelegatingCacheHelper implements CacheHelper { + private final CacheHelper cacheHelper; + private final DelegatingCacheKey serializableCacheKey; + + DelegatingCacheHelper(CacheHelper cacheHelper) { + this.cacheHelper = cacheHelper; + this.serializableCacheKey = new DelegatingCacheKey(Optional.ofNullable(cacheHelper).map(key -> getKey()).orElse(null)); + } + + @Override + public CacheKey getKey() { + return this.cacheHelper.getKey(); + } + + public DelegatingCacheKey getDelegatingCacheKey() { + return this.serializableCacheKey; + } + + @Override + public void addClosedListener(ClosedListener listener) { + this.cacheHelper.addClosedListener(listener); + } + } + + /** + * Wraps internal IndexReader.CacheKey and attaches a uniqueId to it which can be eventually be used instead of + * object itself for serialization purposes. + */ + public class DelegatingCacheKey { + private final CacheKey cacheKey; + private final String uniqueId; + + DelegatingCacheKey(CacheKey cacheKey) { + this.cacheKey = cacheKey; + this.uniqueId = UUID.randomUUID().toString(); + } + + public CacheKey getCacheKey() { + return this.cacheKey; + } + + public String getId() { + return uniqueId; + } } @Override diff --git a/server/src/main/java/org/opensearch/common/lucene/search/FilteredCollector.java b/server/src/main/java/org/opensearch/common/lucene/search/FilteredCollector.java index 0d7a8866f7788..b5c0e84a10308 100644 --- a/server/src/main/java/org/opensearch/common/lucene/search/FilteredCollector.java +++ b/server/src/main/java/org/opensearch/common/lucene/search/FilteredCollector.java @@ -40,6 +40,7 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; import org.opensearch.common.lucene.Lucene; +import org.opensearch.search.profile.query.ProfileWeight; import java.io.IOException; @@ -64,6 +65,9 @@ public Collector getCollector() { @Override public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException { + if (filter instanceof ProfileWeight) { + ((ProfileWeight) filter).associateCollectorToLeaves(context, collector); + } final ScorerSupplier filterScorerSupplier = filter.scorerSupplier(context); final LeafCollector in = collector.getLeafCollector(context); final Bits bits = Lucene.asSequentialAccessBits(context.reader().maxDoc(), filterScorerSupplier); diff --git a/server/src/main/java/org/opensearch/common/lucene/search/MoreLikeThisQuery.java b/server/src/main/java/org/opensearch/common/lucene/search/MoreLikeThisQuery.java index b4440b85e0037..ef07f6ea8052c 100644 --- a/server/src/main/java/org/opensearch/common/lucene/search/MoreLikeThisQuery.java +++ b/server/src/main/java/org/opensearch/common/lucene/search/MoreLikeThisQuery.java @@ -36,12 +36,12 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.index.Fields; -import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryVisitor; import org.apache.lucene.search.similarities.ClassicSimilarity; @@ -144,12 +144,12 @@ public boolean equals(Object obj) { } @Override - public Query rewrite(IndexReader reader) throws IOException { - Query rewritten = super.rewrite(reader); + 
public Query rewrite(IndexSearcher searcher) throws IOException { + Query rewritten = super.rewrite(searcher); if (rewritten != this) { return rewritten; } - XMoreLikeThis mlt = new XMoreLikeThis(reader, similarity == null ? new ClassicSimilarity() : similarity); + XMoreLikeThis mlt = new XMoreLikeThis(searcher.getIndexReader(), similarity == null ? new ClassicSimilarity() : similarity); mlt.setFieldNames(moreLikeFields); mlt.setAnalyzer(analyzer); diff --git a/server/src/main/java/org/opensearch/common/lucene/search/MultiPhrasePrefixQuery.java b/server/src/main/java/org/opensearch/common/lucene/search/MultiPhrasePrefixQuery.java index 98ebb34fc040e..cc0468efb243e 100644 --- a/server/src/main/java/org/opensearch/common/lucene/search/MultiPhrasePrefixQuery.java +++ b/server/src/main/java/org/opensearch/common/lucene/search/MultiPhrasePrefixQuery.java @@ -39,6 +39,7 @@ import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.MultiPhraseQuery; import org.apache.lucene.search.Query; @@ -159,8 +160,8 @@ public int[] getPositions() { } @Override - public Query rewrite(IndexReader reader) throws IOException { - Query rewritten = super.rewrite(reader); + public Query rewrite(IndexSearcher searcher) throws IOException { + Query rewritten = super.rewrite(searcher); if (rewritten != this) { return rewritten; } @@ -177,7 +178,7 @@ public Query rewrite(IndexReader reader) throws IOException { int position = positions.get(sizeMinus1); Set<Term> terms = new HashSet<>(); for (Term term : suffixTerms) { - getPrefixTerms(terms, term, reader); + getPrefixTerms(terms, term, searcher.getIndexReader()); if (terms.size() > maxExpansions) { break; } diff --git a/server/src/main/java/org/opensearch/common/lucene/search/TopDocsAndMaxScore.java b/server/src/main/java/org/opensearch/common/lucene/search/TopDocsAndMaxScore.java index 5f8c52676b481..5b7bae54a6fad 100644 --- a/server/src/main/java/org/opensearch/common/lucene/search/TopDocsAndMaxScore.java +++ b/server/src/main/java/org/opensearch/common/lucene/search/TopDocsAndMaxScore.java @@ -33,13 +33,15 @@ package org.opensearch.common.lucene.search; import org.apache.lucene.search.TopDocs; +import org.opensearch.common.annotation.PublicApi; /** * Wrapper around a {@link TopDocs} instance and the maximum score. * - * @opensearch.internal + * @opensearch.api */ // TODO: Remove this class when https://github.com/elastic/elasticsearch/issues/32981 is addressed. 
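Several hunks here (MoreLikeThisQuery, MultiPhrasePrefixQuery, and the function-score queries below) apply the same mechanical migration: Lucene 9.x moved Query#rewrite from IndexReader to IndexSearcher, and code that still needs the reader fetches it via searcher.getIndexReader(). A minimal hedged sketch of the pattern, with an illustrative query class:

import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;

import java.io.IOException;

// Illustrative delegating query: rewrite now receives the searcher, and
// reader-based helpers remain reachable via searcher.getIndexReader().
abstract class DelegatingQuery extends Query {
    protected final Query delegate;

    protected DelegatingQuery(Query delegate) {
        this.delegate = delegate;
    }

    @Override
    public Query rewrite(IndexSearcher searcher) throws IOException {
        Query rewritten = delegate.rewrite(searcher);
        // e.g. XMoreLikeThis above still wants the reader:
        // IndexReader reader = searcher.getIndexReader();
        return rewritten == delegate ? this : rewritten;
    }
}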
+@PublicApi(since = "1.0.0") public final class TopDocsAndMaxScore { public final TopDocs topDocs; diff --git a/server/src/main/java/org/opensearch/common/lucene/search/function/FunctionScoreQuery.java b/server/src/main/java/org/opensearch/common/lucene/search/function/FunctionScoreQuery.java index ebd155b8a5679..cb93e80288a98 100644 --- a/server/src/main/java/org/opensearch/common/lucene/search/function/FunctionScoreQuery.java +++ b/server/src/main/java/org/opensearch/common/lucene/search/function/FunctionScoreQuery.java @@ -46,10 +46,10 @@ import org.apache.lucene.util.Bits; import org.opensearch.OpenSearchException; import org.opensearch.common.Nullable; +import org.opensearch.common.lucene.Lucene; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.lucene.Lucene; import java.io.IOException; import java.util.ArrayList; @@ -128,7 +128,7 @@ protected int doHashCode() { @Override protected ScoreFunction rewrite(IndexReader reader) throws IOException { - Query newFilter = filter.rewrite(reader); + Query newFilter = filter.rewrite(new IndexSearcher(reader)); if (newFilter == filter) { return this; } @@ -322,16 +322,16 @@ public void visit(QueryVisitor visitor) { } @Override - public Query rewrite(IndexReader reader) throws IOException { - Query rewritten = super.rewrite(reader); + public Query rewrite(IndexSearcher searcher) throws IOException { + Query rewritten = super.rewrite(searcher); if (rewritten != this) { return rewritten; } - Query newQ = subQuery.rewrite(reader); + Query newQ = subQuery.rewrite(searcher); ScoreFunction[] newFunctions = new ScoreFunction[functions.length]; boolean needsRewrite = (newQ != subQuery); for (int i = 0; i < functions.length; i++) { - newFunctions[i] = functions[i].rewrite(reader); + newFunctions[i] = functions[i].rewrite(searcher.getIndexReader()); needsRewrite |= (newFunctions[i] != functions[i]); } if (needsRewrite) { diff --git a/server/src/main/java/org/opensearch/common/lucene/search/function/ScriptScoreFunction.java b/server/src/main/java/org/opensearch/common/lucene/search/function/ScriptScoreFunction.java index 533d74e916c09..38c356a8be4b0 100644 --- a/server/src/main/java/org/opensearch/common/lucene/search/function/ScriptScoreFunction.java +++ b/server/src/main/java/org/opensearch/common/lucene/search/function/ScriptScoreFunction.java @@ -35,11 +35,11 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.Scorable; +import org.opensearch.Version; +import org.opensearch.common.Nullable; import org.opensearch.script.ExplainableScoreScript; import org.opensearch.script.ScoreScript; import org.opensearch.script.Script; -import org.opensearch.Version; -import org.opensearch.common.Nullable; import java.io.IOException; import java.util.Objects; diff --git a/server/src/main/java/org/opensearch/common/lucene/search/function/ScriptScoreQuery.java b/server/src/main/java/org/opensearch/common/lucene/search/function/ScriptScoreQuery.java index 8bf5fc0f89d31..5aff09d715622 100644 --- a/server/src/main/java/org/opensearch/common/lucene/search/function/ScriptScoreQuery.java +++ b/server/src/main/java/org/opensearch/common/lucene/search/function/ScriptScoreQuery.java @@ -32,21 +32,21 @@ package org.opensearch.common.lucene.search.function; -import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import 
org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BulkScorer; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.FilterLeafCollector; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.Query; -import org.apache.lucene.search.Scorable; -import org.apache.lucene.search.Weight; -import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.QueryVisitor; +import org.apache.lucene.search.Scorable; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; -import org.apache.lucene.search.BulkScorer; +import org.apache.lucene.search.TwoPhaseIterator; +import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; import org.opensearch.Version; import org.opensearch.common.Nullable; @@ -105,12 +105,12 @@ public ScriptScoreQuery( } @Override - public Query rewrite(IndexReader reader) throws IOException { - Query newQ = subQuery.rewrite(reader); + public Query rewrite(IndexSearcher searcher) throws IOException { + Query newQ = subQuery.rewrite(searcher); if (newQ != subQuery) { return new ScriptScoreQuery(newQ, queryName, script, scriptBuilder, minScore, indexName, shardId, indexVersion); } - return super.rewrite(reader); + return super.rewrite(searcher); } @Override @@ -303,6 +303,11 @@ public DocIdSetIterator iterator() { return subQueryScorer.iterator(); } + @Override + public TwoPhaseIterator twoPhaseIterator() { + return subQueryScorer.twoPhaseIterator(); + } + @Override public float getMaxScore(int upTo) { return Float.MAX_VALUE; // TODO: what would be a good upper bound? diff --git a/server/src/main/java/org/opensearch/common/lucene/store/ByteArrayIndexInput.java b/server/src/main/java/org/opensearch/common/lucene/store/ByteArrayIndexInput.java index 6eb613daf5133..1804a9ac05a29 100644 --- a/server/src/main/java/org/opensearch/common/lucene/store/ByteArrayIndexInput.java +++ b/server/src/main/java/org/opensearch/common/lucene/store/ByteArrayIndexInput.java @@ -33,6 +33,7 @@ import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.RandomAccessInput; +import org.apache.lucene.util.BitUtil; import java.io.EOFException; import java.io.IOException; @@ -121,51 +122,29 @@ public void readBytes(final byte[] b, final int offset, int len) throws IOExcept @Override public byte readByte(long pos) throws IOException { validatePos(pos, Byte.BYTES); - return internalReadByte(pos); + return bytes[offset + (int) pos]; } @Override public short readShort(long pos) throws IOException { validatePos(pos, Short.BYTES); - return internalReadShort(pos); + return (short) BitUtil.VH_LE_SHORT.get(bytes, offset + (int) pos); } @Override public int readInt(long pos) throws IOException { validatePos(pos, Integer.BYTES); - return internalReadInt(pos); + return (int) BitUtil.VH_LE_INT.get(bytes, offset + (int) pos); } @Override public long readLong(long pos) throws IOException { validatePos(pos, Long.BYTES); - return internalReadLong(pos); - } - - private byte internalReadByte(long pos) { - return bytes[offset + (int) pos]; - } - - private short internalReadShort(long pos) { - final byte p1 = internalReadByte(pos); - final byte p2 = internalReadByte(pos + 1); - return (short) (((p2 & 0xFF) << 8) | (p1 & 0xFF)); - } - - private int internalReadInt(long pos) { - final short p1 = internalReadShort(pos); - final short p2 = internalReadShort(pos + Short.BYTES); - return ((p2 & 0xFFFF) << 
16) | (p1 & 0xFFFF); - } - - public long internalReadLong(long pos) { - final int p1 = internalReadInt(pos); - final int p2 = internalReadInt(pos + Integer.BYTES); - return (((long) p2) << 32) | (p1 & 0xFFFFFFFFL); + return (long) BitUtil.VH_LE_LONG.get(bytes, offset + (int) pos); } private void validatePos(long pos, int len) throws EOFException { - if (pos < 0 || pos + len > length + offset) { + if (pos < 0 || pos + len > length) { throw new EOFException("seek past EOF"); } } diff --git a/server/src/main/java/org/opensearch/common/lucene/uid/VersionsAndSeqNoResolver.java b/server/src/main/java/org/opensearch/common/lucene/uid/VersionsAndSeqNoResolver.java index 57524bc3657d9..4958267b1775b 100644 --- a/server/src/main/java/org/opensearch/common/lucene/uid/VersionsAndSeqNoResolver.java +++ b/server/src/main/java/org/opensearch/common/lucene/uid/VersionsAndSeqNoResolver.java @@ -37,6 +37,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.Term; import org.apache.lucene.util.CloseableThreadLocal; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.concurrent.ConcurrentCollections; import java.io.IOException; @@ -110,8 +111,9 @@ private VersionsAndSeqNoResolver() {} /** * Wraps an {@link LeafReaderContext}, a doc ID <b>relative to the context doc base</b> and a version. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class DocIdAndVersion { public final int docId; public final long version; diff --git a/server/src/main/java/org/opensearch/common/metrics/CounterMetric.java b/server/src/main/java/org/opensearch/common/metrics/CounterMetric.java index 5c48c1f772ff0..cb181840406a5 100644 --- a/server/src/main/java/org/opensearch/common/metrics/CounterMetric.java +++ b/server/src/main/java/org/opensearch/common/metrics/CounterMetric.java @@ -62,4 +62,5 @@ public void dec(long n) { public long count() { return counter.sum(); } + } diff --git a/server/src/main/java/org/opensearch/common/metrics/MeanMetric.java b/server/src/main/java/org/opensearch/common/metrics/MeanMetric.java index 33f12c8cb42d3..359facdce633b 100644 --- a/server/src/main/java/org/opensearch/common/metrics/MeanMetric.java +++ b/server/src/main/java/org/opensearch/common/metrics/MeanMetric.java @@ -79,4 +79,5 @@ public void clear() { counter.reset(); sum.reset(); } + } diff --git a/server/src/main/java/org/opensearch/common/network/CloseableChannel.java b/server/src/main/java/org/opensearch/common/network/CloseableChannel.java index 704b616bcabc9..cc00bf5d1ba65 100644 --- a/server/src/main/java/org/opensearch/common/network/CloseableChannel.java +++ b/server/src/main/java/org/opensearch/common/network/CloseableChannel.java @@ -32,10 +32,10 @@ package org.opensearch.common.network; -import org.opensearch.action.ActionFuture; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.action.ActionListener; import java.io.Closeable; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/common/network/NetworkModule.java b/server/src/main/java/org/opensearch/common/network/NetworkModule.java index d93f8d7c98b32..f97d5b2f80eeb 100644 --- a/server/src/main/java/org/opensearch/common/network/NetworkModule.java +++ b/server/src/main/java/org/opensearch/common/network/NetworkModule.java @@ -40,23 +40,25 @@ import 
org.opensearch.cluster.routing.allocation.command.CancelAllocationCommand; import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.opensearch.common.CheckedFunction; -import org.opensearch.core.ParseField; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.PageCacheRecycler; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.http.HttpServerTransport; import org.opensearch.index.shard.PrimaryReplicaSyncer.ResyncTask; -import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.plugins.NetworkPlugin; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.tasks.RawTaskStatus; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportInterceptor; @@ -130,7 +132,7 @@ public final class NetworkModule { private final Map<String, Supplier<Transport>> transportFactories = new HashMap<>(); private final Map<String, Supplier<HttpServerTransport>> transportHttpFactories = new HashMap<>(); - private final List<TransportInterceptor> transportIntercetors = new ArrayList<>(); + private final List<TransportInterceptor> transportInterceptors = new ArrayList<>(); /** * Creates a network module that custom networking classes can be plugged into. 
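The constructor hunk that follows threads a Tracer and an externally supplied interceptor list into NetworkModule; as the inline comment there notes, those interceptors are registered last because CompositeTransportInterceptor wraps handlers in list order, so the last registered interceptor ends up outermost and is triggered first. A hedged sketch of the interceptor shape being registered (illustrative class, real interface):

import org.opensearch.transport.TransportInterceptor;
import org.opensearch.transport.TransportRequest;
import org.opensearch.transport.TransportRequestHandler;

// Illustrative pass-through interceptor: wrap the handler to observe each
// request before delegating to the previously composed handler chain.
final class ObservingInterceptor implements TransportInterceptor {
    @Override
    public <T extends TransportRequest> TransportRequestHandler<T> interceptHandler(
        String action,
        String executor,
        boolean forceExecution,
        TransportRequestHandler<T> actualHandler
    ) {
        return (request, channel, task) -> {
            // observe `action` here, then delegate
            actualHandler.messageReceived(request, channel, task);
        };
    }
}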
@@ -147,7 +149,9 @@ public NetworkModule( NamedXContentRegistry xContentRegistry, NetworkService networkService, HttpServerTransport.Dispatcher dispatcher, - ClusterSettings clusterSettings + ClusterSettings clusterSettings, + Tracer tracer, + List<TransportInterceptor> transportInterceptors ) { this.settings = settings; for (NetworkPlugin plugin : plugins) { @@ -160,7 +164,8 @@ public NetworkModule( xContentRegistry, networkService, dispatcher, - clusterSettings + clusterSettings, + tracer ); for (Map.Entry<String, Supplier<HttpServerTransport>> entry : httpTransportFactory.entrySet()) { registerHttpTransport(entry.getKey(), entry.getValue()); @@ -171,19 +176,24 @@ public NetworkModule( pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, - networkService + networkService, + tracer ); for (Map.Entry<String, Supplier<Transport>> entry : transportFactory.entrySet()) { registerTransport(entry.getKey(), entry.getValue()); } - List<TransportInterceptor> transportInterceptors = plugin.getTransportInterceptors( + List<TransportInterceptor> pluginTransportInterceptors = plugin.getTransportInterceptors( namedWriteableRegistry, threadPool.getThreadContext() ); - for (TransportInterceptor interceptor : transportInterceptors) { + for (TransportInterceptor interceptor : pluginTransportInterceptors) { registerTransportInterceptor(interceptor); } } + // Adding last because interceptors are triggered from last to first order from the list + if (transportInterceptors != null) { + transportInterceptors.forEach(this::registerTransportInterceptor); + } } /** Adds a transport implementation that can be selected by setting {@link #TRANSPORT_TYPE_KEY}. */ @@ -260,7 +270,7 @@ public Supplier<Transport> getTransportSupplier() { * Registers a new {@link TransportInterceptor} */ private void registerTransportInterceptor(TransportInterceptor interceptor) { - this.transportIntercetors.add(Objects.requireNonNull(interceptor, "interceptor must not be null")); + this.transportInterceptors.add(Objects.requireNonNull(interceptor, "interceptor must not be null")); } /** @@ -268,7 +278,7 @@ private void registerTransportInterceptor(TransportInterceptor interceptor) { * @see #registerTransportInterceptor(TransportInterceptor) */ public TransportInterceptor getTransportInterceptor() { - return new CompositeTransportInterceptor(this.transportIntercetors); + return new CompositeTransportInterceptor(this.transportInterceptors); } static final class CompositeTransportInterceptor implements TransportInterceptor { @@ -291,6 +301,30 @@ public <T extends TransportRequest> TransportRequestHandler<T> interceptHandler( return actualHandler; } + /** + * Intercept the transport action and perform admission control if applicable + * @param action The action the request handler is associated with + * @param executor The executor the request handling will be executed on + * @param forceExecution Force execution on the executor queue and never reject it + * @param actualHandler The handler itself that implements the request handling + * @param admissionControlActionType Admission control based on resource usage limits of provided action type + * @return returns the actual TransportRequestHandler after intercepting all previous handlers + * @param <T> transport request type + */ + @Override + public <T extends TransportRequest> TransportRequestHandler<T> interceptHandler( + String action, + String executor, + boolean forceExecution, + TransportRequestHandler<T> actualHandler, + AdmissionControlActionType admissionControlActionType 
+ ) { + for (TransportInterceptor interceptor : this.transportInterceptors) { + actualHandler = interceptor.interceptHandler(action, executor, forceExecution, actualHandler, admissionControlActionType); + } + return actualHandler; + } + @Override public AsyncSender interceptSender(AsyncSender sender) { for (TransportInterceptor interceptor : this.transportInterceptors) { diff --git a/server/src/main/java/org/opensearch/common/network/NetworkService.java b/server/src/main/java/org/opensearch/common/network/NetworkService.java index 0fb299ef66e70..deec184f702bf 100644 --- a/server/src/main/java/org/opensearch/common/network/NetworkService.java +++ b/server/src/main/java/org/opensearch/common/network/NetworkService.java @@ -34,8 +34,8 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import java.io.IOException; import java.net.InetAddress; diff --git a/server/src/main/java/org/opensearch/common/path/PathTrie.java b/server/src/main/java/org/opensearch/common/path/PathTrie.java index 7cb7b46acfafe..0b516fa037c48 100644 --- a/server/src/main/java/org/opensearch/common/path/PathTrie.java +++ b/server/src/main/java/org/opensearch/common/path/PathTrie.java @@ -37,6 +37,7 @@ import java.util.Iterator; import java.util.Map; import java.util.NoSuchElementException; +import java.util.Stack; import java.util.function.BiFunction; import java.util.function.Supplier; @@ -405,4 +406,45 @@ public T next() { } }; } + + public Iterator<T> retrieveAll() { + Stack<TrieNode> stack = new Stack<>(); + stack.add(root); + + return new Iterator<T>() { + @Override + public boolean hasNext() { + while (!stack.empty()) { + TrieNode node = stack.peek(); + + if (node.value != null) { + return true; + } + + advance(); + } + + return false; + } + + @Override + public T next() { + while (!stack.empty()) { + TrieNode node = advance(); + + if (node.value != null) { + return node.value; + } + } + + throw new NoSuchElementException("called next() without validating hasNext()! 
no more nodes available"); + } + + private TrieNode advance() { + TrieNode node = stack.pop(); + stack.addAll(node.children.values()); + return node; + } + }; + } } diff --git a/server/src/main/java/org/opensearch/common/regex/Regex.java b/server/src/main/java/org/opensearch/common/regex/Regex.java index 14716fdff6d2e..323b460af62df 100644 --- a/server/src/main/java/org/opensearch/common/regex/Regex.java +++ b/server/src/main/java/org/opensearch/common/regex/Regex.java @@ -122,42 +122,42 @@ public static boolean simpleMatch(String pattern, String str, boolean caseInsens return false; } if (caseInsensitive) { - pattern = org.opensearch.common.Strings.toLowercaseAscii(pattern); - str = org.opensearch.common.Strings.toLowercaseAscii(str); + pattern = Strings.toLowercaseAscii(pattern); + str = Strings.toLowercaseAscii(str); } return simpleMatchWithNormalizedStrings(pattern, str); } private static boolean simpleMatchWithNormalizedStrings(String pattern, String str) { - final int firstIndex = pattern.indexOf('*'); - if (firstIndex == -1) { - return pattern.equals(str); - } - if (firstIndex == 0) { - if (pattern.length() == 1) { - return true; - } - final int nextIndex = pattern.indexOf('*', firstIndex + 1); - if (nextIndex == -1) { - // str.endsWith(pattern.substring(1)), but avoiding the construction of pattern.substring(1): - return str.regionMatches(str.length() - pattern.length() + 1, pattern, 1, pattern.length() - 1); - } else if (nextIndex == 1) { - // Double wildcard "**" - skipping the first "*" - return simpleMatchWithNormalizedStrings(pattern.substring(1), str); - } - final String part = pattern.substring(1, nextIndex); - int partIndex = str.indexOf(part); - while (partIndex != -1) { - if (simpleMatchWithNormalizedStrings(pattern.substring(nextIndex), str.substring(partIndex + part.length()))) { - return true; - } - partIndex = str.indexOf(part, partIndex + 1); + int sIdx = 0, pIdx = 0, match = 0, wildcardIdx = -1; + while (sIdx < str.length()) { + // both chars matching, incrementing both pointers + if (pIdx < pattern.length() && str.charAt(sIdx) == pattern.charAt(pIdx)) { + sIdx++; + pIdx++; + } else if (pIdx < pattern.length() && pattern.charAt(pIdx) == '*') { + // wildcard found, only incrementing pattern pointer + wildcardIdx = pIdx; + match = sIdx; + pIdx++; + } else if (wildcardIdx != -1) { + // last pattern pointer was a wildcard, incrementing string pointer + pIdx = wildcardIdx + 1; + match++; + sIdx = match; + } else { + // current pattern pointer is not a wildcard, last pattern pointer was also not a wildcard + // characters do not match + return false; } - return false; } - return str.regionMatches(0, pattern, 0, firstIndex) - && (firstIndex == pattern.length() - 1 // only wildcard in pattern is at the end, so no need to look at the rest of the string - || simpleMatchWithNormalizedStrings(pattern.substring(firstIndex), str.substring(firstIndex))); + + // check for remaining characters in pattern + while (pIdx < pattern.length() && pattern.charAt(pIdx) == '*') { + pIdx++; + } + + return pIdx == pattern.length(); } /** diff --git a/server/src/main/java/org/opensearch/common/rounding/DateTimeUnit.java b/server/src/main/java/org/opensearch/common/rounding/DateTimeUnit.java deleted file mode 100644 index 47e182b3caf84..0000000000000 --- a/server/src/main/java/org/opensearch/common/rounding/DateTimeUnit.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under 
the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.common.rounding; - -import org.opensearch.OpenSearchException; -import org.opensearch.common.joda.Joda; -import org.joda.time.DateTimeField; -import org.joda.time.DateTimeZone; -import org.joda.time.chrono.ISOChronology; - -import java.util.function.Function; - -/** - * Main date time unit class. - * - * @opensearch.internal - */ -public enum DateTimeUnit { - - WEEK_OF_WEEKYEAR((byte) 1, tz -> ISOChronology.getInstance(tz).weekOfWeekyear()), - YEAR_OF_CENTURY((byte) 2, tz -> ISOChronology.getInstance(tz).yearOfCentury()), - QUARTER((byte) 3, tz -> Joda.QuarterOfYear.getField(ISOChronology.getInstance(tz))), - MONTH_OF_YEAR((byte) 4, tz -> ISOChronology.getInstance(tz).monthOfYear()), - DAY_OF_MONTH((byte) 5, tz -> ISOChronology.getInstance(tz).dayOfMonth()), - HOUR_OF_DAY((byte) 6, tz -> ISOChronology.getInstance(tz).hourOfDay()), - MINUTES_OF_HOUR((byte) 7, tz -> ISOChronology.getInstance(tz).minuteOfHour()), - SECOND_OF_MINUTE((byte) 8, tz -> ISOChronology.getInstance(tz).secondOfMinute()); - - private final byte id; - private final Function<DateTimeZone, DateTimeField> fieldFunction; - - DateTimeUnit(byte id, Function<DateTimeZone, DateTimeField> fieldFunction) { - this.id = id; - this.fieldFunction = fieldFunction; - } - - public byte id() { - return id; - } - - /** - * @return the {@link DateTimeField} for the provided {@link DateTimeZone} for this time unit - */ - public DateTimeField field(DateTimeZone tz) { - return fieldFunction.apply(tz); - } - - public static DateTimeUnit resolve(byte id) { - switch (id) { - case 1: - return WEEK_OF_WEEKYEAR; - case 2: - return YEAR_OF_CENTURY; - case 3: - return QUARTER; - case 4: - return MONTH_OF_YEAR; - case 5: - return DAY_OF_MONTH; - case 6: - return HOUR_OF_DAY; - case 7: - return MINUTES_OF_HOUR; - case 8: - return SECOND_OF_MINUTE; - default: - throw new OpenSearchException("Unknown date time unit id [" + id + "]"); - } - } -} diff --git a/server/src/main/java/org/opensearch/common/rounding/Rounding.java b/server/src/main/java/org/opensearch/common/rounding/Rounding.java deleted file mode 100644 index 9af64b8320453..0000000000000 --- a/server/src/main/java/org/opensearch/common/rounding/Rounding.java +++ /dev/null @@ -1,459 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
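One hunk back, Regex.simpleMatchWithNormalizedStrings replaced its recursive substring matcher with an iterative two-pointer scan (greedy '*' with backtracking), avoiding per-step substring allocation. Semantics are meant to be unchanged; illustrative behavior using the public entry points shown in that hunk (the patterns are assumptions, not taken from the patch):

import org.opensearch.common.regex.Regex;

// Hedged usage sketch of the rewritten matcher.
public final class SimpleMatchDemo {
    public static void main(String[] args) {
        assert Regex.simpleMatch("log-*-2024", "log-app-2024");
        assert Regex.simpleMatch("**", "anything");          // double wildcard still fine
        assert !Regex.simpleMatch("log-*", "metrics-app");
        assert Regex.simpleMatch("LOG-*", "log-app", true);  // caseInsensitive overload above
    }
}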
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.common.rounding; - -import org.opensearch.OpenSearchException; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.TimeValue; -import org.joda.time.DateTimeField; -import org.joda.time.DateTimeZone; -import org.joda.time.IllegalInstantException; - -import java.io.IOException; -import java.util.Objects; - -/** - * A strategy for rounding long values. - * - * Use the java based Rounding class where applicable - * - * @opensearch.internal - */ -@Deprecated -public abstract class Rounding implements Writeable { - - public abstract byte id(); - - /** - * Rounds the given value. - */ - public abstract long round(long value); - - /** - * Given the rounded value (which was potentially generated by {@link #round(long)}, returns the next rounding value. For example, with - * interval based rounding, if the interval is 3, {@code nextRoundValue(6) = 9 }. 
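The interval contract quoted above ({@code nextRoundValue(6) = 9} when the interval is 3) and the roundKey arithmetic deleted further down reduce to floor division; a hedged standalone restatement:

// Illustrative fixed-interval rounding matching the deleted contract:
// round(7) == 6 and nextRoundingValue(6) == 9 for interval 3.
final class IntervalRounding {
    private final long interval;

    IntervalRounding(long interval) {
        if (interval < 1) throw new IllegalArgumentException("Zero or negative time interval not supported");
        this.interval = interval;
    }

    long round(long value) {
        // floor division that stays correct for negative epoch millis
        long key = value < 0 ? (value - interval + 1) / interval : value / interval;
        return key * interval;
    }

    long nextRoundingValue(long value) {
        return round(value) + interval;
    }
}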
- * - * @param value The current rounding value - * @return The next rounding value; - */ - public abstract long nextRoundingValue(long value); - - @Override - public abstract boolean equals(Object obj); - - @Override - public abstract int hashCode(); - - public static Builder builder(DateTimeUnit unit) { - return new Builder(unit); - } - - public static Builder builder(TimeValue interval) { - return new Builder(interval); - } - - /** - * Builder for rounding - * - * @opensearch.internal - */ - public static class Builder { - - private final DateTimeUnit unit; - private final long interval; - - private DateTimeZone timeZone = DateTimeZone.UTC; - - public Builder(DateTimeUnit unit) { - this.unit = unit; - this.interval = -1; - } - - public Builder(TimeValue interval) { - this.unit = null; - if (interval.millis() < 1) throw new IllegalArgumentException("Zero or negative time interval not supported"); - this.interval = interval.millis(); - } - - public Builder timeZone(DateTimeZone timeZone) { - if (timeZone == null) { - throw new IllegalArgumentException("Setting null as timezone is not supported"); - } - this.timeZone = timeZone; - return this; - } - - public Rounding build() { - Rounding timeZoneRounding; - if (unit != null) { - timeZoneRounding = new TimeUnitRounding(unit, timeZone); - } else { - timeZoneRounding = new TimeIntervalRounding(interval, timeZone); - } - return timeZoneRounding; - } - } - - /** - * Rounding time units - * - * @opensearch.internal - */ - static class TimeUnitRounding extends Rounding { - - static final byte ID = 1; - - private final DateTimeUnit unit; - private final DateTimeField field; - private final DateTimeZone timeZone; - private final boolean unitRoundsToMidnight; - - TimeUnitRounding(DateTimeUnit unit, DateTimeZone timeZone) { - this.unit = unit; - this.field = unit.field(timeZone); - unitRoundsToMidnight = this.field.getDurationField().getUnitMillis() > 60L * 60L * 1000L; - this.timeZone = timeZone; - } - - TimeUnitRounding(StreamInput in) throws IOException { - unit = DateTimeUnit.resolve(in.readByte()); - timeZone = DateTimeZone.forID(in.readString()); - field = unit.field(timeZone); - unitRoundsToMidnight = field.getDurationField().getUnitMillis() > 60L * 60L * 1000L; - } - - @Override - public byte id() { - return ID; - } - - /** - * @return The latest timestamp T which is strictly before utcMillis - * and such that timeZone.getOffset(T) != timeZone.getOffset(utcMillis). - * If there is no such T, returns Long.MAX_VALUE. - */ - private long previousTransition(long utcMillis) { - final int offsetAtInputTime = timeZone.getOffset(utcMillis); - do { - // Some timezones have transitions that do not change the offset, so we have to - // repeatedly call previousTransition until a nontrivial transition is found. - - long previousTransition = timeZone.previousTransition(utcMillis); - if (previousTransition == utcMillis) { - // There are no earlier transitions - return Long.MAX_VALUE; - } - assert previousTransition < utcMillis; // Progress was made - utcMillis = previousTransition; - } while (timeZone.getOffset(utcMillis) == offsetAtInputTime); - - return utcMillis; - } - - @Override - public long round(long utcMillis) { - - // field.roundFloor() works as long as the offset doesn't change. It is worth getting this case out of the way first, as - // the calculations for fixing things near to offset changes are a little expensive and are unnecessary in the common case - // of working in UTC. 
- if (timeZone.isFixed()) { - return field.roundFloor(utcMillis); - } - - // When rounding to hours we consider any local time of the form 'xx:00:00' as rounded, even though this gives duplicate - // bucket names for the times when the clocks go back. Shorter units behave similarly. However, longer units round down to - // midnight, and on the days where there are two midnights we would rather pick the earlier one, so that buckets are - // uniquely identified by the date. - if (unitRoundsToMidnight) { - final long anyLocalStartOfDay = field.roundFloor(utcMillis); - // `anyLocalStartOfDay` is _supposed_ to be the Unix timestamp for the start of the day in question in the current time - // zone. Mostly this just means "midnight", which is fine, and on days with no local midnight it's the first time that - // does occur on that day which is also ok. However, on days with >1 local midnight this is _one_ of the midnights, but - // may not be the first. Check whether this is happening, and fix it if so. - - final long previousTransition = previousTransition(anyLocalStartOfDay); - - if (previousTransition == Long.MAX_VALUE) { - // No previous transitions, so there can't be another earlier local midnight. - return anyLocalStartOfDay; - } - - final long currentOffset = timeZone.getOffset(anyLocalStartOfDay); - final long previousOffset = timeZone.getOffset(previousTransition); - assert currentOffset != previousOffset; - - // NB we only assume interference from one previous transition. It's theoretically possible to have two transitions in - // quick succession, both of which have a midnight in them, but this doesn't appear to happen in the TZDB so (a) it's - // pointless to implement and (b) it won't be tested. I recognise that this comment is tempting fate and will likely - // cause this very situation to occur in the near future, and eagerly look forward to fixing this using a loop over - // previous transitions when it happens. - - final long alsoLocalStartOfDay = anyLocalStartOfDay + currentOffset - previousOffset; - // `alsoLocalStartOfDay` is the Unix timestamp for the start of the day in question if the previous offset were in - // effect. - - if (alsoLocalStartOfDay <= previousTransition) { - // Therefore the previous offset _is_ in effect at `alsoLocalStartOfDay`, and it's earlier than anyLocalStartOfDay, - // so this is the answer to use. - return alsoLocalStartOfDay; - } else { - // The previous offset is not in effect at `alsoLocalStartOfDay`, so the current offset must be. - return anyLocalStartOfDay; - } - - } else { - do { - long rounded = field.roundFloor(utcMillis); - - // field.roundFloor() mostly works as long as the offset hasn't changed in [rounded, utcMillis], so look at where - // the offset most recently changed. - - final long previousTransition = previousTransition(utcMillis); - - if (previousTransition == Long.MAX_VALUE || previousTransition < rounded) { - // The offset did not change in [rounded, utcMillis], so roundFloor() worked as expected. - return rounded; - } - - // The offset _did_ change in [rounded, utcMillis]. Put differently, this means that none of the times in - // [previousTransition+1, utcMillis] were rounded, so the rounded time must be <= previousTransition. This means - // it's sufficient to try and round previousTransition down. 
- assert previousTransition < utcMillis; - utcMillis = previousTransition; - } while (true); - } - } - - @Override - public long nextRoundingValue(long utcMillis) { - long floor = round(utcMillis); - // add one unit and round to get to next rounded value - long next = round(field.add(floor, 1)); - if (next == floor) { - // in rare case we need to add more than one unit - next = round(field.add(floor, 2)); - } - return next; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeByte(unit.id()); - out.writeString(timeZone.getID()); - } - - @Override - public int hashCode() { - return Objects.hash(unit, timeZone); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - TimeUnitRounding other = (TimeUnitRounding) obj; - return Objects.equals(unit, other.unit) && Objects.equals(timeZone, other.timeZone); - } - - @Override - public String toString() { - return "[" + timeZone + "][" + unit + "]"; - } - } - - /** - * Rounding time intervals - * - * @opensearch.internal - */ - static class TimeIntervalRounding extends Rounding { - - static final byte ID = 2; - - private final long interval; - private final DateTimeZone timeZone; - - TimeIntervalRounding(long interval, DateTimeZone timeZone) { - if (interval < 1) throw new IllegalArgumentException("Zero or negative time interval not supported"); - this.interval = interval; - this.timeZone = timeZone; - } - - TimeIntervalRounding(StreamInput in) throws IOException { - interval = in.readVLong(); - timeZone = DateTimeZone.forID(in.readString()); - } - - @Override - public byte id() { - return ID; - } - - @Override - public long round(long utcMillis) { - long timeLocal = timeZone.convertUTCToLocal(utcMillis); - long rounded = roundKey(timeLocal, interval) * interval; - long roundedUTC; - if (isInDSTGap(rounded) == false) { - roundedUTC = timeZone.convertLocalToUTC(rounded, true, utcMillis); - // check if we crossed DST transition, in this case we want the - // last rounded value before the transition - long transition = timeZone.previousTransition(utcMillis); - if (transition != utcMillis && transition > roundedUTC) { - roundedUTC = round(transition - 1); - } - } else { - /* - * Edge case where the rounded local time is illegal and landed - * in a DST gap. In this case, we choose 1ms tick after the - * transition date. We don't want the transition date itself - * because those dates, when rounded themselves, fall into the - * previous interval. This would violate the invariant that the - * rounding operation should be idempotent. - */ - roundedUTC = timeZone.previousTransition(utcMillis) + 1; - } - return roundedUTC; - } - - private static long roundKey(long value, long interval) { - if (value < 0) { - return (value - interval + 1) / interval; - } else { - return value / interval; - } - } - - /** - * Determine whether the local instant is a valid instant in the given - * time zone. The logic for this is taken from - * {@link DateTimeZone#convertLocalToUTC(long, boolean)} for the - * `strict` mode case, but instead of throwing an - * {@link IllegalInstantException}, which is costly, we want to return a - * flag indicating that the value is illegal in that time zone. 
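The isInDSTGap helper deleted just below detects an illegal local time by estimating the offset twice and comparing transitions; java.time, on which the non-deprecated org.opensearch.common.Rounding referenced above is based, expresses the same predicate directly. A hedged sketch, with an illustrative zone and date:

import java.time.LocalDateTime;
import java.time.ZoneId;
import java.time.zone.ZoneOffsetTransition;

// Illustrative java.time equivalent of the joda gap check being deleted:
// a local time falls in a DST gap iff the zone rules report a gap transition.
final class DstGap {
    static boolean isInGap(LocalDateTime local, ZoneId zone) {
        ZoneOffsetTransition t = zone.getRules().getTransition(local);
        return t != null && t.isGap();
    }

    public static void main(String[] args) {
        // 02:30 on 2023-03-12 does not exist in America/New_York (spring forward).
        System.out.println(isInGap(LocalDateTime.of(2023, 3, 12, 2, 30), ZoneId.of("America/New_York"))); // true
    }
}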
- */ - private boolean isInDSTGap(long instantLocal) { - if (timeZone.isFixed()) { - return false; - } - // get the offset at instantLocal (first estimate) - int offsetLocal = timeZone.getOffset(instantLocal); - // adjust instantLocal using the estimate and recalc the offset - int offset = timeZone.getOffset(instantLocal - offsetLocal); - // if the offsets differ, we must be near a DST boundary - if (offsetLocal != offset) { - // determine if we are in the DST gap - long nextLocal = timeZone.nextTransition(instantLocal - offsetLocal); - if (nextLocal == (instantLocal - offsetLocal)) { - nextLocal = Long.MAX_VALUE; - } - long nextAdjusted = timeZone.nextTransition(instantLocal - offset); - if (nextAdjusted == (instantLocal - offset)) { - nextAdjusted = Long.MAX_VALUE; - } - if (nextLocal != nextAdjusted) { - // we are in the DST gap - return true; - } - } - return false; - } - - @Override - public long nextRoundingValue(long time) { - long timeLocal = time; - timeLocal = timeZone.convertUTCToLocal(time); - long next = timeLocal + interval; - return timeZone.convertLocalToUTC(next, false); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVLong(interval); - out.writeString(timeZone.getID()); - } - - @Override - public int hashCode() { - return Objects.hash(interval, timeZone); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - TimeIntervalRounding other = (TimeIntervalRounding) obj; - return Objects.equals(interval, other.interval) && Objects.equals(timeZone, other.timeZone); - } - } - - /** - * Rounding streams - * - * @opensearch.internal - */ - public static class Streams { - - public static void write(Rounding rounding, StreamOutput out) throws IOException { - out.writeByte(rounding.id()); - rounding.writeTo(out); - } - - public static Rounding read(StreamInput in) throws IOException { - Rounding rounding; - byte id = in.readByte(); - switch (id) { - case TimeUnitRounding.ID: - rounding = new TimeUnitRounding(in); - break; - case TimeIntervalRounding.ID: - rounding = new TimeIntervalRounding(in); - break; - default: - throw new OpenSearchException("unknown rounding id [" + id + "]"); - } - return rounding; - } - - } - -} diff --git a/server/src/main/java/org/opensearch/common/rounding/package-info.java b/server/src/main/java/org/opensearch/common/rounding/package-info.java deleted file mode 100644 index 5fa3e39c6a786..0000000000000 --- a/server/src/main/java/org/opensearch/common/rounding/package-info.java +++ /dev/null @@ -1,10 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/** Base DateTime rounding package. */ -package org.opensearch.common.rounding; diff --git a/server/src/main/java/org/opensearch/common/settings/AbstractScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/AbstractScopedSettings.java index 8b7a2a82e5cb1..117ed66fcb451 100644 --- a/server/src/main/java/org/opensearch/common/settings/AbstractScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/AbstractScopedSettings.java @@ -254,7 +254,7 @@ public synchronized <T> void addSettingsUpdateConsumer(Setting<T> setting, Consu /** * Adds a settings consumer that is only executed if any setting in the supplied list of settings is changed. 
In that case all the * settings are specified in the argument are returned. - * + * <p> * Also automatically adds empty consumers for all settings in order to activate logging */ public synchronized void addSettingsUpdateConsumer(Consumer<Settings> consumer, List<? extends Setting<?>> settings) { @@ -265,7 +265,7 @@ public synchronized void addSettingsUpdateConsumer(Consumer<Settings> consumer, * Adds a settings consumer that is only executed if any setting in the supplied list of settings is changed. In that case all the * settings are specified in the argument are returned. The validator is run across all specified settings before the settings are * applied. - * + * <p> * Also automatically adds empty consumers for all settings in order to activate logging */ public synchronized void addSettingsUpdateConsumer( diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 46a43842451d9..de7b25b12739a 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -32,30 +32,11 @@ package org.opensearch.common.settings; import org.apache.logging.log4j.LogManager; -import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; -import org.opensearch.action.search.CreatePitController; -import org.opensearch.cluster.routing.allocation.decider.NodeLoadAwareAllocationDecider; -import org.opensearch.common.util.FeatureFlags; -import org.opensearch.index.IndexModule; -import org.opensearch.index.IndexSettings; -import org.opensearch.index.IndexingPressure; -import org.opensearch.index.remote.RemoteRefreshSegmentPressureSettings; -import org.opensearch.index.SegmentReplicationPressureService; -import org.opensearch.index.ShardIndexingPressureMemoryManager; -import org.opensearch.index.ShardIndexingPressureSettings; -import org.opensearch.index.ShardIndexingPressureStore; -import org.opensearch.search.backpressure.settings.NodeDuressSettings; -import org.opensearch.search.backpressure.settings.SearchBackpressureSettings; -import org.opensearch.search.backpressure.settings.SearchShardTaskSettings; -import org.opensearch.search.backpressure.settings.SearchTaskSettings; -import org.opensearch.tasks.TaskCancellationMonitoringSettings; -import org.opensearch.tasks.TaskManager; -import org.opensearch.tasks.TaskResourceTrackingService; -import org.opensearch.tasks.consumer.TopNSearchTasksLogger; -import org.opensearch.telemetry.TelemetrySettings; -import org.opensearch.watcher.ResourceWatcherService; import org.opensearch.action.admin.cluster.configuration.TransportAddVotingConfigExclusionsAction; import org.opensearch.action.admin.indices.close.TransportCloseIndexAction; +import org.opensearch.action.search.CreatePitController; +import org.opensearch.action.search.SearchRequestSlowLog; +import org.opensearch.action.search.SearchRequestStats; import org.opensearch.action.search.TransportSearchAction; import org.opensearch.action.support.AutoCreateIndex; import org.opensearch.action.support.DestructiveOperations; @@ -81,6 +62,7 @@ import org.opensearch.cluster.metadata.IndexGraveyard; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.routing.OperationRouting; +import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; import org.opensearch.cluster.routing.allocation.DiskThresholdSettings; import 
org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; @@ -90,17 +72,22 @@ import org.opensearch.cluster.routing.allocation.decider.DiskThresholdDecider; import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.FilterAllocationDecider; +import org.opensearch.cluster.routing.allocation.decider.NodeLoadAwareAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.SameShardAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.opensearch.cluster.service.ClusterApplierService; -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.cluster.service.ClusterManagerTaskThrottler; import org.opensearch.cluster.service.ClusterManagerService; +import org.opensearch.cluster.service.ClusterManagerTaskThrottler; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.settings.CacheSettings; import org.opensearch.common.logging.Loggers; import org.opensearch.common.network.NetworkModule; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.Setting.Property; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.ThreadContext; @@ -114,7 +101,18 @@ import org.opensearch.gateway.DanglingIndicesState; import org.opensearch.gateway.GatewayService; import org.opensearch.gateway.PersistedClusterStateService; +import org.opensearch.gateway.remote.RemoteClusterStateService; import org.opensearch.http.HttpTransportSettings; +import org.opensearch.index.IndexModule; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.IndexingPressure; +import org.opensearch.index.SegmentReplicationPressureService; +import org.opensearch.index.ShardIndexingPressureMemoryManager; +import org.opensearch.index.ShardIndexingPressureSettings; +import org.opensearch.index.ShardIndexingPressureStore; +import org.opensearch.index.remote.RemoteStorePressureSettings; +import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; +import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.indices.IndexingMemoryController; import org.opensearch.indices.IndicesQueryCache; import org.opensearch.indices.IndicesRequestCache; @@ -135,24 +133,39 @@ import org.opensearch.node.Node; import org.opensearch.node.Node.DiscoverySettings; import org.opensearch.node.NodeRoleSettings; +import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.node.resource.tracker.ResourceTrackerSettings; import org.opensearch.persistent.PersistentTasksClusterService; import org.opensearch.persistent.decider.EnableAssignmentDecider; import org.opensearch.plugins.PluginsService; +import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlSettings; +import org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings; +import org.opensearch.ratelimitting.admissioncontrol.settings.IoBasedAdmissionControllerSettings; import org.opensearch.repositories.fs.FsRepository; 
import org.opensearch.rest.BaseRestHandler; import org.opensearch.script.ScriptService; import org.opensearch.search.SearchModule; import org.opensearch.search.SearchService; import org.opensearch.search.aggregations.MultiBucketConsumerService; +import org.opensearch.search.backpressure.settings.NodeDuressSettings; +import org.opensearch.search.backpressure.settings.SearchBackpressureSettings; +import org.opensearch.search.backpressure.settings.SearchShardTaskSettings; +import org.opensearch.search.backpressure.settings.SearchTaskSettings; import org.opensearch.search.fetch.subphase.highlight.FastVectorHighlighter; import org.opensearch.snapshots.InternalSnapshotsInfoService; import org.opensearch.snapshots.SnapshotsService; +import org.opensearch.tasks.TaskCancellationMonitoringSettings; +import org.opensearch.tasks.TaskManager; +import org.opensearch.tasks.TaskResourceTrackingService; +import org.opensearch.tasks.consumer.TopNSearchTasksLogger; +import org.opensearch.telemetry.TelemetrySettings; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.ProxyConnectionStrategy; import org.opensearch.transport.RemoteClusterService; import org.opensearch.transport.RemoteConnectionStrategy; import org.opensearch.transport.SniffConnectionStrategy; import org.opensearch.transport.TransportSettings; +import org.opensearch.watcher.ResourceWatcherService; import java.util.Arrays; import java.util.Collections; @@ -165,8 +178,9 @@ /** * Encapsulates all valid cluster level settings. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ClusterSettings extends AbstractScopedSettings { public ClusterSettings(final Settings nodeSettings, final Set<Setting<?>> settingsSet) { @@ -239,6 +253,7 @@ public void apply(Settings value, Settings current, Settings previous) { BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING, BalancedShardsAllocator.PREFER_PRIMARY_SHARD_BALANCE, BalancedShardsAllocator.SHARD_MOVE_PRIMARY_FIRST_SETTING, + BalancedShardsAllocator.SHARD_MOVEMENT_STRATEGY_SETTING, BalancedShardsAllocator.THRESHOLD_SETTING, BreakerSettings.CIRCUIT_BREAKER_LIMIT_SETTING, BreakerSettings.CIRCUIT_BREAKER_OVERHEAD_SETTING, @@ -258,6 +273,8 @@ public void apply(Settings value, Settings current, Settings previous) { IndicesQueryCache.INDICES_CACHE_QUERY_SIZE_SETTING, IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING, IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING, + IndicesService.CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING, + IndicesService.CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING, IndicesService.INDICES_ID_FIELD_DATA_ENABLED_SETTING, IndicesService.WRITE_DANGLING_INDICES_INFO_SETTING, IndicesService.CLUSTER_REPLICATION_TYPE_SETTING, @@ -278,6 +295,9 @@ public void apply(Settings value, Settings current, Settings previous) { RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING, RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_FILE_CHUNKS_SETTING, RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_OPERATIONS_SETTING, + RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_REMOTE_STORE_STREAMS_SETTING, + RecoverySettings.INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT, + RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_REPLICAS_RECOVERIES_SETTING, 
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING, @@ -335,6 +355,7 @@ public void apply(Settings value, Settings current, Settings previous) { HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_SIZE, HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH, HttpTransportSettings.SETTING_HTTP_READ_TIMEOUT, + HttpTransportSettings.SETTING_HTTP_CONNECT_TIMEOUT, HttpTransportSettings.SETTING_HTTP_RESET_COOKIES, HttpTransportSettings.OLD_SETTING_HTTP_TCP_NO_DELAY, HttpTransportSettings.SETTING_HTTP_TCP_NO_DELAY, @@ -364,6 +385,9 @@ public void apply(Settings value, Settings current, Settings previous) { SearchService.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS, TransportSearchAction.SHARD_COUNT_LIMIT_SETTING, TransportSearchAction.SEARCH_CANCEL_AFTER_TIME_INTERVAL_SETTING, + TransportSearchAction.SEARCH_QUERY_METRICS_ENABLED_SETTING, + TransportSearchAction.SEARCH_PHASE_TOOK_ENABLED, + SearchRequestStats.SEARCH_REQUEST_STATS_ENABLED, RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE, SniffConnectionStrategy.REMOTE_CONNECTIONS_PER_CLUSTER, RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING, @@ -447,6 +471,7 @@ public void apply(Settings value, Settings current, Settings previous) { NetworkService.TCP_CONNECT_TIMEOUT, IndexSettings.QUERY_STRING_ANALYZE_WILDCARD, IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD, + IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY, ScriptService.SCRIPT_GENERAL_CACHE_SIZE_SETTING, ScriptService.SCRIPT_GENERAL_CACHE_EXPIRE_SETTING, ScriptService.SCRIPT_GENERAL_MAX_COMPILATIONS_RATE_SETTING, @@ -638,24 +663,60 @@ public void apply(Settings value, Settings current, Settings previous) { SearchBackpressureSettings.SETTING_CANCELLATION_BURST, // deprecated SegmentReplicationPressureService.SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED, SegmentReplicationPressureService.MAX_INDEXING_CHECKPOINTS, - SegmentReplicationPressureService.MAX_REPLICATION_TIME_SETTING, + SegmentReplicationPressureService.MAX_REPLICATION_TIME_BACKPRESSURE_SETTING, + SegmentReplicationPressureService.MAX_REPLICATION_LIMIT_STALE_REPLICA_SETTING, SegmentReplicationPressureService.MAX_ALLOWED_STALE_SHARDS, + // Settings related to resource trackers + ResourceTrackerSettings.GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING, + ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING, + ResourceTrackerSettings.GLOBAL_IO_USAGE_AC_WINDOW_DURATION_SETTING, + // Settings related to Searchable Snapshots Node.NODE_SEARCH_CACHE_SIZE_SETTING, + FileCache.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING, // Settings related to Remote Refresh Segment Pressure - RemoteRefreshSegmentPressureSettings.REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED, - RemoteRefreshSegmentPressureSettings.BYTES_LAG_VARIANCE_FACTOR, - RemoteRefreshSegmentPressureSettings.UPLOAD_TIME_LAG_VARIANCE_FACTOR, - RemoteRefreshSegmentPressureSettings.MIN_CONSECUTIVE_FAILURES_LIMIT, - RemoteRefreshSegmentPressureSettings.UPLOAD_BYTES_MOVING_AVERAGE_WINDOW_SIZE, - RemoteRefreshSegmentPressureSettings.UPLOAD_BYTES_PER_SEC_MOVING_AVERAGE_WINDOW_SIZE, - RemoteRefreshSegmentPressureSettings.UPLOAD_TIME_MOVING_AVERAGE_WINDOW_SIZE, + RemoteStorePressureSettings.REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED, + RemoteStorePressureSettings.BYTES_LAG_VARIANCE_FACTOR, + RemoteStorePressureSettings.UPLOAD_TIME_LAG_VARIANCE_FACTOR, + RemoteStorePressureSettings.MIN_CONSECUTIVE_FAILURES_LIMIT, + + // Settings related to Remote Store stats + RemoteStoreStatsTrackerFactory.MOVING_AVERAGE_WINDOW_SIZE, // Related to monitoring 
of task cancellation TaskCancellationMonitoringSettings.IS_ENABLED_SETTING, - TaskCancellationMonitoringSettings.DURATION_MILLIS_SETTING + TaskCancellationMonitoringSettings.DURATION_MILLIS_SETTING, + + // Search request slow log settings + SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_WARN_SETTING, + SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_INFO_SETTING, + SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_DEBUG_SETTING, + SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_TRACE_SETTING, + SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_LEVEL, + + // Remote cluster state settings + RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING, + RemoteClusterStateService.INDEX_METADATA_UPLOAD_TIMEOUT_SETTING, + RemoteClusterStateService.GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING, + RemoteClusterStateService.METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING, + RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING, + RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING, + IndicesService.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, + IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING, + AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE, + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE, + CpuBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT, + CpuBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT, + IoBasedAdmissionControllerSettings.IO_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE, + IoBasedAdmissionControllerSettings.SEARCH_IO_USAGE_LIMIT, + IoBasedAdmissionControllerSettings.INDEXING_IO_USAGE_LIMIT, + IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING, + + // Concurrent segment search settings + SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING, + SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING ) ) ); @@ -668,15 +729,15 @@ public void apply(Settings value, Settings current, Settings previous) { * setting should be moved to {@link #BUILT_IN_CLUSTER_SETTINGS}. 
*/ public static final Map<List<String>, List<Setting>> FEATURE_FLAGGED_CLUSTER_SETTINGS = Map.of( - List.of(FeatureFlags.REMOTE_STORE), + List.of(FeatureFlags.TELEMETRY), List.of( - IndicesService.CLUSTER_REMOTE_STORE_ENABLED_SETTING, - IndicesService.CLUSTER_REMOTE_STORE_REPOSITORY_SETTING, - IndicesService.CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING + TelemetrySettings.TRACER_ENABLED_SETTING, + TelemetrySettings.TRACER_SAMPLER_PROBABILITY, + TelemetrySettings.METRICS_PUBLISH_INTERVAL_SETTING, + TelemetrySettings.TRACER_FEATURE_ENABLED_SETTING, + TelemetrySettings.METRICS_FEATURE_ENABLED_SETTING ), - List.of(FeatureFlags.CONCURRENT_SEGMENT_SEARCH), - List.of(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING), - List.of(FeatureFlags.TELEMETRY), - List.of(TelemetrySettings.TRACER_ENABLED_SETTING) + List.of(FeatureFlags.PLUGGABLE_CACHE), + List.of(CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE)) ); } diff --git a/server/src/main/java/org/opensearch/common/settings/ConsistentSettingsService.java b/server/src/main/java/org/opensearch/common/settings/ConsistentSettingsService.java index 76f5721c85efd..ecd38810e8636 100644 --- a/server/src/main/java/org/opensearch/common/settings/ConsistentSettingsService.java +++ b/server/src/main/java/org/opensearch/common/settings/ConsistentSettingsService.java @@ -46,6 +46,7 @@ import javax.crypto.SecretKey; import javax.crypto.SecretKeyFactory; import javax.crypto.spec.PBEKeySpec; + import java.nio.charset.StandardCharsets; import java.security.NoSuchAlgorithmException; import java.security.spec.InvalidKeySpecException; diff --git a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java index ed01347b115ea..4cf7f22c014dd 100644 --- a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java @@ -8,13 +8,11 @@ package org.opensearch.common.settings; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.Set; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.util.FeatureFlags; +import java.util.Set; + /** * Encapsulates all valid feature flag level settings. 
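The reshaped FEATURE_FLAGGED_CLUSTER_SETTINGS map above keys groups of settings by the feature flags that guard them. A hypothetical helper sketching the intended contract (a group only takes effect while every flag in its key is enabled); the signature is simplified to generic lists, and a FeatureFlags.isEnabled(String) style check is assumed:

    import java.util.HashSet;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    // Collect the settings whose guarding feature flags are all enabled.
    static Set<Setting<?>> enabledFlaggedSettings(Map<List<String>, List<Setting<?>>> flagged) {
        Set<Setting<?>> result = new HashSet<>();
        for (Map.Entry<List<String>, List<Setting<?>>> entry : flagged.entrySet()) {
            if (entry.getKey().stream().allMatch(FeatureFlags::isEnabled)) {
                result.addAll(entry.getValue());
            }
        }
        return result;
    }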
* @@ -31,16 +29,14 @@ protected FeatureFlagSettings( super(settings, settingsSet, settingUpgraders, scope); } - public static final Set<Setting<?>> BUILT_IN_FEATURE_FLAGS = Collections.unmodifiableSet( - new HashSet<>( - Arrays.asList( - FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL_SETTING, - FeatureFlags.REMOTE_STORE_SETTING, - FeatureFlags.EXTENSIONS_SETTING, - FeatureFlags.IDENTITY_SETTING, - FeatureFlags.CONCURRENT_SEGMENT_SEARCH_SETTING, - FeatureFlags.TELEMETRY_SETTING - ) - ) + public static final Set<Setting<?>> BUILT_IN_FEATURE_FLAGS = Set.of( + FeatureFlags.EXTENSIONS_SETTING, + FeatureFlags.IDENTITY_SETTING, + FeatureFlags.TELEMETRY_SETTING, + FeatureFlags.DATETIME_FORMATTER_CACHING_SETTING, + FeatureFlags.WRITEABLE_REMOTE_INDEX_SETTING, + FeatureFlags.DOC_ID_FUZZY_SET_SETTING, + FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING, + FeatureFlags.PLUGGABLE_CACHE_SETTING ); } diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 3cc7c351fe1bf..49bb3abf1decd 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -38,16 +38,18 @@ import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.logging.Loggers; import org.opensearch.common.settings.Setting.Property; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; import org.opensearch.index.IndexSortConfig; import org.opensearch.index.IndexingSlowLog; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.LogByteSizeMergePolicyProvider; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.MergeSchedulerConfig; import org.opensearch.index.SearchSlowLog; +import org.opensearch.index.TieredMergePolicyProvider; import org.opensearch.index.cache.bitset.BitsetFilterCache; import org.opensearch.index.engine.EngineConfig; import org.opensearch.index.fielddata.IndexFieldDataService; @@ -70,12 +72,15 @@ * Encapsulates all valid index level settings. 
* @see Property#IndexScope * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class IndexScopedSettings extends AbstractScopedSettings { public static final Predicate<String> INDEX_SETTINGS_KEY_PREDICATE = (s) -> s.startsWith(IndexMetadata.INDEX_SETTING_PREFIX); + public static final Predicate<String> ARCHIVED_SETTINGS_KEY_PREDICATE = (s) -> s.startsWith(ARCHIVED_SETTINGS_PREFIX); + public static final Set<Setting<?>> BUILT_IN_INDEX_SETTINGS = Collections.unmodifiableSet( new HashSet<>( Arrays.asList( @@ -118,14 +123,14 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_MAX_SOURCE_CHARS_TO_LOG_SETTING, - MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING, + TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING, IndexSortConfig.INDEX_SORT_FIELD_SETTING, IndexSortConfig.INDEX_SORT_ORDER_SETTING, IndexSortConfig.INDEX_SORT_MISSING_SETTING, @@ -144,6 +149,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.MAX_ADJACENCY_MATRIX_FILTERS_SETTING, IndexSettings.MAX_ANALYZED_OFFSET_SETTING, IndexSettings.MAX_TERMS_COUNT_SETTING, + IndexSettings.MAX_NESTED_QUERY_DEPTH_SETTING, IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING, IndexSettings.DEFAULT_FIELD_SETTING, IndexSettings.QUERY_STRING_LENIENT_SETTING, @@ -169,6 +175,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING, IndexSettings.INDEX_SEARCH_IDLE_AFTER, IndexSettings.INDEX_SEARCH_THROTTLED, + IndexSettings.INDEX_UNREFERENCED_FILE_CLEANUP, IndexFieldDataService.INDEX_FIELDDATA_CACHE_KEY, FieldMapper.IGNORE_MALFORMED_SETTING, FieldMapper.COERCE_SETTING, @@ -183,6 +190,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexModule.INDEX_STORE_TYPE_SETTING, IndexModule.INDEX_STORE_PRE_LOAD_SETTING, IndexModule.INDEX_STORE_HYBRID_MMAP_EXTENSIONS, + IndexModule.INDEX_STORE_HYBRID_NIO_EXTENSIONS, IndexModule.INDEX_RECOVERY_TYPE_SETTING, IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING, FsDirectoryFactory.INDEX_LOCK_FACTOR_SETTING, @@ -198,6 +206,13 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.INDEX_MERGE_ON_FLUSH_ENABLED, IndexSettings.INDEX_MERGE_ON_FLUSH_MAX_FULL_FLUSH_MERGE_WAIT_TIME, IndexSettings.INDEX_MERGE_ON_FLUSH_POLICY, + IndexSettings.INDEX_MERGE_POLICY, + 
LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MERGE_FACTOR_SETTING, + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MIN_MERGE_SETTING, + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_SETTING, + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_FOR_FORCED_MERGE_SETTING, + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGED_DOCS_SETTING, + LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING, IndexSettings.DEFAULT_SEARCH_PIPELINE, // Settings for Searchable Snapshots @@ -208,6 +223,18 @@ public final class IndexScopedSettings extends AbstractScopedSettings { // Settings for remote translog IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, + IndexSettings.INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING, + + // Settings for remote store enablement + IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING, + IndexMetadata.INDEX_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING, + IndexMetadata.INDEX_REMOTE_TRANSLOG_REPOSITORY_SETTING, + + IndexSettings.INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING, + IndexSettings.INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING, + + // Settings for concurrent segment search + IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING, // validate that built-in similarities don't get redefined Setting.groupSetting("index.similarity.", (s) -> { @@ -231,16 +258,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { * is ready for production release, the feature flag can be removed, and the * setting should be moved to {@link #BUILT_IN_INDEX_SETTINGS}. */ - public static final Map<String, List<Setting>> FEATURE_FLAGGED_INDEX_SETTINGS = Map.of( - FeatureFlags.REMOTE_STORE, - List.of( - IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING, - IndexMetadata.INDEX_REMOTE_STORE_REPOSITORY_SETTING, - IndexMetadata.INDEX_REMOTE_TRANSLOG_REPOSITORY_SETTING - ), - FeatureFlags.CONCURRENT_SEGMENT_SEARCH, - List.of(IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING) - ); + public static final Map<String, List<Setting>> FEATURE_FLAGGED_INDEX_SETTINGS = Map.of(); public static final IndexScopedSettings DEFAULT_SCOPED_SETTINGS = new IndexScopedSettings(Settings.EMPTY, BUILT_IN_INDEX_SETTINGS); @@ -272,7 +290,7 @@ public boolean isPrivateSetting(String key) { case IndexMetadata.SETTING_HISTORY_UUID: case IndexMetadata.SETTING_VERSION_UPGRADED: case IndexMetadata.SETTING_INDEX_PROVIDED_NAME: - case MergePolicyConfig.INDEX_MERGE_ENABLED: + case MergePolicyProvider.INDEX_MERGE_ENABLED: // we keep the shrink settings for BWC - this can be removed in 8.0 // we can't remove in 7 since this setting might be baked into an index coming in via a full cluster restart from 6.0 case "index.shrink.source.uuid": diff --git a/server/src/main/java/org/opensearch/common/settings/KeyStoreWrapper.java b/server/src/main/java/org/opensearch/common/settings/KeyStoreWrapper.java index 257c4c28f0045..1ad3b7ab8875a 100644 --- a/server/src/main/java/org/opensearch/common/settings/KeyStoreWrapper.java +++ b/server/src/main/java/org/opensearch/common/settings/KeyStoreWrapper.java @@ -56,6 +56,7 @@ import javax.crypto.spec.GCMParameterSpec; import javax.crypto.spec.PBEKeySpec; import javax.crypto.spec.SecretKeySpec; + import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.DataInputStream; @@ -87,7 +88,7 @@ /** * A disk based container for sensitive settings in OpenSearch. - * + * <p> * Loading a keystore has 2 phases. First, call {@link #load(Path)}. 
Then call * {@link #decrypt(char[])} with the keystore password, or an empty char array if * {@link #hasPassword()} is {@code false}. Loading and decrypting should happen @@ -146,7 +147,7 @@ private static class Entry { /** * The number of bits for the cipher key. - * + * <p> * Note: The Oracle JDK 8 ships with a limited JCE policy that restricts key length for AES to 128 bits. * This can be increased to 256 bits once minimum java 9 is the minimum java version. * See http://www.oracle.com/technetwork/java/javase/terms/readme/jdk9-readme-3852447.html#jce @@ -233,7 +234,7 @@ public static KeyStoreWrapper load(Path configDir) throws IOException { /** * Loads information about the OpenSearch keystore from the provided config directory. - * + * <p> * {@link #decrypt(char[])} must be called before reading or writing any entries. * Returns {@code null} if no keystore exists. */ @@ -357,7 +358,7 @@ private Cipher createCipher(int opmode, char[] password, byte[] salt, byte[] iv) /** * Decrypts the underlying keystore data. - * + * <p> * This may only be called once. */ public void decrypt(char[] password) throws GeneralSecurityException, IOException { diff --git a/server/src/main/java/org/opensearch/common/settings/PropertyPlaceholder.java b/server/src/main/java/org/opensearch/common/settings/PropertyPlaceholder.java index 76b5642949e03..655ffc461cd91 100644 --- a/server/src/main/java/org/opensearch/common/settings/PropertyPlaceholder.java +++ b/server/src/main/java/org/opensearch/common/settings/PropertyPlaceholder.java @@ -32,7 +32,7 @@ package org.opensearch.common.settings; -import org.opensearch.common.Strings; +import org.opensearch.core.common.Strings; import java.util.HashSet; import java.util.Objects; diff --git a/server/src/main/java/org/opensearch/common/settings/SecureSetting.java b/server/src/main/java/org/opensearch/common/settings/SecureSetting.java index 139c6f651a6f1..1855270b016b3 100644 --- a/server/src/main/java/org/opensearch/common/settings/SecureSetting.java +++ b/server/src/main/java/org/opensearch/common/settings/SecureSetting.java @@ -32,21 +32,20 @@ package org.opensearch.common.settings; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.opensearch.common.Booleans; import org.opensearch.common.util.ArrayUtils; +import org.opensearch.core.common.settings.SecureString; import java.io.InputStream; import java.security.GeneralSecurityException; import java.util.EnumSet; import java.util.Set; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.core.common.settings.SecureString; - /** * A secure setting. - * + * <p> * This class allows access to settings from the OpenSearch keystore. * * @opensearch.internal @@ -153,7 +152,7 @@ public void diff(Settings.Builder builder, Settings source, Settings defaultSett /** * A setting which contains a sensitive string. - * + * <p> * This may be any sensitive string, e.g. a username, a password, an auth token, etc. */ public static Setting<SecureString> secureString(String name, Setting<SecureString> fallback, Property... properties) { @@ -180,7 +179,7 @@ public static Setting<SecureString> insecureString(String name, String secureNam /** * A setting which contains a file. Reading the setting opens an input stream to the file. - * + * <p> * This may be any sensitive file, e.g. a set of credentials normally in plaintext. */ public static Setting<InputStream> secureFile(String name, Setting<InputStream> fallback, Property... 
properties) { diff --git a/server/src/main/java/org/opensearch/common/settings/SecureSettings.java b/server/src/main/java/org/opensearch/common/settings/SecureSettings.java index 2fe7d4834c92a..3732478243dab 100644 --- a/server/src/main/java/org/opensearch/common/settings/SecureSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/SecureSettings.java @@ -32,6 +32,7 @@ package org.opensearch.common.settings; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.settings.SecureString; import java.io.Closeable; @@ -43,8 +44,9 @@ /** * An accessor for settings which are securely stored. See {@link SecureSetting}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface SecureSettings extends Closeable { /** Returns true iff the settings are loaded and retrievable. */ diff --git a/server/src/main/java/org/opensearch/common/settings/Setting.java b/server/src/main/java/org/opensearch/common/settings/Setting.java index 3bf2988e88e5a..fea4c165809ba 100644 --- a/server/src/main/java/org/opensearch/common/settings/Setting.java +++ b/server/src/main/java/org/opensearch/common/settings/Setting.java @@ -38,22 +38,24 @@ import org.opensearch.Version; import org.opensearch.common.Booleans; import org.opensearch.common.Nullable; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.regex.Regex; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.MemorySizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.DeprecationHandler; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import java.io.IOException; import java.util.ArrayList; @@ -101,15 +103,17 @@ * } * </pre> * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Setting<T> implements ToXContentObject { /** * Property of the setting * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Property { /** * should be filtered in some api (mask password/credentials) @@ -602,7 +606,7 @@ public final XContentBuilder toXContent(XContentBuilder builder, Params params) @Override public String toString() { - return Strings.toString(XContentType.JSON, this, true, true); + return Strings.toString(MediaTypeRegistry.JSON, this, true, true); } /** @@ -634,8 +638,9 @@ public Setting<T> getConcreteSetting(String key) { * Allows a setting to declare a dependency on another setting being set. Optionally, a setting can validate the value of the dependent * setting. 
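Since the hunks above promote the Setting API to @PublicApi, a brief declaration sketch may help; the key name and default are hypothetical:

    // A dynamic, node-scoped boolean setting with a default of true.
    public static final Setting<Boolean> MY_FEATURE_ENABLED = Setting.boolSetting(
        "my.plugin.feature.enabled",   // hypothetical key
        true,                          // default value
        Setting.Property.Dynamic,
        Setting.Property.NodeScope
    );

    // Reading it later: boolean enabled = MY_FEATURE_ENABLED.get(settings);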
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface SettingDependency { /** @@ -783,8 +788,9 @@ public String toString() { /** * Allows an affix setting to declare a dependency on another affix setting. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface AffixSettingDependency extends SettingDependency { @Override @@ -795,8 +801,9 @@ public interface AffixSettingDependency extends SettingDependency { /** * An affix setting * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class AffixSetting<T> extends Setting<T> { private final AffixKey key; private final BiFunction<String, String, Setting<T>> delegateFactory; @@ -971,6 +978,9 @@ private Setting<T> getConcreteSetting(String namespace, String key) { * Get a setting with the given namespace filled in for prefix and suffix. */ public Setting<T> getConcreteSettingForNamespace(String namespace) { + if (namespace == null) { + throw new IllegalArgumentException("Namespace should not be null"); + } String fullKey = key.toConcreteKey(namespace).toString(); return getConcreteSetting(namespace, fullKey); } @@ -1025,9 +1035,10 @@ public Map<String, T> getAsMap(Settings settings) { * * @param <T> the type of the {@link Setting} * - * @opensearch.internal + * @opensearch.api */ @FunctionalInterface + @PublicApi(since = "1.0.0") public interface Validator<T> { /** @@ -1104,7 +1115,7 @@ public String innerGetRaw(final Settings settings) { builder.startObject(); subSettings.toXContent(builder, EMPTY_PARAMS); builder.endObject(); - return Strings.toString(builder); + return builder.toString(); } catch (IOException e) { throw new RuntimeException(e); } @@ -2047,7 +2058,7 @@ static boolean parseBoolean(String b, String key, boolean isFiltered) { } public static Setting<ByteSizeValue> byteSizeSetting(String key, ByteSizeValue value, Property... properties) { - return byteSizeSetting(key, (s) -> value.toString(), properties); + return byteSizeSetting(key, (s) -> value.getBytes() + ByteSizeUnit.BYTES.getSuffix(), properties); } public static Setting<ByteSizeValue> byteSizeSetting(String key, Setting<ByteSizeValue> fallbackSetting, Property... properties) { @@ -2337,7 +2348,7 @@ public static <T> Setting<List<T>> listSetting( private static List<String> parseableStringToList(String parsableString) { // fromXContent doesn't use named xcontent or deprecation. 
try ( - XContentParser xContentParser = XContentType.JSON.xContent() + XContentParser xContentParser = MediaTypeRegistry.JSON.xContent() .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, parsableString) ) { XContentParser.Token token = xContentParser.nextToken(); @@ -2359,13 +2370,13 @@ private static List<String> parseableStringToList(String parsableString) { private static String arrayToParsableString(List<String> array) { try { - XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent()); + XContentBuilder builder = XContentBuilder.builder(MediaTypeRegistry.JSON.xContent()); builder.startArray(); for (String element : array) { builder.value(element); } builder.endArray(); - return Strings.toString(builder); + return builder.toString(); } catch (IOException ex) { throw new OpenSearchException(ex); } @@ -2796,6 +2807,12 @@ public static <T> AffixSetting<T> prefixKeySetting(String prefix, Function<Strin return affixKeySetting(new AffixKey(prefix), delegateFactoryWithNamespace); } + public static <T> AffixSetting<T> suffixKeySetting(String suffix, Function<String, Setting<T>> delegateFactory) { + BiFunction<String, String, Setting<T>> delegateFactoryWithNamespace = (ns, k) -> delegateFactory.apply(k); + AffixKey affixKey = new AffixKey(null, suffix); + return affixKeySetting(affixKey, delegateFactoryWithNamespace); + } + /** * This setting type allows to validate settings that have the same type and a common prefix and suffix. For instance * storage.${backend}.enable=[true|false] can easily be added with this setting. Yet, affix key settings don't support updaters @@ -2833,8 +2850,9 @@ private static <T> AffixSetting<T> affixKeySetting( /** * Key for the setting * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface Key { boolean match(String key); } @@ -2842,8 +2860,9 @@ public interface Key { /** * A simple key for a setting * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class SimpleKey implements Key { protected final String key; @@ -2917,8 +2936,9 @@ public boolean match(String toTest) { * A key that allows for static pre and suffix. This is used for settings * that have dynamic namespaces like for different accounts etc. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class AffixKey implements Key { private final Pattern pattern; private final String prefix; @@ -2932,12 +2952,14 @@ public static final class AffixKey implements Key { assert prefix != null || suffix != null : "Either prefix or suffix must be non-null"; this.prefix = prefix; - if (prefix.endsWith(".") == false) { + if (prefix != null && prefix.endsWith(".") == false) { throw new IllegalArgumentException("prefix must end with a '.'"); } this.suffix = suffix; if (suffix == null) { pattern = Pattern.compile("(" + Pattern.quote(prefix) + "((?:[-\\w]+[.])*[-\\w]+$))"); + } else if (prefix == null) { + pattern = Pattern.compile("((?:[-\\w]+[.])*[-\\w]+\\." + Pattern.quote(suffix) + ")"); } else { // the last part of this regexp is to support both list and group keys pattern = Pattern.compile("(" + Pattern.quote(prefix) + "([-\\w]+)\\." 
+ Pattern.quote(suffix) + ")(?:\\..*)?"); diff --git a/server/src/main/java/org/opensearch/common/settings/SettingUpgrader.java b/server/src/main/java/org/opensearch/common/settings/SettingUpgrader.java index 1dabf020d8398..dac0b9b867768 100644 --- a/server/src/main/java/org/opensearch/common/settings/SettingUpgrader.java +++ b/server/src/main/java/org/opensearch/common/settings/SettingUpgrader.java @@ -32,6 +32,8 @@ package org.opensearch.common.settings; +import org.opensearch.common.annotation.PublicApi; + import java.util.List; /** @@ -39,8 +41,9 @@ * * @param <T> the type of the underlying setting * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface SettingUpgrader<T> { /** diff --git a/server/src/main/java/org/opensearch/common/settings/Settings.java b/server/src/main/java/org/opensearch/common/settings/Settings.java index a7649a1cd22c5..9da47ff3aa700 100644 --- a/server/src/main/java/org/opensearch/common/settings/Settings.java +++ b/server/src/main/java/org/opensearch/common/settings/Settings.java @@ -38,27 +38,28 @@ import org.opensearch.Version; import org.opensearch.common.Booleans; import org.opensearch.common.SetOnce; -import org.opensearch.common.Strings; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.logging.LogConfigurator; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.MemorySizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.io.IOUtils; import org.opensearch.common.xcontent.LoggingDeprecationHandler; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.core.xcontent.XContentParserUtils; import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.settings.SecureString; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.xcontent.XContentParserUtils; import java.io.IOException; import java.io.InputStream; @@ -79,6 +80,7 @@ import java.util.Map; import java.util.NoSuchElementException; import java.util.Objects; +import java.util.Optional; import java.util.Set; import java.util.TreeMap; import java.util.concurrent.TimeUnit; @@ -88,17 +90,19 @@ import java.util.stream.Collectors; import java.util.stream.Stream; -import static org.opensearch.common.unit.ByteSizeValue.parseBytesSizeValue; +import static org.opensearch.common.settings.AbstractScopedSettings.ARCHIVED_SETTINGS_PREFIX; import static org.opensearch.common.unit.TimeValue.parseTimeValue; +import static org.opensearch.core.common.unit.ByteSizeValue.parseBytesSizeValue; /** * An immutable settings implementation. 
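As the hunks above also make the immutable Settings class part of the public API, a small usage sketch; keys and values are illustrative:

    import java.util.List;

    // Build once via the Builder; the resulting Settings object is immutable.
    Settings settings = Settings.builder()
        .put("cluster.name", "demo-cluster")
        .putList("discovery.seed_hosts", "10.0.0.1", "10.0.0.2")
        .build();

    String name = settings.get("cluster.name");                      // "demo-cluster"
    List<String> seeds = settings.getAsList("discovery.seed_hosts"); // [10.0.0.1, 10.0.0.2]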
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class Settings implements ToXContentFragment { - public static final Settings EMPTY = new Builder().build(); + public static final Settings EMPTY = new Settings(Collections.emptyMap(), null); /** The raw settings from the full key to raw string value. */ private final Map<String, Object> settings; @@ -426,7 +430,7 @@ public List<String> getAsList(String key, List<String> defaultValue, Boolean com if (valueFromPrefix instanceof List) { return Collections.unmodifiableList((List<String>) valueFromPrefix); } else if (commaDelimited) { - String[] strings = org.opensearch.core.common.Strings.splitStringByCommaToArray(get(key)); + String[] strings = Strings.splitStringByCommaToArray(get(key)); if (strings.length > 0) { for (String string : strings) { result.add(string.trim()); @@ -454,7 +458,7 @@ public Map<String, Settings> getGroups(String settingPrefix) throws SettingsExce * Returns group settings for the given setting prefix. */ public Map<String, Settings> getGroups(String settingPrefix, boolean ignoreNonGrouped) throws SettingsException { - if (!org.opensearch.core.common.Strings.hasLength(settingPrefix)) { + if (!Strings.hasLength(settingPrefix)) { throw new IllegalArgumentException("illegal setting prefix " + settingPrefix); } if (settingPrefix.charAt(settingPrefix.length() - 1) != '.') { @@ -749,11 +753,12 @@ public Set<String> keySet() { * settings implementation. Use {@link Settings#builder()} in order to * construct it. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { - public static final Settings EMPTY_SETTINGS = new Builder().build(); + public static final Settings EMPTY_SETTINGS = Settings.EMPTY; // we use a sorted map for consistent serialization when using getAsMap() private final Map<String, Object> map = new TreeMap<>(); @@ -1081,9 +1086,9 @@ private void processLegacyLists(Map<String, Object> map) { */ public Builder loadFromMap(Map<String, ?> map) { // TODO: do this without a serialization round-trip - try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) { + try (XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON)) { builder.map(map); - return loadFromSource(Strings.toString(builder), builder.contentType()); + return loadFromSource(builder.toString(), builder.contentType()); } catch (IOException e) { throw new OpenSearchGenerationException("Failed to generate [" + map + "]", e); } @@ -1092,9 +1097,9 @@ public Builder loadFromMap(Map<String, ?> map) { /** * Loads settings from the actual string content that represents them using {@link #fromXContent(XContentParser)} */ - public Builder loadFromSource(String source, MediaType xContentType) { + public Builder loadFromSource(String source, MediaType mediaType) { try ( - XContentParser parser = XContentFactory.xContent(xContentType) + XContentParser parser = mediaType.xContent() .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, source) ) { this.put(fromXContent(parser, true, true)); @@ -1117,17 +1122,17 @@ public Builder loadFromPath(Path path) throws IOException { * Loads settings from a stream that represents them using {@link #fromXContent(XContentParser)} */ public Builder loadFromStream(String resourceName, InputStream is, boolean acceptNullValues) throws IOException { - final XContentType xContentType; + final MediaType mediaType; if (resourceName.endsWith(".json")) { - xContentType = 
XContentType.JSON; + mediaType = MediaTypeRegistry.JSON; } else if (resourceName.endsWith(".yml") || resourceName.endsWith(".yaml")) { - xContentType = XContentType.YAML; + mediaType = XContentType.YAML; } else { throw new IllegalArgumentException("unable to detect content type from resource name [" + resourceName + "]"); } // fromXContent doesn't use named xcontent or deprecation. try ( - XContentParser parser = XContentFactory.xContent(xContentType) + XContentParser parser = mediaType.xContent() .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, is) ) { if (parser.currentToken() == null) { @@ -1207,8 +1212,14 @@ public boolean shouldRemoveMissingPlaceholder(String placeholderName) { String value = propertyPlaceholder.replacePlaceholders(Settings.toString(entry.getValue()), placeholderResolver); // if the values exists and has length, we should maintain it in the map // otherwise, the replace process resolved into removing it - if (org.opensearch.core.common.Strings.hasLength(value)) { - entry.setValue(value); + if (Strings.hasLength(value) == true) { + // try to parse the value as a list first + final Optional<List<String>> optList = tryParseableStringToList(value); + if (optList.isPresent()) { + entry.setValue(optList.get()); + } else { + entry.setValue(value); + } } else { entryItr.remove(); } @@ -1217,8 +1228,8 @@ } /** - * Checks that all settings in the builder start with the specified prefix. - * + * Checks that all settings (except archived settings and wildcards) in the builder start with the specified prefix. + * <p> * If a setting doesn't start with the prefix, the builder appends the prefix to such setting. */ public Builder normalizePrefix(String prefix) { @@ -1227,7 +1238,7 @@ while (iterator.hasNext()) { Map.Entry<String, Object> entry = iterator.next(); String key = entry.getKey(); - if (key.startsWith(prefix) == false && key.endsWith("*") == false) { + if (key.startsWith(prefix) == false && key.endsWith("*") == false && key.startsWith(ARCHIVED_SETTINGS_PREFIX) == false) { replacements.put(prefix + key, entry.getValue()); iterator.remove(); } @@ -1244,6 +1255,34 @@ public Settings build() { processLegacyLists(map); return new Settings(map, secureSettings.get()); } + + /** + * Tries to parse the placeholder value as a list (e.g. [], ["a", "b", "c"]) + * @param parsableString placeholder value to parse + * @return the {@link Optional} result of the parsing attempt + */ + private static Optional<List<String>> tryParseableStringToList(String parsableString) { + // fromXContent doesn't use named xcontent or deprecation.
+ try ( + XContentParser xContentParser = MediaTypeRegistry.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, parsableString) + ) { + XContentParser.Token token = xContentParser.nextToken(); + if (token != XContentParser.Token.START_ARRAY) { + return Optional.empty(); + } + ArrayList<String> list = new ArrayList<>(); + while ((token = xContentParser.nextToken()) != XContentParser.Token.END_ARRAY) { + if (token != XContentParser.Token.VALUE_STRING) { + return Optional.empty(); + } + list.add(xContentParser.text()); + } + return Optional.of(list); + } catch (IOException e) { + return Optional.empty(); + } + } } // TODO We could use an FST internally to make things even faster and more compact @@ -1427,11 +1466,11 @@ public void close() throws IOException { @Override public String toString() { - try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { + try (XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder()) { builder.startObject(); toXContent(builder, new MapParams(Collections.singletonMap("flat_settings", "true"))); builder.endObject(); - return Strings.toString(builder); + return builder.toString(); } catch (IOException e) { throw new UncheckedIOException(e); } diff --git a/server/src/main/java/org/opensearch/common/settings/SettingsException.java b/server/src/main/java/org/opensearch/common/settings/SettingsException.java index d1b924827a651..5e1d2ada2529d 100644 --- a/server/src/main/java/org/opensearch/common/settings/SettingsException.java +++ b/server/src/main/java/org/opensearch/common/settings/SettingsException.java @@ -33,6 +33,7 @@ package org.opensearch.common.settings; import org.opensearch.OpenSearchException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.rest.RestStatus; @@ -41,8 +42,9 @@ /** * A generic failure to handle settings. 
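The tryParseableStringToList hook added above changes what placeholder resolution produces: a value that parses as a JSON array of strings is stored as a real list rather than a single string. A sketch of the observable effect; the SEED_HOSTS property and its value are hypothetical, and it is assumed here that replacePropertyPlaceholders() can resolve it:

    // Suppose the property SEED_HOSTS resolves to the string '["node-1", "node-2"]'.
    Settings settings = Settings.builder()
        .put("discovery.seed_hosts", "${SEED_HOSTS}")
        .replacePropertyPlaceholders()
        .build();

    // With this change, getAsList sees two entries instead of one raw string.
    List<String> hosts = settings.getAsList("discovery.seed_hosts");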
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SettingsException extends OpenSearchException { public SettingsException(String message) { diff --git a/server/src/main/java/org/opensearch/common/settings/SettingsModule.java b/server/src/main/java/org/opensearch/common/settings/SettingsModule.java index dd5502054684a..023e8f14e9f8a 100644 --- a/server/src/main/java/org/opensearch/common/settings/SettingsModule.java +++ b/server/src/main/java/org/opensearch/common/settings/SettingsModule.java @@ -32,15 +32,14 @@ package org.opensearch.common.settings; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; -import org.opensearch.common.Strings; +import org.apache.logging.log4j.Logger; import org.opensearch.common.inject.Binder; import org.opensearch.common.inject.Module; import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentType; import java.io.IOException; import java.util.Arrays; @@ -157,12 +156,12 @@ public SettingsModule( builder.append(System.lineSeparator()); builder.append(System.lineSeparator()); builder.append("curl -XPUT 'http://localhost:9200/_all/_settings?preserve_existing=true' -d '"); - try (XContentBuilder xContentBuilder = XContentBuilder.builder(XContentType.JSON.xContent())) { + try (XContentBuilder xContentBuilder = MediaTypeRegistry.JSON.contentBuilder()) { xContentBuilder.prettyPrint(); xContentBuilder.startObject(); indexSettings.toXContent(xContentBuilder, new ToXContent.MapParams(Collections.singletonMap("flat_settings", "true"))); xContentBuilder.endObject(); - builder.append(Strings.toString(xContentBuilder)); + builder.append(xContentBuilder); } builder.append("'"); builder.append(System.lineSeparator()); diff --git a/server/src/main/java/org/opensearch/common/settings/WriteableSetting.java b/server/src/main/java/org/opensearch/common/settings/WriteableSetting.java index 8664b14119694..4ca1650611caa 100644 --- a/server/src/main/java/org/opensearch/common/settings/WriteableSetting.java +++ b/server/src/main/java/org/opensearch/common/settings/WriteableSetting.java @@ -10,9 +10,6 @@ import org.opensearch.Version; import org.opensearch.common.Nullable; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.settings.Setting.ByteSizeValueParser; import org.opensearch.common.settings.Setting.DoubleParser; import org.opensearch.common.settings.Setting.FloatParser; @@ -23,8 +20,12 @@ import org.opensearch.common.settings.Setting.MinTimeValueParser; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Setting.RegexValidator; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.common.unit.ByteSizeValue; + import java.io.IOException; import java.util.Arrays; import java.util.EnumSet; diff --git a/server/src/main/java/org/opensearch/common/time/DateFormatter.java b/server/src/main/java/org/opensearch/common/time/DateFormatter.java index 57fee6a33bdb6..0712b7b050bec 100644 --- 
a/server/src/main/java/org/opensearch/common/time/DateFormatter.java +++ b/server/src/main/java/org/opensearch/common/time/DateFormatter.java @@ -32,8 +32,9 @@ package org.opensearch.common.time; -import org.joda.time.DateTime; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; +import org.joda.time.DateTime; import java.time.Instant; import java.time.ZoneId; @@ -49,8 +50,9 @@ /** * Base Date formatter * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface DateFormatter { /** @@ -126,6 +128,14 @@ default String formatJoda(DateTime dateTime) { */ String pattern(); + /** + * A name based format for this formatter. Can be one of the registered formatters like <code>epoch_millis</code> or + * a configured format like <code>HH:mm:ss</code> + * + * @return The name of this formatter + */ + String printPattern(); + /** * Returns the configured locale of the date formatter * @@ -147,7 +157,7 @@ default String formatJoda(DateTime dateTime) { */ DateMathParser toDateMathParser(); - static DateFormatter forPattern(String input) { + static DateFormatter forPattern(String input, String printPattern, Boolean canCacheFormatter) { if (Strings.hasLength(input) == false) { throw new IllegalArgumentException("No date pattern provided"); @@ -158,7 +168,28 @@ static DateFormatter forPattern(String input) { List<String> patterns = splitCombinedPatterns(format); List<DateFormatter> formatters = patterns.stream().map(DateFormatters::forPattern).collect(Collectors.toList()); - return JavaDateFormatter.combined(input, formatters); + DateFormatter printFormatter = formatters.get(0); + if (Strings.hasLength(printPattern)) { + String printFormat = strip8Prefix(printPattern); + try { + printFormatter = DateFormatters.forPattern(printFormat); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException("Invalid print format: " + e.getMessage(), e); + } + } + return JavaDateFormatter.combined(input, formatters, printFormatter, canCacheFormatter); + } + + static DateFormatter forPattern(String input) { + return forPattern(input, null, false); + } + + static DateFormatter forPattern(String input, String printPattern) { + return forPattern(input, printPattern, false); + } + + static DateFormatter forPattern(String input, Boolean canCacheFormatter) { + return forPattern(input, null, canCacheFormatter); } static String strip8Prefix(String input) { diff --git a/server/src/main/java/org/opensearch/common/time/DateFormatters.java b/server/src/main/java/org/opensearch/common/time/DateFormatters.java index 6c8b9282d8a77..527dce7677dd8 100644 --- a/server/src/main/java/org/opensearch/common/time/DateFormatters.java +++ b/server/src/main/java/org/opensearch/common/time/DateFormatters.java @@ -1299,6 +1299,41 @@ public class DateFormatters { .withResolverStyle(ResolverStyle.STRICT) ); + /** + * Returns RFC 3339 a popular ISO 8601 profile compatible date time formatter and parser. 
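The hunk above introduces a separate print pattern and opt-in parser caching to DateFormatter.forPattern. A minimal usage sketch of those overloads (the pattern strings are illustrative assumptions, not taken from this diff):

import org.opensearch.common.time.DateFormatter;

import java.time.Instant;
import java.time.ZoneOffset;

public class ForPatternSketch {
    public static void main(String[] args) {
        // Parse with either pattern; always print with the dedicated print pattern.
        DateFormatter formatter = DateFormatter.forPattern(
            "strict_date_optional_time||epoch_millis", // combined parse patterns
            "strict_date_optional_time"                // print pattern
        );
        System.out.println(formatter.printPattern()); // strict_date_optional_time
        System.out.println(formatter.format(Instant.ofEpochMilli(0).atZone(ZoneOffset.UTC)));

        // Opt into caching the last successful parser; per the hunk above this only
        // takes effect when the datetime formatter caching feature flag is enabled.
        DateFormatter cached = DateFormatter.forPattern("strict_date_optional_time||epoch_millis", true);
        cached.parse("2023-01-01T12:00:00Z");
        cached.parse("2023-01-02T12:00:00Z"); // likely served by the cached parser
    }
}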
+ * This is not fully compatible with the existing spec; it is more lenient and closely follows the W3C note on datetime + */ + + public static final DateFormatter RFC3339_LENIENT_DATE_FORMATTER = new JavaDateFormatter( + "rfc3339_lenient", + new OpenSearchDateTimeFormatter(STRICT_DATE_OPTIONAL_TIME_PRINTER), + new RFC3339CompatibleDateTimeFormatter( + new DateTimeFormatterBuilder().append(DATE_FORMATTER) + .optionalStart() + .appendLiteral('T') + .appendValue(HOUR_OF_DAY, 1, 2, SignStyle.NOT_NEGATIVE) + .appendLiteral(':') + .appendValue(MINUTE_OF_HOUR, 1, 2, SignStyle.NOT_NEGATIVE) + .optionalStart() + .appendLiteral(':') + .appendValue(SECOND_OF_MINUTE, 1, 2, SignStyle.NOT_NEGATIVE) + .optionalStart() + .appendFraction(NANO_OF_SECOND, 1, 9, true) + .optionalEnd() + .optionalStart() + .appendLiteral(',') + .appendFraction(NANO_OF_SECOND, 1, 9, false) + .optionalEnd() + .optionalStart() + .appendOffsetId() + .optionalEnd() + .optionalEnd() + .optionalEnd() + .toFormatter(Locale.ROOT) + .withResolverStyle(ResolverStyle.STRICT) + ) + ); + private static final DateTimeFormatter HOUR_MINUTE_SECOND_FORMATTER = new DateTimeFormatterBuilder().append(HOUR_MINUTE_FORMATTER) .appendLiteral(":") .appendValue(SECOND_OF_MINUTE, 1, 2, SignStyle.NOT_NEGATIVE) @@ -2152,6 +2187,8 @@ static DateFormatter forPattern(String input) { return STRICT_YEAR_MONTH; } else if (FormatNames.STRICT_YEAR_MONTH_DAY.matches(input)) { return STRICT_YEAR_MONTH_DAY; + } else if (FormatNames.RFC3339_LENIENT.matches(input)) { + return RFC3339_LENIENT_DATE_FORMATTER; } else { try { return new JavaDateFormatter( @@ -2172,10 +2209,10 @@ static DateFormatter forPattern(String input) { * or Instant.from(accessor). This results in a huge performance penalty and should be prevented * This method prevents exceptions by querying the accessor for certain capabilities * and then acts on it accordingly - * + * <p> * This action assumes that we can reliably fall back to some defaults if not all parts of a * zoned date time are set - * + * <p> * - If a zoned date time is passed, it is returned * - If no timezone is found, ZoneOffset.UTC is used * - If we find a time and a date, converting to a ZonedDateTime is straightforward, diff --git a/server/src/main/java/org/opensearch/common/time/DateMathParser.java b/server/src/main/java/org/opensearch/common/time/DateMathParser.java index f6573eaa90286..8c123d7d2ebc9 100644 --- a/server/src/main/java/org/opensearch/common/time/DateMathParser.java +++ b/server/src/main/java/org/opensearch/common/time/DateMathParser.java @@ -32,6 +32,7 @@ package org.opensearch.common.time; +import org.opensearch.common.annotation.PublicApi; import org.joda.time.DateTimeZone; import java.time.Instant; @@ -41,8 +42,9 @@ /** * An abstraction over date math parsing to allow different implementations for Joda and Java time.
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface DateMathParser { /** @@ -64,12 +66,12 @@ default Instant parse(String text, LongSupplier now, boolean roundUpProperty, Da /** * Parse text that potentially contains date math into the milliseconds since the epoch - * + * <p> * Examples are - * + * <p> * <code>2014-11-18||-2y</code> subtracts two years from the input date * <code>now/m</code> rounds the current time to minute granularity - * + * <p> * Supported rounding units are * y year * M month diff --git a/server/src/main/java/org/opensearch/common/time/DateUtils.java b/server/src/main/java/org/opensearch/common/time/DateUtils.java index 021b8a3be8b23..7ab395a1117e7 100644 --- a/server/src/main/java/org/opensearch/common/time/DateUtils.java +++ b/server/src/main/java/org/opensearch/common/time/DateUtils.java @@ -342,7 +342,7 @@ public static long toMilliSeconds(long nanoSecondsSinceEpoch) { /** * Rounds the given utc milliseconds since the epoch down to the next unit millis - * + * <p> * Note: This does not check for correctness of the result, as this only works with units smaller than or equal to a day * In order to ensure the performance of this method, there are no guards or checks in it * diff --git a/server/src/main/java/org/opensearch/common/time/DateUtilsRounding.java b/server/src/main/java/org/opensearch/common/time/DateUtilsRounding.java index f3459a5857b9e..7fc39e063efb5 100644 --- a/server/src/main/java/org/opensearch/common/time/DateUtilsRounding.java +++ b/server/src/main/java/org/opensearch/common/time/DateUtilsRounding.java @@ -32,12 +32,12 @@ * This class has been copied from different locations within the joda time package, as * these methods are fast when used for rounding, as they do not require conversion to java * time objects - * + * <p> * This code has been copied from jodatime 2.10.1 * The source can be found at https://github.com/JodaOrg/joda-time/tree/v2.10.1 - * + * <p> * The following methods have been copied (along with required helper variables) - * + * <p> * - org.joda.time.chrono.GregorianChronology.calculateFirstDayOfYearMillis(int year) * - org.joda.time.chrono.BasicChronology.getYear(int year) * - org.joda.time.chrono.BasicGJChronology.getMonthOfYear(long utcMillis, int year) diff --git a/server/src/main/java/org/opensearch/common/time/FormatNames.java b/server/src/main/java/org/opensearch/common/time/FormatNames.java index ba0a8fcf4a17a..ec5e825fc933e 100644 --- a/server/src/main/java/org/opensearch/common/time/FormatNames.java +++ b/server/src/main/java/org/opensearch/common/time/FormatNames.java @@ -44,6 +44,7 @@ */ public enum FormatNames { ISO8601(null, "iso8601"), + RFC3339_LENIENT(null, "rfc3339_lenient"), BASIC_DATE("basicDate", "basic_date"), BASIC_DATE_TIME("basicDateTime", "basic_date_time"), BASIC_DATE_TIME_NO_MILLIS("basicDateTimeNoMillis", "basic_date_time_no_millis"), diff --git a/server/src/main/java/org/opensearch/common/time/JavaDateFormatter.java b/server/src/main/java/org/opensearch/common/time/JavaDateFormatter.java index 07013a3dc75f2..033ea280e6172 100644 --- a/server/src/main/java/org/opensearch/common/time/JavaDateFormatter.java +++ b/server/src/main/java/org/opensearch/common/time/JavaDateFormatter.java @@ -32,9 +32,11 @@ package org.opensearch.common.time; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.Strings; import java.text.ParsePosition; +import java.time.DateTimeException; import java.time.ZoneId; import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeFormatterBuilder; @@ -67,9 +69,12 @@ class JavaDateFormatter implements DateFormatter { } private final String format; - private final DateTimeFormatter printer; - private final List<DateTimeFormatter> parsers; + private final String printFormat; + private final OpenSearchDateTimePrinter printer; + private final List<OpenSearchDateTimeFormatter> parsers; private final JavaDateFormatter roundupParser; + private final Boolean canCacheLastParsedFormatter; + private volatile OpenSearchDateTimeFormatter lastParsedformatter = null; /** * A round up formatter @@ -78,11 +83,11 @@ class JavaDateFormatter implements DateFormatter { */ static class RoundUpFormatter extends JavaDateFormatter { - RoundUpFormatter(String format, List<DateTimeFormatter> roundUpParsers) { + RoundUpFormatter(String format, List<OpenSearchDateTimeFormatter> roundUpParsers) { super(format, firstFrom(roundUpParsers), null, roundUpParsers); } - private static DateTimeFormatter firstFrom(List<DateTimeFormatter> roundUpParsers) { + private static OpenSearchDateTimeFormatter firstFrom(List<OpenSearchDateTimeFormatter> roundUpParsers) { return roundUpParsers.get(0); } @@ -93,8 +98,22 @@ JavaDateFormatter getRoundupParser() { } // named formatters use default roundUpParser + JavaDateFormatter( + String format, + String printFormat, + OpenSearchDateTimePrinter printer, + Boolean canCacheLastParsedFormatter, + OpenSearchDateTimeFormatter... parsers + ) { + this(format, printFormat, printer, ROUND_UP_BASE_FIELDS, canCacheLastParsedFormatter, parsers); + } + JavaDateFormatter(String format, DateTimeFormatter printer, DateTimeFormatter... parsers) { - this(format, printer, ROUND_UP_BASE_FIELDS, parsers); + this(format, format, wrapFormatter(printer), false, wrapAllFormatters(parsers)); + } + + JavaDateFormatter(String format, OpenSearchDateTimePrinter printer, OpenSearchDateTimeFormatter... parsers) { + this(format, format, printer, false, parsers); } private static final BiConsumer<DateTimeFormatterBuilder, DateTimeFormatter> ROUND_UP_BASE_FIELDS = (builder, parser) -> { @@ -111,31 +130,44 @@ JavaDateFormatter getRoundupParser() { // subclasses override roundUpParser JavaDateFormatter( String format, - DateTimeFormatter printer, + String printFormat, + OpenSearchDateTimePrinter printer, BiConsumer<DateTimeFormatterBuilder, DateTimeFormatter> roundupParserConsumer, - DateTimeFormatter... parsers + Boolean canCacheLastParsedFormatter, + OpenSearchDateTimeFormatter... 
parsers ) { if (printer == null) { throw new IllegalArgumentException("printer may not be null"); } - long distinctZones = Arrays.stream(parsers).map(DateTimeFormatter::getZone).distinct().count(); + long distinctZones = Arrays.stream(parsers).map(OpenSearchDateTimeFormatter::getZone).distinct().count(); if (distinctZones > 1) { throw new IllegalArgumentException("formatters must have the same time zone"); } - long distinctLocales = Arrays.stream(parsers).map(DateTimeFormatter::getLocale).distinct().count(); + long distinctLocales = Arrays.stream(parsers).map(OpenSearchDateTimeFormatter::getLocale).distinct().count(); if (distinctLocales > 1) { throw new IllegalArgumentException("formatters must have the same locale"); } this.printer = printer; this.format = format; + this.printFormat = printFormat; + this.canCacheLastParsedFormatter = canCacheLastParsedFormatter; if (parsers.length == 0) { - this.parsers = Collections.singletonList(printer); + this.parsers = Collections.singletonList((OpenSearchDateTimeFormatter) printer); } else { this.parsers = Arrays.asList(parsers); } List<DateTimeFormatter> roundUp = createRoundUpParser(format, roundupParserConsumer); - this.roundupParser = new RoundUpFormatter(format, roundUp); + this.roundupParser = new RoundUpFormatter(format, wrapAllFormatters(roundUp)); + } + + JavaDateFormatter( + String format, + DateTimeFormatter printer, + BiConsumer<DateTimeFormatterBuilder, DateTimeFormatter> roundupParserConsumer, + DateTimeFormatter... parsers + ) { + this(format, format, wrapFormatter(printer), roundupParserConsumer, false, wrapAllFormatters(parsers)); } /** @@ -153,7 +185,8 @@ private List<DateTimeFormatter> createRoundUpParser( ) { if (format.contains("||") == false) { List<DateTimeFormatter> roundUpParsers = new ArrayList<>(); - for (DateTimeFormatter parser : this.parsers) { + for (OpenSearchDateTimeFormatter customparser : this.parsers) { + DateTimeFormatter parser = customparser.getFormatter(); DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder(); builder.append(parser); roundupParserConsumer.accept(builder, parser); @@ -164,43 +197,77 @@ private List<DateTimeFormatter> createRoundUpParser( return null; } - public static DateFormatter combined(String input, List<DateFormatter> formatters) { + public static DateFormatter combined( + String input, + List<DateFormatter> formatters, + DateFormatter printFormatter, + Boolean canCacheLastParsedFormatter + ) { assert formatters.size() > 0; + assert printFormatter != null; - List<DateTimeFormatter> parsers = new ArrayList<>(formatters.size()); - List<DateTimeFormatter> roundUpParsers = new ArrayList<>(formatters.size()); + List<OpenSearchDateTimeFormatter> parsers = new ArrayList<>(formatters.size()); + List<OpenSearchDateTimeFormatter> roundUpParsers = new ArrayList<>(formatters.size()); - DateTimeFormatter printer = null; + assert printFormatter instanceof JavaDateFormatter; + JavaDateFormatter javaPrintFormatter = (JavaDateFormatter) printFormatter; + OpenSearchDateTimePrinter printer = javaPrintFormatter.getPrinter(); for (DateFormatter formatter : formatters) { assert formatter instanceof JavaDateFormatter; JavaDateFormatter javaDateFormatter = (JavaDateFormatter) formatter; - if (printer == null) { - printer = javaDateFormatter.getPrinter(); - } parsers.addAll(javaDateFormatter.getParsers()); roundUpParsers.addAll(javaDateFormatter.getRoundupParser().getParsers()); } - return new JavaDateFormatter(input, printer, roundUpParsers, parsers); + return new JavaDateFormatter( + input, + 
javaPrintFormatter.format, + printer, + roundUpParsers, + parsers, + canCacheLastParsedFormatter & FeatureFlags.isEnabled(FeatureFlags.DATETIME_FORMATTER_CACHING_SETTING) + ); // check if caching is enabled } private JavaDateFormatter( String format, - DateTimeFormatter printer, - List<DateTimeFormatter> roundUpParsers, - List<DateTimeFormatter> parsers + String printFormat, + OpenSearchDateTimePrinter printer, + List<OpenSearchDateTimeFormatter> roundUpParsers, + List<OpenSearchDateTimeFormatter> parsers, + Boolean canCacheLastParsedFormatter ) { this.format = format; + this.printFormat = printFormat; this.printer = printer; this.roundupParser = roundUpParsers != null ? new RoundUpFormatter(format, roundUpParsers) : null; this.parsers = parsers; + this.canCacheLastParsedFormatter = canCacheLastParsedFormatter; + } + + private JavaDateFormatter( + String format, + DateTimeFormatter printer, + List<DateTimeFormatter> roundUpParsers, + List<DateTimeFormatter> parsers + ) { + this(format, format, wrapFormatter(printer), wrapAllFormatters(roundUpParsers), wrapAllFormatters(parsers), false); + } + + private JavaDateFormatter( + String format, + OpenSearchDateTimePrinter printer, + List<OpenSearchDateTimeFormatter> roundUpParsers, + List<OpenSearchDateTimeFormatter> parsers + ) { + this(format, format, printer, roundUpParsers, parsers, false); } JavaDateFormatter getRoundupParser() { return roundupParser; } - DateTimeFormatter getPrinter() { + OpenSearchDateTimePrinter getPrinter() { return printer; } @@ -212,7 +279,7 @@ public TemporalAccessor parse(String input) { try { return doParse(input); - } catch (DateTimeParseException e) { + } catch (DateTimeException e) { throw new IllegalArgumentException("failed to parse date field [" + input + "] with format [" + format + "]", e); } } @@ -222,7 +289,7 @@ public TemporalAccessor parse(String input) { * it will continue iterating if the previous parser failed. The pattern must fully match, meaning whole input was used. * This also means that this method depends on <code>DateTimeFormatter.ClassicFormat.parseObject</code> * which does not throw exceptions when parsing failed. - * + * <p> * The approach with collection of parsers was taken because java-time requires ordering on optional (composite) * patterns. Joda does not suffer from this. 
* https://bugs.openjdk.java.net/browse/JDK-8188771 @@ -233,13 +300,23 @@ public TemporalAccessor parse(String input) { */ private TemporalAccessor doParse(String input) { if (parsers.size() > 1) { - for (DateTimeFormatter formatter : parsers) { + Object object = null; + if (canCacheLastParsedFormatter && lastParsedformatter != null) { ParsePosition pos = new ParsePosition(0); - Object object = formatter.toFormat().parseObject(input, pos); + object = lastParsedformatter.parseObject(input, pos); if (parsingSucceeded(object, input, pos)) { return (TemporalAccessor) object; } } + for (OpenSearchDateTimeFormatter formatter : parsers) { + ParsePosition pos = new ParsePosition(0); + object = formatter.parseObject(input, pos); + if (parsingSucceeded(object, input, pos)) { + lastParsedformatter = formatter; + return (TemporalAccessor) object; + } + } + throw new DateTimeParseException("Failed to parse with all enclosed parsers", input, 0); } return this.parsers.get(0).parse(input); @@ -249,18 +326,32 @@ private boolean parsingSucceeded(Object object, String input, ParsePosition pos) return object != null && pos.getIndex() == input.length(); } + private static OpenSearchDateTimeFormatter wrapFormatter(DateTimeFormatter formatter) { + return new OpenSearchDateTimeFormatter(formatter); + } + + private static OpenSearchDateTimeFormatter[] wrapAllFormatters(DateTimeFormatter... formatters) { + return Arrays.stream(formatters).map(JavaDateFormatter::wrapFormatter).toArray(OpenSearchDateTimeFormatter[]::new); + } + + private static List<OpenSearchDateTimeFormatter> wrapAllFormatters(List<DateTimeFormatter> formatters) { + return formatters.stream().map(JavaDateFormatter::wrapFormatter).collect(Collectors.toList()); + } + @Override public DateFormatter withZone(ZoneId zoneId) { // shortcut to not create new objects unnecessarily if (zoneId.equals(zone())) { return this; } - List<DateTimeFormatter> parsers = this.parsers.stream().map(p -> p.withZone(zoneId)).collect(Collectors.toList()); - List<DateTimeFormatter> roundUpParsers = this.roundupParser.getParsers() + List<OpenSearchDateTimeFormatter> parsers = new ArrayList<>( + this.parsers.stream().map(p -> p.withZone(zoneId)).collect(Collectors.toList()) + ); + List<OpenSearchDateTimeFormatter> roundUpParsers = this.roundupParser.getParsers() .stream() .map(p -> p.withZone(zoneId)) .collect(Collectors.toList()); - return new JavaDateFormatter(format, printer.withZone(zoneId), roundUpParsers, parsers); + return new JavaDateFormatter(format, printFormat, printer.withZone(zoneId), roundUpParsers, parsers, canCacheLastParsedFormatter); } @Override @@ -269,12 +360,14 @@ public DateFormatter withLocale(Locale locale) { if (locale.equals(locale())) { return this; } - List<DateTimeFormatter> parsers = this.parsers.stream().map(p -> p.withLocale(locale)).collect(Collectors.toList()); - List<DateTimeFormatter> roundUpParsers = this.roundupParser.getParsers() + List<OpenSearchDateTimeFormatter> parsers = new ArrayList<>( + this.parsers.stream().map(p -> p.withLocale(locale)).collect(Collectors.toList()) + ); + List<OpenSearchDateTimeFormatter> roundUpParsers = this.roundupParser.getParsers() .stream() .map(p -> p.withLocale(locale)) .collect(Collectors.toList()); - return new JavaDateFormatter(format, printer.withLocale(locale), roundUpParsers, parsers); + return new JavaDateFormatter(format, printFormat, printer.withLocale(locale), roundUpParsers, parsers, canCacheLastParsedFormatter); } @Override @@ -287,6 +380,11 @@ public String pattern() { return format; } +
@Override + public String printPattern() { + return printFormat; + } + @Override public Locale locale() { return this.printer.getLocale(); @@ -324,7 +422,7 @@ public String toString() { return String.format(Locale.ROOT, "format[%s] locale[%s]", format, locale()); } - Collection<DateTimeFormatter> getParsers() { + Collection<OpenSearchDateTimeFormatter> getParsers() { return parsers; } } diff --git a/server/src/main/java/org/opensearch/common/time/JavaDateMathParser.java b/server/src/main/java/org/opensearch/common/time/JavaDateMathParser.java index 0536324b6516b..340901e7ac8e2 100644 --- a/server/src/main/java/org/opensearch/common/time/JavaDateMathParser.java +++ b/server/src/main/java/org/opensearch/common/time/JavaDateMathParser.java @@ -51,7 +51,7 @@ /** * A parser for date/time formatted text with optional date math. - * + * <p> * The format of the datetime is configurable, and unix timestamps can also be used. Datemath * is appended to a datetime with the following syntax: * <code>||[+-/](\d+)?[yMwdhHms]</code>. diff --git a/server/src/main/java/org/opensearch/common/time/OpenSearchDateTimeFormatter.java b/server/src/main/java/org/opensearch/common/time/OpenSearchDateTimeFormatter.java new file mode 100644 index 0000000000000..3a629d8843949 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/time/OpenSearchDateTimeFormatter.java @@ -0,0 +1,85 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.time; + +import java.text.Format; +import java.text.ParsePosition; +import java.time.ZoneId; +import java.time.format.DateTimeFormatter; +import java.time.temporal.TemporalAccessor; +import java.time.temporal.TemporalQuery; +import java.util.Locale; + +/** +* Wrapper class for DateTimeFormatter{@link java.time.format.DateTimeFormatter} +* to allow for custom implementations for datetime parsing/formatting + */ +class OpenSearchDateTimeFormatter implements OpenSearchDateTimePrinter { + private final DateTimeFormatter formatter; + + public OpenSearchDateTimeFormatter(String pattern) { + this.formatter = DateTimeFormatter.ofPattern(pattern, Locale.ROOT); + } + + public OpenSearchDateTimeFormatter(String pattern, Locale locale) { + this.formatter = DateTimeFormatter.ofPattern(pattern, locale); + } + + public OpenSearchDateTimeFormatter(DateTimeFormatter formatter) { + this.formatter = formatter; + } + + public OpenSearchDateTimeFormatter withLocale(Locale locale) { + return new OpenSearchDateTimeFormatter(getFormatter().withLocale(locale)); + } + + public OpenSearchDateTimeFormatter withZone(ZoneId zoneId) { + return new OpenSearchDateTimeFormatter(getFormatter().withZone(zoneId)); + } + + public String format(TemporalAccessor temporal) { + return this.getFormatter().format(temporal); + } + + public TemporalAccessor parse(CharSequence text, ParsePosition position) { + return this.getFormatter().parse(text, position); + } + + public TemporalAccessor parse(CharSequence text) { + return this.getFormatter().parse(text); + } + + public <T> T parse(CharSequence text, TemporalQuery<T> query) { + return this.getFormatter().parse(text, query); + } + + public ZoneId getZone() { + return this.getFormatter().getZone(); + } + + public Locale getLocale() { + return this.getFormatter().getLocale(); + } + + public TemporalAccessor parse(String input) { + return formatter.parse(input); + } + + public 
DateTimeFormatter getFormatter() { + return formatter; + } + + public Format toFormat() { + return getFormatter().toFormat(); + } + + public Object parseObject(String text, ParsePosition pos) { + return getFormatter().toFormat().parseObject(text, pos); + } +} diff --git a/server/src/main/java/org/opensearch/common/time/OpenSearchDateTimePrinter.java b/server/src/main/java/org/opensearch/common/time/OpenSearchDateTimePrinter.java new file mode 100644 index 0000000000000..350bae21b22b1 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/time/OpenSearchDateTimePrinter.java @@ -0,0 +1,30 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.time; + +import java.time.ZoneId; +import java.time.temporal.TemporalAccessor; +import java.util.Locale; + +/** + * Interface for DateTimeFormatter{@link java.time.format.DateTimeFormatter} + * to allow for custom implementations for datetime formatting + */ +interface OpenSearchDateTimePrinter { + + public OpenSearchDateTimePrinter withLocale(Locale locale); + + public OpenSearchDateTimePrinter withZone(ZoneId zoneId); + + public String format(TemporalAccessor temporal); + + public Locale getLocale(); + + public ZoneId getZone(); +} diff --git a/server/src/main/java/org/opensearch/common/time/RFC3339CompatibleDateTimeFormatter.java b/server/src/main/java/org/opensearch/common/time/RFC3339CompatibleDateTimeFormatter.java new file mode 100644 index 0000000000000..98b87efd2380b --- /dev/null +++ b/server/src/main/java/org/opensearch/common/time/RFC3339CompatibleDateTimeFormatter.java @@ -0,0 +1,428 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Based on code from the Internet Time Utility project (https://github.com/ethlo/itu) under the Apache License, version 2.0. + * Copyright (C) 2017 Morten Haraldsen (ethlo) + * Modifications (C) OpenSearch Contributors. All Rights Reserved. + */ + +package org.opensearch.common.time; + +import java.text.ParsePosition; +import java.time.DateTimeException; +import java.time.OffsetDateTime; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.format.DateTimeParseException; +import java.time.temporal.TemporalAccessor; +import java.util.Arrays; +import java.util.Locale; + +/** + * Defines a close profile of RFC3339 datetime format where the date is mandatory and the time is optional. + * <p> + * The returned formatter can only be used for parsing, printing is unsupported. + * <p> + * This parser can parse zoned datetimes. + * The parser is strict by default, thus time string {@code 24:00} cannot be parsed. 
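Given the profile described here, a short sketch of inputs the lenient parser accepts and rejects via the rfc3339_lenient format name registered in this diff (behavior inferred from the parser code below):

import org.opensearch.common.time.DateFormatter;

public class Rfc3339LenientSketch {
    public static void main(String[] args) {
        DateFormatter rfc3339 = DateFormatter.forPattern("rfc3339_lenient");

        rfc3339.parse("1997");                         // year only
        rfc3339.parse("1997-07");                      // year and month
        rfc3339.parse("1997-07-16T19:20+01:00");       // minutes plus offset
        rfc3339.parse("1997-07-16t19:20:30.45z");      // lenient: lowercase 't' and 'z'
        rfc3339.parse("1997-07-16 19:20:30,45+01:00"); // space separator, comma fraction

        // Strict where it matters; both of these throw IllegalArgumentException:
        // rfc3339.parse("1997-07-16T24:00:00Z");      // hour 24 is rejected
        // rfc3339.parse("1997-07-16T19:20:30");       // missing timezone offset
    }
}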
+ * <p> + * It accepts formats described by the following syntax: + * <pre> + * Year: + * YYYY (eg 1997) + * Year and month: + * YYYY-MM (eg 1997-07) + * Complete date: + * YYYY-MM-DD (eg 1997-07-16) + * Complete date plus hours and minutes: + * YYYY-MM-DDThh:mmTZD (eg 1997-07-16T19:20+01:00) + * Complete date plus hours, minutes and seconds: + * YYYY-MM-DDThh:mm:ssTZD (eg 1997-07-16T19:20:30+01:00) + * Complete date plus hours, minutes, seconds and a decimal fraction of a second + * YYYY-MM-DDThh:mm:ss.sTZD (eg 1997-07-16T19:20:30.45+01:00) + * YYYY-MM-DDThh:mm:ss,sTZD (eg 1997-07-16T19:20:30,45+01:00) + * where: + * + * YYYY = four-digit year + * MM = two-digit month (01=January, etc.) + * DD = two-digit day of month (01 through 31) + * hh = two digits of hour (00 through 23) (am/pm NOT allowed) + * mm = two digits of minute (00 through 59) + * ss = two digits of second (00 through 59) + * s = one or more(max 9) digits representing a decimal fraction of a second + * TZD = time zone designator (Z or z or +hh:mm or -hh:mm) + * </pre> + */ +final class RFC3339CompatibleDateTimeFormatter extends OpenSearchDateTimeFormatter { + public static final char DATE_SEPARATOR = '-'; + public static final char TIME_SEPARATOR = ':'; + public static final char SEPARATOR_UPPER = 'T'; + private static final char PLUS = '+'; + private static final char MINUS = '-'; + private static final char SEPARATOR_LOWER = 't'; + private static final char SEPARATOR_SPACE = ' '; + private static final char FRACTION_SEPARATOR_1 = '.'; + private static final char FRACTION_SEPARATOR_2 = ','; + private static final char ZULU_UPPER = 'Z'; + private static final char ZULU_LOWER = 'z'; + + private ZoneId zone; + + public RFC3339CompatibleDateTimeFormatter(String pattern) { + super(pattern); + } + + public RFC3339CompatibleDateTimeFormatter(java.time.format.DateTimeFormatter formatter) { + super(formatter); + } + + public RFC3339CompatibleDateTimeFormatter(java.time.format.DateTimeFormatter formatter, ZoneId zone) { + super(formatter); + this.zone = zone; + } + + @Override + public OpenSearchDateTimeFormatter withZone(ZoneId zoneId) { + return new RFC3339CompatibleDateTimeFormatter(getFormatter().withZone(zoneId), zoneId); + } + + @Override + public OpenSearchDateTimeFormatter withLocale(Locale locale) { + return new RFC3339CompatibleDateTimeFormatter(getFormatter().withLocale(locale)); + } + + @Override + public Object parseObject(String text, ParsePosition pos) { + try { + return parse(text); + } catch (DateTimeException e) { + return null; + } + } + + @Override + public TemporalAccessor parse(final String dateTime) { + OffsetDateTime parsedDatetime = parse(dateTime, new ParsePosition(0)); + return zone == null ? 
parsedDatetime : parsedDatetime.atZoneSameInstant(zone); + } + + public OffsetDateTime parse(String date, ParsePosition pos) { + if (date == null) { + throw new IllegalArgumentException("date cannot be null"); + } + + final int len = date.length() - pos.getIndex(); + if (len <= 0) { + throw new DateTimeParseException("out of bound parse position", date, pos.getIndex()); + } + final char[] chars = date.substring(pos.getIndex()).toCharArray(); + + // Date portion + + // YEAR + final int years = getYear(chars, pos); + if (4 == len) { + return OffsetDateTime.of(years, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); + } + + // MONTH + consumeChar(chars, pos, DATE_SEPARATOR); + final int months = getMonth(chars, pos); + if (7 == len) { + return OffsetDateTime.of(years, months, 1, 0, 0, 0, 0, ZoneOffset.UTC); + } + + // DAY + consumeChar(chars, pos, DATE_SEPARATOR); + final int days = getDay(chars, pos); + if (10 == len) { + return OffsetDateTime.of(years, months, days, 0, 0, 0, 0, ZoneOffset.UTC); + } + + // HOURS + consumeChar(chars, pos, SEPARATOR_UPPER, SEPARATOR_LOWER, SEPARATOR_SPACE); + final int hours = getHour(chars, pos); + + // MINUTES + consumeChar(chars, pos, TIME_SEPARATOR); + final int minutes = getMinute(chars, pos); + if (16 == len) { + throw new DateTimeParseException("No timezone offset information", new String(chars), pos.getIndex()); + } + + // SECONDS or TIMEZONE + return handleTime(chars, pos, years, months, days, hours, minutes); + } + + private static boolean isDigit(char c) { + return (c >= '0' && c <= '9'); + } + + private static int digit(char c) { + return c - '0'; + } + + private static int readInt(final char[] strNum, ParsePosition pos, int n) { + int start = pos.getIndex(), end = start + n; + if (end > strNum.length) { + pos.setErrorIndex(end); + throw new DateTimeParseException("Unexpected end of expression at position " + strNum.length, new String(strNum), end); + } + + int result = 0; + for (int i = start; i < end; i++) { + final char c = strNum[i]; + if (isDigit(c) == false) { + pos.setErrorIndex(i); + throw new DateTimeParseException("Character " + c + " is not a digit", new String(strNum), i); + } + int digit = digit(c); + result = result * 10 + digit; + } + pos.setIndex(end); + return result; + } + + private static int readIntUnchecked(final char[] strNum, ParsePosition pos, int n) { + int start = pos.getIndex(), end = start + n; + int result = 0; + for (int i = start; i < end; i++) { + final char c = strNum[i]; + int digit = digit(c); + result = result * 10 + digit; + } + pos.setIndex(end); + return result; + } + + private static int getHour(final char[] chars, ParsePosition pos) { + return readInt(chars, pos, 2); + } + + private static int getMinute(final char[] chars, ParsePosition pos) { + return readInt(chars, pos, 2); + } + + private static int getDay(final char[] chars, ParsePosition pos) { + return readInt(chars, pos, 2); + } + + private static boolean isValidOffset(char[] chars, int offset) { + return offset < chars.length; + } + + private static void consumeChar(char[] chars, ParsePosition pos, char expected) { + int offset = pos.getIndex(); + if (isValidOffset(chars, offset) == false) { + throw new DateTimeParseException("Unexpected end of input", new String(chars), offset); + } + + if (chars[offset] != expected) { + throw new DateTimeParseException("Expected character " + expected + " at position " + offset, new String(chars), offset); + } + pos.setIndex(offset + 1); + } + + private static void consumeNextChar(char[] chars, ParsePosition pos) { + int offset = 
pos.getIndex(); + if (isValidOffset(chars, offset) == false) { + throw new DateTimeParseException("Unexpected end of input", new String(chars), offset); + } + pos.setIndex(offset + 1); + } + + private static boolean checkPositionContains(char[] chars, ParsePosition pos, char... expected) { + int offset = pos.getIndex(); + if (offset >= chars.length) { + throw new DateTimeParseException("Unexpected end of input", new String(chars), offset); + } + + boolean found = false; + for (char e : expected) { + if (chars[offset] == e) { + found = true; + break; + } + } + return found; + } + + private static void consumeChar(char[] chars, ParsePosition pos, char... expected) { + int offset = pos.getIndex(); + if (offset >= chars.length) { + throw new DateTimeParseException("Unexpected end of input", new String(chars), offset); + } + + boolean found = false; + for (char e : expected) { + if (chars[offset] == e) { + found = true; + pos.setIndex(offset + 1); + break; + } + } + if (!found) { + throw new DateTimeParseException( + "Expected character " + Arrays.toString(expected) + " at position " + offset, + new String(chars), + offset + ); + } + } + + private static void assertNoMoreChars(char[] chars, ParsePosition pos) { + if (chars.length > pos.getIndex()) { + throw new DateTimeParseException("Trailing junk data after position " + pos.getIndex(), new String(chars), pos.getIndex()); + } + } + + private static ZoneOffset parseTimezone(char[] chars, ParsePosition pos) { + int offset = pos.getIndex(); + final int left = chars.length - offset; + if (checkPositionContains(chars, pos, ZULU_LOWER, ZULU_UPPER)) { + consumeNextChar(chars, pos); + assertNoMoreChars(chars, pos); + return ZoneOffset.UTC; + } + + if (left != 6) { + throw new DateTimeParseException("Invalid timezone offset", new String(chars, offset, left), offset); + } + + final char sign = chars[offset]; + consumeNextChar(chars, pos); + int hours = getHour(chars, pos); + consumeChar(chars, pos, TIME_SEPARATOR); + int minutes = getMinute(chars, pos); + if (sign == MINUS) { + if (hours == 0 && minutes == 0) { + throw new DateTimeParseException("Unknown 'Local Offset Convention' date-time not allowed", new String(chars), offset); + } + hours = -hours; + minutes = -minutes; + } else if (sign != PLUS) { + throw new DateTimeParseException("Invalid character starting at position " + offset, new String(chars), offset); + } + + return ZoneOffset.ofHoursMinutes(hours, minutes); + } + + private static OffsetDateTime handleTime(char[] chars, ParsePosition pos, int year, int month, int day, int hour, int minute) { + switch (chars[pos.getIndex()]) { + case TIME_SEPARATOR: + consumeChar(chars, pos, TIME_SEPARATOR); + return handleSeconds(year, month, day, hour, minute, chars, pos); + + case PLUS: + case MINUS: + case ZULU_UPPER: + case ZULU_LOWER: + final ZoneOffset zoneOffset = parseTimezone(chars, pos); + return OffsetDateTime.of(year, month, day, hour, minute, 0, 0, zoneOffset); + } + throw new DateTimeParseException("Unexpected character " + " at position " + pos.getIndex(), new String(chars), pos.getIndex()); + } + + private static int getMonth(final char[] chars, ParsePosition pos) { + return readInt(chars, pos, 2); + } + + private static int getYear(final char[] chars, ParsePosition pos) { + return readInt(chars, pos, 4); + } + + private static int getSeconds(final char[] chars, ParsePosition pos) { + return readInt(chars, pos, 2); + } + + private static int getFractions(final char[] chars, final ParsePosition pos, final int len) { + final int fractions; + 
fractions = readIntUnchecked(chars, pos, len); + switch (len) { + case 0: + throw new DateTimeParseException("Must have at least 1 fraction digit", new String(chars), pos.getIndex()); + case 1: + return fractions * 100_000_000; + case 2: + return fractions * 10_000_000; + case 3: + return fractions * 1_000_000; + case 4: + return fractions * 100_000; + case 5: + return fractions * 10_000; + case 6: + return fractions * 1_000; + case 7: + return fractions * 100; + case 8: + return fractions * 10; + default: + return fractions; + } + } + + public static int indexOfNonDigit(final char[] text, int offset) { + for (int i = offset; i < text.length; i++) { + if (isDigit(text[i]) == false) { + return i; + } + } + return -1; + } + + public static void consumeDigits(final char[] text, ParsePosition pos) { + final int idx = indexOfNonDigit(text, pos.getIndex()); + if (idx == -1) { + pos.setErrorIndex(text.length); + } else { + pos.setIndex(idx); + } + } + + private static OffsetDateTime handleSeconds(int year, int month, int day, int hour, int minute, char[] chars, ParsePosition pos) { + // From here the specification is more lenient + final int seconds = getSeconds(chars, pos); + int currPos = pos.getIndex(); + final int remaining = chars.length - currPos; + if (remaining == 0) { + // No offset + throw new DateTimeParseException("No timezone offset information", new String(chars), pos.getIndex()); + } + + ZoneOffset offset = null; + int fractions = 0; + if (remaining == 1 && checkPositionContains(chars, pos, ZULU_LOWER, ZULU_UPPER)) { + consumeNextChar(chars, pos); + // Do nothing we are done + offset = ZoneOffset.UTC; + assertNoMoreChars(chars, pos); + } else if (remaining >= 1 && checkPositionContains(chars, pos, FRACTION_SEPARATOR_1, FRACTION_SEPARATOR_2)) { + // We have fractional seconds; + consumeNextChar(chars, pos); + ParsePosition initPosition = new ParsePosition(pos.getIndex()); + consumeDigits(chars, pos); + if (pos.getErrorIndex() == -1) { + // We have an end of fractions + final int len = pos.getIndex() - initPosition.getIndex(); + fractions = getFractions(chars, initPosition, len); + offset = parseTimezone(chars, pos); + } else { + throw new DateTimeParseException("No timezone offset information", new String(chars), pos.getIndex()); + } + } else if (remaining >= 1 && checkPositionContains(chars, pos, PLUS, MINUS)) { + // No fractional sections + offset = parseTimezone(chars, pos); + } else { + throw new DateTimeParseException("Unexpected character at position " + (pos.getIndex()), new String(chars), pos.getIndex()); + } + + return OffsetDateTime.of(year, month, day, hour, minute, seconds, fractions, offset); + } +} diff --git a/server/src/main/java/org/opensearch/common/transport/package-info.java b/server/src/main/java/org/opensearch/common/transport/package-info.java deleted file mode 100644 index abb8dfbb4e4f0..0000000000000 --- a/server/src/main/java/org/opensearch/common/transport/package-info.java +++ /dev/null @@ -1,10 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/** Base Transport utility package. 
*/ -package org.opensearch.common.transport; diff --git a/server/src/main/java/org/opensearch/common/unit/Fuzziness.java b/server/src/main/java/org/opensearch/common/unit/Fuzziness.java index 2ce86cbfd13f3..d16ec9723fb2b 100644 --- a/server/src/main/java/org/opensearch/common/unit/Fuzziness.java +++ b/server/src/main/java/org/opensearch/common/unit/Fuzziness.java @@ -32,6 +32,7 @@ package org.opensearch.common.unit; import org.opensearch.OpenSearchParseException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -49,8 +50,9 @@ * parsing and conversion from similarities to edit distances * etc. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class Fuzziness implements ToXContentFragment, Writeable { public static final String X_FIELD_NAME = "fuzziness"; diff --git a/server/src/main/java/org/opensearch/common/unit/MemorySizeValue.java b/server/src/main/java/org/opensearch/common/unit/MemorySizeValue.java index 18aae6277c379..96f128cb6dc38 100644 --- a/server/src/main/java/org/opensearch/common/unit/MemorySizeValue.java +++ b/server/src/main/java/org/opensearch/common/unit/MemorySizeValue.java @@ -33,11 +33,13 @@ package org.opensearch.common.unit; import org.opensearch.OpenSearchParseException; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.monitor.jvm.JvmInfo; import java.util.Objects; -import static org.opensearch.common.unit.ByteSizeValue.parseBytesSizeValue; +import static org.opensearch.core.common.unit.ByteSizeValue.parseBytesSizeValue; /** * Utility methods to get memory sizes. diff --git a/server/src/main/java/org/opensearch/common/unit/SizeValue.java b/server/src/main/java/org/opensearch/common/unit/SizeValue.java index 3b73955f8d046..14f2bedde53f8 100644 --- a/server/src/main/java/org/opensearch/common/unit/SizeValue.java +++ b/server/src/main/java/org/opensearch/common/unit/SizeValue.java @@ -33,7 +33,8 @@ package org.opensearch.common.unit; import org.opensearch.OpenSearchParseException; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -43,8 +44,9 @@ /** * Conversion values. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SizeValue implements Writeable, Comparable<SizeValue> { private final long size; diff --git a/server/src/main/java/org/opensearch/common/util/BigArrays.java b/server/src/main/java/org/opensearch/common/util/BigArrays.java index 45c2092601a7b..92371c2c77ef9 100644 --- a/server/src/main/java/org/opensearch/common/util/BigArrays.java +++ b/server/src/main/java/org/opensearch/common/util/BigArrays.java @@ -36,18 +36,24 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.RamUsageEstimator; import org.opensearch.common.Nullable; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.breaker.CircuitBreakingException; -import org.opensearch.common.recycler.Recycler; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; +import org.opensearch.common.recycler.Recycler; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.CircuitBreakingException; import org.opensearch.core.common.util.BigArray; import org.opensearch.core.common.util.ByteArray; -import org.opensearch.indices.breaker.CircuitBreakerService; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import java.util.Arrays; -/** Utility class to work with arrays. */ +/** + * Utility class to work with arrays. + * + * @opensearch.api + * */ +@PublicApi(since = "1.0.0") public class BigArrays { public static final BigArrays NON_RECYCLING_INSTANCE = new BigArrays(null, null, CircuitBreaker.REQUEST); diff --git a/server/src/main/java/org/opensearch/common/util/BinarySearcher.java b/server/src/main/java/org/opensearch/common/util/BinarySearcher.java index ca63c170c0ccd..e4315f8699206 100644 --- a/server/src/main/java/org/opensearch/common/util/BinarySearcher.java +++ b/server/src/main/java/org/opensearch/common/util/BinarySearcher.java @@ -34,14 +34,14 @@ /** * Performs binary search on an arbitrary data structure. - * + * <p> * To do a search, create a subclass and implement custom {@link #compare(int)} and {@link #distance(int)} methods. - * + * <p> * {@link BinarySearcher} knows nothing about the value being searched for or the underlying data structure. * These things should be determined by the subclass in its overridden methods. - * + * <p> * Refer to {@link BigArrays.DoubleBinarySearcher} for an example. - * + * <p> * NOTE: this class is not thread safe * * @opensearch.internal @@ -74,7 +74,7 @@ private int getClosestIndex(int index1, int index2) { /** * Uses a binary search to determine the index of the element within the index range {from, ... , to} that is * closest to the search value. - * + * <p> * Unlike most binary search implementations, the value being searched for is not an argument to search method. * Rather, this value should be stored by the subclass along with the underlying array. 
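To make the BinarySearcher subclassing contract above concrete, a minimal sketch of a searcher over a sorted double[]; the compare/distance signatures and the search(from, to) entry point are assumed from the javadoc references, with BigArrays.DoubleBinarySearcher as the canonical in-tree example:

import org.opensearch.common.util.BinarySearcher;

// A sketch only: the search value is stored on the subclass, not passed to search().
class SortedDoublesSearcher extends BinarySearcher {
    private final double[] values; // assumed sorted ascending
    private double target;

    SortedDoublesSearcher(double[] values) {
        this.values = values;
    }

    int closestIndexOf(double target) {
        this.target = target;                // stash the value being searched for
        return search(0, values.length - 1); // assumed base-class entry point
    }

    @Override
    protected int compare(int index) {
        return Double.compare(values[index], target);
    }

    @Override
    protected double distance(int index) {
        return Math.abs(values[index] - target);
    }
}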
* diff --git a/server/src/main/java/org/opensearch/common/util/BytesRefHash.java b/server/src/main/java/org/opensearch/common/util/BytesRefHash.java index ecc93d017beaf..4afba2905019a 100644 --- a/server/src/main/java/org/opensearch/common/util/BytesRefHash.java +++ b/server/src/main/java/org/opensearch/common/util/BytesRefHash.java @@ -33,152 +33,292 @@ package org.opensearch.common.util; import org.apache.lucene.util.BytesRef; +import org.opensearch.common.Numbers; +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.common.hash.T1ha1; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; import org.opensearch.core.common.util.ByteArray; /** - * Specialized hash table implementation similar to Lucene's BytesRefHash that maps - * BytesRef values to ids. Collisions are resolved with open addressing and linear - * probing, growth is smooth thanks to {@link BigArrays}, hashes are cached for faster - * re-hashing and capacity is always a multiple of 2 for faster identification of buckets. - * This class is not thread-safe. + * Specialized hash table implementation that maps a {@link BytesRef} key to a long ordinal. * - * @opensearch.internal + * <p> + * It uses a compact byte-packing strategy to encode the ordinal and fingerprint information + * in the hash table value. It makes lookups faster by short-circuiting expensive equality checks + * for keys that collide onto the same hash table slot. + * + * <p> + * This class is not thread-safe. + * + * @opensearch.internal */ -public final class BytesRefHash extends AbstractHash { +@InternalApi +public final class BytesRefHash implements Releasable { + private static final long MAX_CAPACITY = 1L << 32; + private static final long DEFAULT_INITIAL_CAPACITY = 32; + private static final float DEFAULT_LOAD_FACTOR = 0.6f; + private static final Hasher DEFAULT_HASHER = key -> T1ha1.hash(key.bytes, key.offset, key.length); + + private static final long MASK_ORDINAL = 0x00000000FFFFFFFFL; // extract ordinal + private static final long MASK_FINGERPRINT = 0xFFFFFFFF00000000L; // extract fingerprint + + /** + * Maximum load factor after which the capacity is doubled. + */ + private final float loadFactor; + + /** + * Calculates the hash of a {@link BytesRef} key. + */ + private final Hasher hasher; + + /** + * Utility class to allocate recyclable arrays. + */ + private final BigArrays bigArrays; + + /** + * Reusable BytesRef to read keys. + */ + private final BytesRef scratch = new BytesRef(); + + /** + * Current capacity of the hash table. This must be a power of two so that the hash table slot + * can be identified quickly using bitmasks, thus avoiding expensive modulo or integer division. + */ + private long capacity; + + /** + * Bitmask to identify the hash table slot from a key's hash. + */ + private long mask; + + /** + * Size threshold after which the hash table needs to be doubled in capacity. + */ + private long grow; + + /** + * Current size of the hash table. + */ + private long size; + + /** + * Underlying array to store the hash table values. 
+ * + * <p> + * Each hash table value (64-bit) uses the following byte packing strategy: + * <pre> + * |================================|================================| + * | Fingerprint | Ordinal | + * |--------------------------------|--------------------------------| + * | 32 bits | 32 bits | + * |================================|================================| + * </pre> + * + * <p> + * This allows us to encode and manipulate additional information in the hash table + * itself without having to look elsewhere in the memory, which is much slower. + * + * <p> + * Terminology: <code>table[index] = value = (fingerprint | ordinal)</code> + */ + private LongArray table; + + /** + * Underlying array to store the starting offsets of keys. + * + * <p> + * Terminology: + * <pre> + * offsets[ordinal] = starting offset (inclusive) + * offsets[ordinal + 1] = ending offset (exclusive) + * </pre> + */ + private LongArray offsets; + + /** + * Underlying byte array to store the keys. + * + * <p> + * Terminology: <code>keys[start...end] = key</code> + */ + private ByteArray keys; - private LongArray startOffsets; - private ByteArray bytes; - private IntArray hashes; // we cache hashes for faster re-hashing - private final BytesRef spare; + /** + * Pre-computed hashes of the stored keys. + * It is used to speed up reinserts when doubling the capacity. + */ + private LongArray hashes; - // Constructor with configurable capacity and default maximum load factor. - public BytesRefHash(long capacity, BigArrays bigArrays) { - this(capacity, DEFAULT_MAX_LOAD_FACTOR, bigArrays); + public BytesRefHash(final BigArrays bigArrays) { + this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_HASHER, bigArrays); } - // Constructor with configurable capacity and load factor. - public BytesRefHash(long capacity, float maxLoadFactor, BigArrays bigArrays) { - super(capacity, maxLoadFactor, bigArrays); - startOffsets = bigArrays.newLongArray(capacity + 1, false); - startOffsets.set(0, 0); - bytes = bigArrays.newByteArray(capacity * 3, false); - hashes = bigArrays.newIntArray(capacity, false); - spare = new BytesRef(); + public BytesRefHash(final long initialCapacity, final BigArrays bigArrays) { + this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_HASHER, bigArrays); } - // BytesRef has a weak hashCode function so we try to improve it by rehashing using Murmur3 - // Feel free to remove rehashing if BytesRef gets a better hash function - private static int rehash(int hash) { - return BitMixer.mix32(hash); + public BytesRefHash(final long initialCapacity, final float loadFactor, final BigArrays bigArrays) { + this(initialCapacity, loadFactor, DEFAULT_HASHER, bigArrays); } - /** - * Return the key at <code>0 <= index <= capacity()</code>. The result is undefined if the slot is unused. 
- * <p>Beware that the content of the {@link BytesRef} may become invalid as soon as {@link #close()} is called</p> - */ - public BytesRef get(long id, BytesRef dest) { - final long startOffset = startOffsets.get(id); - final int length = (int) (startOffsets.get(id + 1) - startOffset); - bytes.get(startOffset, length, dest); - return dest; + public BytesRefHash(final long initialCapacity, final float loadFactor, final Hasher hasher, final BigArrays bigArrays) { + assert initialCapacity > 0 : "initial capacity must be greater than 0"; + assert loadFactor > 0 && loadFactor < 1 : "load factor must be between 0 and 1"; + + this.loadFactor = loadFactor; + this.hasher = hasher; + this.bigArrays = bigArrays; + + capacity = Numbers.nextPowerOfTwo((long) (initialCapacity / loadFactor)); + assert capacity <= MAX_CAPACITY : "required capacity too large"; + mask = capacity - 1; + size = 0; + grow = (long) (capacity * loadFactor); + + table = bigArrays.newLongArray(capacity, false); + table.fill(0, capacity, -1); + offsets = bigArrays.newLongArray(initialCapacity + 1, false); + offsets.set(0, 0); + keys = bigArrays.newByteArray(initialCapacity * 3, false); + hashes = bigArrays.newLongArray(initialCapacity, false); } /** - * Get the id associated with <code>key</code> + * Adds the given key to the hash table and returns its ordinal. + * If the key exists already, it returns (-1 - ordinal). */ - public long find(BytesRef key, int code) { - final long slot = slot(rehash(code), mask); - for (long index = slot;; index = nextSlot(index, mask)) { - final long id = id(index); - if (id == -1L || key.bytesEquals(get(id, spare))) { - return id; + public long add(final BytesRef key) { + final long hash = hasher.hash(key); + final long fingerprint = hash & MASK_FINGERPRINT; + + for (long idx = hash & mask, value, ordinal;; idx = (idx + 1) & mask) { + if ((value = table.get(idx)) == -1) { + final long val = fingerprint | size; + if (size >= grow) { + growAndInsert(hash, val); + } else { + table.set(idx, val); + } + return append(key, hash); + } else if (((value & MASK_FINGERPRINT) == fingerprint) && key.bytesEquals(get(ordinal = (value & MASK_ORDINAL), scratch))) { + return -1 - ordinal; } } } - /** Sugar for {@link #find(BytesRef, int) find(key, key.hashCode()} */ - public long find(BytesRef key) { - return find(key, key.hashCode()); - } + /** + * Returns the ordinal associated with the given key, or -1 if the key doesn't exist. + * + * <p> + * Using the 64-bit hash value, up to 32 least significant bits (LSB) are used to identify the + * home slot in the hash table, and an additional 32 bits are used to identify the fingerprint. + * The fingerprint further increases the entropy and reduces the number of false lookups in the + * keys' table during equality checks, which is expensive. + * + * <p> + * Total entropy bits = 32 + log2(capacity) + * + * <p> + * Linear probing starts from the home slot, until a match or an empty slot is found. + * Values are first checked using their fingerprint (to reduce false positives), then verified + * in the keys' table using an equality check. 
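The fingerprint check described above is pure bit arithmetic; a standalone sketch using the same mask constants defined in this hunk:

public class FingerprintPackingSketch {
    // Constants copied from the BytesRefHash hunk above.
    private static final long MASK_ORDINAL = 0x00000000FFFFFFFFL;
    private static final long MASK_FINGERPRINT = 0xFFFFFFFF00000000L;

    public static void main(String[] args) {
        long hash = 0xDEADBEEFCAFEBABEL; // 64-bit hash of some key
        long ordinal = 5L;               // insertion order of that key

        // Encode: upper 32 bits hold the fingerprint, lower 32 bits the ordinal.
        long value = (hash & MASK_FINGERPRINT) | ordinal;

        // Decode both halves without touching the keys' table.
        long fingerprint = value & MASK_FINGERPRINT;
        long decodedOrdinal = value & MASK_ORDINAL;
        System.out.println(fingerprint == (hash & MASK_FINGERPRINT)); // true
        System.out.println(decodedOrdinal == ordinal);                // true

        // Lookup fast path: a fingerprint mismatch rules out the slot without
        // the expensive BytesRef equality check.
        long otherHash = 0x1234567800000000L;
        System.out.println((value & MASK_FINGERPRINT) == (otherHash & MASK_FINGERPRINT)); // false
    }
}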
+ */ + public long find(final BytesRef key) { + final long hash = hasher.hash(key); + final long fingerprint = hash & MASK_FINGERPRINT; - private long set(BytesRef key, int code, long id) { - assert rehash(key.hashCode()) == code; - assert size < maxSize; - final long slot = slot(code, mask); - for (long index = slot;; index = nextSlot(index, mask)) { - final long curId = id(index); - if (curId == -1) { // means unset - id(index, id); - append(id, key, code); - ++size; - return id; - } else if (key.bytesEquals(get(curId, spare))) { - return -1 - curId; + for (long idx = hash & mask, value, ordinal;; idx = (idx + 1) & mask) { + if ((value = table.get(idx)) == -1) { + return -1; + } else if (((value & MASK_FINGERPRINT) == fingerprint) && key.bytesEquals(get(ordinal = (value & MASK_ORDINAL), scratch))) { + return ordinal; } } } - private void append(long id, BytesRef key, int code) { - assert size == id; - final long startOffset = startOffsets.get(size); - bytes = bigArrays.grow(bytes, startOffset + key.length); - bytes.set(startOffset, key.bytes, key.offset, key.length); - startOffsets = bigArrays.grow(startOffsets, size + 2); - startOffsets.set(size + 1, startOffset + key.length); - hashes = bigArrays.grow(hashes, id + 1); - hashes.set(id, code); + /** + * Returns the key associated with the given ordinal. + * The result is undefined for an unused ordinal. + * + * <p> + * Beware that the content of the {@link BytesRef} may become invalid as soon as {@link #close()} is called + */ + public BytesRef get(final long ordinal, final BytesRef dest) { + final long start = offsets.get(ordinal); + final int length = (int) (offsets.get(ordinal + 1) - start); + keys.get(start, length, dest); + return dest; } - private boolean assertConsistent(long id, int code) { - get(id, spare); - return rehash(spare.hashCode()) == code; + /** + * Returns the number of mappings in this hash table. + */ + public long size() { + return size; } - private void reset(int code, long id) { - assert assertConsistent(id, code); - final long slot = slot(code, mask); - for (long index = slot;; index = nextSlot(index, mask)) { - final long curId = id(index); - if (curId == -1) { // means unset - id(index, id); - break; - } - } + /** + * Appends the key in the keys' and offsets' tables. + */ + private long append(final BytesRef key, final long hash) { + final long start = offsets.get(size); + final long end = start + key.length; + offsets = bigArrays.grow(offsets, size + 2); + offsets.set(size + 1, end); + keys = bigArrays.grow(keys, end); + keys.set(start, key.bytes, key.offset, key.length); + hashes = bigArrays.grow(hashes, size + 1); + hashes.set(size, hash); + return size++; } /** - * Try to add <code>key</code>. Return its newly allocated id if it wasn't in the hash table yet, or <code>-1-id</code> - * if it was already present in the hash table. + * Grows the hash table by doubling its capacity, inserting the provided value, + * and reinserting the previous values at their updated slots. */ - public long add(BytesRef key, int code) { - if (size >= maxSize) { - assert size == maxSize; - grow(); - } - assert size < maxSize; - return set(key, rehash(code), size); - } + private void growAndInsert(final long hash, final long value) { + // Ensure that the hash table doesn't grow too large. + // This implicitly also ensures that the ordinals are no larger than 2^32, thus, + // preventing them from polluting the fingerprint bits in the hash table values. 
+ assert capacity < MAX_CAPACITY : "hash table already at the max capacity"; + + capacity <<= 1; + mask = capacity - 1; + grow = (long) (capacity * loadFactor); + table = bigArrays.grow(table, capacity); + table.fill(0, capacity, -1); + table.set(hash & mask, value); - /** Sugar to {@link #add(BytesRef, int) add(key, key.hashCode()}. */ - public long add(BytesRef key) { - return add(key, key.hashCode()); + for (long ordinal = 0; ordinal < size; ordinal++) { + reinsert(ordinal, hashes.get(ordinal)); + } } - @Override - protected void removeAndAdd(long index) { - final long id = id(index, -1); - assert id >= 0; - final int code = hashes.get(id); - reset(code, id); + /** + * Reinserts the hash table value for an existing key stored at the given ordinal. + */ + private void reinsert(final long ordinal, final long hash) { + for (long idx = hash & mask;; idx = (idx + 1) & mask) { + if (table.get(idx) == -1) { + table.set(idx, (hash & MASK_FINGERPRINT) | ordinal); + return; + } + } } @Override public void close() { - try (Releasable releasable = Releasables.wrap(bytes, hashes, startOffsets)) { - super.close(); - } + Releasables.close(table, offsets, keys, hashes); } + /** + * Hasher calculates the hash of a {@link BytesRef} key. + */ + @FunctionalInterface + public interface Hasher { + long hash(BytesRef key); + } } diff --git a/server/src/main/java/org/opensearch/common/util/CancellableThreads.java b/server/src/main/java/org/opensearch/common/util/CancellableThreads.java index 8bc3ca3affb12..ea206ddf980cb 100644 --- a/server/src/main/java/org/opensearch/common/util/CancellableThreads.java +++ b/server/src/main/java/org/opensearch/common/util/CancellableThreads.java @@ -35,6 +35,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.common.Nullable; import org.opensearch.common.SetOnce; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import java.io.IOException; @@ -45,16 +46,17 @@ * A utility class for multi-threaded operations that need to be cancellable via interrupts. Every cancellable operation should be * executed via {@link #execute(Interruptible)}, which will capture the executing thread and make sure it is interrupted in the case * of cancellation. - * + * <p> * Cancellation policy: This class does not support external interruption via <code>Thread#interrupt()</code>. Always use #cancel() instead. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CancellableThreads { private final Set<Thread> threads = new HashSet<>(); // needs to be volatile as it is also read outside of synchronized blocks.
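Since `CancellableThreads` and its nested interfaces are promoted to `@PublicApi` in this hunk, a hedged usage sketch may help; the work inside `execute(..)` is invented for illustration:

    CancellableThreads cancellableThreads = new CancellableThreads();
    List<Runnable> steps = List.of(() -> {});    // hypothetical list of work items

    // On the worker thread: execute(..) records the current thread so cancel(..) can interrupt it.
    cancellableThreads.execute(() -> {
        for (Runnable step : steps) {
            cancellableThreads.checkForCancel(); // throws ExecutionCancelledException once cancelled
            step.run();
        }
    });

    // From any other thread: call cancel(..) rather than Thread#interrupt(), per the class javadoc.
    cancellableThreads.cancel("shutting down");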
- private volatile boolean cancelled = false; private final SetOnce<OnCancel> onCancel = new SetOnce<>(); + private volatile boolean cancelled = false; private String reason; public synchronized boolean isCancelled() { @@ -183,8 +185,9 @@ public synchronized void cancel(String reason) { /** * Interruptible interface * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface Interruptible extends IOInterruptible { void run() throws InterruptedException; } @@ -192,8 +195,9 @@ public interface Interruptible extends IOInterruptible { /** * IO Interruptible * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface IOInterruptible { void run() throws IOException, InterruptedException; } @@ -224,9 +228,10 @@ public synchronized void setOnCancel(OnCancel onCancel) { /** * Called when a thread is cancelled * - * @opensearch.internal + * @opensearch.api */ @FunctionalInterface + @PublicApi(since = "1.0.0") public interface OnCancel { /** * Called when some running operations are cancelled or {@link #checkForCancel()} is explicitly called. diff --git a/server/src/main/java/org/opensearch/common/util/Countable.java b/server/src/main/java/org/opensearch/common/util/Countable.java index 1adf6d2fb015c..925d1b6066a99 100644 --- a/server/src/main/java/org/opensearch/common/util/Countable.java +++ b/server/src/main/java/org/opensearch/common/util/Countable.java @@ -32,11 +32,14 @@ package org.opensearch.common.util; +import org.opensearch.common.annotation.PublicApi; + /** * Base countable interface. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface Countable { int size(); } diff --git a/server/src/main/java/org/opensearch/common/util/CuckooFilter.java b/server/src/main/java/org/opensearch/common/util/CuckooFilter.java index 0c792b37ccfa9..28b55f70855d6 100644 --- a/server/src/main/java/org/opensearch/common/util/CuckooFilter.java +++ b/server/src/main/java/org/opensearch/common/util/CuckooFilter.java @@ -46,33 +46,33 @@ /** * An approximate set membership datastructure - * + * <p> * CuckooFilters are similar to Bloom Filters in usage; values are inserted, and the Cuckoo * can be asked if it has seen a particular value before. Because the structure is approximate, * it can return false positives (says it has seen an item when it has not). False negatives * are not possible though; if the structure says it _has not_ seen an item, that can be * trusted. - * + * <p> * The filter can "saturate" at which point the map has hit its configured load factor (or near enough * that a large number of evictions are not able to find a free slot) and will refuse to accept * any new insertions. - * + * <p> * NOTE: this version does not support deletions, and as such does not save duplicate * fingerprints (e.g. when inserting, if the fingerprint is already present in the * candidate buckets, it is not inserted). By not saving duplicates, the CuckooFilter * loses the ability to delete values. But by not allowing deletions, we can save space * (do not need to waste slots on duplicate fingerprints), and we do not need to worry * about inserts "overflowing" a bucket because the same item has been inserted repeatedly - * + * <p> * NOTE: this CuckooFilter exposes a number of Expert APIs which assume the caller has * intimate knowledge about how the algorithm works. It is recommended to use * {@link SetBackedScalingCuckooFilter} instead. - * + * <p> * Based on the paper: - * + * <p> * Fan, Bin, et al.
"Cuckoo filter: Practically better than bloom." * Proceedings of the 10th ACM International on Conference on emerging Networking Experiments and Technologies. ACM, 2014. - * + * <p> * https://www.cs.cmu.edu/~dga/papers/cuckoo-conext2014.pdf * * @opensearch.internal @@ -200,7 +200,7 @@ public int getCount() { /** * Returns the number of buckets that has been chosen based * on the initial configuration - * + * <p> * Expert-level API */ int getNumBuckets() { @@ -209,7 +209,7 @@ int getNumBuckets() { /** * Returns the number of bits used per entry - * + * <p> * Expert-level API */ int getBitsPerEntry() { @@ -220,7 +220,7 @@ int getBitsPerEntry() { * Returns the cached fingerprint mask. This is simply a mask for the * first bitsPerEntry bits, used by {@link CuckooFilter#fingerprint(int, int, int)} * to generate the fingerprint of a hash - * + * <p> * Expert-level API */ int getFingerprintMask() { @@ -230,7 +230,7 @@ int getFingerprintMask() { /** * Returns an iterator that returns the long[] representation of each bucket. The value * inside each long will be a fingerprint (or 0L, representing empty). - * + * <p> * Expert-level API */ Iterator<long[]> getBuckets() { @@ -267,7 +267,7 @@ boolean mightContain(long hash) { /** * Returns true if the bucket or it's alternate bucket contains the fingerprint. - * + * <p> * Expert-level API, use {@link CuckooFilter#mightContain(long)} to check if * a value is in the filter. */ @@ -307,7 +307,7 @@ boolean add(long hash) { /** * Attempts to merge the fingerprint into the specified bucket or it's alternate bucket. * Returns true if the insertion was successful, false if the filter is saturated. - * + * <p> * Expert-level API, use {@link CuckooFilter#add(long)} to insert * values into the filter */ @@ -351,7 +351,7 @@ boolean mergeFingerprint(int bucket, int fingerprint) { * Low-level insert method. Attempts to write the fingerprint into an empty entry * at this bucket's position. Returns true if that was sucessful, false if all entries * were occupied. - * + * <p> * If the fingerprint already exists in one of the entries, it will not duplicate the * fingerprint like the original paper. This means the filter _cannot_ support deletes, * but is not sensitive to "overflowing" buckets with repeated inserts @@ -376,10 +376,10 @@ private boolean tryInsert(int bucket, int fingerprint) { /** * Converts a hash into a bucket index (primary or alternate). - * + * <p> * If the hash is negative, this flips the bits. The hash is then modulo numBuckets * to get the final index. - * + * <p> * Expert-level API */ static int hashToIndex(int hash, int numBuckets) { @@ -388,16 +388,16 @@ static int hashToIndex(int hash, int numBuckets) { /** * Calculates the alternate bucket for a given bucket:fingerprint tuple - * + * <p> * The alternate bucket is the fingerprint multiplied by a mixing constant, * then xor'd against the bucket. This new value is modulo'd against * the buckets via {@link CuckooFilter#hashToIndex(int, int)} to get the final * index. - * + * <p> * Note that the xor makes this operation reversible as long as we have the * fingerprint and current bucket (regardless of if that bucket was the primary * or alternate). - * + * <p> * Expert-level API */ static int alternateIndex(int bucket, int fingerprint, int numBuckets) { @@ -424,10 +424,10 @@ private int getOffset(int bucket, int position) { /** * Calculates the fingerprint for a given hash. - * + * <p> * The fingerprint is simply the first `bitsPerEntry` number of bits that are non-zero. 
* If the entire hash is zero, `(int) 1` is used - * + * <p> * Expert-level API */ static int fingerprint(int hash, int bitsPerEntry, int fingerprintMask) { @@ -501,7 +501,7 @@ private double getLoadFactor(int b) { * Calculates the optimal number of buckets for this filter. The xor used in the bucketing * algorithm requires this to be a power of two, so the optimal number of buckets will * be rounded to the next largest power of two where applicable. - * + * <p> * TODO: there are schemes to avoid powers of two, might want to investigate those */ private int getNumBuckets(long capacity, double loadFactor, int b) { diff --git a/server/src/main/java/org/opensearch/common/util/DoubleArray.java b/server/src/main/java/org/opensearch/common/util/DoubleArray.java index ae66902f6f795..aa9238634f2c7 100644 --- a/server/src/main/java/org/opensearch/common/util/DoubleArray.java +++ b/server/src/main/java/org/opensearch/common/util/DoubleArray.java @@ -32,13 +32,15 @@ package org.opensearch.common.util; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.util.BigArray; /** * Abstraction of an array of double values. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface DoubleArray extends BigArray { /** diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index e2663b56c5cca..9e202a5bfd143 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -20,18 +20,10 @@ * @opensearch.internal */ public class FeatureFlags { - - /** - * Gates the visibility of the segment replication experimental features that allows users to test unreleased beta features. - */ - public static final String SEGMENT_REPLICATION_EXPERIMENTAL = - "opensearch.experimental.feature.segment_replication_experimental.enabled"; - /** - * Gates the visibility of the index setting that allows persisting data to remote store along with local disk. - * Once the feature is ready for production release, this feature flag can be removed. + * Gates the visibility of the remote store migration support from docrep. */ - public static final String REMOTE_STORE = "opensearch.experimental.feature.remote_store.enabled"; + public static final String REMOTE_STORE_MIGRATION_EXPERIMENTAL = "opensearch.experimental.feature.remote_store.migration.enabled"; /** * Gates the ability for Searchable Snapshots to read snapshots that are older than the @@ -52,15 +44,31 @@ public class FeatureFlags { public static final String IDENTITY = "opensearch.experimental.feature.identity.enabled"; /** - * Gates the functionality of concurrently searching the segments + * Gates the functionality of the telemetry framework. + */ + public static final String TELEMETRY = "opensearch.experimental.feature.telemetry.enabled"; + + /** + * Gates the optimization of datetime formatters caching along with change in default datetime formatter. + */ + public static final String DATETIME_FORMATTER_CACHING = "opensearch.experimental.optimization.datetime_formatter_caching.enabled"; + + /** + * Gates the functionality of writeable remote index * Once the feature is ready for release, this feature flag can be removed.
*/ - public static final String CONCURRENT_SEGMENT_SEARCH = "opensearch.experimental.feature.concurrent_segment_search.enabled"; + public static final String WRITEABLE_REMOTE_INDEX = "opensearch.experimental.feature.writeable_remote_index.enabled"; /** - * Gates the functionality of telemetry framework. + * Gates the optimization to enable bloom filters for doc id lookup. */ - public static final String TELEMETRY = "opensearch.experimental.feature.telemetry.enabled"; + public static final String DOC_ID_FUZZY_SET = "opensearch.experimental.optimize_doc_id_lookup.fuzzy_set.enabled"; + + /** + * Gates the functionality of pluggable cache. + * Enables OpenSearch to use pluggable caches with respective store names via setting. + */ + public static final String PLUGGABLE_CACHE = "opensearch.experimental.feature.pluggable.caching.enabled"; /** * Should store the settings from opensearch.yml. @@ -90,23 +98,42 @@ public static boolean isEnabled(String featureFlagName) { return settings != null && settings.getAsBoolean(featureFlagName, false); } - public static final Setting<Boolean> SEGMENT_REPLICATION_EXPERIMENTAL_SETTING = Setting.boolSetting( - SEGMENT_REPLICATION_EXPERIMENTAL, + public static boolean isEnabled(Setting<Boolean> featureFlag) { + if ("true".equalsIgnoreCase(System.getProperty(featureFlag.getKey()))) { + // TODO: Remove the if condition once FeatureFlags are only supported via opensearch.yml + return true; + } else if (settings != null) { + return featureFlag.get(settings); + } else { + return featureFlag.getDefault(Settings.EMPTY); + } + } + + public static final Setting<Boolean> REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING = Setting.boolSetting( + REMOTE_STORE_MIGRATION_EXPERIMENTAL, false, Property.NodeScope ); - public static final Setting<Boolean> REMOTE_STORE_SETTING = Setting.boolSetting(REMOTE_STORE, false, Property.NodeScope); - public static final Setting<Boolean> EXTENSIONS_SETTING = Setting.boolSetting(EXTENSIONS, false, Property.NodeScope); public static final Setting<Boolean> IDENTITY_SETTING = Setting.boolSetting(IDENTITY, false, Property.NodeScope); public static final Setting<Boolean> TELEMETRY_SETTING = Setting.boolSetting(TELEMETRY, false, Property.NodeScope); - public static final Setting<Boolean> CONCURRENT_SEGMENT_SEARCH_SETTING = Setting.boolSetting( - CONCURRENT_SEGMENT_SEARCH, + public static final Setting<Boolean> DATETIME_FORMATTER_CACHING_SETTING = Setting.boolSetting( + DATETIME_FORMATTER_CACHING, + true, + Property.NodeScope + ); + + public static final Setting<Boolean> WRITEABLE_REMOTE_INDEX_SETTING = Setting.boolSetting( + WRITEABLE_REMOTE_INDEX, false, Property.NodeScope ); + + public static final Setting<Boolean> DOC_ID_FUZZY_SET_SETTING = Setting.boolSetting(DOC_ID_FUZZY_SET, false, Property.NodeScope); + + public static final Setting<Boolean> PLUGGABLE_CACHE_SETTING = Setting.boolSetting(PLUGGABLE_CACHE, false, Property.NodeScope); } diff --git a/server/src/main/java/org/opensearch/common/util/FloatArray.java b/server/src/main/java/org/opensearch/common/util/FloatArray.java index 81f6bebb7508c..bda7324d19273 100644 --- a/server/src/main/java/org/opensearch/common/util/FloatArray.java +++ b/server/src/main/java/org/opensearch/common/util/FloatArray.java @@ -32,13 +32,15 @@ package org.opensearch.common.util; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.util.BigArray; /** * Abstraction of an array of float values.
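The new `isEnabled(Setting<Boolean>)` overload in the FeatureFlags hunk above gives every flag a three-step resolution order. A short usage sketch; "my.experimental.flag" is a made-up key used only for illustration:

    // Precedence, per the implementation above:
    //   1. -Dmy.experimental.flag=true on the JVM command line  -> enabled
    //   2. otherwise, the value captured from opensearch.yml    -> that value
    //   3. otherwise                                            -> the setting's declared default
    Setting<Boolean> flag = Setting.boolSetting("my.experimental.flag", false, Property.NodeScope);
    boolean enabled = FeatureFlags.isEnabled(flag);

Unlike the older String-based `isEnabled`, this overload honors a `true` default when no settings have been loaded yet, which is what lets `DATETIME_FORMATTER_CACHING_SETTING` default to enabled.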
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface FloatArray extends BigArray { /** diff --git a/server/src/main/java/org/opensearch/common/util/IntArray.java b/server/src/main/java/org/opensearch/common/util/IntArray.java index eb43e013f3758..ad0173904f74a 100644 --- a/server/src/main/java/org/opensearch/common/util/IntArray.java +++ b/server/src/main/java/org/opensearch/common/util/IntArray.java @@ -32,13 +32,15 @@ package org.opensearch.common.util; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.util.BigArray; /** * Abstraction of an array of integer values. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface IntArray extends BigArray { /** diff --git a/server/src/main/java/org/opensearch/common/util/LongArray.java b/server/src/main/java/org/opensearch/common/util/LongArray.java index 0c32effd04cdc..c2f649e49f470 100644 --- a/server/src/main/java/org/opensearch/common/util/LongArray.java +++ b/server/src/main/java/org/opensearch/common/util/LongArray.java @@ -32,13 +32,15 @@ package org.opensearch.common.util; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.util.BigArray; /** * Abstraction of an array of long values. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface LongArray extends BigArray { /** diff --git a/server/src/main/java/org/opensearch/common/util/ObjectArray.java b/server/src/main/java/org/opensearch/common/util/ObjectArray.java index 9c882742b56ab..f3f73b7e37485 100644 --- a/server/src/main/java/org/opensearch/common/util/ObjectArray.java +++ b/server/src/main/java/org/opensearch/common/util/ObjectArray.java @@ -32,13 +32,15 @@ package org.opensearch.common.util; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.util.BigArray; /** * Abstraction of an array of object values. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ObjectArray<T> extends BigArray { /** diff --git a/server/src/main/java/org/opensearch/common/util/PageCacheRecycler.java b/server/src/main/java/org/opensearch/common/util/PageCacheRecycler.java index f8c690471fb61..b6fd385d25082 100644 --- a/server/src/main/java/org/opensearch/common/util/PageCacheRecycler.java +++ b/server/src/main/java/org/opensearch/common/util/PageCacheRecycler.java @@ -33,14 +33,14 @@ package org.opensearch.common.util; import org.apache.lucene.util.RamUsageEstimator; -import org.opensearch.core.common.bytes.PagedBytesReference; import org.opensearch.common.recycler.AbstractRecyclerC; import org.opensearch.common.recycler.Recycler; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.core.common.bytes.PagedBytesReference; +import org.opensearch.core.common.unit.ByteSizeValue; import java.util.Arrays; import java.util.Locale; diff --git a/server/src/main/java/org/opensearch/common/util/ReorganizingLongHash.java b/server/src/main/java/org/opensearch/common/util/ReorganizingLongHash.java index 417eb6a316d86..fe053a26329e4 100644 --- a/server/src/main/java/org/opensearch/common/util/ReorganizingLongHash.java +++ b/server/src/main/java/org/opensearch/common/util/ReorganizingLongHash.java @@ -8,7 +8,10 @@ package org.opensearch.common.util; +import org.opensearch.common.Numbers; +import org.opensearch.common.annotation.InternalApi; import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; /** * Specialized hash table implementation that maps a (primitive) long to long. 
@@ -24,6 +27,7 @@ * * @opensearch.internal */ +@InternalApi public class ReorganizingLongHash implements Releasable { private static final long MAX_CAPACITY = 1L << 32; private static final long DEFAULT_INITIAL_CAPACITY = 32; @@ -109,14 +113,22 @@ public ReorganizingLongHash(final long initialCapacity, final float loadFactor, this.bigArrays = bigArrays; this.loadFactor = loadFactor; - capacity = nextPowerOfTwo((long) (initialCapacity / loadFactor)); + capacity = Numbers.nextPowerOfTwo((long) (initialCapacity / loadFactor)); + assert capacity <= MAX_CAPACITY : "required capacity too large"; mask = capacity - 1; grow = (long) (capacity * loadFactor); size = 0; - - table = bigArrays.newLongArray(capacity, false); - table.fill(0, capacity, -1); // -1 represents an empty slot - keys = bigArrays.newLongArray(initialCapacity, false); + try { + table = bigArrays.newLongArray(capacity, false); + table.fill(0, capacity, -1); // -1 represents an empty slot + keys = bigArrays.newLongArray(initialCapacity, false); + } finally { + if (table == null || keys == null) { + // it's important to close the arrays initialized above to prevent a memory leak + // refer: https://github.com/opensearch-project/OpenSearch/issues/10154 + Releasables.closeWhileHandlingException(table, keys); + } + } } /** @@ -296,11 +308,6 @@ private void grow() { @Override public void close() { - table.close(); - keys.close(); - } - - private static long nextPowerOfTwo(final long value) { - return Math.max(1, Long.highestOneBit(value - 1) << 1); + Releasables.close(table, keys); } } diff --git a/server/src/main/java/org/opensearch/common/util/SetBackedScalingCuckooFilter.java b/server/src/main/java/org/opensearch/common/util/SetBackedScalingCuckooFilter.java index e99eb751babe8..a635160844159 100644 --- a/server/src/main/java/org/opensearch/common/util/SetBackedScalingCuckooFilter.java +++ b/server/src/main/java/org/opensearch/common/util/SetBackedScalingCuckooFilter.java @@ -75,7 +75,7 @@ public class SetBackedScalingCuckooFilter implements Writeable { * This set is used to track the insertions before we convert over to an approximate * filter. This gives us 100% accuracy for small cardinalities. This will be null * if isSetMode = false; - * + * <p> * package-private for testing */ Set<Long> hashes; @@ -178,7 +178,7 @@ public void writeTo(StreamOutput out) throws IOException { /** * Registers a circuit breaker with the datastructure. - * + * <p> * CuckooFilters can "saturate" and refuse to accept any new values. When this happens, * the datastructure scales by adding a new filter. This new filter's bytes will be tracked * in the registered breaker when configured. diff --git a/server/src/main/java/org/opensearch/common/util/TokenBucket.java b/server/src/main/java/org/opensearch/common/util/TokenBucket.java index d2e7e836bf07f..a9ebb86eed8a2 100644 --- a/server/src/main/java/org/opensearch/common/util/TokenBucket.java +++ b/server/src/main/java/org/opensearch/common/util/TokenBucket.java @@ -20,7 +20,7 @@ public class TokenBucket { /** * Defines a monotonically increasing counter. - * + * <p> * Usage examples: * 1. clock = System::nanoTime can be used to perform rate-limiting per unit time * 2.
clock = AtomicLong::get can be used to perform rate-limiting per unit number of operations diff --git a/server/src/main/java/org/opensearch/common/util/URIPattern.java b/server/src/main/java/org/opensearch/common/util/URIPattern.java index a3c385e5ea660..49e4b53e20740 100644 --- a/server/src/main/java/org/opensearch/common/util/URIPattern.java +++ b/server/src/main/java/org/opensearch/common/util/URIPattern.java @@ -39,9 +39,9 @@ /** * URI Pattern matcher - * + * <p> * The pattern is a URI in which the authority, path, query and fragment can be replaced with simple patterns. - * + * <p> * For example: foobar://*.local/some_path/*?*#* will match all URIs with schema foobar in the local domain * with any port, with a path that starts with some_path, and with any query and fragment. * diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/AbstractLifecycleRunnable.java b/server/src/main/java/org/opensearch/common/util/concurrent/AbstractLifecycleRunnable.java index b55280d43a473..12fe437b390bf 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/AbstractLifecycleRunnable.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/AbstractLifecycleRunnable.java @@ -32,7 +32,7 @@ package org.opensearch.common.util.concurrent; import org.apache.logging.log4j.Logger; -import org.opensearch.common.component.Lifecycle; +import org.opensearch.common.lifecycle.Lifecycle; import java.util.Objects; diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/AsyncIOProcessor.java b/server/src/main/java/org/opensearch/common/util/concurrent/AsyncIOProcessor.java index e9b9442c555e5..7f6fef83b34a6 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/AsyncIOProcessor.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/AsyncIOProcessor.java @@ -32,6 +32,7 @@ package org.opensearch.common.util.concurrent; import org.apache.logging.log4j.Logger; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; import java.io.IOException; @@ -50,8 +51,9 @@ * hijack a worker if nobody else is currently processing queued items. If the internal queue has reached its capacity incoming threads * might be blocked until other items are processed * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class AsyncIOProcessor<Item> { private final Logger logger; private final ArrayBlockingQueue<Tuple<Item, Consumer<Exception>>> queue; diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/AtomicArray.java b/server/src/main/java/org/opensearch/common/util/concurrent/AtomicArray.java index dd1f71a7d2166..4c599aeae7a7c 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/AtomicArray.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/AtomicArray.java @@ -33,6 +33,7 @@ package org.opensearch.common.util.concurrent; import org.opensearch.OpenSearchGenerationException; +import org.opensearch.common.annotation.PublicApi; import java.util.ArrayList; import java.util.Collections; @@ -43,8 +44,9 @@ * A list backed by an {@link AtomicReferenceArray} with potential null values, easily allowing * to get the concrete values as a list using {@link #asList()}.
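`AsyncIOProcessor`, promoted to `@PublicApi` in the hunk above, batches writes exactly as its javadoc describes: whichever caller wins the internal promise drains the queue and performs a single `write` on behalf of everyone queued. A hedged subclass sketch (`logger` and `threadContext` are assumed to be in scope, and the write body is invented):

    AsyncIOProcessor<String> processor = new AsyncIOProcessor<>(logger, 1024, threadContext) {
        @Override
        protected void write(List<Tuple<String, Consumer<Exception>>> candidates) throws IOException {
            // One shared I/O pass (e.g. a single fsync) covers every queued item.
            for (Tuple<String, Consumer<Exception>> candidate : candidates) {
                // ... persist candidate.v1() ...
            }
        }
    };

    // Many threads may call put(..) concurrently; each listener receives null on success.
    processor.put("operation-1", failure -> { /* failure == null means success */ });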
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class AtomicArray<E> { private final AtomicReferenceArray<E> array; private volatile List<E> nonNullList; diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/BufferedAsyncIOProcessor.java b/server/src/main/java/org/opensearch/common/util/concurrent/BufferedAsyncIOProcessor.java index 7079aa705d126..be2029b2e7c62 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/BufferedAsyncIOProcessor.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/BufferedAsyncIOProcessor.java @@ -92,4 +92,8 @@ private TimeValue getBufferInterval() { protected abstract String getBufferProcessThreadPoolName(); + // Exclusively for testing, please do not use it elsewhere. + public Supplier<TimeValue> getBufferIntervalSupplier() { + return bufferIntervalSupplier; + } } diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ListenableFuture.java b/server/src/main/java/org/opensearch/common/util/concurrent/ListenableFuture.java index cc865022e1e8a..4357254176358 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/ListenableFuture.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ListenableFuture.java @@ -32,10 +32,10 @@ package org.opensearch.common.util.concurrent; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.action.support.ContextPreservingActionListener; import org.opensearch.common.collect.Tuple; +import org.opensearch.core.action.ActionListener; import java.util.ArrayList; import java.util.List; @@ -73,7 +73,7 @@ public void addListener(ActionListener<V> listener, ExecutorService executor) { * notified of a response or exception in a runnable submitted to the ExecutorService provided. * If the future has completed, the listener will be notified immediately without forking to * a different thread. - * + * <p> * It will apply the provided ThreadContext (if not null) when executing the listener. */ public void addListener(ActionListener<V> listener, ExecutorService executor, ThreadContext threadContext) { diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java b/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java index ec1024bbe5f30..6e45c3fb7b58d 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java @@ -393,6 +393,7 @@ static class OpenSearchThreadFactory implements ThreadFactory { final AtomicInteger threadNumber = new AtomicInteger(1); final String namePrefix; + @SuppressWarnings("removal") OpenSearchThreadFactory(String namePrefix) { this.namePrefix = namePrefix; SecurityManager s = System.getSecurityManager(); @@ -446,6 +447,30 @@ public boolean offer(E e) { } } + /** + * Workaround for https://bugs.openjdk.org/browse/JDK-8323659 regression, introduced in JDK-21.0.2. + */ + @Override + public void put(E e) { + super.offer(e); + } + + /** + * Workaround for https://bugs.openjdk.org/browse/JDK-8323659 regression, introduced in JDK-21.0.2. + */ + @Override + public boolean offer(E e, long timeout, TimeUnit unit) { + return super.offer(e); + } + + /** + * Workaround for https://bugs.openjdk.org/browse/JDK-8323659 regression, introduced in JDK-21.0.2.
+ */ + @Override + public boolean add(E e) { + return super.offer(e); + } + } /** diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchThreadPoolExecutor.java b/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchThreadPoolExecutor.java index d967b7423ca80..afffec4790873 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchThreadPoolExecutor.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchThreadPoolExecutor.java @@ -205,4 +205,15 @@ protected Runnable wrapRunnable(Runnable command) { protected Runnable unwrap(Runnable runnable) { return contextHolder.unwrap(runnable); } + + /** + * Returns the cumulative wait time of the ThreadPool. If the ThreadPool does not support tracking the cumulative pool wait time + * then this should return -1 which will prevent the value from showing up in {@link org.opensearch.threadpool.ThreadPoolStats}. + * ThreadPools that do support this metric should override this method. For example, {@link QueueResizingOpenSearchThreadPoolExecutor} + * does so using the {@link TimedRunnable} to get the difference between Runnable creation and execution. + * + */ + public long getPoolWaitTimeNanos() { + return -1; + } } diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/PrioritizedOpenSearchThreadPoolExecutor.java b/server/src/main/java/org/opensearch/common/util/concurrent/PrioritizedOpenSearchThreadPoolExecutor.java index b4673d9534922..95df4486b9d7b 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/PrioritizedOpenSearchThreadPoolExecutor.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/PrioritizedOpenSearchThreadPoolExecutor.java @@ -118,8 +118,9 @@ private void addPending(List<Runnable> runnables, List<Pending> pending, boolean TieBreakingPrioritizedRunnable t = (TieBreakingPrioritizedRunnable) runnable; Runnable innerRunnable = t.runnable; if (innerRunnable != null) { - /** innerRunnable can be null if task is finished but not removed from executor yet, - * see {@link TieBreakingPrioritizedRunnable#run} and {@link TieBreakingPrioritizedRunnable#runAndClean} + /* + innerRunnable can be null if task is finished but not removed from executor yet, + see {@link TieBreakingPrioritizedRunnable#run} and {@link TieBreakingPrioritizedRunnable#runAndClean} */ pending.add(new Pending(super.unwrap(innerRunnable), t.priority(), t.insertionOrder, executing)); } diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/QueueResizableOpenSearchThreadPoolExecutor.java b/server/src/main/java/org/opensearch/common/util/concurrent/QueueResizableOpenSearchThreadPoolExecutor.java index 7a0ce8244efe4..c06184f5a5483 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/QueueResizableOpenSearchThreadPoolExecutor.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/QueueResizableOpenSearchThreadPoolExecutor.java @@ -9,6 +9,7 @@ package org.opensearch.common.util.concurrent; import org.opensearch.common.ExponentiallyWeightedMovingAverage; +import org.opensearch.common.metrics.CounterMetric; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; @@ -27,6 +28,7 @@ public final class QueueResizableOpenSearchThreadPoolExecutor extends OpenSearch private final ResizableBlockingQueue<Runnable> workQueue; private final Function<Runnable, WrappedRunnable> runnableWrapper; private final ExponentiallyWeightedMovingAverage executionEWMA; + private final 
CounterMetric poolWaitTime; /** * Create new resizable at runtime thread pool executor @@ -101,6 +103,7 @@ public final class QueueResizableOpenSearchThreadPoolExecutor extends OpenSearch this.workQueue = workQueue; this.runnableWrapper = runnableWrapper; this.executionEWMA = new ExponentiallyWeightedMovingAverage(ewmaAlpha, 0); + this.poolWaitTime = new CounterMetric(); } @Override @@ -156,6 +159,7 @@ protected void afterExecute(Runnable r, Throwable t) { // taskExecutionNanos may be -1 if the task threw an exception executionEWMA.addValue(taskExecutionNanos); } + poolWaitTime.inc(timedRunnable.getWaitTimeNanos()); } /** @@ -173,4 +177,9 @@ public synchronized int resize(int capacity) { capacity ); } + + @Override + public long getPoolWaitTimeNanos() { + return poolWaitTime.count(); + } } diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/QueueResizingOpenSearchThreadPoolExecutor.java b/server/src/main/java/org/opensearch/common/util/concurrent/QueueResizingOpenSearchThreadPoolExecutor.java index 336c605e1a590..0c0b437e4f390 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/QueueResizingOpenSearchThreadPoolExecutor.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/QueueResizingOpenSearchThreadPoolExecutor.java @@ -32,10 +32,11 @@ package org.opensearch.common.util.concurrent; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.common.ExponentiallyWeightedMovingAverage; +import org.opensearch.common.metrics.CounterMetric; import org.opensearch.common.unit.TimeValue; import java.util.Locale; @@ -66,6 +67,7 @@ public final class QueueResizingOpenSearchThreadPoolExecutor extends OpenSearchT private final int maxQueueSize; private final long targetedResponseTimeNanos; private final ExponentiallyWeightedMovingAverage executionEWMA; + private final CounterMetric poolWaitTime; private final AtomicLong totalTaskNanos = new AtomicLong(0); private final AtomicInteger taskCount = new AtomicInteger(0); @@ -97,6 +99,7 @@ public final class QueueResizingOpenSearchThreadPoolExecutor extends OpenSearchT this.maxQueueSize = maxQueueSize; this.targetedResponseTimeNanos = targetedResponseTime.getNanos(); this.executionEWMA = new ExponentiallyWeightedMovingAverage(EWMA_ALPHA, 0); + this.poolWaitTime = new CounterMetric(); logger.debug( "thread pool [{}] will adjust queue by [{}] when determining automatic queue size", getName(), @@ -190,6 +193,7 @@ protected void afterExecute(Runnable r, Throwable t) { // taskExecutionNanos may be -1 if the task threw an exception executionEWMA.addValue(taskExecutionNanos); } + poolWaitTime.inc(timedRunnable.getWaitTimeNanos()); if (taskCount.incrementAndGet() == this.tasksPerFrame) { final long endTimeNs = System.nanoTime(); @@ -290,4 +294,8 @@ protected void appendThreadPoolExecutorDetails(StringBuilder sb) { sb.append("adjustment amount = ").append(QUEUE_ADJUSTMENT_AMOUNT).append(", "); } + @Override + public long getPoolWaitTimeNanos() { + return poolWaitTime.count(); + } } diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ReleasableLock.java b/server/src/main/java/org/opensearch/common/util/concurrent/ReleasableLock.java index cd87b58e87180..7c0368793ca27 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/ReleasableLock.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ReleasableLock.java @@ 
-32,9 +32,9 @@ package org.opensearch.common.util.concurrent; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.Assertions; -import org.opensearch.common.lease.Releasable; import org.opensearch.index.engine.EngineException; import java.util.concurrent.locks.Lock; diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java index 1e1d70f1468dd..6580b0e0085ef 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java @@ -35,14 +35,15 @@ import org.apache.logging.log4j.Logger; import org.opensearch.action.support.ContextPreservingActionListener; import org.opensearch.client.OriginSettingClient; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.MapBuilder; import org.opensearch.common.collect.Tuple; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.http.HttpTransportSettings; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskThreadContextStatePropagator; @@ -97,8 +98,9 @@ * // previous context is restored on StoredContext#close() * </pre> * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ThreadContext implements Writeable { public static final String PREFIX = "request.headers"; @@ -143,10 +145,10 @@ public void unregisterThreadContextStatePropagator(final ThreadContextStatePropa */ public StoredContext stashContext() { final ThreadContextStruct context = threadLocal.get(); - /** - * X-Opaque-ID should be preserved in a threadContext in order to propagate this across threads. - * This is needed so the DeprecationLogger in another thread can see the value of X-Opaque-ID provided by a user. - * Otherwise when context is stash, it should be empty. + /* + X-Opaque-ID should be preserved in a threadContext in order to propagate this across threads. + This is needed so the DeprecationLogger in another thread can see the value of X-Opaque-ID provided by a user. + Otherwise, when the context is stashed, it should be empty.
*/ ThreadContextStruct threadContextStruct = DEFAULT_CONTEXT.putPersistent(context.persistentHeaders); @@ -159,7 +161,7 @@ public StoredContext stashContext() { ); } - final Map<String, Object> transientHeaders = propagateTransients(context.transientHeaders); + final Map<String, Object> transientHeaders = propagateTransients(context.transientHeaders, context.isSystemContext); if (!transientHeaders.isEmpty()) { threadContextStruct = threadContextStruct.putTransient(transientHeaders); } @@ -180,7 +182,7 @@ public StoredContext stashContext() { public Writeable captureAsWriteable() { final ThreadContextStruct context = threadLocal.get(); return out -> { - final Map<String, String> propagatedHeaders = propagateHeaders(context.transientHeaders); + final Map<String, String> propagatedHeaders = propagateHeaders(context.transientHeaders, context.isSystemContext); context.writeTo(out, defaultHeader, propagatedHeaders); }; } @@ -243,7 +245,7 @@ public StoredContext newStoredContext(boolean preserveResponseHeaders, Collectio final Map<String, Object> newTransientHeaders = new HashMap<>(originalContext.transientHeaders); boolean transientHeadersModified = false; - final Map<String, Object> transientHeaders = propagateTransients(originalContext.transientHeaders); + final Map<String, Object> transientHeaders = propagateTransients(originalContext.transientHeaders, originalContext.isSystemContext); if (!transientHeaders.isEmpty()) { newTransientHeaders.putAll(transientHeaders); transientHeadersModified = true; @@ -320,7 +322,7 @@ public Supplier<StoredContext> wrapRestorable(StoredContext storedContext) { @Override public void writeTo(StreamOutput out) throws IOException { final ThreadContextStruct context = threadLocal.get(); - final Map<String, String> propagatedHeaders = propagateHeaders(context.transientHeaders); + final Map<String, String> propagatedHeaders = propagateHeaders(context.transientHeaders, context.isSystemContext); context.writeTo(out, defaultHeader, propagatedHeaders); } @@ -532,7 +534,7 @@ boolean isDefaultContext() { * by the system itself rather than by a user action. 
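With `ThreadContext` and `StoredContext` now `@PublicApi`, the stash/restore cycle described above looks like this in practice (the header name is invented for illustration):

    ThreadContext threadContext = threadPool.getThreadContext(); // 'threadPool' assumed in scope
    try (ThreadContext.StoredContext restore = threadContext.stashContext()) {
        // Inside the block the context is fresh; X-Opaque-ID and any transients claimed by
        // registered propagators survive the stash, per the comment above.
        threadContext.putHeader("x-invented-header", "value");
        // ... run work that must not observe the caller's request state ...
    }
    // Closing the StoredContext restores the original headers and transients.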
*/ public void markAsSystemContext() { - threadLocal.set(threadLocal.get().setSystemContext()); + threadLocal.set(threadLocal.get().setSystemContext(propagators)); } /** @@ -545,9 +547,10 @@ public boolean isSystemContext() { /** * A stored context * - * @opensearch.internal + * @opensearch.api */ @FunctionalInterface + @PublicApi(since = "1.0.0") public interface StoredContext extends AutoCloseable { @Override void close(); @@ -570,15 +573,15 @@ public static Map<String, String> buildDefaultHeaders(Settings settings) { } } - private Map<String, Object> propagateTransients(Map<String, Object> source) { + private Map<String, Object> propagateTransients(Map<String, Object> source, boolean isSystemContext) { final Map<String, Object> transients = new HashMap<>(); - propagators.forEach(p -> transients.putAll(p.transients(source))); + propagators.forEach(p -> transients.putAll(p.transients(source, isSystemContext))); return transients; } - private Map<String, String> propagateHeaders(Map<String, Object> source) { + private Map<String, String> propagateHeaders(Map<String, Object> source, boolean isSystemContext) { final Map<String, String> headers = new HashMap<>(); - propagators.forEach(p -> headers.putAll(p.headers(source))); + propagators.forEach(p -> headers.putAll(p.headers(source, isSystemContext))); return headers; } @@ -600,11 +603,13 @@ private static final class ThreadContextStruct { // saving current warning headers' size not to recalculate the size with every new warning header private final long warningHeadersSize; - private ThreadContextStruct setSystemContext() { + private ThreadContextStruct setSystemContext(final List<ThreadContextStatePropagator> propagators) { if (isSystemContext) { return this; } - return new ThreadContextStruct(requestHeaders, responseHeaders, transientHeaders, persistentHeaders, true); + final Map<String, Object> transients = new HashMap<>(); + propagators.forEach(p -> transients.putAll(p.transients(transientHeaders, true))); + return new ThreadContextStruct(requestHeaders, responseHeaders, transients, persistentHeaders, true); } private ThreadContextStruct( diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContextStatePropagator.java b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContextStatePropagator.java index b3fc79c5446db..e8c12ae13d5eb 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContextStatePropagator.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContextStatePropagator.java @@ -8,24 +8,55 @@ package org.opensearch.common.util.concurrent; +import org.opensearch.common.annotation.PublicApi; + import java.util.Map; /** * The propagator for {@link ThreadContext} that helps to carry-over the state from one * thread to another (tasks, tracing context, etc). + * + * @opensearch.api */ +@PublicApi(since = "2.8.0") public interface ThreadContextStatePropagator { /** * Returns the list of transient headers that needs to be propagated from current context to new thread context. - * @param source current context transient headers + * + * @param source current context transient headers * @return the list of transient headers that needs to be propagated from current context to new thread context */ + @Deprecated(since = "2.12.0", forRemoval = true) Map<String, Object> transients(Map<String, Object> source); + /** + * Returns the list of transient headers that needs to be propagated from current context to new thread context. 
+ * + * @param source current context transient headers + * @param isSystemContext if the propagation is for system context. + * @return the list of transient headers that needs to be propagated from current context to new thread context + */ + default Map<String, Object> transients(Map<String, Object> source, boolean isSystemContext) { + return transients(source); + } + /** * Returns the list of request headers that needs to be propagated from current context to request. - * @param source current context headers + * + * @param source current context headers * @return the list of request headers that needs to be propagated from current context to request */ + @Deprecated(since = "2.12.0", forRemoval = true) Map<String, String> headers(Map<String, Object> source); + + /** + * Returns the list of request headers that needs to be propagated from current context to request. + * + * @param source current context headers + * @param isSystemContext if the propagation is for system context. + * @return the list of request headers that needs to be propagated from current context to request + */ + default Map<String, String> headers(Map<String, Object> source, boolean isSystemContext) { + return headers(source); + } } diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/TimedRunnable.java b/server/src/main/java/org/opensearch/common/util/concurrent/TimedRunnable.java index f3bc50a33453b..2eb6657898008 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/TimedRunnable.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/TimedRunnable.java @@ -107,6 +107,14 @@ long getTotalExecutionNanos() { return Math.max(finishTimeNanos - startTimeNanos, 1); } + long getWaitTimeNanos() { + if (startTimeNanos == -1) { + // There must have been an exception thrown, the wait time is unknown (-1) + return -1; + } + return Math.max(startTimeNanos - creationTimeNanos, 1); + } + /** * If the task was failed or rejected, return true. * Otherwise, false.
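Pulling together the `poolWaitTime` counters added to both queue-resizing executors and the `getWaitTimeNanos` accessor above, the per-task timeline works out as follows (the diagram is illustrative; the field and method names match the hunks):

    // creationTimeNanos         startTimeNanos             finishTimeNanos
    //        |------ queue wait ------|-------- execution -------|
    //
    // getWaitTimeNanos()       == max(startTimeNanos - creationTimeNanos, 1)
    // getTotalExecutionNanos() == max(finishTimeNanos - startTimeNanos, 1)
    //
    // In afterExecute(..) each executor accumulates the wait into its CounterMetric:
    //     poolWaitTime.inc(timedRunnable.getWaitTimeNanos());
    // and getPoolWaitTimeNanos() returns poolWaitTime.count(), surfacing the cumulative
    // queue wait in ThreadPoolStats; executors that do not track it return -1.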
diff --git a/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java b/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java index 7510c712e3b4b..9b2bd06a88e2e 100644 --- a/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java +++ b/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java @@ -8,15 +8,17 @@ package org.opensearch.common.xcontent; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.AbstractXContentParser; import org.opensearch.core.xcontent.DeprecationHandler; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentLocation; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.index.mapper.ParseContext; + import java.io.IOException; import java.math.BigInteger; import java.nio.CharBuffer; @@ -37,7 +39,6 @@ public class JsonToStringXContentParser extends AbstractXContentParser { private ArrayList<String> keyList = new ArrayList<>(); private XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent); - private ParseContext parseContext; private NamedXContentRegistry xContentRegistry; @@ -51,14 +52,13 @@ public class JsonToStringXContentParser extends AbstractXContentParser { public JsonToStringXContentParser( NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler, - ParseContext parseContext, + XContentParser parser, String fieldTypeName ) throws IOException { super(xContentRegistry, deprecationHandler); - this.parseContext = parseContext; this.deprecationHandler = deprecationHandler; this.xContentRegistry = xContentRegistry; - this.parser = parseContext.parser(); + this.parser = parser; this.fieldTypeName = fieldTypeName; } @@ -70,7 +70,7 @@ public XContentParser parseObject() throws IOException { builder.field(this.fieldTypeName + VALUE_SUFFIX, valueList); builder.field(this.fieldTypeName + VALUE_AND_PATH_SUFFIX, valueAndPathList); builder.endObject(); - String jString = XContentHelper.convertToJson(BytesReference.bytes(builder), false, XContentType.JSON); + String jString = XContentHelper.convertToJson(BytesReference.bytes(builder), false, MediaTypeRegistry.JSON); return JsonXContent.jsonXContent.createParser(this.xContentRegistry, this.deprecationHandler, String.valueOf(jString)); } @@ -83,8 +83,22 @@ private void parseToken(StringBuilder path, String currentFieldName) throws IOEx StringBuilder parsedFields = new StringBuilder(); if (this.parser.currentToken() == Token.FIELD_NAME) { - path.append(DOT_SYMBOL + currentFieldName); - this.keyList.add(currentFieldName); + path.append(DOT_SYMBOL).append(currentFieldName); + int dotIndex = currentFieldName.indexOf(DOT_SYMBOL); + String fieldNameSuffix = currentFieldName; + // The field name may be of the form foo.bar.baz + // If that's the case, each "part" is a key. 
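+ // For example, currentFieldName = "foo.bar.baz" adds the keys "foo", "bar" and "baz" to keyList, while 'path' keeps the dotted form intact.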
+ while (dotIndex >= 0) { + String fieldNamePrefix = fieldNameSuffix.substring(0, dotIndex); + if (!fieldNamePrefix.isEmpty()) { + this.keyList.add(fieldNamePrefix); + } + fieldNameSuffix = fieldNameSuffix.substring(dotIndex + 1); + dotIndex = fieldNameSuffix.indexOf(DOT_SYMBOL); + } + if (!fieldNameSuffix.isEmpty()) { + this.keyList.add(fieldNameSuffix); + } } else if (this.parser.currentToken() == Token.START_ARRAY) { parseToken(path, currentFieldName); break; @@ -94,18 +108,18 @@ private void parseToken(StringBuilder path, String currentFieldName) throws IOEx parseToken(path, currentFieldName); int dotIndex = path.lastIndexOf(DOT_SYMBOL); if (dotIndex != -1) { - path.delete(dotIndex, path.length()); + path.setLength(path.length() - currentFieldName.length() - 1); } } else { if (!path.toString().contains(currentFieldName)) { - path.append(DOT_SYMBOL + currentFieldName); + path.append(DOT_SYMBOL).append(currentFieldName); } parseValue(parsedFields); this.valueList.add(parsedFields.toString()); this.valueAndPathList.add(path + EQUAL_SYMBOL + parsedFields); int dotIndex = path.lastIndexOf(DOT_SYMBOL); if (dotIndex != -1) { - path.delete(dotIndex, path.length()); + path.setLength(path.length() - currentFieldName.length() - 1); } } @@ -132,8 +146,8 @@ private void parseValue(StringBuilder parsedFields) throws IOException { } @Override - public XContentType contentType() { - return XContentType.JSON; + public MediaType contentType() { + return MediaTypeRegistry.JSON; } @Override diff --git a/server/src/main/java/org/opensearch/common/xcontent/LoggingDeprecationHandler.java b/server/src/main/java/org/opensearch/common/xcontent/LoggingDeprecationHandler.java index cf09550ee0d9f..05fc968737394 100644 --- a/server/src/main/java/org/opensearch/common/xcontent/LoggingDeprecationHandler.java +++ b/server/src/main/java/org/opensearch/common/xcontent/LoggingDeprecationHandler.java @@ -32,8 +32,8 @@ package org.opensearch.common.xcontent; -import org.opensearch.core.ParseField; import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.XContentLocation; @@ -54,7 +54,7 @@ public class LoggingDeprecationHandler implements DeprecationHandler { public static final LoggingDeprecationHandler INSTANCE = new LoggingDeprecationHandler(); /** * The logger to which to send deprecation messages. - * + * <p> * This uses ParseField's logger because that is the logger that * we have been using for many releases for deprecated fields. 
* Changing that will require some research to make super duper diff --git a/server/src/main/java/org/opensearch/common/xcontent/ParseFieldRegistry.java b/server/src/main/java/org/opensearch/common/xcontent/ParseFieldRegistry.java index 9770f41bce8ff..b480ceaecedb6 100644 --- a/server/src/main/java/org/opensearch/common/xcontent/ParseFieldRegistry.java +++ b/server/src/main/java/org/opensearch/common/xcontent/ParseFieldRegistry.java @@ -32,9 +32,9 @@ package org.opensearch.common.xcontent; +import org.opensearch.common.collect.Tuple; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; -import org.opensearch.common.collect.Tuple; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.XContentLocation; diff --git a/server/src/main/java/org/opensearch/common/xcontent/StatusToXContentObject.java b/server/src/main/java/org/opensearch/common/xcontent/StatusToXContentObject.java index 475f6b46555aa..32e24bbbef63b 100644 --- a/server/src/main/java/org/opensearch/common/xcontent/StatusToXContentObject.java +++ b/server/src/main/java/org/opensearch/common/xcontent/StatusToXContentObject.java @@ -31,8 +31,8 @@ package org.opensearch.common.xcontent; -import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.ToXContentObject; /** * Objects that can both render themselves in as json/yaml/etc and can provide a {@link RestStatus} for their response. Usually should be diff --git a/server/src/main/java/org/opensearch/common/xcontent/XContentHelper.java b/server/src/main/java/org/opensearch/common/xcontent/XContentHelper.java index ae1fb0724fd9e..17bb0a1de267b 100644 --- a/server/src/main/java/org/opensearch/common/xcontent/XContentHelper.java +++ b/server/src/main/java/org/opensearch/common/xcontent/XContentHelper.java @@ -33,14 +33,14 @@ package org.opensearch.common.xcontent; import org.opensearch.OpenSearchParseException; -import org.opensearch.common.Strings; +import org.opensearch.common.collect.Tuple; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.collect.Tuple; -import org.opensearch.common.compress.Compressor; -import org.opensearch.common.compress.CompressorFactory; +import org.opensearch.core.compress.Compressor; +import org.opensearch.core.compress.CompressorRegistry; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContent.Params; @@ -52,7 +52,6 @@ import java.io.BufferedInputStream; import java.io.IOException; import java.io.InputStream; -import java.io.UncheckedIOException; import java.util.ArrayList; import java.util.LinkedHashMap; import java.util.List; @@ -78,7 +77,7 @@ public static XContentParser createParser( DeprecationHandler deprecationHandler, BytesReference bytes ) throws IOException { - Compressor compressor = CompressorFactory.compressor(bytes); + Compressor compressor = CompressorRegistry.compressor(bytes); if (compressor != null) { InputStream compressedInput = null; try { @@ -86,14 +85,14 @@ public static XContentParser createParser( if (compressedInput.markSupported() == false) { compressedInput = new BufferedInputStream(compressedInput); } - final XContentType contentType = 
XContentFactory.xContentType(compressedInput); - return XContentFactory.xContent(contentType).createParser(xContentRegistry, deprecationHandler, compressedInput); + final MediaType contentType = MediaTypeRegistry.xContentType(compressedInput); + return contentType.xContent().createParser(xContentRegistry, deprecationHandler, compressedInput); } catch (Exception e) { if (compressedInput != null) compressedInput.close(); throw e; } } else { - return XContentFactory.xContent(xContentType(bytes)).createParser(xContentRegistry, deprecationHandler, bytes.streamInput()); + return MediaTypeRegistry.xContentType(bytes).xContent().createParser(xContentRegistry, deprecationHandler, bytes.streamInput()); } } @@ -107,7 +106,7 @@ public static XContentParser createParser( MediaType mediaType ) throws IOException { Objects.requireNonNull(mediaType); - Compressor compressor = CompressorFactory.compressor(bytes); + Compressor compressor = CompressorRegistry.compressor(bytes); if (compressor != null) { InputStream compressedInput = null; try { @@ -115,7 +114,7 @@ public static XContentParser createParser( if (compressedInput.markSupported() == false) { compressedInput = new BufferedInputStream(compressedInput); } - return XContentFactory.xContent(mediaType).createParser(xContentRegistry, deprecationHandler, compressedInput); + return mediaType.xContent().createParser(xContentRegistry, deprecationHandler, compressedInput); } catch (Exception e) { if (compressedInput != null) compressedInput.close(); throw e; @@ -159,15 +158,12 @@ public static Tuple<XContentType, Map<String, Object>> convertToMap(BytesReferen /** * Converts the given bytes into a map that is optionally ordered. The provided {@link XContentType} must be non-null. */ - public static Tuple<? extends MediaType, Map<String, Object>> convertToMap( - BytesReference bytes, - boolean ordered, - MediaType xContentType - ) throws OpenSearchParseException { + public static Tuple<? extends MediaType, Map<String, Object>> convertToMap(BytesReference bytes, boolean ordered, MediaType mediaType) + throws OpenSearchParseException { try { final MediaType contentType; InputStream input; - Compressor compressor = CompressorFactory.compressor(bytes); + Compressor compressor = CompressorRegistry.compressor(bytes); if (compressor != null) { InputStream compressedStreamInput = compressor.threadLocalInputStream(bytes.streamInput()); if (compressedStreamInput.markSupported() == false) { @@ -179,20 +175,14 @@ public static Tuple<? extends MediaType, Map<String, Object>> convertToMap( final byte[] raw = arr.array(); final int offset = arr.offset(); final int length = arr.length(); - contentType = xContentType != null ? xContentType : XContentFactory.xContentType(raw, offset, length); - return new Tuple<>( - Objects.requireNonNull(contentType), - convertToMap(XContentFactory.xContent(contentType), raw, offset, length, ordered) - ); + contentType = mediaType != null ? mediaType : MediaTypeRegistry.mediaTypeFromBytes(raw, offset, length); + return new Tuple<>(Objects.requireNonNull(contentType), convertToMap(contentType.xContent(), raw, offset, length, ordered)); } else { input = bytes.streamInput(); } try (InputStream stream = input) { - contentType = xContentType != null ? xContentType : XContentFactory.xContentType(stream); - return new Tuple<>( - Objects.requireNonNull(contentType), - convertToMap(XContentFactory.xContent(contentType), stream, ordered) - ); + contentType = mediaType != null ? 
mediaType : MediaTypeRegistry.xContentType(stream); + return new Tuple<>(Objects.requireNonNull(contentType), convertToMap(contentType.xContent(), stream, ordered)); } } catch (IOException e) { throw new OpenSearchParseException("Failed to parse content to map", e); @@ -266,11 +256,11 @@ public static String convertToJson(BytesReference bytes, boolean reformatJson) t @Deprecated public static String convertToJson(BytesReference bytes, boolean reformatJson, boolean prettyPrint) throws IOException { - return convertToJson(bytes, reformatJson, prettyPrint, XContentFactory.xContentType(bytes.toBytesRef().bytes)); + return convertToJson(bytes, reformatJson, prettyPrint, MediaTypeRegistry.xContent(bytes.toBytesRef().bytes)); } - public static String convertToJson(BytesReference bytes, boolean reformatJson, MediaType xContentType) throws IOException { - return convertToJson(bytes, reformatJson, false, xContentType); + public static String convertToJson(BytesReference bytes, boolean reformatJson, MediaType mediaType) throws IOException { + return convertToJson(bytes, reformatJson, false, mediaType); } /** @@ -283,7 +273,7 @@ public static String convertToJson(BytesReference bytes, boolean reformatJson, M * @throws IOException if the reformatting fails, e.g. because the JSON is not well-formed */ public static String stripWhitespace(String json) throws IOException { - return convertToJson(new BytesArray(json), true, XContentType.JSON); + return convertToJson(new BytesArray(json), true, MediaTypeRegistry.JSON); } /** @@ -303,7 +293,7 @@ public static String convertToJson(BytesReference bytes, boolean reformatJson, b public static String convertToJson(BytesReference bytes, boolean reformatJson, boolean prettyPrint, MediaType mediaType) throws IOException { Objects.requireNonNull(mediaType); - if (mediaType == XContentType.JSON && !reformatJson) { + if (mediaType == MediaTypeRegistry.JSON && !reformatJson) { return bytes.utf8ToString(); } @@ -311,7 +301,7 @@ public static String convertToJson(BytesReference bytes, boolean reformatJson, b if (bytes instanceof BytesArray) { final BytesArray array = (BytesArray) bytes; try ( - XContentParser parser = XContentFactory.xContent(mediaType) + XContentParser parser = mediaType.xContent() .createParser( NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, @@ -325,7 +315,7 @@ public static String convertToJson(BytesReference bytes, boolean reformatJson, b } else { try ( InputStream stream = bytes.streamInput(); - XContentParser parser = XContentFactory.xContent(mediaType) + XContentParser parser = mediaType.xContent() .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, stream) ) { return toJsonString(prettyPrint, parser); @@ -340,7 +330,7 @@ private static String toJsonString(boolean prettyPrint, XContentParser parser) t builder.prettyPrint(); } builder.copyCurrentStructure(parser); - return Strings.toString(builder); + return builder.toString(); } /** @@ -461,7 +451,7 @@ private static boolean allListValuesAreMapsOfOne(List list) { */ @Deprecated public static void writeRawField(String field, BytesReference source, XContentBuilder builder, Params params) throws IOException { - Compressor compressor = CompressorFactory.compressor(source); + Compressor compressor = CompressorRegistry.compressor(source); if (compressor != null) { try (InputStream compressedStreamInput = compressor.threadLocalInputStream(source.streamInput())) { builder.rawField(field, compressedStreamInput); @@ -480,7 +470,7 @@ public 
static void writeRawField(String field, BytesReference source, XContentBu public static void writeRawField(String field, BytesReference source, XContentType xContentType, XContentBuilder builder, Params params) throws IOException { Objects.requireNonNull(xContentType); - Compressor compressor = CompressorFactory.compressor(source); + Compressor compressor = CompressorRegistry.compressor(source); if (compressor != null) { try (InputStream compressedStreamInput = compressor.threadLocalInputStream(source.streamInput())) { builder.rawField(field, compressedStreamInput, xContentType); @@ -499,78 +489,12 @@ public static void writeRawField(String field, BytesReference source, XContentTy */ @Deprecated public static BytesReference toXContent(ToXContent toXContent, XContentType xContentType, boolean humanReadable) throws IOException { - return toXContent(toXContent, xContentType, ToXContent.EMPTY_PARAMS, humanReadable); - } - - /** - * Returns the bytes that represent the XContent output of the provided {@link ToXContent} object, using the provided - * {@link XContentType}. Wraps the output into a new anonymous object according to the value returned - * by the {@link ToXContent#isFragment()} method returns. - * - * @deprecated use {@link #toXContent(ToXContent, MediaType, Params, boolean)} instead - */ - @Deprecated - public static BytesReference toXContent(ToXContent toXContent, XContentType xContentType, Params params, boolean humanReadable) - throws IOException { - try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) { - builder.humanReadable(humanReadable); - if (toXContent.isFragment()) { - builder.startObject(); - } - toXContent.toXContent(builder, params); - if (toXContent.isFragment()) { - builder.endObject(); - } - return BytesReference.bytes(builder); - } - } - - /** - * Returns the bytes that represent the XContent output of the provided {@link ToXContent} object, using the provided - * {@link XContentType}. Wraps the output into a new anonymous object according to the value returned - * by the {@link ToXContent#isFragment()} method returns. - */ - public static BytesReference toXContent(ToXContent toXContent, MediaType mediaType, Params params, boolean humanReadable) - throws IOException { - try (XContentBuilder builder = XContentBuilder.builder(mediaType.xContent())) { - builder.humanReadable(humanReadable); - if (toXContent.isFragment()) { - builder.startObject(); - } - toXContent.toXContent(builder, params); - if (toXContent.isFragment()) { - builder.endObject(); - } - return BytesReference.bytes(builder); - } - } - - /** - * Guesses the content type based on the provided bytes. - * - * @deprecated the content type should not be guessed except for few cases where we effectively don't know the content type. - * The REST layer should move to reading the Content-Type header instead. There are other places where auto-detection may be needed. - * This method is deprecated to prevent usages of it from spreading further without specific reasons. 
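The deprecation note above is the motivation for the MediaTypeRegistry rewiring throughout XContentHelper: rather than sniffing bytes, callers pass an explicit media type and ask it for its parser. A minimal sketch of the explicit style, using only calls that appear in this diff (the registry and deprecation-handler choices are illustrative):

    // Parse a JSON payload with an explicitly chosen media type instead of guessing.
    BytesReference bytes = new BytesArray("{\"field\":\"value\"}");
    MediaType mediaType = MediaTypeRegistry.JSON;
    try (
        XContentParser parser = mediaType.xContent()
            .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, bytes.streamInput())
    ) {
        Map<String, Object> map = parser.map(); // materialize the document as a map
    }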
- */ - @Deprecated - public static XContentType xContentType(BytesReference bytes) { - if (bytes instanceof BytesArray) { - final BytesArray array = (BytesArray) bytes; - return XContentFactory.xContentType(array.array(), array.offset(), array.length()); - } - try { - final InputStream inputStream = bytes.streamInput(); - assert inputStream.markSupported(); - return XContentFactory.xContentType(inputStream); - } catch (IOException e) { - assert false : "Should not happen, we're just reading bytes from memory"; - throw new UncheckedIOException(e); - } + return org.opensearch.core.xcontent.XContentHelper.toXContent(toXContent, xContentType, ToXContent.EMPTY_PARAMS, humanReadable); } /** * Returns the contents of an object as an unparsed BytesReference - * + * <p> * This is useful for things like mappings where we're copying bytes around but don't * actually need to parse their contents, and so avoids building large maps of maps * unnecessarily diff --git a/server/src/main/java/org/opensearch/common/xcontent/XContentOpenSearchExtension.java b/server/src/main/java/org/opensearch/common/xcontent/XContentOpenSearchExtension.java index 924db8bdea1dd..0ce6e3da515d8 100644 --- a/server/src/main/java/org/opensearch/common/xcontent/XContentOpenSearchExtension.java +++ b/server/src/main/java/org/opensearch/common/xcontent/XContentOpenSearchExtension.java @@ -33,25 +33,16 @@ package org.opensearch.common.xcontent; import org.apache.lucene.util.BytesRef; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.time.DateFormatter; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentBuilderExtension; -import org.opensearch.script.JodaCompatibleZonedDateTime; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; -import org.joda.time.Instant; -import org.joda.time.MutableDateTime; -import org.joda.time.ReadableInstant; -import org.joda.time.format.DateTimeFormatter; -import org.joda.time.format.ISODateTimeFormat; -import org.joda.time.tz.CachedDateTimeZone; -import org.joda.time.tz.FixedDateTimeZone; import java.time.DayOfWeek; import java.time.Duration; +import java.time.Instant; import java.time.LocalDate; import java.time.LocalDateTime; import java.time.LocalTime; @@ -80,7 +71,6 @@ */ public class XContentOpenSearchExtension implements XContentBuilderExtension { - public static final DateTimeFormatter DEFAULT_DATE_PRINTER = ISODateTimeFormat.dateTime().withZone(DateTimeZone.UTC); public static final DateFormatter DEFAULT_FORMATTER = DateFormatter.forPattern("strict_date_optional_time_nanos"); public static final DateFormatter LOCAL_TIME_FORMATTER = DateFormatter.forPattern("HH:mm:ss.SSS"); public static final DateFormatter OFFSET_TIME_FORMATTER = DateFormatter.forPattern("HH:mm:ss.SSSZZZZZ"); @@ -91,11 +81,6 @@ public Map<Class<?>, XContentBuilder.Writer> getXContentWriters() { // Fully-qualified here to reduce ambiguity around our (OpenSearch') Version class writers.put(org.apache.lucene.util.Version.class, (b, v) -> b.value(Objects.toString(v))); - writers.put(DateTimeZone.class, (b, v) -> b.value(Objects.toString(v))); - writers.put(CachedDateTimeZone.class, (b, v) -> b.value(Objects.toString(v))); - writers.put(FixedDateTimeZone.class, (b, v) -> b.value(Objects.toString(v))); - 
writers.put(MutableDateTime.class, XContentBuilder::timeValue); - writers.put(DateTime.class, XContentBuilder::timeValue); writers.put(TimeValue.class, (b, v) -> b.value(v.toString())); writers.put(ZonedDateTime.class, XContentBuilder::timeValue); writers.put(OffsetDateTime.class, XContentBuilder::timeValue); @@ -110,7 +95,6 @@ public Map<Class<?>, XContentBuilder.Writer> getXContentWriters() { writers.put(Year.class, (b, v) -> b.value(v.toString())); writers.put(Duration.class, (b, v) -> b.value(v.toString())); writers.put(Period.class, (b, v) -> b.value(v.toString())); - writers.put(JodaCompatibleZonedDateTime.class, XContentBuilder::timeValue); writers.put(BytesReference.class, (b, v) -> { if (v == null) { @@ -143,14 +127,11 @@ public Map<Class<?>, XContentBuilder.HumanReadableTransformer> getXContentHumanR @Override public Map<Class<?>, Function<Object, Object>> getDateTransformers() { Map<Class<?>, Function<Object, Object>> transformers = new HashMap<>(); - transformers.put(Date.class, d -> DEFAULT_DATE_PRINTER.print(((Date) d).getTime())); - transformers.put(DateTime.class, d -> DEFAULT_DATE_PRINTER.print((DateTime) d)); - transformers.put(MutableDateTime.class, d -> DEFAULT_DATE_PRINTER.print((MutableDateTime) d)); - transformers.put(ReadableInstant.class, d -> DEFAULT_DATE_PRINTER.print((ReadableInstant) d)); - transformers.put(Long.class, d -> DEFAULT_DATE_PRINTER.print((long) d)); - transformers.put(Calendar.class, d -> DEFAULT_DATE_PRINTER.print(((Calendar) d).getTimeInMillis())); - transformers.put(GregorianCalendar.class, d -> DEFAULT_DATE_PRINTER.print(((Calendar) d).getTimeInMillis())); - transformers.put(Instant.class, d -> DEFAULT_DATE_PRINTER.print((Instant) d)); + transformers.put(Date.class, d -> DEFAULT_FORMATTER.format(((Date) d).toInstant())); + transformers.put(Long.class, d -> DEFAULT_FORMATTER.format(Instant.ofEpochMilli((long) d))); + transformers.put(Calendar.class, d -> DEFAULT_FORMATTER.format(((Calendar) d).toInstant())); + transformers.put(GregorianCalendar.class, d -> DEFAULT_FORMATTER.format(((Calendar) d).toInstant())); + transformers.put(Instant.class, d -> DEFAULT_FORMATTER.format((Instant) d)); transformers.put(ZonedDateTime.class, d -> DEFAULT_FORMATTER.format((ZonedDateTime) d)); transformers.put(OffsetDateTime.class, d -> DEFAULT_FORMATTER.format((OffsetDateTime) d)); transformers.put(OffsetTime.class, d -> OFFSET_TIME_FORMATTER.format((OffsetTime) d)); @@ -161,10 +142,6 @@ public Map<Class<?>, Function<Object, Object>> getDateTransformers() { ); transformers.put(LocalDate.class, d -> ((LocalDate) d).toString()); transformers.put(LocalTime.class, d -> LOCAL_TIME_FORMATTER.format((LocalTime) d)); - transformers.put( - JodaCompatibleZonedDateTime.class, - d -> DEFAULT_FORMATTER.format(((JodaCompatibleZonedDateTime) d).getZonedDateTime()) - ); return transformers; } } diff --git a/server/src/main/java/org/opensearch/common/xcontent/support/XContentMapValues.java b/server/src/main/java/org/opensearch/common/xcontent/support/XContentMapValues.java index adfa871cbfcbe..a87edbb949d39 100644 --- a/server/src/main/java/org/opensearch/common/xcontent/support/XContentMapValues.java +++ b/server/src/main/java/org/opensearch/common/xcontent/support/XContentMapValues.java @@ -117,12 +117,11 @@ private static void extractRawValues(List values, List<Object> part, String[] pa /** * For the provided path, return its value in the xContent map. 
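The XContentOpenSearchExtension hunk above retires Joda's DEFAULT_DATE_PRINTER and routes every legacy date type through the java.time-based DateFormatter instead. A small sketch of the new formatting path, reusing the strict_date_optional_time_nanos pattern from the diff (the values formatted are illustrative):

    // Legacy java.util.Date values and epoch millis both funnel into Instant formatting now.
    DateFormatter formatter = DateFormatter.forPattern("strict_date_optional_time_nanos");
    String fromDate = formatter.format(new Date().toInstant());
    String fromMillis = formatter.format(Instant.ofEpochMilli(1_700_000_000_000L));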
- * + * <p> * Note that in contrast with {@link XContentMapValues#extractRawValues}, array and object values * can be returned. * * @param path the value's path in the map. - * * @return the value associated with the path in the map or 'null' if the path does not exist. */ public static Object extractValue(String path, Map<?, ?> map) { @@ -138,7 +137,7 @@ public static Object extractValue(Map<?, ?> map, String... pathElements) { /** * For the provided path, return its value in the xContent map. - * + * <p> * Note that in contrast with {@link XContentMapValues#extractRawValues}, array and object values * can be returned. * @@ -197,13 +196,13 @@ private static Object extractValue(String[] pathElements, int index, Object curr * Only keep properties in {@code map} that match the {@code includes} but * not the {@code excludes}. An empty list of includes is interpreted as a * wildcard while an empty list of excludes does not match anything. - * + * <p> * If a property matches both an include and an exclude, then the exclude * wins. - * + * <p> * If an object matches, then any of its sub properties are automatically * considered as matching as well, both for includes and excludes. - * + * <p> * Dots in field names are treated as sub objects. So for instance if a * document contains {@code a.b} as a property and {@code a} is an include, * then {@code a.b} will be kept in the filtered map. @@ -555,7 +554,7 @@ public static Map<String, Object> nodeMapValue(Object node, String desc) { /** * Returns an array of string value from a node value. - * + * <p> * If the node represents an array the corresponding array of strings is returned. * Otherwise the node is treated as a comma-separated string. */ diff --git a/server/src/main/java/org/opensearch/crypto/CryptoHandlerRegistry.java b/server/src/main/java/org/opensearch/crypto/CryptoHandlerRegistry.java new file mode 100644 index 0000000000000..0a14331be35f7 --- /dev/null +++ b/server/src/main/java/org/opensearch/crypto/CryptoHandlerRegistry.java @@ -0,0 +1,168 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.crypto; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.cluster.metadata.CryptoMetadata; +import org.opensearch.common.SetOnce; +import org.opensearch.common.crypto.CryptoHandler; +import org.opensearch.common.crypto.MasterKeyProvider; +import org.opensearch.common.settings.Settings; +import org.opensearch.plugins.CryptoKeyProviderPlugin; +import org.opensearch.plugins.CryptoPlugin; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * During node bootstrap, installed key provider extensions responsible for generating data keys are loaded. + * Crypto factories against the respective KP plugins are cached. 
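Since the extractValue javadoc above is the contract most callers rely on, here is a tiny usage sketch of the dotted-path lookup it describes (the document contents are hypothetical):

    // Hypothetical source document: { "foo": { "bar": "baz" } }
    Map<String, Object> source = Map.of("foo", Map.of("bar", "baz"));
    Object hit = XContentMapValues.extractValue("foo.bar", source); // -> "baz"
    Object miss = XContentMapValues.extractValue("foo.qux", source); // -> null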
A crypto factory is used to register crypto + * handler against an {@link org.opensearch.common.blobstore.EncryptedBlobStore} + */ +public class CryptoHandlerRegistry { + private static final Logger logger = LogManager.getLogger(CryptoHandlerRegistry.class); + // Package private for tests + SetOnce<Map<String, CryptoKeyProviderPlugin>> registry = new SetOnce<>(); + + // Package private for tests + SetOnce<CryptoPlugin> cryptoHandlerPlugin = new SetOnce<>(); + private final Map<CryptoMetadata, CryptoHandler> registeredCryptoHandlers = new HashMap<>(); + + private static volatile CryptoHandlerRegistry instance; + private static final Object lock = new Object(); + + /** + * Initializes the registry with crypto factories for the installed crypto key providers. + * + * @param cryptoPlugins The list of installed crypto key provider plugins. + * @param settings Crypto settings. + */ + protected CryptoHandlerRegistry( + List<CryptoPlugin> cryptoPlugins, + List<CryptoKeyProviderPlugin> cryptoKeyProviderPlugins, + Settings settings + ) { + if (cryptoPlugins == null || cryptoPlugins.size() == 0) { + return; + } + if (cryptoPlugins.size() > 1) { + // We can remove this to support multiple implementations in future if needed. + throw new IllegalStateException("More than 1 implementation of crypto plugin found."); + } + + cryptoHandlerPlugin.set(cryptoPlugins.get(0)); + registry.set(loadCryptoFactories(cryptoKeyProviderPlugins)); + } + + public static CryptoHandlerRegistry getInstance() { + return instance; + } + + public static CryptoHandlerRegistry initRegistry( + List<CryptoPlugin> cryptoPlugins, + List<CryptoKeyProviderPlugin> cryptoKeyProviderPlugins, + Settings settings + ) { + CryptoHandlerRegistry curInstance = instance; + if (curInstance == null) { + synchronized (lock) { + curInstance = instance; + if (curInstance == null) { + instance = curInstance = new CryptoHandlerRegistry(cryptoPlugins, cryptoKeyProviderPlugins, settings); + } + } + } + return curInstance; + } + + // For tests + protected Map<String, CryptoKeyProviderPlugin> loadCryptoFactories(List<CryptoKeyProviderPlugin> cryptoKPPlugins) { + Map<String, CryptoKeyProviderPlugin> cryptoFactories = new HashMap<>(); + for (CryptoKeyProviderPlugin cryptoKPPlugin : cryptoKPPlugins) { + if (cryptoFactories.containsKey(cryptoKPPlugin.type())) { + throw new IllegalArgumentException("Crypto plugin key provider type [" + cryptoKPPlugin.type() + "] is already registered"); + } + cryptoFactories.put(cryptoKPPlugin.type(), cryptoKPPlugin); + } + + return Map.copyOf(cryptoFactories); + } + + /** + * Retrieves the crypto factory associated with the given key provider type . + * + * @param keyProviderType The unique provider type for which the factory is to be fetched. + * @return The crypto factory used to create {@link CryptoHandler} + * instances in a {@link org.opensearch.common.blobstore.EncryptedBlobStore}. + * @throws IllegalStateException If the crypto registry is not yet loaded. + */ + public CryptoKeyProviderPlugin getCryptoKeyProviderPlugin(String keyProviderType) { + if (registry.get() == null) { + throw new IllegalStateException("Crypto registry is not yet loaded"); + } + return Objects.requireNonNull(registry.get()).get(keyProviderType); + } + + /** + * Fetches the cached crypto manager for the provided crypto metadata or creates a new one if not found. + * If the key provider is not installed, it throws a {@link CryptoRegistryException}. + * + * @param cryptoMetadata The crypto metadata for which the key provider is to be created. 
+ * @return The crypto manager for performing encrypt/decrypt operations. + * @throws CryptoRegistryException If the key provider is not installed or there is an error during crypto manager creation. + */ + public CryptoHandler<?, ?> fetchCryptoHandler(CryptoMetadata cryptoMetadata) { + CryptoHandler<?, ?> cryptoHandler = registeredCryptoHandlers.get(cryptoMetadata); + if (cryptoHandler == null) { + synchronized (registeredCryptoHandlers) { + cryptoHandler = registeredCryptoHandlers.get(cryptoMetadata); + if (cryptoHandler == null) { + Runnable onClose = () -> { + synchronized (registeredCryptoHandlers) { + registeredCryptoHandlers.remove(cryptoMetadata); + } + }; + cryptoHandler = createCryptoHandler(cryptoMetadata, onClose); + registeredCryptoHandlers.put(cryptoMetadata, cryptoHandler); + } + } + } + return cryptoHandler; + } + + private CryptoHandler<?, ?> createCryptoHandler(CryptoMetadata cryptoMetadata, Runnable onClose) { + logger.debug("creating crypto client [{}][{}]", cryptoMetadata.keyProviderType(), cryptoMetadata.keyProviderName()); + CryptoKeyProviderPlugin keyProviderPlugin = getCryptoKeyProviderPlugin(cryptoMetadata.keyProviderType()); + if (keyProviderPlugin == null) { + throw new CryptoRegistryException(cryptoMetadata.keyProviderName(), cryptoMetadata.keyProviderType()); + } + + try { + MasterKeyProvider masterKeyProvider = keyProviderPlugin.createKeyProvider(cryptoMetadata); + return Objects.requireNonNull(cryptoHandlerPlugin.get()) + .getOrCreateCryptoHandler(masterKeyProvider, cryptoMetadata.keyProviderName(), cryptoMetadata.keyProviderType(), onClose); + + } catch (Exception e) { + logger.warn( + new ParameterizedMessage( + "failed to create crypto manager of name [{}] and type [{}]", + cryptoMetadata.keyProviderName(), + cryptoMetadata.keyProviderType() + ), + e + ); + throw new CryptoRegistryException(cryptoMetadata.keyProviderName(), cryptoMetadata.keyProviderType(), e); + } + } + +} diff --git a/server/src/main/java/org/opensearch/crypto/CryptoRegistryException.java b/server/src/main/java/org/opensearch/crypto/CryptoRegistryException.java new file mode 100644 index 0000000000000..a1b065649079d --- /dev/null +++ b/server/src/main/java/org/opensearch/crypto/CryptoRegistryException.java @@ -0,0 +1,127 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.crypto; + +import org.opensearch.OpenSearchException; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.rest.RestStatus; + +import java.io.IOException; + +/** + * Thrown when crypto manager creation or retrieval fails. + * + * @opensearch.internal + */ +public class CryptoRegistryException extends OpenSearchException { + private final String name; + private final String type; + private final RestStatus restStatus; + + /** + * Constructs a new CryptoRegistryException with the given client name and client type. + * + * @param clientName The name of the client for which the crypto registry is missing. + * @param clientType The type of the client for which the crypto registry is missing. 
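initRegistry above is the classic double-checked locking idiom: one read of the volatile field on the fast path, then a second check under the lock so initialization happens at most once. The generic shape, as a self-contained sketch (the class name is hypothetical):

    // Generic form of the initRegistry() idiom; 'instance' must be volatile so a
    // fully constructed object is published before other threads can observe it.
    final class LazySingleton {
        private static volatile LazySingleton instance;
        private static final Object lock = new Object();

        static LazySingleton getOrInit() {
            LazySingleton cur = instance;
            if (cur == null) {
                synchronized (lock) {
                    cur = instance;
                    if (cur == null) {
                        instance = cur = new LazySingleton();
                    }
                }
            }
            return cur;
        }
    }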
+ */ + public CryptoRegistryException(String clientName, String clientType) { + super("[Missing crypto registry for client name : " + clientName + " of type " + clientType + " ]"); + this.name = clientName; + this.type = clientType; + this.restStatus = RestStatus.NOT_FOUND; + } + + /** + * Constructs a new CryptoRegistryException with the given client name, client type, and a cause. + * + * @param clientName The name of the client that caused the exception. + * @param clientType The type of the client that caused the exception. + * @param cause The cause of the exception, which could be another throwable. + */ + public CryptoRegistryException(String clientName, String clientType, Throwable cause) { + super("[Client name : " + clientName + " Type " + clientType + " ]", cause); + this.name = clientName; + this.type = clientType; + if (cause instanceof IllegalArgumentException) { + this.restStatus = RestStatus.BAD_REQUEST; + } else { + this.restStatus = RestStatus.INTERNAL_SERVER_ERROR; + } + } + + /** + * Constructs a new CryptoRegistryException with the given client name, client type, and a custom message. + * + * @param clientName The name of the client that caused the exception. + * @param clientType The type of the client that caused the exception. + * @param msg A custom message to be included in the exception. + */ + public CryptoRegistryException(String clientName, String clientType, String msg) { + super("[ " + msg + " Client name : " + clientName + " type " + clientType + " ] "); + this.name = clientName; + this.type = clientType; + this.restStatus = RestStatus.INTERNAL_SERVER_ERROR; + } + + /** + * Get the HTTP status associated with this exception. + * + * @return The HTTP status code representing the nature of the exception. + */ + @Override + public RestStatus status() { + return restStatus; + } + + /** + * Get the name of the client associated with this exception. + * + * @return The name of the client for which the exception was raised. + */ + public String getName() { + return name; + } + + /** + * Get the type of the client associated with this exception. + * + * @return The type of the client for which the exception was raised. + */ + public String getType() { + return type; + } + + /** + * Constructs a new CryptoRegistryException by deserializing it from the provided input stream. + * + * @param in The input stream containing the serialized exception data. + * @throws IOException If an I/O error occurs while reading from the input stream. + */ + public CryptoRegistryException(StreamInput in) throws IOException { + super(in); + this.name = in.readString(); + this.type = in.readString(); + this.restStatus = RestStatus.fromCode(in.readInt()); + } + + /** + * Write the exception data to the provided output stream for serialization. + * + * @param out The output stream to which the exception data should be written. + * @throws IOException If an I/O error occurs while writing to the output stream. 
+ */ + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(name); + out.writeString(type); + out.writeInt(restStatus.getStatus()); + } +} diff --git a/server/src/main/java/org/opensearch/crypto/package-info.java b/server/src/main/java/org/opensearch/crypto/package-info.java new file mode 100644 index 0000000000000..742960ac1cf97 --- /dev/null +++ b/server/src/main/java/org/opensearch/crypto/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Package for crypto client abstractions and exceptions. + */ +package org.opensearch.crypto; diff --git a/server/src/main/java/org/opensearch/discovery/Discovery.java b/server/src/main/java/org/opensearch/discovery/Discovery.java index 25b3cb6c2b90d..9d6807b6522c9 100644 --- a/server/src/main/java/org/opensearch/discovery/Discovery.java +++ b/server/src/main/java/org/opensearch/discovery/Discovery.java @@ -33,7 +33,7 @@ package org.opensearch.discovery; import org.opensearch.cluster.coordination.ClusterStatePublisher; -import org.opensearch.common.component.LifecycleComponent; +import org.opensearch.common.lifecycle.LifecycleComponent; /** * A pluggable module allowing to implement discovery of other nodes, publishing of the cluster diff --git a/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java b/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java index cf2f7b47288fd..288371aa240a0 100644 --- a/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java +++ b/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java @@ -37,21 +37,23 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.Coordinator; import org.opensearch.cluster.coordination.ElectionStrategy; +import org.opensearch.cluster.coordination.PersistedStateRegistry; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.RerouteService; import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.service.ClusterApplier; import org.opensearch.cluster.service.ClusterManagerService; import org.opensearch.common.Randomness; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.gateway.GatewayMetaState; import org.opensearch.monitor.NodeHealthService; +import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.plugins.DiscoveryPlugin; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -129,7 +131,9 @@ public DiscoveryModule( Path configFile, GatewayMetaState gatewayMetaState, RerouteService rerouteService, - NodeHealthService nodeHealthService + NodeHealthService nodeHealthService, + PersistedStateRegistry persistedStateRegistry, + RemoteStoreNodeService remoteStoreNodeService ) { final Collection<BiConsumer<DiscoveryNode, ClusterState>> 
joinValidators = new ArrayList<>(); final Map<String, Supplier<SeedHostsProvider>> hostProviders = new HashMap<>(); @@ -205,7 +209,9 @@ public DiscoveryModule( new Random(Randomness.get().nextLong()), rerouteService, electionStrategy, - nodeHealthService + nodeHealthService, + persistedStateRegistry, + remoteStoreNodeService ); } else { throw new IllegalArgumentException("Unknown discovery type [" + discoveryType + "]"); diff --git a/server/src/main/java/org/opensearch/discovery/DiscoveryStats.java b/server/src/main/java/org/opensearch/discovery/DiscoveryStats.java index cc19112dcfc83..fb341ac2ac569 100644 --- a/server/src/main/java/org/opensearch/discovery/DiscoveryStats.java +++ b/server/src/main/java/org/opensearch/discovery/DiscoveryStats.java @@ -32,13 +32,15 @@ package org.opensearch.discovery; +import org.opensearch.Version; +import org.opensearch.cluster.coordination.PendingClusterStateStats; +import org.opensearch.cluster.coordination.PublishClusterStateStats; +import org.opensearch.cluster.service.ClusterStateStats; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.cluster.coordination.PendingClusterStateStats; -import org.opensearch.cluster.coordination.PublishClusterStateStats; import java.io.IOException; @@ -51,21 +53,31 @@ public class DiscoveryStats implements Writeable, ToXContentFragment { private final PendingClusterStateStats queueStats; private final PublishClusterStateStats publishStats; + private final ClusterStateStats clusterStateStats; - public DiscoveryStats(PendingClusterStateStats queueStats, PublishClusterStateStats publishStats) { + public DiscoveryStats(PendingClusterStateStats queueStats, PublishClusterStateStats publishStats, ClusterStateStats clusterStateStats) { this.queueStats = queueStats; this.publishStats = publishStats; + this.clusterStateStats = clusterStateStats; } public DiscoveryStats(StreamInput in) throws IOException { queueStats = in.readOptionalWriteable(PendingClusterStateStats::new); publishStats = in.readOptionalWriteable(PublishClusterStateStats::new); + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + clusterStateStats = in.readOptionalWriteable(ClusterStateStats::new); + } else { + clusterStateStats = null; + } } @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(queueStats); out.writeOptionalWriteable(publishStats); + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeOptionalWriteable(clusterStateStats); + } } @Override @@ -77,6 +89,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (publishStats != null) { publishStats.toXContent(builder, params); } + if (clusterStateStats != null) { + clusterStateStats.toXContent(builder, params); + } builder.endObject(); return builder; } @@ -92,4 +107,8 @@ public PendingClusterStateStats getQueueStats() { public PublishClusterStateStats getPublishStats() { return publishStats; } + + public ClusterStateStats getClusterStateStats() { + return clusterStateStats; + } } diff --git a/server/src/main/java/org/opensearch/discovery/FileBasedSeedHostsProvider.java b/server/src/main/java/org/opensearch/discovery/FileBasedSeedHostsProvider.java index 94f36ba0a546e..b663227978e8f 100644 --- 
a/server/src/main/java/org/opensearch/discovery/FileBasedSeedHostsProvider.java +++ b/server/src/main/java/org/opensearch/discovery/FileBasedSeedHostsProvider.java @@ -35,7 +35,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import java.io.IOException; import java.nio.file.Files; @@ -48,12 +48,12 @@ /** * An implementation of {@link SeedHostsProvider} that reads hosts/ports * from {@link #UNICAST_HOSTS_FILE}. - * + * <p> * Each host/port that is part of the discovery process must be listed on * a separate line. If the port is left off an entry, we default to the * first port in the {@code transport.port} range. * An example unicast hosts file could read: - * + * <p> * 67.81.244.10 * 67.81.244.11:9305 * 67.81.244.15:9400 diff --git a/server/src/main/java/org/opensearch/discovery/HandshakingTransportAddressConnector.java b/server/src/main/java/org/opensearch/discovery/HandshakingTransportAddressConnector.java index 90ca19e9369f4..c41f170bef65a 100644 --- a/server/src/main/java/org/opensearch/discovery/HandshakingTransportAddressConnector.java +++ b/server/src/main/java/org/opensearch/discovery/HandshakingTransportAddressConnector.java @@ -36,17 +36,17 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.Version; -import org.opensearch.action.ActionListener; -import org.opensearch.action.NotifyOnceListener; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Randomness; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.NotifyOnceListener; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.discovery.PeerFinder.TransportAddressConnector; import org.opensearch.transport.ConnectTransportException; import org.opensearch.transport.ConnectionProfile; diff --git a/server/src/main/java/org/opensearch/discovery/InitializeExtensionRequest.java b/server/src/main/java/org/opensearch/discovery/InitializeExtensionRequest.java index 33cdad3045780..6e9fb8b7201a4 100644 --- a/server/src/main/java/org/opensearch/discovery/InitializeExtensionRequest.java +++ b/server/src/main/java/org/opensearch/discovery/InitializeExtensionRequest.java @@ -25,16 +25,19 @@ public class InitializeExtensionRequest extends TransportRequest { private final DiscoveryNode sourceNode; private final DiscoveryExtensionNode extension; + private final String serviceAccountHeader; - public InitializeExtensionRequest(DiscoveryNode sourceNode, DiscoveryExtensionNode extension) { + public InitializeExtensionRequest(DiscoveryNode sourceNode, DiscoveryExtensionNode extension, String serviceAccountHeader) { this.sourceNode = sourceNode; this.extension = extension; + this.serviceAccountHeader = serviceAccountHeader; } public InitializeExtensionRequest(StreamInput in) throws IOException { super(in); sourceNode = new DiscoveryNode(in); extension = new DiscoveryExtensionNode(in); + serviceAccountHeader = in.readString(); } 
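The DiscoveryStats change earlier in this section shows the standard pattern for evolving a wire format compatibly: the new ClusterStateStats field is read and written only when the stream is on Version.V_2_12_0 or later, so older nodes never see bytes they cannot decode. A generic sketch of that pattern with a hypothetical field (MyStats, OldStat, and NewStat are illustrative names):

    // Hypothetical Writeable gaining a field in 2.12.0; both sides must gate on
    // the stream version or mixed-version clusters will fail to deserialize.
    public MyStats(StreamInput in) throws IOException {
        oldStat = in.readOptionalWriteable(OldStat::new);
        if (in.getVersion().onOrAfter(Version.V_2_12_0)) {
            newStat = in.readOptionalWriteable(NewStat::new);
        } else {
            newStat = null; // older peer: the field is simply absent on the wire
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeOptionalWriteable(oldStat);
        if (out.getVersion().onOrAfter(Version.V_2_12_0)) {
            out.writeOptionalWriteable(newStat);
        }
    }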
@Override @@ -42,6 +45,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); sourceNode.writeTo(out); extension.writeTo(out); + out.writeString(serviceAccountHeader); } public DiscoveryNode getSourceNode() { @@ -52,6 +56,10 @@ public DiscoveryExtensionNode getExtension() { return extension; } + public String getServiceAccountHeader() { + return serviceAccountHeader; + } + @Override public String toString() { return "InitializeExtensionsRequest{" + "sourceNode=" + sourceNode + ", extension=" + extension + '}'; @@ -62,7 +70,9 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; InitializeExtensionRequest that = (InitializeExtensionRequest) o; - return Objects.equals(sourceNode, that.sourceNode) && Objects.equals(extension, that.extension); + return Objects.equals(sourceNode, that.sourceNode) + && Objects.equals(extension, that.extension) + && Objects.equals(serviceAccountHeader, that.getServiceAccountHeader()); } @Override diff --git a/server/src/main/java/org/opensearch/discovery/InitializeExtensionResponse.java b/server/src/main/java/org/opensearch/discovery/InitializeExtensionResponse.java index f56ffc84a7909..0c6be72d0fe86 100644 --- a/server/src/main/java/org/opensearch/discovery/InitializeExtensionResponse.java +++ b/server/src/main/java/org/opensearch/discovery/InitializeExtensionResponse.java @@ -34,12 +34,12 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; import java.util.Arrays; -import java.util.Objects; import java.util.List; +import java.util.Objects; /** * PluginResponse to intialize plugin diff --git a/server/src/main/java/org/opensearch/discovery/PeerFinder.java b/server/src/main/java/org/opensearch/discovery/PeerFinder.java index 96556d1cd71ed..1d997c8cbabe8 100644 --- a/server/src/main/java/org/opensearch/discovery/PeerFinder.java +++ b/server/src/main/java/org/opensearch/discovery/PeerFinder.java @@ -35,18 +35,18 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.coordination.PeersResponse; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.Nullable; import org.opensearch.common.SetOnce; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.threadpool.ThreadPool.Names; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequestOptions; diff --git a/server/src/main/java/org/opensearch/discovery/SeedHostsProvider.java b/server/src/main/java/org/opensearch/discovery/SeedHostsProvider.java index 1a4b5a3182dbe..89dfd9310e895 100644 --- a/server/src/main/java/org/opensearch/discovery/SeedHostsProvider.java +++ 
b/server/src/main/java/org/opensearch/discovery/SeedHostsProvider.java @@ -32,7 +32,7 @@ package org.opensearch.discovery; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import java.util.List; diff --git a/server/src/main/java/org/opensearch/discovery/SeedHostsResolver.java b/server/src/main/java/org/opensearch/discovery/SeedHostsResolver.java index 14805648c6771..559a37d6e776b 100644 --- a/server/src/main/java/org/opensearch/discovery/SeedHostsResolver.java +++ b/server/src/main/java/org/opensearch/discovery/SeedHostsResolver.java @@ -35,14 +35,14 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.common.SetOnce; -import org.opensearch.common.component.AbstractLifecycleComponent; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.discovery.PeerFinder.ConfiguredHostsResolver; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/discovery/SettingsBasedSeedHostsProvider.java b/server/src/main/java/org/opensearch/discovery/SettingsBasedSeedHostsProvider.java index 2dcd819e727f2..10185322c2ca6 100644 --- a/server/src/main/java/org/opensearch/discovery/SettingsBasedSeedHostsProvider.java +++ b/server/src/main/java/org/opensearch/discovery/SettingsBasedSeedHostsProvider.java @@ -37,7 +37,7 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.transport.TransportService; import java.util.List; @@ -49,7 +49,7 @@ * An implementation of {@link SeedHostsProvider} that reads hosts/ports * from the "discovery.seed_hosts" node setting. If the port is * left off an entry, we default to the first port in the {@code transport.port} range. - * + * <p> * An example setting might look as follows: * [67.81.244.10, 67.81.244.11:9305, 67.81.244.15:9400] * diff --git a/server/src/main/java/org/opensearch/env/Environment.java b/server/src/main/java/org/opensearch/env/Environment.java index a1e467ad1ba48..5a40e45fb22a6 100644 --- a/server/src/main/java/org/opensearch/env/Environment.java +++ b/server/src/main/java/org/opensearch/env/Environment.java @@ -33,6 +33,7 @@ package org.opensearch.env; import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; @@ -56,8 +57,9 @@ /** * The environment of where things exists. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") @SuppressForbidden(reason = "configures paths for the system") // TODO: move PathUtils to be package-private here instead of // public+forbidden api! 
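SettingsBasedSeedHostsProvider above resolves seed nodes from the discovery.seed_hosts setting, defaulting to the first transport port when an entry omits one. A sketch of supplying that list programmatically, reusing the addresses from the javadoc example:

    // Equivalent of the [67.81.244.10, 67.81.244.11:9305, 67.81.244.15:9400] example above.
    Settings settings = Settings.builder()
        .putList("discovery.seed_hosts", "67.81.244.10", "67.81.244.11:9305", "67.81.244.15:9400")
        .build();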
@@ -247,7 +249,7 @@ public Path[] repoFiles() { /** * Resolves the specified location against the list of configured repository roots - * + * <p> * If the specified location doesn't match any of the roots, returns null. */ public Path resolveRepoFile(String location) { @@ -257,7 +259,7 @@ public Path resolveRepoFile(String location) { /** * Checks if the specified URL is pointing to the local file system and if it does, resolves the specified url * against the list of configured repository roots - * + * <p> * If the specified url doesn't match any of the roots, returns null. */ public URL resolveRepoURL(URL url) { diff --git a/server/src/main/java/org/opensearch/env/EnvironmentSettingsResponse.java b/server/src/main/java/org/opensearch/env/EnvironmentSettingsResponse.java index ce3aa0556744b..eb115ee927d68 100644 --- a/server/src/main/java/org/opensearch/env/EnvironmentSettingsResponse.java +++ b/server/src/main/java/org/opensearch/env/EnvironmentSettingsResponse.java @@ -8,10 +8,10 @@ package org.opensearch.env; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.settings.Settings; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; import java.util.Objects; diff --git a/server/src/main/java/org/opensearch/env/NodeEnvironment.java b/server/src/main/java/org/opensearch/env/NodeEnvironment.java index f7d1f6e4343cc..2748938d8b761 100644 --- a/server/src/main/java/org/opensearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/opensearch/env/NodeEnvironment.java @@ -53,21 +53,22 @@ import org.opensearch.common.Randomness; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.UUIDs; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.util.FileSystemUtils; -import org.opensearch.common.lease.Releasable; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.gateway.MetadataStateFormat; import org.opensearch.gateway.PersistedClusterStateService; -import org.opensearch.core.index.Index; import org.opensearch.index.IndexSettings; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.ShardPath; import org.opensearch.index.store.FsDirectoryFactory; import org.opensearch.monitor.fs.FsInfo; @@ -108,14 +109,16 @@ /** * A component that holds all data paths for a single node. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class NodeEnvironment implements Closeable { /** * A node path. 
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class NodePath { /* ${data.paths}/nodes/{node.id} */ public final Path path; @@ -196,6 +199,8 @@ public String toString() { private final NodeMetadata nodeMetadata; + private final IndexStoreListener indexStoreListener; + /** * Maximum number of data nodes that should run in an environment. */ @@ -292,18 +297,23 @@ public void close() { } } + public NodeEnvironment(Settings settings, Environment environment) throws IOException { + this(settings, environment, IndexStoreListener.EMPTY); + } + /** * Setup the environment. * @param settings settings from opensearch.yml */ - public NodeEnvironment(Settings settings, Environment environment) throws IOException { - if (!DiscoveryNode.nodeRequiresLocalStorage(settings)) { + public NodeEnvironment(Settings settings, Environment environment, IndexStoreListener indexStoreListener) throws IOException { + if (DiscoveryNode.nodeRequiresLocalStorage(settings) == false) { nodePaths = null; fileCacheNodePath = null; sharedDataPath = null; locks = null; nodeLockId = -1; nodeMetadata = new NodeMetadata(generateNodeId(settings), Version.CURRENT); + this.indexStoreListener = IndexStoreListener.EMPTY; return; } boolean success = false; @@ -382,6 +392,7 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce } this.nodeMetadata = loadNodeMetadata(settings, logger, nodePaths); + this.indexStoreListener = indexStoreListener; success = true; } finally { if (success == false) { @@ -574,6 +585,9 @@ public static void acquireFSLockForPaths(IndexSettings indexSettings, Path... sh public void deleteShardDirectoryUnderLock(ShardLock lock, IndexSettings indexSettings) throws IOException { final ShardId shardId = lock.getShardId(); assert isShardLocked(shardId) : "shard " + shardId + " is not locked"; + + indexStoreListener.beforeShardPathDeleted(shardId, indexSettings, this); + final Path[] paths = availableShardPaths(shardId); logger.trace("acquiring locks for {}, paths: [{}]", shardId, paths); acquireFSLockForPaths(indexSettings, paths); @@ -650,6 +664,8 @@ public void deleteIndexDirectorySafe(Index index, long lockTimeoutMS, IndexSetti * @param indexSettings settings for the index being deleted */ public void deleteIndexDirectoryUnderLock(Index index, IndexSettings indexSettings) throws IOException { + indexStoreListener.beforeIndexPathDeleted(index, indexSettings, this); + final Path[] indexPaths = indexPaths(index); logger.trace("deleting index {} directory, paths({}): [{}]", index, indexPaths.length, indexPaths); IOUtils.rm(indexPaths); @@ -660,6 +676,18 @@ public void deleteIndexDirectoryUnderLock(Index index, IndexSettings indexSettin } } + private void deleteIndexFileCacheDirectory(Index index) { + final Path indexCachePath = fileCacheNodePath().fileCachePath.resolve(index.getUUID()); + logger.trace("deleting index {} file cache directory, path: [{}]", index, indexCachePath); + if (Files.exists(indexCachePath)) { + try { + IOUtils.rm(indexCachePath); + } catch (IOException e) { + logger.error(() -> new ParameterizedMessage("Failed to delete cache path for index {}", index), e); + } + } + } + /** * Tries to lock all local shards for the given index. If any of the shard locks can't be acquired * a {@link ShardLockObtainFailedException} is thrown and all previously acquired locks are released. 
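deleteShardDirectoryUnderLock and lockAllForIndex above both funnel through NodeEnvironment's per-shard locks, and a ShardLock is Closeable, so callers typically hold it in try-with-resources. A hedged usage sketch against the shardLock(ShardId, String, long) signature referenced in this class (the timeout and details string are illustrative):

    // Guard on-disk shard mutations with the shard lock; close() releases it.
    try (ShardLock lock = nodeEnvironment.shardLock(shardId, "cleanup", 5_000L)) {
        // safe to delete or rewrite this shard's files while the lock is held
    } catch (ShardLockObtainFailedException e) {
        // another operation holds the lock; back off instead of touching the path
    }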
@@ -702,7 +730,7 @@ public List<ShardLock> lockAllForIndex( * write operation on a shards data directory like deleting files, creating a new index writer * or recover from a different shard instance into it. If the shard lock can not be acquired * a {@link ShardLockObtainFailedException} is thrown. - * + * <p> * Note: this method will return immediately if the lock can't be acquired. * * @param id the shard ID to lock @@ -767,15 +795,18 @@ public void setDetails(String details) { /** * A functional interface that people can use to reference {@link #shardLock(ShardId, String, long)} + * + * @opensearch.api */ @FunctionalInterface + @PublicApi(since = "1.0.0") public interface ShardLocker { ShardLock lock(ShardId shardId, String lockDetails, long lockTimeoutMS) throws ShardLockObtainFailedException; } /** * Returns all currently lock shards. - * + * <p> * Note: the shard ids return do not contain a valid Index UUID */ public Set<ShardId> lockedShards() { @@ -1381,4 +1412,18 @@ private static void tryWriteTempFile(Path path) throws IOException { } } } + + /** + * A listener that is executed on per-index and per-shard store events, like deleting shard path + * + * @opensearch.internal + */ + public interface IndexStoreListener { + default void beforeShardPathDeleted(ShardId shardId, IndexSettings indexSettings, NodeEnvironment env) {} + + default void beforeIndexPathDeleted(Index index, IndexSettings indexSettings, NodeEnvironment env) {} + + IndexStoreListener EMPTY = new IndexStoreListener() { + }; + } } diff --git a/server/src/main/java/org/opensearch/env/NodeMetadata.java b/server/src/main/java/org/opensearch/env/NodeMetadata.java index fd086dcd9db6b..99818d1e320f1 100644 --- a/server/src/main/java/org/opensearch/env/NodeMetadata.java +++ b/server/src/main/java/org/opensearch/env/NodeMetadata.java @@ -33,11 +33,11 @@ package org.opensearch.env; import org.opensearch.Version; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.gateway.MetadataStateFormat; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java b/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java index e56f7a838d85f..3a8996afed34e 100644 --- a/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java +++ b/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java @@ -41,8 +41,8 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.set.Sets; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.common.util.set.Sets; import org.opensearch.gateway.MetadataStateFormat; import org.opensearch.gateway.PersistedClusterStateService; diff --git a/server/src/main/java/org/opensearch/env/ShardLock.java b/server/src/main/java/org/opensearch/env/ShardLock.java index dd34eb3275f68..76afc0ec0329a 100644 --- a/server/src/main/java/org/opensearch/env/ShardLock.java +++ b/server/src/main/java/org/opensearch/env/ShardLock.java @@ -32,6 +32,7 @@ package org.opensearch.env; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.index.shard.ShardId; import java.io.Closeable; @@ -44,8 +45,9 @@ * * @see NodeEnvironment * - * 
@opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class ShardLock implements Closeable { private final ShardId shardId; diff --git a/server/src/main/java/org/opensearch/env/ShardLockObtainFailedException.java b/server/src/main/java/org/opensearch/env/ShardLockObtainFailedException.java index 525d8a76c9699..ae77d942356b5 100644 --- a/server/src/main/java/org/opensearch/env/ShardLockObtainFailedException.java +++ b/server/src/main/java/org/opensearch/env/ShardLockObtainFailedException.java @@ -33,6 +33,7 @@ package org.opensearch.env; import org.opensearch.OpenSearchException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.shard.ShardId; @@ -41,8 +42,9 @@ /** * Exception used when the in-memory lock for a shard cannot be obtained * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ShardLockObtainFailedException extends OpenSearchException { public ShardLockObtainFailedException(ShardId shardId, String message) { diff --git a/server/src/main/java/org/opensearch/extensions/AcknowledgedResponse.java b/server/src/main/java/org/opensearch/extensions/AcknowledgedResponse.java index 7c7e3e78798e8..076ab41a077c4 100644 --- a/server/src/main/java/org/opensearch/extensions/AcknowledgedResponse.java +++ b/server/src/main/java/org/opensearch/extensions/AcknowledgedResponse.java @@ -10,7 +10,8 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; + import java.io.IOException; import java.util.Objects; diff --git a/server/src/main/java/org/opensearch/extensions/AddSettingsUpdateConsumerRequest.java b/server/src/main/java/org/opensearch/extensions/AddSettingsUpdateConsumerRequest.java index c8eb65e2a94d4..24984a3800819 100644 --- a/server/src/main/java/org/opensearch/extensions/AddSettingsUpdateConsumerRequest.java +++ b/server/src/main/java/org/opensearch/extensions/AddSettingsUpdateConsumerRequest.java @@ -8,11 +8,11 @@ package org.opensearch.extensions; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.WriteableSetting; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.transport.TransportRequest; -import org.opensearch.common.settings.Setting; -import org.opensearch.common.settings.WriteableSetting; import java.io.IOException; import java.util.ArrayList; diff --git a/server/src/main/java/org/opensearch/extensions/AddSettingsUpdateConsumerRequestHandler.java b/server/src/main/java/org/opensearch/extensions/AddSettingsUpdateConsumerRequestHandler.java index 67c56b7f458ff..e3b0b5bb67fe6 100644 --- a/server/src/main/java/org/opensearch/extensions/AddSettingsUpdateConsumerRequestHandler.java +++ b/server/src/main/java/org/opensearch/extensions/AddSettingsUpdateConsumerRequestHandler.java @@ -8,8 +8,6 @@ package org.opensearch.extensions; -import java.util.List; - import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.cluster.service.ClusterService; @@ -17,9 +15,11 @@ import org.opensearch.common.settings.SettingsException; import org.opensearch.common.settings.SettingsModule; import org.opensearch.common.settings.WriteableSetting; -import org.opensearch.transport.TransportResponse; +import 
org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportService; +import java.util.List; + /** * Handles requests to add setting update consumers * diff --git a/server/src/main/java/org/opensearch/extensions/DiscoveryExtensionNode.java b/server/src/main/java/org/opensearch/extensions/DiscoveryExtensionNode.java index ac1dfe5309ffa..a888f99ff11ed 100644 --- a/server/src/main/java/org/opensearch/extensions/DiscoveryExtensionNode.java +++ b/server/src/main/java/org/opensearch/extensions/DiscoveryExtensionNode.java @@ -15,7 +15,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; diff --git a/server/src/main/java/org/opensearch/extensions/ExtensionDependency.java b/server/src/main/java/org/opensearch/extensions/ExtensionDependency.java index 245abc558e5fa..99f6a063b437e 100644 --- a/server/src/main/java/org/opensearch/extensions/ExtensionDependency.java +++ b/server/src/main/java/org/opensearch/extensions/ExtensionDependency.java @@ -9,14 +9,14 @@ package org.opensearch.extensions; -import java.io.IOException; -import java.util.Objects; - import org.opensearch.Version; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import java.io.IOException; +import java.util.Objects; + /** * This class handles the dependent extensions information * diff --git a/server/src/main/java/org/opensearch/extensions/ExtensionDependencyResponse.java b/server/src/main/java/org/opensearch/extensions/ExtensionDependencyResponse.java index d9531c0cc2894..4706d1821ffa9 100644 --- a/server/src/main/java/org/opensearch/extensions/ExtensionDependencyResponse.java +++ b/server/src/main/java/org/opensearch/extensions/ExtensionDependencyResponse.java @@ -9,13 +9,14 @@ package org.opensearch.extensions; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.transport.TransportResponse; + import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Objects; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.transport.TransportResponse; /** * The response for getting the Extension Dependency. diff --git a/server/src/main/java/org/opensearch/extensions/ExtensionReader.java b/server/src/main/java/org/opensearch/extensions/ExtensionReader.java index e54e3a6a4f940..fc2ba817ace5f 100644 --- a/server/src/main/java/org/opensearch/extensions/ExtensionReader.java +++ b/server/src/main/java/org/opensearch/extensions/ExtensionReader.java @@ -8,15 +8,16 @@ package org.opensearch.extensions; -import java.net.UnknownHostException; import org.opensearch.cluster.node.DiscoveryNode; +import java.net.UnknownHostException; + /** * Reference to a method that transports a parse request to an extension. By convention, this method takes * a category class used to identify the reader defined within the JVM that the extension is running on. 
* Additionally, this method takes in the extension's corresponding DiscoveryNode and a byte array (context) that the * extension's reader will be applied to. - * + * <p> * By convention the extensions' reader is a constructor that takes StreamInput as an argument for most classes and a static method for things like enums. * Classes will implement this via a constructor (or a static method in the case of enumerations), it's something that should * look like: diff --git a/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java b/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java index 468de4238f879..b531abcb845d7 100644 --- a/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java +++ b/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java @@ -8,16 +8,6 @@ package org.opensearch.extensions; -import java.io.IOException; -import java.net.InetAddress; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.concurrent.CompletableFuture; -import java.util.stream.Collectors; - import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -29,15 +19,16 @@ import org.opensearch.cluster.ClusterSettingsResponse; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Setting; -import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsModule; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; - +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.discovery.InitializeExtensionRequest; import org.opensearch.discovery.InitializeExtensionResponse; +import org.opensearch.env.EnvironmentSettingsResponse; import org.opensearch.extensions.ExtensionsSettings.Extension; import org.opensearch.extensions.action.ExtensionActionRequest; import org.opensearch.extensions.action.ExtensionActionResponse; @@ -49,13 +40,23 @@ import org.opensearch.extensions.rest.RestActionsRequestHandler; import org.opensearch.extensions.settings.CustomSettingsRequestHandler; import org.opensearch.extensions.settings.RegisterCustomSettingsRequest; +import org.opensearch.identity.IdentityService; +import org.opensearch.identity.tokens.AuthToken; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.ConnectTransportException; import org.opensearch.transport.TransportException; -import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; -import org.opensearch.env.EnvironmentSettingsResponse; + +import java.io.IOException; +import java.net.InetAddress; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.CompletableFuture; +import java.util.stream.Collectors; /** * The main class for managing Extension communication with the OpenSearch Node. 
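// Editorial aside: the ExtensionReader javadoc above ends with "look like:" but the hunk is
// cut off before the sample code. As a hedged illustration (not the file's actual sample),
// the two conventional reader shapes it describes are a constructor taking a StreamInput,
// and a static factory method for enums. MyTransportPayload and MyStatus are hypothetical.

import java.io.IOException;

import org.opensearch.core.common.io.stream.StreamInput;
import org.opensearch.core.common.io.stream.StreamOutput;
import org.opensearch.core.common.io.stream.Writeable;

final class MyTransportPayload implements Writeable {
    private final String value;

    // the "reader" for most classes: a constructor that takes a StreamInput
    MyTransportPayload(StreamInput in) throws IOException {
        this.value = in.readString();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(value);
    }
}

enum MyStatus {
    OK,
    FAILED;

    // the "reader" for enumerations: a static method instead of a constructor
    static MyStatus readFrom(StreamInput in) throws IOException {
        return in.readEnum(MyStatus.class);
    }
}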
@@ -101,6 +102,7 @@ public static enum OpenSearchRequestType { private Settings environmentSettings; private AddSettingsUpdateConsumerRequestHandler addSettingsUpdateConsumerRequestHandler; private NodeClient client; + private IdentityService identityService; /** * Instantiate a new ExtensionsManager object to handle requests and responses from extensions. This is called during Node bootstrap. @@ -108,7 +110,7 @@ public static enum OpenSearchRequestType { * @param additionalSettings Additional settings to read in from extension initialization request * @throws IOException If the extensions discovery file is not properly retrieved. */ - public ExtensionsManager(Set<Setting<?>> additionalSettings) throws IOException { + public ExtensionsManager(Set<Setting<?>> additionalSettings, IdentityService identityService) throws IOException { logger.info("ExtensionsManager initialized"); this.initializedExtensions = new HashMap<String, DiscoveryExtensionNode>(); this.extensionIdMap = new HashMap<String, DiscoveryExtensionNode>(); @@ -123,6 +125,7 @@ public ExtensionsManager(Set<Setting<?>> additionalSettings) throws IOException } this.client = null; this.extensionTransportActionsHandler = null; + this.identityService = identityService; } /** @@ -142,9 +145,15 @@ public void initializeServicesAndRestHandler( TransportService transportService, ClusterService clusterService, Settings initialEnvironmentSettings, - NodeClient client + NodeClient client, + IdentityService identityService ) { - this.restActionsRequestHandler = new RestActionsRequestHandler(actionModule.getRestController(), extensionIdMap, transportService); + this.restActionsRequestHandler = new RestActionsRequestHandler( + actionModule.getRestController(), + extensionIdMap, + transportService, + identityService + ); this.customSettingsRequestHandler = new CustomSettingsRequestHandler(settingsModule); this.transportService = transportService; this.clusterService = clusterService; @@ -291,7 +300,7 @@ private void registerRequestHandler(DynamicActionRegistry dynamicActionRegistry) * Loads a single extension * @param extension The extension to be loaded */ - public void loadExtension(Extension extension) throws IOException { + public DiscoveryExtensionNode loadExtension(Extension extension) throws IOException { validateExtension(extension); DiscoveryExtensionNode discoveryExtensionNode = new DiscoveryExtensionNode( extension.getName(), @@ -305,6 +314,12 @@ public void loadExtension(Extension extension) throws IOException { extensionIdMap.put(extension.getUniqueId(), discoveryExtensionNode); extensionSettingsMap.put(extension.getUniqueId(), extension); logger.info("Loaded extension with uniqueId " + extension.getUniqueId() + ": " + extension); + return discoveryExtensionNode; + } + + public void initializeExtension(Extension extension) throws IOException { + DiscoveryExtensionNode node = loadExtension(extension); + initializeExtensionNode(node); } private void validateField(String fieldName, String value) throws IOException { @@ -331,11 +346,11 @@ private void validateExtension(Extension extension) throws IOException { */ public void initialize() { for (DiscoveryExtensionNode extension : extensionIdMap.values()) { - initializeExtension(extension); + initializeExtensionNode(extension); } } - private void initializeExtension(DiscoveryExtensionNode extension) { + public void initializeExtensionNode(DiscoveryExtensionNode extensionNode) { final CompletableFuture<InitializeExtensionResponse> inProgressFuture = new CompletableFuture<>(); final 
TransportResponseHandler<InitializeExtensionResponse> initializeExtensionResponseHandler = new TransportResponseHandler< @@ -375,7 +390,8 @@ public String executor() { transportService.getThreadPool().generic().execute(new AbstractRunnable() { @Override public void onFailure(Exception e) { - extensionIdMap.remove(extension.getId()); + logger.warn("Error registering extension: " + extensionNode.getId(), e); + extensionIdMap.remove(extensionNode.getId()); if (e.getCause() instanceof ConnectTransportException) { logger.info("No response from extension to request.", e); throw (ConnectTransportException) e.getCause(); @@ -390,11 +406,11 @@ public void onFailure(Exception e) { @Override protected void doRun() throws Exception { - transportService.connectToExtensionNode(extension); + transportService.connectToExtensionNode(extensionNode); transportService.sendRequest( - extension, + extensionNode, REQUEST_EXTENSION_ACTION_NAME, - new InitializeExtensionRequest(transportService.getLocalNode(), extension), + new InitializeExtensionRequest(transportService.getLocalNode(), extensionNode, issueServiceAccount(extensionNode)), initializeExtensionResponseHandler ); } @@ -437,6 +453,15 @@ TransportResponse handleExtensionRequest(ExtensionRequest extensionRequest) thro } } + /** + * A helper method called during initialization that issues a service accounts to extensions + * @param extension The extension to be issued a service account + */ + private String issueServiceAccount(DiscoveryExtensionNode extension) { + AuthToken serviceAccountToken = identityService.getTokenManager().issueServiceAccountToken(extension.getId()); + return serviceAccountToken.asAuthHeaderValue(); + } + static String getRequestExtensionActionName() { return REQUEST_EXTENSION_ACTION_NAME; } diff --git a/server/src/main/java/org/opensearch/extensions/NoopExtensionsManager.java b/server/src/main/java/org/opensearch/extensions/NoopExtensionsManager.java index d434074279041..81b1b91b11481 100644 --- a/server/src/main/java/org/opensearch/extensions/NoopExtensionsManager.java +++ b/server/src/main/java/org/opensearch/extensions/NoopExtensionsManager.java @@ -8,21 +8,22 @@ package org.opensearch.extensions; -import java.io.IOException; -import java.util.Optional; -import java.util.Set; - import org.opensearch.action.ActionModule; import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsModule; - import org.opensearch.extensions.action.ExtensionActionRequest; import org.opensearch.extensions.action.ExtensionActionResponse; import org.opensearch.extensions.action.RemoteExtensionActionResponse; +import org.opensearch.identity.IdentityService; import org.opensearch.transport.TransportService; +import java.io.IOException; +import java.util.List; +import java.util.Optional; +import java.util.Set; + /** * Noop class for ExtensionsManager * @@ -31,7 +32,7 @@ public class NoopExtensionsManager extends ExtensionsManager { public NoopExtensionsManager() throws IOException { - super(Set.of()); + super(Set.of(), new IdentityService(Settings.EMPTY, List.of())); } @Override @@ -41,7 +42,8 @@ public void initializeServicesAndRestHandler( TransportService transportService, ClusterService clusterService, Settings initialEnvironmentSettings, - NodeClient client + NodeClient client, + IdentityService identityService ) { // no-op } diff --git a/server/src/main/java/org/opensearch/extensions/UpdateSettingsResponseHandler.java 
b/server/src/main/java/org/opensearch/extensions/UpdateSettingsResponseHandler.java index bc6f6f0d688f5..8c7c3c4cb9bd9 100644 --- a/server/src/main/java/org/opensearch/extensions/UpdateSettingsResponseHandler.java +++ b/server/src/main/java/org/opensearch/extensions/UpdateSettingsResponseHandler.java @@ -8,7 +8,6 @@ package org.opensearch.extensions; -import java.io.IOException; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -17,6 +16,8 @@ import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportResponseHandler; +import java.io.IOException; + /** * Response handler for {@link UpdateSettingsRequest} * diff --git a/server/src/main/java/org/opensearch/extensions/action/ExtensionActionResponse.java b/server/src/main/java/org/opensearch/extensions/action/ExtensionActionResponse.java index 6b026b51ed2aa..5f0ba572b2ea6 100644 --- a/server/src/main/java/org/opensearch/extensions/action/ExtensionActionResponse.java +++ b/server/src/main/java/org/opensearch/extensions/action/ExtensionActionResponse.java @@ -8,7 +8,7 @@ package org.opensearch.extensions.action; -import org.opensearch.action.ActionResponse; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/extensions/action/ExtensionProxyTransportAction.java b/server/src/main/java/org/opensearch/extensions/action/ExtensionProxyTransportAction.java index 364965dc582e6..f5ec472c3edc4 100644 --- a/server/src/main/java/org/opensearch/extensions/action/ExtensionProxyTransportAction.java +++ b/server/src/main/java/org/opensearch/extensions/action/ExtensionProxyTransportAction.java @@ -8,12 +8,12 @@ package org.opensearch.extensions.action; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.HandledTransportAction; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; import org.opensearch.extensions.ExtensionsManager; import org.opensearch.tasks.Task; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/extensions/action/ExtensionTransportAction.java b/server/src/main/java/org/opensearch/extensions/action/ExtensionTransportAction.java index 4b0b9725e50ae..50a3e0036cbb1 100644 --- a/server/src/main/java/org/opensearch/extensions/action/ExtensionTransportAction.java +++ b/server/src/main/java/org/opensearch/extensions/action/ExtensionTransportAction.java @@ -8,9 +8,9 @@ package org.opensearch.extensions.action; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.TransportAction; +import org.opensearch.core.action.ActionListener; import org.opensearch.extensions.ExtensionsManager; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskManager; diff --git a/server/src/main/java/org/opensearch/extensions/action/ExtensionTransportActionsHandler.java b/server/src/main/java/org/opensearch/extensions/action/ExtensionTransportActionsHandler.java index 22502509634c6..ac60df1b73764 100644 --- a/server/src/main/java/org/opensearch/extensions/action/ExtensionTransportActionsHandler.java +++ 
b/server/src/main/java/org/opensearch/extensions/action/ExtensionTransportActionsHandler.java @@ -10,19 +10,19 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionModule; import org.opensearch.action.ActionModule.DynamicActionRegistry; import org.opensearch.action.support.ActionFilters; import org.opensearch.client.node.NodeClient; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.extensions.DiscoveryExtensionNode; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.extensions.AcknowledgedResponse; +import org.opensearch.extensions.DiscoveryExtensionNode; import org.opensearch.extensions.ExtensionsManager; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.ActionNotFoundTransportException; import org.opensearch.transport.TransportException; -import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/extensions/action/RegisterTransportActionsRequest.java b/server/src/main/java/org/opensearch/extensions/action/RegisterTransportActionsRequest.java index cd6d01f0c6621..7e559f9b948d3 100644 --- a/server/src/main/java/org/opensearch/extensions/action/RegisterTransportActionsRequest.java +++ b/server/src/main/java/org/opensearch/extensions/action/RegisterTransportActionsRequest.java @@ -10,8 +10,8 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.extensions.proto.RegisterTransportActionsProto.RegisterTransportActions; import org.opensearch.extensions.proto.ExtensionIdentityProto.ExtensionIdentity; +import org.opensearch.extensions.proto.RegisterTransportActionsProto.RegisterTransportActions; import org.opensearch.transport.TransportRequest; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/extensions/action/RemoteExtensionActionResponse.java b/server/src/main/java/org/opensearch/extensions/action/RemoteExtensionActionResponse.java index 7a6b053eb7480..d092bee714941 100644 --- a/server/src/main/java/org/opensearch/extensions/action/RemoteExtensionActionResponse.java +++ b/server/src/main/java/org/opensearch/extensions/action/RemoteExtensionActionResponse.java @@ -8,7 +8,7 @@ package org.opensearch.extensions.action; -import org.opensearch.action.ActionResponse; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/extensions/rest/ExtensionRestRequest.java b/server/src/main/java/org/opensearch/extensions/rest/ExtensionRestRequest.java index e6df6e964a31b..89df1e4fbde35 100644 --- a/server/src/main/java/org/opensearch/extensions/rest/ExtensionRestRequest.java +++ b/server/src/main/java/org/opensearch/extensions/rest/ExtensionRestRequest.java @@ -18,10 +18,10 @@ import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.http.HttpRequest; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestRequest.Method; import org.opensearch.transport.TransportRequest; -import org.opensearch.http.HttpRequest; 
import java.io.IOException; import java.util.ArrayList; @@ -31,6 +31,8 @@ import java.util.Objects; import java.util.Set; +import static java.util.Objects.requireNonNull; + /** * Request to execute REST actions on extension node. * This contains necessary portions of a {@link RestRequest} object, but does not pass the full request for security concerns. @@ -86,7 +88,7 @@ public ExtensionRestRequest( this.headers = headers; this.mediaType = mediaType; this.content = content; - this.principalIdentifierToken = principalIdentifier; + this.principalIdentifierToken = requireNonNull(principalIdentifier); this.httpVersion = httpVersion; } @@ -104,7 +106,7 @@ public ExtensionRestRequest(StreamInput in) throws IOException { params = in.readMap(StreamInput::readString, StreamInput::readString); headers = in.readMap(StreamInput::readString, StreamInput::readStringList); if (in.readBoolean()) { - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_10_0)) { mediaType = in.readMediaType(); } else { mediaType = in.readEnum(XContentType.class); @@ -125,7 +127,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeStringCollection); out.writeBoolean(mediaType != null); if (mediaType != null) { - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { mediaType.writeTo(out); } else { out.writeEnum((XContentType) mediaType); @@ -280,7 +282,7 @@ public boolean isContentConsumed() { } /** - * Gets a parser for the contents of this request if there is content and an xContentType. + * Gets a parser for the contents of this request if there is content, an xContentType, and a principal identifier. * * @param xContentRegistry The extension's xContentRegistry * @return A parser for the given content and content type. 
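// Editorial aside: a minimal sketch (not part of this diff) of the wire-compatibility gate
// used in the ExtensionRestRequest hunks above, where the MediaType serialization cutover is
// corrected from V_3_0_0 to V_2_10_0. The key invariant is that the read and write sides
// branch on the same version, otherwise mixed-version clusters cannot deserialize each other.
// MediaTypeWireFormat is a hypothetical helper; the stream calls are the ones the diff uses.

import java.io.IOException;

import org.opensearch.Version;
import org.opensearch.common.xcontent.XContentType;
import org.opensearch.core.common.io.stream.StreamInput;
import org.opensearch.core.common.io.stream.StreamOutput;
import org.opensearch.core.xcontent.MediaType;

final class MediaTypeWireFormat {
    // peers on 2.10+ understand the generic MediaType encoding; older peers expect the enum
    static MediaType read(StreamInput in) throws IOException {
        return in.getVersion().onOrAfter(Version.V_2_10_0) ? in.readMediaType() : in.readEnum(XContentType.class);
    }

    static void write(StreamOutput out, MediaType mediaType) throws IOException {
        if (out.getVersion().onOrAfter(Version.V_2_10_0)) {
            mediaType.writeTo(out);
        } else {
            out.writeEnum((XContentType) mediaType);
        }
    }
}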
@@ -291,6 +293,9 @@ public final XContentParser contentParser(NamedXContentRegistry xContentRegistry
         if (!hasContent() || getXContentType() == null) {
             throw new OpenSearchParseException("There is no request body or the ContentType is invalid.");
         }
+        if (getRequestIssuerIdentity() == null) {
+            throw new OpenSearchParseException("The requester identity is missing or invalid.");
+        }
         return getXContentType().xContent().createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, content.streamInput());
     }
diff --git a/server/src/main/java/org/opensearch/extensions/rest/ExtensionRestResponse.java b/server/src/main/java/org/opensearch/extensions/rest/ExtensionRestResponse.java
index 294f534e3f2fe..af539d864a4d6 100644
--- a/server/src/main/java/org/opensearch/extensions/rest/ExtensionRestResponse.java
+++ b/server/src/main/java/org/opensearch/extensions/rest/ExtensionRestResponse.java
@@ -9,10 +9,10 @@
 package org.opensearch.extensions.rest;
 
 import org.opensearch.core.common.bytes.BytesReference;
+import org.opensearch.core.rest.RestStatus;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.rest.BytesRestResponse;
 import org.opensearch.rest.RestRequest;
-import org.opensearch.core.rest.RestStatus;
 
 import java.util.List;
 
diff --git a/server/src/main/java/org/opensearch/extensions/rest/RestActionsRequestHandler.java b/server/src/main/java/org/opensearch/extensions/rest/RestActionsRequestHandler.java
index d890c1b85bb81..383796f0c3b44 100644
--- a/server/src/main/java/org/opensearch/extensions/rest/RestActionsRequestHandler.java
+++ b/server/src/main/java/org/opensearch/extensions/rest/RestActionsRequestHandler.java
@@ -9,11 +9,12 @@
 package org.opensearch.extensions.rest;
 
 import org.opensearch.action.ActionModule.DynamicActionRegistry;
+import org.opensearch.core.transport.TransportResponse;
 import org.opensearch.extensions.AcknowledgedResponse;
 import org.opensearch.extensions.DiscoveryExtensionNode;
+import org.opensearch.identity.IdentityService;
 import org.opensearch.rest.RestController;
 import org.opensearch.rest.RestHandler;
-import org.opensearch.transport.TransportResponse;
 import org.opensearch.transport.TransportService;
 
 import java.util.Map;
@@ -28,6 +29,7 @@ public class RestActionsRequestHandler {
     private final RestController restController;
     private final Map<String, DiscoveryExtensionNode> extensionIdMap;
     private final TransportService transportService;
+    private final IdentityService identityService;
 
     /**
      * Instantiates a new REST Actions Request Handler using the Node's RestController.
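// Editorial aside: a minimal sketch (not part of this diff) of the fail-fast validation
// pattern the ExtensionRestRequest hunks above adopt: the constructor rejects a null
// principal identifier token via requireNonNull, and contentParser() re-validates before
// parsing. The Request class and its fields here are hypothetical stand-ins.

import static java.util.Objects.requireNonNull;

final class Request {
    private final String principalIdentifierToken;
    private final byte[] content;

    Request(String principalIdentifierToken, byte[] content) {
        // reject an unauthenticated request at construction instead of deep inside a handler
        this.principalIdentifierToken = requireNonNull(principalIdentifierToken);
        this.content = content;
    }

    byte[] parseContent() {
        if (content == null || content.length == 0) {
            throw new IllegalStateException("There is no request body or the ContentType is invalid.");
        }
        if (principalIdentifierToken == null) {
            // belt-and-braces re-check, mirroring the guard the diff adds to contentParser()
            throw new IllegalStateException("The requester identity is missing or invalid.");
        }
        return content;
    }
}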
@@ -39,11 +41,13 @@ public class RestActionsRequestHandler { public RestActionsRequestHandler( RestController restController, Map<String, DiscoveryExtensionNode> extensionIdMap, - TransportService transportService + TransportService transportService, + IdentityService identityService ) { this.restController = restController; this.extensionIdMap = extensionIdMap; this.transportService = transportService; + this.identityService = identityService; } /** @@ -58,11 +62,15 @@ public TransportResponse handleRegisterRestActionsRequest( DynamicActionRegistry dynamicActionRegistry ) throws Exception { DiscoveryExtensionNode discoveryExtensionNode = extensionIdMap.get(restActionsRequest.getUniqueId()); + if (discoveryExtensionNode == null) { + throw new IllegalStateException("Missing extension node for " + restActionsRequest.getUniqueId()); + } RestHandler handler = new RestSendToExtensionAction( restActionsRequest, discoveryExtensionNode, transportService, - dynamicActionRegistry + dynamicActionRegistry, + identityService ); restController.registerHandler(handler); return new AcknowledgedResponse(true); diff --git a/server/src/main/java/org/opensearch/extensions/rest/RestExecuteOnExtensionResponse.java b/server/src/main/java/org/opensearch/extensions/rest/RestExecuteOnExtensionResponse.java index 63ae6ce93af22..9cddca6c0e14d 100644 --- a/server/src/main/java/org/opensearch/extensions/rest/RestExecuteOnExtensionResponse.java +++ b/server/src/main/java/org/opensearch/extensions/rest/RestExecuteOnExtensionResponse.java @@ -10,9 +10,9 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.rest.RestResponse; import org.opensearch.core.rest.RestStatus; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.rest.RestResponse; import java.io.IOException; import java.util.List; diff --git a/server/src/main/java/org/opensearch/extensions/rest/RestInitializeExtensionAction.java b/server/src/main/java/org/opensearch/extensions/rest/RestInitializeExtensionAction.java index 4b622b841a040..fc7c21a6eccd6 100644 --- a/server/src/main/java/org/opensearch/extensions/rest/RestInitializeExtensionAction.java +++ b/server/src/main/java/org/opensearch/extensions/rest/RestInitializeExtensionAction.java @@ -159,8 +159,7 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client extAdditionalSettings ); try { - extensionsManager.loadExtension(extension); - extensionsManager.initialize(); + extensionsManager.initializeExtension(extension); } catch (CompletionException e) { Throwable cause = e.getCause(); if (cause instanceof TimeoutException) { diff --git a/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java b/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java index a96e87b0200cc..41783b89ccc69 100644 --- a/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java +++ b/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java @@ -14,34 +14,37 @@ import org.opensearch.client.node.NodeClient; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaType; import org.opensearch.extensions.DiscoveryExtensionNode; import org.opensearch.extensions.ExtensionsManager; +import 
org.opensearch.http.HttpRequest; +import org.opensearch.identity.IdentityService; +import org.opensearch.identity.Subject; +import org.opensearch.identity.tokens.OnBehalfOfClaims; +import org.opensearch.identity.tokens.TokenManager; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.NamedRoute; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestRequest.Method; -import org.opensearch.core.rest.RestStatus; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; -import org.opensearch.http.HttpRequest; import java.io.IOException; import java.nio.charset.StandardCharsets; -import java.security.Principal; import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; -import java.util.stream.Collectors; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.stream.Collectors; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; @@ -54,19 +57,13 @@ public class RestSendToExtensionAction extends BaseRestHandler { private static final String SEND_TO_EXTENSION_ACTION = "send_to_extension_action"; private static final Logger logger = LogManager.getLogger(RestSendToExtensionAction.class); - // To replace with user identity see https://github.com/opensearch-project/OpenSearch/pull/4247 - private static final Principal DEFAULT_PRINCIPAL = new Principal() { - @Override - public String getName() { - return "OpenSearchUser"; - } - }; private final List<Route> routes; private final List<DeprecatedRoute> deprecatedRoutes; private final String pathPrefix; private final DiscoveryExtensionNode discoveryExtensionNode; private final TransportService transportService; + private final IdentityService identityService; private static final Set<String> allowList = Set.of("Content-Type"); private static final Set<String> denyList = Set.of("Authorization", "Proxy-Authorization"); @@ -82,7 +79,8 @@ public RestSendToExtensionAction( RegisterRestActionsRequest restActionsRequest, DiscoveryExtensionNode discoveryExtensionNode, TransportService transportService, - DynamicActionRegistry dynamicActionRegistry + DynamicActionRegistry dynamicActionRegistry, + IdentityService identityService ) { this.pathPrefix = "/_extensions/_" + restActionsRequest.getUniqueId(); RestRequest.Method method; @@ -147,11 +145,12 @@ public RestSendToExtensionAction( this.discoveryExtensionNode = discoveryExtensionNode; this.transportService = transportService; + this.identityService = identityService; } @Override public String getName() { - return SEND_TO_EXTENSION_ACTION; + return this.discoveryExtensionNode.getId() + ":" + SEND_TO_EXTENSION_ACTION; } @Override @@ -240,12 +239,15 @@ public String executor() { }; try { + // Will be replaced with ExtensionTokenProcessor and PrincipalIdentifierToken classes from feature/identity - final String extensionTokenProcessor = "placeholder_token_processor"; - final String requestIssuerIdentity = "placeholder_request_issuer_identity"; Map<String, List<String>> filteredHeaders = filterHeaders(headers, allowList, denyList); + TokenManager tokenManager = identityService.getTokenManager(); + Subject subject = this.identityService.getSubject(); + 
OnBehalfOfClaims claims = new OnBehalfOfClaims(discoveryExtensionNode.getId(), subject.getPrincipal().getName()); + transportService.sendRequest( discoveryExtensionNode, ExtensionsManager.REQUEST_REST_EXECUTE_ON_EXTENSION_ACTION, @@ -259,7 +261,7 @@ public String executor() { filteredHeaders, contentType, content, - requestIssuerIdentity, + tokenManager.issueOnBehalfOfToken(subject, claims).asAuthHeaderValue(), httpVersion ), restExecuteOnExtensionResponseHandler diff --git a/server/src/main/java/org/opensearch/extensions/settings/CustomSettingsRequestHandler.java b/server/src/main/java/org/opensearch/extensions/settings/CustomSettingsRequestHandler.java index 980dcf67c3128..562fdad54abe4 100644 --- a/server/src/main/java/org/opensearch/extensions/settings/CustomSettingsRequestHandler.java +++ b/server/src/main/java/org/opensearch/extensions/settings/CustomSettingsRequestHandler.java @@ -10,8 +10,8 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.SettingsModule; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.extensions.AcknowledgedResponse; -import org.opensearch.transport.TransportResponse; import java.util.ArrayList; import java.util.List; diff --git a/server/src/main/java/org/opensearch/extensions/settings/RegisterCustomSettingsRequest.java b/server/src/main/java/org/opensearch/extensions/settings/RegisterCustomSettingsRequest.java index e3386c2838755..1145d4bc69d2c 100644 --- a/server/src/main/java/org/opensearch/extensions/settings/RegisterCustomSettingsRequest.java +++ b/server/src/main/java/org/opensearch/extensions/settings/RegisterCustomSettingsRequest.java @@ -8,10 +8,10 @@ package org.opensearch.extensions.settings; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.WriteableSetting; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.transport.TransportRequest; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/gateway/AsyncShardFetch.java b/server/src/main/java/org/opensearch/gateway/AsyncShardFetch.java index 937432096817e..3d129d4794a10 100644 --- a/server/src/main/java/org/opensearch/gateway/AsyncShardFetch.java +++ b/server/src/main/java/org/opensearch/gateway/AsyncShardFetch.java @@ -32,10 +32,6 @@ package org.opensearch.gateway; import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.ExceptionsHelper; -import org.opensearch.OpenSearchTimeoutException; -import org.opensearch.action.ActionListener; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.action.support.nodes.BaseNodesResponse; @@ -44,30 +40,31 @@ import org.opensearch.cluster.routing.allocation.RoutingAllocation; import org.opensearch.common.Nullable; import org.opensearch.common.lease.Releasable; -import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.common.logging.Loggers; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; -import org.opensearch.transport.ReceiveTimeoutTransportException; +import org.opensearch.indices.store.ShardAttributes; +import org.opensearch.indices.store.TransportNodesListShardStoreMetadata; import 
java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Objects; import java.util.Set; import java.util.concurrent.atomic.AtomicLong; -import static java.util.Collections.emptySet; -import static java.util.Collections.unmodifiableSet; +import reactor.util.annotation.NonNull; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.unmodifiableMap; /** * Allows to asynchronously fetch shard related data from other nodes for allocation, without blocking * the cluster update thread. * <p> - * The async fetch logic maintains a map of which nodes are being fetched from in an async manner, - * and once the results are back, it makes sure to schedule a reroute to make sure those results will - * be taken into account. + * The async fetch logic maintains a cache {@link AsyncShardFetchCache} which is filled in async manner when nodes respond back. + * It also schedules a reroute to make sure those results will be taken into account. * * @opensearch.internal */ @@ -77,18 +74,19 @@ public abstract class AsyncShardFetch<T extends BaseNodeResponse> implements Rel * An action that lists the relevant shard data that needs to be fetched. */ public interface Lister<NodesResponse extends BaseNodesResponse<NodeResponse>, NodeResponse extends BaseNodeResponse> { - void list(ShardId shardId, @Nullable String customDataPath, DiscoveryNode[] nodes, ActionListener<NodesResponse> listener); + void list(Map<ShardId, ShardAttributes> shardAttributesMap, DiscoveryNode[] nodes, ActionListener<NodesResponse> listener); + } protected final Logger logger; protected final String type; - protected final ShardId shardId; - protected final String customDataPath; + protected final Map<ShardId, ShardAttributes> shardAttributesMap; private final Lister<BaseNodesResponse<T>, T> action; - private final Map<String, NodeEntry<T>> cache = new HashMap<>(); - private final Set<String> nodesToIgnore = new HashSet<>(); + private final AsyncShardFetchCache<T> cache; private final AtomicLong round = new AtomicLong(); private boolean closed; + private final String reroutingKey; + private final Map<ShardId, Set<String>> shardToIgnoreNodes = new HashMap<>(); @SuppressWarnings("unchecked") protected AsyncShardFetch( @@ -100,9 +98,36 @@ protected AsyncShardFetch( ) { this.logger = logger; this.type = type; - this.shardId = Objects.requireNonNull(shardId); - this.customDataPath = Objects.requireNonNull(customDataPath); + shardAttributesMap = new HashMap<>(); + shardAttributesMap.put(shardId, new ShardAttributes(shardId, customDataPath)); this.action = (Lister<BaseNodesResponse<T>, T>) action; + this.reroutingKey = "ShardId=[" + shardId.toString() + "]"; + cache = new ShardCache<>(logger, reroutingKey, type); + } + + /** + * Added to fetch a batch of shards from nodes + * + * @param logger Logger + * @param type type of action + * @param shardAttributesMap Map of {@link ShardId} to {@link ShardAttributes} to perform fetching on them a + * @param action Transport Action + * @param batchId For the given ShardAttributesMap, we expect them to tie with a single batch id for logging and later identification + */ + @SuppressWarnings("unchecked") + protected AsyncShardFetch( + Logger logger, + String type, + Map<ShardId, ShardAttributes> shardAttributesMap, + Lister<? 
extends BaseNodesResponse<T>, T> action,
+        String batchId
+    ) {
+        this.logger = logger;
+        this.type = type;
+        this.shardAttributesMap = shardAttributesMap;
+        this.action = (Lister<BaseNodesResponse<T>, T>) action;
+        this.reroutingKey = "BatchID=[" + batchId + "]";
+        cache = new ShardCache<>(logger, reroutingKey, type);
+    }
 
     @Override
@@ -110,19 +135,6 @@ public synchronized void close() {
         this.closed = true;
     }
 
-    /**
-     * Returns the number of async fetches that are currently ongoing.
-     */
-    public synchronized int getNumberOfInFlightFetches() {
-        int count = 0;
-        for (NodeEntry<T> nodeEntry : cache.values()) {
-            if (nodeEntry.isFetching()) {
-                count++;
-            }
-        }
-        return count;
-    }
-
     /**
      * Fetches the data for the relevant shard. If there any ongoing async fetches going on, or new ones have
      * been initiated by this call, the result will have no data.
@@ -130,63 +142,71 @@ public synchronized int getNumberOfInFlightFetches() {
      * The ignoreNodes are nodes that are supposed to be ignored for this round, since fetching is async, we need
      * to keep them around and make sure we add them back when all the responses are fetched and returned.
      */
-    public synchronized FetchResult<T> fetchData(DiscoveryNodes nodes, Set<String> ignoreNodes) {
+    public synchronized FetchResult<T> fetchData(DiscoveryNodes nodes, Map<ShardId, Set<String>> ignoreNodes) {
         if (closed) {
-            throw new IllegalStateException(shardId + ": can't fetch data on closed async fetch");
+            throw new IllegalStateException(reroutingKey + ": can't fetch data on closed async fetch");
+        }
+
+        if (shardAttributesMap.size() == 1) {
+            // we will do assertions here on ignoreNodes
+            if (ignoreNodes.size() > 1) {
+                throw new IllegalStateException(
+                    "Fetching shard data, " + reroutingKey + ": can only have at most one shard for non-batch mode"
                );
            }
            if (ignoreNodes.size() == 1) {
                if (shardAttributesMap.containsKey(ignoreNodes.keySet().iterator().next()) == false) {
                    throw new IllegalStateException("Shard Id must be same as initialized in AsyncShardFetch.
Expecting = " + reroutingKey); + } + } } - nodesToIgnore.addAll(ignoreNodes); - fillShardCacheWithDataNodes(cache, nodes); - List<NodeEntry<T>> nodesToFetch = findNodesToFetch(cache); - if (nodesToFetch.isEmpty() == false) { + + // add the nodes to ignore to the list of nodes to ignore for each shard + for (Map.Entry<ShardId, Set<String>> ignoreNodesEntry : ignoreNodes.entrySet()) { + Set<String> ignoreNodesSet = shardToIgnoreNodes.getOrDefault(ignoreNodesEntry.getKey(), new HashSet<>()); + ignoreNodesSet.addAll(ignoreNodesEntry.getValue()); + shardToIgnoreNodes.put(ignoreNodesEntry.getKey(), ignoreNodesSet); + } + + cache.fillShardCacheWithDataNodes(nodes); + List<String> nodeIds = cache.findNodesToFetch(); + if (nodeIds.isEmpty() == false) { // mark all node as fetching and go ahead and async fetch them // use a unique round id to detect stale responses in processAsyncFetch final long fetchingRound = round.incrementAndGet(); - for (NodeEntry<T> nodeEntry : nodesToFetch) { - nodeEntry.markAsFetching(fetchingRound); - } - DiscoveryNode[] discoNodesToFetch = nodesToFetch.stream() - .map(NodeEntry::getNodeId) - .map(nodes::get) - .toArray(DiscoveryNode[]::new); + cache.markAsFetching(nodeIds, fetchingRound); + DiscoveryNode[] discoNodesToFetch = nodeIds.stream().map(nodes::get).toArray(DiscoveryNode[]::new); asyncFetch(discoNodesToFetch, fetchingRound); } // if we are still fetching, return null to indicate it - if (hasAnyNodeFetching(cache)) { - return new FetchResult<>(shardId, null, emptySet()); + if (cache.hasAnyNodeFetching()) { + return new FetchResult<>(null, emptyMap()); } else { // nothing to fetch, yay, build the return value - Map<DiscoveryNode, T> fetchData = new HashMap<>(); Set<String> failedNodes = new HashSet<>(); - for (Iterator<Map.Entry<String, NodeEntry<T>>> it = cache.entrySet().iterator(); it.hasNext();) { - Map.Entry<String, NodeEntry<T>> entry = it.next(); - String nodeId = entry.getKey(); - NodeEntry<T> nodeEntry = entry.getValue(); - - DiscoveryNode node = nodes.get(nodeId); - if (node != null) { - if (nodeEntry.isFailed()) { - // if its failed, remove it from the list of nodes, so if this run doesn't work - // we try again next round to fetch it again - it.remove(); - failedNodes.add(nodeEntry.getNodeId()); - } else { - if (nodeEntry.getValue() != null) { - fetchData.put(node, nodeEntry.getValue()); - } - } - } - } - Set<String> allIgnoreNodes = unmodifiableSet(new HashSet<>(nodesToIgnore)); + Map<DiscoveryNode, T> fetchData = cache.getCacheData(nodes, failedNodes); + + Map<ShardId, Set<String>> allIgnoreNodesMap = unmodifiableMap(new HashMap<>(shardToIgnoreNodes)); // clear the nodes to ignore, we had a successful run in fetching everything we can // we need to try them if another full run is needed - nodesToIgnore.clear(); + shardToIgnoreNodes.clear(); // if at least one node failed, make sure to have a protective reroute // here, just case this round won't find anything, and we need to retry fetching data - if (failedNodes.isEmpty() == false || allIgnoreNodes.isEmpty() == false) { - reroute(shardId, "nodes failed [" + failedNodes.size() + "], ignored [" + allIgnoreNodes.size() + "]"); + + if (failedNodes.isEmpty() == false + || allIgnoreNodesMap.values().stream().anyMatch(ignoreNodeSet -> ignoreNodeSet.isEmpty() == false)) { + reroute( + reroutingKey, + "nodes failed [" + + failedNodes.size() + + "], ignored [" + + allIgnoreNodesMap.values().stream().mapToInt(Set::size).sum() + + "]" + ); } - return new FetchResult<>(shardId, fetchData, allIgnoreNodes); + + 
return new FetchResult<>(fetchData, allIgnoreNodesMap); } } @@ -199,87 +219,28 @@ public synchronized FetchResult<T> fetchData(DiscoveryNodes nodes, Set<String> i protected synchronized void processAsyncFetch(List<T> responses, List<FailedNodeException> failures, long fetchingRound) { if (closed) { // we are closed, no need to process this async fetch at all - logger.trace("{} ignoring fetched [{}] results, already closed", shardId, type); + logger.trace("{} ignoring fetched [{}] results, already closed", reroutingKey, type); return; } - logger.trace("{} processing fetched [{}] results", shardId, type); + logger.trace("{} processing fetched [{}] results", reroutingKey, type); if (responses != null) { - for (T response : responses) { - NodeEntry<T> nodeEntry = cache.get(response.getNode().getId()); - if (nodeEntry != null) { - if (nodeEntry.getFetchingRound() != fetchingRound) { - assert nodeEntry.getFetchingRound() > fetchingRound : "node entries only replaced by newer rounds"; - logger.trace( - "{} received response for [{}] from node {} for an older fetching round (expected: {} but was: {})", - shardId, - nodeEntry.getNodeId(), - type, - nodeEntry.getFetchingRound(), - fetchingRound - ); - } else if (nodeEntry.isFailed()) { - logger.trace( - "{} node {} has failed for [{}] (failure [{}])", - shardId, - nodeEntry.getNodeId(), - type, - nodeEntry.getFailure() - ); - } else { - // if the entry is there, for the right fetching round and not marked as failed already, process it - logger.trace("{} marking {} as done for [{}], result is [{}]", shardId, nodeEntry.getNodeId(), type, response); - nodeEntry.doneFetching(response); - } - } - } + cache.processResponses(responses, fetchingRound); } if (failures != null) { - for (FailedNodeException failure : failures) { - logger.trace("{} processing failure {} for [{}]", shardId, failure, type); - NodeEntry<T> nodeEntry = cache.get(failure.nodeId()); - if (nodeEntry != null) { - if (nodeEntry.getFetchingRound() != fetchingRound) { - assert nodeEntry.getFetchingRound() > fetchingRound : "node entries only replaced by newer rounds"; - logger.trace( - "{} received failure for [{}] from node {} for an older fetching round (expected: {} but was: {})", - shardId, - nodeEntry.getNodeId(), - type, - nodeEntry.getFetchingRound(), - fetchingRound - ); - } else if (nodeEntry.isFailed() == false) { - // if the entry is there, for the right fetching round and not marked as failed already, process it - Throwable unwrappedCause = ExceptionsHelper.unwrapCause(failure.getCause()); - // if the request got rejected or timed out, we need to try it again next time... - if (unwrappedCause instanceof OpenSearchRejectedExecutionException - || unwrappedCause instanceof ReceiveTimeoutTransportException - || unwrappedCause instanceof OpenSearchTimeoutException) { - nodeEntry.restartFetching(); - } else { - logger.warn( - () -> new ParameterizedMessage( - "{}: failed to list shard for {} on node [{}]", - shardId, - type, - failure.nodeId() - ), - failure - ); - nodeEntry.doneFetching(failure.getCause()); - } - } - } - } + cache.processFailures(failures, fetchingRound); } - reroute(shardId, "post_response"); + reroute(reroutingKey, "post_response"); + } + + public synchronized int getNumberOfInFlightFetches() { + return cache.getInflightFetches(); } /** * Implement this in order to scheduled another round that causes a call to fetch data. 
*/ - protected abstract void reroute(ShardId shardId, String reason); + protected abstract void reroute(String reroutingKey, String reason); /** * Clear cache for node, ensuring next fetch will fetch a fresh copy. @@ -288,54 +249,13 @@ synchronized void clearCacheForNode(String nodeId) { cache.remove(nodeId); } - /** - * Fills the shard fetched data with new (data) nodes and a fresh NodeEntry, and removes from - * it nodes that are no longer part of the state. - */ - private void fillShardCacheWithDataNodes(Map<String, NodeEntry<T>> shardCache, DiscoveryNodes nodes) { - // verify that all current data nodes are there - for (final DiscoveryNode node : nodes.getDataNodes().values()) { - if (shardCache.containsKey(node.getId()) == false) { - shardCache.put(node.getId(), new NodeEntry<T>(node.getId())); - } - } - // remove nodes that are not longer part of the data nodes set - shardCache.keySet().removeIf(nodeId -> !nodes.nodeExists(nodeId)); - } - - /** - * Finds all the nodes that need to be fetched. Those are nodes that have no - * data, and are not in fetch mode. - */ - private List<NodeEntry<T>> findNodesToFetch(Map<String, NodeEntry<T>> shardCache) { - List<NodeEntry<T>> nodesToFetch = new ArrayList<>(); - for (NodeEntry<T> nodeEntry : shardCache.values()) { - if (nodeEntry.hasData() == false && nodeEntry.isFetching() == false) { - nodesToFetch.add(nodeEntry); - } - } - return nodesToFetch; - } - - /** - * Are there any nodes that are fetching data? - */ - private boolean hasAnyNodeFetching(Map<String, NodeEntry<T>> shardCache) { - for (NodeEntry<T> nodeEntry : shardCache.values()) { - if (nodeEntry.isFetching()) { - return true; - } - } - return false; - } - /** * Async fetches data for the provided shard with the set of nodes that need to be fetched from. */ // visible for testing void asyncFetch(final DiscoveryNode[] nodes, long fetchingRound) { - logger.trace("{} fetching [{}] from {}", shardId, type, nodes); - action.list(shardId, customDataPath, nodes, new ActionListener<BaseNodesResponse<T>>() { + logger.trace("{} fetching [{}] from {}", reroutingKey, type, nodes); + action.list(shardAttributesMap, nodes, new ActionListener<BaseNodesResponse<T>>() { @Override public void onResponse(BaseNodesResponse<T> response) { processAsyncFetch(response.getNodes(), response.failures(), fetchingRound); @@ -352,20 +272,84 @@ public void onFailure(Exception e) { }); } + /** + * Cache implementation of transport actions returning single shard related data in the response. + * Store node level responses of transport actions like {@link TransportNodesListGatewayStartedShards} or + * {@link TransportNodesListShardStoreMetadata}. + * + * @param <K> Response type of transport action. + */ + static class ShardCache<K extends BaseNodeResponse> extends AsyncShardFetchCache<K> { + + private final Map<String, NodeEntry<K>> cache; + + public ShardCache(Logger logger, String logKey, String type) { + super(Loggers.getLogger(logger, "_" + logKey), type); + cache = new HashMap<>(); + } + + @Override + public void initData(DiscoveryNode node) { + cache.put(node.getId(), new NodeEntry<>(node.getId())); + } + + @Override + public void putData(DiscoveryNode node, K response) { + cache.get(node.getId()).doneFetching(response); + } + + @Override + public K getData(DiscoveryNode node) { + return cache.get(node.getId()).getValue(); + } + + @NonNull + @Override + public Map<String, ? 
extends BaseNodeEntry> getCache() { + return cache; + } + + @Override + public void deleteShard(ShardId shardId) { + cache.clear(); // single shard cache can clear the full map + } + + /** + * A node entry, holding the state of the fetched data for a specific shard + * for a giving node. + */ + static class NodeEntry<U extends BaseNodeResponse> extends AsyncShardFetchCache.BaseNodeEntry { + @Nullable + private U value; + + void doneFetching(U value) { + super.doneFetching(); + this.value = value; + } + + NodeEntry(String nodeId) { + super(nodeId); + } + + U getValue() { + return value; + } + + } + } + /** * The result of a fetch operation. Make sure to first check {@link #hasData()} before * fetching the actual data. */ public static class FetchResult<T extends BaseNodeResponse> { - private final ShardId shardId; private final Map<DiscoveryNode, T> data; - private final Set<String> ignoreNodes; + private final Map<ShardId, Set<String>> ignoredShardToNodes; - public FetchResult(ShardId shardId, Map<DiscoveryNode, T> data, Set<String> ignoreNodes) { - this.shardId = shardId; + public FetchResult(Map<DiscoveryNode, T> data, Map<ShardId, Set<String>> ignoreNodes) { this.data = data; - this.ignoreNodes = ignoreNodes; + this.ignoredShardToNodes = ignoreNodes; } /** @@ -389,88 +373,14 @@ public Map<DiscoveryNode, T> getData() { * Process any changes needed to the allocation based on this fetch result. */ public void processAllocation(RoutingAllocation allocation) { - for (String ignoreNode : ignoreNodes) { - allocation.addIgnoreShardForNode(shardId, ignoreNode); + for (Map.Entry<ShardId, Set<String>> entry : ignoredShardToNodes.entrySet()) { + ShardId shardId = entry.getKey(); + Set<String> ignoreNodes = entry.getValue(); + if (ignoreNodes.isEmpty() == false) { + ignoreNodes.forEach(nodeId -> allocation.addIgnoreShardForNode(shardId, nodeId)); + } } - } - } - - /** - * A node entry, holding the state of the fetched data for a specific shard - * for a giving node. 
- */ - static class NodeEntry<T> { - private final String nodeId; - private boolean fetching; - @Nullable - private T value; - private boolean valueSet; - private Throwable failure; - private long fetchingRound; - - NodeEntry(String nodeId) { - this.nodeId = nodeId; - } - - String getNodeId() { - return this.nodeId; - } - - boolean isFetching() { - return fetching; - } - - void markAsFetching(long fetchingRound) { - assert fetching == false : "double marking a node as fetching"; - this.fetching = true; - this.fetchingRound = fetchingRound; - } - - void doneFetching(T value) { - assert fetching : "setting value but not in fetching mode"; - assert failure == null : "setting value when failure already set"; - this.valueSet = true; - this.value = value; - this.fetching = false; - } - - void doneFetching(Throwable failure) { - assert fetching : "setting value but not in fetching mode"; - assert valueSet == false : "setting failure when already set value"; - assert failure != null : "setting failure can't be null"; - this.failure = failure; - this.fetching = false; - } - - void restartFetching() { - assert fetching : "restarting fetching, but not in fetching mode"; - assert valueSet == false : "value can't be set when restarting fetching"; - assert failure == null : "failure can't be set when restarting fetching"; - this.fetching = false; - } - - boolean isFailed() { - return failure != null; - } - - boolean hasData() { - return valueSet || failure != null; - } - - Throwable getFailure() { - assert hasData() : "getting failure when data has not been fetched"; - return failure; - } - - @Nullable - T getValue() { - assert failure == null : "trying to fetch value, but its marked as failed, check isFailed"; - assert valueSet : "value is not set, hasn't been fetched yet"; - return value; - } - long getFetchingRound() { - return fetchingRound; } } } diff --git a/server/src/main/java/org/opensearch/gateway/AsyncShardFetchCache.java b/server/src/main/java/org/opensearch/gateway/AsyncShardFetchCache.java new file mode 100644 index 0000000000000..3140ceef4f3ee --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/AsyncShardFetchCache.java @@ -0,0 +1,316 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gateway; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.ExceptionsHelper; +import org.opensearch.OpenSearchTimeoutException; +import org.opensearch.action.FailedNodeException; +import org.opensearch.action.support.nodes.BaseNodeResponse; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.transport.ReceiveTimeoutTransportException; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import reactor.util.annotation.NonNull; + +/** + * AsyncShardFetchCache operates on a node-level cache, which is a map of String to BaseNodeEntry. initData, + * putData and getData need to be called for all the nodes. This class is responsible for managing the flow for all + * the nodes. 
+ * It also gives useful insights, such as how many fetches are ongoing, how many nodes are left to fetch, or how to + * mark some node as fetching. All of these functionalities require checking the cache information and responding + * accordingly. + * <p> + * initData : how to initialize an entry of shard cache for a node. + * putData : how to store the response of transport action in the cache. + * getData : how to get the stored data for any shard allocators like {@link PrimaryShardAllocator} or + * {@link ReplicaShardAllocator} + * deleteShard : how to clean up the stored data from cache for a shard. + * + * @param <K> Response type of transport action which has the data to be stored in the cache. + * + * @opensearch.internal + */ +public abstract class AsyncShardFetchCache<K extends BaseNodeResponse> { + private final Logger logger; + private final String type; + + protected AsyncShardFetchCache(Logger logger, String type) { + this.logger = logger; + this.type = type; + } + + abstract void initData(DiscoveryNode node); + + abstract void putData(DiscoveryNode node, K response); + + abstract K getData(DiscoveryNode node); + + @NonNull + abstract Map<String, ? extends BaseNodeEntry> getCache(); + + /** + * Clean up cached data for this shard once it's started. Cleanup only happens at the shard level. Node entries will + * automatically be cleaned up once shards are assigned. + * + * @param shardId for which we need to free up the cached data. + */ + abstract void deleteShard(ShardId shardId); + + /** + * Returns the number of fetches that are currently ongoing. + */ + int getInflightFetches() { + int count = 0; + for (BaseNodeEntry nodeEntry : getCache().values()) { + if (nodeEntry.isFetching()) { + count++; + } + } + return count; + } + + /** + * Fills the shard fetched data with new (data) nodes and a fresh NodeEntry, and removes from + * it nodes that are no longer part of the state. + */ + void fillShardCacheWithDataNodes(DiscoveryNodes nodes) { + // verify that all current data nodes are there + for (final DiscoveryNode node : nodes.getDataNodes().values()) { + if (getCache().containsKey(node.getId()) == false) { + initData(node); + } + } + // remove nodes that are no longer part of the data nodes set + getCache().keySet().removeIf(nodeId -> !nodes.nodeExists(nodeId)); + } + + /** + * Finds all the nodes that need to be fetched. Those are nodes that have no + * data, and are not in fetch mode. + */ + List<String> findNodesToFetch() { + List<String> nodesToFetch = new ArrayList<>(); + for (BaseNodeEntry nodeEntry : getCache().values()) { + if (nodeEntry.hasData() == false && nodeEntry.isFetching() == false) { + nodesToFetch.add(nodeEntry.getNodeId()); + } + } + return nodesToFetch; + } + + /** + * Are there any nodes that are fetching data? + */ + boolean hasAnyNodeFetching() { + for (BaseNodeEntry nodeEntry : getCache().values()) { + if (nodeEntry.isFetching()) { + return true; + } + } + return false; + } + + /** + * Get the data from the cache, ignoring failed entries. Use the getData functional interface to get the data, as + * different implementations may have different ways to populate the data from the cache. + * + * @param nodes Discovery nodes for which we need to return the cache data. + * @param failedNodes set that is populated with the nodes for which the fetch has failed. + * @return Map of cache data for every DiscoveryNode. + */ + Map<DiscoveryNode, K> getCacheData(DiscoveryNodes nodes, Set<String> failedNodes) { + Map<DiscoveryNode, K> fetchData = new HashMap<>(); + for (Iterator<? 
extends Map.Entry<String, ? extends BaseNodeEntry>> it = getCache().entrySet().iterator(); it.hasNext();) { + Map.Entry<String, BaseNodeEntry> entry = (Map.Entry<String, BaseNodeEntry>) it.next(); + String nodeId = entry.getKey(); + BaseNodeEntry nodeEntry = entry.getValue(); + + DiscoveryNode node = nodes.get(nodeId); + if (node != null) { + if (nodeEntry.isFailed()) { + // if its failed, remove it from the list of nodes, so if this run doesn't work + // we try again next round to fetch it again + it.remove(); + failedNodes.add(nodeEntry.getNodeId()); + } else { + K nodeResponse = getData(node); + if (nodeResponse != null) { + fetchData.put(node, nodeResponse); + } + } + } + } + return fetchData; + } + + void processResponses(List<K> responses, long fetchingRound) { + for (K response : responses) { + BaseNodeEntry nodeEntry = getCache().get(response.getNode().getId()); + if (nodeEntry != null) { + if (validateNodeResponse(nodeEntry, fetchingRound)) { + // if the entry is there, for the right fetching round and not marked as failed already, process it + logger.trace("marking {} as done for [{}], result is [{}]", nodeEntry.getNodeId(), type, response); + putData(response.getNode(), response); + } + } + } + } + + private boolean validateNodeResponse(BaseNodeEntry nodeEntry, long fetchingRound) { + if (nodeEntry.getFetchingRound() != fetchingRound) { + assert nodeEntry.getFetchingRound() > fetchingRound : "node entries only replaced by newer rounds"; + logger.trace( + "received response for [{}] from node {} for an older fetching round (expected: {} but was: {})", + nodeEntry.getNodeId(), + type, + nodeEntry.getFetchingRound(), + fetchingRound + ); + return false; + } else if (nodeEntry.isFailed()) { + logger.trace("node {} has failed for [{}] (failure [{}])", nodeEntry.getNodeId(), type, nodeEntry.getFailure()); + return false; + } + return true; + } + + private void handleNodeFailure(BaseNodeEntry nodeEntry, FailedNodeException failure, long fetchingRound) { + if (nodeEntry.getFetchingRound() != fetchingRound) { + assert nodeEntry.getFetchingRound() > fetchingRound : "node entries only replaced by newer rounds"; + logger.trace( + "received failure for [{}] from node {} for an older fetching round (expected: {} but was: {})", + nodeEntry.getNodeId(), + type, + nodeEntry.getFetchingRound(), + fetchingRound + ); + } else if (nodeEntry.isFailed() == false) { + // if the entry is there, for the right fetching round and not marked as failed already, process it + Throwable unwrappedCause = ExceptionsHelper.unwrapCause(failure.getCause()); + // if the request got rejected or timed out, we need to try it again next time... 
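+ // retryable here means the cause is an OpenSearchRejectedExecutionException, a ReceiveTimeoutTransportException or an OpenSearchTimeoutException (see retryableException below), in which case the node entry is reset via restartFetching() rather than marked as failed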
+ if (retryableException(unwrappedCause)) { + nodeEntry.restartFetching(); + } else { + logger.warn(() -> new ParameterizedMessage("failed to list shard for {} on node [{}]", type, failure.nodeId()), failure); + nodeEntry.doneFetching(failure.getCause()); + } + } + } + + boolean retryableException(Throwable unwrappedCause) { + return unwrappedCause instanceof OpenSearchRejectedExecutionException + || unwrappedCause instanceof ReceiveTimeoutTransportException + || unwrappedCause instanceof OpenSearchTimeoutException; + } + + void processFailures(List<FailedNodeException> failures, long fetchingRound) { + for (FailedNodeException failure : failures) { + logger.trace("processing failure {} for [{}]", failure, type); + BaseNodeEntry nodeEntry = getCache().get(failure.nodeId()); + if (nodeEntry != null) { + handleNodeFailure(nodeEntry, failure, fetchingRound); + } + } + } + + /** + * Common function for removing whole node entry. + * + * @param nodeId nodeId to be cleaned. + */ + void remove(String nodeId) { + this.getCache().remove(nodeId); + } + + void markAsFetching(List<String> nodeIds, long fetchingRound) { + for (String nodeId : nodeIds) { + getCache().get(nodeId).markAsFetching(fetchingRound); + } + } + + /** + * A node entry, holding only node level fetching related information. + * Actual metadata of shard is stored in child classes. + */ + static class BaseNodeEntry { + private final String nodeId; + private boolean fetching; + private boolean valueSet; + private Throwable failure; + private long fetchingRound; + + BaseNodeEntry(String nodeId) { + this.nodeId = nodeId; + } + + String getNodeId() { + return this.nodeId; + } + + boolean isFetching() { + return fetching; + } + + void markAsFetching(long fetchingRound) { + assert fetching == false : "double marking a node as fetching"; + this.fetching = true; + this.fetchingRound = fetchingRound; + } + + void doneFetching() { + assert fetching : "setting value but not in fetching mode"; + assert failure == null : "setting value when failure already set"; + this.valueSet = true; + this.fetching = false; + } + + void doneFetching(Throwable failure) { + assert fetching : "setting value but not in fetching mode"; + assert valueSet == false : "setting failure when already set value"; + assert failure != null : "setting failure can't be null"; + this.failure = failure; + this.fetching = false; + } + + void restartFetching() { + assert fetching : "restarting fetching, but not in fetching mode"; + assert valueSet == false : "value can't be set when restarting fetching"; + assert failure == null : "failure can't be set when restarting fetching"; + this.fetching = false; + } + + boolean isFailed() { + return failure != null; + } + + boolean hasData() { + return valueSet || failure != null; + } + + Throwable getFailure() { + assert hasData() : "getting failure when data has not been fetched"; + return failure; + } + + long getFetchingRound() { + return fetchingRound; + } + } +} diff --git a/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java b/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java index 59ef894958cbe..853fe03904c53 100644 --- a/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java @@ -50,7 +50,7 @@ /** * An abstract class that implements basic functionality for allocating * shards to nodes based on shard copies that already exist in the cluster. 
- * + * <p> * Individual implementations of this class are responsible for providing * the logic to determine to which nodes (if any) those shards are allocated. * diff --git a/server/src/main/java/org/opensearch/gateway/DanglingIndicesState.java b/server/src/main/java/org/opensearch/gateway/DanglingIndicesState.java index dba081f1b50c2..48479691689e5 100644 --- a/server/src/main/java/org/opensearch/gateway/DanglingIndicesState.java +++ b/server/src/main/java/org/opensearch/gateway/DanglingIndicesState.java @@ -34,7 +34,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterStateListener; import org.opensearch.cluster.metadata.IndexGraveyard; @@ -44,8 +43,9 @@ import org.opensearch.common.inject.Inject; import org.opensearch.common.settings.Setting; import org.opensearch.common.util.concurrent.ConcurrentCollections; -import org.opensearch.env.NodeEnvironment; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.Index; +import org.opensearch.env.NodeEnvironment; import java.io.IOException; import java.util.ArrayList; @@ -244,7 +244,7 @@ List<IndexMetadata> filterDanglingIndices(Metadata metadata, Map<Index, IndexMet /** * Removes all aliases from the supplied index metadata. - * + * <p> * Dangling importing indices with aliases is dangerous, it could for instance result in inability to write to an existing alias if it * previously had only one index with any is_write_index indication. */ diff --git a/server/src/main/java/org/opensearch/gateway/GatewayAllocator.java b/server/src/main/java/org/opensearch/gateway/GatewayAllocator.java index 5a20112b19219..c8ef9364ebba9 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayAllocator.java @@ -35,7 +35,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.action.support.nodes.BaseNodesResponse; import org.opensearch.cluster.metadata.IndexMetadata; @@ -49,13 +48,15 @@ import org.opensearch.cluster.routing.allocation.RoutingAllocation; import org.opensearch.common.Priority; import org.opensearch.common.inject.Inject; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.util.set.Sets; -import org.opensearch.common.lease.Releasables; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.store.TransportNodesListShardStoreMetadata; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Set; import java.util.Spliterators; @@ -226,7 +227,9 @@ private static void clearCacheForPrimary( AsyncShardFetch<TransportNodesListShardStoreMetadata.NodeStoreFilesMetadata> fetch, RoutingAllocation allocation ) { - ShardRouting primary = allocation.routingNodes().activePrimary(fetch.shardId); + assert fetch.shardAttributesMap.size() == 1 : "expected only one shard"; + ShardId shardId = fetch.shardAttributesMap.keySet().iterator().next(); + ShardRouting primary = allocation.routingNodes().activePrimary(shardId); if (primary != null) { 
fetch.clearCacheForNode(primary.currentNodeId()); } @@ -254,15 +257,15 @@ class InternalAsyncFetch<T extends BaseNodeResponse> extends AsyncShardFetch<T> } @Override - protected void reroute(ShardId shardId, String reason) { - logger.trace("{} scheduling reroute for {}", shardId, reason); + protected void reroute(String reroutingKey, String reason) { + logger.trace("{} scheduling reroute for {}", reroutingKey, reason); assert rerouteService != null; rerouteService.reroute( "async_shard_fetch", Priority.HIGH, ActionListener.wrap( - r -> logger.trace("{} scheduled reroute completed for {}", shardId, reason), - e -> logger.debug(new ParameterizedMessage("{} scheduled reroute failed for {}", shardId, reason), e) + r -> logger.trace("{} scheduled reroute completed for {}", reroutingKey, reason), + e -> logger.debug(new ParameterizedMessage("{} scheduled reroute failed for {}", reroutingKey, reason), e) ) ); } @@ -293,7 +296,11 @@ protected AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.Nod ); AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> shardState = fetch.fetchData( allocation.nodes(), - allocation.getIgnoreNodes(shard.shardId()) + new HashMap<>() { + { + put(shard.shardId(), allocation.getIgnoreNodes(shard.shardId())); + } + } ); if (shardState.hasData()) { @@ -328,7 +335,11 @@ protected AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetadata.NodeS ); AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetadata.NodeStoreFilesMetadata> shardStores = fetch.fetchData( allocation.nodes(), - allocation.getIgnoreNodes(shard.shardId()) + new HashMap<>() { + { + put(shard.shardId(), allocation.getIgnoreNodes(shard.shardId())); + } + } ); if (shardStores.hasData()) { shardStores.processAllocation(allocation); diff --git a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java index ad9faef067c89..c3056276706a0 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java @@ -35,8 +35,8 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.store.AlreadyClosedException; -import org.opensearch.OpenSearchException; import org.opensearch.ExceptionsHelper; +import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterName; @@ -45,6 +45,9 @@ import org.opensearch.cluster.coordination.CoordinationMetadata; import org.opensearch.cluster.coordination.CoordinationState.PersistedState; import org.opensearch.cluster.coordination.InMemoryPersistedState; +import org.opensearch.cluster.coordination.PersistedStateRegistry; +import org.opensearch.cluster.coordination.PersistedStateRegistry.PersistedStateType; +import org.opensearch.cluster.coordination.PersistedStateStats; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexTemplateMetadata; import org.opensearch.cluster.metadata.Manifest; @@ -52,7 +55,6 @@ import org.opensearch.cluster.metadata.MetadataIndexUpgradeService; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.SetOnce; import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.AbstractRunnable; @@ -60,6 
+62,10 @@ import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor; import org.opensearch.common.util.io.IOUtils; import org.opensearch.env.NodeMetadata; +import org.opensearch.gateway.remote.ClusterMetadataManifest; +import org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.index.recovery.RemoteStoreRestoreService; +import org.opensearch.index.recovery.RemoteStoreRestoreService.RemoteRestoreResult; import org.opensearch.node.Node; import org.opensearch.plugins.MetadataUpgrader; import org.opensearch.threadpool.ThreadPool; @@ -72,6 +78,7 @@ import java.util.HashMap; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; @@ -80,37 +87,37 @@ import java.util.function.UnaryOperator; import static org.opensearch.common.util.concurrent.OpenSearchExecutors.daemonThreadFactory; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled; /** * Loads (and maybe upgrades) cluster metadata at startup, and persistently stores cluster metadata for future restarts. - * - * When started, ensures that this version is compatible with the state stored on disk, and performs a state upgrade if necessary. Note that - * the state being loaded when constructing the instance of this class is not necessarily the state that will be used as {@link - * ClusterState#metadata()} because it might be stale or incomplete. Cluster-manager-eligible nodes must perform an election to find a complete and - * non-stale state, and cluster-manager-ineligible nodes receive the real cluster state from the elected cluster-manager after joining the cluster. + * <p> + * When started, ensures that this version is compatible with the state stored on disk, and performs a state upgrade if necessary. Note that the state being + * loaded when constructing the instance of this class is not necessarily the state that will be used as {@link ClusterState#metadata()} because it might be + * stale or incomplete. Cluster-manager-eligible nodes must perform an election to find a complete and non-stale state, and cluster-manager-ineligible nodes + * receive the real cluster state from the elected cluster-manager after joining the cluster. * * @opensearch.internal */ public class GatewayMetaState implements Closeable { /** - * Fake node ID for a voting configuration written by a cluster-manager-ineligible data node to indicate that its on-disk state is potentially - * stale (since it is written asynchronously after application, rather than before acceptance). This node ID means that if the node is - * restarted as a cluster-manager-eligible node then it does not win any elections until it has received a fresh cluster state. + * Fake node ID for a voting configuration written by a cluster-manager-ineligible data node to indicate that its on-disk state is potentially stale (since + * it is written asynchronously after application, rather than before acceptance). This node ID means that if the node is restarted as a + * cluster-manager-eligible node then it does not win any elections until it has received a fresh cluster state. 
*/ public static final String STALE_STATE_CONFIG_NODE_ID = "STALE_STATE_CONFIG"; - // Set by calling start() - private final SetOnce<PersistedState> persistedState = new SetOnce<>(); + private PersistedStateRegistry persistedStateRegistry; public PersistedState getPersistedState() { - final PersistedState persistedState = this.persistedState.get(); + final PersistedState persistedState = persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL); assert persistedState != null : "not started"; return persistedState; } public Metadata getMetadata() { - return getPersistedState().getLastAcceptedState().metadata(); + return persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL).getLastAcceptedState().metadata(); } public void start( @@ -120,9 +127,13 @@ public void start( MetaStateService metaStateService, MetadataIndexUpgradeService metadataIndexUpgradeService, MetadataUpgrader metadataUpgrader, - PersistedClusterStateService persistedClusterStateService + PersistedClusterStateService persistedClusterStateService, + RemoteClusterStateService remoteClusterStateService, + PersistedStateRegistry persistedStateRegistry, + RemoteStoreRestoreService remoteStoreRestoreService ) { - assert persistedState.get() == null : "should only start once, but already have " + persistedState.get(); + assert this.persistedStateRegistry == null : "Persisted state registry should only be set once"; + this.persistedStateRegistry = persistedStateRegistry; if (DiscoveryNode.isClusterManagerNode(settings) || DiscoveryNode.isDataNode(settings)) { try { @@ -144,14 +155,45 @@ public void start( } PersistedState persistedState = null; + PersistedState remotePersistedState = null; boolean success = false; try { - final ClusterState clusterState = prepareInitialClusterState( + ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(settings)) + .version(lastAcceptedVersion) + .metadata(metadata) + .build(); + + if (DiscoveryNode.isClusterManagerNode(settings) && isRemoteStoreClusterStateEnabled(settings)) { + // If the cluster UUID loaded from local is unknown (_na_) then fetch the best state from remote + // If there is no valid state on remote, continue with initial empty state + // If there is a valid state, then restore index metadata using this state + String lastKnownClusterUUID = ClusterState.UNKNOWN_UUID; + if (ClusterState.UNKNOWN_UUID.equals(clusterState.metadata().clusterUUID())) { + lastKnownClusterUUID = remoteClusterStateService.getLastKnownUUIDFromRemote( + clusterState.getClusterName().value() + ); + if (ClusterState.UNKNOWN_UUID.equals(lastKnownClusterUUID) == false) { + // Load state from remote + final RemoteRestoreResult remoteRestoreResult = remoteStoreRestoreService.restore( + // Remote Metadata should always override local disk Metadata + // if local disk Metadata's cluster uuid is UNKNOWN_UUID + ClusterState.builder(clusterState).metadata(Metadata.EMPTY_METADATA).build(), + lastKnownClusterUUID, + false, + new String[] {} + ); + clusterState = remoteRestoreResult.getClusterState(); + } + } + remotePersistedState = new RemotePersistedState(remoteClusterStateService, lastKnownClusterUUID); + } + + // Recovers Cluster and Index level blocks + clusterState = prepareInitialClusterState( transportService, clusterService, - ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(settings)) - .version(lastAcceptedVersion) - .metadata(upgradeMetadataForNode(metadata, metadataIndexUpgradeService, metadataUpgrader)) + ClusterState.builder(clusterState) + 
.metadata(upgradeMetadataForNode(clusterState.metadata(), metadataIndexUpgradeService, metadataUpgrader)) .build() ); @@ -177,11 +219,14 @@ public void start( success = true; } finally { if (success == false) { - IOUtils.closeWhileHandlingException(persistedState); + IOUtils.closeWhileHandlingException(persistedStateRegistry); } } - this.persistedState.set(persistedState); + persistedStateRegistry.addPersistedState(PersistedStateType.LOCAL, persistedState); + if (remotePersistedState != null) { + persistedStateRegistry.addPersistedState(PersistedStateType.REMOTE, remotePersistedState); + } } catch (IOException e) { throw new OpenSearchException("failed to load metadata", e); } @@ -208,7 +253,7 @@ public void start( throw new UncheckedIOException(e); } } - persistedState.set(new InMemoryPersistedState(currentTerm, clusterState)); + persistedStateRegistry.addPersistedState(PersistedStateType.LOCAL, new InMemoryPersistedState(currentTerm, clusterState)); } } @@ -234,8 +279,8 @@ Metadata upgradeMetadataForNode( } /** - * This method calls {@link MetadataIndexUpgradeService} to makes sure that indices are compatible with the current - * version. The MetadataIndexUpgradeService might also update obsolete settings if needed. + * This method calls {@link MetadataIndexUpgradeService} to make sure that indices are compatible with the current version. The MetadataIndexUpgradeService + * might also update obsolete settings if needed. * * @return input <code>metadata</code> if no upgrade is needed or an upgraded metadata */ @@ -327,12 +372,14 @@ public void applyClusterState(ClusterChangedEvent event) { @Override public void close() throws IOException { - IOUtils.close(persistedState.get()); + IOUtils.close(persistedStateRegistry); } // visible for testing public boolean allPendingAsyncStatesWritten() { - final PersistedState ps = persistedState.get(); + // This method is invoked for persisted state implementations which write asynchronously. + // RemotePersistedState is invoked in the synchronous path, so this logic is not required for remote state. + final PersistedState ps = persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL); if (ps instanceof AsyncLucenePersistedState) { return ((AsyncLucenePersistedState) ps).allPendingAsyncStatesWritten(); } else { @@ -505,6 +552,9 @@ static class LucenePersistedState implements PersistedState { // out by this version of OpenSearch. TODO TBD should we avoid indexing when possible? final PersistedClusterStateService.Writer writer = persistedClusterStateService.createWriter(); try { + // During remote state restore, there will be non-empty metadata getting persisted with the cluster UUID as + // ClusterState.UNKNOWN_UUID. The valid UUID will be generated and persisted along with the first cluster state getting + // published. writer.writeFullStateAndCommit(currentTerm, lastAcceptedState); } catch (Exception e) { try { @@ -566,6 +616,12 @@ public void setLastAcceptedState(ClusterState clusterState) { lastAcceptedState = clusterState; } + @Override + public PersistedStateStats getStats() { + // Note: These stats are not published yet; they will come in the future + return null; + } + private PersistedClusterStateService.Writer getWriterSafe() { final PersistedClusterStateService.Writer writer = persistenceWriter.get(); if (writer == null) { @@ -599,4 +655,136 @@ public void close() throws IOException { IOUtils.close(persistenceWriter.getAndSet(null)); } } + + /** + * Encapsulates the writing of metadata to a remote store using {@link RemoteClusterStateService}. 
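+ * <p> + * A full metadata snapshot is written when no previous manifest is available, the term changes, or the last manifest was written by a different node version; otherwise only incremental metadata is written (see shouldWriteFullClusterState).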
+ */ + public static class RemotePersistedState implements PersistedState { + + private static final Logger logger = LogManager.getLogger(RemotePersistedState.class); + + private ClusterState lastAcceptedState; + private ClusterMetadataManifest lastAcceptedManifest; + private final RemoteClusterStateService remoteClusterStateService; + private String previousClusterUUID; + + public RemotePersistedState(final RemoteClusterStateService remoteClusterStateService, final String previousClusterUUID) { + this.remoteClusterStateService = remoteClusterStateService; + this.previousClusterUUID = previousClusterUUID; + } + + @Override + public long getCurrentTerm() { + return lastAcceptedState != null ? lastAcceptedState.term() : 0L; + } + + @Override + public ClusterState getLastAcceptedState() { + return lastAcceptedState; + } + + @Override + public void setCurrentTerm(long currentTerm) { + // no-op + // For LucenePersistedState, setCurrentTerm is used only while handling StartJoinRequest by all follower nodes. + // But for RemotePersistedState, the state is only pushed by the active cluster. So this method is not required. + } + + @Override + public void setLastAcceptedState(ClusterState clusterState) { + try { + final ClusterMetadataManifest manifest; + if (shouldWriteFullClusterState(clusterState)) { + final Optional<ClusterMetadataManifest> latestManifest = remoteClusterStateService.getLatestClusterMetadataManifest( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() + ); + if (latestManifest.isPresent()) { + // The previous UUID should not change for the current UUID. So fetching the latest manifest + // from remote store and getting the previous UUID. + previousClusterUUID = latestManifest.get().getPreviousClusterUUID(); + } else { + // When the user starts the cluster with remote state disabled but later enables the remote state, + // there will not be any manifest for the current cluster UUID. 
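+ // Only log in that case; writeFullMetadata below still creates the first manifest for this cluster UUID, using the previousClusterUUID supplied when this instance was constructed.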
+ logger.error( + "Latest manifest is not present in remote store for cluster UUID: {}", + clusterState.metadata().clusterUUID() + ); + } + manifest = remoteClusterStateService.writeFullMetadata(clusterState, previousClusterUUID); + } else { + assert verifyManifestAndClusterState(lastAcceptedManifest, lastAcceptedState) == true + : "Previous manifest and previous ClusterState are not in sync"; + manifest = remoteClusterStateService.writeIncrementalMetadata(lastAcceptedState, clusterState, lastAcceptedManifest); + } + assert verifyManifestAndClusterState(manifest, clusterState) == true : "Manifest and ClusterState are not in sync"; + lastAcceptedManifest = manifest; + lastAcceptedState = clusterState; + } catch (Exception e) { + remoteClusterStateService.writeMetadataFailed(); + handleExceptionOnWrite(e); + } + } + + @Override + public PersistedStateStats getStats() { + return remoteClusterStateService.getStats(); + } + + private boolean verifyManifestAndClusterState(ClusterMetadataManifest manifest, ClusterState clusterState) { + assert manifest != null : "ClusterMetadataManifest is null"; + assert clusterState != null : "ClusterState is null"; + assert clusterState.metadata().indices().size() == manifest.getIndices().size() + : "Number of indices in last accepted state and manifest are different"; + manifest.getIndices().stream().forEach(md -> { + assert clusterState.metadata().indices().containsKey(md.getIndexName()) + : "Last accepted state does not contain the index : " + md.getIndexName(); + assert clusterState.metadata().indices().get(md.getIndexName()).getIndexUUID().equals(md.getIndexUUID()) + : "Last accepted state and manifest do not have same UUID for index : " + md.getIndexName(); + }); + return true; + } + + private boolean shouldWriteFullClusterState(ClusterState clusterState) { + if (lastAcceptedState == null + || lastAcceptedManifest == null + || lastAcceptedState.term() != clusterState.term() + || lastAcceptedManifest.getOpensearchVersion() != Version.CURRENT) { + return true; + } + return false; + } + + @Override + public void markLastAcceptedStateAsCommitted() { + try { + assert lastAcceptedState != null : "Last accepted state is not present"; + assert lastAcceptedManifest != null : "Last accepted manifest is not present"; + ClusterState clusterState = lastAcceptedState; + if (lastAcceptedState.metadata().clusterUUID().equals(Metadata.UNKNOWN_CLUSTER_UUID) == false + && lastAcceptedState.metadata().clusterUUIDCommitted() == false) { + Metadata.Builder metadataBuilder = Metadata.builder(lastAcceptedState.metadata()); + metadataBuilder.clusterUUIDCommitted(true); + clusterState = ClusterState.builder(lastAcceptedState).metadata(metadataBuilder).build(); + } + final ClusterMetadataManifest committedManifest = remoteClusterStateService.markLastStateAsCommitted( + clusterState, + lastAcceptedManifest + ); + lastAcceptedManifest = committedManifest; + lastAcceptedState = clusterState; + } catch (Exception e) { + handleExceptionOnWrite(e); + } + } + + @Override + public void close() throws IOException { + remoteClusterStateService.close(); + } + + private void handleExceptionOnWrite(Exception e) { + throw ExceptionsHelper.convertToRuntime(e); + } + } } diff --git a/server/src/main/java/org/opensearch/gateway/GatewayModule.java b/server/src/main/java/org/opensearch/gateway/GatewayModule.java index 59ec0243c88c9..847ba01737332 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayModule.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayModule.java @@ -47,6 
+47,7 @@ protected void configure() { bind(GatewayService.class).asEagerSingleton(); bind(TransportNodesListGatewayMetaState.class).asEagerSingleton(); bind(TransportNodesListGatewayStartedShards.class).asEagerSingleton(); + bind(TransportNodesListGatewayStartedShardsBatch.class).asEagerSingleton(); bind(LocalAllocateDangledIndices.class).asEagerSingleton(); } } diff --git a/server/src/main/java/org/opensearch/gateway/GatewayService.java b/server/src/main/java/org/opensearch/gateway/GatewayService.java index cf105380e98ad..7f95d7afe90c7 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayService.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayService.java @@ -46,15 +46,15 @@ import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.component.AbstractLifecycleComponent; import org.opensearch.common.inject.Inject; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; -import org.opensearch.discovery.Discovery; import org.opensearch.core.rest.RestStatus; +import org.opensearch.discovery.Discovery; import org.opensearch.threadpool.ThreadPool; import java.util.concurrent.atomic.AtomicBoolean; diff --git a/server/src/main/java/org/opensearch/gateway/LocalAllocateDangledIndices.java b/server/src/main/java/org/opensearch/gateway/LocalAllocateDangledIndices.java index 5ee369d6b9402..84290634359e3 100644 --- a/server/src/main/java/org/opensearch/gateway/LocalAllocateDangledIndices.java +++ b/server/src/main/java/org/opensearch/gateway/LocalAllocateDangledIndices.java @@ -36,7 +36,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionListenerResponseHandler; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; @@ -50,16 +49,17 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; import org.opensearch.common.inject.Inject; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.settings.Settings; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.discovery.ClusterManagerNotDiscoveredException; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportChannel; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestHandler; -import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportService; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/gateway/MetaStateService.java b/server/src/main/java/org/opensearch/gateway/MetaStateService.java index ce912142e35b0..94bfdae151427 100644 --- a/server/src/main/java/org/opensearch/gateway/MetaStateService.java +++ b/server/src/main/java/org/opensearch/gateway/MetaStateService.java @@ -41,9 +41,9 @@ import 
org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.Nullable; import org.opensearch.common.collect.Tuple; +import org.opensearch.core.index.Index; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.NodeEnvironment; -import org.opensearch.core.index.Index; import java.io.IOException; import java.util.ArrayList; diff --git a/server/src/main/java/org/opensearch/gateway/MetadataStateFormat.java b/server/src/main/java/org/opensearch/gateway/MetadataStateFormat.java index 9943f04c964e1..ad47ca66129bb 100644 --- a/server/src/main/java/org/opensearch/gateway/MetadataStateFormat.java +++ b/server/src/main/java/org/opensearch/gateway/MetadataStateFormat.java @@ -48,13 +48,13 @@ import org.opensearch.common.collect.Tuple; import org.opensearch.common.lucene.store.IndexOutputOutputStream; import org.opensearch.common.lucene.store.InputStreamIndexInput; +import org.opensearch.common.util.io.IOUtils; import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.util.io.IOUtils; import java.io.FileNotFoundException; import java.io.IOException; @@ -288,7 +288,7 @@ private long write(final T state, boolean cleanup, final Path... locations) thro } protected XContentBuilder newXContentBuilder(XContentType type, OutputStream stream) throws IOException { - return XContentFactory.contentBuilder(type, stream); + return MediaTypeRegistry.contentBuilder(type, stream); } /** @@ -321,7 +321,7 @@ public final T read(NamedXContentRegistry namedXContentRegistry, Path file) thro long contentSize = indexInput.length() - CodecUtil.footerLength() - filePointer; try (IndexInput slice = indexInput.slice("state_xcontent", filePointer, contentSize)) { try ( - XContentParser parser = XContentFactory.xContent(FORMAT) + XContentParser parser = FORMAT.xContent() .createParser( namedXContentRegistry, LoggingDeprecationHandler.INSTANCE, diff --git a/server/src/main/java/org/opensearch/gateway/PersistedClusterStateService.java b/server/src/main/java/org/opensearch/gateway/PersistedClusterStateService.java index caddf92150cff..75beb6e29599c 100644 --- a/server/src/main/java/org/opensearch/gateway/PersistedClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/PersistedClusterStateService.java @@ -67,29 +67,29 @@ import org.opensearch.common.CheckedConsumer; import org.opensearch.common.Nullable; import org.opensearch.common.SetOnce; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.bytes.RecyclingBytesStreamOutput; import org.opensearch.common.io.Streams; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.logging.Loggers; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; -import org.opensearch.core.common.util.ByteArray; import org.opensearch.common.util.PageCacheRecycler; +import org.opensearch.common.util.io.IOUtils; import 
org.opensearch.common.xcontent.LoggingDeprecationHandler; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.util.io.IOUtils; -import org.opensearch.common.lease.Releasable; -import org.opensearch.common.lease.Releasables; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.util.ByteArray; +import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.env.NodeEnvironment; import org.opensearch.env.NodeMetadata; -import org.opensearch.core.index.Index; import java.io.Closeable; import java.io.IOError; @@ -111,16 +111,16 @@ * Stores cluster metadata in a bare Lucene index (per data path) split across a number of documents. This is used by cluster-manager-eligible nodes * to record the last-accepted cluster state during publication. The metadata is written incrementally where possible, leaving alone any * documents that have not changed. The index has the following fields: - * + * <p> * +------------------------------+-----------------------------+----------------------------------------------+ * | "type" (string field) | "index_uuid" (string field) | "data" (stored binary field in SMILE format) | * +------------------------------+-----------------------------+----------------------------------------------+ * | GLOBAL_TYPE_NAME == "global" | (omitted) | Global metadata | * | INDEX_TYPE_NAME == "index" | Index UUID | Index metadata | * +------------------------------+-----------------------------+----------------------------------------------+ - * + * <p> * Additionally each commit has the following user data: - * + * <p> * +---------------------------+-------------------------+-------------------------------------------------------------------------------+ * | Key symbol | Key literal | Value | * +---------------------------+-------------------------+-------------------------------------------------------------------------------+ @@ -129,7 +129,7 @@ * | NODE_ID_KEY | "node_id" | The (persistent) ID of the node that wrote this metadata | * | NODE_VERSION_KEY | "node_version" | The (ID of the) version of the node that wrote this metadata | * +---------------------------+-------------------------+-------------------------------------------------------------------------------+ - * + * <p> * (the last-accepted term is recorded in Metadata → CoordinationMetadata so does not need repeating here) * * @opensearch.internal @@ -448,7 +448,7 @@ private OnDiskState loadOnDiskState(Path dataPath, DirectoryReader reader) throw final SetOnce<Metadata.Builder> builderReference = new SetOnce<>(); consumeFromType(searcher, GLOBAL_TYPE_NAME, bytes -> { final Metadata metadata = Metadata.Builder.fromXContent( - XContentFactory.xContent(XContentType.SMILE) + XContentType.SMILE.xContent() .createParser(namedXContentRegistry, LoggingDeprecationHandler.INSTANCE, bytes.bytes, bytes.offset, bytes.length) ); logger.trace("found global metadata with last-accepted term [{}]", metadata.coordinationMetadata().term()); @@ -468,7 +468,7 @@ private OnDiskState loadOnDiskState(Path dataPath, DirectoryReader reader) throw final Set<String> indexUUIDs = new HashSet<>(); consumeFromType(searcher, INDEX_TYPE_NAME, bytes -> { final IndexMetadata indexMetadata = IndexMetadata.fromXContent( - 
XContentFactory.xContent(XContentType.SMILE) + XContentType.SMILE.xContent() .createParser(namedXContentRegistry, LoggingDeprecationHandler.INSTANCE, bytes.bytes, bytes.offset, bytes.length) ); logger.trace("found index metadata for {}", indexMetadata.getIndex()); @@ -921,7 +921,7 @@ private Document makeDocument(String typeName, ToXContent metadata, DocumentBuff try (RecyclingBytesStreamOutput streamOutput = documentBuffer.streamOutput()) { try ( - XContentBuilder xContentBuilder = XContentFactory.contentBuilder( + XContentBuilder xContentBuilder = MediaTypeRegistry.contentBuilder( XContentType.SMILE, Streams.flushOnCloseStream(streamOutput) ) diff --git a/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java b/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java index 4dc9396751fc9..5046873830c01 100644 --- a/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java @@ -69,7 +69,7 @@ * that holds a copy of the shard. The shard metadata from each node is compared against the * set of valid allocation IDs and for all valid shard copies (if any), the primary shard allocator * executes the allocation deciders to chose a copy to assign the primary shard to. - * + * <p> * Note that the PrimaryShardAllocator does *not* allocate primaries on index creation * (see {@link org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator}), * nor does it allocate primaries when a primary shard failed and there is a valid replica @@ -81,7 +81,7 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator { /** * Is the allocator responsible for allocating the given {@link ShardRouting}? */ - private static boolean isResponsibleFor(final ShardRouting shard) { + protected static boolean isResponsibleFor(final ShardRouting shard) { return shard.primary() // must be primary && shard.unassigned() // must be unassigned // only handle either an existing store or a snapshot recovery @@ -89,19 +89,20 @@ private static boolean isResponsibleFor(final ShardRouting shard) { || shard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT); } - @Override - public AllocateUnassignedDecision makeAllocationDecision( - final ShardRouting unassignedShard, - final RoutingAllocation allocation, - final Logger logger - ) { + /** + * Skip the fetchData call for a shard if its recovery mode is snapshot. Also do not take a decision if the + * allocator is not responsible for this particular shard. 
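+ * Returns {@link AllocateUnassignedDecision#NOT_TAKEN} when the allocator is not responsible, a NO decision while snapshot shard sizes are still being fetched, and null when the shard is eligible for a regular allocation decision.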
+ * + * @param unassignedShard unassigned shard routing + * @param allocation routing allocation object + * @return allocation decision taken for this shard + */ + protected AllocateUnassignedDecision getInEligibleShardDecision(ShardRouting unassignedShard, RoutingAllocation allocation) { if (isResponsibleFor(unassignedShard) == false) { // this allocator is not responsible for allocating this shard return AllocateUnassignedDecision.NOT_TAKEN; } - final boolean explain = allocation.debugDecision(); - if (unassignedShard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT && allocation.snapshotShardSizeInfo().getShardSize(unassignedShard) == null) { List<NodeAllocationResult> nodeDecisions = null; @@ -110,9 +111,45 @@ public AllocateUnassignedDecision makeAllocationDecision( } return AllocateUnassignedDecision.no(UnassignedInfo.AllocationStatus.FETCHING_SHARD_DATA, nodeDecisions); } + return null; + } + @Override + public AllocateUnassignedDecision makeAllocationDecision( + final ShardRouting unassignedShard, + final RoutingAllocation allocation, + final Logger logger + ) { + AllocateUnassignedDecision decision = getInEligibleShardDecision(unassignedShard, allocation); + if (decision != null) { + return decision; + } final FetchResult<NodeGatewayStartedShards> shardState = fetchData(unassignedShard, allocation); - if (shardState.hasData() == false) { + List<NodeGatewayStartedShards> nodeShardStates = adaptToNodeStartedShardList(shardState); + return getAllocationDecision(unassignedShard, allocation, nodeShardStates, logger); + } + + /** + * Transforms {@link FetchResult} of {@link NodeGatewayStartedShards} to {@link List} of {@link NodeGatewayStartedShards} + * Returns null if {@link FetchResult} does not have any data. + */ + private static List<NodeGatewayStartedShards> adaptToNodeStartedShardList(FetchResult<NodeGatewayStartedShards> shardsState) { + if (!shardsState.hasData()) { + return null; + } + List<NodeGatewayStartedShards> nodeShardStates = new ArrayList<>(); + shardsState.getData().forEach((node, nodeGatewayStartedShard) -> { nodeShardStates.add(nodeGatewayStartedShard); }); + return nodeShardStates; + } + + protected AllocateUnassignedDecision getAllocationDecision( + ShardRouting unassignedShard, + RoutingAllocation allocation, + List<NodeGatewayStartedShards> shardState, + Logger logger + ) { + final boolean explain = allocation.debugDecision(); + if (shardState == null) { allocation.setHasPendingAsyncFetch(); List<NodeAllocationResult> nodeDecisions = null; if (explain) { @@ -120,7 +157,6 @@ public AllocateUnassignedDecision makeAllocationDecision( } return AllocateUnassignedDecision.no(AllocationStatus.FETCHING_SHARD_DATA, nodeDecisions); } - // don't create a new IndexSetting object for every shard as this could cause a lot of garbage // on cluster restart if we allocate a boat load of shards final IndexMetadata indexMetadata = allocation.metadata().getIndexSafe(unassignedShard.index()); @@ -260,11 +296,11 @@ public AllocateUnassignedDecision makeAllocationDecision( */ private static List<NodeAllocationResult> buildNodeDecisions( NodesToAllocate nodesToAllocate, - FetchResult<NodeGatewayStartedShards> fetchedShardData, + List<NodeGatewayStartedShards> fetchedShardData, Set<String> inSyncAllocationIds ) { List<NodeAllocationResult> nodeResults = new ArrayList<>(); - Collection<NodeGatewayStartedShards> ineligibleShards; + Collection<NodeGatewayStartedShards> ineligibleShards = new ArrayList<>(); if (nodesToAllocate != null) { final Set<DiscoveryNode> discoNodes = new 
HashSet<>(); nodeResults.addAll( @@ -280,15 +316,13 @@ private static List<NodeAllocationResult> buildNodeDecisions( }) .collect(Collectors.toList()) ); - ineligibleShards = fetchedShardData.getData() - .values() - .stream() + ineligibleShards = fetchedShardData.stream() .filter(shardData -> discoNodes.contains(shardData.getNode()) == false) .collect(Collectors.toList()); } else { // there were no shard copies that were eligible for being assigned the allocation, // so all fetched shard data are ineligible shards - ineligibleShards = fetchedShardData.getData().values(); + ineligibleShards = fetchedShardData; } nodeResults.addAll( @@ -328,12 +362,12 @@ protected static NodeShardsResult buildNodeShardsResult( boolean matchAnyShard, Set<String> ignoreNodes, Set<String> inSyncAllocationIds, - FetchResult<NodeGatewayStartedShards> shardState, + List<NodeGatewayStartedShards> shardState, Logger logger ) { List<NodeGatewayStartedShards> nodeShardStates = new ArrayList<>(); int numberOfAllocationsFound = 0; - for (NodeGatewayStartedShards nodeShardState : shardState.getData().values()) { + for (NodeGatewayStartedShards nodeShardState : shardState) { DiscoveryNode node = nodeShardState.getNode(); String allocationId = nodeShardState.allocationId(); @@ -386,6 +420,22 @@ protected static NodeShardsResult buildNodeShardsResult( } } + nodeShardStates.sort(createActiveShardComparator(matchAnyShard, inSyncAllocationIds)); + + if (logger.isTraceEnabled()) { + logger.trace( + "{} candidates for allocation: {}", + shard, + nodeShardStates.stream().map(s -> s.getNode().getName()).collect(Collectors.joining(", ")) + ); + } + return new NodeShardsResult(nodeShardStates, numberOfAllocationsFound); + } + + private static Comparator<NodeGatewayStartedShards> createActiveShardComparator( + boolean matchAnyShard, + Set<String> inSyncAllocationIds + ) { /** * Orders the active shards copies based on below comparators * 1. No store exception i.e. 
shard copy is readable @@ -406,16 +456,7 @@ protected static NodeShardsResult buildNodeShardsResult( .thenComparing(HIGHEST_REPLICATION_CHECKPOINT_FIRST_COMPARATOR); } - nodeShardStates.sort(comparator); - - if (logger.isTraceEnabled()) { - logger.trace( - "{} candidates for allocation: {}", - shard, - nodeShardStates.stream().map(s -> s.getNode().getName()).collect(Collectors.joining(", ")) - ); - } - return new NodeShardsResult(nodeShardStates, numberOfAllocationsFound); + return comparator; } /** @@ -457,7 +498,10 @@ private static NodesToAllocate buildNodesToAllocate( protected abstract FetchResult<NodeGatewayStartedShards> fetchData(ShardRouting shard, RoutingAllocation allocation); - private static class NodeShardsResult { + /** + * This class encapsulates the result of a call to {@link #buildNodeShardsResult} + */ + static class NodeShardsResult { final List<NodeGatewayStartedShards> orderedAllocationCandidates; final int allocationsFound; @@ -467,7 +511,10 @@ private static class NodeShardsResult { } } - static class NodesToAllocate { + /** + * This class encapsulates the result of a call to {@link #buildNodesToAllocate} + */ + protected static class NodesToAllocate { final List<DecidedNode> yesNodeShards; final List<DecidedNode> throttleNodeShards; final List<DecidedNode> noNodeShards; diff --git a/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java b/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java index 5216dd2fcb4b5..d9474b32bdbf6 100644 --- a/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java @@ -48,11 +48,11 @@ import org.opensearch.cluster.routing.allocation.decider.Decision; import org.opensearch.common.Nullable; import org.opensearch.common.collect.Tuple; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.index.store.StoreFileMetadata; -import org.opensearch.indices.store.TransportNodesListShardStoreMetadata; import org.opensearch.indices.store.TransportNodesListShardStoreMetadata.NodeStoreFilesMetadata; +import org.opensearch.indices.store.TransportNodesListShardStoreMetadataHelper.StoreFilesMetadata; import java.util.ArrayList; import java.util.Collections; @@ -61,6 +61,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.stream.Collectors; import static org.opensearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; @@ -70,93 +71,112 @@ * @opensearch.internal */ public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator { + protected boolean shouldSkipFetchForRecovery(ShardRouting shard) { + if (shard.primary()) { + return true; + } + if (shard.initializing() == false) { + return true; + } + if (shard.relocatingNodeId() != null) { + return true; + } + if (shard.unassignedInfo() != null && shard.unassignedInfo().getReason() == UnassignedInfo.Reason.INDEX_CREATED) { + // if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one... 
+ return true; + } + return false; + } + + protected Runnable cancelExistingRecoveryForBetterMatch( + ShardRouting shard, + RoutingAllocation allocation, + Map<DiscoveryNode, StoreFilesMetadata> nodeShardStores + ) { + if (nodeShardStores == null) { + logger.trace("{}: fetching new stores for initializing shard", shard); + return null; + } + Metadata metadata = allocation.metadata(); + RoutingNodes routingNodes = allocation.routingNodes(); + ShardRouting primaryShard = allocation.routingNodes().activePrimary(shard.shardId()); + assert primaryShard != null : "the replica shard can be allocated on at least one node, so there must be an active primary"; + assert primaryShard.currentNodeId() != null; + final DiscoveryNode primaryNode = allocation.nodes().get(primaryShard.currentNodeId()); + + final StoreFilesMetadata primaryStore = findStore(primaryNode, nodeShardStores); + if (primaryStore == null) { + // if we can't find the primary data, it is probably because the primary shard is corrupted (and listing failed) + // just let the recovery find it out, no need to do anything about it for the initializing shard + logger.trace("{}: no primary shard store found or allocated, letting actual allocation figure it out", shard); + return null; + } + + MatchingNodes matchingNodes = findMatchingNodes(shard, allocation, true, primaryNode, primaryStore, nodeShardStores, false); + if (matchingNodes.getNodeWithHighestMatch() != null) { + DiscoveryNode currentNode = allocation.nodes().get(shard.currentNodeId()); + DiscoveryNode nodeWithHighestMatch = matchingNodes.getNodeWithHighestMatch(); + // current node will not be in matchingNodes as it is filtered away by SameShardAllocationDecider + if (currentNode.equals(nodeWithHighestMatch) == false + && matchingNodes.canPerformNoopRecovery(nodeWithHighestMatch) + && canPerformOperationBasedRecovery(primaryStore, nodeShardStores, currentNode) == false) { + // we found a better match that can perform noop recovery, cancel the existing allocation. + logger.debug( + "cancelling allocation of replica on [{}], can perform a noop recovery on node [{}]", + currentNode, + nodeWithHighestMatch + ); + final Set<String> failedNodeIds = shard.unassignedInfo() == null + ? Collections.emptySet() + : shard.unassignedInfo().getFailedNodeIds(); + UnassignedInfo unassignedInfo = new UnassignedInfo( + UnassignedInfo.Reason.REALLOCATED_REPLICA, + "existing allocation of replica to [" + + currentNode + + "] cancelled, can perform a noop recovery on [" + + nodeWithHighestMatch + + "]", + null, + 0, + allocation.getCurrentNanoTime(), + System.currentTimeMillis(), + false, + UnassignedInfo.AllocationStatus.NO_ATTEMPT, + failedNodeIds + ); + // don't cancel shard in the loop as it will cause a ConcurrentModificationException + return () -> routingNodes.failShard( + logger, + shard, + unassignedInfo, + metadata.getIndexSafe(shard.index()), + allocation.changes() + ); + } + } + return null; + } + /** * Process existing recoveries of replicas and see if we need to cancel them if we find a better * match. Today, a better match is one that can perform a no-op recovery while the previous recovery * has to copy segment files. 
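The extracted cancelExistingRecoveryForBetterMatch keeps the original deferred-mutation pattern: it returns a Runnable rather than failing the shard inline, because the caller is still iterating the routing nodes when the better match is found. A minimal sketch of that pattern under toy assumptions (plain String shards instead of ShardRouting; nothing below is OpenSearch API):

```java
import java.util.ArrayList;
import java.util.List;

public class DeferredCancellationSketch {
    public static void main(String[] args) {
        List<String> shards = new ArrayList<>(List.of("s0", "s1", "s2"));
        List<Runnable> shardCancellationActions = new ArrayList<>();
        for (String shard : shards) {
            if (shard.equals("s1")) {
                // Mutating `shards` here, inside the loop, would throw
                // ConcurrentModificationException; record the action instead.
                shardCancellationActions.add(() -> shards.remove(shard));
            }
        }
        // Run the deferred cancellations once iteration is complete.
        shardCancellationActions.forEach(Runnable::run);
        System.out.println(shards); // prints [s0, s2]
    }
}
```

Collecting the mutations and running them after the loop is exactly what the in-code comment about ConcurrentModificationException is guarding against.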
*/ public void processExistingRecoveries(RoutingAllocation allocation) { - Metadata metadata = allocation.metadata(); RoutingNodes routingNodes = allocation.routingNodes(); List<Runnable> shardCancellationActions = new ArrayList<>(); for (RoutingNode routingNode : routingNodes) { for (ShardRouting shard : routingNode) { - if (shard.primary()) { - continue; - } - if (shard.initializing() == false) { - continue; - } - if (shard.relocatingNodeId() != null) { - continue; - } - - // if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one... - if (shard.unassignedInfo() != null && shard.unassignedInfo().getReason() == UnassignedInfo.Reason.INDEX_CREATED) { + if (shouldSkipFetchForRecovery(shard)) { continue; } AsyncShardFetch.FetchResult<NodeStoreFilesMetadata> shardStores = fetchData(shard, allocation); - if (shardStores.hasData() == false) { - logger.trace("{}: fetching new stores for initializing shard", shard); - continue; // still fetching - } + Map<DiscoveryNode, StoreFilesMetadata> nodeShardStores = convertToNodeStoreFilesMetadataMap(shardStores); - ShardRouting primaryShard = allocation.routingNodes().activePrimary(shard.shardId()); - assert primaryShard != null : "the replica shard can be allocated on at least one node, so there must be an active primary"; - assert primaryShard.currentNodeId() != null; - final DiscoveryNode primaryNode = allocation.nodes().get(primaryShard.currentNodeId()); - final TransportNodesListShardStoreMetadata.StoreFilesMetadata primaryStore = findStore(primaryNode, shardStores); - if (primaryStore == null) { - // if we can't find the primary data, it is probably because the primary shard is corrupted (and listing failed) - // just let the recovery find it out, no need to do anything about it for the initializing shard - logger.trace("{}: no primary shard store found or allocated, letting actual allocation figure it out", shard); - continue; - } - - MatchingNodes matchingNodes = findMatchingNodes(shard, allocation, true, primaryNode, primaryStore, shardStores, false); - if (matchingNodes.getNodeWithHighestMatch() != null) { - DiscoveryNode currentNode = allocation.nodes().get(shard.currentNodeId()); - DiscoveryNode nodeWithHighestMatch = matchingNodes.getNodeWithHighestMatch(); - // current node will not be in matchingNodes as it is filtered away by SameShardAllocationDecider - if (currentNode.equals(nodeWithHighestMatch) == false - && matchingNodes.canPerformNoopRecovery(nodeWithHighestMatch) - && canPerformOperationBasedRecovery(primaryStore, shardStores, currentNode) == false) { - // we found a better match that can perform noop recovery, cancel the existing allocation. - logger.debug( - "cancelling allocation of replica on [{}], can perform a noop recovery on node [{}]", - currentNode, - nodeWithHighestMatch - ); - final Set<String> failedNodeIds = shard.unassignedInfo() == null - ? 
Collections.emptySet() - : shard.unassignedInfo().getFailedNodeIds(); - UnassignedInfo unassignedInfo = new UnassignedInfo( - UnassignedInfo.Reason.REALLOCATED_REPLICA, - "existing allocation of replica to [" - + currentNode - + "] cancelled, can perform a noop recovery on [" - + nodeWithHighestMatch - + "]", - null, - 0, - allocation.getCurrentNanoTime(), - System.currentTimeMillis(), - false, - UnassignedInfo.AllocationStatus.NO_ATTEMPT, - failedNodeIds - ); - // don't cancel shard in the loop as it will cause a ConcurrentModificationException - shardCancellationActions.add( - () -> routingNodes.failShard( - logger, - shard, - unassignedInfo, - metadata.getIndexSafe(shard.index()), - allocation.changes() - ) - ); - } + Runnable cancellationAction = cancelExistingRecoveryForBetterMatch(shard, allocation, nodeShardStores); + if (cancellationAction != null) { + shardCancellationActions.add(cancellationAction); } } } @@ -168,7 +188,7 @@ && canPerformOperationBasedRecovery(primaryStore, shardStores, currentNode) == f /** * Is the allocator responsible for allocating the given {@link ShardRouting}? */ - private static boolean isResponsibleFor(final ShardRouting shard) { + protected static boolean isResponsibleFor(final ShardRouting shard) { return shard.primary() == false // must be a replica && shard.unassigned() // must be unassigned // if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one... @@ -186,12 +206,11 @@ public AllocateUnassignedDecision makeAllocationDecision( return AllocateUnassignedDecision.NOT_TAKEN; } - final RoutingNodes routingNodes = allocation.routingNodes(); - final boolean explain = allocation.debugDecision(); // pre-check if it can be allocated to any node that currently exists, so we won't list the store for it for nothing Tuple<Decision, Map<String, NodeAllocationResult>> result = canBeAllocatedToAtLeastOneNode(unassignedShard, allocation); Decision allocateDecision = result.v1(); - if (allocateDecision.type() != Decision.Type.YES && (explain == false || hasInitiatedFetching(unassignedShard) == false)) { + if (allocateDecision.type() != Decision.Type.YES + && (allocation.debugDecision() == false || hasInitiatedFetching(unassignedShard) == false)) { // only return early if we are not in explain mode, or we are in explain mode but we have not // yet attempted to fetch any shard data logger.trace("{}: ignoring allocation, can't be allocated on any node", unassignedShard); @@ -202,28 +221,41 @@ public AllocateUnassignedDecision makeAllocationDecision( } AsyncShardFetch.FetchResult<NodeStoreFilesMetadata> shardStores = fetchData(unassignedShard, allocation); - if (shardStores.hasData() == false) { + Map<DiscoveryNode, StoreFilesMetadata> nodeShardStores = convertToNodeStoreFilesMetadataMap(shardStores); + return getAllocationDecision(unassignedShard, allocation, nodeShardStores, result, logger); + } + + protected AllocateUnassignedDecision getAllocationDecision( + ShardRouting unassignedShard, + RoutingAllocation allocation, + Map<DiscoveryNode, StoreFilesMetadata> nodeShardStores, + Tuple<Decision, Map<String, NodeAllocationResult>> allocationDecision, + Logger logger + ) { + if (nodeShardStores == null) { + // node shard stores is null when we don't have data yet and still fetching the shard stores logger.trace("{}: ignoring allocation, still fetching shard stores", unassignedShard); allocation.setHasPendingAsyncFetch(); List<NodeAllocationResult> nodeDecisions = null; - if (explain) { + if (allocation.debugDecision()) 
{ nodeDecisions = buildDecisionsForAllNodes(unassignedShard, allocation); } return AllocateUnassignedDecision.no(AllocationStatus.FETCHING_SHARD_DATA, nodeDecisions); } - + final RoutingNodes routingNodes = allocation.routingNodes(); + final boolean explain = allocation.debugDecision(); ShardRouting primaryShard = routingNodes.activePrimary(unassignedShard.shardId()); if (primaryShard == null) { assert explain : "primary should only be null here if we are in explain mode, so we didn't " + "exit early when canBeAllocatedToAtLeastOneNode didn't return a YES decision"; return AllocateUnassignedDecision.no( - UnassignedInfo.AllocationStatus.fromDecision(allocateDecision.type()), - new ArrayList<>(result.v2().values()) + UnassignedInfo.AllocationStatus.fromDecision(allocationDecision.v1().type()), + new ArrayList<>(allocationDecision.v2().values()) ); } assert primaryShard.currentNodeId() != null; final DiscoveryNode primaryNode = allocation.nodes().get(primaryShard.currentNodeId()); - final TransportNodesListShardStoreMetadata.StoreFilesMetadata primaryStore = findStore(primaryNode, shardStores); + final StoreFilesMetadata primaryStore = findStore(primaryNode, nodeShardStores); if (primaryStore == null) { // if we can't find the primary data, it is probably because the primary shard is corrupted (and listing failed) // we want to let the replica be allocated in order to expose the actual problem with the primary that the replica @@ -239,14 +271,17 @@ public AllocateUnassignedDecision makeAllocationDecision( false, primaryNode, primaryStore, - shardStores, + nodeShardStores, explain ); assert explain == false || matchingNodes.nodeDecisions != null : "in explain mode, we must have individual node decisions"; - List<NodeAllocationResult> nodeDecisions = augmentExplanationsWithStoreInfo(result.v2(), matchingNodes.nodeDecisions); - if (allocateDecision.type() != Decision.Type.YES) { - return AllocateUnassignedDecision.no(UnassignedInfo.AllocationStatus.fromDecision(allocateDecision.type()), nodeDecisions); + List<NodeAllocationResult> nodeDecisions = augmentExplanationsWithStoreInfo(allocationDecision.v2(), matchingNodes.nodeDecisions); + if (allocationDecision.v1().type() != Decision.Type.YES) { + return AllocateUnassignedDecision.no( + UnassignedInfo.AllocationStatus.fromDecision(allocationDecision.v1().type()), + nodeDecisions + ); } else if (matchingNodes.getNodeWithHighestMatch() != null) { RoutingNode nodeWithHighestMatch = allocation.routingNodes().node(matchingNodes.getNodeWithHighestMatch().getId()); // we only check on THROTTLE since we checked before on NO @@ -295,13 +330,13 @@ public AllocateUnassignedDecision makeAllocationDecision( /** * Determines if the shard can be allocated on at least one node based on the allocation deciders. - * + * <p> * Returns the best allocation decision for allocating the shard on any node (i.e. YES if at least one * node decided YES, THROTTLE if at least one node decided THROTTLE, and NO if none of the nodes decided * YES or THROTTLE). If in explain mode, also returns the node-level explanations as the second element * in the returned tuple. 
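As the javadoc above states, the combined decision is effectively a maximum over the per-node decisions under the ordering NO &lt; THROTTLE &lt; YES. A toy sketch of that folding rule, using a hypothetical Type enum in place of the deciders' Decision.Type:

```java
import java.util.List;

public class BestDecisionSketch {
    // Hypothetical stand-in for the allocation deciders' decision type,
    // ordered so that ordinal comparison picks the most permissive answer.
    enum Type { NO, THROTTLE, YES }

    static Type bestAcrossNodes(List<Type> perNodeDecisions) {
        Type best = Type.NO;
        for (Type t : perNodeDecisions) {
            if (t.ordinal() > best.ordinal()) {
                best = t; // YES beats THROTTLE, THROTTLE beats NO
            }
        }
        return best;
    }

    public static void main(String[] args) {
        System.out.println(bestAcrossNodes(List.of(Type.NO, Type.THROTTLE, Type.NO))); // THROTTLE
        System.out.println(bestAcrossNodes(List.of(Type.NO, Type.YES)));               // YES
    }
}
```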
*/ - private static Tuple<Decision, Map<String, NodeAllocationResult>> canBeAllocatedToAtLeastOneNode( + protected static Tuple<Decision, Map<String, NodeAllocationResult>> canBeAllocatedToAtLeastOneNode( ShardRouting shard, RoutingAllocation allocation ) { @@ -357,15 +392,11 @@ private static List<NodeAllocationResult> augmentExplanationsWithStoreInfo( /** * Finds the store for the assigned shard in the fetched data, returns null if none is found. */ - private static TransportNodesListShardStoreMetadata.StoreFilesMetadata findStore( - DiscoveryNode node, - AsyncShardFetch.FetchResult<NodeStoreFilesMetadata> data - ) { - NodeStoreFilesMetadata nodeFilesStore = data.getData().get(node); - if (nodeFilesStore == null) { + private static StoreFilesMetadata findStore(DiscoveryNode node, Map<DiscoveryNode, StoreFilesMetadata> data) { + if (!data.containsKey(node)) { return null; } - return nodeFilesStore.storeFilesMetadata(); + return data.get(node); } private MatchingNodes findMatchingNodes( @@ -373,20 +404,20 @@ private MatchingNodes findMatchingNodes( RoutingAllocation allocation, boolean noMatchFailedNodes, DiscoveryNode primaryNode, - TransportNodesListShardStoreMetadata.StoreFilesMetadata primaryStore, - AsyncShardFetch.FetchResult<NodeStoreFilesMetadata> data, + StoreFilesMetadata primaryStore, + Map<DiscoveryNode, StoreFilesMetadata> data, boolean explain ) { Map<DiscoveryNode, MatchingNode> matchingNodes = new HashMap<>(); Map<String, NodeAllocationResult> nodeDecisions = explain ? new HashMap<>() : null; - for (Map.Entry<DiscoveryNode, NodeStoreFilesMetadata> nodeStoreEntry : data.getData().entrySet()) { + for (Map.Entry<DiscoveryNode, StoreFilesMetadata> nodeStoreEntry : data.entrySet()) { DiscoveryNode discoNode = nodeStoreEntry.getKey(); if (noMatchFailedNodes && shard.unassignedInfo() != null && shard.unassignedInfo().getFailedNodeIds().contains(discoNode.getId())) { continue; } - TransportNodesListShardStoreMetadata.StoreFilesMetadata storeFilesMetadata = nodeStoreEntry.getValue().storeFilesMetadata(); + StoreFilesMetadata storeFilesMetadata = nodeStoreEntry.getValue(); // we don't have any files at all, it is an empty index if (storeFilesMetadata.isEmpty()) { continue; @@ -441,10 +472,20 @@ private MatchingNodes findMatchingNodes( return new MatchingNodes(matchingNodes, nodeDecisions); } - private static long computeMatchingBytes( - TransportNodesListShardStoreMetadata.StoreFilesMetadata primaryStore, - TransportNodesListShardStoreMetadata.StoreFilesMetadata storeFilesMetadata + private Map<DiscoveryNode, StoreFilesMetadata> convertToNodeStoreFilesMetadataMap( + AsyncShardFetch.FetchResult<NodeStoreFilesMetadata> data ) { + if (data.hasData() == false) { + // if we don't have data yet return null + return null; + } + return data.getData() + .entrySet() + .stream() + .collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().storeFilesMetadata())); + } + + private static long computeMatchingBytes(StoreFilesMetadata primaryStore, StoreFilesMetadata storeFilesMetadata) { long sizeMatched = 0; for (StoreFileMetadata storeFileMetadata : storeFilesMetadata) { String metadataFileName = storeFileMetadata.name(); @@ -455,19 +496,16 @@ private static long computeMatchingBytes( return sizeMatched; } - private static boolean hasMatchingSyncId( - TransportNodesListShardStoreMetadata.StoreFilesMetadata primaryStore, - TransportNodesListShardStoreMetadata.StoreFilesMetadata replicaStore - ) { + private static boolean hasMatchingSyncId(StoreFilesMetadata primaryStore, 
StoreFilesMetadata replicaStore) { String primarySyncId = primaryStore.syncId(); return primarySyncId != null && primarySyncId.equals(replicaStore.syncId()); } private static MatchingNode computeMatchingNode( DiscoveryNode primaryNode, - TransportNodesListShardStoreMetadata.StoreFilesMetadata primaryStore, + StoreFilesMetadata primaryStore, DiscoveryNode replicaNode, - TransportNodesListShardStoreMetadata.StoreFilesMetadata replicaStore + StoreFilesMetadata replicaStore ) { final long retainingSeqNoForPrimary = primaryStore.getPeerRecoveryRetentionLeaseRetainingSeqNo(primaryNode); final long retainingSeqNoForReplica = primaryStore.getPeerRecoveryRetentionLeaseRetainingSeqNo(replicaNode); @@ -478,15 +516,15 @@ private static MatchingNode computeMatchingNode( } private static boolean canPerformOperationBasedRecovery( - TransportNodesListShardStoreMetadata.StoreFilesMetadata primaryStore, - AsyncShardFetch.FetchResult<NodeStoreFilesMetadata> shardStores, + StoreFilesMetadata primaryStore, + Map<DiscoveryNode, StoreFilesMetadata> shardStores, DiscoveryNode targetNode ) { - final NodeStoreFilesMetadata targetNodeStore = shardStores.getData().get(targetNode); - if (targetNodeStore == null || targetNodeStore.storeFilesMetadata().isEmpty()) { + final StoreFilesMetadata targetNodeStore = shardStores.get(targetNode); + if (targetNodeStore == null || targetNodeStore.isEmpty()) { return false; } - if (hasMatchingSyncId(primaryStore, targetNodeStore.storeFilesMetadata())) { + if (hasMatchingSyncId(primaryStore, targetNodeStore)) { return true; } return primaryStore.getPeerRecoveryRetentionLeaseRetainingSeqNo(targetNode) >= 0; @@ -499,7 +537,10 @@ private static boolean canPerformOperationBasedRecovery( */ protected abstract boolean hasInitiatedFetching(ShardRouting shard); - private static class MatchingNode { + /** + * A class to encapsulate the details regarding a MatchingNode for shard assignment + */ + protected static class MatchingNode { static final Comparator<MatchingNode> COMPARATOR = Comparator.<MatchingNode, Boolean>comparing(m -> m.isNoopRecovery) .thenComparing(m -> m.retainingSeqNo) .thenComparing(m -> m.matchingBytes); diff --git a/server/src/main/java/org/opensearch/gateway/TransportNodesGatewayStartedShardHelper.java b/server/src/main/java/org/opensearch/gateway/TransportNodesGatewayStartedShardHelper.java new file mode 100644 index 0000000000000..403e3e96fa209 --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/TransportNodesGatewayStartedShardHelper.java @@ -0,0 +1,114 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.gateway; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.OpenSearchException; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.env.NodeEnvironment; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.ShardPath; +import org.opensearch.index.shard.ShardStateMetadata; +import org.opensearch.index.store.Store; +import org.opensearch.indices.IndicesService; + +import java.io.IOException; + +/** + * This class has the common code used in {@link TransportNodesListGatewayStartedShards} and + * {@link TransportNodesListGatewayStartedShardsBatch} to get the shard info on the local node. + * <p> + * No new functionality should be added to this class; it will be removed when + * {@link TransportNodesListGatewayStartedShards} is deprecated and all the code has been moved to + * {@link TransportNodesListGatewayStartedShardsBatch} + * + * @opensearch.internal + */ +public class TransportNodesGatewayStartedShardHelper { + public static TransportNodesListGatewayStartedShardsBatch.NodeGatewayStartedShard getShardInfoOnLocalNode( + Logger logger, + final ShardId shardId, + NamedXContentRegistry namedXContentRegistry, + NodeEnvironment nodeEnv, + IndicesService indicesService, + String shardDataPathInRequest, + Settings settings, + ClusterService clusterService + ) throws IOException { + logger.trace("{} loading local shard state info", shardId); + ShardStateMetadata shardStateMetadata = ShardStateMetadata.FORMAT.loadLatestState( + logger, + namedXContentRegistry, + nodeEnv.availableShardPaths(shardId) + ); + if (shardStateMetadata != null) { + if (indicesService.getShardOrNull(shardId) == null + && shardStateMetadata.indexDataLocation == ShardStateMetadata.IndexDataLocation.LOCAL) { + final String customDataPath; + if (shardDataPathInRequest != null) { + customDataPath = shardDataPathInRequest; + } else { + // TODO: Fallback for BWC with older OpenSearch versions. + // Remove once request.getCustomDataPath() always returns non-null + final IndexMetadata metadata = clusterService.state().metadata().index(shardId.getIndex()); + if (metadata != null) { + customDataPath = new IndexSettings(metadata, settings).customDataPath(); + } else { + logger.trace("{} node doesn't have meta data for the requests index", shardId); + throw new OpenSearchException("node doesn't have meta data for index " + shardId.getIndex()); + } + } + // we don't have an open shard on the store, validate the files on disk are openable + ShardPath shardPath = null; + try { + shardPath = ShardPath.loadShardPath(logger, nodeEnv, shardId, customDataPath); + if (shardPath == null) { + throw new IllegalStateException(shardId + " no shard path found"); + } + Store.tryOpenIndex(shardPath.resolveIndex(), shardId, nodeEnv::shardLock, logger); + } catch (Exception exception) { + final ShardPath finalShardPath = shardPath; + logger.trace( + () -> new ParameterizedMessage( + "{} can't open index for shard [{}] in path [{}]", + shardId, + shardStateMetadata, + (finalShardPath != null) ? finalShardPath.resolveIndex() : "" + ), + exception + ); + String allocationId = shardStateMetadata.allocationId != null ?
shardStateMetadata.allocationId.getId() : null; + return new TransportNodesListGatewayStartedShardsBatch.NodeGatewayStartedShard( + allocationId, + shardStateMetadata.primary, + null, + exception + ); + } + } + + logger.debug("{} shard state info found: [{}]", shardId, shardStateMetadata); + String allocationId = shardStateMetadata.allocationId != null ? shardStateMetadata.allocationId.getId() : null; + final IndexShard shard = indicesService.getShardOrNull(shardId); + return new TransportNodesListGatewayStartedShardsBatch.NodeGatewayStartedShard( + allocationId, + shardStateMetadata.primary, + shard != null ? shard.getLatestReplicationCheckpoint() : null + ); + } + logger.trace("{} no local shard info found", shardId); + return new TransportNodesListGatewayStartedShardsBatch.NodeGatewayStartedShard(null, false, null); + } +} diff --git a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayMetaState.java b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayMetaState.java index 3be8ac9784960..647e3632ea0ca 100644 --- a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayMetaState.java +++ b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayMetaState.java @@ -32,7 +32,6 @@ package org.opensearch.gateway; -import org.opensearch.action.ActionFuture; import org.opensearch.action.ActionType; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; @@ -46,10 +45,11 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.inject.Inject; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.TimeValue; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java index e2a3f08bb02c6..0ba872aab9974 100644 --- a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java +++ b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java @@ -32,10 +32,8 @@ package org.opensearch.gateway; -import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.OpenSearchException; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionType; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; @@ -44,32 +42,31 @@ import org.opensearch.action.support.nodes.BaseNodesResponse; import org.opensearch.action.support.nodes.TransportNodesAction; import org.opensearch.cluster.ClusterName; -import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; import org.opensearch.common.inject.Inject; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import 
org.opensearch.common.settings.Settings; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.NodeEnvironment; -import org.opensearch.index.IndexSettings; -import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; -import org.opensearch.index.shard.ShardPath; -import org.opensearch.index.shard.ShardStateMetadata; -import org.opensearch.index.store.Store; import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.store.ShardAttributes; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; import java.util.List; +import java.util.Map; import java.util.Objects; +import static org.opensearch.gateway.TransportNodesGatewayStartedShardHelper.getShardInfoOnLocalNode; + /** * This transport action is used to fetch the shard version from each node during primary allocation in {@link GatewayAllocator}. * We use this to find out which node holds the latest shard version and which of them used to be a primary in order to allocate @@ -124,7 +121,14 @@ public TransportNodesListGatewayStartedShards( } @Override - public void list(ShardId shardId, String customDataPath, DiscoveryNode[] nodes, ActionListener<NodesGatewayStartedShards> listener) { + public void list( + Map<ShardId, ShardAttributes> shardAttributesMap, + DiscoveryNode[] nodes, + ActionListener<NodesGatewayStartedShards> listener + ) { + assert shardAttributesMap.size() == 1 : "only one shard should be specified"; + final ShardId shardId = shardAttributesMap.keySet().iterator().next(); + final String customDataPath = shardAttributesMap.get(shardId).getCustomDataPath(); execute(new Request(shardId, customDataPath, nodes), listener); } @@ -150,72 +154,23 @@ protected NodesGatewayStartedShards newResponse( @Override protected NodeGatewayStartedShards nodeOperation(NodeRequest request) { try { - final ShardId shardId = request.getShardId(); - logger.trace("{} loading local shard state info", shardId); - ShardStateMetadata shardStateMetadata = ShardStateMetadata.FORMAT.loadLatestState( + TransportNodesListGatewayStartedShardsBatch.NodeGatewayStartedShard shardInfo = getShardInfoOnLocalNode( logger, + request.getShardId(), namedXContentRegistry, - nodeEnv.availableShardPaths(request.shardId) + nodeEnv, + indicesService, + request.getCustomDataPath(), + settings, + clusterService + ); + return new NodeGatewayStartedShards( + clusterService.localNode(), + shardInfo.allocationId(), + shardInfo.primary(), + shardInfo.replicationCheckpoint(), + shardInfo.storeException() ); - if (shardStateMetadata != null) { - if (indicesService.getShardOrNull(shardId) == null - && shardStateMetadata.indexDataLocation == ShardStateMetadata.IndexDataLocation.LOCAL) { - final String customDataPath; - if (request.getCustomDataPath() != null) { - customDataPath = request.getCustomDataPath(); - } else { - // TODO: Fallback for BWC with older OpenSearch versions. 
- // Remove once request.getCustomDataPath() always returns non-null - final IndexMetadata metadata = clusterService.state().metadata().index(shardId.getIndex()); - if (metadata != null) { - customDataPath = new IndexSettings(metadata, settings).customDataPath(); - } else { - logger.trace("{} node doesn't have meta data for the requests index", shardId); - throw new OpenSearchException("node doesn't have meta data for index " + shardId.getIndex()); - } - } - // we don't have an open shard on the store, validate the files on disk are openable - ShardPath shardPath = null; - try { - shardPath = ShardPath.loadShardPath(logger, nodeEnv, shardId, customDataPath); - if (shardPath == null) { - throw new IllegalStateException(shardId + " no shard path found"); - } - Store.tryOpenIndex(shardPath.resolveIndex(), shardId, nodeEnv::shardLock, logger); - } catch (Exception exception) { - final ShardPath finalShardPath = shardPath; - logger.trace( - () -> new ParameterizedMessage( - "{} can't open index for shard [{}] in path [{}]", - shardId, - shardStateMetadata, - (finalShardPath != null) ? finalShardPath.resolveIndex() : "" - ), - exception - ); - String allocationId = shardStateMetadata.allocationId != null ? shardStateMetadata.allocationId.getId() : null; - return new NodeGatewayStartedShards( - clusterService.localNode(), - allocationId, - shardStateMetadata.primary, - null, - exception - ); - } - } - - logger.debug("{} shard state info found: [{}]", shardId, shardStateMetadata); - String allocationId = shardStateMetadata.allocationId != null ? shardStateMetadata.allocationId.getId() : null; - final IndexShard shard = indicesService.getShardOrNull(shardId); - return new NodeGatewayStartedShards( - clusterService.localNode(), - allocationId, - shardStateMetadata.primary, - shard != null ? shard.getLatestReplicationCheckpoint() : null - ); - } - logger.trace("{} no local shard info found", shardId); - return new NodeGatewayStartedShards(clusterService.localNode(), null, false, null); } catch (Exception e) { throw new OpenSearchException("failed to load started shards", e); } diff --git a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShardsBatch.java b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShardsBatch.java new file mode 100644 index 0000000000000..bc327c1b85748 --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShardsBatch.java @@ -0,0 +1,401 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.gateway; + +import org.opensearch.OpenSearchException; +import org.opensearch.action.ActionType; +import org.opensearch.action.FailedNodeException; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.nodes.BaseNodeResponse; +import org.opensearch.action.support.nodes.BaseNodesRequest; +import org.opensearch.action.support.nodes.BaseNodesResponse; +import org.opensearch.action.support.nodes.TransportNodesAction; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.env.NodeEnvironment; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.store.ShardAttributes; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import static org.opensearch.gateway.TransportNodesGatewayStartedShardHelper.getShardInfoOnLocalNode; + +/** + * This transport action is used to fetch a batch of unassigned shard versions from each node during primary allocation in {@link GatewayAllocator}. + * We use this to find out which node holds the latest shard version and which of them used to be a primary in order to allocate + * shards after node or cluster restarts.
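The batching contract the javadoc describes can be pictured as follows: one request carries the entire shard map to each node, and each node answers for all shards at once, instead of one transport round-trip per (shard, node) pair. A toy sketch under stated assumptions (records and plain strings stand in for the node-level request/response classes; this is not the OpenSearch API):

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class NodesFanOutSketch {
    // Toy per-node response: the node id plus that node's answer for every shard in the batch.
    record NodeResponse(String nodeId, Map<String, String> shardStates) {}

    public static void main(String[] args) {
        // One batch request: every shard the allocator wants state for (value = custom data path, unused here).
        Map<String, String> shardAttributesMap = Map.of("[idx][0]", "", "[idx][1]", "");
        List<NodeResponse> responses = new ArrayList<>();
        for (String node : new String[] { "node-1", "node-2" }) {
            // A single NodeRequest per node carries the whole shard map,
            // rather than one request per (shard, node) pair.
            Map<String, String> shardStates = new HashMap<>();
            shardAttributesMap.keySet().forEach(shardId -> shardStates.put(shardId, "state-from-" + node));
            responses.add(new NodeResponse(node, shardStates));
        }
        // The caller then merges the per-node maps: exactly one round-trip per node.
        responses.forEach(r -> System.out.println(r.nodeId() + " -> " + r.shardStates()));
    }
}
```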
+ * + * @opensearch.internal + */ +public class TransportNodesListGatewayStartedShardsBatch extends TransportNodesAction< + TransportNodesListGatewayStartedShardsBatch.Request, + TransportNodesListGatewayStartedShardsBatch.NodesGatewayStartedShardsBatch, + TransportNodesListGatewayStartedShardsBatch.NodeRequest, + TransportNodesListGatewayStartedShardsBatch.NodeGatewayStartedShardsBatch> + implements + AsyncShardFetch.Lister< + TransportNodesListGatewayStartedShardsBatch.NodesGatewayStartedShardsBatch, + TransportNodesListGatewayStartedShardsBatch.NodeGatewayStartedShardsBatch> { + + public static final String ACTION_NAME = "internal:gateway/local/started_shards_batch"; + public static final ActionType<NodesGatewayStartedShardsBatch> TYPE = new ActionType<>( + ACTION_NAME, + NodesGatewayStartedShardsBatch::new + ); + + private final Settings settings; + private final NodeEnvironment nodeEnv; + private final IndicesService indicesService; + private final NamedXContentRegistry namedXContentRegistry; + + @Inject + public TransportNodesListGatewayStartedShardsBatch( + Settings settings, + ThreadPool threadPool, + ClusterService clusterService, + TransportService transportService, + ActionFilters actionFilters, + NodeEnvironment env, + IndicesService indicesService, + NamedXContentRegistry namedXContentRegistry + ) { + super( + ACTION_NAME, + threadPool, + clusterService, + transportService, + actionFilters, + Request::new, + NodeRequest::new, + ThreadPool.Names.FETCH_SHARD_STARTED, + NodeGatewayStartedShardsBatch.class + ); + this.settings = settings; + this.nodeEnv = env; + this.indicesService = indicesService; + this.namedXContentRegistry = namedXContentRegistry; + } + + @Override + public void list( + Map<ShardId, ShardAttributes> shardAttributesMap, + DiscoveryNode[] nodes, + ActionListener<NodesGatewayStartedShardsBatch> listener + ) { + execute(new Request(nodes, shardAttributesMap), listener); + } + + @Override + protected NodeRequest newNodeRequest(Request request) { + return new NodeRequest(request); + } + + @Override + protected NodeGatewayStartedShardsBatch newNodeResponse(StreamInput in) throws IOException { + return new NodeGatewayStartedShardsBatch(in); + } + + @Override + protected NodesGatewayStartedShardsBatch newResponse( + Request request, + List<NodeGatewayStartedShardsBatch> responses, + List<FailedNodeException> failures + ) { + return new NodesGatewayStartedShardsBatch(clusterService.getClusterName(), responses, failures); + } + + /** + * This function is similar to the nodeOperation method of {@link TransportNodesListGatewayStartedShards}; we loop over + * the shards here and populate the data about the shards held by the local node. + * + * @param request Request containing the {@code shardAttributes} map.
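The nodeOperation that follows isolates failures per shard: an exception while loading one shard's state is recorded as that shard's entry rather than failing the whole batch. A compact stand-alone sketch of the pattern (the Result record is hypothetical, not OpenSearch code):

```java
import java.util.HashMap;
import java.util.Map;

public class PerShardIsolationSketch {
    // Hypothetical per-shard outcome: either a loaded value or the exception that broke it.
    record Result(String value, Exception failure) {}

    public static void main(String[] args) {
        Map<String, Result> shardsOnNode = new HashMap<>();
        for (String shardId : new String[] { "[idx][0]", "[idx][1]" }) {
            try {
                if (shardId.endsWith("[1]")) {
                    throw new IllegalStateException("corrupted shard path");
                }
                shardsOnNode.put(shardId, new Result("allocation-id-for-" + shardId, null));
            } catch (Exception e) {
                // The failure becomes this shard's entry; the rest of the batch still succeeds.
                shardsOnNode.put(shardId, new Result(null, e));
            }
        }
        shardsOnNode.forEach(
            (shard, r) -> System.out.println(shard + " -> " + (r.failure() == null ? r.value() : r.failure().getMessage()))
        );
    }
}
```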
+ * @return NodeGatewayStartedShardsBatch containing the data about the primary shards held by the local node + */ + @Override + protected NodeGatewayStartedShardsBatch nodeOperation(NodeRequest request) { + Map<ShardId, NodeGatewayStartedShard> shardsOnNode = new HashMap<>(); + for (ShardAttributes shardAttr : request.shardAttributes.values()) { + final ShardId shardId = shardAttr.getShardId(); + try { + shardsOnNode.put( + shardId, + getShardInfoOnLocalNode( + logger, + shardId, + namedXContentRegistry, + nodeEnv, + indicesService, + shardAttr.getCustomDataPath(), + settings, + clusterService + ) + ); + } catch (Exception e) { + shardsOnNode.put( + shardId, + new NodeGatewayStartedShard(null, false, null, new OpenSearchException("failed to load started shards", e)) + ); + } + } + return new NodeGatewayStartedShardsBatch(clusterService.localNode(), shardsOnNode); + } + + /** + * This is used in constructing the request for making the transport call to a set of other nodes. + * Refer to the {@link TransportNodesAction} class start method. + * + * @opensearch.internal + */ + public static class Request extends BaseNodesRequest<Request> { + private final Map<ShardId, ShardAttributes> shardAttributes; + + public Request(StreamInput in) throws IOException { + super(in); + shardAttributes = in.readMap(ShardId::new, ShardAttributes::new); + } + + public Request(DiscoveryNode[] nodes, Map<ShardId, ShardAttributes> shardAttributes) { + super(nodes); + this.shardAttributes = Objects.requireNonNull(shardAttributes); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeMap(shardAttributes, (o, k) -> k.writeTo(o), (o, v) -> v.writeTo(o)); + } + + public Map<ShardId, ShardAttributes> getShardAttributes() { + return shardAttributes; + } + } + + /** + * Responses received from the set of other nodes are combined into this class and sent back to the caller + * of this transport request. Refer to {@link TransportNodesAction} + * + * @opensearch.internal + */ + public static class NodesGatewayStartedShardsBatch extends BaseNodesResponse<NodeGatewayStartedShardsBatch> { + + public NodesGatewayStartedShardsBatch(StreamInput in) throws IOException { + super(in); + } + + public NodesGatewayStartedShardsBatch( + ClusterName clusterName, + List<NodeGatewayStartedShardsBatch> nodes, + List<FailedNodeException> failures + ) { + super(clusterName, nodes, failures); + } + + @Override + protected List<NodeGatewayStartedShardsBatch> readNodesFrom(StreamInput in) throws IOException { + return in.readList(NodeGatewayStartedShardsBatch::new); + } + + @Override + protected void writeNodesTo(StreamOutput out, List<NodeGatewayStartedShardsBatch> nodes) throws IOException { + out.writeList(nodes); + } + } + + /** + * The NodeRequest class is for deserializing the request received by this node from another node for this transport action.
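Both Request and NodeRequest rely on the symmetric readMap/writeMap wire pattern: a length prefix followed by key/value pairs, written via writeTo and read back through Reader constructors such as ShardId::new. A minimal java.io analogue of that framing (DataOutputStream stands in for StreamOutput; the method names mirror, but are not, the OpenSearch stream API):

```java
import java.io.*;
import java.util.LinkedHashMap;
import java.util.Map;

public class MapWireSketch {
    static void writeMap(DataOutputStream out, Map<String, Integer> map) throws IOException {
        out.writeInt(map.size());                // length prefix, as writeMap does
        for (Map.Entry<String, Integer> e : map.entrySet()) {
            out.writeUTF(e.getKey());            // key writer, e.g. (o, k) -> k.writeTo(o)
            out.writeInt(e.getValue());          // value writer, e.g. (o, v) -> v.writeTo(o)
        }
    }

    static Map<String, Integer> readMap(DataInputStream in) throws IOException {
        int size = in.readInt();
        Map<String, Integer> map = new LinkedHashMap<>();
        for (int i = 0; i < size; i++) {
            map.put(in.readUTF(), in.readInt()); // key/value readers, e.g. ShardId::new
        }
        return map;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        writeMap(new DataOutputStream(bytes), Map.of("[idx][0]", 1));
        System.out.println(readMap(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))));
    }
}
```

Keeping the write and read sides as mirror images is what makes the request deserializable on the receiving node.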
+ * This is used in {@link TransportNodesAction} + * + * @opensearch.internal + */ + public static class NodeRequest extends TransportRequest { + private final Map<ShardId, ShardAttributes> shardAttributes; + + public NodeRequest(StreamInput in) throws IOException { + super(in); + shardAttributes = in.readMap(ShardId::new, ShardAttributes::new); + } + + public NodeRequest(Request request) { + this.shardAttributes = Objects.requireNonNull(request.getShardAttributes()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeMap(shardAttributes, (o, k) -> k.writeTo(o), (o, v) -> v.writeTo(o)); + } + } + + /** + * This class encapsulates the metadata about a started shard that needs to be persisted or sent between nodes. + * This is used in {@link NodeGatewayStartedShardsBatch} to construct the response for each node, instead of + * {@link TransportNodesListGatewayStartedShards.NodeGatewayStartedShards} because we don't need to save an extra + * {@link DiscoveryNode} object like in {@link TransportNodesListGatewayStartedShards.NodeGatewayStartedShards} + * which reduces memory footprint of its objects. + * + * @opensearch.internal + */ + public static class NodeGatewayStartedShard { + private final String allocationId; + private final boolean primary; + private final Exception storeException; + private final ReplicationCheckpoint replicationCheckpoint; + + public NodeGatewayStartedShard(StreamInput in) throws IOException { + allocationId = in.readOptionalString(); + primary = in.readBoolean(); + if (in.readBoolean()) { + storeException = in.readException(); + } else { + storeException = null; + } + if (in.readBoolean()) { + replicationCheckpoint = new ReplicationCheckpoint(in); + } else { + replicationCheckpoint = null; + } + } + + public NodeGatewayStartedShard(String allocationId, boolean primary, ReplicationCheckpoint replicationCheckpoint) { + this(allocationId, primary, replicationCheckpoint, null); + } + + public NodeGatewayStartedShard( + String allocationId, + boolean primary, + ReplicationCheckpoint replicationCheckpoint, + Exception storeException + ) { + this.allocationId = allocationId; + this.primary = primary; + this.replicationCheckpoint = replicationCheckpoint; + this.storeException = storeException; + } + + public String allocationId() { + return this.allocationId; + } + + public boolean primary() { + return this.primary; + } + + public ReplicationCheckpoint replicationCheckpoint() { + return this.replicationCheckpoint; + } + + public Exception storeException() { + return this.storeException; + } + + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalString(allocationId); + out.writeBoolean(primary); + if (storeException != null) { + out.writeBoolean(true); + out.writeException(storeException); + } else { + out.writeBoolean(false); + } + if (replicationCheckpoint != null) { + out.writeBoolean(true); + replicationCheckpoint.writeTo(out); + } else { + out.writeBoolean(false); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + NodeGatewayStartedShard that = (NodeGatewayStartedShard) o; + + return primary == that.primary + && Objects.equals(allocationId, that.allocationId) + && Objects.equals(storeException, that.storeException) + && Objects.equals(replicationCheckpoint, that.replicationCheckpoint); + } + + @Override + public int hashCode() { + int result = (allocationId != null ? 
allocationId.hashCode() : 0); + result = 31 * result + (primary ? 1 : 0); + result = 31 * result + (storeException != null ? storeException.hashCode() : 0); + result = 31 * result + (replicationCheckpoint != null ? replicationCheckpoint.hashCode() : 0); + return result; + } + + @Override + public String toString() { + StringBuilder buf = new StringBuilder(); + buf.append("NodeGatewayStartedShards[").append("allocationId=").append(allocationId).append(",primary=").append(primary); + if (storeException != null) { + buf.append(",storeException=").append(storeException); + } + if (replicationCheckpoint != null) { + buf.append(",ReplicationCheckpoint=").append(replicationCheckpoint.toString()); + } + buf.append("]"); + return buf.toString(); + } + } + + /** + * This is the response from a single node, this is used in {@link NodesGatewayStartedShardsBatch} for creating + * node to its response mapping for this transport request. + * Refer {@link TransportNodesAction} start method + * + * @opensearch.internal + */ + public static class NodeGatewayStartedShardsBatch extends BaseNodeResponse { + private final Map<ShardId, NodeGatewayStartedShard> nodeGatewayStartedShardsBatch; + + public Map<ShardId, NodeGatewayStartedShard> getNodeGatewayStartedShardsBatch() { + return nodeGatewayStartedShardsBatch; + } + + public NodeGatewayStartedShardsBatch(StreamInput in) throws IOException { + super(in); + this.nodeGatewayStartedShardsBatch = in.readMap(ShardId::new, NodeGatewayStartedShard::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeMap(nodeGatewayStartedShardsBatch, (o, k) -> k.writeTo(o), (o, v) -> v.writeTo(o)); + } + + public NodeGatewayStartedShardsBatch(DiscoveryNode node, Map<ShardId, NodeGatewayStartedShard> nodeGatewayStartedShardsBatch) { + super(node); + this.nodeGatewayStartedShardsBatch = nodeGatewayStartedShardsBatch; + } + } +} diff --git a/server/src/main/java/org/opensearch/gateway/WriteStateException.java b/server/src/main/java/org/opensearch/gateway/WriteStateException.java index e60eb0c11310d..e1de211ade38f 100644 --- a/server/src/main/java/org/opensearch/gateway/WriteStateException.java +++ b/server/src/main/java/org/opensearch/gateway/WriteStateException.java @@ -31,6 +31,8 @@ package org.opensearch.gateway; +import org.opensearch.common.annotation.PublicApi; + import java.io.IOError; import java.io.IOException; import java.io.UncheckedIOException; @@ -38,8 +40,9 @@ /** * This exception is thrown when there is a problem of writing state to disk. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class WriteStateException extends IOException { private final boolean dirty; diff --git a/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java b/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java new file mode 100644 index 0000000000000..4725f40076ce2 --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java @@ -0,0 +1,616 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.gateway.remote; + +import org.opensearch.Version; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +/** + * Manifest file which contains the details of the uploaded entity metadata + * + * @opensearch.internal + */ +public class ClusterMetadataManifest implements Writeable, ToXContentFragment { + + public static final int CODEC_V0 = 0; // Older codec version, where we haven't introduced codec versions for manifest. + public static final int CODEC_V1 = 1; // In Codec V1 we have introduced global-metadata and codec version in Manifest file. + + private static final ParseField CLUSTER_TERM_FIELD = new ParseField("cluster_term"); + private static final ParseField STATE_VERSION_FIELD = new ParseField("state_version"); + private static final ParseField CLUSTER_UUID_FIELD = new ParseField("cluster_uuid"); + private static final ParseField STATE_UUID_FIELD = new ParseField("state_uuid"); + private static final ParseField OPENSEARCH_VERSION_FIELD = new ParseField("opensearch_version"); + private static final ParseField NODE_ID_FIELD = new ParseField("node_id"); + private static final ParseField COMMITTED_FIELD = new ParseField("committed"); + private static final ParseField CODEC_VERSION_FIELD = new ParseField("codec_version"); + private static final ParseField GLOBAL_METADATA_FIELD = new ParseField("global_metadata"); + private static final ParseField INDICES_FIELD = new ParseField("indices"); + private static final ParseField PREVIOUS_CLUSTER_UUID = new ParseField("previous_cluster_uuid"); + private static final ParseField CLUSTER_UUID_COMMITTED = new ParseField("cluster_uuid_committed"); + + private static long term(Object[] fields) { + return (long) fields[0]; + } + + private static long version(Object[] fields) { + return (long) fields[1]; + } + + private static String clusterUUID(Object[] fields) { + return (String) fields[2]; + } + + private static String stateUUID(Object[] fields) { + return (String) fields[3]; + } + + private static Version opensearchVersion(Object[] fields) { + return Version.fromId((int) fields[4]); + } + + private static String nodeId(Object[] fields) { + return (String) fields[5]; + } + + private static boolean committed(Object[] fields) { + return (boolean) fields[6]; + } + + private static List<UploadedIndexMetadata> indices(Object[] fields) { + return (List<UploadedIndexMetadata>) fields[7]; + } + + private static String previousClusterUUID(Object[] fields) { + return (String) fields[8]; + } + + private static boolean clusterUUIDCommitted(Object[] fields) { + return (boolean) fields[9]; + } + + private static int codecVersion(Object[] fields) { + return (int) fields[10]; + } + + private static String globalMetadataFileName(Object[] fields) { + return (String) fields[11]; + } + + private static final ConstructingObjectParser<ClusterMetadataManifest, Void> PARSER_V0 = new ConstructingObjectParser<>( + 
"cluster_metadata_manifest", + fields -> new ClusterMetadataManifest( + term(fields), + version(fields), + clusterUUID(fields), + stateUUID(fields), + opensearchVersion(fields), + nodeId(fields), + committed(fields), + CODEC_V0, + null, + indices(fields), + previousClusterUUID(fields), + clusterUUIDCommitted(fields) + ) + ); + + private static final ConstructingObjectParser<ClusterMetadataManifest, Void> PARSER_V1 = new ConstructingObjectParser<>( + "cluster_metadata_manifest", + fields -> new ClusterMetadataManifest( + term(fields), + version(fields), + clusterUUID(fields), + stateUUID(fields), + opensearchVersion(fields), + nodeId(fields), + committed(fields), + codecVersion(fields), + globalMetadataFileName(fields), + indices(fields), + previousClusterUUID(fields), + clusterUUIDCommitted(fields) + ) + ); + + private static final ConstructingObjectParser<ClusterMetadataManifest, Void> CURRENT_PARSER = PARSER_V1; + + static { + declareParser(PARSER_V0, CODEC_V0); + declareParser(PARSER_V1, CODEC_V1); + } + + private static void declareParser(ConstructingObjectParser<ClusterMetadataManifest, Void> parser, long codec_version) { + parser.declareLong(ConstructingObjectParser.constructorArg(), CLUSTER_TERM_FIELD); + parser.declareLong(ConstructingObjectParser.constructorArg(), STATE_VERSION_FIELD); + parser.declareString(ConstructingObjectParser.constructorArg(), CLUSTER_UUID_FIELD); + parser.declareString(ConstructingObjectParser.constructorArg(), STATE_UUID_FIELD); + parser.declareInt(ConstructingObjectParser.constructorArg(), OPENSEARCH_VERSION_FIELD); + parser.declareString(ConstructingObjectParser.constructorArg(), NODE_ID_FIELD); + parser.declareBoolean(ConstructingObjectParser.constructorArg(), COMMITTED_FIELD); + parser.declareObjectArray( + ConstructingObjectParser.constructorArg(), + (p, c) -> UploadedIndexMetadata.fromXContent(p), + INDICES_FIELD + ); + parser.declareString(ConstructingObjectParser.constructorArg(), PREVIOUS_CLUSTER_UUID); + parser.declareBoolean(ConstructingObjectParser.constructorArg(), CLUSTER_UUID_COMMITTED); + + if (codec_version >= CODEC_V1) { + parser.declareInt(ConstructingObjectParser.constructorArg(), CODEC_VERSION_FIELD); + parser.declareString(ConstructingObjectParser.constructorArg(), GLOBAL_METADATA_FIELD); + } + } + + private final int codecVersion; + private final String globalMetadataFileName; + private final List<UploadedIndexMetadata> indices; + private final long clusterTerm; + private final long stateVersion; + private final String clusterUUID; + private final String stateUUID; + private final Version opensearchVersion; + private final String nodeId; + private final boolean committed; + private final String previousClusterUUID; + private final boolean clusterUUIDCommitted; + + public List<UploadedIndexMetadata> getIndices() { + return indices; + } + + public long getClusterTerm() { + return clusterTerm; + } + + public long getStateVersion() { + return stateVersion; + } + + public String getClusterUUID() { + return clusterUUID; + } + + public String getStateUUID() { + return stateUUID; + } + + public Version getOpensearchVersion() { + return opensearchVersion; + } + + public String getNodeId() { + return nodeId; + } + + public boolean isCommitted() { + return committed; + } + + public String getPreviousClusterUUID() { + return previousClusterUUID; + } + + public boolean isClusterUUIDCommitted() { + return clusterUUIDCommitted; + } + + public int getCodecVersion() { + return codecVersion; + } + + public String getGlobalMetadataFileName() { + 
return globalMetadataFileName; + } + + public ClusterMetadataManifest( + long clusterTerm, + long version, + String clusterUUID, + String stateUUID, + Version opensearchVersion, + String nodeId, + boolean committed, + int codecVersion, + String globalMetadataFileName, + List<UploadedIndexMetadata> indices, + String previousClusterUUID, + boolean clusterUUIDCommitted + ) { + this.clusterTerm = clusterTerm; + this.stateVersion = version; + this.clusterUUID = clusterUUID; + this.stateUUID = stateUUID; + this.opensearchVersion = opensearchVersion; + this.nodeId = nodeId; + this.committed = committed; + this.codecVersion = codecVersion; + this.globalMetadataFileName = globalMetadataFileName; + this.indices = Collections.unmodifiableList(indices); + this.previousClusterUUID = previousClusterUUID; + this.clusterUUIDCommitted = clusterUUIDCommitted; + } + + public ClusterMetadataManifest(StreamInput in) throws IOException { + this.clusterTerm = in.readVLong(); + this.stateVersion = in.readVLong(); + this.clusterUUID = in.readString(); + this.stateUUID = in.readString(); + this.opensearchVersion = Version.fromId(in.readInt()); + this.nodeId = in.readString(); + this.committed = in.readBoolean(); + this.indices = Collections.unmodifiableList(in.readList(UploadedIndexMetadata::new)); + this.previousClusterUUID = in.readString(); + this.clusterUUIDCommitted = in.readBoolean(); + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + this.codecVersion = in.readInt(); + this.globalMetadataFileName = in.readString(); + } else { + this.codecVersion = CODEC_V0; // Default codec + this.globalMetadataFileName = null; + } + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(ClusterMetadataManifest manifest) { + return new Builder(manifest); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(CLUSTER_TERM_FIELD.getPreferredName(), getClusterTerm()) + .field(STATE_VERSION_FIELD.getPreferredName(), getStateVersion()) + .field(CLUSTER_UUID_FIELD.getPreferredName(), getClusterUUID()) + .field(STATE_UUID_FIELD.getPreferredName(), getStateUUID()) + .field(OPENSEARCH_VERSION_FIELD.getPreferredName(), getOpensearchVersion().id) + .field(NODE_ID_FIELD.getPreferredName(), getNodeId()) + .field(COMMITTED_FIELD.getPreferredName(), isCommitted()); + builder.startArray(INDICES_FIELD.getPreferredName()); + { + for (UploadedIndexMetadata uploadedIndexMetadata : indices) { + uploadedIndexMetadata.toXContent(builder, params); + } + } + builder.endArray(); + builder.field(PREVIOUS_CLUSTER_UUID.getPreferredName(), getPreviousClusterUUID()); + builder.field(CLUSTER_UUID_COMMITTED.getPreferredName(), isClusterUUIDCommitted()); + if (onOrAfterCodecVersion(CODEC_V1)) { + builder.field(CODEC_VERSION_FIELD.getPreferredName(), getCodecVersion()); + builder.field(GLOBAL_METADATA_FIELD.getPreferredName(), getGlobalMetadataFileName()); + } + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(clusterTerm); + out.writeVLong(stateVersion); + out.writeString(clusterUUID); + out.writeString(stateUUID); + out.writeInt(opensearchVersion.id); + out.writeString(nodeId); + out.writeBoolean(committed); + out.writeCollection(indices); + out.writeString(previousClusterUUID); + out.writeBoolean(clusterUUIDCommitted); + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeInt(codecVersion); + out.writeString(globalMetadataFileName); + } + } + + @Override + 
public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final ClusterMetadataManifest that = (ClusterMetadataManifest) o; + return Objects.equals(indices, that.indices) + && clusterTerm == that.clusterTerm + && stateVersion == that.stateVersion + && Objects.equals(clusterUUID, that.clusterUUID) + && Objects.equals(stateUUID, that.stateUUID) + && Objects.equals(opensearchVersion, that.opensearchVersion) + && Objects.equals(nodeId, that.nodeId) + && Objects.equals(committed, that.committed) + && Objects.equals(previousClusterUUID, that.previousClusterUUID) + && Objects.equals(clusterUUIDCommitted, that.clusterUUIDCommitted) + && Objects.equals(globalMetadataFileName, that.globalMetadataFileName) + && Objects.equals(codecVersion, that.codecVersion); + } + + @Override + public int hashCode() { + return Objects.hash( + codecVersion, + globalMetadataFileName, + indices, + clusterTerm, + stateVersion, + clusterUUID, + stateUUID, + opensearchVersion, + nodeId, + committed, + previousClusterUUID, + clusterUUIDCommitted + ); + } + + @Override + public String toString() { + return Strings.toString(MediaTypeRegistry.JSON, this); + } + + public boolean onOrAfterCodecVersion(int codecVersion) { + return this.codecVersion >= codecVersion; + } + + public static ClusterMetadataManifest fromXContentV0(XContentParser parser) throws IOException { + return PARSER_V0.parse(parser, null); + } + + public static ClusterMetadataManifest fromXContent(XContentParser parser) throws IOException { + return CURRENT_PARSER.parse(parser, null); + } + + /** + * Builder for ClusterMetadataManifest + * + * @opensearch.internal + */ + public static class Builder { + + private String globalMetadataFileName; + private int codecVersion; + private List<UploadedIndexMetadata> indices; + private long clusterTerm; + private long stateVersion; + private String clusterUUID; + private String stateUUID; + private Version opensearchVersion; + private String nodeId; + private String previousClusterUUID; + private boolean committed; + private boolean clusterUUIDCommitted; + + public Builder indices(List<UploadedIndexMetadata> indices) { + this.indices = indices; + return this; + } + + public Builder codecVersion(int codecVersion) { + this.codecVersion = codecVersion; + return this; + } + + public Builder globalMetadataFileName(String globalMetadataFileName) { + this.globalMetadataFileName = globalMetadataFileName; + return this; + } + + public Builder clusterTerm(long clusterTerm) { + this.clusterTerm = clusterTerm; + return this; + } + + public Builder stateVersion(long stateVersion) { + this.stateVersion = stateVersion; + return this; + } + + public Builder clusterUUID(String clusterUUID) { + this.clusterUUID = clusterUUID; + return this; + } + + public Builder stateUUID(String stateUUID) { + this.stateUUID = stateUUID; + return this; + } + + public Builder opensearchVersion(Version opensearchVersion) { + this.opensearchVersion = opensearchVersion; + return this; + } + + public Builder nodeId(String nodeId) { + this.nodeId = nodeId; + return this; + } + + public Builder committed(boolean committed) { + this.committed = committed; + return this; + } + + public List<UploadedIndexMetadata> getIndices() { + return indices; + } + + public Builder previousClusterUUID(String previousClusterUUID) { + this.previousClusterUUID = previousClusterUUID; + return this; + } + + public Builder clusterUUIDCommitted(boolean clusterUUIDCommitted) { + 
this.clusterUUIDCommitted = clusterUUIDCommitted; + return this; + } + + public Builder() { + indices = new ArrayList<>(); + } + + public Builder(ClusterMetadataManifest manifest) { + this.clusterTerm = manifest.clusterTerm; + this.stateVersion = manifest.stateVersion; + this.clusterUUID = manifest.clusterUUID; + this.stateUUID = manifest.stateUUID; + this.opensearchVersion = manifest.opensearchVersion; + this.nodeId = manifest.nodeId; + this.committed = manifest.committed; + this.globalMetadataFileName = manifest.globalMetadataFileName; + this.codecVersion = manifest.codecVersion; + this.indices = new ArrayList<>(manifest.indices); + this.previousClusterUUID = manifest.previousClusterUUID; + this.clusterUUIDCommitted = manifest.clusterUUIDCommitted; + } + + public ClusterMetadataManifest build() { + return new ClusterMetadataManifest( + clusterTerm, + stateVersion, + clusterUUID, + stateUUID, + opensearchVersion, + nodeId, + committed, + codecVersion, + globalMetadataFileName, + indices, + previousClusterUUID, + clusterUUIDCommitted + ); + } + + } + + /** + * Metadata for uploaded index metadata + * + * @opensearch.internal + */ + public static class UploadedIndexMetadata implements Writeable, ToXContentFragment { + + private static final ParseField INDEX_NAME_FIELD = new ParseField("index_name"); + private static final ParseField INDEX_UUID_FIELD = new ParseField("index_uuid"); + private static final ParseField UPLOADED_FILENAME_FIELD = new ParseField("uploaded_filename"); + + private static String indexName(Object[] fields) { + return (String) fields[0]; + } + + private static String indexUUID(Object[] fields) { + return (String) fields[1]; + } + + private static String uploadedFilename(Object[] fields) { + return (String) fields[2]; + } + + private static final ConstructingObjectParser<UploadedIndexMetadata, Void> PARSER = new ConstructingObjectParser<>( + "uploaded_index_metadata", + fields -> new UploadedIndexMetadata(indexName(fields), indexUUID(fields), uploadedFilename(fields)) + ); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), INDEX_NAME_FIELD); + PARSER.declareString(ConstructingObjectParser.constructorArg(), INDEX_UUID_FIELD); + PARSER.declareString(ConstructingObjectParser.constructorArg(), UPLOADED_FILENAME_FIELD); + } + + private final String indexName; + private final String indexUUID; + private final String uploadedFilename; + + public UploadedIndexMetadata(String indexName, String indexUUID, String uploadedFileName) { + this.indexName = indexName; + this.indexUUID = indexUUID; + this.uploadedFilename = uploadedFileName; + } + + public UploadedIndexMetadata(StreamInput in) throws IOException { + this.indexName = in.readString(); + this.indexUUID = in.readString(); + this.uploadedFilename = in.readString(); + } + + public String getUploadedFilePath() { + return uploadedFilename; + } + + public String getUploadedFilename() { + String[] splitPath = uploadedFilename.split("/"); + return splitPath[splitPath.length - 1]; + } + + public String getIndexName() { + return indexName; + } + + public String getIndexUUID() { + return indexUUID; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .field(INDEX_NAME_FIELD.getPreferredName(), getIndexName()) + .field(INDEX_UUID_FIELD.getPreferredName(), getIndexUUID()) + .field(UPLOADED_FILENAME_FIELD.getPreferredName(), getUploadedFilePath()) + .endObject(); + } + + @Override + public void writeTo(StreamOutput 
out) throws IOException { + out.writeString(indexName); + out.writeString(indexUUID); + out.writeString(uploadedFilename); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final UploadedIndexMetadata that = (UploadedIndexMetadata) o; + return Objects.equals(indexName, that.indexName) + && Objects.equals(indexUUID, that.indexUUID) + && Objects.equals(uploadedFilename, that.uploadedFilename); + } + + @Override + public int hashCode() { + return Objects.hash(indexName, indexUUID, uploadedFilename); + } + + @Override + public String toString() { + return Strings.toString(MediaTypeRegistry.JSON, this); + } + + public static UploadedIndexMetadata fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + } +} diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java new file mode 100644 index 0000000000000..c892b475d71da --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -0,0 +1,1335 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gateway.remote; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.Version; +import org.opensearch.action.LatchedActionListener; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.common.Nullable; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobMetadata; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Setting.Property; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedIndexMetadata; +import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.index.translog.transfer.BlobStoreTransferService; +import org.opensearch.node.Node; +import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.Repository; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.repositories.blobstore.ChecksumBlobStoreFormat; +import org.opensearch.threadpool.ThreadPool; + +import java.io.Closeable; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Base64; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import 
java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; +import java.util.function.LongSupplier; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import static org.opensearch.gateway.PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled; + +/** + * A Service which provides APIs to upload and download cluster metadata from remote store. + * + * @opensearch.internal + */ +public class RemoteClusterStateService implements Closeable { + + public static final String METADATA_NAME_FORMAT = "%s.dat"; + + public static final String METADATA_MANIFEST_NAME_FORMAT = "%s"; + + public static final int RETAINED_MANIFESTS = 10; + + public static final String DELIMITER = "__"; + + private static final Logger logger = LogManager.getLogger(RemoteClusterStateService.class); + + public static final TimeValue INDEX_METADATA_UPLOAD_TIMEOUT_DEFAULT = TimeValue.timeValueMillis(20000); + + public static final TimeValue GLOBAL_METADATA_UPLOAD_TIMEOUT_DEFAULT = TimeValue.timeValueMillis(20000); + + public static final TimeValue METADATA_MANIFEST_UPLOAD_TIMEOUT_DEFAULT = TimeValue.timeValueMillis(20000); + + public static final Setting<TimeValue> INDEX_METADATA_UPLOAD_TIMEOUT_SETTING = Setting.timeSetting( + "cluster.remote_store.state.index_metadata.upload_timeout", + INDEX_METADATA_UPLOAD_TIMEOUT_DEFAULT, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public static final Setting<TimeValue> GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING = Setting.timeSetting( + "cluster.remote_store.state.global_metadata.upload_timeout", + GLOBAL_METADATA_UPLOAD_TIMEOUT_DEFAULT, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public static final Setting<TimeValue> METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING = Setting.timeSetting( + "cluster.remote_store.state.metadata_manifest.upload_timeout", + METADATA_MANIFEST_UPLOAD_TIMEOUT_DEFAULT, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public static final ChecksumBlobStoreFormat<IndexMetadata> INDEX_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( + "index-metadata", + METADATA_NAME_FORMAT, + IndexMetadata::fromXContent + ); + + public static final ChecksumBlobStoreFormat<Metadata> GLOBAL_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( + "metadata", + METADATA_NAME_FORMAT, + Metadata::fromXContent + ); + + /** + * Manifest format compatible with older codec v0, where codec version was missing. + */ + public static final ChecksumBlobStoreFormat<ClusterMetadataManifest> CLUSTER_METADATA_MANIFEST_FORMAT_V0 = + new ChecksumBlobStoreFormat<>("cluster-metadata-manifest", METADATA_MANIFEST_NAME_FORMAT, ClusterMetadataManifest::fromXContentV0); + + /** + * Manifest format compatible with codec v1, where we introduced codec versions/global metadata. 
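 * <p>Readers choose between this format and the V0 format from the trailing codec
 * segment of the manifest file name (see {@code getClusterMetadataManifestBlobStoreFormat}
 * later in this file); names without a codec segment are treated as codec V0.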
+ */ + public static final ChecksumBlobStoreFormat<ClusterMetadataManifest> CLUSTER_METADATA_MANIFEST_FORMAT = new ChecksumBlobStoreFormat<>( + "cluster-metadata-manifest", + METADATA_MANIFEST_NAME_FORMAT, + ClusterMetadataManifest::fromXContent + ); + + /** + * Used to specify if cluster state metadata should be published to remote store + */ + public static final Setting<Boolean> REMOTE_CLUSTER_STATE_ENABLED_SETTING = Setting.boolSetting( + "cluster.remote_store.state.enabled", + false, + Property.NodeScope, + Property.Final + ); + + public static final String CLUSTER_STATE_PATH_TOKEN = "cluster-state"; + public static final String INDEX_PATH_TOKEN = "index"; + public static final String GLOBAL_METADATA_PATH_TOKEN = "global-metadata"; + public static final String MANIFEST_PATH_TOKEN = "manifest"; + public static final String MANIFEST_FILE_PREFIX = "manifest"; + public static final String METADATA_FILE_PREFIX = "metadata"; + public static final int SPLITED_MANIFEST_FILE_LENGTH = 6; // file name manifest__term__version__C/P__timestamp__codecversion + + private final String nodeId; + private final Supplier<RepositoriesService> repositoriesService; + private final Settings settings; + private final LongSupplier relativeTimeNanosSupplier; + private final ThreadPool threadpool; + private BlobStoreRepository blobStoreRepository; + private BlobStoreTransferService blobStoreTransferService; + private volatile TimeValue slowWriteLoggingThreshold; + + private volatile TimeValue indexMetadataUploadTimeout; + private volatile TimeValue globalMetadataUploadTimeout; + private volatile TimeValue metadataManifestUploadTimeout; + + private final AtomicBoolean deleteStaleMetadataRunning = new AtomicBoolean(false); + private final RemotePersistenceStats remoteStateStats; + public static final int INDEX_METADATA_CURRENT_CODEC_VERSION = 1; + public static final int MANIFEST_CURRENT_CODEC_VERSION = ClusterMetadataManifest.CODEC_V1; + public static final int GLOBAL_METADATA_CURRENT_CODEC_VERSION = 1; + + // ToXContent Params with gateway mode. + // We are using gateway context mode to persist all custom metadata. 
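// For illustration only: a metadata custom deciding whether to serialize itself would
// consult these params roughly as (API usage assumed from how context params are read
// elsewhere in this codebase):
//   Metadata.CONTEXT_MODE_GATEWAY.equals(params.param(Metadata.CONTEXT_MODE_PARAM))
// so sections flagged for the gateway context are included in the uploaded blob.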
+ public static final ToXContent.Params FORMAT_PARAMS;
+ static {
+     Map<String, String> params = new HashMap<>(1);
+     params.put(Metadata.CONTEXT_MODE_PARAM, Metadata.CONTEXT_MODE_GATEWAY);
+     FORMAT_PARAMS = new ToXContent.MapParams(params);
+ }
+
+ public RemoteClusterStateService(
+     String nodeId,
+     Supplier<RepositoriesService> repositoriesService,
+     Settings settings,
+     ClusterSettings clusterSettings,
+     LongSupplier relativeTimeNanosSupplier,
+     ThreadPool threadPool
+ ) {
+     assert isRemoteStoreClusterStateEnabled(settings) : "Remote cluster state is not enabled";
+     this.nodeId = nodeId;
+     this.repositoriesService = repositoriesService;
+     this.settings = settings;
+     this.relativeTimeNanosSupplier = relativeTimeNanosSupplier;
+     this.threadpool = threadPool;
+     this.slowWriteLoggingThreshold = clusterSettings.get(SLOW_WRITE_LOGGING_THRESHOLD);
+     this.indexMetadataUploadTimeout = clusterSettings.get(INDEX_METADATA_UPLOAD_TIMEOUT_SETTING);
+     this.globalMetadataUploadTimeout = clusterSettings.get(GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING);
+     this.metadataManifestUploadTimeout = clusterSettings.get(METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING);
+     clusterSettings.addSettingsUpdateConsumer(SLOW_WRITE_LOGGING_THRESHOLD, this::setSlowWriteLoggingThreshold);
+     clusterSettings.addSettingsUpdateConsumer(INDEX_METADATA_UPLOAD_TIMEOUT_SETTING, this::setIndexMetadataUploadTimeout);
+     clusterSettings.addSettingsUpdateConsumer(GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING, this::setGlobalMetadataUploadTimeout);
+     clusterSettings.addSettingsUpdateConsumer(METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING, this::setMetadataManifestUploadTimeout);
+     this.remoteStateStats = new RemotePersistenceStats();
+ }
+
+ private BlobStoreTransferService getBlobStoreTransferService() {
+     if (blobStoreTransferService == null) {
+         blobStoreTransferService = new BlobStoreTransferService(blobStoreRepository.blobStore(), threadpool);
+     }
+     return blobStoreTransferService;
+ }
+
+ /**
+  * Uploads the entire cluster state metadata (global metadata, all index metadata, and a manifest
+  * describing them) to the configured blob store. This method should be invoked only by the elected
+  * cluster manager when the remote cluster state is enabled.
+  *
+  * @return A manifest object which contains the details of uploaded entity metadata.
+  */
+ @Nullable
+ public ClusterMetadataManifest writeFullMetadata(ClusterState clusterState, String previousClusterUUID) throws IOException {
+     final long startTimeNanos = relativeTimeNanosSupplier.getAsLong();
+     if (clusterState.nodes().isLocalNodeElectedClusterManager() == false) {
+         logger.error("Local node is not elected cluster manager. Exiting");
+         return null;
+     }
+
+     // TODO: we can upload global metadata and index metadata in parallel. [issue: #10645]
+     // Write global metadata
+     String globalMetadataFile = writeGlobalMetadata(clusterState);
+
+     // TODO: any validations needed before/after upload?
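// Illustrative call sequence (hypothetical caller, method names from this file): the
// coordination layer on the elected cluster manager would typically do
//   ClusterMetadataManifest manifest = service.writeFullMetadata(state, previousClusterUUID);
// and later, once the state is committed,
//   service.markLastStateAsCommitted(state, manifest);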
+ final List<UploadedIndexMetadata> allUploadedIndexMetadata = writeIndexMetadataParallel( + clusterState, + new ArrayList<>(clusterState.metadata().indices().values()) + ); + final ClusterMetadataManifest manifest = uploadManifest( + clusterState, + allUploadedIndexMetadata, + previousClusterUUID, + globalMetadataFile, + false + ); + final long durationMillis = TimeValue.nsecToMSec(relativeTimeNanosSupplier.getAsLong() - startTimeNanos); + remoteStateStats.stateSucceeded(); + remoteStateStats.stateTook(durationMillis); + if (durationMillis >= slowWriteLoggingThreshold.getMillis()) { + logger.warn( + "writing cluster state took [{}ms] which is above the warn threshold of [{}]; " + "wrote full state with [{}] indices", + durationMillis, + slowWriteLoggingThreshold, + allUploadedIndexMetadata.size() + ); + } else { + logger.info( + "writing cluster state took [{}ms]; " + "wrote full state with [{}] indices and global metadata", + durationMillis, + allUploadedIndexMetadata.size() + ); + } + return manifest; + } + + /** + * This method uploads the diff between the previous cluster state and the current cluster state. The previous manifest file is needed to create the new + * manifest. The new manifest file is created by using the unchanged metadata from the previous manifest and the new metadata changes from the current + * cluster state. + * + * @return The uploaded ClusterMetadataManifest file + */ + @Nullable + public ClusterMetadataManifest writeIncrementalMetadata( + ClusterState previousClusterState, + ClusterState clusterState, + ClusterMetadataManifest previousManifest + ) throws IOException { + final long startTimeNanos = relativeTimeNanosSupplier.getAsLong(); + if (clusterState.nodes().isLocalNodeElectedClusterManager() == false) { + logger.error("Local node is not elected cluster manager. Exiting"); + return null; + } + assert previousClusterState.metadata().coordinationMetadata().term() == clusterState.metadata().coordinationMetadata().term(); + + // Write Global Metadata + final boolean updateGlobalMetadata = Metadata.isGlobalStateEquals( + previousClusterState.metadata(), + clusterState.metadata() + ) == false; + String globalMetadataFile; + // For migration case from codec V0 to V1, we have added null check on global metadata file, + // If file is empty and codec is 1 then write global metadata. 
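// Decision sketch for the check below (illustrative):
//   previous manifest          | global metadata changed | action
//   codec V0 (file name null)  | either                  | upload global metadata
//   codec V1                   | yes                     | upload global metadata
//   codec V1                   | no                      | reuse previous file name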
+     if (updateGlobalMetadata || previousManifest.getGlobalMetadataFileName() == null) {
+         globalMetadataFile = writeGlobalMetadata(clusterState);
+     } else {
+         logger.debug("Global metadata has not been updated in the cluster state; skipping its upload");
+         globalMetadataFile = previousManifest.getGlobalMetadataFileName();
+     }
+
+     // Write index metadata
+     final Map<String, Long> previousStateIndexMetadataVersionByName = new HashMap<>();
+     for (final IndexMetadata indexMetadata : previousClusterState.metadata().indices().values()) {
+         previousStateIndexMetadataVersionByName.put(indexMetadata.getIndex().getName(), indexMetadata.getVersion());
+     }
+
+     int numIndicesUpdated = 0;
+     int numIndicesUnchanged = 0;
+     final Map<String, ClusterMetadataManifest.UploadedIndexMetadata> allUploadedIndexMetadata = previousManifest.getIndices()
+         .stream()
+         .collect(Collectors.toMap(UploadedIndexMetadata::getIndexName, Function.identity()));
+
+     List<IndexMetadata> toUpload = new ArrayList<>();
+
+     for (final IndexMetadata indexMetadata : clusterState.metadata().indices().values()) {
+         final Long previousVersion = previousStateIndexMetadataVersionByName.get(indexMetadata.getIndex().getName());
+         if (previousVersion == null || indexMetadata.getVersion() != previousVersion) {
+             logger.debug(
+                 "updating metadata for [{}], changing version from [{}] to [{}]",
+                 indexMetadata.getIndex(),
+                 previousVersion,
+                 indexMetadata.getVersion()
+             );
+             numIndicesUpdated++;
+             toUpload.add(indexMetadata);
+         } else {
+             numIndicesUnchanged++;
+         }
+         previousStateIndexMetadataVersionByName.remove(indexMetadata.getIndex().getName());
+     }
+
+     List<UploadedIndexMetadata> uploadedIndexMetadataList = writeIndexMetadataParallel(clusterState, toUpload);
+     uploadedIndexMetadataList.forEach(
+         uploadedIndexMetadata -> allUploadedIndexMetadata.put(uploadedIndexMetadata.getIndexName(), uploadedIndexMetadata)
+     );
+
+     for (String removedIndexName : previousStateIndexMetadataVersionByName.keySet()) {
+         allUploadedIndexMetadata.remove(removedIndexName);
+     }
+     final ClusterMetadataManifest manifest = uploadManifest(
+         clusterState,
+         new ArrayList<>(allUploadedIndexMetadata.values()),
+         previousManifest.getPreviousClusterUUID(),
+         globalMetadataFile,
+         false
+     );
+     deleteStaleClusterMetadata(clusterState.getClusterName().value(), clusterState.metadata().clusterUUID(), RETAINED_MANIFESTS);
+
+     final long durationMillis = TimeValue.nsecToMSec(relativeTimeNanosSupplier.getAsLong() - startTimeNanos);
+     remoteStateStats.stateSucceeded();
+     remoteStateStats.stateTook(durationMillis);
+     if (durationMillis >= slowWriteLoggingThreshold.getMillis()) {
+         logger.warn(
+             "writing cluster state took [{}ms] which is above the warn threshold of [{}]; "
+                 + "wrote metadata for [{}] indices and skipped [{}] unchanged indices, global metadata updated: [{}]",
+             durationMillis,
+             slowWriteLoggingThreshold,
+             numIndicesUpdated,
+             numIndicesUnchanged,
+             updateGlobalMetadata
+         );
+     } else {
+         logger.info(
+             "writing cluster state for version [{}] took [{}ms]; "
+                 + "wrote metadata for [{}] indices and skipped [{}] unchanged indices, global metadata updated: [{}]",
+             manifest.getStateVersion(),
+             durationMillis,
+             numIndicesUpdated,
+             numIndicesUnchanged,
+             updateGlobalMetadata
+         );
+     }
+     return manifest;
+ }
+
+ /**
+  * Uploads the provided ClusterState's global metadata to the remote store.
+  * The call is blocking, so the method waits for the upload to finish and then returns.
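  * <p>For illustration, this timeout corresponds to the dynamic setting declared above and
  * could be tuned at runtime (sketch; standard cluster settings API assumed):
  * <pre>
  * PUT _cluster/settings
  * { "persistent": { "cluster.remote_store.state.global_metadata.upload_timeout": "30s" } }
  * </pre>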
+  *
+  * @param clusterState current ClusterState
+  * @return the path of the uploaded global metadata file
+  */
+ private String writeGlobalMetadata(ClusterState clusterState) throws IOException {
+
+     AtomicReference<String> result = new AtomicReference<String>();
+     AtomicReference<Exception> exceptionReference = new AtomicReference<Exception>();
+
+     final BlobContainer globalMetadataContainer = globalMetadataContainer(
+         clusterState.getClusterName().value(),
+         clusterState.metadata().clusterUUID()
+     );
+     final String globalMetadataFilename = globalMetadataFileName(clusterState.metadata());
+
+     // latch to wait until the upload finishes
+     CountDownLatch latch = new CountDownLatch(1);
+
+     LatchedActionListener<Void> completionListener = new LatchedActionListener<>(ActionListener.wrap(resp -> {
+         logger.trace("GlobalMetadata uploaded successfully.");
+         result.set(globalMetadataContainer.path().buildAsString() + globalMetadataFilename);
+     }, ex -> { exceptionReference.set(ex); }), latch);
+
+     GLOBAL_METADATA_FORMAT.writeAsyncWithUrgentPriority(
+         clusterState.metadata(),
+         globalMetadataContainer,
+         globalMetadataFilename,
+         blobStoreRepository.getCompressor(),
+         completionListener,
+         FORMAT_PARAMS
+     );
+
+     try {
+         if (latch.await(getGlobalMetadataUploadTimeout().millis(), TimeUnit.MILLISECONDS) == false) {
+             // TODO: add a metric for timed-out transfers. [Issue: #10687]
+             throw new RemoteStateTransferException("Timed out waiting for transfer of global metadata to complete");
+         }
+     } catch (InterruptedException ex) {
+         RemoteStateTransferException exception = new RemoteStateTransferException(
+             "Interrupted while waiting for transfer of global metadata to complete",
+             ex
+         );
+         Thread.currentThread().interrupt();
+         throw exception;
+     }
+     if (exceptionReference.get() != null) {
+         throw new RemoteStateTransferException(exceptionReference.get().getMessage(), exceptionReference.get());
+     }
+     return result.get();
+ }
+
+ /**
+  * Uploads the provided IndexMetadata objects to the remote store in parallel.
+  * The call is blocking, so the method waits for the uploads to finish and then returns.
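  * <p>Fan-out pattern used below: the latch is created with one count per index upload,
  * each async write counts it down on success or failure, and the caller blocks on
  * {@code latch.await(indexMetadataUploadTimeout)} before surfacing any collected
  * failures as suppressed exceptions.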
+ * + * @param clusterState current ClusterState + * @param toUpload list of IndexMetadata to upload + * @return {@code List<UploadedIndexMetadata>} list of IndexMetadata uploaded to remote + */ + private List<UploadedIndexMetadata> writeIndexMetadataParallel(ClusterState clusterState, List<IndexMetadata> toUpload) + throws IOException { + List<Exception> exceptionList = Collections.synchronizedList(new ArrayList<>(toUpload.size())); + final CountDownLatch latch = new CountDownLatch(toUpload.size()); + List<UploadedIndexMetadata> result = new ArrayList<>(toUpload.size()); + + LatchedActionListener<UploadedIndexMetadata> latchedActionListener = new LatchedActionListener<>( + ActionListener.wrap((UploadedIndexMetadata uploadedIndexMetadata) -> { + logger.trace( + String.format(Locale.ROOT, "IndexMetadata uploaded successfully for %s", uploadedIndexMetadata.getIndexName()) + ); + result.add(uploadedIndexMetadata); + }, ex -> { + assert ex instanceof RemoteStateTransferException; + logger.error( + () -> new ParameterizedMessage("Exception during transfer of IndexMetadata to Remote {}", ex.getMessage()), + ex + ); + exceptionList.add(ex); + }), + latch + ); + + for (IndexMetadata indexMetadata : toUpload) { + // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/index/ftqsCnn9TgOX/metadata_4_1690947200 + writeIndexMetadataAsync(clusterState, indexMetadata, latchedActionListener); + } + + try { + if (latch.await(getIndexMetadataUploadTimeout().millis(), TimeUnit.MILLISECONDS) == false) { + RemoteStateTransferException ex = new RemoteStateTransferException( + String.format( + Locale.ROOT, + "Timed out waiting for transfer of index metadata to complete - %s", + toUpload.stream().map(IndexMetadata::getIndex).map(Index::toString).collect(Collectors.joining("")) + ) + ); + exceptionList.forEach(ex::addSuppressed); + throw ex; + } + } catch (InterruptedException ex) { + exceptionList.forEach(ex::addSuppressed); + RemoteStateTransferException exception = new RemoteStateTransferException( + String.format( + Locale.ROOT, + "Timed out waiting for transfer of index metadata to complete - %s", + toUpload.stream().map(IndexMetadata::getIndex).map(Index::toString).collect(Collectors.joining("")) + ), + ex + ); + Thread.currentThread().interrupt(); + throw exception; + } + if (exceptionList.size() > 0) { + RemoteStateTransferException exception = new RemoteStateTransferException( + String.format( + Locale.ROOT, + "Exception during transfer of IndexMetadata to Remote %s", + toUpload.stream().map(IndexMetadata::getIndex).map(Index::toString).collect(Collectors.joining("")) + ) + ); + exceptionList.forEach(exception::addSuppressed); + throw exception; + } + return result; + } + + /** + * Allows async Upload of IndexMetadata to remote + * + * @param clusterState current ClusterState + * @param indexMetadata {@link IndexMetadata} to upload + * @param latchedActionListener listener to respond back on after upload finishes + */ + private void writeIndexMetadataAsync( + ClusterState clusterState, + IndexMetadata indexMetadata, + LatchedActionListener<UploadedIndexMetadata> latchedActionListener + ) throws IOException { + final BlobContainer indexMetadataContainer = indexMetadataContainer( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID(), + indexMetadata.getIndexUUID() + ); + final String indexMetadataFilename = indexMetadataFileName(indexMetadata); + ActionListener<Void> completionListener = ActionListener.wrap( + resp -> latchedActionListener.onResponse( + new UploadedIndexMetadata( 
+ indexMetadata.getIndex().getName(), + indexMetadata.getIndexUUID(), + indexMetadataContainer.path().buildAsString() + indexMetadataFilename + ) + ), + ex -> latchedActionListener.onFailure(new RemoteStateTransferException(indexMetadata.getIndex().toString(), ex)) + ); + + INDEX_METADATA_FORMAT.writeAsyncWithUrgentPriority( + indexMetadata, + indexMetadataContainer, + indexMetadataFilename, + blobStoreRepository.getCompressor(), + completionListener, + FORMAT_PARAMS + ); + } + + @Nullable + public ClusterMetadataManifest markLastStateAsCommitted(ClusterState clusterState, ClusterMetadataManifest previousManifest) + throws IOException { + assert clusterState != null : "Last accepted cluster state is not set"; + if (clusterState.nodes().isLocalNodeElectedClusterManager() == false) { + logger.error("Local node is not elected cluster manager. Exiting"); + return null; + } + assert previousManifest != null : "Last cluster metadata manifest is not set"; + ClusterMetadataManifest committedManifest = uploadManifest( + clusterState, + previousManifest.getIndices(), + previousManifest.getPreviousClusterUUID(), + previousManifest.getGlobalMetadataFileName(), + true + ); + deleteStaleClusterUUIDs(clusterState, committedManifest); + return committedManifest; + } + + @Override + public void close() throws IOException { + if (blobStoreRepository != null) { + IOUtils.close(blobStoreRepository); + } + } + + public void start() { + assert isRemoteStoreClusterStateEnabled(settings) == true : "Remote cluster state is not enabled"; + final String remoteStoreRepo = settings.get( + Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY + ); + assert remoteStoreRepo != null : "Remote Cluster State repository is not configured"; + final Repository repository = repositoriesService.get().repository(remoteStoreRepo); + assert repository instanceof BlobStoreRepository : "Repository should be instance of BlobStoreRepository"; + blobStoreRepository = (BlobStoreRepository) repository; + } + + private ClusterMetadataManifest uploadManifest( + ClusterState clusterState, + List<UploadedIndexMetadata> uploadedIndexMetadata, + String previousClusterUUID, + String globalClusterMetadataFileName, + boolean committed + ) throws IOException { + synchronized (this) { + final String manifestFileName = getManifestFileName(clusterState.term(), clusterState.version(), committed); + final ClusterMetadataManifest manifest = new ClusterMetadataManifest( + clusterState.term(), + clusterState.getVersion(), + clusterState.metadata().clusterUUID(), + clusterState.stateUUID(), + Version.CURRENT, + nodeId, + committed, + MANIFEST_CURRENT_CODEC_VERSION, + globalClusterMetadataFileName, + uploadedIndexMetadata, + previousClusterUUID, + clusterState.metadata().clusterUUIDCommitted() + ); + writeMetadataManifest(clusterState.getClusterName().value(), clusterState.metadata().clusterUUID(), manifest, manifestFileName); + return manifest; + } + } + + private void writeMetadataManifest(String clusterName, String clusterUUID, ClusterMetadataManifest uploadManifest, String fileName) + throws IOException { + AtomicReference<String> result = new AtomicReference<String>(); + AtomicReference<Exception> exceptionReference = new AtomicReference<Exception>(); + + final BlobContainer metadataManifestContainer = manifestContainer(clusterName, clusterUUID); + + // latch to wait until upload is not finished + CountDownLatch latch = new CountDownLatch(1); + + LatchedActionListener completionListener = new 
LatchedActionListener<>(ActionListener.wrap(resp -> {
+         logger.trace("Manifest file uploaded successfully.");
+     }, ex -> { exceptionReference.set(ex); }), latch);
+
+     CLUSTER_METADATA_MANIFEST_FORMAT.writeAsyncWithUrgentPriority(
+         uploadManifest,
+         metadataManifestContainer,
+         fileName,
+         blobStoreRepository.getCompressor(),
+         completionListener,
+         FORMAT_PARAMS
+     );
+
+     try {
+         if (latch.await(getMetadataManifestUploadTimeout().millis(), TimeUnit.MILLISECONDS) == false) {
+             throw new RemoteStateTransferException("Timed out waiting for transfer of manifest file to complete");
+         }
+     } catch (InterruptedException ex) {
+         RemoteStateTransferException exception = new RemoteStateTransferException(
+             "Interrupted while waiting for transfer of manifest file to complete",
+             ex
+         );
+         Thread.currentThread().interrupt();
+         throw exception;
+     }
+     if (exceptionReference.get() != null) {
+         throw new RemoteStateTransferException(exceptionReference.get().getMessage(), exceptionReference.get());
+     }
+     logger.debug(
+         "Metadata manifest file [{}] written during [{}] phase.",
+         fileName,
+         uploadManifest.isCommitted() ? "commit" : "publish"
+     );
+ }
+
+ private String fetchPreviousClusterUUID(String clusterName, String clusterUUID) {
+     final Optional<ClusterMetadataManifest> latestManifest = getLatestClusterMetadataManifest(clusterName, clusterUUID);
+     if (!latestManifest.isPresent()) {
+         final String previousClusterUUID = getLastKnownUUIDFromRemote(clusterName);
+         assert !clusterUUID.equals(previousClusterUUID) : "Last cluster UUID is the same as the current cluster UUID";
+         return previousClusterUUID;
+     }
+     return latestManifest.get().getPreviousClusterUUID();
+ }
+
+ private BlobContainer indexMetadataContainer(String clusterName, String clusterUUID, String indexUUID) {
+     // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/index/ftqsCnn9TgOX
+     return blobStoreRepository.blobStore()
+         .blobContainer(getCusterMetadataBasePath(clusterName, clusterUUID).add(INDEX_PATH_TOKEN).add(indexUUID));
+ }
+
+ private BlobContainer globalMetadataContainer(String clusterName, String clusterUUID) {
+     // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/global-metadata/
+     return blobStoreRepository.blobStore()
+         .blobContainer(getCusterMetadataBasePath(clusterName, clusterUUID).add(GLOBAL_METADATA_PATH_TOKEN));
+ }
+
+ private BlobContainer manifestContainer(String clusterName, String clusterUUID) {
+     // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/manifest
+     return blobStoreRepository.blobStore().blobContainer(getManifestFolderPath(clusterName, clusterUUID));
+ }
+
+ private BlobPath getCusterMetadataBasePath(String clusterName, String clusterUUID) {
+     return blobStoreRepository.basePath().add(encodeString(clusterName)).add(CLUSTER_STATE_PATH_TOKEN).add(clusterUUID);
+ }
+
+ private BlobContainer clusterUUIDContainer(String clusterName) {
+     return blobStoreRepository.blobStore()
+         .blobContainer(
+             blobStoreRepository.basePath()
+                 .add(Base64.getUrlEncoder().withoutPadding().encodeToString(clusterName.getBytes(StandardCharsets.UTF_8)))
+                 .add(CLUSTER_STATE_PATH_TOKEN)
+         );
+ }
+
+ private void setSlowWriteLoggingThreshold(TimeValue slowWriteLoggingThreshold) {
+     this.slowWriteLoggingThreshold = slowWriteLoggingThreshold;
+ }
+
+ private void setIndexMetadataUploadTimeout(TimeValue newIndexMetadataUploadTimeout) {
+     this.indexMetadataUploadTimeout = newIndexMetadataUploadTimeout;
+ }
+
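// Resulting repository layout from the container helpers above (illustrative values):
//   <base_path>/<base64(cluster_name)>/cluster-state/<cluster_uuid>/index/<index_uuid>/metadata__...
//   <base_path>/<base64(cluster_name)>/cluster-state/<cluster_uuid>/global-metadata/metadata__...
//   <base_path>/<base64(cluster_name)>/cluster-state/<cluster_uuid>/manifest/manifest__...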
+ private void setGlobalMetadataUploadTimeout(TimeValue newGlobalMetadataUploadTimeout) {
+     this.globalMetadataUploadTimeout = newGlobalMetadataUploadTimeout;
+ }
+
+ private void setMetadataManifestUploadTimeout(TimeValue newMetadataManifestUploadTimeout) {
+     this.metadataManifestUploadTimeout = newMetadataManifestUploadTimeout;
+ }
+
+ public TimeValue getIndexMetadataUploadTimeout() {
+     return this.indexMetadataUploadTimeout;
+ }
+
+ public TimeValue getGlobalMetadataUploadTimeout() {
+     return this.globalMetadataUploadTimeout;
+ }
+
+ public TimeValue getMetadataManifestUploadTimeout() {
+     return this.metadataManifestUploadTimeout;
+ }
+
+ static String getManifestFileName(long term, long version, boolean committed) {
+     // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/manifest/manifest__<inverted_term>__<inverted_version>__C/P__<inverted_timestamp>__<codec_version>
+     return String.join(
+         DELIMITER,
+         MANIFEST_PATH_TOKEN,
+         RemoteStoreUtils.invertLong(term),
+         RemoteStoreUtils.invertLong(version),
+         (committed ? "C" : "P"), // C for committed and P for published
+         RemoteStoreUtils.invertLong(System.currentTimeMillis()),
+         String.valueOf(MANIFEST_CURRENT_CODEC_VERSION) // the codec version stays in the last segment; readers parse the last
+         // segment to determine the codec version
+     );
+ }
+
+ static String indexMetadataFileName(IndexMetadata indexMetadata) {
+     // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/index/<index_UUID>/metadata__<inverted_index_metadata_version>__<inverted_timestamp>__<codec_version>
+     return String.join(
+         DELIMITER,
+         METADATA_FILE_PREFIX,
+         RemoteStoreUtils.invertLong(indexMetadata.getVersion()),
+         RemoteStoreUtils.invertLong(System.currentTimeMillis()),
+         String.valueOf(INDEX_METADATA_CURRENT_CODEC_VERSION) // the codec version stays in the last segment; readers parse the
+         // last segment to determine the codec version
+     );
+ }
+
+ private static String globalMetadataFileName(Metadata metadata) {
+     // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/global-metadata/metadata__<inverted_metadata_version>__<inverted_timestamp>__<codec_version>
+     return String.join(
+         DELIMITER,
+         METADATA_FILE_PREFIX,
+         RemoteStoreUtils.invertLong(metadata.version()),
+         RemoteStoreUtils.invertLong(System.currentTimeMillis()),
+         String.valueOf(GLOBAL_METADATA_CURRENT_CODEC_VERSION)
+     );
+ }
+
+ private BlobPath getManifestFolderPath(String clusterName, String clusterUUID) {
+     return getCusterMetadataBasePath(clusterName, clusterUUID).add(MANIFEST_PATH_TOKEN);
+ }
+
+ /**
+  * Fetch latest index metadata from remote cluster state
+  *
+  * @param clusterUUID uuid of cluster state to refer to in remote
+  * @param clusterName name of the cluster
+  * @param clusterMetadataManifest manifest file of cluster
+  * @return {@code Map<String, IndexMetadata>} latest IndexUUID to IndexMetadata map
+  */
+ private Map<String, IndexMetadata> getIndexMetadataMap(
+     String clusterName,
+     String clusterUUID,
+     ClusterMetadataManifest clusterMetadataManifest
+ ) {
+     assert Objects.equals(clusterUUID, clusterMetadataManifest.getClusterUUID()) : "Corrupt ClusterMetadataManifest found.
Cluster UUID mismatch."; + Map<String, IndexMetadata> remoteIndexMetadata = new HashMap<>(); + for (UploadedIndexMetadata uploadedIndexMetadata : clusterMetadataManifest.getIndices()) { + IndexMetadata indexMetadata = getIndexMetadata(clusterName, clusterUUID, uploadedIndexMetadata); + remoteIndexMetadata.put(uploadedIndexMetadata.getIndexUUID(), indexMetadata); + } + return remoteIndexMetadata; + } + + /** + * Fetch index metadata from remote cluster state + * + * @param clusterUUID uuid of cluster state to refer to in remote + * @param clusterName name of the cluster + * @param uploadedIndexMetadata {@link UploadedIndexMetadata} contains details about remote location of index metadata + * @return {@link IndexMetadata} + */ + private IndexMetadata getIndexMetadata(String clusterName, String clusterUUID, UploadedIndexMetadata uploadedIndexMetadata) { + BlobContainer blobContainer = indexMetadataContainer(clusterName, clusterUUID, uploadedIndexMetadata.getIndexUUID()); + try { + String[] splitPath = uploadedIndexMetadata.getUploadedFilename().split("/"); + return INDEX_METADATA_FORMAT.read( + blobContainer, + splitPath[splitPath.length - 1], + blobStoreRepository.getNamedXContentRegistry() + ); + } catch (IOException e) { + throw new IllegalStateException( + String.format(Locale.ROOT, "Error while downloading IndexMetadata - %s", uploadedIndexMetadata.getUploadedFilename()), + e + ); + } + } + + /** + * Fetch latest ClusterState from remote, including global metadata, index metadata and cluster state version + * + * @param clusterUUID uuid of cluster state to refer to in remote + * @param clusterName name of the cluster + * @return {@link IndexMetadata} + */ + public ClusterState getLatestClusterState(String clusterName, String clusterUUID) { + start(); + Optional<ClusterMetadataManifest> clusterMetadataManifest = getLatestClusterMetadataManifest(clusterName, clusterUUID); + if (clusterMetadataManifest.isEmpty()) { + throw new IllegalStateException( + String.format(Locale.ROOT, "Latest cluster metadata manifest is not present for the provided clusterUUID: %s", clusterUUID) + ); + } + // Fetch Global Metadata + Metadata globalMetadata = getGlobalMetadata(clusterName, clusterUUID, clusterMetadataManifest.get()); + + // Fetch Index Metadata + Map<String, IndexMetadata> indices = getIndexMetadataMap(clusterName, clusterUUID, clusterMetadataManifest.get()); + + Map<String, IndexMetadata> indexMetadataMap = new HashMap<>(); + indices.values().forEach(indexMetadata -> { indexMetadataMap.put(indexMetadata.getIndex().getName(), indexMetadata); }); + + return ClusterState.builder(ClusterState.EMPTY_STATE) + .version(clusterMetadataManifest.get().getStateVersion()) + .metadata(Metadata.builder(globalMetadata).indices(indexMetadataMap).build()) + .build(); + } + + private Metadata getGlobalMetadata(String clusterName, String clusterUUID, ClusterMetadataManifest clusterMetadataManifest) { + String globalMetadataFileName = clusterMetadataManifest.getGlobalMetadataFileName(); + try { + // Fetch Global metadata + if (globalMetadataFileName != null) { + String[] splitPath = globalMetadataFileName.split("/"); + return GLOBAL_METADATA_FORMAT.read( + globalMetadataContainer(clusterName, clusterUUID), + splitPath[splitPath.length - 1], + blobStoreRepository.getNamedXContentRegistry() + ); + } else { + return Metadata.EMPTY_METADATA; + } + } catch (IOException e) { + throw new IllegalStateException( + String.format(Locale.ROOT, "Error while downloading Global Metadata - %s", globalMetadataFileName), + e + ); + } 
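// Illustrative restore-path usage (hypothetical caller, method names from this file):
// a node recovering state from the remote store would call
//   ClusterState state = service.getLatestClusterState(clusterName, clusterUUID);
// which reads the latest manifest, then the global metadata blob, then each index blob.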
+ } + + /** + * Fetch latest ClusterMetadataManifest from remote state store + * + * @param clusterUUID uuid of cluster state to refer to in remote + * @param clusterName name of the cluster + * @return ClusterMetadataManifest + */ + public Optional<ClusterMetadataManifest> getLatestClusterMetadataManifest(String clusterName, String clusterUUID) { + Optional<String> latestManifestFileName = getLatestManifestFileName(clusterName, clusterUUID); + return latestManifestFileName.map(s -> fetchRemoteClusterMetadataManifest(clusterName, clusterUUID, s)); + } + + /** + * Fetch the previous cluster UUIDs from remote state store and return the most recent valid cluster UUID + * + * @param clusterName The cluster name for which previous cluster UUID is to be fetched + * @return Last valid cluster UUID + */ + public String getLastKnownUUIDFromRemote(String clusterName) { + try { + Set<String> clusterUUIDs = getAllClusterUUIDs(clusterName); + Map<String, ClusterMetadataManifest> latestManifests = getLatestManifestForAllClusterUUIDs(clusterName, clusterUUIDs); + List<String> validChain = createClusterChain(latestManifests, clusterName); + if (validChain.isEmpty()) { + return ClusterState.UNKNOWN_UUID; + } + return validChain.get(0); + } catch (IOException e) { + throw new IllegalStateException( + String.format(Locale.ROOT, "Error while fetching previous UUIDs from remote store for cluster name: %s", clusterName), + e + ); + } + } + + private Set<String> getAllClusterUUIDs(String clusterName) throws IOException { + Map<String, BlobContainer> clusterUUIDMetadata = clusterUUIDContainer(clusterName).children(); + if (clusterUUIDMetadata == null) { + return Collections.emptySet(); + } + return Collections.unmodifiableSet(clusterUUIDMetadata.keySet()); + } + + private Map<String, ClusterMetadataManifest> getLatestManifestForAllClusterUUIDs(String clusterName, Set<String> clusterUUIDs) { + Map<String, ClusterMetadataManifest> manifestsByClusterUUID = new HashMap<>(); + for (String clusterUUID : clusterUUIDs) { + try { + Optional<ClusterMetadataManifest> manifest = getLatestClusterMetadataManifest(clusterName, clusterUUID); + manifest.ifPresent(clusterMetadataManifest -> manifestsByClusterUUID.put(clusterUUID, clusterMetadataManifest)); + } catch (Exception e) { + throw new IllegalStateException( + String.format(Locale.ROOT, "Exception in fetching manifest for clusterUUID: %s", clusterUUID), + e + ); + } + } + return manifestsByClusterUUID; + } + + /** + * This method creates a valid cluster UUID chain. + * + * @param manifestsByClusterUUID Map of latest ClusterMetadataManifest for every cluster UUID + * @return List of cluster UUIDs. 
The first element is the most recent cluster UUID in the chain
+  */
+ private List<String> createClusterChain(final Map<String, ClusterMetadataManifest> manifestsByClusterUUID, final String clusterName) {
+     final List<ClusterMetadataManifest> validClusterManifests = manifestsByClusterUUID.values()
+         .stream()
+         .filter(this::isValidClusterUUID)
+         .collect(Collectors.toList());
+     final Map<String, String> clusterUUIDGraph = validClusterManifests.stream()
+         .collect(Collectors.toMap(ClusterMetadataManifest::getClusterUUID, ClusterMetadataManifest::getPreviousClusterUUID));
+     final List<String> topLevelClusterUUIDs = validClusterManifests.stream()
+         .map(ClusterMetadataManifest::getClusterUUID)
+         .filter(clusterUUID -> !clusterUUIDGraph.containsValue(clusterUUID))
+         .collect(Collectors.toList());
+
+     if (topLevelClusterUUIDs.isEmpty()) {
+         // This can occur only when there are no valid cluster UUIDs
+         assert validClusterManifests.isEmpty() : "There are no top level cluster UUIDs even when there are valid cluster UUIDs";
+         logger.info("There is no valid previous cluster UUID. All cluster UUIDs evaluated are: {}", manifestsByClusterUUID.keySet());
+         return Collections.emptyList();
+     }
+     if (topLevelClusterUUIDs.size() > 1) {
+         logger.info("Top level cluster UUIDs: {}", topLevelClusterUUIDs);
+         // If there is more than one valid top-level cluster UUID, a race condition occurred in which
+         // two or more cluster manager nodes tried to become the active cluster manager and published
+         // cluster UUIDs that follow the same previous UUID.
+         final Map<String, ClusterMetadataManifest> manifestsByClusterUUIDTrimmed = trimClusterUUIDs(
+             manifestsByClusterUUID,
+             topLevelClusterUUIDs,
+             clusterName
+         );
+         if (manifestsByClusterUUID.size() == manifestsByClusterUUIDTrimmed.size()) {
+             throw new IllegalStateException(
+                 String.format(
+                     Locale.ROOT,
+                     "The system has ended up with multiple valid cluster states in the remote store. "
+                         + "Please check their latest manifests to decide which one to keep. Valid cluster UUIDs: - %s",
+                     topLevelClusterUUIDs
+                 )
+             );
+         }
+         return createClusterChain(manifestsByClusterUUIDTrimmed, clusterName);
+     }
+     final List<String> validChain = new ArrayList<>();
+     String currentUUID = topLevelClusterUUIDs.get(0);
+     while (currentUUID != null && !ClusterState.UNKNOWN_UUID.equals(currentUUID)) {
+         validChain.add(currentUUID);
+         // walk to the previous cluster UUID of the current UUID using the clusterUUID graph
+         currentUUID = clusterUUIDGraph.get(currentUUID);
+     }
+     logger.info("Known UUIDs found in remote store: [{}]", validChain);
+     return validChain;
+ }
+
+ /**
+  * This method takes a map of manifests for different cluster UUIDs and removes the
+  * manifest of a cluster UUID if the latest metadata for that cluster UUID is equivalent
+  * to the latest metadata of its previous UUID.
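  * <p>Illustration (assumed UUIDs): if {@code uuid2 -> uuid1} and {@code uuid3 -> uuid1}
  * are both valid top-level UUIDs, and {@code uuid2}'s latest metadata equals
  * {@code uuid1}'s, then {@code uuid2} is trimmed and the chain resolves to
  * {@code uuid3 -> uuid1}.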
+ * @return Trimmed map of manifests + */ + private Map<String, ClusterMetadataManifest> trimClusterUUIDs( + final Map<String, ClusterMetadataManifest> latestManifestsByClusterUUID, + final List<String> validClusterUUIDs, + final String clusterName + ) { + final Map<String, ClusterMetadataManifest> trimmedUUIDs = new HashMap<>(latestManifestsByClusterUUID); + for (String clusterUUID : validClusterUUIDs) { + ClusterMetadataManifest currentManifest = trimmedUUIDs.get(clusterUUID); + // Here we compare the manifest of current UUID to that of previous UUID + // In case currentUUID's latest manifest is same as previous UUIDs latest manifest, + // that means it was restored from previousUUID and no IndexMetadata update was performed on it. + if (!ClusterState.UNKNOWN_UUID.equals(currentManifest.getPreviousClusterUUID())) { + ClusterMetadataManifest previousManifest = trimmedUUIDs.get(currentManifest.getPreviousClusterUUID()); + if (isMetadataEqual(currentManifest, previousManifest, clusterName) + && isGlobalMetadataEqual(currentManifest, previousManifest, clusterName)) { + trimmedUUIDs.remove(clusterUUID); + } + } + } + return trimmedUUIDs; + } + + private boolean isMetadataEqual(ClusterMetadataManifest first, ClusterMetadataManifest second, String clusterName) { + // todo clusterName can be set as final in the constructor + if (first.getIndices().size() != second.getIndices().size()) { + return false; + } + final Map<String, UploadedIndexMetadata> secondIndices = second.getIndices() + .stream() + .collect(Collectors.toMap(md -> md.getIndexName(), Function.identity())); + for (UploadedIndexMetadata uploadedIndexMetadata : first.getIndices()) { + final IndexMetadata firstIndexMetadata = getIndexMetadata(clusterName, first.getClusterUUID(), uploadedIndexMetadata); + final UploadedIndexMetadata secondUploadedIndexMetadata = secondIndices.get(uploadedIndexMetadata.getIndexName()); + if (secondUploadedIndexMetadata == null) { + return false; + } + final IndexMetadata secondIndexMetadata = getIndexMetadata(clusterName, second.getClusterUUID(), secondUploadedIndexMetadata); + if (firstIndexMetadata.equals(secondIndexMetadata) == false) { + return false; + } + } + return true; + } + + private boolean isGlobalMetadataEqual(ClusterMetadataManifest first, ClusterMetadataManifest second, String clusterName) { + Metadata secondGlobalMetadata = getGlobalMetadata(clusterName, second.getClusterUUID(), second); + Metadata firstGlobalMetadata = getGlobalMetadata(clusterName, first.getClusterUUID(), first); + return Metadata.isGlobalResourcesMetadataEquals(firstGlobalMetadata, secondGlobalMetadata); + } + + private boolean isValidClusterUUID(ClusterMetadataManifest manifest) { + return manifest.isClusterUUIDCommitted(); + } + + /** + * Fetch ClusterMetadataManifest files from remote state store in order + * + * @param clusterUUID uuid of cluster state to refer to in remote + * @param clusterName name of the cluster + * @param limit max no of files to fetch + * @return all manifest file names + */ + private List<BlobMetadata> getManifestFileNames(String clusterName, String clusterUUID, int limit) throws IllegalStateException { + try { + + /* + {@link BlobContainer#listBlobsByPrefixInSortedOrder} will list the latest manifest file first + as the manifest file name generated via {@link RemoteClusterStateService#getManifestFileName} ensures + when sorted in LEXICOGRAPHIC order the latest uploaded manifest file comes on top. 
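 For illustration (behavior of RemoteStoreUtils.invertLong as used in the file-name
 helpers above): a larger term/version/timestamp yields a lexicographically smaller
 string, so for terms 5 and 6 the name manifest__<invert(6)>__... sorts before
 manifest__<invert(5)>__..., and the newest manifest is the first blob returned.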
+ */ + return manifestContainer(clusterName, clusterUUID).listBlobsByPrefixInSortedOrder( + MANIFEST_FILE_PREFIX + DELIMITER, + limit, + BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC + ); + } catch (IOException e) { + throw new IllegalStateException("Error while fetching latest manifest file for remote cluster state", e); + } + } + + /** + * Fetch latest ClusterMetadataManifest file from remote state store + * + * @param clusterUUID uuid of cluster state to refer to in remote + * @param clusterName name of the cluster + * @return latest ClusterMetadataManifest filename + */ + private Optional<String> getLatestManifestFileName(String clusterName, String clusterUUID) throws IllegalStateException { + List<BlobMetadata> manifestFilesMetadata = getManifestFileNames(clusterName, clusterUUID, 1); + if (manifestFilesMetadata != null && !manifestFilesMetadata.isEmpty()) { + return Optional.of(manifestFilesMetadata.get(0).name()); + } + logger.info("No manifest file present in remote store for cluster name: {}, cluster UUID: {}", clusterName, clusterUUID); + return Optional.empty(); + } + + /** + * Fetch ClusterMetadataManifest from remote state store + * + * @param clusterUUID uuid of cluster state to refer to in remote + * @param clusterName name of the cluster + * @return ClusterMetadataManifest + */ + private ClusterMetadataManifest fetchRemoteClusterMetadataManifest(String clusterName, String clusterUUID, String filename) + throws IllegalStateException { + try { + return getClusterMetadataManifestBlobStoreFormat(filename).read( + manifestContainer(clusterName, clusterUUID), + filename, + blobStoreRepository.getNamedXContentRegistry() + ); + } catch (IOException e) { + throw new IllegalStateException(String.format(Locale.ROOT, "Error while downloading cluster metadata - %s", filename), e); + } + } + + private ChecksumBlobStoreFormat<ClusterMetadataManifest> getClusterMetadataManifestBlobStoreFormat(String fileName) { + long codecVersion = getManifestCodecVersion(fileName); + if (codecVersion == MANIFEST_CURRENT_CODEC_VERSION) { + return CLUSTER_METADATA_MANIFEST_FORMAT; + } else if (codecVersion == ClusterMetadataManifest.CODEC_V0) { + return CLUSTER_METADATA_MANIFEST_FORMAT_V0; + } + + throw new IllegalArgumentException("Cluster metadata manifest file is corrupted, don't have valid codec version"); + } + + private int getManifestCodecVersion(String fileName) { + String[] splitName = fileName.split(DELIMITER); + if (splitName.length == SPLITED_MANIFEST_FILE_LENGTH) { + return Integer.parseInt(splitName[splitName.length - 1]); // Last value would be codec version. + } else if (splitName.length < SPLITED_MANIFEST_FILE_LENGTH) { // Where codec is not part of file name, i.e. default codec version 0 + // is used. + return ClusterMetadataManifest.CODEC_V0; + } else { + throw new IllegalArgumentException("Manifest file name is corrupted"); + } + } + + public static String encodeString(String content) { + return Base64.getUrlEncoder().withoutPadding().encodeToString(content.getBytes(StandardCharsets.UTF_8)); + } + + public void writeMetadataFailed() { + getStats().stateFailed(); + } + + /** + * Exception for Remote state transfer. 
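  * Raised when an upload times out, the uploading thread is interrupted, or the
  * underlying blob transfer fails; bulk index uploads attach per-index failures
  * as suppressed exceptions.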
+ */ + static class RemoteStateTransferException extends RuntimeException { + + public RemoteStateTransferException(String errorDesc) { + super(errorDesc); + } + + public RemoteStateTransferException(String errorDesc, Throwable cause) { + super(errorDesc, cause); + } + } + + /** + * Purges all remote cluster state against provided cluster UUIDs + * + * @param clusterName name of the cluster + * @param clusterUUIDs clusteUUIDs for which the remote state needs to be purged + */ + void deleteStaleUUIDsClusterMetadata(String clusterName, List<String> clusterUUIDs) { + clusterUUIDs.forEach(clusterUUID -> { + getBlobStoreTransferService().deleteAsync( + ThreadPool.Names.REMOTE_PURGE, + getCusterMetadataBasePath(clusterName, clusterUUID), + new ActionListener<>() { + @Override + public void onResponse(Void unused) { + logger.info("Deleted all remote cluster metadata for cluster UUID - {}", clusterUUID); + } + + @Override + public void onFailure(Exception e) { + logger.error( + new ParameterizedMessage( + "Exception occurred while deleting all remote cluster metadata for cluster UUID {}", + clusterUUID + ), + e + ); + remoteStateStats.cleanUpAttemptFailed(); + } + } + ); + }); + } + + /** + * Deletes older than last {@code versionsToRetain} manifests. Also cleans up unreferenced IndexMetadata associated with older manifests + * + * @param clusterName name of the cluster + * @param clusterUUID uuid of cluster state to refer to in remote + * @param manifestsToRetain no of latest manifest files to keep in remote + */ + // package private for testing + void deleteStaleClusterMetadata(String clusterName, String clusterUUID, int manifestsToRetain) { + if (deleteStaleMetadataRunning.compareAndSet(false, true) == false) { + logger.info("Delete stale cluster metadata task is already in progress."); + return; + } + try { + getBlobStoreTransferService().listAllInSortedOrderAsync( + ThreadPool.Names.REMOTE_PURGE, + getManifestFolderPath(clusterName, clusterUUID), + "manifest", + Integer.MAX_VALUE, + new ActionListener<>() { + @Override + public void onResponse(List<BlobMetadata> blobMetadata) { + if (blobMetadata.size() > manifestsToRetain) { + deleteClusterMetadata( + clusterName, + clusterUUID, + blobMetadata.subList(0, manifestsToRetain - 1), + blobMetadata.subList(manifestsToRetain - 1, blobMetadata.size()) + ); + } + deleteStaleMetadataRunning.set(false); + } + + @Override + public void onFailure(Exception e) { + logger.error( + new ParameterizedMessage( + "Exception occurred while deleting Remote Cluster Metadata for clusterUUIDs {}", + clusterUUID + ) + ); + deleteStaleMetadataRunning.set(false); + } + } + ); + } catch (Exception e) { + deleteStaleMetadataRunning.set(false); + throw e; + } + } + + private void deleteClusterMetadata( + String clusterName, + String clusterUUID, + List<BlobMetadata> activeManifestBlobMetadata, + List<BlobMetadata> staleManifestBlobMetadata + ) { + try { + Set<String> filesToKeep = new HashSet<>(); + Set<String> staleManifestPaths = new HashSet<>(); + Set<String> staleIndexMetadataPaths = new HashSet<>(); + Set<String> staleGlobalMetadataPaths = new HashSet<>(); + activeManifestBlobMetadata.forEach(blobMetadata -> { + ClusterMetadataManifest clusterMetadataManifest = fetchRemoteClusterMetadataManifest( + clusterName, + clusterUUID, + blobMetadata.name() + ); + clusterMetadataManifest.getIndices() + .forEach(uploadedIndexMetadata -> filesToKeep.add(uploadedIndexMetadata.getUploadedFilename())); + filesToKeep.add(clusterMetadataManifest.getGlobalMetadataFileName()); + }); + 
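// Retention sketch (illustrative, from deleteStaleClusterMetadata above): with
// RETAINED_MANIFESTS = 10 and 25 manifests listed newest-first, entries in
// subList(0, 9) stay active and subList(9, 25) become stale, together with any
// index/global metadata blobs referenced only by those stale manifests.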
+
+    private void deleteClusterMetadata(
+        String clusterName,
+        String clusterUUID,
+        List<BlobMetadata> activeManifestBlobMetadata,
+        List<BlobMetadata> staleManifestBlobMetadata
+    ) {
+        try {
+            Set<String> filesToKeep = new HashSet<>();
+            Set<String> staleManifestPaths = new HashSet<>();
+            Set<String> staleIndexMetadataPaths = new HashSet<>();
+            Set<String> staleGlobalMetadataPaths = new HashSet<>();
+            activeManifestBlobMetadata.forEach(blobMetadata -> {
+                ClusterMetadataManifest clusterMetadataManifest = fetchRemoteClusterMetadataManifest(
+                    clusterName,
+                    clusterUUID,
+                    blobMetadata.name()
+                );
+                clusterMetadataManifest.getIndices()
+                    .forEach(uploadedIndexMetadata -> filesToKeep.add(uploadedIndexMetadata.getUploadedFilename()));
+                filesToKeep.add(clusterMetadataManifest.getGlobalMetadataFileName());
+            });
+            staleManifestBlobMetadata.forEach(blobMetadata -> {
+                ClusterMetadataManifest clusterMetadataManifest = fetchRemoteClusterMetadataManifest(
+                    clusterName,
+                    clusterUUID,
+                    blobMetadata.name()
+                );
+                staleManifestPaths.add(new BlobPath().add(MANIFEST_PATH_TOKEN).buildAsString() + blobMetadata.name());
+                if (filesToKeep.contains(clusterMetadataManifest.getGlobalMetadataFileName()) == false) {
+                    String[] globalMetadataSplitPath = clusterMetadataManifest.getGlobalMetadataFileName().split("/");
+                    staleGlobalMetadataPaths.add(
+                        new BlobPath().add(GLOBAL_METADATA_PATH_TOKEN).buildAsString() + GLOBAL_METADATA_FORMAT.blobName(
+                            globalMetadataSplitPath[globalMetadataSplitPath.length - 1]
+                        )
+                    );
+                }
+                clusterMetadataManifest.getIndices().forEach(uploadedIndexMetadata -> {
+                    if (filesToKeep.contains(uploadedIndexMetadata.getUploadedFilename()) == false) {
+                        staleIndexMetadataPaths.add(
+                            new BlobPath().add(INDEX_PATH_TOKEN).add(uploadedIndexMetadata.getIndexUUID()).buildAsString()
+                                + INDEX_METADATA_FORMAT.blobName(uploadedIndexMetadata.getUploadedFilename())
+                        );
+                    }
+                });
+            });
+
+            if (staleManifestPaths.isEmpty()) {
+                logger.debug("No stale Remote Cluster Metadata files found");
+                return;
+            }
+
+            deleteStalePaths(clusterName, clusterUUID, new ArrayList<>(staleGlobalMetadataPaths));
+            deleteStalePaths(clusterName, clusterUUID, new ArrayList<>(staleIndexMetadataPaths));
+            deleteStalePaths(clusterName, clusterUUID, new ArrayList<>(staleManifestPaths));
+        } catch (IllegalStateException e) {
+            logger.error("Error while fetching Remote Cluster Metadata manifests", e);
+        } catch (IOException e) {
+            logger.error("Error while deleting stale Remote Cluster Metadata files", e);
+            remoteStateStats.cleanUpAttemptFailed();
+        } catch (Exception e) {
+            logger.error("Unexpected error while deleting stale Remote Cluster Metadata files", e);
+            remoteStateStats.cleanUpAttemptFailed();
+        }
+    }
+
+    private void deleteStalePaths(String clusterName, String clusterUUID, List<String> stalePaths) throws IOException {
+        logger.debug(String.format(Locale.ROOT, "Deleting stale files from remote - %s", stalePaths));
+        getBlobStoreTransferService().deleteBlobs(getCusterMetadataBasePath(clusterName, clusterUUID), stalePaths);
+    }
+
+    /**
+     * Purges remote cluster state for all stale cluster UUIDs, retaining only the current and the previous cluster UUIDs
+     * @param clusterState current state of the cluster
+     * @param committedManifest last committed ClusterMetadataManifest
+     */
+    public void deleteStaleClusterUUIDs(ClusterState clusterState, ClusterMetadataManifest committedManifest) {
+        threadpool.executor(ThreadPool.Names.REMOTE_PURGE).execute(() -> {
+            String clusterName = clusterState.getClusterName().value();
+            logger.debug("Deleting stale cluster UUIDs data from remote [{}]", clusterName);
+            Set<String> allClustersUUIDsInRemote;
+            try {
+                allClustersUUIDsInRemote = new HashSet<>(getAllClusterUUIDs(clusterState.getClusterName().value()));
+            } catch (IOException e) {
+                logger.info(String.format(Locale.ROOT, "Error while fetching all cluster UUIDs for [%s]", clusterName));
+                return;
+            }
+            // Retain last 2 cluster uuids data
+            allClustersUUIDsInRemote.remove(committedManifest.getClusterUUID());
+            allClustersUUIDsInRemote.remove(committedManifest.getPreviousClusterUUID());
+            deleteStaleUUIDsClusterMetadata(clusterName, new ArrayList<>(allClustersUUIDsInRemote));
+        });
+    }
+
+    public RemotePersistenceStats getStats() {
+        return remoteStateStats;
+    }
+}
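The stale-UUID selection above is a plain set difference: every cluster UUID present in the remote store, minus the committed manifest's current and previous UUIDs, is eligible for deletion. A self-contained sketch of that selection (class and method names are illustrative):

    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    class StaleUuidSelection {
        // Everything in remote except the committed manifest's current and
        // previous cluster UUIDs is considered stale and eligible for deletion.
        static Set<String> selectStale(List<String> uuidsInRemote, String currentUuid, String previousUuid) {
            Set<String> stale = new HashSet<>(uuidsInRemote);
            stale.remove(currentUuid);
            stale.remove(previousUuid);
            return stale;
        }

        public static void main(String[] args) {
            System.out.println(selectStale(List.of("u1", "u2", "u3", "u4"), "u4", "u3"));
            // prints [u1, u2] (iteration order may vary): only the two most recent UUIDs survive
        }
    }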
diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemotePersistenceStats.java b/server/src/main/java/org/opensearch/gateway/remote/RemotePersistenceStats.java
new file mode 100644
index 0000000000000..f2330846fa23e
--- /dev/null
+++ b/server/src/main/java/org/opensearch/gateway/remote/RemotePersistenceStats.java
@@ -0,0 +1,37 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.gateway.remote;
+
+import org.opensearch.cluster.coordination.PersistedStateStats;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * Remote state related extended stats.
+ *
+ * @opensearch.internal
+ */
+public class RemotePersistenceStats extends PersistedStateStats {
+    static final String CLEANUP_ATTEMPT_FAILED_COUNT = "cleanup_attempt_failed_count";
+    static final String REMOTE_UPLOAD = "remote_upload";
+    private final AtomicLong cleanupAttemptFailedCount = new AtomicLong(0);
+
+    public RemotePersistenceStats() {
+        super(REMOTE_UPLOAD);
+        addToExtendedFields(CLEANUP_ATTEMPT_FAILED_COUNT, cleanupAttemptFailedCount);
+    }
+
+    public void cleanUpAttemptFailed() {
+        cleanupAttemptFailedCount.incrementAndGet();
+    }
+
+    public long getCleanupAttemptFailedCount() {
+        return cleanupAttemptFailedCount.get();
+    }
+}
diff --git a/server/src/main/java/org/opensearch/gateway/remote/package-info.java b/server/src/main/java/org/opensearch/gateway/remote/package-info.java
new file mode 100644
index 0000000000000..286e739f66289
--- /dev/null
+++ b/server/src/main/java/org/opensearch/gateway/remote/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +/** + * Package containing class to perform operations on remote cluster state + */ +package org.opensearch.gateway.remote; diff --git a/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java b/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java index 46f296f52ae01..257aca2b67990 100644 --- a/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java +++ b/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java @@ -36,24 +36,30 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionListener; -import org.opensearch.common.component.AbstractLifecycleComponent; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.network.CloseableChannel; import org.opensearch.common.network.NetworkAddress; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; import org.opensearch.common.transport.NetworkExceptionHelper; import org.opensearch.common.transport.PortsRange; -import org.opensearch.common.transport.TransportAddress; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.rest.RestChannel; import org.opensearch.rest.RestRequest; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanBuilder; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.telemetry.tracing.channels.TraceableHttpChannel; +import org.opensearch.telemetry.tracing.channels.TraceableRestChannel; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.BindTransportException; @@ -63,9 +69,11 @@ import java.nio.channels.CancelledKeyException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; @@ -105,7 +113,8 @@ public abstract class AbstractHttpServerTransport extends AbstractLifecycleCompo private final Set<HttpChannel> httpChannels = Collections.newSetFromMap(new ConcurrentHashMap<>()); private final Set<HttpServerChannel> httpServerChannels = Collections.newSetFromMap(new ConcurrentHashMap<>()); - private final HttpTracer tracer; + private final HttpTracer httpTracer; + private final Tracer tracer; protected AbstractHttpServerTransport( Settings settings, @@ -114,7 +123,8 @@ protected AbstractHttpServerTransport( ThreadPool threadPool, NamedXContentRegistry xContentRegistry, Dispatcher dispatcher, - ClusterSettings clusterSettings + ClusterSettings clusterSettings, + Tracer telemetryTracer ) { this.settings = settings; this.networkService = networkService; @@ -138,7 +148,8 @@ protected 
AbstractHttpServerTransport( this.port = SETTING_HTTP_PORT.get(settings); this.maxContentLength = SETTING_HTTP_MAX_CONTENT_LENGTH.get(settings); - this.tracer = new HttpTracer(settings, clusterSettings); + this.httpTracer = new HttpTracer(settings, clusterSettings); + this.tracer = telemetryTracer; } @Override @@ -289,6 +300,7 @@ static int resolvePublishPort(Settings settings, List<TransportAddress> boundAdd } public void onException(HttpChannel channel, Exception e) { + channel.handleException(e); if (lifecycle.started() == false) { // just close and ignore - we are already stopped and just need to make sure we release all resources CloseableChannel.closeChannel(channel); @@ -352,19 +364,31 @@ protected void serverAcceptedChannel(HttpChannel httpChannel) { * @param httpChannel that received the http request */ public void incomingRequest(final HttpRequest httpRequest, final HttpChannel httpChannel) { - handleIncomingRequest(httpRequest, httpChannel, httpRequest.getInboundException()); + final Span span = tracer.startSpan(SpanBuilder.from(httpRequest), extractHeaders(httpRequest.getHeaders())); + try (final SpanScope httpRequestSpanScope = tracer.withSpanInScope(span)) { + HttpChannel traceableHttpChannel = TraceableHttpChannel.create(httpChannel, span, tracer); + handleIncomingRequest(httpRequest, traceableHttpChannel, httpRequest.getInboundException()); + } } // Visible for testing void dispatchRequest(final RestRequest restRequest, final RestChannel channel, final Throwable badRequestCause) { + RestChannel traceableRestChannel = channel; final ThreadContext threadContext = threadPool.getThreadContext(); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - if (badRequestCause != null) { - dispatcher.dispatchBadRequest(channel, threadContext, badRequestCause); - } else { - dispatcher.dispatchRequest(restRequest, channel, threadContext); + final Span span = tracer.startSpan(SpanBuilder.from(restRequest)); + try (final SpanScope spanScope = tracer.withSpanInScope(span)) { + if (channel != null) { + traceableRestChannel = TraceableRestChannel.create(channel, span, tracer); + } + if (badRequestCause != null) { + dispatcher.dispatchBadRequest(traceableRestChannel, threadContext, badRequestCause); + } else { + dispatcher.dispatchRequest(restRequest, traceableRestChannel, threadContext); + } } } + } private void handleIncomingRequest(final HttpRequest httpRequest, final HttpChannel httpChannel, final Exception exception) { @@ -401,7 +425,7 @@ private void handleIncomingRequest(final HttpRequest httpRequest, final HttpChan restRequest = innerRestRequest; } - final HttpTracer trace = tracer.maybeTraceRequest(restRequest, exception); + final HttpTracer trace = httpTracer.maybeTraceRequest(restRequest, exception); /* * We now want to create a channel used to send the response on. 
However, creating this channel can fail if there are invalid @@ -461,4 +485,9 @@ private static ActionListener<Void> earlyResponseListener(HttpRequest request, H return NO_OP; } } + + @SuppressWarnings("unchecked") + private static <Values extends Collection<String>> Map<String, Collection<String>> extractHeaders(Map<String, Values> headers) { + return (Map<String, Collection<String>>) headers; + } } diff --git a/server/src/main/java/org/opensearch/http/CorsHandler.java b/server/src/main/java/org/opensearch/http/CorsHandler.java index 4049de01175a6..464ced184d10e 100644 --- a/server/src/main/java/org/opensearch/http/CorsHandler.java +++ b/server/src/main/java/org/opensearch/http/CorsHandler.java @@ -47,12 +47,12 @@ package org.opensearch.http; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsException; import org.opensearch.core.common.Strings; -import org.opensearch.rest.RestRequest; +import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.rest.RestStatus; +import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestUtils; import java.time.ZoneOffset; @@ -81,7 +81,7 @@ * This file is forked from the https://netty.io project. In particular it combines the following three * files: io.netty.handler.codec.http.cors.CorsHandler, io.netty.handler.codec.http.cors.CorsConfig, and * io.netty.handler.codec.http.cors.CorsConfigBuilder. - * + * <p> * It modifies the original netty code to operate on OpenSearch http request/response abstractions. * Additionally, it removes CORS features that are not used by OpenSearch. * diff --git a/server/src/main/java/org/opensearch/http/DefaultRestChannel.java b/server/src/main/java/org/opensearch/http/DefaultRestChannel.java index 523a89e05caa3..7084600133a75 100644 --- a/server/src/main/java/org/opensearch/http/DefaultRestChannel.java +++ b/server/src/main/java/org/opensearch/http/DefaultRestChannel.java @@ -33,22 +33,23 @@ package org.opensearch.http; import org.opensearch.Build; -import org.opensearch.action.ActionListener; import org.opensearch.common.Nullable; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.io.stream.ReleasableBytesStreamOutput; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.network.CloseableChannel; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.common.lease.Releasable; -import org.opensearch.common.lease.Releasables; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.AbstractRestChannel; import org.opensearch.rest.RestChannel; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; -import org.opensearch.core.rest.RestStatus; + import java.util.ArrayList; import java.util.List; import java.util.Map; diff --git a/server/src/main/java/org/opensearch/http/HttpChannel.java b/server/src/main/java/org/opensearch/http/HttpChannel.java index a3a3d46f629ce..ed20ec89a9099 100644 --- a/server/src/main/java/org/opensearch/http/HttpChannel.java +++ b/server/src/main/java/org/opensearch/http/HttpChannel.java @@ -32,17 
+32,25 @@
 package org.opensearch.http;
 
-import org.opensearch.action.ActionListener;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.network.CloseableChannel;
+import org.opensearch.core.action.ActionListener;
 
 import java.net.InetSocketAddress;
+import java.util.Optional;
 
 /**
  * Represents an HTTP comms channel
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public interface HttpChannel extends CloseableChannel {
+    /**
+     * Notifies the HTTP channel that an exception has occurred and the response may not be sent (for example, on timeout)
+     * @param ex the exception being raised
+     */
+    default void handleException(Exception ex) {}
 
     /**
      * Sends an http response to the channel. The listener will be executed once the send process has been
@@ -67,4 +75,17 @@ public interface HttpChannel extends CloseableChannel {
      */
     InetSocketAddress getRemoteAddress();
 
+    /**
+     * Returns the contextual property associated with this specific HTTP channel (the
+     * implementation of how such properties are managed depends on the particular
+     * transport engine).
+     *
+     * @param name the name of the property
+     * @param clazz the expected type of the property
+     *
+     * @return the value of the property
+     */
+    default <T> Optional<T> get(String name, Class<T> clazz) {
+        return Optional.empty();
+    }
 }
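The new contextual-property accessor on HttpChannel is deliberately optional: transport engines that track per-channel state can override it, while the default returns Optional.empty(). A hypothetical property store showing the intended name-plus-type lookup contract (illustrative only, not an actual transport implementation):

    import java.util.Map;
    import java.util.Optional;
    import java.util.concurrent.ConcurrentHashMap;

    // Hypothetical property store sketching the contract of
    // HttpChannel#get(String, Class): look up by name, filter by type.
    class ChannelProperties {
        private final Map<String, Object> properties = new ConcurrentHashMap<>();

        void put(String name, Object value) {
            properties.put(name, value);
        }

        <T> Optional<T> get(String name, Class<T> clazz) {
            Object value = properties.get(name);
            // Empty when the property is absent or of the wrong type, matching
            // the interface's default behavior of exposing no properties at all.
            return clazz.isInstance(value) ? Optional.of(clazz.cast(value)) : Optional.empty();
        }
    }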
diff --git a/server/src/main/java/org/opensearch/http/HttpInfo.java b/server/src/main/java/org/opensearch/http/HttpInfo.java
index a5e981e98e3de..10f2d50dacb14 100644
--- a/server/src/main/java/org/opensearch/http/HttpInfo.java
+++ b/server/src/main/java/org/opensearch/http/HttpInfo.java
@@ -32,15 +32,15 @@
 package org.opensearch.http;
 
-import org.opensearch.core.common.io.stream.StreamInput;
-import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.common.logging.DeprecationLogger;
 import org.opensearch.common.network.InetAddresses;
-import org.opensearch.common.transport.BoundTransportAddress;
-import org.opensearch.common.transport.TransportAddress;
-import org.opensearch.common.unit.ByteSizeValue;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.common.transport.BoundTransportAddress;
+import org.opensearch.core.common.transport.TransportAddress;
+import org.opensearch.core.common.unit.ByteSizeValue;
+import org.opensearch.core.service.ReportingService;
 import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.node.ReportingService;
 
 import java.io.IOException;
 
diff --git a/server/src/main/java/org/opensearch/http/HttpPipelinedRequest.java b/server/src/main/java/org/opensearch/http/HttpPipelinedRequest.java
index e88adaf5387e6..90ba6be0cc3df 100644
--- a/server/src/main/java/org/opensearch/http/HttpPipelinedRequest.java
+++ b/server/src/main/java/org/opensearch/http/HttpPipelinedRequest.java
@@ -33,8 +33,8 @@
 package org.opensearch.http;
 
 import org.opensearch.core.common.bytes.BytesReference;
-import org.opensearch.rest.RestRequest;
 import org.opensearch.core.rest.RestStatus;
+import org.opensearch.rest.RestRequest;
 
 import java.util.List;
 import java.util.Map;
diff --git a/server/src/main/java/org/opensearch/http/HttpRequest.java b/server/src/main/java/org/opensearch/http/HttpRequest.java
index 639bf40cad375..3dc10777b657a 100644
--- a/server/src/main/java/org/opensearch/http/HttpRequest.java
+++ b/server/src/main/java/org/opensearch/http/HttpRequest.java
@@ -33,9 +33,10 @@
 package org.opensearch.http;
 
 import org.opensearch.common.Nullable;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.bytes.BytesReference;
-import org.opensearch.rest.RestRequest;
 import org.opensearch.core.rest.RestStatus;
+import org.opensearch.rest.RestRequest;
 
 import java.util.Collections;
 import java.util.List;
@@ -45,15 +46,17 @@
  * A basic http request abstraction. Http modules needs to implement this interface to integrate with the
  * server package's rest handling.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public interface HttpRequest {
 
     /**
      * Which HTTP version being used
     *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     enum HttpVersion {
         HTTP_1_0,
         HTTP_1_1
diff --git a/server/src/main/java/org/opensearch/http/HttpResponse.java b/server/src/main/java/org/opensearch/http/HttpResponse.java
index 3c8269f4400e7..b25df41698c79 100644
--- a/server/src/main/java/org/opensearch/http/HttpResponse.java
+++ b/server/src/main/java/org/opensearch/http/HttpResponse.java
@@ -32,12 +32,15 @@
 package org.opensearch.http;
 
+import org.opensearch.common.annotation.PublicApi;
+
 /**
  * A basic http response abstraction. Http modules must implement this interface as the server package rest
  * handling needs to set http headers for a response.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
+@PublicApi(since = "1.0.0")
 public interface HttpResponse {
 
     void addHeader(String name, String value);
diff --git a/server/src/main/java/org/opensearch/http/HttpServerTransport.java b/server/src/main/java/org/opensearch/http/HttpServerTransport.java
index 425fa23047764..890136cb67e60 100644
--- a/server/src/main/java/org/opensearch/http/HttpServerTransport.java
+++ b/server/src/main/java/org/opensearch/http/HttpServerTransport.java
@@ -32,10 +32,10 @@
 package org.opensearch.http;
 
-import org.opensearch.common.component.LifecycleComponent;
-import org.opensearch.common.transport.BoundTransportAddress;
+import org.opensearch.common.lifecycle.LifecycleComponent;
 import org.opensearch.common.util.concurrent.ThreadContext;
-import org.opensearch.node.ReportingService;
+import org.opensearch.core.common.transport.BoundTransportAddress;
+import org.opensearch.core.service.ReportingService;
 import org.opensearch.rest.RestChannel;
 import org.opensearch.rest.RestRequest;
 
diff --git a/server/src/main/java/org/opensearch/http/HttpTransportSettings.java b/server/src/main/java/org/opensearch/http/HttpTransportSettings.java
index 4522a59d67c05..621ef36692178 100644
--- a/server/src/main/java/org/opensearch/http/HttpTransportSettings.java
+++ b/server/src/main/java/org/opensearch/http/HttpTransportSettings.java
@@ -37,9 +37,9 @@
 import org.opensearch.common.settings.Setting;
 import org.opensearch.common.settings.Setting.Property;
 import org.opensearch.common.transport.PortsRange;
-import org.opensearch.common.unit.ByteSizeUnit;
-import org.opensearch.common.unit.ByteSizeValue;
 import org.opensearch.common.unit.TimeValue;
+import org.opensearch.core.common.unit.ByteSizeUnit;
+import org.opensearch.core.common.unit.ByteSizeValue;
 
 import java.util.Collections;
 import java.util.List;
@@ -182,6 +182,14 @@ public final class HttpTransportSettings {
         Property.NodeScope
     );
 
+    // A value of 0 (the default) means there is no connect timeout
+    public static final Setting<TimeValue> SETTING_HTTP_CONNECT_TIMEOUT = Setting.timeSetting(
+        "http.connect_timeout",
+        new TimeValue(0),
+        new TimeValue(0),
+        Property.NodeScope
+    );
+
     // Tcp socket settings
 
     public static final
Setting<Boolean> OLD_SETTING_HTTP_TCP_NO_DELAY = boolSetting( diff --git a/server/src/main/java/org/opensearch/identity/IdentityService.java b/server/src/main/java/org/opensearch/identity/IdentityService.java index 54a11c8b31fb3..3129c201b9a39 100644 --- a/server/src/main/java/org/opensearch/identity/IdentityService.java +++ b/server/src/main/java/org/opensearch/identity/IdentityService.java @@ -5,8 +5,6 @@ package org.opensearch.identity; -import java.util.List; -import java.util.stream.Collectors; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.OpenSearchException; @@ -15,6 +13,9 @@ import org.opensearch.identity.tokens.TokenManager; import org.opensearch.plugins.IdentityPlugin; +import java.util.List; +import java.util.stream.Collectors; + /** * Identity and access control for OpenSearch * diff --git a/server/src/main/java/org/opensearch/identity/noop/NoopIdentityPlugin.java b/server/src/main/java/org/opensearch/identity/noop/NoopIdentityPlugin.java index c6ed8d57da435..090b1f1d025e0 100644 --- a/server/src/main/java/org/opensearch/identity/noop/NoopIdentityPlugin.java +++ b/server/src/main/java/org/opensearch/identity/noop/NoopIdentityPlugin.java @@ -8,13 +8,13 @@ package org.opensearch.identity.noop; +import org.opensearch.identity.Subject; import org.opensearch.identity.tokens.TokenManager; import org.opensearch.plugins.IdentityPlugin; -import org.opensearch.identity.Subject; /** * Implementation of identity plugin that does not enforce authentication or authorization - * + * <p> * This class and related classes in this package will not return nulls or fail access checks * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/identity/noop/NoopSubject.java b/server/src/main/java/org/opensearch/identity/noop/NoopSubject.java index 801225fb16ad3..964a218db3cf5 100644 --- a/server/src/main/java/org/opensearch/identity/noop/NoopSubject.java +++ b/server/src/main/java/org/opensearch/identity/noop/NoopSubject.java @@ -8,16 +8,16 @@ package org.opensearch.identity.noop; -import java.security.Principal; -import java.util.Objects; - import org.opensearch.identity.NamedPrincipal; -import org.opensearch.identity.tokens.AuthToken; import org.opensearch.identity.Subject; +import org.opensearch.identity.tokens.AuthToken; + +import java.security.Principal; +import java.util.Objects; /** * Implementation of subject that is always authenticated - * + * <p> * This class and related classes in this package will not return nulls or fail permissions checks * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/identity/noop/NoopTokenManager.java b/server/src/main/java/org/opensearch/identity/noop/NoopTokenManager.java index a55f28e02a8aa..fa6643b7447dc 100644 --- a/server/src/main/java/org/opensearch/identity/noop/NoopTokenManager.java +++ b/server/src/main/java/org/opensearch/identity/noop/NoopTokenManager.java @@ -11,7 +11,9 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.identity.IdentityService; +import org.opensearch.identity.Subject; import org.opensearch.identity.tokens.AuthToken; +import org.opensearch.identity.tokens.OnBehalfOfClaims; import org.opensearch.identity.tokens.TokenManager; /** @@ -26,8 +28,26 @@ public class NoopTokenManager implements TokenManager { * @return a new Noop Token */ @Override - public AuthToken issueToken(String audience) { + public AuthToken issueOnBehalfOfToken(final Subject subject, final OnBehalfOfClaims claims) 
{ return new AuthToken() { + @Override + public String asAuthHeaderValue() { + return "noopToken"; + } + }; + } + + /** + * Issue a new Noop Token + * @return a new Noop Token + */ + @Override + public AuthToken issueServiceAccountToken(final String audience) { + return new AuthToken() { + @Override + public String asAuthHeaderValue() { + return "noopToken"; + } }; } } diff --git a/server/src/main/java/org/opensearch/identity/tokens/AuthToken.java b/server/src/main/java/org/opensearch/identity/tokens/AuthToken.java index 6e113f6eaa96a..88bb855a6e70d 100644 --- a/server/src/main/java/org/opensearch/identity/tokens/AuthToken.java +++ b/server/src/main/java/org/opensearch/identity/tokens/AuthToken.java @@ -15,4 +15,6 @@ */ public interface AuthToken { + String asAuthHeaderValue(); + } diff --git a/server/src/main/java/org/opensearch/identity/tokens/BasicAuthToken.java b/server/src/main/java/org/opensearch/identity/tokens/BasicAuthToken.java index 9cd6cb6b6208a..4ad0bbe67d2a1 100644 --- a/server/src/main/java/org/opensearch/identity/tokens/BasicAuthToken.java +++ b/server/src/main/java/org/opensearch/identity/tokens/BasicAuthToken.java @@ -23,7 +23,7 @@ public final class BasicAuthToken implements AuthToken { public BasicAuthToken(final String headerValue) { final String base64Encoded = headerValue.substring(TOKEN_IDENTIFIER.length()).trim(); - final byte[] rawDecoded = Base64.getDecoder().decode(base64Encoded); + final byte[] rawDecoded = Base64.getUrlDecoder().decode(base64Encoded); final String usernamepassword = new String(rawDecoded, StandardCharsets.UTF_8); final String[] tokenParts = usernamepassword.split(":", 2); @@ -51,4 +51,13 @@ public void revoke() { this.password = ""; this.user = ""; } + + @Override + public String asAuthHeaderValue() { + if (user == null || password == null) { + return null; + } + String usernamepassword = user + ":" + password; + return Base64.getEncoder().encodeToString(usernamepassword.getBytes(StandardCharsets.UTF_8)); + } } diff --git a/server/src/main/java/org/opensearch/identity/tokens/BearerAuthToken.java b/server/src/main/java/org/opensearch/identity/tokens/BearerAuthToken.java index eac164af1c5d3..217538c7b001b 100644 --- a/server/src/main/java/org/opensearch/identity/tokens/BearerAuthToken.java +++ b/server/src/main/java/org/opensearch/identity/tokens/BearerAuthToken.java @@ -58,4 +58,9 @@ public String getTokenIdentifier() { public String toString() { return "Bearer auth token with header=" + header + ", payload=" + payload + ", signature=" + signature; } + + @Override + public String asAuthHeaderValue() { + return completeToken; + } } diff --git a/server/src/main/java/org/opensearch/identity/tokens/OnBehalfOfClaims.java b/server/src/main/java/org/opensearch/identity/tokens/OnBehalfOfClaims.java new file mode 100644 index 0000000000000..00e50a59e9486 --- /dev/null +++ b/server/src/main/java/org/opensearch/identity/tokens/OnBehalfOfClaims.java @@ -0,0 +1,46 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.identity.tokens; + +/** + * This class represents the claims of an OnBehalfOf token. 
+ */
+public class OnBehalfOfClaims {
+
+    private final String audience;
+    private final Long expiration_seconds;
+
+    /**
+     * Constructor for OnBehalfOfClaims
+     * @param aud the audience for the token
+     * @param expiration_seconds the length of time in seconds for which the token is valid
+     */
+    public OnBehalfOfClaims(String aud, Long expiration_seconds) {
+        this.audience = aud;
+        this.expiration_seconds = expiration_seconds;
+    }
+
+    /**
+     * A constructor which sets the default expiration time of 5 minutes (300 seconds)
+     * @param aud the audience for the token
+     * @param subject the subject of the token (currently unused)
+     */
+    public OnBehalfOfClaims(String aud, String subject) {
+        this(aud, 300L);
+    }
+
+    public String getAudience() {
+        return audience;
+    }
+
+    public Long getExpiration() {
+        return expiration_seconds;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/identity/tokens/RestTokenExtractor.java b/server/src/main/java/org/opensearch/identity/tokens/RestTokenExtractor.java
index ae200c7461a60..4bd3ebdded588 100644
--- a/server/src/main/java/org/opensearch/identity/tokens/RestTokenExtractor.java
+++ b/server/src/main/java/org/opensearch/identity/tokens/RestTokenExtractor.java
@@ -7,9 +7,9 @@
  */
 package org.opensearch.identity.tokens;
 
-import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.LogManager;
-import org.opensearch.common.Strings;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.core.common.Strings;
 import org.opensearch.rest.RestRequest;
 
 import java.util.Collections;
@@ -26,7 +26,7 @@ public class RestTokenExtractor {
 
     /**
      * Given a rest request it will extract authentication token
-     *
+     * <p>
      * If no token was found, returns null.
      */
     public static AuthToken extractToken(final RestRequest request) {
diff --git a/server/src/main/java/org/opensearch/identity/tokens/TokenManager.java b/server/src/main/java/org/opensearch/identity/tokens/TokenManager.java
index 029ce430e7532..972a9a1080955 100644
--- a/server/src/main/java/org/opensearch/identity/tokens/TokenManager.java
+++ b/server/src/main/java/org/opensearch/identity/tokens/TokenManager.java
@@ -8,15 +8,26 @@
 package org.opensearch.identity.tokens;
 
+import org.opensearch.identity.Subject;
+
 /**
  * This interface defines the expected methods of a token manager
 */
 public interface TokenManager {
 
     /**
-     * Create a new auth token
-     * @param audience: The audience for the token
+     * Create a new on-behalf-of token
+     *
+     * @param subject: The subject on whose behalf the token is issued
+     * @param claims: The claims for the token to be generated with
      * @return A new auth token
      */
-    public AuthToken issueToken(String audience);
+    public AuthToken issueOnBehalfOfToken(final Subject subject, final OnBehalfOfClaims claims);
+
+    /**
+     * Create a new service account token
+     *
+     * @param audience: A string representing the unique id of the extension for which a service account token should be generated
+     * @return a new auth token
+     */
+    public AuthToken issueServiceAccountToken(final String audience);
 }
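Every AuthToken can now render itself as an Authorization header value via asAuthHeaderValue(). A standalone sketch of the basic-auth round trip that BasicAuthToken performs, under the simplifying assumption of the standard Base64 alphabet (the real constructor uses the URL-safe decoder):

    import java.nio.charset.StandardCharsets;
    import java.util.Base64;

    // Standalone sketch of the basic-auth round trip: user/password in,
    // base64 header value out, then back again.
    class BasicAuthRoundTrip {
        public static void main(String[] args) {
            String user = "admin";
            String password = "secret";

            // Encode the way asAuthHeaderValue() does: base64 of "user:password".
            String headerValue = Base64.getEncoder()
                .encodeToString((user + ":" + password).getBytes(StandardCharsets.UTF_8));

            // Decode and split on the first ':' only, as the BasicAuthToken
            // constructor does, so passwords may themselves contain colons.
            String decoded = new String(Base64.getDecoder().decode(headerValue), StandardCharsets.UTF_8);
            String[] parts = decoded.split(":", 2);

            System.out.println(parts[0] + " / " + parts[1]); // admin / secret
        }
    }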
diff --git a/server/src/main/java/org/opensearch/index/CompositeIndexEventListener.java b/server/src/main/java/org/opensearch/index/CompositeIndexEventListener.java
index 2090d9a1ce8f0..001a34c46c72b 100644
--- a/server/src/main/java/org/opensearch/index/CompositeIndexEventListener.java
+++ b/server/src/main/java/org/opensearch/index/CompositeIndexEventListener.java
@@ -39,10 +39,10 @@
 import org.opensearch.common.logging.Loggers;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.core.index.Index;
+import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.index.shard.IndexEventListener;
 import org.opensearch.index.shard.IndexShard;
 import org.opensearch.index.shard.IndexShardState;
-import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason;
 
 import java.util.ArrayList;
diff --git a/server/src/main/java/org/opensearch/index/IndexModule.java b/server/src/main/java/org/opensearch/index/IndexModule.java
index e4e3a79c8e60c..6ac10a221d49e 100644
--- a/server/src/main/java/org/opensearch/index/IndexModule.java
+++ b/server/src/main/java/org/opensearch/index/IndexModule.java
@@ -48,15 +48,18 @@
 import org.opensearch.common.CheckedFunction;
 import org.opensearch.common.SetOnce;
 import org.opensearch.common.TriFunction;
-import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.logging.DeprecationLogger;
 import org.opensearch.common.settings.Setting;
 import org.opensearch.common.settings.Setting.Property;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.common.unit.TimeValue;
 import org.opensearch.common.util.BigArrays;
+import org.opensearch.common.util.io.IOUtils;
+import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
 import org.opensearch.core.index.Index;
+import org.opensearch.core.indices.breaker.CircuitBreakerService;
 import org.opensearch.core.xcontent.NamedXContentRegistry;
-import org.opensearch.common.util.io.IOUtils;
 import org.opensearch.env.NodeEnvironment;
 import org.opensearch.index.analysis.AnalysisRegistry;
 import org.opensearch.index.analysis.IndexAnalyzers;
@@ -76,9 +79,9 @@
 import org.opensearch.index.store.remote.filecache.FileCache;
 import org.opensearch.index.translog.TranslogFactory;
 import org.opensearch.indices.IndicesQueryCache;
-import org.opensearch.indices.breaker.CircuitBreakerService;
 import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache;
 import org.opensearch.indices.mapper.MapperRegistry;
+import org.opensearch.indices.recovery.RecoverySettings;
 import org.opensearch.indices.recovery.RecoveryState;
 import org.opensearch.plugins.IndexStorePlugin;
 import org.opensearch.repositories.RepositoriesService;
@@ -91,6 +94,7 @@
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
@@ -117,8 +121,9 @@
  * {@link #addSettingsUpdateConsumer(Setting, Consumer)}</li>
  * </ul>
 *
- * @opensearch.internal
+ * @opensearch.api
 */
+@PublicApi(since = "1.0.0")
 public final class IndexModule {
 
     public static final Setting<Boolean> NODE_STORE_ALLOW_MMAP = Setting.boolSetting("node.store.allow_mmap", true, Property.NodeScope);
@@ -154,14 +159,105 @@ public final class IndexModule {
         Property.NodeScope
     );
 
-    /** Which lucene file extensions to load with the mmap directory when using hybridfs store.
+    /** Which lucene file extensions to load with the mmap directory when using hybridfs store. This setting is ignored if {@link #INDEX_STORE_HYBRID_NIO_EXTENSIONS} is set.
      * This is an expert setting.
-     * @see <a href="https://lucene.apache.org/core/9_2_0/core/org/apache/lucene/codecs/lucene92/package-summary.html#file-names">Lucene File Extensions</a>.
+     * @see <a href="https://lucene.apache.org/core/9_9_0/core/org/apache/lucene/codecs/lucene99/package-summary.html#file-names">Lucene File Extensions</a>.
+     *
+     * @deprecated This setting will be removed in OpenSearch 3.x.
Use {@link #INDEX_STORE_HYBRID_NIO_EXTENSIONS} instead. */ + @Deprecated public static final Setting<List<String>> INDEX_STORE_HYBRID_MMAP_EXTENSIONS = Setting.listSetting( "index.store.hybrid.mmap.extensions", List.of("nvd", "dvd", "tim", "tip", "dim", "kdd", "kdi", "cfs", "doc"), Function.identity(), + new Setting.Validator<List<String>>() { + + @Override + public void validate(final List<String> value) {} + + @Override + public void validate(final List<String> value, final Map<Setting<?>, Object> settings) { + if (value.equals(INDEX_STORE_HYBRID_MMAP_EXTENSIONS.getDefault(Settings.EMPTY)) == false) { + final List<String> nioExtensions = (List<String>) settings.get(INDEX_STORE_HYBRID_NIO_EXTENSIONS); + final List<String> defaultNioExtensions = INDEX_STORE_HYBRID_NIO_EXTENSIONS.getDefault(Settings.EMPTY); + if (nioExtensions.equals(defaultNioExtensions) == false) { + throw new IllegalArgumentException( + "Settings " + + INDEX_STORE_HYBRID_NIO_EXTENSIONS.getKey() + + " & " + + INDEX_STORE_HYBRID_MMAP_EXTENSIONS.getKey() + + " cannot both be set. Use " + + INDEX_STORE_HYBRID_NIO_EXTENSIONS.getKey() + + " only." + ); + } + } + } + + @Override + public Iterator<Setting<?>> settings() { + return List.<Setting<?>>of(INDEX_STORE_HYBRID_NIO_EXTENSIONS).iterator(); + } + }, + Property.IndexScope, + Property.NodeScope, + Property.Deprecated + ); + + /** Which lucene file extensions to load with nio. All others will default to mmap. Takes precedence over {@link #INDEX_STORE_HYBRID_MMAP_EXTENSIONS}. + * This is an expert setting. + * @see <a href="https://lucene.apache.org/core/9_9_0/core/org/apache/lucene/codecs/lucene99/package-summary.html#file-names">Lucene File Extensions</a>. + */ + public static final Setting<List<String>> INDEX_STORE_HYBRID_NIO_EXTENSIONS = Setting.listSetting( + "index.store.hybrid.nio.extensions", + List.of( + "segments_N", + "write.lock", + "si", + "cfe", + "fnm", + "fdx", + "fdt", + "pos", + "pay", + "nvm", + "dvm", + "tvx", + "tvd", + "liv", + "dii", + "vem" + ), + Function.identity(), + new Setting.Validator<List<String>>() { + + @Override + public void validate(final List<String> value) {} + + @Override + public void validate(final List<String> value, final Map<Setting<?>, Object> settings) { + if (value.equals(INDEX_STORE_HYBRID_NIO_EXTENSIONS.getDefault(Settings.EMPTY)) == false) { + final List<String> mmapExtensions = (List<String>) settings.get(INDEX_STORE_HYBRID_MMAP_EXTENSIONS); + final List<String> defaultMmapExtensions = INDEX_STORE_HYBRID_MMAP_EXTENSIONS.getDefault(Settings.EMPTY); + if (mmapExtensions.equals(defaultMmapExtensions) == false) { + throw new IllegalArgumentException( + "Settings " + + INDEX_STORE_HYBRID_NIO_EXTENSIONS.getKey() + + " & " + + INDEX_STORE_HYBRID_MMAP_EXTENSIONS.getKey() + + " cannot both be set. Use " + + INDEX_STORE_HYBRID_NIO_EXTENSIONS.getKey() + + " only." 
+ ); + } + } + } + + @Override + public Iterator<Setting<?>> settings() { + return List.<Setting<?>>of(INDEX_STORE_HYBRID_MMAP_EXTENSIONS).iterator(); + } + }, Property.IndexScope, Property.NodeScope ); @@ -400,8 +496,9 @@ IndexEventListener freeze() { // pkg private for testing /** * Type of file system * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Type { HYBRIDFS("hybridfs"), NIOFS("niofs"), @@ -505,7 +602,10 @@ public IndexService newIndexService( BooleanSupplier idFieldDataEnabled, ValuesSourceRegistry valuesSourceRegistry, IndexStorePlugin.DirectoryFactory remoteDirectoryFactory, - BiFunction<IndexSettings, ShardRouting, TranslogFactory> translogFactorySupplier + BiFunction<IndexSettings, ShardRouting, TranslogFactory> translogFactorySupplier, + Supplier<TimeValue> clusterDefaultRefreshIntervalSupplier, + Supplier<TimeValue> clusterRemoteTranslogBufferIntervalSupplier, + RecoverySettings recoverySettings ) throws IOException { final IndexEventListener eventListener = freeze(); Function<IndexService, CheckedFunction<DirectoryReader, DirectoryReader, IOException>> readerWrapperFactory = indexReaderWrapper @@ -561,7 +661,10 @@ public IndexService newIndexService( expressionResolver, valuesSourceRegistry, recoveryStateFactory, - translogFactorySupplier + translogFactorySupplier, + clusterDefaultRefreshIntervalSupplier, + clusterRemoteTranslogBufferIntervalSupplier, + recoverySettings ); success = true; return indexService; diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index 1ecc98b7f69f2..0909e2d5c8ff0 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -40,7 +40,6 @@ import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; import org.apache.lucene.util.Accountable; -import org.opensearch.core.Assertions; import org.opensearch.client.Client; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; @@ -49,7 +48,7 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.CheckedFunction; import org.opensearch.common.Nullable; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; @@ -57,8 +56,12 @@ import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.concurrent.AbstractAsyncTask; import org.opensearch.common.util.concurrent.AbstractRunnable; -import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.Assertions; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.indices.breaker.CircuitBreakerService; +import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.NodeEnvironment; import org.opensearch.env.ShardLock; import org.opensearch.env.ShardLockObtainFailedException; @@ -76,14 +79,13 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.query.SearchIndexNameMatcher; -import 
org.opensearch.index.remote.RemoteRefreshSegmentPressureService; +import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; import org.opensearch.index.seqno.RetentionLeaseSyncer; import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardClosedException; import org.opensearch.index.shard.IndexingOperationListener; import org.opensearch.index.shard.SearchOperationListener; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.ShardNotFoundException; import org.opensearch.index.shard.ShardNotInPrimaryModeException; import org.opensearch.index.shard.ShardPath; @@ -91,10 +93,10 @@ import org.opensearch.index.store.Store; import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.TranslogFactory; -import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.indices.mapper.MapperRegistry; +import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.plugins.IndexStorePlugin; @@ -129,8 +131,9 @@ /** * The main OpenSearch index service * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexService extends AbstractIndexComponent implements IndicesClusterStateService.AllocatedIndex<IndexShard> { private final IndexEventListener eventListener; @@ -176,6 +179,9 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust private final Supplier<Sort> indexSortSupplier; private final ValuesSourceRegistry valuesSourceRegistry; private final BiFunction<IndexSettings, ShardRouting, TranslogFactory> translogFactorySupplier; + private final Supplier<TimeValue> clusterDefaultRefreshIntervalSupplier; + private final Supplier<TimeValue> clusterRemoteTranslogBufferIntervalSupplier; + private final RecoverySettings recoverySettings; public IndexService( IndexSettings indexSettings, @@ -208,7 +214,10 @@ public IndexService( IndexNameExpressionResolver expressionResolver, ValuesSourceRegistry valuesSourceRegistry, IndexStorePlugin.RecoveryStateFactory recoveryStateFactory, - BiFunction<IndexSettings, ShardRouting, TranslogFactory> translogFactorySupplier + BiFunction<IndexSettings, ShardRouting, TranslogFactory> translogFactorySupplier, + Supplier<TimeValue> clusterDefaultRefreshIntervalSupplier, + Supplier<TimeValue> clusterRemoteTranslogBufferIntervalSupplier, + RecoverySettings recoverySettings ) { super(indexSettings); this.allowExpensiveQueries = allowExpensiveQueries; @@ -236,8 +245,10 @@ public IndexService( if (indexSettings.getIndexSortConfig().hasIndexSort()) { // we delay the actual creation of the sort order for this index because the mapping has not been merged yet. // The sort order is validated right after the merge of the mapping later in the process. 
+ boolean shouldWidenIndexSortType = this.indexSettings.shouldWidenIndexSortType(); this.indexSortSupplier = () -> indexSettings.getIndexSortConfig() .buildIndexSort( + shouldWidenIndexSortType, mapperService::fieldType, (fieldType, searchLookup) -> indexFieldData.getForField(fieldType, indexFieldData.index().getName(), searchLookup) ); @@ -275,12 +286,15 @@ public IndexService( this.readerWrapper = wrapperFactory.apply(this); this.searchOperationListeners = Collections.unmodifiableList(searchOperationListeners); this.indexingOperationListeners = Collections.unmodifiableList(indexingOperationListeners); + this.clusterDefaultRefreshIntervalSupplier = clusterDefaultRefreshIntervalSupplier; // kick off async ops for the first shard in this index this.refreshTask = new AsyncRefreshTask(this); this.trimTranslogTask = new AsyncTrimTranslogTask(this); this.globalCheckpointTask = new AsyncGlobalCheckpointTask(this); this.retentionLeaseSyncTask = new AsyncRetentionLeaseSyncTask(this); this.translogFactorySupplier = translogFactorySupplier; + this.clusterRemoteTranslogBufferIntervalSupplier = clusterRemoteTranslogBufferIntervalSupplier; + this.recoverySettings = recoverySettings; updateFsyncTaskIfNecessary(); } @@ -292,8 +306,9 @@ static boolean needsMapperService(IndexSettings indexSettings, IndexCreationCont /** * Context for index creation * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum IndexCreationContext { CREATE_INDEX, METADATA_VERIFICATION @@ -440,7 +455,7 @@ public synchronized IndexShard createShard( final Consumer<ShardId> globalCheckpointSyncer, final RetentionLeaseSyncer retentionLeaseSyncer, final SegmentReplicationCheckpointPublisher checkpointPublisher, - final RemoteRefreshSegmentPressureService remoteRefreshSegmentPressureService + final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory ) throws IOException { Objects.requireNonNull(retentionLeaseSyncer); /* @@ -473,7 +488,7 @@ public synchronized IndexShard createShard( Store remoteStore = null; if (this.indexSettings.isRemoteStoreEnabled()) { Directory remoteDirectory = remoteDirectoryFactory.newDirectory(this.indexSettings, path); - remoteStore = new Store(shardId, this.indexSettings, remoteDirectory, lock, Store.OnClose.EMPTY); + remoteStore = new Store(shardId, this.indexSettings, remoteDirectory, lock, Store.OnClose.EMPTY, path); } Directory directory = directoryFactory.newDirectory(this.indexSettings, path); @@ -482,7 +497,8 @@ public synchronized IndexShard createShard( this.indexSettings, directory, lock, - new StoreCloseListener(shardId, () -> eventListener.onStoreClosed(shardId)) + new StoreCloseListener(shardId, () -> eventListener.onStoreClosed(shardId)), + path ); eventListener.onStoreCreated(shardId); indexShard = new IndexShard( @@ -509,7 +525,10 @@ public synchronized IndexShard createShard( translogFactorySupplier, this.indexSettings.isSegRepEnabled() ? checkpointPublisher : null, remoteStore, - remoteRefreshSegmentPressureService + remoteStoreStatsTrackerFactory, + clusterRemoteTranslogBufferIntervalSupplier, + nodeEnv.nodeId(), + recoverySettings ); eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created"); eventListener.afterIndexShardCreated(indexShard); @@ -680,7 +699,7 @@ public IndexSettings getIndexSettings() { /** * Creates a new QueryShardContext. 
- * + * <p> * Passing a {@code null} {@link IndexSearcher} will return a valid context, however it won't be able to make * {@link IndexReader}-specific optimizations, such as rewriting containing range queries. */ @@ -690,7 +709,7 @@ public QueryShardContext newQueryShardContext(int shardId, IndexSearcher searche /** * Creates a new QueryShardContext. - * + * <p> * Passing a {@code null} {@link IndexSearcher} will return a valid context, however it won't be able to make * {@link IndexReader}-specific optimizations, such as rewriting containing range queries. */ @@ -895,36 +914,47 @@ public synchronized void updateMetadata(final IndexMetadata currentIndexMetadata ); } } - if (refreshTask.getInterval().equals(indexSettings.getRefreshInterval()) == false) { - // once we change the refresh interval we schedule yet another refresh - // to ensure we are in a clean and predictable state. - // it doesn't matter if we move from or to <code>-1</code> in both cases we want - // docs to become visible immediately. This also flushes all pending indexing / search requests - // that are waiting for a refresh. - threadPool.executor(ThreadPool.Names.REFRESH).execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - logger.warn("forced refresh failed after interval change", e); - } - - @Override - protected void doRun() throws Exception { - maybeRefreshEngine(true); - } - - @Override - public boolean isForceExecution() { - return true; - } - }); - rescheduleRefreshTasks(); - } + onRefreshIntervalChange(); updateFsyncTaskIfNecessary(); } metadataListeners.forEach(c -> c.accept(newIndexMetadata)); } + /** + * Called whenever the refresh interval changes. This can happen in 2 cases - + * 1. {@code cluster.default.index.refresh_interval} cluster setting changes. The change would only happen for + * indexes relying on cluster default. + * 2. {@code index.refresh_interval} index setting changes. + */ + public void onRefreshIntervalChange() { + if (refreshTask.getInterval().equals(getRefreshInterval())) { + return; + } + // once we change the refresh interval we schedule yet another refresh + // to ensure we are in a clean and predictable state. + // it doesn't matter if we move from or to <code>-1</code> in both cases we want + // docs to become visible immediately. This also flushes all pending indexing / search requests + // that are waiting for a refresh. 
+        threadPool.executor(ThreadPool.Names.REFRESH).execute(new AbstractRunnable() {
+            @Override
+            public void onFailure(Exception e) {
+                logger.warn("forced refresh failed after interval change", e);
+            }
+
+            @Override
+            protected void doRun() throws Exception {
+                maybeRefreshEngine(true);
+            }
+
+            @Override
+            public boolean isForceExecution() {
+                return true;
+            }
+        });
+        rescheduleRefreshTasks();
+    }
+
     private void updateFsyncTaskIfNecessary() {
         if (indexSettings.getTranslogDurability() == Translog.Durability.REQUEST) {
             try {
@@ -952,8 +982,9 @@ private void rescheduleRefreshTasks() {
     /**
      * Shard Store Deleter Interface
      *
-     * @opensearch.internal
+     * @opensearch.api
      */
+    @PublicApi(since = "1.0.0")
     public interface ShardStoreDeleter {
         void deleteShardStore(String reason, ShardLock lock, IndexSettings indexSettings) throws IOException;
 
@@ -989,7 +1020,7 @@ private void maybeFSyncTranslogs() {
     }
 
     private void maybeRefreshEngine(boolean force) {
-        if (indexSettings.getRefreshInterval().millis() > 0 || force) {
+        if (getRefreshInterval().millis() > 0 || force) {
             for (IndexShard shard : this.shards.values()) {
                 try {
                     shard.scheduledRefresh();
@@ -1060,6 +1091,17 @@ private void sync(final Consumer<IndexShard> sync, final String source) {
         }
     }
 
+    /**
+     * Gets the refresh interval seen by the index service. An explicitly set index-level refresh interval takes the highest precedence.
+     * @return the refresh interval.
+     */
+    private TimeValue getRefreshInterval() {
+        if (getIndexSettings().isExplicitRefresh()) {
+            return getIndexSettings().getRefreshInterval();
+        }
+        return clusterDefaultRefreshIntervalSupplier.get();
+    }
+
     /**
      * Base asynchronous task
      *
@@ -1120,7 +1162,7 @@ public String toString() {
     final class AsyncRefreshTask extends BaseAsyncTask {
 
         AsyncRefreshTask(IndexService indexService) {
-            super(indexService, indexService.getIndexSettings().getRefreshInterval());
+            super(indexService, indexService.getRefreshInterval());
        }
 
         @Override
@@ -1242,6 +1284,11 @@ AsyncRefreshTask getRefreshTask() { // for tests
         return refreshTask;
     }
 
+    // Visible for testing
+    public TimeValue getRefreshTaskInterval() {
+        return refreshTask.getInterval();
+    }
+
     AsyncTranslogFSync getFsyncTask() { // for tests
         return fsyncTask;
     }
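getRefreshInterval() above encodes the precedence rule introduced in this change: an explicitly set index.refresh_interval always wins, otherwise the cluster-wide default supplier is consulted. A condensed standalone sketch of that resolution, with java.time.Duration standing in for TimeValue:

    import java.time.Duration;
    import java.util.function.Supplier;

    // Simplified resolution mirroring IndexService#getRefreshInterval():
    // an explicitly set index-level interval wins, else the cluster default applies.
    class RefreshIntervalResolver {
        static Duration resolve(boolean explicitlySetOnIndex, Duration indexInterval, Supplier<Duration> clusterDefault) {
            return explicitlySetOnIndex ? indexInterval : clusterDefault.get();
        }

        public static void main(String[] args) {
            Supplier<Duration> clusterDefault = () -> Duration.ofSeconds(1);
            // Index did not set index.refresh_interval: the cluster default (1s) applies.
            System.out.println(resolve(false, null, clusterDefault));
            // Index explicitly set 5s: the index-level value wins.
            System.out.println(resolve(true, Duration.ofSeconds(5), clusterDefault));
        }
    }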
diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java
index 0749ad0876534..36e48b2590a4e 100644
--- a/server/src/main/java/org/opensearch/index/IndexSettings.java
+++ b/server/src/main/java/org/opensearch/index/IndexSettings.java
@@ -36,16 +36,17 @@
 import org.apache.lucene.sandbox.index.MergeOnFlushMergePolicy;
 import org.opensearch.Version;
 import org.opensearch.cluster.metadata.IndexMetadata;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.logging.Loggers;
 import org.opensearch.common.settings.IndexScopedSettings;
 import org.opensearch.common.settings.Setting;
 import org.opensearch.common.settings.Setting.Property;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.unit.ByteSizeUnit;
-import org.opensearch.common.unit.ByteSizeValue;
 import org.opensearch.common.unit.TimeValue;
 import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.core.common.Strings;
+import org.opensearch.core.common.unit.ByteSizeUnit;
+import org.opensearch.core.common.unit.ByteSizeValue;
 import org.opensearch.core.index.Index;
 import org.opensearch.index.translog.Translog;
 import org.opensearch.indices.replication.common.ReplicationType;
@@ -53,6 +54,7 @@
 import org.opensearch.node.Node;
 import org.opensearch.search.pipeline.SearchPipelineService;
 
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 import java.util.Locale;
@@ -62,7 +64,10 @@
 import java.util.function.Function;
 import java.util.function.UnaryOperator;
 
+import static org.opensearch.Version.V_2_7_0;
+import static org.opensearch.common.util.FeatureFlags.DOC_ID_FUZZY_SET_SETTING;
 import static org.opensearch.common.util.FeatureFlags.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY;
+import static org.opensearch.index.codec.fuzzy.FuzzySetParameters.DEFAULT_FALSE_POSITIVE_PROBABILITY;
 import static org.opensearch.index.mapper.MapperService.INDEX_MAPPING_DEPTH_LIMIT_SETTING;
 import static org.opensearch.index.mapper.MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING;
 import static org.opensearch.index.mapper.MapperService.INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING;
@@ -77,12 +82,46 @@
  * a settings consumer at index creation via {@link IndexModule#addSettingsUpdateConsumer(Setting, Consumer)} that will
  * be called for each settings update.
 *
- * @opensearch.internal
+ * @opensearch.api
 */
+@PublicApi(since = "1.0.0")
 public final class IndexSettings {
-    private static final String MERGE_ON_FLUSH_DEFAULT_POLICY = "default";
+    private static final String DEFAULT_POLICY = "default";
     private static final String MERGE_ON_FLUSH_MERGE_POLICY = "merge-on-flush";
 
+    /**
+     * Enum representing the supported merge policies
+     */
+    public enum IndexMergePolicy {
+        TIERED("tiered"),
+        LOG_BYTE_SIZE("log_byte_size"),
+        DEFAULT_POLICY(IndexSettings.DEFAULT_POLICY);
+
+        private final String value;
+
+        IndexMergePolicy(String value) {
+            this.value = value;
+        }
+
+        public String getValue() {
+            return value;
+        }
+
+        public static IndexMergePolicy fromString(String text) {
+            for (IndexMergePolicy policy : IndexMergePolicy.values()) {
+                if (policy.value.equals(text)) {
+                    return policy;
+                }
+            }
+            throw new IllegalArgumentException(
+                "The setting has an unsupported policy specified: "
+                    + text
+                    + ". Please use one of: "
+                    + String.join(", ", Arrays.stream(IndexMergePolicy.values()).map(IndexMergePolicy::getValue).toArray(String[]::new))
+            );
+        }
+    }
+
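IndexMergePolicy.fromString is a reverse lookup over the enum's wire values that rejects unknown names, which is what lets the index.merge.policy setting below be validated eagerly. A minimal standalone sketch of the same lookup, re-declared here purely for illustration:

    // Illustrative reverse lookup, mirroring IndexMergePolicy.fromString above.
    enum MergePolicy {
        TIERED("tiered"),
        LOG_BYTE_SIZE("log_byte_size"),
        DEFAULT("default");

        private final String value;

        MergePolicy(String value) {
            this.value = value;
        }

        static MergePolicy fromString(String text) {
            for (MergePolicy policy : values()) {
                if (policy.value.equals(text)) {
                    return policy;
                }
            }
            throw new IllegalArgumentException("Unsupported merge policy: " + text);
        }

        public static void main(String[] args) {
            System.out.println(fromString("log_byte_size")); // LOG_BYTE_SIZE
            // fromString("best_effort") would throw IllegalArgumentException,
            // which is how the setting rejects invalid values up front.
        }
    }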
+ * <p> + * Defaults to true which means unreferenced files will be cleaned up in case segment merge fails. + */ + public static final Setting<Boolean> INDEX_UNREFERENCED_FILE_CLEANUP = Setting.boolSetting( + "index.unreferenced_file_cleanup.enabled", + true, + Property.IndexScope, + Property.Dynamic + ); + /** * Determines a balance between file-based and operations-based peer recoveries. The number of operations that will be used in an * operations-based peer recovery is limited to this proportion of the total number of documents in the shard (including deleted * documents) on the grounds that a file-based peer recovery may copy all of the documents in the shard over to the new peer, but is * significantly faster than replaying the missing operations on the peer, so once a peer falls far enough behind the primary it makes * more sense to copy all the data over again instead of replaying history. - * + * <p> * Defaults to retaining history for up to 10% of the documents in the shard. This can only be changed in tests, since this setting is * intentionally unregistered. */ @@ -550,11 +613,25 @@ public final class IndexSettings { public static final Setting<String> INDEX_MERGE_ON_FLUSH_POLICY = Setting.simpleString( "index.merge_on_flush.policy", - MERGE_ON_FLUSH_DEFAULT_POLICY, + DEFAULT_POLICY, Property.IndexScope, Property.Dynamic ); + public static final Setting<String> INDEX_MERGE_POLICY = Setting.simpleString( + "index.merge.policy", + DEFAULT_POLICY, + IndexMergePolicy::fromString, + Property.IndexScope + ); + + public static final Setting<String> TIME_SERIES_INDEX_MERGE_POLICY = Setting.simpleString( + "indices.time_series_index.default_index_merge_policy", + DEFAULT_POLICY, + IndexMergePolicy::fromString, + Property.NodeScope + ); + public static final Setting<String> SEARCHABLE_SNAPSHOT_REPOSITORY = Setting.simpleString( "index.searchable_snapshot.repository", Property.IndexScope, @@ -594,6 +671,22 @@ public final class IndexSettings { Property.Dynamic ); + public static final Setting<Boolean> INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING = Setting.boolSetting( + "index.optimize_doc_id_lookup.fuzzy_set.enabled", + false, + Property.IndexScope, + Property.Dynamic + ); + + public static final Setting<Double> INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING = Setting.doubleSetting( + "index.optimize_doc_id_lookup.fuzzy_set.false_positive_probability", + DEFAULT_FALSE_POSITIVE_PROBABILITY, + 0.01, + 0.50, + Property.IndexScope, + Property.Dynamic + ); + public static final TimeValue DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL = new TimeValue(650, TimeUnit.MILLISECONDS); public static final TimeValue MINIMUM_REMOTE_TRANSLOG_BUFFER_INTERVAL = TimeValue.ZERO; public static final Setting<TimeValue> INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING = Setting.timeSetting( @@ -604,6 +697,14 @@ public final class IndexSettings { Property.IndexScope ); + public static final Setting<Integer> INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING = Setting.intSetting( + "index.remote_store.translog.keep_extra_gen", + 100, + 0, + Property.Dynamic, + Property.IndexScope + ); + private final Index index; private final Version version; private final Logger logger; @@ -616,6 +717,7 @@ public final class IndexSettings { private final String remoteStoreTranslogRepository; private final String remoteStoreRepository; private final boolean isRemoteSnapshot; + private int remoteTranslogKeepExtraGen; private Version extendedCompatibilitySnapshotVersion; // volatile fields are updated via #updateIndexMetadata(IndexMetadata) 
under lock @@ -635,7 +737,8 @@ public final class IndexSettings { private volatile ByteSizeValue generationThresholdSize; private volatile ByteSizeValue flushAfterMergeThresholdSize; private final MergeSchedulerConfig mergeSchedulerConfig; - private final MergePolicyConfig mergePolicyConfig; + private final TieredMergePolicyProvider tieredMergePolicyProvider; + private final LogByteSizeMergePolicyProvider logByteSizeMergePolicyProvider; private final IndexSortConfig indexSortConfig; private final IndexScopedSettings scopedSettings; private long gcDeletesInMillis = DEFAULT_GC_DELETES.millis(); @@ -645,6 +748,7 @@ public final class IndexSettings { private volatile long retentionLeaseMillis; private volatile String defaultSearchPipeline; + private final boolean widenIndexSortType; /** * The maximum age of a retention lease before it is considered expired. @@ -672,9 +776,12 @@ private void setRetentionLeaseMillis(final TimeValue retentionLease) { private volatile TimeValue searchIdleAfter; private volatile int maxAnalyzedOffset; private volatile int maxTermsCount; + + private volatile int maxNestedQueryDepth; private volatile String defaultPipeline; private volatile String requiredPipeline; private volatile boolean searchThrottled; + private volatile boolean shouldCleanupUnreferencedFiles; private volatile long mappingNestedFieldsLimit; private volatile long mappingNestedDocsLimit; private volatile long mappingTotalFieldsLimit; @@ -711,6 +818,16 @@ private void setRetentionLeaseMillis(final TimeValue retentionLease) { */ private volatile UnaryOperator<MergePolicy> mergeOnFlushPolicy; + /** + * Is fuzzy set enabled for doc id + */ + private volatile boolean enableFuzzySetForDocId; + + /** + * False positive probability to use while creating fuzzy set. + */ + private volatile double docIdFuzzySetFalsePositiveProbability; + /** * Returns the default search fields for this index. 
*/ @@ -783,6 +900,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti remoteStoreTranslogRepository = settings.get(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY); remoteTranslogUploadBufferInterval = INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(settings); remoteStoreRepository = settings.get(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY); + this.remoteTranslogKeepExtraGen = INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING.get(settings); isRemoteSnapshot = IndexModule.Type.REMOTE_SNAPSHOT.match(this.settings); if (isRemoteSnapshot && FeatureFlags.isEnabled(SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY)) { @@ -792,6 +910,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti } this.searchThrottled = INDEX_SEARCH_THROTTLED.get(settings); + this.shouldCleanupUnreferencedFiles = INDEX_UNREFERENCED_FILE_CLEANUP.get(settings); this.queryStringLenient = QUERY_STRING_LENIENT_SETTING.get(settings); this.queryStringAnalyzeWildcard = QUERY_STRING_ANALYZE_WILDCARD.get(nodeSettings); this.queryStringAllowLeadingWildcard = QUERY_STRING_ALLOW_LEADING_WILDCARD.get(nodeSettings); @@ -824,8 +943,10 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti maxSlicesPerPit = scopedSettings.get(MAX_SLICES_PER_PIT); maxAnalyzedOffset = scopedSettings.get(MAX_ANALYZED_OFFSET_SETTING); maxTermsCount = scopedSettings.get(MAX_TERMS_COUNT_SETTING); + maxNestedQueryDepth = scopedSettings.get(MAX_NESTED_QUERY_DEPTH_SETTING); maxRegexLength = scopedSettings.get(MAX_REGEX_LENGTH_SETTING); - this.mergePolicyConfig = new MergePolicyConfig(logger, this); + this.tieredMergePolicyProvider = new TieredMergePolicyProvider(logger, this); + this.logByteSizeMergePolicyProvider = new LogByteSizeMergePolicyProvider(logger, this); this.indexSortConfig = new IndexSortConfig(this); searchIdleAfter = scopedSettings.get(INDEX_SEARCH_IDLE_AFTER); defaultPipeline = scopedSettings.get(DEFAULT_PIPELINE); @@ -840,33 +961,73 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti mergeOnFlushEnabled = scopedSettings.get(INDEX_MERGE_ON_FLUSH_ENABLED); setMergeOnFlushPolicy(scopedSettings.get(INDEX_MERGE_ON_FLUSH_POLICY)); defaultSearchPipeline = scopedSettings.get(DEFAULT_SEARCH_PIPELINE); + /* An unintentional breaking change was introduced by [OpenSearch-6424](https://github.com/opensearch-project/OpenSearch/pull/6424) (version 2.7). + * Indices created before 2.7 that define an index sort used to widen the SortField.Type to a larger type, + * for example integer to long. From OpenSearch 2.7 onwards the SortField is no longer widened, in order to enable + * some sort query optimizations. + * Since this sortField (IndexSort) is stored in SegmentInfo, backward compatibility must be maintained for those older indices.
+ */ + widenIndexSortType = IndexMetadata.SETTING_INDEX_VERSION_CREATED.get(settings).before(V_2_7_0); + + boolean isOptimizeDocIdLookupUsingFuzzySetFeatureEnabled = FeatureFlags.isEnabled(DOC_ID_FUZZY_SET_SETTING); + if (isOptimizeDocIdLookupUsingFuzzySetFeatureEnabled) { + enableFuzzySetForDocId = scopedSettings.get(INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING); + docIdFuzzySetFalsePositiveProbability = scopedSettings.get(INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING); + } - scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, mergePolicyConfig::setNoCFSRatio); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, - mergePolicyConfig::setDeletesPctAllowed + TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING, + tieredMergePolicyProvider::setNoCFSRatio ); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, - mergePolicyConfig::setExpungeDeletesAllowed + TieredMergePolicyProvider.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, + tieredMergePolicyProvider::setDeletesPctAllowed ); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING, - mergePolicyConfig::setFloorSegmentSetting + TieredMergePolicyProvider.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, + tieredMergePolicyProvider::setExpungeDeletesAllowed ); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, - mergePolicyConfig::setMaxMergesAtOnce + TieredMergePolicyProvider.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING, + tieredMergePolicyProvider::setFloorSegmentSetting ); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, - mergePolicyConfig::setMaxMergedSegment + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, + tieredMergePolicyProvider::setMaxMergesAtOnce ); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING, - mergePolicyConfig::setSegmentsPerTier + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, + tieredMergePolicyProvider::setMaxMergedSegment + ); + scopedSettings.addSettingsUpdateConsumer( + TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING, + tieredMergePolicyProvider::setSegmentsPerTier ); + scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MERGE_FACTOR_SETTING, + logByteSizeMergePolicyProvider::setLBSMergeFactor + ); + scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MIN_MERGE_SETTING, + logByteSizeMergePolicyProvider::setLBSMinMergedMB + ); + scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_SETTING, + logByteSizeMergePolicyProvider::setLBSMaxMergeSegment + ); + scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_FOR_FORCED_MERGE_SETTING, + logByteSizeMergePolicyProvider::setLBSMaxMergeMBForForcedMerge + ); + scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGED_DOCS_SETTING, + logByteSizeMergePolicyProvider::setLBSMaxMergeDocs + ); + scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING, + logByteSizeMergePolicyProvider::setLBSNoCFSRatio + ); scopedSettings.addSettingsUpdateConsumer( 
MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING, MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING, @@ -895,6 +1056,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti scopedSettings.addSettingsUpdateConsumer(MAX_REFRESH_LISTENERS_PER_SHARD, this::setMaxRefreshListeners); scopedSettings.addSettingsUpdateConsumer(MAX_ANALYZED_OFFSET_SETTING, this::setHighlightMaxAnalyzedOffset); scopedSettings.addSettingsUpdateConsumer(MAX_TERMS_COUNT_SETTING, this::setMaxTermsCount); + scopedSettings.addSettingsUpdateConsumer(MAX_NESTED_QUERY_DEPTH_SETTING, this::setMaxNestedQueryDepth); scopedSettings.addSettingsUpdateConsumer(MAX_SLICES_PER_SCROLL, this::setMaxSlicesPerScroll); scopedSettings.addSettingsUpdateConsumer(MAX_SLICES_PER_PIT, this::setMaxSlicesPerPit); scopedSettings.addSettingsUpdateConsumer(DEFAULT_FIELD_SETTING, this::setDefaultFields); @@ -904,6 +1066,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti scopedSettings.addSettingsUpdateConsumer(FINAL_PIPELINE, this::setRequiredPipeline); scopedSettings.addSettingsUpdateConsumer(INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING, this::setSoftDeleteRetentionOperations); scopedSettings.addSettingsUpdateConsumer(INDEX_SEARCH_THROTTLED, this::setSearchThrottled); + scopedSettings.addSettingsUpdateConsumer(INDEX_UNREFERENCED_FILE_CLEANUP, this::setShouldCleanupUnreferencedFiles); scopedSettings.addSettingsUpdateConsumer(INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING, this::setRetentionLeaseMillis); scopedSettings.addSettingsUpdateConsumer(INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING, this::setMappingNestedFieldsLimit); scopedSettings.addSettingsUpdateConsumer(INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING, this::setMappingNestedDocsLimit); @@ -918,9 +1081,18 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, this::setRemoteTranslogUploadBufferInterval ); + scopedSettings.addSettingsUpdateConsumer(INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING, this::setRemoteTranslogKeepExtraGen); + scopedSettings.addSettingsUpdateConsumer(INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING, this::setEnableFuzzySetForDocId); + scopedSettings.addSettingsUpdateConsumer( + INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING, + this::setDocIdFuzzySetFalsePositiveProbability + ); } private void setSearchIdleAfter(TimeValue searchIdleAfter) { + if (this.isRemoteStoreEnabled) { + logger.warn("Search idle is not supported for remote backed indices"); + } if (this.replicationType == ReplicationType.SEGMENT && this.getNumberOfReplicas() > 0) { logger.warn("Search idle is not supported for indices with replicas using 'replication.type: SEGMENT'"); } @@ -1045,11 +1217,11 @@ public boolean isSegRepEnabled() { } public boolean isSegRepLocalEnabled() { - return isSegRepEnabled() && !isSegRepWithRemoteEnabled(); + return isSegRepEnabled() && !isRemoteStoreEnabled(); } public boolean isSegRepWithRemoteEnabled() { - return isSegRepEnabled() && isRemoteStoreEnabled() && FeatureFlags.isEnabled(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL); + return isSegRepEnabled() && isRemoteStoreEnabled(); } /** @@ -1146,7 +1318,9 @@ public synchronized boolean updateIndexMetadata(IndexMetadata indexMetadata) { */ public static boolean same(final Settings left, final Settings right) { return left.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE) - .equals(right.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE)); + 
.equals(right.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE)) + && left.filter(IndexScopedSettings.ARCHIVED_SETTINGS_KEY_PREDICATE) + .equals(right.filter(IndexScopedSettings.ARCHIVED_SETTINGS_KEY_PREDICATE)); } /** @@ -1192,10 +1366,25 @@ public TimeValue getRemoteTranslogUploadBufferInterval() { return remoteTranslogUploadBufferInterval; } + public int getRemoteTranslogExtraKeep() { + return remoteTranslogKeepExtraGen; + } + + /** + * Returns true iff the remote translog buffer interval setting exists or in other words is explicitly set. + */ + public boolean isRemoteTranslogBufferIntervalExplicit() { + return INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.exists(settings); + } + public void setRemoteTranslogUploadBufferInterval(TimeValue remoteTranslogUploadBufferInterval) { this.remoteTranslogUploadBufferInterval = remoteTranslogUploadBufferInterval; } + public void setRemoteTranslogKeepExtraGen(int extraGen) { + this.remoteTranslogKeepExtraGen = extraGen; + } + /** * Returns this interval in which the shards of this index are asynchronously refreshed. {@code -1} means async refresh is disabled. */ @@ -1383,6 +1572,17 @@ private void setMaxTermsCount(int maxTermsCount) { this.maxTermsCount = maxTermsCount; } + /** + * @return max level of nested queries and documents + */ + public int getMaxNestedQueryDepth() { + return this.maxNestedQueryDepth; + } + + private void setMaxNestedQueryDepth(int maxNestedQueryDepth) { + this.maxNestedQueryDepth = maxNestedQueryDepth; + } + /** * Returns the maximum number of allowed script_fields to retrieve in a search request */ @@ -1403,9 +1603,43 @@ public long getGcDeletesInMillis() { /** * Returns the merge policy that should be used for this index. - */ - public MergePolicy getMergePolicy() { - return mergePolicyConfig.getMergePolicy(); + * @param isTimeSeriesIndex true if index contains @timestamp field + */ + public MergePolicy getMergePolicy(boolean isTimeSeriesIndex) { + String indexScopedPolicy = scopedSettings.get(INDEX_MERGE_POLICY); + MergePolicyProvider mergePolicyProvider = null; + IndexMergePolicy indexMergePolicy = IndexMergePolicy.fromString(indexScopedPolicy); + switch (indexMergePolicy) { + case TIERED: + mergePolicyProvider = tieredMergePolicyProvider; + break; + case LOG_BYTE_SIZE: + mergePolicyProvider = logByteSizeMergePolicyProvider; + break; + case DEFAULT_POLICY: + if (isTimeSeriesIndex) { + String nodeScopedTimeSeriesIndexPolicy = TIME_SERIES_INDEX_MERGE_POLICY.get(nodeSettings); + IndexMergePolicy nodeMergePolicy = IndexMergePolicy.fromString(nodeScopedTimeSeriesIndexPolicy); + switch (nodeMergePolicy) { + case TIERED: + case DEFAULT_POLICY: + mergePolicyProvider = tieredMergePolicyProvider; + break; + case LOG_BYTE_SIZE: + mergePolicyProvider = logByteSizeMergePolicyProvider; + break; + } + } else { + mergePolicyProvider = tieredMergePolicyProvider; + } + break; + } + assert mergePolicyProvider != null : "should not happen as validation for invalid merge policy values " + + "are part of setting definition"; + if (logger.isTraceEnabled()) { + logger.trace("Index: " + this.index.getName() + ", Merge policy used: " + mergePolicyProvider); + } + return mergePolicyProvider.getMergePolicy(); } public <T> T getValue(Setting<T> setting) { @@ -1527,6 +1761,18 @@ private void setSearchThrottled(boolean searchThrottled) { this.searchThrottled = searchThrottled; } + /** + * Returns true if unreferenced files should be cleaned up on merge failure for this index. 
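Condensed, the selection in getMergePolicy(boolean) works out to: an explicit index.merge.policy wins; if it is left at default, a time-series index falls back to the node-level indices.time_series_index.default_index_merge_policy; everything else gets the tiered policy. A simplified restatement of that decision using plain strings rather than the enum (illustrative only, not the class's actual code):

    // Simplified restatement of IndexSettings#getMergePolicy(boolean).
    public class MergePolicySelectionDemo {
        static String resolve(String indexScoped, String nodeScopedTimeSeries, boolean isTimeSeriesIndex) {
            if (!"default".equals(indexScoped)) {
                return indexScoped; // explicit per-index choice: "tiered" or "log_byte_size"
            }
            if (isTimeSeriesIndex && "log_byte_size".equals(nodeScopedTimeSeries)) {
                return "log_byte_size"; // node-level time-series default applies
            }
            return "tiered"; // the overall default
        }

        public static void main(String[] args) {
            System.out.println(resolve("default", "log_byte_size", true));  // log_byte_size
            System.out.println(resolve("default", "log_byte_size", false)); // tiered
            System.out.println(resolve("tiered", "log_byte_size", true));   // tiered
        }
    }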
+ * + */ + public boolean shouldCleanupUnreferencedFiles() { + return shouldCleanupUnreferencedFiles; + } + + private void setShouldCleanupUnreferencedFiles(boolean shouldCleanupUnreferencedFiles) { + this.shouldCleanupUnreferencedFiles = shouldCleanupUnreferencedFiles; + } + public long getMappingNestedFieldsLimit() { return mappingNestedFieldsLimit; } @@ -1584,7 +1830,7 @@ public boolean isMergeOnFlushEnabled() { } private void setMergeOnFlushPolicy(String policy) { - if (Strings.isEmpty(policy) || MERGE_ON_FLUSH_DEFAULT_POLICY.equalsIgnoreCase(policy)) { + if (Strings.isEmpty(policy) || DEFAULT_POLICY.equalsIgnoreCase(policy)) { mergeOnFlushPolicy = null; } else if (MERGE_ON_FLUSH_MERGE_POLICY.equalsIgnoreCase(policy)) { this.mergeOnFlushPolicy = MergeOnFlushMergePolicy::new; @@ -1595,7 +1841,7 @@ private void setMergeOnFlushPolicy(String policy) { + " has unsupported policy specified: " + policy + ". Please use one of: " - + MERGE_ON_FLUSH_DEFAULT_POLICY + + DEFAULT_POLICY + ", " + MERGE_ON_FLUSH_MERGE_POLICY ); @@ -1613,4 +1859,44 @@ public String getDefaultSearchPipeline() { public void setDefaultSearchPipeline(String defaultSearchPipeline) { this.defaultSearchPipeline = defaultSearchPipeline; } + + /** + * Returns true if we need to maintain backward compatibility for index sorted indices created prior to version 2.7 + * @return boolean + */ + public boolean shouldWidenIndexSortType() { + return this.widenIndexSortType; + } + + public boolean isEnableFuzzySetForDocId() { + return enableFuzzySetForDocId; + } + + public void setEnableFuzzySetForDocId(boolean enableFuzzySetForDocId) { + verifyFeatureToSetDocIdFuzzySetSetting(enabled -> this.enableFuzzySetForDocId = enabled, enableFuzzySetForDocId); + } + + public double getDocIdFuzzySetFalsePositiveProbability() { + return docIdFuzzySetFalsePositiveProbability; + } + + public void setDocIdFuzzySetFalsePositiveProbability(double docIdFuzzySetFalsePositiveProbability) { + verifyFeatureToSetDocIdFuzzySetSetting( + fpp -> this.docIdFuzzySetFalsePositiveProbability = fpp, + docIdFuzzySetFalsePositiveProbability + ); + } + + private static <T> void verifyFeatureToSetDocIdFuzzySetSetting(Consumer<T> settingUpdater, T val) { + if (FeatureFlags.isEnabled(DOC_ID_FUZZY_SET_SETTING)) { + settingUpdater.accept(val); + } else { + throw new IllegalArgumentException( + "Fuzzy set for optimizing doc id lookup " + + "cannot be enabled with feature flag [" + + FeatureFlags.DOC_ID_FUZZY_SET + + "] set to false" + ); + } + } } diff --git a/server/src/main/java/org/opensearch/index/IndexSortConfig.java b/server/src/main/java/org/opensearch/index/IndexSortConfig.java index f73f96df4f9ad..af2e22c4aad53 100644 --- a/server/src/main/java/org/opensearch/index/IndexSortConfig.java +++ b/server/src/main/java/org/opensearch/index/IndexSortConfig.java @@ -36,6 +36,7 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.SortedSetSortField; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.index.fielddata.IndexFieldData; @@ -53,7 +54,7 @@ /** * Holds all the information that is used to build the sort order of an index. - * + * <p> * The index sort settings are <b>final</b> and can be defined only at index creation. 
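The fuzzy-set setters above funnel every dynamic update through verifyFeatureToSetDocIdFuzzySetSetting, which rejects changes while the feature flag is off. A stripped-down sketch of that guard pattern, with the flag inlined as a boolean and the message paraphrased:

    import java.util.function.Consumer;

    public class FeatureGatedUpdateDemo {
        private static final boolean FUZZY_SET_FEATURE_ENABLED = false; // stands in for FeatureFlags.isEnabled(...)

        // Apply the update only when the owning feature flag is enabled.
        private static <T> void updateIfFeatureEnabled(Consumer<T> updater, T value) {
            if (FUZZY_SET_FEATURE_ENABLED) {
                updater.accept(value);
            } else {
                throw new IllegalArgumentException("cannot update setting while its feature flag is disabled");
            }
        }

        public static void main(String[] args) {
            double[] fpp = { 0.20 };
            try {
                updateIfFeatureEnabled(v -> fpp[0] = v, 0.10);
            } catch (IllegalArgumentException e) {
                System.out.println(e.getMessage()); // update rejected, fpp stays 0.20
            }
        }
    }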
* These settings are divided in four lists that are merged during the initialization of this class: * <ul> @@ -69,8 +70,10 @@ * </li> * </ul> * - * @opensearch.internal -**/ + * + * @opensearch.api + */ +@PublicApi(since = "1.0.0") public final class IndexSortConfig { /** * The list of field names @@ -200,6 +203,7 @@ public boolean hasPrimarySortOnField(String field) { * or returns null if this index has no sort. */ public Sort buildIndexSort( + boolean shouldWidenIndexSortType, Function<String, MappedFieldType> fieldTypeLookup, BiFunction<MappedFieldType, Supplier<SearchLookup>, IndexFieldData<?>> fieldDataLookup ) { @@ -230,7 +234,11 @@ public Sort buildIndexSort( if (fieldData == null) { throw new IllegalArgumentException("docvalues not found for index sort field:[" + sortSpec.field + "]"); } - sortFields[i] = fieldData.sortField(sortSpec.missingValue, mode, null, reverse); + if (shouldWidenIndexSortType == true) { + sortFields[i] = fieldData.wideSortField(sortSpec.missingValue, mode, null, reverse); + } else { + sortFields[i] = fieldData.sortField(sortSpec.missingValue, mode, null, reverse); + } validateIndexSortField(sortFields[i]); } return new Sort(sortFields); diff --git a/server/src/main/java/org/opensearch/index/IndexWarmer.java b/server/src/main/java/org/opensearch/index/IndexWarmer.java index b62afe6b6dcfc..81063cb9b9c38 100644 --- a/server/src/main/java/org/opensearch/index/IndexWarmer.java +++ b/server/src/main/java/org/opensearch/index/IndexWarmer.java @@ -35,6 +35,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; import org.opensearch.common.unit.TimeValue; import org.opensearch.index.fielddata.IndexFieldData; @@ -111,8 +112,9 @@ void warm(OpenSearchDirectoryReader reader, IndexShard shard, IndexSettings sett /** * A handle on the execution of warm-up action. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface TerminationHandle { TerminationHandle NO_WAIT = () -> {}; @@ -124,8 +126,9 @@ public interface TerminationHandle { /** * Listener for the index warmer * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface Listener { /** Queue tasks to warm-up the given segments and return handles that allow to wait for termination of the * execution of those tasks. 
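The new shouldWidenIndexSortType flag threads into buildIndexSort: indices created before 2.7 get fieldData.wideSortField(...), newer ones the exact-typed sortField(...). A schematic of that per-field choice, with the IndexFieldData API reduced to two hypothetical factory methods:

    import org.apache.lucene.search.SortField;

    // Schematic of the widening decision in IndexSortConfig#buildIndexSort (API simplified).
    interface SortFieldFactory {
        SortField sortField();     // exact-typed sort, the 2.7+ behavior
        SortField wideSortField(); // numeric type widened (e.g. int -> long) for older segments
    }

    final class IndexSortBwc {
        static SortField pick(SortFieldFactory fieldData, boolean shouldWidenIndexSortType) {
            // Pre-2.7 segments recorded widened sort types, so reads must widen too.
            return shouldWidenIndexSortType ? fieldData.wideSortField() : fieldData.sortField();
        }
    }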
*/ diff --git a/server/src/main/java/org/opensearch/index/IndexingPressure.java b/server/src/main/java/org/opensearch/index/IndexingPressure.java index 33be340feb335..4e55c941cb684 100644 --- a/server/src/main/java/org/opensearch/index/IndexingPressure.java +++ b/server/src/main/java/org/opensearch/index/IndexingPressure.java @@ -35,10 +35,10 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.common.inject.Inject; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.common.lease.Releasable; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.index.stats.IndexingPressureStats; diff --git a/server/src/main/java/org/opensearch/index/IndexingPressureService.java b/server/src/main/java/org/opensearch/index/IndexingPressureService.java index 35e022df22694..898c3686d7a84 100644 --- a/server/src/main/java/org/opensearch/index/IndexingPressureService.java +++ b/server/src/main/java/org/opensearch/index/IndexingPressureService.java @@ -7,8 +7,9 @@ import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.settings.Settings; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lease.Releasable; +import org.opensearch.common.settings.Settings; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.stats.IndexingPressureStats; import org.opensearch.index.stats.ShardIndexingPressureStats; @@ -19,8 +20,9 @@ * Sets up classes for node/shard level indexing pressure. * Provides abstraction and orchestration for indexing pressure interfaces when called from Transport Actions or for Stats. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.3.0") public class IndexingPressureService { private final ShardIndexingPressure shardIndexingPressure; diff --git a/server/src/main/java/org/opensearch/index/IndexingSlowLog.java b/server/src/main/java/org/opensearch/index/IndexingSlowLog.java index 86c5d4542e5fa..bc42fa304e2c5 100644 --- a/server/src/main/java/org/opensearch/index/IndexingSlowLog.java +++ b/server/src/main/java/org/opensearch/index/IndexingSlowLog.java @@ -36,18 +36,19 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.util.StringBuilders; import org.opensearch.common.Booleans; -import org.opensearch.common.Strings; -import org.opensearch.common.logging.OpenSearchLogMessage; import org.opensearch.common.logging.Loggers; +import org.opensearch.common.logging.OpenSearchLogMessage; +import org.opensearch.common.logging.SlowLogLevel; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.common.Strings; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.engine.Engine; import org.opensearch.index.mapper.ParsedDocument; import org.opensearch.index.shard.IndexingOperationListener; -import org.opensearch.core.index.shard.ShardId; import java.io.IOException; import java.io.UncheckedIOException; diff --git a/server/src/main/java/org/opensearch/index/IndicesModuleRequest.java b/server/src/main/java/org/opensearch/index/IndicesModuleRequest.java index 1335eb529d2f0..f07aa01cf05f9 100644 --- a/server/src/main/java/org/opensearch/index/IndicesModuleRequest.java +++ b/server/src/main/java/org/opensearch/index/IndicesModuleRequest.java @@ -8,9 +8,9 @@ package org.opensearch.index; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.settings.Settings; import org.opensearch.core.index.Index; import org.opensearch.transport.TransportRequest; diff --git a/server/src/main/java/org/opensearch/index/IndicesModuleResponse.java b/server/src/main/java/org/opensearch/index/IndicesModuleResponse.java index 67f2c686dbf8b..3d2340ed35a8c 100644 --- a/server/src/main/java/org/opensearch/index/IndicesModuleResponse.java +++ b/server/src/main/java/org/opensearch/index/IndicesModuleResponse.java @@ -10,7 +10,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; import java.util.Objects; diff --git a/server/src/main/java/org/opensearch/index/LogByteSizeMergePolicyProvider.java b/server/src/main/java/org/opensearch/index/LogByteSizeMergePolicyProvider.java new file mode 100644 index 0000000000000..0b762d781957c --- /dev/null +++ b/server/src/main/java/org/opensearch/index/LogByteSizeMergePolicyProvider.java @@ -0,0 +1,166 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index; + +import org.apache.logging.log4j.Logger; +import org.apache.lucene.index.LogByteSizeMergePolicy; +import org.apache.lucene.index.MergePolicy; +import org.apache.lucene.index.NoMergePolicy; +import org.opensearch.common.settings.Setting; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; + +import static org.apache.lucene.index.LogMergePolicy.DEFAULT_MAX_MERGE_DOCS; +import static org.apache.lucene.index.LogMergePolicy.DEFAULT_NO_CFS_RATIO; + +/** + * <p> + * The LogByteSizeMergePolicy is an alternative merge policy used here primarily to optimize the merging of segments for + * indexes whose documents carry timestamps. + * While the TieredMergePolicy is the default choice, the LogByteSizeMergePolicy can be selected per index via the + * <code>index.merge.policy</code> setting, or made the default for time-series indexes via the node-level + * <code>indices.time_series_index.default_index_merge_policy</code> setting. + * + * <p> + * Unlike the TieredMergePolicy, which prioritizes merging segments of equal size, the LogByteSizeMergePolicy + * specializes in merging adjacent segments efficiently. + * This characteristic makes it particularly well suited for range queries on time-series data. + * Adjacent segments in time-series data typically contain documents with similar timestamps, so the segment produced + * by merging them covers a timestamp range with reduced overlap relative to its neighbors. This reduced overlap + * persists even as segments grow older and larger, which can significantly benefit range queries on timestamps. + * + * <p> + * In contrast, the TieredMergePolicy does not honor this timestamp-range optimization. It focuses on merging segments + * of equal size and does not consider adjacency. Consequently, as segments grow older and larger, + * the overlap of timestamp ranges among adjacent segments managed by the TieredMergePolicy can increase. + * This can make range queries on timestamps less efficient, as the number of segments to be scanned + * within a given timestamp range can become high.
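Underneath, the provider configures Lucene's LogByteSizeMergePolicy with the defaults registered below. As a feel for the knobs involved, the same policy can be tuned directly with plain Lucene, no OpenSearch wiring (values mirror this file's defaults):

    import org.apache.lucene.index.LogByteSizeMergePolicy;

    public class LbsPolicyDemo {
        public static void main(String[] args) {
            LogByteSizeMergePolicy policy = new LogByteSizeMergePolicy();
            policy.setMergeFactor(10);        // index.merge.log_byte_size_policy.merge_factor default
            policy.setMinMergeMB(2.0);        // index.merge.log_byte_size_policy.min_merge default (2mb)
            policy.setMaxMergeMB(5 * 1024.0); // index.merge.log_byte_size_policy.max_merge_segment default (5gb)
            System.out.println(policy.getMergeFactor()); // 10
        }
    }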
+ * + * @opensearch.internal + */ +public class LogByteSizeMergePolicyProvider implements MergePolicyProvider { + private final LogByteSizeMergePolicy logByteSizeMergePolicy = new LogByteSizeMergePolicy(); + + private final Logger logger; + private final boolean mergesEnabled; + + public static final ByteSizeValue DEFAULT_MIN_MERGE = new ByteSizeValue(2, ByteSizeUnit.MB); + public static final int DEFAULT_MERGE_FACTOR = 10; + + public static final ByteSizeValue DEFAULT_MAX_MERGED_SEGMENT = new ByteSizeValue(5, ByteSizeUnit.GB); + + public static final ByteSizeValue DEFAULT_MAX_MERGE_SEGMENT_FORCE_MERGE = new ByteSizeValue(Long.MAX_VALUE); + + public static final Setting<Integer> INDEX_LBS_MERGE_POLICY_MERGE_FACTOR_SETTING = Setting.intSetting( + "index.merge.log_byte_size_policy.merge_factor", + DEFAULT_MERGE_FACTOR, // keeping it same as default max merge at once for tiered merge policy + 2, + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + public static final Setting<ByteSizeValue> INDEX_LBS_MERGE_POLICY_MIN_MERGE_SETTING = Setting.byteSizeSetting( + "index.merge.log_byte_size_policy.min_merge", + DEFAULT_MIN_MERGE, // keeping it same as default floor segment for tiered merge policy + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + public static final Setting<ByteSizeValue> INDEX_LBS_MAX_MERGE_SEGMENT_SETTING = Setting.byteSizeSetting( + "index.merge.log_byte_size_policy.max_merge_segment", + DEFAULT_MAX_MERGED_SEGMENT, // keeping default same as tiered merge policy + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + public static final Setting<ByteSizeValue> INDEX_LBS_MAX_MERGE_SEGMENT_FOR_FORCED_MERGE_SETTING = Setting.byteSizeSetting( + "index.merge.log_byte_size_policy.max_merge_segment_forced_merge", + DEFAULT_MAX_MERGE_SEGMENT_FORCE_MERGE, + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + public static final Setting<Integer> INDEX_LBS_MAX_MERGED_DOCS_SETTING = Setting.intSetting( + "index.merge.log_byte_size_policy.max_merged_docs", + DEFAULT_MAX_MERGE_DOCS, + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + public static final Setting<Double> INDEX_LBS_NO_CFS_RATIO_SETTING = new Setting<>( + "index.merge.log_byte_size_policy.no_cfs_ratio", + Double.toString(DEFAULT_NO_CFS_RATIO), + TieredMergePolicyProvider::parseNoCFSRatio, + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + LogByteSizeMergePolicyProvider(Logger logger, IndexSettings indexSettings) { + this.logger = logger; + this.mergesEnabled = indexSettings.getSettings().getAsBoolean(INDEX_MERGE_ENABLED, true); + + // Undocumented settings, works great with defaults + logByteSizeMergePolicy.setMergeFactor(indexSettings.getValue(INDEX_LBS_MERGE_POLICY_MERGE_FACTOR_SETTING)); + logByteSizeMergePolicy.setMinMergeMB(indexSettings.getValue(INDEX_LBS_MERGE_POLICY_MIN_MERGE_SETTING).getMbFrac()); + logByteSizeMergePolicy.setMaxMergeMB(indexSettings.getValue(INDEX_LBS_MAX_MERGE_SEGMENT_SETTING).getMbFrac()); + logByteSizeMergePolicy.setMaxMergeMBForForcedMerge( + indexSettings.getValue(INDEX_LBS_MAX_MERGE_SEGMENT_FOR_FORCED_MERGE_SETTING).getMbFrac() + ); + logByteSizeMergePolicy.setMaxMergeDocs(indexSettings.getValue(INDEX_LBS_MAX_MERGED_DOCS_SETTING)); + logByteSizeMergePolicy.setNoCFSRatio(indexSettings.getValue(INDEX_LBS_NO_CFS_RATIO_SETTING)); + } + + @Override + public MergePolicy getMergePolicy() { + return mergesEnabled ? 
logByteSizeMergePolicy : NoMergePolicy.INSTANCE; + } + + void setLBSMergeFactor(int mergeFactor) { + logByteSizeMergePolicy.setMergeFactor(mergeFactor); + } + + void setLBSMaxMergeSegment(ByteSizeValue maxMergeSegment) { + logByteSizeMergePolicy.setMaxMergeMB(maxMergeSegment.getMbFrac()); + } + + void setLBSMinMergedMB(ByteSizeValue minMergedSize) { + logByteSizeMergePolicy.setMinMergeMB(minMergedSize.getMbFrac()); + } + + void setLBSMaxMergeMBForForcedMerge(ByteSizeValue maxMergeForcedMerge) { + logByteSizeMergePolicy.setMaxMergeMBForForcedMerge(maxMergeForcedMerge.getMbFrac()); + } + + void setLBSMaxMergeDocs(int maxMergeDocs) { + logByteSizeMergePolicy.setMaxMergeDocs(maxMergeDocs); + } + + void setLBSNoCFSRatio(Double noCFSRatio) { + logByteSizeMergePolicy.setNoCFSRatio(noCFSRatio); + } + + @Override + public String toString() { + return "LogByteSizeMergePolicyProvider{" + + "mergeFactor=" + + logByteSizeMergePolicy.getMergeFactor() + + ", minMergeMB=" + + logByteSizeMergePolicy.getMinMergeMB() + + ", maxMergeMB=" + + logByteSizeMergePolicy.getMaxMergeMB() + + ", maxMergeMBForForcedMerge=" + + logByteSizeMergePolicy.getMaxMergeMBForForcedMerge() + + ", maxMergedDocs=" + + logByteSizeMergePolicy.getMaxMergeDocs() + + ", noCFSRatio=" + + logByteSizeMergePolicy.getNoCFSRatio() + + '}'; + } + +} diff --git a/server/src/main/java/org/opensearch/index/MergePolicyConfig.java b/server/src/main/java/org/opensearch/index/MergePolicyConfig.java deleted file mode 100644 index d0416aaf54a40..0000000000000 --- a/server/src/main/java/org/opensearch/index/MergePolicyConfig.java +++ /dev/null @@ -1,313 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.index; - -import org.apache.logging.log4j.Logger; -import org.apache.lucene.index.MergePolicy; -import org.apache.lucene.index.NoMergePolicy; -import org.apache.lucene.index.TieredMergePolicy; -import org.opensearch.common.settings.Setting; -import org.opensearch.common.settings.Setting.Property; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; - -/** - * A shard in opensearch is a Lucene index, and a Lucene index is broken - * down into segments. Segments are internal storage elements in the index - * where the index data is stored, and are immutable up to delete markers. - * Segments are, periodically, merged into larger segments to keep the - * index size at bay and expunge deletes. 
- * - * <p> - * Merges select segments of approximately equal size, subject to an allowed - * number of segments per tier. The merge policy is able to merge - * non-adjacent segments, and separates how many segments are merged at once from how many - * segments are allowed per tier. It also does not over-merge (i.e., cascade merges). - * - * <p> - * All merge policy settings are <b>dynamic</b> and can be updated on a live index. - * The merge policy has the following settings: - * - * <ul> - * <li><code>index.merge.policy.expunge_deletes_allowed</code>: - * - * When expungeDeletes is called, we only merge away a segment if its delete - * percentage is over this threshold. Default is <code>10</code>. - * - * <li><code>index.merge.policy.floor_segment</code>: - * - * Segments smaller than this are "rounded up" to this size, i.e. treated as - * equal (floor) size for merge selection. This is to prevent frequent - * flushing of tiny segments, thus preventing a long tail in the index. Default - * is <code>2mb</code>. - * - * <li><code>index.merge.policy.max_merge_at_once</code>: - * - * Maximum number of segments to be merged at a time during "normal" merging. - * Default is <code>10</code>. - * - * <li><code>index.merge.policy.max_merged_segment</code>: - * - * Maximum sized segment to produce during normal merging (not explicit - * force merge). This setting is approximate: the estimate of the merged - * segment size is made by summing sizes of to-be-merged segments - * (compensating for percent deleted docs). Default is <code>5gb</code>. - * - * <li><code>index.merge.policy.segments_per_tier</code>: - * - * Sets the allowed number of segments per tier. Smaller values mean more - * merging but fewer segments. Default is <code>10</code>. Note, this value needs to be - * >= than the <code>max_merge_at_once</code> otherwise you'll force too many merges to - * occur. - * - * <li><code>index.merge.policy.deletes_pct_allowed</code>: - * - * Controls the maximum percentage of deleted documents that is tolerated in - * the index. Lower values make the index more space efficient at the - * expense of increased CPU and I/O activity. Values must be between <code>5</code> and - * <code>50</code>. Default value is <code>20</code>. - * </ul> - * - * <p> - * For normal merging, the policy first computes a "budget" of how many - * segments are allowed to be in the index. If the index is over-budget, - * then the policy sorts segments by decreasing size (proportionally considering percent - * deletes), and then finds the least-cost merge. Merge cost is measured by - * a combination of the "skew" of the merge (size of largest seg divided by - * smallest seg), total merge size and pct deletes reclaimed, so that - * merges with lower skew, smaller size and those reclaiming more deletes, - * are favored. - * - * <p> - * If a merge will produce a segment that's larger than - * <code>max_merged_segment</code> then the policy will merge fewer segments (down to - * 1 at once, if that one has deletions) to keep the segment size under - * budget. - * - * <p> - * Note, this can mean that for large shards that holds many gigabytes of - * data, the default of <code>max_merged_segment</code> (<code>5gb</code>) can cause for many - * segments to be in an index, and causing searches to be slower. 
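As rough intuition for the "budget" computation described above: with a 2mb floor and 10 segments per tier, the allowed segment count grows roughly logarithmically with index size. The sketch below is an illustrative approximation only, not Lucene's exact computation, which also weighs deletes and merge skew:

    public class SegmentBudgetDemo {
        // Approximate allowed segment count: up to segmentsPerTier segments per size
        // tier, with tier sizes stepping up by ~segmentsPerTier from the floor size.
        static int approxBudget(double indexMb, double floorMb, double segsPerTier) {
            int budget = 0;
            double levelSize = floorMb;
            double remaining = indexMb;
            while (remaining > 0) {
                int segsThisLevel = (int) Math.min(segsPerTier, Math.ceil(remaining / levelSize));
                budget += segsThisLevel;
                remaining -= segsThisLevel * levelSize;
                levelSize *= segsPerTier; // next tier is ~10x larger
            }
            return budget;
        }

        public static void main(String[] args) {
            System.out.println(approxBudget(10_240, 2, 10)); // prints 35: ~10gb index -> ~35 segments
        }
    }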
Use the - * indices segments API to see the segments that an index has, and - * possibly either increase the <code>max_merged_segment</code> or issue an optimize - * call for the index (try and aim to issue it on a low traffic time). - * - * @opensearch.internal - */ - -public final class MergePolicyConfig { - private final OpenSearchTieredMergePolicy mergePolicy = new OpenSearchTieredMergePolicy(); - private final Logger logger; - private final boolean mergesEnabled; - - public static final double DEFAULT_EXPUNGE_DELETES_ALLOWED = 10d; - public static final ByteSizeValue DEFAULT_FLOOR_SEGMENT = new ByteSizeValue(2, ByteSizeUnit.MB); - public static final int DEFAULT_MAX_MERGE_AT_ONCE = 10; - public static final ByteSizeValue DEFAULT_MAX_MERGED_SEGMENT = new ByteSizeValue(5, ByteSizeUnit.GB); - public static final double DEFAULT_SEGMENTS_PER_TIER = 10.0d; - public static final double DEFAULT_RECLAIM_DELETES_WEIGHT = 2.0d; - public static final double DEFAULT_DELETES_PCT_ALLOWED = 20.0d; - public static final Setting<Double> INDEX_COMPOUND_FORMAT_SETTING = new Setting<>( - "index.compound_format", - Double.toString(TieredMergePolicy.DEFAULT_NO_CFS_RATIO), - MergePolicyConfig::parseNoCFSRatio, - Property.Dynamic, - Property.IndexScope - ); - - public static final Setting<Double> INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING = Setting.doubleSetting( - "index.merge.policy.expunge_deletes_allowed", - DEFAULT_EXPUNGE_DELETES_ALLOWED, - 0.0d, - Property.Dynamic, - Property.IndexScope - ); - public static final Setting<ByteSizeValue> INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING = Setting.byteSizeSetting( - "index.merge.policy.floor_segment", - DEFAULT_FLOOR_SEGMENT, - Property.Dynamic, - Property.IndexScope - ); - public static final Setting<Integer> INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING = Setting.intSetting( - "index.merge.policy.max_merge_at_once", - DEFAULT_MAX_MERGE_AT_ONCE, - 2, - Property.Dynamic, - Property.IndexScope - ); - public static final Setting<ByteSizeValue> INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING = Setting.byteSizeSetting( - "index.merge.policy.max_merged_segment", - DEFAULT_MAX_MERGED_SEGMENT, - Property.Dynamic, - Property.IndexScope - ); - public static final Setting<Double> INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING = Setting.doubleSetting( - "index.merge.policy.segments_per_tier", - DEFAULT_SEGMENTS_PER_TIER, - 2.0d, - Property.Dynamic, - Property.IndexScope - ); - public static final Setting<Double> INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING = Setting.doubleSetting( - "index.merge.policy.reclaim_deletes_weight", - DEFAULT_RECLAIM_DELETES_WEIGHT, - 0.0d, - Property.Dynamic, - Property.IndexScope, - Property.Deprecated - ); - public static final Setting<Double> INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING = Setting.doubleSetting( - "index.merge.policy.deletes_pct_allowed", - DEFAULT_DELETES_PCT_ALLOWED, - 5.0d, - 50.0d, - Property.Dynamic, - Property.IndexScope - ); - // don't convert to Setting<> and register... 
we only set this in tests and register via a plugin - public static final String INDEX_MERGE_ENABLED = "index.merge.enabled"; - - MergePolicyConfig(Logger logger, IndexSettings indexSettings) { - this.logger = logger; - double forceMergeDeletesPctAllowed = indexSettings.getValue(INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING); // percentage - ByteSizeValue floorSegment = indexSettings.getValue(INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING); - int maxMergeAtOnce = indexSettings.getValue(INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING); - // TODO is this really a good default number for max_merge_segment, what happens for large indices, - // won't they end up with many segments? - ByteSizeValue maxMergedSegment = indexSettings.getValue(INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING); - double segmentsPerTier = indexSettings.getValue(INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING); - double reclaimDeletesWeight = indexSettings.getValue(INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING); - double deletesPctAllowed = indexSettings.getValue(INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING); - this.mergesEnabled = indexSettings.getSettings().getAsBoolean(INDEX_MERGE_ENABLED, true); - if (mergesEnabled == false) { - logger.warn( - "[{}] is set to false, this should only be used in tests and can cause serious problems in production" + " environments", - INDEX_MERGE_ENABLED - ); - } - maxMergeAtOnce = adjustMaxMergeAtOnceIfNeeded(maxMergeAtOnce, segmentsPerTier); - mergePolicy.setNoCFSRatio(indexSettings.getValue(INDEX_COMPOUND_FORMAT_SETTING)); - mergePolicy.setForceMergeDeletesPctAllowed(forceMergeDeletesPctAllowed); - mergePolicy.setFloorSegmentMB(floorSegment.getMbFrac()); - mergePolicy.setMaxMergeAtOnce(maxMergeAtOnce); - mergePolicy.setMaxMergedSegmentMB(maxMergedSegment.getMbFrac()); - mergePolicy.setSegmentsPerTier(segmentsPerTier); - mergePolicy.setDeletesPctAllowed(deletesPctAllowed); - if (logger.isTraceEnabled()) { - logger.trace( - "using [tiered] merge mergePolicy with expunge_deletes_allowed[{}], floor_segment[{}]," - + " max_merge_at_once[{}], max_merged_segment[{}], segments_per_tier[{}]," - + " deletes_pct_allowed[{}]", - forceMergeDeletesPctAllowed, - floorSegment, - maxMergeAtOnce, - maxMergedSegment, - segmentsPerTier, - deletesPctAllowed - ); - } - } - - void setSegmentsPerTier(Double segmentsPerTier) { - mergePolicy.setSegmentsPerTier(segmentsPerTier); - } - - void setMaxMergedSegment(ByteSizeValue maxMergedSegment) { - mergePolicy.setMaxMergedSegmentMB(maxMergedSegment.getMbFrac()); - } - - void setMaxMergesAtOnce(Integer maxMergeAtOnce) { - mergePolicy.setMaxMergeAtOnce(maxMergeAtOnce); - } - - void setFloorSegmentSetting(ByteSizeValue floorSegementSetting) { - mergePolicy.setFloorSegmentMB(floorSegementSetting.getMbFrac()); - } - - void setExpungeDeletesAllowed(Double value) { - mergePolicy.setForceMergeDeletesPctAllowed(value); - } - - void setNoCFSRatio(Double noCFSRatio) { - mergePolicy.setNoCFSRatio(noCFSRatio); - } - - void setDeletesPctAllowed(Double deletesPctAllowed) { - mergePolicy.setDeletesPctAllowed(deletesPctAllowed); - } - - private int adjustMaxMergeAtOnceIfNeeded(int maxMergeAtOnce, double segmentsPerTier) { - // fixing maxMergeAtOnce, see TieredMergePolicy#setMaxMergeAtOnce - if (!(segmentsPerTier >= maxMergeAtOnce)) { - int newMaxMergeAtOnce = (int) segmentsPerTier; - // max merge at once should be at least 2 - if (newMaxMergeAtOnce <= 1) { - newMaxMergeAtOnce = 2; - } - logger.debug( - "changing max_merge_at_once from [{}] to [{}] because segments_per_tier [{}] 
has to be higher or " + "equal to it", - maxMergeAtOnce, - newMaxMergeAtOnce, - segmentsPerTier - ); - maxMergeAtOnce = newMaxMergeAtOnce; - } - return maxMergeAtOnce; - } - - MergePolicy getMergePolicy() { - return mergesEnabled ? mergePolicy : NoMergePolicy.INSTANCE; - } - - private static double parseNoCFSRatio(String noCFSRatio) { - noCFSRatio = noCFSRatio.trim(); - if (noCFSRatio.equalsIgnoreCase("true")) { - return 1.0d; - } else if (noCFSRatio.equalsIgnoreCase("false")) { - return 0.0; - } else { - try { - double value = Double.parseDouble(noCFSRatio); - if (value < 0.0 || value > 1.0) { - throw new IllegalArgumentException("NoCFSRatio must be in the interval [0..1] but was: [" + value + "]"); - } - return value; - } catch (NumberFormatException ex) { - throw new IllegalArgumentException( - "Expected a boolean or a value in the interval [0..1] but was: " + "[" + noCFSRatio + "]", - ex - ); - } - } - } -} diff --git a/server/src/main/java/org/opensearch/index/MergePolicyProvider.java b/server/src/main/java/org/opensearch/index/MergePolicyProvider.java new file mode 100644 index 0000000000000..6f734314f758f --- /dev/null +++ b/server/src/main/java/org/opensearch/index/MergePolicyProvider.java @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index; + +import org.apache.lucene.index.MergePolicy; +import org.opensearch.common.annotation.InternalApi; + +/** + * A provider for obtaining merge policies used by OpenSearch indexes. + * + * @opensearch.internal + */ + +@InternalApi +public interface MergePolicyProvider { + // don't convert to Setting<> and register... we only set this in tests and register via a plugin + String INDEX_MERGE_ENABLED = "index.merge.enabled"; + + /** + * Gets the merge policy to be used for index. + * + * @return The merge policy instance. + */ + MergePolicy getMergePolicy(); +} diff --git a/server/src/main/java/org/opensearch/index/MergeSchedulerConfig.java b/server/src/main/java/org/opensearch/index/MergeSchedulerConfig.java index 9e170b448d641..a93a362a70c78 100644 --- a/server/src/main/java/org/opensearch/index/MergeSchedulerConfig.java +++ b/server/src/main/java/org/opensearch/index/MergeSchedulerConfig.java @@ -33,6 +33,7 @@ package org.opensearch.index; import org.apache.lucene.index.ConcurrentMergeScheduler; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; @@ -48,14 +49,14 @@ * * <ul> * <li> <code>index.merge.scheduler.max_thread_count</code>: - * + * <p> * The maximum number of threads that may be merging at once. Defaults to * <code>Math.max(1, Math.min(4, {@link OpenSearchExecutors#allocatedProcessors(Settings)} / 2))</code> * which works well for a good solid-state-disk (SSD). If your index is on * spinning platter drives instead, decrease this to 1. * * <li><code>index.merge.scheduler.auto_throttle</code>: - * + * <p> * If this is true (the default), then the merge scheduler will rate-limit IO * (writes) for merges to an adaptive value depending on how many merges are * requested over time. An application with a low indexing rate that @@ -64,8 +65,9 @@ * move higher to allow merges to keep up with ongoing indexing. 
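The max_thread_count default described above is a pure function of allocated processors. A one-method demonstration of the documented formula, with the OpenSearchExecutors lookup replaced by a plain int:

    public class MergeThreadsDemo {
        // Formula from the MergeSchedulerConfig javadoc: Math.max(1, Math.min(4, allocatedProcessors / 2)).
        static int defaultMaxThreadCount(int allocatedProcessors) {
            return Math.max(1, Math.min(4, allocatedProcessors / 2));
        }

        public static void main(String[] args) {
            for (int cores : new int[] { 1, 2, 4, 8, 16 }) {
                System.out.println(cores + " cores -> " + defaultMaxThreadCount(cores)); // caps at 4
            }
        }
    }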
* </ul> * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class MergeSchedulerConfig { public static final Setting<Integer> MAX_THREAD_COUNT_SETTING = new Setting<>( diff --git a/server/src/main/java/org/opensearch/index/OpenSearchTieredMergePolicy.java b/server/src/main/java/org/opensearch/index/OpenSearchTieredMergePolicy.java index 637282374de73..df1666e72f2ee 100644 --- a/server/src/main/java/org/opensearch/index/OpenSearchTieredMergePolicy.java +++ b/server/src/main/java/org/opensearch/index/OpenSearchTieredMergePolicy.java @@ -42,7 +42,7 @@ /** * Wrapper around {@link TieredMergePolicy} which doesn't respect - * {@link TieredMergePolicy#setMaxMergedSegmentMB(double)} on forced merges. + * {@link TieredMergePolicy#setMaxMergedSegmentMB(double)} on forced merges, but DOES respect it on only_expunge_deletes. * See https://issues.apache.org/jira/browse/LUCENE-7976. * * @opensearch.internal @@ -71,7 +71,7 @@ public MergeSpecification findForcedMerges( @Override public MergeSpecification findForcedDeletesMerges(SegmentInfos infos, MergeContext mergeContext) throws IOException { - return forcedMergePolicy.findForcedDeletesMerges(infos, mergeContext); + return regularMergePolicy.findForcedDeletesMerges(infos, mergeContext); } public void setForceMergeDeletesPctAllowed(double forceMergeDeletesPctAllowed) { @@ -80,7 +80,7 @@ public void setForceMergeDeletesPctAllowed(double forceMergeDeletesPctAllowed) { } public double getForceMergeDeletesPctAllowed() { - return forcedMergePolicy.getForceMergeDeletesPctAllowed(); + return regularMergePolicy.getForceMergeDeletesPctAllowed(); } public void setFloorSegmentMB(double mbFrac) { diff --git a/server/src/main/java/org/opensearch/index/ReplicationStats.java b/server/src/main/java/org/opensearch/index/ReplicationStats.java new file mode 100644 index 0000000000000..8987a492e9a90 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/ReplicationStats.java @@ -0,0 +1,97 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index; + +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * ReplicationStats is used to provide segment replication statistics at an index, + * node and cluster level on a segment replication enabled cluster. 
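The ReplicationStats class defined just below rolls per-shard statistics upward: byte totals sum, while bytes-behind and replication-lag maxima take the max. A usage sketch of that aggregation via add(...), assuming the server classes are on the classpath:

    import org.opensearch.index.ReplicationStats;

    public class ReplicationStatsRollupDemo {
        public static void main(String[] args) {
            ReplicationStats node = new ReplicationStats();
            node.add(new ReplicationStats(100, 100, 2000)); // shard A: maxBytesBehind, totalBytesBehind, maxReplicationLag
            node.add(new ReplicationStats(300, 300, 1000)); // shard B
            System.out.println(node.getMaxBytesBehind());    // 300 (max across shards)
            System.out.println(node.getTotalBytesBehind());  // 400 (sum across shards)
            System.out.println(node.getMaxReplicationLag()); // 2000 (max across shards)
        }
    }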
+ * + * @opensearch.api + */ +@PublicApi(since = "2.10.0") +public class ReplicationStats implements ToXContentFragment, Writeable { + + public long maxBytesBehind; + public long maxReplicationLag; + public long totalBytesBehind; + + public ReplicationStats(long maxBytesBehind, long totalBytesBehind, long maxReplicationLag) { + this.maxBytesBehind = maxBytesBehind; + this.totalBytesBehind = totalBytesBehind; + this.maxReplicationLag = maxReplicationLag; + } + + public ReplicationStats(StreamInput in) throws IOException { + this.maxBytesBehind = in.readVLong(); + this.totalBytesBehind = in.readVLong(); + this.maxReplicationLag = in.readVLong(); + } + + public ReplicationStats() { + + } + + public void add(ReplicationStats other) { + if (other != null) { + maxBytesBehind = Math.max(other.maxBytesBehind, maxBytesBehind); + totalBytesBehind += other.totalBytesBehind; + maxReplicationLag = Math.max(other.maxReplicationLag, maxReplicationLag); + } + } + + public long getMaxBytesBehind() { + return this.maxBytesBehind; + } + + public long getTotalBytesBehind() { + return this.totalBytesBehind; + } + + public long getMaxReplicationLag() { + return this.maxReplicationLag; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(maxBytesBehind); + out.writeVLong(totalBytesBehind); + out.writeVLong(maxReplicationLag); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(Fields.SEGMENT_REPLICATION); + builder.field(Fields.MAX_BYTES_BEHIND, maxBytesBehind); + builder.field(Fields.TOTAL_BYTES_BEHIND, totalBytesBehind); + builder.field(Fields.MAX_REPLICATION_LAG, maxReplicationLag); + builder.endObject(); + return builder; + } + + /** + * Fields for segment replication statistics + * + * @opensearch.internal + */ + static final class Fields { + static final String SEGMENT_REPLICATION = "segment_replication"; + static final String MAX_BYTES_BEHIND = "max_bytes_behind"; + static final String TOTAL_BYTES_BEHIND = "total_bytes_behind"; + static final String MAX_REPLICATION_LAG = "max_replication_lag"; + } +} diff --git a/server/src/main/java/org/opensearch/index/SearchSlowLog.java b/server/src/main/java/org/opensearch/index/SearchSlowLog.java index 0b2e3a6b7cbc3..cfdc2cf348d4d 100644 --- a/server/src/main/java/org/opensearch/index/SearchSlowLog.java +++ b/server/src/main/java/org/opensearch/index/SearchSlowLog.java @@ -33,10 +33,12 @@ package org.opensearch.index; import com.fasterxml.jackson.core.io.JsonStringEncoder; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.common.logging.OpenSearchLogMessage; import org.opensearch.common.logging.Loggers; +import org.opensearch.common.logging.OpenSearchLogMessage; +import org.opensearch.common.logging.SlowLogLevel; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.unit.TimeValue; diff --git a/server/src/main/java/org/opensearch/index/SegmentReplicationPerGroupStats.java b/server/src/main/java/org/opensearch/index/SegmentReplicationPerGroupStats.java index efbd5efbb07bc..884686ee48fa1 100644 --- a/server/src/main/java/org/opensearch/index/SegmentReplicationPerGroupStats.java +++ b/server/src/main/java/org/opensearch/index/SegmentReplicationPerGroupStats.java @@ -8,12 +8,13 @@ package org.opensearch.index; +import org.opensearch.common.annotation.PublicApi; import 
org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.index.shard.ShardId; import java.io.IOException; import java.util.Set; @@ -21,8 +22,9 @@ /** * Return Segment Replication stats for a Replication Group. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.7.0") public class SegmentReplicationPerGroupStats implements Writeable, ToXContentFragment { private final ShardId shardId; diff --git a/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java b/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java index 7e54d5179ea35..ce38dd3bb236c 100644 --- a/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java +++ b/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.action.shard.ShardStateAction; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.collect.Tuple; @@ -20,9 +19,10 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractAsyncTask; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; -import org.opensearch.index.shard.IndexShard; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; import org.opensearch.threadpool.ThreadPool; @@ -42,7 +42,8 @@ public class SegmentReplicationPressureService implements Closeable { private volatile boolean isSegmentReplicationBackpressureEnabled; private volatile int maxCheckpointsBehind; private volatile double maxAllowedStaleReplicas; - private volatile TimeValue maxReplicationTime; + private volatile TimeValue replicationTimeLimitBackpressure; + private volatile TimeValue replicationTimeLimitFailReplica; private static final Logger logger = LogManager.getLogger(SegmentReplicationPressureService.class); @@ -65,13 +66,23 @@ public class SegmentReplicationPressureService implements Closeable { Setting.Property.NodeScope ); - public static final Setting<TimeValue> MAX_REPLICATION_TIME_SETTING = Setting.positiveTimeSetting( + // Time limit on max allowed replica staleness after which backpressure kicks in on primary. + public static final Setting<TimeValue> MAX_REPLICATION_TIME_BACKPRESSURE_SETTING = Setting.positiveTimeSetting( "segrep.pressure.time.limit", TimeValue.timeValueMinutes(5), Setting.Property.Dynamic, Setting.Property.NodeScope ); + // Time limit on max allowed replica staleness after which we start failing the replica shard. 
+ // Defaults to 0 (disabled) + public static final Setting<TimeValue> MAX_REPLICATION_LIMIT_STALE_REPLICA_SETTING = Setting.positiveTimeSetting( + "segrep.replication.time.limit", + TimeValue.timeValueMinutes(0), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + public static final Setting<Double> MAX_ALLOWED_STALE_SHARDS = Setting.doubleSetting( "segrep.pressure.replica.stale.limit", .5, @@ -87,7 +98,7 @@ public class SegmentReplicationPressureService implements Closeable { private final SegmentReplicationStatsTracker tracker; private final ShardStateAction shardStateAction; - private final AsyncFailStaleReplicaTask failStaleReplicaTask; + private volatile AsyncFailStaleReplicaTask failStaleReplicaTask; @Inject public SegmentReplicationPressureService( @@ -95,10 +106,11 @@ public SegmentReplicationPressureService( ClusterService clusterService, IndicesService indicesService, ShardStateAction shardStateAction, + SegmentReplicationStatsTracker tracker, ThreadPool threadPool ) { this.indicesService = indicesService; - this.tracker = new SegmentReplicationStatsTracker(this.indicesService); + this.tracker = tracker; this.shardStateAction = shardStateAction; this.threadPool = threadPool; @@ -112,8 +124,11 @@ public SegmentReplicationPressureService( this.maxCheckpointsBehind = MAX_INDEXING_CHECKPOINTS.get(settings); clusterSettings.addSettingsUpdateConsumer(MAX_INDEXING_CHECKPOINTS, this::setMaxCheckpointsBehind); - this.maxReplicationTime = MAX_REPLICATION_TIME_SETTING.get(settings); - clusterSettings.addSettingsUpdateConsumer(MAX_REPLICATION_TIME_SETTING, this::setMaxReplicationTime); + this.replicationTimeLimitBackpressure = MAX_REPLICATION_TIME_BACKPRESSURE_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(MAX_REPLICATION_TIME_BACKPRESSURE_SETTING, this::setReplicationTimeLimitBackpressure); + + this.replicationTimeLimitFailReplica = MAX_REPLICATION_LIMIT_STALE_REPLICA_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(MAX_REPLICATION_LIMIT_STALE_REPLICA_SETTING, this::setReplicationTimeLimitFailReplica); this.maxAllowedStaleReplicas = MAX_ALLOWED_STALE_SHARDS.get(settings); clusterSettings.addSettingsUpdateConsumer(MAX_ALLOWED_STALE_SHARDS, this::setMaxAllowedStaleReplicas); @@ -137,7 +152,7 @@ public void isSegrepLimitBreached(ShardId shardId) { } private void validateReplicationGroup(IndexShard shard) { - final Set<SegmentReplicationShardStats> replicaStats = shard.getReplicationStats(); + final Set<SegmentReplicationShardStats> replicaStats = shard.getReplicationStatsForTrackedReplicas(); + final Set<SegmentReplicationShardStats> staleReplicas = getStaleReplicas(replicaStats); if (staleReplicas.isEmpty() == false) { // inSyncIds always considers the primary id, so filter it out.
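Taken together, the settings above encode a two-threshold rule: backpressure starts once a replica is both too many checkpoints behind and past segrep.pressure.time.limit, while failing the replica is governed by the separate segrep.replication.time.limit and is off when that limit is zero. A minimal, self-contained sketch of that rule under those assumptions; ReplicaStat and every literal value below are hypothetical stand-ins for SegmentReplicationShardStats and the real settings:

// Sketch only: ReplicaStat stands in for SegmentReplicationShardStats.
final class StaleReplicaRuleSketch {
    record ReplicaStat(String allocationId, long checkpointsBehind, long replicationTimeMillis) {}

    // Backpressure applies only when BOTH limits are breached.
    static boolean isStale(ReplicaStat r, long maxCheckpointsBehind, long backpressureLimitMillis) {
        return r.checkpointsBehind() > maxCheckpointsBehind && r.replicationTimeMillis() > backpressureLimitMillis;
    }

    // Failing a replica is disabled entirely while the fail limit is zero.
    static boolean shouldFail(ReplicaStat r, long failLimitMillis) {
        return failLimitMillis > 0 && r.replicationTimeMillis() > failLimitMillis;
    }

    public static void main(String[] args) {
        ReplicaStat lagging = new ReplicaStat("alloc-1", 6, 6 * 60 * 1000);
        System.out.println(isStale(lagging, 4, 5 * 60 * 1000)); // true: both limits breached
        System.out.println(shouldFail(lagging, 0));             // false: a zero limit disables failing
    }
}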
@@ -157,7 +172,7 @@ private void validateReplicationGroup(IndexShard shard) { private Set<SegmentReplicationShardStats> getStaleReplicas(final Set<SegmentReplicationShardStats> replicas) { return replicas.stream() .filter(entry -> entry.getCheckpointsBehindCount() > maxCheckpointsBehind) - .filter(entry -> entry.getCurrentReplicationTimeMillis() > maxReplicationTime.millis()) + .filter(entry -> entry.getCurrentReplicationTimeMillis() > replicationTimeLimitBackpressure.millis()) .collect(Collectors.toSet()); } @@ -185,8 +200,21 @@ public void setMaxAllowedStaleReplicas(double maxAllowedStaleReplicas) { this.maxAllowedStaleReplicas = maxAllowedStaleReplicas; } - public void setMaxReplicationTime(TimeValue maxReplicationTime) { - this.maxReplicationTime = maxReplicationTime; + public void setReplicationTimeLimitFailReplica(TimeValue replicationTimeLimitFailReplica) { + this.replicationTimeLimitFailReplica = replicationTimeLimitFailReplica; + updateAsyncFailReplicaTask(); + } + + private synchronized void updateAsyncFailReplicaTask() { + try { + failStaleReplicaTask.close(); + } finally { + failStaleReplicaTask = new AsyncFailStaleReplicaTask(this); + } + } + + public void setReplicationTimeLimitBackpressure(TimeValue replicationTimeLimitBackpressure) { + this.replicationTimeLimitBackpressure = replicationTimeLimitBackpressure; } @Override @@ -209,12 +237,13 @@ final static class AsyncFailStaleReplicaTask extends AbstractAsyncTask { @Override protected boolean mustReschedule() { - return true; + return pressureService.shouldScheduleAsyncFailTask(); } @Override protected void runInternal() { - if (pressureService.isSegmentReplicationBackpressureEnabled) { + // Do not fail the replicas if time limit is set to 0 (i.e. disabled). + if (pressureService.shouldScheduleAsyncFailTask()) { final SegmentReplicationStats stats = pressureService.tracker.getStats(); // Find the shardId in node which is having stale replicas with highest current replication time. @@ -240,7 +269,7 @@ protected void runInternal() { } final IndexShard primaryShard = indexService.getShard(shardId.getId()); for (SegmentReplicationShardStats staleReplica : staleReplicas) { - if (staleReplica.getCurrentReplicationTimeMillis() > 2 * pressureService.maxReplicationTime.millis()) { + if (staleReplica.getCurrentReplicationTimeMillis() > pressureService.replicationTimeLimitFailReplica.millis()) { pressureService.shardStateAction.remoteShardFailed( shardId, staleReplica.getAllocationId(), @@ -282,4 +311,8 @@ public String toString() { } + boolean shouldScheduleAsyncFailTask() { + return TimeValue.ZERO.equals(replicationTimeLimitFailReplica) == false; + } + } diff --git a/server/src/main/java/org/opensearch/index/SegmentReplicationRejectionStats.java b/server/src/main/java/org/opensearch/index/SegmentReplicationRejectionStats.java new file mode 100644 index 0000000000000..492f253bbcb7c --- /dev/null +++ b/server/src/main/java/org/opensearch/index/SegmentReplicationRejectionStats.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index; + +import org.opensearch.Version; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * Segment replication rejection stats. + * + * @opensearch.internal + */ +public class SegmentReplicationRejectionStats implements Writeable, ToXContentFragment { + + /** + * Total rejections due to segment replication backpressure + */ + private long totalRejectionCount; + + public SegmentReplicationRejectionStats(final long totalRejectionCount) { + this.totalRejectionCount = totalRejectionCount; + } + + public SegmentReplicationRejectionStats(StreamInput in) throws IOException { + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + this.totalRejectionCount = in.readVLong(); + } + } + + public long getTotalRejectionCount() { + return totalRejectionCount; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("segment_replication_backpressure"); + builder.field("total_rejected_requests", totalRejectionCount); + return builder.endObject(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeVLong(totalRejectionCount); + } + } + + @Override + public String toString() { + return "SegmentReplicationRejectionStats{ totalRejectedRequestCount=" + totalRejectionCount + '}'; + } + +} diff --git a/server/src/main/java/org/opensearch/index/SegmentReplicationShardStats.java b/server/src/main/java/org/opensearch/index/SegmentReplicationShardStats.java index b0e6e5076d03c..e381ade253422 100644 --- a/server/src/main/java/org/opensearch/index/SegmentReplicationShardStats.java +++ b/server/src/main/java/org/opensearch/index/SegmentReplicationShardStats.java @@ -9,11 +9,12 @@ package org.opensearch.index; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.indices.replication.SegmentReplicationState; @@ -23,12 +24,17 @@ /** * SegRep stats for a single shard. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.7.0") public class SegmentReplicationShardStats implements Writeable, ToXContentFragment { private final String allocationId; private final long checkpointsBehindCount; private final long bytesBehindCount; + // Total Replication lag observed. + private final long currentReplicationLagMillis; + // Total time taken for replicas to catch up. Similar to replication lag except this + // doesn't include time taken by primary to upload data to remote store. 
private final long currentReplicationTimeMillis; private final long lastCompletedReplicationTimeMillis; @@ -40,12 +46,14 @@ public SegmentReplicationShardStats( long checkpointsBehindCount, long bytesBehindCount, long currentReplicationTimeMillis, + long currentReplicationLagMillis, long lastCompletedReplicationTime ) { this.allocationId = allocationId; this.checkpointsBehindCount = checkpointsBehindCount; this.bytesBehindCount = bytesBehindCount; this.currentReplicationTimeMillis = currentReplicationTimeMillis; + this.currentReplicationLagMillis = currentReplicationLagMillis; this.lastCompletedReplicationTimeMillis = lastCompletedReplicationTime; } @@ -55,6 +63,7 @@ public SegmentReplicationShardStats(StreamInput in) throws IOException { this.bytesBehindCount = in.readVLong(); this.currentReplicationTimeMillis = in.readVLong(); this.lastCompletedReplicationTimeMillis = in.readVLong(); + this.currentReplicationLagMillis = in.readVLong(); } public String getAllocationId() { @@ -73,6 +82,19 @@ public long getCurrentReplicationTimeMillis() { return currentReplicationTimeMillis; } + /** + * Total Replication lag observed. + * @return currentReplicationLagMillis + */ + public long getCurrentReplicationLagMillis() { + return currentReplicationLagMillis; + } + + /** + * Total time taken for replicas to catch up. Similar to replication lag except this doesn't include time taken by + * primary to upload data to remote store. + * @return lastCompletedReplicationTimeMillis + */ public long getLastCompletedReplicationTimeMillis() { return lastCompletedReplicationTimeMillis; } @@ -93,6 +115,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("checkpoints_behind", checkpointsBehindCount); builder.field("bytes_behind", new ByteSizeValue(bytesBehindCount).toString()); builder.field("current_replication_time", new TimeValue(currentReplicationTimeMillis)); + builder.field("current_replication_lag", new TimeValue(currentReplicationLagMillis)); builder.field("last_completed_replication_time", new TimeValue(lastCompletedReplicationTimeMillis)); if (currentReplicationState != null) { builder.startObject(); @@ -110,6 +133,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(bytesBehindCount); out.writeVLong(currentReplicationTimeMillis); out.writeVLong(lastCompletedReplicationTimeMillis); + out.writeVLong(currentReplicationLagMillis); } @Override @@ -121,6 +145,8 @@ public String toString() { + checkpointsBehindCount + ", bytesBehindCount=" + bytesBehindCount + + ", currentReplicationLagMillis=" + + currentReplicationLagMillis + ", currentReplicationTimeMillis=" + currentReplicationTimeMillis + ", lastCompletedReplicationTimeMillis=" diff --git a/server/src/main/java/org/opensearch/index/SegmentReplicationStats.java b/server/src/main/java/org/opensearch/index/SegmentReplicationStats.java index 0f32c73f80336..cdf22b05d5861 100644 --- a/server/src/main/java/org/opensearch/index/SegmentReplicationStats.java +++ b/server/src/main/java/org/opensearch/index/SegmentReplicationStats.java @@ -11,9 +11,9 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.index.shard.ShardId; import java.io.IOException; import java.util.HashMap; diff --git 
a/server/src/main/java/org/opensearch/index/SegmentReplicationStatsTracker.java b/server/src/main/java/org/opensearch/index/SegmentReplicationStatsTracker.java index c9ddf494ae230..f5fc8aa1c1eea 100644 --- a/server/src/main/java/org/opensearch/index/SegmentReplicationStatsTracker.java +++ b/server/src/main/java/org/opensearch/index/SegmentReplicationStatsTracker.java @@ -9,8 +9,8 @@ package org.opensearch.index; import org.opensearch.common.util.concurrent.ConcurrentCollections; -import org.opensearch.index.shard.IndexShard; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; import java.util.HashMap; @@ -33,6 +33,14 @@ public SegmentReplicationStatsTracker(IndicesService indicesService) { rejectionCount = ConcurrentCollections.newConcurrentMap(); } + public SegmentReplicationRejectionStats getTotalRejectionStats() { + return new SegmentReplicationRejectionStats(this.rejectionCount.values().stream().mapToInt(AtomicInteger::get).sum()); + } + + protected Map<ShardId, AtomicInteger> getRejectionCount() { + return rejectionCount; + } + public SegmentReplicationStats getStats() { Map<ShardId, SegmentReplicationPerGroupStats> stats = new HashMap<>(); for (IndexService indexService : indicesService) { @@ -59,7 +67,7 @@ public void incrementRejectionCount(ShardId shardId) { public SegmentReplicationPerGroupStats getStatsForShard(IndexShard indexShard) { return new SegmentReplicationPerGroupStats( indexShard.shardId(), - indexShard.getReplicationStats(), + indexShard.getReplicationStatsForTrackedReplicas(), Optional.ofNullable(rejectionCount.get(indexShard.shardId())).map(AtomicInteger::get).orElse(0) ); } diff --git a/server/src/main/java/org/opensearch/index/ShardIndexingPressure.java b/server/src/main/java/org/opensearch/index/ShardIndexingPressure.java index 8a9f3eea6030b..a6135186fb5ff 100644 --- a/server/src/main/java/org/opensearch/index/ShardIndexingPressure.java +++ b/server/src/main/java/org/opensearch/index/ShardIndexingPressure.java @@ -9,18 +9,18 @@ import org.apache.logging.log4j.Logger; import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.lease.Releasable; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.ShardIndexingPressureTracker.CommonOperationTracker; import org.opensearch.index.ShardIndexingPressureTracker.OperationTracker; import org.opensearch.index.ShardIndexingPressureTracker.PerformanceTracker; import org.opensearch.index.ShardIndexingPressureTracker.RejectionTracker; import org.opensearch.index.ShardIndexingPressureTracker.StatsTracker; -import org.opensearch.core.index.shard.ShardId; -import org.opensearch.index.stats.ShardIndexingPressureStats; import org.opensearch.index.stats.IndexingPressurePerShardStats; +import org.opensearch.index.stats.ShardIndexingPressureStats; import java.util.Collections; import java.util.HashMap; @@ -34,7 +34,7 @@ * Interfaces returns Releasable which when triggered will release the acquired accounting tokens values and also * perform necessary actions such as throughput evaluation once the request completes. 
* Consumers of these interfaces are expected to trigger close on releasable, reliably for consistency. - * + * <p> * Overall ShardIndexingPressure provides: * 1. Memory Accounting at shard level. This can be enabled/disabled based on a dynamic setting. * 2. Memory Accounting at Node level. Tracking is done using the IndexingPressure artefacts to support seamless feature toggling. diff --git a/server/src/main/java/org/opensearch/index/ShardIndexingPressureMemoryManager.java b/server/src/main/java/org/opensearch/index/ShardIndexingPressureMemoryManager.java index 9f8ac7ea76cfd..e5c1af2e9c9f0 100644 --- a/server/src/main/java/org/opensearch/index/ShardIndexingPressureMemoryManager.java +++ b/server/src/main/java/org/opensearch/index/ShardIndexingPressureMemoryManager.java @@ -14,11 +14,11 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.ShardIndexingPressureTracker.OperationTracker; import org.opensearch.index.ShardIndexingPressureTracker.PerformanceTracker; import org.opensearch.index.ShardIndexingPressureTracker.RejectionTracker; import org.opensearch.index.ShardIndexingPressureTracker.StatsTracker; -import org.opensearch.core.index.shard.ShardId; import java.util.Map; import java.util.concurrent.atomic.AtomicLong; @@ -30,16 +30,16 @@ /** * The Shard Indexing Pressure Memory Manager is the construct responsible for increasing and decreasing the allocated shard limit * based on incoming requests. A shard limit defines the maximum memory that a shard can occupy in the heap for request objects. - * + * <p> * Based on the overall memory utilization on the node, and current traffic needs, shard limits will be modified: - * + * <p> * 1. If the limit assigned to a shard is breached (Primary Parameter) while the node level overall occupancy across all shards * is not greater than primary_parameter.node.soft_limit, MemoryManager will increase the shard limits without any deeper evaluation. * 2. If the limit assigned to the shard is breached (Primary Parameter) and the node level overall occupancy across all shards * is greater than primary_parameter.node.soft_limit, then MemoryManager will evaluate deeper parameters for shards to identify any * issues, such as throughput degradation (Secondary Parameter - 1) and time since last request was successful (Secondary Parameter - 2). * This helps detect any duress state with the shard, requesting more memory. - * + * <p> * Secondary Parameters covered above: * 1. ThroughputDegradationLimitsBreached - When the moving window throughput average has increased by a factor compared to * the historical throughput average. If the factor by which it has increased is greater than the degradation limit threshold, this @@ -47,7 +47,7 @@ * 2. LastSuccessfulRequestDurationLimitsBreached - When the time since the last successful request completed is greater than the max * timeout threshold value, while there are a number of outstanding requests greater than the max outstanding requests, then this parameter * is considered to be breached. - * + * <p> * MemoryManager attempts to increase or decrease the shard limits in case the shard utilization goes below operating_factor.lower or * goes above operating_factor.upper of current shard limits. MemoryManager attempts to update the new shard limit such that the new value * remains within the operating_factor.optimal range of current shard utilization.
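The Javadoc above reduces to a short predicate. The sketch below is an illustration only, not the manager's code: every parameter and value is a hypothetical stand-in for state the real class reads from ShardIndexingPressureTracker and its dynamic settings.

// Hedged sketch of the primary/secondary parameter evaluation described above.
final class ShardLimitDecisionSketch {

    // Hypothetical inputs; real values come from trackers and dynamic settings.
    static boolean allowAndMaybeGrowLimit(
        long shardBytes,              // memory currently occupied by the shard's requests
        long shardLimit,              // primary parameter: allocated per-shard limit
        double nodeOccupancy,         // node-level occupancy across all shards, 0..1
        double nodeSoftLimit,         // e.g. primary_parameter.node.soft_limit
        boolean throughputDegraded,   // secondary parameter 1
        boolean lastSuccessTimedOut   // secondary parameter 2
    ) {
        if (shardBytes <= shardLimit) {
            return true;  // shard limit not breached: no deeper evaluation needed
        }
        if (nodeOccupancy <= nodeSoftLimit) {
            return true;  // limit breached but the node has headroom: grow the limit
        }
        // Node under pressure: grow only when neither secondary parameter signals duress.
        return throughputDegraded == false && lastSuccessTimedOut == false;
    }

    public static void main(String[] args) {
        // Shard over its limit on a pressured node, with degraded throughput: reject.
        System.out.println(allowAndMaybeGrowLimit(2048, 1024, 0.9, 0.7, true, false)); // false
    }
}

A caller would reject the request when this returns false; how far the limit then moves (the operating_factor.lower/upper/optimal band) is left out of the sketch.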
diff --git a/server/src/main/java/org/opensearch/index/ShardIndexingPressureStore.java b/server/src/main/java/org/opensearch/index/ShardIndexingPressureStore.java index b41dd1359394b..9b24d119f24fd 100644 --- a/server/src/main/java/org/opensearch/index/ShardIndexingPressureStore.java +++ b/server/src/main/java/org/opensearch/index/ShardIndexingPressureStore.java @@ -22,24 +22,24 @@ * Shard indexing pressure store acts as a central repository for all the shard-level tracker objects currently being * used at the Node level, for tracking indexing pressure requests. * Store manages the tracker lifecycle, from creation, access, until it is evicted to be collected. - * + * <p> * Trackers are maintained at two levels for access simplicity and better memory management: - * + * <p> * 1. shardIndexingPressureHotStore : As the name suggests, it is the hot store for tracker objects which are currently live i.e. being used * to track an ongoing request. - * + * <p> * 2. shardIndexingPressureColdStore : This acts as the store for all the shard tracking objects which are currently being used * by the framework. In addition to hot trackers, the recently used trackers which, although not currently live, can again be used * in the near future, are also part of this store. To limit any memory implications, this store has an upper limit on the maximum number of * trackers it can hold at any given time, which is a configurable dynamic setting. - * + * <p> * Tracking objects when created are part of both the hot store as well as cold store. However, once the object * is no longer live it is removed from the hot store. Objects in the cold store are evicted once the cold store * reaches its maximum limit. Think of it like a periodic purge when upper limit is hit. * During get, if the tracking object is not present in the hot store, a lookup is made into the cache store. If found, * the object is brought into the hot store again, where it remains while active. If not present in either store, a fresh * object is instantiated and registered in both the stores for concurrent accesses. - * + * <p> * Note: The implementation of shardIndexingPressureColdStore methods is such that get, * update and evict operations can be abstracted out to support any other strategy such as LRU, if * a need is discovered later. diff --git a/server/src/main/java/org/opensearch/index/ShardIndexingPressureTracker.java b/server/src/main/java/org/opensearch/index/ShardIndexingPressureTracker.java index 7d67b47141ef5..e0edb8260fd0f 100644 --- a/server/src/main/java/org/opensearch/index/ShardIndexingPressureTracker.java +++ b/server/src/main/java/org/opensearch/index/ShardIndexingPressureTracker.java @@ -14,19 +14,19 @@ * This class is responsible for all the tracking that needs to be performed at every Shard Level for Indexing Operations on the node. * Info is maintained at the granularity of three kinds of write operation (tasks) on the node i.e. coordinating, primary and replica. * This is useful in evaluating the shard indexing back-pressure on the node, to throttle requests and also to publish runtime stats. - * + * <p> * There can be four kinds of operation tracking on a node which need to be performed for a shard: * 1. Coordinating Operation : To track all the individual shard bulk request on the coordinator node. * 2. Primary Operation : To track all the individual shard bulk request on the primary node. * 3. Replica Operation : To track all the individual shard bulk request on the replica node. * 4.
Common Operation : To track values applicable across the specific shard role. - * + * <p> * ShardIndexingPressureTracker therefore provides the construct to track all the write requests targeted for a ShardId on the node, * across all possible transport-write-actions i.e. Coordinator, Primary and Replica. * Tracker is uniquely identified against a Shard-Id on the node. Currently the knowledge of shard roles (such as primary vs replica) * is not explicit to the tracker, and it is able to track different values simultaneously based on the interaction hooks of the * operation type i.e. write-action layers. - * + * <p> * There is room for introducing more unique identity to the trackers based on Shard-Role or Shard-Allocation-Id, but that will also * increase the complexity of handling shard-listener events and handling other race scenarios such as request-draining etc. * To prefer simplicity we have modelled this by keeping explicit fields for different operation tracking, while tracker by itself is diff --git a/server/src/main/java/org/opensearch/index/TieredMergePolicyProvider.java b/server/src/main/java/org/opensearch/index/TieredMergePolicyProvider.java new file mode 100644 index 0000000000000..d5d354c6c960a --- /dev/null +++ b/server/src/main/java/org/opensearch/index/TieredMergePolicyProvider.java @@ -0,0 +1,323 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.index; + +import org.apache.logging.log4j.Logger; +import org.apache.lucene.index.LogByteSizeMergePolicy; +import org.apache.lucene.index.MergePolicy; +import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.index.TieredMergePolicy; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Setting.Property; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; + +/** + * A shard in OpenSearch is a Lucene index, and a Lucene index is broken + * down into segments. Segments are internal storage elements in the index + * where the index data is stored, and are immutable up to delete markers. + * Segments are, periodically, merged into larger segments to keep the + * index size at bay and expunge deletes. + * This class customizes and exposes two merge policies from Lucene - + * {@link LogByteSizeMergePolicy} and {@link TieredMergePolicy}.
+ * + * + * <p> + * The tiered merge policy selects segments of approximately equal size, subject to an allowed + * number of segments per tier. The merge policy is able to merge + * non-adjacent segments, and separates how many segments are merged at once from how many + * segments are allowed per tier. It also does not over-merge (i.e., cascade merges). + * + * <p> + * All merge policy settings are <b>dynamic</b> and can be updated on a live index. + * The merge policy has the following settings: + * + * <ul> + * <li><code>index.merge.policy.expunge_deletes_allowed</code>: + * + * When expungeDeletes is called, we only merge away a segment if its delete + * percentage is over this threshold. Default is <code>10</code>. + * + * <li><code>index.merge.policy.floor_segment</code>: + * + * Segments smaller than this are "rounded up" to this size, i.e. treated as + * equal (floor) size for merge selection. This is to prevent frequent + * flushing of tiny segments, thus preventing a long tail in the index. Default + * is <code>2mb</code>. + * + * <li><code>index.merge.policy.max_merge_at_once</code>: + * + * Maximum number of segments to be merged at a time during "normal" merging. + * Default is <code>10</code>. + * + * <li><code>index.merge.policy.max_merged_segment</code>: + * + * Maximum sized segment to produce during normal merging (not explicit + * force merge). This setting is approximate: the estimate of the merged + * segment size is made by summing sizes of to-be-merged segments + * (compensating for percent deleted docs). Default is <code>5gb</code>. + * + * <li><code>index.merge.policy.segments_per_tier</code>: + * + * Sets the allowed number of segments per tier. Smaller values mean more + * merging but fewer segments. Default is <code>10</code>. Note, this value needs to be + * greater than or equal to <code>max_merge_at_once</code>, otherwise you'll force too many merges to + * occur. + * + * <li><code>index.merge.policy.deletes_pct_allowed</code>: + * + * Controls the maximum percentage of deleted documents that is tolerated in + * the index. Lower values make the index more space efficient at the + * expense of increased CPU and I/O activity. Values must be between <code>5</code> and + * <code>50</code>. Default value is <code>20</code>. + * </ul> + * + * <p> + * For normal merging, the policy first computes a "budget" of how many + * segments are allowed to be in the index. If the index is over-budget, + * then the policy sorts segments by decreasing size (proportionally considering percent + * deletes), and then finds the least-cost merge. Merge cost is measured by + * a combination of the "skew" of the merge (size of largest seg divided by + * smallest seg), total merge size and pct deletes reclaimed, so that + * merges with lower skew, smaller size and those reclaiming more deletes, + * are favored. + * + * <p> + * If a merge will produce a segment that's larger than + * <code>max_merged_segment</code> then the policy will merge fewer segments (down to + * 1 at once, if that one has deletions) to keep the segment size under + * budget. + * + * <p> + * Note, this can mean that for large shards that hold many gigabytes of + * data, the default of <code>max_merged_segment</code> (<code>5gb</code>) can cause many + * segments to be in an index, causing searches to be slower.
Use the + * indices segments API to see the segments that an index has, and + * possibly either increase the <code>max_merged_segment</code> or issue an optimize + * call for the index (try and aim to issue it at a low-traffic time). + * + * @opensearch.internal + */ + +public final class TieredMergePolicyProvider implements MergePolicyProvider { + private final OpenSearchTieredMergePolicy tieredMergePolicy = new OpenSearchTieredMergePolicy(); + + private final Logger logger; + private final boolean mergesEnabled; + + public static final double DEFAULT_EXPUNGE_DELETES_ALLOWED = 10d; + public static final ByteSizeValue DEFAULT_FLOOR_SEGMENT = new ByteSizeValue(2, ByteSizeUnit.MB); + public static final int DEFAULT_MAX_MERGE_AT_ONCE = 10; + public static final ByteSizeValue DEFAULT_MAX_MERGED_SEGMENT = new ByteSizeValue(5, ByteSizeUnit.GB); + public static final double DEFAULT_SEGMENTS_PER_TIER = 10.0d; + public static final double DEFAULT_RECLAIM_DELETES_WEIGHT = 2.0d; + public static final double DEFAULT_DELETES_PCT_ALLOWED = 20.0d; + + public static final Setting<Double> INDEX_COMPOUND_FORMAT_SETTING = new Setting<>( + "index.compound_format", + Double.toString(TieredMergePolicy.DEFAULT_NO_CFS_RATIO), + TieredMergePolicyProvider::parseNoCFSRatio, + Property.Dynamic, + Property.IndexScope + ); + + public static final Setting<Double> INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING = Setting.doubleSetting( + "index.merge.policy.expunge_deletes_allowed", + DEFAULT_EXPUNGE_DELETES_ALLOWED, + 0.0d, + Property.Dynamic, + Property.IndexScope + ); + public static final Setting<ByteSizeValue> INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING = Setting.byteSizeSetting( + "index.merge.policy.floor_segment", + DEFAULT_FLOOR_SEGMENT, + Property.Dynamic, + Property.IndexScope + ); + public static final Setting<Integer> INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING = Setting.intSetting( + "index.merge.policy.max_merge_at_once", + DEFAULT_MAX_MERGE_AT_ONCE, + 2, + Property.Dynamic, + Property.IndexScope + ); + public static final Setting<ByteSizeValue> INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING = Setting.byteSizeSetting( + "index.merge.policy.max_merged_segment", + DEFAULT_MAX_MERGED_SEGMENT, + Property.Dynamic, + Property.IndexScope + ); + public static final Setting<Double> INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING = Setting.doubleSetting( + "index.merge.policy.segments_per_tier", + DEFAULT_SEGMENTS_PER_TIER, + 2.0d, + Property.Dynamic, + Property.IndexScope + ); + public static final Setting<Double> INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING = Setting.doubleSetting( + "index.merge.policy.reclaim_deletes_weight", + DEFAULT_RECLAIM_DELETES_WEIGHT, + 0.0d, + Property.Dynamic, + Property.IndexScope, + Property.Deprecated + ); + public static final Setting<Double> INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING = Setting.doubleSetting( + "index.merge.policy.deletes_pct_allowed", + DEFAULT_DELETES_PCT_ALLOWED, + 5.0d, + 50.0d, + Property.Dynamic, + Property.IndexScope + ); + + TieredMergePolicyProvider(Logger logger, IndexSettings indexSettings) { + this.logger = logger; + double forceMergeDeletesPctAllowed = indexSettings.getValue(INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING); // percentage + ByteSizeValue floorSegment = indexSettings.getValue(INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING); + int maxMergeAtOnce = indexSettings.getValue(INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING); + // TODO is this really a good default number for max_merge_segment, what happens for large indices, + // won't they end
up with many segments? + ByteSizeValue maxMergedSegment = indexSettings.getValue(INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING); + double segmentsPerTier = indexSettings.getValue(INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING); + double reclaimDeletesWeight = indexSettings.getValue(INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING); + double deletesPctAllowed = indexSettings.getValue(INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING); + this.mergesEnabled = indexSettings.getSettings().getAsBoolean(INDEX_MERGE_ENABLED, true); + if (mergesEnabled == false) { + logger.warn( + "[{}] is set to false, this should only be used in tests and can cause serious problems in production" + " environments", + INDEX_MERGE_ENABLED + ); + } + maxMergeAtOnce = adjustMaxMergeAtOnceIfNeeded(maxMergeAtOnce, segmentsPerTier); + tieredMergePolicy.setNoCFSRatio(indexSettings.getValue(INDEX_COMPOUND_FORMAT_SETTING)); + tieredMergePolicy.setForceMergeDeletesPctAllowed(forceMergeDeletesPctAllowed); + tieredMergePolicy.setFloorSegmentMB(floorSegment.getMbFrac()); + tieredMergePolicy.setMaxMergeAtOnce(maxMergeAtOnce); + tieredMergePolicy.setMaxMergedSegmentMB(maxMergedSegment.getMbFrac()); + tieredMergePolicy.setSegmentsPerTier(segmentsPerTier); + tieredMergePolicy.setDeletesPctAllowed(deletesPctAllowed); + } + + void setSegmentsPerTier(Double segmentsPerTier) { + tieredMergePolicy.setSegmentsPerTier(segmentsPerTier); + } + + void setMaxMergedSegment(ByteSizeValue maxMergedSegment) { + tieredMergePolicy.setMaxMergedSegmentMB(maxMergedSegment.getMbFrac()); + } + + void setMaxMergesAtOnce(Integer maxMergeAtOnce) { + tieredMergePolicy.setMaxMergeAtOnce(maxMergeAtOnce); + } + + void setFloorSegmentSetting(ByteSizeValue floorSegmentSetting) { + tieredMergePolicy.setFloorSegmentMB(floorSegmentSetting.getMbFrac()); + } + + void setExpungeDeletesAllowed(Double value) { + tieredMergePolicy.setForceMergeDeletesPctAllowed(value); + } + + void setNoCFSRatio(Double noCFSRatio) { + tieredMergePolicy.setNoCFSRatio(noCFSRatio); + } + + void setDeletesPctAllowed(Double deletesPctAllowed) { + tieredMergePolicy.setDeletesPctAllowed(deletesPctAllowed); + } + + private int adjustMaxMergeAtOnceIfNeeded(int maxMergeAtOnce, double segmentsPerTier) { + // fixing maxMergeAtOnce, see TieredMergePolicy#setMaxMergeAtOnce + if (!(segmentsPerTier >= maxMergeAtOnce)) { + int newMaxMergeAtOnce = (int) segmentsPerTier; + // max merge at once should be at least 2 + if (newMaxMergeAtOnce <= 1) { + newMaxMergeAtOnce = 2; + } + logger.debug( + "changing max_merge_at_once from [{}] to [{}] because segments_per_tier [{}] has to be higher or " + "equal to it", + maxMergeAtOnce, + newMaxMergeAtOnce, + segmentsPerTier + ); + maxMergeAtOnce = newMaxMergeAtOnce; + } + return maxMergeAtOnce; + } + + public MergePolicy getMergePolicy() { + return mergesEnabled ?
tieredMergePolicy : NoMergePolicy.INSTANCE; + } + + public static double parseNoCFSRatio(String noCFSRatio) { + noCFSRatio = noCFSRatio.trim(); + if (noCFSRatio.equalsIgnoreCase("true")) { + return 1.0d; + } else if (noCFSRatio.equalsIgnoreCase("false")) { + return 0.0; + } else { + try { + double value = Double.parseDouble(noCFSRatio); + if (value < 0.0 || value > 1.0) { + throw new IllegalArgumentException("NoCFSRatio must be in the interval [0..1] but was: [" + value + "]"); + } + return value; + } catch (NumberFormatException ex) { + throw new IllegalArgumentException( + "Expected a boolean or a value in the interval [0..1] but was: " + "[" + noCFSRatio + "]", + ex + ); + } + } + } + + @Override + public String toString() { + return "TieredMergePolicyProvider{" + + "expungeDeletesAllowed=" + + tieredMergePolicy.getForceMergeDeletesPctAllowed() + + ", floorSegment=" + + tieredMergePolicy.getFloorSegmentMB() + + ", maxMergeAtOnce=" + + tieredMergePolicy.getMaxMergeAtOnce() + + ", maxMergedSegment=" + + tieredMergePolicy.getMaxMergedSegmentMB() + + ", segmentsPerTier=" + + tieredMergePolicy.getSegmentsPerTier() + + ", deletesPctAllowed=" + + tieredMergePolicy.getDeletesPctAllowed() + + '}'; + } + +} diff --git a/server/src/main/java/org/opensearch/index/VersionType.java b/server/src/main/java/org/opensearch/index/VersionType.java index 111aa68152d4d..01cf73ca950d0 100644 --- a/server/src/main/java/org/opensearch/index/VersionType.java +++ b/server/src/main/java/org/opensearch/index/VersionType.java @@ -31,10 +31,11 @@ package org.opensearch.index; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.lucene.uid.Versions; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.lucene.uid.Versions; import java.io.IOException; import java.util.Locale; @@ -42,8 +43,9 @@ /** * Types of index versions * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum VersionType implements Writeable { INTERNAL((byte) 0) { @Override @@ -244,7 +246,7 @@ public byte getValue() { /** * Returns a human readable explanation for a version conflict on write. - * + * <p> * Note that this method is only called if {@link #isVersionConflictForWrites(long, long, boolean)} returns true; * * @param currentVersion the current version for the document @@ -265,7 +267,7 @@ public byte getValue() { /** * Returns a human readable explanation for a version conflict on read. 
- * + * <p> * Note that this method is only called if {@link #isVersionConflictForReads(long, long)} returns true; * * @param currentVersion the current version for the document diff --git a/server/src/main/java/org/opensearch/index/analysis/Analysis.java b/server/src/main/java/org/opensearch/index/analysis/Analysis.java index f4465c9dffac6..b9a219057f326 100644 --- a/server/src/main/java/org/opensearch/index/analysis/Analysis.java +++ b/server/src/main/java/org/opensearch/index/analysis/Analysis.java @@ -87,6 +87,7 @@ import java.util.Locale; import java.util.Map; import java.util.Set; +import java.util.regex.Pattern; import static java.util.Collections.unmodifiableMap; @@ -98,6 +99,9 @@ public class Analysis { private static final Logger LOGGER = LogManager.getLogger(Analysis.class); + // Regular expression to support hashtag tokenization + private static final Pattern HASH_TAG_RULE_PATTERN = Pattern.compile("^\\s*#\\s*=>"); + public static CharArraySet parseStemExclusion(Settings settings, CharArraySet defaultStemExclusion) { String value = settings.get("stem_exclusion"); if ("_none_".equals(value)) { @@ -222,16 +226,6 @@ public static <T> List<T> parseWordList(Environment env, Settings settings, Stri return parseWordList(env, settings, settingPrefix + "_path", settingPrefix, parser); } - public static <T> List<T> parseWordList( - Environment env, - Settings settings, - String settingPrefix, - CustomMappingRuleParser<T> parser, - boolean removeComments - ) { - return parseWordList(env, settings, settingPrefix + "_path", settingPrefix, parser, removeComments); - } - /** * Parses a list of words from the specified settings or from a file, with the given parser. * @@ -246,17 +240,6 @@ public static <T> List<T> parseWordList( String settingPath, String settingList, CustomMappingRuleParser<T> parser - ) { - return parseWordList(env, settings, settingPath, settingList, parser, true); - } - - public static <T> List<T> parseWordList( - Environment env, - Settings settings, - String settingPath, - String settingList, - CustomMappingRuleParser<T> parser, - boolean removeComments ) { List<String> words = getWordList(env, settings, settingPath, settingList); if (words == null) { @@ -266,7 +249,7 @@ public static <T> List<T> parseWordList( int lineNum = 0; for (String word : words) { lineNum++; - if (removeComments == false || word.startsWith("#") == false) { + if (word.startsWith("#") == false || HASH_TAG_RULE_PATTERN.matcher(word).find() == true) { try { rules.add(parser.apply(word)); } catch (RuntimeException ex) { diff --git a/server/src/main/java/org/opensearch/index/analysis/AnalysisMode.java b/server/src/main/java/org/opensearch/index/analysis/AnalysisMode.java index 4385680d9eb93..af71b470711a0 100644 --- a/server/src/main/java/org/opensearch/index/analysis/AnalysisMode.java +++ b/server/src/main/java/org/opensearch/index/analysis/AnalysisMode.java @@ -32,13 +32,16 @@ package org.opensearch.index.analysis; +import org.opensearch.common.annotation.PublicApi; + /** * Enum representing the mode in which token filters and analyzers are allowed to operate. * While most token filters are allowed both in index and search time analyzers, some are * restricted to be used only at index time, others at search time. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum AnalysisMode { /** diff --git a/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java b/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java index cfdf416b2d533..793cdcd5c5c1a 100644 --- a/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java +++ b/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java @@ -38,6 +38,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.io.IOUtils; import org.opensearch.env.Environment; @@ -67,8 +68,9 @@ * An internal registry for tokenizer, token filter, char filter and analyzer. * This class exists per node and allows to create per-index {@link IndexAnalyzers} via {@link #build(IndexSettings)} * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class AnalysisRegistry implements Closeable { public static final String INDEX_ANALYSIS_CHAR_FILTER = "index.analysis.char_filter"; public static final String INDEX_ANALYSIS_FILTER = "index.analysis.filter"; @@ -244,7 +246,7 @@ public IndexAnalyzers build(IndexSettings indexSettings) throws IOException { /** * Creates a custom analyzer from a collection of {@link NameOrDefinition} specifications for each component - * + * <p> * Callers are responsible for closing the returned Analyzer */ public NamedAnalyzer buildCustomAnalyzer( diff --git a/server/src/main/java/org/opensearch/index/analysis/AnalyzerProvider.java b/server/src/main/java/org/opensearch/index/analysis/AnalyzerProvider.java index 24ddabca16f1e..db0b3e8d288d6 100644 --- a/server/src/main/java/org/opensearch/index/analysis/AnalyzerProvider.java +++ b/server/src/main/java/org/opensearch/index/analysis/AnalyzerProvider.java @@ -33,13 +33,15 @@ package org.opensearch.index.analysis; import org.apache.lucene.analysis.Analyzer; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Provider; /** * Base interface for all analyzer providers * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface AnalyzerProvider<T extends Analyzer> extends Provider<T> { String name(); diff --git a/server/src/main/java/org/opensearch/index/analysis/AnalyzerScope.java b/server/src/main/java/org/opensearch/index/analysis/AnalyzerScope.java index 0ee51b32aab46..b24d932123b44 100644 --- a/server/src/main/java/org/opensearch/index/analysis/AnalyzerScope.java +++ b/server/src/main/java/org/opensearch/index/analysis/AnalyzerScope.java @@ -32,11 +32,14 @@ package org.opensearch.index.analysis; +import org.opensearch.common.annotation.PublicApi; + /** * Enum to identify the scope of an analyzer * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum AnalyzerScope { INDEX, INDICES, diff --git a/server/src/main/java/org/opensearch/index/analysis/CharFilterFactory.java b/server/src/main/java/org/opensearch/index/analysis/CharFilterFactory.java index dc9d3704b1a3c..938932280a52a 100644 --- a/server/src/main/java/org/opensearch/index/analysis/CharFilterFactory.java +++ b/server/src/main/java/org/opensearch/index/analysis/CharFilterFactory.java @@ -32,13 +32,16 @@ package org.opensearch.index.analysis; +import org.opensearch.common.annotation.PublicApi; + import 
java.io.Reader; /** * Base character filter factory behavior used in analysis chain * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface CharFilterFactory { String name(); diff --git a/server/src/main/java/org/opensearch/index/analysis/CustomAnalyzer.java b/server/src/main/java/org/opensearch/index/analysis/CustomAnalyzer.java index 8db5c89d75808..b0f4687c0b8b1 100644 --- a/server/src/main/java/org/opensearch/index/analysis/CustomAnalyzer.java +++ b/server/src/main/java/org/opensearch/index/analysis/CustomAnalyzer.java @@ -35,7 +35,7 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; -import org.opensearch.common.util.CollectionUtils; +import org.opensearch.core.common.util.CollectionUtils; import java.io.Reader; diff --git a/server/src/main/java/org/opensearch/index/analysis/IndexAnalyzers.java b/server/src/main/java/org/opensearch/index/analysis/IndexAnalyzers.java index 4d72ee48cf33a..fe06504c71333 100644 --- a/server/src/main/java/org/opensearch/index/analysis/IndexAnalyzers.java +++ b/server/src/main/java/org/opensearch/index/analysis/IndexAnalyzers.java @@ -31,6 +31,7 @@ package org.opensearch.index.analysis; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.io.IOUtils; import java.io.Closeable; @@ -52,8 +53,9 @@ * * @see AnalysisRegistry * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class IndexAnalyzers implements Closeable { private final Map<String, NamedAnalyzer> analyzers; private final Map<String, NamedAnalyzer> normalizers; diff --git a/server/src/main/java/org/opensearch/index/analysis/NameOrDefinition.java b/server/src/main/java/org/opensearch/index/analysis/NameOrDefinition.java index 0295815e1c048..b425f17a85d6a 100644 --- a/server/src/main/java/org/opensearch/index/analysis/NameOrDefinition.java +++ b/server/src/main/java/org/opensearch/index/analysis/NameOrDefinition.java @@ -32,10 +32,11 @@ package org.opensearch.index.analysis; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParseException; @@ -48,8 +49,9 @@ /** * Provides the name and settings for an analyzer * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NameOrDefinition implements Writeable, ToXContentFragment { // exactly one of these two members is not null public final String name; diff --git a/server/src/main/java/org/opensearch/index/analysis/NamedAnalyzer.java b/server/src/main/java/org/opensearch/index/analysis/NamedAnalyzer.java index 2a88d375a7df8..07523f9ee6dc5 100644 --- a/server/src/main/java/org/opensearch/index/analysis/NamedAnalyzer.java +++ b/server/src/main/java/org/opensearch/index/analysis/NamedAnalyzer.java @@ -34,6 +34,7 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.DelegatingAnalyzerWrapper; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.mapper.MapperException; import java.util.ArrayList; @@ -44,8 +45,9 @@ * Named analyzer is an analyzer wrapper around an 
actual analyzer ({@link #analyzer} that is associated * with a name ({@link #name()}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NamedAnalyzer extends DelegatingAnalyzerWrapper { private final String name; diff --git a/server/src/main/java/org/opensearch/index/analysis/NormalizingCharFilterFactory.java b/server/src/main/java/org/opensearch/index/analysis/NormalizingCharFilterFactory.java index ab8d23339029c..30fe31105e1d9 100644 --- a/server/src/main/java/org/opensearch/index/analysis/NormalizingCharFilterFactory.java +++ b/server/src/main/java/org/opensearch/index/analysis/NormalizingCharFilterFactory.java @@ -36,7 +36,7 @@ /** * A CharFilterFactory that also supports normalization - * + * <p> * The default implementation of {@link #normalize(Reader)} delegates to * {@link #create(Reader)} * diff --git a/server/src/main/java/org/opensearch/index/analysis/NormalizingTokenFilterFactory.java b/server/src/main/java/org/opensearch/index/analysis/NormalizingTokenFilterFactory.java index be761aee0d36c..2ed621cdd22b1 100644 --- a/server/src/main/java/org/opensearch/index/analysis/NormalizingTokenFilterFactory.java +++ b/server/src/main/java/org/opensearch/index/analysis/NormalizingTokenFilterFactory.java @@ -36,7 +36,7 @@ /** * A TokenFilterFactory that may be used for normalization - * + * <p> * The default implementation delegates {@link #normalize(TokenStream)} to * {@link #create(TokenStream)}}. * diff --git a/server/src/main/java/org/opensearch/index/analysis/PreBuiltAnalyzerProviderFactory.java b/server/src/main/java/org/opensearch/index/analysis/PreBuiltAnalyzerProviderFactory.java index a65e1898bea0d..8719d127781e0 100644 --- a/server/src/main/java/org/opensearch/index/analysis/PreBuiltAnalyzerProviderFactory.java +++ b/server/src/main/java/org/opensearch/index/analysis/PreBuiltAnalyzerProviderFactory.java @@ -102,7 +102,7 @@ public void close() throws IOException { /** * A special cache that closes the gap between PreBuiltAnalyzers and PreBuiltAnalyzerProviderFactory. - * + * <p> * This can be removed when all analyzers have been moved away from PreBuiltAnalyzers to * PreBuiltAnalyzerProviderFactory either in server or analysis-common. 
* diff --git a/server/src/main/java/org/opensearch/index/analysis/ReloadableCustomAnalyzer.java b/server/src/main/java/org/opensearch/index/analysis/ReloadableCustomAnalyzer.java index ba8996eb9c17b..c1bfca93b90f7 100644 --- a/server/src/main/java/org/opensearch/index/analysis/ReloadableCustomAnalyzer.java +++ b/server/src/main/java/org/opensearch/index/analysis/ReloadableCustomAnalyzer.java @@ -37,7 +37,7 @@ import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.util.CloseableThreadLocal; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.CollectionUtils; +import org.opensearch.core.common.util.CollectionUtils; import java.io.Reader; import java.util.Map; diff --git a/server/src/main/java/org/opensearch/index/analysis/ShingleTokenFilterFactory.java b/server/src/main/java/org/opensearch/index/analysis/ShingleTokenFilterFactory.java index e66ae20508dfe..16a1f9a067998 100644 --- a/server/src/main/java/org/opensearch/index/analysis/ShingleTokenFilterFactory.java +++ b/server/src/main/java/org/opensearch/index/analysis/ShingleTokenFilterFactory.java @@ -155,11 +155,11 @@ public TokenStream create(TokenStream tokenStream) { filter.setTokenSeparator(tokenSeparator); filter.setFillerToken(fillerToken); if (outputUnigrams || (minShingleSize != maxShingleSize)) { - /** - * We disable the graph analysis on this token stream - * because it produces shingles of different size. - * Graph analysis on such token stream is useless and dangerous as it may create too many paths - * since shingles of different size are not aligned in terms of positions. + /* + We disable the graph analysis on this token stream + because it produces shingles of different size. + Graph analysis on such token stream is useless and dangerous as it may create too many paths + since shingles of different size are not aligned in terms of positions. 
*/ filter.addAttribute(DisableGraphAttribute.class); } diff --git a/server/src/main/java/org/opensearch/index/analysis/TokenFilterFactory.java b/server/src/main/java/org/opensearch/index/analysis/TokenFilterFactory.java index 1b9d781b177ce..af18927de8a98 100644 --- a/server/src/main/java/org/opensearch/index/analysis/TokenFilterFactory.java +++ b/server/src/main/java/org/opensearch/index/analysis/TokenFilterFactory.java @@ -34,6 +34,7 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.search.fetch.subphase.highlight.FastVectorHighlighter; import java.util.List; @@ -42,8 +43,9 @@ /** * Base token filter factory used in analysis chain * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface TokenFilterFactory { String name(); @@ -51,7 +53,7 @@ public interface TokenFilterFactory { /** * Normalize a tokenStream for use in multi-term queries - * + * <p> * The default implementation is a no-op */ default TokenStream normalize(TokenStream tokenStream) { @@ -86,7 +88,7 @@ default TokenFilterFactory getChainAwareTokenFilterFactory( /** * Return a version of this TokenFilterFactory appropriate for synonym parsing - * + * <p> * Filters that should not be applied to synonyms (for example, those that produce * multiple tokens) should throw an exception * diff --git a/server/src/main/java/org/opensearch/index/analysis/TokenizerFactory.java b/server/src/main/java/org/opensearch/index/analysis/TokenizerFactory.java index b667175e89b4a..a1a9c4966865d 100644 --- a/server/src/main/java/org/opensearch/index/analysis/TokenizerFactory.java +++ b/server/src/main/java/org/opensearch/index/analysis/TokenizerFactory.java @@ -33,14 +33,16 @@ package org.opensearch.index.analysis; import org.apache.lucene.analysis.Tokenizer; +import org.opensearch.common.annotation.PublicApi; import java.util.function.Supplier; /** * Base tokenizer factory used in analysis chain * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface TokenizerFactory { String name(); diff --git a/server/src/main/java/org/opensearch/index/cache/IndexCache.java b/server/src/main/java/org/opensearch/index/cache/IndexCache.java index 90462e9919970..1067863fe9675 100644 --- a/server/src/main/java/org/opensearch/index/cache/IndexCache.java +++ b/server/src/main/java/org/opensearch/index/cache/IndexCache.java @@ -32,6 +32,7 @@ package org.opensearch.index.cache; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.io.IOUtils; import org.opensearch.index.AbstractIndexComponent; import org.opensearch.index.IndexSettings; @@ -44,8 +45,9 @@ /** * Wrapping class for the index cache * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexCache extends AbstractIndexComponent implements Closeable { private final QueryCache queryCache; diff --git a/server/src/main/java/org/opensearch/index/cache/bitset/BitsetFilterCache.java b/server/src/main/java/org/opensearch/index/cache/bitset/BitsetFilterCache.java index f4436fedb25f0..88d20bca7b6d4 100644 --- a/server/src/main/java/org/opensearch/index/cache/bitset/BitsetFilterCache.java +++ b/server/src/main/java/org/opensearch/index/cache/bitset/BitsetFilterCache.java @@ -48,6 +48,7 @@ import org.apache.lucene.util.BitDocIdSet; import org.apache.lucene.util.BitSet; import org.opensearch.ExceptionsHelper; +import 
org.opensearch.common.annotation.PublicApi; import org.opensearch.common.cache.Cache; import org.opensearch.common.cache.CacheBuilder; import org.opensearch.common.cache.RemovalListener; @@ -57,6 +58,7 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.AbstractIndexComponent; import org.opensearch.index.IndexSettings; import org.opensearch.index.IndexWarmer; @@ -65,7 +67,6 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.ObjectMapper; import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.ShardUtils; import org.opensearch.threadpool.ThreadPool; @@ -85,8 +86,9 @@ * and require that it should always be around should use this cache, otherwise the * {@link org.opensearch.index.cache.query.QueryCache} should be used instead. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class BitsetFilterCache extends AbstractIndexComponent implements IndexReader.ClosedListener, @@ -195,8 +197,9 @@ public void onRemoval(RemovalNotification<IndexReader.CacheKey, Cache<Query, Val /** * Value for bitset filter cache * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class Value { final BitSet bitset; diff --git a/server/src/main/java/org/opensearch/index/cache/bitset/ShardBitsetFilterCache.java b/server/src/main/java/org/opensearch/index/cache/bitset/ShardBitsetFilterCache.java index 3e7720dabad40..2f770e4307eb8 100644 --- a/server/src/main/java/org/opensearch/index/cache/bitset/ShardBitsetFilterCache.java +++ b/server/src/main/java/org/opensearch/index/cache/bitset/ShardBitsetFilterCache.java @@ -32,16 +32,18 @@ package org.opensearch.index.cache.bitset; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.metrics.CounterMetric; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.AbstractIndexShardComponent; -import org.opensearch.core.index.shard.ShardId; /** * Bitset Filter Cache for shards * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ShardBitsetFilterCache extends AbstractIndexShardComponent { private final CounterMetric totalMetric = new CounterMetric(); diff --git a/server/src/main/java/org/opensearch/index/cache/query/QueryCache.java b/server/src/main/java/org/opensearch/index/cache/query/QueryCache.java index 384f66fcfc132..2c8188a523ade 100644 --- a/server/src/main/java/org/opensearch/index/cache/query/QueryCache.java +++ b/server/src/main/java/org/opensearch/index/cache/query/QueryCache.java @@ -32,6 +32,7 @@ package org.opensearch.index.cache.query; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.IndexComponent; import java.io.Closeable; @@ -39,8 +40,9 @@ /** * Base interface for a query cache * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface QueryCache extends IndexComponent, Closeable, org.apache.lucene.search.QueryCache { void clear(String reason); diff --git a/server/src/main/java/org/opensearch/index/cache/query/QueryCacheStats.java b/server/src/main/java/org/opensearch/index/cache/query/QueryCacheStats.java index a4f2628b5e5a3..d844e5cbb8897 100644 --- 
a/server/src/main/java/org/opensearch/index/cache/query/QueryCacheStats.java +++ b/server/src/main/java/org/opensearch/index/cache/query/QueryCacheStats.java @@ -33,10 +33,11 @@ package org.opensearch.index.cache.query; import org.apache.lucene.search.DocIdSet; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -46,8 +47,9 @@ /** * Stats for the query cache * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class QueryCacheStats implements Writeable, ToXContentFragment { private long ramBytesUsed; diff --git a/server/src/main/java/org/opensearch/index/cache/request/RequestCacheStats.java b/server/src/main/java/org/opensearch/index/cache/request/RequestCacheStats.java index ab8b4706e4ebe..6def55fcb985b 100644 --- a/server/src/main/java/org/opensearch/index/cache/request/RequestCacheStats.java +++ b/server/src/main/java/org/opensearch/index/cache/request/RequestCacheStats.java @@ -32,10 +32,11 @@ package org.opensearch.index.cache.request; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -44,8 +45,9 @@ /** * Request for the query cache statistics * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RequestCacheStats implements Writeable, ToXContentFragment { private long memorySize; diff --git a/server/src/main/java/org/opensearch/index/cache/request/ShardRequestCache.java b/server/src/main/java/org/opensearch/index/cache/request/ShardRequestCache.java index c3c552b5f732d..bb35a09ccab46 100644 --- a/server/src/main/java/org/opensearch/index/cache/request/ShardRequestCache.java +++ b/server/src/main/java/org/opensearch/index/cache/request/ShardRequestCache.java @@ -33,14 +33,16 @@ package org.opensearch.index.cache.request; import org.apache.lucene.util.Accountable; -import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.metrics.CounterMetric; +import org.opensearch.core.common.bytes.BytesReference; /** * Tracks the portion of the request cache in use for a particular shard. 
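// Editor's note: the recurring change across these cache and stats hunks is one
// pattern: the javadoc tag flips from @opensearch.internal to @opensearch.api and the
// type gains the runtime annotation. A minimal sketch of the pattern on a hypothetical
// class (StatsExample is illustrative, not from this diff):
import org.opensearch.common.annotation.PublicApi;

/**
 * Example of a type graduating to the public API surface.
 *
 * @opensearch.api
 */
@PublicApi(since = "1.0.0")
class StatsExample {}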
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ShardRequestCache { final CounterMetric evictionsMetric = new CounterMetric(); diff --git a/server/src/main/java/org/opensearch/index/codec/CodecAliases.java b/server/src/main/java/org/opensearch/index/codec/CodecAliases.java new file mode 100644 index 0000000000000..066c092e86db8 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/CodecAliases.java @@ -0,0 +1,32 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec; + +import org.apache.lucene.codecs.Codec; +import org.opensearch.common.annotation.ExperimentalApi; + +import java.util.Set; + +/** + * This {@link CodecAliases} interface provides aliases for the {@link Codec}. + * + * @opensearch.internal + */ +@ExperimentalApi +public interface CodecAliases { + + /** + * Retrieves a set of aliases for a codec. + * + * @return A non-null set of alias strings. If no aliases are available, an empty set should be returned. + */ + default Set<String> aliases() { + return Set.of(); + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/CodecService.java b/server/src/main/java/org/opensearch/index/codec/CodecService.java index da64e2954b0d8..67f38536a0d11 100644 --- a/server/src/main/java/org/opensearch/index/codec/CodecService.java +++ b/server/src/main/java/org/opensearch/index/codec/CodecService.java @@ -34,19 +34,15 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.Codec; -import org.apache.lucene.codecs.lucene95.Lucene95Codec; -import org.apache.lucene.codecs.lucene95.Lucene95Codec.Mode; +import org.apache.lucene.codecs.lucene99.Lucene99Codec; +import org.apache.lucene.codecs.lucene99.Lucene99Codec.Mode; import org.opensearch.common.Nullable; import org.opensearch.common.collect.MapBuilder; import org.opensearch.index.IndexSettings; -import org.opensearch.index.codec.customcodecs.ZstdCodec; -import org.opensearch.index.codec.customcodecs.ZstdNoDictCodec; import org.opensearch.index.mapper.MapperService; import java.util.Map; -import static org.opensearch.index.engine.EngineConfig.INDEX_CODEC_COMPRESSION_LEVEL_SETTING; - /** * Since Lucene 4.0 low level index segments are read and written through a * codec layer that allows to use use-case specific file formats & @@ -60,28 +56,27 @@ public class CodecService { private final Map<String, Codec> codecs; public static final String DEFAULT_CODEC = "default"; + public static final String LZ4 = "lz4"; public static final String BEST_COMPRESSION_CODEC = "best_compression"; + public static final String ZLIB = "zlib"; /** * the raw unfiltered lucene default.
useful for testing */ public static final String LUCENE_DEFAULT_CODEC = "lucene_default"; - public static final String ZSTD_CODEC = "zstd"; - public static final String ZSTD_NO_DICT_CODEC = "zstd_no_dict"; public CodecService(@Nullable MapperService mapperService, IndexSettings indexSettings, Logger logger) { final MapBuilder<String, Codec> codecs = MapBuilder.<String, Codec>newMapBuilder(); assert null != indexSettings; - int compressionLevel = indexSettings.getValue(INDEX_CODEC_COMPRESSION_LEVEL_SETTING); if (mapperService == null) { - codecs.put(DEFAULT_CODEC, new Lucene95Codec()); - codecs.put(BEST_COMPRESSION_CODEC, new Lucene95Codec(Mode.BEST_COMPRESSION)); - codecs.put(ZSTD_CODEC, new ZstdCodec(compressionLevel)); - codecs.put(ZSTD_NO_DICT_CODEC, new ZstdNoDictCodec(compressionLevel)); + codecs.put(DEFAULT_CODEC, new Lucene99Codec()); + codecs.put(LZ4, new Lucene99Codec()); + codecs.put(BEST_COMPRESSION_CODEC, new Lucene99Codec(Mode.BEST_COMPRESSION)); + codecs.put(ZLIB, new Lucene99Codec(Mode.BEST_COMPRESSION)); } else { codecs.put(DEFAULT_CODEC, new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger)); + codecs.put(LZ4, new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger)); codecs.put(BEST_COMPRESSION_CODEC, new PerFieldMappingPostingFormatCodec(Mode.BEST_COMPRESSION, mapperService, logger)); - codecs.put(ZSTD_CODEC, new ZstdCodec(mapperService, logger, compressionLevel)); - codecs.put(ZSTD_NO_DICT_CODEC, new ZstdNoDictCodec(mapperService, logger, compressionLevel)); + codecs.put(ZLIB, new PerFieldMappingPostingFormatCodec(Mode.BEST_COMPRESSION, mapperService, logger)); } codecs.put(LUCENE_DEFAULT_CODEC, Codec.getDefault()); for (String codec : Codec.availableCodecs()) { diff --git a/server/src/main/java/org/opensearch/index/codec/CodecSettings.java b/server/src/main/java/org/opensearch/index/codec/CodecSettings.java new file mode 100644 index 0000000000000..2d371dfc190db --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/CodecSettings.java @@ -0,0 +1,21 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec; + +import org.apache.lucene.codecs.Codec; +import org.opensearch.common.settings.Setting; + +/** + * This {@link CodecSettings} interface allows a {@link Codec} to declare which settings it supports.
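// Editor's note: a hedged sketch (hypothetical helper, not part of this diff) of the
// name-to-codec table CodecService now builds: "lz4" is an alias of "default" and
// "zlib" an alias of "best_compression", all backed by Lucene99Codec.
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.lucene99.Lucene99Codec;

import java.util.Map;

class CodecNameSketch {
    private static final Map<String, Codec> CODECS = Map.of(
        "default", new Lucene99Codec(),
        "lz4", new Lucene99Codec(),                                    // alias of default
        "best_compression", new Lucene99Codec(Lucene99Codec.Mode.BEST_COMPRESSION),
        "zlib", new Lucene99Codec(Lucene99Codec.Mode.BEST_COMPRESSION) // alias of best_compression
    );

    static Codec lookup(String name) {
        Codec codec = CODECS.get(name);
        if (codec == null) {
            throw new IllegalArgumentException("unknown codec [" + name + "]");
        }
        return codec;
    }
}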
+ * + * @opensearch.internal + */ +public interface CodecSettings { + boolean supports(Setting<?> setting); +} diff --git a/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java b/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java index f1b515534bdeb..1ad17f121560c 100644 --- a/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java +++ b/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java @@ -36,13 +36,19 @@ import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.DocValuesFormat; import org.apache.lucene.codecs.PostingsFormat; -import org.apache.lucene.codecs.lucene95.Lucene95Codec; import org.apache.lucene.codecs.lucene90.Lucene90DocValuesFormat; +import org.apache.lucene.codecs.lucene99.Lucene99Codec; import org.opensearch.common.lucene.Lucene; +import org.opensearch.index.codec.fuzzy.FuzzyFilterPostingsFormat; +import org.opensearch.index.codec.fuzzy.FuzzySetFactory; +import org.opensearch.index.codec.fuzzy.FuzzySetParameters; import org.opensearch.index.mapper.CompletionFieldMapper; +import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; +import java.util.Map; + /** * {@link PerFieldMappingPostingFormatCodec This postings format} is the default * {@link PostingsFormat} for OpenSearch. It utilizes the @@ -53,10 +59,12 @@ * * @opensearch.internal */ -public class PerFieldMappingPostingFormatCodec extends Lucene95Codec { +public class PerFieldMappingPostingFormatCodec extends Lucene99Codec { private final Logger logger; private final MapperService mapperService; private final DocValuesFormat dvFormat = new Lucene90DocValuesFormat(); + private final FuzzySetFactory fuzzySetFactory; + private PostingsFormat docIdPostingsFormat; static { assert Codec.forName(Lucene.LATEST_CODEC).getClass().isAssignableFrom(PerFieldMappingPostingFormatCodec.class) @@ -67,6 +75,12 @@ public PerFieldMappingPostingFormatCodec(Mode compressionMode, MapperService map super(compressionMode); this.mapperService = mapperService; this.logger = logger; + fuzzySetFactory = new FuzzySetFactory( + Map.of( + IdFieldMapper.NAME, + new FuzzySetParameters(() -> mapperService.getIndexSettings().getDocIdFuzzySetFalsePositiveProbability()) + ) + ); } @Override @@ -76,6 +90,11 @@ public PostingsFormat getPostingsFormatForField(String field) { logger.warn("no index mapper found for field: [{}] returning default postings format", field); } else if (fieldType instanceof CompletionFieldMapper.CompletionFieldType) { return CompletionFieldMapper.CompletionFieldType.postingsFormat(); + } else if (IdFieldMapper.NAME.equals(field) && mapperService.getIndexSettings().isEnableFuzzySetForDocId()) { + if (docIdPostingsFormat == null) { + docIdPostingsFormat = new FuzzyFilterPostingsFormat(super.getPostingsFormatForField(field), fuzzySetFactory); + } + return docIdPostingsFormat; } return super.getPostingsFormatForField(field); } diff --git a/server/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomCodec.java b/server/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomCodec.java deleted file mode 100644 index 8aa422a47a073..0000000000000 --- a/server/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomCodec.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to 
- * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.codec.customcodecs; - -import org.apache.logging.log4j.Logger; -import org.apache.lucene.codecs.FilterCodec; -import org.apache.lucene.codecs.StoredFieldsFormat; -import org.apache.lucene.codecs.lucene95.Lucene95Codec; -import org.opensearch.index.codec.PerFieldMappingPostingFormatCodec; -import org.opensearch.index.mapper.MapperService; - -/** - * - * Extends {@link FilterCodec} to reuse the functionality of Lucene Codec. - * Supports two modes zstd and zstd_no_dict. - * - * @opensearch.internal - */ -public abstract class Lucene95CustomCodec extends FilterCodec { - public static final int DEFAULT_COMPRESSION_LEVEL = 3; - - /** Each mode represents a compression algorithm. */ - public enum Mode { - ZSTD, - ZSTD_NO_DICT - } - - private final StoredFieldsFormat storedFieldsFormat; - - /** - * Creates a new compression codec with the default compression level. - * - * @param mode The compression codec (ZSTD or ZSTDNODICT). - */ - public Lucene95CustomCodec(Mode mode) { - this(mode, DEFAULT_COMPRESSION_LEVEL); - } - - /** - * Creates a new compression codec with the given compression level. We use - * lowercase letters when registering the codec so that we remain consistent with - * the other compression codecs: default, lucene_default, and best_compression. - * - * @param mode The compression codec (ZSTD or ZSTDNODICT). - * @param compressionLevel The compression level. - */ - public Lucene95CustomCodec(Mode mode, int compressionLevel) { - super("Lucene95CustomCodec", new Lucene95Codec()); - this.storedFieldsFormat = new Lucene95CustomStoredFieldsFormat(mode, compressionLevel); - } - - public Lucene95CustomCodec(Mode mode, int compressionLevel, MapperService mapperService, Logger logger) { - super("Lucene95CustomCodec", new PerFieldMappingPostingFormatCodec(Lucene95Codec.Mode.BEST_SPEED, mapperService, logger)); - this.storedFieldsFormat = new Lucene95CustomStoredFieldsFormat(mode, compressionLevel); - } - - @Override - public StoredFieldsFormat storedFieldsFormat() { - return storedFieldsFormat; - } - - @Override - public String toString() { - return getClass().getSimpleName(); - } -} diff --git a/server/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormat.java b/server/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormat.java deleted file mode 100644 index 2816e2907a5f6..0000000000000 --- a/server/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormat.java +++ /dev/null @@ -1,131 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.index.codec.customcodecs; - -import org.apache.lucene.codecs.StoredFieldsFormat; -import org.apache.lucene.codecs.StoredFieldsReader; -import org.apache.lucene.codecs.StoredFieldsWriter; -import org.apache.lucene.codecs.compressing.CompressionMode; -import org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsFormat; -import org.apache.lucene.index.FieldInfos; -import org.apache.lucene.index.SegmentInfo; -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.IOContext; - -import java.io.IOException; -import java.util.Objects; - -/** Stored field format used by pluggable codec */ -public class Lucene95CustomStoredFieldsFormat extends StoredFieldsFormat { - - /** A key that we use to map to a mode */ - public static final String MODE_KEY = Lucene95CustomStoredFieldsFormat.class.getSimpleName() + ".mode"; - - private static final int ZSTD_BLOCK_LENGTH = 10 * 48 * 1024; - private static final int ZSTD_MAX_DOCS_PER_BLOCK = 4096; - private static final int ZSTD_BLOCK_SHIFT = 10; - - private final CompressionMode zstdCompressionMode; - private final CompressionMode zstdNoDictCompressionMode; - - private final Lucene95CustomCodec.Mode mode; - private final int compressionLevel; - - /** default constructor */ - public Lucene95CustomStoredFieldsFormat() { - this(Lucene95CustomCodec.Mode.ZSTD, Lucene95CustomCodec.DEFAULT_COMPRESSION_LEVEL); - } - - /** - * Creates a new instance. - * - * @param mode The mode represents ZSTD or ZSTDNODICT - */ - public Lucene95CustomStoredFieldsFormat(Lucene95CustomCodec.Mode mode) { - this(mode, Lucene95CustomCodec.DEFAULT_COMPRESSION_LEVEL); - } - - /** - * Creates a new instance with the specified mode and compression level. - * - * @param mode The mode represents ZSTD or ZSTDNODICT - * @param compressionLevel The compression level for the mode. - */ - public Lucene95CustomStoredFieldsFormat(Lucene95CustomCodec.Mode mode, int compressionLevel) { - this.mode = Objects.requireNonNull(mode); - this.compressionLevel = compressionLevel; - zstdCompressionMode = new ZstdCompressionMode(compressionLevel); - zstdNoDictCompressionMode = new ZstdNoDictCompressionMode(compressionLevel); - } - - /** - * Returns a {@link StoredFieldsReader} to load stored fields. - * @param directory The index directory. - * @param si The SegmentInfo that stores segment information. - * @param fn The fieldInfos. - * @param context The IOContext that holds additional details on the merge/search context. - */ - @Override - public StoredFieldsReader fieldsReader(Directory directory, SegmentInfo si, FieldInfos fn, IOContext context) throws IOException { - String value = si.getAttribute(MODE_KEY); - if (value == null) { - throw new IllegalStateException("missing value for " + MODE_KEY + " for segment: " + si.name); - } - Lucene95CustomCodec.Mode mode = Lucene95CustomCodec.Mode.valueOf(value); - return impl(mode).fieldsReader(directory, si, fn, context); - } - - /** - * Returns a {@link StoredFieldsReader} to write stored fields. - * @param directory The index directory. - * @param si The SegmentInfo that stores segment information. - * @param context The IOContext that holds additional details on the merge/search context. 
- */ - @Override - public StoredFieldsWriter fieldsWriter(Directory directory, SegmentInfo si, IOContext context) throws IOException { - String previous = si.putAttribute(MODE_KEY, mode.name()); - if (previous != null && previous.equals(mode.name()) == false) { - throw new IllegalStateException( - "found existing value for " + MODE_KEY + " for segment: " + si.name + " old = " + previous + ", new = " + mode.name() - ); - } - return impl(mode).fieldsWriter(directory, si, context); - } - - StoredFieldsFormat impl(Lucene95CustomCodec.Mode mode) { - switch (mode) { - case ZSTD: - return new Lucene90CompressingStoredFieldsFormat( - "CustomStoredFieldsZstd", - zstdCompressionMode, - ZSTD_BLOCK_LENGTH, - ZSTD_MAX_DOCS_PER_BLOCK, - ZSTD_BLOCK_SHIFT - ); - case ZSTD_NO_DICT: - return new Lucene90CompressingStoredFieldsFormat( - "CustomStoredFieldsZstdNoDict", - zstdNoDictCompressionMode, - ZSTD_BLOCK_LENGTH, - ZSTD_MAX_DOCS_PER_BLOCK, - ZSTD_BLOCK_SHIFT - ); - default: - throw new AssertionError(); - } - } - - Lucene95CustomCodec.Mode getMode() { - return mode; - } - - public int getCompressionLevel() { - return compressionLevel; - } -} diff --git a/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCodec.java b/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCodec.java deleted file mode 100644 index 04c110fceacdf..0000000000000 --- a/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCodec.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.codec.customcodecs; - -import org.apache.logging.log4j.Logger; -import org.opensearch.index.mapper.MapperService; - -/** - * ZstdCodec provides ZSTD compressor using the <a href="https://github.com/luben/zstd-jni">zstd-jni</a> library. - */ -public class ZstdCodec extends Lucene95CustomCodec { - - /** - * Creates a new ZstdCodec instance with the default compression level. - */ - public ZstdCodec() { - this(DEFAULT_COMPRESSION_LEVEL); - } - - /** - * Creates a new ZstdCodec instance. - * - * @param compressionLevel The compression level. - */ - public ZstdCodec(int compressionLevel) { - super(Mode.ZSTD, compressionLevel); - } - - public ZstdCodec(MapperService mapperService, Logger logger, int compressionLevel) { - super(Mode.ZSTD, compressionLevel, mapperService, logger); - } - - /** The name for this codec. */ - @Override - public String toString() { - return getClass().getSimpleName(); - } -} diff --git a/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCompressionMode.java b/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCompressionMode.java deleted file mode 100644 index 7057dac3d6bd2..0000000000000 --- a/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCompressionMode.java +++ /dev/null @@ -1,206 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.index.codec.customcodecs; - -import com.github.luben.zstd.Zstd; -import com.github.luben.zstd.ZstdCompressCtx; -import com.github.luben.zstd.ZstdDecompressCtx; -import com.github.luben.zstd.ZstdDictCompress; -import com.github.luben.zstd.ZstdDictDecompress; -import org.apache.lucene.codecs.compressing.CompressionMode; -import org.apache.lucene.codecs.compressing.Compressor; -import org.apache.lucene.codecs.compressing.Decompressor; -import org.apache.lucene.store.ByteBuffersDataInput; -import org.apache.lucene.store.DataInput; -import org.apache.lucene.store.DataOutput; -import org.apache.lucene.util.ArrayUtil; -import org.apache.lucene.util.BytesRef; - -import java.io.IOException; - -/** Zstandard Compression Mode */ -public class ZstdCompressionMode extends CompressionMode { - - private static final int NUM_SUB_BLOCKS = 10; - private static final int DICT_SIZE_FACTOR = 6; - private static final int DEFAULT_COMPRESSION_LEVEL = 6; - - private final int compressionLevel; - - /** default constructor */ - protected ZstdCompressionMode() { - this.compressionLevel = DEFAULT_COMPRESSION_LEVEL; - } - - /** - * Creates a new instance. - * - * @param compressionLevel The compression level to use. - */ - protected ZstdCompressionMode(int compressionLevel) { - this.compressionLevel = compressionLevel; - } - - /** Creates a new compressor instance.*/ - @Override - public Compressor newCompressor() { - return new ZstdCompressor(compressionLevel); - } - - /** Creates a new decompressor instance. */ - @Override - public Decompressor newDecompressor() { - return new ZstdDecompressor(); - } - - /** zstandard compressor */ - private static final class ZstdCompressor extends Compressor { - - private final int compressionLevel; - private byte[] compressedBuffer; - - /** compressor with a given compresion level */ - public ZstdCompressor(int compressionLevel) { - this.compressionLevel = compressionLevel; - compressedBuffer = BytesRef.EMPTY_BYTES; - } - - /*resuable compress function*/ - private void doCompress(byte[] bytes, int offset, int length, ZstdCompressCtx cctx, DataOutput out) throws IOException { - if (length == 0) { - out.writeVInt(0); - return; - } - final int maxCompressedLength = (int) Zstd.compressBound(length); - compressedBuffer = ArrayUtil.growNoCopy(compressedBuffer, maxCompressedLength); - - int compressedSize = cctx.compressByteArray(compressedBuffer, 0, compressedBuffer.length, bytes, offset, length); - - out.writeVInt(compressedSize); - out.writeBytes(compressedBuffer, compressedSize); - } - - private void compress(byte[] bytes, int offset, int length, DataOutput out) throws IOException { - assert offset >= 0 : "offset value must be greater than 0"; - - final int dictLength = length / (NUM_SUB_BLOCKS * DICT_SIZE_FACTOR); - final int blockLength = (length - dictLength + NUM_SUB_BLOCKS - 1) / NUM_SUB_BLOCKS; - out.writeVInt(dictLength); - out.writeVInt(blockLength); - - final int end = offset + length; - assert end >= 0 : "buffer read size must be greater than 0"; - - try (ZstdCompressCtx cctx = new ZstdCompressCtx()) { - cctx.setLevel(compressionLevel); - - // dictionary compression first - doCompress(bytes, offset, dictLength, cctx, out); - cctx.loadDict(new ZstdDictCompress(bytes, offset, dictLength, compressionLevel)); - - for (int start = offset + dictLength; start < end; start += blockLength) { - int l = Math.min(blockLength, end - start); - doCompress(bytes, start, l, cctx, out); - } - } - } - - @Override - public void compress(ByteBuffersDataInput 
buffersInput, DataOutput out) throws IOException { - final int length = (int) buffersInput.size(); - byte[] bytes = new byte[length]; - buffersInput.readBytes(bytes, 0, length); - compress(bytes, 0, length, out); - } - - @Override - public void close() throws IOException {} - } - - /** zstandard decompressor */ - private static final class ZstdDecompressor extends Decompressor { - - private byte[] compressedBuffer; - - /** default decompressor */ - public ZstdDecompressor() { - compressedBuffer = BytesRef.EMPTY_BYTES; - } - - /*resuable decompress function*/ - private void doDecompress(DataInput in, ZstdDecompressCtx dctx, BytesRef bytes, int decompressedLen) throws IOException { - final int compressedLength = in.readVInt(); - if (compressedLength == 0) { - return; - } - - compressedBuffer = ArrayUtil.growNoCopy(compressedBuffer, compressedLength); - in.readBytes(compressedBuffer, 0, compressedLength); - - bytes.bytes = ArrayUtil.grow(bytes.bytes, bytes.length + decompressedLen); - int uncompressed = dctx.decompressByteArray(bytes.bytes, bytes.length, decompressedLen, compressedBuffer, 0, compressedLength); - - if (decompressedLen != uncompressed) { - throw new IllegalStateException(decompressedLen + " " + uncompressed); - } - bytes.length += uncompressed; - } - - @Override - public void decompress(DataInput in, int originalLength, int offset, int length, BytesRef bytes) throws IOException { - assert offset + length <= originalLength : "buffer read size must be within limit"; - - if (length == 0) { - bytes.length = 0; - return; - } - final int dictLength = in.readVInt(); - final int blockLength = in.readVInt(); - bytes.bytes = ArrayUtil.growNoCopy(bytes.bytes, dictLength); - bytes.offset = bytes.length = 0; - - try (ZstdDecompressCtx dctx = new ZstdDecompressCtx()) { - - // decompress dictionary first - doDecompress(in, dctx, bytes, dictLength); - - dctx.loadDict(new ZstdDictDecompress(bytes.bytes, 0, dictLength)); - - int offsetInBlock = dictLength; - int offsetInBytesRef = offset; - - // Skip unneeded blocks - while (offsetInBlock + blockLength < offset) { - final int compressedLength = in.readVInt(); - in.skipBytes(compressedLength); - offsetInBlock += blockLength; - offsetInBytesRef -= blockLength; - } - - // Read blocks that intersect with the interval we need - while (offsetInBlock < offset + length) { - bytes.bytes = ArrayUtil.grow(bytes.bytes, bytes.length + blockLength); - int l = Math.min(blockLength, originalLength - offsetInBlock); - doDecompress(in, dctx, bytes, l); - offsetInBlock += blockLength; - } - - bytes.offset = offsetInBytesRef; - bytes.length = length; - - assert bytes.isValid() : "decompression output is corrupted"; - } - } - - @Override - public Decompressor clone() { - return new ZstdDecompressor(); - } - } -} diff --git a/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java b/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java deleted file mode 100644 index 134f9a14422ad..0000000000000 --- a/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.index.codec.customcodecs; - -import org.apache.logging.log4j.Logger; -import org.opensearch.index.mapper.MapperService; - -/** - * ZstdNoDictCodec provides ZSTD compressor without a dictionary support. - */ -public class ZstdNoDictCodec extends Lucene95CustomCodec { - - /** - * Creates a new ZstdNoDictCodec instance with the default compression level. - */ - public ZstdNoDictCodec() { - this(DEFAULT_COMPRESSION_LEVEL); - } - - /** - * Creates a new ZstdNoDictCodec instance. - * - * @param compressionLevel The compression level. - */ - public ZstdNoDictCodec(int compressionLevel) { - super(Mode.ZSTD_NO_DICT, compressionLevel); - } - - public ZstdNoDictCodec(MapperService mapperService, Logger logger, int compressionLevel) { - super(Mode.ZSTD_NO_DICT, compressionLevel, mapperService, logger); - } - - /** The name for this codec. */ - @Override - public String toString() { - return getClass().getSimpleName(); - } -} diff --git a/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressionMode.java b/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressionMode.java deleted file mode 100644 index 7a1d661550768..0000000000000 --- a/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressionMode.java +++ /dev/null @@ -1,181 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.codec.customcodecs; - -import com.github.luben.zstd.Zstd; -import org.apache.lucene.codecs.compressing.CompressionMode; -import org.apache.lucene.codecs.compressing.Compressor; -import org.apache.lucene.codecs.compressing.Decompressor; -import org.apache.lucene.store.ByteBuffersDataInput; -import org.apache.lucene.store.DataInput; -import org.apache.lucene.store.DataOutput; -import org.apache.lucene.util.ArrayUtil; -import org.apache.lucene.util.BytesRef; - -import java.io.IOException; - -/** ZSTD Compression Mode (without a dictionary support). */ -public class ZstdNoDictCompressionMode extends CompressionMode { - - private static final int NUM_SUB_BLOCKS = 10; - private static final int DEFAULT_COMPRESSION_LEVEL = 6; - - private final int compressionLevel; - - /** default constructor */ - protected ZstdNoDictCompressionMode() { - this.compressionLevel = DEFAULT_COMPRESSION_LEVEL; - } - - /** - * Creates a new instance with the given compression level. - * - * @param compressionLevel The compression level. - */ - protected ZstdNoDictCompressionMode(int compressionLevel) { - this.compressionLevel = compressionLevel; - } - - /** Creates a new compressor instance.*/ - @Override - public Compressor newCompressor() { - return new ZstdCompressor(compressionLevel); - } - - /** Creates a new decompressor instance. 
*/ - @Override - public Decompressor newDecompressor() { - return new ZstdDecompressor(); - } - - /** zstandard compressor */ - private static final class ZstdCompressor extends Compressor { - - private final int compressionLevel; - private byte[] compressedBuffer; - - /** compressor with a given compresion level */ - public ZstdCompressor(int compressionLevel) { - this.compressionLevel = compressionLevel; - compressedBuffer = BytesRef.EMPTY_BYTES; - } - - private void compress(byte[] bytes, int offset, int length, DataOutput out) throws IOException { - assert offset >= 0 : "offset value must be greater than 0"; - - int blockLength = (length + NUM_SUB_BLOCKS - 1) / NUM_SUB_BLOCKS; - out.writeVInt(blockLength); - - final int end = offset + length; - assert end >= 0 : "buffer read size must be greater than 0"; - - for (int start = offset; start < end; start += blockLength) { - int l = Math.min(blockLength, end - start); - - if (l == 0) { - out.writeVInt(0); - return; - } - - final int maxCompressedLength = (int) Zstd.compressBound(l); - compressedBuffer = ArrayUtil.growNoCopy(compressedBuffer, maxCompressedLength); - - int compressedSize = (int) Zstd.compressByteArray( - compressedBuffer, - 0, - compressedBuffer.length, - bytes, - start, - l, - compressionLevel - ); - - out.writeVInt(compressedSize); - out.writeBytes(compressedBuffer, compressedSize); - } - } - - @Override - public void compress(ByteBuffersDataInput buffersInput, DataOutput out) throws IOException { - final int length = (int) buffersInput.size(); - byte[] bytes = new byte[length]; - buffersInput.readBytes(bytes, 0, length); - compress(bytes, 0, length, out); - } - - @Override - public void close() throws IOException {} - } - - /** zstandard decompressor */ - private static final class ZstdDecompressor extends Decompressor { - - private byte[] compressed; - - /** default decompressor */ - public ZstdDecompressor() { - compressed = BytesRef.EMPTY_BYTES; - } - - @Override - public void decompress(DataInput in, int originalLength, int offset, int length, BytesRef bytes) throws IOException { - assert offset + length <= originalLength : "buffer read size must be within limit"; - - if (length == 0) { - bytes.length = 0; - return; - } - - final int blockLength = in.readVInt(); - bytes.offset = bytes.length = 0; - int offsetInBlock = 0; - int offsetInBytesRef = offset; - - // Skip unneeded blocks - while (offsetInBlock + blockLength < offset) { - final int compressedLength = in.readVInt(); - in.skipBytes(compressedLength); - offsetInBlock += blockLength; - offsetInBytesRef -= blockLength; - } - - // Read blocks that intersect with the interval we need - while (offsetInBlock < offset + length) { - bytes.bytes = ArrayUtil.grow(bytes.bytes, bytes.length + blockLength); - final int compressedLength = in.readVInt(); - if (compressedLength == 0) { - return; - } - compressed = ArrayUtil.growNoCopy(compressed, compressedLength); - in.readBytes(compressed, 0, compressedLength); - - int l = Math.min(blockLength, originalLength - offsetInBlock); - bytes.bytes = ArrayUtil.grow(bytes.bytes, bytes.length + l); - - byte[] output = new byte[l]; - - final int uncompressed = (int) Zstd.decompressByteArray(output, 0, l, compressed, 0, compressedLength); - System.arraycopy(output, 0, bytes.bytes, bytes.length, uncompressed); - - bytes.length += uncompressed; - offsetInBlock += blockLength; - } - - bytes.offset = offsetInBytesRef; - bytes.length = length; - - assert bytes.isValid() : "decompression output is corrupted."; - } - - @Override - public 
Decompressor clone() { - return new ZstdDecompressor(); - } - } -} diff --git a/server/src/main/java/org/opensearch/index/codec/customcodecs/package-info.java b/server/src/main/java/org/opensearch/index/codec/customcodecs/package-info.java deleted file mode 100644 index e996873963b1b..0000000000000 --- a/server/src/main/java/org/opensearch/index/codec/customcodecs/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/** - * A plugin that implements compression codecs with native implementation. - */ -package org.opensearch.index.codec.customcodecs; diff --git a/server/src/main/java/org/opensearch/index/codec/fuzzy/AbstractFuzzySet.java b/server/src/main/java/org/opensearch/index/codec/fuzzy/AbstractFuzzySet.java new file mode 100644 index 0000000000000..09976297361fa --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/fuzzy/AbstractFuzzySet.java @@ -0,0 +1,61 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.fuzzy; + +import org.apache.lucene.util.BytesRef; +import org.opensearch.common.CheckedSupplier; +import org.opensearch.common.hash.T1ha1; + +import java.io.IOException; +import java.util.Iterator; + +/** + * Encapsulates common behaviour for fuzzy set implementations. + */ +public abstract class AbstractFuzzySet implements FuzzySet { + + /** + * Add an item to this fuzzy set. + * @param value The value to be added + */ + protected abstract void add(BytesRef value); + + /** + * Add all items to the underlying set. + * Implementations can choose to perform this using an optimized strategy based on the type of set. + * @param valuesIteratorProvider Supplier for an iterator over all values which should be added to the set. + */ + protected void addAll(CheckedSupplier<Iterator<BytesRef>, IOException> valuesIteratorProvider) throws IOException { + Iterator<BytesRef> values = valuesIteratorProvider.get(); + while (values.hasNext()) { + add(values.next()); + } + } + + public Result contains(BytesRef val) { + return containsHash(generateKey(val)); + } + + protected abstract Result containsHash(long hash); + + protected long generateKey(BytesRef value) { + return T1ha1.hash(value.bytes, value.offset, value.length, 0L); + } + + protected void assertAllElementsExist(CheckedSupplier<Iterator<BytesRef>, IOException> iteratorProvider) throws IOException { + Iterator<BytesRef> iter = iteratorProvider.get(); + int cnt = 0; + while (iter.hasNext()) { + BytesRef item = iter.next(); + assert contains(item) == Result.MAYBE + : "Expected Filter to return positive response for elements added to it. Elements matched: " + cnt; + cnt++; + } + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/fuzzy/BloomFilter.java b/server/src/main/java/org/opensearch/index/codec/fuzzy/BloomFilter.java new file mode 100644 index 0000000000000..b8a8352183ca8 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/fuzzy/BloomFilter.java @@ -0,0 +1,150 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +/* + * Based on code from the Apache Lucene project (https://github.com/apache/lucene) under the Apache License, version 2.0. + * Copyright 2001-2022 The Apache Software Foundation + * Modifications (C) OpenSearch Contributors. All Rights Reserved. + */ + +package org.opensearch.index.codec.fuzzy; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.store.DataOutput; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.RamUsageEstimator; +import org.opensearch.common.CheckedSupplier; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.Assertions; + +import java.io.IOException; +import java.util.Iterator; + +/** + * The code is based on Lucene's implementation of Bloom Filter. + * It represents a subset of the Lucene implementation needed for OpenSearch use cases. + * Since the Lucene implementation is marked experimental, + * this aims to ensure we can provide a bwc implementation during upgrades. + */ +public class BloomFilter extends AbstractFuzzySet { + + private static final Logger logger = LogManager.getLogger(BloomFilter.class); + + // The sizes of BitSet used are all numbers that, when expressed in binary form, + // are all ones. This is to enable fast downsizing from one bitset to another + // by simply ANDing each set index in one bitset with the size of the target bitset + // - this provides a fast modulo of the number. Values previously accumulated in + // a large bitset and then mapped to a smaller set can be looked up using a single + // AND operation of the query term's hash rather than needing to perform a 2-step + // translation of the query term that mirrors the stored content's reprojections. + static final int[] usableBitSetSizes; + + static { + usableBitSetSizes = new int[26]; + for (int i = 0; i < usableBitSetSizes.length; i++) { + usableBitSetSizes[i] = (1 << (i + 6)) - 1; + } + } + + private final LongArrayBackedBitSet bitset; + private final int setSize; + private final int hashCount; + + BloomFilter(long maxDocs, double maxFpp, CheckedSupplier<Iterator<BytesRef>, IOException> fieldIteratorProvider) throws IOException { + int setSize = (int) Math.ceil((maxDocs * Math.log(maxFpp)) / Math.log(1 / Math.pow(2, Math.log(2)))); + setSize = getNearestSetSize(setSize < Integer.MAX_VALUE / 2 ? 
2 * setSize : Integer.MAX_VALUE); + int optimalK = (int) Math.round(((double) setSize / maxDocs) * Math.log(2)); + this.bitset = new LongArrayBackedBitSet(setSize); + this.setSize = setSize; + this.hashCount = optimalK; + addAll(fieldIteratorProvider); + if (Assertions.ENABLED) { + assertAllElementsExist(fieldIteratorProvider); + } + logger.debug("Bloom filter created with fpp: {}, setSize: {}, hashCount: {}", maxFpp, setSize, hashCount); + } + + BloomFilter(IndexInput in) throws IOException { + hashCount = in.readInt(); + setSize = in.readInt(); + this.bitset = new LongArrayBackedBitSet(in); + } + + @Override + public void writeTo(DataOutput out) throws IOException { + out.writeInt(hashCount); + out.writeInt(setSize); + bitset.writeTo(out); + } + + private static int getNearestSetSize(int maxNumberOfBits) { + assert maxNumberOfBits > 0 : "Provided size estimate for bloom filter is illegal (<=0) : " + maxNumberOfBits; + int result = usableBitSetSizes[0]; + for (int i = 0; i < usableBitSetSizes.length; i++) { + if (usableBitSetSizes[i] <= maxNumberOfBits) { + result = usableBitSetSizes[i]; + } + } + return result; + } + + @Override + public SetType setType() { + return SetType.BLOOM_FILTER_V1; + } + + @Override + public Result containsHash(long hash) { + int msb = (int) (hash >>> Integer.SIZE); + int lsb = (int) hash; + for (int i = 0; i < hashCount; i++) { + int bloomPos = (lsb + i * msb); + if (!mayContainValue(bloomPos)) { + return Result.NO; + } + } + return Result.MAYBE; + } + + protected void add(BytesRef value) { + long hash = generateKey(value); + int msb = (int) (hash >>> Integer.SIZE); + int lsb = (int) hash; + for (int i = 0; i < hashCount; i++) { + // Bitmasking using bloomSize is effectively a modulo operation since set sizes are always power of 2 + int bloomPos = (lsb + i * msb) & setSize; + bitset.set(bloomPos); + } + } + + @Override + public boolean isSaturated() { + long numBitsSet = bitset.cardinality(); + // Don't bother saving bitsets if >90% of bits are set - we don't want to + // throw any more memory at this problem. + return (float) numBitsSet / (float) setSize > 0.9f; + } + + @Override + public long ramBytesUsed() { + return RamUsageEstimator.sizeOf(bitset.ramBytesUsed()); + } + + private boolean mayContainValue(int aHash) { + // Bloom sizes are always base 2 and so can be ANDed for a fast modulo + int pos = aHash & setSize; + return bitset.get(pos); + } + + @Override + public void close() throws IOException { + IOUtils.close(bitset); + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzyFilterPostingsFormat.java b/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzyFilterPostingsFormat.java new file mode 100644 index 0000000000000..01f8054fc91be --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzyFilterPostingsFormat.java @@ -0,0 +1,492 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Based on code from the Apache Lucene project (https://github.com/apache/lucene) under the Apache License, version 2.0. + * Copyright 2001-2022 The Apache Software Foundation + * Modifications (C) OpenSearch Contributors. All Rights Reserved. 
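// Editor's note: a self-contained demo (not part of this diff) of the two tricks the
// BloomFilter above relies on. Sizes of the form (1 << k) - 1 are all-ones bit masks,
// so `hash & setSize` is a division-free modulo; and a single 64-bit hash is split
// into msb/lsb halves to derive hashCount probe positions (lsb + i * msb), the
// Kirsch-Mitzenmacher double-hashing scheme.
class BloomMathDemo {
    public static void main(String[] args) {
        int setSize = (1 << 10) - 1;      // 1023, as constructed in usableBitSetSizes
        long hash = 0x9E3779B97F4A7C15L;  // stand-in for T1ha1.hash(...)
        int msb = (int) (hash >>> Integer.SIZE);
        int lsb = (int) hash;
        for (int i = 0; i < 3; i++) {
            int probe = (lsb + i * msb) & setSize; // same masking as add()/mayContainValue()
            System.out.println("probe " + i + " -> bit " + probe);
        }
        // The mask really is a modulo for all-ones sizes:
        System.out.println((lsb & setSize) == Math.floorMod(lsb, setSize + 1)); // true
    }
}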
+ */ + +package org.opensearch.index.codec.fuzzy; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.codecs.FieldsConsumer; +import org.apache.lucene.codecs.FieldsProducer; +import org.apache.lucene.codecs.NormsProducer; +import org.apache.lucene.codecs.PostingsFormat; +import org.apache.lucene.index.BaseTermsEnum; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.Fields; +import org.apache.lucene.index.ImpactsEnum; +import org.apache.lucene.index.IndexFileNames; +import org.apache.lucene.index.PostingsEnum; +import org.apache.lucene.index.SegmentReadState; +import org.apache.lucene.index.SegmentWriteState; +import org.apache.lucene.index.Terms; +import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.automaton.CompiledAutomaton; +import org.opensearch.common.util.io.IOUtils; + +import java.io.Closeable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +/** + * Based on Lucene's BloomFilterPostingsFormat. + * The discussion with the Lucene community that led to the decision to keep this implementation + * in OpenSearch is captured here: https://github.com/apache/lucene/issues/12986 + * + * The class deals with persisting the bloom filter through the postings format + * and reading the field via a bloom-filter-fronted terms enum (to reduce disk seeks when requested values are absent). + * The class should be revisited during Lucene upgrades. There are bwc tests present to verify the format continues to work after an upgrade. + */ + +public final class FuzzyFilterPostingsFormat extends PostingsFormat { + + private static final Logger logger = LogManager.getLogger(FuzzyFilterPostingsFormat.class); + + /** + * This name is stored in headers. If changing the implementation for the format, this name/version should be updated + * so that reads can work as expected.
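// Editor's note: a hedged sketch of the fail-fast lookup the filter-fronted TermsEnum
// below implements. A NO from the in-memory set is authoritative (fuzzy sets give
// false positives, never false negatives), so only a MAYBE touches the delegate and
// hence the disk; FailFastSketch itself is illustrative, not part of this diff.
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;
import org.opensearch.index.codec.fuzzy.FuzzySet;

import java.io.IOException;

class FailFastSketch {
    static boolean seekExact(FuzzySet filter, TermsEnum delegate, BytesRef term) throws IOException {
        if (filter.contains(term) == FuzzySet.Result.NO) {
            return false;                // guaranteed absent: disk seek saved
        }
        return delegate.seekExact(term); // MAYBE: confirm against the real postings
    }
}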
+ */ + public static final String FUZZY_FILTER_CODEC_NAME = "FuzzyFilterCodec99"; + + public static final int VERSION_START = 0; + public static final int VERSION_CURRENT = VERSION_START; + + /** Extension of Fuzzy Filters file */ + public static final String FUZZY_FILTER_FILE_EXTENSION = "fzd"; + + private final PostingsFormat delegatePostingsFormat; + private final FuzzySetFactory fuzzySetFactory; + + public FuzzyFilterPostingsFormat(PostingsFormat delegatePostingsFormat, FuzzySetFactory fuzzySetFactory) { + super(FUZZY_FILTER_CODEC_NAME); + this.delegatePostingsFormat = delegatePostingsFormat; + this.fuzzySetFactory = fuzzySetFactory; + } + + // Needed for SPI + public FuzzyFilterPostingsFormat() { + this(null, null); + } + + @Override + public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException { + if (delegatePostingsFormat == null) { + throw new UnsupportedOperationException( + "Error - " + getClass().getName() + " has been constructed without a choice of PostingsFormat" + ); + } + FieldsConsumer fieldsConsumer = delegatePostingsFormat.fieldsConsumer(state); + return new FuzzyFilteredFieldsConsumer(fieldsConsumer, state); + } + + @Override + public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException { + return new FuzzyFilteredFieldsProducer(state); + } + + static class FuzzyFilteredFieldsProducer extends FieldsProducer { + private FieldsProducer delegateFieldsProducer; + HashMap<String, FuzzySet> fuzzySetsByFieldName = new HashMap<>(); + private List<Closeable> closeables = new ArrayList<>(); + + public FuzzyFilteredFieldsProducer(SegmentReadState state) throws IOException { + String fuzzyFilterFileName = IndexFileNames.segmentFileName( + state.segmentInfo.name, + state.segmentSuffix, + FUZZY_FILTER_FILE_EXTENSION + ); + IndexInput filterIn = null; + boolean success = false; + try { + // Using IndexInput directly instead of ChecksumIndexInput since we want to support RandomAccessInput + filterIn = state.directory.openInput(fuzzyFilterFileName, state.context); + + CodecUtil.checkIndexHeader( + filterIn, + FUZZY_FILTER_CODEC_NAME, + VERSION_START, + VERSION_CURRENT, + state.segmentInfo.getId(), + state.segmentSuffix + ); + // Load the delegate postings format + PostingsFormat delegatePostingsFormat = PostingsFormat.forName(filterIn.readString()); + this.delegateFieldsProducer = delegatePostingsFormat.fieldsProducer(state); + int numFilters = filterIn.readInt(); + for (int i = 0; i < numFilters; i++) { + int fieldNum = filterIn.readInt(); + FuzzySet set = FuzzySetFactory.deserializeFuzzySet(filterIn); + closeables.add(set); + FieldInfo fieldInfo = state.fieldInfos.fieldInfo(fieldNum); + fuzzySetsByFieldName.put(fieldInfo.name, set); + } + CodecUtil.retrieveChecksum(filterIn); + + // Can we disable it if we foresee performance issues? + CodecUtil.checksumEntireFile(filterIn); + success = true; + closeables.add(filterIn); + } finally { + if (!success) { + IOUtils.closeWhileHandlingException(filterIn, delegateFieldsProducer); + } + } + } + + @Override + public Iterator<String> iterator() { + return delegateFieldsProducer.iterator(); + } + + @Override + public void close() throws IOException { + // Why closing here? 
IOUtils.closeWhileHandlingException(closeables); + delegateFieldsProducer.close(); + } + + @Override + public Terms terms(String field) throws IOException { + FuzzySet filter = fuzzySetsByFieldName.get(field); + if (filter == null) { + return delegateFieldsProducer.terms(field); + } else { + Terms result = delegateFieldsProducer.terms(field); + if (result == null) { + return null; + } + return new FuzzyFilteredTerms(result, filter); + } + } + + @Override + public int size() { + return delegateFieldsProducer.size(); + } + + static class FuzzyFilteredTerms extends Terms { + private Terms delegateTerms; + private FuzzySet filter; + + public FuzzyFilteredTerms(Terms terms, FuzzySet filter) { + this.delegateTerms = terms; + this.filter = filter; + } + + @Override + public TermsEnum intersect(CompiledAutomaton compiled, final BytesRef startTerm) throws IOException { + return delegateTerms.intersect(compiled, startTerm); + } + + @Override + public TermsEnum iterator() throws IOException { + return new FilterAppliedTermsEnum(delegateTerms, filter); + } + + @Override + public long size() throws IOException { + return delegateTerms.size(); + } + + @Override + public long getSumTotalTermFreq() throws IOException { + return delegateTerms.getSumTotalTermFreq(); + } + + @Override + public long getSumDocFreq() throws IOException { + return delegateTerms.getSumDocFreq(); + } + + @Override + public int getDocCount() throws IOException { + return delegateTerms.getDocCount(); + } + + @Override + public boolean hasFreqs() { + return delegateTerms.hasFreqs(); + } + + @Override + public boolean hasOffsets() { + return delegateTerms.hasOffsets(); + } + + @Override + public boolean hasPositions() { + return delegateTerms.hasPositions(); + } + + @Override + public boolean hasPayloads() { + return delegateTerms.hasPayloads(); + } + + @Override + public BytesRef getMin() throws IOException { + return delegateTerms.getMin(); + } + + @Override + public BytesRef getMax() throws IOException { + return delegateTerms.getMax(); + } + } + + static final class FilterAppliedTermsEnum extends BaseTermsEnum { + + private Terms delegateTerms; + private TermsEnum delegateTermsEnum; + private final FuzzySet filter; + + public FilterAppliedTermsEnum(Terms delegateTerms, FuzzySet filter) throws IOException { + this.delegateTerms = delegateTerms; + this.filter = filter; + } + + void reset(Terms delegateTerms) throws IOException { + this.delegateTerms = delegateTerms; + this.delegateTermsEnum = null; + } + + private TermsEnum delegate() throws IOException { + if (delegateTermsEnum == null) { + /* pull the iterator only if we really need it - + * this can be a relatively heavy operation depending on the + * delegate postings format and the underlying directory + * (clone IndexInput) */ + delegateTermsEnum = delegateTerms.iterator(); + } + return delegateTermsEnum; + } + + @Override + public BytesRef next() throws IOException { + return delegate().next(); + } + + @Override + public boolean seekExact(BytesRef text) throws IOException { + // The magical fail-fast speed-up that is the entire point of all of + // this code - save a disk seek if there is a match on an in-memory + // structure that may occasionally give a false positive but is + // guaranteed to give no false negatives + if (filter.contains(text) == FuzzySet.Result.NO) { + return false; + } + return delegate().seekExact(text); + } + + @Override + public SeekStatus seekCeil(BytesRef text) throws IOException { + return delegate().seekCeil(text); + } + + @Override + public void
seekExact(long ord) throws IOException { + delegate().seekExact(ord); + } + + @Override + public BytesRef term() throws IOException { + return delegate().term(); + } + + @Override + public long ord() throws IOException { + return delegate().ord(); + } + + @Override + public int docFreq() throws IOException { + return delegate().docFreq(); + } + + @Override + public long totalTermFreq() throws IOException { + return delegate().totalTermFreq(); + } + + @Override + public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException { + return delegate().postings(reuse, flags); + } + + @Override + public ImpactsEnum impacts(int flags) throws IOException { + return delegate().impacts(flags); + } + + @Override + public String toString() { + return getClass().getSimpleName() + "(filter=" + filter.toString() + ")"; + } + } + + @Override + public void checkIntegrity() throws IOException { + delegateFieldsProducer.checkIntegrity(); + } + + @Override + public String toString() { + return getClass().getSimpleName() + "(fields=" + fuzzySetsByFieldName.size() + ",delegate=" + delegateFieldsProducer + ")"; + } + } + + class FuzzyFilteredFieldsConsumer extends FieldsConsumer { + private FieldsConsumer delegateFieldsConsumer; + private Map<FieldInfo, FuzzySet> fuzzySets = new HashMap<>(); + private SegmentWriteState state; + private List<Closeable> closeables = new ArrayList<>(); + + public FuzzyFilteredFieldsConsumer(FieldsConsumer fieldsConsumer, SegmentWriteState state) { + this.delegateFieldsConsumer = fieldsConsumer; + this.state = state; + } + + @Override + public void write(Fields fields, NormsProducer norms) throws IOException { + + // Delegate must write first: it may have opened files + // on creating the class + // (e.g. Lucene41PostingsConsumer), and write() will + // close them; alternatively, if we delayed pulling + // the fields consumer until here, we could do it + // afterwards: + delegateFieldsConsumer.write(fields, norms); + + for (String field : fields) { + Terms terms = fields.terms(field); + if (terms == null) { + continue; + } + FieldInfo fieldInfo = state.fieldInfos.fieldInfo(field); + FuzzySet fuzzySet = fuzzySetFactory.createFuzzySet(state.segmentInfo.maxDoc(), fieldInfo.name, () -> iterator(terms)); + if (fuzzySet == null) { + break; + } + assert fuzzySets.containsKey(fieldInfo) == false; + closeables.add(fuzzySet); + fuzzySets.put(fieldInfo, fuzzySet); + } + } + + private Iterator<BytesRef> iterator(Terms terms) throws IOException { + TermsEnum termIterator = terms.iterator(); + return new Iterator<>() { + + private BytesRef currentTerm; + private PostingsEnum postingsEnum; + + @Override + public boolean hasNext() { + try { + do { + currentTerm = termIterator.next(); + if (currentTerm == null) { + return false; + } + postingsEnum = termIterator.postings(postingsEnum, 0); + if (postingsEnum.nextDoc() != PostingsEnum.NO_MORE_DOCS) { + return true; + } + } while (true); + } catch (IOException ex) { + throw new IllegalStateException("Cannot read terms: " + termIterator.attributes()); + } + } + + @Override + public BytesRef next() { + return currentTerm; + } + }; + } + + private boolean closed; + + @Override + public void close() throws IOException { + if (closed) { + return; + } + closed = true; + delegateFieldsConsumer.close(); + + // Now we are done accumulating values for these fields + List<Map.Entry<FieldInfo, FuzzySet>> nonSaturatedSets = new ArrayList<>(); + + for (Map.Entry<FieldInfo, FuzzySet> entry : fuzzySets.entrySet()) { + FuzzySet fuzzySet = 
entry.getValue(); + if (!fuzzySet.isSaturated()) { + nonSaturatedSets.add(entry); + } + } + String fuzzyFilterFileName = IndexFileNames.segmentFileName( + state.segmentInfo.name, + state.segmentSuffix, + FUZZY_FILTER_FILE_EXTENSION + ); + try (IndexOutput fuzzyFilterFileOutput = state.directory.createOutput(fuzzyFilterFileName, state.context)) { + logger.trace( + "Writing fuzzy filter postings with version: {} for segment: {}", + VERSION_CURRENT, + state.segmentInfo.toString() + ); + CodecUtil.writeIndexHeader( + fuzzyFilterFileOutput, + FUZZY_FILTER_CODEC_NAME, + VERSION_CURRENT, + state.segmentInfo.getId(), + state.segmentSuffix + ); + + // remember the name of the postings format we will delegate to + fuzzyFilterFileOutput.writeString(delegatePostingsFormat.getName()); + + // First field in the output file is the number of fields+sets saved + fuzzyFilterFileOutput.writeInt(nonSaturatedSets.size()); + for (Map.Entry<FieldInfo, FuzzySet> entry : nonSaturatedSets) { + FieldInfo fieldInfo = entry.getKey(); + FuzzySet fuzzySet = entry.getValue(); + saveAppropriatelySizedFuzzySet(fuzzyFilterFileOutput, fuzzySet, fieldInfo); + } + CodecUtil.writeFooter(fuzzyFilterFileOutput); + } + // We are done with large bitsets so no need to keep them hanging around + fuzzySets.clear(); + IOUtils.closeWhileHandlingException(closeables); + } + + private void saveAppropriatelySizedFuzzySet(IndexOutput fileOutput, FuzzySet fuzzySet, FieldInfo fieldInfo) throws IOException { + fileOutput.writeInt(fieldInfo.number); + fileOutput.writeString(fuzzySet.setType().getSetName()); + fuzzySet.writeTo(fileOutput); + } + } + + @Override + public String toString() { + return "FuzzyFilterPostingsFormat(" + delegatePostingsFormat + ")"; + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzySet.java b/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzySet.java new file mode 100644 index 0000000000000..df443ffbca33d --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzySet.java @@ -0,0 +1,98 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.fuzzy; + +import org.apache.lucene.store.DataOutput; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.util.Accountable; +import org.apache.lucene.util.BytesRef; +import org.opensearch.common.CheckedFunction; + +import java.io.Closeable; +import java.io.IOException; +import java.util.List; + +/** + * Fuzzy Filter interface + */ +public interface FuzzySet extends Accountable, Closeable { + + /** + * Name used for a codec to be aware of what fuzzy set has been used. + */ + SetType setType(); + + /** + * @param value the item whose membership needs to be checked. + */ + Result contains(BytesRef value); + + boolean isSaturated(); + + void writeTo(DataOutput out) throws IOException; + + /** + * Enum to represent result of membership check on a fuzzy set. + */ + enum Result { + /** + * A definite no for the set membership of an item. + */ + NO, + + /** + * Fuzzy sets cannot guarantee that a given item is present in the set or not due to the data being stored in + * a lossy format (e.g. fingerprint, hash). + * Hence, we return a response denoting that the item may be present. + */ + MAYBE + } + + /** + * Enum to declare supported properties and mappings for a fuzzy set implementation. 
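+ * <p> + * Illustrative note (not part of the on-disk contract): {@code SetType.from("bloom_filter_v1")} resolves to {@link SetType#BLOOM_FILTER_V1}, whose deserializer reads a serialized bloom filter back from an {@link org.apache.lucene.store.IndexInput}; the alias list (e.g. "bloom_filter") is validated for non-emptiness but not consulted by {@code from}.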
+ */ + enum SetType { + BLOOM_FILTER_V1("bloom_filter_v1", BloomFilter::new, List.of("bloom_filter")); + + /** + * Name persisted in postings file. This will be used when reading to determine the bloom filter implementation. + */ + private final String setName; + + /** + * Interface for reading the actual fuzzy set implementation into a Java object. + */ + private final CheckedFunction<IndexInput, ? extends FuzzySet, IOException> deserializer; + + SetType(String setName, CheckedFunction<IndexInput, ? extends FuzzySet, IOException> deserializer, List<String> aliases) { + if (aliases.size() < 1) { + throw new IllegalArgumentException("Alias list is empty. Could not create Set Type: " + setName); + } + this.setName = setName; + this.deserializer = deserializer; + } + + public String getSetName() { + return setName; + } + + public CheckedFunction<IndexInput, ? extends FuzzySet, IOException> getDeserializer() { + return deserializer; + } + + public static SetType from(String name) { + for (SetType type : SetType.values()) { + if (type.setName.equals(name)) { + return type; + } + } + throw new IllegalArgumentException("There is no implementation for fuzzy set: " + name); + } + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzySetFactory.java b/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzySetFactory.java new file mode 100644 index 0000000000000..5d1fd03f099d4 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzySetFactory.java @@ -0,0 +1,49 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.fuzzy; + +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.util.BytesRef; +import org.opensearch.common.CheckedSupplier; + +import java.io.IOException; +import java.util.Iterator; +import java.util.Map; + +/** + * Factory class to create fuzzy sets. + * Supports bloom filters for now. More sets can be added as required. 
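+ * <p> + * A minimal usage sketch; the field name, probability supplier, and terms iterator below are hypothetical: + * <pre>{@code + * FuzzySetFactory factory = new FuzzySetFactory( + *     Map.of("_id", new FuzzySetParameters(() -> FuzzySetParameters.DEFAULT_FALSE_POSITIVE_PROBABILITY)) + * ); + * FuzzySet set = factory.createFuzzySet(maxDoc, "_id", () -> termsIterator); // maxDoc and iterator from the segment being written + * FuzzySet.Result result = set.contains(new BytesRef("some-id")); // NO or MAYBE, never a false NO + * }</pre>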
+ */ +public class FuzzySetFactory { + + private final Map<String, FuzzySetParameters> setTypeForField; + + public FuzzySetFactory(Map<String, FuzzySetParameters> setTypeForField) { + this.setTypeForField = setTypeForField; + } + + public FuzzySet createFuzzySet(int maxDocs, String fieldName, CheckedSupplier<Iterator<BytesRef>, IOException> iteratorProvider) + throws IOException { + FuzzySetParameters params = setTypeForField.get(fieldName); + if (params == null) { + throw new IllegalArgumentException("No fuzzy set defined for field: " + fieldName); + } + switch (params.getSetType()) { + case BLOOM_FILTER_V1: + return new BloomFilter(maxDocs, params.getFalsePositiveProbability(), iteratorProvider); + default: + throw new IllegalArgumentException("No Implementation for set type: " + params.getSetType()); + } + } + + public static FuzzySet deserializeFuzzySet(IndexInput in) throws IOException { + FuzzySet.SetType setType = FuzzySet.SetType.from(in.readString()); + return setType.getDeserializer().apply(in); + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzySetParameters.java b/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzySetParameters.java new file mode 100644 index 0000000000000..7bb96e7c34f0b --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/fuzzy/FuzzySetParameters.java @@ -0,0 +1,34 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.fuzzy; + +import java.util.function.Supplier; + +/** + * Wrapper for params to create a fuzzy set. + */ +public class FuzzySetParameters { + private final Supplier<Double> falsePositiveProbabilityProvider; + private final FuzzySet.SetType setType; + + public static final double DEFAULT_FALSE_POSITIVE_PROBABILITY = 0.2047d; + + public FuzzySetParameters(Supplier<Double> falsePositiveProbabilityProvider) { + this.falsePositiveProbabilityProvider = falsePositiveProbabilityProvider; + this.setType = FuzzySet.SetType.BLOOM_FILTER_V1; + } + + public double getFalsePositiveProbability() { + return falsePositiveProbabilityProvider.get(); + } + + public FuzzySet.SetType getSetType() { + return setType; + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/fuzzy/IndexInputImmutableLongArray.java b/server/src/main/java/org/opensearch/index/codec/fuzzy/IndexInputImmutableLongArray.java new file mode 100644 index 0000000000000..08d6059c1e82e --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/fuzzy/IndexInputImmutableLongArray.java @@ -0,0 +1,70 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.fuzzy; + +import org.apache.lucene.store.RandomAccessInput; +import org.apache.lucene.util.RamUsageEstimator; +import org.opensearch.OpenSearchException; +import org.opensearch.common.util.LongArray; + +import java.io.IOException; + +/** + * A Long array backed by RandomAccessInput. + * This implementation supports read operations only. 
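+ * <p> + * Reads are positional: {@code get(i)} maps to {@code input.readLong(i << 3)} since each long occupies 8 bytes in the underlying slice; all mutating operations throw {@link UnsupportedOperationException}.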
+ */ +class IndexInputImmutableLongArray implements LongArray { + + private final RandomAccessInput input; + private final long size; + + IndexInputImmutableLongArray(long size, RandomAccessInput input) { + this.size = size; + this.input = input; + } + + @Override + public void close() {} + + @Override + public long size() { + return size; + } + + @Override + public synchronized long get(long index) { + try { + // Multiplying by 8 since each long is 8 bytes, and we need to get the long value at (index * 8) in the + // RandomAccessInput being accessed. + return input.readLong(index << 3); + } catch (IOException ex) { + throw new OpenSearchException(ex); + } + } + + @Override + public long set(long index, long value) { + throw new UnsupportedOperationException(); + } + + @Override + public long increment(long index, long inc) { + throw new UnsupportedOperationException(); + } + + @Override + public void fill(long fromIndex, long toIndex, long value) { + throw new UnsupportedOperationException(); + } + + @Override + public long ramBytesUsed() { + return RamUsageEstimator.shallowSizeOfInstance(IndexInputImmutableLongArray.class); + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/fuzzy/LongArrayBackedBitSet.java b/server/src/main/java/org/opensearch/index/codec/fuzzy/LongArrayBackedBitSet.java new file mode 100644 index 0000000000000..bd4936aeec366 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/fuzzy/LongArrayBackedBitSet.java @@ -0,0 +1,105 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.fuzzy; + +import org.apache.lucene.store.DataOutput; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.util.Accountable; +import org.opensearch.common.util.BigArrays; +import org.opensearch.common.util.LongArray; +import org.opensearch.common.util.io.IOUtils; + +import java.io.Closeable; +import java.io.IOException; + +/** + * A bitset backed by a long-indexed array. + */ +class LongArrayBackedBitSet implements Accountable, Closeable { + + private long underlyingArrayLength = 0L; + private LongArray longArray; + + /** + * Constructor which uses an on heap array. This should be used during construction of the bitset. + * @param capacity The maximum capacity to provision for the bitset. + */ + LongArrayBackedBitSet(long capacity) { + // Since the bitset is backed by a long array, we only need 1 element for every 64 bits in the underlying array. + underlyingArrayLength = (capacity >> 6) + 1L; + this.longArray = BigArrays.NON_RECYCLING_INSTANCE.withCircuitBreaking().newLongArray(underlyingArrayLength); + } + + /** + * Constructor which uses Lucene's IndexInput to read the bitset into a read-only buffer. + * @param in IndexInput containing the serialized bitset. + * @throws IOException + */ + LongArrayBackedBitSet(IndexInput in) throws IOException { + underlyingArrayLength = in.readLong(); + // Multiplying by 8 since the length above is of the long array, so we will have + // 8 times the number of bytes in our stream. 
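+ // For example, an array of 4 longs was serialized as 4 << 3 = 32 bytes, so the random-access slice below covers bytes [filePointer, filePointer + 32) of the input.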
+ long streamLength = underlyingArrayLength << 3; + this.longArray = new IndexInputImmutableLongArray(underlyingArrayLength, in.randomAccessSlice(in.getFilePointer(), streamLength)); + in.skipBytes(streamLength); + } + + public void writeTo(DataOutput out) throws IOException { + out.writeLong(underlyingArrayLength); + for (int idx = 0; idx < underlyingArrayLength; idx++) { + out.writeLong(longArray.get(idx)); + } + } + + /** + * This is an O(n) operation, and will iterate over all the elements in the underlying long array + * to determine cardinality of the set. + * @return number of set bits in the bitset. + */ + public long cardinality() { + long tot = 0; + for (int i = 0; i < underlyingArrayLength; ++i) { + tot += Long.bitCount(longArray.get(i)); + } + return tot; + } + + /** + * Retrieves whether the bit is set or not at the given index. + * @param index the index to look up for the bit + * @return true if bit is set, false otherwise + */ + public boolean get(long index) { + long i = index >> 6; // div 64 + long val = longArray.get(i); + long bitmask = 1L << index; + return (val & bitmask) != 0; + } + + /** + * Sets the bit at the given index. + * @param index the index to set the bit at. + */ + public void set(long index) { + long wordNum = index >> 6; // div 64 + long bitmask = 1L << index; + long val = longArray.get(wordNum); + longArray.set(wordNum, val | bitmask); + } + + @Override + public long ramBytesUsed() { + return 128L + longArray.ramBytesUsed(); + } + + @Override + public void close() throws IOException { + IOUtils.close(longArray); + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/fuzzy/package-info.java b/server/src/main/java/org/opensearch/index/codec/fuzzy/package-info.java new file mode 100644 index 0000000000000..7aeac68cd192a --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/fuzzy/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** classes responsible for handling all fuzzy codecs and operations */ +package org.opensearch.index.codec.fuzzy; diff --git a/server/src/main/java/org/opensearch/index/document/SortedUnsignedLongDocValuesRangeQuery.java b/server/src/main/java/org/opensearch/index/document/SortedUnsignedLongDocValuesRangeQuery.java index 04dd669d60883..234c67cc637f1 100644 --- a/server/src/main/java/org/opensearch/index/document/SortedUnsignedLongDocValuesRangeQuery.java +++ b/server/src/main/java/org/opensearch/index/document/SortedUnsignedLongDocValuesRangeQuery.java @@ -10,7 +10,6 @@ import org.apache.lucene.index.DocValues; import org.apache.lucene.index.FieldInfo; -import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NumericDocValues; @@ -87,12 +86,12 @@ public String toString(String field) { } @Override - public Query rewrite(IndexReader reader) throws IOException { + public Query rewrite(IndexSearcher searcher) throws IOException { if (Long.compareUnsigned(lowerValue, Numbers.MIN_UNSIGNED_LONG_VALUE_AS_LONG) == 0 && Long.compareUnsigned(upperValue, Numbers.MAX_UNSIGNED_LONG_VALUE_AS_LONG) == 0) { return new FieldExistsQuery(field); } - return super.rewrite(reader); + return super.rewrite(searcher); } abstract SortedNumericDocValues getValues(LeafReader reader, String field) throws IOException; diff --git a/server/src/main/java/org/opensearch/index/document/SortedUnsignedLongDocValuesSetQuery.java b/server/src/main/java/org/opensearch/index/document/SortedUnsignedLongDocValuesSetQuery.java new file mode 100644 index 0000000000000..669dbb1e1bfc7 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/document/SortedUnsignedLongDocValuesSetQuery.java @@ -0,0 +1,176 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.document; + +import org.apache.lucene.index.DocValues; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.search.ConstantScoreScorer; +import org.apache.lucene.search.ConstantScoreWeight; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryVisitor; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.TwoPhaseIterator; +import org.apache.lucene.search.Weight; +import org.apache.lucene.util.LongHashSet; + +import java.io.IOException; +import java.math.BigInteger; +import java.util.Arrays; +import java.util.Objects; + +/** + * The {@link org.apache.lucene.document.SortedNumericDocValuesSetQuery} implementation for unsigned long numeric data type. 
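+ * <p> + * Matching relies on {@link Long#compareUnsigned(long, long)}: for instance, the unsigned value 2^64 - 1 is stored as the signed long -1 yet still compares above {@code Long.MAX_VALUE}. A hypothetical usage sketch: {@code newSlowSetQuery("counter", BigInteger.ONE, new BigInteger("18446744073709551615"))}.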
+ * + * @opensearch.internal + */ +public abstract class SortedUnsignedLongDocValuesSetQuery extends Query { + + private final String field; + private final LongHashSet numbers; + + SortedUnsignedLongDocValuesSetQuery(String field, BigInteger[] numbers) { + this.field = Objects.requireNonNull(field); + Arrays.sort(numbers); + this.numbers = new LongHashSet(Arrays.stream(numbers).mapToLong(n -> n.longValue()).toArray()); + } + + @Override + public String toString(String field) { + return new StringBuilder().append(field).append(": ").append(numbers.toString()).toString(); + } + + @Override + public void visit(QueryVisitor visitor) { + if (visitor.acceptField(field)) { + visitor.visitLeaf(this); + } + } + + @Override + public Query rewrite(IndexSearcher indexSearcher) throws IOException { + if (numbers.size() == 0) { + return new MatchNoDocsQuery(); + } + return super.rewrite(indexSearcher); + } + + @Override + public boolean equals(Object other) { + if (sameClassAs(other) == false) { + return false; + } + SortedUnsignedLongDocValuesSetQuery that = (SortedUnsignedLongDocValuesSetQuery) other; + return field.equals(that.field) && numbers.equals(that.numbers); + } + + @Override + public int hashCode() { + return Objects.hash(classHash(), field, numbers); + } + + abstract SortedNumericDocValues getValues(LeafReader reader, String field) throws IOException; + + @Override + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + return new ConstantScoreWeight(this, boost) { + + @Override + public boolean isCacheable(LeafReaderContext ctx) { + return DocValues.isCacheable(ctx, field); + } + + @Override + public Scorer scorer(LeafReaderContext context) throws IOException { + SortedNumericDocValues values = getValues(context.reader(), field); + if (values == null) { + return null; + } + final NumericDocValues singleton = DocValues.unwrapSingleton(values); + final TwoPhaseIterator iterator; + if (singleton != null) { + iterator = new TwoPhaseIterator(singleton) { + @Override + public boolean matches() throws IOException { + long value = singleton.longValue(); + return Long.compareUnsigned(value, numbers.minValue) >= 0 + && Long.compareUnsigned(value, numbers.maxValue) <= 0 + && numbers.contains(value); + } + + @Override + public float matchCost() { + return 5; // 2 comparisons, possible lookup in the set + } + }; + } else { + iterator = new TwoPhaseIterator(values) { + @Override + public boolean matches() throws IOException { + int count = values.docValueCount(); + for (int i = 0; i < count; i++) { + final long value = values.nextValue(); + if (Long.compareUnsigned(value, numbers.minValue) < 0) { + continue; + } else if (Long.compareUnsigned(value, numbers.maxValue) > 0) { + return false; // values are sorted, terminate + } else if (numbers.contains(value)) { + return true; + } + } + return false; + } + + @Override + public float matchCost() { + return 5; // 2 comparisons, possible lookup in the set + } + }; + } + return new ConstantScoreScorer(this, score(), scoreMode, iterator); + } + }; + } + + public static Query newSlowSetQuery(String field, BigInteger... 
values) { + return new SortedUnsignedLongDocValuesSetQuery(field, values) { + @Override + SortedNumericDocValues getValues(LeafReader reader, String field) throws IOException { + FieldInfo info = reader.getFieldInfos().fieldInfo(field); + if (info == null) { + // Queries have some optimizations when one sub scorer returns null rather + // than a scorer that does not match any documents + return null; + } + return DocValues.getSortedNumeric(reader, field); + } + }; + } + + public static Query newSlowExactQuery(String field, BigInteger value) { + return new SortedUnsignedLongDocValuesRangeQuery(field, value, value) { + @Override + SortedNumericDocValues getValues(LeafReader reader, String field) throws IOException { + FieldInfo info = reader.getFieldInfos().fieldInfo(field); + if (info == null) { + // Queries have some optimizations when one sub scorer returns null rather + // than a scorer that does not match any documents + return null; + } + return DocValues.getSortedNumeric(reader, field); + } + }; + } +} diff --git a/server/src/main/java/org/opensearch/index/engine/CommitStats.java b/server/src/main/java/org/opensearch/index/engine/CommitStats.java index 2c78f675c5154..b30ce720b2649 100644 --- a/server/src/main/java/org/opensearch/index/engine/CommitStats.java +++ b/server/src/main/java/org/opensearch/index/engine/CommitStats.java @@ -32,11 +32,12 @@ package org.opensearch.index.engine; import org.apache.lucene.index.SegmentInfos; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.MapBuilder; +import org.opensearch.common.lucene.Lucene; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.lucene.Lucene; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -47,8 +48,9 @@ /** * a class the returns dynamic information with respect to the last commit point of this shard * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class CommitStats implements Writeable, ToXContentFragment { private final Map<String, String> userData; diff --git a/server/src/main/java/org/opensearch/index/engine/CompletionStatsCache.java b/server/src/main/java/org/opensearch/index/engine/CompletionStatsCache.java index c6cf7e8c1b53f..99f80df376df9 100644 --- a/server/src/main/java/org/opensearch/index/engine/CompletionStatsCache.java +++ b/server/src/main/java/org/opensearch/index/engine/CompletionStatsCache.java @@ -37,12 +37,12 @@ import org.apache.lucene.index.Terms; import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.search.suggest.document.CompletionTerms; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.common.FieldMemoryStats; import org.opensearch.common.Nullable; import org.opensearch.common.regex.Regex; -import org.opensearch.common.util.CollectionUtils; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.search.suggest.completion.CompletionStats; import java.util.HashMap; diff --git a/server/src/main/java/org/opensearch/index/engine/Engine.java b/server/src/main/java/org/opensearch/index/engine/Engine.java index 326e6aef45b08..c945d082c9a35 100644 --- a/server/src/main/java/org/opensearch/index/engine/Engine.java +++ 
b/server/src/main/java/org/opensearch/index/engine/Engine.java @@ -38,7 +38,10 @@ import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.SegmentCommitInfo; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.SegmentReader; @@ -59,8 +62,10 @@ import org.opensearch.action.index.IndexRequest; import org.opensearch.common.Nullable; import org.opensearch.common.SetOnce; -import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.concurrent.GatedCloseable; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.logging.Loggers; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; @@ -69,11 +74,11 @@ import org.opensearch.common.lucene.uid.VersionsAndSeqNoResolver; import org.opensearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndVersion; import org.opensearch.common.metrics.CounterMetric; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ReleasableLock; -import org.opensearch.common.lease.Releasable; -import org.opensearch.common.lease.Releasables; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.VersionType; import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.Mapping; @@ -84,12 +89,11 @@ import org.opensearch.index.seqno.SeqNoStats; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.DocsStats; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.store.Store; +import org.opensearch.index.translog.DefaultTranslogDeletionPolicy; import org.opensearch.index.translog.Translog; -import org.opensearch.index.translog.TranslogManager; import org.opensearch.index.translog.TranslogDeletionPolicy; -import org.opensearch.index.translog.DefaultTranslogDeletionPolicy; +import org.opensearch.index.translog.TranslogManager; import org.opensearch.search.suggest.completion.CompletionStats; import java.io.Closeable; @@ -123,8 +127,9 @@ /** * Base OpenSearch Engine class * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class Engine implements LifecycleAware, Closeable { public static final String SYNC_COMMIT_ID = "sync_id"; // TODO: remove sync_id in 3.0 @@ -134,12 +139,15 @@ public abstract class Engine implements LifecycleAware, Closeable { public static final String MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID = "max_unsafe_auto_id_timestamp"; public static final String SEARCH_SOURCE = "search"; // TODO: Make source of search enum? 
public static final String CAN_MATCH_SEARCH_SOURCE = "can_match"; + public static final String FORCE_MERGE = "force merge"; + public static final String MERGE_FAILED = "merge failed"; protected final ShardId shardId; protected final Logger logger; protected final EngineConfig engineConfig; protected final Store store; protected final AtomicBoolean isClosed = new AtomicBoolean(false); + private final CounterMetric totalUnreferencedFileCleanUpsPerformed = new CounterMetric(); private final CountDownLatch closedLatch = new CountDownLatch(1); protected final EventListener eventListener; protected final ReentrantLock failEngineLock = new ReentrantLock(); @@ -262,6 +270,13 @@ protected final DocsStats docsStats(IndexReader indexReader) { return new DocsStats(numDocs, numDeletedDocs, sizeInBytes); } + /** + * Returns the unreferenced file cleanup count for this engine. + */ + public long unreferencedFileCleanUpsPerformed() { + return totalUnreferencedFileCleanUpsPerformed.count(); + } + /** * Performs the pre-closing checks on the {@link Engine}. * @@ -451,8 +466,9 @@ public Condition newCondition() { * Holds result meta data (e.g. translog location, updated version) * for an executed write {@link Operation} * - * @opensearch.internal + * @opensearch.api **/ + @PublicApi(since = "1.0.0") public abstract static class Result { private final Operation.TYPE operationType; private final Result.Type resultType; @@ -568,8 +584,9 @@ void freeze() { /** * Type of the result * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Type { SUCCESS, FAILURE, @@ -580,8 +597,9 @@ public enum Type { /** * Index result * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class IndexResult extends Result { private final boolean created; @@ -617,8 +635,9 @@ public boolean isCreated() { /** * The delete result * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class DeleteResult extends Result { private final boolean found; @@ -654,8 +673,9 @@ public boolean isFound() { /** * A noop result * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class NoOpResult extends Result { NoOpResult(long term, long seqNo) { @@ -817,8 +837,9 @@ boolean assertSearcherIsWarmedUp(String source, SearcherScope scope) { /** * Scope of the searcher * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum SearcherScope { EXTERNAL, INTERNAL @@ -950,6 +971,10 @@ protected void fillSegmentStats(SegmentReader segmentReader, boolean includeSegm } } + boolean shouldCleanupUnreferencedFiles() { + return engineConfig.getIndexSettings().shouldCleanupUnreferencedFiles(); + } + private Map<String, Long> getSegmentFileSizes(SegmentReader segmentReader) { Directory directory = null; SegmentCommitInfo segmentCommitInfo = segmentReader.getSegmentInfo(); @@ -1291,6 +1316,15 @@ public void failEngine(String reason, @Nullable Exception failure) { ); } } + + // If cleanup of unreferenced files is enabled and a force merge or regular merge failed due to an IOException, + // clean up, on a best-effort basis, all unreferenced files created during the failed merge and reset the + // shard state back to the last Lucene commit. 
+ if (shouldCleanupUnreferencedFiles() && isMergeFailureDueToIOException(failure, reason)) { + logger.info("Cleaning up unreferenced files as merge failed due to: {}", reason); + cleanUpUnreferencedFiles(); + } + eventListener.onFailedEngine(reason, failure); } } catch (Exception inner) { @@ -1309,6 +1343,34 @@ public void failEngine(String reason, @Nullable Exception failure) { } } + /** + * Clean up all unreferenced files generated during a failed segment merge. This resets the shard state to the last + * Lucene commit. + */ + private void cleanUpUnreferencedFiles() { + try ( + IndexWriter writer = new IndexWriter( + store.directory(), + new IndexWriterConfig(Lucene.STANDARD_ANALYZER).setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) + .setCommitOnClose(false) + .setMergePolicy(NoMergePolicy.INSTANCE) + .setOpenMode(IndexWriterConfig.OpenMode.APPEND) + ) + ) { + // Do nothing except increase the metric count; closing the writer will kick off IndexFileDeleter, + // which will remove all unreferenced files + totalUnreferencedFileCleanUpsPerformed.inc(); + } catch (Exception ex) { + logger.error("Error while deleting unreferenced files ", ex); + } + } + + /** Check whether the merge failure happened due to IOException. */ + private boolean isMergeFailureDueToIOException(Exception failure, String reason) { + return (reason.equals(FORCE_MERGE) || reason.equals(MERGE_FAILED)) + && ExceptionsHelper.unwrap(failure, IOException.class) instanceof IOException; + } + /** Check whether the engine should be failed */ protected boolean maybeFailEngine(String source, Exception e) { if (Lucene.isCorruptionException(e)) { @@ -1321,8 +1383,9 @@ protected boolean maybeFailEngine(String source, Exception e) { /** * Event listener for the engine * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface EventListener { /** * Called when a fatal exception occurred @@ -1333,8 +1396,9 @@ default void onFailedEngine(String reason, @Nullable Exception e) {} /** * Supplier for the searcher * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public abstract static class SearcherSupplier implements Releasable { private final Function<Searcher, Searcher> wrapper; private final AtomicBoolean released = new AtomicBoolean(false); @@ -1368,8 +1432,9 @@ public final void close() { /** * The engine searcher * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class Searcher extends IndexSearcher implements Releasable { private final String source; private final Closeable onClose; @@ -1427,8 +1492,9 @@ public abstract static class Operation { /** * type of operation (index, delete), subclasses use static types * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum TYPE { INDEX, DELETE, @@ -1466,8 +1532,9 @@ public Operation(Term uid, long seqNo, long primaryTerm, long version, VersionTy /** * Origin of the operation * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Origin { PRIMARY, REPLICA, @@ -1525,8 +1592,9 @@ public long startTime() { /** * Index operation * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Index extends Operation { private final ParsedDocument doc; @@ -1643,8 +1711,9 @@ public long getIfPrimaryTerm() { /** * Delete operation * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Delete extends Operation { private final String id; @@ -1731,8 
+1800,9 @@ public long getIfPrimaryTerm() { /** * noop operation * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class NoOp extends Operation { private final String reason; @@ -1781,8 +1851,9 @@ public int estimatedSizeInBytes() { /** * Get operation * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Get { private final boolean realtime; private final Term uid; @@ -1857,8 +1928,9 @@ public long getIfPrimaryTerm() { /** * The Get result * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class GetResult implements Releasable { private final boolean exists; private final long version; @@ -1980,8 +2052,9 @@ public long getLastWriteNanos() { * * @see EngineConfig#getWarmer() * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface Warmer { /** * Called once a new top-level reader is opened. diff --git a/server/src/main/java/org/opensearch/index/engine/EngineConfig.java b/server/src/main/java/org/opensearch/index/engine/EngineConfig.java index 71bd64426161e..bf3e10d684c94 100644 --- a/server/src/main/java/org/opensearch/index/engine/EngineConfig.java +++ b/server/src/main/java/org/opensearch/index/engine/EngineConfig.java @@ -41,28 +41,32 @@ import org.apache.lucene.search.Sort; import org.apache.lucene.search.similarities.Similarity; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.MemorySizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.index.IndexSettings; +import org.opensearch.index.codec.CodecAliases; import org.opensearch.index.codec.CodecService; +import org.opensearch.index.codec.CodecSettings; import org.opensearch.index.mapper.ParsedDocument; import org.opensearch.index.seqno.RetentionLeases; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.store.Store; import org.opensearch.index.translog.InternalTranslogFactory; import org.opensearch.index.translog.TranslogConfig; import org.opensearch.index.translog.TranslogDeletionPolicyFactory; import org.opensearch.index.translog.TranslogFactory; import org.opensearch.indices.IndexingMemoryController; -import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.threadpool.ThreadPool; import java.util.Comparator; import java.util.List; import java.util.Objects; +import java.util.Set; import java.util.function.BooleanSupplier; import java.util.function.LongSupplier; import java.util.function.Supplier; @@ -72,8 +76,9 @@ * Once {@link Engine} has been created with this object, changes to this * object will affect the {@link Engine} instance. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class EngineConfig { private final ShardId shardId; private final IndexSettings indexSettings; @@ -103,7 +108,7 @@ public final class EngineConfig { private final LongSupplier globalCheckpointSupplier; private final Supplier<RetentionLeases> retentionLeasesSupplier; private final boolean isReadOnlyReplica; - private final BooleanSupplier primaryModeSupplier; + private final BooleanSupplier startedPrimarySupplier; private final Comparator<LeafReader> leafSorter; /** @@ -128,18 +133,29 @@ public Supplier<RetentionLeases> retentionLeasesSupplier() { public static final Setting<String> INDEX_CODEC_SETTING = new Setting<>("index.codec", "default", s -> { switch (s) { case "default": + case "lz4": case "best_compression": - case "zstd": - case "zstd_no_dict": + case "zlib": case "lucene_default": return s; default: - if (Codec.availableCodecs().contains(s) == false) { // we don't error message the not officially supported ones - throw new IllegalArgumentException( - "unknown value for [index.codec] must be one of [default, best_compression, zstd, zstd_no_dict] but was: " + s - ); + if (Codec.availableCodecs().contains(s)) { + return s; } - return s; + + for (String codecName : Codec.availableCodecs()) { + Codec codec = Codec.forName(codecName); + if (codec instanceof CodecAliases) { + CodecAliases codecWithAlias = (CodecAliases) codec; + if (codecWithAlias.aliases().contains(s)) { + return s; + } + } + } + + throw new IllegalArgumentException( + "unknown value for [index.codec] must be one of [default, lz4, best_compression, zlib] but was: " + s + ); } }, Property.IndexScope, Property.NodeScope); @@ -148,13 +164,63 @@ public Supplier<RetentionLeases> retentionLeasesSupplier() { * Compression Level gives a trade-off between compression ratio and speed. The higher compression level results in higher compression ratio but slower compression and decompression speeds. * This setting is <b>not</b> realtime updateable. 
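+ * <p> + * An illustrative, hypothetical example: a codec plugin whose codec declares support for this setting could be tuned with {@code index.codec.compression_level: 6}; the validation below rejects the setting for codecs that do not support it.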
*/ - public static final Setting<Integer> INDEX_CODEC_COMPRESSION_LEVEL_SETTING = Setting.intSetting( + + public static final Setting<Integer> INDEX_CODEC_COMPRESSION_LEVEL_SETTING = new Setting<>( "index.codec.compression_level", - 3, - 1, - 6, + Integer.toString(3), + new Setting.IntegerParser(1, 6, "index.codec.compression_level", false), Property.IndexScope - ); + ) { + @Override + public Set<SettingDependency> getSettingsDependencies(String key) { + return Set.of(new SettingDependency() { + @Override + public Setting<String> getSetting() { + return INDEX_CODEC_SETTING; + } + + @Override + public void validate(String key, Object value, Object dependency) { + if (!(dependency instanceof String)) { + throw new IllegalArgumentException("Codec should be of string type."); + } + doValidateCodecSettings((String) dependency); + } + }); + } + }; + + private static void doValidateCodecSettings(final String codec) { + switch (codec) { + case "best_compression": + case "zlib": + case "lucene_default": + case "default": + case "lz4": + break; + default: + if (Codec.availableCodecs().contains(codec)) { + Codec luceneCodec = Codec.forName(codec); + if (luceneCodec instanceof CodecSettings + && ((CodecSettings) luceneCodec).supports(INDEX_CODEC_COMPRESSION_LEVEL_SETTING)) { + return; + } + } + for (String codecName : Codec.availableCodecs()) { + Codec availableCodec = Codec.forName(codecName); + if (availableCodec instanceof CodecAliases) { + CodecAliases availableCodecWithAlias = (CodecAliases) availableCodec; + if (availableCodecWithAlias.aliases().contains(codec)) { + if (availableCodec instanceof CodecSettings + && ((CodecSettings) availableCodec).supports(INDEX_CODEC_COMPRESSION_LEVEL_SETTING)) { + return; + } + } + } + } + } + throw new IllegalArgumentException("Compression level cannot be set for the " + codec + " codec."); + } /** * Configures an index to optimize documents with auto generated ids for append only. If this setting is updated from <code>false</code> @@ -192,6 +258,7 @@ private EngineConfig(Builder builder) { this.codecService = builder.codecService; this.eventListener = builder.eventListener; codecName = builder.indexSettings.getValue(INDEX_CODEC_SETTING); + // We need to make the indexing buffer for this shard at least as large // as the amount of memory that is available for all engines on the // local node so that decisions to flush segments to disk are made by @@ -220,7 +287,7 @@ private EngineConfig(Builder builder) { this.primaryTermSupplier = builder.primaryTermSupplier; this.tombstoneDocSupplier = builder.tombstoneDocSupplier; this.isReadOnlyReplica = builder.isReadOnlyReplica; - this.primaryModeSupplier = builder.primaryModeSupplier; + this.startedPrimarySupplier = builder.startedPrimarySupplier; this.translogFactory = builder.translogFactory; this.leafSorter = builder.leafSorter; } @@ -428,11 +495,11 @@ public boolean isReadOnlyReplica() { } /** - * Returns the underlying primaryModeSupplier. + * Returns the underlying startedPrimarySupplier. * @return the primary mode supplier. */ - public BooleanSupplier getPrimaryModeSupplier() { - return primaryModeSupplier; + public BooleanSupplier getStartedPrimarySupplier() { + return startedPrimarySupplier; } /** @@ -447,8 +514,9 @@ public TranslogFactory getTranslogFactory() { * A supplier supplies tombstone documents which will be used in soft-update methods. * The returned document consists only _uid, _seqno, _term and _version fields; other metadata fields are excluded. 
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface TombstoneDocSupplier { /** * Creates a tombstone document for a delete operation. @@ -509,7 +577,7 @@ public static class Builder { private TombstoneDocSupplier tombstoneDocSupplier; private TranslogDeletionPolicyFactory translogDeletionPolicyFactory; private boolean isReadOnlyReplica; - private BooleanSupplier primaryModeSupplier; + private BooleanSupplier startedPrimarySupplier; private TranslogFactory translogFactory = new InternalTranslogFactory(); Comparator<LeafReader> leafSorter; @@ -633,8 +701,8 @@ public Builder readOnlyReplica(boolean isReadOnlyReplica) { return this; } - public Builder primaryModeSupplier(BooleanSupplier primaryModeSupplier) { - this.primaryModeSupplier = primaryModeSupplier; + public Builder startedPrimarySupplier(BooleanSupplier startedPrimarySupplier) { + this.startedPrimarySupplier = startedPrimarySupplier; return this; } diff --git a/server/src/main/java/org/opensearch/index/engine/EngineConfigFactory.java b/server/src/main/java/org/opensearch/index/engine/EngineConfigFactory.java index 744df13265250..77e2f1c55201d 100644 --- a/server/src/main/java/org/opensearch/index/engine/EngineConfigFactory.java +++ b/server/src/main/java/org/opensearch/index/engine/EngineConfigFactory.java @@ -19,18 +19,18 @@ import org.apache.lucene.search.similarities.Similarity; import org.opensearch.common.Nullable; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.index.IndexSettings; import org.opensearch.index.codec.CodecService; import org.opensearch.index.codec.CodecServiceConfig; import org.opensearch.index.codec.CodecServiceFactory; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.seqno.RetentionLeases; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.store.Store; import org.opensearch.index.translog.TranslogConfig; import org.opensearch.index.translog.TranslogDeletionPolicyFactory; import org.opensearch.index.translog.TranslogFactory; -import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.plugins.EnginePlugin; import org.opensearch.plugins.PluginsService; import org.opensearch.threadpool.ThreadPool; @@ -152,7 +152,7 @@ public EngineConfig newEngineConfig( LongSupplier primaryTermSupplier, EngineConfig.TombstoneDocSupplier tombstoneDocSupplier, boolean isReadOnlyReplica, - BooleanSupplier primaryModeSupplier, + BooleanSupplier startedPrimarySupplier, TranslogFactory translogFactory, Comparator<LeafReader> leafSorter ) { @@ -185,7 +185,7 @@ public EngineConfig newEngineConfig( .primaryTermSupplier(primaryTermSupplier) .tombstoneDocSupplier(tombstoneDocSupplier) .readOnlyReplica(isReadOnlyReplica) - .primaryModeSupplier(primaryModeSupplier) + .startedPrimarySupplier(startedPrimarySupplier) .translogFactory(translogFactory) .leafSorter(leafSorter) .build(); diff --git a/server/src/main/java/org/opensearch/index/engine/EngineException.java b/server/src/main/java/org/opensearch/index/engine/EngineException.java index 484bc6c7eea6a..7d039d8922a19 100644 --- a/server/src/main/java/org/opensearch/index/engine/EngineException.java +++ b/server/src/main/java/org/opensearch/index/engine/EngineException.java @@ -33,6 +33,7 @@ package org.opensearch.index.engine; import org.opensearch.OpenSearchException; +import org.opensearch.common.annotation.PublicApi; import 
org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.shard.ShardId; @@ -41,8 +42,9 @@ /** * Exception if there are any errors in the engine * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class EngineException extends OpenSearchException { public EngineException(ShardId shardId, String msg, Object... params) { diff --git a/server/src/main/java/org/opensearch/index/engine/EngineFactory.java b/server/src/main/java/org/opensearch/index/engine/EngineFactory.java index ced02ab3041c9..4e7e69bd3dd53 100644 --- a/server/src/main/java/org/opensearch/index/engine/EngineFactory.java +++ b/server/src/main/java/org/opensearch/index/engine/EngineFactory.java @@ -31,12 +31,15 @@ package org.opensearch.index.engine; +import org.opensearch.common.annotation.PublicApi; + /** * Simple Engine Factory * - * @opensearch.internal + * @opensearch.api */ @FunctionalInterface +@PublicApi(since = "1.0.0") public interface EngineFactory { Engine newReadWriteEngine(EngineConfig config); diff --git a/server/src/main/java/org/opensearch/index/engine/IndexVersionValue.java b/server/src/main/java/org/opensearch/index/engine/IndexVersionValue.java index 803d106a2f25e..c297022f5766d 100644 --- a/server/src/main/java/org/opensearch/index/engine/IndexVersionValue.java +++ b/server/src/main/java/org/opensearch/index/engine/IndexVersionValue.java @@ -45,6 +45,7 @@ final class IndexVersionValue extends VersionValue { private static final long RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(IndexVersionValue.class); + private static final long TRANSLOG_LOC_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(Translog.Location.class); private final Translog.Location translogLocation; @@ -55,7 +56,7 @@ final class IndexVersionValue extends VersionValue { @Override public long ramBytesUsed() { - return RAM_BYTES_USED + RamUsageEstimator.shallowSizeOf(translogLocation); + return RAM_BYTES_USED + (translogLocation == null ? 
0L : TRANSLOG_LOC_RAM_BYTES_USED); } @Override diff --git a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java index 6f8b6d449695e..e204656d3f106 100644 --- a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java @@ -67,12 +67,12 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.InfoStream; import org.opensearch.ExceptionsHelper; -import org.opensearch.core.Assertions; import org.opensearch.action.index.IndexRequest; import org.opensearch.common.Booleans; import org.opensearch.common.Nullable; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.concurrent.GatedCloseable; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.lucene.LoggerInfoStream; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; @@ -81,13 +81,14 @@ import org.opensearch.common.lucene.uid.VersionsAndSeqNoResolver; import org.opensearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndSeqNo; import org.opensearch.common.metrics.CounterMetric; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.KeyedLock; import org.opensearch.common.util.concurrent.ReleasableLock; import org.opensearch.common.util.io.IOUtils; -import org.opensearch.common.lease.Releasable; +import org.opensearch.core.Assertions; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexSettings; import org.opensearch.index.VersionType; import org.opensearch.index.fieldvisitor.IdOnlyFieldVisitor; @@ -103,15 +104,14 @@ import org.opensearch.index.seqno.SeqNoStats; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.OpenSearchMergePolicy; -import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.translog.InternalTranslogManager; import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.TranslogCorruptedException; import org.opensearch.index.translog.TranslogDeletionPolicy; -import org.opensearch.index.translog.TranslogManager; import org.opensearch.index.translog.TranslogException; -import org.opensearch.index.translog.InternalTranslogManager; -import org.opensearch.index.translog.listener.TranslogEventListener; +import org.opensearch.index.translog.TranslogManager; import org.opensearch.index.translog.listener.CompositeTranslogEventListener; +import org.opensearch.index.translog.listener.TranslogEventListener; import org.opensearch.search.suggest.completion.CompletionStats; import org.opensearch.threadpool.ThreadPool; @@ -292,7 +292,7 @@ public void onFailure(String reason, Exception ex) { new CompositeTranslogEventListener(Arrays.asList(internalTranslogEventListener, translogEventListener), shardId), this::ensureOpen, engineConfig.getTranslogFactory(), - engineConfig.getPrimaryModeSupplier() + engineConfig.getStartedPrimarySupplier() ); this.translogManager = translogManagerRef; this.softDeletesPolicy = newSoftDeletesPolicy(); @@ -402,7 +402,7 @@ public CompletionStats completionStats(String... 
fieldNamePatterns) { * The main purpose for this is that if we have external refreshes happening we don't issue extra * refreshes to clear version map memory etc. this can cause excessive segment creation if heavy indexing * is happening and the refresh interval is low (ie. 1 sec) - * + * <p> * This also prevents segment starvation where an internal reader holds on to old segments literally forever * since no indexing is happening and refreshes are only happening to the external reader manager, while with * this specialized implementation an external refresh will immediately be reflected on the internal reader @@ -710,6 +710,7 @@ private OpVsLuceneDocStatus compareOpToLuceneDocBasedOnSeqNo(final Operation op) final OpVsLuceneDocStatus status; VersionValue versionValue = getVersionFromMap(op.uid().bytes()); assert incrementVersionLookup(); + boolean segRepEnabled = engineConfig.getIndexSettings().isSegRepEnabled(); if (versionValue != null) { status = compareOpToVersionMapOnSeqNo(op.id(), op.seqNo(), op.primaryTerm(), versionValue); } else { @@ -722,10 +723,8 @@ private OpVsLuceneDocStatus compareOpToLuceneDocBasedOnSeqNo(final Operation op) } else if (op.seqNo() > docAndSeqNo.seqNo) { status = OpVsLuceneDocStatus.OP_NEWER; } else if (op.seqNo() == docAndSeqNo.seqNo) { - assert localCheckpointTracker.hasProcessed(op.seqNo()) : "local checkpoint tracker is not updated seq_no=" - + op.seqNo() - + " id=" - + op.id(); + assert localCheckpointTracker.hasProcessed(op.seqNo()) || segRepEnabled + : "local checkpoint tracker is not updated seq_no=" + op.seqNo() + " id=" + op.id(); status = OpVsLuceneDocStatus.OP_STALE_OR_EQUAL; } else { status = OpVsLuceneDocStatus.OP_STALE_OR_EQUAL; @@ -927,6 +926,7 @@ public IndexResult index(Index index) throws IOException { plan.currentNotFoundOrDeleted ); } + } if (index.origin().isFromTranslog() == false) { final Translog.Location location; @@ -1005,10 +1005,18 @@ protected final IndexingStrategy planIndexingAsNonPrimary(Index index) throws IO assert maxSeqNoOfUpdatesOrDeletes < index.seqNo() : index.seqNo() + ">=" + maxSeqNoOfUpdatesOrDeletes; plan = IndexingStrategy.optimizedAppendOnly(index.version(), 0); } else { + boolean segRepEnabled = engineConfig.getIndexSettings().isSegRepEnabled(); versionMap.enforceSafeAccess(); final OpVsLuceneDocStatus opVsLucene = compareOpToLuceneDocBasedOnSeqNo(index); if (opVsLucene == OpVsLuceneDocStatus.OP_STALE_OR_EQUAL) { + if (segRepEnabled) { + // For segrep enabled indices, we can't completely rely on the localCheckpointTracker, + // as the preserved checkpoint may not have all the operations present in Lucene. + // We don't need to index it again as a stale op, as that would create multiple documents for the same seq no. + plan = IndexingStrategy.processButSkipLucene(false, index.version()); + } else { + plan = IndexingStrategy.processAsStaleOp(index.version()); + } } else { plan = IndexingStrategy.processNormally(opVsLucene == OpVsLuceneDocStatus.LUCENE_DOC_NOT_FOUND, index.version(), 0); } @@ -1404,7 +1412,9 @@ private Exception tryAcquireInFlightDocs(Operation operation, int addingDocs) { final long totalDocs = indexWriter.getPendingNumDocs() + inFlightDocCount.addAndGet(addingDocs); if (totalDocs > maxDocs) { releaseInFlightDocs(addingDocs); - return new IllegalArgumentException("Number of documents in the index can't exceed [" + maxDocs + "]"); + return new IllegalArgumentException( + "Number of documents in shard " + shardId + " exceeds the limit of [" + maxDocs + "] 
documents per shard" + ); } else { return null; } @@ -1442,9 +1452,17 @@ protected final DeletionStrategy planDeletionAsNonPrimary(Delete delete) throws // See testRecoveryWithOutOfOrderDelete for an example of peer recovery plan = DeletionStrategy.processButSkipLucene(false, delete.version()); } else { + boolean segRepEnabled = engineConfig.getIndexSettings().isSegRepEnabled(); final OpVsLuceneDocStatus opVsLucene = compareOpToLuceneDocBasedOnSeqNo(delete); if (opVsLucene == OpVsLuceneDocStatus.OP_STALE_OR_EQUAL) { - plan = DeletionStrategy.processAsStaleOp(delete.version()); + if (segRepEnabled) { + // For segrep enabled indices, we can't completely rely on the localCheckpointTracker, + // as the preserved checkpoint may not have all the operations present in Lucene. + // We don't need to index it again as a stale op, as that would create multiple documents for the same seq no. + plan = DeletionStrategy.processButSkipLucene(false, delete.version()); + } else { + plan = DeletionStrategy.processAsStaleOp(delete.version()); + } } else { plan = DeletionStrategy.processNormally(opVsLucene == OpVsLuceneDocStatus.LUCENE_DOC_NOT_FOUND, delete.version(), 0); } @@ -1846,6 +1864,13 @@ public void flush(boolean force, boolean waitIfOngoing) throws EngineException { try { translogManager.rollTranslogGeneration(); logger.trace("starting commit for flush; commitTranslog=true"); + // With Segment Replication we need to hold the latest commit before a new one is created and ensure it is released + // only after the active reader is updated. This ensures that a flush does not wipe out a required commit point file + // while we are in refresh listeners. + final GatedCloseable<IndexCommit> latestCommit = engineConfig.getIndexSettings().isSegRepEnabled() ? acquireLastIndexCommit(false) : null; commitIndexWriter(indexWriter, translogManager.getTranslogUUID()); logger.trace("finished commit for flush"); @@ -1859,6 +1884,11 @@ public void flush(boolean force, boolean waitIfOngoing) throws EngineException { // we need to refresh in order to clear older version values refresh("version_table_flush", SearcherScope.INTERNAL, true); + + if (latestCommit != null) { + latestCommit.close(); + } + translogManager.trimUnreferencedReaders(); } catch (AlreadyClosedException e) { failOnTragicEvent(e); @@ -2010,7 +2040,7 @@ public void forceMerge( throw ex; } catch (Exception e) { try { - maybeFailEngine("force merge", e); + maybeFailEngine(FORCE_MERGE, e); } catch (Exception inner) { e.addSuppressed(inner); } @@ -2118,41 +2148,32 @@ protected SegmentInfos getLastCommittedSegmentInfos() { @Override protected SegmentInfos getLatestSegmentInfos() { - OpenSearchDirectoryReader reader = null; - try { - reader = internalReaderManager.acquire(); - return ((StandardDirectoryReader) reader.getDelegate()).getSegmentInfos(); + try (final GatedCloseable<SegmentInfos> snapshot = getSegmentInfosSnapshot()) { + return snapshot.get(); } catch (IOException e) { throw new EngineException(shardId, e.getMessage(), e); - } finally { - try { - internalReaderManager.release(reader); - } catch (IOException e) { - throw new EngineException(shardId, e.getMessage(), e); - } } } /** - * Fetch the latest {@link SegmentInfos} object via {@link #getLatestSegmentInfos()} - * but also increment the ref-count to ensure that these segment files are retained - * until the reference is closed. On close, the ref-count is decremented. + * Fetch the latest {@link SegmentInfos} from the current ReaderManager's active DirectoryReader. 
+ * This method will hold the reader reference until the returned {@link GatedCloseable} is closed. */ @Override public GatedCloseable<SegmentInfos> getSegmentInfosSnapshot() { - final SegmentInfos segmentInfos = getLatestSegmentInfos(); + final OpenSearchDirectoryReader reader; try { - indexWriter.incRefDeleter(segmentInfos); + reader = internalReaderManager.acquire(); + return new GatedCloseable<>(((StandardDirectoryReader) reader.getDelegate()).getSegmentInfos(), () -> { + try { + internalReaderManager.release(reader); + } catch (AlreadyClosedException e) { + logger.warn("Engine is already closed.", e); + } + }); } catch (IOException e) { throw new EngineException(shardId, e.getMessage(), e); } - return new GatedCloseable<>(segmentInfos, () -> { - try { - indexWriter.decRefDeleter(segmentInfos); - } catch (AlreadyClosedException e) { - logger.warn("Engine is already closed.", e); - } - }); } @Override @@ -2469,7 +2490,7 @@ protected void doRun() throws Exception { * confidence that the call stack does not contain catch statements that would cause the error that might be thrown * here from being caught and never reaching the uncaught exception handler. */ - failEngine("merge failed", new MergePolicy.MergeException(exc)); + failEngine(MERGE_FAILED, new MergePolicy.MergeException(exc)); } }); } @@ -2768,7 +2789,7 @@ public final long lastRefreshedCheckpoint() { * Returns the current local checkpoint getting refreshed internally. */ public final long currentOngoingRefreshCheckpoint() { - return lastRefreshedCheckpointListener.pendingCheckpoint; + return lastRefreshedCheckpointListener.pendingCheckpoint.get(); } private final Object refreshIfNeededMutex = new Object(); @@ -2788,29 +2809,33 @@ protected final void refreshIfNeeded(String source, long requestingSeqNo) { private final class LastRefreshedCheckpointListener implements ReferenceManager.RefreshListener { final AtomicLong refreshedCheckpoint; - volatile long pendingCheckpoint; + volatile AtomicLong pendingCheckpoint; LastRefreshedCheckpointListener(long initialLocalCheckpoint) { this.refreshedCheckpoint = new AtomicLong(initialLocalCheckpoint); - this.pendingCheckpoint = initialLocalCheckpoint; + this.pendingCheckpoint = new AtomicLong(initialLocalCheckpoint); } @Override public void beforeRefresh() { // all changes until this point should be visible after refresh - pendingCheckpoint = localCheckpointTracker.getProcessedCheckpoint(); + pendingCheckpoint.updateAndGet(curr -> Math.max(curr, localCheckpointTracker.getProcessedCheckpoint())); } @Override public void afterRefresh(boolean didRefresh) { if (didRefresh) { - updateRefreshedCheckpoint(pendingCheckpoint); + updateRefreshedCheckpoint(pendingCheckpoint.get()); } } void updateRefreshedCheckpoint(long checkpoint) { refreshedCheckpoint.updateAndGet(curr -> Math.max(curr, checkpoint)); assert refreshedCheckpoint.get() >= checkpoint : refreshedCheckpoint.get() + " < " + checkpoint; + // This shouldn't be required ideally, but we're also invoking this method from refresh as of now. + // This change is added as safety check to ensure that our checkpoint values are consistent at all times. 
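The comment above describes a checkpoint that racing refresh threads may bump concurrently but that must never move backwards; the updateAndGet call that follows enforces exactly that. The pattern in isolation, with illustrative names that are not OpenSearch API:

    import java.util.concurrent.atomic.AtomicLong;

    // A checkpoint that concurrent threads may raise but never lower.
    final class MonotonicCheckpoint {
        private final AtomicLong value;

        MonotonicCheckpoint(long initial) {
            this.value = new AtomicLong(initial);
        }

        // Raises to `candidate` only if it is ahead of the current value;
        // returns the (possibly unchanged) checkpoint after the update.
        long advanceTo(long candidate) {
            return value.updateAndGet(curr -> Math.max(curr, candidate));
        }
    }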
+ pendingCheckpoint.updateAndGet(curr -> Math.max(curr, checkpoint)); + } } diff --git a/server/src/main/java/org/opensearch/index/engine/LiveVersionMap.java b/server/src/main/java/org/opensearch/index/engine/LiveVersionMap.java index 07fcc546d2fb2..87ff449ee74e0 100644 --- a/server/src/main/java/org/opensearch/index/engine/LiveVersionMap.java +++ b/server/src/main/java/org/opensearch/index/engine/LiveVersionMap.java @@ -36,9 +36,9 @@ import org.apache.lucene.util.Accountable; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.RamUsageEstimator; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.util.concurrent.KeyedLock; -import org.opensearch.common.lease.Releasable; import java.io.IOException; import java.util.Collection; diff --git a/server/src/main/java/org/opensearch/index/engine/LuceneChangesSnapshot.java b/server/src/main/java/org/opensearch/index/engine/LuceneChangesSnapshot.java index 23fe59456887e..00d15478f9866 100644 --- a/server/src/main/java/org/opensearch/index/engine/LuceneChangesSnapshot.java +++ b/server/src/main/java/org/opensearch/index/engine/LuceneChangesSnapshot.java @@ -47,10 +47,10 @@ import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopFieldCollector; import org.apache.lucene.util.ArrayUtil; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.lucene.search.Queries; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.index.fieldvisitor.FieldsVisitor; import org.opensearch.index.mapper.SeqNoFieldMapper; import org.opensearch.index.mapper.SourceFieldMapper; diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java index b55508b7facd3..ed8dba2f8902d 100644 --- a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java @@ -13,31 +13,36 @@ import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.SoftDeletesDirectoryReaderWrapper; import org.apache.lucene.search.ReferenceManager; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ReleasableLock; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.index.seqno.LocalCheckpointTracker; import org.opensearch.index.seqno.SeqNoStats; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.translog.Translog; -import org.opensearch.index.translog.TranslogManager; -import org.opensearch.index.translog.WriteOnlyTranslogManager; +import org.opensearch.index.translog.TranslogCorruptedException; import org.opensearch.index.translog.TranslogDeletionPolicy; import org.opensearch.index.translog.TranslogException; +import org.opensearch.index.translog.TranslogManager; +import org.opensearch.index.translog.WriteOnlyTranslogManager; import org.opensearch.index.translog.listener.TranslogEventListener; import 
org.opensearch.search.suggest.completion.CompletionStats; import java.io.Closeable; import java.io.IOException; import java.util.Arrays; +import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; import java.util.function.BiFunction; import static org.opensearch.index.seqno.SequenceNumbers.MAX_SEQ_NO; @@ -47,8 +52,9 @@ * is enabled. This Engine does not create an IndexWriter, rather it refreshes a {@link NRTReplicationReaderManager} * with new Segments when received from an external source. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NRTReplicationEngine extends Engine { private volatile SegmentInfos lastCommittedSegmentInfos; @@ -56,9 +62,10 @@ public class NRTReplicationEngine extends Engine { private final CompletionStatsCache completionStatsCache; private final LocalCheckpointTracker localCheckpointTracker; private final WriteOnlyTranslogManager translogManager; - private final boolean shouldCommit; + private final Lock flushLock = new ReentrantLock(); + protected final ReplicaFileTracker replicaFileTracker; - private volatile long lastReceivedGen = SequenceNumbers.NO_OPS_PERFORMED; + private volatile long lastReceivedPrimaryGen = SequenceNumbers.NO_OPS_PERFORMED; private static final int SI_COUNTER_INCREMENT = 10; @@ -67,8 +74,14 @@ public NRTReplicationEngine(EngineConfig engineConfig) { store.incRef(); NRTReplicationReaderManager readerManager = null; WriteOnlyTranslogManager translogManagerRef = null; + boolean success = false; try { - lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); + this.replicaFileTracker = new ReplicaFileTracker(store::deleteQuiet); + this.lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); + // always protect latest commit on disk. + replicaFileTracker.incRef(this.lastCommittedSegmentInfos.files(true)); + // cleanup anything not referenced by the latest infos. 
+ cleanUnreferencedFiles(); readerManager = buildReaderManager(); final SequenceNumbers.CommitInfo commitInfo = SequenceNumbers.loadSeqNoInfoFromLuceneCommit( this.lastCommittedSegmentInfos.getUserData().entrySet() @@ -84,7 +97,7 @@ public NRTReplicationEngine(EngineConfig engineConfig) { for (ReferenceManager.RefreshListener listener : engineConfig.getInternalRefreshListener()) { this.readerManager.addListener(listener); } - final Map<String, String> userData = store.readLastCommittedSegmentsInfo().getUserData(); + final Map<String, String> userData = this.lastCommittedSegmentInfos.getUserData(); final String translogUUID = Objects.requireNonNull(userData.get(Translog.TRANSLOG_UUID_KEY)); translogManagerRef = new WriteOnlyTranslogManager( engineConfig.getTranslogConfig(), @@ -112,21 +125,32 @@ public void onAfterTranslogSync() { }, this, engineConfig.getTranslogFactory(), - engineConfig.getPrimaryModeSupplier() + engineConfig.getStartedPrimarySupplier() ); this.translogManager = translogManagerRef; - this.shouldCommit = engineConfig.getIndexSettings().isRemoteStoreEnabled() == false; - } catch (IOException e) { - IOUtils.closeWhileHandlingException(store::decRef, readerManager, translogManagerRef); + success = true; + } catch (IOException | TranslogCorruptedException e) { throw new EngineCreationFailureException(shardId, "failed to create engine", e); + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(readerManager, translogManagerRef); + if (isClosed.get() == false) { + // failure, we need to dec the store reference + store.decRef(); + } + } } } + public void cleanUnreferencedFiles() throws IOException { + replicaFileTracker.deleteUnreferencedFiles(store.directory().listAll()); + } + private NRTReplicationReaderManager buildReaderManager() throws IOException { return new NRTReplicationReaderManager( OpenSearchDirectoryReader.wrap(getDirectoryReader(), shardId), - store::incRefFileDeleter, - store::decRefFileDeleter + replicaFileTracker::incRef, + replicaFileTracker::decRef ); } @@ -142,38 +166,40 @@ public synchronized void updateSegments(final SegmentInfos infos) throws IOExcep final long maxSeqNo = Long.parseLong(infos.userData.get(MAX_SEQ_NO)); final long incomingGeneration = infos.getGeneration(); readerManager.updateSegments(infos); - - // Commit and roll the translog when we receive a different generation than what was last received. - // lower/higher gens are possible from a new primary that was just elected. - if (incomingGeneration != lastReceivedGen) { - commitSegmentInfos(); + // Ensure that we commit and clear the local translog if a new commit has been made on the primary. + // We do not compare against the last local commit gen here because it is possible to receive + // a lower gen from a newly elected primary shard that is behind this shard's last commit gen. + // In that case we still commit into the next local generation. + if (incomingGeneration != this.lastReceivedPrimaryGen) { + flush(false, true); translogManager.getDeletionPolicy().setLocalCheckpointOfSafeCommit(maxSeqNo); translogManager.rollTranslogGeneration(); } - lastReceivedGen = incomingGeneration; + this.lastReceivedPrimaryGen = incomingGeneration; localCheckpointTracker.fastForwardProcessedSeqNo(maxSeqNo); } } /** * Persist the latest live SegmentInfos. - * - * This method creates a commit point from the latest SegmentInfos. It is intended to be used when this shard is about to be promoted as the new primary. 
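The generation check in updateSegments above reduces to the following decision, sketched here with stand-ins for the engine's own fields and helpers. Inequality, not ordering, is the correct comparison because a newly elected primary may legitimately send a lower generation than this replica's last local commit:

    import java.util.function.LongConsumer;

    // Sketch of the replica commit trigger; commitAndTrim stands in for
    // flush(false, true) plus the translog safe-commit bookkeeping and roll.
    final class PrimaryGenerationTracker {
        private long lastReceivedPrimaryGen = -1; // SequenceNumbers.NO_OPS_PERFORMED in the engine

        void onIncomingSegmentInfos(long incomingGeneration, long maxSeqNo, LongConsumer commitAndTrim) {
            // Commit whenever the primary's commit generation *changes*,
            // whether it moved up or down.
            if (incomingGeneration != lastReceivedPrimaryGen) {
                commitAndTrim.accept(maxSeqNo);
            }
            lastReceivedPrimaryGen = incomingGeneration;
        }
    }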
- * - * TODO: If this method is invoked while the engine is currently updating segments on its reader, wait for that update to complete so the updated segments are used. - * + * <p> + * This method creates a commit point from the latest SegmentInfos. * * @throws IOException - When there is an IO error committing the SegmentInfos. */ private void commitSegmentInfos(SegmentInfos infos) throws IOException { - if (shouldCommit) { - store.commitSegmentInfos(infos, localCheckpointTracker.getMaxSeqNo(), localCheckpointTracker.getProcessedCheckpoint()); - } + // get a reference to the previous commit files so they can be decref'd once a new commit is made. + final Collection<String> previousCommitFiles = getLastCommittedSegmentInfos().files(true); + store.commitSegmentInfos(infos, localCheckpointTracker.getMaxSeqNo(), localCheckpointTracker.getProcessedCheckpoint()); this.lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); + // incref the latest on-disk commit. + replicaFileTracker.incRef(this.lastCommittedSegmentInfos.files(true)); + // decref the prev commit. + replicaFileTracker.decRef(previousCommitFiles); translogManager.syncTranslog(); } - protected void commitSegmentInfos() throws IOException { + private void commitSegmentInfos() throws IOException { commitSegmentInfos(getLatestSegmentInfos()); } @@ -340,7 +366,28 @@ public boolean shouldPeriodicallyFlush() { } @Override - public void flush(boolean force, boolean waitIfOngoing) throws EngineException {} + public void flush(boolean force, boolean waitIfOngoing) throws EngineException { + ensureOpen(); + // readLock is held here to wait/block any concurrent close that acquires the writeLock. + try (final ReleasableLock lock = readLock.acquire()) { + ensureOpen(); + if (flushLock.tryLock() == false) { + if (waitIfOngoing == false) { + return; + } + flushLock.lock(); + } + // we are now locked. + try { + commitSegmentInfos(); + } catch (IOException e) { + maybeFailEngine("flush", e); + throw new FlushFailedEngineException(shardId, e); + } finally { + flushLock.unlock(); + } + } + } @Override public void forceMerge( @@ -354,6 +401,9 @@ public void forceMerge( @Override public GatedCloseable<IndexCommit> acquireLastIndexCommit(boolean flushFirst) throws EngineException { + if (flushFirst) { + flush(false, true); + } try { final IndexCommit indexCommit = Lucene.getIndexCommit(lastCommittedSegmentInfos, store.directory()); return new GatedCloseable<>(indexCommit, () -> {}); @@ -378,27 +428,41 @@ protected final void closeNoLock(String reason, CountDownLatch closedLatch) { assert rwl.isWriteLockedByCurrentThread() || failEngineLock.isHeldByCurrentThread() : "Either the write lock must be held or the engine must be currently be failing itself"; try { - // if remote store is enabled, all segments durably persisted - if (shouldCommit) { - final SegmentInfos latestSegmentInfos = getLatestSegmentInfos(); - /* - This is a workaround solution which decreases the chances of conflict on replica nodes when same file is copied - from two different primaries during failover. Increasing counter helps in avoiding this conflict as counter is - used to generate new segment file names. The ideal solution is to identify the counter from previous primary. - */ + final SegmentInfos latestSegmentInfos = getLatestSegmentInfos(); + /* + This is a workaround solution which decreases the chances of conflict on replica nodes when same file is copied + from two different primaries during failover. 
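The tryLock sequence in the new flush() above implements skip-if-busy-unless-asked-to-wait. Isolated, with the commit body abstracted to a Runnable, the protocol is:

    import java.util.concurrent.locks.Lock;
    import java.util.concurrent.locks.ReentrantLock;

    // Flush gate sketch; `commit` abstracts commitSegmentInfos().
    final class FlushGate {
        private final Lock flushLock = new ReentrantLock();

        void flush(boolean waitIfOngoing, Runnable commit) {
            if (flushLock.tryLock() == false) {
                if (waitIfOngoing == false) {
                    return;           // a flush is already running; skip quietly
                }
                flushLock.lock();     // caller asked to wait for its turn
            }
            try {
                commit.run();
            } finally {
                flushLock.unlock();
            }
        }
    }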
Increasing counter helps in avoiding this conflict as counter is + used to generate new segment file names. The ideal solution is to identify the counter from previous primary. + This is not required for remote store implementations given on failover the replica re-syncs with the store + during promotion. + */ + if (engineConfig.getIndexSettings().isRemoteStoreEnabled() == false) { latestSegmentInfos.counter = latestSegmentInfos.counter + SI_COUNTER_INCREMENT; latestSegmentInfos.changed(); + } + try { commitSegmentInfos(latestSegmentInfos); - } else { - store.directory().sync(List.of(store.directory().listAll())); - store.directory().syncMetaData(); + } catch (IOException e) { + // mark the store corrupted unless we are closing as result of engine failure. + // in this case Engine#failShard will handle store corruption. + if (failEngineLock.isHeldByCurrentThread() == false && store.isMarkedCorrupted() == false) { + try { + store.markStoreCorrupted(e); + } catch (IOException ex) { + logger.warn("Unable to mark store corrupted", ex); + } + } } - IOUtils.close(readerManager, translogManager, store::decRef); + IOUtils.close(readerManager, translogManager); } catch (Exception e) { - logger.warn("failed to close engine", e); + logger.error("failed to close engine", e); } finally { - logger.debug("engine closed [{}]", reason); - closedLatch.countDown(); + try { + store.decRef(); + logger.debug("engine closed [{}]", reason); + } finally { + closedLatch.countDown(); + } } } } @@ -445,6 +509,20 @@ protected SegmentInfos getLatestSegmentInfos() { return readerManager.getSegmentInfos(); } + @Override + public synchronized GatedCloseable<SegmentInfos> getSegmentInfosSnapshot() { + // get reference to latest infos + final SegmentInfos latestSegmentInfos = getLatestSegmentInfos(); + // incref all files + try { + final Collection<String> files = latestSegmentInfos.files(false); + replicaFileTracker.incRef(files); + return new GatedCloseable<>(latestSegmentInfos, () -> { replicaFileTracker.decRef(files); }); + } catch (IOException e) { + throw new EngineException(shardId, e.getMessage(), e); + } + } + protected LocalCheckpointTracker getLocalCheckpointTracker() { return localCheckpointTracker; } diff --git a/server/src/main/java/org/opensearch/index/engine/NoOpEngine.java b/server/src/main/java/org/opensearch/index/engine/NoOpEngine.java index 5c548df1cbb60..9071b0e7a1eb3 100644 --- a/server/src/main/java/org/opensearch/index/engine/NoOpEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/NoOpEngine.java @@ -44,13 +44,13 @@ import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.DocsStats; import org.opensearch.index.store.Store; +import org.opensearch.index.translog.DefaultTranslogDeletionPolicy; +import org.opensearch.index.translog.NoOpTranslogManager; import org.opensearch.index.translog.Translog; -import org.opensearch.index.translog.TranslogManager; import org.opensearch.index.translog.TranslogConfig; -import org.opensearch.index.translog.TranslogException; -import org.opensearch.index.translog.NoOpTranslogManager; -import org.opensearch.index.translog.DefaultTranslogDeletionPolicy; import org.opensearch.index.translog.TranslogDeletionPolicy; +import org.opensearch.index.translog.TranslogException; +import org.opensearch.index.translog.TranslogManager; import java.io.IOException; import java.io.UncheckedIOException; @@ -203,7 +203,7 @@ public void trimUnreferencedTranslogFiles() throws TranslogException { engineConfig.getGlobalCheckpointSupplier(), 
engineConfig.getPrimaryTermSupplier(), seqNo -> {}, - engineConfig.getPrimaryModeSupplier() + engineConfig.getStartedPrimarySupplier() ) ) { translog.trimUnreferencedReaders(); diff --git a/server/src/main/java/org/opensearch/index/engine/OpenSearchConcurrentMergeScheduler.java b/server/src/main/java/org/opensearch/index/engine/OpenSearchConcurrentMergeScheduler.java index ce0f1e85e294d..c57c0090c180b 100644 --- a/server/src/main/java/org/opensearch/index/engine/OpenSearchConcurrentMergeScheduler.java +++ b/server/src/main/java/org/opensearch/index/engine/OpenSearchConcurrentMergeScheduler.java @@ -41,15 +41,15 @@ import org.opensearch.common.metrics.CounterMetric; import org.opensearch.common.metrics.MeanMetric; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexSettings; import org.opensearch.index.MergeSchedulerConfig; import org.opensearch.index.merge.MergeStats; import org.opensearch.index.merge.OnGoingMerge; -import org.opensearch.core.index.shard.ShardId; import java.io.IOException; import java.util.Collections; diff --git a/server/src/main/java/org/opensearch/index/engine/OpenSearchReaderManager.java b/server/src/main/java/org/opensearch/index/engine/OpenSearchReaderManager.java index 06e5ed3539142..e1a8f29612871 100644 --- a/server/src/main/java/org/opensearch/index/engine/OpenSearchReaderManager.java +++ b/server/src/main/java/org/opensearch/index/engine/OpenSearchReaderManager.java @@ -32,15 +32,14 @@ package org.opensearch.index.engine; -import java.io.IOException; - import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.search.ReferenceManager; - import org.apache.lucene.search.SearcherManager; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; +import java.io.IOException; + /** * Utility class to safely share {@link OpenSearchDirectoryReader} instances across * multiple threads, while periodically reopening. 
This class ensures each diff --git a/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java index 3d91fb348a066..7ff3145055df8 100644 --- a/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java @@ -48,11 +48,11 @@ import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.store.Store; import org.opensearch.index.translog.DefaultTranslogDeletionPolicy; -import org.opensearch.index.translog.Translog; -import org.opensearch.index.translog.TranslogManager; import org.opensearch.index.translog.NoOpTranslogManager; +import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.TranslogConfig; import org.opensearch.index.translog.TranslogDeletionPolicy; +import org.opensearch.index.translog.TranslogManager; import org.opensearch.index.translog.TranslogStats; import org.opensearch.search.suggest.completion.CompletionStats; import org.opensearch.transport.Transports; @@ -278,7 +278,7 @@ private static TranslogStats translogStats(final EngineConfig config, final Segm config.getGlobalCheckpointSupplier(), config.getPrimaryTermSupplier(), seqNo -> {}, - config.getPrimaryModeSupplier() + config.getStartedPrimarySupplier() ) ) { return translog.stats(); diff --git a/server/src/main/java/org/opensearch/index/engine/ReplicaFileTracker.java b/server/src/main/java/org/opensearch/index/engine/ReplicaFileTracker.java new file mode 100644 index 0000000000000..a9cc24abe3c01 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/engine/ReplicaFileTracker.java @@ -0,0 +1,92 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.engine; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.function.Consumer; + +/** + * This class is heavily influenced by Lucene's ReplicaFileDeleter class used to keep track of + * segment files that should be preserved on replicas between replication events. 
+ * <p> + * https://github.com/apache/lucene/blob/main/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/ReplicaFileDeleter.java + * + * @opensearch.internal + */ +final class ReplicaFileTracker { + + public static final Logger logger = LogManager.getLogger(ReplicaFileTracker.class); + private final Map<String, Integer> refCounts = new HashMap<>(); + private final Consumer<String> fileDeleter; + private final Set<String> EXCLUDE_FILES = Set.of("write.lock"); + + public ReplicaFileTracker(Consumer<String> fileDeleter) { + this.fileDeleter = fileDeleter; + } + + public synchronized void incRef(Collection<String> fileNames) { + for (String fileName : fileNames) { + refCounts.merge(fileName, 1, Integer::sum); + } + } + + public synchronized int refCount(String file) { + return Optional.ofNullable(refCounts.get(file)).orElse(0); + } + + public synchronized void decRef(Collection<String> fileNames) { + Set<String> toDelete = new HashSet<>(); + for (String fileName : fileNames) { + Integer curCount = refCounts.get(fileName); + assert curCount != null : "fileName=" + fileName; + assert curCount > 0; + if (curCount == 1) { + refCounts.remove(fileName); + toDelete.add(fileName); + } else { + refCounts.put(fileName, curCount - 1); + } + } + if (toDelete.isEmpty() == false) { + delete(toDelete); + } + } + + public void deleteUnreferencedFiles(String... toDelete) { + for (String file : toDelete) { + if (canDelete(file)) { + delete(file); + } + } + } + + private synchronized void delete(Collection<String> toDelete) { + for (String fileName : toDelete) { + delete(fileName); + } + } + + private synchronized void delete(String fileName) { + assert canDelete(fileName); + fileDeleter.accept(fileName); + } + + private synchronized boolean canDelete(String fileName) { + return EXCLUDE_FILES.contains(fileName) == false && refCounts.containsKey(fileName) == false; + } + +} diff --git a/server/src/main/java/org/opensearch/index/engine/SafeCommitInfo.java b/server/src/main/java/org/opensearch/index/engine/SafeCommitInfo.java index fcf1cb186269c..4b54033dc6728 100644 --- a/server/src/main/java/org/opensearch/index/engine/SafeCommitInfo.java +++ b/server/src/main/java/org/opensearch/index/engine/SafeCommitInfo.java @@ -31,13 +31,15 @@ package org.opensearch.index.engine; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.seqno.SequenceNumbers; /** * Information about the safe commit, for making decisions about recoveries. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SafeCommitInfo { public final long localCheckpoint; diff --git a/server/src/main/java/org/opensearch/index/engine/Segment.java b/server/src/main/java/org/opensearch/index/engine/Segment.java index 035d5bb293303..7881abcf58e0c 100644 --- a/server/src/main/java/org/opensearch/index/engine/Segment.java +++ b/server/src/main/java/org/opensearch/index/engine/Segment.java @@ -34,17 +34,18 @@ import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; -import org.apache.lucene.search.SortedSetSortField; +import org.apache.lucene.search.SortedNumericSelector; import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.SortedSetSelector; -import org.apache.lucene.search.SortedNumericSelector; +import org.apache.lucene.search.SortedSetSortField; import org.opensearch.Version; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.lucene.Lucene; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.lucene.Lucene; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import java.io.IOException; import java.util.Map; @@ -53,8 +54,9 @@ /** * A segment in the engine * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Segment implements Writeable { private String name; diff --git a/server/src/main/java/org/opensearch/index/engine/SegmentsStats.java b/server/src/main/java/org/opensearch/index/engine/SegmentsStats.java index dbadd50ebd1a1..34aecfc62b8b2 100644 --- a/server/src/main/java/org/opensearch/index/engine/SegmentsStats.java +++ b/server/src/main/java/org/opensearch/index/engine/SegmentsStats.java @@ -33,12 +33,16 @@ package org.opensearch.index.engine; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.ReplicationStats; +import org.opensearch.index.codec.fuzzy.FuzzyFilterPostingsFormat; +import org.opensearch.index.remote.RemoteSegmentStats; import java.io.IOException; import java.util.Collections; @@ -48,8 +52,9 @@ /** * Tracker for segment stats * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SegmentsStats implements Writeable, ToXContentFragment { private long count; @@ -58,9 +63,14 @@ public class SegmentsStats implements Writeable, ToXContentFragment { private long maxUnsafeAutoIdTimestamp = Long.MIN_VALUE; private long bitsetMemoryInBytes; private final Map<String, Long> fileSizes; - + private final RemoteSegmentStats remoteSegmentStats; private static final ByteSizeValue ZERO_BYTE_SIZE_VALUE = new ByteSizeValue(0L); + /** + * Segment replication statistics. + */ + private final ReplicationStats replicationStats; + /* * A map to provide a best-effort approach describing Lucene index files. 
* @@ -86,11 +96,14 @@ public class SegmentsStats implements Writeable, ToXContentFragment { Map.entry("tvx", "Term Vector Index"), Map.entry("tvd", "Term Vector Documents"), Map.entry("tvf", "Term Vector Fields"), - Map.entry("liv", "Live Documents") + Map.entry("liv", "Live Documents"), + Map.entry(FuzzyFilterPostingsFormat.FUZZY_FILTER_FILE_EXTENSION, "Fuzzy Filter") ); public SegmentsStats() { fileSizes = new HashMap<>(); + remoteSegmentStats = new RemoteSegmentStats(); + replicationStats = new ReplicationStats(); } public SegmentsStats(StreamInput in) throws IOException { @@ -111,6 +124,13 @@ public SegmentsStats(StreamInput in) throws IOException { bitsetMemoryInBytes = in.readLong(); maxUnsafeAutoIdTimestamp = in.readLong(); fileSizes = in.readMap(StreamInput::readString, StreamInput::readLong); + if (in.getVersion().onOrAfter(Version.V_2_10_0)) { + remoteSegmentStats = in.readOptionalWriteable(RemoteSegmentStats::new); + replicationStats = in.readOptionalWriteable(ReplicationStats::new); + } else { + remoteSegmentStats = new RemoteSegmentStats(); + replicationStats = new ReplicationStats(); + } } public void add(long count) { @@ -133,6 +153,14 @@ public void addBitsetMemoryInBytes(long bitsetMemoryInBytes) { this.bitsetMemoryInBytes += bitsetMemoryInBytes; } + public void addRemoteSegmentStats(RemoteSegmentStats remoteSegmentStats) { + this.remoteSegmentStats.add(remoteSegmentStats); + } + + public void addReplicationStats(ReplicationStats replicationStats) { + this.replicationStats.add(replicationStats); + } + public void addFileSizes(final Map<String, Long> newFileSizes) { newFileSizes.forEach((k, v) -> this.fileSizes.merge(k, v, (a, b) -> { assert a != null; @@ -151,6 +179,8 @@ public void add(SegmentsStats mergeStats) { addVersionMapMemoryInBytes(mergeStats.versionMapMemoryInBytes); addBitsetMemoryInBytes(mergeStats.bitsetMemoryInBytes); addFileSizes(mergeStats.fileSizes); + addRemoteSegmentStats(mergeStats.remoteSegmentStats); + addReplicationStats(mergeStats.replicationStats); } /** @@ -198,6 +228,15 @@ public Map<String, Long> getFileSizes() { return Collections.unmodifiableMap(this.fileSizes); } + /** Returns remote_store based stats **/ + public RemoteSegmentStats getRemoteSegmentStats() { + return remoteSegmentStats; + } + + public ReplicationStats getReplicationStats() { + return replicationStats; + } + /** * Returns the max timestamp that is used to de-optimize documents with auto-generated IDs in the engine. 
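The SegmentsStats constructor above gates the new remote-segment and replication stats on stream version and substitutes empty defaults when reading from an older node; the writeTo() further below must mirror the same gate. The write half of that BWC pattern, abbreviated from this patch:

    import java.io.IOException;

    // Read and write gates must test the same version constant, or streams
    // between mixed-version nodes desynchronize.
    public void writeTo(StreamOutput out) throws IOException {
        // ... fields every version understands ...
        if (out.getVersion().onOrAfter(Version.V_2_10_0)) {
            out.writeOptionalWriteable(remoteSegmentStats);
            out.writeOptionalWriteable(replicationStats);
        }
        // an older peer never sees the new fields and never expects them
    }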
* This is used to ensure we don't add duplicate documents when we assume an append only case based on auto-generated IDs @@ -221,6 +260,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.humanReadableField(Fields.VERSION_MAP_MEMORY_IN_BYTES, Fields.VERSION_MAP_MEMORY, getVersionMapMemory()); builder.humanReadableField(Fields.FIXED_BIT_SET_MEMORY_IN_BYTES, Fields.FIXED_BIT_SET, getBitsetMemory()); builder.field(Fields.MAX_UNSAFE_AUTO_ID_TIMESTAMP, maxUnsafeAutoIdTimestamp); + remoteSegmentStats.toXContent(builder, params); + replicationStats.toXContent(builder, params); builder.startObject(Fields.FILE_SIZES); for (Map.Entry<String, Long> entry : fileSizes.entrySet()) { builder.startObject(entry.getKey()); @@ -287,6 +328,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(bitsetMemoryInBytes); out.writeLong(maxUnsafeAutoIdTimestamp); out.writeMap(this.fileSizes, StreamOutput::writeString, StreamOutput::writeLong); + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { + out.writeOptionalWriteable(remoteSegmentStats); + out.writeOptionalWriteable(replicationStats); + } } public void clearFileSizes() { diff --git a/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java b/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java index 417cdd5a8f030..dea389bb6a0ff 100644 --- a/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java +++ b/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java @@ -52,7 +52,7 @@ import org.apache.lucene.index.Terms; import org.apache.lucene.index.VectorEncoding; import org.apache.lucene.index.VectorSimilarityFunction; -import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.KnnCollector; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.opensearch.common.util.set.Sets; @@ -90,6 +90,7 @@ public final class TranslogLeafReader extends LeafReader { 0, VectorEncoding.FLOAT32, VectorSimilarityFunction.EUCLIDEAN, + false, false ); private static final FieldInfo FAKE_ROUTING_FIELD = new FieldInfo( @@ -108,6 +109,7 @@ public final class TranslogLeafReader extends LeafReader { 0, VectorEncoding.FLOAT32, VectorSimilarityFunction.EUCLIDEAN, + false, false ); private static final FieldInfo FAKE_ID_FIELD = new FieldInfo( @@ -126,6 +128,7 @@ public final class TranslogLeafReader extends LeafReader { 0, VectorEncoding.FLOAT32, VectorSimilarityFunction.EUCLIDEAN, + false, false ); public static Set<String> ALL_FIELD_NAMES = Sets.newHashSet(FAKE_SOURCE_FIELD.name, FAKE_ROUTING_FIELD.name, FAKE_ID_FIELD.name); @@ -271,12 +274,12 @@ public ByteVectorValues getByteVectorValues(String field) throws IOException { } @Override - public TopDocs searchNearestVectors(String field, float[] target, int k, Bits acceptDocs, int visitedLimit) throws IOException { + public void searchNearestVectors(String field, byte[] target, KnnCollector k, Bits acceptDocs) throws IOException { throw new UnsupportedOperationException(); } @Override - public TopDocs searchNearestVectors(String field, byte[] target, int k, Bits acceptDocs, int visitedLimit) throws IOException { + public void searchNearestVectors(String field, float[] target, KnnCollector k, Bits acceptDocs) throws IOException { throw new UnsupportedOperationException(); } } diff --git a/server/src/main/java/org/opensearch/index/engine/VersionConflictEngineException.java b/server/src/main/java/org/opensearch/index/engine/VersionConflictEngineException.java 
index 75cb19a5e7443..7804b8985e94d 100644 --- a/server/src/main/java/org/opensearch/index/engine/VersionConflictEngineException.java +++ b/server/src/main/java/org/opensearch/index/engine/VersionConflictEngineException.java @@ -32,9 +32,9 @@ package org.opensearch.index.engine; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.rest.RestStatus; +import org.opensearch.index.seqno.SequenceNumbers; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/index/fielddata/AbstractNumericDocValues.java b/server/src/main/java/org/opensearch/index/fielddata/AbstractNumericDocValues.java index a2a70e280187a..3a2504ce92158 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/AbstractNumericDocValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/AbstractNumericDocValues.java @@ -43,6 +43,9 @@ * aggregations, which only use {@link #advanceExact(int)} and * {@link #longValue()}. * + * In case when optimizations based on point values are used, the {@link #advance(int)} + * and, optionally, {@link #cost()} have to be implemented as well. + * * @opensearch.internal */ public abstract class AbstractNumericDocValues extends NumericDocValues { diff --git a/server/src/main/java/org/opensearch/index/fielddata/DocValueBits.java b/server/src/main/java/org/opensearch/index/fielddata/DocValueBits.java index 18fa26bd10623..cf62688b88b65 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/DocValueBits.java +++ b/server/src/main/java/org/opensearch/index/fielddata/DocValueBits.java @@ -32,13 +32,16 @@ package org.opensearch.index.fielddata; +import org.opensearch.common.annotation.PublicApi; + import java.io.IOException; /** * Base class for doc value bit sets * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class DocValueBits { /** diff --git a/server/src/main/java/org/opensearch/index/fielddata/FieldData.java b/server/src/main/java/org/opensearch/index/fielddata/FieldData.java index e09de53dc05f7..6db6bbccacae5 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/FieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/FieldData.java @@ -37,6 +37,7 @@ import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.util.BytesRef; import org.opensearch.common.Numbers; import org.opensearch.common.geo.GeoPoint; @@ -76,6 +77,10 @@ public double doubleValue() throws IOException { throw new UnsupportedOperationException(); } + @Override + public int advance(int target) throws IOException { + return DocIdSetIterator.NO_MORE_DOCS; + } }; } @@ -561,6 +566,10 @@ public boolean advanceExact(int doc) throws IOException { return values.advanceExact(doc); } + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } } /** @@ -591,6 +600,10 @@ public int docValueCount() { return values.docValueCount(); } + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } } /** @@ -622,6 +635,12 @@ public long longValue() throws IOException { public int docID() { return docID; } + + @Override + public int advance(int target) throws IOException { + docID = values.advance(target); + return docID; + } } /** @@ -683,6 +702,11 @@ 
public boolean advanceExact(int target) throws IOException { public long longValue() throws IOException { return value; } + + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } }; } @@ -715,6 +739,11 @@ public boolean advanceExact(int target) throws IOException { public long longValue() throws IOException { return value.longValue(); } + + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } }; } @@ -742,6 +771,11 @@ public boolean advanceExact(int target) throws IOException { public double doubleValue() throws IOException { return value; } + + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } }; } } diff --git a/server/src/main/java/org/opensearch/index/fielddata/FieldDataStats.java b/server/src/main/java/org/opensearch/index/fielddata/FieldDataStats.java index 896039313ea1e..85b435e969bfa 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/FieldDataStats.java +++ b/server/src/main/java/org/opensearch/index/fielddata/FieldDataStats.java @@ -34,10 +34,11 @@ import org.opensearch.common.FieldMemoryStats; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -47,8 +48,9 @@ /** * Encapsulates heap usage for field data * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class FieldDataStats implements Writeable, ToXContentFragment { private static final String FIELDDATA = "fielddata"; diff --git a/server/src/main/java/org/opensearch/index/fielddata/IndexFieldData.java b/server/src/main/java/org/opensearch/index/fielddata/IndexFieldData.java index 36e6a242ecdec..a63e1d418e3ba 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/IndexFieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/IndexFieldData.java @@ -38,6 +38,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldComparatorSource; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; @@ -48,9 +49,10 @@ import org.apache.lucene.util.BitSet; import org.apache.lucene.util.BytesRef; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.BigArrays; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; -import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.search.DocValueFormat; import org.opensearch.search.MultiValueMode; import org.opensearch.search.aggregations.support.ValuesSourceType; @@ -64,8 +66,9 @@ * Thread-safe utility class that allows to get per-segment values via the * {@link #load(LeafReaderContext)} method. 
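The advance(target) overrides added throughout FieldData above all follow one delegation pattern: every adapter must forward the skip to its underlying iterator, or the numeric comparator's point-values optimization falls back to the UnsupportedOperationException default. The pattern in isolation, with an illustrative wrapper name:

    import java.io.IOException;
    import org.apache.lucene.index.NumericDocValues;

    // Forwards every positioning call, including the newly required advance().
    final class ForwardingNumericDocValues extends NumericDocValues {
        private final NumericDocValues in;

        ForwardingNumericDocValues(NumericDocValues in) { this.in = in; }

        @Override public long longValue() throws IOException { return in.longValue(); }
        @Override public boolean advanceExact(int target) throws IOException { return in.advanceExact(target); }
        @Override public int docID() { return in.docID(); }
        @Override public int nextDoc() throws IOException { return in.nextDoc(); }
        @Override public int advance(int target) throws IOException { return in.advance(target); } // never swallow the skip
        @Override public long cost() { return in.cost(); }
    }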
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface IndexFieldData<FD extends LeafFieldData> { /** @@ -94,6 +97,13 @@ public interface IndexFieldData<FD extends LeafFieldData> { */ SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested, boolean reverse); + /** + * Returns the {@link SortField} to use for index sorting where we widen the sort field type to higher or equal bytes. + */ + default SortField wideSortField(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested, boolean reverse) { + return sortField(missingValue, sortMode, nested, reverse); + } + /** * Build a sort implementation specialized for aggregations. */ @@ -141,13 +151,23 @@ public void disableSkipping() { this.enableSkipping = false; } + protected Pruning filterPruning(Pruning pruning) { + if (this.enableSkipping) { + return pruning; + } + return Pruning.NONE; + } + /** * Simple wrapper class around a filter that matches parent documents * and a filter that matches child documents. For every root document R, * R will be in the parent filter and its children documents will be the * documents that are contained in the inner set between the previous * parent + 1, or 0 if there is no previous parent, and R (excluded). + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Nested { private final BitSetProducer rootFilter; @@ -286,8 +306,9 @@ public abstract BucketedSort newBucketedSort( /** * Base builder interface * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") interface Builder { IndexFieldData<?> build(IndexFieldDataCache cache, CircuitBreakerService breakerService); @@ -296,8 +317,9 @@ interface Builder { /** * Base Global field data class * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") interface Global<FD extends LeafFieldData> extends IndexFieldData<FD> { IndexFieldData<FD> loadGlobal(DirectoryReader indexReader); diff --git a/server/src/main/java/org/opensearch/index/fielddata/IndexFieldDataCache.java b/server/src/main/java/org/opensearch/index/fielddata/IndexFieldDataCache.java index 72df2453fc7fd..5cdcc3f82879f 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/IndexFieldDataCache.java +++ b/server/src/main/java/org/opensearch/index/fielddata/IndexFieldDataCache.java @@ -35,13 +35,15 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.util.Accountable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.index.shard.ShardId; /** * A simple field data cache abstraction on the *index* level. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface IndexFieldDataCache { <FD extends LeafFieldData, IFD extends IndexFieldData<FD>> FD load(LeafReaderContext context, IFD indexFieldData) throws Exception; @@ -62,8 +64,9 @@ <FD extends LeafFieldData, IFD extends IndexFieldData.Global<FD>> IFD load(Direc /** * The listener interface * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") interface Listener { /** diff --git a/server/src/main/java/org/opensearch/index/fielddata/IndexFieldDataService.java b/server/src/main/java/org/opensearch/index/fielddata/IndexFieldDataService.java index 0b370893cd90d..98900482176e5 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/IndexFieldDataService.java +++ b/server/src/main/java/org/opensearch/index/fielddata/IndexFieldDataService.java @@ -36,12 +36,12 @@ import org.opensearch.ExceptionsHelper; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.index.AbstractIndexComponent; import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; -import org.opensearch.core.index.shard.ShardId; -import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.search.lookup.SearchLookup; diff --git a/server/src/main/java/org/opensearch/index/fielddata/IndexNumericFieldData.java b/server/src/main/java/org/opensearch/index/fielddata/IndexNumericFieldData.java index ae8ffd8fe6b97..b0ff944d014de 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/IndexNumericFieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/IndexNumericFieldData.java @@ -42,6 +42,7 @@ import org.opensearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.opensearch.index.fielddata.fieldcomparator.DoubleValuesComparatorSource; import org.opensearch.index.fielddata.fieldcomparator.FloatValuesComparatorSource; +import org.opensearch.index.fielddata.fieldcomparator.HalfFloatValuesComparatorSource; import org.opensearch.index.fielddata.fieldcomparator.IntValuesComparatorSource; import org.opensearch.index.fielddata.fieldcomparator.LongValuesComparatorSource; import org.opensearch.index.fielddata.fieldcomparator.UnsignedLongValuesComparatorSource; @@ -151,6 +152,25 @@ public final SortField sortField(Object missingValue, MultiValueMode sortMode, N return sortField(getNumericType(), missingValue, sortMode, nested, reverse); } + @Override + public final SortField wideSortField(Object missingValue, MultiValueMode sortMode, Nested nested, boolean reverse) { + // This is to support backward compatibility, the minimum number of bytes prior to OpenSearch 2.7 were 16 bytes, + // i.e all sort fields were upcasted to Long/Double with 16 bytes. + // Now from OpenSearch 2.7, the minimum number of bytes for sort field is 8 bytes, so if it comes as SortField INT, + // we need to up cast it to LONG to support backward compatibility info stored in segment info + if (getNumericType().sortFieldType == SortField.Type.INT) { + XFieldComparatorSource source = comparatorSource(NumericType.LONG, missingValue, sortMode, nested); + SortedNumericSelector.Type selectorType = sortMode == MultiValueMode.MAX + ? 
SortedNumericSelector.Type.MAX + : SortedNumericSelector.Type.MIN; + SortField sortField = new SortedNumericSortField(getFieldName(), SortField.Type.LONG, reverse, selectorType); + sortField.setMissingValue(source.missingObject(missingValue, reverse)); + return sortField; + } + // If already more than INT, up cast not needed. + return sortField(getNumericType(), missingValue, sortMode, nested, reverse); + } + /** * Builds a {@linkplain BucketedSort} for the {@code targetNumericType}, * casting the values if their native type doesn't match. @@ -201,6 +221,8 @@ private XFieldComparatorSource comparatorSource( final XFieldComparatorSource source; switch (targetNumericType) { case HALF_FLOAT: + source = new HalfFloatValuesComparatorSource(this, missingValue, sortMode, nested); + break; case FLOAT: source = new FloatValuesComparatorSource(this, missingValue, sortMode, nested); break; @@ -224,7 +246,7 @@ private XFieldComparatorSource comparatorSource( source = new IntValuesComparatorSource(this, missingValue, sortMode, nested); } if (targetNumericType != getNumericType()) { - source.disableSkipping(); // disable skipping logic for caste of sort field + source.disableSkipping(); // disable skipping logic for cast of sort field } return source; } diff --git a/server/src/main/java/org/opensearch/index/fielddata/LeafFieldData.java b/server/src/main/java/org/opensearch/index/fielddata/LeafFieldData.java index 2982363937710..da7179254d70f 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/LeafFieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/LeafFieldData.java @@ -33,6 +33,7 @@ package org.opensearch.index.fielddata; import org.apache.lucene.util.Accountable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lease.Releasable; import org.opensearch.index.mapper.DocValueFetcher; import org.opensearch.search.DocValueFormat; @@ -42,8 +43,9 @@ /** * The thread safe {@link org.apache.lucene.index.LeafReader} level cache of the data. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface LeafFieldData extends Accountable, Releasable { /** diff --git a/server/src/main/java/org/opensearch/index/fielddata/MultiGeoPointValues.java b/server/src/main/java/org/opensearch/index/fielddata/MultiGeoPointValues.java index 2df4baeb8631b..3090b8e7f5b15 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/MultiGeoPointValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/MultiGeoPointValues.java @@ -73,7 +73,7 @@ protected MultiGeoPointValues() {} /** * Return the next value associated with the current document. This must not be * called more than {@link #docValueCount()} times. - * + * <p> * Note: the returned {@link GeoPoint} might be shared across invocations. * * @return the next value for the current docID set to {@link #advanceExact(int)}. diff --git a/server/src/main/java/org/opensearch/index/fielddata/NumericDoubleValues.java b/server/src/main/java/org/opensearch/index/fielddata/NumericDoubleValues.java index 94ea91849ff12..f69cfacaf35d4 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/NumericDoubleValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/NumericDoubleValues.java @@ -34,14 +34,16 @@ import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.search.DoubleValues; +import org.opensearch.common.annotation.PublicApi; import java.io.IOException; /** * A per-document numeric value. 
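The wideSortField logic above exists for index-sort backward compatibility: segments written before OpenSearch 2.7 always recorded numeric sorts with 8-byte types, so an INT sort must be replayed as LONG to match what is stored in the segment info. Reduced to its core, with an invented field name:

    import org.apache.lucene.search.SortField;
    import org.apache.lucene.search.SortedNumericSelector;
    import org.apache.lucene.search.SortedNumericSortField;

    // Upcast an INT index sort to LONG; types already >= LONG are left as-is.
    final class WideSortExample {
        static SortField widenIntSort(boolean reverse, boolean max) {
            SortedNumericSelector.Type selector = max
                ? SortedNumericSelector.Type.MAX
                : SortedNumericSelector.Type.MIN;
            return new SortedNumericSortField("my_int_field", SortField.Type.LONG, reverse, selector);
        }
    }

The engine additionally carries the missing value across via source.missingObject(missingValue, reverse), omitted here for brevity.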
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class NumericDoubleValues extends DoubleValues { /** Sole constructor. (For invocation by subclass @@ -69,6 +71,11 @@ public long longValue() throws IOException { public int docID() { return docID; } + + @Override + public int advance(int target) throws IOException { + return NumericDoubleValues.this.advance(target); + } }; } @@ -93,6 +100,23 @@ public long longValue() throws IOException { public int docID() { return docID; } + + @Override + public int advance(int target) throws IOException { + return NumericDoubleValues.this.advance(target); + } }; } + + /** + * Advances to the first beyond the current whose document number is greater than or equal to + * <i>target</i>, and returns the document number itself. Exhausts the iterator and returns {@link + * org.apache.lucene.search.DocIdSetIterator#NO_MORE_DOCS} if <i>target</i> is greater than the highest document number in the set. + * + * This method is being used by {@link org.apache.lucene.search.comparators.NumericComparator.NumericLeafComparator} when point values optimization kicks + * in and is implemented by most numeric types. + */ + public int advance(int target) throws IOException { + throw new UnsupportedOperationException(); + } } diff --git a/server/src/main/java/org/opensearch/index/fielddata/RamAccountingTermsEnum.java b/server/src/main/java/org/opensearch/index/fielddata/RamAccountingTermsEnum.java index cd858278afaa3..517361e75d3ea 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/RamAccountingTermsEnum.java +++ b/server/src/main/java/org/opensearch/index/fielddata/RamAccountingTermsEnum.java @@ -34,7 +34,7 @@ import org.apache.lucene.index.FilteredTermsEnum; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.util.BytesRef; -import org.opensearch.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.CircuitBreaker; import org.opensearch.index.fielddata.plain.AbstractIndexOrdinalsFieldData; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/index/fielddata/ScriptDocValues.java b/server/src/main/java/org/opensearch/index/fielddata/ScriptDocValues.java index 1d1524e223f00..eb3462743593d 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/ScriptDocValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/ScriptDocValues.java @@ -37,16 +37,17 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.opensearch.common.Numbers; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.geo.GeoUtils; import org.opensearch.common.time.DateUtils; import org.opensearch.geometry.utils.Geohash; -import org.opensearch.script.JodaCompatibleZonedDateTime; import java.io.IOException; import java.math.BigInteger; import java.time.Instant; import java.time.ZoneOffset; +import java.time.ZonedDateTime; import java.util.AbstractList; import java.util.Arrays; import java.util.Comparator; @@ -55,13 +56,14 @@ /** * Script level doc values, the assumption is that any implementation will * implement a {@link Longs#getValue getValue} method. - * + * <p> * Implementations should not internally re-use objects for the values that they * return as a single {@link ScriptDocValues} instance can be reused to return * values form multiple documents. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class ScriptDocValues<T> extends AbstractList<T> { /** @@ -159,7 +161,7 @@ public int size() { * * @opensearch.internal */ - public static final class Dates extends ScriptDocValues<JodaCompatibleZonedDateTime> { + public static final class Dates extends ScriptDocValues<ZonedDateTime> { private final SortedNumericDocValues in; private final boolean isNanos; @@ -167,7 +169,7 @@ public static final class Dates extends ScriptDocValues<JodaCompatibleZonedDateT /** * Values wrapped in {@link java.time.ZonedDateTime} objects. */ - private JodaCompatibleZonedDateTime[] dates; + private ZonedDateTime[] dates; private int count; public Dates(SortedNumericDocValues in, boolean isNanos) { @@ -179,12 +181,12 @@ public Dates(SortedNumericDocValues in, boolean isNanos) { * Fetch the first field value or 0 millis after epoch if there are no * in. */ - public JodaCompatibleZonedDateTime getValue() { + public ZonedDateTime getValue() { return get(0); } @Override - public JodaCompatibleZonedDateTime get(int index) { + public ZonedDateTime get(int index) { if (count == 0) { throw new IllegalStateException( "A document doesn't have a value for a field! " @@ -223,13 +225,13 @@ void refreshArray() throws IOException { } if (dates == null || count > dates.length) { // Happens for the document. We delay allocating dates so we can allocate it with a reasonable size. - dates = new JodaCompatibleZonedDateTime[count]; + dates = new ZonedDateTime[count]; } for (int i = 0; i < count; ++i) { if (isNanos) { - dates[i] = new JodaCompatibleZonedDateTime(DateUtils.toInstant(in.nextValue()), ZoneOffset.UTC); + dates[i] = ZonedDateTime.ofInstant(DateUtils.toInstant(in.nextValue()), ZoneOffset.UTC); } else { - dates[i] = new JodaCompatibleZonedDateTime(Instant.ofEpochMilli(in.nextValue()), ZoneOffset.UTC); + dates[i] = ZonedDateTime.ofInstant(Instant.ofEpochMilli(in.nextValue()), ZoneOffset.UTC); } } } @@ -589,11 +591,11 @@ public BytesRef get(int index) { + "Use doc[<field>].size()==0 to check if a document is missing a field!" ); } - /** - * We need to make a copy here because {@link BinaryScriptDocValues} might reuse the - * returned value and the same instance might be used to - * return values from multiple documents. - **/ + /* + We need to make a copy here because {@link BinaryScriptDocValues} might reuse the + returned value and the same instance might be used to + return values from multiple documents. 
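The Dates rework above drops the JodaCompatibleZonedDateTime shim in favor of plain java.time. The two resolutions convert as follows; Instant.ofEpochSecond(0, nanos) is used here as a self-contained stand-in for the patch's DateUtils.toInstant(nanos):

    import java.time.Instant;
    import java.time.ZoneOffset;
    import java.time.ZonedDateTime;

    // Epoch millis and epoch nanos map through different Instant factories,
    // matching the two branches of Dates.refreshArray().
    final class EpochToZonedDateTime {
        static ZonedDateTime fromMillis(long epochMillis) {
            return ZonedDateTime.ofInstant(Instant.ofEpochMilli(epochMillis), ZoneOffset.UTC);
        }

        static ZonedDateTime fromNanos(long epochNanos) {
            return ZonedDateTime.ofInstant(Instant.ofEpochSecond(0, epochNanos), ZoneOffset.UTC);
        }
    }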
+ */ return values[index].toBytesRef(); } diff --git a/server/src/main/java/org/opensearch/index/fielddata/ShardFieldData.java b/server/src/main/java/org/opensearch/index/fielddata/ShardFieldData.java index 138b417571784..d8f49ba3d9c60 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/ShardFieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/ShardFieldData.java @@ -34,10 +34,11 @@ import org.apache.lucene.util.Accountable; import org.opensearch.common.FieldMemoryStats; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.metrics.CounterMetric; import org.opensearch.common.regex.Regex; -import org.opensearch.common.util.CollectionUtils; import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.core.index.shard.ShardId; import java.util.HashMap; @@ -47,8 +48,9 @@ /** * On heap field data for shards * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ShardFieldData implements IndexFieldDataCache.Listener { private final CounterMetric evictionsMetric = new CounterMetric(); diff --git a/server/src/main/java/org/opensearch/index/fielddata/SingletonSortedNumericDoubleValues.java b/server/src/main/java/org/opensearch/index/fielddata/SingletonSortedNumericDoubleValues.java index 4ee494ffb30aa..816445bb319f1 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/SingletonSortedNumericDoubleValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/SingletonSortedNumericDoubleValues.java @@ -69,4 +69,8 @@ public double nextValue() throws IOException { return in.doubleValue(); } + @Override + public int advance(int target) throws IOException { + return in.advance(target); + } } diff --git a/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsNumericDocValues.java b/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsNumericDocValues.java index 39aca38c331ea..e2739e462dea5 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsNumericDocValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsNumericDocValues.java @@ -74,4 +74,9 @@ public NumericDoubleValues getDoubleValues() { return values; } + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } + } diff --git a/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsToNumericDoubleValues.java b/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsToNumericDoubleValues.java index 150e114d342de..98a44c246f654 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsToNumericDoubleValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsToNumericDoubleValues.java @@ -67,4 +67,8 @@ public NumericDocValues getLongValues() { return values; } + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } } diff --git a/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsToSortedNumericDoubleValues.java b/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsToSortedNumericDoubleValues.java index 1bae845c9b0d2..279a78ac51adf 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsToSortedNumericDoubleValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/SortableLongBitsToSortedNumericDoubleValues.java @@ -72,4 +72,8 @@ public 
SortedNumericDocValues getLongValues() { return values; } + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } } diff --git a/server/src/main/java/org/opensearch/index/fielddata/SortedBinaryDocValues.java b/server/src/main/java/org/opensearch/index/fielddata/SortedBinaryDocValues.java index bd66d1d94f33c..ba1b890f1ad1a 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/SortedBinaryDocValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/SortedBinaryDocValues.java @@ -33,6 +33,7 @@ package org.opensearch.index.fielddata; import org.apache.lucene.util.BytesRef; +import org.opensearch.common.annotation.PublicApi; import java.io.IOException; @@ -41,8 +42,9 @@ * according to {@link BytesRef#compareTo(BytesRef)}. * There might be dups however. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") // TODO: Should it expose a count (current approach) or return null when there are no more values? public abstract class SortedBinaryDocValues { diff --git a/server/src/main/java/org/opensearch/index/fielddata/SortedNumericDoubleValues.java b/server/src/main/java/org/opensearch/index/fielddata/SortedNumericDoubleValues.java index 91d28113ae783..be9064751b5f0 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/SortedNumericDoubleValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/SortedNumericDoubleValues.java @@ -33,14 +33,16 @@ package org.opensearch.index.fielddata; import org.apache.lucene.index.SortedNumericDocValues; +import org.opensearch.common.annotation.PublicApi; import java.io.IOException; /** * Clone of {@link SortedNumericDocValues} for double values. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class SortedNumericDoubleValues { /** Sole constructor. (For invocation by subclass @@ -68,4 +70,15 @@ protected SortedNumericDoubleValues() {} */ public abstract int docValueCount(); + /** + * Advances to the first beyond the current whose document number is greater than or equal to + * <i>target</i>, and returns the document number itself. Exhausts the iterator and returns {@link + * org.apache.lucene.search.DocIdSetIterator#NO_MORE_DOCS} if <i>target</i> is greater than the highest document number in the set. + * + * This method is used by {@link org.apache.lucene.search.comparators.NumericComparator.NumericLeafComparator} when the point values optimization kicks + * in, and is implemented by most numeric types.
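Because the base implementation throws UnsupportedOperationException, callers can only skip when the concrete doc-values implementation overrides advance, which the delegating wrappers in this patch do. A hypothetical consumer, shown only to illustrate the assumed iteration contract (advance positions the iterator on a document that has values, like DocIdSetIterator#advance):

import java.io.IOException;

import org.apache.lucene.search.DocIdSetIterator;
import org.opensearch.index.fielddata.SortedNumericDoubleValues;

final class AdvanceSketch {
    private AdvanceSketch() {}

    // Sums the first (smallest) value of every matching doc from `target` onward.
    static double sumFirstValuesFrom(SortedNumericDoubleValues values, int target) throws IOException {
        double sum = 0d;
        for (int doc = values.advance(target); doc != DocIdSetIterator.NO_MORE_DOCS; doc = values.advance(doc + 1)) {
            sum += values.nextValue(); // first of docValueCount() ordered values
        }
        return sum;
    }
}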
+ */ + public int advance(int target) throws IOException { + throw new UnsupportedOperationException(); + } } diff --git a/server/src/main/java/org/opensearch/index/fielddata/UnsignedLongToNumericDoubleValues.java b/server/src/main/java/org/opensearch/index/fielddata/UnsignedLongToNumericDoubleValues.java index 8d17146760d9e..d9e9dd6a293fd 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/UnsignedLongToNumericDoubleValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/UnsignedLongToNumericDoubleValues.java @@ -42,4 +42,8 @@ public NumericDocValues getLongValues() { return values; } + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } } diff --git a/server/src/main/java/org/opensearch/index/fielddata/UnsignedLongToSortedNumericDoubleValues.java b/server/src/main/java/org/opensearch/index/fielddata/UnsignedLongToSortedNumericDoubleValues.java index 90b49e19a8954..63c7e6162cc55 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/UnsignedLongToSortedNumericDoubleValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/UnsignedLongToSortedNumericDoubleValues.java @@ -47,4 +47,8 @@ public SortedNumericDocValues getLongValues() { return values; } + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } } diff --git a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java index 430a1f90ff3a4..4c6eba2f0c7ec 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java +++ b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java @@ -38,6 +38,7 @@ import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldComparator; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.Scorable; import org.apache.lucene.search.SortField; import org.apache.lucene.search.comparators.TermOrdValComparator; @@ -91,10 +92,10 @@ protected SortedBinaryDocValues getValues(LeafReaderContext context) throws IOEx return indexFieldData.load(context).getBytesValues(); } - protected void setScorer(Scorable scorer) {} + protected void setScorer(Scorable scorer, LeafReaderContext context) {} @Override - public FieldComparator<?> newComparator(String fieldname, int numHits, boolean enableSkipping, boolean reversed) { + public FieldComparator<?> newComparator(String fieldname, int numHits, Pruning pruning, boolean reversed) { assert indexFieldData == null || fieldname.equals(indexFieldData.getFieldName()); final boolean sortMissingLast = sortMissingLast(missingValue) ^ reversed; @@ -105,7 +106,7 @@ public FieldComparator<?> newComparator(String fieldname, int numHits, boolean e indexFieldData.getFieldName(), sortMissingLast, reversed, - enableSkipping + filterPruning(pruning) ) { @Override @@ -134,9 +135,11 @@ protected SortedDocValues getSortedDocValues(LeafReaderContext context, String f } return new FieldComparator.TermValComparator(numHits, null, sortMissingLast) { + LeafReaderContext leafReaderContext; @Override protected BinaryDocValues getBinaryDocValues(LeafReaderContext context, String field) throws IOException { + leafReaderContext = context; final SortedBinaryDocValues values = getValues(context); final BinaryDocValues 
selectedValues; if (nested == null) { @@ -152,7 +155,7 @@ protected BinaryDocValues getBinaryDocValues(LeafReaderContext context, String f @Override public void setScorer(Scorable scorer) { - BytesRefFieldComparatorSource.this.setScorer(scorer); + BytesRefFieldComparatorSource.this.setScorer(scorer, leafReaderContext); } }; diff --git a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java index 34e86070054c9..fd4fac3877f2f 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java +++ b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java @@ -37,6 +37,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.LeafFieldComparator; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.Scorable; import org.apache.lucene.search.SortField; import org.apache.lucene.search.comparators.DoubleComparator; @@ -95,16 +96,14 @@ private NumericDoubleValues getNumericDocValues(LeafReaderContext context, doubl } } - protected void setScorer(Scorable scorer) {} + protected void setScorer(Scorable scorer, LeafReaderContext context) {} @Override - public FieldComparator<?> newComparator(String fieldname, int numHits, boolean enableSkipping, boolean reversed) { + public FieldComparator<?> newComparator(String fieldname, int numHits, Pruning pruning, boolean reversed) { assert indexFieldData == null || fieldname.equals(indexFieldData.getFieldName()); final double dMissingValue = (Double) missingObject(missingValue, reversed); - // NOTE: it's important to pass null as a missing value in the constructor so that - // the comparator doesn't check docsWithField since we replace missing values in select() - return new DoubleComparator(numHits, fieldname, null, reversed, enableSkipping && this.enableSkipping) { + return new DoubleComparator(numHits, fieldname, dMissingValue, reversed, filterPruning(pruning)) { @Override public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { return new DoubleLeafComparator(context) { @@ -115,7 +114,7 @@ protected NumericDocValues getNumericDocValues(LeafReaderContext context, String @Override public void setScorer(Scorable scorer) { - DoubleValuesComparatorSource.this.setScorer(scorer); + DoubleValuesComparatorSource.this.setScorer(scorer, context); } }; } diff --git a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/FloatValuesComparatorSource.java b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/FloatValuesComparatorSource.java index 04a34cd418520..b106ba268bcb9 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/FloatValuesComparatorSource.java +++ b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/FloatValuesComparatorSource.java @@ -36,6 +36,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.LeafFieldComparator; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.Scorable; import org.apache.lucene.search.SortField; import org.apache.lucene.search.comparators.FloatComparator; @@ -91,13 +92,11 @@ private NumericDoubleValues getNumericDocValues(LeafReaderContext context, float } @Override - public 
FieldComparator<?> newComparator(String fieldname, int numHits, boolean enableSkipping, boolean reversed) { + public FieldComparator<?> newComparator(String fieldname, int numHits, Pruning pruning, boolean reversed) { assert indexFieldData == null || fieldname.equals(indexFieldData.getFieldName()); final float fMissingValue = (Float) missingObject(missingValue, reversed); - // NOTE: it's important to pass null as a missing value in the constructor so that - // the comparator doesn't check docsWithField since we replace missing values in select() - return new FloatComparator(numHits, fieldname, null, reversed, enableSkipping && this.enableSkipping) { + return new FloatComparator(numHits, fieldname, fMissingValue, reversed, filterPruning(pruning)) { @Override public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { return new FloatLeafComparator(context) { diff --git a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/HalfFloatValuesComparatorSource.java b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/HalfFloatValuesComparatorSource.java new file mode 100644 index 0000000000000..e2e56dcb14fdf --- /dev/null +++ b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/HalfFloatValuesComparatorSource.java @@ -0,0 +1,74 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.fielddata.fieldcomparator; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.FieldComparator; +import org.apache.lucene.search.LeafFieldComparator; +import org.apache.lucene.search.Pruning; +import org.apache.lucene.util.BitSet; +import org.opensearch.index.fielddata.FieldData; +import org.opensearch.index.fielddata.IndexNumericFieldData; +import org.opensearch.index.fielddata.NumericDoubleValues; +import org.opensearch.index.fielddata.SortedNumericDoubleValues; +import org.opensearch.index.search.comparators.HalfFloatComparator; +import org.opensearch.search.MultiValueMode; + +import java.io.IOException; + +/** + * Comparator source for half_float values. 
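These comparator-source hunks all track the same Lucene change: newComparator now receives a Pruning enum (NONE, GREATER_THAN, GREATER_THAN_OR_EQUAL_TO) instead of an enableSkipping boolean, and the real missing value is handed to the Lucene comparator rather than null. A rough sketch of the new shape, with the field-data wiring stubbed out; numHits, field name, and missing value are illustrative, and filterPruning is the OpenSearch-side hook visible in the hunks above:

import java.io.IOException;

import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.search.FieldComparator;
import org.apache.lucene.search.LeafFieldComparator;
import org.apache.lucene.search.Pruning;
import org.apache.lucene.search.comparators.FloatComparator;

class PruningSketch {
    // Pruning.GREATER_THAN lets the comparator skip docs that cannot beat the
    // current bottom of the priority queue.
    FieldComparator<?> newComparator() {
        return new FloatComparator(10, "price", 0.0f, false, Pruning.GREATER_THAN) {
            @Override
            public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException {
                return new FloatLeafComparator(context) {
                    @Override
                    protected NumericDocValues getNumericDocValues(LeafReaderContext ctx, String field) throws IOException {
                        return DocValues.emptyNumeric(); // a real source loads field data here
                    }
                };
            }
        };
    }
}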
+ * + * @opensearch.internal + */ +public class HalfFloatValuesComparatorSource extends FloatValuesComparatorSource { + private final IndexNumericFieldData indexFieldData; + + public HalfFloatValuesComparatorSource( + IndexNumericFieldData indexFieldData, + Object missingValue, + MultiValueMode sortMode, + Nested nested + ) { + super(indexFieldData, missingValue, sortMode, nested); + this.indexFieldData = indexFieldData; + } + + @Override + public FieldComparator<?> newComparator(String fieldname, int numHits, Pruning pruning, boolean reversed) { + assert indexFieldData == null || fieldname.equals(indexFieldData.getFieldName()); + + final float fMissingValue = (Float) missingObject(missingValue, reversed); + return new HalfFloatComparator(numHits, fieldname, fMissingValue, reversed, filterPruning(pruning)) { + @Override + public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { + return new HalfFloatLeafComparator(context) { + @Override + protected NumericDocValues getNumericDocValues(LeafReaderContext context, String field) throws IOException { + return HalfFloatValuesComparatorSource.this.getNumericDocValues(context, fMissingValue).getRawFloatValues(); + } + }; + } + }; + } + + private NumericDoubleValues getNumericDocValues(LeafReaderContext context, float missingValue) throws IOException { + final SortedNumericDoubleValues values = indexFieldData.load(context).getDoubleValues(); + if (nested == null) { + return FieldData.replaceMissing(sortMode.select(values), missingValue); + } else { + final BitSet rootDocs = nested.rootDocs(context); + final DocIdSetIterator innerDocs = nested.innerDocs(context); + final int maxChildren = nested.getNestedSort() != null ? nested.getNestedSort().getMaxChildren() : Integer.MAX_VALUE; + return sortMode.select(values, missingValue, rootDocs, innerDocs, context.reader().maxDoc(), maxChildren); + } + } +} diff --git a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/IntValuesComparatorSource.java b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/IntValuesComparatorSource.java index d5ea1eaf7263d..8f540cc6ae9d9 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/IntValuesComparatorSource.java +++ b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/IntValuesComparatorSource.java @@ -19,6 +19,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.LeafFieldComparator; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.SortField; import org.apache.lucene.search.comparators.IntComparator; import org.apache.lucene.util.BitSet; @@ -70,13 +71,11 @@ private NumericDocValues getNumericDocValues(LeafReaderContext context, int miss } @Override - public FieldComparator<?> newComparator(String fieldname, int numHits, boolean enableSkipping, boolean reversed) { + public FieldComparator<?> newComparator(String fieldname, int numHits, Pruning pruning, boolean reversed) { assert indexFieldData == null || fieldname.equals(indexFieldData.getFieldName()); final int iMissingValue = (Integer) missingObject(missingValue, reversed); - // NOTE: it's important to pass null as a missing value in the constructor so that - // the comparator doesn't check docsWithField since we replace missing values in select() - return new IntComparator(numHits, fieldname, null, reversed, enableSkipping && this.enableSkipping) { + return new IntComparator(numHits, fieldname, 
iMissingValue, reversed, filterPruning(pruning)) { @Override public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { return new IntLeafComparator(context) { diff --git a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java index 43e033dd59716..3666cd8d6dfea 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java +++ b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java @@ -37,15 +37,16 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.LeafFieldComparator; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.SortField; import org.apache.lucene.search.comparators.LongComparator; import org.apache.lucene.util.BitSet; import org.opensearch.common.Nullable; import org.opensearch.common.util.BigArrays; -import org.opensearch.index.fielddata.LeafNumericFieldData; import org.opensearch.index.fielddata.FieldData; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexNumericFieldData; +import org.opensearch.index.fielddata.LeafNumericFieldData; import org.opensearch.index.fielddata.plain.SortedNumericIndexFieldData; import org.opensearch.search.DocValueFormat; import org.opensearch.search.MultiValueMode; @@ -114,13 +115,11 @@ private NumericDocValues getNumericDocValues(LeafReaderContext context, long mis } @Override - public FieldComparator<?> newComparator(String fieldname, int numHits, boolean enableSkipping, boolean reversed) { + public FieldComparator<?> newComparator(String fieldname, int numHits, Pruning pruning, boolean reversed) { assert indexFieldData == null || fieldname.equals(indexFieldData.getFieldName()); final long lMissingValue = (Long) missingObject(missingValue, reversed); - // NOTE: it's important to pass null as a missing value in the constructor so that - // the comparator doesn't check docsWithField since we replace missing values in select() - return new LongComparator(numHits, fieldname, null, reversed, enableSkipping && this.enableSkipping) { + return new LongComparator(numHits, fieldname, lMissingValue, reversed, filterPruning(pruning)) { @Override public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { return new LongLeafComparator(context) { diff --git a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/UnsignedLongValuesComparatorSource.java b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/UnsignedLongValuesComparatorSource.java index be56b50179114..3714561b63e44 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/UnsignedLongValuesComparatorSource.java +++ b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/UnsignedLongValuesComparatorSource.java @@ -14,15 +14,16 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.LeafFieldComparator; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.SortField; import org.apache.lucene.util.BitSet; import org.opensearch.common.Nullable; import org.opensearch.common.Numbers; import org.opensearch.common.util.BigArrays; -import org.opensearch.index.fielddata.LeafNumericFieldData; 
import org.opensearch.index.fielddata.FieldData; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexNumericFieldData; +import org.opensearch.index.fielddata.LeafNumericFieldData; import org.opensearch.index.search.comparators.UnsignedLongComparator; import org.opensearch.search.DocValueFormat; import org.opensearch.search.MultiValueMode; @@ -88,11 +89,11 @@ public Object missingObject(Object missingValue, boolean reversed) { } @Override - public FieldComparator<?> newComparator(String fieldname, int numHits, boolean enableSkipping, boolean reversed) { + public FieldComparator<?> newComparator(String fieldname, int numHits, Pruning pruning, boolean reversed) { assert indexFieldData == null || fieldname.equals(indexFieldData.getFieldName()); final BigInteger ulMissingValue = (BigInteger) missingObject(missingValue, reversed); - return new UnsignedLongComparator(numHits, fieldname, null, reversed, enableSkipping && this.enableSkipping) { + return new UnsignedLongComparator(numHits, fieldname, ulMissingValue, reversed, filterPruning(pruning)) { @Override public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { return new UnsignedLongLeafComparator(context) { diff --git a/server/src/main/java/org/opensearch/index/fielddata/ordinals/GlobalOrdinalsBuilder.java b/server/src/main/java/org/opensearch/index/fielddata/ordinals/GlobalOrdinalsBuilder.java index e136e649d088a..06219b69266b5 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/ordinals/GlobalOrdinalsBuilder.java +++ b/server/src/main/java/org/opensearch/index/fielddata/ordinals/GlobalOrdinalsBuilder.java @@ -39,13 +39,13 @@ import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.packed.PackedInts; -import org.opensearch.common.breaker.CircuitBreaker; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.index.fielddata.IndexOrdinalsFieldData; import org.opensearch.index.fielddata.LeafOrdinalsFieldData; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.index.fielddata.plain.AbstractLeafOrdinalsFieldData; -import org.opensearch.indices.breaker.CircuitBreakerService; import java.io.IOException; import java.util.Collection; diff --git a/server/src/main/java/org/opensearch/index/fielddata/plain/AbstractGeoShapeIndexFieldData.java b/server/src/main/java/org/opensearch/index/fielddata/plain/AbstractGeoShapeIndexFieldData.java index 2c6aabf04d4ee..53d60d56bcd07 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/plain/AbstractGeoShapeIndexFieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/plain/AbstractGeoShapeIndexFieldData.java @@ -12,10 +12,10 @@ import org.apache.lucene.search.SortField; import org.opensearch.common.Nullable; import org.opensearch.common.util.BigArrays; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexFieldDataCache; import org.opensearch.index.fielddata.LeafGeoShapeFieldData; -import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.search.DocValueFormat; import org.opensearch.search.MultiValueMode; import org.opensearch.search.aggregations.support.ValuesSourceType; diff --git 
a/server/src/main/java/org/opensearch/index/fielddata/plain/AbstractIndexOrdinalsFieldData.java b/server/src/main/java/org/opensearch/index/fielddata/plain/AbstractIndexOrdinalsFieldData.java index 0b3a5e09064ab..b9f7f3824feb4 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/plain/AbstractIndexOrdinalsFieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/plain/AbstractIndexOrdinalsFieldData.java @@ -41,6 +41,7 @@ import org.apache.lucene.index.TermsEnum; import org.apache.lucene.util.BytesRef; import org.opensearch.OpenSearchException; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.index.fielddata.IndexFieldDataCache; import org.opensearch.index.fielddata.IndexOrdinalsFieldData; import org.opensearch.index.fielddata.LeafOrdinalsFieldData; @@ -48,7 +49,6 @@ import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.index.fielddata.ordinals.GlobalOrdinalsBuilder; import org.opensearch.index.fielddata.ordinals.GlobalOrdinalsIndexFieldData; -import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.search.aggregations.support.ValuesSourceType; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/index/fielddata/plain/AbstractLatLonPointIndexFieldData.java b/server/src/main/java/org/opensearch/index/fielddata/plain/AbstractLatLonPointIndexFieldData.java index 6e9a13074a445..be6d4d2aae3a1 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/plain/AbstractLatLonPointIndexFieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/plain/AbstractLatLonPointIndexFieldData.java @@ -39,12 +39,12 @@ import org.apache.lucene.search.SortField; import org.opensearch.common.Nullable; import org.opensearch.common.util.BigArrays; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.opensearch.index.fielddata.IndexFieldDataCache; import org.opensearch.index.fielddata.IndexGeoPointFieldData; import org.opensearch.index.fielddata.LeafGeoPointFieldData; -import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.search.DocValueFormat; import org.opensearch.search.MultiValueMode; import org.opensearch.search.aggregations.support.ValuesSourceType; diff --git a/server/src/main/java/org/opensearch/index/fielddata/plain/AbstractLeafGeoPointFieldData.java b/server/src/main/java/org/opensearch/index/fielddata/plain/AbstractLeafGeoPointFieldData.java index 89c462267a4d2..7b5c70e0ac756 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/plain/AbstractLeafGeoPointFieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/plain/AbstractLeafGeoPointFieldData.java @@ -32,8 +32,8 @@ package org.opensearch.index.fielddata.plain; import org.apache.lucene.util.Accountable; -import org.opensearch.index.fielddata.LeafGeoPointFieldData; import org.opensearch.index.fielddata.FieldData; +import org.opensearch.index.fielddata.LeafGeoPointFieldData; import org.opensearch.index.fielddata.MultiGeoPointValues; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.index.fielddata.SortedBinaryDocValues; diff --git a/server/src/main/java/org/opensearch/index/fielddata/plain/AbstractLeafOrdinalsFieldData.java b/server/src/main/java/org/opensearch/index/fielddata/plain/AbstractLeafOrdinalsFieldData.java index 0e16dbbd06271..8c319669f53ba 100644 
--- a/server/src/main/java/org/opensearch/index/fielddata/plain/AbstractLeafOrdinalsFieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/plain/AbstractLeafOrdinalsFieldData.java @@ -35,8 +35,8 @@ import org.apache.lucene.index.DocValues; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.util.Accountable; -import org.opensearch.index.fielddata.LeafOrdinalsFieldData; import org.opensearch.index.fielddata.FieldData; +import org.opensearch.index.fielddata.LeafOrdinalsFieldData; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.index.fielddata.SortedBinaryDocValues; diff --git a/server/src/main/java/org/opensearch/index/fielddata/plain/BinaryDVLeafFieldData.java b/server/src/main/java/org/opensearch/index/fielddata/plain/BinaryDVLeafFieldData.java index dd8235977af8f..ec71f1d5dd9dc 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/plain/BinaryDVLeafFieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/plain/BinaryDVLeafFieldData.java @@ -36,8 +36,8 @@ import org.apache.lucene.index.DocValues; import org.apache.lucene.index.LeafReader; import org.apache.lucene.util.Accountable; -import org.opensearch.index.fielddata.LeafFieldData; import org.opensearch.index.fielddata.FieldData; +import org.opensearch.index.fielddata.LeafFieldData; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.index.fielddata.ScriptDocValues.Strings; import org.opensearch.index.fielddata.SortedBinaryDocValues; diff --git a/server/src/main/java/org/opensearch/index/fielddata/plain/BinaryIndexFieldData.java b/server/src/main/java/org/opensearch/index/fielddata/plain/BinaryIndexFieldData.java index 3fbc605e35e1d..cfa73be8a3670 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/plain/BinaryIndexFieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/plain/BinaryIndexFieldData.java @@ -36,11 +36,11 @@ import org.apache.lucene.search.SortField; import org.opensearch.common.Nullable; import org.opensearch.common.util.BigArrays; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.opensearch.index.fielddata.IndexFieldDataCache; import org.opensearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource; -import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.search.DocValueFormat; import org.opensearch.search.MultiValueMode; import org.opensearch.search.aggregations.support.ValuesSourceType; diff --git a/server/src/main/java/org/opensearch/index/fielddata/plain/BytesBinaryIndexFieldData.java b/server/src/main/java/org/opensearch/index/fielddata/plain/BytesBinaryIndexFieldData.java index e1a08344e68c7..86f5f2d04e14c 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/plain/BytesBinaryIndexFieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/plain/BytesBinaryIndexFieldData.java @@ -37,10 +37,10 @@ import org.apache.lucene.search.SortField; import org.opensearch.common.Nullable; import org.opensearch.common.util.BigArrays; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.opensearch.index.fielddata.IndexFieldDataCache; -import org.opensearch.indices.breaker.CircuitBreakerService; 
import org.opensearch.search.DocValueFormat; import org.opensearch.search.MultiValueMode; import org.opensearch.search.aggregations.support.ValuesSourceType; diff --git a/server/src/main/java/org/opensearch/index/fielddata/plain/ConstantIndexFieldData.java b/server/src/main/java/org/opensearch/index/fielddata/plain/ConstantIndexFieldData.java index 2b08498f2b58b..a9eea30dceca5 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/plain/ConstantIndexFieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/plain/ConstantIndexFieldData.java @@ -42,6 +42,7 @@ import org.apache.lucene.util.BytesRef; import org.opensearch.common.Nullable; import org.opensearch.common.util.BigArrays; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.index.fielddata.AbstractSortedDocValues; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; @@ -49,7 +50,6 @@ import org.opensearch.index.fielddata.IndexOrdinalsFieldData; import org.opensearch.index.fielddata.LeafOrdinalsFieldData; import org.opensearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource; -import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.search.DocValueFormat; import org.opensearch.search.MultiValueMode; import org.opensearch.search.aggregations.support.ValuesSourceType; diff --git a/server/src/main/java/org/opensearch/index/fielddata/plain/PagedBytesIndexFieldData.java b/server/src/main/java/org/opensearch/index/fielddata/plain/PagedBytesIndexFieldData.java index 8f6a1e46ef417..bae7130dca4b3 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/plain/PagedBytesIndexFieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/plain/PagedBytesIndexFieldData.java @@ -44,8 +44,9 @@ import org.apache.lucene.util.packed.PackedInts; import org.apache.lucene.util.packed.PackedLongValues; import org.opensearch.common.Nullable; -import org.opensearch.common.breaker.CircuitBreaker; import org.opensearch.common.util.BigArrays; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.opensearch.index.fielddata.IndexFieldDataCache; @@ -55,7 +56,6 @@ import org.opensearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource; import org.opensearch.index.fielddata.ordinals.Ordinals; import org.opensearch.index.fielddata.ordinals.OrdinalsBuilder; -import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.search.DocValueFormat; import org.opensearch.search.MultiValueMode; import org.opensearch.search.aggregations.support.ValuesSourceType; diff --git a/server/src/main/java/org/opensearch/index/fielddata/plain/SortedNumericIndexFieldData.java b/server/src/main/java/org/opensearch/index/fielddata/plain/SortedNumericIndexFieldData.java index 812010b44b654..0019a41e67c02 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/plain/SortedNumericIndexFieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/plain/SortedNumericIndexFieldData.java @@ -38,9 +38,11 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.sandbox.document.HalfFloatPoint; import 
org.apache.lucene.util.Accountable; import org.apache.lucene.util.NumericUtils; import org.opensearch.common.time.DateUtils; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.index.fielddata.FieldData; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; @@ -53,8 +55,6 @@ import org.opensearch.index.fielddata.SortedNumericDoubleValues; import org.opensearch.index.fielddata.fieldcomparator.LongValuesComparatorSource; import org.opensearch.index.mapper.DocValueFetcher; -import org.opensearch.indices.breaker.CircuitBreakerService; -import org.apache.lucene.sandbox.document.HalfFloatPoint; import org.opensearch.search.DocValueFormat; import org.opensearch.search.MultiValueMode; import org.opensearch.search.aggregations.support.ValuesSourceType; @@ -336,6 +336,11 @@ public double doubleValue() throws IOException { public boolean advanceExact(int doc) throws IOException { return in.advanceExact(doc); } + + @Override + public int advance(int target) throws IOException { + return in.advance(target); + } } /** @@ -364,6 +369,11 @@ public double nextValue() throws IOException { public int docValueCount() { return in.docValueCount(); } + + @Override + public int advance(int target) throws IOException { + return in.advance(target); + } } /** @@ -434,6 +444,11 @@ public double doubleValue() throws IOException { public boolean advanceExact(int doc) throws IOException { return in.advanceExact(doc); } + + @Override + public int advance(int target) throws IOException { + return in.advance(target); + } } /** @@ -462,6 +477,11 @@ public double nextValue() throws IOException { public int docValueCount() { return in.docValueCount(); } + + @Override + public int advance(int target) throws IOException { + return in.advance(target); + } } /** diff --git a/server/src/main/java/org/opensearch/index/fielddata/plain/SortedSetOrdinalsIndexFieldData.java b/server/src/main/java/org/opensearch/index/fielddata/plain/SortedSetOrdinalsIndexFieldData.java index 801186bbc4379..fe033fa7a3f70 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/plain/SortedSetOrdinalsIndexFieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/plain/SortedSetOrdinalsIndexFieldData.java @@ -40,13 +40,13 @@ import org.apache.lucene.search.SortedSetSortField; import org.opensearch.common.Nullable; import org.opensearch.common.util.BigArrays; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.opensearch.index.fielddata.IndexFieldDataCache; import org.opensearch.index.fielddata.LeafOrdinalsFieldData; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource; -import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.search.DocValueFormat; import org.opensearch.search.MultiValueMode; import org.opensearch.search.aggregations.support.ValuesSourceType; @@ -101,9 +101,9 @@ public SortedSetOrdinalsIndexFieldData( @Override public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested, boolean reverse) { XFieldComparatorSource source = new BytesRefFieldComparatorSource(this, missingValue, sortMode, nested); - /** - * Check if we can use a simple {@link SortedSetSortField} compatible with index sorting and 
- * returns a custom sort field otherwise. + /* + Check if we can use a simple {@link SortedSetSortField} compatible with index sorting and + return a custom sort field otherwise. */ if (nested != null || (sortMode != MultiValueMode.MAX && sortMode != MultiValueMode.MIN) diff --git a/server/src/main/java/org/opensearch/index/fieldvisitor/SingleFieldsVisitor.java b/server/src/main/java/org/opensearch/index/fieldvisitor/SingleFieldsVisitor.java index 69cef51c24421..a7ce7b8f88384 100644 --- a/server/src/main/java/org/opensearch/index/fieldvisitor/SingleFieldsVisitor.java +++ b/server/src/main/java/org/opensearch/index/fieldvisitor/SingleFieldsVisitor.java @@ -33,10 +33,10 @@ import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.StoredFieldVisitor; +import org.apache.lucene.util.BytesRef; import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.Uid; -import org.apache.lucene.util.BytesRef; import java.util.List; diff --git a/server/src/main/java/org/opensearch/index/flush/FlushStats.java b/server/src/main/java/org/opensearch/index/flush/FlushStats.java index 0c05ae162782a..9bce46d1dd9d5 100644 --- a/server/src/main/java/org/opensearch/index/flush/FlushStats.java +++ b/server/src/main/java/org/opensearch/index/flush/FlushStats.java @@ -32,10 +32,11 @@ package org.opensearch.index.flush; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.TimeValue; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -44,8 +45,9 @@ /** * Encapsulates statistics for flush * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class FlushStats implements Writeable, ToXContentFragment { private long total; diff --git a/server/src/main/java/org/opensearch/index/get/GetResult.java b/server/src/main/java/org/opensearch/index/get/GetResult.java index 65788b2297da4..c0dd1cd2ecb30 100644 --- a/server/src/main/java/org/opensearch/index/get/GetResult.java +++ b/server/src/main/java/org/opensearch/index/get/GetResult.java @@ -34,18 +34,19 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.Version; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.compress.CompressorFactory; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.document.DocumentField; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.compress.CompressorRegistry; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.mapper.IgnoredFieldMapper; import org.opensearch.index.mapper.MapperService; import
org.opensearch.index.mapper.SourceFieldMapper; @@ -68,6 +69,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetResult implements Writeable, Iterable<DocumentField>, ToXContentObject { public static final String _INDEX = "_index"; @@ -206,7 +208,7 @@ public BytesReference sourceRef() { } try { - this.source = CompressorFactory.uncompressIfNeeded(this.source); + this.source = CompressorRegistry.uncompressIfNeeded(this.source); return this.source; } catch (IOException e) { throw new OpenSearchParseException("failed to decompress source", e); @@ -477,6 +479,6 @@ public int hashCode() { @Override public String toString() { - return Strings.toString(XContentType.JSON, this, true, true); + return Strings.toString(MediaTypeRegistry.JSON, this, true, true); } } diff --git a/server/src/main/java/org/opensearch/index/get/GetStats.java b/server/src/main/java/org/opensearch/index/get/GetStats.java index 5837ec2bedc2e..a366014fe228e 100644 --- a/server/src/main/java/org/opensearch/index/get/GetStats.java +++ b/server/src/main/java/org/opensearch/index/get/GetStats.java @@ -32,10 +32,11 @@ package org.opensearch.index.get; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.TimeValue; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -46,6 +47,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") public class GetStats implements Writeable, ToXContentFragment { private long existsCount; diff --git a/server/src/main/java/org/opensearch/index/get/ShardGetService.java b/server/src/main/java/org/opensearch/index/get/ShardGetService.java index b3715e097322d..d4eeb8aae8e24 100644 --- a/server/src/main/java/org/opensearch/index/get/ShardGetService.java +++ b/server/src/main/java/org/opensearch/index/get/ShardGetService.java @@ -43,7 +43,7 @@ import org.apache.lucene.index.VectorSimilarityFunction; import org.opensearch.OpenSearchException; import org.opensearch.common.Nullable; -import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; import org.opensearch.common.document.DocumentField; import org.opensearch.common.lucene.uid.Versions; @@ -51,10 +51,11 @@ import org.opensearch.common.metrics.CounterMetric; import org.opensearch.common.metrics.MeanMetric; import org.opensearch.common.util.set.Sets; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.support.XContentMapValues; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexSettings; import org.opensearch.index.VersionType; import org.opensearch.index.engine.Engine; @@ -88,8 +89,9 @@ /** * Gets an index shard * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ShardGetService extends AbstractIndexShardComponent { private final MapperService mapperService; private final MeanMetric existsMetric = new MeanMetric(); @@ -299,7 +301,7 @@ private GetResult innerGetLoadFromStoredFields( shardId.getIndexName(), id, source, - XContentHelper.xContentType(source), 
+ MediaTypeRegistry.xContentType(source), fieldVisitor.routing() ); ParsedDocument doc = indexShard.mapperService().documentMapper().parse(sourceToParse); @@ -329,6 +331,7 @@ private GetResult innerGetLoadFromStoredFields( 0, VectorEncoding.FLOAT32, VectorSimilarityFunction.EUCLIDEAN, + false, false ); StoredFieldVisitor.Status status = fieldVisitor.needsField(fieldInfo); @@ -378,7 +381,7 @@ private GetResult innerGetLoadFromStoredFields( sourceAsMap = typeMapTuple.v2(); sourceAsMap = XContentMapValues.filter(sourceAsMap, fetchSourceContext.includes(), fetchSourceContext.excludes()); try { - source = BytesReference.bytes(XContentFactory.contentBuilder(sourceContentType).map(sourceAsMap)); + source = BytesReference.bytes(MediaTypeRegistry.contentBuilder(sourceContentType).map(sourceAsMap)); } catch (IOException e) { throw new OpenSearchException("Failed to get id [" + id + "] with includes/excludes set", e); } @@ -406,7 +409,7 @@ private GetResult innerGetLoadFromStoredFields( sourceAsMap = typeMapTuple.v2(); sourceAsMap = XContentMapValues.filter(sourceAsMap, fetchSourceContext.includes(), fetchSourceContext.excludes()); try { - source = BytesReference.bytes(XContentFactory.contentBuilder(sourceContentType).map(sourceAsMap)); + source = BytesReference.bytes(MediaTypeRegistry.contentBuilder(sourceContentType).map(sourceAsMap)); } catch (IOException e) { throw new OpenSearchException("Failed to get id [" + id + "] with includes/excludes set", e); } diff --git a/server/src/main/java/org/opensearch/index/mapper/AbstractGeometryFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/AbstractGeometryFieldMapper.java index e0798e74b4f2f..3b6782b34feea 100644 --- a/server/src/main/java/org/opensearch/index/mapper/AbstractGeometryFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/AbstractGeometryFieldMapper.java @@ -36,15 +36,15 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.Query; import org.opensearch.common.Explicit; -import org.opensearch.core.ParseField; import org.opensearch.common.geo.GeoJsonGeometryFormat; import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.support.XContentMapValues; +import org.opensearch.core.ParseField; +import org.opensearch.core.xcontent.MapXContentParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.xcontent.MapXContentParser; -import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.query.QueryShardException; import org.opensearch.search.lookup.SearchLookup; @@ -121,7 +121,7 @@ public abstract static class Parser<Parsed> { /** * Given a parsed value and a format string, formats the value into a plain Java object. - * + * <p> * Supported formats include 'geojson' and 'wkt'. The different formats are defined * as subclasses of {@link org.opensearch.common.geo.GeometryFormat}. */ @@ -129,7 +129,7 @@ public abstract static class Parser<Parsed> { /** * Parses the given value, then formats it according to the 'format' string. - * + * <p> * By default, this method simply parses the value using {@link Parser#parse}, then formats * it with {@link Parser#format}. 
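The GetResult and ShardGetService hunks above consolidate content-type handling on the core MediaTypeRegistry: sniffing the media type of raw source bytes and creating a builder of the same type, where XContentHelper and XContentFactory were used before. A small sketch of that pattern, assuming the registry methods behave as they are used in the hunks:

import java.io.IOException;
import java.util.Map;

import org.opensearch.core.common.bytes.BytesArray;
import org.opensearch.core.common.bytes.BytesReference;
import org.opensearch.core.xcontent.MediaType;
import org.opensearch.core.xcontent.MediaTypeRegistry;

final class MediaTypeSketch {
    static BytesReference rebuild() throws IOException {
        BytesReference source = new BytesArray("{\"user\":\"kimchy\"}");
        MediaType type = MediaTypeRegistry.xContentType(source); // sniffs JSON/SMILE/YAML/CBOR
        // re-serialize a (possibly filtered) map in the same format as the original
        return BytesReference.bytes(MediaTypeRegistry.contentBuilder(type).map(Map.of("user", "kimchy")));
    }
}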
However some {@link Parser} implementations override this * as they can avoid parsing the value if it is already in the right format. @@ -141,7 +141,7 @@ public Object parseAndFormatObject(Object value, String format) { NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, Collections.singletonMap("dummy_field", value), - XContentType.JSON + MediaTypeRegistry.JSON ) ) { parser.nextToken(); // start object diff --git a/server/src/main/java/org/opensearch/index/mapper/AbstractPointGeometryFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/AbstractPointGeometryFieldMapper.java index dd91a5cbbc619..1f53490de1ce1 100644 --- a/server/src/main/java/org/opensearch/index/mapper/AbstractPointGeometryFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/AbstractPointGeometryFieldMapper.java @@ -35,11 +35,11 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.common.CheckedBiFunction; import org.opensearch.common.Explicit; -import org.opensearch.core.ParseField; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.geo.GeometryFormat; import org.opensearch.common.geo.GeometryParser; import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.geometry.Geometry; diff --git a/server/src/main/java/org/opensearch/index/mapper/AbstractShapeGeometryFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/AbstractShapeGeometryFieldMapper.java index 3c2f3136de13a..186e01047dc7d 100644 --- a/server/src/main/java/org/opensearch/index/mapper/AbstractShapeGeometryFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/AbstractShapeGeometryFieldMapper.java @@ -33,12 +33,12 @@ import org.apache.lucene.document.FieldType; import org.opensearch.common.Explicit; -import org.opensearch.core.ParseField; import org.opensearch.common.geo.builders.ShapeBuilder; import org.opensearch.common.geo.builders.ShapeBuilder.Orientation; import org.opensearch.common.xcontent.LoggingDeprecationHandler; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.support.XContentMapValues; +import org.opensearch.core.ParseField; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.mapper.LegacyGeoShapeFieldMapper.DeprecatedParameters; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/index/mapper/ArraySourceValueFetcher.java b/server/src/main/java/org/opensearch/index/mapper/ArraySourceValueFetcher.java index f5dc34ab8ac5d..b3112df86bab6 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ArraySourceValueFetcher.java +++ b/server/src/main/java/org/opensearch/index/mapper/ArraySourceValueFetcher.java @@ -43,7 +43,7 @@ /** * An implementation of {@link ValueFetcher} that knows how to extract values * from the document source. - * + * <p> * This class differs from {@link SourceValueFetcher} in that it directly handles * array values in parsing. Field types should use this class if their corresponding * mapper returns true for {@link FieldMapper#parsesArrayValue()}. 
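The ArraySourceValueFetcher javadoc above draws the dividing line between the two source fetchers: a mapper whose parsesArrayValue() returns true receives whole array nodes and should fetch through ArraySourceValueFetcher rather than SourceValueFetcher. A hedged sketch of the pairing; the constructor arguments and context type are assumptions based on the surrounding code, not verified against a particular release:

import org.opensearch.index.mapper.ArraySourceValueFetcher;
import org.opensearch.index.mapper.ValueFetcher;
import org.opensearch.index.query.QueryShardContext;

final class ArrayFetcherSketch {
    // In a real mapper this pairs with `protected boolean parsesArrayValue() { return true; }`,
    // so the parser hands over the full array node instead of one element at a time.
    ValueFetcher fetcher(String field, QueryShardContext context) {
        return new ArraySourceValueFetcher(field, context) {
            @Override
            protected Object parseSourceValue(Object value) {
                return value; // may be a List when the source field holds an array
            }
        };
    }
}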
diff --git a/server/src/main/java/org/opensearch/index/mapper/BinaryFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/BinaryFieldMapper.java index a55e55111ffa2..040491f775357 100644 --- a/server/src/main/java/org/opensearch/index/mapper/BinaryFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/BinaryFieldMapper.java @@ -38,10 +38,10 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.opensearch.OpenSearchException; +import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.common.util.CollectionUtils; +import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.plain.BytesBinaryIndexFieldData; @@ -242,6 +242,10 @@ protected String contentType() { */ public static class CustomBinaryDocValuesField extends CustomDocValuesField { + // We considered using a TreeSet instead of an ArrayList here. + // Benchmarks show that ArrayList performs much better + // For details, see: https://github.com/opensearch-project/OpenSearch/pull/9426 + // Benchmarks are in CustomBinaryDocValuesFiledBenchmark private final ArrayList<byte[]> bytesList; public CustomBinaryDocValuesField(String name, byte[] bytes) { diff --git a/server/src/main/java/org/opensearch/index/mapper/BinaryRangeUtil.java b/server/src/main/java/org/opensearch/index/mapper/BinaryRangeUtil.java index 05c5e6f1424b3..7a07df7d217df 100644 --- a/server/src/main/java/org/opensearch/index/mapper/BinaryRangeUtil.java +++ b/server/src/main/java/org/opensearch/index/mapper/BinaryRangeUtil.java @@ -36,8 +36,8 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; import org.opensearch.common.TriFunction; -import org.opensearch.core.common.io.stream.BytesStreamInput; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.BytesStreamInput; import java.io.IOException; import java.net.InetAddress; diff --git a/server/src/main/java/org/opensearch/index/mapper/BooleanFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/BooleanFieldMapper.java index c87d4140e0bf1..3c7925809415a 100644 --- a/server/src/main/java/org/opensearch/index/mapper/BooleanFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/BooleanFieldMapper.java @@ -42,8 +42,8 @@ import org.apache.lucene.util.BytesRef; import org.opensearch.common.Booleans; import org.opensearch.common.Nullable; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.common.xcontent.support.XContentMapValues; +import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexNumericFieldData.NumericType; import org.opensearch.index.fielddata.plain.SortedNumericIndexFieldData; @@ -193,7 +193,7 @@ protected Boolean parseSourceValue(Object value) { return (Boolean) value; } else { String textValue = value.toString(); - return Booleans.parseBoolean(textValue.toCharArray(), 0, textValue.length(), false); + return Booleans.parseBooleanStrict(textValue, false); } } }; diff --git a/server/src/main/java/org/opensearch/index/mapper/CompletionFieldMapper.java 
b/server/src/main/java/org/opensearch/index/mapper/CompletionFieldMapper.java index 83f0fada28467..a9d9f6cb35fcb 100644 --- a/server/src/main/java/org/opensearch/index/mapper/CompletionFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/CompletionFieldMapper.java @@ -35,7 +35,7 @@ import org.apache.lucene.document.FieldType; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Term; -import org.apache.lucene.search.suggest.document.Completion90PostingsFormat; +import org.apache.lucene.search.suggest.document.Completion99PostingsFormat; import org.apache.lucene.search.suggest.document.CompletionAnalyzer; import org.apache.lucene.search.suggest.document.CompletionQuery; import org.apache.lucene.search.suggest.document.FuzzyCompletionQuery; @@ -44,10 +44,10 @@ import org.apache.lucene.search.suggest.document.SuggestField; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.core.common.ParsingException; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.unit.Fuzziness; import org.opensearch.common.util.set.Sets; +import org.opensearch.core.common.ParsingException; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.xcontent.XContentParser.NumberType; @@ -330,7 +330,7 @@ public ContextMappings getContextMappings() { */ public static synchronized PostingsFormat postingsFormat() { if (postingsFormat == null) { - postingsFormat = new Completion90PostingsFormat(); + postingsFormat = new Completion99PostingsFormat(); } return postingsFormat; } @@ -447,13 +447,13 @@ int getMaxInputLength() { /** * Parses and indexes inputs - * + * <p> * Parsing: * Acceptable format: * "STRING" - interpreted as field value (input) * "ARRAY" - each element can be one of "OBJECT" (see below) * "OBJECT" - { "input": STRING|ARRAY, "weight": STRING|INT, "contexts": ARRAY|OBJECT } - * + * <p> * Indexing: * if context mappings are defined, delegates to {@link ContextMappings#addField(ParseContext.Document, String, String, int, Map)} * else adds inputs as a {@link org.apache.lucene.search.suggest.document.SuggestField} diff --git a/server/src/main/java/org/opensearch/index/mapper/ContentPath.java b/server/src/main/java/org/opensearch/index/mapper/ContentPath.java index 8b0d5e24056d3..63e5731e2681b 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ContentPath.java +++ b/server/src/main/java/org/opensearch/index/mapper/ContentPath.java @@ -32,11 +32,14 @@ package org.opensearch.index.mapper; +import org.opensearch.common.annotation.PublicApi; + /** * JSON Path for a document / field * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ContentPath { private static final char DELIMITER = '.'; diff --git a/server/src/main/java/org/opensearch/index/mapper/CustomDocValuesField.java b/server/src/main/java/org/opensearch/index/mapper/CustomDocValuesField.java index 2776e7515bbf6..fbb67731f581b 100644 --- a/server/src/main/java/org/opensearch/index/mapper/CustomDocValuesField.java +++ b/server/src/main/java/org/opensearch/index/mapper/CustomDocValuesField.java @@ -42,8 +42,7 @@ import java.io.Reader; /** - * Base class for constructing a custom docvalues type - * + * Base class for constructing a custom docvalues type. 
* used for binary, geo, and range fields * * @opensearch.api diff --git a/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java index 1669c14d00c2a..d98e6ea6af83d 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java @@ -37,9 +37,9 @@ import org.apache.lucene.document.StoredField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.PointValues; -import org.apache.lucene.search.IndexSortSortedNumericDocValuesRangeQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.IndexOrDocValuesQuery; +import org.apache.lucene.search.IndexSortSortedNumericDocValuesRangeQuery; import org.apache.lucene.search.Query; import org.opensearch.OpenSearchParseException; import org.opensearch.Version; @@ -52,6 +52,7 @@ import org.opensearch.common.time.DateMathParser; import org.opensearch.common.time.DateUtils; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.LocaleUtils; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexNumericFieldData.NumericType; @@ -67,8 +68,8 @@ import java.time.Instant; import java.time.ZoneId; import java.time.ZoneOffset; -import java.util.Arrays; import java.time.ZonedDateTime; +import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Locale; @@ -91,7 +92,21 @@ public final class DateFieldMapper extends ParametrizedFieldMapper { public static final String CONTENT_TYPE = "date"; public static final String DATE_NANOS_CONTENT_TYPE = "date_nanos"; - public static final DateFormatter DEFAULT_DATE_TIME_FORMATTER = DateFormatter.forPattern("strict_date_optional_time||epoch_millis"); + @Deprecated + public static final DateFormatter LEGACY_DEFAULT_DATE_TIME_FORMATTER = DateFormatter.forPattern( + // TODO remove in 3.0 after backporting + "strict_date_optional_time||epoch_millis" + ); + public static final DateFormatter DEFAULT_DATE_TIME_FORMATTER = DateFormatter.forPattern( + "strict_date_time_no_millis||strict_date_optional_time||epoch_millis", + "strict_date_optional_time" + ); + + public static DateFormatter getDefaultDateTimeFormatter() { + return FeatureFlags.isEnabled(FeatureFlags.DATETIME_FORMATTER_CACHING_SETTING) + ? 
DEFAULT_DATE_TIME_FORMATTER + : LEGACY_DEFAULT_DATE_TIME_FORMATTER; + } /** * Resolution of the date time @@ -223,8 +238,14 @@ public static class Builder extends ParametrizedFieldMapper.Builder { "format", false, m -> toType(m).format, - DEFAULT_DATE_TIME_FORMATTER.pattern() + getDefaultDateTimeFormatter().pattern() ); + private final Parameter<String> printFormat = Parameter.stringParam( + "print_format", + false, + m -> toType(m).printFormat, + getDefaultDateTimeFormatter().printPattern() + ).acceptsNull(); private final Parameter<Locale> locale = new Parameter<>( "locale", false, @@ -253,13 +274,18 @@ public Builder( this.ignoreMalformed = Parameter.boolParam("ignore_malformed", true, m -> toType(m).ignoreMalformed, ignoreMalformedByDefault); if (dateFormatter != null) { this.format.setValue(dateFormatter.pattern()); + this.printFormat.setValue(dateFormatter.printPattern()); this.locale.setValue(dateFormatter.locale()); } } private DateFormatter buildFormatter() { try { - return DateFormatter.forPattern(format.getValue()).withLocale(locale.getValue()); + if (format.isConfigured() && !printFormat.isConfigured()) { + return DateFormatter.forPattern(format.getValue(), null, !format.isConfigured()).withLocale(locale.getValue()); + } + return DateFormatter.forPattern(format.getValue(), printFormat.getValue(), !format.isConfigured()) + .withLocale(locale.getValue()); } catch (IllegalArgumentException e) { throw new IllegalArgumentException("Error parsing [format] on field [" + name() + "]: " + e.getMessage(), e); } @@ -267,7 +293,7 @@ private DateFormatter buildFormatter() { @Override protected List<Parameter<?>> getParameters() { - return Arrays.asList(index, docValues, store, format, locale, nullValue, ignoreMalformed, boost, meta); + return Arrays.asList(index, docValues, store, format, printFormat, locale, nullValue, ignoreMalformed, boost, meta); } private Long parseNullValue(DateFieldType fieldType) { @@ -346,7 +372,7 @@ public DateFieldType( } public DateFieldType(String name) { - this(name, true, false, true, DEFAULT_DATE_TIME_FORMATTER, Resolution.MILLISECONDS, null, Collections.emptyMap()); + this(name, true, false, true, getDefaultDateTimeFormatter(), Resolution.MILLISECONDS, null, Collections.emptyMap()); } public DateFieldType(String name, DateFormatter dateFormatter) { @@ -354,7 +380,7 @@ public DateFieldType(String name, DateFormatter dateFormatter) { } public DateFieldType(String name, Resolution resolution) { - this(name, true, false, true, DEFAULT_DATE_TIME_FORMATTER, resolution, null, Collections.emptyMap()); + this(name, true, false, true, getDefaultDateTimeFormatter(), resolution, null, Collections.emptyMap()); } public DateFieldType(String name, Resolution resolution, DateFormatter dateFormatter) { @@ -383,6 +409,16 @@ public long parse(String value) { return resolution.convert(DateFormatters.from(dateTimeFormatter().parse(value), dateTimeFormatter().locale()).toInstant()); } + public long convertNanosToMillis(long nanoSecondsSinceEpoch) { + if (resolution.numericType.equals(NumericType.DATE_NANOSECONDS)) return DateUtils.toMilliSeconds(nanoSecondsSinceEpoch); + return nanoSecondsSinceEpoch; + } + + public long convertRoundedMillisToNanos(long milliSecondsSinceEpoch) { + if (resolution.numericType.equals(NumericType.DATE_NANOSECONDS)) return DateUtils.toNanoSeconds(milliSecondsSinceEpoch); + return milliSecondsSinceEpoch; + } + @Override public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { DateFormatter defaultFormatter 
= dateTimeFormatter(); @@ -610,6 +646,7 @@ public DocValueFormat docValueFormat(@Nullable String format, ZoneId timeZone) { private final boolean hasDocValues; private final Locale locale; private final String format; + private final String printFormat; private final boolean ignoreMalformed; private final Long nullValue; private final String nullValueAsString; @@ -633,6 +670,7 @@ private DateFieldMapper( this.hasDocValues = builder.docValues.getValue(); this.locale = builder.locale.getValue(); this.format = builder.format.getValue(); + this.printFormat = builder.printFormat.getValue(); this.ignoreMalformed = builder.ignoreMalformed.getValue(); this.nullValueAsString = builder.nullValue.getValue(); this.nullValue = nullValue; diff --git a/server/src/main/java/org/opensearch/index/mapper/DocValueFetcher.java b/server/src/main/java/org/opensearch/index/mapper/DocValueFetcher.java index bf14af809fbfd..827792cdb1091 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DocValueFetcher.java +++ b/server/src/main/java/org/opensearch/index/mapper/DocValueFetcher.java @@ -33,6 +33,7 @@ package org.opensearch.index.mapper; import org.apache.lucene.index.LeafReaderContext; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.search.DocValueFormat; import org.opensearch.search.lookup.SourceLookup; @@ -77,8 +78,9 @@ public List<Object> fetchValues(SourceLookup lookup) throws IOException { /** * Leaf interface * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface Leaf { /** * Advance the doc values reader to the provided doc. diff --git a/server/src/main/java/org/opensearch/index/mapper/DocumentMapper.java b/server/src/main/java/org/opensearch/index/mapper/DocumentMapper.java index 2461a72a2d041..3ada04b41abd2 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DocumentMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/DocumentMapper.java @@ -40,14 +40,15 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.util.BytesRef; import org.opensearch.OpenSearchGenerationException; -import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.text.Text; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.IndexSettings; import org.opensearch.index.analysis.IndexAnalyzers; import org.opensearch.index.mapper.MapperService.MergeReason; @@ -65,15 +66,17 @@ /** * The OpenSearch DocumentMapper * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DocumentMapper implements ToXContentFragment { /** * Builder for the Document Field Mapper * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private final Map<Class<? 
extends MetadataFieldMapper>, MetadataFieldMapper> metadataMappers = new LinkedHashMap<>(); @@ -236,7 +239,7 @@ public MappingLookup mappers() { return this.fieldMappers; } - public FieldTypeLookup fieldTypes() { + FieldTypeLookup fieldTypes() { return mappers().fieldTypes(); } @@ -249,13 +252,13 @@ public ParsedDocument parse(SourceToParse source) throws MapperParsingException } public ParsedDocument createDeleteTombstoneDoc(String index, String id) throws MapperParsingException { - final SourceToParse emptySource = new SourceToParse(index, id, new BytesArray("{}"), XContentType.JSON); + final SourceToParse emptySource = new SourceToParse(index, id, new BytesArray("{}"), MediaTypeRegistry.JSON); return documentParser.parseDocument(emptySource, deleteTombstoneMetadataFieldMappers).toTombstone(); } public ParsedDocument createNoopTombstoneDoc(String index, String reason) throws MapperParsingException { final String id = ""; // _id won't be used. - final SourceToParse sourceToParse = new SourceToParse(index, id, new BytesArray("{}"), XContentType.JSON); + final SourceToParse sourceToParse = new SourceToParse(index, id, new BytesArray("{}"), MediaTypeRegistry.JSON); final ParsedDocument parsedDoc = documentParser.parseDocument(sourceToParse, noopTombstoneMetadataFieldMappers).toTombstone(); // Store the reason of a noop as a raw string in the _source field final BytesRef byteRef = new BytesRef(reason); diff --git a/server/src/main/java/org/opensearch/index/mapper/DocumentMapperForType.java b/server/src/main/java/org/opensearch/index/mapper/DocumentMapperForType.java index b26c3f3645433..565817a450f56 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DocumentMapperForType.java +++ b/server/src/main/java/org/opensearch/index/mapper/DocumentMapperForType.java @@ -32,6 +32,8 @@ package org.opensearch.index.mapper; +import org.opensearch.common.annotation.DeprecatedApi; + /** * Document mapper used for types * @@ -40,6 +42,7 @@ * @deprecated types are being removed */ @Deprecated +@DeprecatedApi(since = "2.0.0") public class DocumentMapperForType { private final DocumentMapper documentMapper; private final Mapping mapping; diff --git a/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java b/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java index 69775481a3056..fa736f84895eb 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java +++ b/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java @@ -34,12 +34,13 @@ import org.opensearch.Version; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.time.DateFormatter; -import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.index.IndexSettings; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.similarity.SimilarityService; @@ -56,8 +57,9 @@ /** * Parser for a document mapper * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DocumentMapperParser { final MapperService mapperService; @@ -119,7 +121,7 @@ public Mapper.TypeParser.ParserContext parserContext(DateFormatter 
dateFormatter public DocumentMapper parse(@Nullable String type, CompressedXContent source) throws MapperParsingException { Map<String, Object> mapping = null; if (source != null) { - Map<String, Object> root = XContentHelper.convertToMap(source.compressedReference(), true, XContentType.JSON).v2(); + Map<String, Object> root = XContentHelper.convertToMap(source.compressedReference(), true, MediaTypeRegistry.JSON).v2(); Tuple<String, Map<String, Object>> t = extractMapping(type, root); type = t.v1(); mapping = t.v2(); @@ -197,7 +199,7 @@ private static String getRemainingFields(Map<?, ?> map) { /** * Given an optional type name and mapping definition, returns the type and a normalized form of the mappings. - * + * <p> * The provided mapping definition may or may not contain the type name as the root key in the map. This method * attempts to unwrap the mappings, so that they no longer contain a type name at the root. If no type name can * be found, through either the 'type' parameter or by examining the provided mappings, then an exception will be @@ -205,7 +207,6 @@ private static String getRemainingFields(Map<?, ?> map) { * * @param type An optional type name. * @param root The mapping definition. - * * @return A tuple of the form (type, normalized mappings). */ @SuppressWarnings({ "unchecked" }) diff --git a/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldMapper.java index 94bc4806ba0e0..2e59d86f9119c 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldMapper.java @@ -43,7 +43,7 @@ * to {@link DynamicKeyFieldMapper#keyedFieldType(String)}, with 'some_key' passed as the * argument. The field mapper is allowed to create a new field type dynamically in order * to handle the search. - * + * <p> * To prevent conflicts between these dynamic sub-keys and multi-fields, any field mappers * implementing this interface should explicitly disallow multi-fields. The constructor makes * sure to pass an empty multi-fields list to help prevent conflicting sub-keys from being diff --git a/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldTypeLookup.java b/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldTypeLookup.java index 13150ddc50a51..a415078108eb6 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldTypeLookup.java +++ b/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldTypeLookup.java @@ -37,11 +37,11 @@ /** * A container that supports looking up field types for 'dynamic key' fields ({@link DynamicKeyFieldMapper}). - * + * <p> * Compared to standard fields, 'dynamic key' fields require special handling. Given a field name of the form * 'path_to_field.path_to_key', the container will dynamically return a new {@link MappedFieldType} that is * suitable for performing searches on the sub-key. - * + * <p> * Note: we anticipate that 'flattened' fields will be the only implementation of {@link DynamicKeyFieldMapper}. * Flattened object fields live in the 'mapper-flattened' module.
* diff --git a/server/src/main/java/org/opensearch/index/mapper/DynamicTemplate.java b/server/src/main/java/org/opensearch/index/mapper/DynamicTemplate.java index a20ad43bd70af..da62b20c98563 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DynamicTemplate.java +++ b/server/src/main/java/org/opensearch/index/mapper/DynamicTemplate.java @@ -32,6 +32,7 @@ package org.opensearch.index.mapper; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.regex.Regex; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; @@ -47,8 +48,9 @@ /** * A template that is dynamic based on field types * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DynamicTemplate implements ToXContentObject { /** @@ -96,8 +98,9 @@ public static MatchType fromString(String value) { /** * The type of a field as detected while parsing a json document. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum XContentFieldType { OBJECT { @Override diff --git a/server/src/main/java/org/opensearch/index/mapper/FieldAliasMapper.java b/server/src/main/java/org/opensearch/index/mapper/FieldAliasMapper.java index 126fcf708487b..ff9cb61b85571 100644 --- a/server/src/main/java/org/opensearch/index/mapper/FieldAliasMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/FieldAliasMapper.java @@ -32,8 +32,8 @@ package org.opensearch.index.mapper; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.support.XContentMapValues; +import org.opensearch.core.xcontent.XContentBuilder; import java.io.IOException; import java.util.Collections; @@ -43,7 +43,7 @@ /** * A mapper for field aliases. - * + * <p> * A field alias has no concrete field mappings of its own, but instead points to another field by * its path. Once defined, an alias can be used in place of the concrete field name in search requests. * diff --git a/server/src/main/java/org/opensearch/index/mapper/FieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/FieldMapper.java index e2f0697a65f8e..4e495c68fd822 100644 --- a/server/src/main/java/org/opensearch/index/mapper/FieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/FieldMapper.java @@ -35,12 +35,13 @@ import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; import org.apache.lucene.index.IndexOptions; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; +import org.opensearch.core.xcontent.AbstractXContentParser; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.xcontent.AbstractXContentParser; import org.opensearch.index.analysis.NamedAnalyzer; import org.opensearch.index.mapper.FieldNamesFieldMapper.FieldNamesFieldType; @@ -61,8 +62,9 @@ /** * The base OpenSearch Field Mapper * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class FieldMapper extends Mapper implements Cloneable { public static final Setting<Boolean> IGNORE_MALFORMED_SETTING = Setting.boolSetting( "index.mapping.ignore_malformed", @@ -305,7 +307,7 @@ public void parse(ParseContext context) throws IOException { /** * Parse the field value and populate the fields on {@link ParseContext#doc()}. 
- * + * <p> * Implementations of this method should ensure that, on failing to parse, parser.currentToken() is the * current failing token */ @@ -572,8 +574,9 @@ protected static String indexOptionToString(IndexOptions indexOption) { /** * Multi field implementation used across field mappers * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class MultiFields implements Iterable<Mapper> { public static MultiFields empty() { @@ -702,8 +705,9 @@ public int compare(Mapper o1, Mapper o2) { /** * Represents a list of fields with optional boost factor where the current field should be copied to * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class CopyTo { private static final CopyTo EMPTY = new CopyTo(Collections.emptyList()); diff --git a/server/src/main/java/org/opensearch/index/mapper/FieldTypeLookup.java b/server/src/main/java/org/opensearch/index/mapper/FieldTypeLookup.java index 8e1b6f2a3c08b..7a9a0c5fb5428 100644 --- a/server/src/main/java/org/opensearch/index/mapper/FieldTypeLookup.java +++ b/server/src/main/java/org/opensearch/index/mapper/FieldTypeLookup.java @@ -32,6 +32,7 @@ package org.opensearch.index.mapper; +import org.opensearch.common.annotation.InternalApi; import org.opensearch.common.collect.Iterators; import org.opensearch.common.regex.Regex; @@ -48,6 +49,7 @@ * * @opensearch.internal */ +@InternalApi class FieldTypeLookup implements Iterable<MappedFieldType> { private final Map<String, MappedFieldType> fullNameToFieldType = new HashMap<>(); @@ -57,7 +59,7 @@ class FieldTypeLookup implements Iterable<MappedFieldType> { * A map from field name to all fields whose content has been copied into it * through copy_to. A field will only be present in the map if some other field * has listed it as a target of copy_to. - * + * <p> * For convenience, the set of copied fields includes the field itself. */ private final Map<String, Set<String>> fieldToCopiedFields = new HashMap<>(); @@ -133,7 +135,7 @@ public Set<String> simpleMatchToFullName(String pattern) { /** * Given a concrete field name, return its paths in the _source. - * + * <p> * For most fields, the source path is the same as the field itself. However * there are cases where a field's values are found elsewhere in the _source: * - For a multi-field, the source path is the parent field.
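
To make the copy_to bookkeeping in the FieldTypeLookup hunk above concrete, here is a standalone sketch of the source-path contract its javadoc describes. The class, the field names, and the "trailing dot segment means multi-field" shortcut are illustrative assumptions (the real lookup, sourcePaths in upstream, consults the actual mappings rather than the string shape); only the documented behavior is taken from this diff.

import java.util.Map;
import java.util.Set;

// Illustrative model of the documented contract, not the FieldTypeLookup implementation.
final class SourcePathsSketch {
    // field -> all fields copied into it via copy_to; per the javadoc above,
    // the set of copied fields includes the field itself.
    private static final Map<String, Set<String>> FIELD_TO_COPIED_FIELDS = Map.of("region", Set.of("region", "city"));

    static Set<String> sourcePaths(String field) {
        if (FIELD_TO_COPIED_FIELDS.containsKey(field)) {
            return FIELD_TO_COPIED_FIELDS.get(field); // copy_to target: every contributing field
        }
        int dot = field.lastIndexOf('.');
        if (dot != -1) {
            return Set.of(field.substring(0, dot)); // multi-field: the source path is the parent field
        }
        return Set.of(field); // ordinary field: the source path is the field itself
    }

    public static void main(String[] args) {
        System.out.println(sourcePaths("title.raw")); // [title]
        System.out.println(sourcePaths("region"));    // e.g. [region, city] (set order unspecified)
        System.out.println(sourcePaths("city"));      // [city]
    }
}
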
diff --git a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java index f8206d138534d..9a3f2595a7c9e 100644 --- a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java @@ -29,10 +29,10 @@ import org.opensearch.common.collect.Iterators; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.lucene.search.AutomatonQueries; +import org.opensearch.common.xcontent.JsonToStringXContentParser; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.JsonToStringXContentParser; import org.opensearch.index.analysis.NamedAnalyzer; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; @@ -572,12 +572,12 @@ protected void parseCreateField(ParseContext context) throws IOException { JsonToStringXContentParser JsonToStringParser = new JsonToStringXContentParser( NamedXContentRegistry.EMPTY, DeprecationHandler.IGNORE_DEPRECATIONS, - context, + context.parser(), fieldType().name() ); - /** - * JsonToStringParser is the main parser class to transform JSON into stringFields in a XContentParser - * It reads the JSON object and parsed to a list of string + /* + JsonToStringParser is the main parser class to transform JSON into stringFields in an XContentParser. + It reads the JSON object and parses it into a list of strings. */ XContentParser parser = JsonToStringParser.parseObject(); diff --git a/server/src/main/java/org/opensearch/index/mapper/GeoPointFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/GeoPointFieldMapper.java index e3dab3f892949..fcca7e9804bf3 100644 --- a/server/src/main/java/org/opensearch/index/mapper/GeoPointFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/GeoPointFieldMapper.java @@ -63,7 +63,7 @@ /** * Field Mapper for geo_point types. 
- * + * <p> * Uses lucene 6 LatLonPoint encoding * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/index/mapper/GeoShapeFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/GeoShapeFieldMapper.java index 4a4b2684b5f4c..b44b4b75549c3 100644 --- a/server/src/main/java/org/opensearch/index/mapper/GeoShapeFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/GeoShapeFieldMapper.java @@ -31,12 +31,15 @@ package org.opensearch.index.mapper; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; import org.apache.lucene.document.LatLonShape; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.Query; +import org.opensearch.Version; import org.opensearch.common.Explicit; import org.opensearch.common.geo.GeometryParser; import org.opensearch.common.geo.ShapeRelation; @@ -77,6 +80,7 @@ * @opensearch.internal */ public class GeoShapeFieldMapper extends AbstractShapeGeometryFieldMapper<Geometry, Geometry> { + private static final Logger logger = LogManager.getLogger(GeoShapeFieldMapper.class); public static final String CONTENT_TYPE = "geo_shape"; public static final FieldType FIELD_TYPE = new FieldType(); static { @@ -205,9 +209,24 @@ protected void addDocValuesFields( final List<IndexableField> indexableFields, final ParseContext context ) { - Field[] fieldsArray = new Field[indexableFields.size()]; - fieldsArray = indexableFields.toArray(fieldsArray); - context.doc().add(LatLonShape.createDocValueField(name, fieldsArray)); + /* + * We add doc values for GeoShape fields only if the index was created on OpenSearch 2.9 or later. + * Without this check, customers upgrading OpenSearch would not be able to index documents + * with GeoShape fields. 
Github issue: https://github.com/opensearch-project/OpenSearch/issues/10958, + * https://github.com/opensearch-project/OpenSearch/issues/10795 + */ + if (context.indexSettings().getIndexVersionCreated().onOrAfter(Version.V_2_9_0)) { + Field[] fieldsArray = new Field[indexableFields.size()]; + fieldsArray = indexableFields.toArray(fieldsArray); + context.doc().add(LatLonShape.createDocValueField(name, fieldsArray)); + } else { + logger.warn( + "The index was created with Version: {}. For geoshape doc values to work, the index must be " + + "created with OpenSearch Version: {} or above", + context.indexSettings().getIndexVersionCreated(), + Version.V_2_9_0 + ); + } } @Override diff --git a/server/src/main/java/org/opensearch/index/mapper/GeoShapeIndexer.java b/server/src/main/java/org/opensearch/index/mapper/GeoShapeIndexer.java index 758798dc514c2..166665382a9f4 100644 --- a/server/src/main/java/org/opensearch/index/mapper/GeoShapeIndexer.java +++ b/server/src/main/java/org/opensearch/index/mapper/GeoShapeIndexer.java @@ -36,8 +36,8 @@ import org.apache.lucene.index.IndexableField; import org.opensearch.common.geo.GeoLineDecomposer; import org.opensearch.common.geo.GeoPolygonDecomposer; -import org.opensearch.common.geo.GeoShapeUtils; import org.opensearch.common.geo.GeoShapeType; +import org.opensearch.common.geo.GeoShapeUtils; import org.opensearch.common.geo.GeoUtils; import org.opensearch.geometry.Circle; import org.opensearch.geometry.Geometry; diff --git a/server/src/main/java/org/opensearch/index/mapper/GeoShapeParser.java b/server/src/main/java/org/opensearch/index/mapper/GeoShapeParser.java index 128a7faf2791a..3006078473afd 100644 --- a/server/src/main/java/org/opensearch/index/mapper/GeoShapeParser.java +++ b/server/src/main/java/org/opensearch/index/mapper/GeoShapeParser.java @@ -35,10 +35,10 @@ import org.opensearch.common.geo.GeometryFormat; import org.opensearch.common.geo.GeometryParser; import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.core.xcontent.MapXContentParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.xcontent.MapXContentParser; import org.opensearch.geometry.Geometry; import java.io.IOException; @@ -75,7 +75,7 @@ public Object parseAndFormatObject(Object value, String format) { NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, Collections.singletonMap("dummy_field", value), - XContentType.JSON + MediaTypeRegistry.JSON ) ) { parser.nextToken(); // start object diff --git a/server/src/main/java/org/opensearch/index/mapper/IdFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/IdFieldMapper.java index 4b1395ac02bf1..658f4228cb0c6 100644 --- a/server/src/main/java/org/opensearch/index/mapper/IdFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/IdFieldMapper.java @@ -41,9 +41,11 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.util.BytesRef; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.util.BigArrays; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.index.fielddata.IndexFieldData; import 
org.opensearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.opensearch.index.fielddata.IndexFieldDataCache; @@ -54,7 +56,6 @@ import org.opensearch.index.fielddata.plain.PagedBytesIndexFieldData; import org.opensearch.index.query.QueryShardContext; import org.opensearch.indices.IndicesService; -import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.search.DocValueFormat; import org.opensearch.search.MultiValueMode; import org.opensearch.search.aggregations.support.CoreValuesSourceType; @@ -74,8 +75,9 @@ * stored, but we need to keep it so that its FieldType can be used to generate * queries. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IdFieldMapper extends MetadataFieldMapper { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(IdFieldMapper.class); static final String ID_FIELD_DATA_DEPRECATION_MESSAGE = diff --git a/server/src/main/java/org/opensearch/index/mapper/IndexFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/IndexFieldMapper.java index 8cc307f7697c6..22fcb66848ed1 100644 --- a/server/src/main/java/org/opensearch/index/mapper/IndexFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/IndexFieldMapper.java @@ -34,7 +34,8 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.plain.ConstantIndexFieldData; import org.opensearch.index.query.QueryShardContext; @@ -47,8 +48,9 @@ /** * Index specific field mapper * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexFieldMapper extends MetadataFieldMapper { public static final String NAME = "_index"; diff --git a/server/src/main/java/org/opensearch/index/mapper/IpFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/IpFieldMapper.java index 2a677d8bc1352..db8da8a949d6f 100644 --- a/server/src/main/java/org/opensearch/index/mapper/IpFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/IpFieldMapper.java @@ -36,7 +36,9 @@ import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.document.StoredField; import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.PointRangeQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; @@ -222,25 +224,48 @@ protected Object parseSourceValue(Object value) { @Override public Query termQuery(Object value, @Nullable QueryShardContext context) { - failIfNotIndexed(); + failIfNotIndexedAndNoDocValues(); + Query query; if (value instanceof InetAddress) { - return InetAddressPoint.newExactQuery(name(), (InetAddress) value); + query = InetAddressPoint.newExactQuery(name(), (InetAddress) value); } else { if (value instanceof BytesRef) { value = ((BytesRef) value).utf8ToString(); } + String term = value.toString(); + if (term.contains("/")) { + final Tuple<InetAddress, Integer> cidr = InetAddresses.parseCidr(term); + query = InetAddressPoint.newPrefixQuery(name(), cidr.v1(), cidr.v2()); + } else { + InetAddress address = InetAddresses.forString(term); + query = 
InetAddressPoint.newExactQuery(name(), address); + } + } + if (isSearchable() && hasDocValues()) { + String term = value.toString(); + if (term.contains("/")) { + final Tuple<InetAddress, Integer> cidr = InetAddresses.parseCidr(term); + return InetAddressPoint.newPrefixQuery(name(), cidr.v1(), cidr.v2()); + } + return new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowExactQuery(name(), new BytesRef(((PointRangeQuery) query).getLowerPoint())) + ); + } + if (hasDocValues()) { String term = value.toString(); if (term.contains("/")) { final Tuple<InetAddress, Integer> cidr = InetAddresses.parseCidr(term); return InetAddressPoint.newPrefixQuery(name(), cidr.v1(), cidr.v2()); } - InetAddress address = InetAddresses.forString(term); - return InetAddressPoint.newExactQuery(name(), address); + return SortedSetDocValuesField.newSlowExactQuery(name(), new BytesRef(((PointRangeQuery) query).getLowerPoint())); } + return query; } @Override public Query termsQuery(List<?> values, QueryShardContext context) { + failIfNotIndexedAndNoDocValues(); InetAddress[] addresses = new InetAddress[values.size()]; int i = 0; for (Object value : values) { @@ -265,14 +290,32 @@ public Query termsQuery(List<?> values, QueryShardContext context) { @Override public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) { - failIfNotIndexed(); - return rangeQuery( - lowerTerm, - upperTerm, - includeLower, - includeUpper, - (lower, upper) -> InetAddressPoint.newRangeQuery(name(), lower, upper) - ); + failIfNotIndexedAndNoDocValues(); + return rangeQuery(lowerTerm, upperTerm, includeLower, includeUpper, (lower, upper) -> { + Query query = InetAddressPoint.newRangeQuery(name(), lower, upper); + if (isSearchable() && hasDocValues()) { + return new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ); + } + if (hasDocValues()) { + return SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ); + } + return query; + }); } /** diff --git a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java index 92ee8067ee4a0..c14b2c92c89c3 100644 --- a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java @@ -38,11 +38,24 @@ import org.apache.lucene.document.FieldType; import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.FuzzyQuery; +import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MultiTermQuery; +import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.RegexpQuery; +import org.apache.lucene.search.TermInSetQuery; +import org.apache.lucene.search.TermRangeQuery; +import org.apache.lucene.search.WildcardQuery; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.automaton.Operations; +import org.opensearch.OpenSearchException; import org.opensearch.common.Nullable; +import 
org.opensearch.common.lucene.BytesRefs; import org.opensearch.common.lucene.Lucene; +import org.opensearch.common.lucene.search.AutomatonQueries; +import org.opensearch.common.unit.Fuzziness; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.analysis.IndexAnalyzers; import org.opensearch.index.analysis.NamedAnalyzer; @@ -62,6 +75,8 @@ import java.util.Objects; import java.util.function.Supplier; +import static org.opensearch.search.SearchService.ALLOW_EXPENSIVE_QUERIES; + /** * A field mapper for keywords. This mapper accepts strings and indexes them as-is. * @@ -317,7 +332,7 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, S @Override public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { if (format != null) { - throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't support formats."); + throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't " + "support formats."); } return new SourceValueFetcher(name(), context, nullValue) { @@ -372,17 +387,226 @@ protected BytesRef indexedValueForSearch(Object value) { return getTextSearchInfo().getSearchAnalyzer().normalize(name(), value.toString()); } + @Override + public Query termsQuery(List<?> values, QueryShardContext context) { + failIfNotIndexedAndNoDocValues(); + // has index and doc_values enabled + if (isSearchable() && hasDocValues()) { + BytesRef[] bytesRefs = new BytesRef[values.size()]; + for (int i = 0; i < bytesRefs.length; i++) { + bytesRefs[i] = indexedValueForSearch(values.get(i)); + } + Query indexQuery = new TermInSetQuery(name(), bytesRefs); + Query dvQuery = new TermInSetQuery(MultiTermQuery.DOC_VALUES_REWRITE, name(), bytesRefs); + return new IndexOrDocValuesQuery(indexQuery, dvQuery); + } + // if we only have doc_values enabled, we construct a new query with doc_values re-written + if (hasDocValues()) { + BytesRef[] bytesRefs = new BytesRef[values.size()]; + for (int i = 0; i < bytesRefs.length; i++) { + bytesRefs[i] = indexedValueForSearch(values.get(i)); + } + return new TermInSetQuery(MultiTermQuery.DOC_VALUES_REWRITE, name(), bytesRefs); + } + // has index enabled, we're going to return the query as is + return super.termsQuery(values, context); + } + + @Override + public Query prefixQuery( + String value, + @Nullable MultiTermQuery.RewriteMethod method, + boolean caseInsensitive, + QueryShardContext context + ) { + if (context.allowExpensiveQueries() == false) { + throw new OpenSearchException( + "[prefix] queries cannot be executed when '" + + ALLOW_EXPENSIVE_QUERIES.getKey() + + "' is set to false. For optimised prefix queries on text " + + "fields please enable [index_prefixes]." 
+ ); + } + failIfNotIndexedAndNoDocValues(); + if (isSearchable() && hasDocValues()) { + Query indexQuery = super.prefixQuery(value, method, caseInsensitive, context); + Query dvQuery = super.prefixQuery(value, MultiTermQuery.DOC_VALUES_REWRITE, caseInsensitive, context); + return new IndexOrDocValuesQuery(indexQuery, dvQuery); + } + if (hasDocValues()) { + if (caseInsensitive) { + return AutomatonQueries.caseInsensitivePrefixQuery( + (new Term(name(), indexedValueForSearch(value))), + MultiTermQuery.DOC_VALUES_REWRITE + ); + } + return new PrefixQuery(new Term(name(), indexedValueForSearch(value)), MultiTermQuery.DOC_VALUES_REWRITE); + } + return super.prefixQuery(value, method, caseInsensitive, context); + } + + @Override + public Query regexpQuery( + String value, + int syntaxFlags, + int matchFlags, + int maxDeterminizedStates, + @Nullable MultiTermQuery.RewriteMethod method, + QueryShardContext context + ) { + if (context.allowExpensiveQueries() == false) { + throw new OpenSearchException( + "[regexp] queries cannot be executed when '" + ALLOW_EXPENSIVE_QUERIES.getKey() + "' is set to " + "false." + ); + } + failIfNotIndexedAndNoDocValues(); + if (isSearchable() && hasDocValues()) { + Query indexQuery = super.regexpQuery(value, syntaxFlags, matchFlags, maxDeterminizedStates, method, context); + Query dvQuery = super.regexpQuery( + value, + syntaxFlags, + matchFlags, + maxDeterminizedStates, + MultiTermQuery.DOC_VALUES_REWRITE, + context + ); + return new IndexOrDocValuesQuery(indexQuery, dvQuery); + } + if (hasDocValues()) { + return new RegexpQuery( + new Term(name(), indexedValueForSearch(value)), + syntaxFlags, + matchFlags, + RegexpQuery.DEFAULT_PROVIDER, + maxDeterminizedStates, + MultiTermQuery.DOC_VALUES_REWRITE + ); + } + return super.regexpQuery(value, syntaxFlags, matchFlags, maxDeterminizedStates, method, context); + } + + @Override + public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) { + if (context.allowExpensiveQueries() == false) { + throw new OpenSearchException( + "[range] queries on [text] or [keyword] fields cannot be executed when '" + + ALLOW_EXPENSIVE_QUERIES.getKey() + + "' is set to false." + ); + } + failIfNotIndexedAndNoDocValues(); + if (isSearchable() && hasDocValues()) { + Query indexQuery = new TermRangeQuery( + name(), + lowerTerm == null ? null : indexedValueForSearch(lowerTerm), + upperTerm == null ? null : indexedValueForSearch(upperTerm), + includeLower, + includeUpper + ); + Query dvQuery = new TermRangeQuery( + name(), + lowerTerm == null ? null : indexedValueForSearch(lowerTerm), + upperTerm == null ? null : indexedValueForSearch(upperTerm), + includeLower, + includeUpper, + MultiTermQuery.DOC_VALUES_REWRITE + ); + return new IndexOrDocValuesQuery(indexQuery, dvQuery); + } + if (hasDocValues()) { + return new TermRangeQuery( + name(), + lowerTerm == null ? null : indexedValueForSearch(lowerTerm), + upperTerm == null ? null : indexedValueForSearch(upperTerm), + includeLower, + includeUpper, + MultiTermQuery.DOC_VALUES_REWRITE + ); + } + return new TermRangeQuery( + name(), + lowerTerm == null ? null : indexedValueForSearch(lowerTerm), + upperTerm == null ? 
null : indexedValueForSearch(upperTerm), + includeLower, + includeUpper + ); + } + + @Override + public Query fuzzyQuery( + Object value, + Fuzziness fuzziness, + int prefixLength, + int maxExpansions, + boolean transpositions, + @Nullable MultiTermQuery.RewriteMethod method, + QueryShardContext context + ) { + failIfNotIndexedAndNoDocValues(); + if (context.allowExpensiveQueries() == false) { + throw new OpenSearchException( + "[fuzzy] queries cannot be executed when '" + ALLOW_EXPENSIVE_QUERIES.getKey() + "' is set to " + "false." + ); + } + if (isSearchable() && hasDocValues()) { + Query indexQuery = super.fuzzyQuery(value, fuzziness, prefixLength, maxExpansions, transpositions, context); + Query dvQuery = super.fuzzyQuery( + value, + fuzziness, + prefixLength, + maxExpansions, + transpositions, + MultiTermQuery.DOC_VALUES_REWRITE, + context + ); + return new IndexOrDocValuesQuery(indexQuery, dvQuery); + } + if (hasDocValues()) { + return new FuzzyQuery( + new Term(name(), indexedValueForSearch(value)), + fuzziness.asDistance(BytesRefs.toString(value)), + prefixLength, + maxExpansions, + transpositions, + MultiTermQuery.DOC_VALUES_REWRITE + ); + } + return super.fuzzyQuery(value, fuzziness, prefixLength, maxExpansions, transpositions, context); + } + @Override public Query wildcardQuery( String value, @Nullable MultiTermQuery.RewriteMethod method, - boolean caseInsensitve, + boolean caseInsensitive, QueryShardContext context ) { - // keyword field types are always normalized, so ignore case sensitivity and force normalize the wildcard + if (context.allowExpensiveQueries() == false) { + throw new OpenSearchException( + "[wildcard] queries cannot be executed when '" + ALLOW_EXPENSIVE_QUERIES.getKey() + "' is set to " + "false." + ); + } + failIfNotIndexedAndNoDocValues(); + // keyword field types are always normalized, so ignore case sensitivity and force normalize the + // wildcard // query text - return super.wildcardQuery(value, method, caseInsensitve, true, context); + if (isSearchable() && hasDocValues()) { + Query indexQuery = super.wildcardQuery(value, method, caseInsensitive, true, context); + Query dvQuery = super.wildcardQuery(value, MultiTermQuery.DOC_VALUES_REWRITE, caseInsensitive, true, context); + return new IndexOrDocValuesQuery(indexQuery, dvQuery); + } + if (hasDocValues()) { + Term term; + value = normalizeWildcardPattern(name(), value, getTextSearchInfo().getSearchAnalyzer()); + term = new Term(name(), value); + if (caseInsensitive) { + return AutomatonQueries.caseInsensitiveWildcardQuery(term, method); + } + return new WildcardQuery(term, Operations.DEFAULT_DETERMINIZE_WORK_LIMIT, MultiTermQuery.DOC_VALUES_REWRITE); + } + return super.wildcardQuery(value, method, caseInsensitive, true, context); } + } private final boolean indexed; @@ -422,8 +646,10 @@ protected KeywordFieldMapper( this.indexAnalyzers = builder.indexAnalyzers; } - /** Values that have more chars than the return value of this method will - * be skipped at parsing time. */ + /** + * Values that have more chars than the return value of this method will + * be skipped at parsing time. 
+ */ public int ignoreAbove() { return ignoreAbove; } diff --git a/server/src/main/java/org/opensearch/index/mapper/LegacyGeoShapeFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/LegacyGeoShapeFieldMapper.java index 6735a267a6995..a5dcb60a86af9 100644 --- a/server/src/main/java/org/opensearch/index/mapper/LegacyGeoShapeFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/LegacyGeoShapeFieldMapper.java @@ -45,7 +45,6 @@ import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Explicit; -import org.opensearch.core.ParseField; import org.opensearch.common.geo.GeoUtils; import org.opensearch.common.geo.GeometryParser; import org.opensearch.common.geo.ShapeRelation; @@ -58,13 +57,13 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.DistanceUnit; import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.support.XContentMapValues; +import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.geometry.Geometry; import org.opensearch.index.query.LegacyGeoShapeQueryProcessor; import org.opensearch.index.query.QueryShardContext; -import org.locationtech.spatial4j.shape.Shape; import java.io.IOException; import java.text.ParseException; @@ -72,6 +71,8 @@ import java.util.List; import java.util.Map; +import org.locationtech.spatial4j.shape.Shape; + /** * FieldMapper for indexing {@link org.locationtech.spatial4j.shape.Shape}s. * <p> diff --git a/server/src/main/java/org/opensearch/index/mapper/LegacyGeoShapeIndexer.java b/server/src/main/java/org/opensearch/index/mapper/LegacyGeoShapeIndexer.java index 8565599404bb0..5f12cb59ebe7a 100644 --- a/server/src/main/java/org/opensearch/index/mapper/LegacyGeoShapeIndexer.java +++ b/server/src/main/java/org/opensearch/index/mapper/LegacyGeoShapeIndexer.java @@ -35,14 +35,15 @@ import org.apache.lucene.index.IndexableField; import org.opensearch.common.geo.XShapeCollection; import org.opensearch.common.geo.builders.ShapeBuilder; -import org.locationtech.spatial4j.shape.Point; -import org.locationtech.spatial4j.shape.Shape; -import org.locationtech.spatial4j.shape.jts.JtsGeometry; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import org.locationtech.spatial4j.shape.Point; +import org.locationtech.spatial4j.shape.Shape; +import org.locationtech.spatial4j.shape.jts.JtsGeometry; + /** * Indexer for legacy prefix trees * diff --git a/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java b/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java index 58aa0bb2576e2..66d4654e543a2 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java +++ b/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java @@ -54,6 +54,7 @@ import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchParseException; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.geo.ShapeRelation; import org.opensearch.common.time.DateMathParser; import org.opensearch.common.unit.Fuzziness; @@ -79,8 +80,9 @@ /** * This defines the core properties and functions to operate on a field. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class MappedFieldType { private final String name; @@ -125,7 +127,7 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, S /** * Create a helper class to fetch field values during the {@link FetchFieldsPhase}. - * + * <p> * New field types must implement this method in order to support the search 'fields' option. Except * for metadata fields, field types should not throw {@link UnsupportedOperationException} since this * could cause a search retrieving multiple fields (like "fields": ["*"]) to fail. @@ -269,6 +271,21 @@ public Query fuzzyQuery( ); } + // Fuzzy Query with re-write method + public Query fuzzyQuery( + Object value, + Fuzziness fuzziness, + int prefixLength, + int maxExpansions, + boolean transpositions, + @Nullable MultiTermQuery.RewriteMethod method, + QueryShardContext context + ) { + throw new IllegalArgumentException( + "Can only use fuzzy queries on keyword and text fields - not on [" + name + "] which is of type [" + typeName() + "]" + ); + } + // Case sensitive form of prefix query public final Query prefixQuery(String value, @Nullable MultiTermQuery.RewriteMethod method, QueryShardContext context) { return prefixQuery(value, method, false, context); @@ -342,18 +359,31 @@ public Query phraseQuery(TokenStream stream, int slop, boolean enablePositionInc ); } + public Query phraseQuery(TokenStream stream, int slop, boolean enablePositionIncrements, QueryShardContext context) throws IOException { + return phraseQuery(stream, slop, enablePositionIncrements); + } + public Query multiPhraseQuery(TokenStream stream, int slop, boolean enablePositionIncrements) throws IOException { throw new IllegalArgumentException( "Can only use phrase queries on text fields - not on [" + name + "] which is of type [" + typeName() + "]" ); } + public Query multiPhraseQuery(TokenStream stream, int slop, boolean enablePositionIncrements, QueryShardContext context) + throws IOException { + return multiPhraseQuery(stream, slop, enablePositionIncrements); + } + public Query phrasePrefixQuery(TokenStream stream, int slop, int maxExpansions) throws IOException { throw new IllegalArgumentException( "Can only use phrase prefix queries on text fields - not on [" + name + "] which is of type [" + typeName() + "]" ); } + public Query phrasePrefixQuery(TokenStream stream, int slop, int maxExpansions, QueryShardContext context) throws IOException { + return phrasePrefixQuery(stream, slop, maxExpansions); + } + public SpanQuery spanPrefixQuery(String value, SpanMultiTermQueryWrapper.SpanRewriteMethod method, QueryShardContext context) { throw new IllegalArgumentException( "Can only use span prefix queries on text fields - not on [" + name + "] which is of type [" + typeName() + "]" @@ -385,8 +415,9 @@ public IntervalsSource intervals(String query, int max_gaps, IntervalMode mode, * An enum used to describe the relation between the range of terms in a * shard when compared with a query range * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Relation { WITHIN, INTERSECTS, @@ -433,6 +464,15 @@ protected final void failIfNotIndexed() { } } + protected final void failIfNotIndexedAndNoDocValues() { + // we fail if a field is both not indexed and does not have doc_values enabled + if (isIndexed == false && hasDocValues() == false) { + throw new IllegalArgumentException( + "Cannot search on field [" + name() + "] since it is both not indexed," + " and 
does not have doc_values enabled." + ); + } + } + public boolean eagerGlobalOrdinals() { return eagerGlobalOrdinals; } @@ -487,7 +527,7 @@ public Map<String, String> meta() { /** * Returns information on how any text in this field is indexed - * + * <p> * Fields that do not support any text-based queries should return * {@link TextSearchInfo#NONE}. Some fields (eg numeric) may support * only simple match queries, and can return diff --git a/server/src/main/java/org/opensearch/index/mapper/Mapper.java b/server/src/main/java/org/opensearch/index/mapper/Mapper.java index 59c647d38f0de..bd5d3f15c0706 100644 --- a/server/src/main/java/org/opensearch/index/mapper/Mapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/Mapper.java @@ -35,6 +35,7 @@ import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; import org.opensearch.core.xcontent.ToXContentFragment; @@ -51,15 +52,17 @@ /** * The foundation OpenSearch mapper * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class Mapper implements ToXContentFragment, Iterable<Mapper> { /** * The builder context used in field mappings * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class BuilderContext { private final Settings indexSettings; private final ContentPath contentPath; @@ -94,8 +97,9 @@ public Version indexCreatedVersionOrDefault(@Nullable Version defaultValue) { /** * Base mapper builder * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public abstract static class Builder<T extends Builder> { public String name; @@ -117,15 +121,17 @@ public String name() { /** * Type parser for the mapper * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface TypeParser { /** * Parser context for the type parser * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") class ParserContext { private final Function<String, SimilarityProvider> similarityLookupService; @@ -190,7 +196,7 @@ public Supplier<QueryShardContext> queryShardContextSupplier() { /** * Gets an optional default date format for date fields that do not have an explicit format set - * + * <p> * If {@code null}, then date fields will default to {@link DateFieldMapper#DEFAULT_DATE_TIME_FORMATTER}. 
*/ public DateFormatter getDateFormatter() { diff --git a/server/src/main/java/org/opensearch/index/mapper/MapperParsingException.java b/server/src/main/java/org/opensearch/index/mapper/MapperParsingException.java index 7dff82e2e3167..881e8c78b919d 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MapperParsingException.java +++ b/server/src/main/java/org/opensearch/index/mapper/MapperParsingException.java @@ -32,6 +32,7 @@ package org.opensearch.index.mapper; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.rest.RestStatus; @@ -40,8 +41,9 @@ /** * Exception thrown if there are any errors parsing mappings * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MapperParsingException extends MapperException { public MapperParsingException(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/opensearch/index/mapper/MapperService.java b/server/src/main/java/org/opensearch/index/mapper/MapperService.java index 521de037e63d0..fc8654216e187 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/opensearch/index/mapper/MapperService.java @@ -35,11 +35,10 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.DelegatingAnalyzerWrapper; -import org.opensearch.core.Assertions; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.MappingMetadata; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.regex.Regex; @@ -47,11 +46,14 @@ import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.LoggingDeprecationHandler; -import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.common.xcontent.XContentContraints; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.Assertions; +import org.opensearch.core.common.Strings; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.AbstractIndexComponent; import org.opensearch.index.IndexSettings; import org.opensearch.index.analysis.AnalysisRegistry; @@ -89,8 +91,9 @@ /** * The core field mapping service * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MapperService extends AbstractIndexComponent implements Closeable { /** @@ -98,6 +101,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable { * * @opensearch.internal */ + @PublicApi(since = "1.0.0") public enum MergeReason { /** * Pre-flight check before sending a mapping update to the cluster-manager @@ -146,13 +150,45 @@ public enum MergeReason { "index.mapping.depth.limit", 20L, 1, + Long.MAX_VALUE, + limit -> { + // Make sure XContent constraints are not exceeded (otherwise content processing will fail) + if (limit > XContentContraints.DEFAULT_MAX_DEPTH) { + throw new IllegalArgumentException( + "The provided value " 
+ + limit + + " of the index setting 'index.mapping.depth.limit' exceeds per-JVM configured limit of " + + XContentContraints.DEFAULT_MAX_DEPTH + + ". Please change the setting value or increase per-JVM limit " + + "using '" + + XContentContraints.DEFAULT_MAX_DEPTH_PROPERTY + + "' system property." + ); + } + }, Property.Dynamic, Property.IndexScope ); public static final Setting<Long> INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING = Setting.longSetting( "index.mapping.field_name_length.limit", - Long.MAX_VALUE, + 50000, 1L, + Long.MAX_VALUE, + limit -> { + // Make sure XContent constraints are not exceeded (otherwise content processing will fail) + if (limit > XContentContraints.DEFAULT_MAX_NAME_LEN) { + throw new IllegalArgumentException( + "The provided value " + + limit + + " of the index setting 'index.mapping.field_name_length.limit' exceeds per-JVM configured limit of " + + XContentContraints.DEFAULT_MAX_NAME_LEN + + ". Please change the setting value or increase per-JVM limit " + + "using '" + + XContentContraints.DEFAULT_MAX_NAME_LEN_PROPERTY + + "' system property." + ); + } + }, Property.Dynamic, Property.IndexScope ); @@ -172,9 +208,6 @@ public enum MergeReason { ); private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(MapperService.class); - static final String DEFAULT_MAPPING_ERROR_MESSAGE = "[_default_] mappings are not allowed on new indices and should no " - + "longer be used. See [https://www.elastic.co/guide/en/elasticsearch/reference/current/breaking-changes-7.0.html" - + "#default-mapping-not-allowed] for more information."; private final IndexAnalyzers indexAnalyzers; @@ -204,6 +237,7 @@ public MapperService( ScriptService scriptService ) { super(indexSettings); + this.indexVersionCreated = indexSettings.getIndexVersionCreated(); this.indexAnalyzers = indexAnalyzers; this.documentParser = new DocumentMapperParser( @@ -228,7 +262,12 @@ public MapperService( this.idFieldDataEnabled = idFieldDataEnabled; if (INDEX_MAPPER_DYNAMIC_SETTING.exists(indexSettings.getSettings())) { - throw new IllegalArgumentException("Setting " + INDEX_MAPPER_DYNAMIC_SETTING.getKey() + " was removed after version 6.0.0"); + deprecationLogger.deprecate( + index().getName() + INDEX_MAPPER_DYNAMIC_SETTING.getKey(), + "Index [{}] has setting [{}] that is not supported in OpenSearch, its value will be ignored.", + index().getName(), + INDEX_MAPPER_DYNAMIC_SETTING.getKey() + ); } } @@ -253,7 +292,7 @@ public DocumentMapperParser documentMapperParser() { */ public static Map<String, Object> parseMapping(NamedXContentRegistry xContentRegistry, String mappingSource) throws IOException { try ( - XContentParser parser = XContentType.JSON.xContent() + XContentParser parser = MediaTypeRegistry.JSON.xContent() .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, mappingSource) ) { return parser.map(); @@ -348,7 +387,7 @@ private void assertMappingVersion( + "to be the same as new mapping [" + newSource + "]"; - final CompressedXContent mapperSource = new CompressedXContent(Strings.toString(XContentType.JSON, mapper)); + final CompressedXContent mapperSource = new CompressedXContent(Strings.toString(MediaTypeRegistry.JSON, mapper)); assert currentSource.equals(mapperSource) : "expected current mapping [" + currentSource + "] for type [" @@ -394,7 +433,7 @@ public void merge(Map<String, Map<String, Object>> mappings, MergeReason reason) try { mappingSourcesCompressed.put( entry.getKey(), - new 
CompressedXContent(Strings.toString(XContentFactory.jsonBuilder().map(entry.getValue()))) + new CompressedXContent(XContentFactory.jsonBuilder().map(entry.getValue()).toString()) ); } catch (Exception e) { throw new MapperParsingException("Failed to parse mapping [{}]: {}", e, entry.getKey(), e.getMessage()); @@ -405,7 +444,7 @@ public void merge(Map<String, Map<String, Object>> mappings, MergeReason reason) } public void merge(String type, Map<String, Object> mappings, MergeReason reason) throws IOException { - CompressedXContent content = new CompressedXContent(Strings.toString(XContentFactory.jsonBuilder().map(mappings))); + CompressedXContent content = new CompressedXContent(XContentFactory.jsonBuilder().map(mappings).toString()); internalMerge(Collections.singletonMap(type, content), reason); } @@ -543,7 +582,7 @@ public static boolean isMappingSourceTyped(String type, Map<String, Object> mapp } public static boolean isMappingSourceTyped(String type, CompressedXContent mappingSource) { - Map<String, Object> root = XContentHelper.convertToMap(mappingSource.compressedReference(), true, XContentType.JSON).v2(); + Map<String, Object> root = XContentHelper.convertToMap(mappingSource.compressedReference(), true, MediaTypeRegistry.JSON).v2(); return isMappingSourceTyped(type, root); } diff --git a/server/src/main/java/org/opensearch/index/mapper/Mapping.java b/server/src/main/java/org/opensearch/index/mapper/Mapping.java index 968e489d72afb..20bd290ec8af4 100644 --- a/server/src/main/java/org/opensearch/index/mapper/Mapping.java +++ b/server/src/main/java/org/opensearch/index/mapper/Mapping.java @@ -33,12 +33,12 @@ package org.opensearch.index.mapper; import org.opensearch.Version; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.index.mapper.MapperService.MergeReason; import java.io.IOException; @@ -55,8 +55,9 @@ * Wrapper around everything that defines a mapping, without references to * utility classes like MapperService, ... 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class Mapping implements ToXContentFragment { final Version indexCreated; @@ -185,7 +186,7 @@ public String toString() { try { XContentBuilder builder = XContentFactory.jsonBuilder().startObject(); toXContent(builder, new ToXContent.MapParams(emptyMap())); - return Strings.toString(builder.endObject()); + return builder.endObject().toString(); } catch (IOException bogus) { throw new UncheckedIOException(bogus); } diff --git a/server/src/main/java/org/opensearch/index/mapper/MappingLookup.java b/server/src/main/java/org/opensearch/index/mapper/MappingLookup.java index 024f4b71584bf..fdebe24327ca0 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MappingLookup.java +++ b/server/src/main/java/org/opensearch/index/mapper/MappingLookup.java @@ -34,6 +34,7 @@ import org.apache.lucene.analysis.Analyzer; import org.opensearch.cluster.metadata.DataStream; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.IndexSettings; import org.opensearch.index.analysis.FieldNameAnalyzer; @@ -49,8 +50,9 @@ /** * Looks up a mapping for a field * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class MappingLookup implements Iterable<Mapper> { /** Full field name to mapper */ @@ -157,14 +159,14 @@ public MappingLookup( /** * Returns the leaf mapper associated with this field name. Note that the returned mapper * could be either a concrete {@link FieldMapper}, or a {@link FieldAliasMapper}. - * + * <p> * To access a field's type information, {@link MapperService#fieldType} should be used instead. */ public Mapper getMapper(String field) { return fieldMappers.get(field); } - public FieldTypeLookup fieldTypes() { + FieldTypeLookup fieldTypes() { return fieldTypeLookup; } diff --git a/server/src/main/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapper.java new file mode 100644 index 0000000000000..fb97f8c309a70 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapper.java @@ -0,0 +1,312 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; +import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.MultiPhraseQuery; +import org.apache.lucene.search.PhraseQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermQuery; +import org.opensearch.Version; +import org.opensearch.common.lucene.search.MultiPhrasePrefixQuery; +import org.opensearch.index.analysis.IndexAnalyzers; +import org.opensearch.index.analysis.NamedAnalyzer; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.query.SourceFieldMatchQuery; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.function.Function; + +/** + * A specialized type of TextFieldMapper which disables the positions and norms to save on storage and executes phrase queries, which requires + * positional data, in a slightly less efficient manner using the {@link org.opensearch.index.query.SourceFieldMatchQuery}. + */ +public class MatchOnlyTextFieldMapper extends TextFieldMapper { + + public static final FieldType FIELD_TYPE = new FieldType(); + public static final String CONTENT_TYPE = "match_only_text"; + private final String indexOptions = FieldMapper.indexOptionToString(FIELD_TYPE.indexOptions()); + private final boolean norms = FIELD_TYPE.omitNorms() == false; + + @Override + protected String contentType() { + return CONTENT_TYPE; + } + + static { + FIELD_TYPE.setTokenized(true); + FIELD_TYPE.setStored(false); + FIELD_TYPE.setStoreTermVectors(false); + FIELD_TYPE.setOmitNorms(true); + FIELD_TYPE.setIndexOptions(IndexOptions.DOCS); + FIELD_TYPE.freeze(); + } + + public static final TypeParser PARSER = new TypeParser((n, c) -> new Builder(n, c.indexVersionCreated(), c.getIndexAnalyzers())); + + protected MatchOnlyTextFieldMapper( + String simpleName, + FieldType fieldType, + MatchOnlyTextFieldType mappedFieldType, + TextFieldMapper.PrefixFieldMapper prefixFieldMapper, + TextFieldMapper.PhraseFieldMapper phraseFieldMapper, + MultiFields multiFields, + CopyTo copyTo, + Builder builder + ) { + + super(simpleName, fieldType, mappedFieldType, prefixFieldMapper, phraseFieldMapper, multiFields, copyTo, builder); + } + + @Override + public ParametrizedFieldMapper.Builder getMergeBuilder() { + return new Builder(simpleName(), this.indexCreatedVersion, this.indexAnalyzers).init(this); + } + + /** + * Builder class for constructing the MatchOnlyTextFieldMapper. 
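+     * <p>
+     * A minimal illustrative mapping that exercises this builder (the field name
+     * "message" is an example, not part of this change):
+     * <pre>{@code
+     * {
+     *   "mappings": {
+     *     "properties": {
+     *       "message": { "type": "match_only_text" }
+     *     }
+     *   }
+     * }
+     * }</pre>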
+     */
+    public static class Builder extends TextFieldMapper.Builder {
+        final Parameter<String> indexOptions = indexOptions(m -> ((MatchOnlyTextFieldMapper) m).indexOptions);
+
+        private static Parameter<String> indexOptions(Function<FieldMapper, String> initializer) {
+            return Parameter.restrictedStringParam("index_options", false, initializer, "docs");
+        }
+
+        final Parameter<Boolean> norms = norms(m -> ((MatchOnlyTextFieldMapper) m).norms);
+        final Parameter<Boolean> indexPhrases = Parameter.boolParam(
+            "index_phrases",
+            false,
+            m -> ((MatchOnlyTextFieldType) m.mappedFieldType).indexPhrases,
+            false
+        ).setValidator(v -> {
+            if (v == true) {
+                throw new MapperParsingException("Index phrases cannot be enabled for match_only_text field. Use text field instead");
+            }
+        });
+
+        final Parameter<PrefixConfig> indexPrefixes = new Parameter<>(
+            "index_prefixes",
+            false,
+            () -> null,
+            TextFieldMapper::parsePrefixConfig,
+            m -> Optional.ofNullable(((MatchOnlyTextFieldType) m.mappedFieldType).prefixFieldType)
+                .map(p -> new PrefixConfig(p.minChars, p.maxChars))
+                .orElse(null)
+        ).acceptsNull().setValidator(v -> {
+            if (v != null) {
+                throw new MapperParsingException("Index prefixes cannot be enabled for match_only_text field. Use text field instead");
+            }
+        });
+
+        private static Parameter<Boolean> norms(Function<FieldMapper, Boolean> initializer) {
+            return Parameter.boolParam("norms", false, initializer, false)
+                .setMergeValidator((o, n) -> o == n || (o && n == false))
+                .setValidator(v -> {
+                    if (v == true) {
+                        throw new MapperParsingException("Norms cannot be enabled for match_only_text field");
+                    }
+                });
+        }
+
+        public Builder(String name, IndexAnalyzers indexAnalyzers) {
+            super(name, indexAnalyzers);
+        }
+
+        public Builder(String name, Version indexCreatedVersion, IndexAnalyzers indexAnalyzers) {
+            super(name, indexCreatedVersion, indexAnalyzers);
+        }
+
+        @Override
+        public MatchOnlyTextFieldMapper build(BuilderContext context) {
+            FieldType fieldType = TextParams.buildFieldType(index, store, indexOptions, norms, termVectors);
+            MatchOnlyTextFieldType tft = buildFieldType(fieldType, context);
+            return new MatchOnlyTextFieldMapper(
+                name,
+                fieldType,
+                tft,
+                buildPrefixMapper(context, fieldType, tft),
+                buildPhraseMapper(fieldType, tft),
+                multiFieldsBuilder.build(this, context),
+                copyTo.build(),
+                this
+            );
+        }
+
+        @Override
+        protected MatchOnlyTextFieldType buildFieldType(FieldType fieldType, BuilderContext context) {
+            NamedAnalyzer indexAnalyzer = analyzers.getIndexAnalyzer();
+            NamedAnalyzer searchAnalyzer = analyzers.getSearchAnalyzer();
+            NamedAnalyzer searchQuoteAnalyzer = analyzers.getSearchQuoteAnalyzer();
+
+            if (fieldType.indexOptions().compareTo(IndexOptions.DOCS) > 0) {
+                throw new IllegalArgumentException("Cannot set position_increment_gap on field [" + name + "] without positions enabled");
+            }
+            if (positionIncrementGap.get() != POSITION_INCREMENT_GAP_USE_ANALYZER) {
+                if (fieldType.indexOptions().compareTo(IndexOptions.DOCS) < 0) {
+                    throw new IllegalArgumentException(
+                        "Cannot set position_increment_gap on field [" + name + "] without indexing enabled"
+                    );
+                }
+                indexAnalyzer = new NamedAnalyzer(indexAnalyzer, positionIncrementGap.get());
+                searchAnalyzer = new NamedAnalyzer(searchAnalyzer, positionIncrementGap.get());
+                searchQuoteAnalyzer = new NamedAnalyzer(searchQuoteAnalyzer, positionIncrementGap.get());
+            }
+            TextSearchInfo tsi = new TextSearchInfo(fieldType, similarity.getValue(), searchAnalyzer, searchQuoteAnalyzer);
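+            // Positions are unavailable by construction here (IndexOptions.DOCS), so the
+            // field type built below compensates at query time: phrase-style queries are
+            // rewritten into term conjunctions and re-verified against _source through
+            // SourceFieldMatchQuery (see the query overrides in MatchOnlyTextFieldType).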
MatchOnlyTextFieldType ft = new MatchOnlyTextFieldType( + buildFullName(context), + index.getValue(), + fieldType.stored(), + tsi, + meta.getValue() + ); + ft.setIndexAnalyzer(indexAnalyzer); + ft.setEagerGlobalOrdinals(eagerGlobalOrdinals.getValue()); + ft.setBoost(boost.getValue()); + if (fieldData.getValue()) { + ft.setFielddata(true, freqFilter.getValue()); + } + return ft; + } + + @Override + protected List<Parameter<?>> getParameters() { + return Arrays.asList( + index, + store, + indexOptions, + norms, + termVectors, + analyzers.indexAnalyzer, + analyzers.searchAnalyzer, + analyzers.searchQuoteAnalyzer, + similarity, + positionIncrementGap, + fieldData, + freqFilter, + eagerGlobalOrdinals, + indexPhrases, + indexPrefixes, + boost, + meta + ); + } + } + + /** + * The specific field type for MatchOnlyTextFieldMapper + * + * @opensearch.internal + */ + public static final class MatchOnlyTextFieldType extends TextFieldType { + private final boolean indexPhrases = false; + + private PrefixFieldType prefixFieldType; + + @Override + public String typeName() { + return CONTENT_TYPE; + } + + public MatchOnlyTextFieldType(String name, boolean indexed, boolean stored, TextSearchInfo tsi, Map<String, String> meta) { + super(name, indexed, stored, tsi, meta); + } + + @Override + public Query phraseQuery(TokenStream stream, int slop, boolean enablePosIncrements, QueryShardContext context) throws IOException { + PhraseQuery phraseQuery = (PhraseQuery) super.phraseQuery(stream, slop, enablePosIncrements); + BooleanQuery.Builder builder = new BooleanQuery.Builder(); + for (Term term : phraseQuery.getTerms()) { + builder.add(new TermQuery(term), BooleanClause.Occur.FILTER); + } + return new SourceFieldMatchQuery(builder.build(), phraseQuery, this, context); + } + + @Override + public Query multiPhraseQuery(TokenStream stream, int slop, boolean enablePositionIncrements, QueryShardContext context) + throws IOException { + MultiPhraseQuery multiPhraseQuery = (MultiPhraseQuery) super.multiPhraseQuery(stream, slop, enablePositionIncrements); + BooleanQuery.Builder builder = new BooleanQuery.Builder(); + for (Term[] terms : multiPhraseQuery.getTermArrays()) { + if (terms.length > 1) { + // Multiple terms in the same position, creating a disjunction query for it and + // adding it to conjunction query + BooleanQuery.Builder disjunctions = new BooleanQuery.Builder(); + for (Term term : terms) { + disjunctions.add(new TermQuery(term), BooleanClause.Occur.SHOULD); + } + builder.add(disjunctions.build(), BooleanClause.Occur.FILTER); + } else { + builder.add(new TermQuery(terms[0]), BooleanClause.Occur.FILTER); + } + } + return new SourceFieldMatchQuery(builder.build(), multiPhraseQuery, this, context); + } + + @Override + public Query phrasePrefixQuery(TokenStream stream, int slop, int maxExpansions, QueryShardContext context) throws IOException { + Query phrasePrefixQuery = super.phrasePrefixQuery(stream, slop, maxExpansions); + List<List<Term>> termArray = getTermsFromTokenStream(stream); + BooleanQuery.Builder builder = new BooleanQuery.Builder(); + for (int i = 0; i < termArray.size(); i++) { + if (i == termArray.size() - 1) { + // last element of the term Array is a prefix, thus creating a prefix query for it and adding it to + // conjunction query + MultiPhrasePrefixQuery mqb = new MultiPhrasePrefixQuery(name()); + mqb.add(termArray.get(i).toArray(new Term[0])); + builder.add(mqb, BooleanClause.Occur.FILTER); + } else { + if (termArray.get(i).size() > 1) { + // multiple terms in the same position, 
creating a disjunction query for it and + // adding it to conjunction query + BooleanQuery.Builder disjunctions = new BooleanQuery.Builder(); + for (Term term : termArray.get(i)) { + disjunctions.add(new TermQuery(term), BooleanClause.Occur.SHOULD); + } + builder.add(disjunctions.build(), BooleanClause.Occur.FILTER); + } else { + builder.add(new TermQuery(termArray.get(i).get(0)), BooleanClause.Occur.FILTER); + } + } + } + return new SourceFieldMatchQuery(builder.build(), phrasePrefixQuery, this, context); + } + + private List<List<Term>> getTermsFromTokenStream(TokenStream stream) throws IOException { + final List<List<Term>> termArray = new ArrayList<>(); + TermToBytesRefAttribute termAtt = stream.getAttribute(TermToBytesRefAttribute.class); + PositionIncrementAttribute posIncrAtt = stream.getAttribute(PositionIncrementAttribute.class); + List<Term> currentTerms = new ArrayList<>(); + stream.reset(); + while (stream.incrementToken()) { + if (posIncrAtt.getPositionIncrement() != 0) { + if (currentTerms.isEmpty() == false) { + termArray.add(List.copyOf(currentTerms)); + } + currentTerms.clear(); + } + currentTerms.add(new Term(name(), termAtt.getBytesRef())); + } + termArray.add(List.copyOf(currentTerms)); + return termArray; + } + } +} diff --git a/server/src/main/java/org/opensearch/index/mapper/MetadataFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/MetadataFieldMapper.java index 6052bf5496de8..c98d0c8242078 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MetadataFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/MetadataFieldMapper.java @@ -33,8 +33,9 @@ package org.opensearch.index.mapper; import org.opensearch.common.Explicit; -import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.support.XContentMapValues; +import org.opensearch.core.xcontent.XContentBuilder; import java.io.IOException; import java.util.Map; @@ -43,15 +44,17 @@ /** * A mapper for a builtin field containing metadata about a document. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class MetadataFieldMapper extends ParametrizedFieldMapper { /** * Type parser for the field mapper * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface TypeParser extends Mapper.TypeParser { @Override @@ -70,7 +73,7 @@ public interface TypeParser extends Mapper.TypeParser { /** * Declares an updateable boolean parameter for a metadata field - * + * <p> * We need to distinguish between explicit configuration and default value for metadata * fields, because mapping updates will carry over the previous metadata values if a * metadata field is not explicitly declared in the update. 
A standard boolean @@ -152,8 +155,9 @@ public MetadataFieldMapper getDefault(MappedFieldType defaultFieldType, ParserCo /** * Base builder for internal metadata fields * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public abstract static class Builder extends ParametrizedFieldMapper.Builder { protected Builder(String name) { diff --git a/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java index beefe48d46b57..eb3a99b0e0388 100644 --- a/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java @@ -34,6 +34,7 @@ import com.fasterxml.jackson.core.JsonParseException; import com.fasterxml.jackson.core.exc.InputCoercionException; + import org.apache.lucene.document.DoublePoint; import org.apache.lucene.document.Field; import org.apache.lucene.document.FloatPoint; @@ -43,9 +44,9 @@ import org.apache.lucene.document.StoredField; import org.apache.lucene.sandbox.document.BigIntegerPoint; import org.apache.lucene.sandbox.document.HalfFloatPoint; -import org.apache.lucene.search.IndexSortSortedNumericDocValuesRangeQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.IndexOrDocValuesQuery; +import org.apache.lucene.search.IndexSortSortedNumericDocValuesRangeQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; @@ -60,6 +61,7 @@ import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.xcontent.XContentParser.Token; import org.opensearch.index.document.SortedUnsignedLongDocValuesRangeQuery; +import org.opensearch.index.document.SortedUnsignedLongDocValuesSetQuery; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexNumericFieldData.NumericType; import org.opensearch.index.fielddata.plain.SortedNumericIndexFieldData; @@ -200,18 +202,39 @@ public Float parse(XContentParser parser, boolean coerce) throws IOException { } @Override - public Query termQuery(String field, Object value) { + public Query termQuery(String field, Object value, boolean hasDocValues, boolean isSearchable) { float v = parse(value, false); + if (isSearchable && hasDocValues) { + Query query = HalfFloatPoint.newExactQuery(field, v); + Query dvQuery = SortedNumericDocValuesField.newSlowExactQuery(field, HalfFloatPoint.halfFloatToSortableShort(v)); + return new IndexOrDocValuesQuery(query, dvQuery); + } + if (hasDocValues) { + return SortedNumericDocValuesField.newSlowExactQuery(field, HalfFloatPoint.halfFloatToSortableShort(v)); + } return HalfFloatPoint.newExactQuery(field, v); } @Override - public Query termsQuery(String field, List<Object> values) { + public Query termsQuery(String field, List<Object> values, boolean hasDocValues, boolean isSearchable) { float[] v = new float[values.size()]; + long points[] = new long[v.length]; for (int i = 0; i < values.size(); ++i) { v[i] = parse(values.get(i), false); + if (hasDocValues) { + points[i] = HalfFloatPoint.halfFloatToSortableShort(v[i]); + } + } + if (isSearchable && hasDocValues) { + Query query = HalfFloatPoint.newSetQuery(field, v); + Query dvQuery = SortedNumericDocValuesField.newSlowSetQuery(field, points); + return new IndexOrDocValuesQuery(query, dvQuery); + } + if (hasDocValues) { + return SortedNumericDocValuesField.newSlowSetQuery(field, points); } return 
HalfFloatPoint.newSetQuery(field, v); + } @Override @@ -222,6 +245,7 @@ public Query rangeQuery( boolean includeLower, boolean includeUpper, boolean hasDocValues, + boolean isSearchable, QueryShardContext context ) { float l = Float.NEGATIVE_INFINITY; @@ -240,16 +264,23 @@ public Query rangeQuery( } u = HalfFloatPoint.nextDown(u); } - Query query = HalfFloatPoint.newRangeQuery(field, l, u); - if (hasDocValues) { + if (isSearchable && hasDocValues) { + Query query = HalfFloatPoint.newRangeQuery(field, l, u); Query dvQuery = SortedNumericDocValuesField.newSlowRangeQuery( field, HalfFloatPoint.halfFloatToSortableShort(l), HalfFloatPoint.halfFloatToSortableShort(u) ); - query = new IndexOrDocValuesQuery(query, dvQuery); + return new IndexOrDocValuesQuery(query, dvQuery); + } + if (hasDocValues) { + return SortedNumericDocValuesField.newSlowRangeQuery( + field, + HalfFloatPoint.halfFloatToSortableShort(l), + HalfFloatPoint.halfFloatToSortableShort(u) + ); } - return query; + return HalfFloatPoint.newRangeQuery(field, l, u); } @Override @@ -308,16 +339,37 @@ public Float parse(XContentParser parser, boolean coerce) throws IOException { } @Override - public Query termQuery(String field, Object value) { + public Query termQuery(String field, Object value, boolean hasDocValues, boolean isSearchable) { float v = parse(value, false); + if (isSearchable && hasDocValues) { + Query query = FloatPoint.newExactQuery(field, v); + Query dvQuery = SortedNumericDocValuesField.newSlowExactQuery(field, NumericUtils.floatToSortableInt(v)); + return new IndexOrDocValuesQuery(query, dvQuery); + } + if (hasDocValues) { + return SortedNumericDocValuesField.newSlowExactQuery(field, NumericUtils.floatToSortableInt(v)); + } return FloatPoint.newExactQuery(field, v); } @Override - public Query termsQuery(String field, List<Object> values) { + public Query termsQuery(String field, List<Object> values, boolean hasDocValues, boolean isSearchable) { float[] v = new float[values.size()]; + long points[] = new long[v.length]; for (int i = 0; i < values.size(); ++i) { v[i] = parse(values.get(i), false); + if (hasDocValues) { + points[i] = NumericUtils.floatToSortableInt(v[i]); + } + } + if (isSearchable && hasDocValues) { + return new IndexOrDocValuesQuery( + FloatPoint.newSetQuery(field, v), + SortedNumericDocValuesField.newSlowSetQuery(field, points) + ); + } + if (hasDocValues) { + return SortedNumericDocValuesField.newSlowSetQuery(field, points); } return FloatPoint.newSetQuery(field, v); } @@ -330,6 +382,7 @@ public Query rangeQuery( boolean includeLower, boolean includeUpper, boolean hasDocValues, + boolean isSearchable, QueryShardContext context ) { float l = Float.NEGATIVE_INFINITY; @@ -346,16 +399,23 @@ public Query rangeQuery( u = FloatPoint.nextDown(u); } } - Query query = FloatPoint.newRangeQuery(field, l, u); - if (hasDocValues) { + if (isSearchable && hasDocValues) { + Query query = FloatPoint.newRangeQuery(field, l, u); Query dvQuery = SortedNumericDocValuesField.newSlowRangeQuery( field, NumericUtils.floatToSortableInt(l), NumericUtils.floatToSortableInt(u) ); - query = new IndexOrDocValuesQuery(query, dvQuery); + return new IndexOrDocValuesQuery(query, dvQuery); } - return query; + if (hasDocValues) { + return SortedNumericDocValuesField.newSlowRangeQuery( + field, + NumericUtils.floatToSortableInt(l), + NumericUtils.floatToSortableInt(u) + ); + } + return FloatPoint.newRangeQuery(field, l, u); } @Override @@ -405,16 +465,37 @@ public Double parse(XContentParser parser, boolean coerce) throws IOException { } 
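+        // The selection pattern above repeats for every numeric type in this file:
+        // indexed + doc_values -> IndexOrDocValuesQuery, letting Lucene pick the cheaper
+        // execution per segment; doc_values only -> the "slow" doc-values variant;
+        // indexed only -> the plain point query.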
@Override - public Query termQuery(String field, Object value) { + public Query termQuery(String field, Object value, boolean hasDocValues, boolean isSearchable) { double v = parse(value, false); + if (isSearchable && hasDocValues) { + Query query = DoublePoint.newExactQuery(field, v); + Query dvQuery = SortedNumericDocValuesField.newSlowExactQuery(field, NumericUtils.doubleToSortableLong(v)); + return new IndexOrDocValuesQuery(query, dvQuery); + } + if (hasDocValues) { + return SortedNumericDocValuesField.newSlowExactQuery(field, NumericUtils.doubleToSortableLong(v)); + } return DoublePoint.newExactQuery(field, v); } @Override - public Query termsQuery(String field, List<Object> values) { + public Query termsQuery(String field, List<Object> values, boolean hasDocValues, boolean isSearchable) { double[] v = new double[values.size()]; + long points[] = new long[v.length]; for (int i = 0; i < values.size(); ++i) { v[i] = parse(values.get(i), false); + if (hasDocValues) { + points[i] = NumericUtils.doubleToSortableLong(v[i]); + } + } + if (isSearchable && hasDocValues) { + return new IndexOrDocValuesQuery( + DoublePoint.newSetQuery(field, v), + SortedNumericDocValuesField.newSlowSetQuery(field, points) + ); + } + if (hasDocValues) { + return SortedNumericDocValuesField.newSlowSetQuery(field, points); } return DoublePoint.newSetQuery(field, v); } @@ -427,19 +508,27 @@ public Query rangeQuery( boolean includeLower, boolean includeUpper, boolean hasDocValues, + boolean isSearchable, QueryShardContext context ) { return doubleRangeQuery(lowerTerm, upperTerm, includeLower, includeUpper, (l, u) -> { - Query query = DoublePoint.newRangeQuery(field, l, u); - if (hasDocValues) { + if (isSearchable && hasDocValues) { + Query query = DoublePoint.newRangeQuery(field, l, u); Query dvQuery = SortedNumericDocValuesField.newSlowRangeQuery( field, NumericUtils.doubleToSortableLong(l), NumericUtils.doubleToSortableLong(u) ); - query = new IndexOrDocValuesQuery(query, dvQuery); + return new IndexOrDocValuesQuery(query, dvQuery); } - return query; + if (hasDocValues) { + return SortedNumericDocValuesField.newSlowRangeQuery( + field, + NumericUtils.doubleToSortableLong(l), + NumericUtils.doubleToSortableLong(u) + ); + } + return DoublePoint.newRangeQuery(field, l, u); }); } @@ -503,13 +592,13 @@ public Short parse(XContentParser parser, boolean coerce) throws IOException { } @Override - public Query termQuery(String field, Object value) { - return INTEGER.termQuery(field, value); + public Query termQuery(String field, Object value, boolean hasDocValues, boolean isSearchable) { + return INTEGER.termQuery(field, value, hasDocValues, isSearchable); } @Override - public Query termsQuery(String field, List<Object> values) { - return INTEGER.termsQuery(field, values); + public Query termsQuery(String field, List<Object> values, boolean hasDocValues, boolean isSearchable) { + return INTEGER.termsQuery(field, values, hasDocValues, isSearchable); } @Override @@ -520,9 +609,10 @@ public Query rangeQuery( boolean includeLower, boolean includeUpper, boolean hasDocValues, + boolean isSearchable, QueryShardContext context ) { - return INTEGER.rangeQuery(field, lowerTerm, upperTerm, includeLower, includeUpper, hasDocValues, context); + return INTEGER.rangeQuery(field, lowerTerm, upperTerm, includeLower, includeUpper, hasDocValues, isSearchable, context); } @Override @@ -570,13 +660,13 @@ public Short parse(XContentParser parser, boolean coerce) throws IOException { } @Override - public Query termQuery(String field, Object 
value) { - return INTEGER.termQuery(field, value); + public Query termQuery(String field, Object value, boolean hasDocValues, boolean isSearchable) { + return INTEGER.termQuery(field, value, hasDocValues, isSearchable); } @Override - public Query termsQuery(String field, List<Object> values) { - return INTEGER.termsQuery(field, values); + public Query termsQuery(String field, List<Object> values, boolean hasDocValues, boolean isSearchable) { + return INTEGER.termsQuery(field, values, hasDocValues, isSearchable); } @Override @@ -587,9 +677,10 @@ public Query rangeQuery( boolean includeLower, boolean includeUpper, boolean hasDocValues, + boolean isSearchable, QueryShardContext context ) { - return INTEGER.rangeQuery(field, lowerTerm, upperTerm, includeLower, includeUpper, hasDocValues, context); + return INTEGER.rangeQuery(field, lowerTerm, upperTerm, includeLower, includeUpper, hasDocValues, isSearchable, context); } @Override @@ -637,16 +728,24 @@ public Integer parse(XContentParser parser, boolean coerce) throws IOException { } @Override - public Query termQuery(String field, Object value) { + public Query termQuery(String field, Object value, boolean hasDocValues, boolean isSearchable) { if (hasDecimalPart(value)) { return Queries.newMatchNoDocsQuery("Value [" + value + "] has a decimal part"); } int v = parse(value, true); + if (isSearchable && hasDocValues) { + Query query = IntPoint.newExactQuery(field, v); + Query dvQuery = SortedNumericDocValuesField.newSlowExactQuery(field, v); + return new IndexOrDocValuesQuery(query, dvQuery); + } + if (hasDocValues) { + return SortedNumericDocValuesField.newSlowExactQuery(field, v); + } return IntPoint.newExactQuery(field, v); } @Override - public Query termsQuery(String field, List<Object> values) { + public Query termsQuery(String field, List<Object> values, boolean hasDocValues, boolean isSearchable) { int[] v = new int[values.size()]; int upTo = 0; @@ -663,6 +762,21 @@ public Query termsQuery(String field, List<Object> values) { if (upTo != v.length) { v = Arrays.copyOf(v, upTo); } + long points[] = new long[v.length]; + if (hasDocValues) { + for (int i = 0; i < v.length; i++) { + points[i] = v[i]; + } + } + if (isSearchable && hasDocValues) { + return new IndexOrDocValuesQuery( + IntPoint.newSetQuery(field, v), + SortedNumericDocValuesField.newSlowSetQuery(field, points) + ); + } + if (hasDocValues) { + return SortedNumericDocValuesField.newSlowSetQuery(field, points); + } return IntPoint.newSetQuery(field, v); } @@ -674,6 +788,7 @@ public Query rangeQuery( boolean includeLower, boolean includeUpper, boolean hasDocValues, + boolean isSearchable, QueryShardContext context ) { int l = Integer.MIN_VALUE; @@ -703,15 +818,23 @@ public Query rangeQuery( --u; } } - Query query = IntPoint.newRangeQuery(field, l, u); - if (hasDocValues) { + if (isSearchable && hasDocValues) { + Query query = IntPoint.newRangeQuery(field, l, u); Query dvQuery = SortedNumericDocValuesField.newSlowRangeQuery(field, l, u); query = new IndexOrDocValuesQuery(query, dvQuery); if (context.indexSortedOnField(field)) { query = new IndexSortSortedNumericDocValuesRangeQuery(field, l, u, query); } + return query; } - return query; + if (hasDocValues) { + Query query = SortedNumericDocValuesField.newSlowRangeQuery(field, l, u); + if (context.indexSortedOnField(field)) { + query = new IndexSortSortedNumericDocValuesRangeQuery(field, l, u, query); + } + return query; + } + return IntPoint.newRangeQuery(field, l, u); } @Override @@ -751,17 +874,28 @@ public Long parse(XContentParser 
parser, boolean coerce) throws IOException { } @Override - public Query termQuery(String field, Object value) { + public Query termQuery(String field, Object value, boolean hasDocValues, boolean isSearchable) { if (hasDecimalPart(value)) { return Queries.newMatchNoDocsQuery("Value [" + value + "] has a decimal part"); } long v = parse(value, true); + if (isSearchable && hasDocValues) { + Query query = LongPoint.newExactQuery(field, v); + Query dvQuery = SortedNumericDocValuesField.newSlowExactQuery(field, v); + return new IndexOrDocValuesQuery(query, dvQuery); + + } + if (hasDocValues) { + return SortedNumericDocValuesField.newSlowExactQuery(field, v); + + } return LongPoint.newExactQuery(field, v); } @Override - public Query termsQuery(String field, List<Object> values) { + public Query termsQuery(String field, List<Object> values, boolean hasDocValues, boolean isSearchable) { long[] v = new long[values.size()]; + int upTo = 0; for (int i = 0; i < values.size(); i++) { @@ -777,6 +911,16 @@ public Query termsQuery(String field, List<Object> values) { if (upTo != v.length) { v = Arrays.copyOf(v, upTo); } + if (isSearchable && hasDocValues) { + return new IndexOrDocValuesQuery( + LongPoint.newSetQuery(field, v), + SortedNumericDocValuesField.newSlowSetQuery(field, v) + ); + } + if (hasDocValues) { + return SortedNumericDocValuesField.newSlowSetQuery(field, v); + + } return LongPoint.newSetQuery(field, v); } @@ -788,18 +932,28 @@ public Query rangeQuery( boolean includeLower, boolean includeUpper, boolean hasDocValues, + boolean isSearchable, QueryShardContext context ) { return longRangeQuery(lowerTerm, upperTerm, includeLower, includeUpper, (l, u) -> { - Query query = LongPoint.newRangeQuery(field, l, u); - if (hasDocValues) { + if (isSearchable && hasDocValues) { + Query query = LongPoint.newRangeQuery(field, l, u); Query dvQuery = SortedNumericDocValuesField.newSlowRangeQuery(field, l, u); query = new IndexOrDocValuesQuery(query, dvQuery); if (context.indexSortedOnField(field)) { query = new IndexSortSortedNumericDocValuesRangeQuery(field, l, u, query); } + return query; } - return query; + if (hasDocValues) { + Query query = SortedNumericDocValuesField.newSlowRangeQuery(field, l, u); + if (context.indexSortedOnField(field)) { + query = new IndexSortSortedNumericDocValuesRangeQuery(field, l, u, query); + } + return query; + } + return LongPoint.newRangeQuery(field, l, u); + }); } @@ -840,16 +994,24 @@ public BigInteger parse(XContentParser parser, boolean coerce) throws IOExceptio } @Override - public Query termQuery(String field, Object value) { + public Query termQuery(String field, Object value, boolean hasDocValues, boolean isSearchable) { if (hasDecimalPart(value)) { return Queries.newMatchNoDocsQuery("Value [" + value + "] has a decimal part"); } BigInteger v = parse(value, true); + if (isSearchable && hasDocValues) { + Query query = BigIntegerPoint.newExactQuery(field, v); + Query dvQuery = SortedUnsignedLongDocValuesSetQuery.newSlowExactQuery(field, v); + return new IndexOrDocValuesQuery(query, dvQuery); + } + if (hasDocValues) { + return SortedUnsignedLongDocValuesSetQuery.newSlowExactQuery(field, v); + } return BigIntegerPoint.newExactQuery(field, v); } @Override - public Query termsQuery(String field, List<Object> values) { + public Query termsQuery(String field, List<Object> values, boolean hasDocvalues, boolean isSearchable) { BigInteger[] v = new BigInteger[values.size()]; int upTo = 0; @@ -867,6 +1029,14 @@ public Query termsQuery(String field, List<Object> values) { v = 
Arrays.copyOf(v, upTo); } + if (isSearchable && hasDocvalues) { + Query query = BigIntegerPoint.newSetQuery(field, v); + Query dvQuery = SortedUnsignedLongDocValuesSetQuery.newSlowSetQuery(field, v); + return new IndexOrDocValuesQuery(query, dvQuery); + } + if (hasDocvalues) { + return SortedUnsignedLongDocValuesSetQuery.newSlowSetQuery(field, v); + } return BigIntegerPoint.newSetQuery(field, v); } @@ -878,15 +1048,19 @@ public Query rangeQuery( boolean includeLower, boolean includeUpper, boolean hasDocValues, + boolean isSearchable, QueryShardContext context ) { return unsignedLongRangeQuery(lowerTerm, upperTerm, includeLower, includeUpper, (l, u) -> { - Query query = BigIntegerPoint.newRangeQuery(field, l, u); - if (hasDocValues) { + if (isSearchable && hasDocValues) { + Query query = BigIntegerPoint.newRangeQuery(field, l, u); Query dvQuery = SortedUnsignedLongDocValuesRangeQuery.newSlowRangeQuery(field, l, u); - query = new IndexOrDocValuesQuery(query, dvQuery); + return new IndexOrDocValuesQuery(query, dvQuery); } - return query; + if (hasDocValues) { + return SortedUnsignedLongDocValuesRangeQuery.newSlowRangeQuery(field, l, u); + } + return BigIntegerPoint.newRangeQuery(field, l, u); }); } @@ -940,9 +1114,9 @@ public final TypeParser parser() { return parser; } - public abstract Query termQuery(String field, Object value); + public abstract Query termQuery(String field, Object value, boolean hasDocValues, boolean isSearchable); - public abstract Query termsQuery(String field, List<Object> values); + public abstract Query termsQuery(String field, List<Object> values, boolean hasDocValues, boolean isSearchable); public abstract Query rangeQuery( String field, @@ -951,6 +1125,7 @@ public abstract Query rangeQuery( boolean includeLower, boolean includeUpper, boolean hasDocValues, + boolean isSearchable, QueryShardContext context ); @@ -1225,8 +1400,8 @@ public NumericType numericType() { @Override public Query termQuery(Object value, QueryShardContext context) { - failIfNotIndexed(); - Query query = type.termQuery(name(), value); + failIfNotIndexedAndNoDocValues(); + Query query = type.termQuery(name(), value, hasDocValues(), isSearchable()); if (boost() != 1f) { query = new BoostQuery(query, boost()); } @@ -1235,8 +1410,8 @@ public Query termQuery(Object value, QueryShardContext context) { @Override public Query termsQuery(List values, QueryShardContext context) { - failIfNotIndexed(); - Query query = type.termsQuery(name(), values); + failIfNotIndexedAndNoDocValues(); + Query query = type.termsQuery(name(), values, hasDocValues(), isSearchable()); if (boost() != 1f) { query = new BoostQuery(query, boost()); } @@ -1245,8 +1420,17 @@ public Query termsQuery(List values, QueryShardContext context) { @Override public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) { - failIfNotIndexed(); - Query query = type.rangeQuery(name(), lowerTerm, upperTerm, includeLower, includeUpper, hasDocValues(), context); + failIfNotIndexedAndNoDocValues(); + Query query = type.rangeQuery( + name(), + lowerTerm, + upperTerm, + includeLower, + includeUpper, + hasDocValues(), + isSearchable(), + context + ); if (boost() != 1f) { query = new BoostQuery(query, boost()); } diff --git a/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java index b4dcd35a5cd47..39b0ec224b57c 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java +++ 
b/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java @@ -38,12 +38,13 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Explicit; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.CopyOnWriteHashMap; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.index.mapper.MapperService.MergeReason; import java.io.IOException; @@ -60,8 +61,9 @@ /** * Field mapper for object field types * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ObjectMapper extends Mapper implements Cloneable { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(ObjectMapper.class); @@ -82,8 +84,9 @@ public static class Defaults { /** * Dynamic field mapping specification * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Dynamic { TRUE, FALSE, @@ -93,8 +96,9 @@ public enum Dynamic { /** * Nested objects * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Nested { public static final Nested NO = new Nested(false, new Explicit<>(false, false), new Explicit<>(false, false)); diff --git a/server/src/main/java/org/opensearch/index/mapper/ParametrizedFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/ParametrizedFieldMapper.java index b4fa781090615..f4723b6178137 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ParametrizedFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/ParametrizedFieldMapper.java @@ -36,10 +36,11 @@ import org.opensearch.Version; import org.opensearch.common.Explicit; import org.opensearch.common.TriFunction; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.support.XContentMapValues; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.analysis.NamedAnalyzer; import org.opensearch.index.mapper.Mapper.TypeParser.ParserContext; @@ -63,16 +64,17 @@ /** * Defines how a particular field should be indexed and searched - * + * <p> * Configuration {@link Parameter}s for the mapper are defined on a {@link Builder} subclass, * and returned by its {@link Builder#getParameters()} method. Merging, serialization * and parsing of the mapper are all mediated through this set of parameters. - * + * <p> * Subclasses should implement a {@link Builder} that is returned from the * {@link #getMergeBuilder()} method, initialised with the existing builder. 
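 * <p>
 * A minimal illustrative sketch of declaring such a parameter (the name "store"
 * and the {@code stored} field are examples only, mirroring the
 * {@link Parameter#boolParam} usages in this patch):
 * <pre>{@code
 * private final Parameter<Boolean> stored =
 *     Parameter.boolParam("store", false, m -> toType(m).stored, false);
 * }</pre>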
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class ParametrizedFieldMapper extends FieldMapper { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(ParametrizedFieldMapper.class); @@ -86,7 +88,7 @@ protected ParametrizedFieldMapper(String simpleName, MappedFieldType mappedField /** * Returns a {@link Builder} to be used for merging and serialization - * + * <p> * Implement as follows: * {@code return new MyBuilder(simpleName()).init(this); } */ @@ -151,14 +153,20 @@ protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, /** * Serializes a parameter + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") protected interface Serializer<T> { void serialize(XContentBuilder builder, String name, T value) throws IOException; } /** * Check on whether or not a parameter should be serialized + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") protected interface SerializerCheck<T> { /** * Check on whether or not a parameter should be serialized @@ -174,8 +182,9 @@ protected interface SerializerCheck<T> { * A configurable parameter for a field mapper * @param <T> the type of the value the parameter holds * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class Parameter<T> implements Supplier<T> { public final String name; @@ -256,7 +265,7 @@ public Parameter<T> acceptsNull() { /** * Adds a deprecated parameter name. - * + * <p> * If this parameter name is encountered during parsing, a deprecation warning will * be emitted. The parameter will be serialized with its main name. */ @@ -577,8 +586,9 @@ void check() { /** * A Builder for a ParametrizedFieldMapper * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public abstract static class Builder extends Mapper.Builder<Builder> { protected final MultiFields.Builder multiFieldsBuilder = new MultiFields.Builder(); diff --git a/server/src/main/java/org/opensearch/index/mapper/ParseContext.java b/server/src/main/java/org/opensearch/index/mapper/ParseContext.java index 092e2a6fd3710..5d382ff28bcf9 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ParseContext.java +++ b/server/src/main/java/org/opensearch/index/mapper/ParseContext.java @@ -35,8 +35,9 @@ import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexableField; import org.apache.lucene.util.BytesRef; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.OpenSearchParseException; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.IndexSettings; import java.util.ArrayList; @@ -53,15 +54,17 @@ /** * Holds the context at parse time * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class ParseContext implements Iterable<ParseContext.Document> { /** * Fork of {@link org.apache.lucene.document.Document} with additional functionality. 
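 * <p>
 * Among other things, each document holds a reference to its parent
 * {@code Document} (the {@code parent} field below), so documents produced for
 * nested objects can reach the document that encloses them.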
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Document implements Iterable<IndexableField> { private final Document parent; diff --git a/server/src/main/java/org/opensearch/index/mapper/ParsedDocument.java b/server/src/main/java/org/opensearch/index/mapper/ParsedDocument.java index 1084c08cdf32c..16e38980f8600 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ParsedDocument.java +++ b/server/src/main/java/org/opensearch/index/mapper/ParsedDocument.java @@ -33,19 +33,21 @@ package org.opensearch.index.mapper; import org.apache.lucene.document.Field; -import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.MediaType; -import org.opensearch.index.mapper.ParseContext.Document; import org.opensearch.index.mapper.MapperService.MergeReason; +import org.opensearch.index.mapper.ParseContext.Document; import java.util.List; /** * The result of parsing a document. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ParsedDocument { private final Field version; diff --git a/server/src/main/java/org/opensearch/index/mapper/RangeFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/RangeFieldMapper.java index 10f179c964591..05ca7dee0fe4b 100644 --- a/server/src/main/java/org/opensearch/index/mapper/RangeFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/RangeFieldMapper.java @@ -95,7 +95,7 @@ public class RangeFieldMapper extends ParametrizedFieldMapper { */ public static class Defaults { public static final Explicit<Boolean> COERCE = new Explicit<>(true, false); - public static final DateFormatter DATE_FORMATTER = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER; + public static final DateFormatter DATE_FORMATTER = DateFieldMapper.getDefaultDateTimeFormatter(); } // this is private since it has a different default diff --git a/server/src/main/java/org/opensearch/index/mapper/RangeType.java b/server/src/main/java/org/opensearch/index/mapper/RangeType.java index c8cd317779c7c..7e29fd417845b 100644 --- a/server/src/main/java/org/opensearch/index/mapper/RangeType.java +++ b/server/src/main/java/org/opensearch/index/mapper/RangeType.java @@ -313,7 +313,7 @@ public Query rangeQuery( ) { ZoneId zone = (timeZone == null) ? ZoneOffset.UTC : timeZone; - DateMathParser dateMathParser = (parser == null) ? DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.toDateMathParser() : parser; + DateMathParser dateMathParser = (parser == null) ? DateFieldMapper.getDefaultDateTimeFormatter().toDateMathParser() : parser; boolean roundUp = includeLower == false; // using "gt" should round lower bound up Long low = lowerTerm == null ? 
minValue() diff --git a/server/src/main/java/org/opensearch/index/mapper/RootObjectMapper.java b/server/src/main/java/org/opensearch/index/mapper/RootObjectMapper.java index 925c249b41ac7..9504e6eafc046 100644 --- a/server/src/main/java/org/opensearch/index/mapper/RootObjectMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/RootObjectMapper.java @@ -34,11 +34,12 @@ import org.opensearch.common.Explicit; import org.opensearch.common.Nullable; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.Strings; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.mapper.DynamicTemplate.XContentFieldType; @@ -60,8 +61,9 @@ /** * The root object mapper for a document * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RootObjectMapper extends ObjectMapper { private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(RootObjectMapper.class); @@ -72,7 +74,7 @@ public class RootObjectMapper extends ObjectMapper { */ public static class Defaults { public static final DateFormatter[] DYNAMIC_DATE_TIME_FORMATTERS = new DateFormatter[] { - DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, + DateFieldMapper.getDefaultDateTimeFormatter(), DateFormatter.forPattern("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd||epoch_millis") }; public static final boolean DATE_DETECTION = true; public static final boolean NUMERIC_DETECTION = false; @@ -459,7 +461,7 @@ private static void validateDynamicTemplate(Mapper.TypeParser.ParserContext pars Locale.ROOT, "dynamic template [%s] has invalid content [%s]", dynamicTemplate.getName(), - Strings.toString(XContentType.JSON, dynamicTemplate) + Strings.toString(MediaTypeRegistry.JSON, dynamicTemplate) ); final String deprecationMessage; diff --git a/server/src/main/java/org/opensearch/index/mapper/RoutingFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/RoutingFieldMapper.java index 222ba70a5ce0c..60decc56e0db2 100644 --- a/server/src/main/java/org/opensearch/index/mapper/RoutingFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/RoutingFieldMapper.java @@ -35,6 +35,7 @@ import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; import org.apache.lucene.index.IndexOptions; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lucene.Lucene; import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.lookup.SearchLookup; @@ -45,8 +46,9 @@ /** * Internal field mapper for _routing fields * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RoutingFieldMapper extends MetadataFieldMapper { public static final String NAME = "_routing"; diff --git a/server/src/main/java/org/opensearch/index/mapper/SeqNoFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/SeqNoFieldMapper.java index 16f76f087e403..c7e9bed7577c5 100644 --- a/server/src/main/java/org/opensearch/index/mapper/SeqNoFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/SeqNoFieldMapper.java @@ -39,6 +39,7 @@ import org.apache.lucene.search.Query; import 
org.apache.lucene.util.BytesRef; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexNumericFieldData.NumericType; import org.opensearch.index.fielddata.plain.SortedNumericIndexFieldData; @@ -55,11 +56,11 @@ /** * Mapper for the {@code _seq_no} field. - * + * <p> * We expect to use the seq# for sorting, during collision checking and for * doing range searches. Therefore the {@code _seq_no} field is stored both * as a numeric doc value and as numeric indexed field. - * + * <p> * This mapper also manages the primary term field, which has no OpenSearch named * equivalent. The primary term is only used during collision after receiving * identical seq# values for two document copies. The primary term is stored as @@ -74,8 +75,9 @@ public class SeqNoFieldMapper extends MetadataFieldMapper { * A sequence ID, which is made up of a sequence number (both the searchable * and doc_value version of the field) and the primary term. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class SequenceIDFields { public final Field seqNo; diff --git a/server/src/main/java/org/opensearch/index/mapper/SourceFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/SourceFieldMapper.java index ad467d045b7e4..a2d769d486a0f 100644 --- a/server/src/main/java/org/opensearch/index/mapper/SourceFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/SourceFieldMapper.java @@ -39,15 +39,16 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.opensearch.common.Nullable; -import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.common.util.CollectionUtils; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.query.QueryShardException; @@ -63,8 +64,9 @@ /** * Internal field mapper for storing source (and recovery source) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SourceFieldMapper extends MetadataFieldMapper { public static final String NAME = "_source"; @@ -228,7 +230,7 @@ public BytesReference applyFilters(@Nullable BytesReference originalSource, @Nul Map<String, Object> filteredSource = filter.apply(mapTuple.v2()); BytesStreamOutput bStream = new BytesStreamOutput(); MediaType actualContentType = mapTuple.v1(); - XContentBuilder builder = XContentFactory.contentBuilder(actualContentType, bStream).map(filteredSource); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(actualContentType, bStream).map(filteredSource); builder.close(); return bStream.bytes(); } else { diff --git a/server/src/main/java/org/opensearch/index/mapper/SourceToParse.java b/server/src/main/java/org/opensearch/index/mapper/SourceToParse.java index 
2b24a380c8bb6..c8bb4dbefbbb1 100644 --- a/server/src/main/java/org/opensearch/index/mapper/SourceToParse.java +++ b/server/src/main/java/org/opensearch/index/mapper/SourceToParse.java @@ -32,18 +32,20 @@ package org.opensearch.index.mapper; -import java.util.Objects; - import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.MediaType; +import java.util.Objects; + /** * Stores the document source * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SourceToParse { private final BytesReference source; diff --git a/server/src/main/java/org/opensearch/index/mapper/SourceValueFetcher.java b/server/src/main/java/org/opensearch/index/mapper/SourceValueFetcher.java index 69f53ba126790..a32d1c9f489ca 100644 --- a/server/src/main/java/org/opensearch/index/mapper/SourceValueFetcher.java +++ b/server/src/main/java/org/opensearch/index/mapper/SourceValueFetcher.java @@ -46,7 +46,7 @@ * An implementation of {@link ValueFetcher} that knows how to extract values * from the document source. Most standard field mappers will use this class * to implement value fetching. - * + * <p> * Field types that handle arrays directly should instead use {@link ArraySourceValueFetcher}. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java index a22b028888550..d0e041e68a81d 100644 --- a/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java @@ -32,7 +32,6 @@ package org.opensearch.index.mapper; -import java.util.Optional; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.AnalyzerWrapper; import org.apache.lucene.analysis.CachingTokenFilter; @@ -75,9 +74,9 @@ import org.opensearch.common.lucene.Lucene; import org.opensearch.common.lucene.search.AutomatonQueries; import org.opensearch.common.lucene.search.MultiPhrasePrefixQuery; +import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.index.analysis.AnalyzerScope; import org.opensearch.index.analysis.IndexAnalyzers; import org.opensearch.index.analysis.NamedAnalyzer; @@ -99,6 +98,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.function.IntPredicate; import java.util.function.Supplier; @@ -110,7 +110,7 @@ public class TextFieldMapper extends ParametrizedFieldMapper { public static final String CONTENT_TYPE = "text"; - private static final int POSITION_INCREMENT_GAP_USE_ANALYZER = -1; + protected static final int POSITION_INCREMENT_GAP_USE_ANALYZER = -1; private static final String FAST_PHRASE_SUFFIX = "._index_phrase"; /** @@ -152,11 +152,11 @@ private static TextFieldMapper toType(FieldMapper in) { * * @opensearch.internal */ - private static final class PrefixConfig implements ToXContent { + protected static final class PrefixConfig implements ToXContent { final int minChars; final int maxChars; - private PrefixConfig(int minChars, int maxChars) { + PrefixConfig(int minChars, int maxChars) { this.minChars = minChars; this.maxChars = maxChars; if 
(minChars > maxChars) { @@ -198,7 +198,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } } - private static PrefixConfig parsePrefixConfig(String propName, ParserContext parserContext, Object propNode) { + static PrefixConfig parsePrefixConfig(String propName, ParserContext parserContext, Object propNode) { if (propNode == null) { return null; } @@ -214,7 +214,7 @@ private static PrefixConfig parsePrefixConfig(String propName, ParserContext par * * @opensearch.internal */ - private static final class FielddataFrequencyFilter implements ToXContent { + protected static final class FielddataFrequencyFilter implements ToXContent { final double minFreq; final double maxFreq; final int minSegmentSize; @@ -280,15 +280,14 @@ public static class Builder extends ParametrizedFieldMapper.Builder { private final Version indexCreatedVersion; - private final Parameter<Boolean> index = Parameter.indexParam(m -> toType(m).mappedFieldType.isSearchable(), true); - private final Parameter<Boolean> store = Parameter.storeParam(m -> toType(m).fieldType.stored(), false); + protected final Parameter<Boolean> index = Parameter.indexParam(m -> toType(m).mappedFieldType.isSearchable(), true); + protected final Parameter<Boolean> store = Parameter.storeParam(m -> toType(m).fieldType.stored(), false); final Parameter<SimilarityProvider> similarity = TextParams.similarity(m -> toType(m).similarity); final Parameter<String> indexOptions = TextParams.indexOptions(m -> toType(m).indexOptions); final Parameter<Boolean> norms = TextParams.norms(true, m -> toType(m).fieldType.omitNorms() == false); final Parameter<String> termVectors = TextParams.termVectors(m -> toType(m).termVectors); - final Parameter<Integer> positionIncrementGap = Parameter.intParam( "position_increment_gap", false, @@ -332,8 +331,8 @@ public static class Builder extends ParametrizedFieldMapper.Builder { .orElse(null) ).acceptsNull(); - private final Parameter<Float> boost = Parameter.boostParam(); - private final Parameter<Map<String, String>> meta = Parameter.metaParam(); + protected final Parameter<Float> boost = Parameter.boostParam(); + protected final Parameter<Map<String, String>> meta = Parameter.metaParam(); final TextParams.Analyzers analyzers; @@ -395,7 +394,7 @@ protected List<Parameter<?>> getParameters() { ); } - private TextFieldType buildFieldType(FieldType fieldType, BuilderContext context) { + protected TextFieldType buildFieldType(FieldType fieldType, BuilderContext context) { NamedAnalyzer indexAnalyzer = analyzers.getIndexAnalyzer(); NamedAnalyzer searchAnalyzer = analyzers.getSearchAnalyzer(); NamedAnalyzer searchQuoteAnalyzer = analyzers.getSearchQuoteAnalyzer(); @@ -420,7 +419,7 @@ private TextFieldType buildFieldType(FieldType fieldType, BuilderContext context return ft; } - private PrefixFieldMapper buildPrefixMapper(BuilderContext context, FieldType fieldType, TextFieldType tft) { + protected PrefixFieldMapper buildPrefixMapper(BuilderContext context, FieldType fieldType, TextFieldType tft) { if (indexPrefixes.get() == null) { return null; } @@ -454,7 +453,7 @@ private PrefixFieldMapper buildPrefixMapper(BuilderContext context, FieldType fi return new PrefixFieldMapper(pft, prefixFieldType); } - private PhraseFieldMapper buildPhraseMapper(FieldType fieldType, TextFieldType parent) { + protected PhraseFieldMapper buildPhraseMapper(FieldType fieldType, TextFieldType parent) { if (indexPhrases.get() == false) { return null; } @@ -683,7 +682,7 @@ public Query existsQuery(QueryShardContext 
context) { * * @opensearch.internal */ - private static final class PhraseFieldMapper extends FieldMapper { + protected static final class PhraseFieldMapper extends FieldMapper { PhraseFieldMapper(FieldType fieldType, PhraseFieldType mappedFieldType) { super(mappedFieldType.name(), fieldType, mappedFieldType, MultiFields.empty(), CopyTo.empty()); @@ -710,7 +709,7 @@ protected String contentType() { * * @opensearch.internal */ - private static final class PrefixFieldMapper extends FieldMapper { + protected static final class PrefixFieldMapper extends FieldMapper { protected PrefixFieldMapper(FieldType fieldType, PrefixFieldType mappedFieldType) { super(mappedFieldType.name(), fieldType, mappedFieldType, MultiFields.empty(), CopyTo.empty()); @@ -968,15 +967,15 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, S } - private final FieldType fieldType; + protected final FieldType fieldType; private final PrefixFieldMapper prefixFieldMapper; private final PhraseFieldMapper phraseFieldMapper; private final SimilarityProvider similarity; private final String indexOptions; private final String termVectors; private final int positionIncrementGap; - private final Version indexCreatedVersion; - private final IndexAnalyzers indexAnalyzers; + protected final Version indexCreatedVersion; + protected final IndexAnalyzers indexAnalyzers; private final FielddataFrequencyFilter freqFilter; protected TextFieldMapper( diff --git a/server/src/main/java/org/opensearch/index/mapper/TextSearchInfo.java b/server/src/main/java/org/opensearch/index/mapper/TextSearchInfo.java index 535fba6c39371..a3f5eed479b8f 100644 --- a/server/src/main/java/org/opensearch/index/mapper/TextSearchInfo.java +++ b/server/src/main/java/org/opensearch/index/mapper/TextSearchInfo.java @@ -34,6 +34,7 @@ import org.apache.lucene.document.FieldType; import org.apache.lucene.index.IndexOptions; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lucene.Lucene; import org.opensearch.index.analysis.NamedAnalyzer; import org.opensearch.index.similarity.SimilarityProvider; @@ -41,8 +42,9 @@ /** * Encapsulates information about how to perform text searches over a field * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class TextSearchInfo { private static final FieldType SIMPLE_MATCH_ONLY_FIELD_TYPE = new FieldType(); @@ -145,8 +147,9 @@ public boolean isTokenized() { /** * What sort of term vectors are available * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum TermVector { NONE, DOCS, diff --git a/server/src/main/java/org/opensearch/index/mapper/ValueFetcher.java b/server/src/main/java/org/opensearch/index/mapper/ValueFetcher.java index d1cea3fe7f1b0..c6bdad8b8653c 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ValueFetcher.java +++ b/server/src/main/java/org/opensearch/index/mapper/ValueFetcher.java @@ -33,6 +33,7 @@ package org.opensearch.index.mapper; import org.apache.lucene.index.LeafReaderContext; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.search.fetch.subphase.FetchFieldsPhase; import org.opensearch.search.lookup.SourceLookup; @@ -43,16 +44,17 @@ * A helper class for fetching field values during the {@link FetchFieldsPhase}. Each {@link MappedFieldType} * is in charge of defining a value fetcher through {@link MappedFieldType#valueFetcher}. 
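ValueFetcher is one of several index.mapper types that this change promotes from @opensearch.internal to @opensearch.api (the hunk that follows adds @PublicApi(since = "1.0.0")), which makes it a supported extension point rather than an internal detail. A minimal sketch of an implementation, assuming fetchValues(SourceLookup) is the interface's only required method as in the surrounding 2.x signatures; the field name and the uppercasing are illustrative, not part of this change:

import java.io.IOException;
import java.util.List;
import java.util.Locale;
import java.util.stream.Collectors;
import org.opensearch.index.mapper.ValueFetcher;
import org.opensearch.search.lookup.SourceLookup;

// Hypothetical fetcher that reads "my_field" out of _source and normalizes it.
public class UppercaseValueFetcher implements ValueFetcher {
    @Override
    public List<Object> fetchValues(SourceLookup lookup) throws IOException {
        return lookup.extractRawValues("my_field")
            .stream()
            .map(v -> v.toString().toUpperCase(Locale.ROOT))
            .collect(Collectors.toList());
    }
}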
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ValueFetcher { /** * Given access to a document's _source, return this field's values. - * + * <p> * In addition to pulling out the values, they will be parsed into a standard form. * For example numeric field mappers make sure to parse the source value into a number * of the right type. - * + * <p> * Note that for array values, the order in which values are returned is undefined and * should not be relied on. * diff --git a/server/src/main/java/org/opensearch/index/merge/MergeStats.java b/server/src/main/java/org/opensearch/index/merge/MergeStats.java index fee0ed904b68a..7ecaed60735b4 100644 --- a/server/src/main/java/org/opensearch/index/merge/MergeStats.java +++ b/server/src/main/java/org/opensearch/index/merge/MergeStats.java @@ -32,11 +32,13 @@ package org.opensearch.index.merge; +import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -45,8 +47,9 @@ /** * Stores stats about a merge process * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class MergeStats implements Writeable, ToXContentFragment { private long total; @@ -65,9 +68,9 @@ public class MergeStats implements Writeable, ToXContentFragment { private long totalBytesPerSecAutoThrottle; - public MergeStats() { + private long unreferencedFileCleanUpsPerformed; - } + public MergeStats() {} public MergeStats(StreamInput in) throws IOException { total = in.readVLong(); @@ -81,6 +84,9 @@ public MergeStats(StreamInput in) throws IOException { totalStoppedTimeInMillis = in.readVLong(); totalThrottledTimeInMillis = in.readVLong(); totalBytesPerSecAutoThrottle = in.readVLong(); + if (in.getVersion().onOrAfter(Version.V_2_11_0)) { + unreferencedFileCleanUpsPerformed = in.readOptionalVLong(); + } } public void add( @@ -133,6 +139,7 @@ public void addTotals(MergeStats mergeStats) { this.totalSizeInBytes += mergeStats.totalSizeInBytes; this.totalStoppedTimeInMillis += mergeStats.totalStoppedTimeInMillis; this.totalThrottledTimeInMillis += mergeStats.totalThrottledTimeInMillis; + addUnreferencedFileCleanUpStats(mergeStats.unreferencedFileCleanUpsPerformed); if (this.totalBytesPerSecAutoThrottle == Long.MAX_VALUE || mergeStats.totalBytesPerSecAutoThrottle == Long.MAX_VALUE) { this.totalBytesPerSecAutoThrottle = Long.MAX_VALUE; } else { @@ -140,6 +147,14 @@ public void addTotals(MergeStats mergeStats) { } } + public void addUnreferencedFileCleanUpStats(long unreferencedFileCleanUpsPerformed) { + this.unreferencedFileCleanUpsPerformed += unreferencedFileCleanUpsPerformed; + } + + public long getUnreferencedFileCleanUpsPerformed() { + return this.unreferencedFileCleanUpsPerformed; + } + /** * The total number of merges executed. 
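The new unreferencedFileCleanUpsPerformed counter above is serialized behind a version check so mixed-version clusters keep working during rolling upgrades: the field is written, and expected on read, only when the stream version is on or after V_2_11_0, the release that introduced it. A condensed sketch of the same wire-compatibility pattern, using a hypothetical stats class:

import java.io.IOException;
import org.opensearch.Version;
import org.opensearch.core.common.io.stream.StreamInput;
import org.opensearch.core.common.io.stream.StreamOutput;
import org.opensearch.core.common.io.stream.Writeable;

// Hypothetical Writeable mirroring the MergeStats change: "cleanUps" was
// added in 2.11, so it crosses the wire only for 2.11+ peers.
public class CleanupStats implements Writeable {
    private long total;      // present in every version
    private long cleanUps;   // introduced in 2.11

    public CleanupStats(StreamInput in) throws IOException {
        total = in.readVLong();
        if (in.getVersion().onOrAfter(Version.V_2_11_0)) {
            cleanUps = in.readOptionalVLong(); // older senders never wrote it
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeVLong(total);
        if (out.getVersion().onOrAfter(Version.V_2_11_0)) {
            out.writeOptionalVLong(cleanUps); // skipped for pre-2.11 receivers
        }
    }
}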
*/ @@ -240,6 +255,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(Fields.TOTAL_THROTTLE_BYTES_PER_SEC).value(new ByteSizeValue(totalBytesPerSecAutoThrottle).toString()); } builder.field(Fields.TOTAL_THROTTLE_BYTES_PER_SEC_IN_BYTES, totalBytesPerSecAutoThrottle); + builder.field(Fields.UNREFERENCED_FILE_CLEANUPS_PERFORMED, unreferencedFileCleanUpsPerformed); builder.endObject(); return builder; } @@ -267,6 +283,7 @@ static final class Fields { static final String TOTAL_SIZE_IN_BYTES = "total_size_in_bytes"; static final String TOTAL_THROTTLE_BYTES_PER_SEC_IN_BYTES = "total_auto_throttle_in_bytes"; static final String TOTAL_THROTTLE_BYTES_PER_SEC = "total_auto_throttle"; + static final String UNREFERENCED_FILE_CLEANUPS_PERFORMED = "unreferenced_file_cleanups_performed"; } @Override @@ -282,5 +299,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(totalStoppedTimeInMillis); out.writeVLong(totalThrottledTimeInMillis); out.writeVLong(totalBytesPerSecAutoThrottle); + if (out.getVersion().onOrAfter(Version.V_2_11_0)) { + out.writeOptionalVLong(unreferencedFileCleanUpsPerformed); + } } } diff --git a/server/src/main/java/org/opensearch/index/query/AbstractGeometryQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/AbstractGeometryQueryBuilder.java index 042890c238410..9fb857e33bfee 100644 --- a/server/src/main/java/org/opensearch/index/query/AbstractGeometryQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/AbstractGeometryQueryBuilder.java @@ -35,24 +35,24 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.get.GetRequest; import org.opensearch.action.get.GetResponse; import org.opensearch.client.Client; -import org.opensearch.core.ParseField; -import org.opensearch.core.common.ParsingException; import org.opensearch.common.SetOnce; import org.opensearch.common.geo.GeoJson; import org.opensearch.common.geo.GeometryIO; import org.opensearch.common.geo.GeometryParser; import org.opensearch.common.geo.ShapeRelation; import org.opensearch.common.geo.builders.ShapeBuilder; +import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.geometry.Geometry; import org.opensearch.index.mapper.MappedFieldType; diff --git a/server/src/main/java/org/opensearch/index/query/AbstractQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/AbstractQueryBuilder.java index c45637f986d35..66c6ee115c3f0 100644 --- a/server/src/main/java/org/opensearch/index/query/AbstractQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/AbstractQueryBuilder.java @@ -36,15 +36,15 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; -import 
org.opensearch.core.common.ParsingException; -import org.opensearch.common.Strings; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.lucene.BytesRefs; import org.opensearch.common.xcontent.SuggestingErrorOnUnknown; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.ParseField; +import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.AbstractObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedObjectNotFoundException; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentLocation; @@ -298,7 +298,7 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryShardContext) throws I /** * For internal usage only! - * + * <p> * Extracts the inner hits from the query tree. * While it extracts inner hits, child inner hits are inlined into the inner hit builder they belong to. */ @@ -395,6 +395,6 @@ protected static void declareStandardFields(AbstractObjectParser<? extends Query @Override public final String toString() { - return Strings.toString(XContentType.JSON, this, true, true); + return Strings.toString(MediaTypeRegistry.JSON, this, true, true); } } diff --git a/server/src/main/java/org/opensearch/index/query/BaseTermQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/BaseTermQueryBuilder.java index 9f62ccfe9f31f..c4d9437a60c75 100644 --- a/server/src/main/java/org/opensearch/index/query/BaseTermQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/BaseTermQueryBuilder.java @@ -34,9 +34,9 @@ import org.apache.lucene.util.BytesRef; import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.XContentBuilder; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/index/query/BoolQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/BoolQueryBuilder.java index 21735da57daf1..c44a7ef6a397c 100644 --- a/server/src/main/java/org/opensearch/index/query/BoolQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/BoolQueryBuilder.java @@ -37,11 +37,11 @@ import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; +import org.opensearch.common.lucene.search.Queries; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.lucene.search.Queries; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -427,4 +427,35 @@ private static boolean rewriteClauses( } return changed; } + + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + if (mustClauses.isEmpty() == false) { + QueryBuilderVisitor subVisitor = visitor.getChildVisitor(Occur.MUST); + for (QueryBuilder mustClause : mustClauses) { + 
mustClause.visit(subVisitor); + } + } + if (shouldClauses.isEmpty() == false) { + QueryBuilderVisitor subVisitor = visitor.getChildVisitor(Occur.SHOULD); + for (QueryBuilder shouldClause : shouldClauses) { + shouldClause.visit(subVisitor); + } + } + if (mustNotClauses.isEmpty() == false) { + QueryBuilderVisitor subVisitor = visitor.getChildVisitor(Occur.MUST_NOT); + for (QueryBuilder mustNotClause : mustNotClauses) { + mustNotClause.visit(subVisitor); + } + } + if (filterClauses.isEmpty() == false) { + QueryBuilderVisitor subVisitor = visitor.getChildVisitor(Occur.FILTER); + for (QueryBuilder filterClause : filterClauses) { + filterClause.visit(subVisitor); + } + } + + } + } diff --git a/server/src/main/java/org/opensearch/index/query/BoostingQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/BoostingQueryBuilder.java index 26124b422f26f..1b52ae2f03605 100644 --- a/server/src/main/java/org/opensearch/index/query/BoostingQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/BoostingQueryBuilder.java @@ -33,6 +33,7 @@ package org.opensearch.index.query; import org.apache.lucene.queries.function.FunctionScoreQuery; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; @@ -252,4 +253,15 @@ protected void extractInnerHitBuilders(Map<String, InnerHitContextBuilder> inner InnerHitContextBuilder.extractInnerHits(positiveQuery, innerHits); InnerHitContextBuilder.extractInnerHits(negativeQuery, innerHits); } + + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + if (positiveQuery != null) { + visitor.getChildVisitor(BooleanClause.Occur.MUST).accept(positiveQuery); + } + if (negativeQuery != null) { + visitor.getChildVisitor(BooleanClause.Occur.SHOULD).accept(negativeQuery); + } + } } diff --git a/server/src/main/java/org/opensearch/index/query/CommonTermsQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/CommonTermsQueryBuilder.java index e127e5a8dbc4a..652cae86da0dc 100644 --- a/server/src/main/java/org/opensearch/index/query/CommonTermsQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/CommonTermsQueryBuilder.java @@ -42,9 +42,9 @@ import org.apache.lucene.util.BytesRefBuilder; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.mapper.MappedFieldType; diff --git a/server/src/main/java/org/opensearch/index/query/ConstantScoreQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/ConstantScoreQueryBuilder.java index 6a29ad8a0a401..b2764d29da80a 100644 --- a/server/src/main/java/org/opensearch/index/query/ConstantScoreQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/ConstantScoreQueryBuilder.java @@ -32,6 +32,7 @@ package org.opensearch.index.query; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.Query; import org.opensearch.core.ParseField; @@ -183,4 +184,11 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws protected void 
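// The visit(QueryBuilderVisitor) overrides added to the compound builders
// above let callers walk an entire query tree without knowing each
// builder's internals: accept(...) fires for the current builder, and
// getChildVisitor(Occur) supplies the visitor for each clause type. A
// minimal sketch of a visitor that records every builder's writeable name
// (the collector below is illustrative, not part of this change):
//
//     List<String> names = new ArrayList<>();
//     QueryBuilderVisitor collector = new QueryBuilderVisitor() {
//         @Override
//         public void accept(QueryBuilder qb) {
//             names.add(qb.getWriteableName());
//         }
//
//         @Override
//         public QueryBuilderVisitor getChildVisitor(BooleanClause.Occur occur) {
//             return this; // reuse the same collector for every clause type
//         }
//     };
//     boolQuery.visit(collector); // names now covers the whole tree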
extractInnerHitBuilders(Map<String, InnerHitContextBuilder> innerHits) { InnerHitContextBuilder.extractInnerHits(filterBuilder, innerHits); } + + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + visitor.getChildVisitor(BooleanClause.Occur.FILTER).accept(filterBuilder); + } + } diff --git a/server/src/main/java/org/opensearch/index/query/DateRangeIncludingNowQuery.java b/server/src/main/java/org/opensearch/index/query/DateRangeIncludingNowQuery.java index 2a7dbf314103d..001d77facb829 100644 --- a/server/src/main/java/org/opensearch/index/query/DateRangeIncludingNowQuery.java +++ b/server/src/main/java/org/opensearch/index/query/DateRangeIncludingNowQuery.java @@ -32,8 +32,8 @@ package org.opensearch.index.query; -import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryVisitor; @@ -60,7 +60,7 @@ public Query getQuery() { } @Override - public Query rewrite(IndexReader reader) throws IOException { + public Query rewrite(IndexSearcher searcher) throws IOException { return in; } diff --git a/server/src/main/java/org/opensearch/index/query/DisMaxQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/DisMaxQueryBuilder.java index e4fda385ead97..bd8ec62f6c43e 100644 --- a/server/src/main/java/org/opensearch/index/query/DisMaxQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/DisMaxQueryBuilder.java @@ -32,13 +32,14 @@ package org.opensearch.index.query; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.Query; +import org.opensearch.common.lucene.search.Queries; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.lucene.search.Queries; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -246,4 +247,15 @@ protected void extractInnerHitBuilders(Map<String, InnerHitContextBuilder> inner InnerHitContextBuilder.extractInnerHits(query, innerHits); } } + + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + if (queries.isEmpty() == false) { + QueryBuilderVisitor subVisitor = visitor.getChildVisitor(BooleanClause.Occur.SHOULD); + for (QueryBuilder subQb : queries) { + subVisitor.accept(subQb); + } + } + } } diff --git a/server/src/main/java/org/opensearch/index/query/DistanceFeatureQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/DistanceFeatureQueryBuilder.java index 222b2cd4b1cd0..1d9f0479c6b17 100644 --- a/server/src/main/java/org/opensearch/index/query/DistanceFeatureQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/DistanceFeatureQueryBuilder.java @@ -33,13 +33,13 @@ package org.opensearch.index.query; import org.apache.lucene.search.Query; -import org.opensearch.core.ParseField; -import org.opensearch.core.common.ParsingException; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.geo.GeoUtils; +import org.opensearch.common.lucene.search.Queries; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import 
org.opensearch.common.lucene.search.Queries; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.XContentBuilder; diff --git a/server/src/main/java/org/opensearch/index/query/ExistsQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/ExistsQueryBuilder.java index 38960792b1dea..7fd83d5753512 100644 --- a/server/src/main/java/org/opensearch/index/query/ExistsQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/ExistsQueryBuilder.java @@ -38,12 +38,12 @@ import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; +import org.opensearch.common.lucene.search.Queries; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.lucene.search.Queries; -import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.mapper.FieldNamesFieldMapper; diff --git a/server/src/main/java/org/opensearch/index/query/FieldMaskingSpanQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/FieldMaskingSpanQueryBuilder.java index 1a9623f0fd7dd..4e73d87b07b7a 100644 --- a/server/src/main/java/org/opensearch/index/query/FieldMaskingSpanQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/FieldMaskingSpanQueryBuilder.java @@ -34,12 +34,13 @@ import org.apache.lucene.queries.spans.FieldMaskingSpanQuery; import org.apache.lucene.queries.spans.SpanQuery; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.mapper.MappedFieldType; @@ -53,7 +54,9 @@ * @opensearch.internal */ public class FieldMaskingSpanQueryBuilder extends AbstractQueryBuilder<FieldMaskingSpanQueryBuilder> implements SpanQueryBuilder { - public static final ParseField SPAN_FIELD_MASKING_FIELD = new ParseField("span_field_masking", "field_masking_span"); + + public static final String NAME = "span_field_masking"; + public static final ParseField SPAN_FIELD_MASKING_FIELD = new ParseField(NAME, "field_masking_span"); private static final ParseField FIELD_FIELD = new ParseField("field"); private static final ParseField QUERY_FIELD = new ParseField("query"); @@ -207,4 +210,10 @@ protected boolean doEquals(FieldMaskingSpanQueryBuilder other) { public String getWriteableName() { return SPAN_FIELD_MASKING_FIELD.getPreferredName(); } + + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + visitor.getChildVisitor(BooleanClause.Occur.MUST).accept(queryBuilder); + } } diff --git a/server/src/main/java/org/opensearch/index/query/FuzzyQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/FuzzyQueryBuilder.java index 49d4c4f87d691..a25a426792e31 100644 --- 
a/server/src/main/java/org/opensearch/index/query/FuzzyQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/FuzzyQueryBuilder.java @@ -35,13 +35,13 @@ import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.Query; +import org.opensearch.common.unit.Fuzziness; +import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.Fuzziness; -import org.opensearch.common.xcontent.LoggingDeprecationHandler; -import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.mapper.MappedFieldType; diff --git a/server/src/main/java/org/opensearch/index/query/GeoBoundingBoxQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/GeoBoundingBoxQueryBuilder.java index 5cf3bf2de87c7..1fade8601e2a6 100644 --- a/server/src/main/java/org/opensearch/index/query/GeoBoundingBoxQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/GeoBoundingBoxQueryBuilder.java @@ -36,13 +36,13 @@ import org.apache.lucene.search.Query; import org.opensearch.OpenSearchParseException; import org.opensearch.common.Numbers; -import org.opensearch.core.ParseField; -import org.opensearch.core.common.ParsingException; import org.opensearch.common.geo.GeoBoundingBox; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.geo.GeoUtils; import org.opensearch.common.geo.ShapeRelation; import org.opensearch.common.geo.SpatialStrategy; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.XContentBuilder; @@ -60,7 +60,7 @@ /** * Creates a Lucene query that will filter for all documents that lie within the specified * bounding box. - * + * <p> * This query can only operate on fields of type geo_point that have latitude and longitude * enabled. 
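As a usage sketch of this builder (the field name and corner coordinates are illustrative, not part of this change):

import org.opensearch.common.geo.GeoPoint;
import org.opensearch.index.query.GeoBoundingBoxQueryBuilder;
import org.opensearch.index.query.QueryBuilders;

public class GeoBoxExample {
    public static void main(String[] args) {
        // Matches documents whose "pin.location" geo_point falls inside the
        // box given by a top-left and a bottom-right corner (lat, lon).
        GeoBoundingBoxQueryBuilder query = QueryBuilders.geoBoundingBoxQuery("pin.location")
            .setCorners(new GeoPoint(40.73, -74.1), new GeoPoint(40.01, -71.12));
        System.out.println(query); // AbstractQueryBuilder#toString renders JSON
    }
}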
* diff --git a/server/src/main/java/org/opensearch/index/query/GeoDistanceQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/GeoDistanceQueryBuilder.java index f43a088434fc0..8d126f19a204c 100644 --- a/server/src/main/java/org/opensearch/index/query/GeoDistanceQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/GeoDistanceQueryBuilder.java @@ -34,17 +34,17 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; -import org.opensearch.core.ParseField; -import org.opensearch.core.common.ParsingException; import org.opensearch.common.geo.GeoDistance; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.geo.GeoUtils; import org.opensearch.common.geo.ShapeRelation; import org.opensearch.common.geo.SpatialStrategy; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.unit.DistanceUnit; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.geometry.Circle; diff --git a/server/src/main/java/org/opensearch/index/query/GeoPolygonQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/GeoPolygonQueryBuilder.java index a230f09ecd374..47eafa3893384 100644 --- a/server/src/main/java/org/opensearch/index/query/GeoPolygonQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/GeoPolygonQueryBuilder.java @@ -38,13 +38,13 @@ import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; -import org.opensearch.core.ParseField; -import org.opensearch.core.common.ParsingException; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.geo.GeoUtils; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.xcontent.XContentParser.Token; diff --git a/server/src/main/java/org/opensearch/index/query/GeoShapeQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/GeoShapeQueryBuilder.java index 92768ff5f4c43..33b896a1d5163 100644 --- a/server/src/main/java/org/opensearch/index/query/GeoShapeQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/GeoShapeQueryBuilder.java @@ -34,12 +34,12 @@ import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.Query; -import org.opensearch.core.ParseField; -import org.opensearch.core.common.ParsingException; import org.opensearch.common.geo.ShapeRelation; import org.opensearch.common.geo.SpatialStrategy; import org.opensearch.common.geo.builders.ShapeBuilder; import org.opensearch.common.geo.parsers.ShapeParser; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import 
org.opensearch.core.xcontent.XContentBuilder; @@ -55,7 +55,7 @@ /** * Derived {@link AbstractGeometryQueryBuilder} that builds a lat, lon GeoShape Query. It * can be applied to any {@link MappedFieldType} that implements {@link GeoShapeQueryable}. - * + * <p> * GeoJson and WKT shape definitions are supported * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/index/query/GeoValidationMethod.java b/server/src/main/java/org/opensearch/index/query/GeoValidationMethod.java index aad3e50a0acd5..bb3cd34ae629d 100644 --- a/server/src/main/java/org/opensearch/index/query/GeoValidationMethod.java +++ b/server/src/main/java/org/opensearch/index/query/GeoValidationMethod.java @@ -35,14 +35,14 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.util.CollectionUtils; +import org.opensearch.core.common.util.CollectionUtils; import java.io.IOException; /** * This enum is used to determine how to deal with invalid geo coordinates in geo related * queries: - * + * <p> * On STRICT validation invalid coordinates cause an exception to be thrown. * On IGNORE_MALFORMED invalid coordinates are being accepted. * On COERCE invalid coordinates are being corrected to the most likely valid coordinate. diff --git a/server/src/main/java/org/opensearch/index/query/IdsQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/IdsQueryBuilder.java index 245c7f2240ff6..d7ebdbff10adb 100644 --- a/server/src/main/java/org/opensearch/index/query/IdsQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/IdsQueryBuilder.java @@ -34,12 +34,12 @@ import org.apache.lucene.search.Query; import org.opensearch.Version; +import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.logging.DeprecationLogger; -import org.opensearch.core.common.Strings; -import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; diff --git a/server/src/main/java/org/opensearch/index/query/InnerHitBuilder.java b/server/src/main/java/org/opensearch/index/query/InnerHitBuilder.java index 279b9bf07171b..1a2a554526e15 100644 --- a/server/src/main/java/org/opensearch/index/query/InnerHitBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/InnerHitBuilder.java @@ -32,13 +32,14 @@ package org.opensearch.index.query; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; -import org.opensearch.common.Strings; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.ParseField; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; @@ 
-66,7 +67,10 @@ /** * Query builder for inner hits + * + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class InnerHitBuilder implements Writeable, ToXContentObject { public static final ParseField NAME_FIELD = new ParseField("name"); @@ -598,6 +602,6 @@ public static InnerHitBuilder fromXContent(XContentParser parser) throws IOExcep @Override public String toString() { - return Strings.toString(XContentType.JSON, this, true, true); + return Strings.toString(MediaTypeRegistry.JSON, this, true, true); } } diff --git a/server/src/main/java/org/opensearch/index/query/IntervalBuilder.java b/server/src/main/java/org/opensearch/index/query/IntervalBuilder.java index 0a13900d110ab..0e42e79f67d0c 100644 --- a/server/src/main/java/org/opensearch/index/query/IntervalBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/IntervalBuilder.java @@ -39,12 +39,12 @@ import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute; import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.queries.intervals.IntervalMatchesIterator; -import org.apache.lucene.search.BooleanQuery; -import org.apache.lucene.search.QueryVisitor; import org.apache.lucene.queries.intervals.IntervalIterator; +import org.apache.lucene.queries.intervals.IntervalMatchesIterator; import org.apache.lucene.queries.intervals.Intervals; import org.apache.lucene.queries.intervals.IntervalsSource; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.QueryVisitor; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.graph.GraphTokenStreamFiniteStrings; diff --git a/server/src/main/java/org/opensearch/index/query/IntervalMode.java b/server/src/main/java/org/opensearch/index/query/IntervalMode.java index 454d867b41da2..a2979135ac03f 100644 --- a/server/src/main/java/org/opensearch/index/query/IntervalMode.java +++ b/server/src/main/java/org/opensearch/index/query/IntervalMode.java @@ -9,6 +9,7 @@ package org.opensearch.index.query; import org.opensearch.OpenSearchException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -18,8 +19,9 @@ /** * Mode for Text and Mapped Field Types * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum IntervalMode implements Writeable { ORDERED(0), UNORDERED(1), diff --git a/server/src/main/java/org/opensearch/index/query/IntervalQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/IntervalQueryBuilder.java index 421f174591954..125035ea5e95a 100644 --- a/server/src/main/java/org/opensearch/index/query/IntervalQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/IntervalQueryBuilder.java @@ -32,9 +32,9 @@ package org.opensearch.index.query; +import org.apache.lucene.queries.intervals.IntervalQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; -import org.apache.lucene.queries.intervals.IntervalQuery; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java b/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java index 
b696539bbd366..5a57dfed14f69 100644 --- a/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java +++ b/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java @@ -41,13 +41,13 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.automaton.CompiledAutomaton; import org.apache.lucene.util.automaton.RegExp; +import org.opensearch.common.unit.Fuzziness; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.NamedWriteable; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.Fuzziness; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContentFragment; @@ -691,7 +691,7 @@ public static class Regexp extends IntervalsSourceProvider { /** * Constructor - * + * <p> * {@code flags} is Lucene's <a href="https://github.com/apache/lucene/blob/main/lucene/core/src/java/org/apache/lucene/util/automaton/RegExp.java#L391-L411">syntax flags</a> * and {@code caseInsensitive} enables Lucene's only <a href="https://github.com/apache/lucene/blob/main/lucene/core/src/java/org/apache/lucene/util/automaton/RegExp.java#L416">matching flag</a>. */ diff --git a/server/src/main/java/org/opensearch/index/query/LegacyGeoShapeQueryProcessor.java b/server/src/main/java/org/opensearch/index/query/LegacyGeoShapeQueryProcessor.java index 189f42ae59020..c3437fbf4dda6 100644 --- a/server/src/main/java/org/opensearch/index/query/LegacyGeoShapeQueryProcessor.java +++ b/server/src/main/java/org/opensearch/index/query/LegacyGeoShapeQueryProcessor.java @@ -67,12 +67,13 @@ import org.opensearch.geometry.Rectangle; import org.opensearch.index.mapper.AbstractShapeGeometryFieldMapper; import org.opensearch.index.mapper.LegacyGeoShapeFieldMapper; -import org.locationtech.jts.geom.Coordinate; -import org.locationtech.spatial4j.shape.Shape; import java.util.ArrayList; import java.util.List; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.spatial4j.shape.Shape; + import static org.opensearch.search.SearchService.ALLOW_EXPENSIVE_QUERIES; /** diff --git a/server/src/main/java/org/opensearch/index/query/MatchAllQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MatchAllQueryBuilder.java index fbf2b209dc6c7..c62ee0ac39584 100644 --- a/server/src/main/java/org/opensearch/index/query/MatchAllQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/MatchAllQueryBuilder.java @@ -33,10 +33,10 @@ package org.opensearch.index.query; import org.apache.lucene.search.Query; +import org.opensearch.common.lucene.search.Queries; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.lucene.search.Queries; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; diff --git a/server/src/main/java/org/opensearch/index/query/MatchBoolPrefixQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MatchBoolPrefixQueryBuilder.java index 8f5b537ac8e08..7ceb17203e837 100644 --- a/server/src/main/java/org/opensearch/index/query/MatchBoolPrefixQueryBuilder.java +++ 
b/server/src/main/java/org/opensearch/index/query/MatchBoolPrefixQueryBuilder.java @@ -34,13 +34,13 @@ import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.Query; -import org.opensearch.core.common.ParsingException; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.lucene.search.Queries; import org.opensearch.common.unit.Fuzziness; import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.query.support.QueryParsers; @@ -176,7 +176,7 @@ public String minimumShouldMatch() { } @Deprecated - /** Sets the fuzziness used when evaluated to a fuzzy query type. Defaults to "AUTO". */ + /* Sets the fuzziness used when evaluated to a fuzzy query type. Defaults to "AUTO". */ public MatchBoolPrefixQueryBuilder fuzziness(Object fuzziness) { this.fuzziness = Fuzziness.build(fuzziness); return this; diff --git a/server/src/main/java/org/opensearch/index/query/MatchNoneQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MatchNoneQueryBuilder.java index 98a6a32de4847..17e84bc785206 100644 --- a/server/src/main/java/org/opensearch/index/query/MatchNoneQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/MatchNoneQueryBuilder.java @@ -33,10 +33,10 @@ package org.opensearch.index.query; import org.apache.lucene.search.Query; +import org.opensearch.common.lucene.search.Queries; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.lucene.search.Queries; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; diff --git a/server/src/main/java/org/opensearch/index/query/MatchPhraseQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MatchPhraseQueryBuilder.java index 43e740a1d2867..6cdf6c6600304 100644 --- a/server/src/main/java/org/opensearch/index/query/MatchPhraseQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/MatchPhraseQueryBuilder.java @@ -35,9 +35,9 @@ import org.apache.lucene.search.Query; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.search.MatchQuery; diff --git a/server/src/main/java/org/opensearch/index/query/MatchQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MatchQueryBuilder.java index 59aee000b7792..5e9e6a3660e76 100644 --- a/server/src/main/java/org/opensearch/index/query/MatchQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/MatchQueryBuilder.java @@ -34,13 +34,13 @@ import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.Query; +import org.opensearch.common.lucene.search.Queries; +import org.opensearch.common.unit.Fuzziness; +import 
org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.lucene.search.Queries; -import org.opensearch.common.unit.Fuzziness; -import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.query.support.QueryParsers; @@ -209,7 +209,7 @@ public String analyzer() { } @Deprecated - /** Sets the fuzziness used when evaluated to a fuzzy query type. Defaults to "AUTO". */ + /* Sets the fuzziness used when evaluated to a fuzzy query type. Defaults to "AUTO". */ public MatchQueryBuilder fuzziness(Object fuzziness) { this.fuzziness = Fuzziness.build(fuzziness); return this; diff --git a/server/src/main/java/org/opensearch/index/query/MoreLikeThisQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MoreLikeThisQueryBuilder.java index 7287634ecfacb..e6472afef2215 100644 --- a/server/src/main/java/org/opensearch/index/query/MoreLikeThisQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/MoreLikeThisQueryBuilder.java @@ -37,8 +37,8 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Query; -import org.opensearch.OpenSearchParseException; import org.opensearch.ExceptionsHelper; +import org.opensearch.OpenSearchParseException; import org.opensearch.Version; import org.opensearch.action.RoutingMissingException; import org.opensearch.action.termvectors.MultiTermVectorsItemResponse; @@ -48,22 +48,23 @@ import org.opensearch.action.termvectors.TermVectorsResponse; import org.opensearch.client.Client; import org.opensearch.common.Nullable; +import org.opensearch.common.lucene.search.MoreLikeThisQuery; +import org.opensearch.common.lucene.search.XMoreLikeThis; +import org.opensearch.common.lucene.uid.Versions; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; -import org.opensearch.common.Strings; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.lucene.search.MoreLikeThisQuery; -import org.opensearch.common.lucene.search.XMoreLikeThis; -import org.opensearch.common.lucene.uid.Versions; import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.VersionType; import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.KeywordFieldMapper.KeywordFieldType; @@ -86,7 +87,7 @@ /** * A more like this query that finds documents that are "like" the provided set of document(s). - * + * <p> * The documents are provided as a set of strings and/or a list of {@link Item}. 
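A usage sketch mixing both kinds of "like" input (index name, document id, fields, and text are illustrative, not part of this change):

import org.opensearch.index.query.MoreLikeThisQueryBuilder;
import org.opensearch.index.query.QueryBuilders;

public class MltExample {
    public static void main(String[] args) {
        // Compare against free text plus a stored document reference.
        MoreLikeThisQueryBuilder mlt = QueryBuilders.moreLikeThisQuery(
            new String[] { "title", "description" },        // fields to compare
            new String[] { "distributed search engine" },   // free-text likes
            new MoreLikeThisQueryBuilder.Item[] { new MoreLikeThisQueryBuilder.Item("products", "1") }
        ).minTermFreq(1).maxQueryTerms(25);
        System.out.println(mlt);
    }
}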
* * @opensearch.internal @@ -235,7 +236,7 @@ public Item(@Nullable String index, XContentBuilder doc) { } if (in.readBoolean()) { doc = (BytesReference) in.readGenericValue(); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_10_0)) { mediaType = in.readMediaType(); } else { mediaType = in.readEnum(XContentType.class); @@ -260,7 +261,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(doc != null); if (doc != null) { out.writeGenericValue(doc); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { mediaType.writeTo(out); } else { out.writeEnum((XContentType) mediaType); @@ -382,7 +383,7 @@ public static Item parse(XContentParser parser, Item item) throws IOException { item.id = parser.text(); } else if (DOC.match(currentFieldName, parser.getDeprecationHandler())) { item.doc = BytesReference.bytes(jsonBuilder().copyCurrentStructure(parser)); - item.mediaType = XContentType.JSON; + item.mediaType = MediaTypeRegistry.JSON; } else if (FIELDS.match(currentFieldName, parser.getDeprecationHandler())) { if (token == XContentParser.Token.START_ARRAY) { List<String> fields = new ArrayList<>(); @@ -455,7 +456,7 @@ public String toString() { XContentBuilder builder = XContentFactory.jsonBuilder(); builder.prettyPrint(); toXContent(builder, EMPTY_PARAMS); - return Strings.toString(builder); + return builder.toString(); } catch (Exception e) { return "{ \"error\" : \"" + ExceptionsHelper.detailedMessage(e) + "\"}"; } diff --git a/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java index 38e341d2be536..6227e5d2fa806 100644 --- a/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java @@ -35,15 +35,15 @@ import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.Query; import org.opensearch.OpenSearchParseException; +import org.opensearch.common.unit.Fuzziness; +import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.Fuzziness; -import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.DeprecationHandler; -import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.query.support.QueryParsers; @@ -398,8 +398,8 @@ public int slop() { } @Deprecated - /** - * Sets the fuzziness used when evaluated to a fuzzy query type. Defaults to "AUTO". + /* + Sets the fuzziness used when evaluated to a fuzzy query type. Defaults to "AUTO". 
*/ public MultiMatchQueryBuilder fuzziness(Object fuzziness) { if (fuzziness != null) { diff --git a/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java index 547974b2fd5fe..3f97b3918a126 100644 --- a/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java @@ -49,18 +49,18 @@ import org.apache.lucene.search.join.ScoreMode; import org.opensearch.OpenSearchException; import org.opensearch.action.search.MaxScoreCollector; +import org.opensearch.common.lucene.Lucene; +import org.opensearch.common.lucene.search.Queries; +import org.opensearch.common.lucene.search.TopDocsAndMaxScore; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.lucene.Lucene; -import org.opensearch.common.lucene.search.Queries; -import org.opensearch.common.lucene.search.TopDocsAndMaxScore; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.mapper.ObjectMapper; -import org.opensearch.index.search.OpenSearchToParentBlockJoinQuery; import org.opensearch.index.search.NestedHelper; +import org.opensearch.index.search.OpenSearchToParentBlockJoinQuery; import org.opensearch.search.SearchHit; import org.opensearch.search.fetch.subphase.InnerHitsContext; import org.opensearch.search.internal.SearchContext; @@ -318,11 +318,17 @@ protected Query doToQuery(QueryShardContext context) throws IOException { parentFilter = context.bitsetFilter(objectMapper.nestedTypeFilter()); } + BitSetProducer previousParentFilter = context.getParentFilter(); try { + context.setParentFilter(parentFilter); context.nestedScope().nextLevel(nestedObjectMapper); - innerQuery = this.query.toQuery(context); + try { + innerQuery = this.query.toQuery(context); + } finally { + context.nestedScope().previousLevel(); + } } finally { - context.nestedScope().previousLevel(); + context.setParentFilter(previousParentFilter); } // ToParentBlockJoinQuery requires that the inner query only matches documents diff --git a/server/src/main/java/org/opensearch/index/query/Operator.java b/server/src/main/java/org/opensearch/index/query/Operator.java index 31caca0f60caa..ee8c93ce76ecb 100644 --- a/server/src/main/java/org/opensearch/index/query/Operator.java +++ b/server/src/main/java/org/opensearch/index/query/Operator.java @@ -36,7 +36,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.util.CollectionUtils; +import org.opensearch.core.common.util.CollectionUtils; import java.io.IOException; import java.util.Locale; diff --git a/server/src/main/java/org/opensearch/index/query/ParsedQuery.java b/server/src/main/java/org/opensearch/index/query/ParsedQuery.java index 5d9f67ac365b9..1a4d45178e826 100644 --- a/server/src/main/java/org/opensearch/index/query/ParsedQuery.java +++ b/server/src/main/java/org/opensearch/index/query/ParsedQuery.java @@ -33,6 +33,7 @@ package org.opensearch.index.query; import org.apache.lucene.search.Query; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lucene.search.Queries; import java.util.Map; @@ -42,8 +43,9 @@ /** * The 
result of parsing a query. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ParsedQuery { private final Query query; private final Map<String, Query> namedFilters; diff --git a/server/src/main/java/org/opensearch/index/query/PrefixQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/PrefixQueryBuilder.java index 1956b50a39513..ffc748bffb66e 100644 --- a/server/src/main/java/org/opensearch/index/query/PrefixQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/PrefixQueryBuilder.java @@ -36,16 +36,16 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.Query; +import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.LoggingDeprecationHandler; -import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.ConstantFieldType; +import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.query.support.QueryParsers; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/index/query/QueryBuilder.java b/server/src/main/java/org/opensearch/index/query/QueryBuilder.java index a40ccf427794a..0cdf7f31c2ebf 100644 --- a/server/src/main/java/org/opensearch/index/query/QueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/QueryBuilder.java @@ -33,6 +33,7 @@ package org.opensearch.index.query; import org.apache.lucene.search.Query; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.NamedWriteable; import org.opensearch.core.xcontent.ToXContentObject; @@ -41,8 +42,9 @@ /** * Foundation class for all OpenSearch query builders * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface QueryBuilder extends NamedWriteable, ToXContentObject, Rewriteable<QueryBuilder> { /** @@ -95,4 +97,13 @@ public interface QueryBuilder extends NamedWriteable, ToXContentObject, Rewritea default QueryBuilder rewrite(QueryRewriteContext queryShardContext) throws IOException { return this; } + + /** + * Recurse through the QueryBuilder tree, visiting any child QueryBuilder. + * @param visitor a query builder visitor to be called by each query builder in the tree. + */ + default void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + }; + } diff --git a/server/src/main/java/org/opensearch/index/query/QueryBuilderVisitor.java b/server/src/main/java/org/opensearch/index/query/QueryBuilderVisitor.java new file mode 100644 index 0000000000000..b40dcca17e45b --- /dev/null +++ b/server/src/main/java/org/opensearch/index/query/QueryBuilderVisitor.java @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.query; + +import org.apache.lucene.search.BooleanClause; +import org.opensearch.common.annotation.PublicApi; + +/** + * QueryBuilderVisitor is an interface that defines a visitor object used to traverse the QueryBuilder tree. + * + * @opensearch.api + */ +@PublicApi(since = "2.11.0") +public interface QueryBuilderVisitor { + + /** + * Accept method is called when the visitor accepts the queryBuilder object to be traversed in the query tree. + * @param qb is a queryBuilder object which is accepted by the visitor. + */ + void accept(QueryBuilder qb); + + /** + * Fetches the child sub visitor from the main QueryBuilderVisitor Object. + * @param occur defines the occurrence of the result fetched from the search query in the final search result. + * @return a child queryBuilder Visitor Object. + */ + QueryBuilderVisitor getChildVisitor(BooleanClause.Occur occur); + + /** + * NoopQueryVisitor is a default implementation of QueryBuilderVisitor. + * When a user does not want to implement QueryBuilderVisitor and just needs to pass an empty object, this implementation will be used. + * + */ + QueryBuilderVisitor NO_OP_VISITOR = new QueryBuilderVisitor() { + @Override + public void accept(QueryBuilder qb) { + // Do nothing + } + + @Override + public QueryBuilderVisitor getChildVisitor(BooleanClause.Occur occur) { + return this; + } + }; + +} diff --git a/server/src/main/java/org/opensearch/index/query/QueryBuilders.java b/server/src/main/java/org/opensearch/index/query/QueryBuilders.java index be384d8bf1a20..387d21830aa38 100644 --- a/server/src/main/java/org/opensearch/index/query/QueryBuilders.java +++ b/server/src/main/java/org/opensearch/index/query/QueryBuilders.java @@ -34,10 +34,10 @@ import org.apache.lucene.search.join.ScoreMode; import org.opensearch.common.Nullable; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.geo.ShapeRelation; import org.opensearch.common.geo.builders.ShapeBuilder; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.geometry.Geometry; import org.opensearch.index.query.DistanceFeatureQueryBuilder.Origin; import org.opensearch.index.query.MoreLikeThisQueryBuilder.Item; diff --git a/server/src/main/java/org/opensearch/index/query/QueryParser.java b/server/src/main/java/org/opensearch/index/query/QueryParser.java index b8edcb92185c4..1eefb87f6e2b2 100644 --- a/server/src/main/java/org/opensearch/index/query/QueryParser.java +++ b/server/src/main/java/org/opensearch/index/query/QueryParser.java @@ -32,8 +32,8 @@ package org.opensearch.index.query; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.xcontent.XContent; +import org.opensearch.core.xcontent.XContentParser; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/index/query/QueryRewriteContext.java b/server/src/main/java/org/opensearch/index/query/QueryRewriteContext.java index ab5406d5531bd..15a6d0b5a774e 100644 --- a/server/src/main/java/org/opensearch/index/query/QueryRewriteContext.java +++ b/server/src/main/java/org/opensearch/index/query/QueryRewriteContext.java @@ -31,10 +31,11 @@ package org.opensearch.index.query; -import org.opensearch.action.ActionListener; import org.opensearch.client.Client; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.concurrent.CountDown; +import org.opensearch.core.action.ActionListener; 
+import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; @@ -46,8 +47,9 @@ /** * Context object used to rewrite {@link QueryBuilder} instances into simplified version. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class QueryRewriteContext { private final NamedXContentRegistry xContentRegistry; private final NamedWriteableRegistry writeableRegistry; diff --git a/server/src/main/java/org/opensearch/index/query/QueryShapeVisitor.java b/server/src/main/java/org/opensearch/index/query/QueryShapeVisitor.java new file mode 100644 index 0000000000000..3ba13bc7a2da4 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/query/QueryShapeVisitor.java @@ -0,0 +1,86 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.query; + +import org.apache.lucene.search.BooleanClause; +import org.opensearch.common.SetOnce; + +import java.util.ArrayList; +import java.util.EnumMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +/** + * Class to traverse the QueryBuilder tree and capture the query shape + */ +public final class QueryShapeVisitor implements QueryBuilderVisitor { + private final SetOnce<String> queryType = new SetOnce<>(); + private final Map<BooleanClause.Occur, List<QueryShapeVisitor>> childVisitors = new EnumMap<>(BooleanClause.Occur.class); + + @Override + public void accept(QueryBuilder qb) { + queryType.set(qb.getName()); + } + + @Override + public QueryBuilderVisitor getChildVisitor(BooleanClause.Occur occur) { + // Should get called once per Occur value + if (childVisitors.containsKey(occur)) { + throw new IllegalStateException("child visitor already called for " + occur); + } + final List<QueryShapeVisitor> childVisitorList = new ArrayList<>(); + QueryBuilderVisitor childVisitorWrapper = new QueryBuilderVisitor() { + QueryShapeVisitor currentChild; + + @Override + public void accept(QueryBuilder qb) { + currentChild = new QueryShapeVisitor(); + childVisitorList.add(currentChild); + currentChild.accept(qb); + } + + @Override + public QueryBuilderVisitor getChildVisitor(BooleanClause.Occur occur) { + return currentChild.getChildVisitor(occur); + } + }; + childVisitors.put(occur, childVisitorList); + return childVisitorWrapper; + } + + String toJson() { + StringBuilder outputBuilder = new StringBuilder("{\"type\":\"").append(queryType.get()).append("\""); + for (Map.Entry<BooleanClause.Occur, List<QueryShapeVisitor>> entry : childVisitors.entrySet()) { + outputBuilder.append(",\"").append(entry.getKey().name().toLowerCase(Locale.ROOT)).append("\":["); + boolean first = true; + for (QueryShapeVisitor child : entry.getValue()) { + if (!first) { + outputBuilder.append(","); + } + outputBuilder.append(child.toJson()); + first = false; + } + outputBuilder.append("]"); + } + outputBuilder.append("}"); + return outputBuilder.toString(); + } + + public String prettyPrintTree(String indent) { + StringBuilder outputBuilder = new StringBuilder(indent).append(queryType.get()).append("\n"); + for (Map.Entry<BooleanClause.Occur, List<QueryShapeVisitor>> entry : childVisitors.entrySet()) { + outputBuilder.append(indent).append(" ").append(entry.getKey().name().toLowerCase(Locale.ROOT)).append(":\n"); + for (QueryShapeVisitor 
child : entry.getValue()) { + outputBuilder.append(child.prettyPrintTree(indent + " ")); + } + } + return outputBuilder.toString(); + } +} diff --git a/server/src/main/java/org/opensearch/index/query/QueryShardContext.java b/server/src/main/java/org/opensearch/index/query/QueryShardContext.java index a1fc4327997e8..f3b392559d33e 100644 --- a/server/src/main/java/org/opensearch/index/query/QueryShardContext.java +++ b/server/src/main/java/org/opensearch/index/query/QueryShardContext.java @@ -39,18 +39,19 @@ import org.apache.lucene.search.join.BitSetProducer; import org.apache.lucene.search.similarities.Similarity; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.client.Client; import org.opensearch.common.CheckedFunction; -import org.opensearch.core.common.ParsingException; import org.opensearch.common.SetOnce; import org.opensearch.common.TriFunction; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lucene.search.Queries; import org.opensearch.common.util.BigArrays; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.index.Index; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.index.Index; import org.opensearch.index.IndexSettings; import org.opensearch.index.IndexSortConfig; import org.opensearch.index.analysis.IndexAnalyzers; @@ -90,8 +91,9 @@ /** * Context object used to create lucene queries on the shard level. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class QueryShardContext extends QueryRewriteContext { private final ScriptService scriptService; @@ -115,6 +117,7 @@ public class QueryShardContext extends QueryRewriteContext { private boolean mapUnmappedFieldAsString; private NestedScope nestedScope; private final ValuesSourceRegistry valuesSourceRegistry; + private BitSetProducer parentFilter; public QueryShardContext( int shardId, @@ -253,7 +256,7 @@ private QueryShardContext( this.bitsetFilterCache = bitsetFilterCache; this.indexFieldDataService = indexFieldDataLookup; this.allowUnmappedFields = indexSettings.isDefaultAllowUnmappedFields(); - this.nestedScope = new NestedScope(); + this.nestedScope = new NestedScope(indexSettings); this.scriptService = scriptService; this.indexSettings = indexSettings; this.searcher = searcher; @@ -267,7 +270,7 @@ private void reset() { allowUnmappedFields = indexSettings.isDefaultAllowUnmappedFields(); this.lookup = null; this.namedQueries.clear(); - this.nestedScope = new NestedScope(); + this.nestedScope = new NestedScope(indexSettings); } public IndexAnalyzers getIndexAnalyzers() { @@ -420,7 +423,8 @@ public SearchLookup lookup() { if (this.lookup == null) { this.lookup = new SearchLookup( getMapperService(), - (fieldType, searchLookup) -> indexFieldDataService.apply(fieldType, fullyQualifiedIndex.getName(), searchLookup) + (fieldType, searchLookup) -> indexFieldDataService.apply(fieldType, fullyQualifiedIndex.getName(), searchLookup), + shardId ); } return this.lookup; @@ -436,7 +440,8 @@ public SearchLookup newFetchLookup() { */ return new SearchLookup( getMapperService(), - (fieldType, searchLookup) -> indexFieldDataService.apply(fieldType, fullyQualifiedIndex.getName(), searchLookup) + (fieldType, searchLookup) -> 
indexFieldDataService.apply(fieldType, fullyQualifiedIndex.getName(), searchLookup), + shardId ); } @@ -509,7 +514,7 @@ public final void freezeContext() { /** * This method fails if {@link #freezeContext()} is called before on this * context. This is used to <i>seal</i>. - * + * <p> * This method and all methods that call it should be final to ensure that * setting the request as not cacheable and the freezing behaviour of this * class cannot be bypassed. This is important so we can trust when this @@ -622,4 +627,12 @@ public BitsetFilterCache getBitsetFilterCache() { public AggregationUsageService getUsageService() { return valuesSourceRegistry.getUsageService(); } + + public BitSetProducer getParentFilter() { + return parentFilter; + } + + public void setParentFilter(BitSetProducer parentFilter) { + this.parentFilter = parentFilter; + } } diff --git a/server/src/main/java/org/opensearch/index/query/QueryStringQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/QueryStringQueryBuilder.java index ebe314cebf1a0..3d8fbd5fc436d 100644 --- a/server/src/main/java/org/opensearch/index/query/QueryStringQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/QueryStringQueryBuilder.java @@ -36,14 +36,14 @@ import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.automaton.Operations; -import org.opensearch.core.ParseField; -import org.opensearch.core.common.ParsingException; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.lucene.search.Queries; import org.opensearch.common.regex.Regex; import org.opensearch.common.unit.Fuzziness; import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.analysis.NamedAnalyzer; @@ -119,7 +119,7 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue * currently _ALL. Uses a TreeMap to hold the fields so boolean clauses are * always sorted in same order for generated Lucene query for easier * testing. - * + * <p> * Can be changed back to HashMap once https://issues.apache.org/jira/browse/LUCENE-6305 is fixed. 
*/ private final Map<String, Float> fieldsAndWeights = new TreeMap<>(); diff --git a/server/src/main/java/org/opensearch/index/query/RangeQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/RangeQueryBuilder.java index 0f43a192cdd60..fdbef2c732361 100644 --- a/server/src/main/java/org/opensearch/index/query/RangeQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/RangeQueryBuilder.java @@ -35,14 +35,14 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; -import org.opensearch.core.ParseField; -import org.opensearch.core.common.ParsingException; import org.opensearch.common.geo.ShapeRelation; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.time.DateMathParser; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.mapper.FieldNamesFieldMapper; @@ -504,9 +504,9 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws @Override protected Query doToQuery(QueryShardContext context) throws IOException { if (from == null && to == null) { - /** - * Open bounds on both side, we can rewrite to an exists query - * if the {@link FieldNamesFieldMapper} is enabled. + /* + Open bounds on both sides, we can rewrite to an exists query + if the {@link FieldNamesFieldMapper} is enabled. 
*/ final FieldNamesFieldMapper.FieldNamesFieldType fieldNamesFieldType = (FieldNamesFieldMapper.FieldNamesFieldType) context .getMapperService() diff --git a/server/src/main/java/org/opensearch/index/query/RegexpQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/RegexpQueryBuilder.java index 0cce2401df065..f0da4d5736c0f 100644 --- a/server/src/main/java/org/opensearch/index/query/RegexpQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/RegexpQueryBuilder.java @@ -38,13 +38,13 @@ import org.apache.lucene.search.RegexpQuery; import org.apache.lucene.util.automaton.Operations; import org.apache.lucene.util.automaton.RegExp; +import org.opensearch.common.lucene.BytesRefs; +import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.lucene.BytesRefs; -import org.opensearch.common.xcontent.LoggingDeprecationHandler; -import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.IndexSettings; diff --git a/server/src/main/java/org/opensearch/index/query/Rewriteable.java b/server/src/main/java/org/opensearch/index/query/Rewriteable.java index ea884f720f4fc..bca719c6fe857 100644 --- a/server/src/main/java/org/opensearch/index/query/Rewriteable.java +++ b/server/src/main/java/org/opensearch/index/query/Rewriteable.java @@ -31,7 +31,8 @@ package org.opensearch.index.query; -import org.opensearch.action.ActionListener; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.ParsingException; import java.io.IOException; @@ -41,8 +42,9 @@ /** * A basic interface for rewriteable classes. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface Rewriteable<T> { int MAX_REWRITE_ROUNDS = 16; diff --git a/server/src/main/java/org/opensearch/index/query/ScriptQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/ScriptQueryBuilder.java index 916df7debc1bc..ded6fd0528c33 100644 --- a/server/src/main/java/org/opensearch/index/query/ScriptQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/ScriptQueryBuilder.java @@ -45,10 +45,10 @@ import org.apache.lucene.search.Weight; import org.opensearch.OpenSearchException; import org.opensearch.common.Nullable; +import org.opensearch.common.lucene.search.function.Functions; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.lucene.search.function.Functions; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.script.FilterScript; diff --git a/server/src/main/java/org/opensearch/index/query/SearchIndexNameMatcher.java b/server/src/main/java/org/opensearch/index/query/SearchIndexNameMatcher.java index 00758309fc0f0..598406b4e45f2 100644 --- a/server/src/main/java/org/opensearch/index/query/SearchIndexNameMatcher.java +++ b/server/src/main/java/org/opensearch/index/query/SearchIndexNameMatcher.java @@ -73,7 +73,7 @@ public SearchIndexNameMatcher( /** * Given an index pattern, checks whether it matches against the current shard. - * + * <p> * If this shard represents a remote shard target, then in order to match, the pattern must contain * the separator ':' and must match on both the cluster alias and index name. */ diff --git a/server/src/main/java/org/opensearch/index/query/SimpleQueryStringBuilder.java b/server/src/main/java/org/opensearch/index/query/SimpleQueryStringBuilder.java index cf9f86b6f6341..57ae7dd0ea5e9 100644 --- a/server/src/main/java/org/opensearch/index/query/SimpleQueryStringBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/SimpleQueryStringBuilder.java @@ -35,13 +35,13 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.Query; +import org.opensearch.common.lucene.search.Queries; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.lucene.search.Queries; -import org.opensearch.common.util.CollectionUtils; -import org.opensearch.core.common.Strings; +import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.search.QueryParserHelper; @@ -66,7 +66,7 @@ * <li>'{@code -}' negates a single token: {@code -token0} * <li>'{@code "}' creates phrases of terms: {@code "term1 term2 ..."} * <li>'{@code *}' at the end of terms specifies prefix query: {@code term*} - * <li>'{@code (}' and '{@code)}' specifies precedence: {@code token1 + (token2 | token3)} + * <li>'{@code (}' and '{@code )}' specify precedence: {@code token1 + (token2 | token3)} * <li>'{@code ~}N' at the end of terms specifies fuzzy query: {@code term~1} * <li>'{@code ~}N' at the end of phrases specifies near/slop query: {@code "term1 term2"~5} * </ul> 
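The QueryBuilderVisitor extension point introduced in this diff (the QueryBuilder#visit default method plus the per-builder overrides in the span query builders later in this section) supports generic traversal of a query tree. A minimal sketch of a custom visitor follows; the QueryTypeCollector class is hypothetical and not part of this change:

import org.apache.lucene.search.BooleanClause;
import org.opensearch.index.query.QueryBuilder;
import org.opensearch.index.query.QueryBuilderVisitor;

import java.util.ArrayList;
import java.util.List;

// Hypothetical visitor that records the name of every query builder in the tree.
public class QueryTypeCollector implements QueryBuilderVisitor {
    private final List<String> names = new ArrayList<>();

    @Override
    public void accept(QueryBuilder qb) {
        names.add(qb.getName());
    }

    @Override
    public QueryBuilderVisitor getChildVisitor(BooleanClause.Occur occur) {
        // Reuse the same collector for every child clause, flattening the tree.
        return this;
    }

    public List<String> names() {
        return names;
    }
}

Calling builder.visit(new QueryTypeCollector()) on a span_near query with two span_term clauses would collect ["span_near", "span_term", "span_term"], since SpanNearQueryBuilder#visit accepts itself and then forwards each clause to the MUST child visitor.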
diff --git a/server/src/main/java/org/opensearch/index/query/SourceFieldMatchQuery.java b/server/src/main/java/org/opensearch/index/query/SourceFieldMatchQuery.java new file mode 100644 index 0000000000000..b0be20e417efe --- /dev/null +++ b/server/src/main/java/org/opensearch/index/query/SourceFieldMatchQuery.java @@ -0,0 +1,160 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.query; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.memory.MemoryIndex; +import org.apache.lucene.search.ConstantScoreScorer; +import org.apache.lucene.search.ConstantScoreWeight; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryVisitor; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.TwoPhaseIterator; +import org.apache.lucene.search.Weight; +import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.SourceValueFetcher; +import org.opensearch.search.lookup.LeafSearchLookup; +import org.opensearch.search.lookup.SearchLookup; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +/** + * A query that matches against each document from the parent query by filtering using the source field values. + * Useful to query against field type which doesn't store positional data and field is not stored/computed dynamically. + */ +public class SourceFieldMatchQuery extends Query { + private final Query delegateQuery; + private final Query filter; + private final SearchLookup lookup; + private final MappedFieldType fieldType; + private final SourceValueFetcher valueFetcher; + private final QueryShardContext context; + + /** + * Constructs a SourceFieldMatchQuery. + * + * @param delegateQuery The parent query to use to find matches. + * @param filter The query used to filter further by running against field value fetched using _source field. + * @param fieldType The mapped field type. 
+ * @param context The QueryShardContext to get lookup and valueFetcher + */ + public SourceFieldMatchQuery(Query delegateQuery, Query filter, MappedFieldType fieldType, QueryShardContext context) { + this.delegateQuery = delegateQuery; + this.filter = filter; + this.fieldType = fieldType; + this.context = context; + this.lookup = context.lookup(); + if (!context.documentMapper("").sourceMapper().enabled()) { + throw new IllegalArgumentException( + "SourceFieldMatchQuery error: unable to fetch fields from _source field: _source is disabled in the mappings " + + "for index [" + + context.index().getName() + + "]" + ); + } + this.valueFetcher = (SourceValueFetcher) fieldType.valueFetcher(context, lookup, null); + } + + @Override + public void visit(QueryVisitor visitor) { + delegateQuery.visit(visitor); + } + + @Override + public Query rewrite(IndexSearcher indexSearcher) throws IOException { + Query rewritten = indexSearcher.rewrite(delegateQuery); + if (rewritten == delegateQuery) { + return this; + } + return new SourceFieldMatchQuery(rewritten, filter, fieldType, context); + } + + @Override + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + + Weight weight = delegateQuery.createWeight(searcher, ScoreMode.TOP_DOCS, boost); + + return new ConstantScoreWeight(this, boost) { + + @Override + public Scorer scorer(LeafReaderContext context) throws IOException { + + Scorer scorer = weight.scorer(context); + if (scorer == null) { + // none of the docs are matching + return null; + } + DocIdSetIterator approximation = scorer.iterator(); + LeafSearchLookup leafSearchLookup = lookup.getLeafSearchLookup(context); + TwoPhaseIterator twoPhase = new TwoPhaseIterator(approximation) { + + @Override + public boolean matches() { + leafSearchLookup.setDocument(approximation.docID()); + List<Object> values = valueFetcher.fetchValues(leafSearchLookup.source()); + // Missing fields won't count as match. Can we use a default value for missing field? 
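+ // Two-phase confirmation: each doc that survives the approximation is re-checked by indexing its fetched _source values into a transient MemoryIndex and scoring the filter against it below; a positive score means a real match.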
+ if (values.isEmpty()) { + return false; + } + MemoryIndex memoryIndex = new MemoryIndex(); + for (Object value : values) { + memoryIndex.addField(fieldType.name(), (String) value, fieldType.indexAnalyzer()); + } + float score = memoryIndex.search(filter); + return score > 0.0f; + } + + @Override + public float matchCost() { + // arbitrary cost + return 1000f; + } + }; + return new ConstantScoreScorer(this, score(), ScoreMode.TOP_DOCS, twoPhase); + } + + @Override + public boolean isCacheable(LeafReaderContext ctx) { + // It is fine to cache if delegate query weight is cacheable since additional logic here + // is just a filter on top of delegate query matches + return weight.isCacheable(ctx); + } + }; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (sameClassAs(o) == false) { + return false; + } + SourceFieldMatchQuery other = (SourceFieldMatchQuery) o; + return Objects.equals(this.delegateQuery, other.delegateQuery) + && Objects.equals(this.filter, other.filter) + && Objects.equals(this.fieldType, other.fieldType) + && Objects.equals(this.context, other.context); + } + + @Override + public int hashCode() { + return Objects.hash(classHash(), delegateQuery, filter, fieldType, context); + } + + @Override + public String toString(String f) { + return "SourceFieldMatchQuery (delegate query: [ " + delegateQuery.toString(f) + " ], filter query: [ " + filter.toString(f) + "])"; + } +} diff --git a/server/src/main/java/org/opensearch/index/query/SpanContainingQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/SpanContainingQueryBuilder.java index ed4f5c6848b06..32a19ea3e9b50 100644 --- a/server/src/main/java/org/opensearch/index/query/SpanContainingQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/SpanContainingQueryBuilder.java @@ -34,6 +34,7 @@ import org.apache.lucene.queries.spans.SpanContainingQuery; import org.apache.lucene.queries.spans.SpanQuery; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; @@ -188,4 +189,11 @@ protected boolean doEquals(SpanContainingQueryBuilder other) { public String getWriteableName() { return NAME; } + + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + visitor.getChildVisitor(BooleanClause.Occur.MUST).accept(big); + visitor.getChildVisitor(BooleanClause.Occur.MUST).accept(little); + } } diff --git a/server/src/main/java/org/opensearch/index/query/SpanFirstQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/SpanFirstQueryBuilder.java index 7427b13463284..bcbc64ddf386d 100644 --- a/server/src/main/java/org/opensearch/index/query/SpanFirstQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/SpanFirstQueryBuilder.java @@ -34,6 +34,7 @@ import org.apache.lucene.queries.spans.SpanFirstQuery; import org.apache.lucene.queries.spans.SpanQuery; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; @@ -186,4 +187,10 @@ protected boolean doEquals(SpanFirstQueryBuilder other) { public String getWriteableName() { return NAME; } + + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + visitor.getChildVisitor(BooleanClause.Occur.MUST).accept(matchBuilder); + } } diff --git 
a/server/src/main/java/org/opensearch/index/query/SpanMultiTermQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/SpanMultiTermQueryBuilder.java index 9e458c994bf9e..96d03c91964e3 100644 --- a/server/src/main/java/org/opensearch/index/query/SpanMultiTermQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/SpanMultiTermQueryBuilder.java @@ -33,18 +33,19 @@ import org.apache.lucene.queries.SpanMatchNoDocsQuery; import org.apache.lucene.queries.spans.SpanMultiTermQueryWrapper; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TopTermsRewrite; +import org.opensearch.common.lucene.search.SpanBooleanQueryRewriteWithMaxClause; +import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.lucene.search.SpanBooleanQueryRewriteWithMaxClause; -import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.mapper.MappedFieldType; @@ -213,4 +214,12 @@ protected boolean doEquals(SpanMultiTermQueryBuilder other) { public String getWriteableName() { return NAME; } + + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + if (multiTermQueryBuilder != null) { + visitor.getChildVisitor(BooleanClause.Occur.MUST).accept(multiTermQueryBuilder); + } + } } diff --git a/server/src/main/java/org/opensearch/index/query/SpanNearQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/SpanNearQueryBuilder.java index c86831fad0370..30a1c29c29126 100644 --- a/server/src/main/java/org/opensearch/index/query/SpanNearQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/SpanNearQueryBuilder.java @@ -34,13 +34,14 @@ import org.apache.lucene.queries.spans.SpanNearQuery; import org.apache.lucene.queries.spans.SpanQuery; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; +import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; -import org.opensearch.common.Strings; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.ParseField; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentLocation; import org.opensearch.core.xcontent.XContentParser; @@ -299,6 +300,17 @@ public String getWriteableName() { return NAME; } + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + if (this.clauses.isEmpty() == false) { + QueryBuilderVisitor subVisitor = visitor.getChildVisitor(BooleanClause.Occur.MUST); + for (QueryBuilder subQb : this.clauses) { + subVisitor.accept(subQb); + } + } + } + /** * SpanGapQueryBuilder enables gaps in a SpanNearQuery. 
* Since, SpanGapQuery is private to SpanNearQuery, SpanGapQueryBuilder cannot @@ -326,7 +338,7 @@ public static class SpanGapQueryBuilder implements SpanQueryBuilder { * @param width The width of the gap introduced */ public SpanGapQueryBuilder(String fieldName, int width) { - if (org.opensearch.core.common.Strings.isEmpty(fieldName)) { + if (Strings.isEmpty(fieldName)) { throw new IllegalArgumentException("[span_gap] field name is null or empty"); } // lucene has not coded any restriction on value of width. @@ -445,7 +457,7 @@ public final int hashCode() { @Override public final String toString() { - return Strings.toString(XContentType.JSON, this, true, true); + return Strings.toString(MediaTypeRegistry.JSON, this, true, true); } // copied from AbstractQueryBuilder diff --git a/server/src/main/java/org/opensearch/index/query/SpanNotQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/SpanNotQueryBuilder.java index 98e7f287749f5..59ec5b9d77fc8 100644 --- a/server/src/main/java/org/opensearch/index/query/SpanNotQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/SpanNotQueryBuilder.java @@ -34,6 +34,7 @@ import org.apache.lucene.queries.spans.SpanNotQuery; import org.apache.lucene.queries.spans.SpanQuery; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; @@ -284,4 +285,16 @@ protected boolean doEquals(SpanNotQueryBuilder other) { public String getWriteableName() { return NAME; } + + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + if (include != null) { + visitor.getChildVisitor(BooleanClause.Occur.MUST).accept(include); + } + + if (exclude != null) { + visitor.getChildVisitor(BooleanClause.Occur.MUST_NOT).accept(exclude); + } + } } diff --git a/server/src/main/java/org/opensearch/index/query/SpanOrQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/SpanOrQueryBuilder.java index 2f63e6d7403f7..fae1e318c66bd 100644 --- a/server/src/main/java/org/opensearch/index/query/SpanOrQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/SpanOrQueryBuilder.java @@ -34,6 +34,7 @@ import org.apache.lucene.queries.spans.SpanOrQuery; import org.apache.lucene.queries.spans.SpanQuery; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; @@ -188,4 +189,15 @@ protected boolean doEquals(SpanOrQueryBuilder other) { public String getWriteableName() { return NAME; } + + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + if (clauses.isEmpty() == false) { + QueryBuilderVisitor subVisitor = visitor.getChildVisitor(BooleanClause.Occur.SHOULD); + for (QueryBuilder subQb : this.clauses) { + subVisitor.accept(subQb); + } + } + } } diff --git a/server/src/main/java/org/opensearch/index/query/SpanTermQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/SpanTermQueryBuilder.java index d97fbaf38fdae..a6108578da06c 100644 --- a/server/src/main/java/org/opensearch/index/query/SpanTermQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/SpanTermQueryBuilder.java @@ -36,10 +36,10 @@ import org.apache.lucene.queries.spans.SpanQuery; import org.apache.lucene.queries.spans.SpanTermQuery; import org.apache.lucene.search.Query; +import org.opensearch.common.lucene.BytesRefs; import 
org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.lucene.BytesRefs; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.mapper.MappedFieldType; diff --git a/server/src/main/java/org/opensearch/index/query/SpanWithinQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/SpanWithinQueryBuilder.java index 5d02cc0026dfd..4d5a6dde61a70 100644 --- a/server/src/main/java/org/opensearch/index/query/SpanWithinQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/SpanWithinQueryBuilder.java @@ -34,6 +34,7 @@ import org.apache.lucene.queries.spans.SpanQuery; import org.apache.lucene.queries.spans.SpanWithinQuery; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; @@ -197,4 +198,11 @@ protected boolean doEquals(SpanWithinQueryBuilder other) { public String getWriteableName() { return NAME; } + + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + visitor.getChildVisitor(BooleanClause.Occur.MUST).accept(big); + visitor.getChildVisitor(BooleanClause.Occur.MUST).accept(little); + } } diff --git a/server/src/main/java/org/opensearch/index/query/TermQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/TermQueryBuilder.java index 21bc60646d535..02779bc916cde 100644 --- a/server/src/main/java/org/opensearch/index/query/TermQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/TermQueryBuilder.java @@ -41,8 +41,8 @@ import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.ConstantFieldType; +import org.opensearch.index.mapper.MappedFieldType; import java.io.IOException; import java.util.Objects; diff --git a/server/src/main/java/org/opensearch/index/query/TermsQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/TermsQueryBuilder.java index c677bab0ea7e0..ac0ca3919ea38 100644 --- a/server/src/main/java/org/opensearch/index/query/TermsQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/TermsQueryBuilder.java @@ -37,19 +37,19 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; -import org.opensearch.action.ActionListener; import org.opensearch.action.get.GetRequest; import org.opensearch.client.Client; -import org.opensearch.core.common.ParsingException; import org.opensearch.common.SetOnce; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.support.XContentMapValues; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.index.IndexSettings; import 
org.opensearch.index.mapper.ConstantFieldType; import org.opensearch.index.mapper.MappedFieldType; diff --git a/server/src/main/java/org/opensearch/index/query/TermsSetQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/TermsSetQueryBuilder.java index 940356563ab4d..e2cf7384ecac7 100644 --- a/server/src/main/java/org/opensearch/index/query/TermsSetQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/TermsSetQueryBuilder.java @@ -42,12 +42,12 @@ import org.apache.lucene.search.LongValuesSource; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; +import org.opensearch.common.lucene.BytesRefs; +import org.opensearch.common.lucene.search.Queries; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.lucene.BytesRefs; -import org.opensearch.common.lucene.search.Queries; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.fielddata.IndexNumericFieldData; diff --git a/server/src/main/java/org/opensearch/index/query/WildcardQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/WildcardQueryBuilder.java index 69b0190bfb700..d1fe4f0ba0264 100644 --- a/server/src/main/java/org/opensearch/index/query/WildcardQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/WildcardQueryBuilder.java @@ -36,16 +36,16 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.Query; +import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.LoggingDeprecationHandler; -import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.ConstantFieldType; +import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.query.support.QueryParsers; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/index/query/WrapperQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/WrapperQueryBuilder.java index aab9adecb9a62..8a322b2f9e173 100644 --- a/server/src/main/java/org/opensearch/index/query/WrapperQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/WrapperQueryBuilder.java @@ -34,15 +34,15 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; +import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.LoggingDeprecationHandler; -import org.opensearch.core.common.Strings; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import 
org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; import java.io.IOException; @@ -175,7 +175,8 @@ protected boolean doEquals(WrapperQueryBuilder other) { @Override protected QueryBuilder doRewrite(QueryRewriteContext context) throws IOException { try ( - XContentParser qSourceParser = XContentFactory.xContent(source) + XContentParser qSourceParser = MediaTypeRegistry.xContent(source) + .xContent() .createParser(context.getXContentRegistry(), LoggingDeprecationHandler.INSTANCE, source) ) { diff --git a/server/src/main/java/org/opensearch/index/query/functionscore/DecayFunctionBuilder.java b/server/src/main/java/org/opensearch/index/query/functionscore/DecayFunctionBuilder.java index 40b15eace2bad..1c693f9761240 100644 --- a/server/src/main/java/org/opensearch/index/query/functionscore/DecayFunctionBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/functionscore/DecayFunctionBuilder.java @@ -36,23 +36,23 @@ import org.apache.lucene.search.Explanation; import org.opensearch.OpenSearchParseException; import org.opensearch.common.Nullable; -import org.opensearch.core.common.ParsingException; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.geo.GeoDistance; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.geo.GeoUtils; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.lucene.search.function.CombineFunction; import org.opensearch.common.lucene.search.function.LeafScoreFunction; import org.opensearch.common.lucene.search.function.ScoreFunction; import org.opensearch.common.unit.DistanceUnit; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.fielddata.FieldData; import org.opensearch.index.fielddata.IndexGeoPointFieldData; @@ -230,7 +230,8 @@ protected ScoreFunction doToFunction(QueryShardContext context) throws IOExcepti // EMPTY is safe because parseVariable doesn't use namedObject try ( InputStream stream = functionBytes.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentHelper.xContentType(functionBytes)) + XContentParser parser = MediaTypeRegistry.xContentType(functionBytes) + .xContent() .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream) ) { scoreFunction = parseVariable(fieldName, parser, context, multiValueMode); @@ -559,6 +560,11 @@ public boolean needsScores() { protected NumericDoubleValues distance(LeafReaderContext context) { final SortedNumericDoubleValues doubleValues = fieldData.load(context).getDoubleValues(); return FieldData.replaceMissing(mode.select(new SortingNumericDoubleValues() { + @Override + public int advance(int target) throws IOException { + return doubleValues.advance(target); + } + @Override public 
boolean advanceExact(int docId) throws IOException { if (doubleValues.advanceExact(docId)) { diff --git a/server/src/main/java/org/opensearch/index/query/functionscore/DecayFunctionParser.java b/server/src/main/java/org/opensearch/index/query/functionscore/DecayFunctionParser.java index 4bbb9e32730c4..f04d5c46740f7 100644 --- a/server/src/main/java/org/opensearch/index/query/functionscore/DecayFunctionParser.java +++ b/server/src/main/java/org/opensearch/index/query/functionscore/DecayFunctionParser.java @@ -32,11 +32,11 @@ package org.opensearch.index.query.functionscore; +import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.plugins.SearchPlugin; import org.opensearch.search.MultiValueMode; diff --git a/server/src/main/java/org/opensearch/index/query/functionscore/ExponentialDecayFunctionBuilder.java b/server/src/main/java/org/opensearch/index/query/functionscore/ExponentialDecayFunctionBuilder.java index b526dab025e55..7c69e345eb307 100644 --- a/server/src/main/java/org/opensearch/index/query/functionscore/ExponentialDecayFunctionBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/functionscore/ExponentialDecayFunctionBuilder.java @@ -34,9 +34,9 @@ import org.apache.lucene.search.Explanation; import org.opensearch.common.Nullable; +import org.opensearch.common.lucene.search.function.Functions; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.lucene.search.function.Functions; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/index/query/functionscore/FieldValueFactorFunctionBuilder.java b/server/src/main/java/org/opensearch/index/query/functionscore/FieldValueFactorFunctionBuilder.java index de22ae14528c2..28498735cb5a4 100644 --- a/server/src/main/java/org/opensearch/index/query/functionscore/FieldValueFactorFunctionBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/functionscore/FieldValueFactorFunctionBuilder.java @@ -34,11 +34,11 @@ import org.opensearch.OpenSearchException; import org.opensearch.common.Nullable; +import org.opensearch.common.lucene.search.function.FieldValueFactorFunction; +import org.opensearch.common.lucene.search.function.ScoreFunction; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.lucene.search.function.FieldValueFactorFunction; -import org.opensearch.common.lucene.search.function.ScoreFunction; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.fielddata.IndexNumericFieldData; diff --git a/server/src/main/java/org/opensearch/index/query/functionscore/FunctionScoreQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/functionscore/FunctionScoreQueryBuilder.java index 1d15172852295..b3c797f11de6d 100644 --- a/server/src/main/java/org/opensearch/index/query/functionscore/FunctionScoreQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/functionscore/FunctionScoreQueryBuilder.java @@ -35,14 +35,14 @@ import 
org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.opensearch.common.Nullable; +import org.opensearch.common.lucene.search.function.CombineFunction; +import org.opensearch.common.lucene.search.function.FunctionScoreQuery; +import org.opensearch.common.lucene.search.function.ScoreFunction; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.lucene.search.function.CombineFunction; -import org.opensearch.common.lucene.search.function.FunctionScoreQuery; -import org.opensearch.common.lucene.search.function.ScoreFunction; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentLocation; diff --git a/server/src/main/java/org/opensearch/index/query/functionscore/GaussDecayFunctionBuilder.java b/server/src/main/java/org/opensearch/index/query/functionscore/GaussDecayFunctionBuilder.java index 3ef28493ed146..89bf383ed85dc 100644 --- a/server/src/main/java/org/opensearch/index/query/functionscore/GaussDecayFunctionBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/functionscore/GaussDecayFunctionBuilder.java @@ -34,10 +34,10 @@ import org.apache.lucene.search.Explanation; import org.opensearch.common.Nullable; +import org.opensearch.common.lucene.search.function.Functions; import org.opensearch.core.ParseField; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.lucene.search.function.Functions; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/index/query/functionscore/LinearDecayFunctionBuilder.java b/server/src/main/java/org/opensearch/index/query/functionscore/LinearDecayFunctionBuilder.java index 0247af6acbd74..9205c658c0415 100644 --- a/server/src/main/java/org/opensearch/index/query/functionscore/LinearDecayFunctionBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/functionscore/LinearDecayFunctionBuilder.java @@ -34,9 +34,9 @@ import org.apache.lucene.search.Explanation; import org.opensearch.common.Nullable; +import org.opensearch.common.lucene.search.function.Functions; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.lucene.search.function.Functions; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/index/query/functionscore/RandomScoreFunctionBuilder.java b/server/src/main/java/org/opensearch/index/query/functionscore/RandomScoreFunctionBuilder.java index fa960099c7e3f..1726c44674d66 100644 --- a/server/src/main/java/org/opensearch/index/query/functionscore/RandomScoreFunctionBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/functionscore/RandomScoreFunctionBuilder.java @@ -32,12 +32,12 @@ package org.opensearch.index.query.functionscore; import org.opensearch.common.Nullable; -import org.opensearch.core.common.ParsingException; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.lucene.search.function.RandomScoreFunction; import org.opensearch.common.lucene.search.function.ScoreFunction; 
+import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.mapper.IdFieldMapper; diff --git a/server/src/main/java/org/opensearch/index/query/functionscore/ScoreFunctionBuilder.java b/server/src/main/java/org/opensearch/index/query/functionscore/ScoreFunctionBuilder.java index c14c091bcdc48..50e01411b5760 100644 --- a/server/src/main/java/org/opensearch/index/query/functionscore/ScoreFunctionBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/functionscore/ScoreFunctionBuilder.java @@ -32,11 +32,11 @@ package org.opensearch.index.query.functionscore; +import org.opensearch.common.lucene.search.function.ScoreFunction; +import org.opensearch.common.lucene.search.function.WeightFactorFunction; import org.opensearch.core.common.io.stream.NamedWriteable; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.lucene.search.function.ScoreFunction; -import org.opensearch.common.lucene.search.function.WeightFactorFunction; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryShardContext; diff --git a/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreFunctionBuilder.java b/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreFunctionBuilder.java index a8c27d468a8f2..3dadaeada2e60 100644 --- a/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreFunctionBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreFunctionBuilder.java @@ -33,11 +33,11 @@ package org.opensearch.index.query.functionscore; import org.opensearch.common.Nullable; +import org.opensearch.common.lucene.search.function.ScoreFunction; +import org.opensearch.common.lucene.search.function.ScriptScoreFunction; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.lucene.search.function.ScoreFunction; -import org.opensearch.common.lucene.search.function.ScriptScoreFunction; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.query.QueryShardContext; @@ -114,7 +114,7 @@ protected int doHashCode() { protected ScoreFunction doToFunction(QueryShardContext context) { try { ScoreScript.Factory factory = context.compile(script, ScoreScript.CONTEXT); - ScoreScript.LeafFactory searchScript = factory.newFactory(script.getParams(), context.lookup()); + ScoreScript.LeafFactory searchScript = factory.newFactory(script.getParams(), context.lookup(), context.searcher()); return new ScriptScoreFunction( script, searchScript, diff --git a/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java index 8d67a4be38dfb..fe9ad200d44f0 100644 --- a/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java @@ -32,12 +32,13 @@ package 
org.opensearch.index.query.functionscore; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; import org.opensearch.OpenSearchException; +import org.opensearch.common.lucene.search.function.ScriptScoreQuery; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.lucene.search.function.ScriptScoreQuery; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -45,6 +46,7 @@ import org.opensearch.index.query.InnerHitContextBuilder; import org.opensearch.index.query.MatchNoneQueryBuilder; import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryBuilderVisitor; import org.opensearch.index.query.QueryRewriteContext; import org.opensearch.index.query.QueryShardContext; import org.opensearch.script.ScoreScript; @@ -187,7 +189,7 @@ protected Query doToQuery(QueryShardContext context) throws IOException { ); } ScoreScript.Factory factory = context.compile(script, ScoreScript.CONTEXT); - ScoreScript.LeafFactory scoreScriptFactory = factory.newFactory(script.getParams(), context.lookup()); + ScoreScript.LeafFactory scoreScriptFactory = factory.newFactory(script.getParams(), context.lookup(), context.searcher()); final QueryBuilder queryBuilder = this.query; Query query = queryBuilder.toQuery(context); return new ScriptScoreQuery( @@ -224,4 +226,12 @@ protected void extractInnerHitBuilders(Map<String, InnerHitContextBuilder> inner InnerHitContextBuilder.extractInnerHits(query(), innerHits); } + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + if (query != null) { + QueryBuilderVisitor subVisitor = visitor.getChildVisitor(BooleanClause.Occur.MUST); + subVisitor.accept(query); + } + } } diff --git a/server/src/main/java/org/opensearch/index/query/functionscore/TermFrequencyFunction.java b/server/src/main/java/org/opensearch/index/query/functionscore/TermFrequencyFunction.java new file mode 100644 index 0000000000000..95fbecc53f4ae --- /dev/null +++ b/server/src/main/java/org/opensearch/index/query/functionscore/TermFrequencyFunction.java @@ -0,0 +1,22 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.query.functionscore; + +import java.io.IOException; + +/** + * An interface representing a term frequency function used to compute document scores + * based on specific term frequency calculations. Implementations of this interface should + * provide a way to execute the term frequency function for a given document ID. 
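+ * <p> + * A minimal usage sketch (illustrative only: the field name, term, and the surrounding {@code LeafReaderContext} / {@code IndexSearcher} are assumed to come from the enclosing search context): + * <pre>{@code + * TermFrequencyFunction termFreq = TermFrequencyFunctionFactory.createFunction( + *     TermFrequencyFunctionName.TERM_FREQ, "title", "opensearch", leafReaderContext, searcher); + * Object freq = termFreq.execute(docId); // boxed per-document frequency of the term + * }</pre>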
+ * + * @opensearch.internal + */ +public interface TermFrequencyFunction { + Object execute(int docId) throws IOException; +} diff --git a/server/src/main/java/org/opensearch/index/query/functionscore/TermFrequencyFunctionFactory.java b/server/src/main/java/org/opensearch/index/query/functionscore/TermFrequencyFunctionFactory.java new file mode 100644 index 0000000000000..9db58f0f78a30 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/query/functionscore/TermFrequencyFunctionFactory.java @@ -0,0 +1,84 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.query.functionscore; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.queries.function.FunctionValues; +import org.apache.lucene.queries.function.valuesource.SumTotalTermFreqValueSource; +import org.apache.lucene.queries.function.valuesource.TermFreqValueSource; +import org.apache.lucene.queries.function.valuesource.TotalTermFreqValueSource; +import org.apache.lucene.search.IndexSearcher; +import org.opensearch.common.lucene.BytesRefs; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +/** + * A factory class for creating instances of {@link TermFrequencyFunction}. + * This class provides methods for creating different term frequency functions based on + * the specified function name, field, and term. Each term frequency function is designed + * to compute document scores based on specific term frequency calculations. + * + * @opensearch.internal + */ +public class TermFrequencyFunctionFactory { + public static TermFrequencyFunction createFunction( + TermFrequencyFunctionName functionName, + String field, + String term, + LeafReaderContext readerContext, + IndexSearcher indexSearcher + ) throws IOException { + switch (functionName) { + case TERM_FREQ: + TermFreqValueSource termFreqValueSource = new TermFreqValueSource(field, term, field, BytesRefs.toBytesRef(term)); + FunctionValues functionValues = termFreqValueSource.getValues(null, readerContext); + return docId -> functionValues.intVal(docId); + case TOTAL_TERM_FREQ: + TotalTermFreqValueSource totalTermFreqValueSource = new TotalTermFreqValueSource( + field, + term, + field, + BytesRefs.toBytesRef(term) + ); + Map<Object, Object> ttfContext = new HashMap<>(); + totalTermFreqValueSource.createWeight(ttfContext, indexSearcher); + functionValues = totalTermFreqValueSource.getValues(ttfContext, readerContext); + return docId -> functionValues.longVal(docId); + case SUM_TOTAL_TERM_FREQ: + SumTotalTermFreqValueSource sumTotalTermFreqValueSource = new SumTotalTermFreqValueSource(field); + Map<Object, Object> sttfContext = new HashMap<>(); + sumTotalTermFreqValueSource.createWeight(sttfContext, indexSearcher); + functionValues = sumTotalTermFreqValueSource.getValues(sttfContext, readerContext); + return docId -> functionValues.longVal(docId); + default: + throw new IllegalArgumentException("Unsupported function: " + functionName); + } + } + + /** + * An enumeration representing the names of supported term frequency functions. 
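+ * Each constant carries the function name it represents, e.g. {@code TERM_FREQ} maps to {@code "termFreq"} and exposes it via {@link #getTermFrequencyFunctionName()}.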
+ */ + public enum TermFrequencyFunctionName { + TERM_FREQ("termFreq"), + TOTAL_TERM_FREQ("totalTermFreq"), + SUM_TOTAL_TERM_FREQ("sumTotalTermFreq"); + + private final String termFrequencyFunctionName; + + TermFrequencyFunctionName(String termFrequencyFunctionName) { + this.termFrequencyFunctionName = termFrequencyFunctionName; + } + + public String getTermFrequencyFunctionName() { + return termFrequencyFunctionName; + } + } +} diff --git a/server/src/main/java/org/opensearch/index/query/functionscore/WeightBuilder.java b/server/src/main/java/org/opensearch/index/query/functionscore/WeightBuilder.java index 018e475c4a513..254607edd44d2 100644 --- a/server/src/main/java/org/opensearch/index/query/functionscore/WeightBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/functionscore/WeightBuilder.java @@ -33,9 +33,9 @@ package org.opensearch.index.query.functionscore; import org.opensearch.common.Nullable; +import org.opensearch.common.lucene.search.function.ScoreFunction; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.lucene.search.function.ScoreFunction; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryShardContext; diff --git a/server/src/main/java/org/opensearch/index/query/support/NestedScope.java b/server/src/main/java/org/opensearch/index/query/support/NestedScope.java index 4a09b11666c18..488768c32d17f 100644 --- a/server/src/main/java/org/opensearch/index/query/support/NestedScope.java +++ b/server/src/main/java/org/opensearch/index/query/support/NestedScope.java @@ -32,6 +32,8 @@ package org.opensearch.index.query.support; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.ObjectMapper; import java.util.Deque; @@ -40,11 +42,17 @@ /** * During query parsing this keeps track of the current nested level. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class NestedScope { private final Deque<ObjectMapper> levelStack = new LinkedList<>(); + private final IndexSettings indexSettings; + + public NestedScope(IndexSettings indexSettings) { + this.indexSettings = indexSettings; + } /** * @return For the current nested level returns the object mapper that belongs to that @@ -58,7 +66,21 @@ public ObjectMapper getObjectMapper() { */ public ObjectMapper nextLevel(ObjectMapper level) { ObjectMapper previous = levelStack.peek(); - levelStack.push(level); + if (levelStack.size() < indexSettings.getMaxNestedQueryDepth()) { + levelStack.push(level); + } else { + throw new IllegalArgumentException( + "The depth of the nested query [" + + (levelStack.size() + 1) + + "] has exceeded " + + "the allowed maximum of [" + + indexSettings.getMaxNestedQueryDepth() + + "]. " + + "This maximum can be set by changing the [" + + IndexSettings.MAX_NESTED_QUERY_DEPTH_SETTING.getKey() + + "] index level setting."
+ ); + } return previous; } diff --git a/server/src/main/java/org/opensearch/index/recovery/RecoveryStats.java b/server/src/main/java/org/opensearch/index/recovery/RecoveryStats.java index 3a67283a60180..48fe75eb54280 100644 --- a/server/src/main/java/org/opensearch/index/recovery/RecoveryStats.java +++ b/server/src/main/java/org/opensearch/index/recovery/RecoveryStats.java @@ -31,10 +31,11 @@ package org.opensearch.index.recovery; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.TimeValue; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -46,8 +47,9 @@ * Recovery related statistics, starting at the shard level and allowing aggregation to * indices and node level * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RecoveryStats implements ToXContentFragment, Writeable { private final AtomicInteger currentAsSource = new AtomicInteger(); diff --git a/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java b/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java new file mode 100644 index 0000000000000..23bb4cea17a20 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java @@ -0,0 +1,321 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.recovery; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.ClusterStateUpdateTask; +import org.opensearch.cluster.block.ClusterBlocks; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.IndexTemplateMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.MetadataCreateIndexService; +import org.opensearch.cluster.metadata.MetadataIndexUpgradeService; +import org.opensearch.cluster.metadata.RepositoriesMetadata; +import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.cluster.routing.RecoverySource; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.allocation.AllocationService; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Nullable; +import org.opensearch.common.UUIDs; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.indices.ShardLimitValidator; +import org.opensearch.repositories.IndexId; +import org.opensearch.snapshots.RestoreInfo; +import org.opensearch.snapshots.RestoreService; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Optional; +import 
java.util.function.Function; +import java.util.stream.Collectors; + +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; +import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING; + +/** + * Service responsible for restoring index data from remote store + * + * @opensearch.internal + */ +public class RemoteStoreRestoreService { + private static final Logger logger = LogManager.getLogger(RemoteStoreRestoreService.class); + + private final ClusterService clusterService; + + private final AllocationService allocationService; + + private final MetadataCreateIndexService createIndexService; + + private final MetadataIndexUpgradeService metadataIndexUpgradeService; + + private final ShardLimitValidator shardLimitValidator; + + private final RemoteClusterStateService remoteClusterStateService; + + public RemoteStoreRestoreService( + ClusterService clusterService, + AllocationService allocationService, + MetadataCreateIndexService createIndexService, + MetadataIndexUpgradeService metadataIndexUpgradeService, + ShardLimitValidator shardLimitValidator, + RemoteClusterStateService remoteClusterStateService + ) { + this.clusterService = clusterService; + this.allocationService = allocationService; + this.createIndexService = createIndexService; + this.metadataIndexUpgradeService = metadataIndexUpgradeService; + this.shardLimitValidator = shardLimitValidator; + this.remoteClusterStateService = remoteClusterStateService; + } + + /** + * Restores data from remote store for indices specified in the restore request. + * + * @param request restore request + * @param listener restore listener + */ + public void restore(RestoreRemoteStoreRequest request, final ActionListener<RestoreService.RestoreCompletionResponse> listener) { + clusterService.submitStateUpdateTask("restore[remote_store]", new ClusterStateUpdateTask() { + String restoreUUID; + RestoreInfo restoreInfo = null; + + @Override + public ClusterState execute(ClusterState currentState) { + RemoteRestoreResult remoteRestoreResult = restore(currentState, null, request.restoreAllShards(), request.indices()); + restoreUUID = remoteRestoreResult.getRestoreUUID(); + restoreInfo = remoteRestoreResult.getRestoreInfo(); + return remoteRestoreResult.getClusterState(); + } + + @Override + public void onFailure(String source, Exception e) { + logger.warn("failed to restore from remote store", e); + listener.onFailure(e); + } + + @Override + public TimeValue timeout() { + return request.clusterManagerNodeTimeout(); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + listener.onResponse(new RestoreService.RestoreCompletionResponse(restoreUUID, null, restoreInfo)); + } + }); + } + + /** + * Executes remote restore + * @param currentState current cluster state + * @param restoreClusterUUID cluster UUID used to restore IndexMetadata + * @param restoreAllShards indicates if all shards of the index need to be restored. This flag is ignored if restoreClusterUUID is provided + * @param indexNames list of indices to restore.
This list is ignored if restoreClusterUUID is provided + * @return remote restore result + */ + public RemoteRestoreResult restore( + ClusterState currentState, + @Nullable String restoreClusterUUID, + boolean restoreAllShards, + String[] indexNames + ) { + Map<String, Tuple<Boolean, IndexMetadata>> indexMetadataMap = new HashMap<>(); + ClusterState remoteState = null; + boolean metadataFromRemoteStore = (restoreClusterUUID == null + || restoreClusterUUID.isEmpty() + || restoreClusterUUID.isBlank()) == false; + if (metadataFromRemoteStore) { + try { + // Restore with current cluster UUID will fail as the same indices would be present in the cluster which we are trying to + // restore + if (currentState.metadata().clusterUUID().equals(restoreClusterUUID)) { + throw new IllegalArgumentException("clusterUUID to restore from should be different from current cluster UUID"); + } + logger.info("Restoring cluster state from remote store for cluster UUID: [{}]", restoreClusterUUID); + remoteState = remoteClusterStateService.getLatestClusterState(currentState.getClusterName().value(), restoreClusterUUID); + remoteState.getMetadata().getIndices().values().forEach(indexMetadata -> { + indexMetadataMap.put(indexMetadata.getIndex().getName(), new Tuple<>(true, indexMetadata)); + }); + } catch (Exception e) { + throw new IllegalStateException("Unable to restore remote index metadata", e); + } + } else { + for (String indexName : indexNames) { + IndexMetadata indexMetadata = currentState.metadata().index(indexName); + if (indexMetadata == null) { + logger.warn("Index restore is not supported for non-existent index. Skipping: {}", indexName); + } else if (indexMetadata.getSettings().getAsBoolean(SETTING_REMOTE_STORE_ENABLED, false) == false) { + logger.warn("Remote store is not enabled for index: {}", indexName); + } else if (restoreAllShards && IndexMetadata.State.CLOSE.equals(indexMetadata.getState()) == false) { + throw new IllegalStateException( + String.format( + Locale.ROOT, + "cannot restore index [%s] because an open index with the same name/uuid already exists in the cluster.", + indexName + ) + " Close the existing index."
+ ); + } else { + indexMetadataMap.put(indexName, new Tuple<>(false, indexMetadata)); + } + } + } + return executeRestore(currentState, indexMetadataMap, restoreAllShards, remoteState); + } + + /** + * Executes remote restore + * @param currentState current cluster state + * @param indexMetadataMap map of index metadata to restore + * @param restoreAllShards indicates if all shards of the index need to be restored + * @param remoteState cluster state fetched from the remote store; null when index metadata is taken from the current cluster state + * @return remote restore result + */ + private RemoteRestoreResult executeRestore( + ClusterState currentState, + Map<String, Tuple<Boolean, IndexMetadata>> indexMetadataMap, + boolean restoreAllShards, + ClusterState remoteState + ) { + final String restoreUUID = UUIDs.randomBase64UUID(); + List<String> indicesToBeRestored = new ArrayList<>(); + int totalShards = 0; + boolean metadataFromRemoteStore = false; + ClusterState.Builder builder = ClusterState.builder(currentState); + Metadata.Builder mdBuilder = Metadata.builder(currentState.metadata()); + ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()); + RoutingTable.Builder rtBuilder = RoutingTable.builder(currentState.routingTable()); + for (Map.Entry<String, Tuple<Boolean, IndexMetadata>> indexMetadataEntry : indexMetadataMap.entrySet()) { + String indexName = indexMetadataEntry.getKey(); + IndexMetadata indexMetadata = indexMetadataEntry.getValue().v2(); + metadataFromRemoteStore = indexMetadataEntry.getValue().v1(); + IndexMetadata updatedIndexMetadata = indexMetadata; + if (metadataFromRemoteStore == false && restoreAllShards) { + updatedIndexMetadata = IndexMetadata.builder(indexMetadata) + .state(IndexMetadata.State.OPEN) + .version(1 + indexMetadata.getVersion()) + .mappingVersion(1 + indexMetadata.getMappingVersion()) + .settingsVersion(1 + indexMetadata.getSettingsVersion()) + .aliasesVersion(1 + indexMetadata.getAliasesVersion()) + .build(); + } + + IndexId indexId = new IndexId(indexName, updatedIndexMetadata.getIndexUUID()); + + if (metadataFromRemoteStore == false) { + Map<ShardId, IndexShardRoutingTable> indexShardRoutingTableMap = currentState.routingTable() + .index(indexName) + .shards() + .values() + .stream() + .collect(Collectors.toMap(IndexShardRoutingTable::shardId, Function.identity())); + + RecoverySource.RemoteStoreRecoverySource recoverySource = new RecoverySource.RemoteStoreRecoverySource( + restoreUUID, + updatedIndexMetadata.getCreationVersion(), + indexId + ); + + rtBuilder.addAsRemoteStoreRestore(updatedIndexMetadata, recoverySource, indexShardRoutingTableMap, restoreAllShards); + } + + blocks.updateBlocks(updatedIndexMetadata); + mdBuilder.put(updatedIndexMetadata, true); + indicesToBeRestored.add(indexName); + totalShards += updatedIndexMetadata.getNumberOfShards(); + } + + if (remoteState != null) { + restoreGlobalMetadata(mdBuilder, remoteState.getMetadata()); + // Restore ClusterState version + logger.info("Restoring ClusterState with Remote State version [{}]", remoteState.version()); + builder.version(remoteState.version()); + } + + RestoreInfo restoreInfo = new RestoreInfo("remote_store", indicesToBeRestored, totalShards, totalShards); + + RoutingTable rt = rtBuilder.build(); + ClusterState updatedState = builder.metadata(mdBuilder).blocks(blocks).routingTable(rt).build(); + if (metadataFromRemoteStore == false) { + updatedState = allocationService.reroute(updatedState, "restored from remote store"); + } + return RemoteRestoreResult.build(restoreUUID, restoreInfo, updatedState); + } + + private void restoreGlobalMetadata(Metadata.Builder mdBuilder,
Metadata remoteMetadata) { + if (remoteMetadata.persistentSettings() != null) { + Settings settings = remoteMetadata.persistentSettings(); + clusterService.getClusterSettings().validateUpdate(settings); + mdBuilder.persistentSettings(settings); + } + if (remoteMetadata.templates() != null) { + for (final IndexTemplateMetadata cursor : remoteMetadata.templates().values()) { + mdBuilder.put(cursor); + } + } + if (remoteMetadata.customs() != null) { + for (final Map.Entry<String, Metadata.Custom> cursor : remoteMetadata.customs().entrySet()) { + if (RepositoriesMetadata.TYPE.equals(cursor.getKey()) == false) { + mdBuilder.putCustom(cursor.getKey(), cursor.getValue()); + } + } + } + Optional<RepositoriesMetadata> repositoriesMetadata = Optional.ofNullable(remoteMetadata.custom(RepositoriesMetadata.TYPE)); + repositoriesMetadata = repositoriesMetadata.map( + repositoriesMetadata1 -> new RepositoriesMetadata( + repositoriesMetadata1.repositories() + .stream() + .filter(repository -> SYSTEM_REPOSITORY_SETTING.get(repository.settings()) == false) + .collect(Collectors.toList()) + ) + ); + repositoriesMetadata.ifPresent(metadata -> mdBuilder.putCustom(RepositoriesMetadata.TYPE, metadata)); + } + + /** + * Result of a remote restore operation. + */ + public static class RemoteRestoreResult { + private final ClusterState clusterState; + private final RestoreInfo restoreInfo; + private final String restoreUUID; + + private RemoteRestoreResult(String restoreUUID, RestoreInfo restoreInfo, ClusterState clusterState) { + this.clusterState = clusterState; + this.restoreInfo = restoreInfo; + this.restoreUUID = restoreUUID; + } + + public static RemoteRestoreResult build(String restoreUUID, RestoreInfo restoreInfo, ClusterState clusterState) { + return new RemoteRestoreResult(restoreUUID, restoreInfo, clusterState); + } + + public ClusterState getClusterState() { + return clusterState; + } + + public RestoreInfo getRestoreInfo() { + return restoreInfo; + } + + public String getRestoreUUID() { + return restoreUUID; + } + } +} diff --git a/server/src/main/java/org/opensearch/index/refresh/RefreshStats.java b/server/src/main/java/org/opensearch/index/refresh/RefreshStats.java index 8a975020eadf5..489ac386f72a0 100644 --- a/server/src/main/java/org/opensearch/index/refresh/RefreshStats.java +++ b/server/src/main/java/org/opensearch/index/refresh/RefreshStats.java @@ -32,10 +32,11 @@ package org.opensearch.index.refresh; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.TimeValue; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -45,8 +46,9 @@ /** * Encapsulates stats for index refresh * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RefreshStats implements Writeable, ToXContentFragment { private long total; diff --git a/server/src/main/java/org/opensearch/index/reindex/AbstractBulkByScrollRequest.java b/server/src/main/java/org/opensearch/index/reindex/AbstractBulkByScrollRequest.java index 1ed4df0fb4fe9..4dea9ddc71f71 100644 --- a/server/src/main/java/org/opensearch/index/reindex/AbstractBulkByScrollRequest.java +++ b/server/src/main/java/org/opensearch/index/reindex/AbstractBulkByScrollRequest.java @@ -37,13 +37,13 @@ import 
org.opensearch.action.search.SearchRequest; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.replication.ReplicationRequest; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.tasks.TaskId; import org.opensearch.search.Scroll; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; import java.io.IOException; import java.util.Arrays; diff --git a/server/src/main/java/org/opensearch/index/reindex/AbstractBulkByScrollRequestBuilder.java b/server/src/main/java/org/opensearch/index/reindex/AbstractBulkByScrollRequestBuilder.java index c32e31802ac35..ccba2d6f435f1 100644 --- a/server/src/main/java/org/opensearch/index/reindex/AbstractBulkByScrollRequestBuilder.java +++ b/server/src/main/java/org/opensearch/index/reindex/AbstractBulkByScrollRequestBuilder.java @@ -32,8 +32,8 @@ package org.opensearch.index.reindex; -import org.opensearch.action.ActionType; import org.opensearch.action.ActionRequestBuilder; +import org.opensearch.action.ActionType; import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.replication.ReplicationRequest; diff --git a/server/src/main/java/org/opensearch/index/reindex/AbstractBulkIndexByScrollRequest.java b/server/src/main/java/org/opensearch/index/reindex/AbstractBulkIndexByScrollRequest.java index 663f6136d2105..9d63658f4a67c 100644 --- a/server/src/main/java/org/opensearch/index/reindex/AbstractBulkIndexByScrollRequest.java +++ b/server/src/main/java/org/opensearch/index/reindex/AbstractBulkIndexByScrollRequest.java @@ -36,8 +36,8 @@ import org.opensearch.common.Nullable; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.tasks.TaskId; import org.opensearch.script.Script; -import org.opensearch.tasks.TaskId; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/index/reindex/BulkByScrollResponse.java b/server/src/main/java/org/opensearch/index/reindex/BulkByScrollResponse.java index b96db441d6736..0f3714fb16754 100644 --- a/server/src/main/java/org/opensearch/index/reindex/BulkByScrollResponse.java +++ b/server/src/main/java/org/opensearch/index/reindex/BulkByScrollResponse.java @@ -34,21 +34,21 @@ import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchParseException; -import org.opensearch.action.ActionResponse; import org.opensearch.action.bulk.BulkItemResponse.Failure; -import org.opensearch.core.xcontent.ObjectParser; -import org.opensearch.index.reindex.BulkByScrollTask.Status; import org.opensearch.common.Nullable; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.xcontent.XContentParser.Token; +import 
org.opensearch.index.reindex.BulkByScrollTask.Status; import org.opensearch.index.reindex.ScrollableHitSource.SearchFailure; -import org.opensearch.core.rest.RestStatus; import java.io.IOException; import java.util.ArrayList; diff --git a/server/src/main/java/org/opensearch/index/reindex/BulkByScrollResponseBuilder.java b/server/src/main/java/org/opensearch/index/reindex/BulkByScrollResponseBuilder.java index 3a38d2552309c..f1a9d1322412c 100644 --- a/server/src/main/java/org/opensearch/index/reindex/BulkByScrollResponseBuilder.java +++ b/server/src/main/java/org/opensearch/index/reindex/BulkByScrollResponseBuilder.java @@ -32,11 +32,11 @@ package org.opensearch.index.reindex; -import org.opensearch.common.unit.TimeValue; import org.opensearch.action.bulk.BulkItemResponse.Failure; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.xcontent.ObjectParser; -import org.opensearch.index.reindex.ScrollableHitSource.SearchFailure; import org.opensearch.index.reindex.BulkByScrollTask.StatusBuilder; +import org.opensearch.index.reindex.ScrollableHitSource.SearchFailure; import java.util.ArrayList; import java.util.List; diff --git a/server/src/main/java/org/opensearch/index/reindex/BulkByScrollTask.java b/server/src/main/java/org/opensearch/index/reindex/BulkByScrollTask.java index 040ee61ccd37d..d7c0da4773fff 100644 --- a/server/src/main/java/org/opensearch/index/reindex/BulkByScrollTask.java +++ b/server/src/main/java/org/opensearch/index/reindex/BulkByScrollTask.java @@ -34,15 +34,16 @@ import org.opensearch.OpenSearchException; import org.opensearch.common.Nullable; -import org.opensearch.core.ParseField; -import org.opensearch.common.Strings; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.tasks.TaskId; import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; @@ -51,7 +52,6 @@ import org.opensearch.core.xcontent.XContentParser.Token; import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; import org.opensearch.tasks.TaskInfo; import java.io.IOException; @@ -68,18 +68,18 @@ import static java.lang.Math.min; import static java.util.Collections.emptyList; import static org.opensearch.common.unit.TimeValue.timeValueNanos; -import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; import static org.opensearch.core.xcontent.ConstructingObjectParser.constructorArg; +import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; /** * Task storing information about a currently running BulkByScroll request. - * + * <p> * When the request is not sliced, this task is the only task created, and starts an action to perform search requests. 
- * + * <p> * When the request is sliced, this task can either represent a coordinating task (using * {@link BulkByScrollTask#setWorkerCount(int)}) or a worker task that performs search queries (using * {@link BulkByScrollTask#setWorker(float, Integer)}). - * + * <p> * We don't always know if this task will be a leader or worker task when it's created, because if slices is set to "auto" it may * be either depending on the number of shards in the source indices. We figure that out when the request is handled and set it on this * class with {@link #setWorkerCount(int)} or {@link #setWorker(float, Integer)}. @@ -1048,9 +1048,9 @@ public static StatusOrException fromXContent(XContentParser parser) throws IOExc @Override public String toString() { if (exception != null) { - return "BulkByScrollTask{error=" + Strings.toString(XContentType.JSON, this) + "}"; + return "BulkByScrollTask{error=" + Strings.toString(MediaTypeRegistry.JSON, this) + "}"; } else { - return "BulkByScrollTask{status=" + Strings.toString(XContentType.JSON, this) + "}"; + return "BulkByScrollTask{status=" + Strings.toString(MediaTypeRegistry.JSON, this) + "}"; } } diff --git a/server/src/main/java/org/opensearch/index/reindex/ClientScrollableHitSource.java b/server/src/main/java/org/opensearch/index/reindex/ClientScrollableHitSource.java index b3ed0079ff4c2..55d018af46970 100644 --- a/server/src/main/java/org/opensearch/index/reindex/ClientScrollableHitSource.java +++ b/server/src/main/java/org/opensearch/index/reindex/ClientScrollableHitSource.java @@ -35,7 +35,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionListener; import org.opensearch.action.bulk.BackoffPolicy; import org.opensearch.action.search.ClearScrollRequest; import org.opensearch.action.search.ClearScrollResponse; @@ -45,12 +44,13 @@ import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.client.Client; import org.opensearch.client.ParentTaskAssigningClient; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.document.DocumentField; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.mapper.RoutingFieldMapper; import org.opensearch.search.SearchHit; import org.opensearch.threadpool.ThreadPool; @@ -62,7 +62,7 @@ import static java.util.Collections.emptyList; import static java.util.Collections.unmodifiableList; import static org.opensearch.common.unit.TimeValue.timeValueNanos; -import static org.opensearch.common.util.CollectionUtils.isEmpty; +import static org.opensearch.core.common.util.CollectionUtils.isEmpty; /** * A scrollable source of hits from a {@linkplain Client} instance. 
@@ -210,7 +210,7 @@ public BytesReference getSource() { @Override public MediaType getMediaType() { - return XContentHelper.xContentType(source); + return MediaTypeRegistry.xContentType(source); } @Override diff --git a/server/src/main/java/org/opensearch/index/reindex/DeleteByQueryRequest.java b/server/src/main/java/org/opensearch/index/reindex/DeleteByQueryRequest.java index f3efa0040a0ea..4963080f5916c 100644 --- a/server/src/main/java/org/opensearch/index/reindex/DeleteByQueryRequest.java +++ b/server/src/main/java/org/opensearch/index/reindex/DeleteByQueryRequest.java @@ -37,10 +37,10 @@ import org.opensearch.action.search.SearchRequest; import org.opensearch.action.support.IndicesOptions; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.tasks.TaskId; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilder; -import org.opensearch.tasks.TaskId; import java.io.IOException; @@ -49,7 +49,7 @@ /** * Creates a new {@link DeleteByQueryRequest} that uses scrolling and bulk requests to delete all documents matching * the query. This can have performance as well as visibility implications. - * + * <p> * Delete-by-query now has the following semantics: * <ul> * <li>it's {@code non-atomic}, a delete-by-query may fail at any time while some documents matching the query have already been diff --git a/server/src/main/java/org/opensearch/index/reindex/LeaderBulkByScrollTaskState.java b/server/src/main/java/org/opensearch/index/reindex/LeaderBulkByScrollTaskState.java index 973ceeefe43b7..39ff9478cb13e 100644 --- a/server/src/main/java/org/opensearch/index/reindex/LeaderBulkByScrollTaskState.java +++ b/server/src/main/java/org/opensearch/index/reindex/LeaderBulkByScrollTaskState.java @@ -32,8 +32,8 @@ package org.opensearch.index.reindex; -import org.opensearch.action.ActionListener; import org.opensearch.common.util.concurrent.AtomicArray; +import org.opensearch.core.action.ActionListener; import java.util.ArrayList; import java.util.Arrays; diff --git a/server/src/main/java/org/opensearch/index/reindex/ReindexRequest.java b/server/src/main/java/org/opensearch/index/reindex/ReindexRequest.java index f6b4793f3b87a..393e01823024e 100644 --- a/server/src/main/java/org/opensearch/index/reindex/ReindexRequest.java +++ b/server/src/main/java/org/opensearch/index/reindex/ReindexRequest.java @@ -36,24 +36,24 @@ import org.opensearch.action.CompositeIndicesRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.search.SearchRequest; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.core.ParseField; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; 
import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.VersionType; import org.opensearch.index.query.QueryBuilder; import org.opensearch.script.Script; import org.opensearch.search.sort.SortOrder; -import org.opensearch.tasks.TaskId; import java.io.IOException; import java.io.InputStream; @@ -356,7 +356,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws request.getSearchRequest().indices(indices); } request.setRemoteInfo(buildRemoteInfo(source)); - XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType()); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(parser.contentType()); builder.map(source); try ( InputStream stream = BytesReference.bytes(builder).streamInput(); diff --git a/server/src/main/java/org/opensearch/index/reindex/RejectAwareActionListener.java b/server/src/main/java/org/opensearch/index/reindex/RejectAwareActionListener.java index da3afb178300e..04e3d2a6c6c83 100644 --- a/server/src/main/java/org/opensearch/index/reindex/RejectAwareActionListener.java +++ b/server/src/main/java/org/opensearch/index/reindex/RejectAwareActionListener.java @@ -32,8 +32,8 @@ package org.opensearch.index.reindex; -import org.opensearch.action.ActionListener; import org.opensearch.common.CheckedConsumer; +import org.opensearch.core.action.ActionListener; import java.util.function.Consumer; diff --git a/server/src/main/java/org/opensearch/index/reindex/RemoteInfo.java b/server/src/main/java/org/opensearch/index/reindex/RemoteInfo.java index 78032c193c866..37df0a6aef85d 100644 --- a/server/src/main/java/org/opensearch/index/reindex/RemoteInfo.java +++ b/server/src/main/java/org/opensearch/index/reindex/RemoteInfo.java @@ -33,11 +33,12 @@ package org.opensearch.index.reindex; import org.opensearch.common.Nullable; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.TimeValue; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; @@ -45,7 +46,6 @@ import org.opensearch.core.xcontent.XContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; import java.io.IOException; import java.util.HashMap; diff --git a/server/src/main/java/org/opensearch/index/reindex/RetryListener.java b/server/src/main/java/org/opensearch/index/reindex/RetryListener.java index ad09080cfea2b..e46e675977268 100644 --- a/server/src/main/java/org/opensearch/index/reindex/RetryListener.java +++ b/server/src/main/java/org/opensearch/index/reindex/RetryListener.java @@ -34,9 +34,9 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.action.bulk.BackoffPolicy; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; import org.opensearch.threadpool.ThreadPool; import java.util.Iterator; @@ -47,7 +47,7 @@ * * @opensearch.internal */ -class RetryListener implements RejectAwareActionListener<ScrollableHitSource.Response> { +public class RetryListener implements 
RejectAwareActionListener<ScrollableHitSource.Response> { private final Logger logger; private final Iterator<TimeValue> retries; private final ThreadPool threadPool; @@ -55,7 +55,7 @@ class RetryListener implements RejectAwareActionListener<ScrollableHitSource.Res private final ActionListener<ScrollableHitSource.Response> delegate; private int retryCount = 0; - RetryListener( + public RetryListener( Logger logger, ThreadPool threadPool, BackoffPolicy backoffPolicy, diff --git a/server/src/main/java/org/opensearch/index/reindex/ScrollableHitSource.java b/server/src/main/java/org/opensearch/index/reindex/ScrollableHitSource.java index 47e882fc04127..8b18a2c6b086e 100644 --- a/server/src/main/java/org/opensearch/index/reindex/ScrollableHitSource.java +++ b/server/src/main/java/org/opensearch/index/reindex/ScrollableHitSource.java @@ -35,23 +35,23 @@ import org.apache.logging.log4j.Logger; import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; import org.opensearch.action.bulk.BackoffPolicy; import org.opensearch.action.bulk.BulkItemResponse; import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.common.Nullable; -import org.opensearch.common.Strings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.seqno.SequenceNumbers; -import org.opensearch.core.rest.RestStatus; import org.opensearch.search.SearchHit; import org.opensearch.threadpool.ThreadPool; @@ -138,7 +138,7 @@ public void done(TimeValue extraKeepAlive) { public final void close(Runnable onCompletion) { String scrollId = this.scrollId.get(); - if (org.opensearch.core.common.Strings.hasLength(scrollId)) { + if (Strings.hasLength(scrollId)) { clearScroll(scrollId, () -> cleanup(onCompletion)); } else { cleanup(onCompletion); @@ -499,7 +499,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } } } diff --git a/server/src/main/java/org/opensearch/index/reindex/UpdateByQueryRequest.java b/server/src/main/java/org/opensearch/index/reindex/UpdateByQueryRequest.java index 6601fc3a7513f..d47ed239c22f2 100644 --- a/server/src/main/java/org/opensearch/index/reindex/UpdateByQueryRequest.java +++ b/server/src/main/java/org/opensearch/index/reindex/UpdateByQueryRequest.java @@ -37,10 +37,10 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.tasks.TaskId; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilder; -import org.opensearch.tasks.TaskId; 
import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/index/reindex/WorkerBulkByScrollTaskState.java b/server/src/main/java/org/opensearch/index/reindex/WorkerBulkByScrollTaskState.java index e925e57784aa3..c9661c6e00c64 100644 --- a/server/src/main/java/org/opensearch/index/reindex/WorkerBulkByScrollTaskState.java +++ b/server/src/main/java/org/opensearch/index/reindex/WorkerBulkByScrollTaskState.java @@ -36,8 +36,8 @@ import org.apache.logging.log4j.Logger; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; -import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.common.util.concurrent.RunOnce; +import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.threadpool.Scheduler; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteRefreshSegmentPressureService.java b/server/src/main/java/org/opensearch/index/remote/RemoteRefreshSegmentPressureService.java deleted file mode 100644 index 3f1161f0c5e03..0000000000000 --- a/server/src/main/java/org/opensearch/index/remote/RemoteRefreshSegmentPressureService.java +++ /dev/null @@ -1,302 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.remote; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.inject.Inject; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.concurrent.ConcurrentCollections; -import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; -import org.opensearch.index.shard.IndexEventListener; -import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; - -import java.util.Arrays; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.function.BiConsumer; - -/** - * Service used to validate if the incoming indexing request should be rejected based on the {@link RemoteRefreshSegmentTracker}. - * - * @opensearch.internal - */ -public class RemoteRefreshSegmentPressureService implements IndexEventListener { - - private static final Logger logger = LogManager.getLogger(RemoteRefreshSegmentPressureService.class); - - /** - * Keeps map of remote-backed index shards and their corresponding backpressure tracker. - */ - private final Map<ShardId, RemoteRefreshSegmentTracker> trackerMap = ConcurrentCollections.newConcurrentMap(); - - /** - * Remote refresh segment pressure settings which is used for creation of the backpressure tracker and as well as rejection. - */ - private final RemoteRefreshSegmentPressureSettings pressureSettings; - - private final List<LagValidator> lagValidators; - - @Inject - public RemoteRefreshSegmentPressureService(ClusterService clusterService, Settings settings) { - pressureSettings = new RemoteRefreshSegmentPressureSettings(clusterService, settings, this); - lagValidators = Arrays.asList( - new ConsecutiveFailureValidator(pressureSettings), - new BytesLagValidator(pressureSettings), - new TimeLagValidator(pressureSettings) - ); - } - - /** - * Get {@code RemoteRefreshSegmentTracker} only if the underlying Index has remote segments integration enabled. 
- * - * @param shardId shard id - * @return the tracker if index is remote-backed, else null. - */ - public RemoteRefreshSegmentTracker getRemoteRefreshSegmentTracker(ShardId shardId) { - return trackerMap.get(shardId); - } - - @Override - public void afterIndexShardCreated(IndexShard indexShard) { - if (indexShard.indexSettings().isRemoteStoreEnabled() == false) { - return; - } - ShardId shardId = indexShard.shardId(); - trackerMap.put( - shardId, - new RemoteRefreshSegmentTracker( - shardId, - pressureSettings.getUploadBytesMovingAverageWindowSize(), - pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), - pressureSettings.getUploadTimeMovingAverageWindowSize() - ) - ); - logger.trace("Created tracker for shardId={}", shardId); - } - - @Override - public void afterIndexShardClosed(ShardId shardId, IndexShard indexShard, Settings indexSettings) { - RemoteRefreshSegmentTracker remoteRefreshSegmentTracker = trackerMap.remove(shardId); - if (remoteRefreshSegmentTracker != null) { - logger.trace("Deleted tracker for shardId={}", shardId); - } - } - - /** - * Check if remote refresh segments backpressure is enabled. This is backed by a cluster level setting. - * - * @return true if enabled, else false. - */ - public boolean isSegmentsUploadBackpressureEnabled() { - return pressureSettings.isRemoteRefreshSegmentPressureEnabled(); - } - - /** - * Validates if segments are lagging more than the limits. If yes, it would lead to rejections of the requests. - * - * @param shardId shardId for which the validation needs to be done. - */ - public void validateSegmentsUploadLag(ShardId shardId) { - RemoteRefreshSegmentTracker remoteRefreshSegmentTracker = getRemoteRefreshSegmentTracker(shardId); - // condition 1 - This will be null for non-remote backed indexes - // condition 2 - This will be zero if the remote store is - if (remoteRefreshSegmentTracker == null || remoteRefreshSegmentTracker.getRefreshSeqNoLag() == 0) { - return; - } - - for (LagValidator lagValidator : lagValidators) { - if (lagValidator.validate(remoteRefreshSegmentTracker, shardId) == false) { - remoteRefreshSegmentTracker.incrementRejectionCount(lagValidator.name()); - throw new OpenSearchRejectedExecutionException(lagValidator.rejectionMessage(remoteRefreshSegmentTracker, shardId)); - } - } - } - - void updateUploadBytesMovingAverageWindowSize(int updatedSize) { - updateMovingAverageWindowSize(RemoteRefreshSegmentTracker::updateUploadBytesMovingAverageWindowSize, updatedSize); - } - - void updateUploadBytesPerSecMovingAverageWindowSize(int updatedSize) { - updateMovingAverageWindowSize(RemoteRefreshSegmentTracker::updateUploadBytesPerSecMovingAverageWindowSize, updatedSize); - } - - void updateUploadTimeMsMovingAverageWindowSize(int updatedSize) { - updateMovingAverageWindowSize(RemoteRefreshSegmentTracker::updateUploadTimeMsMovingAverageWindowSize, updatedSize); - } - - void updateMovingAverageWindowSize(BiConsumer<RemoteRefreshSegmentTracker, Integer> biConsumer, int updatedSize) { - trackerMap.values().forEach(tracker -> biConsumer.accept(tracker, updatedSize)); - } - - /** - * Abstract class for validating if lag is acceptable or not. - * - * @opensearch.internal - */ - private static abstract class LagValidator { - - final RemoteRefreshSegmentPressureSettings pressureSettings; - - private LagValidator(RemoteRefreshSegmentPressureSettings pressureSettings) { - this.pressureSettings = pressureSettings; - } - - /** - * Validates the lag and returns value accordingly. 
- * - * @param pressureTracker tracker which holds information about the shard. - * @param shardId shard id of the {@code IndexShard} currently being validated. - * @return true if successfully validated that lag is acceptable. - */ - abstract boolean validate(RemoteRefreshSegmentTracker pressureTracker, ShardId shardId); - - /** - * Returns the name of the lag validator. - * - * @return the name using class name. - */ - abstract String name(); - - abstract String rejectionMessage(RemoteRefreshSegmentTracker pressureTracker, ShardId shardId); - } - - /** - * Check if the remote store is lagging more than the upload bytes average multiplied by a variance factor - * - * @opensearch.internal - */ - private static class BytesLagValidator extends LagValidator { - - private static final String NAME = "bytes_lag"; - - private BytesLagValidator(RemoteRefreshSegmentPressureSettings pressureSettings) { - super(pressureSettings); - } - - @Override - public boolean validate(RemoteRefreshSegmentTracker pressureTracker, ShardId shardId) { - if (pressureTracker.getRefreshSeqNoLag() <= 1) { - return true; - } - if (pressureTracker.isUploadBytesAverageReady() == false) { - logger.trace("upload bytes moving average is not ready"); - return true; - } - double dynamicBytesLagThreshold = pressureTracker.getUploadBytesAverage() * pressureSettings.getBytesLagVarianceFactor(); - long bytesLag = pressureTracker.getBytesLag(); - return bytesLag <= dynamicBytesLagThreshold; - } - - @Override - public String rejectionMessage(RemoteRefreshSegmentTracker pressureTracker, ShardId shardId) { - double dynamicBytesLagThreshold = pressureTracker.getUploadBytesAverage() * pressureSettings.getBytesLagVarianceFactor(); - return String.format( - Locale.ROOT, - "rejected execution on primary shard:%s due to remote segments lagging behind local segments." - + "bytes_lag:%s dynamic_bytes_lag_threshold:%s", - shardId, - pressureTracker.getBytesLag(), - dynamicBytesLagThreshold - ); - } - - @Override - String name() { - return NAME; - } - } - - /** - * Check if the remote store is lagging more than the upload time average multiplied by a variance factor - * - * @opensearch.internal - */ - private static class TimeLagValidator extends LagValidator { - - private static final String NAME = "time_lag"; - - private TimeLagValidator(RemoteRefreshSegmentPressureSettings pressureSettings) { - super(pressureSettings); - } - - @Override - public boolean validate(RemoteRefreshSegmentTracker pressureTracker, ShardId shardId) { - if (pressureTracker.getRefreshSeqNoLag() <= 1) { - return true; - } - if (pressureTracker.isUploadTimeMsAverageReady() == false) { - logger.trace("upload time moving average is not ready"); - return true; - } - long timeLag = pressureTracker.getTimeMsLag(); - double dynamicTimeLagThreshold = pressureTracker.getUploadTimeMsAverage() * pressureSettings.getUploadTimeLagVarianceFactor(); - return timeLag <= dynamicTimeLagThreshold; - } - - @Override - public String rejectionMessage(RemoteRefreshSegmentTracker pressureTracker, ShardId shardId) { - double dynamicTimeLagThreshold = pressureTracker.getUploadTimeMsAverage() * pressureSettings.getUploadTimeLagVarianceFactor(); - return String.format( - Locale.ROOT, - "rejected execution on primary shard:%s due to remote segments lagging behind local segments." 
- + "time_lag:%s ms dynamic_time_lag_threshold:%s ms", - shardId, - pressureTracker.getTimeMsLag(), - dynamicTimeLagThreshold - ); - } - - @Override - String name() { - return NAME; - } - } - - /** - * Check if consecutive failure limit has been breached - * - * @opensearch.internal - */ - private static class ConsecutiveFailureValidator extends LagValidator { - - private static final String NAME = "consecutive_failures_lag"; - - private ConsecutiveFailureValidator(RemoteRefreshSegmentPressureSettings pressureSettings) { - super(pressureSettings); - } - - @Override - public boolean validate(RemoteRefreshSegmentTracker pressureTracker, ShardId shardId) { - int failureStreakCount = pressureTracker.getConsecutiveFailureCount(); - int minConsecutiveFailureThreshold = pressureSettings.getMinConsecutiveFailuresLimit(); - return failureStreakCount <= minConsecutiveFailureThreshold; - } - - @Override - public String rejectionMessage(RemoteRefreshSegmentTracker pressureTracker, ShardId shardId) { - return String.format( - Locale.ROOT, - "rejected execution on primary shard:%s due to remote segments lagging behind local segments." - + "failure_streak_count:%s min_consecutive_failure_threshold:%s", - shardId, - pressureTracker.getConsecutiveFailureCount(), - pressureSettings.getMinConsecutiveFailuresLimit() - ); - } - - @Override - String name() { - return NAME; - } - } -} diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteRefreshSegmentPressureSettings.java b/server/src/main/java/org/opensearch/index/remote/RemoteRefreshSegmentPressureSettings.java deleted file mode 100644 index 2a098b8f7a89b..0000000000000 --- a/server/src/main/java/org/opensearch/index/remote/RemoteRefreshSegmentPressureSettings.java +++ /dev/null @@ -1,213 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.remote; - -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Setting; -import org.opensearch.common.settings.Settings; - -/** - * Settings related to back pressure on account of segments upload failures / lags. 
- * - * @opensearch.internal - */ -public class RemoteRefreshSegmentPressureSettings { - - private static class Defaults { - private static final double BYTES_LAG_VARIANCE_FACTOR = 10.0; - private static final double UPLOAD_TIME_LAG_VARIANCE_FACTOR = 10.0; - private static final double VARIANCE_FACTOR_MIN_VALUE = 1.0; - private static final int MIN_CONSECUTIVE_FAILURES_LIMIT = 5; - private static final int MIN_CONSECUTIVE_FAILURES_LIMIT_MIN_VALUE = 1; - private static final int UPLOAD_BYTES_MOVING_AVERAGE_WINDOW_SIZE = 20; - private static final int UPLOAD_BYTES_PER_SEC_MOVING_AVERAGE_WINDOW_SIZE = 20; - private static final int UPLOAD_TIME_MOVING_AVERAGE_WINDOW_SIZE = 20; - private static final int MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE = 5; - } - - public static final Setting<Boolean> REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED = Setting.boolSetting( - "remote_store.segment.pressure.enabled", - false, - Setting.Property.Dynamic, - Setting.Property.NodeScope - ); - - public static final Setting<Double> BYTES_LAG_VARIANCE_FACTOR = Setting.doubleSetting( - "remote_store.segment.pressure.bytes_lag.variance_factor", - Defaults.BYTES_LAG_VARIANCE_FACTOR, - Defaults.VARIANCE_FACTOR_MIN_VALUE, - Setting.Property.Dynamic, - Setting.Property.NodeScope - ); - - public static final Setting<Double> UPLOAD_TIME_LAG_VARIANCE_FACTOR = Setting.doubleSetting( - "remote_store.segment.pressure.time_lag.variance_factor", - Defaults.UPLOAD_TIME_LAG_VARIANCE_FACTOR, - Defaults.VARIANCE_FACTOR_MIN_VALUE, - Setting.Property.Dynamic, - Setting.Property.NodeScope - ); - - public static final Setting<Integer> MIN_CONSECUTIVE_FAILURES_LIMIT = Setting.intSetting( - "remote_store.segment.pressure.consecutive_failures.limit", - Defaults.MIN_CONSECUTIVE_FAILURES_LIMIT, - Defaults.MIN_CONSECUTIVE_FAILURES_LIMIT_MIN_VALUE, - Setting.Property.Dynamic, - Setting.Property.NodeScope - ); - - public static final Setting<Integer> UPLOAD_BYTES_MOVING_AVERAGE_WINDOW_SIZE = Setting.intSetting( - "remote_store.segment.pressure.upload_bytes_moving_average_window_size", - Defaults.UPLOAD_BYTES_MOVING_AVERAGE_WINDOW_SIZE, - Defaults.MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE, - Setting.Property.Dynamic, - Setting.Property.NodeScope - ); - - public static final Setting<Integer> UPLOAD_BYTES_PER_SEC_MOVING_AVERAGE_WINDOW_SIZE = Setting.intSetting( - "remote_store.segment.pressure.upload_bytes_per_sec_moving_average_window_size", - Defaults.UPLOAD_BYTES_PER_SEC_MOVING_AVERAGE_WINDOW_SIZE, - Defaults.MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE, - Setting.Property.Dynamic, - Setting.Property.NodeScope - ); - - public static final Setting<Integer> UPLOAD_TIME_MOVING_AVERAGE_WINDOW_SIZE = Setting.intSetting( - "remote_store.segment.pressure.upload_time_moving_average_window_size", - Defaults.UPLOAD_TIME_MOVING_AVERAGE_WINDOW_SIZE, - Defaults.MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE, - Setting.Property.Dynamic, - Setting.Property.NodeScope - ); - - private volatile boolean remoteRefreshSegmentPressureEnabled; - - private volatile long minRefreshSeqNoLagLimit; - - private volatile double bytesLagVarianceFactor; - - private volatile double uploadTimeLagVarianceFactor; - - private volatile int minConsecutiveFailuresLimit; - - private volatile int uploadBytesMovingAverageWindowSize; - - private volatile int uploadBytesPerSecMovingAverageWindowSize; - - private volatile int uploadTimeMovingAverageWindowSize; - - public RemoteRefreshSegmentPressureSettings( - ClusterService clusterService, - Settings settings, - RemoteRefreshSegmentPressureService 
remoteUploadPressureService - ) { - ClusterSettings clusterSettings = clusterService.getClusterSettings(); - - this.remoteRefreshSegmentPressureEnabled = REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED.get(settings); - clusterSettings.addSettingsUpdateConsumer(REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED, this::setRemoteRefreshSegmentPressureEnabled); - - this.bytesLagVarianceFactor = BYTES_LAG_VARIANCE_FACTOR.get(settings); - clusterSettings.addSettingsUpdateConsumer(BYTES_LAG_VARIANCE_FACTOR, this::setBytesLagVarianceFactor); - - this.uploadTimeLagVarianceFactor = UPLOAD_TIME_LAG_VARIANCE_FACTOR.get(settings); - clusterSettings.addSettingsUpdateConsumer(UPLOAD_TIME_LAG_VARIANCE_FACTOR, this::setUploadTimeLagVarianceFactor); - - this.minConsecutiveFailuresLimit = MIN_CONSECUTIVE_FAILURES_LIMIT.get(settings); - clusterSettings.addSettingsUpdateConsumer(MIN_CONSECUTIVE_FAILURES_LIMIT, this::setMinConsecutiveFailuresLimit); - - this.uploadBytesMovingAverageWindowSize = UPLOAD_BYTES_MOVING_AVERAGE_WINDOW_SIZE.get(settings); - clusterSettings.addSettingsUpdateConsumer( - UPLOAD_BYTES_MOVING_AVERAGE_WINDOW_SIZE, - remoteUploadPressureService::updateUploadBytesMovingAverageWindowSize - ); - clusterSettings.addSettingsUpdateConsumer(UPLOAD_BYTES_MOVING_AVERAGE_WINDOW_SIZE, this::setUploadBytesMovingAverageWindowSize); - - this.uploadBytesPerSecMovingAverageWindowSize = UPLOAD_BYTES_PER_SEC_MOVING_AVERAGE_WINDOW_SIZE.get(settings); - clusterSettings.addSettingsUpdateConsumer( - UPLOAD_BYTES_PER_SEC_MOVING_AVERAGE_WINDOW_SIZE, - remoteUploadPressureService::updateUploadBytesPerSecMovingAverageWindowSize - ); - clusterSettings.addSettingsUpdateConsumer( - UPLOAD_BYTES_PER_SEC_MOVING_AVERAGE_WINDOW_SIZE, - this::setUploadBytesPerSecMovingAverageWindowSize - ); - - this.uploadTimeMovingAverageWindowSize = UPLOAD_TIME_MOVING_AVERAGE_WINDOW_SIZE.get(settings); - clusterSettings.addSettingsUpdateConsumer( - UPLOAD_TIME_MOVING_AVERAGE_WINDOW_SIZE, - remoteUploadPressureService::updateUploadTimeMsMovingAverageWindowSize - ); - clusterSettings.addSettingsUpdateConsumer(UPLOAD_TIME_MOVING_AVERAGE_WINDOW_SIZE, this::setUploadTimeMovingAverageWindowSize); - } - - public boolean isRemoteRefreshSegmentPressureEnabled() { - return remoteRefreshSegmentPressureEnabled; - } - - public void setRemoteRefreshSegmentPressureEnabled(boolean remoteRefreshSegmentPressureEnabled) { - this.remoteRefreshSegmentPressureEnabled = remoteRefreshSegmentPressureEnabled; - } - - public long getMinRefreshSeqNoLagLimit() { - return minRefreshSeqNoLagLimit; - } - - public void setMinRefreshSeqNoLagLimit(long minRefreshSeqNoLagLimit) { - this.minRefreshSeqNoLagLimit = minRefreshSeqNoLagLimit; - } - - public double getBytesLagVarianceFactor() { - return bytesLagVarianceFactor; - } - - public void setBytesLagVarianceFactor(double bytesLagVarianceFactor) { - this.bytesLagVarianceFactor = bytesLagVarianceFactor; - } - - public double getUploadTimeLagVarianceFactor() { - return uploadTimeLagVarianceFactor; - } - - public void setUploadTimeLagVarianceFactor(double uploadTimeLagVarianceFactor) { - this.uploadTimeLagVarianceFactor = uploadTimeLagVarianceFactor; - } - - public int getMinConsecutiveFailuresLimit() { - return minConsecutiveFailuresLimit; - } - - public void setMinConsecutiveFailuresLimit(int minConsecutiveFailuresLimit) { - this.minConsecutiveFailuresLimit = minConsecutiveFailuresLimit; - } - - public int getUploadBytesMovingAverageWindowSize() { - return uploadBytesMovingAverageWindowSize; - } - - public void 
setUploadBytesMovingAverageWindowSize(int uploadBytesMovingAverageWindowSize) { - this.uploadBytesMovingAverageWindowSize = uploadBytesMovingAverageWindowSize; - } - - public int getUploadBytesPerSecMovingAverageWindowSize() { - return uploadBytesPerSecMovingAverageWindowSize; - } - - public void setUploadBytesPerSecMovingAverageWindowSize(int uploadBytesPerSecMovingAverageWindowSize) { - this.uploadBytesPerSecMovingAverageWindowSize = uploadBytesPerSecMovingAverageWindowSize; - } - - public int getUploadTimeMovingAverageWindowSize() { - return uploadTimeMovingAverageWindowSize; - } - - public void setUploadTimeMovingAverageWindowSize(int uploadTimeMovingAverageWindowSize) { - this.uploadTimeMovingAverageWindowSize = uploadTimeMovingAverageWindowSize; - } -} diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteRefreshSegmentTracker.java b/server/src/main/java/org/opensearch/index/remote/RemoteRefreshSegmentTracker.java deleted file mode 100644 index 332b0d1698800..0000000000000 --- a/server/src/main/java/org/opensearch/index/remote/RemoteRefreshSegmentTracker.java +++ /dev/null @@ -1,618 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.remote; - -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.util.MovingAverage; -import org.opensearch.common.util.Streak; -import org.opensearch.common.util.concurrent.ConcurrentCollections; -import org.opensearch.core.index.shard.ShardId; - -import java.io.IOException; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.atomic.AtomicReference; -import java.util.stream.Collectors; - -/** - * Keeps track of remote refresh which happens in {@link org.opensearch.index.shard.RemoteStoreRefreshListener}. This consist of multiple critical metrics. - * - * @opensearch.internal - */ -public class RemoteRefreshSegmentTracker { - - /** - * ShardId for which this instance tracks the remote segment upload metadata. - */ - private final ShardId shardId; - - /** - * Every refresh is assigned a sequence number. This is the sequence number of the most recent refresh. - */ - private volatile long localRefreshSeqNo; - - /** - * The refresh time of the most recent refresh. - */ - private volatile long localRefreshTimeMs; - - /** - * The refresh time(clock) of the most recent refresh. - */ - private volatile long localRefreshClockTimeMs; - - /** - * Sequence number of the most recent remote refresh. - */ - private volatile long remoteRefreshSeqNo; - - /** - * The refresh time of most recent remote refresh. - */ - private volatile long remoteRefreshTimeMs; - - /** - * The refresh time(clock) of most recent remote refresh. - */ - private volatile long remoteRefreshClockTimeMs; - - /** - * Keeps the seq no lag computed so that we do not compute it for every request. - */ - private volatile long refreshSeqNoLag; - - /** - * Keeps the time (ms) lag computed so that we do not compute it for every request. 
- */ - private volatile long timeMsLag; - - /** - * Keeps track of the total bytes of segment files which were uploaded to remote store during last successful remote refresh - */ - private volatile long lastSuccessfulRemoteRefreshBytes; - - /** - * Cumulative sum of size in bytes of segment files for which upload has started during remote refresh. - */ - private volatile long uploadBytesStarted; - - /** - * Cumulative sum of size in bytes of segment files for which upload has failed during remote refresh. - */ - private volatile long uploadBytesFailed; - - /** - * Cumulative sum of size in bytes of segment files for which upload has succeeded during remote refresh. - */ - private volatile long uploadBytesSucceeded; - - /** - * Cumulative sum of count of remote refreshes that have started. - */ - private volatile long totalUploadsStarted; - - /** - * Cumulative sum of count of remote refreshes that have failed. - */ - private volatile long totalUploadsFailed; - - /** - * Cumulative sum of count of remote refreshes that have succeeded. - */ - private volatile long totalUploadsSucceeded; - - /** - * Cumulative sum of rejection counts for this shard. - */ - private final AtomicLong rejectionCount = new AtomicLong(); - - /** - * Keeps track of rejection count with each rejection reason. - */ - private final Map<String, AtomicLong> rejectionCountMap = ConcurrentCollections.newConcurrentMap(); - - /** - * Map of name to size of the segment files created as part of the most recent refresh. - */ - private volatile Map<String, Long> latestLocalFileNameLengthMap; - - /** - * Set of names of segment files that were uploaded as part of the most recent remote refresh. - */ - private final Set<String> latestUploadedFiles = new HashSet<>(); - - /** - * Keeps the bytes lag computed so that we do not compute it for every request. - */ - private volatile long bytesLag; - - /** - * Holds count of consecutive failures until last success. Gets reset to zero if there is a success. - */ - private final Streak failures = new Streak(); - - /** - * Provides moving average over the last N total size in bytes of segment files uploaded as part of remote refresh. - * N is window size. Wrapped with {@code AtomicReference} for dynamic changes in window size. - */ - private final AtomicReference<MovingAverage> uploadBytesMovingAverageReference; - - /** - * This lock object is used for making sure we do not miss any data - */ - private final Object uploadBytesMutex = new Object(); - - /** - * Provides moving average over the last N upload speed (in bytes/s) of segment files uploaded as part of remote refresh. - * N is window size. Wrapped with {@code AtomicReference} for dynamic changes in window size. - */ - private final AtomicReference<MovingAverage> uploadBytesPerSecMovingAverageReference; - - private final Object uploadBytesPerSecMutex = new Object(); - - /** - * Provides moving average over the last N overall upload time (in nanos) as part of remote refresh.N is window size. - * Wrapped with {@code AtomicReference} for dynamic changes in window size. 
- */ - private final AtomicReference<MovingAverage> uploadTimeMsMovingAverageReference; - - private final Object uploadTimeMsMutex = new Object(); - - public RemoteRefreshSegmentTracker( - ShardId shardId, - int uploadBytesMovingAverageWindowSize, - int uploadBytesPerSecMovingAverageWindowSize, - int uploadTimeMsMovingAverageWindowSize - ) { - this.shardId = shardId; - // Both the local refresh time and remote refresh time are set with current time to give consistent view of time lag when it arises. - long currentClockTimeMs = System.currentTimeMillis(); - long currentTimeMs = System.nanoTime() / 1_000_000L; - localRefreshTimeMs = currentTimeMs; - remoteRefreshTimeMs = currentTimeMs; - localRefreshClockTimeMs = currentClockTimeMs; - remoteRefreshClockTimeMs = currentClockTimeMs; - uploadBytesMovingAverageReference = new AtomicReference<>(new MovingAverage(uploadBytesMovingAverageWindowSize)); - uploadBytesPerSecMovingAverageReference = new AtomicReference<>(new MovingAverage(uploadBytesPerSecMovingAverageWindowSize)); - uploadTimeMsMovingAverageReference = new AtomicReference<>(new MovingAverage(uploadTimeMsMovingAverageWindowSize)); - - latestLocalFileNameLengthMap = new HashMap<>(); - } - - ShardId getShardId() { - return shardId; - } - - public long getLocalRefreshSeqNo() { - return localRefreshSeqNo; - } - - public void updateLocalRefreshSeqNo(long localRefreshSeqNo) { - assert localRefreshSeqNo >= this.localRefreshSeqNo : "newLocalRefreshSeqNo=" - + localRefreshSeqNo - + " < " - + "currentLocalRefreshSeqNo=" - + this.localRefreshSeqNo; - this.localRefreshSeqNo = localRefreshSeqNo; - computeRefreshSeqNoLag(); - } - - public long getLocalRefreshTimeMs() { - return localRefreshTimeMs; - } - - public long getLocalRefreshClockTimeMs() { - return localRefreshClockTimeMs; - } - - public void updateLocalRefreshTimeMs(long localRefreshTimeMs) { - assert localRefreshTimeMs >= this.localRefreshTimeMs : "newLocalRefreshTimeMs=" - + localRefreshTimeMs - + " < " - + "currentLocalRefreshTimeMs=" - + this.localRefreshTimeMs; - this.localRefreshTimeMs = localRefreshTimeMs; - computeTimeMsLag(); - } - - public void updateLocalRefreshClockTimeMs(long localRefreshClockTimeMs) { - this.localRefreshClockTimeMs = localRefreshClockTimeMs; - } - - long getRemoteRefreshSeqNo() { - return remoteRefreshSeqNo; - } - - public void updateRemoteRefreshSeqNo(long remoteRefreshSeqNo) { - assert remoteRefreshSeqNo >= this.remoteRefreshSeqNo : "newRemoteRefreshSeqNo=" - + remoteRefreshSeqNo - + " < " - + "currentRemoteRefreshSeqNo=" - + this.remoteRefreshSeqNo; - this.remoteRefreshSeqNo = remoteRefreshSeqNo; - computeRefreshSeqNoLag(); - } - - long getRemoteRefreshTimeMs() { - return remoteRefreshTimeMs; - } - - long getRemoteRefreshClockTimeMs() { - return remoteRefreshClockTimeMs; - } - - public void updateRemoteRefreshTimeMs(long remoteRefreshTimeMs) { - assert remoteRefreshTimeMs >= this.remoteRefreshTimeMs : "newRemoteRefreshTimeMs=" - + remoteRefreshTimeMs - + " < " - + "currentRemoteRefreshTimeMs=" - + this.remoteRefreshTimeMs; - this.remoteRefreshTimeMs = remoteRefreshTimeMs; - computeTimeMsLag(); - } - - public void updateRemoteRefreshClockTimeMs(long remoteRefreshClockTimeMs) { - this.remoteRefreshClockTimeMs = remoteRefreshClockTimeMs; - } - - private void computeRefreshSeqNoLag() { - refreshSeqNoLag = localRefreshSeqNo - remoteRefreshSeqNo; - } - - public long getRefreshSeqNoLag() { - return refreshSeqNoLag; - } - - private void computeTimeMsLag() { - timeMsLag = localRefreshTimeMs - remoteRefreshTimeMs; - } 
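Both this deleted tracker and its RemoteSegmentTransferTracker replacement later in this diff pair each AtomicReference<MovingAverage> with a dedicated mutex, so that record() calls and window resizes cannot interleave and lose samples. As a rough sketch of the underlying pattern, the following minimal, hypothetical stand-in (WindowedAverage is illustrative only, not the actual org.opensearch.common.util.MovingAverage) shows why isReady() requires a full window and why, per the Javadoc below, a resize starts data collection over:

final class WindowedAverage {
    private final long[] window; // ring buffer holding the last N samples
    private long sum;            // running sum of the samples currently in the window
    private int count;           // samples recorded so far, capped at window.length
    private int index;           // next slot to overwrite (the oldest sample once full)

    WindowedAverage(int size) {
        this.window = new long[size];
    }

    synchronized void record(long value) {
        sum += value - window[index]; // evict the oldest sample, admit the new one
        window[index] = value;
        index = (index + 1) % window.length;
        if (count < window.length) {
            count++;
        }
    }

    synchronized boolean isReady() {
        return count == window.length; // the average is only meaningful once the window is full
    }

    synchronized double average() {
        return count == 0 ? 0.0 : (double) sum / count;
    }

    // In this sketch a size change simply starts an empty window, matching the
    // Javadoc note below that a window-size update "also resets any data collected so far".
    WindowedAverage copyWithSize(int newSize) {
        return new WindowedAverage(newSize);
    }
}

In the trackers, the resized instance is then published via AtomicReference.set(...) while holding the same mutex that record() uses, which is what keeps the swap atomic with respect to in-flight samples.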
- - public long getTimeMsLag() { - return timeMsLag; - } - - public long getBytesLag() { - return bytesLag; - } - - public long getUploadBytesStarted() { - return uploadBytesStarted; - } - - public void addUploadBytesStarted(long size) { - uploadBytesStarted += size; - } - - public long getUploadBytesFailed() { - return uploadBytesFailed; - } - - public void addUploadBytesFailed(long size) { - uploadBytesFailed += size; - } - - public long getUploadBytesSucceeded() { - return uploadBytesSucceeded; - } - - public void addUploadBytesSucceeded(long size) { - uploadBytesSucceeded += size; - } - - public long getInflightUploadBytes() { - return uploadBytesStarted - uploadBytesFailed - uploadBytesSucceeded; - } - - public long getTotalUploadsStarted() { - return totalUploadsStarted; - } - - public void incrementTotalUploadsStarted() { - totalUploadsStarted += 1; - } - - public long getTotalUploadsFailed() { - return totalUploadsFailed; - } - - public void incrementTotalUploadsFailed() { - totalUploadsFailed += 1; - failures.record(true); - } - - public long getTotalUploadsSucceeded() { - return totalUploadsSucceeded; - } - - public void incrementTotalUploadsSucceeded() { - totalUploadsSucceeded += 1; - failures.record(false); - } - - public long getInflightUploads() { - return totalUploadsStarted - totalUploadsFailed - totalUploadsSucceeded; - } - - public long getRejectionCount() { - return rejectionCount.get(); - } - - void incrementRejectionCount() { - rejectionCount.incrementAndGet(); - } - - void incrementRejectionCount(String rejectionReason) { - rejectionCountMap.computeIfAbsent(rejectionReason, k -> new AtomicLong()).incrementAndGet(); - incrementRejectionCount(); - } - - long getRejectionCount(String rejectionReason) { - return rejectionCountMap.get(rejectionReason).get(); - } - - Map<String, Long> getLatestLocalFileNameLengthMap() { - return latestLocalFileNameLengthMap; - } - - public void setLatestLocalFileNameLengthMap(Map<String, Long> latestLocalFileNameLengthMap) { - this.latestLocalFileNameLengthMap = latestLocalFileNameLengthMap; - computeBytesLag(); - } - - public void addToLatestUploadedFiles(String file) { - this.latestUploadedFiles.add(file); - computeBytesLag(); - } - - public void setLatestUploadedFiles(Set<String> files) { - this.latestUploadedFiles.clear(); - this.latestUploadedFiles.addAll(files); - computeBytesLag(); - } - - private void computeBytesLag() { - if (latestLocalFileNameLengthMap == null || latestLocalFileNameLengthMap.isEmpty()) { - return; - } - Set<String> filesNotYetUploaded = latestLocalFileNameLengthMap.keySet() - .stream() - .filter(f -> !latestUploadedFiles.contains(f)) - .collect(Collectors.toSet()); - this.bytesLag = filesNotYetUploaded.stream().map(latestLocalFileNameLengthMap::get).mapToLong(Long::longValue).sum(); - } - - int getConsecutiveFailureCount() { - return failures.length(); - } - - boolean isUploadBytesAverageReady() { - return uploadBytesMovingAverageReference.get().isReady(); - } - - double getUploadBytesAverage() { - return uploadBytesMovingAverageReference.get().getAverage(); - } - - public void addUploadBytes(long size) { - lastSuccessfulRemoteRefreshBytes = size; - synchronized (uploadBytesMutex) { - this.uploadBytesMovingAverageReference.get().record(size); - } - } - - /** - * Updates the window size for data collection of upload bytes. This also resets any data collected so far. 
- * - * @param updatedSize the updated size - */ - void updateUploadBytesMovingAverageWindowSize(int updatedSize) { - synchronized (uploadBytesMutex) { - this.uploadBytesMovingAverageReference.set(this.uploadBytesMovingAverageReference.get().copyWithSize(updatedSize)); - } - } - - boolean isUploadBytesPerSecAverageReady() { - return uploadBytesPerSecMovingAverageReference.get().isReady(); - } - - double getUploadBytesPerSecAverage() { - return uploadBytesPerSecMovingAverageReference.get().getAverage(); - } - - public void addUploadBytesPerSec(long bytesPerSec) { - synchronized (uploadBytesPerSecMutex) { - this.uploadBytesPerSecMovingAverageReference.get().record(bytesPerSec); - } - } - - /** - * Updates the window size for data collection of upload bytes per second. This also resets any data collected so far. - * - * @param updatedSize the updated size - */ - void updateUploadBytesPerSecMovingAverageWindowSize(int updatedSize) { - synchronized (uploadBytesPerSecMutex) { - this.uploadBytesPerSecMovingAverageReference.set(this.uploadBytesPerSecMovingAverageReference.get().copyWithSize(updatedSize)); - } - } - - boolean isUploadTimeMsAverageReady() { - return uploadTimeMsMovingAverageReference.get().isReady(); - } - - double getUploadTimeMsAverage() { - return uploadTimeMsMovingAverageReference.get().getAverage(); - } - - public void addUploadTimeMs(long timeMs) { - synchronized (uploadTimeMsMutex) { - this.uploadTimeMsMovingAverageReference.get().record(timeMs); - } - } - - /** - * Updates the window size for data collection of upload time (ms). This also resets any data collected so far. - * - * @param updatedSize the updated size - */ - void updateUploadTimeMsMovingAverageWindowSize(int updatedSize) { - synchronized (uploadTimeMsMutex) { - this.uploadTimeMsMovingAverageReference.set(this.uploadTimeMsMovingAverageReference.get().copyWithSize(updatedSize)); - } - } - - public RemoteRefreshSegmentTracker.Stats stats() { - return new RemoteRefreshSegmentTracker.Stats( - shardId, - localRefreshClockTimeMs, - remoteRefreshClockTimeMs, - timeMsLag, - localRefreshSeqNo, - remoteRefreshSeqNo, - uploadBytesStarted, - uploadBytesSucceeded, - uploadBytesFailed, - totalUploadsStarted, - totalUploadsSucceeded, - totalUploadsFailed, - rejectionCount.get(), - failures.length(), - lastSuccessfulRemoteRefreshBytes, - uploadBytesMovingAverageReference.get().getAverage(), - uploadBytesPerSecMovingAverageReference.get().getAverage(), - uploadTimeMsMovingAverageReference.get().getAverage(), - getBytesLag() - ); - } - - /** - * Represents the tracker's state as seen in the stats API. 
- * - * @opensearch.internal - */ - public static class Stats implements Writeable { - - public final ShardId shardId; - public final long localRefreshClockTimeMs; - public final long remoteRefreshClockTimeMs; - public final long refreshTimeLagMs; - public final long localRefreshNumber; - public final long remoteRefreshNumber; - public final long uploadBytesStarted; - public final long uploadBytesFailed; - public final long uploadBytesSucceeded; - public final long totalUploadsStarted; - public final long totalUploadsFailed; - public final long totalUploadsSucceeded; - public final long rejectionCount; - public final long consecutiveFailuresCount; - public final long lastSuccessfulRemoteRefreshBytes; - public final double uploadBytesMovingAverage; - public final double uploadBytesPerSecMovingAverage; - public final double uploadTimeMovingAverage; - public final long bytesLag; - - public Stats( - ShardId shardId, - long localRefreshClockTimeMs, - long remoteRefreshClockTimeMs, - long refreshTimeLagMs, - long localRefreshNumber, - long remoteRefreshNumber, - long uploadBytesStarted, - long uploadBytesSucceeded, - long uploadBytesFailed, - long totalUploadsStarted, - long totalUploadsSucceeded, - long totalUploadsFailed, - long rejectionCount, - long consecutiveFailuresCount, - long lastSuccessfulRemoteRefreshBytes, - double uploadBytesMovingAverage, - double uploadBytesPerSecMovingAverage, - double uploadTimeMovingAverage, - long bytesLag - ) { - this.shardId = shardId; - this.localRefreshClockTimeMs = localRefreshClockTimeMs; - this.remoteRefreshClockTimeMs = remoteRefreshClockTimeMs; - this.refreshTimeLagMs = refreshTimeLagMs; - this.localRefreshNumber = localRefreshNumber; - this.remoteRefreshNumber = remoteRefreshNumber; - this.uploadBytesStarted = uploadBytesStarted; - this.uploadBytesFailed = uploadBytesFailed; - this.uploadBytesSucceeded = uploadBytesSucceeded; - this.totalUploadsStarted = totalUploadsStarted; - this.totalUploadsFailed = totalUploadsFailed; - this.totalUploadsSucceeded = totalUploadsSucceeded; - this.rejectionCount = rejectionCount; - this.consecutiveFailuresCount = consecutiveFailuresCount; - this.lastSuccessfulRemoteRefreshBytes = lastSuccessfulRemoteRefreshBytes; - this.uploadBytesMovingAverage = uploadBytesMovingAverage; - this.uploadBytesPerSecMovingAverage = uploadBytesPerSecMovingAverage; - this.uploadTimeMovingAverage = uploadTimeMovingAverage; - this.bytesLag = bytesLag; - } - - public Stats(StreamInput in) throws IOException { - try { - this.shardId = new ShardId(in); - this.localRefreshClockTimeMs = in.readLong(); - this.remoteRefreshClockTimeMs = in.readLong(); - this.refreshTimeLagMs = in.readLong(); - this.localRefreshNumber = in.readLong(); - this.remoteRefreshNumber = in.readLong(); - this.uploadBytesStarted = in.readLong(); - this.uploadBytesFailed = in.readLong(); - this.uploadBytesSucceeded = in.readLong(); - this.totalUploadsStarted = in.readLong(); - this.totalUploadsFailed = in.readLong(); - this.totalUploadsSucceeded = in.readLong(); - this.rejectionCount = in.readLong(); - this.consecutiveFailuresCount = in.readLong(); - this.lastSuccessfulRemoteRefreshBytes = in.readLong(); - this.uploadBytesMovingAverage = in.readDouble(); - this.uploadBytesPerSecMovingAverage = in.readDouble(); - this.uploadTimeMovingAverage = in.readDouble(); - this.bytesLag = in.readLong(); - } catch (IOException e) { - throw e; - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - shardId.writeTo(out); - 
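// The field order below must mirror the read order in Stats(StreamInput) exactly:
// every value is an untagged fixed-width long or double, so a single reordering
// would silently shift the decoding of all subsequent fields. One straightforward
// way to exercise this invariant is a round trip through a BytesStreamOutput,
// e.g. stats.writeTo(out); new Stats(out.bytes().streamInput()).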
out.writeLong(localRefreshClockTimeMs); - out.writeLong(remoteRefreshClockTimeMs); - out.writeLong(refreshTimeLagMs); - out.writeLong(localRefreshNumber); - out.writeLong(remoteRefreshNumber); - out.writeLong(uploadBytesStarted); - out.writeLong(uploadBytesFailed); - out.writeLong(uploadBytesSucceeded); - out.writeLong(totalUploadsStarted); - out.writeLong(totalUploadsFailed); - out.writeLong(totalUploadsSucceeded); - out.writeLong(rejectionCount); - out.writeLong(consecutiveFailuresCount); - out.writeLong(lastSuccessfulRemoteRefreshBytes); - out.writeDouble(uploadBytesMovingAverage); - out.writeDouble(uploadBytesPerSecMovingAverage); - out.writeDouble(uploadTimeMovingAverage); - out.writeLong(bytesLag); - } - } - -} diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java new file mode 100644 index 0000000000000..fc1f245829b84 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java @@ -0,0 +1,377 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.remote; + +import org.opensearch.Version; +import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStats; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.shard.IndexShard; + +import java.io.IOException; +import java.util.Objects; + +/** + * Tracks remote store segment download and upload stats + * Used for displaying remote store stats in IndicesStats/NodeStats API + * + * @opensearch.api + */ +@PublicApi(since = "2.10.0") +public class RemoteSegmentStats implements Writeable, ToXContentFragment { + /** + * Cumulative bytes attempted to be uploaded to remote store + */ + private long uploadBytesStarted; + /** + * Cumulative bytes failed to be uploaded to the remote store + */ + private long uploadBytesFailed; + /** + * Cumulative bytes successfully uploaded to the remote store + */ + private long uploadBytesSucceeded; + /** + * Cumulative bytes attempted to be downloaded from the remote store + */ + private long downloadBytesStarted; + /** + * Cumulative bytes failed to be downloaded from the remote store + */ + private long downloadBytesFailed; + /** + * Cumulative bytes successfully downloaded from the remote store + */ + private long downloadBytesSucceeded; + /** + * Maximum refresh lag (in milliseconds) between local and the remote store + * Used to check for data freshness in the remote store + */ + private long maxRefreshTimeLag; + /** + * Maximum refresh lag (in bytes) between local and the remote store + * Used to check for data freshness in the remote store + */ + private long maxRefreshBytesLag; + /** + * Total refresh lag (in bytes) between local and the remote store + * Used to check for data freshness in the remote store + */ + private long totalRefreshBytesLag; + /** + * Total time spent in uploading segments to remote store + */ + private long totalUploadTime; + /** + * Total 
time spent in downloading segments from remote store + */ + private long totalDownloadTime; + /** + * Total rejections due to remote store upload backpressure + */ + private long totalRejections; + + public RemoteSegmentStats() {} + + public RemoteSegmentStats(StreamInput in) throws IOException { + uploadBytesStarted = in.readLong(); + uploadBytesFailed = in.readLong(); + uploadBytesSucceeded = in.readLong(); + downloadBytesStarted = in.readLong(); + downloadBytesFailed = in.readLong(); + downloadBytesSucceeded = in.readLong(); + maxRefreshTimeLag = in.readLong(); + maxRefreshBytesLag = in.readLong(); + totalRefreshBytesLag = in.readLong(); + totalUploadTime = in.readLong(); + totalDownloadTime = in.readLong(); + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + totalRejections = in.readVLong(); + } + } + + /** + * Constructor to retrieve metrics from {@link RemoteSegmentTransferTracker.Stats} which is used in {@link RemoteStoreStats} and + * provides verbose index level stats of segments transferred to the remote store. + * <p> + * This constructor is used in {@link IndexShard} to port over a subset of metrics to be displayed in IndexStats and subsequently rolled up to NodesStats. + * + * @param trackerStats source {@link RemoteSegmentTransferTracker.Stats} object from which metrics are retrieved + */ + public RemoteSegmentStats(RemoteSegmentTransferTracker.Stats trackerStats) { + this.uploadBytesStarted = trackerStats.uploadBytesStarted; + this.uploadBytesFailed = trackerStats.uploadBytesFailed; + this.uploadBytesSucceeded = trackerStats.uploadBytesSucceeded; + this.downloadBytesSucceeded = trackerStats.directoryFileTransferTrackerStats.transferredBytesSucceeded; + this.downloadBytesStarted = trackerStats.directoryFileTransferTrackerStats.transferredBytesStarted; + this.downloadBytesFailed = trackerStats.directoryFileTransferTrackerStats.transferredBytesFailed; + this.maxRefreshTimeLag = trackerStats.refreshTimeLagMs; + // Initializing both total and max bytes lag to the same `bytesLag` + // value from the tracker object + // Aggregations are performed in the add method + this.maxRefreshBytesLag = trackerStats.bytesLag; + this.totalRefreshBytesLag = trackerStats.bytesLag; + this.totalUploadTime = trackerStats.totalUploadTimeInMs; + this.totalDownloadTime = trackerStats.directoryFileTransferTrackerStats.totalTransferTimeInMs; + this.totalRejections = trackerStats.rejectionCount; + } + + // Getters and setters.
All are visible for testing + // Setters are only used for testing + public long getUploadBytesStarted() { + return uploadBytesStarted; + } + + public void addUploadBytesStarted(long uploadsStarted) { + this.uploadBytesStarted += uploadsStarted; + } + + public long getUploadBytesFailed() { + return uploadBytesFailed; + } + + public void addUploadBytesFailed(long uploadsFailed) { + this.uploadBytesFailed += uploadsFailed; + } + + public long getUploadBytesSucceeded() { + return uploadBytesSucceeded; + } + + public void addUploadBytesSucceeded(long uploadsSucceeded) { + this.uploadBytesSucceeded += uploadsSucceeded; + } + + public long getDownloadBytesStarted() { + return downloadBytesStarted; + } + + public void addDownloadBytesStarted(long downloadsStarted) { + this.downloadBytesStarted += downloadsStarted; + } + + public long getDownloadBytesFailed() { + return downloadBytesFailed; + } + + public void addDownloadBytesFailed(long downloadsFailed) { + this.downloadBytesFailed += downloadsFailed; + } + + public long getDownloadBytesSucceeded() { + return downloadBytesSucceeded; + } + + public void addDownloadBytesSucceeded(long downloadsSucceeded) { + this.downloadBytesSucceeded += downloadsSucceeded; + } + + public long getMaxRefreshTimeLag() { + return maxRefreshTimeLag; + } + + public void setMaxRefreshTimeLag(long maxRefreshTimeLag) { + this.maxRefreshTimeLag = Math.max(this.maxRefreshTimeLag, maxRefreshTimeLag); + } + + public long getMaxRefreshBytesLag() { + return maxRefreshBytesLag; + } + + public void addMaxRefreshBytesLag(long maxRefreshBytesLag) { + this.maxRefreshBytesLag = Math.max(this.maxRefreshBytesLag, maxRefreshBytesLag); + } + + public long getTotalRefreshBytesLag() { + return totalRefreshBytesLag; + } + + public void addTotalRefreshBytesLag(long totalRefreshBytesLag) { + this.totalRefreshBytesLag += totalRefreshBytesLag; + } + + public long getTotalUploadTime() { + return totalUploadTime; + } + + public void addTotalUploadTime(long totalUploadTime) { + this.totalUploadTime += totalUploadTime; + } + + public long getTotalDownloadTime() { + return totalDownloadTime; + } + + public void addTotalDownloadTime(long totalDownloadTime) { + this.totalDownloadTime += totalDownloadTime; + } + + public long getTotalRejections() { + return totalRejections; + } + + public void addTotalRejections(long totalRejections) { + this.totalRejections += totalRejections; + } + + /** + * Adds existing stats. 
Used for stats roll-ups at index or node level + * + * @param existingStats: Existing {@link RemoteSegmentStats} to add + */ + public void add(RemoteSegmentStats existingStats) { + if (existingStats != null) { + this.uploadBytesStarted += existingStats.getUploadBytesStarted(); + this.uploadBytesSucceeded += existingStats.getUploadBytesSucceeded(); + this.uploadBytesFailed += existingStats.getUploadBytesFailed(); + this.downloadBytesStarted += existingStats.getDownloadBytesStarted(); + this.downloadBytesFailed += existingStats.getDownloadBytesFailed(); + this.downloadBytesSucceeded += existingStats.getDownloadBytesSucceeded(); + this.maxRefreshTimeLag = Math.max(this.maxRefreshTimeLag, existingStats.getMaxRefreshTimeLag()); + this.maxRefreshBytesLag = Math.max(this.maxRefreshBytesLag, existingStats.getMaxRefreshBytesLag()); + this.totalRefreshBytesLag += existingStats.getTotalRefreshBytesLag(); + this.totalUploadTime += existingStats.getTotalUploadTime(); + this.totalDownloadTime += existingStats.getTotalDownloadTime(); + this.totalRejections += existingStats.totalRejections; + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(uploadBytesStarted); + out.writeLong(uploadBytesFailed); + out.writeLong(uploadBytesSucceeded); + out.writeLong(downloadBytesStarted); + out.writeLong(downloadBytesFailed); + out.writeLong(downloadBytesSucceeded); + out.writeLong(maxRefreshTimeLag); + out.writeLong(maxRefreshBytesLag); + out.writeLong(totalRefreshBytesLag); + out.writeLong(totalUploadTime); + out.writeLong(totalDownloadTime); + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeVLong(totalRejections); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(Fields.REMOTE_STORE); + + builder.startObject(Fields.UPLOAD); + buildUploadStats(builder); + builder.endObject(); // UPLOAD + + builder.startObject(Fields.DOWNLOAD); + buildDownloadStats(builder); + builder.endObject(); // DOWNLOAD + + builder.endObject(); // REMOTE_STORE + + return builder; + } + + private void buildUploadStats(XContentBuilder builder) throws IOException { + builder.startObject(Fields.TOTAL_UPLOAD_SIZE); + builder.humanReadableField(Fields.STARTED_BYTES, Fields.STARTED, new ByteSizeValue(uploadBytesStarted)); + builder.humanReadableField(Fields.SUCCEEDED_BYTES, Fields.SUCCEEDED, new ByteSizeValue(uploadBytesSucceeded)); + builder.humanReadableField(Fields.FAILED_BYTES, Fields.FAILED, new ByteSizeValue(uploadBytesFailed)); + builder.endObject(); // TOTAL_UPLOAD_SIZE + + builder.startObject(Fields.REFRESH_SIZE_LAG); + builder.humanReadableField(Fields.TOTAL_BYTES, Fields.TOTAL, new ByteSizeValue(totalRefreshBytesLag)); + builder.humanReadableField(Fields.MAX_BYTES, Fields.MAX, new ByteSizeValue(maxRefreshBytesLag)); + builder.endObject(); // REFRESH_SIZE_LAG + + builder.humanReadableField(Fields.MAX_REFRESH_TIME_LAG_IN_MILLIS, Fields.MAX_REFRESH_TIME_LAG, new TimeValue(maxRefreshTimeLag)); + builder.humanReadableField(Fields.TOTAL_TIME_SPENT_IN_MILLIS, Fields.TOTAL_TIME_SPENT, new TimeValue(totalUploadTime)); + + builder.startObject(Fields.PRESSURE); + builder.field(Fields.TOTAL_REJECTIONS, totalRejections); + builder.endObject(); // PRESSURE + } + + private void buildDownloadStats(XContentBuilder builder) throws IOException { + builder.startObject(Fields.TOTAL_DOWNLOAD_SIZE); + builder.humanReadableField(Fields.STARTED_BYTES, Fields.STARTED, new ByteSizeValue(downloadBytesStarted)); + 
builder.humanReadableField(Fields.SUCCEEDED_BYTES, Fields.SUCCEEDED, new ByteSizeValue(downloadBytesSucceeded)); + builder.humanReadableField(Fields.FAILED_BYTES, Fields.FAILED, new ByteSizeValue(downloadBytesFailed)); + builder.endObject(); + builder.humanReadableField(Fields.TOTAL_TIME_SPENT_IN_MILLIS, Fields.TOTAL_TIME_SPENT, new TimeValue(totalDownloadTime)); + } + + static final class Fields { + static final String REMOTE_STORE = "remote_store"; + static final String UPLOAD = "upload"; + static final String DOWNLOAD = "download"; + static final String TOTAL_UPLOAD_SIZE = "total_upload_size"; + static final String TOTAL_DOWNLOAD_SIZE = "total_download_size"; + static final String MAX_REFRESH_TIME_LAG = "max_refresh_time_lag"; + static final String MAX_REFRESH_TIME_LAG_IN_MILLIS = "max_refresh_time_lag_in_millis"; + static final String REFRESH_SIZE_LAG = "refresh_size_lag"; + static final String STARTED = "started"; + static final String STARTED_BYTES = "started_bytes"; + static final String FAILED = "failed"; + static final String FAILED_BYTES = "failed_bytes"; + static final String SUCCEEDED = "succeeded"; + static final String SUCCEEDED_BYTES = "succeeded_bytes"; + static final String TOTAL = "total"; + static final String TOTAL_BYTES = "total_bytes"; + static final String MAX = "max"; + static final String MAX_BYTES = "max_bytes"; + static final String TOTAL_TIME_SPENT = "total_time_spent"; + static final String TOTAL_TIME_SPENT_IN_MILLIS = "total_time_spent_in_millis"; + static final String PRESSURE = "pressure"; + static final String TOTAL_REJECTIONS = "total_rejections"; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + RemoteSegmentStats that = (RemoteSegmentStats) o; + return uploadBytesStarted == that.uploadBytesStarted + && uploadBytesFailed == that.uploadBytesFailed + && uploadBytesSucceeded == that.uploadBytesSucceeded + && downloadBytesStarted == that.downloadBytesStarted + && downloadBytesFailed == that.downloadBytesFailed + && downloadBytesSucceeded == that.downloadBytesSucceeded + && maxRefreshTimeLag == that.maxRefreshTimeLag + && maxRefreshBytesLag == that.maxRefreshBytesLag + && totalRefreshBytesLag == that.totalRefreshBytesLag + && totalUploadTime == that.totalUploadTime + && totalDownloadTime == that.totalDownloadTime + && totalRejections == that.totalRejections; + } + + @Override + public int hashCode() { + return Objects.hash( + uploadBytesStarted, + uploadBytesFailed, + uploadBytesSucceeded, + downloadBytesStarted, + downloadBytesFailed, + downloadBytesSucceeded, + maxRefreshTimeLag, + maxRefreshBytesLag, + totalRefreshBytesLag, + totalUploadTime, + totalDownloadTime, + totalRejections + ); + } +} diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java new file mode 100644 index 0000000000000..f1843ea3eef38 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java @@ -0,0 +1,584 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.remote; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.common.CheckedFunction; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.logging.Loggers; +import org.opensearch.common.util.Streak; +import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.store.DirectoryFileTransferTracker; + +import java.io.IOException; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Collectors; + +import static org.opensearch.index.shard.RemoteStoreRefreshListener.EXCLUDE_FILES; + +/** + * Keeps track of remote refresh which happens in {@link org.opensearch.index.shard.RemoteStoreRefreshListener}. This consists of multiple critical metrics. + * + * @opensearch.api + */ +@PublicApi(since = "2.10.0") +public class RemoteSegmentTransferTracker extends RemoteTransferTracker { + + private final Logger logger; + + /** + * Every refresh is assigned a sequence number. This is the sequence number of the most recent refresh. + */ + private volatile long localRefreshSeqNo; + + /** + * The refresh time of the most recent refresh. + */ + private volatile long localRefreshTimeMs; + + /** + * The refresh time(clock) of the most recent refresh. + */ + private volatile long localRefreshClockTimeMs; + + /** + * Sequence number of the most recent remote refresh. + */ + private volatile long remoteRefreshSeqNo; + + /** + * The refresh time of most recent remote refresh. + */ + private volatile long remoteRefreshTimeMs; + + /** + * This is the time of first local refresh after the last successful remote refresh. When the remote store is in + * sync with local refresh, this will be reset to -1. + */ + private volatile long remoteRefreshStartTimeMs = -1; + + /** + * The refresh time(clock) of most recent remote refresh. + */ + private volatile long remoteRefreshClockTimeMs; + + /** + * Keeps the seq no lag computed so that we do not compute it for every request. + */ + private volatile long refreshSeqNoLag; + + /** + * Keeps track of the total bytes of segment files which were uploaded to remote store during last successful remote refresh + */ + private volatile long lastSuccessfulRemoteRefreshBytes; + + /** + * Cumulative sum of rejection counts for this shard. + */ + private final AtomicLong rejectionCount = new AtomicLong(); + + /** + * Keeps track of rejection count with each rejection reason. + */ + private final Map<String, AtomicLong> rejectionCountMap = ConcurrentCollections.newConcurrentMap(); + + /** + * Keeps track of segment files and their size in bytes which are part of the most recent refresh. + */ + private final Map<String, Long> latestLocalFileNameLengthMap = ConcurrentCollections.newConcurrentMap(); + + /** + * This contains the files from the last successful remote refresh and ongoing uploads. This gets reset to just the + * last successful remote refresh state on successful remote refresh.
+ */ + private final Set<String> latestUploadedFiles = ConcurrentCollections.newConcurrentSet(); + + /** + * Keeps the bytes lag computed so that we do not compute it for every request. + */ + private volatile long bytesLag; + + /** + * Holds count of consecutive failures until last success. Gets reset to zero if there is a success. + */ + private final Streak failures = new Streak(); + + /** + * {@link org.opensearch.index.store.Store.StoreDirectory} level file transfer tracker, used to show download stats + */ + private final DirectoryFileTransferTracker directoryFileTransferTracker; + + public RemoteSegmentTransferTracker( + ShardId shardId, + DirectoryFileTransferTracker directoryFileTransferTracker, + int movingAverageWindowSize + ) { + super(shardId, movingAverageWindowSize); + + logger = Loggers.getLogger(getClass(), shardId); + // Both the local refresh time and remote refresh time are set with current time to give consistent view of time lag when it arises. + long currentClockTimeMs = System.currentTimeMillis(); + long currentTimeMs = currentTimeMsUsingSystemNanos(); + localRefreshTimeMs = currentTimeMs; + remoteRefreshTimeMs = currentTimeMs; + remoteRefreshStartTimeMs = currentTimeMs; + localRefreshClockTimeMs = currentClockTimeMs; + remoteRefreshClockTimeMs = currentClockTimeMs; + this.directoryFileTransferTracker = directoryFileTransferTracker; + } + + public static long currentTimeMsUsingSystemNanos() { + return TimeUnit.NANOSECONDS.toMillis(System.nanoTime()); + } + + @Override + public void incrementTotalUploadsFailed() { + super.incrementTotalUploadsFailed(); + failures.record(true); + } + + @Override + public void incrementTotalUploadsSucceeded() { + super.incrementTotalUploadsSucceeded(); + failures.record(false); + } + + public long getLocalRefreshSeqNo() { + return localRefreshSeqNo; + } + + // Visible for testing + void updateLocalRefreshSeqNo(long localRefreshSeqNo) { + assert localRefreshSeqNo >= this.localRefreshSeqNo : "newLocalRefreshSeqNo=" + + localRefreshSeqNo + + " < " + + "currentLocalRefreshSeqNo=" + + this.localRefreshSeqNo; + this.localRefreshSeqNo = localRefreshSeqNo; + computeRefreshSeqNoLag(); + } + + public long getLocalRefreshTimeMs() { + return localRefreshTimeMs; + } + + public long getLocalRefreshClockTimeMs() { + return localRefreshClockTimeMs; + } + + /** + * Updates the last refresh time and refresh seq no which is seen by local store. 
+ */ + public void updateLocalRefreshTimeAndSeqNo() { + updateLocalRefreshClockTimeMs(System.currentTimeMillis()); + updateLocalRefreshTimeMs(currentTimeMsUsingSystemNanos()); + updateLocalRefreshSeqNo(getLocalRefreshSeqNo() + 1); + } + + // Visible for testing + synchronized void updateLocalRefreshTimeMs(long localRefreshTimeMs) { + assert localRefreshTimeMs >= this.localRefreshTimeMs : "newLocalRefreshTimeMs=" + + localRefreshTimeMs + + " < " + + "currentLocalRefreshTimeMs=" + + this.localRefreshTimeMs; + boolean isRemoteInSyncBeforeLocalRefresh = this.localRefreshTimeMs == this.remoteRefreshTimeMs; + this.localRefreshTimeMs = localRefreshTimeMs; + if (isRemoteInSyncBeforeLocalRefresh) { + this.remoteRefreshStartTimeMs = localRefreshTimeMs; + } + } + + private void updateLocalRefreshClockTimeMs(long localRefreshClockTimeMs) { + this.localRefreshClockTimeMs = localRefreshClockTimeMs; + } + + long getRemoteRefreshSeqNo() { + return remoteRefreshSeqNo; + } + + public void updateRemoteRefreshSeqNo(long remoteRefreshSeqNo) { + assert remoteRefreshSeqNo >= this.remoteRefreshSeqNo : "newRemoteRefreshSeqNo=" + + remoteRefreshSeqNo + + " < " + + "currentRemoteRefreshSeqNo=" + + this.remoteRefreshSeqNo; + this.remoteRefreshSeqNo = remoteRefreshSeqNo; + computeRefreshSeqNoLag(); + } + + long getRemoteRefreshTimeMs() { + return remoteRefreshTimeMs; + } + + long getRemoteRefreshClockTimeMs() { + return remoteRefreshClockTimeMs; + } + + public synchronized void updateRemoteRefreshTimeMs(long refreshTimeMs) { + assert refreshTimeMs >= this.remoteRefreshTimeMs : "newRemoteRefreshTimeMs=" + + refreshTimeMs + + " < " + + "currentRemoteRefreshTimeMs=" + + this.remoteRefreshTimeMs; + this.remoteRefreshTimeMs = refreshTimeMs; + // When multiple refreshes have failed, there is a possibility that retry is ongoing while another refresh gets + // triggered. After the segments have been uploaded and before the below code runs, the updateLocalRefreshTimeAndSeqNo + // method is triggered, which will update the local localRefreshTimeMs. Now, the lag would basically become the + // time since the last refresh happened locally. + this.remoteRefreshStartTimeMs = refreshTimeMs == this.localRefreshTimeMs ? 
-1 : this.localRefreshTimeMs; + } + + public void updateRemoteRefreshClockTimeMs(long remoteRefreshClockTimeMs) { + this.remoteRefreshClockTimeMs = remoteRefreshClockTimeMs; + } + + private void computeRefreshSeqNoLag() { + refreshSeqNoLag = localRefreshSeqNo - remoteRefreshSeqNo; + } + + public long getRefreshSeqNoLag() { + return refreshSeqNoLag; + } + + public long getTimeMsLag() { + if (remoteRefreshTimeMs == localRefreshTimeMs || bytesLag == 0) { + return 0; + } + return currentTimeMsUsingSystemNanos() - remoteRefreshStartTimeMs; + } + + public long getBytesLag() { + return bytesLag; + } + + public long getInflightUploadBytes() { + return uploadBytesStarted.get() - uploadBytesFailed.get() - uploadBytesSucceeded.get(); + } + + public long getInflightUploads() { + return totalUploadsStarted.get() - totalUploadsFailed.get() - totalUploadsSucceeded.get(); + } + + public long getRejectionCount() { + return rejectionCount.get(); + } + + /** public only for testing **/ + public void incrementRejectionCount() { + rejectionCount.incrementAndGet(); + } + + void incrementRejectionCount(String rejectionReason) { + rejectionCountMap.computeIfAbsent(rejectionReason, k -> new AtomicLong()).incrementAndGet(); + incrementRejectionCount(); + } + + long getRejectionCount(String rejectionReason) { + return rejectionCountMap.get(rejectionReason).get(); + } + + public Map<String, Long> getLatestLocalFileNameLengthMap() { + return Collections.unmodifiableMap(latestLocalFileNameLengthMap); + } + + /** + * Updates the latestLocalFileNameLengthMap by adding file name and it's size to the map. + * The method is given a function as an argument which is used for determining the file size (length in bytes). + * This method is also provided the collection of segment files which are the latest refresh local segment files. + * This method also removes the stale segment files from the map that are not part of the input segment files. 
+ * + * @param segmentFiles list of local refreshed segment files + * @param fileSizeFunction function used to determine the file size in bytes + * + * @return updated map of local segment file names to file sizes + */ + public Map<String, Long> updateLatestLocalFileNameLengthMap( + Collection<String> segmentFiles, + CheckedFunction<String, Long, IOException> fileSizeFunction + ) { + logger.debug( + "segmentFilesPostRefresh={} latestLocalFileNamesBeforeMapUpdate={}", + segmentFiles, + latestLocalFileNameLengthMap.keySet() + ); + // Update the map + segmentFiles.stream() + .filter(file -> EXCLUDE_FILES.contains(file) == false) + .filter(file -> latestLocalFileNameLengthMap.containsKey(file) == false || latestLocalFileNameLengthMap.get(file) == 0) + .forEach(file -> { + long fileSize = 0; + try { + fileSize = fileSizeFunction.apply(file); + } catch (IOException e) { + logger.warn(new ParameterizedMessage("Exception while reading the fileLength of file={}", file), e); + } + latestLocalFileNameLengthMap.put(file, fileSize); + }); + Set<String> fileSet = new HashSet<>(segmentFiles); + // Remove keys from the map that do not exist in the latest segment files + latestLocalFileNameLengthMap.entrySet().removeIf(entry -> fileSet.contains(entry.getKey()) == false); + computeBytesLag(); + return Collections.unmodifiableMap(latestLocalFileNameLengthMap); + } + + public void addToLatestUploadedFiles(String file) { + this.latestUploadedFiles.add(file); + computeBytesLag(); + } + + public void setLatestUploadedFiles(Set<String> files) { + this.latestUploadedFiles.clear(); + this.latestUploadedFiles.addAll(files); + computeBytesLag(); + } + + private void computeBytesLag() { + if (latestLocalFileNameLengthMap.isEmpty()) { + return; + } + Set<String> filesNotYetUploaded = latestLocalFileNameLengthMap.keySet() + .stream() + .filter(f -> !latestUploadedFiles.contains(f)) + .collect(Collectors.toSet()); + this.bytesLag = filesNotYetUploaded.stream().map(latestLocalFileNameLengthMap::get).mapToLong(Long::longValue).sum(); + } + + int getConsecutiveFailureCount() { + return failures.length(); + } + + public DirectoryFileTransferTracker getDirectoryFileTransferTracker() { + return directoryFileTransferTracker; + } + + public RemoteSegmentTransferTracker.Stats stats() { + return new RemoteSegmentTransferTracker.Stats( + shardId, + localRefreshClockTimeMs, + remoteRefreshClockTimeMs, + getTimeMsLag(), + localRefreshSeqNo, + remoteRefreshSeqNo, + uploadBytesStarted.get(), + uploadBytesSucceeded.get(), + uploadBytesFailed.get(), + totalUploadsStarted.get(), + totalUploadsSucceeded.get(), + totalUploadsFailed.get(), + rejectionCount.get(), + failures.length(), + lastSuccessfulRemoteRefreshBytes, + uploadBytesMovingAverageReference.get().getAverage(), + uploadBytesPerSecMovingAverageReference.get().getAverage(), + uploadTimeMsMovingAverageReference.get().getAverage(), + getBytesLag(), + totalUploadTimeInMillis.get(), + directoryFileTransferTracker.stats() + ); + } + + /** + * Represents the tracker's state as seen in the stats API.
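A sketch of how a refresh path might feed updateLatestLocalFileNameLengthMap above and read back the resulting bytes lag, assuming a Lucene Directory named storeDirectory; the wiring and file names are illustrative, not the actual caller.

import org.apache.lucene.store.Directory;
import java.io.IOException;
import java.util.List;
import java.util.Set;

class BytesLagSketch {
    // Illustrative call sequence; `tracker` and `storeDirectory` would come from the shard.
    static long pendingUploadBytes(RemoteSegmentTransferTracker tracker, Directory storeDirectory) throws IOException {
        // Register the files of the latest local refresh with their on-disk sizes.
        tracker.updateLatestLocalFileNameLengthMap(List.of("_0.cfe", "_0.cfs", "_0.si"), storeDirectory::fileLength);
        // Suppose only _0.cfe has reached the remote store so far.
        tracker.setLatestUploadedFiles(Set.of("_0.cfe"));
        // bytesLag is now fileLength("_0.cfs") + fileLength("_0.si").
        return tracker.getBytesLag();
    }
}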
+ * + * @opensearch.api + */ + @PublicApi(since = "2.10.0") + public static class Stats implements Writeable { + + public final ShardId shardId; + public final long localRefreshClockTimeMs; + public final long remoteRefreshClockTimeMs; + public final long refreshTimeLagMs; + public final long localRefreshNumber; + public final long remoteRefreshNumber; + public final long uploadBytesStarted; + public final long uploadBytesFailed; + public final long uploadBytesSucceeded; + public final long totalUploadsStarted; + public final long totalUploadsFailed; + public final long totalUploadsSucceeded; + public final long rejectionCount; + public final long consecutiveFailuresCount; + public final long lastSuccessfulRemoteRefreshBytes; + public final double uploadBytesMovingAverage; + public final double uploadBytesPerSecMovingAverage; + public final long totalUploadTimeInMs; + public final double uploadTimeMovingAverage; + public final long bytesLag; + public final DirectoryFileTransferTracker.Stats directoryFileTransferTrackerStats; + + public Stats( + ShardId shardId, + long localRefreshClockTimeMs, + long remoteRefreshClockTimeMs, + long refreshTimeLagMs, + long localRefreshNumber, + long remoteRefreshNumber, + long uploadBytesStarted, + long uploadBytesSucceeded, + long uploadBytesFailed, + long totalUploadsStarted, + long totalUploadsSucceeded, + long totalUploadsFailed, + long rejectionCount, + long consecutiveFailuresCount, + long lastSuccessfulRemoteRefreshBytes, + double uploadBytesMovingAverage, + double uploadBytesPerSecMovingAverage, + double uploadTimeMovingAverage, + long bytesLag, + long totalUploadTimeInMs, + DirectoryFileTransferTracker.Stats directoryFileTransferTrackerStats + ) { + this.shardId = shardId; + this.localRefreshClockTimeMs = localRefreshClockTimeMs; + this.remoteRefreshClockTimeMs = remoteRefreshClockTimeMs; + this.refreshTimeLagMs = refreshTimeLagMs; + this.localRefreshNumber = localRefreshNumber; + this.remoteRefreshNumber = remoteRefreshNumber; + this.uploadBytesStarted = uploadBytesStarted; + this.uploadBytesFailed = uploadBytesFailed; + this.uploadBytesSucceeded = uploadBytesSucceeded; + this.totalUploadsStarted = totalUploadsStarted; + this.totalUploadsFailed = totalUploadsFailed; + this.totalUploadsSucceeded = totalUploadsSucceeded; + this.rejectionCount = rejectionCount; + this.consecutiveFailuresCount = consecutiveFailuresCount; + this.lastSuccessfulRemoteRefreshBytes = lastSuccessfulRemoteRefreshBytes; + this.uploadBytesMovingAverage = uploadBytesMovingAverage; + this.uploadBytesPerSecMovingAverage = uploadBytesPerSecMovingAverage; + this.uploadTimeMovingAverage = uploadTimeMovingAverage; + this.bytesLag = bytesLag; + this.totalUploadTimeInMs = totalUploadTimeInMs; + this.directoryFileTransferTrackerStats = directoryFileTransferTrackerStats; + } + + public Stats(StreamInput in) throws IOException { + try { + this.shardId = new ShardId(in); + this.localRefreshClockTimeMs = in.readLong(); + this.remoteRefreshClockTimeMs = in.readLong(); + this.refreshTimeLagMs = in.readLong(); + this.localRefreshNumber = in.readLong(); + this.remoteRefreshNumber = in.readLong(); + this.uploadBytesStarted = in.readLong(); + this.uploadBytesFailed = in.readLong(); + this.uploadBytesSucceeded = in.readLong(); + this.totalUploadsStarted = in.readLong(); + this.totalUploadsFailed = in.readLong(); + this.totalUploadsSucceeded = in.readLong(); + this.rejectionCount = in.readLong(); + this.consecutiveFailuresCount = in.readLong(); + this.lastSuccessfulRemoteRefreshBytes = 
in.readLong(); + this.uploadBytesMovingAverage = in.readDouble(); + this.uploadBytesPerSecMovingAverage = in.readDouble(); + this.uploadTimeMovingAverage = in.readDouble(); + this.bytesLag = in.readLong(); + this.totalUploadTimeInMs = in.readLong(); + this.directoryFileTransferTrackerStats = in.readOptionalWriteable(DirectoryFileTransferTracker.Stats::new); + } catch (IOException e) { + throw e; + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + shardId.writeTo(out); + out.writeLong(localRefreshClockTimeMs); + out.writeLong(remoteRefreshClockTimeMs); + out.writeLong(refreshTimeLagMs); + out.writeLong(localRefreshNumber); + out.writeLong(remoteRefreshNumber); + out.writeLong(uploadBytesStarted); + out.writeLong(uploadBytesFailed); + out.writeLong(uploadBytesSucceeded); + out.writeLong(totalUploadsStarted); + out.writeLong(totalUploadsFailed); + out.writeLong(totalUploadsSucceeded); + out.writeLong(rejectionCount); + out.writeLong(consecutiveFailuresCount); + out.writeLong(lastSuccessfulRemoteRefreshBytes); + out.writeDouble(uploadBytesMovingAverage); + out.writeDouble(uploadBytesPerSecMovingAverage); + out.writeDouble(uploadTimeMovingAverage); + out.writeLong(bytesLag); + out.writeLong(totalUploadTimeInMs); + out.writeOptionalWriteable(directoryFileTransferTrackerStats); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null || getClass() != obj.getClass()) return false; + Stats other = (Stats) obj; + + return this.shardId.toString().equals(other.shardId.toString()) + && this.localRefreshClockTimeMs == other.localRefreshClockTimeMs + && this.remoteRefreshClockTimeMs == other.remoteRefreshClockTimeMs + && this.refreshTimeLagMs == other.refreshTimeLagMs + && this.localRefreshNumber == other.localRefreshNumber + && this.remoteRefreshNumber == other.remoteRefreshNumber + && this.uploadBytesStarted == other.uploadBytesStarted + && this.uploadBytesFailed == other.uploadBytesFailed + && this.uploadBytesSucceeded == other.uploadBytesSucceeded + && this.totalUploadsStarted == other.totalUploadsStarted + && this.totalUploadsFailed == other.totalUploadsFailed + && this.totalUploadsSucceeded == other.totalUploadsSucceeded + && this.rejectionCount == other.rejectionCount + && this.consecutiveFailuresCount == other.consecutiveFailuresCount + && this.lastSuccessfulRemoteRefreshBytes == other.lastSuccessfulRemoteRefreshBytes + && Double.compare(this.uploadBytesMovingAverage, other.uploadBytesMovingAverage) == 0 + && Double.compare(this.uploadBytesPerSecMovingAverage, other.uploadBytesPerSecMovingAverage) == 0 + && Double.compare(this.uploadTimeMovingAverage, other.uploadTimeMovingAverage) == 0 + && this.bytesLag == other.bytesLag + && this.totalUploadTimeInMs == other.totalUploadTimeInMs + && this.directoryFileTransferTrackerStats.equals(other.directoryFileTransferTrackerStats); + } + + @Override + public int hashCode() { + return Objects.hash( + shardId, + localRefreshClockTimeMs, + remoteRefreshClockTimeMs, + refreshTimeLagMs, + localRefreshNumber, + remoteRefreshNumber, + uploadBytesStarted, + uploadBytesFailed, + uploadBytesSucceeded, + totalUploadsStarted, + totalUploadsFailed, + totalUploadsSucceeded, + rejectionCount, + consecutiveFailuresCount, + lastSuccessfulRemoteRefreshBytes, + uploadBytesMovingAverage, + uploadBytesPerSecMovingAverage, + uploadTimeMovingAverage, + bytesLag, + totalUploadTimeInMs, + directoryFileTransferTrackerStats + ); + } + } +} diff --git 
a/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureService.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureService.java new file mode 100644 index 0000000000000..33cd40f802d43 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureService.java @@ -0,0 +1,248 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.remote; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.core.index.shard.ShardId; + +import java.util.Arrays; +import java.util.List; +import java.util.Locale; + +/** + * Service used to validate if the incoming indexing request should be rejected based on the {@link RemoteSegmentTransferTracker}. + * + * @opensearch.internal + */ +public class RemoteStorePressureService { + + private static final Logger logger = LogManager.getLogger(RemoteStorePressureService.class); + + /** + * Remote refresh segment pressure settings, used both for creating the backpressure tracker and for rejection. + */ + private final RemoteStorePressureSettings pressureSettings; + + private final List<LagValidator> lagValidators; + + private final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory; + + @Inject + public RemoteStorePressureService( + ClusterService clusterService, + Settings settings, + RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory + ) { + pressureSettings = new RemoteStorePressureSettings(clusterService, settings, this); + lagValidators = Arrays.asList( + new ConsecutiveFailureValidator(pressureSettings), + new BytesLagValidator(pressureSettings), + new TimeLagValidator(pressureSettings) + ); + this.remoteStoreStatsTrackerFactory = remoteStoreStatsTrackerFactory; + } + + /** + * Check if remote refresh segments backpressure is enabled. This is backed by a cluster level setting. + * + * @return true if enabled, else false. + */ + public boolean isSegmentsUploadBackpressureEnabled() { + return pressureSettings.isRemoteRefreshSegmentPressureEnabled(); + } + + /** + * Validates if segments are lagging more than the limits. If they are, the request is rejected. + * + * @param shardId shardId for which the validation needs to be done. + */ + public void validateSegmentsUploadLag(ShardId shardId) { + RemoteSegmentTransferTracker remoteSegmentTransferTracker = remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(shardId); + // condition 1 - This will be null for non-remote backed indexes + // condition 2 - This will be zero if the remote store is in sync with the local store + if (remoteSegmentTransferTracker == null || remoteSegmentTransferTracker.getRefreshSeqNoLag() == 0) { + return; + } + + for (LagValidator lagValidator : lagValidators) { + if (lagValidator.validate(remoteSegmentTransferTracker, shardId) == false) { + remoteSegmentTransferTracker.incrementRejectionCount(lagValidator.name()); + throw new OpenSearchRejectedExecutionException(lagValidator.rejectionMessage(remoteSegmentTransferTracker, shardId)); + } + } + } + + /** + * Abstract class for validating if lag is acceptable or not.
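For orientation, the thresholds used by the validators below scale with observed behavior: with an upload-bytes moving average of 50 MB and the default variance factor of 10.0, bytes_lag rejections begin once the lag exceeds 500 MB. A hedged sketch of how a write path might consult the service; only the RemoteStorePressureService API is from this change, the caller-side names are illustrative.

import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException;
import org.opensearch.core.index.shard.ShardId;

class BackpressureCheckSketch {
    // Illustrative caller: returns false when the write should be rejected.
    static boolean acceptWrite(RemoteStorePressureService pressureService, ShardId shardId) {
        if (pressureService.isSegmentsUploadBackpressureEnabled()) {
            try {
                pressureService.validateSegmentsUploadLag(shardId);
            } catch (OpenSearchRejectedExecutionException e) {
                return false; // caller surfaces this as a rejected request
            }
        }
        return true;
    }
}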
+ * + * @opensearch.internal + */ + private abstract static class LagValidator { + + final RemoteStorePressureSettings pressureSettings; + + private LagValidator(RemoteStorePressureSettings pressureSettings) { + this.pressureSettings = pressureSettings; + } + + /** + * Validates the lag and returns the result accordingly. + * + * @param pressureTracker tracker which holds information about the shard. + * @param shardId shard id of the {@code IndexShard} currently being validated. + * @return true if successfully validated that lag is acceptable. + */ + abstract boolean validate(RemoteSegmentTransferTracker pressureTracker, ShardId shardId); + + /** + * Returns the name of the lag validator. + * + * @return the name using class name. + */ + abstract String name(); + + abstract String rejectionMessage(RemoteSegmentTransferTracker pressureTracker, ShardId shardId); + } + + /** + * Check if the remote store is lagging more than the upload bytes average multiplied by a variance factor + * + * @opensearch.internal + */ + private static class BytesLagValidator extends LagValidator { + + private static final String NAME = "bytes_lag"; + + private BytesLagValidator(RemoteStorePressureSettings pressureSettings) { + super(pressureSettings); + } + + @Override + public boolean validate(RemoteSegmentTransferTracker pressureTracker, ShardId shardId) { + if (pressureTracker.getRefreshSeqNoLag() <= 1) { + return true; + } + if (pressureTracker.isUploadBytesMovingAverageReady() == false) { + logger.trace("upload bytes moving average is not ready"); + return true; + } + double dynamicBytesLagThreshold = pressureTracker.getUploadBytesMovingAverage() * pressureSettings.getBytesLagVarianceFactor(); + long bytesLag = pressureTracker.getBytesLag(); + return bytesLag <= dynamicBytesLagThreshold; + } + + @Override + public String rejectionMessage(RemoteSegmentTransferTracker pressureTracker, ShardId shardId) { + double dynamicBytesLagThreshold = pressureTracker.getUploadBytesMovingAverage() * pressureSettings.getBytesLagVarianceFactor(); + return String.format( + Locale.ROOT, + "rejected execution on primary shard:%s due to remote segments lagging behind local segments. "
+ + "bytes_lag:%s dynamic_bytes_lag_threshold:%s", + shardId, + pressureTracker.getBytesLag(), + dynamicBytesLagThreshold + ); + } + + @Override + String name() { + return NAME; + } + } + + /** + * Check if the remote store is lagging more than the upload time average multiplied by a variance factor + * + * @opensearch.internal + */ + private static class TimeLagValidator extends LagValidator { + + private static final String NAME = "time_lag"; + + private TimeLagValidator(RemoteStorePressureSettings pressureSettings) { + super(pressureSettings); + } + + @Override + public boolean validate(RemoteSegmentTransferTracker pressureTracker, ShardId shardId) { + if (pressureTracker.getRefreshSeqNoLag() <= 1) { + return true; + } + if (pressureTracker.isUploadTimeMovingAverageReady() == false) { + return true; + } + long timeLag = pressureTracker.getTimeMsLag(); + double dynamicTimeLagThreshold = pressureTracker.getUploadTimeMovingAverage() * pressureSettings + .getUploadTimeLagVarianceFactor(); + return timeLag <= dynamicTimeLagThreshold; + } + + @Override + public String rejectionMessage(RemoteSegmentTransferTracker pressureTracker, ShardId shardId) { + double dynamicTimeLagThreshold = pressureTracker.getUploadTimeMovingAverage() * pressureSettings + .getUploadTimeLagVarianceFactor(); + return String.format( + Locale.ROOT, + "rejected execution on primary shard:%s due to remote segments lagging behind local segments." + + "time_lag:%s ms dynamic_time_lag_threshold:%s ms", + shardId, + pressureTracker.getTimeMsLag(), + dynamicTimeLagThreshold + ); + } + + @Override + String name() { + return NAME; + } + } + + /** + * Check if consecutive failure limit has been breached + * + * @opensearch.internal + */ + private static class ConsecutiveFailureValidator extends LagValidator { + + private static final String NAME = "consecutive_failures_lag"; + + private ConsecutiveFailureValidator(RemoteStorePressureSettings pressureSettings) { + super(pressureSettings); + } + + @Override + public boolean validate(RemoteSegmentTransferTracker pressureTracker, ShardId shardId) { + int failureStreakCount = pressureTracker.getConsecutiveFailureCount(); + int minConsecutiveFailureThreshold = pressureSettings.getMinConsecutiveFailuresLimit(); + return failureStreakCount <= minConsecutiveFailureThreshold; + } + + @Override + public String rejectionMessage(RemoteSegmentTransferTracker pressureTracker, ShardId shardId) { + return String.format( + Locale.ROOT, + "rejected execution on primary shard:%s due to remote segments lagging behind local segments." + + "failure_streak_count:%s min_consecutive_failure_threshold:%s", + shardId, + pressureTracker.getConsecutiveFailureCount(), + pressureSettings.getMinConsecutiveFailuresLimit() + ); + } + + @Override + String name() { + return NAME; + } + } +} diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureSettings.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureSettings.java new file mode 100644 index 0000000000000..e66aa3444c214 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureSettings.java @@ -0,0 +1,131 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.remote; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; + +/** + * Settings related to back pressure on account of segments upload failures / lags. + * + * @opensearch.internal + */ +public class RemoteStorePressureSettings { + + static class Defaults { + private static final double BYTES_LAG_VARIANCE_FACTOR = 10.0; + private static final double UPLOAD_TIME_LAG_VARIANCE_FACTOR = 10.0; + private static final double VARIANCE_FACTOR_MIN_VALUE = 1.0; + private static final int MIN_CONSECUTIVE_FAILURES_LIMIT = 5; + private static final int MIN_CONSECUTIVE_FAILURES_LIMIT_MIN_VALUE = 1; + } + + public static final Setting<Boolean> REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED = Setting.boolSetting( + "remote_store.segment.pressure.enabled", + true, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public static final Setting<Double> BYTES_LAG_VARIANCE_FACTOR = Setting.doubleSetting( + "remote_store.segment.pressure.bytes_lag.variance_factor", + Defaults.BYTES_LAG_VARIANCE_FACTOR, + Defaults.VARIANCE_FACTOR_MIN_VALUE, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public static final Setting<Double> UPLOAD_TIME_LAG_VARIANCE_FACTOR = Setting.doubleSetting( + "remote_store.segment.pressure.time_lag.variance_factor", + Defaults.UPLOAD_TIME_LAG_VARIANCE_FACTOR, + Defaults.VARIANCE_FACTOR_MIN_VALUE, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public static final Setting<Integer> MIN_CONSECUTIVE_FAILURES_LIMIT = Setting.intSetting( + "remote_store.segment.pressure.consecutive_failures.limit", + Defaults.MIN_CONSECUTIVE_FAILURES_LIMIT, + Defaults.MIN_CONSECUTIVE_FAILURES_LIMIT_MIN_VALUE, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + private volatile boolean remoteRefreshSegmentPressureEnabled; + + private volatile long minRefreshSeqNoLagLimit; + + private volatile double bytesLagVarianceFactor; + + private volatile double uploadTimeLagVarianceFactor; + + private volatile int minConsecutiveFailuresLimit; + + public RemoteStorePressureSettings( + ClusterService clusterService, + Settings settings, + RemoteStorePressureService remoteStorePressureService + ) { + ClusterSettings clusterSettings = clusterService.getClusterSettings(); + + this.remoteRefreshSegmentPressureEnabled = REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED.get(settings); + clusterSettings.addSettingsUpdateConsumer(REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED, this::setRemoteRefreshSegmentPressureEnabled); + + this.bytesLagVarianceFactor = BYTES_LAG_VARIANCE_FACTOR.get(settings); + clusterSettings.addSettingsUpdateConsumer(BYTES_LAG_VARIANCE_FACTOR, this::setBytesLagVarianceFactor); + + this.uploadTimeLagVarianceFactor = UPLOAD_TIME_LAG_VARIANCE_FACTOR.get(settings); + clusterSettings.addSettingsUpdateConsumer(UPLOAD_TIME_LAG_VARIANCE_FACTOR, this::setUploadTimeLagVarianceFactor); + + this.minConsecutiveFailuresLimit = MIN_CONSECUTIVE_FAILURES_LIMIT.get(settings); + clusterSettings.addSettingsUpdateConsumer(MIN_CONSECUTIVE_FAILURES_LIMIT, this::setMinConsecutiveFailuresLimit); + } + + boolean isRemoteRefreshSegmentPressureEnabled() { + return remoteRefreshSegmentPressureEnabled; + } + + private void setRemoteRefreshSegmentPressureEnabled(boolean remoteRefreshSegmentPressureEnabled) { + this.remoteRefreshSegmentPressureEnabled = remoteRefreshSegmentPressureEnabled; + } + + long 
getMinRefreshSeqNoLagLimit() { + return minRefreshSeqNoLagLimit; + } + + private void setMinRefreshSeqNoLagLimit(long minRefreshSeqNoLagLimit) { + this.minRefreshSeqNoLagLimit = minRefreshSeqNoLagLimit; + } + + double getBytesLagVarianceFactor() { + return bytesLagVarianceFactor; + } + + private void setBytesLagVarianceFactor(double bytesLagVarianceFactor) { + this.bytesLagVarianceFactor = bytesLagVarianceFactor; + } + + double getUploadTimeLagVarianceFactor() { + return uploadTimeLagVarianceFactor; + } + + private void setUploadTimeLagVarianceFactor(double uploadTimeLagVarianceFactor) { + this.uploadTimeLagVarianceFactor = uploadTimeLagVarianceFactor; + } + + int getMinConsecutiveFailuresLimit() { + return minConsecutiveFailuresLimit; + } + + private void setMinConsecutiveFailuresLimit(int minConsecutiveFailuresLimit) { + this.minConsecutiveFailuresLimit = minConsecutiveFailuresLimit; + } +} diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactory.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactory.java new file mode 100644 index 0000000000000..9a146be96c9de --- /dev/null +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactory.java @@ -0,0 +1,117 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.remote; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.shard.IndexEventListener; +import org.opensearch.index.shard.IndexShard; + +import java.util.Map; + +/** + * Factory to manage stats trackers for Remote Store operations + * + * @opensearch.api + */ +@PublicApi(since = "2.10.0") +public class RemoteStoreStatsTrackerFactory implements IndexEventListener { + static class Defaults { + static final int MOVING_AVERAGE_WINDOW_SIZE = 20; + static final int MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE = 5; + } + + public static final Setting<Integer> MOVING_AVERAGE_WINDOW_SIZE = Setting.intSetting( + "remote_store.moving_average_window_size", + Defaults.MOVING_AVERAGE_WINDOW_SIZE, + Defaults.MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + private static final Logger logger = LogManager.getLogger(RemoteStoreStatsTrackerFactory.class); + + /** + * Number of data points to consider for a moving average statistic + */ + private volatile int movingAverageWindowSize; + + /** + * Keeps map of remote-backed index shards and their corresponding stats tracker. + */ + private final Map<ShardId, RemoteSegmentTransferTracker> remoteSegmentTrackerMap = ConcurrentCollections.newConcurrentMap(); + + /** + * Keeps map of remote-backed index shards and their corresponding translog stats tracker.
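Because remote_store.moving_average_window_size is registered as a dynamic setting above, an operator can resize every tracker's moving-average windows at runtime. One hedged sketch of such an update through the cluster settings API; the client wiring is illustrative, only the setting key is from this change.

import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.opensearch.client.Client;
import org.opensearch.common.settings.Settings;

class WindowResizeSketch {
    // Illustrative; `client` would be any client handle available to the caller.
    static void resizeWindows(Client client) {
        ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest();
        // Triggers updateMovingAverageWindowSize on the factory, which resets
        // the collected samples in every registered tracker.
        request.persistentSettings(Settings.builder().put("remote_store.moving_average_window_size", 30));
        client.admin().cluster().updateSettings(request).actionGet();
    }
}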
+ */ + private final Map<ShardId, RemoteTranslogTransferTracker> remoteTranslogTrackerMap = ConcurrentCollections.newConcurrentMap(); + + public RemoteStoreStatsTrackerFactory(ClusterService clusterService, Settings settings) { + ClusterSettings clusterSettings = clusterService.getClusterSettings(); + + this.movingAverageWindowSize = MOVING_AVERAGE_WINDOW_SIZE.get(settings); + clusterSettings.addSettingsUpdateConsumer(MOVING_AVERAGE_WINDOW_SIZE, this::updateMovingAverageWindowSize); + } + + @Override + public void afterIndexShardCreated(IndexShard indexShard) { + if (indexShard.indexSettings().isRemoteStoreEnabled() == false) { + return; + } + ShardId shardId = indexShard.shardId(); + remoteSegmentTrackerMap.put( + shardId, + new RemoteSegmentTransferTracker(shardId, indexShard.store().getDirectoryFileTransferTracker(), movingAverageWindowSize) + ); + logger.trace("Created RemoteSegmentTransferTracker for shardId={}", shardId); + remoteTranslogTrackerMap.put(shardId, new RemoteTranslogTransferTracker(shardId, movingAverageWindowSize)); + logger.trace("Created RemoteTranslogTransferTracker for shardId={}", shardId); + } + + @Override + public void afterIndexShardClosed(ShardId shardId, IndexShard indexShard, Settings indexSettings) { + RemoteSegmentTransferTracker remoteSegmentTransferTracker = remoteSegmentTrackerMap.remove(shardId); + if (remoteSegmentTransferTracker != null) { + logger.trace("Deleted RemoteSegmentTransferTracker for shardId={}", shardId); + } + + RemoteTranslogTransferTracker remoteTranslogTransferTracker = remoteTranslogTrackerMap.remove(shardId); + if (remoteTranslogTransferTracker != null) { + logger.trace("Deleted RemoteTranslogTransferTracker for shardId={}", shardId); + } + } + + private void updateMovingAverageWindowSize(int updatedSize) { + remoteSegmentTrackerMap.values().forEach(tracker -> tracker.updateMovingAverageWindowSize(updatedSize)); + remoteTranslogTrackerMap.values().forEach(tracker -> tracker.updateMovingAverageWindowSize(updatedSize)); + + // Update movingAverageWindowSize only if the trackers were successfully updated + movingAverageWindowSize = updatedSize; + } + + public RemoteSegmentTransferTracker getRemoteSegmentTransferTracker(ShardId shardId) { + return remoteSegmentTrackerMap.get(shardId); + } + + public RemoteTranslogTransferTracker getRemoteTranslogTransferTracker(ShardId shardId) { + return remoteTranslogTrackerMap.get(shardId); + } + + // visible for testing + int getMovingAverageWindowSize() { + return movingAverageWindowSize; + } +} diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java index 114d07589b0c0..b4c33d781af86 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java @@ -8,7 +8,13 @@ package org.opensearch.index.remote; +import org.opensearch.common.collect.Tuple; + import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Function; /** * Utils for remote store @@ -69,4 +75,30 @@ public static String getSegmentName(String filename) { return filename.substring(0, endIdx); } + + /** + * + * @param mdFiles List of segment/translog metadata files + * @param fn Function to extract PrimaryTerm_Generation and Node Id from metadata file name . 
+ * fn returns null if node id is not part of the file name + */ + public static void verifyNoMultipleWriters(List<String> mdFiles, Function<String, Tuple<String, String>> fn) { + Map<String, String> nodesByPrimaryTermAndGen = new HashMap<>(); + mdFiles.forEach(mdFile -> { + Tuple<String, String> nodeIdByPrimaryTermAndGen = fn.apply(mdFile); + if (nodeIdByPrimaryTermAndGen != null) { + if (nodesByPrimaryTermAndGen.containsKey(nodeIdByPrimaryTermAndGen.v1()) + && (!nodesByPrimaryTermAndGen.get(nodeIdByPrimaryTermAndGen.v1()).equals(nodeIdByPrimaryTermAndGen.v2()))) { + throw new IllegalStateException( + "Multiple metadata files from different nodes " + + "having same primary term and generations " + + nodeIdByPrimaryTermAndGen.v1() + + " detected " + ); + } + nodesByPrimaryTermAndGen.put(nodeIdByPrimaryTermAndGen.v1(), nodeIdByPrimaryTermAndGen.v2()); + } + }); + } + } diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteTransferTracker.java b/server/src/main/java/org/opensearch/index/remote/RemoteTransferTracker.java new file mode 100644 index 0000000000000..cbae4931b7001 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/remote/RemoteTransferTracker.java @@ -0,0 +1,269 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.remote; + +import org.opensearch.common.util.MovingAverage; +import org.opensearch.core.index.shard.ShardId; + +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; + +/** + * Base class for remote store stats trackers + * + * @opensearch.internal + */ +public abstract class RemoteTransferTracker { + /** + * The shard that this tracker is associated with + */ + protected final ShardId shardId; + + /** + * Total time spent on Remote Store uploads. + */ + protected final AtomicLong totalUploadTimeInMillis; + + /** + * Total number of Remote Store uploads that have been started. + */ + protected final AtomicLong totalUploadsStarted; + + /** + * Total number of Remote Store uploads that have failed. + */ + protected final AtomicLong totalUploadsFailed; + + /** + * Total number of Remote Store uploads that have been successful. + */ + protected final AtomicLong totalUploadsSucceeded; + + /** + * Total number of byte uploads to Remote Store that have been started. + */ + protected final AtomicLong uploadBytesStarted; + + /** + * Total number of byte uploads to Remote Store that have failed. + */ + protected final AtomicLong uploadBytesFailed; + + /** + * Total number of byte uploads to Remote Store that have been successful. + */ + protected final AtomicLong uploadBytesSucceeded; + + /** + * Provides moving average over the last N total size in bytes of files uploaded as part of Remote Store upload. + * N is window size. Wrapped with {@code AtomicReference} for dynamic changes in window size. + */ + protected final AtomicReference<MovingAverage> uploadBytesMovingAverageReference; + + /** + * This lock object is used for making sure we do not miss any data. + */ + protected final Object uploadBytesMutex; + + /** + * Provides moving average over the last N upload speed (in bytes/s) of files uploaded as part of Remote Store upload. + * N is window size. Wrapped with {@code AtomicReference} for dynamic changes in window size.
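The verifyNoMultipleWriters utility above is easiest to see with a concrete metadata file-name scheme; the name format and parser in this sketch are hypothetical, purely to illustrate the (primaryTerm_generation -> nodeId) extraction, and are not the real naming convention.

import org.opensearch.common.collect.Tuple;
import java.util.List;

class MultipleWritersSketch {
    static void demo() {
        // Two nodes claiming the same primary term and generation (5__12):
        List<String> mdFiles = List.of("metadata__5__12__nodeA", "metadata__5__12__nodeB");
        // Throws IllegalStateException, flagging the second writer.
        RemoteStoreUtils.verifyNoMultipleWriters(mdFiles, name -> {
            String[] parts = name.split("__");
            if (parts.length < 4) {
                return null; // node id not encoded in this file name
            }
            return new Tuple<>(parts[1] + "__" + parts[2], parts[3]);
        });
    }
}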
+ */ + protected final AtomicReference<MovingAverage> uploadBytesPerSecMovingAverageReference; + + /** + * This lock object is used for making sure we do not miss any data. + */ + protected final Object uploadBytesPerSecMutex; + + /** + * Provides moving average over the last N overall upload time (in millis) as part of Remote Store upload. N is window size. + * Wrapped with {@code AtomicReference} for dynamic changes in window size. + */ + protected final AtomicReference<MovingAverage> uploadTimeMsMovingAverageReference; + + /** + * This lock object is used for making sure we do not miss any data. + */ + protected final Object uploadTimeMsMutex; + + public RemoteTransferTracker(ShardId shardId, int movingAverageWindowSize) { + this.shardId = shardId; + totalUploadTimeInMillis = new AtomicLong(0); + totalUploadsStarted = new AtomicLong(0); + totalUploadsFailed = new AtomicLong(0); + totalUploadsSucceeded = new AtomicLong(0); + uploadBytesStarted = new AtomicLong(0); + uploadBytesFailed = new AtomicLong(0); + uploadBytesSucceeded = new AtomicLong(0); + uploadBytesMutex = new Object(); + uploadBytesMovingAverageReference = new AtomicReference<>(new MovingAverage(movingAverageWindowSize)); + uploadBytesPerSecMutex = new Object(); + uploadBytesPerSecMovingAverageReference = new AtomicReference<>(new MovingAverage(movingAverageWindowSize)); + uploadTimeMsMutex = new Object(); + uploadTimeMsMovingAverageReference = new AtomicReference<>(new MovingAverage(movingAverageWindowSize)); + } + + ShardId getShardId() { + return shardId; + } + + public long getTotalUploadTimeInMillis() { + return totalUploadTimeInMillis.get(); + } + + public void addUploadTimeInMillis(long duration) { + totalUploadTimeInMillis.addAndGet(duration); + } + + public long getTotalUploadsStarted() { + return totalUploadsStarted.get(); + } + + public long getTotalUploadsFailed() { + return totalUploadsFailed.get(); + } + + public long getTotalUploadsSucceeded() { + return totalUploadsSucceeded.get(); + } + + public void incrementTotalUploadsStarted() { + totalUploadsStarted.addAndGet(1); + } + + public void incrementTotalUploadsFailed() { + checkTotal(totalUploadsStarted.get(), totalUploadsFailed.get(), totalUploadsSucceeded.get(), 1); + totalUploadsFailed.addAndGet(1); + } + + public void incrementTotalUploadsSucceeded() { + checkTotal(totalUploadsStarted.get(), totalUploadsFailed.get(), totalUploadsSucceeded.get(), 1); + totalUploadsSucceeded.addAndGet(1); + } + + public long getUploadBytesStarted() { + return uploadBytesStarted.get(); + } + + public long getUploadBytesFailed() { + return uploadBytesFailed.get(); + } + + public long getUploadBytesSucceeded() { + return uploadBytesSucceeded.get(); + } + + public void addUploadBytesStarted(long count) { + uploadBytesStarted.addAndGet(count); + } + + public void addUploadBytesFailed(long count) { + checkTotal(uploadBytesStarted.get(), uploadBytesFailed.get(), uploadBytesSucceeded.get(), count); + uploadBytesFailed.addAndGet(count); + } + + public void addUploadBytesSucceeded(long count) { + checkTotal(uploadBytesStarted.get(), uploadBytesFailed.get(), uploadBytesSucceeded.get(), count); + uploadBytesSucceeded.addAndGet(count); + } + + boolean isUploadBytesMovingAverageReady() { + return uploadBytesMovingAverageReference.get().isReady(); + } + + double getUploadBytesMovingAverage() { + return uploadBytesMovingAverageReference.get().getAverage(); + } + + public void updateUploadBytesMovingAverage(long count) { + updateMovingAverage(count, uploadBytesMutex,
uploadBytesMovingAverageReference); + } + + boolean isUploadBytesPerSecMovingAverageReady() { + return uploadBytesPerSecMovingAverageReference.get().isReady(); + } + + double getUploadBytesPerSecMovingAverage() { + return uploadBytesPerSecMovingAverageReference.get().getAverage(); + } + + public void updateUploadBytesPerSecMovingAverage(long speed) { + updateMovingAverage(speed, uploadBytesPerSecMutex, uploadBytesPerSecMovingAverageReference); + } + + boolean isUploadTimeMovingAverageReady() { + return uploadTimeMsMovingAverageReference.get().isReady(); + } + + double getUploadTimeMovingAverage() { + return uploadTimeMsMovingAverageReference.get().getAverage(); + } + + public void updateUploadTimeMovingAverage(long duration) { + updateMovingAverage(duration, uploadTimeMsMutex, uploadTimeMsMovingAverageReference); + } + + /** + * Records a new data point for a moving average stat + * + * @param value The new data point to be added + * @param mutex The mutex to use for the update + * @param movingAverageReference The atomic reference to be updated + */ + protected void updateMovingAverage(long value, Object mutex, AtomicReference<MovingAverage> movingAverageReference) { + synchronized (mutex) { + movingAverageReference.get().record(value); + } + } + + /** + * Updates the window size for data collection. This also resets any data collected so far. + * + * @param updatedSize The updated size + */ + void updateMovingAverageWindowSize(int updatedSize) { + updateMovingAverageWindowSize(updatedSize, uploadBytesMutex, uploadBytesMovingAverageReference); + updateMovingAverageWindowSize(updatedSize, uploadBytesPerSecMutex, uploadBytesPerSecMovingAverageReference); + updateMovingAverageWindowSize(updatedSize, uploadTimeMsMutex, uploadTimeMsMovingAverageReference); + } + + /** + * Updates the window size for data collection. This also resets any data collected so far. 
+ * + * @param updatedSize The updated size + * @param mutex The mutex to use for the update + * @param movingAverageReference The atomic reference to be updated + */ + protected void updateMovingAverageWindowSize(int updatedSize, Object mutex, AtomicReference<MovingAverage> movingAverageReference) { + synchronized (mutex) { + movingAverageReference.set(movingAverageReference.get().copyWithSize(updatedSize)); + } + } + + /** + * Validates that the sum of successful operations, failed operations, and the number of operations to add (irrespective of failed/successful) does not exceed the number of operations originally started + * @param startedCount Number of operations started + * @param failedCount Number of operations failed + * @param succeededCount Number of operations successful + * @param countToAdd Number of operations to add + */ + private void checkTotal(long startedCount, long failedCount, long succeededCount, long countToAdd) { + long delta = startedCount - (failedCount + succeededCount + countToAdd); + assert delta >= 0 : "Sum of failure count (" + + failedCount + + "), success count (" + + succeededCount + + "), and count to add (" + + countToAdd + + ") cannot exceed started count (" + + startedCount + + ")"; + } +} diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteTranslogTransferTracker.java b/server/src/main/java/org/opensearch/index/remote/RemoteTranslogTransferTracker.java new file mode 100644 index 0000000000000..25bee10e1fbe3 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/remote/RemoteTranslogTransferTracker.java @@ -0,0 +1,548 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.remote; + +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.util.MovingAverage; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.shard.ShardId; + +import java.io.IOException; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; + +/** + * Stores Remote Translog Store-related stats for a given IndexShard. + * + * @opensearch.api + */ +@PublicApi(since = "2.10.0") +public class RemoteTranslogTransferTracker extends RemoteTransferTracker { + /** + * Epoch timestamp of the last successful Remote Translog Store upload. + */ + private final AtomicLong lastSuccessfulUploadTimestamp; + + /** + * Epoch timestamp of the last successful Remote Translog Store download. + */ + private final AtomicLong lastSuccessfulDownloadTimestamp; + + /** + * Total number of Remote Translog Store downloads that have been successful. + */ + private final AtomicLong totalDownloadsSucceeded; + + /** + * Total number of byte downloads to Remote Translog Store that have been successful. + */ + private final AtomicLong downloadBytesSucceeded; + + /** + * Total time spent on Remote Translog Store downloads. + */ + private final AtomicLong totalDownloadTimeInMillis; + + /** + * Provides moving average over the last N total size in bytes of translog files downloaded as part of Remote Translog Store download. + * N is window size. Wrapped with {@code AtomicReference} for dynamic changes in window size. 
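The checkTotal guard above maintains a simple invariant: terminal outcomes (failed + succeeded) never outnumber started operations. A runnable sketch with sample numbers (illustrative values only):

class CheckTotalSketch {
    public static void main(String[] args) {
        // Illustrative numbers: 10 uploads started, 3 failed, 5 succeeded.
        long started = 10, failed = 3, succeeded = 5;
        boolean okToAddTwo = started - (failed + succeeded + 2) >= 0;   // true: 3 + 5 + 2 <= 10
        boolean okToAddThree = started - (failed + succeeded + 3) >= 0; // false: would trip the assertion
        System.out.println(okToAddTwo + " " + okToAddThree);
    }
}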
+ */ + private final AtomicReference<MovingAverage> downloadBytesMovingAverageReference; + + /** + * This lock object is used for making sure we do not miss any data. + */ + private final Object downloadBytesMutex; + + /** + * Provides moving average over the last N download speed (in bytes/s) of translog files downloaded as part of Remote Translog Store download. + * N is window size. Wrapped with {@code AtomicReference} for dynamic changes in window size. + */ + private final AtomicReference<MovingAverage> downloadBytesPerSecMovingAverageReference; + + /** + * This lock object is used for making sure we do not miss any data. + */ + private final Object downloadBytesPerSecMutex; + + /** + * Provides moving average over the last N overall download time (in millis) as part of Remote Translog Store download. N is window size. + * Wrapped with {@code AtomicReference} for dynamic changes in window size. + */ + private final AtomicReference<MovingAverage> downloadTimeMsMovingAverageReference; + + /** + * This lock object is used for making sure we do not miss any data. + */ + private final Object downloadTimeMsMutex; + + public RemoteTranslogTransferTracker(ShardId shardId, int movingAverageWindowSize) { + super(shardId, movingAverageWindowSize); + + lastSuccessfulUploadTimestamp = new AtomicLong(0); + lastSuccessfulDownloadTimestamp = new AtomicLong(0); + totalDownloadsSucceeded = new AtomicLong(0); + downloadBytesSucceeded = new AtomicLong(0); + totalDownloadTimeInMillis = new AtomicLong(0); + downloadBytesMutex = new Object(); + downloadBytesMovingAverageReference = new AtomicReference<>(new MovingAverage(movingAverageWindowSize)); + downloadBytesPerSecMutex = new Object(); + downloadBytesPerSecMovingAverageReference = new AtomicReference<>(new MovingAverage(movingAverageWindowSize)); + downloadTimeMsMutex = new Object(); + downloadTimeMsMovingAverageReference = new AtomicReference<>(new MovingAverage(movingAverageWindowSize)); + } + + public long getLastSuccessfulUploadTimestamp() { + return lastSuccessfulUploadTimestamp.get(); + } + + public void setLastSuccessfulUploadTimestamp(long lastSuccessfulUploadTimestamp) { + this.lastSuccessfulUploadTimestamp.set(lastSuccessfulUploadTimestamp); + } + + /** + * Updates the window size for data collection. This also resets any data collected so far.
+ * + * @param updatedSize the updated size + */ + void updateMovingAverageWindowSize(int updatedSize) { + super.updateMovingAverageWindowSize(updatedSize); + updateMovingAverageWindowSize(updatedSize, downloadBytesMutex, downloadBytesMovingAverageReference); + updateMovingAverageWindowSize(updatedSize, downloadBytesPerSecMutex, downloadBytesPerSecMovingAverageReference); + updateMovingAverageWindowSize(updatedSize, downloadTimeMsMutex, downloadTimeMsMovingAverageReference); + } + + public long getTotalDownloadsSucceeded() { + return totalDownloadsSucceeded.get(); + } + + void incrementDownloadsSucceeded() { + totalDownloadsSucceeded.addAndGet(1); + } + + public long getDownloadBytesSucceeded() { + return downloadBytesSucceeded.get(); + } + + public void addDownloadBytesSucceeded(long count) { + downloadBytesSucceeded.addAndGet(count); + } + + public long getTotalDownloadTimeInMillis() { + return totalDownloadTimeInMillis.get(); + } + + public void addDownloadTimeInMillis(long duration) { + totalDownloadTimeInMillis.addAndGet(duration); + } + + public long getLastSuccessfulDownloadTimestamp() { + return lastSuccessfulDownloadTimestamp.get(); + } + + void setLastSuccessfulDownloadTimestamp(long lastSuccessfulDownloadTimestamp) { + this.lastSuccessfulDownloadTimestamp.set(lastSuccessfulDownloadTimestamp); + } + + boolean isDownloadBytesMovingAverageReady() { + return downloadBytesMovingAverageReference.get().isReady(); + } + + double getDownloadBytesMovingAverage() { + return downloadBytesMovingAverageReference.get().getAverage(); + } + + void updateDownloadBytesMovingAverage(long count) { + updateMovingAverage(count, downloadBytesMutex, downloadBytesMovingAverageReference); + } + + boolean isDownloadBytesPerSecMovingAverageReady() { + return downloadBytesPerSecMovingAverageReference.get().isReady(); + } + + double getDownloadBytesPerSecMovingAverage() { + return downloadBytesPerSecMovingAverageReference.get().getAverage(); + } + + void updateDownloadBytesPerSecMovingAverage(long speed) { + updateMovingAverage(speed, downloadBytesPerSecMutex, downloadBytesPerSecMovingAverageReference); + } + + boolean isDownloadTimeMovingAverageReady() { + return downloadTimeMsMovingAverageReference.get().isReady(); + } + + double getDownloadTimeMovingAverage() { + return downloadTimeMsMovingAverageReference.get().getAverage(); + } + + void updateDownloadTimeMovingAverage(long duration) { + updateMovingAverage(duration, downloadTimeMsMutex, downloadTimeMsMovingAverageReference); + } + + /** + * Record stats related to a download from Remote Translog Store + * @param prevDownloadBytesSucceeded Number of downloadBytesSucceeded in this tracker before the download was started + * @param prevDownloadTimeInMillis Amount of downloadTimeInMillis in this tracker before the download was started + */ + public void recordDownloadStats(long prevDownloadBytesSucceeded, long prevDownloadTimeInMillis) { + setLastSuccessfulDownloadTimestamp(System.currentTimeMillis()); + incrementDownloadsSucceeded(); + long bytesDownloaded = getDownloadBytesSucceeded() - prevDownloadBytesSucceeded; + updateDownloadBytesMovingAverage(bytesDownloaded); + long durationInMillis = getTotalDownloadTimeInMillis() - prevDownloadTimeInMillis; + updateDownloadTimeMovingAverage(durationInMillis); + if (durationInMillis > 0) { + updateDownloadBytesPerSecMovingAverage(bytesDownloaded * 1_000L / durationInMillis); + } + } + + /** + * Gets the tracker's state as seen in the stats API + * @return Stats object with the tracker's stats + */ + public 
RemoteTranslogTransferTracker.Stats stats() { + return new RemoteTranslogTransferTracker.Stats( + shardId, + lastSuccessfulUploadTimestamp.get(), + totalUploadsStarted.get(), + totalUploadsSucceeded.get(), + totalUploadsFailed.get(), + uploadBytesStarted.get(), + uploadBytesSucceeded.get(), + uploadBytesFailed.get(), + totalUploadTimeInMillis.get(), + uploadBytesMovingAverageReference.get().getAverage(), + uploadBytesPerSecMovingAverageReference.get().getAverage(), + uploadTimeMsMovingAverageReference.get().getAverage(), + lastSuccessfulDownloadTimestamp.get(), + totalDownloadsSucceeded.get(), + downloadBytesSucceeded.get(), + totalDownloadTimeInMillis.get(), + downloadBytesMovingAverageReference.get().getAverage(), + downloadBytesPerSecMovingAverageReference.get().getAverage(), + downloadTimeMsMovingAverageReference.get().getAverage() + ); + } + + @Override + public String toString() { + return "RemoteTranslogTransferStats{" + + "lastSuccessfulUploadTimestamp=" + + lastSuccessfulUploadTimestamp.get() + + "," + + "totalUploadsStarted=" + + totalUploadsStarted.get() + + "," + + "totalUploadsSucceeded=" + + totalUploadsSucceeded.get() + + "," + + "totalUploadsFailed=" + + totalUploadsFailed.get() + + "," + + "uploadBytesStarted=" + + uploadBytesStarted.get() + + "," + + "uploadBytesFailed=" + + uploadBytesFailed.get() + + "," + + "uploadBytesSucceeded=" + + uploadBytesSucceeded.get() + + "," + + "totalUploadTimeInMillis=" + + totalUploadTimeInMillis.get() + + "," + + "uploadBytesMovingAverage=" + + uploadBytesMovingAverageReference.get().getAverage() + + "," + + "uploadBytesPerSecMovingAverage=" + + uploadBytesPerSecMovingAverageReference.get().getAverage() + + "," + + "uploadTimeMovingAverage=" + + uploadTimeMsMovingAverageReference.get().getAverage() + + "," + + "lastSuccessfulDownloadTimestamp=" + + lastSuccessfulDownloadTimestamp.get() + + "," + + "totalDownloadsSucceeded=" + + totalDownloadsSucceeded.get() + + "," + + "downloadBytesSucceeded=" + + downloadBytesSucceeded.get() + + "," + + "totalDownloadTimeInMillis=" + + totalDownloadTimeInMillis.get() + + "," + + "downloadBytesMovingAverage=" + + downloadBytesMovingAverageReference.get().getAverage() + + "," + + "downloadBytesPerSecMovingAverage=" + + downloadBytesPerSecMovingAverageReference.get().getAverage() + + "," + + "downloadTimeMovingAverage=" + + downloadTimeMsMovingAverageReference.get().getAverage() + + "," + + "}"; + } + + /** + * Represents the tracker's state as seen in the stats API. + * + * @opensearch.api + */ + @PublicApi(since = "2.10.0") + public static class Stats implements Writeable { + + final ShardId shardId; + + /** + * Epoch timestamp of the last successful Remote Translog Store upload. + */ + public final long lastSuccessfulUploadTimestamp; + + /** + * Total number of Remote Translog Store uploads that have been started. + */ + public final long totalUploadsStarted; + + /** + * Total number of Remote Translog Store uploads that have failed. + */ + public final long totalUploadsFailed; + + /** + * Total number of Remote Translog Store uploads that have been successful. + */ + public final long totalUploadsSucceeded; + + /** + * Total number of byte uploads to Remote Translog Store that have been started. + */ + public final long uploadBytesStarted; + + /** + * Total number of byte uploads to Remote Translog Store that have failed. + */ + public final long uploadBytesFailed; + + /** + * Total number of byte uploads to Remote Translog Store that have been successful. + */ + public final long uploadBytesSucceeded; + + /** + * Total time spent on Remote Translog Store uploads.
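The bytes-per-second arithmetic in recordDownloadStats above is easiest to see with concrete numbers; this sketch uses illustrative values and assumes same-package access to the tracker.

class DownloadStatsSketch {
    // Illustrative numbers for recordDownloadStats above.
    static void demo(RemoteTranslogTransferTracker tracker) {
        long prevBytes = tracker.getDownloadBytesSucceeded();
        long prevTimeMs = tracker.getTotalDownloadTimeInMillis();
        // ...a translog download happens; the transfer layer accounts for it...
        tracker.addDownloadBytesSucceeded(5_000_000);
        tracker.addDownloadTimeInMillis(250);
        // Records 5_000_000 bytes, 250 ms, and 5_000_000 * 1_000 / 250
        // = 20_000_000 bytes/s into the respective moving averages.
        tracker.recordDownloadStats(prevBytes, prevTimeMs);
    }
}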
+ */ + public final long totalUploadTimeInMillis; + + /** + * Size of a Remote Translog Store upload in bytes. + */ + public final double uploadBytesMovingAverage; + + /** + * Speed of a Remote Translog Store upload in bytes-per-second. + */ + public final double uploadBytesPerSecMovingAverage; + + /** + * Time taken by a Remote Translog Store upload. + */ + public final double uploadTimeMovingAverage; + + /** + * Epoch timestamp of the last successful Remote Translog Store download. + */ + public final long lastSuccessfulDownloadTimestamp; + + /** + * Total number of Remote Translog Store downloads that have been successful. + */ + public final long totalDownloadsSucceeded; + + /** + * Total number of byte downloads from Remote Translog Store that have been successful. + */ + public final long downloadBytesSucceeded; + + /** + * Total time spent on Remote Translog Store downloads. + */ + public final long totalDownloadTimeInMillis; + + /** + * Size of a Remote Translog Store download in bytes. + */ + public final double downloadBytesMovingAverage; + + /** + * Speed of a Remote Translog Store download in bytes-per-second. + */ + public final double downloadBytesPerSecMovingAverage; + + /** + * Time taken by a Remote Translog Store download. + */ + public final double downloadTimeMovingAverage; + + public Stats( + ShardId shardId, + long lastSuccessfulUploadTimestamp, + long totalUploadsStarted, + long totalUploadsSucceeded, + long totalUploadsFailed, + long uploadBytesStarted, + long uploadBytesSucceeded, + long uploadBytesFailed, + long totalUploadTimeInMillis, + double uploadBytesMovingAverage, + double uploadBytesPerSecMovingAverage, + double uploadTimeMovingAverage, + long lastSuccessfulDownloadTimestamp, + long totalDownloadsSucceeded, + long downloadBytesSucceeded, + long totalDownloadTimeInMillis, + double downloadBytesMovingAverage, + double downloadBytesPerSecMovingAverage, + double downloadTimeMovingAverage + ) { + this.shardId = shardId; + + this.lastSuccessfulUploadTimestamp = lastSuccessfulUploadTimestamp; + this.totalUploadsStarted = totalUploadsStarted; + this.totalUploadsFailed = totalUploadsFailed; + this.totalUploadsSucceeded = totalUploadsSucceeded; + this.uploadBytesStarted = uploadBytesStarted; + this.uploadBytesFailed = uploadBytesFailed; + this.uploadBytesSucceeded = uploadBytesSucceeded; + this.totalUploadTimeInMillis = totalUploadTimeInMillis; + this.uploadBytesMovingAverage = uploadBytesMovingAverage; + this.uploadBytesPerSecMovingAverage = uploadBytesPerSecMovingAverage; + this.uploadTimeMovingAverage = uploadTimeMovingAverage; + + this.lastSuccessfulDownloadTimestamp = lastSuccessfulDownloadTimestamp; + this.totalDownloadsSucceeded = totalDownloadsSucceeded; + this.downloadBytesSucceeded = downloadBytesSucceeded; + this.totalDownloadTimeInMillis = totalDownloadTimeInMillis; + this.downloadBytesMovingAverage = downloadBytesMovingAverage; + this.downloadBytesPerSecMovingAverage = downloadBytesPerSecMovingAverage; + this.downloadTimeMovingAverage = downloadTimeMovingAverage; + } + + public Stats(StreamInput in) throws IOException { + this.shardId = new ShardId(in); + + this.lastSuccessfulUploadTimestamp = in.readVLong(); + this.totalUploadsStarted = in.readVLong(); + this.totalUploadsFailed = in.readVLong(); + this.totalUploadsSucceeded = in.readVLong(); + this.uploadBytesStarted = in.readVLong(); + this.uploadBytesFailed = in.readVLong(); + this.uploadBytesSucceeded = in.readVLong(); + this.totalUploadTimeInMillis = in.readVLong(); + this.uploadBytesMovingAverage 
= in.readDouble(); + this.uploadBytesPerSecMovingAverage = in.readDouble(); + this.uploadTimeMovingAverage = in.readDouble(); + + this.lastSuccessfulDownloadTimestamp = in.readVLong(); + this.totalDownloadsSucceeded = in.readVLong(); + this.downloadBytesSucceeded = in.readVLong(); + this.totalDownloadTimeInMillis = in.readVLong(); + this.downloadBytesMovingAverage = in.readDouble(); + this.downloadBytesPerSecMovingAverage = in.readDouble(); + this.downloadTimeMovingAverage = in.readDouble(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + shardId.writeTo(out); + + out.writeVLong(lastSuccessfulUploadTimestamp); + out.writeVLong(totalUploadsStarted); + out.writeVLong(totalUploadsFailed); + out.writeVLong(totalUploadsSucceeded); + out.writeVLong(uploadBytesStarted); + out.writeVLong(uploadBytesFailed); + out.writeVLong(uploadBytesSucceeded); + out.writeVLong(totalUploadTimeInMillis); + out.writeDouble(uploadBytesMovingAverage); + out.writeDouble(uploadBytesPerSecMovingAverage); + out.writeDouble(uploadTimeMovingAverage); + + out.writeVLong(lastSuccessfulDownloadTimestamp); + out.writeVLong(totalDownloadsSucceeded); + out.writeVLong(downloadBytesSucceeded); + out.writeVLong(totalDownloadTimeInMillis); + out.writeDouble(downloadBytesMovingAverage); + out.writeDouble(downloadBytesPerSecMovingAverage); + out.writeDouble(downloadTimeMovingAverage); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null || getClass() != obj.getClass()) return false; + RemoteTranslogTransferTracker.Stats other = (RemoteTranslogTransferTracker.Stats) obj; + + return this.shardId.equals(other.shardId) + && this.lastSuccessfulUploadTimestamp == other.lastSuccessfulUploadTimestamp + && this.totalUploadsStarted == other.totalUploadsStarted + && this.totalUploadsFailed == other.totalUploadsFailed + && this.totalUploadsSucceeded == other.totalUploadsSucceeded + && this.uploadBytesStarted == other.uploadBytesStarted + && this.uploadBytesFailed == other.uploadBytesFailed + && this.uploadBytesSucceeded == other.uploadBytesSucceeded + && this.totalUploadTimeInMillis == other.totalUploadTimeInMillis + && Double.compare(this.uploadBytesMovingAverage, other.uploadBytesMovingAverage) == 0 + && Double.compare(this.uploadBytesPerSecMovingAverage, other.uploadBytesPerSecMovingAverage) == 0 + && Double.compare(this.uploadTimeMovingAverage, other.uploadTimeMovingAverage) == 0 + && this.lastSuccessfulDownloadTimestamp == other.lastSuccessfulDownloadTimestamp + && this.totalDownloadsSucceeded == other.totalDownloadsSucceeded + && this.downloadBytesSucceeded == other.downloadBytesSucceeded + && this.totalDownloadTimeInMillis == other.totalDownloadTimeInMillis + && Double.compare(this.downloadBytesMovingAverage, other.downloadBytesMovingAverage) == 0 + && Double.compare(this.downloadBytesPerSecMovingAverage, other.downloadBytesPerSecMovingAverage) == 0 + && Double.compare(this.downloadTimeMovingAverage, other.downloadTimeMovingAverage) == 0; + } + + @Override + public int hashCode() { + return Objects.hash( + shardId.toString(), + lastSuccessfulUploadTimestamp, + totalUploadsStarted, + totalUploadsFailed, + totalUploadsSucceeded, + uploadBytesStarted, + uploadBytesFailed, + uploadBytesSucceeded, + totalUploadTimeInMillis, + uploadBytesMovingAverage, + uploadBytesPerSecMovingAverage, + uploadTimeMovingAverage, + lastSuccessfulDownloadTimestamp, + totalDownloadsSucceeded, + downloadBytesSucceeded, + totalDownloadTimeInMillis, + downloadBytesMovingAverage, + 
downloadBytesPerSecMovingAverage, + downloadTimeMovingAverage + ); + } + } + + /** + * Checks whether the stats in this tracker and the stats contained in the given stats object are the same + * @param other Stats object to compare this tracker against + * @return true if the stats are the same, false otherwise + */ + boolean hasSameStatsAs(RemoteTranslogTransferTracker.Stats other) { + return this.stats().equals(other); + } +} diff --git a/server/src/main/java/org/opensearch/index/search/MatchQuery.java b/server/src/main/java/org/opensearch/index/search/MatchQuery.java index d38c9d365107b..ec6755ea25703 100644 --- a/server/src/main/java/org/opensearch/index/search/MatchQuery.java +++ b/server/src/main/java/org/opensearch/index/search/MatchQuery.java @@ -58,18 +58,20 @@ import org.apache.lucene.util.QueryBuilder; import org.apache.lucene.util.graph.GraphTokenStreamFiniteStrings; import org.opensearch.OpenSearchException; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.lucene.search.Queries; import org.opensearch.common.lucene.search.SpanBooleanQueryRewriteWithMaxClause; import org.opensearch.common.unit.Fuzziness; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.MatchOnlyTextFieldMapper; import org.opensearch.index.mapper.TextFieldMapper; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.query.support.QueryParsers; + import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -700,7 +702,7 @@ private Query analyzeMultiBoolean(String field, TokenStream stream, BooleanClaus protected Query analyzePhrase(String field, TokenStream stream, int slop) throws IOException { try { checkForPositions(field); - return fieldType.phraseQuery(stream, slop, enablePositionIncrements); + return fieldType.phraseQuery(stream, slop, enablePositionIncrements, context); } catch (IllegalArgumentException | IllegalStateException e) { if (lenient) { return newLenientFieldQuery(field, e); } @@ -713,7 +715,7 @@ protected Query analyzePhrase(String field, TokenStream stream, int slop) throws protected Query analyzeMultiPhrase(String field, TokenStream stream, int slop) throws IOException { try { checkForPositions(field); - return fieldType.multiPhraseQuery(stream, slop, enablePositionIncrements); + return fieldType.multiPhraseQuery(stream, slop, enablePositionIncrements, context); } catch (IllegalArgumentException | IllegalStateException e) { if (lenient) { return newLenientFieldQuery(field, e); } @@ -727,7 +729,7 @@ private Query analyzePhrasePrefix(String field, TokenStream stream, int slop, in if (positionCount > 1) { checkForPositions(field); } - return fieldType.phrasePrefixQuery(stream, slop, maxExpansions); + return fieldType.phrasePrefixQuery(stream, slop, maxExpansions, context); } catch (IllegalArgumentException | IllegalStateException e) { if (lenient) { return newLenientFieldQuery(field, e); } @@ -752,9 +754,9 @@ private Query analyzeGraphBoolean(String field, TokenStream source, BooleanClaus lastState = end; final Query queryPos; boolean usePrefix = isPrefix && end == -1; - /** - * check if the
GraphTokenStreamFiniteStrings graph is empty - * return empty BooleanQuery result + /* + Check if the GraphTokenStreamFiniteStrings graph is empty and, + if so, return an empty BooleanQuery result. */ Iterator<TokenStream> graphIt = graph.getFiniteStrings(); if (!graphIt.hasNext()) { @@ -886,6 +888,9 @@ private Query analyzeGraphPhrase(TokenStream source, String field, Type type, in private void checkForPositions(String field) { if (fieldType.getTextSearchInfo().hasPositions() == false) { + if (fieldType instanceof MatchOnlyTextFieldMapper.MatchOnlyTextFieldType) { + return; + } throw new IllegalStateException("field:[" + field + "] was indexed without position data; cannot run PhraseQuery"); } } diff --git a/server/src/main/java/org/opensearch/index/search/MultiMatchQuery.java b/server/src/main/java/org/opensearch/index/search/MultiMatchQuery.java index 241f05af2c512..8c0c87e8c9d0c 100644 --- a/server/src/main/java/org/opensearch/index/search/MultiMatchQuery.java +++ b/server/src/main/java/org/opensearch/index/search/MultiMatchQuery.java @@ -248,7 +248,7 @@ protected Query newPrefixQuery(Term term) { protected Query analyzePhrase(String field, TokenStream stream, int slop) throws IOException { List<Query> disjunctions = new ArrayList<>(); for (FieldAndBoost fieldType : blendedFields) { - Query query = fieldType.fieldType.phraseQuery(stream, slop, enablePositionIncrements); + Query query = fieldType.fieldType.phraseQuery(stream, slop, enablePositionIncrements, context); if (fieldType.boost != 1f) { query = new BoostQuery(query, fieldType.boost); } @@ -261,7 +261,7 @@ protected Query analyzePhrase(String field, TokenStream stream, int slop) throws protected Query analyzeMultiPhrase(String field, TokenStream stream, int slop) throws IOException { List<Query> disjunctions = new ArrayList<>(); for (FieldAndBoost fieldType : blendedFields) { - Query query = fieldType.fieldType.multiPhraseQuery(stream, slop, enablePositionIncrements); + Query query = fieldType.fieldType.multiPhraseQuery(stream, slop, enablePositionIncrements, context); if (fieldType.boost != 1f) { query = new BoostQuery(query, fieldType.boost); } diff --git a/server/src/main/java/org/opensearch/index/search/NestedHelper.java b/server/src/main/java/org/opensearch/index/search/NestedHelper.java index ae91f66c5aebd..50e7e41b95be7 100644 --- a/server/src/main/java/org/opensearch/index/search/NestedHelper.java +++ b/server/src/main/java/org/opensearch/index/search/NestedHelper.java @@ -34,6 +34,7 @@ import org.apache.lucene.index.PrefixCodedTerms; import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.ConstantScoreQuery; @@ -44,7 +45,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.search.TermQuery; -import org.apache.lucene.search.BooleanClause.Occur; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.ObjectMapper; diff --git a/server/src/main/java/org/opensearch/index/search/OpenSearchToParentBlockJoinQuery.java b/server/src/main/java/org/opensearch/index/search/OpenSearchToParentBlockJoinQuery.java index 770301629d964..cd28d8b6ef04e 100644 --- a/server/src/main/java/org/opensearch/index/search/OpenSearchToParentBlockJoinQuery.java +++ b/server/src/main/java/org/opensearch/index/search/OpenSearchToParentBlockJoinQuery.java @@ -32,7 +32,6 @@ package org.opensearch.index.search;
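// ---- Editorial sketch, not part of the diff ----
// The OpenSearchToParentBlockJoinQuery hunk below migrates from the deprecated
// Lucene Query#rewrite(IndexReader) to the IndexSearcher-based signature. A minimal,
// self-contained illustration of the same delegating-rewrite pattern under that
// contract; the class name and delegation behavior are hypothetical, not from the PR:

import java.io.IOException;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryVisitor;

final class DelegatingQuerySketch extends Query {
    private final Query inner;

    DelegatingQuerySketch(Query inner) {
        this.inner = inner;
    }

    @Override
    public Query rewrite(IndexSearcher searcher) throws IOException {
        // Rewrite the delegate against the searcher (not the reader) and
        // re-wrap only when the delegate actually changed.
        Query rewritten = inner.rewrite(searcher);
        return rewritten == inner ? super.rewrite(searcher) : new DelegatingQuerySketch(rewritten);
    }

    @Override
    public void visit(QueryVisitor visitor) {
        inner.visit(visitor);
    }

    @Override
    public String toString(String field) {
        return "sketch(" + inner.toString(field) + ")";
    }

    @Override
    public boolean equals(Object obj) {
        return sameClassAs(obj) && inner.equals(((DelegatingQuerySketch) obj).inner);
    }

    @Override
    public int hashCode() {
        return classHash() + inner.hashCode();
    }
}
// ---- End editorial sketch ----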
-import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; @@ -82,8 +81,8 @@ public ScoreMode getScoreMode() { } @Override - public Query rewrite(IndexReader reader) throws IOException { - Query innerRewrite = query.rewrite(reader); + public Query rewrite(IndexSearcher searcher) throws IOException { + Query innerRewrite = query.rewrite(searcher); if (innerRewrite != query) { // Right now ToParentBlockJoinQuery always rewrites to a ToParentBlockJoinQuery // so the else block will never be used. It is useful in the case that @@ -97,7 +96,7 @@ public Query rewrite(IndexReader reader) throws IOException { return innerRewrite; } } - return super.rewrite(reader); + return super.rewrite(searcher); } @Override diff --git a/server/src/main/java/org/opensearch/index/search/comparators/HalfFloatComparator.java b/server/src/main/java/org/opensearch/index/search/comparators/HalfFloatComparator.java new file mode 100644 index 0000000000000..b2e2ba8001b88 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/search/comparators/HalfFloatComparator.java @@ -0,0 +1,112 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.search.comparators; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.sandbox.document.HalfFloatPoint; +import org.apache.lucene.search.LeafFieldComparator; +import org.apache.lucene.search.Pruning; +import org.apache.lucene.search.comparators.NumericComparator; + +import java.io.IOException; + +/** + * The comparator for the half_float numeric type. + * Comparator based on {@link Float#compare} for {@code numHits}. This comparator provides + * skipping functionality: an iterator that can skip over non-competitive documents. + */ +public class HalfFloatComparator extends NumericComparator<Float> { + private final float[] values; + protected float topValue; + protected float bottom; + + public HalfFloatComparator(int numHits, String field, Float missingValue, boolean reverse, Pruning pruning) { + super(field, missingValue != null ? 
missingValue : 0.0f, reverse, pruning, HalfFloatPoint.BYTES); + values = new float[numHits]; + } + + @Override + public int compare(int slot1, int slot2) { + return Float.compare(values[slot1], values[slot2]); + } + + @Override + public void setTopValue(Float value) { + super.setTopValue(value); + topValue = value; + } + + @Override + public Float value(int slot) { + return Float.valueOf(values[slot]); + } + + @Override + public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { + return new HalfFloatLeafComparator(context); + } + + /** Leaf comparator for {@link HalfFloatComparator} that provides skipping functionality */ + public class HalfFloatLeafComparator extends NumericLeafComparator { + + public HalfFloatLeafComparator(LeafReaderContext context) throws IOException { + super(context); + } + + private float getValueForDoc(int doc) throws IOException { + if (docValues.advanceExact(doc)) { + return Float.intBitsToFloat((int) docValues.longValue()); + } else { + return missingValue; + } + } + + @Override + public void setBottom(int slot) throws IOException { + bottom = values[slot]; + super.setBottom(slot); + } + + @Override + public int compareBottom(int doc) throws IOException { + return Float.compare(bottom, getValueForDoc(doc)); + } + + @Override + public int compareTop(int doc) throws IOException { + return Float.compare(topValue, getValueForDoc(doc)); + } + + @Override + public void copy(int slot, int doc) throws IOException { + values[slot] = getValueForDoc(doc); + super.copy(slot, doc); + } + + @Override + protected int compareMissingValueWithBottomValue() { + return Float.compare(missingValue, bottom); + } + + @Override + protected int compareMissingValueWithTopValue() { + return Float.compare(missingValue, topValue); + } + + @Override + protected void encodeBottom(byte[] packedValue) { + HalfFloatPoint.encodeDimension(bottom, packedValue, 0); + } + + @Override + protected void encodeTop(byte[] packedValue) { + HalfFloatPoint.encodeDimension(topValue, packedValue, 0); + } + } +} diff --git a/server/src/main/java/org/opensearch/index/search/comparators/UnsignedLongComparator.java b/server/src/main/java/org/opensearch/index/search/comparators/UnsignedLongComparator.java index 78b4a5f04c955..2b6bd9933e553 100644 --- a/server/src/main/java/org/opensearch/index/search/comparators/UnsignedLongComparator.java +++ b/server/src/main/java/org/opensearch/index/search/comparators/UnsignedLongComparator.java @@ -11,6 +11,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.sandbox.document.BigIntegerPoint; import org.apache.lucene.search.LeafFieldComparator; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.comparators.NumericComparator; import org.opensearch.common.Numbers; @@ -23,8 +24,8 @@ public class UnsignedLongComparator extends NumericComparator<BigInteger> { protected BigInteger topValue; protected BigInteger bottom; - public UnsignedLongComparator(int numHits, String field, BigInteger missingValue, boolean reverse, boolean enableSkipping) { - super(field, missingValue != null ? missingValue : Numbers.MIN_UNSIGNED_LONG_VALUE, reverse, enableSkipping, BigIntegerPoint.BYTES); + public UnsignedLongComparator(int numHits, String field, BigInteger missingValue, boolean reverse, Pruning pruning) { + super(field, missingValue != null ? 
missingValue : Numbers.MIN_UNSIGNED_LONG_VALUE, reverse, pruning, BigIntegerPoint.BYTES); values = new BigInteger[numHits]; } @@ -86,14 +87,6 @@ public void copy(int slot, int doc) throws IOException { super.copy(slot, doc); } - @Override - protected boolean isMissingValueCompetitive() { - int result = missingValue.compareTo(bottom); - // in reverse (desc) sort missingValue is competitive when it's greater or equal to bottom, - // in asc sort missingValue is competitive when it's smaller or equal to bottom - return reverse ? (result >= 0) : (result <= 0); - } - @Override protected void encodeBottom(byte[] packedValue) { BigIntegerPoint.encodeDimension(bottom, packedValue, 0); @@ -103,5 +96,15 @@ protected void encodeBottom(byte[] packedValue) { protected void encodeTop(byte[] packedValue) { BigIntegerPoint.encodeDimension(topValue, packedValue, 0); } + + @Override + protected int compareMissingValueWithBottomValue() { + return missingValue.compareTo(bottom); + } + + @Override + protected int compareMissingValueWithTopValue() { + return missingValue.compareTo(topValue); + } } } diff --git a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java index 4bae210f183c2..576e00f8f30d1 100644 --- a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java +++ b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java @@ -33,13 +33,16 @@ package org.opensearch.index.search.stats; import org.opensearch.Version; +import org.opensearch.action.search.SearchPhaseName; +import org.opensearch.action.search.SearchRequestStats; import org.opensearch.common.Nullable; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -52,21 +55,98 @@ /** * Encapsulates stats for search time * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SearchStats implements Writeable, ToXContentFragment { /** - * Statistics for search + * Holds statistic values for a particular phase. 
* - * @opensearch.internal + * @opensearch.api + */ + @PublicApi(since = "1.0.0") + public static class PhaseStatsLongHolder implements Writeable { + + long current; + long total; + long timeInMillis; + + public long getCurrent() { + return current; + } + + public long getTotal() { + return total; + } + + public long getTimeInMillis() { + return timeInMillis; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(current); + out.writeVLong(total); + out.writeVLong(timeInMillis); + } + + PhaseStatsLongHolder() { + this(0, 0, 0); + } + + PhaseStatsLongHolder(long current, long total, long timeInMillis) { + this.current = current; + this.total = total; + this.timeInMillis = timeInMillis; + } + + PhaseStatsLongHolder(StreamInput in) throws IOException { + this.current = in.readVLong(); + this.total = in.readVLong(); + this.timeInMillis = in.readVLong(); + } + + } + + /** + * Holds requests stats for different phases. + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") + public static class RequestStatsLongHolder { + + Map<String, PhaseStatsLongHolder> requestStatsHolder = new HashMap<>(); + + public Map<String, PhaseStatsLongHolder> getRequestStatsHolder() { + return requestStatsHolder; + } + + RequestStatsLongHolder() { + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + requestStatsHolder.put(searchPhaseName.getName(), new PhaseStatsLongHolder()); + } + } + } + + /** + * Holder of statistics values + * + * @opensearch.api + */ + @PublicApi(since = "1.0.0") public static class Stats implements Writeable, ToXContentFragment { private long queryCount; private long queryTimeInMillis; private long queryCurrent; + private long concurrentQueryCount; + private long concurrentQueryTimeInMillis; + private long concurrentQueryCurrent; + private long queryConcurrency; + private long fetchCount; private long fetchTimeInMillis; private long fetchCurrent; @@ -83,6 +163,13 @@ public static class Stats implements Writeable, ToXContentFragment { private long pitTimeInMillis; private long pitCurrent; + @Nullable + private RequestStatsLongHolder requestStatsLongHolder; + + public RequestStatsLongHolder getRequestStatsLongHolder() { + return requestStatsLongHolder; + } + private Stats() { // for internal use, initializes all counts to 0 } @@ -91,6 +178,10 @@ public Stats( long queryCount, long queryTimeInMillis, long queryCurrent, + long concurrentQueryCount, + long concurrentQueryTimeInMillis, + long concurrentQueryCurrent, + long queryConcurrency, long fetchCount, long fetchTimeInMillis, long fetchCurrent, @@ -104,10 +195,16 @@ public Stats( long suggestTimeInMillis, long suggestCurrent ) { + this.requestStatsLongHolder = new RequestStatsLongHolder(); this.queryCount = queryCount; this.queryTimeInMillis = queryTimeInMillis; this.queryCurrent = queryCurrent; + this.concurrentQueryCount = concurrentQueryCount; + this.concurrentQueryTimeInMillis = concurrentQueryTimeInMillis; + this.concurrentQueryCurrent = concurrentQueryCurrent; + this.queryConcurrency = queryConcurrency; + this.fetchCount = fetchCount; this.fetchTimeInMillis = fetchTimeInMillis; this.fetchCurrent = fetchCurrent; @@ -147,6 +244,17 @@ private Stats(StreamInput in) throws IOException { pitTimeInMillis = in.readVLong(); pitCurrent = in.readVLong(); } + + if (in.getVersion().onOrAfter(Version.V_2_11_0)) { + this.requestStatsLongHolder = new RequestStatsLongHolder(); + requestStatsLongHolder.requestStatsHolder = in.readMap(StreamInput::readString, PhaseStatsLongHolder::new); + } + if 
(in.getVersion().onOrAfter(Version.V_2_10_0)) { + concurrentQueryCount = in.readVLong(); + concurrentQueryTimeInMillis = in.readVLong(); + concurrentQueryCurrent = in.readVLong(); + queryConcurrency = in.readVLong(); + } } public void add(Stats stats) { @@ -154,6 +262,11 @@ public void add(Stats stats) { queryTimeInMillis += stats.queryTimeInMillis; queryCurrent += stats.queryCurrent; + concurrentQueryCount += stats.concurrentQueryCount; + concurrentQueryTimeInMillis += stats.concurrentQueryTimeInMillis; + concurrentQueryCurrent += stats.concurrentQueryCurrent; + queryConcurrency += stats.queryConcurrency; + fetchCount += stats.fetchCount; fetchTimeInMillis += stats.fetchTimeInMillis; fetchCurrent += stats.fetchCurrent; @@ -175,6 +288,9 @@ public void addForClosingShard(Stats stats) { queryCount += stats.queryCount; queryTimeInMillis += stats.queryTimeInMillis; + concurrentQueryCount += stats.concurrentQueryCount; + concurrentQueryTimeInMillis += stats.concurrentQueryTimeInMillis; + fetchCount += stats.fetchCount; fetchTimeInMillis += stats.fetchTimeInMillis; @@ -189,6 +305,7 @@ public void addForClosingShard(Stats stats) { pitCount += stats.pitCount; pitTimeInMillis += stats.pitTimeInMillis; pitCurrent += stats.pitCurrent; + queryConcurrency += stats.queryConcurrency; } public long getQueryCount() { @@ -207,6 +324,30 @@ public long getQueryCurrent() { return queryCurrent; } + public long getConcurrentQueryCount() { + return concurrentQueryCount; + } + + public TimeValue getConcurrentQueryTime() { + return new TimeValue(concurrentQueryTimeInMillis); + } + + public double getConcurrentAvgSliceCount() { + if (concurrentQueryCount == 0) { + return 0; + } else { + return queryConcurrency / (double) concurrentQueryCount; + } + } + + public long getConcurrentQueryTimeInMillis() { + return concurrentQueryTimeInMillis; + } + + public long getConcurrentQueryCurrent() { + return concurrentQueryCurrent; + } + public long getFetchCount() { return fetchCount; } @@ -298,6 +439,24 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(pitTimeInMillis); out.writeVLong(pitCurrent); } + + if (out.getVersion().onOrAfter(Version.V_2_11_0)) { + if (requestStatsLongHolder == null) { + requestStatsLongHolder = new RequestStatsLongHolder(); + } + out.writeMap( + requestStatsLongHolder.getRequestStatsHolder(), + StreamOutput::writeString, + (stream, stats) -> stats.writeTo(stream) + ); + } + + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { + out.writeVLong(concurrentQueryCount); + out.writeVLong(concurrentQueryTimeInMillis); + out.writeVLong(concurrentQueryCurrent); + out.writeVLong(queryConcurrency); + } } @Override @@ -306,6 +465,11 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.humanReadableField(Fields.QUERY_TIME_IN_MILLIS, Fields.QUERY_TIME, getQueryTime()); builder.field(Fields.QUERY_CURRENT, queryCurrent); + builder.field(Fields.CONCURRENT_QUERY_TOTAL, concurrentQueryCount); + builder.humanReadableField(Fields.CONCURRENT_QUERY_TIME_IN_MILLIS, Fields.CONCURRENT_QUERY_TIME, getConcurrentQueryTime()); + builder.field(Fields.CONCURRENT_QUERY_CURRENT, concurrentQueryCurrent); + builder.field(Fields.CONCURRENT_AVG_SLICE_COUNT, getConcurrentAvgSliceCount()); + builder.field(Fields.FETCH_TOTAL, fetchCount); builder.humanReadableField(Fields.FETCH_TIME_IN_MILLIS, Fields.FETCH_TIME, getFetchTime()); builder.field(Fields.FETCH_CURRENT, fetchCurrent); @@ -322,6 +486,22 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) 
throws builder.humanReadableField(Fields.SUGGEST_TIME_IN_MILLIS, Fields.SUGGEST_TIME, getSuggestTime()); builder.field(Fields.SUGGEST_CURRENT, suggestCurrent); + if (requestStatsLongHolder != null) { + builder.startObject(Fields.REQUEST); + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + PhaseStatsLongHolder statsLongHolder = requestStatsLongHolder.requestStatsHolder.get(searchPhaseName.getName()); + if (statsLongHolder == null) { + continue; + } + builder.startObject(searchPhaseName.getName()); + builder.humanReadableField(Fields.TIME_IN_MILLIS, Fields.TIME, new TimeValue(statsLongHolder.timeInMillis)); + builder.field(Fields.CURRENT, statsLongHolder.current); + builder.field(Fields.TOTAL, statsLongHolder.total); + builder.endObject(); + } + builder.endObject(); + } return builder; } } @@ -336,6 +516,24 @@ public SearchStats() { totalStats = new Stats(); } + // Set the different Request Stats fields in here + public void setSearchRequestStats(SearchRequestStats searchRequestStats) { + if (totalStats.requestStatsLongHolder == null) { + totalStats.requestStatsLongHolder = new RequestStatsLongHolder(); + } + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + totalStats.requestStatsLongHolder.requestStatsHolder.put( + searchPhaseName.getName(), + new PhaseStatsLongHolder( + searchRequestStats.getPhaseCurrent(searchPhaseName), + searchRequestStats.getPhaseTotal(searchPhaseName), + searchRequestStats.getPhaseMetric(searchPhaseName) + ) + ); + } + } + public SearchStats(Stats totalStats, long openContexts, @Nullable Map<String, Stats> groupStats) { this.totalStats = totalStats; this.openContexts = openContexts; @@ -414,7 +612,7 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par @Override public String toString() { - return Strings.toString(XContentType.JSON, this, true, true); + return Strings.toString(MediaTypeRegistry.JSON, this, true, true); } /** @@ -430,6 +628,11 @@ static final class Fields { static final String QUERY_TIME = "query_time"; static final String QUERY_TIME_IN_MILLIS = "query_time_in_millis"; static final String QUERY_CURRENT = "query_current"; + static final String CONCURRENT_QUERY_TOTAL = "concurrent_query_total"; + static final String CONCURRENT_QUERY_TIME = "concurrent_query_time"; + static final String CONCURRENT_QUERY_TIME_IN_MILLIS = "concurrent_query_time_in_millis"; + static final String CONCURRENT_QUERY_CURRENT = "concurrent_query_current"; + static final String CONCURRENT_AVG_SLICE_COUNT = "concurrent_avg_slice_count"; static final String FETCH_TOTAL = "fetch_total"; static final String FETCH_TIME = "fetch_time"; static final String FETCH_TIME_IN_MILLIS = "fetch_time_in_millis"; @@ -446,6 +649,12 @@ static final class Fields { static final String SUGGEST_TIME = "suggest_time"; static final String SUGGEST_TIME_IN_MILLIS = "suggest_time_in_millis"; static final String SUGGEST_CURRENT = "suggest_current"; + static final String REQUEST = "request"; + static final String TIME_IN_MILLIS = "time_in_millis"; + static final String TIME = "time"; + static final String CURRENT = "current"; + static final String TOTAL = "total"; + } @Override diff --git a/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java b/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java index 6d0eb3a5949ca..99e3f8465c5db 100644 --- a/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java +++ 
b/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java @@ -36,7 +36,7 @@ import org.opensearch.common.metrics.CounterMetric; import org.opensearch.common.metrics.MeanMetric; import org.opensearch.common.regex.Regex; -import org.opensearch.common.util.CollectionUtils; +import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.index.shard.SearchOperationListener; import org.opensearch.search.internal.ReaderContext; import org.opensearch.search.internal.SearchContext; @@ -91,6 +91,9 @@ public void onPreQueryPhase(SearchContext searchContext) { statsHolder.suggestCurrent.inc(); } else { statsHolder.queryCurrent.inc(); + if (searchContext.shouldUseConcurrentSearch()) { + statsHolder.concurrentQueryCurrent.inc(); + } } }); } @@ -104,6 +107,10 @@ public void onFailedQueryPhase(SearchContext searchContext) { } else { statsHolder.queryCurrent.dec(); assert statsHolder.queryCurrent.count() >= 0; + if (searchContext.shouldUseConcurrentSearch()) { + statsHolder.concurrentQueryCurrent.dec(); + assert statsHolder.concurrentQueryCurrent.count() >= 0; + } } }); } @@ -119,6 +126,13 @@ public void onQueryPhase(SearchContext searchContext, long tookInNanos) { statsHolder.queryMetric.inc(tookInNanos); statsHolder.queryCurrent.dec(); assert statsHolder.queryCurrent.count() >= 0; + if (searchContext.shouldUseConcurrentSearch()) { + statsHolder.concurrentQueryMetric.inc(tookInNanos); + statsHolder.concurrentQueryCurrent.dec(); + assert statsHolder.concurrentQueryCurrent.count() >= 0; + assert searchContext.searcher().getSlices() != null; + statsHolder.queryConcurrencyMetric.inc(searchContext.searcher().getSlices().length); + } } }); } @@ -206,6 +220,8 @@ public void onFreePitContext(ReaderContext readerContext) { */ static final class StatsHolder { final MeanMetric queryMetric = new MeanMetric(); + final MeanMetric concurrentQueryMetric = new MeanMetric(); + final CounterMetric queryConcurrencyMetric = new CounterMetric(); final MeanMetric fetchMetric = new MeanMetric(); /* We store scroll statistics in microseconds because with nanoseconds we run the risk of overflowing the total stats if there are * many scrolls. 
For example, on a system with 2^24 scrolls that have been executed, each executing for 2^10 seconds, then using @@ -218,6 +234,7 @@ static final class StatsHolder { final MeanMetric pitMetric = new MeanMetric(); final MeanMetric suggestMetric = new MeanMetric(); final CounterMetric queryCurrent = new CounterMetric(); + final CounterMetric concurrentQueryCurrent = new CounterMetric(); final CounterMetric fetchCurrent = new CounterMetric(); final CounterMetric scrollCurrent = new CounterMetric(); final CounterMetric pitCurrent = new CounterMetric(); @@ -228,6 +245,10 @@ SearchStats.Stats stats() { queryMetric.count(), TimeUnit.NANOSECONDS.toMillis(queryMetric.sum()), queryCurrent.count(), + concurrentQueryMetric.count(), + TimeUnit.NANOSECONDS.toMillis(concurrentQueryMetric.sum()), + concurrentQueryCurrent.count(), + queryConcurrencyMetric.count(), fetchMetric.count(), TimeUnit.NANOSECONDS.toMillis(fetchMetric.sum()), fetchCurrent.count(), diff --git a/server/src/main/java/org/opensearch/index/seqno/GlobalCheckpointSyncAction.java b/server/src/main/java/org/opensearch/index/seqno/GlobalCheckpointSyncAction.java index 4ae693851c85f..ca1dfe2d5ad01 100644 --- a/server/src/main/java/org/opensearch/index/seqno/GlobalCheckpointSyncAction.java +++ b/server/src/main/java/org/opensearch/index/seqno/GlobalCheckpointSyncAction.java @@ -35,7 +35,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.replication.ReplicationRequest; import org.opensearch.action.support.replication.ReplicationResponse; @@ -43,12 +42,13 @@ import org.opensearch.cluster.action.shard.ShardStateAction; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardClosedException; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.translog.Translog; import org.opensearch.indices.IndicesService; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/index/seqno/LocalCheckpointTracker.java b/server/src/main/java/org/opensearch/index/seqno/LocalCheckpointTracker.java index 0a4c197898d3d..155866e20d007 100644 --- a/server/src/main/java/org/opensearch/index/seqno/LocalCheckpointTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/LocalCheckpointTracker.java @@ -147,7 +147,7 @@ public synchronized void markSeqNoAsPersisted(final long seqNo) { /** * Updates the processed sequence checkpoint to the given value. - * + * <p> * This method is only used for segment replication since indexing doesn't * take place on the replica allowing us to avoid the check that all sequence numbers * are consecutively processed. 
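// ---- Editorial sketch, not part of the diff ----
// The javadoc edited above appears to document LocalCheckpointTracker#fastForwardProcessedSeqNo.
// A rough illustration of how the fast-forward (segment replication) path differs from
// per-sequence-number tracking; the constructor values here are chosen purely for illustration:
static void fastForwardSketch() {
    LocalCheckpointTracker tracker = new LocalCheckpointTracker(2, -1); // maxSeqNo = 2, local checkpoint = -1
    tracker.markSeqNoAsProcessed(0);      // contiguous: processed checkpoint advances to 0
    tracker.markSeqNoAsProcessed(2);      // gap at seqNo 1: processed checkpoint stays at 0
    tracker.fastForwardProcessedSeqNo(2); // fast-forward: checkpoint moves to 2 without
                                          // requiring seqNo 1 to be marked individually
}
// ---- End editorial sketch ----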
@@ -208,7 +208,7 @@ public long getMaxSeqNo() { /** * constructs a {@link SeqNoStats} object, using local state and the supplied global checkpoint - * + * <p> * This is needed to make sure the persisted local checkpoint and max seq no are consistent */ public synchronized SeqNoStats getStats(final long globalCheckpoint) { diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index 19fe9ee97cd2f..7b9c1d3aa548f 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -35,7 +35,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.LegacyESVersion; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.GroupedActionListener; import org.opensearch.action.support.replication.ReplicationResponse; import org.opensearch.cluster.metadata.IndexMetadata; @@ -43,11 +42,14 @@ import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.gateway.WriteStateException; import org.opensearch.index.IndexSettings; @@ -56,9 +58,10 @@ import org.opensearch.index.shard.AbstractIndexShardComponent; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ReplicationGroup; -import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.store.Store; +import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; -import org.opensearch.indices.replication.common.ReplicationTimer; +import org.opensearch.indices.replication.common.SegmentReplicationLagTimer; import java.io.IOException; import java.nio.file.Path; @@ -69,6 +72,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.OptionalLong; import java.util.Set; import java.util.concurrent.atomic.AtomicLong; @@ -86,7 +90,7 @@ /** * This class is responsible for tracking the replication group with its progress and safety markers (local and global checkpoints). - * + * <p> * The global checkpoint is the highest sequence number for which all lower (or equal) sequence number have been processed * on all shards that are currently active. Since shards count as "active" when the cluster-manager starts * them, and before this primary shard has been notified of this fact, we also include shards that have completed recovery. These shards @@ -110,10 +114,10 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L * checkpoint based on the local checkpoints of all in-sync shard copies. 
* - replica: this shard receives global checkpoint information from the primary (see * {@link #updateGlobalCheckpointOnReplica(long, String)}). - * + * <p> * When a shard is initialized (be it a primary or replica), it initially operates in replica mode. The global checkpoint tracker is * then switched to primary mode in the following three scenarios: - * + * <p> * - An initializing primary shard that is not a relocation target is moved to primary mode (using {@link #activatePrimaryMode}) once * the shard becomes active. * - An active replica shard is moved to primary mode (using {@link #activatePrimaryMode}) once it is promoted to primary. @@ -138,7 +142,7 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L * in-sync shard copies cannot grow, otherwise the relocation target might miss this information and increase the global checkpoint * too eagerly. As a consequence, some of the methods in this class are not allowed to be called while a handoff is in progress, * in particular {@link #markAllocationIdAsInSync}. - * + * <p> * A notable exception to this is the method {@link #updateFromClusterManager}, which is still allowed to be called during a relocation handoff. * The reason for this is that the handoff might fail and can be aborted (using {@link #abortRelocationHandoff}), in which case * it is important that the global checkpoint tracker does not miss any state updates that might have happened during the handoff attempt. @@ -673,8 +677,9 @@ public synchronized void renewPeerRecoveryRetentionLeases() { /** * The state of the Lucene checkpoint * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class CheckpointState implements Writeable { /** @@ -714,7 +719,7 @@ public static class CheckpointState implements Writeable { * Map of ReplicationCheckpoints to ReplicationTimers. Timers are added as new checkpoints are published, and removed when * the replica is caught up. */ - Map<ReplicationCheckpoint, ReplicationTimer> checkpointTimers; + Map<ReplicationCheckpoint, SegmentReplicationLagTimer> checkpointTimers; /** * The time it took to complete the most recent replication event. @@ -1161,7 +1166,7 @@ public synchronized void updateGlobalCheckpointForShard(final String allocationI /** * Update the local knowledge of the visible checkpoint for the specified allocation ID. - * + * <p> * This method will also stop timers for each shard and compute replication lag metrics. * * @param allocationId the allocation ID to update the global checkpoint for @@ -1186,9 +1191,9 @@ public synchronized void updateVisibleCheckpointForShard(final String allocation cps.checkpointTimers.entrySet().removeIf((entry) -> { boolean result = entry.getKey().isAheadOf(visibleCheckpoint) == false; if (result) { - final ReplicationTimer timer = entry.getValue(); + final SegmentReplicationLagTimer timer = entry.getValue(); timer.stop(); - lastFinished.set(Math.max(lastFinished.get(), timer.time())); + lastFinished.set(Math.max(lastFinished.get(), timer.totalElapsedTime())); } return result; }); @@ -1208,7 +1213,7 @@ public synchronized void updateVisibleCheckpointForShard(final String allocation } /** - * After a new checkpoint is published, start a timer for each replica to the checkpoint. + * After a new checkpoint is published, create a timer for each replica to the checkpoint.
* @param checkpoint {@link ReplicationCheckpoint} */ public synchronized void setLatestReplicationCheckpoint(ReplicationCheckpoint checkpoint) { @@ -1217,7 +1222,7 @@ public synchronized void setLatestReplicationCheckpoint(ReplicationCheckpoint ch this.latestReplicationCheckpoint = checkpoint; } if (primaryMode) { - startReplicationLagTimers(); + createReplicationLagTimers(); } } @@ -1225,7 +1230,15 @@ public ReplicationCheckpoint getLatestReplicationCheckpoint() { return this.latestReplicationCheckpoint; } - private void startReplicationLagTimers() { + private boolean isPrimaryRelocation(String allocationId) { + Optional<ShardRouting> shardRouting = routingTable.shards() + .stream() + .filter(routing -> routing.allocationId().getId().equals(allocationId)) + .findAny(); + return shardRouting.isPresent() && shardRouting.get().primary(); + } + + private void createReplicationLagTimers() { for (Map.Entry<String, CheckpointState> entry : checkpoints.entrySet()) { final String allocationId = entry.getKey(); if (allocationId.equals(this.shardAllocationId) == false) { @@ -1234,12 +1247,9 @@ private void startReplicationLagTimers() { // it is possible for a shard to be in-sync but not yet removed from the checkpoints collection after a failover event. if (cps.inSync && replicationGroup.getUnavailableInSyncShards().contains(allocationId) == false + && isPrimaryRelocation(allocationId) == false && latestReplicationCheckpoint.isAheadOf(cps.visibleReplicationCheckpoint)) { - cps.checkpointTimers.computeIfAbsent(latestReplicationCheckpoint, ignored -> { - final ReplicationTimer replicationTimer = new ReplicationTimer(); - replicationTimer.start(); - return replicationTimer; - }); + cps.checkpointTimers.computeIfAbsent(latestReplicationCheckpoint, ignored -> new SegmentReplicationLagTimer()); logger.trace( () -> new ParameterizedMessage( "updated last published checkpoint for {} at visible cp {} to {} - timers [{}]", @@ -1254,6 +1264,30 @@ private void startReplicationLagTimers() { } } + /** + * After a new checkpoint is published, start a timer per replica for the checkpoint. + * @param checkpoint {@link ReplicationCheckpoint} + */ + public synchronized void startReplicationLagTimers(ReplicationCheckpoint checkpoint) { + assert indexSettings.isSegRepEnabled(); + if (checkpoint.equals(latestReplicationCheckpoint) == false) { + this.latestReplicationCheckpoint = checkpoint; + } + if (primaryMode) { + checkpoints.entrySet().stream().filter(e -> !e.getKey().equals(this.shardAllocationId)).forEach(e -> { + String allocationId = e.getKey(); + final CheckpointState cps = e.getValue(); + if (cps.inSync + && replicationGroup.getUnavailableInSyncShards().contains(allocationId) == false + && isPrimaryRelocation(e.getKey()) == false + && latestReplicationCheckpoint.isAheadOf(cps.visibleReplicationCheckpoint) + && cps.checkpointTimers.containsKey(latestReplicationCheckpoint)) { + cps.checkpointTimers.get(latestReplicationCheckpoint).start(); + } + }); + } + } + /** * Fetch stats on segment replication. 
* @return {@link Tuple} V1 - TimeValue in ms - mean replication lag for this primary to its entire group, @@ -1270,27 +1304,29 @@ public synchronized Set<SegmentReplicationShardStats> getSegmentReplicationStats entry -> entry.getKey().equals(this.shardAllocationId) == false && entry.getValue().inSync && replicationGroup.getUnavailableInSyncShards().contains(entry.getKey()) == false + && isPrimaryRelocation(entry.getKey()) == false ) - .map(entry -> buildShardStats(latestReplicationCheckpoint.getLength(), entry.getKey(), entry.getValue())) + .map(entry -> buildShardStats(entry.getKey(), entry.getValue())) .collect(Collectors.toUnmodifiableSet()); } return Collections.emptySet(); } - private SegmentReplicationShardStats buildShardStats( - final long latestCheckpointLength, - final String allocationId, - final CheckpointState checkpointState - ) { - final Map<ReplicationCheckpoint, ReplicationTimer> checkpointTimers = checkpointState.checkpointTimers; + private SegmentReplicationShardStats buildShardStats(final String allocationId, final CheckpointState cps) { + final Store.RecoveryDiff diff = Store.segmentReplicationDiff( + latestReplicationCheckpoint.getMetadataMap(), + cps.visibleReplicationCheckpoint != null ? cps.visibleReplicationCheckpoint.getMetadataMap() : Collections.emptyMap() + ); + final long bytesBehind = diff.missing.stream().mapToLong(StoreFileMetadata::length).sum(); return new SegmentReplicationShardStats( allocationId, - checkpointTimers.size(), - checkpointState.visibleReplicationCheckpoint == null - ? latestCheckpointLength - : Math.max(latestCheckpointLength - checkpointState.visibleReplicationCheckpoint.getLength(), 0), - checkpointTimers.values().stream().mapToLong(ReplicationTimer::time).max().orElse(0), - checkpointState.lastCompletedReplicationLag + cps.checkpointTimers.size(), + bytesBehind, + bytesBehind > 0L ? cps.checkpointTimers.values().stream().mapToLong(SegmentReplicationLagTimer::time).max().orElse(0) : 0, + bytesBehind > 0L + ? cps.checkpointTimers.values().stream().mapToLong(SegmentReplicationLagTimer::totalElapsedTime).max().orElse(0) + : 0, + cps.lastCompletedReplicationLag ); } @@ -1865,8 +1901,9 @@ private synchronized void waitForLocalCheckpointToAdvance() throws InterruptedEx * Represents the sequence number component of the primary context. This is the knowledge on the primary of the in-sync and initializing * shards and their local checkpoints. 
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class PrimaryContext implements Writeable { private final long clusterStateVersion; diff --git a/server/src/main/java/org/opensearch/index/seqno/RetentionLease.java b/server/src/main/java/org/opensearch/index/seqno/RetentionLease.java index 5d4483e4a2930..255ff115555d8 100644 --- a/server/src/main/java/org/opensearch/index/seqno/RetentionLease.java +++ b/server/src/main/java/org/opensearch/index/seqno/RetentionLease.java @@ -32,15 +32,16 @@ package org.opensearch.index.seqno; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.xcontent.XContent; import java.io.IOException; import java.util.Objects; @@ -51,8 +52,9 @@ * otherwise merge away operations that have been soft deleted). Each retention lease contains a unique identifier, the retaining sequence * number, the timestamp of when the lease was created or renewed, and the source of the retention lease (e.g., "ccr"). * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class RetentionLease implements ToXContentObject, Writeable { private final String id; diff --git a/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseActions.java b/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseActions.java index 88a4f5d8ab2b5..db8b06489f2ae 100644 --- a/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseActions.java +++ b/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseActions.java @@ -32,9 +32,7 @@ package org.opensearch.index.seqno; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.ActionResponse; import org.opensearch.action.ActionType; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.single.shard.SingleShardRequest; @@ -44,13 +42,15 @@ import org.opensearch.cluster.routing.ShardsIterator; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.common.lease.Releasable; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.lease.Releasable; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexService; import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.IndicesService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncAction.java b/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncAction.java index ec8d44846e30e..5fa0a1a6459e7 100644 --- 
a/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncAction.java +++ b/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncAction.java @@ -37,7 +37,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.replication.ReplicationRequest; @@ -47,18 +46,19 @@ import org.opensearch.cluster.action.shard.ShardStateAction; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.tasks.TaskId; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardClosedException; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.IndicesService; import org.opensearch.node.NodeClosedException; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportResponseHandler; diff --git a/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseStats.java b/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseStats.java index a1ed615b83a14..d34d385c66eb6 100644 --- a/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseStats.java +++ b/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseStats.java @@ -32,12 +32,13 @@ package org.opensearch.index.seqno; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.xcontent.ToXContentFragment; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContent; +import org.opensearch.core.xcontent.XContentBuilder; import java.io.IOException; import java.util.Objects; @@ -45,8 +46,9 @@ /** * Represents retention lease stats. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class RetentionLeaseStats implements ToXContentFragment, Writeable { private final RetentionLeases retentionLeases; diff --git a/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseSyncAction.java b/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseSyncAction.java index 0c50f9b06b2af..ca3c7e1d49700 100644 --- a/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseSyncAction.java +++ b/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseSyncAction.java @@ -37,7 +37,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.WriteResponse; @@ -49,19 +48,21 @@ import org.opensearch.cluster.block.ClusterBlockLevel; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.tasks.TaskId; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.IndexingPressureService; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardClosedException; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportResponseHandler; @@ -99,7 +100,8 @@ public RetentionLeaseSyncAction( final ShardStateAction shardStateAction, final ActionFilters actionFilters, final IndexingPressureService indexingPressureService, - final SystemIndices systemIndices + final SystemIndices systemIndices, + final Tracer tracer ) { super( settings, @@ -115,7 +117,8 @@ public RetentionLeaseSyncAction( ignore -> ThreadPool.Names.MANAGEMENT, false, indexingPressureService, - systemIndices + systemIndices, + tracer ); } diff --git a/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseSyncer.java b/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseSyncer.java index db3e8c4012ae5..ece0ac2b7bf6b 100644 --- a/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseSyncer.java +++ b/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseSyncer.java @@ -32,9 +32,10 @@ package org.opensearch.index.seqno; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.replication.ReplicationResponse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; import java.util.Objects; @@ -44,6 +45,7 @@ * * @opensearch.internal */ +@PublicApi(since = "1.0.0") 
public class RetentionLeaseSyncer { private final SyncAction syncAction; private final BackgroundSyncAction backgroundSyncAction; diff --git a/server/src/main/java/org/opensearch/index/seqno/RetentionLeases.java b/server/src/main/java/org/opensearch/index/seqno/RetentionLeases.java index a764ca3e7a581..4e28e19c4b164 100644 --- a/server/src/main/java/org/opensearch/index/seqno/RetentionLeases.java +++ b/server/src/main/java/org/opensearch/index/seqno/RetentionLeases.java @@ -32,6 +32,7 @@ package org.opensearch.index.seqno; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -39,9 +40,9 @@ import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.xcontent.XContent; import org.opensearch.gateway.MetadataStateFormat; import java.io.IOException; @@ -57,8 +58,9 @@ * Represents a versioned collection of retention leases. We version the collection of retention leases so that sync requests that * arrive out of order on the replica can be handled correctly, using the version to ensure that older sync requests are rejected. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RetentionLeases implements ToXContentFragment, Writeable { private final long primaryTerm; diff --git a/server/src/main/java/org/opensearch/index/seqno/SeqNoStats.java b/server/src/main/java/org/opensearch/index/seqno/SeqNoStats.java index be7888ada2801..a8acf1fac7846 100644 --- a/server/src/main/java/org/opensearch/index/seqno/SeqNoStats.java +++ b/server/src/main/java/org/opensearch/index/seqno/SeqNoStats.java @@ -32,6 +32,7 @@ package org.opensearch.index.seqno; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -44,8 +45,9 @@ /** * Sequence number statistics * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SeqNoStats implements ToXContentFragment, Writeable { private static final String SEQ_NO = "seq_no"; diff --git a/server/src/main/java/org/opensearch/index/seqno/SequenceNumbers.java b/server/src/main/java/org/opensearch/index/seqno/SequenceNumbers.java index 210976ffba58a..b9959941d843f 100644 --- a/server/src/main/java/org/opensearch/index/seqno/SequenceNumbers.java +++ b/server/src/main/java/org/opensearch/index/seqno/SequenceNumbers.java @@ -32,6 +32,8 @@ package org.opensearch.index.seqno; +import org.opensearch.common.annotation.PublicApi; + import java.util.Map; /** @@ -132,8 +134,9 @@ public static long max(final long maxSeqNo, final long seqNo) { /** * Commit information * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class CommitInfo { public final long maxSeqNo; public final long localCheckpoint; diff --git a/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java index 357b6c2eaa456..675d60ec2b63d 100644 --- a/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java
+++ b/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java @@ -21,7 +21,7 @@ * * @opensearch.internal */ -public class CheckpointRefreshListener extends CloseableRetryableRefreshListener { +public class CheckpointRefreshListener extends ReleasableRetryableRefreshListener { protected static Logger logger = LogManager.getLogger(CheckpointRefreshListener.class); @@ -39,7 +39,7 @@ public void beforeRefresh() throws IOException { } @Override - protected boolean performAfterRefresh(boolean didRefresh, boolean isRetry) { + protected boolean performAfterRefreshWithPermit(boolean didRefresh) { if (didRefresh && shard.state() == IndexShardState.STARTED && shard.getReplicationTracker().isPrimaryMode() diff --git a/server/src/main/java/org/opensearch/index/shard/CloseableRetryableRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/CloseableRetryableRefreshListener.java deleted file mode 100644 index 10e3e04033da3..0000000000000 --- a/server/src/main/java/org/opensearch/index/shard/CloseableRetryableRefreshListener.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.shard; - -import org.apache.logging.log4j.Logger; -import org.apache.lucene.search.ReferenceManager; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.threadpool.ThreadPool; - -import java.io.Closeable; -import java.io.IOException; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; - -/** - * RefreshListener that runs afterRefresh method if and only if there is a permit available. Once the listener - * is closed, all the permits are acquired and there are no available permits to afterRefresh. This abstract class provides - * necessary abstract methods to schedule retry. - */ -public abstract class CloseableRetryableRefreshListener implements ReferenceManager.RefreshListener, Closeable { - - /** - * Total permits = 1 ensures that there is only single instance of performAfterRefresh that is running at a time. - * In case there are use cases where concurrency is required, the total permit variable can be put inside the ctor. - */ - private static final int TOTAL_PERMITS = 1; - - private final Semaphore semaphore = new Semaphore(TOTAL_PERMITS); - - private final ThreadPool threadPool; - - /** - * This boolean is used to ensure that there is only 1 retry scheduled/running at any time. 
- */ - private final AtomicBoolean retryScheduled = new AtomicBoolean(false); - - public CloseableRetryableRefreshListener() { - this.threadPool = null; - } - - public CloseableRetryableRefreshListener(ThreadPool threadPool) { - this.threadPool = threadPool; - } - - @Override - public final void afterRefresh(boolean didRefresh) throws IOException { - boolean successful; - boolean permitAcquired = semaphore.tryAcquire(); - try { - successful = permitAcquired && performAfterRefresh(didRefresh, false); - } finally { - if (permitAcquired) { - semaphore.release(); - } - } - scheduleRetry(successful, didRefresh, permitAcquired); - } - - protected String getRetryThreadPoolName() { - return null; - } - - protected TimeValue getNextRetryInterval() { - return null; - } - - private void scheduleRetry(TimeValue interval, String retryThreadPoolName, boolean didRefresh, boolean isRetry) { - if (this.threadPool == null - || interval == null - || retryThreadPoolName == null - || ThreadPool.THREAD_POOL_TYPES.containsKey(retryThreadPoolName) == false - || interval == TimeValue.MINUS_ONE - || retryScheduled.compareAndSet(false, true) == false) { - return; - } - boolean scheduled = false; - try { - this.threadPool.schedule(() -> { - boolean successful; - boolean permitAcquired = semaphore.tryAcquire(); - try { - successful = permitAcquired && performAfterRefresh(didRefresh, isRetry); - } finally { - if (permitAcquired) { - semaphore.release(); - } - retryScheduled.set(false); - } - scheduleRetry(successful, didRefresh, isRetry || permitAcquired); - }, interval, retryThreadPoolName); - scheduled = true; - getLogger().info("Scheduled retry with didRefresh={} isRetry={}", didRefresh, isRetry); - } finally { - if (scheduled == false) { - retryScheduled.set(false); - } - } - } - - /** - * Schedules the retry based on the {@code afterRefreshSuccessful} value. - * - * @param afterRefreshSuccessful is sent true if the performAfterRefresh(..) is successful. - * @param didRefresh if the refresh did open a new reference then didRefresh will be true - * @param isRetry if this is a failure or permit was not acquired. - */ - private void scheduleRetry(boolean afterRefreshSuccessful, boolean didRefresh, boolean isRetry) { - if (afterRefreshSuccessful == false) { - scheduleRetry(getNextRetryInterval(), getRetryThreadPoolName(), didRefresh, isRetry); - } - } - - /** - * This method needs to be overridden and be provided with what needs to be run on after refresh. - * - * @param didRefresh true if the refresh opened a new reference - * @param isRetry true if this is a retry attempt - * @return true if a retry is needed else false. 
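
// A simplified, self-contained sketch of the single-permit gating pattern used by
// the removed listener above: work runs only while the one permit is available, and
// close() drains the permit so nothing further can start. GatedWorker is an
// illustrative stand-in, not the original class.
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;

class GatedWorker {
    // One permit => at most one run at a time, mirroring TOTAL_PERMITS above.
    private static final int TOTAL_PERMITS = 1;
    private final Semaphore semaphore = new Semaphore(TOTAL_PERMITS);

    // Runs the task only if the permit is free; returns whether it actually ran.
    boolean tryRun(Runnable task) {
        if (semaphore.tryAcquire() == false) {
            return false; // closed, or another run is already in flight
        }
        try {
            task.run();
            return true;
        } finally {
            semaphore.release();
        }
    }

    // Blocks new work by draining every permit with a bounded wait, as the
    // original close() does with its 10-minute timeout.
    void close() throws InterruptedException {
        if (semaphore.tryAcquire(TOTAL_PERMITS, 10, TimeUnit.MINUTES) == false) {
            throw new RuntimeException("timeout while closing gated worker");
        }
        // Permits are deliberately never released: the gate stays shut.
    }
}
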
- */ - protected abstract boolean performAfterRefresh(boolean didRefresh, boolean isRetry); - - @Override - public final void close() throws IOException { - try { - if (semaphore.tryAcquire(TOTAL_PERMITS, 10, TimeUnit.MINUTES)) { - assert semaphore.availablePermits() == 0; - } else { - throw new RuntimeException("timeout while closing gated refresh listener"); - } - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - } - - protected abstract Logger getLogger(); -} diff --git a/server/src/main/java/org/opensearch/index/shard/DocsStats.java b/server/src/main/java/org/opensearch/index/shard/DocsStats.java index 83cc69752db2f..4ca475a45c04b 100644 --- a/server/src/main/java/org/opensearch/index/shard/DocsStats.java +++ b/server/src/main/java/org/opensearch/index/shard/DocsStats.java @@ -32,6 +32,7 @@ package org.opensearch.index.shard; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -44,8 +45,9 @@ /** * Document statistics * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DocsStats implements Writeable, ToXContentFragment { private long count = 0; diff --git a/server/src/main/java/org/opensearch/index/shard/GlobalCheckpointListeners.java b/server/src/main/java/org/opensearch/index/shard/GlobalCheckpointListeners.java index ac21b7a9b1ab8..c1b66909a82a9 100644 --- a/server/src/main/java/org/opensearch/index/shard/GlobalCheckpointListeners.java +++ b/server/src/main/java/org/opensearch/index/shard/GlobalCheckpointListeners.java @@ -34,10 +34,11 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.core.Assertions; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.FutureUtils; +import org.opensearch.core.Assertions; import org.opensearch.core.index.shard.ShardId; import java.io.Closeable; @@ -67,8 +68,9 @@ public class GlobalCheckpointListeners implements Closeable { /** * A global checkpoint listener consisting of a callback that is notified when the global checkpoint is updated or the shard is closed. 
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface GlobalCheckpointListener { /** diff --git a/server/src/main/java/org/opensearch/index/shard/IllegalIndexShardStateException.java b/server/src/main/java/org/opensearch/index/shard/IllegalIndexShardStateException.java index 47ed149913914..991481899c83f 100644 --- a/server/src/main/java/org/opensearch/index/shard/IllegalIndexShardStateException.java +++ b/server/src/main/java/org/opensearch/index/shard/IllegalIndexShardStateException.java @@ -33,6 +33,7 @@ package org.opensearch.index.shard; import org.opensearch.OpenSearchException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.index.shard.ShardId; @@ -43,8 +44,9 @@ /** * Exception thrown when an index shard is in an illegal state * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IllegalIndexShardStateException extends OpenSearchException { private final IndexShardState currentState; diff --git a/server/src/main/java/org/opensearch/index/shard/IndexEventListener.java b/server/src/main/java/org/opensearch/index/shard/IndexEventListener.java index 9e23a84eac030..693686ec228e9 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexEventListener.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexEventListener.java @@ -33,9 +33,10 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason; @@ -51,8 +52,9 @@ * modify local state without sufficient synchronization. * </p> * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface IndexEventListener { /** @@ -155,7 +157,7 @@ default void beforeIndexShardDeleted(ShardId shardId, Settings indexSettings) {} /** * Called after the index shard has been deleted from disk. - * + * <p> * Note: this method is only called if the deletion of the shard did finish without an exception * * @param shardId The shard id diff --git a/server/src/main/java/org/opensearch/index/shard/IndexSettingProvider.java b/server/src/main/java/org/opensearch/index/shard/IndexSettingProvider.java index 441a9a6413ffc..861a325c45d4b 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexSettingProvider.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexSettingProvider.java @@ -32,14 +32,16 @@ package org.opensearch.index.shard; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; /** * An {@link IndexSettingProvider} is a provider for index level settings that can be set * explicitly as a default value (so they show up as "set" for newly created indices) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface IndexSettingProvider { /** * Returns explicitly set default index {@link Settings} for the given index. 
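
// With GlobalCheckpointListener and IndexEventListener now tagged @PublicApi, plugin
// code can implement them directly. A minimal sketch overriding the
// beforeIndexShardDeleted hook whose default no-op signature appears in this diff;
// it assumes the OpenSearch server artifact on the classpath.
import org.opensearch.common.settings.Settings;
import org.opensearch.core.index.shard.ShardId;
import org.opensearch.index.shard.IndexEventListener;

class LoggingIndexEventListener implements IndexEventListener {
    @Override
    public void beforeIndexShardDeleted(ShardId shardId, Settings indexSettings) {
        // Listener methods have empty defaults, so implementations only
        // override the events they care about.
        System.out.println("about to delete shard " + shardId);
    }
}
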
This should not diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index e43b9773cc1e0..977155a1cbb72 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -56,18 +56,15 @@ import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.apache.lucene.util.ThreadInterruptedException; -import org.opensearch.common.lucene.store.ByteArrayIndexInput; -import org.opensearch.cluster.metadata.DataStream; -import org.opensearch.core.Assertions; import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.opensearch.action.admin.indices.upgrade.post.UpgradeRequest; import org.opensearch.action.support.replication.PendingReplicationActions; import org.opensearch.action.support.replication.ReplicationResponse; +import org.opensearch.cluster.metadata.DataStream; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.MappingMetadata; import org.opensearch.cluster.routing.IndexShardRoutingTable; @@ -81,15 +78,17 @@ import org.opensearch.common.CheckedRunnable; import org.opensearch.common.Nullable; import org.opensearch.common.SetOnce; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; import org.opensearch.common.metrics.CounterMetric; import org.opensearch.common.metrics.MeanMetric; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.concurrent.AbstractRunnable; @@ -97,18 +96,21 @@ import org.opensearch.common.util.concurrent.BufferedAsyncIOProcessor; import org.opensearch.common.util.concurrent.RunOnce; import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.common.util.set.Sets; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.util.io.IOUtils; -import org.opensearch.common.lease.Releasable; -import org.opensearch.common.lease.Releasables; +import org.opensearch.core.Assertions; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.indices.breaker.CircuitBreakerService; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.gateway.WriteStateException; -import org.opensearch.core.index.Index; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; +import org.opensearch.index.ReplicationStats; import org.opensearch.index.SegmentReplicationShardStats; 
import org.opensearch.index.VersionType; import org.opensearch.index.cache.IndexCache; @@ -145,7 +147,8 @@ import org.opensearch.index.merge.MergeStats; import org.opensearch.index.recovery.RecoveryStats; import org.opensearch.index.refresh.RefreshStats; -import org.opensearch.index.remote.RemoteRefreshSegmentPressureService; +import org.opensearch.index.remote.RemoteSegmentStats; +import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; import org.opensearch.index.search.stats.SearchStats; import org.opensearch.index.search.stats.ShardSearchStats; import org.opensearch.index.seqno.ReplicationTracker; @@ -158,6 +161,7 @@ import org.opensearch.index.shard.PrimaryReplicaSyncer.ResyncTask; import org.opensearch.index.similarity.SimilarityService; import org.opensearch.index.store.RemoteSegmentStoreDirectory; +import org.opensearch.index.store.RemoteStoreFileDownloader; import org.opensearch.index.store.Store; import org.opensearch.index.store.Store.MetadataSnapshot; import org.opensearch.index.store.StoreFileMetadata; @@ -165,6 +169,7 @@ import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.index.translog.RemoteBlobStoreInternalTranslogFactory; import org.opensearch.index.translog.RemoteFsTranslog; +import org.opensearch.index.translog.RemoteTranslogStats; import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.TranslogConfig; import org.opensearch.index.translog.TranslogFactory; @@ -174,18 +179,18 @@ import org.opensearch.index.warmer.WarmerStats; import org.opensearch.indices.IndexingMemoryController; import org.opensearch.indices.IndicesService; -import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryFailedException; import org.opensearch.indices.recovery.RecoveryListener; +import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.recovery.RecoveryTarget; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; +import org.opensearch.indices.replication.common.ReplicationTimer; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; -import org.opensearch.core.rest.RestStatus; import org.opensearch.search.suggest.completion.CompletionStats; import org.opensearch.threadpool.ThreadPool; @@ -194,14 +199,14 @@ import java.io.IOException; import java.io.PrintStream; import java.nio.channels.ClosedByInterruptException; +import java.nio.channels.FileChannel; import java.nio.charset.StandardCharsets; import java.nio.file.NoSuchFileException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; -import java.util.Comparator; import java.util.EnumSet; -import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Locale; @@ -235,8 +240,9 @@ /** * An OpenSearch index shard * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexShard extends AbstractIndexShardComponent implements IndicesClusterStateService.Shard { private final ThreadPool threadPool; @@ -335,9 +341,11 @@ Runnable getGlobalCheckpointSyncer() { private final Store remoteStore; private final 
BiFunction<IndexSettings, ShardRouting, TranslogFactory> translogFactorySupplier; private final boolean isTimeSeriesIndex; - private final RemoteRefreshSegmentPressureService remoteRefreshSegmentPressureService; + private final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory; private final List<ReferenceManager.RefreshListener> internalRefreshListener = new ArrayList<>(); + private final RemoteStoreFileDownloader fileDownloader; + private final RecoverySettings recoverySettings; public IndexShard( final ShardRouting shardRouting, @@ -363,7 +371,10 @@ public IndexShard( final BiFunction<IndexSettings, ShardRouting, TranslogFactory> translogFactorySupplier, @Nullable final SegmentReplicationCheckpointPublisher checkpointPublisher, @Nullable final Store remoteStore, - final RemoteRefreshSegmentPressureService remoteRefreshSegmentPressureService + final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, + final Supplier<TimeValue> clusterRemoteTranslogBufferIntervalSupplier, + final String nodeId, + final RecoverySettings recoverySettings ) throws IOException { super(shardRouting.shardId(), indexSettings); assert shardRouting.initializing(); @@ -384,7 +395,7 @@ public IndexShard( threadPool, this::getEngine, indexSettings.isRemoteTranslogStoreEnabled(), - indexSettings::getRemoteTranslogUploadBufferInterval + () -> getRemoteTranslogUploadBufferInterval(clusterRemoteTranslogBufferIntervalSupplier) ); this.mapperService = mapperService; this.indexCache = indexCache; @@ -409,7 +420,7 @@ public IndexShard( logger.debug("state: [CREATED]"); this.checkIndexOnStartup = indexSettings.getValue(IndexSettings.INDEX_CHECK_ON_STARTUP); - this.translogConfig = new TranslogConfig(shardId, shardPath().resolveTranslog(), indexSettings, bigArrays); + this.translogConfig = new TranslogConfig(shardId, shardPath().resolveTranslog(), indexSettings, bigArrays, nodeId); final String aId = shardRouting.allocationId().getId(); final long primaryTerm = indexSettings.getIndexMetadata().primaryTerm(shardId.id()); this.pendingPrimaryTerm = primaryTerm; @@ -458,7 +469,9 @@ public boolean shouldCache(Query query) { this.isTimeSeriesIndex = (mapperService == null || mapperService.documentMapper() == null) ? false : mapperService.documentMapper().mappers().containsTimeStampField(); - this.remoteRefreshSegmentPressureService = remoteRefreshSegmentPressureService; + this.remoteStoreStatsTrackerFactory = remoteStoreStatsTrackerFactory; + this.recoverySettings = recoverySettings; + this.fileDownloader = new RemoteStoreFileDownloader(shardRouting.shardId(), threadPool, recoverySettings); } public ThreadPool getThreadPool() { @@ -547,6 +560,23 @@ public QueryCachingPolicy getQueryCachingPolicy() { return cachingPolicy; } + /** Only used for testing **/ + protected RemoteStoreStatsTrackerFactory getRemoteStoreStatsTrackerFactory() { + return remoteStoreStatsTrackerFactory; + } + + public String getNodeId() { + return translogConfig.getNodeId(); + } + + public RecoverySettings getRecoverySettings() { + return recoverySettings; + } + + public RemoteStoreFileDownloader getFileDownloader() { + return fileDownloader; + } + @Override public void updateShardState( final ShardRouting newRouting, @@ -620,7 +650,7 @@ public void updateShardState( if (currentRouting.initializing() && currentRouting.isRelocationTarget() == false && newRouting.active()) { // the cluster-manager started a recovering primary, activate primary mode. 
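
// The IndexShard constructor above now takes a Supplier<TimeValue>
// (clusterRemoteTranslogBufferIntervalSupplier) rather than a fixed interval, so the
// translog buffer interval can follow a dynamic cluster-level setting. A generic,
// JDK-only sketch of that pattern (Duration stands in for TimeValue):
import java.time.Duration;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;

class DynamicIntervalSketch {
    public static void main(String[] args) {
        AtomicReference<Duration> clusterSetting = new AtomicReference<>(Duration.ofMillis(650));
        // Consumers capture the supplier, not a snapshot of the value, so later
        // updates to the setting are observed on the next read.
        Supplier<Duration> intervalSupplier = clusterSetting::get;

        System.out.println(intervalSupplier.get()); // PT0.65S
        clusterSetting.set(Duration.ofSeconds(2));  // setting changed at runtime
        System.out.println(intervalSupplier.get()); // PT2S
    }
}
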
replicationTracker.activatePrimaryMode(getLocalCheckpoint());
-                        ensurePeerRecoveryRetentionLeasesExist();
+                        postActivatePrimaryMode();
                    }
                } else {
                    assert currentRouting.primary() == false : "term is only increased as part of primary promotion";
@@ -678,10 +708,29 @@ public void updateShardState(
                        if (indexSettings.isSegRepEnabled()) {
                            // this Shard's engine was read only, we need to update its engine before restoring local history from xlog.
                            assert newRouting.primary() && currentRouting.primary() == false;
+                            ReplicationTimer timer = new ReplicationTimer();
+                            timer.start();
+                            logger.debug(
+                                "Resetting engine on promotion of shard [{}] to primary, startTime {}",
+                                shardId,
+                                timer.startTime()
+                            );
                            resetEngineToGlobalCheckpoint();
+                            timer.stop();
+                            logger.info("Completed engine failover for shard [{}] in: {} ms", shardId, timer.time());
+                            // It is possible an engine can open with a SegmentInfos on a higher gen but the reader does not refresh to
+                            // trigger our refresh listener.
+                            // Force update the checkpoint post engine reset.
+                            updateReplicationCheckpoint();
                        }
+
                        replicationTracker.activatePrimaryMode(getLocalCheckpoint());
-                        ensurePeerRecoveryRetentionLeasesExist();
+                        if (indexSettings.isSegRepEnabled()) {
+                            // force publish a checkpoint once in primary mode so that replicas not caught up to previous primary
+                            // are brought up to date.
+                            checkpointPublisher.publish(this, getLatestReplicationCheckpoint());
+                        }
+                        postActivatePrimaryMode();
                        /*
                         * If this shard was serving as a replica shard when another shard was promoted to primary then
                         * its Lucene index was reset during the primary term transition. In particular, the Lucene index
@@ -802,6 +851,9 @@ public void relocated(
        final Runnable performSegRep
    ) throws IllegalIndexShardStateException, IllegalStateException, InterruptedException {
        assert shardRouting.primary() : "only primaries can be marked as relocated: " + shardRouting;
+        // The below list of releasables ensures that if the relocation does not happen, we undo the drain activity and
+        // reacquire all permits. This ensures that the remote store uploads can still be done by the existing primary shard.
+        List<Releasable> releasablesOnHandoffFailures = new ArrayList<>(2);
        try (Releasable forceRefreshes = refreshListeners.forceRefreshes()) {
            indexShardOperationPermits.blockOperations(30, TimeUnit.MINUTES, () -> {
                forceRefreshes.close();
@@ -814,11 +866,15 @@ public void relocated(
                    maybeSync();
                }

-                // Ensures all in-flight remote store operations drain, before we perform the handoff.
-                internalRefreshListener.stream()
-                    .filter(refreshListener -> refreshListener instanceof Closeable)
-                    .map(refreshListener -> (Closeable) refreshListener)
-                    .close();
+                // Ensures all in-flight remote store refreshes drain before we run performSegRep.
+                for (ReferenceManager.RefreshListener refreshListener : internalRefreshListener) {
+                    if (refreshListener instanceof ReleasableRetryableRefreshListener) {
+                        releasablesOnHandoffFailures.add(((ReleasableRetryableRefreshListener) refreshListener).drainRefreshes());
+                    }
+                }
+
+                // Ensures all in-flight remote store translog uploads drain before we run performSegRep.
+                releasablesOnHandoffFailures.add(getEngine().translogManager().drainSync());
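
// The hand-off code above accumulates Releasable handles while draining refresh and
// translog uploads, and closes them only if the hand-off fails so the existing
// primary can resume uploads. A JDK-only sketch of that
// accumulate-then-release-on-failure pattern (this Releasable interface is a
// stand-in for org.opensearch.common.lease.Releasable):
import java.util.ArrayList;
import java.util.List;

class HandoffSketch {
    interface Releasable {
        void close();
    }

    static Releasable drain(String what) {
        System.out.println("draining " + what);
        return () -> System.out.println("resuming " + what); // closing undoes the drain
    }

    public static void main(String[] args) {
        List<Releasable> releasablesOnHandoffFailures = new ArrayList<>(2);
        try {
            releasablesOnHandoffFailures.add(drain("segment uploads"));
            releasablesOnHandoffFailures.add(drain("translog uploads"));
            throw new IllegalStateException("simulated handoff failure");
        } catch (Exception ex) {
            // Hand-off did not complete: undo the drains, mirroring
            // Releasables.close(releasablesOnHandoffFailures) in the diff.
            releasablesOnHandoffFailures.forEach(Releasable::close);
        }
    }
}
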
                // no shard operation permits are being held here, move state from started to relocated
                assert indexShardOperationPermits.getActiveOperationsCount() == OPERATIONS_BLOCKED
@@ -836,7 +892,7 @@ public void relocated(
                    synchronized (mutex) {
                        verifyRelocatingState();
                        replicationTracker.completeRelocationHandoff(); // make changes to primaryMode and relocated flag only under
-                                                                       // mutex
+                        // mutex
                    }
                } catch (final Exception e) {
                    try {
@@ -853,6 +909,13 @@ public void relocated(
            // Fail primary relocation source and target shards.
            failShard("timed out waiting for relocation hand-off to complete", null);
            throw new IndexShardClosedException(shardId(), "timed out waiting for relocation hand-off to complete");
+        } catch (Exception ex) {
+            assert replicationTracker.isPrimaryMode();
+            // If primary mode is still true after the handoff attempt, it means that the relocation failed.
+            // The existing primary will continue to be the primary, so we need to allow the segment and translog
+            // uploads to resume.
+            Releasables.close(releasablesOnHandoffFailures);
+            throw ex;
        }
    }

@@ -931,7 +994,8 @@ public Engine.IndexResult applyIndexOperationOnPrimary(
            autoGeneratedTimestamp,
            isRetry,
            Engine.Operation.Origin.PRIMARY,
-            sourceToParse
+            sourceToParse,
+            null
        );
    }

@@ -944,23 +1008,6 @@ public Engine.IndexResult applyIndexOperationOnReplica(
        boolean isRetry,
        SourceToParse sourceToParse
    ) throws IOException {
-        if (indexSettings.isSegRepEnabled()) {
-            Engine.Index index = new Engine.Index(
-                new Term(IdFieldMapper.NAME, Uid.encodeId(id)),
-                new ParsedDocument(null, null, id, null, null, sourceToParse.source(), sourceToParse.getMediaType(), null),
-                seqNo,
-                opPrimaryTerm,
-                version,
-                null,
-                Engine.Operation.Origin.REPLICA,
-                System.nanoTime(),
-                autoGeneratedTimeStamp,
-                isRetry,
-                UNASSIGNED_SEQ_NO,
-                0
-            );
-            return getEngine().index(index);
-        }
        return applyIndexOperation(
            getEngine(),
            seqNo,
@@ -972,7 +1019,8 @@ public Engine.IndexResult applyIndexOperationOnReplica(
            autoGeneratedTimeStamp,
            isRetry,
            Engine.Operation.Origin.REPLICA,
-            sourceToParse
+            sourceToParse,
+            id
        );
    }

@@ -987,8 +1035,29 @@ private Engine.IndexResult applyIndexOperation(
        long autoGeneratedTimeStamp,
        boolean isRetry,
        Engine.Operation.Origin origin,
-        SourceToParse sourceToParse
+        SourceToParse sourceToParse,
+        String id
    ) throws IOException {
+
+        // For Segment Replication enabled replica shards we can skip parsing the documents as we directly copy segments from the
+        // primary shard.
+ if (indexSettings.isSegRepEnabled() && routingEntry().primary() == false) { + Engine.Index index = new Engine.Index( + new Term(IdFieldMapper.NAME, Uid.encodeId(id)), + new ParsedDocument(null, null, id, null, null, sourceToParse.source(), sourceToParse.getMediaType(), null), + seqNo, + opPrimaryTerm, + version, + null, + Engine.Operation.Origin.REPLICA, + System.nanoTime(), + autoGeneratedTimeStamp, + isRetry, + UNASSIGNED_SEQ_NO, + 0 + ); + return getEngine().index(index); + } assert opPrimaryTerm <= getOperationPrimaryTerm() : "op term [ " + opPrimaryTerm + " ] > shard term [" @@ -1357,12 +1426,23 @@ public MergeStats mergeStats() { if (engine == null) { return new MergeStats(); } - return engine.getMergeStats(); + final MergeStats mergeStats = engine.getMergeStats(); + mergeStats.addUnreferencedFileCleanUpStats(engine.unreferencedFileCleanUpsPerformed()); + return mergeStats; } public SegmentsStats segmentStats(boolean includeSegmentFileSizes, boolean includeUnloadedSegments) { SegmentsStats segmentsStats = getEngine().segmentsStats(includeSegmentFileSizes, includeUnloadedSegments); segmentsStats.addBitsetMemoryInBytes(shardBitsetFilterCache.getMemorySizeInBytes()); + // Populate remote_store stats only if the index is remote store backed + if (indexSettings.isRemoteStoreEnabled()) { + segmentsStats.addRemoteSegmentStats( + new RemoteSegmentStats(remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(shardId).stats()) + ); + } + if (indexSettings.isSegRepEnabled()) { + segmentsStats.addReplicationStats(getReplicationStats()); + } return segmentsStats; } @@ -1375,7 +1455,15 @@ public FieldDataStats fieldDataStats(String... fields) { } public TranslogStats translogStats() { - return getEngine().translogManager().getTranslogStats(); + TranslogStats translogStats = getEngine().translogManager().getTranslogStats(); + // Populate remote_store stats only if the index is remote store backed + if (indexSettings.isRemoteStoreEnabled()) { + translogStats.addRemoteTranslogStats( + new RemoteTranslogStats(remoteStoreStatsTrackerFactory.getRemoteTranslogTransferTracker(shardId).stats()) + ); + } + + return translogStats; } public CompletionStats completionStats(String... fields) { @@ -1408,6 +1496,9 @@ public void flush(FlushRequest request) { * {@link org.opensearch.index.translog.TranslogDeletionPolicy} for details */ public void trimTranslog() { + if (isRemoteTranslogEnabled()) { + return; + } verifyNotClosed(); final Engine engine = getEngine(); engine.translogManager().trimUnreferencedTranslogFiles(); @@ -1416,7 +1507,7 @@ public void trimTranslog() { /** * Rolls the tranlog generation and cleans unneeded. */ - public void rollTranslogGeneration() { + public void rollTranslogGeneration() throws IOException { final Engine engine = getEngine(); engine.translogManager().rollTranslogGeneration(); } @@ -1549,25 +1640,20 @@ public GatedCloseable<IndexCommit> acquireSafeIndexCommit() throws EngineExcepti } /** - * Compute and return the latest ReplicationCheckpoint for a particular shard. - * @return EMPTY checkpoint before the engine is opened and null for non-segrep enabled indices + * return the most recently computed ReplicationCheckpoint for a particular shard. + * The checkpoint is updated inside a refresh listener and may lag behind the SegmentInfos on the reader. + * To guarantee the checkpoint is upto date with the latest on-reader infos, use `getLatestSegmentInfosAndCheckpoint` instead. 
+ * + * @return {@link ReplicationCheckpoint} - The most recently computed ReplicationCheckpoint. */ public ReplicationCheckpoint getLatestReplicationCheckpoint() { - final Tuple<GatedCloseable<SegmentInfos>, ReplicationCheckpoint> infosAndCheckpoint = getLatestSegmentInfosAndCheckpoint(); - if (infosAndCheckpoint == null) { - return null; - } - try (final GatedCloseable<SegmentInfos> ignored = infosAndCheckpoint.v1()) { - return infosAndCheckpoint.v2(); - } catch (IOException e) { - throw new OpenSearchException("Error Closing SegmentInfos Snapshot", e); - } + return replicationTracker.getLatestReplicationCheckpoint(); } /** * Compute and return the latest ReplicationCheckpoint for a shard and a GatedCloseable containing the corresponding SegmentInfos. * The segments referenced by the SegmentInfos will remain on disk until the GatedCloseable is closed. - * + * <p> * Primary shards compute the seqNo used in the replication checkpoint from the fetched SegmentInfos. * Replica shards compute the seqNo from its latest processed checkpoint, which only increases when refreshing on new segments. * @@ -1575,40 +1661,14 @@ public ReplicationCheckpoint getLatestReplicationCheckpoint() { * */ public Tuple<GatedCloseable<SegmentInfos>, ReplicationCheckpoint> getLatestSegmentInfosAndCheckpoint() { - if (indexSettings.isSegRepEnabled() == false) { - return null; - } + assert indexSettings.isSegRepEnabled(); - Tuple<GatedCloseable<SegmentInfos>, ReplicationCheckpoint> nullSegmentInfosEmptyCheckpoint = new Tuple<>( - new GatedCloseable<>(null, () -> {}), - ReplicationCheckpoint.empty(shardId, getDefaultCodecName()) - ); - - if (getEngineOrNull() == null) { - return nullSegmentInfosEmptyCheckpoint; - } // do not close the snapshot - caller will close it. GatedCloseable<SegmentInfos> snapshot = null; try { snapshot = getSegmentInfosSnapshot(); - if (snapshot.get() != null) { - SegmentInfos segmentInfos = snapshot.get(); - return new Tuple<>( - snapshot, - new ReplicationCheckpoint( - this.shardId, - getOperationPrimaryTerm(), - segmentInfos.getGeneration(), - segmentInfos.getVersion(), - // TODO: Update replicas to compute length from SegmentInfos. Replicas do not yet incref segments with - // getSegmentInfosSnapshot, so computing length from SegmentInfos can cause issues. - shardRouting.primary() - ? store.getSegmentMetadataMap(segmentInfos).values().stream().mapToLong(StoreFileMetadata::length).sum() - : store.stats(StoreStats.UNKNOWN_RESERVED_BYTES).getSizeInBytes(), - getEngine().config().getCodec().getName() - ) - ); - } + final SegmentInfos segmentInfos = snapshot.get(); + return new Tuple<>(snapshot, computeReplicationCheckpoint(segmentInfos)); } catch (IOException | AlreadyClosedException e) { logger.error("Error Fetching SegmentInfos and latest checkpoint", e); if (snapshot != null) { @@ -1619,7 +1679,40 @@ public Tuple<GatedCloseable<SegmentInfos>, ReplicationCheckpoint> getLatestSegme } } } - return nullSegmentInfosEmptyCheckpoint; + return new Tuple<>(new GatedCloseable<>(null, () -> {}), getLatestReplicationCheckpoint()); + } + + /** + * Compute the latest {@link ReplicationCheckpoint} from a SegmentInfos. + * This function fetches a metadata snapshot from the store that comes with an IO cost. + * We will reuse the existing stored checkpoint if it is at the same SI version. + * + * @param segmentInfos {@link SegmentInfos} infos to use to compute. + * @return {@link ReplicationCheckpoint} Checkpoint computed from the infos. 
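
// The shouldProcessCheckpoint change just below collapses two rejections (request
// behind local, request equal to local) into a single strictly-ahead test. A toy
// sketch with a two-field checkpoint; the real ReplicationCheckpoint compares more
// fields, so treat this as an assumption-laden illustration:
class CheckpointCompareSketch {
    static final class Checkpoint {
        final long primaryTerm;
        final long segmentInfosVersion;

        Checkpoint(long primaryTerm, long segmentInfosVersion) {
            this.primaryTerm = primaryTerm;
            this.segmentInfosVersion = segmentInfosVersion;
        }

        boolean isAheadOf(Checkpoint other) {
            return primaryTerm > other.primaryTerm
                || (primaryTerm == other.primaryTerm && segmentInfosVersion > other.segmentInfosVersion);
        }
    }

    static boolean shouldProcess(Checkpoint request, Checkpoint local) {
        // A single "not strictly ahead" test rejects both equal and older checkpoints.
        return request.isAheadOf(local);
    }

    public static void main(String[] args) {
        Checkpoint local = new Checkpoint(1, 10);
        System.out.println(shouldProcess(new Checkpoint(1, 10), local)); // false (equal)
        System.out.println(shouldProcess(new Checkpoint(1, 9), local));  // false (behind)
        System.out.println(shouldProcess(new Checkpoint(1, 11), local)); // true  (ahead)
    }
}
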
+ * @throws IOException When there is an error computing segment metadata from the store. + */ + ReplicationCheckpoint computeReplicationCheckpoint(SegmentInfos segmentInfos) throws IOException { + if (segmentInfos == null) { + return ReplicationCheckpoint.empty(shardId); + } + final ReplicationCheckpoint latestReplicationCheckpoint = getLatestReplicationCheckpoint(); + if (latestReplicationCheckpoint.getSegmentInfosVersion() == segmentInfos.getVersion() + && latestReplicationCheckpoint.getSegmentsGen() == segmentInfos.getGeneration() + && latestReplicationCheckpoint.getPrimaryTerm() == getOperationPrimaryTerm()) { + return latestReplicationCheckpoint; + } + final Map<String, StoreFileMetadata> metadataMap = store.getSegmentMetadataMap(segmentInfos); + final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint( + this.shardId, + getOperationPrimaryTerm(), + segmentInfos.getGeneration(), + segmentInfos.getVersion(), + metadataMap.values().stream().mapToLong(StoreFileMetadata::length).sum(), + getEngine().config().getCodec().getName(), + metadataMap + ); + logger.trace("Recomputed ReplicationCheckpoint for shard {}", checkpoint); + return checkpoint; } /** @@ -1628,20 +1721,20 @@ public Tuple<GatedCloseable<SegmentInfos>, ReplicationCheckpoint> getLatestSegme */ public boolean isSegmentReplicationAllowed() { if (indexSettings.isSegRepEnabled() == false) { - logger.warn("Attempting to perform segment replication when it is not enabled on the index"); + logger.trace("Attempting to perform segment replication when it is not enabled on the index"); return false; } if (getReplicationTracker().isPrimaryMode()) { - logger.warn("Shard is in primary mode and cannot perform segment replication as a replica."); + logger.trace("Shard is in primary mode and cannot perform segment replication as a replica."); return false; } if (this.routingEntry().primary()) { - logger.warn("Shard routing is marked primary thus cannot perform segment replication as replica"); + logger.trace("Shard routing is marked primary thus cannot perform segment replication as replica"); return false; } if (state().equals(IndexShardState.STARTED) == false && (state() == IndexShardState.POST_RECOVERY && shardRouting.state() == ShardRoutingState.INITIALIZING) == false) { - logger.warn( + logger.trace( () -> new ParameterizedMessage( "Shard is not started or recovering {} {} and cannot perform segment replication as a replica", state(), @@ -1651,7 +1744,7 @@ public boolean isSegmentReplicationAllowed() { return false; } if (getReplicationEngine().isEmpty()) { - logger.warn( + logger.trace( () -> new ParameterizedMessage( "Shard does not have the correct engine type to perform segment replication {}.", getEngine().getClass() @@ -1672,8 +1765,8 @@ public final boolean shouldProcessCheckpoint(ReplicationCheckpoint requestCheckp if (isSegmentReplicationAllowed() == false) { return false; } - ReplicationCheckpoint localCheckpoint = getLatestReplicationCheckpoint(); - if (localCheckpoint.isAheadOf(requestCheckpoint)) { + final ReplicationCheckpoint localCheckpoint = getLatestReplicationCheckpoint(); + if (requestCheckpoint.isAheadOf(localCheckpoint) == false) { logger.trace( () -> new ParameterizedMessage( "Ignoring new replication checkpoint - Shard is already on checkpoint {} that is ahead of {}", @@ -1683,12 +1776,6 @@ public final boolean shouldProcessCheckpoint(ReplicationCheckpoint requestCheckp ); return false; } - if (localCheckpoint.equals(requestCheckpoint)) { - logger.trace( - () -> new ParameterizedMessage("Ignoring new 
replication checkpoint - Shard is already on checkpoint {}", requestCheckpoint) - ); - return false; - } return true; } @@ -1846,6 +1933,10 @@ static Engine.Searcher wrapSearcher( } } + public void onCheckpointPublished(ReplicationCheckpoint checkpoint) { + replicationTracker.startReplicationLagTimers(checkpoint); + } + /** * Used with segment replication during relocation handoff, this method updates current read only engine to global * checkpoint followed by changing to writeable engine @@ -1860,10 +1951,6 @@ public void resetToWriteableEngine() throws IOException, InterruptedException, T indexShardOperationPermits.blockOperations(30, TimeUnit.MINUTES, () -> { resetEngineToGlobalCheckpoint(); }); } - public void onCheckpointPublished(ReplicationCheckpoint checkpoint) { - replicationTracker.setLatestReplicationCheckpoint(checkpoint); - } - /** * Wrapper for a non-closing reader * @@ -1928,7 +2015,7 @@ public void close(String reason, boolean flushEngine, boolean deleted) throws IO /* ToDo : Fix this https://github.com/opensearch-project/OpenSearch/issues/8003 */ - private RemoteSegmentStoreDirectory getRemoteDirectory() { + public RemoteSegmentStoreDirectory getRemoteDirectory() { assert indexSettings.isRemoteStoreEnabled(); assert remoteStore.directory() instanceof FilterDirectory : "Store.directory is not an instance of FilterDirectory"; FilterDirectory remoteStoreDirectory = (FilterDirectory) remoteStore.directory(); @@ -1937,6 +2024,41 @@ private RemoteSegmentStoreDirectory getRemoteDirectory() { return ((RemoteSegmentStoreDirectory) remoteDirectory); } + /** + * Returns true iff it is able to verify that remote segment store + * is in sync with local + */ + boolean isRemoteSegmentStoreInSync() { + assert indexSettings.isRemoteStoreEnabled(); + try { + RemoteSegmentStoreDirectory directory = getRemoteDirectory(); + if (directory.readLatestMetadataFile() != null) { + Collection<String> uploadFiles = directory.getSegmentsUploadedToRemoteStore().keySet(); + try (GatedCloseable<SegmentInfos> segmentInfosGatedCloseable = getSegmentInfosSnapshot()) { + Collection<String> localSegmentInfosFiles = segmentInfosGatedCloseable.get().files(true); + Set<String> localFiles = new HashSet<>(localSegmentInfosFiles); + // verifying that all files except EXCLUDE_FILES are uploaded to the remote + localFiles.removeAll(RemoteStoreRefreshListener.EXCLUDE_FILES); + if (uploadFiles.containsAll(localFiles)) { + return true; + } + logger.debug( + () -> new ParameterizedMessage( + "RemoteSegmentStoreSyncStatus localSize={} remoteSize={}", + localFiles.size(), + uploadFiles.size() + ) + ); + } + } + } catch (AlreadyClosedException e) { + throw e; + } catch (Throwable e) { + logger.error("Exception while reading latest metadata", e); + } + return false; + } + public void preRecovery() { final IndexShardState currentState = this.state; // single volatile read if (currentState == IndexShardState.CLOSED) { @@ -1988,7 +2110,7 @@ private long recoverLocallyUpToGlobalCheckpoint() { final Optional<SequenceNumbers.CommitInfo> safeCommit; final long globalCheckpoint; try { - final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY); + final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(TRANSLOG_UUID_KEY); globalCheckpoint = Translog.readGlobalCheckpoint(translogConfig.getTranslogPath(), translogUUID); safeCommit = store.findSafeIndexCommit(globalCheckpoint); } catch (org.apache.lucene.index.IndexNotFoundException e) { @@ -2088,7 +2210,7 
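
// A distilled sketch of the isRemoteSegmentStoreInSync() check added above: the
// store is in sync when every local segment file, minus excluded bookkeeping files,
// is present in the uploaded set. The real EXCLUDE_FILES lives in
// RemoteStoreRefreshListener; the entry here is only illustrative.
import java.util.HashSet;
import java.util.Set;

class InSyncSketch {
    static final Set<String> EXCLUDE_FILES = Set.of("write.lock"); // illustrative only

    static boolean isInSync(Set<String> localSegmentFiles, Set<String> uploadedFiles) {
        Set<String> localFiles = new HashSet<>(localSegmentFiles);
        localFiles.removeAll(EXCLUDE_FILES);
        return uploadedFiles.containsAll(localFiles);
    }

    public static void main(String[] args) {
        Set<String> local = Set.of("_0.cfs", "_0.cfe", "write.lock");
        System.out.println(isInSync(local, Set.of("_0.cfs", "_0.cfe"))); // true
        System.out.println(isInSync(local, Set.of("_0.cfs")));           // false
    }
}
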
@@ private long recoverLocallyUptoLastCommit() { try { seqNo = Long.parseLong(store.readLastCommittedSegmentsInfo().getUserData().get(MAX_SEQ_NO)); } catch (org.apache.lucene.index.IndexNotFoundException e) { - logger.error("skip local recovery as no index commit found", e); + logger.error("skip local recovery as no index commit found"); return UNASSIGNED_SEQ_NO; } catch (Exception e) { logger.error("skip local recovery as failed to find the safe commit", e); @@ -2171,9 +2293,10 @@ private Engine.Result applyTranslogOperation(Engine engine, Translog.Operation o shardId.getIndexName(), index.id(), index.source(), - XContentHelper.xContentType(index.source()), + MediaTypeRegistry.xContentType(index.source()), index.routing() - ) + ), + index.id() ); break; case DELETE: @@ -2242,7 +2365,7 @@ private void loadGlobalCheckpointToReplicationTracker() throws IOException { // we have to set it before we open an engine and recover from the translog because // acquiring a snapshot from the translog causes a sync which causes the global checkpoint to be pulled in, // and an engine can be forced to close in ctor which also causes the global checkpoint to be pulled in. - final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY); + final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(TRANSLOG_UUID_KEY); final long globalCheckpoint = Translog.readGlobalCheckpoint(translogConfig.getTranslogPath(), translogUUID); replicationTracker.updateGlobalCheckpointOnReplica(globalCheckpoint, "read from translog checkpoint"); } @@ -2268,7 +2391,7 @@ public void openEngineAndRecoverFromTranslog() throws IOException { }; // Do not load the global checkpoint if this is a remote snapshot index - if (indexSettings.isRemoteSnapshot() == false) { + if (indexSettings.isRemoteSnapshot() == false && indexSettings.isRemoteTranslogStoreEnabled() == false) { loadGlobalCheckpointToReplicationTracker(); } @@ -2307,6 +2430,7 @@ private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier) t } private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier, boolean syncFromRemote) throws IOException { + syncFromRemote = syncFromRemote && indexSettings.isRemoteSnapshot() == false; assert Thread.holdsLock(mutex) == false : "opening engine under mutex"; if (state != IndexShardState.RECOVERING) { throw new IndexShardNotRecoveringException(shardId, state); @@ -2325,25 +2449,49 @@ private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier, b synchronized (engineMutex) { assert currentEngineReference.get() == null : "engine is running"; verifyNotClosed(); - if (indexSettings.isRemoteStoreEnabled() && syncFromRemote) { - syncSegmentsFromRemoteSegmentStore(false, true, true); - } - if (indexSettings.isRemoteTranslogStoreEnabled() && shardRouting.primary()) { + if (indexSettings.isRemoteStoreEnabled()) { + // Download missing segments from remote segment store. if (syncFromRemote) { - syncRemoteTranslogAndUpdateGlobalCheckpoint(); - } else { - // we will enter this block when we do not want to recover from remote translog. - // currently only during snapshot restore, we are coming into this block. - // here, as while initiliazing remote translog we cannot skip downloading translog files, - // so before that step, we are deleting the translog files present in remote store. 
-                    deleteTranslogFilesFromRemoteTranslog();
-
+                    syncSegmentsFromRemoteSegmentStore(false);
+                }
+                if (shardRouting.primary()) {
+                    if (syncFromRemote) {
+                        syncRemoteTranslogAndUpdateGlobalCheckpoint();
+                    } else {
+                        // we will enter this block when we do not want to recover from remote translog.
+                        // currently only during snapshot restore, we are coming into this block.
+                        // here, as while initializing the remote translog we cannot skip downloading translog files,
+                        // so before that step, we are deleting the translog files present in the remote store.
+                        deleteTranslogFilesFromRemoteTranslog();
+                    }
+                } else if (syncFromRemote) {
+                    // For replicas, when we download segments from remote segment store, we need to make sure that the local
+                    // translog has the same UUID that is referred to by the segments. If they are different, engine open
+                    // fails with TranslogCorruptedException. It is safe to create an empty translog for remote store enabled
+                    // indices, as a replica would only need to read the translog in a failover scenario and we always fetch data
+                    // from the remote translog at the time of failover.
+                    final SegmentInfos lastCommittedSegmentInfos = store().readLastCommittedSegmentsInfo();
+                    final String translogUUID = lastCommittedSegmentInfos.userData.get(TRANSLOG_UUID_KEY);
+                    final long checkpoint = Long.parseLong(lastCommittedSegmentInfos.userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY));
+                    Translog.createEmptyTranslog(
+                        shardPath().resolveTranslog(),
+                        shardId(),
+                        checkpoint,
+                        getPendingPrimaryTerm(),
+                        translogUUID,
+                        FileChannel::open
+                    );
                }
            }
            // we must create a new engine under mutex (see IndexShard#snapshotStoreMetadata).
            final Engine newEngine = engineFactory.newReadWriteEngine(config);
            onNewEngine(newEngine);
            currentEngineReference.set(newEngine);
+
+            if (indexSettings.isSegRepEnabled()) {
+                // set initial replication checkpoints into tracker.
+                updateReplicationCheckpoint();
+            }
            // We set active because we are now writing operations to the engine; this way,
            // we can flush if we go idle after some time and become inactive.
            active.set(true);
@@ -2914,10 +3062,24 @@ public void updateVisibleCheckpointForShard(final String allocationId, final Rep
     * @return {@link Tuple} V1 - TimeValue in ms - mean replication lag for this primary to its entire group,
     * V2 - Set of {@link SegmentReplicationShardStats} per shard in this primary's replication group.
     */
-    public Set<SegmentReplicationShardStats> getReplicationStats() {
+    public Set<SegmentReplicationShardStats> getReplicationStatsForTrackedReplicas() {
        return replicationTracker.getSegmentReplicationStats();
    }

+    public ReplicationStats getReplicationStats() {
+        if (indexSettings.isSegRepEnabled() && routingEntry().primary()) {
+            final Set<SegmentReplicationShardStats> stats = getReplicationStatsForTrackedReplicas();
+            long maxBytesBehind = stats.stream().mapToLong(SegmentReplicationShardStats::getBytesBehindCount).max().orElse(0L);
+            long totalBytesBehind = stats.stream().mapToLong(SegmentReplicationShardStats::getBytesBehindCount).sum();
+            long maxReplicationLag = stats.stream()
+                .mapToLong(SegmentReplicationShardStats::getCurrentReplicationLagMillis)
+                .max()
+                .orElse(0L);
+            return new ReplicationStats(maxBytesBehind, totalBytesBehind, maxReplicationLag);
+        }
+        return new ReplicationStats();
+    }
+
    /**
     * Add a global checkpoint listener. If the global checkpoint is equal to or above the global checkpoint the listener is waiting for,
     * then the listener will be notified immediately via an executor (so possibly not on the current thread).
If the specified timeout @@ -3293,6 +3455,20 @@ assert getLocalCheckpoint() == primaryContext.getCheckpointStates().get(routingE synchronized (mutex) { replicationTracker.activateWithPrimaryContext(primaryContext); // make changes to primaryMode flag only under mutex } + postActivatePrimaryMode(); + } + + private void postActivatePrimaryMode() { + if (indexSettings.isRemoteStoreEnabled()) { + // We make sure to upload translog (even if it does not contain any operations) to remote translog. + // This helps to get a consistent state in remote store where both remote segment store and remote + // translog contains data. + try { + getEngine().translogManager().syncTranslog(); + } catch (IOException e) { + logger.error("Failed to sync translog to remote from new primary", e); + } + } ensurePeerRecoveryRetentionLeasesExist(); } @@ -3437,6 +3613,7 @@ public void startRecovery( // } // }} // } + logger.debug("startRecovery type={}", recoveryState.getRecoverySource().getType()); assert recoveryState.getRecoverySource().equals(shardRouting.recoverySource()); switch (recoveryState.getRecoverySource().getType()) { case EMPTY_STORE: @@ -3669,6 +3846,9 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro internalRefreshListener.clear(); internalRefreshListener.add(new RefreshMetricUpdater(refreshMetric)); + if (indexSettings.isSegRepEnabled()) { + internalRefreshListener.add(new ReplicationCheckpointUpdater()); + } if (this.checkpointPublisher != null && shardRouting.primary() && indexSettings.isSegRepLocalEnabled()) { internalRefreshListener.add(new CheckpointRefreshListener(this, this.checkpointPublisher)); } @@ -3677,16 +3857,15 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro internalRefreshListener.add( new RemoteStoreRefreshListener( this, - // Add the checkpoint publisher if the Segment Replciation via remote store is enabled. - indexSettings.isSegRepWithRemoteEnabled() ? this.checkpointPublisher : SegmentReplicationCheckpointPublisher.EMPTY, - remoteRefreshSegmentPressureService.getRemoteRefreshSegmentTracker(shardId()) + this.checkpointPublisher, + remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(shardId()) ) ); } - /** - * With segment replication enabled for primary relocation, recover replica shard initially as read only and - * change to a writeable engine during relocation handoff after a round of segment replication. + /* + With segment replication enabled for primary relocation, recover replica shard initially as read only and + change to a writeable engine during relocation handoff after a round of segment replication. */ boolean isReadOnlyReplica = indexSettings.isSegRepEnabled() && (shardRouting.primary() == false @@ -3698,7 +3877,7 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro indexSettings, warmer, store, - indexSettings.getMergePolicy(), + indexSettings.getMergePolicy(isTimeSeriesIndex), mapperService != null ? 
mapperService.indexAnalyzer() : null, similarityService.similarity(mapperService), engineConfigFactory.newCodecServiceOrDefault(indexSettings, mapperService, logger, codecService), @@ -3713,13 +3892,13 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro circuitBreakerService, globalCheckpointSupplier, replicationTracker::getRetentionLeases, - () -> getOperationPrimaryTerm(), + this::getOperationPrimaryTerm, tombstoneDocSupplier(), isReadOnlyReplica, - replicationTracker::isPrimaryMode, + this::isStartedPrimary, translogFactorySupplier.apply(indexSettings, shardRouting), isTimeSeriesDescSortOptimizationEnabled() ? DataStream.TIMESERIES_LEAF_SORTER : null // DESC @timestamp default order for - // timeseries + // timeseries ); } @@ -3731,6 +3910,15 @@ public boolean isRemoteTranslogEnabled() { return indexSettings() != null && indexSettings().isRemoteTranslogStoreEnabled(); } + /** + * This checks if we are in state to upload to remote store. Until the cluster-manager informs the shard through + * cluster state, the shard will not be in STARTED state. This method is used to prevent pre-emptive segment or + * translog uploads. + */ + public boolean isStartedPrimary() { + return getReplicationTracker().isPrimaryMode() && state() == IndexShardState.STARTED; + } + /** * @return true if segment reverse search optimization is enabled for time series based workload. */ @@ -4096,6 +4284,8 @@ private static AsyncIOProcessor<Translog.Location> createTranslogSyncProcessor( boolean bufferAsyncIoProcessor, Supplier<TimeValue> bufferIntervalSupplier ) { + assert bufferAsyncIoProcessor == false || Objects.nonNull(bufferIntervalSupplier) + : "If bufferAsyncIoProcessor is true, then the bufferIntervalSupplier needs to be non null"; ThreadContext threadContext = threadPool.getThreadContext(); CheckedConsumer<List<Tuple<Translog.Location, Consumer<Exception>>>, IOException> writeConsumer = candidates -> { try { @@ -4251,8 +4441,9 @@ private RefreshListeners buildRefreshListeners() { * * @see IndexShard#addShardFailureCallback(Consumer) * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class ShardFailure { public final ShardRouting routing; public final String reason; @@ -4320,15 +4511,19 @@ public final boolean isSearchIdle() { } /** - * * Returns true if this shard supports search idle. - * + * <p> * Indices using Segment Replication will ignore search idle unless there are no replicas. * Primary shards push out new segments only * after a refresh, so we don't want to wait for a search to trigger that cycle. Replicas will only refresh after receiving * a new set of segments. */ public final boolean isSearchIdleSupported() { + // If the index is remote store backed, then search idle is not supported. This is to ensure that async refresh + // task continues to upload to remote store periodically. + if (isRemoteTranslogEnabled()) { + return false; + } return indexSettings.isSegRepEnabled() == false || indexSettings.getNumberOfReplicas() == 0; } @@ -4473,6 +4668,33 @@ public void afterRefresh(boolean didRefresh) throws IOException { } } + /** + * Refresh listener to update the Shard's ReplicationCheckpoint post refresh. 
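
// getReplicationStats() earlier in this diff reduces per-replica
// SegmentReplicationShardStats into max/total bytes behind and max lag with streams.
// A self-contained sketch of the same reduction over a toy stats type:
import java.util.List;

class ReplicationStatsSketch {
    static final class ShardStats {
        final long bytesBehind;
        final long replicationLagMillis;

        ShardStats(long bytesBehind, long replicationLagMillis) {
            this.bytesBehind = bytesBehind;
            this.replicationLagMillis = replicationLagMillis;
        }
    }

    public static void main(String[] args) {
        List<ShardStats> stats = List.of(new ShardStats(100, 30), new ShardStats(250, 12));
        long maxBytesBehind = stats.stream().mapToLong(s -> s.bytesBehind).max().orElse(0L);
        long totalBytesBehind = stats.stream().mapToLong(s -> s.bytesBehind).sum();
        long maxReplicationLag = stats.stream().mapToLong(s -> s.replicationLagMillis).max().orElse(0L);
        System.out.println(maxBytesBehind + " " + totalBytesBehind + " " + maxReplicationLag); // 250 350 30
    }
}
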
+ */ + private class ReplicationCheckpointUpdater implements ReferenceManager.RefreshListener { + @Override + public void beforeRefresh() throws IOException {} + + @Override + public void afterRefresh(boolean didRefresh) throws IOException { + if (didRefresh) { + // We're only starting to track the replication checkpoint. The timers for replication are started when + // the checkpoint is published. This is done so that the timers do not include the time spent by primary + // in uploading the segments to remote store. + updateReplicationCheckpoint(); + } + } + } + + private void updateReplicationCheckpoint() { + final Tuple<GatedCloseable<SegmentInfos>, ReplicationCheckpoint> tuple = getLatestSegmentInfosAndCheckpoint(); + try (final GatedCloseable<SegmentInfos> ignored = tuple.v1()) { + replicationTracker.setLatestReplicationCheckpoint(tuple.v2()); + } catch (IOException e) { + throw new OpenSearchException("Error Closing SegmentInfos Snapshot", e); + } + } + private EngineConfig.TombstoneDocSupplier tombstoneDocSupplier() { final RootObjectMapper.Builder noopRootMapper = new RootObjectMapper.Builder("__noop"); final DocumentMapper noopDocumentMapper = mapperService != null @@ -4541,6 +4763,16 @@ public GatedCloseable<IndexCommit> acquireSafeIndexCommit() { } } + @Override + public GatedCloseable<SegmentInfos> getSegmentInfosSnapshot() { + synchronized (engineMutex) { + if (newEngineReference.get() == null) { + throw new AlreadyClosedException("engine was closed"); + } + return newEngineReference.get().getSegmentInfosSnapshot(); + } + } + @Override public void close() throws IOException { assert Thread.holdsLock(engineMutex); @@ -4555,7 +4787,7 @@ public void close() throws IOException { }; IOUtils.close(currentEngineReference.getAndSet(readOnlyEngine)); if (indexSettings.isRemoteStoreEnabled()) { - syncSegmentsFromRemoteSegmentStore(false, true, true); + syncSegmentsFromRemoteSegmentStore(false); } if (indexSettings.isRemoteTranslogStoreEnabled() && shardRouting.primary()) { syncRemoteTranslogAndUpdateGlobalCheckpoint(); @@ -4613,23 +4845,36 @@ public void syncTranslogFilesFromRemoteTranslog() throws IOException { } /** - * Downloads segments from remote segment store. - * @param overrideLocal flag to override local segment files with those in remote store - * @param refreshLevelSegmentSync last refresh checkpoint is used if true, commit checkpoint otherwise - * @param shouldCommit if the shard requires committing the changes after sync from remote. - * @throws IOException if exception occurs while reading segments from remote store + * Downloads segments from remote segment store + * @param overrideLocal flag to override local segment files with those in remote store. + * @throws IOException if exception occurs while reading segments from remote store. */ - public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal, boolean refreshLevelSegmentSync, boolean shouldCommit) - throws IOException { + public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal) throws IOException { + syncSegmentsFromRemoteSegmentStore(overrideLocal, () -> {}); + } + + /** + * Downloads segments from remote segment store along with updating the access time of the recovery target. + * @param overrideLocal flag to override local segment files with those in remote store. + * @param onFileSync runnable that updates the access time when run. + * @throws IOException if exception occurs while reading segments from remote store. 
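
// updateReplicationCheckpoint() above holds the SegmentInfos snapshot open only for
// the scope of a try-with-resources block. A simplified sketch of a
// GatedCloseable-style wrapper (the real class is
// org.opensearch.common.concurrent.GatedCloseable; this is not its exact API):
import java.io.Closeable;
import java.io.IOException;

class GatedCloseableSketch {
    static final class Gated<T> implements Closeable {
        private final T value;
        private final Closeable onClose;

        Gated(T value, Closeable onClose) {
            this.value = value;
            this.onClose = onClose;
        }

        T get() {
            return value;
        }

        @Override
        public void close() throws IOException {
            onClose.close();
        }
    }

    public static void main(String[] args) throws IOException {
        // The wrapped value stays usable until close() runs the release action,
        // e.g. decrementing a ref count that pins segment files on disk.
        try (Gated<String> snapshot = new Gated<>("segment_infos", () -> System.out.println("ref released"))) {
            System.out.println("using " + snapshot.get());
        }
    }
}
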
+ */ + public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal, final Runnable onFileSync) throws IOException { + boolean syncSegmentSuccess = false; + long startTimeMs = System.currentTimeMillis(); assert indexSettings.isRemoteStoreEnabled(); - logger.info("Downloading segments from remote segment store"); + logger.trace("Downloading segments from remote segment store"); RemoteSegmentStoreDirectory remoteDirectory = getRemoteDirectory(); // We need to call RemoteSegmentStoreDirectory.init() in order to get latest metadata of the files that // are uploaded to the remote segment store. RemoteSegmentMetadata remoteSegmentMetadata = remoteDirectory.init(); Map<String, RemoteSegmentStoreDirectory.UploadedSegmentMetadata> uploadedSegments = remoteDirectory - .getSegmentsUploadedToRemoteStore(); + .getSegmentsUploadedToRemoteStore() + .entrySet() + .stream() + .filter(entry -> entry.getKey().startsWith(IndexFileNames.SEGMENTS) == false) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); store.incRef(); remoteStore.incRef(); try { @@ -4647,56 +4892,34 @@ public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal, boolean re } else { storeDirectory = store.directory(); } - Set<String> localSegmentFiles = Sets.newHashSet(storeDirectory.listAll()); - copySegmentFiles(storeDirectory, remoteDirectory, null, uploadedSegments, overrideLocal); + copySegmentFiles(storeDirectory, remoteDirectory, null, uploadedSegments, overrideLocal, onFileSync); - if (refreshLevelSegmentSync && remoteSegmentMetadata != null) { - try ( - ChecksumIndexInput indexInput = new BufferedChecksumIndexInput( - new ByteArrayIndexInput("Snapshot of SegmentInfos", remoteSegmentMetadata.getSegmentInfosBytes()) - ); - ) { - SegmentInfos infosSnapshot = SegmentInfos.readCommit( - store.directory(), - indexInput, - remoteSegmentMetadata.getGeneration() - ); - // Replicas never need a local commit - if (shouldCommit) { - if (this.shardRouting.primary()) { - long processedLocalCheckpoint = Long.parseLong(infosSnapshot.getUserData().get(LOCAL_CHECKPOINT_KEY)); - // Following code block makes sure to use SegmentInfosSnapshot in the remote store if generation differs - // with local filesystem. If local filesystem already has segments_N+2 and infosSnapshot has generation N, - // after commit, there would be 2 files that would be created segments_N+1 and segments_N+2. With the - // policy of preserving only the latest commit, we will delete segments_N+1 which in fact is the part of the - // latest commit. - Optional<String> localMaxSegmentInfos = localSegmentFiles.stream() - .filter(file -> file.startsWith(IndexFileNames.SEGMENTS)) - .max(Comparator.comparingLong(SegmentInfos::generationFromSegmentsFileName)); - if (localMaxSegmentInfos.isPresent() - && infosSnapshot.getGeneration() < SegmentInfos.generationFromSegmentsFileName(localMaxSegmentInfos.get()) - - 1) { - // If remote translog is not enabled, local translog will be created with different UUID. - // This fails in Store.trimUnsafeCommits() as translog UUID of checkpoint and SegmentInfos needs - // to be same. Following code block make sure to have the same UUID. 
- if (indexSettings.isRemoteTranslogStoreEnabled() == false) { - SegmentInfos localSegmentInfos = store.readLastCommittedSegmentsInfo(); - Map<String, String> userData = new HashMap<>(infosSnapshot.getUserData()); - userData.put(TRANSLOG_UUID_KEY, localSegmentInfos.userData.get(TRANSLOG_UUID_KEY)); - infosSnapshot.setUserData(userData, false); - } - storeDirectory.deleteFile(localMaxSegmentInfos.get()); - } - store.commitSegmentInfos(infosSnapshot, processedLocalCheckpoint, processedLocalCheckpoint); - } - } else { - finalizeReplication(infosSnapshot); + if (remoteSegmentMetadata != null) { + final SegmentInfos infosSnapshot = store.buildSegmentInfos( + remoteSegmentMetadata.getSegmentInfosBytes(), + remoteSegmentMetadata.getGeneration() + ); + long processedLocalCheckpoint = Long.parseLong(infosSnapshot.getUserData().get(LOCAL_CHECKPOINT_KEY)); + // delete any other commits, we want to start the engine only from a new commit made with the downloaded infos bytes. + // Extra segments will be wiped on engine open. + for (String file : List.of(store.directory().listAll())) { + if (file.startsWith(IndexFileNames.SEGMENTS)) { + store.deleteQuiet(file); } } + assert Arrays.stream(store.directory().listAll()).filter(f -> f.startsWith(IndexFileNames.SEGMENTS)).findAny().isEmpty() + : "There should not be any segments file in the dir"; + store.commitSegmentInfos(infosSnapshot, processedLocalCheckpoint, processedLocalCheckpoint); } + syncSegmentSuccess = true; } catch (IOException e) { throw new IndexShardRecoveryException(shardId, "Exception while copying segment files from remote segment store", e); } finally { + logger.trace( + "syncSegmentsFromRemoteSegmentStore success={} elapsedTime={}", + syncSegmentSuccess, + (System.currentTimeMillis() - startTimeMs) + ); store.decRef(); remoteStore.decRef(); } @@ -4716,7 +4939,7 @@ public void syncSegmentsFromGivenRemoteSegmentStore( long primaryTerm, long commitGeneration ) throws IOException { - logger.info("Downloading segments from given remote segment store"); + logger.trace("Downloading segments from given remote segment store"); RemoteSegmentStoreDirectory remoteDirectory = null; if (remoteStore != null) { remoteDirectory = getRemoteDirectory(); @@ -4724,8 +4947,7 @@ public void syncSegmentsFromGivenRemoteSegmentStore( remoteStore.incRef(); } Map<String, RemoteSegmentStoreDirectory.UploadedSegmentMetadata> uploadedSegments = sourceRemoteDirectory - .initializeToSpecificCommit(primaryTerm, commitGeneration) - .getMetadata(); + .getSegmentsUploadedToRemoteStore(); final Directory storeDirectory = store.directory(); store.incRef(); @@ -4735,7 +4957,8 @@ public void syncSegmentsFromGivenRemoteSegmentStore( sourceRemoteDirectory, remoteDirectory, uploadedSegments, - overrideLocal + overrideLocal, + () -> {} ); if (segmentsNFile != null) { try ( @@ -4768,42 +4991,51 @@ private String copySegmentFiles( RemoteSegmentStoreDirectory sourceRemoteDirectory, RemoteSegmentStoreDirectory targetRemoteDirectory, Map<String, RemoteSegmentStoreDirectory.UploadedSegmentMetadata> uploadedSegments, - boolean overrideLocal + boolean overrideLocal, + final Runnable onFileSync ) throws IOException { - List<String> downloadedSegments = new ArrayList<>(); - List<String> skippedSegments = new ArrayList<>(); + Set<String> toDownloadSegments = new HashSet<>(); + Set<String> skippedSegments = new HashSet<>(); String segmentNFile = null; + try { - Set<String> localSegmentFiles = Sets.newHashSet(storeDirectory.listAll()); if (overrideLocal) { - for (String file : localSegmentFiles) 
{ + for (String file : storeDirectory.listAll()) { storeDirectory.deleteFile(file); } } + for (String file : uploadedSegments.keySet()) { long checksum = Long.parseLong(uploadedSegments.get(file).getChecksum()); if (overrideLocal || localDirectoryContains(storeDirectory, file, checksum) == false) { - storeDirectory.copyFrom(sourceRemoteDirectory, file, file, IOContext.DEFAULT); - downloadedSegments.add(file); + toDownloadSegments.add(file); } else { skippedSegments.add(file); } - if (targetRemoteDirectory != null) { - targetRemoteDirectory.copyFrom(storeDirectory, file, file, IOContext.DEFAULT); - } + if (file.startsWith(IndexFileNames.SEGMENTS)) { assert segmentNFile == null : "There should be only one SegmentInfosSnapshot file"; segmentNFile = file; } } + + if (toDownloadSegments.isEmpty() == false) { + try { + fileDownloader.download(sourceRemoteDirectory, storeDirectory, targetRemoteDirectory, toDownloadSegments, onFileSync); + } catch (Exception e) { + throw new IOException("Error occurred when downloading segments from remote store", e); + } + } } finally { - logger.info("Downloaded segments here: {}", downloadedSegments); - logger.info("Skipped download for segments here: {}", skippedSegments); + logger.trace("Downloaded segments here: {}", toDownloadSegments); + logger.trace("Skipped download for segments here: {}", skippedSegments); } + return segmentNFile; } - private boolean localDirectoryContains(Directory localDirectory, String file, long checksum) { + // Visible for testing + boolean localDirectoryContains(Directory localDirectory, String file, long checksum) throws IOException { try (IndexInput indexInput = localDirectory.openInput(file, IOContext.DEFAULT)) { if (checksum == CodecUtil.retrieveChecksum(indexInput)) { return true; @@ -4822,6 +5054,8 @@ private boolean localDirectoryContains(Directory localDirectory, String file, lo logger.debug("File {} does not exist in local FS, downloading from remote store", file); } catch (IOException e) { logger.warn("Exception while reading checksum of file: {}, this can happen if file is corrupted", file); + // For any other exception on reading checksum, we delete the file to re-download again + localDirectory.deleteFile(file); } return false; } @@ -4880,4 +5114,17 @@ RetentionLeaseSyncer getRetentionLeaseSyncer() { public GatedCloseable<SegmentInfos> getSegmentInfosSnapshot() { return getEngine().getSegmentInfosSnapshot(); } + + private TimeValue getRemoteTranslogUploadBufferInterval(Supplier<TimeValue> clusterRemoteTranslogBufferIntervalSupplier) { + assert Objects.nonNull(clusterRemoteTranslogBufferIntervalSupplier) : "remote translog buffer interval supplier is null"; + if (indexSettings().isRemoteTranslogBufferIntervalExplicit()) { + return indexSettings().getRemoteTranslogUploadBufferInterval(); + } + return clusterRemoteTranslogBufferIntervalSupplier.get(); + } + + // Exclusively for testing, please do not use it elsewhere. 
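+    // A minimal sketch of how a test might drive it (illustrative only; 'shard' and 'location' are
+    // assumed to be an IndexShard under test and a Translog.Location from a prior indexing operation):
+    //
+    //   AsyncIOProcessor<Translog.Location> processor = shard.getTranslogSyncProcessor();
+    //   processor.put(location, ex -> assertNull("translog sync should not fail", ex));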
+ public AsyncIOProcessor<Translog.Location> getTranslogSyncProcessor() { + return translogSyncProcessor; + } } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShardClosedException.java b/server/src/main/java/org/opensearch/index/shard/IndexShardClosedException.java index 8b4c9a188e00c..8cc8f0356b775 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShardClosedException.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShardClosedException.java @@ -32,6 +32,7 @@ package org.opensearch.index.shard; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.shard.ShardId; @@ -40,8 +41,9 @@ /** * Exception thrown if trying to operate on a closed Index Shard * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexShardClosedException extends IllegalIndexShardStateException { public IndexShardClosedException(ShardId shardId) { super(shardId, IndexShardState.CLOSED, "Closed"); diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShardNotRecoveringException.java b/server/src/main/java/org/opensearch/index/shard/IndexShardNotRecoveringException.java index 75af4ef53e638..d996464f25723 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShardNotRecoveringException.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShardNotRecoveringException.java @@ -32,6 +32,7 @@ package org.opensearch.index.shard; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.shard.ShardId; @@ -40,8 +41,9 @@ /** * Exception thrown if an index shard is not recovering * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexShardNotRecoveringException extends IllegalIndexShardStateException { public IndexShardNotRecoveringException(ShardId shardId, IndexShardState currentState) { diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShardNotStartedException.java b/server/src/main/java/org/opensearch/index/shard/IndexShardNotStartedException.java index 7f37ae2a00873..0371e57bfc282 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShardNotStartedException.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShardNotStartedException.java @@ -32,6 +32,7 @@ package org.opensearch.index.shard; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.shard.ShardId; @@ -40,8 +41,9 @@ /** * Exception thrown when trying to operate on an Index Shard that hasn't been started * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexShardNotStartedException extends IllegalIndexShardStateException { public IndexShardNotStartedException(ShardId shardId, IndexShardState currentState) { diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShardOperationPermits.java b/server/src/main/java/org/opensearch/index/shard/IndexShardOperationPermits.java index 7b93d05fb322a..da51530586893 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShardOperationPermits.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShardOperationPermits.java @@ -32,18 +32,18 @@ package org.opensearch.index.shard; -import org.opensearch.core.Assertions; import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionListener; 
import org.opensearch.action.ActionRunnable; import org.opensearch.action.support.ContextPreservingActionListener; import org.opensearch.common.CheckedRunnable; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.RunOnce; import org.opensearch.common.util.concurrent.ThreadContext.StoredContext; import org.opensearch.common.util.io.IOUtils; -import org.opensearch.common.lease.Releasable; +import org.opensearch.core.Assertions; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShardRecoveringException.java b/server/src/main/java/org/opensearch/index/shard/IndexShardRecoveringException.java index 9372ff222d6f0..eb145803d25ec 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShardRecoveringException.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShardRecoveringException.java @@ -32,6 +32,7 @@ package org.opensearch.index.shard; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.shard.ShardId; @@ -40,8 +41,9 @@ /** * Exception thrown when there is an error recovering an Index Shard * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexShardRecoveringException extends IllegalIndexShardStateException { public IndexShardRecoveringException(ShardId shardId) { diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShardRelocatedException.java b/server/src/main/java/org/opensearch/index/shard/IndexShardRelocatedException.java index 5176b23edcf82..be77a295d7bfc 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShardRelocatedException.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShardRelocatedException.java @@ -32,6 +32,7 @@ package org.opensearch.index.shard; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.shard.ShardId; @@ -40,8 +41,9 @@ /** * Exception thrown if there is an error relocating an index shard * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexShardRelocatedException extends IllegalIndexShardStateException { public IndexShardRelocatedException(ShardId shardId) { diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShardStartedException.java b/server/src/main/java/org/opensearch/index/shard/IndexShardStartedException.java index a724952e37707..f2f3554f6ef12 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShardStartedException.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShardStartedException.java @@ -32,6 +32,7 @@ package org.opensearch.index.shard; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.shard.ShardId; @@ -40,8 +41,9 @@ /** * Exception thrown if there is an error starting an index shard * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexShardStartedException extends IllegalIndexShardStateException { public IndexShardStartedException(ShardId shardId) { diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShardState.java 
b/server/src/main/java/org/opensearch/index/shard/IndexShardState.java index dc6f2fc3a86d9..deadf1db8314f 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShardState.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShardState.java @@ -32,11 +32,14 @@ package org.opensearch.index.shard; +import org.opensearch.common.annotation.PublicApi; + /** * Index Shard States * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum IndexShardState { CREATED((byte) 0), RECOVERING((byte) 1), diff --git a/server/src/main/java/org/opensearch/index/shard/IndexingOperationListener.java b/server/src/main/java/org/opensearch/index/shard/IndexingOperationListener.java index ccc4cd336cff7..edce676b7c81e 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexingOperationListener.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexingOperationListener.java @@ -33,6 +33,7 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.engine.Engine; @@ -41,8 +42,9 @@ /** * An indexing listener for indexing, delete, events. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface IndexingOperationListener { /** diff --git a/server/src/main/java/org/opensearch/index/shard/IndexingStats.java b/server/src/main/java/org/opensearch/index/shard/IndexingStats.java index f45417a20036e..862962dc5467a 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexingStats.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexingStats.java @@ -33,10 +33,12 @@ package org.opensearch.index.shard; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -44,21 +46,108 @@ import java.io.IOException; import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; /** * Tracks indexing statistics * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexingStats implements Writeable, ToXContentFragment { /** * Internal statistics for indexing * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Stats implements Writeable, ToXContentFragment { + /** + * Tracks item level rest category class codes during indexing + * + * @opensearch.api + */ + @PublicApi(since = "1.0.0") + public static class DocStatusStats implements Writeable, ToXContentFragment { + + final AtomicLong[] docStatusCounter; + + public DocStatusStats() { + docStatusCounter = new AtomicLong[5]; + for (int i = 0; i < docStatusCounter.length; ++i) { + docStatusCounter[i] = new AtomicLong(0); + } + } + + public DocStatusStats(StreamInput in) throws IOException { + docStatusCounter = in.readArray(i -> new AtomicLong(i.readLong()), AtomicLong[]::new); + + assert docStatusCounter.length == 5 : "Length of incoming array should be 5! 
Got " + docStatusCounter.length; + } + + /** + * Increment counter for status + * + * @param status {@link RestStatus} + */ + public void inc(final RestStatus status) { + add(status, 1L); + } + + /** + * Increment counter for status by count + * + * @param status {@link RestStatus} + * @param delta The value to add + */ + void add(final RestStatus status, final long delta) { + docStatusCounter[status.getStatusFamilyCode() - 1].addAndGet(delta); + } + + /** + * Accumulate stats from the passed Object + * + * @param stats Instance storing {@link DocStatusStats} + */ + public void add(final DocStatusStats stats) { + if (null == stats) { + return; + } + + for (int i = 0; i < docStatusCounter.length; ++i) { + docStatusCounter[i].addAndGet(stats.docStatusCounter[i].longValue()); + } + } + + public AtomicLong[] getDocStatusCounter() { + return docStatusCounter; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(Fields.DOC_STATUS); + + for (int i = 0; i < docStatusCounter.length; ++i) { + long value = docStatusCounter[i].longValue(); + + if (value > 0) { + String key = i + 1 + "xx"; + builder.field(key, value); + } + } + + return builder.endObject(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeArray((o, v) -> o.writeLong(v.longValue()), docStatusCounter); + } + + } + private long indexCount; private long indexTimeInMillis; private long indexCurrent; @@ -69,8 +158,11 @@ public static class Stats implements Writeable, ToXContentFragment { private long noopUpdateCount; private long throttleTimeInMillis; private boolean isThrottled; + private final DocStatusStats docStatusStats; - Stats() {} + Stats() { + docStatusStats = new DocStatusStats(); + } public Stats(StreamInput in) throws IOException { indexCount = in.readVLong(); @@ -83,6 +175,12 @@ public Stats(StreamInput in) throws IOException { noopUpdateCount = in.readVLong(); isThrottled = in.readBoolean(); throttleTimeInMillis = in.readLong(); + + if (in.getVersion().onOrAfter(Version.V_2_11_0)) { + docStatusStats = in.readOptionalWriteable(DocStatusStats::new); + } else { + docStatusStats = null; + } } public Stats( @@ -95,7 +193,8 @@ public Stats( long deleteCurrent, long noopUpdateCount, boolean isThrottled, - long throttleTimeInMillis + long throttleTimeInMillis, + DocStatusStats docStatusStats ) { this.indexCount = indexCount; this.indexTimeInMillis = indexTimeInMillis; @@ -107,6 +206,7 @@ public Stats( this.noopUpdateCount = noopUpdateCount; this.isThrottled = isThrottled; this.throttleTimeInMillis = throttleTimeInMillis; + this.docStatusStats = docStatusStats; } public void add(Stats stats) { @@ -121,8 +221,10 @@ public void add(Stats stats) { noopUpdateCount += stats.noopUpdateCount; throttleTimeInMillis += stats.throttleTimeInMillis; - if (isThrottled != stats.isThrottled) { - isThrottled = true; // When combining if one is throttled set result to throttled. + isThrottled |= stats.isThrottled; // When combining if one is throttled set result to throttled. 
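+        // docStatusStats may be null here when this Stats instance was deserialized from a node on a
+        // version earlier than 2.11 (see the version check in the StreamInput constructor), hence the
+        // null guard below before merging the per-status-family counters. As a worked example: a bulk
+        // item that returned RestStatus.CREATED (201) has family code 2, so it was counted in
+        // docStatusCounter[1] and shows up under the "2xx" key in the stats output.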
+ + if (getDocStatusStats() != null) { + getDocStatusStats().add(stats.getDocStatusStats()); } } @@ -193,6 +295,10 @@ public long getNoopUpdateCount() { return noopUpdateCount; } + public DocStatusStats getDocStatusStats() { + return docStatusStats; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeVLong(indexCount); @@ -206,6 +312,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(isThrottled); out.writeLong(throttleTimeInMillis); + if (out.getVersion().onOrAfter(Version.V_2_11_0)) { + out.writeOptionalWriteable(docStatusStats); + } } @Override @@ -223,8 +332,14 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(Fields.IS_THROTTLED, isThrottled); builder.humanReadableField(Fields.THROTTLED_TIME_IN_MILLIS, Fields.THROTTLED_TIME, getThrottleTime()); + + if (getDocStatusStats() != null) { + getDocStatusStats().toXContent(builder, params); + } + return builder; } + } private final Stats totalStats; @@ -279,7 +394,7 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par * * @opensearch.internal */ - static final class Fields { + private static final class Fields { static final String INDEXING = "indexing"; static final String INDEX_TOTAL = "index_total"; static final String INDEX_TIME = "index_time"; @@ -294,6 +409,7 @@ static final class Fields { static final String IS_THROTTLED = "is_throttled"; static final String THROTTLED_TIME_IN_MILLIS = "throttle_time_in_millis"; static final String THROTTLED_TIME = "throttle_time"; + static final String DOC_STATUS = "doc_status"; } @Override @@ -303,4 +419,5 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(false); } } + } diff --git a/server/src/main/java/org/opensearch/index/shard/InternalIndexingStats.java b/server/src/main/java/org/opensearch/index/shard/InternalIndexingStats.java index d7e15dd3e40f5..55b65bb4be6d8 100644 --- a/server/src/main/java/org/opensearch/index/shard/InternalIndexingStats.java +++ b/server/src/main/java/org/opensearch/index/shard/InternalIndexingStats.java @@ -154,7 +154,8 @@ IndexingStats.Stats stats(boolean isThrottled, long currentThrottleMillis) { deleteCurrent.count(), noopUpdates.count(), isThrottled, - TimeUnit.MILLISECONDS.toMillis(currentThrottleMillis) + TimeUnit.MILLISECONDS.toMillis(currentThrottleMillis), + new IndexingStats.Stats.DocStatusStats() ); } } diff --git a/server/src/main/java/org/opensearch/index/shard/PrimaryReplicaSyncer.java b/server/src/main/java/org/opensearch/index/shard/PrimaryReplicaSyncer.java index 4f0affb3035ec..af8220db25fcb 100644 --- a/server/src/main/java/org/opensearch/index/shard/PrimaryReplicaSyncer.java +++ b/server/src/main/java/org/opensearch/index/shard/PrimaryReplicaSyncer.java @@ -33,28 +33,28 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.resync.ResyncReplicationRequest; import org.opensearch.action.resync.ResyncReplicationResponse; import org.opensearch.action.resync.TransportResyncReplicationAction; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Inject; +import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.common.util.io.IOUtils; +import 
org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; - -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.common.util.concurrent.AbstractRunnable; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.util.io.IOUtils; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.translog.Translog; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; import org.opensearch.tasks.TaskManager; import org.opensearch.transport.TransportService; @@ -399,8 +399,9 @@ public ActionRequestValidationException validate() { /** * Task to resync primary and replica * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class ResyncTask extends Task { private volatile String phase = "starting"; private volatile int totalOperations; @@ -466,8 +467,9 @@ public ResyncTask.Status getStatus() { /** * Status for primary replica syncer * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Status implements Task.Status { public static final String NAME = "resync"; @@ -516,7 +518,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } @Override diff --git a/server/src/main/java/org/opensearch/index/shard/PrimaryShardClosedException.java b/server/src/main/java/org/opensearch/index/shard/PrimaryShardClosedException.java index 34adbd67ac9f2..bcede4d6a9124 100644 --- a/server/src/main/java/org/opensearch/index/shard/PrimaryShardClosedException.java +++ b/server/src/main/java/org/opensearch/index/shard/PrimaryShardClosedException.java @@ -5,11 +5,11 @@ package org.opensearch.index.shard; -import java.io.IOException; - import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.shard.ShardId; +import java.io.IOException; + /** * Exception to indicate failures are caused due to the closure of the primary * shard. diff --git a/server/src/main/java/org/opensearch/index/shard/RefreshListeners.java b/server/src/main/java/org/opensearch/index/shard/RefreshListeners.java index 07c791e1dce14..803db773efe6c 100644 --- a/server/src/main/java/org/opensearch/index/shard/RefreshListeners.java +++ b/server/src/main/java/org/opensearch/index/shard/RefreshListeners.java @@ -35,10 +35,10 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.search.ReferenceManager; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.metrics.MeanMetric; import org.opensearch.common.util.concurrent.RunOnce; import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.common.lease.Releasable; import org.opensearch.index.translog.Translog; import java.io.Closeable; @@ -54,7 +54,7 @@ /** * Allows for the registration of listeners that are called when a change becomes visible for search. 
This functionality is exposed from * {@link IndexShard} but kept here so it can be tested without standing up the entire thing. - * + * <p> * When {@link Closeable#close()}d it will no longer accept listeners and flush any existing listeners. * * @opensearch.internal @@ -86,7 +86,7 @@ public final class RefreshListeners implements ReferenceManager.RefreshListener, * List of refresh listeners. Defaults to null and built on demand because most refresh cycles won't need it. Entries are never removed * from it, rather, it is nulled and rebuilt when needed again. The (hopefully) rare entries that didn't make the current refresh cycle * are just added back to the new list. Both the reference and the contents are always modified while synchronized on {@code this}. - * + * <p> * We never set this to non-null while closed it {@code true}. */ private volatile List<Tuple<Translog.Location, Consumer<Boolean>>> refreshListeners = null; diff --git a/server/src/main/java/org/opensearch/index/shard/ReleasableRetryableRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/ReleasableRetryableRefreshListener.java new file mode 100644 index 0000000000000..757275932c5f1 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/shard/ReleasableRetryableRefreshListener.java @@ -0,0 +1,241 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.shard; + +import org.apache.logging.log4j.Logger; +import org.apache.lucene.search.ReferenceManager; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.util.Objects; +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * RefreshListener that runs afterRefresh method if and only if there is a permit available. Once the {@code drainRefreshes()} + * is called, all the permits are acquired and there are no available permits to afterRefresh. This abstract class provides + * necessary abstract methods to schedule retry. + */ +public abstract class ReleasableRetryableRefreshListener implements ReferenceManager.RefreshListener { + + /** + * Total permits = 1 ensures that there is only single instance of runAfterRefreshWithPermit that is running at a time. + * In case there are use cases where concurrency is required, the total permit variable can be put inside the ctor. + */ + private static final int TOTAL_PERMITS = 1; + + private static final TimeValue DRAIN_TIMEOUT = TimeValue.timeValueMinutes(10); + + private final AtomicBoolean closed = new AtomicBoolean(false); + + private final Semaphore semaphore = new Semaphore(TOTAL_PERMITS); + + private final ThreadPool threadPool; + + /** + * This boolean is used to ensure that there is only 1 retry scheduled/running at any time. 
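+     * It is set via {@code getAndSet(true)} just before a retry is scheduled, and reset to {@code false} either by the retry task itself once it runs, or immediately if scheduling fails.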
+ */ + private final AtomicBoolean retryScheduled = new AtomicBoolean(false); + + public ReleasableRetryableRefreshListener() { + this.threadPool = null; + } + + public ReleasableRetryableRefreshListener(ThreadPool threadPool) { + assert Objects.nonNull(threadPool); + this.threadPool = threadPool; + } + + @Override + public final void afterRefresh(boolean didRefresh) throws IOException { + if (closed.get()) { + return; + } + runAfterRefreshExactlyOnce(didRefresh); + runAfterRefreshWithPermit(didRefresh, () -> {}); + } + + /** + * The code in this method is executed exactly once. This is done for running non-idempotent function which needs to be + * executed immediately when afterRefresh method is invoked. + * + * @param didRefresh if the refresh did open a new reference then didRefresh will be true + */ + protected void runAfterRefreshExactlyOnce(boolean didRefresh) { + // No-op: The implementor would be providing the code + } + + /** + * The implementor has the option to override the retry thread pool name. This will be used for scheduling the retries. + * The method would be invoked each time when a retry is required. By default, it uses the same threadpool for retry. + * + * @return the name of the retry thread pool. + */ + protected String getRetryThreadPoolName() { + return ThreadPool.Names.SAME; + } + + /** + * By default, the retry interval is returned as 1s. The implementor has the option to override the retry interval. + * This is used for scheduling the next retry. The method would be invoked each time when a retry is required. The + * implementor can choose any retry strategy and return the next retry interval accordingly. + * + * @return the interval for the next retry. + */ + protected TimeValue getNextRetryInterval() { + return TimeValue.timeValueSeconds(1); + } + + /** + * This method is used to schedule retry which internally calls the performAfterRefresh method under the available permits. + * + * @param interval interval after which the retry would be invoked + * @param retryThreadPoolName the thread pool name to be used for retry + * @param didRefresh if didRefresh is true + */ + private void scheduleRetry(TimeValue interval, String retryThreadPoolName, boolean didRefresh) { + // If the underlying listener has closed, then we do not allow even the retry to be scheduled + if (closed.get() || isRetryEnabled() == false) { + getLogger().debug("skip retry on closed={} isRetryEnabled={}", closed.get(), isRetryEnabled()); + return; + } + + assert Objects.nonNull(interval) && ThreadPool.THREAD_POOL_TYPES.containsKey(retryThreadPoolName); + + // If the retryScheduled is already true, then we return from here itself. If not, then we proceed with scheduling + // the retry. + if (retryScheduled.getAndSet(true)) { + getLogger().debug("skip retry on retryScheduled=true"); + return; + } + + boolean scheduled = false; + try { + this.threadPool.schedule( + () -> runAfterRefreshWithPermit(didRefresh, () -> retryScheduled.set(false)), + interval, + retryThreadPoolName + ); + scheduled = true; + getLogger().info("Scheduled retry with didRefresh={}", didRefresh); + } finally { + if (scheduled == false) { + retryScheduled.set(false); + } + } + } + + /** + * This returns if the retry is enabled or not. By default, the retries are not enabled. + * @return true if retry is enabled. + */ + protected boolean isRetryEnabled() { + return false; + } + + /** + * Runs the performAfterRefresh method under permit. If there are no permits available, then it is no-op. 
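+     * (No permit is available while {@code drainRefreshes()} holds all of them.)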
It also hits + * the scheduleRetry method with the result value of the performAfterRefresh method invocation. + * The synchronised block ensures that if there is a retry or afterRefresh waiting, then it waits until the previous + * execution finishes. + */ + private synchronized void runAfterRefreshWithPermit(boolean didRefresh, Runnable runFinally) { + if (closed.get()) { + return; + } + boolean successful; + boolean permitAcquired = semaphore.tryAcquire(); + try { + successful = permitAcquired && performAfterRefreshWithPermit(didRefresh); + } finally { + if (permitAcquired) { + semaphore.release(); + } + runFinally.run(); + } + scheduleRetry(successful, didRefresh); + } + + /** + * Schedules the retry based on the {@code afterRefreshSuccessful} value. + * + * @param afterRefreshSuccessful is sent true if the performAfterRefresh(..) is successful. + * @param didRefresh if the refresh did open a new reference then didRefresh will be true + */ + private void scheduleRetry(boolean afterRefreshSuccessful, boolean didRefresh) { + if (afterRefreshSuccessful == false) { + scheduleRetry(getNextRetryInterval(), getRetryThreadPoolName(), didRefresh); + } + } + + /** + * This method needs to be overridden and be provided with what needs to be run on after refresh with permits. + * + * @param didRefresh true if the refresh opened a new reference + * @return true if a retry is needed else false. + */ + protected abstract boolean performAfterRefreshWithPermit(boolean didRefresh); + + public final Releasable drainRefreshes() { + try { + TimeValue timeout = getDrainTimeout(); + if (semaphore.tryAcquire(TOTAL_PERMITS, timeout.seconds(), TimeUnit.SECONDS)) { + boolean result = closed.compareAndSet(false, true); + assert result && semaphore.availablePermits() == 0; + getLogger().info("All permits are acquired and refresh listener is closed"); + return Releasables.releaseOnce(() -> { + semaphore.release(TOTAL_PERMITS); + boolean wasClosed = closed.getAndSet(false); + assert semaphore.availablePermits() == TOTAL_PERMITS : "Available permits is " + semaphore.availablePermits(); + assert wasClosed : "RefreshListener is not closed before reopening it"; + getLogger().info("All permits are released and refresh listener is open"); + }); + } else { + throw new TimeoutException("Timeout while acquiring all permits"); + } + } catch (InterruptedException | TimeoutException e) { + throw new RuntimeException("Failed to acquire all permits", e); + } + } + + protected abstract Logger getLogger(); + + // Made available for unit testing purpose only + /** + * Returns the timeout which is used while draining refreshes. + */ + TimeValue getDrainTimeout() { + return DRAIN_TIMEOUT; + } + + // Visible for testing + /** + * Returns if the retry is scheduled or not. + * + * @return boolean as mentioned above. 
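+     * (i.e. {@code true} while a scheduled retry is pending execution)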
+ */ + boolean getRetryScheduledStatus() { + return retryScheduled.get(); + } + + // Visible for testing + int availablePermits() { + return semaphore.availablePermits(); + } + + // Visible for testing + boolean isClosed() { + return closed.get(); + } +} diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java index 2385b906a7ae5..7bb80b736693f 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -17,18 +17,18 @@ import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; -import org.opensearch.action.ActionListener; import org.opensearch.action.LatchedActionListener; import org.opensearch.action.bulk.BackoffPolicy; import org.opensearch.action.support.GroupedActionListener; +import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.logging.Loggers; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.UploadListener; -import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.core.action.ActionListener; import org.opensearch.index.engine.EngineException; import org.opensearch.index.engine.InternalEngine; -import org.opensearch.index.remote.RemoteRefreshSegmentTracker; +import org.opensearch.index.remote.RemoteSegmentTransferTracker; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; @@ -40,8 +40,8 @@ import java.io.IOException; import java.util.Collection; import java.util.HashMap; -import java.util.HashSet; import java.util.Iterator; +import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; @@ -55,7 +55,7 @@ * * @opensearch.internal */ -public final class RemoteStoreRefreshListener extends CloseableRetryableRefreshListener { +public final class RemoteStoreRefreshListener extends ReleasableRetryableRefreshListener { private final Logger logger; @@ -79,32 +79,21 @@ public final class RemoteStoreRefreshListener extends CloseableRetryableRefreshL REMOTE_REFRESH_RETRY_MAX_INTERVAL_MILLIS ); - // Visible for testing - static final Set<String> EXCLUDE_FILES = Set.of("write.lock"); - // Visible for testing - public static final int LAST_N_METADATA_FILES_TO_KEEP = 10; + public static final Set<String> EXCLUDE_FILES = Set.of("write.lock"); private final IndexShard indexShard; private final Directory storeDirectory; private final RemoteSegmentStoreDirectory remoteDirectory; - private final RemoteRefreshSegmentTracker segmentTracker; + private final RemoteSegmentTransferTracker segmentTracker; private final Map<String, String> localSegmentChecksumMap; - private long primaryTerm; + private volatile long primaryTerm; private volatile Iterator<TimeValue> backoffDelayIterator; - - /** - * Keeps track of segment files and their size in bytes which are part of the most recent refresh. 
- */ - private final Map<String, Long> latestFileNameSizeOnLocalMap = ConcurrentCollections.newConcurrentMap(); - private final SegmentReplicationCheckpointPublisher checkpointPublisher; - private final UploadListener statsListener; - public RemoteStoreRefreshListener( IndexShard indexShard, SegmentReplicationCheckpointPublisher checkpointPublisher, - RemoteRefreshSegmentTracker segmentTracker + RemoteSegmentTransferTracker segmentTracker ) { super(indexShard.getThreadPool()); logger = Loggers.getLogger(getClass(), indexShard.shardId()); @@ -122,36 +111,34 @@ public RemoteStoreRefreshListener( } } // initializing primary term with the primary term of latest metadata in remote store. - // if no metadata is present, this value will be initilized with -1. + // if no metadata is present, this value will be initialized with -1. this.primaryTerm = remoteSegmentMetadata != null ? remoteSegmentMetadata.getPrimaryTerm() : INVALID_PRIMARY_TERM; this.segmentTracker = segmentTracker; resetBackOffDelayIterator(); this.checkpointPublisher = checkpointPublisher; - this.statsListener = new UploadListener() { - @Override - public void beforeUpload(String file) { - // Start tracking the upload bytes started - segmentTracker.addUploadBytesStarted(latestFileNameSizeOnLocalMap.get(file)); - } - - @Override - public void onSuccess(String file) { - // Track upload success - segmentTracker.addUploadBytesSucceeded(latestFileNameSizeOnLocalMap.get(file)); - segmentTracker.addToLatestUploadedFiles(file); - } - - @Override - public void onFailure(String file) { - // Track upload failure - segmentTracker.addUploadBytesFailed(latestFileNameSizeOnLocalMap.get(file)); - } - }; } @Override public void beforeRefresh() throws IOException {} + @Override + protected void runAfterRefreshExactlyOnce(boolean didRefresh) { + // We have 2 separate methods to check if sync needs to be done or not. This is required since we use the return boolean + // from isReadyForUpload to schedule refresh retries as the index shard or the primary mode are not in complete + // ready state. + if (shouldSync(didRefresh, true) && isReadyForUpload()) { + try { + segmentTracker.updateLocalRefreshTimeAndSeqNo(); + try (GatedCloseable<SegmentInfos> segmentInfosGatedCloseable = indexShard.getSegmentInfosSnapshot()) { + Collection<String> localSegmentsPostRefresh = segmentInfosGatedCloseable.get().files(true); + updateLocalSizeMapAndTracker(localSegmentsPostRefresh); + } + } catch (Throwable t) { + logger.error("Exception in runAfterRefreshExactlyOnce() method", t); + } + } + } + /** * Upload new segment files created as part of the last refresh to the remote segment store. * This method also uploads remote_segments_metadata file which contains metadata of each segment file uploaded. @@ -160,14 +147,9 @@ public void beforeRefresh() throws IOException {} * @return true if the method runs successfully. 
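+     * A {@code false} return value causes the base {@code ReleasableRetryableRefreshListener} to schedule a retry after
+     * {@code getNextRetryInterval()}, since {@code isRetryEnabled()} is overridden to return true for this listener.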
     */
    @Override
-    protected boolean performAfterRefresh(boolean didRefresh, boolean isRetry) {
-        if (didRefresh && isRetry == false) {
-            updateLocalRefreshTimeAndSeqNo();
-        }
+    protected boolean performAfterRefreshWithPermit(boolean didRefresh) {
         boolean successful;
-        if (this.primaryTerm != indexShard.getOperationPrimaryTerm()
-            || didRefresh
-            || remoteDirectory.getSegmentsUploadedToRemoteStore().isEmpty()) {
+        if (shouldSync(didRefresh, false)) {
             successful = syncSegments();
         } else {
             successful = true;
@@ -175,17 +157,60 @@ protected boolean performAfterRefresh(boolean didRefresh, boolean isRetry) {
         return successful;
     }

-    private synchronized boolean syncSegments() {
-        if (indexShard.getReplicationTracker().isPrimaryMode() == false || indexShard.state() == IndexShardState.CLOSED) {
-            logger.info(
-                "Skipped syncing segments with primaryMode={} indexShardState={}",
-                indexShard.getReplicationTracker().isPrimaryMode(),
-                indexShard.state()
-            );
-            return true;
+    /**
+     * Checks whether a sync to the remote store is required.
+     *
+     * @param didRefresh true if the readers changed.
+     * @param skipPrimaryTermCheck whether to skip the check for a change in primary term
+     * @return true if a sync is needed
+     */
+    private boolean shouldSync(boolean didRefresh, boolean skipPrimaryTermCheck) {
+        boolean shouldSync = didRefresh // If the readers change, didRefresh is always true.
+            // The condition below exists for uploading the zero state segments, where the refresh has not changed the reader
+            // reference, but it is important to upload the zero state segments so that the restore does not break.
+            || remoteDirectory.getSegmentsUploadedToRemoteStore().isEmpty()
+            // When shouldSync is called the first time, the primary term condition (last below) is true. But after that
+            // we update the primary term, and the same condition would not evaluate to true again in syncSegments.
+            // The check below ensures that if there is a commit, it gets picked up by both the first and second shouldSync calls.
+            || isRefreshAfterCommitSafe()
+            || isRemoteSegmentStoreInSync() == false;
+        if (shouldSync || skipPrimaryTermCheck) {
+            return shouldSync;
+        }
+        return this.primaryTerm != indexShard.getOperationPrimaryTerm();
+    }
+
+    /**
+     * Checks if all files present in the local store are either uploaded to the remote store or part of the excluded files.
+     * <p>
+     * Different from IndexShard#isRemoteSegmentStoreInSync in that it uses the uploaded-files cache in RemoteDirectory and
+     * does not make a remote store call. It also does not throw an exception if the store gets closed while checking,
+     * as the store is expected to be open here.
+     *
+     * @return true iff all the local files are uploaded to the remote store.
+     */
+    boolean isRemoteSegmentStoreInSync() {
+        try (GatedCloseable<SegmentInfos> segmentInfosGatedCloseable = indexShard.getSegmentInfosSnapshot()) {
+            return segmentInfosGatedCloseable.get().files(true).stream().allMatch(this::skipUpload);
+        } catch (Throwable throwable) {
+            logger.error("Throwable thrown during isRemoteSegmentStoreInSync", throwable);
+        }
+        return false;
+    }
+
+    /**
+     * @return {@code false} if a retry is needed, {@code true} otherwise
+     */
+    private boolean syncSegments() {
+        if (isReadyForUpload() == false) {
+            // The following check is required to enable retry and to make sure that we do not lose this refresh event.
+            // When a primary shard is restored from remote store, the recovery happens first, followed by flipping
+            // primaryMode to true. Due to this, the refresh that is triggered post replay of translog will not go through
+            // if the following condition is absent.
The segments created as part of translog replay will not be present + // in the remote store. + return indexShard.state() != IndexShardState.STARTED || !(indexShard.getEngine() instanceof InternalEngine); } - ReplicationCheckpoint checkpoint = indexShard.getLatestReplicationCheckpoint(); - indexShard.onCheckpointPublished(checkpoint); beforeSegmentsSync(); long refreshTimeMs = segmentTracker.getLocalRefreshTimeMs(), refreshClockTimeMs = segmentTracker.getLocalRefreshClockTimeMs(); long refreshSeqNo = segmentTracker.getLocalRefreshSeqNo(); @@ -193,40 +218,53 @@ private synchronized boolean syncSegments() { final AtomicBoolean successful = new AtomicBoolean(false); try { - if (this.primaryTerm != indexShard.getOperationPrimaryTerm()) { - this.primaryTerm = indexShard.getOperationPrimaryTerm(); - this.remoteDirectory.init(); - } try { + initializeRemoteDirectoryOnTermUpdate(); // if a new segments_N file is present in local that is not uploaded to remote store yet, it // is considered as a first refresh post commit. A cleanup of stale commit files is triggered. // This is done to avoid delete post each refresh. if (isRefreshAfterCommit()) { - remoteDirectory.deleteStaleSegmentsAsync(LAST_N_METADATA_FILES_TO_KEEP); + remoteDirectory.deleteStaleSegmentsAsync(indexShard.getRecoverySettings().getMinRemoteSegmentMetadataFiles()); } try (GatedCloseable<SegmentInfos> segmentInfosGatedCloseable = indexShard.getSegmentInfosSnapshot()) { SegmentInfos segmentInfos = segmentInfosGatedCloseable.get(); + final ReplicationCheckpoint checkpoint = indexShard.computeReplicationCheckpoint(segmentInfos); + if (checkpoint.getPrimaryTerm() != indexShard.getOperationPrimaryTerm()) { + throw new IllegalStateException( + String.format( + Locale.ROOT, + "primaryTerm mismatch during segments upload to remote store [%s] != [%s]", + checkpoint.getPrimaryTerm(), + indexShard.getOperationPrimaryTerm() + ) + ); + } // Capture replication checkpoint before uploading the segments as upload can take some time and checkpoint can // move. 
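+                    // Note: the checkpoint read below also gates translog trimming. After a successful upload,
+                    // onSuccessfulSegmentsSync advances setMinSeqNoToKeep(lastRefreshedCheckpoint + 1), so it must be
+                    // captured against the same SegmentInfos snapshot that is being uploaded.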
long lastRefreshedCheckpoint = ((InternalEngine) indexShard.getEngine()).lastRefreshedCheckpoint(); Collection<String> localSegmentsPostRefresh = segmentInfos.files(true); // Create a map of file name to size and update the refresh segment tracker - updateLocalSizeMapAndTracker(localSegmentsPostRefresh); + Map<String, Long> localSegmentsSizeMap = updateLocalSizeMapAndTracker(localSegmentsPostRefresh).entrySet() + .stream() + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); CountDownLatch latch = new CountDownLatch(1); ActionListener<Void> segmentUploadsCompletedListener = new LatchedActionListener<>(new ActionListener<>() { @Override public void onResponse(Void unused) { try { + logger.debug("New segments upload successful"); // Start metadata file upload - uploadMetadata(localSegmentsPostRefresh, segmentInfos); + uploadMetadata(localSegmentsPostRefresh, segmentInfos, checkpoint); + logger.debug("Metadata upload successful"); clearStaleFilesFromLocalSegmentChecksumMap(localSegmentsPostRefresh); onSuccessfulSegmentsSync( refreshTimeMs, refreshClockTimeMs, refreshSeqNo, lastRefreshedCheckpoint, + localSegmentsSizeMap, checkpoint ); // At this point since we have uploaded new segments, segment infos and segment metadata file, @@ -247,7 +285,7 @@ public void onFailure(Exception e) { }, latch); // Start the segments files upload - uploadNewSegments(localSegmentsPostRefresh, segmentUploadsCompletedListener); + uploadNewSegments(localSegmentsPostRefresh, localSegmentsSizeMap, segmentUploadsCompletedListener); latch.await(); } catch (EngineException e) { logger.warn("Exception while reading SegmentInfosSnapshot", e); @@ -264,6 +302,7 @@ public void onFailure(Exception e) { updateFinalStatusInSegmentTracker(successful.get(), bytesBeforeUpload, startTimeInNS); // If there are failures in uploading segments, then we should retry as search idle can lead to // refresh not occurring until write happens. + logger.debug("syncSegments runStatus={}", successful.get()); return successful.get(); } @@ -290,10 +329,11 @@ private void onSuccessfulSegmentsSync( long refreshClockTimeMs, long refreshSeqNo, long lastRefreshedCheckpoint, + Map<String, Long> localFileSizeMap, ReplicationCheckpoint checkpoint ) { // Update latest uploaded segment files name in segment tracker - segmentTracker.setLatestUploadedFiles(latestFileNameSizeOnLocalMap.keySet()); + segmentTracker.setLatestUploadedFiles(localFileSizeMap.keySet()); // Update the remote refresh time and refresh seq no updateRemoteRefreshTimeAndSeqNo(refreshTimeMs, refreshClockTimeMs, refreshSeqNo); // Reset the backoffDelayIterator for the future failures @@ -302,6 +342,7 @@ private void onSuccessfulSegmentsSync( indexShard.getEngine().translogManager().setMinSeqNoToKeep(lastRefreshedCheckpoint + 1); // Publishing the new checkpoint which is used for remote store + segrep indexes checkpointPublisher.publish(indexShard, checkpoint); + logger.debug("onSuccessfulSegmentsSync lastRefreshedCheckpoint={} checkpoint={}", lastRefreshedCheckpoint, checkpoint); } /** @@ -327,7 +368,21 @@ private boolean isRefreshAfterCommit() throws IOException { && !remoteDirectory.containsFile(lastCommittedLocalSegmentFileName, getChecksumOfLocalFile(lastCommittedLocalSegmentFileName))); } - void uploadMetadata(Collection<String> localSegmentsPostRefresh, SegmentInfos segmentInfos) throws IOException { + /** + * Returns if the current refresh has happened after a commit. + * @return true if this refresh has happened on account of a commit. 
If otherwise or exception, returns false. + */ + private boolean isRefreshAfterCommitSafe() { + try { + return isRefreshAfterCommit(); + } catch (Exception e) { + logger.info("Exception occurred in isRefreshAfterCommitSafe", e); + } + return false; + } + + void uploadMetadata(Collection<String> localSegmentsPostRefresh, SegmentInfos segmentInfos, ReplicationCheckpoint replicationCheckpoint) + throws IOException { final long maxSeqNo = ((InternalEngine) indexShard.getEngine()).currentOngoingRefreshCheckpoint(); SegmentInfos segmentInfosSnapshot = segmentInfos.clone(); Map<String, String> userData = segmentInfosSnapshot.getUserData(); @@ -344,23 +399,32 @@ void uploadMetadata(Collection<String> localSegmentsPostRefresh, SegmentInfos se localSegmentsPostRefresh, segmentInfosSnapshot, storeDirectory, - indexShard.getOperationPrimaryTerm(), - translogFileGeneration + translogFileGeneration, + replicationCheckpoint, + indexShard.getNodeId() ); } } - private void uploadNewSegments(Collection<String> localSegmentsPostRefresh, ActionListener<Void> listener) { + private void uploadNewSegments( + Collection<String> localSegmentsPostRefresh, + Map<String, Long> localSegmentsSizeMap, + ActionListener<Void> listener + ) { Collection<String> filteredFiles = localSegmentsPostRefresh.stream().filter(file -> !skipUpload(file)).collect(Collectors.toList()); if (filteredFiles.size() == 0) { + logger.debug("No new segments to upload in uploadNewSegments"); listener.onResponse(null); return; } + logger.debug("Effective new segments files to upload {}", filteredFiles); ActionListener<Collection<Void>> mappedListener = ActionListener.map(listener, resp -> null); GroupedActionListener<Void> batchUploadListener = new GroupedActionListener<>(mappedListener, filteredFiles.size()); for (String src : filteredFiles) { + // Initializing listener here to ensure that the stats increment operations are thread-safe + UploadListener statsListener = createUploadListener(localSegmentsSizeMap); ActionListener<Void> aggregatedListener = ActionListener.wrap(resp -> { statsListener.onSuccess(src); batchUploadListener.onResponse(resp); @@ -406,15 +470,6 @@ private String getChecksumOfLocalFile(String file) throws IOException { return localSegmentChecksumMap.get(file); } - /** - * Updates the last refresh time and refresh seq no which is seen by local store. - */ - private void updateLocalRefreshTimeAndSeqNo() { - segmentTracker.updateLocalRefreshClockTimeMs(System.currentTimeMillis()); - segmentTracker.updateLocalRefreshTimeMs(System.nanoTime() / 1_000_000L); - segmentTracker.updateLocalRefreshSeqNo(segmentTracker.getLocalRefreshSeqNo() + 1); - } - /** * Updates the last refresh time and refresh seq no which is seen by remote store. */ @@ -425,33 +480,14 @@ private void updateRemoteRefreshTimeAndSeqNo(long refreshTimeMs, long refreshClo } /** - * Updates map of file name to size of the input segment files. Tries to reuse existing information by caching the size - * data, otherwise uses {@code storeDirectory.fileLength(file)} to get the size. This method also removes from the map - * such files that are not present in the list of segment files given in the input. + * Updates map of file name to size of the input segment files in the segment tracker. Uses {@code storeDirectory.fileLength(file)} to get the size. * - * @param segmentFiles list of segment files for which size needs to be known + * @param segmentFiles list of segment files that are part of the most recent local refresh. 
+ * + * @return updated map of local segment files and their sizes */ - private void updateLocalSizeMapAndTracker(Collection<String> segmentFiles) { - - // Update the map - segmentFiles.stream() - .filter(file -> !EXCLUDE_FILES.contains(file)) - .filter(file -> !latestFileNameSizeOnLocalMap.containsKey(file) || latestFileNameSizeOnLocalMap.get(file) == 0) - .forEach(file -> { - long fileSize = 0; - try { - fileSize = storeDirectory.fileLength(file); - } catch (IOException e) { - logger.warn(new ParameterizedMessage("Exception while reading the fileLength of file={}", file), e); - } - latestFileNameSizeOnLocalMap.put(file, fileSize); - }); - - Set<String> fileSet = new HashSet<>(segmentFiles); - // Remove keys from the fileSizeMap that do not exist in the latest segment files - latestFileNameSizeOnLocalMap.entrySet().removeIf(entry -> fileSet.contains(entry.getKey()) == false); - // Update the tracker - segmentTracker.setLatestLocalFileNameLengthMap(latestFileNameSizeOnLocalMap); + private Map<String, Long> updateLocalSizeMapAndTracker(Collection<String> segmentFiles) { + return segmentTracker.updateLatestLocalFileNameLengthMap(segmentFiles, storeDirectory::fileLength); } private void updateFinalStatusInSegmentTracker(boolean uploadStatus, long bytesBeforeUpload, long startTimeInNS) { @@ -459,16 +495,111 @@ private void updateFinalStatusInSegmentTracker(boolean uploadStatus, long bytesB long bytesUploaded = segmentTracker.getUploadBytesSucceeded() - bytesBeforeUpload; long timeTakenInMS = TimeValue.nsecToMSec(System.nanoTime() - startTimeInNS); segmentTracker.incrementTotalUploadsSucceeded(); - segmentTracker.addUploadBytes(bytesUploaded); - segmentTracker.addUploadBytesPerSec((bytesUploaded * 1_000L) / Math.max(1, timeTakenInMS)); - segmentTracker.addUploadTimeMs(timeTakenInMS); + segmentTracker.updateUploadBytesMovingAverage(bytesUploaded); + segmentTracker.updateUploadBytesPerSecMovingAverage((bytesUploaded * 1_000L) / Math.max(1, timeTakenInMS)); + segmentTracker.updateUploadTimeMovingAverage(timeTakenInMS); } else { segmentTracker.incrementTotalUploadsFailed(); } } + /** + * On primary term update, we (re)initialise the remote segment directory to reflect the latest metadata file that + * has been uploaded to remote store successfully. This method also updates the segment tracker with the latest + * segment files uploaded to the remote store. + */ + private void initializeRemoteDirectoryOnTermUpdate() throws IOException { + if (this.primaryTerm != indexShard.getOperationPrimaryTerm()) { + logger.trace("primaryTerm update from={} to={}", primaryTerm, indexShard.getOperationPrimaryTerm()); + this.primaryTerm = indexShard.getOperationPrimaryTerm(); + RemoteSegmentMetadata uploadedMetadata = this.remoteDirectory.init(); + + // During failover, the uploaded metadata would have names of files that have been uploaded to remote store. + // Here we update the tracker with the latest remote uploaded files. + if (uploadedMetadata != null) { + segmentTracker.setLatestUploadedFiles(uploadedMetadata.getMetadata().keySet()); + } + } + }
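As a worked illustration of the speed arithmetic above, (bytesUploaded * 1_000L) / Math.max(1, timeTakenInMS) converts bytes-per-millisecond into bytes-per-second while guarding against a zero duration. A small sketch, assuming the MovingAverage utility that this change imports elsewhere; the byte and time values are made up.

import org.opensearch.common.util.MovingAverage;

public class UploadSpeedSketch {
    public static void main(String[] args) {
        MovingAverage uploadBytesPerSec = new MovingAverage(20); // 20-sample window
        long bytesUploaded = 5_242_880L; // example: 5 MiB uploaded in one batch
        long timeTakenInMS = 2_000L;     // example: the batch took 2 seconds
        // Math.max(1, ...) avoids division by zero for sub-millisecond uploads.
        long bytesPerSec = (bytesUploaded * 1_000L) / Math.max(1, timeTakenInMS);
        uploadBytesPerSec.record(bytesPerSec); // records 2,621,440 bytes/sec
        System.out.println(uploadBytesPerSec.getAverage());
    }
}

+ + /** + * This checks for readiness of the index shard and primary mode. This has been separated from shouldSync since we use the + * returned value of this method for scheduling retries in the syncSegments method. + * @return true iff the shard has started with primary mode true, or it is undergoing a local or snapshot recovery.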
+ */ + private boolean isReadyForUpload() { + boolean isReady = indexShard.isStartedPrimary() || isLocalOrSnapshotRecovery(); + + if (isReady == false) { + StringBuilder sb = new StringBuilder("Skipped syncing segments with"); + if (indexShard.getReplicationTracker() != null) { + sb.append(" primaryMode=").append(indexShard.getReplicationTracker().isPrimaryMode()); + } + if (indexShard.state() != null) { + sb.append(" indexShardState=").append(indexShard.state()); + } + if (indexShard.getEngineOrNull() != null) { + sb.append(" engineType=").append(indexShard.getEngine().getClass().getSimpleName()); + } + if (indexShard.recoveryState() != null) { + sb.append(" recoverySourceType=").append(indexShard.recoveryState().getRecoverySource().getType()); + sb.append(" primary=").append(indexShard.shardRouting.primary()); + } + logger.info(sb.toString()); + } + return isReady; + } + + private boolean isLocalOrSnapshotRecovery() { + // Even when primary mode is false, we need to upload segments to the remote store. + // This is required in case of snapshot/shrink/split/clone recoveries, where we need to durably persist + // all segments to remote before completing the recovery to ensure durability. + return (indexShard.state() == IndexShardState.RECOVERING && indexShard.shardRouting.primary()) + && indexShard.recoveryState() != null + && (indexShard.recoveryState().getRecoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS + || indexShard.recoveryState().getRecoverySource().getType() == RecoverySource.Type.SNAPSHOT); + } + + /** + * Creates an {@link UploadListener} containing the stats population logic which is triggered before and after segment upload events + * + * @param fileSizeMap map of the current snapshot of local segment files to their sizes + */ + private UploadListener createUploadListener(Map<String, Long> fileSizeMap) { + return new UploadListener() { + private long uploadStartTime = 0; + + @Override + public void beforeUpload(String file) { + // Start tracking the upload bytes started + segmentTracker.addUploadBytesStarted(fileSizeMap.get(file)); + uploadStartTime = System.currentTimeMillis(); + } + + @Override + public void onSuccess(String file) { + // Track upload success + segmentTracker.addUploadBytesSucceeded(fileSizeMap.get(file)); + segmentTracker.addToLatestUploadedFiles(file); + segmentTracker.addUploadTimeInMillis(Math.max(1, System.currentTimeMillis() - uploadStartTime)); + } + + @Override + public void onFailure(String file) { + // Track upload failure + segmentTracker.addUploadBytesFailed(fileSizeMap.get(file)); + segmentTracker.addUploadTimeInMillis(Math.max(1, System.currentTimeMillis() - uploadStartTime)); + } + }; + } + @Override protected Logger getLogger() { return logger; } + + @Override + protected boolean isRetryEnabled() { + return true; + } }
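To make the listener contract above concrete, here is a minimal sketch of an UploadListener that only times a single upload. The interface shape (beforeUpload/onSuccess/onFailure) is exactly the one createUploadListener implements; the package is assumed to be org.opensearch.common.util.

import org.opensearch.common.util.UploadListener;

public class TimingUploadListenerSketch {
    static UploadListener timingListener() {
        return new UploadListener() {
            private long uploadStartTime;

            @Override
            public void beforeUpload(String file) {
                uploadStartTime = System.currentTimeMillis(); // mark the start of this file's upload
            }

            @Override
            public void onSuccess(String file) {
                System.out.println(file + " uploaded in " + (System.currentTimeMillis() - uploadStartTime) + " ms");
            }

            @Override
            public void onFailure(String file) {
                System.out.println(file + " failed after " + (System.currentTimeMillis() - uploadStartTime) + " ms");
            }
        };
    }
}

diff --git a/server/src/main/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommand.java b/server/src/main/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommand.java index 6bc7828a06d44..a56d61194bf45 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommand.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommand.java @@ -54,20 +54,20 @@ import org.opensearch.cluster.routing.allocation.command.AllocateStalePrimaryAllocationCommand; import org.opensearch.cluster.routing.allocation.command.AllocationCommands; import org.opensearch.common.CheckedConsumer; -import org.opensearch.common.Strings; import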
org.opensearch.common.SuppressForbidden; import org.opensearch.common.UUIDs; import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.PathUtils; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.Strings; +import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; import org.opensearch.env.NodeMetadata; import org.opensearch.gateway.PersistedClusterStateService; -import org.opensearch.core.index.Index; import org.opensearch.index.IndexSettings; import org.opensearch.index.engine.Engine; import org.opensearch.index.seqno.SequenceNumbers; @@ -514,7 +514,7 @@ private void printRerouteCommand(ShardPath shardPath, Terminal terminal, boolean ); terminal.println(""); - terminal.println("POST /_cluster/reroute\n" + Strings.toString(XContentType.JSON, commands, true, true)); + terminal.println("POST /_cluster/reroute\n" + Strings.toString(MediaTypeRegistry.JSON, commands, true, true)); terminal.println(""); terminal.println("You must accept the possibility of data loss by changing the `accept_data_loss` parameter to `true`."); terminal.println(""); diff --git a/server/src/main/java/org/opensearch/index/shard/ReplicationGroup.java b/server/src/main/java/org/opensearch/index/shard/ReplicationGroup.java index 6d19e9f500411..f585267f21832 100644 --- a/server/src/main/java/org/opensearch/index/shard/ReplicationGroup.java +++ b/server/src/main/java/org/opensearch/index/shard/ReplicationGroup.java @@ -34,6 +34,7 @@ import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.set.Sets; import java.util.ArrayList; @@ -43,8 +44,9 @@ /** * Replication group for a shard. Used by a primary shard to coordinate replication and recoveries. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ReplicationGroup { private final IndexShardRoutingTable routingTable; private final Set<String> inSyncAllocationIds; diff --git a/server/src/main/java/org/opensearch/index/shard/SearchOperationListener.java b/server/src/main/java/org/opensearch/index/shard/SearchOperationListener.java index 0a7c80f5e87d3..849a4f9c15318 100644 --- a/server/src/main/java/org/opensearch/index/shard/SearchOperationListener.java +++ b/server/src/main/java/org/opensearch/index/shard/SearchOperationListener.java @@ -33,8 +33,8 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; - import org.opensearch.ExceptionsHelper; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.search.internal.ReaderContext; import org.opensearch.search.internal.SearchContext; import org.opensearch.transport.TransportRequest; @@ -44,8 +44,9 @@ /** * An listener for search, fetch and context events. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface SearchOperationListener { /** diff --git a/server/src/main/java/org/opensearch/index/shard/ShardPath.java b/server/src/main/java/org/opensearch/index/shard/ShardPath.java index 64c949a0c537d..d16e5707b2e0e 100644 --- a/server/src/main/java/org/opensearch/index/shard/ShardPath.java +++ b/server/src/main/java/org/opensearch/index/shard/ShardPath.java @@ -33,8 +33,9 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.util.Strings; -import org.opensearch.common.util.io.IOUtils; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.NodeEnvironment; @@ -54,8 +55,9 @@ /** * Path for a shard * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ShardPath { public static final String INDEX_FOLDER_NAME = "index"; public static final String TRANSLOG_FOLDER_NAME = "translog"; diff --git a/server/src/main/java/org/opensearch/index/shard/ShardStateMetadata.java b/server/src/main/java/org/opensearch/index/shard/ShardStateMetadata.java index 76c4c81fb5f62..9c0134fa79551 100644 --- a/server/src/main/java/org/opensearch/index/shard/ShardStateMetadata.java +++ b/server/src/main/java/org/opensearch/index/shard/ShardStateMetadata.java @@ -35,9 +35,9 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.AllocationId; import org.opensearch.common.Nullable; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.gateway.CorruptStateException; import org.opensearch.gateway.MetadataStateFormat; diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index d4e779c83644f..3faef2da05320 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -38,12 +38,13 @@ import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.search.Sort; +import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionListener; +import org.opensearch.OpenSearchException; import org.opensearch.action.StepListener; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.MappingMetadata; @@ -51,10 +52,11 @@ import org.opensearch.cluster.routing.RecoverySource.SnapshotRecoverySource; import org.opensearch.common.UUIDs; import org.opensearch.common.lucene.Lucene; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; -import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import 
org.opensearch.index.engine.Engine; import org.opensearch.index.engine.EngineException; import org.opensearch.index.mapper.MapperService; @@ -64,7 +66,9 @@ import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; import org.opensearch.index.store.Store; +import org.opensearch.index.translog.Checkpoint; import org.opensearch.index.translog.Translog; +import org.opensearch.index.translog.TranslogHeader; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.repositories.IndexId; @@ -74,6 +78,8 @@ import java.io.IOException; import java.nio.channels.FileChannel; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; import java.util.Arrays; import java.util.HashMap; import java.util.List; @@ -83,6 +89,7 @@ import java.util.stream.Collectors; import static org.opensearch.common.unit.TimeValue.timeValueMillis; +import static org.opensearch.index.translog.Translog.CHECKPOINT_FILE_NAME; /** * This package private utility class encapsulates the logic to recover an index shard from either an existing index on @@ -186,6 +193,16 @@ void recoverFromLocalShards( // just trigger a merge to do housekeeping on the // copied segments - we will also see them in stats etc. indexShard.getEngine().forceMerge(false, -1, false, false, false, UUIDs.randomBase64UUID()); + if (indexShard.isRemoteTranslogEnabled() && indexShard.shardRouting.primary()) { + waitForRemoteStoreSync(indexShard); + if (indexShard.isRemoteSegmentStoreInSync() == false) { + throw new IndexShardRecoveryException( + indexShard.shardId(), + "failed to upload to remote", + new IOException("Failed to upload to remote segment store") + ); + } + } return true; } catch (IOException ex) { throw new IndexShardRecoveryException(indexShard.shardId(), "failed to recover from local shards", ex); @@ -394,7 +411,12 @@ void recoverFromSnapshotAndRemoteStore( RemoteSegmentStoreDirectory sourceRemoteDirectory = (RemoteSegmentStoreDirectory) directoryFactory.newDirectory( remoteStoreRepository, indexUUID, - String.valueOf(shardId.id()) + shardId + ); + sourceRemoteDirectory.initializeToSpecificCommit( + primaryTerm, + commitGeneration, + recoverySource.snapshot().getSnapshotId().getUUID() ); indexShard.syncSegmentsFromGivenRemoteSegmentStore(true, sourceRemoteDirectory, primaryTerm, commitGeneration); final Store store = indexShard.store(); @@ -413,6 +435,13 @@ void recoverFromSnapshotAndRemoteStore( } indexShard.getEngine().fillSeqNoGaps(indexShard.getPendingPrimaryTerm()); indexShard.finalizeRecovery(); + if (indexShard.isRemoteTranslogEnabled() && indexShard.shardRouting.primary()) { + waitForRemoteStoreSync(indexShard); + if (indexShard.isRemoteSegmentStoreInSync() == false) { + listener.onFailure(new IndexShardRestoreFailedException(shardId, "Failed to upload to remote segment store")); + return; + } + } indexShard.postRecovery("restore done"); listener.onResponse(true); @@ -530,15 +559,19 @@ private void recoverFromRemoteStore(IndexShard indexShard) throws IndexShardReco remoteStore.incRef(); try { // Download segments from remote segment store - indexShard.syncSegmentsFromRemoteSegmentStore(true, true, true); - - if (store.directory().listAll().length == 0) { - store.createEmpty(indexShard.indexSettings().getIndexVersionCreated().luceneVersion); - } - if (indexShard.indexSettings.isRemoteTranslogStoreEnabled()) { - indexShard.syncTranslogFilesFromRemoteTranslog(); - } 
else { - bootstrap(indexShard, store); + indexShard.syncSegmentsFromRemoteSegmentStore(true); + indexShard.syncTranslogFilesFromRemoteTranslog(); + + // On index creation, the only segment file that is created is segments_N. We can safely discard this file + // as there is no data associated with this shard as part of segments. + if (store.directory().listAll().length <= 1) { + Path location = indexShard.shardPath().resolveTranslog(); + Checkpoint checkpoint = Checkpoint.read(location.resolve(CHECKPOINT_FILE_NAME)); + final Path translogFile = location.resolve(Translog.getFilename(checkpoint.getGeneration())); + try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) { + TranslogHeader translogHeader = TranslogHeader.read(translogFile, channel); + store.createEmpty(indexShard.indexSettings().getIndexVersionCreated().luceneVersion, translogHeader.getTranslogUUID()); + } } assert indexShard.shardRouting.primary() : "only primary shards can recover from store"; @@ -688,6 +721,13 @@ private void restore( } indexShard.getEngine().fillSeqNoGaps(indexShard.getPendingPrimaryTerm()); indexShard.finalizeRecovery(); + if (indexShard.isRemoteTranslogEnabled() && indexShard.shardRouting.primary()) { + waitForRemoteStoreSync(indexShard); + if (indexShard.isRemoteSegmentStoreInSync() == false) { + listener.onFailure(new IndexShardRestoreFailedException(shardId, "Failed to upload to remote segment store")); + return; + } + } indexShard.postRecovery("restore done"); listener.onResponse(true); }, e -> listener.onFailure(new IndexShardRestoreFailedException(shardId, "restore failed", e))); @@ -756,4 +796,31 @@ private void bootstrap(final IndexShard indexShard, final Store store) throws IO ); store.associateIndexWithNewTranslog(translogUUID); } + + /* + Blocks the calling thread, waiting for the remote store to get synced, until the internal remote upload timeout elapses + */ + private void waitForRemoteStoreSync(IndexShard indexShard) { + if (indexShard.shardRouting.primary() == false) { + return; + } + long startNanos = System.nanoTime(); + + while (System.nanoTime() - startNanos < indexShard.getRecoverySettings().internalRemoteUploadTimeout().nanos()) { + try { + if (indexShard.isRemoteSegmentStoreInSync()) { + break; + } else { + try { + Thread.sleep(TimeValue.timeValueMinutes(1).millis()); + } catch (InterruptedException ie) { + throw new OpenSearchException("Interrupted while waiting for remote store sync", ie); + } + } + } catch (AlreadyClosedException e) { + // There is no point in waiting as the shard is now closed. + return; + } + } + } }
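The wait loop above follows a standard poll-until-deadline shape. A generic, self-contained sketch of the same idea (plain Java, no OpenSearch types):

import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

public class PollUntilSketch {
    // Polls `condition` every pollMillis until it is true or timeoutNanos elapses.
    static boolean pollUntil(BooleanSupplier condition, long timeoutNanos, long pollMillis) throws InterruptedException {
        long startNanos = System.nanoTime();
        while (System.nanoTime() - startNanos < timeoutNanos) {
            if (condition.getAsBoolean()) {
                return true; // condition met before the deadline
            }
            Thread.sleep(pollMillis); // back off between checks
        }
        return false; // deadline elapsed
    }

    public static void main(String[] args) throws InterruptedException {
        long readyAt = System.currentTimeMillis() + 150;
        boolean done = pollUntil(() -> System.currentTimeMillis() > readyAt, TimeUnit.SECONDS.toNanos(1), 20);
        System.out.println(done);
    }
}

diff --git a/server/src/main/java/org/opensearch/index/similarity/SimilarityProvider.java b/server/src/main/java/org/opensearch/index/similarity/SimilarityProvider.java index ad64f3a55228f..33065dc1739a3 100644 --- a/server/src/main/java/org/opensearch/index/similarity/SimilarityProvider.java +++ b/server/src/main/java/org/opensearch/index/similarity/SimilarityProvider.java @@ -33,14 +33,16 @@ package org.opensearch.index.similarity; import org.apache.lucene.search.similarities.Similarity; +import org.opensearch.common.annotation.PublicApi; import java.util.Objects; /** * Wrapper around a {@link Similarity} and its name.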
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class SimilarityProvider { private final String name; @@ -70,23 +72,23 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; SimilarityProvider that = (SimilarityProvider) o; - /** - * We check <code>name</code> only because the <code>similarity</code> is - * re-created for each new instance and they don't implement equals. - * This is not entirely correct though but we only use equality checks - * for similarities inside the same index and names are unique in this case. - **/ + /* + We check <code>name</code> only because the <code>similarity</code> is + re-created for each new instance and they don't implement equals. + This is not entirely correct, but we only use equality checks + for similarities inside the same index, where names are unique. + */ return Objects.equals(name, that.name); } @Override public int hashCode() { - /** - * We use <code>name</code> only because the <code>similarity</code> is - * re-created for each new instance and they don't implement equals. - * This is not entirely correct though but we only use equality checks - * for similarities a single index and names are unique in this case. - **/ + /* + We use <code>name</code> only because the <code>similarity</code> is + re-created for each new instance and they don't implement equals. + This is not entirely correct, but we only use equality checks + for similarities within a single index, where names are unique. + */ return Objects.hash(name); } } diff --git a/server/src/main/java/org/opensearch/index/similarity/SimilarityService.java b/server/src/main/java/org/opensearch/index/similarity/SimilarityService.java index c3fc7ffbb0fe5..6b4566826cedc 100644 --- a/server/src/main/java/org/opensearch/index/similarity/SimilarityService.java +++ b/server/src/main/java/org/opensearch/index/similarity/SimilarityService.java @@ -45,6 +45,7 @@ import org.apache.lucene.util.BytesRef; import org.opensearch.Version; import org.opensearch.common.TriFunction; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; import org.opensearch.index.AbstractIndexComponent; @@ -63,8 +64,9 @@ /** * Service for similarity computations * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class SimilarityService extends AbstractIndexComponent { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(SimilarityService.class); public static final String DEFAULT_SIMILARITY = "BM25"; diff --git a/server/src/main/java/org/opensearch/index/snapshots/IndexShardSnapshotStatus.java b/server/src/main/java/org/opensearch/index/snapshots/IndexShardSnapshotStatus.java index d48da6f462502..d844a7d2d25c4 100644 --- a/server/src/main/java/org/opensearch/index/snapshots/IndexShardSnapshotStatus.java +++ b/server/src/main/java/org/opensearch/index/snapshots/IndexShardSnapshotStatus.java @@ -32,21 +32,25 @@ package org.opensearch.index.snapshots; +import org.opensearch.common.annotation.PublicApi; + import java.util.Objects; import java.util.concurrent.atomic.AtomicReference; /** * Represent shard snapshot status * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexShardSnapshotStatus { /** * Snapshot stage * - * @opensearch.internal + * @opensearch.api */ +
@PublicApi(since = "1.0.0") public enum Stage { /** * Snapshot hasn't started yet @@ -248,8 +252,9 @@ public static IndexShardSnapshotStatus newDone( /** * Returns an immutable state of {@link IndexShardSnapshotStatus} at a given point in time. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Copy { private final Stage stage; diff --git a/server/src/main/java/org/opensearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java b/server/src/main/java/org/opensearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java index 2d49b153c39f4..2b718fb055e14 100644 --- a/server/src/main/java/org/opensearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java +++ b/server/src/main/java/org/opensearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java @@ -35,16 +35,16 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Version; import org.opensearch.OpenSearchParseException; -import org.opensearch.core.ParseField; -import org.opensearch.common.Strings; import org.opensearch.common.lucene.Lucene; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.core.xcontent.XContentParserUtils; +import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.index.store.StoreFileMetadata; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/index/snapshots/blobstore/RateLimitingInputStream.java b/server/src/main/java/org/opensearch/index/snapshots/blobstore/RateLimitingInputStream.java index 86ecef1173e48..ee601f96ecee1 100644 --- a/server/src/main/java/org/opensearch/index/snapshots/blobstore/RateLimitingInputStream.java +++ b/server/src/main/java/org/opensearch/index/snapshots/blobstore/RateLimitingInputStream.java @@ -33,6 +33,7 @@ package org.opensearch.index.snapshots.blobstore; import org.apache.lucene.store.RateLimiter; +import org.opensearch.common.StreamLimiter; import java.io.FilterInputStream; import java.io.IOException; @@ -46,45 +47,17 @@ */ public class RateLimitingInputStream extends FilterInputStream { - private final Supplier<RateLimiter> rateLimiterSupplier; + private final StreamLimiter streamLimiter; - private final Listener listener; - - private long bytesSinceLastRateLimit; - - /** - * Internal listener - * - * @opensearch.internal - */ - public interface Listener { - void onPause(long nanos); - } - - public RateLimitingInputStream(InputStream delegate, Supplier<RateLimiter> rateLimiterSupplier, Listener listener) { + public RateLimitingInputStream(InputStream delegate, Supplier<RateLimiter> rateLimiterSupplier, StreamLimiter.Listener listener) { super(delegate); - this.rateLimiterSupplier = rateLimiterSupplier; - this.listener = listener; - } - - private void maybePause(int bytes) throws IOException { - bytesSinceLastRateLimit += bytes; - final RateLimiter rateLimiter = rateLimiterSupplier.get(); - if (rateLimiter != null) { - if (bytesSinceLastRateLimit >= rateLimiter.getMinPauseCheckBytes()) { - long pause = rateLimiter.pause(bytesSinceLastRateLimit); - bytesSinceLastRateLimit = 0; - if (pause > 0) { - 
listener.onPause(pause); - } - } - } + this.streamLimiter = new StreamLimiter(rateLimiterSupplier, listener); } @Override public int read() throws IOException { int b = super.read(); - maybePause(1); + streamLimiter.maybePause(1); return b; } @@ -92,7 +65,7 @@ public int read() throws IOException { public int read(byte[] b, int off, int len) throws IOException { int n = super.read(b, off, len); if (n > 0) { - maybePause(n); + streamLimiter.maybePause(n); } return n; } diff --git a/server/src/main/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshot.java b/server/src/main/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshot.java index eefc1469a06a0..d54e9686ab951 100644 --- a/server/src/main/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshot.java +++ b/server/src/main/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshot.java @@ -9,6 +9,7 @@ package org.opensearch.index.snapshots.blobstore; import org.opensearch.OpenSearchParseException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -22,8 +23,9 @@ /** * Remote Store based Shard snapshot metadata * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.9.0") public class RemoteStoreShardShallowCopySnapshot implements ToXContentFragment, IndexShardSnapshot { private final String snapshot; @@ -322,10 +324,10 @@ public String snapshot() { return snapshot; } - /** - * Returns list of files in the shard - * - * @return list of files + /* + Returns list of files in the shard + + @return list of files */ /** diff --git a/server/src/main/java/org/opensearch/index/stats/IndexingPressurePerShardStats.java b/server/src/main/java/org/opensearch/index/stats/IndexingPressurePerShardStats.java index 9e8c8d29c2058..21e1105422725 100644 --- a/server/src/main/java/org/opensearch/index/stats/IndexingPressurePerShardStats.java +++ b/server/src/main/java/org/opensearch/index/stats/IndexingPressurePerShardStats.java @@ -8,10 +8,11 @@ package org.opensearch.index.stats; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -22,8 +23,9 @@ /** * Per shard indexing pressure statistics * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.3.0") public class IndexingPressurePerShardStats implements Writeable, ToXContentFragment { private final String shardId; diff --git a/server/src/main/java/org/opensearch/index/stats/IndexingPressureStats.java b/server/src/main/java/org/opensearch/index/stats/IndexingPressureStats.java index 8f4f0b661ed33..53442a7a04adf 100644 --- a/server/src/main/java/org/opensearch/index/stats/IndexingPressureStats.java +++ b/server/src/main/java/org/opensearch/index/stats/IndexingPressureStats.java @@ -32,10 +32,11 @@ package org.opensearch.index.stats; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; 
import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -44,8 +45,9 @@ /** * Base indexing pressure statistics * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.3.0") public class IndexingPressureStats implements Writeable, ToXContentFragment { private final long totalCombinedCoordinatingAndPrimaryBytes; diff --git a/server/src/main/java/org/opensearch/index/stats/ShardIndexingPressureStats.java b/server/src/main/java/org/opensearch/index/stats/ShardIndexingPressureStats.java index 1635c560dce3f..0b8cd75a24ded 100644 --- a/server/src/main/java/org/opensearch/index/stats/ShardIndexingPressureStats.java +++ b/server/src/main/java/org/opensearch/index/stats/ShardIndexingPressureStats.java @@ -8,13 +8,14 @@ package org.opensearch.index.stats; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.index.shard.ShardId; import java.io.IOException; import java.util.HashMap; @@ -23,8 +24,9 @@ /** * Cumulative shard indexing pressure stats * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.3.0") public class ShardIndexingPressureStats implements Writeable, ToXContentFragment { private final Map<ShardId, IndexingPressurePerShardStats> shardIndexingPressureStore; diff --git a/server/src/main/java/org/opensearch/index/store/DirectoryFileTransferTracker.java b/server/src/main/java/org/opensearch/index/store/DirectoryFileTransferTracker.java new file mode 100644 index 0000000000000..2ab615677dedb --- /dev/null +++ b/server/src/main/java/org/opensearch/index/store/DirectoryFileTransferTracker.java @@ -0,0 +1,257 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.store; + +import org.apache.lucene.store.Directory; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.util.MovingAverage; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; + +import java.io.IOException; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; + +/** + * Tracks the number of bytes transferred between two {@link Directory} instances + * + * @opensearch.api + */ +@PublicApi(since = "2.10.0") +public class DirectoryFileTransferTracker { + /** + * Cumulative size of files (in bytes) attempted to be transferred from the source {@link Directory} + */ + private final AtomicLong transferredBytesStarted = new AtomicLong(); + + /** + * Cumulative size of files (in bytes) that failed to transfer from the source {@link Directory} + */ + private final AtomicLong transferredBytesFailed = new AtomicLong(); + + /** + * Cumulative size of files (in bytes) successfully transferred from the source {@link Directory} + */ + private final AtomicLong transferredBytesSucceeded = new AtomicLong(); + + /** + * Time in milliseconds for the last successful transfer from the source {@link Directory} + */ + private final AtomicLong lastTransferTimestampMs = new AtomicLong(); + + /** + * Cumulative time in milliseconds spent in successful transfers from the source {@link Directory} + */ + private final AtomicLong totalTransferTimeInMs = new AtomicLong(); + + /** + * Provides a moving average over the total size in bytes of the last N files transferred from the source {@link Directory}. + * N is window size + */ + private final AtomicReference<MovingAverage> transferredBytesMovingAverageReference; + + private final AtomicLong lastSuccessfulTransferInBytes = new AtomicLong();
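A small usage sketch of the tracker being introduced here (values are illustrative): record the started bytes before a copy, then report success or failure with the original start time so the tracker can derive elapsed time and transfer speed. All method names below appear in this change.

import org.opensearch.index.store.DirectoryFileTransferTracker;

public class TransferTrackerSketch {
    public static void main(String[] args) {
        DirectoryFileTransferTracker tracker = new DirectoryFileTransferTracker();
        long fileSize = 1_048_576L; // example: a 1 MiB file
        long startMs = System.currentTimeMillis();
        tracker.addTransferredBytesStarted(fileSize);
        try {
            // ... copy the file between directories here ...
            tracker.addTransferredBytesSucceeded(fileSize, startMs); // updates averages and timestamps
        } catch (Exception e) {
            tracker.addTransferredBytesFailed(fileSize, startMs); // still accounts for elapsed time
        }
        System.out.println(tracker.stats().transferredBytesSucceeded);
    }
}

+ + /** + * Provides a moving average over the last N transfer speeds (in bytes/s) of segment files transferred from the source {@link Directory}.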
+ * N is window size + */ + private final AtomicReference<MovingAverage> transferredBytesPerSecMovingAverageReference; + + private final int DIRECTORY_FILES_TRANSFER_DEFAULT_WINDOW_SIZE = 20; + + // Getters and Setters, all are visible for testing + public long getTransferredBytesStarted() { + return transferredBytesStarted.get(); + } + + public void addTransferredBytesStarted(long size) { + transferredBytesStarted.getAndAdd(size); + } + + public long getTransferredBytesFailed() { + return transferredBytesFailed.get(); + } + + public void addTransferredBytesFailed(long size, long startTimeInMs) { + transferredBytesFailed.getAndAdd(size); + addTotalTransferTimeInMs(Math.max(1, System.currentTimeMillis() - startTimeInMs)); + } + + public long getTransferredBytesSucceeded() { + return transferredBytesSucceeded.get(); + } + + public void addTransferredBytesSucceeded(long size, long startTimeInMs) { + transferredBytesSucceeded.getAndAdd(size); + updateSuccessfulTransferSize(size); + long currentTimeInMs = System.currentTimeMillis(); + updateLastTransferTimestampMs(currentTimeInMs); + long timeTakenInMS = Math.max(1, currentTimeInMs - startTimeInMs); + addTotalTransferTimeInMs(timeTakenInMS); + addTransferredBytesPerSec((size * 1_000L) / timeTakenInMS); + } + + public boolean isTransferredBytesPerSecAverageReady() { + return transferredBytesPerSecMovingAverageReference.get().isReady(); + } + + public double getTransferredBytesPerSecAverage() { + return transferredBytesPerSecMovingAverageReference.get().getAverage(); + } + + public void addTransferredBytesPerSec(long bytesPerSec) { + this.transferredBytesPerSecMovingAverageReference.get().record(bytesPerSec); + } + + public boolean isTransferredBytesAverageReady() { + return transferredBytesMovingAverageReference.get().isReady(); + } + + public double getTransferredBytesAverage() { + return transferredBytesMovingAverageReference.get().getAverage(); + } + + public void updateLastSuccessfulTransferInBytes(long size) { + lastSuccessfulTransferInBytes.set(size); + } + + public void updateSuccessfulTransferSize(long size) { + updateLastSuccessfulTransferInBytes(size); + this.transferredBytesMovingAverageReference.get().record(size); + } + + public long getLastTransferTimestampMs() { + return lastTransferTimestampMs.get(); + } + + public void updateLastTransferTimestampMs(long downloadTimestampInMs) { + this.lastTransferTimestampMs.set(downloadTimestampInMs); + } + + public void addTotalTransferTimeInMs(long totalTransferTimeInMs) { + this.totalTransferTimeInMs.addAndGet(totalTransferTimeInMs); + } + + public long getTotalTransferTimeInMs() { + return totalTransferTimeInMs.get(); + } + + public DirectoryFileTransferTracker() { + transferredBytesMovingAverageReference = new AtomicReference<>(new MovingAverage(DIRECTORY_FILES_TRANSFER_DEFAULT_WINDOW_SIZE)); + transferredBytesPerSecMovingAverageReference = new AtomicReference<>( + new MovingAverage(DIRECTORY_FILES_TRANSFER_DEFAULT_WINDOW_SIZE) + ); + } + + public DirectoryFileTransferTracker.Stats stats() { + return new Stats( + transferredBytesStarted.get(), + transferredBytesFailed.get(), + transferredBytesSucceeded.get(), + lastTransferTimestampMs.get(), + totalTransferTimeInMs.get(), + transferredBytesMovingAverageReference.get().getAverage(), + lastSuccessfulTransferInBytes.get(), + transferredBytesPerSecMovingAverageReference.get().getAverage() + ); + } + + /** + * Represents the tracker's stats presentable to an API. 
+ * + * @opensearch.api + */ + @PublicApi(since = "2.10.0") + public static class Stats implements Writeable { + public final long transferredBytesStarted; + public final long transferredBytesFailed; + public final long transferredBytesSucceeded; + public final long lastTransferTimestampMs; + public final long totalTransferTimeInMs; + public final double transferredBytesMovingAverage; + public final long lastSuccessfulTransferInBytes; + public final double transferredBytesPerSecMovingAverage; + + public Stats( + long transferredBytesStarted, + long transferredBytesFailed, + long transferredBytesSucceeded, + long lastTransferTimestampMs, + long totalTransferTimeInMs, + double transferredBytesMovingAverage, + long lastSuccessfulTransferInBytes, + double transferredBytesPerSecMovingAverage + ) { + this.transferredBytesStarted = transferredBytesStarted; + this.transferredBytesFailed = transferredBytesFailed; + this.transferredBytesSucceeded = transferredBytesSucceeded; + this.lastTransferTimestampMs = lastTransferTimestampMs; + this.totalTransferTimeInMs = totalTransferTimeInMs; + this.transferredBytesMovingAverage = transferredBytesMovingAverage; + this.lastSuccessfulTransferInBytes = lastSuccessfulTransferInBytes; + this.transferredBytesPerSecMovingAverage = transferredBytesPerSecMovingAverage; + } + + public Stats(StreamInput in) throws IOException { + this.transferredBytesStarted = in.readLong(); + this.transferredBytesFailed = in.readLong(); + this.transferredBytesSucceeded = in.readLong(); + this.lastTransferTimestampMs = in.readLong(); + this.totalTransferTimeInMs = in.readLong(); + this.transferredBytesMovingAverage = in.readDouble(); + this.lastSuccessfulTransferInBytes = in.readLong(); + this.transferredBytesPerSecMovingAverage = in.readDouble(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(transferredBytesStarted); + out.writeLong(transferredBytesFailed); + out.writeLong(transferredBytesSucceeded); + out.writeLong(lastTransferTimestampMs); + out.writeLong(totalTransferTimeInMs); + out.writeDouble(transferredBytesMovingAverage); + out.writeLong(lastSuccessfulTransferInBytes); + out.writeDouble(transferredBytesPerSecMovingAverage); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null || getClass() != obj.getClass()) return false; + Stats stats = (Stats) obj; + + return transferredBytesStarted == stats.transferredBytesStarted + && transferredBytesFailed == stats.transferredBytesFailed + && transferredBytesSucceeded == stats.transferredBytesSucceeded + && lastTransferTimestampMs == stats.lastTransferTimestampMs + && totalTransferTimeInMs == stats.totalTransferTimeInMs + && Double.compare(stats.transferredBytesMovingAverage, transferredBytesMovingAverage) == 0 + && lastSuccessfulTransferInBytes == stats.lastSuccessfulTransferInBytes + && Double.compare(stats.transferredBytesPerSecMovingAverage, transferredBytesPerSecMovingAverage) == 0; + } + + @Override + public int hashCode() { + return Objects.hash( + transferredBytesStarted, + transferredBytesFailed, + transferredBytesSucceeded, + lastTransferTimestampMs, + totalTransferTimeInMs, + transferredBytesMovingAverage, + lastSuccessfulTransferInBytes, + transferredBytesPerSecMovingAverage + ); + } + } +}
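Because Stats implements Writeable and has a matching StreamInput constructor, it can round-trip through stream serialization. A hedged sketch under the assumption that BytesStreamOutput lives in org.opensearch.common.io.stream, as in 2.x:

import org.opensearch.common.io.stream.BytesStreamOutput;
import org.opensearch.core.common.io.stream.StreamInput;
import org.opensearch.index.store.DirectoryFileTransferTracker;

public class StatsRoundTripSketch {
    public static void main(String[] args) throws Exception {
        DirectoryFileTransferTracker.Stats stats = new DirectoryFileTransferTracker().stats();
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            stats.writeTo(out); // serialize all counters and averages
            try (StreamInput in = out.bytes().streamInput()) {
                DirectoryFileTransferTracker.Stats copy = new DirectoryFileTransferTracker.Stats(in);
                System.out.println(stats.equals(copy)); // true: equals compares every field
            }
        }
    }
}

diff --git a/server/src/main/java/org/opensearch/index/store/FsDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/FsDirectoryFactory.java index 3b5b4040954c9..a46b641d1423f 100644 ---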
a/server/src/main/java/org/opensearch/index/store/FsDirectoryFactory.java +++ b/server/src/main/java/org/opensearch/index/store/FsDirectoryFactory.java @@ -45,6 +45,7 @@ import org.apache.lucene.store.SimpleFSLockFactory; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; +import org.opensearch.common.settings.Settings; import org.opensearch.common.util.io.IOUtils; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; @@ -56,6 +57,8 @@ import java.nio.file.Path; import java.util.HashSet; import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; /** * Factory for a filesystem directory @@ -97,10 +100,24 @@ protected Directory newFSDirectory(Path location, LockFactory lockFactory, Index case HYBRIDFS: // Use Lucene defaults final FSDirectory primaryDirectory = FSDirectory.open(location, lockFactory); - final Set<String> mmapExtensions = new HashSet<>(indexSettings.getValue(IndexModule.INDEX_STORE_HYBRID_MMAP_EXTENSIONS)); + final Set<String> nioExtensions; + final Set<String> mmapExtensions = Set.copyOf(indexSettings.getValue(IndexModule.INDEX_STORE_HYBRID_MMAP_EXTENSIONS)); + if (mmapExtensions.equals( + new HashSet<>(IndexModule.INDEX_STORE_HYBRID_MMAP_EXTENSIONS.getDefault(Settings.EMPTY)) + ) == false) { + // If the mmap extension setting was defined, then compute nio extensions by subtracting out the + // mmap extensions from the set of all extensions. + nioExtensions = Stream.concat( + IndexModule.INDEX_STORE_HYBRID_NIO_EXTENSIONS.getDefault(Settings.EMPTY).stream(), + IndexModule.INDEX_STORE_HYBRID_MMAP_EXTENSIONS.getDefault(Settings.EMPTY).stream() + ).filter(e -> mmapExtensions.contains(e) == false).collect(Collectors.toUnmodifiableSet()); + } else { + // Otherwise, get the list of nio extensions from the nio setting + nioExtensions = Set.copyOf(indexSettings.getValue(IndexModule.INDEX_STORE_HYBRID_NIO_EXTENSIONS)); + } if (primaryDirectory instanceof MMapDirectory) { MMapDirectory mMapDirectory = (MMapDirectory) primaryDirectory; - return new HybridDirectory(lockFactory, setPreload(mMapDirectory, lockFactory, preLoadExtensions), mmapExtensions); + return new HybridDirectory(lockFactory, setPreload(mMapDirectory, lockFactory, preLoadExtensions), nioExtensions); } else { return primaryDirectory; } @@ -143,12 +160,12 @@ public static boolean isHybridFs(Directory directory) { */ static final class HybridDirectory extends NIOFSDirectory { private final MMapDirectory delegate; - private final Set<String> mmapExtensions; + private final Set<String> nioExtensions; - HybridDirectory(LockFactory lockFactory, MMapDirectory delegate, Set<String> mmapExtensions) throws IOException { + HybridDirectory(LockFactory lockFactory, MMapDirectory delegate, Set<String> nioExtensions) throws IOException { super(delegate.getDirectory(), lockFactory); this.delegate = delegate; - this.mmapExtensions = mmapExtensions; + this.nioExtensions = nioExtensions; } @Override @@ -169,7 +186,7 @@ public IndexInput openInput(String name, IOContext context) throws IOException { boolean useDelegate(String name) { final String extension = FileSwitchDirectory.getExtension(name); - return mmapExtensions.contains(extension); + return nioExtensions.contains(extension) == false; } @Override diff --git a/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java index f7fe7ca62e6ba..345583bbbd1be 100644 ---
a/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java @@ -8,15 +8,28 @@ package org.opensearch.index.store; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; import org.apache.lucene.store.Lock; -import org.opensearch.action.ActionListener; +import org.opensearch.ExceptionsHelper; import org.opensearch.action.LatchedActionListener; +import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; +import org.opensearch.common.blobstore.exception.CorruptFileException; +import org.opensearch.common.blobstore.stream.write.WriteContext; +import org.opensearch.common.blobstore.stream.write.WritePriority; +import org.opensearch.common.blobstore.transfer.RemoteTransferContainer; +import org.opensearch.common.blobstore.transfer.stream.OffsetRangeIndexInputStream; +import org.opensearch.common.blobstore.transfer.stream.OffsetRangeInputStream; +import org.opensearch.core.action.ActionListener; +import org.opensearch.index.store.exception.ChecksumCombinationException; import java.io.FileNotFoundException; import java.io.IOException; @@ -26,12 +39,14 @@ import java.util.Collection; import java.util.Collections; import java.util.List; -import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.UnaryOperator; import java.util.stream.Collectors; +import static org.opensearch.common.blobstore.transfer.RemoteTransferContainer.checksumOfChecksum; + /** * A {@code RemoteDirectory} provides an abstraction layer for storing a list of files to a remote store. * A remoteDirectory contains only files (no sub-folder hierarchy). This class does not support all the methods in @@ -44,13 +59,33 @@ public class RemoteDirectory extends Directory { protected final BlobContainer blobContainer; + private static final Logger logger = LogManager.getLogger(RemoteDirectory.class); + + private final UnaryOperator<OffsetRangeInputStream> uploadRateLimiter; + + private final UnaryOperator<InputStream> downloadRateLimiter; + + /** + * Number of bytes at the end of a segment file used to store its checksum + */ + private static final int SEGMENT_CHECKSUM_BYTES = 8; public BlobContainer getBlobContainer() { return blobContainer; } public RemoteDirectory(BlobContainer blobContainer) { + this(blobContainer, UnaryOperator.identity(), UnaryOperator.identity()); + } + + public RemoteDirectory( + BlobContainer blobContainer, + UnaryOperator<OffsetRangeInputStream> uploadRateLimiter, + UnaryOperator<InputStream> downloadRateLimiter + ) { this.blobContainer = blobContainer; + this.uploadRateLimiter = uploadRateLimiter; + this.downloadRateLimiter = downloadRateLimiter; } /** @@ -106,6 +141,17 @@ public void onFailure(Exception e) { } }
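The getBlobStream method added just below hands back a raw InputStream, so a short usage sketch of the intended lifecycle (the file name is illustrative): close the stream via try-with-resources, as readMetadataFile does later in this change.

import org.opensearch.index.store.RemoteDirectory;

import java.io.IOException;
import java.io.InputStream;

public class BlobStreamSketch {
    static byte[] readBlobFully(RemoteDirectory remoteDirectory, String fileName) throws IOException {
        // try-with-resources guarantees the underlying blob stream is closed
        try (InputStream in = remoteDirectory.getBlobStream(fileName)) {
            return in.readAllBytes();
        }
    }
}

+ /** + * Returns the stream emitted by the blob object. The caller must close the returned stream, ideally via try-with-resources.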
+ * + * @param fileName Name of file + * @return Stream from the blob object + * @throws IOException if fetch of stream fails with IO error + */ + public InputStream getBlobStream(String fileName) throws IOException { + return blobContainer.readBlob(fileName); + } + /** * Removes an existing file in the directory. * @@ -146,13 +192,24 @@ public IndexOutput createOutput(String name, IOContext context) { */ @Override public IndexInput openInput(String name, IOContext context) throws IOException { + return openInput(name, fileLength(name), context); + } + + public IndexInput openInput(String name, long fileLength, IOContext context) throws IOException { InputStream inputStream = null; try { inputStream = blobContainer.readBlob(name); - return new RemoteIndexInput(name, inputStream, fileLength(name)); + return new RemoteIndexInput(name, downloadRateLimiter.apply(inputStream), fileLength); } catch (Exception e) { // Incase the RemoteIndexInput creation fails, close the input stream to avoid file handler leak. - if (inputStream != null) inputStream.close(); + if (inputStream != null) { + try { + inputStream.close(); + } catch (Exception closeEx) { + e.addSuppressed(closeEx); + } + } + logger.error("Exception while reading blob for file: " + name + " for path " + blobContainer.path()); throw e; } } @@ -176,9 +233,9 @@ public void close() throws IOException { @Override public long fileLength(String name) throws IOException { // ToDo: Instead of calling remote store each time, keep a cache with segment metadata - Map<String, BlobMetadata> metadata = blobContainer.listBlobsByPrefix(name); - if (metadata.containsKey(name)) { - return metadata.get(name).length(); + List<BlobMetadata> metadata = blobContainer.listBlobsByPrefixInSortedOrder(name, 1, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); + if (metadata.size() == 1 && metadata.get(0).name().equals(name)) { + return metadata.get(0).length(); } throw new NoSuchFileException(name); } @@ -259,4 +316,104 @@ public Lock obtainLock(String name) throws IOException { public void delete() throws IOException { blobContainer.delete(); } + + public boolean copyFrom( + Directory from, + String src, + String remoteFileName, + IOContext context, + Runnable postUploadRunner, + ActionListener<Void> listener + ) { + if (blobContainer instanceof AsyncMultiStreamBlobContainer) { + try { + uploadBlob(from, src, remoteFileName, context, postUploadRunner, listener); + } catch (Exception e) { + listener.onFailure(e); + } + return true; + } + return false; + } + + private void uploadBlob( + Directory from, + String src, + String remoteFileName, + IOContext ioContext, + Runnable postUploadRunner, + ActionListener<Void> listener + ) throws Exception { + long expectedChecksum = calculateChecksumOfChecksum(from, src); + long contentLength; + try (IndexInput indexInput = from.openInput(src, ioContext)) { + contentLength = indexInput.length(); + } + boolean remoteIntegrityEnabled = false; + if (getBlobContainer() instanceof AsyncMultiStreamBlobContainer) { + remoteIntegrityEnabled = ((AsyncMultiStreamBlobContainer) getBlobContainer()).remoteIntegrityCheckSupported(); + } + RemoteTransferContainer remoteTransferContainer = new RemoteTransferContainer( + src, + remoteFileName, + contentLength, + true, + WritePriority.NORMAL, + (size, position) -> uploadRateLimiter.apply(new OffsetRangeIndexInputStream(from.openInput(src, ioContext), size, position)), + expectedChecksum, + remoteIntegrityEnabled + ); + ActionListener<Void> completionListener = ActionListener.wrap(resp -> { + try 
{ + postUploadRunner.run(); + listener.onResponse(null); + } catch (Exception e) { + logger.error(() -> new ParameterizedMessage("Exception in segment postUpload for file [{}]", src), e); + listener.onFailure(e); + } + }, ex -> { + logger.error(() -> new ParameterizedMessage("Failed to upload blob {}", src), ex); + IOException corruptIndexException = ExceptionsHelper.unwrapCorruption(ex); + if (corruptIndexException != null) { + listener.onFailure(corruptIndexException); + return; + } + Throwable throwable = ExceptionsHelper.unwrap(ex, CorruptFileException.class); + if (throwable != null) { + CorruptFileException corruptFileException = (CorruptFileException) throwable; + listener.onFailure(new CorruptIndexException(corruptFileException.getMessage(), corruptFileException.getFileName())); + return; + } + listener.onFailure(ex); + }); + + completionListener = ActionListener.runBefore(completionListener, () -> { + try { + remoteTransferContainer.close(); + } catch (Exception e) { + logger.warn("Error occurred while closing streams", e); + } + }); + + WriteContext writeContext = remoteTransferContainer.createWriteContext(); + ((AsyncMultiStreamBlobContainer) blobContainer).asyncBlobUpload(writeContext, completionListener); + } + + private long calculateChecksumOfChecksum(Directory directory, String file) throws IOException { + try (IndexInput indexInput = directory.openInput(file, IOContext.DEFAULT)) { + try { + return checksumOfChecksum(indexInput, SEGMENT_CHECKSUM_BYTES); + } catch (Exception e) { + throw new ChecksumCombinationException( + "Potentially corrupted file: Checksum combination failed while combining stored checksum " + + "and calculated checksum of stored checksum in segment file: " + + file + + ", directory: " + + directory, + file, + e + ); + } + } + } } diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index 8ee267cb67e68..c9a238c6e3350 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -8,12 +8,10 @@ package org.opensearch.index.store; -import com.jcraft.jzlib.JZlib; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.codecs.CodecUtil; -import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.SegmentCommitInfo; import org.apache.lucene.index.SegmentInfo; import org.apache.lucene.index.SegmentInfos; @@ -25,42 +23,41 @@ import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; import org.apache.lucene.util.Version; -import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionListener; import org.opensearch.common.UUIDs; -import org.opensearch.common.blobstore.VerifyingMultiStreamBlobContainer; -import org.opensearch.common.blobstore.exception.CorruptFileException; -import org.opensearch.common.blobstore.stream.write.WriteContext; -import org.opensearch.common.blobstore.stream.write.WritePriority; -import org.opensearch.common.blobstore.transfer.RemoteTransferContainer; -import org.opensearch.common.blobstore.transfer.stream.OffsetRangeIndexInputStream; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.VersionedCodecStreamWrapper; +import 
org.opensearch.common.logging.Loggers; import org.opensearch.common.lucene.store.ByteArrayIndexInput; -import org.opensearch.common.util.ByteUtils; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.remote.RemoteStoreUtils; -import org.opensearch.index.store.exception.ChecksumCombinationException; import org.opensearch.index.store.lockmanager.FileLockInfo; import org.opensearch.index.store.lockmanager.RemoteStoreCommitLevelLockManager; import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; +import org.opensearch.index.store.lockmanager.RemoteStoreMetadataLockManager; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadataHandler; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.threadpool.ThreadPool; import java.io.FileNotFoundException; import java.io.IOException; +import java.io.InputStream; import java.nio.file.NoSuchFileException; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; -import java.util.zip.CRC32; /** * A RemoteDirectory extension for remote segment store. We need to make sure we don't overwrite a segment file once uploaded. @@ -70,20 +67,18 @@ * caller will be accessing segment files in the same way as {@code FSDirectory}. Apart from storing actual segment files, * remote segment store also keeps track of refresh checkpoints as metadata in a separate path which is handled by * another instance of {@code RemoteDirectory}. - * @opensearch.internal + * + * @opensearch.api */ +@PublicApi(since = "2.3.0") public final class RemoteSegmentStoreDirectory extends FilterDirectory implements RemoteStoreCommitLevelLockManager { + /** * Each segment file is uploaded with unique suffix. * For example, _0.cfe in local filesystem will be uploaded to remote segment store as _0.cfe__gX7bNIIBrs0AUNsR2yEG */ public static final String SEGMENT_NAME_UUID_SEPARATOR = "__"; - /** - * Number of bytes in the segment file to store checksum - */ - private static final int SEGMENT_CHECKSUM_BYTES = 8; - /** * remoteDataDirectory is used to store segment files at path: cluster_UUID/index_UUID/shardId/segments/data */ @@ -110,7 +105,9 @@ public final class RemoteSegmentStoreDirectory extends FilterDirectory implement RemoteSegmentMetadata.METADATA_CODEC ); - private static final Logger logger = LogManager.getLogger(RemoteSegmentStoreDirectory.class); + private static final Logger staticLogger = LogManager.getLogger(RemoteSegmentStoreDirectory.class); + + private final Logger logger; /** * AtomicBoolean that ensures only one staleCommitDeletion activity is scheduled at a time. 
@@ -120,17 +117,21 @@ public final class RemoteSegmentStoreDirectory extends FilterDirectory implement private final AtomicLong metadataUploadCounter = new AtomicLong(0); + public static final int METADATA_FILES_TO_FETCH = 10; + public RemoteSegmentStoreDirectory( RemoteDirectory remoteDataDirectory, RemoteDirectory remoteMetadataDirectory, RemoteStoreLockManager mdLockManager, - ThreadPool threadPool + ThreadPool threadPool, + ShardId shardId ) throws IOException { super(remoteDataDirectory); this.remoteDataDirectory = remoteDataDirectory; this.remoteMetadataDirectory = remoteMetadataDirectory; this.mdLockManager = mdLockManager; this.threadPool = threadPool; + this.logger = Loggers.getLogger(getClass(), shardId); init(); } @@ -139,15 +140,18 @@ public RemoteSegmentStoreDirectory( * As this cache is specific to an instance of RemoteSegmentStoreDirectory, it is possible that cache becomes stale * if another instance of RemoteSegmentStoreDirectory is used to upload/delete segment files. * It is caller's responsibility to call init() again to ensure that cache is properly updated. + * * @throws IOException if there were any failures in reading the metadata file */ public RemoteSegmentMetadata init() throws IOException { + logger.debug("Start initialisation of remote segment metadata"); RemoteSegmentMetadata remoteSegmentMetadata = readLatestMetadataFile(); if (remoteSegmentMetadata != null) { this.segmentsUploadedToRemoteStore = new ConcurrentHashMap<>(remoteSegmentMetadata.getMetadata()); } else { this.segmentsUploadedToRemoteStore = new ConcurrentHashMap<>(); } + logger.debug("Initialisation of remote segment metadata completed"); return remoteSegmentMetadata; } @@ -156,10 +160,12 @@ public RemoteSegmentMetadata init() throws IOException { * remote segment store. * this is currently used to restore snapshots, where we want to copy segment files from a given commit. * TODO: check if we can return read only RemoteSegmentStoreDirectory object from here. + * * @throws IOException if there were any failures in reading the metadata file */ - public RemoteSegmentMetadata initializeToSpecificCommit(long primaryTerm, long commitGeneration) throws IOException { - String metadataFile = getMetadataFileForCommit(primaryTerm, commitGeneration); + public RemoteSegmentMetadata initializeToSpecificCommit(long primaryTerm, long commitGeneration, String acquirerId) throws IOException { + String metadataFilePrefix = MetadataFilenameUtils.getMetadataFilePrefixForCommit(primaryTerm, commitGeneration); + String metadataFile = ((RemoteStoreMetadataLockManager) mdLockManager).fetchLockedMetadataFile(metadataFilePrefix, acquirerId); RemoteSegmentMetadata remoteSegmentMetadata = readMetadataFile(metadataFile); if (remoteSegmentMetadata != null) { this.segmentsUploadedToRemoteStore = new ConcurrentHashMap<>(remoteSegmentMetadata.getMetadata()); @@ -176,8 +182,9 @@ public RemoteSegmentMetadata initializeToSpecificCommit(long primaryTerm, long c * Refresh metadata files keep track of active segments for the shard at the time of refresh. * In order to get the list of segment files uploaded to the remote segment store, we need to read the latest metadata file. 
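A sketch of the two initialization paths above (primary term, generation and acquirer id are placeholders; error handling elided):

    // Normal open: seed the uploaded-segments cache from the latest metadata file.
    RemoteSegmentMetadata latest = remoteSegmentStoreDirectory.init();
    // Restore path: pin the cache to a specific commit whose metadata file is
    // locked by the given acquirer.
    RemoteSegmentMetadata pinned = remoteSegmentStoreDirectory.initializeToSpecificCommit(primaryTerm, commitGeneration, acquirerId);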
* Each metadata file contains a map where - * Key is - Segment local filename and - * Value is - local filename::uploaded filename::checksum + * Key is - Segment local filename and + * Value is - local filename::uploaded filename::checksum + * * @return Map of segment filename to uploaded filename with checksum * @throws IOException if there were any failures in reading the metadata file */ @@ -186,31 +193,35 @@ public RemoteSegmentMetadata readLatestMetadataFile() throws IOException { List<String> metadataFiles = remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ); + RemoteStoreUtils.verifyNoMultipleWriters(metadataFiles, MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen); + if (metadataFiles.isEmpty() == false) { String latestMetadataFile = metadataFiles.get(0); - logger.info("Reading latest Metadata file {}", latestMetadataFile); + logger.trace("Reading latest Metadata file {}", latestMetadataFile); remoteSegmentMetadata = readMetadataFile(latestMetadataFile); } else { - logger.info("No metadata file found, this can happen for new index with no data uploaded to remote segment store"); + logger.trace("No metadata file found, this can happen for new index with no data uploaded to remote segment store"); } return remoteSegmentMetadata; } private RemoteSegmentMetadata readMetadataFile(String metadataFilename) throws IOException { - try (IndexInput indexInput = remoteMetadataDirectory.openInput(metadataFilename, IOContext.DEFAULT)) { - byte[] metadataBytes = new byte[(int) indexInput.length()]; - indexInput.readBytes(metadataBytes, 0, (int) indexInput.length()); + try (InputStream inputStream = remoteMetadataDirectory.getBlobStream(metadataFilename)) { + byte[] metadataBytes = inputStream.readAllBytes(); return metadataStreamWrapper.readStream(new ByteArrayIndexInput(metadataFilename, metadataBytes)); } } /** * Metadata of a segment that is uploaded to remote segment store. 
+ * + * @opensearch.api */ + @PublicApi(since = "2.3.0") public static class UploadedSegmentMetadata { // Visible for testing static final String SEPARATOR = "::"; @@ -259,7 +270,7 @@ public static UploadedSegmentMetadata fromString(String uploadedFilename) { String[] values = uploadedFilename.split(SEPARATOR); UploadedSegmentMetadata metadata = new UploadedSegmentMetadata(values[0], values[1], values[2], Long.parseLong(values[3])); if (values.length < 5) { - logger.error("Lucene version is missing for UploadedSegmentMetadata: " + uploadedFilename); + staticLogger.error("Lucene version is missing for UploadedSegmentMetadata: " + uploadedFilename); } metadata.setWrittenByMajor(Integer.parseInt(values[4])); @@ -292,7 +303,7 @@ public void setWrittenByMajor(int writtenByMajor) { * Contains utility methods that provide various parts of metadata filename along with comparator * Each metadata filename is of format: PREFIX__PrimaryTerm__Generation__UUID */ - static class MetadataFilenameUtils { + public static class MetadataFilenameUtils { public static final String SEPARATOR = "__"; public static final String METADATA_PREFIX = "metadata"; @@ -306,12 +317,13 @@ static String getMetadataFilePrefixForCommit(long primaryTerm, long generation) } // Visible for testing - static String getMetadataFilename( + public static String getMetadataFilename( long primaryTerm, long generation, long translogGeneration, long uploadCounter, - int metadataVersion + int metadataVersion, + String nodeId ) { return String.join( SEPARATOR, @@ -320,6 +332,7 @@ static String getMetadataFilename( RemoteStoreUtils.invertLong(generation), RemoteStoreUtils.invertLong(translogGeneration), RemoteStoreUtils.invertLong(uploadCounter), + String.valueOf(Objects.hash(nodeId)), RemoteStoreUtils.invertLong(System.currentTimeMillis()), String.valueOf(metadataVersion) ); @@ -334,6 +347,19 @@ static long getPrimaryTerm(String[] filenameTokens) { static long getGeneration(String[] filenameTokens) { return RemoteStoreUtils.invertLong(filenameTokens[2]); } + + public static Tuple<String, String> getNodeIdByPrimaryTermAndGen(String filename) { + String[] tokens = filename.split(SEPARATOR); + if (tokens.length < 8) { + // For versions < 2.11, we don't have node id. + return null; + } + String primaryTermAndGen = String.join(SEPARATOR, tokens[1], tokens[2], tokens[3]); + + String nodeId = tokens[5]; + return new Tuple<>(primaryTermAndGen, nodeId); + } + } /** @@ -341,6 +367,7 @@ static long getGeneration(String[] filenameTokens) { * Any segment file that is uploaded without corresponding metadata file will not be visible as part of listAll(). * We chose not to return cache entries for listAll as cache can have entries for stale segments as well. * Even if we plan to delete stale segments from remote segment store, it will be a periodic operation. + * * @return segment filenames stored in remote segment store * @throws IOException if there were any failures in reading the metadata file */ @@ -351,6 +378,7 @@ public String[] listAll() throws IOException { /** * Delete segment file from remote segment store. + * * @param name the name of an existing segment file in local filesystem. * @throws IOException if the file exists but could not be deleted. */ @@ -365,8 +393,9 @@ public void deleteFile(String name) throws IOException { /** * Returns the byte length of a segment file in the remote segment store. + * * @param name the name of an existing segment file in local filesystem. 
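The filename scheme assembled by getMetadataFilename above can be sketched token by token (values illustrative; inv() stands for RemoteStoreUtils.invertLong):

    // metadata__inv(primaryTerm)__inv(generation)__inv(translogGen)__inv(uploadCounter)__hash(nodeId)__inv(timestamp)__version
    String[] tokens = metadataFilename.split(RemoteSegmentStoreDirectory.MetadataFilenameUtils.SEPARATOR);
    // tokens[1]..tokens[3] identify the commit (primary term, generation, translog
    // generation); tokens[5] carries the node id hash introduced in 2.11, which is
    // why getNodeIdByPrimaryTermAndGen returns null for files with fewer than 8 tokens.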
- * @throws IOException in case of I/O error + * @throws IOException in case of I/O error * @throws NoSuchFileException if the file does not exist in the cache or remote segment store */ @Override @@ -385,6 +414,7 @@ public long fileLength(String name) throws IOException { /** * Creates and returns a new instance of {@link RemoteIndexOutput} which will be used to copy files to the remote * segment store. + * * @param name the name of the file to create. * @throws IOException in case of I/O error */ @@ -395,15 +425,17 @@ public IndexOutput createOutput(String name, IOContext context) throws IOExcepti /** * Opens a stream for reading an existing file and returns {@link RemoteIndexInput} enclosing the stream. + * * @param name the name of an existing file. - * @throws IOException in case of I/O error + * @throws IOException in case of I/O error * @throws NoSuchFileException if the file does not exist either in cache or remote segment store */ @Override public IndexInput openInput(String name, IOContext context) throws IOException { String remoteFilename = getExistingRemoteFilename(name); + long fileLength = fileLength(name); if (remoteFilename != null) { - return remoteDataDirectory.openInput(remoteFilename, context); + return remoteDataDirectory.openInput(remoteFilename, fileLength, context); } else { throw new NoSuchFileException(name); } @@ -415,89 +447,38 @@ public IndexInput openInput(String name, IOContext context) throws IOException { * will be used, else, the legacy {@link RemoteSegmentStoreDirectory#copyFrom(Directory, String, String, IOContext)} * will be called. * - * @param from The directory for the file to be uploaded - * @param src File to be uploaded - * @param context IOContext to be used to open IndexInput of file during remote upload - * @param listener Listener to handle upload callback events + * @param from The directory for the file to be uploaded + * @param src File to be uploaded + * @param context IOContext to be used to open IndexInput of file during remote upload + * @param listener Listener to handle upload callback events */ public void copyFrom(Directory from, String src, IOContext context, ActionListener<Void> listener) { - if (remoteDataDirectory.getBlobContainer() instanceof VerifyingMultiStreamBlobContainer) { - try { - String remoteFilename = getNewRemoteSegmentFilename(src); - uploadBlob(from, src, remoteFilename, context, listener); - } catch (Exception e) { - listener.onFailure(e); - } - } else { - try { + try { + final String remoteFileName = getNewRemoteSegmentFilename(src); + boolean uploaded = remoteDataDirectory.copyFrom(from, src, remoteFileName, context, () -> { + try { + postUpload(from, src, remoteFileName, getChecksumOfLocalFile(from, src)); + } catch (IOException e) { + throw new RuntimeException("Exception in segment postUpload for file " + src, e); + } + }, listener); + if (uploaded == false) { copyFrom(from, src, src, context); listener.onResponse(null); - } catch (Exception e) { - logger.warn(() -> new ParameterizedMessage("Exception while uploading file {} to the remote segment store", src), e); - listener.onFailure(e); } + } catch (Exception e) { + logger.warn(() -> new ParameterizedMessage("Exception while uploading file {} to the remote segment store", src), e); + listener.onFailure(e); } } - private void uploadBlob(Directory from, String src, String remoteFileName, IOContext ioContext, ActionListener<Void> listener) - throws Exception { - long expectedChecksum = calculateChecksumOfChecksum(from, src); - long contentLength; - try 
(IndexInput indexInput = from.openInput(src, ioContext)) { - contentLength = indexInput.length(); - } - RemoteTransferContainer remoteTransferContainer = new RemoteTransferContainer( - src, - remoteFileName, - contentLength, - true, - WritePriority.NORMAL, - (size, position) -> new OffsetRangeIndexInputStream(from.openInput(src, ioContext), size, position), - expectedChecksum, - remoteDataDirectory.getBlobContainer() instanceof VerifyingMultiStreamBlobContainer - ); - ActionListener<Void> completionListener = ActionListener.wrap(resp -> { - try { - postUpload(from, src, remoteFileName, getChecksumOfLocalFile(from, src)); - listener.onResponse(null); - } catch (Exception e) { - logger.error(() -> new ParameterizedMessage("Exception in segment postUpload for file [{}]", src), e); - listener.onFailure(e); - } - }, ex -> { - logger.error(() -> new ParameterizedMessage("Failed to upload blob {}", src), ex); - IOException corruptIndexException = ExceptionsHelper.unwrapCorruption(ex); - if (corruptIndexException != null) { - listener.onFailure(corruptIndexException); - return; - } - Throwable throwable = ExceptionsHelper.unwrap(ex, CorruptFileException.class); - if (throwable != null) { - CorruptFileException corruptFileException = (CorruptFileException) throwable; - listener.onFailure(new CorruptIndexException(corruptFileException.getMessage(), corruptFileException.getFileName())); - return; - } - listener.onFailure(ex); - }); - - completionListener = ActionListener.runBefore(completionListener, () -> { - try { - remoteTransferContainer.close(); - } catch (Exception e) { - logger.warn("Error occurred while closing streams", e); - } - }); - - WriteContext writeContext = remoteTransferContainer.createWriteContext(); - ((VerifyingMultiStreamBlobContainer) remoteDataDirectory.getBlobContainer()).asyncBlobUpload(writeContext, completionListener); - } - /** * This acquires a lock on a given commit by creating a lock file in lock directory using {@code FileLockInfo} + * * @param primaryTerm Primary Term of index at the time of commit. - * @param generation Commit Generation - * @param acquirerId Lock Acquirer ID which wants to acquire lock on the commit. - * @throws IOException will be thrown in case i) listing file failed or ii) Writing the lock file failed. + * @param generation Commit Generation + * @param acquirerId Lock Acquirer ID which wants to acquire lock on the commit. + * @throws IOException will be thrown in case i) listing file failed or ii) Writing the lock file failed. * @throws NoSuchFileException when metadata file is not present for given commit point. */ @Override @@ -508,10 +489,11 @@ public void acquireLock(long primaryTerm, long generation, String acquirerId) th /** * Releases a lock which was acquired on given segment commit. + * * @param primaryTerm Primary Term of index at the time of commit. - * @param generation Commit Generation - * @param acquirerId Acquirer ID for which lock needs to be released. - * @throws IOException will be thrown in case i) listing lock files failed or ii) deleting the lock file failed. + * @param generation Commit Generation + * @param acquirerId Acquirer ID for which lock needs to be released. + * @throws IOException will be thrown in case i) listing lock files failed or ii) deleting the lock file failed. * @throws NoSuchFileException when metadata file is not present for given commit point. 
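A minimal usage sketch of the acquire/release pair documented here (the acquirer id is a hypothetical snapshot identifier):

    // Pin a commit point while an external reader uses it, then release.
    String acquirerId = "snapshot-1";
    remoteSegmentStoreDirectory.acquireLock(primaryTerm, generation, acquirerId);
    try {
        // ... read segment files referenced by the locked commit ...
    } finally {
        remoteSegmentStoreDirectory.releaseLock(primaryTerm, generation, acquirerId);
    }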
*/ @Override @@ -522,10 +504,11 @@ public void releaseLock(long primaryTerm, long generation, String acquirerId) th /** * Checks if a specific commit have any corresponding lock file. + * * @param primaryTerm Primary Term of index at the time of commit. - * @param generation Commit Generation + * @param generation Commit Generation * @return True if there is at least one lock for given primary term and generation. - * @throws IOException will be thrown in case listing lock files failed. + * @throws IOException will be thrown in case listing lock files failed. * @throws NoSuchFileException when metadata file is not present for given commit point. */ @Override @@ -564,13 +547,6 @@ String getMetadataFileForCommit(long primaryTerm, long generation) throws IOExce return metadataFiles.get(0); } - public void copyFrom(Directory from, String src, String dest, IOContext context, String checksum) throws IOException { - String remoteFilename; - remoteFilename = getNewRemoteSegmentFilename(dest); - remoteDataDirectory.copyFrom(from, src, remoteFilename, context); - postUpload(from, src, remoteFilename, checksum); - } - private void postUpload(Directory from, String src, String remoteFilename, String checksum) throws IOException { UploadedSegmentMetadata segmentMetadata = new UploadedSegmentMetadata(src, remoteFilename, checksum, from.fileLength(src)); segmentsUploadedToRemoteStore.put(src, segmentMetadata); @@ -582,15 +558,18 @@ private void postUpload(Directory from, String src, String remoteFilename, Strin */ @Override public void copyFrom(Directory from, String src, String dest, IOContext context) throws IOException { - copyFrom(from, src, dest, context, getChecksumOfLocalFile(from, src)); + String remoteFilename = getNewRemoteSegmentFilename(dest); + remoteDataDirectory.copyFrom(from, src, remoteFilename, context); + postUpload(from, src, remoteFilename, getChecksumOfLocalFile(from, src)); } /** * Checks if the file exists in the uploadedSegments cache and the checksum matches. * It is important to match the checksum as the same segment filename can be used for different * segments due to a concurrency issue. + * * @param localFilename filename of segment stored in local filesystem - * @param checksum checksum of the segment file + * @param checksum checksum of the segment file * @return true if file exists in cache and checksum matches. 
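As a concrete illustration of the checksum check documented just above (file name and checksum are placeholders): the same local name can be reused for a different segment, so presence is only meaningful together with a matching checksum:

    // True only if "_5.cfs" is cached with exactly this checksum.
    boolean present = remoteSegmentStoreDirectory.containsFile("_5.cfs", checksum);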
*/ public boolean containsFile(String localFilename, String checksum) { @@ -600,26 +579,31 @@ public boolean containsFile(String localFilename, String checksum) { /** * Upload metadata file - * @param segmentFiles segment files that are part of the shard at the time of the latest refresh + * + * @param segmentFiles segment files that are part of the shard at the time of the latest refresh * @param segmentInfosSnapshot SegmentInfos bytes to store as part of metadata file * @param storeDirectory instance of local directory to temporarily create metadata file before upload - * @param primaryTerm primary term to be used in the name of metadata file + * @param translogGeneration translog generation + * @param replicationCheckpoint ReplicationCheckpoint of primary shard + * @param nodeId node id * @throws IOException in case of I/O error while uploading the metadata file */ public void uploadMetadata( Collection<String> segmentFiles, SegmentInfos segmentInfosSnapshot, Directory storeDirectory, - long primaryTerm, - long translogGeneration + long translogGeneration, + ReplicationCheckpoint replicationCheckpoint, + String nodeId ) throws IOException { synchronized (this) { String metadataFilename = MetadataFilenameUtils.getMetadataFilename( - primaryTerm, + replicationCheckpoint.getPrimaryTerm(), segmentInfosSnapshot.getGeneration(), translogGeneration, metadataUploadCounter.incrementAndGet(), - RemoteSegmentMetadata.CURRENT_VERSION + RemoteSegmentMetadata.CURRENT_VERSION, + nodeId ); try { try (IndexOutput indexOutput = storeDirectory.createOutput(metadataFilename, IOContext.DEFAULT)) { @@ -646,8 +630,7 @@ public void uploadMetadata( new RemoteSegmentMetadata( RemoteSegmentMetadata.fromMapOfStrings(uploadedSegments), segmentInfoSnapshotByteArray, - primaryTerm, - segmentInfosSnapshot.getGeneration() + replicationCheckpoint ) ); } @@ -662,7 +645,8 @@ public void uploadMetadata( /** * Parses the provided SegmentInfos to retrieve a mapping of the provided segment files to * the respective Lucene major version that wrote the segments - * @param segmentFiles List of segment files for which the Lucene major version is needed + * + * @param segmentFiles List of segment files for which the Lucene major version is needed * @param segmentInfosSnapshot SegmentInfos instance to parse * @return Map of the segment file to its Lucene major version */ @@ -693,11 +677,12 @@ private Map<String, Integer> getSegmentToLuceneVersion(Collection<String> segmen /** * Try to delete file from local store. Fails silently on failures + * * @param filename: name of the file to be deleted */ private void tryAndDeleteLocalFile(String filename, Directory directory) { try { - logger.trace("Deleting file: " + filename); + logger.debug("Deleting file: " + filename); directory.deleteFile(filename); } catch (NoSuchFileException | FileNotFoundException e) { logger.trace("Exception while deleting. 
Missing file : " + filename, e); @@ -712,27 +697,6 @@ private String getChecksumOfLocalFile(Directory directory, String file) throws I } } - private long calculateChecksumOfChecksum(Directory directory, String file) throws IOException { - try (IndexInput indexInput = directory.openInput(file, IOContext.DEFAULT)) { - long storedChecksum = CodecUtil.retrieveChecksum(indexInput); - CRC32 checksumOfChecksum = new CRC32(); - checksumOfChecksum.update(ByteUtils.toByteArrayBE(storedChecksum)); - try { - return JZlib.crc32_combine(storedChecksum, checksumOfChecksum.getValue(), SEGMENT_CHECKSUM_BYTES); - } catch (Exception e) { - throw new ChecksumCombinationException( - "Potentially corrupted file: Checksum combination failed while combining stored checksum " - + "and calculated checksum of stored checksum in segment file: " - + file - + ", directory: " - + directory, - file, - e - ); - } - } - } - private String getExistingRemoteFilename(String localFilename) { if (segmentsUploadedToRemoteStore.containsKey(localFilename)) { return segmentsUploadedToRemoteStore.get(localFilename).uploadedFilename; @@ -754,20 +718,66 @@ public Map<String, UploadedSegmentMetadata> getSegmentsUploadedToRemoteStore() { return Collections.unmodifiableMap(this.segmentsUploadedToRemoteStore); } + // Visible for testing + Set<String> getMetadataFilesToFilterActiveSegments( + final int lastNMetadataFilesToKeep, + final List<String> sortedMetadataFiles, + final Set<String> lockedMetadataFiles + ) { + // the idea here is for each deletable md file, we can consider the segments present in non-deletable md file + // before this and non-deletable md file after this to compute the active segment files. + // For ex: + // lastNMetadataFilesToKeep = 3 + // sortedMetadataFiles = [m1, m2, m3, m4, m5, m6(locked), m7(locked), m8(locked), m9(locked), m10] + // lockedMetadataFiles = m6, m7, m8, m9 + // then the returned set will be (m3, m6, m9) + final Set<String> metadataFilesToFilterActiveSegments = new HashSet<>(); + for (int idx = lastNMetadataFilesToKeep; idx < sortedMetadataFiles.size(); idx++) { + if (lockedMetadataFiles.contains(sortedMetadataFiles.get(idx)) == false) { + String prevMetadata = (idx - 1) >= 0 ? sortedMetadataFiles.get(idx - 1) : null; + String nextMetadata = (idx + 1) < sortedMetadataFiles.size() ? sortedMetadataFiles.get(idx + 1) : null; + + if (prevMetadata != null && (lockedMetadataFiles.contains(prevMetadata) || idx == lastNMetadataFilesToKeep)) { + // if previous metadata of deletable md is locked, add it to md files for active segments. + metadataFilesToFilterActiveSegments.add(prevMetadata); + } + if (nextMetadata != null && lockedMetadataFiles.contains(nextMetadata)) { + // if next metadata of deletable md is locked, add it to md files for active segments. + metadataFilesToFilterActiveSegments.add(nextMetadata); + } + } + } + return metadataFilesToFilterActiveSegments; + } + /** * Delete stale segment and metadata files * One metadata file is kept per commit (refresh updates the same file). To read segments uploaded to remote store, - * we just need to read the latest metadata file. All the stale metadata files can be safely deleted. + * we just need to read the latest metadata file. + * Assumptions: + * (1) if a segment file is not present in a md file, it will never be present in any md file created after that, and + * (2) if (md1, md2, md3) are in sorted order, it is not possible that a segment file will be in md1 and md3 but not in md2. 
+ * <p> + * for each deletable md file, segments present in non-deletable md file before this and non-deletable md file + * after this are sufficient to compute the list of active or non-deletable segment files referenced by a deletable + * md file + * * @param lastNMetadataFilesToKeep number of metadata files to keep * @throws IOException in case of I/O error while reading from / writing to remote segment store */ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException { + if (lastNMetadataFilesToKeep == -1) { + logger.info( + "Stale segment deletion is disabled if cluster.remote_store.index.segment_metadata.retention.max_count is set to -1" + ); + return; + } List<String> sortedMetadataFileList = remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( MetadataFilenameUtils.METADATA_PREFIX, Integer.MAX_VALUE ); if (sortedMetadataFileList.size() <= lastNMetadataFilesToKeep) { - logger.info( + logger.debug( "Number of commits in remote segment store={}, lastNMetadataFilesToKeep={}", sortedMetadataFileList.size(), lastNMetadataFilesToKeep @@ -775,36 +785,43 @@ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException return; } - List<String> metadataFilesEligibleToDelete = sortedMetadataFileList.subList( - lastNMetadataFilesToKeep, - sortedMetadataFileList.size() + List<String> metadataFilesEligibleToDelete = new ArrayList<>( + sortedMetadataFileList.subList(lastNMetadataFilesToKeep, sortedMetadataFileList.size()) ); - List<String> metadataFilesToBeDeleted = metadataFilesEligibleToDelete.stream().filter(metadataFile -> { - try { - return !isLockAcquired(metadataFile); - } catch (IOException e) { - logger.error( - "skipping metadata file (" - + metadataFile - + ") deletion for this run," - + " as checking lock for metadata is failing with error: " - + e - ); - return false; - } - }).collect(Collectors.toList()); + Set<String> allLockFiles; + try { + allLockFiles = ((RemoteStoreMetadataLockManager) mdLockManager).fetchLockedMetadataFiles(MetadataFilenameUtils.METADATA_PREFIX); + } catch (Exception e) { + logger.error("Exception while fetching segment metadata lock files, skipping deleteStaleSegments", e); + return; + } + List<String> metadataFilesToBeDeleted = metadataFilesEligibleToDelete.stream() + .filter(metadataFile -> allLockFiles.contains(metadataFile) == false) + .collect(Collectors.toList()); - sortedMetadataFileList.removeAll(metadataFilesToBeDeleted); + logger.debug( + "metadataFilesEligibleToDelete={} metadataFilesToBeDeleted={}", + metadataFilesEligibleToDelete, + metadataFilesToBeDeleted + ); Map<String, UploadedSegmentMetadata> activeSegmentFilesMetadataMap = new HashMap<>(); Set<String> activeSegmentRemoteFilenames = new HashSet<>(); - for (String metadataFile : sortedMetadataFileList) { + + final Set<String> metadataFilesToFilterActiveSegments = getMetadataFilesToFilterActiveSegments( + lastNMetadataFilesToKeep, + sortedMetadataFileList, + allLockFiles + ); + + for (String metadataFile : metadataFilesToFilterActiveSegments) { Map<String, UploadedSegmentMetadata> segmentMetadataMap = readMetadataFile(metadataFile).getMetadata(); activeSegmentFilesMetadataMap.putAll(segmentMetadataMap); activeSegmentRemoteFilenames.addAll( segmentMetadataMap.values().stream().map(metadata -> metadata.uploadedFilename).collect(Collectors.toSet()) ); } + Set<String> deletedSegmentFiles = new HashSet<>(); for (String metadataFile : metadataFilesToBeDeleted) { Map<String, UploadedSegmentMetadata> staleSegmentFilesMetadataMap = 
readMetadataFile(metadataFile).getMetadata(); Set<String> staleSegmentRemoteFilenames = staleSegmentFilesMetadataMap.values() @@ -812,57 +829,89 @@ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException .map(metadata -> metadata.uploadedFilename) .collect(Collectors.toSet()); AtomicBoolean deletionSuccessful = new AtomicBoolean(true); - staleSegmentRemoteFilenames.stream().filter(file -> !activeSegmentRemoteFilenames.contains(file)).forEach(file -> { - try { - remoteDataDirectory.deleteFile(file); - if (!activeSegmentFilesMetadataMap.containsKey(getLocalSegmentFilename(file))) { - segmentsUploadedToRemoteStore.remove(getLocalSegmentFilename(file)); + staleSegmentRemoteFilenames.stream() + .filter(file -> activeSegmentRemoteFilenames.contains(file) == false) + .filter(file -> deletedSegmentFiles.contains(file) == false) + .forEach(file -> { + try { + remoteDataDirectory.deleteFile(file); + deletedSegmentFiles.add(file); + if (!activeSegmentFilesMetadataMap.containsKey(getLocalSegmentFilename(file))) { + segmentsUploadedToRemoteStore.remove(getLocalSegmentFilename(file)); + } + } catch (NoSuchFileException e) { + logger.info("Segment file {} corresponding to metadata file {} does not exist in remote", file, metadataFile); + } catch (IOException e) { + deletionSuccessful.set(false); + logger.warn( + "Exception while deleting segment file {} corresponding to metadata file {}. Deletion will be re-tried", + file, + metadataFile + ); } - } catch (NoSuchFileException e) { - logger.info("Segment file {} corresponding to metadata file {} does not exist in remote", file, metadataFile); - } catch (IOException e) { - deletionSuccessful.set(false); - logger.info( - "Exception while deleting segment file {} corresponding to metadata file {}. Deletion will be re-tried", - file, - metadataFile - ); - } - }); + }); if (deletionSuccessful.get()) { - logger.info("Deleting stale metadata file {} from remote segment store", metadataFile); + logger.debug("Deleting stale metadata file {} from remote segment store", metadataFile); remoteMetadataDirectory.deleteFile(metadataFile); } } + logger.debug("deletedSegmentFiles={}", deletedSegmentFiles); + } + + public void deleteStaleSegmentsAsync(int lastNMetadataFilesToKeep) { + deleteStaleSegmentsAsync(lastNMetadataFilesToKeep, ActionListener.wrap(r -> {}, e -> {})); } /** * Delete stale segment and metadata files asynchronously. * This method calls {@link RemoteSegmentStoreDirectory#deleteStaleSegments(int)} in an async manner. 
+ * * @param lastNMetadataFilesToKeep number of metadata files to keep */ - public void deleteStaleSegmentsAsync(int lastNMetadataFilesToKeep) { + public void deleteStaleSegmentsAsync(int lastNMetadataFilesToKeep, ActionListener<Void> listener) { if (canDeleteStaleCommits.compareAndSet(true, false)) { try { threadPool.executor(ThreadPool.Names.REMOTE_PURGE).execute(() -> { try { deleteStaleSegments(lastNMetadataFilesToKeep); + listener.onResponse(null); } catch (Exception e) { - logger.info( + logger.error( "Exception while deleting stale commits from remote segment store, will retry delete post next commit", e ); + listener.onFailure(e); } finally { canDeleteStaleCommits.set(true); } }); } catch (Exception e) { - logger.info("Exception occurred while scheduling deleteStaleCommits", e); + logger.error("Exception occurred while scheduling deleteStaleCommits", e); canDeleteStaleCommits.set(true); + listener.onFailure(e); } } } + public static void remoteDirectoryCleanup( + RemoteSegmentStoreDirectoryFactory remoteDirectoryFactory, + String remoteStoreRepoForIndex, + String indexUUID, + ShardId shardId + ) { + try { + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = (RemoteSegmentStoreDirectory) remoteDirectoryFactory.newDirectory( + remoteStoreRepoForIndex, + indexUUID, + shardId + ); + remoteSegmentStoreDirectory.deleteStaleSegments(0); + remoteSegmentStoreDirectory.deleteIfEmpty(); + } catch (Exception e) { + staticLogger.error("Exception occurred while deleting directory", e); + } + } + /* Tries to delete shard level directory if it is empty Return true if it deleted it successfully @@ -873,7 +922,7 @@ private boolean deleteIfEmpty() throws IOException { 1 ); if (metadataFiles.size() != 0) { - logger.info("Remote directory still has files , not deleting the path"); + logger.info("Remote directory still has files, not deleting the path"); return false; } @@ -885,12 +934,11 @@ private boolean deleteIfEmpty() throws IOException { logger.error("Exception occurred while deleting directory", e); return false; } - return true; } + @Override public void close() throws IOException { - deleteStaleSegmentsAsync(0); - deleteIfEmpty(); + deleteStaleSegmentsAsync(0, ActionListener.wrap(r -> deleteIfEmpty(), e -> logger.error("Failed to cleanup remote directory", e))); } } diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java index 3bec84f287ce4..eca8d9ec702e1 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java @@ -9,12 +9,12 @@ package org.opensearch.index.store; import org.apache.lucene.store.Directory; -import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.ShardPath; +import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; import org.opensearch.index.store.lockmanager.RemoteStoreLockManagerFactory; -import org.opensearch.index.store.lockmanager.RemoteStoreMetadataLockManager; import org.opensearch.plugins.IndexStorePlugin; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; @@ -46,35 +46,35 @@ public RemoteSegmentStoreDirectoryFactory(Supplier<RepositoriesService> reposito public 
Directory newDirectory(IndexSettings indexSettings, ShardPath path) throws IOException { String repositoryName = indexSettings.getRemoteStoreRepository(); String indexUUID = indexSettings.getIndex().getUUID(); - String shardId = String.valueOf(path.getShardId().getId()); - - return newDirectory(repositoryName, indexUUID, shardId); + return newDirectory(repositoryName, indexUUID, path.getShardId()); } - public Directory newDirectory(String repositoryName, String indexUUID, String shardId) throws IOException { + public Directory newDirectory(String repositoryName, String indexUUID, ShardId shardId) throws IOException { try (Repository repository = repositoriesService.get().repository(repositoryName)) { assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; - BlobPath commonBlobPath = ((BlobStoreRepository) repository).basePath(); - commonBlobPath = commonBlobPath.add(indexUUID).add(shardId).add(SEGMENTS); + BlobStoreRepository blobStoreRepository = ((BlobStoreRepository) repository); + BlobPath commonBlobPath = blobStoreRepository.basePath(); + commonBlobPath = commonBlobPath.add(indexUUID).add(String.valueOf(shardId.id())).add(SEGMENTS); - RemoteDirectory dataDirectory = createRemoteDirectory(repository, commonBlobPath, "data"); - RemoteDirectory metadataDirectory = createRemoteDirectory(repository, commonBlobPath, "metadata"); - RemoteStoreMetadataLockManager mdLockManager = RemoteStoreLockManagerFactory.newLockManager( + RemoteDirectory dataDirectory = new RemoteDirectory( + blobStoreRepository.blobStore().blobContainer(commonBlobPath.add("data")), + blobStoreRepository::maybeRateLimitRemoteUploadTransfers, + blobStoreRepository::maybeRateLimitRemoteDownloadTransfers + ); + RemoteDirectory metadataDirectory = new RemoteDirectory( + blobStoreRepository.blobStore().blobContainer(commonBlobPath.add("metadata")) + ); + RemoteStoreLockManager mdLockManager = RemoteStoreLockManagerFactory.newLockManager( repositoriesService.get(), repositoryName, indexUUID, - shardId + String.valueOf(shardId.id()) ); - return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, mdLockManager, threadPool); + return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, mdLockManager, threadPool, shardId); } catch (RepositoryMissingException e) { throw new IllegalArgumentException("Repository should be created before creating index with remote_store enabled setting", e); } } - private RemoteDirectory createRemoteDirectory(Repository repository, BlobPath commonBlobPath, String extention) { - BlobPath extendedPath = commonBlobPath.add(extention); - BlobContainer dataBlobContainer = ((BlobStoreRepository) repository).blobStore().blobContainer(extendedPath); - return new RemoteDirectory(dataBlobContainer); - } } diff --git a/server/src/main/java/org/opensearch/index/store/RemoteStoreFileDownloader.java b/server/src/main/java/org/opensearch/index/store/RemoteStoreFileDownloader.java new file mode 100644 index 0000000000000..134994c0ae32c --- /dev/null +++ b/server/src/main/java/org/opensearch/index/store/RemoteStoreFileDownloader.java @@ -0,0 +1,167 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.store; + +import org.apache.logging.log4j.Logger; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IOContext; +import org.opensearch.action.support.GroupedActionListener; +import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.logging.Loggers; +import org.opensearch.common.util.CancellableThreads; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.util.Collection; +import java.util.Queue; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.ExecutionException; + +/** + * Helper class to download files from a {@link RemoteSegmentStoreDirectory} + * instance to a local {@link Directory} instance in parallel depending on thread + * pool size and recovery settings. + * + * @opensearch.api + */ +@PublicApi(since = "2.11.0") +public final class RemoteStoreFileDownloader { + private final Logger logger; + private final ThreadPool threadPool; + private final RecoverySettings recoverySettings; + + public RemoteStoreFileDownloader(ShardId shardId, ThreadPool threadPool, RecoverySettings recoverySettings) { + this.logger = Loggers.getLogger(RemoteStoreFileDownloader.class, shardId); + this.threadPool = threadPool; + this.recoverySettings = recoverySettings; + } + + /** + * Copies the given segments from the remote segment store to the given + * local directory. + * @param cancellableThreads Hook used to cancel the in-flight downloads + * @param source The remote directory to copy segment files from + * @param destination The local directory to copy segment files to + * @param toDownloadSegments The list of segment files to download + * @param listener Callback listener to be notified upon completion + */ + public void downloadAsync( + CancellableThreads cancellableThreads, + Directory source, + Directory destination, + Collection<String> toDownloadSegments, + ActionListener<Void> listener + ) { + downloadInternal(cancellableThreads, source, destination, null, toDownloadSegments, () -> {}, listener); + } + + /** + * Copies the given segments from the remote segment store to the given + * local directory, while also copying the segments _to_ another remote directory. + * @param source The remote directory to copy segment files from + * @param destination The local directory to copy segment files to + * @param secondDestination The second remote directory that segment files are + * copied to after being copied to the local directory + * @param toDownloadSegments The list of segment files to download + * @param onFileCompletion A generic runnable that is invoked after each file download. + * Must be thread safe as this may be invoked concurrently from + * different threads. 
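A usage sketch of the blocking variant described here (directories and file names are placeholders; the second destination may be null when no extra copy is needed):

    // Blocks until every file is copied; IOExceptions raised on worker threads
    // are unwrapped and rethrown, and interruption cancels in-flight downloads.
    Collection<String> files = List.of("_0.cfe", "_0.si");
    downloader.download(remoteDirectory, localDirectory, secondRemoteDirectory, files, () -> {});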
+ */ + public void download( + Directory source, + Directory destination, + Directory secondDestination, + Collection<String> toDownloadSegments, + Runnable onFileCompletion + ) throws InterruptedException, IOException { + final CancellableThreads cancellableThreads = new CancellableThreads(); + final PlainActionFuture<Void> listener = PlainActionFuture.newFuture(); + downloadInternal(cancellableThreads, source, destination, secondDestination, toDownloadSegments, onFileCompletion, listener); + try { + listener.get(); + } catch (ExecutionException e) { + if (e.getCause() instanceof RuntimeException) { + throw (RuntimeException) e.getCause(); + } else if (e.getCause() instanceof IOException) { + throw (IOException) e.getCause(); + } + throw new RuntimeException(e); + } catch (InterruptedException e) { + // If the blocking call on the PlainActionFuture itself is interrupted, then we must + // cancel the asynchronous work we were waiting on + cancellableThreads.cancel(e.getMessage()); + Thread.currentThread().interrupt(); + throw e; + } + } + + private void downloadInternal( + CancellableThreads cancellableThreads, + Directory source, + Directory destination, + @Nullable Directory secondDestination, + Collection<String> toDownloadSegments, + Runnable onFileCompletion, + ActionListener<Void> listener + ) { + final Queue<String> queue = new ConcurrentLinkedQueue<>(toDownloadSegments); + // Choose the minimum of: + // - number of files to download + // - max thread pool size + // - "indices.recovery.max_concurrent_remote_store_streams" setting + final int threads = Math.min( + toDownloadSegments.size(), + Math.min(threadPool.info(ThreadPool.Names.REMOTE_RECOVERY).getMax(), recoverySettings.getMaxConcurrentRemoteStoreStreams()) + ); + logger.trace("Starting download of {} files with {} threads", queue.size(), threads); + final ActionListener<Void> allFilesListener = new GroupedActionListener<>(ActionListener.map(listener, r -> null), threads); + for (int i = 0; i < threads; i++) { + copyOneFile(cancellableThreads, source, destination, secondDestination, queue, onFileCompletion, allFilesListener); + } + } + + private void copyOneFile( + CancellableThreads cancellableThreads, + Directory source, + Directory destination, + @Nullable Directory secondDestination, + Queue<String> queue, + Runnable onFileCompletion, + ActionListener<Void> listener + ) { + final String file = queue.poll(); + if (file == null) { + // Queue is empty, so notify listener we are done + listener.onResponse(null); + } else { + threadPool.executor(ThreadPool.Names.REMOTE_RECOVERY).submit(() -> { + logger.trace("Downloading file {}", file); + try { + cancellableThreads.executeIO(() -> { + destination.copyFrom(source, file, file, IOContext.DEFAULT); + onFileCompletion.run(); + if (secondDestination != null) { + secondDestination.copyFrom(destination, file, file, IOContext.DEFAULT); + } + }); + } catch (Exception e) { + // Clear the queue to stop any future processing, report the failure, then return + queue.clear(); + listener.onFailure(e); + return; + } + copyOneFile(cancellableThreads, source, destination, secondDestination, queue, onFileCompletion, listener); + }); + } + } +} diff --git a/server/src/main/java/org/opensearch/index/store/ReplicaFileTracker.java b/server/src/main/java/org/opensearch/index/store/ReplicaFileTracker.java deleted file mode 100644 index 0ec282619337c..0000000000000 --- a/server/src/main/java/org/opensearch/index/store/ReplicaFileTracker.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * 
SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.store; - -import java.util.Collection; -import java.util.HashMap; -import java.util.Map; - -/** - * This class is a version of Lucene's ReplicaFileDeleter class used to keep track of - * segment files that should be preserved on replicas between replication events. - * The difference is this component does not actually perform any deletions, it only handles refcounts. - * Our deletions are made through Store.java. - * - * https://github.com/apache/lucene/blob/main/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/ReplicaFileDeleter.java - * - * @opensearch.internal - */ -final class ReplicaFileTracker { - - private final Map<String, Integer> refCounts = new HashMap<>(); - - public synchronized void incRef(Collection<String> fileNames) { - for (String fileName : fileNames) { - refCounts.merge(fileName, 1, Integer::sum); - } - } - - public synchronized void decRef(Collection<String> fileNames) { - for (String fileName : fileNames) { - Integer curCount = refCounts.get(fileName); - assert curCount != null : "fileName=" + fileName; - assert curCount > 0; - if (curCount == 1) { - refCounts.remove(fileName); - } else { - refCounts.put(fileName, curCount - 1); - } - } - } - - public synchronized boolean canDelete(String fileName) { - return refCounts.containsKey(fileName) == false; - } -} diff --git a/server/src/main/java/org/opensearch/index/store/Store.java b/server/src/main/java/org/opensearch/index/store/Store.java index 8967100d4faf0..1930a37daa400 100644 --- a/server/src/main/java/org/opensearch/index/store/Store.java +++ b/server/src/main/java/org/opensearch/index/store/Store.java @@ -65,13 +65,9 @@ import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.Version; import org.opensearch.ExceptionsHelper; -import org.opensearch.common.CheckedConsumer; import org.opensearch.common.UUIDs; -import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.logging.Loggers; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.lucene.store.ByteArrayIndexInput; @@ -81,8 +77,13 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRefCounted; import org.opensearch.common.util.concurrent.RefCounted; -import org.opensearch.common.util.iterable.Iterables; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.common.util.iterable.Iterables; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.env.NodeEnvironment; import org.opensearch.env.ShardLock; import org.opensearch.env.ShardLockObtainFailedException; @@ -92,7 +93,7 @@ import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.AbstractIndexShardComponent; import org.opensearch.index.shard.IndexShard; -import 
org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.shard.ShardPath; import org.opensearch.index.translog.Translog; import java.io.Closeable; @@ -105,7 +106,6 @@ import java.nio.file.NoSuchFileException; import java.nio.file.Path; import java.util.ArrayList; -import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; @@ -124,7 +124,6 @@ import static java.util.Collections.unmodifiableMap; import static org.opensearch.index.seqno.SequenceNumbers.LOCAL_CHECKPOINT_KEY; import static org.opensearch.index.store.Store.MetadataSnapshot.loadMetadata; -import static org.opensearch.indices.replication.SegmentReplicationTarget.REPLICATION_PREFIX; /** * A Store provides plain access to files written by an opensearch index shard. Each shard @@ -146,8 +145,9 @@ * } * </pre> * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Store extends AbstractIndexShardComponent implements Closeable, RefCounted { /** * This is an escape hatch for lucenes internal optimization that checks if the IndexInput is an instance of ByteBufferIndexInput @@ -182,10 +182,10 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref private final ReentrantReadWriteLock metadataLock = new ReentrantReadWriteLock(); private final ShardLock shardLock; private final OnClose onClose; + private final ShardPath shardPath; // used to ref count files when a new Reader is opened for PIT/Scroll queries // prevents segment files deletion until the PIT/Scroll expires or is discarded - private final ReplicaFileTracker replicaFileTracker; private final AbstractRefCounted refCounter = new AbstractRefCounted("store") { @Override @@ -196,10 +196,17 @@ protected void closeInternal() { }; public Store(ShardId shardId, IndexSettings indexSettings, Directory directory, ShardLock shardLock) { - this(shardId, indexSettings, directory, shardLock, OnClose.EMPTY); + this(shardId, indexSettings, directory, shardLock, OnClose.EMPTY, null); } - public Store(ShardId shardId, IndexSettings indexSettings, Directory directory, ShardLock shardLock, OnClose onClose) { + public Store( + ShardId shardId, + IndexSettings indexSettings, + Directory directory, + ShardLock shardLock, + OnClose onClose, + ShardPath shardPath + ) { super(shardId, indexSettings); final TimeValue refreshInterval = indexSettings.getValue(INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING); logger.debug("store stats are refreshed with refresh_interval [{}]", refreshInterval); @@ -207,8 +214,7 @@ public Store(ShardId shardId, IndexSettings indexSettings, Directory directory, this.directory = new StoreDirectory(sizeCachingDir, Loggers.getLogger("index.store.deletes", shardId)); this.shardLock = shardLock; this.onClose = onClose; - this.replicaFileTracker = indexSettings.isSegRepEnabled() ? new ReplicaFileTracker() : null; - + this.shardPath = shardPath; assert onClose != null; assert shardLock != null; assert shardLock.getShardId().equals(shardId); @@ -219,6 +225,10 @@ public Directory directory() { return directory; } + public ShardPath shardPath() { + return shardPath; + } + /** * Returns the last committed segments info for this store * @@ -289,14 +299,15 @@ final void ensureOpen() { /** * Returns a new MetadataSnapshot for the given commit. If the given commit is <code>null</code> * the latest commit point is used. - * + * <p> * Note that this method requires the caller verify it has the right to access the store and * no concurrent file changes are happening. 
If in doubt, you probably want to use one of the following: - * + * <p> * {@link #readMetadataSnapshot(Path, ShardId, NodeEnvironment.ShardLocker, Logger)} to read a meta data while locking * {@link IndexShard#snapshotStoreMetadata()} to safely read from an existing shard * {@link IndexShard#acquireLastIndexCommit(boolean)} to get an {@link IndexCommit} which is safe to use but has to be freed - * @param commit the index commit to read the snapshot from or <code>null</code> if the latest snapshot should be read from the + * + * @param commit the index commit to read the snapshot from or {@code null} if the latest snapshot should be read from the * directory * @throws CorruptIndexException if the lucene index is corrupted. This can be caused by a checksum mismatch or an * unexpected exception when opening the index reading the segments file. @@ -320,10 +331,10 @@ public MetadataSnapshot getMetadata() throws IOException { /** * Returns a new MetadataSnapshot for the given commit. If the given commit is <code>null</code> * the latest commit point is used. - * + * <p> * Note that this method requires the caller verify it has the right to access the store and * no concurrent file changes are happening. If in doubt, you probably want to use one of the following: - * + * <p> * {@link #readMetadataSnapshot(Path, ShardId, NodeEnvironment.ShardLocker, Logger)} to read a meta data while locking * {@link IndexShard#snapshotStoreMetadata()} to safely read from an existing shard * {@link IndexShard#acquireLastIndexCommit(boolean)} to get an {@link IndexCommit} which is safe to use but has to be freed @@ -375,7 +386,13 @@ public MetadataSnapshot getMetadata(SegmentInfos segmentInfos) throws IOExceptio */ public Map<String, StoreFileMetadata> getSegmentMetadataMap(SegmentInfos segmentInfos) throws IOException { assert indexSettings.isSegRepEnabled(); - return loadMetadata(segmentInfos, directory, logger, true).fileMetadata; + failIfCorrupted(); + try { + return loadMetadata(segmentInfos, directory, logger, true).fileMetadata; + } catch (NoSuchFileException | CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) { + markStoreCorrupted(ex); + throw ex; + } } /** @@ -788,88 +805,17 @@ public void cleanupAndVerify(String reason, MetadataSnapshot sourceMetadata) thr } } - /** - * Segment Replication method - * This method deletes files in store that are not referenced by latest on-disk commit point - * - * @param reason the reason for this cleanup operation logged for each deleted file - * @param fileToConsiderForCleanUp Files to consider for clean up. - * - * @throws IOException Exception on locking. - */ - public void cleanupAndPreserveLatestCommitPoint(Collection<String> fileToConsiderForCleanUp, String reason) throws IOException { - assert indexSettings.isSegRepEnabled(); - // fetch a snapshot from the latest on disk Segments_N file. This can be behind - // the passed in local in memory snapshot, so we want to ensure files it references are not removed. 
- metadataLock.writeLock().lock(); - try (Lock writeLock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) { - cleanupFiles(fileToConsiderForCleanUp, reason, this.readLastCommittedSegmentsInfo().files(true)); - } finally { - metadataLock.writeLock().unlock(); - } - } - - private void cleanupFiles(Collection<String> filesToConsiderForCleanup, String reason, Collection<String> lastCommittedSegmentInfos) { - assert metadataLock.isWriteLockedByCurrentThread(); - for (String existingFile : filesToConsiderForCleanup) { - if (Store.isAutogenerated(existingFile) || lastCommittedSegmentInfos != null && lastCommittedSegmentInfos.contains(existingFile) - // also ensure we are not deleting a file referenced by an active reader. - || replicaFileTracker != null && replicaFileTracker.canDelete(existingFile) == false - // Prevent temporary replication files as it should be cleaned up MultiFileWriter - || existingFile.startsWith(REPLICATION_PREFIX)) { - // don't delete snapshot file, or the checksums file (note, this is extra protection since the Store won't delete - // checksum) - continue; - } - try { - directory.deleteFile(reason, existingFile); - } catch (IOException ex) { - if (existingFile.startsWith(IndexFileNames.SEGMENTS) || existingFile.startsWith(CORRUPTED_MARKER_NAME_PREFIX)) { - // TODO do we need to also fail this if we can't delete the pending commit file? - // if one of those files can't be deleted we better fail the cleanup otherwise we might leave an old commit - // point around? - throw new IllegalStateException("Can't delete " + existingFile + " - cleanup failed", ex); - } - logger.debug(() -> new ParameterizedMessage("failed to delete file [{}]", existingFile), ex); - // ignore, we don't really care, will get deleted later on - } - } - } - /** * Segment replication method - * + * <p> * This method takes the segment info bytes to build SegmentInfos. It inc'refs files pointed by passed in SegmentInfos * bytes to ensure they are not deleted. * - * @param tmpToFileName Map of temporary replication file to actual file name * @param infosBytes bytes[] of SegmentInfos supposed to be sent over by primary excluding segment_N file * @param segmentsGen segment generation number - * @param consumer consumer for generated SegmentInfos * @throws IOException Exception while reading store and building segment infos */ - public void buildInfosFromBytes( - Map<String, String> tmpToFileName, - byte[] infosBytes, - long segmentsGen, - CheckedConsumer<SegmentInfos, IOException> consumer - ) throws IOException { - metadataLock.writeLock().lock(); - try { - final List<String> values = new ArrayList<>(tmpToFileName.values()); - incRefFileDeleter(values); - try { - renameTempFilesSafe(tmpToFileName); - consumer.accept(buildSegmentInfos(infosBytes, segmentsGen)); - } finally { - decRefFileDeleter(values); - } - } finally { - metadataLock.writeLock().unlock(); - } - } - - private SegmentInfos buildSegmentInfos(byte[] infosBytes, long segmentsGen) throws IOException { + public SegmentInfos buildSegmentInfos(byte[] infosBytes, long segmentsGen) throws IOException { try (final ChecksumIndexInput input = toIndexInput(infosBytes)) { return SegmentInfos.readCommit(directory, input, segmentsGen); } @@ -936,7 +882,7 @@ public void beforeClose() { * have the ability to create a writer directly from a SegmentInfos object. To promote the replica as a primary and avoid reindexing, we must first commit * on the replica so that it can be opened with a writeable engine. 
Further, InternalEngine currently invokes `trimUnsafeCommits` which reverts the engine to a previous safeCommit where the max seqNo is less than or equal * to the current global checkpoint. It is likely that the replica has a maxSeqNo that is higher than the global cp and a new commit will be wiped. - * + * <p> * To get around these limitations, this method first creates an IndexCommit directly from SegmentInfos, it then * uses an appending IW to create an IndexCommit from the commit created on SegmentInfos. * This ensures that 1. All files in the new commit are fsynced and 2. Deletes older commit points so the only commit to start from is our new commit. @@ -957,24 +903,29 @@ public void commitSegmentInfos(SegmentInfos latestSegmentInfos, long maxSeqNo, l latestSegmentInfos.commit(directory()); directory.sync(latestSegmentInfos.files(true)); directory.syncMetaData(); - cleanupAndPreserveLatestCommitPoint(List.of(this.directory.listAll()), "After commit"); } finally { metadataLock.writeLock().unlock(); } } + public DirectoryFileTransferTracker getDirectoryFileTransferTracker() { + return directory.getDirectoryFileTransferTracker(); + } + /** * A store directory * * @opensearch.internal */ static final class StoreDirectory extends FilterDirectory { private final Logger deletesLogger; + public final DirectoryFileTransferTracker directoryFileTransferTracker; + StoreDirectory(ByteSizeCachingDirectory delegateDirectory, Logger deletesLogger) { super(delegateDirectory); this.deletesLogger = deletesLogger; + this.directoryFileTransferTracker = new DirectoryFileTransferTracker(); } /** Estimate the cumulative size of all files in this directory in bytes. */ @@ -1012,6 +963,52 @@ public Set<String> getPendingDeletions() throws IOException { // to be removed once fixed in FilterDirectory.
return unwrap(this).getPendingDeletions(); } + + public DirectoryFileTransferTracker getDirectoryFileTransferTracker() { + return directoryFileTransferTracker; + } + + @Override + public void copyFrom(Directory from, String src, String dest, IOContext context) throws IOException { + long fileSize = from.fileLength(src); + beforeDownload(fileSize); + boolean success = false; + long startTime = System.currentTimeMillis(); + try { + super.copyFrom(from, src, dest, context); + success = true; + afterDownload(fileSize, startTime); + } finally { + if (!success) { + downloadFailed(fileSize, startTime); + } + } + } + + /** + * Updates the number of bytes attempted for download + */ + private void beforeDownload(long fileSize) { + directoryFileTransferTracker.addTransferredBytesStarted(fileSize); + } + + /** + * Updates + * - The number of bytes that have been successfully downloaded from the source store + * - The last successful download completion timestamp + * - The last successfully downloaded file + * - Download speed (in bytes/sec) + */ + private void afterDownload(long fileSize, long startTimeInMs) { + directoryFileTransferTracker.addTransferredBytesSucceeded(fileSize, startTimeInMs); + } + + /** + * Updates the number of bytes that failed to download + */ + private void downloadFailed(long fileSize, long startTimeInMs) { + directoryFileTransferTracker.addTransferredBytesFailed(fileSize, startTimeInMs); + } } /** @@ -1025,8 +1022,9 @@ public Set<String> getPendingDeletions() throws IOException { * * @see StoreFileMetadata * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class MetadataSnapshot implements Iterable<StoreFileMetadata>, Writeable { private final Map<String, StoreFileMetadata> metadata; @@ -1432,8 +1430,9 @@ public String getSyncId() { * * @see MetadataSnapshot#recoveryDiff(org.opensearch.index.store.Store.MetadataSnapshot) * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class RecoveryDiff { /** * Files that exist in both snapshots and they can be considered the same ie. they don't need to be recovered @@ -1773,13 +1772,13 @@ public void accept(ShardLock Lock) {} }; } - /** - * creates an empty lucene index and a corresponding empty translog. Any existing data will be deleted. - */ - public void createEmpty(Version luceneVersion) throws IOException { + public void createEmpty(Version luceneVersion, String translogUUID) throws IOException { metadataLock.writeLock().lock(); try (IndexWriter writer = newEmptyIndexWriter(directory, luceneVersion)) { final Map<String, String> map = new HashMap<>(); + if (translogUUID != null) { + map.put(Translog.TRANSLOG_UUID_KEY, translogUUID); + } map.put(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID()); map.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(SequenceNumbers.NO_OPS_PERFORMED)); map.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(SequenceNumbers.NO_OPS_PERFORMED)); @@ -1790,6 +1789,13 @@ public void createEmpty(Version luceneVersion) throws IOException { } } + /** + * creates an empty lucene index and a corresponding empty translog. Any existing data will be deleted. + */ + public void createEmpty(Version luceneVersion) throws IOException { + createEmpty(luceneVersion, null); + } + /** * Marks an existing lucene index with a new history uuid. * This is used to make sure no existing shard will recovery from this index using ops based recovery.
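The StoreDirectory#copyFrom override above brackets every segment file download with started/succeeded/failed accounting calls on the DirectoryFileTransferTracker. A minimal standalone sketch of that wrapping pattern follows; the TransferTracker and CheckedRunnable types are illustrative stand-ins (only the three accounting method names are taken from the diff), so this is a sketch of the idea, not the actual implementation:

import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;

// Illustrative stand-in for DirectoryFileTransferTracker: three counters
// fed by the started/succeeded/failed accounting calls.
final class TransferTracker {
    final AtomicLong started = new AtomicLong();
    final AtomicLong succeeded = new AtomicLong();
    final AtomicLong failed = new AtomicLong();

    void addTransferredBytesStarted(long bytes) { started.addAndGet(bytes); }
    void addTransferredBytesSucceeded(long bytes, long startTimeInMs) { succeeded.addAndGet(bytes); }
    void addTransferredBytesFailed(long bytes, long startTimeInMs) { failed.addAndGet(bytes); }
}

final class TrackedCopy {
    interface CheckedRunnable { void run() throws IOException; }

    private final TransferTracker tracker = new TransferTracker();

    // Record the attempt up front; afterwards exactly one of the success or
    // failure paths fires, with the failure path resolved via a flag in finally.
    void copyWithTracking(long fileSize, CheckedRunnable copy) throws IOException {
        tracker.addTransferredBytesStarted(fileSize);
        boolean success = false;
        long startTime = System.currentTimeMillis();
        try {
            copy.run();
            success = true;
            tracker.addTransferredBytesSucceeded(fileSize, startTime);
        } finally {
            if (success == false) {
                tracker.addTransferredBytesFailed(fileSize, startTime);
            }
        }
    }
}

Note that a download which throws after partial progress is counted entirely as failed bytes; the attempt is still visible through the started counter either way.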
@@ -1963,23 +1969,4 @@ private static IndexWriterConfig newIndexWriterConfig() { // we also don't specify a codec here and merges should use the engines for this index .setMergePolicy(NoMergePolicy.INSTANCE); } - - public void incRefFileDeleter(Collection<String> files) { - if (this.indexSettings.isSegRepEnabled()) { - this.replicaFileTracker.incRef(files); - } - } - - public void decRefFileDeleter(Collection<String> files) { - if (this.indexSettings.isSegRepEnabled()) { - this.replicaFileTracker.decRef(files); - try { - this.cleanupAndPreserveLatestCommitPoint(files, "On reader close"); - } catch (IOException e) { - // Log but do not rethrow - we can try cleaning up again after next replication cycle. - // If that were to fail, the shard will as well. - logger.error("Unable to clean store after reader closed", e); - } - } - } } diff --git a/server/src/main/java/org/opensearch/index/store/StoreFileMetadata.java b/server/src/main/java/org/opensearch/index/store/StoreFileMetadata.java index 8415b65d838b7..c286a5bdbd9d8 100644 --- a/server/src/main/java/org/opensearch/index/store/StoreFileMetadata.java +++ b/server/src/main/java/org/opensearch/index/store/StoreFileMetadata.java @@ -35,10 +35,11 @@ import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Version; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.lucene.store.ByteArrayIndexInput; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.lucene.store.ByteArrayIndexInput; import java.io.IOException; import java.text.ParseException; @@ -47,8 +48,9 @@ /** * Metadata for the store file * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class StoreFileMetadata implements Writeable { private final String name; diff --git a/server/src/main/java/org/opensearch/index/store/StoreStats.java b/server/src/main/java/org/opensearch/index/store/StoreStats.java index ba36e6b527031..4763b5e5e8a21 100644 --- a/server/src/main/java/org/opensearch/index/store/StoreStats.java +++ b/server/src/main/java/org/opensearch/index/store/StoreStats.java @@ -32,10 +32,11 @@ package org.opensearch.index.store; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -44,8 +45,9 @@ /** * Statistics about an OpenSearch Store * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class StoreStats implements Writeable, ToXContentFragment { /** diff --git a/server/src/main/java/org/opensearch/index/store/lockmanager/FileLockInfo.java b/server/src/main/java/org/opensearch/index/store/lockmanager/FileLockInfo.java index 24f42743e1a04..b6be60c489a6c 100644 --- a/server/src/main/java/org/opensearch/index/store/lockmanager/FileLockInfo.java +++ b/server/src/main/java/org/opensearch/index/store/lockmanager/FileLockInfo.java @@ -21,6 +21,7 @@ public class FileLockInfo implements LockInfo { private String fileToLock; private String acquirerId; + private static final int INVALID_INDEX = -1; public String 
getAcquirerId() { return acquirerId; @@ -88,21 +89,34 @@ static String generateLockName(String fileToLock, String acquirerId) { } public static String getFileToLockNameFromLock(String lockName) { - String[] lockNameTokens = lockName.split(RemoteStoreLockManagerUtils.SEPARATOR); - - if (lockNameTokens.length != 2) { - throw new IllegalArgumentException("Provided Lock Name " + lockName + " is not Valid."); + // use the proper separator for the lock file, depending on the version in which it was created + String lockSeparator = lockName.endsWith(RemoteStoreLockManagerUtils.PRE_OS210_LOCK_FILE_EXTENSION) + ? RemoteStoreLockManagerUtils.PRE_OS210_LOCK_SEPARATOR + : RemoteStoreLockManagerUtils.SEPARATOR; + final int indexOfSeparator = lockName.lastIndexOf(lockSeparator); + if (indexOfSeparator == INVALID_INDEX) { + throw new IllegalArgumentException("Provided lock name: " + lockName + " is invalid with separator: " + lockSeparator); } - return lockNameTokens[0]; + return lockName.substring(0, indexOfSeparator); } public static String getAcquirerIdFromLock(String lockName) { + String lockExtension = RemoteStoreLockManagerUtils.LOCK_FILE_EXTENSION; + String lockSeparator = RemoteStoreLockManagerUtils.SEPARATOR; - String[] lockNameTokens = lockName.split(RemoteStoreLockManagerUtils.SEPARATOR); - if (lockNameTokens.length != 2) { - throw new IllegalArgumentException("Provided Lock Name " + lockName + " is not Valid."); + // check if the lock file was created on version <= 2.10 + if (lockName.endsWith(RemoteStoreLockManagerUtils.PRE_OS210_LOCK_FILE_EXTENSION)) { + lockSeparator = RemoteStoreLockManagerUtils.PRE_OS210_LOCK_SEPARATOR; + lockExtension = RemoteStoreLockManagerUtils.PRE_OS210_LOCK_FILE_EXTENSION; + } + final int indexOfSeparator = lockName.lastIndexOf(lockSeparator); + final int indexOfExt = lockName.lastIndexOf(lockExtension); + if (indexOfSeparator == INVALID_INDEX || indexOfExt == INVALID_INDEX) { + throw new IllegalArgumentException( + "Provided lock name: " + lockName + " is invalid with separator: " + lockSeparator + " and extension: " + lockExtension + ); } - return lockNameTokens[1].replace(RemoteStoreLockManagerUtils.LOCK_FILE_EXTENSION, ""); + return lockName.substring(indexOfSeparator + lockSeparator.length(), indexOfExt); } } diff --git a/server/src/main/java/org/opensearch/index/store/lockmanager/LockInfo.java b/server/src/main/java/org/opensearch/index/store/lockmanager/LockInfo.java index 1470969540d36..6aab518808a3f 100644 --- a/server/src/main/java/org/opensearch/index/store/lockmanager/LockInfo.java +++ b/server/src/main/java/org/opensearch/index/store/lockmanager/LockInfo.java @@ -8,11 +8,15 @@ package org.opensearch.index.store.lockmanager; +import org.opensearch.common.annotation.PublicApi; + /** * An Interface that defines Remote Store Lock Information. * Individual Implemented Classes of this interface can decide how the lock should look like and its contents. - * @opensearch.internal + * + * @opensearch.api */ +@PublicApi(since = "2.8.0") public interface LockInfo { /** * A function which generates the lock name on the basis of given information.
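The two parsing methods above have to handle both lock-name layouts: <file>___<acquirerId>.lock for locks written on 2.10 and earlier, and <file>...<acquirerId>.v2_lock afterwards. A runnable sketch of the same extension-driven separator selection follows; the constants mirror RemoteStoreLockManagerUtils, while the lock names themselves are hypothetical:

// Sketch of the version-aware lock-name parsing; the lock names are made up.
public final class LockNameParsingSketch {
    static final String PRE_OS210_LOCK_SEPARATOR = "___";
    static final String PRE_OS210_LOCK_FILE_EXTENSION = ".lock";
    static final String SEPARATOR = "...";
    static final String LOCK_FILE_EXTENSION = ".v2_lock";

    public static void main(String[] args) {
        String[] locks = {
            "metadata__file___snapshot-1.lock",   // pre-2.10 layout
            "metadata__file...snapshot-1.v2_lock" // current layout
        };
        for (String lock : locks) {
            // The extension tells us which separator was in use when the lock was written.
            boolean pre210 = lock.endsWith(PRE_OS210_LOCK_FILE_EXTENSION);
            String sep = pre210 ? PRE_OS210_LOCK_SEPARATOR : SEPARATOR;
            String ext = pre210 ? PRE_OS210_LOCK_FILE_EXTENSION : LOCK_FILE_EXTENSION;
            int sepIdx = lock.lastIndexOf(sep);
            String fileToLock = lock.substring(0, sepIdx);
            String acquirerId = lock.substring(sepIdx + sep.length(), lock.lastIndexOf(ext));
            System.out.println(fileToLock + " <- " + acquirerId);
        }
    }
}

Scanning from the right with lastIndexOf keeps the parse stable even if the locked file name itself happens to contain the separator sequence.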
diff --git a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManager.java b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManager.java index 9eb066d9e955e..4fa23dfe9dc3a 100644 --- a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManager.java +++ b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManager.java @@ -8,14 +8,18 @@ package org.opensearch.index.store.lockmanager; +import org.opensearch.common.annotation.PublicApi; + import java.io.IOException; /** * An Interface that defines Remote Store Lock Manager. * This will provide the functionality to acquire lock, release lock or to check if a lock is acquired on a specific * file in remote store. - * @opensearch.internal + * + * @opensearch.api */ +@PublicApi(since = "2.8.0") public interface RemoteStoreLockManager { /** * diff --git a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactory.java b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactory.java index e866551eae143..00666ada11983 100644 --- a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactory.java +++ b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactory.java @@ -8,6 +8,7 @@ package org.opensearch.index.store.lockmanager; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.index.store.RemoteBufferedOutputDirectory; @@ -22,8 +23,9 @@ /** * Factory for remote store lock manager * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.8.0") public class RemoteStoreLockManagerFactory { private static final String SEGMENTS = "segments"; private static final String LOCK_FILES = "lock_files"; @@ -33,7 +35,7 @@ public RemoteStoreLockManagerFactory(Supplier<RepositoriesService> repositoriesS this.repositoriesService = repositoriesService; } - public RemoteStoreMetadataLockManager newLockManager(String repositoryName, String indexUUID, String shardId) throws IOException { + public RemoteStoreLockManager newLockManager(String repositoryName, String indexUUID, String shardId) throws IOException { return newLockManager(repositoriesService.get(), repositoryName, indexUUID, shardId); } @@ -58,6 +60,12 @@ public static RemoteStoreMetadataLockManager newLockManager( } } + // TODO: remove this once we add poller in place to trigger remote store cleanup + // see: https://github.com/opensearch-project/OpenSearch/issues/8469 + public Supplier<RepositoriesService> getRepositoriesService() { + return repositoriesService; + } + private static RemoteBufferedOutputDirectory createRemoteBufferedOutputDirectory( Repository repository, BlobPath commonBlobPath, diff --git a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerUtils.java b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerUtils.java index 452dfc329d88b..d5fb2722a64dc 100644 --- a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerUtils.java +++ b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerUtils.java @@ -15,8 +15,11 @@ */ public class RemoteStoreLockManagerUtils { static final String FILE_TO_LOCK_NAME = "file_to_lock"; - static final String SEPARATOR = "___"; - static final String LOCK_FILE_EXTENSION = ".lock"; + static 
final String PRE_OS210_LOCK_SEPARATOR = "___"; + static final String SEPARATOR = "..."; + // for versions <= 2.10, we have lock files with this extension. + static final String PRE_OS210_LOCK_FILE_EXTENSION = ".lock"; + static final String LOCK_FILE_EXTENSION = ".v2_lock"; static final String ACQUIRER_ID = "acquirer_id"; public static final String NO_TTL = "-1"; static final String LOCK_EXPIRY_TIME = "lock_expiry_time"; diff --git a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManager.java b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManager.java index fd7906729e314..9c29e03c225e4 100644 --- a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManager.java +++ b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManager.java @@ -12,12 +12,17 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexOutput; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.store.RemoteBufferedOutputDirectory; +import java.io.FileNotFoundException; import java.io.IOException; import java.nio.file.NoSuchFileException; import java.util.Collection; +import java.util.List; import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; /** * A Class that implements Remote Store Lock Manager by creating lock files for the remote store files that needs to @@ -25,8 +30,9 @@ * It uses {@code LockFileInfo} instance to get the information about the lock file on which operations need to * be executed. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.8.0") public class RemoteStoreMetadataLockManager implements RemoteStoreLockManager { private static final Logger logger = LogManager.getLogger(RemoteStoreMetadataLockManager.class); private final RemoteBufferedOutputDirectory lockDirectory; @@ -70,6 +76,24 @@ public void release(LockInfo lockInfo) throws IOException { } } + public String fetchLockedMetadataFile(String filenamePrefix, String acquirerId) throws IOException { + Collection<String> lockFiles = lockDirectory.listFilesByPrefix(filenamePrefix); + List<String> lockFilesForAcquirer = lockFiles.stream() + .filter(lockFile -> acquirerId.equals(FileLockInfo.LockFileUtils.getAcquirerIdFromLock(lockFile))) + .map(FileLockInfo.LockFileUtils::getFileToLockNameFromLock) + .collect(Collectors.toList()); + if (lockFilesForAcquirer.size() == 0) { + throw new FileNotFoundException("No lock file found for prefix: " + filenamePrefix + " and acquirerId: " + acquirerId); + } + assert lockFilesForAcquirer.size() == 1; + return lockFilesForAcquirer.get(0); + } + + public Set<String> fetchLockedMetadataFiles(String filenamePrefix) throws IOException { + Collection<String> lockFiles = lockDirectory.listFilesByPrefix(filenamePrefix); + return lockFiles.stream().map(FileLockInfo.LockFileUtils::getFileToLockNameFromLock).collect(Collectors.toSet()); + } + /** * Checks whether a given file have any lock on it or not. * @param lockInfo File Lock Info instance for which we need to check if lock is acquired. 
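fetchLockedMetadataFile above lists lock files by prefix, keeps only those whose acquirer id matches, and expects exactly one surviving metadata file per acquirer. A condensed sketch of that lookup, where the parse helpers stand in for FileLockInfo.LockFileUtils and the lock names are hypothetical:

import java.io.FileNotFoundException;
import java.util.List;
import java.util.stream.Collectors;

// Sketch of the acquirer-scoped lookup; helpers and names are illustrative only.
public final class AcquirerLookupSketch {
    static String parseAcquirerId(String lock) {
        return lock.substring(lock.lastIndexOf("...") + 3, lock.lastIndexOf(".v2_lock"));
    }

    static String parseFileToLock(String lock) {
        return lock.substring(0, lock.lastIndexOf("..."));
    }

    static String fetchLockedMetadataFile(List<String> lockFiles, String acquirerId) throws FileNotFoundException {
        List<String> matches = lockFiles.stream()
            .filter(lock -> acquirerId.equals(parseAcquirerId(lock)))
            .map(AcquirerLookupSketch::parseFileToLock)
            .collect(Collectors.toList());
        if (matches.isEmpty()) {
            throw new FileNotFoundException("No lock file found for acquirerId: " + acquirerId);
        }
        // One acquirer (e.g. one snapshot) is expected to hold at most one
        // metadata lock per shard, hence the assert in the real method.
        return matches.get(0);
    }

    public static void main(String[] args) throws FileNotFoundException {
        List<String> locks = List.of(
            "metadata__1...snap-a.v2_lock",
            "metadata__2...snap-b.v2_lock"
        );
        System.out.println(fetchLockedMetadataFile(locks, "snap-b")); // metadata__2
    }
}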
diff --git a/server/src/main/java/org/opensearch/index/store/remote/directory/RemoteSnapshotDirectory.java b/server/src/main/java/org/opensearch/index/store/remote/directory/RemoteSnapshotDirectory.java index 71b325d86fcb2..19ecee67bdb96 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/directory/RemoteSnapshotDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/remote/directory/RemoteSnapshotDirectory.java @@ -8,13 +8,6 @@ package org.opensearch.index.store.remote.directory; -import java.io.IOException; -import java.util.Collection; -import java.util.Collections; -import java.util.Map; -import java.util.Set; -import java.util.stream.Collectors; - import org.apache.lucene.store.Directory; import org.apache.lucene.store.FSDirectory; import org.apache.lucene.store.IOContext; @@ -30,6 +23,13 @@ import org.opensearch.index.store.remote.utils.TransferManager; import org.opensearch.repositories.blobstore.BlobStoreRepository; +import java.io.IOException; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + /** * a Directory implementation that can read directly from index snapshot stored remotely in a blob store repository. * This implementation is following this design https://github.com/opensearch-project/OpenSearch/issues/4033 diff --git a/server/src/main/java/org/opensearch/index/store/remote/directory/RemoteSnapshotDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/remote/directory/RemoteSnapshotDirectoryFactory.java index 3238ffe45e0a6..7cfa738e75e52 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/directory/RemoteSnapshotDirectoryFactory.java +++ b/server/src/main/java/org/opensearch/index/store/remote/directory/RemoteSnapshotDirectoryFactory.java @@ -8,13 +8,6 @@ package org.opensearch.index.store.remote.directory; -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; -import java.util.function.Supplier; - import org.apache.lucene.store.Directory; import org.apache.lucene.store.FSDirectory; import org.opensearch.common.blobstore.BlobContainer; @@ -32,6 +25,13 @@ import org.opensearch.snapshots.SnapshotId; import org.opensearch.threadpool.ThreadPool; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.function.Supplier; + /** * Factory for a Directory implementation that can read directly from index * data stored remotely in a blob store repository. diff --git a/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockIndexInput.java b/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockIndexInput.java index 7319a5324777a..6fd198747570f 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockIndexInput.java +++ b/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockIndexInput.java @@ -27,7 +27,7 @@ * <br> * This class delegate the responsibility of actually fetching the block when demanded to its subclasses using * {@link OnDemandBlockIndexInput#fetchBlock(int)}. - * + * <p> * Like {@link IndexInput}, this class may only be used from one thread as it is not thread safe. 
* However, a cleaning action may run from another thread triggered by the {@link Cleaner}, but * this is okay because at that point the {@link OnDemandBlockIndexInput} instance is phantom @@ -428,10 +428,10 @@ Builder blockSizeShift(int blockSizeShift) { * instance to hold the current underlying IndexInput, while allowing it to * be changed out with different instances as {@link OnDemandBlockIndexInput} * reads through the data. - * + * <p> * This class implements {@link Runnable} so that it can be passed directly * to the cleaner to run its close action. - * + * <p> * [1]: https://github.com/apache/lucene/blob/8340b01c3cc229f33584ce2178b07b8984daa6a9/lucene/core/src/java/org/apache/lucene/store/IndexInput.java#L32-L33 */ private static class BlockHolder implements Closeable, Runnable { diff --git a/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInput.java b/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInput.java index b3f8ee9c1817e..8097fd08da50a 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInput.java +++ b/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInput.java @@ -8,8 +8,6 @@ package org.opensearch.index.store.remote.file; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.apache.lucene.store.FSDirectory; import org.apache.lucene.store.IndexInput; import org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo; @@ -17,6 +15,8 @@ import org.opensearch.index.store.remote.utils.TransferManager; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; /** * This is an implementation of {@link OnDemandBlockIndexInput} where this class provides the main IndexInput using shard snapshot files. @@ -26,8 +26,6 @@ * @opensearch.internal */ public class OnDemandBlockSnapshotIndexInput extends OnDemandBlockIndexInput { - private static final Logger logger = LogManager.getLogger(OnDemandBlockSnapshotIndexInput.class); - /** * Where this class fetches IndexInput parts from */ @@ -48,7 +46,7 @@ public class OnDemandBlockSnapshotIndexInput extends OnDemandBlockIndexInput { protected final String fileName; /** - * part size in bytes + * Maximum size in bytes of snapshot file parts. */ protected final long partSize; @@ -104,7 +102,15 @@ public OnDemandBlockSnapshotIndexInput( super(builder); this.transferManager = transferManager; this.fileInfo = fileInfo; - this.partSize = fileInfo.partSize().getBytes(); + if (fileInfo.partSize() != null) { + this.partSize = fileInfo.partSize().getBytes(); + } else { + // Repository implementations can define a size at which to split files + // into multiple objects in the repository. If partSize() is null, then + // no splitting happens, so default to Long.MAX_VALUE here to have the + // same effect. See {@code BlobStoreRepository#chunkSize()}. 
+ this.partSize = Long.MAX_VALUE; + } this.fileName = fileInfo.physicalName(); this.directory = directory; this.originalFileSize = fileInfo.length(); @@ -131,22 +137,46 @@ protected IndexInput fetchBlock(int blockId) throws IOException { final long blockStart = getBlockStart(blockId); final long blockEnd = blockStart + getActualBlockSize(blockId); - final int part = (int) (blockStart / partSize); - final long partStart = part * partSize; - - final long position = blockStart - partStart; - final long length = blockEnd - blockStart; + // A block may span multiple chunks of a file, so we need to fetch + // each chunk/blob part separately to assemble the entire block. BlobFetchRequest blobFetchRequest = BlobFetchRequest.builder() - .position(position) - .length(length) - .blobName(fileInfo.partName(part)) + .blobParts(getBlobParts(blockStart, blockEnd)) .directory(directory) .fileName(blockFileName) .build(); return transferManager.fetchBlob(blobFetchRequest); } + /** + * Returns the list of blob parts/chunks of a file that cover a given block. + */ + protected List<BlobFetchRequest.BlobPart> getBlobParts(long blockStart, long blockEnd) { + // If the snapshot file is chunked, we must account for this by + // choosing the appropriate file part and updating the position + // accordingly. + int partNum = (int) (blockStart / partSize); + long pos = blockStart; + long diff = (blockEnd - blockStart); + + List<BlobFetchRequest.BlobPart> blobParts = new ArrayList<>(); + while (diff > 0) { + long partStart = pos % partSize; + long partEnd; + if ((partStart + diff) > partSize) { + partEnd = partSize; + } else { + partEnd = (partStart + diff); + } + long fetchBytes = partEnd - partStart; + blobParts.add(new BlobFetchRequest.BlobPart(fileInfo.partName(partNum), partStart, fetchBytes)); + partNum++; + pos = pos + fetchBytes; + diff = (blockEnd - pos); + } + return blobParts; + } + @Override public OnDemandBlockSnapshotIndexInput clone() { OnDemandBlockSnapshotIndexInput clone = buildSlice("clone", 0L, this.length); diff --git a/server/src/main/java/org/opensearch/index/store/remote/filecache/CachedIndexInput.java b/server/src/main/java/org/opensearch/index/store/remote/filecache/CachedIndexInput.java index 6b9b6d17bc052..b7dacb3761c33 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/filecache/CachedIndexInput.java +++ b/server/src/main/java/org/opensearch/index/store/remote/filecache/CachedIndexInput.java @@ -8,17 +8,19 @@ package org.opensearch.index.store.remote.filecache; -import java.io.IOException; - import org.apache.lucene.store.IndexInput; +import org.opensearch.common.annotation.PublicApi; + +import java.io.IOException; /** * Interface for an entry in the {@link FileCache} that can return an * {@link IndexInput}. Exactly how the IndexInput is created is determined by * the implementations. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.7.0") public interface CachedIndexInput extends AutoCloseable { /** * Gets the {@link IndexInput} this cache entry represents.
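The getBlobParts loop above is the heart of this change: a logical block [blockStart, blockEnd) is translated into per-part (offset, length) reads, walking part boundaries until the block is covered. A standalone sketch with a worked example follows; the "file.partN" naming scheme is made up for the example, and the loop is condensed with Math.min but computes the same parts:

import java.util.ArrayList;
import java.util.List;

// Sketch of the block-to-blob-part mapping from getBlobParts above.
public final class BlockToPartsSketch {
    static final class BlobPart {
        final String name; final long offset; final long length;
        BlobPart(String name, long offset, long length) { this.name = name; this.offset = offset; this.length = length; }
        @Override public String toString() { return name + "[offset=" + offset + ", length=" + length + "]"; }
    }

    static List<BlobPart> getBlobParts(long blockStart, long blockEnd, long partSize) {
        int partNum = (int) (blockStart / partSize);
        long pos = blockStart;
        long remaining = blockEnd - blockStart;
        List<BlobPart> parts = new ArrayList<>();
        while (remaining > 0) {
            long partStart = pos % partSize;                             // offset within the current part
            long fetchBytes = Math.min(remaining, partSize - partStart); // stop at the part boundary
            parts.add(new BlobPart("file.part" + partNum, partStart, fetchBytes));
            partNum++;
            pos += fetchBytes;
            remaining = blockEnd - pos;
        }
        return parts;
    }

    public static void main(String[] args) {
        // With 16-byte parts, the 8-byte block [12, 20) spans two parts:
        // file.part0[offset=12, length=4]
        // file.part1[offset=0, length=4]
        getBlobParts(12, 20, 16).forEach(System.out::println);
    }
}

With partSize defaulted to Long.MAX_VALUE for unchunked files, the loop degenerates to a single part, matching the old single-blob fetch.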
diff --git a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCache.java b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCache.java index 3d23b4d22538c..2029b461674c7 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCache.java +++ b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCache.java @@ -9,8 +9,10 @@ package org.opensearch.index.store.remote.filecache; import org.apache.lucene.store.IndexInput; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.breaker.CircuitBreakingException; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.settings.Setting; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.CircuitBreakingException; import org.opensearch.index.store.remote.utils.cache.CacheUsage; import org.opensearch.index.store.remote.utils.cache.RefCountedCache; import org.opensearch.index.store.remote.utils.cache.SegmentedCache; @@ -42,15 +44,28 @@ * items from cache tail and triggers a callback to clean up the file from disk. The * cleanup process also includes closing file’s descriptor. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.7.0") public class FileCache implements RefCountedCache<Path, CachedIndexInput> { private final SegmentedCache<Path, CachedIndexInput> theCache; private final CircuitBreaker circuitBreaker; - // TODO: Convert the constant into an integer setting - public static final int DATA_TO_FILE_CACHE_SIZE_RATIO = 5; + /** + * Defines a limit of how much total remote data can be referenced as a ratio of the size of the disk reserved for + * the file cache. For example, if 100GB disk space is configured for use as a file cache and the + * remote_data_ratio of 5 is defined, then a total of 500GB of remote data can be loaded as searchable snapshots. + * This is designed to be a safeguard to prevent oversubscribing a cluster. + * Specify a value of zero for no limit, which is the default for compatibility reasons. 
+ */ + public static final Setting<Double> DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING = Setting.doubleSetting( + "cluster.filecache.remote_data_ratio", + 0.0, + 0.0, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); public FileCache(SegmentedCache<Path, CachedIndexInput> cache, CircuitBreaker circuitBreaker) { this.theCache = cache; diff --git a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheCleaner.java b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheCleaner.java index 6d09bc062ab0e..0261ab24dfa7a 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheCleaner.java +++ b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheCleaner.java @@ -11,16 +11,13 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.common.settings.Settings; +import org.opensearch.common.inject.Provider; import org.opensearch.common.util.io.IOUtils; -import org.opensearch.env.NodeEnvironment; import org.opensearch.core.index.Index; -import org.opensearch.index.IndexModule; -import org.opensearch.index.IndexSettings; -import org.opensearch.index.shard.IndexEventListener; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.env.NodeEnvironment; +import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.ShardPath; -import org.opensearch.indices.cluster.IndicesClusterStateService; import java.io.IOException; import java.nio.file.DirectoryStream; @@ -30,79 +27,90 @@ import static org.opensearch.index.store.remote.directory.RemoteSnapshotDirectoryFactory.LOCAL_STORE_LOCATION; /** - * IndexEventListener to clean up file cache when the index is deleted. The cached entries will be eligible + * IndexStoreListener to clean up file cache when the index is deleted. The cached entries will be eligible * for eviction when the shard is deleted, but this listener deterministically removes entries from memory and * from disk at the time of shard deletion as opposed to waiting for the cache to need to perform eviction. * * @opensearch.internal */ -public class FileCacheCleaner implements IndexEventListener { - private static final Logger log = LogManager.getLogger(FileCacheCleaner.class); +public class FileCacheCleaner implements NodeEnvironment.IndexStoreListener { - private final NodeEnvironment nodeEnvironment; - private final FileCache fileCache; + private static final Logger logger = LogManager.getLogger(FileCacheCleaner.class); - public FileCacheCleaner(NodeEnvironment nodeEnvironment, FileCache fileCache) { - this.nodeEnvironment = nodeEnvironment; - this.fileCache = fileCache; + private final Provider<FileCache> fileCacheProvider; + public FileCacheCleaner(Provider<FileCache> fileCacheProvider) { + this.fileCacheProvider = fileCacheProvider; } /** - * before shard deleted and after shard closed, cleans up the corresponding index file path entries from FC. - * @param shardId The shard id - * @param settings the shards index settings + * Before the shard path is deleted, cleans up the corresponding index file path entries from the file cache and deletes the + * corresponding shard file cache path.
+ * + * @param shardId the shard id + * @param indexSettings the index settings + * @param nodeEnvironment the node environment */ @Override - public void beforeIndexShardDeleted(ShardId shardId, Settings settings) { + public void beforeShardPathDeleted(ShardId shardId, IndexSettings indexSettings, NodeEnvironment nodeEnvironment) { + if (indexSettings.isRemoteSnapshot()) { + final ShardPath shardPath = ShardPath.loadFileCachePath(nodeEnvironment, shardId); + cleanupShardFileCache(shardPath); + deleteShardFileCacheDirectory(shardPath); + } + } + + /** + * Cleans up the corresponding index file path entries from FileCache + * + * @param shardPath the shard path + */ + private void cleanupShardFileCache(ShardPath shardPath) { try { - if (isRemoteSnapshot(settings)) { - final ShardPath shardPath = ShardPath.loadFileCachePath(nodeEnvironment, shardId); - final Path localStorePath = shardPath.getDataPath().resolve(LOCAL_STORE_LOCATION); - try (DirectoryStream<Path> ds = Files.newDirectoryStream(localStorePath)) { - for (Path subPath : ds) { - fileCache.remove(subPath.toRealPath()); - } + final FileCache fc = fileCacheProvider.get(); + assert fc != null; + final Path localStorePath = shardPath.getDataPath().resolve(LOCAL_STORE_LOCATION); + try (DirectoryStream<Path> ds = Files.newDirectoryStream(localStorePath)) { + for (Path subPath : ds) { + fc.remove(subPath.toRealPath()); } } } catch (IOException ioe) { - log.error(() -> new ParameterizedMessage("Error removing items from cache during shard deletion {}", shardId), ioe); + logger.error( + () -> new ParameterizedMessage("Error removing items from cache during shard deletion {}", shardPath.getShardId()), + ioe + ); } } - @Override - public void afterIndexShardDeleted(ShardId shardId, Settings settings) { - if (isRemoteSnapshot(settings)) { - final Path path = ShardPath.loadFileCachePath(nodeEnvironment, shardId).getDataPath(); - try { - if (Files.exists(path)) { - IOUtils.rm(path); - } - } catch (IOException e) { - log.error(() -> new ParameterizedMessage("Failed to delete cache path for shard {}", shardId), e); + private void deleteShardFileCacheDirectory(ShardPath shardPath) { + final Path path = shardPath.getDataPath(); + try { + if (Files.exists(path)) { + IOUtils.rm(path); } + } catch (IOException e) { + logger.error(() -> new ParameterizedMessage("Failed to delete cache path for shard {}", shardPath.getShardId()), e); } } + /** + * Before the index path is deleted, deletes the corresponding index file cache path.
+ * + * @param index the index + * @param indexSettings the index settings + * @param nodeEnvironment the node environment + */ @Override - public void afterIndexRemoved( - Index index, - IndexSettings indexSettings, - IndicesClusterStateService.AllocatedIndices.IndexRemovalReason reason - ) { - if (isRemoteSnapshot(indexSettings.getSettings()) - && reason == IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.DELETED) { + public void beforeIndexPathDeleted(Index index, IndexSettings indexSettings, NodeEnvironment nodeEnvironment) { + if (indexSettings.isRemoteSnapshot()) { final Path indexCachePath = nodeEnvironment.fileCacheNodePath().fileCachePath.resolve(index.getUUID()); if (Files.exists(indexCachePath)) { try { IOUtils.rm(indexCachePath); } catch (IOException e) { - log.error(() -> new ParameterizedMessage("Failed to delete cache path for index {}", index), e); + logger.error(() -> new ParameterizedMessage("Failed to delete cache path for index {}", index), e); } } } } - - private static boolean isRemoteSnapshot(Settings settings) { - return IndexModule.Type.REMOTE_SNAPSHOT.match(settings.get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey())); - } } diff --git a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheFactory.java b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheFactory.java index f23e057196096..9fe67dc67020a 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheFactory.java +++ b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheFactory.java @@ -8,8 +8,8 @@ package org.opensearch.index.store.remote.filecache; -import org.opensearch.common.breaker.CircuitBreaker; import org.opensearch.common.cache.RemovalReason; +import org.opensearch.core.common.breaker.CircuitBreaker; import org.opensearch.index.store.remote.utils.cache.SegmentedCache; import java.nio.file.Files; diff --git a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheStats.java b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheStats.java index 46a81adc1ab45..070fd663896a3 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheStats.java +++ b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheStats.java @@ -8,10 +8,11 @@ package org.opensearch.index.store.remote.filecache; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -20,8 +21,9 @@ /** * Statistics on file cache * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.7.0") public class FileCacheStats implements Writeable, ToXContentFragment { private final long timestamp; diff --git a/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadata.java b/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadata.java index 9a479346ff711..41a145273e8ef 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadata.java +++ b/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadata.java @@ -8,19 +8,27 @@ package 
org.opensearch.index.store.remote.metadata; -import java.io.IOException; -import java.util.Map; -import java.util.stream.Collectors; - import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.util.Version; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.store.RemoteSegmentStoreDirectory; +import org.opensearch.index.store.StoreFileMetadata; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; + +import java.io.IOException; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; /** * Metadata object for Remote Segment * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.6.0") public class RemoteSegmentMetadata { /** * Latest supported version of metadata @@ -38,19 +46,16 @@ public class RemoteSegmentMetadata { private final byte[] segmentInfosBytes; - private final long primaryTerm; - private final long generation; + private final ReplicationCheckpoint replicationCheckpoint; public RemoteSegmentMetadata( Map<String, RemoteSegmentStoreDirectory.UploadedSegmentMetadata> metadata, byte[] segmentInfosBytes, - long primaryTerm, - long generation + ReplicationCheckpoint replicationCheckpoint ) { this.metadata = metadata; this.segmentInfosBytes = segmentInfosBytes; - this.generation = generation; - this.primaryTerm = primaryTerm; + this.replicationCheckpoint = replicationCheckpoint; } /** @@ -66,11 +71,15 @@ public byte[] getSegmentInfosBytes() { } public long getGeneration() { - return generation; + return replicationCheckpoint.getSegmentsGen(); } public long getPrimaryTerm() { - return primaryTerm; + return replicationCheckpoint.getPrimaryTerm(); + } + + public ReplicationCheckpoint getReplicationCheckpoint() { + return replicationCheckpoint; } /** @@ -99,19 +108,60 @@ public static Map<String, RemoteSegmentStoreDirectory.UploadedSegmentMetadata> f public void write(IndexOutput out) throws IOException { out.writeMapOfStrings(toMapOfStrings()); - out.writeLong(generation); - out.writeLong(primaryTerm); + writeCheckpointToIndexOutput(replicationCheckpoint, out); out.writeLong(segmentInfosBytes.length); out.writeBytes(segmentInfosBytes, segmentInfosBytes.length); } public static RemoteSegmentMetadata read(IndexInput indexInput) throws IOException { Map<String, String> metadata = indexInput.readMapOfStrings(); - long generation = indexInput.readLong(); - long primaryTerm = indexInput.readLong(); + final Map<String, RemoteSegmentStoreDirectory.UploadedSegmentMetadata> uploadedSegmentMetadataMap = RemoteSegmentMetadata + .fromMapOfStrings(metadata); + ReplicationCheckpoint replicationCheckpoint = readCheckpointFromIndexInput(indexInput, uploadedSegmentMetadataMap); int byteArraySize = (int) indexInput.readLong(); byte[] segmentInfosBytes = new byte[byteArraySize]; indexInput.readBytes(segmentInfosBytes, 0, byteArraySize); - return new RemoteSegmentMetadata(RemoteSegmentMetadata.fromMapOfStrings(metadata), segmentInfosBytes, primaryTerm, generation); + return new RemoteSegmentMetadata(uploadedSegmentMetadataMap, segmentInfosBytes, replicationCheckpoint); + } + + public static void writeCheckpointToIndexOutput(ReplicationCheckpoint replicationCheckpoint, IndexOutput out) throws IOException { + ShardId shardId = replicationCheckpoint.getShardId(); + // Write ShardId + out.writeString(shardId.getIndex().getName()); + 
out.writeString(shardId.getIndex().getUUID()); + out.writeVInt(shardId.getId()); + // Write remaining checkpoint fields + out.writeLong(replicationCheckpoint.getPrimaryTerm()); + out.writeLong(replicationCheckpoint.getSegmentsGen()); + out.writeLong(replicationCheckpoint.getSegmentInfosVersion()); + out.writeLong(replicationCheckpoint.getLength()); + out.writeString(replicationCheckpoint.getCodec()); + } + + private static ReplicationCheckpoint readCheckpointFromIndexInput( + IndexInput in, + Map<String, RemoteSegmentStoreDirectory.UploadedSegmentMetadata> uploadedSegmentMetadataMap + ) throws IOException { + return new ReplicationCheckpoint( + new ShardId(new Index(in.readString(), in.readString()), in.readVInt()), + in.readLong(), + in.readLong(), + in.readLong(), + in.readLong(), + in.readString(), + toStoreFileMetadata(uploadedSegmentMetadataMap) + ); + } + + private static Map<String, StoreFileMetadata> toStoreFileMetadata( + Map<String, RemoteSegmentStoreDirectory.UploadedSegmentMetadata> metadata + ) { + return metadata.entrySet() + .stream() + // TODO: Version here should be read from UploadedSegmentMetadata. + .map( + entry -> new StoreFileMetadata(entry.getKey(), entry.getValue().getLength(), entry.getValue().getChecksum(), Version.LATEST) + ) + .collect(Collectors.toMap(StoreFileMetadata::name, Function.identity())); } } diff --git a/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandler.java b/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandler.java index 84540c64df1a6..3077d8c76ddae 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandler.java +++ b/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandler.java @@ -8,12 +8,12 @@ package org.opensearch.index.store.remote.metadata; -import java.io.IOException; - import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; import org.opensearch.common.io.IndexIOStreamHandler; +import java.io.IOException; + /** * Handler for {@link RemoteSegmentMetadata} * diff --git a/server/src/main/java/org/opensearch/index/store/remote/utils/BlobFetchRequest.java b/server/src/main/java/org/opensearch/index/store/remote/utils/BlobFetchRequest.java index d0508e9c6f4c7..f7e6545b5010e 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/utils/BlobFetchRequest.java +++ b/server/src/main/java/org/opensearch/index/store/remote/utils/BlobFetchRequest.java @@ -12,6 +12,7 @@ import org.apache.lucene.store.FSDirectory; import java.nio.file.Path; +import java.util.List; /** * The specification to fetch specific block from blob store @@ -20,37 +21,22 @@ */ public class BlobFetchRequest { - private final long position; - - private final long length; - - private final String blobName; - private final Path filePath; private final Directory directory; private final String fileName; + private final List<BlobPart> blobParts; + + private final long blobLength; + private BlobFetchRequest(Builder builder) { - this.position = builder.position; - this.length = builder.length; - this.blobName = builder.blobName; this.fileName = builder.fileName; this.filePath = builder.directory.getDirectory().resolve(fileName); this.directory = builder.directory; - } - - public long getPosition() { - return position; - } - - public long getLength() { - return length; - } - - public String getBlobName() { - return blobName; + this.blobParts = builder.blobParts; + this.blobLength = 
builder.blobParts.stream().mapToLong(o -> o.getLength()).sum(); } public Path getFilePath() { @@ -65,6 +51,14 @@ public String getFileName() { return fileName; } + public List<BlobPart> blobParts() { + return blobParts; + } + + public long getBlobLength() { + return blobLength; + } + public static Builder builder() { return new Builder(); } @@ -72,12 +66,8 @@ public static Builder builder() { @Override public String toString() { return "BlobFetchRequest{" - + "position=" - + position - + ", length=" - + length - + ", blobName='" - + blobName + + "blobParts=" + + blobParts + '\'' + ", filePath=" + filePath @@ -90,35 +80,45 @@ public String toString() { } /** - * Builder for BlobFetchRequest + * BlobPart represents a single chunk of a file */ - public static final class Builder { + public static class BlobPart { + private String blobName; private long position; private long length; - private String blobName; - private FSDirectory directory; - private String fileName; - - private Builder() {} - public Builder position(long position) { - this.position = position; - return this; - } - - public Builder length(long length) { + public BlobPart(String blobName, long position, long length) { + this.blobName = blobName; if (length <= 0) { - throw new IllegalArgumentException("Length for blob fetch request needs to be non-negative"); + throw new IllegalArgumentException("Length for blob part fetch request needs to be positive"); } this.length = length; - return this; + this.position = position; } - public Builder blobName(String blobName) { - this.blobName = blobName; - return this; + public String getBlobName() { + return blobName; + } + + public long getPosition() { + return position; } + public long getLength() { + return length; + } + } + + /** + * Builder for BlobFetchRequest + */ + public static final class Builder { + private List<BlobPart> blobParts; + private FSDirectory directory; + private String fileName; + + private Builder() {} + public Builder directory(FSDirectory directory) { this.directory = directory; return this; @@ -129,6 +129,11 @@ public Builder fileName(String fileName) { return this; } + public Builder blobParts(List<BlobPart> blobParts) { + this.blobParts = blobParts; + return this; + } + public BlobFetchRequest build() { return new BlobFetchRequest(this); } diff --git a/server/src/main/java/org/opensearch/index/store/remote/utils/TransferManager.java b/server/src/main/java/org/opensearch/index/store/remote/utils/TransferManager.java index c9469283ee921..98cad7bfadb09 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/utils/TransferManager.java +++ b/server/src/main/java/org/opensearch/index/store/remote/utils/TransferManager.java @@ -48,11 +48,12 @@ public TransferManager(final BlobContainer blobContainer, final FileCache fileCa } /** - * Given a blobFetchRequest, return it's corresponding IndexInput. + * Given a blobFetchRequest, return its corresponding IndexInput.
* @param blobFetchRequest to fetch * @return future of IndexInput augmented with internal caching maintenance tasks */ public IndexInput fetchBlob(BlobFetchRequest blobFetchRequest) throws IOException { + final Path key = blobFetchRequest.getFilePath(); final CachedIndexInput cacheEntry = fileCache.compute(key, (path, cachedIndexInput) -> { @@ -75,6 +76,7 @@ public IndexInput fetchBlob(BlobFetchRequest blobFetchRequest) throws IOExceptio } } + @SuppressWarnings("removal") private static FileCachedIndexInput createIndexInput(FileCache fileCache, BlobContainer blobContainer, BlobFetchRequest request) { // We need to do a privileged action here in order to fetch from remote // and write to the local file cache in case this is invoked as a side @@ -84,15 +86,20 @@ private static FileCachedIndexInput createIndexInput(FileCache fileCache, BlobCo try { if (Files.exists(request.getFilePath()) == false) { try ( - InputStream snapshotFileInputStream = blobContainer.readBlob( - request.getBlobName(), - request.getPosition(), - request.getLength() - ); OutputStream fileOutputStream = Files.newOutputStream(request.getFilePath()); OutputStream localFileOutputStream = new BufferedOutputStream(fileOutputStream) ) { - snapshotFileInputStream.transferTo(localFileOutputStream); + for (BlobFetchRequest.BlobPart blobPart : request.blobParts()) { + try ( + InputStream snapshotFileInputStream = blobContainer.readBlob( + blobPart.getBlobName(), + blobPart.getPosition(), + blobPart.getLength() + ); + ) { + snapshotFileInputStream.transferTo(localFileOutputStream); + } + } } } final IndexInput luceneIndexInput = request.getDirectory().openInput(request.getFileName(), IOContext.READ); @@ -152,7 +159,7 @@ public IndexInput getIndexInput() throws IOException { @Override public long length() { - return request.getLength(); + return request.getBlobLength(); } @Override diff --git a/server/src/main/java/org/opensearch/index/store/remote/utils/cache/CacheUsage.java b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/CacheUsage.java index 1cb9242926046..0b5480d3ca978 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/utils/cache/CacheUsage.java +++ b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/CacheUsage.java @@ -8,11 +8,14 @@ package org.opensearch.index.store.remote.utils.cache; +import org.opensearch.common.annotation.PublicApi; + /** * Usage metrics for {@link RefCountedCache} * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.7.0") public class CacheUsage { /** * Cache usage of the system diff --git a/server/src/main/java/org/opensearch/index/store/remote/utils/cache/stats/CacheStats.java b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/stats/CacheStats.java index f0a7154effb1d..55893752669a8 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/utils/cache/stats/CacheStats.java +++ b/server/src/main/java/org/opensearch/index/store/remote/utils/cache/stats/CacheStats.java @@ -8,6 +8,7 @@ package org.opensearch.index.store.remote.utils.cache.stats; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.store.remote.utils.cache.RefCountedCache; import java.util.Objects; @@ -15,8 +16,9 @@ /** * Statistics about the performance of a {@link RefCountedCache}.
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.7.0") public final class CacheStats { private final long hitCount; private final long missCount; diff --git a/server/src/main/java/org/opensearch/index/termvectors/TermVectorsService.java b/server/src/main/java/org/opensearch/index/termvectors/TermVectorsService.java index 4d515fadb5a13..7b7da6531aff3 100644 --- a/server/src/main/java/org/opensearch/index/termvectors/TermVectorsService.java +++ b/server/src/main/java/org/opensearch/index/termvectors/TermVectorsService.java @@ -47,12 +47,12 @@ import org.opensearch.action.termvectors.TermVectorsRequest; import org.opensearch.action.termvectors.TermVectorsResponse; import org.opensearch.common.Nullable; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.document.DocumentField; import org.opensearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndVersion; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.MediaType; import org.opensearch.index.engine.Engine; import org.opensearch.index.get.GetResult; diff --git a/server/src/main/java/org/opensearch/index/translog/BaseTranslogReader.java b/server/src/main/java/org/opensearch/index/translog/BaseTranslogReader.java index ea817c4b0932a..d6fa2a2e53de3 100644 --- a/server/src/main/java/org/opensearch/index/translog/BaseTranslogReader.java +++ b/server/src/main/java/org/opensearch/index/translog/BaseTranslogReader.java @@ -108,7 +108,7 @@ protected final int readSize(ByteBuffer reusableBuffer, long position) throws IO return size; } - public TranslogSnapshot newSnapshot() { + TranslogSnapshot newSnapshot() { return new TranslogSnapshot(this, sizeInBytes()); } diff --git a/server/src/main/java/org/opensearch/index/translog/BufferedChecksumStreamInput.java b/server/src/main/java/org/opensearch/index/translog/BufferedChecksumStreamInput.java index 8c9ccc3b487df..f75f27b7bcb91 100644 --- a/server/src/main/java/org/opensearch/index/translog/BufferedChecksumStreamInput.java +++ b/server/src/main/java/org/opensearch/index/translog/BufferedChecksumStreamInput.java @@ -33,6 +33,7 @@ package org.opensearch.index.translog; import org.apache.lucene.store.BufferedChecksum; +import org.apache.lucene.util.BitUtil; import org.opensearch.core.common.io.stream.FilterStreamInput; import org.opensearch.core.common.io.stream.StreamInput; @@ -92,22 +93,21 @@ public void readBytes(byte[] b, int offset, int len) throws IOException { public short readShort() throws IOException { final byte[] buf = buffer.get(); readBytes(buf, 0, 2); - return (short) (((buf[0] & 0xFF) << 8) | (buf[1] & 0xFF)); + return (short) BitUtil.VH_BE_SHORT.get(buf, 0); } @Override public int readInt() throws IOException { final byte[] buf = buffer.get(); readBytes(buf, 0, 4); - return ((buf[0] & 0xFF) << 24) | ((buf[1] & 0xFF) << 16) | ((buf[2] & 0xFF) << 8) | (buf[3] & 0xFF); + return (int) BitUtil.VH_BE_INT.get(buf, 0); } @Override public long readLong() throws IOException { final byte[] buf = buffer.get(); readBytes(buf, 0, 8); - return (((long) (((buf[0] & 0xFF) << 24) | ((buf[1] & 0xFF) << 16) | ((buf[2] & 0xFF) << 8) | (buf[3] & 0xFF))) << 32) | ((((buf[4] - & 0xFF) << 24) | ((buf[5] & 0xFF) << 16) | ((buf[6] & 0xFF) << 8) | (buf[7] & 0xFF)) & 0xFFFFFFFFL); + return (long) BitUtil.VH_BE_LONG.get(buf, 0); } @Override diff --git 
a/server/src/main/java/org/opensearch/index/translog/BufferedChecksumStreamOutput.java b/server/src/main/java/org/opensearch/index/translog/BufferedChecksumStreamOutput.java index eaf9ac0893f1b..9e96664c79cc5 100644 --- a/server/src/main/java/org/opensearch/index/translog/BufferedChecksumStreamOutput.java +++ b/server/src/main/java/org/opensearch/index/translog/BufferedChecksumStreamOutput.java @@ -33,6 +33,7 @@ package org.opensearch.index.translog; import org.apache.lucene.store.BufferedChecksum; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamOutput; import java.io.IOException; @@ -43,8 +44,9 @@ * Similar to Lucene's BufferedChecksumIndexOutput, however this wraps a * {@link StreamOutput} so anything written will update the checksum * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class BufferedChecksumStreamOutput extends StreamOutput { private final StreamOutput out; private final Checksum digest; diff --git a/server/src/main/java/org/opensearch/index/translog/ChannelFactory.java b/server/src/main/java/org/opensearch/index/translog/ChannelFactory.java index f86b9dea0a5f7..4fba85b9d224d 100644 --- a/server/src/main/java/org/opensearch/index/translog/ChannelFactory.java +++ b/server/src/main/java/org/opensearch/index/translog/ChannelFactory.java @@ -31,6 +31,8 @@ package org.opensearch.index.translog; +import org.opensearch.common.annotation.PublicApi; + import java.io.IOException; import java.nio.channels.FileChannel; import java.nio.file.OpenOption; @@ -40,9 +42,10 @@ /** * only for testing until we have a disk-full FileSystem * - * @opensearch.internal + * @opensearch.api */ @FunctionalInterface +@PublicApi(since = "1.0.0") public interface ChannelFactory { default FileChannel open(Path path) throws IOException { return open(path, StandardOpenOption.WRITE, StandardOpenOption.READ, StandardOpenOption.CREATE_NEW); diff --git a/server/src/main/java/org/opensearch/index/translog/Checkpoint.java b/server/src/main/java/org/opensearch/index/translog/Checkpoint.java index a9f905f52bc3a..d309564ef5d32 100644 --- a/server/src/main/java/org/opensearch/index/translog/Checkpoint.java +++ b/server/src/main/java/org/opensearch/index/translog/Checkpoint.java @@ -44,6 +44,7 @@ import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.NIOFSDirectory; import org.apache.lucene.store.OutputStreamIndexOutput; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.io.Channels; import org.opensearch.index.seqno.SequenceNumbers; @@ -57,9 +58,10 @@ /** * A checkpoint for OpenSearch operations * - * @opensearch.internal + * @opensearch.api */ -final public class Checkpoint { +@PublicApi(since = "1.0.0") +public final class Checkpoint { final long offset; final int numOps; diff --git a/server/src/main/java/org/opensearch/index/translog/DefaultTranslogDeletionPolicy.java b/server/src/main/java/org/opensearch/index/translog/DefaultTranslogDeletionPolicy.java index efc762ef00d52..05049e5d07373 100644 --- a/server/src/main/java/org/opensearch/index/translog/DefaultTranslogDeletionPolicy.java +++ b/server/src/main/java/org/opensearch/index/translog/DefaultTranslogDeletionPolicy.java @@ -14,7 +14,7 @@ /** * Default implementation for the {@link TranslogDeletionPolicy}. Plugins can override the default behaviour * via the {@link org.opensearch.plugins.EnginePlugin#getCustomTranslogDeletionPolicyFactory()}. 
- * + * <p> * The default policy uses total number, size in bytes and maximum age for files. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/index/translog/InternalTranslogFactory.java b/server/src/main/java/org/opensearch/index/translog/InternalTranslogFactory.java index d7be1250c0b5b..415d7dc4d1a9d 100644 --- a/server/src/main/java/org/opensearch/index/translog/InternalTranslogFactory.java +++ b/server/src/main/java/org/opensearch/index/translog/InternalTranslogFactory.java @@ -28,7 +28,7 @@ public Translog newTranslog( LongSupplier globalCheckpointSupplier, LongSupplier primaryTermSupplier, LongConsumer persistedSequenceNumberConsumer, - BooleanSupplier primaryModeSupplier + BooleanSupplier startedPrimarySupplier ) throws IOException { return new LocalTranslog( diff --git a/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java b/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java index 4d728da8a394d..a22c538286a88 100644 --- a/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java +++ b/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java @@ -8,16 +8,18 @@ package org.opensearch.index.translog; -import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.logging.Loggers; import org.opensearch.common.util.concurrent.ReleasableLock; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.engine.LifecycleAware; import org.opensearch.index.seqno.LocalCheckpointTracker; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.translog.listener.TranslogEventListener; +import org.opensearch.index.translog.transfer.TranslogUploadFailedException; import java.io.Closeable; import java.io.IOException; @@ -43,7 +45,7 @@ public class InternalTranslogManager implements TranslogManager, Closeable { private final AtomicBoolean pendingTranslogRecovery = new AtomicBoolean(false); private final TranslogEventListener translogEventListener; private final Supplier<LocalCheckpointTracker> localCheckpointTrackerSupplier; - private static final Logger logger = LogManager.getLogger(InternalTranslogManager.class); + private final Logger logger; public InternalTranslogManager( TranslogConfig translogConfig, @@ -57,7 +59,7 @@ public InternalTranslogManager( TranslogEventListener translogEventListener, LifecycleAware engineLifeCycleAware, TranslogFactory translogFactory, - BooleanSupplier primaryModeSupplier + BooleanSupplier startedPrimarySupplier ) throws IOException { this.shardId = shardId; this.readLock = readLock; @@ -70,23 +72,27 @@ public InternalTranslogManager( if (tracker != null) { tracker.markSeqNoAsPersisted(seqNo); } - }, translogUUID, translogFactory, primaryModeSupplier); + }, translogUUID, translogFactory, startedPrimarySupplier); assert translog.getGeneration() != null; this.translog = translog; assert pendingTranslogRecovery.get() == false : "translog recovery can't be pending before we set it"; // don't allow commits until we are done with recovering pendingTranslogRecovery.set(true); + this.logger = Loggers.getLogger(getClass(), shardId); } /** * Rolls the translog generation and cleans up unneeded files.
*/ @Override - public void rollTranslogGeneration() throws TranslogException { + public void rollTranslogGeneration() throws TranslogException, IOException { try (ReleasableLock ignored = readLock.acquire()) { engineLifeCycleAware.ensureOpen(); translog.rollGeneration(); translog.trimUnreferencedReaders(); + } catch (TranslogUploadFailedException e) { + // Do not trigger the translogEventListener as it would fail the Engine, while this is only an issue with remote upload + throw e; } catch (AlreadyClosedException e) { translogEventListener.onFailure("translog roll generation failed", e); throw e; @@ -296,10 +302,16 @@ public void setMinSeqNoToKeep(long seqNo) { translog.setMinSeqNoToKeep(seqNo); } + @Override public void onDelete() { translog.onDelete(); } + @Override + public Releasable drainSync() { + return translog.drainSync(); + } + @Override public Translog.TranslogGeneration getTranslogGeneration() { return translog.getGeneration(); @@ -357,7 +369,7 @@ protected Translog openTranslog( LongConsumer persistedSequenceNumberConsumer, String translogUUID, TranslogFactory translogFactory, - BooleanSupplier primaryModeSupplier + BooleanSupplier startedPrimarySupplier ) throws IOException { return translogFactory.newTranslog( translogConfig, @@ -366,7 +378,7 @@ protected Translog openTranslog( globalCheckpointSupplier, primaryTermSupplier, persistedSequenceNumberConsumer, - primaryModeSupplier + startedPrimarySupplier ); } @@ -425,10 +437,10 @@ public String getTranslogUUID() { * @return if the translog should be flushed */ public boolean shouldPeriodicallyFlush(long localCheckpointOfLastCommit, long flushThreshold) { - final long translogGenerationOfLastCommit = translog.getMinGenerationForSeqNo( - localCheckpointOfLastCommit + 1 - ).translogFileGeneration; - if (translog.sizeInBytesByMinGen(translogGenerationOfLastCommit) < flushThreshold) { + // This is the minimum seqNo that is referred to in the translog and considered for calculating translog size + long minTranslogRefSeqNo = translog.getMinUnreferencedSeqNoInSegments(localCheckpointOfLastCommit + 1); + final long minReferencedTranslogGeneration = translog.getMinGenerationForSeqNo(minTranslogRefSeqNo).translogFileGeneration; + if (translog.sizeInBytesByMinGen(minReferencedTranslogGeneration) < flushThreshold) { return false; } /* @@ -449,7 +461,7 @@ public boolean shouldPeriodicallyFlush(long localCheckpointOfLastCommit, long fl final long translogGenerationOfNewCommit = translog.getMinGenerationForSeqNo( localCheckpointTrackerSupplier.get().getProcessedCheckpoint() + 1 ).translogFileGeneration; - return translogGenerationOfLastCommit < translogGenerationOfNewCommit + return minReferencedTranslogGeneration < translogGenerationOfNewCommit || localCheckpointTrackerSupplier.get().getProcessedCheckpoint() == localCheckpointTrackerSupplier.get().getMaxSeqNo(); } diff --git a/server/src/main/java/org/opensearch/index/translog/LocalTranslog.java b/server/src/main/java/org/opensearch/index/translog/LocalTranslog.java index 22dba3973cfc1..7664631e0ed07 100644 --- a/server/src/main/java/org/opensearch/index/translog/LocalTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/LocalTranslog.java @@ -8,6 +8,7 @@ package org.opensearch.index.translog; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.util.concurrent.ReleasableLock; import org.opensearch.common.util.io.IOUtils; @@ -140,6 +141,11 @@ public TranslogStats stats() { } } + @Override + Releasable drainSync() { + return () -> {}; // noop + } + @Override public
void close() throws IOException { assert Translog.calledFromOutsideOrViaTragedyClose() diff --git a/server/src/main/java/org/opensearch/index/translog/NoOpTranslogManager.java b/server/src/main/java/org/opensearch/index/translog/NoOpTranslogManager.java index 3e6a8e69edfbb..b4aa7865570a6 100644 --- a/server/src/main/java/org/opensearch/index/translog/NoOpTranslogManager.java +++ b/server/src/main/java/org/opensearch/index/translog/NoOpTranslogManager.java @@ -8,6 +8,7 @@ package org.opensearch.index.translog; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.util.concurrent.ReleasableLock; import org.opensearch.core.index.shard.ShardId; @@ -121,8 +122,14 @@ public Translog.Snapshot newChangesSnapshot(long fromSeqNo, long toSeqNo, boolea throw new UnsupportedOperationException("Translog snapshot unsupported with no-op translogs"); } + @Override public void onDelete() {} + @Override + public Releasable drainSync() { + return () -> {}; + } + @Override public Translog.TranslogGeneration getTranslogGeneration() { return null; diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteBlobStoreInternalTranslogFactory.java b/server/src/main/java/org/opensearch/index/translog/RemoteBlobStoreInternalTranslogFactory.java index 339e16db6f360..e100ffaabf13d 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteBlobStoreInternalTranslogFactory.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteBlobStoreInternalTranslogFactory.java @@ -8,6 +8,7 @@ package org.opensearch.index.translog; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; import org.opensearch.repositories.RepositoryMissingException; @@ -31,10 +32,13 @@ public class RemoteBlobStoreInternalTranslogFactory implements TranslogFactory { private final ThreadPool threadPool; + private final RemoteTranslogTransferTracker remoteTranslogTransferTracker; + public RemoteBlobStoreInternalTranslogFactory( Supplier<RepositoriesService> repositoriesServiceSupplier, ThreadPool threadPool, - String repositoryName + String repositoryName, + RemoteTranslogTransferTracker remoteTranslogTransferTracker ) { Repository repository; try { @@ -44,6 +48,7 @@ public RemoteBlobStoreInternalTranslogFactory( } this.repository = repository; this.threadPool = threadPool; + this.remoteTranslogTransferTracker = remoteTranslogTransferTracker; } @Override @@ -54,7 +59,7 @@ public Translog newTranslog( LongSupplier globalCheckpointSupplier, LongSupplier primaryTermSupplier, LongConsumer persistedSequenceNumberConsumer, - BooleanSupplier primaryModeSupplier + BooleanSupplier startedPrimarySupplier ) throws IOException { assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; @@ -68,7 +73,8 @@ public Translog newTranslog( persistedSequenceNumberConsumer, blobStoreRepository, threadPool, - primaryModeSupplier + startedPrimarySupplier, + remoteTranslogTransferTracker ); } diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java index 87fadce1d834c..7b969a37e4aa6 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -9,14 +9,16 @@ package org.opensearch.index.translog; import org.apache.logging.log4j.Logger; +import 
org.opensearch.cluster.service.ClusterService; import org.opensearch.common.SetOnce; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.logging.Loggers; import org.opensearch.common.util.concurrent.ReleasableLock; import org.opensearch.common.util.io.IOUtils; -import org.opensearch.core.util.FileSystemUtils; -import org.opensearch.common.lease.Releasable; -import org.opensearch.common.lease.Releasables; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.util.FileSystemUtils; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.index.translog.transfer.BlobStoreTransferService; import org.opensearch.index.translog.transfer.FileTransferTracker; import org.opensearch.index.translog.transfer.TransferSnapshot; @@ -28,14 +30,19 @@ import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.threadpool.ThreadPool; +import java.io.FileNotFoundException; import java.io.IOException; import java.nio.file.Files; +import java.nio.file.NoSuchFileException; import java.nio.file.Path; import java.util.HashSet; import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BooleanSupplier; import java.util.function.LongConsumer; import java.util.function.LongSupplier; @@ -51,10 +58,10 @@ public class RemoteFsTranslog extends Translog { private final Logger logger; - private final BlobStoreRepository blobStoreRepository; private final TranslogTransferManager translogTransferManager; private final FileTransferTracker fileTransferTracker; - private final BooleanSupplier primaryModeSupplier; + private final BooleanSupplier startedPrimarySupplier; + private final RemoteTranslogTransferTracker remoteTranslogTransferTracker; private volatile long maxRemoteTranslogGenerationUploaded; private volatile long minSeqNoToKeep; @@ -66,11 +73,17 @@ public class RemoteFsTranslog extends Translog { private final SetOnce<Boolean> olderPrimaryCleaned = new SetOnce<>(); private static final int REMOTE_DELETION_PERMITS = 2; + private static final int DOWNLOAD_RETRIES = 2; public static final String TRANSLOG = "translog"; // Semaphore used to allow only single remote generation to happen at a time private final Semaphore remoteGenerationDeletionPermits = new Semaphore(REMOTE_DELETION_PERMITS); + // These permits exist to allow any inflight background triggered upload. 
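The permit fields added at the start of the next hunk form a small gate around remote uploads: every background sync must win the single shared permit, and primary relocation can drain that permit so uploads pause once in-flight work completes. A stripped-down sketch of the pattern (UploadGate, tryUpload and drain are invented names, not the patch's API):

import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

class UploadGate {
    private static final int SYNC_PERMIT = 1;
    private final Semaphore syncPermit = new Semaphore(SYNC_PERMIT);
    private final AtomicBoolean pauseSync = new AtomicBoolean(false);

    // An upload runs only while holding the single permit; a drained or paused gate fails fast.
    boolean tryUpload(Runnable upload) {
        if (pauseSync.get() || syncPermit.tryAcquire(SYNC_PERMIT) == false) {
            return false; // paused, or another sync holds the permit
        }
        try {
            upload.run();
            return true;
        } finally {
            syncPermit.release(SYNC_PERMIT);
        }
    }

    // Draining waits for any in-flight upload to finish, then keeps the permit until closed.
    AutoCloseable drain() throws InterruptedException {
        if (syncPermit.tryAcquire(SYNC_PERMIT, 1, TimeUnit.MINUTES) == false) {
            throw new IllegalStateException("timed out waiting for in-flight uploads");
        }
        pauseSync.set(true);
        return () -> {
            pauseSync.set(false);
            syncPermit.release(SYNC_PERMIT);
        };
    }
}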
+ private static final int SYNC_PERMIT = 1; + private final Semaphore syncPermit = new Semaphore(SYNC_PERMIT); + private final AtomicBoolean pauseSync = new AtomicBoolean(false); + public RemoteFsTranslog( TranslogConfig config, String translogUUID, @@ -80,17 +93,25 @@ public RemoteFsTranslog( LongConsumer persistedSequenceNumberConsumer, BlobStoreRepository blobStoreRepository, ThreadPool threadPool, - BooleanSupplier primaryModeSupplier + BooleanSupplier startedPrimarySupplier, + RemoteTranslogTransferTracker remoteTranslogTransferTracker ) throws IOException { super(config, translogUUID, deletionPolicy, globalCheckpointSupplier, primaryTermSupplier, persistedSequenceNumberConsumer); logger = Loggers.getLogger(getClass(), shardId); - this.blobStoreRepository = blobStoreRepository; - this.primaryModeSupplier = primaryModeSupplier; - fileTransferTracker = new FileTransferTracker(shardId); - this.translogTransferManager = buildTranslogTransferManager(blobStoreRepository, threadPool, shardId, fileTransferTracker); + this.startedPrimarySupplier = startedPrimarySupplier; + this.remoteTranslogTransferTracker = remoteTranslogTransferTracker; + fileTransferTracker = new FileTransferTracker(shardId, remoteTranslogTransferTracker); + this.translogTransferManager = buildTranslogTransferManager( + blobStoreRepository, + threadPool, + shardId, + fileTransferTracker, + remoteTranslogTransferTracker + ); try { download(translogTransferManager, location, logger); Checkpoint checkpoint = readCheckpoint(location); + logger.info("Downloaded data from remote translog till maxSeqNo = {}", checkpoint.maxSeqNo); this.readers.addAll(recoverFromFiles(checkpoint)); if (readers.isEmpty()) { String errorMsg = String.format(Locale.ROOT, "%s at least one reader must be recovered", shardId); @@ -124,6 +145,11 @@ public RemoteFsTranslog( } } + // visible for testing + RemoteTranslogTransferTracker getRemoteTranslogTracker() { + return remoteTranslogTransferTracker; + } + public static void download(Repository repository, ShardId shardId, ThreadPool threadPool, Path location, Logger logger) throws IOException { assert repository instanceof BlobStoreRepository : String.format( @@ -132,32 +158,77 @@ public static void download(Repository repository, ShardId shardId, ThreadPool t shardId ); BlobStoreRepository blobStoreRepository = (BlobStoreRepository) repository; - FileTransferTracker fileTransferTracker = new FileTransferTracker(shardId); + // We use a dummy stats tracker to ensure the flow doesn't break. + // TODO: To be revisited as part of https://github.com/opensearch-project/OpenSearch/issues/7567 + RemoteTranslogTransferTracker remoteTranslogTransferTracker = new RemoteTranslogTransferTracker(shardId, 1000); + FileTransferTracker fileTransferTracker = new FileTransferTracker(shardId, remoteTranslogTransferTracker); TranslogTransferManager translogTransferManager = buildTranslogTransferManager( blobStoreRepository, threadPool, shardId, - fileTransferTracker + fileTransferTracker, + remoteTranslogTransferTracker ); RemoteFsTranslog.download(translogTransferManager, location, logger); + logger.trace(remoteTranslogTransferTracker.toString()); + } + + static void download(TranslogTransferManager translogTransferManager, Path location, Logger logger) throws IOException { + /* + In primary-to-primary relocation, translog upload and download can happen concurrently. + While the new primary is downloading translog files, the old primary may delete them, + hence we retry if tlog/ckp files are not found.
+ + This doesn't happen in the last download, where it is ensured that the older primary has stopped modifying tlog data. + */ + IOException ex = null; + for (int i = 0; i <= DOWNLOAD_RETRIES; i++) { + boolean success = false; + long startTimeMs = System.currentTimeMillis(); + try { + downloadOnce(translogTransferManager, location, logger); + success = true; + return; + } catch (FileNotFoundException | NoSuchFileException e) { + // continue till download retries are exhausted + ex = e; + } finally { + logger.trace("downloadOnce success={} timeElapsed={}", success, (System.currentTimeMillis() - startTimeMs)); + } + } + logger.info("Exhausted all download retries during translog/checkpoint file download"); + throw ex; } - public static void download(TranslogTransferManager translogTransferManager, Path location, Logger logger) throws IOException { - logger.trace("Downloading translog files from remote"); + private static void downloadOnce(TranslogTransferManager translogTransferManager, Path location, Logger logger) throws IOException { + logger.debug("Downloading translog files from remote"); + RemoteTranslogTransferTracker statsTracker = translogTransferManager.getRemoteTranslogTransferTracker(); + long prevDownloadBytesSucceeded = statsTracker.getDownloadBytesSucceeded(); + long prevDownloadTimeInMillis = statsTracker.getTotalDownloadTimeInMillis(); TranslogTransferMetadata translogMetadata = translogTransferManager.readMetadata(); if (translogMetadata != null) { if (Files.notExists(location)) { Files.createDirectories(location); } + // Delete local translog files before downloading from remote for (Path file : FileSystemUtils.files(location)) { Files.delete(file); } + Map<String, String> generationToPrimaryTermMapper = translogMetadata.getGenerationToPrimaryTermMapper(); for (long i = translogMetadata.getGeneration(); i >= translogMetadata.getMinTranslogGeneration(); i--) { String generation = Long.toString(i); translogTransferManager.downloadTranslog(generationToPrimaryTermMapper.get(generation), generation, location); } + logger.info( + "Downloaded translog and checkpoint files from={} to={}", + translogMetadata.getMinTranslogGeneration(), + translogMetadata.getGeneration() + ); + + statsTracker.recordDownloadStats(prevDownloadBytesSucceeded, prevDownloadTimeInMillis); + // We copy the latest generation .ckp file to translog.ckp so that flows that depend on // existence of translog.ckp file work in the same way Files.copy( @@ -165,34 +236,31 @@ public static void download(TranslogTransferManager translogTransferManager, Pat location.resolve(Translog.CHECKPOINT_FILE_NAME) ); } - logger.trace("Downloaded translog files from remote"); + logger.debug("downloadOnce execution completed"); } public static TranslogTransferManager buildTranslogTransferManager( BlobStoreRepository blobStoreRepository, ThreadPool threadPool, ShardId shardId, - FileTransferTracker fileTransferTracker + FileTransferTracker fileTransferTracker, + RemoteTranslogTransferTracker remoteTranslogTransferTracker ) { return new TranslogTransferManager( shardId, new BlobStoreTransferService(blobStoreRepository.blobStore(), threadPool), blobStoreRepository.basePath().add(shardId.getIndex().getUUID()).add(String.valueOf(shardId.id())).add(TRANSLOG), - fileTransferTracker + fileTransferTracker, + remoteTranslogTransferTracker ); } @Override public boolean ensureSynced(Location location) throws IOException { - try (ReleasableLock ignored = writeLock.acquire()) { - assert location.generation <= current.getGeneration(); - if (location.generation ==
current.getGeneration()) { - ensureOpen(); - return prepareAndUpload(primaryTermSupplier.getAsLong(), location.generation); - } - } catch (final Exception ex) { - closeOnTragicEvent(ex); - throw ex; + assert location.generation <= current.getGeneration(); + if (location.generation == current.getGeneration()) { + ensureOpen(); + return prepareAndUpload(primaryTermSupplier.getAsLong(), location.generation); } return false; } @@ -207,9 +275,23 @@ public void rollGeneration() throws IOException { } private boolean prepareAndUpload(Long primaryTerm, Long generation) throws IOException { + // During primary relocation, both the old and the new primary have engines created with RemoteFsTranslog, and both report + // ReplicationTracker.primaryMode() as true until the `internal:index/shard/replication/segments_sync` action re-downloads + // the segments and translog on the new primary. We ensure two things here - + // 1. Using startedPrimarySupplier, we prevent the new primary from doing pre-emptive syncs + // 2. Using syncPermits, we prevent syncs at the desired time during primary relocation. + if (startedPrimarySupplier.getAsBoolean() == false || syncPermit.tryAcquire(SYNC_PERMIT) == false) { + logger.debug("skipped uploading translog for {} {} syncPermits={}", primaryTerm, generation, syncPermit.availablePermits()); + // NO-OP + return false; + } + long maxSeqNo = -1; try (Releasable ignored = writeLock.acquire()) { if (generation == null || generation == current.getGeneration()) { try { + if (closed.get() == false) { + maxSeqNo = getMaxSeqNo(); + } final TranslogReader reader = current.closeIntoReader(); readers.add(reader); copyCheckpointTo(location.resolve(getCommitCheckpointFileName(current.getGeneration()))); @@ -217,6 +299,11 @@ private boolean prepareAndUpload(Long primaryTerm, Long generation) throws IOExc logger.trace("Creating new writer for gen: [{}]", current.getGeneration() + 1); current = createWriter(current.getGeneration() + 1); } + assert writeLock.isHeldByCurrentThread() : "Write lock must be held before we acquire the read lock"; + // Here we are downgrading the write lock by acquiring the read lock and releasing the write lock + // This ensures that other threads can still acquire the read locks while also protecting the + // readers and writer to not be mutated any further. + readLock.acquire(); } catch (final Exception e) { tragedy.setTragicException(e); closeOnTragicEvent(e); @@ -225,7 +312,10 @@ private boolean prepareAndUpload(Long primaryTerm, Long generation) throws IOExc } else if (generation < current.getGeneration()) { return false; } + } + assert readLock.isHeldByCurrentThread() == true; + try (Releasable ignored = readLock; Releasable ignoredGenLock = deletionPolicy.acquireTranslogGen(getMinFileGeneration())) { // Do we need remote writes in sync fashion ? // If we don't , we should swallow FileAlreadyExistsException while writing to remote store // and also verify for same during primary-primary relocation // Writing remote in sync fashion doesn't hurt as global ckp update @@ -233,27 +323,17 @@ // is not updated in remote translog except in primary to primary recovery.
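The readLock.acquire() inside the write-locked section above is the classic read-write lock downgrade: take the read lock while still holding the write lock, then release the write lock, so the readers and writer can no longer be mutated while other readers proceed. The same shape on a plain ReentrantReadWriteLock (a generic sketch, not OpenSearch code):

import java.util.concurrent.locks.ReentrantReadWriteLock;

class LockDowngradeDemo {
    private final ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
    private long state;

    long mutateThenReadConsistently() {
        rwLock.writeLock().lock();
        try {
            state++;                  // mutate under the write lock
            rwLock.readLock().lock(); // downgrade: take the read lock before giving up the write lock
        } finally {
            rwLock.writeLock().unlock(); // readers may now proceed; writers remain blocked
        }
        try {
            return state; // long-running read of the now-frozen state (the upload step above)
        } finally {
            rwLock.readLock().unlock();
        }
    }
}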
if (generation == null) { if (closed.get() == false) { - return upload(primaryTerm, current.getGeneration() - 1); + return upload(primaryTerm, current.getGeneration() - 1, maxSeqNo); } else { - return upload(primaryTerm, current.getGeneration()); + return upload(primaryTerm, current.getGeneration(), maxSeqNo); } } else { - return upload(primaryTerm, generation); + return upload(primaryTerm, generation, maxSeqNo); } } } - private boolean upload(Long primaryTerm, Long generation) throws IOException { - // During primary relocation (primary-primary peer recovery), both the old and the new primary have engine - // created with the RemoteFsTranslog. Both primaries are equipped to upload the translogs. The primary mode check - // below ensures that the real primary only is uploading. Before the primary mode is set as true for the new - // primary, the engine is reset to InternalEngine which also initialises the RemoteFsTranslog which in turns - // downloads all the translogs from remote store and does a flush before the relocation finishes. - if (primaryModeSupplier.getAsBoolean() == false) { - logger.trace("skipped uploading translog for {} {}", primaryTerm, generation); - // NO-OP - return true; - } + private boolean upload(long primaryTerm, long generation, long maxSeqNo) throws IOException { logger.trace("uploading translog for {} {}", primaryTerm, generation); try ( TranslogCheckpointTransferSnapshot transferSnapshotProvider = new TranslogCheckpointTransferSnapshot.Builder( @@ -261,32 +341,16 @@ private boolean upload(Long primaryTerm, Long generation) throws IOException { generation, location, readers, - Translog::getCommitCheckpointFileName + Translog::getCommitCheckpointFileName, + config.getNodeId() ).build() ) { - Releasable transferReleasable = Releasables.wrap(deletionPolicy.acquireTranslogGen(getMinFileGeneration())); - return translogTransferManager.transferSnapshot(transferSnapshotProvider, new TranslogTransferListener() { - @Override - - public void onUploadComplete(TransferSnapshot transferSnapshot) throws IOException { - transferReleasable.close(); - closeFilesIfNoPendingRetentionLocks(); - maxRemoteTranslogGenerationUploaded = generation; - minRemoteGenReferenced = getMinFileGeneration(); - logger.trace("uploaded translog for {} {} ", primaryTerm, generation); - } - - @Override - public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) throws IOException { - transferReleasable.close(); - closeFilesIfNoPendingRetentionLocks(); - if (ex instanceof IOException) { - throw (IOException) ex; - } else { - throw (RuntimeException) ex; - } - } - }); + return translogTransferManager.transferSnapshot( + transferSnapshotProvider, + new RemoteFsTranslogTransferListener(generation, primaryTerm, maxSeqNo) + ); + } finally { + syncPermit.release(SYNC_PERMIT); } } @@ -307,14 +371,8 @@ private boolean syncToDisk() throws IOException { @Override public void sync() throws IOException { - try { - if (syncToDisk() || syncNeeded()) { - prepareAndUpload(primaryTermSupplier.getAsLong(), null); - } - } catch (final Exception e) { - tragedy.setTragicException(e); - closeOnTragicEvent(e); - throw e; + if (syncToDisk() || syncNeeded()) { + prepareAndUpload(primaryTermSupplier.getAsLong(), null); } } @@ -332,12 +390,14 @@ public boolean syncNeeded() { public void close() throws IOException { assert Translog.calledFromOutsideOrViaTragedyClose() : shardId + "Translog.close method is called from inside Translog, but not via closeOnTragicEvent method"; - if (closed.compareAndSet(false, 
true)) { - try (ReleasableLock lock = writeLock.acquire()) { - sync(); - } finally { - logger.debug("translog closed"); - closeFilesIfNoPendingRetentionLocks(); + try (ReleasableLock lock = writeLock.acquire()) { + if (closed.compareAndSet(false, true)) { + try { + sync(); + } finally { + logger.debug("translog closed"); + closeFilesIfNoPendingRetentionLocks(); + } } } } @@ -373,11 +433,39 @@ protected void setMinSeqNoToKeep(long seqNo) { this.minSeqNoToKeep = seqNo; } + @Override + protected Releasable drainSync() { + try { + if (syncPermit.tryAcquire(SYNC_PERMIT, 1, TimeUnit.MINUTES)) { + boolean result = pauseSync.compareAndSet(false, true); + assert result && syncPermit.availablePermits() == 0; + logger.info("All inflight remote translog syncs finished and further syncs paused"); + return Releasables.releaseOnce(() -> { + syncPermit.release(SYNC_PERMIT); + boolean wasSyncPaused = pauseSync.getAndSet(false); + assert syncPermit.availablePermits() == SYNC_PERMIT : "Available permits is " + syncPermit.availablePermits(); + assert wasSyncPaused : "RemoteFsTranslog sync was not paused before re-enabling it"; + logger.info("Resumed remote translog sync back on relocation failure"); + }); + } else { + throw new TimeoutException("Timeout while acquiring all permits"); + } + } catch (TimeoutException | InterruptedException e) { + throw new RuntimeException("Failed to acquire all permits", e); + } + } + @Override public void trimUnreferencedReaders() throws IOException { // cleans up local translog files and updates readers super.trimUnreferencedReaders(); + // This is to ensure that after the permits are acquired during primary relocation, there are no further modifications on the + // remote store. + if (startedPrimarySupplier.getAsBoolean() == false || pauseSync.get()) { + return; + } + // Since remote generation deletion is async, this ensures that only one generation deletion happens at a time. // Remote generation deletion involves 2 async operations - 1) Delete translog generation files 2) Delete metadata files // We try to acquire 2 permits and if we cannot, we return immediately. @@ -388,7 +476,7 @@ public void trimUnreferencedReaders() throws IOException { // cleans up remote translog files not referenced in latest uploaded metadata. // This enables us to restore translog from the metadata in case of failover or relocation. Set<Long> generationsToDelete = new HashSet<>(); - for (long generation = minRemoteGenReferenced - 1; generation >= 0; generation--) { + for (long generation = minRemoteGenReferenced - 1 - indexSettings().getRemoteTranslogExtraKeep(); generation >= 0; generation--) { if (fileTransferTracker.uploaded(Translog.getFilename(generation)) == false) { break; } @@ -426,8 +514,11 @@ private void deleteStaleRemotePrimaryTerms() { // are older files that are no longer needed and should be cleaned up. In here, we delete all files that are part // of older primary term.
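Releasables.releaseOnce in the drainSync hunk above guards the returned handle against double-release: closing it twice must not release the permit twice or flip pauseSync twice. The guard reduces to an AtomicBoolean compare-and-set; a minimal stand-in (not the actual OpenSearch implementation):

import java.util.concurrent.atomic.AtomicBoolean;

final class ReleaseOnce {
    // Wraps a release action so that only the first close() runs it; later calls are no-ops.
    static AutoCloseable wrap(Runnable release) {
        AtomicBoolean released = new AtomicBoolean(false);
        return () -> {
            if (released.compareAndSet(false, true)) {
                release.run();
            }
        };
    }
}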
if (olderPrimaryCleaned.trySet(Boolean.TRUE)) { + if (readers.isEmpty()) { + logger.trace("Translog reader list is empty, returning from deleteStaleRemotePrimaryTerms"); + return; + } // First we delete all stale primary terms folders from remote store - assert readers.isEmpty() == false : shardId + " Expected non-empty readers"; long minimumReferencedPrimaryTerm = readers.stream().map(BaseTranslogReader::getPrimaryTerm).min(Long::compare).get(); translogTransferManager.deletePrimaryTermsAsync(minimumReferencedPrimaryTerm); } @@ -436,24 +527,86 @@ private void deleteStaleRemotePrimaryTerms() { public static void cleanup(Repository repository, ShardId shardId, ThreadPool threadPool) throws IOException { assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; BlobStoreRepository blobStoreRepository = (BlobStoreRepository) repository; - FileTransferTracker fileTransferTracker = new FileTransferTracker(shardId); + // We use a dummy stats tracker to ensure the flow doesn't break. + // TODO: To be revisited as part of https://github.com/opensearch-project/OpenSearch/issues/7567 + RemoteTranslogTransferTracker remoteTranslogTransferTracker = new RemoteTranslogTransferTracker(shardId, 1000); + FileTransferTracker fileTransferTracker = new FileTransferTracker(shardId, remoteTranslogTransferTracker); TranslogTransferManager translogTransferManager = buildTranslogTransferManager( blobStoreRepository, threadPool, shardId, - fileTransferTracker + fileTransferTracker, + remoteTranslogTransferTracker ); // clean up all remote translog files translogTransferManager.deleteTranslogFiles(); } protected void onDelete() { - if (primaryModeSupplier.getAsBoolean() == false) { - logger.trace("skipped delete translog"); - // NO-OP - return; - } + ClusterService.assertClusterOrClusterManagerStateThread(); // clean up all remote translog files translogTransferManager.delete(); } + + // Visible for testing + boolean isRemoteGenerationDeletionPermitsAvailable() { + return remoteGenerationDeletionPermits.availablePermits() == REMOTE_DELETION_PERMITS; + } + + /** + * TranslogTransferListener implementation for RemoteFsTranslog + * + * @opensearch.internal + */ + private class RemoteFsTranslogTransferListener implements TranslogTransferListener { + + /** + * Generation for the translog + */ + private final long generation; + + /** + * Primary Term for the translog + */ + private final long primaryTerm; + + private final long maxSeqNo; + + RemoteFsTranslogTransferListener(long generation, long primaryTerm, long maxSeqNo) { + this.generation = generation; + this.primaryTerm = primaryTerm; + this.maxSeqNo = maxSeqNo; + } + + @Override + public void onUploadComplete(TransferSnapshot transferSnapshot) throws IOException { + maxRemoteTranslogGenerationUploaded = generation; + minRemoteGenReferenced = getMinFileGeneration(); + logger.debug( + "Successfully uploaded translog for primary term = {}, generation = {}, maxSeqNo = {}", + primaryTerm, + generation, + maxSeqNo + ); + } + + @Override + public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) throws IOException { + if (ex instanceof IOException) { + throw (IOException) ex; + } else { + throw (RuntimeException) ex; + } + } + } + + @Override + public long getMinUnreferencedSeqNoInSegments(long minUnrefCheckpointInLastCommit) { + return minSeqNoToKeep; + } + + // Visible for testing + int availablePermits() { + return syncPermit.availablePermits(); + } } diff --git 
a/server/src/main/java/org/opensearch/index/translog/RemoteTranslogStats.java b/server/src/main/java/org/opensearch/index/translog/RemoteTranslogStats.java new file mode 100644 index 0000000000000..03c15e0ea4752 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/translog/RemoteTranslogStats.java @@ -0,0 +1,194 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.translog; + +import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStats; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; + +import java.io.IOException; +import java.util.Objects; + +/** + * Encapsulates the stats related to Remote Translog Store operations + * + * @opensearch.api + */ +@PublicApi(since = "2.10.0") +public class RemoteTranslogStats implements ToXContentFragment, Writeable { + /** + * Total number of Remote Translog Store uploads that have been started + */ + private long totalUploadsStarted; + + /** + * Total number of Remote Translog Store uploads that have failed. + */ + private long totalUploadsFailed; + + /** + * Total number of Remote Translog Store uploads that have been successful. + */ + private long totalUploadsSucceeded; + + /** + * Total number of byte uploads to Remote Translog Store that have been started. + */ + private long uploadBytesStarted; + + /** + * Total number of byte uploads to Remote Translog Store that have failed. + */ + private long uploadBytesFailed; + + /** + * Total number of byte uploads to Remote Translog Store that have been successful. 
+ */ + private long uploadBytesSucceeded; + + static final String REMOTE_STORE = "remote_store"; + + public RemoteTranslogStats() {} + + public RemoteTranslogStats(StreamInput in) throws IOException { + this.totalUploadsStarted = in.readVLong(); + this.totalUploadsFailed = in.readVLong(); + this.totalUploadsSucceeded = in.readVLong(); + this.uploadBytesStarted = in.readVLong(); + this.uploadBytesFailed = in.readVLong(); + this.uploadBytesSucceeded = in.readVLong(); + } + + public RemoteTranslogStats(RemoteTranslogTransferTracker.Stats transferTrackerStats) { + this.totalUploadsStarted = transferTrackerStats.totalUploadsStarted; + this.totalUploadsFailed = transferTrackerStats.totalUploadsFailed; + this.totalUploadsSucceeded = transferTrackerStats.totalUploadsSucceeded; + this.uploadBytesStarted = transferTrackerStats.uploadBytesStarted; + this.uploadBytesFailed = transferTrackerStats.uploadBytesFailed; + this.uploadBytesSucceeded = transferTrackerStats.uploadBytesSucceeded; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(totalUploadsStarted); + out.writeVLong(totalUploadsFailed); + out.writeVLong(totalUploadsSucceeded); + out.writeVLong(uploadBytesStarted); + out.writeVLong(uploadBytesFailed); + out.writeVLong(uploadBytesSucceeded); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null || getClass() != obj.getClass()) return false; + RemoteTranslogStats other = (RemoteTranslogStats) obj; + + return this.totalUploadsStarted == other.totalUploadsStarted + && this.totalUploadsFailed == other.totalUploadsFailed + && this.totalUploadsSucceeded == other.totalUploadsSucceeded + && this.uploadBytesStarted == other.uploadBytesStarted + && this.uploadBytesFailed == other.uploadBytesFailed + && this.uploadBytesSucceeded == other.uploadBytesSucceeded; + } + + @Override + public int hashCode() { + return Objects.hash( + totalUploadsStarted, + totalUploadsFailed, + totalUploadsSucceeded, + uploadBytesStarted, + uploadBytesFailed, + uploadBytesSucceeded + ); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(REMOTE_STORE); + + builder.startObject(RemoteStoreStats.SubFields.UPLOAD); + addRemoteTranslogUploadStatsXContent(builder); + builder.endObject(); // translog.remote_store.upload + + builder.endObject(); // translog.remote_store + + return builder; + } + + public long getTotalUploadsStarted() { + return totalUploadsStarted; + } + + public long getTotalUploadsFailed() { + return totalUploadsFailed; + } + + public long getTotalUploadsSucceeded() { + return totalUploadsSucceeded; + } + + public long getUploadBytesStarted() { + return uploadBytesStarted; + } + + public long getUploadBytesFailed() { + return uploadBytesFailed; + } + + public long getUploadBytesSucceeded() { + return uploadBytesSucceeded; + } + + public void add(RemoteTranslogStats other) { + if (other == null) { + return; + } + + this.totalUploadsStarted += other.totalUploadsStarted; + this.totalUploadsFailed += other.totalUploadsFailed; + this.totalUploadsSucceeded += other.totalUploadsSucceeded; + this.uploadBytesStarted += other.uploadBytesStarted; + this.uploadBytesFailed += other.uploadBytesFailed; + this.uploadBytesSucceeded += other.uploadBytesSucceeded; + } + + void addRemoteTranslogUploadStatsXContent(XContentBuilder builder) throws IOException { + builder.startObject(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOADS); + 
builder.field(RemoteStoreStats.SubFields.STARTED, totalUploadsStarted) + .field(RemoteStoreStats.SubFields.FAILED, totalUploadsFailed) + .field(RemoteStoreStats.SubFields.SUCCEEDED, totalUploadsSucceeded); + builder.endObject(); + + builder.startObject(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOAD_SIZE); + builder.humanReadableField( + RemoteStoreStats.SubFields.STARTED_BYTES, + RemoteStoreStats.SubFields.STARTED, + new ByteSizeValue(uploadBytesStarted) + ); + builder.humanReadableField( + RemoteStoreStats.SubFields.FAILED_BYTES, + RemoteStoreStats.SubFields.FAILED, + new ByteSizeValue(uploadBytesFailed) + ); + builder.humanReadableField( + RemoteStoreStats.SubFields.SUCCEEDED_BYTES, + RemoteStoreStats.SubFields.SUCCEEDED, + new ByteSizeValue(uploadBytesSucceeded) + ); + builder.endObject(); + } +} diff --git a/server/src/main/java/org/opensearch/index/translog/TragicExceptionHolder.java b/server/src/main/java/org/opensearch/index/translog/TragicExceptionHolder.java index 818d840cb11fd..023e277edfd6c 100644 --- a/server/src/main/java/org/opensearch/index/translog/TragicExceptionHolder.java +++ b/server/src/main/java/org/opensearch/index/translog/TragicExceptionHolder.java @@ -34,14 +34,16 @@ import org.apache.lucene.store.AlreadyClosedException; import org.opensearch.ExceptionsHelper; +import org.opensearch.common.annotation.PublicApi; import java.util.concurrent.atomic.AtomicReference; /** * Exception thrown if there are any massive OpenSearch failures * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class TragicExceptionHolder { private final AtomicReference<Exception> tragedy = new AtomicReference<>(); diff --git a/server/src/main/java/org/opensearch/index/translog/Translog.java b/server/src/main/java/org/opensearch/index/translog/Translog.java index f0177f3588db1..9f877e87415dd 100644 --- a/server/src/main/java/org/opensearch/index/translog/Translog.java +++ b/server/src/main/java/org/opensearch/index/translog/Translog.java @@ -37,18 +37,20 @@ import org.opensearch.Version; import org.opensearch.common.Nullable; import org.opensearch.common.UUIDs; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.io.stream.ReleasableBytesStreamOutput; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.concurrent.ReleasableLock; import org.opensearch.common.util.io.IOUtils; -import org.opensearch.common.lease.Releasable; -import org.opensearch.common.lease.Releasables; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexSettings; import org.opensearch.index.VersionType; import org.opensearch.index.engine.Engine; @@ -59,7 +61,6 @@ import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.AbstractIndexShardComponent; import org.opensearch.index.shard.IndexShardComponent; -import org.opensearch.core.index.shard.ShardId; 
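RemoteTranslogStats above is an additive counter bag, and its add() is deliberately null-safe, which lets callers fold per-shard stats into a rollup without guards. A hedged usage sketch (perShardStats is a hypothetical collection, not an API from the patch):

// Roll per-shard remote translog stats up into one total.
RemoteTranslogStats total = new RemoteTranslogStats();
for (RemoteTranslogStats shardStats : perShardStats) { // perShardStats: hypothetical source
    total.add(shardStats); // add(null) is a no-op, so absent shard stats are skipped safely
}
long failedUploadBytes = total.getUploadBytesFailed();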
import java.io.Closeable; import java.io.EOFException; @@ -110,8 +111,9 @@ * operation etc. are still preserved. * </p> * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class Translog extends AbstractIndexShardComponent implements IndexShardComponent, Closeable { /* @@ -840,7 +842,7 @@ public boolean ensureSynced(Stream<Location> locations) throws IOException { /** * Closes the translog if the current translog writer experienced a tragic exception. - * + * <p> * Note that in case this thread closes the translog it must not already be holding a read lock on the translog as it will acquire a * write lock in the course of closing the translog * @@ -894,8 +896,9 @@ public TranslogDeletionPolicy getDeletionPolicy() { /** * Location in the translog * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Location implements Comparable<Location> { public final long generation; @@ -953,8 +956,9 @@ public int hashCode() { /** * A snapshot of the transaction log that allows iterating over all the transaction log operations. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface Snapshot extends Closeable { /** @@ -1039,14 +1043,16 @@ public void close() throws IOException { * A generic interface representing an operation performed on the transaction log. * Each is associated with a type. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface Operation { /** * The type of operation * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") enum Type { @Deprecated CREATE((byte) 1), @@ -1137,8 +1143,9 @@ static void writeOperation(final StreamOutput output, final Operation operation) /** * The source in the translog * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Source { public final BytesReference source; @@ -1566,8 +1573,9 @@ public String toString() { /** * How to sync the translog * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Durability { /** @@ -1809,6 +1817,11 @@ protected void setMinSeqNoToKeep(long seqNo) {} protected void onDelete() {} + /** + * Drains ongoing syncs to the underlying store. It returns a releasable which can be closed to resume the syncs. + */ + abstract Releasable drainSync(); + /** * deletes all files associated with a reader. package-private to be able to simulate node failures at this point */ @@ -1833,8 +1846,9 @@ void closeFilesIfNoPendingRetentionLocks() throws IOException { /** * References a transaction log generation * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class TranslogGeneration { public final String translogUUID; public final long translogFileGeneration; @@ -1976,7 +1990,7 @@ static String createEmptyTranslog( /** * Creates a new empty translog within the specified {@code location} that contains the given {@code initialGlobalCheckpoint}, * {@code primaryTerm} and {@code translogUUID}. - * + * <p> * This method should be used directly under specific circumstances like for shards that will see no indexing. Specifying a non-unique * translog UUID could cause a lot of issues and that's why in all (but one) cases the method * {@link #createEmptyTranslog(Path, long, ShardId, long)} should be used instead.
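With Translog and its nested types promoted to @PublicApi above, code outside the server module can consume snapshots directly. A sketch of the usual iteration contract, assuming (as elsewhere in the codebase) that Snapshot.next() returns null once exhausted; drainOps is an invented helper:

// Drain every operation from a snapshot; Snapshot extends Closeable, so use try-with-resources.
static int drainOps(Translog.Snapshot snapshot) throws IOException {
    int count = 0;
    try (snapshot) {
        Translog.Operation op;
        while ((op = snapshot.next()) != null) {
            count++; // a real consumer would replay or inspect each op here
        }
    }
    return count;
}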
@@ -2034,4 +2048,8 @@ public static String createEmptyTranslog( writer.close(); return uuid; } + + public long getMinUnreferencedSeqNoInSegments(long minUnrefCheckpointInLastCommit) { + return minUnrefCheckpointInLastCommit; + } } diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogConfig.java b/server/src/main/java/org/opensearch/index/translog/TranslogConfig.java index 2862accfedc43..2f00773075d41 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogConfig.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogConfig.java @@ -32,11 +32,12 @@ package org.opensearch.index.translog; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.BigArrays; -import org.opensearch.index.IndexSettings; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.IndexSettings; import java.nio.file.Path; @@ -45,8 +46,9 @@ * Once {@link Translog} has been created with this object, changes to this * object will affect the {@link Translog} instance. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class TranslogConfig { public static final ByteSizeValue DEFAULT_BUFFER_SIZE = new ByteSizeValue(1, ByteSizeUnit.MB); @@ -56,6 +58,7 @@ public final class TranslogConfig { private final ShardId shardId; private final Path translogPath; private final ByteSizeValue bufferSize; + private final String nodeId; /** * Creates a new TranslogConfig instance @@ -64,16 +67,24 @@ public final class TranslogConfig { * @param indexSettings the index settings used to set internal variables * @param bigArrays a bigArrays instance used for temporarily allocating write operations */ - public TranslogConfig(ShardId shardId, Path translogPath, IndexSettings indexSettings, BigArrays bigArrays) { - this(shardId, translogPath, indexSettings, bigArrays, DEFAULT_BUFFER_SIZE); + public TranslogConfig(ShardId shardId, Path translogPath, IndexSettings indexSettings, BigArrays bigArrays, String nodeId) { + this(shardId, translogPath, indexSettings, bigArrays, DEFAULT_BUFFER_SIZE, nodeId); } - TranslogConfig(ShardId shardId, Path translogPath, IndexSettings indexSettings, BigArrays bigArrays, ByteSizeValue bufferSize) { + TranslogConfig( + ShardId shardId, + Path translogPath, + IndexSettings indexSettings, + BigArrays bigArrays, + ByteSizeValue bufferSize, + String nodeId + ) { this.bufferSize = bufferSize; this.indexSettings = indexSettings; this.shardId = shardId; this.translogPath = translogPath; this.bigArrays = bigArrays; + this.nodeId = nodeId; } /** @@ -110,4 +121,8 @@ public Path getTranslogPath() { public ByteSizeValue getBufferSize() { return bufferSize; } + + public String getNodeId() { + return nodeId; + } } diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogDeletionPolicy.java b/server/src/main/java/org/opensearch/index/translog/TranslogDeletionPolicy.java index fde6d6bbc0632..972b19a029a8b 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogDeletionPolicy.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogDeletionPolicy.java @@ -33,8 +33,9 @@ package org.opensearch.index.translog; import org.apache.lucene.util.Counter; -import org.opensearch.core.Assertions; +import org.opensearch.common.annotation.PublicApi; import 
org.opensearch.common.lease.Releasable; +import org.opensearch.core.Assertions; import org.opensearch.index.seqno.SequenceNumbers; import java.io.IOException; @@ -47,8 +48,9 @@ /** * Defines a translog deletion policy * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class TranslogDeletionPolicy { private final Map<Object, RuntimeException> openTranslogRef; diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogDeletionPolicyFactory.java b/server/src/main/java/org/opensearch/index/translog/TranslogDeletionPolicyFactory.java index 8f40fdc90368f..dcce8ade12b32 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogDeletionPolicyFactory.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogDeletionPolicyFactory.java @@ -8,6 +8,7 @@ package org.opensearch.index.translog; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.IndexSettings; import org.opensearch.index.seqno.RetentionLeases; @@ -19,6 +20,7 @@ * @opensearch.api */ @FunctionalInterface +@PublicApi(since = "1.0.0") public interface TranslogDeletionPolicyFactory { TranslogDeletionPolicy create(IndexSettings settings, Supplier<RetentionLeases> supplier); } diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogException.java b/server/src/main/java/org/opensearch/index/translog/TranslogException.java index d7a8d649543d6..eb8e77a055752 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogException.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogException.java @@ -33,6 +33,7 @@ package org.opensearch.index.translog; import org.opensearch.OpenSearchException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.shard.ShardId; @@ -41,8 +42,9 @@ /** * Exception thrown if there are any failures in the translog * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class TranslogException extends OpenSearchException { public TranslogException(ShardId shardId, String msg) { diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogFactory.java b/server/src/main/java/org/opensearch/index/translog/TranslogFactory.java index ab8e2b7752e66..4300435093b5d 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogFactory.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogFactory.java @@ -8,6 +8,8 @@ package org.opensearch.index.translog; +import org.opensearch.common.annotation.PublicApi; + import java.io.IOException; import java.util.function.BooleanSupplier; import java.util.function.LongConsumer; @@ -17,9 +19,10 @@ * Translog Factory to enable creation of various local on-disk * and remote store flavors of {@link Translog} * - * @opensearch.internal + * @opensearch.api */ @FunctionalInterface +@PublicApi(since = "1.0.0") public interface TranslogFactory { Translog newTranslog( @@ -29,6 +32,6 @@ Translog newTranslog( final LongSupplier globalCheckpointSupplier, final LongSupplier primaryTermSupplier, final LongConsumer persistedSequenceNumberConsumer, - final BooleanSupplier primaryModeSupplier + final BooleanSupplier startedPrimarySupplier ) throws IOException; } diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogHeader.java b/server/src/main/java/org/opensearch/index/translog/TranslogHeader.java index 1090a994bf6ad..7b5be9505f27a 100644 --- 
a/server/src/main/java/org/opensearch/index/translog/TranslogHeader.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogHeader.java @@ -57,7 +57,7 @@ * * @opensearch.internal */ -final class TranslogHeader { +public final class TranslogHeader { public static final String TRANSLOG_CODEC = "translog"; public static final int VERSION_CHECKSUMS = 1; // pre-2.0 - unsupported @@ -137,9 +137,30 @@ static int readHeaderVersion(final Path path, final FileChannel channel, final S } /** - * Read a translog header from the given path and file channel + * Read a translog header from the given path and file channel and compare the given UUID */ static TranslogHeader read(final String translogUUID, final Path path, final FileChannel channel) throws IOException { + TranslogHeader translogHeader = read(path, channel); + // verify UUID only after checksum, to ensure that UUID is not corrupted + final BytesRef expectedUUID = new BytesRef(translogUUID); + final BytesRef actualUUID = new BytesRef(translogHeader.translogUUID); + if (actualUUID.bytesEquals(expectedUUID) == false) { + throw new TranslogCorruptedException( + path.toString(), + "expected shard UUID " + + translogUUID + + " but got: " + + translogHeader.translogUUID + + " this translog file belongs to a different translog" + ); + } + return translogHeader; + } + + /** + * Read a translog header from the given path and file channel without verifying the translog UUID + */ + public static TranslogHeader read(final Path path, final FileChannel channel) throws IOException { try { // This input is intentionally not closed because closing it will close the FileChannel. final BufferedChecksumStreamInput in = new BufferedChecksumStreamInput( @@ -179,16 +200,7 @@ static TranslogHeader read(final String translogUUID, final Path path, final Fil + channel.position() + "]"; - // verify UUID only after checksum, to ensure that UUID is not corrupted - final BytesRef expectedUUID = new BytesRef(translogUUID); - if (uuid.bytesEquals(expectedUUID) == false) { - throw new TranslogCorruptedException( - path.toString(), - "expected shard UUID " + expectedUUID + " but got: " + uuid + " this translog file belongs to a different translog" - ); - } - - return new TranslogHeader(translogUUID, primaryTerm, headerSizeInBytes); + return new TranslogHeader(uuid.utf8ToString(), primaryTerm, headerSizeInBytes); } catch (EOFException e) { throw new TranslogCorruptedException(path.toString(), "translog header truncated", e); } diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogManager.java b/server/src/main/java/org/opensearch/index/translog/TranslogManager.java index 78aaa1bc13a00..e1a0b7d1c1293 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogManager.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogManager.java @@ -8,20 +8,24 @@ package org.opensearch.index.translog; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.lease.Releasable; + import java.io.IOException; import java.util.stream.Stream; /** * The interface that orchestrates Translog operations and manages the {@link Translog} and interfaces with the Engine * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface TranslogManager { /** * Rolls the translog generation and cleans up unneeded files.
*/ - void rollTranslogGeneration() throws TranslogException; + void rollTranslogGeneration() throws TranslogException, IOException; /** * Performs recovery from the transaction log up to {@code recoverUpToSeqNo} (inclusive). @@ -132,5 +136,10 @@ public interface TranslogManager { */ void onDelete(); + /** + * Drains ongoing syncs to the underlying store. It returns a releasable which can be closed to resume the syncs. + */ + Releasable drainSync(); + Translog.TranslogGeneration getTranslogGeneration(); } diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogReader.java b/server/src/main/java/org/opensearch/index/translog/TranslogReader.java index 9ea3328587645..d590663670b28 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogReader.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogReader.java @@ -34,6 +34,7 @@ import org.apache.lucene.store.AlreadyClosedException; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.io.Channels; import org.opensearch.common.util.io.IOUtils; import org.opensearch.index.seqno.SequenceNumbers; @@ -52,8 +53,9 @@ /** * An immutable translog file reader * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class TranslogReader extends BaseTranslogReader implements Closeable { protected final long length; private final int totalOperations; diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogRecoveryRunner.java b/server/src/main/java/org/opensearch/index/translog/TranslogRecoveryRunner.java index 91c9a95b07d58..007c1fb98f398 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogRecoveryRunner.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogRecoveryRunner.java @@ -8,14 +8,17 @@ package org.opensearch.index.translog; +import org.opensearch.common.annotation.PublicApi; + import java.io.IOException; /** * The interface that defines how {@link Translog.Snapshot} will get replayed into the Engine * - * @opensearch.internal + * @opensearch.api */ @FunctionalInterface +@PublicApi(since = "1.0.0") public interface TranslogRecoveryRunner { /** diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogStats.java b/server/src/main/java/org/opensearch/index/translog/TranslogStats.java index c740e29963c4d..619dd6371c553 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogStats.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogStats.java @@ -31,12 +31,14 @@ package org.opensearch.index.translog; -import org.opensearch.common.Strings; +import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -45,17 +47,25 @@ /** * Translog statistics * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class TranslogStats implements Writeable, ToXContentFragment { - + private static final String TRANSLOG = "translog"; private long
translogSizeInBytes; private int numberOfOperations; private long uncommittedSizeInBytes; private int uncommittedOperations; private long earliestLastModifiedAge; - public TranslogStats() {} + /** + * Stats related to the Remote Translog Store operations + */ + private final RemoteTranslogStats remoteTranslogStats; + + public TranslogStats() { + remoteTranslogStats = new RemoteTranslogStats(); + } public TranslogStats(StreamInput in) throws IOException { numberOfOperations = in.readVInt(); @@ -63,6 +73,9 @@ public TranslogStats(StreamInput in) throws IOException { uncommittedOperations = in.readVInt(); uncommittedSizeInBytes = in.readVLong(); earliestLastModifiedAge = in.readVLong(); + remoteTranslogStats = in.getVersion().onOrAfter(Version.V_2_10_0) + ? in.readOptionalWriteable(RemoteTranslogStats::new) + : new RemoteTranslogStats(); } public TranslogStats( @@ -87,27 +100,37 @@ public TranslogStats( if (earliestLastModifiedAge < 0) { throw new IllegalArgumentException("earliestLastModifiedAge must be >= 0"); } + this.numberOfOperations = numberOfOperations; this.translogSizeInBytes = translogSizeInBytes; this.uncommittedSizeInBytes = uncommittedSizeInBytes; this.uncommittedOperations = uncommittedOperations; this.earliestLastModifiedAge = earliestLastModifiedAge; + this.remoteTranslogStats = new RemoteTranslogStats(); + } + + public void addRemoteTranslogStats(RemoteTranslogStats remoteTranslogStats) { + if (this.remoteTranslogStats != null) { + this.remoteTranslogStats.add(remoteTranslogStats); + } } - public void add(TranslogStats translogStats) { - if (translogStats == null) { + public void add(TranslogStats other) { + if (other == null) { return; } - this.numberOfOperations += translogStats.numberOfOperations; - this.translogSizeInBytes += translogStats.translogSizeInBytes; - this.uncommittedOperations += translogStats.uncommittedOperations; - this.uncommittedSizeInBytes += translogStats.uncommittedSizeInBytes; + this.numberOfOperations += other.numberOfOperations; + this.translogSizeInBytes += other.translogSizeInBytes; + this.uncommittedOperations += other.uncommittedOperations; + this.uncommittedSizeInBytes += other.uncommittedSizeInBytes; if (this.earliestLastModifiedAge == 0) { - this.earliestLastModifiedAge = translogStats.earliestLastModifiedAge; + this.earliestLastModifiedAge = other.earliestLastModifiedAge; } else { - this.earliestLastModifiedAge = Math.min(this.earliestLastModifiedAge, translogStats.earliestLastModifiedAge); + this.earliestLastModifiedAge = Math.min(this.earliestLastModifiedAge, other.earliestLastModifiedAge); } + + addRemoteTranslogStats(other.remoteTranslogStats); } public long getTranslogSizeInBytes() { @@ -132,21 +155,26 @@ public long getEarliestLastModifiedAge() { return earliestLastModifiedAge; } + public RemoteTranslogStats getRemoteTranslogStats() { + return remoteTranslogStats; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject("translog"); - builder.field("operations", numberOfOperations); - builder.humanReadableField("size_in_bytes", "size", new ByteSizeValue(translogSizeInBytes)); - builder.field("uncommitted_operations", uncommittedOperations); - builder.humanReadableField("uncommitted_size_in_bytes", "uncommitted_size", new ByteSizeValue(uncommittedSizeInBytes)); - builder.field("earliest_last_modified_age", earliestLastModifiedAge); + builder.startObject(TRANSLOG); + addLocalTranslogStatsXContent(builder); + if (remoteTranslogStats != null) { + builder = 
remoteTranslogStats.toXContent(builder, params); + } + builder.endObject(); + return builder; } @Override public String toString() { - return Strings.toString(XContentType.JSON, this, true, true); + return Strings.toString(MediaTypeRegistry.JSON, this, true, true); } @Override @@ -156,5 +184,16 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(uncommittedOperations); out.writeVLong(uncommittedSizeInBytes); out.writeVLong(earliestLastModifiedAge); + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { + out.writeOptionalWriteable(remoteTranslogStats); + } + } + + private void addLocalTranslogStatsXContent(XContentBuilder builder) throws IOException { + builder.field("operations", numberOfOperations); + builder.humanReadableField("size_in_bytes", "size", new ByteSizeValue(translogSizeInBytes)); + builder.field("uncommitted_operations", uncommittedOperations); + builder.humanReadableField("uncommitted_size_in_bytes", "uncommitted_size", new ByteSizeValue(uncommittedSizeInBytes)); + builder.field("earliest_last_modified_age", earliestLastModifiedAge); } } diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogWriter.java b/server/src/main/java/org/opensearch/index/translog/TranslogWriter.java index dd85f8f1f77ea..86f7567f3333d 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogWriter.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogWriter.java @@ -37,21 +37,22 @@ import org.apache.lucene.util.BytesRefIterator; import org.opensearch.common.Nullable; import org.opensearch.common.SuppressForbidden; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.bytes.ReleasableBytesReference; import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.Channels; import org.opensearch.common.io.DiskIoBufferPool; import org.opensearch.common.io.stream.ReleasableBytesStreamOutput; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.concurrent.ReleasableLock; import org.opensearch.common.util.io.IOUtils; -import org.opensearch.common.lease.Releasables; import org.opensearch.core.Assertions; -import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.seqno.SequenceNumbers; import java.io.ByteArrayOutputStream; import java.io.Closeable; @@ -73,8 +74,9 @@ /** * Writer that writes operations to the translog * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class TranslogWriter extends BaseTranslogReader implements Closeable { private final ShardId shardId; @@ -376,7 +378,7 @@ synchronized boolean assertNoSeqAbove(long belowTerm, long aboveSeqNo) { /** * write all buffered ops to disk and fsync file. - * + * <p> * Note: any exception during the sync process will be interpreted as a tragic exception and the writer will be closed before * raising the exception. 
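+ * A call can be a no-op that returns <code>false</code> when everything buffered was already synced.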
* @return <code>true</code> if this call caused an actual sync operation @@ -471,7 +473,7 @@ public TranslogReader closeIntoReader() throws IOException { } @Override - public TranslogSnapshot newSnapshot() { + TranslogSnapshot newSnapshot() { // make sure to acquire the sync lock first, to prevent deadlocks with threads calling // syncUpTo(), where the sync lock is acquired first, followed by the synchronize(this) // After the sync lock we acquire the write lock to avoid deadlocks with threads writing where diff --git a/server/src/main/java/org/opensearch/index/translog/TruncateTranslogAction.java b/server/src/main/java/org/opensearch/index/translog/TruncateTranslogAction.java index 4a082b4a19844..25fcdc614172a 100644 --- a/server/src/main/java/org/opensearch/index/translog/TruncateTranslogAction.java +++ b/server/src/main/java/org/opensearch/index/translog/TruncateTranslogAction.java @@ -45,8 +45,8 @@ import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.BigArrays; -import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.IndexSettings; import org.opensearch.index.seqno.SequenceNumbers; @@ -194,7 +194,8 @@ private boolean isTranslogClean(ShardPath shardPath, ClusterState clusterState, shardPath.getShardId(), translogPath, indexSettings, - BigArrays.NON_RECYCLING_INSTANCE + BigArrays.NON_RECYCLING_INSTANCE, + "" ); long primaryTerm = indexSettings.getIndexMetadata().primaryTerm(shardPath.getShardId().id()); // We open the translog to check for corruption; do not clean anything. diff --git a/server/src/main/java/org/opensearch/index/translog/WriteOnlyTranslogManager.java b/server/src/main/java/org/opensearch/index/translog/WriteOnlyTranslogManager.java index 9080bc483138b..a7a524ad78e95 100644 --- a/server/src/main/java/org/opensearch/index/translog/WriteOnlyTranslogManager.java +++ b/server/src/main/java/org/opensearch/index/translog/WriteOnlyTranslogManager.java @@ -9,9 +9,9 @@ package org.opensearch.index.translog; import org.opensearch.common.util.concurrent.ReleasableLock; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.engine.LifecycleAware; import org.opensearch.index.seqno.LocalCheckpointTracker; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.translog.listener.TranslogEventListener; import java.io.IOException; @@ -38,7 +38,7 @@ public WriteOnlyTranslogManager( TranslogEventListener translogEventListener, LifecycleAware engineLifecycleAware, TranslogFactory translogFactory, - BooleanSupplier primaryModeSupplier + BooleanSupplier startedPrimarySupplier ) throws IOException { super( translogConfig, @@ -52,7 +52,7 @@ public WriteOnlyTranslogManager( translogEventListener, engineLifecycleAware, translogFactory, - primaryModeSupplier + startedPrimarySupplier ); } diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java b/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java index 974e8af42b939..82dd6301ef79f 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java @@ -11,16 +11,17 @@ import org.apache.logging.log4j.LogManager; import
org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; +import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; +import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; -import org.opensearch.common.blobstore.VerifyingMultiStreamBlobContainer; import org.opensearch.common.blobstore.stream.write.WriteContext; import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.common.blobstore.transfer.RemoteTransferContainer; import org.opensearch.common.blobstore.transfer.stream.OffsetRangeFileInputStream; +import org.opensearch.core.action.ActionListener; import org.opensearch.index.translog.ChannelFactory; import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot; import org.opensearch.threadpool.ThreadPool; @@ -92,7 +93,7 @@ public void uploadBlobs( ) { fileSnapshots.forEach(fileSnapshot -> { BlobPath blobPath = blobPaths.get(fileSnapshot.getPrimaryTerm()); - if (!(blobStore.blobContainer(blobPath) instanceof VerifyingMultiStreamBlobContainer)) { + if (!(blobStore.blobContainer(blobPath) instanceof AsyncMultiStreamBlobContainer)) { uploadBlob(ThreadPool.Names.TRANSLOG_TRANSFER, fileSnapshot, blobPath, listener, writePriority); } else { uploadBlob(fileSnapshot, listener, blobPath, writePriority); @@ -114,6 +115,11 @@ private void uploadBlob( try (FileChannel channel = channelFactory.open(fileSnapshot.getPath(), StandardOpenOption.READ)) { contentLength = channel.size(); } + boolean remoteIntegrityEnabled = false; + BlobContainer blobContainer = blobStore.blobContainer(blobPath); + if (blobContainer instanceof AsyncMultiStreamBlobContainer) { + remoteIntegrityEnabled = ((AsyncMultiStreamBlobContainer) blobContainer).remoteIntegrityCheckSupported(); + } RemoteTransferContainer remoteTransferContainer = new RemoteTransferContainer( fileSnapshot.getName(), fileSnapshot.getName(), @@ -122,7 +128,7 @@ private void uploadBlob( writePriority, (size, position) -> new OffsetRangeFileInputStream(fileSnapshot.getPath(), size, position), Objects.requireNonNull(fileSnapshot.getChecksum()), - blobStore.blobContainer(blobPath) instanceof VerifyingMultiStreamBlobContainer + remoteIntegrityEnabled ); ActionListener<Void> completionListener = ActionListener.wrap(resp -> listener.onResponse(fileSnapshot), ex -> { logger.error(() -> new ParameterizedMessage("Failed to upload blob {}", fileSnapshot.getName()), ex); @@ -138,7 +144,7 @@ private void uploadBlob( }); WriteContext writeContext = remoteTransferContainer.createWriteContext(); - ((VerifyingMultiStreamBlobContainer) blobStore.blobContainer(blobPath)).asyncBlobUpload(writeContext, completionListener); + ((AsyncMultiStreamBlobContainer) blobStore.blobContainer(blobPath)).asyncBlobUpload(writeContext, completionListener); } catch (Exception e) { logger.error(() -> new ParameterizedMessage("Failed to upload blob {}", fileSnapshot.getName()), e); @@ -213,17 +219,18 @@ public void listFoldersAsync(String threadpoolName, Iterable<String> path, Actio }); } - public void listAllInSortedOrder(Iterable<String> path, int limit, ActionListener<List<BlobMetadata>> listener) { - blobStore.blobContainer((BlobPath) path).listBlobsByPrefixInSortedOrder("", limit, LEXICOGRAPHIC, listener); + public void 
listAllInSortedOrder(Iterable<String> path, String filenamePrefix, int limit, ActionListener<List<BlobMetadata>> listener) { + blobStore.blobContainer((BlobPath) path).listBlobsByPrefixInSortedOrder(filenamePrefix, limit, LEXICOGRAPHIC, listener); } public void listAllInSortedOrderAsync( String threadpoolName, Iterable<String> path, + String filenamePrefix, int limit, ActionListener<List<BlobMetadata>> listener ) { - threadPool.executor(threadpoolName).execute(() -> { listAllInSortedOrder(path, limit, listener); }); + threadPool.executor(threadpoolName).execute(() -> { listAllInSortedOrder(path, filenamePrefix, limit, listener); }); } } diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/FileTransferTracker.java b/server/src/main/java/org/opensearch/index/translog/transfer/FileTransferTracker.java index 406533561a798..9c2304f809f46 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/FileTransferTracker.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/FileTransferTracker.java @@ -9,11 +9,15 @@ package org.opensearch.index.translog.transfer; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot; import org.opensearch.index.translog.transfer.listener.FileTransferListener; +import java.io.IOException; +import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; @@ -26,14 +30,43 @@ public class FileTransferTracker implements FileTransferListener { private final ConcurrentHashMap<String, TransferState> fileTransferTracker; private final ShardId shardId; + private final RemoteTranslogTransferTracker remoteTranslogTransferTracker; + private Map<String, Long> bytesForTlogCkpFileToUpload; + private long fileTransferStartTime = -1; - public FileTransferTracker(ShardId shardId) { + public FileTransferTracker(ShardId shardId, RemoteTranslogTransferTracker remoteTranslogTransferTracker) { this.shardId = shardId; this.fileTransferTracker = new ConcurrentHashMap<>(); + this.remoteTranslogTransferTracker = remoteTranslogTransferTracker; + } + + void recordFileTransferStartTime(long uploadStartTime) { + // Recording the start time more than once for a sync is invalid + if (fileTransferStartTime == -1) { + fileTransferStartTime = uploadStartTime; + } + } + + void recordBytesForFiles(Set<TransferFileSnapshot> toUpload) { + bytesForTlogCkpFileToUpload = new HashMap<>(); + toUpload.forEach(file -> { + try { + bytesForTlogCkpFileToUpload.put(file.getName(), file.getContentLength()); + } catch (IOException ignored) { + bytesForTlogCkpFileToUpload.put(file.getName(), 0L); + } + }); + } + + long getTotalBytesToUpload() { + return bytesForTlogCkpFileToUpload.values().stream().reduce(0L, Long::sum); } @Override public void onSuccess(TransferFileSnapshot fileSnapshot) { + long durationInMillis = (System.nanoTime() - fileTransferStartTime) / 1_000_000L; + remoteTranslogTransferTracker.addUploadTimeInMillis(durationInMillis); + remoteTranslogTransferTracker.addUploadBytesSucceeded(bytesForTlogCkpFileToUpload.get(fileSnapshot.getName())); add(fileSnapshot.getName(), TransferState.SUCCESS); } @@ -53,6 +86,9 @@ private void add(String file, TransferState targetState) { @Override public void onFailure(TransferFileSnapshot fileSnapshot, Exception e) { + long durationInMillis = 
(System.nanoTime() - fileTransferStartTime) / 1_000_000L; + remoteTranslogTransferTracker.addUploadTimeInMillis(durationInMillis); + remoteTranslogTransferTracker.addUploadBytesFailed(bytesForTlogCkpFileToUpload.get(fileSnapshot.getName())); add(fileSnapshot.getName(), TransferState.FAILED); } diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java b/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java index a240fd38cda11..cfe833dde87eb 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java @@ -8,10 +8,10 @@ package org.opensearch.index.translog.transfer; -import org.opensearch.action.ActionListener; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.stream.write.WritePriority; +import org.opensearch.core.action.ActionListener; import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot; import java.io.IOException; @@ -125,8 +125,14 @@ void uploadBlobs( */ InputStream downloadBlob(Iterable<String> path, String fileName) throws IOException; - void listAllInSortedOrder(Iterable<String> path, int limit, ActionListener<List<BlobMetadata>> listener); + void listAllInSortedOrder(Iterable<String> path, String filenamePrefix, int limit, ActionListener<List<BlobMetadata>> listener); - void listAllInSortedOrderAsync(String threadpoolName, Iterable<String> path, int limit, ActionListener<List<BlobMetadata>> listener); + void listAllInSortedOrderAsync( + String threadpoolName, + Iterable<String> path, + String filenamePrefix, + int limit, + ActionListener<List<BlobMetadata>> listener + ); } diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TransferSnapshot.java b/server/src/main/java/org/opensearch/index/translog/transfer/TransferSnapshot.java index b4c1c97f04a7d..ef34fd31a296b 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TransferSnapshot.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TransferSnapshot.java @@ -8,8 +8,8 @@ package org.opensearch.index.translog.transfer; -import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot; import org.opensearch.index.translog.transfer.FileSnapshot.CheckpointFileSnapshot; +import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot; import org.opensearch.index.translog.transfer.FileSnapshot.TranslogFileSnapshot; import java.util.Set; diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java index 10dec13c81e1a..fb78731246a07 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java @@ -40,11 +40,14 @@ public class TranslogCheckpointTransferSnapshot implements TransferSnapshot, Clo private final long primaryTerm; private long minTranslogGeneration; - TranslogCheckpointTransferSnapshot(long primaryTerm, long generation, int size) { + private String nodeId; + + TranslogCheckpointTransferSnapshot(long primaryTerm, long generation, int size, String nodeId) { translogCheckpointFileInfoTupleSet = new HashSet<>(size); this.size = size; 
this.generation = generation; this.primaryTerm = primaryTerm; + this.nodeId = nodeId; } private void add(TranslogFileSnapshot translogFileSnapshot, CheckpointFileSnapshot checkPointFileSnapshot) { @@ -63,7 +66,13 @@ public Set<TransferFileSnapshot> getTranslogFileSnapshots() { @Override public TranslogTransferMetadata getTranslogTransferMetadata() { - return new TranslogTransferMetadata(primaryTerm, generation, minTranslogGeneration, translogCheckpointFileInfoTupleSet.size() * 2); + return new TranslogTransferMetadata( + primaryTerm, + generation, + minTranslogGeneration, + translogCheckpointFileInfoTupleSet.size() * 2, + nodeId + ); } @Override @@ -110,19 +119,22 @@ public static class Builder { private final List<TranslogReader> readers; private final Function<Long, String> checkpointGenFileNameMapper; private final Path location; + private final String nodeId; public Builder( long primaryTerm, long generation, Path location, List<TranslogReader> readers, - Function<Long, String> checkpointGenFileNameMapper + Function<Long, String> checkpointGenFileNameMapper, + String nodeId ) { this.primaryTerm = primaryTerm; this.generation = generation; this.readers = readers; this.checkpointGenFileNameMapper = checkpointGenFileNameMapper; this.location = location; + this.nodeId = nodeId; } public TranslogCheckpointTransferSnapshot build() throws IOException { @@ -134,7 +146,8 @@ public TranslogCheckpointTransferSnapshot build() throws IOException { TranslogCheckpointTransferSnapshot translogTransferSnapshot = new TranslogCheckpointTransferSnapshot( primaryTerm, generation, - readers.size() + readers.size(), + nodeId ); for (TranslogReader reader : readers) { final long readerGeneration = reader.getGeneration(); diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java index e2bb5f74df234..2f6055df87804 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java @@ -12,7 +12,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.OutputStreamIndexOutput; -import org.opensearch.action.ActionListener; import org.opensearch.action.LatchedActionListener; import org.opensearch.common.SetOnce; import org.opensearch.common.blobstore.BlobMetadata; @@ -22,8 +21,11 @@ import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.logging.Loggers; import org.opensearch.common.lucene.store.ByteArrayIndexInput; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.transfer.listener.TranslogTransferListener; import org.opensearch.threadpool.ThreadPool; @@ -40,7 +42,6 @@ import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; import static org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot; @@ -59,9 +60,12 @@ public class TranslogTransferManager { private final BlobPath 
remoteMetadataTransferPath; private final BlobPath remoteBaseTransferPath; private final FileTransferTracker fileTransferTracker; + private final RemoteTranslogTransferTracker remoteTranslogTransferTracker; private static final long TRANSFER_TIMEOUT_IN_MILLIS = 30000; + private static final int METADATA_FILES_TO_FETCH = 10; + private final Logger logger; private final static String METADATA_DIR = "metadata"; private final static String DATA_DIR = "data"; @@ -76,7 +80,8 @@ public TranslogTransferManager( ShardId shardId, TransferService transferService, BlobPath remoteBaseTransferPath, - FileTransferTracker fileTransferTracker + FileTransferTracker fileTransferTracker, + RemoteTranslogTransferTracker remoteTranslogTransferTracker ) { this.shardId = shardId; this.transferService = transferService; @@ -85,6 +90,11 @@ public TranslogTransferManager( this.remoteMetadataTransferPath = remoteBaseTransferPath.add(METADATA_DIR); this.fileTransferTracker = fileTransferTracker; this.logger = Loggers.getLogger(getClass(), shardId); + this.remoteTranslogTransferTracker = remoteTranslogTransferTracker; + } + + public RemoteTranslogTransferTracker getRemoteTranslogTransferTracker() { + return remoteTranslogTransferTracker; } public ShardId getShardId() { @@ -95,14 +105,22 @@ public boolean transferSnapshot(TransferSnapshot transferSnapshot, TranslogTrans throws IOException { List<Exception> exceptionList = new ArrayList<>(transferSnapshot.getTranslogTransferMetadata().getCount()); Set<TransferFileSnapshot> toUpload = new HashSet<>(transferSnapshot.getTranslogTransferMetadata().getCount()); + long metadataBytesToUpload; + long metadataUploadStartTime; + long uploadStartTime; + long prevUploadBytesSucceeded = remoteTranslogTransferTracker.getUploadBytesSucceeded(); + long prevUploadTimeInMillis = remoteTranslogTransferTracker.getTotalUploadTimeInMillis(); + try { toUpload.addAll(fileTransferTracker.exclusionFilter(transferSnapshot.getTranslogFileSnapshots())); toUpload.addAll(fileTransferTracker.exclusionFilter((transferSnapshot.getCheckpointFileSnapshots()))); if (toUpload.isEmpty()) { logger.trace("Nothing to upload for transfer"); - translogTransferListener.onUploadComplete(transferSnapshot); return true; } + + fileTransferTracker.recordBytesForFiles(toUpload); + captureStatsBeforeUpload(); final CountDownLatch latch = new CountDownLatch(toUpload.size()); LatchedActionListener<TransferFileSnapshot> latchedActionListener = new LatchedActionListener<>( ActionListener.wrap(fileTransferTracker::onSuccess, ex -> { @@ -115,7 +133,8 @@ public boolean transferSnapshot(TransferSnapshot transferSnapshot, TranslogTrans ex ); FileTransferException e = (FileTransferException) ex; - fileTransferTracker.onFailure(e.getFileSnapshot(), ex); + TransferFileSnapshot file = e.getFileSnapshot(); + fileTransferTracker.onFailure(file, ex); exceptionList.add(ex); }), latch @@ -128,37 +147,92 @@ public boolean transferSnapshot(TransferSnapshot transferSnapshot, TranslogTrans ) ); + uploadStartTime = System.nanoTime(); + // TODO: Ideally each file's upload start time should be when it is actually picked for upload + // https://github.com/opensearch-project/OpenSearch/issues/9729 + fileTransferTracker.recordFileTransferStartTime(uploadStartTime); transferService.uploadBlobs(toUpload, blobPathMap, latchedActionListener, WritePriority.HIGH); try { if (latch.await(TRANSFER_TIMEOUT_IN_MILLIS, TimeUnit.MILLISECONDS) == false) { - Exception ex = new TimeoutException("Timed out waiting for transfer of snapshot " + transferSnapshot + " to 
complete"); + Exception ex = new TranslogUploadFailedException( + "Timed out waiting for transfer of snapshot " + transferSnapshot + " to complete" + ); exceptionList.forEach(ex::addSuppressed); throw ex; } } catch (InterruptedException ex) { - exceptionList.forEach(ex::addSuppressed); + Exception exception = new TranslogUploadFailedException("Failed to upload " + transferSnapshot, ex); + exceptionList.forEach(exception::addSuppressed); Thread.currentThread().interrupt(); - throw ex; + throw exception; } if (exceptionList.isEmpty()) { - transferService.uploadBlob(prepareMetadata(transferSnapshot), remoteMetadataTransferPath, WritePriority.HIGH); + TransferFileSnapshot tlogMetadata = prepareMetadata(transferSnapshot); + metadataBytesToUpload = tlogMetadata.getContentLength(); + remoteTranslogTransferTracker.addUploadBytesStarted(metadataBytesToUpload); + metadataUploadStartTime = System.nanoTime(); + try { + transferService.uploadBlob(tlogMetadata, remoteMetadataTransferPath, WritePriority.HIGH); + } catch (Exception exception) { + remoteTranslogTransferTracker.addUploadTimeInMillis((System.nanoTime() - metadataUploadStartTime) / 1_000_000L); + remoteTranslogTransferTracker.addUploadBytesFailed(metadataBytesToUpload); + // outer catch handles capturing stats on upload failure + throw new TranslogUploadFailedException("Failed to upload " + tlogMetadata.getName(), exception); + } + + remoteTranslogTransferTracker.addUploadTimeInMillis((System.nanoTime() - metadataUploadStartTime) / 1_000_000L); + remoteTranslogTransferTracker.addUploadBytesSucceeded(metadataBytesToUpload); + captureStatsOnUploadSuccess(prevUploadBytesSucceeded, prevUploadTimeInMillis); translogTransferListener.onUploadComplete(transferSnapshot); return true; } else { - Exception ex = new IOException("Failed to upload " + exceptionList.size() + " files during transfer"); + Exception ex = new TranslogUploadFailedException("Failed to upload " + exceptionList.size() + " files during transfer"); exceptionList.forEach(ex::addSuppressed); throw ex; } } catch (Exception ex) { logger.error(() -> new ParameterizedMessage("Transfer failed for snapshot {}", transferSnapshot), ex); + captureStatsOnUploadFailure(); translogTransferListener.onUploadFailed(transferSnapshot, ex); return false; } } + /** + * Adds relevant stats to the tracker when an upload is started + */ + private void captureStatsBeforeUpload() { + remoteTranslogTransferTracker.incrementTotalUploadsStarted(); + // TODO: Ideally each file's byte uploads started should be when it is actually picked for upload + // https://github.com/opensearch-project/OpenSearch/issues/9729 + remoteTranslogTransferTracker.addUploadBytesStarted(fileTransferTracker.getTotalBytesToUpload()); + } + + /** + * Adds relevant stats to the tracker when an upload is successfully completed + */ + private void captureStatsOnUploadSuccess(long prevUploadBytesSucceeded, long prevUploadTimeInMillis) { + remoteTranslogTransferTracker.setLastSuccessfulUploadTimestamp(System.currentTimeMillis()); + remoteTranslogTransferTracker.incrementTotalUploadsSucceeded(); + long totalUploadedBytes = remoteTranslogTransferTracker.getUploadBytesSucceeded() - prevUploadBytesSucceeded; + remoteTranslogTransferTracker.updateUploadBytesMovingAverage(totalUploadedBytes); + long uploadDurationInMillis = remoteTranslogTransferTracker.getTotalUploadTimeInMillis() - prevUploadTimeInMillis; + remoteTranslogTransferTracker.updateUploadTimeMovingAverage(uploadDurationInMillis); + if (uploadDurationInMillis > 0) { + 
remoteTranslogTransferTracker.updateUploadBytesPerSecMovingAverage((totalUploadedBytes * 1_000L) / uploadDurationInMillis); + } + } + + /** + * Adds relevant stats to the tracker when an upload has failed + */ + private void captureStatsOnUploadFailure() { + remoteTranslogTransferTracker.incrementTotalUploadsFailed(); + } + public boolean downloadTranslog(String primaryTerm, String generation, Path location) throws IOException { - logger.info( + logger.trace( "Downloading translog files with: Primary Term = {}, Generation = {}, Location = {}", primaryTerm, generation, @@ -180,9 +254,21 @@ private void downloadToFS(String fileName, Path location, String primaryTerm) th if (Files.exists(filePath)) { Files.delete(filePath); } + + boolean downloadStatus = false; + long bytesToRead = 0, downloadStartTime = System.nanoTime(); try (InputStream inputStream = transferService.downloadBlob(remoteDataTransferPath.add(primaryTerm), fileName)) { + // Capture number of bytes for stats before reading + bytesToRead = inputStream.available(); Files.copy(inputStream, filePath); + downloadStatus = true; + } finally { + remoteTranslogTransferTracker.addDownloadTimeInMillis((System.nanoTime() - downloadStartTime) / 1_000_000L); + if (downloadStatus) { + remoteTranslogTransferTracker.addDownloadBytesSucceeded(bytesToRead); + } } + // Mark in FileTransferTracker so that the same files are not uploaded at the time of translog sync fileTransferTracker.add(fileName, true); } @@ -194,15 +280,33 @@ public TranslogTransferMetadata readMetadata() throws IOException { LatchedActionListener<List<BlobMetadata>> latchedActionListener = new LatchedActionListener<>( ActionListener.wrap(blobMetadataList -> { if (blobMetadataList.isEmpty()) return; + RemoteStoreUtils.verifyNoMultipleWriters( + blobMetadataList.stream().map(BlobMetadata::name).collect(Collectors.toList()), + TranslogTransferMetadata::getNodeIdByPrimaryTermAndGen + ); String filename = blobMetadataList.get(0).name(); + boolean downloadStatus = false; + long downloadStartTime = System.nanoTime(), bytesToRead = 0; try (InputStream inputStream = transferService.downloadBlob(remoteMetadataTransferPath, filename)) { + // Capture number of bytes for stats before reading + bytesToRead = inputStream.available(); IndexInput indexInput = new ByteArrayIndexInput("metadata file", inputStream.readAllBytes()); metadataSetOnce.set(metadataStreamWrapper.readStream(indexInput)); + downloadStatus = true; } catch (IOException e) { logger.error(() -> new ParameterizedMessage("Exception while reading metadata file: {}", filename), e); exceptionSetOnce.set(e); + } finally { + remoteTranslogTransferTracker.addDownloadTimeInMillis((System.nanoTime() - downloadStartTime) / 1_000_000L); + logger.debug("translogMetadataDownloadStatus={}", downloadStatus); + if (downloadStatus) { + remoteTranslogTransferTracker.addDownloadBytesSucceeded(bytesToRead); + } } }, e -> { + if (e instanceof RuntimeException) { + throw (RuntimeException) e; + } logger.error(() -> new ParameterizedMessage("Exception while listing metadata files"), e); exceptionSetOnce.set((IOException) e); }), @@ -210,7 +314,12 @@ public TranslogTransferMetadata readMetadata() throws IOException { ); try { - transferService.listAllInSortedOrder(remoteMetadataTransferPath, 1, latchedActionListener); + transferService.listAllInSortedOrder( + remoteMetadataTransferPath, + TranslogTransferMetadata.METADATA_PREFIX, + METADATA_FILES_TO_FETCH, + latchedActionListener + ); latch.await(); } catch (InterruptedException e) { throw new 
IOException("Exception while reading/downloading metadafile", e); @@ -367,6 +476,7 @@ public void deleteStaleTranslogMetadataFilesAsync(Runnable onCompletion) { transferService.listAllInSortedOrderAsync( ThreadPool.Names.REMOTE_PURGE, remoteMetadataTransferPath, + TranslogTransferMetadata.METADATA_PREFIX, Integer.MAX_VALUE, new ActionListener<>() { @Override diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadata.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadata.java index 75d6549b23f1e..052206d807fa6 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadata.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadata.java @@ -9,6 +9,7 @@ package org.opensearch.index.translog.transfer; import org.opensearch.common.SetOnce; +import org.opensearch.common.collect.Tuple; import org.opensearch.index.remote.RemoteStoreUtils; import java.util.Arrays; @@ -30,12 +31,14 @@ public class TranslogTransferMetadata { private final long minTranslogGeneration; - private int count; + private final int count; private final SetOnce<Map<String, String>> generationToPrimaryTermMapper = new SetOnce<>(); public static final String METADATA_SEPARATOR = "__"; + public static final String METADATA_PREFIX = "metadata"; + static final int BUFFER_SIZE = 4096; static final int CURRENT_VERSION = 1; @@ -44,12 +47,22 @@ public class TranslogTransferMetadata { private final long createdAt; - public TranslogTransferMetadata(long primaryTerm, long generation, long minTranslogGeneration, int count) { + private final String nodeId; + + public TranslogTransferMetadata(long primaryTerm, long generation, long minTranslogGeneration, int count, String nodeId) { this.primaryTerm = primaryTerm; this.generation = generation; this.minTranslogGeneration = minTranslogGeneration; this.count = count; this.createdAt = System.currentTimeMillis(); + this.nodeId = nodeId; + } + + /* + Used only at the time of download . Since details are read from content , nodeId is not available + */ + public TranslogTransferMetadata(long primaryTerm, long generation, long minTranslogGeneration, int count) { + this(primaryTerm, generation, minTranslogGeneration, count, ""); } public long getPrimaryTerm() { @@ -83,14 +96,37 @@ public String getFileName() { return String.join( METADATA_SEPARATOR, Arrays.asList( + METADATA_PREFIX, RemoteStoreUtils.invertLong(primaryTerm), RemoteStoreUtils.invertLong(generation), RemoteStoreUtils.invertLong(createdAt), + String.valueOf(Objects.hash(nodeId)), String.valueOf(CURRENT_VERSION) ) ); } + public static Tuple<Tuple<Long, Long>, String> getNodeIdByPrimaryTermAndGeneration(String filename) { + String[] tokens = filename.split(METADATA_SEPARATOR); + if (tokens.length < 6) { + // For versions < 2.11, we don't have node id + return null; + } + return new Tuple<>(new Tuple<>(RemoteStoreUtils.invertLong(tokens[1]), RemoteStoreUtils.invertLong(tokens[2])), tokens[4]); + } + + public static Tuple<String, String> getNodeIdByPrimaryTermAndGen(String filename) { + String[] tokens = filename.split(METADATA_SEPARATOR); + if (tokens.length < 6) { + // For versions < 2.11, we don't have node id. 
+ return null; + } + String primaryTermAndGen = String.join(METADATA_SEPARATOR, tokens[1], tokens[2]); + + String nodeId = tokens[4]; + return new Tuple<>(primaryTermAndGen, nodeId); + } + @Override public int hashCode() { return Objects.hash(primaryTerm, generation); diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogUploadFailedException.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogUploadFailedException.java new file mode 100644 index 0000000000000..4a9b10ec5a52e --- /dev/null +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogUploadFailedException.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.translog.transfer; + +import java.io.IOException; + +/** + * Thrown if an exception occurs while uploading the translog to the remote store. + * @opensearch.internal + */ +public class TranslogUploadFailedException extends IOException { + + public TranslogUploadFailedException(String message) { + super(message); + } + + public TranslogUploadFailedException(String message, Throwable cause) { + super(message, cause); + } + +} diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/listener/TranslogTransferListener.java b/server/src/main/java/org/opensearch/index/translog/transfer/listener/TranslogTransferListener.java index c09fd8798e505..132d1adf916da 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/listener/TranslogTransferListener.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/listener/TranslogTransferListener.java @@ -18,7 +18,6 @@ * @opensearch.internal */ public interface TranslogTransferListener { - /** * Invoked when the transfer of {@link TransferSnapshot} succeeds * @param transferSnapshot the transfer snapshot diff --git a/server/src/main/java/org/opensearch/index/warmer/ShardIndexWarmerService.java b/server/src/main/java/org/opensearch/index/warmer/ShardIndexWarmerService.java index ff8f31297d47d..26f29f17b86c0 100644 --- a/server/src/main/java/org/opensearch/index/warmer/ShardIndexWarmerService.java +++ b/server/src/main/java/org/opensearch/index/warmer/ShardIndexWarmerService.java @@ -33,19 +33,21 @@ package org.opensearch.index.warmer; import org.apache.logging.log4j.Logger; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.metrics.CounterMetric; import org.opensearch.common.metrics.MeanMetric; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.AbstractIndexShardComponent; -import org.opensearch.core.index.shard.ShardId; import java.util.concurrent.TimeUnit; /** * Warms the index into the cache * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ShardIndexWarmerService extends AbstractIndexShardComponent { private final CounterMetric current = new CounterMetric(); diff --git a/server/src/main/java/org/opensearch/index/warmer/WarmerStats.java b/server/src/main/java/org/opensearch/index/warmer/WarmerStats.java index a0fd32b43796b..789baaea20d04 100644 --- a/server/src/main/java/org/opensearch/index/warmer/WarmerStats.java +++ b/server/src/main/java/org/opensearch/index/warmer/WarmerStats.java @@ -32,10 +32,11 @@ package org.opensearch.index.warmer; +import
org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.TimeValue; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -44,8 +45,9 @@ /** * Stats collected about the warmer * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class WarmerStats implements Writeable, ToXContentFragment { private long current; diff --git a/server/src/main/java/org/opensearch/indices/AbstractIndexShardCacheEntity.java b/server/src/main/java/org/opensearch/indices/AbstractIndexShardCacheEntity.java index 19fcbdd7b0bc7..bb1201cb910a9 100644 --- a/server/src/main/java/org/opensearch/indices/AbstractIndexShardCacheEntity.java +++ b/server/src/main/java/org/opensearch/indices/AbstractIndexShardCacheEntity.java @@ -32,9 +32,9 @@ package org.opensearch.indices; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.cache.RemovalNotification; import org.opensearch.common.cache.RemovalReason; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.index.cache.request.ShardRequestCache; import org.opensearch.index.shard.IndexShard; diff --git a/server/src/main/java/org/opensearch/indices/IndexingMemoryController.java b/server/src/main/java/org/opensearch/indices/IndexingMemoryController.java index 9a87f1c26fa29..93b75218fd1c6 100644 --- a/server/src/main/java/org/opensearch/indices/IndexingMemoryController.java +++ b/server/src/main/java/org/opensearch/indices/IndexingMemoryController.java @@ -39,15 +39,15 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.engine.Engine; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; import org.opensearch.index.shard.IndexingOperationListener; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.threadpool.Scheduler.Cancellable; import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool.Names; diff --git a/server/src/main/java/org/opensearch/indices/IndicesModule.java b/server/src/main/java/org/opensearch/indices/IndicesModule.java index 9d2eef5f67a86..b86e98f4ebcbc 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesModule.java +++ b/server/src/main/java/org/opensearch/indices/IndicesModule.java @@ -37,9 +37,8 @@ import org.opensearch.action.admin.indices.rollover.MaxDocsCondition; import org.opensearch.action.admin.indices.rollover.MaxSizeCondition; import org.opensearch.action.resync.TransportResyncReplicationAction; -import org.opensearch.common.util.FeatureFlags; -import org.opensearch.core.ParseField; import org.opensearch.common.inject.AbstractModule; +import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import 
org.opensearch.core.common.io.stream.NamedWriteableRegistry.Entry; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -60,6 +59,7 @@ import org.opensearch.index.mapper.IpFieldMapper; import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.Mapper; +import org.opensearch.index.mapper.MatchOnlyTextFieldMapper; import org.opensearch.index.mapper.MetadataFieldMapper; import org.opensearch.index.mapper.NestedPathFieldMapper; import org.opensearch.index.mapper.NumberFieldMapper; @@ -70,17 +70,18 @@ import org.opensearch.index.mapper.SourceFieldMapper; import org.opensearch.index.mapper.TextFieldMapper; import org.opensearch.index.mapper.VersionFieldMapper; -import org.opensearch.index.remote.RemoteRefreshSegmentPressureService; +import org.opensearch.index.remote.RemoteStorePressureService; +import org.opensearch.index.seqno.GlobalCheckpointSyncAction; import org.opensearch.index.seqno.RetentionLeaseBackgroundSyncAction; import org.opensearch.index.seqno.RetentionLeaseSyncAction; import org.opensearch.index.seqno.RetentionLeaseSyncer; -import org.opensearch.index.seqno.GlobalCheckpointSyncAction; import org.opensearch.index.shard.PrimaryReplicaSyncer; import org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.indices.mapper.MapperRegistry; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.store.IndicesStore; import org.opensearch.indices.store.TransportNodesListShardStoreMetadata; +import org.opensearch.indices.store.TransportNodesListShardStoreMetadataBatch; import org.opensearch.plugins.MapperPlugin; import java.util.ArrayList; @@ -159,6 +160,7 @@ public static Map<String, Mapper.TypeParser> getMappers(List<MapperPlugin> mappe mappers.put(nanoseconds.type(), DateFieldMapper.NANOS_PARSER); mappers.put(IpFieldMapper.CONTENT_TYPE, IpFieldMapper.PARSER); mappers.put(TextFieldMapper.CONTENT_TYPE, TextFieldMapper.PARSER); + mappers.put(MatchOnlyTextFieldMapper.CONTENT_TYPE, MatchOnlyTextFieldMapper.PARSER); mappers.put(KeywordFieldMapper.CONTENT_TYPE, KeywordFieldMapper.PARSER); mappers.put(ObjectMapper.CONTENT_TYPE, new ObjectMapper.TypeParser()); mappers.put(ObjectMapper.NESTED_CONTENT_TYPE, new ObjectMapper.TypeParser()); @@ -280,6 +282,7 @@ protected void configure() { bind(IndicesStore.class).asEagerSingleton(); bind(IndicesClusterStateService.class).asEagerSingleton(); bind(TransportNodesListShardStoreMetadata.class).asEagerSingleton(); + bind(TransportNodesListShardStoreMetadataBatch.class).asEagerSingleton(); bind(GlobalCheckpointSyncAction.class).asEagerSingleton(); bind(TransportResyncReplicationAction.class).asEagerSingleton(); bind(PrimaryReplicaSyncer.class).asEagerSingleton(); @@ -288,9 +291,7 @@ protected void configure() { bind(RetentionLeaseSyncer.class).asEagerSingleton(); bind(SegmentReplicationCheckpointPublisher.class).asEagerSingleton(); bind(SegmentReplicationPressureService.class).asEagerSingleton(); - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE)) { - bind(RemoteRefreshSegmentPressureService.class).asEagerSingleton(); - } + bind(RemoteStorePressureService.class).asEagerSingleton(); } /** diff --git a/server/src/main/java/org/opensearch/indices/IndicesQueryCache.java b/server/src/main/java/org/opensearch/indices/IndicesQueryCache.java index 6ebed17437074..52ed311a1eb92 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesQueryCache.java +++ b/server/src/main/java/org/opensearch/indices/IndicesQueryCache.java @@ 
-44,13 +44,14 @@ import org.apache.lucene.search.Scorer; import org.apache.lucene.search.ScorerSupplier; import org.apache.lucene.search.Weight; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lucene.ShardCoreKeyMap; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.index.cache.query.QueryCacheStats; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.cache.query.QueryCacheStats; import java.io.Closeable; import java.io.IOException; @@ -65,8 +66,9 @@ /** * The query cache for indices * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndicesQueryCache implements QueryCache, Closeable { private static final Logger logger = LogManager.getLogger(IndicesQueryCache.class); @@ -186,6 +188,12 @@ public BulkScorer bulkScorer(LeafReaderContext context) throws IOException { return in.bulkScorer(context); } + @Override + public int count(LeafReaderContext context) throws IOException { + shardKeyMap.add(context.reader()); + return in.count(context); + } + @Override public boolean isCacheable(LeafReaderContext ctx) { return in.isCacheable(ctx); diff --git a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java index c06880db42587..92fb278c946f1 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java +++ b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java @@ -39,19 +39,26 @@ import org.apache.lucene.util.Accountable; import org.apache.lucene.util.RamUsageEstimator; import org.opensearch.common.CheckedSupplier; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.cache.Cache; -import org.opensearch.common.cache.CacheBuilder; -import org.opensearch.common.cache.CacheLoader; +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.cache.LoadAwareCacheLoader; import org.opensearch.common.cache.RemovalListener; import org.opensearch.common.cache.RemovalNotification; +import org.opensearch.common.cache.service.CacheService; +import org.opensearch.common.cache.store.config.CacheConfig; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.shard.IndexShard; import java.io.Closeable; import java.io.IOException; @@ -60,8 +67,11 @@ import java.util.HashSet; import java.util.Iterator; import java.util.Objects; +import java.util.Optional; import java.util.Set; import java.util.concurrent.ConcurrentMap; +import java.util.function.Function; +import java.util.function.ToLongBiFunction; /** * 
The indices request cache allows caching shard-level request-stage responses, helping with improving @@ -103,24 +113,32 @@ public final class IndicesRequestCache implements RemovalListener<IndicesRequest Property.NodeScope ); + private final static long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(Key.class); + private final ConcurrentMap<CleanupKey, Boolean> registeredClosedListeners = ConcurrentCollections.newConcurrentMap(); private final Set<CleanupKey> keysToClean = ConcurrentCollections.newConcurrentSet(); private final ByteSizeValue size; private final TimeValue expire; - private final Cache<Key, BytesReference> cache; + private final ICache<Key, BytesReference> cache; + private final Function<ShardId, Optional<CacheEntity>> cacheEntityLookup; - IndicesRequestCache(Settings settings) { + IndicesRequestCache(Settings settings, Function<ShardId, Optional<CacheEntity>> cacheEntityFunction, CacheService cacheService) { this.size = INDICES_CACHE_QUERY_SIZE.get(settings); this.expire = INDICES_CACHE_QUERY_EXPIRE.exists(settings) ? INDICES_CACHE_QUERY_EXPIRE.get(settings) : null; long sizeInBytes = size.getBytes(); - CacheBuilder<Key, BytesReference> cacheBuilder = CacheBuilder.<Key, BytesReference>builder() - .setMaximumWeight(sizeInBytes) - .weigher((k, v) -> k.ramBytesUsed() + v.ramBytesUsed()) - .removalListener(this); - if (expire != null) { - cacheBuilder.setExpireAfterAccess(expire); - } - cache = cacheBuilder.build(); + ToLongBiFunction<Key, BytesReference> weigher = (k, v) -> k.ramBytesUsed() + v.ramBytesUsed(); + this.cacheEntityLookup = cacheEntityFunction; + this.cache = cacheService.createCache( + new CacheConfig.Builder<Key, BytesReference>().setSettings(settings) + .setWeigher(weigher) + .setValueType(BytesReference.class) + .setKeyType(Key.class) + .setRemovalListener(this) + .setMaxSizeInBytes(sizeInBytes) // for backward compatibility + .setExpireAfterAccess(expire) // for backward compatibility + .build(), + CacheType.INDICES_REQUEST_CACHE + ); } @Override @@ -135,23 +153,31 @@ void clear(CacheEntity entity) { @Override public void onRemoval(RemovalNotification<Key, BytesReference> notification) { - notification.getKey().entity.onRemoval(notification); + // In case this event happens for an old shard, we can safely ignore it as we don't keep track of old + shards as part of the request cache.
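+ // an empty Optional from cacheEntityLookup means the shard is no longer present on this node, so the notification is dropped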
+ cacheEntityLookup.apply(notification.getKey().shardId).ifPresent(entity -> entity.onRemoval(notification)); } BytesReference getOrCompute( - CacheEntity cacheEntity, + IndicesService.IndexShardCacheEntity cacheEntity, CheckedSupplier<BytesReference, IOException> loader, DirectoryReader reader, BytesReference cacheKey ) throws Exception { assert reader.getReaderCacheHelper() != null; - final Key key = new Key(cacheEntity, reader.getReaderCacheHelper().getKey(), cacheKey); + assert reader.getReaderCacheHelper() instanceof OpenSearchDirectoryReader.DelegatingCacheHelper; + + OpenSearchDirectoryReader.DelegatingCacheHelper delegatingCacheHelper = (OpenSearchDirectoryReader.DelegatingCacheHelper) reader + .getReaderCacheHelper(); + String readerCacheKeyId = delegatingCacheHelper.getDelegatingCacheKey().getId(); + assert readerCacheKeyId != null; + final Key key = new Key(((IndexShard) cacheEntity.getCacheIdentity()).shardId(), cacheKey, readerCacheKeyId); Loader cacheLoader = new Loader(cacheEntity, loader); BytesReference value = cache.computeIfAbsent(key, cacheLoader); if (cacheLoader.isLoaded()) { - key.entity.onMiss(); + cacheEntity.onMiss(); // see if its the first time we see this reader, and make sure to register a cleanup key - CleanupKey cleanupKey = new CleanupKey(cacheEntity, reader.getReaderCacheHelper().getKey()); + CleanupKey cleanupKey = new CleanupKey(cacheEntity, readerCacheKeyId); if (!registeredClosedListeners.containsKey(cleanupKey)) { Boolean previous = registeredClosedListeners.putIfAbsent(cleanupKey, Boolean.TRUE); if (previous == null) { @@ -159,7 +185,7 @@ BytesReference getOrCompute( } } } else { - key.entity.onHit(); + cacheEntity.onHit(); } return value; } @@ -170,9 +196,14 @@ BytesReference getOrCompute( * @param reader the reader to invalidate the cache entry for * @param cacheKey the cache key to invalidate */ - void invalidate(CacheEntity cacheEntity, DirectoryReader reader, BytesReference cacheKey) { + void invalidate(IndicesService.IndexShardCacheEntity cacheEntity, DirectoryReader reader, BytesReference cacheKey) { assert reader.getReaderCacheHelper() != null; - cache.invalidate(new Key(cacheEntity, reader.getReaderCacheHelper().getKey(), cacheKey)); + String readerCacheKeyId = null; + if (reader instanceof OpenSearchDirectoryReader) { + IndexReader.CacheHelper cacheHelper = ((OpenSearchDirectoryReader) reader).getDelegatingCacheHelper(); + readerCacheKeyId = ((OpenSearchDirectoryReader.DelegatingCacheHelper) cacheHelper).getDelegatingCacheKey().getId(); + } + cache.invalidate(new Key(((IndexShard) cacheEntity.getCacheIdentity()).shardId(), cacheKey, readerCacheKeyId)); } /** @@ -180,7 +211,7 @@ void invalidate(CacheEntity cacheEntity, DirectoryReader reader, BytesReference * * @opensearch.internal */ - private static class Loader implements CacheLoader<Key, BytesReference> { + private static class Loader implements LoadAwareCacheLoader<Key, BytesReference> { private final CacheEntity entity; private final CheckedSupplier<BytesReference, IOException> loader; @@ -240,6 +271,7 @@ interface CacheEntity extends Accountable { * Called when this entity instance is removed */ void onRemoval(RemovalNotification<Key, BytesReference> notification); + } /** @@ -247,22 +279,26 @@ interface CacheEntity extends Accountable { * * @opensearch.internal */ - static class Key implements Accountable { - private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(Key.class); - - public final CacheEntity entity; // use as identity equality - public 
final IndexReader.CacheKey readerCacheKey; + static class Key implements Accountable, Writeable { + public final ShardId shardId; // use as identity equality + public final String readerCacheKeyId; public final BytesReference value; - Key(CacheEntity entity, IndexReader.CacheKey readerCacheKey, BytesReference value) { - this.entity = entity; - this.readerCacheKey = Objects.requireNonNull(readerCacheKey); + Key(ShardId shardId, BytesReference value, String readerCacheKeyId) { + this.shardId = shardId; this.value = value; + this.readerCacheKeyId = Objects.requireNonNull(readerCacheKeyId); + } + + Key(StreamInput in) throws IOException { + this.shardId = in.readOptionalWriteable(ShardId::new); + this.readerCacheKeyId = in.readOptionalString(); + this.value = in.readBytesReference(); } @Override public long ramBytesUsed() { - return BASE_RAM_BYTES_USED + entity.ramBytesUsed() + value.length(); + return BASE_RAM_BYTES_USED + shardId.getBaseRamBytesUsed() + value.length(); } @Override @@ -276,28 +312,35 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Key key = (Key) o; - if (Objects.equals(readerCacheKey, key.readerCacheKey) == false) return false; - if (!entity.getCacheIdentity().equals(key.entity.getCacheIdentity())) return false; + if (!Objects.equals(readerCacheKeyId, key.readerCacheKeyId)) return false; + if (!shardId.equals(key.shardId)) return false; if (!value.equals(key.value)) return false; return true; } @Override public int hashCode() { - int result = entity.getCacheIdentity().hashCode(); - result = 31 * result + readerCacheKey.hashCode(); + int result = shardId.hashCode(); + result = 31 * result + readerCacheKeyId.hashCode(); result = 31 * result + value.hashCode(); return result; } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalWriteable(shardId); + out.writeOptionalString(readerCacheKeyId); + out.writeBytesReference(value); + } } private class CleanupKey implements IndexReader.ClosedListener { final CacheEntity entity; - final IndexReader.CacheKey readerCacheKey; + final String readerCacheKeyId; - private CleanupKey(CacheEntity entity, IndexReader.CacheKey readerCacheKey) { + private CleanupKey(CacheEntity entity, String readerCacheKeyId) { this.entity = entity; - this.readerCacheKey = readerCacheKey; + this.readerCacheKeyId = readerCacheKeyId; } @Override @@ -315,7 +358,7 @@ public boolean equals(Object o) { return false; } CleanupKey that = (CleanupKey) o; - if (Objects.equals(readerCacheKey, that.readerCacheKey) == false) return false; + if (!Objects.equals(readerCacheKeyId, that.readerCacheKeyId)) return false; if (!entity.getCacheIdentity().equals(that.entity.getCacheIdentity())) return false; return true; } @@ -323,11 +366,14 @@ public boolean equals(Object o) { @Override public int hashCode() { int result = entity.getCacheIdentity().hashCode(); - result = 31 * result + Objects.hashCode(readerCacheKey); + result = 31 * result + Objects.hashCode(readerCacheKeyId); return result; } } + /** + * Logic to clean up in-memory cache. 
+ */ synchronized void cleanCache() { final Set<CleanupKey> currentKeysToClean = new HashSet<>(); final Set<Object> currentFullClean = new HashSet<>(); @@ -336,9 +382,9 @@ synchronized void cleanCache() { for (Iterator<CleanupKey> iterator = keysToClean.iterator(); iterator.hasNext();) { CleanupKey cleanupKey = iterator.next(); iterator.remove(); - if (cleanupKey.readerCacheKey == null || cleanupKey.entity.isOpen() == false) { + if (cleanupKey.readerCacheKeyId == null || !cleanupKey.entity.isOpen()) { // null indicates full cleanup, as does a closed shard - currentFullClean.add(cleanupKey.entity.getCacheIdentity()); + currentFullClean.add(((IndexShard) cleanupKey.entity.getCacheIdentity()).shardId()); } else { currentKeysToClean.add(cleanupKey); } @@ -346,23 +392,25 @@ if (!currentKeysToClean.isEmpty() || !currentFullClean.isEmpty()) { for (Iterator<Key> iterator = cache.keys().iterator(); iterator.hasNext();) { Key key = iterator.next(); - if (currentFullClean.contains(key.entity.getCacheIdentity())) { + if (currentFullClean.contains(key.shardId)) { iterator.remove(); } else { - if (currentKeysToClean.contains(new CleanupKey(key.entity, key.readerCacheKey))) { + // If we reach here, then we should have an open shard available on the node. + if (currentKeysToClean.contains( + new CleanupKey(cacheEntityLookup.apply(key.shardId).orElse(null), key.readerCacheKeyId) + )) { iterator.remove(); } } } } - cache.refresh(); } /** * Returns the current size of the cache */ - int count() { + long count() { return cache.count(); } diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index b574ffd1006c0..8151c151e3968 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -47,6 +47,7 @@ import org.opensearch.action.admin.indices.stats.CommonStatsFlags.Flag; import org.opensearch.action.admin.indices.stats.IndexShardStats; import org.opensearch.action.admin.indices.stats.ShardStats; +import org.opensearch.action.search.SearchRequestStats; import org.opensearch.action.search.SearchType; import org.opensearch.client.Client; import org.opensearch.cluster.ClusterState; @@ -60,34 +61,38 @@ import org.opensearch.common.CheckedFunction; import org.opensearch.common.CheckedSupplier; import org.opensearch.common.Nullable; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.component.AbstractLifecycleComponent; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.cache.service.CacheService; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; import
org.opensearch.common.util.concurrent.AbstractRefCounted; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor; +import org.opensearch.common.util.io.IOUtils; import org.opensearch.common.util.iterable.Iterables; import org.opensearch.common.util.set.Sets; import org.opensearch.common.xcontent.LoggingDeprecationHandler; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.util.io.IOUtils; -import org.opensearch.common.lease.Releasable; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.core.util.FileSystemUtils; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.env.NodeEnvironment; @@ -95,7 +100,6 @@ import org.opensearch.env.ShardLockObtainFailedException; import org.opensearch.gateway.MetaStateService; import org.opensearch.gateway.MetadataStateFormat; -import org.opensearch.core.index.Index; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.IndexService; @@ -120,7 +124,7 @@ import org.opensearch.index.query.QueryRewriteContext; import org.opensearch.index.recovery.RecoveryStats; import org.opensearch.index.refresh.RefreshStats; -import org.opensearch.index.remote.RemoteRefreshSegmentPressureService; +import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; import org.opensearch.index.search.stats.SearchStats; import org.opensearch.index.seqno.RetentionLeaseStats; import org.opensearch.index.seqno.RetentionLeaseSyncer; @@ -131,18 +135,17 @@ import org.opensearch.index.shard.IndexShardState; import org.opensearch.index.shard.IndexingOperationListener; import org.opensearch.index.shard.IndexingStats; -import org.opensearch.core.index.shard.ShardId; -import org.opensearch.index.store.remote.filecache.FileCacheCleaner; +import org.opensearch.index.shard.IndexingStats.Stats.DocStatusStats; import org.opensearch.index.translog.InternalTranslogFactory; import org.opensearch.index.translog.RemoteBlobStoreInternalTranslogFactory; import org.opensearch.index.translog.TranslogFactory; import org.opensearch.index.translog.TranslogStats; -import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.indices.mapper.MapperRegistry; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryListener; +import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import 
org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.ReplicationType; @@ -193,8 +196,8 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; import static org.opensearch.common.collect.MapBuilder.newMapBuilder; -import static org.opensearch.common.util.CollectionUtils.arrayAsArrayList; import static org.opensearch.common.util.concurrent.OpenSearchExecutors.daemonThreadFactory; +import static org.opensearch.core.common.util.CollectionUtils.arrayAsArrayList; import static org.opensearch.index.IndexService.IndexCreationContext.CREATE_INDEX; import static org.opensearch.index.IndexService.IndexCreationContext.METADATA_VERIFICATION; import static org.opensearch.index.query.AbstractQueryBuilder.parseInnerQueryBuilder; @@ -203,8 +206,9 @@ /** * Main OpenSearch indices service * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndicesService extends AbstractLifecycleComponent implements IndicesClusterStateService.AllocatedIndices<IndexShard, IndexService>, @@ -244,31 +248,68 @@ public class IndicesService extends AbstractLifecycleComponent ); /** - * Used to specify if all indexes are to create with remote store enabled by default + * Used to specify the default translog buffer interval for remote store backed indexes. */ - public static final Setting<Boolean> CLUSTER_REMOTE_STORE_ENABLED_SETTING = Setting.boolSetting( - "cluster.remote_store.enabled", - false, + public static final Setting<TimeValue> CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING = Setting.timeSetting( + "cluster.remote_store.translog.buffer_interval", + IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, + IndexSettings.MINIMUM_REMOTE_TRANSLOG_BUFFER_INTERVAL, Property.NodeScope, - Property.Final + Property.Dynamic + ); + + /** + * This setting is used to set the refresh interval when the {@code index.refresh_interval} index setting is not + * provided during index creation or when the existing {@code index.refresh_interval} index setting is set as null. + * This comes in handy when the user wants to set a default refresh interval, different from 1s, across all indexes + * created in a cluster while still keeping the searchIdle feature supported. The setting can only be + * as low as the {@code cluster.minimum.index.refresh_interval}. + */ + public static final Setting<TimeValue> CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING = Setting.timeSetting( + "cluster.default.index.refresh_interval", + IndexSettings.DEFAULT_REFRESH_INTERVAL, + IndexSettings.MINIMUM_REFRESH_INTERVAL, + new ClusterDefaultRefreshIntervalValidator(), + Property.NodeScope, + Property.Dynamic + ); + + /** + * This setting is used to set the minimum refresh interval applicable for all indexes in a cluster. The + * {@code cluster.default.index.refresh_interval} setting value must not be lower than this setting's value. Index + * creation will fail if the index setting {@code index.refresh_interval} is supplied with a value lower than the + * cluster minimum refresh interval.
+ */ + public static final Setting<TimeValue> CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING = Setting.timeSetting( + "cluster.minimum.index.refresh_interval", + IndexSettings.MINIMUM_REFRESH_INTERVAL, + IndexSettings.MINIMUM_REFRESH_INTERVAL, + new ClusterMinimumRefreshIntervalValidator(), + Property.NodeScope, + Property.Dynamic ); /** - * Used to specify default repo to use for segment upload for remote store backed indices + * If enabled, this setting restricts creation or update of indexes where the `index.translog.durability` index + * setting is set as ASYNC. If disabled, either durability mode can be used and switched from one to the other at + * any later time. */ - public static final Setting<String> CLUSTER_REMOTE_STORE_REPOSITORY_SETTING = Setting.simpleString( - "cluster.remote_store.segment.repository", - "", + public static final Setting<Boolean> CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING = Setting.boolSetting( + "cluster.remote_store.index.restrict.async-durability", + false, Property.NodeScope, Property.Final ); /** - * Used to specify default repo to use for translog upload for remote store backed indices + * If enabled, this setting enforces that indexes will be created with a replication type matching the cluster setting + * defined in cluster.indices.replication.strategy by rejecting any request that specifies a replication type that + * does not match the cluster setting. If disabled, a user can choose a replication type on a per-index basis using + * the index.replication.type setting. */ - public static final Setting<String> CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING = Setting.simpleString( - "cluster.remote_store.translog.repository", - "", + public static final Setting<Boolean> CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING = Setting.boolSetting( + "cluster.index.restrict.replication.type", + false, Property.NodeScope, Property.Final ); @@ -310,6 +351,7 @@ public class IndicesService extends AbstractLifecycleComponent private final CountDownLatch closeLatch = new CountDownLatch(1); private volatile boolean idFieldDataEnabled; private volatile boolean allowExpensiveQueries; + private final RecoverySettings recoverySettings; @Nullable private final OpenSearchThreadPoolExecutor danglingIndicesThreadPoolExecutor; @@ -318,8 +360,10 @@ public class IndicesService extends AbstractLifecycleComponent private final ValuesSourceRegistry valuesSourceRegistry; private final IndexStorePlugin.DirectoryFactory remoteDirectoryFactory; private final BiFunction<IndexSettings, ShardRouting, TranslogFactory> translogFactorySupplier; + private volatile TimeValue clusterDefaultRefreshInterval; + private volatile TimeValue clusterRemoteTranslogBufferInterval; - private final FileCacheCleaner fileCacheCleaner; + private final SearchRequestStats searchRequestStats; @Override protected void doStart() { @@ -350,7 +394,10 @@ public IndicesService( Map<String, IndexStorePlugin.RecoveryStateFactory> recoveryStateFactories, IndexStorePlugin.DirectoryFactory remoteDirectoryFactory, Supplier<RepositoriesService> repositoriesServiceSupplier, - FileCacheCleaner fileCacheCleaner + SearchRequestStats searchRequestStats, + @Nullable RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, + RecoverySettings recoverySettings, + CacheService cacheService ) { this.settings = settings; this.threadPool = threadPool; @@ -361,7 +408,13 @@ public IndicesService( this.shardsClosedTimeout = settings.getAsTime(INDICES_SHARDS_CLOSED_TIMEOUT, new TimeValue(1, TimeUnit.DAYS));
this.analysisRegistry = analysisRegistry; this.indexNameExpressionResolver = indexNameExpressionResolver; - this.indicesRequestCache = new IndicesRequestCache(settings); + this.indicesRequestCache = new IndicesRequestCache(settings, (shardId -> { + IndexService indexService = this.indices.get(shardId.getIndex().getUUID()); + if (indexService == null) { + return Optional.empty(); + } + return Optional.of(new IndexShardCacheEntity(indexService.getShard(shardId.id()))); + }), cacheService); this.indicesQueryCache = new IndicesQueryCache(settings); this.mapperRegistry = mapperRegistry; this.namedWriteableRegistry = namedWriteableRegistry; @@ -396,7 +449,6 @@ public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, lon this.directoryFactories = directoryFactories; this.recoveryStateFactories = recoveryStateFactories; - this.fileCacheCleaner = fileCacheCleaner; // doClose() is called when shutting down a node, yet there might still be ongoing requests // that we need to wait for before closing some resources such as the caches. In order to // avoid closing these resources while ongoing requests are still being processed, we use a @@ -439,19 +491,45 @@ protected void closeInternal() { this.allowExpensiveQueries = ALLOW_EXPENSIVE_QUERIES.get(clusterService.getSettings()); clusterService.getClusterSettings().addSettingsUpdateConsumer(ALLOW_EXPENSIVE_QUERIES, this::setAllowExpensiveQueries); this.remoteDirectoryFactory = remoteDirectoryFactory; - this.translogFactorySupplier = getTranslogFactorySupplier(repositoriesServiceSupplier, threadPool); + this.translogFactorySupplier = getTranslogFactorySupplier(repositoriesServiceSupplier, threadPool, remoteStoreStatsTrackerFactory); + this.searchRequestStats = searchRequestStats; + this.clusterDefaultRefreshInterval = CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.get(clusterService.getSettings()); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING, this::onRefreshIntervalUpdate); + this.clusterRemoteTranslogBufferInterval = CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(clusterService.getSettings()); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, this::setClusterRemoteTranslogBufferInterval); + this.recoverySettings = recoverySettings; + } + + /** + * Handles changes to the dynamic cluster setting {@code cluster.default.index.refresh_interval}. This method is + * called whenever the setting changes. We store the updated value in an instance variable, since it also acts as + * a supplier to all IndexService instances created on the node, and we notify every IndexService instance created + * on this node of the change. + * + * @param clusterDefaultRefreshInterval the updated cluster default refresh interval.
+ */ + private void onRefreshIntervalUpdate(TimeValue clusterDefaultRefreshInterval) { + this.clusterDefaultRefreshInterval = clusterDefaultRefreshInterval; + for (Map.Entry<String, IndexService> entry : indices.entrySet()) { + IndexService indexService = entry.getValue(); + indexService.onRefreshIntervalChange(); + } } private static BiFunction<IndexSettings, ShardRouting, TranslogFactory> getTranslogFactorySupplier( Supplier<RepositoriesService> repositoriesServiceSupplier, - ThreadPool threadPool + ThreadPool threadPool, + RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory ) { return (indexSettings, shardRouting) -> { if (indexSettings.isRemoteTranslogStoreEnabled() && shardRouting.primary()) { return new RemoteBlobStoreInternalTranslogFactory( repositoriesServiceSupplier, threadPool, - indexSettings.getRemoteStoreTranslogRepository() + indexSettings.getRemoteStoreTranslogRepository(), + remoteStoreStatsTrackerFactory.getRemoteTranslogTransferTracker(shardRouting.shardId()) ); } return new InternalTranslogFactory(); @@ -539,7 +617,7 @@ public NodeIndicesStats stats(CommonStatsFlags flags) { } } - return new NodeIndicesStats(commonStats, statsByShard(this, flags)); + return new NodeIndicesStats(commonStats, statsByShard(this, flags), searchRequestStats); } Map<Index, List<IndexShardStats>> statsByShard(final IndicesService indicesService, final CommonStatsFlags flags) { @@ -686,7 +764,6 @@ public void onStoreClosed(ShardId shardId) { }; finalListeners.add(onStoreClose); finalListeners.add(oldShardsStats); - finalListeners.add(fileCacheCleaner); final IndexService indexService = createIndexService( CREATE_INDEX, indexMetadata, @@ -817,7 +894,10 @@ private synchronized IndexService createIndexService( this::isIdFieldDataEnabled, valuesSourceRegistry, remoteDirectoryFactory, - translogFactorySupplier + translogFactorySupplier, + this::getClusterDefaultRefreshInterval, + this::getClusterRemoteTranslogBufferInterval, + this.recoverySettings ); } @@ -864,7 +944,7 @@ private EngineFactory getEngineFactory(final IndexSettings idxSettings) { /** * creates a new mapper service for the given index, in order to do administrative work like mapping updates. * This *should not* be used for document parsing. Doing so will result in an exception. - * + * <p> * Note: the returned {@link MapperService} should be closed when unneeded. 
*/ public synchronized MapperService createIndexMapperService(IndexMetadata indexMetadata) throws IOException { @@ -928,7 +1008,7 @@ public IndexShard createShard( final RetentionLeaseSyncer retentionLeaseSyncer, final DiscoveryNode targetNode, final DiscoveryNode sourceNode, - final RemoteRefreshSegmentPressureService remoteRefreshSegmentPressureService + final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory ) throws IOException { Objects.requireNonNull(retentionLeaseSyncer); ensureChangesAllowed(); @@ -940,7 +1020,7 @@ public IndexShard createShard( globalCheckpointSyncer, retentionLeaseSyncer, checkpointPublisher, - remoteRefreshSegmentPressureService + remoteStoreStatsTrackerFactory ); indexShard.addShardFailureCallback(onShardFailure); indexShard.startRecovery(recoveryState, recoveryTargetService, recoveryListener, repositoriesService, mapping -> { @@ -950,7 +1030,7 @@ public IndexShard createShard( .indices() .preparePutMapping() .setConcreteIndex(shardRouting.index()) // concrete index - no name clash, it uses uuid - .setSource(mapping.source().string(), XContentType.JSON) + .setSource(mapping.source().string(), MediaTypeRegistry.JSON) .get(); }, this); return indexShard; @@ -1002,6 +1082,15 @@ public IndicesQueryCache getIndicesQueryCache() { return indicesQueryCache; } + /** + * Accumulate stats from the passed Object + * + * @param stats Instance storing {@link DocStatusStats} + */ + public void addDocStatusStats(final DocStatusStats stats) { + oldShardsStats.indexingStats.getTotal().getDocStatusStats().add(stats); + } + /** * Statistics for old shards * @@ -1067,7 +1156,7 @@ public void deleteUnassignedIndex(String reason, IndexMetadata metadata, Cluster /** * Deletes the index store trying to acquire all shards locks for this index. * This method will delete the metadata for the index even if the actual shards can't be locked. - * + * <p> * Package private for testing */ void deleteIndexStore(String reason, IndexMetadata metadata) throws IOException { @@ -1148,7 +1237,7 @@ public void deleteShardStore(String reason, ShardLock lock, IndexSettings indexS * This method deletes the shard contents on disk for the given shard ID. This method will fail if shard deletion * is prevented by {@link #canDeleteShardContent(ShardId, IndexSettings)} * or if the shard's lock cannot be acquired. - * + * <p> * On data nodes, if the deleted shard is the last shard folder in its index, the method will attempt to remove * the index folder as well. * @@ -1249,7 +1338,10 @@ public IndexMetadata verifyIndexIsDeleted(final Index index, final ClusterState /** * result type returned by {@link #canDeleteShardContent signaling different reasons why a shard can / cannot be deleted} + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum ShardDeletionCheckResult { FOLDER_FOUND_CAN_DELETE, // shard data exists and can be deleted STILL_ALLOCATED, // the shard is still allocated / active on this node @@ -1655,7 +1747,6 @@ private BytesReference cacheShardLevelResult( BytesReference cacheKey, CheckedConsumer<StreamOutput, IOException> loader ) throws Exception { - IndexShardCacheEntity cacheEntity = new IndexShardCacheEntity(shard); CheckedSupplier<BytesReference, IOException> supplier = () -> { /* BytesStreamOutput allows passing the expected size but by default uses * BigArrays.PAGE_SIZE_IN_BYTES which is 16k. A common cached result ie.
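The refactored caching path in this file decides hit versus miss by asking the loader whether it actually ran: cache.computeIfAbsent either returns the already-cached BytesReference or invokes the Loader, and getOrCompute then calls onMiss() or onHit() on the entity accordingly. A minimal, self-contained sketch of that load-aware pattern follows; the names are hypothetical and this is not the OpenSearch API, just the shape of the technique:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.LongAdder;
    import java.util.function.Function;

    // Sketch only: a compute-if-absent that records whether the loader ran,
    // so the caller can account a miss (loader ran) or a hit (value was cached).
    final class LoadAwareCache<K, V> {
        private final Map<K, V> map = new ConcurrentHashMap<>();
        final LongAdder hits = new LongAdder();
        final LongAdder misses = new LongAdder();

        V getOrCompute(K key, Function<K, V> loader) {
            boolean[] loaded = new boolean[1];
            V value = map.computeIfAbsent(key, k -> {
                loaded[0] = true;      // mirrors Loader.isLoaded() in the patch
                return loader.apply(k);
            });
            if (loaded[0]) {
                misses.increment();    // corresponds to cacheEntity.onMiss()
            } else {
                hits.increment();      // corresponds to cacheEntity.onHit()
            }
            return value;
        }
    }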
@@ -1672,7 +1763,7 @@ private BytesReference cacheShardLevelResult( return out.bytes(); } }; - return indicesRequestCache.getOrCompute(cacheEntity, supplier, reader, cacheKey); + return indicesRequestCache.getOrCompute(new IndexShardCacheEntity(shard), supplier, reader, cacheKey); } /** @@ -1680,11 +1771,12 @@ private BytesReference cacheShardLevelResult( * * @opensearch.internal */ - static final class IndexShardCacheEntity extends AbstractIndexShardCacheEntity { + public static class IndexShardCacheEntity extends AbstractIndexShardCacheEntity { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(IndexShardCacheEntity.class); private final IndexShard indexShard; - protected IndexShardCacheEntity(IndexShard indexShard) { + public IndexShardCacheEntity(IndexShard indexShard) { this.indexShard = indexShard; } @@ -1727,7 +1819,7 @@ public AliasFilter buildAliasFilter(ClusterState state, String index, Set<String CheckedFunction<BytesReference, QueryBuilder, IOException> filterParser = bytes -> { try ( InputStream inputStream = bytes.streamInput(); - XContentParser parser = XContentFactory.xContentType(inputStream) + XContentParser parser = MediaTypeRegistry.xContentType(inputStream) .xContent() .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, inputStream) ) { @@ -1859,4 +1951,85 @@ public boolean allPendingDanglingIndicesWritten() { return nodeWriteDanglingIndicesInfo == false || (danglingIndicesToWrite.isEmpty() && danglingIndicesThreadPoolExecutor.getActiveCount() == 0); } + + /** + * Validates the cluster default index refresh interval. + * + * @opensearch.internal + */ + private static final class ClusterDefaultRefreshIntervalValidator implements Setting.Validator<TimeValue> { + + @Override + public void validate(TimeValue value) { + + } + + @Override + public void validate(final TimeValue defaultRefreshInterval, final Map<Setting<?>, Object> settings) { + final TimeValue minimumRefreshInterval = (TimeValue) settings.get(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING); + validateRefreshIntervalSettings(minimumRefreshInterval, defaultRefreshInterval); + } + + @Override + public Iterator<Setting<?>> settings() { + final List<Setting<?>> settings = List.of(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING); + return settings.iterator(); + } + } + + /** + * Validates the cluster minimum index refresh interval. + * + * @opensearch.internal + */ + private static final class ClusterMinimumRefreshIntervalValidator implements Setting.Validator<TimeValue> { + + @Override + public void validate(TimeValue value) { + + } + + @Override + public void validate(final TimeValue minimumRefreshInterval, final Map<Setting<?>, Object> settings) { + final TimeValue defaultRefreshInterval = (TimeValue) settings.get(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING); + validateRefreshIntervalSettings(minimumRefreshInterval, defaultRefreshInterval); + } + + @Override + public Iterator<Setting<?>> settings() { + final List<Setting<?>> settings = List.of(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING); + return settings.iterator(); + } + } + + /** + * Validates that the cluster minimum refresh interval is not more than the cluster default refresh interval. 
+ * + * @param minimumRefreshInterval value of cluster minimum index refresh interval setting + * @param defaultRefreshInterval value of cluster default index refresh interval setting + */ + private static void validateRefreshIntervalSettings(TimeValue minimumRefreshInterval, TimeValue defaultRefreshInterval) { + if (minimumRefreshInterval.compareTo(defaultRefreshInterval) > 0) { + throw new IllegalArgumentException( + "cluster minimum index refresh interval [" + + minimumRefreshInterval + + "] more than cluster default index refresh interval [" + + defaultRefreshInterval + + "]" + ); + } + } + + private TimeValue getClusterDefaultRefreshInterval() { + return this.clusterDefaultRefreshInterval; + } + + // Exclusively for testing, please do not use it elsewhere. + public TimeValue getClusterRemoteTranslogBufferInterval() { + return clusterRemoteTranslogBufferInterval; + } + + private void setClusterRemoteTranslogBufferInterval(TimeValue clusterRemoteTranslogBufferInterval) { + this.clusterRemoteTranslogBufferInterval = clusterRemoteTranslogBufferInterval; + } } diff --git a/server/src/main/java/org/opensearch/indices/InvalidTypeNameException.java b/server/src/main/java/org/opensearch/indices/InvalidTypeNameException.java index 48c9595e789bd..2cb1ce74d857d 100644 --- a/server/src/main/java/org/opensearch/indices/InvalidTypeNameException.java +++ b/server/src/main/java/org/opensearch/indices/InvalidTypeNameException.java @@ -33,8 +33,8 @@ package org.opensearch.indices; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.index.mapper.MapperException; import org.opensearch.core.rest.RestStatus; +import org.opensearch.index.mapper.MapperException; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/indices/NodeIndicesStats.java b/server/src/main/java/org/opensearch/indices/NodeIndicesStats.java index 482cd07543051..35b6fd395ee12 100644 --- a/server/src/main/java/org/opensearch/indices/NodeIndicesStats.java +++ b/server/src/main/java/org/opensearch/indices/NodeIndicesStats.java @@ -35,13 +35,15 @@ import org.opensearch.action.admin.indices.stats.CommonStats; import org.opensearch.action.admin.indices.stats.IndexShardStats; import org.opensearch.action.admin.indices.stats.ShardStats; +import org.opensearch.action.search.SearchRequestStats; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.Index; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.index.Index; import org.opensearch.index.cache.query.QueryCacheStats; import org.opensearch.index.cache.request.RequestCacheStats; import org.opensearch.index.engine.SegmentsStats; @@ -68,10 +70,10 @@ /** * Global information on indices stats running on a specific node. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NodeIndicesStats implements Writeable, ToXContentFragment { - private CommonStats stats; private Map<Index, List<IndexShardStats>> statsByShard; @@ -92,7 +94,7 @@ public NodeIndicesStats(StreamInput in) throws IOException { } } - public NodeIndicesStats(CommonStats oldStats, Map<Index, List<IndexShardStats>> statsByShard) { + public NodeIndicesStats(CommonStats oldStats, Map<Index, List<IndexShardStats>> statsByShard, SearchRequestStats searchRequestStats) { // this.stats = stats; this.statsByShard = statsByShard; @@ -105,6 +107,9 @@ public NodeIndicesStats(CommonStats oldStats, Map<Index, List<IndexShardStats>> } } } + if (this.stats.search != null) { + this.stats.search.setSearchRequestStats(searchRequestStats); + } } @Nullable diff --git a/server/src/main/java/org/opensearch/indices/RunUnderPrimaryPermit.java b/server/src/main/java/org/opensearch/indices/RunUnderPrimaryPermit.java index b7995accda1da..683bb51f64c1d 100644 --- a/server/src/main/java/org/opensearch/indices/RunUnderPrimaryPermit.java +++ b/server/src/main/java/org/opensearch/indices/RunUnderPrimaryPermit.java @@ -9,10 +9,10 @@ package org.opensearch.indices; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.util.concurrent.FutureUtils; -import org.opensearch.common.lease.Releasable; +import org.opensearch.core.action.ActionListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardRelocatedException; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java b/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java index c86137177e750..e345b613eebbd 100644 --- a/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java +++ b/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java @@ -42,11 +42,11 @@ import org.opensearch.core.index.Index; import java.util.Arrays; -import java.util.Optional; -import java.util.Map; -import java.util.List; import java.util.Collections; import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Optional; import java.util.concurrent.atomic.AtomicInteger; import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING; @@ -54,7 +54,7 @@ /** * This class contains the logic used to check the cluster-wide shard limit before shards are created and to ensure that the limit is * updated correctly on setting updates, etc. - * + * <p> * NOTE: This is the limit applied at *shard creation time*. If you are looking for the limit applied at *allocation* time, which is * controlled by a different setting, * see {@link org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider}.
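Looping back to the two refresh-interval validators added to IndicesService earlier in this patch: both delegate to validateRefreshIntervalSettings, which only rejects a configuration where the cluster minimum exceeds the cluster default, so equal values pass. A standalone sketch of that comparison, using java.time.Duration as a stand-in for OpenSearch's TimeValue (an assumption for the sake of a runnable example):

    import java.time.Duration;

    // Sketch of the shared cross-setting check: the cluster minimum index refresh
    // interval may equal, but never exceed, the cluster default refresh interval.
    final class RefreshIntervalCheck {
        static void validate(Duration minimum, Duration defaultInterval) {
            if (minimum.compareTo(defaultInterval) > 0) {
                throw new IllegalArgumentException(
                    "cluster minimum index refresh interval [" + minimum
                        + "] more than cluster default index refresh interval [" + defaultInterval + "]"
                );
            }
        }

        public static void main(String[] args) {
            validate(Duration.ofSeconds(1), Duration.ofSeconds(5));   // passes
            validate(Duration.ofSeconds(1), Duration.ofSeconds(1));   // equal values pass too
            validate(Duration.ofSeconds(10), Duration.ofSeconds(5));  // throws
        }
    }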
diff --git a/server/src/main/java/org/opensearch/indices/analysis/HunspellService.java b/server/src/main/java/org/opensearch/indices/analysis/HunspellService.java index ade27a7ccc9f7..027cd502da1fb 100644 --- a/server/src/main/java/org/opensearch/indices/analysis/HunspellService.java +++ b/server/src/main/java/org/opensearch/indices/analysis/HunspellService.java @@ -38,11 +38,11 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.NIOFSDirectory; import org.opensearch.OpenSearchException; -import org.opensearch.common.util.io.IOUtils; -import org.opensearch.core.util.FileSystemUtils; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.util.FileSystemUtils; import org.opensearch.env.Environment; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/indices/analysis/PreBuiltCacheFactory.java b/server/src/main/java/org/opensearch/indices/analysis/PreBuiltCacheFactory.java index 601bd79a24746..13cc78b620b9a 100644 --- a/server/src/main/java/org/opensearch/indices/analysis/PreBuiltCacheFactory.java +++ b/server/src/main/java/org/opensearch/indices/analysis/PreBuiltCacheFactory.java @@ -48,11 +48,11 @@ public class PreBuiltCacheFactory { /** * The strategy of caching the analyzer - * - * ONE Exactly one version is stored. Useful for analyzers which do not store version information - * LUCENE Exactly one version for each lucene version is stored. Useful to prevent different analyzers with the same version - * OPENSEARCH Exactly one version per opensearch version is stored. Useful if you change an analyzer between opensearch - * releases, when the lucene version does not change + * <ul> + * <li>ONE : Exactly one version is stored. Useful for analyzers which do not store version information</li> + * <li>LUCENE : Exactly one version for each lucene version is stored. Useful to prevent different analyzers with the same version</li> + * <li>OPENSEARCH : Exactly one version per opensearch version is stored. Useful if you change an analyzer between opensearch releases, when the lucene version does not change</li> + * </ul> */ public enum CachingStrategy { ONE, diff --git a/server/src/main/java/org/opensearch/indices/breaker/AllCircuitBreakerStats.java b/server/src/main/java/org/opensearch/indices/breaker/AllCircuitBreakerStats.java deleted file mode 100644 index 83f3f9532948f..0000000000000 --- a/server/src/main/java/org/opensearch/indices/breaker/AllCircuitBreakerStats.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.indices.breaker; - -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.core.xcontent.ToXContentFragment; -import org.opensearch.core.xcontent.XContentBuilder; - -import java.io.IOException; - -/** - * Stats class encapsulating all of the different circuit breaker stats - * - * @opensearch.internal - */ -public class AllCircuitBreakerStats implements Writeable, ToXContentFragment { - - private final CircuitBreakerStats[] allStats; - - public AllCircuitBreakerStats(CircuitBreakerStats[] allStats) { - this.allStats = allStats; - } - - public AllCircuitBreakerStats(StreamInput in) throws IOException { - allStats = in.readArray(CircuitBreakerStats::new, CircuitBreakerStats[]::new); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeArray(allStats); - } - - public CircuitBreakerStats[] getAllStats() { - return this.allStats; - } - - public CircuitBreakerStats getStats(String name) { - for (CircuitBreakerStats stats : allStats) { - if (stats.getName().equals(name)) { - return stats; - } - } - return null; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(Fields.BREAKERS); - for (CircuitBreakerStats stats : allStats) { - if (stats != null) { - stats.toXContent(builder, params); - } - } - builder.endObject(); - return builder; - } - - /** - * Fields used for parsing and toXContent - * - * @opensearch.internal - */ - static final class Fields { - static final String BREAKERS = "breakers"; - } -} diff --git a/server/src/main/java/org/opensearch/indices/breaker/BreakerSettings.java b/server/src/main/java/org/opensearch/indices/breaker/BreakerSettings.java index a421253227dbf..e1ed346bfcb17 100644 --- a/server/src/main/java/org/opensearch/indices/breaker/BreakerSettings.java +++ b/server/src/main/java/org/opensearch/indices/breaker/BreakerSettings.java @@ -32,10 +32,10 @@ package org.opensearch.indices.breaker; -import org.opensearch.common.breaker.CircuitBreaker; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.unit.ByteSizeValue; /** * Settings for a {@link CircuitBreaker} diff --git a/server/src/main/java/org/opensearch/indices/breaker/CircuitBreakerStats.java b/server/src/main/java/org/opensearch/indices/breaker/CircuitBreakerStats.java deleted file mode 100644 index 94e63acd10648..0000000000000 --- a/server/src/main/java/org/opensearch/indices/breaker/CircuitBreakerStats.java +++ /dev/null @@ -1,148 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open 
source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.indices.breaker; - -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.core.xcontent.ToXContentObject; -import org.opensearch.core.xcontent.XContentBuilder; - -import java.io.IOException; -import java.util.Locale; - -/** - * Class encapsulating stats about the circuit breaker - * - * @opensearch.internal - */ -public class CircuitBreakerStats implements Writeable, ToXContentObject { - - private final String name; - private final long limit; - private final long estimated; - private final long trippedCount; - private final double overhead; - - public CircuitBreakerStats(String name, long limit, long estimated, double overhead, long trippedCount) { - this.name = name; - this.limit = limit; - this.estimated = estimated; - this.trippedCount = trippedCount; - this.overhead = overhead; - } - - public CircuitBreakerStats(StreamInput in) throws IOException { - this.limit = in.readLong(); - this.estimated = in.readLong(); - this.overhead = in.readDouble(); - this.trippedCount = in.readLong(); - this.name = in.readString(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeLong(limit); - out.writeLong(estimated); - out.writeDouble(overhead); - out.writeLong(trippedCount); - out.writeString(name); - } - - public String getName() { - return this.name; - } - - public long getLimit() { - return this.limit; - } - - public long getEstimated() { - return this.estimated; - } - - public long getTrippedCount() { - return this.trippedCount; - } - - public double getOverhead() { - return this.overhead; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(name.toLowerCase(Locale.ROOT)); - builder.field(Fields.LIMIT, limit); - builder.field(Fields.LIMIT_HUMAN, new ByteSizeValue(limit)); - builder.field(Fields.ESTIMATED, estimated); - builder.field(Fields.ESTIMATED_HUMAN, new ByteSizeValue(estimated)); - builder.field(Fields.OVERHEAD, overhead); - builder.field(Fields.TRIPPED_COUNT, trippedCount); - builder.endObject(); - return builder; - } - - @Override - public String toString() { - return "[" - + this.name - + ",limit=" - + this.limit - + "/" - + new ByteSizeValue(this.limit) - + ",estimated=" - + this.estimated - + "/" - + new ByteSizeValue(this.estimated) - + ",overhead=" - + this.overhead - + ",tripped=" - + this.trippedCount - + "]"; - } - - /** - * Fields used 
for statistics - * - * @opensearch.internal - */ - static final class Fields { - static final String LIMIT = "limit_size_in_bytes"; - static final String LIMIT_HUMAN = "limit_size"; - static final String ESTIMATED = "estimated_size_in_bytes"; - static final String ESTIMATED_HUMAN = "estimated_size"; - static final String OVERHEAD = "overhead"; - static final String TRIPPED_COUNT = "tripped"; - } -} diff --git a/server/src/main/java/org/opensearch/indices/breaker/HierarchyCircuitBreakerService.java b/server/src/main/java/org/opensearch/indices/breaker/HierarchyCircuitBreakerService.java index 40bb4894c7397..a5aceead1062f 100644 --- a/server/src/main/java/org/opensearch/indices/breaker/HierarchyCircuitBreakerService.java +++ b/server/src/main/java/org/opensearch/indices/breaker/HierarchyCircuitBreakerService.java @@ -37,17 +37,20 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.common.Booleans; import org.opensearch.common.breaker.ChildMemoryCircuitBreaker; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.breaker.CircuitBreakingException; -import org.opensearch.common.breaker.NoopCircuitBreaker; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ReleasableLock; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.CircuitBreakingException; +import org.opensearch.core.common.breaker.NoopCircuitBreaker; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.indices.breaker.AllCircuitBreakerStats; +import org.opensearch.core.indices.breaker.CircuitBreakerService; +import org.opensearch.core.indices.breaker.CircuitBreakerStats; import org.opensearch.monitor.jvm.GcNames; import org.opensearch.monitor.jvm.JvmInfo; diff --git a/server/src/main/java/org/opensearch/indices/breaker/NoneCircuitBreakerService.java b/server/src/main/java/org/opensearch/indices/breaker/NoneCircuitBreakerService.java deleted file mode 100644 index bcb47b48a5f14..0000000000000 --- a/server/src/main/java/org/opensearch/indices/breaker/NoneCircuitBreakerService.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.indices.breaker; - -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.breaker.NoopCircuitBreaker; - -/** - * Class that returns a breaker that never breaks - * - * @opensearch.internal - */ -public class NoneCircuitBreakerService extends CircuitBreakerService { - - private final CircuitBreaker breaker = new NoopCircuitBreaker(CircuitBreaker.FIELDDATA); - - public NoneCircuitBreakerService() { - super(); - } - - @Override - public CircuitBreaker getBreaker(String name) { - return breaker; - } - - @Override - public AllCircuitBreakerStats stats() { - return new AllCircuitBreakerStats(new CircuitBreakerStats[] { stats(CircuitBreaker.FIELDDATA) }); - } - - @Override - public CircuitBreakerStats stats(String name) { - return new CircuitBreakerStats(CircuitBreaker.FIELDDATA, -1, -1, 0, 0); - } - -} diff --git a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java index b3fc070d62e58..7fb8b172ae352 100644 --- a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java @@ -36,7 +36,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.ResourceAlreadyExistsException; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateApplier; @@ -45,27 +44,29 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; -import org.opensearch.cluster.routing.ShardRouting; -import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.cluster.routing.IndexShardRoutingTable; -import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.RecoverySource.Type; +import org.opensearch.cluster.routing.RoutingNode; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; -import org.opensearch.common.component.AbstractLifecycleComponent; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Inject; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.env.ShardLockObtainFailedException; import org.opensearch.gateway.GatewayService; -import org.opensearch.core.index.Index; import org.opensearch.index.IndexComponent; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; -import org.opensearch.index.remote.RemoteRefreshSegmentPressureService; +import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; import 
org.opensearch.index.seqno.GlobalCheckpointSyncAction; import org.opensearch.index.seqno.ReplicationTracker; import org.opensearch.index.seqno.RetentionLeaseSyncer; @@ -75,7 +76,6 @@ import org.opensearch.index.shard.IndexShardState; import org.opensearch.index.shard.PrimaryReplicaSyncer; import org.opensearch.index.shard.PrimaryReplicaSyncer.ResyncTask; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.ShardNotFoundException; import org.opensearch.indices.IndicesService; import org.opensearch.indices.recovery.PeerRecoverySourceService; @@ -149,7 +149,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple private final SegmentReplicationCheckpointPublisher checkpointPublisher; - private final RemoteRefreshSegmentPressureService remoteRefreshSegmentPressureService; + private final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory; @Inject public IndicesClusterStateService( @@ -170,7 +170,7 @@ public IndicesClusterStateService( final GlobalCheckpointSyncAction globalCheckpointSyncAction, final RetentionLeaseSyncer retentionLeaseSyncer, final SegmentReplicationCheckpointPublisher checkpointPublisher, - final RemoteRefreshSegmentPressureService remoteRefreshSegmentPressureService + final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory ) { this( settings, @@ -190,7 +190,7 @@ public IndicesClusterStateService( primaryReplicaSyncer, globalCheckpointSyncAction::updateGlobalCheckpointForShard, retentionLeaseSyncer, - remoteRefreshSegmentPressureService + remoteStoreStatsTrackerFactory ); } @@ -213,7 +213,7 @@ public IndicesClusterStateService( final PrimaryReplicaSyncer primaryReplicaSyncer, final Consumer<ShardId> globalCheckpointSyncer, final RetentionLeaseSyncer retentionLeaseSyncer, - final RemoteRefreshSegmentPressureService remoteRefreshSegmentPressureService + final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory ) { this.settings = settings; this.checkpointPublisher = checkpointPublisher; @@ -223,10 +223,7 @@ public IndicesClusterStateService( ); indexEventListeners.add(segmentReplicationTargetService); indexEventListeners.add(segmentReplicationSourceService); - // if remote store feature is not enabled, do not wire the remote upload pressure service as an IndexEventListener. 
- if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE)) { - indexEventListeners.add(remoteRefreshSegmentPressureService); - } + indexEventListeners.add(remoteStoreStatsTrackerFactory); this.segmentReplicationTargetService = segmentReplicationTargetService; this.builtInIndexListener = Collections.unmodifiableList(indexEventListeners); this.indicesService = indicesService; @@ -240,7 +237,7 @@ public IndicesClusterStateService( this.globalCheckpointSyncer = globalCheckpointSyncer; this.retentionLeaseSyncer = Objects.requireNonNull(retentionLeaseSyncer); this.sendRefreshMapping = settings.getAsBoolean("indices.cluster.send_refresh_mapping", true); - this.remoteRefreshSegmentPressureService = remoteRefreshSegmentPressureService; + this.remoteStoreStatsTrackerFactory = remoteStoreStatsTrackerFactory; } @Override @@ -683,7 +680,7 @@ private void createShard(DiscoveryNodes nodes, RoutingTable routingTable, ShardR retentionLeaseSyncer, nodes.getLocalNode(), sourceNode, - remoteRefreshSegmentPressureService + remoteStoreStatsTrackerFactory ); } catch (Exception e) { failAndRemoveShard(shardRouting, true, "failed to create shard", e, state); @@ -1028,6 +1025,7 @@ U createIndex(IndexMetadata indexMetadata, List<IndexEventListener> builtInIndex * @param retentionLeaseSyncer a callback when this shard syncs retention leases * @param targetNode the node where this shard will be recovered * @param sourceNode the source node to recover this shard from (it might be null) + * @param remoteStoreStatsTrackerFactory factory for remote store stats trackers * @return a new shard * @throws IOException if an I/O exception occurs when creating the shard */ @@ -1042,7 +1040,7 @@ T createShard( RetentionLeaseSyncer retentionLeaseSyncer, DiscoveryNode targetNode, @Nullable DiscoveryNode sourceNode, - RemoteRefreshSegmentPressureService remoteRefreshSegmentPressureService + RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory ) throws IOException; /** @@ -1062,8 +1060,9 @@ void processPendingDeletes(Index index, IndexSettings indexSettings, TimeValue t /** * Why the index was removed * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") enum IndexRemovalReason { /** * Shards of this index were previously assigned to this node but all shards have been relocated.
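One behavioral note on the hunk above: RemoteStoreStatsTrackerFactory is now registered as an index event listener unconditionally, where the old RemoteRefreshSegmentPressureService was only wired behind the REMOTE_STORE feature flag. A rough sketch of the registration pattern, with hypothetical names standing in for the OpenSearch listener types:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import java.util.function.Consumer;

    // Sketch: built-in listeners are collected once at construction and frozen,
    // matching the Collections.unmodifiableList(indexEventListeners) call above.
    final class IndexEventListeners {
        private final List<Consumer<String>> builtIn;

        IndexEventListeners(List<Consumer<String>> listeners) {
            this.builtIn = Collections.unmodifiableList(new ArrayList<>(listeners));
        }

        void onShardCreated(String shardId) {
            for (Consumer<String> listener : builtIn) {
                listener.accept(shardId);   // every registered listener sees every event
            }
        }

        public static void main(String[] args) {
            List<Consumer<String>> listeners = new ArrayList<>();
            listeners.add(id -> System.out.println("segment replication service: " + id));
            listeners.add(id -> System.out.println("stats tracker factory: " + id)); // no feature-flag gate
            new IndexEventListeners(listeners).onShardCreated("[index][0]");
        }
    }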
diff --git a/server/src/main/java/org/opensearch/indices/fielddata/cache/IndicesFieldDataCache.java b/server/src/main/java/org/opensearch/indices/fielddata/cache/IndicesFieldDataCache.java index dc18097419904..e78f05831d7ac 100644 --- a/server/src/main/java/org/opensearch/indices/fielddata/cache/IndicesFieldDataCache.java +++ b/server/src/main/java/org/opensearch/indices/fielddata/cache/IndicesFieldDataCache.java @@ -32,44 +32,45 @@ package org.opensearch.indices.fielddata.cache; -import java.util.Collections; - import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.IndexReader.CacheKey; +import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.util.Accountable; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.cache.Cache; import org.opensearch.common.cache.CacheBuilder; import org.opensearch.common.cache.RemovalListener; import org.opensearch.common.cache.RemovalNotification; import org.opensearch.common.cache.RemovalReason; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.common.lease.Releasable; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.index.Index; -import org.opensearch.index.fielddata.LeafFieldData; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexFieldDataCache; -import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.fielddata.LeafFieldData; import org.opensearch.index.shard.ShardUtils; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.function.ToLongBiFunction; /** * The field data cache for multiple indices * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndicesFieldDataCache implements RemovalListener<IndicesFieldDataCache.Key, Accountable>, Releasable { private static final Logger logger = LogManager.getLogger(IndicesFieldDataCache.class); @@ -253,8 +254,9 @@ public void clear(String fieldName) { /** * Key for the indices field data cache * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Key { public final IndexFieldCache indexCache; public final IndexReader.CacheKey readerKey; diff --git a/server/src/main/java/org/opensearch/indices/mapper/MapperRegistry.java b/server/src/main/java/org/opensearch/indices/mapper/MapperRegistry.java index c26428309aec5..7974de3514ce3 100644 --- a/server/src/main/java/org/opensearch/indices/mapper/MapperRegistry.java +++ b/server/src/main/java/org/opensearch/indices/mapper/MapperRegistry.java @@ -32,6 +32,7 @@ package org.opensearch.indices.mapper; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.mapper.Mapper; import org.opensearch.index.mapper.MetadataFieldMapper; import org.opensearch.plugins.MapperPlugin; @@ -45,8 +46,9 @@ /** * A registry for all field mappers. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class MapperRegistry { private final Map<String, Mapper.TypeParser> mapperParsers; diff --git a/server/src/main/java/org/opensearch/indices/recovery/FileChunkRequest.java b/server/src/main/java/org/opensearch/indices/recovery/FileChunkRequest.java index a40245cddda38..cc95d18604cff 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/FileChunkRequest.java +++ b/server/src/main/java/org/opensearch/indices/recovery/FileChunkRequest.java @@ -33,10 +33,10 @@ package org.opensearch.indices.recovery; import org.apache.lucene.util.Version; +import org.opensearch.common.lucene.Lucene; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.lucene.Lucene; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.store.StoreFileMetadata; diff --git a/server/src/main/java/org/opensearch/indices/recovery/FileChunkWriter.java b/server/src/main/java/org/opensearch/indices/recovery/FileChunkWriter.java index c7021a0e969b9..a1ae6af8e15e9 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/FileChunkWriter.java +++ b/server/src/main/java/org/opensearch/indices/recovery/FileChunkWriter.java @@ -8,7 +8,7 @@ package org.opensearch.indices.recovery; -import org.opensearch.action.ActionListener; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.index.store.StoreFileMetadata; diff --git a/server/src/main/java/org/opensearch/indices/recovery/LocalStorePeerRecoverySourceHandler.java b/server/src/main/java/org/opensearch/indices/recovery/LocalStorePeerRecoverySourceHandler.java index d7890d6e3e5e0..ac6b2e6b77d18 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/LocalStorePeerRecoverySourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/recovery/LocalStorePeerRecoverySourceHandler.java @@ -9,7 +9,6 @@ package org.opensearch.indices.recovery; import org.apache.lucene.index.IndexCommit; -import org.opensearch.action.ActionListener; import org.opensearch.action.StepListener; import org.opensearch.action.support.ThreadedActionListener; import org.opensearch.action.support.replication.ReplicationResponse; @@ -17,8 +16,9 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.SetOnce; import org.opensearch.common.concurrent.GatedCloseable; -import org.opensearch.common.unit.TimeValue; import org.opensearch.common.lease.Releasable; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; import org.opensearch.index.engine.RecoveryEngineException; import org.opensearch.index.seqno.ReplicationTracker; import org.opensearch.index.seqno.RetentionLease; @@ -161,6 +161,7 @@ && isTargetSameHistory() }, shardId + " removing retention lease for [" + request.targetAllocationId() + "]", shard, cancellableThreads, logger); deleteRetentionLeaseStep.whenComplete(ignored -> { + logger.debug("deleteRetentionLeaseStep completed"); assert Transports.assertNotTransportThread(this + "[phase1]"); phase1(wrappedSafeCommit.get(), startingSeqNo, () -> estimateNumOps, sendFileStep, false); }, onFailure); @@ -172,12 +173,14 @@ && isTargetSameHistory() assert startingSeqNo >= 0 : "startingSeqNo must be non negative. 
got: " + startingSeqNo; sendFileStep.whenComplete(r -> { + logger.debug("sendFileStep completed"); assert Transports.assertNotTransportThread(this + "[prepareTargetForTranslog]"); // For a sequence based recovery, the target can keep its local translog prepareTargetForTranslog(countNumberOfHistoryOperations(startingSeqNo), prepareEngineStep); }, onFailure); prepareEngineStep.whenComplete(prepareEngineTime -> { + logger.debug("prepareEngineStep completed"); assert Transports.assertNotTransportThread(this + "[phase2]"); /* * add shard to replication group (shard will receive replication requests from this point on) now that engine is open. diff --git a/server/src/main/java/org/opensearch/indices/recovery/MultiChunkTransfer.java b/server/src/main/java/org/opensearch/indices/recovery/MultiChunkTransfer.java index 86e021bb6bfff..e277160637366 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/MultiChunkTransfer.java +++ b/server/src/main/java/org/opensearch/indices/recovery/MultiChunkTransfer.java @@ -34,12 +34,12 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.core.Assertions; -import org.opensearch.action.ActionListener; import org.opensearch.common.collect.Tuple; import org.opensearch.common.util.concurrent.AsyncIOProcessor; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.Assertions; +import org.opensearch.core.action.ActionListener; import org.opensearch.index.seqno.LocalCheckpointTracker; import java.io.Closeable; diff --git a/server/src/main/java/org/opensearch/indices/recovery/MultiFileWriter.java b/server/src/main/java/org/opensearch/indices/recovery/MultiFileWriter.java index 0110e8b6d162a..29ee097d36cac 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/MultiFileWriter.java +++ b/server/src/main/java/org/opensearch/indices/recovery/MultiFileWriter.java @@ -37,11 +37,11 @@ import org.apache.lucene.store.IndexOutput; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; -import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.util.concurrent.AbstractRefCounted; import org.opensearch.common.util.concurrent.ConcurrentCollections; -import org.opensearch.common.lease.Releasable; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; diff --git a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoverySourceService.java b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoverySourceService.java index 66b5b3f8b7535..cb2bedf00de99 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoverySourceService.java +++ b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoverySourceService.java @@ -36,7 +36,6 @@ import org.apache.logging.log4j.Logger; import org.opensearch.ExceptionsHelper; import org.opensearch.ResourceNotFoundException; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ChannelActionListener; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.ClusterChangedEvent; @@ -46,14 +45,15 @@ import org.opensearch.cluster.service.ClusterService; import 
org.opensearch.common.Nullable; import org.opensearch.common.collect.Tuple; -import org.opensearch.common.component.AbstractLifecycleComponent; import org.opensearch.common.inject.Inject; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.FutureUtils; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexService; import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.IndicesService; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; @@ -376,7 +376,8 @@ private Tuple<RecoverySourceHandler, RemoteRecoveryTargetHandler> createRecovery transportService, request.targetNode(), recoverySettings, - throttleTime -> shard.recoveryStats().addThrottleTime(throttleTime) + throttleTime -> shard.recoveryStats().addThrottleTime(throttleTime), + shard.isRemoteTranslogEnabled() ); handler = RecoverySourceHandlerFactory.create(shard, recoveryTarget, request, recoverySettings); return Tuple.tuple(handler, recoveryTarget); diff --git a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java index a289c8f8a04b7..4232d32987e86 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java @@ -39,7 +39,6 @@ import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchTimeoutException; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateObserver; @@ -47,19 +46,22 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; -import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.engine.RecoveryEngineException; import org.opensearch.index.mapper.MapperException; import org.opensearch.index.shard.IllegalIndexShardStateException; import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.ShardNotFoundException; import org.opensearch.index.store.Store; import org.opensearch.index.translog.Translog; @@ -74,7 +76,6 @@ import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestHandler; -import 
org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; @@ -91,8 +92,9 @@ * Note, it can be safely assumed that there will only be a single recovery per shard (index+id) and * not several of them (since we don't allocate several shard replicas to the same node). * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class PeerRecoveryTargetService implements IndexEventListener { private static final Logger logger = LogManager.getLogger(PeerRecoveryTargetService.class); @@ -245,7 +247,18 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi indexShard.prepareForIndexRecovery(); final boolean hasRemoteSegmentStore = indexShard.indexSettings().isRemoteStoreEnabled(); if (hasRemoteSegmentStore) { - indexShard.syncSegmentsFromRemoteSegmentStore(false, false, true); + // TODO: This is a temporary mitigation so that an exception while downloading segments from + // the remote store does not fail the peer recovery flow. For remote-backed indices, we + // plan to revamp this flow so that node-to-node segment copy will not happen. + // GitHub issue tracking the revamp: https://github.com/opensearch-project/OpenSearch/issues/11331 + try { + indexShard.syncSegmentsFromRemoteSegmentStore(false, recoveryTarget::setLastAccessTime); + } catch (Exception e) { + logger.error( + "Exception while downloading segment files from remote store, will continue with peer to peer segment copy", + e + ); + } } final boolean hasRemoteTranslog = recoveryTarget.state().getPrimary() == false && indexShard.isRemoteTranslogEnabled(); final boolean hasNoTranslog = indexShard.indexSettings().isRemoteSnapshot(); @@ -264,7 +277,7 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi actionName = PeerRecoverySourceService.Actions.START_RECOVERY; } catch (final Exception e) { // this will be logged as a warning later on...
- logger.trace("unexpected error while preparing shard for peer recovery, failing recovery", e); + logger.debug("unexpected error while preparing shard for peer recovery, failing recovery", e); onGoingRecoveries.fail( recoveryId, new RecoveryFailedException(recoveryTarget.state(), "failed to prepare shard for recovery", e), @@ -272,12 +285,12 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi ); return; } - logger.trace("{} starting recovery from {}", startRequest.shardId(), startRequest.sourceNode()); + logger.debug("{} starting recovery from {}", startRequest.shardId(), startRequest.sourceNode()); } else { startRequest = preExistingRequest; requestToSend = new ReestablishRecoveryRequest(recoveryId, startRequest.shardId(), startRequest.targetAllocationId()); actionName = PeerRecoverySourceService.Actions.REESTABLISH_RECOVERY; - logger.trace("{} reestablishing recovery from {}", startRequest.shardId(), startRequest.sourceNode()); + logger.debug("{} reestablishing recovery from {}", startRequest.shardId(), startRequest.sourceNode()); } } transportService.sendRequest( diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverFilesRecoveryException.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverFilesRecoveryException.java index 8d60b44c2c6bd..12e628b9b7e7d 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoverFilesRecoveryException.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverFilesRecoveryException.java @@ -36,7 +36,7 @@ import org.opensearch.OpenSearchWrapperException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.index.shard.ShardId; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryHandoffPrimaryContextRequest.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryHandoffPrimaryContextRequest.java index 3662d9106cacc..c7fad7b12f10b 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryHandoffPrimaryContextRequest.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryHandoffPrimaryContextRequest.java @@ -34,8 +34,8 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.index.seqno.ReplicationTracker; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.seqno.ReplicationTracker; import org.opensearch.transport.TransportRequest; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryListener.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryListener.java index c8c2fbfc896eb..c1832515e97de 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryListener.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryListener.java @@ -9,6 +9,7 @@ package org.opensearch.indices.recovery; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.indices.replication.common.ReplicationFailedException; import org.opensearch.indices.replication.common.ReplicationListener; @@ -19,6 +20,7 @@ * * @opensearch.internal */ +@PublicApi(since = "2.2.0") public class 
RecoveryListener implements ReplicationListener { /** diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryResponse.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryResponse.java index 8af69b1786e38..c1203a9f4939f 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryResponse.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryResponse.java @@ -34,7 +34,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; import java.util.List; diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java index 60076f1668af8..2b41eb125d808 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java @@ -36,19 +36,24 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.store.RateLimiter; import org.apache.lucene.store.RateLimiter.SimpleRateLimiter; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; + +import java.util.concurrent.TimeUnit; /** * Settings for the recovery mechanism * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RecoverySettings { private static final Logger logger = LogManager.getLogger(RecoverySettings.class); @@ -84,6 +89,17 @@ public class RecoverySettings { Property.NodeScope ); + /** + * Controls the maximum number of streams that can be started concurrently per recovery when downloading from the remote store. + */ + public static final Setting<Integer> INDICES_RECOVERY_MAX_CONCURRENT_REMOTE_STORE_STREAMS_SETTING = new Setting<>( + "indices.recovery.max_concurrent_remote_store_streams", + (s) -> Integer.toString(Math.max(1, OpenSearchExecutors.allocatedProcessors(s) / 2)), + (s) -> Setting.parseInt(s, 1, "indices.recovery.max_concurrent_remote_store_streams"), + Property.Dynamic, + Property.NodeScope + ); + /** * how long to wait before retrying after issues caused by cluster state syncing between nodes * i.e., local node is not yet known on remote node, remote shard not yet started etc. @@ -143,12 +159,39 @@ public class RecoverySettings { Property.NodeScope ); + /** + * Controls the minimum number of metadata files to keep in the remote segment store. + * {@code value < 1} will disable deletion of stale segment metadata files.
+ */ + public static final Setting<Integer> CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING = Setting.intSetting( + "cluster.remote_store.index.segment_metadata.retention.max_count", + 10, + -1, + v -> { + if (v == 0) { + throw new IllegalArgumentException( + "Value 0 is not allowed for this setting as it would delete all the data from remote segment store" + ); + } + }, + Property.NodeScope, + Property.Dynamic + ); + + public static final Setting<TimeValue> INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT = Setting.timeSetting( + "indices.recovery.internal_remote_upload_timeout", + new TimeValue(1, TimeUnit.HOURS), + Property.Dynamic, + Property.NodeScope + ); + // choose 512KB-16B to ensure that the resulting byte[] is not a humongous allocation in G1. public static final ByteSizeValue DEFAULT_CHUNK_SIZE = new ByteSizeValue(512 * 1024 - 16, ByteSizeUnit.BYTES); private volatile ByteSizeValue maxBytesPerSec; private volatile int maxConcurrentFileChunks; private volatile int maxConcurrentOperations; + private volatile int maxConcurrentRemoteStoreStreams; private volatile SimpleRateLimiter rateLimiter; private volatile TimeValue retryDelayStateSync; private volatile TimeValue retryDelayNetwork; @@ -156,13 +199,16 @@ public class RecoverySettings { private volatile TimeValue internalActionTimeout; private volatile TimeValue internalActionRetryTimeout; private volatile TimeValue internalActionLongTimeout; + private volatile int minRemoteSegmentMetadataFiles; private volatile ByteSizeValue chunkSize = DEFAULT_CHUNK_SIZE; + private volatile TimeValue internalRemoteUploadTimeout; public RecoverySettings(Settings settings, ClusterSettings clusterSettings) { this.retryDelayStateSync = INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.get(settings); this.maxConcurrentFileChunks = INDICES_RECOVERY_MAX_CONCURRENT_FILE_CHUNKS_SETTING.get(settings); this.maxConcurrentOperations = INDICES_RECOVERY_MAX_CONCURRENT_OPERATIONS_SETTING.get(settings); + this.maxConcurrentRemoteStoreStreams = INDICES_RECOVERY_MAX_CONCURRENT_REMOTE_STORE_STREAMS_SETTING.get(settings); // doesn't have to be fast as nodes are reconnected every 10s by default (see InternalClusterService.ReconnectToNodes) // and we want to give the cluster-manager time to remove a faulty node this.retryDelayNetwork = INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.get(settings); @@ -180,10 +226,15 @@ public RecoverySettings(Settings settings, ClusterSettings clusterSettings) { } logger.debug("using max_bytes_per_sec[{}]", maxBytesPerSec); + this.internalRemoteUploadTimeout = INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT.get(settings); clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING, this::setMaxBytesPerSec); clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_MAX_CONCURRENT_FILE_CHUNKS_SETTING, this::setMaxConcurrentFileChunks); clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_MAX_CONCURRENT_OPERATIONS_SETTING, this::setMaxConcurrentOperations); + clusterSettings.addSettingsUpdateConsumer( + INDICES_RECOVERY_MAX_CONCURRENT_REMOTE_STORE_STREAMS_SETTING, + this::setMaxConcurrentRemoteStoreStreams + ); clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING, this::setRetryDelayStateSync); clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING, this::setRetryDelayNetwork); clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING, this::setInternalActionTimeout); @@ -192,6 +243,13 @@ public 
RecoverySettings(Settings settings, ClusterSettings clusterSettings) { this::setInternalActionLongTimeout ); clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, this::setActivityTimeout); + minRemoteSegmentMetadataFiles = CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer( + CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING, + this::setMinRemoteSegmentMetadataFiles + ); + clusterSettings.addSettingsUpdateConsumer(INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT, this::setInternalRemoteUploadTimeout); + } public RateLimiter rateLimiter() { @@ -222,6 +280,10 @@ public TimeValue internalActionLongTimeout() { return internalActionLongTimeout; } + public TimeValue internalRemoteUploadTimeout() { + return internalRemoteUploadTimeout; + } + public ByteSizeValue getChunkSize() { return chunkSize; } @@ -253,6 +315,10 @@ public void setInternalActionLongTimeout(TimeValue internalActionLongTimeout) { this.internalActionLongTimeout = internalActionLongTimeout; } + public void setInternalRemoteUploadTimeout(TimeValue internalRemoteUploadTimeout) { + this.internalRemoteUploadTimeout = internalRemoteUploadTimeout; + } + private void setMaxBytesPerSec(ByteSizeValue maxBytesPerSec) { this.maxBytesPerSec = maxBytesPerSec; if (maxBytesPerSec.getBytes() <= 0) { @@ -279,4 +345,20 @@ public int getMaxConcurrentOperations() { private void setMaxConcurrentOperations(int maxConcurrentOperations) { this.maxConcurrentOperations = maxConcurrentOperations; } + + public int getMaxConcurrentRemoteStoreStreams() { + return this.maxConcurrentRemoteStoreStreams; + } + + private void setMaxConcurrentRemoteStoreStreams(int maxConcurrentRemoteStoreStreams) { + this.maxConcurrentRemoteStoreStreams = maxConcurrentRemoteStoreStreams; + } + + private void setMinRemoteSegmentMetadataFiles(int minRemoteSegmentMetadataFiles) { + this.minRemoteSegmentMetadataFiles = minRemoteSegmentMetadataFiles; + } + + public int getMinRemoteSegmentMetadataFiles() { + return this.minRemoteSegmentMetadataFiles; + } } diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java index 0b343fb0b0871..7996c48b2b04b 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java @@ -39,7 +39,6 @@ import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.store.RateLimiter; import org.apache.lucene.util.ArrayUtil; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.action.StepListener; import org.opensearch.action.support.PlainActionFuture; @@ -48,16 +47,17 @@ import org.opensearch.common.CheckedRunnable; import org.opensearch.common.StopWatch; import org.opensearch.common.concurrent.GatedCloseable; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.logging.Loggers; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.util.concurrent.FutureUtils; import org.opensearch.common.util.concurrent.ListenableFuture; import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.io.IOUtils; -import 
org.opensearch.common.lease.Releasable; -import org.opensearch.common.lease.Releasables; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.index.engine.RecoveryEngineException; import org.opensearch.index.seqno.RetentionLease; import org.opensearch.index.seqno.RetentionLeaseNotFoundException; @@ -201,9 +201,13 @@ protected void finalizeStepAndCompleteFuture( final StepListener<Void> finalizeStep = new StepListener<>(); // Recovery target can trim all operations >= startingSeqNo as we have sent all these operations in the phase 2 final long trimAboveSeqNo = startingSeqNo - 1; - sendSnapshotStep.whenComplete(r -> finalizeRecovery(r.targetLocalCheckpoint, trimAboveSeqNo, finalizeStep), onFailure); + sendSnapshotStep.whenComplete(r -> { + logger.debug("sendSnapshotStep completed"); + finalizeRecovery(r.targetLocalCheckpoint, trimAboveSeqNo, finalizeStep); + }, onFailure); finalizeStep.whenComplete(r -> { + logger.debug("finalizeStep completed"); final long phase1ThrottlingWaitTime = 0L; // TODO: return the actual throttle time final SendSnapshotResult sendSnapshotResult = sendSnapshotStep.result(); final SendFileResult sendFileResult = sendFileStep.result(); @@ -233,7 +237,10 @@ protected void onSendFileStepComplete( GatedCloseable<IndexCommit> wrappedSafeCommit, Releasable releaseStore ) { - sendFileStep.whenComplete(r -> IOUtils.close(wrappedSafeCommit, releaseStore), e -> { + sendFileStep.whenComplete(r -> { + logger.debug("sendFileStep completed"); + IOUtils.close(wrappedSafeCommit, releaseStore); + }, e -> { try { IOUtils.close(wrappedSafeCommit, releaseStore); } catch (final IOException ex) { @@ -445,16 +452,22 @@ void phase1( sendFileInfoStep ); - sendFileInfoStep.whenComplete( - r -> sendFiles(store, phase1Files.toArray(new StoreFileMetadata[0]), translogOps, sendFilesStep), - listener::onFailure - ); + sendFileInfoStep.whenComplete(r -> { + logger.debug("sendFileInfoStep completed"); + sendFiles(store, phase1Files.toArray(new StoreFileMetadata[0]), translogOps, sendFilesStep); + }, listener::onFailure); // When doing peer recovery of remote store enabled replica, retention leases are not required. 
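+            // Remote-store-backed replicas replay operations from the remote translog rather than
+            // from this source shard, so there is no peer history that a retention lease would need
+            // to pin in this path.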
if (skipCreateRetentionLeaseStep) { - sendFilesStep.whenComplete(r -> createRetentionLeaseStep.onResponse(null), listener::onFailure); + sendFilesStep.whenComplete(r -> { + logger.debug("sendFilesStep completed"); + createRetentionLeaseStep.onResponse(null); + }, listener::onFailure); } else { - sendFilesStep.whenComplete(r -> createRetentionLease(startingSeqNo, createRetentionLeaseStep), listener::onFailure); + sendFilesStep.whenComplete(r -> { + logger.debug("sendFilesStep completed"); + createRetentionLease(startingSeqNo, createRetentionLeaseStep); + }, listener::onFailure); } createRetentionLeaseStep.whenComplete(retentionLease -> { @@ -471,6 +484,7 @@ void phase1( final long totalSize = totalSizeInBytes; final long existingTotalSize = existingTotalSizeInBytes; cleanFilesStep.whenComplete(r -> { + logger.debug("cleanFilesStep completed"); final TimeValue took = stopWatch.totalTime(); logger.trace("recovery [phase1]: took [{}]", took); listener.onResponse( @@ -541,7 +555,10 @@ void createRetentionLease(final long startingSeqNo, ActionListener<RetentionLeas new ThreadedActionListener<>(logger, shard.getThreadPool(), ThreadPool.Names.GENERIC, cloneRetentionLeaseStep, false) ); logger.trace("cloned primary's retention lease as [{}]", clonedLease); - cloneRetentionLeaseStep.whenComplete(rr -> listener.onResponse(clonedLease), listener::onFailure); + cloneRetentionLeaseStep.whenComplete(rr -> { + logger.debug("cloneRetentionLeaseStep completed"); + listener.onResponse(clonedLease); + }, listener::onFailure); } catch (RetentionLeaseNotFoundException e) { // it's possible that the primary has no retention lease yet if we are doing a rolling upgrade from a version before // 7.4, and in that case we just create a lease using the local checkpoint of the safe commit which we're using for @@ -554,7 +571,10 @@ void createRetentionLease(final long startingSeqNo, ActionListener<RetentionLeas estimatedGlobalCheckpoint, new ThreadedActionListener<>(logger, shard.getThreadPool(), ThreadPool.Names.GENERIC, addRetentionLeaseStep, false) ); - addRetentionLeaseStep.whenComplete(rr -> listener.onResponse(newLease), listener::onFailure); + addRetentionLeaseStep.whenComplete(rr -> { + logger.debug("addRetentionLeaseStep completed"); + listener.onResponse(newLease); + }, listener::onFailure); logger.trace("created retention lease with estimated checkpoint of [{}]", estimatedGlobalCheckpoint); } }, shardId + " establishing retention lease for [" + request.targetAllocationId() + "]", shard, cancellableThreads, logger); @@ -810,6 +830,7 @@ void finalizeRecovery(long targetLocalCheckpoint, long trimAboveSeqNo, ActionLis cancellableThreads.checkForCancel(); recoveryTarget.finalizeRecovery(globalCheckpoint, trimAboveSeqNo, finalizeListener); finalizeListener.whenComplete(r -> { + logger.debug("finalizeListenerStep completed"); RunUnderPrimaryPermit.run( () -> shard.updateGlobalCheckpointForShard(request.targetAllocationId(), globalCheckpoint), shardId + " updating " + request.targetAllocationId() + "'s global checkpoint", diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java index 3418e82bc25b0..80c05399df627 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java @@ -36,16 +36,17 @@ import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.ShardRouting; import 
org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; -import org.opensearch.indices.replication.common.ReplicationState; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; +import org.opensearch.indices.replication.common.ReplicationState; import org.opensearch.indices.replication.common.ReplicationTimer; import java.io.IOException; @@ -54,15 +55,17 @@ /** * Keeps track of state related to shard recovery. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RecoveryState implements ReplicationState, ToXContentFragment, Writeable { /** * The stage of the recovery state * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Stage { INIT((byte) 0), @@ -370,8 +373,9 @@ static final class Fields { /** * Verifies the Lucene index * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class VerifyIndex extends ReplicationTimer implements ToXContentFragment, Writeable { private volatile long checkIndexTime; @@ -412,8 +416,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws /** * The translog * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Translog extends ReplicationTimer implements ToXContentFragment, Writeable { public static final int UNKNOWN = -1; diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java index 1eba50ffcf172..f3b5d0d790f83 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java @@ -35,15 +35,16 @@ import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.index.IndexFormatTooOldException; -import org.opensearch.core.Assertions; import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.UUIDs; -import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.util.CancellableThreads; +import org.opensearch.core.Assertions; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.index.engine.Engine; import org.opensearch.index.mapper.MapperException; import org.opensearch.index.seqno.ReplicationTracker; @@ -73,8 +74,9 @@ * Represents a recovery where the current node is the target node of the recovery. To track recoveries in a central place, instances of * this class are created through {@link ReplicationCollection}.
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RecoveryTarget extends ReplicationTarget implements RecoveryTargetHandler { private static final String RECOVERY_PREFIX = "recovery."; diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTargetHandler.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTargetHandler.java index b43253c32d844..707e41c8c27e1 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTargetHandler.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTargetHandler.java @@ -31,7 +31,7 @@ package org.opensearch.indices.recovery; -import org.opensearch.action.ActionListener; +import org.opensearch.core.action.ActionListener; import org.opensearch.index.seqno.ReplicationTracker; import org.opensearch.index.seqno.RetentionLeases; import org.opensearch.index.store.Store; @@ -55,7 +55,7 @@ public interface RecoveryTargetHandler extends FileChunkWriter { /** * Used with Segment replication only - * + * <p> * This function is used to force a sync target primary node with source (old primary). This is to avoid segment files * conflict with replicas when target is promoted as primary. */ diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTranslogOperationsRequest.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTranslogOperationsRequest.java index e8c24fb10e02f..73f7c81050cc8 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTranslogOperationsRequest.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTranslogOperationsRequest.java @@ -34,8 +34,8 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.index.seqno.RetentionLeases; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.seqno.RetentionLeases; import org.opensearch.index.translog.Translog; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTranslogOperationsResponse.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTranslogOperationsResponse.java index b623d382b415f..9ea9f1d48a494 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTranslogOperationsResponse.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTranslogOperationsResponse.java @@ -34,7 +34,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/indices/recovery/RemoteRecoveryTargetHandler.java b/server/src/main/java/org/opensearch/indices/recovery/RemoteRecoveryTargetHandler.java index cdc62350b4aa5..37227596fdfe7 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RemoteRecoveryTargetHandler.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RemoteRecoveryTargetHandler.java @@ -34,13 +34,14 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.Writeable; +import 
org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.index.seqno.ReplicationTracker; import org.opensearch.index.seqno.RetentionLeases; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.index.translog.Translog; @@ -48,7 +49,6 @@ import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.transport.EmptyTransportResponseHandler; import org.opensearch.transport.TransportRequestOptions; -import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportService; import java.util.List; @@ -75,6 +75,7 @@ public class RemoteRecoveryTargetHandler implements RecoveryTargetHandler { private final AtomicLong requestSeqNoGenerator = new AtomicLong(0); private final RetryableTransportClient retryableTransportClient; private final RemoteSegmentFileChunkWriter fileChunkWriter; + private final boolean remoteStoreEnabled; public RemoteRecoveryTargetHandler( long recoveryId, @@ -82,7 +83,8 @@ public RemoteRecoveryTargetHandler( TransportService transportService, DiscoveryNode targetNode, RecoverySettings recoverySettings, - Consumer<Long> onSourceThrottle + Consumer<Long> onSourceThrottle, + boolean remoteStoreEnabled ) { this.transportService = transportService; // It is safe to pass the retry timeout value here because RemoteRecoveryTargetHandler @@ -111,6 +113,7 @@ public RemoteRecoveryTargetHandler( requestSeqNoGenerator, onSourceThrottle ); + this.remoteStoreEnabled = remoteStoreEnabled; } public DiscoveryNode targetNode() { @@ -129,7 +132,13 @@ public void prepareForTranslogOperations(int totalTranslogOps, ActionListener<Vo ); final Writeable.Reader<TransportResponse.Empty> reader = in -> TransportResponse.Empty.INSTANCE; final ActionListener<TransportResponse.Empty> responseListener = ActionListener.map(listener, r -> null); - retryableTransportClient.executeRetryableAction(action, request, responseListener, reader); + if (remoteStoreEnabled) { + // If remote store is enabled, during the prepare_translog phase, translog is also downloaded on the + // target host along with incremental segments download. + retryableTransportClient.executeRetryableAction(action, request, translogOpsRequestOptions, responseListener, reader); + } else { + retryableTransportClient.executeRetryableAction(action, request, responseListener, reader); + } } @Override @@ -189,7 +198,7 @@ public void indexTranslogOperations( /** * Used with Segment replication only - * + * <p> * This function is used to force a sync target primary node with source (old primary). This is to avoid segment files * conflict with replicas when target is promoted as primary. 
*/ diff --git a/server/src/main/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandler.java b/server/src/main/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandler.java index 6af4ce7461633..66c7a3b48f28f 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandler.java @@ -9,11 +9,11 @@ package org.opensearch.indices.recovery; import org.apache.lucene.index.IndexCommit; -import org.opensearch.action.ActionListener; import org.opensearch.action.StepListener; import org.opensearch.common.concurrent.GatedCloseable; -import org.opensearch.common.unit.TimeValue; import org.opensearch.common.lease.Releasable; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; import org.opensearch.index.engine.RecoveryEngineException; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.IndexShard; @@ -80,12 +80,14 @@ protected void innerRecoveryToTarget(ActionListener<RecoveryResponse> listener, assert startingSeqNo >= 0 : "startingSeqNo must be non negative. got: " + startingSeqNo; sendFileStep.whenComplete(r -> { + logger.debug("sendFileStep completed"); assert Transports.assertNotTransportThread(this + "[prepareTargetForTranslog]"); // For a sequence based recovery, the target can keep its local translog prepareTargetForTranslog(0, prepareEngineStep); }, onFailure); prepareEngineStep.whenComplete(prepareEngineTime -> { + logger.debug("prepareEngineStep completed"); assert Transports.assertNotTransportThread(this + "[phase2]"); RunUnderPrimaryPermit.run( () -> shard.initiateTracking(request.targetAllocationId()), diff --git a/server/src/main/java/org/opensearch/indices/recovery/RetryableTransportClient.java b/server/src/main/java/org/opensearch/indices/recovery/RetryableTransportClient.java index 04bee5586cbab..9a22936dd6551 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RetryableTransportClient.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RetryableTransportClient.java @@ -10,23 +10,23 @@ import org.apache.logging.log4j.Logger; import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionListenerResponseHandler; import org.opensearch.action.support.RetryableAction; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.common.breaker.CircuitBreakingException; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.breaker.CircuitBreakingException; +import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.ConnectTransportException; import org.opensearch.transport.RemoteTransportException; import org.opensearch.transport.SendRequestTransportException; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestOptions; -import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportService; import java.util.Map; diff --git 
a/server/src/main/java/org/opensearch/indices/recovery/StartRecoveryRequest.java b/server/src/main/java/org/opensearch/indices/recovery/StartRecoveryRequest.java index 60aa413cbca4f..1df0d3861f686 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/StartRecoveryRequest.java +++ b/server/src/main/java/org/opensearch/indices/recovery/StartRecoveryRequest.java @@ -33,10 +33,11 @@ package org.opensearch.indices.recovery; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.store.Store; import org.opensearch.transport.TransportRequest; @@ -45,8 +46,9 @@ /** * Represents a request for starting a peer recovery. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class StartRecoveryRequest extends TransportRequest { private long recoveryId; diff --git a/server/src/main/java/org/opensearch/indices/replication/CheckpointInfoResponse.java b/server/src/main/java/org/opensearch/indices/replication/CheckpointInfoResponse.java index 0155883f34552..9fd3b7f3afb80 100644 --- a/server/src/main/java/org/opensearch/indices/replication/CheckpointInfoResponse.java +++ b/server/src/main/java/org/opensearch/indices/replication/CheckpointInfoResponse.java @@ -10,9 +10,9 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; -import org.opensearch.transport.TransportResponse; import java.io.IOException; import java.util.Map; diff --git a/server/src/main/java/org/opensearch/indices/replication/GetSegmentFilesResponse.java b/server/src/main/java/org/opensearch/indices/replication/GetSegmentFilesResponse.java index 89d50a17464a6..234c053b27b25 100644 --- a/server/src/main/java/org/opensearch/indices/replication/GetSegmentFilesResponse.java +++ b/server/src/main/java/org/opensearch/indices/replication/GetSegmentFilesResponse.java @@ -10,8 +10,8 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.index.store.StoreFileMetadata; -import org.opensearch.transport.TransportResponse; import java.io.IOException; import java.util.List; @@ -33,6 +33,10 @@ public GetSegmentFilesResponse(StreamInput out) throws IOException { out.readList(StoreFileMetadata::new); } + public List<StoreFileMetadata> getFiles() { + return files; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeCollection(files); diff --git a/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java b/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java index e0e356f1531e1..33967c0203516 100644 --- a/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java +++ b/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java @@ -12,12 +12,12 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; 
import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexService; import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.IndicesService; import org.opensearch.indices.recovery.FileChunkWriter; import org.opensearch.indices.recovery.RecoverySettings; @@ -61,8 +61,8 @@ class OngoingSegmentReplications { this.allocationIdToHandlers = ConcurrentCollections.newConcurrentMap(); } - /** - * Operations on the {@link #copyStateMap} member. + /* + Operations on the {@link #copyStateMap} member. */ /** @@ -85,12 +85,12 @@ synchronized CopyState getCachedCopyState(ReplicationCheckpoint checkpoint) thro // build the CopyState object and cache it before returning final CopyState copyState = new CopyState(checkpoint, indexShard); - /** - * Use the checkpoint from the request as the key in the map, rather than - * the checkpoint from the created CopyState. This maximizes cache hits - * if replication targets make a request with an older checkpoint. - * Replication targets are expected to fetch the checkpoint in the response - * CopyState to bring themselves up to date. + /* + Use the checkpoint from the request as the key in the map, rather than + the checkpoint from the created CopyState. This maximizes cache hits + if replication targets make a request with an older checkpoint. + Replication targets are expected to fetch the checkpoint in the response + CopyState to bring themselves up to date. */ addToCopyStateMap(checkpoint, copyState); return copyState; @@ -139,13 +139,25 @@ void startSegmentCopy(GetSegmentFilesRequest request, ActionListener<GetSegmentF */ CopyState prepareForReplication(CheckpointInfoRequest request, FileChunkWriter fileChunkWriter) throws IOException { final CopyState copyState = getCachedCopyState(request.getCheckpoint()); - allocationIdToHandlers.compute(request.getTargetAllocationId(), (allocationId, segrepHandler) -> { - if (segrepHandler != null) { - logger.warn("Override handler for allocation id {}", request.getTargetAllocationId()); - cancelHandlers(handler -> handler.getAllocationId().equals(request.getTargetAllocationId()), "cancel due to retry"); - } - return createTargetHandler(request.getTargetNode(), copyState, request.getTargetAllocationId(), fileChunkWriter); - }); + final SegmentReplicationSourceHandler newHandler = createTargetHandler( + request.getTargetNode(), + copyState, + request.getTargetAllocationId(), + fileChunkWriter + ); + final SegmentReplicationSourceHandler existingHandler = allocationIdToHandlers.putIfAbsent( + request.getTargetAllocationId(), + newHandler + ); + // If we are already replicating to this allocation Id, cancel the old and replace with a new execution. + // This will clear the old handler & referenced copy state holding an incref'd indexCommit. 
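+            // Building the new handler up front and using putIfAbsent (rather than compute) keeps
+            // cancelHandlers, which mutates this same map, from running re-entrantly inside the
+            // map's atomic update.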
+ if (existingHandler != null) { + logger.warn("Override handler for allocation id {}", request.getTargetAllocationId()); + cancelHandlers(handler -> handler.getAllocationId().equals(request.getTargetAllocationId()), "cancel due to retry"); + assert allocationIdToHandlers.containsKey(request.getTargetAllocationId()) == false; + allocationIdToHandlers.put(request.getTargetAllocationId(), newHandler); + } + assert allocationIdToHandlers.containsKey(request.getTargetAllocationId()); return copyState; } diff --git a/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java index b1a6cab2ba57a..a17779810239a 100644 --- a/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java +++ b/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java @@ -8,20 +8,19 @@ package org.opensearch.indices.replication; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; +import org.opensearch.action.ActionListenerResponseHandler; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.action.ActionListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.recovery.RecoverySettings; -import org.opensearch.indices.recovery.RetryableTransportClient; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportRequestOptions; import org.opensearch.transport.TransportService; import java.util.List; +import java.util.function.BiConsumer; import static org.opensearch.indices.replication.SegmentReplicationSourceService.Actions.GET_CHECKPOINT_INFO; import static org.opensearch.indices.replication.SegmentReplicationSourceService.Actions.GET_SEGMENT_FILES; @@ -34,9 +33,7 @@ */ public class PrimaryShardReplicationSource implements SegmentReplicationSource { - private static final Logger logger = LogManager.getLogger(PrimaryShardReplicationSource.class); - - private final RetryableTransportClient transportClient; + private final TransportService transportService; private final DiscoveryNode sourceNode; private final DiscoveryNode targetNode; @@ -51,12 +48,7 @@ public PrimaryShardReplicationSource( DiscoveryNode sourceNode ) { this.targetAllocationId = targetAllocationId; - this.transportClient = new RetryableTransportClient( - transportService, - sourceNode, - recoverySettings.internalActionRetryTimeout(), - logger - ); + this.transportService = transportService; this.sourceNode = sourceNode; this.targetNode = targetNode; this.recoverySettings = recoverySettings; @@ -68,10 +60,14 @@ public void getCheckpointMetadata( ReplicationCheckpoint checkpoint, ActionListener<CheckpointInfoResponse> listener ) { - final Writeable.Reader<CheckpointInfoResponse> reader = CheckpointInfoResponse::new; - final ActionListener<CheckpointInfoResponse> responseListener = ActionListener.map(listener, r -> r); final CheckpointInfoRequest request = new CheckpointInfoRequest(replicationId, targetAllocationId, targetNode, checkpoint); - transportClient.executeRetryableAction(GET_CHECKPOINT_INFO, request, responseListener, reader); + transportService.sendRequest( + sourceNode, + GET_CHECKPOINT_INFO, + request, + 
TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionRetryTimeout()).build(), + new ActionListenerResponseHandler<>(listener, CheckpointInfoResponse::new, ThreadPool.Names.GENERIC) + ); } @Override @@ -80,10 +76,13 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List<StoreFileMetadata> filesToFetch, IndexShard indexShard, + BiConsumer<String, Long> fileProgressTracker, ActionListener<GetSegmentFilesResponse> listener ) { - final Writeable.Reader<GetSegmentFilesResponse> reader = GetSegmentFilesResponse::new; - final ActionListener<GetSegmentFilesResponse> responseListener = ActionListener.map(listener, r -> r); + // fileProgressTracker is a no-op for node to node recovery + // MultiFileWriter takes care of progress tracking for downloads in this scenario + // TODO: Move state management and tracking into replication methods and use chunking and data + // copy mechanisms only from MultiFileWriter final GetSegmentFilesRequest request = new GetSegmentFilesRequest( replicationId, targetAllocationId, @@ -91,20 +90,17 @@ public void getSegmentFiles( filesToFetch, checkpoint ); - final TransportRequestOptions options = TransportRequestOptions.builder() - .withTimeout(recoverySettings.internalActionLongTimeout()) - .build(); - transportClient.executeRetryableAction(GET_SEGMENT_FILES, request, options, responseListener, reader); + transportService.sendRequest( + sourceNode, + GET_SEGMENT_FILES, + request, + TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionLongTimeout()).build(), + new ActionListenerResponseHandler<>(listener, GetSegmentFilesResponse::new, ThreadPool.Names.GENERIC) + ); } @Override public String getDescription() { return sourceNode.getName(); } - - @Override - public void cancel() { - transportClient.cancel(); - } - } diff --git a/server/src/main/java/org/opensearch/indices/replication/RemoteSegmentFileChunkWriter.java b/server/src/main/java/org/opensearch/indices/replication/RemoteSegmentFileChunkWriter.java index ed171927c4600..b52fe66816098 100644 --- a/server/src/main/java/org/opensearch/indices/replication/RemoteSegmentFileChunkWriter.java +++ b/server/src/main/java/org/opensearch/indices/replication/RemoteSegmentFileChunkWriter.java @@ -10,17 +10,17 @@ import org.apache.lucene.store.RateLimiter; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.recovery.FileChunkRequest; +import org.opensearch.indices.recovery.FileChunkWriter; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RetryableTransportClient; -import org.opensearch.indices.recovery.FileChunkWriter; import org.opensearch.transport.TransportRequestOptions; -import org.opensearch.transport.TransportResponse; import java.io.IOException; import java.util.concurrent.atomic.AtomicLong; diff --git a/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java index c5be7635782af..b06b3e0497cf7 100644 --- a/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java +++ 
b/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java @@ -10,9 +10,13 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.store.Directory; import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.util.Version; -import org.opensearch.action.ActionListener; +import org.opensearch.common.concurrent.GatedCloseable; +import org.opensearch.common.util.CancellableThreads; +import org.opensearch.core.action.ActionListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; import org.opensearch.index.store.RemoteSegmentStoreDirectory; @@ -21,9 +25,15 @@ import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiConsumer; import java.util.stream.Collectors; /** @@ -33,12 +43,17 @@ */ public class RemoteStoreReplicationSource implements SegmentReplicationSource { - private static final Logger logger = LogManager.getLogger(PrimaryShardReplicationSource.class); + private static final Logger logger = LogManager.getLogger(RemoteStoreReplicationSource.class); private final IndexShard indexShard; + private final RemoteSegmentStoreDirectory remoteDirectory; + private final CancellableThreads cancellableThreads = new CancellableThreads(); public RemoteStoreReplicationSource(IndexShard indexShard) { this.indexShard = indexShard; + FilterDirectory remoteStoreDirectory = (FilterDirectory) indexShard.remoteStore().directory(); + FilterDirectory byteSizeCachingStoreDirectory = (FilterDirectory) remoteStoreDirectory.getDelegate(); + this.remoteDirectory = (RemoteSegmentStoreDirectory) byteSizeCachingStoreDirectory.getDelegate(); } @Override @@ -47,16 +62,13 @@ public void getCheckpointMetadata( ReplicationCheckpoint checkpoint, ActionListener<CheckpointInfoResponse> listener ) { - FilterDirectory remoteStoreDirectory = (FilterDirectory) indexShard.remoteStore().directory(); - FilterDirectory byteSizeCachingStoreDirectory = (FilterDirectory) remoteStoreDirectory.getDelegate(); - RemoteSegmentStoreDirectory remoteDirectory = (RemoteSegmentStoreDirectory) byteSizeCachingStoreDirectory.getDelegate(); - Map<String, StoreFileMetadata> metadataMap; // TODO: Need to figure out a way to pass this information for segment metadata via remote store. - final Version version = indexShard.getSegmentInfosSnapshot().get().getCommitLuceneVersion(); - try { - RemoteSegmentMetadata mdFile = remoteDirectory.readLatestMetadataFile(); - // During initial recovery flow, the remote store might not have metadata as primary hasn't uploaded anything yet. + try (final GatedCloseable<SegmentInfos> segmentInfosSnapshot = indexShard.getSegmentInfosSnapshot()) { + final Version version = segmentInfosSnapshot.get().getCommitLuceneVersion(); + final RemoteSegmentMetadata mdFile = getRemoteSegmentMetadata(); + // During initial recovery flow, the remote store might not + // have metadata as primary hasn't uploaded anything yet. 
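The remoteDirectory used here was unwrapped in the constructor above by peeling two FilterDirectory layers off the shard's remote store. A minimal sketch of that unwrapping as a standalone helper, assuming the delegate chain is exactly shard store -> byte-size-caching wrapper -> remote segment directory (the helper name is hypothetical):

    private static RemoteSegmentStoreDirectory unwrapRemoteDirectory(IndexShard shard) {
        // outermost layer: the shard's remote store exposes a FilterDirectory
        final FilterDirectory remoteStoreDirectory = (FilterDirectory) shard.remoteStore().directory();
        // middle layer: a byte-size-caching FilterDirectory wraps the remote directory
        final FilterDirectory byteSizeCachingStoreDirectory = (FilterDirectory) remoteStoreDirectory.getDelegate();
        // innermost delegate: the RemoteSegmentStoreDirectory used for metadata reads and downloads
        return (RemoteSegmentStoreDirectory) byteSizeCachingStoreDirectory.getDelegate();
    }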
if (mdFile == null && indexShard.state().equals(IndexShardState.STARTED) == false) { listener.onResponse(new CheckpointInfoResponse(checkpoint, Collections.emptyMap(), null)); return; @@ -77,8 +89,7 @@ public void getCheckpointMetadata( ) ) ); - // TODO: GET current checkpoint from remote store. - listener.onResponse(new CheckpointInfoResponse(checkpoint, metadataMap, null)); + listener.onResponse(new CheckpointInfoResponse(mdFile.getReplicationCheckpoint(), metadataMap, mdFile.getSegmentInfosBytes())); } catch (Exception e) { listener.onFailure(e); } @@ -90,18 +101,60 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List<StoreFileMetadata> filesToFetch, IndexShard indexShard, + BiConsumer<String, Long> fileProgressTracker, ActionListener<GetSegmentFilesResponse> listener ) { try { - indexShard.syncSegmentsFromRemoteSegmentStore(false, true, false); - listener.onResponse(new GetSegmentFilesResponse(Collections.emptyList())); - } catch (Exception e) { + if (filesToFetch.isEmpty()) { + listener.onResponse(new GetSegmentFilesResponse(Collections.emptyList())); + return; + } + logger.debug("Downloading segment files from remote store {}", filesToFetch); + + if (remoteMetadataExists()) { + final Directory storeDirectory = indexShard.store().directory(); + final Collection<String> directoryFiles = List.of(storeDirectory.listAll()); + final List<String> toDownloadSegmentNames = new ArrayList<>(); + for (StoreFileMetadata fileMetadata : filesToFetch) { + String file = fileMetadata.name(); + assert directoryFiles.contains(file) == false : "Local store already contains the file " + file; + toDownloadSegmentNames.add(file); + } + indexShard.getFileDownloader() + .downloadAsync( + cancellableThreads, + remoteDirectory, + new ReplicationStatsDirectoryWrapper(storeDirectory, fileProgressTracker), + toDownloadSegmentNames, + ActionListener.map(listener, r -> new GetSegmentFilesResponse(filesToFetch)) + ); + } else { + listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); + } + } catch (IOException | RuntimeException e) { listener.onFailure(e); } } + @Override + public void cancel() { + this.cancellableThreads.cancel("Canceled by target"); + } + @Override public String getDescription() { - return "remote store"; + return "RemoteStoreReplicationSource"; + } + + private boolean remoteMetadataExists() throws IOException { + final AtomicBoolean metadataExists = new AtomicBoolean(false); + cancellableThreads.executeIO(() -> metadataExists.set(remoteDirectory.readLatestMetadataFile() != null)); + return metadataExists.get(); + } + + private RemoteSegmentMetadata getRemoteSegmentMetadata() throws IOException { + AtomicReference<RemoteSegmentMetadata> mdFile = new AtomicReference<>(); + cancellableThreads.executeIO(() -> mdFile.set(remoteDirectory.init())); + return mdFile.get(); } } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentFileTransferHandler.java b/server/src/main/java/org/opensearch/indices/replication/SegmentFileTransferHandler.java index aec7908ef1165..a70700b64eb6e 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentFileTransferHandler.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentFileTransferHandler.java @@ -15,14 +15,14 @@ import org.apache.lucene.store.IndexInput; import org.apache.lucene.util.ArrayUtil; import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.core.common.bytes.BytesArray; 
-import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.lucene.store.InputStreamIndexInput; import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.util.io.IOUtils; -import org.opensearch.common.lease.Releasable; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java index 79b9b31e3d5c3..24f0cb15ddb25 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java @@ -8,13 +8,19 @@ package org.opensearch.indices.replication; -import org.opensearch.action.ActionListener; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.FilterDirectory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; import org.opensearch.common.util.CancellableThreads.ExecutionCancelledException; +import org.opensearch.core.action.ActionListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import java.io.IOException; import java.util.List; +import java.util.function.BiConsumer; /** * Represents the source of a replication event. @@ -39,6 +45,7 @@ public interface SegmentReplicationSource { * @param checkpoint {@link ReplicationCheckpoint} Checkpoint to fetch metadata for. * @param filesToFetch {@link List} List of files to fetch. * @param indexShard {@link IndexShard} Reference to the IndexShard. + * @param fileProgressTracker {@link BiConsumer} A consumer that updates the replication progress for shard files. + * @param listener {@link ActionListener} Listener that completes with the list of files copied. */ void getSegmentFiles( @@ -46,6 +53,7 @@ void getSegmentFiles( ReplicationCheckpoint checkpoint, List<StoreFileMetadata> filesToFetch, IndexShard indexShard, + BiConsumer<String, Long> fileProgressTracker, ActionListener<GetSegmentFilesResponse> listener ); @@ -58,4 +66,69 @@ void getSegmentFiles( * Cancel any ongoing requests, should resolve any ongoing listeners with onFailure with a {@link ExecutionCancelledException}. */ default void cancel() {} + + /** + * Directory wrapper that records copy progress for replication statistics + * + * @opensearch.internal + */ + final class ReplicationStatsDirectoryWrapper extends FilterDirectory { + private final BiConsumer<String, Long> fileProgressTracker; + + ReplicationStatsDirectoryWrapper(Directory in, BiConsumer<String, Long> fileProgressTracker) { + super(in); + this.fileProgressTracker = fileProgressTracker; + } + + @Override + public void copyFrom(Directory from, String src, String dest, IOContext context) throws IOException { + // here we wrap the index input from the source directory to report progress of file copy for the recovery stats. + // we increment the num bytes recovered in the readBytes method below; if users pull statistics they can see immediately + // how much has been recovered.
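To make the wrapper's contract concrete, a hedged usage sketch: copy one segment file through the wrapper and observe per-read progress callbacks. The source and destination Directory instances, the file name, and the AtomicLong tracker are assumptions for illustration, not part of this change:

    final AtomicLong copied = new AtomicLong();
    final Directory tracked = new ReplicationStatsDirectoryWrapper(destination, (file, bytes) -> copied.addAndGet(bytes));
    tracked.copyFrom(source, "_0.cfe", "_0.cfe", IOContext.DEFAULT);
    // copyFrom drains the source through the wrapped readBytes(...) below,
    // so copied.get() now equals the copied file's length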
+ in.copyFrom(new FilterDirectory(from) { + @Override + public IndexInput openInput(String name, IOContext context) throws IOException { + final IndexInput input = in.openInput(name, context); + return new IndexInput("StatsDirectoryWrapper(" + input.toString() + ")") { + @Override + public void close() throws IOException { + input.close(); + } + + @Override + public long getFilePointer() { + throw new UnsupportedOperationException("only straight copies are supported"); + } + + @Override + public void seek(long pos) throws IOException { + throw new UnsupportedOperationException("seeks are not supported"); + } + + @Override + public long length() { + return input.length(); + } + + @Override + public IndexInput slice(String sliceDescription, long offset, long length) throws IOException { + throw new UnsupportedOperationException("slices are not supported"); + } + + @Override + public byte readByte() throws IOException { + throw new UnsupportedOperationException("use a buffer if you wanna perform well"); + } + + @Override + public void readBytes(byte[] b, int offset, int len) throws IOException { + // we rely on the fact that copyFrom uses a buffer + input.readBytes(b, offset, len); + fileProgressTracker.accept(dest, (long) len); + } + }; + } + }, src, dest, context); + } + } } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java index a2ec8bfe54ecc..852003c9f3e4d 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java @@ -11,8 +11,8 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.index.shard.IndexShard; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java index 110fe9aafbf5f..674c09311c645 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java @@ -10,19 +10,16 @@ import org.apache.logging.log4j.Logger; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; import org.opensearch.action.StepListener; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.cluster.routing.IndexShardRoutingTable; -import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.logging.Loggers; import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.util.concurrent.ListenableFuture; import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.action.ActionListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.store.StoreFileMetadata; -import org.opensearch.indices.recovery.DelayRecoveryException; import org.opensearch.indices.recovery.FileChunkWriter; import 
org.opensearch.indices.recovery.MultiChunkTransfer; import org.opensearch.indices.replication.common.CopyState; @@ -146,12 +143,6 @@ public synchronized void sendFiles(GetSegmentFilesRequest request, ActionListene ); }; cancellableThreads.checkForCancel(); - final IndexShardRoutingTable routingTable = shard.getReplicationGroup().getRoutingTable(); - ShardRouting targetShardRouting = routingTable.getByAllocationId(request.getTargetAllocationId()); - if (targetShardRouting == null) { - logger.debug("delaying replication of {} as it is not listed as assigned to target node {}", shard.shardId(), targetNode); - throw new DelayRecoveryException("source node does not have the shard listed in its state as allocated on the node"); - } final StepListener<Void> sendFileStep = new StepListener<>(); Set<String> storeFiles = new HashSet<>(Arrays.asList(shard.store().directory().listAll())); diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java index 9d8b30209ab80..4062f9702fb3a 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java @@ -19,12 +19,13 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; -import org.opensearch.common.component.AbstractLifecycleComponent; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.index.IndexService; import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.IndicesService; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RetryableTransportClient; @@ -34,7 +35,6 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportChannel; import org.opensearch.transport.TransportRequestHandler; -import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportService; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java index c5d526251d9e7..5fa123948c5ac 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java @@ -10,10 +10,11 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.TimeValue; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -29,15 +30,17 @@ /** * ReplicationState implementation to track Segment Replication events. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.2.0") public class SegmentReplicationState implements ReplicationState, ToXContentFragment, Writeable { /** * The stage of the recovery state * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "2.2.0") public enum Stage { DONE((byte) 0), INIT((byte) 1), diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java index 2e0f5a8c0ad1f..af764556b7549 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java @@ -9,22 +9,21 @@ package org.opensearch.indices.replication; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.index.IndexFormatTooOldException; +import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.store.AlreadyClosedException; -import org.apache.lucene.store.BufferedChecksumIndexInput; -import org.apache.lucene.store.ByteBuffersDataInput; -import org.apache.lucene.store.ByteBuffersIndexInput; -import org.apache.lucene.store.ChecksumIndexInput; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; import org.opensearch.OpenSearchCorruptionException; -import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; import org.opensearch.action.StepListener; import org.opensearch.common.UUIDs; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.util.CancellableThreads; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; @@ -36,10 +35,11 @@ import org.opensearch.indices.replication.common.ReplicationTarget; import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.Arrays; +import java.io.UncheckedIOException; import java.util.List; import java.util.Locale; +import java.util.Set; +import java.util.stream.Collectors; /** * Represents the target of a replication event. 
@@ -55,13 +55,14 @@ public class SegmentReplicationTarget extends ReplicationTarget { public final static String REPLICATION_PREFIX = "replication."; - public ReplicationCheckpoint getCheckpoint() { - return this.checkpoint; - } - - public SegmentReplicationTarget(IndexShard indexShard, SegmentReplicationSource source, ReplicationListener listener) { + public SegmentReplicationTarget( + IndexShard indexShard, + ReplicationCheckpoint checkpoint, + SegmentReplicationSource source, + ReplicationListener listener + ) { super("replication_target", indexShard, new ReplicationLuceneIndex(), listener); - this.checkpoint = indexShard.getLatestReplicationCheckpoint(); + this.checkpoint = checkpoint; this.source = source; this.state = new SegmentReplicationState( indexShard.routingEntry(), @@ -82,6 +83,16 @@ protected void closeInternal() { } } + @Override + protected void onCancel(String reason) { + try { + notifyListener(new ReplicationFailedException(reason), false); + } finally { + source.cancel(); + cancellableThreads.cancel(reason); + } + } + @Override protected String getPrefix() { return REPLICATION_PREFIX + UUIDs.randomBase64UUID() + "."; @@ -98,12 +109,19 @@ public SegmentReplicationState state() { } public SegmentReplicationTarget retryCopy() { - return new SegmentReplicationTarget(indexShard, source, listener); + return new SegmentReplicationTarget(indexShard, checkpoint, source, listener); } @Override public String description() { - return String.format(Locale.ROOT, "Id:[%d] Shard:[%s] Source:[%s]", getId(), shardId(), source.getDescription()); + return String.format( + Locale.ROOT, + "Id:[%d] Checkpoint [%s] Shard:[%s] Source:[%s]", + getId(), + getCheckpoint(), + shardId(), + source.getDescription() + ); } @Override @@ -117,6 +135,10 @@ public boolean reset(CancellableThreads newTargetCancellableThreads) throws IOEx return false; } + public ReplicationCheckpoint getCheckpoint() { + return this.checkpoint; + } + @Override public void writeFileChunk( StoreFileMetadata metadata, @@ -158,7 +180,14 @@ public void startReplication(ActionListener<Void> listener) { final List<StoreFileMetadata> filesToFetch = getFiles(checkpointInfo); state.setStage(SegmentReplicationState.Stage.GET_FILES); cancellableThreads.checkForCancel(); - source.getSegmentFiles(getId(), checkpointInfo.getCheckpoint(), filesToFetch, indexShard, getFilesListener); + source.getSegmentFiles( + getId(), + checkpointInfo.getCheckpoint(), + filesToFetch, + indexShard, + this::updateFileRecoveryBytes, + getFilesListener + ); }, listener::onFailure); getFilesListener.whenComplete(response -> { @@ -171,7 +200,27 @@ private List<StoreFileMetadata> getFiles(CheckpointInfoResponse checkpointInfo) cancellableThreads.checkForCancel(); state.setStage(SegmentReplicationState.Stage.FILE_DIFF); final Store.RecoveryDiff diff = Store.segmentReplicationDiff(checkpointInfo.getMetadataMap(), indexShard.getSegmentMetadataMap()); - logger.trace(() -> new ParameterizedMessage("Replication diff for checkpoint {} {}", checkpointInfo.getCheckpoint(), diff)); + // local files + final Set<String> localFiles = Set.of(indexShard.store().directory().listAll()); + // set of local files that can be reused + final Set<String> reuseFiles = diff.missing.stream() + .filter(storeFileMetadata -> localFiles.contains(storeFileMetadata.name())) + .filter(this::validateLocalChecksum) + .map(StoreFileMetadata::name) + .collect(Collectors.toSet()); + + final List<StoreFileMetadata> missingFiles = diff.missing.stream() + .filter(md -> reuseFiles.contains(md.name()) == 
false) + .collect(Collectors.toList()); + + logger.trace( + () -> new ParameterizedMessage( + "Replication diff for checkpoint {} {} {}", + checkpointInfo.getCheckpoint(), + missingFiles, + diff.different + ) + ); /* * Segments are immutable. So if the replica has any segments with the same name that differ from the one in the incoming * snapshot from source that means the local copy of the segment has been corrupted/changed in some way and we throw an @@ -187,36 +236,71 @@ private List<StoreFileMetadata> getFiles(CheckpointInfoResponse checkpointInfo) ); } - for (StoreFileMetadata file : diff.missing) { + for (StoreFileMetadata file : missingFiles) { state.getIndex().addFileDetail(file.name(), file.length(), false); } - return diff.missing; + return missingFiles; } - private void finalizeReplication(CheckpointInfoResponse checkpointInfoResponse) throws OpenSearchCorruptionException { - // TODO: Refactor the logic so that finalize doesn't have to be invoked for remote store as source - if (source instanceof RemoteStoreReplicationSource) { - state.setStage(SegmentReplicationState.Stage.FINALIZE_REPLICATION); - return; + private boolean validateLocalChecksum(StoreFileMetadata file) { + try (IndexInput indexInput = indexShard.store().directory().openInput(file.name(), IOContext.DEFAULT)) { + String checksum = Store.digestToString(CodecUtil.retrieveChecksum(indexInput)); + if (file.checksum().equals(checksum)) { + return true; + } else { + // clear local copy with mismatch. Safe because file is not referenced by active reader. + store.deleteQuiet(file.name()); + return false; + } + } catch (IOException e) { + logger.warn("Error reading " + file, e); + // Delete file on exceptions so that it can be re-downloaded. This is safe to do as this file is local only + // and not referenced by reader. + try { + indexShard.store().directory().deleteFile(file.name()); + } catch (IOException ex) { + throw new UncheckedIOException("Error reading " + file, e); + } + return false; + } + } + + /** + * Updates the state to reflect recovery progress for the given file and + * updates the last access time for the target. + * @param fileName Name of the file being downloaded + * @param bytesRecovered Number of bytes recovered + */ + private void updateFileRecoveryBytes(String fileName, long bytesRecovered) { + ReplicationLuceneIndex index = state.getIndex(); + if (index != null) { + index.addRecoveredBytesToFile(fileName, bytesRecovered); } + setLastAccessTime(); + } + + private void finalizeReplication(CheckpointInfoResponse checkpointInfoResponse) throws OpenSearchCorruptionException { cancellableThreads.checkForCancel(); state.setStage(SegmentReplicationState.Stage.FINALIZE_REPLICATION); + // Handle empty SegmentInfos bytes for recovering replicas + if (checkpointInfoResponse.getInfosBytes() == null) { + return; + } Store store = null; try { store = store(); store.incRef(); - store.buildInfosFromBytes( - multiFileWriter.getTempFileNames(), + multiFileWriter.renameAllTempFiles(); + final SegmentInfos infos = store.buildSegmentInfos( checkpointInfoResponse.getInfosBytes(), - checkpointInfoResponse.getCheckpoint().getSegmentsGen(), - indexShard::finalizeReplication + checkpointInfoResponse.getCheckpoint().getSegmentsGen() ); + indexShard.finalizeReplication(infos); } catch (CorruptIndexException | IndexFormatTooNewException | IndexFormatTooOldException ex) { // this is a fatal exception at this stage.
// this means we transferred files from the remote that have not been checksummed and they are - broken. We have to clean up this shard entirely, remove all files and bubble it up to the - source shard since this index might be broken there as well? The Source can handle this and checks - its content on disk if possible. + broken. We have to clean up this shard entirely, remove all files and bubble it up. try { try { store.removeCorruptionMarker(); @@ -232,40 +316,18 @@ private void finalizeReplication(CheckpointInfoResponse checkpointInfoResponse) // In this case the shard is closed at some point while updating the reader. // This can happen when the engine is closed in a separate thread. logger.warn("Shard is already closed, closing replication"); - } catch (OpenSearchException ex) { + } catch (CancellableThreads.ExecutionCancelledException ex) { /* Ignore closed replication target as it can happen due to index shard closed event in a separate thread. In such scenario, ignore the exception */ - assert cancellableThreads.isCancelled() : "Replication target closed but segment replication not cancelled"; + assert cancellableThreads.isCancelled() : "Replication target cancelled but cancellable threads not cancelled"; } catch (Exception ex) { - throw new OpenSearchCorruptionException(ex); + throw new ReplicationFailedException(ex); } finally { if (store != null) { store.decRef(); } } } - - /** - * This method formats our byte[] containing the primary's SegmentInfos into lucene's {@link ChecksumIndexInput} that can be - * passed to SegmentInfos.readCommit - */ - private ChecksumIndexInput toIndexInput(byte[] input) { - return new BufferedChecksumIndexInput( - new ByteBuffersIndexInput(new ByteBuffersDataInput(Arrays.asList(ByteBuffer.wrap(input))), "SegmentInfos") - ); - } - - /** - * Trigger a cancellation, this method will not close the target a subsequent call to #fail is required from target service.
- */ - @Override - public void cancel(String reason) { - if (finished.get() == false) { - logger.trace(new ParameterizedMessage("Cancelling replication for target {}", description())); - cancellableThreads.cancel(reason); - source.cancel(); - } - } } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java index 7c35c4f07598e..f28f829545d59 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java @@ -11,20 +11,29 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.index.CorruptIndexException; +import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchCorruptionException; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ChannelActionListener; +import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterStateListener; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.index.IndexService; import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; -import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.store.Store; import org.opensearch.indices.IndicesService; import org.opensearch.indices.recovery.FileChunkRequest; import org.opensearch.indices.recovery.ForceSyncRequest; @@ -41,13 +50,14 @@ import org.opensearch.transport.TransportChannel; import org.opensearch.transport.TransportRequestHandler; import org.opensearch.transport.TransportRequestOptions; -import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportService; +import java.io.IOException; import java.util.Map; import java.util.Optional; import java.util.concurrent.atomic.AtomicLong; +import static org.opensearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; import static org.opensearch.indices.replication.SegmentReplicationSourceService.Actions.UPDATE_VISIBLE_CHECKPOINT; /** @@ -55,7 +65,7 @@ * * @opensearch.internal */ -public class SegmentReplicationTargetService implements IndexEventListener { +public class SegmentReplicationTargetService extends AbstractLifecycleComponent implements ClusterStateListener, IndexEventListener { private static final Logger logger = LogManager.getLogger(SegmentReplicationTargetService.class); @@ -64,7 +74,7 @@ public class SegmentReplicationTargetService implements IndexEventListener { private final ReplicationCollection<SegmentReplicationTarget> onGoingReplications; - private final Map<ShardId, SegmentReplicationTarget> completedReplications = 
ConcurrentCollections.newConcurrentMap(); + private final Map<ShardId, SegmentReplicationState> completedReplications = ConcurrentCollections.newConcurrentMap(); private final SegmentReplicationSourceFactory sourceFactory; @@ -74,10 +84,6 @@ public class SegmentReplicationTargetService implements IndexEventListener { private final ClusterService clusterService; private final TransportService transportService; - public ReplicationRef<SegmentReplicationTarget> get(long replicationId) { - return onGoingReplications.get(replicationId); - } - /** * The internal actions * @@ -138,13 +144,61 @@ public SegmentReplicationTargetService( ); } + @Override + protected void doStart() { + if (DiscoveryNode.isDataNode(clusterService.getSettings())) { + clusterService.addListener(this); + } + } + + @Override + protected void doStop() { + if (DiscoveryNode.isDataNode(clusterService.getSettings())) { + assert onGoingReplications.size() == 0 : "Replication collection should be empty on shutdown"; + clusterService.removeListener(this); + } + } + + @Override + protected void doClose() throws IOException { + + } + + @Override + public void clusterChanged(ClusterChangedEvent event) { + if (event.routingTableChanged()) { + for (IndexService indexService : indicesService) { + if (indexService.getIndexSettings().isSegRepEnabled() && event.indexRoutingTableChanged(indexService.index().getName())) { + for (IndexShard shard : indexService) { + if (shard.routingEntry().primary() == false) { + // for this shard, look up its primary routing; if it has completed a relocation, trigger replication + final String previousNode = event.previousState() + .routingTable() + .shardRoutingTable(shard.shardId()) + .primaryShard() + .currentNodeId(); + final String currentNode = event.state() + .routingTable() + .shardRoutingTable(shard.shardId()) + .primaryShard() + .currentNodeId(); + if (previousNode.equals(currentNode) == false) { + processLatestReceivedCheckpoint(shard, Thread.currentThread()); + } + } + } + } + } + } + } + /** * Cancel any replications on this node for a replica that is about to be closed.
*/ @Override public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) { if (indexShard != null && indexShard.indexSettings().isSegRepEnabled()) { - onGoingReplications.requestCancel(indexShard.shardId(), "Shard closing"); + onGoingReplications.cancelForShard(indexShard.shardId(), "Shard closing"); latestReceivedCheckpoint.remove(shardId); } } @@ -166,7 +220,7 @@ public void afterIndexShardStarted(IndexShard indexShard) { @Override public void shardRoutingChanged(IndexShard indexShard, @Nullable ShardRouting oldRouting, ShardRouting newRouting) { if (oldRouting != null && indexShard.indexSettings().isSegRepEnabled() && oldRouting.primary() == false && newRouting.primary()) { - onGoingReplications.requestCancel(indexShard.shardId(), "Shard has been promoted to primary"); + onGoingReplications.cancelForShard(indexShard.shardId(), "Shard has been promoted to primary"); latestReceivedCheckpoint.remove(indexShard.shardId()); } } @@ -186,7 +240,7 @@ public SegmentReplicationState getOngoingEventSegmentReplicationState(ShardId sh */ @Nullable public SegmentReplicationState getlatestCompletedEventSegmentReplicationState(ShardId shardId) { - return Optional.ofNullable(completedReplications.get(shardId)).map(SegmentReplicationTarget::state).orElse(null); + return completedReplications.get(shardId); } /** @@ -198,6 +252,14 @@ public SegmentReplicationState getSegmentReplicationState(ShardId shardId) { .orElseGet(() -> getlatestCompletedEventSegmentReplicationState(shardId)); } + public ReplicationRef<SegmentReplicationTarget> get(long replicationId) { + return onGoingReplications.get(replicationId); + } + + public SegmentReplicationTarget get(ShardId shardId) { + return onGoingReplications.getOngoingReplicationTarget(shardId); + } + /** * Invoked when a new checkpoint is received from a primary shard. * It checks if a new checkpoint should be processed or not and starts replication if needed. 
@@ -206,7 +268,7 @@ public SegmentReplicationState getSegmentReplicationState(ShardId shardId) { * @param replicaShard replica shard on which checkpoint is received */ public synchronized void onNewCheckpoint(final ReplicationCheckpoint receivedCheckpoint, final IndexShard replicaShard) { - logger.trace(() -> new ParameterizedMessage("Replica received new replication checkpoint from primary [{}]", receivedCheckpoint)); + logger.debug(() -> new ParameterizedMessage("Replica received new replication checkpoint from primary [{}]", receivedCheckpoint)); // if the shard is in any state if (replicaShard.state().equals(IndexShardState.CLOSED)) { // ignore if shard is closed @@ -222,7 +284,7 @@ public synchronized void onNewCheckpoint(final ReplicationCheckpoint receivedChe SegmentReplicationTarget ongoingReplicationTarget = onGoingReplications.getOngoingReplicationTarget(replicaShard.shardId()); if (ongoingReplicationTarget != null) { if (ongoingReplicationTarget.getCheckpoint().getPrimaryTerm() < receivedCheckpoint.getPrimaryTerm()) { - logger.trace( + logger.debug( () -> new ParameterizedMessage( "Cancelling ongoing replication {} from old primary with primary term {}", ongoingReplicationTarget.description(), @@ -231,10 +293,10 @@ public synchronized void onNewCheckpoint(final ReplicationCheckpoint receivedChe ); ongoingReplicationTarget.cancel("Cancelling stuck target after new primary"); } else { - logger.trace( + logger.debug( () -> new ParameterizedMessage( "Ignoring new replication checkpoint - shard is currently replicating to checkpoint {}", - replicaShard.getLatestReplicationCheckpoint() + ongoingReplicationTarget.getCheckpoint() ) ); return; @@ -242,10 +304,10 @@ public synchronized void onNewCheckpoint(final ReplicationCheckpoint receivedChe } final Thread thread = Thread.currentThread(); if (replicaShard.shouldProcessCheckpoint(receivedCheckpoint)) { - startReplication(replicaShard, new SegmentReplicationListener() { + startReplication(replicaShard, receivedCheckpoint, new SegmentReplicationListener() { @Override public void onReplicationDone(SegmentReplicationState state) { - logger.trace( + logger.debug( () -> new ParameterizedMessage( "[shardId {}] [replication id {}] Replication complete to {}, timing data: {}", replicaShard.shardId().getId(), @@ -269,20 +331,20 @@ public void onReplicationFailure( ReplicationFailedException e, boolean sendShardFailure ) { - logger.error( - () -> new ParameterizedMessage( - "[shardId {}] [replication id {}] Replication failed, timing data: {}", - replicaShard.shardId().getId(), - state.getReplicationId(), - state.getTimingData() - ), - e - ); + logReplicationFailure(state, e, replicaShard); if (sendShardFailure == true) { failShard(e, replicaShard); + } else { + processLatestReceivedCheckpoint(replicaShard, thread); } } }); + } else if (replicaShard.isSegmentReplicationAllowed()) { + // if we didn't process the checkpoint because we are up to date, + // send our latest checkpoint to the primary to update tracking. + // replicationId is not used by the primary, so set it to a default value. + final long replicationId = NO_OPS_PERFORMED; + updateVisibleCheckpoint(replicationId, replicaShard); } } else { logger.trace( @@ -291,6 +353,30 @@ public void onReplicationFailure( } } + private void logReplicationFailure(SegmentReplicationState state, ReplicationFailedException e, IndexShard replicaShard) { + // only log as error if error is not a cancellation.
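For context on the check that follows: ExceptionsHelper.unwrap walks the exception's cause chain and returns the first throwable assignable to one of the given classes, or null when none matches. A minimal sketch of the same classification in isolation (the local names are illustrative only):

    // non-null result => the failure is, or was caused by, a cancellation
    final Throwable cancellation = ExceptionsHelper.unwrap(e, CancellableThreads.ExecutionCancelledException.class);
    final boolean logAsError = cancellation == null;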
+ if (ExceptionsHelper.unwrap(e, CancellableThreads.ExecutionCancelledException.class) == null) { + logger.error( + () -> new ParameterizedMessage( + "[shardId {}] [replication id {}] Replication failed, timing data: {}", + replicaShard.shardId(), + state.getReplicationId(), + state.getTimingData() + ), + e + ); + } else { + logger.debug( + () -> new ParameterizedMessage( + "[shardId {}] [replication id {}] Replication cancelled", + replicaShard.shardId(), + state.getReplicationId() + ), + e + ); + } + } + protected void updateVisibleCheckpoint(long replicationId, IndexShard replicaShard) { // Update replication checkpoint on source via transport call only supported for remote store integration. For node- // node communication, checkpoint update is piggy-backed to GET_SEGMENT_FILES transport call @@ -365,7 +451,7 @@ private DiscoveryNode getPrimaryNode(ShardRouting primaryShard) { // visible to tests protected boolean processLatestReceivedCheckpoint(IndexShard replicaShard, Thread thread) { final ReplicationCheckpoint latestPublishedCheckpoint = latestReceivedCheckpoint.get(replicaShard.shardId()); - if (latestPublishedCheckpoint != null && latestPublishedCheckpoint.isAheadOf(replicaShard.getLatestReplicationCheckpoint())) { + if (latestPublishedCheckpoint != null) { logger.trace( () -> new ParameterizedMessage( "Processing latest received checkpoint for shard {} {}", @@ -373,7 +459,13 @@ protected boolean processLatestReceivedCheckpoint(IndexShard replicaShard, Threa latestPublishedCheckpoint ) ); - Runnable runnable = () -> onNewCheckpoint(latestReceivedCheckpoint.get(replicaShard.shardId()), replicaShard); + Runnable runnable = () -> { + // if we retry, ensure the shard is not in the process of being closed. + // it will be removed from indexService's collection before the shard is actually marked as closed. + if (indicesService.getShardOrNull(replicaShard.shardId()) != null) { + onNewCheckpoint(latestReceivedCheckpoint.get(replicaShard.shardId()), replicaShard); + } + }; // Checks if we are using same thread and forks if necessary. if (thread == Thread.currentThread()) { threadPool.generic().execute(runnable); @@ -396,8 +488,24 @@ protected void updateLatestReceivedCheckpoint(ReplicationCheckpoint receivedChec } } - public SegmentReplicationTarget startReplication(final IndexShard indexShard, final SegmentReplicationListener listener) { - final SegmentReplicationTarget target = new SegmentReplicationTarget(indexShard, sourceFactory.get(indexShard), listener); + /** + * Start a round of replication and sync to at least the given checkpoint. + * @param indexShard - {@link IndexShard} replica shard + * @param checkpoint - {@link ReplicationCheckpoint} checkpoint to sync to + * @param listener - {@link ReplicationListener} + * @return {@link SegmentReplicationTarget} the target orchestrating the replication event (see the caller sketch below).
+ */ + public SegmentReplicationTarget startReplication( + final IndexShard indexShard, + final ReplicationCheckpoint checkpoint, + final SegmentReplicationListener listener + ) { + final SegmentReplicationTarget target = new SegmentReplicationTarget( + indexShard, + checkpoint, + sourceFactory.get(indexShard), + listener + ); startReplication(target); return target; } @@ -451,9 +559,6 @@ public ReplicationRunner(long replicationId) { @Override public void onFailure(Exception e) { - try (final ReplicationRef<SegmentReplicationTarget> ref = onGoingReplications.get(replicationId)) { - logger.error(() -> new ParameterizedMessage("Error during segment replication, {}", ref.get().description()), e); - } onGoingReplications.fail(replicationId, new ReplicationFailedException("Unexpected Error during replication", e), false); } @@ -476,17 +581,17 @@ private void start(final long replicationId) { target.startReplication(new ActionListener<>() { @Override public void onResponse(Void o) { - logger.trace(() -> new ParameterizedMessage("Finished replicating {} marking as done.", target.description())); + logger.debug(() -> new ParameterizedMessage("Finished replicating {} marking as done.", target.description())); onGoingReplications.markAsDone(replicationId); if (target.state().getIndex().recoveredFileCount() != 0 && target.state().getIndex().recoveredBytes() != 0) { - completedReplications.put(target.shardId(), target); + completedReplications.put(target.shardId(), target.state()); } } @Override public void onFailure(Exception e) { - logger.error(() -> new ParameterizedMessage("Exception replicating {} marking as failed.", target.description()), e); - if (e instanceof OpenSearchCorruptionException) { + logger.debug("Replication failed {}", target.description()); + if (isStoreCorrupt(target) || e instanceof CorruptIndexException || e instanceof OpenSearchCorruptionException) { onGoingReplications.fail(replicationId, new ReplicationFailedException("Store corruption during replication", e), true); return; } @@ -495,6 +600,27 @@ public void onFailure(Exception e) { }); } + private boolean isStoreCorrupt(SegmentReplicationTarget target) { + // ensure target is not already closed. In that case + // we can assume the store is not corrupt and that the replication + // event completed successfully. + if (target.refCount() > 0) { + final Store store = target.store(); + if (store.tryIncRef()) { + try { + return store.isMarkedCorrupted(); + } catch (IOException ex) { + logger.warn("Unable to determine if store is corrupt", ex); + return false; + } finally { + store.decRef(); + } + } + } + // store already closed. 
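For reference, a hedged sketch of how a caller might drive the three-argument startReplication introduced above; targetService, replicaShard, and receivedCheckpoint are assumed names, and the listener bodies are illustrative only:

    targetService.startReplication(replicaShard, receivedCheckpoint, new SegmentReplicationTargetService.SegmentReplicationListener() {
        @Override
        public void onReplicationDone(SegmentReplicationState state) {
            // the replica has synced to at least receivedCheckpoint
        }

        @Override
        public void onReplicationFailure(SegmentReplicationState state, ReplicationFailedException e, boolean sendShardFailure) {
            // sendShardFailure == true signals an unrecoverable failure such as store corruption
        }
    });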
+ return false; + } + private class FileChunkTransportRequestHandler implements TransportRequestHandler<FileChunkRequest> { // How many bytes we've copied since we last called RateLimiter.pause @@ -529,50 +655,51 @@ private void forceReplication(ForceSyncRequest request, ActionListener<Transport if (indexShard == null || indexShard.getReplicationEngine().isEmpty()) { listener.onResponse(TransportResponse.Empty.INSTANCE); } else { - startReplication(indexShard, new SegmentReplicationTargetService.SegmentReplicationListener() { - @Override - public void onReplicationDone(SegmentReplicationState state) { - try { - logger.trace( - () -> new ParameterizedMessage( - "[shardId {}] [replication id {}] Force replication Sync complete to {}, timing data: {}", - shardId, - state.getReplicationId(), - indexShard.getLatestReplicationCheckpoint(), - state.getTimingData() - ) - ); - // Promote engine type for primary target - if (indexShard.recoveryState().getPrimary() == true) { - indexShard.resetToWriteableEngine(); - } else { - // Update the replica's checkpoint on primary's replication tracker. - updateVisibleCheckpoint(state.getReplicationId(), indexShard); + // We are skipping any validation for an incoming checkpoint, use the shard's latest checkpoint in the target. + startReplication( + indexShard, + indexShard.getLatestReplicationCheckpoint(), + new SegmentReplicationTargetService.SegmentReplicationListener() { + @Override + public void onReplicationDone(SegmentReplicationState state) { + try { + logger.trace( + () -> new ParameterizedMessage( + "[shardId {}] [replication id {}] Force replication Sync complete to {}, timing data: {}", + shardId, + state.getReplicationId(), + indexShard.getLatestReplicationCheckpoint(), + state.getTimingData() + ) + ); + // Promote engine type for primary target + if (indexShard.recoveryState().getPrimary() == true) { + indexShard.resetToWriteableEngine(); + } else { + // Update the replica's checkpoint on primary's replication tracker. 
+ updateVisibleCheckpoint(state.getReplicationId(), indexShard); + } + listener.onResponse(TransportResponse.Empty.INSTANCE); + } catch (Exception e) { + logger.error("Error while marking replication completed", e); + listener.onFailure(e); } - listener.onResponse(TransportResponse.Empty.INSTANCE); - } catch (Exception e) { - logger.error("Error while marking replication completed", e); - listener.onFailure(e); } - } - @Override - public void onReplicationFailure(SegmentReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { - logger.error( - () -> new ParameterizedMessage( - "[shardId {}] [replication id {}] Replication failed, timing data: {}", - indexShard.shardId().getId(), - state.getReplicationId(), - state.getTimingData() - ), - e - ); - if (sendShardFailure) { - failShard(e, indexShard); + @Override + public void onReplicationFailure( + SegmentReplicationState state, + ReplicationFailedException e, + boolean sendShardFailure + ) { + logReplicationFailure(state, e, indexShard); + if (sendShardFailure) { + failShard(e, indexShard); + } + listener.onFailure(e); } - listener.onFailure(e); } - }); + ); } } diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java index e77f9e12212fc..821ae42e31881 100644 --- a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java @@ -13,17 +13,19 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.replication.ReplicationMode; import org.opensearch.action.support.replication.ReplicationResponse; import org.opensearch.action.support.replication.ReplicationTask; import org.opensearch.action.support.replication.TransportReplicationAction; import org.opensearch.cluster.action.shard.ShardStateAction; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardClosedException; @@ -41,14 +43,12 @@ import java.io.IOException; import java.util.Objects; -import org.opensearch.action.support.replication.ReplicationMode; - /** * Replication action responsible for publishing checkpoint to a replica shard. 
* - * @opensearch.internal + * @opensearch.api */ - +@PublicApi(since = "2.2.0") public class PublishCheckpointAction extends TransportReplicationAction< PublishCheckpointRequest, PublishCheckpointRequest, @@ -138,7 +138,7 @@ public String executor() { @Override public void handleResponse(ReplicationResponse response) { timer.stop(); - logger.trace( + logger.debug( () -> new ParameterizedMessage( "[shardId {}] Completed publishing checkpoint [{}], timing: {}", indexShard.shardId().getId(), @@ -153,7 +153,7 @@ public void handleResponse(ReplicationResponse response) { @Override public void handleException(TransportException e) { timer.stop(); - logger.trace("[shardId {}] Failed to publish checkpoint, timing: {}", indexShard.shardId().getId(), timer.time()); + logger.debug("[shardId {}] Failed to publish checkpoint, timing: {}", indexShard.shardId().getId(), timer.time()); task.setPhase("finished"); taskManager.unregister(task); if (ExceptionsHelper.unwrap( diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java index 7549f3450e7f2..29410159a4955 100644 --- a/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java @@ -10,20 +10,25 @@ import org.opensearch.Version; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.index.store.StoreFileMetadata; import java.io.IOException; +import java.util.Collections; +import java.util.Map; import java.util.Objects; /** * Represents a Replication Checkpoint which is sent to a replica shard. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.2.0") public class ReplicationCheckpoint implements Writeable, Comparable<ReplicationCheckpoint> { private final ShardId shardId; @@ -32,6 +37,7 @@ public class ReplicationCheckpoint implements Writeable, Comparable<ReplicationC private final long segmentInfosVersion; private final long length; private final String codec; + private final Map<String, StoreFileMetadata> metadataMap; public static ReplicationCheckpoint empty(ShardId shardId) { return empty(shardId, ""); @@ -48,19 +54,29 @@ private ReplicationCheckpoint(ShardId shardId, String codec) { segmentInfosVersion = SequenceNumbers.NO_OPS_PERFORMED; length = 0L; this.codec = codec; + this.metadataMap = Collections.emptyMap(); } public ReplicationCheckpoint(ShardId shardId, long primaryTerm, long segmentsGen, long segmentInfosVersion, String codec) { - this(shardId, primaryTerm, segmentsGen, segmentInfosVersion, 0L, codec); - } - - public ReplicationCheckpoint(ShardId shardId, long primaryTerm, long segmentsGen, long segmentInfosVersion, long length, String codec) { + this(shardId, primaryTerm, segmentsGen, segmentInfosVersion, 0L, codec, Collections.emptyMap()); + } + + public ReplicationCheckpoint( + ShardId shardId, + long primaryTerm, + long segmentsGen, + long segmentInfosVersion, + long length, + String codec, + Map<String, StoreFileMetadata> metadataMap + ) { this.shardId = shardId; this.primaryTerm = primaryTerm; this.segmentsGen = segmentsGen; this.segmentInfosVersion = segmentInfosVersion; this.length = length; this.codec = codec; + this.metadataMap = metadataMap; } public ReplicationCheckpoint(StreamInput in) throws IOException { @@ -75,6 +91,11 @@ public ReplicationCheckpoint(StreamInput in) throws IOException { length = 0L; codec = null; } + if (in.getVersion().onOrAfter(Version.V_2_10_0)) { + this.metadataMap = in.readMap(StreamInput::readString, StoreFileMetadata::new); + } else { + this.metadataMap = Collections.emptyMap(); + } } /** @@ -135,6 +156,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(length); out.writeString(codec); } + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { + out.writeMap(metadataMap, StreamOutput::writeString, (valueOut, fc) -> fc.writeTo(valueOut)); + } } @Override @@ -169,6 +193,10 @@ public boolean isAheadOf(@Nullable ReplicationCheckpoint other) { || (primaryTerm == other.getPrimaryTerm() && segmentInfosVersion > other.getSegmentInfosVersion()); } + public Map<String, StoreFileMetadata> getMetadataMap() { + return metadataMap; + } + @Override public String toString() { return "ReplicationCheckpoint{" diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/SegmentReplicationCheckpointPublisher.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/SegmentReplicationCheckpointPublisher.java index b4bcdc92e539a..a35d6fd103dc0 100644 --- a/server/src/main/java/org/opensearch/indices/replication/checkpoint/SegmentReplicationCheckpointPublisher.java +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/SegmentReplicationCheckpointPublisher.java @@ -8,6 +8,7 @@ package org.opensearch.indices.replication.checkpoint; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Inject; import org.opensearch.index.shard.IndexShard; @@ -16,8 +17,9 @@ /** * Publish Segment Replication Checkpoint. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.2.0") public class SegmentReplicationCheckpointPublisher { private final PublishAction publishAction; @@ -39,7 +41,10 @@ public void publish(IndexShard indexShard, ReplicationCheckpoint checkpoint) { /** * Represents an action that is invoked to publish segment replication checkpoint to replica shard + * + * @opensearch.api */ + @PublicApi(since = "2.2.0") public interface PublishAction { void publish(IndexShard indexShard, ReplicationCheckpoint checkpoint); } diff --git a/server/src/main/java/org/opensearch/indices/replication/common/CopyState.java b/server/src/main/java/org/opensearch/indices/replication/common/CopyState.java index a6aa39e7cb074..3b7ae2af80ca0 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/CopyState.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/CopyState.java @@ -8,7 +8,6 @@ package org.opensearch.indices.replication.common; -import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.store.ByteBuffersDataOutput; import org.apache.lucene.store.ByteBuffersIndexOutput; @@ -38,7 +37,6 @@ public class CopyState extends AbstractRefCounted { private final ReplicationCheckpoint replicationCheckpoint; private final Map<String, StoreFileMetadata> metadataMap; private final byte[] infosBytes; - private GatedCloseable<IndexCommit> commitRef; private final IndexShard shard; public CopyState(ReplicationCheckpoint requestedReplicationCheckpoint, IndexShard shard) throws IOException { @@ -51,7 +49,6 @@ public CopyState(ReplicationCheckpoint requestedReplicationCheckpoint, IndexShar this.replicationCheckpoint = latestSegmentInfosAndCheckpoint.v2(); SegmentInfos segmentInfos = this.segmentInfosRef.get(); this.metadataMap = shard.store().getSegmentMetadataMap(segmentInfos); - this.commitRef = shard.acquireLastIndexCommit(false); ByteBuffersDataOutput buffer = new ByteBuffersDataOutput(); // resource description and name are not used, but resource description cannot be null @@ -65,10 +62,6 @@ public CopyState(ReplicationCheckpoint requestedReplicationCheckpoint, IndexShar protected void closeInternal() { try { segmentInfosRef.close(); - // commitRef may be null if there were no pending delete files - if (commitRef != null) { - commitRef.close(); - } } catch (IOException e) { throw new UncheckedIOException(e); } diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java index 431e35906702a..4950b5da2915f 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java @@ -38,9 +38,9 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardClosedException; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.threadpool.ThreadPool; import java.util.ArrayList; diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationFailedException.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationFailedException.java index 
f8f08dbe7a452..a039f9b860153 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationFailedException.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationFailedException.java @@ -10,17 +10,19 @@ import org.opensearch.OpenSearchException; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.index.shard.IndexShard; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.shard.IndexShard; import java.io.IOException; /** * Exception thrown if replication fails * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.2.0") public class ReplicationFailedException extends OpenSearchException { public ReplicationFailedException(IndexShard shard, Throwable cause) { diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationLuceneIndex.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationLuceneIndex.java index 9a9690581ed24..cc52d2bafc614 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationLuceneIndex.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationLuceneIndex.java @@ -8,16 +8,16 @@ package org.opensearch.indices.replication.common; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.indices.recovery.RecoveryState; import java.io.IOException; @@ -33,8 +33,9 @@ * Represents the Lucene Index (set of files on a single shard) involved * in the replication process. 
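The same promotion pattern repeats throughout this patch: classes move from @opensearch.internal to @opensearch.api and gain @PublicApi(since = ...). Below is a minimal look-alike of such a marker annotation; the real org.opensearch.common.annotation.PublicApi may declare different targets, retention, or extra elements.

```java
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

// Sketch only: not the actual OpenSearch annotation.
@Documented
@Target({ ElementType.TYPE })
@Retention(RetentionPolicy.RUNTIME)
@interface PublicApi {
    String since(); // release in which the element became public API
}

// Usage mirrors the hunks in this patch.
@PublicApi(since = "2.2.0")
class ReplicationFailedExceptionExample extends RuntimeException {
    ReplicationFailedExceptionExample(String message, Throwable cause) {
        super(message, cause);
    }
}
```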
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ReplicationLuceneIndex extends ReplicationTimer implements ToXContentFragment, Writeable { private final FilesDetails filesDetails; @@ -296,7 +297,7 @@ public synchronized String toString() { builder.startObject(); toXContent(builder, EMPTY_PARAMS); builder.endObject(); - return Strings.toString(builder); + return builder.toString(); } catch (IOException e) { return "{ \"error\" : \"" + e.getMessage() + "\"}"; } @@ -394,8 +395,9 @@ public boolean isComplete() { /** * Metadata about a file * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class FileMetadata implements ToXContentObject, Writeable { private String name; private long length; diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationRequestTracker.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationRequestTracker.java index 0b0d20fc9f17e..3775be7b6da15 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationRequestTracker.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationRequestTracker.java @@ -32,10 +32,10 @@ package org.opensearch.indices.replication.common; -import org.opensearch.action.ActionListener; import org.opensearch.common.Nullable; -import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.ListenableFuture; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.core.action.ActionListener; import org.opensearch.index.seqno.LocalCheckpointTracker; import java.util.Collections; @@ -58,8 +58,7 @@ public class ReplicationRequestTracker { * This method will mark that a request with a unique sequence number has been received. If this is the * first time the unique request has been received, this method will return a listener to be completed. * The caller should then perform the requested action and complete the returned listener. - * - * + * <p> * If the unique request has already been received, this method will either complete the provided listener * or attach that listener to the listener returned in the first call. 
In this case, the method will * return null and the caller should not perform the requested action as a prior caller is already diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationState.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationState.java index 029fcb6a3b690..e783e72a3c5c3 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationState.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationState.java @@ -8,11 +8,14 @@ package org.opensearch.indices.replication.common; +import org.opensearch.common.annotation.PublicApi; + /** * Represents a state object used to track copying of segments from an external source * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.2.0") public interface ReplicationState { ReplicationLuceneIndex getIndex(); diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java index 0c96a87715014..aac59df4f6573 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java @@ -12,23 +12,23 @@ import org.apache.lucene.store.RateLimiter; import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ChannelActionListener; import org.opensearch.common.CheckedFunction; import org.opensearch.common.Nullable; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.logging.Loggers; import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.util.concurrent.AbstractRefCounted; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.recovery.FileChunkRequest; import org.opensearch.indices.recovery.RecoveryTransportRequest; import org.opensearch.transport.TransportChannel; -import org.opensearch.transport.TransportResponse; import java.io.IOException; import java.util.concurrent.atomic.AtomicBoolean; @@ -91,6 +91,9 @@ public ReplicationTarget(String name, IndexShard indexShard, ReplicationLuceneIn // make sure the store is not released until we are done. 
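For the ReplicationRequestTracker contract documented above (the first receipt of a sequence number returns a listener to complete; duplicates piggy-back on the in-flight work and get null back), here is a small sketch of the dedup idea under assumed names. The real tracker is built on LocalCheckpointTracker and ListenableFuture rather than CompletableFuture.

```java
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;

// First arrival of a sequence number gets a future to complete after doing
// the work; later arrivals attach to that future and receive null,
// signalling "do not repeat the action".
final class RequestDedupSketch {
    private final Map<Long, CompletableFuture<Void>> inFlight = new ConcurrentHashMap<>();

    CompletableFuture<Void> markReceived(long seqNo, Runnable onAlreadyDone) {
        CompletableFuture<Void> fresh = new CompletableFuture<>();
        CompletableFuture<Void> existing = inFlight.putIfAbsent(seqNo, fresh);
        if (existing == null) {
            return fresh; // caller performs the action, then completes this future
        }
        existing.thenRun(onAlreadyDone); // duplicate request piggy-backs
        return null;
    }
}
```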
this.cancellableThreads = new CancellableThreads(); store.incRef(); + if (indexShard.indexSettings().isRemoteStoreEnabled()) { + indexShard.remoteStore().incRef(); + } } public long getId() { @@ -278,6 +281,12 @@ public abstract void writeFileChunk( ); protected void closeInternal() { - store.decRef(); + try { + store.decRef(); + } finally { + if (indexShard.indexSettings().isRemoteStoreEnabled()) { + indexShard.remoteStore().decRef(); + } + } } } diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTimer.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTimer.java index 59c4e24ce3efa..d884e1676f2be 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTimer.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTimer.java @@ -8,10 +8,11 @@ package org.opensearch.indices.replication.common; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.TimeValue; import java.io.IOException; @@ -19,8 +20,9 @@ * A serializable timer that is used to measure the time taken for * file replication operations like recovery. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ReplicationTimer implements Writeable { private long startTime = 0; private long startNanoTime = 0; diff --git a/server/src/main/java/org/opensearch/indices/replication/common/SegmentReplicationLagTimer.java b/server/src/main/java/org/opensearch/indices/replication/common/SegmentReplicationLagTimer.java new file mode 100644 index 0000000000000..c97edba72da0d --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/common/SegmentReplicationLagTimer.java @@ -0,0 +1,48 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication.common; + +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Wrapper class for Replication Timer which also tracks time elapsed since the timer was created. + * Currently, this is being used to calculate + * 1. Replication Lag: Total time taken by replica to sync after primary refreshed. + * 2. Replication event time: Total time taken by replica to sync after primary published the checkpoint + * (excludes the time spent by primary for uploading the segments to remote store). 
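The ReplicationTarget hunk pairs an extra incRef() on the remote store at construction with a decRef() in closeInternal(), wrapped in try/finally so the remote reference is released even if releasing the local store throws. A compilable sketch of that paired-refcount pattern, with a hypothetical store type:

```java
import java.util.concurrent.atomic.AtomicInteger;

// Paired refcounting with try/finally: the remote reference is always
// released, even when store.decRef() throws.
final class ReplicationTargetSketch {
    static final class RefCountedStore {
        private final AtomicInteger refs = new AtomicInteger(1);
        void incRef() { refs.incrementAndGet(); }
        void decRef() { if (refs.decrementAndGet() == 0) close(); }
        private void close() { /* release files, readers, caches */ }
    }

    private final RefCountedStore store = new RefCountedStore();
    private final RefCountedStore remoteStore = new RefCountedStore();
    private final boolean remoteStoreEnabled;

    ReplicationTargetSketch(boolean remoteStoreEnabled) {
        this.remoteStoreEnabled = remoteStoreEnabled;
        store.incRef(); // make sure the store is not released until we are done
        if (remoteStoreEnabled) {
            remoteStore.incRef(); // mirrors indexShard.remoteStore().incRef()
        }
    }

    void closeInternal() {
        try {
            store.decRef();
        } finally {
            if (remoteStoreEnabled) {
                remoteStore.decRef();
            }
        }
    }
}
```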
+ * + * @opensearch.internal + */ +public class SegmentReplicationLagTimer extends ReplicationTimer { + private long creationTime; + + public SegmentReplicationLagTimer() { + super(); + creationTime = System.nanoTime(); + } + + public SegmentReplicationLagTimer(StreamInput in) throws IOException { + super(in); + creationTime = in.readVLong(); + } + + @Override + public synchronized void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVLong(creationTime); + } + + public long totalElapsedTime() { + return TimeValue.nsecToMSec(Math.max(System.nanoTime() - creationTime, 0)); + } +} diff --git a/server/src/main/java/org/opensearch/indices/store/IndicesStore.java b/server/src/main/java/org/opensearch/indices/store/IndicesStore.java index eaaf5198fba94..1efaca09204da 100644 --- a/server/src/main/java/org/opensearch/indices/store/IndicesStore.java +++ b/server/src/main/java/org/opensearch/indices/store/IndicesStore.java @@ -50,18 +50,19 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.collect.Tuple; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.IndicesService; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; @@ -69,7 +70,6 @@ import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestHandler; -import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/indices/store/ShardAttributes.java b/server/src/main/java/org/opensearch/indices/store/ShardAttributes.java new file mode 100644 index 0000000000000..4ef4e91f7af8c --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/store/ShardAttributes.java @@ -0,0 +1,59 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
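The new SegmentReplicationLagTimer records its creation time once and reports elapsed lag in milliseconds, clamped at zero. The core idea fits in a few lines; this standalone sketch mirrors totalElapsedTime() without the Writeable plumbing:

```java
import java.util.concurrent.TimeUnit;

// Record creation time once; report wall-clock lag in milliseconds,
// clamped at zero to tolerate nanoTime anomalies between readings.
final class LagTimerSketch {
    private final long creationNanos = System.nanoTime();

    long totalElapsedMillis() {
        return TimeUnit.NANOSECONDS.toMillis(Math.max(System.nanoTime() - creationNanos, 0));
    }
}
```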
+ */ + +package org.opensearch.indices.store; + +import org.opensearch.common.Nullable; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.gateway.AsyncShardFetch; + +import java.io.IOException; + +/** + * This class contains information about the shard that needs to be sent as part of request in Transport Action implementing + * {@link AsyncShardFetch.Lister} to fetch shard information in async manner + * + * @opensearch.internal + */ +public class ShardAttributes implements Writeable { + private final ShardId shardId; + @Nullable + private final String customDataPath; + + public ShardAttributes(ShardId shardId, String customDataPath) { + this.shardId = shardId; + this.customDataPath = customDataPath; + } + + public ShardAttributes(StreamInput in) throws IOException { + shardId = new ShardId(in); + customDataPath = in.readString(); + } + + public ShardId getShardId() { + return shardId; + } + + /** + * Returns the custom data path that is used to look up information for this shard. + * Returns an empty string if no custom data path is used for this index. + * Returns null if custom data path information is not available (due to BWC). + */ + @Nullable + public String getCustomDataPath() { + return customDataPath; + } + + public void writeTo(StreamOutput out) throws IOException { + shardId.writeTo(out); + out.writeString(customDataPath); + } +} diff --git a/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java b/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java index 22c5c923e6322..eeee5d8a409aa 100644 --- a/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java +++ b/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java @@ -32,9 +32,7 @@ package org.opensearch.indices.store; -import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionType; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; @@ -43,38 +41,29 @@ import org.opensearch.action.support.nodes.BaseNodesResponse; import org.opensearch.action.support.nodes.TransportNodesAction; import org.opensearch.cluster.ClusterName; -import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; import org.opensearch.common.inject.Inject; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.env.NodeEnvironment; import org.opensearch.gateway.AsyncShardFetch; -import org.opensearch.index.IndexService; -import org.opensearch.index.IndexSettings; -import org.opensearch.index.seqno.ReplicationTracker; -import org.opensearch.index.seqno.RetentionLease; -import org.opensearch.index.shard.IndexShard; -import 
org.opensearch.core.index.shard.ShardId; -import org.opensearch.index.shard.ShardPath; -import org.opensearch.index.store.Store; -import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.store.TransportNodesListShardStoreMetadataHelper.StoreFilesMetadata; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; -import java.util.Collections; -import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.Objects; -import java.util.concurrent.TimeUnit; + +import static org.opensearch.indices.store.TransportNodesListShardStoreMetadataHelper.listShardMetadataInternal; /** * Metadata for shard stores from a list of transport nodes @@ -125,7 +114,14 @@ public TransportNodesListShardStoreMetadata( } @Override - public void list(ShardId shardId, String customDataPath, DiscoveryNode[] nodes, ActionListener<NodesStoreFilesMetadata> listener) { + public void list( + Map<ShardId, ShardAttributes> shardAttributes, + DiscoveryNode[] nodes, + ActionListener<NodesStoreFilesMetadata> listener + ) { + assert shardAttributes.size() == 1 : "only one shard should be specified"; + final ShardId shardId = shardAttributes.keySet().iterator().next(); + final String customDataPath = shardAttributes.get(shardId).getCustomDataPath(); execute(new Request(shardId, customDataPath, nodes), listener); } @@ -159,166 +155,7 @@ protected NodeStoreFilesMetadata nodeOperation(NodeRequest request) { private StoreFilesMetadata listStoreMetadata(NodeRequest request) throws IOException { final ShardId shardId = request.getShardId(); - logger.trace("listing store meta data for {}", shardId); - long startTimeNS = System.nanoTime(); - boolean exists = false; - try { - IndexService indexService = indicesService.indexService(shardId.getIndex()); - if (indexService != null) { - IndexShard indexShard = indexService.getShardOrNull(shardId.id()); - if (indexShard != null) { - try { - final StoreFilesMetadata storeFilesMetadata = new StoreFilesMetadata( - shardId, - indexShard.snapshotStoreMetadata(), - indexShard.getPeerRecoveryRetentionLeases() - ); - exists = true; - return storeFilesMetadata; - } catch (org.apache.lucene.index.IndexNotFoundException e) { - logger.trace(new ParameterizedMessage("[{}] node is missing index, responding with empty", shardId), e); - return new StoreFilesMetadata(shardId, Store.MetadataSnapshot.EMPTY, Collections.emptyList()); - } catch (IOException e) { - logger.warn(new ParameterizedMessage("[{}] can't read metadata from store, responding with empty", shardId), e); - return new StoreFilesMetadata(shardId, Store.MetadataSnapshot.EMPTY, Collections.emptyList()); - } - } - } - final String customDataPath; - if (request.getCustomDataPath() != null) { - customDataPath = request.getCustomDataPath(); - } else { - // TODO: Fallback for BWC with older predecessor (ES) versions. 
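The list(...) override above adapts the new map-based AsyncShardFetch.Lister signature to the legacy single-shard action: it asserts exactly one entry, unpacks the shard id and custom data path, and delegates to the existing request. A self-contained sketch with stand-in record types (the real ShardId and ShardAttributes carry more state):

```java
import java.util.Map;

// Adapt a batched, map-based lister to a legacy single-shard action:
// exactly one entry is expected, unpacked, and delegated.
final class SingleShardListerSketch {
    record ShardId(String index, int id) {}
    record ShardAttributes(String customDataPath) {}

    void list(Map<ShardId, ShardAttributes> shardAttributes) {
        // enable with -ea; mirrors the assert in the diff
        assert shardAttributes.size() == 1 : "only one shard should be specified";
        ShardId shardId = shardAttributes.keySet().iterator().next();
        String customDataPath = shardAttributes.get(shardId).customDataPath();
        execute(shardId, customDataPath); // existing single-shard request path
    }

    private void execute(ShardId shardId, String customDataPath) {
        // build and send the per-shard transport request here
    }
}
```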
- // Remove this once request.getCustomDataPath() always returns non-null - if (indexService != null) { - customDataPath = indexService.getIndexSettings().customDataPath(); - } else { - IndexMetadata metadata = clusterService.state().metadata().index(shardId.getIndex()); - if (metadata != null) { - customDataPath = new IndexSettings(metadata, settings).customDataPath(); - } else { - logger.trace("{} node doesn't have meta data for the requests index", shardId); - throw new OpenSearchException("node doesn't have meta data for index " + shardId.getIndex()); - } - } - } - final ShardPath shardPath = ShardPath.loadShardPath(logger, nodeEnv, shardId, customDataPath); - if (shardPath == null) { - return new StoreFilesMetadata(shardId, Store.MetadataSnapshot.EMPTY, Collections.emptyList()); - } - // note that this may fail if it can't get access to the shard lock. Since we check above there is an active shard, this means: - // 1) a shard is being constructed, which means the cluster-manager will not use a copy of this replica - // 2) A shard is shutting down and has not cleared it's content within lock timeout. In this case the cluster-manager may not - // reuse local resources. - final Store.MetadataSnapshot metadataSnapshot = Store.readMetadataSnapshot( - shardPath.resolveIndex(), - shardId, - nodeEnv::shardLock, - logger - ); - // We use peer recovery retention leases from the primary for allocating replicas. We should always have retention leases when - // we refresh shard info after the primary has started. Hence, we can ignore retention leases if there is no active shard. - return new StoreFilesMetadata(shardId, metadataSnapshot, Collections.emptyList()); - } finally { - TimeValue took = new TimeValue(System.nanoTime() - startTimeNS, TimeUnit.NANOSECONDS); - if (exists) { - logger.debug("{} loaded store meta data (took [{}])", shardId, took); - } else { - logger.trace("{} didn't find any store meta data to load (took [{}])", shardId, took); - } - } - } - - /** - * Metadata for store files - * - * @opensearch.internal - */ - public static class StoreFilesMetadata implements Iterable<StoreFileMetadata>, Writeable { - private final ShardId shardId; - private final Store.MetadataSnapshot metadataSnapshot; - private final List<RetentionLease> peerRecoveryRetentionLeases; - - public StoreFilesMetadata( - ShardId shardId, - Store.MetadataSnapshot metadataSnapshot, - List<RetentionLease> peerRecoveryRetentionLeases - ) { - this.shardId = shardId; - this.metadataSnapshot = metadataSnapshot; - this.peerRecoveryRetentionLeases = peerRecoveryRetentionLeases; - } - - public StoreFilesMetadata(StreamInput in) throws IOException { - this.shardId = new ShardId(in); - this.metadataSnapshot = new Store.MetadataSnapshot(in); - this.peerRecoveryRetentionLeases = in.readList(RetentionLease::new); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - shardId.writeTo(out); - metadataSnapshot.writeTo(out); - out.writeList(peerRecoveryRetentionLeases); - } - - public ShardId shardId() { - return this.shardId; - } - - public boolean isEmpty() { - return metadataSnapshot.size() == 0; - } - - @Override - public Iterator<StoreFileMetadata> iterator() { - return metadataSnapshot.iterator(); - } - - public boolean fileExists(String name) { - return metadataSnapshot.asMap().containsKey(name); - } - - public StoreFileMetadata file(String name) { - return metadataSnapshot.asMap().get(name); - } - - /** - * Returns the retaining sequence number of the peer recovery retention lease for a given node 
if exists; otherwise, returns -1. - */ - public long getPeerRecoveryRetentionLeaseRetainingSeqNo(DiscoveryNode node) { - assert node != null; - final String retentionLeaseId = ReplicationTracker.getPeerRecoveryRetentionLeaseId(node.getId()); - return peerRecoveryRetentionLeases.stream() - .filter(lease -> lease.id().equals(retentionLeaseId)) - .mapToLong(RetentionLease::retainingSequenceNumber) - .findFirst() - .orElse(-1L); - } - - public List<RetentionLease> peerRecoveryRetentionLeases() { - return peerRecoveryRetentionLeases; - } - - /** - * @return commit sync id if exists, else null - */ - public String syncId() { - return metadataSnapshot.getSyncId(); - } - - @Override - public String toString() { - return "StoreFilesMetadata{" - + ", shardId=" - + shardId - + ", metadataSnapshot{size=" - + metadataSnapshot.size() - + ", syncId=" - + metadataSnapshot.getSyncId() - + "}" - + '}'; - } + return listShardMetadataInternal(logger, shardId, nodeEnv, indicesService, request.getCustomDataPath(), settings, clusterService); } /** diff --git a/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadataBatch.java b/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadataBatch.java new file mode 100644 index 0000000000000..3f151fe1c5ca0 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadataBatch.java @@ -0,0 +1,346 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.store; + +import org.opensearch.OpenSearchException; +import org.opensearch.action.ActionType; +import org.opensearch.action.FailedNodeException; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.nodes.BaseNodeResponse; +import org.opensearch.action.support.nodes.BaseNodesRequest; +import org.opensearch.action.support.nodes.BaseNodesResponse; +import org.opensearch.action.support.nodes.TransportNodesAction; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.env.NodeEnvironment; +import org.opensearch.gateway.AsyncShardFetch; +import org.opensearch.index.store.Store; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.store.TransportNodesListShardStoreMetadataHelper.StoreFilesMetadata; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import static org.opensearch.indices.store.TransportNodesListShardStoreMetadataHelper.INDEX_NOT_FOUND; + +/** + * Transport action for fetching the batch of shard stores Metadata from a list of transport nodes + * + * @opensearch.internal + */ +public class TransportNodesListShardStoreMetadataBatch extends TransportNodesAction< + 
TransportNodesListShardStoreMetadataBatch.Request, + TransportNodesListShardStoreMetadataBatch.NodesStoreFilesMetadataBatch, + TransportNodesListShardStoreMetadataBatch.NodeRequest, + TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadataBatch> + implements + AsyncShardFetch.Lister< + TransportNodesListShardStoreMetadataBatch.NodesStoreFilesMetadataBatch, + TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadataBatch> { + + public static final String ACTION_NAME = "internal:cluster/nodes/indices/shard/store/batch"; + public static final ActionType<TransportNodesListShardStoreMetadataBatch.NodesStoreFilesMetadataBatch> TYPE = new ActionType<>( + ACTION_NAME, + TransportNodesListShardStoreMetadataBatch.NodesStoreFilesMetadataBatch::new + ); + + private final Settings settings; + private final IndicesService indicesService; + private final NodeEnvironment nodeEnv; + + @Inject + public TransportNodesListShardStoreMetadataBatch( + Settings settings, + ThreadPool threadPool, + ClusterService clusterService, + TransportService transportService, + IndicesService indicesService, + NodeEnvironment nodeEnv, + ActionFilters actionFilters + ) { + super( + ACTION_NAME, + threadPool, + clusterService, + transportService, + actionFilters, + Request::new, + NodeRequest::new, + ThreadPool.Names.FETCH_SHARD_STORE, + NodeStoreFilesMetadataBatch.class + ); + this.settings = settings; + this.indicesService = indicesService; + this.nodeEnv = nodeEnv; + } + + @Override + public void list( + Map<ShardId, ShardAttributes> shardAttributes, + DiscoveryNode[] nodes, + ActionListener<NodesStoreFilesMetadataBatch> listener + ) { + execute(new TransportNodesListShardStoreMetadataBatch.Request(shardAttributes, nodes), listener); + } + + @Override + protected NodeRequest newNodeRequest(Request request) { + return new NodeRequest(request); + } + + @Override + protected NodeStoreFilesMetadataBatch newNodeResponse(StreamInput in) throws IOException { + return new NodeStoreFilesMetadataBatch(in); + } + + @Override + protected NodesStoreFilesMetadataBatch newResponse( + Request request, + List<NodeStoreFilesMetadataBatch> responses, + List<FailedNodeException> failures + ) { + return new NodesStoreFilesMetadataBatch(clusterService.getClusterName(), responses, failures); + } + + @Override + protected NodeStoreFilesMetadataBatch nodeOperation(NodeRequest request) { + try { + return new NodeStoreFilesMetadataBatch(clusterService.localNode(), listStoreMetadata(request)); + } catch (IOException e) { + throw new OpenSearchException( + "Failed to list store metadata for shards [" + request.getShardAttributes().keySet().stream().map(ShardId::toString) + "]", + e + ); + } + } + + /** + * This method is similar to listStoreMetadata method of {@link TransportNodesListShardStoreMetadata} + * In this case we fetch the shard store files for batch of shards instead of one shard. 
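listStoreMetadata in the batch action isolates failures per shard: known "index not found" errors map to a null entry, while unexpected exceptions are captured next to an empty result so one bad shard cannot fail the whole batch. The sketch below mirrors that loop with simplified types; note it adds a null-message guard that the diff's contains() call does not have.

```java
import java.util.HashMap;
import java.util.Map;

// Per-shard error isolation for a batched fetch.
final class BatchFetchSketch {
    static final String INDEX_NOT_FOUND = "node doesn't have meta data for index ";

    record Result(String metadata, Exception failure) {}

    Map<String, Result> fetchBatch(Map<String, String> shardToDataPath) {
        Map<String, Result> results = new HashMap<>();
        for (Map.Entry<String, String> entry : shardToDataPath.entrySet()) {
            try {
                results.put(entry.getKey(), new Result(loadMetadata(entry.getKey(), entry.getValue()), null));
            } catch (Exception e) {
                // null-message guard added in this sketch
                if (e.getMessage() != null && e.getMessage().contains(INDEX_NOT_FOUND)) {
                    results.put(entry.getKey(), null); // known, expected failure
                } else {
                    results.put(entry.getKey(), new Result("", e)); // surface per shard
                }
            }
        }
        return results;
    }

    private String loadMetadata(String shardId, String customDataPath) {
        return "metadata-snapshot"; // placeholder for listShardMetadataInternal
    }
}
```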
+ */ + private Map<ShardId, NodeStoreFilesMetadata> listStoreMetadata(NodeRequest request) throws IOException { + Map<ShardId, NodeStoreFilesMetadata> shardStoreMetadataMap = new HashMap<ShardId, NodeStoreFilesMetadata>(); + for (Map.Entry<ShardId, ShardAttributes> shardAttributes : request.getShardAttributes().entrySet()) { + final ShardId shardId = shardAttributes.getKey(); + try { + StoreFilesMetadata storeFilesMetadata = TransportNodesListShardStoreMetadataHelper.listShardMetadataInternal( + logger, + shardId, + nodeEnv, + indicesService, + shardAttributes.getValue().getCustomDataPath(), + settings, + clusterService + ); + shardStoreMetadataMap.put(shardId, new NodeStoreFilesMetadata(storeFilesMetadata, null)); + } catch (Exception e) { + // should return null in case of known exceptions being returned from listShardMetadataInternal method. + if (e.getMessage().contains(INDEX_NOT_FOUND)) { + shardStoreMetadataMap.put(shardId, null); + } else { + // return actual exception as it is for unknown exceptions + shardStoreMetadataMap.put( + shardId, + new NodeStoreFilesMetadata( + new StoreFilesMetadata(shardId, Store.MetadataSnapshot.EMPTY, Collections.emptyList()), + e + ) + ); + } + } + } + return shardStoreMetadataMap; + } + + /** + * Request is used in constructing the request for making the transport request to set of other node. + * Refer {@link TransportNodesAction} class start method. + * + * @opensearch.internal + */ + public static class Request extends BaseNodesRequest<Request> { + + private final Map<ShardId, ShardAttributes> shardAttributes; + + public Request(StreamInput in) throws IOException { + super(in); + shardAttributes = in.readMap(ShardId::new, ShardAttributes::new); + } + + public Request(Map<ShardId, ShardAttributes> shardAttributes, DiscoveryNode[] nodes) { + super(nodes); + this.shardAttributes = Objects.requireNonNull(shardAttributes); + } + + public Map<ShardId, ShardAttributes> getShardAttributes() { + return shardAttributes; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeMap(shardAttributes, (o, k) -> k.writeTo(o), (o, v) -> v.writeTo(o)); + } + } + + /** + * Metadata for the nodes store files + * + * @opensearch.internal + */ + public static class NodesStoreFilesMetadataBatch extends BaseNodesResponse<NodeStoreFilesMetadataBatch> { + + public NodesStoreFilesMetadataBatch(StreamInput in) throws IOException { + super(in); + } + + public NodesStoreFilesMetadataBatch( + ClusterName clusterName, + List<NodeStoreFilesMetadataBatch> nodes, + List<FailedNodeException> failures + ) { + super(clusterName, nodes, failures); + } + + @Override + protected List<NodeStoreFilesMetadataBatch> readNodesFrom(StreamInput in) throws IOException { + return in.readList(NodeStoreFilesMetadataBatch::new); + } + + @Override + protected void writeNodesTo(StreamOutput out, List<NodeStoreFilesMetadataBatch> nodes) throws IOException { + out.writeList(nodes); + } + } + + /** + * The metadata for the node store files + * + * @opensearch.internal + */ + public static class NodeStoreFilesMetadata { + + private StoreFilesMetadata storeFilesMetadata; + private Exception storeFileFetchException; + + public NodeStoreFilesMetadata(StoreFilesMetadata storeFilesMetadata) { + this.storeFilesMetadata = storeFilesMetadata; + this.storeFileFetchException = null; + } + + public NodeStoreFilesMetadata(StreamInput in) throws IOException { + storeFilesMetadata = new StoreFilesMetadata(in); + if (in.readBoolean()) { + 
this.storeFileFetchException = in.readException(); + } else { + this.storeFileFetchException = null; + } + } + + public NodeStoreFilesMetadata(StoreFilesMetadata storeFilesMetadata, Exception storeFileFetchException) { + this.storeFilesMetadata = storeFilesMetadata; + this.storeFileFetchException = storeFileFetchException; + } + + public StoreFilesMetadata storeFilesMetadata() { + return storeFilesMetadata; + } + + public void writeTo(StreamOutput out) throws IOException { + storeFilesMetadata.writeTo(out); + if (storeFileFetchException != null) { + out.writeBoolean(true); + out.writeException(storeFileFetchException); + } else { + out.writeBoolean(false); + } + } + + public Exception getStoreFileFetchException() { + return storeFileFetchException; + } + + @Override + public String toString() { + return "[[" + storeFilesMetadata + "]]"; + } + } + + /** + * NodeRequest class is for deserializing the request received by this node from other node for this transport action. + * This is used in {@link TransportNodesAction} + * @opensearch.internal + */ + public static class NodeRequest extends TransportRequest { + + private final Map<ShardId, ShardAttributes> shardAttributes; + + public NodeRequest(StreamInput in) throws IOException { + super(in); + shardAttributes = in.readMap(ShardId::new, ShardAttributes::new); + } + + public NodeRequest(Request request) { + this.shardAttributes = Objects.requireNonNull(request.getShardAttributes()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeMap(shardAttributes, (o, k) -> k.writeTo(o), (o, v) -> v.writeTo(o)); + } + + public Map<ShardId, ShardAttributes> getShardAttributes() { + return shardAttributes; + } + } + + /** + * NodeStoreFilesMetadataBatch Response received by the node from other node for this transport action. + * Refer {@link TransportNodesAction} + */ + public static class NodeStoreFilesMetadataBatch extends BaseNodeResponse { + private final Map<ShardId, NodeStoreFilesMetadata> nodeStoreFilesMetadataBatch; + + protected NodeStoreFilesMetadataBatch(StreamInput in) throws IOException { + super(in); + this.nodeStoreFilesMetadataBatch = in.readMap(ShardId::new, NodeStoreFilesMetadata::new); + } + + public NodeStoreFilesMetadataBatch(DiscoveryNode node, Map<ShardId, NodeStoreFilesMetadata> nodeStoreFilesMetadataBatch) { + super(node); + this.nodeStoreFilesMetadataBatch = nodeStoreFilesMetadataBatch; + } + + public Map<ShardId, NodeStoreFilesMetadata> getNodeStoreFilesMetadataBatch() { + return this.nodeStoreFilesMetadataBatch; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeMap(nodeStoreFilesMetadataBatch, (o, k) -> k.writeTo(o), (o, v) -> v.writeTo(o)); + } + } + +} diff --git a/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadataHelper.java b/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadataHelper.java new file mode 100644 index 0000000000000..74b04d6c6d494 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadataHelper.java @@ -0,0 +1,221 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
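NodeStoreFilesMetadata above serializes its optional fetch exception behind a boolean presence flag, so the reader knows whether an exception payload follows. The pattern in miniature, with DataOutputStream and writeUTF standing in for StreamOutput#writeException:

```java
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Optional payload behind a boolean presence flag: write true plus the
// payload, or false alone; the reader branches on the flag.
final class OptionalFieldCodec {
    static void writeOptionalError(DataOutputStream out, Exception error) throws IOException {
        if (error != null) {
            out.writeBoolean(true);
            out.writeUTF(error.toString()); // stand-in for writeException
        } else {
            out.writeBoolean(false);
        }
    }

    static String readOptionalError(DataInputStream in) throws IOException {
        return in.readBoolean() ? in.readUTF() : null; // null == no error recorded
    }
}
```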
+ */ + +package org.opensearch.indices.store; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.OpenSearchException; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.env.NodeEnvironment; +import org.opensearch.index.IndexService; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.seqno.ReplicationTracker; +import org.opensearch.index.seqno.RetentionLease; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.ShardPath; +import org.opensearch.index.store.Store; +import org.opensearch.index.store.StoreFileMetadata; +import org.opensearch.indices.IndicesService; + +import java.io.IOException; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.TimeUnit; + +/** + * This class has the common code used in {@link TransportNodesListShardStoreMetadata} and + * {@link TransportNodesListShardStoreMetadataBatch} to get the shard info on the local node. + * <p> + * This class should not be used to add more functions and will be removed when the + * {@link TransportNodesListShardStoreMetadata} will be deprecated and all the code will be moved to + * {@link TransportNodesListShardStoreMetadataBatch} + * + * @opensearch.internal + */ +public class TransportNodesListShardStoreMetadataHelper { + + public static final String INDEX_NOT_FOUND = "node doesn't have meta data for index "; + + public static StoreFilesMetadata listShardMetadataInternal( + Logger logger, + final ShardId shardId, + NodeEnvironment nodeEnv, + IndicesService indicesService, + String customDataPath, + Settings settings, + ClusterService clusterService + ) throws IOException { + logger.trace("listing store meta data for {}", shardId); + long startTimeNS = System.nanoTime(); + boolean exists = false; + try { + IndexService indexService = indicesService.indexService(shardId.getIndex()); + if (indexService != null) { + IndexShard indexShard = indexService.getShardOrNull(shardId.id()); + if (indexShard != null) { + try { + final StoreFilesMetadata storeFilesMetadata = new StoreFilesMetadata( + shardId, + indexShard.snapshotStoreMetadata(), + indexShard.getPeerRecoveryRetentionLeases() + ); + exists = true; + return storeFilesMetadata; + } catch (org.apache.lucene.index.IndexNotFoundException e) { + logger.trace(new ParameterizedMessage("[{}] node is missing index, responding with empty", shardId), e); + return new StoreFilesMetadata(shardId, Store.MetadataSnapshot.EMPTY, Collections.emptyList()); + } catch (IOException e) { + logger.warn(new ParameterizedMessage("[{}] can't read metadata from store, responding with empty", shardId), e); + return new StoreFilesMetadata(shardId, Store.MetadataSnapshot.EMPTY, Collections.emptyList()); + } + } + } + if (customDataPath == null) { + // TODO: Fallback for BWC with older predecessor (ES) versions. 
+ // Remove this once request.getCustomDataPath() always returns non-null + if (indexService != null) { + customDataPath = indexService.getIndexSettings().customDataPath(); + } else { + IndexMetadata metadata = clusterService.state().metadata().index(shardId.getIndex()); + if (metadata != null) { + customDataPath = new IndexSettings(metadata, settings).customDataPath(); + } else { + logger.trace("{} node doesn't have meta data for the requests index", shardId); + throw new OpenSearchException(INDEX_NOT_FOUND + shardId.getIndex()); + } + } + } + final ShardPath shardPath = ShardPath.loadShardPath(logger, nodeEnv, shardId, customDataPath); + if (shardPath == null) { + return new StoreFilesMetadata(shardId, Store.MetadataSnapshot.EMPTY, Collections.emptyList()); + } + // note that this may fail if it can't get access to the shard lock. Since we check above there is an active shard, this means: + // 1) a shard is being constructed, which means the cluster-manager will not use a copy of this replica + // 2) A shard is shutting down and has not cleared it's content within lock timeout. In this case the cluster-manager may not + // reuse local resources. + final Store.MetadataSnapshot metadataSnapshot = Store.readMetadataSnapshot( + shardPath.resolveIndex(), + shardId, + nodeEnv::shardLock, + logger + ); + // We use peer recovery retention leases from the primary for allocating replicas. We should always have retention leases when + // we refresh shard info after the primary has started. Hence, we can ignore retention leases if there is no active shard. + return new StoreFilesMetadata(shardId, metadataSnapshot, Collections.emptyList()); + } finally { + TimeValue took = new TimeValue(System.nanoTime() - startTimeNS, TimeUnit.NANOSECONDS); + if (exists) { + logger.debug("{} loaded store meta data (took [{}])", shardId, took); + } else { + logger.trace("{} didn't find any store meta data to load (took [{}])", shardId, took); + } + } + } + + /** + * Metadata for store files + * + * @opensearch.internal + */ + public static class StoreFilesMetadata implements Iterable<StoreFileMetadata>, Writeable { + private final ShardId shardId; + private final Store.MetadataSnapshot metadataSnapshot; + private final List<RetentionLease> peerRecoveryRetentionLeases; + + public StoreFilesMetadata( + ShardId shardId, + Store.MetadataSnapshot metadataSnapshot, + List<RetentionLease> peerRecoveryRetentionLeases + ) { + this.shardId = shardId; + this.metadataSnapshot = metadataSnapshot; + this.peerRecoveryRetentionLeases = peerRecoveryRetentionLeases; + } + + public StoreFilesMetadata(StreamInput in) throws IOException { + this.shardId = new ShardId(in); + this.metadataSnapshot = new Store.MetadataSnapshot(in); + this.peerRecoveryRetentionLeases = in.readList(RetentionLease::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + shardId.writeTo(out); + metadataSnapshot.writeTo(out); + out.writeList(peerRecoveryRetentionLeases); + } + + public ShardId shardId() { + return this.shardId; + } + + public boolean isEmpty() { + return metadataSnapshot.size() == 0; + } + + @Override + public Iterator<StoreFileMetadata> iterator() { + return metadataSnapshot.iterator(); + } + + public boolean fileExists(String name) { + return metadataSnapshot.asMap().containsKey(name); + } + + public StoreFileMetadata file(String name) { + return metadataSnapshot.asMap().get(name); + } + + /** + * Returns the retaining sequence number of the peer recovery retention lease for a given node if exists; otherwise, 
returns -1. + */ + public long getPeerRecoveryRetentionLeaseRetainingSeqNo(DiscoveryNode node) { + assert node != null; + final String retentionLeaseId = ReplicationTracker.getPeerRecoveryRetentionLeaseId(node.getId()); + return peerRecoveryRetentionLeases.stream() + .filter(lease -> lease.id().equals(retentionLeaseId)) + .mapToLong(RetentionLease::retainingSequenceNumber) + .findFirst() + .orElse(-1L); + } + + public List<RetentionLease> peerRecoveryRetentionLeases() { + return peerRecoveryRetentionLeases; + } + + /** + * @return commit sync id if exists, else null + */ + public String syncId() { + return metadataSnapshot.getSyncId(); + } + + @Override + public String toString() { + return "StoreFilesMetadata{" + + ", shardId=" + + shardId + + ", metadataSnapshot{size=" + + metadataSnapshot.size() + + ", syncId=" + + metadataSnapshot.getSyncId() + + "}" + + '}'; + } + } +} diff --git a/server/src/main/java/org/opensearch/ingest/ConfigurationUtils.java b/server/src/main/java/org/opensearch/ingest/ConfigurationUtils.java index 894d699aee7f5..a2c2137130587 100644 --- a/server/src/main/java/org/opensearch/ingest/ConfigurationUtils.java +++ b/server/src/main/java/org/opensearch/ingest/ConfigurationUtils.java @@ -32,23 +32,24 @@ package org.opensearch.ingest; -import java.io.IOException; -import java.io.InputStream; import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchParseException; -import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.common.Nullable; import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.script.Script; import org.opensearch.script.ScriptService; import org.opensearch.script.ScriptType; import org.opensearch.script.TemplateScript; +import java.io.IOException; +import java.io.InputStream; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -73,7 +74,7 @@ private ConfigurationUtils() {} /** * Returns and removes the specified optional property from the specified configuration map. - * + * <p> * If the property value isn't of type string a {@link OpenSearchParseException} is thrown. */ public static String readOptionalStringProperty( @@ -88,7 +89,7 @@ public static String readOptionalStringProperty( /** * Returns and removes the specified property from the specified configuration map. - * + * <p> * If the property value isn't of type string an {@link OpenSearchParseException} is thrown. * If the property is missing an {@link OpenSearchParseException} is thrown */ @@ -103,7 +104,7 @@ public static String readStringProperty( /** * Returns and removes the specified property from the specified configuration map. - * + * <p> * If the property value isn't of type string a {@link OpenSearchParseException} is thrown. 
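The readXxxProperty family in ConfigurationUtils, whose javadoc these hunks touch, follows one pattern: remove the property from the configuration map (so leftovers can later be reported as unknown keys) and throw a parse exception on a missing value or type mismatch. A minimal sketch, with IllegalArgumentException standing in for OpenSearchParseException:

```java
import java.util.Map;

// Read-and-remove: consuming each property lets the caller flag whatever
// remains in the map as unknown configuration afterwards.
final class ConfigReaderSketch {
    static String readStringProperty(Map<String, Object> config, String propertyName) {
        Object value = config.remove(propertyName);
        if (value == null) {
            throw new IllegalArgumentException("[" + propertyName + "] required property is missing");
        }
        if (!(value instanceof String)) {
            throw new IllegalArgumentException(
                "[" + propertyName + "] property isn't a string, but of type [" + value.getClass().getName() + "]"
            );
        }
        return (String) value;
    }
}
```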
* If the property is missing and no default value has been specified a {@link OpenSearchParseException} is thrown */ @@ -140,7 +141,7 @@ private static String readString(String processorType, String processorTag, Stri /** * Returns and removes the specified property from the specified configuration map. - * + * <p> * If the property value isn't of type string or int a {@link OpenSearchParseException} is thrown. * If the property is missing and no default value has been specified a {@link OpenSearchParseException} is thrown */ @@ -179,7 +180,7 @@ private static String readStringOrInt(String processorType, String processorTag, /** * Returns and removes the specified property from the specified configuration map. - * + * <p> * If the property value isn't of type string or int a {@link OpenSearchParseException} is thrown. */ public static String readOptionalStringOrIntProperty( @@ -227,7 +228,7 @@ private static Boolean readBoolean(String processorType, String processorTag, St /** * Returns and removes the specified property from the specified configuration map. - * + * <p> * If the property value isn't of type int a {@link OpenSearchParseException} is thrown. * If the property is missing an {@link OpenSearchParseException} is thrown */ @@ -256,7 +257,7 @@ public static Integer readIntProperty( /** * Returns and removes the specified property from the specified configuration map. - * + * <p> * If the property value isn't of type int a {@link OpenSearchParseException} is thrown. * If the property is missing an {@link OpenSearchParseException} is thrown */ @@ -284,7 +285,7 @@ public static Double readDoubleProperty( /** * Returns and removes the specified property of type list from the specified configuration map. - * + * <p> * If the property value isn't of type list an {@link OpenSearchParseException} is thrown. */ public static <T> List<T> readOptionalList( @@ -302,7 +303,7 @@ public static <T> List<T> readOptionalList( /** * Returns and removes the specified property of type list from the specified configuration map. - * + * <p> * If the property value isn't of type list an {@link OpenSearchParseException} is thrown. * If the property is missing an {@link OpenSearchParseException} is thrown */ @@ -332,7 +333,7 @@ private static <T> List<T> readList(String processorType, String processorTag, S /** * Returns and removes the specified property of type map from the specified configuration map. - * + * <p> * If the property value isn't of type map an {@link OpenSearchParseException} is thrown. * If the property is missing an {@link OpenSearchParseException} is thrown */ @@ -352,7 +353,7 @@ public static <T> Map<String, T> readMap( /** * Returns and removes the specified property of type map from the specified configuration map. - * + * <p> * If the property value isn't of type map an {@link OpenSearchParseException} is thrown. */ public static <T> Map<String, T> readOptionalMap( @@ -386,6 +387,7 @@ private static <T> Map<String, T> readMap(String processorType, String processor /** * Returns and removes the specified property as an {@link Object} from the specified configuration map. 
+ * If the property is missing an {@link OpenSearchParseException} is thrown */ public static Object readObject(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) { Object value = configuration.remove(propertyName); @@ -395,6 +397,13 @@ public static Object readObject(String processorType, String processorTag, Map<S return value; } + /** + * Returns and removes the specified property as an {@link Object} from the specified configuration map. + */ + public static Object readOptionalObject(Map<String, Object> configuration, String propertyName) { + return configuration.remove(propertyName); + } + public static OpenSearchException newConfigurationException( String processorType, String processorTag, @@ -510,9 +519,11 @@ public static Processor readProcessor( Map<String, Processor.Factory> processorFactories, ScriptService scriptService, String type, - Object config + @Nullable Object config ) throws Exception { - if (config instanceof Map) { + if (config == null) { + throw newConfigurationException(type, null, null, "the config of processor [" + type + "] cannot be null"); + } else if (config instanceof Map) { return readProcessor(processorFactories, scriptService, type, (Map<String, Object>) config); } else if (config instanceof String && "script".equals(type)) { Map<String, Object> normalizedScript = new HashMap<>(1); @@ -527,8 +538,11 @@ public static Processor readProcessor( Map<String, Processor.Factory> processorFactories, ScriptService scriptService, String type, - Map<String, Object> config + @Nullable Map<String, Object> config ) throws Exception { + if (config == null) { + throw newConfigurationException(type, null, null, "expect the config of processor [" + type + "] to be map, but is null"); + } String tag = ConfigurationUtils.readOptionalStringProperty(null, null, config, TAG_KEY); String description = ConfigurationUtils.readOptionalStringProperty(null, tag, config, DESCRIPTION_KEY); boolean ignoreFailure = ConfigurationUtils.readBooleanProperty(null, null, config, IGNORE_FAILURE_KEY, false); @@ -578,7 +592,7 @@ private static Script extractConditional(Map<String, Object> config) throws IOEx try ( XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent).map(normalizeScript(scriptSource)); InputStream stream = BytesReference.bytes(builder).streamInput(); - XContentParser parser = XContentType.JSON.xContent() + XContentParser parser = MediaTypeRegistry.JSON.xContent() .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream) ) { return Script.parse(parser); diff --git a/server/src/main/java/org/opensearch/ingest/IngestDocument.java b/server/src/main/java/org/opensearch/ingest/IngestDocument.java index e0de0a9488ad9..d975b0014de1f 100644 --- a/server/src/main/java/org/opensearch/ingest/IngestDocument.java +++ b/server/src/main/java/org/opensearch/ingest/IngestDocument.java @@ -33,6 +33,7 @@ package org.opensearch.ingest; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.index.VersionType; import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.IndexFieldMapper; @@ -752,10 +753,11 @@ public Map<String, Object> getSourceAndMetadata() { @SuppressWarnings("unchecked") public static <K, V> Map<K, V> deepCopyMap(Map<K, V> source) { + CollectionUtils.ensureNoSelfReferences(source, "IngestDocument: Self reference present in object."); return (Map<K, V>) deepCopy(source); } - private static Object 
deepCopy(Object value) { + public static Object deepCopy(Object value) { if (value instanceof Map) { Map<?, ?> mapValue = (Map<?, ?>) value; Map<Object, Object> copy = new HashMap<>(mapValue.size()); diff --git a/server/src/main/java/org/opensearch/ingest/IngestInfo.java b/server/src/main/java/org/opensearch/ingest/IngestInfo.java index 122aa4ab92002..ca359edb1715a 100644 --- a/server/src/main/java/org/opensearch/ingest/IngestInfo.java +++ b/server/src/main/java/org/opensearch/ingest/IngestInfo.java @@ -34,8 +34,8 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.service.ReportingService; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.node.ReportingService; import java.io.IOException; import java.util.List; diff --git a/server/src/main/java/org/opensearch/ingest/IngestService.java b/server/src/main/java/org/opensearch/ingest/IngestService.java index 52ced9c051d14..2d4439e86461b 100644 --- a/server/src/main/java/org/opensearch/ingest/IngestService.java +++ b/server/src/main/java/org/opensearch/ingest/IngestService.java @@ -35,10 +35,9 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.OpenSearchParseException; import org.opensearch.ExceptionsHelper; +import org.opensearch.OpenSearchParseException; import org.opensearch.ResourceNotFoundException; -import org.opensearch.action.ActionListener; import org.opensearch.action.DocWriteRequest; import org.opensearch.action.bulk.TransportBulkAction; import org.opensearch.action.index.IndexRequest; @@ -66,12 +65,14 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.service.ReportingService; import org.opensearch.env.Environment; import org.opensearch.gateway.GatewayService; import org.opensearch.index.IndexSettings; import org.opensearch.index.VersionType; import org.opensearch.index.analysis.AnalysisRegistry; -import org.opensearch.node.ReportingService; +import org.opensearch.indices.IndicesService; import org.opensearch.plugins.IngestPlugin; import org.opensearch.script.ScriptService; import org.opensearch.threadpool.ThreadPool; @@ -128,7 +129,8 @@ public IngestService( ScriptService scriptService, AnalysisRegistry analysisRegistry, List<IngestPlugin> ingestPlugins, - Client client + Client client, + IndicesService indicesService ) { this.clusterService = clusterService; this.scriptService = scriptService; @@ -143,7 +145,8 @@ public IngestService( (delay, command) -> threadPool.schedule(command, TimeValue.timeValueMillis(delay), ThreadPool.Names.GENERIC), this, client, - threadPool.generic()::execute + threadPool.generic()::execute, + indicesService ) ); this.threadPool = threadPool; @@ -695,7 +698,7 @@ public IngestStats stats() { /** * Adds a listener that gets invoked with the current cluster state before processor factories * get invoked. - * + * <p> * This is useful for components that are used by ingest processors, so that they have the opportunity to update * before these components get used by the ingest processor factory. 
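IngestDocument.deepCopyMap now calls CollectionUtils.ensureNoSelfReferences before copying, since a map or list that contains itself would otherwise recurse forever. The sketch below folds that guard into the copy itself using an identity-based set; the real implementation performs the check as a separate pass before deepCopy runs.

```java
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.IdentityHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

// Deep copy with a self-reference guard: containers on the current path are
// tracked by identity, and a cycle fails fast instead of overflowing.
final class DeepCopySketch {
    static Object deepCopy(Object value) {
        return deepCopy(value, Collections.newSetFromMap(new IdentityHashMap<>()));
    }

    private static Object deepCopy(Object value, Set<Object> path) {
        if (value instanceof Map<?, ?> map) {
            if (!path.add(map)) throw new IllegalArgumentException("Self reference present in object.");
            Map<Object, Object> copy = new HashMap<>(map.size());
            for (Map.Entry<?, ?> e : map.entrySet()) {
                copy.put(e.getKey(), deepCopy(e.getValue(), path));
            }
            path.remove(map);
            return copy;
        }
        if (value instanceof List<?> list) {
            if (!path.add(list)) throw new IllegalArgumentException("Self reference present in object.");
            List<Object> copy = new ArrayList<>(list.size());
            for (Object element : list) copy.add(deepCopy(element, path));
            path.remove(list);
            return copy;
        }
        return value; // scalars and other immutables are shared by reference
    }
}
```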
*/ diff --git a/server/src/main/java/org/opensearch/ingest/Pipeline.java b/server/src/main/java/org/opensearch/ingest/Pipeline.java index 766fb9cd66777..2541cfbf4af77 100644 --- a/server/src/main/java/org/opensearch/ingest/Pipeline.java +++ b/server/src/main/java/org/opensearch/ingest/Pipeline.java @@ -34,6 +34,8 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.common.Nullable; +import org.opensearch.common.metrics.OperationMetrics; +import org.opensearch.script.ScriptService; import java.util.Arrays; import java.util.Collections; @@ -43,9 +45,6 @@ import java.util.function.BiConsumer; import java.util.function.LongSupplier; -import org.opensearch.common.metrics.OperationMetrics; -import org.opensearch.script.ScriptService; - /** * A pipeline is a list of {@link Processor} instances grouped under a unique id. * @@ -124,8 +123,8 @@ public static Pipeline create( /** * Modifies the data of a document to be indexed based on the processor this pipeline holds - * - * If <code>null</code> is returned then this document will be dropped and not indexed, otherwise + * <p> + * If {@code null} is returned then this document will be dropped and not indexed, otherwise * this document will be kept and indexed. */ public void execute(IngestDocument ingestDocument, BiConsumer<IngestDocument, Exception> handler) { diff --git a/server/src/main/java/org/opensearch/ingest/PipelineConfiguration.java b/server/src/main/java/org/opensearch/ingest/PipelineConfiguration.java index 04892e4653065..477be3e74f1c7 100644 --- a/server/src/main/java/org/opensearch/ingest/PipelineConfiguration.java +++ b/server/src/main/java/org/opensearch/ingest/PipelineConfiguration.java @@ -35,18 +35,20 @@ import org.opensearch.Version; import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.Diff; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.ParseField; -import org.opensearch.common.Strings; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ContextParser; import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.common.xcontent.XContentType; import java.io.IOException; import java.util.Map; @@ -55,8 +57,9 @@ /** * Encapsulates a pipeline's id and configuration as a blob * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class PipelineConfiguration extends AbstractDiffable<PipelineConfiguration> implements ToXContentObject { private static final ObjectParser<Builder, Void> PARSER = new ObjectParser<>("pipeline_config", true, Builder::new); @@ -141,7 +144,7 @@ public static PipelineConfiguration readFrom(StreamInput in) throws IOException return new PipelineConfiguration( in.readString(), in.readBytesReference(), - in.getVersion().onOrAfter(Version.V_3_0_0) ? in.readMediaType() : in.readEnum(XContentType.class) + in.getVersion().onOrAfter(Version.V_2_10_0) ? 
in.readMediaType() : in.readEnum(XContentType.class) ); } @@ -151,14 +154,14 @@ public static Diff<PipelineConfiguration> readDiffFrom(StreamInput in) throws IO @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(id); out.writeBytesReference(config); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { mediaType.writeTo(out); } else { out.writeEnum((XContentType) mediaType); diff --git a/server/src/main/java/org/opensearch/ingest/Processor.java b/server/src/main/java/org/opensearch/ingest/Processor.java index e0f196dfcb115..ecae1c139ea5e 100644 --- a/server/src/main/java/org/opensearch/ingest/Processor.java +++ b/server/src/main/java/org/opensearch/ingest/Processor.java @@ -36,6 +36,7 @@ import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.env.Environment; import org.opensearch.index.analysis.AnalysisRegistry; +import org.opensearch.indices.IndicesService; import org.opensearch.script.ScriptService; import org.opensearch.threadpool.Scheduler; @@ -48,7 +49,7 @@ /** * A processor implementation may modify the data belonging to a document. * Whether changes are made and what exactly is modified is up to the implementation. - * + * <p> * Processors may get called concurrently and thus need to be thread-safe. * * @opensearch.internal @@ -57,7 +58,7 @@ public interface Processor { /** * Introspect and potentially modify the incoming data. - * + * <p> * Expert method: only override this method if a processor implementation needs to make an asynchronous call, * otherwise just overwrite {@link #execute(IngestDocument)}. */ @@ -156,6 +157,8 @@ class Parameters { */ public final Client client; + public final IndicesService indicesService; + public Parameters( Environment env, ScriptService scriptService, @@ -165,7 +168,8 @@ public Parameters( BiFunction<Long, Runnable, Scheduler.ScheduledCancellable> scheduler, IngestService ingestService, Client client, - Consumer<Runnable> genericExecutor + Consumer<Runnable> genericExecutor, + IndicesService indicesService ) { this.env = env; this.scriptService = scriptService; @@ -176,6 +180,7 @@ public Parameters( this.ingestService = ingestService; this.client = client; this.genericExecutor = genericExecutor; + this.indicesService = indicesService; } } diff --git a/server/src/main/java/org/opensearch/ingest/ValueSource.java b/server/src/main/java/org/opensearch/ingest/ValueSource.java index 0ef7c3373596d..3463fb0f83b26 100644 --- a/server/src/main/java/org/opensearch/ingest/ValueSource.java +++ b/server/src/main/java/org/opensearch/ingest/ValueSource.java @@ -56,7 +56,7 @@ public interface ValueSource { /** * Returns a copy of the value this ValueSource holds and resolves templates if there're any. - * + * <p> * For immutable values only a copy of the reference to the value is made. 
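The `V_3_0_0` to `V_2_10_0` change in `PipelineConfiguration` above is the standard wire-compatibility move: once the media-type serialization was available in 2.10, the version gate drops so that 2.10+ peers exchange the new form while older peers still get the legacy enum. A JDK-only sketch of the gating pattern, with a made-up version constant and `DataOutputStream`/`DataInputStream` standing in for `StreamOutput`/`StreamInput`:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Sketch of version-gated serialization: write the new representation only
// when the receiving node is recent enough, fall back to the legacy encoding
// otherwise, and mirror the same check on the read side.
final class VersionGatedWire {

    static final int V_2_10_0 = 2_10_00; // made-up encoding of a version id

    static void writeMediaType(DataOutputStream out, int peerVersion, String mediaType) throws IOException {
        if (peerVersion >= V_2_10_0) {
            out.writeUTF(mediaType); // new representation: the media type itself
        } else {
            out.writeInt(0);         // legacy representation: an enum ordinal (JSON)
        }
    }

    static String readMediaType(DataInputStream in, int peerVersion) throws IOException {
        if (peerVersion >= V_2_10_0) {
            return in.readUTF();
        }
        in.readInt();                // consume the legacy ordinal
        return "JSON";
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        writeMediaType(new DataOutputStream(bytes), V_2_10_0, "JSON");
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println(readMediaType(in, V_2_10_0)); // JSON
    }
}
```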
* * @param model The model to be used when resolving any templates diff --git a/server/src/main/java/org/opensearch/lucene/queries/MinDocQuery.java b/server/src/main/java/org/opensearch/lucene/queries/MinDocQuery.java index 2af0237e800c0..5c904d8a7770d 100644 --- a/server/src/main/java/org/opensearch/lucene/queries/MinDocQuery.java +++ b/server/src/main/java/org/opensearch/lucene/queries/MinDocQuery.java @@ -86,7 +86,8 @@ public boolean equals(Object obj) { } @Override - public Query rewrite(IndexReader reader) throws IOException { + public Query rewrite(IndexSearcher searcher) throws IOException { + final IndexReader reader = searcher.getIndexReader(); if (Objects.equals(reader.getContext().id(), readerId) == false) { return new MinDocQuery(minDoc, reader.getContext().id()); } diff --git a/server/src/main/java/org/opensearch/lucene/queries/SearchAfterSortedDocQuery.java b/server/src/main/java/org/opensearch/lucene/queries/SearchAfterSortedDocQuery.java index e417a2eaa7cf4..600ba5b5a92d8 100644 --- a/server/src/main/java/org/opensearch/lucene/queries/SearchAfterSortedDocQuery.java +++ b/server/src/main/java/org/opensearch/lucene/queries/SearchAfterSortedDocQuery.java @@ -40,6 +40,7 @@ import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.LeafFieldComparator; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryVisitor; import org.apache.lucene.search.ScoreMode; @@ -77,7 +78,7 @@ public SearchAfterSortedDocQuery(Sort sort, FieldDoc after) { this.reverseMuls = new int[numFields]; for (int i = 0; i < numFields; i++) { SortField sortField = sort.getSort()[i]; - FieldComparator<?> fieldComparator = sortField.getComparator(1, false); + FieldComparator<?> fieldComparator = sortField.getComparator(1, Pruning.NONE); @SuppressWarnings("unchecked") FieldComparator<Object> comparator = (FieldComparator<Object>) fieldComparator; comparator.setTopValue(after.fields[i]); diff --git a/server/src/main/java/org/opensearch/monitor/MonitorService.java b/server/src/main/java/org/opensearch/monitor/MonitorService.java index bed638484f7c0..ad02b18366b98 100644 --- a/server/src/main/java/org/opensearch/monitor/MonitorService.java +++ b/server/src/main/java/org/opensearch/monitor/MonitorService.java @@ -32,7 +32,7 @@ package org.opensearch.monitor; -import org.opensearch.common.component.AbstractLifecycleComponent; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.settings.Settings; import org.opensearch.env.NodeEnvironment; import org.opensearch.index.store.remote.filecache.FileCache; diff --git a/server/src/main/java/org/opensearch/monitor/fs/FsHealthService.java b/server/src/main/java/org/opensearch/monitor/fs/FsHealthService.java index 85a44aa905e08..4b0a79783885f 100644 --- a/server/src/main/java/org/opensearch/monitor/fs/FsHealthService.java +++ b/server/src/main/java/org/opensearch/monitor/fs/FsHealthService.java @@ -35,10 +35,9 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; - import org.opensearch.common.Nullable; import org.opensearch.common.UUIDs; -import org.opensearch.common.component.AbstractLifecycleComponent; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; 
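The `MinDocQuery` hunk above tracks a Lucene API migration: `Query#rewrite` now receives the `IndexSearcher` rather than the `IndexReader`, and the reader is derived from the searcher. A toy query showing the new override shape; the rewrite target here is arbitrary, and only the signature plus the `searcher.getIndexReader()` access mirror the change:

```java
import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryVisitor;

// Toy query: it unconditionally rewrites itself away. The point is the new
// rewrite signature and deriving the reader from the searcher.
final class RewriteSignatureDemo extends Query {

    @Override
    public Query rewrite(IndexSearcher searcher) throws IOException {
        // Previously rewrite(IndexReader reader); the reader now comes off the
        // searcher. MinDocQuery uses it to compare reader context ids; this toy
        // only demonstrates the access pattern.
        final IndexReader reader = searcher.getIndexReader();
        return reader.getContext().id() != null ? new MatchAllDocsQuery() : this;
    }

    @Override
    public String toString(String field) {
        return "rewrite-signature-demo";
    }

    @Override
    public void visit(QueryVisitor visitor) {
        visitor.visitLeaf(this);
    }

    @Override
    public boolean equals(Object other) {
        return other instanceof RewriteSignatureDemo;
    }

    @Override
    public int hashCode() {
        return RewriteSignatureDemo.class.hashCode();
    }
}
```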
diff --git a/server/src/main/java/org/opensearch/monitor/fs/FsInfo.java b/server/src/main/java/org/opensearch/monitor/fs/FsInfo.java index cfa29436f24a5..ddff022112665 100644 --- a/server/src/main/java/org/opensearch/monitor/fs/FsInfo.java +++ b/server/src/main/java/org/opensearch/monitor/fs/FsInfo.java @@ -34,10 +34,11 @@ import org.opensearch.Version; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; @@ -51,15 +52,17 @@ /** * FileSystem information * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class FsInfo implements Iterable<FsInfo.Path>, Writeable, ToXContentFragment { /** * Path for the file system * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Path implements Writeable, ToXContentObject { String path; @@ -220,8 +223,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws /** * The device status. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class DeviceStats implements Writeable, ToXContentFragment { final int majorDeviceNumber; @@ -235,6 +239,14 @@ public static class DeviceStats implements Writeable, ToXContentFragment { final long previousWritesCompleted; final long currentSectorsWritten; final long previousSectorsWritten; + final long currentReadTime; + final long previousReadTime; + final long currentWriteTime; + final long previousWriteTime; + final long currentQueueSize; + final long previousQueueSize; + final long currentIOTime; + final long previousIOTime; public DeviceStats( final int majorDeviceNumber, @@ -244,6 +256,10 @@ public DeviceStats( final long currentSectorsRead, final long currentWritesCompleted, final long currentSectorsWritten, + final long currentReadTime, + final long currentWriteTime, + final long currentQueueSize, + final long currentIOTime, final DeviceStats previousDeviceStats ) { this( @@ -257,7 +273,15 @@ public DeviceStats( currentSectorsRead, previousDeviceStats != null ? previousDeviceStats.currentSectorsRead : -1, currentWritesCompleted, - previousDeviceStats != null ? previousDeviceStats.currentWritesCompleted : -1 + previousDeviceStats != null ? previousDeviceStats.currentWritesCompleted : -1, + currentReadTime, + previousDeviceStats != null ? previousDeviceStats.currentReadTime : -1, + currentWriteTime, + previousDeviceStats != null ? previousDeviceStats.currentWriteTime : -1, + currentQueueSize, + previousDeviceStats != null ? previousDeviceStats.currentQueueSize : -1, + currentIOTime, + previousDeviceStats != null ?
previousDeviceStats.currentIOTime : -1 ); } @@ -272,7 +296,15 @@ private DeviceStats( final long currentSectorsRead, final long previousSectorsRead, final long currentWritesCompleted, - final long previousWritesCompleted + final long previousWritesCompleted, + final long currentReadTime, + final long previousReadTime, + final long currentWriteTime, + final long previousWriteTime, + final long currentQueueSize, + final long previousQueueSize, + final long currentIOTime, + final long previousIOTime ) { this.majorDeviceNumber = majorDeviceNumber; this.minorDeviceNumber = minorDeviceNumber; @@ -285,6 +317,14 @@ private DeviceStats( this.previousSectorsRead = previousSectorsRead; this.currentSectorsWritten = currentSectorsWritten; this.previousSectorsWritten = previousSectorsWritten; + this.currentReadTime = currentReadTime; + this.previousReadTime = previousReadTime; + this.currentWriteTime = currentWriteTime; + this.previousWriteTime = previousWriteTime; + this.currentQueueSize = currentQueueSize; + this.previousQueueSize = previousQueueSize; + this.currentIOTime = currentIOTime; + this.previousIOTime = previousIOTime; } public DeviceStats(StreamInput in) throws IOException { @@ -299,6 +339,25 @@ public DeviceStats(StreamInput in) throws IOException { previousSectorsRead = in.readLong(); currentSectorsWritten = in.readLong(); previousSectorsWritten = in.readLong(); + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + currentReadTime = in.readLong(); + previousReadTime = in.readLong(); + currentWriteTime = in.readLong(); + previousWriteTime = in.readLong(); + currentQueueSize = in.readLong(); + previousQueueSize = in.readLong(); + currentIOTime = in.readLong(); + previousIOTime = in.readLong(); + } else { + currentReadTime = 0; + previousReadTime = 0; + currentWriteTime = 0; + previousWriteTime = 0; + currentQueueSize = 0; + previousQueueSize = 0; + currentIOTime = 0; + previousIOTime = 0; + } } @Override @@ -314,6 +373,16 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(previousSectorsRead); out.writeLong(currentSectorsWritten); out.writeLong(previousSectorsWritten); + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeLong(currentReadTime); + out.writeLong(previousReadTime); + out.writeLong(currentWriteTime); + out.writeLong(previousWriteTime); + out.writeLong(currentQueueSize); + out.writeLong(previousQueueSize); + out.writeLong(currentIOTime); + out.writeLong(previousIOTime); + } } public long operations() { @@ -346,24 +415,65 @@ public long writeKilobytes() { return (currentSectorsWritten - previousSectorsWritten) / 2; } + /** + * Total time taken for all read operations + */ + public long readTime() { + if (previousReadTime == -1) return -1; + return currentReadTime - previousReadTime; + } + + /** + * Total time taken for all write operations + */ + public long writeTime() { + if (previousWriteTime == -1) return -1; + return currentWriteTime - previousWriteTime; + } + + /** + * Queue size based on weighted time spent doing I/Os + */ + public long queueSize() { + if (previousQueueSize == -1) return -1; + return currentQueueSize - previousQueueSize; + } + + /** + * Total time spent doing I/Os + */ + public long ioTimeInMillis() { + if (previousIOTime == -1) return -1; + + return (currentIOTime - previousIOTime); + } + + public String getDeviceName() { + return deviceName; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field("device_name", deviceName); + 
builder.field("device_name", getDeviceName()); builder.field(IoStats.OPERATIONS, operations()); builder.field(IoStats.READ_OPERATIONS, readOperations()); builder.field(IoStats.WRITE_OPERATIONS, writeOperations()); builder.field(IoStats.READ_KILOBYTES, readKilobytes()); builder.field(IoStats.WRITE_KILOBYTES, writeKilobytes()); + builder.field(IoStats.READ_TIME, readTime()); + builder.field(IoStats.WRITE_TIME, writeTime()); + builder.field(IoStats.QUEUE_SIZE, queueSize()); + builder.field(IoStats.IO_TIME_MS, ioTimeInMillis()); return builder; } - } /** * The I/O statistics. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class IoStats implements Writeable, ToXContentFragment { private static final String OPERATIONS = "operations"; @@ -371,6 +481,10 @@ public static class IoStats implements Writeable, ToXContentFragment { private static final String WRITE_OPERATIONS = "write_operations"; private static final String READ_KILOBYTES = "read_kilobytes"; private static final String WRITE_KILOBYTES = "write_kilobytes"; + private static final String READ_TIME = "read_time"; + private static final String WRITE_TIME = "write_time"; + private static final String QUEUE_SIZE = "queue_size"; + private static final String IO_TIME_MS = "io_time_in_millis"; final DeviceStats[] devicesStats; final long totalOperations; @@ -378,6 +492,10 @@ public static class IoStats implements Writeable, ToXContentFragment { final long totalWriteOperations; final long totalReadKilobytes; final long totalWriteKilobytes; + final long totalReadTime; + final long totalWriteTime; + final long totalQueueSize; + final long totalIOTimeInMillis; public IoStats(final DeviceStats[] devicesStats) { this.devicesStats = devicesStats; @@ -386,18 +504,30 @@ public IoStats(final DeviceStats[] devicesStats) { long totalWriteOperations = 0; long totalReadKilobytes = 0; long totalWriteKilobytes = 0; + long totalReadTime = 0; + long totalWriteTime = 0; + long totalQueueSize = 0; + long totalIOTimeInMillis = 0; for (DeviceStats deviceStats : devicesStats) { totalOperations += deviceStats.operations() != -1 ? deviceStats.operations() : 0; totalReadOperations += deviceStats.readOperations() != -1 ? deviceStats.readOperations() : 0; totalWriteOperations += deviceStats.writeOperations() != -1 ? deviceStats.writeOperations() : 0; totalReadKilobytes += deviceStats.readKilobytes() != -1 ? deviceStats.readKilobytes() : 0; totalWriteKilobytes += deviceStats.writeKilobytes() != -1 ? deviceStats.writeKilobytes() : 0; + totalReadTime += deviceStats.readTime() != -1 ? deviceStats.readTime() : 0; + totalWriteTime += deviceStats.writeTime() != -1 ? deviceStats.writeTime() : 0; + totalQueueSize += deviceStats.queueSize() != -1 ? deviceStats.queueSize() : 0; + totalIOTimeInMillis += deviceStats.ioTimeInMillis() != -1 ? 
deviceStats.ioTimeInMillis() : 0; } this.totalOperations = totalOperations; this.totalReadOperations = totalReadOperations; this.totalWriteOperations = totalWriteOperations; this.totalReadKilobytes = totalReadKilobytes; this.totalWriteKilobytes = totalWriteKilobytes; + this.totalReadTime = totalReadTime; + this.totalWriteTime = totalWriteTime; + this.totalQueueSize = totalQueueSize; + this.totalIOTimeInMillis = totalIOTimeInMillis; } public IoStats(StreamInput in) throws IOException { @@ -412,6 +542,17 @@ public IoStats(StreamInput in) throws IOException { this.totalWriteOperations = in.readLong(); this.totalReadKilobytes = in.readLong(); this.totalWriteKilobytes = in.readLong(); + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + this.totalReadTime = in.readLong(); + this.totalWriteTime = in.readLong(); + this.totalQueueSize = in.readLong(); + this.totalIOTimeInMillis = in.readLong(); + } else { + this.totalReadTime = 0; + this.totalWriteTime = 0; + this.totalQueueSize = 0; + this.totalIOTimeInMillis = 0; + } } @Override @@ -425,6 +566,12 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(totalWriteOperations); out.writeLong(totalReadKilobytes); out.writeLong(totalWriteKilobytes); + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeLong(totalReadTime); + out.writeLong(totalWriteTime); + out.writeLong(totalQueueSize); + out.writeLong(totalIOTimeInMillis); + } } public DeviceStats[] getDevicesStats() { @@ -451,6 +598,34 @@ public long getTotalWriteKilobytes() { return totalWriteKilobytes; } + /** + * Sum of read time across all devices + */ + public long getTotalReadTime() { + return totalReadTime; + } + + /** + * Sum of write time across all devices + */ + public long getTotalWriteTime() { + return totalWriteTime; + } + + /** + * Sum of queue size across all devices + */ + public long getTotalQueueSize() { + return totalQueueSize; + } + + /** + * Sum of IO time across all devices + */ + public long getTotalIOTimeMillis() { + return totalIOTimeInMillis; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { if (devicesStats.length > 0) { @@ -468,11 +643,15 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(WRITE_OPERATIONS, totalWriteOperations); builder.field(READ_KILOBYTES, totalReadKilobytes); builder.field(WRITE_KILOBYTES, totalWriteKilobytes); + + builder.field(READ_TIME, totalReadTime); + builder.field(WRITE_TIME, totalWriteTime); + builder.field(QUEUE_SIZE, totalQueueSize); + builder.field(IO_TIME_MS, totalIOTimeInMillis); builder.endObject(); } return builder; } - } private final long timestamp; diff --git a/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java b/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java index 50d1d981f3c98..f4731a4a34373 100644 --- a/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java +++ b/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java @@ -39,7 +39,7 @@ import org.opensearch.common.SuppressForbidden; import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.PathUtils; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.env.NodeEnvironment; import org.opensearch.env.NodeEnvironment.NodePath; import org.opensearch.index.store.remote.filecache.FileCache; @@ -109,6 +109,25 @@ final FsInfo.IoStats ioStats(final Set<Tuple<Integer, Integer>> devicesNumbers, List<FsInfo.DeviceStats> devicesStats 
= new ArrayList<>(); + /** + * The /proc/diskstats file displays the I/O statistics of block devices. + * Each line contains the following 14 fields (newer kernels may append additional fields): + * + * 1 major number + * 2 minor number + * 3 device name + * 4 reads completed successfully + * 5 reads merged + * 6 sectors read + * 7 time spent reading (ms) + * 8 writes completed + * 9 writes merged + * 10 sectors written + * 11 time spent writing (ms) + * 12 I/Os currently in progress + * 13 time spent doing I/Os (ms) ---- IO use percent + * 14 weighted time spent doing I/Os (ms) ---- Queue size + */ List<String> lines = readProcDiskStats(); if (!lines.isEmpty()) { for (String line : lines) { @@ -123,6 +142,12 @@ final FsInfo.IoStats ioStats(final Set<Tuple<Integer, Integer>> devicesNumbers, final long sectorsRead = Long.parseLong(fields[5]); final long writesCompleted = Long.parseLong(fields[7]); final long sectorsWritten = Long.parseLong(fields[9]); + // readTime and writeTime accumulate the total time taken for each read/write request to complete + // ioTime measures the actual time the queue and disks are busy + final long readTime = Long.parseLong(fields[6]); + final long writeTime = Long.parseLong(fields[10]); + final long ioTime = fields.length > 12 ? Long.parseLong(fields[12]) : 0; + final long queueSize = fields.length > 13 ? Long.parseLong(fields[13]) : 0; final FsInfo.DeviceStats deviceStats = new FsInfo.DeviceStats( majorDeviceNumber, minorDeviceNumber, @@ -131,6 +156,10 @@ final FsInfo.IoStats ioStats(final Set<Tuple<Integer, Integer>> devicesNumbers, sectorsRead, writesCompleted, sectorsWritten, + readTime, + writeTime, + queueSize, + ioTime, deviceMap.get(Tuple.tuple(majorDeviceNumber, minorDeviceNumber)) ); devicesStats.add(deviceStats); diff --git a/server/src/main/java/org/opensearch/monitor/jvm/JvmGcMonitorService.java b/server/src/main/java/org/opensearch/monitor/jvm/JvmGcMonitorService.java index 7bdc74d9ae574..6481c21ffea48 100644 --- a/server/src/main/java/org/opensearch/monitor/jvm/JvmGcMonitorService.java +++ b/server/src/main/java/org/opensearch/monitor/jvm/JvmGcMonitorService.java @@ -34,12 +34,12 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.common.component.AbstractLifecycleComponent; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.monitor.jvm.JvmStats.GarbageCollector; import org.opensearch.threadpool.Scheduler.Cancellable; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java b/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java index 1acdf00ca6af0..e3290bfec6905 100644 --- a/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java +++ b/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java @@ -39,9 +39,9 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.service.ReportingService; import org.opensearch.core.xcontent.XContentBuilder; -import
org.opensearch.node.ReportingService; import java.io.IOException; import java.lang.management.GarbageCollectorMXBean; @@ -171,7 +171,7 @@ public class JvmInfo implements ReportingService.Info { } final boolean bundledJdk = Booleans.parseBoolean(System.getProperty("opensearch.bundled_jdk", Boolean.FALSE.toString())); - final Boolean usingBundledJdk = bundledJdk ? usingBundledJdk() : null; + final Boolean usingBundledJdkOrJre = bundledJdk ? usingBundledJdkOrJre() : null; INSTANCE = new JvmInfo( JvmPid.getPid(), @@ -180,7 +180,7 @@ public class JvmInfo implements ReportingService.Info { runtimeMXBean.getVmVersion(), runtimeMXBean.getVmVendor(), bundledJdk, - usingBundledJdk, + usingBundledJdkOrJre, runtimeMXBean.getStartTime(), configuredInitialHeapSize, configuredMaxHeapSize, @@ -201,7 +201,7 @@ public class JvmInfo implements ReportingService.Info { } @SuppressForbidden(reason = "PathUtils#get") - private static boolean usingBundledJdk() { + private static boolean usingBundledJdkOrJre() { /* * We are using the bundled JDK if java.home is the jdk sub-directory of our working directory. This is because we always set * the working directory of Elasticsearch to home, and the bundled JDK is in the jdk sub-directory there. @@ -211,10 +211,12 @@ private static boolean usingBundledJdk() { if (Constants.MAC_OS_X) { return PathUtils.get(javaHome).equals(PathUtils.get(userDir).resolve("jdk.app/Contents/Home").toAbsolutePath()); } else { - return PathUtils.get(javaHome).equals(PathUtils.get(userDir).resolve("jdk").toAbsolutePath()); + return PathUtils.get(javaHome).equals(PathUtils.get(userDir).resolve("jre").toAbsolutePath()) + || PathUtils.get(javaHome).equals(PathUtils.get(userDir).resolve("jdk").toAbsolutePath()); } } + @SuppressWarnings("removal") public static JvmInfo jvmInfo() { SecurityManager sm = System.getSecurityManager(); if (sm != null) { diff --git a/server/src/main/java/org/opensearch/monitor/jvm/JvmService.java b/server/src/main/java/org/opensearch/monitor/jvm/JvmService.java index d59120063e338..548a5fb3650c1 100644 --- a/server/src/main/java/org/opensearch/monitor/jvm/JvmService.java +++ b/server/src/main/java/org/opensearch/monitor/jvm/JvmService.java @@ -38,7 +38,7 @@ import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.node.ReportingService; +import org.opensearch.core.service.ReportingService; /** * Service for monitoring the JVM diff --git a/server/src/main/java/org/opensearch/monitor/jvm/JvmStats.java b/server/src/main/java/org/opensearch/monitor/jvm/JvmStats.java index 5451821a565cd..61590acbe5a53 100644 --- a/server/src/main/java/org/opensearch/monitor/jvm/JvmStats.java +++ b/server/src/main/java/org/opensearch/monitor/jvm/JvmStats.java @@ -32,11 +32,11 @@ package org.opensearch.monitor.jvm; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; diff --git a/server/src/main/java/org/opensearch/monitor/os/OsInfo.java b/server/src/main/java/org/opensearch/monitor/os/OsInfo.java index cc19e4a82aed8..98bad3f406405 100644 --- 
a/server/src/main/java/org/opensearch/monitor/os/OsInfo.java +++ b/server/src/main/java/org/opensearch/monitor/os/OsInfo.java @@ -32,11 +32,11 @@ package org.opensearch.monitor.os; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.service.ReportingService; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.node.ReportingService; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/monitor/os/OsProbe.java b/server/src/main/java/org/opensearch/monitor/os/OsProbe.java index 98229941252ba..a0a14372aa31a 100644 --- a/server/src/main/java/org/opensearch/monitor/os/OsProbe.java +++ b/server/src/main/java/org/opensearch/monitor/os/OsProbe.java @@ -59,13 +59,13 @@ /** * The {@link OsProbe} class retrieves information about the physical and swap size of the machine * memory, as well as the system load average and cpu load. - * + * <p> * In some exceptional cases, it's possible the underlying native methods used by * {@link #getFreePhysicalMemorySize()}, {@link #getTotalPhysicalMemorySize()}, * {@link #getFreeSwapSpaceSize()}, and {@link #getTotalSwapSpaceSize()} can return a * negative value. Because of this, we prevent those methods from returning negative values, * returning 0 instead. - * + * <p> * The OS can report a negative number in a number of cases: * - Non-supported OSes (HP-UX, or AIX) * - A failure of macOS to initialize host statistics @@ -183,11 +183,11 @@ public long getTotalSwapSpaceSize() { /** * The system load averages as an array. - * + * <p> * On Windows, this method returns {@code null}. - * + * <p> * On Linux, this method returns the 1, 5, and 15-minute load averages. - * + * <p> * On macOS, this method should return the 1-minute load average. 
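The `FsProbe` hunk earlier in this diff pulls four extra columns out of `/proc/diskstats` per device. A standalone sketch of that parsing, using the 14-column layout from the comment block (0-based after splitting on whitespace); the sample line is fabricated:

```java
// Standalone sketch of the /proc/diskstats parsing added to FsProbe above.
// Field positions follow the documented 14-column layout, 0-based after
// splitting on whitespace. The sample line below is made up.
final class DiskStatsLine {
    public static void main(String[] args) {
        String line = "8 0 sda 52000 120 980000 4100 31000 450 620000 9800 0 7200 14500";
        String[] fields = line.trim().split("\\s+");
        long readsCompleted = Long.parseLong(fields[3]);
        long readTimeMs = Long.parseLong(fields[6]);        // time spent reading
        long writesCompleted = Long.parseLong(fields[7]);
        long writeTimeMs = Long.parseLong(fields[10]);      // time spent writing
        // Older kernels may emit fewer columns, hence the length guards.
        long ioTimeMs = fields.length > 12 ? Long.parseLong(fields[12]) : 0;      // time doing I/Os
        long weightedIoMs = fields.length > 13 ? Long.parseLong(fields[13]) : 0;  // queue-size proxy
        System.out.printf(
            "%s: %d reads (%d ms), %d writes (%d ms), busy %d ms, weighted %d ms%n",
            fields[2], readsCompleted, readTimeMs, writesCompleted, writeTimeMs, ioTimeMs, weightedIoMs
        );
    }
}
```

Since these kernel counters only ever increase, `FsInfo.DeviceStats` then reports each metric as the difference between two successive samples, with -1 standing in for "no previous sample yet", and the `IoStats` totals treat a -1 delta as zero when summing across devices.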
* * @return the available system load averages or {@code null} diff --git a/server/src/main/java/org/opensearch/monitor/os/OsService.java b/server/src/main/java/org/opensearch/monitor/os/OsService.java index 00e4bb1365b71..5ae2f4f990252 100644 --- a/server/src/main/java/org/opensearch/monitor/os/OsService.java +++ b/server/src/main/java/org/opensearch/monitor/os/OsService.java @@ -40,7 +40,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.SingleObjectCache; import org.opensearch.common.util.concurrent.OpenSearchExecutors; -import org.opensearch.node.ReportingService; +import org.opensearch.core.service.ReportingService; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/monitor/os/OsStats.java b/server/src/main/java/org/opensearch/monitor/os/OsStats.java index c684bf10c4a03..cdcec733cb086 100644 --- a/server/src/main/java/org/opensearch/monitor/os/OsStats.java +++ b/server/src/main/java/org/opensearch/monitor/os/OsStats.java @@ -34,10 +34,11 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -48,8 +49,9 @@ /** * Holds stats for the Operating System * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class OsStats implements Writeable, ToXContentFragment { private final long timestamp; @@ -143,8 +145,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws /** * CPU Information. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Cpu implements Writeable, ToXContentFragment { private final short percent; @@ -208,8 +211,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws /** * Swap information. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Swap implements Writeable, ToXContentFragment { private static final Logger logger = LogManager.getLogger(Swap.class); @@ -276,8 +280,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws /** * OS Memory information. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Mem implements Writeable, ToXContentFragment { private static final Logger logger = LogManager.getLogger(Mem.class); @@ -353,8 +358,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws /** * Encapsulates basic cgroup statistics. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Cgroup implements Writeable, ToXContentFragment { private final String cpuAcctControlGroup; @@ -546,8 +552,9 @@ public XContentBuilder toXContent(final XContentBuilder builder, final Params pa /** * Encapsulates CPU time statistics. 
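Several of the stats classes in these hunks flip from `@opensearch.internal` to `@opensearch.api` and gain `@PublicApi(since = "1.0.0")`. A hypothetical reduction of such a marker annotation follows; the real one lives in `org.opensearch.common.annotation`, and its exact targets and retention are not shown in this diff:

```java
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

// Hypothetical reduction of the @PublicApi marker used above: a documented
// annotation recording the version since which a type is a stable, public
// contract that consumers may rely on across minor releases.
@Documented
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.RUNTIME)
@interface PublicApi {
    String since();
}

@PublicApi(since = "1.0.0")
class ExampleStats { /* now part of the public API surface */ }
```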
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class CpuStat implements Writeable, ToXContentFragment { private final long numberOfElapsedPeriods; diff --git a/server/src/main/java/org/opensearch/monitor/process/ProcessInfo.java b/server/src/main/java/org/opensearch/monitor/process/ProcessInfo.java index 8664deee63d3e..d2090e897e851 100644 --- a/server/src/main/java/org/opensearch/monitor/process/ProcessInfo.java +++ b/server/src/main/java/org/opensearch/monitor/process/ProcessInfo.java @@ -32,11 +32,11 @@ package org.opensearch.monitor.process; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.service.ReportingService; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.node.ReportingService; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/monitor/process/ProcessService.java b/server/src/main/java/org/opensearch/monitor/process/ProcessService.java index 538f546bd98ee..c659724edf24f 100644 --- a/server/src/main/java/org/opensearch/monitor/process/ProcessService.java +++ b/server/src/main/java/org/opensearch/monitor/process/ProcessService.java @@ -39,7 +39,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.SingleObjectCache; -import org.opensearch.node.ReportingService; +import org.opensearch.core.service.ReportingService; /** * The service for the process diff --git a/server/src/main/java/org/opensearch/monitor/process/ProcessStats.java b/server/src/main/java/org/opensearch/monitor/process/ProcessStats.java index de49c07a5600f..a931f5efdaed2 100644 --- a/server/src/main/java/org/opensearch/monitor/process/ProcessStats.java +++ b/server/src/main/java/org/opensearch/monitor/process/ProcessStats.java @@ -32,11 +32,11 @@ package org.opensearch.monitor.process; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; diff --git a/server/src/main/java/org/opensearch/node/AdaptiveSelectionStats.java b/server/src/main/java/org/opensearch/node/AdaptiveSelectionStats.java index 9d06d0c2cfe20..eea3e63830227 100644 --- a/server/src/main/java/org/opensearch/node/AdaptiveSelectionStats.java +++ b/server/src/main/java/org/opensearch/node/AdaptiveSelectionStats.java @@ -32,11 +32,12 @@ package org.opensearch.node; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.set.Sets; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.set.Sets; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -52,8 +53,9 @@ * EWMA of queue size, service time, and response time, as well as outgoing * searches to each node and 
the "rank" based on the ARS formula. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class AdaptiveSelectionStats implements Writeable, ToXContentFragment { private final Map<String, Long> clientOutgoingConnections; diff --git a/server/src/main/java/org/opensearch/node/InternalSettingsPreparer.java b/server/src/main/java/org/opensearch/node/InternalSettingsPreparer.java index 4038ae56d6ffe..3b9bd6a05dbf9 100644 --- a/server/src/main/java/org/opensearch/node/InternalSettingsPreparer.java +++ b/server/src/main/java/org/opensearch/node/InternalSettingsPreparer.java @@ -32,6 +32,12 @@ package org.opensearch.node; +import org.opensearch.Version; +import org.opensearch.cluster.ClusterName; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.settings.SettingsException; +import org.opensearch.env.Environment; + import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; @@ -39,14 +45,8 @@ import java.util.Collections; import java.util.List; import java.util.Map; -import java.util.function.Supplier; import java.util.function.Function; - -import org.opensearch.Version; -import org.opensearch.cluster.ClusterName; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.settings.SettingsException; -import org.opensearch.env.Environment; +import java.util.function.Supplier; /** * Prepares internal settings diff --git a/server/src/main/java/org/opensearch/node/IoUsageStats.java b/server/src/main/java/org/opensearch/node/IoUsageStats.java new file mode 100644 index 0000000000000..ecb1ac1bb1de4 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/IoUsageStats.java @@ -0,0 +1,69 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Locale; + +/** + * This class is to store tne IO Usage Stats and used to return in node stats API. + */ +public class IoUsageStats implements Writeable, ToXContentFragment { + + private double ioUtilisationPercent; + + public IoUsageStats(double ioUtilisationPercent) { + this.ioUtilisationPercent = ioUtilisationPercent; + } + + /** + * + * @param in the stream to read from + * @throws IOException if an error occurs while reading from the StreamOutput + */ + public IoUsageStats(StreamInput in) throws IOException { + this.ioUtilisationPercent = in.readDouble(); + } + + /** + * Write this into the {@linkplain StreamOutput}. 
+ * + * @param out the output stream to write entity content to + */ + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeDouble(this.ioUtilisationPercent); + } + + public double getIoUtilisationPercent() { + return ioUtilisationPercent; + } + + public void setIoUtilisationPercent(double ioUtilisationPercent) { + this.ioUtilisationPercent = ioUtilisationPercent; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("max_io_utilization_percent", String.format(Locale.ROOT, "%.1f", this.ioUtilisationPercent)); + return builder.endObject(); + } + + @Override + public String toString() { + return "IO utilization percent: " + String.format(Locale.ROOT, "%.1f", this.ioUtilisationPercent); + } +} diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index d768165451a5a..3ef3ae4f6230e 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -35,44 +35,8 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; -import org.opensearch.ExceptionsHelper; -import org.opensearch.common.SetOnce; -import org.opensearch.common.settings.SettingsException; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.common.util.FeatureFlags; -import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; -import org.opensearch.index.IndexModule; -import org.opensearch.index.IndexingPressureService; -import org.opensearch.index.store.remote.filecache.FileCache; -import org.opensearch.index.store.remote.filecache.FileCacheCleaner; -import org.opensearch.index.store.remote.filecache.FileCacheFactory; -import org.opensearch.indices.replication.SegmentReplicationSourceFactory; -import org.opensearch.indices.replication.SegmentReplicationTargetService; -import org.opensearch.indices.replication.SegmentReplicationSourceService; -import org.opensearch.extensions.ExtensionsManager; -import org.opensearch.extensions.NoopExtensionsManager; -import org.opensearch.monitor.fs.FsInfo; -import org.opensearch.monitor.fs.FsProbe; -import org.opensearch.plugins.ExtensionAwarePlugin; -import org.opensearch.plugins.SearchPipelinePlugin; -import org.opensearch.telemetry.tracing.NoopTracerFactory; -import org.opensearch.telemetry.tracing.Tracer; -import org.opensearch.telemetry.tracing.TracerFactory; -import org.opensearch.search.backpressure.SearchBackpressureService; -import org.opensearch.search.backpressure.settings.SearchBackpressureSettings; -import org.opensearch.search.pipeline.SearchPipelineService; -import org.opensearch.tasks.TaskCancellationMonitoringService; -import org.opensearch.tasks.TaskCancellationMonitoringSettings; -import org.opensearch.tasks.TaskResourceTrackingService; -import org.opensearch.tasks.consumer.TopNSearchTasksLogger; -import org.opensearch.threadpool.RunnableTaskExecutionListener; -import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; -import org.opensearch.telemetry.TelemetryModule; -import org.opensearch.telemetry.TelemetrySettings; -import org.opensearch.watcher.ResourceWatcherService; -import org.opensearch.core.Assertions; import org.opensearch.Build; +import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; import 
org.opensearch.OpenSearchTimeoutException; import org.opensearch.Version; @@ -80,8 +44,13 @@ import org.opensearch.action.ActionModule.DynamicActionRegistry; import org.opensearch.action.ActionType; import org.opensearch.action.admin.cluster.snapshots.status.TransportNodesSnapshotsStatus; +import org.opensearch.action.admin.indices.view.ViewService; import org.opensearch.action.search.SearchExecutionStatsCollector; import org.opensearch.action.search.SearchPhaseController; +import org.opensearch.action.search.SearchRequestOperationsCompositeListenerFactory; +import org.opensearch.action.search.SearchRequestOperationsListener; +import org.opensearch.action.search.SearchRequestSlowLog; +import org.opensearch.action.search.SearchRequestStats; import org.opensearch.action.search.SearchTransportService; import org.opensearch.action.support.TransportAction; import org.opensearch.action.update.UpdateHelper; @@ -97,6 +66,7 @@ import org.opensearch.cluster.InternalClusterInfoService; import org.opensearch.cluster.NodeConnectionsService; import org.opensearch.cluster.action.index.MappingUpdatedAction; +import org.opensearch.cluster.coordination.PersistedStateRegistry; import org.opensearch.cluster.metadata.AliasValidator; import org.opensearch.cluster.metadata.IndexTemplateMetadata; import org.opensearch.cluster.metadata.Metadata; @@ -109,17 +79,20 @@ import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.routing.BatchedRerouteService; import org.opensearch.cluster.routing.RerouteService; +import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; import org.opensearch.cluster.routing.allocation.DiskThresholdMonitor; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.SetOnce; import org.opensearch.common.StopWatch; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.component.Lifecycle; -import org.opensearch.common.component.LifecycleComponent; +import org.opensearch.common.cache.module.CacheModule; +import org.opensearch.common.cache.service.CacheService; import org.opensearch.common.inject.Injector; import org.opensearch.common.inject.Key; import org.opensearch.common.inject.Module; import org.opensearch.common.inject.ModulesBuilder; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.lease.Releasables; +import org.opensearch.common.lifecycle.Lifecycle; +import org.opensearch.common.lifecycle.LifecycleComponent; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.logging.HeaderWarning; import org.opensearch.common.logging.NodeAndClusterIdStateListener; @@ -132,31 +105,52 @@ import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.SettingUpgrader; import org.opensearch.common.settings.Settings; +import org.opensearch.common.settings.SettingsException; import org.opensearch.common.settings.SettingsModule; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.common.util.io.IOUtils; -import org.opensearch.common.lease.Releasables; +import org.opensearch.core.Assertions; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; 
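The new `IoUsageStats` class above follows the usual `Writeable` contract: the stream constructor must read fields in exactly the order `writeTo` writes them. A JDK-only round-trip sketch of that symmetry, with `DataInputStream`/`DataOutputStream` standing in for `StreamInput`/`StreamOutput`:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Sketch of the symmetric read/write contract IoUsageStats follows above:
// the reading constructor consumes fields in exactly the order writeTo
// emits them, so a round trip reproduces the original value.
final class IoUsageSketch {
    private final double ioUtilisationPercent;

    IoUsageSketch(double ioUtilisationPercent) {
        this.ioUtilisationPercent = ioUtilisationPercent;
    }

    IoUsageSketch(DataInputStream in) throws IOException {
        this.ioUtilisationPercent = in.readDouble(); // mirrors writeTo, field for field
    }

    void writeTo(DataOutputStream out) throws IOException {
        out.writeDouble(ioUtilisationPercent);
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        new IoUsageSketch(37.5).writeTo(new DataOutputStream(bytes));
        IoUsageSketch copy = new IoUsageSketch(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(copy.ioUtilisationPercent); // 37.5
    }
}
```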
+import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.indices.breaker.CircuitBreakerService; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.crypto.CryptoHandlerRegistry; import org.opensearch.discovery.Discovery; import org.opensearch.discovery.DiscoveryModule; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; import org.opensearch.env.NodeMetadata; +import org.opensearch.extensions.ExtensionsManager; +import org.opensearch.extensions.NoopExtensionsManager; import org.opensearch.gateway.GatewayAllocator; import org.opensearch.gateway.GatewayMetaState; import org.opensearch.gateway.GatewayModule; import org.opensearch.gateway.GatewayService; import org.opensearch.gateway.MetaStateService; import org.opensearch.gateway.PersistedClusterStateService; +import org.opensearch.gateway.remote.RemoteClusterStateService; import org.opensearch.http.HttpServerTransport; import org.opensearch.identity.IdentityService; +import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; +import org.opensearch.index.IndexingPressureService; +import org.opensearch.index.SegmentReplicationStatsTracker; import org.opensearch.index.analysis.AnalysisRegistry; import org.opensearch.index.engine.EngineFactory; +import org.opensearch.index.recovery.RemoteStoreRestoreService; +import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; +import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; +import org.opensearch.index.store.remote.filecache.FileCache; +import org.opensearch.index.store.remote.filecache.FileCacheCleaner; +import org.opensearch.index.store.remote.filecache.FileCacheFactory; import org.opensearch.indices.IndicesModule; import org.opensearch.indices.IndicesService; import org.opensearch.indices.ShardLimitValidator; @@ -164,28 +158,37 @@ import org.opensearch.indices.SystemIndices; import org.opensearch.indices.analysis.AnalysisModule; import org.opensearch.indices.breaker.BreakerSettings; -import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.indices.breaker.HierarchyCircuitBreakerService; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.indices.recovery.PeerRecoverySourceService; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.indices.replication.SegmentReplicationSourceFactory; +import org.opensearch.indices.replication.SegmentReplicationSourceService; +import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.store.IndicesStore; import org.opensearch.ingest.IngestService; import org.opensearch.monitor.MonitorService; import org.opensearch.monitor.fs.FsHealthService; +import org.opensearch.monitor.fs.FsInfo; +import org.opensearch.monitor.fs.FsProbe; import org.opensearch.monitor.jvm.JvmInfo; +import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.node.resource.tracker.NodeResourceUsageTracker; import org.opensearch.persistent.PersistentTasksClusterService; import 
org.opensearch.persistent.PersistentTasksExecutor; import org.opensearch.persistent.PersistentTasksExecutorRegistry; import org.opensearch.persistent.PersistentTasksService; import org.opensearch.plugins.ActionPlugin; import org.opensearch.plugins.AnalysisPlugin; +import org.opensearch.plugins.CachePlugin; import org.opensearch.plugins.CircuitBreakerPlugin; import org.opensearch.plugins.ClusterPlugin; +import org.opensearch.plugins.CryptoKeyProviderPlugin; +import org.opensearch.plugins.CryptoPlugin; import org.opensearch.plugins.DiscoveryPlugin; import org.opensearch.plugins.EnginePlugin; +import org.opensearch.plugins.ExtensionAwarePlugin; import org.opensearch.plugins.IdentityPlugin; import org.opensearch.plugins.IndexStorePlugin; import org.opensearch.plugins.IngestPlugin; @@ -197,9 +200,12 @@ import org.opensearch.plugins.PluginsService; import org.opensearch.plugins.RepositoryPlugin; import org.opensearch.plugins.ScriptPlugin; +import org.opensearch.plugins.SearchPipelinePlugin; import org.opensearch.plugins.SearchPlugin; import org.opensearch.plugins.SystemIndexPlugin; import org.opensearch.plugins.TelemetryPlugin; +import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlService; +import org.opensearch.ratelimitting.admissioncontrol.transport.AdmissionControlTransportInterceptor; import org.opensearch.repositories.RepositoriesModule; import org.opensearch.repositories.RepositoriesService; import org.opensearch.rest.RestController; @@ -210,7 +216,10 @@ import org.opensearch.search.SearchModule; import org.opensearch.search.SearchService; import org.opensearch.search.aggregations.support.AggregationUsageService; +import org.opensearch.search.backpressure.SearchBackpressureService; +import org.opensearch.search.backpressure.settings.SearchBackpressureSettings; import org.opensearch.search.fetch.FetchPhase; +import org.opensearch.search.pipeline.SearchPipelineService; import org.opensearch.search.query.QueryPhase; import org.opensearch.snapshots.InternalSnapshotsInfoService; import org.opensearch.snapshots.RestoreService; @@ -218,17 +227,32 @@ import org.opensearch.snapshots.SnapshotsInfoService; import org.opensearch.snapshots.SnapshotsService; import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskCancellationMonitoringService; +import org.opensearch.tasks.TaskCancellationMonitoringSettings; import org.opensearch.tasks.TaskCancellationService; +import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.tasks.TaskResultsService; +import org.opensearch.tasks.consumer.TopNSearchTasksLogger; +import org.opensearch.telemetry.TelemetryModule; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.telemetry.metrics.MetricsRegistry; +import org.opensearch.telemetry.metrics.MetricsRegistryFactory; +import org.opensearch.telemetry.metrics.NoopMetricsRegistryFactory; +import org.opensearch.telemetry.tracing.NoopTracerFactory; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.telemetry.tracing.TracerFactory; import org.opensearch.threadpool.ExecutorBuilder; +import org.opensearch.threadpool.RunnableTaskExecutionListener; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.RemoteClusterService; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportInterceptor; import org.opensearch.transport.TransportService; import org.opensearch.usage.UsageService; +import org.opensearch.watcher.ResourceWatcherService; import javax.net.ssl.SNIHostName; + import 
java.io.BufferedWriter; import java.io.Closeable; import java.io.IOException; @@ -262,6 +286,8 @@ import static org.opensearch.common.util.FeatureFlags.TELEMETRY; import static org.opensearch.env.NodeEnvironment.collectFileCacheDataPath; import static org.opensearch.index.ShardIndexingPressureSettings.SHARD_INDEXING_PRESSURE_ENABLED_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreAttributePresent; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled; /** * A node represent a node within a cluster ({@code cluster.name}). The {@link #client()} can be used @@ -380,9 +406,12 @@ public static class DiscoverySettings { private final LocalNodeFactory localNodeFactory; private final NodeService nodeService; private final Tracer tracer; + + private final MetricsRegistry metricsRegistry; final NamedWriteableRegistry namedWriteableRegistry; private final AtomicReference<RunnableTaskExecutionListener> runnableTaskListener; private FileCache fileCache; + private final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory; public Node(Environment environment) { this(environment, Collections.emptyList(), true); @@ -428,7 +457,7 @@ protected Node( Constants.JVM_VERSION ); if (jvmInfo.getBundledJdk()) { - logger.info("JVM home [{}], using bundled JDK [{}]", System.getProperty("java.home"), jvmInfo.getUsingBundledJdk()); + logger.info("JVM home [{}], using bundled JDK/JRE [{}]", System.getProperty("java.home"), jvmInfo.getUsingBundledJdk()); } else { logger.info("JVM home [{}]", System.getProperty("java.home")); deprecationLogger.deprecate( @@ -482,7 +511,7 @@ protected Node( for (ExtensionAwarePlugin extAwarePlugin : extensionAwarePlugins) { additionalSettings.addAll(extAwarePlugin.getExtensionSettings()); } - this.extensionsManager = new ExtensionsManager(additionalSettings); + this.extensionsManager = new ExtensionsManager(additionalSettings, identityService); } else { this.extensionsManager = new NoopExtensionsManager(); } @@ -502,7 +531,11 @@ protected Node( */ this.environment = new Environment(settings, initialEnvironment.configDir(), Node.NODE_LOCAL_STORAGE_SETTING.get(settings)); Environment.assertEquivalent(initialEnvironment, this.environment); - nodeEnvironment = new NodeEnvironment(tmpSettings, environment); + if (DiscoveryNode.isSearchNode(settings) == false) { + nodeEnvironment = new NodeEnvironment(tmpSettings, environment); + } else { + nodeEnvironment = new NodeEnvironment(settings, environment, new FileCacheCleaner(this::fileCache)); + } logger.info( "node name [{}], node ID [{}], cluster name [{}], roles {}", NODE_NAME_SETTING.get(tmpSettings), @@ -514,12 +547,15 @@ protected Node( .collect(Collectors.toCollection(LinkedHashSet::new)) ); resourcesToClose.add(nodeEnvironment); - localNodeFactory = new LocalNodeFactory(settings, nodeEnvironment.nodeId()); final List<ExecutorBuilder<?>> executorBuilders = pluginsService.getExecutorBuilders(settings); runnableTaskListener = new AtomicReference<>(); final ThreadPool threadPool = new ThreadPool(settings, runnableTaskListener, executorBuilders.toArray(new ExecutorBuilder[0])); + + final SetOnce<RepositoriesService> repositoriesServiceReference = new SetOnce<>(); + final RemoteStoreNodeService remoteStoreNodeService = new RemoteStoreNodeService(repositoriesServiceReference::get, threadPool); + localNodeFactory = new LocalNodeFactory(settings, nodeEnvironment.nodeId(), remoteStoreNodeService); resourcesToClose.add(() -> 
ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS)); final ResourceWatcherService resourceWatcherService = new ResourceWatcherService(settings, threadPool); resourcesToClose.add(resourceWatcherService); @@ -573,17 +609,38 @@ protected Node( new ConsistentSettingsService(settings, clusterService, consistentSettings).newHashPublisher() ); } - final IngestService ingestService = new IngestService( - clusterService, - threadPool, - this.environment, - scriptService, - analysisModule.getAnalysisRegistry(), - pluginsService.filterPlugins(IngestPlugin.class), - client - ); - final SetOnce<RepositoriesService> repositoriesServiceReference = new SetOnce<>(); + TracerFactory tracerFactory; + MetricsRegistryFactory metricsRegistryFactory; + if (FeatureFlags.isEnabled(TELEMETRY)) { + final TelemetrySettings telemetrySettings = new TelemetrySettings(settings, clusterService.getClusterSettings()); + if (telemetrySettings.isTracingFeatureEnabled() || telemetrySettings.isMetricsFeatureEnabled()) { + List<TelemetryPlugin> telemetryPlugins = pluginsService.filterPlugins(TelemetryPlugin.class); + TelemetryModule telemetryModule = new TelemetryModule(telemetryPlugins, telemetrySettings); + if (telemetrySettings.isTracingFeatureEnabled()) { + tracerFactory = new TracerFactory(telemetrySettings, telemetryModule.getTelemetry(), threadPool.getThreadContext()); + } else { + tracerFactory = new NoopTracerFactory(); + } + if (telemetrySettings.isMetricsFeatureEnabled()) { + metricsRegistryFactory = new MetricsRegistryFactory(telemetrySettings, telemetryModule.getTelemetry()); + } else { + metricsRegistryFactory = new NoopMetricsRegistryFactory(); + } + } else { + tracerFactory = new NoopTracerFactory(); + metricsRegistryFactory = new NoopMetricsRegistryFactory(); + } + } else { + tracerFactory = new NoopTracerFactory(); + metricsRegistryFactory = new NoopMetricsRegistryFactory(); + } + + tracer = tracerFactory.getTracer(); + metricsRegistry = metricsRegistryFactory.getMetricsRegistry(); + resourcesToClose.add(tracer::close); + resourcesToClose.add(metricsRegistry::close); + final ClusterInfoService clusterInfoService = newClusterInfoService(settings, clusterService, threadPool, client); final UsageService usageService = new UsageService(); @@ -629,7 +686,6 @@ protected Node( ); // File cache will be initialized by the node once circuit breakers are in place. 
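An editorial aside on the telemetry wiring above, not part of the patch: because no-op factories are substituted whenever the TELEMETRY feature flag or the individual tracing/metrics toggles are off, the node always holds a usable Tracer and MetricsRegistry and call sites never test for null. A minimal, self-contained sketch of that null-object fallback, using hypothetical stand-in types rather than the real OpenSearch interfaces:

public class NullObjectTelemetrySketch {
    interface Tracer { void trace(String message); }

    // Real implementation, used only when telemetry is fully enabled.
    static final class LoggingTracer implements Tracer {
        @Override public void trace(String message) { System.out.println("TRACE " + message); }
    }

    // No-op fallback: callers may invoke it unconditionally.
    static final class NoopTracer implements Tracer {
        @Override public void trace(String message) { /* intentionally a no-op */ }
    }

    // Mirrors the selection above: the feature flag and the per-signal toggle
    // must both be on before a real tracer is handed out.
    static Tracer tracer(boolean telemetryFeatureEnabled, boolean tracingEnabled) {
        return (telemetryFeatureEnabled && tracingEnabled) ? new LoggingTracer() : new NoopTracer();
    }

    public static void main(String[] args) {
        Tracer tracer = tracer(false, true);
        tracer.trace("dropped silently, but never a null check or NPE");
    }
}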
initializeFileCache(settings, circuitBreakerService.getBreaker(CircuitBreaker.REQUEST)); - final FileCacheCleaner fileCacheCleaner = new FileCacheCleaner(nodeEnvironment, fileCache); final MonitorService monitorService = new MonitorService(settings, nodeEnvironment, threadPool, fileCache); pluginsService.filterPlugins(CircuitBreakerPlugin.class).forEach(plugin -> { @@ -667,6 +723,19 @@ protected Node( clusterService.getClusterSettings(), threadPool::relativeTimeInMillis ); + final RemoteClusterStateService remoteClusterStateService; + if (isRemoteStoreClusterStateEnabled(settings)) { + remoteClusterStateService = new RemoteClusterStateService( + nodeEnvironment.nodeId(), + repositoriesServiceReference::get, + settings, + clusterService.getClusterSettings(), + threadPool::preciseRelativeTimeInNanos, + threadPool + ); + } else { + remoteClusterStateService = null; + } // collect engine factory providers from plugins final Collection<EnginePlugin> enginePlugins = pluginsService.filterPlugins(EnginePlugin.class); @@ -716,11 +785,19 @@ protected Node( rerouteServiceReference.set(rerouteService); clusterService.setRerouteService(rerouteService); + final RecoverySettings recoverySettings = new RecoverySettings(settings, settingsModule.getClusterSettings()); + final IndexStorePlugin.DirectoryFactory remoteDirectoryFactory = new RemoteSegmentStoreDirectoryFactory( repositoriesServiceReference::get, threadPool ); + final SearchRequestStats searchRequestStats = new SearchRequestStats(clusterService.getClusterSettings()); + final SearchRequestSlowLog searchRequestSlowLog = new SearchRequestSlowLog(clusterService); + + remoteStoreStatsTrackerFactory = new RemoteStoreStatsTrackerFactory(clusterService, settings); + CacheModule cacheModule = new CacheModule(pluginsService.filterPlugins(CachePlugin.class), settings); + CacheService cacheService = cacheModule.getCacheService(); final IndicesService indicesService = new IndicesService( settings, pluginsService, @@ -744,7 +821,21 @@ protected Node( recoveryStateFactories, remoteDirectoryFactory, repositoriesServiceReference::get, - fileCacheCleaner + searchRequestStats, + remoteStoreStatsTrackerFactory, + recoverySettings, + cacheService ); + + final IngestService ingestService = new IngestService( + clusterService, + threadPool, + this.environment, + scriptService, + analysisModule.getAnalysisRegistry(), + pluginsService.filterPlugins(IngestPlugin.class), + client, + indicesService ); final AliasValidator aliasValidator = new AliasValidator(); @@ -780,6 +871,8 @@ protected Node( metadataCreateIndexService ); + final ViewService viewService = new ViewService(clusterService, client, null); + Collection<Object> pluginComponents = pluginsService.filterPlugins(Plugin.class) .stream() .flatMap( @@ -799,6 +892,17 @@ protected Node( ) .collect(Collectors.toList()); + // register all standard SearchRequestOperationsListeners with the SearchRequestOperationsCompositeListenerFactory + final SearchRequestOperationsCompositeListenerFactory searchRequestOperationsCompositeListenerFactory = + new SearchRequestOperationsCompositeListenerFactory( + Stream.concat( + Stream.of(searchRequestStats, searchRequestSlowLog), + pluginComponents.stream() + .filter(p -> p instanceof SearchRequestOperationsListener) + .map(p -> (SearchRequestOperationsListener) p) + ).toArray(SearchRequestOperationsListener[]::new) + ); + ActionModule actionModule = new ActionModule( settings, clusterModule.getIndexNameExpressionResolver(), @@ -818,6 +922,30 @@ protected Node( final
RestController restController = actionModule.getRestController(); + final NodeResourceUsageTracker nodeResourceUsageTracker = new NodeResourceUsageTracker( + monitorService.fsService(), + threadPool, + settings, + clusterService.getClusterSettings() + ); + final ResourceUsageCollectorService resourceUsageCollectorService = new ResourceUsageCollectorService( + nodeResourceUsageTracker, + clusterService, + threadPool + ); + + final AdmissionControlService admissionControlService = new AdmissionControlService( + settings, + clusterService, + threadPool, + resourceUsageCollectorService + ); + + AdmissionControlTransportInterceptor admissionControlTransportInterceptor = new AdmissionControlTransportInterceptor( + admissionControlService + ); + + List<TransportInterceptor> transportInterceptors = List.of(admissionControlTransportInterceptor); final NetworkModule networkModule = new NetworkModule( settings, pluginsService.filterPlugins(NetworkPlugin.class), @@ -829,8 +957,11 @@ protected Node( xContentRegistry, networkService, restController, - clusterService.getClusterSettings() + clusterService.getClusterSettings(), + tracer, + transportInterceptors ); + Collection<UnaryOperator<Map<String, IndexTemplateMetadata>>> indexTemplateMetadataUpgraders = pluginsService.filterPlugins( Plugin.class ).stream().map(Plugin::getIndexTemplateMetadataUpgrader).collect(Collectors.toList()); @@ -859,7 +990,8 @@ protected Node( networkModule.getTransportInterceptor(), localNodeFactory, settingsModule.getClusterSettings(), - taskHeaders + taskHeaders, + tracer ); TopNSearchTasksLogger taskConsumer = new TopNSearchTasksLogger(settings, settingsModule.getClusterSettings()); transportService.getTaskManager().registerTaskResourceConsumer(taskConsumer); @@ -869,8 +1001,10 @@ protected Node( transportService, clusterService, environment.settings(), - client + client, + identityService ); + final PersistedStateRegistry persistedStateRegistry = new PersistedStateRegistry(); final GatewayMetaState gatewayMetaState = new GatewayMetaState(); final ResponseCollectorService responseCollectorService = new ResponseCollectorService(clusterService); final SearchTransportService searchTransportService = new SearchTransportService( @@ -901,7 +1035,7 @@ protected Node( transportService.getTaskManager() ); - final RecoverySettings recoverySettings = new RecoverySettings(settings, settingsModule.getClusterSettings()); + final SegmentReplicationStatsTracker segmentReplicationStatsTracker = new SegmentReplicationStatsTracker(indicesService); RepositoriesModule repositoriesModule = new RepositoriesModule( this.environment, pluginsService.filterPlugins(RepositoryPlugin.class), @@ -911,6 +1045,11 @@ protected Node( xContentRegistry, recoverySettings ); + CryptoHandlerRegistry.initRegistry( + pluginsService.filterPlugins(CryptoPlugin.class), + pluginsService.filterPlugins(CryptoKeyProviderPlugin.class), + settings + ); RepositoriesService repositoryService = repositoriesModule.getRepositoryService(); repositoriesServiceReference.set(repositoryService); SnapshotsService snapshotsService = new SnapshotsService( @@ -941,8 +1080,18 @@ protected Node( clusterModule.getAllocationService(), metadataCreateIndexService, metadataIndexUpgradeService, - clusterService.getClusterSettings(), - shardLimitValidator + shardLimitValidator, + indicesService, + clusterInfoService::getClusterInfo + ); + + RemoteStoreRestoreService remoteStoreRestoreService = new RemoteStoreRestoreService( + clusterService, + clusterModule.getAllocationService(), + 
metadataCreateIndexService, + metadataIndexUpgradeService, + shardLimitValidator, + remoteClusterStateService ); final DiskThresholdMonitor diskThresholdMonitor = new DiskThresholdMonitor( @@ -969,7 +1118,9 @@ protected Node( environment.configDir(), gatewayMetaState, rerouteService, - fsHealthService + fsHealthService, + persistedStateRegistry, + remoteStoreNodeService ); final SearchPipelineService searchPipelineService = new SearchPipelineService( clusterService, @@ -1012,7 +1163,11 @@ protected Node( searchBackpressureService, searchPipelineService, fileCache, - taskCancellationMonitoringService + taskCancellationMonitoringService, + resourceUsageCollectorService, + segmentReplicationStatsTracker, + repositoryService, + admissionControlService ); final SearchService searchService = newSearchService( @@ -1028,18 +1183,6 @@ protected Node( searchModule.getIndexSearcherExecutor(threadPool) ); - TracerFactory tracerFactory; - if (FeatureFlags.isEnabled(TELEMETRY)) { - final TelemetrySettings telemetrySettings = new TelemetrySettings(settings, clusterService.getClusterSettings()); - List<TelemetryPlugin> telemetryPlugins = pluginsService.filterPlugins(TelemetryPlugin.class); - TelemetryModule telemetryModule = new TelemetryModule(telemetryPlugins, telemetrySettings); - tracerFactory = new TracerFactory(telemetrySettings, telemetryModule.getTelemetry(), threadPool.getThreadContext()); - } else { - tracerFactory = new NoopTracerFactory(); - } - tracer = tracerFactory.getTracer(); - resourcesToClose.add(tracer::close); - final List<PersistentTasksExecutor<?>> tasksExecutors = pluginsService.filterPlugins(PersistentTaskPlugin.class) .stream() .map( @@ -1086,6 +1229,7 @@ protected Node( b.bind(IndexingPressureService.class).toInstance(indexingPressureService); b.bind(TaskResourceTrackingService.class).toInstance(taskResourceTrackingService); b.bind(SearchBackpressureService.class).toInstance(searchBackpressureService); + b.bind(AdmissionControlService.class).toInstance(admissionControlService); b.bind(UsageService.class).toInstance(usageService); b.bind(AggregationUsageService.class).toInstance(searchModule.getValuesSourceRegistry().getUsageService()); b.bind(NamedWriteableRegistry.class).toInstance(namedWriteableRegistry); @@ -1093,10 +1237,12 @@ protected Node( b.bind(MetaStateService.class).toInstance(metaStateService); b.bind(PersistedClusterStateService.class).toInstance(lucenePersistedStateFactory); b.bind(IndicesService.class).toInstance(indicesService); + b.bind(RemoteStoreStatsTrackerFactory.class).toInstance(remoteStoreStatsTrackerFactory); b.bind(AliasValidator.class).toInstance(aliasValidator); b.bind(MetadataCreateIndexService.class).toInstance(metadataCreateIndexService); b.bind(AwarenessReplicaBalance.class).toInstance(awarenessReplicaBalance); b.bind(MetadataCreateDataStreamService.class).toInstance(metadataCreateDataStreamService); + b.bind(ViewService.class).toInstance(viewService); b.bind(SearchService.class).toInstance(searchService); b.bind(SearchTransportService.class).toInstance(searchTransportService); b.bind(SearchPhaseController.class) @@ -1140,12 +1286,22 @@ protected Node( b.bind(SnapshotShardsService.class).toInstance(snapshotShardsService); b.bind(TransportNodesSnapshotsStatus.class).toInstance(nodesSnapshotsStatus); b.bind(RestoreService.class).toInstance(restoreService); + b.bind(RemoteStoreRestoreService.class).toInstance(remoteStoreRestoreService); b.bind(RerouteService.class).toInstance(rerouteService); 
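The b.bind(...).toInstance(...) statements in this block register eagerly constructed singletons with OpenSearch's vendored Guice-style injector; RemoteClusterStateService alone goes through toProvider(() -> remoteClusterStateService) because it is legitimately null when remote cluster state is disabled. A rough stand-alone sketch of the same instance-binding idea using stock Google Guice, an assumed dependency here purely for illustration:

import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;

public class BindingSketch {
    static final class SearchStats { long queries; }

    public static void main(String[] args) {
        SearchStats stats = new SearchStats(); // constructed eagerly, like the services above
        Injector injector = Guice.createInjector(new AbstractModule() {
            @Override
            protected void configure() {
                bind(SearchStats.class).toInstance(stats); // singleton instance binding
                // A possibly-null service would go through a provider instead, e.g.
                // bind(RemoteService.class).toProvider(() -> maybeNullService);
            }
        });
        System.out.println(injector.getInstance(SearchStats.class) == stats); // true
    }
}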
b.bind(ShardLimitValidator.class).toInstance(shardLimitValidator); b.bind(FsHealthService.class).toInstance(fsHealthService); + b.bind(NodeResourceUsageTracker.class).toInstance(nodeResourceUsageTracker); + b.bind(ResourceUsageCollectorService.class).toInstance(resourceUsageCollectorService); b.bind(SystemIndices.class).toInstance(systemIndices); b.bind(IdentityService.class).toInstance(identityService); b.bind(Tracer.class).toInstance(tracer); + b.bind(SearchRequestStats.class).toInstance(searchRequestStats); + b.bind(SearchRequestSlowLog.class).toInstance(searchRequestSlowLog); + b.bind(MetricsRegistry.class).toInstance(metricsRegistry); + b.bind(RemoteClusterStateService.class).toProvider(() -> remoteClusterStateService); + b.bind(PersistedStateRegistry.class).toInstance(persistedStateRegistry); + b.bind(SegmentReplicationStatsTracker.class).toInstance(segmentReplicationStatsTracker); + b.bind(SearchRequestOperationsCompositeListenerFactory.class).toInstance(searchRequestOperationsCompositeListenerFactory); }); injector = modules.createInjector(); @@ -1198,9 +1354,10 @@ protected TransportService newTransportService( TransportInterceptor interceptor, Function<BoundTransportAddress, DiscoveryNode> localNodeFactory, ClusterSettings clusterSettings, - Set<String> taskHeaders + Set<String> taskHeaders, + Tracer tracer ) { - return new TransportService(settings, transport, threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders); + return new TransportService(settings, transport, threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders, tracer); } protected void processRecoverySettings(ClusterSettings clusterSettings, RecoverySettings recoverySettings) { @@ -1254,6 +1411,8 @@ public Node start() throws NodeValidationException { injector.getInstance(RepositoriesService.class).start(); injector.getInstance(SearchService.class).start(); injector.getInstance(FsHealthService.class).start(); + injector.getInstance(NodeResourceUsageTracker.class).start(); + injector.getInstance(ResourceUsageCollectorService.class).start(); nodeService.getMonitorService().start(); nodeService.getSearchBackpressureService().start(); nodeService.getTaskCancellationMonitoringService().start(); @@ -1282,8 +1441,13 @@ public Node start() throws NodeValidationException { assert transportService.getLocalNode().equals(localNodeFactory.getNode()) : "transportService has a different local node than the factory provided"; injector.getInstance(PeerRecoverySourceService.class).start(); + injector.getInstance(SegmentReplicationTargetService.class).start(); injector.getInstance(SegmentReplicationSourceService.class).start(); + final RemoteClusterStateService remoteClusterStateService = injector.getInstance(RemoteClusterStateService.class); + if (remoteClusterStateService != null) { + remoteClusterStateService.start(); + } // Load (and maybe upgrade) the metadata stored on disk final GatewayMetaState gatewayMetaState = injector.getInstance(GatewayMetaState.class); gatewayMetaState.start( @@ -1293,7 +1457,10 @@ public Node start() throws NodeValidationException { injector.getInstance(MetaStateService.class), injector.getInstance(MetadataIndexUpgradeService.class), injector.getInstance(MetadataUpgrader.class), - injector.getInstance(PersistedClusterStateService.class) + injector.getInstance(PersistedClusterStateService.class), + injector.getInstance(RemoteClusterStateService.class), + injector.getInstance(PersistedStateRegistry.class), + injector.getInstance(RemoteStoreRestoreService.class) ); if 
(Assertions.ENABLED) { try { @@ -1409,6 +1576,8 @@ private Node stop() { injector.getInstance(ClusterService.class).stop(); injector.getInstance(NodeConnectionsService.class).stop(); injector.getInstance(FsHealthService.class).stop(); + injector.getInstance(NodeResourceUsageTracker.class).stop(); + injector.getInstance(ResourceUsageCollectorService.class).stop(); nodeService.getMonitorService().stop(); nodeService.getSearchBackpressureService().stop(); injector.getInstance(GatewayService.class).stop(); @@ -1461,6 +1630,7 @@ public synchronized void close() throws IOException { toClose.add(injector.getInstance(IndicesStore.class)); toClose.add(injector.getInstance(PeerRecoverySourceService.class)); toClose.add(injector.getInstance(SegmentReplicationSourceService.class)); + toClose.add(injector.getInstance(SegmentReplicationTargetService.class)); toClose.add(() -> stopWatch.stop().start("cluster")); toClose.add(injector.getInstance(ClusterService.class)); toClose.add(() -> stopWatch.stop().start("node_connections_service")); @@ -1472,6 +1642,10 @@ public synchronized void close() throws IOException { toClose.add(nodeService.getSearchBackpressureService()); toClose.add(() -> stopWatch.stop().start("fsHealth")); toClose.add(injector.getInstance(FsHealthService.class)); + toClose.add(() -> stopWatch.stop().start("resource_usage_tracker")); + toClose.add(injector.getInstance(NodeResourceUsageTracker.class)); + toClose.add(() -> stopWatch.stop().start("resource_usage_collector")); + toClose.add(injector.getInstance(ResourceUsageCollectorService.class)); toClose.add(() -> stopWatch.stop().start("gateway")); toClose.add(injector.getInstance(GatewayService.class)); toClose.add(() -> stopWatch.stop().start("search")); @@ -1503,6 +1677,7 @@ public synchronized void close() throws IOException { toClose.add(stopWatch::stop); if (FeatureFlags.isEnabled(TELEMETRY)) { toClose.add(injector.getInstance(Tracer.class)); + toClose.add(injector.getInstance(MetricsRegistry.class)); } if (logger.isTraceEnabled()) { @@ -1702,15 +1877,27 @@ private static class LocalNodeFactory implements Function<BoundTransportAddress, private final SetOnce<DiscoveryNode> localNode = new SetOnce<>(); private final String persistentNodeId; private final Settings settings; + private final RemoteStoreNodeService remoteStoreNodeService; - private LocalNodeFactory(Settings settings, String persistentNodeId) { + private LocalNodeFactory(Settings settings, String persistentNodeId, RemoteStoreNodeService remoteStoreNodeService) { this.persistentNodeId = persistentNodeId; this.settings = settings; + this.remoteStoreNodeService = remoteStoreNodeService; } @Override public DiscoveryNode apply(BoundTransportAddress boundTransportAddress) { - localNode.set(DiscoveryNode.createLocal(settings, boundTransportAddress.publishAddress(), persistentNodeId)); + final DiscoveryNode discoveryNode = DiscoveryNode.createLocal( + settings, + boundTransportAddress.publishAddress(), + persistentNodeId + ); + + if (isRemoteStoreAttributePresent(settings)) { + remoteStoreNodeService.createAndVerifyRepositories(discoveryNode); + } + + localNode.set(discoveryNode); return localNode.get(); } diff --git a/server/src/main/java/org/opensearch/node/NodeResourceUsageStats.java b/server/src/main/java/org/opensearch/node/NodeResourceUsageStats.java new file mode 100644 index 0000000000000..26e53218cf026 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/NodeResourceUsageStats.java @@ -0,0 +1,110 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The 
OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node; + +import org.opensearch.Version; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; + +import java.io.IOException; +import java.util.Locale; + +/** + * This represents the resource usage stats of a node along with the timestamp at which the stats object was created + * in the respective node + */ +public class NodeResourceUsageStats implements Writeable { + final String nodeId; + long timestamp; + double cpuUtilizationPercent; + double memoryUtilizationPercent; + private IoUsageStats ioUsageStats; + + public NodeResourceUsageStats( + String nodeId, + long timestamp, + double memoryUtilizationPercent, + double cpuUtilizationPercent, + IoUsageStats ioUsageStats + ) { + this.nodeId = nodeId; + this.timestamp = timestamp; + this.cpuUtilizationPercent = cpuUtilizationPercent; + this.memoryUtilizationPercent = memoryUtilizationPercent; + this.ioUsageStats = ioUsageStats; + } + + public NodeResourceUsageStats(StreamInput in) throws IOException { + this.nodeId = in.readString(); + this.timestamp = in.readLong(); + this.cpuUtilizationPercent = in.readDouble(); + this.memoryUtilizationPercent = in.readDouble(); + if (in.getVersion().onOrAfter(Version.V_2_13_0)) { + this.ioUsageStats = in.readOptionalWriteable(IoUsageStats::new); + } else { + this.ioUsageStats = null; + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(this.nodeId); + out.writeLong(this.timestamp); + out.writeDouble(this.cpuUtilizationPercent); + out.writeDouble(this.memoryUtilizationPercent); + if (out.getVersion().onOrAfter(Version.V_2_13_0)) { + out.writeOptionalWriteable(this.ioUsageStats); + } + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("NodeResourceUsageStats["); + sb.append(nodeId).append("]("); + sb.append("Timestamp: ").append(timestamp); + sb.append(", CPU utilization percent: ").append(String.format(Locale.ROOT, "%.1f", this.getCpuUtilizationPercent())); + sb.append(", Memory utilization percent: ").append(String.format(Locale.ROOT, "%.1f", this.getMemoryUtilizationPercent())); + if (this.ioUsageStats != null) { + sb.append(", ").append(this.getIoUsageStats()); + } + sb.append(")"); + return sb.toString(); + } + + NodeResourceUsageStats(NodeResourceUsageStats nodeResourceUsageStats) { + this( + nodeResourceUsageStats.nodeId, + nodeResourceUsageStats.timestamp, + nodeResourceUsageStats.memoryUtilizationPercent, + nodeResourceUsageStats.cpuUtilizationPercent, + nodeResourceUsageStats.ioUsageStats + ); + } + + public double getMemoryUtilizationPercent() { + return memoryUtilizationPercent; + } + + public double getCpuUtilizationPercent() { + return cpuUtilizationPercent; + } + + public IoUsageStats getIoUsageStats() { + return ioUsageStats; + } + + public void setIoUsageStats(IoUsageStats ioUsageStats) { + this.ioUsageStats = ioUsageStats; + } + + public long getTimestamp() { + return timestamp; + } +} diff --git a/server/src/main/java/org/opensearch/node/NodeService.java b/server/src/main/java/org/opensearch/node/NodeService.java index 6f4fe1e083ad7..15cc8f3d20bb3 100644 --- a/server/src/main/java/org/opensearch/node/NodeService.java +++ b/server/src/main/java/org/opensearch/node/NodeService.java @@ -32,27 +32,30 @@ package 
org.opensearch.node; -import org.opensearch.cluster.routing.WeightedRoutingStats; -import org.opensearch.common.util.io.IOUtils; import org.opensearch.Build; import org.opensearch.Version; import org.opensearch.action.admin.cluster.node.info.NodeInfo; import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.action.search.SearchTransportService; +import org.opensearch.cluster.routing.WeightedRoutingStats; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsFilter; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.discovery.Discovery; import org.opensearch.http.HttpServerTransport; import org.opensearch.index.IndexingPressureService; +import org.opensearch.index.SegmentReplicationStatsTracker; import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.indices.IndicesService; -import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.ingest.IngestService; import org.opensearch.monitor.MonitorService; import org.opensearch.plugins.PluginsService; +import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlService; +import org.opensearch.repositories.RepositoriesService; import org.opensearch.script.ScriptService; import org.opensearch.search.aggregations.support.AggregationUsageService; import org.opensearch.search.backpressure.SearchBackpressureService; @@ -83,6 +86,7 @@ public class NodeService implements Closeable { private final ScriptService scriptService; private final HttpServerTransport httpServerTransport; private final ResponseCollectorService responseCollectorService; + private final ResourceUsageCollectorService resourceUsageCollectorService; private final SearchTransportService searchTransportService; private final IndexingPressureService indexingPressureService; private final AggregationUsageService aggregationUsageService; @@ -92,6 +96,9 @@ public class NodeService implements Closeable { private final Discovery discovery; private final FileCache fileCache; private final TaskCancellationMonitoringService taskCancellationMonitoringService; + private final RepositoriesService repositoriesService; + private final AdmissionControlService admissionControlService; + private final SegmentReplicationStatsTracker segmentReplicationStatsTracker; NodeService( Settings settings, @@ -114,7 +121,11 @@ public class NodeService implements Closeable { SearchBackpressureService searchBackpressureService, SearchPipelineService searchPipelineService, FileCache fileCache, - TaskCancellationMonitoringService taskCancellationMonitoringService + TaskCancellationMonitoringService taskCancellationMonitoringService, + ResourceUsageCollectorService resourceUsageCollectorService, + SegmentReplicationStatsTracker segmentReplicationStatsTracker, + RepositoriesService repositoriesService, + AdmissionControlService admissionControlService ) { this.settings = settings; this.threadPool = threadPool; @@ -137,8 +148,12 @@ public class NodeService implements Closeable { this.clusterService = clusterService; this.fileCache = fileCache; this.taskCancellationMonitoringService = taskCancellationMonitoringService; + this.resourceUsageCollectorService = resourceUsageCollectorService; + this.repositoriesService = repositoriesService; + 
this.admissionControlService = admissionControlService; clusterService.addStateApplier(ingestService); clusterService.addStateApplier(searchPipelineService); + this.segmentReplicationStatsTracker = segmentReplicationStatsTracker; } public NodeInfo info( @@ -217,7 +232,11 @@ public NodeStats stats( boolean weightedRoutingStats, boolean fileCacheStats, boolean taskCancellation, - boolean searchPipelineStats + boolean searchPipelineStats, + boolean resourceUsageStats, + boolean segmentReplicationTrackerStats, + boolean repositoriesStats, + boolean admissionControl ) { // for indices stats we want to include previous allocated shards stats as well (it will // only be applied to the sensible ones to use, like refresh/merge/flush/indexing stats) @@ -237,6 +256,7 @@ public NodeStats stats( discoveryStats ? discovery.stats() : null, ingest ? ingestService.stats() : null, adaptiveSelection ? responseCollectorService.getAdaptiveStats(searchTransportService.getPendingSearchRequests()) : null, + resourceUsageStats ? resourceUsageCollectorService.stats() : null, scriptCache ? scriptService.cacheStats() : null, indexingPressure ? this.indexingPressureService.nodeStats() : null, shardIndexingPressure ? this.indexingPressureService.shardStats(indices) : null, @@ -245,7 +265,10 @@ public NodeStats stats( weightedRoutingStats ? WeightedRoutingStats.getInstance() : null, fileCacheStats && fileCache != null ? fileCache.fileCacheStats() : null, taskCancellation ? this.taskCancellationMonitoringService.stats() : null, - searchPipelineStats ? this.searchPipelineService.stats() : null + searchPipelineStats ? this.searchPipelineService.stats() : null, + segmentReplicationTrackerStats ? this.segmentReplicationStatsTracker.getTotalRejectionStats() : null, + repositoriesStats ? this.repositoriesService.getRepositoriesStats() : null, + admissionControl ? this.admissionControlService.stats() : null ); } diff --git a/server/src/main/java/org/opensearch/node/NodeValidationException.java b/server/src/main/java/org/opensearch/node/NodeValidationException.java index ef1500f1e4ede..b316288b7cf06 100644 --- a/server/src/main/java/org/opensearch/node/NodeValidationException.java +++ b/server/src/main/java/org/opensearch/node/NodeValidationException.java @@ -32,7 +32,7 @@ package org.opensearch.node; -import org.opensearch.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; import java.util.List; diff --git a/server/src/main/java/org/opensearch/node/NodesResourceUsageStats.java b/server/src/main/java/org/opensearch/node/NodesResourceUsageStats.java new file mode 100644 index 0000000000000..35c82c904ad1c --- /dev/null +++ b/server/src/main/java/org/opensearch/node/NodesResourceUsageStats.java @@ -0,0 +1,72 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Locale; +import java.util.Map; + +/** + * This class represents resource usage stats such as CPU, Memory and IO resource usage of each node along with the + * timestamp of the stats recorded. 
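NodeResourceUsageStats above gates its ioUsageStats field on the stream version (onOrAfter(Version.V_2_13_0)), so mixed-version clusters can exchange node stats and older peers simply never see the newer field. A self-contained sketch of that version-gated optional-field pattern over plain java.io streams; the real code uses OpenSearch's StreamInput/StreamOutput and writeOptionalWriteable:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class VersionGatedFieldSketch {
    static final int V_OLD = 12;
    static final int V_NEW = 13; // wire version that introduced the optional field

    // Writer: only emit the optional field when the peer's version understands it.
    static void write(DataOutputStream out, int peerVersion, double cpu, Double io) throws IOException {
        out.writeDouble(cpu);
        if (peerVersion >= V_NEW) {
            out.writeBoolean(io != null); // presence marker, like writeOptionalWriteable
            if (io != null) out.writeDouble(io);
        }
    }

    // Reader: mirror the same field order and the same version gate.
    static void read(DataInputStream in, int peerVersion) throws IOException {
        double cpu = in.readDouble();
        Double io = (peerVersion >= V_NEW && in.readBoolean()) ? in.readDouble() : null;
        System.out.println("cpu=" + cpu + " io=" + io);
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        write(new DataOutputStream(buf), V_OLD, 42.0, 7.0); // io silently dropped for the old peer
        read(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())), V_OLD); // cpu=42.0 io=null
    }
}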
+ */ +public class NodesResourceUsageStats implements Writeable, ToXContentFragment { + + // Map of node id to resource usage stats of the corresponding node. + private final Map<String, NodeResourceUsageStats> nodeIdToResourceUsageStatsMap; + + public NodesResourceUsageStats(Map<String, NodeResourceUsageStats> nodeIdToResourceUsageStatsMap) { + this.nodeIdToResourceUsageStatsMap = nodeIdToResourceUsageStatsMap; + } + + public NodesResourceUsageStats(StreamInput in) throws IOException { + this.nodeIdToResourceUsageStatsMap = in.readMap(StreamInput::readString, NodeResourceUsageStats::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(this.nodeIdToResourceUsageStatsMap, StreamOutput::writeString, (stream, stats) -> stats.writeTo(stream)); + } + + /** + * Returns map of node id to resource usage stats of the corresponding node. + */ + public Map<String, NodeResourceUsageStats> getNodeIdToResourceUsageStatsMap() { + return nodeIdToResourceUsageStatsMap; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("resource_usage_stats"); + for (String nodeId : nodeIdToResourceUsageStatsMap.keySet()) { + builder.startObject(nodeId); + NodeResourceUsageStats resourceUsageStats = nodeIdToResourceUsageStatsMap.get(nodeId); + if (resourceUsageStats != null) { + builder.field("timestamp", resourceUsageStats.timestamp); + builder.field("cpu_utilization_percent", String.format(Locale.ROOT, "%.1f", resourceUsageStats.cpuUtilizationPercent)); + builder.field( + "memory_utilization_percent", + String.format(Locale.ROOT, "%.1f", resourceUsageStats.memoryUtilizationPercent) + ); + if (resourceUsageStats.getIoUsageStats() != null) { + builder.field("io_usage_stats", resourceUsageStats.getIoUsageStats()); + } + } + builder.endObject(); + } + builder.endObject(); + return builder; + } +} diff --git a/server/src/main/java/org/opensearch/node/ResourceUsageCollectorService.java b/server/src/main/java/org/opensearch/node/ResourceUsageCollectorService.java new file mode 100644 index 0000000000000..ecd2a5615e1fe --- /dev/null +++ b/server/src/main/java/org/opensearch/node/ResourceUsageCollectorService.java @@ -0,0 +1,163 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.node; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterStateListener; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.node.resource.tracker.NodeResourceUsageTracker; +import org.opensearch.threadpool.Scheduler; +import org.opensearch.threadpool.ThreadPool; + +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.ConcurrentMap; + +/** + * This collects node-level resource usage statistics such as CPU, memory, and IO for each node and makes them + * available to the coordinator node to aid in throttling, ranking, etc. + */ +public class ResourceUsageCollectorService extends AbstractLifecycleComponent implements ClusterStateListener { + + /** + * This refresh interval denotes the polling interval of ResourceUsageCollectorService to refresh the resource usage + * stats from the local node + */ + private static final long REFRESH_INTERVAL_IN_MILLIS = 1000; + + private static final Logger logger = LogManager.getLogger(ResourceUsageCollectorService.class); + private final ConcurrentMap<String, NodeResourceUsageStats> nodeIdToResourceUsageStats = ConcurrentCollections.newConcurrentMap(); + + private ThreadPool threadPool; + private volatile Scheduler.Cancellable scheduledFuture; + + private NodeResourceUsageTracker nodeResourceUsageTracker; + private ClusterService clusterService; + + public ResourceUsageCollectorService( + NodeResourceUsageTracker nodeResourceUsageTracker, + ClusterService clusterService, + ThreadPool threadPool + ) { + this.threadPool = threadPool; + this.nodeResourceUsageTracker = nodeResourceUsageTracker; + this.clusterService = clusterService; + clusterService.addListener(this); + } + + @Override + public void clusterChanged(ClusterChangedEvent event) { + if (event.nodesRemoved()) { + for (DiscoveryNode removedNode : event.nodesDelta().removedNodes()) { + removeNodeResourceUsageStats(removedNode.getId()); + } + } + } + + void removeNodeResourceUsageStats(String nodeId) { + nodeIdToResourceUsageStats.remove(nodeId); + } + + /** + * Collect node resource usage stats along with the timestamp + */ + public void collectNodeResourceUsageStats( + String nodeId, + long timestamp, + double memoryUtilizationPercent, + double cpuUtilizationPercent, + IoUsageStats ioUsageStats + ) { + nodeIdToResourceUsageStats.compute(nodeId, (id, resourceUsageStats) -> { + if (resourceUsageStats == null) { + return new NodeResourceUsageStats(nodeId, timestamp, memoryUtilizationPercent, cpuUtilizationPercent, ioUsageStats); + } else { + resourceUsageStats.cpuUtilizationPercent = cpuUtilizationPercent; + resourceUsageStats.memoryUtilizationPercent = memoryUtilizationPercent; + resourceUsageStats.setIoUsageStats(ioUsageStats); + resourceUsageStats.timestamp = timestamp; + return resourceUsageStats; + } + }); + } + + /** + * Get all node resource usage statistics which will be used for node stats + */ + public Map<String, NodeResourceUsageStats> getAllNodeStatistics() { + Map<String, NodeResourceUsageStats> nodeStats = new HashMap<>(nodeIdToResourceUsageStats.size()); + nodeIdToResourceUsageStats.forEach((nodeId, resourceUsageStats) -> { + nodeStats.put(nodeId,
new NodeResourceUsageStats(resourceUsageStats)); + }); + return nodeStats; + } + + /** + * Optionally return a {@code NodeResourceUsageStats} for the given node ID, if + * resource usage stats information exists for the given node. Returns an empty + * {@code Optional} if the node was not found. + */ + public Optional<NodeResourceUsageStats> getNodeStatistics(final String nodeId) { + return Optional.ofNullable(nodeIdToResourceUsageStats.get(nodeId)) + .map(resourceUsageStats -> new NodeResourceUsageStats(resourceUsageStats)); + } + + /** + * Returns collected resource usage statistics of all nodes + */ + public NodesResourceUsageStats stats() { + return new NodesResourceUsageStats(getAllNodeStatistics()); + } + + /** + * Fetch local node resource usage statistics and add them to the store along with the current timestamp + */ + private void collectLocalNodeResourceUsageStats() { + if (nodeResourceUsageTracker.isReady() && clusterService.state() != null) { + collectNodeResourceUsageStats( + clusterService.state().nodes().getLocalNodeId(), + System.currentTimeMillis(), + nodeResourceUsageTracker.getMemoryUtilizationPercent(), + nodeResourceUsageTracker.getCpuUtilizationPercent(), + nodeResourceUsageTracker.getIoUsageStats() + ); + } + } + + @Override + protected void doStart() { + // Fetch local node resource usage statistics every second + scheduledFuture = threadPool.scheduleWithFixedDelay(() -> { + try { + collectLocalNodeResourceUsageStats(); + } catch (Exception e) { + logger.warn("failure in ResourceUsageCollectorService", e); + } + }, new TimeValue(REFRESH_INTERVAL_IN_MILLIS), ThreadPool.Names.GENERIC); + } + + @Override + protected void doStop() { + if (scheduledFuture != null) { + scheduledFuture.cancel(); + } + } + + @Override + protected void doClose() {} +} diff --git a/server/src/main/java/org/opensearch/node/ResponseCollectorService.java b/server/src/main/java/org/opensearch/node/ResponseCollectorService.java index 3b73ec8f9622d..7bd3c2d8d8ec6 100644 --- a/server/src/main/java/org/opensearch/node/ResponseCollectorService.java +++ b/server/src/main/java/org/opensearch/node/ResponseCollectorService.java @@ -37,10 +37,11 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.ExponentiallyWeightedMovingAverage; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.util.concurrent.ConcurrentCollections; import java.io.IOException; import java.util.HashMap; @@ -54,8 +55,9 @@ * tasks executed on each node, making the EWMA of the values available to the * coordinating node. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ResponseCollectorService implements ClusterStateListener { private static final double ALPHA = 0.3; @@ -120,7 +122,10 @@ public Optional<ComputedNodeStats> getNodeStatistics(final String nodeId) { * Struct-like class encapsulating a point-in-time snapshot of a particular * node's statistics. This includes the EWMA of queue size, response time, * and service time.
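ResponseCollectorService smooths queue size, response time, and service time with an exponentially weighted moving average and ALPHA = 0.3, so the newest sample carries 30% weight and older history decays geometrically. A minimal sketch of the recurrence; the real implementation is org.opensearch.common.ExponentiallyWeightedMovingAverage, which seeds from a constructor-supplied initial average, whereas this variant seeds from the first sample:

public class EwmaSketch {
    private final double alpha; // weight of the newest sample, 0 < alpha <= 1
    private double average;
    private boolean seeded;

    public EwmaSketch(double alpha) { this.alpha = alpha; }

    public void addValue(double sample) {
        // ewma_t = alpha * x_t + (1 - alpha) * ewma_{t-1}
        average = seeded ? alpha * sample + (1 - alpha) * average : sample;
        seeded = true;
    }

    public double getAverage() { return average; }

    public static void main(String[] args) {
        EwmaSketch ewma = new EwmaSketch(0.3);
        for (double ms : new double[] { 10, 10, 100, 10 }) ewma.addValue(ms);
        System.out.printf("%.2f%n", ewma.getAverage()); // 28.90: the spike decays quickly
    }
}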
+ * + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class ComputedNodeStats implements Writeable { // We store timestamps with nanosecond precision, however, the // formula specifies milliseconds, therefore we need to convert diff --git a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java new file mode 100644 index 0000000000000..7b2a6c34d3db6 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java @@ -0,0 +1,192 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node.remotestore; + +import org.opensearch.cluster.metadata.CryptoMetadata; +import org.opensearch.cluster.metadata.RepositoriesMetadata; +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.settings.Settings; +import org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.node.Node; +import org.opensearch.repositories.blobstore.BlobStoreRepository; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * This is an abstraction for validating and storing information specific to remote backed storage nodes. + * + * @opensearch.internal + */ +public class RemoteStoreNodeAttribute { + + public static final String REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX = "remote_store"; + public static final String REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY = "remote_store.segment.repository"; + public static final String REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY = "remote_store.translog.repository"; + public static final String REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY = "remote_store.state.repository"; + public static final String REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT = "remote_store.repository.%s.type"; + public static final String REMOTE_STORE_REPOSITORY_CRYPTO_ATTRIBUTE_KEY_FORMAT = "remote_store.repository.%s." + + CryptoMetadata.CRYPTO_METADATA_KEY; + public static final String REMOTE_STORE_REPOSITORY_CRYPTO_SETTINGS_PREFIX = REMOTE_STORE_REPOSITORY_CRYPTO_ATTRIBUTE_KEY_FORMAT + + "." 
+ + CryptoMetadata.SETTINGS_KEY; + public static final String REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX = "remote_store.repository.%s.settings."; + private final RepositoriesMetadata repositoriesMetadata; + + /** + * Creates a new {@link RemoteStoreNodeAttribute} + */ + public RemoteStoreNodeAttribute(DiscoveryNode node) { + this.repositoriesMetadata = buildRepositoriesMetadata(node); + } + + private String validateAttributeNonNull(DiscoveryNode node, String attributeKey) { + String attributeValue = node.getAttributes().get(attributeKey); + if (attributeValue == null || attributeValue.isEmpty()) { + throw new IllegalStateException("joining node [" + node + "] doesn't have the node attribute [" + attributeKey + "]"); + } + + return attributeValue; + } + + private CryptoMetadata buildCryptoMetadata(DiscoveryNode node, String repositoryName) { + String metadataKey = String.format(Locale.getDefault(), REMOTE_STORE_REPOSITORY_CRYPTO_ATTRIBUTE_KEY_FORMAT, repositoryName); + boolean isRepoEncrypted = node.getAttributes().keySet().stream().anyMatch(key -> key.startsWith(metadataKey)); + if (isRepoEncrypted == false) { + return null; + } + + String keyProviderName = validateAttributeNonNull(node, metadataKey + "." + CryptoMetadata.KEY_PROVIDER_NAME_KEY); + String keyProviderType = validateAttributeNonNull(node, metadataKey + "." + CryptoMetadata.KEY_PROVIDER_TYPE_KEY); + + String settingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + REMOTE_STORE_REPOSITORY_CRYPTO_SETTINGS_PREFIX, + repositoryName + ); + + Map<String, String> settingsMap = node.getAttributes() + .keySet() + .stream() + .filter(key -> key.startsWith(settingsAttributeKeyPrefix)) + .collect(Collectors.toMap(key -> key.replace(settingsAttributeKeyPrefix + ".", ""), key -> node.getAttributes().get(key))); + + Settings.Builder settings = Settings.builder(); + settingsMap.forEach(settings::put); + + return new CryptoMetadata(keyProviderName, keyProviderType, settings.build()); + } + + private Map<String, String> validateSettingsAttributesNonNull(DiscoveryNode node, String repositoryName) { + String settingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + repositoryName + ); + Map<String, String> settingsMap = node.getAttributes() + .keySet() + .stream() + .filter(key -> key.startsWith(settingsAttributeKeyPrefix)) + .collect(Collectors.toMap(key -> key.replace(settingsAttributeKeyPrefix, ""), key -> validateAttributeNonNull(node, key))); + + if (settingsMap.isEmpty()) { + throw new IllegalStateException( + "joining node [" + node + "] doesn't have settings attribute for [" + repositoryName + "] repository" + ); + } + + return settingsMap; + } + + private RepositoryMetadata buildRepositoryMetadata(DiscoveryNode node, String name) { + String type = validateAttributeNonNull( + node, + String.format(Locale.getDefault(), REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, name) + ); + Map<String, String> settingsMap = validateSettingsAttributesNonNull(node, name); + + Settings.Builder settings = Settings.builder(); + settingsMap.forEach(settings::put); + + CryptoMetadata cryptoMetadata = buildCryptoMetadata(node, name); + + // Repository metadata built here will always be for a system repository. 
+ settings.put(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING.getKey(), true); + + return new RepositoryMetadata(name, type, settings.build(), cryptoMetadata); + } + + private RepositoriesMetadata buildRepositoriesMetadata(DiscoveryNode node) { + List<RepositoryMetadata> repositoryMetadataList = new ArrayList<>(); + Set<String> repositoryNames = new HashSet<>(); + + repositoryNames.add(validateAttributeNonNull(node, REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY)); + repositoryNames.add(validateAttributeNonNull(node, REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY)); + repositoryNames.add(validateAttributeNonNull(node, REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY)); + + for (String repositoryName : repositoryNames) { + repositoryMetadataList.add(buildRepositoryMetadata(node, repositoryName)); + } + + return new RepositoriesMetadata(repositoryMetadataList); + } + + public static boolean isRemoteStoreAttributePresent(Settings settings) { + return settings.getByPrefix(Node.NODE_ATTRIBUTES.getKey() + REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX).isEmpty() == false; + } + + public static boolean isRemoteStoreClusterStateEnabled(Settings settings) { + return RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.get(settings) && isRemoteStoreAttributePresent(settings); + } + + public RepositoriesMetadata getRepositoriesMetadata() { + return this.repositoriesMetadata; + } + + @Override + public int hashCode() { + // The hashCode is generated by computing the hash of all the repositoryMetadata present in + // repositoriesMetadata without generation. Below is the modified list hashCode generation logic. + + int hashCode = 1; + Iterator iterator = this.repositoriesMetadata.repositories().iterator(); + while (iterator.hasNext()) { + RepositoryMetadata repositoryMetadata = (RepositoryMetadata) iterator.next(); + hashCode = 31 * hashCode + (repositoryMetadata == null + ? 0 + : Objects.hash(repositoryMetadata.name(), repositoryMetadata.type(), repositoryMetadata.settings())); + } + return hashCode; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + RemoteStoreNodeAttribute that = (RemoteStoreNodeAttribute) o; + + return this.getRepositoriesMetadata().equalsIgnoreGenerations(that.getRepositoriesMetadata()); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append('{').append(this.repositoriesMetadata).append('}'); + return sb.toString(); + } +} diff --git a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java new file mode 100644 index 0000000000000..33b182dd3cc97 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java @@ -0,0 +1,226 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
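To make the attribute scheme above concrete: a remote-store node advertises its repositories through node attributes, and RemoteStoreNodeAttribute recovers repository name, type, and settings by prefix-stripping. A hedged sketch of that parse over plain maps; the key names follow the constants above, while the repository name, type, and setting values are invented for illustration:

import java.util.HashMap;
import java.util.Map;

public class RemoteStoreAttributeSketch {
    public static void main(String[] args) {
        // Hypothetical node attributes, as they might appear under node.attr.* in opensearch.yml
        Map<String, String> attrs = new HashMap<>();
        attrs.put("remote_store.segment.repository", "my-segment-repo");
        attrs.put("remote_store.repository.my-segment-repo.type", "s3");
        attrs.put("remote_store.repository.my-segment-repo.settings.bucket", "my-bucket");
        attrs.put("remote_store.repository.my-segment-repo.settings.base_path", "segments");

        String repoName = attrs.get("remote_store.segment.repository");
        String type = attrs.get("remote_store.repository." + repoName + ".type");

        // Strip the settings prefix, as validateSettingsAttributesNonNull does above.
        String prefix = "remote_store.repository." + repoName + ".settings.";
        Map<String, String> repoSettings = new HashMap<>();
        attrs.forEach((k, v) -> { if (k.startsWith(prefix)) repoSettings.put(k.substring(prefix.length()), v); });

        System.out.println("repository " + repoName + " of type " + type + " with settings " + repoSettings);
    }
}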
+ */ + +package org.opensearch.node.remotestore; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.cluster.metadata.RepositoriesMetadata; +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.Repository; +import org.opensearch.repositories.RepositoryException; +import org.opensearch.threadpool.ThreadPool; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.function.Supplier; + +import static org.opensearch.common.util.FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL; + +/** + * Contains all the methods needed for a remote store backed node lifecycle. + */ +public class RemoteStoreNodeService { + + private static final Logger logger = LogManager.getLogger(RemoteStoreNodeService.class); + private final Supplier<RepositoriesService> repositoriesService; + private final ThreadPool threadPool; + public static final Setting<CompatibilityMode> REMOTE_STORE_COMPATIBILITY_MODE_SETTING = new Setting<>( + "remote_store.compatibility_mode", + CompatibilityMode.STRICT.name(), + CompatibilityMode::parseString, + value -> { + if (value == CompatibilityMode.MIXED + && FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING) == false) { + throw new IllegalArgumentException( + "mixed mode is an experimental feature and can be activated only by enabling the " + + REMOTE_STORE_MIGRATION_EXPERIMENTAL + + " feature flag in the JVM options" + ); + } + }, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public static final Setting<Direction> MIGRATION_DIRECTION_SETTING = new Setting<>( + "migration.direction", + Direction.NONE.name(), + Direction::parseString, + value -> { + if (value != Direction.NONE && FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING) == false) { + throw new IllegalArgumentException( + "migration.direction is an experimental feature and can be activated only by enabling the " + + REMOTE_STORE_MIGRATION_EXPERIMENTAL + + " feature flag in the JVM options" + ); + } + }, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Node join compatibility mode introduced with remote backed storage. + * + * @opensearch.internal + */ + public enum CompatibilityMode { + STRICT("strict"), + MIXED("mixed"); + + public final String mode; + + CompatibilityMode(String mode) { + this.mode = mode; + } + + public static CompatibilityMode parseString(String compatibilityMode) { + try { + return CompatibilityMode.valueOf(compatibilityMode.toUpperCase(Locale.ROOT)); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException( + "[" + + compatibilityMode + + "] compatibility mode is not supported.
" + "Supported modes are [" + Arrays.toString(CompatibilityMode.values()) + "]" + ); + } + } + } + + /** + * Migration direction, intended for docrep-to-remote-store migration and vice versa. + * + * @opensearch.internal + */ + public enum Direction { + REMOTE_STORE("remote_store"), + NONE("none"), + DOCREP("docrep"); + + public final String direction; + + Direction(String d) { + this.direction = d; + } + + public static Direction parseString(String direction) { + try { + return Direction.valueOf(direction.toUpperCase(Locale.ROOT)); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException("[" + direction + "] migration.direction is not supported."); + } + } + } + + public RemoteStoreNodeService(Supplier<RepositoriesService> repositoriesService, ThreadPool threadPool) { + this.repositoriesService = repositoriesService; + this.threadPool = threadPool; + } + + /** + * Creates each repository during node startup and verifies it by invoking the verify method against it. This + * verification happens on the local node to validate that the node can connect to the repository with the + * appropriate permissions. + * If creation or verification fails, this method closes all the repositories it created and throws an exception. + */ + public void createAndVerifyRepositories(DiscoveryNode localNode) { + RemoteStoreNodeAttribute nodeAttribute = new RemoteStoreNodeAttribute(localNode); + RepositoriesService reposService = repositoriesService.get(); + Map<String, Repository> repositories = new HashMap<>(); + for (RepositoryMetadata repositoryMetadata : nodeAttribute.getRepositoriesMetadata().repositories()) { + String repositoryName = repositoryMetadata.name(); + Repository repository; + RepositoriesService.validate(repositoryName); + + // Create Repository + repository = reposService.createRepository(repositoryMetadata); + logger.info( + "remote backed storage repository with name [{}] and type [{}] created", + repository.getMetadata().name(), + repository.getMetadata().type() + ); + + // Verify Repository + String verificationToken = repository.startVerification(); + repository.verify(verificationToken, localNode); + repository.endVerification(verificationToken); + logger.info(() -> new ParameterizedMessage("successfully verified [{}] repository", repositoryName)); + repositories.put(repositoryName, repository); + } + // Updating the repositories map in RepositoriesService + reposService.updateRepositoriesMap(repositories); + } + + /** + * Updates repositories metadata in the cluster state if not already present. If metadata for a repository is + * already present in the cluster state and differs from the repository metadata of the joining remote store backed + * node, an exception is thrown and the node is not allowed to join the cluster.
+ */ + public RepositoriesMetadata updateRepositoriesMetadata(DiscoveryNode joiningNode, RepositoriesMetadata existingRepositories) { + if (joiningNode.isRemoteStoreNode()) { + List<RepositoryMetadata> updatedRepositoryMetadataList = new ArrayList<>(); + List<RepositoryMetadata> newRepositoryMetadataList = new RemoteStoreNodeAttribute(joiningNode).getRepositoriesMetadata() + .repositories(); + + if (existingRepositories == null) { + return new RepositoriesMetadata(newRepositoryMetadataList); + } else { + updatedRepositoryMetadataList.addAll(existingRepositories.repositories()); + } + + for (RepositoryMetadata newRepositoryMetadata : newRepositoryMetadataList) { + boolean repositoryAlreadyPresent = false; + for (RepositoryMetadata existingRepositoryMetadata : existingRepositories.repositories()) { + if (newRepositoryMetadata.name().equals(existingRepositoryMetadata.name())) { + try { + // This will help in handling two scenarios - + // 1. When a fresh cluster is formed and a node tries to join the cluster, the repository + // metadata constructed from the node attributes of the joining node will be validated + // against the repository information provided by existing nodes in cluster state. + // 2. It's possible to update repository settings except the restricted ones post the + // creation of a system repository and if a node drops we will need to allow it to join + // even if the non-restricted system repository settings are now different. + repositoriesService.get().ensureValidSystemRepositoryUpdate(newRepositoryMetadata, existingRepositoryMetadata); + newRepositoryMetadata = existingRepositoryMetadata; + repositoryAlreadyPresent = true; + break; + } catch (RepositoryException e) { + throw new IllegalStateException( + "new repository metadata [" + + newRepositoryMetadata + + "] supplied by joining node is different from existing repository metadata [" + + existingRepositoryMetadata + + "]." + ); + } + } + } + if (repositoryAlreadyPresent == false) { + updatedRepositoryMetadataList.add(newRepositoryMetadata); + } + } + return new RepositoriesMetadata(updatedRepositoryMetadataList); + } else { + return existingRepositories; + } + } +} diff --git a/server/src/main/java/org/opensearch/node/remotestore/package-info.java b/server/src/main/java/org/opensearch/node/remotestore/package-info.java new file mode 100644 index 0000000000000..e2592aa5fcc29 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/remotestore/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Restore remote store transport handler. */ +package org.opensearch.node.remotestore; diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/AbstractAverageUsageTracker.java b/server/src/main/java/org/opensearch/node/resource/tracker/AbstractAverageUsageTracker.java new file mode 100644 index 0000000000000..69c7afc1d4b43 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/resource/tracker/AbstractAverageUsageTracker.java @@ -0,0 +1,100 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
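Condensing the updateRepositoriesMetadata flow above: the joining node's repositories are merged into the cluster's existing list, and an already-present name is accepted only if its metadata validates against the existing entry, otherwise the join is rejected. A simplified sketch over hypothetical record types, with a plain type-equality check standing in for RepositoriesService.ensureValidSystemRepositoryUpdate:

import java.util.ArrayList;
import java.util.List;

public class RepositoryMergeSketch {
    record Repo(String name, String type) {}

    static List<Repo> merge(List<Repo> existing, List<Repo> joining) {
        List<Repo> merged = new ArrayList<>(existing);
        for (Repo incoming : joining) {
            boolean present = false;
            for (Repo current : existing) {
                if (current.name().equals(incoming.name())) {
                    // Stand-in for ensureValidSystemRepositoryUpdate: reject conflicting definitions.
                    if (!current.type().equals(incoming.type())) {
                        throw new IllegalStateException("node supplied conflicting metadata for " + incoming.name());
                    }
                    present = true;
                    break;
                }
            }
            if (!present) merged.add(incoming);
        }
        return merged;
    }

    public static void main(String[] args) {
        List<Repo> cluster = List.of(new Repo("seg-repo", "s3"));
        // Known repo is accepted, the new one is appended to the cluster list.
        System.out.println(merge(cluster, List.of(new Repo("seg-repo", "s3"), new Repo("state-repo", "s3"))));
    }
}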
+ */ + +package org.opensearch.node.resource.tracker; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.MovingAverage; +import org.opensearch.threadpool.Scheduler; +import org.opensearch.threadpool.ThreadPool; + +import java.util.concurrent.atomic.AtomicReference; + +/** + * Base class for sliding window resource usage trackers + */ +public abstract class AbstractAverageUsageTracker extends AbstractLifecycleComponent { + private static final Logger LOGGER = LogManager.getLogger(AbstractAverageUsageTracker.class); + + protected final ThreadPool threadPool; + protected final TimeValue pollingInterval; + private TimeValue windowDuration; + private final AtomicReference<MovingAverage> observations = new AtomicReference<>(); + + protected volatile Scheduler.Cancellable scheduledFuture; + + public AbstractAverageUsageTracker(ThreadPool threadPool, TimeValue pollingInterval, TimeValue windowDuration) { + this.threadPool = threadPool; + this.pollingInterval = pollingInterval; + this.windowDuration = windowDuration; + this.setWindowSize(windowDuration); + } + + public abstract long getUsage(); + + /** + * Returns the moving average of the datapoints + */ + public double getAverage() { + return observations.get().getAverage(); + } + + /** + * Checks if we have datapoints more than or equal to the window size + */ + public boolean isReady() { + return observations.get().isReady(); + } + + /** + * Creates a new instance of MovingAverage with a new window size based on WindowDuration + */ + public void setWindowSize(TimeValue windowDuration) { + this.windowDuration = windowDuration; + int windowSize = (int) (windowDuration.nanos() / pollingInterval.nanos()); + LOGGER.debug("updated window size: {}", windowSize); + observations.set(new MovingAverage(windowSize)); + } + + public TimeValue getPollingInterval() { + return pollingInterval; + } + + public TimeValue getWindowDuration() { + return windowDuration; + } + + public long getWindowSize() { + return observations.get().getCount(); + } + + public void recordUsage(long usage) { + observations.get().record(usage); + } + + @Override + protected void doStart() { + scheduledFuture = threadPool.scheduleWithFixedDelay(() -> { + long usage = getUsage(); + recordUsage(usage); + }, pollingInterval, ThreadPool.Names.GENERIC); + } + + @Override + protected void doStop() { + if (scheduledFuture != null) { + scheduledFuture.cancel(); + } + } + + @Override + protected void doClose() {} +} diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/AverageCpuUsageTracker.java b/server/src/main/java/org/opensearch/node/resource/tracker/AverageCpuUsageTracker.java new file mode 100644 index 0000000000000..160d385762eb0 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/resource/tracker/AverageCpuUsageTracker.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.node.resource.tracker; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.monitor.process.ProcessProbe; +import org.opensearch.threadpool.ThreadPool; + +/** + * AverageCpuUsageTracker tracks the average CPU usage by polling the CPU usage every (pollingInterval) + * and keeping track of the rolling average over a defined time window (windowDuration). + */ +public class AverageCpuUsageTracker extends AbstractAverageUsageTracker { + private static final Logger LOGGER = LogManager.getLogger(AverageCpuUsageTracker.class); + + public AverageCpuUsageTracker(ThreadPool threadPool, TimeValue pollingInterval, TimeValue windowDuration) { + super(threadPool, pollingInterval, windowDuration); + } + + /** + * Returns the process CPU usage in percent + */ + @Override + public long getUsage() { + long usage = ProcessProbe.getInstance().getProcessCpuPercent(); + LOGGER.debug("Recording cpu usage: {}%", usage); + return usage; + } + +} diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/AverageIoUsageTracker.java b/server/src/main/java/org/opensearch/node/resource/tracker/AverageIoUsageTracker.java new file mode 100644 index 0000000000000..5472d4bda2326 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/resource/tracker/AverageIoUsageTracker.java @@ -0,0 +1,102 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node.resource.tracker; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.Constants; +import org.opensearch.common.ValidationException; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.monitor.fs.FsInfo.DeviceStats; +import org.opensearch.monitor.fs.FsService; +import org.opensearch.node.IoUsageStats; +import org.opensearch.threadpool.ThreadPool; + +import java.util.HashMap; +import java.util.Optional; + +/** + * AverageIoUsageTracker tracks the IO usage by polling the FS Stats for IO metrics every (pollingInterval) + * and keeping track of the rolling average over a defined time window (windowDuration). 
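+ * <p>
+ * A minimal usage sketch, illustrative only and not part of this change (start/stop come from the
+ * AbstractLifecycleComponent lifecycle; the interval and window values below are arbitrary):
+ * <pre>{@code
+ * AverageIoUsageTracker tracker = new AverageIoUsageTracker(
+ *     fsService, threadPool, TimeValue.timeValueMillis(5000), TimeValue.timeValueSeconds(120));
+ * tracker.start();                                // schedules polling; effective on Linux only
+ * IoUsageStats stats = tracker.getIoUsageStats(); // utilisation percent stays -1 until the window fills
+ * tracker.stop();
+ * }</pre>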
+ */
+public class AverageIoUsageTracker extends AbstractAverageUsageTracker {
+
+ private static final Logger LOGGER = LogManager.getLogger(AverageIoUsageTracker.class);
+ private final FsService fsService;
+ private final HashMap<String, Long> prevIoTimeDeviceMap;
+ private long prevTimeInMillis;
+ private IoUsageStats ioUsageStats;
+
+ public AverageIoUsageTracker(FsService fsService, ThreadPool threadPool, TimeValue pollingInterval, TimeValue windowDuration) {
+ super(threadPool, pollingInterval, windowDuration);
+ this.fsService = fsService;
+ this.prevIoTimeDeviceMap = new HashMap<>();
+ this.prevTimeInMillis = -1;
+ this.ioUsageStats = null;
+ }
+
+ /**
+ * Get the current IO usage percentage calculated using fs stats
+ */
+ @Override
+ public long getUsage() {
+ long usage = 0;
+ Optional<ValidationException> validationException = this.preValidateFsStats();
+ if (validationException != null && validationException.isPresent()) {
+ throw validationException.get();
+ }
+ // Note: even with a RAID setup there is currently only one mount device, and it reports 0 IO time in /proc/diskstats
+ DeviceStats[] devicesStats = fsService.stats().getIoStats().getDevicesStats();
+ long latestTimeInMillis = fsService.stats().getTimestamp();
+ for (DeviceStats devicesStat : devicesStats) {
+ long devicePreviousIoTime = prevIoTimeDeviceMap.getOrDefault(devicesStat.getDeviceName(), (long) -1);
+ long deviceCurrentIoTime = devicesStat.ioTimeInMillis();
+ if (prevTimeInMillis > 0 && (latestTimeInMillis - this.prevTimeInMillis > 0) && devicePreviousIoTime > 0) {
+ long absIoTime = (deviceCurrentIoTime - devicePreviousIoTime);
+ long deviceCurrentIoUsage = absIoTime * 100 / (latestTimeInMillis - this.prevTimeInMillis);
+ // Return the maximum IO usage across all attached devices
+ usage = Math.max(usage, deviceCurrentIoUsage);
+ }
+ prevIoTimeDeviceMap.put(devicesStat.getDeviceName(), devicesStat.ioTimeInMillis());
+ }
+ this.prevTimeInMillis = latestTimeInMillis;
+ return usage;
+ }
+
+ @Override
+ protected void doStart() {
+ if (Constants.LINUX) {
+ this.ioUsageStats = new IoUsageStats(-1);
+ scheduledFuture = threadPool.scheduleWithFixedDelay(() -> {
+ long usage = getUsage();
+ recordUsage(usage);
+ updateIoUsageStats();
+ }, pollingInterval, ThreadPool.Names.GENERIC);
+ }
+ }
+
+ public Optional<ValidationException> preValidateFsStats() {
+ ValidationException validationException = new ValidationException();
+ if (fsService == null
+ || fsService.stats() == null
+ || fsService.stats().getIoStats() == null
+ || fsService.stats().getIoStats().getDevicesStats() == null) {
+ validationException.addValidationError("FsService IoStats or DeviceStats are missing");
+ }
+ return validationException.validationErrors().isEmpty() ? Optional.empty() : Optional.of(validationException);
+ }
+
+ private void updateIoUsageStats() {
+ this.ioUsageStats.setIoUtilisationPercent(this.isReady() ?
this.getAverage() : -1); + } + + public IoUsageStats getIoUsageStats() { + return this.ioUsageStats; + } +} diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/AverageMemoryUsageTracker.java b/server/src/main/java/org/opensearch/node/resource/tracker/AverageMemoryUsageTracker.java new file mode 100644 index 0000000000000..c1d1c83656859 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/resource/tracker/AverageMemoryUsageTracker.java @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node.resource.tracker; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.threadpool.ThreadPool; + +import java.lang.management.ManagementFactory; +import java.lang.management.MemoryMXBean; + +/** + * AverageMemoryUsageTracker tracks the average JVM usage by polling the JVM usage every (pollingInterval) + * and keeping track of the rolling average over a defined time window (windowDuration). + */ +public class AverageMemoryUsageTracker extends AbstractAverageUsageTracker { + + private static final Logger LOGGER = LogManager.getLogger(AverageMemoryUsageTracker.class); + + private static final MemoryMXBean MEMORY_MX_BEAN = ManagementFactory.getMemoryMXBean(); + + public AverageMemoryUsageTracker(ThreadPool threadPool, TimeValue pollingInterval, TimeValue windowDuration) { + super(threadPool, pollingInterval, windowDuration); + } + + /** + * Get current memory usage percentage calculated against max heap memory + */ + @Override + public long getUsage() { + long usage = MEMORY_MX_BEAN.getHeapMemoryUsage().getUsed() * 100 / MEMORY_MX_BEAN.getHeapMemoryUsage().getMax(); + LOGGER.debug("Recording memory usage: {}%", usage); + return usage; + } +} diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/NodeResourceUsageTracker.java b/server/src/main/java/org/opensearch/node/resource/tracker/NodeResourceUsageTracker.java new file mode 100644 index 0000000000000..621f90e80454c --- /dev/null +++ b/server/src/main/java/org/opensearch/node/resource/tracker/NodeResourceUsageTracker.java @@ -0,0 +1,154 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.node.resource.tracker;
+
+import org.apache.lucene.util.Constants;
+import org.opensearch.common.lifecycle.AbstractLifecycleComponent;
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.monitor.fs.FsService;
+import org.opensearch.node.IoUsageStats;
+import org.opensearch.threadpool.ThreadPool;
+
+/**
+ * This tracks the usage of node resources such as CPU, IO and memory
+ */
+public class NodeResourceUsageTracker extends AbstractLifecycleComponent {
+ private ThreadPool threadPool;
+ private final ClusterSettings clusterSettings;
+ private AverageCpuUsageTracker cpuUsageTracker;
+ private AverageMemoryUsageTracker memoryUsageTracker;
+ private AverageIoUsageTracker ioUsageTracker;
+
+ private ResourceTrackerSettings resourceTrackerSettings;
+
+ private final FsService fsService;
+
+ public NodeResourceUsageTracker(FsService fsService, ThreadPool threadPool, Settings settings, ClusterSettings clusterSettings) {
+ this.fsService = fsService;
+ this.threadPool = threadPool;
+ this.clusterSettings = clusterSettings;
+ this.resourceTrackerSettings = new ResourceTrackerSettings(settings);
+ initialize();
+ }
+
+ /**
+ * Return CPU utilization average if we have enough datapoints, otherwise return 0
+ */
+ public double getCpuUtilizationPercent() {
+ if (cpuUsageTracker.isReady()) {
+ return cpuUsageTracker.getAverage();
+ }
+ return 0.0;
+ }
+
+ /**
+ * Return memory utilization average if we have enough datapoints, otherwise return 0
+ */
+ public double getMemoryUtilizationPercent() {
+ if (memoryUsageTracker.isReady()) {
+ return memoryUsageTracker.getAverage();
+ }
+ return 0.0;
+ }
+
+ /**
+ * Return the IO usage stats tracked for this node; the utilisation percent they carry stays -1 until enough datapoints are collected
+ */
+ public IoUsageStats getIoUsageStats() {
+ return ioUsageTracker.getIoUsageStats();
+ }
+
+ /**
+ * Checks if all of the resource usage trackers are ready
+ */
+ public boolean isReady() {
+ if (Constants.LINUX) {
+ return memoryUsageTracker.isReady() && cpuUsageTracker.isReady() && ioUsageTracker.isReady();
+ }
+ return memoryUsageTracker.isReady() && cpuUsageTracker.isReady();
+ }
+
+ void initialize() {
+ cpuUsageTracker = new AverageCpuUsageTracker(
+ threadPool,
+ resourceTrackerSettings.getCpuPollingInterval(),
+ resourceTrackerSettings.getCpuWindowDuration()
+ );
+ clusterSettings.addSettingsUpdateConsumer(
+ ResourceTrackerSettings.GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING,
+ this::setCpuWindowDuration
+ );
+
+ memoryUsageTracker = new AverageMemoryUsageTracker(
+ threadPool,
+ resourceTrackerSettings.getMemoryPollingInterval(),
+ resourceTrackerSettings.getMemoryWindowDuration()
+ );
+ clusterSettings.addSettingsUpdateConsumer(
+ ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING,
+ this::setMemoryWindowDuration
+ );
+
+ ioUsageTracker = new AverageIoUsageTracker(
+ fsService,
+ threadPool,
+ resourceTrackerSettings.getIoPollingInterval(),
+ resourceTrackerSettings.getIoWindowDuration()
+ );
+ clusterSettings.addSettingsUpdateConsumer(
+ ResourceTrackerSettings.GLOBAL_IO_USAGE_AC_WINDOW_DURATION_SETTING,
+ this::setIoWindowDuration
+ );
+ }
+
+ private void setMemoryWindowDuration(TimeValue windowDuration) {
+ memoryUsageTracker.setWindowSize(windowDuration);
+ resourceTrackerSettings.setMemoryWindowDuration(windowDuration);
+ }
+
+ private void setCpuWindowDuration(TimeValue windowDuration) {
+ cpuUsageTracker.setWindowSize(windowDuration);
+
resourceTrackerSettings.setCpuWindowDuration(windowDuration); + } + + private void setIoWindowDuration(TimeValue windowDuration) { + ioUsageTracker.setWindowSize(windowDuration); + resourceTrackerSettings.setIoWindowDuration(windowDuration); + } + + /** + * Visible for testing + */ + ResourceTrackerSettings getResourceTrackerSettings() { + return resourceTrackerSettings; + } + + @Override + protected void doStart() { + cpuUsageTracker.doStart(); + memoryUsageTracker.doStart(); + ioUsageTracker.doStart(); + } + + @Override + protected void doStop() { + cpuUsageTracker.doStop(); + memoryUsageTracker.doStop(); + ioUsageTracker.doStop(); + } + + @Override + protected void doClose() { + cpuUsageTracker.doClose(); + memoryUsageTracker.doClose(); + ioUsageTracker.doClose(); + } +} diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/ResourceTrackerSettings.java b/server/src/main/java/org/opensearch/node/resource/tracker/ResourceTrackerSettings.java new file mode 100644 index 0000000000000..b423b92c8a4fb --- /dev/null +++ b/server/src/main/java/org/opensearch/node/resource/tracker/ResourceTrackerSettings.java @@ -0,0 +1,126 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node.resource.tracker; + +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; + +/** + * Settings related to resource usage trackers such as polling interval, window duration etc + */ +public class ResourceTrackerSettings { + + private static class Defaults { + /** + * This is the default polling interval of usage trackers to get the resource utilization data + */ + private static final long POLLING_INTERVAL_IN_MILLIS = 500; + /** + * This is the default window duration on which the average resource utilization values will be calculated + */ + private static final long WINDOW_DURATION_IN_SECONDS = 30; + /** + * This is the default polling interval for IO usage tracker + */ + private static final long IO_POLLING_INTERVAL_IN_MILLIS = 5000; + /** + * This is the default window duration for IO usage tracker on which the average resource utilization values will be calculated + */ + private static final long IO_WINDOW_DURATION_IN_SECONDS = 120; + } + + public static final Setting<TimeValue> GLOBAL_CPU_USAGE_AC_POLLING_INTERVAL_SETTING = Setting.positiveTimeSetting( + "node.resource.tracker.global_cpu_usage.polling_interval", + TimeValue.timeValueMillis(Defaults.POLLING_INTERVAL_IN_MILLIS), + Setting.Property.NodeScope + ); + public static final Setting<TimeValue> GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING = Setting.positiveTimeSetting( + "node.resource.tracker.global_cpu_usage.window_duration", + TimeValue.timeValueSeconds(Defaults.WINDOW_DURATION_IN_SECONDS), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public static final Setting<TimeValue> GLOBAL_IO_USAGE_AC_POLLING_INTERVAL_SETTING = Setting.positiveTimeSetting( + "node.resource.tracker.global_io_usage.polling_interval", + TimeValue.timeValueMillis(Defaults.IO_POLLING_INTERVAL_IN_MILLIS), + Setting.Property.NodeScope + ); + public static final Setting<TimeValue> GLOBAL_IO_USAGE_AC_WINDOW_DURATION_SETTING = Setting.positiveTimeSetting( + "node.resource.tracker.global_io_usage.window_duration", + TimeValue.timeValueSeconds(Defaults.IO_WINDOW_DURATION_IN_SECONDS), + 
Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public static final Setting<TimeValue> GLOBAL_JVM_USAGE_AC_POLLING_INTERVAL_SETTING = Setting.positiveTimeSetting( + "node.resource.tracker.global_jvmmp.polling_interval", + TimeValue.timeValueMillis(Defaults.POLLING_INTERVAL_IN_MILLIS), + Setting.Property.NodeScope + ); + + public static final Setting<TimeValue> GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING = Setting.positiveTimeSetting( + "node.resource.tracker.global_jvmmp.window_duration", + TimeValue.timeValueSeconds(Defaults.WINDOW_DURATION_IN_SECONDS), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + private volatile TimeValue cpuWindowDuration; + private volatile TimeValue cpuPollingInterval; + private volatile TimeValue memoryWindowDuration; + private volatile TimeValue memoryPollingInterval; + private volatile TimeValue ioWindowDuration; + private volatile TimeValue ioPollingInterval; + + public ResourceTrackerSettings(Settings settings) { + this.cpuPollingInterval = GLOBAL_CPU_USAGE_AC_POLLING_INTERVAL_SETTING.get(settings); + this.cpuWindowDuration = GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING.get(settings); + this.memoryPollingInterval = GLOBAL_JVM_USAGE_AC_POLLING_INTERVAL_SETTING.get(settings); + this.memoryWindowDuration = GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.get(settings); + this.ioPollingInterval = GLOBAL_IO_USAGE_AC_POLLING_INTERVAL_SETTING.get(settings); + this.ioWindowDuration = GLOBAL_IO_USAGE_AC_WINDOW_DURATION_SETTING.get(settings); + } + + public TimeValue getCpuWindowDuration() { + return this.cpuWindowDuration; + } + + public TimeValue getCpuPollingInterval() { + return cpuPollingInterval; + } + + public TimeValue getMemoryPollingInterval() { + return memoryPollingInterval; + } + + public TimeValue getMemoryWindowDuration() { + return memoryWindowDuration; + } + + public TimeValue getIoPollingInterval() { + return ioPollingInterval; + } + + public TimeValue getIoWindowDuration() { + return ioWindowDuration; + } + + public void setCpuWindowDuration(TimeValue cpuWindowDuration) { + this.cpuWindowDuration = cpuWindowDuration; + } + + public void setMemoryWindowDuration(TimeValue memoryWindowDuration) { + this.memoryWindowDuration = memoryWindowDuration; + } + + public void setIoWindowDuration(TimeValue ioWindowDuration) { + this.ioWindowDuration = ioWindowDuration; + } +} diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/package-info.java b/server/src/main/java/org/opensearch/node/resource/tracker/package-info.java new file mode 100644 index 0000000000000..aace2a019973e --- /dev/null +++ b/server/src/main/java/org/opensearch/node/resource/tracker/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** + * Node level resource usage stats tracker package + */ +package org.opensearch.node.resource.tracker; diff --git a/server/src/main/java/org/opensearch/persistent/AllocatedPersistentTask.java b/server/src/main/java/org/opensearch/persistent/AllocatedPersistentTask.java index e5af66dd5054f..1e20a3b1d5275 100644 --- a/server/src/main/java/org/opensearch/persistent/AllocatedPersistentTask.java +++ b/server/src/main/java/org/opensearch/persistent/AllocatedPersistentTask.java @@ -34,12 +34,12 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; import org.opensearch.common.Nullable; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.tasks.TaskId; import org.opensearch.tasks.CancellableTask; -import org.opensearch.tasks.TaskId; import org.opensearch.tasks.TaskManager; import java.util.Map; diff --git a/server/src/main/java/org/opensearch/persistent/CompletionPersistentTaskAction.java b/server/src/main/java/org/opensearch/persistent/CompletionPersistentTaskAction.java index 9ef1749c077c9..e43a67ea82fdd 100644 --- a/server/src/main/java/org/opensearch/persistent/CompletionPersistentTaskAction.java +++ b/server/src/main/java/org/opensearch/persistent/CompletionPersistentTaskAction.java @@ -31,7 +31,6 @@ package org.opensearch.persistent; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.ActionType; import org.opensearch.action.support.ActionFilters; @@ -45,6 +44,7 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/persistent/NodePersistentTasksExecutor.java b/server/src/main/java/org/opensearch/persistent/NodePersistentTasksExecutor.java index 209df1e1f498d..403630b89e42a 100644 --- a/server/src/main/java/org/opensearch/persistent/NodePersistentTasksExecutor.java +++ b/server/src/main/java/org/opensearch/persistent/NodePersistentTasksExecutor.java @@ -37,7 +37,7 @@ /** * This component is responsible for execution of persistent tasks. 
- * + * <p> * It abstracts away the execution of tasks and greatly simplifies testing of PersistentTasksNodeService * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/persistent/PersistentTaskResponse.java b/server/src/main/java/org/opensearch/persistent/PersistentTaskResponse.java index b0fe789badb5e..c52b5574ecf5e 100644 --- a/server/src/main/java/org/opensearch/persistent/PersistentTaskResponse.java +++ b/server/src/main/java/org/opensearch/persistent/PersistentTaskResponse.java @@ -31,7 +31,7 @@ package org.opensearch.persistent; -import org.opensearch.action.ActionResponse; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.persistent.PersistentTasksCustomMetadata.PersistentTask; diff --git a/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java b/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java index bce3095d0c30d..4e38fb34dbf17 100644 --- a/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java +++ b/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java @@ -36,7 +36,6 @@ import org.apache.logging.log4j.Logger; import org.opensearch.ResourceAlreadyExistsException; import org.opensearch.ResourceNotFoundException; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateListener; @@ -52,6 +51,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractAsyncTask; +import org.opensearch.core.action.ActionListener; import org.opensearch.persistent.PersistentTasksCustomMetadata.Assignment; import org.opensearch.persistent.PersistentTasksCustomMetadata.PersistentTask; import org.opensearch.persistent.decider.AssignmentDecision; @@ -323,7 +323,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS /** * This unassigns a task from any node, i.e. it is assigned to a {@code null} node with the provided reason. - * + * <p> * Since the assignment executor node is null, the {@link PersistentTasksClusterService} will attempt to reassign it to a valid * node quickly. 
* diff --git a/server/src/main/java/org/opensearch/persistent/PersistentTasksCustomMetadata.java b/server/src/main/java/org/opensearch/persistent/PersistentTasksCustomMetadata.java index 48a169a8da961..c83362ac2d5ab 100644 --- a/server/src/main/java/org/opensearch/persistent/PersistentTasksCustomMetadata.java +++ b/server/src/main/java/org/opensearch/persistent/PersistentTasksCustomMetadata.java @@ -41,12 +41,12 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.Nullable; import org.opensearch.core.ParseField; -import org.opensearch.common.Strings; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ObjectParser.NamedObjectParser; import org.opensearch.core.xcontent.ToXContent; @@ -211,7 +211,7 @@ public int hashCode() { @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } public long getNumberOfTasksOnNode(String nodeId, String taskName) { @@ -429,7 +429,7 @@ public int hashCode() { @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } public String getId() { diff --git a/server/src/main/java/org/opensearch/persistent/PersistentTasksExecutor.java b/server/src/main/java/org/opensearch/persistent/PersistentTasksExecutor.java index 08a509af0b307..ab2e50d5fb42b 100644 --- a/server/src/main/java/org/opensearch/persistent/PersistentTasksExecutor.java +++ b/server/src/main/java/org/opensearch/persistent/PersistentTasksExecutor.java @@ -35,9 +35,9 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Nullable; +import org.opensearch.core.tasks.TaskId; import org.opensearch.persistent.PersistentTasksCustomMetadata.Assignment; import org.opensearch.persistent.PersistentTasksCustomMetadata.PersistentTask; -import org.opensearch.tasks.TaskId; import java.util.Map; import java.util.function.Predicate; diff --git a/server/src/main/java/org/opensearch/persistent/PersistentTasksNodeService.java b/server/src/main/java/org/opensearch/persistent/PersistentTasksNodeService.java index 0900cf2e19254..602be47476ad2 100644 --- a/server/src/main/java/org/opensearch/persistent/PersistentTasksNodeService.java +++ b/server/src/main/java/org/opensearch/persistent/PersistentTasksNodeService.java @@ -34,20 +34,20 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterStateListener; -import org.opensearch.common.Strings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.tasks.TaskId; 
+import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.gateway.GatewayService; import org.opensearch.persistent.PersistentTasksCustomMetadata.PersistentTask; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskAwareRequest; -import org.opensearch.tasks.TaskId; import org.opensearch.tasks.TaskManager; import java.io.IOException; @@ -364,7 +364,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } @Override diff --git a/server/src/main/java/org/opensearch/persistent/PersistentTasksService.java b/server/src/main/java/org/opensearch/persistent/PersistentTasksService.java index 224943ce8ce38..53ef6590df192 100644 --- a/server/src/main/java/org/opensearch/persistent/PersistentTasksService.java +++ b/server/src/main/java/org/opensearch/persistent/PersistentTasksService.java @@ -33,9 +33,8 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionType; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; +import org.opensearch.action.ActionType; import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.opensearch.client.Client; @@ -45,9 +44,10 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.tasks.TaskId; import org.opensearch.node.NodeClosedException; import org.opensearch.persistent.PersistentTasksCustomMetadata.PersistentTask; -import org.opensearch.tasks.TaskId; import org.opensearch.threadpool.ThreadPool; import java.util.function.Predicate; diff --git a/server/src/main/java/org/opensearch/persistent/RemovePersistentTaskAction.java b/server/src/main/java/org/opensearch/persistent/RemovePersistentTaskAction.java index b9d9ea8f51fca..45fe8917095c3 100644 --- a/server/src/main/java/org/opensearch/persistent/RemovePersistentTaskAction.java +++ b/server/src/main/java/org/opensearch/persistent/RemovePersistentTaskAction.java @@ -31,7 +31,6 @@ package org.opensearch.persistent; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.ActionType; import org.opensearch.action.support.ActionFilters; @@ -45,6 +44,7 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/persistent/StartPersistentTaskAction.java b/server/src/main/java/org/opensearch/persistent/StartPersistentTaskAction.java index b607bd7441858..c839fb72c4d05 100644 --- a/server/src/main/java/org/opensearch/persistent/StartPersistentTaskAction.java +++ b/server/src/main/java/org/opensearch/persistent/StartPersistentTaskAction.java @@ -31,7 +31,6 @@ package org.opensearch.persistent; -import org.opensearch.action.ActionListener; import 
org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.ActionType; import org.opensearch.action.support.ActionFilters; @@ -46,6 +45,7 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/persistent/UpdatePersistentTaskStatusAction.java b/server/src/main/java/org/opensearch/persistent/UpdatePersistentTaskStatusAction.java index 32c8961120663..030b3d231cdd3 100644 --- a/server/src/main/java/org/opensearch/persistent/UpdatePersistentTaskStatusAction.java +++ b/server/src/main/java/org/opensearch/persistent/UpdatePersistentTaskStatusAction.java @@ -31,7 +31,6 @@ package org.opensearch.persistent; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.ActionType; import org.opensearch.action.support.ActionFilters; @@ -45,6 +44,7 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/plugins/ActionPlugin.java b/server/src/main/java/org/opensearch/plugins/ActionPlugin.java index 031ac7d068ef9..fadf17ef48622 100644 --- a/server/src/main/java/org/opensearch/plugins/ActionPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/ActionPlugin.java @@ -32,9 +32,8 @@ package org.opensearch.plugins; -import org.opensearch.action.ActionType; import org.opensearch.action.ActionRequest; -import org.opensearch.action.ActionResponse; +import org.opensearch.action.ActionType; import org.opensearch.action.RequestValidators; import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest; import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest; @@ -48,6 +47,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsFilter; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.Strings; import org.opensearch.rest.RestController; import org.opensearch.rest.RestHandler; diff --git a/server/src/main/java/org/opensearch/plugins/CachePlugin.java b/server/src/main/java/org/opensearch/plugins/CachePlugin.java new file mode 100644 index 0000000000000..d962ed1db14bf --- /dev/null +++ b/server/src/main/java/org/opensearch/plugins/CachePlugin.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.plugins;
+
+import org.opensearch.common.annotation.ExperimentalApi;
+import org.opensearch.common.cache.ICache;
+
+import java.util.Map;
+
+/**
+ * Plugin to extend cache related classes
+ *
+ * @opensearch.experimental
+ */
+@ExperimentalApi
+public interface CachePlugin {
+
+ /**
+ * Returns a map of cacheStoreType and a factory via which objects can be created on demand.
+ * For example:
+ * If there are two implementations of this plugin, let's say A and B, each may return a map like the ones
+ * below, which can be aggregated by fetching all plugins.
+ *
+ * A: Map.of(DISK, new ADiskCache.Factory(),
+ * ON_HEAP, new AOnHeapCache.Factory())
+ *
+ * B: Map.of(ON_HEAP, new BOnHeapCache.Factory())
+ *
+ * @return Map of cacheStoreType and an associated factory.
+ */
+ Map<String, ICache.Factory> getCacheFactoryMap();
+
+ String getName();
+}
diff --git a/server/src/main/java/org/opensearch/plugins/CircuitBreakerPlugin.java b/server/src/main/java/org/opensearch/plugins/CircuitBreakerPlugin.java
index f52aab5b238b3..3552c8286b7a3 100644
--- a/server/src/main/java/org/opensearch/plugins/CircuitBreakerPlugin.java
+++ b/server/src/main/java/org/opensearch/plugins/CircuitBreakerPlugin.java
@@ -32,10 +32,10 @@
 package org.opensearch.plugins;
-import org.opensearch.common.breaker.CircuitBreaker;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.core.common.breaker.CircuitBreaker;
+import org.opensearch.core.indices.breaker.CircuitBreakerService;
 import org.opensearch.indices.breaker.BreakerSettings;
-import org.opensearch.indices.breaker.CircuitBreakerService;
 /**
  * An extension point for {@link Plugin} implementations to add custom circuit breakers
@@ -46,9 +46,9 @@ public interface CircuitBreakerPlugin {
 /**
  * Each of the factory functions are passed to the configured {@link CircuitBreakerService}.
- *
+ * <p>
  * The service then constructs a {@link CircuitBreaker} given the resulting {@link BreakerSettings}.
- *
+ * <p>
  * Custom circuit breakers settings can be found in {@link BreakerSettings}.
 * See:
 * - limit (example: `breaker.foo.limit`) {@link BreakerSettings#CIRCUIT_BREAKER_LIMIT_SETTING}
@@ -63,7 +63,7 @@ public interface CircuitBreakerPlugin {
 /**
  * The passed {@link CircuitBreaker} object is the same one that was constructed by the {@link BreakerSettings}
  * provided by {@link CircuitBreakerPlugin#getCircuitBreaker(Settings)}.
- *
+ * <p>
  * This reference should never change throughout the lifetime of the node.
 *
 * @param circuitBreaker The constructed {@link CircuitBreaker} object from the {@link BreakerSettings}
diff --git a/server/src/main/java/org/opensearch/plugins/ClusterPlugin.java b/server/src/main/java/org/opensearch/plugins/ClusterPlugin.java
index c2e147b86d17f..1edd9f52d97a7 100644
--- a/server/src/main/java/org/opensearch/plugins/ClusterPlugin.java
+++ b/server/src/main/java/org/opensearch/plugins/ClusterPlugin.java
@@ -64,7 +64,7 @@ default Collection<AllocationDecider> createAllocationDeciders(Settings settings
 /**
  * Return {@link ShardsAllocator} implementations added by this plugin.
- *
+ * <p>
  * The key of the returned {@link Map} is the name of the allocator, and the value
 * is a function to construct the allocator.
 *
@@ -88,7 +88,7 @@ default Map<String, ExistingShardsAllocator> getExistingShardsAllocators() {
 /**
  * Called when the node is started
 *
- * DEPRECATED: Use {@link #onNodeStarted(DiscoveryNode)} for newer implementations.
+ * @deprecated Use {@link #onNodeStarted(DiscoveryNode)} for newer implementations.
*/ @Deprecated default void onNodeStarted() {} diff --git a/server/src/main/java/org/opensearch/plugins/CryptoKeyProviderPlugin.java b/server/src/main/java/org/opensearch/plugins/CryptoKeyProviderPlugin.java new file mode 100644 index 0000000000000..d9b5f0d79ec8c --- /dev/null +++ b/server/src/main/java/org/opensearch/plugins/CryptoKeyProviderPlugin.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugins; + +import org.opensearch.cluster.metadata.CryptoMetadata; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.crypto.MasterKeyProvider; + +/** + * Crypto plugin to provide support for custom key providers. + * + * @opensearch.experimental + */ +@ExperimentalApi +public interface CryptoKeyProviderPlugin { + + /** + * Every call to this method should return a new key provider. + * @param cryptoMetadata These are crypto settings needed for creation of a new key provider. + * @return master key provider. + */ + MasterKeyProvider createKeyProvider(CryptoMetadata cryptoMetadata); + + /** + * One crypto plugin extension implementation refers to a unique key provider type. + * @return key provider type + */ + String type(); +} diff --git a/server/src/main/java/org/opensearch/plugins/CryptoPlugin.java b/server/src/main/java/org/opensearch/plugins/CryptoPlugin.java new file mode 100644 index 0000000000000..ad348d07e23d3 --- /dev/null +++ b/server/src/main/java/org/opensearch/plugins/CryptoPlugin.java @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugins; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.crypto.CryptoHandler; +import org.opensearch.common.crypto.MasterKeyProvider; + +/** + * Crypto plugin to provide encryption and decryption support. + * @opensearch.api + */ +@ExperimentalApi +public interface CryptoPlugin<T, U> { + + /** + * To create a crypto handler for handling encryption and decryption ops. + * @param keyProvider key provider instance to provide keys used in encrypting data. + * @param keyProviderName Name of key provider to distinguish between multiple instances created with different + * configurations of same keyProviderType. + * @param keyProviderType Unique type of key provider to distinguish between different key provider implementations. + * @param onClose Closes key provider or other clean up operations on close. + * @return crypto handler instance. 
+ */ + CryptoHandler<T, U> getOrCreateCryptoHandler( + MasterKeyProvider keyProvider, + String keyProviderName, + String keyProviderType, + Runnable onClose + ); +} diff --git a/server/src/main/java/org/opensearch/plugins/DiscoveryPlugin.java b/server/src/main/java/org/opensearch/plugins/DiscoveryPlugin.java index 89433b2a3b67d..63f0d826b592f 100644 --- a/server/src/main/java/org/opensearch/plugins/DiscoveryPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/DiscoveryPlugin.java @@ -37,6 +37,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.discovery.SeedHostsProvider; import org.opensearch.transport.TransportService; @@ -67,13 +68,10 @@ public interface DiscoveryPlugin { * This can be handy if you want to provide your own Network interface name like _mycard_ * and implement by yourself the logic to get an actual IP address/hostname based on this * name. - * + * <p> * For example: you could call a third party service (an API) to resolve _mycard_. * Then you could define in opensearch.yml settings like: - * - * <pre>{@code - * network.host: _mycard_ - * }</pre> + * {@code network.host: _mycard_ } */ default NetworkService.CustomNameResolver getCustomNameResolver(Settings settings) { return null; @@ -81,12 +79,12 @@ default NetworkService.CustomNameResolver getCustomNameResolver(Settings setting /** * Returns providers of seed hosts for discovery. - * + * <p> * The key of the returned map is the name of the host provider * (see {@link org.opensearch.discovery.DiscoveryModule#DISCOVERY_SEED_PROVIDERS_SETTING}), and * the value is a supplier to construct the host provider when it is selected for use. * - * @param transportService Use to form the {@link org.opensearch.common.transport.TransportAddress} portion + * @param transportService Use to form the {@link TransportAddress} portion * of a {@link org.opensearch.cluster.node.DiscoveryNode} * @param networkService Use to find the publish host address of the current node */ diff --git a/server/src/main/java/org/opensearch/plugins/EnginePlugin.java b/server/src/main/java/org/opensearch/plugins/EnginePlugin.java index 92ae2ff9cd661..5ea5442d84ffa 100644 --- a/server/src/main/java/org/opensearch/plugins/EnginePlugin.java +++ b/server/src/main/java/org/opensearch/plugins/EnginePlugin.java @@ -92,7 +92,7 @@ default Optional<CodecServiceFactory> getCustomCodecServiceFactory(IndexSettings * When an index is created this method is invoked for each engine plugin. Engine plugins that need to provide a * custom {@link TranslogDeletionPolicy} can override this method to return a function that takes the {@link IndexSettings} * and a {@link Supplier} for {@link RetentionLeases} and returns a custom {@link TranslogDeletionPolicy}. - * + * <p> * Only one of the installed Engine plugins can override this otherwise {@link IllegalStateException} will be thrown. 
* * @return a function that returns an instance of {@link TranslogDeletionPolicy} diff --git a/server/src/main/java/org/opensearch/plugins/ExtensiblePlugin.java b/server/src/main/java/org/opensearch/plugins/ExtensiblePlugin.java index 4dd4010383934..367d335ac4fea 100644 --- a/server/src/main/java/org/opensearch/plugins/ExtensiblePlugin.java +++ b/server/src/main/java/org/opensearch/plugins/ExtensiblePlugin.java @@ -36,7 +36,7 @@ /** * An extension point for {@link Plugin} implementations to be themselves extensible. - * + * <p> * This class provides a callback for extensible plugins to be informed of other plugins * which extend them. * @@ -62,7 +62,7 @@ interface ExtensionLoader { /** * Allow this plugin to load extensions from other plugins. - * + * <p> * This method is called once only, after initializing this plugin and all plugins extending this plugin. It is called before * any other methods on this Plugin instance are called. */ diff --git a/server/src/main/java/org/opensearch/plugins/IdentityPlugin.java b/server/src/main/java/org/opensearch/plugins/IdentityPlugin.java index 00f3f8aff585c..410535504f0dd 100644 --- a/server/src/main/java/org/opensearch/plugins/IdentityPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/IdentityPlugin.java @@ -19,16 +19,14 @@ public interface IdentityPlugin { /** - * Get the current subject - * - * Should never return null + * Get the current subject. + * @return Should never return null * */ public Subject getSubject(); /** * Get the Identity Plugin's token manager implementation - * - * Should never return null + * @return Should never return null. */ public TokenManager getTokenManager(); } diff --git a/server/src/main/java/org/opensearch/plugins/IndexStorePlugin.java b/server/src/main/java/org/opensearch/plugins/IndexStorePlugin.java index 2f549fec54759..ebd5717a00319 100644 --- a/server/src/main/java/org/opensearch/plugins/IndexStorePlugin.java +++ b/server/src/main/java/org/opensearch/plugins/IndexStorePlugin.java @@ -36,6 +36,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.ShardPath; import org.opensearch.indices.recovery.RecoveryState; @@ -49,12 +50,16 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface IndexStorePlugin { /** * An interface that describes how to create a new directory instance per shard. + * + * @opensearch.api */ @FunctionalInterface + @PublicApi(since = "1.0.0") interface DirectoryFactory { /** * Creates a new directory per shard. This method is called once per shard on shard creation. @@ -77,8 +82,11 @@ interface DirectoryFactory { /** * An interface that allows to create a new {@link RecoveryState} per shard. + * + * @opensearch.api */ @FunctionalInterface + @PublicApi(since = "1.0.0") interface RecoveryStateFactory { /** * Creates a new {@link RecoveryState} per shard. This method is called once per shard on shard creation. 
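The IndexStorePlugin contract above is now part of the public API, so a short sketch of a consumer may help. This is a hedged illustration, not part of the change itself: the plugin class name, the "nio-example" store type key, and the choice of Lucene's NIOFSDirectory are assumptions made for the example.

import org.apache.lucene.store.NIOFSDirectory;
import org.opensearch.index.IndexSettings;
import org.opensearch.index.shard.ShardPath;
import org.opensearch.plugins.IndexStorePlugin;
import org.opensearch.plugins.Plugin;

import java.util.Map;

/** Hypothetical plugin registering a custom per-shard directory implementation. */
public class ExampleStorePlugin extends Plugin implements IndexStorePlugin {
    @Override
    public Map<String, DirectoryFactory> getDirectoryFactories() {
        // Invoked once per shard on shard creation, per the DirectoryFactory contract above.
        DirectoryFactory factory = (IndexSettings indexSettings, ShardPath shardPath) -> new NIOFSDirectory(
            shardPath.resolveIndex()
        );
        return Map.of("nio-example", factory);
    }
}

An index would then opt in through its settings (for example index.store.type: nio-example), assuming the store type is resolved through these registered factories.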
diff --git a/server/src/main/java/org/opensearch/plugins/IngestPlugin.java b/server/src/main/java/org/opensearch/plugins/IngestPlugin.java index f78170c2ae3af..dc4f22de71344 100644 --- a/server/src/main/java/org/opensearch/plugins/IngestPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/IngestPlugin.java @@ -32,11 +32,11 @@ package org.opensearch.plugins; +import org.opensearch.ingest.Processor; + import java.util.Collections; import java.util.Map; -import org.opensearch.ingest.Processor; - /** * An extension point for {@link Plugin} implementations to add custom ingest processors * @@ -46,7 +46,7 @@ public interface IngestPlugin { /** * Returns additional ingest processor types added by this plugin. - * + * <p> * The key of the returned {@link Map} is the unique name for the processor which is specified * in pipeline configurations, and the value is a {@link org.opensearch.ingest.Processor.Factory} * to create the processor from a given pipeline configuration. diff --git a/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java b/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java index 8ea8333f4851a..07df40bafe6a1 100644 --- a/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java @@ -31,25 +31,26 @@ package org.opensearch.plugins; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.function.Supplier; - -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.http.HttpServerTransport; -import org.opensearch.indices.breaker.CircuitBreakerService; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportInterceptor; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; + /** * Plugin for extending network and transport related classes * @@ -82,7 +83,8 @@ default Map<String, Supplier<Transport>> getTransports( PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, - NetworkService networkService + NetworkService networkService, + Tracer tracer ) { return Collections.emptyMap(); } @@ -100,7 +102,8 @@ default Map<String, Supplier<HttpServerTransport>> getHttpTransports( NamedXContentRegistry xContentRegistry, NetworkService networkService, HttpServerTransport.Dispatcher dispatcher, - ClusterSettings clusterSettings + ClusterSettings clusterSettings, + Tracer tracer ) { return Collections.emptyMap(); } diff --git a/server/src/main/java/org/opensearch/plugins/Plugin.java b/server/src/main/java/org/opensearch/plugins/Plugin.java index 02a46f44ef23b..48486a6b55dfd 100644 --- a/server/src/main/java/org/opensearch/plugins/Plugin.java +++ b/server/src/main/java/org/opensearch/plugins/Plugin.java @@ -32,7 +32,6 @@ package org.opensearch.plugins; -import 
org.opensearch.watcher.ResourceWatcherService; import org.opensearch.bootstrap.BootstrapCheck; import org.opensearch.client.Client; import org.opensearch.cluster.ClusterState; @@ -41,13 +40,14 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.component.LifecycleComponent; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Module; -import org.opensearch.core.common.io.stream.NamedWriteable; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.lifecycle.LifecycleComponent; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.SettingUpgrader; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.io.stream.NamedWriteable; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.env.Environment; @@ -58,6 +58,7 @@ import org.opensearch.script.ScriptService; import org.opensearch.threadpool.ExecutorBuilder; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.watcher.ResourceWatcherService; import java.io.Closeable; import java.io.IOException; @@ -89,6 +90,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class Plugin implements Closeable { /** @@ -119,7 +121,7 @@ public Collection<Class<? extends LifecycleComponent>> getGuiceServiceClasses() /** * Returns components added by this plugin. - * + * <p> * Any components returned that implement {@link LifecycleComponent} will have their lifecycle managed. * Note: To aid in the migration away from guice, all objects returned as components will be bound in guice * to themselves. 
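The PluginInfo change below lets a plugin descriptor declare compatibility either through the legacy opensearch.version property or through a new dependencies property carrying exactly one semver range for opensearch (per the serialization comment, only the = and ~ notations are currently supported). As a purely illustrative sketch with made-up values, a plugin-descriptor.properties using the new form might look like:

name=example-plugin
description=An example plugin
version=1.0.0
classname=org.example.ExamplePlugin
java.version=11
dependencies={ opensearch: "~2.13.0" }

The unquoted field name inside the dependencies value is accepted because the parser below enables JsonReadFeature.ALLOW_UNQUOTED_FIELD_NAMES.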
diff --git a/server/src/main/java/org/opensearch/plugins/PluginInfo.java b/server/src/main/java/org/opensearch/plugins/PluginInfo.java index 8936f9eba7a7f..b6030f4ded5e5 100644 --- a/server/src/main/java/org/opensearch/plugins/PluginInfo.java +++ b/server/src/main/java/org/opensearch/plugins/PluginInfo.java @@ -32,19 +32,28 @@ package org.opensearch.plugins; +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.json.JsonReadFeature; + import org.opensearch.Version; import org.opensearch.bootstrap.JarHell; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.xcontent.json.JsonXContentParser; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.core.common.Strings; +import org.opensearch.core.xcontent.DeprecationHandler; +import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.semver.SemverRange; import java.io.IOException; import java.io.InputStream; import java.nio.file.Files; import java.nio.file.Path; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -59,15 +68,20 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") public class PluginInfo implements Writeable, ToXContentObject { public static final String OPENSEARCH_PLUGIN_PROPERTIES = "plugin-descriptor.properties"; public static final String OPENSEARCH_PLUGIN_POLICY = "plugin-security.policy"; + private static final JsonFactory jsonFactory = new JsonFactory().configure( + JsonReadFeature.ALLOW_UNQUOTED_FIELD_NAMES.mappedFeature(), + true + ); private final String name; private final String description; private final String version; - private final Version opensearchVersion; + private final List<SemverRange> opensearchVersionRanges; private final String javaVersion; private final String classname; private final String customFolderName; @@ -97,11 +111,41 @@ public PluginInfo( String customFolderName, List<String> extendedPlugins, boolean hasNativeController + ) { + this( + name, + description, + version, + List.of(SemverRange.fromString(opensearchVersion.toString())), + javaVersion, + classname, + customFolderName, + extendedPlugins, + hasNativeController + ); + } + + public PluginInfo( + String name, + String description, + String version, + List<SemverRange> opensearchVersionRanges, + String javaVersion, + String classname, + String customFolderName, + List<String> extendedPlugins, + boolean hasNativeController ) { this.name = name; this.description = description; this.version = version; - this.opensearchVersion = opensearchVersion; + // Ensure only one range is specified (for now) + if (opensearchVersionRanges.size() != 1) { + throw new IllegalArgumentException( + "Exactly one range is allowed to be specified in dependencies for the plugin [" + name + "]" + ); + } + this.opensearchVersionRanges = opensearchVersionRanges; this.javaVersion = javaVersion; this.classname = classname; this.customFolderName = customFolderName; @@ -150,11 +194,16 @@ public PluginInfo( * @param in the stream * @throws IOException if an I/O exception occurred reading the plugin info from the stream */ + @SuppressWarnings("unchecked") public PluginInfo(final StreamInput in) throws IOException { this.name = in.readString(); 
this.description = in.readString(); this.version = in.readString(); - this.opensearchVersion = in.readVersion(); + if (in.getVersion().onOrAfter(Version.V_2_13_0)) { + this.opensearchVersionRanges = (List<SemverRange>) in.readGenericValue(); + } else { + this.opensearchVersionRanges = List.of(new SemverRange(in.readVersion(), SemverRange.RangeOperator.DEFAULT)); + } this.javaVersion = in.readString(); this.classname = in.readString(); this.customFolderName = in.readString(); @@ -167,7 +216,15 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeString(name); out.writeString(description); out.writeString(version); - out.writeVersion(opensearchVersion); + if (out.getVersion().onOrAfter(Version.V_2_13_0)) { + out.writeGenericValue(opensearchVersionRanges); + } else { + /* + This works for the currently supported range notations (=,~). + As more notations are added, a suitable version must be picked. + */ + out.writeVersion(opensearchVersionRanges.get(0).getRangeVersion()); + } out.writeString(javaVersion); out.writeString(classname); if (customFolderName != null) { @@ -212,10 +269,49 @@ public static PluginInfo readFromProperties(final Path path) throws IOException } final String opensearchVersionString = propsMap.remove("opensearch.version"); - if (opensearchVersionString == null) { - throw new IllegalArgumentException("property [opensearch.version] is missing for plugin [" + name + "]"); + final String dependenciesValue = propsMap.remove("dependencies"); + if (opensearchVersionString == null && dependenciesValue == null) { + throw new IllegalArgumentException( + "Either [opensearch.version] or [dependencies] property must be specified for the plugin [" + name + "]" + ); + } + if (opensearchVersionString != null && dependenciesValue != null) { + throw new IllegalArgumentException( + "Only one of [opensearch.version] or [dependencies] property can be specified for the plugin [" + name + "]" + ); } - final Version opensearchVersion = Version.fromString(opensearchVersionString); + + final List<SemverRange> opensearchVersionRanges = new ArrayList<>(); + if (opensearchVersionString != null) { + opensearchVersionRanges.add(SemverRange.fromString(opensearchVersionString)); + } else { + Map<String, String> dependenciesMap; + try ( + final JsonXContentParser parser = new JsonXContentParser( + NamedXContentRegistry.EMPTY, + DeprecationHandler.IGNORE_DEPRECATIONS, + jsonFactory.createParser(dependenciesValue) + ) + ) { + dependenciesMap = parser.mapStrings(); + } + if (dependenciesMap.size() != 1) { + throw new IllegalArgumentException( + "Exactly one dependency is allowed to be specified in plugin descriptor properties: " + dependenciesMap + ); + } + if (dependenciesMap.keySet().stream().noneMatch(s -> s.equals("opensearch"))) { + throw new IllegalArgumentException("Only opensearch is allowed to be specified as a plugin dependency: " + dependenciesMap); + } + String[] ranges = dependenciesMap.get("opensearch").split(","); + if (ranges.length != 1) { + throw new IllegalArgumentException( + "Exactly one range is allowed to be specified in dependencies for the plugin [" + name + "]" + ); + } + opensearchVersionRanges.add(SemverRange.fromString(ranges[0].trim())); + } + final String javaVersionString = propsMap.remove("java.version"); if (javaVersionString == null) { throw new IllegalArgumentException("property [java.version] is missing for plugin [" + name + "]"); @@ -271,7 +367,7 @@ public static PluginInfo readFromProperties(final Path path) throws IOException name,
description, version, - opensearchVersion, + opensearchVersionRanges, javaVersionString, classname, customFolderName, @@ -335,12 +431,26 @@ public String getVersion() { } /** - * The version of OpenSearch the plugin was built for. + * The list of OpenSearch version ranges the plugin is compatible with. * - * @return an OpenSearch version + * @return a list of OpenSearch version ranges */ - public Version getOpenSearchVersion() { - return opensearchVersion; + public List<SemverRange> getOpenSearchVersionRanges() { + return opensearchVersionRanges; + } + + /** + * Pretty print the semver ranges and return the string. + * @return semver ranges string + */ + public String getOpenSearchVersionRangesString() { + if (opensearchVersionRanges == null || opensearchVersionRanges.isEmpty()) { + throw new IllegalStateException("Opensearch version ranges list cannot be empty"); + } + if (opensearchVersionRanges.size() == 1) { + return opensearchVersionRanges.get(0).toString(); + } + return opensearchVersionRanges.stream().map(Object::toString).collect(Collectors.joining(",", "[", "]")); } /** @@ -376,7 +486,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws { builder.field("name", name); builder.field("version", version); - builder.field("opensearch_version", opensearchVersion); + builder.field("opensearch_version", getOpenSearchVersionRangesString()); builder.field("java_version", javaVersion); builder.field("description", description); builder.field("classname", classname); @@ -430,7 +540,7 @@ public String toString(String prefix) { .append("\n") .append(prefix) .append("OpenSearch Version: ") - .append(opensearchVersion) + .append(getOpenSearchVersionRangesString()) .append("\n") .append(prefix) .append("Java Version: ") diff --git a/server/src/main/java/org/opensearch/plugins/PluginSecurity.java b/server/src/main/java/org/opensearch/plugins/PluginSecurity.java index e7d92016d4082..1bf8642d1112f 100644 --- a/server/src/main/java/org/opensearch/plugins/PluginSecurity.java +++ b/server/src/main/java/org/opensearch/plugins/PluginSecurity.java @@ -135,6 +135,7 @@ static String formatPermission(Permission permission) { /** * Parses plugin policy into a set of permissions. Each permission is formatted for output to users. 
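The `PluginInfo` changes above let a descriptor declare a semver range through a new `dependencies` property instead of a pinned `opensearch.version`. A hedged pure-Java sketch of the validation rules enforced in `readFromProperties`; the descriptor value shown is illustrative (the parser accepts unquoted field names, and `~` is one of the range notations named in the serialization comment):

```java
import java.util.Map;

// Stand-in for the descriptor rules in readFromProperties, e.g. for a
// plugin-descriptor.properties entry like: dependencies={ opensearch: "~2.13.0" }
public class DependenciesRuleSketch {
    public static void main(String[] args) {
        Map<String, String> dependenciesMap = Map.of("opensearch", "~2.13.0");

        // Rule 1: exactly one dependency entry is allowed.
        if (dependenciesMap.size() != 1) {
            throw new IllegalArgumentException("Exactly one dependency is allowed: " + dependenciesMap);
        }
        // Rule 2: the only allowed dependency key is "opensearch".
        if (!dependenciesMap.containsKey("opensearch")) {
            throw new IllegalArgumentException("Only opensearch may be declared: " + dependenciesMap);
        }
        // Rule 3: exactly one range; comma-separated lists are rejected for now.
        String[] ranges = dependenciesMap.get("opensearch").split(",");
        if (ranges.length != 1) {
            throw new IllegalArgumentException("Exactly one range is allowed");
        }
        System.out.println("declared range: " + ranges[0].trim());
    }
}
```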
*/ + @SuppressWarnings("removal") public static Set<String> parsePermissions(Path file, Path tmpDir) throws IOException { // create a zero byte file for "comparison" // this is necessary because the default policy impl automatically grants two permissions: diff --git a/server/src/main/java/org/opensearch/plugins/PluginsService.java b/server/src/main/java/org/opensearch/plugins/PluginsService.java index 3cd226c357367..a6eefd2f4fd17 100644 --- a/server/src/main/java/org/opensearch/plugins/PluginsService.java +++ b/server/src/main/java/org/opensearch/plugins/PluginsService.java @@ -44,14 +44,15 @@ import org.opensearch.action.admin.cluster.node.info.PluginsAndModules; import org.opensearch.bootstrap.JarHell; import org.opensearch.common.collect.Tuple; -import org.opensearch.common.component.LifecycleComponent; import org.opensearch.common.inject.Module; +import org.opensearch.common.lifecycle.LifecycleComponent; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.Strings; +import org.opensearch.core.service.ReportingService; import org.opensearch.index.IndexModule; -import org.opensearch.node.ReportingService; +import org.opensearch.semver.SemverRange; import org.opensearch.threadpool.ExecutorBuilder; import org.opensearch.transport.TransportSettings; @@ -387,12 +388,12 @@ public static List<Path> findPluginDirs(final Path rootPath) throws IOException * Verify the given plugin is compatible with the current OpenSearch installation. */ static void verifyCompatibility(PluginInfo info) { - if (info.getOpenSearchVersion().equals(Version.CURRENT) == false) { + if (!isPluginVersionCompatible(info, Version.CURRENT)) { throw new IllegalArgumentException( "Plugin [" + info.getName() + "] was built for OpenSearch version " - + info.getOpenSearchVersion() + + info.getOpenSearchVersionRangesString() + " but version " + Version.CURRENT + " is running" @@ -401,6 +402,16 @@ static void verifyCompatibility(PluginInfo info) { JarHell.checkJavaVersion(info.getName(), info.getJavaVersion()); } + public static boolean isPluginVersionCompatible(final PluginInfo pluginInfo, final Version coreVersion) { + // Core version must satisfy the semver range in plugin info + for (SemverRange range : pluginInfo.getOpenSearchVersionRanges()) { + if (!range.isSatisfiedBy(coreVersion)) { + return false; + } + } + return true; + } + static void checkForFailedPluginRemovals(final Path pluginsDirectory) throws IOException { /* * Check for the existence of a marker file that indicates any plugins are in a garbage state from a failed attempt to remove the @@ -468,7 +479,7 @@ private static Bundle readPluginBundle(final Set<Bundle> bundles, final Path plu /** * Return the given bundles, sorted in dependency loading order. - * + * <p> * This sort is stable, so that if two plugins do not have any interdependency, * their relative order from iteration of the provided set will not change. 
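`verifyCompatibility` above now delegates to `isPluginVersionCompatible`, which admits a plugin only when the running core version satisfies every declared range. A short usage sketch against the `SemverRange` API introduced in this change; building the range from `Version.CURRENT` with the `~` operator is an assumption made for demonstration:

```java
import java.util.List;

import org.opensearch.Version;
import org.opensearch.semver.SemverRange;

public class CompatibilityCheckSketch {
    public static void main(String[] args) {
        // A "~x.y.z" style range, as would be parsed from the plugin descriptor.
        SemverRange range = SemverRange.fromString("~" + Version.CURRENT);

        // Mirrors PluginsService.isPluginVersionCompatible: every range must be satisfied.
        boolean compatible = List.of(range).stream().allMatch(r -> r.isSatisfiedBy(Version.CURRENT));
        System.out.println("compatible with " + Version.CURRENT + ": " + compatible);
    }
}
```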
* @@ -682,6 +693,7 @@ static void checkBundleJarHell(Set<URL> classpath, Bundle bundle, Map<String, Se } } + @SuppressWarnings("removal") private Plugin loadBundle(Bundle bundle, Map<String, Plugin> loaded) { String name = bundle.plugin.getName(); diff --git a/server/src/main/java/org/opensearch/plugins/RepositoryPlugin.java b/server/src/main/java/org/opensearch/plugins/RepositoryPlugin.java index 189ba3cfc16ab..09233d49f3aea 100644 --- a/server/src/main/java/org/opensearch/plugins/RepositoryPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/RepositoryPlugin.java @@ -52,9 +52,8 @@ public interface RepositoryPlugin { * Returns repository types added by this plugin. * * @param env The environment for the local node, which may be used for the local settings and path.repo - * - * The key of the returned {@link Map} is the type name of the repository and - * the value is a factory to construct the {@link Repository} interface. + * The key of the returned {@link Map} is the type name of the repository and + * the value is a factory to construct the {@link Repository} interface. */ default Map<String, Repository.Factory> getRepositories( Environment env, @@ -70,9 +69,8 @@ default Map<String, Repository.Factory> getRepositories( * through the external API. * * @param env The environment for the local node, which may be used for the local settings and path.repo - * - * The key of the returned {@link Map} is the type name of the repository and - * the value is a factory to construct the {@link Repository} interface. + * The key of the returned {@link Map} is the type name of the repository and + * the value is a factory to construct the {@link Repository} interface. */ default Map<String, Repository.Factory> getInternalRepositories( Environment env, diff --git a/server/src/main/java/org/opensearch/plugins/ScriptPlugin.java b/server/src/main/java/org/opensearch/plugins/ScriptPlugin.java index 7c8d71d21c0fd..51f4ab3128cee 100644 --- a/server/src/main/java/org/opensearch/plugins/ScriptPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/ScriptPlugin.java @@ -31,15 +31,15 @@ package org.opensearch.plugins; +import org.opensearch.common.settings.Settings; +import org.opensearch.script.ScriptContext; +import org.opensearch.script.ScriptEngine; + import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; -import org.opensearch.common.settings.Settings; -import org.opensearch.script.ScriptContext; -import org.opensearch.script.ScriptEngine; - /** * An additional extension point for {@link Plugin}s that extends OpenSearch's scripting functionality. * diff --git a/server/src/main/java/org/opensearch/plugins/SearchPipelinePlugin.java b/server/src/main/java/org/opensearch/plugins/SearchPipelinePlugin.java index d2ef2b65c5944..7288a8caaec58 100644 --- a/server/src/main/java/org/opensearch/plugins/SearchPipelinePlugin.java +++ b/server/src/main/java/org/opensearch/plugins/SearchPipelinePlugin.java @@ -35,7 +35,7 @@ public interface SearchPipelinePlugin { /** * Returns additional search pipeline request processor types added by this plugin. - * + * <p> * The key of the returned {@link Map} is the unique name for the processor which is specified * in pipeline configurations, and the value is a {@link org.opensearch.search.pipeline.Processor.Factory} * to create the processor from a given pipeline configuration. 
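The javadoc touched below describes the same contract for all three `SearchPipelinePlugin` extension points: a map from processor name, as referenced by pipeline configurations, to a factory that builds the processor from its configuration. Reduced to a pure-Java sketch, with invented types standing in for the OpenSearch interfaces:

```java
import java.util.Locale;
import java.util.Map;
import java.util.function.Function;

// Pure-Java reduction of the name -> factory contract; Processor here is a stand-in,
// not org.opensearch.search.pipeline.Processor.
public class FactoryMapSketch {
    interface Processor { String process(String input); }

    public static void main(String[] args) {
        // Key: the processor name used in pipeline configs; value: config -> processor factory.
        Function<Map<String, Object>, Processor> uppercaseFactory =
            config -> input -> input.toUpperCase(Locale.ROOT);
        Map<String, Function<Map<String, Object>, Processor>> factories =
            Map.of("uppercase", uppercaseFactory);

        Processor p = factories.get("uppercase").apply(Map.of());
        System.out.println(p.process("hits"));  // HITS
    }
}
```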
@@ -46,7 +46,7 @@ default Map<String, Processor.Factory<SearchRequestProcessor>> getRequestProcess /** * Returns additional search pipeline response processor types added by this plugin. - * + * <p> * The key of the returned {@link Map} is the unique name for the processor which is specified * in pipeline configurations, and the value is a {@link org.opensearch.search.pipeline.Processor.Factory} * to create the processor from a given pipeline configuration. @@ -57,7 +57,7 @@ default Map<String, Processor.Factory<SearchResponseProcessor>> getResponseProce /** * Returns additional search pipeline search phase results processor types added by this plugin. - * + * <p> * The key of the returned {@link Map} is the unique name for the processor which is specified * in pipeline configurations, and the value is a {@link org.opensearch.search.pipeline.Processor.Factory} * to create the processor from a given pipeline configuration. diff --git a/server/src/main/java/org/opensearch/plugins/SearchPlugin.java b/server/src/main/java/org/opensearch/plugins/SearchPlugin.java index d55cec18f2c54..40b4f97cd1897 100644 --- a/server/src/main/java/org/opensearch/plugins/SearchPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/SearchPlugin.java @@ -36,12 +36,12 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.Sort; import org.opensearch.common.CheckedFunction; +import org.opensearch.common.lucene.search.function.ScoreFunction; +import org.opensearch.common.settings.Settings; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.NamedWriteable; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.lucene.search.function.ScoreFunction; -import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.ContextParser; import org.opensearch.core.xcontent.XContent; import org.opensearch.core.xcontent.XContentParser; @@ -551,8 +551,8 @@ class CompositeAggregationSpec { private final Consumer<ValuesSourceRegistry.Builder> aggregatorRegistrar; private final Class<?> valueSourceBuilderClass; @Deprecated - /** This is added for backward compatibility, you don't need to set it, as we use aggregationType instead of - * byte code + /* This is added for backward compatibility, you don't need to set it, as we use aggregationType instead of + byte code */ private Byte byteCode; private final CompositeAggregationParsingFunction parsingFunction; diff --git a/server/src/main/java/org/opensearch/plugins/TelemetryPlugin.java b/server/src/main/java/org/opensearch/plugins/TelemetryPlugin.java index 33dc9b7a0c843..3fc28713b63d5 100644 --- a/server/src/main/java/org/opensearch/plugins/TelemetryPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/TelemetryPlugin.java @@ -8,6 +8,7 @@ package org.opensearch.plugins; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.telemetry.Telemetry; import org.opensearch.telemetry.TelemetrySettings; @@ -15,10 +16,13 @@ /** * Plugin for extending telemetry related classes + * + * @opensearch.experimental */ +@ExperimentalApi public interface TelemetryPlugin { - Optional<Telemetry> getTelemetry(TelemetrySettings settings); + Optional<Telemetry> getTelemetry(TelemetrySettings telemetrySettings); String getName(); diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlService.java 
b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlService.java new file mode 100644 index 0000000000000..5b842ff0d3399 --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlService.java @@ -0,0 +1,153 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.Constants; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.Settings; +import org.opensearch.node.ResourceUsageCollectorService; +import org.opensearch.ratelimitting.admissioncontrol.controllers.AdmissionController; +import org.opensearch.ratelimitting.admissioncontrol.controllers.CpuBasedAdmissionController; +import org.opensearch.ratelimitting.admissioncontrol.controllers.IoBasedAdmissionController; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; +import org.opensearch.ratelimitting.admissioncontrol.stats.AdmissionControlStats; +import org.opensearch.ratelimitting.admissioncontrol.stats.AdmissionControllerStats; +import org.opensearch.threadpool.ThreadPool; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +import static org.opensearch.ratelimitting.admissioncontrol.controllers.CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER; +import static org.opensearch.ratelimitting.admissioncontrol.controllers.IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER; + +/** + * Admission control Service that bootstraps and manages all the Admission Controllers in OpenSearch. 
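The service whose javadoc opens above registers one controller per tracked resource and fans every admission check out to all of them. A hedged usage sketch against the constructor and entry point defined below; the collaborator instances are assumed to come from node bootstrap and are not constructed here:

```java
import org.opensearch.cluster.service.ClusterService;
import org.opensearch.common.settings.Settings;
import org.opensearch.node.ResourceUsageCollectorService;
import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlService;
import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType;
import org.opensearch.threadpool.ThreadPool;

public class AdmissionControlUsageSketch {
    static AdmissionControlService bootstrap(
        ClusterService clusterService,
        ThreadPool threadPool,
        ResourceUsageCollectorService collector
    ) {
        // Registers the CPU controller always, and the IO controller on Linux only.
        return new AdmissionControlService(Settings.EMPTY, clusterService, threadPool, collector);
    }

    static void admit(AdmissionControlService service, String transportAction) {
        // In enforced mode this throws OpenSearchRejectedExecutionException when a limit is
        // breached; the action name passed here is whatever transport action is being admitted.
        service.applyTransportAdmissionControl(transportAction, AdmissionControlActionType.INDEXING);
    }
}
```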
+ */ +public class AdmissionControlService { + private final ThreadPool threadPool; + public final AdmissionControlSettings admissionControlSettings; + private final ConcurrentMap<String, AdmissionController> admissionControllers; + private static final Logger logger = LogManager.getLogger(AdmissionControlService.class); + private final ClusterService clusterService; + private final Settings settings; + private final ResourceUsageCollectorService resourceUsageCollectorService; + + /** + * + * @param settings Immutable settings instance + * @param clusterService ClusterService Instance + * @param threadPool ThreadPool Instance + * @param resourceUsageCollectorService Instance used to get node resource usage stats + */ + public AdmissionControlService( + Settings settings, + ClusterService clusterService, + ThreadPool threadPool, + ResourceUsageCollectorService resourceUsageCollectorService + ) { + this.threadPool = threadPool; + this.admissionControlSettings = new AdmissionControlSettings(clusterService.getClusterSettings(), settings); + this.admissionControllers = new ConcurrentHashMap<>(); + this.clusterService = clusterService; + this.settings = settings; + this.resourceUsageCollectorService = resourceUsageCollectorService; + this.initialize(); + } + + /** + * Initialise and register all the admission controllers + */ + private void initialize() { + // Initialise the different types of admission controllers + registerAdmissionController(CPU_BASED_ADMISSION_CONTROLLER); + if (Constants.LINUX) { + registerAdmissionController(IO_BASED_ADMISSION_CONTROLLER); + } + } + + /** + * + * @param action Transport action name + * @param admissionControlActionType type of the admission control action + */ + public void applyTransportAdmissionControl(String action, AdmissionControlActionType admissionControlActionType) { + this.admissionControllers.forEach( + (name, admissionController) -> { admissionController.apply(action, admissionControlActionType); } + ); + } + + /** + * + * @param admissionControllerName name of the admission controller to register with the service.
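Controllers live in a name-keyed concurrent map; `registerAdmissionController` below instantiates them through the factory, and `getAdmissionController` returns null for unknown names. A small lookup sketch using the constants this change introduces:

```java
import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlService;
import org.opensearch.ratelimitting.admissioncontrol.controllers.AdmissionController;
import org.opensearch.ratelimitting.admissioncontrol.controllers.CpuBasedAdmissionController;

public class ControllerLookupSketch {
    static void printCpuRejections(AdmissionControlService service) {
        // "global_cpu_usage" is registered during service initialization.
        AdmissionController cpu = service.getAdmissionController(
            CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER
        );
        if (cpu != null) {
            System.out.println(cpu.getName() + " rejections: " + cpu.getRejectionStats());
        }
    }
}
```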
+ */ + public void registerAdmissionController(String admissionControllerName) { + AdmissionController admissionController = this.controllerFactory(admissionControllerName); + this.admissionControllers.put(admissionControllerName, admissionController); + } + + /** + * @return AdmissionController Instance + */ + private AdmissionController controllerFactory(String admissionControllerName) { + switch (admissionControllerName) { + case CPU_BASED_ADMISSION_CONTROLLER: + return new CpuBasedAdmissionController( + admissionControllerName, + this.resourceUsageCollectorService, + this.clusterService, + this.settings + ); + case IO_BASED_ADMISSION_CONTROLLER: + return new IoBasedAdmissionController( + admissionControllerName, + this.resourceUsageCollectorService, + this.clusterService, + this.settings + ); + default: + throw new IllegalArgumentException("Not Supported AdmissionController : " + admissionControllerName); + } + } + + /** + * + * @return list of the registered admissionControllers + */ + public List<AdmissionController> getAdmissionControllers() { + return new ArrayList<>(this.admissionControllers.values()); + } + + /** + * + * @param controllerName name of the admissionController + * @return instance of the AdmissionController Instance + */ + public AdmissionController getAdmissionController(String controllerName) { + return this.admissionControllers.getOrDefault(controllerName, null); + } + + /** + * Return admission control stats + */ + public AdmissionControlStats stats() { + List<AdmissionControllerStats> statsList = new ArrayList<>(); + if (!this.admissionControllers.isEmpty()) { + this.admissionControllers.forEach((controllerName, admissionController) -> { + AdmissionControllerStats admissionControllerStats = new AdmissionControllerStats(admissionController); + statsList.add(admissionControllerStats); + }); + return new AdmissionControlStats(statsList); + } + return null; + } +} diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSettings.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSettings.java new file mode 100644 index 0000000000000..b557190ab54ac --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSettings.java @@ -0,0 +1,83 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; + +/** + * Settings related to admission control. + * @opensearch.internal + */ +public final class AdmissionControlSettings { + + /** + * Default parameters for the AdmissionControlSettings + */ + public static class Defaults { + public static final String MODE = "disabled"; + } + + /** + * Feature level setting to operate in shadow-mode or in enforced-mode. If enforced field is set + * rejection will be performed, otherwise only rejection metrics will be populated. 
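The feature gate for all of this is the dynamic `admission_control.transport.mode` node setting declared just below, parsed through `AdmissionControlMode::fromName` and defaulting to `disabled`. A hedged sketch of reading it (accepted values, per the enum added later in this change: `disabled`, `monitor_only`, `enforced`):

```java
import org.opensearch.common.settings.Settings;
import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlSettings;
import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode;

public class ModeSettingSketch {
    public static void main(String[] args) {
        Settings nodeSettings = Settings.builder()
            .put("admission_control.transport.mode", "enforced")
            .build();

        // Same parse path the Setting uses; yields DISABLED when the key is unset.
        AdmissionControlMode mode =
            AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.get(nodeSettings);
        System.out.println(mode);  // ENFORCED
    }
}
```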
+ */ + public static final Setting<AdmissionControlMode> ADMISSION_CONTROL_TRANSPORT_LAYER_MODE = new Setting<>( + "admission_control.transport.mode", + Defaults.MODE, + AdmissionControlMode::fromName, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + private volatile AdmissionControlMode transportLayeradmissionControlMode; + + /** + * @param clusterSettings clusterSettings Instance + * @param settings settings instance + */ + public AdmissionControlSettings(ClusterSettings clusterSettings, Settings settings) { + this.transportLayeradmissionControlMode = ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.get(settings); + clusterSettings.addSettingsUpdateConsumer(ADMISSION_CONTROL_TRANSPORT_LAYER_MODE, this::setAdmissionControlTransportLayerMode); + } + + /** + * + * @param admissionControlMode the updated mode of the admission control feature + */ + private void setAdmissionControlTransportLayerMode(AdmissionControlMode admissionControlMode) { + this.transportLayeradmissionControlMode = admissionControlMode; + } + + /** + * + * @return the currently configured mode of the admission control feature + */ + public AdmissionControlMode getAdmissionControlTransportLayerMode() { + return this.transportLayeradmissionControlMode; + } + + /** + * + * @return true if the admission control feature is in enforced mode, else false + */ + public Boolean isTransportLayerAdmissionControlEnforced() { + return this.transportLayeradmissionControlMode == AdmissionControlMode.ENFORCED; + } + + /** + * + * @return true if the admission control feature is enabled, else false + */ + public Boolean isTransportLayerAdmissionControlEnabled() { + return this.transportLayeradmissionControlMode != AdmissionControlMode.DISABLED; + } +} diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/AdmissionController.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/AdmissionController.java new file mode 100644 index 0000000000000..f5bb5fa660e7f --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/AdmissionController.java @@ -0,0 +1,104 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol.controllers; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.node.ResourceUsageCollectorService; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; + +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; + +/** + * Abstract class for Admission Controller in OpenSearch, which aims to provide resource-based request admission control.
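Concrete controllers extend the abstract base whose javadoc continues below, implementing `apply(...)` and reusing the inherited rejection-count bookkeeping. A hedged sketch of what a custom subclass would look like; a memory-based controller is invented for illustration, since this change ships only CPU- and IO-based ones:

```java
import org.opensearch.cluster.service.ClusterService;
import org.opensearch.node.ResourceUsageCollectorService;
import org.opensearch.ratelimitting.admissioncontrol.controllers.AdmissionController;
import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType;

// Hypothetical controller, for illustration only.
public class MemoryBasedAdmissionController extends AdmissionController {

    public MemoryBasedAdmissionController(
        String name,
        ResourceUsageCollectorService collector,
        ClusterService clusterService
    ) {
        super(name, collector, clusterService);
    }

    @Override
    public void apply(String action, AdmissionControlActionType actionType) {
        // A real controller would read node stats from resourceUsageCollectorService here
        // and, in enforced mode, throw OpenSearchRejectedExecutionException.
        boolean memoryLimitBreached = false;  // placeholder decision for the sketch
        if (memoryLimitBreached) {
            addRejectionCount(actionType.getType(), 1);
        }
    }
}
```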
+ * It provides methods for any tracking-object that can be incremented (such as memory size), + * and admission control can be applied if configured limit has been reached + */ +public abstract class AdmissionController { + private final String admissionControllerName; + final ResourceUsageCollectorService resourceUsageCollectorService; + public final Map<String, AtomicLong> rejectionCountMap; + public final ClusterService clusterService; + + /** + * @param admissionControllerName name of the admissionController + * @param resourceUsageCollectorService instance used to get resource usage stats of the node + * @param clusterService instance of the clusterService + */ + public AdmissionController( + String admissionControllerName, + ResourceUsageCollectorService resourceUsageCollectorService, + ClusterService clusterService + ) { + this.admissionControllerName = admissionControllerName; + this.resourceUsageCollectorService = resourceUsageCollectorService; + this.clusterService = clusterService; + this.rejectionCountMap = ConcurrentCollections.newConcurrentMap(); + } + + /** + * Return the current state of the admission controller + * @return true if admissionController is enabled for the transport layer else false + */ + public boolean isEnabledForTransportLayer(AdmissionControlMode admissionControlMode) { + return admissionControlMode != AdmissionControlMode.DISABLED; + } + + /** + * + * @return true if admissionController is Enforced Mode else false + */ + public Boolean isAdmissionControllerEnforced(AdmissionControlMode admissionControlMode) { + return admissionControlMode == AdmissionControlMode.ENFORCED; + } + + /** + * Apply admission control based on the resource usage for an action + */ + public abstract void apply(String action, AdmissionControlActionType admissionControlActionType); + + /** + * @return name of the admission-controller + */ + public String getName() { + return this.admissionControllerName; + } + + /** + * Add rejection count to the rejection count metric tracked by the admission controller + */ + public void addRejectionCount(String admissionControlActionType, long count) { + if (!this.rejectionCountMap.containsKey(admissionControlActionType)) { + this.rejectionCountMap.put(admissionControlActionType, new AtomicLong(0)); + } + this.rejectionCountMap.get(admissionControlActionType).getAndAdd(count); + } + + /** + * @return current value of the rejection count metric tracked by the admission-controller. 
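One side note on `addRejectionCount` above: the containsKey/put pair is a check-then-act sequence, so two threads racing on a new action type could each install a counter and one increment could be lost. An equivalent atomic form of the same bookkeeping, shown as a standalone sketch:

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

public class RejectionCountSketch {
    private final ConcurrentMap<String, AtomicLong> rejectionCountMap = new ConcurrentHashMap<>();

    public void addRejectionCount(String actionType, long count) {
        // computeIfAbsent installs the counter atomically, closing the race window.
        rejectionCountMap.computeIfAbsent(actionType, k -> new AtomicLong(0)).getAndAdd(count);
    }

    public static void main(String[] args) {
        RejectionCountSketch sketch = new RejectionCountSketch();
        sketch.addRejectionCount("search", 1);
        sketch.addRejectionCount("search", 1);
        System.out.println(sketch.rejectionCountMap);  // {search=2}
    }
}
```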
+ */ + public long getRejectionCount(String admissionControlActionType) { + if (this.rejectionCountMap.containsKey(admissionControlActionType)) { + return this.rejectionCountMap.get(admissionControlActionType).get(); + } + return 0; + } + + /** + * Get rejection stats of the admission controller + */ + public Map<String, Long> getRejectionStats() { + Map<String, Long> rejectionStats = new HashMap<>(); + rejectionCountMap.forEach((actionType, count) -> rejectionStats.put(actionType, count.get())); + return rejectionStats; + } +} diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CpuBasedAdmissionController.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CpuBasedAdmissionController.java new file mode 100644 index 0000000000000..5c180346c05e1 --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CpuBasedAdmissionController.java @@ -0,0 +1,125 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol.controllers; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.node.NodeResourceUsageStats; +import org.opensearch.node.ResourceUsageCollectorService; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; +import org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings; + +import java.util.Locale; +import java.util.Optional; + +/** + * Class for CPU Based Admission Controller in OpenSearch, which aims to provide CPU utilisation admission control. 
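The CPU-based controller that follows compares current process CPU utilisation against a per-action-type limit and rejects only when running in enforced mode. Its core decision, reduced to a pure-Java sketch:

```java
// Reduction of the limit check in CpuBasedAdmissionController.isLimitsBreached.
public class CpuLimitSketch {
    static boolean isLimitBreached(double cpuUsagePercent, long maxCpuLimitPercent) {
        // Breached when current usage meets or exceeds the configured limit.
        return cpuUsagePercent >= maxCpuLimitPercent;
    }

    public static void main(String[] args) {
        System.out.println(isLimitBreached(97.5, 95));  // true: count, and reject if enforced
        System.out.println(isLimitBreached(40.0, 95));  // false: admit
    }
}
```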
+ * It provides methods to apply admission control if configured limit has been reached + */ +public class CpuBasedAdmissionController extends AdmissionController { + public static final String CPU_BASED_ADMISSION_CONTROLLER = "global_cpu_usage"; + private static final Logger LOGGER = LogManager.getLogger(CpuBasedAdmissionController.class); + public CpuBasedAdmissionControllerSettings settings; + + /** + * @param admissionControllerName Name of the admission controller + * @param resourceUsageCollectorService Instance used to get node resource usage stats + * @param clusterService ClusterService Instance + * @param settings Immutable settings instance + */ + public CpuBasedAdmissionController( + String admissionControllerName, + ResourceUsageCollectorService resourceUsageCollectorService, + ClusterService clusterService, + Settings settings + ) { + super(admissionControllerName, resourceUsageCollectorService, clusterService); + this.settings = new CpuBasedAdmissionControllerSettings(clusterService.getClusterSettings(), settings); + } + + /** + * Apply admission control based on process CPU usage + * @param action is the transport action + */ + @Override + public void apply(String action, AdmissionControlActionType admissionControlActionType) { + if (this.isEnabledForTransportLayer(this.settings.getTransportLayerAdmissionControllerMode())) { + this.applyForTransportLayer(action, admissionControlActionType); + } + } + + /** + * Apply transport layer admission control if configured limit has been reached + */ + private void applyForTransportLayer(String actionName, AdmissionControlActionType admissionControlActionType) { + if (isLimitsBreached(actionName, admissionControlActionType)) { + this.addRejectionCount(admissionControlActionType.getType(), 1); + if (this.isAdmissionControllerEnforced(this.settings.getTransportLayerAdmissionControllerMode())) { + throw new OpenSearchRejectedExecutionException( + String.format( + Locale.ROOT, + "CPU usage admission controller rejected the request for action [%s] as CPU limit reached", + admissionControlActionType.name() + ) + ); + } + } + } + + /** + * Check if the configured resource usage limits are breached for the action + */ + private boolean isLimitsBreached(String actionName, AdmissionControlActionType admissionControlActionType) { + // check if cluster state is ready + if (clusterService.state() != null && clusterService.state().nodes() != null) { + long maxCpuLimit = this.getCpuRejectionThreshold(admissionControlActionType); + Optional<NodeResourceUsageStats> nodePerformanceStatistics = this.resourceUsageCollectorService.getNodeStatistics( + this.clusterService.state().nodes().getLocalNodeId() + ); + if (nodePerformanceStatistics.isPresent()) { + double cpuUsage = nodePerformanceStatistics.get().getCpuUtilizationPercent(); + if (cpuUsage >= maxCpuLimit) { + LOGGER.warn( + "CpuBasedAdmissionController limit reached as the current CPU " + + "usage [{}] exceeds the allowed limit [{}] for transport action [{}] in admissionControlMode [{}]", + cpuUsage, + maxCpuLimit, + actionName, + this.settings.getTransportLayerAdmissionControllerMode() + ); + return true; + } + } + } + return false; + } + + /** + * Get CPU rejection threshold based on action type + */ + private long getCpuRejectionThreshold(AdmissionControlActionType admissionControlActionType) { + switch (admissionControlActionType) { + case SEARCH: + return this.settings.getSearchCPULimit(); + case INDEXING: + return this.settings.getIndexingCPULimit(); + default: + throw new 
IllegalArgumentException( + String.format( + Locale.ROOT, + "Admission control not Supported for AdmissionControlActionType: %s", + admissionControlActionType.getType() + ) + ); + } + } +} diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/IoBasedAdmissionController.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/IoBasedAdmissionController.java new file mode 100644 index 0000000000000..ad6cc3ff378f0 --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/IoBasedAdmissionController.java @@ -0,0 +1,126 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol.controllers; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.node.NodeResourceUsageStats; +import org.opensearch.node.ResourceUsageCollectorService; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; +import org.opensearch.ratelimitting.admissioncontrol.settings.IoBasedAdmissionControllerSettings; + +import java.util.Locale; +import java.util.Optional; + +/** + * Class for IO Based Admission Controller in OpenSearch, which aims to provide IO utilisation admission control. + * It provides methods to apply admission control if configured limit has been reached + */ +public class IoBasedAdmissionController extends AdmissionController { + public static final String IO_BASED_ADMISSION_CONTROLLER = "global_io_usage"; + private static final Logger LOGGER = LogManager.getLogger(IoBasedAdmissionController.class); + public IoBasedAdmissionControllerSettings settings; + + /** + * @param admissionControllerName name of the admissionController + * @param resourceUsageCollectorService instance used to get resource usage stats of the node + * @param clusterService instance of the clusterService + */ + public IoBasedAdmissionController( + String admissionControllerName, + ResourceUsageCollectorService resourceUsageCollectorService, + ClusterService clusterService, + Settings settings + ) { + super(admissionControllerName, resourceUsageCollectorService, clusterService); + this.settings = new IoBasedAdmissionControllerSettings(clusterService.getClusterSettings(), settings); + } + + /** + * Apply admission control based on the resource usage for an action + * + * @param action is the transport action + * @param admissionControlActionType type of admissionControlActionType + */ + @Override + public void apply(String action, AdmissionControlActionType admissionControlActionType) { + if (this.isEnabledForTransportLayer(this.settings.getTransportLayerAdmissionControllerMode())) { + this.applyForTransportLayer(action, admissionControlActionType); + } + } + + /** + * Apply transport layer admission control if configured limit has been reached + */ + private void applyForTransportLayer(String actionName, AdmissionControlActionType admissionControlActionType) { + if (isLimitsBreached(actionName, admissionControlActionType)) { + this.addRejectionCount(admissionControlActionType.getType(), 1); + if 
(this.isAdmissionControllerEnforced(this.settings.getTransportLayerAdmissionControllerMode())) { + throw new OpenSearchRejectedExecutionException( + String.format( + Locale.ROOT, + "Io usage admission controller rejected the request for action [%s] as IO limit reached", + admissionControlActionType.name() + ) + ); + } + } + } + + /** + * Check if the configured resource usage limits are breached for the action + */ + private boolean isLimitsBreached(String actionName, AdmissionControlActionType admissionControlActionType) { + // check if cluster state is ready + if (clusterService.state() != null && clusterService.state().nodes() != null) { + long ioUsageThreshold = this.getIoRejectionThreshold(admissionControlActionType); + Optional<NodeResourceUsageStats> nodePerformanceStatistics = this.resourceUsageCollectorService.getNodeStatistics( + this.clusterService.state().nodes().getLocalNodeId() + ); + if (nodePerformanceStatistics.isPresent()) { + double ioUsage = nodePerformanceStatistics.get().getIoUsageStats().getIoUtilisationPercent(); + if (ioUsage >= ioUsageThreshold) { + LOGGER.warn( + "IoBasedAdmissionController limit reached as the current IO " + + "usage [{}] exceeds the allowed limit [{}] for transport action [{}] in admissionControlMode [{}]", + ioUsage, + ioUsageThreshold, + actionName, + this.settings.getTransportLayerAdmissionControllerMode() + ); + return true; + } + } + } + return false; + } + + /** + * Get IO rejection threshold based on action type + */ + private long getIoRejectionThreshold(AdmissionControlActionType admissionControlActionType) { + switch (admissionControlActionType) { + case SEARCH: + return this.settings.getSearchIOUsageLimit(); + case INDEXING: + return this.settings.getIndexingIOUsageLimit(); + default: + throw new IllegalArgumentException( + String.format( + Locale.ROOT, + "Admission control not Supported for AdmissionControlActionType: %s", + admissionControlActionType.getType() + ) + ); + } + } +} diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/package-info.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/package-info.java new file mode 100644 index 0000000000000..23746cc61a203 --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/controllers/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains classes related to the different admission controllers + */ +package org.opensearch.ratelimitting.admissioncontrol.controllers; diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlActionType.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlActionType.java new file mode 100644 index 0000000000000..8cf6e973ceb64 --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlActionType.java @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.ratelimitting.admissioncontrol.enums; + +import java.util.Locale; + +/** + * Enum that defines the types of transport requests + */ +public enum AdmissionControlActionType { + INDEXING("indexing"), + SEARCH("search"); + + private final String type; + + AdmissionControlActionType(String uriType) { + this.type = uriType; + } + + /** + * + * @return type of the request + */ + public String getType() { + return type; + } + + public static AdmissionControlActionType fromName(String name) { + name = name.toLowerCase(Locale.ROOT); + switch (name) { + case "indexing": + return INDEXING; + case "search": + return SEARCH; + default: + throw new IllegalArgumentException("Not supported TransportAction type: " + name); + } + } +} diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlMode.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlMode.java new file mode 100644 index 0000000000000..2ae2436ba84e7 --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlMode.java @@ -0,0 +1,66 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol.enums; + +import java.util.Locale; + +/** + * Defines the AdmissionControlMode + */ +public enum AdmissionControlMode { + /** + * AdmissionController is completely disabled. + */ + DISABLED("disabled"), + + /** + * AdmissionController only monitors the rejection criteria for the requests. + */ + MONITOR("monitor_only"), + + /** + * AdmissionController monitors and rejects tasks that exceed resource usage thresholds. + */ + ENFORCED("enforced"); + + private final String mode; + + /** + * @param mode the mode of the admission controller + */ + AdmissionControlMode(String mode) { + this.mode = mode; + } + + /** + * + * @return mode of the admission controller + */ + public String getMode() { + return this.mode; + } + + /** + * + * @param name the name of the admission control mode + * @return Enum of AdmissionControlMode based on the mode + */ + public static AdmissionControlMode fromName(String name) { + switch (name.toLowerCase(Locale.ROOT)) { + case "disabled": + return DISABLED; + case "monitor_only": + return MONITOR; + case "enforced": + return ENFORCED; + default: + throw new IllegalArgumentException("Invalid AdmissionControlMode: " + name); + } + } +} diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/package-info.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/package-info.java new file mode 100644 index 0000000000000..98b08ebd0a7bf --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/enums/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +/** + * This package contains enums related to the different admission controller feature + */ +package org.opensearch.ratelimitting.admissioncontrol.enums; diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/package-info.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/package-info.java new file mode 100644 index 0000000000000..b3dc229f86fb6 --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains base classes needed for the admissionController Feature + */ +package org.opensearch.ratelimitting.admissioncontrol; diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/CpuBasedAdmissionControllerSettings.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/CpuBasedAdmissionControllerSettings.java new file mode 100644 index 0000000000000..1bddd1446a4c4 --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/CpuBasedAdmissionControllerSettings.java @@ -0,0 +1,98 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol.settings; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlSettings; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; + +/** + * Settings related to cpu based admission controller. + * @opensearch.internal + */ +public class CpuBasedAdmissionControllerSettings { + + /** + * Default parameters for the CpuBasedAdmissionControllerSettings + */ + public static class Defaults { + public static final long CPU_USAGE_LIMIT = 95; + } + + private AdmissionControlMode transportLayerMode; + private Long searchCPULimit; + private Long indexingCPULimit; + /** + * Feature level setting to operate in shadow-mode or in enforced-mode. If enforced field is set + * rejection will be performed, otherwise only rejection metrics will be populated. 
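Both the per-controller mode override and the per-action-type limits declared just below are dynamic node settings. A hedged sketch of tightening the CPU controller on one node (the default for both limits is 95):

```java
import org.opensearch.common.settings.Settings;

public class CpuLimitsSettingsSketch {
    public static void main(String[] args) {
        // Enforce only the CPU controller and tighten its per-action limits.
        Settings nodeSettings = Settings.builder()
            .put("admission_control.transport.cpu_usage.mode_override", "enforced")
            .put("admission_control.search.cpu_usage.limit", 80)
            .put("admission_control.indexing.cpu_usage.limit", 90)
            .build();
        System.out.println(nodeSettings);
    }
}
```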
+ */ + public static final Setting<AdmissionControlMode> CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE = new Setting<>( + "admission_control.transport.cpu_usage.mode_override", + AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE, + AdmissionControlMode::fromName, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * This setting is used to set the CPU limit for search requests; by default it uses the default CPU usage limit + */ + public static final Setting<Long> SEARCH_CPU_USAGE_LIMIT = Setting.longSetting( + "admission_control.search.cpu_usage.limit", + Defaults.CPU_USAGE_LIMIT, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * This setting is used to set the CPU limit for indexing requests; by default it uses the default CPU usage limit + */ + public static final Setting<Long> INDEXING_CPU_USAGE_LIMIT = Setting.longSetting( + "admission_control.indexing.cpu_usage.limit", + Defaults.CPU_USAGE_LIMIT, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + // currently limited to these settings; more settings will be added in follow-up PRs + public CpuBasedAdmissionControllerSettings(ClusterSettings clusterSettings, Settings settings) { + this.transportLayerMode = CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.get(settings); + clusterSettings.addSettingsUpdateConsumer(CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE, this::setTransportLayerMode); + this.searchCPULimit = SEARCH_CPU_USAGE_LIMIT.get(settings); + this.indexingCPULimit = INDEXING_CPU_USAGE_LIMIT.get(settings); + clusterSettings.addSettingsUpdateConsumer(INDEXING_CPU_USAGE_LIMIT, this::setIndexingCPULimit); + clusterSettings.addSettingsUpdateConsumer(SEARCH_CPU_USAGE_LIMIT, this::setSearchCPULimit); + } + + private void setTransportLayerMode(AdmissionControlMode admissionControlMode) { + this.transportLayerMode = admissionControlMode; + } + + public AdmissionControlMode getTransportLayerAdmissionControllerMode() { + return transportLayerMode; + } + + public Long getSearchCPULimit() { + return searchCPULimit; + } + + public Long getIndexingCPULimit() { + return indexingCPULimit; + } + + public void setIndexingCPULimit(Long indexingCPULimit) { + this.indexingCPULimit = indexingCPULimit; + } + + public void setSearchCPULimit(Long searchCPULimit) { + this.searchCPULimit = searchCPULimit; + } +} diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettings.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettings.java new file mode 100644 index 0000000000000..e58ed28d21605 --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettings.java @@ -0,0 +1,98 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol.settings; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlSettings; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; + +/** + * Settings related to IO based admission controller.
+ * @opensearch.internal + */ +public class IoBasedAdmissionControllerSettings { + + /** + * Default parameters for the IoBasedAdmissionControllerSettings + */ + public static class Defaults { + public static final long IO_USAGE_LIMIT = 95; + } + + private AdmissionControlMode transportLayerMode; + private Long searchIOUsageLimit; + private Long indexingIOUsageLimit; + + /** + * Feature level setting to operate in shadow-mode or in enforced-mode. If enforced field is set + * rejection will be performed, otherwise only rejection metrics will be populated. + */ + public static final Setting<AdmissionControlMode> IO_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE = new Setting<>( + "admission_control.transport.io_usage.mode_override", + AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE, + AdmissionControlMode::fromName, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * This setting used to set the IO Limits for the search requests by default it will use default IO usage limit + */ + public static final Setting<Long> SEARCH_IO_USAGE_LIMIT = Setting.longSetting( + "admission_control.search.io_usage.limit", + Defaults.IO_USAGE_LIMIT, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * This setting used to set the IO limits for the indexing requests by default it will use default IO usage limit + */ + public static final Setting<Long> INDEXING_IO_USAGE_LIMIT = Setting.longSetting( + "admission_control.indexing.io_usage.limit", + Defaults.IO_USAGE_LIMIT, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public IoBasedAdmissionControllerSettings(ClusterSettings clusterSettings, Settings settings) { + this.transportLayerMode = IO_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.get(settings); + clusterSettings.addSettingsUpdateConsumer(IO_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE, this::setTransportLayerMode); + this.searchIOUsageLimit = SEARCH_IO_USAGE_LIMIT.get(settings); + this.indexingIOUsageLimit = INDEXING_IO_USAGE_LIMIT.get(settings); + clusterSettings.addSettingsUpdateConsumer(INDEXING_IO_USAGE_LIMIT, this::setIndexingIOUsageLimit); + clusterSettings.addSettingsUpdateConsumer(SEARCH_IO_USAGE_LIMIT, this::setSearchIOUsageLimit); + } + + public void setIndexingIOUsageLimit(Long indexingIOUsageLimit) { + this.indexingIOUsageLimit = indexingIOUsageLimit; + } + + public void setSearchIOUsageLimit(Long searchIOUsageLimit) { + this.searchIOUsageLimit = searchIOUsageLimit; + } + + public AdmissionControlMode getTransportLayerAdmissionControllerMode() { + return transportLayerMode; + } + + public void setTransportLayerMode(AdmissionControlMode transportLayerMode) { + this.transportLayerMode = transportLayerMode; + } + + public Long getIndexingIOUsageLimit() { + return indexingIOUsageLimit; + } + + public Long getSearchIOUsageLimit() { + return searchIOUsageLimit; + } +} diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/package-info.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/package-info.java new file mode 100644 index 0000000000000..a024ccc756745 --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/settings/package-info.java @@ -0,0 +1,11 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ +/** + * This package contains settings related classes for the different admission controllers + */ +package org.opensearch.ratelimitting.admissioncontrol.settings; diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControlStats.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControlStats.java new file mode 100644 index 0000000000000..39909c571c63e --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControlStats.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol.stats; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.List; + +/** + * Class for admission control stats used as part of node stats + * @opensearch.internal + */ +public class AdmissionControlStats implements ToXContentFragment, Writeable { + + private final List<AdmissionControllerStats> admissionControllerStatsList; + + /** + * + * @param admissionControllerStatsList list of admissionControllerStats + */ + public AdmissionControlStats(List<AdmissionControllerStats> admissionControllerStatsList) { + this.admissionControllerStatsList = admissionControllerStatsList; + } + + /** + * + * @param in the stream to read from + * @throws IOException if an I/O error occurs + */ + public AdmissionControlStats(StreamInput in) throws IOException { + this.admissionControllerStatsList = in.readList(AdmissionControllerStats::new); + } + + /** + * Write this into the {@linkplain StreamOutput}. + * + * @param out the output stream to write entity content to + */ + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeList(this.admissionControllerStatsList); + } + + public List<AdmissionControllerStats> getAdmissionControllerStatsList() { + return admissionControllerStatsList; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("admission_control"); + for (AdmissionControllerStats admissionControllerStats : this.admissionControllerStatsList) { + builder.field(admissionControllerStats.getAdmissionControllerName(), admissionControllerStats); + } + return builder.endObject(); + } +} diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControllerStats.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControllerStats.java new file mode 100644 index 0000000000000..3895cac3eaa07 --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControllerStats.java @@ -0,0 +1,74 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.ratelimitting.admissioncontrol.stats; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.ratelimitting.admissioncontrol.controllers.AdmissionController; + +import java.io.IOException; +import java.util.Map; + +/** + * Class for admission controller ( such as CPU ) stats which includes rejection count for each action type + * @opensearch.internal + */ +public class AdmissionControllerStats implements Writeable, ToXContentFragment { + public Map<String, Long> rejectionCount; + public String admissionControllerName; + + public AdmissionControllerStats(AdmissionController admissionController) { + this.rejectionCount = admissionController.getRejectionStats(); + this.admissionControllerName = admissionController.getName(); + } + + public AdmissionControllerStats(StreamInput in) throws IOException { + this.rejectionCount = in.readMap(StreamInput::readString, StreamInput::readLong); + this.admissionControllerName = in.readString(); + } + + public String getAdmissionControllerName() { + return admissionControllerName; + } + + public Map<String, Long> getRejectionCount() { + return rejectionCount; + } + + /** + * Writes this instance into a {@link StreamOutput} + * @param out the {@link StreamOutput} to write to + * @throws IOException if an error occurs while writing to the StreamOutput + */ + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(this.rejectionCount, StreamOutput::writeString, StreamOutput::writeLong); + out.writeString(this.admissionControllerName); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startObject("transport"); + { + builder.startObject("rejection_count"); + { + for (Map.Entry<String, Long> rejectionCountEntry : this.rejectionCount.entrySet()) { + builder.field(rejectionCountEntry.getKey(), rejectionCountEntry.getValue()); + } + } + builder.endObject(); + } + builder.endObject(); + return builder.endObject(); + } +} diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/stats/package-info.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/stats/package-info.java new file mode 100644 index 0000000000000..7c96dcd569d64 --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/stats/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
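To see the shape this fragment renders, one can feed an `AdmissionControllerStats` instance to a JSON builder; `stats` is an assumed instance, and the rejection-count keys shown in the comment are illustrative action types.

```java
// Hedged sketch: `stats` is an assumed AdmissionControllerStats instance.
XContentBuilder builder = XContentFactory.jsonBuilder();
stats.toXContent(builder, ToXContent.EMPTY_PARAMS);
// builder now holds, roughly:
// {"transport":{"rejection_count":{"search":12,"indexing":3}}}
// (keys depend on which action types actually recorded rejections)
```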
+ */ + +/** + * This package contains stats-related classes for the admission controller feature + */ +package org.opensearch.ratelimitting.admissioncontrol.stats; diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportHandler.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportHandler.java new file mode 100644 index 0000000000000..1e8f309234f90 --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportHandler.java @@ -0,0 +1,70 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol.transport; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlService; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; +import org.opensearch.tasks.Task; +import org.opensearch.transport.TransportChannel; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.TransportRequestHandler; + +/** + * AdmissionControl handler to intercept transport requests. + * @param <T> Transport Request + */ +public class AdmissionControlTransportHandler<T extends TransportRequest> implements TransportRequestHandler<T> { + + private final String action; + private final TransportRequestHandler<T> actualHandler; + protected final Logger log = LogManager.getLogger(this.getClass()); + AdmissionControlService admissionControlService; + boolean forceExecution; + AdmissionControlActionType admissionControlActionType; + + public AdmissionControlTransportHandler( + String action, + TransportRequestHandler<T> actualHandler, + AdmissionControlService admissionControlService, + boolean forceExecution, + AdmissionControlActionType admissionControlActionType + ) { + super(); + this.action = action; + this.actualHandler = actualHandler; + this.admissionControlService = admissionControlService; + this.forceExecution = forceExecution; + this.admissionControlActionType = admissionControlActionType; + } + + /** + * @param request Transport Request that landed on the node + * @param channel Transport channel that allows sending a response to the request + * @param task Current task that is executing + * @throws Exception when admission control rejects the request + */ + @Override + public void messageReceived(T request, TransportChannel channel, Task task) throws Exception { + // skip admission control if force execution is true + if (!this.forceExecution) { + // intercept the transport requests here and apply admission control + try { + this.admissionControlService.applyTransportAdmissionControl(this.action, this.admissionControlActionType); + } catch (final OpenSearchRejectedExecutionException openSearchRejectedExecutionException) { + log.warn(openSearchRejectedExecutionException.getMessage()); + channel.sendResponse(openSearchRejectedExecutionException); + return; + } + } + actualHandler.messageReceived(request, channel, task); + } +} diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportInterceptor.java
b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportInterceptor.java new file mode 100644 index 0000000000000..ae1520bca769d --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportInterceptor.java @@ -0,0 +1,48 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol.transport; + +import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlService; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; +import org.opensearch.transport.TransportInterceptor; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.TransportRequestHandler; + +/** + * This class allows throttling by intercepting requests on both the sender and the receiver side. + */ +public class AdmissionControlTransportInterceptor implements TransportInterceptor { + + AdmissionControlService admissionControlService; + + public AdmissionControlTransportInterceptor(AdmissionControlService admissionControlService) { + this.admissionControlService = admissionControlService; + } + + /** + * + * @return admissionController handler to intercept transport requests + */ + @Override + public <T extends TransportRequest> TransportRequestHandler<T> interceptHandler( + String action, + String executor, + boolean forceExecution, + TransportRequestHandler<T> actualHandler, + AdmissionControlActionType admissionControlActionType + ) { + return new AdmissionControlTransportHandler<>( + action, + actualHandler, + this.admissionControlService, + forceExecution, + admissionControlActionType + ); + } +} diff --git a/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/package-info.java b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/package-info.java new file mode 100644 index 0000000000000..f97f31bc7b1db --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/admissioncontrol/transport/package-info.java @@ -0,0 +1,11 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +/** + * This package contains transport related classes for the admissionController Feature + */ +package org.opensearch.ratelimitting.admissioncontrol.transport; diff --git a/server/src/main/java/org/opensearch/ratelimitting/package-info.java b/server/src/main/java/org/opensearch/ratelimitting/package-info.java new file mode 100644 index 0000000000000..c04358e14284f --- /dev/null +++ b/server/src/main/java/org/opensearch/ratelimitting/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
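A hedged sketch of wiring the interceptor above around a concrete handler: `admissionControlService` and `originalHandler` are assumed to exist, the action name and executor are illustrative, and `AdmissionControlActionType.INDEXING` is assumed to be one of the enum's values (the indexing IO limit above suggests it is).

```java
// Hedged sketch: admissionControlService and originalHandler are assumed.
AdmissionControlTransportInterceptor interceptor =
    new AdmissionControlTransportInterceptor(admissionControlService);

TransportRequestHandler<BulkShardRequest> wrapped = interceptor.interceptHandler(
    "indices:data/write/bulk[s]",   // transport action name (illustrative)
    ThreadPool.Names.WRITE,         // executor (not consulted by this interceptor)
    false,                          // forceExecution = false, so rejection is possible
    originalHandler,
    AdmissionControlActionType.INDEXING
);
// wrapped.messageReceived(...) now applies admission control before delegating
// to originalHandler; with forceExecution = true it would delegate unconditionally.
```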
+ */ + +/** + * Base OpenSearch Throttling package + */ +package org.opensearch.ratelimitting; diff --git a/server/src/main/java/org/opensearch/repositories/FilterRepository.java b/server/src/main/java/org/opensearch/repositories/FilterRepository.java index 462d105dc0c68..697ac37c4a175 100644 --- a/server/src/main/java/org/opensearch/repositories/FilterRepository.java +++ b/server/src/main/java/org/opensearch/repositories/FilterRepository.java @@ -33,17 +33,17 @@ import org.apache.lucene.index.IndexCommit; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.common.component.Lifecycle; -import org.opensearch.common.component.LifecycleListener; -import org.opensearch.index.mapper.MapperService; +import org.opensearch.common.lifecycle.Lifecycle; +import org.opensearch.common.lifecycle.LifecycleListener; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.mapper.MapperService; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.index.snapshots.blobstore.RemoteStoreShardShallowCopySnapshot; import org.opensearch.index.store.Store; @@ -137,6 +137,16 @@ public long getRestoreThrottleTimeInNanos() { return in.getRestoreThrottleTimeInNanos(); } + @Override + public long getRemoteUploadThrottleTimeInNanos() { + return in.getRemoteUploadThrottleTimeInNanos(); + } + + @Override + public long getRemoteDownloadThrottleTimeInNanos() { + return in.getRemoteDownloadThrottleTimeInNanos(); + } + @Override public String startVerification() { return in.startVerification(); @@ -157,6 +167,11 @@ public boolean isReadOnly() { return in.isReadOnly(); } + @Override + public boolean isSystemRepository() { + return in.isSystemRepository(); + } + @Override public void snapshotShard( Store store, diff --git a/server/src/main/java/org/opensearch/repositories/IndexId.java b/server/src/main/java/org/opensearch/repositories/IndexId.java index eb6fb051957c7..87a0063e8c21b 100644 --- a/server/src/main/java/org/opensearch/repositories/IndexId.java +++ b/server/src/main/java/org/opensearch/repositories/IndexId.java @@ -32,13 +32,14 @@ package org.opensearch.repositories; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.Index; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.index.Index; import java.io.IOException; import java.util.Objects; @@ -46,8 +47,9 @@ /** * Represents a single snapshotted index in the repository. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class IndexId implements Writeable, ToXContentObject { protected static final String NAME = "name"; protected static final String ID = "id"; @@ -80,7 +82,7 @@ public String getName() { * The unique ID for the index within the repository. 
This is *not* the same as the * index's UUID, but merely a unique file/URL friendly identifier that a repository can * use to name blobs for the index. - * + * <p> * We could not use the index's actual UUID (See {@link Index#getUUID()}) because in the * case of snapshot/restore, the index UUID in the snapshotted index will be different * from the index UUID assigned to it when it is restored. Hence, the actual index UUID diff --git a/server/src/main/java/org/opensearch/repositories/IndexMetaDataGenerations.java b/server/src/main/java/org/opensearch/repositories/IndexMetaDataGenerations.java index 25cb0eaf43455..41f77e6830f5f 100644 --- a/server/src/main/java/org/opensearch/repositories/IndexMetaDataGenerations.java +++ b/server/src/main/java/org/opensearch/repositories/IndexMetaDataGenerations.java @@ -34,6 +34,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.snapshots.SnapshotId; import java.util.Collection; @@ -50,8 +51,9 @@ * {@link IndexMetadata} should be computed and then used to check if it already exists in the repository via * {@link #getIndexMetaBlobId(String)}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class IndexMetaDataGenerations { public static final IndexMetaDataGenerations EMPTY = new IndexMetaDataGenerations(Collections.emptyMap(), Collections.emptyMap()); diff --git a/server/src/main/java/org/opensearch/repositories/RepositoriesModule.java b/server/src/main/java/org/opensearch/repositories/RepositoriesModule.java index cc4d3c006d84c..afb6e530b0eec 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoriesModule.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoriesModule.java @@ -39,6 +39,7 @@ import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.plugins.RepositoryPlugin; import org.opensearch.repositories.fs.FsRepository; +import org.opensearch.repositories.fs.ReloadableFsRepository; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -71,6 +72,11 @@ public RepositoriesModule( metadata -> new FsRepository(metadata, env, namedXContentRegistry, clusterService, recoverySettings) ); + factories.put( + ReloadableFsRepository.TYPE, + metadata -> new ReloadableFsRepository(metadata, env, namedXContentRegistry, clusterService, recoverySettings) + ); + for (RepositoryPlugin repoPlugin : repoPlugins) { Map<String, Repository.Factory> newRepoTypes = repoPlugin.getRepositories( env, diff --git a/server/src/main/java/org/opensearch/repositories/RepositoriesService.java b/server/src/main/java/org/opensearch/repositories/RepositoriesService.java index e7f7a1d9c0554..68669feb16abc 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoriesService.java @@ -36,8 +36,8 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; +import org.opensearch.action.admin.cluster.crypto.CryptoSettings; import org.opensearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.opensearch.action.admin.cluster.repositories.put.PutRepositoryRequest; import org.opensearch.cluster.AckedClusterStateUpdateTask; @@ -49,6 +49,7 @@ import 
org.opensearch.cluster.SnapshotDeletionsInProgress; import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.cluster.ack.ClusterStateUpdateResponse; +import org.opensearch.cluster.metadata.CryptoMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.cluster.metadata.RepositoryMetadata; @@ -57,15 +58,16 @@ import org.opensearch.cluster.service.ClusterManagerTaskKeys; import org.opensearch.cluster.service.ClusterManagerTaskThrottler; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.Strings; -import org.opensearch.common.component.AbstractLifecycleComponent; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.regex.Regex; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.Strings; import org.opensearch.repositories.blobstore.MeteredBlobStoreRepository; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -82,12 +84,14 @@ import java.util.stream.Stream; import static org.opensearch.repositories.blobstore.BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY; +import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING; /** * Service responsible for maintaining and providing access to snapshot repositories on nodes. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RepositoriesService extends AbstractLifecycleComponent implements ClusterStateApplier { private static final Logger logger = LogManager.getLogger(RepositoriesService.class); @@ -151,7 +155,7 @@ public RepositoriesService( } /** - * Registers new repository in the cluster + * Registers new repository or updates an existing repository in the cluster * <p> * This method can be only called on the cluster-manager node. It tries to create a new repository on the master * and if it was successful it adds new repository to cluster metadata. 
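With the `ReloadableFsRepository` factory registered in `RepositoriesModule` above, a repository of that type can be created through the registration path described here. A hedged sketch using the client API; the repository name, location, and the `client`/`logger` handles are illustrative.

```java
// Hedged sketch: `client` is an assumed Client instance.
PutRepositoryRequest request = new PutRepositoryRequest("backups")
    .type(ReloadableFsRepository.TYPE)
    .settings(Settings.builder().put("location", "/mnt/backups").build());

client.admin().cluster().putRepository(request, ActionListener.wrap(
    acknowledged -> logger.info("repository [backups] registered"),
    e -> logger.warn("failed to register repository [backups]", e)
));
```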
@@ -159,12 +163,20 @@ public RepositoriesService( * @param request register repository request * @param listener register repository listener */ - public void registerRepository(final PutRepositoryRequest request, final ActionListener<ClusterStateUpdateResponse> listener) { + public void registerOrUpdateRepository(final PutRepositoryRequest request, final ActionListener<ClusterStateUpdateResponse> listener) { assert lifecycle.started() : "Trying to register new repository but service is in state [" + lifecycle.state() + "]"; - final RepositoryMetadata newRepositoryMetadata = new RepositoryMetadata(request.name(), request.type(), request.settings()); + final RepositoryMetadata newRepositoryMetadata = new RepositoryMetadata( + request.name(), + request.type(), + request.settings(), + CryptoMetadata.fromRequest(request.cryptoSettings()) + ); validate(request.name()); validateRepositoryMetadataSettings(clusterService, request.name(), request.settings()); + if (newRepositoryMetadata.cryptoMetadata() != null) { + validate(newRepositoryMetadata.cryptoMetadata().keyProviderName()); + } final ActionListener<ClusterStateUpdateResponse> registrationListener; if (request.verify()) { @@ -211,27 +223,58 @@ public ClusterState execute(ClusterState currentState) { if (repositories == null) { logger.info("put repository [{}]", request.name()); repositories = new RepositoriesMetadata( - Collections.singletonList(new RepositoryMetadata(request.name(), request.type(), request.settings())) + Collections.singletonList( + new RepositoryMetadata( + request.name(), + request.type(), + request.settings(), + CryptoMetadata.fromRequest(request.cryptoSettings()) + ) + ) ); } else { boolean found = false; List<RepositoryMetadata> repositoriesMetadata = new ArrayList<>(repositories.repositories().size() + 1); for (RepositoryMetadata repositoryMetadata : repositories.repositories()) { - if (repositoryMetadata.name().equals(newRepositoryMetadata.name())) { - if (newRepositoryMetadata.equalsIgnoreGenerations(repositoryMetadata)) { + RepositoryMetadata updatedRepositoryMetadata = newRepositoryMetadata; + if (isSystemRepositorySettingPresent(repositoryMetadata.settings())) { + Settings updatedSettings = Settings.builder() + .put(newRepositoryMetadata.settings()) + .put(SYSTEM_REPOSITORY_SETTING.getKey(), true) + .build(); + updatedRepositoryMetadata = new RepositoryMetadata( + newRepositoryMetadata.name(), + newRepositoryMetadata.type(), + updatedSettings, + newRepositoryMetadata.cryptoMetadata() + ); + } + if (repositoryMetadata.name().equals(updatedRepositoryMetadata.name())) { + if (updatedRepositoryMetadata.equalsIgnoreGenerations(repositoryMetadata)) { // Previous version is the same as this one no update is needed. 
return currentState; } + ensureCryptoSettingsAreSame(repositoryMetadata, request); found = true; - repositoriesMetadata.add(newRepositoryMetadata); + if (isSystemRepositorySettingPresent(repositoryMetadata.settings())) { + ensureValidSystemRepositoryUpdate(updatedRepositoryMetadata, repositoryMetadata); + } + repositoriesMetadata.add(updatedRepositoryMetadata); } else { repositoriesMetadata.add(repositoryMetadata); } } if (!found) { logger.info("put repository [{}]", request.name()); - repositoriesMetadata.add(new RepositoryMetadata(request.name(), request.type(), request.settings())); + repositoriesMetadata.add( + new RepositoryMetadata( + request.name(), + request.type(), + request.settings(), + CryptoMetadata.fromRequest(request.cryptoSettings()) + ) + ); } else { logger.info("update repository [{}]", request.name()); } @@ -289,6 +332,7 @@ public ClusterState execute(ClusterState currentState) { for (RepositoryMetadata repositoryMetadata : repositories.repositories()) { if (Regex.simpleMatch(request.name(), repositoryMetadata.name())) { ensureRepositoryNotInUse(currentState, repositoryMetadata.name()); + ensureNotSystemRepository(repositoryMetadata); logger.info("delete repository [{}]", repositoryMetadata.name()); changed = true; } else { @@ -393,7 +437,13 @@ public void applyClusterState(ClusterChangedEvent event) { // Check if repositories got changed if ((oldMetadata == null && newMetadata == null) || (oldMetadata != null && oldMetadata.equalsIgnoreGenerations(newMetadata))) { for (Repository repo : repositories.values()) { - repo.updateState(state); + // updateState should only be invoked for repositories which are already in the cluster state. This + // check is needed because system repositories can be populated before the cluster state has the + // repository metadata.
+ RepositoriesMetadata stateRepositoriesMetadata = state.metadata().custom(RepositoriesMetadata.TYPE); + if (stateRepositoriesMetadata != null && stateRepositoriesMetadata.repository(repo.getMetadata().name()) != null) { + repo.updateState(state); + } } return; } @@ -407,7 +457,6 @@ public void applyClusterState(ClusterChangedEvent event) { logger.debug("unregistering repository [{}]", entry.getKey()); Repository repository = entry.getValue(); closeRepository(repository); - archiveRepositoryStats(repository, state.version()); } else { survivors.put(entry.getKey(), entry.getValue()); } @@ -424,24 +473,48 @@ if (previousMetadata.type().equals(repositoryMetadata.type()) == false || previousMetadata.settings().equals(repositoryMetadata.settings()) == false) { // Previous version is different from the version in settings - logger.debug("updating repository [{}]", repositoryMetadata.name()); - closeRepository(repository); - archiveRepositoryStats(repository, state.version()); - repository = null; - try { - repository = createRepository(repositoryMetadata, typesRegistry); - } catch (RepositoryException ex) { - // TODO: this catch is bogus, it means the old repo is already closed, - // but we have nothing to replace it - logger.warn( - () -> new ParameterizedMessage("failed to change repository [{}]", repositoryMetadata.name()), - ex + if (repository.isSystemRepository() && repository.isReloadable()) { + logger.debug( + "updating repository [{}] in-place to use new metadata [{}]", + repositoryMetadata.name(), + repositoryMetadata ); + repository.validateMetadata(repositoryMetadata); + repository.reload(repositoryMetadata); + } else { + logger.debug("updating repository [{}]", repositoryMetadata.name()); + closeRepository(repository); + repository = null; + try { + repository = createRepository(repositoryMetadata, typesRegistry); + } catch (RepositoryException ex) { + // TODO: this catch is bogus, it means the old repo is already closed, + // but we have nothing to replace it + logger.warn( + () -> new ParameterizedMessage("failed to change repository [{}]", repositoryMetadata.name()), + ex + ); + } } } } else { try { - repository = createRepository(repositoryMetadata, typesRegistry); + // System repositories are already created and verified, and hence during a cluster state + // update we should avoid creating them again. Once the cluster state is updated with the + // repository metadata, the repository metadata update will land in the above if block. + if (repositories.containsKey(repositoryMetadata.name()) == false) { + repository = createRepository(repositoryMetadata, typesRegistry); + } else { + // Validate that the repository metadata which was created during bootstrap is the same as the + // one present in the incoming cluster state.
+ repository = repositories.get(repositoryMetadata.name()); + if (repositoryMetadata.equalsIgnoreGenerations(repository.getMetadata()) == false) { + throw new RepositoryException( + repositoryMetadata.name(), + "repository was already registered during bootstrap with different metadata than in the cluster state" + ); + } + } } catch (RepositoryException ex) { logger.warn(() -> new ParameterizedMessage("failed to create repository [{}]", repositoryMetadata.name()), ex); } @@ -500,12 +573,12 @@ public Repository repository(String repositoryName) { } public List<RepositoryStatsSnapshot> repositoriesStats() { - List<RepositoryStatsSnapshot> archivedRepoStats = repositoriesStatsArchive.getArchivedStats(); List<RepositoryStatsSnapshot> activeRepoStats = getRepositoryStatsForActiveRepositories(); + return activeRepoStats; + } - List<RepositoryStatsSnapshot> repositoriesStats = new ArrayList<>(archivedRepoStats); - repositoriesStats.addAll(activeRepoStats); - return repositoriesStats; + public RepositoriesStats getRepositoriesStats() { + return new RepositoriesStats(repositoriesStats()); } private List<RepositoryStatsSnapshot> getRepositoryStatsForActiveRepositories() { @@ -560,18 +633,16 @@ public void unregisterInternalRepository(String name) { } /** Closes the given repository. */ - private void closeRepository(Repository repository) { + public void closeRepository(Repository repository) { logger.debug("closing repository [{}][{}]", repository.getMetadata().type(), repository.getMetadata().name()); repository.close(); } - private void archiveRepositoryStats(Repository repository, long clusterStateVersion) { - if (repository instanceof MeteredBlobStoreRepository) { - RepositoryStatsSnapshot stats = ((MeteredBlobStoreRepository) repository).statsSnapshotForArchival(clusterStateVersion); - if (repositoriesStatsArchive.archive(stats) == false) { - logger.warn("Unable to archive the repository stats [{}] as the archive is full.", stats); - } - } + /** + * Creates a repository holder.
This method starts the non-internal repository + */ + public Repository createRepository(RepositoryMetadata repositoryMetadata) { + return this.createRepository(repositoryMetadata, typesRegistry); } /** @@ -598,15 +669,15 @@ private Repository createRepository(RepositoryMetadata repositoryMetadata, Map<S } } - private static void validate(final String repositoryName) { - if (org.opensearch.core.common.Strings.hasLength(repositoryName) == false) { - throw new RepositoryException(repositoryName, "cannot be empty"); + public static void validate(final String identifier) { + if (org.opensearch.core.common.Strings.hasLength(identifier) == false) { + throw new RepositoryException(identifier, "cannot be empty"); } - if (repositoryName.contains("#")) { - throw new RepositoryException(repositoryName, "must not contain '#'"); + if (identifier.contains("#")) { + throw new RepositoryException(identifier, "must not contain '#'"); } - if (Strings.validFileName(repositoryName) == false) { - throw new RepositoryException(repositoryName, "must not contain the following characters " + Strings.INVALID_FILENAME_CHARS); + if (Strings.validFileName(identifier) == false) { + throw new RepositoryException(identifier, "must not contain the following characters " + Strings.INVALID_FILENAME_CHARS); } } @@ -628,10 +699,13 @@ public static void validateRepositoryMetadataSettings( + minVersionInCluster ); } - if (REMOTE_STORE_INDEX_SHALLOW_COPY.get(repositoryMetadataSettings) && !FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE)) { + // Validation to not allow users to create system repository via put repository call. + if (isSystemRepositorySettingPresent(repositoryMetadataSettings)) { throw new RepositoryException( repositoryName, - "setting " + REMOTE_STORE_INDEX_SHALLOW_COPY.getKey() + " cannot be enabled, as remote store feature is not enabled." + "setting " + + SYSTEM_REPOSITORY_SETTING.getKey() + + " cannot provide system repository setting; this setting is managed by OpenSearch" ); } } @@ -642,6 +716,23 @@ private static void ensureRepositoryNotInUse(ClusterState clusterState, String r } } + private static void ensureCryptoSettingsAreSame(RepositoryMetadata repositoryMetadata, PutRepositoryRequest request) { + if (repositoryMetadata.cryptoMetadata() == null && request.cryptoSettings() == null) { + return; + } + if (repositoryMetadata.cryptoMetadata() == null || request.cryptoSettings() == null) { + throw new IllegalArgumentException("Crypto settings changes found in the repository update request. This is not allowed"); + } + + CryptoMetadata cryptoMetadata = repositoryMetadata.cryptoMetadata(); + CryptoSettings cryptoSettings = request.cryptoSettings(); + if (!cryptoMetadata.keyProviderName().equals(cryptoSettings.getKeyProviderName()) + || !cryptoMetadata.keyProviderType().equals(cryptoSettings.getKeyProviderType()) + || !cryptoMetadata.settings().toString().equals(cryptoSettings.getSettings().toString())) { + throw new IllegalArgumentException("Changes in crypto settings found in the repository update request. This is not allowed"); + } + } + /** * Checks if a repository is currently in use by one of the snapshots * @@ -680,6 +771,76 @@ private static boolean isRepositoryInUse(ClusterState clusterState, String repos return false; } + /** + * This method will be used to update the repositories map. 
+ */ + public void updateRepositoriesMap(Map<String, Repository> repos) { + if (repositories.isEmpty()) { + repositories = repos; + } else { + throw new IllegalArgumentException("can't overwrite as repositories are already present"); + } + } + + private static void ensureNotSystemRepository(RepositoryMetadata repositoryMetadata) { + if (isSystemRepositorySettingPresent(repositoryMetadata.settings())) { + throw new RepositoryException(repositoryMetadata.name(), "cannot delete a system repository"); + } + } + + private static boolean isSystemRepositorySettingPresent(Settings repositoryMetadataSettings) { + return SYSTEM_REPOSITORY_SETTING.get(repositoryMetadataSettings); + } + + private static boolean isValueEqual(String key, String newValue, String currentValue) { + if (newValue == null && currentValue == null) { + return true; + } + if (newValue == null) { + throw new IllegalArgumentException("[" + key + "] cannot be empty, " + "current value [" + currentValue + "]"); + } + if (newValue.equals(currentValue) == false) { + throw new IllegalArgumentException( + "trying to modify an unmodifiable attribute " + + key + + " of system repository from " + + "current value [" + + currentValue + + "] to new value [" + + newValue + + "]" + ); + } + return true; + } + + public void ensureValidSystemRepositoryUpdate(RepositoryMetadata newRepositoryMetadata, RepositoryMetadata currentRepositoryMetadata) { + if (isSystemRepositorySettingPresent(currentRepositoryMetadata.settings())) { + try { + isValueEqual("type", newRepositoryMetadata.type(), currentRepositoryMetadata.type()); + + Repository repository = repositories.get(currentRepositoryMetadata.name()); + Settings newRepositoryMetadataSettings = newRepositoryMetadata.settings(); + Settings currentRepositoryMetadataSettings = currentRepositoryMetadata.settings(); + + List<String> restrictedSettings = repository.getRestrictedSystemRepositorySettings() + .stream() + .map(setting -> setting.getKey()) + .collect(Collectors.toList()); + + for (String restrictedSettingKey : restrictedSettings) { + isValueEqual( + restrictedSettingKey, + newRepositoryMetadataSettings.get(restrictedSettingKey), + currentRepositoryMetadataSettings.get(restrictedSettingKey) + ); + } + } catch (IllegalArgumentException e) { + throw new RepositoryException(currentRepositoryMetadata.name(), e.getMessage()); + } + } + } + @Override protected void doStart() { diff --git a/server/src/main/java/org/opensearch/repositories/RepositoriesStats.java b/server/src/main/java/org/opensearch/repositories/RepositoriesStats.java new file mode 100644 index 0000000000000..cfcbc6bb88e66 --- /dev/null +++ b/server/src/main/java/org/opensearch/repositories/RepositoriesStats.java @@ -0,0 +1,56 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
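A hedged sketch of the guard implemented by `ensureValidSystemRepositoryUpdate` and `isValueEqual` above: changing a restricted setting of a system repository fails, while equal values pass. The repository name, the `"system_repository"` settings key, and the `repositoriesService` handle (with a repository registered under that name) are all illustrative assumptions.

```java
// Hedged sketch: metadata objects and the settings key are illustrative.
RepositoryMetadata current = new RepositoryMetadata("sys-repo", "fs",
    Settings.builder().put("system_repository", true).put("location", "/mnt/a").build());
RepositoryMetadata update = new RepositoryMetadata("sys-repo", "fs",
    Settings.builder().put("system_repository", true).put("location", "/mnt/b").build());

try {
    repositoriesService.ensureValidSystemRepositoryUpdate(update, current);
} catch (RepositoryException e) {
    // Thrown when "location" is among the repository's restricted system
    // settings and the new value differs from the current one.
}
```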
+ */ + +package org.opensearch.repositories; + +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.common.util.CollectionUtils; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.List; + +/** + * Encapsulates stats for multiple repositories + * + * @opensearch.api + */ +@PublicApi(since = "2.11.0") +public class RepositoriesStats implements Writeable, ToXContentObject { + + List<RepositoryStatsSnapshot> repositoryStatsSnapshots; + + public RepositoriesStats(List<RepositoryStatsSnapshot> repositoryStatsSnapshots) { + this.repositoryStatsSnapshots = repositoryStatsSnapshots; + } + + public RepositoriesStats(StreamInput in) throws IOException { + this.repositoryStatsSnapshots = in.readList(RepositoryStatsSnapshot::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeList(repositoryStatsSnapshots); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startArray("repositories"); + if (CollectionUtils.isEmpty(repositoryStatsSnapshots) == false) { + for (RepositoryStatsSnapshot repositoryStatsSnapshot : repositoryStatsSnapshots) { + repositoryStatsSnapshot.toXContent(builder, params); + } + } + builder.endArray(); + return builder; + } +} diff --git a/server/src/main/java/org/opensearch/repositories/RepositoriesStatsArchive.java b/server/src/main/java/org/opensearch/repositories/RepositoriesStatsArchive.java index b8f100706f81e..3d35f75176eaf 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoriesStatsArchive.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoriesStatsArchive.java @@ -70,11 +70,6 @@ public RepositoriesStatsArchive(TimeValue retentionPeriod, int maxCapacity, Long * @return {@code true} if the repository stats were archived, {@code false} otherwise. 
*/ synchronized boolean archive(final RepositoryStatsSnapshot repositoryStats) { - assert containsRepositoryStats(repositoryStats) == false : "A repository with ephemeral id " - + repositoryStats.getRepositoryInfo().ephemeralId - + " is already archived"; - assert repositoryStats.isArchived(); - evict(); if (archive.size() >= maxCapacity) { @@ -116,15 +111,6 @@ private void evict() { } } - private boolean containsRepositoryStats(RepositoryStatsSnapshot repositoryStats) { - return archive.stream() - .anyMatch( - entry -> entry.repositoryStatsSnapshot.getRepositoryInfo().ephemeralId.equals( - repositoryStats.getRepositoryInfo().ephemeralId - ) - ); - } - private static class ArchiveEntry { private final RepositoryStatsSnapshot repositoryStatsSnapshot; private final long createdAtMillis; diff --git a/server/src/main/java/org/opensearch/repositories/Repository.java b/server/src/main/java/org/opensearch/repositories/Repository.java index 8a712b0a0c098..b3f1e9ce2eed9 100644 --- a/server/src/main/java/org/opensearch/repositories/Repository.java +++ b/server/src/main/java/org/opensearch/repositories/Repository.java @@ -33,7 +33,6 @@ import org.apache.lucene.index.IndexCommit; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.SnapshotsInProgress; @@ -42,9 +41,12 @@ import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Nullable; -import org.opensearch.common.component.LifecycleComponent; -import org.opensearch.index.mapper.MapperService; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.lifecycle.LifecycleComponent; +import org.opensearch.common.settings.Setting; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.mapper.MapperService; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.index.snapshots.blobstore.RemoteStoreShardShallowCopySnapshot; import org.opensearch.index.store.Store; @@ -55,6 +57,8 @@ import java.io.IOException; import java.util.Collection; +import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.function.Consumer; import java.util.function.Function; @@ -72,14 +76,18 @@ * <li>When all shard calls return cluster-manager calls {@link #finalizeSnapshot} with possible list of failures</li> * </ul> * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface Repository extends LifecycleComponent { /** * An factory interface for constructing repositories. * See {@link org.opensearch.plugins.RepositoryPlugin}. + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") interface Factory { /** * Constructs a repository. 
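The next hunk adds remote-store throttle accessors and in-place reload hooks to this interface. A hedged sketch of a repository opting into reloads, modeled on the `FsRepository` constructor wired up in `RepositoriesModule` earlier; the class and parameter names are illustrative.

```java
// Hedged sketch: a repository opting into in-place reloads via the default
// methods added in the next hunk.
public class MyReloadableRepository extends FsRepository {

    public MyReloadableRepository(RepositoryMetadata metadata, Environment env,
        NamedXContentRegistry registry, ClusterService clusterService, RecoverySettings recoverySettings) {
        super(metadata, env, registry, clusterService, recoverySettings);
    }

    @Override
    public boolean isReloadable() {
        return true; // lets RepositoriesService reload in place instead of close-and-recreate
    }

    @Override
    public void validateMetadata(RepositoryMetadata newMetadata) {
        // reject metadata this instance cannot adopt without a restart
    }

    @Override
    public void reload(RepositoryMetadata newMetadata) {
        // re-read settings from newMetadata while keeping the repository open
    }
}
```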
@@ -198,6 +206,16 @@ default void deleteSnapshotsAndReleaseLockFiles( */ long getRestoreThrottleTimeInNanos(); + /** + * Returns the remote upload throttle time in nanoseconds + */ + long getRemoteUploadThrottleTimeInNanos(); + + /** + * Returns the remote download throttle time in nanoseconds + */ + long getRemoteDownloadThrottleTimeInNanos(); + /** * Returns stats on the repository usage */ @@ -237,6 +255,13 @@ default RepositoryStats stats() { */ boolean isReadOnly(); + /** + * Returns true if the repository is managed by the system directly and doesn't allow managing the lifetime of the + * repository through external APIs + * @return true if the repository is system managed + */ + boolean isSystemRepository(); + /** * Creates a snapshot of the shard based on the index commit point. * <p> @@ -325,6 +350,14 @@ void restoreShard( ActionListener<Void> listener ); + /** + * Returns the list of restricted system repository settings that cannot be mutated post repository creation. + * @return list of settings + */ + default List<Setting<?>> getRestrictedSystemRepositorySettings() { + return Collections.emptyList(); + } + /** * Returns Snapshot Shard Metadata for remote store interop enabled snapshot. * <p> @@ -423,4 +456,22 @@ default void cloneRemoteStoreIndexShardSnapshot( default Map<String, Object> adaptUserMetadata(Map<String, Object> userMetadata) { return userMetadata; } + + /** + * Checks if the repository can be reloaded in place or not + * @return true if the repository can be reloaded in place, false otherwise + */ + default boolean isReloadable() { + return false; + } + + /** + * Reload the repository in place + */ + default void reload(RepositoryMetadata repositoryMetadata) {} + + /** + * Validate the repository metadata + */ + default void validateMetadata(RepositoryMetadata repositoryMetadata) {} } diff --git a/server/src/main/java/org/opensearch/repositories/RepositoryCleanupResult.java b/server/src/main/java/org/opensearch/repositories/RepositoryCleanupResult.java index 185735f39c24c..eeddf4bd76659 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoryCleanupResult.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoryCleanupResult.java @@ -31,13 +31,14 @@ package org.opensearch.repositories; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.blobstore.DeleteResult; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.ParseField; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; @@ -47,8 +48,9 @@ /** * Result of a repository cleanup action * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class RepositoryCleanupResult implements Writeable, ToXContentObject { public static final ObjectParser<RepositoryCleanupResult, Void> PARSER = new ObjectParser<>( @@ -105,6 +107,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } } diff --git
a/server/src/main/java/org/opensearch/repositories/RepositoryData.java b/server/src/main/java/org/opensearch/repositories/RepositoryData.java index 90d7b404d089a..ea48d9b1a49fe 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoryData.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoryData.java @@ -37,6 +37,7 @@ import org.opensearch.Version; import org.opensearch.common.Nullable; import org.opensearch.common.UUIDs; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.xcontent.XContentParserUtils; @@ -60,8 +61,9 @@ * A class that represents the data in a repository, as captured in the * repository's index blob. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class RepositoryData { /** diff --git a/server/src/main/java/org/opensearch/repositories/RepositoryInfo.java b/server/src/main/java/org/opensearch/repositories/RepositoryInfo.java index 40dfd8cc77529..c947ff19bbca8 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoryInfo.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoryInfo.java @@ -32,12 +32,12 @@ package org.opensearch.repositories; -import org.opensearch.common.Nullable; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -48,67 +48,31 @@ /** * Information about a repository * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class RepositoryInfo implements Writeable, ToXContentFragment { - public final String ephemeralId; public final String name; public final String type; public final Map<String, String> location; - public final long startedAt; - @Nullable - public final Long stoppedAt; - public RepositoryInfo(String ephemeralId, String name, String type, Map<String, String> location, long startedAt) { - this(ephemeralId, name, type, location, startedAt, null); - } - - public RepositoryInfo( - String ephemeralId, - String name, - String type, - Map<String, String> location, - long startedAt, - @Nullable Long stoppedAt - ) { - this.ephemeralId = ephemeralId; + public RepositoryInfo(String name, String type, Map<String, String> location) { this.name = name; this.type = type; this.location = location; - this.startedAt = startedAt; - if (stoppedAt != null && startedAt > stoppedAt) { - throw new IllegalArgumentException("createdAt must be before or equal to stoppedAt"); - } - this.stoppedAt = stoppedAt; } public RepositoryInfo(StreamInput in) throws IOException { - this.ephemeralId = in.readString(); this.name = in.readString(); this.type = in.readString(); this.location = in.readMap(StreamInput::readString, StreamInput::readString); - this.startedAt = in.readLong(); - this.stoppedAt = in.readOptionalLong(); - } - - public RepositoryInfo stopped(long stoppedAt) { - assert isStopped() == false : "The repository is already stopped"; - - return new RepositoryInfo(ephemeralId, name, type, location, startedAt, stoppedAt); - } - - public 
boolean isStopped() { - return stoppedAt != null; } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeString(ephemeralId); out.writeString(name); out.writeString(type); out.writeMap(location, StreamOutput::writeString, StreamOutput::writeString); - out.writeLong(startedAt); - out.writeOptionalLong(stoppedAt); } @Override @@ -116,11 +80,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("repository_name", name); builder.field("repository_type", type); builder.field("repository_location", location); - builder.field("repository_ephemeral_id", ephemeralId); - builder.field("repository_started_at", startedAt); - if (stoppedAt != null) { - builder.field("repository_stopped_at", stoppedAt); - } return builder; } @@ -129,21 +88,16 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; RepositoryInfo that = (RepositoryInfo) o; - return ephemeralId.equals(that.ephemeralId) - && name.equals(that.name) - && type.equals(that.type) - && location.equals(that.location) - && startedAt == that.startedAt - && Objects.equals(stoppedAt, that.stoppedAt); + return name.equals(that.name) && type.equals(that.type) && location.equals(that.location); } @Override public int hashCode() { - return Objects.hash(ephemeralId, name, type, location, startedAt, stoppedAt); + return Objects.hash(name, type, location); } @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } } diff --git a/server/src/main/java/org/opensearch/repositories/RepositoryShardId.java b/server/src/main/java/org/opensearch/repositories/RepositoryShardId.java index 4799f36156a73..5b16ffcdf83a4 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoryShardId.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoryShardId.java @@ -32,6 +32,7 @@ package org.opensearch.repositories; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -42,8 +43,9 @@ /** * Represents a shard snapshot in a repository. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class RepositoryShardId implements Writeable { private final IndexId index; diff --git a/server/src/main/java/org/opensearch/repositories/RepositoryStats.java b/server/src/main/java/org/opensearch/repositories/RepositoryStats.java index efd5d6f8560b6..a0cc1637af00e 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoryStats.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoryStats.java @@ -32,9 +32,14 @@ package org.opensearch.repositories; +import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.blobstore.BlobStore; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; import java.io.IOException; import java.util.Collections; @@ -45,34 +50,66 @@ /** * Stats about a repository * - * @opensearch.internal + * @opensearch.api */ -public class RepositoryStats implements Writeable { +@PublicApi(since = "1.0.0") +public class RepositoryStats implements Writeable, ToXContentFragment { public static final RepositoryStats EMPTY_STATS = new RepositoryStats(Collections.emptyMap()); + @Nullable public final Map<String, Long> requestCounts; + @Nullable + public final Map<BlobStore.Metric, Map<String, Long>> extendedStats; + public final boolean detailed; public RepositoryStats(Map<String, Long> requestCounts) { this.requestCounts = Collections.unmodifiableMap(requestCounts); + this.extendedStats = Collections.emptyMap(); + this.detailed = false; + } + + public RepositoryStats(Map<BlobStore.Metric, Map<String, Long>> extendedStats, boolean detailed) { + this.requestCounts = Collections.emptyMap(); + this.extendedStats = Collections.unmodifiableMap(extendedStats); + this.detailed = detailed; } public RepositoryStats(StreamInput in) throws IOException { this.requestCounts = in.readMap(StreamInput::readString, StreamInput::readLong); + this.extendedStats = in.readMap( + e -> e.readEnum(BlobStore.Metric.class), + i -> i.readMap(StreamInput::readString, StreamInput::readLong) + ); + this.detailed = in.readBoolean(); } public RepositoryStats merge(RepositoryStats otherStats) { - final Map<String, Long> result = new HashMap<>(); - result.putAll(requestCounts); - for (Map.Entry<String, Long> entry : otherStats.requestCounts.entrySet()) { - result.merge(entry.getKey(), entry.getValue(), Math::addExact); + assert this.detailed == otherStats.detailed; + if (detailed) { + final Map<BlobStore.Metric, Map<String, Long>> result = new HashMap<>(); + result.putAll(extendedStats); + for (Map.Entry<BlobStore.Metric, Map<String, Long>> entry : otherStats.extendedStats.entrySet()) { + for (Map.Entry<String, Long> nested : entry.getValue().entrySet()) { + result.get(entry.getKey()).merge(nested.getKey(), nested.getValue(), Math::addExact); + } + } + return new RepositoryStats(result, true); + } else { + final Map<String, Long> result = new HashMap<>(); + result.putAll(requestCounts); + for (Map.Entry<String, Long> entry : otherStats.requestCounts.entrySet()) { + result.merge(entry.getKey(), entry.getValue(), Math::addExact); + } + return new RepositoryStats(result); } - return new RepositoryStats(result); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeMap(requestCounts, 
StreamOutput::writeString, StreamOutput::writeLong); + out.writeMap(extendedStats, StreamOutput::writeEnum, (o, v) -> o.writeMap(v, StreamOutput::writeString, StreamOutput::writeLong)); + out.writeBoolean(detailed); } @Override @@ -80,16 +117,32 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; RepositoryStats that = (RepositoryStats) o; - return requestCounts.equals(that.requestCounts); + return requestCounts.equals(that.requestCounts) && extendedStats.equals(that.extendedStats) && detailed == that.detailed; } @Override public int hashCode() { - return Objects.hash(requestCounts); + return Objects.hash(requestCounts, detailed, extendedStats); } @Override public String toString() { - return "RepositoryStats{" + "requestCounts=" + requestCounts + '}'; + return "RepositoryStats{requestCounts=" + requestCounts + ", extendedStats=" + extendedStats + ", detailed=" + detailed + "}"; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (detailed == false) { + builder.field("request_counts", requestCounts); + } else { + extendedStats.forEach((k, v) -> { + try { + builder.field(k.metricName(), v); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + } + return builder; } } diff --git a/server/src/main/java/org/opensearch/repositories/RepositoryStatsSnapshot.java b/server/src/main/java/org/opensearch/repositories/RepositoryStatsSnapshot.java index 2357b572ed3d0..3340defffc90f 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoryStatsSnapshot.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoryStatsSnapshot.java @@ -32,11 +32,12 @@ package org.opensearch.repositories; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable;; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; @@ -46,28 +47,25 @@ /** * Stats snapshot about a repository * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class RepositoryStatsSnapshot implements Writeable, ToXContentObject { public static final long UNKNOWN_CLUSTER_VERSION = -1; private final RepositoryInfo repositoryInfo; private final RepositoryStats repositoryStats; private final long clusterVersion; - private final boolean archived; - public RepositoryStatsSnapshot(RepositoryInfo repositoryInfo, RepositoryStats repositoryStats, long clusterVersion, boolean archived) { - assert archived != (clusterVersion == UNKNOWN_CLUSTER_VERSION); + public RepositoryStatsSnapshot(RepositoryInfo repositoryInfo, RepositoryStats repositoryStats, long clusterVersion) { this.repositoryInfo = repositoryInfo; this.repositoryStats = repositoryStats; this.clusterVersion = clusterVersion; - this.archived = archived; } public RepositoryStatsSnapshot(StreamInput in) throws IOException { this.repositoryInfo = new RepositoryInfo(in); this.repositoryStats = new RepositoryStats(in); this.clusterVersion = in.readLong(); - this.archived = in.readBoolean(); } public RepositoryInfo getRepositoryInfo() {
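A hedged sketch of the non-detailed merge path shown above: request counts are summed per key with overflow checks (`Math::addExact`); the metric names are illustrative.

```java
// Hedged sketch: merging two non-detailed RepositoryStats instances.
RepositoryStats a = new RepositoryStats(Map.of("GetObject", 5L));
RepositoryStats b = new RepositoryStats(Map.of("GetObject", 2L, "PutObject", 1L));

RepositoryStats merged = a.merge(b);
// merged.requestCounts -> {GetObject=7, PutObject=1}
```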
@@ -78,10 +76,6 @@ public RepositoryStats getRepositoryStats() { return repositoryStats; } - public boolean isArchived() { - return archived; - } - public long getClusterVersion() { return clusterVersion; } @@ -91,18 +85,13 @@ public void writeTo(StreamOutput out) throws IOException { repositoryInfo.writeTo(out); repositoryStats.writeTo(out); out.writeLong(clusterVersion); - out.writeBoolean(archived); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); repositoryInfo.toXContent(builder, params); - builder.field("request_counts", repositoryStats.requestCounts); - builder.field("archived", archived); - if (archived) { - builder.field("cluster_version", clusterVersion); - } + repositoryStats.toXContent(builder, params); builder.endObject(); return builder; } @@ -114,17 +103,16 @@ public boolean equals(Object o) { RepositoryStatsSnapshot that = (RepositoryStatsSnapshot) o; return repositoryInfo.equals(that.repositoryInfo) && repositoryStats.equals(that.repositoryStats) - && clusterVersion == that.clusterVersion - && archived == that.archived; + && clusterVersion == that.clusterVersion; } @Override public int hashCode() { - return Objects.hash(repositoryInfo, repositoryStats, clusterVersion, archived); + return Objects.hash(repositoryInfo, repositoryStats, clusterVersion); } @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } } diff --git a/server/src/main/java/org/opensearch/repositories/ShardGenerations.java b/server/src/main/java/org/opensearch/repositories/ShardGenerations.java index d918eb7e1e476..3591e683e58c1 100644 --- a/server/src/main/java/org/opensearch/repositories/ShardGenerations.java +++ b/server/src/main/java/org/opensearch/repositories/ShardGenerations.java @@ -33,6 +33,7 @@ package org.opensearch.repositories; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import java.util.Arrays; import java.util.Collection; @@ -48,8 +49,9 @@ /** * Generations of shards for snapshots * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ShardGenerations { public static final ShardGenerations EMPTY = new ShardGenerations(Collections.emptyMap()); @@ -177,8 +179,9 @@ public static Builder builder() { /** * Builder for the shard generations. 
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class Builder { private final Map<IndexId, Map<Integer, String>> generations = new HashMap<>(); diff --git a/server/src/main/java/org/opensearch/repositories/VerifyNodeRepositoryAction.java b/server/src/main/java/org/opensearch/repositories/VerifyNodeRepositoryAction.java index ff5ffdbfe1e3e..96f9081e23c77 100644 --- a/server/src/main/java/org/opensearch/repositories/VerifyNodeRepositoryAction.java +++ b/server/src/main/java/org/opensearch/repositories/VerifyNodeRepositoryAction.java @@ -35,12 +35,13 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.EmptyTransportResponseHandler; @@ -48,7 +49,6 @@ import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestHandler; -import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportService; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index ae8080da73fa8..4a932d296a82f 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -47,7 +47,6 @@ import org.apache.lucene.util.BytesRef; import org.opensearch.ExceptionsHelper; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.action.StepListener; import org.opensearch.action.support.GroupedActionListener; @@ -67,57 +66,64 @@ import org.opensearch.common.Nullable; import org.opensearch.common.Numbers; import org.opensearch.common.SetOnce; -import org.opensearch.common.Strings; import org.opensearch.common.UUIDs; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; import org.opensearch.common.blobstore.DeleteResult; +import org.opensearch.common.blobstore.EncryptedBlobStore; import org.opensearch.common.blobstore.fs.FsBlobContainer; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.common.blobstore.transfer.stream.OffsetRangeInputStream; +import org.opensearch.common.blobstore.transfer.stream.RateLimitingOffsetRangeInputStream; import org.opensearch.common.collect.Tuple; -import org.opensearch.common.component.AbstractLifecycleComponent; -import org.opensearch.common.compress.Compressor; -import org.opensearch.common.compress.CompressorFactory; -import org.opensearch.common.compress.CompressorType; -import 
org.opensearch.core.common.compress.NotXContentException; +import org.opensearch.common.compress.DeflateCompressor; import org.opensearch.common.io.Streams; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.lucene.store.InputStreamIndexInput; import org.opensearch.common.metrics.CounterMetric; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.lease.Releasable; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.compress.Compressor; +import org.opensearch.core.compress.CompressorRegistry; +import org.opensearch.core.compress.NotXContentException; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.index.snapshots.IndexShardSnapshotFailedException; import org.opensearch.core.util.BytesRefUtils; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.mapper.MapperService; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.snapshots.IndexShardRestoreFailedException; -import org.opensearch.core.index.snapshots.IndexShardSnapshotFailedException; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; -import org.opensearch.index.snapshots.blobstore.RemoteStoreShardShallowCopySnapshot; import org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshots; import org.opensearch.index.snapshots.blobstore.IndexShardSnapshot; import org.opensearch.index.snapshots.blobstore.RateLimitingInputStream; +import org.opensearch.index.snapshots.blobstore.RemoteStoreShardShallowCopySnapshot; import org.opensearch.index.snapshots.blobstore.SlicedInputStream; import org.opensearch.index.snapshots.blobstore.SnapshotFiles; +import org.opensearch.index.store.RemoteSegmentStoreDirectory; +import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.index.store.lockmanager.FileLockInfo; +import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; import org.opensearch.index.store.lockmanager.RemoteStoreLockManagerFactory; -import org.opensearch.index.store.lockmanager.RemoteStoreMetadataLockManager; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.repositories.IndexId; @@ -144,6 +150,7 @@ import java.io.InputStream; import 
java.nio.file.NoSuchFileException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; @@ -167,6 +174,7 @@ import java.util.stream.Stream; import static org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo.canonicalName; +import static org.opensearch.repositories.blobstore.ChecksumBlobStoreFormat.SNAPSHOT_ONLY_FORMAT_PARAMS; /** * BlobStore - based implementation of Snapshot Repository @@ -229,6 +237,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp Setting.Property.Deprecated ); + private static final Logger staticLogger = LogManager.getLogger(BlobStoreRepository.class); + /** * Setting to disable caching of the latest repository data. */ @@ -264,10 +274,10 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp public static final Setting<Boolean> COMPRESS_SETTING = Setting.boolSetting("compress", false, Setting.Property.NodeScope); - public static final Setting<CompressorType> COMPRESSION_TYPE_SETTING = new Setting<>( + public static final Setting<Compressor> COMPRESSION_TYPE_SETTING = new Setting<>( "compression_type", - CompressorType.DEFLATE.name().toLowerCase(Locale.ROOT), - s -> CompressorType.valueOf(s.toUpperCase(Locale.ROOT)), + DeflateCompressor.NAME.toLowerCase(Locale.ROOT), + s -> CompressorRegistry.getCompressor(s.toUpperCase(Locale.ROOT)), Setting.Property.NodeScope ); @@ -282,22 +292,39 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp */ public static final Setting<Boolean> READONLY_SETTING = Setting.boolSetting("readonly", false, Setting.Property.NodeScope); - protected final boolean supportURLRepo; + /*** + * Setting to set repository as system repository + */ + public static final Setting<Boolean> SYSTEM_REPOSITORY_SETTING = Setting.boolSetting( + "system_repository", + false, + Setting.Property.NodeScope + ); + + protected volatile boolean supportURLRepo; + + private volatile int maxShardBlobDeleteBatch; + + private volatile Compressor compressor; - private final int maxShardBlobDeleteBatch; + private volatile boolean cacheRepositoryData; - private final Compressor compressor; + private volatile RateLimiter snapshotRateLimiter; - private final boolean cacheRepositoryData; + private volatile RateLimiter restoreRateLimiter; - private final RateLimiter snapshotRateLimiter; + private volatile RateLimiter remoteUploadRateLimiter; - private final RateLimiter restoreRateLimiter; + private volatile RateLimiter remoteDownloadRateLimiter; private final CounterMetric snapshotRateLimitingTimeInNanos = new CounterMetric(); private final CounterMetric restoreRateLimitingTimeInNanos = new CounterMetric(); + private final CounterMetric remoteDownloadRateLimitingTimeInNanos = new CounterMetric(); + + private final CounterMetric remoteUploadRateLimitingTimeInNanos = new CounterMetric(); + public static final ChecksumBlobStoreFormat<Metadata> GLOBAL_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( "metadata", METADATA_NAME_FORMAT, @@ -333,7 +360,9 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp BlobStoreIndexShardSnapshots::fromXContent ); - private final boolean readOnly; + private volatile boolean readOnly; + + private final boolean isSystemRepository; private final Object lock = new Object(); @@ -341,7 +370,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp private final SetOnce<BlobStore> blobStore = new 
SetOnce<>(); - private final ClusterService clusterService; + protected final ClusterService clusterService; private final RecoverySettings recoverySettings; @@ -375,33 +404,54 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp /** * IO buffer size hint for reading and writing to the underlying blob store. */ - protected final int bufferSize; + protected volatile int bufferSize; /** * Constructs new BlobStoreRepository - * @param metadata The metadata for this repository including name and settings + * @param repositoryMetadata The metadata for this repository including name and settings * @param clusterService ClusterService */ protected BlobStoreRepository( - final RepositoryMetadata metadata, - final boolean compress, + final RepositoryMetadata repositoryMetadata, final NamedXContentRegistry namedXContentRegistry, final ClusterService clusterService, final RecoverySettings recoverySettings ) { - this.metadata = metadata; + // Read RepositoryMetadata as the first step + readRepositoryMetadata(repositoryMetadata); + + isSystemRepository = SYSTEM_REPOSITORY_SETTING.get(metadata.settings()); this.namedXContentRegistry = namedXContentRegistry; this.threadPool = clusterService.getClusterApplierService().threadPool(); this.clusterService = clusterService; this.recoverySettings = recoverySettings; - this.supportURLRepo = SUPPORT_URL_REPO.get(metadata.settings()); + } + + @Override + public void reload(RepositoryMetadata repositoryMetadata) { + readRepositoryMetadata(repositoryMetadata); + } + + /** + * Reloads the values derived from the Repository Metadata + * + * @param repositoryMetadata RepositoryMetadata instance to derive the values from + */ + private void readRepositoryMetadata(RepositoryMetadata repositoryMetadata) { + this.metadata = repositoryMetadata; + + supportURLRepo = SUPPORT_URL_REPO.get(metadata.settings()); snapshotRateLimiter = getRateLimiter(metadata.settings(), "max_snapshot_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB)); restoreRateLimiter = getRateLimiter(metadata.settings(), "max_restore_bytes_per_sec", ByteSizeValue.ZERO); + remoteUploadRateLimiter = getRateLimiter(metadata.settings(), "max_remote_upload_bytes_per_sec", ByteSizeValue.ZERO); + remoteDownloadRateLimiter = getRateLimiter(metadata.settings(), "max_remote_download_bytes_per_sec", ByteSizeValue.ZERO); readOnly = READONLY_SETTING.get(metadata.settings()); cacheRepositoryData = CACHE_REPOSITORY_DATA.get(metadata.settings()); bufferSize = Math.toIntExact(BUFFER_SIZE_SETTING.get(metadata.settings()).getBytes()); maxShardBlobDeleteBatch = MAX_SNAPSHOT_SHARD_BLOB_DELETE_BATCH_SIZE.get(metadata.settings()); - this.compressor = compress ? COMPRESSION_TYPE_SETTING.get(metadata.settings()).compressor() : CompressorFactory.NONE_COMPRESSOR; + compressor = COMPRESS_SETTING.get(metadata.settings()) + ? 
COMPRESSION_TYPE_SETTING.get(metadata.settings()) + : CompressorRegistry.none(); } @Override @@ -616,7 +666,7 @@ public void cloneRemoteStoreIndexShardSnapshot( RemoteStoreShardShallowCopySnapshot remStoreBasedShardMetadata = (RemoteStoreShardShallowCopySnapshot) indexShardSnapshot; String indexUUID = remStoreBasedShardMetadata.getIndexUUID(); String remoteStoreRepository = remStoreBasedShardMetadata.getRemoteStoreRepository(); - RemoteStoreMetadataLockManager remoteStoreMetadataLockManger = remoteStoreLockManagerFactory.newLockManager( + RemoteStoreLockManager remoteStoreMetadataLockManger = remoteStoreLockManagerFactory.newLockManager( remoteStoreRepository, indexUUID, String.valueOf(shardId.shardId()) @@ -741,6 +791,9 @@ public BlobStore blobStore() { } try { store = createBlobStore(); + if (metadata.cryptoMetadata() != null) { + store = new EncryptedBlobStore(store, metadata.cryptoMetadata()); + } } catch (RepositoryException e) { throw e; } catch (Exception e) { @@ -769,7 +822,7 @@ public BlobStore blobStore() { * @return true if compression is needed */ protected final boolean isCompress() { - return compressor != CompressorFactory.NONE_COMPRESSOR; + return compressor != CompressorRegistry.none(); } /** @@ -788,11 +841,21 @@ public RepositoryMetadata getMetadata() { return metadata; } + public NamedXContentRegistry getNamedXContentRegistry() { + return namedXContentRegistry; + } + + public Compressor getCompressor() { + return compressor; + } + @Override public RepositoryStats stats() { final BlobStore store = blobStore.get(); if (store == null) { return RepositoryStats.EMPTY_STATS; + } else if (store.extendedStats() != null && store.extendedStats().isEmpty() == false) { + return new RepositoryStats(store.extendedStats(), true); } return new RepositoryStats(store.stats()); } @@ -1038,6 +1101,78 @@ private void asyncCleanupUnlinkedShardLevelBlobs( } } + public static void remoteDirectoryCleanupAsync( + RemoteSegmentStoreDirectoryFactory remoteDirectoryFactory, + ThreadPool threadpool, + String remoteStoreRepoForIndex, + String indexUUID, + ShardId shardId, + String threadPoolName + ) { + threadpool.executor(threadPoolName) + .execute( + new RemoteStoreShardCleanupTask( + () -> RemoteSegmentStoreDirectory.remoteDirectoryCleanup( + remoteDirectoryFactory, + remoteStoreRepoForIndex, + indexUUID, + shardId + ), + indexUUID, + shardId + ) + ); + } + + protected void releaseRemoteStoreLockAndCleanup( + String shardId, + String shallowSnapshotUUID, + BlobContainer shardContainer, + RemoteStoreLockManagerFactory remoteStoreLockManagerFactory + ) throws IOException { + if (remoteStoreLockManagerFactory == null) { + return; + } + + RemoteStoreShardShallowCopySnapshot remoteStoreShardShallowCopySnapshot = REMOTE_STORE_SHARD_SHALLOW_COPY_SNAPSHOT_FORMAT.read( + shardContainer, + shallowSnapshotUUID, + namedXContentRegistry + ); + String indexUUID = remoteStoreShardShallowCopySnapshot.getIndexUUID(); + String remoteStoreRepoForIndex = remoteStoreShardShallowCopySnapshot.getRemoteStoreRepository(); + // Releasing lock file before deleting the shallow-snap-UUID file because in case of any failure while + // releasing the lock file, we would still have the shallow-snap-UUID file and that would be used during + // next delete operation for releasing this lock file + RemoteStoreLockManager remoteStoreMetadataLockManager = remoteStoreLockManagerFactory.newLockManager( + remoteStoreRepoForIndex, + indexUUID, + shardId + ); + 
remoteStoreMetadataLockManager.release(FileLockInfo.getLockInfoBuilder().withAcquirerId(shallowSnapshotUUID).build()); + logger.debug("Successfully released lock for shard {} of index with uuid {}", shardId, indexUUID); + if (!isIndexPresent(clusterService, indexUUID)) { + // Note: this is a temporary solution where snapshot deletion triggers remote store side cleanup if the + // index is already deleted. Shard cleanup will still happen asynchronously using the REMOTE_PURGE + // threadpool; if it fails, it could leave some stale files in the remote directory. This issue could + // even happen in cases of shard-level remote store data cleanup, which also happens asynchronously. + // In the long term, we plan to implement a remote store GC poller mechanism which will take care of + // such stale data. Related issue: https://github.com/opensearch-project/OpenSearch/issues/8469 + RemoteSegmentStoreDirectoryFactory remoteDirectoryFactory = new RemoteSegmentStoreDirectoryFactory( + remoteStoreLockManagerFactory.getRepositoriesService(), + threadPool + ); + remoteDirectoryCleanupAsync( + remoteDirectoryFactory, + threadPool, + remoteStoreRepoForIndex, + indexUUID, + new ShardId(Index.UNKNOWN_INDEX_NAME, indexUUID, Integer.parseInt(shardId)), + ThreadPool.Names.REMOTE_PURGE + ); + } + } + // When remoteStoreLockManagerFactory is non-null, while deleting the files, lock files are also released before deletion of respective // shallow-snap-UUID files. And if it is null, we just delete the stale shard blobs. private void executeStaleShardDelete( @@ -1049,39 +1184,34 @@ private void executeStaleShardDelete( if (filesToDelete != null) { threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(ActionRunnable.wrap(listener, l -> { try { - if (remoteStoreLockManagerFactory != null) { - for (String fileToDelete : filesToDelete) { - if (fileToDelete.contains(SHALLOW_SNAPSHOT_PREFIX)) { - String[] fileToDeletePath = fileToDelete.split("/"); - String indexId = fileToDeletePath[1]; - String shardId = fileToDeletePath[2]; - String shallowSnapBlob = fileToDeletePath[3]; - String snapshotUUID = shallowSnapBlob.substring( - SHALLOW_SNAPSHOT_PREFIX.length(), - shallowSnapBlob.length() - ".dat".length() - ); - BlobContainer shardContainer = blobStore().blobContainer(indicesPath().add(indexId).add(shardId)); - RemoteStoreShardShallowCopySnapshot remoteStoreShardShallowCopySnapshot = - REMOTE_STORE_SHARD_SHALLOW_COPY_SNAPSHOT_FORMAT.read( - shardContainer, - snapshotUUID, - namedXContentRegistry - ); - String indexUUID = remoteStoreShardShallowCopySnapshot.getIndexUUID(); - String remoteStoreRepoForIndex = remoteStoreShardShallowCopySnapshot.getRemoteStoreRepository(); - // Releasing lock file before deleting the shallow-snap-UUID file because in case of any failure while - // releasing the lock file, we would still have the shallow-snap-UUID file and that would be used during - // next delete operation for releasing this lock file - RemoteStoreMetadataLockManager remoteStoreMetadataLockManager = remoteStoreLockManagerFactory - .newLockManager(remoteStoreRepoForIndex, indexUUID, shardId); - remoteStoreMetadataLockManager.release( - FileLockInfo.getLockInfoBuilder().withAcquirerId(snapshotUUID).build() + // Filtering the files for which remote store lock release and cleanup succeeded; the + // remaining files, for which it failed, will be retried in the next snapshot delete run.
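// Editorial aside (not part of the PR): the loop below assumes a shallow-snapshot
// blob path of the form "indices/<indexId>/<shardId>/shallow-snap-<uuid>.dat",
// which is implied by the fileToDeletePath[1..3] accesses. A hedged sketch of the
// parsing, using a made-up path:
//
//   String[] parts = "indices/someIndexId/0/shallow-snap-someUuid.dat".split("/");
//   String indexId = parts[1];   // "someIndexId"
//   String shardId = parts[2];   // "0"
//   String blobName = parts[3];  // "shallow-snap-someUuid.dat"
//
// Files whose lock release fails are simply left out of eligibleFilesToDelete, so
// the next snapshot delete run will encounter them again and retry.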
+ List<String> eligibleFilesToDelete = new ArrayList<>(); + for (String fileToDelete : filesToDelete) { + if (fileToDelete.contains(SHALLOW_SNAPSHOT_PREFIX)) { + String[] fileToDeletePath = fileToDelete.split("/"); + String indexId = fileToDeletePath[1]; + String shardId = fileToDeletePath[2]; + String shallowSnapBlob = fileToDeletePath[3]; + String snapshotUUID = extractShallowSnapshotUUID(shallowSnapBlob).orElseThrow(); + BlobContainer shardContainer = blobStore().blobContainer(indicesPath().add(indexId).add(shardId)); + try { + releaseRemoteStoreLockAndCleanup(shardId, snapshotUUID, shardContainer, remoteStoreLockManagerFactory); + eligibleFilesToDelete.add(fileToDelete); + } catch (Exception e) { + logger.error( + "Failed to release lock or cleanup shard for indexID {}, shardID {} " + "and snapshot {}", + indexId, + shardId, + snapshotUUID ); } + } else { + eligibleFilesToDelete.add(fileToDelete); } } // Deleting the shard blobs - deleteFromContainer(blobContainer(), filesToDelete); + deleteFromContainer(blobContainer(), eligibleFilesToDelete); l.onResponse(null); } catch (Exception e) { logger.warn( @@ -1487,6 +1617,15 @@ private void cleanupStaleIndices( } } + private static boolean isIndexPresent(ClusterService clusterService, String indexUUID) { + for (final IndexMetadata indexMetadata : clusterService.state().metadata().getIndices().values()) { + if (indexUUID.equals(indexMetadata.getIndexUUID())) { + return true; + } + } + return false; + } + private void executeOneStaleIndexDelete( BlockingQueue<Map.Entry<String, BlobContainer>> staleIndicesToDelete, RemoteStoreLockManagerFactory remoteStoreLockManagerFactory, @@ -1500,31 +1639,17 @@ private void executeOneStaleIndexDelete( try { logger.debug("[{}] Found stale index [{}]. Cleaning it up", metadata.name(), indexSnId); if (remoteStoreLockManagerFactory != null) { - Map<String, BlobContainer> shardBlobs = indexEntry.getValue().children(); - if (!shardBlobs.isEmpty()) { - for (Map.Entry<String, BlobContainer> shardBlob : shardBlobs.entrySet()) { - Map<String, BlobMetadata> shardLevelBlobs = shardBlob.getValue().listBlobs(); - for (Map.Entry<String, BlobMetadata> shardLevelBlob : shardLevelBlobs.entrySet()) { - String blob = shardLevelBlob.getKey(); - String snapshotUUID = blob.substring(SHALLOW_SNAPSHOT_PREFIX.length(), blob.length() - ".dat".length()); - if (blob.startsWith(SHALLOW_SNAPSHOT_PREFIX) && blob.endsWith(".dat")) { - RemoteStoreShardShallowCopySnapshot remoteStoreShardShallowCopySnapshot = - REMOTE_STORE_SHARD_SHALLOW_COPY_SNAPSHOT_FORMAT.read( - shardBlob.getValue(), - snapshotUUID, - namedXContentRegistry - ); - String indexUUID = remoteStoreShardShallowCopySnapshot.getIndexUUID(); - String remoteStoreRepoForIndex = remoteStoreShardShallowCopySnapshot.getRemoteStoreRepository(); - // Releasing lock files before deleting the shallow-snap-UUID file because in case of any failure - // while releasing the lock file, we would still have the corresponding shallow-snap-UUID file - // and that would be used during next delete operation for releasing this stale lock file - RemoteStoreMetadataLockManager remoteStoreMetadataLockManager = remoteStoreLockManagerFactory - .newLockManager(remoteStoreRepoForIndex, indexUUID, shardBlob.getKey()); - remoteStoreMetadataLockManager.release( - FileLockInfo.getLockInfoBuilder().withAcquirerId(snapshotUUID).build() - ); - } + final Map<String, BlobContainer> shardBlobs = indexEntry.getValue().children(); + for (Map.Entry<String, BlobContainer> shardBlob : shardBlobs.entrySet()) { + for 
(String blob : shardBlob.getValue().listBlobs().keySet()) { + final Optional<String> snapshotUUID = extractShallowSnapshotUUID(blob); + if (snapshotUUID.isPresent()) { + releaseRemoteStoreLockAndCleanup( + shardBlob.getKey(), + snapshotUUID.get(), + shardBlob.getValue(), + remoteStoreLockManagerFactory + ); } } } @@ -1745,6 +1870,16 @@ public long getRestoreThrottleTimeInNanos() { return restoreRateLimitingTimeInNanos.count(); } + @Override + public long getRemoteUploadThrottleTimeInNanos() { + return remoteUploadRateLimitingTimeInNanos.count(); + } + + @Override + public long getRemoteDownloadThrottleTimeInNanos() { + return remoteDownloadRateLimitingTimeInNanos.count(); + } + protected void assertSnapshotOrGenericThread() { assert Thread.currentThread().getName().contains('[' + ThreadPool.Names.SNAPSHOT + ']') || Thread.currentThread().getName().contains('[' + ThreadPool.Names.GENERIC + ']') : "Expected current thread [" @@ -1764,8 +1899,10 @@ public String startVerification() { byte[] testBytes = Strings.toUTF8Bytes(seed); BlobContainer testContainer = blobStore().blobContainer(basePath().add(testBlobPrefix(seed))); BytesArray bytes = new BytesArray(testBytes); - try (InputStream stream = bytes.streamInput()) { - testContainer.writeBlobAtomic("master.dat", stream, bytes.length(), true); + if (isSystemRepository == false) { + try (InputStream stream = bytes.streamInput()) { + testContainer.writeBlobAtomic("master.dat", stream, bytes.length(), true); + } } return seed; } @@ -1916,7 +2053,7 @@ private void cacheRepositoryData(BytesReference updated, long generation) { if (cacheRepositoryData && bestEffortConsistency == false) { final BytesReference serialized; try { - serialized = CompressorFactory.defaultCompressor().compress(updated); + serialized = CompressorRegistry.defaultCompressor().compress(updated); final int len = serialized.length(); if (len > ByteSizeUnit.KB.toBytes(500)) { logger.debug( @@ -1952,9 +2089,9 @@ private void cacheRepositoryData(BytesReference updated, long generation) { } private RepositoryData repositoryDataFromCachedEntry(Tuple<Long, BytesReference> cacheEntry) throws IOException { - try (InputStream input = CompressorFactory.defaultCompressor().threadLocalInputStream(cacheEntry.v2().streamInput())) { + try (InputStream input = CompressorRegistry.defaultCompressor().threadLocalInputStream(cacheEntry.v2().streamInput())) { return RepositoryData.snapshotsFromXContent( - XContentType.JSON.xContent().createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, input), + MediaTypeRegistry.JSON.xContent().createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, input), cacheEntry.v1() ); } @@ -2047,7 +2184,7 @@ private RepositoryData getRepositoryData(long indexGen) { // EMPTY is safe here because RepositoryData#fromXContent calls namedObject try ( InputStream blob = blobContainer().readBlob(snapshotsIndexBlobName); - XContentParser parser = XContentType.JSON.xContent() + XContentParser parser = MediaTypeRegistry.JSON.xContent() .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, blob) ) { return RepositoryData.snapshotsFromXContent(parser, indexGen); @@ -2074,6 +2211,11 @@ public boolean isReadOnly() { return readOnly; } + @Override + public boolean isSystemRepository() { + return isSystemRepository; + } + /** * Writing a new index generation is a three step process. 
* First, the {@link RepositoryMetadata} entry for this repository is set into a pending state by incrementing its @@ -2433,7 +2575,7 @@ private RepositoryMetadata getRepoMetadata(ClusterState state) { * the next version number from when the index blob was written. Each individual index-N blob is * only written once and never overwritten. The highest numbered index-N blob is the latest one * that contains the current snapshots in the repository. - * + * <p> * Package private for testing */ long latestIndexBlobId() throws IOException { @@ -2972,20 +3114,80 @@ private static ActionListener<Void> fileQueueListener( }); } - private static InputStream maybeRateLimit(InputStream stream, Supplier<RateLimiter> rateLimiterSupplier, CounterMetric metric) { - return new RateLimitingInputStream(stream, rateLimiterSupplier, metric::inc); + private static void mayBeLogRateLimits(BlobStoreTransferContext context, RateLimiter rateLimiter, long time) { + logger.debug( + () -> new ParameterizedMessage( + "Rate limited blob store transfer, context [{}], for duration [{} ms] for configured rate [{} MBps]", + context, + TimeValue.timeValueNanos(time).millis(), + rateLimiter.getMBPerSec() + ) + ); + } + + private static InputStream maybeRateLimit( + InputStream stream, + Supplier<RateLimiter> rateLimiterSupplier, + CounterMetric metric, + BlobStoreTransferContext context + ) { + return new RateLimitingInputStream(stream, rateLimiterSupplier, (t) -> { + mayBeLogRateLimits(context, rateLimiterSupplier.get(), t); + metric.inc(t); + }); + } + + private static OffsetRangeInputStream maybeRateLimitRemoteTransfers( + OffsetRangeInputStream offsetRangeInputStream, + Supplier<RateLimiter> rateLimiterSupplier, + CounterMetric metric, + BlobStoreTransferContext context + ) { + return new RateLimitingOffsetRangeInputStream(offsetRangeInputStream, rateLimiterSupplier, (t) -> { + mayBeLogRateLimits(context, rateLimiterSupplier.get(), t); + metric.inc(t); + }); } public InputStream maybeRateLimitRestores(InputStream stream) { return maybeRateLimit( - maybeRateLimit(stream, () -> restoreRateLimiter, restoreRateLimitingTimeInNanos), + maybeRateLimit(stream, () -> restoreRateLimiter, restoreRateLimitingTimeInNanos, BlobStoreTransferContext.SNAPSHOT_RESTORE), + recoverySettings::rateLimiter, + restoreRateLimitingTimeInNanos, + BlobStoreTransferContext.SNAPSHOT_RESTORE + ); + } + + public OffsetRangeInputStream maybeRateLimitRemoteUploadTransfers(OffsetRangeInputStream offsetRangeInputStream) { + return maybeRateLimitRemoteTransfers( + offsetRangeInputStream, + () -> remoteUploadRateLimiter, + remoteUploadRateLimitingTimeInNanos, + BlobStoreTransferContext.REMOTE_UPLOAD + ); + } + + public InputStream maybeRateLimitRemoteDownloadTransfers(InputStream inputStream) { + return maybeRateLimit( + maybeRateLimit( + inputStream, + () -> remoteDownloadRateLimiter, + remoteDownloadRateLimitingTimeInNanos, + BlobStoreTransferContext.REMOTE_DOWNLOAD + ), recoverySettings::rateLimiter, - restoreRateLimitingTimeInNanos + remoteDownloadRateLimitingTimeInNanos, + BlobStoreTransferContext.REMOTE_DOWNLOAD ); } public InputStream maybeRateLimitSnapshots(InputStream stream) { - return maybeRateLimit(stream, () -> snapshotRateLimiter, snapshotRateLimitingTimeInNanos); + return maybeRateLimit(stream, () -> snapshotRateLimiter, snapshotRateLimitingTimeInNanos, BlobStoreTransferContext.SNAPSHOT); + } + + @Override + public List<Setting<?>> getRestrictedSystemRepositorySettings() { + return Arrays.asList(SYSTEM_REPOSITORY_SETTING, READONLY_SETTING, 
REMOTE_STORE_INDEX_SHALLOW_COPY); } @Override @@ -3009,7 +3211,9 @@ public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, In @Override public void verify(String seed, DiscoveryNode localNode) { - assertSnapshotOrGenericThread(); + if (isSystemRepository == false) { + assertSnapshotOrGenericThread(); + } if (isReadOnly()) { try { latestIndexBlobId(); @@ -3034,30 +3238,33 @@ public void verify(String seed, DiscoveryNode localNode) { exp ); } - try (InputStream masterDat = testBlobContainer.readBlob("master.dat")) { - final String seedRead = Streams.readFully(masterDat).utf8ToString(); - if (seedRead.equals(seed) == false) { + + if (isSystemRepository == false) { + try (InputStream masterDat = testBlobContainer.readBlob("master.dat")) { + final String seedRead = Streams.readFully(masterDat).utf8ToString(); + if (seedRead.equals(seed) == false) { + throw new RepositoryVerificationException( + metadata.name(), + "Seed read from master.dat was [" + seedRead + "] but expected seed [" + seed + "]" + ); + } + } catch (NoSuchFileException e) { throw new RepositoryVerificationException( metadata.name(), - "Seed read from master.dat was [" + seedRead + "] but expected seed [" + seed + "]" + "a file written by cluster-manager to the store [" + + blobStore() + + "] cannot be accessed on the node [" + + localNode + + "]. " + + "This might indicate that the store [" + + blobStore() + + "] is not shared between this node and the cluster-manager node or " + + "that permissions on the store don't allow reading files written by the cluster-manager node", + e ); + } catch (Exception e) { + throw new RepositoryVerificationException(metadata.name(), "Failed to verify repository", e); } - } catch (NoSuchFileException e) { - throw new RepositoryVerificationException( - metadata.name(), - "a file written by cluster-manager to the store [" - + blobStore() - + "] cannot be accessed on the node [" - + localNode - + "]. " - + "This might indicate that the store [" - + blobStore() - + "] is not shared between this node and the cluster-manager node or " - + "that permissions on the store don't allow reading files written by the cluster-manager node", - e - ); - } catch (Exception e) { - throw new RepositoryVerificationException(metadata.name(), "Failed to verify repository", e); } } } @@ -3154,7 +3361,12 @@ private void writeShardIndexBlobAtomic( () -> new ParameterizedMessage("[{}] Writing shard index [{}] to [{}]", metadata.name(), indexGeneration, shardContainer.path()) ); final String blobName = INDEX_SHARD_SNAPSHOTS_FORMAT.blobName(String.valueOf(indexGeneration)); - writeAtomic(shardContainer, blobName, INDEX_SHARD_SNAPSHOTS_FORMAT.serialize(updatedSnapshots, blobName, compressor), true); + writeAtomic( + shardContainer, + blobName, + INDEX_SHARD_SNAPSHOTS_FORMAT.serialize(updatedSnapshots, blobName, compressor, SNAPSHOT_ONLY_FORMAT_PARAMS), + true + ); } // Unused blobs are all previous index-, data- and meta-blobs and that are not referenced by the new index- as well as all @@ -3175,12 +3387,7 @@ private static List<String> unusedBlobs( blob.substring(SNAPSHOT_PREFIX.length(), blob.length() - ".dat".length()) ) == false) || (remoteStoreLockManagerFactory != null - ? 
(blob.startsWith(SHALLOW_SNAPSHOT_PREFIX) - && blob.endsWith(".dat") - && survivingSnapshotUUIDs.contains( - blob.substring(SHALLOW_SNAPSHOT_PREFIX.length(), blob.length() - ".dat".length()) - ) == false) - : false) + && extractShallowSnapshotUUID(blob).map(survivingSnapshotUUIDs::contains).orElse(false)) || (blob.startsWith(UPLOADED_DATA_BLOB_PREFIX) && updatedSnapshots.findNameFile(canonicalName(blob)) == null) || FsBlobContainer.isTempBlobName(blob) ) @@ -3322,6 +3529,13 @@ private static void failStoreIfCorrupted(Store store, Exception e) { } } + private static Optional<String> extractShallowSnapshotUUID(String blobName) { + if (blobName.startsWith(SHALLOW_SNAPSHOT_PREFIX)) { + return Optional.of(blobName.substring(SHALLOW_SNAPSHOT_PREFIX.length(), blobName.length() - ".dat".length())); + } + return Optional.empty(); + } + /** * The result of removing a snapshot from a shard folder in the repository. */ @@ -3346,4 +3560,22 @@ private static final class ShardSnapshotMetaDeleteResult { this.blobsToDelete = blobsToDelete; } } + + enum BlobStoreTransferContext { + REMOTE_UPLOAD("remote_upload"), + REMOTE_DOWNLOAD("remote_download"), + SNAPSHOT("snapshot"), + SNAPSHOT_RESTORE("snapshot_restore"); + + private final String name; + + BlobStoreTransferContext(String name) { + this.name = name; + } + + @Override + public String toString() { + return name; + } + } } diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java b/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java index cb3d779ece4a9..3e6052a5ef820 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java @@ -42,22 +42,28 @@ import org.apache.lucene.util.BytesRef; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.CheckedFunction; +import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; import org.opensearch.common.blobstore.BlobContainer; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.compress.Compressor; +import org.opensearch.common.blobstore.stream.write.WritePriority; +import org.opensearch.common.blobstore.transfer.RemoteTransferContainer; +import org.opensearch.common.blobstore.transfer.stream.OffsetRangeIndexInputStream; import org.opensearch.common.io.Streams; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.lucene.store.ByteArrayIndexInput; import org.opensearch.common.lucene.store.IndexOutputOutputStream; import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.compress.Compressor; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.gateway.CorruptStateException; +import org.opensearch.index.store.exception.ChecksumCombinationException; import 
org.opensearch.snapshots.SnapshotInfo; import java.io.IOException; @@ -67,6 +73,8 @@ import java.util.Locale; import java.util.Map; +import static org.opensearch.common.blobstore.transfer.RemoteTransferContainer.checksumOfChecksum; + /** * Snapshot metadata file format used in v2.0 and above * @@ -75,7 +83,7 @@ public final class ChecksumBlobStoreFormat<T extends ToXContent> { // Serialization parameters to specify correct context for metadata serialization - private static final ToXContent.Params SNAPSHOT_ONLY_FORMAT_PARAMS; + public static final ToXContent.Params SNAPSHOT_ONLY_FORMAT_PARAMS; static { Map<String, String> snapshotOnlyParams = new HashMap<>(); @@ -162,12 +170,126 @@ public T deserialize(String blobName, NamedXContentRegistry namedXContentRegistr * @param compressor whether to use compression */ public void write(final T obj, final BlobContainer blobContainer, final String name, final Compressor compressor) throws IOException { + write(obj, blobContainer, name, compressor, SNAPSHOT_ONLY_FORMAT_PARAMS); + } + + /** + * Writes a blob, resolving the blob name using the {@link #blobName} method. + * <p> + * The blob will optionally be compressed. + * + * @param obj object to be serialized + * @param blobContainer blob container + * @param name blob name + * @param compressor whether to use compression + * @param params ToXContent params + */ + public void write( + final T obj, + final BlobContainer blobContainer, + final String name, + final Compressor compressor, + final ToXContent.Params params + ) throws IOException { final String blobName = blobName(name); - final BytesReference bytes = serialize(obj, blobName, compressor); + final BytesReference bytes = serialize(obj, blobName, compressor, params); blobContainer.writeBlob(blobName, bytes.streamInput(), bytes.length(), false); } - public BytesReference serialize(final T obj, final String blobName, final Compressor compressor) throws IOException { + /** + * Internally calls {@link #writeAsyncWithPriority} with {@link WritePriority#NORMAL} + */ + public void writeAsync( + final T obj, + final BlobContainer blobContainer, + final String name, + final Compressor compressor, + ActionListener<Void> listener, + final ToXContent.Params params + ) throws IOException { + // use NORMAL priority by default + this.writeAsyncWithPriority(obj, blobContainer, name, compressor, WritePriority.NORMAL, listener, params); + } + + /** + * Internally calls {@link #writeAsyncWithPriority} with {@link WritePriority#URGENT} + * <p> + * <b>NOTE:</b> We use this method to upload urgent priority objects like cluster state to remote stores. + * Use {@link #writeAsync(ToXContent, BlobContainer, String, Compressor, ActionListener, ToXContent.Params)} for + * other use cases. + */ + public void writeAsyncWithUrgentPriority( + final T obj, + final BlobContainer blobContainer, + final String name, + final Compressor compressor, + ActionListener<Void> listener, + final ToXContent.Params params + ) throws IOException { + this.writeAsyncWithPriority(obj, blobContainer, name, compressor, WritePriority.URGENT, listener, params); + } + + /** + * Writes a blob, resolving the blob name using the {@link #blobName} method, with the specified + * {@link WritePriority}. Leverages the multipart upload if supported by the blobContainer.
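To make the new async write surface concrete, a hedged usage sketch (not from the PR): the variable names, blob name prefix, and listener bodies are assumptions; the argument order matches the writeAsync method added above.

ChecksumBlobStoreFormat<SnapshotInfo> format = ...; // hypothetical, already-constructed instance
format.writeAsync(
    snapshotInfo,
    blobContainer, // multipart upload is used only if this is an AsyncMultiStreamBlobContainer
    "snap-" + snapshotUUID, // illustrative blob name
    compressor,
    ActionListener.wrap(
        r -> logger.debug("metadata blob uploaded"),
        e -> logger.warn("metadata blob upload failed", e)
    ),
    ChecksumBlobStoreFormat.SNAPSHOT_ONLY_FORMAT_PARAMS
);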
+ * + * @param obj object to be serialized + * @param blobContainer blob container + * @param name blob name + * @param compressor whether to use compression + * @param priority write priority to be used + * @param listener listener to listen to write result + * @param params ToXContent params + */ + private void writeAsyncWithPriority( + final T obj, + final BlobContainer blobContainer, + final String name, + final Compressor compressor, + final WritePriority priority, + ActionListener<Void> listener, + final ToXContent.Params params + ) throws IOException { + if (blobContainer instanceof AsyncMultiStreamBlobContainer == false) { + write(obj, blobContainer, name, compressor, params); + listener.onResponse(null); + return; + } + final String blobName = blobName(name); + final BytesReference bytes = serialize(obj, blobName, compressor, params); + final String resourceDescription = "ChecksumBlobStoreFormat.writeAsyncWithPriority(blob=\"" + blobName + "\")"; + try (IndexInput input = new ByteArrayIndexInput(resourceDescription, BytesReference.toBytes(bytes))) { + long expectedChecksum; + try { + expectedChecksum = checksumOfChecksum(input.clone(), 8); + } catch (Exception e) { + throw new ChecksumCombinationException( + "Potentially corrupted file: Checksum combination failed while combining stored checksum " + + "and calculated checksum of stored checksum", + resourceDescription, + e + ); + } + + try ( + RemoteTransferContainer remoteTransferContainer = new RemoteTransferContainer( + blobName, + blobName, + bytes.length(), + true, + priority, + (size, position) -> new OffsetRangeIndexInputStream(input, size, position), + expectedChecksum, + ((AsyncMultiStreamBlobContainer) blobContainer).remoteIntegrityCheckSupported() + ) + ) { + ((AsyncMultiStreamBlobContainer) blobContainer).asyncBlobUpload(remoteTransferContainer.createWriteContext(), listener); + } + } + } + + public BytesReference serialize(final T obj, final String blobName, final Compressor compressor, final ToXContent.Params params) + throws IOException { try (BytesStreamOutput outputStream = new BytesStreamOutput()) { try ( OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput( @@ -185,13 +307,13 @@ public void close() throws IOException { // in order to write the footer we need to prevent closing the actual index input. 
} }; - XContentBuilder builder = XContentFactory.contentBuilder( + XContentBuilder builder = MediaTypeRegistry.contentBuilder( XContentType.SMILE, compressor.threadLocalOutputStream(indexOutputOutputStream) ) ) { builder.startObject(); - obj.toXContent(builder, SNAPSHOT_ONLY_FORMAT_PARAMS); + obj.toXContent(builder, params); builder.endObject(); } CodecUtil.writeFooter(indexOutput); diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/FileRestoreContext.java b/server/src/main/java/org/opensearch/repositories/blobstore/FileRestoreContext.java index 080485f750173..7e6e3a0ad3b79 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/FileRestoreContext.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/FileRestoreContext.java @@ -34,9 +34,9 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.util.iterable.Iterables; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.snapshots.IndexShardRestoreFailedException; import org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/MeteredBlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/MeteredBlobStoreRepository.java index 54f226e81025e..0651ff586d412 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/MeteredBlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/MeteredBlobStoreRepository.java @@ -34,12 +34,10 @@ import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.UUIDs; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.repositories.RepositoryInfo; import org.opensearch.repositories.RepositoryStatsSnapshot; -import org.opensearch.threadpool.ThreadPool; import java.util.Map; @@ -53,29 +51,24 @@ public abstract class MeteredBlobStoreRepository extends BlobStoreRepository { public MeteredBlobStoreRepository( RepositoryMetadata metadata, - boolean compress, NamedXContentRegistry namedXContentRegistry, ClusterService clusterService, RecoverySettings recoverySettings, Map<String, String> location ) { - super(metadata, compress, namedXContentRegistry, clusterService, recoverySettings); - ThreadPool threadPool = clusterService.getClusterApplierService().threadPool(); - this.repositoryInfo = new RepositoryInfo( - UUIDs.randomBase64UUID(), - metadata.name(), - metadata.type(), - location, - threadPool.absoluteTimeInMillis() - ); + super(metadata, namedXContentRegistry, clusterService, recoverySettings); + this.repositoryInfo = new RepositoryInfo(metadata.name(), metadata.type(), location); } - public RepositoryStatsSnapshot statsSnapshot() { - return new RepositoryStatsSnapshot(repositoryInfo, stats(), RepositoryStatsSnapshot.UNKNOWN_CLUSTER_VERSION, false); + @Override + public void reload(RepositoryMetadata repositoryMetadata) { + super.reload(repositoryMetadata); + + // Not adding any additional reload logic here is intentional as the constructor only + // initializes the repositoryInfo from the repo metadata, which cannot be changed. 
} - public RepositoryStatsSnapshot statsSnapshotForArchival(long clusterVersion) { - RepositoryInfo stoppedRepoInfo = repositoryInfo.stopped(threadPool.absoluteTimeInMillis()); - return new RepositoryStatsSnapshot(stoppedRepoInfo, stats(), clusterVersion, true); + public RepositoryStatsSnapshot statsSnapshot() { + return new RepositoryStatsSnapshot(repositoryInfo, stats(), RepositoryStatsSnapshot.UNKNOWN_CLUSTER_VERSION); } } diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/RemoteStoreShardCleanupTask.java b/server/src/main/java/org/opensearch/repositories/blobstore/RemoteStoreShardCleanupTask.java new file mode 100644 index 0000000000000..df61c1ca3263b --- /dev/null +++ b/server/src/main/java/org/opensearch/repositories/blobstore/RemoteStoreShardCleanupTask.java @@ -0,0 +1,63 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.repositories.blobstore; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.core.index.shard.ShardId; + +import java.util.Map; +import java.util.Set; + +import static org.opensearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; +import static org.opensearch.common.util.concurrent.ConcurrentCollections.newConcurrentSet; + +/** + A Runnable wrapper to make sure that for a given shard only one cleanup task runs at a time. + */ +public class RemoteStoreShardCleanupTask implements Runnable { + private final Runnable task; + private final String shardIdentifier; + final static Set<String> ongoingRemoteDirectoryCleanups = newConcurrentSet(); + final static Map<String, Runnable> pendingRemoteDirectoryCleanups = newConcurrentMap(); + private static final Logger staticLogger = LogManager.getLogger(RemoteStoreShardCleanupTask.class); + + public RemoteStoreShardCleanupTask(Runnable task, String indexUUID, ShardId shardId) { + this.task = task; + this.shardIdentifier = indexShardIdentifier(indexUUID, shardId); + } + + private static String indexShardIdentifier(String indexUUID, ShardId shardId) { + return String.join("/", indexUUID, String.valueOf(shardId.id())); + } + + @Override + public void run() { + // TODO: this is the best effort at the moment, since there is still a known race condition in this + // method which needs to be handled: a thread may have just come out of the while loop and removed the + // entry from ongoingRemoteDirectoryCleanups, while another thread adds a new pending task to the map. + // We would need to introduce semaphores/locks to avoid that situation, which introduces the overhead of lock object + // cleanups. However, there is no scenario where two threads run the cleanup for the same shard at the same time.
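// Editorial aside (not part of the PR): the two static maps above give at-most-one
// running cleanup per shard, with pending work coalesced to the latest task. A hedged
// submission sketch, mirroring remoteDirectoryCleanupAsync from earlier in this diff
// ("someIndexUUID" and the no-op Runnable are placeholders):
//
//   ShardId shardId = new ShardId(Index.UNKNOWN_INDEX_NAME, "someIndexUUID", 0);
//   Runnable cleanup = () -> { /* delete stale remote segment files */ };
//   threadPool.executor(ThreadPool.Names.REMOTE_PURGE)
//       .execute(new RemoteStoreShardCleanupTask(cleanup, "someIndexUUID", shardId));
//
// A second submission for the same shard while one is running is parked in
// pendingRemoteDirectoryCleanups and drained by the thread already inside run().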
+ // <issue-link> + if (pendingRemoteDirectoryCleanups.put(shardIdentifier, task) == null) { + if (ongoingRemoteDirectoryCleanups.add(shardIdentifier)) { + while (pendingRemoteDirectoryCleanups.containsKey(shardIdentifier)) { + Runnable newTask = pendingRemoteDirectoryCleanups.get(shardIdentifier); + pendingRemoteDirectoryCleanups.remove(shardIdentifier); + newTask.run(); + } + ongoingRemoteDirectoryCleanups.remove(shardIdentifier); + } else { + staticLogger.debug("one task is already ongoing for shard {}, we can leave entry in pending", shardIdentifier); + } + } else { + staticLogger.debug("one cleanup task for shard {} is already in pending, we can skip this task", shardIdentifier); + } + } +} diff --git a/server/src/main/java/org/opensearch/repositories/fs/FsRepository.java b/server/src/main/java/org/opensearch/repositories/fs/FsRepository.java index 0b9989ff64d9c..4a9a91336ec1d 100644 --- a/server/src/main/java/org/opensearch/repositories/fs/FsRepository.java +++ b/server/src/main/java/org/opensearch/repositories/fs/FsRepository.java @@ -41,8 +41,8 @@ import org.opensearch.common.blobstore.fs.FsBlobStore; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; import org.opensearch.indices.recovery.RecoverySettings; @@ -50,6 +50,8 @@ import org.opensearch.repositories.blobstore.BlobStoreRepository; import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; import java.util.function.Function; /** @@ -61,7 +63,6 @@ * <dt>{@code concurrent_streams}</dt><dd>Number of concurrent read/write streams (per repository on each node). Defaults to 5.</dd> * <dt>{@code chunk_size}</dt><dd>Large files can be divided into chunks. This parameter specifies the chunk size. * Defaults to not chunked.</dd> - * <dt>{@code compress}</dt><dd>If set to true metadata files will be stored compressed. Defaults to false.</dd> * </dl> * * @opensearch.internal @@ -101,11 +102,11 @@ public class FsRepository extends BlobStoreRepository { public static final Setting<String> BASE_PATH_SETTING = Setting.simpleString("base_path"); - private final Environment environment; + protected final Environment environment; - private ByteSizeValue chunkSize; + protected ByteSizeValue chunkSize; - private final BlobPath basePath; + protected BlobPath basePath; /** * Constructs a shared file system repository.
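Before the constructor hunk that follows: the settings consumed by the new validateLocation()/readMetadata() split can be supplied at repository registration time. A hedged sketch (the repository name and paths are made up; the setting keys match LOCATION_SETTING, CHUNK_SIZE_SETTING, and BASE_PATH_SETTING in this file):

Settings settings = Settings.builder()
    .put("location", "/mnt/backups/my-fs-repo") // checked by validateLocation()
    .put("chunk_size", "512mb")                 // read by readMetadata(); falls back to the node-level default
    .put("base_path", "prod/cluster-a")         // optional; empty means BlobPath.cleanPath()
    .build();
RepositoryMetadata metadata = new RepositoryMetadata("my-fs-repo", FsRepository.TYPE, settings);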
@@ -117,8 +118,27 @@ public FsRepository( ClusterService clusterService, RecoverySettings recoverySettings ) { - super(metadata, calculateCompress(metadata, environment), namedXContentRegistry, clusterService, recoverySettings); + super(metadata, namedXContentRegistry, clusterService, recoverySettings); this.environment = environment; + validateLocation(); + readMetadata(); + } + + protected void readMetadata() { + if (CHUNK_SIZE_SETTING.exists(metadata.settings())) { + this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings()); + } else { + this.chunkSize = REPOSITORIES_CHUNK_SIZE_SETTING.get(environment.settings()); + } + final String basePath = BASE_PATH_SETTING.get(metadata.settings()); + if (Strings.hasLength(basePath)) { + this.basePath = new BlobPath().add(basePath); + } else { + this.basePath = BlobPath.cleanPath(); + } + } + + protected void validateLocation() { String location = REPOSITORIES_LOCATION_SETTING.get(metadata.settings()); if (location.isEmpty()) { logger.warn( @@ -151,24 +171,6 @@ public FsRepository( ); } } - - if (CHUNK_SIZE_SETTING.exists(metadata.settings())) { - this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings()); - } else { - this.chunkSize = REPOSITORIES_CHUNK_SIZE_SETTING.get(environment.settings()); - } - final String basePath = BASE_PATH_SETTING.get(metadata.settings()); - if (Strings.hasLength(basePath)) { - this.basePath = new BlobPath().add(basePath); - } else { - this.basePath = BlobPath.cleanPath(); - } - } - - private static boolean calculateCompress(RepositoryMetadata metadata, Environment environment) { - return COMPRESS_SETTING.exists(metadata.settings()) - ? COMPRESS_SETTING.get(metadata.settings()) - : REPOSITORIES_COMPRESS_SETTING.get(environment.settings()); } @Override @@ -187,4 +189,12 @@ protected ByteSizeValue chunkSize() { public BlobPath basePath() { return basePath; } + + @Override + public List<Setting<?>> getRestrictedSystemRepositorySettings() { + List<Setting<?>> restrictedSettings = new ArrayList<>(); + restrictedSettings.addAll(super.getRestrictedSystemRepositorySettings()); + restrictedSettings.add(LOCATION_SETTING); + return restrictedSettings; + } } diff --git a/server/src/main/java/org/opensearch/repositories/fs/ReloadableFsRepository.java b/server/src/main/java/org/opensearch/repositories/fs/ReloadableFsRepository.java new file mode 100644 index 0000000000000..e8020a432a58a --- /dev/null +++ b/server/src/main/java/org/opensearch/repositories/fs/ReloadableFsRepository.java @@ -0,0 +1,202 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.repositories.fs; + +import org.opensearch.OpenSearchException; +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Randomness; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.blobstore.BlobStore; +import org.opensearch.common.blobstore.fs.FsBlobContainer; +import org.opensearch.common.blobstore.fs.FsBlobStore; +import org.opensearch.common.settings.Setting; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.env.Environment; +import org.opensearch.indices.recovery.RecoverySettings; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Path; +import java.util.Random; + +/** + * Extension of {@link FsRepository} that can be reloaded in-place, and supports failing operations and slowing them down + * + * @opensearch.internal + */ +public class ReloadableFsRepository extends FsRepository { + public static final String TYPE = "reloadable-fs"; + + private final FailSwitch fail; + private final SlowDownWriteSwitch slowDown; + + public static final Setting<Integer> REPOSITORIES_FAILRATE_SETTING = Setting.intSetting( + "repositories.fail.rate", + 0, + 0, + 100, + Setting.Property.NodeScope + ); + + public static final Setting<Integer> REPOSITORIES_SLOWDOWN_SETTING = Setting.intSetting( + "repositories.slowdown", + 0, + 0, + 100, + Setting.Property.NodeScope + ); + + /** + * Constructs a shared file system repository that is reloadable in-place. + */ + public ReloadableFsRepository( + RepositoryMetadata metadata, + Environment environment, + NamedXContentRegistry namedXContentRegistry, + ClusterService clusterService, + RecoverySettings recoverySettings + ) { + super(metadata, environment, namedXContentRegistry, clusterService, recoverySettings); + fail = new FailSwitch(); + fail.failRate(REPOSITORIES_FAILRATE_SETTING.get(metadata.settings())); + slowDown = new SlowDownWriteSwitch(); + slowDown.setSleepSeconds(REPOSITORIES_SLOWDOWN_SETTING.get(metadata.settings())); + readRepositoryMetadata(); + } + + @Override + public boolean isReloadable() { + return true; + } + + @Override + public void reload(RepositoryMetadata repositoryMetadata) { + super.reload(repositoryMetadata); + readRepositoryMetadata(); + } + + private void readRepositoryMetadata() { + fail.failRate(REPOSITORIES_FAILRATE_SETTING.get(metadata.settings())); + slowDown.setSleepSeconds(REPOSITORIES_SLOWDOWN_SETTING.get(metadata.settings())); + } + + protected BlobStore createBlobStore() throws Exception { + final String location = REPOSITORIES_LOCATION_SETTING.get(getMetadata().settings()); + final Path locationFile = environment.resolveRepoFile(location); + return new ThrowingBlobStore(bufferSize, locationFile, isReadOnly(), fail, slowDown); + } + + // Returns a random integer between min and max (inclusive).
+ public static int randomIntBetween(int min, int max) { + Random random = Randomness.get(); + return random.nextInt(max - min + 1) + min; + } + + static class FailSwitch { + private volatile int failRate; + private volatile boolean onceFailedFailAlways = false; + + public boolean fail() { + final int rnd = randomIntBetween(1, 100); + boolean fail = rnd <= failRate; + if (fail && onceFailedFailAlways) { + failAlways(); + } + return fail; + } + + public void failAlways() { + failRate = 100; + } + + public void failRate(int rate) { + failRate = rate; + } + + public void onceFailedFailAlways() { + onceFailedFailAlways = true; + } + } + + static class SlowDownWriteSwitch { + private volatile int sleepSeconds; + + public void setSleepSeconds(int sleepSeconds) { + this.sleepSeconds = sleepSeconds; + } + + public int getSleepSeconds() { + return sleepSeconds; + } + } + + private static class ThrowingBlobStore extends FsBlobStore { + + private final FailSwitch fail; + private final SlowDownWriteSwitch slowDown; + + public ThrowingBlobStore(int bufferSizeInBytes, Path path, boolean readonly, FailSwitch fail, SlowDownWriteSwitch slowDown) + throws IOException { + super(bufferSizeInBytes, path, readonly); + this.fail = fail; + this.slowDown = slowDown; + } + + @Override + public BlobContainer blobContainer(BlobPath path) { + try { + return new ThrowingBlobContainer(this, path, buildAndCreate(path), fail, slowDown); + } catch (IOException ex) { + throw new OpenSearchException("failed to create blob container", ex); + } + } + } + + private static class ThrowingBlobContainer extends FsBlobContainer { + + private final FailSwitch fail; + private final SlowDownWriteSwitch slowDown; + + public ThrowingBlobContainer(FsBlobStore blobStore, BlobPath blobPath, Path path, FailSwitch fail, SlowDownWriteSwitch slowDown) { + super(blobStore, blobPath, path); + this.fail = fail; + this.slowDown = slowDown; + } + + @Override + public void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize, boolean failIfAlreadyExists) + throws IOException { + checkFailRateAndSleep(blobName); + super.writeBlobAtomic(blobName, inputStream, blobSize, failIfAlreadyExists); + } + + private void checkFailRateAndSleep(String blobName) throws IOException { + if (fail.fail() && blobName.contains(".dat") == false) { + throw new IOException("blob container throwing error"); + } + if (slowDown.getSleepSeconds() > 0) { + try { + Thread.sleep(slowDown.getSleepSeconds() * 1000L); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + } + + @Override + public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException { + checkFailRateAndSleep(blobName); + super.writeBlob(blobName, inputStream, blobSize, failIfAlreadyExists); + } + } +} diff --git a/server/src/main/java/org/opensearch/rest/AbstractRestChannel.java b/server/src/main/java/org/opensearch/rest/AbstractRestChannel.java index dcee6500325b9..11a116e8c858d 100644 --- a/server/src/main/java/org/opensearch/rest/AbstractRestChannel.java +++ b/server/src/main/java/org/opensearch/rest/AbstractRestChannel.java @@ -36,9 +36,8 @@ import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.MediaType; -import org.opensearch.core.xcontent.MediaTypeParserRegistry; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import 
org.opensearch.common.xcontent.XContentFactory; import java.io.IOException; import java.io.OutputStream; @@ -132,7 +131,7 @@ public XContentBuilder newBuilder(@Nullable MediaType requestContentType, @Nulla responseContentType = requestContentType; } else { // default to JSON output when all else fails - responseContentType = MediaTypeParserRegistry.getDefaultMediaType(); + responseContentType = MediaTypeRegistry.getDefaultMediaType(); } } @@ -145,12 +144,7 @@ public XContentBuilder newBuilder(@Nullable MediaType requestContentType, @Nulla } OutputStream unclosableOutputStream = Streams.flushOnCloseStream(bytesOutput()); - XContentBuilder builder = new XContentBuilder( - XContentFactory.xContent(responseContentType), - unclosableOutputStream, - includes, - excludes - ); + XContentBuilder builder = new XContentBuilder(responseContentType.xContent(), unclosableOutputStream, includes, excludes); if (pretty) { builder.prettyPrint().lfAtEnd(); } diff --git a/server/src/main/java/org/opensearch/rest/MethodHandlers.java b/server/src/main/java/org/opensearch/rest/MethodHandlers.java index 8c29bf2e66036..30221705e1aba 100644 --- a/server/src/main/java/org/opensearch/rest/MethodHandlers.java +++ b/server/src/main/java/org/opensearch/rest/MethodHandlers.java @@ -6,82 +6,24 @@ * compatible open source license. */ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - package org.opensearch.rest; -import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; -import java.util.HashMap; -import java.util.Map; import java.util.Set; /** - * Encapsulate multiple handlers for the same path, allowing different handlers for different HTTP verbs. - * - * @opensearch.api + * A collection of REST method handlers. */ -final class MethodHandlers { - - private final String path; - private final Map<RestRequest.Method, RestHandler> methodHandlers; - - MethodHandlers(String path, RestHandler handler, RestRequest.Method... methods) { - this.path = path; - this.methodHandlers = new HashMap<>(methods.length); - for (RestRequest.Method method : methods) { - methodHandlers.put(method, handler); - } - } - - /** - * Add a handler for an additional array of methods. Note that {@code MethodHandlers} - * does not allow replacing the handler for an already existing method. - */ - MethodHandlers addMethods(RestHandler handler, RestRequest.Method... 
methods) { - for (RestRequest.Method method : methods) { - RestHandler existing = methodHandlers.putIfAbsent(method, handler); - if (existing != null) { - throw new IllegalArgumentException("Cannot replace existing handler for [" + path + "] for method: " + method); - } - } - return this; - } - +@PublicApi(since = "2.12.0") +public interface MethodHandlers { /** - * Returns the handler for the given method or {@code null} if none exists. + * Return a set of all valid HTTP methods for the particular path. */ - @Nullable - RestHandler getHandler(RestRequest.Method method) { - return methodHandlers.get(method); - } + Set<RestRequest.Method> getValidMethods(); /** - * Return a set of all valid HTTP methods for the particular path + * Returns the relative HTTP path of the set of method handlers. */ - Set<RestRequest.Method> getValidMethods() { - return methodHandlers.keySet(); - } + String getPath(); } diff --git a/server/src/main/java/org/opensearch/rest/RestController.java b/server/src/main/java/org/opensearch/rest/RestController.java index a2938810db1c2..95abb9b3daeca 100644 --- a/server/src/main/java/org/opensearch/rest/RestController.java +++ b/server/src/main/java/org/opensearch/rest/RestController.java @@ -38,32 +38,34 @@ import org.opensearch.OpenSearchException; import org.opensearch.client.node.NodeClient; import org.opensearch.common.Nullable; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.path.PathTrie; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.io.Streams; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.util.FeatureFlags; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.util.io.Streams; import org.opensearch.http.HttpServerTransport; import org.opensearch.identity.IdentityService; import org.opensearch.identity.Subject; import org.opensearch.identity.tokens.AuthToken; import org.opensearch.identity.tokens.RestTokenExtractor; -import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.usage.UsageService; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.net.URI; +import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -76,12 +78,12 @@ import java.util.stream.Collectors; import static org.opensearch.cluster.metadata.IndexNameExpressionResolver.SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY; -import static org.opensearch.rest.BytesRestResponse.TEXT_CONTENT_TYPE; import static org.opensearch.core.rest.RestStatus.BAD_REQUEST; import static org.opensearch.core.rest.RestStatus.INTERNAL_SERVER_ERROR; import static org.opensearch.core.rest.RestStatus.METHOD_NOT_ALLOWED; 
import static org.opensearch.core.rest.RestStatus.NOT_ACCEPTABLE; import static org.opensearch.core.rest.RestStatus.OK; +import static org.opensearch.rest.BytesRestResponse.TEXT_CONTENT_TYPE; /** * OpenSearch REST controller @@ -106,7 +108,7 @@ public class RestController implements HttpServerTransport.Dispatcher { } } - private final PathTrie<MethodHandlers> handlers = new PathTrie<>(RestUtils.REST_DECODER); + private final PathTrie<RestMethodHandlers> handlers = new PathTrie<>(RestUtils.REST_DECODER); private final UnaryOperator<RestHandler> handlerWrapper; @@ -143,6 +145,16 @@ public RestController( ); } + /** + * Returns an iterator over registered REST method handlers. + * @return {@link Iterator} of {@link MethodHandlers} + */ + public Iterator<MethodHandlers> getAllHandlers() { + List<MethodHandlers> methodHandlers = new ArrayList<>(); + handlers.retrieveAll().forEachRemaining(methodHandlers::add); + return methodHandlers.iterator(); + } + /** * Registers a REST handler to be executed when the provided {@code method} and {@code path} match the request. * @@ -220,7 +232,7 @@ protected void registerHandler(RestRequest.Method method, String path, RestHandl private void registerHandlerNoWrap(RestRequest.Method method, String path, RestHandler maybeWrappedHandler) { handlers.insertOrUpdate( path, - new MethodHandlers(path, maybeWrappedHandler, method), + new RestMethodHandlers(path, maybeWrappedHandler, method), (mHandlers, newMHandler) -> mHandlers.addMethods(maybeWrappedHandler, method) ); } @@ -288,7 +300,7 @@ private void dispatchRequest(RestRequest request, RestChannel channel, RestHandl sendContentTypeErrorMessage(request.getAllHeaderValues("Content-Type"), channel); return; } - if (handler.supportsContentStream() && mediaType != XContentType.JSON && mediaType != XContentType.SMILE) { + if (handler.supportsContentStream() && mediaType != MediaTypeRegistry.JSON && mediaType != XContentType.SMILE) { channel.sendResponse( BytesRestResponse.createSimpleErrorResponse( channel, @@ -391,10 +403,10 @@ private void tryAllHandlers(final RestRequest request, final RestChannel channel // Resolves the HTTP method and fails if the method is invalid requestMethod = request.method(); // Loop through all possible handlers, attempting to dispatch the request - Iterator<MethodHandlers> allHandlers = getAllHandlers(request.params(), rawPath); + Iterator<RestMethodHandlers> allHandlers = getAllRestMethodHandlers(request.params(), rawPath); while (allHandlers.hasNext()) { final RestHandler handler; - final MethodHandlers handlers = allHandlers.next(); + final RestMethodHandlers handlers = allHandlers.next(); if (handlers == null) { handler = null; } else { @@ -422,7 +434,7 @@ private void tryAllHandlers(final RestRequest request, final RestChannel channel handleBadRequest(uri, requestMethod, channel); } - Iterator<MethodHandlers> getAllHandlers(@Nullable Map<String, String> requestParamsRef, String rawPath) { + Iterator<RestMethodHandlers> getAllRestMethodHandlers(@Nullable Map<String, String> requestParamsRef, String rawPath) { final Supplier<Map<String, String>> paramsSupplier; if (requestParamsRef == null) { paramsSupplier = () -> null; @@ -523,7 +535,7 @@ private void handleBadRequest(String uri, RestRequest.Method method, RestChannel /** * Attempts to extract auth token and login. 
* - * @returns false if there was an error and the request should not continue being dispatched + * @return false if there was an error and the request should not continue being dispatched * */ private boolean handleAuthenticateUser(final RestRequest request, final RestChannel channel) { try { @@ -560,7 +572,7 @@ private boolean handleAuthenticateUser(final RestRequest request, final RestChan */ private Set<RestRequest.Method> getValidHandlerMethodSet(String rawPath) { Set<RestRequest.Method> validMethods = new HashSet<>(); - Iterator<MethodHandlers> allHandlers = getAllHandlers(null, rawPath); + Iterator<RestMethodHandlers> allHandlers = getAllRestMethodHandlers(null, rawPath); while (allHandlers.hasNext()) { final MethodHandlers methodHandlers = allHandlers.next(); if (methodHandlers != null) { diff --git a/server/src/main/java/org/opensearch/rest/RestHandler.java b/server/src/main/java/org/opensearch/rest/RestHandler.java index 7832649e8ad32..294dc3ffbe329 100644 --- a/server/src/main/java/org/opensearch/rest/RestHandler.java +++ b/server/src/main/java/org/opensearch/rest/RestHandler.java @@ -108,7 +108,7 @@ default List<ReplacedRoute> replacedRoutes() { } /** - * Controls whether requests handled by this class are allowed to to access system indices by default. + * Controls whether requests handled by this class are allowed to access system indices by default. * @return {@code true} if requests handled by this class should be allowed to access system indices. */ default boolean allowSystemIndexAccessByDefault() { diff --git a/server/src/main/java/org/opensearch/rest/RestMethodHandlers.java b/server/src/main/java/org/opensearch/rest/RestMethodHandlers.java new file mode 100644 index 0000000000000..a430d8ace447c --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/RestMethodHandlers.java @@ -0,0 +1,92 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.rest; + +import org.opensearch.common.Nullable; + +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + +/** + * Encapsulate multiple handlers for the same path, allowing different handlers for different HTTP verbs. + */ +final class RestMethodHandlers implements MethodHandlers { + + private final String path; + private final Map<RestRequest.Method, RestHandler> methodHandlers; + + RestMethodHandlers(String path, RestHandler handler, RestRequest.Method... 
methods) { + this.path = path; + this.methodHandlers = new HashMap<>(methods.length); + for (RestRequest.Method method : methods) { + methodHandlers.put(method, handler); + } + } + + /** + * Add a handler for an additional array of methods. Note that {@code MethodHandlers} + * does not allow replacing the handler for an already existing method. + */ + public RestMethodHandlers addMethods(RestHandler handler, RestRequest.Method... methods) { + for (RestRequest.Method method : methods) { + RestHandler existing = methodHandlers.putIfAbsent(method, handler); + if (existing != null) { + throw new IllegalArgumentException("Cannot replace existing handler for [" + path + "] for method: " + method); + } + } + return this; + } + + /** + * Returns the handler for the given method or {@code null} if none exists. + */ + @Nullable + public RestHandler getHandler(RestRequest.Method method) { + return methodHandlers.get(method); + } + + /** + * Return a set of all valid HTTP methods for the particular path. + */ + public Set<RestRequest.Method> getValidMethods() { + return methodHandlers.keySet(); + } + + /** + * Returns the relative HTTP path of the set of method handlers. + */ + public String getPath() { + return path; + } +} diff --git a/server/src/main/java/org/opensearch/rest/RestRequest.java b/server/src/main/java/org/opensearch/rest/RestRequest.java index 7382701b6f787..2c397f7fc6e8e 100644 --- a/server/src/main/java/org/opensearch/rest/RestRequest.java +++ b/server/src/main/java/org/opensearch/rest/RestRequest.java @@ -37,18 +37,18 @@ import org.opensearch.common.CheckedConsumer; import org.opensearch.common.Nullable; import org.opensearch.common.SetOnce; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.http.HttpChannel; import org.opensearch.http.HttpRequest; @@ -65,14 +65,15 @@ import java.util.regex.Pattern; import java.util.stream.Collectors; -import static org.opensearch.common.unit.ByteSizeValue.parseBytesSizeValue; import static org.opensearch.common.unit.TimeValue.parseTimeValue; +import static org.opensearch.core.common.unit.ByteSizeValue.parseBytesSizeValue; /** * REST Request * * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RestRequest implements ToXContent.Params { // tchar pattern as defined by RFC7230 section 3.2.6 @@ -118,14 +119,14 @@ private RestRequest( HttpChannel httpChannel, long requestId ) { - final MediaType xContentType; + final MediaType mediaType; try { - xContentType = parseContentType(headers.get("Content-Type")); + mediaType = parseContentType(headers.get("Content-Type")); } catch (final IllegalArgumentException e) { throw new ContentTypeHeaderException(e); } - if (xContentType != null) { - 
this.mediaType.set(xContentType); + if (mediaType != null) { + this.mediaType.set(mediaType); } this.xContentRegistry = xContentRegistry; this.httpRequest = httpRequest; @@ -232,8 +233,9 @@ public static RestRequest requestWithoutParameters( /** * The method used. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Method { GET, POST, @@ -337,7 +339,7 @@ public final long getRequestId() { } /** - * The {@link XContentType} that was parsed from the {@code Content-Type} header. This value will be {@code null} in the case of + * The {@link MediaType} that was parsed from the {@code Content-Type} header. This value will be {@code null} in the case of * a request without a valid {@code Content-Type} header, a request without content ({@link #hasContent()}, or a plain text request */ @Nullable @@ -462,7 +464,7 @@ public String[] paramAsStringArray(String key, String[] defaultValue) { if (value == null) { return defaultValue; } - return org.opensearch.core.common.Strings.splitStringByCommaToArray(value); + return Strings.splitStringByCommaToArray(value); } public String[] paramAsStringArrayOrEmptyIfAll(String key) { diff --git a/server/src/main/java/org/opensearch/rest/RestRequestFilter.java b/server/src/main/java/org/opensearch/rest/RestRequestFilter.java index dbb968401f9eb..7987138df9bc5 100644 --- a/server/src/main/java/org/opensearch/rest/RestRequestFilter.java +++ b/server/src/main/java/org/opensearch/rest/RestRequestFilter.java @@ -33,12 +33,12 @@ package org.opensearch.rest; import org.opensearch.OpenSearchException; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.XContentBuilder; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/rest/RestResponse.java b/server/src/main/java/org/opensearch/rest/RestResponse.java index eb4ad99f03cfc..2eff746e8508c 100644 --- a/server/src/main/java/org/opensearch/rest/RestResponse.java +++ b/server/src/main/java/org/opensearch/rest/RestResponse.java @@ -33,8 +33,8 @@ package org.opensearch.rest; import org.opensearch.OpenSearchException; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.lease.Releasable; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.rest.RestStatus; import java.util.ArrayList; diff --git a/server/src/main/java/org/opensearch/rest/action/RestActionListener.java b/server/src/main/java/org/opensearch/rest/action/RestActionListener.java index d1c1c98b1b827..846265b19e726 100644 --- a/server/src/main/java/org/opensearch/rest/action/RestActionListener.java +++ b/server/src/main/java/org/opensearch/rest/action/RestActionListener.java @@ -34,7 +34,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; +import org.opensearch.core.action.ActionListener; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestChannel; diff --git a/server/src/main/java/org/opensearch/rest/action/RestActions.java b/server/src/main/java/org/opensearch/rest/action/RestActions.java index 7508871d3ab75..b074a4bf4e01d 100644 --- 
a/server/src/main/java/org/opensearch/rest/action/RestActions.java +++ b/server/src/main/java/org/opensearch/rest/action/RestActions.java @@ -34,14 +34,15 @@ import org.opensearch.ExceptionsHelper; import org.opensearch.action.FailedNodeException; -import org.opensearch.core.action.ShardOperationFailedException; import org.opensearch.action.support.broadcast.BroadcastResponse; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.action.support.nodes.BaseNodesResponse; +import org.opensearch.common.lucene.uid.Versions; import org.opensearch.core.ParseField; +import org.opensearch.core.action.ShardOperationFailedException; import org.opensearch.core.common.ParsingException; -import org.opensearch.common.lucene.uid.Versions; -import org.opensearch.common.util.CollectionUtils; +import org.opensearch.core.common.util.CollectionUtils; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContent.Params; import org.opensearch.core.xcontent.XContentBuilder; @@ -54,7 +55,6 @@ import org.opensearch.rest.RestChannel; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; -import org.opensearch.core.rest.RestStatus; import java.io.IOException; import java.util.List; diff --git a/server/src/main/java/org/opensearch/rest/action/RestCancellableNodeClient.java b/server/src/main/java/org/opensearch/rest/action/RestCancellableNodeClient.java index ad84b6db0ed3e..339482af1dd74 100644 --- a/server/src/main/java/org/opensearch/rest/action/RestCancellableNodeClient.java +++ b/server/src/main/java/org/opensearch/rest/action/RestCancellableNodeClient.java @@ -32,18 +32,18 @@ package org.opensearch.rest.action; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; -import org.opensearch.action.ActionResponse; import org.opensearch.action.ActionType; import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; import org.opensearch.client.Client; import org.opensearch.client.FilterClient; import org.opensearch.client.OriginSettingClient; import org.opensearch.client.node.NodeClient; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.tasks.TaskId; import org.opensearch.http.HttpChannel; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; import java.util.ArrayList; import java.util.HashSet; diff --git a/server/src/main/java/org/opensearch/rest/action/RestMainAction.java b/server/src/main/java/org/opensearch/rest/action/RestMainAction.java index 54bd09d8e534e..1554ed7c1ef64 100644 --- a/server/src/main/java/org/opensearch/rest/action/RestMainAction.java +++ b/server/src/main/java/org/opensearch/rest/action/RestMainAction.java @@ -36,12 +36,12 @@ import org.opensearch.action.main.MainRequest; import org.opensearch.action.main.MainResponse; import org.opensearch.client.node.NodeClient; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; -import org.opensearch.core.rest.RestStatus; import java.io.IOException; import java.util.List; diff --git a/server/src/main/java/org/opensearch/rest/action/RestStatusToXContentListener.java b/server/src/main/java/org/opensearch/rest/action/RestStatusToXContentListener.java index 
74b4409f90c70..ae6795dd89b7b 100644 --- a/server/src/main/java/org/opensearch/rest/action/RestStatusToXContentListener.java +++ b/server/src/main/java/org/opensearch/rest/action/RestStatusToXContentListener.java @@ -32,11 +32,11 @@ package org.opensearch.rest.action; import org.opensearch.common.xcontent.StatusToXContentObject; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestChannel; import org.opensearch.rest.RestResponse; -import org.opensearch.core.rest.RestStatus; import java.util.function.Function; diff --git a/server/src/main/java/org/opensearch/rest/action/RestToXContentListener.java b/server/src/main/java/org/opensearch/rest/action/RestToXContentListener.java index be7414c33c2cb..883f197807187 100644 --- a/server/src/main/java/org/opensearch/rest/action/RestToXContentListener.java +++ b/server/src/main/java/org/opensearch/rest/action/RestToXContentListener.java @@ -32,13 +32,13 @@ package org.opensearch.rest.action; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestChannel; import org.opensearch.rest.RestResponse; -import org.opensearch.core.rest.RestStatus; /** * A REST based action listener that assumes the response is of type {@link ToXContent} and automatically diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestCancelTasksAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestCancelTasksAction.java index 379c7ac2fc570..061ee022c1984 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestCancelTasksAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestCancelTasksAction.java @@ -36,9 +36,9 @@ import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.core.common.Strings; +import org.opensearch.core.tasks.TaskId; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; -import org.opensearch.tasks.TaskId; import java.io.IOException; import java.util.List; diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterAllocationExplainAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterAllocationExplainAction.java index d8c8db039899d..f373d141a3c28 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterAllocationExplainAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterAllocationExplainAction.java @@ -35,6 +35,7 @@ import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainRequest; import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainResponse; import org.opensearch.client.node.NodeClient; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -42,7 +43,6 @@ import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; -import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.action.RestBuilderListener; import java.io.IOException; diff --git 
a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java index bc49f59c7de7d..893e5414ffaa0 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java @@ -42,13 +42,13 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsFilter; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; -import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.action.RestBuilderListener; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterRerouteAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterRerouteAction.java index 967a228ebd628..dbd74c79102f4 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterRerouteAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterRerouteAction.java @@ -37,10 +37,10 @@ import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.routing.allocation.command.AllocationCommands; -import org.opensearch.core.ParseField; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsFilter; +import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ObjectParser.ValueType; diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterStateAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterStateAction.java index 7e9bbf4993d65..c8421dcd28a03 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterStateAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterStateAction.java @@ -43,13 +43,13 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsFilter; import org.opensearch.core.common.Strings; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; -import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.action.RestBuilderListener; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestGetTaskAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestGetTaskAction.java index 963ab43acef48..57a23e38a409f 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestGetTaskAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestGetTaskAction.java @@ -35,10 +35,10 @@ import 
org.opensearch.action.admin.cluster.node.tasks.get.GetTaskRequest; import org.opensearch.client.node.NodeClient; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.tasks.TaskId; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestToXContentListener; -import org.opensearch.tasks.TaskId; import java.io.IOException; import java.util.List; diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestListTasksAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestListTasksAction.java index cdca1ae975ae7..ec094aa693cc9 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestListTasksAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestListTasksAction.java @@ -32,23 +32,23 @@ package org.opensearch.rest.action.admin.cluster; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.tasks.TaskId; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestChannel; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; -import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.action.RestBuilderListener; import org.opensearch.rest.action.RestToXContentListener; -import org.opensearch.tasks.TaskId; import java.io.IOException; import java.util.List; diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesHotThreadsAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesHotThreadsAction.java index b5a0626ca74e9..f505f5cdf787a 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesHotThreadsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesHotThreadsAction.java @@ -38,11 +38,11 @@ import org.opensearch.client.node.NodeClient; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.Strings; +import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; -import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.action.RestResponseListener; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesUsageAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesUsageAction.java index 858990874de2e..6704bc975eb0b 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesUsageAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesUsageAction.java @@ -36,12 +36,12 @@ import org.opensearch.action.admin.cluster.node.usage.NodesUsageResponse; import org.opensearch.client.node.NodeClient; import org.opensearch.core.common.Strings; +import org.opensearch.core.rest.RestStatus; import 
org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; -import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.action.RestActions; import org.opensearch.rest.action.RestBuilderListener; diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestPutStoredScriptAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestPutStoredScriptAction.java index 47ab7ae585039..c86b880774343 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestPutStoredScriptAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestPutStoredScriptAction.java @@ -33,8 +33,8 @@ import org.opensearch.action.admin.cluster.storedscripts.PutStoredScriptRequest; import org.opensearch.client.node.NodeClient; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.MediaType; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java index 72b2b025e8f5b..ad08b373f5562 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java @@ -37,8 +37,9 @@ import org.opensearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsResponse; import org.opensearch.client.node.NodeClient; import org.opensearch.core.ParseField; -import org.opensearch.core.common.settings.SecureString; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.settings.SecureString; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.rest.BaseRestHandler; @@ -46,7 +47,6 @@ import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestRequestFilter; import org.opensearch.rest.RestResponse; -import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.action.RestActions; import org.opensearch.rest.action.RestBuilderListener; diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestRemoteClusterInfoAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestRemoteClusterInfoAction.java index 28edba4db387d..138f9fdf5c813 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestRemoteClusterInfoAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestRemoteClusterInfoAction.java @@ -53,7 +53,7 @@ public final class RestRemoteClusterInfoAction extends BaseRestHandler { @Override public List<Route> routes() { - return singletonList(new Route(GET, "_remote/info")); + return singletonList(new Route(GET, "/_remote/info")); } @Override diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java index c74bfff344e68..21003f565be44 100644 --- 
a/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java @@ -36,17 +36,17 @@ import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.node.NodeClient; import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; -import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.action.RestToXContentListener; import java.io.IOException; import java.util.List; import static java.util.Collections.singletonList; -import static org.opensearch.rest.RestRequest.Method.DELETE; import static org.opensearch.core.rest.RestStatus.ACCEPTED; +import static org.opensearch.rest.RestRequest.Method.DELETE; /** * Transport action to delete dangling index diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java index 76a554f52cedb..ea1e25717b9c0 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java @@ -32,22 +32,22 @@ package org.opensearch.rest.action.admin.cluster.dangling; -import static java.util.Collections.singletonList; -import static org.opensearch.rest.RestRequest.Method.POST; -import static org.opensearch.core.rest.RestStatus.ACCEPTED; - -import java.io.IOException; -import java.util.List; - import org.opensearch.action.admin.indices.dangling.import_index.ImportDanglingIndexRequest; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.node.NodeClient; import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; -import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.action.RestToXContentListener; +import java.io.IOException; +import java.util.List; + +import static java.util.Collections.singletonList; +import static org.opensearch.core.rest.RestStatus.ACCEPTED; +import static org.opensearch.rest.RestRequest.Method.POST; + /** * Transport action to import dangling index * diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestForceMergeAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestForceMergeAction.java index 06f1d5f46f90b..f3e66bd20cd86 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestForceMergeAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestForceMergeAction.java @@ -76,6 +76,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC mergeRequest.maxNumSegments(request.paramAsInt("max_num_segments", mergeRequest.maxNumSegments())); mergeRequest.onlyExpungeDeletes(request.paramAsBoolean("only_expunge_deletes", mergeRequest.onlyExpungeDeletes())); mergeRequest.flush(request.paramAsBoolean("flush", mergeRequest.flush())); + mergeRequest.primaryOnly(request.paramAsBoolean("primary_only", mergeRequest.primaryOnly())); if (mergeRequest.onlyExpungeDeletes() && mergeRequest.maxNumSegments() != 
ForceMergeRequest.Defaults.MAX_NUM_SEGMENTS) { deprecationLogger.deprecate( "force_merge_expunge_deletes_and_max_num_segments_deprecation", diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetAliasesAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetAliasesAction.java index 10674ba253641..9225504c7d906 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetAliasesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetAliasesAction.java @@ -40,13 +40,13 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.regex.Regex; import org.opensearch.core.common.Strings; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; -import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.action.RestBuilderListener; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetComponentTemplateAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetComponentTemplateAction.java index 172d5f071ebb8..0ad54f01a64a1 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetComponentTemplateAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetComponentTemplateAction.java @@ -36,9 +36,9 @@ import org.opensearch.client.node.NodeClient; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; +import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; -import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.action.RestToXContentListener; import java.io.IOException; @@ -46,10 +46,10 @@ import java.util.List; import java.util.Set; -import static org.opensearch.rest.RestRequest.Method.GET; -import static org.opensearch.rest.RestRequest.Method.HEAD; import static org.opensearch.core.rest.RestStatus.NOT_FOUND; import static org.opensearch.core.rest.RestStatus.OK; +import static org.opensearch.rest.RestRequest.Method.GET; +import static org.opensearch.rest.RestRequest.Method.HEAD; /** * Transport action to get component template diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetComposableIndexTemplateAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetComposableIndexTemplateAction.java index 86dcaaf5914dd..3a0cebf253697 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetComposableIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetComposableIndexTemplateAction.java @@ -36,9 +36,9 @@ import org.opensearch.client.node.NodeClient; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; +import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; -import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.action.RestToXContentListener; import java.io.IOException; @@ -46,10 +46,10 @@ import java.util.List; import java.util.Set; -import static org.opensearch.rest.RestRequest.Method.GET; 
-import static org.opensearch.rest.RestRequest.Method.HEAD; import static org.opensearch.core.rest.RestStatus.NOT_FOUND; import static org.opensearch.core.rest.RestStatus.OK; +import static org.opensearch.rest.RestRequest.Method.GET; +import static org.opensearch.rest.RestRequest.Method.HEAD; /** * Transport action to get composable index template diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetFieldMappingAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetFieldMappingAction.java index 9a1c07748e974..0aa1fbbfc7540 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetFieldMappingAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetFieldMappingAction.java @@ -41,12 +41,12 @@ import org.opensearch.client.node.NodeClient; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.core.common.Strings; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; -import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.action.RestBuilderListener; import java.io.IOException; @@ -55,9 +55,9 @@ import static java.util.Arrays.asList; import static java.util.Collections.unmodifiableList; -import static org.opensearch.rest.RestRequest.Method.GET; import static org.opensearch.core.rest.RestStatus.NOT_FOUND; import static org.opensearch.core.rest.RestStatus.OK; +import static org.opensearch.rest.RestRequest.Method.GET; /** * Transport action to get field mapping diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetIndexTemplateAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetIndexTemplateAction.java index e409546d676ae..7b1da2db3be3b 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetIndexTemplateAction.java @@ -38,9 +38,9 @@ import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.Strings; +import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; -import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.action.RestToXContentListener; import java.io.IOException; @@ -49,10 +49,10 @@ import static java.util.Arrays.asList; import static java.util.Collections.unmodifiableList; -import static org.opensearch.rest.RestRequest.Method.GET; -import static org.opensearch.rest.RestRequest.Method.HEAD; import static org.opensearch.core.rest.RestStatus.NOT_FOUND; import static org.opensearch.core.rest.RestStatus.OK; +import static org.opensearch.rest.RestRequest.Method.GET; +import static org.opensearch.rest.RestRequest.Method.HEAD; /** * The REST handler for get template and head template APIs. 
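For orientation, the MethodHandlers accessors made public earlier in this patch can be exercised as follows. A minimal sketch, assuming access to the node's RestController; the RouteDump class and its dumpRoutes helper are illustrative and not part of the change, only getAllHandlers(), getPath() and getValidMethods() come from the diff itself.

import org.opensearch.rest.MethodHandlers;
import org.opensearch.rest.RestController;

import java.util.Iterator;

final class RouteDump {
    // Walk every registered REST route and print its path plus the HTTP verbs it accepts.
    static void dumpRoutes(RestController controller) {
        Iterator<MethodHandlers> it = controller.getAllHandlers();
        while (it.hasNext()) {
            MethodHandlers handlers = it.next();
            System.out.println(handlers.getPath() + " -> " + handlers.getValidMethods());
        }
    }
}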
diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetMappingAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetMappingAction.java index 9f95541aab332..0431b7f18104c 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetMappingAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestGetMappingAction.java @@ -42,12 +42,12 @@ import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.Strings; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; -import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.action.RestActionListener; import org.opensearch.rest.action.RestBuilderListener; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestIndicesShardStoresAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestIndicesShardStoresAction.java index eff3cfe601ec1..4e9d1397ef11a 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestIndicesShardStoresAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestIndicesShardStoresAction.java @@ -50,8 +50,8 @@ import static java.util.Arrays.asList; import static java.util.Collections.unmodifiableList; -import static org.opensearch.rest.RestRequest.Method.GET; import static org.opensearch.core.rest.RestStatus.OK; +import static org.opensearch.rest.RestRequest.Method.GET; /** * Rest action for {@link IndicesShardStoresAction} diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPutMappingAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPutMappingAction.java index 8fdf000139d89..0d805f5f3bfb8 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPutMappingAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestPutMappingAction.java @@ -66,10 +66,10 @@ public class RestPutMappingAction extends BaseRestHandler { public List<Route> routes() { return unmodifiableList( asList( - new Route(POST, "/{index}/_mapping/"), - new Route(PUT, "/{index}/_mapping/"), - new Route(POST, "/{index}/_mappings/"), - new Route(PUT, "/{index}/_mappings/") + new Route(POST, "/{index}/_mapping"), + new Route(PUT, "/{index}/_mapping"), + new Route(POST, "/{index}/_mappings"), + new Route(PUT, "/{index}/_mappings") ) ); } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestRefreshAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestRefreshAction.java index 90e06a2446057..d16587312a6b7 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestRefreshAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestRefreshAction.java @@ -37,9 +37,9 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.client.node.NodeClient; import org.opensearch.core.common.Strings; +import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; -import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.action.RestToXContentListener; import 
java.io.IOException; diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestSyncedFlushAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestSyncedFlushAction.java index 9cb4a90eeb834..3b7c254b21aae 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestSyncedFlushAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestSyncedFlushAction.java @@ -38,13 +38,13 @@ import org.opensearch.client.node.NodeClient; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.core.common.Strings; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestChannel; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; -import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.action.RestToXContentListener; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestValidateQueryAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestValidateQueryAction.java index 211322e0d7419..7aebcb6e6301b 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestValidateQueryAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestValidateQueryAction.java @@ -52,9 +52,9 @@ import static java.util.Arrays.asList; import static java.util.Collections.unmodifiableList; +import static org.opensearch.core.rest.RestStatus.OK; import static org.opensearch.rest.RestRequest.Method.GET; import static org.opensearch.rest.RestRequest.Method.POST; -import static org.opensearch.core.rest.RestStatus.OK; /** * Transport action to validate a query diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestViewAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestViewAction.java new file mode 100644 index 0000000000000..47be439a97fc4 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestViewAction.java @@ -0,0 +1,240 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.rest.action.admin.indices; + +import org.opensearch.action.admin.indices.view.CreateViewAction; +import org.opensearch.action.admin.indices.view.DeleteViewAction; +import org.opensearch.action.admin.indices.view.GetViewAction; +import org.opensearch.action.admin.indices.view.ListViewNamesAction; +import org.opensearch.action.admin.indices.view.SearchViewAction; +import org.opensearch.action.admin.indices.view.UpdateViewAction; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.client.node.NodeClient; +import org.opensearch.common.ValidationException; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.NamedRoute; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestCancellableNodeClient; +import org.opensearch.rest.action.RestStatusToXContentListener; +import org.opensearch.rest.action.RestToXContentListener; +import org.opensearch.rest.action.search.RestSearchAction; + +import java.io.IOException; +import java.util.List; +import java.util.function.IntConsumer; + +import static org.opensearch.rest.RestRequest.Method.DELETE; +import static org.opensearch.rest.RestRequest.Method.GET; +import static org.opensearch.rest.RestRequest.Method.POST; +import static org.opensearch.rest.RestRequest.Method.PUT; + +/** All rest handlers for view actions */ +@ExperimentalApi +public class RestViewAction { + + public static final String VIEW_NAME = "view_name"; + public static final String VIEW_NAME_PARAMETER = "{" + VIEW_NAME + "}"; + + /** Handler for create view */ + @ExperimentalApi + public static class CreateViewHandler extends BaseRestHandler { + + @Override + public List<Route> routes() { + return List.of(new NamedRoute.Builder().path("/views").method(POST).uniqueName(CreateViewAction.NAME).build()); + } + + @Override + public String getName() { + return CreateViewAction.NAME; + } + + @Override + protected RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + try (final XContentParser parser = request.contentParser()) { + final CreateViewAction.Request createViewAction = CreateViewAction.Request.fromXContent(parser); + + final ValidationException validationResult = createViewAction.validate(); + if (validationResult != null) { + throw validationResult; + } + + return channel -> client.admin().indices().createView(createViewAction, new RestToXContentListener<>(channel)); + } + } + } + + /** Handler for delete view */ + @ExperimentalApi + public static class DeleteViewHandler extends BaseRestHandler { + + @Override + public List<Route> routes() { + return List.of( + new NamedRoute.Builder().path("/views/" + VIEW_NAME_PARAMETER).method(DELETE).uniqueName(DeleteViewAction.NAME).build() + ); + } + + @Override + public String getName() { + return DeleteViewAction.NAME; + } + + @Override + protected RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + final String viewId = request.param(VIEW_NAME); + + final DeleteViewAction.Request deleteRequest = new DeleteViewAction.Request(viewId); + + final ValidationException validationResult = deleteRequest.validate(); + if (validationResult != null) { + throw validationResult; + } + + return channel -> client.admin().indices().deleteView(deleteRequest, new RestToXContentListener<>(channel)); + } + } + + /** Handler for update view */ + 
@ExperimentalApi + public static class UpdateViewHandler extends BaseRestHandler { + + @Override + public List<Route> routes() { + return List.of( + new NamedRoute.Builder().path("/views/" + VIEW_NAME_PARAMETER).method(PUT).uniqueName(UpdateViewAction.NAME).build() + ); + } + + @Override + public String getName() { + return UpdateViewAction.NAME; + } + + @Override + protected RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + final String viewId = request.param(VIEW_NAME); + + try (final XContentParser parser = request.contentParser()) { + final CreateViewAction.Request updateRequest = UpdateViewAction.Request.fromXContent(parser, viewId); + + final ValidationException validationResult = updateRequest.validate(); + if (validationResult != null) { + throw validationResult; + } + + return channel -> client.admin().indices().updateView(updateRequest, new RestToXContentListener<>(channel)); + } + } + } + + /** Handler for get view */ + @ExperimentalApi + public static class GetViewHandler extends BaseRestHandler { + + @Override + public List<Route> routes() { + return List.of( + new NamedRoute.Builder().path("/views/" + VIEW_NAME_PARAMETER).method(GET).uniqueName(GetViewAction.NAME).build() + ); + } + + @Override + public String getName() { + return GetViewAction.NAME; + } + + @Override + protected RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + final String viewId = request.param(VIEW_NAME); + + final GetViewAction.Request getRequest = new GetViewAction.Request(viewId); + + final ValidationException validationResult = getRequest.validate(); + if (validationResult != null) { + throw validationResult; + } + + return channel -> client.admin().indices().getView(getRequest, new RestToXContentListener<>(channel)); + } + } + + /** Handler for list view names */ + @ExperimentalApi + public static class ListViewNamesHandler extends BaseRestHandler { + + @Override + public List<Route> routes() { + return List.of(new NamedRoute.Builder().path("/views/").method(GET).uniqueName(ListViewNamesAction.NAME).build()); + } + + @Override + public String getName() { + return ListViewNamesAction.NAME; + } + + @Override + protected RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + return channel -> client.listViewNames(new ListViewNamesAction.Request(), new RestToXContentListener<>(channel)); + } + } + + /** Handler for search view */ + @ExperimentalApi + public static class SearchViewHandler extends BaseRestHandler { + @Override + public List<Route> routes() { + return List.of( + new NamedRoute.Builder().path("/views/" + VIEW_NAME_PARAMETER + "/_search") + .method(GET) + .uniqueName(SearchViewAction.NAME) + .build(), + new NamedRoute.Builder().path("/views/" + VIEW_NAME_PARAMETER + "/_search") + .method(POST) + .uniqueName(SearchViewAction.NAME) + .build() + ); + } + + @Override + public String getName() { + return SearchViewAction.NAME; + } + + @Override + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + final String viewId = request.param(VIEW_NAME); + + final SearchViewAction.Request viewSearchRequest = new SearchViewAction.Request(viewId, new SearchRequest()); + final IntConsumer setSize = size -> viewSearchRequest.source().size(size); + + request.withContentOrSourceParamParserOrNull( + parser -> RestSearchAction.parseSearchRequest( + viewSearchRequest, + request, +
parser, + client.getNamedWriteableRegistry(), + setSize + ) + ); + + final ValidationException validationResult = viewSearchRequest.validate(); + if (validationResult != null) { + throw validationResult; + } + + return channel -> { + final RestCancellableNodeClient cancelClient = new RestCancellableNodeClient(client, request.getHttpChannel()); + cancelClient.execute(SearchViewAction.INSTANCE, viewSearchRequest, new RestStatusToXContentListener<>(channel)); + }; + } + } +} diff --git a/server/src/main/java/org/opensearch/rest/action/cat/AbstractCatAction.java b/server/src/main/java/org/opensearch/rest/action/cat/AbstractCatAction.java index d1d16bd1af17e..6f4e060363bfb 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/AbstractCatAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/AbstractCatAction.java @@ -36,10 +36,10 @@ import org.opensearch.common.io.Streams; import org.opensearch.common.io.UTF8StreamWriter; import org.opensearch.core.common.io.stream.BytesStream; +import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestRequest; -import org.opensearch.core.rest.RestStatus; import java.io.IOException; import java.util.Arrays; diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestAllocationAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestAllocationAction.java index 702ffff1c9330..07b0fbbe4a911 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestAllocationAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestAllocationAction.java @@ -43,8 +43,8 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.Table; import org.opensearch.common.logging.DeprecationLogger; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; import org.opensearch.rest.action.RestActionListener; diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestCatAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestCatAction.java index 1652f24bdf574..b6e1b0e99fa2c 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestCatAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestCatAction.java @@ -33,10 +33,10 @@ package org.opensearch.rest.action.cat; import org.opensearch.client.node.NodeClient; +import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestRequest; -import org.opensearch.core.rest.RestStatus; import java.io.IOException; import java.util.List; diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestCatRecoveryAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestCatRecoveryAction.java index 4f95e10ae3622..26efd9929afea 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestCatRecoveryAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestCatRecoveryAction.java @@ -48,6 +48,7 @@ import org.opensearch.rest.RestResponse; import org.opensearch.rest.action.RestResponseListener; +import java.time.Instant; import java.util.Comparator; import java.util.List; import java.util.Locale; @@ -170,9 +171,9 @@ public int compare(RecoveryState o1, RecoveryState o2) { t.startRow(); 
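
Every handler in RestViewAction above follows the same dispatch shape: build or parse the ActionRequest, fail fast on validate(), then hand off through the client with a RestToXContentListener. The sketch below drives the same transport actions directly from a NodeClient, using only the request constructors and client methods the handlers themselves call; the class name and listener wiring are illustrative, not part of this change.

    import org.opensearch.action.admin.indices.view.DeleteViewAction;
    import org.opensearch.action.admin.indices.view.GetViewAction;
    import org.opensearch.client.node.NodeClient;
    import org.opensearch.common.ValidationException;
    import org.opensearch.core.action.ActionListener;

    final class ViewClientSketch {
        /** Fetches a view by name and, if the lookup succeeds, deletes it. */
        static void getThenDelete(NodeClient client, String viewName) {
            final GetViewAction.Request getRequest = new GetViewAction.Request(viewName);
            final ValidationException invalid = getRequest.validate();
            if (invalid != null) {
                throw invalid; // same fail-fast validation the handlers perform
            }
            client.admin().indices().getView(getRequest, ActionListener.wrap(found -> {
                // View exists; issue the same request shape the DELETE handler builds.
                final DeleteViewAction.Request deleteRequest = new DeleteViewAction.Request(viewName);
                client.admin().indices().deleteView(deleteRequest, ActionListener.wrap(done -> {}, e -> {}));
            }, e -> {}));
        }
    }
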
t.addCell(index); t.addCell(state.getShardId().id()); - t.addCell(XContentOpenSearchExtension.DEFAULT_DATE_PRINTER.print(state.getTimer().startTime())); + t.addCell(XContentOpenSearchExtension.DEFAULT_FORMATTER.format(Instant.ofEpochMilli(state.getTimer().startTime()))); t.addCell(state.getTimer().startTime()); - t.addCell(XContentOpenSearchExtension.DEFAULT_DATE_PRINTER.print(state.getTimer().stopTime())); + t.addCell(XContentOpenSearchExtension.DEFAULT_FORMATTER.format(Instant.ofEpochMilli(state.getTimer().stopTime()))); t.addCell(state.getTimer().stopTime()); t.addCell(new TimeValue(state.getTimer().time())); t.addCell(state.getRecoverySource().getType().toString().toLowerCase(Locale.ROOT)); diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestCatSegmentReplicationAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestCatSegmentReplicationAction.java index 52890274d4198..aa325443ba6c9 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestCatSegmentReplicationAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestCatSegmentReplicationAction.java @@ -14,10 +14,10 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.client.node.NodeClient; import org.opensearch.common.Table; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentOpenSearchExtension; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.index.SegmentReplicationPerGroupStats; import org.opensearch.index.SegmentReplicationShardStats; import org.opensearch.indices.replication.SegmentReplicationState; @@ -27,6 +27,7 @@ import org.opensearch.rest.RestResponse; import org.opensearch.rest.action.RestResponseListener; +import java.time.Instant; import java.util.List; import java.util.Locale; import java.util.Map; @@ -170,7 +171,7 @@ public Table buildSegmentReplicationTable(RestRequest request, SegmentReplicatio t.addCell(state.getTargetNode().getHostName()); t.addCell(shardStats.getCheckpointsBehindCount()); t.addCell(new ByteSizeValue(shardStats.getBytesBehindCount())); - t.addCell(new TimeValue(shardStats.getCurrentReplicationTimeMillis())); + t.addCell(new TimeValue(shardStats.getCurrentReplicationLagMillis())); t.addCell(new TimeValue(shardStats.getLastCompletedReplicationTimeMillis())); t.addCell(perGroupStats.getRejectedRequestCount()); if (detailed) { @@ -180,8 +181,8 @@ public Table buildSegmentReplicationTable(RestRequest request, SegmentReplicatio t.addCell(String.format(Locale.ROOT, "%1.1f%%", state.getIndex().recoveredFilesPercent())); t.addCell(state.getIndex().recoveredBytes()); t.addCell(String.format(Locale.ROOT, "%1.1f%%", state.getIndex().recoveredBytesPercent())); - t.addCell(XContentOpenSearchExtension.DEFAULT_DATE_PRINTER.print(state.getTimer().startTime())); - t.addCell(XContentOpenSearchExtension.DEFAULT_DATE_PRINTER.print(state.getTimer().stopTime())); + t.addCell(XContentOpenSearchExtension.DEFAULT_FORMATTER.format(Instant.ofEpochMilli(state.getTimer().startTime()))); + t.addCell(XContentOpenSearchExtension.DEFAULT_FORMATTER.format(Instant.ofEpochMilli(state.getTimer().stopTime()))); t.addCell(state.getIndex().totalRecoverFiles()); t.addCell(state.getIndex().totalFileCount()); t.addCell(new ByteSizeValue(state.getIndex().totalRecoverBytes())); diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestFielddataAction.java 
b/server/src/main/java/org/opensearch/rest/action/cat/RestFielddataAction.java index a04bac0c30bc9..04bbdeeadc4c4 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestFielddataAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestFielddataAction.java @@ -37,7 +37,7 @@ import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.opensearch.client.node.NodeClient; import org.opensearch.common.Table; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; import org.opensearch.rest.action.RestResponseListener; diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java index a424f52c42b92..9dc711f804144 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java @@ -33,8 +33,6 @@ package org.opensearch.rest.action.cat; import org.opensearch.OpenSearchParseException; -import org.opensearch.action.ActionListener; -import org.opensearch.action.ActionResponse; import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.state.ClusterStateRequest; @@ -56,6 +54,8 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.Strings; import org.opensearch.index.IndexSettings; import org.opensearch.rest.RestRequest; @@ -581,6 +581,29 @@ protected Table getTableWithHeader(final RestRequest request) { "sibling:pri;alias:sqto,searchQueryTotal;default:false;text-align:right;desc:total query phase ops" ); table.addCell("pri.search.query_total", "default:false;text-align:right;desc:total query phase ops"); + table.addCell( + "search.concurrent_query_current", + "sibling:pri;alias:scqc,searchConcurrentQueryCurrent;default:false;text-align:right;desc:current concurrent query phase ops" + ); + table.addCell("pri.search.concurrent_query_current", "default:false;text-align:right;desc:current concurrent query phase ops"); + + table.addCell( + "search.concurrent_query_time", + "sibling:pri;alias:scqti,searchConcurrentQueryTime;default:false;text-align:right;desc:time spent in concurrent query phase" + ); + table.addCell("pri.search.concurrent_query_time", "default:false;text-align:right;desc:time spent in concurrent query phase"); + + table.addCell( + "search.concurrent_query_total", + "sibling:pri;alias:scqto,searchConcurrentQueryTotal;default:false;text-align:right;desc:total concurrent query phase ops" + ); + table.addCell("pri.search.concurrent_query_total", "default:false;text-align:right;desc:total concurrent query phase ops"); + + table.addCell( + "search.concurrent_avg_slice_count", + "sibling:pri;alias:casc,searchConcurrentAvgSliceCount;default:false;text-align:right;desc:average query concurrency" + ); + table.addCell("pri.search.concurrent_avg_slice_count", "default:false;text-align:right;desc:average query concurrency"); table.addCell( "search.scroll_current", @@ -890,6 +913,18 @@ Table buildTable( table.addCell(totalStats.getSearch() == null ?
null : totalStats.getSearch().getTotal().getQueryCount()); table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getQueryCount()); + table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getConcurrentQueryCurrent()); + table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getConcurrentQueryCurrent()); + + table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getConcurrentQueryTime()); + table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getConcurrentQueryTime()); + + table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getConcurrentQueryCount()); + table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getConcurrentQueryCount()); + + table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getConcurrentAvgSliceCount()); + table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getConcurrentAvgSliceCount()); + table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getScrollCurrent()); table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getScrollCurrent()); diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestNodeAttrsAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestNodeAttrsAction.java index 7b84b3f655522..5ead69320fefd 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestNodeAttrsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestNodeAttrsAction.java @@ -40,9 +40,9 @@ import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; -import org.opensearch.common.Strings; import org.opensearch.common.Table; import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.core.common.Strings; import org.opensearch.monitor.process.ProcessInfo; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java index 6346e5d23cd34..e11012a23fce7 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java @@ -44,12 +44,12 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; -import org.opensearch.common.Strings; import org.opensearch.common.Table; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.network.NetworkAddress; -import org.opensearch.common.transport.TransportAddress; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.http.HttpInfo; import org.opensearch.index.cache.query.QueryCacheStats; import org.opensearch.index.cache.request.RequestCacheStats; @@ -303,6 +303,22 @@ protected Table getTableWithHeader(final RestRequest request) { table.addCell("search.query_current", 
"alias:sqc,searchQueryCurrent;default:false;text-align:right;desc:current query phase ops"); table.addCell("search.query_time", "alias:sqti,searchQueryTime;default:false;text-align:right;desc:time spent in query phase"); table.addCell("search.query_total", "alias:sqto,searchQueryTotal;default:false;text-align:right;desc:total query phase ops"); + table.addCell( + "search.concurrent_query_current", + "alias:scqc,searchConcurrentQueryCurrent;default:false;text-align:right;desc:current concurrent query phase ops" + ); + table.addCell( + "search.concurrent_query_time", + "alias:scqti,searchConcurrentQueryTime;default:false;text-align:right;desc:time spent in concurrent query phase" + ); + table.addCell( + "search.concurrent_query_total", + "alias:scqto,searchConcurrentQueryTotal;default:false;text-align:right;desc:total concurrent query phase ops" + ); + table.addCell( + "search.concurrent_avg_slice_count", + "alias:casc,searchConcurrentAvgSliceCount;default:false;text-align:right;desc:average query concurrency" + ); table.addCell("search.scroll_current", "alias:scc,searchScrollCurrent;default:false;text-align:right;desc:open scroll contexts"); table.addCell( "search.scroll_time", @@ -529,6 +545,10 @@ Table buildTable( table.addCell(searchStats == null ? null : searchStats.getTotal().getQueryCurrent()); table.addCell(searchStats == null ? null : searchStats.getTotal().getQueryTime()); table.addCell(searchStats == null ? null : searchStats.getTotal().getQueryCount()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getConcurrentQueryCurrent()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getConcurrentQueryTime()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getConcurrentQueryCount()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getConcurrentAvgSliceCount()); table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollCurrent()); table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollTime()); table.addCell(searchStats == null ? 
null : searchStats.getTotal().getScrollCount()); diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestPitSegmentsAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestPitSegmentsAction.java index ba9606e8eb444..5fc6c961b4637 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestPitSegmentsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestPitSegmentsAction.java @@ -120,23 +120,7 @@ private Table buildTable(final RestRequest request, Map<String, IndexSegments> i Table table = getTableWithHeader(request); DiscoveryNodes nodes = this.nodesInCluster.get(); - table.startRow(); - table.addCell("index", "default:true;alias:i,idx;desc:index name"); - table.addCell("shard", "default:true;alias:s,sh;desc:shard name"); - table.addCell("prirep", "alias:p,pr,primaryOrReplica;default:true;desc:primary or replica"); - table.addCell("ip", "default:true;desc:ip of node where it lives"); - table.addCell("id", "default:false;desc:unique id of node where it lives"); - table.addCell("segment", "default:true;alias:seg;desc:segment name"); - table.addCell("generation", "default:true;alias:g,gen;text-align:right;desc:segment generation"); - table.addCell("docs.count", "default:true;alias:dc,docsCount;text-align:right;desc:number of docs in segment"); - table.addCell("docs.deleted", "default:true;alias:dd,docsDeleted;text-align:right;desc:number of deleted docs in segment"); - table.addCell("size", "default:true;alias:si;text-align:right;desc:segment size in bytes"); - table.addCell("size.memory", "default:true;alias:sm,sizeMemory;text-align:right;desc:segment memory in bytes"); - table.addCell("committed", "default:true;alias:ic,isCommitted;desc:is segment committed"); - table.addCell("searchable", "default:true;alias:is,isSearchable;desc:is segment searched"); - table.addCell("version", "default:true;alias:v,ver;desc:version"); - table.addCell("compound", "default:true;alias:ico,isCompound;desc:is segment compound"); - table.endRow(); + for (IndexSegments indexSegments : indicesSegments.values()) { Map<Integer, IndexShardSegments> shards = indexSegments.getShards(); for (IndexShardSegments indexShardSegments : shards.values()) { diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java index c5750ef4093c5..4cd10c6874e0a 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java @@ -219,6 +219,22 @@ protected Table getTableWithHeader(final RestRequest request) { table.addCell("search.query_current", "alias:sqc,searchQueryCurrent;default:false;text-align:right;desc:current query phase ops"); table.addCell("search.query_time", "alias:sqti,searchQueryTime;default:false;text-align:right;desc:time spent in query phase"); table.addCell("search.query_total", "alias:sqto,searchQueryTotal;default:false;text-align:right;desc:total query phase ops"); + table.addCell( + "search.concurrent_query_current", + "alias:scqc,searchConcurrentQueryCurrent;default:false;text-align:right;desc:current concurrent query phase ops" + ); + table.addCell( + "search.concurrent_query_time", + "alias:scqti,searchConcurrentQueryTime;default:false;text-align:right;desc:time spent in concurrent query phase" + ); + table.addCell( + "search.concurrent_query_total", + "alias:scqto,searchConcurrentQueryTotal;default:false;text-align:right;desc:total concurrent query phase 
ops" + ); + table.addCell( + "search.concurrent_avg_slice_count", + "alias:casc,searchConcurrentAvgSliceCount;default:false;text-align:right;desc:average query concurrency" + ); table.addCell("search.scroll_current", "alias:scc,searchScrollCurrent;default:false;text-align:right;desc:open scroll contexts"); table.addCell( "search.scroll_time", @@ -264,6 +280,7 @@ protected Table getTableWithHeader(final RestRequest request) { table.addCell("path.data", "alias:pd,dataPath;default:false;text-align:right;desc:shard data path"); table.addCell("path.state", "alias:ps,statsPath;default:false;text-align:right;desc:shard state path"); + table.addCell("docs.deleted", "alias:dd,docsDeleted;default:false;text-align:right;desc:number of deleted docs in shard"); table.endHeaders(); return table; @@ -399,6 +416,11 @@ Table buildTable(RestRequest request, ClusterStateResponse state, IndicesStatsRe table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getQueryCurrent())); table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getQueryTime())); table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getQueryCount())); + table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getConcurrentQueryCurrent())); + table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getConcurrentQueryTime())); + table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getConcurrentQueryCount())); + table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getConcurrentAvgSliceCount())); + table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getScrollCurrent())); table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getScrollTime())); table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getScrollCount())); @@ -422,6 +444,7 @@ Table buildTable(RestRequest request, ClusterStateResponse state, IndicesStatsRe table.addCell(getOrNull(shardStats, ShardStats::getDataPath, s -> s)); table.addCell(getOrNull(shardStats, ShardStats::getStatePath, s -> s)); + table.addCell(getOrNull(commonStats, CommonStats::getDocs, DocsStats::getDeleted)); table.endRow(); } diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestTable.java b/server/src/main/java/org/opensearch/rest/action/cat/RestTable.java index 6a5d4e40eb452..4f1090b163ee6 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestTable.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestTable.java @@ -36,19 +36,19 @@ import org.opensearch.common.Table; import org.opensearch.common.io.Streams; import org.opensearch.common.io.UTF8StreamWriter; -import org.opensearch.core.common.io.stream.BytesStream; import org.opensearch.common.regex.Regex; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.SizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.BytesStream; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestChannel; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; -import org.opensearch.core.rest.RestStatus; import java.io.IOException; import 
java.util.ArrayList; @@ -69,8 +69,8 @@ public class RestTable { public static RestResponse buildResponse(Table table, RestChannel channel) throws Exception { RestRequest request = channel.request(); - MediaType xContentType = getXContentType(request); - if (xContentType != null) { + MediaType mediaType = getXContentType(request); + if (mediaType != null) { return buildXContentBuilder(table, channel); } return buildTextPlainResponse(table, channel); diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestTasksAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestTasksAction.java index d30086fb8cd55..560b88787ae09 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestTasksAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestTasksAction.java @@ -38,10 +38,10 @@ import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; -import org.opensearch.common.Strings; import org.opensearch.common.Table; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.Strings; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; import org.opensearch.rest.action.RestResponseListener; diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestTemplatesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestTemplatesAction.java index 877c6d1b83b29..0e9ad8760d4b8 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestTemplatesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestTemplatesAction.java @@ -35,8 +35,8 @@ import org.opensearch.action.admin.cluster.state.ClusterStateRequest; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; import org.opensearch.client.node.NodeClient; -import org.opensearch.cluster.metadata.IndexTemplateMetadata; import org.opensearch.cluster.metadata.ComposableIndexTemplate; +import org.opensearch.cluster.metadata.IndexTemplateMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.Table; import org.opensearch.common.logging.DeprecationLogger; diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestThreadPoolAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestThreadPoolAction.java index 652bc448144e2..0393dd15c8238 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestThreadPoolAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestThreadPoolAction.java @@ -163,6 +163,10 @@ protected Table getTableWithHeader(final RestRequest request) { table.addCell("rejected", "alias:r;default:true;text-align:right;desc:number of rejected tasks"); table.addCell("largest", "alias:l;default:false;text-align:right;desc:highest number of seen active threads"); table.addCell("completed", "alias:c;default:false;text-align:right;desc:number of completed tasks"); + table.addCell( + "total_wait_time", + "alias:twt;default:false;text-align:right;desc:total time tasks spent waiting in thread_pool queue" + ); table.addCell("core", "alias:cr;default:false;text-align:right;desc:core number of threads in a scaling thread pool"); table.addCell("max", "alias:mx;default:false;text-align:right;desc:maximum number of threads in a scaling thread pool"); table.addCell("size", "alias:sz;default:false;text-align:right;desc:number of threads in a fixed thread pool"); @@ -267,6 +271,7 @@ private Table 
buildTable(RestRequest req, ClusterStateResponse state, NodesInfoR table.addCell(poolStats == null ? null : poolStats.getRejected()); table.addCell(poolStats == null ? null : poolStats.getLargest()); table.addCell(poolStats == null ? null : poolStats.getCompleted()); + table.addCell(poolStats == null ? null : poolStats.getWaitTime()); table.addCell(core); table.addCell(max); table.addCell(size); diff --git a/server/src/main/java/org/opensearch/rest/action/document/RestGetAction.java b/server/src/main/java/org/opensearch/rest/action/document/RestGetAction.java index bd7dd59b5d0ff..136a34789f4da 100644 --- a/server/src/main/java/org/opensearch/rest/action/document/RestGetAction.java +++ b/server/src/main/java/org/opensearch/rest/action/document/RestGetAction.java @@ -36,10 +36,10 @@ import org.opensearch.action.get.GetResponse; import org.opensearch.client.node.NodeClient; import org.opensearch.core.common.Strings; +import org.opensearch.core.rest.RestStatus; import org.opensearch.index.VersionType; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; -import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.action.RestActions; import org.opensearch.rest.action.RestToXContentListener; import org.opensearch.search.fetch.subphase.FetchSourceContext; @@ -49,10 +49,10 @@ import static java.util.Arrays.asList; import static java.util.Collections.unmodifiableList; -import static org.opensearch.rest.RestRequest.Method.GET; -import static org.opensearch.rest.RestRequest.Method.HEAD; import static org.opensearch.core.rest.RestStatus.NOT_FOUND; import static org.opensearch.core.rest.RestStatus.OK; +import static org.opensearch.rest.RestRequest.Method.GET; +import static org.opensearch.rest.RestRequest.Method.HEAD; /** * Transport action to get a document diff --git a/server/src/main/java/org/opensearch/rest/action/document/RestGetSourceAction.java b/server/src/main/java/org/opensearch/rest/action/document/RestGetSourceAction.java index b4abe449e027f..9bbe081a44204 100644 --- a/server/src/main/java/org/opensearch/rest/action/document/RestGetSourceAction.java +++ b/server/src/main/java/org/opensearch/rest/action/document/RestGetSourceAction.java @@ -38,8 +38,8 @@ import org.opensearch.action.get.GetResponse; import org.opensearch.client.node.NodeClient; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestChannel; @@ -54,9 +54,9 @@ import static java.util.Arrays.asList; import static java.util.Collections.unmodifiableList; +import static org.opensearch.core.rest.RestStatus.OK; import static org.opensearch.rest.RestRequest.Method.GET; import static org.opensearch.rest.RestRequest.Method.HEAD; -import static org.opensearch.core.rest.RestStatus.OK; /** * The REST handler for get source and head source APIs. 
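
A note on why the cat hunks in this change always land in pairs: Table matches row cells to header cells by position, so each new column, whether the concurrent-search columns or the thread pool total_wait_time above, needs one addCell in getTableWithHeader and one addCell at the same offset in buildTable. Below is a condensed sketch of that contract; the wrapper class and value are illustrative, but the Table calls are the ones these handlers use.

    import org.opensearch.common.Table;

    final class CatColumnSketch {
        /** One header cell plus the positionally matched row cell. */
        static Table oneColumn(long totalWaitTimeMillis) {
            final Table table = new Table();
            table.startHeaders();
            table.addCell("total_wait_time", "alias:twt;default:false;text-align:right;desc:total time tasks spent waiting in thread_pool queue");
            table.endHeaders();
            table.startRow();
            table.addCell(totalWaitTimeMillis); // must line up with the header declared above
            table.endRow();
            return table;
        }
    }
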
@@ -111,7 +111,7 @@ public RestResponse buildResponse(final GetResponse response) throws Exception { final XContentBuilder builder = channel.newBuilder(request.getMediaType(), false); final BytesReference source = response.getSourceInternal(); try (InputStream stream = source.streamInput()) { - builder.rawValue(stream, XContentHelper.xContentType(source)); + builder.rawValue(stream, MediaTypeRegistry.xContentType(source)); } return new BytesRestResponse(OK, builder); } diff --git a/server/src/main/java/org/opensearch/rest/action/ingest/RestPutPipelineAction.java b/server/src/main/java/org/opensearch/rest/action/ingest/RestPutPipelineAction.java index 56b3a88966472..55859bca58087 100644 --- a/server/src/main/java/org/opensearch/rest/action/ingest/RestPutPipelineAction.java +++ b/server/src/main/java/org/opensearch/rest/action/ingest/RestPutPipelineAction.java @@ -34,9 +34,9 @@ import org.opensearch.action.ingest.PutPipelineRequest; import org.opensearch.client.node.NodeClient; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.MediaType; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; diff --git a/server/src/main/java/org/opensearch/rest/action/ingest/RestSimulatePipelineAction.java b/server/src/main/java/org/opensearch/rest/action/ingest/RestSimulatePipelineAction.java index 1aa2c11e3f05e..8a5aa539d02fa 100644 --- a/server/src/main/java/org/opensearch/rest/action/ingest/RestSimulatePipelineAction.java +++ b/server/src/main/java/org/opensearch/rest/action/ingest/RestSimulatePipelineAction.java @@ -34,8 +34,8 @@ import org.opensearch.action.ingest.SimulatePipelineRequest; import org.opensearch.client.node.NodeClient; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.MediaType; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; diff --git a/server/src/main/java/org/opensearch/rest/action/search/RestGetAllPitsAction.java b/server/src/main/java/org/opensearch/rest/action/search/RestGetAllPitsAction.java index dedcabded9071..7e1e4da206ab8 100644 --- a/server/src/main/java/org/opensearch/rest/action/search/RestGetAllPitsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/search/RestGetAllPitsAction.java @@ -14,12 +14,12 @@ import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; -import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.action.RestBuilderListener; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/rest/action/search/RestMultiSearchAction.java b/server/src/main/java/org/opensearch/rest/action/search/RestMultiSearchAction.java index 5be5a54edddf5..4b11670450727 100644 --- a/server/src/main/java/org/opensearch/rest/action/search/RestMultiSearchAction.java +++ b/server/src/main/java/org/opensearch/rest/action/search/RestMultiSearchAction.java @@ -38,13 +38,13 @@ import 
org.opensearch.action.support.IndicesOptions; import org.opensearch.client.node.NodeClient; import org.opensearch.common.CheckedBiConsumer; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.XContent; import org.opensearch.core.xcontent.XContentParser; diff --git a/server/src/main/java/org/opensearch/rest/action/search/RestPutSearchPipelineAction.java b/server/src/main/java/org/opensearch/rest/action/search/RestPutSearchPipelineAction.java index 73eead1b91d57..8a31d39810bed 100644 --- a/server/src/main/java/org/opensearch/rest/action/search/RestPutSearchPipelineAction.java +++ b/server/src/main/java/org/opensearch/rest/action/search/RestPutSearchPipelineAction.java @@ -10,8 +10,8 @@ import org.opensearch.action.search.PutSearchPipelineRequest; import org.opensearch.client.node.NodeClient; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.MediaType; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; diff --git a/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java index 744bdcb3a3acc..80dc34c4d5d68 100644 --- a/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java +++ b/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java @@ -40,8 +40,8 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.client.node.NodeClient; import org.opensearch.common.Booleans; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.query.QueryBuilder; import org.opensearch.rest.BaseRestHandler; @@ -86,10 +86,13 @@ public class RestSearchAction extends BaseRestHandler { */ public static final String TOTAL_HITS_AS_INT_PARAM = "rest_total_hits_as_int"; public static final String TYPED_KEYS_PARAM = "typed_keys"; + public static final String INCLUDE_NAMED_QUERIES_SCORE_PARAM = "include_named_queries_score"; private static final Set<String> RESPONSE_PARAMS; static { - final Set<String> responseParams = new HashSet<>(Arrays.asList(TYPED_KEYS_PARAM, TOTAL_HITS_AS_INT_PARAM)); + final Set<String> responseParams = new HashSet<>( + Arrays.asList(TYPED_KEYS_PARAM, TOTAL_HITS_AS_INT_PARAM, INCLUDE_NAMED_QUERIES_SCORE_PARAM) + ); RESPONSE_PARAMS = Collections.unmodifiableSet(responseParams); } @@ -180,6 +183,12 @@ public static void parseSearchRequest( searchRequest.allowPartialSearchResults(request.paramAsBoolean("allow_partial_search_results", null)); } + if (request.hasParam("phase_took")) { + // only set if we have the parameter passed to override the cluster-level default + // else phaseTook = null + searchRequest.setPhaseTook(request.paramAsBoolean("phase_took", 
true)); + } + // do not allow 'query_and_fetch' or 'dfs_query_and_fetch' search types // from the REST layer. these modes are an internal optimization and should // not be specified explicitly by the user. @@ -203,6 +212,7 @@ public static void parseSearchRequest( searchRequest.pipeline(request.param("search_pipeline")); checkRestTotalHits(request, searchRequest); + request.paramAsBoolean(INCLUDE_NAMED_QUERIES_SCORE_PARAM, false); if (searchRequest.pointInTimeBuilder() != null) { preparePointInTime(searchRequest, request, namedWriteableRegistry); @@ -280,6 +290,10 @@ private static void parseSearchSource(final SearchSourceBuilder searchSourceBuil searchSourceBuilder.trackScores(request.paramAsBoolean("track_scores", false)); } + if (request.hasParam("include_named_queries_score")) { + searchSourceBuilder.includeNamedQueriesScores(request.paramAsBoolean("include_named_queries_score", false)); + } + if (request.hasParam("track_total_hits")) { if (Booleans.isBoolean(request.param("track_total_hits"))) { searchSourceBuilder.trackTotalHits(request.paramAsBoolean("track_total_hits", true)); diff --git a/server/src/main/java/org/opensearch/script/AggregationScript.java b/server/src/main/java/org/opensearch/script/AggregationScript.java index ef300e9473d3e..2e2f4af027e1b 100644 --- a/server/src/main/java/org/opensearch/script/AggregationScript.java +++ b/server/src/main/java/org/opensearch/script/AggregationScript.java @@ -34,6 +34,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Scorable; import org.opensearch.OpenSearchException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.lucene.ScorerAware; import org.opensearch.index.fielddata.ScriptDocValues; @@ -49,8 +50,9 @@ /** * Scripts for aggregations * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class AggregationScript implements ScorerAware { public static final String[] PARAMETERS = {}; @@ -168,8 +170,9 @@ public double runAsDouble() { /** * A factory to construct {@link AggregationScript} instances. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface LeafFactory { AggregationScript newInstance(LeafReaderContext ctx) throws IOException; diff --git a/server/src/main/java/org/opensearch/script/FieldScript.java b/server/src/main/java/org/opensearch/script/FieldScript.java index 82b5c9a088a2c..054b60bbaf3e3 100644 --- a/server/src/main/java/org/opensearch/script/FieldScript.java +++ b/server/src/main/java/org/opensearch/script/FieldScript.java @@ -33,6 +33,7 @@ package org.opensearch.script; import org.apache.lucene.index.LeafReaderContext; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.search.lookup.LeafSearchLookup; @@ -47,8 +48,9 @@ /** * A script to produce dynamic values for return fields. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class FieldScript { public static final String[] PARAMETERS = {}; @@ -113,8 +115,9 @@ public void setDocument(int docid) { /** * A factory to construct {@link FieldScript} instances. 
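
The phase_took handling in parseSearchRequest above is deliberately tri-state: the setter runs only when the URL actually carries the parameter, so an absent parameter leaves the field null and the cluster-level default wins. The bare paramAsBoolean call for include_named_queries_score similarly touches the parameter so request validation counts it as consumed. A condensed sketch of the override idiom, with an illustrative method name around the RestRequest calls used above:

    import org.opensearch.rest.RestRequest;

    final class ParamOverrideSketch {
        /** null = no override, defer to the cluster default; non-null = explicit per-request value. */
        static Boolean phaseTookOverride(RestRequest request) {
            return request.hasParam("phase_took") ? request.paramAsBoolean("phase_took", true) : null;
        }
    }
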
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface LeafFactory { FieldScript newInstance(LeafReaderContext ctx) throws IOException; } diff --git a/server/src/main/java/org/opensearch/script/JodaCompatibleZonedDateTime.java b/server/src/main/java/org/opensearch/script/JodaCompatibleZonedDateTime.java deleted file mode 100644 index 08306b3f275a8..0000000000000 --- a/server/src/main/java/org/opensearch/script/JodaCompatibleZonedDateTime.java +++ /dev/null @@ -1,407 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.script; - -import org.opensearch.common.SuppressForbidden; -import org.opensearch.common.time.DateFormatter; - -import java.time.DayOfWeek; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalDateTime; -import java.time.LocalTime; -import java.time.Month; -import java.time.OffsetDateTime; -import java.time.ZoneId; -import java.time.ZoneOffset; -import java.time.ZonedDateTime; -import java.time.chrono.ChronoZonedDateTime; -import java.time.chrono.Chronology; -import java.time.format.DateTimeFormatter; -import java.time.temporal.Temporal; -import java.time.temporal.TemporalAccessor; -import java.time.temporal.TemporalAdjuster; -import java.time.temporal.TemporalAmount; -import java.time.temporal.TemporalField; -import java.time.temporal.TemporalQuery; -import java.time.temporal.TemporalUnit; -import java.time.temporal.ValueRange; -import java.util.Objects; - -/** - * A wrapper around ZonedDateTime that exposes joda methods for backcompat. 
- * - * @opensearch.internal - */ -public class JodaCompatibleZonedDateTime - implements - Comparable<ChronoZonedDateTime<?>>, - ChronoZonedDateTime<LocalDate>, - Temporal, - TemporalAccessor { - - private static final DateFormatter DATE_FORMATTER = DateFormatter.forPattern("strict_date_time"); - private ZonedDateTime dt; - - public JodaCompatibleZonedDateTime(Instant instant, ZoneId zone) { - this.dt = ZonedDateTime.ofInstant(instant, zone); - } - - // access the underlying ZonedDateTime - public ZonedDateTime getZonedDateTime() { - return dt; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null) return false; - if (o.getClass() == JodaCompatibleZonedDateTime.class) { - JodaCompatibleZonedDateTime that = (JodaCompatibleZonedDateTime) o; - return Objects.equals(dt, that.dt); - } else if (o.getClass() == ZonedDateTime.class) { - ZonedDateTime that = (ZonedDateTime) o; - return Objects.equals(dt, that); - } - return false; - } - - @Override - public int hashCode() { - return dt.hashCode(); - } - - @Override - public String toString() { - return DATE_FORMATTER.format(dt); - } - - @Override - public String format(DateTimeFormatter formatter) { - return dt.format(formatter); - } - - @Override - public ValueRange range(TemporalField field) { - return dt.range(field); - } - - @Override - public int get(TemporalField field) { - return dt.get(field); - } - - @Override - public long getLong(TemporalField field) { - return dt.getLong(field); - } - - @Override - public Chronology getChronology() { - return dt.getChronology(); - } - - @Override - public ZoneOffset getOffset() { - return dt.getOffset(); - } - - @Override - public boolean isSupported(TemporalField field) { - return dt.isSupported(field); - } - - @Override - public boolean isSupported(TemporalUnit unit) { - return dt.isSupported(unit); - } - - @Override - public long toEpochSecond() { - return dt.toEpochSecond(); - } - - @Override - public int compareTo(ChronoZonedDateTime<?> other) { - return dt.compareTo(other); - } - - @Override - public boolean isBefore(ChronoZonedDateTime<?> other) { - return dt.isBefore(other); - } - - @Override - public boolean isAfter(ChronoZonedDateTime<?> other) { - return dt.isAfter(other); - } - - @Override - public boolean isEqual(ChronoZonedDateTime<?> other) { - return dt.isEqual(other); - } - - @Override - public LocalTime toLocalTime() { - return dt.toLocalTime(); - } - - public int getDayOfMonth() { - return dt.getDayOfMonth(); - } - - public int getDayOfYear() { - return dt.getDayOfYear(); - } - - public int getHour() { - return dt.getHour(); - } - - @Override - public LocalDate toLocalDate() { - return dt.toLocalDate(); - } - - @Override - public LocalDateTime toLocalDateTime() { - return dt.toLocalDateTime(); - } - - public int getMinute() { - return dt.getMinute(); - } - - public Month getMonth() { - return dt.getMonth(); - } - - public int getMonthValue() { - return dt.getMonthValue(); - } - - public int getNano() { - return dt.getNano(); - } - - public int getSecond() { - return dt.getSecond(); - } - - public int getYear() { - return dt.getYear(); - } - - @Override - public ZoneId getZone() { - return dt.getZone(); - } - - @Override - public ZonedDateTime minus(TemporalAmount delta) { - return dt.minus(delta); - } - - @Override - public ZonedDateTime minus(long amount, TemporalUnit unit) { - return dt.minus(amount, unit); - } - - @Override - public <R> R query(TemporalQuery<R> query) { - return dt.query(query); - } - - @Override - public long 
until(Temporal temporal, TemporalUnit temporalUnit) { - return dt.until(temporal, temporalUnit); - } - - public ZonedDateTime minusYears(long amount) { - return dt.minusYears(amount); - } - - public ZonedDateTime minusMonths(long amount) { - return dt.minusMonths(amount); - } - - public ZonedDateTime minusWeeks(long amount) { - return dt.minusWeeks(amount); - } - - public ZonedDateTime minusDays(long amount) { - return dt.minusDays(amount); - } - - public ZonedDateTime minusHours(long amount) { - return dt.minusHours(amount); - } - - public ZonedDateTime minusMinutes(long amount) { - return dt.minusMinutes(amount); - } - - public ZonedDateTime minusSeconds(long amount) { - return dt.minusSeconds(amount); - } - - public ZonedDateTime minusNanos(long amount) { - return dt.minusNanos(amount); - } - - @Override - public ZonedDateTime plus(TemporalAmount amount) { - return dt.plus(amount); - } - - @Override - public ZonedDateTime plus(long amount, TemporalUnit unit) { - return dt.plus(amount, unit); - } - - public ZonedDateTime plusDays(long amount) { - return dt.plusDays(amount); - } - - public ZonedDateTime plusHours(long amount) { - return dt.plusHours(amount); - } - - public ZonedDateTime plusMinutes(long amount) { - return dt.plusMinutes(amount); - } - - public ZonedDateTime plusMonths(long amount) { - return dt.plusMonths(amount); - } - - public ZonedDateTime plusNanos(long amount) { - return dt.plusNanos(amount); - } - - public ZonedDateTime plusSeconds(long amount) { - return dt.plusSeconds(amount); - } - - public ZonedDateTime plusWeeks(long amount) { - return dt.plusWeeks(amount); - } - - public ZonedDateTime plusYears(long amount) { - return dt.plusYears(amount); - } - - @Override - public Instant toInstant() { - return dt.toInstant(); - } - - public OffsetDateTime toOffsetDateTime() { - return dt.toOffsetDateTime(); - } - - @SuppressForbidden(reason = "only exposing the method as a passthrough") - public ZonedDateTime truncatedTo(TemporalUnit unit) { - return dt.truncatedTo(unit); - } - - @Override - public ZonedDateTime with(TemporalAdjuster adjuster) { - return dt.with(adjuster); - } - - @Override - public ZonedDateTime with(TemporalField field, long newValue) { - return dt.with(field, newValue); - } - - public ZonedDateTime withDayOfMonth(int value) { - return dt.withDayOfMonth(value); - } - - public ZonedDateTime withDayOfYear(int value) { - return dt.withDayOfYear(value); - } - - @Override - public ZonedDateTime withEarlierOffsetAtOverlap() { - return dt.withEarlierOffsetAtOverlap(); - } - - public ZonedDateTime withFixedOffsetZone() { - return dt.withFixedOffsetZone(); - } - - public ZonedDateTime withHour(int value) { - return dt.withHour(value); - } - - @Override - public ZonedDateTime withLaterOffsetAtOverlap() { - return dt.withLaterOffsetAtOverlap(); - } - - public ZonedDateTime withMinute(int value) { - return dt.withMinute(value); - } - - public ZonedDateTime withMonth(int value) { - return dt.withMonth(value); - } - - public ZonedDateTime withNano(int value) { - return dt.withNano(value); - } - - public ZonedDateTime withSecond(int value) { - return dt.withSecond(value); - } - - public ZonedDateTime withYear(int value) { - return dt.withYear(value); - } - - @Override - public ZonedDateTime withZoneSameLocal(ZoneId zone) { - return dt.withZoneSameLocal(zone); - } - - @Override - public ZonedDateTime withZoneSameInstant(ZoneId zone) { - return dt.withZoneSameInstant(zone); - } - - public DayOfWeek getDayOfWeekEnum() { - return dt.getDayOfWeek(); - } -} diff --git 
a/server/src/main/java/org/opensearch/script/NumberSortScript.java b/server/src/main/java/org/opensearch/script/NumberSortScript.java index fc4cfdb83f7cb..8d0eddc93842b 100644 --- a/server/src/main/java/org/opensearch/script/NumberSortScript.java +++ b/server/src/main/java/org/opensearch/script/NumberSortScript.java @@ -31,11 +31,12 @@ package org.opensearch.script; -import java.io.IOException; -import java.util.Map; import org.apache.lucene.index.LeafReaderContext; import org.opensearch.search.lookup.SearchLookup; +import java.io.IOException; +import java.util.Map; + /** * Script for number sorts * diff --git a/server/src/main/java/org/opensearch/script/ScoreScript.java b/server/src/main/java/org/opensearch/script/ScoreScript.java index 5c6553ffc2a28..70de636a655f2 100644 --- a/server/src/main/java/org/opensearch/script/ScoreScript.java +++ b/server/src/main/java/org/opensearch/script/ScoreScript.java @@ -33,11 +33,14 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Scorable; import org.opensearch.Version; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.index.fielddata.ScriptDocValues; +import org.opensearch.index.query.functionscore.TermFrequencyFunctionFactory.TermFrequencyFunctionName; import org.opensearch.search.lookup.LeafSearchLookup; +import org.opensearch.search.lookup.LeafTermFrequencyLookup; import org.opensearch.search.lookup.SearchLookup; import org.opensearch.search.lookup.SourceLookup; @@ -107,6 +110,9 @@ public Explanation get(double score, Explanation subQueryExplanation) { /** A leaf lookup for the bound segment this script will operate on. */ private final LeafSearchLookup leafLookup; + /** A leaf term frequency lookup for the bound segment this script will operate on. */ + private final LeafTermFrequencyLookup leafTermFrequencyLookup; + private DoubleSupplier scoreSupplier = () -> 0.0; private final int docBase; @@ -115,16 +121,18 @@ public Explanation get(double score, Explanation subQueryExplanation) { private String indexName = null; private Version indexVersion = null; - public ScoreScript(Map<String, Object> params, SearchLookup lookup, LeafReaderContext leafContext) { + public ScoreScript(Map<String, Object> params, SearchLookup lookup, IndexSearcher indexSearcher, LeafReaderContext leafContext) { // null check needed b/c of expression engine subclass if (lookup == null) { assert params == null; assert leafContext == null; this.params = null; this.leafLookup = null; + this.leafTermFrequencyLookup = null; this.docBase = 0; } else { this.leafLookup = lookup.getLeafSearchLookup(leafContext); + this.leafTermFrequencyLookup = new LeafTermFrequencyLookup(indexSearcher, leafLookup); params = new HashMap<>(params); params.putAll(leafLookup.asMap()); this.params = new DynamicMap(params, PARAMS_FUNCTIONS); @@ -144,6 +152,10 @@ public Map<String, ScriptDocValues<?>> getDoc() { return leafLookup.doc(); } + public Object getTermFrequency(TermFrequencyFunctionName functionName, String field, String val) throws IOException { + return leafTermFrequencyLookup.getTermFrequency(functionName, field, val, docId); + } + /** Set the current document to run the script on next. 
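The getTermFrequency passthrough added above is what the new ScoreScriptUtils wrappers (TermFreq, TotalTermFreq, SumTotalTermFreq, shown further down) delegate to. A minimal sketch of a caller, assuming only the APIs visible in this diff; the "title" field and "opensearch" term are illustrative, not part of the change:

import java.io.IOException;
import org.opensearch.script.ScoreScript;
import static org.opensearch.index.query.functionscore.TermFrequencyFunctionFactory.TermFrequencyFunctionName.TERM_FREQ;

public class TermFreqScoringSketch {
    // Computes a dampened per-document score from the raw term frequency.
    public static double score(ScoreScript script, int segmentDocId) throws IOException {
        script.setDocument(segmentDocId); // bind the segment-relative doc before reading stats
        int tf = (int) script.getTermFrequency(TERM_FREQ, "title", "opensearch"); // TERM_FREQ results are ints per the TermFreq wrapper
        return Math.log(1 + tf);
    }
}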
*/ public void setDocument(int docid) { this.docId = docid; @@ -268,7 +280,7 @@ public interface LeafFactory { */ public interface Factory extends ScriptFactory { - ScoreScript.LeafFactory newFactory(Map<String, Object> params, SearchLookup lookup); + ScoreScript.LeafFactory newFactory(Map<String, Object> params, SearchLookup lookup, IndexSearcher indexSearcher); } diff --git a/server/src/main/java/org/opensearch/script/ScoreScriptUtils.java b/server/src/main/java/org/opensearch/script/ScoreScriptUtils.java index b94ff77a1d0b7..2396aa369e98d 100644 --- a/server/src/main/java/org/opensearch/script/ScoreScriptUtils.java +++ b/server/src/main/java/org/opensearch/script/ScoreScriptUtils.java @@ -45,8 +45,12 @@ import org.opensearch.index.mapper.DateFieldMapper; import java.time.ZoneId; +import java.time.ZonedDateTime; import static org.opensearch.common.util.BitMixer.mix32; +import static org.opensearch.index.query.functionscore.TermFrequencyFunctionFactory.TermFrequencyFunctionName.SUM_TOTAL_TERM_FREQ; +import static org.opensearch.index.query.functionscore.TermFrequencyFunctionFactory.TermFrequencyFunctionName.TERM_FREQ; +import static org.opensearch.index.query.functionscore.TermFrequencyFunctionFactory.TermFrequencyFunctionName.TOTAL_TERM_FREQ; /** * Utilities for scoring scripts @@ -69,6 +73,69 @@ public static double sigmoid(double value, double k, double a) { return Math.pow(value, a) / (Math.pow(k, a) + Math.pow(value, a)); } + /** + * Retrieves the term frequency within a field for a specific term. + * + * @opensearch.internal + */ + public static final class TermFreq { + private final ScoreScript scoreScript; + + public TermFreq(ScoreScript scoreScript) { + this.scoreScript = scoreScript; + } + + public int termFreq(String field, String term) { + try { + return (int) scoreScript.getTermFrequency(TERM_FREQ, field, term); + } catch (Exception e) { + throw ExceptionsHelper.convertToOpenSearchException(e); + } + } + } + + /** + * Retrieves the total term frequency within a field for a specific term. + * + * @opensearch.internal + */ + public static final class TotalTermFreq { + private final ScoreScript scoreScript; + + public TotalTermFreq(ScoreScript scoreScript) { + this.scoreScript = scoreScript; + } + + public long totalTermFreq(String field, String term) { + try { + return (long) scoreScript.getTermFrequency(TOTAL_TERM_FREQ, field, term); + } catch (Exception e) { + throw ExceptionsHelper.convertToOpenSearchException(e); + } + } + } + + /** + * Retrieves the sum of total term frequencies within a field. + * + * @opensearch.internal + */ + public static final class SumTotalTermFreq { + private final ScoreScript scoreScript; + + public SumTotalTermFreq(ScoreScript scoreScript) { + this.scoreScript = scoreScript; + } + + public long sumTotalTermFreq(String field) { + try { + return (long) scoreScript.getTermFrequency(SUM_TOTAL_TERM_FREQ, field, null); + } catch (Exception e) { + throw ExceptionsHelper.convertToOpenSearchException(e); + } + } + } + /** * random score based on the documents' values of the given field * @@ -276,11 +343,14 @@ public double decayNumericGauss(double docValue) { /** * Limitations: since script functions don't have access to DateFieldMapper, * decay functions on dates are limited to dates in the default format and default time zone, + * Further, since the script module gets initialized before the feature flags are loaded, + * we cannot use the feature flag to gate the usage of the new default date format.
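For intuition about the decay classes that follow, here is a self-contained sketch of the linear variant. The scaling term matches DecayDateLinear's constructor below; the final return expression is inferred from the standard linear decay definition (the score equals decay at distance == scale), so treat it as illustrative rather than the verbatim implementation:

import java.time.Instant;

public class LinearDateDecaySketch {
    public static void main(String[] args) {
        long origin = Instant.parse("2024-01-01T00:00:00Z").toEpochMilli();
        long offset = 0L;
        long scale = 10L * 24 * 60 * 60 * 1000;  // 10 days in millis
        double decay = 0.5;                      // score at exactly one scale from origin
        double scaling = scale / (1.0 - decay);  // as in DecayDateLinear's constructor

        long docValue = Instant.parse("2024-01-06T00:00:00Z").toEpochMilli();
        // Math.abs(long) is a forbidden API in the real code, hence the comparison
        long diff = (docValue >= origin) ? (docValue - origin) : (origin - docValue);
        long distance = Math.max(0, diff - offset);
        double score = Math.max(0.0, (scaling - distance) / scaling);
        System.out.println(score); // 5 days out of a 20-day scaling window -> 0.75
    }
}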
* Also, using calculations with <code>now</code> are not allowed. * */ private static final ZoneId defaultZoneId = ZoneId.of("UTC"); - private static final DateMathParser dateParser = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.toDateMathParser(); + // ToDo: use new default date formatter once feature flag is removed + private static final DateMathParser dateParser = DateFieldMapper.LEGACY_DEFAULT_DATE_TIME_FORMATTER.toDateMathParser(); /** * Linear date decay @@ -301,7 +371,7 @@ public DecayDateLinear(String originStr, String scaleStr, String offsetStr, doub this.scaling = scale / (1.0 - decay); } - public double decayDateLinear(JodaCompatibleZonedDateTime docValueDate) { + public double decayDateLinear(ZonedDateTime docValueDate) { long docValue = docValueDate.toInstant().toEpochMilli(); // as java.lang.Math#abs(long) is a forbidden API, have to use this comparison instead long diff = (docValue >= origin) ? (docValue - origin) : (origin - docValue); @@ -329,7 +399,7 @@ public DecayDateExp(String originStr, String scaleStr, String offsetStr, double this.scaling = Math.log(decay) / scale; } - public double decayDateExp(JodaCompatibleZonedDateTime docValueDate) { + public double decayDateExp(ZonedDateTime docValueDate) { long docValue = docValueDate.toInstant().toEpochMilli(); long diff = (docValue >= origin) ? (docValue - origin) : (origin - docValue); long distance = Math.max(0, diff - offset); @@ -356,7 +426,7 @@ public DecayDateGauss(String originStr, String scaleStr, String offsetStr, doubl this.scaling = 0.5 * Math.pow(scale, 2.0) / Math.log(decay); } - public double decayDateGauss(JodaCompatibleZonedDateTime docValueDate) { + public double decayDateGauss(ZonedDateTime docValueDate) { long docValue = docValueDate.toInstant().toEpochMilli(); long diff = (docValue >= origin) ? 
(docValue - origin) : (origin - docValue); long distance = Math.max(0, diff - offset); diff --git a/server/src/main/java/org/opensearch/script/Script.java b/server/src/main/java/org/opensearch/script/Script.java index 37555dbec4dbc..9e74314c281cd 100644 --- a/server/src/main/java/org/opensearch/script/Script.java +++ b/server/src/main/java/org/opensearch/script/Script.java @@ -33,28 +33,29 @@ package org.opensearch.script; import org.opensearch.OpenSearchParseException; -import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.ParseField; -import org.opensearch.common.Strings; import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.logging.DeprecationLogger; -import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.AbstractObjectParser; -import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ObjectParser.ValueType; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.xcontent.XContentParser.Token; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; import java.io.IOException; import java.io.InputStream; @@ -69,9 +70,9 @@ * {@link Script} represents user-defined input that can be used to * compile and execute a script from the {@link ScriptService} * based on the {@link ScriptType}. - * + * <p> * There are three types of scripts specified by {@link ScriptType}. - * + * <p> * The following describes the expected parameters for each type of script: * * <ul> @@ -96,8 +97,9 @@ * </ul> * </ul> * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class Script implements ToXContentObject, Writeable { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(Script.class); @@ -181,8 +183,8 @@ private void setInline(XContentParser parser) { if (parser.currentToken() == Token.START_OBJECT) { // this is really for search templates, that need to be converted to json format XContentBuilder builder = XContentFactory.jsonBuilder(); - idOrCode = Strings.toString(builder.copyCurrentStructure(parser)); - options.put(CONTENT_TYPE_OPTION, XContentType.JSON.mediaType()); + idOrCode = builder.copyCurrentStructure(parser).toString(); + options.put(CONTENT_TYPE_OPTION, MediaTypeRegistry.JSON.mediaType()); } else { idOrCode = parser.text(); } @@ -346,16 +348,16 @@ public static Script parse(Settings settings) { /** * This will parse XContent into a {@link Script}.
The following formats can be parsed: - * + * <p> * The simple format defaults to an {@link ScriptType#INLINE} with no compiler options or user-defined params: - * + * <p> * Example: * {@code * "return Math.log(doc.popularity) * 100;" * } * * The complex format where {@link ScriptType} and idOrCode are required while lang, options and params are not required. - * + * <p> * {@code * { * // Exactly one of "id" or "source" must be specified @@ -389,7 +391,7 @@ public static Script parse(Settings settings) { * * This also handles templates in a special way. If a complexly formatted query is specified as another complex * JSON object the query is assumed to be a template, and the format will be preserved. - * + * <p> * {@code * { * "source" : { "query" : ... }, @@ -603,7 +605,7 @@ public void writeTo(StreamOutput out) throws IOException { /** * This will build scripts into the following XContent structure: - * + * <p> * {@code * { * "<(id, source)>" : "<idOrCode>", @@ -633,10 +635,10 @@ public void writeTo(StreamOutput out) throws IOException { * } * * Note that lang, options, and params will only be included if there have been any specified. - * + * <p> * This also handles templates in a special way. If the {@link Script#CONTENT_TYPE_OPTION} option * is provided and the {@link ScriptType#INLINE} is specified then the template will be preserved as a raw field. - * + * <p> * {@code * { * "source" : { "query" : ... }, diff --git a/server/src/main/java/org/opensearch/script/ScriptCache.java b/server/src/main/java/org/opensearch/script/ScriptCache.java index 874888b53b978..fb57e7cdfa5bd 100644 --- a/server/src/main/java/org/opensearch/script/ScriptCache.java +++ b/server/src/main/java/org/opensearch/script/ScriptCache.java @@ -34,14 +34,14 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.breaker.CircuitBreakingException; import org.opensearch.common.cache.Cache; import org.opensearch.common.cache.CacheBuilder; import org.opensearch.common.cache.RemovalListener; import org.opensearch.common.cache.RemovalNotification; import org.opensearch.common.collect.Tuple; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.CircuitBreakingException; import java.util.Map; import java.util.Objects; @@ -158,7 +158,7 @@ public ScriptContextStats stats(String context) { /** * Check whether there have been too many compilations within the last minute, throwing a circuit breaking exception if so. * This is a variant of the token bucket algorithm: https://en.wikipedia.org/wiki/Token_bucket - * + * <p> * It can be thought of as a bucket with water, every time the bucket is checked, water is added proportional to the amount of time that * elapsed since the last time it was checked. If there is enough water, some is removed and the request is allowed. If there is not * enough water the request is denied. 
Just like a normal bucket, if water is added that overflows the bucket, the extra water/capacity diff --git a/server/src/main/java/org/opensearch/script/ScriptCacheStats.java b/server/src/main/java/org/opensearch/script/ScriptCacheStats.java index be7a1c9542a60..d06ffa70235f8 100644 --- a/server/src/main/java/org/opensearch/script/ScriptCacheStats.java +++ b/server/src/main/java/org/opensearch/script/ScriptCacheStats.java @@ -32,6 +32,7 @@ package org.opensearch.script; +import org.opensearch.common.annotation.DeprecatedApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -52,6 +53,7 @@ * * @deprecated This class is deprecated in favor of ScriptStats and ScriptContextStats. It is removed in OpenSearch 2.0. */ +@DeprecatedApi(since = "2.0.0") @Deprecated public class ScriptCacheStats implements Writeable, ToXContentFragment { private final Map<String, ScriptStats> context; diff --git a/server/src/main/java/org/opensearch/script/ScriptContext.java b/server/src/main/java/org/opensearch/script/ScriptContext.java index 27ad1f3ce03c8..2180b6059dbef 100644 --- a/server/src/main/java/org/opensearch/script/ScriptContext.java +++ b/server/src/main/java/org/opensearch/script/ScriptContext.java @@ -32,6 +32,7 @@ package org.opensearch.script; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; import org.opensearch.common.unit.TimeValue; @@ -39,7 +40,7 @@ /** * The information necessary to compile and run a script. - * + * <p> * A {@link ScriptContext} contains the information related to a single use case and the interfaces * and methods necessary for a {@link ScriptEngine} to implement. * <p> @@ -70,8 +71,9 @@ * If the variable name starts with an underscore, for example, {@code _score}, the needs method would * be {@code boolean needs_score()}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ScriptContext<FactoryType> { /** A unique identifier for this context. 
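A self-contained sketch of the water-bucket check described in the ScriptCache comment above, under the assumption that one compilation costs one unit and elapsed time refills the bucket at a fixed rate (class and constant names are illustrative, not the ScriptCache internals):

public class CompilationRateLimiter {
    private final double maxTokens;      // bucket capacity; overflow is discarded
    private final double tokensPerNano;  // refill rate, e.g. 75 compilations per 5 minutes
    private double tokens;
    private long lastChecked;

    public CompilationRateLimiter(double maxTokens, double tokensPerNano) {
        this.maxTokens = maxTokens;
        this.tokensPerNano = tokensPerNano;
        this.tokens = maxTokens;
        this.lastChecked = System.nanoTime();
    }

    public synchronized boolean tryCompile() {
        long now = System.nanoTime();
        // add water proportional to the elapsed time, capped at the bucket size
        tokens = Math.min(maxTokens, tokens + (now - lastChecked) * tokensPerNano);
        lastChecked = now;
        if (tokens >= 1.0) {
            tokens -= 1.0;   // enough water: remove some and allow the request
            return true;
        }
        return false;        // not enough water: deny (circuit-break in the real code)
    }
}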
*/ diff --git a/server/src/main/java/org/opensearch/script/ScriptContextInfo.java b/server/src/main/java/org/opensearch/script/ScriptContextInfo.java index d3a64c207e0ce..ee4c0aa09388e 100644 --- a/server/src/main/java/org/opensearch/script/ScriptContextInfo.java +++ b/server/src/main/java/org/opensearch/script/ScriptContextInfo.java @@ -32,6 +32,7 @@ package org.opensearch.script; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -60,8 +61,9 @@ /** * Information about a script context * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ScriptContextInfo implements ToXContentObject, Writeable { public final String name; public final ScriptMethodInfo execute; @@ -202,8 +204,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws /** * Script method information * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class ScriptMethodInfo implements ToXContentObject, Writeable { public final String name, returnType; public final List<ParameterInfo> parameters; diff --git a/server/src/main/java/org/opensearch/script/ScriptContextStats.java b/server/src/main/java/org/opensearch/script/ScriptContextStats.java index 16f4a7e313326..15a5596609586 100644 --- a/server/src/main/java/org/opensearch/script/ScriptContextStats.java +++ b/server/src/main/java/org/opensearch/script/ScriptContextStats.java @@ -32,6 +32,7 @@ package org.opensearch.script; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -44,8 +45,9 @@ /** * Stats for a script context * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ScriptContextStats implements Writeable, ToXContentFragment, Comparable<ScriptContextStats> { private final String context; private final long compilations; diff --git a/server/src/main/java/org/opensearch/script/ScriptEngine.java b/server/src/main/java/org/opensearch/script/ScriptEngine.java index 418fbed52da30..560727bc8fa97 100644 --- a/server/src/main/java/org/opensearch/script/ScriptEngine.java +++ b/server/src/main/java/org/opensearch/script/ScriptEngine.java @@ -32,6 +32,8 @@ package org.opensearch.script; +import org.opensearch.common.annotation.PublicApi; + import java.io.Closeable; import java.io.IOException; import java.util.Map; @@ -40,8 +42,9 @@ /** * A script language implementation. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ScriptEngine extends Closeable { /** diff --git a/server/src/main/java/org/opensearch/script/ScriptException.java b/server/src/main/java/org/opensearch/script/ScriptException.java index c2ca58be9507d..2b8f869819ae4 100644 --- a/server/src/main/java/org/opensearch/script/ScriptException.java +++ b/server/src/main/java/org/opensearch/script/ScriptException.java @@ -33,13 +33,12 @@ package org.opensearch.script; import org.opensearch.OpenSearchException; -import org.opensearch.common.Strings; +import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.core.rest.RestStatus; import java.io.IOException; import java.util.Arrays; @@ -165,7 +164,7 @@ public String toJsonString() { json.startObject(); toXContent(json, ToXContent.EMPTY_PARAMS); json.endObject(); - return Strings.toString(json); + return json.toString(); } catch (IOException e) { throw new RuntimeException(e); } diff --git a/server/src/main/java/org/opensearch/script/ScriptLanguagesInfo.java b/server/src/main/java/org/opensearch/script/ScriptLanguagesInfo.java index 64332910147de..1a6626db64811 100644 --- a/server/src/main/java/org/opensearch/script/ScriptLanguagesInfo.java +++ b/server/src/main/java/org/opensearch/script/ScriptLanguagesInfo.java @@ -32,8 +32,9 @@ package org.opensearch.script; -import org.opensearch.core.ParseField; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; +import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -56,11 +57,11 @@ /** * The allowable types, languages and their corresponding contexts. When serialized there is a top level <code>types_allowed</code> list, * meant to reflect the setting <code>script.allowed_types</code> with the allowed types (eg <code>inline</code>, <code>stored</code>). - * + * <p> * The top-level <code>language_contexts</code> list of objects have the <code>language</code> (eg. <code>painless</code>, * <code>mustache</code>) and a list of <code>contexts</code> available for the language. It is the responsibility of the caller to ensure * these contexts are filtered by the <code>script.allowed_contexts</code> setting. 
- * + * <p> * The json serialization of the object has the form: * <code> * { @@ -91,8 +92,9 @@ * } * </code> * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ScriptLanguagesInfo implements ToXContentObject, Writeable { private static final ParseField TYPES_ALLOWED = new ParseField("types_allowed"); private static final ParseField LANGUAGE_CONTEXTS = new ParseField("language_contexts"); diff --git a/server/src/main/java/org/opensearch/script/ScriptMetadata.java b/server/src/main/java/org/opensearch/script/ScriptMetadata.java index 7036e9ad4d53e..fd92d8f7f02db 100644 --- a/server/src/main/java/org/opensearch/script/ScriptMetadata.java +++ b/server/src/main/java/org/opensearch/script/ScriptMetadata.java @@ -38,11 +38,11 @@ import org.opensearch.cluster.DiffableUtils; import org.opensearch.cluster.NamedDiff; import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -183,9 +183,9 @@ static ScriptMetadata deleteStoredScript(ScriptMetadata previous, String id) { /** * This will parse XContent into {@link ScriptMetadata}. - * + * <p> * The following format will be parsed: - * + * <p> * {@code * { * "<id>" : "<{@link StoredScriptSource#fromXContent(XContentParser, boolean)}>", @@ -356,7 +356,7 @@ public void writeTo(StreamOutput out) throws IOException { /** * This will write XContent from {@link ScriptMetadata}. 
The following format will be written: - * + * <p> * {@code * { * "<id>" : "<{@link StoredScriptSource#toXContent(XContentBuilder, Params)}>", diff --git a/server/src/main/java/org/opensearch/script/ScriptService.java b/server/src/main/java/org/opensearch/script/ScriptService.java index 11e72ee6a8b66..d3c8861dbc5d7 100644 --- a/server/src/main/java/org/opensearch/script/ScriptService.java +++ b/server/src/main/java/org/opensearch/script/ScriptService.java @@ -35,7 +35,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.ResourceNotFoundException; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; import org.opensearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; import org.opensearch.action.admin.cluster.storedscripts.PutStoredScriptRequest; @@ -47,12 +46,14 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.service.ClusterManagerTaskThrottler; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; import java.io.Closeable; @@ -75,8 +76,9 @@ /** * Service for scripting * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ScriptService implements Closeable, ClusterStateApplier { private static final Logger logger = LogManager.getLogger(ScriptService.class); diff --git a/server/src/main/java/org/opensearch/script/ScriptStats.java b/server/src/main/java/org/opensearch/script/ScriptStats.java index aeb3645242799..850f7f9d07070 100644 --- a/server/src/main/java/org/opensearch/script/ScriptStats.java +++ b/server/src/main/java/org/opensearch/script/ScriptStats.java @@ -32,6 +32,7 @@ package org.opensearch.script; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -48,8 +49,9 @@ /** * Stats for scripts * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ScriptStats implements Writeable, ToXContentFragment { private final List<ScriptContextStats> contextStats; private final long compilations; diff --git a/server/src/main/java/org/opensearch/script/ScriptType.java b/server/src/main/java/org/opensearch/script/ScriptType.java index 5f505c781bd0a..c39edcbcb12c4 100644 --- a/server/src/main/java/org/opensearch/script/ScriptType.java +++ b/server/src/main/java/org/opensearch/script/ScriptType.java @@ -32,6 +32,7 @@ package org.opensearch.script; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -45,8 +46,9 @@ * It's also used by {@link ScriptService} to determine whether or not a {@link Script} is * allowed to be executed based on both default and user-defined settings.
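To make the INLINE/STORED distinction concrete: a hedged sketch of building both kinds of script reference, assuming Script's conventional four-argument constructor (type, lang, idOrCode, params) and that stored scripts pass a null lang; the script source and id are made up for illustration:

import java.util.Collections;
import java.util.Map;
import org.opensearch.script.Script;
import org.opensearch.script.ScriptType;

public class ScriptTypeSketch {
    public static void main(String[] args) {
        // INLINE: the source travels with the request and is compiled on use
        Script inline = new Script(
            ScriptType.INLINE,
            "painless",
            "return Math.log(doc['popularity'].value) * 100;",
            Map.of()
        );
        // STORED: only the id travels; the source lives in cluster state
        Script stored = new Script(ScriptType.STORED, null, "my-stored-script", Collections.emptyMap());
        System.out.println(inline.getType() + " / " + stored.getType());
    }
}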
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum ScriptType implements Writeable { /** diff --git a/server/src/main/java/org/opensearch/script/StoredScriptSource.java b/server/src/main/java/org/opensearch/script/StoredScriptSource.java index 9d3af9e4c9caf..fc19022657f9e 100644 --- a/server/src/main/java/org/opensearch/script/StoredScriptSource.java +++ b/server/src/main/java/org/opensearch/script/StoredScriptSource.java @@ -35,25 +35,24 @@ import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.Diff; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; -import org.opensearch.common.Strings; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.logging.DeprecationLogger; -import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ObjectParser.ValueType; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.xcontent.XContentParser.Token; -import org.opensearch.common.xcontent.XContentType; import java.io.IOException; import java.io.InputStream; @@ -67,8 +66,9 @@ * {@link StoredScriptSource} represents user-defined parameters for a script * saved in the {@link ClusterState}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class StoredScriptSource extends AbstractDiffable<StoredScriptSource> implements Writeable, ToXContentObject { /** @@ -124,9 +124,9 @@ private void setSource(XContentParser parser) { try { if (parser.currentToken() == Token.START_OBJECT) { // this is really for search templates, that need to be converted to json format - XContentBuilder builder = XContentFactory.jsonBuilder(); - source = Strings.toString(builder.copyCurrentStructure(parser)); - options.put(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType()); + XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder(); + source = builder.copyCurrentStructure(parser).toString(); + options.put(Script.CONTENT_TYPE_OPTION, MediaTypeRegistry.JSON.mediaType()); } else { source = parser.text(); } @@ -310,7 +310,7 @@ public static StoredScriptSource parse(BytesReference content, MediaType mediaTy /** * This will parse XContent into a {@link StoredScriptSource}. The following format is what will be parsed: - * + * <p> * {@code * { * "script" : { @@ -389,7 +389,7 @@ public void writeTo(StreamOutput out) throws IOException { /** * This will write XContent from a {@link StoredScriptSource}. 
The following format will be written: - * + * <p> * {@code * { * "script" : { diff --git a/server/src/main/java/org/opensearch/script/StringSortScript.java b/server/src/main/java/org/opensearch/script/StringSortScript.java index 5b7ac1fc853e5..4491f318d841a 100644 --- a/server/src/main/java/org/opensearch/script/StringSortScript.java +++ b/server/src/main/java/org/opensearch/script/StringSortScript.java @@ -31,11 +31,12 @@ package org.opensearch.script; -import java.io.IOException; -import java.util.Map; import org.apache.lucene.index.LeafReaderContext; import org.opensearch.search.lookup.SearchLookup; +import java.io.IOException; +import java.util.Map; + /** * Script for sorting strings * diff --git a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java index f377a5e315e1b..061aa2f6e5896 100644 --- a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java @@ -45,11 +45,11 @@ import org.opensearch.action.search.SearchType; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; +import org.opensearch.common.SetOnce; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.lucene.search.Queries; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; -import org.opensearch.common.util.FeatureFlags; -import org.opensearch.common.lease.Releasables; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; @@ -148,6 +148,8 @@ final class DefaultSearchContext extends SearchContext { private SortAndFormats sort; private Float minimumScore; private boolean trackScores = false; // when sorting, track scores as well... + + private boolean includeNamedQueriesScore = false; private int trackTotalHitsUpTo = SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO; private FieldDoc searchAfter; private CollapseContext collapse; @@ -183,6 +185,8 @@ final class DefaultSearchContext extends SearchContext { private final QueryShardContext queryShardContext; private final FetchPhase fetchPhase; private final Function<SearchSourceBuilder, InternalAggregation.ReduceContextBuilder> requestToAggReduceContextBuilder; + private final boolean concurrentSearchSettingsEnabled; + private final SetOnce<Boolean> requestShouldUseConcurrentSearch = new SetOnce<>(); DefaultSearchContext( ReaderContext readerContext, @@ -213,13 +217,14 @@ final class DefaultSearchContext extends SearchContext { this.indexShard = readerContext.indexShard(); this.clusterService = clusterService; this.engineSearcher = readerContext.acquireSearcher("search"); + this.concurrentSearchSettingsEnabled = evaluateConcurrentSegmentSearchSettings(executor); this.searcher = new ContextIndexSearcher( engineSearcher.getIndexReader(), engineSearcher.getSimilarity(), engineSearcher.getQueryCache(), engineSearcher.getQueryCachingPolicy(), lowLevelCancellation, - executor, + concurrentSearchSettingsEnabled ? 
executor : null, this ); this.relativeTimeSupplier = relativeTimeSupplier; @@ -632,6 +637,17 @@ public boolean trackScores() { return this.trackScores; } + @Override + public SearchContext includeNamedQueriesScore(boolean includeNamedQueriesScore) { + this.includeNamedQueriesScore = includeNamedQueriesScore; + return this; + } + + @Override + public boolean includeNamedQueriesScore() { + return includeNamedQueriesScore; + } + @Override public SearchContext trackTotalHitsUpTo(int trackTotalHitsUpTo) { this.trackTotalHitsUpTo = trackTotalHitsUpTo; @@ -874,22 +890,29 @@ public Profilers getProfilers() { } /** - * Returns concurrent segment search status for the search context + * Returns concurrent segment search status for the search context. This should only be used after request parsing, during which requestShouldUseConcurrentSearch will be set. */ @Override - public boolean isConcurrentSegmentSearchEnabled() { - if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH) - && (clusterService != null) - && (searcher().getExecutor() != null)) { - return indexService.getIndexSettings() - .getSettings() - .getAsBoolean( - IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), - clusterService.getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) - ); - } else { - return false; - } + public boolean shouldUseConcurrentSearch() { + assert requestShouldUseConcurrentSearch.get() != null : "requestShouldUseConcurrentSearch must be set"; + return concurrentSearchSettingsEnabled && Boolean.TRUE.equals(requestShouldUseConcurrentSearch.get()); + } + + /** + * Evaluate if parsed request supports concurrent segment search + */ + public void evaluateRequestShouldUseConcurrentSearch() { + if (sort != null && sort.isSortOnTimeSeriesField()) { + requestShouldUseConcurrentSearch.set(false); + } else if (aggregations() != null + && aggregations().factories() != null + && !aggregations().factories().allFactoriesSupportConcurrentSearch()) { + requestShouldUseConcurrentSearch.set(false); + } else if (terminateAfter != DEFAULT_TERMINATE_AFTER) { + requestShouldUseConcurrentSearch.set(false); + } else { + requestShouldUseConcurrentSearch.set(true); + } } public void setProfilers(Profilers profilers) { @@ -919,7 +942,7 @@ public ReaderContext readerContext() { @Override public InternalAggregation.ReduceContext partialOnShard() { InternalAggregation.ReduceContext rc = requestToAggReduceContextBuilder.apply(request.source()).forPartialReduction(); - rc.setSliceLevel(isConcurrentSegmentSearchEnabled()); + rc.setSliceLevel(shouldUseConcurrentSearch()); return rc; } @@ -932,4 +955,38 @@ public void setBucketCollectorProcessor(BucketCollectorProcessor bucketCollector public BucketCollectorProcessor bucketCollectorProcessor() { return bucketCollectorProcessor; } + + /** + * Evaluate based on cluster and index settings if concurrent segment search should be used for this request context + * @return true: use concurrent search + * false: otherwise + */ + private boolean evaluateConcurrentSegmentSearchSettings(Executor concurrentSearchExecutor) { + if ((clusterService != null) && (concurrentSearchExecutor != null)) { + return indexService.getIndexSettings() + .getSettings() + .getAsBoolean( + IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), + clusterService.getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); + } else { + return false; + } + } + + @Override + public int getTargetMaxSliceCount() { + if (shouldUseConcurrentSearch() == false) { + throw new 
IllegalStateException("Target slice count should not be used when concurrent search is disabled"); + } + return clusterService.getClusterSettings().get(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING); + } + + @Override + public boolean shouldUseTimeSeriesDescSortOptimization() { + return indexShard.isTimeSeriesDescSortOptimizationEnabled() + && sort != null + && sort.isSortOnTimeSeriesField() + && sort.sort.getSort()[0].getReverse() == false; + } } diff --git a/server/src/main/java/org/opensearch/search/DocValueFormat.java b/server/src/main/java/org/opensearch/search/DocValueFormat.java index de0fb813eb652..9fae14f69b0af 100644 --- a/server/src/main/java/org/opensearch/search/DocValueFormat.java +++ b/server/src/main/java/org/opensearch/search/DocValueFormat.java @@ -34,16 +34,16 @@ import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.util.BytesRef; +import org.opensearch.Version; import org.opensearch.common.Numbers; -import org.opensearch.core.common.io.stream.NamedWriteable; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.joda.Joda; -import org.opensearch.common.joda.JodaDateFormatter; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.network.InetAddresses; import org.opensearch.common.network.NetworkAddress; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.time.DateMathParser; +import org.opensearch.core.common.io.stream.NamedWriteable; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.geometry.utils.Geohash; import org.opensearch.index.mapper.DateFieldMapper; import org.opensearch.search.aggregations.bucket.GeoTileUtils; @@ -65,8 +65,9 @@ /** * A formatter for values as returned by the fielddata/doc-values APIs. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface DocValueFormat extends NamedWriteable { long MASK_2_63 = 0x8000000000000000L; BigInteger BIGINTEGER_2_64_MINUS_ONE = BigInteger.ONE.shiftLeft(64).subtract(BigInteger.ONE); // 2^64 -1 @@ -244,13 +245,19 @@ public DateTime(DateFormatter formatter, ZoneId timeZone, DateFieldMapper.Resolu } public DateTime(StreamInput in) throws IOException { - String datePattern = in.readString(); + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + this.formatter = DateFormatter.forPattern(in.readString(), in.readOptionalString()); + } else { + this.formatter = DateFormatter.forPattern(in.readString()); + } + + this.parser = formatter.toDateMathParser(); String zoneId = in.readString(); this.timeZone = ZoneId.of(zoneId); this.resolution = DateFieldMapper.Resolution.ofOrdinal(in.readVInt()); - final boolean isJoda = in.readBoolean(); - this.formatter = isJoda ? 
Joda.forPattern(datePattern) : DateFormatter.forPattern(datePattern); - this.parser = formatter.toDateMathParser(); + if (in.getVersion().before(Version.V_3_0_0)) { + in.readBoolean(); // ignore deprecated joda + } } @Override @@ -260,11 +267,19 @@ public String getWriteableName() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeString(formatter.pattern()); + if (out.getVersion().before(Version.V_2_12_0) && formatter.equals(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER)) { + out.writeString(DateFieldMapper.LEGACY_DEFAULT_DATE_TIME_FORMATTER.pattern()); // required for backwards compatibility + } else { + out.writeString(formatter.pattern()); + } + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeOptionalString(formatter.printPattern()); + } out.writeString(timeZone.getId()); out.writeVInt(resolution.ordinal()); - // in order not to loose information if the formatter is a joda we send a flag - out.writeBoolean(formatter instanceof JodaDateFormatter);// todo pg consider refactor to isJoda method.. + if (out.getVersion().before(Version.V_3_0_0)) { + out.writeBoolean(false); // ignore deprecated joda flag + } } public DateMathParser getDateMathParser() { diff --git a/server/src/main/java/org/opensearch/search/GenericSearchExtBuilder.java b/server/src/main/java/org/opensearch/search/GenericSearchExtBuilder.java new file mode 100644 index 0000000000000..35e68f78774e3 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/GenericSearchExtBuilder.java @@ -0,0 +1,165 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.search; + +import org.opensearch.core.ParseField; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParseException; +import org.opensearch.core.xcontent.XContentParser; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * This is a catch-all SearchExtBuilder implementation that is used when an appropriate SearchExtBuilder + * is not found during SearchResponse's fromXContent operation. 
+ */ +public final class GenericSearchExtBuilder extends SearchExtBuilder { + + public final static ParseField EXT_BUILDER_NAME = new ParseField("generic_ext"); + + private final Object genericObj; + private final ValueType valueType; + + enum ValueType { + SIMPLE(0), + MAP(1), + LIST(2); + + private final int value; + + ValueType(int value) { + this.value = value; + } + + public int getValue() { + return value; + } + + static ValueType fromInt(int value) { + switch (value) { + case 0: + return SIMPLE; + case 1: + return MAP; + case 2: + return LIST; + default: + throw new IllegalArgumentException("Unsupported value: " + value); + } + } + } + + public GenericSearchExtBuilder(Object genericObj, ValueType valueType) { + this.genericObj = genericObj; + this.valueType = valueType; + } + + public GenericSearchExtBuilder(StreamInput in) throws IOException { + valueType = ValueType.fromInt(in.readInt()); + switch (valueType) { + case SIMPLE: + genericObj = in.readGenericValue(); + break; + case MAP: + genericObj = in.readMap(); + break; + case LIST: + genericObj = in.readList(r -> r.readGenericValue()); + break; + default: + throw new IllegalStateException("Unable to construct GenericSearchExtBuilder from incoming stream."); + } + } + + public static GenericSearchExtBuilder fromXContent(XContentParser parser) throws IOException { + // Look at the parser's next token. + // If it's START_OBJECT, parse as map, if it's START_ARRAY, parse as list, else + // parse as simpleVal + XContentParser.Token token = parser.currentToken(); + ValueType valueType; + Object genericObj; + if (token == XContentParser.Token.START_OBJECT) { + genericObj = parser.map(); + valueType = ValueType.MAP; + } else if (token == XContentParser.Token.START_ARRAY) { + genericObj = parser.list(); + valueType = ValueType.LIST; + } else if (token.isValue()) { + genericObj = parser.objectText(); + valueType = ValueType.SIMPLE; + } else { + throw new XContentParseException("Unknown token: " + token); + } + + return new GenericSearchExtBuilder(genericObj, valueType); + } + + @Override + public String getWriteableName() { + return EXT_BUILDER_NAME.getPreferredName(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeInt(valueType.getValue()); + switch (valueType) { + case SIMPLE: + out.writeGenericValue(genericObj); + break; + case MAP: + out.writeMap((Map<String, Object>) genericObj); + break; + case LIST: + out.writeCollection((List<Object>) genericObj, StreamOutput::writeGenericValue); + break; + default: + throw new IllegalStateException("Unknown valueType: " + valueType); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + switch (valueType) { + case SIMPLE: + return builder.field(EXT_BUILDER_NAME.getPreferredName(), genericObj); + case MAP: + return builder.field(EXT_BUILDER_NAME.getPreferredName(), (Map<String, Object>) genericObj); + case LIST: + return builder.field(EXT_BUILDER_NAME.getPreferredName(), (List<Object>) genericObj); + default: + return null; + } + } + + // We need this for the equals method. 
+ Object getValue() { + return genericObj; + } + + @Override + public int hashCode() { + return Objects.hash(this.valueType, this.genericObj); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (!(obj instanceof GenericSearchExtBuilder)) { + return false; + } + return Objects.equals(getValue(), ((GenericSearchExtBuilder) obj).getValue()) + && Objects.equals(valueType, ((GenericSearchExtBuilder) obj).valueType); + } +} diff --git a/server/src/main/java/org/opensearch/search/MultiValueMode.java b/server/src/main/java/org/opensearch/search/MultiValueMode.java index 0fbd41f062710..a99da674836f2 100644 --- a/server/src/main/java/org/opensearch/search/MultiValueMode.java +++ b/server/src/main/java/org/opensearch/search/MultiValueMode.java @@ -42,6 +42,7 @@ import org.apache.lucene.util.BitSet; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -59,8 +60,9 @@ /** * Defines what values to pick in the case a document contains multiple values for a particular field. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum MultiValueMode implements Writeable { /** * Pick the sum of all the values. @@ -542,7 +544,7 @@ public static MultiValueMode fromString(String sortMode) { * Return a {@link NumericDocValues} instance that can be used to sort documents * with this mode and the provided values. When a document has no value, * <code>missingValue</code> is returned. - * + * <p> * Allowed Modes: SUM, AVG, MEDIAN, MIN, MAX */ public NumericDocValues select(final SortedNumericDocValues values) { @@ -583,12 +585,12 @@ protected long pick(SortedNumericDocValues values) throws IOException { /** * Return a {@link NumericDocValues} instance that can be used to sort root documents * with this mode, the provided values and filters for root/inner documents. - * + * <p> * For every root document, the values of its inner documents will be aggregated. * If none of the inner documents has a value, then <code>missingValue</code> is returned. - * + * <p> * Allowed Modes: SUM, AVG, MIN, MAX - * + * <p> * NOTE: Calling the returned instance on docs that are not root docs is illegal * The returned instance can only be evaluate the current and upcoming docs */ @@ -658,7 +660,7 @@ protected long pick( * Return a {@link NumericDoubleValues} instance that can be used to sort documents * with this mode and the provided values. When a document has no value, * <code>missingValue</code> is returned. - * + * <p> * Allowed Modes: SUM, AVG, MEDIAN, MIN, MAX */ public NumericDoubleValues select(final SortedNumericDoubleValues values) { @@ -683,6 +685,11 @@ public boolean advanceExact(int target) throws IOException { public double doubleValue() throws IOException { return this.value; } + + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } }; } } @@ -694,12 +701,12 @@ protected double pick(SortedNumericDoubleValues values) throws IOException { /** * Return a {@link NumericDoubleValues} instance that can be used to sort root documents * with this mode, the provided values and filters for root/inner documents. - * + * <p> * For every root document, the values of its inner documents will be aggregated. 
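The select(...) overloads in MultiValueMode all reduce one document's sorted values to a single value according to the chosen mode. A plain-Java sketch of those reduction semantics, detached from the Lucene doc-values iterators; the MEDIAN line shows the lower-middle pick for an odd count (the real implementation averages the two middles for even counts):

import java.util.Arrays;

public class MultiValueModeSketch {
    public static void main(String[] args) {
        long[] values = { 3, 7, 10 }; // one document's field values, in sorted order
        System.out.println(Arrays.stream(values).sum());                 // SUM    -> 20
        System.out.println(Arrays.stream(values).sum() / values.length); // AVG    -> 6 (long division in this sketch)
        System.out.println(values[0]);                                   // MIN    -> 3
        System.out.println(values[values.length - 1]);                   // MAX    -> 10
        System.out.println(values[(values.length - 1) / 2]);             // MEDIAN -> 7
    }
}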
* If none of the inner documents has a value, then <code>missingValue</code> is returned. - * + * <p> * Allowed Modes: SUM, AVG, MIN, MAX - * + * <p> * NOTE: Calling the returned instance on docs that are not root docs is illegal * The returned instance can only be evaluate the current and upcoming docs */ @@ -743,6 +750,11 @@ public boolean advanceExact(int parentDoc) throws IOException { public double doubleValue() throws IOException { return lastEmittedValue; } + + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } }; } @@ -761,7 +773,7 @@ protected double pick( * Return a {@link BinaryDocValues} instance that can be used to sort documents * with this mode and the provided values. When a document has no value, * <code>missingValue</code> is returned. - * + * <p> * Allowed Modes: MIN, MAX */ public BinaryDocValues select(final SortedBinaryDocValues values, final BytesRef missingValue) { @@ -816,12 +828,12 @@ protected BytesRef pick(SortedBinaryDocValues values) throws IOException { /** * Return a {@link BinaryDocValues} instance that can be used to sort root documents * with this mode, the provided values and filters for root/inner documents. - * + * <p> * For every root document, the values of its inner documents will be aggregated. * If none of the inner documents has a value, then <code>missingValue</code> is returned. - * + * <p> * Allowed Modes: MIN, MAX - * + * <p> * NOTE: Calling the returned instance on docs that are not root docs is illegal * The returned instance can only be evaluate the current and upcoming docs */ @@ -889,7 +901,7 @@ protected BytesRef pick( /** * Return a {@link SortedDocValues} instance that can be used to sort documents * with this mode and the provided values. - * + * <p> * Allowed Modes: MIN, MAX */ public SortedDocValues select(final SortedSetDocValues values) { @@ -949,11 +961,11 @@ protected int pick(SortedSetDocValues values) throws IOException { /** * Return a {@link SortedDocValues} instance that can be used to sort root documents * with this mode, the provided values and filters for root/inner documents. - * + * <p> * For every root document, the values of its inner documents will be aggregated. - * + * <p> * Allowed Modes: MIN, MAX - * + * <p> * NOTE: Calling the returned instance on docs that are not root docs is illegal * The returned instance can only be evaluate the current and upcoming docs */ diff --git a/server/src/main/java/org/opensearch/search/RescoreDocIds.java b/server/src/main/java/org/opensearch/search/RescoreDocIds.java index 880f34a924f2a..a916e96638366 100644 --- a/server/src/main/java/org/opensearch/search/RescoreDocIds.java +++ b/server/src/main/java/org/opensearch/search/RescoreDocIds.java @@ -32,6 +32,7 @@ package org.opensearch.search; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -46,8 +47,9 @@ * (i.e., documents that will be rescored by query rescorers) need to be serialized/ deserialized between search phases. * A {@link RescoreDocIds} encapsulates the top K results for each rescorer by its ordinal index. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class RescoreDocIds implements Writeable { public static final RescoreDocIds EMPTY = new RescoreDocIds(Collections.emptyMap()); diff --git a/server/src/main/java/org/opensearch/search/Scroll.java b/server/src/main/java/org/opensearch/search/Scroll.java index 562979b98ec7d..3e634a378717f 100644 --- a/server/src/main/java/org/opensearch/search/Scroll.java +++ b/server/src/main/java/org/opensearch/search/Scroll.java @@ -32,10 +32,11 @@ package org.opensearch.search; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.TimeValue; import java.io.IOException; import java.util.Objects; @@ -44,8 +45,9 @@ * A scroll enables scrolling of search request. It holds a {@link #keepAlive()} time that * will control how long to keep the scrolling resources open. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class Scroll implements Writeable { private final TimeValue keepAlive; diff --git a/server/src/main/java/org/opensearch/search/SearchExtBuilder.java b/server/src/main/java/org/opensearch/search/SearchExtBuilder.java index 4d86c6c2e2277..557269a1e45b1 100644 --- a/server/src/main/java/org/opensearch/search/SearchExtBuilder.java +++ b/server/src/main/java/org/opensearch/search/SearchExtBuilder.java @@ -33,6 +33,7 @@ package org.opensearch.search; import org.opensearch.common.CheckedFunction; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.NamedWriteable; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -47,17 +48,18 @@ * Any state needs to be serialized as part of the {@link Writeable#writeTo(StreamOutput)} method and * read from the incoming stream, usually done adding a constructor that takes {@link StreamInput} as * an argument. - * + * <p> * Registration happens through {@link SearchPlugin#getSearchExts()}, which also needs a {@link CheckedFunction} that's able to parse * the incoming request from the REST layer into the proper {@link SearchExtBuilder} subclass. - * + * <p> * {@link #getWriteableName()} must return the same name as the one used for the registration * of the {@link SearchExtSpec}. 
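Concretely, a minimal plugin-side SearchExtBuilder subclass honoring the contract described here might look like the following sketch; the "my_ext" name and single string payload are hypothetical, and registration through SearchPlugin#getSearchExts() is omitted:

import java.io.IOException;
import java.util.Objects;
import org.opensearch.core.common.io.stream.StreamInput;
import org.opensearch.core.common.io.stream.StreamOutput;
import org.opensearch.core.xcontent.XContentBuilder;
import org.opensearch.search.SearchExtBuilder;

public class MyExtBuilder extends SearchExtBuilder {
    public static final String NAME = "my_ext"; // must match the SearchExtSpec registration
    private final String tag;

    public MyExtBuilder(String tag) {
        this.tag = tag;
    }

    public MyExtBuilder(StreamInput in) throws IOException {
        this.tag = in.readString(); // mirrors writeTo for wire round-trips
    }

    @Override
    public String getWriteableName() {
        return NAME;
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(tag);
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        return builder.field(NAME, tag);
    }

    @Override
    public int hashCode() {
        return Objects.hash(NAME, tag);
    }

    @Override
    public boolean equals(Object obj) {
        return obj instanceof MyExtBuilder && Objects.equals(tag, ((MyExtBuilder) obj).tag);
    }
}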
* * @see SearchExtSpec * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class SearchExtBuilder implements NamedWriteable, ToXContentFragment { public abstract int hashCode(); diff --git a/server/src/main/java/org/opensearch/search/SearchHit.java b/server/src/main/java/org/opensearch/search/SearchHit.java index 4524359359d6d..6391353cfe5b1 100644 --- a/server/src/main/java/org/opensearch/search/SearchHit.java +++ b/server/src/main/java/org/opensearch/search/SearchHit.java @@ -37,63 +37,67 @@ import org.opensearch.Version; import org.opensearch.action.OriginalIndices; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.document.DocumentField; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; -import org.opensearch.common.Strings; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.compress.CompressorFactory; -import org.opensearch.common.document.DocumentField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.common.text.Text; -import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.ParseField; +import org.opensearch.core.compress.CompressorRegistry; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ObjectParser.ValueType; +import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.xcontent.XContentParser.Token; -import org.opensearch.core.xcontent.ToXContent; import org.opensearch.index.mapper.IgnoredFieldMapper; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.SourceFieldMapper; import org.opensearch.index.seqno.SequenceNumbers; -import org.opensearch.core.index.shard.ShardId; +import org.opensearch.rest.action.search.RestSearchAction; import org.opensearch.search.fetch.subphase.highlight.HighlightField; import org.opensearch.search.lookup.SourceLookup; import org.opensearch.transport.RemoteClusterAware; import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.stream.Collectors; import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; import static java.util.Collections.unmodifiableMap; import static org.opensearch.common.lucene.Lucene.readExplanation; import static org.opensearch.common.lucene.Lucene.writeExplanation; -import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; -import static org.opensearch.core.xcontent.XContentParserUtils.ensureFieldName; import static 
org.opensearch.core.xcontent.ConstructingObjectParser.constructorArg; import static org.opensearch.core.xcontent.ConstructingObjectParser.optionalConstructorArg; +import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; +import static org.opensearch.core.xcontent.XContentParserUtils.ensureFieldName; /** * A single search hit. * * @see SearchHits * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class SearchHit implements Writeable, ToXContentObject, Iterable<DocumentField> { private final transient int docId; @@ -118,7 +122,7 @@ public final class SearchHit implements Writeable, ToXContentObject, Iterable<Do private SearchSortValues sortValues = SearchSortValues.EMPTY; - private String[] matchedQueries = Strings.EMPTY_ARRAY; + private Map<String, Float> matchedQueries = new HashMap<>(); private Explanation explanation; @@ -201,10 +205,20 @@ public SearchHit(StreamInput in) throws IOException { sortValues = new SearchSortValues(in); size = in.readVInt(); - if (size > 0) { - matchedQueries = new String[size]; + if (in.getVersion().onOrAfter(Version.V_2_13_0)) { + if (size > 0) { + Map<String, Float> tempMap = in.readMap(StreamInput::readString, StreamInput::readFloat); + matchedQueries = tempMap.entrySet() + .stream() + .sorted(Map.Entry.comparingByKey()) + .collect( + Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue, (oldValue, newValue) -> oldValue, LinkedHashMap::new) + ); + } + } else { + matchedQueries = new LinkedHashMap<>(size); for (int i = 0; i < size; i++) { - matchedQueries[i] = in.readString(); + matchedQueries.put(in.readString(), Float.NaN); } } // we call the setter here because that also sets the local index parameter @@ -222,36 +236,6 @@ public SearchHit(StreamInput in) throws IOException { } } - private Map<String, DocumentField> readFields(StreamInput in) throws IOException { - Map<String, DocumentField> fields; - int size = in.readVInt(); - if (size == 0) { - fields = emptyMap(); - } else if (size == 1) { - DocumentField hitField = new DocumentField(in); - fields = singletonMap(hitField.getName(), hitField); - } else { - fields = new HashMap<>(size); - for (int i = 0; i < size; i++) { - DocumentField field = new DocumentField(in); - fields.put(field.getName(), field); - } - fields = unmodifiableMap(fields); - } - return fields; - } - - private void writeFields(StreamOutput out, Map<String, DocumentField> fields) throws IOException { - if (fields == null) { - out.writeVInt(0); - } else { - out.writeVInt(fields.size()); - for (DocumentField field : fields.values()) { - field.writeTo(out); - } - } - } - private static final Text SINGLE_MAPPING_TYPE = new Text(MapperService.SINGLE_MAPPING_NAME); @Override @@ -284,11 +268,13 @@ public void writeTo(StreamOutput out) throws IOException { } sortValues.writeTo(out); - if (matchedQueries.length == 0) { - out.writeVInt(0); + out.writeVInt(matchedQueries.size()); + if (out.getVersion().onOrAfter(Version.V_2_13_0)) { + if (!matchedQueries.isEmpty()) { + out.writeMap(matchedQueries, StreamOutput::writeString, StreamOutput::writeFloat); + } } else { - out.writeVInt(matchedQueries.length); - for (String matchedFilter : matchedQueries) { + for (String matchedFilter : matchedQueries.keySet()) { out.writeString(matchedFilter); } } @@ -383,7 +369,7 @@ public BytesReference getSourceRef() { } try { - this.source = CompressorFactory.uncompressIfNeeded(this.source); + this.source = CompressorRegistry.uncompressIfNeeded(this.source); return this.source; } catch 
(IOException e) { throw new OpenSearchParseException("failed to decompress source", e); @@ -456,11 +442,11 @@ public DocumentField field(String fieldName) { } /* - * Adds a new DocumentField to the map in case both parameters are not null. - * */ + * Adds a new DocumentField to the map in case both parameters are not null. + * */ public void setDocumentField(String fieldName, DocumentField field) { if (fieldName == null || field == null) return; - if (documentFields.size() == 0) this.documentFields = new HashMap<>(); + if (documentFields.isEmpty()) this.documentFields = new HashMap<>(); this.documentFields.put(fieldName, field); } @@ -473,7 +459,7 @@ public DocumentField removeDocumentField(String fieldName) { * were required to be loaded. */ public Map<String, DocumentField> getFields() { - if (metaFields.size() > 0 || documentFields.size() > 0) { + if (!metaFields.isEmpty() || !documentFields.isEmpty()) { final Map<String, DocumentField> fields = new HashMap<>(); fields.putAll(metaFields); fields.putAll(documentFields); @@ -558,14 +544,45 @@ public String getClusterAlias() { } public void matchedQueries(String[] matchedQueries) { - this.matchedQueries = matchedQueries; + if (matchedQueries != null) { + for (String query : matchedQueries) { + this.matchedQueries.put(query, Float.NaN); + } + } + } + + public void matchedQueriesWithScores(Map<String, Float> matchedQueries) { + if (matchedQueries != null) { + this.matchedQueries = matchedQueries; + } } /** * The set of query and filter names the query matched with. Mainly makes sense for compound filters and queries. */ public String[] getMatchedQueries() { - return this.matchedQueries; + return matchedQueries == null ? new String[0] : matchedQueries.keySet().toArray(new String[0]); + } + + /** + * Returns the score of the provided named query if it matches. + * <p> + * If the 'include_named_queries_score' request parameter is not set, this method will return {@link Float#NaN} + * for each matched named query instead of a numerical score. + * </p> + * + * @param name The name of the query to retrieve the score for. + * @return The score of the named query; {@link Float#NaN} if 'include_named_queries_score' is not set, or {@code null} if the query did not match. + */ + public Float getMatchedQueryScore(String name) { + return getMatchedQueriesAndScores().get(name); + } + + /** + * @return The map of the named queries that matched and their associated score. + */ + public Map<String, Float> getMatchedQueriesAndScores() { + return matchedQueries == null ? 
Collections.emptyMap() : matchedQueries; } /** @@ -652,7 +669,7 @@ public XContentBuilder toInnerXContent(XContentBuilder builder, Params params) t for (DocumentField field : metaFields.values()) { // ignore empty metadata fields - if (field.getValues().size() == 0) { + if (field.getValues().isEmpty()) { continue; } // _ignored is the only multi-valued meta field @@ -668,10 +685,10 @@ public XContentBuilder toInnerXContent(XContentBuilder builder, Params params) t } if (documentFields.isEmpty() == false && // ignore fields all together if they are all empty - documentFields.values().stream().anyMatch(df -> df.getValues().size() > 0)) { + documentFields.values().stream().anyMatch(df -> !df.getValues().isEmpty())) { builder.startObject(Fields.FIELDS); for (DocumentField field : documentFields.values()) { - if (field.getValues().size() > 0) { + if (!field.getValues().isEmpty()) { field.toXContent(builder, params); } } @@ -685,12 +702,21 @@ public XContentBuilder toInnerXContent(XContentBuilder builder, Params params) t builder.endObject(); } sortValues.toXContent(builder, params); - if (matchedQueries.length > 0) { - builder.startArray(Fields.MATCHED_QUERIES); - for (String matchedFilter : matchedQueries) { - builder.value(matchedFilter); + if (!matchedQueries.isEmpty()) { + boolean includeMatchedQueriesScore = params.paramAsBoolean(RestSearchAction.INCLUDE_NAMED_QUERIES_SCORE_PARAM, false); + if (includeMatchedQueriesScore) { + builder.startObject(Fields.MATCHED_QUERIES); + for (Map.Entry<String, Float> entry : matchedQueries.entrySet()) { + builder.field(entry.getKey(), entry.getValue()); + } + builder.endObject(); + } else { + builder.startArray(Fields.MATCHED_QUERIES); + for (String matchedFilter : matchedQueries.keySet()) { + builder.value(matchedFilter); + } + builder.endArray(); } - builder.endArray(); } if (getExplanation() != null) { builder.field(Fields._EXPLANATION); @@ -795,7 +821,27 @@ public static void declareInnerHitsParseFields(ObjectParser<Map<String, Object>, (p, c) -> parseInnerHits(p), new ParseField(Fields.INNER_HITS) ); - parser.declareStringArray((map, list) -> map.put(Fields.MATCHED_QUERIES, list), new ParseField(Fields.MATCHED_QUERIES)); + parser.declareField((p, map, context) -> { + XContentParser.Token token = p.currentToken(); + Map<String, Float> matchedQueries = new LinkedHashMap<>(); + if (token == XContentParser.Token.START_OBJECT) { + String fieldName = null; + while ((token = p.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + fieldName = p.currentName(); + } else if (token.isValue()) { + matchedQueries.put(fieldName, p.floatValue()); + } + } + } else if (token == XContentParser.Token.START_ARRAY) { + while (p.nextToken() != XContentParser.Token.END_ARRAY) { + matchedQueries.put(p.text(), Float.NaN); + } + } else { + throw new IllegalStateException("expected object or array but got [" + token + "]"); + } + map.put(Fields.MATCHED_QUERIES, matchedQueries); + }, new ParseField(Fields.MATCHED_QUERIES), ObjectParser.ValueType.OBJECT_ARRAY); parser.declareField( (map, list) -> map.put(Fields.SORT, list), SearchSortValues::fromXContent, @@ -826,7 +872,7 @@ public static SearchHit createFromMap(Map<String, Object> values) { assert shardId.getIndexName().equals(index); searchHit.shard(new SearchShardTarget(nodeId, shardId, clusterAlias, OriginalIndices.NONE)); } else { - // these fields get set anyways when setting the shard target, + // these fields get set anyway when setting the shard target, // but we set them 
explicitly when we don't have enough info to rebuild the shard target searchHit.index = index; searchHit.clusterAlias = clusterAlias; @@ -840,10 +886,7 @@ public static SearchHit createFromMap(Map<String, Object> values) { searchHit.sourceRef(get(SourceFieldMapper.NAME, values, null)); searchHit.explanation(get(Fields._EXPLANATION, values, null)); searchHit.setInnerHits(get(Fields.INNER_HITS, values, null)); - List<String> matchedQueries = get(Fields.MATCHED_QUERIES, values, null); - if (matchedQueries != null) { - searchHit.matchedQueries(matchedQueries.toArray(new String[0])); - } + searchHit.matchedQueriesWithScores(get(Fields.MATCHED_QUERIES, values, null)); return searchHit; } @@ -963,7 +1006,7 @@ public boolean equals(Object obj) { && Objects.equals(documentFields, other.documentFields) && Objects.equals(metaFields, other.metaFields) && Objects.equals(getHighlightFields(), other.getHighlightFields()) - && Arrays.equals(matchedQueries, other.matchedQueries) + && Objects.equals(getMatchedQueriesAndScores(), other.getMatchedQueriesAndScores()) && Objects.equals(explanation, other.explanation) && Objects.equals(shard, other.shard) && Objects.equals(innerHits, other.innerHits) @@ -983,7 +1026,7 @@ public int hashCode() { documentFields, metaFields, getHighlightFields(), - Arrays.hashCode(matchedQueries), + getMatchedQueriesAndScores(), explanation, shard, innerHits, @@ -995,8 +1038,9 @@ public int hashCode() { /** * Encapsulates the nested identity of a hit. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class NestedIdentity implements Writeable, ToXContentFragment { private static final String _NESTED = "_nested"; @@ -1035,7 +1079,7 @@ public int getOffset() { /** * Returns the next child nested level if there is any, otherwise <code>null</code> is returned. 
- * + * <p> * In the case of mappings with multiple levels of nested object fields */ public NestedIdentity getChild() { @@ -1113,6 +1157,6 @@ public int hashCode() { @Override public String toString() { - return Strings.toString(XContentType.JSON, this, true, true); + return Strings.toString(MediaTypeRegistry.JSON, this, true, true); } } diff --git a/server/src/main/java/org/opensearch/search/SearchHits.java b/server/src/main/java/org/opensearch/search/SearchHits.java index c16a724eb0c75..8232643b353f5 100644 --- a/server/src/main/java/org/opensearch/search/SearchHits.java +++ b/server/src/main/java/org/opensearch/search/SearchHits.java @@ -36,10 +36,11 @@ import org.apache.lucene.search.TotalHits; import org.apache.lucene.search.TotalHits.Relation; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.lucene.Lucene; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.lucene.Lucene; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -57,8 +58,9 @@ /** * Encapsulates the results of a search operation * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class SearchHits implements Writeable, ToXContentFragment, Iterable<SearchHit> { public static SearchHits empty() { return empty(true); diff --git a/server/src/main/java/org/opensearch/search/SearchModule.java b/server/src/main/java/org/opensearch/search/SearchModule.java index d401a3143338b..88218896dceae 100644 --- a/server/src/main/java/org/opensearch/search/SearchModule.java +++ b/server/src/main/java/org/opensearch/search/SearchModule.java @@ -37,14 +37,13 @@ import org.opensearch.common.Nullable; import org.opensearch.common.geo.GeoShapeType; import org.opensearch.common.geo.ShapesAvailability; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry.Entry; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.ParseFieldRegistry; import org.opensearch.core.ParseField; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry.Entry; +import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.query.BoolQueryBuilder; @@ -327,7 +326,7 @@ public class SearchModule { /** * Constructs a new SearchModule object - * + * <p> * NOTE: This constructor should not be called in production unless an accurate {@link Settings} object is provided. * When constructed, a static flag is set in Lucene {@link BooleanQuery#setMaxClauseCount} according to the settings. 
* @param settings Current settings @@ -1279,7 +1278,7 @@ private SearchPlugin.ExecutorServiceProvider registerIndexSearcherExecutorProvid } } - if (provider == null && FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { + if (provider == null) { provider = (ThreadPool threadPool) -> threadPool.executor(INDEX_SEARCHER); } return provider; diff --git a/server/src/main/java/org/opensearch/search/SearchParseException.java b/server/src/main/java/org/opensearch/search/SearchParseException.java index ea21162dc7aea..389df1001d0c9 100644 --- a/server/src/main/java/org/opensearch/search/SearchParseException.java +++ b/server/src/main/java/org/opensearch/search/SearchParseException.java @@ -35,9 +35,9 @@ import org.opensearch.common.Nullable; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentLocation; -import org.opensearch.core.rest.RestStatus; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/search/SearchPhaseResult.java b/server/src/main/java/org/opensearch/search/SearchPhaseResult.java index 1b4cebbe91a3e..a351b3bd2dda6 100644 --- a/server/src/main/java/org/opensearch/search/SearchPhaseResult.java +++ b/server/src/main/java/org/opensearch/search/SearchPhaseResult.java @@ -33,13 +33,14 @@ package org.opensearch.search; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.search.fetch.FetchSearchResult; import org.opensearch.search.internal.ShardSearchContextId; import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.search.query.QuerySearchResult; -import org.opensearch.transport.TransportResponse; import java.io.IOException; @@ -51,8 +52,9 @@ * across search phases to ensure the same point in time snapshot is used for querying and * fetching etc. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class SearchPhaseResult extends TransportResponse { private SearchShardTarget searchShardTarget; diff --git a/server/src/main/java/org/opensearch/search/SearchService.java b/server/src/main/java/org/opensearch/search/SearchService.java index c9c70ed52c376..62eb597e387e6 100644 --- a/server/src/main/java/org/opensearch/search/SearchService.java +++ b/server/src/main/java/org/opensearch/search/SearchService.java @@ -37,7 +37,6 @@ import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.TopDocs; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.action.OriginalIndices; import org.opensearch.action.search.DeletePitInfo; @@ -53,24 +52,27 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.CheckedSupplier; import org.opensearch.common.UUIDs; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.component.AbstractLifecycleComponent; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; -import org.opensearch.common.util.CollectionUtils; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.util.concurrent.ConcurrentMapLong; import org.opensearch.common.util.io.IOUtils; -import org.opensearch.common.lease.Releasable; -import org.opensearch.common.lease.Releasables; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; @@ -85,9 +87,7 @@ import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.SearchOperationListener; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.IndicesService; -import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason; import org.opensearch.node.ResponseCollectorService; import org.opensearch.script.FieldScript; @@ -146,6 +146,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.concurrent.ExecutionException; @@ -249,7 +250,21 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv public static final Setting<Boolean> 
CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING = Setting.boolSetting( "search.concurrent_segment_search.enabled", - true, + false, Property.Dynamic, Property.NodeScope ); + + // setting to configure the maximum number of slices created per search request using the OpenSearch custom slice computation mechanism. The default Lucene + // mechanism will not be used if this setting is set to a value > 0 + public static final String CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY = "search.concurrent.max_slice_count"; + public static final int CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_DEFAULT_VALUE = 0; + + // value == 0 means the default Lucene slice computation will be used + public static final Setting<Integer> CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING = Setting.intSetting( + CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY, + CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_DEFAULT_VALUE, + CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_DEFAULT_VALUE, Property.Dynamic, Property.NodeScope ); @@ -1223,6 +1238,7 @@ private void processFailure(ReaderContext context, Exception exc) { private void parseSource(DefaultSearchContext context, SearchSourceBuilder source, boolean includeAggregations) { // nothing to parse... if (source == null) { + context.evaluateRequestShouldUseConcurrentSearch(); return; } SearchShardTarget shardTarget = context.shardTarget(); @@ -1258,6 +1274,7 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc } } context.trackScores(source.trackScores()); + context.includeNamedQueriesScore(source.includeNamedQueriesScore()); if (source.trackTotalHitsUpTo() != null && source.trackTotalHitsUpTo() != SearchContext.TRACK_TOTAL_HITS_ACCURATE && context.scrollContext() != null) { @@ -1269,9 +1286,6 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc if (source.minScore() != null) { context.minimumScore(source.minScore()); } - if (source.profile()) { - context.setProfilers(new Profilers(context.searcher())); - } if (source.timeout() != null) { context.timeout(source.timeout()); } @@ -1405,6 +1419,10 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc final CollapseContext collapseContext = source.collapse().build(queryShardContext); context.collapse(collapseContext); } + context.evaluateRequestShouldUseConcurrentSearch(); + if (source.profile()) { + context.setProfilers(new Profilers(context.searcher(), context.shouldUseConcurrentSearch())); + } } /** @@ -1542,17 +1560,29 @@ private CanMatchResponse canMatch(ShardSearchRequest request, boolean checkRefre canMatch = aliasFilterCanMatch; } final FieldDoc searchAfterFieldDoc = getSearchAfterFieldDoc(request, context); - canMatch = canMatch && canMatchSearchAfter(searchAfterFieldDoc, minMax, sortBuilder); + final Integer trackTotalHitsUpto = request.source() == null ? 
null : request.source().trackTotalHitsUpTo(); + canMatch = canMatch && canMatchSearchAfter(searchAfterFieldDoc, minMax, sortBuilder, trackTotalHitsUpto); return new CanMatchResponse(canMatch || hasRefreshPending, minMax); } } } - public static boolean canMatchSearchAfter(FieldDoc searchAfter, MinAndMax<?> minMax, FieldSortBuilder primarySortField) { + public static boolean canMatchSearchAfter( + FieldDoc searchAfter, + MinAndMax<?> minMax, + FieldSortBuilder primarySortField, + Integer trackTotalHitsUpto + ) { // Check for sort.missing == null, since in case of missing values sort queries, if segment/shard's min/max // is out of search_after range, it still should be printed and hence we should not skip segment/shard. - if (searchAfter != null && minMax != null && primarySortField != null && primarySortField.missing() == null) { + // Skipping search on a shard/segment entirely can cause a mismatch in the tracked total hits, hence skip only if + // track_total_hits is disabled. + if (searchAfter != null + && minMax != null + && primarySortField != null + && primarySortField.missing() == null + && Objects.equals(trackTotalHitsUpto, SearchContext.TRACK_TOTAL_HITS_DISABLED)) { final Object searchAfterPrimary = searchAfter.fields[0]; if (primarySortField.order() == SortOrder.DESC) { if (minMax.compareMin(searchAfterPrimary) > 0) { diff --git a/server/src/main/java/org/opensearch/search/SearchShardTarget.java b/server/src/main/java/org/opensearch/search/SearchShardTarget.java index 882ba8e069c2e..80b4feda374c6 100644 --- a/server/src/main/java/org/opensearch/search/SearchShardTarget.java +++ b/server/src/main/java/org/opensearch/search/SearchShardTarget.java @@ -34,6 +34,7 @@ import org.opensearch.action.OriginalIndices; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -47,8 +48,9 @@ /** * The target that the search request was executed on. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class SearchShardTarget implements Writeable, Comparable<SearchShardTarget> { private final Text nodeId; diff --git a/server/src/main/java/org/opensearch/search/SearchSortValues.java b/server/src/main/java/org/opensearch/search/SearchSortValues.java index 398d593d5db0d..cbc3900f72f79 100644 --- a/server/src/main/java/org/opensearch/search/SearchSortValues.java +++ b/server/src/main/java/org/opensearch/search/SearchSortValues.java @@ -33,10 +33,11 @@ package org.opensearch.search; import org.apache.lucene.util.BytesRef; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.lucene.Lucene; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.lucene.Lucene; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -50,8 +51,9 @@ /** * Values to sort during search * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SearchSortValues implements ToXContentFragment, Writeable { private static final Object[] EMPTY_ARRAY = new Object[0]; diff --git a/server/src/main/java/org/opensearch/search/SearchSortValuesAndFormats.java b/server/src/main/java/org/opensearch/search/SearchSortValuesAndFormats.java index 1013a2bc4b36c..f9174e92b79e8 100644 --- a/server/src/main/java/org/opensearch/search/SearchSortValuesAndFormats.java +++ b/server/src/main/java/org/opensearch/search/SearchSortValuesAndFormats.java @@ -33,10 +33,11 @@ package org.opensearch.search; import org.apache.lucene.util.BytesRef; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.lucene.Lucene; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.lucene.Lucene; import java.io.IOException; import java.math.BigInteger; @@ -45,8 +46,9 @@ /** * Doc value and formats to sort during search * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SearchSortValuesAndFormats implements Writeable { private final Object[] rawSortValues; private final Object[] formattedSortValues; diff --git a/server/src/main/java/org/opensearch/search/aggregations/Aggregation.java b/server/src/main/java/org/opensearch/search/aggregations/Aggregation.java index b723e14a84cbe..b41da3910030a 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/Aggregation.java +++ b/server/src/main/java/org/opensearch/search/aggregations/Aggregation.java @@ -31,6 +31,7 @@ package org.opensearch.search.aggregations; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentFragment; @@ -40,8 +41,9 @@ /** * An aggregation. Extends {@link ToXContent} as it makes it easier to print out its content. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface Aggregation extends ToXContentFragment { /** diff --git a/server/src/main/java/org/opensearch/search/aggregations/AggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/AggregationBuilder.java index ab5cdb83d0153..878f81001344c 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/AggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/AggregationBuilder.java @@ -31,10 +31,11 @@ package org.opensearch.search.aggregations; -import org.opensearch.common.Strings; -import org.opensearch.core.common.io.stream.NamedWriteable; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.NamedWriteable; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.query.QueryRewriteContext; @@ -50,8 +51,9 @@ /** * A factory that knows how to create an {@link Aggregator} of a specific type. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class AggregationBuilder implements NamedWriteable, @@ -168,8 +170,9 @@ public PipelineTree buildPipelineTree() { * Unlike {@link CardinalityUpperBound} which is <strong>total</strong> * instead of <strong>per parent bucket</strong>. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum BucketCardinality { NONE, ONE, @@ -193,6 +196,6 @@ public static final class CommonFields extends ParseField.CommonFields { @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/AggregationCollectorManager.java b/server/src/main/java/org/opensearch/search/aggregations/AggregationCollectorManager.java index 1f60ff6503ca8..0bb2d1d7ca933 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/AggregationCollectorManager.java +++ b/server/src/main/java/org/opensearch/search/aggregations/AggregationCollectorManager.java @@ -12,21 +12,21 @@ import org.apache.lucene.search.CollectorManager; import org.opensearch.common.CheckedFunction; import org.opensearch.search.internal.SearchContext; -import org.opensearch.search.profile.query.InternalProfileCollector; import org.opensearch.search.query.ReduceableSearchResult; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; import java.util.List; /** * Common {@link CollectorManager} used by both concurrent and non-concurrent aggregation path and also for global and non-global * aggregation operators + * + * @opensearch.internal */ -class AggregationCollectorManager implements CollectorManager<Collector, ReduceableSearchResult> { - private final SearchContext context; +public abstract class AggregationCollectorManager implements CollectorManager<Collector, ReduceableSearchResult> { + protected final SearchContext context; private final CheckedFunction<SearchContext, List<Aggregator>, IOException> aggProvider; private final String collectorReason; @@ -42,12 +42,18 @@ class AggregationCollectorManager implements CollectorManager<Collector, Reducea @Override public 
Collector newCollector() throws IOException { - final Collector collector = createCollector(context, aggProvider.apply(context), collectorReason); + final Collector collector = createCollector(aggProvider.apply(context)); // For Aggregations we should not have a NO_OP_Collector assert collector != BucketCollector.NO_OP_COLLECTOR; return collector; } + public String getCollectorReason() { + return collectorReason; + } + + public abstract String getCollectorName(); + @Override public ReduceableSearchResult reduce(Collection<Collector> collectors) throws IOException { final List<Aggregator> aggregators = context.bucketCollectorProcessor().toAggregators(collectors); @@ -63,31 +69,16 @@ public ReduceableSearchResult reduce(Collection<Collector> collectors) throws IO } final InternalAggregations internalAggregations = InternalAggregations.from(internals); - // Reduce the aggregations across slices before sending to the coordinator. We will perform shard level reduce iff multiple slices - // were created to execute this request and it used concurrent segment search path - // TODO: Add the check for flag that the request was executed using concurrent search - if (collectors.size() > 1) { - // using reduce is fine here instead of topLevelReduce as pipeline aggregation is evaluated on the coordinator after all - // documents are collected across shards for an aggregation - return new AggregationReduceableSearchResult( - InternalAggregations.reduce(Collections.singletonList(internalAggregations), context.partialOnShard()) - ); - } else { - return new AggregationReduceableSearchResult(internalAggregations); - } + return buildAggregationResult(internalAggregations); + } + + protected AggregationReduceableSearchResult buildAggregationResult(InternalAggregations internalAggregations) { + return new AggregationReduceableSearchResult(internalAggregations); } - static Collector createCollector(SearchContext context, List<Aggregator> collectors, String reason) throws IOException { + static Collector createCollector(List<Aggregator> collectors) throws IOException { Collector collector = MultiBucketCollector.wrap(collectors); ((BucketCollector) collector).preCollection(); - if (context.getProfilers() != null) { - collector = new InternalProfileCollector( - collector, - reason, - // TODO: report on child aggs as well - Collections.emptyList() - ); - } return collector; } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/AggregationProcessor.java b/server/src/main/java/org/opensearch/search/aggregations/AggregationProcessor.java index 5b3e2f2542dc2..25fea6ea2c1dc 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/AggregationProcessor.java +++ b/server/src/main/java/org/opensearch/search/aggregations/AggregationProcessor.java @@ -8,11 +8,15 @@ package org.opensearch.search.aggregations; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.search.internal.SearchContext; /** * Interface to define different stages of aggregation processing before and after document collection + * + * @opensearch.api */ +@PublicApi(since = "2.9.0") public interface AggregationProcessor { /** diff --git a/server/src/main/java/org/opensearch/search/aggregations/Aggregations.java b/server/src/main/java/org/opensearch/search/aggregations/Aggregations.java index 575b8bfdf6e94..90d77d5516415 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/Aggregations.java +++ b/server/src/main/java/org/opensearch/search/aggregations/Aggregations.java @@ -31,8 +31,9 @@ package 
org.opensearch.search.aggregations; -import org.opensearch.core.common.ParsingException; import org.opensearch.common.SetOnce; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.ParsingException; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -54,8 +55,9 @@ /** * Represents a set of {@link Aggregation}s * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Aggregations implements Iterable<Aggregation>, ToXContentFragment { public static final String AGGREGATIONS_FIELD = "aggregations"; diff --git a/server/src/main/java/org/opensearch/search/aggregations/Aggregator.java b/server/src/main/java/org/opensearch/search/aggregations/Aggregator.java index e157a8fe5ff8a..8744d1f6a07d3 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/Aggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/Aggregator.java @@ -33,11 +33,12 @@ package org.opensearch.search.aggregations; import org.opensearch.OpenSearchParseException; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.lease.Releasable; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.lease.Releasable; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -57,6 +58,7 @@ * * @opensearch.internal */ +@PublicApi(since = "1.0.0") public abstract class Aggregator extends BucketCollector implements Releasable { /** @@ -64,9 +66,10 @@ public abstract class Aggregator extends BucketCollector implements Releasable { * * @see AggregationBuilder * - * @opensearch.internal + * @opensearch.api */ @FunctionalInterface + @PublicApi(since = "1.0.0") public interface Parser { /** * Returns the aggregator factory with which this parser is associated, may return {@code null} indicating the @@ -160,9 +163,10 @@ public BucketComparator bucketComparator(String key, SortOrder order) { /** * Compare two buckets by their ordinal. * - * @opensearch.internal + * @opensearch.api */ @FunctionalInterface + @PublicApi(since = "1.0.0") public interface BucketComparator { /** * Compare two buckets by their ordinal. 
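The Aggregator.BucketComparator interface above is a @FunctionalInterface that this change exposes as public API, so downstream code can now supply one as a lambda. A minimal usage sketch, assuming the OpenSearch server dependency is on the classpath and that the interface's single method takes two bucket ordinals and returns a comparator-style int (the exact signature is not shown in this hunk):

    import org.opensearch.search.aggregations.Aggregator;

    // Hypothetical sketch: order buckets by ordinal, ascending.
    Aggregator.BucketComparator byOrdinal = (lhs, rhs) -> Long.compare(lhs, rhs);
    int cmp = byOrdinal.compare(0L, 1L); // negative, so bucket 0 sorts before bucket 1

In practice a comparator would come from Aggregator#bucketComparator(String key, SortOrder order), shown above, rather than being hand-rolled.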
diff --git a/server/src/main/java/org/opensearch/search/aggregations/AggregatorBase.java b/server/src/main/java/org/opensearch/search/aggregations/AggregatorBase.java index 1d315980512b4..47e9def094623 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/AggregatorBase.java +++ b/server/src/main/java/org/opensearch/search/aggregations/AggregatorBase.java @@ -34,9 +34,9 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.ScoreMode; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.breaker.CircuitBreakingException; -import org.opensearch.indices.breaker.CircuitBreakerService; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.CircuitBreakingException; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.search.SearchShardTarget; import org.opensearch.search.aggregations.support.ValuesSourceConfig; import org.opensearch.search.internal.SearchContext; @@ -135,7 +135,7 @@ public ScoreMode scoreMode() { * Returns a converter for point values if it's safe to use the indexed data instead of * doc values. Generally, this means that the query has no filters or scripts, the aggregation is * top level, and the underlying field is indexed, and the index is sorted in the right order. - * + * <p> * If those conditions aren't met, return <code>null</code> to indicate a point reader cannot * be used in this case. * diff --git a/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactories.java b/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactories.java index 04fa34466e0ff..eeb0c606694b0 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactories.java +++ b/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactories.java @@ -32,13 +32,14 @@ package org.opensearch.search.aggregations; import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.xcontent.SuggestingErrorOnUnknown; import org.opensearch.core.common.ParsingException; -import org.opensearch.common.Strings; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.xcontent.SuggestingErrorOnUnknown; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedObjectNotFoundException; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; @@ -82,8 +83,9 @@ /** * An immutable collection of {@link AggregatorFactories}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class AggregatorFactories { public static final Pattern VALID_AGG_NAME = Pattern.compile("[^\\[\\]>]+"); @@ -257,6 +259,15 @@ private AggregatorFactories(AggregatorFactory[] factories) { this.factories = factories; } + public boolean allFactoriesSupportConcurrentSearch() { + for (AggregatorFactory factory : factories) { + if (factory.supportsConcurrentSegmentSearch() == false || factory.evaluateChildFactories() == false) { + return false; + } + } + return true; + } + /** * Create all aggregators so that they can be consumed with multiple * buckets. 
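The new allFactoriesSupportConcurrentSearch() above is an and-reduction over the factory tree: a request can take the concurrent segment search path only if every factory opts in via supportsConcurrentSegmentSearch() and all of its child factories do too (checked through evaluateChildFactories(), shown in the AggregatorFactory diff below). A self-contained sketch of the same recursive check, using a hypothetical Node type in place of real AggregatorFactory instances:

    import java.util.List;

    // Hypothetical stand-in for a node in an AggregatorFactory tree.
    record Node(boolean supportsConcurrent, List<Node> children) {
        // Mirrors allFactoriesSupportConcurrentSearch(): returns false as soon
        // as any node in the tree does not opt in.
        boolean allSupported() {
            return supportsConcurrent && children.stream().allMatch(Node::allSupported);
        }
    }

Because the default supportsConcurrentSegmentSearch() returns false, a single factory that has not opted in keeps the whole request on the sequential path.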
@@ -331,8 +342,9 @@ public int countAggregators() { * A mutable collection of {@link AggregationBuilder}s and * {@link PipelineAggregationBuilder}s. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder implements Writeable, ToXContentObject { private final Set<String> names = new HashSet<>(); @@ -591,7 +603,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public String toString() { - return Strings.toString(XContentType.JSON, this, true, true); + return Strings.toString(MediaTypeRegistry.JSON, this, true, true); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactory.java index 05686f35c2166..6cc3a78fb1e36 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactory.java @@ -32,6 +32,7 @@ package org.opensearch.search.aggregations; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.internal.SearchContext; @@ -43,8 +44,9 @@ /** * Base factory to instantiate an internal aggregator * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class AggregatorFactory { protected final String name; protected final AggregatorFactory parent; @@ -114,4 +116,15 @@ public AggregatorFactory getParent() { public String getStatsSubtype() { return OTHER_SUBTYPE; } + + /** + * Implementations should override this method and return true if the Aggregator created by the factory works with the concurrent segment search execution model + */ + protected boolean supportsConcurrentSegmentSearch() { + return false; + } + + public boolean evaluateChildFactories() { + return factories.allFactoriesSupportConcurrentSearch(); + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/BucketCollectorProcessor.java b/server/src/main/java/org/opensearch/search/aggregations/BucketCollectorProcessor.java index 352ecf8bc94ad..135fda71a757a 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/BucketCollectorProcessor.java +++ b/server/src/main/java/org/opensearch/search/aggregations/BucketCollectorProcessor.java @@ -10,6 +10,7 @@ import org.apache.lucene.search.Collector; import org.apache.lucene.search.MultiCollector; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lucene.MinimumScoreCollector; import org.opensearch.search.internal.SearchContext; import org.opensearch.search.profile.query.InternalProfileCollector; @@ -26,7 +27,10 @@ /** * Processor to perform collector level processing specific to {@link BucketCollector} in different stages like: a) PostCollection * after search on each leaf is completed and b) process the collectors to perform reduce after collection is completed + * + * @opensearch.api */ +@PublicApi(since = "2.10.0") public class BucketCollectorProcessor { /** diff --git a/server/src/main/java/org/opensearch/search/aggregations/BucketOrder.java b/server/src/main/java/org/opensearch/search/aggregations/BucketOrder.java index 637829ea5127f..56f3820a60348 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/BucketOrder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/BucketOrder.java @@ -31,10 +31,10 @@ package org.opensearch.search.aggregations; -import 
org.opensearch.common.Strings; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; import org.opensearch.search.aggregations.support.AggregationPath; @@ -172,6 +172,6 @@ public void writeTo(StreamOutput out) throws IOException { @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/CardinalityUpperBound.java b/server/src/main/java/org/opensearch/search/aggregations/CardinalityUpperBound.java index 15beeac82930a..8407a5eb6d3c0 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/CardinalityUpperBound.java +++ b/server/src/main/java/org/opensearch/search/aggregations/CardinalityUpperBound.java @@ -32,6 +32,7 @@ package org.opensearch.search.aggregations; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.search.aggregations.bucket.BucketsAggregator; import org.opensearch.search.aggregations.bucket.filter.FilterAggregator; import org.opensearch.search.aggregations.bucket.range.RangeAggregator; @@ -42,8 +43,9 @@ * Upper bound of how many {@code owningBucketOrds} that an {@link Aggregator} * will have to collect into. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class CardinalityUpperBound { /** * {@link Aggregator}s with this cardinality won't collect any data at diff --git a/server/src/main/java/org/opensearch/search/aggregations/ConcurrentAggregationProcessor.java b/server/src/main/java/org/opensearch/search/aggregations/ConcurrentAggregationProcessor.java index 336ad8739eb41..fbeb583984ac5 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/ConcurrentAggregationProcessor.java +++ b/server/src/main/java/org/opensearch/search/aggregations/ConcurrentAggregationProcessor.java @@ -13,7 +13,6 @@ import org.apache.lucene.search.Query; import org.opensearch.common.lucene.search.Queries; import org.opensearch.search.internal.SearchContext; -import org.opensearch.search.profile.query.CollectorResult; import org.opensearch.search.profile.query.InternalProfileCollectorManager; import org.opensearch.search.profile.query.InternalProfileComponent; import org.opensearch.search.query.QueryPhaseExecutionException; @@ -65,12 +64,12 @@ public void postProcess(SearchContext context) { try { if (globalCollectorManager != null) { Query query = context.buildFilteredQuery(Queries.newMatchAllQuery()); - globalCollectorManager = new InternalProfileCollectorManager( - globalCollectorManager, - CollectorResult.REASON_AGGREGATION_GLOBAL, - Collections.emptyList() - ); if (context.getProfilers() != null) { + globalCollectorManager = new InternalProfileCollectorManager( + globalCollectorManager, + ((AggregationCollectorManager) globalCollectorManager).getCollectorReason(), + Collections.emptyList() + ); context.getProfilers().addQueryProfiler().setCollector((InternalProfileComponent) globalCollectorManager); } final ReduceableSearchResult result = context.searcher().search(query, globalCollectorManager); diff --git a/server/src/main/java/org/opensearch/search/aggregations/DefaultAggregationProcessor.java 
b/server/src/main/java/org/opensearch/search/aggregations/DefaultAggregationProcessor.java index 24b05ebcf3a61..c674f4b9b673a 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/DefaultAggregationProcessor.java +++ b/server/src/main/java/org/opensearch/search/aggregations/DefaultAggregationProcessor.java @@ -11,10 +11,11 @@ import org.apache.lucene.search.Query; import org.opensearch.common.lucene.search.Queries; import org.opensearch.search.internal.SearchContext; -import org.opensearch.search.profile.query.InternalProfileComponent; +import org.opensearch.search.profile.query.InternalProfileCollector; import org.opensearch.search.query.QueryPhaseExecutionException; import java.io.IOException; +import java.util.Collections; import java.util.List; /** @@ -74,7 +75,13 @@ public void postProcess(SearchContext context) { if (context.getProfilers() != null) { context.getProfilers() .addQueryProfiler() - .setCollector((InternalProfileComponent) globalCollectorManager.newCollector()); + .setCollector( + new InternalProfileCollector( + globalCollectorManager.newCollector(), + globalCollectorManager.getCollectorReason(), + Collections.emptyList() + ) + ); } context.searcher().search(query, globalCollectorManager.newCollector()); globalCollectorManager.reduce(List.of()).reduce(context.queryResult()); diff --git a/server/src/main/java/org/opensearch/search/aggregations/GlobalAggCollectorManager.java b/server/src/main/java/org/opensearch/search/aggregations/GlobalAggCollectorManager.java index 56f53a57a8573..8814cc3c435e1 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/GlobalAggCollectorManager.java +++ b/server/src/main/java/org/opensearch/search/aggregations/GlobalAggCollectorManager.java @@ -14,6 +14,7 @@ import org.opensearch.search.profile.query.CollectorResult; import java.io.IOException; +import java.util.Collections; import java.util.Objects; /** @@ -22,10 +23,12 @@ public class GlobalAggCollectorManager extends AggregationCollectorManager { private Collector collector; + private final String collectorName; public GlobalAggCollectorManager(SearchContext context) throws IOException { super(context, context.aggregations().factories()::createTopLevelGlobalAggregators, CollectorResult.REASON_AGGREGATION_GLOBAL); collector = Objects.requireNonNull(super.newCollector(), "collector instance is null"); + collectorName = collector.toString(); } @Override @@ -38,4 +41,18 @@ public Collector newCollector() throws IOException { return super.newCollector(); } } + + @Override + protected AggregationReduceableSearchResult buildAggregationResult(InternalAggregations internalAggregations) { + // Reduce the aggregations across slices before sending to the coordinator. We will perform shard level reduce as long as any slices + // were created so that we can apply shard level bucket count thresholds in the reduce phase. 
+ return new AggregationReduceableSearchResult( + InternalAggregations.reduce(Collections.singletonList(internalAggregations), context.partialOnShard()) + ); + } + + @Override + public String getCollectorName() { + return collectorName; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/GlobalAggCollectorManagerWithSingleCollector.java b/server/src/main/java/org/opensearch/search/aggregations/GlobalAggCollectorManagerWithSingleCollector.java index f126f27c68855..973749c0d5189 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/GlobalAggCollectorManagerWithSingleCollector.java +++ b/server/src/main/java/org/opensearch/search/aggregations/GlobalAggCollectorManagerWithSingleCollector.java @@ -26,10 +26,12 @@ public class GlobalAggCollectorManagerWithSingleCollector extends AggregationCollectorManager { private final Collector collector; + private final String collectorName; public GlobalAggCollectorManagerWithSingleCollector(SearchContext context) throws IOException { super(context, context.aggregations().factories()::createTopLevelGlobalAggregators, CollectorResult.REASON_AGGREGATION_GLOBAL); collector = Objects.requireNonNull(super.newCollector(), "collector instance is null"); + collectorName = collector.toString(); } @Override @@ -42,4 +44,9 @@ public ReduceableSearchResult reduce(Collection<Collector> collectors) throws IO assert collectors.isEmpty() : "Reduce on GlobalAggregationCollectorManagerWithCollector called with non-empty collectors"; return super.reduce(List.of(collector)); } + + @Override + public String getCollectorName() { + return collectorName; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/InternalAggregation.java b/server/src/main/java/org/opensearch/search/aggregations/InternalAggregation.java index c6d86316fa230..49b85ccaea2a8 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/InternalAggregation.java +++ b/server/src/main/java/org/opensearch/search/aggregations/InternalAggregation.java @@ -31,15 +31,18 @@ package org.opensearch.search.aggregations; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.util.BigArrays; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.NamedWriteable; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.util.BigArrays; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.rest.action.search.RestSearchAction; import org.opensearch.script.ScriptService; +import org.opensearch.search.aggregations.bucket.LocalBucketCountThresholds; +import org.opensearch.search.aggregations.bucket.terms.TermsAggregator; import org.opensearch.search.aggregations.pipeline.PipelineAggregator; import org.opensearch.search.aggregations.pipeline.PipelineAggregator.PipelineTree; import org.opensearch.search.aggregations.support.AggregationPath; @@ -59,14 +62,16 @@ /** * An internal implementation of {@link Aggregation}. Serves as a base class for all aggregation implementations. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class InternalAggregation implements Aggregation, NamedWriteable { /** * Builds {@link ReduceContext}. 
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface ReduceContextBuilder { /** * Build a {@linkplain ReduceContext} to perform a partial reduction. @@ -82,8 +87,9 @@ public interface ReduceContextBuilder { /** * The reduce context * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class ReduceContext { private final BigArrays bigArrays; private final ScriptService scriptService; @@ -160,6 +166,18 @@ public boolean isSliceLevel() { return this.isSliceLevel; } + /** + * For slice level partial reduce we will apply the shard level `shard_size` and `shard_min_doc_count` limits, + * whereas for coordinator level partial reduce we will use the top level `size` and `min_doc_count` + */ + public LocalBucketCountThresholds asLocalBucketCountThresholds(TermsAggregator.BucketCountThresholds bucketCountThresholds) { + if (isSliceLevel()) { + return new LocalBucketCountThresholds(bucketCountThresholds.getShardMinDocCount(), bucketCountThresholds.getShardSize()); + } else { + return new LocalBucketCountThresholds(bucketCountThresholds.getMinDocCount(), bucketCountThresholds.getRequiredSize()); + } + } + public BigArrays bigArrays() { return bigArrays; } @@ -380,7 +398,7 @@ public boolean equals(Object obj) { @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } /** diff --git a/server/src/main/java/org/opensearch/search/aggregations/InternalAggregations.java b/server/src/main/java/org/opensearch/search/aggregations/InternalAggregations.java index e4d64e1e8517c..9d55ee4a04506 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/InternalAggregations.java +++ b/server/src/main/java/org/opensearch/search/aggregations/InternalAggregations.java @@ -32,6 +32,7 @@ package org.opensearch.search.aggregations; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -53,8 +54,9 @@ /** * An internal implementation of {@link Aggregations}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class InternalAggregations extends Aggregations implements Writeable { public static final InternalAggregations EMPTY = new InternalAggregations(Collections.emptyList()); @@ -125,7 +127,7 @@ public double sortValue(AggregationPath.PathElement head, Iterator<AggregationPa * Begin the reduction process. This should be the entry point for the "first" reduction, e.g. called by * SearchPhaseController or anywhere else that wants to initiate a reduction. It _should not_ be called * as an intermediate reduction step (e.g. in the middle of an aggregation tree). 
- * + * <p> * This method first reduces the aggregations, and if it is the final reduce, then reduce the pipeline * aggregations (both embedded parent/sibling as well as top-level sibling pipelines) */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/InternalOrder.java b/server/src/main/java/org/opensearch/search/aggregations/InternalOrder.java index 091f1797d8f78..13d0abd0176df 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/InternalOrder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/InternalOrder.java @@ -31,11 +31,11 @@ package org.opensearch.search.aggregations; +import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.common.util.Comparators; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.logging.DeprecationLogger; -import org.opensearch.common.util.Comparators; import org.opensearch.core.xcontent.XContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; diff --git a/server/src/main/java/org/opensearch/search/aggregations/MultiBucketConsumerService.java b/server/src/main/java/org/opensearch/search/aggregations/MultiBucketConsumerService.java index 83a94afbe8e02..1461dd3009b44 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/MultiBucketConsumerService.java +++ b/server/src/main/java/org/opensearch/search/aggregations/MultiBucketConsumerService.java @@ -32,16 +32,19 @@ package org.opensearch.search.aggregations; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.CircuitBreakingException; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.aggregations.bucket.BucketsAggregator; import java.io.IOException; +import java.util.concurrent.atomic.LongAdder; import java.util.function.IntConsumer; /** @@ -121,19 +124,43 @@ protected void metadataToXContent(XContentBuilder builder, Params params) throws * It is used by aggregators to limit the number of bucket creation during * {@link Aggregator#buildAggregations} and {@link InternalAggregation#reduce}. 
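A hedged sketch of how a caller might drive the two-step reduction just documented; the `forPartialReduction()`/`forFinalReduction()` builder methods and the helper names are assumptions for illustration, not part of this diff:

```java
// Assumed coordinator-side wiring, for illustration only: intermediate reduces use a
// partial ReduceContext; pipeline aggregations run only on the final reduce.
List<InternalAggregations> shardResults = gatherShardResults();               // hypothetical helper
InternalAggregation.ReduceContextBuilder ctxBuilder = reduceContextBuilder(); // hypothetical helper
InternalAggregations partial = InternalAggregations.reduce(shardResults, ctxBuilder.forPartialReduction());
InternalAggregations finalResult = InternalAggregations.topLevelReduce(List.of(partial), ctxBuilder.forFinalReduction());
```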
 * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class MultiBucketConsumer implements IntConsumer { private final int limit; private final CircuitBreaker breaker; - // aggregations execute in a single thread so no atomic here + // aggregations execute in a single thread for both sequential + // and concurrent search, so no atomic here private int count; - private int callCount = 0; + + // will be updated by multiple threads in concurrent search + // hence making it a LongAdder + private final LongAdder callCount; + private volatile boolean circuitBreakerTripped; + private final int availProcessors; public MultiBucketConsumer(int limit, CircuitBreaker breaker) { this.limit = limit; this.breaker = breaker; + callCount = new LongAdder(); + availProcessors = Runtime.getRuntime().availableProcessors(); + } + + // only visible for testing + protected MultiBucketConsumer( + int limit, + CircuitBreaker breaker, + LongAdder callCount, + boolean circuitBreakerTripped, + int availProcessors + ) { + this.limit = limit; + this.breaker = breaker; + this.callCount = callCount; + this.circuitBreakerTripped = circuitBreakerTripped; + this.availProcessors = availProcessors; } @Override @@ -153,10 +180,27 @@ public void accept(int value) { ); } } - // check parent circuit breaker every 1024 calls - callCount++; - if ((callCount & 0x3FF) == 0) { - breaker.addEstimateBytesAndMaybeBreak(0, "allocated_buckets"); + callCount.increment(); + // trip the circuit breaker for other threads in case of concurrent search, + // if the circuit breaker has already tripped for one of the threads; more info + // can be found at: https://github.com/opensearch-project/OpenSearch/issues/7785 + if (circuitBreakerTripped) { + throw new CircuitBreakingException( + "Circuit breaker for this consumer has already been tripped by previous invocations. " + + "This can happen in case of concurrent segment search when multiple threads are " + + "executing the request and one of the threads has already tripped the circuit breaker", + breaker.getDurability() + ); + } + // check parent circuit breaker every 1024 to (1024 + available processors) calls + long sum = callCount.sum(); + if ((sum >= 1024) && (sum & 0x3FF) <= availProcessors) { + try { + breaker.addEstimateBytesAndMaybeBreak(0, "allocated_buckets"); + } catch (CircuitBreakingException e) { + circuitBreakerTripped = true; + throw e; + } + } } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/NonGlobalAggCollectorManager.java b/server/src/main/java/org/opensearch/search/aggregations/NonGlobalAggCollectorManager.java index 3729734c48ed7..8b0a1530b5505 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/NonGlobalAggCollectorManager.java +++ b/server/src/main/java/org/opensearch/search/aggregations/NonGlobalAggCollectorManager.java @@ -14,6 +14,7 @@ import org.opensearch.search.profile.query.CollectorResult; import java.io.IOException; +import java.util.Collections; import java.util.Objects; /** @@ -22,10 +23,12 @@ public class NonGlobalAggCollectorManager extends AggregationCollectorManager { private Collector collector; + private final String collectorName; public NonGlobalAggCollectorManager(SearchContext context) throws IOException { super(context, context.aggregations().factories()::createTopLevelNonGlobalAggregators, CollectorResult.REASON_AGGREGATION); collector = Objects.requireNonNull(super.newCollector(), "collector instance is null"); + collectorName = collector.toString(); } @Override @@ -38,4 +41,18 @@ public Collector newCollector() throws IOException { return super.newCollector(); } } + + @Override + protected AggregationReduceableSearchResult buildAggregationResult(InternalAggregations internalAggregations) { + // Reduce the aggregations across slices before sending to the coordinator. We will perform shard level reduce as long as any slices + // were created so that we can apply shard level bucket count thresholds in the reduce phase.
+ return new AggregationReduceableSearchResult( + InternalAggregations.reduce(Collections.singletonList(internalAggregations), context.partialOnShard()) + ); + } + + @Override + public String getCollectorName() { + return collectorName; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/NonGlobalAggCollectorManagerWithSingleCollector.java b/server/src/main/java/org/opensearch/search/aggregations/NonGlobalAggCollectorManagerWithSingleCollector.java index 433f6b6a05b22..a6eb00f2d70f7 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/NonGlobalAggCollectorManagerWithSingleCollector.java +++ b/server/src/main/java/org/opensearch/search/aggregations/NonGlobalAggCollectorManagerWithSingleCollector.java @@ -26,10 +26,12 @@ public class NonGlobalAggCollectorManagerWithSingleCollector extends AggregationCollectorManager { private final Collector collector; + private final String collectorName; public NonGlobalAggCollectorManagerWithSingleCollector(SearchContext context) throws IOException { super(context, context.aggregations().factories()::createTopLevelNonGlobalAggregators, CollectorResult.REASON_AGGREGATION); collector = Objects.requireNonNull(super.newCollector(), "collector instance is null"); + collectorName = collector.toString(); } @Override @@ -42,4 +44,9 @@ public ReduceableSearchResult reduce(Collection<Collector> collectors) throws IO assert collectors.isEmpty() : "Reduce on NonGlobalAggregationCollectorManagerWithCollector called with non-empty collectors"; return super.reduce(List.of(collector)); } + + @Override + public String getCollectorName() { + return collectorName; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/PipelineAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/PipelineAggregationBuilder.java index e71ea1054be9e..7ff823dbe57f0 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/PipelineAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/PipelineAggregationBuilder.java @@ -33,9 +33,10 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.ValidateActions; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.NamedWriteable; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.index.query.QueryRewriteContext; import org.opensearch.index.query.Rewriteable; @@ -54,8 +55,9 @@ * A factory that knows how to create an {@link PipelineAggregator} of a * specific type. 
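For context on `PipelineAggregationBuilder` (promoted to `@PublicApi` below), a hedged usage sketch; the concrete builder class and the buckets_path are illustrative and not part of this diff:

```java
// A sibling pipeline aggregation addressing a metric inside another aggregation via a
// buckets_path; the aggregation names here are hypothetical.
MaxBucketPipelineAggregationBuilder maxMonthlySales =
    new MaxBucketPipelineAggregationBuilder("max_monthly_sales", "sales_per_month>sales");
```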
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class PipelineAggregationBuilder implements NamedWriteable, @@ -288,7 +290,7 @@ public PipelineAggregationBuilder subAggregations(Builder subFactories) { @Override public String toString() { - return Strings.toString(XContentType.JSON, this, true, true); + return Strings.toString(MediaTypeRegistry.JSON, this, true, true); } /** diff --git a/server/src/main/java/org/opensearch/search/aggregations/SearchContextAggregations.java b/server/src/main/java/org/opensearch/search/aggregations/SearchContextAggregations.java index 16339713dc83e..1cf054a7450bc 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/SearchContextAggregations.java +++ b/server/src/main/java/org/opensearch/search/aggregations/SearchContextAggregations.java @@ -31,13 +31,16 @@ package org.opensearch.search.aggregations; +import org.opensearch.common.annotation.PublicApi; + import static org.opensearch.search.aggregations.MultiBucketConsumerService.MultiBucketConsumer; /** * The aggregation context that is part of the search context. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SearchContextAggregations { private final AggregatorFactories factories; private final MultiBucketConsumer multiBucketConsumer; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/BestBucketsDeferringCollector.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/BestBucketsDeferringCollector.java index 2e7c4659bcb00..223be3ba2d1ae 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/BestBucketsDeferringCollector.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/BestBucketsDeferringCollector.java @@ -90,7 +90,7 @@ static class Entry { protected PackedLongValues.Builder docDeltasBuilder; protected PackedLongValues.Builder bucketsBuilder; protected long maxBucket = -1; - protected boolean finished = false; + protected boolean finished; protected LongHash selectedBuckets; /** @@ -101,6 +101,9 @@ static class Entry { public BestBucketsDeferringCollector(SearchContext context, boolean isGlobal) { this.searchContext = context; this.isGlobal = isGlobal; + // a postCollection call is not made by the IndexSearcher when there are no segments. + // In this case init the collector as finished. + this.finished = context.searcher().getLeafContexts().isEmpty(); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/BucketsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/BucketsAggregator.java index 5c335dfffeaa7..eef427754f535 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/BucketsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/BucketsAggregator.java @@ -32,9 +32,9 @@ package org.opensearch.search.aggregations.bucket; import org.apache.lucene.index.LeafReaderContext; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.LongArray; -import org.opensearch.common.lease.Releasable; import org.opensearch.search.aggregations.AggregationExecutionException; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorBase; @@ -132,7 +132,7 @@ public final void collectExistingBucket(LeafBucketCollector subCollector, int do /** * This only tidies up doc counts. 
Call {@link MergingBucketsDeferringCollector#mergeBuckets(long[])} to merge the actual * ordinals and doc ID deltas. - * + * <p> * Refer to that method for documentation about the merge map. * * @deprecated use {@link mergeBuckets(long, LongUnaryOperator)} @@ -146,7 +146,7 @@ public final void mergeBuckets(long[] mergeMap, long newNumBuckets) { * * @param mergeMap a unary operator which maps a bucket's ordinal to the ordinal it should be merged with. * If a bucket's ordinal is mapped to -1 then the bucket is removed entirely. - * + * <p> * This only tidies up doc counts. Call {@link MergingBucketsDeferringCollector#mergeBuckets(LongUnaryOperator)} to * merge the actual ordinals and doc ID deltas. */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/DeferableBucketAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/DeferableBucketAggregator.java index 1ff065f39f359..5f2dc58236341 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/DeferableBucketAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/DeferableBucketAggregator.java @@ -34,8 +34,8 @@ import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; -import org.opensearch.search.aggregations.CardinalityUpperBound; import org.opensearch.search.aggregations.BucketCollector; +import org.opensearch.search.aggregations.CardinalityUpperBound; import org.opensearch.search.aggregations.MultiBucketCollector; import org.opensearch.search.internal.SearchContext; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/FastFilterRewriteHelper.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/FastFilterRewriteHelper.java new file mode 100644 index 0000000000000..e587b7f169e5f --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/FastFilterRewriteHelper.java @@ -0,0 +1,497 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.aggregations.bucket; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.document.LongPoint; +import org.apache.lucene.index.DocValues; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.index.PointValues; +import org.apache.lucene.search.ConstantScoreQuery; +import org.apache.lucene.search.FieldExistsQuery; +import org.apache.lucene.search.IndexOrDocValuesQuery; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.PointRangeQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Weight; +import org.apache.lucene.util.NumericUtils; +import org.opensearch.common.Rounding; +import org.opensearch.common.lucene.search.function.FunctionScoreQuery; +import org.opensearch.index.mapper.DateFieldMapper; +import org.opensearch.index.mapper.DocCountFieldMapper; +import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.query.DateRangeIncludingNowQuery; +import org.opensearch.search.aggregations.bucket.composite.CompositeValuesSourceConfig; +import org.opensearch.search.aggregations.bucket.composite.RoundingValuesSource; +import org.opensearch.search.aggregations.bucket.histogram.LongBounds; +import org.opensearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.OptionalLong; +import java.util.function.BiConsumer; +import java.util.function.Function; + +import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS; + +/** + * Utility class to help rewrite aggregations into filters. + * Instead of having the aggregation collect documents one by one, a filter may count all matching documents in one pass. + * <p> + * Currently supported rewrite: + * <ul> + * <li> date histogram : date range filter.
+ * Applied: DateHistogramAggregator, AutoDateHistogramAggregator, CompositeAggregator </li> + * </ul> + * + * @opensearch.internal + */ +public final class FastFilterRewriteHelper { + + private FastFilterRewriteHelper() {} + + private static final Logger logger = LogManager.getLogger(FastFilterRewriteHelper.class); + + private static final int MAX_NUM_FILTER_BUCKETS = 1024; + private static final Map<Class<?>, Function<Query, Query>> queryWrappers; + + // Initialize the wrapper map for unwrapping the query + static { + queryWrappers = new HashMap<>(); + queryWrappers.put(ConstantScoreQuery.class, q -> ((ConstantScoreQuery) q).getQuery()); + queryWrappers.put(FunctionScoreQuery.class, q -> ((FunctionScoreQuery) q).getSubQuery()); + queryWrappers.put(DateRangeIncludingNowQuery.class, q -> ((DateRangeIncludingNowQuery) q).getQuery()); + queryWrappers.put(IndexOrDocValuesQuery.class, q -> ((IndexOrDocValuesQuery) q).getIndexQuery()); + } + + /** + * Recursively unwraps query into the concrete form + * for applying the optimization + */ + private static Query unwrapIntoConcreteQuery(Query query) { + while (queryWrappers.containsKey(query.getClass())) { + query = queryWrappers.get(query.getClass()).apply(query); + } + + return query; + } + + /** + * Finds the global min and max bounds of the field for the shard across all segments + * + * @return null if the field is empty or not indexed + */ + private static long[] getShardBounds(final SearchContext context, final String fieldName) throws IOException { + final List<LeafReaderContext> leaves = context.searcher().getIndexReader().leaves(); + long min = Long.MAX_VALUE, max = Long.MIN_VALUE; + for (LeafReaderContext leaf : leaves) { + final PointValues values = leaf.reader().getPointValues(fieldName); + if (values != null) { + min = Math.min(min, NumericUtils.sortableBytesToLong(values.getMinPackedValue(), 0)); + max = Math.max(max, NumericUtils.sortableBytesToLong(values.getMaxPackedValue(), 0)); + } + } + + if (min == Long.MAX_VALUE || max == Long.MIN_VALUE) { + return null; + } + return new long[] { min, max }; + } + + /** + * Finds the min and max bounds of the field for the segment + * + * @return null if the field is empty or not indexed + */ + private static long[] getSegmentBounds(final LeafReaderContext context, final String fieldName) throws IOException { + long min = Long.MAX_VALUE, max = Long.MIN_VALUE; + final PointValues values = context.reader().getPointValues(fieldName); + if (values != null) { + min = Math.min(min, NumericUtils.sortableBytesToLong(values.getMinPackedValue(), 0)); + max = Math.max(max, NumericUtils.sortableBytesToLong(values.getMaxPackedValue(), 0)); + } + + if (min == Long.MAX_VALUE || max == Long.MIN_VALUE) { + return null; + } + return new long[] { min, max }; + } + + /** + * This method also acts as a pre-condition check for the optimization + * + * @return null if the processed query not as expected + */ + public static long[] getDateHistoAggBounds(final SearchContext context, final String fieldName) throws IOException { + final Query cq = unwrapIntoConcreteQuery(context.query()); + if (cq instanceof PointRangeQuery) { + final PointRangeQuery prq = (PointRangeQuery) cq; + final long[] indexBounds = getShardBounds(context, fieldName); + if (indexBounds == null) return null; + return getBoundsWithRangeQuery(prq, fieldName, indexBounds); + } else if (cq instanceof MatchAllDocsQuery) { + return getShardBounds(context, fieldName); + } else if (cq instanceof FieldExistsQuery) { + // when a range query covers all 
values of a shard, it will be rewritten as a field exists query + if (((FieldExistsQuery) cq).getField().equals(fieldName)) { + return getShardBounds(context, fieldName); + } + } + + return null; + } + + private static long[] getBoundsWithRangeQuery(PointRangeQuery prq, String fieldName, long[] indexBounds) { + // Ensure that the query and aggregation are on the same field + if (prq.getField().equals(fieldName)) { + // Minimum bound for aggregation is the max between query and global + long lower = Math.max(NumericUtils.sortableBytesToLong(prq.getLowerPoint(), 0), indexBounds[0]); + // Maximum bound for aggregation is the min between query and global + long upper = Math.min(NumericUtils.sortableBytesToLong(prq.getUpperPoint(), 0), indexBounds[1]); + if (lower > upper) { + return null; + } + return new long[] { lower, upper }; + } + + return null; + } + + /** + * Creates the date range filters for aggregations using the interval, min/max + * bounds and prepared rounding + */ + private static Weight[] createFilterForAggregations( + final SearchContext context, + final DateFieldMapper.DateFieldType fieldType, + final long interval, + final Rounding.Prepared preparedRounding, + long low, + final long high + ) throws IOException { + // Calculate the number of buckets using range and interval + long roundedLow = preparedRounding.round(fieldType.convertNanosToMillis(low)); + long prevRounded = roundedLow; + int bucketCount = 0; + while (roundedLow <= fieldType.convertNanosToMillis(high)) { + bucketCount++; + if (bucketCount > MAX_NUM_FILTER_BUCKETS) { + logger.debug("Max number of filters reached [{}], skip the fast filter optimization", MAX_NUM_FILTER_BUCKETS); + return null; + } + // Below rounding is needed as the interval could result in + // non-rounded values for something like calendar month + roundedLow = preparedRounding.round(roundedLow + interval); + if (prevRounded == roundedLow) break; // prevents getting into an infinite loop + prevRounded = roundedLow; + } + + Weight[] filters = null; + if (bucketCount > 0) { + filters = new Weight[bucketCount]; + roundedLow = preparedRounding.round(fieldType.convertNanosToMillis(low)); + + int i = 0; + while (i < bucketCount) { + // Calculate the lower bucket bound + final byte[] lower = new byte[8]; + NumericUtils.longToSortableBytes(i == 0 ? low : fieldType.convertRoundedMillisToNanos(roundedLow), lower, 0); + + // Calculate the upper bucket bound + roundedLow = preparedRounding.round(roundedLow + interval); + final byte[] upper = new byte[8]; + NumericUtils.longToSortableBytes(i + 1 == bucketCount ?
high : + // Subtract 1 if the minimum is roundedLow as roundedLow itself + // is included in the next bucket + fieldType.convertRoundedMillisToNanos(roundedLow) - 1, upper, 0); + + filters[i++] = context.searcher().createWeight(new PointRangeQuery(fieldType.name(), lower, upper, 1) { + @Override + protected String toString(int dimension, byte[] value) { + return Long.toString(LongPoint.decodeDimension(value, 0)); + } + }, ScoreMode.COMPLETE_NO_SCORES, 1); + } + } + + return filters; + } + + /** + * Context object for fast filter optimization + * <p> + * Usage: first set aggregation type, then check isRewriteable, then buildFastFilter + */ + public static class FastFilterContext { + private boolean rewriteable = false; + private Weight[] filters = null; + private boolean filtersBuiltAtShardLevel = false; + + private AggregationType aggregationType; + private final SearchContext context; + + public FastFilterContext(SearchContext context) { + this.context = context; + } + + public AggregationType getAggregationType() { + return aggregationType; + } + + public void setAggregationType(AggregationType aggregationType) { + this.aggregationType = aggregationType; + } + + public boolean isRewriteable(final Object parent, final int subAggLength) { + boolean rewriteable = aggregationType.isRewriteable(parent, subAggLength); + logger.debug("Fast filter rewriteable: {} for shard {}", rewriteable, context.indexShard().shardId()); + this.rewriteable = rewriteable; + return rewriteable; + } + + public void buildFastFilter() throws IOException { + assert filters == null : "Filters should only be built once, but they are already built"; + this.filters = this.aggregationType.buildFastFilter(context); + if (filters != null) { + logger.debug("Fast filter built for shard {}", context.indexShard().shardId()); + filtersBuiltAtShardLevel = true; + } + } + + /** + * Builds filters for a segment + */ + public Weight[] buildFastFilter(LeafReaderContext leaf) throws IOException { + Weight[] filters = this.aggregationType.buildFastFilter(leaf, context); + if (filters != null) { + logger.debug("Fast filter built for shard {} segment {}", context.indexShard().shardId(), leaf.ord); + } + return filters; + } + } + + /** + * Different types have different pre-conditions, filter building logic, etc.
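The `FastFilterContext` javadoc above prescribes the call sequence. Condensed from the `CompositeAggregator` constructor later in this patch (where `context`, `parent`, and `subAggregators` are in scope), the expected usage looks like:

```java
// Mirrors the aggregator-side wiring shown later in this diff: choose an AggregationType,
// verify the aggregation shape is rewriteable, then try building shard-level filters.
FastFilterRewriteHelper.FastFilterContext fastFilterContext =
    new FastFilterRewriteHelper.FastFilterContext(context);
fastFilterContext.setAggregationType(new CompositeAggregationType()); // type is aggregator-specific
if (fastFilterContext.isRewriteable(parent, subAggregators.length)) {
    fastFilterContext.buildFastFilter();
}
```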
+ */ + interface AggregationType { + + boolean isRewriteable(Object parent, int subAggLength); + + Weight[] buildFastFilter(SearchContext ctx) throws IOException; + + Weight[] buildFastFilter(LeafReaderContext leaf, SearchContext ctx) throws IOException; + + default int getSize() { + return Integer.MAX_VALUE; + } + } + + /** + * For date histogram aggregation + */ + public static abstract class AbstractDateHistogramAggregationType implements AggregationType { + private final MappedFieldType fieldType; + private final boolean missing; + private final boolean hasScript; + private LongBounds hardBounds; + + public AbstractDateHistogramAggregationType(MappedFieldType fieldType, boolean missing, boolean hasScript) { + this.fieldType = fieldType; + this.missing = missing; + this.hasScript = hasScript; + } + + public AbstractDateHistogramAggregationType(MappedFieldType fieldType, boolean missing, boolean hasScript, LongBounds hardBounds) { + this(fieldType, missing, hasScript); + this.hardBounds = hardBounds; + } + + @Override + public boolean isRewriteable(Object parent, int subAggLength) { + if (parent == null && subAggLength == 0 && !missing && !hasScript) { + if (fieldType != null && fieldType instanceof DateFieldMapper.DateFieldType) { + return fieldType.isSearchable(); + } + } + return false; + } + + @Override + public Weight[] buildFastFilter(SearchContext context) throws IOException { + long[] bounds = getDateHistoAggBounds(context, fieldType.name()); + logger.debug("Bounds are {} for shard {}", bounds, context.indexShard().shardId()); + return buildFastFilter(context, bounds); + } + + @Override + public Weight[] buildFastFilter(LeafReaderContext leaf, SearchContext context) throws IOException { + long[] bounds = getSegmentBounds(leaf, fieldType.name()); + logger.debug("Bounds are {} for shard {} segment {}", bounds, context.indexShard().shardId(), leaf.ord); + return buildFastFilter(context, bounds); + } + + private Weight[] buildFastFilter(SearchContext context, long[] bounds) throws IOException { + bounds = processHardBounds(bounds); + if (bounds == null) { + return null; + } + assert bounds[0] <= bounds[1] : "Low bound should be less than high bound"; + + final Rounding rounding = getRounding(bounds[0], bounds[1]); + final OptionalLong intervalOpt = Rounding.getInterval(rounding); + if (intervalOpt.isEmpty()) { + return null; + } + final long interval = intervalOpt.getAsLong(); + + // process the after key of composite agg + processAfterKey(bounds, interval); + + return FastFilterRewriteHelper.createFilterForAggregations( + context, + (DateFieldMapper.DateFieldType) fieldType, + interval, + getRoundingPrepared(), + bounds[0], + bounds[1] + ); + } + + protected abstract Rounding getRounding(final long low, final long high); + + protected abstract Rounding.Prepared getRoundingPrepared(); + + protected void processAfterKey(long[] bound, long interval) {} + + protected long[] processHardBounds(long[] bounds) { + if (bounds != null) { + // Update min/max limit if user specified any hard bounds + if (hardBounds != null) { + if (hardBounds.getMin() > bounds[0]) { + bounds[0] = hardBounds.getMin(); + } + if (hardBounds.getMax() - 1 < bounds[1]) { + bounds[1] = hardBounds.getMax() - 1; // hard bounds max is exclusive + } + if (bounds[0] > bounds[1]) { + return null; + } + } + } + return bounds; + } + + public DateFieldMapper.DateFieldType getFieldType() { + assert fieldType instanceof DateFieldMapper.DateFieldType; + return (DateFieldMapper.DateFieldType) fieldType; + } + } + + public static 
boolean isCompositeAggRewriteable(CompositeValuesSourceConfig[] sourceConfigs) { + return sourceConfigs.length == 1 && sourceConfigs[0].valuesSource() instanceof RoundingValuesSource; + } + + public static long getBucketOrd(long bucketOrd) { + if (bucketOrd < 0) { // already seen + bucketOrd = -1 - bucketOrd; + } + + return bucketOrd; + } + + /** + * Try to get the bucket doc counts from the fast filters for the aggregation + * <p> + * Usage: invoked at segment level — in getLeafCollector of aggregator + * + * @param incrementDocCount takes in the bucket key value and the bucket count + */ + public static boolean tryFastFilterAggregation( + final LeafReaderContext ctx, + FastFilterContext fastFilterContext, + final BiConsumer<Long, Integer> incrementDocCount + ) throws IOException { + if (fastFilterContext == null) return false; + if (!fastFilterContext.rewriteable) { + return false; + } + + NumericDocValues docCountValues = DocValues.getNumeric(ctx.reader(), DocCountFieldMapper.NAME); + if (docCountValues.nextDoc() != NO_MORE_DOCS) { + logger.debug( + "Shard {} segment {} has at least one document with _doc_count field, skip fast filter optimization", + fastFilterContext.context.indexShard().shardId(), + ctx.ord + ); + return false; + } + + // if no filters built at shard level (see getDateHistoAggBounds method for possible reasons) + // check if the query is functionally match-all at segment level + if (!fastFilterContext.filtersBuiltAtShardLevel && !segmentMatchAll(fastFilterContext.context, ctx)) { + return false; + } + Weight[] filters = fastFilterContext.filters; + if (filters == null) { + logger.debug( + "Shard {} segment {} functionally match all documents. Build the fast filter", + fastFilterContext.context.indexShard().shardId(), + ctx.ord + ); + filters = fastFilterContext.buildFastFilter(ctx); + if (filters == null) { + return false; + } + } + + final int[] counts = new int[filters.length]; + int i; + for (i = 0; i < filters.length; i++) { + counts[i] = filters[i].count(ctx); + if (counts[i] == -1) { + // Cannot use the optimization if any of the counts + // is -1 indicating the segment might have deleted documents + return false; + } + } + + int s = 0; + int size = fastFilterContext.aggregationType.getSize(); + for (i = 0; i < filters.length; i++) { + if (counts[i] > 0) { + long bucketKey = i; // the index of filters is the key for filters aggregation + if (fastFilterContext.aggregationType instanceof AbstractDateHistogramAggregationType) { + final DateFieldMapper.DateFieldType fieldType = + ((AbstractDateHistogramAggregationType) fastFilterContext.aggregationType).getFieldType(); + bucketKey = fieldType.convertNanosToMillis( + NumericUtils.sortableBytesToLong(((PointRangeQuery) filters[i].getQuery()).getLowerPoint(), 0) + ); + } + incrementDocCount.accept(bucketKey, counts[i]); + s++; + if (s > size) { + break; + } + } + } + + logger.debug("Fast filter optimization applied to shard {} segment {}", fastFilterContext.context.indexShard().shardId(), ctx.ord); + return true; + } + + private static boolean segmentMatchAll(SearchContext ctx, LeafReaderContext leafCtx) throws IOException { + Weight weight = ctx.searcher().createWeight(ctx.query(), ScoreMode.COMPLETE_NO_SCORES, 1f); + return weight != null && weight.count(leafCtx) == leafCtx.reader().numDocs(); + } +} diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/GeoTileUtils.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/GeoTileUtils.java index d37780f9808dc..dfb8f6be7155d 100644 
--- a/server/src/main/java/org/opensearch/search/aggregations/bucket/GeoTileUtils.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/GeoTileUtils.java @@ -37,9 +37,9 @@ import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.geo.GeoShapeDocValue; import org.opensearch.common.util.OpenSearchSloppyMath; +import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.core.xcontent.ObjectParser.ValueType; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.geometry.Rectangle; import java.io.IOException; @@ -101,7 +101,7 @@ private GeoTileUtils() {} /** * Parse an integer precision (zoom level). The {@link ValueType#INT} allows it to be a number or a string. - * + * <p> * The precision is expressed as a zoom level between 0 and {@link #MAX_ZOOM} (inclusive). * * @param parser {@link XContentParser} to parse the value from diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/LocalBucketCountThresholds.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/LocalBucketCountThresholds.java new file mode 100644 index 0000000000000..9d8654565b391 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/LocalBucketCountThresholds.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.aggregations.bucket; + +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.search.aggregations.bucket.terms.TermsAggregator; + +/** + * BucketCountThresholds type that holds the local (either shard level or request level) bucket count thresholds in minDocCount and requireSize fields. + * Similar to {@link TermsAggregator.BucketCountThresholds} however only provides getters for the local members and no setters. + * + * @opensearch.api + */ +@PublicApi(since = "1.0.0") +public class LocalBucketCountThresholds { + + private final long minDocCount; + private final int requiredSize; + + public LocalBucketCountThresholds(long localminDocCount, int localRequiredSize) { + this.minDocCount = localminDocCount; + this.requiredSize = localRequiredSize; + } + + public int getRequiredSize() { + return requiredSize; + } + + public long getMinDocCount() { + return minDocCount; + } +} diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java index 8d35c1edc8cb0..3e9424eda92a9 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java @@ -55,16 +55,17 @@ public MergingBucketsDeferringCollector(SearchContext context, boolean isGlobal) /** * Merges/prunes the existing bucket ordinals and docDeltas according to the provided mergeMap. - * + * <p> * The mergeMap is an array where the index position represents the current bucket ordinal, and * the value at that position represents the ordinal the bucket should be merged with. If * the value is set to -1 it is removed entirely. 
- * + * <p> * For example, if the mergeMap [1,1,3,-1,3] is provided: - * - Buckets `0` and `1` will be merged to bucket ordinal `1` - * - Bucket `2` and `4` will be merged to ordinal `3` - * - Bucket `3` will be removed entirely - * + * <ul> + * <li> Buckets `0` and `1` will be merged to bucket ordinal `1`</li> + * <li> Bucket `2` and `4` will be merged to ordinal `3`</li> + * <li> Bucket `3` will be removed entirely</li> + * </ul> * This process rebuilds the ordinals and docDeltas according to the mergeMap, so it should * not be called unless there are actually changes to be made, to avoid unnecessary work. * @@ -80,7 +81,7 @@ public void mergeBuckets(long[] mergeMap) { * * @param mergeMap a unary operator which maps a bucket's ordinal to the ordinal it should be merged with. * If a bucket's ordinal is mapped to -1 then the bucket is removed entirely. - * + * <p> * This process rebuilds the ordinals and docDeltas according to the mergeMap, so it should * not be called unless there are actually changes to be made, to avoid unnecessary work. */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregator.java index 5b8f38b9873ea..ef1795f425240 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregator.java @@ -35,11 +35,11 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; +import org.opensearch.common.lucene.Lucene; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.lucene.Lucene; import org.opensearch.core.xcontent.ObjectParser.NamedObjectParser; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java index fe1270e10c80e..99ffb563ba2a8 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java @@ -91,4 +91,8 @@ public Aggregator createInternal( return new AdjacencyMatrixAggregator(name, factories, separator, keys, weights, searchContext, parent, metadata); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/BinaryValuesSource.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/BinaryValuesSource.java index 917105f4b7011..54fd261f88c35 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/BinaryValuesSource.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/BinaryValuesSource.java @@ -39,9 +39,9 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.opensearch.common.CheckedFunction; +import 
org.opensearch.common.lease.Releasables; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.ObjectArray; -import org.opensearch.common.lease.Releasables; import org.opensearch.index.fielddata.SortedBinaryDocValues; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.StringFieldType; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java index 5e8791441d83a..e57acba5bc0ad 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java @@ -44,7 +44,9 @@ import org.opensearch.search.aggregations.AggregationBuilder; import org.opensearch.search.aggregations.AggregatorFactories; import org.opensearch.search.aggregations.AggregatorFactory; +import org.opensearch.search.aggregations.bucket.filter.FilterAggregatorFactory; import org.opensearch.search.aggregations.bucket.nested.NestedAggregatorFactory; +import org.opensearch.search.aggregations.bucket.nested.ReverseNestedAggregatorFactory; import org.opensearch.search.aggregations.support.ValuesSourceRegistry; import java.io.IOException; @@ -240,14 +242,16 @@ public BucketCardinality bucketCardinality() { * this aggregator or the instance of the parent's factory that is incompatible with * the composite aggregation. */ - private AggregatorFactory checkParentIsNullOrNested(AggregatorFactory factory) { + private static AggregatorFactory checkParentIsSafe(AggregatorFactory factory) { if (factory == null) { return null; - } else if (factory instanceof NestedAggregatorFactory) { - return checkParentIsNullOrNested(factory.getParent()); - } else { - return factory; - } + } else if (factory instanceof NestedAggregatorFactory + || factory instanceof FilterAggregatorFactory + || factory instanceof ReverseNestedAggregatorFactory) { + return checkParentIsSafe(factory.getParent()); + } else { + return factory; + } } private static void validateSources(List<CompositeValuesSourceBuilder<?>> sources) { @@ -278,7 +282,7 @@ protected AggregatorFactory doBuild( AggregatorFactory parent, AggregatorFactories.Builder subfactoriesBuilder ) throws IOException { - AggregatorFactory invalid = checkParentIsNullOrNested(parent); + AggregatorFactory invalid = checkParentIsSafe(parent); if (invalid != null) { throw new IllegalArgumentException( "[composite] aggregation cannot be used with a parent aggregation of" diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java index 09691a69c75f4..4af14ab014db5 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java @@ -77,4 +77,10 @@ protected Aggregator createInternal( ) throws IOException { return new CompositeAggregator(name, factories, searchContext, parent, metadata, size, sources, afterKey); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + // See https://github.com/opensearch-project/OpenSearch/issues/12331 for details + return false; + } } diff --git 
a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java index 8b487fc499602..b97c814cdf645 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java @@ -37,7 +37,6 @@ import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.index.SortedSetDocValues; -import org.opensearch.lucene.queries.SearchAfterSortedDocQuery; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.CollectionTerminatedException; @@ -46,6 +45,7 @@ import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.LeafFieldComparator; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; @@ -56,9 +56,12 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.search.comparators.LongComparator; import org.apache.lucene.util.Bits; +import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.RoaringDocIdSet; +import org.opensearch.common.Rounding; import org.opensearch.common.lease.Releasables; import org.opensearch.index.IndexSortConfig; +import org.opensearch.lucene.queries.SearchAfterSortedDocQuery; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; @@ -70,7 +73,9 @@ import org.opensearch.search.aggregations.MultiBucketCollector; import org.opensearch.search.aggregations.MultiBucketConsumerService; import org.opensearch.search.aggregations.bucket.BucketsAggregator; +import org.opensearch.search.aggregations.bucket.FastFilterRewriteHelper; import org.opensearch.search.aggregations.bucket.missing.MissingOrder; +import org.opensearch.search.aggregations.bucket.terms.LongKeyedBucketOrds; import org.opensearch.search.internal.SearchContext; import org.opensearch.search.searchafter.SearchAfterBuilder; import org.opensearch.search.sort.SortAndFormats; @@ -79,6 +84,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.function.LongUnaryOperator; @@ -110,6 +116,10 @@ final class CompositeAggregator extends BucketsAggregator { private boolean earlyTerminated; + private final FastFilterRewriteHelper.FastFilterContext fastFilterContext; + private LongKeyedBucketOrds bucketOrds = null; + private Rounding.Prepared preparedRounding = null; + CompositeAggregator( String name, AggregatorFactories factories, @@ -153,12 +163,64 @@ final class CompositeAggregator extends BucketsAggregator { } this.queue = new CompositeValuesCollectorQueue(context.bigArrays(), sources, size, rawAfterKey); this.rawAfterKey = rawAfterKey; + + fastFilterContext = new FastFilterRewriteHelper.FastFilterContext(context); + if (!FastFilterRewriteHelper.isCompositeAggRewriteable(sourceConfigs)) return; + fastFilterContext.setAggregationType(new CompositeAggregationType()); + if (fastFilterContext.isRewriteable(parent, subAggregators.length)) { + // bucketOrds is used for saving date histogram results + bucketOrds = 
LongKeyedBucketOrds.build(context.bigArrays(), CardinalityUpperBound.ONE); + preparedRounding = ((CompositeAggregationType) fastFilterContext.getAggregationType()).getRoundingPrepared(); + fastFilterContext.buildFastFilter(); + } + } + + /** + * Currently the filter rewrite is only supported for date histograms + */ + private class CompositeAggregationType extends FastFilterRewriteHelper.AbstractDateHistogramAggregationType { + private final RoundingValuesSource valuesSource; + private long afterKey = -1L; + + public CompositeAggregationType() { + super(sourceConfigs[0].fieldType(), sourceConfigs[0].missingBucket(), sourceConfigs[0].hasScript()); + this.valuesSource = (RoundingValuesSource) sourceConfigs[0].valuesSource(); + if (rawAfterKey != null) { + assert rawAfterKey.size() == 1 && formats.size() == 1; + this.afterKey = formats.get(0).parseLong(rawAfterKey.get(0).toString(), false, () -> { + throw new IllegalArgumentException("now() is not supported in [after] key"); + }); + } + } + + public Rounding getRounding(final long low, final long high) { + return valuesSource.getRounding(); + } + + public Rounding.Prepared getRoundingPrepared() { + return valuesSource.getPreparedRounding(); + } + + @Override + protected void processAfterKey(long[] bound, long interval) { + // afterKey is the last bucket key in previous response, and the bucket key + // is the minimum of all values in the bucket, so need to add the interval + if (afterKey != -1L) { + bound[0] = afterKey + interval; + } + } + + @Override + public int getSize() { + return size; + } } @Override protected void doClose() { try { Releasables.close(queue); + Releasables.close(bucketOrds); } finally { Releasables.close(sources); } @@ -186,12 +248,14 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I } int num = Math.min(size, queue.size()); - final InternalComposite.InternalBucket[] buckets = new InternalComposite.InternalBucket[num]; + InternalComposite.InternalBucket[] buckets = new InternalComposite.InternalBucket[num]; + long[] bucketOrdsToCollect = new long[queue.size()]; for (int i = 0; i < queue.size(); i++) { bucketOrdsToCollect[i] = i; } InternalAggregations[] subAggsForBuckets = buildSubAggsForBuckets(bucketOrdsToCollect); + while (queue.size() > 0) { int slot = queue.pop(); CompositeKey key = queue.toCompositeKey(slot); @@ -207,6 +271,43 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I aggs ); } + + // Build results from fast filters optimization + if (bucketOrds != null) { + // CompositeKey is the value of bucket key + final Map<CompositeKey, InternalComposite.InternalBucket> bucketMap = new HashMap<>(); + // Some segments may not be optimized, so buckets may contain results from the queue. 
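A quick worked example of `processAfterKey` above, assuming a fixed one-hour interval with millisecond keys (the values are illustrative):

```java
// The after key returned to the client is the *lower* bound of the last bucket served,
// so the next page must start one full interval later.
long interval = 3_600_000L;                 // 1h in millis
long afterKey = 0L;                         // lower bound of the last bucket (epoch midnight)
long nextLowerBound = afterKey + interval;  // 3_600_000: first bucket key of the next page
```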
+ for (InternalComposite.InternalBucket internalBucket : buckets) { + bucketMap.put(internalBucket.getRawKey(), internalBucket); + } + // Loop over the buckets in the bucketOrds, and populate the map accordingly + LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(0); + while (ordsEnum.next()) { + Long bucketKeyValue = ordsEnum.value(); + CompositeKey key = new CompositeKey(bucketKeyValue); + if (bucketMap.containsKey(key)) { + long docCount = bucketDocCount(ordsEnum.ord()) + bucketMap.get(key).getDocCount(); + bucketMap.get(key).setDocCount(docCount); + } else { + InternalComposite.InternalBucket bucket = new InternalComposite.InternalBucket( + sourceNames, + formats, + key, + reverseMuls, + missingOrders, + bucketDocCount(ordsEnum.ord()), + buildEmptySubAggregations() + ); + bucketMap.put(key, bucket); + } + } + // since a map is not a sorted structure, sort it before transforming back to buckets + List<InternalComposite.InternalBucket> bucketList = new ArrayList<>(bucketMap.values()); + CollectionUtil.introSort(bucketList, InternalComposite.InternalBucket::compareKey); + buckets = bucketList.subList(0, Math.min(size, bucketList.size())).toArray(InternalComposite.InternalBucket[]::new); + num = buckets.length; + } + CompositeKey lastBucket = num > 0 ? buckets[num - 1].getRawKey() : null; return new InternalAggregation[] { new InternalComposite( @@ -295,7 +396,7 @@ private Sort buildIndexSortPrefix(LeafReaderContext context) throws IOException if (indexSortField.getReverse() != (source.reverseMul == -1)) { if (i == 0) { - // the leading index sort matches the leading source field but the order is reversed + // the leading index sort matches the leading source field, but the order is reversed, // so we don't check the other sources. return new Sort(indexSortField); } @@ -303,8 +404,8 @@ } sortFields.add(indexSortField); if (sourceConfig.valuesSource() instanceof RoundingValuesSource) { - // the rounding "squashes" many values together, that breaks the ordering of sub-values - // so we ignore subsequent source even if they match the index sort. + // the rounding "squashes" many values together, that breaks the ordering of sub-values, + // so we ignore the subsequent sources even if they match the index sort.
break; } } @@ -354,8 +455,8 @@ public int hashCode() { } @Override - public FieldComparator<?> getComparator(int numHits, boolean enableSkipping) { - return new LongComparator(1, delegate.getField(), (Long) missingValue, delegate.getReverse(), false) { + public FieldComparator<?> getComparator(int numHits, Pruning pruning) { + return new LongComparator(1, delegate.getField(), (Long) missingValue, delegate.getReverse(), Pruning.NONE) { @Override public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { return new LongLeafComparator(context) { @@ -447,6 +548,16 @@ private void processLeafFromQuery(LeafReaderContext ctx, Sort indexSortPrefix) t @Override protected LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException { + boolean optimized = FastFilterRewriteHelper.tryFastFilterAggregation( + ctx, + fastFilterContext, + (key, count) -> incrementBucketDocCount( + FastFilterRewriteHelper.getBucketOrd(bucketOrds.add(0, preparedRounding.round(key))), + count + ) + ); + if (optimized) throw new CollectionTerminatedException(); + finishLeaf(); boolean fillDocIdSet = deferredCollectors != NO_OP_COLLECTOR; @@ -476,9 +587,10 @@ protected LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucket docIdSetBuilder = new RoaringDocIdSet.Builder(ctx.reader().maxDoc()); } if (rawAfterKey != null && sortPrefixLen > 0) { - // We have an after key and index sort is applicable so we jump directly to the doc - // that is after the index sort prefix using the rawAfterKey and we start collecting - // document from there. + // We have an after key and index sort is applicable, so we jump directly to the doc + // after the index sort prefix using the rawAfterKey and we start collecting + // documents from there. + assert indexSortPrefix != null; processLeafFromQuery(ctx, indexSortPrefix); throw new CollectionTerminatedException(); } else { @@ -506,6 +618,8 @@ public void collect(int doc, long bucket) throws IOException { try { long docCount = docCountProvider.getDocCount(doc); if (queue.addIfCompetitive(indexSortPrefix, docCount)) { + // one doc may contain multiple values, we iterate over and collect one by one + // so the same doc can appear multiple times here if (builder != null && lastDoc != doc) { builder.add(doc); lastDoc = doc; @@ -568,7 +682,7 @@ private LeafBucketCollector getSecondPassCollector(LeafBucketCollector subCollec @Override public void collect(int doc, long zeroBucket) throws IOException { assert zeroBucket == 0; - Integer slot = queue.compareCurrent(); + Integer slot = queue.getCurrentSlot(); if (slot != null) { // The candidate key is a top bucket. // We can defer the collection of this document/bucket to the sub collector diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeKey.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeKey.java index 5ddeb22d33a6f..338ebdc66eef7 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeKey.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeKey.java @@ -44,7 +44,7 @@ * * @opensearch.internal */ -class CompositeKey implements Writeable { +public class CompositeKey implements Writeable { private final Comparable[] values; CompositeKey(Comparable... 
values) { @@ -64,11 +64,11 @@ Comparable[] values() { return values; } - int size() { + public int size() { return values.length; } - Comparable get(int pos) { + public Comparable get(int pos) { assert pos < values.length; return values[pos]; } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java index 11fc267e7d745..2c4d451322bca 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java @@ -35,10 +35,10 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.CollectionTerminatedException; import org.apache.lucene.util.PriorityQueue; -import org.opensearch.common.util.BigArrays; -import org.opensearch.common.util.LongArray; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; +import org.opensearch.common.util.BigArrays; +import org.opensearch.common.util.LongArray; import org.opensearch.search.aggregations.LeafBucketCollector; import java.io.IOException; @@ -47,6 +47,8 @@ /** * A specialized {@link PriorityQueue} implementation for composite buckets. + * Think of this as a max heap that holds the slots of the top (smallest) buckets in order. + * Each slot holds the values of the composite bucket key it represents. * * @opensearch.internal */ @@ -77,7 +79,7 @@ public int hashCode() { private final BigArrays bigArrays; private final int maxSize; - private final Map<Slot, Integer> map; + private final Map<Slot, Integer> map; // to quickly find the slot for a value private final SingleDimensionValuesSource<?>[] arrays; private LongArray docCounts; @@ -108,7 +110,7 @@ public int hashCode() { @Override protected boolean lessThan(Integer a, Integer b) { - return compare(a, b) > 0; + return compare(a, b) > 0; // max heap } /** @@ -119,10 +121,10 @@ boolean isFull() { } /** - * Compares the current candidate with the values in the queue and returns + * Tries to get the slot of the current/candidate values in the queue and returns * the slot if the candidate is already in the queue or null if the candidate is not present. */ - Integer compareCurrent() { + Integer getCurrentSlot() { return map.get(new Slot(CANDIDATE_SLOT)); } @@ -281,32 +283,34 @@ boolean addIfCompetitive(long inc) { */ boolean addIfCompetitive(int indexSortSourcePrefix, long inc) { // checks if the candidate key is competitive - Integer topSlot = compareCurrent(); - if (topSlot != null) { + Integer curSlot = getCurrentSlot(); + if (curSlot != null) { // this key is already in the top N, skip it - docCounts.increment(topSlot, inc); + docCounts.increment(curSlot, inc); return true; } + if (afterKeyIsSet) { int cmp = compareCurrentWithAfter(); if (cmp <= 0) { if (indexSortSourcePrefix < 0 && cmp == indexSortSourcePrefix) { - // the leading index sort is in the reverse order of the leading source + // the leading index sort and the leading source order are both reversed, // so we can early terminate when we reach a document that is smaller // than the after key (collected on a previous page). throw new CollectionTerminatedException(); } - // key was collected on a previous page, skip it (>= afterKey). + // the key was collected on a previous page, skip it.
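The inverted `lessThan` above is what turns Lucene's `PriorityQueue` into a max heap: the top is the least competitive kept slot, so evictions stay cheap. A standalone sketch with plain integers standing in for bucket slots:

```java
import org.apache.lucene.util.PriorityQueue;

// Keep the 3 smallest keys; the heap top is the largest kept key, mirroring
// CompositeValuesCollectorQueue's inverted comparison.
PriorityQueue<Integer> topN = new PriorityQueue<>(3) {
    @Override
    protected boolean lessThan(Integer a, Integer b) {
        return a > b; // inverted: the largest value sits on top
    }
};
for (int v : new int[] { 5, 1, 9, 3, 7 }) {
    topN.insertWithOverflow(v);
}
// topN.top() == 5: the largest of the three smallest values {1, 3, 5}
```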
return false; } } + + // the heap is full, check if the candidate key is larger than the max heap top if (size() >= maxSize) { - // the tree map is full, check if the candidate key should be kept int cmp = compare(CANDIDATE_SLOT, top()); if (cmp > 0) { if (cmp <= indexSortSourcePrefix) { - // index sort guarantees that there is no key greater or equal than the - // current one in the subsequent documents so we can early terminate. + // index sort guarantees the following documents will have a key larger than the current candidate, + // so we can early terminate. throw new CollectionTerminatedException(); } // the candidate key is not competitive, skip it. @@ -324,7 +328,7 @@ boolean addIfCompetitive(int indexSortSourcePrefix, long inc) { } else { newSlot = size(); } - // move the candidate key to its new slot + // move the candidate key to its new slot by copying its values into that slot copyCurrent(newSlot, inc); map.put(new Slot(newSlot), newSlot); add(newSlot); diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java index 788a4ddc15374..5289b3a34ab34 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java @@ -156,7 +156,7 @@ public MissingOrder missingOrder() { /** * Returns true if the source contains a script that can change the value. */ - protected boolean hasScript() { + public boolean hasScript() { return hasScript; } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java index 3ef1488ab1c2f..3926ce9bbecb7 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java @@ -33,12 +33,12 @@ package org.opensearch.search.aggregations.bucket.composite; import org.apache.lucene.index.IndexReader; -import org.opensearch.core.ParseField; import org.opensearch.common.Rounding; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -216,7 +216,7 @@ public DateHistogramValuesSourceBuilder dateHistogramInterval(DateHistogramInter /** * Sets the interval of the DateHistogram using calendar units (`1d`, `1w`, `1M`, etc). These units * are calendar-aware, meaning they respect leap additions, variable days per month, etc.
- * + * <p> * This is mutually exclusive with {@link DateHistogramValuesSourceBuilder#fixedInterval(DateHistogramInterval)} * * @param interval The calendar interval to use with the aggregation @@ -229,7 +229,7 @@ public DateHistogramValuesSourceBuilder calendarInterval(DateHistogramInterval i /** * Sets the interval of the DateHistogram using fixed units (`1ms`, `1s`, `10m`, `4h`, etc). These are * not calendar aware and are simply multiples of fixed, SI units. - * + * <p> * This is mutually exclusive with {@link DateHistogramValuesSourceBuilder#calendarInterval(DateHistogramInterval)} * * @param interval The fixed interval to use with the aggregation @@ -298,7 +298,7 @@ public static void register(ValuesSourceRegistry.Builder builder) { // TODO once composite is plugged in to the values source registry or at least understands Date values source types use it // here Rounding.Prepared preparedRounding = rounding.prepareForUnknown(); - RoundingValuesSource vs = new RoundingValuesSource(numeric, preparedRounding); + RoundingValuesSource vs = new RoundingValuesSource(numeric, preparedRounding, rounding); // is specified in the builder. final DocValueFormat docValueFormat = format == null ? DocValueFormat.RAW : valuesSourceConfig.format(); final MappedFieldType fieldType = valuesSourceConfig.fieldType(); diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DoubleValuesSource.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DoubleValuesSource.java index 10eebe204d601..970f07b6a9d74 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DoubleValuesSource.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DoubleValuesSource.java @@ -36,10 +36,10 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Query; import org.opensearch.common.CheckedFunction; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.BitArray; import org.opensearch.common.util.DoubleArray; -import org.opensearch.common.lease.Releasables; import org.opensearch.index.fielddata.SortedNumericDoubleValues; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.search.DocValueFormat; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/GlobalOrdinalValuesSource.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/GlobalOrdinalValuesSource.java index 6331496196b2a..3e5c53d470f79 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/GlobalOrdinalValuesSource.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/GlobalOrdinalValuesSource.java @@ -39,9 +39,9 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.opensearch.common.CheckedFunction; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.LongArray; -import org.opensearch.common.lease.Releasables; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.StringFieldType; import org.opensearch.search.DocValueFormat; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java index 
a6872e5278588..208b8e12fe948 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java @@ -33,9 +33,9 @@ package org.opensearch.search.aggregations.bucket.composite; import org.apache.lucene.index.IndexReader; +import org.opensearch.common.util.BigArrays; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.util.BigArrays; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/InternalComposite.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/InternalComposite.java index 27619044d8995..43f1ad32a66f4 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/InternalComposite.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/InternalComposite.java @@ -142,10 +142,10 @@ public String getWriteableName() { @Override public InternalComposite create(List<InternalBucket> newBuckets) { - /** - * This is used by pipeline aggregations to filter/remove buckets so we - * keep the <code>afterKey</code> of the original aggregation in order - * to be able to retrieve the next page even if all buckets have been filtered. + /* + This is used by pipeline aggregations to filter/remove buckets so we + keep the <code>afterKey</code> of the original aggregation in order + to be able to retrieve the next page even if all buckets have been filtered. 
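      For example, a bucket_selector pipeline may drop every bucket of the current page;
      keeping the after key still lets a client request the following page, roughly
      (a usage sketch, not code from this change):

        new CompositeAggregationBuilder("my_buckets", sources)
            .aggregateAfter(previousResponse.afterKey());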
*/ return new InternalComposite( name, @@ -339,7 +339,7 @@ public static class InternalBucket extends InternalMultiBucketAggregation.Intern KeyComparable<InternalBucket> { private final CompositeKey key; - private final long docCount; + private long docCount; private final InternalAggregations aggregations; private final transient int[] reverseMuls; private final transient MissingOrder[] missingOrders; @@ -436,6 +436,10 @@ public long getDocCount() { return docCount; } + public void setDocCount(long docCount) { + this.docCount = docCount; + } + @Override public Aggregations getAggregations() { return aggregations; } @@ -473,8 +477,8 @@ public int compareKey(InternalBucket other) { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - /** - * See {@link CompositeAggregation#bucketToXContent} + /* + See {@link CompositeAggregation#bucketToXContent} */ throw new UnsupportedOperationException("not implemented"); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/LongValuesSource.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/LongValuesSource.java index 8b162ffaf8604..48e080c1576dd 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/LongValuesSource.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/LongValuesSource.java @@ -45,10 +45,10 @@ import org.apache.lucene.search.PointRangeQuery; import org.apache.lucene.search.Query; import org.opensearch.common.CheckedFunction; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.BitArray; import org.opensearch.common.util.LongArray; -import org.opensearch.common.lease.Releasables; import org.opensearch.index.mapper.DateFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.NumberFieldMapper; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/ParsedComposite.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/ParsedComposite.java index 8382b191025fe..2a1544e218f2c 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/ParsedComposite.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/ParsedComposite.java @@ -70,9 +70,9 @@ public static ParsedComposite fromXContent(XContentParser parser, String name) t ParsedComposite aggregation = PARSER.parse(parser, null); aggregation.setName(name); if (aggregation.afterKey == null && aggregation.getBuckets().size() > 0) { - /** - * Previous versions (< 6.3) don't send <code>afterKey</code> - * in the response so we set it as the last returned buckets. + /* + Previous versions (< 6.3) don't send <code>afterKey</code> + in the response, so we set it to the key of the last returned bucket.
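      E.g. for returned buckets [b1, b2, b3], the synthesized after key is b3's key,
      which matches what a newer node would have sent.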
*/ aggregation.setAfterKey(aggregation.getBuckets().get(aggregation.getBuckets().size() - 1).key); } @@ -130,8 +130,8 @@ void setKey(Map<String, Object> key) { @Override public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - /** - * See {@link CompositeAggregation#bucketToXContent} + /* + See {@link CompositeAggregation#bucketToXContent} */ throw new UnsupportedOperationException("not implemented"); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/PointsSortedDocsProducer.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/PointsSortedDocsProducer.java index 3d6730203b6ae..dc130eb54c0ea 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/PointsSortedDocsProducer.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/PointsSortedDocsProducer.java @@ -68,6 +68,7 @@ DocIdSet processLeaf(Query query, CompositeValuesCollectorQueue queue, LeafReade // no value for the field return DocIdSet.EMPTY; } + long lowerBucket = Long.MIN_VALUE; Comparable lowerValue = queue.getLowerValueLeadSource(); if (lowerValue != null) { @@ -76,7 +77,6 @@ DocIdSet processLeaf(Query query, CompositeValuesCollectorQueue queue, LeafReade } lowerBucket = (Long) lowerValue; } - long upperBucket = Long.MAX_VALUE; Comparable upperValue = queue.getUpperValueLeadSource(); if (upperValue != null) { @@ -85,6 +85,7 @@ DocIdSet processLeaf(Query query, CompositeValuesCollectorQueue queue, LeafReade } upperBucket = (Long) upperValue; } + DocIdSetBuilder builder = fillDocIdSet ? new DocIdSetBuilder(context.reader().maxDoc(), values, field) : null; Visitor visitor = new Visitor(context, queue, builder, values.getBytesPerDimension(), lowerBucket, upperBucket); try { @@ -146,6 +147,7 @@ public void visit(int docID, byte[] packedValue) throws IOException { } long bucket = bucketFunction.applyAsLong(packedValue); + // process previous bucket when new bucket appears if (first == false && bucket != lastBucket) { final DocIdSet docIdSet = bucketDocsBuilder.build(); if (processBucket(queue, context, docIdSet.iterator(), lastBucket, builder) && @@ -182,13 +184,13 @@ public PointValues.Relation compare(byte[] minPackedValue, byte[] maxPackedValue return PointValues.Relation.CELL_OUTSIDE_QUERY; } } - if (upperBucket != Long.MAX_VALUE) { long minBucket = bucketFunction.applyAsLong(minPackedValue); if (minBucket > upperBucket) { return PointValues.Relation.CELL_OUTSIDE_QUERY; } } + return PointValues.Relation.CELL_CROSSES_QUERY; } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/RoundingValuesSource.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/RoundingValuesSource.java index 89315724ff9ed..3f5cf919f1755 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/RoundingValuesSource.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/RoundingValuesSource.java @@ -47,17 +47,19 @@ * * @opensearch.internal */ -class RoundingValuesSource extends ValuesSource.Numeric { +public class RoundingValuesSource extends ValuesSource.Numeric { private final ValuesSource.Numeric vs; - private final Rounding.Prepared rounding; + private final Rounding.Prepared preparedRounding; + private final Rounding rounding; /** - * - * @param vs The original values source - * @param rounding How to round the values + * @param vs The original values source + * @param 
preparedRounding How to round the values + * @param rounding The rounding strategy */ - RoundingValuesSource(Numeric vs, Rounding.Prepared rounding) { + RoundingValuesSource(Numeric vs, Rounding.Prepared preparedRounding, Rounding rounding) { this.vs = vs; + this.preparedRounding = preparedRounding; this.rounding = rounding; } @@ -71,8 +73,16 @@ public boolean isBigInteger() { return false; } + public Rounding.Prepared getPreparedRounding() { + return preparedRounding; + } + + public Rounding getRounding() { + return rounding; + } + public long round(long value) { - return rounding.round(value); + return preparedRounding.round(value); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SingleDimensionValuesSource.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SingleDimensionValuesSource.java index 9f48b54350e25..fe0801d6d230e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SingleDimensionValuesSource.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SingleDimensionValuesSource.java @@ -36,8 +36,8 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Query; import org.opensearch.common.Nullable; -import org.opensearch.common.util.BigArrays; import org.opensearch.common.lease.Releasable; +import org.opensearch.common.util.BigArrays; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.LeafBucketCollector; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SortedDocsProducer.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SortedDocsProducer.java index bd0a4f13ddf08..9442529bf9342 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SortedDocsProducer.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SortedDocsProducer.java @@ -62,7 +62,7 @@ abstract class SortedDocsProducer { * Visits all non-deleted documents in <code>iterator</code> and fills the provided <code>queue</code> * with the top composite buckets extracted from the collection. * Documents that contain a top composite bucket are added in the provided <code>builder</code> if it is not null. - * + * <p> * Returns true if the queue is full and the current <code>leadSourceBucket</code> did not produce any competitive * composite buckets. 
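     * <p>
     * A caller sketch (names here are illustrative, assuming buckets are visited in
     * the order of the leading source):
     * <pre>{@code
     * for (long bucket : bucketsInLeadSourceOrder) {
     *     if (processBucket(queue, context, docsFor(bucket), bucket, builder)) {
     *         break; // queue is full and this lead bucket added nothing competitive
     *     }
     * }
     * }</pre>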
*/ diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java index 21b9419e8f93a..f82e0efc4c31e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java @@ -34,9 +34,9 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; +import org.opensearch.common.util.BigArrays; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.util.BigArrays; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/UnsignedLongValuesSource.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/UnsignedLongValuesSource.java index a45f1a9a5840b..797b61f46240e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/UnsignedLongValuesSource.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/UnsignedLongValuesSource.java @@ -15,10 +15,10 @@ import org.apache.lucene.search.Query; import org.opensearch.common.CheckedFunction; import org.opensearch.common.Numbers; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.BitArray; import org.opensearch.common.util.LongArray; -import org.opensearch.common.lease.Releasables; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.search.DocValueFormat; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java index 4ab573cf0a6b6..db21b384c77ea 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java @@ -56,7 +56,7 @@ public class FilterAggregatorFactory extends AggregatorFactory { private Weight weight; - private Query filter; + private final Query filter; public FilterAggregatorFactory( String name, @@ -75,7 +75,7 @@ public FilterAggregatorFactory( * necessary. This is done lazily so that the {@link Weight} is only created * if the aggregation collects documents reducing the overhead of the * aggregation in the case where no documents are collected. - * + * <p> * Note that as aggregations are initialised and executed in a serial manner, * no concurrency considerations are necessary here.
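     * <p>
     * A sketch of the deferral (the factory hands the aggregator {@code this::getWeight}
     * as a supplier, so nothing is built until a collector first needs it):
     * <pre>{@code
     * Supplier<Weight> weightSupplier = factory::getWeight; // no Weight created yet
     * // ... only once the aggregator first collects:
     * Weight weight = weightSupplier.get();                 // created here, then cached
     * }</pre>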
*/ @@ -85,7 +85,7 @@ public Weight getWeight() { try { weight = contextSearcher.createWeight(contextSearcher.rewrite(filter), ScoreMode.COMPLETE_NO_SCORES, 1f); } catch (IOException e) { - throw new AggregationInitializationException("Failed to initialse filter", e); + throw new AggregationInitializationException("Failed to initialise filter", e); } } return weight; @@ -98,7 +98,11 @@ public Aggregator createInternal( CardinalityUpperBound cardinality, Map<String, Object> metadata ) throws IOException { - return new FilterAggregator(name, () -> this.getWeight(), factories, searchContext, parent, cardinality, metadata); + return new FilterAggregator(name, this::getWeight, factories, searchContext, parent, cardinality, metadata); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FiltersAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FiltersAggregator.java index 01777940009d5..7b86d0ed15cf8 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FiltersAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FiltersAggregator.java @@ -35,11 +35,11 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; +import org.opensearch.common.lucene.Lucene; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.lucene.Lucene; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilder; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FiltersAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FiltersAggregatorFactory.java index 795f81a08d8d5..a8e157a1cbb79 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FiltersAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FiltersAggregatorFactory.java @@ -92,7 +92,7 @@ public FiltersAggregatorFactory( * necessary. This is done lazily so that the {@link Weight}s are only * created if the aggregation collects documents reducing the overhead of * the aggregation in the case where no documents are collected. - * + * <p> * Note: With concurrent segment search use case, multiple aggregation collectors executing * on different threads will try to fetch the weights. 
To handle the race condition there is * a synchronization block @@ -146,4 +146,8 @@ public Aggregator createInternal( ); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/global/GlobalAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/global/GlobalAggregatorFactory.java index 419ae9f16d9e6..47de1fcda29c9 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/global/GlobalAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/global/GlobalAggregatorFactory.java @@ -82,4 +82,9 @@ public Aggregator createInternal( } return new GlobalAggregator(name, factories, searchContext, metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java index 882b18364f0a7..40944cdd92dee 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java @@ -32,12 +32,12 @@ package org.opensearch.search.aggregations.bucket.histogram; -import org.opensearch.core.ParseField; import org.opensearch.common.Rounding; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryShardContext; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java index 556ff1838cc66..12aefc540e75c 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java @@ -33,14 +33,16 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.search.CollectionTerminatedException; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.CollectionUtil; import org.opensearch.common.Rounding; import org.opensearch.common.Rounding.Prepared; -import org.opensearch.core.common.util.ByteArray; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.IntArray; import org.opensearch.common.util.LongArray; -import org.opensearch.common.lease.Releasables; +import org.opensearch.core.common.util.ByteArray; +import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; @@ -51,6 +53,7 @@ import org.opensearch.search.aggregations.LeafBucketCollectorBase; import 
org.opensearch.search.aggregations.bucket.DeferableBucketAggregator; import org.opensearch.search.aggregations.bucket.DeferringBucketCollector; +import org.opensearch.search.aggregations.bucket.FastFilterRewriteHelper; import org.opensearch.search.aggregations.bucket.MergingBucketsDeferringCollector; import org.opensearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder.RoundingInfo; import org.opensearch.search.aggregations.bucket.terms.LongKeyedBucketOrds; @@ -128,6 +131,10 @@ static AutoDateHistogramAggregator build( protected final RoundingInfo[] roundingInfos; protected final int targetBuckets; + protected int roundingIdx; + protected Rounding.Prepared preparedRounding; + + private final FastFilterRewriteHelper.FastFilterContext fastFilterContext; private AutoDateHistogramAggregator( String name, @@ -148,8 +155,59 @@ private AutoDateHistogramAggregator( this.formatter = valuesSourceConfig.format(); this.roundingInfos = roundingInfos; this.roundingPreparer = roundingPreparer; + this.preparedRounding = prepareRounding(0); + + fastFilterContext = new FastFilterRewriteHelper.FastFilterContext(context); + fastFilterContext.setAggregationType( + new AutoHistogramAggregationType( + valuesSourceConfig.fieldType(), + valuesSourceConfig.missing() != null, + valuesSourceConfig.script() != null + ) + ); + if (fastFilterContext.isRewriteable(parent, subAggregators.length)) { + fastFilterContext.buildFastFilter(); + } + } + + private class AutoHistogramAggregationType extends FastFilterRewriteHelper.AbstractDateHistogramAggregationType { + + public AutoHistogramAggregationType(MappedFieldType fieldType, boolean missing, boolean hasScript) { + super(fieldType, missing, hasScript); + } + + @Override + protected Rounding getRounding(final long low, final long high) { + // (max - min) / targetBuckets = bestDuration + // find the right innerInterval that this bestDuration belongs to + // since we cannot exceed targetBuckets, bestDuration should go up, + // so the right innerInterval should be an upper bound + long bestDuration = (high - low) / targetBuckets; + // reset so this function is idempotent + roundingIdx = 0; + while (roundingIdx < roundingInfos.length - 1) { + final RoundingInfo curRoundingInfo = roundingInfos[roundingIdx]; + final int temp = curRoundingInfo.innerIntervals[curRoundingInfo.innerIntervals.length - 1]; + // If the interval duration is covered by the maximum inner interval, + // we can start with this outer interval for creating the buckets + if (bestDuration <= temp * curRoundingInfo.roughEstimateDurationMillis) { + break; + } + roundingIdx++; + } + + preparedRounding = prepareRounding(roundingIdx); + return roundingInfos[roundingIdx].rounding; + } + + @Override + protected Prepared getRoundingPrepared() { + return preparedRounding; + } } + protected abstract LongKeyedBucketOrds getBucketOrds(); + @Override public final ScoreMode scoreMode() { if (valuesSource != null && valuesSource.needsScores()) { @@ -176,7 +234,25 @@ public final LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBuc if (valuesSource == null) { return LeafBucketCollector.NO_OP_COLLECTOR; } - return getLeafCollector(valuesSource.longValues(ctx), sub); + + boolean optimized = FastFilterRewriteHelper.tryFastFilterAggregation( + ctx, + fastFilterContext, + (key, count) -> incrementBucketDocCount( + FastFilterRewriteHelper.getBucketOrd(getBucketOrds().add(0, preparedRounding.round(key))), + count + ) + ); + if (optimized) throw new CollectionTerminatedException(); + + final
SortedNumericDocValues values = valuesSource.longValues(ctx); + final LeafBucketCollector iteratingCollector = getLeafCollector(values, sub); + return new LeafBucketCollectorBase(sub, values) { + @Override + public void collect(int doc, long owningBucketOrd) throws IOException { + iteratingCollector.collect(doc, owningBucketOrd); + } + }; } protected final InternalAggregation[] buildAggregations( @@ -247,8 +323,6 @@ protected final void merge(long[] mergeMap, long newNumBuckets) { * @opensearch.internal */ private static class FromSingle extends AutoDateHistogramAggregator { - private int roundingIdx; - private Rounding.Prepared preparedRounding; /** * Map from value to bucket ordinals. * <p> @@ -286,10 +360,14 @@ private static class FromSingle extends AutoDateHistogramAggregator { metadata ); - preparedRounding = prepareRounding(0); bucketOrds = new LongKeyedBucketOrds.FromSingle(context.bigArrays()); } + @Override + protected LongKeyedBucketOrds getBucketOrds() { + return bucketOrds; + } + @Override protected LeafBucketCollector getLeafCollector(SortedNumericDocValues values, LeafBucketCollector sub) throws IOException { return new LeafBucketCollectorBase(sub, values) { @@ -507,6 +585,11 @@ private static class FromMany extends AutoDateHistogramAggregator { liveBucketCountUnderestimate = context.bigArrays().newIntArray(1, true); } + @Override + protected LongKeyedBucketOrds getBucketOrds() { + return bucketOrds; + } + @Override protected LeafBucketCollector getLeafCollector(SortedNumericDocValues values, LeafBucketCollector sub) throws IOException { return new LeafBucketCollectorBase(sub, values) { @@ -546,7 +629,7 @@ private int collectValue(long owningBucketOrd, int roundingIdx, int doc, long ro /** * Increase the rounding of {@code owningBucketOrd} using - * estimated, bucket counts, {@link #rebucket() rebucketing} the all + * estimated bucket counts, {@link FromMany#rebucket() rebucketing} all the * buckets if the estimated number of wasted buckets is too high.
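     * <p>
     * Roughly, as a sketch of the heuristic (helper names here are illustrative):
     * <pre>{@code
     * int newRounding = oldRounding;
     * while (estimatedBucketCount(owningBucketOrd, newRounding) > targetBuckets) {
     *     newRounding++; // coarser rounding produces fewer buckets
     * }
     * if (wastedBucketsOverestimate > rebucketThreshold) {
     *     rebucket(); // fold already-collected buckets into the coarser rounding
     * }
     * return newRounding;
     * }</pre>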
*/ private int increaseRoundingIfNeeded(long owningBucketOrd, int oldEstimatedBucketCount, long newKey, int oldRounding) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorFactory.java index 7434ef84ee92f..059b88c9475ed 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorFactory.java @@ -124,4 +124,9 @@ protected Aggregator createUnmapped(SearchContext searchContext, Aggregator pare metadata ); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java index 6e7fc962a692a..a978b5cfa3e3e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java @@ -33,9 +33,9 @@ package org.opensearch.search.aggregations.bucket.histogram; import org.opensearch.common.Rounding; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.TimeValue; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -245,7 +245,7 @@ public DateHistogramAggregationBuilder dateHistogramInterval(DateHistogramInterv /** * Sets the interval of the DateHistogram using calendar units (`1d`, `1w`, `1M`, etc). These units * are calendar-aware, meaning they respect leap additions, variable days per month, etc. - * + * <p> * This is mutually exclusive with {@link DateHistogramAggregationBuilder#fixedInterval(DateHistogramInterval)} * * @param interval The calendar interval to use with the aggregation @@ -258,7 +258,7 @@ public DateHistogramAggregationBuilder calendarInterval(DateHistogramInterval in /** * Sets the interval of the DateHistogram using fixed units (`1ms`, `1s`, `10m`, `4h`, etc). These are * not calendar aware and are simply multiples of fixed, SI units. 
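 * <p>
 * A usage sketch of the two flavours (aggregation names chosen for illustration):
 * <pre>{@code
 * new DateHistogramAggregationBuilder("by_10m").fixedInterval(DateHistogramInterval.minutes(10));
 * new DateHistogramAggregationBuilder("by_month").calendarInterval(DateHistogramInterval.MONTH);
 * }</pre>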
- * + * <p> * This is mutually exclusive with {@link DateHistogramAggregationBuilder#calendarInterval(DateHistogramInterval)} * * @param interval The fixed interval to use with the aggregation diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index 040621ce8ec34..0e830106c8284 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -33,20 +33,23 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.search.CollectionTerminatedException; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.CollectionUtil; import org.opensearch.common.Nullable; import org.opensearch.common.Rounding; import org.opensearch.common.lease.Releasables; +import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; -import org.opensearch.search.aggregations.CardinalityUpperBound; import org.opensearch.search.aggregations.BucketOrder; +import org.opensearch.search.aggregations.CardinalityUpperBound; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.LeafBucketCollector; import org.opensearch.search.aggregations.LeafBucketCollectorBase; import org.opensearch.search.aggregations.bucket.BucketsAggregator; +import org.opensearch.search.aggregations.bucket.FastFilterRewriteHelper; import org.opensearch.search.aggregations.bucket.terms.LongKeyedBucketOrds; import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.aggregations.support.ValuesSourceConfig; @@ -66,7 +69,6 @@ * @opensearch.internal */ class DateHistogramAggregator extends BucketsAggregator implements SizedBucketAggregator { - private final ValuesSource.Numeric valuesSource; private final DocValueFormat formatter; private final Rounding rounding; @@ -76,13 +78,13 @@ class DateHistogramAggregator extends BucketsAggregator implements SizedBucketAg private final Rounding.Prepared preparedRounding; private final BucketOrder order; private final boolean keyed; - private final long minDocCount; private final LongBounds extendedBounds; private final LongBounds hardBounds; - private final LongKeyedBucketOrds bucketOrds; + private final FastFilterRewriteHelper.FastFilterContext fastFilterContext; + DateHistogramAggregator( String name, AggregatorFactories factories, @@ -99,7 +101,6 @@ class DateHistogramAggregator extends BucketsAggregator implements SizedBucketAg CardinalityUpperBound cardinality, Map<String, Object> metadata ) throws IOException { - super(name, factories, aggregationContext, parent, CardinalityUpperBound.MANY, metadata); this.rounding = rounding; this.preparedRounding = preparedRounding; @@ -114,6 +115,36 @@ class DateHistogramAggregator extends BucketsAggregator implements SizedBucketAg this.formatter = valuesSourceConfig.format(); bucketOrds = LongKeyedBucketOrds.build(context.bigArrays(), cardinality); + + fastFilterContext = new FastFilterRewriteHelper.FastFilterContext(context); + fastFilterContext.setAggregationType( + new DateHistogramAggregationType( + 
valuesSourceConfig.fieldType(), + valuesSourceConfig.missing() != null, + valuesSourceConfig.script() != null, + hardBounds + ) + ); + if (fastFilterContext.isRewriteable(parent, subAggregators.length)) { + fastFilterContext.buildFastFilter(); + } + } + + private class DateHistogramAggregationType extends FastFilterRewriteHelper.AbstractDateHistogramAggregationType { + + public DateHistogramAggregationType(MappedFieldType fieldType, boolean missing, boolean hasScript, LongBounds hardBounds) { + super(fieldType, missing, hasScript, hardBounds); + } + + @Override + protected Rounding getRounding(long low, long high) { + return rounding; + } + + @Override + protected Rounding.Prepared getRoundingPrepared() { + return preparedRounding; + } } @Override @@ -129,6 +160,17 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCol if (valuesSource == null) { return LeafBucketCollector.NO_OP_COLLECTOR; } + + boolean optimized = FastFilterRewriteHelper.tryFastFilterAggregation( + ctx, + fastFilterContext, + (key, count) -> incrementBucketDocCount( + FastFilterRewriteHelper.getBucketOrd(bucketOrds.add(0, preparedRounding.round(key))), + count + ) + ); + if (optimized) throw new CollectionTerminatedException(); + SortedNumericDocValues values = valuesSource.longValues(ctx); return new LeafBucketCollectorBase(sub, values) { @Override @@ -216,9 +258,7 @@ public void collectDebugInfo(BiConsumer<String, Object> add) { } /** - * Returns the size of the bucket in specified units. - * - * If unitSize is null, returns 1.0 + * @return the size of the bucket in specified units, or 1.0 if unitSize is null */ @Override public double bucketSize(long bucket, Rounding.DateTimeUnit unitSize) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java index dd74d83c665de..807ec1ab4e4b7 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java @@ -148,4 +148,9 @@ protected Aggregator createUnmapped(SearchContext searchContext, Aggregator pare metadata ); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramInterval.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramInterval.java index 9e8509e257bc6..9f907bcacadf9 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramInterval.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramInterval.java @@ -33,11 +33,11 @@ package org.opensearch.search.aggregations.bucket.histogram; import org.opensearch.common.Rounding; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -129,7 +129,7 @@ public XContentBuilder toXContent(XContentBuilder 
builder, Params params) throws * Converts this DateHistogramInterval into a millisecond representation. If this is a calendar * interval, it is an approximation of milliseconds based on the fixed equivalent (e.g. `1h` is treated as 60 * fixed minutes, rather than the hour at a specific point in time). - * + * <p> * This is merely a convenience helper for quick comparisons and should not be used for situations that * require precise durations. */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalConsumer.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalConsumer.java index a2c63cf25c0c2..1d21152b6f622 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalConsumer.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalConsumer.java @@ -34,7 +34,7 @@ /** * A shared interface for aggregations that parse and use "interval" parameters. - * + * <p> * Provides definitions for the new fixed and calendar intervals, and deprecated * definitions for the old interval/dateHisto interval parameters * diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java index bb325a737721c..83c20ba1c1d04 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java @@ -32,15 +32,15 @@ package org.opensearch.search.aggregations.bucket.histogram; -import org.opensearch.core.ParseField; import org.opensearch.common.Rounding; import org.opensearch.common.Rounding.DateTimeUnit; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -53,13 +53,13 @@ /** * A class that handles all the parsing, bwc and deprecations surrounding date histogram intervals. - * + * <p> * - Provides parser helpers for the deprecated interval/dateHistogramInterval parameters. * - Provides parser helpers for the new calendar/fixed interval parameters * - Can read old intervals from a stream and convert to new intervals * - Can write new intervals to old format when streaming out * - Provides a variety of helper methods to interpret the intervals as different types, depending on caller's need - * + * <p> * After the deprecated parameters are removed, this class can be simplified greatly. The legacy options * will be removed, and the mutual-exclusion checks can be done in the setters directly removing the need * for the enum and the complicated "state machine" logic @@ -220,7 +220,7 @@ public DateHistogramInterval getAsCalendarInterval() { /** * Sets the interval of the DateHistogram using calendar units (`1d`, `1w`, `1M`, etc).
These units * are calendar-aware, meaning they respect leap additions, variable days per month, etc. - * + * <p> * This is mutually exclusive with {@link DateIntervalWrapper#fixedInterval(DateHistogramInterval)} * * @param interval The calendar interval to use @@ -250,7 +250,7 @@ public DateHistogramInterval getAsFixedInterval() { /** * Sets the interval of the DateHistogram using fixed units (`1ms`, `1s`, `10m`, `4h`, etc). These are * not calendar aware and are simply multiples of fixed, SI units. - * + * <p> * This is mutually exclusive with {@link DateIntervalWrapper#calendarInterval(DateHistogramInterval)} * * @param interval The fixed interval to use diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DoubleBounds.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DoubleBounds.java index 69c70ed1bf7fd..235ae8fcee6d7 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DoubleBounds.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DoubleBounds.java @@ -49,7 +49,7 @@ /** * Represent hard_bounds and extended_bounds in histogram aggregations. - * + * <p> * This class is similar to {@link LongBounds} used in date histograms, but is using doubles to store data. LongBounds and DoubleBounds are * not used interchangeably and therefore don't share any common interfaces except for serialization. * diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java index 321c16cdba970..7506dcde23641 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java @@ -149,4 +149,9 @@ protected Aggregator createUnmapped(SearchContext searchContext, Aggregator pare metadata ); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java index 52f689eb7c229..8ebd67bc1ebe5 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java @@ -491,7 +491,7 @@ private void mergeBucketsWithPlan(List<Bucket> buckets, List<BucketRange> plan, * Makes a merge plan by simulating the merging of the two closest buckets, until the target number of buckets is reached. * Distance is determined by centroid comparison. * Then, this plan is actually executed and the underlying buckets are merged. - * + * <p> * Requires: <code>buckets</code> is sorted by centroid. */ private void mergeBucketsIfNeeded(List<Bucket> buckets, int targetNumBuckets, ReduceContext reduceContext) { @@ -567,7 +567,7 @@ private void mergeBucketsWithSameMin(List<Bucket> buckets, ReduceContext reduceC /** * When two adjacent buckets A, B overlap (A.max > B.min) then their boundary is set to * the midpoint: (A.max + B.min) / 2. - * + * <p> * After this adjustment, A will contain more values than indicated and B will have fewer.
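     * <p>
     * Worked example: with A = [0, 10] and B = [6, 14], the shared boundary becomes
     * (10 + 6) / 2 = 8, so A ends at 8 and B starts at 8.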
*/ private void adjustBoundsForOverlappingBuckets(List<Bucket> buckets, ReduceContext reduceContext) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/LongBounds.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/LongBounds.java index 478be3633cb21..ad6572916c84a 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/LongBounds.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/LongBounds.java @@ -33,8 +33,8 @@ package org.opensearch.search.aggregations.bucket.histogram; import org.opensearch.common.CheckedFunction; -import org.opensearch.core.ParseField; import org.opensearch.common.Rounding; +import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -54,7 +54,7 @@ /** * Represent hard_bounds and extended_bounds in date-histogram aggregations. - * + * <p> * This class is similar to {@link DoubleBounds} used in histograms, but is using longs to store data. LongBounds and DoubleBounds are * not used interchangeably and therefore don't share any common interfaces except for serialization. * diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregationBuilder.java index 66ff6baed66ec..94d88906b45ad 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregationBuilder.java @@ -32,10 +32,10 @@ package org.opensearch.search.aggregations.bucket.histogram; +import org.opensearch.common.settings.Settings; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryShardContext; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java index 803c90be467b1..526945243c786 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java @@ -37,10 +37,10 @@ import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.InPlaceMergeSorter; import org.opensearch.common.Nullable; -import org.opensearch.common.util.BigArrays; -import org.opensearch.common.util.DoubleArray; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; +import org.opensearch.common.util.BigArrays; +import org.opensearch.common.util.DoubleArray; import org.opensearch.index.fielddata.SortedNumericDoubleValues; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; @@ -76,7 +76,7 @@ public class VariableWidthHistogramAggregator extends DeferableBucketAggregator /** * This
aggregator goes through multiple phases of collection. Each phase has a different CollectionPhase::collectValue * implementation - * + * <p> * Running a clustering algorithm like K-Means is unfeasible because large indices don't fit into memory. * But having multiple collection phases lets us accurately bucket the docs in one pass. */ @@ -231,7 +231,7 @@ protected void swap(int i, int j) { * Produces a merge map where `mergeMap[i]` represents the index that <code>values[i]</code> * would be moved to <b>if</b> <code>values</code> were sorted * In other words, this method produces a merge map that will sort <code>values</code> - * + * <p> * See BucketsAggregator::mergeBuckets to learn more about the merge map */ public long[] generateMergeMap() { @@ -242,10 +242,10 @@ public long[] generateMergeMap() { /** * Sorting the documents by key lets us bucket the documents into groups with a single linear scan - * + * <p> * But we can't do this by just sorting <code>buffer</code>, because we also need to generate a merge map * for every change we make to the list, so that we can apply the changes to the underlying buckets as well. - * + * <p> * By just creating a merge map, we eliminate the need to actually sort <code>buffer</code>. We can just * use the merge map to find any doc's sorted index. */ @@ -347,7 +347,7 @@ private void createAndAppendNewCluster(double value) { /** * Move the last cluster to position <code>idx</code> * This is expensive because a merge map of size <code>numClusters</code> is created, so don't call this method too often - * + * <p> * TODO: Make this more efficient */ private void moveLastCluster(int index) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregatorFactory.java index d9d9a74eb958f..b846bf72ef4c5 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregatorFactory.java @@ -116,4 +116,9 @@ protected Aggregator createUnmapped(SearchContext searchContext, Aggregator pare metadata ); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/missing/MissingAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/missing/MissingAggregatorFactory.java index cfa2bd3f7097c..3032d695a3ee2 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/missing/MissingAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/missing/MissingAggregatorFactory.java @@ -85,4 +85,9 @@ protected MissingAggregator doCreateInternal( .getAggregator(MissingAggregationBuilder.REGISTRY_KEY, config) .build(name, factories, config, searchContext, parent, cardinality, metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregator.java index 2d517a1220e98..cfa1d32a52501 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregator.java +++ 
b/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregator.java @@ -43,8 +43,8 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.search.join.BitSetProducer; import org.apache.lucene.util.BitSet; -import org.opensearch.core.ParseField; import org.opensearch.common.lucene.search.Queries; +import org.opensearch.core.ParseField; import org.opensearch.index.mapper.ObjectMapper; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregatorFactory.java index ca1018795b518..a43d41882e475 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregatorFactory.java @@ -100,4 +100,8 @@ public InternalAggregation buildEmptyAggregation() { } } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java index 8b0429d2379c6..2ba5bdc164247 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java @@ -36,8 +36,8 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.join.BitSetProducer; import org.apache.lucene.util.BitSet; -import org.opensearch.core.ParseField; import org.opensearch.common.lucene.search.Queries; +import org.opensearch.core.ParseField; import org.opensearch.index.mapper.ObjectMapper; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/ReverseNestedAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/ReverseNestedAggregatorFactory.java index 27cd8a2688836..816f05052b6a2 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/ReverseNestedAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/ReverseNestedAggregatorFactory.java @@ -83,6 +83,11 @@ public Aggregator createInternal( } } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } + /** * Unmapped class for reverse nested agg * diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java index bfd7845e7e16f..41f2768eb7544 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java @@ -122,4 +122,9 @@ protected Aggregator doCreateInternal( metadata ); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/BinaryRangeAggregatorFactory.java 
b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/BinaryRangeAggregatorFactory.java index 0ee440ecc8487..fc4b4273df703 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/BinaryRangeAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/BinaryRangeAggregatorFactory.java @@ -103,4 +103,8 @@ protected Aggregator doCreateInternal( .build(name, factories, config.getValuesSource(), config.format(), ranges, keyed, searchContext, parent, cardinality, metadata); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/DateRangeAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/DateRangeAggregatorFactory.java index d243a89c632d7..dcf6b84164991 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/DateRangeAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/DateRangeAggregatorFactory.java @@ -72,4 +72,8 @@ public DateRangeAggregatorFactory( ); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java index e0d506d95a75d..e681c714cffbe 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java @@ -32,13 +32,13 @@ package org.opensearch.search.aggregations.bucket.range; -import org.opensearch.core.ParseField; -import org.opensearch.core.common.ParsingException; import org.opensearch.common.geo.GeoDistance; import org.opensearch.common.geo.GeoPoint; +import org.opensearch.common.unit.DistanceUnit; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.DistanceUnit; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/GeoDistanceRangeAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/GeoDistanceRangeAggregatorFactory.java index 3208d35c6a407..728f43094cf7e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/GeoDistanceRangeAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/GeoDistanceRangeAggregatorFactory.java @@ -172,6 +172,11 @@ protected Aggregator doCreateInternal( ); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } + /** * The source location for the distance calculation * diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/IpRangeAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/IpRangeAggregationBuilder.java index f011f950cf289..1470712567775 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/IpRangeAggregationBuilder.java +++ 
b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/IpRangeAggregationBuilder.java @@ -33,12 +33,12 @@ import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.util.BytesRef; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.network.InetAddresses; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; -import org.opensearch.common.collect.Tuple; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.network.InetAddresses; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/RangeAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/RangeAggregatorFactory.java index 803bceaf57fb5..c58b2e881803c 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/RangeAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/RangeAggregatorFactory.java @@ -73,4 +73,9 @@ public RangeAggregatorFactory( metadata ); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java index f0a3a9999ad94..080142185f82e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java @@ -42,10 +42,10 @@ import org.apache.lucene.search.TopScoreDocCollector; import org.apache.lucene.util.RamUsageEstimator; import org.opensearch.OpenSearchException; -import org.opensearch.common.util.BigArrays; -import org.opensearch.common.util.ObjectArray; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; +import org.opensearch.common.util.BigArrays; +import org.opensearch.common.util.ObjectArray; import org.opensearch.search.aggregations.BucketCollector; import org.opensearch.search.aggregations.LeafBucketCollector; import org.opensearch.search.aggregations.MultiBucketCollector; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java index 41ef823a375c0..0f3c9872353c1 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java @@ -159,4 +159,9 @@ public InternalAggregation buildEmptyAggregation() { } }; } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return false; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/DiversifiedMapSamplerAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/DiversifiedMapSamplerAggregator.java index 92f847a4a6b5d..cb880759887e4 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/DiversifiedMapSamplerAggregator.java +++ 
b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/DiversifiedMapSamplerAggregator.java @@ -39,8 +39,8 @@ import org.apache.lucene.search.TopDocsCollector; import org.apache.lucene.util.BytesRef; import org.opensearch.OpenSearchException; -import org.opensearch.common.util.BytesRefHash; import org.opensearch.common.lease.Releasables; +import org.opensearch.common.util.BytesRefHash; import org.opensearch.index.fielddata.AbstractNumericDocValues; import org.opensearch.index.fielddata.SortedBinaryDocValues; import org.opensearch.search.aggregations.Aggregator; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregator.java index 533948c7483e8..a886bdb3ae188 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregator.java @@ -35,9 +35,9 @@ import org.apache.lucene.misc.search.DiversifiedTopDocsCollector; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.RamUsageEstimator; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.ParseField; -import org.opensearch.common.lease.Releasables; import org.opensearch.search.aggregations.AggregationExecutionException; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; @@ -55,7 +55,7 @@ /** * Aggregate on only the top-scoring docs on a shard. - * + * <p> * TODO currently the diversity feature of this agg offers only 'script' and * 'field' as a means of generating a de-dup value. In future it would be nice * if users could use any of the "bucket" aggs syntax (geo, date histogram...) 
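The supportsConcurrentSegmentSearch() overrides added across these factory classes all implement the same protected hook on AggregatorFactory: a factory opts in when its per-segment collection is independent across slices, while the sampler-based factories (diversified and sampler) return false because they rank top-scoring docs across the whole shard. A minimal sketch of the presumed hook pattern, assuming a stripped-down base class and a hypothetical OptInFactory (the real factories also carry query-shard state, parent links, and sub-factories):

abstract class AggregatorFactory {
    // Presumed conservative default: a factory must explicitly declare that
    // its collection logic is safe to run concurrently across segment slices.
    protected boolean supportsConcurrentSegmentSearch() {
        return false;
    }
}

class OptInFactory extends AggregatorFactory {
    @Override
    protected boolean supportsConcurrentSegmentSearch() {
        return true; // per-slice bucket counts can be merged at reduce time
    }
}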
diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregatorFactory.java index fa98c799352a6..51d9830d3cea0 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregatorFactory.java @@ -73,4 +73,8 @@ public Aggregator createInternal( return new SamplerAggregator(name, shardSize, factories, searchContext, parent, metadata); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return false; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/AbstractStringTermsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/AbstractStringTermsAggregator.java index 9551be10e52b8..d06a0ed9976fc 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/AbstractStringTermsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/AbstractStringTermsAggregator.java @@ -76,15 +76,14 @@ protected StringTerms buildEmptyTermsAggregation() { name, order, order, - bucketCountThresholds.getRequiredSize(), - bucketCountThresholds.getMinDocCount(), metadata(), format, bucketCountThresholds.getShardSize(), showTermDocCountError, 0, emptyList(), - 0 + 0, + bucketCountThresholds ); } @@ -95,14 +94,13 @@ protected SignificantStringTerms buildEmptySignificantTermsAggregation(long subs int supersetSize = topReader.numDocs(); return new SignificantStringTerms( name, - bucketCountThresholds.getRequiredSize(), - bucketCountThresholds.getMinDocCount(), metadata(), format, subsetSize, supersetSize, significanceHeuristic, - emptyList() + emptyList(), + bucketCountThresholds ); } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/BytesKeyedBucketOrds.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/BytesKeyedBucketOrds.java index f5595977004e0..5d7c5c2976169 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/BytesKeyedBucketOrds.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/BytesKeyedBucketOrds.java @@ -33,10 +33,10 @@ package org.opensearch.search.aggregations.bucket.terms; import org.apache.lucene.util.BytesRef; -import org.opensearch.common.util.BigArrays; -import org.opensearch.common.util.BytesRefHash; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; +import org.opensearch.common.util.BigArrays; +import org.opensearch.common.util.BytesRefHash; import org.opensearch.search.aggregations.CardinalityUpperBound; /** @@ -131,7 +131,7 @@ private static class FromSingle extends BytesKeyedBucketOrds { private final BytesRefHash ords; private FromSingle(BigArrays bigArrays) { - ords = new BytesRefHash(1, bigArrays); + ords = new BytesRefHash(bigArrays); } @Override @@ -190,7 +190,7 @@ private static class FromMany extends BytesKeyedBucketOrds { private final LongKeyedBucketOrds longToBucketOrds; private FromMany(BigArrays bigArrays) { - bytesToLong = new BytesRefHash(1, bigArrays); + bytesToLong = new BytesRefHash(bigArrays); longToBucketOrds = LongKeyedBucketOrds.build(bigArrays, CardinalityUpperBound.MANY); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/DoubleTerms.java 
b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/DoubleTerms.java index 0b76c302801af..de02d5a938644 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/DoubleTerms.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/DoubleTerms.java @@ -130,29 +130,27 @@ public DoubleTerms( String name, BucketOrder reduceOrder, BucketOrder order, - int requiredSize, - long minDocCount, Map<String, Object> metadata, DocValueFormat format, int shardSize, boolean showTermDocCountError, long otherDocCount, List<Bucket> buckets, - long docCountError + long docCountError, + TermsAggregator.BucketCountThresholds bucketCountThresholds ) { super( name, reduceOrder, order, - requiredSize, - minDocCount, metadata, format, shardSize, showTermDocCountError, otherDocCount, buckets, - docCountError + docCountError, + bucketCountThresholds ); } @@ -174,15 +172,14 @@ public DoubleTerms create(List<Bucket> buckets) { name, reduceOrder, order, - requiredSize, - minDocCount, metadata, format, shardSize, showTermDocCountError, otherDocCount, buckets, - docCountError + docCountError, + bucketCountThresholds ); } @@ -204,15 +201,14 @@ protected DoubleTerms create(String name, List<Bucket> buckets, BucketOrder redu name, reduceOrder, order, - requiredSize, - minDocCount, getMetadata(), format, shardSize, showTermDocCountError, otherDocCount, buckets, - docCountError + docCountError, + bucketCountThresholds ); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java index e0a22435b8f48..69fda2f3f6133 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java @@ -35,17 +35,23 @@ import org.apache.lucene.index.DocValues; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.index.Terms; +import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.search.CollectionTerminatedException; +import org.apache.lucene.search.Weight; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.PriorityQueue; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.util.LongArray; -import org.opensearch.common.util.LongHash; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; +import org.opensearch.common.util.LongArray; +import org.opensearch.common.util.LongHash; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.mapper.DocCountFieldMapper; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.AggregationExecutionException; import org.opensearch.search.aggregations.Aggregator; @@ -57,6 +63,7 @@ import org.opensearch.search.aggregations.InternalOrder; import org.opensearch.search.aggregations.LeafBucketCollector; import org.opensearch.search.aggregations.LeafBucketCollectorBase; +import 
org.opensearch.search.aggregations.bucket.LocalBucketCountThresholds; import org.opensearch.search.aggregations.bucket.terms.SignificanceLookup.BackgroundFrequencyForBytes; import org.opensearch.search.aggregations.bucket.terms.heuristic.SignificanceHeuristic; import org.opensearch.search.aggregations.support.ValuesSource; @@ -70,8 +77,9 @@ import java.util.function.LongPredicate; import java.util.function.LongUnaryOperator; -import static org.apache.lucene.index.SortedSetDocValues.NO_MORE_ORDS; import static org.opensearch.search.aggregations.InternalOrder.isKeyOrder; +import static org.apache.lucene.index.SortedSetDocValues.NO_MORE_ORDS; +import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS; /** * An aggregator of string values that relies on global ordinals in order to build buckets. @@ -84,6 +92,8 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr private final LongPredicate acceptedGlobalOrdinals; private final long valueCount; + private final String fieldName; + private Weight weight; private final GlobalOrdLookupFunction lookupGlobalOrd; protected final CollectionStrategy collectionStrategy; protected int segmentsWithSingleValuedOrds = 0; @@ -135,16 +145,105 @@ public GlobalOrdinalsStringTermsAggregator( return new DenseGlobalOrds(); }); } + this.fieldName = (valuesSource instanceof ValuesSource.Bytes.WithOrdinals.FieldData) + ? ((ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource).getIndexFieldName() + : null; } String descriptCollectionStrategy() { return collectionStrategy.describe(); } + public void setWeight(Weight weight) { + this.weight = weight; + } + + /** + Read doc frequencies directly from indexed terms in the segment to skip iterating through individual documents + @param ctx The LeafReaderContext to collect terms from + @param globalOrds The SortedSetDocValues for the field's ordinals + @param ordCountConsumer A consumer to accept collected term frequencies + @return A LeafBucketCollector implementation with collection termination, since collection is complete + @throws IOException If an I/O error occurs during reading + */ + LeafBucketCollector termDocFreqCollector( + LeafReaderContext ctx, + SortedSetDocValues globalOrds, + BiConsumer<Long, Integer> ordCountConsumer + ) throws IOException { + if (weight == null) { + // Weight not assigned - cannot use this optimization + return null; + } else { + if (weight.count(ctx) == 0) { + // No documents match the top-level query on this segment, so we can skip the segment entirely + return LeafBucketCollector.NO_OP_COLLECTOR; + } else if (weight.count(ctx) != ctx.reader().maxDoc()) { + // weight.count(ctx) == ctx.reader().maxDoc() implies there are no deleted documents and + // top-level query matches all docs in the segment + return null; + } + } + + Terms segmentTerms = ctx.reader().terms(this.fieldName); + if (segmentTerms == null) { + // Field is not indexed. + return null; + } + + NumericDocValues docCountValues = DocValues.getNumeric(ctx.reader(), DocCountFieldMapper.NAME); + if (docCountValues.nextDoc() != NO_MORE_DOCS) { + // This segment has at least one document with the _doc_count field.
+ return null; + } + + TermsEnum indexTermsEnum = segmentTerms.iterator(); + BytesRef indexTerm = indexTermsEnum.next(); + TermsEnum globalOrdinalTermsEnum = globalOrds.termsEnum(); + BytesRef ordinalTerm = globalOrdinalTermsEnum.next(); + + // Iterate over the terms in the segment, look for matches in the global ordinal terms, + // and increment bucket count when segment terms match global ordinal terms. + while (indexTerm != null && ordinalTerm != null) { + int compare = indexTerm.compareTo(ordinalTerm); + if (compare == 0) { + if (acceptedGlobalOrdinals.test(globalOrdinalTermsEnum.ord())) { + ordCountConsumer.accept(globalOrdinalTermsEnum.ord(), indexTermsEnum.docFreq()); + } + indexTerm = indexTermsEnum.next(); + ordinalTerm = globalOrdinalTermsEnum.next(); + } else if (compare < 0) { + indexTerm = indexTermsEnum.next(); + } else { + ordinalTerm = globalOrdinalTermsEnum.next(); + } + } + return new LeafBucketCollector() { + @Override + public void collect(int doc, long owningBucketOrd) throws IOException { + throw new CollectionTerminatedException(); + } + }; + } + @Override public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException { SortedSetDocValues globalOrds = valuesSource.globalOrdinalsValues(ctx); collectionStrategy.globalOrdsReady(globalOrds); + + if (collectionStrategy instanceof DenseGlobalOrds + && this.resultStrategy instanceof StandardTermsResults + && sub == LeafBucketCollector.NO_OP_COLLECTOR) { + LeafBucketCollector termDocFreqCollector = termDocFreqCollector( + ctx, + globalOrds, + (ord, docCount) -> incrementBucketDocCount(collectionStrategy.globalOrdToBucketOrd(0, ord), docCount) + ); + if (termDocFreqCollector != null) { + return termDocFreqCollector; + } + } + SortedDocValues singleValues = DocValues.unwrapSingleton(globalOrds); if (singleValues != null) { segmentsWithSingleValuedOrds++; @@ -342,9 +441,20 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCol final SortedSetDocValues segmentOrds = valuesSource.ordinalsValues(ctx); segmentDocCounts = context.bigArrays().grow(segmentDocCounts, 1 + segmentOrds.getValueCount()); assert sub == LeafBucketCollector.NO_OP_COLLECTOR; - final SortedDocValues singleValues = DocValues.unwrapSingleton(segmentOrds); mapping = valuesSource.globalOrdinalsMapping(ctx); - // Dense mode doesn't support include/exclude so we don't have to check it here. 
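The termDocFreqCollector fast path above hinges on one invariant: when the Weight reports that the top-level query matches every live document in a segment (and no document carries a _doc_count value), a term's docFreq in the segment's terms dictionary is exactly its bucket count, so per-document collection can be skipped. A self-contained sketch of that core idea against plain Lucene, with hypothetical names and none of the global-ordinal wiring:

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;

final class TermDocFreqSketch {
    // Valid only under the preconditions checked above: the query matches all
    // live docs, and there are no deletions or _doc_count fields.
    static Map<String, Integer> bucketCounts(LeafReader reader, String field) throws IOException {
        Map<String, Integer> counts = new HashMap<>();
        Terms terms = reader.terms(field);
        if (terms == null) {
            return counts; // field not indexed in this segment
        }
        TermsEnum termsEnum = terms.iterator();
        for (BytesRef term = termsEnum.next(); term != null; term = termsEnum.next()) {
            // docFreq counts each document containing the term exactly once
            counts.put(term.utf8ToString(), termsEnum.docFreq());
        }
        return counts;
    }
}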
+ + if (this.resultStrategy instanceof StandardTermsResults) { + LeafBucketCollector termDocFreqCollector = this.termDocFreqCollector( + ctx, + segmentOrds, + (ord, docCount) -> incrementBucketDocCount(mapping.applyAsLong(ord), docCount) + ); + if (termDocFreqCollector != null) { + return termDocFreqCollector; + } + } + + final SortedDocValues singleValues = DocValues.unwrapSingleton(segmentOrds); if (singleValues != null) { segmentsWithSingleValuedOrds++; return resultStrategy.wrapCollector(new LeafBucketCollectorBase(sub, segmentOrds) { @@ -603,6 +713,7 @@ abstract class ResultStrategy< TB extends InternalMultiBucketAggregation.InternalBucket> implements Releasable { private InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { + LocalBucketCountThresholds localBucketCountThresholds = context.asLocalBucketCountThresholds(bucketCountThresholds); if (valueCount == 0) { // no context in this reader InternalAggregation[] results = new InternalAggregation[owningBucketOrds.length]; for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { @@ -615,11 +726,11 @@ private InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws long[] otherDocCount = new long[owningBucketOrds.length]; for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { final int size; - if (bucketCountThresholds.getMinDocCount() == 0) { + if (localBucketCountThresholds.getMinDocCount() == 0) { // if minDocCount == 0 then we can end up with more buckets then maxBucketOrd() returns - size = (int) Math.min(valueCount, bucketCountThresholds.getShardSize()); + size = (int) Math.min(valueCount, localBucketCountThresholds.getRequiredSize()); } else { - size = (int) Math.min(maxBucketOrd(), bucketCountThresholds.getShardSize()); + size = (int) Math.min(maxBucketOrd(), localBucketCountThresholds.getRequiredSize()); } PriorityQueue<TB> ordered = buildPriorityQueue(size); final int finalOrdIdx = ordIdx; @@ -630,7 +741,7 @@ private InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws @Override public void accept(long globalOrd, long bucketOrd, long docCount) throws IOException { otherDocCount[finalOrdIdx] += docCount; - if (docCount >= bucketCountThresholds.getShardMinDocCount()) { + if (docCount >= localBucketCountThresholds.getMinDocCount()) { if (spare == null) { spare = buildEmptyTemporaryBucket(); } @@ -799,15 +910,14 @@ StringTerms buildResult(long owningBucketOrd, long otherDocCount, StringTerms.Bu name, reduceOrder, order, - bucketCountThresholds.getRequiredSize(), - bucketCountThresholds.getMinDocCount(), metadata(), format, bucketCountThresholds.getShardSize(), showTermDocCountError, otherDocCount, Arrays.asList(topBuckets), - 0 + 0, + bucketCountThresholds ); } @@ -924,14 +1034,13 @@ void buildSubAggs(SignificantStringTerms.Bucket[][] topBucketsPreOrd) throws IOE SignificantStringTerms buildResult(long owningBucketOrd, long otherDocCount, SignificantStringTerms.Bucket[] topBuckets) { return new SignificantStringTerms( name, - bucketCountThresholds.getRequiredSize(), - bucketCountThresholds.getMinDocCount(), metadata(), format, subsetSize(owningBucketOrd), supersetSize, significanceHeuristic, - Arrays.asList(topBuckets) + Arrays.asList(topBuckets), + bucketCountThresholds ); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalMappedRareTerms.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalMappedRareTerms.java index ff742d189c7c7..eb4b84b7eb301 100644 --- 
a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalMappedRareTerms.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalMappedRareTerms.java @@ -35,9 +35,9 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.CollectionUtil; import org.opensearch.common.Randomness; +import org.opensearch.common.util.SetBackedScalingCuckooFilter; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.util.SetBackedScalingCuckooFilter; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.AggregationExecutionException; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalMappedSignificantTerms.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalMappedSignificantTerms.java index 97a95b8df840b..a7c5427fa38cc 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalMappedSignificantTerms.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalMappedSignificantTerms.java @@ -64,16 +64,15 @@ public abstract class InternalMappedSignificantTerms< protected InternalMappedSignificantTerms( String name, - int requiredSize, - long minDocCount, Map<String, Object> metadata, DocValueFormat format, long subsetSize, long supersetSize, SignificanceHeuristic significanceHeuristic, - List<B> buckets + List<B> buckets, + TermsAggregator.BucketCountThresholds bucketCountThresholds ) { - super(name, requiredSize, minDocCount, metadata); + super(name, bucketCountThresholds, metadata); this.format = format; this.buckets = buckets; this.subsetSize = subsetSize; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalMappedTerms.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalMappedTerms.java index f5e92fec8195d..d542064df24d7 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalMappedTerms.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalMappedTerms.java @@ -64,17 +64,16 @@ protected InternalMappedTerms( String name, BucketOrder reduceOrder, BucketOrder order, - int requiredSize, - long minDocCount, Map<String, Object> metadata, DocValueFormat format, int shardSize, boolean showTermDocCountError, long otherDocCount, List<B> buckets, - long docCountError + long docCountError, + TermsAggregator.BucketCountThresholds bucketCountThresholds ) { - super(name, reduceOrder, order, requiredSize, minDocCount, metadata); + super(name, reduceOrder, order, bucketCountThresholds, metadata); this.format = format; this.shardSize = shardSize; this.showTermDocCountError = showTermDocCountError; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalMultiTerms.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalMultiTerms.java index fc84f35385d5c..5b90163fa3959 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalMultiTerms.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalMultiTerms.java @@ -233,17 +233,16 @@ public InternalMultiTerms( String name, BucketOrder reduceOrder, BucketOrder order, - int requiredSize, - long minDocCount, Map<String, Object> metadata, int shardSize, 
boolean showTermDocCountError, long otherDocCount, long docCountError, List<DocValueFormat> formats, - List<Bucket> buckets + List<Bucket> buckets, + TermsAggregator.BucketCountThresholds bucketCountThresholds ) { - super(name, reduceOrder, order, requiredSize, minDocCount, metadata); + super(name, reduceOrder, order, bucketCountThresholds, metadata); this.shardSize = shardSize; this.showTermDocCountError = showTermDocCountError; this.otherDocCount = otherDocCount; @@ -278,15 +277,14 @@ public InternalMultiTerms create(List<Bucket> buckets) { name, reduceOrder, order, - requiredSize, - minDocCount, metadata, shardSize, showTermDocCountError, otherDocCount, docCountError, termFormats, - buckets + buckets, + bucketCountThresholds ); } @@ -357,15 +355,14 @@ protected InternalMultiTerms create( name, reduceOrder, order, - requiredSize, - minDocCount, metadata, shardSize, showTermDocCountError, otherDocCount, docCountError, termFormats, - buckets + buckets, + bucketCountThresholds ); } @@ -410,8 +407,8 @@ public int hashCode() { /** * Copy from InternalComposite - * - * Format <code>obj</code> using the provided {@link DocValueFormat}. + * <p> + * Format {@code obj} using the provided {@link DocValueFormat}. * If the format is equals to {@link DocValueFormat#RAW}, the object is returned as is * for numbers and a string for {@link BytesRef}s. */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalRareTerms.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalRareTerms.java index 3bcbe2716651a..85d96e4978450 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalRareTerms.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalRareTerms.java @@ -31,9 +31,9 @@ package org.opensearch.search.aggregations.bucket.terms; +import org.opensearch.common.util.SetBackedScalingCuckooFilter; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.util.SetBackedScalingCuckooFilter; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregations; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalSignificantTerms.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalSignificantTerms.java index 84d148199a7f9..03bb519ed9961 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalSignificantTerms.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalSignificantTerms.java @@ -39,6 +39,7 @@ import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.InternalAggregations; import org.opensearch.search.aggregations.InternalMultiBucketAggregation; +import org.opensearch.search.aggregations.bucket.LocalBucketCountThresholds; import org.opensearch.search.aggregations.bucket.terms.heuristic.SignificanceHeuristic; import java.io.IOException; @@ -195,11 +196,17 @@ public final XContentBuilder toXContent(XContentBuilder builder, Params params) protected final int requiredSize; protected final long minDocCount; + protected final TermsAggregator.BucketCountThresholds bucketCountThresholds; - protected InternalSignificantTerms(String name, int requiredSize, long minDocCount, Map<String, Object> metadata) { + protected 
InternalSignificantTerms( + String name, + TermsAggregator.BucketCountThresholds bucketCountThresholds, + Map<String, Object> metadata + ) { super(name, metadata); - this.requiredSize = requiredSize; - this.minDocCount = minDocCount; + this.requiredSize = bucketCountThresholds.getRequiredSize(); + this.minDocCount = bucketCountThresholds.getMinDocCount(); + this.bucketCountThresholds = bucketCountThresholds; } /** @@ -209,6 +216,9 @@ protected InternalSignificantTerms(StreamInput in) throws IOException { super(in); requiredSize = readSize(in); minDocCount = in.readVLong(); + // shardMinDocCount and shardSize are not used on the coordinator, so they are not deserialized. We use + // CoordinatorBucketCountThresholds which will throw an exception if they are accessed. + bucketCountThresholds = new TermsAggregator.CoordinatorBucketCountThresholds(minDocCount, -1, requiredSize, -1); } protected final void doWriteTo(StreamOutput out) throws IOException { @@ -224,6 +234,7 @@ protected final void doWriteTo(StreamOutput out) throws IOException { @Override public InternalAggregation reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) { + LocalBucketCountThresholds localBucketCountThresholds = reduceContext.asLocalBucketCountThresholds(bucketCountThresholds); long globalSubsetSize = 0; long globalSupersetSize = 0; // Compute the overall result set size and the corpus size using the @@ -265,13 +276,21 @@ public InternalAggregation reduce(List<InternalAggregation> aggregations, Reduce } } SignificanceHeuristic heuristic = getSignificanceHeuristic().rewrite(reduceContext); - final int size = reduceContext.isFinalReduce() == false ? buckets.size() : Math.min(requiredSize, buckets.size()); + boolean isCoordinatorPartialReduce = reduceContext.isFinalReduce() == false && reduceContext.isSliceLevel() == false; + // Do not apply size threshold on coordinator partial reduce + final int size = !isCoordinatorPartialReduce + ? Math.min(localBucketCountThresholds.getRequiredSize(), buckets.size()) + : buckets.size(); BucketSignificancePriorityQueue<B> ordered = new BucketSignificancePriorityQueue<>(size); for (Map.Entry<String, List<B>> entry : buckets.entrySet()) { List<B> sameTermBuckets = entry.getValue(); final B b = reduceBucket(sameTermBuckets, reduceContext); b.updateScore(heuristic); - if (((b.score > 0) && (b.subsetDf >= minDocCount)) || reduceContext.isFinalReduce() == false) { + // For the concurrent search case we do not apply bucket count thresholds in buildAggregation; instead they are applied + // here during reduce. However, the bucket score is only evaluated at the final coordinator reduce.
+ boolean meetsThresholds = (b.subsetDf >= localBucketCountThresholds.getMinDocCount()) + && (((b.score > 0) || reduceContext.isSliceLevel())); + if (isCoordinatorPartialReduce || meetsThresholds) { B removed = ordered.insertWithOverflow(b); if (removed == null) { reduceContext.consumeBucketsAndMaybeBreak(1); diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalTerms.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalTerms.java index 9a80155eea51c..b8f9406ff55b9 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalTerms.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalTerms.java @@ -46,6 +46,7 @@ import org.opensearch.search.aggregations.InternalOrder; import org.opensearch.search.aggregations.KeyComparable; import org.opensearch.search.aggregations.bucket.IteratorAndCurrent; +import org.opensearch.search.aggregations.bucket.LocalBucketCountThresholds; import org.opensearch.search.aggregations.bucket.MultiBucketsAggregation; import java.io.IOException; @@ -223,29 +224,30 @@ public int hashCode() { protected final BucketOrder order; protected final int requiredSize; protected final long minDocCount; + protected final TermsAggregator.BucketCountThresholds bucketCountThresholds; + private boolean hasSliceLevelDocCountError = false; /** * Creates a new {@link InternalTerms} * @param name The name of the aggregation * @param reduceOrder The {@link BucketOrder} that should be used to merge shard results. * @param order The {@link BucketOrder} that should be used to sort the final reduce. - * @param requiredSize The number of top buckets. - * @param minDocCount The minimum number of documents allowed per bucket. + * @param bucketCountThresholds Object containing values for minDocCount, shardMinDocCount, size, shardSize. * @param metadata The metadata associated with the aggregation. */ protected InternalTerms( String name, BucketOrder reduceOrder, BucketOrder order, - int requiredSize, - long minDocCount, + TermsAggregator.BucketCountThresholds bucketCountThresholds, Map<String, Object> metadata ) { super(name, metadata); this.reduceOrder = reduceOrder; this.order = order; - this.requiredSize = requiredSize; - this.minDocCount = minDocCount; + this.bucketCountThresholds = bucketCountThresholds; + this.requiredSize = bucketCountThresholds.getRequiredSize(); + this.minDocCount = bucketCountThresholds.getMinDocCount(); } /** @@ -257,6 +259,9 @@ protected InternalTerms(StreamInput in) throws IOException { order = InternalOrder.Streams.readOrder(in); requiredSize = readSize(in); minDocCount = in.readVLong(); + // shardMinDocCount and shardSize are not used on the coordinator, so they are not deserialized. We use + // CoordinatorBucketCountThresholds which will throw an exception if they are accessed. + bucketCountThresholds = new TermsAggregator.CoordinatorBucketCountThresholds(minDocCount, -1, requiredSize, getShardSize()); } @Override @@ -293,7 +298,7 @@ private BucketOrder getReduceOrder(List<InternalAggregation> aggregations) { return thisReduceOrder != null ? 
thisReduceOrder : order; } - private long getDocCountError(InternalTerms<?, ?> terms) { + private long getDocCountError(InternalTerms<?, ?> terms, ReduceContext reduceContext) { int size = terms.getBuckets().size(); if (size == 0 || size < terms.getShardSize() || isKeyOrder(terms.order)) { return 0; @@ -385,12 +390,19 @@ private List<B> reduceLegacy(List<InternalAggregation> aggregations, ReduceConte } public InternalAggregation reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) { + LocalBucketCountThresholds localBucketCountThresholds = reduceContext.asLocalBucketCountThresholds(bucketCountThresholds); long sumDocCountError = 0; long otherDocCount = 0; InternalTerms<A, B> referenceTerms = null; for (InternalAggregation aggregation : aggregations) { @SuppressWarnings("unchecked") InternalTerms<A, B> terms = (InternalTerms<A, B>) aggregation; + // For Concurrent Segment Search the aggregation will have a computed doc count error coming from the shards. + // We use the existence of this doc count error to determine whether the doc count error originated at the slice level, + // and if so we maintain the doc count error for the 1 shard case at the coordinator level. + if (aggregations.size() == 1 && terms.getDocCountError() > 0) { + hasSliceLevelDocCountError = true; + } if (referenceTerms == null && aggregation.getClass().equals(UnmappedTerms.class) == false) { referenceTerms = terms; }
*/ BucketOrder thisReduceOrder = getReduceOrder(aggregations); if (isKeyOrder(thisReduceOrder)) { @@ -444,8 +456,8 @@ public InternalAggregation reduce(List<InternalAggregation> aggregations, Reduce reducedBuckets = reduceLegacy(aggregations, reduceContext); } final B[] list; - if (reduceContext.isFinalReduce()) { - final int size = Math.min(requiredSize, reducedBuckets.size()); + if (reduceContext.isFinalReduce() || reduceContext.isSliceLevel()) { + final int size = Math.min(localBucketCountThresholds.getRequiredSize(), reducedBuckets.size()); // final comparator final BucketPriorityQueue<B> ordered = new BucketPriorityQueue<>(size, order.comparator()); for (B bucket : reducedBuckets) { @@ -455,7 +467,7 @@ public InternalAggregation reduce(List<InternalAggregation> aggregations, Reduce final long finalSumDocCountError = sumDocCountError; bucket.setDocCountError(docCountError -> docCountError + finalSumDocCountError); } - if (bucket.getDocCount() >= minDocCount) { + if (bucket.getDocCount() >= localBucketCountThresholds.getMinDocCount()) { B removed = ordered.insertWithOverflow(bucket); if (removed != null) { otherDocCount += removed.getDocCount(); @@ -474,7 +486,9 @@ public InternalAggregation reduce(List<InternalAggregation> aggregations, Reduce } else { // we can prune the list on partial reduce if the aggregation is ordered by key // and not filtered (minDocCount == 0) - int size = isKeyOrder(order) && minDocCount == 0 ? Math.min(requiredSize, reducedBuckets.size()) : reducedBuckets.size(); + int size = isKeyOrder(order) && localBucketCountThresholds.getMinDocCount() == 0 + ? Math.min(localBucketCountThresholds.getRequiredSize(), reducedBuckets.size()) + : reducedBuckets.size(); list = createBucketsArray(size); for (int i = 0; i < size; i++) { reduceContext.consumeBucketsAndMaybeBreak(1); @@ -491,14 +505,23 @@ public InternalAggregation reduce(List<InternalAggregation> aggregations, Reduce if (sumDocCountError == -1) { docCountError = -1; } else { - docCountError = aggregations.size() == 1 ? 0 : sumDocCountError; + if (hasSliceLevelDocCountError) { + docCountError = sumDocCountError; + } else { + docCountError = aggregations.size() == 1 ? 0 : sumDocCountError; + } + } + + // Shards must return buckets sorted by key, so we apply the sort here in shard level reduce + if (reduceContext.isSliceLevel()) { + Arrays.sort(list, thisReduceOrder.comparator()); } return create(name, Arrays.asList(list), reduceContext.isFinalReduce() ? order : thisReduceOrder, docCountError, otherDocCount); } @Override protected B reduceBucket(List<B> buckets, ReduceContext context) { - assert buckets.size() > 0; + assert !buckets.isEmpty(); long docCount = 0; // For the per term doc count error we add up the errors from the // shards that did not respond with the term. 
To do this we add up @@ -509,7 +532,7 @@ protected B reduceBucket(List<B> buckets, ReduceContext context) { for (B bucket : buckets) { docCount += bucket.getDocCount(); if (docCountError != -1) { - if (bucket.showDocCountError() == false || bucket.getDocCountError() == -1) { + if (bucket.showDocCountError() == false) { docCountError = -1; } else { docCountError += bucket.getDocCountError(); diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/LongKeyedBucketOrds.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/LongKeyedBucketOrds.java index a55b7d6bc154e..dd4ddfdb0f9df 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/LongKeyedBucketOrds.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/LongKeyedBucketOrds.java @@ -32,10 +32,10 @@ package org.opensearch.search.aggregations.bucket.terms; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.util.BigArrays; -import org.opensearch.common.util.ReorganizingLongHash; import org.opensearch.common.util.LongLongHash; -import org.opensearch.common.lease.Releasable; +import org.opensearch.common.util.ReorganizingLongHash; import org.opensearch.search.aggregations.CardinalityUpperBound; /** diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/LongRareTerms.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/LongRareTerms.java index 18a97b1cbdb9a..a2f8af0bfe69f 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/LongRareTerms.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/LongRareTerms.java @@ -31,9 +31,9 @@ package org.opensearch.search.aggregations.bucket.terms; +import org.opensearch.common.util.SetBackedScalingCuckooFilter; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.util.SetBackedScalingCuckooFilter; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.BucketOrder; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/LongRareTermsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/LongRareTermsAggregator.java index 0e7956744091f..6e4cd895e7496 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/LongRareTermsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/LongRareTermsAggregator.java @@ -33,9 +33,9 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.LongHash; import org.opensearch.common.util.SetBackedScalingCuckooFilter; -import org.opensearch.common.lease.Releasables; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/LongTerms.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/LongTerms.java index 67aa80d0a9879..fe78145dce3e7 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/LongTerms.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/LongTerms.java @@ -142,29 
+142,27 @@ public LongTerms( String name, BucketOrder reduceOrder, BucketOrder order, - int requiredSize, - long minDocCount, Map<String, Object> metadata, DocValueFormat format, int shardSize, boolean showTermDocCountError, long otherDocCount, List<Bucket> buckets, - long docCountError + long docCountError, + TermsAggregator.BucketCountThresholds bucketCountThresholds ) { super( name, reduceOrder, order, - requiredSize, - minDocCount, metadata, format, shardSize, showTermDocCountError, otherDocCount, buckets, - docCountError + docCountError, + bucketCountThresholds ); } @@ -186,15 +184,14 @@ public LongTerms create(List<Bucket> buckets) { name, reduceOrder, order, - requiredSize, - minDocCount, metadata, format, shardSize, showTermDocCountError, otherDocCount, buckets, - docCountError + docCountError, + bucketCountThresholds ); } @@ -216,15 +213,14 @@ protected LongTerms create(String name, List<Bucket> buckets, BucketOrder reduce name, reduceOrder, order, - requiredSize, - minDocCount, getMetadata(), format, shardSize, showTermDocCountError, otherDocCount, buckets, - docCountError + docCountError, + bucketCountThresholds ); } @@ -293,15 +289,14 @@ static DoubleTerms convertLongTermsToDouble(LongTerms longTerms, DocValueFormat longTerms.getName(), longTerms.reduceOrder, longTerms.order, - longTerms.requiredSize, - longTerms.minDocCount, longTerms.metadata, longTerms.format, longTerms.shardSize, longTerms.showTermDocCountError, longTerms.otherDocCount, newBuckets, - longTerms.docCountError + longTerms.docCountError, + longTerms.bucketCountThresholds ); } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java index bcdf1f4480a31..ade23f7290f89 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java @@ -36,9 +36,9 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.PriorityQueue; -import org.opensearch.common.util.LongArray; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; +import org.opensearch.common.util.LongArray; import org.opensearch.index.fielddata.SortedBinaryDocValues; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; @@ -50,6 +50,7 @@ import org.opensearch.search.aggregations.InternalOrder; import org.opensearch.search.aggregations.LeafBucketCollector; import org.opensearch.search.aggregations.LeafBucketCollectorBase; +import org.opensearch.search.aggregations.bucket.LocalBucketCountThresholds; import org.opensearch.search.aggregations.bucket.terms.SignificanceLookup.BackgroundFrequencyForBytes; import org.opensearch.search.aggregations.bucket.terms.heuristic.SignificanceHeuristic; import org.opensearch.search.aggregations.support.ValuesSource; @@ -244,11 +245,12 @@ abstract class ResultStrategy<R extends InternalAggregation, B extends InternalM Releasable { private InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { + LocalBucketCountThresholds localBucketCountThresholds = context.asLocalBucketCountThresholds(bucketCountThresholds); B[][] topBucketsPerOrd = buildTopBucketsPerOrd(owningBucketOrds.length); long[] otherDocCounts = new long[owningBucketOrds.length]; for (int 
ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { collectZeroDocEntriesIfNeeded(owningBucketOrds[ordIdx]); - int size = (int) Math.min(bucketOrds.size(), bucketCountThresholds.getShardSize()); + int size = (int) Math.min(bucketOrds.size(), localBucketCountThresholds.getRequiredSize()); PriorityQueue<B> ordered = buildPriorityQueue(size); B spare = null; @@ -257,7 +259,7 @@ private InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws while (ordsEnum.next()) { long docCount = bucketDocCount(ordsEnum.ord()); otherDocCounts[ordIdx] += docCount; - if (docCount < bucketCountThresholds.getShardMinDocCount()) { + if (docCount < localBucketCountThresholds.getMinDocCount()) { continue; } if (spare == null) { @@ -454,15 +456,14 @@ StringTerms buildResult(long owningBucketOrd, long otherDocCount, StringTerms.Bu name, reduceOrder, order, - bucketCountThresholds.getRequiredSize(), - bucketCountThresholds.getMinDocCount(), metadata(), format, bucketCountThresholds.getShardSize(), showTermDocCountError, otherDocCount, Arrays.asList(topBuckets), - 0 + 0, + bucketCountThresholds ); } @@ -572,14 +573,13 @@ void buildSubAggs(SignificantStringTerms.Bucket[][] topBucketsPerOrd) throws IOE SignificantStringTerms buildResult(long owningBucketOrd, long otherDocCount, SignificantStringTerms.Bucket[] topBuckets) { return new SignificantStringTerms( name, - bucketCountThresholds.getRequiredSize(), - bucketCountThresholds.getMinDocCount(), metadata(), format, subsetSizes.get(owningBucketOrd), supersetSize, significanceHeuristic, - Arrays.asList(topBuckets) + Arrays.asList(topBuckets), + bucketCountThresholds ); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationBuilder.java index 8c7b552e3b9de..c711b140be192 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationBuilder.java @@ -8,10 +8,10 @@ package org.opensearch.search.aggregations.bucket.terms; +import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryShardContext; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationFactory.java index aa6da630aa9f3..7134999e4aa85 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationFactory.java @@ -157,6 +157,11 @@ protected Aggregator createInternal( ); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } + /** * Supplier for internal values source * diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregator.java index 9d99c0b90a075..59f48bd7fbaba 
100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregator.java @@ -11,17 +11,18 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; import org.apache.lucene.util.PriorityQueue; import org.opensearch.ExceptionsHelper; import org.opensearch.common.CheckedSupplier; import org.opensearch.common.Numbers; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; +import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.lease.Releasables; +import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.index.fielddata.SortedBinaryDocValues; import org.opensearch.index.fielddata.SortedNumericDoubleValues; import org.opensearch.search.DocValueFormat; @@ -33,6 +34,7 @@ import org.opensearch.search.aggregations.InternalOrder; import org.opensearch.search.aggregations.LeafBucketCollector; import org.opensearch.search.aggregations.bucket.DeferableBucketAggregator; +import org.opensearch.search.aggregations.bucket.LocalBucketCountThresholds; import org.opensearch.search.aggregations.support.AggregationPath; import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.internal.SearchContext; @@ -92,10 +94,10 @@ public MultiTermsAggregator( this.partiallyBuiltBucketComparator = order == null ? null : order.partiallyBuiltBucketComparator(b -> b.bucketOrd, this); // Todo, copy from TermsAggregator. need to remove duplicate code. if (subAggsNeedScore() && descendsFromNestedAggregator(parent)) { - /** - * Force the execution to depth_first because we need to access the score of - * nested documents in a sub-aggregation and we are not able to generate this score - * while replaying deferred documents. + /* + Force the execution to depth_first because we need to access the score of + nested documents in a sub-aggregation and we are not able to generate this score + while replaying deferred documents. 
*/ this.collectMode = SubAggCollectionMode.DEPTH_FIRST; } else { @@ -118,13 +120,14 @@ public MultiTermsAggregator( @Override public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { + LocalBucketCountThresholds localBucketCountThresholds = context.asLocalBucketCountThresholds(bucketCountThresholds); InternalMultiTerms.Bucket[][] topBucketsPerOrd = new InternalMultiTerms.Bucket[owningBucketOrds.length][]; long[] otherDocCounts = new long[owningBucketOrds.length]; for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { collectZeroDocEntriesIfNeeded(owningBucketOrds[ordIdx]); long bucketsInOrd = bucketOrds.bucketsInOrd(owningBucketOrds[ordIdx]); - int size = (int) Math.min(bucketsInOrd, bucketCountThresholds.getShardSize()); + int size = (int) Math.min(bucketsInOrd, localBucketCountThresholds.getRequiredSize()); PriorityQueue<InternalMultiTerms.Bucket> ordered = new BucketPriorityQueue<>(size, partiallyBuiltBucketComparator); InternalMultiTerms.Bucket spare = null; BytesRef dest = null; @@ -136,7 +139,7 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I while (ordsEnum.next()) { long docCount = bucketDocCount(ordsEnum.ord()); otherDocCounts[ordIdx] += docCount; - if (docCount < bucketCountThresholds.getShardMinDocCount()) { + if (docCount < localBucketCountThresholds.getMinDocCount()) { continue; } if (spare == null) { @@ -182,15 +185,14 @@ InternalMultiTerms buildResult(long owningBucketOrd, long otherDocCount, Interna name, reduceOrder, order, - bucketCountThresholds.getRequiredSize(), - bucketCountThresholds.getMinDocCount(), metadata(), bucketCountThresholds.getShardSize(), showTermDocCountError, otherDocCount, 0, formats, - List.of(topBuckets) + List.of(topBuckets), + bucketCountThresholds ); } @@ -200,15 +202,14 @@ public InternalAggregation buildEmptyAggregation() { name, order, order, - bucketCountThresholds.getRequiredSize(), - bucketCountThresholds.getMinDocCount(), metadata(), bucketCountThresholds.getShardSize(), showTermDocCountError, 0, 0, formats, - Collections.emptyList() + Collections.emptyList(), + bucketCountThresholds ); } @@ -218,8 +219,8 @@ protected LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucket return new LeafBucketCollector() { @Override public void collect(int doc, long owningBucketOrd) throws IOException { - for (List<Object> value : collector.apply(doc)) { - long bucketOrd = bucketOrds.add(owningBucketOrd, encode(value)); + for (BytesRef compositeKey : collector.apply(doc)) { + long bucketOrd = bucketOrds.add(owningBucketOrd, compositeKey); if (bucketOrd < 0) { bucketOrd = -1 - bucketOrd; collectExistingBucket(sub, doc, bucketOrd); @@ -233,16 +234,7 @@ public void collect(int doc, long owningBucketOrd) throws IOException { @Override protected void doClose() { - Releasables.close(bucketOrds); - } - - private static BytesRef encode(List<Object> values) { - try (BytesStreamOutput output = new BytesStreamOutput()) { - output.writeCollection(values, StreamOutput::writeGenericValue); - return output.bytes().toBytesRef(); - } catch (IOException e) { - throw ExceptionsHelper.convertToRuntime(e); - } + Releasables.close(bucketOrds, multiTermsValue); } private static List<Object> decode(BytesRef bytesRef) { @@ -279,8 +271,8 @@ private void collectZeroDocEntriesIfNeeded(long owningBucketOrd) throws IOExcept MultiTermsValuesSourceCollector collector = multiTermsValue.getValues(ctx); // brute force for (int docId = 0; docId < ctx.reader().maxDoc(); ++docId) { - for 
(List<Object> value : collector.apply(docId)) { - bucketOrds.add(owningBucketOrd, encode(value)); + for (BytesRef compositeKey : collector.apply(docId)) { + bucketOrds.add(owningBucketOrd, compositeKey); } } } @@ -295,7 +287,7 @@ interface MultiTermsValuesSourceCollector { * Collect a list of values of multi_terms on each doc. * Each term can have multiple values, so the result is the cartesian product of each term's values. */ - List<List<Object>> apply(int doc) throws IOException; + List<BytesRef> apply(int doc) throws IOException; } @FunctionalInterface @@ -314,7 +306,46 @@ interface InternalValuesSourceCollector { /** * Collect a list of values of a term on a specific doc. */ - List<Object> apply(int doc) throws IOException; + List<TermValue<?>> apply(int doc) throws IOException; + } + + /** + * Represents an individual term value. + */ + static class TermValue<T> implements Writeable { + private static final Writer<BytesRef> BYTES_REF_WRITER = StreamOutput.getWriter(BytesRef.class); + private static final Writer<Long> LONG_WRITER = StreamOutput.getWriter(Long.class); + private static final Writer<BigInteger> BIG_INTEGER_WRITER = StreamOutput.getWriter(BigInteger.class); + private static final Writer<Double> DOUBLE_WRITER = StreamOutput.getWriter(Double.class); + + private final T value; + private final Writer<T> writer; + + private TermValue(T value, Writer<T> writer) { + this.value = value; + this.writer = writer; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + writer.write(out, value); + } + + public static TermValue<BytesRef> of(BytesRef value) { + return new TermValue<>(value, BYTES_REF_WRITER); + } + + public static TermValue<Long> of(Long value) { + return new TermValue<>(value, LONG_WRITER); + } + + public static TermValue<BigInteger> of(BigInteger value) { + return new TermValue<>(value, BIG_INTEGER_WRITER); + } + + public static TermValue<Double> of(Double value) { + return new TermValue<>(value, DOUBLE_WRITER); + } } /** @@ -322,8 +353,9 @@ interface InternalValuesSourceCollector { * * @opensearch.internal */ - static class MultiTermsValuesSource { + static class MultiTermsValuesSource implements Releasable { private final List<InternalValuesSource> valuesSources; + private final BytesStreamOutput scratch = new BytesStreamOutput(); public MultiTermsValuesSource(List<InternalValuesSource> valuesSources) { this.valuesSources = valuesSources; } @@ -336,37 +368,50 @@ public MultiTermsValuesSourceCollector getValues(LeafReaderContext ctx) throws I } return new MultiTermsValuesSourceCollector() { @Override - public List<List<Object>> apply(int doc) throws IOException { - List<CheckedSupplier<List<Object>, IOException>> collectedValues = new ArrayList<>(); + public List<BytesRef> apply(int doc) throws IOException { + List<List<TermValue<?>>> collectedValues = new ArrayList<>(); for (InternalValuesSourceCollector collector : collectors) { - collectedValues.add(() -> collector.apply(doc)); + collectedValues.add(collector.apply(doc)); } - List<List<Object>> result = new ArrayList<>(); - apply(0, collectedValues, new ArrayList<>(), result); + List<BytesRef> result = new ArrayList<>(); + scratch.seek(0); + scratch.writeVInt(collectors.size()); // number of fields per composite key + cartesianProduct(result, scratch, collectedValues, 0); return result; } /** - * DFS traverse each term's values and add cartesian product to results lists. + * Cartesian product using depth first search. + * + * <p> + * Composite keys are encoded to a {@link BytesRef} in a format compatible with {@link StreamOutput#writeGenericValue}, + * but the encoding reuses the shared prefixes from the previous levels to avoid wasteful work. */ - private void apply( - int index, - List<CheckedSupplier<List<Object>, IOException>> collectedValues, - List<Object> current, - List<List<Object>> results + private void cartesianProduct( + List<BytesRef> compositeKeys, + BytesStreamOutput scratch, + List<List<TermValue<?>>> collectedValues, + int index ) throws IOException { - if (index == collectedValues.size()) { - results.add(List.copyOf(current)); - } else if (null != collectedValues.get(index)) { - for (Object value : collectedValues.get(index).get()) { - current.add(value); - apply(index + 1, collectedValues, current, results); - current.remove(current.size() - 1); - } + if (collectedValues.size() == index) { + compositeKeys.add(BytesRef.deepCopyOf(scratch.bytes().toBytesRef())); + return; + } + + long position = scratch.position(); + for (TermValue<?> value : collectedValues.get(index)) { + value.writeTo(scratch); // encode the value + cartesianProduct(compositeKeys, scratch, collectedValues, index + 1); // dfs + scratch.seek(position); // backtrack } } }; } + + @Override + public void close() { + scratch.close(); + } }
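A minimal standalone sketch of the backtracking encoder above (illustrative only, not part of the patch): it substitutes StringBuilder for BytesStreamOutput and a '\0' separator for the writeGenericValue framing, but keeps the same remember-position, write, recurse, rewind mechanics.

// Illustrative sketch: depth-first cartesian product over per-field value
// lists, reusing one scratch buffer and rewinding it on backtrack so shared
// prefixes are encoded once per subtree instead of once per composite key.
import java.util.ArrayList;
import java.util.List;

final class CartesianKeySketch {
    static void product(List<List<String>> fields, int index, StringBuilder scratch, List<String> keys) {
        if (index == fields.size()) {
            keys.add(scratch.toString()); // copy out the finished composite key
            return;
        }
        int position = scratch.length(); // remember where this level starts
        for (String value : fields.get(index)) {
            scratch.append(value).append('\0'); // encode this level's value
            product(fields, index + 1, scratch, keys); // dfs
            scratch.setLength(position); // backtrack
        }
    }

    public static void main(String[] args) {
        List<String> keys = new ArrayList<>();
        product(List.of(List.of("a", "b"), List.of("x", "y")), 0, new StringBuilder(), keys);
        System.out.println(keys.size()); // 4 composite keys: ax, ay, bx, by
    }
}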
/** @@ -379,27 +424,26 @@ static InternalValuesSource bytesValuesSource(ValuesSource valuesSource, Include return ctx -> { SortedBinaryDocValues values = valuesSource.bytesValues(ctx); return doc -> { - BytesRefBuilder previous = new BytesRefBuilder(); - if (false == values.advanceExact(doc)) { return Collections.emptyList(); } int valuesCount = values.docValueCount(); - List<Object> termValues = new ArrayList<>(valuesCount); + List<TermValue<?>> termValues = new ArrayList<>(valuesCount); // SortedBinaryDocValues don't guarantee uniqueness so we // need to take care of dups - previous.clear(); + BytesRef previous = null; for (int i = 0; i < valuesCount; ++i) { BytesRef bytes = values.nextValue(); if (includeExclude != null && false == includeExclude.accept(bytes)) { continue; } - if (i > 0 && previous.get().equals(bytes)) { continue; } - previous.copyBytes(bytes); - termValues.add(BytesRef.deepCopyOf(bytes)); + if (i > 0 && bytes.equals(previous)) { continue; } + BytesRef copy = BytesRef.deepCopyOf(bytes); + termValues.add(TermValue.of(copy)); + previous = copy; } return termValues; }; @@ -414,12 +458,12 @@ static InternalValuesSource unsignedLongValuesSource(ValuesSource.Numeric values int valuesCount = values.docValueCount(); BigInteger previous = Numbers.MAX_UNSIGNED_LONG_VALUE; - List<Object> termValues = new ArrayList<>(valuesCount); + List<TermValue<?>> termValues = new ArrayList<>(valuesCount); for (int i = 0; i < valuesCount; ++i) { BigInteger val = Numbers.toUnsignedBigInteger(values.nextValue()); if (previous.compareTo(val) != 0 || i == 0) { if (longFilter == null || longFilter.accept(NumericUtils.doubleToSortableLong(val.doubleValue()))) { - termValues.add(val); + termValues.add(TermValue.of(val)); } previous = val; } @@ -439,12 +483,12 @@ static InternalValuesSource longValuesSource(ValuesSource.Numeric valuesSource, int valuesCount = values.docValueCount(); long previous = Long.MAX_VALUE; - List<Object> termValues = new ArrayList<>(valuesCount); + List<TermValue<?>> termValues = new ArrayList<>(valuesCount); for (int i = 0; i < valuesCount; ++i) { long val = values.nextValue(); if (previous != val || i == 0) { if (longFilter == null || longFilter.accept(val)) { -
termValues.add(val); + termValues.add(TermValue.of(val)); } previous = val; } @@ -464,12 +508,12 @@ static InternalValuesSource doubleValueSource(ValuesSource.Numeric valuesSource, int valuesCount = values.docValueCount(); double previous = Double.MAX_VALUE; - List<Object> termValues = new ArrayList<>(valuesCount); + List<TermValue<?>> termValues = new ArrayList<>(valuesCount); for (int i = 0; i < valuesCount; ++i) { double val = values.nextValue(); if (previous != val || i == 0) { if (longFilter == null || longFilter.accept(NumericUtils.doubleToSortableLong(val))) { - termValues.add(val); + termValues.add(TermValue.of(val)); } previous = val; } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/NumericTermsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/NumericTermsAggregator.java index a0265135fe9d3..9d095bbf7dccf 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/NumericTermsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/NumericTermsAggregator.java @@ -38,9 +38,9 @@ import org.apache.lucene.util.NumericUtils; import org.apache.lucene.util.PriorityQueue; import org.opensearch.common.Numbers; -import org.opensearch.common.util.LongArray; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; +import org.opensearch.common.util.LongArray; import org.opensearch.index.fielddata.FieldData; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; @@ -52,6 +52,7 @@ import org.opensearch.search.aggregations.InternalOrder; import org.opensearch.search.aggregations.LeafBucketCollector; import org.opensearch.search.aggregations.LeafBucketCollectorBase; +import org.opensearch.search.aggregations.bucket.LocalBucketCountThresholds; import org.opensearch.search.aggregations.bucket.terms.IncludeExclude.LongFilter; import org.opensearch.search.aggregations.bucket.terms.LongKeyedBucketOrds.BucketOrdsEnum; import org.opensearch.search.aggregations.bucket.terms.SignificanceLookup.BackgroundFrequencyForLong; @@ -173,13 +174,14 @@ abstract class ResultStrategy<R extends InternalAggregation, B extends InternalM implements Releasable { private InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { + LocalBucketCountThresholds localBucketCountThresholds = context.asLocalBucketCountThresholds(bucketCountThresholds); B[][] topBucketsPerOrd = buildTopBucketsPerOrd(owningBucketOrds.length); long[] otherDocCounts = new long[owningBucketOrds.length]; for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { collectZeroDocEntriesIfNeeded(owningBucketOrds[ordIdx]); long bucketsInOrd = bucketOrds.bucketsInOrd(owningBucketOrds[ordIdx]); - int size = (int) Math.min(bucketsInOrd, bucketCountThresholds.getShardSize()); + int size = (int) Math.min(bucketsInOrd, localBucketCountThresholds.getRequiredSize()); PriorityQueue<B> ordered = buildPriorityQueue(size); B spare = null; BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds[ordIdx]); @@ -187,7 +189,7 @@ private InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws while (ordsEnum.next()) { long docCount = bucketDocCount(ordsEnum.ord()); otherDocCounts[ordIdx] += docCount; - if (docCount < bucketCountThresholds.getShardMinDocCount()) { + if (docCount < localBucketCountThresholds.getMinDocCount()) { continue; } if (spare == null) { @@ -395,15 +397,14 @@ LongTerms buildResult(long 
owningBucketOrd, long otherDocCount, LongTerms.Bucket name, reduceOrder, order, - bucketCountThresholds.getRequiredSize(), - bucketCountThresholds.getMinDocCount(), metadata(), format, bucketCountThresholds.getShardSize(), showTermDocCountError, otherDocCount, List.of(topBuckets), - 0 + 0, + bucketCountThresholds ); } @@ -413,15 +414,14 @@ LongTerms buildEmptyResult() { name, order, order, - bucketCountThresholds.getRequiredSize(), - bucketCountThresholds.getMinDocCount(), metadata(), format, bucketCountThresholds.getShardSize(), showTermDocCountError, 0, emptyList(), - 0 + 0, + bucketCountThresholds ); } } @@ -477,15 +477,14 @@ DoubleTerms buildResult(long owningBucketOrd, long otherDocCount, DoubleTerms.Bu name, reduceOrder, order, - bucketCountThresholds.getRequiredSize(), - bucketCountThresholds.getMinDocCount(), metadata(), format, bucketCountThresholds.getShardSize(), showTermDocCountError, otherDocCount, List.of(topBuckets), - 0 + 0, + bucketCountThresholds ); } @@ -495,15 +494,14 @@ DoubleTerms buildEmptyResult() { name, order, order, - bucketCountThresholds.getRequiredSize(), - bucketCountThresholds.getMinDocCount(), metadata(), format, bucketCountThresholds.getShardSize(), showTermDocCountError, 0, emptyList(), - 0 + 0, + bucketCountThresholds ); } } @@ -558,15 +556,14 @@ UnsignedLongTerms buildResult(long owningBucketOrd, long otherDocCount, Unsigned name, reduceOrder, order, - bucketCountThresholds.getRequiredSize(), - bucketCountThresholds.getMinDocCount(), metadata(), format, bucketCountThresholds.getShardSize(), showTermDocCountError, otherDocCount, List.of(topBuckets), - 0 + 0, + bucketCountThresholds ); } @@ -576,15 +573,14 @@ UnsignedLongTerms buildEmptyResult() { name, order, order, - bucketCountThresholds.getRequiredSize(), - bucketCountThresholds.getMinDocCount(), metadata(), format, bucketCountThresholds.getShardSize(), showTermDocCountError, 0, emptyList(), - 0 + 0, + bucketCountThresholds ); } } @@ -670,17 +666,17 @@ void collectZeroDocEntriesIfNeeded(long owningBucketOrd) throws IOException {} @Override SignificantLongTerms buildResult(long owningBucketOrd, long otherDocCoun, SignificantLongTerms.Bucket[] topBuckets) { - return new SignificantLongTerms( + SignificantLongTerms significantLongTerms = new SignificantLongTerms( name, - bucketCountThresholds.getRequiredSize(), - bucketCountThresholds.getMinDocCount(), metadata(), format, subsetSizes.get(owningBucketOrd), supersetSize, significanceHeuristic, - List.of(topBuckets) + List.of(topBuckets), + bucketCountThresholds ); + return significantLongTerms; } @Override @@ -691,14 +687,13 @@ SignificantLongTerms buildEmptyResult() { int supersetSize = topReader.numDocs(); return new SignificantLongTerms( name, - bucketCountThresholds.getRequiredSize(), - bucketCountThresholds.getMinDocCount(), metadata(), format, 0, supersetSize, significanceHeuristic, - emptyList() + emptyList(), + bucketCountThresholds ); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/ParsedStringRareTerms.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/ParsedStringRareTerms.java index 171dddfcc6bba..c606f48953567 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/ParsedStringRareTerms.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/ParsedStringRareTerms.java @@ -32,14 +32,14 @@ package org.opensearch.search.aggregations.bucket.terms; -import java.io.IOException; -import java.nio.CharBuffer; - import 
org.apache.lucene.util.BytesRef; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; +import java.io.IOException; +import java.nio.CharBuffer; + /** * A significant rare result parsed between nodes * diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java index dc616ca7512be..5d83d926ab36f 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java @@ -196,7 +196,7 @@ public double getPrecision() { * Sets the false-positive rate for individual cuckoo filters. Does not dictate the overall fpp rate * since we use a "scaling" cuckoo filter which adds more filters as required, and the overall * error rate grows differently than individual filters - * + * <p> * This value does, however, affect the overall space usage of the filter. Coarser precisions provide * more compact filters. The default is 0.01 */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorFactory.java index 96b440075b768..b5f3abe89ac59 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorFactory.java @@ -32,8 +32,8 @@ package org.opensearch.search.aggregations.bucket.terms; -import org.opensearch.core.ParseField; import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.core.ParseField; import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; @@ -237,6 +237,11 @@ protected Aggregator doCreateInternal( ); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } + /** * Execution mode for rare terms agg * diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificanceLookup.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificanceLookup.java index aa1409a7bec78..34bbac55900a8 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificanceLookup.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificanceLookup.java @@ -42,13 +42,13 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.lucene.index.FilterableTermsEnum; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.BytesRefHash; import org.opensearch.common.util.LongArray; import org.opensearch.common.util.LongHash; -import org.opensearch.common.lease.Releasable; -import org.opensearch.common.lease.Releasables; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryShardContext; @@ -123,7 +123,7 @@ public void close() {} }; } return new BackgroundFrequencyForBytes() { - private final
BytesRefHash termToPosition = new BytesRefHash(1, bigArrays); + private final BytesRefHash termToPosition = new BytesRefHash(bigArrays); private LongArray positionToFreq = bigArrays.newLongArray(1, false); @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantLongTerms.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantLongTerms.java index 46e8cea7abc36..3da5a766fc37b 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantLongTerms.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantLongTerms.java @@ -130,16 +130,15 @@ public int hashCode() { public SignificantLongTerms( String name, - int requiredSize, - long minDocCount, Map<String, Object> metadata, DocValueFormat format, long subsetSize, long supersetSize, SignificanceHeuristic significanceHeuristic, - List<Bucket> buckets + List<Bucket> buckets, + TermsAggregator.BucketCountThresholds bucketCountThresholds ) { - super(name, requiredSize, minDocCount, metadata, format, subsetSize, supersetSize, significanceHeuristic, buckets); + super(name, metadata, format, subsetSize, supersetSize, significanceHeuristic, buckets, bucketCountThresholds); } /** @@ -158,14 +157,13 @@ public String getWriteableName() { public SignificantLongTerms create(List<SignificantLongTerms.Bucket> buckets) { return new SignificantLongTerms( name, - requiredSize, - minDocCount, metadata, format, subsetSize, supersetSize, significanceHeuristic, - buckets + buckets, + bucketCountThresholds ); } @@ -187,14 +185,13 @@ public Bucket createBucket(InternalAggregations aggregations, SignificantLongTer protected SignificantLongTerms create(long subsetSize, long supersetSize, List<Bucket> buckets) { return new SignificantLongTerms( getName(), - requiredSize, - minDocCount, getMetadata(), format, subsetSize, supersetSize, significanceHeuristic, - buckets + buckets, + bucketCountThresholds ); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantStringTerms.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantStringTerms.java index d8d93ad7ae159..c70db6005d7cd 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantStringTerms.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantStringTerms.java @@ -135,16 +135,15 @@ public int hashCode() { public SignificantStringTerms( String name, - int requiredSize, - long minDocCount, Map<String, Object> metadata, DocValueFormat format, long subsetSize, long supersetSize, SignificanceHeuristic significanceHeuristic, - List<Bucket> buckets + List<Bucket> buckets, + TermsAggregator.BucketCountThresholds bucketCountThresholds ) { - super(name, requiredSize, minDocCount, metadata, format, subsetSize, supersetSize, significanceHeuristic, buckets); + super(name, metadata, format, subsetSize, supersetSize, significanceHeuristic, buckets, bucketCountThresholds); } /** @@ -163,14 +162,13 @@ public String getWriteableName() { public SignificantStringTerms create(List<SignificantStringTerms.Bucket> buckets) { return new SignificantStringTerms( name, - requiredSize, - minDocCount, metadata, format, subsetSize, supersetSize, significanceHeuristic, - buckets + buckets, + bucketCountThresholds ); } @@ -192,14 +190,13 @@ public Bucket createBucket(InternalAggregations aggregations, SignificantStringT protected SignificantStringTerms create(long 
subsetSize, long supersetSize, List<Bucket> buckets) { return new SignificantStringTerms( getName(), - requiredSize, - minDocCount, getMetadata(), format, subsetSize, supersetSize, significanceHeuristic, - buckets + buckets, + bucketCountThresholds ); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTermsAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTermsAggregatorFactory.java index 1dacd4c7de4e8..f6802a58dfed2 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTermsAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTermsAggregatorFactory.java @@ -32,8 +32,8 @@ package org.opensearch.search.aggregations.bucket.terms; -import org.opensearch.core.ParseField; import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.core.ParseField; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.DocValueFormat; @@ -246,12 +246,7 @@ public Aggregator build( @Override protected Aggregator createUnmapped(SearchContext searchContext, Aggregator parent, Map<String, Object> metadata) throws IOException { - final InternalAggregation aggregation = new UnmappedSignificantTerms( - name, - bucketCountThresholds.getRequiredSize(), - bucketCountThresholds.getMinDocCount(), - metadata - ); + final InternalAggregation aggregation = new UnmappedSignificantTerms(name, bucketCountThresholds, metadata); return new NonCollectingAggregator(name, searchContext, parent, factories, metadata) { @Override public InternalAggregation buildEmptyAggregation() { @@ -311,6 +306,11 @@ protected Aggregator doCreateInternal( ); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } + /** * The execution mode for the significant terms agg * diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorFactory.java index 7f5804c8b9561..81366c212c86c 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorFactory.java @@ -40,10 +40,10 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.BytesRefHash; import org.opensearch.common.util.ObjectArray; -import org.opensearch.common.lease.Releasables; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryShardContext; @@ -312,4 +312,9 @@ public void close() { Releasables.close(dupSequenceSpotters); } } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/StringRareTerms.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/StringRareTerms.java index 574e2409b71d0..e86f938ff2081 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/StringRareTerms.java +++ 
b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/StringRareTerms.java @@ -32,9 +32,9 @@ package org.opensearch.search.aggregations.bucket.terms; import org.apache.lucene.util.BytesRef; +import org.opensearch.common.util.SetBackedScalingCuckooFilter; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.util.SetBackedScalingCuckooFilter; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.BucketOrder; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/StringRareTermsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/StringRareTermsAggregator.java index b12c397b00fe2..cc35fe75e5e92 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/StringRareTermsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/StringRareTermsAggregator.java @@ -34,9 +34,9 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.BytesRefHash; import org.opensearch.common.util.SetBackedScalingCuckooFilter; -import org.opensearch.common.lease.Releasables; import org.opensearch.index.fielddata.SortedBinaryDocValues; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; @@ -135,7 +135,7 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I Arrays.fill(mergeMap, -1); long offset = 0; for (int owningOrdIdx = 0; owningOrdIdx < owningBucketOrds.length; owningOrdIdx++) { - try (BytesRefHash bucketsInThisOwningBucketToCollect = new BytesRefHash(1, context.bigArrays())) { + try (BytesRefHash bucketsInThisOwningBucketToCollect = new BytesRefHash(context.bigArrays())) { filters[owningOrdIdx] = newFilter(); List<StringRareTerms.Bucket> builtBuckets = new ArrayList<>(); BytesKeyedBucketOrds.BucketOrdsEnum collectedBuckets = bucketOrds.ordsEnum(owningBucketOrds[owningOrdIdx]); diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/StringTerms.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/StringTerms.java index c985bf770d4a7..6dedc65ff14e3 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/StringTerms.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/StringTerms.java @@ -134,29 +134,27 @@ public StringTerms( String name, BucketOrder reduceOrder, BucketOrder order, - int requiredSize, - long minDocCount, Map<String, Object> metadata, DocValueFormat format, int shardSize, boolean showTermDocCountError, long otherDocCount, List<Bucket> buckets, - long docCountError + long docCountError, + TermsAggregator.BucketCountThresholds bucketCountThresholds ) { super( name, reduceOrder, order, - requiredSize, - minDocCount, metadata, format, shardSize, showTermDocCountError, otherDocCount, buckets, - docCountError + docCountError, + bucketCountThresholds ); } @@ -178,15 +176,14 @@ public StringTerms create(List<Bucket> buckets) { name, reduceOrder, order, - requiredSize, - minDocCount, metadata, format, shardSize, showTermDocCountError, otherDocCount, buckets, - docCountError + docCountError, + bucketCountThresholds ); } @@ -213,15 +210,14 @@ protected StringTerms 
create(String name, List<Bucket> buckets, BucketOrder redu name, reduceOrder, order, - requiredSize, - minDocCount, getMetadata(), format, shardSize, showTermDocCountError, otherDocCount, buckets, - docCountError + docCountError, + bucketCountThresholds ); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java index fd944374eeaaa..390348a9fa7d1 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java @@ -32,10 +32,10 @@ package org.opensearch.search.aggregations.bucket.terms; import org.opensearch.Version; +import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryRewriteContext; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregator.java index 9e2aa85bb1dd8..918cc0276ed13 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregator.java @@ -33,12 +33,14 @@ package org.opensearch.search.aggregations.bucket.terms; import org.opensearch.OpenSearchException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.DocValueFormat; +import org.opensearch.search.aggregations.AggregationExecutionException; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; import org.opensearch.search.aggregations.BucketOrder; @@ -66,8 +68,9 @@ public abstract class TermsAggregator extends DeferableBucketAggregator { /** * Bucket count thresholds * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class BucketCountThresholds implements Writeable, ToXContentFragment { private long minDocCount; private long shardMinDocCount; @@ -195,6 +198,30 @@ public boolean equals(Object obj) { } } + /** + * BucketCountThresholds type that throws an exception when shardMinDocCount or shardSize are accessed. This is used for + * deserialization on the coordinator during reduce as shardMinDocCount and shardSize should not be accessed this way on the + * coordinator. 
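+ * <p> + * For example (illustrative note): a reduce-phase caller that mistakenly invokes {@code getShardSize()} or {@code getShardMinDocCount()} on one of these thresholds objects fails fast with an {@code AggregationExecutionException} instead of silently reusing a shard-level value on the coordinator.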
+ * + * @opensearch.internal + */ + public static class CoordinatorBucketCountThresholds extends BucketCountThresholds { + + public CoordinatorBucketCountThresholds(long minDocCount, long shardMinDocCount, int requiredSize, int shardSize) { + super(minDocCount, shardMinDocCount, requiredSize, shardSize); + } + + @Override + public long getShardMinDocCount() { + throw new AggregationExecutionException("shard_min_doc_count should not be accessed via CoordinatorBucketCountThresholds"); + } + + @Override + public int getShardSize() { + throw new AggregationExecutionException("shard_size should not be accessed via CoordinatorBucketCountThresholds"); + } + } + protected final DocValueFormat format; protected final BucketCountThresholds bucketCountThresholds; protected final BucketOrder order; @@ -219,10 +246,10 @@ public TermsAggregator( partiallyBuiltBucketComparator = order == null ? null : order.partiallyBuiltBucketComparator(b -> b.bucketOrd, this); this.format = format; if (subAggsNeedScore() && descendsFromNestedAggregator(parent)) { - /** - * Force the execution to depth_first because we need to access the score of - * nested documents in a sub-aggregation and we are not able to generate this score - * while replaying deferred documents. + /* + Force the execution to depth_first because we need to access the score of + nested documents in a sub-aggregation and we are not able to generate this score + while replaying deferred documents. */ this.collectMode = SubAggCollectionMode.DEPTH_FIRST; } else { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java index 11be3da5c8991..a4d73bfd3e634 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java @@ -265,13 +265,7 @@ public Aggregator build( @Override protected Aggregator createUnmapped(SearchContext searchContext, Aggregator parent, Map<String, Object> metadata) throws IOException { - final InternalAggregation aggregation = new UnmappedTerms( - name, - order, - bucketCountThresholds.getRequiredSize(), - bucketCountThresholds.getMinDocCount(), - metadata - ); + final InternalAggregation aggregation = new UnmappedTerms(name, order, bucketCountThresholds, metadata); Aggregator agg = new NonCollectingAggregator(name, searchContext, parent, factories, metadata) { @Override public InternalAggregation buildEmptyAggregation() { @@ -564,4 +558,8 @@ public String toString() { } } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorSupplier.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorSupplier.java index 27e5adf031483..ea1484c6b9eef 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorSupplier.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorSupplier.java @@ -34,8 +34,8 @@ import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; -import org.opensearch.search.aggregations.CardinalityUpperBound; import org.opensearch.search.aggregations.BucketOrder; +import 
org.opensearch.search.aggregations.CardinalityUpperBound; import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.internal.SearchContext; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/UnmappedRareTerms.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/UnmappedRareTerms.java index e330174ef18f3..f3010f87b430e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/UnmappedRareTerms.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/UnmappedRareTerms.java @@ -31,9 +31,9 @@ package org.opensearch.search.aggregations.bucket.terms; +import org.opensearch.common.util.SetBackedScalingCuckooFilter; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.util.SetBackedScalingCuckooFilter; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.InternalAggregation; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/UnmappedSignificantTerms.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/UnmappedSignificantTerms.java index 9384f9e793d81..2c8aa8f0a0c37 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/UnmappedSignificantTerms.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/UnmappedSignificantTerms.java @@ -77,8 +77,12 @@ private Bucket( } } - public UnmappedSignificantTerms(String name, int requiredSize, long minDocCount, Map<String, Object> metadata) { - super(name, requiredSize, minDocCount, metadata); + public UnmappedSignificantTerms( + String name, + TermsAggregator.BucketCountThresholds bucketCountThresholds, + Map<String, Object> metadata + ) { + super(name, bucketCountThresholds, metadata); } /** @@ -105,7 +109,7 @@ public String getType() { @Override public UnmappedSignificantTerms create(List<Bucket> buckets) { - return new UnmappedSignificantTerms(name, requiredSize, minDocCount, metadata); + return new UnmappedSignificantTerms(name, bucketCountThresholds, metadata); } @Override @@ -132,7 +136,7 @@ Bucket createBucket( @Override public InternalAggregation reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) { - return new UnmappedSignificantTerms(name, requiredSize, minDocCount, metadata); + return new UnmappedSignificantTerms(name, bucketCountThresholds, metadata); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/UnmappedTerms.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/UnmappedTerms.java index 01902f9449bae..3d2bbb93c889a 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/UnmappedTerms.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/UnmappedTerms.java @@ -72,8 +72,13 @@ private Bucket( } } - public UnmappedTerms(String name, BucketOrder order, int requiredSize, long minDocCount, Map<String, Object> metadata) { - super(name, order, order, requiredSize, minDocCount, metadata); + public UnmappedTerms( + String name, + BucketOrder order, + TermsAggregator.BucketCountThresholds bucketCountThresholds, + Map<String, Object> metadata + ) { + super(name, order, order, bucketCountThresholds, metadata); } /** @@ -100,7 +105,7 @@ public String getType() { @Override public UnmappedTerms 
create(List<Bucket> buckets) { - return new UnmappedTerms(name, order, requiredSize, minDocCount, metadata); + return new UnmappedTerms(name, order, bucketCountThresholds, metadata); } @Override @@ -120,7 +125,7 @@ protected UnmappedTerms create(String name, List<Bucket> buckets, BucketOrder re @Override public InternalAggregation reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) { - return new UnmappedTerms(name, order, requiredSize, minDocCount, metadata); + return new UnmappedTerms(name, order, bucketCountThresholds, metadata); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/UnsignedLongTerms.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/UnsignedLongTerms.java index db05ac84b4aec..edeec00d366fd 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/UnsignedLongTerms.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/UnsignedLongTerms.java @@ -121,29 +121,27 @@ public UnsignedLongTerms( String name, BucketOrder reduceOrder, BucketOrder order, - int requiredSize, - long minDocCount, Map<String, Object> metadata, DocValueFormat format, int shardSize, boolean showTermDocCountError, long otherDocCount, List<Bucket> buckets, - long docCountError + long docCountError, + TermsAggregator.BucketCountThresholds bucketCountThresholds ) { super( name, reduceOrder, order, - requiredSize, - minDocCount, metadata, format, shardSize, showTermDocCountError, otherDocCount, buckets, - docCountError + docCountError, + bucketCountThresholds ); } @@ -165,15 +163,14 @@ public UnsignedLongTerms create(List<Bucket> buckets) { name, reduceOrder, order, - requiredSize, - minDocCount, metadata, format, shardSize, showTermDocCountError, otherDocCount, buckets, - docCountError + docCountError, + bucketCountThresholds ); } @@ -195,15 +192,14 @@ protected UnsignedLongTerms create(String name, List<Bucket> buckets, BucketOrde name, reduceOrder, order, - requiredSize, - minDocCount, getMetadata(), format, shardSize, showTermDocCountError, otherDocCount, buckets, - docCountError + docCountError, + bucketCountThresholds ); } @@ -272,15 +268,14 @@ static DoubleTerms convertUnsignedLongTermsToDouble(UnsignedLongTerms unsignedLo unsignedLongTerms.getName(), unsignedLongTerms.reduceOrder, unsignedLongTerms.order, - unsignedLongTerms.requiredSize, - unsignedLongTerms.minDocCount, unsignedLongTerms.metadata, unsignedLongTerms.format, unsignedLongTerms.shardSize, unsignedLongTerms.showTermDocCountError, unsignedLongTerms.otherDocCount, newBuckets, - unsignedLongTerms.docCountError + unsignedLongTerms.docCountError, + unsignedLongTerms.bucketCountThresholds ); } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/heuristic/JLHScore.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/heuristic/JLHScore.java index fb5d39cbe0e58..aee00a1e422c9 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/heuristic/JLHScore.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/heuristic/JLHScore.java @@ -32,13 +32,13 @@ package org.opensearch.search.aggregations.bucket.terms.heuristic; -import java.io.IOException; - import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.XContentBuilder; +import java.io.IOException; + /** * JLHScore 
heuristic for significant terms agg * diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHDRPercentilesAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHDRPercentilesAggregator.java index 8bf0158298395..e2edfd32f7ff8 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHDRPercentilesAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHDRPercentilesAggregator.java @@ -32,14 +32,12 @@ package org.opensearch.search.aggregations.metrics; -import org.HdrHistogram.DoubleHistogram; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.ScoreMode; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.ArrayUtils; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.ObjectArray; -import org.opensearch.common.lease.Releasables; - import org.opensearch.index.fielddata.SortedNumericDoubleValues; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; @@ -51,6 +49,8 @@ import java.io.IOException; import java.util.Map; +import org.HdrHistogram.DoubleHistogram; + /** * Base aggregator for HDR percentiles agg * diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHyperLogLog.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHyperLogLog.java index 6b998fc86361d..902e4d69ed5fa 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHyperLogLog.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHyperLogLog.java @@ -34,9 +34,9 @@ /** * Hyperloglog counter, implemented based on pseudo code from - * http://static.googleusercontent.com/media/research.google.com/fr//pubs/archive/40671.pdf and its appendix - * https://docs.google.com/document/d/1gyjfMHy43U9OWBXxfaeG-3MjGzejW1dlpyMwEYAAWEI/view?fullscreen - * + * <a href="http://static.googleusercontent.com/media/research.google.com/fr//pubs/archive/40671.pdf">40671.pdf</a> and its + * <a href="https://docs.google.com/document/d/1gyjfMHy43U9OWBXxfaeG-3MjGzejW1dlpyMwEYAAWEI/view?fullscreen">appendix</a> + * <p> * Trying to understand what this class does without having read the paper is considered adventurous. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHyperLogLogPlusPlus.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHyperLogLogPlusPlus.java index f64db066f5a6d..e74179b403e8e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHyperLogLogPlusPlus.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHyperLogLogPlusPlus.java @@ -32,10 +32,10 @@ package org.opensearch.search.aggregations.metrics; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.util.BigArrays; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.util.BigArrays; -import org.opensearch.common.lease.Releasable; import java.io.IOException; import java.util.HashMap; @@ -46,7 +46,6 @@ /** * Base class for HLL++ algorithms. - * * It contains methods for cloning and serializing the data structure. 
* * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractInternalHDRPercentiles.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractInternalHDRPercentiles.java index 1e36ed0bd9be9..6f50d791594ff 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractInternalHDRPercentiles.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractInternalHDRPercentiles.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.metrics; -import org.HdrHistogram.DoubleHistogram; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.XContentBuilder; @@ -47,6 +46,8 @@ import java.util.Objects; import java.util.zip.DataFormatException; +import org.HdrHistogram.DoubleHistogram; + /** * Base implementation of HDR percentiles * diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractLinearCounting.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractLinearCounting.java index 3f5f524c9c2f5..7c00b25ae365f 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractLinearCounting.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractLinearCounting.java @@ -36,11 +36,11 @@ /** * Linear counter, implemented based on pseudo code from - * http://static.googleusercontent.com/media/research.google.com/fr//pubs/archive/40671.pdf and its appendix - * https://docs.google.com/document/d/1gyjfMHy43U9OWBXxfaeG-3MjGzejW1dlpyMwEYAAWEI/view?fullscreen - * + * <a href="http://static.googleusercontent.com/media/research.google.com/fr//pubs/archive/40671.pdf">40671.pdf</a> and its + * <a href="https://docs.google.com/document/d/1gyjfMHy43U9OWBXxfaeG-3MjGzejW1dlpyMwEYAAWEI/view?fullscreen">appendix</a> + * <p> * Trying to understand what this class does without having read the paper is considered adventurous. - * + * <p> * The algorithm just keeps a record of all distinct values provided, each encoded as an integer. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractPercentilesAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractPercentilesAggregationBuilder.java index f3ea15addd1cd..fa8830de5dab9 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractPercentilesAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractPercentilesAggregationBuilder.java @@ -32,8 +32,8 @@ package org.opensearch.search.aggregations.metrics; import org.opensearch.common.Nullable; -import org.opensearch.core.ParseField; import org.opensearch.common.TriFunction; +import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -73,7 +73,7 @@ public static <T extends AbstractPercentilesAggregationBuilder<T>> ConstructingO ParseField valuesField ) { - /** + /* * This is a non-ideal ConstructingObjectParser, because it is a compromise between Percentiles and Ranks. * Ranks requires an array of values because there is no sane default, and we want to keep that in the ctor. * Percentiles has defaults, which means the API allows the user to either use the default or configure @@ -86,6 +86,7 @@ public static <T extends AbstractPercentilesAggregationBuilder<T>> ConstructingO * out the behavior from there * * `args` are provided from the ConstructingObjectParser in the order they are defined in the parser. So: + * * - args[0]: values * - args[1]: tdigest config options * - args[2]: hdr config options
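The positional-args contract called out in the comment above is easy to see in a toy form (hypothetical names, plain java.util.function instead of OpenSearch's ConstructingObjectParser): the parser gathers declared fields into an Object[] in declaration order and hands it to a single constructor function.

// Toy illustration of declaration-order positional construction; slots may
// be null when the user configured nothing for that position.
import java.util.List;
import java.util.function.Function;

final class ConstructingParserSketch {
    public static void main(String[] args) {
        Function<Object[], String> ctor =
            a -> "values=" + a[0] + " tdigest=" + a[1] + " hdr=" + a[2];
        Object[] parsed = { List.of(50.0, 99.0), null, null }; // args[0..2]
        System.out.println(ctor.apply(parsed));
    }
}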
@@ -197,7 +198,7 @@ public boolean keyed() { /** * Expert: set the number of significant digits in the values. Only relevant * when using {@link PercentilesMethod#HDR}. - * + * <p> * Deprecated: set numberOfSignificantValueDigits by configuring a {@link PercentilesConfig.Hdr} instead * and set via {@link PercentilesAggregationBuilder#percentilesConfig(PercentilesConfig)} */ @@ -217,7 +218,7 @@ public T numberOfSignificantValueDigits(int numberOfSignificantValueDigits) { /** * Expert: get the number of significant digits in the values. Only relevant * when using {@link PercentilesMethod#HDR}. - * + * <p> * Deprecated: get numberOfSignificantValueDigits by inspecting the {@link PercentilesConfig} returned from * {@link PercentilesAggregationBuilder#percentilesConfig()} instead */ @@ -232,7 +233,7 @@ public int numberOfSignificantValueDigits() { /** * Expert: set the compression. Higher values improve accuracy but also * memory usage. Only relevant when using {@link PercentilesMethod#TDIGEST}. - * + * <p> * Deprecated: set compression by configuring a {@link PercentilesConfig.TDigest} instead * and set via {@link PercentilesAggregationBuilder#percentilesConfig(PercentilesConfig)} */ @@ -249,7 +250,7 @@ public T compression(double compression) { /** * Expert: get the compression. Higher values improve accuracy but also * memory usage. Only relevant when using {@link PercentilesMethod#TDIGEST}. - * + * <p> * Deprecated: get compression by inspecting the {@link PercentilesConfig} returned from * {@link PercentilesAggregationBuilder#percentilesConfig()} instead */ @@ -317,15 +318,15 @@ public T percentilesConfig(PercentilesConfig percentilesConfig) { /** * Return the current algo configuration, or a default (Tdigest) otherwise - * + * <p> * This is needed because builders don't have a "build" or "finalize" method, but * the old API did bake in defaults. Certain operations like xcontent, equals, hashcode * will use the values in the builder at any time and need to be aware of defaults. - * + * <p> * But to maintain BWC behavior as much as possible, we allow the user to set * algo settings independent of method. To keep life simple we use a null to track * if any method has been selected yet.
- * + * <p> * However, this means we need a way to fetch the default if the user hasn't * selected any method and uses a builder-side feature like xcontent */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractTDigestPercentilesAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractTDigestPercentilesAggregator.java index dcc07a45d0528..8c79a80a26b9e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractTDigestPercentilesAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractTDigestPercentilesAggregator.java @@ -34,10 +34,10 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.ScoreMode; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.ArrayUtils; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.ObjectArray; -import org.opensearch.common.lease.Releasables; import org.opensearch.index.fielddata.SortedNumericDoubleValues; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregator.java index 0ba66ccd880d4..e58466b56df2a 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregator.java @@ -33,10 +33,10 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.ScoreMode; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.DoubleArray; import org.opensearch.common.util.LongArray; -import org.opensearch.common.lease.Releasables; import org.opensearch.index.fielddata.SortedNumericDoubleValues; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregatorFactory.java index 75419b7c64b12..0a09fae1eaebe 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregatorFactory.java @@ -90,4 +90,9 @@ protected Aggregator doCreateInternal( .getAggregator(AvgAggregationBuilder.REGISTRY_KEY, config) .build(name, config, searchContext, parent, metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregator.java index faae96b957b2d..99c4eaac4b777 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregator.java @@ -41,13 +41,13 @@ import org.apache.lucene.util.RamUsageEstimator; import org.opensearch.common.Nullable; import org.opensearch.common.hash.MurmurHash3; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.BitArray; import 
org.opensearch.common.util.BitMixer; import org.opensearch.common.util.LongArray; import org.opensearch.common.util.ObjectArray; -import org.opensearch.common.lease.Releasable; -import org.opensearch.common.lease.Releasables; import org.opensearch.index.fielddata.SortedBinaryDocValues; import org.opensearch.index.fielddata.SortedNumericDoubleValues; import org.opensearch.search.aggregations.Aggregator; diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorFactory.java index 47084436d3d4f..980667b45324e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorFactory.java @@ -89,6 +89,11 @@ protected Aggregator doCreateInternal( .build(name, config, precision(), searchContext, parent, metadata); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } + private int precision() { return precisionThreshold == null ? HyperLogLogPlusPlus.DEFAULT_PRECISION diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/ExtendedStatsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/ExtendedStatsAggregator.java index dba6cad69bec8..a187e65511864 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/ExtendedStatsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/ExtendedStatsAggregator.java @@ -33,11 +33,11 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.ScoreMode; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.DoubleArray; import org.opensearch.common.util.LongArray; import org.opensearch.core.ParseField; -import org.opensearch.common.lease.Releasables; import org.opensearch.index.fielddata.SortedNumericDoubleValues; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/ExtendedStatsAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/ExtendedStatsAggregatorFactory.java index 20203b22b2459..99b3d09517a1f 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/ExtendedStatsAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/ExtendedStatsAggregatorFactory.java @@ -94,4 +94,9 @@ protected Aggregator doCreateInternal( .getAggregator(ExtendedStatsAggregationBuilder.REGISTRY_KEY, config) .build(name, config, searchContext, parent, sigma, metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoCentroidAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoCentroidAggregator.java index 39790357ff9ca..656211608433a 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoCentroidAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoCentroidAggregator.java @@ -34,10 +34,10 @@ import org.apache.lucene.index.LeafReaderContext; import org.opensearch.common.geo.GeoPoint; +import org.opensearch.common.lease.Releasables; import 
org.opensearch.common.util.BigArrays; import org.opensearch.common.util.DoubleArray; import org.opensearch.common.util.LongArray; -import org.opensearch.common.lease.Releasables; import org.opensearch.index.fielddata.MultiGeoPointValues; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.InternalAggregation; diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoCentroidAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoCentroidAggregatorFactory.java index 1d450eeae98d8..a3fc91c6b62fb 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoCentroidAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoCentroidAggregatorFactory.java @@ -81,6 +81,11 @@ protected Aggregator doCreateInternal( .build(name, config, searchContext, parent, metadata); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } + static void registerAggregators(ValuesSourceRegistry.Builder builder) { builder.register(GeoCentroidAggregationBuilder.REGISTRY_KEY, CoreValuesSourceType.GEOPOINT, GeoCentroidAggregator::new, true); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/HDRPercentileRanksAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/HDRPercentileRanksAggregator.java index 8dea6c3a4b649..bd1b3ae20f3f7 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/HDRPercentileRanksAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/HDRPercentileRanksAggregator.java @@ -31,7 +31,6 @@ package org.opensearch.search.aggregations.metrics; -import org.HdrHistogram.DoubleHistogram; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.InternalAggregation; @@ -41,6 +40,8 @@ import java.io.IOException; import java.util.Map; +import org.HdrHistogram.DoubleHistogram; + /** * Aggregate all docs into an HDR percentile ranks values * diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/HDRPercentilesAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/HDRPercentilesAggregator.java index e8b1e73db689e..e468811af5996 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/HDRPercentilesAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/HDRPercentilesAggregator.java @@ -31,7 +31,6 @@ package org.opensearch.search.aggregations.metrics; -import org.HdrHistogram.DoubleHistogram; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.InternalAggregation; @@ -41,6 +40,8 @@ import java.io.IOException; import java.util.Map; +import org.HdrHistogram.DoubleHistogram; + /** * Aggregate all docs into a single HDR percentile * diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlus.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlus.java index 1bd56be601a89..7ab35eaed785c 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlus.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlus.java @@ -34,13 +34,13 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.packed.PackedInts; 
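A recurring change in this patch: each metric AggregatorFactory touched above (Avg, Cardinality, ExtendedStats, GeoCentroid) and most of those below now override supportsConcurrentSegmentSearch() to return true, opting that aggregation into concurrent segment search. A minimal self-contained model of the opt-in, assuming (as the pattern suggests) that the base factory defaults to false; all class names here are hypothetical, only the overridden method name comes from the patch:

abstract class AggregatorFactoryModel {
    // Factories opt in per aggregation type; the default stays false so a factory
    // remains on sequential segment search until it has been reviewed for thread safety.
    protected boolean supportsConcurrentSegmentSearch() {
        return false;
    }

    final boolean useConcurrentSearch(boolean clusterSettingEnabled) {
        // Both the cluster-level setting and the per-factory opt-in must agree.
        return clusterSettingEnabled && supportsConcurrentSegmentSearch();
    }
}

final class AvgFactoryModel extends AggregatorFactoryModel {
    @Override
    protected boolean supportsConcurrentSegmentSearch() {
        return true; // per-segment partial sums and counts merge safely at reduce time
    }
}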
+import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.BitArray; -import org.opensearch.core.common.util.ByteArray; import org.opensearch.common.util.ByteUtils; import org.opensearch.common.util.IntArray; -import org.opensearch.common.lease.Releasable; -import org.opensearch.common.lease.Releasables; +import org.opensearch.core.common.util.ByteArray; import java.nio.ByteBuffer; import java.nio.ByteOrder; @@ -49,16 +49,16 @@ * Hyperloglog++ counter, implemented based on pseudo code from * http://static.googleusercontent.com/media/research.google.com/fr//pubs/archive/40671.pdf and its appendix * https://docs.google.com/document/d/1gyjfMHy43U9OWBXxfaeG-3MjGzejW1dlpyMwEYAAWEI/view?fullscreen - * + * <p> * This implementation is different from the original implementation in that it uses a hash table instead of a sorted list for linear * counting. Although this requires more space and makes hyperloglog (which is less accurate) used sooner, this is also considerably faster. - * + * <p> * Trying to understand what this class does without having read the paper is considered adventurous. - * + * <p> * The HyperLogLogPlusPlus contains two algorithms, one for linear counting and the HyperLogLog algorithm. Initially hashes added to the * data structure are processed using the linear counting until a threshold defined by the precision is reached where the data is replayed * to the HyperLogLog algorithm and then this is used. - * + * <p> * It supports storing several HyperLogLogPlusPlus structures which are identified by a bucket number. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlusSparse.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlusSparse.java index 62ebe0974b955..558e9df93c804 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlusSparse.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlusSparse.java @@ -32,15 +32,15 @@ package org.opensearch.search.aggregations.metrics; -import org.opensearch.common.util.BigArrays; -import org.opensearch.common.util.IntArray; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; +import org.opensearch.common.util.BigArrays; +import org.opensearch.common.util.IntArray; /** * AbstractHyperLogLogPlusPlus instance that only supports linear counting. The maximum number of hashes supported * by the structure is determined at construction time. - * + * <p> * This structure expects all the added values to be distinct and therefore there are no checks * if an element has been previously added. 
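The HyperLogLogPlusPlus Javadoc above describes a two-phase design: hashes are first tracked with linear counting (via a hash table) and, once a precision-derived threshold is crossed, replayed into HyperLogLog registers. A compact, self-contained illustration of that hand-off follows; every name is hypothetical, the threshold is illustrative, and the real class packs its registers into BigArrays-backed structures rather than a byte[]:

import java.util.HashSet;
import java.util.Set;

final class HybridCardinalitySketch {
    private final int p;            // precision: 2^p registers in the HLL phase
    private final int threshold;    // switch-over point, derived from precision
    private final Set<Long> sparse = new HashSet<>(); // linear-counting phase
    private byte[] registers;                          // HLL phase, null until upgrade

    HybridCardinalitySketch(int precision) {
        this.p = precision;
        this.threshold = (1 << precision) * 3 / 4; // illustrative choice only
    }

    void collect(long hash) {
        if (registers == null) {
            sparse.add(hash);
            if (sparse.size() > threshold) {
                upgrade(); // replay everything seen so far into the HLL registers
            }
        } else {
            addToHll(hash);
        }
    }

    private void upgrade() {
        registers = new byte[1 << p];
        for (long h : sparse) {
            addToHll(h);
        }
        sparse.clear();
    }

    private void addToHll(long hash) {
        int bucket = (int) (hash >>> (64 - p));                // top p bits pick a register
        int rank = Long.numberOfTrailingZeros(hash >>> p) + 1; // one common rank convention
        registers[bucket] = (byte) Math.max(registers[bucket], rank);
    }
}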
* diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalCardinality.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalCardinality.java index ddb7287cd89a6..7e9511ffdd379 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalCardinality.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalCardinality.java @@ -32,9 +32,9 @@ package org.opensearch.search.aggregations.metrics; +import org.opensearch.common.util.BigArrays; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.util.BigArrays; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.InternalAggregation; diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalGeoCentroid.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalGeoCentroid.java index dd95e2ac55277..f1016d4d48b62 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalGeoCentroid.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalGeoCentroid.java @@ -33,8 +33,8 @@ package org.opensearch.search.aggregations.metrics; import org.apache.lucene.geo.GeoEncodingUtils; -import org.opensearch.core.ParseField; import org.opensearch.common.geo.GeoPoint; +import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.XContentBuilder; diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalHDRPercentileRanks.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalHDRPercentileRanks.java index 304bd95e03bc9..5517a035fc2e1 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalHDRPercentileRanks.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalHDRPercentileRanks.java @@ -31,7 +31,6 @@ package org.opensearch.search.aggregations.metrics; -import org.HdrHistogram.DoubleHistogram; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.search.DocValueFormat; @@ -39,6 +38,8 @@ import java.util.Iterator; import java.util.Map; +import org.HdrHistogram.DoubleHistogram; + /** * Implementation of HDR percentiles ranks * diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalHDRPercentiles.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalHDRPercentiles.java index b28d207cc192f..db5a7061af77f 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalHDRPercentiles.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalHDRPercentiles.java @@ -31,7 +31,6 @@ package org.opensearch.search.aggregations.metrics; -import org.HdrHistogram.DoubleHistogram; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.search.DocValueFormat; @@ -39,6 +38,8 @@ import java.util.Iterator; import java.util.Map; +import org.HdrHistogram.DoubleHistogram; + /** * Implementation of HDR percentiles agg * diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalScriptedMetric.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalScriptedMetric.java index 
6150940512406..fbcf4a4d48603 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalScriptedMetric.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalScriptedMetric.java @@ -34,7 +34,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.util.CollectionUtils; +import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.script.Script; import org.opensearch.script.ScriptedMetricAggContexts; diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalTopHits.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalTopHits.java index 39055e06f9be8..e82f49aa13600 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalTopHits.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalTopHits.java @@ -37,10 +37,10 @@ import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopFieldDocs; import org.apache.lucene.search.TotalHits.Relation; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.lucene.search.TopDocsAndMaxScore; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregator.java index aac16ca3f46cf..8108b8a726856 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregator.java @@ -37,9 +37,9 @@ import org.apache.lucene.search.CollectionTerminatedException; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.Bits; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.DoubleArray; -import org.opensearch.common.lease.Releasables; import org.opensearch.index.fielddata.NumericDoubleValues; import org.opensearch.index.fielddata.SortedNumericDoubleValues; import org.opensearch.search.DocValueFormat; diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregatorFactory.java index 96f1af94f2d07..4fe936c8b7797 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregatorFactory.java @@ -90,4 +90,9 @@ protected Aggregator doCreateInternal( .getAggregator(MaxAggregationBuilder.REGISTRY_KEY, config) .build(name, config, searchContext, parent, metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregator.java index fbea8a192fa9a..0b4684452a8dc 100644 --- 
a/server/src/main/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregator.java @@ -35,9 +35,9 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.ScoreMode; import org.opensearch.common.Nullable; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.ObjectArray; -import org.opensearch.common.lease.Releasables; import org.opensearch.index.fielddata.SortedNumericDoubleValues; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorFactory.java index 9776595d5a76d..3ef3c2afc7875 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorFactory.java @@ -95,4 +95,9 @@ protected Aggregator doCreateInternal( .getAggregator(MedianAbsoluteDeviationAggregationBuilder.REGISTRY_KEY, config) .build(name, config.getValuesSource(), config.format(), searchContext, parent, metadata, compression); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregator.java index 6c81879fd2b8e..946057e42ac88 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregator.java @@ -37,9 +37,9 @@ import org.apache.lucene.search.CollectionTerminatedException; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.Bits; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.DoubleArray; -import org.opensearch.common.lease.Releasables; import org.opensearch.index.fielddata.NumericDoubleValues; import org.opensearch.index.fielddata.SortedNumericDoubleValues; import org.opensearch.search.DocValueFormat; diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregatorFactory.java index b117f70c81baf..58fbe5edefd12 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregatorFactory.java @@ -90,4 +90,9 @@ protected Aggregator doCreateInternal( .getAggregator(MinAggregationBuilder.REGISTRY_KEY, config) .build(name, config, searchContext, parent, metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/PercentileRanksAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/PercentileRanksAggregatorFactory.java index 19352d30a5177..d3c18bcad1a43 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/PercentileRanksAggregatorFactory.java +++ 
b/server/src/main/java/org/opensearch/search/aggregations/metrics/PercentileRanksAggregatorFactory.java @@ -111,4 +111,9 @@ protected Aggregator doCreateInternal( .getAggregator(PercentileRanksAggregationBuilder.REGISTRY_KEY, config) .build(name, config.getValuesSource(), searchContext, parent, percents, percentilesConfig, keyed, config.format(), metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/PercentilesAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/PercentilesAggregatorFactory.java index e249863e25313..148e26e038923 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/PercentilesAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/PercentilesAggregatorFactory.java @@ -103,4 +103,9 @@ protected Aggregator doCreateInternal( .getAggregator(PercentilesAggregationBuilder.REGISTRY_KEY, config) .build(name, config.getValuesSource(), searchContext, parent, percents, percentilesConfig, keyed, config.format(), metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/ScriptedMetricAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/ScriptedMetricAggregator.java index f60f402939462..8e04bbae41107 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/ScriptedMetricAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/ScriptedMetricAggregator.java @@ -36,10 +36,10 @@ import org.apache.lucene.search.Scorable; import org.apache.lucene.search.ScoreMode; import org.opensearch.common.Nullable; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.util.CollectionUtils; -import org.opensearch.common.util.ObjectArray; import org.opensearch.common.lease.Releasables; +import org.opensearch.common.util.ObjectArray; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.script.Script; import org.opensearch.script.ScriptedMetricAggContexts; import org.opensearch.script.ScriptedMetricAggContexts.MapScript; diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/ScriptedMetricAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/ScriptedMetricAggregatorFactory.java index 5c831d60f75a8..58ef54ed64482 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/ScriptedMetricAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/ScriptedMetricAggregatorFactory.java @@ -124,6 +124,11 @@ public Aggregator createInternal( ); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } + private static Script deepCopyScript(Script script, SearchContext context, Map<String, Object> aggParams) { if (script != null) { Map<String, Object> params = mergeParams(aggParams, deepCopyParams(script.getParams(), context)); diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/StatsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/StatsAggregator.java index d3636fffb5256..c52638fe4b9c6 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/StatsAggregator.java +++ 
b/server/src/main/java/org/opensearch/search/aggregations/metrics/StatsAggregator.java @@ -33,10 +33,10 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.ScoreMode; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.DoubleArray; import org.opensearch.common.util.LongArray; -import org.opensearch.common.lease.Releasables; import org.opensearch.index.fielddata.SortedNumericDoubleValues; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/StatsAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/StatsAggregatorFactory.java index 0c10df174efa0..0e96e631044dd 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/StatsAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/StatsAggregatorFactory.java @@ -90,4 +90,9 @@ protected Aggregator doCreateInternal( .getAggregator(StatsAggregationBuilder.REGISTRY_KEY, config) .build(name, config, searchContext, parent, metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregator.java index 097c0658067d2..4b8e882cd69bc 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregator.java @@ -33,9 +33,9 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.ScoreMode; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.DoubleArray; -import org.opensearch.common.lease.Releasables; import org.opensearch.index.fielddata.SortedNumericDoubleValues; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregatorFactory.java index b3506ff958833..ef9b93920ba18 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregatorFactory.java @@ -90,4 +90,9 @@ protected Aggregator doCreateInternal( .getAggregator(SumAggregationBuilder.REGISTRY_KEY, config) .build(name, config, searchContext, parent, metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/TDigestState.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/TDigestState.java index f0a4236c90cd4..b61bbcfe1cbbf 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/TDigestState.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/TDigestState.java @@ -31,14 +31,15 @@ package org.opensearch.search.aggregations.metrics; -import com.tdunning.math.stats.AVLTreeDigest; -import com.tdunning.math.stats.Centroid; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import 
java.io.IOException; import java.util.Iterator; +import com.tdunning.math.stats.AVLTreeDigest; +import com.tdunning.math.stats.Centroid; + /** * Extension of {@link com.tdunning.math.stats.TDigest} with custom serialization. * diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/TopHitsAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/TopHitsAggregationBuilder.java index 0d12a9deb8f55..d49d0ad311f63 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/TopHitsAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/TopHitsAggregationBuilder.java @@ -34,9 +34,9 @@ import org.opensearch.common.Nullable; import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.IndexSettings; diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/TopHitsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/TopHitsAggregator.java index ee3cd2963c334..2d78360f20bc6 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/TopHitsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/TopHitsAggregator.java @@ -48,10 +48,10 @@ import org.apache.lucene.search.TotalHits; import org.opensearch.OpenSearchException; import org.opensearch.action.search.MaxScoreCollector; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.lucene.search.TopDocsAndMaxScore; import org.opensearch.common.util.LongObjectPagedHashMap; -import org.opensearch.common.lease.Releasables; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; import org.opensearch.search.aggregations.Aggregator; diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/TopHitsAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/TopHitsAggregatorFactory.java index e312983cd6d24..ba371327c6893 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/TopHitsAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/TopHitsAggregatorFactory.java @@ -155,4 +155,8 @@ public Aggregator createInternal( return new TopHitsAggregator(searchContext.fetchPhase(), subSearchContext, name, searchContext, parent, metadata); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregator.java index 7e526add36ba7..6f9be06231819 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregator.java @@ -34,9 +34,9 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.search.ScoreMode; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.LongArray; 
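TDigestState above is described as an extension of com.tdunning.math.stats.TDigest that adds custom StreamInput/StreamOutput serialization. For orientation, this is roughly how the underlying t-digest library is used on its own; AVLTreeDigest is the same class the reordered imports reference, and the compression value is illustrative:

import com.tdunning.math.stats.AVLTreeDigest;

public class TDigestDemo {
    public static void main(String[] args) {
        // Higher compression => more centroids retained => better accuracy, more memory.
        AVLTreeDigest digest = new AVLTreeDigest(100.0);
        for (int i = 1; i <= 10_000; i++) {
            digest.add(i);
        }
        // Approximate quantiles over the sketch; exact answers would be ~9900.5 and 5000.5.
        System.out.println("p99    ~= " + digest.quantile(0.99));
        System.out.println("median ~= " + digest.quantile(0.5));
    }
}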
-import org.opensearch.common.lease.Releasables; import org.opensearch.index.fielddata.MultiGeoPointValues; import org.opensearch.index.fielddata.SortedBinaryDocValues; import org.opensearch.search.aggregations.Aggregator; @@ -52,7 +52,7 @@ /** * A field data based aggregator that counts the number of values a specific field has within the aggregation context. - * + * <p> * This aggregator works in a multi-bucket mode, that is, when serves as a sub-aggregator, a single aggregator instance aggregates the * counts for all buckets owned by the parent aggregator) * diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregatorFactory.java index feed42e911856..4a04dd2e0a932 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregatorFactory.java @@ -84,4 +84,9 @@ protected Aggregator doCreateInternal( .getAggregator(ValueCountAggregationBuilder.REGISTRY_KEY, config) .build(name, config, searchContext, parent, metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/WeightedAvgAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/WeightedAvgAggregator.java index b0cfafc8366f7..d85ee651cb5e0 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/WeightedAvgAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/WeightedAvgAggregator.java @@ -33,9 +33,9 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.ScoreMode; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.DoubleArray; -import org.opensearch.common.lease.Releasables; import org.opensearch.index.fielddata.SortedNumericDoubleValues; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.AggregationExecutionException; diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/WeightedAvgAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/WeightedAvgAggregatorFactory.java index 9a27e9801d5fe..111245cae99e5 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/WeightedAvgAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/WeightedAvgAggregatorFactory.java @@ -95,4 +95,9 @@ protected Aggregator doCreateInternal( public String getStatsSubtype() { return configs.get(VALUE_FIELD.getPreferredName()).valueSourceType().typeName(); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketHelpers.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketHelpers.java index efdf9b46a0c5f..c7f2a29793bff 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketHelpers.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketHelpers.java @@ -32,12 +32,12 @@ package org.opensearch.search.aggregations.pipeline; +import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.ParseField; import 
org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.xcontent.XContentLocation; import org.opensearch.search.aggregations.AggregationExecutionException; import org.opensearch.search.aggregations.InternalMultiBucketAggregation; @@ -64,7 +64,7 @@ public class BucketHelpers { * a date_histogram might have empty buckets due to no data existing for that time interval. * This can cause problems for operations like a derivative, which relies on a continuous * function. - * + * <p> * "insert_zeros": empty buckets will be filled with zeros for all metrics * "skip": empty buckets will simply be ignored * diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/DerivativePipelineAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/DerivativePipelineAggregationBuilder.java index b35f7493691a9..e3996cd46b778 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/DerivativePipelineAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/DerivativePipelineAggregationBuilder.java @@ -32,12 +32,12 @@ package org.opensearch.search.aggregations.pipeline; +import org.opensearch.common.Rounding; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; -import org.opensearch.common.Rounding; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.TimeValue; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.search.DocValueFormat; diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/HoltWintersModel.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/HoltWintersModel.java index 7c48e319b76bf..fa0f889a967f4 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/HoltWintersModel.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/HoltWintersModel.java @@ -34,10 +34,10 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.common.Nullable; +import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.xcontent.XContentBuilder; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/InternalPercentilesBucket.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/InternalPercentilesBucket.java index 753299f49730f..0473d724bee20 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/InternalPercentilesBucket.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/InternalPercentilesBucket.java @@ -37,8 +37,8 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.InternalAggregation; -import 
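The BucketHelpers Javadoc above explains why pipeline aggs need a gap policy: a derivative over a date_histogram breaks on empty buckets, and "insert_zeros" versus "skip" decides how those gaps are handled. Wiring an explicit policy through the Java builders looks approximately like this; the builder method names reflect my reading of the OpenSearch client API and should be treated as assumptions, and the index field names are invented:

import org.opensearch.search.aggregations.AggregationBuilders;
import org.opensearch.search.aggregations.PipelineAggregatorBuilders;
import org.opensearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder;
import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval;
import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy;

public class GapPolicyExample {
    public static DateHistogramAggregationBuilder dailySalesDerivative() {
        return AggregationBuilders.dateHistogram("per_day")
            .field("timestamp")
            .calendarInterval(DateHistogramInterval.DAY)
            .subAggregation(AggregationBuilders.sum("sales").field("price"))
            // insert_zeros treats an empty day as 0, keeping the derivative continuous;
            // GapPolicy.SKIP would ignore the empty bucket instead.
            .subAggregation(
                PipelineAggregatorBuilders.derivative("daily_change", "sales").gapPolicy(GapPolicy.INSERT_ZEROS)
            );
    }
}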
org.opensearch.search.aggregations.metrics.InternalNumericMetricsAggregation; import org.opensearch.search.aggregations.metrics.InternalMax; +import org.opensearch.search.aggregations.metrics.InternalNumericMetricsAggregation; import org.opensearch.search.aggregations.metrics.Percentile; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovAvgPipelineAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovAvgPipelineAggregationBuilder.java index 1b023bc6efc94..296a51f1ec248 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovAvgPipelineAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovAvgPipelineAggregationBuilder.java @@ -32,28 +32,28 @@ package org.opensearch.search.aggregations.pipeline; -import static org.opensearch.search.aggregations.pipeline.PipelineAggregator.Parser.BUCKETS_PATH; -import static org.opensearch.search.aggregations.pipeline.PipelineAggregator.Parser.FORMAT; -import static org.opensearch.search.aggregations.pipeline.PipelineAggregator.Parser.GAP_POLICY; - -import java.io.IOException; -import java.text.ParseException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Objects; - +import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.common.xcontent.ParseFieldRegistry; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.logging.DeprecationLogger; -import org.opensearch.common.xcontent.ParseFieldRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; +import java.io.IOException; +import java.text.ParseException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import static org.opensearch.search.aggregations.pipeline.PipelineAggregator.Parser.BUCKETS_PATH; +import static org.opensearch.search.aggregations.pipeline.PipelineAggregator.Parser.FORMAT; +import static org.opensearch.search.aggregations.pipeline.PipelineAggregator.Parser.GAP_POLICY; + /** * Aggregation Builder for moving_avg * diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregationBuilder.java index 1e569b5caa020..d452da239d396 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregationBuilder.java @@ -33,9 +33,9 @@ package org.opensearch.search.aggregations.pipeline; import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.XContentBuilder; diff --git 
a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java index a4c3c14f3365f..3e97eda693b91 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java @@ -52,18 +52,18 @@ /** * This pipeline aggregation gives the user the ability to script functions that "move" across a window * of data, instead of single data points. It is the scripted version of MovingAvg pipeline agg. - * + * <p> * Through custom script contexts, we expose a number of convenience methods: - * - * - max - * - min - * - sum - * - unweightedAvg - * - linearWeightedAvg - * - ewma - * - holt - * - holtWintersMovAvg - * + * <ul> + * <li>max</li> + * <li> min</li> + * <li> sum</li> + * <li> unweightedAvg</li> + * <li> linearWeightedAvg</li> + * <li> ewma</li> + * <li> holt</li> + * <li> holtWintersMovAvg</li> + * </ul> * The user can also define any arbitrary logic via their own scripting, or combine with the above methods. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovingFunctions.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovingFunctions.java index bac486576f537..051b9c43f63f5 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovingFunctions.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovingFunctions.java @@ -75,7 +75,7 @@ public static double sum(double[] values) { /** * Calculate a simple unweighted (arithmetic) moving average. - * + * <p> * Only finite values are averaged. NaN or null are ignored. * If all values are missing/null/NaN, the return value will be NaN. * The average is based on the count of non-null, non-NaN values. @@ -94,7 +94,7 @@ public static double unweightedAvg(double[] values) { /** * Calculate a standard deviation over the values using the provided average. - * + * <p> * Only finite values are averaged. NaN or null are ignored. * If all values are missing/null/NaN, the return value will be NaN. * The average is based on the count of non-null, non-NaN values. @@ -118,7 +118,7 @@ public static double stdDev(double[] values, double avg) { /** * Calculate a linearly weighted moving average, such that older values are * linearly less important. "Time" is determined by position in collection - * + * <p> * Only finite values are averaged. NaN or null are ignored. * If all values are missing/null/NaN, the return value will be NaN * The average is based on the count of non-null, non-NaN values. @@ -141,11 +141,11 @@ public static double linearWeightedAvg(double[] values) { /** * * Calculate a exponentially weighted moving average. - * + * <p> * Alpha controls the smoothing of the data. Alpha = 1 retains no memory of past values * (e.g. a random walk), while alpha = 0 retains infinite memory of past values (e.g. * the series mean). Useful values are somewhere in between. Defaults to 0.5. - * + * <p> * Only finite values are averaged. NaN or null are ignored. * If all values are missing/null/NaN, the return value will be NaN * The average is based on the count of non-null, non-NaN values. @@ -171,13 +171,13 @@ public static double ewma(double[] values, double alpha) { /** * Calculate a doubly exponential weighted moving average - * + * <p> * Alpha controls the smoothing of the data. 
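Pausing the MovingFunctions Javadoc for a moment: the ewma contract documented above (alpha controls smoothing, NaN/missing samples are ignored, NaN is returned when everything is missing) condenses to a few lines. This is a re-derivation from the documented behavior, not the class's actual source:

final class MovingFunctionsSketch {
    /** Exponentially weighted moving average, per the contract in MovingFunctions. */
    static double ewma(double[] values, double alpha) {
        double avg = Double.NaN;
        for (double v : values) {
            if (Double.isNaN(v)) {
                continue; // missing/NaN samples are ignored per the contract above
            }
            // alpha = 1 keeps no memory of past values; alpha = 0 keeps infinite memory.
            avg = Double.isNaN(avg) ? v : alpha * v + (1 - alpha) * avg;
        }
        return avg; // NaN when every sample was missing
    }
}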
Alpha = 1 retains no memory of past values * (e.g. a random walk), while alpha = 0 retains infinite memory of past values (e.g. * the series mean). Useful values are somewhere in between. Defaults to 0.5. - * + * <p> * Beta is equivalent to alpha, but controls the smoothing of the trend instead of the data - * + * <p> * Only finite values are averaged. NaN or null are ignored. * If all values are missing/null/NaN, the return value will be NaN * The average is based on the count of non-null, non-NaN values. @@ -241,14 +241,14 @@ public static double[] holtForecast(double[] values, double alpha, double beta, /** * Calculate a triple exponential weighted moving average - * + * <p> * Alpha controls the smoothing of the data. Alpha = 1 retains no memory of past values * (e.g. a random walk), while alpha = 0 retains infinite memory of past values (e.g. * the series mean). Useful values are somewhere in between. Defaults to 0.5. - * + * <p> * Beta is equivalent to alpha, but controls the smoothing of the trend instead of the data. * Gamma is equivalent to alpha, but controls the smoothing of the seasonality instead of the data - * + * <p> * Only finite values are averaged. NaN or null are ignored. * If all values are missing/null/NaN, the return value will be NaN * The average is based on the count of non-null, non-NaN values. diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/PipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/PipelineAggregator.java index 859eba1f1aaff..151051a21f610 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/PipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/PipelineAggregator.java @@ -32,6 +32,7 @@ package org.opensearch.search.aggregations.pipeline; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.search.aggregations.InternalAggregation; @@ -48,8 +49,9 @@ /** * Base aggregator for pipline aggs * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class PipelineAggregator { /** * Parse the {@link PipelineAggregationBuilder} from a {@link XContentParser}. @@ -80,8 +82,9 @@ public interface Parser { * Tree of {@link PipelineAggregator}s to modify a tree of aggregations * after their final reduction. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class PipelineTree { /** * An empty tree of {@link PipelineAggregator}s. diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/SimulatedAnealingMinimizer.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/SimulatedAnealingMinimizer.java index a61a866228161..8427346357b0e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/SimulatedAnealingMinimizer.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/SimulatedAnealingMinimizer.java @@ -36,11 +36,11 @@ /** * A cost minimizer which will fit a MovAvgModel to the data. - * + * <p> * This optimizer uses naive simulated annealing. Random solutions in the problem space * are generated, compared against the last period of data, and the least absolute deviation * is recorded as a cost. - * + * <p> * If the new cost is better than the old cost, the new coefficients are chosen. 
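The SimulatedAnealingMinimizer Javadoc above (the "Anealing" spelling is upstream's own class name) describes the classic annealing accept/reject rule: better solutions always win, and worse ones survive with a temperature-dependent probability. The standard Metropolis-style formulation is sketched below; the class's actual acceptanceProbability() may differ in detail:

import java.util.Random;

final class AnnealingSketch {
    /** Accept better costs outright; otherwise accept with probability exp((old - new) / temperature). */
    static boolean accept(double oldCost, double newCost, double temperature, Random rng) {
        if (newCost < oldCost) {
            return true;
        }
        // As temperature falls, the chance of keeping a worse solution shrinks,
        // narrowing the search from broad exploration to local refinement.
        return rng.nextDouble() < Math.exp((oldCost - newCost) / temperature);
    }
}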
If the new * solution is worse, there is a temperature-dependent probability it will be randomly selected * anyway. This allows the algo to sample the problem space widely. As iterations progress, @@ -114,7 +114,7 @@ private static double acceptanceProbability(double oldCost, double newCost, doub /** * Calculates the "cost" of a model. E.g. when run on the training data, how closely do the predictions * match the test data - * + * <p> * Uses Least Absolute Differences to calculate error. Note that this is not scale free, but seems * to work fairly well in practice * diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInfo.java b/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInfo.java index 6801aacf095c6..c0aebb49af5ec 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInfo.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInfo.java @@ -32,10 +32,11 @@ package org.opensearch.search.aggregations.support; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.service.ReportingService; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.node.ReportingService; import java.io.IOException; import java.util.Collections; @@ -49,8 +50,9 @@ /** * Data describing an agg * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class AggregationInfo implements ReportingService.Info { private final Map<String, Set<String>> aggs; diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInspectionHelper.java b/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInspectionHelper.java index b4da1d10b4b68..a7de47bed2e6e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInspectionHelper.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInspectionHelper.java @@ -36,10 +36,10 @@ import org.opensearch.search.aggregations.bucket.filter.InternalFilter; import org.opensearch.search.aggregations.bucket.filter.InternalFilters; import org.opensearch.search.aggregations.bucket.global.InternalGlobal; -import org.opensearch.search.aggregations.bucket.histogram.InternalVariableWidthHistogram; import org.opensearch.search.aggregations.bucket.histogram.InternalAutoDateHistogram; import org.opensearch.search.aggregations.bucket.histogram.InternalDateHistogram; import org.opensearch.search.aggregations.bucket.histogram.InternalHistogram; +import org.opensearch.search.aggregations.bucket.histogram.InternalVariableWidthHistogram; import org.opensearch.search.aggregations.bucket.missing.InternalMissing; import org.opensearch.search.aggregations.bucket.nested.InternalNested; import org.opensearch.search.aggregations.bucket.nested.InternalReverseNested; @@ -78,9 +78,9 @@ * Provides a set of static helpers to determine if a particular type of InternalAggregation "has a value" * or not. This can be difficult to determine from an external perspective because each agg uses * different internal bookkeeping to determine if it is empty or not (NaN, +/-Inf, 0.0, etc). - * + * <p> * This set of helpers aim to ease that task by codifying what "empty" is for each agg. - * + * <p> * It is not entirely accurate for all aggs, since some do not expose or track the needed state * (e.g. 
sum doesn't record count, so it's not clear if the sum is 0.0 because it is empty * or because of summing to zero). Pipeline aggs in particular are not well supported diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/AggregationPath.java b/server/src/main/java/org/opensearch/search/aggregations/support/AggregationPath.java index 4cfd2070211d7..78180ff08fea9 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/AggregationPath.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/AggregationPath.java @@ -32,6 +32,7 @@ package org.opensearch.search.aggregations.support; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.search.aggregations.AggregationExecutionException; import org.opensearch.search.aggregations.Aggregator; @@ -131,8 +132,9 @@ public static AggregationPath parse(String path) { /** * Element in an agg path * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class PathElement { private final String fullName; diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/AggregationUsageService.java b/server/src/main/java/org/opensearch/search/aggregations/support/AggregationUsageService.java index eba64998014e2..41f03bd4da9e2 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/AggregationUsageService.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/AggregationUsageService.java @@ -32,7 +32,8 @@ package org.opensearch.search.aggregations.support; -import org.opensearch.node.ReportingService; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.service.ReportingService; import java.util.HashMap; import java.util.Map; @@ -41,8 +42,9 @@ /** * Service to track telemetry about aggregations * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class AggregationUsageService implements ReportingService<AggregationInfo> { private final Map<String, Map<String, LongAdder>> aggs; private final AggregationInfo info; @@ -52,8 +54,9 @@ public class AggregationUsageService implements ReportingService<AggregationInfo /** * Builder for the Agg usage service to track telemetry * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private final Map<String, Map<String, LongAdder>> aggs; diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/BaseMultiValuesSourceFieldConfig.java b/server/src/main/java/org/opensearch/search/aggregations/support/BaseMultiValuesSourceFieldConfig.java index 4a924cbd8305d..f1c647ae47e88 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/BaseMultiValuesSourceFieldConfig.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/BaseMultiValuesSourceFieldConfig.java @@ -8,13 +8,13 @@ package org.opensearch.search.aggregations.support; -import org.opensearch.core.ParseField; -import org.opensearch.common.Strings; import org.opensearch.common.TriConsumer; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ObjectParser; 
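From here on the patch repeatedly promotes classes (PipelineAggregator, AggregationInfo, AggregationPath.PathElement, AggregationUsageService, and several more below) from @opensearch.internal to @opensearch.api, pairing the Javadoc tag with the @PublicApi annotation. The pattern in isolation, on a hypothetical class, with my understanding of the annotation's intent noted in the comment:

import org.opensearch.common.annotation.PublicApi;

/**
 * A hypothetical extension point, now part of the supported surface.
 *
 * @opensearch.api
 */
@PublicApi(since = "1.0.0")
public class MyExtensionPoint {
    // Once tagged, the class is treated as supported public API, so breaking it
    // is expected to go through deprecation rather than silent change.
}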
import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; @@ -147,7 +147,7 @@ public int hashCode() { @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } abstract void doXContentBody(XContentBuilder builder, Params params) throws IOException; diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/CoreValuesSourceType.java b/server/src/main/java/org/opensearch/search/aggregations/support/CoreValuesSourceType.java index 224f9281705e1..f6d6fe28a56d3 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/CoreValuesSourceType.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/CoreValuesSourceType.java @@ -274,7 +274,7 @@ public ValuesSource replaceMissing(ValuesSource valuesSource, Object rawMissing, * MappedFieldType, it prefers to get the formatter from there. Only when a field can't be * resolved (which is to say script cases and unmapped field cases), it will fall back to calling this method on whatever * ValuesSourceType it was able to resolve to. - * + * <p> * For geoshape field we may never hit this function till we have aggregations which are only geo_shape * specific and not present on geo_points, as we use default CoreValueSource types for Geo based aggregations * as GEOPOINT @@ -411,7 +411,7 @@ public ValuesSource replaceMissing(ValuesSource valuesSource, Object rawMissing, @Override public DocValueFormat getFormatter(String format, ZoneId tz) { return new DocValueFormat.DateTime( - format == null ? DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER : DateFormatter.forPattern(format), + format == null ? DateFieldMapper.getDefaultDateTimeFormatter() : DateFormatter.forPattern(format), tz == null ? ZoneOffset.UTC : tz, // If we were just looking at fields, we could read the resolution from the field settings, but we need to deal with script // output, which has no way to indicate the resolution, so we need to default to something. Milliseconds is the standard. diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/FieldContext.java b/server/src/main/java/org/opensearch/search/aggregations/support/FieldContext.java index a6207917d52ea..ec8240a5b6b94 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/FieldContext.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/FieldContext.java @@ -31,6 +31,7 @@ package org.opensearch.search.aggregations.support; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.mapper.MappedFieldType; @@ -38,8 +39,9 @@ * Used by all field data based aggregators. This determine the context of the field data the aggregators are operating * in. It holds both the field names and the index field datas that are associated with them. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class FieldContext { private final String field; diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java b/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java index da1d9961ed81b..d21737a8366b2 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java @@ -227,6 +227,10 @@ public String toString() { return "anon SortedNumericDoubleValues of [" + super.toString() + "]"; } + @Override + public int advance(int target) throws IOException { + return values.advance(target); + } }; } diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/MultiTermsValuesSourceConfig.java b/server/src/main/java/org/opensearch/search/aggregations/support/MultiTermsValuesSourceConfig.java index a15608b85ad22..ab00dae80c4c5 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/MultiTermsValuesSourceConfig.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/MultiTermsValuesSourceConfig.java @@ -9,9 +9,9 @@ package org.opensearch.search.aggregations.support; import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java index e3f914ca259f6..c866238d12fcb 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java @@ -51,7 +51,7 @@ /** * Similar to {@link ValuesSourceAggregationBuilder}, except it references multiple ValuesSources (e.g. so that an aggregation * can pull values from multiple fields). - * + * <p> * A limitation of this class is that all the ValuesSource's being refereenced must be of the same type. 
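Alongside the API promotions, the patch adds advance(int) pass-throughs to wrapper doc-value implementations (the anonymous SortedNumericDoubleValues in MissingValues above, and ValuesSource's sorting wrapper below), so skipping ahead to a target document reaches the underlying iterator rather than stopping at the wrapper. The shape of the fix, with the interface reduced to the two relevant methods and all names hypothetical:

import java.io.IOException;

interface PerDocDoubleValues {
    boolean advanceExact(int target) throws IOException;
    int advance(int target) throws IOException;
}

final class ReplaceMissingValues implements PerDocDoubleValues {
    private final PerDocDoubleValues values;

    ReplaceMissingValues(PerDocDoubleValues values) {
        this.values = values;
    }

    @Override
    public boolean advanceExact(int target) throws IOException {
        values.advanceExact(target);
        return true; // a "missing" wrapper always has a value to report
    }

    @Override
    public int advance(int target) throws IOException {
        // Without this override, a skip would never be forwarded to the wrapped source.
        return values.advance(target);
    }
}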
* * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/MultiValuesSourceFieldConfig.java b/server/src/main/java/org/opensearch/search/aggregations/support/MultiValuesSourceFieldConfig.java index 9418823bab1b3..dc8561e9967af 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/MultiValuesSourceFieldConfig.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/MultiValuesSourceFieldConfig.java @@ -32,11 +32,11 @@ package org.opensearch.search.aggregations.support; -import org.opensearch.core.ParseField; import org.opensearch.common.TriFunction; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.AbstractQueryBuilder; diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/ValueType.java b/server/src/main/java/org/opensearch/search/aggregations/support/ValueType.java index 59fa2e03f0bc3..ad2ae2a41eec9 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/ValueType.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/ValueType.java @@ -32,6 +32,7 @@ package org.opensearch.search.aggregations.support; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -47,8 +48,9 @@ /** * Enum to signal what kind of value type is used in the aggregator * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum ValueType implements Writeable { STRING((byte) 1, "string", "string", CoreValuesSourceType.BYTES, DocValueFormat.RAW), @@ -61,7 +63,7 @@ public enum ValueType implements Writeable { "date", "date", CoreValuesSourceType.DATE, - new DocValueFormat.DateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, ZoneOffset.UTC, DateFieldMapper.Resolution.MILLISECONDS) + new DocValueFormat.DateTime(DateFieldMapper.getDefaultDateTimeFormatter(), ZoneOffset.UTC, DateFieldMapper.Resolution.MILLISECONDS) ), IP((byte) 6, "ip", "ip", CoreValuesSourceType.IP, DocValueFormat.IP), // TODO: what is the difference between "number" and "numeric"? 
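A recurring change across these hunks promotes formerly internal aggregation types to the public API surface: the @opensearch.internal javadoc tag becomes @opensearch.api and the type gains the @PublicApi annotation. A minimal sketch of the pattern as it appears throughout this diff (the class name here is hypothetical):

    import org.opensearch.common.annotation.PublicApi;

    /**
     * Example of the promotion applied across this diff.
     *
     * @opensearch.api
     */
    @PublicApi(since = "1.0.0") // "since" records the first release in which the API was public
    public class ExampleContext {
        // members are unchanged; only the compatibility contract of the type is widened
    }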
diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSource.java b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSource.java index e53118669385b..1f4dd429e094e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSource.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSource.java @@ -43,8 +43,9 @@ import org.apache.lucene.util.BytesRef; import org.opensearch.common.Rounding; import org.opensearch.common.Rounding.Prepared; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lucene.ScorerAware; -import org.opensearch.common.util.CollectionUtils; +import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.index.fielddata.AbstractSortingNumericDocValues; import org.opensearch.index.fielddata.DocValueBits; import org.opensearch.index.fielddata.GeoShapeValue; @@ -75,8 +76,9 @@ /** * Base class for a ValuesSource; the primitive data for an agg * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class ValuesSource { /** @@ -242,6 +244,10 @@ public FieldData(IndexOrdinalsFieldData indexFieldData) { this.indexFieldData = indexFieldData; } + public String getIndexFieldName() { + return this.indexFieldData.getFieldName(); + } + @Override public SortedBinaryDocValues bytesValues(LeafReaderContext context) { final LeafOrdinalsFieldData atomicFieldData = indexFieldData.load(context); @@ -574,6 +580,11 @@ public boolean advanceExact(int target) throws IOException { } return false; } + + @Override + public int advance(int target) throws IOException { + return doubleValues.advance(target); + } } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregationBuilder.java index 70382369d5615..7a73fafb4a809 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregationBuilder.java @@ -264,7 +264,7 @@ protected final void doWriteTo(StreamOutput out) throws IOException { /** * DO NOT OVERRIDE THIS! - * + * <p> * This method only exists for legacy support. No new aggregations need this, nor should they override it. * * @param version For backwards compatibility, subclasses can change behavior based on the version diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceConfig.java b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceConfig.java index b5bd355bb5847..d006b15df327c 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceConfig.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceConfig.java @@ -32,6 +32,7 @@ package org.opensearch.search.aggregations.support; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexGeoPointFieldData; import org.opensearch.index.fielddata.IndexNumericFieldData; @@ -50,8 +51,9 @@ * A configuration that tells aggregations how to retrieve data from the index * in order to run a specific aggregation. 
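Several hunks above add an advance(int) override to anonymous doc-values wrappers (in MissingValues and ValuesSource) so that skipping is forwarded straight to the wrapped iterator. A sketch of the delegation idiom, assuming the SortedNumericDoubleValues methods exercised in these hunks (advanceExact, nextValue, docValueCount, advance) make up its abstract surface:

    import java.io.IOException;

    import org.opensearch.index.fielddata.SortedNumericDoubleValues;

    // Hypothetical wrapper illustrating the delegation the hunks above add.
    final class ForwardingDoubleValues extends SortedNumericDoubleValues {
        private final SortedNumericDoubleValues values;

        ForwardingDoubleValues(SortedNumericDoubleValues values) {
            this.values = values;
        }

        @Override
        public boolean advanceExact(int target) throws IOException {
            return values.advanceExact(target);
        }

        @Override
        public double nextValue() throws IOException {
            return values.nextValue();
        }

        @Override
        public int docValueCount() {
            return values.docValueCount();
        }

        @Override
        public int advance(int target) throws IOException {
            // Forward skipping to the delegate rather than relying on a default implementation.
            return values.advance(target);
        }
    }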
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ValuesSourceConfig { /** diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceRegistry.java b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceRegistry.java index 0cb5a12bac4af..9f084833f8093 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceRegistry.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceRegistry.java @@ -31,6 +31,7 @@ package org.opensearch.search.aggregations.support; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.SearchModule; import org.opensearch.search.aggregations.AggregationExecutionException; @@ -49,15 +50,17 @@ * directly instantiate this class, instead get an already-configured copy from {@link QueryShardContext#getValuesSourceRegistry()}, or (in * the case of some test scenarios only) directly from {@link SearchModule#getValuesSourceRegistry()} * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ValuesSourceRegistry { /** * The registry key for the values source registry key * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class RegistryKey<T> { private final String name; private final Class<T> supplierType; @@ -90,8 +93,9 @@ public int hashCode() { /** * Builder for the values source registry * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private final AggregationUsageService.Builder usageServiceBuilder; private Map<RegistryKey<?>, List<Map.Entry<ValuesSourceType, ?>>> aggregatorRegistry = new HashMap<>(); diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceType.java b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceType.java index 86102e63297d1..ff81c08193a0b 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceType.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceType.java @@ -32,6 +32,7 @@ package org.opensearch.search.aggregations.support; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.script.AggregationScript; import org.opensearch.search.DocValueFormat; @@ -43,14 +44,14 @@ * {@link ValuesSourceType} represents a collection of fields that share a common set of operations, for example all numeric fields. * Aggregations declare their support for a given ValuesSourceType (via {@link ValuesSourceRegistry.Builder#register}), * and should then not need to care about the fields which use that ValuesSourceType. - * + * <p> * ValuesSourceTypes provide a set of methods to instantiate concrete {@link ValuesSource} instances, based on the actual source of the * data for the aggregations. In general, aggregations should not call these methods, but rather rely on {@link ValuesSourceConfig} to have * selected the correct implementation. - * + * <p> * ValuesSourceTypes should be stateless. We recommend that plugins define an enum for their ValuesSourceTypes, even if the plugin only * intends to define one ValuesSourceType. ValuesSourceTypes are not serialized as part of the aggregations framework. - * + * <p> * Prefer reusing an existing ValuesSourceType (ideally from {@link CoreValuesSourceType}) over creating a new type. 
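The ValuesSourceType javadoc being edited here recommends that plugins model their types as a stateless enum and register aggregator support through ValuesSourceRegistry.Builder#register. Since the full ValuesSourceType contract is not visible in this diff, the sketch below mirrors only its shape with stand-in types (SimpleFormatter and the enum are hypothetical):

    import java.time.ZoneId;
    import java.util.Locale;

    // Stand-in for org.opensearch.search.DocValueFormat, whose contract is not shown here.
    interface SimpleFormatter {
        String format(long value, ZoneId tz);
    }

    // The "one enum per plugin" pattern the javadoc recommends; enum constants carry no state.
    enum MyPluginValuesSourceType {
        HISTOGRAM,
        SKETCH;

        // Mirrors the getFormatter(String, ZoneId) shape from the CoreValuesSourceType hunk above.
        SimpleFormatter getFormatter(String format, ZoneId tz) {
            return (value, zone) -> String.format(Locale.ROOT, "%d@%s", value, zone);
        }

        String typeName() {
            return name().toLowerCase(Locale.ROOT);
        }
    }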
There are some cases * where creating a new type is necessary however. In particular, consider a new ValuesSourceType if the field has custom encoding/decoding * requirements; if the field needs to expose additional information to the aggregation (e.g. {@link ValuesSource.Range#rangeType()}); or @@ -58,8 +59,9 @@ * a sum aggregation). When adding a new ValuesSourceType, new aggregators should be added and registered at the same time, to add support * for the new type to existing aggregations, as appropriate. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ValuesSourceType { /** * Called when an aggregation is operating over a known empty set (usually because the field isn't specified), this method allows for diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/values/ScriptBytesValues.java b/server/src/main/java/org/opensearch/search/aggregations/support/values/ScriptBytesValues.java index e53d9b83a814c..349bd8e14edf6 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/values/ScriptBytesValues.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/values/ScriptBytesValues.java @@ -33,7 +33,7 @@ import org.apache.lucene.search.Scorable; import org.opensearch.common.lucene.ScorerAware; -import org.opensearch.common.util.CollectionUtils; +import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.index.fielddata.SortedBinaryDocValues; import org.opensearch.index.fielddata.SortingBinaryDocValues; import org.opensearch.script.AggregationScript; diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/values/ScriptDoubleValues.java b/server/src/main/java/org/opensearch/search/aggregations/support/values/ScriptDoubleValues.java index 22d6fc707bec3..9b73f1b2155fb 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/values/ScriptDoubleValues.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/values/ScriptDoubleValues.java @@ -32,12 +32,11 @@ package org.opensearch.search.aggregations.support.values; import org.apache.lucene.search.Scorable; -import org.joda.time.ReadableInstant; import org.opensearch.common.lucene.ScorerAware; import org.opensearch.index.fielddata.SortingNumericDoubleValues; import org.opensearch.script.AggregationScript; -import org.opensearch.script.JodaCompatibleZonedDateTime; import org.opensearch.search.aggregations.AggregationExecutionException; +import org.joda.time.ReadableInstant; import java.io.IOException; import java.lang.reflect.Array; @@ -111,8 +110,6 @@ private static double toDoubleValue(Object o) { return ((ReadableInstant) o).getMillis(); } else if (o instanceof ZonedDateTime) { return ((ZonedDateTime) o).toInstant().toEpochMilli(); - } else if (o instanceof JodaCompatibleZonedDateTime) { - return ((JodaCompatibleZonedDateTime) o).toInstant().toEpochMilli(); } else if (o instanceof Boolean) { // We do expose boolean fields as boolean in scripts, however aggregations still expect // that scripts return the same internal representation as regular fields, so boolean diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/values/ScriptLongValues.java b/server/src/main/java/org/opensearch/search/aggregations/support/values/ScriptLongValues.java index a14f6165b9ac2..b62178f85273a 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/values/ScriptLongValues.java +++ 
b/server/src/main/java/org/opensearch/search/aggregations/support/values/ScriptLongValues.java @@ -33,12 +33,11 @@ import org.apache.lucene.search.Scorable; import org.apache.lucene.util.LongValues; -import org.joda.time.ReadableInstant; import org.opensearch.common.lucene.ScorerAware; import org.opensearch.index.fielddata.AbstractSortingNumericDocValues; import org.opensearch.script.AggregationScript; -import org.opensearch.script.JodaCompatibleZonedDateTime; import org.opensearch.search.aggregations.AggregationExecutionException; +import org.joda.time.ReadableInstant; import java.io.IOException; import java.lang.reflect.Array; @@ -110,8 +109,6 @@ private static long toLongValue(Object o) { return ((ReadableInstant) o).getMillis(); } else if (o instanceof ZonedDateTime) { return ((ZonedDateTime) o).toInstant().toEpochMilli(); - } else if (o instanceof JodaCompatibleZonedDateTime) { - return ((JodaCompatibleZonedDateTime) o).toInstant().toEpochMilli(); } else if (o instanceof Boolean) { // We do expose boolean fields as boolean in scripts, however aggregations still expect // that scripts return the same internal representation as regular fields, so boolean diff --git a/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java b/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java index 246078e7a8eda..ebf9623eb367a 100644 --- a/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java +++ b/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java @@ -13,7 +13,7 @@ import org.opensearch.ExceptionsHelper; import org.opensearch.action.search.SearchShardTask; import org.opensearch.action.search.SearchTask; -import org.opensearch.common.component.AbstractLifecycleComponent; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.monitor.jvm.JvmStats; @@ -255,8 +255,8 @@ boolean isNodeInDuress() { return isNodeInDuress; } - /** - * Returns true if the increase in heap usage is due to search requests. + /* + Returns true if the increase in heap usage is due to search requests. 
*/ /** @@ -399,6 +399,7 @@ public SearchBackpressureStats nodeStats() { SearchTaskStats searchTaskStats = new SearchTaskStats( searchBackpressureStates.get(SearchTask.class).getCancellationCount(), searchBackpressureStates.get(SearchTask.class).getLimitReachedCount(), + searchBackpressureStates.get(SearchTask.class).getCompletionCount(), taskTrackers.get(SearchTask.class) .stream() .collect(Collectors.toUnmodifiableMap(t -> TaskResourceUsageTrackerType.fromName(t.name()), t -> t.stats(searchTasks))) @@ -407,6 +408,7 @@ public SearchBackpressureStats nodeStats() { SearchShardTaskStats searchShardTaskStats = new SearchShardTaskStats( searchBackpressureStates.get(SearchShardTask.class).getCancellationCount(), searchBackpressureStates.get(SearchShardTask.class).getLimitReachedCount(), + searchBackpressureStates.get(SearchShardTask.class).getCompletionCount(), taskTrackers.get(SearchShardTask.class) .stream() .collect(Collectors.toUnmodifiableMap(t -> TaskResourceUsageTrackerType.fromName(t.name()), t -> t.stats(searchShardTasks))) diff --git a/server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java b/server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java index d20e3e50d419f..79494eb0d3c24 100644 --- a/server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java +++ b/server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java @@ -54,8 +54,8 @@ private static class Defaults { /** * Defines the percentage of tasks to cancel relative to the number of successful task completions. * In other words, it is the number of tokens added to the bucket on each successful task completion. - * - * The setting below is deprecated. + * <p> + * The setting below is deprecated. The new setting is in {@link SearchShardTaskSettings}. * To keep backwards compatibility, the old setting is retained, and it's also used as the fallback for the new one. */ public static final Setting<Double> SETTING_CANCELLATION_RATIO = Setting.doubleSetting( @@ -71,8 +71,8 @@ private static class Defaults { /** * Defines the number of tasks to cancel per unit time (in millis). * In other words, it is the number of tokens added to the bucket each millisecond. - * - * The setting below is deprecated. + * <p> + * The setting below is deprecated. The new setting is in {@link SearchShardTaskSettings}. * To keep backwards compatibility, the old setting is retained, and it's also used as the fallback for the new one. */ public static final Setting<Double> SETTING_CANCELLATION_RATE = Setting.doubleSetting( @@ -86,8 +86,8 @@ private static class Defaults { /** * Defines the maximum number of tasks that can be cancelled before being rate-limited. - * - * The setting below is deprecated. + * <p> + * The setting below is deprecated. The new setting is in {@link SearchShardTaskSettings}. * To keep backwards compatibility, the old setting is retained, and it's also used as the fallback for the new one.
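The three cancellation settings above describe a token bucket: each successful completion adds "ratio" tokens, each elapsed millisecond adds "rate" tokens, each cancellation spends one token, and "burst" caps how many tokens may accumulate. A self-contained sketch of that accounting (not the actual SearchBackpressureService implementation):

    // Minimal token bucket matching the semantics described in the setting javadocs above.
    final class CancellationTokenBucket {
        private final double ratio; // tokens added per successful task completion
        private final double rate;  // tokens added per elapsed millisecond
        private final double burst; // maximum number of tokens that may accumulate
        private double tokens;
        private long lastRefillMillis;

        CancellationTokenBucket(double ratio, double rate, double burst, long nowMillis) {
            this.ratio = ratio;
            this.rate = rate;
            this.burst = burst;
            this.tokens = burst;
            this.lastRefillMillis = nowMillis;
        }

        void onTaskCompleted(long nowMillis) {
            refill(nowMillis);
            tokens = Math.min(burst, tokens + ratio);
        }

        // Returns true if a cancellation is currently allowed, consuming one token.
        boolean tryCancel(long nowMillis) {
            refill(nowMillis);
            if (tokens >= 1.0) {
                tokens -= 1.0;
                return true;
            }
            return false; // rate-limited until completions or elapsed time add more tokens
        }

        private void refill(long nowMillis) {
            tokens = Math.min(burst, tokens + (nowMillis - lastRefillMillis) * rate);
            lastRefillMillis = nowMillis;
        }
    }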
*/ public static final Setting<Double> SETTING_CANCELLATION_BURST = Setting.doubleSetting( diff --git a/server/src/main/java/org/opensearch/search/backpressure/stats/SearchShardTaskStats.java b/server/src/main/java/org/opensearch/search/backpressure/stats/SearchShardTaskStats.java index 678c19d83fb96..ffe97d125b27a 100644 --- a/server/src/main/java/org/opensearch/search/backpressure/stats/SearchShardTaskStats.java +++ b/server/src/main/java/org/opensearch/search/backpressure/stats/SearchShardTaskStats.java @@ -8,6 +8,7 @@ package org.opensearch.search.backpressure.stats; +import org.opensearch.Version; import org.opensearch.common.collect.MapBuilder; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -30,21 +31,29 @@ public class SearchShardTaskStats implements ToXContentObject, Writeable { private final long cancellationCount; private final long limitReachedCount; + private final long completionCount; private final Map<TaskResourceUsageTrackerType, TaskResourceUsageTracker.Stats> resourceUsageTrackerStats; public SearchShardTaskStats( long cancellationCount, long limitReachedCount, + long completionCount, Map<TaskResourceUsageTrackerType, TaskResourceUsageTracker.Stats> resourceUsageTrackerStats ) { this.cancellationCount = cancellationCount; this.limitReachedCount = limitReachedCount; + this.completionCount = completionCount; this.resourceUsageTrackerStats = resourceUsageTrackerStats; } public SearchShardTaskStats(StreamInput in) throws IOException { this.cancellationCount = in.readVLong(); this.limitReachedCount = in.readVLong(); + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + completionCount = in.readVLong(); + } else { + completionCount = -1; + } MapBuilder<TaskResourceUsageTrackerType, TaskResourceUsageTracker.Stats> builder = new MapBuilder<>(); builder.put(TaskResourceUsageTrackerType.CPU_USAGE_TRACKER, in.readOptionalWriteable(CpuUsageTracker.Stats::new)); @@ -62,6 +71,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(entry.getKey().getName(), entry.getValue()); } builder.endObject(); + if (completionCount != -1) { + builder.field("completion_count", completionCount); + } builder.startObject("cancellation_stats") .field("cancellation_count", cancellationCount) @@ -75,6 +87,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public void writeTo(StreamOutput out) throws IOException { out.writeVLong(cancellationCount); out.writeVLong(limitReachedCount); + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeVLong(completionCount); + } out.writeOptionalWriteable(resourceUsageTrackerStats.get(TaskResourceUsageTrackerType.CPU_USAGE_TRACKER)); out.writeOptionalWriteable(resourceUsageTrackerStats.get(TaskResourceUsageTrackerType.HEAP_USAGE_TRACKER)); @@ -88,11 +103,12 @@ public boolean equals(Object o) { SearchShardTaskStats that = (SearchShardTaskStats) o; return cancellationCount == that.cancellationCount && limitReachedCount == that.limitReachedCount + && completionCount == that.completionCount && resourceUsageTrackerStats.equals(that.resourceUsageTrackerStats); } @Override public int hashCode() { - return Objects.hash(cancellationCount, limitReachedCount, resourceUsageTrackerStats); + return Objects.hash(cancellationCount, limitReachedCount, resourceUsageTrackerStats, completionCount); } } diff --git a/server/src/main/java/org/opensearch/search/backpressure/stats/SearchTaskStats.java 
b/server/src/main/java/org/opensearch/search/backpressure/stats/SearchTaskStats.java index 302350104bd3a..a7f9b4e3d004f 100644 --- a/server/src/main/java/org/opensearch/search/backpressure/stats/SearchTaskStats.java +++ b/server/src/main/java/org/opensearch/search/backpressure/stats/SearchTaskStats.java @@ -8,6 +8,7 @@ package org.opensearch.search.backpressure.stats; +import org.opensearch.Version; import org.opensearch.common.collect.MapBuilder; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -31,21 +32,29 @@ public class SearchTaskStats implements ToXContentObject, Writeable { private final long cancellationCount; private final long limitReachedCount; + private final long completionCount; private final Map<TaskResourceUsageTrackerType, TaskResourceUsageTracker.Stats> resourceUsageTrackerStats; public SearchTaskStats( long cancellationCount, long limitReachedCount, + long completionCount, Map<TaskResourceUsageTrackerType, TaskResourceUsageTracker.Stats> resourceUsageTrackerStats ) { this.cancellationCount = cancellationCount; this.limitReachedCount = limitReachedCount; + this.completionCount = completionCount; this.resourceUsageTrackerStats = resourceUsageTrackerStats; } public SearchTaskStats(StreamInput in) throws IOException { this.cancellationCount = in.readVLong(); this.limitReachedCount = in.readVLong(); + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + this.completionCount = in.readVLong(); + } else { + this.completionCount = -1; + } MapBuilder<TaskResourceUsageTrackerType, TaskResourceUsageTracker.Stats> builder = new MapBuilder<>(); builder.put(TaskResourceUsageTrackerType.CPU_USAGE_TRACKER, in.readOptionalWriteable(CpuUsageTracker.Stats::new)); @@ -63,6 +72,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(entry.getKey().getName(), entry.getValue()); } builder.endObject(); + if (completionCount != -1) { + builder.field("completion_count", completionCount); + } builder.startObject("cancellation_stats") .field("cancellation_count", cancellationCount) @@ -76,6 +88,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public void writeTo(StreamOutput out) throws IOException { out.writeVLong(cancellationCount); out.writeVLong(limitReachedCount); + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeVLong(completionCount); + } out.writeOptionalWriteable(resourceUsageTrackerStats.get(TaskResourceUsageTrackerType.CPU_USAGE_TRACKER)); out.writeOptionalWriteable(resourceUsageTrackerStats.get(TaskResourceUsageTrackerType.HEAP_USAGE_TRACKER)); @@ -89,11 +104,12 @@ public boolean equals(Object o) { SearchTaskStats that = (SearchTaskStats) o; return cancellationCount == that.cancellationCount && limitReachedCount == that.limitReachedCount + && completionCount == that.completionCount && resourceUsageTrackerStats.equals(that.resourceUsageTrackerStats); } @Override public int hashCode() { - return Objects.hash(cancellationCount, limitReachedCount, resourceUsageTrackerStats); + return Objects.hash(cancellationCount, limitReachedCount, resourceUsageTrackerStats, completionCount); } } diff --git a/server/src/main/java/org/opensearch/search/backpressure/trackers/CpuUsageTracker.java b/server/src/main/java/org/opensearch/search/backpressure/trackers/CpuUsageTracker.java index a5f7d7061dbac..507953cb4a20e 100644 --- a/server/src/main/java/org/opensearch/search/backpressure/trackers/CpuUsageTracker.java +++ 
b/server/src/main/java/org/opensearch/search/backpressure/trackers/CpuUsageTracker.java @@ -8,9 +8,9 @@ package org.opensearch.search.backpressure.trackers; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.TimeValue; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskCancellation; diff --git a/server/src/main/java/org/opensearch/search/backpressure/trackers/ElapsedTimeTracker.java b/server/src/main/java/org/opensearch/search/backpressure/trackers/ElapsedTimeTracker.java index 666cb7d605017..f1e8abe7e3230 100644 --- a/server/src/main/java/org/opensearch/search/backpressure/trackers/ElapsedTimeTracker.java +++ b/server/src/main/java/org/opensearch/search/backpressure/trackers/ElapsedTimeTracker.java @@ -8,9 +8,9 @@ package org.opensearch.search.backpressure.trackers; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.TimeValue; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskCancellation; diff --git a/server/src/main/java/org/opensearch/search/backpressure/trackers/HeapUsageTracker.java b/server/src/main/java/org/opensearch/search/backpressure/trackers/HeapUsageTracker.java index fbd2155c0ef89..56b9f947f6e37 100644 --- a/server/src/main/java/org/opensearch/search/backpressure/trackers/HeapUsageTracker.java +++ b/server/src/main/java/org/opensearch/search/backpressure/trackers/HeapUsageTracker.java @@ -11,13 +11,13 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.settings.Setting; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.util.MovingAverage; -import org.opensearch.monitor.jvm.JvmStats; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.monitor.jvm.JvmStats; import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskCancellation; diff --git a/server/src/main/java/org/opensearch/search/backpressure/trackers/TaskResourceUsageTracker.java b/server/src/main/java/org/opensearch/search/backpressure/trackers/TaskResourceUsageTracker.java index 8f772c91750ab..ce15e9e9b6622 100644 --- a/server/src/main/java/org/opensearch/search/backpressure/trackers/TaskResourceUsageTracker.java +++ b/server/src/main/java/org/opensearch/search/backpressure/trackers/TaskResourceUsageTracker.java @@ -10,8 +10,8 @@ import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.xcontent.ToXContentObject; -import org.opensearch.tasks.TaskCancellation; import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskCancellation; import java.util.List; import java.util.Optional; diff --git a/server/src/main/java/org/opensearch/search/builder/PointInTimeBuilder.java b/server/src/main/java/org/opensearch/search/builder/PointInTimeBuilder.java index 
26a7738177759..20bdb71fd1923 100644 --- a/server/src/main/java/org/opensearch/search/builder/PointInTimeBuilder.java +++ b/server/src/main/java/org/opensearch/search/builder/PointInTimeBuilder.java @@ -33,11 +33,12 @@ package org.opensearch.search.builder; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.TimeValue; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; @@ -50,8 +51,9 @@ * A search request with a point in time will execute using the reader contexts associated with that point time * instead of the latest reader contexts. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.3.0") public final class PointInTimeBuilder implements Writeable, ToXContentObject { private static final ParseField ID_FIELD = new ParseField("id"); private static final ParseField KEEP_ALIVE_FIELD = new ParseField("keep_alive"); diff --git a/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java index 78cb895a0a4c0..1a5a9dc6d1f03 100644 --- a/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java @@ -36,19 +36,20 @@ import org.opensearch.Version; import org.opensearch.common.Booleans; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.logging.DeprecationLogger; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.common.Strings; -import org.opensearch.core.ParseField; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentHelper; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryRewriteContext; @@ -91,8 +92,9 @@ * * @see org.opensearch.action.search.SearchRequest#source(SearchSourceBuilder) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class SearchSourceBuilder implements Writeable, ToXContentObject, Rewriteable<SearchSourceBuilder> { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(SearchSourceBuilder.class); @@ -115,6 +117,7 @@ public final class SearchSourceBuilder implements Writeable, ToXContentObject, R public static final ParseField IGNORE_FAILURE_FIELD = new 
ParseField("ignore_failure"); public static final ParseField SORT_FIELD = new ParseField("sort"); public static final ParseField TRACK_SCORES_FIELD = new ParseField("track_scores"); + public static final ParseField INCLUDE_NAMED_QUERIES_SCORE = new ParseField("include_named_queries_score"); public static final ParseField TRACK_TOTAL_HITS_FIELD = new ParseField("track_total_hits"); public static final ParseField INDICES_BOOST_FIELD = new ParseField("indices_boost"); public static final ParseField AGGREGATIONS_FIELD = new ParseField("aggregations"); @@ -173,6 +176,8 @@ public static HighlightBuilder highlight() { private boolean trackScores = false; + private Boolean includeNamedQueriesScore; + private Integer trackTotalHitsUpTo; private SearchAfterBuilder searchAfterBuilder; @@ -274,6 +279,9 @@ public SearchSourceBuilder(StreamInput in) throws IOException { searchPipelineSource = in.readMap(); } } + if (in.getVersion().onOrAfter(Version.V_2_13_0)) { + includeNamedQueriesScore = in.readOptionalBoolean(); + } } @Override @@ -339,6 +347,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeMap(searchPipelineSource); } } + if (out.getVersion().onOrAfter(Version.V_2_13_0)) { + out.writeOptionalBoolean(includeNamedQueriesScore); + } } /** @@ -566,6 +577,22 @@ public SearchSourceBuilder trackScores(boolean trackScores) { return this; } + /** + * Applies when there are named queries, to also return their scores. + * Defaults to {@code false}. + */ + public SearchSourceBuilder includeNamedQueriesScores(boolean includeNamedQueriesScore) { + this.includeNamedQueriesScore = includeNamedQueriesScore; + return this; + } + + /** + * Indicates whether scores will be returned as part of every matched named query. + */ + public boolean includeNamedQueriesScore() { + return includeNamedQueriesScore != null && includeNamedQueriesScore; + } + /** * Indicates whether scores will be tracked for this request.
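The hunks above add the include_named_queries_score request flag, gate it on Version.V_2_13_0 for the wire format, and expose it through includeNamedQueriesScores(boolean). A hedged usage sketch (the index field and query name are made up; queryName is the standard way to label a clause):

    import org.opensearch.index.query.QueryBuilders;
    import org.opensearch.search.builder.SearchSourceBuilder;

    class NamedQueryScoreExample {
        // Request per-named-query scores for each hit that matches the labelled clause.
        static SearchSourceBuilder build() {
            return new SearchSourceBuilder()
                .query(QueryBuilders.termQuery("status", "active").queryName("status_filter"))
                .includeNamedQueriesScores(true); // written to the wire only for nodes on or after V_2_13_0
        }
    }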
*/ @@ -1101,6 +1128,7 @@ private SearchSourceBuilder shallowCopy( rewrittenBuilder.terminateAfter = terminateAfter; rewrittenBuilder.timeout = timeout; rewrittenBuilder.trackScores = trackScores; + rewrittenBuilder.includeNamedQueriesScore = includeNamedQueriesScore; rewrittenBuilder.trackTotalHitsUpTo = trackTotalHitsUpTo; rewrittenBuilder.version = version; rewrittenBuilder.seqNoAndPrimaryTerm = seqNoAndPrimaryTerm; @@ -1153,6 +1181,8 @@ public void parseXContent(XContentParser parser, boolean checkTrailingTokens) th explain = parser.booleanValue(); } else if (TRACK_SCORES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { trackScores = parser.booleanValue(); + } else if (INCLUDE_NAMED_QUERIES_SCORE.match(currentFieldName, parser.getDeprecationHandler())) { + includeNamedQueriesScore = parser.booleanValue(); } else if (TRACK_TOTAL_HITS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { if (token == XContentParser.Token.VALUE_BOOLEAN || (token == XContentParser.Token.VALUE_STRING && Booleans.isBoolean(parser.text()))) { @@ -1416,6 +1446,10 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t builder.field(TRACK_SCORES_FIELD.getPreferredName(), true); } + if (includeNamedQueriesScore != null) { + builder.field(INCLUDE_NAMED_QUERIES_SCORE.getPreferredName(), includeNamedQueriesScore); + } + if (trackTotalHitsUpTo != null) { builder.field(TRACK_TOTAL_HITS_FIELD.getPreferredName(), trackTotalHitsUpTo); } @@ -1493,8 +1527,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws /** * Boosts on an index * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class IndexBoost implements Writeable, ToXContentObject { private final String index; private final float boost; @@ -1594,8 +1629,9 @@ public boolean equals(Object obj) { /** * Script field * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class ScriptField implements Writeable, ToXContentFragment { private final boolean ignoreFailure; @@ -1745,6 +1781,7 @@ public int hashCode() { terminateAfter, timeout, trackScores, + includeNamedQueriesScore, version, seqNoAndPrimaryTerm, profile, @@ -1787,6 +1824,7 @@ public boolean equals(Object obj) { && Objects.equals(terminateAfter, other.terminateAfter) && Objects.equals(timeout, other.timeout) && Objects.equals(trackScores, other.trackScores) + && Objects.equals(includeNamedQueriesScore, other.includeNamedQueriesScore) && Objects.equals(version, other.version) && Objects.equals(seqNoAndPrimaryTerm, other.seqNoAndPrimaryTerm) && Objects.equals(profile, other.profile) @@ -1803,7 +1841,7 @@ public String toString() { public String toString(Params params) { try { - return XContentHelper.toXContent(this, XContentType.JSON, params, true).utf8ToString(); + return XContentHelper.toXContent(this, MediaTypeRegistry.JSON, params, true).utf8ToString(); } catch (IOException e) { throw new OpenSearchException(e); } diff --git a/server/src/main/java/org/opensearch/search/collapse/CollapseBuilder.java b/server/src/main/java/org/opensearch/search/collapse/CollapseBuilder.java index 288ca9339f8bd..a9ff55b3a90b1 100644 --- a/server/src/main/java/org/opensearch/search/collapse/CollapseBuilder.java +++ b/server/src/main/java/org/opensearch/search/collapse/CollapseBuilder.java @@ -31,12 +31,13 @@ package org.opensearch.search.collapse; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import 
org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentObject; @@ -57,8 +58,9 @@ /** * A builder that enables field collapsing on a search request. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CollapseBuilder implements Writeable, ToXContentObject { public static final ParseField FIELD_FIELD = new ParseField("field"); public static final ParseField INNER_HITS_FIELD = new ParseField("inner_hits"); diff --git a/server/src/main/java/org/opensearch/search/collapse/CollapseContext.java b/server/src/main/java/org/opensearch/search/collapse/CollapseContext.java index 09a612a15d762..79b90d92f4daa 100644 --- a/server/src/main/java/org/opensearch/search/collapse/CollapseContext.java +++ b/server/src/main/java/org/opensearch/search/collapse/CollapseContext.java @@ -33,6 +33,7 @@ import org.apache.lucene.search.Sort; import org.apache.lucene.search.grouping.CollapsingTopDocsCollector; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.NumberFieldMapper; @@ -43,8 +44,9 @@ /** * Context used for field collapsing * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CollapseContext { private final String fieldName; private final MappedFieldType fieldType; diff --git a/server/src/main/java/org/opensearch/search/dfs/AggregatedDfs.java b/server/src/main/java/org/opensearch/search/dfs/AggregatedDfs.java index 35ab0c0701b4e..61e465e49be53 100644 --- a/server/src/main/java/org/opensearch/search/dfs/AggregatedDfs.java +++ b/server/src/main/java/org/opensearch/search/dfs/AggregatedDfs.java @@ -35,6 +35,7 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.CollectionStatistics; import org.apache.lucene.search.TermStatistics; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -47,8 +48,9 @@ /** * Compute global distributed frequency across the index * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class AggregatedDfs implements Writeable { private Map<Term, TermStatistics> termStatistics; diff --git a/server/src/main/java/org/opensearch/search/dfs/DfsPhase.java b/server/src/main/java/org/opensearch/search/dfs/DfsPhase.java index 5094145ea1c6e..b5f6c082a18c5 100644 --- a/server/src/main/java/org/opensearch/search/dfs/DfsPhase.java +++ b/server/src/main/java/org/opensearch/search/dfs/DfsPhase.java @@ -38,9 +38,9 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.TermStatistics; +import org.opensearch.core.tasks.TaskCancelledException; import org.opensearch.search.internal.SearchContext; import org.opensearch.search.rescore.RescoreContext; -import org.opensearch.tasks.TaskCancelledException; import java.io.IOException; import java.util.HashMap; diff --git 
a/server/src/main/java/org/opensearch/search/dfs/DfsSearchResult.java b/server/src/main/java/org/opensearch/search/dfs/DfsSearchResult.java index 0209f91f7fa52..2338a47435012 100644 --- a/server/src/main/java/org/opensearch/search/dfs/DfsSearchResult.java +++ b/server/src/main/java/org/opensearch/search/dfs/DfsSearchResult.java @@ -36,6 +36,7 @@ import org.apache.lucene.search.CollectionStatistics; import org.apache.lucene.search.TermStatistics; import org.apache.lucene.util.BytesRef; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.search.SearchPhaseResult; @@ -51,8 +52,9 @@ /** * Result from a Distributed Frequency Search phase * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DfsSearchResult extends SearchPhaseResult { private static final Term[] EMPTY_TERMS = new Term[0]; diff --git a/server/src/main/java/org/opensearch/search/fetch/FetchContext.java b/server/src/main/java/org/opensearch/search/fetch/FetchContext.java index 7e36ace9e2112..780a6f35524ea 100644 --- a/server/src/main/java/org/opensearch/search/fetch/FetchContext.java +++ b/server/src/main/java/org/opensearch/search/fetch/FetchContext.java @@ -188,6 +188,14 @@ public boolean fetchScores() { return searchContext.sort() != null && searchContext.trackScores(); } + public boolean includeNamedQueriesScore() { + return searchContext.includeNamedQueriesScore(); + } + + public boolean hasInnerHits() { + return searchContext.hasInnerHits(); + } + /** * Configuration for returning inner hits */ @@ -209,6 +217,10 @@ public FetchFieldsContext fetchFieldsContext() { return searchContext.fetchFieldsContext(); } + public boolean hasScriptFields() { + return searchContext.hasScriptFields(); + } + /** * Configuration for script fields */ diff --git a/server/src/main/java/org/opensearch/search/fetch/FetchPhase.java b/server/src/main/java/org/opensearch/search/fetch/FetchPhase.java index 67c6eeae02271..1698f41caaf2b 100644 --- a/server/src/main/java/org/opensearch/search/fetch/FetchPhase.java +++ b/server/src/main/java/org/opensearch/search/fetch/FetchPhase.java @@ -44,14 +44,16 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.util.BitSet; import org.opensearch.common.CheckedBiConsumer; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; import org.opensearch.common.document.DocumentField; import org.opensearch.common.lucene.index.SequentialStoredFieldsLeafReader; import org.opensearch.common.lucene.search.Queries; -import org.opensearch.core.common.text.Text; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.support.XContentMapValues; +import org.opensearch.core.common.text.Text; +import org.opensearch.core.tasks.TaskCancelledException; import org.opensearch.core.xcontent.MediaType; import org.opensearch.index.IndexSettings; import org.opensearch.index.fieldvisitor.CustomFieldsVisitor; @@ -72,7 +74,6 @@ import org.opensearch.search.internal.SearchContext; import org.opensearch.search.lookup.SearchLookup; import org.opensearch.search.lookup.SourceLookup; -import org.opensearch.tasks.TaskCancelledException; import java.io.IOException; import java.util.ArrayList; @@ -90,10 +91,11 @@ /** * Fetch phase of a search request, used to fetch the actual top matching documents to be returned to the client, identified - 
* after reducing all of the matches returned by the query phase + * after reducing all the matches returned by the query phase * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class FetchPhase { private static final Logger LOGGER = LogManager.getLogger(FetchPhase.class); diff --git a/server/src/main/java/org/opensearch/search/fetch/FetchSearchResult.java b/server/src/main/java/org/opensearch/search/fetch/FetchSearchResult.java index a8560863d8d98..26fa90141c2a9 100644 --- a/server/src/main/java/org/opensearch/search/fetch/FetchSearchResult.java +++ b/server/src/main/java/org/opensearch/search/fetch/FetchSearchResult.java @@ -32,6 +32,7 @@ package org.opensearch.search.fetch; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.search.SearchHit; @@ -46,8 +47,9 @@ /** * Result from a fetch * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class FetchSearchResult extends SearchPhaseResult { private SearchHits hits; diff --git a/server/src/main/java/org/opensearch/search/fetch/FetchSubPhase.java b/server/src/main/java/org/opensearch/search/fetch/FetchSubPhase.java index fa30b2a5c7450..f3a1d5cafe755 100644 --- a/server/src/main/java/org/opensearch/search/fetch/FetchSubPhase.java +++ b/server/src/main/java/org/opensearch/search/fetch/FetchSubPhase.java @@ -88,7 +88,7 @@ public int docId() { /** * This lookup provides access to the source for the given hit document. Note * that it should always be set to the correct doc ID and {@link LeafReaderContext}. - * + * <p> * In most cases, the hit document's source is loaded eagerly at the start of the * {@link FetchPhase}. This lookup will contain the preloaded source. */ @@ -103,7 +103,7 @@ public IndexReader topLevelReader() { /** * Returns a {@link FetchSubPhaseProcessor} for this sub phase. 
- * + * <p> * If nothing should be executed for the provided {@code FetchContext}, then the * implementation should return {@code null} */ diff --git a/server/src/main/java/org/opensearch/search/fetch/ShardFetchRequest.java b/server/src/main/java/org/opensearch/search/fetch/ShardFetchRequest.java index 017e74a12b61c..8e6b2a3eef527 100644 --- a/server/src/main/java/org/opensearch/search/fetch/ShardFetchRequest.java +++ b/server/src/main/java/org/opensearch/search/fetch/ShardFetchRequest.java @@ -36,15 +36,15 @@ import org.apache.lucene.search.ScoreDoc; import org.opensearch.action.search.SearchShardTask; import org.opensearch.common.Nullable; +import org.opensearch.common.lucene.Lucene; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.lucene.Lucene; +import org.opensearch.core.tasks.TaskId; import org.opensearch.search.RescoreDocIds; import org.opensearch.search.dfs.AggregatedDfs; -import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.search.internal.ShardSearchContextId; +import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; import org.opensearch.transport.TransportRequest; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/search/fetch/ShardFetchSearchRequest.java b/server/src/main/java/org/opensearch/search/fetch/ShardFetchSearchRequest.java index 4e74017217217..07964e10db8cc 100644 --- a/server/src/main/java/org/opensearch/search/fetch/ShardFetchSearchRequest.java +++ b/server/src/main/java/org/opensearch/search/fetch/ShardFetchSearchRequest.java @@ -38,9 +38,9 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.search.internal.ShardSearchContextId; import org.opensearch.search.RescoreDocIds; import org.opensearch.search.dfs.AggregatedDfs; +import org.opensearch.search.internal.ShardSearchContextId; import org.opensearch.search.internal.ShardSearchRequest; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/search/fetch/StoredFieldsContext.java b/server/src/main/java/org/opensearch/search/fetch/StoredFieldsContext.java index e8c1dc57627fb..3c22c2f9d29c4 100644 --- a/server/src/main/java/org/opensearch/search/fetch/StoredFieldsContext.java +++ b/server/src/main/java/org/opensearch/search/fetch/StoredFieldsContext.java @@ -32,11 +32,12 @@ package org.opensearch.search.fetch; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.rest.RestRequest; @@ -51,8 +52,9 @@ /** * Context used to fetch the {@code stored_fields}. 
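Per the FetchSubPhase#getProcessor contract quoted above, a sub phase inspects the FetchContext and returns null when it has nothing to execute; the MatchedQueriesPhase hunks at the end of this section follow exactly that shape. A skeletal illustration (the phase itself is hypothetical; hasScriptFields() is the accessor added to FetchContext earlier in this diff):

    import java.io.IOException;

    import org.apache.lucene.index.LeafReaderContext;
    import org.opensearch.search.fetch.FetchContext;
    import org.opensearch.search.fetch.FetchSubPhase;
    import org.opensearch.search.fetch.FetchSubPhaseProcessor;

    public final class ExampleFetchSubPhase implements FetchSubPhase {
        @Override
        public FetchSubPhaseProcessor getProcessor(FetchContext context) throws IOException {
            if (context.hasScriptFields() == false) {
                return null; // nothing to execute for this request, so the phase is skipped
            }
            return new FetchSubPhaseProcessor() {
                @Override
                public void setNextReader(LeafReaderContext readerContext) throws IOException {
                    // per-segment setup (e.g. loading doc values for readerContext) goes here
                }

                @Override
                public void process(HitContext hitContext) throws IOException {
                    // per-hit work; hitContext.docId() and hitContext.hit() are available,
                    // as in the MatchedQueriesPhase hunks below
                }
            };
        }
    }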
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class StoredFieldsContext implements Writeable { public static final String _NONE_ = "_none_"; diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/FetchDocValuesContext.java b/server/src/main/java/org/opensearch/search/fetch/subphase/FetchDocValuesContext.java index ce5bd152cca00..14c1d1cdda5ca 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/FetchDocValuesContext.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/FetchDocValuesContext.java @@ -31,6 +31,7 @@ package org.opensearch.search.fetch.subphase; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.IndexSettings; import java.util.ArrayList; @@ -42,8 +43,9 @@ /** * All the required context to pull a field from the doc values. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class FetchDocValuesContext { private final List<FieldAndFormat> fields; diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/FetchDocValuesPhase.java b/server/src/main/java/org/opensearch/search/fetch/subphase/FetchDocValuesPhase.java index a8ab8c0dcb8a8..9b17d9dbcd8de 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/FetchDocValuesPhase.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/FetchDocValuesPhase.java @@ -47,7 +47,7 @@ /** * Fetch sub phase which pulls data from doc values. - * + * <p> * Specifying {@code "docvalue_fields": ["field1", "field2"]} * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/FetchFieldsContext.java b/server/src/main/java/org/opensearch/search/fetch/subphase/FetchFieldsContext.java index 3803dedae9b57..38d04932c3f46 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/FetchFieldsContext.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/FetchFieldsContext.java @@ -31,13 +31,16 @@ package org.opensearch.search.fetch.subphase; +import org.opensearch.common.annotation.PublicApi; + import java.util.List; /** * The context needed to retrieve fields. 
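FetchDocValuesPhase above is driven by the docvalue_fields section of the request body. A client-side sketch, assuming the docValueField overloads on SearchSourceBuilder (the field names are made up):

    import org.opensearch.search.builder.SearchSourceBuilder;

    class DocValueFieldsExample {
        // Equivalent of {"docvalue_fields": ["price", "created_at"]} in the request body.
        static SearchSourceBuilder build() {
            return new SearchSourceBuilder()
                .docValueField("price")
                .docValueField("created_at", "epoch_millis"); // second argument is an optional format
        }
    }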
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class FetchFieldsContext { private final List<FieldAndFormat> fields; diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/FetchSourceContext.java b/server/src/main/java/org/opensearch/search/fetch/subphase/FetchSourceContext.java index 5b9b9e1e70cfa..337576890e663 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/FetchSourceContext.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/FetchSourceContext.java @@ -33,16 +33,17 @@ package org.opensearch.search.fetch.subphase; import org.opensearch.common.Booleans; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.rest.RestRequest; import java.io.IOException; @@ -55,8 +56,9 @@ /** * Context used to fetch the {@code _source}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class FetchSourceContext implements Writeable, ToXContentObject { public static final ParseField INCLUDES_FIELD = new ParseField("includes", "include"); diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/FetchSourcePhase.java b/server/src/main/java/org/opensearch/search/fetch/subphase/FetchSourcePhase.java index b94a3e3564373..3ea5756ea46b4 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/FetchSourcePhase.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/FetchSourcePhase.java @@ -34,8 +34,8 @@ import org.apache.lucene.index.LeafReaderContext; import org.opensearch.OpenSearchException; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.SearchHit; import org.opensearch.search.fetch.FetchContext; diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/FieldAndFormat.java b/server/src/main/java/org/opensearch/search/fetch/subphase/FieldAndFormat.java index f7e4b06624c76..dbdd7bca6678f 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/FieldAndFormat.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/FieldAndFormat.java @@ -33,6 +33,7 @@ package org.opensearch.search.fetch.subphase; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -50,8 +51,9 @@ * Wrapper around a field name and the format that should be used to * display values of this field. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class FieldAndFormat implements Writeable, ToXContentObject { private static final ParseField FIELD_FIELD = new ParseField("field"); private static final ParseField FORMAT_FIELD = new ParseField("format"); diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/InnerHitsContext.java b/server/src/main/java/org/opensearch/search/fetch/subphase/InnerHitsContext.java index f50524244b115..fa80bb04c77f5 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/InnerHitsContext.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/InnerHitsContext.java @@ -44,6 +44,7 @@ import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lucene.search.TopDocsAndMaxScore; import org.opensearch.search.SearchHit; import org.opensearch.search.internal.SearchContext; @@ -59,8 +60,9 @@ /** * Context used for inner hits retrieval * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class InnerHitsContext { private final Map<String, InnerHitSubContext> innerHits; @@ -91,7 +93,10 @@ public void addInnerHitDefinition(InnerHitSubContext innerHit) { /** * A {@link SubSearchContext} that associates {@link TopDocs} to each {@link SearchHit} * in the parent search context + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") public abstract static class InnerHitSubContext extends SubSearchContext { private final String name; @@ -114,6 +119,11 @@ public String getName() { return name; } + @Override + public boolean hasInnerHits() { + return childInnerHits != null; + } + @Override public InnerHitsContext innerHits() { return childInnerHits; @@ -138,7 +148,7 @@ public SearchContext parentSearchContext() { /** * The _id of the root document. - * + * <p> * Since this ID is available on the context, inner hits can avoid re-loading the root _id. */ public String getId() { @@ -151,7 +161,7 @@ public void setId(String id) { /** * A source lookup for the root document. - * + * <p> * This shared lookup allows inner hits to avoid re-loading the root _source. */ public SourceLookup getRootLookup() { diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/InnerHitsPhase.java b/server/src/main/java/org/opensearch/search/fetch/subphase/InnerHitsPhase.java index 0b07dc35f13bb..cadad8529da9d 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/InnerHitsPhase.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/InnerHitsPhase.java @@ -64,7 +64,7 @@ public InnerHitsPhase(FetchPhase fetchPhase) { @Override public FetchSubPhaseProcessor getProcessor(FetchContext searchContext) { - if (searchContext.innerHits() == null) { + if (searchContext.hasInnerHits() == false) { return null; } Map<String, InnerHitsContext.InnerHitSubContext> innerHits = searchContext.innerHits().getInnerHits(); diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/MatchedQueriesPhase.java b/server/src/main/java/org/opensearch/search/fetch/subphase/MatchedQueriesPhase.java index 6c589438d6b4c..406d9c8b4bc03 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/MatchedQueriesPhase.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/MatchedQueriesPhase.java @@ -28,12 +28,12 @@ * Modifications Copyright OpenSearch Contributors. See * GitHub history for details. 
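The MatchedQueriesPhase rewrite below records, per hit, which named queries matched and (optionally) their scores, by lazily advancing each query's scorer to the hit's doc ID. The core idiom in isolation, using only the Lucene calls visible in the hunk that follows:

    import java.io.IOException;

    import org.apache.lucene.search.DocIdSetIterator;
    import org.apache.lucene.search.Scorer;

    final class ScorerMatchCheck {
        // Returns the score if the scorer matches docId, or null otherwise. advance() only
        // moves forward, so callers must visit hits in increasing doc-ID order.
        static Float scoreIfMatch(Scorer scorer, int docId) throws IOException {
            DocIdSetIterator it = scorer.iterator();
            if (it.docID() < docId) {
                it.advance(docId); // skip to the first document >= docId
            }
            return it.docID() == docId ? scorer.score() : null;
        }
    }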
*/ - package org.opensearch.search.fetch.subphase; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Scorer; import org.apache.lucene.search.ScorerSupplier; import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; @@ -45,6 +45,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -67,25 +68,69 @@ public FetchSubPhaseProcessor getProcessor(FetchContext context) throws IOExcept if (namedQueries.isEmpty()) { return null; } + + Map<String, Weight> weights = prepareWeights(context, namedQueries); + + return context.includeNamedQueriesScore() ? createScoringProcessor(weights) : createNonScoringProcessor(weights); + } + + private Map<String, Weight> prepareWeights(FetchContext context, Map<String, Query> namedQueries) throws IOException { Map<String, Weight> weights = new HashMap<>(); + ScoreMode scoreMode = context.includeNamedQueriesScore() ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES; for (Map.Entry<String, Query> entry : namedQueries.entrySet()) { - weights.put( - entry.getKey(), - context.searcher().createWeight(context.searcher().rewrite(entry.getValue()), ScoreMode.COMPLETE_NO_SCORES, 1) - ); + weights.put(entry.getKey(), context.searcher().createWeight(context.searcher().rewrite(entry.getValue()), scoreMode, 1)); } + return weights; + } + + private FetchSubPhaseProcessor createScoringProcessor(Map<String, Weight> weights) { return new FetchSubPhaseProcessor() { + final Map<String, Scorer> matchingScorers = new HashMap<>(); + + @Override + public void setNextReader(LeafReaderContext readerContext) throws IOException { + matchingScorers.clear(); + for (Map.Entry<String, Weight> entry : weights.entrySet()) { + ScorerSupplier scorerSupplier = entry.getValue().scorerSupplier(readerContext); + if (scorerSupplier != null) { + Scorer scorer = scorerSupplier.get(0L); + if (scorer != null) { + matchingScorers.put(entry.getKey(), scorer); + } + } + } + } + + @Override + public void process(HitContext hitContext) throws IOException { + Map<String, Float> matches = new LinkedHashMap<>(); + int docId = hitContext.docId(); + for (Map.Entry<String, Scorer> entry : matchingScorers.entrySet()) { + Scorer scorer = entry.getValue(); + if (scorer.iterator().docID() < docId) { + scorer.iterator().advance(docId); + } + if (scorer.iterator().docID() == docId) { + matches.put(entry.getKey(), scorer.score()); + } + } + hitContext.hit().matchedQueriesWithScores(matches); + } + }; + } - final Map<String, Bits> matchingIterators = new HashMap<>(); + private FetchSubPhaseProcessor createNonScoringProcessor(Map<String, Weight> weights) { + return new FetchSubPhaseProcessor() { + final Map<String, Bits> matchingBits = new HashMap<>(); @Override public void setNextReader(LeafReaderContext readerContext) throws IOException { - matchingIterators.clear(); + matchingBits.clear(); for (Map.Entry<String, Weight> entry : weights.entrySet()) { - ScorerSupplier ss = entry.getValue().scorerSupplier(readerContext); - if (ss != null) { - Bits matchingBits = Lucene.asSequentialAccessBits(readerContext.reader().maxDoc(), ss); - matchingIterators.put(entry.getKey(), matchingBits); + ScorerSupplier scorerSupplier = entry.getValue().scorerSupplier(readerContext); + if (scorerSupplier != null) { + Bits bits = Lucene.asSequentialAccessBits(readerContext.reader().maxDoc(), scorerSupplier); 
+ matchingBits.put(entry.getKey(), bits); } } } @@ -93,15 +138,14 @@ public void setNextReader(LeafReaderContext readerContext) throws IOException { @Override public void process(HitContext hitContext) { List<String> matches = new ArrayList<>(); - int doc = hitContext.docId(); - for (Map.Entry<String, Bits> iterator : matchingIterators.entrySet()) { - if (iterator.getValue().get(doc)) { - matches.add(iterator.getKey()); + int docId = hitContext.docId(); + for (Map.Entry<String, Bits> entry : matchingBits.entrySet()) { + if (entry.getValue().get(docId)) { + matches.add(entry.getKey()); } } hitContext.hit().matchedQueries(matches.toArray(new String[0])); } }; } - } diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/ScriptFieldsContext.java b/server/src/main/java/org/opensearch/search/fetch/subphase/ScriptFieldsContext.java index 78c35098d9cc4..c37d3b359ded8 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/ScriptFieldsContext.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/ScriptFieldsContext.java @@ -32,6 +32,7 @@ package org.opensearch.search.fetch.subphase; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.script.FieldScript; import java.util.ArrayList; @@ -40,15 +41,17 @@ /** * Context used for script fields * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ScriptFieldsContext { /** * Script field use in the script fields context * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class ScriptField { private final String name; private final FieldScript.LeafFactory script; diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/ScriptFieldsPhase.java b/server/src/main/java/org/opensearch/search/fetch/subphase/ScriptFieldsPhase.java index aad20f0746f58..bee536dbaf7f6 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/ScriptFieldsPhase.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/ScriptFieldsPhase.java @@ -33,7 +33,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.opensearch.common.document.DocumentField; -import org.opensearch.common.util.CollectionUtils; +import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.script.FieldScript; import org.opensearch.search.fetch.FetchContext; import org.opensearch.search.fetch.FetchSubPhase; @@ -54,7 +54,7 @@ public final class ScriptFieldsPhase implements FetchSubPhase { @Override public FetchSubPhaseProcessor getProcessor(FetchContext context) { - if (context.scriptFields() == null) { + if (context.hasScriptFields() == false) { return null; } List<ScriptFieldsContext.ScriptField> scriptFields = context.scriptFields().fields(); diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java index 162c79c28f982..bee75dcecd528 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java @@ -35,13 +35,13 @@ import org.apache.lucene.search.highlight.SimpleFragmenter; import org.apache.lucene.search.highlight.SimpleSpanFragmenter; import org.opensearch.Version; +import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; -import org.opensearch.common.Strings; 
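The scoring branch in MatchedQueriesPhase above walks each named query's Scorer in lock-step with the fetched hits. The kernel of it is the forward-only advance shown below; scoresForDoc is a hypothetical helper that just isolates that step, not OpenSearch API:

import java.io.IOException;
import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.lucene.search.Scorer;

final class NamedQueryScores {
    // Advance each named query's scorer (never rewinding) to the hit's doc id and
    // record a score only when the iterator lands exactly on that doc.
    static Map<String, Float> scoresForDoc(Map<String, Scorer> scorers, int docId) throws IOException {
        Map<String, Float> matches = new LinkedHashMap<>();
        for (Map.Entry<String, Scorer> entry : scorers.entrySet()) {
            Scorer scorer = entry.getValue();
            if (scorer.iterator().docID() < docId) {
                scorer.iterator().advance(docId); // forward-only, like the fetch phase itself
            }
            if (scorer.iterator().docID() == docId) {
                matches.put(entry.getKey(), scorer.score());
            }
        }
        return matches;
    }
}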
+import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.ParseField; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; @@ -773,6 +773,6 @@ public final boolean equals(Object obj) { @Override public String toString() { - return Strings.toString(XContentType.JSON, this, true, true); + return Strings.toString(MediaTypeRegistry.JSON, this, true, true); } } diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/CustomQueryScorer.java b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/CustomQueryScorer.java index d0fb0f6da53c4..89c77b3cd403f 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/CustomQueryScorer.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/CustomQueryScorer.java @@ -33,6 +33,7 @@ package org.opensearch.search.fetch.subphase.highlight; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.highlight.QueryScorer; import org.apache.lucene.search.highlight.WeightedSpanTerm; @@ -104,6 +105,8 @@ protected void extract(Query query, float boost, Map<String, WeightedSpanTerm> t super.extract(((FunctionScoreQuery) query).getSubQuery(), boost, terms); } else if (query instanceof OpenSearchToParentBlockJoinQuery) { super.extract(((OpenSearchToParentBlockJoinQuery) query).getChildQuery(), boost, terms); + } else if (query instanceof IndexOrDocValuesQuery) { + super.extract(((IndexOrDocValuesQuery) query).getIndexQuery(), boost, terms); } else { super.extract(query, boost, terms); } diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/FastVectorHighlighter.java b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/FastVectorHighlighter.java index c087ad6b71344..69f86bb91cc6e 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/FastVectorHighlighter.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/FastVectorHighlighter.java @@ -48,7 +48,7 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.text.Text; -import org.opensearch.common.util.CollectionUtils; +import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.TextSearchInfo; import org.opensearch.search.fetch.FetchSubPhase; diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightBuilder.java b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightBuilder.java index cc0723ed7a432..0e7c3cf30ccec 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightBuilder.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightBuilder.java @@ -34,6 +34,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.vectorhighlight.SimpleBoundaryScanner; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import 
org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -68,8 +69,9 @@ * * @see org.opensearch.search.builder.SearchSourceBuilder#highlight() * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilder> { /** default for whether to highlight fields based on the source even if stored separately */ public static final boolean DEFAULT_FORCE_SOURCE = false; @@ -476,8 +478,9 @@ public HighlightBuilder rewrite(QueryRewriteContext ctx) throws IOException { /** * Field for highlight builder * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Field extends AbstractHighlighterBuilder<Field> { static final NamedObjectParser<Field, Void> PARSER; static { @@ -584,8 +587,9 @@ public Field rewrite(QueryRewriteContext ctx) throws IOException { /** * Order for highlight builder * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Order implements Writeable { NONE, SCORE; @@ -615,8 +619,9 @@ public String toString() { /** * Boundary scanner type * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum BoundaryScannerType implements Writeable { CHARS, WORD, diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightField.java b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightField.java index 7a29a53304dc3..30effe2826d76 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightField.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightField.java @@ -32,6 +32,7 @@ package org.opensearch.search.fetch.subphase.highlight; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -52,8 +53,9 @@ /** * A field highlighted with its highlighted fragments. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class HighlightField implements ToXContentFragment, Writeable { private String name; diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/PlainHighlighter.java b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/PlainHighlighter.java index eb5f4f3c14eb2..c06a733203434 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/PlainHighlighter.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/PlainHighlighter.java @@ -123,13 +123,27 @@ public HighlightField highlight(FieldHighlightContext fieldContext) throws IOExc List<Object> textsToHighlight; Analyzer analyzer = context.mapperService().documentMapper().mappers().indexAnalyzer(); final int maxAnalyzedOffset = context.getIndexSettings().getHighlightMaxAnalyzedOffset(); + final Integer fieldMaxAnalyzedOffset = field.fieldOptions().maxAnalyzerOffset(); + if (fieldMaxAnalyzedOffset != null && fieldMaxAnalyzedOffset > maxAnalyzedOffset) { + throw new IllegalArgumentException( + "max_analyzer_offset has exceeded [" + + maxAnalyzedOffset + + "] - maximum allowed to be analyzed for highlighting. " + + "This maximum can be set by changing the [" + + IndexSettings.MAX_ANALYZED_OFFSET_SETTING.getKey() + + "] index level setting. 
" + + "For large texts, indexing with offsets or term vectors is recommended!" + ); + } textsToHighlight = HighlightUtils.loadFieldValues(fieldType, context.getQueryShardContext(), hitContext, fieldContext.forceSource); for (Object textToHighlight : textsToHighlight) { String text = convertFieldValue(fieldType, textToHighlight); int textLength = text.length(); - if (textLength > maxAnalyzedOffset) { + if (fieldMaxAnalyzedOffset != null && textLength > fieldMaxAnalyzedOffset) { + text = text.substring(0, fieldMaxAnalyzedOffset); + } else if (textLength > maxAnalyzedOffset) { throw new IllegalArgumentException( "The length of [" + fieldContext.fieldName diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/SearchHighlightContext.java b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/SearchHighlightContext.java index 7464ba094b97e..c3d34dbdef56c 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/SearchHighlightContext.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/SearchHighlightContext.java @@ -33,6 +33,7 @@ package org.opensearch.search.fetch.subphase.highlight; import org.apache.lucene.search.Query; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.search.fetch.subphase.highlight.HighlightBuilder.BoundaryScannerType; import java.util.Arrays; @@ -46,8 +47,9 @@ /** * Search context used during highlighting phase * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SearchHighlightContext { private final Map<String, Field> fields; @@ -82,8 +84,9 @@ public boolean forceSource(Field field) { /** * Field for the search highlight context * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Field { private final String field; private final FieldOptions fieldOptions; @@ -107,8 +110,9 @@ public FieldOptions fieldOptions() { /** * Field options for the search highlight context * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class FieldOptions { // Field options that default to null or -1 are often set to their real default in HighlighterParseElement#parse diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/UnifiedHighlighter.java b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/UnifiedHighlighter.java index d4bc3544cc184..df85246a84d54 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/UnifiedHighlighter.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/UnifiedHighlighter.java @@ -46,8 +46,8 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.CollectionUtil; import org.opensearch.common.CheckedSupplier; -import org.opensearch.core.common.text.Text; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.text.Text; import org.opensearch.index.mapper.DocumentMapper; import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.MappedFieldType; diff --git a/server/src/main/java/org/opensearch/search/internal/AliasFilter.java b/server/src/main/java/org/opensearch/search/internal/AliasFilter.java index 408f67f5002d9..1732c0ab0db8a 100644 --- a/server/src/main/java/org/opensearch/search/internal/AliasFilter.java +++ b/server/src/main/java/org/opensearch/search/internal/AliasFilter.java @@ -32,10 +32,11 @@ package org.opensearch.search.internal; +import 
org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.core.common.Strings; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryRewriteContext; import org.opensearch.index.query.Rewriteable; @@ -47,8 +48,9 @@ /** * Represents a {@link QueryBuilder} and a list of alias names that filters the builder is composed of. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class AliasFilter implements Writeable, Rewriteable<AliasFilter> { private final String[] aliases; diff --git a/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java b/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java index e3ca932eb4699..ec3ed2332d0b8 100644 --- a/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java +++ b/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java @@ -32,6 +32,8 @@ package org.opensearch.search.internal; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; @@ -62,9 +64,9 @@ import org.apache.lucene.util.Bits; import org.apache.lucene.util.CombinedBitSet; import org.apache.lucene.util.SparseFixedBitSet; -import org.opensearch.cluster.metadata.DataStream; -import org.opensearch.common.lucene.search.TopDocsAndMaxScore; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lucene.search.TopDocsAndMaxScore; import org.opensearch.search.DocValueFormat; import org.opensearch.search.SearchService; import org.opensearch.search.dfs.AggregatedDfs; @@ -90,14 +92,18 @@ /** * Context-aware extension of {@link IndexSearcher}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ContextIndexSearcher extends IndexSearcher implements Releasable { + + private static final Logger logger = LogManager.getLogger(ContextIndexSearcher.class); /** * The interval at which we check for search cancellation when we cannot use * a {@link CancellableBulkScorer}. See {@link #intersectScorerAndBitSet}. */ - private static int CHECK_CANCELLED_SCORER_INTERVAL = 1 << 11; + + private static final int CHECK_CANCELLED_SCORER_INTERVAL = 1 << 11; private AggregatedDfs aggregatedDfs; private QueryProfiler profiler; @@ -264,10 +270,11 @@ public void search( @Override protected void search(List<LeafReaderContext> leaves, Weight weight, Collector collector) throws IOException { - if (shouldReverseLeafReaderContexts()) { - // reverse the segment search order if this flag is true. - // Certain queries can benefit if we reverse the segment read order, - // for example time series based queries if searched for desc sort order. + // A time-series workload traverses segments in descending order by default, i.e. latest to oldest. + // This benefits time-series searches, which want to start on the latest segments first, + // but it can slow down ASC order queries on a timestamp field. So to avoid that slowdown, we reverse the leaf + // reader order here. 
+ if (searchContext.shouldUseTimeSeriesDescSortOptimization()) { for (int i = leaves.size() - 1; i >= 0; i--) { searchLeaf(leaves.get(i), weight, collector); } @@ -281,7 +288,7 @@ protected void search(List<LeafReaderContext> leaves, Weight weight, Collector c /** * Lower-level search API. - * + * <p> * {@link LeafCollector#collect(int)} is called for every matching document in * the provided <code>ctx</code>. */ @@ -295,6 +302,9 @@ private void searchLeaf(LeafReaderContext ctx, Weight weight, Collector collecto final LeafCollector leafCollector; try { cancellable.checkCancelled(); + if (weight instanceof ProfileWeight) { + ((ProfileWeight) weight).associateCollectorToLeaves(ctx, collector); + } weight = wrapWeight(weight); // See please https://github.com/apache/lucene/pull/964 collector.setWeight(weight); @@ -343,6 +353,10 @@ private void searchLeaf(LeafReaderContext ctx, Weight weight, Collector collecto } } } + + // Note: this is called if collection ran successfully, including the above special cases of + // CollectionTerminatedException and TimeExceededException, but no other exception. + leafCollector.finish(); } private Weight wrapWeight(Weight weight) { @@ -373,6 +387,11 @@ public BulkScorer bulkScorer(LeafReaderContext context) throws IOException { return null; } } + + @Override + public int count(LeafReaderContext context) throws IOException { + return weight.count(context); + } }; } else { return weight; @@ -439,6 +458,16 @@ public CollectionStatistics collectionStatistics(String field) throws IOExceptio return collectionStatistics; } + /** + * Compute the leaf slices that will be used by concurrent segment search to spread work across threads + * @param leaves all the segments + * @return leafSlice group to be executed by different threads + */ + @Override + protected LeafSlice[] slices(List<LeafReaderContext> leaves) { + return slicesInternal(leaves, searchContext.getTargetMaxSliceCount()); + } + public DirectoryReader getDirectoryReader() { final IndexReader reader = getIndexReader(); assert reader instanceof DirectoryReader : "expected an instance of DirectoryReader, got " + reader.getClass(); @@ -484,38 +513,39 @@ private boolean canMatch(LeafReaderContext ctx) throws IOException { } private boolean canMatchSearchAfter(LeafReaderContext ctx) throws IOException { - if (searchContext.request() != null && searchContext.request().source() != null) { + if (searchContext.searchAfter() != null && searchContext.request() != null && searchContext.request().source() != null) { // Only applied on primary sort field and primary search_after. FieldSortBuilder primarySortField = FieldSortBuilder.getPrimaryFieldSortOrNull(searchContext.request().source()); if (primarySortField != null) { MinAndMax<?> minMax = FieldSortBuilder.getMinMaxOrNullForSegment( this.searchContext.getQueryShardContext(), ctx, - primarySortField + primarySortField, + searchContext.sort() + ); + return SearchService.canMatchSearchAfter( + searchContext.searchAfter(), + minMax, + primarySortField, + searchContext.trackTotalHitsUpTo() ); - return SearchService.canMatchSearchAfter(searchContext.searchAfter(), minMax, primarySortField); } } return true; } - private boolean shouldReverseLeafReaderContexts() { - // Time series based workload by default traverses segments in desc order i.e. latest to the oldest order. - // This is actually beneficial for search queries to start search on latest segments first for time series workload. - // That can slow down ASC order queries on timestamp workload. 
So to avoid that slowdown, we will reverse leaf - // reader order here. - if (searchContext.indexShard().isTimeSeriesDescSortOptimizationEnabled()) { - // Only reverse order for asc order sort queries - if (searchContext.sort() != null - && searchContext.sort().sort != null - && searchContext.sort().sort.getSort() != null - && searchContext.sort().sort.getSort().length > 0 - && searchContext.sort().sort.getSort()[0].getReverse() == false - && searchContext.sort().sort.getSort()[0].getField() != null - && searchContext.sort().sort.getSort()[0].getField().equals(DataStream.TIMESERIES_FIELDNAME)) { - return true; - } + // package-private for testing + LeafSlice[] slicesInternal(List<LeafReaderContext> leaves, int targetMaxSlice) { + LeafSlice[] leafSlices; + if (targetMaxSlice == 0) { + // use the default lucene slice calculation + leafSlices = super.slices(leaves); + logger.debug("Slice count using lucene default [{}]", leafSlices.length); + } else { + // use the custom slice calculation based on targetMaxSlice + leafSlices = MaxTargetSliceSupplier.getSlices(leaves, targetMaxSlice); + logger.debug("Slice count using max target slice supplier [{}]", leafSlices.length); } - return false; + return leafSlices; } } diff --git a/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java b/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java index bb990e69e7722..3a3b46366a6d2 100644 --- a/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java +++ b/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java @@ -340,6 +340,14 @@ public FieldDoc searchAfter() { return in.searchAfter(); } + public SearchContext includeNamedQueriesScore(boolean includeNamedQueriesScore) { + return in.includeNamedQueriesScore(includeNamedQueriesScore); + } + + public boolean includeNamedQueriesScore() { + return in.includeNamedQueriesScore(); + } + @Override public SearchContext parsedPostFilter(ParsedQuery postFilter) { return in.parsedPostFilter(postFilter); @@ -559,4 +567,19 @@ public void setBucketCollectorProcessor(BucketCollectorProcessor bucketCollector public BucketCollectorProcessor bucketCollectorProcessor() { return in.bucketCollectorProcessor(); } + + @Override + public boolean shouldUseConcurrentSearch() { + return in.shouldUseConcurrentSearch(); + } + + @Override + public int getTargetMaxSliceCount() { + return in.getTargetMaxSliceCount(); + } + + @Override + public boolean shouldUseTimeSeriesDescSortOptimization() { + return in.shouldUseTimeSeriesDescSortOptimization(); + } } diff --git a/server/src/main/java/org/opensearch/search/internal/InternalScrollSearchRequest.java b/server/src/main/java/org/opensearch/search/internal/InternalScrollSearchRequest.java index 6c407e6ba3401..e458f7f38eac4 100644 --- a/server/src/main/java/org/opensearch/search/internal/InternalScrollSearchRequest.java +++ b/server/src/main/java/org/opensearch/search/internal/InternalScrollSearchRequest.java @@ -36,9 +36,9 @@ import org.opensearch.action.search.SearchShardTask; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.tasks.TaskId; import org.opensearch.search.Scroll; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; import org.opensearch.transport.TransportRequest; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/search/internal/InternalSearchResponse.java 
b/server/src/main/java/org/opensearch/search/internal/InternalSearchResponse.java index 1561d18f3040a..c9d7b0084c1e1 100644 --- a/server/src/main/java/org/opensearch/search/internal/InternalSearchResponse.java +++ b/server/src/main/java/org/opensearch/search/internal/InternalSearchResponse.java @@ -32,23 +32,29 @@ package org.opensearch.search.internal; +import org.opensearch.Version; import org.opensearch.action.search.SearchResponseSections; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.search.SearchExtBuilder; import org.opensearch.search.SearchHits; import org.opensearch.search.aggregations.InternalAggregations; import org.opensearch.search.profile.SearchProfileShardResults; import org.opensearch.search.suggest.Suggest; import java.io.IOException; +import java.util.Collections; +import java.util.List; /** * {@link SearchResponseSections} subclass that can be serialized over the wire. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class InternalSearchResponse extends SearchResponseSections implements Writeable, ToXContentFragment { public static InternalSearchResponse empty() { return empty(true); @@ -67,7 +73,20 @@ public InternalSearchResponse( Boolean terminatedEarly, int numReducePhases ) { - super(hits, aggregations, suggest, timedOut, terminatedEarly, profileResults, numReducePhases); + this(hits, aggregations, suggest, profileResults, timedOut, terminatedEarly, numReducePhases, Collections.emptyList()); + } + + public InternalSearchResponse( + SearchHits hits, + InternalAggregations aggregations, + Suggest suggest, + SearchProfileShardResults profileResults, + boolean timedOut, + Boolean terminatedEarly, + int numReducePhases, + List<SearchExtBuilder> searchExtBuilderList + ) { + super(hits, aggregations, suggest, timedOut, terminatedEarly, profileResults, numReducePhases, searchExtBuilderList); } public InternalSearchResponse(StreamInput in) throws IOException { @@ -78,7 +97,8 @@ public InternalSearchResponse(StreamInput in) throws IOException { in.readBoolean(), in.readOptionalBoolean(), in.readOptionalWriteable(SearchProfileShardResults::new), - in.readVInt() + in.readVInt(), + readSearchExtBuildersOnOrAfter(in) ); } @@ -91,5 +111,16 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalBoolean(terminatedEarly); out.writeOptionalWriteable(profileResults); out.writeVInt(numReducePhases); + writeSearchExtBuildersOnOrAfter(out, searchExtBuilders); + } + + private static List<SearchExtBuilder> readSearchExtBuildersOnOrAfter(StreamInput in) throws IOException { + return (in.getVersion().onOrAfter(Version.V_2_10_0)) ? 
in.readNamedWriteableList(SearchExtBuilder.class) : Collections.emptyList(); + } + + private static void writeSearchExtBuildersOnOrAfter(StreamOutput out, List<SearchExtBuilder> searchExtBuilders) throws IOException { + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { + out.writeNamedWriteableList(searchExtBuilders); + } } } diff --git a/server/src/main/java/org/opensearch/search/internal/MaxTargetSliceSupplier.java b/server/src/main/java/org/opensearch/search/internal/MaxTargetSliceSupplier.java new file mode 100644 index 0000000000000..64984585f3ab6 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/internal/MaxTargetSliceSupplier.java @@ -0,0 +1,55 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.internal; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.IndexSearcher; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; + +/** + * Supplier to compute leaf slices based on the passed in leaves and a max target slice count that limits the number of computed slices. It sorts + * all the leaves by document count and then assigns each leaf, in round-robin fashion, to the target number of slices. Experiment + * results shared in <a href=https://github.com/opensearch-project/OpenSearch/issues/7358>issue-7358</a> + * show that this mechanism achieves better tail/median latency than the default lucene slice computation. + * + * @opensearch.internal + */ +final class MaxTargetSliceSupplier { + + static IndexSearcher.LeafSlice[] getSlices(List<LeafReaderContext> leaves, int targetMaxSlice) { + if (targetMaxSlice <= 0) { + throw new IllegalArgumentException("MaxTargetSliceSupplier called with unexpected slice count of " + targetMaxSlice); + } + + // slice count should not exceed the segment count + int targetSliceCount = Math.min(targetMaxSlice, leaves.size()); + + // Make a copy so we can sort: + List<LeafReaderContext> sortedLeaves = new ArrayList<>(leaves); + + // Sort by maxDoc, descending: + sortedLeaves.sort(Collections.reverseOrder(Comparator.comparingInt(l -> l.reader().maxDoc()))); + + final List<List<LeafReaderContext>> groupedLeaves = new ArrayList<>(targetSliceCount); + for (int i = 0; i < targetSliceCount; ++i) { + groupedLeaves.add(new ArrayList<>()); + } + // distribute the slices in round-robin fashion + for (int idx = 0; idx < sortedLeaves.size(); ++idx) { + int currentGroup = idx % targetSliceCount; + groupedLeaves.get(currentGroup).add(sortedLeaves.get(idx)); + } + + return groupedLeaves.stream().map(IndexSearcher.LeafSlice::new).toArray(IndexSearcher.LeafSlice[]::new); + } +} diff --git a/server/src/main/java/org/opensearch/search/internal/ReaderContext.java b/server/src/main/java/org/opensearch/search/internal/ReaderContext.java index 549638c204f49..776e92d325ae4 100644 --- a/server/src/main/java/org/opensearch/search/internal/ReaderContext.java +++ b/server/src/main/java/org/opensearch/search/internal/ReaderContext.java @@ -32,9 +32,10 @@ package org.opensearch.search.internal; -import org.opensearch.common.util.concurrent.AbstractRefCounted; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; +import org.opensearch.common.util.concurrent.AbstractRefCounted; 
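To make the round-robin in MaxTargetSliceSupplier above concrete, here is a small worked example under assumed segment sizes; sliceFor is a hypothetical helper that restates the assignment rule:

final class SliceMathExample {
    // Five segments with maxDoc [90, 70, 50, 30, 10] and targetMaxSlice = 2:
    // after the descending sort the leaves are dealt out like playing cards,
    //   slice 0: 90, 50, 10   (sorted indices 0, 2, 4)
    //   slice 1: 70, 30       (sorted indices 1, 3)
    // so the largest segments never pile up in a single slice.
    static int sliceFor(int sortedIndex, int targetSliceCount) {
        return sortedIndex % targetSliceCount; // the same assignment rule as getSlices()
    }
}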
import org.opensearch.index.IndexService; import org.opensearch.index.engine.Engine; import org.opensearch.index.shard.IndexShard; @@ -57,8 +58,9 @@ * ie. when an index gets removed. To prevent accessing closed IndexReader / IndexSearcher instances the SearchContext * can be guarded by a reference count and fail if it's been closed by an external event. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ReaderContext implements Releasable { private final ShardSearchContextId id; private final IndexService indexService; diff --git a/server/src/main/java/org/opensearch/search/internal/ScrollContext.java b/server/src/main/java/org/opensearch/search/internal/ScrollContext.java index ae4074930e0c0..e3517756ced6e 100644 --- a/server/src/main/java/org/opensearch/search/internal/ScrollContext.java +++ b/server/src/main/java/org/opensearch/search/internal/ScrollContext.java @@ -34,13 +34,15 @@ import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TotalHits; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.search.Scroll; /** * Wrapper around information that needs to stay around when scrolling. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ScrollContext { public TotalHits totalHits = null; public float maxScore = Float.NaN; diff --git a/server/src/main/java/org/opensearch/search/internal/SearchContext.java b/server/src/main/java/org/opensearch/search/internal/SearchContext.java index c2f81b0d4b8b5..3d13378e58e5d 100644 --- a/server/src/main/java/org/opensearch/search/internal/SearchContext.java +++ b/server/src/main/java/org/opensearch/search/internal/SearchContext.java @@ -38,10 +38,11 @@ import org.opensearch.action.search.SearchShardTask; import org.opensearch.action.search.SearchType; import org.opensearch.common.Nullable; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.BigArrays; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.BigArrays; import org.opensearch.index.cache.bitset.BitsetFilterCache; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; @@ -57,6 +58,8 @@ import org.opensearch.search.aggregations.BucketCollectorProcessor; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.SearchContextAggregations; +import org.opensearch.search.aggregations.bucket.LocalBucketCountThresholds; +import org.opensearch.search.aggregations.bucket.terms.TermsAggregator; import org.opensearch.search.collapse.CollapseContext; import org.opensearch.search.dfs.DfsSearchResult; import org.opensearch.search.fetch.FetchPhase; @@ -88,8 +91,9 @@ * shards point in time snapshot (IndexReader / ContextIndexSearcher) and allows passing on * state from one query / fetch phase to another. 
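ReaderContext's javadoc above describes guarding a point-in-time reader with a reference count. A minimal sketch of that discipline on top of AbstractRefCounted, assuming its usual tryIncRef()/decRef()/closeInternal() contract; GuardedResource is an illustrative stand-in, not the real class:

import org.opensearch.common.util.concurrent.AbstractRefCounted;

final class GuardedResource extends AbstractRefCounted {
    GuardedResource() {
        super("guarded-resource");
    }

    @Override
    protected void closeInternal() {
        // release the underlying IndexReader / IndexSearcher here
    }

    void useSafely(Runnable work) {
        // take a reference before touching the resource; fail if already closed
        if (tryIncRef() == false) {
            throw new IllegalStateException("already closed");
        }
        try {
            work.run();
        } finally {
            decRef(); // the last decRef() triggers closeInternal()
        }
    }
}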
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class SearchContext implements Releasable { public static final int DEFAULT_TERMINATE_AFTER = 0; @@ -184,6 +188,10 @@ public final void close() { public abstract void highlight(SearchHighlightContext highlight); + public boolean hasInnerHits() { + return innerHitsContext != null; + } + public InnerHitsContext innerHits() { if (innerHitsContext == null) { innerHitsContext = new InnerHitsContext(); @@ -283,7 +291,7 @@ public final void assignRescoreDocIds(RescoreDocIds rescoreDocIds) { /** * Indicates if the current index should perform frequent low level search cancellation check. - * + * <p> * Enabling low-level checks will make long running searches to react to the cancellation request faster. However, * since it will produce more cancellation checks it might slow the search performance down. */ @@ -301,6 +309,29 @@ public final void assignRescoreDocIds(RescoreDocIds rescoreDocIds) { public abstract boolean trackScores(); + /** + * Determines whether named queries' scores should be included in the search results. + * By default, this is set to return false, indicating that scores from named queries are not included. + * + * @param includeNamedQueriesScore true to include scores from named queries, false otherwise. + */ + public SearchContext includeNamedQueriesScore(boolean includeNamedQueriesScore) { + // Default implementation does nothing and returns this for chaining. + // Implementations of SearchContext should override this method to actually store the value. + return this; + } + + /** + * Checks if scores from named queries are included in the search results. + * + * @return true if scores from named queries are included, false otherwise. + */ + public boolean includeNamedQueriesScore() { + // Default implementation returns false. + // Implementations of SearchContext should override this method to return the actual value. + return false; + } + public abstract SearchContext trackTotalHitsUpTo(int trackTotalHits); /** @@ -396,10 +427,20 @@ public final void assignRescoreDocIds(RescoreDocIds rescoreDocIds) { /** * Returns concurrent segment search status for the search context */ - public boolean isConcurrentSegmentSearchEnabled() { + public boolean shouldUseConcurrentSearch() { return false; } + /** + * Returns local bucket count thresholds based on concurrent segment search status + */ + public LocalBucketCountThresholds asLocalBucketCountThresholds(TermsAggregator.BucketCountThresholds bucketCountThresholds) { + return new LocalBucketCountThresholds( + shouldUseConcurrentSearch() ? 0 : bucketCountThresholds.getShardMinDocCount(), + bucketCountThresholds.getShardSize() + ); + } + /** * Adds a releasable that will be freed when this context is closed. 
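The asLocalBucketCountThresholds() default above relaxes shard_min_doc_count to 0 whenever concurrent segment search is in play. A plausible reading: each slice sees only part of the shard, so per-slice pruning could drop buckets that would clear the threshold after the slice-level reduce, and the pruning is therefore deferred. Restated as a one-liner in a hypothetical helper:

final class BucketThresholdExample {
    // Defer shard_min_doc_count pruning to the reduce step under concurrent search.
    static long localShardMinDocCount(boolean concurrentSearch, long shardMinDocCount) {
        return concurrentSearch ? 0L : shardMinDocCount;
    }
}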
*/ @@ -471,4 +512,8 @@ public String toString() { public abstract void setBucketCollectorProcessor(BucketCollectorProcessor bucketCollectorProcessor); public abstract BucketCollectorProcessor bucketCollectorProcessor(); + + public abstract int getTargetMaxSliceCount(); + + public abstract boolean shouldUseTimeSeriesDescSortOptimization(); } diff --git a/server/src/main/java/org/opensearch/search/internal/ShardSearchContextId.java b/server/src/main/java/org/opensearch/search/internal/ShardSearchContextId.java index 3e0d2b1d6c638..c6c09ff54241b 100644 --- a/server/src/main/java/org/opensearch/search/internal/ShardSearchContextId.java +++ b/server/src/main/java/org/opensearch/search/internal/ShardSearchContextId.java @@ -32,6 +32,7 @@ package org.opensearch.search.internal; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -42,8 +43,9 @@ /** * Used to support Point in Time Searching * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ShardSearchContextId implements Writeable { private final String sessionId; private final long id; diff --git a/server/src/main/java/org/opensearch/search/internal/ShardSearchRequest.java b/server/src/main/java/org/opensearch/search/internal/ShardSearchRequest.java index d2f6bc234e752..de1d5fb8b4098 100644 --- a/server/src/main/java/org/opensearch/search/internal/ShardSearchRequest.java +++ b/server/src/main/java/org/opensearch/search/internal/ShardSearchRequest.java @@ -43,21 +43,23 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.CheckedFunction; import org.opensearch.common.Nullable; -import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.Strings; -import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.core.xcontent.ToXContent; import org.opensearch.index.query.BoolQueryBuilder; import org.opensearch.index.query.MatchNoneQueryBuilder; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryRewriteContext; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.query.Rewriteable; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.AliasFilterParsingException; import org.opensearch.indices.InvalidAliasNameException; import org.opensearch.search.Scroll; @@ -66,12 +68,11 @@ import org.opensearch.search.query.QuerySearchResult; import org.opensearch.search.sort.FieldSortBuilder; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; import org.opensearch.transport.TransportRequest; import java.io.IOException; -import java.util.Collections; import java.util.Arrays; +import java.util.Collections; import java.util.Map; import java.util.function.Function; @@ -82,8 +83,9 @@ * It provides all the methods 
that the {@link SearchContext} needs. * Provides a cache key based on its content that can be used to cache shard level response. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ShardSearchRequest extends TransportRequest implements IndicesRequest { public static final ToXContent.Params FORMAT_PARAMS = new ToXContent.MapParams(Collections.singletonMap("pretty", "false")); diff --git a/server/src/main/java/org/opensearch/search/internal/SubSearchContext.java b/server/src/main/java/org/opensearch/search/internal/SubSearchContext.java index 55315013ea8c9..b2c97baf78d91 100644 --- a/server/src/main/java/org/opensearch/search/internal/SubSearchContext.java +++ b/server/src/main/java/org/opensearch/search/internal/SubSearchContext.java @@ -82,6 +82,8 @@ public class SubSearchContext extends FilteredSearchContext { private boolean explain; private boolean trackScores; + + private boolean includeNamedQueriesScore; private boolean version; private boolean seqNoAndPrimaryTerm; @@ -234,6 +236,17 @@ public boolean trackScores() { return trackScores; } + @Override + public SearchContext includeNamedQueriesScore(boolean includeNamedQueriesScore) { + this.includeNamedQueriesScore = includeNamedQueriesScore; + return this; + } + + @Override + public boolean includeNamedQueriesScore() { + return includeNamedQueriesScore; + } + @Override public SearchContext parsedPostFilter(ParsedQuery postFilter) { throw new UnsupportedOperationException("Not supported"); diff --git a/server/src/main/java/org/opensearch/search/lookup/DocLookup.java b/server/src/main/java/org/opensearch/search/lookup/DocLookup.java index 069d9dd02ce2b..8de49e8d026f6 100644 --- a/server/src/main/java/org/opensearch/search/lookup/DocLookup.java +++ b/server/src/main/java/org/opensearch/search/lookup/DocLookup.java @@ -32,6 +32,7 @@ package org.opensearch.search.lookup; import org.apache.lucene.index.LeafReaderContext; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; @@ -41,8 +42,9 @@ /** * Looks up a document * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DocLookup { private final MapperService mapperService; diff --git a/server/src/main/java/org/opensearch/search/lookup/LeafDocLookup.java b/server/src/main/java/org/opensearch/search/lookup/LeafDocLookup.java index fe911ed799646..70cbd8d7ad6c3 100644 --- a/server/src/main/java/org/opensearch/search/lookup/LeafDocLookup.java +++ b/server/src/main/java/org/opensearch/search/lookup/LeafDocLookup.java @@ -33,6 +33,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.opensearch.ExceptionsHelper; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.index.mapper.MappedFieldType; @@ -50,8 +51,9 @@ /** * Looks up a doc from a leaf reader * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class LeafDocLookup implements Map<String, ScriptDocValues<?>> { private final Map<String, ScriptDocValues<?>> localCacheFieldData = new HashMap<>(4); @@ -76,6 +78,7 @@ public void setDocument(int docId) { this.docId = docId; } + @SuppressWarnings("removal") @Override public ScriptDocValues<?> get(Object key) { // assume its a string... 
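With LeafDocLookup now public above, script-style doc value access looks roughly like the sketch below; the field name is invented and firstValue is a hypothetical helper:

import org.opensearch.index.fielddata.ScriptDocValues;
import org.opensearch.search.lookup.LeafDocLookup;

final class DocValuesAccessExample {
    // Position the lookup on a document, then read the cached ScriptDocValues for a field.
    static Object firstValue(LeafDocLookup doc, int docId) {
        doc.setDocument(docId);
        ScriptDocValues<?> values = doc.get("timestamp");
        return values == null || values.isEmpty() ? null : values.get(0);
    }
}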
diff --git a/server/src/main/java/org/opensearch/search/lookup/LeafFieldsLookup.java b/server/src/main/java/org/opensearch/search/lookup/LeafFieldsLookup.java index 47bb8754a5803..23b1b311f7d93 100644 --- a/server/src/main/java/org/opensearch/search/lookup/LeafFieldsLookup.java +++ b/server/src/main/java/org/opensearch/search/lookup/LeafFieldsLookup.java @@ -33,6 +33,7 @@ import org.apache.lucene.index.LeafReader; import org.opensearch.OpenSearchParseException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.fieldvisitor.SingleFieldsVisitor; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; @@ -50,8 +51,9 @@ /** * looks up multiple leaf fields * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class LeafFieldsLookup implements Map { private final MapperService mapperService; diff --git a/server/src/main/java/org/opensearch/search/lookup/LeafSearchLookup.java b/server/src/main/java/org/opensearch/search/lookup/LeafSearchLookup.java index 1c87f26053060..33fd89a305fe1 100644 --- a/server/src/main/java/org/opensearch/search/lookup/LeafSearchLookup.java +++ b/server/src/main/java/org/opensearch/search/lookup/LeafSearchLookup.java @@ -33,6 +33,7 @@ package org.opensearch.search.lookup; import org.apache.lucene.index.LeafReaderContext; +import org.opensearch.common.annotation.PublicApi; import java.util.HashMap; import java.util.Map; @@ -42,8 +43,9 @@ /** * Per-segment version of {@link SearchLookup}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class LeafSearchLookup { final LeafReaderContext ctx; diff --git a/server/src/main/java/org/opensearch/search/lookup/LeafTermFrequencyLookup.java b/server/src/main/java/org/opensearch/search/lookup/LeafTermFrequencyLookup.java new file mode 100644 index 0000000000000..d02313ada1db9 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/lookup/LeafTermFrequencyLookup.java @@ -0,0 +1,62 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.lookup; + +import org.apache.lucene.search.IndexSearcher; +import org.opensearch.index.query.functionscore.TermFrequencyFunction; +import org.opensearch.index.query.functionscore.TermFrequencyFunctionFactory; +import org.opensearch.index.query.functionscore.TermFrequencyFunctionFactory.TermFrequencyFunctionName; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; + +/** + * Looks up term frequency per-segment + * + * @opensearch.internal + */ +public class LeafTermFrequencyLookup { + + private final IndexSearcher indexSearcher; + private final LeafSearchLookup leafLookup; + private final Map<String, TermFrequencyFunction> termFreqCache; + + public LeafTermFrequencyLookup(IndexSearcher indexSearcher, LeafSearchLookup leafLookup) { + this.indexSearcher = indexSearcher; + this.leafLookup = leafLookup; + this.termFreqCache = new HashMap<>(); + } + + public Object getTermFrequency(TermFrequencyFunctionName functionName, String field, String val, int docId) throws IOException { + TermFrequencyFunction termFrequencyFunction = getOrCreateTermFrequencyFunction(functionName, field, val); + return termFrequencyFunction.execute(docId); + } + + private TermFrequencyFunction getOrCreateTermFrequencyFunction(TermFrequencyFunctionName functionName, String field, String val) + throws IOException { + String cacheKey = (val == null) + ? String.format(Locale.ROOT, "%s-%s", functionName, field) + : String.format(Locale.ROOT, "%s-%s-%s", functionName, field, val); + + if (!termFreqCache.containsKey(cacheKey)) { + TermFrequencyFunction termFrequencyFunction = TermFrequencyFunctionFactory.createFunction( + functionName, + field, + val, + leafLookup.ctx, + indexSearcher + ); + termFreqCache.put(cacheKey, termFrequencyFunction); + } + + return termFreqCache.get(cacheKey); + } +} diff --git a/server/src/main/java/org/opensearch/search/lookup/SearchLookup.java b/server/src/main/java/org/opensearch/search/lookup/SearchLookup.java index 898d34dc02c01..906616eb9ba5f 100644 --- a/server/src/main/java/org/opensearch/search/lookup/SearchLookup.java +++ b/server/src/main/java/org/opensearch/search/lookup/SearchLookup.java @@ -33,6 +33,7 @@ package org.opensearch.search.lookup; import org.apache.lucene.index.LeafReaderContext; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; @@ -47,8 +48,9 @@ /** * Orchestrator class for search phase lookups * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SearchLookup { /** * The maximum depth of field dependencies. @@ -58,6 +60,12 @@ public class SearchLookup { */ private static final int MAX_FIELD_CHAIN_DEPTH = 5; + /** + * This constant should be used in cases when shard id is unknown. + * Mostly it should be used in tests. + */ + public static final int UNKNOWN_SHARD_ID = -1; + /** * The chain of fields for which this lookup was created, used for detecting * loops caused by runtime fields referring to other runtime fields. The chain is empty @@ -72,14 +80,27 @@ public class SearchLookup { private final SourceLookup sourceLookup; private final FieldsLookup fieldsLookup; private final BiFunction<MappedFieldType, Supplier<SearchLookup>, IndexFieldData<?>> fieldDataLookup; + private final int shardId; /** - * Create the top level field lookup for a search request. 
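LeafTermFrequencyLookup above memoizes one TermFrequencyFunction per (function, field, term) key for the lifetime of a segment. The shape of that cache, generalized; PerSegmentCache and ThrowingFactory are illustrative names. A get/put pair is used rather than Map#computeIfAbsent because the factory throws a checked IOException, which computeIfAbsent cannot propagate cleanly:

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

final class PerSegmentCache<V> {
    interface ThrowingFactory<T> {
        T create() throws IOException;
    }

    private final Map<String, V> cache = new HashMap<>();

    V getOrCreate(String key, ThrowingFactory<V> factory) throws IOException {
        V existing = cache.get(key);
        if (existing == null) {
            existing = factory.create(); // may throw IOException
            cache.put(key, existing);
        }
        return existing;
    }
}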
Provides a way to look up fields from doc_values, - * stored fields, or _source. + * Constructor for backwards compatibility. Use the one with explicit shardId argument. */ + @Deprecated public SearchLookup( MapperService mapperService, BiFunction<MappedFieldType, Supplier<SearchLookup>, IndexFieldData<?>> fieldDataLookup + ) { + this(mapperService, fieldDataLookup, UNKNOWN_SHARD_ID); + } + + /** + * Create the top level field lookup for a search request. Provides a way to look up fields from doc_values, + * stored fields, or _source. + */ + public SearchLookup( + MapperService mapperService, + BiFunction<MappedFieldType, Supplier<SearchLookup>, IndexFieldData<?>> fieldDataLookup, + int shardId ) { this.fieldChain = Collections.emptySet(); docMap = new DocLookup( @@ -89,6 +110,7 @@ public SearchLookup( sourceLookup = new SourceLookup(); fieldsLookup = new FieldsLookup(mapperService); this.fieldDataLookup = fieldDataLookup; + this.shardId = shardId; } /** @@ -107,6 +129,7 @@ private SearchLookup(SearchLookup searchLookup, Set<String> fieldChain) { this.sourceLookup = searchLookup.sourceLookup; this.fieldsLookup = searchLookup.fieldsLookup; this.fieldDataLookup = searchLookup.fieldDataLookup; + this.shardId = searchLookup.shardId; } /** @@ -141,4 +164,11 @@ public DocLookup doc() { public SourceLookup source() { return sourceLookup; } + + public int shardId() { + if (shardId == UNKNOWN_SHARD_ID) { + throw new IllegalStateException("Shard id is unknown for this lookup"); + } + return shardId; + } } diff --git a/server/src/main/java/org/opensearch/search/lookup/SourceLookup.java b/server/src/main/java/org/opensearch/search/lookup/SourceLookup.java index 00fdca4e143ee..cbac29fde7932 100644 --- a/server/src/main/java/org/opensearch/search/lookup/SourceLookup.java +++ b/server/src/main/java/org/opensearch/search/lookup/SourceLookup.java @@ -36,12 +36,13 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.common.CheckedBiConsumer; import org.opensearch.common.Nullable; -import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; import org.opensearch.common.lucene.index.SequentialStoredFieldsLeafReader; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.support.XContentMapValues; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.MediaType; import org.opensearch.index.fieldvisitor.FieldsVisitor; import org.opensearch.search.fetch.subphase.FetchSourceContext; @@ -58,8 +59,9 @@ /** * Orchestrator class for source lookups * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SourceLookup implements Map { private LeafReader reader; @@ -180,7 +182,7 @@ public List<Object> extractRawValues(String path) { /** * For the provided path, return its value in the source. - * + * <p> * Note that in contrast with {@link SourceLookup#extractRawValues}, array and object values * can be returned. 
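SearchLookup's new shardId plumbing above uses a sentinel because a primitive int cannot be null: UNKNOWN_SHARD_ID marks "not provided" (mostly tests, via the deprecated constructor), and the accessor refuses to leak it into shard-routing arithmetic. The pattern in isolation, with a hypothetical holder class:

final class ShardIdHolder {
    static final int UNKNOWN_SHARD_ID = -1;

    private final int shardId;

    ShardIdHolder(int shardId) {
        this.shardId = shardId;
    }

    int shardId() {
        // Fail loudly instead of letting the sentinel escape as a real shard id.
        if (shardId == UNKNOWN_SHARD_ID) {
            throw new IllegalStateException("Shard id is unknown for this lookup");
        }
        return shardId;
    }
}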
* diff --git a/server/src/main/java/org/opensearch/search/pipeline/Pipeline.java b/server/src/main/java/org/opensearch/search/pipeline/Pipeline.java index d4292b85b20a5..c88dfb2060393 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/Pipeline.java +++ b/server/src/main/java/org/opensearch/search/pipeline/Pipeline.java @@ -16,11 +16,13 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.Nullable; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.search.SearchPhaseResult; +import java.io.IOException; import java.util.Collections; import java.util.List; import java.util.concurrent.TimeUnit; @@ -117,106 +119,161 @@ protected void afterResponseProcessor(Processor processor, long timeInNanos) {} protected void onResponseProcessorFailed(Processor processor) {} - SearchRequest transformRequest(SearchRequest request) throws SearchPipelineProcessingException { - if (searchRequestProcessors.isEmpty() == false) { - long pipelineStart = relativeTimeSupplier.getAsLong(); - beforeTransformRequest(); - try { - try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) { - request.writeTo(bytesStreamOutput); - try (StreamInput in = bytesStreamOutput.bytes().streamInput()) { - try (StreamInput input = new NamedWriteableAwareStreamInput(in, namedWriteableRegistry)) { - request = new SearchRequest(input); - } - } - } - for (SearchRequestProcessor processor : searchRequestProcessors) { - beforeRequestProcessor(processor); - long start = relativeTimeSupplier.getAsLong(); - try { - request = processor.processRequest(request); - } catch (Exception e) { - onRequestProcessorFailed(processor); - if (processor.isIgnoreFailure()) { - logger.warn( - "The exception from request processor [" - + processor.getType() - + "] in the search pipeline [" - + id - + "] was ignored", - e - ); - } else { - throw e; - } - } finally { - long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - start); - afterRequestProcessor(processor, took); - } + void transformRequest(SearchRequest request, ActionListener<SearchRequest> requestListener, PipelineProcessingContext requestContext) + throws SearchPipelineProcessingException { + if (searchRequestProcessors.isEmpty()) { + requestListener.onResponse(request); + return; + } + + try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) { + request.writeTo(bytesStreamOutput); + try (StreamInput in = bytesStreamOutput.bytes().streamInput()) { + try (StreamInput input = new NamedWriteableAwareStreamInput(in, namedWriteableRegistry)) { + request = new SearchRequest(input); } - } catch (Exception e) { - onTransformRequestFailure(); - throw new SearchPipelineProcessingException(e); - } finally { - long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - pipelineStart); - afterTransformRequest(took); } + } catch (IOException e) { + requestListener.onFailure(new SearchPipelineProcessingException(e)); + return; } - return request; + + ActionListener<SearchRequest> finalListener = getTerminalSearchRequestActionListener(requestListener, requestContext); + + // Chain listeners back-to-front + ActionListener<SearchRequest> currentListener = finalListener; + for (int i = searchRequestProcessors.size() - 1; i >= 0; i--) { + 
final ActionListener<SearchRequest> nextListener = currentListener; + SearchRequestProcessor processor = searchRequestProcessors.get(i); + currentListener = ActionListener.wrap(r -> { + long start = relativeTimeSupplier.getAsLong(); + beforeRequestProcessor(processor); + processor.processRequestAsync(r, requestContext, ActionListener.wrap(rr -> { + long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - start); + afterRequestProcessor(processor, took); + nextListener.onResponse(rr); + }, e -> { + long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - start); + afterRequestProcessor(processor, took); + onRequestProcessorFailed(processor); + if (processor.isIgnoreFailure()) { + logger.warn( + "The exception from request processor [" + + processor.getType() + + "] in the search pipeline [" + + id + + "] was ignored", + e + ); + nextListener.onResponse(r); + } else { + nextListener.onFailure(new SearchPipelineProcessingException(e)); + } + })); + }, finalListener::onFailure); + } + + beforeTransformRequest(); + currentListener.onResponse(request); } - SearchResponse transformResponse(SearchRequest request, SearchResponse response) throws SearchPipelineProcessingException { - if (searchResponseProcessors.isEmpty() == false) { - long pipelineStart = relativeTimeSupplier.getAsLong(); - beforeTransformResponse(); - try { - for (SearchResponseProcessor processor : searchResponseProcessors) { - beforeResponseProcessor(processor); - long start = relativeTimeSupplier.getAsLong(); - try { - response = processor.processResponse(request, response); - } catch (Exception e) { - onResponseProcessorFailed(processor); - if (processor.isIgnoreFailure()) { - logger.warn( - "The exception from response processor [" - + processor.getType() - + "] in the search pipeline [" - + id - + "] was ignored", - e - ); - } else { - throw e; - } - } finally { - long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - start); - afterResponseProcessor(processor, took); + private ActionListener<SearchRequest> getTerminalSearchRequestActionListener( + ActionListener<SearchRequest> requestListener, + PipelineProcessingContext requestContext + ) { + final long pipelineStart = relativeTimeSupplier.getAsLong(); + + return ActionListener.wrap(r -> { + long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - pipelineStart); + afterTransformRequest(took); + requestListener.onResponse(new PipelinedRequest(this, r, requestContext)); + }, e -> { + long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - pipelineStart); + afterTransformRequest(took); + onTransformRequestFailure(); + requestListener.onFailure(new SearchPipelineProcessingException(e)); + }); + } + + ActionListener<SearchResponse> transformResponseListener( + SearchRequest request, + ActionListener<SearchResponse> responseListener, + PipelineProcessingContext requestContext + ) { + if (searchResponseProcessors.isEmpty()) { + // No response transformation necessary + return responseListener; + } + + long[] pipelineStart = new long[1]; + + final ActionListener<SearchResponse> originalListener = responseListener; + responseListener = ActionListener.wrap(r -> { + long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - pipelineStart[0]); + afterTransformResponse(took); + originalListener.onResponse(r); + }, e -> { + long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - pipelineStart[0]); + afterTransformResponse(took); + 
onTransformResponseFailure(); + originalListener.onFailure(e); + }); + ActionListener<SearchResponse> finalListener = responseListener; // Jump directly to this one on exception. + + for (int i = searchResponseProcessors.size() - 1; i >= 0; i--) { + final ActionListener<SearchResponse> currentFinalListener = responseListener; + final SearchResponseProcessor processor = searchResponseProcessors.get(i); + + responseListener = ActionListener.wrap(r -> { + beforeResponseProcessor(processor); + final long start = relativeTimeSupplier.getAsLong(); + processor.processResponseAsync(request, r, requestContext, ActionListener.wrap(rr -> { + long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - start); + afterResponseProcessor(processor, took); + currentFinalListener.onResponse(rr); + }, e -> { + onResponseProcessorFailed(processor); + long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - start); + afterResponseProcessor(processor, took); + if (processor.isIgnoreFailure()) { + logger.warn( + "The exception from response processor [" + + processor.getType() + + "] in the search pipeline [" + + id + + "] was ignored", + e + ); + // Pass the previous response through to the next processor in the chain + currentFinalListener.onResponse(r); + } else { + currentFinalListener.onFailure(new SearchPipelineProcessingException(e)); } - } - } catch (Exception e) { - onTransformResponseFailure(); - throw new SearchPipelineProcessingException(e); - } finally { - long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - pipelineStart); - afterTransformResponse(took); - } + })); + }, finalListener::onFailure); } - return response; + final ActionListener<SearchResponse> chainListener = responseListener; + return ActionListener.wrap(r -> { + beforeTransformResponse(); + pipelineStart[0] = relativeTimeSupplier.getAsLong(); + chainListener.onResponse(r); + }, originalListener::onFailure); + } <Result extends SearchPhaseResult> void runSearchPhaseResultsTransformer( SearchPhaseResults<Result> searchPhaseResult, SearchPhaseContext context, String currentPhase, - String nextPhase + String nextPhase, + PipelineProcessingContext requestContext ) throws SearchPipelineProcessingException { try { for (SearchPhaseResultsProcessor searchPhaseResultsProcessor : searchPhaseResultsProcessors) { if (currentPhase.equals(searchPhaseResultsProcessor.getBeforePhase().getName()) && nextPhase.equals(searchPhaseResultsProcessor.getAfterPhase().getName())) { try { - searchPhaseResultsProcessor.process(searchPhaseResult, context); + searchPhaseResultsProcessor.process(searchPhaseResult, context, requestContext); } catch (Exception e) { if (searchPhaseResultsProcessor.isIgnoreFailure()) { logger.warn( diff --git a/server/src/main/java/org/opensearch/search/pipeline/PipelineConfiguration.java b/server/src/main/java/org/opensearch/search/pipeline/PipelineConfiguration.java index b4f6549c83390..2ed770be60458 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/PipelineConfiguration.java +++ b/server/src/main/java/org/opensearch/search/pipeline/PipelineConfiguration.java @@ -11,15 +11,17 @@ import org.opensearch.Version; import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.Diff; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.common.annotation.PublicApi; import 
org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ContextParser; import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; @@ -30,9 +32,12 @@ /** * TODO: Copied verbatim from {@link org.opensearch.ingest.PipelineConfiguration}. - * + * <p> * See if we can refactor into a common class. I suspect not, just because this one will hold + * + * @opensearch.api */ +@PublicApi(since = "2.7.0") public class PipelineConfiguration extends AbstractDiffable<PipelineConfiguration> implements ToXContentObject { private static final ObjectParser<Builder, Void> PARSER = new ObjectParser<>( "pipeline_config", @@ -120,7 +125,7 @@ public static PipelineConfiguration readFrom(StreamInput in) throws IOException return new PipelineConfiguration( in.readString(), in.readBytesReference(), - in.getVersion().onOrAfter(Version.V_3_0_0) ? in.readMediaType() : in.readEnum(XContentType.class) + in.getVersion().onOrAfter(Version.V_2_10_0) ? in.readMediaType() : in.readEnum(XContentType.class) ); } @@ -130,14 +135,14 @@ public static Diff<PipelineConfiguration> readDiffFrom(StreamInput in) throws IO @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(id); out.writeBytesReference(config); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { mediaType.writeTo(out); } else { out.writeEnum((XContentType) mediaType); diff --git a/server/src/main/java/org/opensearch/search/pipeline/PipelineProcessingContext.java b/server/src/main/java/org/opensearch/search/pipeline/PipelineProcessingContext.java new file mode 100644 index 0000000000000..a1f2b8b99d958 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/pipeline/PipelineProcessingContext.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline; + +import java.util.HashMap; +import java.util.Map; + +/** + * A holder for state that is passed through each processor in the pipeline. + */ +public class PipelineProcessingContext { + private final Map<String, Object> attributes = new HashMap<>(); + + /** + * Set a generic attribute in the state for this request. Overwrites any existing value. + * + * @param name the name of the attribute to set + * @param value the value to set on the attribute + */ + public void setAttribute(String name, Object value) { + attributes.put(name, value); + } + + /** + * Retrieves a generic attribute value from the state for this request. 
+ * @param name the name of the attribute + * @return the value of the attribute if previously set (and null otherwise) + */ + public Object getAttribute(String name) { + return attributes.get(name); + } +} diff --git a/server/src/main/java/org/opensearch/search/pipeline/PipelineWithMetrics.java b/server/src/main/java/org/opensearch/search/pipeline/PipelineWithMetrics.java index 7d1bac4463117..827941a8ea81d 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/PipelineWithMetrics.java +++ b/server/src/main/java/org/opensearch/search/pipeline/PipelineWithMetrics.java @@ -21,8 +21,8 @@ import java.util.Map; import java.util.function.LongSupplier; -import static org.opensearch.ingest.ConfigurationUtils.TAG_KEY; import static org.opensearch.ingest.ConfigurationUtils.IGNORE_FAILURE_KEY; +import static org.opensearch.ingest.ConfigurationUtils.TAG_KEY; import static org.opensearch.ingest.Pipeline.DESCRIPTION_KEY; import static org.opensearch.ingest.Pipeline.VERSION_KEY; diff --git a/server/src/main/java/org/opensearch/search/pipeline/PipelinedRequest.java b/server/src/main/java/org/opensearch/search/pipeline/PipelinedRequest.java index 5a7539808c127..d550fbb768133 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/PipelinedRequest.java +++ b/server/src/main/java/org/opensearch/search/pipeline/PipelinedRequest.java @@ -12,6 +12,7 @@ import org.opensearch.action.search.SearchPhaseResults; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchResponse; +import org.opensearch.core.action.ActionListener; import org.opensearch.search.SearchPhaseResult; /** @@ -21,14 +22,20 @@ */ public final class PipelinedRequest extends SearchRequest { private final Pipeline pipeline; + private final PipelineProcessingContext requestContext; - PipelinedRequest(Pipeline pipeline, SearchRequest transformedRequest) { + PipelinedRequest(Pipeline pipeline, SearchRequest transformedRequest, PipelineProcessingContext requestContext) { super(transformedRequest); this.pipeline = pipeline; + this.requestContext = requestContext; } - public SearchResponse transformResponse(SearchResponse response) { - return pipeline.transformResponse(this, response); + public void transformRequest(ActionListener<SearchRequest> requestListener) { + pipeline.transformRequest(this, requestListener, requestContext); + } + + public ActionListener<SearchResponse> transformResponseListener(ActionListener<SearchResponse> responseListener) { + return pipeline.transformResponseListener(this, responseListener, requestContext); } public <Result extends SearchPhaseResult> void transformSearchPhaseResults( @@ -37,7 +44,7 @@ public <Result extends SearchPhaseResult> void transformSearchPhaseResults( final String currentPhase, final String nextPhase ) { - pipeline.runSearchPhaseResultsTransformer(searchPhaseResult, searchPhaseContext, currentPhase, nextPhase); + pipeline.runSearchPhaseResultsTransformer(searchPhaseResult, searchPhaseContext, currentPhase, nextPhase, requestContext); } // Visible for testing diff --git a/server/src/main/java/org/opensearch/search/pipeline/Processor.java b/server/src/main/java/org/opensearch/search/pipeline/Processor.java index fb33f46acada4..a06383fbe9cef 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/Processor.java +++ b/server/src/main/java/org/opensearch/search/pipeline/Processor.java @@ -15,19 +15,12 @@ * Whether changes are made and what exactly is modified is up to the implementation. 
* <p> * Processors may get called concurrently and thus need to be thread-safe. - * + * <p> * TODO: Refactor {@link org.opensearch.ingest.Processor} to extend this interface, and specialize to IngestProcessor. * * @opensearch.internal */ public interface Processor { - /** - * Processor configuration key to let the factory know the context for pipeline creation. - * <p> - * See {@link PipelineSource}. - */ - String PIPELINE_SOURCE = "pipeline_source"; - /** * Gets the type of processor */ diff --git a/server/src/main/java/org/opensearch/search/pipeline/ProcessorInfo.java b/server/src/main/java/org/opensearch/search/pipeline/ProcessorInfo.java index 0206b9b6cf716..0b80cdbef6669 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/ProcessorInfo.java +++ b/server/src/main/java/org/opensearch/search/pipeline/ProcessorInfo.java @@ -18,7 +18,7 @@ /** * Information about a search pipeline processor - * + * <p> * TODO: This is copy/pasted from the ingest ProcessorInfo. * Can/should we share implementation or is this just boilerplate? * diff --git a/server/src/main/java/org/opensearch/search/pipeline/SearchPhaseResultsProcessor.java b/server/src/main/java/org/opensearch/search/pipeline/SearchPhaseResultsProcessor.java index 772dc8758bace..a64266cfb2a2b 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/SearchPhaseResultsProcessor.java +++ b/server/src/main/java/org/opensearch/search/pipeline/SearchPhaseResultsProcessor.java @@ -32,6 +32,22 @@ <Result extends SearchPhaseResult> void process( final SearchPhaseContext searchPhaseContext ); + /** + * Processes the {@link SearchPhaseResults} obtained from a SearchPhase which will be returned to the next + * SearchPhase. Receives the {@link PipelineProcessingContext} passed to other processors. + * @param searchPhaseResult {@link SearchPhaseResults} + * @param searchPhaseContext {@link SearchPhaseContext} + * @param requestContext {@link PipelineProcessingContext} + * @param <Result> {@link SearchPhaseResult} + */ + default <Result extends SearchPhaseResult> void process( + final SearchPhaseResults<Result> searchPhaseResult, + final SearchPhaseContext searchPhaseContext, + final PipelineProcessingContext requestContext + ) { + process(searchPhaseResult, searchPhaseContext); + } + + /** + * The phase which should have run before, this processor can start executing. 
* @return {@link SearchPhaseName} diff --git a/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineInfo.java b/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineInfo.java index b2075a51ff732..c60638e80ddbe 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineInfo.java +++ b/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineInfo.java @@ -11,8 +11,8 @@ import org.opensearch.Version; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.service.ReportingService; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.node.ReportingService; import java.io.IOException; import java.util.Collections; diff --git a/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineMetadata.java b/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineMetadata.java index e8842cd67a895..56f8399c22412 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineMetadata.java +++ b/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineMetadata.java @@ -13,9 +13,9 @@ import org.opensearch.cluster.DiffableUtils; import org.opensearch.cluster.NamedDiff; import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; diff --git a/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java b/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java index 1066d836e5183..2175b5d135394 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java +++ b/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java @@ -13,7 +13,6 @@ import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchParseException; import org.opensearch.ResourceNotFoundException; -import org.opensearch.action.ActionListener; import org.opensearch.action.search.DeleteSearchPipelineRequest; import org.opensearch.action.search.PutSearchPipelineRequest; import org.opensearch.action.search.SearchRequest; @@ -34,14 +33,15 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.service.ReportingService; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; import org.opensearch.gateway.GatewayService; import org.opensearch.index.IndexSettings; import org.opensearch.index.analysis.AnalysisRegistry; import org.opensearch.ingest.ConfigurationUtils; -import org.opensearch.node.ReportingService; import org.opensearch.plugins.SearchPipelinePlugin; import org.opensearch.script.ScriptService; import org.opensearch.threadpool.ThreadPool; @@ -408,8 +408,8 @@ public PipelinedRequest resolvePipeline(SearchRequest searchRequest) { pipeline = pipelineHolder.pipeline; } } - SearchRequest transformedRequest = pipeline.transformRequest(searchRequest); - return new PipelinedRequest(pipeline, transformedRequest); + PipelineProcessingContext 
requestContext = new PipelineProcessingContext(); + return new PipelinedRequest(pipeline, searchRequest, requestContext); } Map<String, Processor.Factory<SearchRequestProcessor>> getRequestProcessorFactories() { diff --git a/server/src/main/java/org/opensearch/search/pipeline/SearchRequestProcessor.java b/server/src/main/java/org/opensearch/search/pipeline/SearchRequestProcessor.java index c236cde1a5cc0..30adc9b0afbe8 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/SearchRequestProcessor.java +++ b/server/src/main/java/org/opensearch/search/pipeline/SearchRequestProcessor.java @@ -9,10 +9,50 @@ package org.opensearch.search.pipeline; import org.opensearch.action.search.SearchRequest; +import org.opensearch.core.action.ActionListener; /** * Interface for a search pipeline processor that modifies a search request. */ public interface SearchRequestProcessor extends Processor { + /** + * Process a SearchRequest without receiving request-scoped state. + * Implement this method if the processor makes no asynchronous calls. + * @param request the search request (which may have been modified by an earlier processor) + * @return the modified search request + * @throws Exception implementation-specific processing exception + */ SearchRequest processRequest(SearchRequest request) throws Exception; + + /** + * Process a SearchRequest, with request-scoped state shared across processors in the pipeline + * Implement this method if the processor makes no asynchronous calls. + * @param request the search request (which may have been modified by an earlier processor) + * @param requestContext request-scoped state shared across processors in the pipeline + * @return the modified search request + * @throws Exception implementation-specific processing exception + */ + default SearchRequest processRequest(SearchRequest request, PipelineProcessingContext requestContext) throws Exception { + return processRequest(request); + } + + /** + * Transform a {@link SearchRequest}. Executed on the coordinator node before any {@link org.opensearch.action.search.SearchPhase} + * executes. + * <p> + * Expert method: Implement this if the processor needs to make asynchronous calls. Otherwise, implement processRequest. + * @param request the executed {@link SearchRequest} + * @param requestListener callback to be invoked on successful processing or on failure + */ + default void processRequestAsync( + SearchRequest request, + PipelineProcessingContext requestContext, + ActionListener<SearchRequest> requestListener + ) { + try { + requestListener.onResponse(processRequest(request, requestContext)); + } catch (Exception e) { + requestListener.onFailure(e); + } + } } diff --git a/server/src/main/java/org/opensearch/search/pipeline/SearchResponseProcessor.java b/server/src/main/java/org/opensearch/search/pipeline/SearchResponseProcessor.java index 2f22cedb9b5c0..98591ab9d0def 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/SearchResponseProcessor.java +++ b/server/src/main/java/org/opensearch/search/pipeline/SearchResponseProcessor.java @@ -10,10 +10,60 @@ import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchResponse; +import org.opensearch.core.action.ActionListener; /** * Interface for a search pipeline processor that modifies a search response. */ public interface SearchResponseProcessor extends Processor { + + /** + * Transform a {@link SearchResponse}, possibly based on the executed {@link SearchRequest}. 
+ * <p> + * Implement this method if the processor makes no asynchronous calls. + * + * @param request the executed {@link SearchRequest} + * @param response the current {@link SearchResponse}, possibly modified by earlier processors + * @return a modified {@link SearchResponse} (or the input {@link SearchResponse} if no changes) + * @throws Exception if an error occurs during processing + */ SearchResponse processResponse(SearchRequest request, SearchResponse response) throws Exception; + + /** + * Process a SearchResponse, with request-scoped state shared across processors in the pipeline + * <p> + * Implement this method if the processor makes no asynchronous calls. + * + * @param request the (maybe transformed) search request + * @param response the search response (which may have been modified by an earlier processor) + * @param requestContext request-scoped state shared across processors in the pipeline + * @return the modified search response + * @throws Exception implementation-specific processing exception + */ + default SearchResponse processResponse(SearchRequest request, SearchResponse response, PipelineProcessingContext requestContext) + throws Exception { + return processResponse(request, response); + } + + /** + * Transform a {@link SearchResponse}, possibly based on the executed {@link SearchRequest}. + * <p> + * Expert method: Implement this if the processor needs to make asynchronous calls. Otherwise, implement processResponse. + * + * @param request the executed {@link SearchRequest} + * @param response the current {@link SearchResponse}, possibly modified by earlier processors + * @param responseListener callback to be invoked on successful processing or on failure + */ + default void processResponseAsync( + SearchRequest request, + SearchResponse response, + PipelineProcessingContext requestContext, + ActionListener<SearchResponse> responseListener + ) { + try { + responseListener.onResponse(processResponse(request, response, requestContext)); + } catch (Exception e) { + responseListener.onFailure(e); + } + } } diff --git a/server/src/main/java/org/opensearch/search/pipeline/StatefulSearchRequestProcessor.java b/server/src/main/java/org/opensearch/search/pipeline/StatefulSearchRequestProcessor.java new file mode 100644 index 0000000000000..67e1c1147cb87 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/pipeline/StatefulSearchRequestProcessor.java @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline; + +import org.opensearch.action.search.SearchRequest; + +/** + * A specialization of {@link SearchRequestProcessor} that makes use of the request-scoped processor state. + * Implementors must implement the processRequest method that accepts request-scoped processor state. 
+ */ +public interface StatefulSearchRequestProcessor extends SearchRequestProcessor { + @Override + default SearchRequest processRequest(SearchRequest request) { + throw new UnsupportedOperationException(); + } + + @Override + SearchRequest processRequest(SearchRequest request, PipelineProcessingContext requestContext) throws Exception; +} diff --git a/server/src/main/java/org/opensearch/search/pipeline/StatefulSearchResponseProcessor.java b/server/src/main/java/org/opensearch/search/pipeline/StatefulSearchResponseProcessor.java new file mode 100644 index 0000000000000..f0842d24e1b56 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/pipeline/StatefulSearchResponseProcessor.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline; + +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; + +/** + * A specialization of {@link SearchResponseProcessor} that makes use of the request-scoped processor state. + * Implementors must implement the processResponse method that accepts request-scoped processor state. + */ +public interface StatefulSearchResponseProcessor extends SearchResponseProcessor { + @Override + default SearchResponse processResponse(SearchRequest request, SearchResponse response) { + throw new UnsupportedOperationException(); + } + + @Override + SearchResponse processResponse(SearchRequest request, SearchResponse response, PipelineProcessingContext requestContext) + throws Exception; +} diff --git a/server/src/main/java/org/opensearch/search/profile/AbstractInternalProfileTree.java b/server/src/main/java/org/opensearch/search/profile/AbstractInternalProfileTree.java index 4d0949624ebed..904b04b249b1b 100644 --- a/server/src/main/java/org/opensearch/search/profile/AbstractInternalProfileTree.java +++ b/server/src/main/java/org/opensearch/search/profile/AbstractInternalProfileTree.java @@ -70,7 +70,7 @@ public AbstractInternalProfileTree() { * Returns a {@link QueryProfileBreakdown} for a scoring query. Scoring queries (e.g. those * that are past the rewrite phase and are now being wrapped by createWeight() ) follow * a recursive progression. We can track the dependency tree by a simple stack - * + * <p> * The only hiccup is that the first scoring query will be identical to the last rewritten * query, so we need to take special care to fix that * @@ -109,7 +109,7 @@ public PB getProfileBreakdown(E query) { /** * Helper method to add a new node to the dependency tree. - * + * <p> * Initializes a new list in the dependency tree, saves the query and * generates a new {@link AbstractProfileBreakdown} to track the timings * of this element. @@ -180,6 +180,10 @@ private ProfileResult doGetTree(int token) { // calculating the same times over and over...but worth the effort? 
String type = getTypeFromElement(element); String description = getDescriptionFromElement(element); + return createProfileResult(type, description, breakdown, childrenProfileResults); + } + + protected ProfileResult createProfileResult(String type, String description, PB breakdown, List<ProfileResult> childrenProfileResults) { return new ProfileResult( type, description, diff --git a/server/src/main/java/org/opensearch/search/profile/AbstractProfileBreakdown.java b/server/src/main/java/org/opensearch/search/profile/AbstractProfileBreakdown.java index a29d4f9a0ee20..4a1563e7cdce9 100644 --- a/server/src/main/java/org/opensearch/search/profile/AbstractProfileBreakdown.java +++ b/server/src/main/java/org/opensearch/search/profile/AbstractProfileBreakdown.java @@ -50,8 +50,10 @@ public abstract class AbstractProfileBreakdown<T extends Enum<T>> { /** * The accumulated timings for this query node */ - private final Timer[] timings; - private final T[] timingTypes; + protected final Timer[] timings; + protected final T[] timingTypes; + public static final String TIMING_TYPE_COUNT_SUFFIX = "_count"; + public static final String TIMING_TYPE_START_TIME_SUFFIX = "_start_time"; /** Sole constructor. */ public AbstractProfileBreakdown(Class<T> clazz) { @@ -74,17 +76,11 @@ public void setTimer(T timing, Timer timer) { * Build a timing count breakdown for current instance */ public Map<String, Long> toBreakdownMap() { - return buildBreakdownMap(this); - } - - /** - * Build a timing count breakdown for arbitrary instance - */ - protected final Map<String, Long> buildBreakdownMap(AbstractProfileBreakdown<T> breakdown) { - Map<String, Long> map = new HashMap<>(breakdown.timings.length * 2); - for (T timingType : breakdown.timingTypes) { - map.put(timingType.toString(), breakdown.timings[timingType.ordinal()].getApproximateTiming()); - map.put(timingType.toString() + "_count", breakdown.timings[timingType.ordinal()].getCount()); + Map<String, Long> map = new HashMap<>(this.timings.length * 3); + for (T timingType : this.timingTypes) { + map.put(timingType.toString(), this.timings[timingType.ordinal()].getApproximateTiming()); + map.put(timingType + TIMING_TYPE_COUNT_SUFFIX, this.timings[timingType.ordinal()].getCount()); + map.put(timingType + TIMING_TYPE_START_TIME_SUFFIX, this.timings[timingType.ordinal()].getEarliestTimerStartTime()); } return Collections.unmodifiableMap(map); } @@ -92,11 +88,11 @@ protected final Map<String, Long> buildBreakdownMap(AbstractProfileBreakdown<T> /** * Fetch extra debugging information. */ - protected Map<String, Object> toDebugMap() { + public Map<String, Object> toDebugMap() { return emptyMap(); } - public final long toNodeTime() { + public long toNodeTime() { long total = 0; for (T timingType : timingTypes) { total += timings[timingType.ordinal()].getApproximateTiming(); diff --git a/server/src/main/java/org/opensearch/search/profile/ContextualProfileBreakdown.java b/server/src/main/java/org/opensearch/search/profile/ContextualProfileBreakdown.java index 4f071f730cd45..3fe621321c8ad 100644 --- a/server/src/main/java/org/opensearch/search/profile/ContextualProfileBreakdown.java +++ b/server/src/main/java/org/opensearch/search/profile/ContextualProfileBreakdown.java @@ -8,6 +8,12 @@ package org.opensearch.search.profile; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.Collector; + +import java.util.List; +import java.util.Map; + /** * Provide contextual profile breakdowns which are associated with freestyle context. 
Used when concurrent * search over segments is activated and each collector needs own non-shareable profile breakdown instance. @@ -25,4 +31,8 @@ public ContextualProfileBreakdown(Class<T> clazz) { * @return contextual profile breakdown instance */ public abstract AbstractProfileBreakdown<T> context(Object context); + + public void associateCollectorToLeaves(Collector collector, LeafReaderContext leaf) {} + + public void associateCollectorsToLeaves(Map<Collector, List<LeafReaderContext>> collectorToLeaves) {} } diff --git a/server/src/main/java/org/opensearch/search/profile/NetworkTime.java b/server/src/main/java/org/opensearch/search/profile/NetworkTime.java index 45d8c2883cb4a..89a8836d807ae 100644 --- a/server/src/main/java/org/opensearch/search/profile/NetworkTime.java +++ b/server/src/main/java/org/opensearch/search/profile/NetworkTime.java @@ -9,6 +9,7 @@ package org.opensearch.search.profile; import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -18,8 +19,9 @@ /** * Utility class to track time of network operations * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NetworkTime implements Writeable { private long inboundNetworkTime; private long outboundNetworkTime; diff --git a/server/src/main/java/org/opensearch/search/profile/ProfileResult.java b/server/src/main/java/org/opensearch/search/profile/ProfileResult.java index 89c3d7504de66..2c0d2cf3ba78a 100644 --- a/server/src/main/java/org/opensearch/search/profile/ProfileResult.java +++ b/server/src/main/java/org/opensearch/search/profile/ProfileResult.java @@ -32,11 +32,13 @@ package org.opensearch.search.profile; +import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.TimeValue; import org.opensearch.core.xcontent.InstantiatingObjectParser; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; @@ -44,6 +46,8 @@ import java.io.IOException; import java.util.Collections; +import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Objects; @@ -61,15 +65,22 @@ * Each InternalProfileResult has a List of InternalProfileResults, which will contain * "children" queries if applicable * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ProfileResult implements Writeable, ToXContentObject { static final ParseField TYPE = new ParseField("type"); static final ParseField DESCRIPTION = new ParseField("description"); static final ParseField BREAKDOWN = new ParseField("breakdown"); static final ParseField DEBUG = new ParseField("debug"); static final ParseField NODE_TIME = new ParseField("time"); + static final ParseField MAX_SLICE_NODE_TIME = new ParseField("max_slice_time"); + static final ParseField MIN_SLICE_NODE_TIME = new ParseField("min_slice_time"); + static final ParseField AVG_SLICE_NODE_TIME = new ParseField("avg_slice_time"); static final ParseField NODE_TIME_RAW = new ParseField("time_in_nanos"); + static final ParseField 
MAX_SLICE_NODE_TIME_RAW = new ParseField("max_slice_time_in_nanos"); + static final ParseField MIN_SLICE_NODE_TIME_RAW = new ParseField("min_slice_time_in_nanos"); + static final ParseField AVG_SLICE_NODE_TIME_RAW = new ParseField("avg_slice_time_in_nanos"); static final ParseField CHILDREN = new ParseField("children"); private final String type; @@ -77,6 +88,9 @@ public final class ProfileResult implements Writeable, ToXContentObject { private final Map<String, Long> breakdown; private final Map<String, Object> debug; private final long nodeTime; + private Long maxSliceNodeTime; + private Long minSliceNodeTime; + private Long avgSliceNodeTime; private final List<ProfileResult> children; public ProfileResult( @@ -86,6 +100,20 @@ public ProfileResult( Map<String, Object> debug, long nodeTime, List<ProfileResult> children + ) { + this(type, description, breakdown, debug, nodeTime, children, null, null, null); + } + + public ProfileResult( + String type, + String description, + Map<String, Long> breakdown, + Map<String, Object> debug, + long nodeTime, + List<ProfileResult> children, + Long maxSliceNodeTime, + Long minSliceNodeTime, + Long avgSliceNodeTime ) { this.type = type; this.description = description; @@ -93,6 +121,9 @@ public ProfileResult( this.debug = debug == null ? Map.of() : debug; this.children = children == null ? List.of() : children; this.nodeTime = nodeTime; + this.maxSliceNodeTime = maxSliceNodeTime; + this.minSliceNodeTime = minSliceNodeTime; + this.avgSliceNodeTime = avgSliceNodeTime; } /** @@ -105,6 +136,15 @@ public ProfileResult(StreamInput in) throws IOException { breakdown = in.readMap(StreamInput::readString, StreamInput::readLong); debug = in.readMap(StreamInput::readString, StreamInput::readGenericValue); children = in.readList(ProfileResult::new); + if (in.getVersion().onOrAfter(Version.V_2_10_0)) { + this.maxSliceNodeTime = in.readOptionalLong(); + this.minSliceNodeTime = in.readOptionalLong(); + this.avgSliceNodeTime = in.readOptionalLong(); + } else { + this.maxSliceNodeTime = null; + this.minSliceNodeTime = null; + this.avgSliceNodeTime = null; + } } @Override @@ -115,6 +155,11 @@ public void writeTo(StreamOutput out) throws IOException { out.writeMap(breakdown, StreamOutput::writeString, StreamOutput::writeLong); out.writeMap(debug, StreamOutput::writeString, StreamOutput::writeGenericValue); out.writeList(children); + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { + out.writeOptionalLong(maxSliceNodeTime); + out.writeOptionalLong(minSliceNodeTime); + out.writeOptionalLong(avgSliceNodeTime); + } } /** @@ -154,6 +199,18 @@ public long getTime() { return nodeTime; } + public Long getMaxSliceTime() { + return maxSliceNodeTime; + } + + public Long getMinSliceTime() { + return minSliceNodeTime; + } + + public Long getAvgSliceTime() { + return avgSliceNodeTime; + } + /** * Returns a list of all profiled children queries */ @@ -168,9 +225,27 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(DESCRIPTION.getPreferredName(), description); if (builder.humanReadable()) { builder.field(NODE_TIME.getPreferredName(), new TimeValue(getTime(), TimeUnit.NANOSECONDS).toString()); + if (getMaxSliceTime() != null) { + builder.field(MAX_SLICE_NODE_TIME.getPreferredName(), new TimeValue(getMaxSliceTime(), TimeUnit.NANOSECONDS).toString()); + } + if (getMinSliceTime() != null) { + builder.field(MIN_SLICE_NODE_TIME.getPreferredName(), new TimeValue(getMinSliceTime(), TimeUnit.NANOSECONDS).toString()); + } + if (getAvgSliceTime() 
!= null) { + builder.field(AVG_SLICE_NODE_TIME.getPreferredName(), new TimeValue(getAvgSliceTime(), TimeUnit.NANOSECONDS).toString()); + } } builder.field(NODE_TIME_RAW.getPreferredName(), getTime()); - builder.field(BREAKDOWN.getPreferredName(), breakdown); + if (getMaxSliceTime() != null) { + builder.field(MAX_SLICE_NODE_TIME_RAW.getPreferredName(), getMaxSliceTime()); + } + if (getMinSliceTime() != null) { + builder.field(MIN_SLICE_NODE_TIME_RAW.getPreferredName(), getMinSliceTime()); + } + if (getAvgSliceTime() != null) { + builder.field(AVG_SLICE_NODE_TIME_RAW.getPreferredName(), getAvgSliceTime()); + } + createBreakdownView(builder); if (false == debug.isEmpty()) { builder.field(DEBUG.getPreferredName(), debug); } @@ -186,6 +261,22 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder.endObject(); } + private void createBreakdownView(XContentBuilder builder) throws IOException { + Map<String, Long> modifiedBreakdown = new LinkedHashMap<>(breakdown); + removeStartTimeFields(modifiedBreakdown); + builder.field(BREAKDOWN.getPreferredName(), modifiedBreakdown); + } + + static void removeStartTimeFields(Map<String, Long> modifiedBreakdown) { + Iterator<Map.Entry<String, Long>> iterator = modifiedBreakdown.entrySet().iterator(); + while (iterator.hasNext()) { + Map.Entry<String, Long> entry = iterator.next(); + if (entry.getKey().endsWith(AbstractProfileBreakdown.TIMING_TYPE_START_TIME_SUFFIX)) { + iterator.remove(); + } + } + } + private static final InstantiatingObjectParser<ProfileResult, Void> PARSER; static { InstantiatingObjectParser.Builder<ProfileResult, Void> parser = InstantiatingObjectParser.builder( @@ -199,6 +290,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws parser.declareObject(optionalConstructorArg(), (p, c) -> p.map(), DEBUG); parser.declareLong(constructorArg(), NODE_TIME_RAW); parser.declareObjectArray(optionalConstructorArg(), (p, c) -> fromXContent(p), CHILDREN); + parser.declareLong(optionalConstructorArg(), MAX_SLICE_NODE_TIME_RAW); + parser.declareLong(optionalConstructorArg(), MIN_SLICE_NODE_TIME_RAW); + parser.declareLong(optionalConstructorArg(), AVG_SLICE_NODE_TIME_RAW); PARSER = parser.build(); } diff --git a/server/src/main/java/org/opensearch/search/profile/ProfileShardResult.java b/server/src/main/java/org/opensearch/search/profile/ProfileShardResult.java index 502d8e4852588..8ff622152ee70 100644 --- a/server/src/main/java/org/opensearch/search/profile/ProfileShardResult.java +++ b/server/src/main/java/org/opensearch/search/profile/ProfileShardResult.java @@ -32,6 +32,7 @@ package org.opensearch.search.profile; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -46,8 +47,9 @@ /** * Shard level profile results * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ProfileShardResult implements Writeable { private final List<QueryProfileShardResult> queryProfileResults; diff --git a/server/src/main/java/org/opensearch/search/profile/Profilers.java b/server/src/main/java/org/opensearch/search/profile/Profilers.java index 2bc2f3a5a3920..75337f89e67ca 100644 --- a/server/src/main/java/org/opensearch/search/profile/Profilers.java +++ b/server/src/main/java/org/opensearch/search/profile/Profilers.java @@ -32,8 +32,13 @@ package org.opensearch.search.profile; +import 
org.opensearch.common.annotation.PublicApi; import org.opensearch.search.internal.ContextIndexSearcher; import org.opensearch.search.profile.aggregation.AggregationProfiler; +import org.opensearch.search.profile.aggregation.ConcurrentAggregationProfiler; +import org.opensearch.search.profile.query.ConcurrentQueryProfileTree; +import org.opensearch.search.profile.query.ConcurrentQueryProfiler; +import org.opensearch.search.profile.query.InternalQueryProfileTree; import org.opensearch.search.profile.query.QueryProfiler; import java.util.ArrayList; @@ -43,25 +48,30 @@ /** * Wrapper around all the profilers that makes management easier. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class Profilers { private final ContextIndexSearcher searcher; private final List<QueryProfiler> queryProfilers; private final AggregationProfiler aggProfiler; + private final boolean isConcurrentSegmentSearchEnabled; /** Sole constructor. This {@link Profilers} instance will initially wrap one {@link QueryProfiler}. */ - public Profilers(ContextIndexSearcher searcher) { + public Profilers(ContextIndexSearcher searcher, boolean isConcurrentSegmentSearchEnabled) { this.searcher = searcher; + this.isConcurrentSegmentSearchEnabled = isConcurrentSegmentSearchEnabled; this.queryProfilers = new ArrayList<>(); - this.aggProfiler = new AggregationProfiler(); + this.aggProfiler = isConcurrentSegmentSearchEnabled ? new ConcurrentAggregationProfiler() : new AggregationProfiler(); addQueryProfiler(); } /** Switch to a new profile. */ public QueryProfiler addQueryProfiler() { - QueryProfiler profiler = new QueryProfiler(searcher.getExecutor() != null); + QueryProfiler profiler = isConcurrentSegmentSearchEnabled + ? new ConcurrentQueryProfiler(new ConcurrentQueryProfileTree()) + : new QueryProfiler(new InternalQueryProfileTree()); searcher.setProfiler(profiler); queryProfilers.add(profiler); return profiler; diff --git a/server/src/main/java/org/opensearch/search/profile/Timer.java b/server/src/main/java/org/opensearch/search/profile/Timer.java index 231324b4a5598..864c689cf7fa0 100644 --- a/server/src/main/java/org/opensearch/search/profile/Timer.java +++ b/server/src/main/java/org/opensearch/search/profile/Timer.java @@ -51,7 +51,19 @@ public class Timer { private boolean doTiming; - private long timing, count, lastCount, start; + private long timing, count, lastCount, start, earliestTimerStartTime; + + public Timer() { + this(0, 0, 0, 0, 0); + } + + public Timer(long timing, long count, long lastCount, long start, long earliestTimerStartTime) { + this.timing = timing; + this.count = count; + this.lastCount = lastCount; + this.start = start; + this.earliestTimerStartTime = earliestTimerStartTime; + } /** pkg-private for testing */ long nanoTime() { @@ -71,6 +83,9 @@ public final void start() { doTiming = (count - lastCount) >= Math.min(lastCount >>> 8, 1024); if (doTiming) { start = nanoTime(); + if (count == 0) { + earliestTimerStartTime = start; + } } count++; } @@ -92,6 +107,14 @@ public final long getCount() { return count; } + /** Return the timer start time in nanoseconds.*/ + public final long getEarliestTimerStartTime() { + if (start != 0) { + throw new IllegalStateException("#start call misses a matching #stop call"); + } + return earliestTimerStartTime; + } + /** Return an approximation of the total time spent between consecutive calls of #start and #stop. 
*/ public final long getApproximateTiming() { if (start != 0) { diff --git a/server/src/main/java/org/opensearch/search/profile/aggregation/AggregationProfileBreakdown.java b/server/src/main/java/org/opensearch/search/profile/aggregation/AggregationProfileBreakdown.java index 24eccba575e77..8642f0da4a90b 100644 --- a/server/src/main/java/org/opensearch/search/profile/aggregation/AggregationProfileBreakdown.java +++ b/server/src/main/java/org/opensearch/search/profile/aggregation/AggregationProfileBreakdown.java @@ -32,6 +32,7 @@ package org.opensearch.search.profile.aggregation; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.search.profile.AbstractProfileBreakdown; import java.util.HashMap; @@ -42,8 +43,9 @@ /** * {@linkplain AbstractProfileBreakdown} customized to work with aggregations. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class AggregationProfileBreakdown extends AbstractProfileBreakdown<AggregationTimingType> { private final Map<String, Object> extra = new HashMap<>(); @@ -59,7 +61,7 @@ public void addDebugInfo(String key, Object value) { } @Override - protected Map<String, Object> toDebugMap() { + public Map<String, Object> toDebugMap() { return unmodifiableMap(extra); } } diff --git a/server/src/main/java/org/opensearch/search/profile/aggregation/AggregationProfileShardResult.java b/server/src/main/java/org/opensearch/search/profile/aggregation/AggregationProfileShardResult.java index 046e929821ab5..6cb92b3efaac3 100644 --- a/server/src/main/java/org/opensearch/search/profile/aggregation/AggregationProfileShardResult.java +++ b/server/src/main/java/org/opensearch/search/profile/aggregation/AggregationProfileShardResult.java @@ -32,6 +32,7 @@ package org.opensearch.search.profile.aggregation; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -51,8 +52,9 @@ * A container class to hold the profile results for a single shard in the request. * Contains a list of query profiles, a collector tree and a total rewrite tree. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class AggregationProfileShardResult implements Writeable, ToXContentFragment { public static final String AGGREGATIONS = "aggregations"; diff --git a/server/src/main/java/org/opensearch/search/profile/aggregation/AggregationProfiler.java b/server/src/main/java/org/opensearch/search/profile/aggregation/AggregationProfiler.java index 1d2cf424ee5a7..cae3ced5ad0c5 100644 --- a/server/src/main/java/org/opensearch/search/profile/aggregation/AggregationProfiler.java +++ b/server/src/main/java/org/opensearch/search/profile/aggregation/AggregationProfiler.java @@ -32,44 +32,40 @@ package org.opensearch.search.profile.aggregation; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.profile.AbstractProfiler; import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; import java.util.Map; /** * Main class to profile aggregations * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class AggregationProfiler extends AbstractProfiler<AggregationProfileBreakdown, Aggregator> { - private final Map<List<String>, AggregationProfileBreakdown> profileBreakdownLookup = new HashMap<>(); + private final Map<Aggregator, AggregationProfileBreakdown> profileBreakdownLookup = new HashMap<>(); public AggregationProfiler() { super(new InternalAggregationProfileTree()); } + /** + * This method does not need to be thread-safe, even for the concurrent search use case: + * the {@link AggregationProfileBreakdown} for each aggregation operator is created on the synchronous path, when + * {@link org.opensearch.search.aggregations.BucketCollector#preCollection()} is called + * on the aggregation collector instances during construction. + */ @Override public AggregationProfileBreakdown getQueryBreakdown(Aggregator agg) { - List<String> path = getAggregatorPath(agg); - AggregationProfileBreakdown aggregationProfileBreakdown = profileBreakdownLookup.get(path); + AggregationProfileBreakdown aggregationProfileBreakdown = profileBreakdownLookup.get(agg); if (aggregationProfileBreakdown == null) { aggregationProfileBreakdown = super.getQueryBreakdown(agg); - profileBreakdownLookup.put(path, aggregationProfileBreakdown); + profileBreakdownLookup.put(agg, aggregationProfileBreakdown); } return aggregationProfileBreakdown; } - - public static List<String> getAggregatorPath(Aggregator agg) { - LinkedList<String> path = new LinkedList<>(); - while (agg != null) { - path.addFirst(agg.name()); - agg = agg.parent(); - } - return path; - } } diff --git a/server/src/main/java/org/opensearch/search/profile/aggregation/ConcurrentAggregationProfiler.java b/server/src/main/java/org/opensearch/search/profile/aggregation/ConcurrentAggregationProfiler.java new file mode 100644 index 0000000000000..deed68c535cf9 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/profile/aggregation/ConcurrentAggregationProfiler.java @@ -0,0 +1,199 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.search.profile.aggregation; + +import org.opensearch.search.profile.AbstractProfileBreakdown; +import org.opensearch.search.profile.ProfileResult; + +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +/** + * Main class to profile aggregations with concurrent execution + * + * @opensearch.internal + */ +public class ConcurrentAggregationProfiler extends AggregationProfiler { + + private static final String MAX_PREFIX = "max_"; + private static final String MIN_PREFIX = "min_"; + private static final String AVG_PREFIX = "avg_"; + private static final String START_TIME_KEY = AggregationTimingType.INITIALIZE + AbstractProfileBreakdown.TIMING_TYPE_START_TIME_SUFFIX; + private static final String[] breakdownCountStatsTypes = { "build_leaf_collector_count", "collect_count" }; + + @Override + public List<ProfileResult> getTree() { + List<ProfileResult> tree = profileTree.getTree(); + List<ProfileResult> reducedTree = new LinkedList<>(); + Map<String, List<ProfileResult>> sliceLevelAggregationMap = getSliceLevelAggregationMap(tree); + for (List<ProfileResult> profileResultsAcrossSlices : sliceLevelAggregationMap.values()) { + reducedTree.addAll(reduceProfileResultsTree(profileResultsAcrossSlices)); + } + return reducedTree; + } + + private List<ProfileResult> reduceProfileResultsTree(List<ProfileResult> profileResultsAcrossSlices) { + String type = profileResultsAcrossSlices.get(0).getQueryName(); + String description = profileResultsAcrossSlices.get(0).getLuceneDescription(); + long maxSliceNodeEndTime = Long.MIN_VALUE; + long minSliceNodeStartTime = Long.MAX_VALUE; + long maxSliceNodeTime = Long.MIN_VALUE; + long minSliceNodeTime = Long.MAX_VALUE; + long avgSliceNodeTime = 0L; + Map<String, Long> breakdown = new HashMap<>(); + Map<String, Long> timeStatsMap = new HashMap<>(); + Map<String, Long> minSliceStartTimeMap = new HashMap<>(); + Map<String, Long> maxSliceEndTimeMap = new HashMap<>(); + Map<String, Long> countStatsMap = new HashMap<>(); + Map<String, Object> debug = new HashMap<>(); + List<ProfileResult> children = new LinkedList<>(); + + for (ProfileResult profileResult : profileResultsAcrossSlices) { + long profileNodeTime = profileResult.getTime(); + long sliceStartTime = profileResult.getTimeBreakdown().get(START_TIME_KEY); + + // Profiled total time + maxSliceNodeEndTime = Math.max(maxSliceNodeEndTime, sliceStartTime + profileNodeTime); + minSliceNodeStartTime = Math.min(minSliceNodeStartTime, sliceStartTime); + + // Profiled total time stats + maxSliceNodeTime = Math.max(maxSliceNodeTime, profileNodeTime); + minSliceNodeTime = Math.min(minSliceNodeTime, profileNodeTime); + avgSliceNodeTime += profileNodeTime; + + // Profiled breakdown time stats + for (AggregationTimingType timingType : AggregationTimingType.values()) { + buildBreakdownStatsMap(timeStatsMap, profileResult, timingType.toString()); + } + + // Profiled breakdown total time + for (AggregationTimingType timingType : AggregationTimingType.values()) { + String breakdownTimingType = timingType.toString(); + Long startTime = profileResult.getTimeBreakdown() + .get(breakdownTimingType + AbstractProfileBreakdown.TIMING_TYPE_START_TIME_SUFFIX); + Long endTime = startTime + profileResult.getTimeBreakdown().get(breakdownTimingType); + minSliceStartTimeMap.put( + breakdownTimingType, + Math.min(minSliceStartTimeMap.getOrDefault(breakdownTimingType, Long.MAX_VALUE), startTime) + ); + maxSliceEndTimeMap.put( + breakdownTimingType, + 
Math.max(maxSliceEndTimeMap.getOrDefault(breakdownTimingType, Long.MIN_VALUE), endTime) + ); + } + + // Profiled breakdown count stats + for (String breakdownCountType : breakdownCountStatsTypes) { + buildBreakdownStatsMap(countStatsMap, profileResult, breakdownCountType); + } + + // Profiled breakdown count + for (AggregationTimingType timingType : AggregationTimingType.values()) { + String breakdownType = timingType.toString(); + String breakdownTypeCount = breakdownType + AbstractProfileBreakdown.TIMING_TYPE_COUNT_SUFFIX; + breakdown.put( + breakdownTypeCount, + breakdown.getOrDefault(breakdownTypeCount, 0L) + profileResult.getTimeBreakdown().get(breakdownTypeCount) + ); + } + + debug = profileResult.getDebugInfo(); + children.addAll(profileResult.getProfiledChildren()); + } + // nodeTime + long nodeTime = maxSliceNodeEndTime - minSliceNodeStartTime; + avgSliceNodeTime /= profileResultsAcrossSlices.size(); + + // Profiled breakdown time stats + for (AggregationTimingType breakdownTimingType : AggregationTimingType.values()) { + buildBreakdownMap(profileResultsAcrossSlices.size(), breakdown, timeStatsMap, breakdownTimingType.toString()); + } + + // Profiled breakdown total time + for (AggregationTimingType breakdownTimingType : AggregationTimingType.values()) { + String breakdownType = breakdownTimingType.toString(); + breakdown.put(breakdownType, maxSliceEndTimeMap.get(breakdownType) - minSliceStartTimeMap.get(breakdownType)); + } + + // Profiled breakdown count stats + for (String breakdownCountType : breakdownCountStatsTypes) { + buildBreakdownMap(profileResultsAcrossSlices.size(), breakdown, countStatsMap, breakdownCountType); + } + + // children + List<ProfileResult> reducedChildrenTree = new LinkedList<>(); + if (!children.isEmpty()) { + Map<String, List<ProfileResult>> sliceLevelAggregationMap = getSliceLevelAggregationMap(children); + for (List<ProfileResult> profileResults : sliceLevelAggregationMap.values()) { + reducedChildrenTree.addAll(reduceProfileResultsTree(profileResults)); + } + } + + ProfileResult reducedResult = new ProfileResult( + type, + description, + breakdown, + debug, + nodeTime, + reducedChildrenTree, + maxSliceNodeTime, + minSliceNodeTime, + avgSliceNodeTime + ); + return List.of(reducedResult); + } + + static void buildBreakdownMap(int treeSize, Map<String, Long> breakdown, Map<String, Long> statsMap, String breakdownType) { + String maxBreakdownType = MAX_PREFIX + breakdownType; + String minBreakdownType = MIN_PREFIX + breakdownType; + String avgBreakdownType = AVG_PREFIX + breakdownType; + breakdown.put(maxBreakdownType, statsMap.get(maxBreakdownType)); + breakdown.put(minBreakdownType, statsMap.get(minBreakdownType)); + breakdown.put(avgBreakdownType, statsMap.get(avgBreakdownType) / treeSize); + } + + static void buildBreakdownStatsMap(Map<String, Long> statsMap, ProfileResult result, String breakdownType) { + String maxBreakdownType = MAX_PREFIX + breakdownType; + String minBreakdownType = MIN_PREFIX + breakdownType; + String avgBreakdownType = AVG_PREFIX + breakdownType; + statsMap.put( + maxBreakdownType, + Math.max(statsMap.getOrDefault(maxBreakdownType, Long.MIN_VALUE), result.getTimeBreakdown().get(breakdownType)) + ); + statsMap.put( + minBreakdownType, + Math.min(statsMap.getOrDefault(minBreakdownType, Long.MAX_VALUE), result.getTimeBreakdown().get(breakdownType)) + ); + statsMap.put(avgBreakdownType, statsMap.getOrDefault(avgBreakdownType, 0L) + result.getTimeBreakdown().get(breakdownType)); + } + + /** + * @return a slice level aggregation map 
where the key is the description of the aggregation and
+ * the value is a list of ProfileResult across all slices.
+ */
+ static Map<String, List<ProfileResult>> getSliceLevelAggregationMap(List<ProfileResult> tree) {
+ Map<String, List<ProfileResult>> sliceLevelAggregationMap = new HashMap<>();
+ for (ProfileResult result : tree) {
+ String description = result.getLuceneDescription();
+ final List<ProfileResult> sliceLevelAggregationList = sliceLevelAggregationMap.computeIfAbsent(
+ description,
+ k -> new LinkedList<>()
+ );
+ sliceLevelAggregationList.add(result);
+ }
+ return sliceLevelAggregationMap;
+ }
+}
diff --git a/server/src/main/java/org/opensearch/search/profile/aggregation/InternalAggregationProfileTree.java b/server/src/main/java/org/opensearch/search/profile/aggregation/InternalAggregationProfileTree.java
index 36cfc53f41ccd..34716b87c7c9c 100644
--- a/server/src/main/java/org/opensearch/search/profile/aggregation/InternalAggregationProfileTree.java
+++ b/server/src/main/java/org/opensearch/search/profile/aggregation/InternalAggregationProfileTree.java
@@ -62,6 +62,10 @@ protected String getTypeFromElement(Aggregator element) {
 return element.getClass().getSimpleName();
 }

+ /**
+ * @return the name used to group aggregations with the same name across slices.
+ * The name returned here should be the same across slices for a given aggregation operator.
+ */
 @Override
 protected String getDescriptionFromElement(Aggregator element) {
 return element.name();
diff --git a/server/src/main/java/org/opensearch/search/profile/query/AbstractQueryProfileTree.java b/server/src/main/java/org/opensearch/search/profile/query/AbstractQueryProfileTree.java
new file mode 100644
index 0000000000000..2f5d632ee2d87
--- /dev/null
+++ b/server/src/main/java/org/opensearch/search/profile/query/AbstractQueryProfileTree.java
@@ -0,0 +1,67 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.search.profile.query;
+
+import org.apache.lucene.search.Query;
+import org.opensearch.search.profile.AbstractInternalProfileTree;
+import org.opensearch.search.profile.ContextualProfileBreakdown;
+import org.opensearch.search.profile.ProfileResult;
+
+/**
+ * This class tracks the dependency tree for queries (scoring and rewriting) and
+ * generates {@link QueryProfileBreakdown} for each node in the tree. It also finalizes the tree
+ * and returns a list of {@link ProfileResult} that can be serialized back to the client
+ *
+ * @opensearch.internal
+ */
+public abstract class AbstractQueryProfileTree extends AbstractInternalProfileTree<ContextualProfileBreakdown<QueryTimingType>, Query> {
+
+ /** Rewrite time */
+ private long rewriteTime;
+ private long rewriteScratch;
+
+ @Override
+ protected String getTypeFromElement(Query query) {
+ // Anonymous classes won't have a name,
+ // we need to get the super class
+ if (query.getClass().getSimpleName().isEmpty()) {
+ return query.getClass().getSuperclass().getSimpleName();
+ }
+ return query.getClass().getSimpleName();
+ }
+
+ @Override
+ protected String getDescriptionFromElement(Query query) {
+ return query.toString();
+ }
+
+ /**
+ * Begin timing a query for a specific Timing context
+ */
+ public void startRewriteTime() {
+ assert rewriteScratch == 0;
+ rewriteScratch = System.nanoTime();
+ }
+
+ /**
+ * Halt the timing process and add the elapsed rewriting time.
+ * startRewriteTime() must be called for a particular context prior to calling
+ * stopAndAddRewriteTime(), otherwise the elapsed time will be negative and
+ * nonsensical.
+ */
+ public void stopAndAddRewriteTime() {
+ long time = Math.max(1, System.nanoTime() - rewriteScratch);
+ rewriteTime += time;
+ rewriteScratch = 0;
+ }
+
+ public long getRewriteTime() {
+ return rewriteTime;
+ }
+}
diff --git a/server/src/main/java/org/opensearch/search/profile/query/CollectorResult.java b/server/src/main/java/org/opensearch/search/profile/query/CollectorResult.java
index 26edc14e6f9f8..5cb6942445638 100644
--- a/server/src/main/java/org/opensearch/search/profile/query/CollectorResult.java
+++ b/server/src/main/java/org/opensearch/search/profile/query/CollectorResult.java
@@ -33,11 +33,12 @@
 package org.opensearch.search.profile.query;

 import org.opensearch.Version;
+import org.opensearch.common.annotation.PublicApi;
+import org.opensearch.common.unit.TimeValue;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.common.io.stream.Writeable;
-import org.opensearch.common.unit.TimeValue;
 import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.core.xcontent.ToXContentObject;
 import org.opensearch.core.xcontent.XContentBuilder;
@@ -55,8 +56,9 @@
 * Collectors used in the search. Child CollectorResults may be
 * embedded inside a parent CollectorResult
 *
- * @opensearch.internal
+ * @opensearch.api
 */
+@PublicApi(since = "1.0.0")
 public class CollectorResult implements ToXContentObject, Writeable {

 public static final String REASON_SEARCH_COUNT = "search_count";
diff --git a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java
index 74ef78bc93c5f..99169b42c05f0 100644
--- a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java
+++ b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java
@@ -8,10 +8,16 @@
 package org.opensearch.search.profile.query;

+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.Collector;
+import org.opensearch.OpenSearchException;
 import org.opensearch.search.profile.AbstractProfileBreakdown;
 import org.opensearch.search.profile.ContextualProfileBreakdown;

+import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;

@@ -23,8 +29,22 @@
 * @opensearch.internal
 */
 public final class ConcurrentQueryProfileBreakdown extends ContextualProfileBreakdown<QueryTimingType> {
+ static final String SLICE_END_TIME_SUFFIX = "_slice_end_time";
+ static final String SLICE_START_TIME_SUFFIX = "_slice_start_time";
+ static final String MAX_PREFIX = "max_";
+ static final String MIN_PREFIX = "min_";
+ static final String AVG_PREFIX = "avg_";
+ private long queryNodeTime = Long.MIN_VALUE;
+ private long maxSliceNodeTime = Long.MIN_VALUE;
+ private long minSliceNodeTime = Long.MAX_VALUE;
+ private long avgSliceNodeTime = 0L;
+
+ // keep track of all breakdown timings per segment.
package-private for testing
 private final Map<Object, AbstractProfileBreakdown<QueryTimingType>> contexts = new ConcurrentHashMap<>();

+ // represents the slice to leaves mapping, as for each slice a unique collector instance is created
+ private final Map<Collector, List<LeafReaderContext>> sliceCollectorsToLeaves = new ConcurrentHashMap<>();
+
 /** Sole constructor. */
 public ConcurrentQueryProfileBreakdown() {
 super(QueryTimingType.class);
@@ -44,14 +64,355 @@ public AbstractProfileBreakdown<QueryTimingType> context(Object context) {

 @Override
 public Map<String, Long> toBreakdownMap() {
- final Map<String, Long> map = new HashMap<>(buildBreakdownMap(this));
+ final Map<String, Long> topLevelBreakdownMapWithWeightTime = super.toBreakdownMap();
+ final long createWeightStartTime = topLevelBreakdownMapWithWeightTime.get(
+ QueryTimingType.CREATE_WEIGHT + TIMING_TYPE_START_TIME_SUFFIX
+ );
+ final long createWeightTime = topLevelBreakdownMapWithWeightTime.get(QueryTimingType.CREATE_WEIGHT.toString());
+
+ if (contexts.isEmpty()) {
+ // If there are no leaf contexts, then return the default concurrent query level breakdown, which will include the
+ // create_weight time/count
+ queryNodeTime = createWeightTime;
+ maxSliceNodeTime = 0L;
+ minSliceNodeTime = 0L;
+ avgSliceNodeTime = 0L;
+ return buildDefaultQueryBreakdownMap(createWeightTime);
+ } else if (sliceCollectorsToLeaves.isEmpty()) {
+ // This will happen when each slice executes the search-leaf phase for its leaves and the query is rewritten for the leaf
+ // being searched. It creates a new weight and breakdown map for each rewritten query. This new breakdown map captures the
+ // timing information for the new rewritten query. The sliceCollectorsToLeaves map is empty because the breakdown for the
+ // rewritten query gets created later in the search-leaf path, which doesn't have a collector. Also, the mapping is not
+ // needed since this breakdown is per leaf and there is no concurrency involved.
+ assert contexts.size() == 1 : "Unexpected size: "
+ + contexts.size()
+ + " of leaves breakdown in ConcurrentQueryProfileBreakdown of rewritten query for a leaf.";
+ AbstractProfileBreakdown<QueryTimingType> breakdown = contexts.values().iterator().next();
+ queryNodeTime = breakdown.toNodeTime() + createWeightTime;
+ maxSliceNodeTime = 0L;
+ minSliceNodeTime = 0L;
+ avgSliceNodeTime = 0L;
+ Map<String, Long> queryBreakdownMap = new HashMap<>(breakdown.toBreakdownMap());
+ queryBreakdownMap.put(QueryTimingType.CREATE_WEIGHT.toString(), createWeightTime);
+ queryBreakdownMap.put(QueryTimingType.CREATE_WEIGHT + TIMING_TYPE_COUNT_SUFFIX, 1L);
+ return queryBreakdownMap;
+ }
+
+ // first create the slice level breakdowns
+ final Map<Collector, Map<String, Long>> sliceLevelBreakdowns = buildSliceLevelBreakdown();
+ return buildQueryBreakdownMap(sliceLevelBreakdowns, createWeightTime, createWeightStartTime);
+ }
+
+ /**
+ * @param createWeightTime time for creating weight
+ * @return the default breakdown map for a concurrent query, which includes the create weight time while all other timing type
+ * stats in the breakdown have a default value of 0. For the concurrent search case, the max/min/avg stats for each timing type
+ * will also be 0 in this default breakdown map.
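+ * <p>For example, taking a hypothetical {@code score} timing type (all values illustrative), the returned map contains:
+ * <pre>{@code
+ * create_weight=createWeightTime, create_weight_count=1,
+ * score=0, score_count=0,
+ * max_score=0, min_score=0, avg_score=0,
+ * max_score_count=0, min_score_count=0, avg_score_count=0
+ * }</pre>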
+ */
+ private Map<String, Long> buildDefaultQueryBreakdownMap(long createWeightTime) {
+ final Map<String, Long> concurrentQueryBreakdownMap = new HashMap<>();
+ for (QueryTimingType timingType : QueryTimingType.values()) {
+ final String timingTypeKey = timingType.toString();
+ final String timingTypeCountKey = timingTypeKey + TIMING_TYPE_COUNT_SUFFIX;
+
+ if (timingType.equals(QueryTimingType.CREATE_WEIGHT)) {
+ concurrentQueryBreakdownMap.put(timingTypeKey, createWeightTime);
+ concurrentQueryBreakdownMap.put(timingTypeCountKey, 1L);
+ continue;
+ }
+ final String maxBreakdownTypeTime = MAX_PREFIX + timingTypeKey;
+ final String minBreakdownTypeTime = MIN_PREFIX + timingTypeKey;
+ final String avgBreakdownTypeTime = AVG_PREFIX + timingTypeKey;
+ final String maxBreakdownTypeCount = MAX_PREFIX + timingTypeCountKey;
+ final String minBreakdownTypeCount = MIN_PREFIX + timingTypeCountKey;
+ final String avgBreakdownTypeCount = AVG_PREFIX + timingTypeCountKey;
+ // add time related stats
+ concurrentQueryBreakdownMap.put(timingTypeKey, 0L);
+ concurrentQueryBreakdownMap.put(maxBreakdownTypeTime, 0L);
+ concurrentQueryBreakdownMap.put(minBreakdownTypeTime, 0L);
+ concurrentQueryBreakdownMap.put(avgBreakdownTypeTime, 0L);
+ // add count related stats
+ concurrentQueryBreakdownMap.put(timingTypeCountKey, 0L);
+ concurrentQueryBreakdownMap.put(maxBreakdownTypeCount, 0L);
+ concurrentQueryBreakdownMap.put(minBreakdownTypeCount, 0L);
+ concurrentQueryBreakdownMap.put(avgBreakdownTypeCount, 0L);
+ }
+ return concurrentQueryBreakdownMap;
+ }
+
+ /**
+ * Computes the slice level breakdownMap. It uses sliceCollectorsToLeaves to figure out all the leaves or segments that are part
+ * of a slice. Then it uses the breakdown timing stats for each of these leaves to calculate the breakdown stats at slice level.
+ *
+ * @return map of collector (or slice) to breakdown map
+ */
+ Map<Collector, Map<String, Long>> buildSliceLevelBreakdown() {
+ final Map<Collector, Map<String, Long>> sliceLevelBreakdowns = new HashMap<>();
+ long totalSliceNodeTime = 0L;
+ for (Map.Entry<Collector, List<LeafReaderContext>> slice : sliceCollectorsToLeaves.entrySet()) {
+ final Collector sliceCollector = slice.getKey();
+ // initialize each slice level breakdown
+ final Map<String, Long> currentSliceBreakdown = sliceLevelBreakdowns.computeIfAbsent(sliceCollector, k -> new HashMap<>());
+ // max slice end time across all timing types
+ long sliceMaxEndTime = Long.MIN_VALUE;
+ long sliceMinStartTime = Long.MAX_VALUE;
+ for (QueryTimingType timingType : QueryTimingType.values()) {
+ if (timingType.equals(QueryTimingType.CREATE_WEIGHT)) {
+ // do nothing for create weight as that is query level time and not slice level
+ continue;
+ }
+
+ // for each timing type compute maxSliceEndTime and minSliceStartTime. Also add the counts of timing type to
+ // compute total count at slice level
+ final String timingTypeCountKey = timingType + TIMING_TYPE_COUNT_SUFFIX;
+ final String timingTypeStartKey = timingType + TIMING_TYPE_START_TIME_SUFFIX;
+ final String timingTypeSliceStartTimeKey = timingType + SLICE_START_TIME_SUFFIX;
+ final String timingTypeSliceEndTimeKey = timingType + SLICE_END_TIME_SUFFIX;
+
+ for (LeafReaderContext sliceLeaf : slice.getValue()) {
+ if (!contexts.containsKey(sliceLeaf)) {
+ // In cases like early termination, the sliceCollectorsToLeaves association will be added for a
+ // leaf, but the leaf level breakdown will not be created in the contexts map.
+ // This is because before updating the contexts map, the query hits earlyTerminationException.
+ // To handle such a case, we ignore any leaf that is not present.
+ //
+ // Other than early termination, this can also happen in other cases. For example: consider a must boolean query
+ // with 2 boolean clauses. While creating the scorer for the first clause, if no docs are found for the field in a
+ // leaf context then a null scorer is returned. Then no scorer will be created for the second clause weight for
+ // this leaf context either (as it is a must query). Due to this, the leaf context will be missing in the contexts
+ // map for the second clause weight.
+ continue;
+ }
+ final Map<String, Long> currentSliceLeafBreakdownMap = contexts.get(sliceLeaf).toBreakdownMap();
+ // get the count for current leaf timing type
+ final long sliceLeafTimingTypeCount = currentSliceLeafBreakdownMap.get(timingTypeCountKey);
+ currentSliceBreakdown.compute(
+ timingTypeCountKey,
+ (key, value) -> (value == null) ? sliceLeafTimingTypeCount : value + sliceLeafTimingTypeCount
+ );
+
+ if (sliceLeafTimingTypeCount == 0L) {
+ // In the case of a slice with multiple leaves, it is possible that one of the leaves has 0 invocations for a
+ // specific breakdown type. We should skip the slice start/end time computation for any leaf with 0 invocations on a
+ // timing type, as 0 does not represent an actual timing.
+ // For example, a slice may have 0 invocations for a breakdown type from its leading leaves. As another example,
+ // consider a slice with three leaves: leaf A with a score count of 5, leaf B with a score count of 0,
+ // and leaf C with a score count of 4. In this situation, we only compute the timing type slice start/end time based
+ // on leaf A and leaf C, because leaf B has a start time of zero.
+ continue;
+ }
+
+ // compute the sliceStartTime for timingType using min of startTime across slice leaves
+ final long sliceLeafTimingTypeStartTime = currentSliceLeafBreakdownMap.get(timingTypeStartKey);
+ currentSliceBreakdown.compute(
+ timingTypeSliceStartTimeKey,
+ (key, value) -> (value == null) ? sliceLeafTimingTypeStartTime : Math.min(value, sliceLeafTimingTypeStartTime)
+ );
+
+ // compute the sliceEndTime for timingType using max of endTime across slice leaves
+ final long sliceLeafTimingTypeEndTime = sliceLeafTimingTypeStartTime + currentSliceLeafBreakdownMap.get(
+ timingType.toString()
+ );
+ currentSliceBreakdown.compute(
+ timingTypeSliceEndTimeKey,
+ (key, value) -> (value == null) ? sliceLeafTimingTypeEndTime : Math.max(value, sliceLeafTimingTypeEndTime)
+ );
+ }
+ // Only when we've checked all leaves in a slice and still found no invocations should we set the slice start/end time
+ // to the default 0L. This is because buildQueryBreakdownMap expects timingTypeSliceStartTimeKey and
+ // timingTypeSliceEndTimeKey in the slice level breakdowns.
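+ // For example (hypothetical counts): if every leaf of a slice reports score_count == 0, then no
+ // score_slice_start_time/score_slice_end_time entries were computed above, so both are set to the default 0L
+ // here, and the code below then skips the slice time computation for that timing type, since a 0 start time
+ // means the timer never started.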
+ if (currentSliceBreakdown.get(timingTypeCountKey) != null && currentSliceBreakdown.get(timingTypeCountKey) == 0L) {
+ currentSliceBreakdown.put(timingTypeSliceStartTimeKey, 0L);
+ currentSliceBreakdown.put(timingTypeSliceEndTimeKey, 0L);
+ }
+ // compute sliceMaxEndTime as max of sliceEndTime across all timing types
+ sliceMaxEndTime = Math.max(sliceMaxEndTime, currentSliceBreakdown.getOrDefault(timingTypeSliceEndTimeKey, Long.MIN_VALUE));
+ long currentSliceStartTime = currentSliceBreakdown.getOrDefault(timingTypeSliceStartTimeKey, Long.MAX_VALUE);
+ if (currentSliceStartTime == 0L) {
+ // The timer for the current timing type never starts, so we continue here
+ continue;
+ }
+ sliceMinStartTime = Math.min(sliceMinStartTime, currentSliceStartTime);
+ // compute total time for each timing type at slice level using sliceEndTime and sliceStartTime
+ currentSliceBreakdown.put(
+ timingType.toString(),
+ currentSliceBreakdown.getOrDefault(timingTypeSliceEndTimeKey, 0L) - currentSliceBreakdown.getOrDefault(
+ timingTypeSliceStartTimeKey,
+ 0L
+ )
+ );
+ }
+ // currentSliceNodeTime does not include the create weight time, as that is computed in the non-concurrent part
+ long currentSliceNodeTime;
+ if (sliceMinStartTime == Long.MAX_VALUE && sliceMaxEndTime == Long.MIN_VALUE) {
+ currentSliceNodeTime = 0L;
+ } else if (sliceMinStartTime == Long.MAX_VALUE || sliceMaxEndTime == Long.MIN_VALUE) {
+ throw new OpenSearchException(
+ "Unexpected value of sliceMinStartTime ["
+ + sliceMinStartTime
+ + "] or sliceMaxEndTime ["
+ + sliceMaxEndTime
+ + "] while computing the slice level timing profile breakdowns"
+ );
+ } else {
+ currentSliceNodeTime = sliceMaxEndTime - sliceMinStartTime;
+ }
+
+ // compute max/min slice times
+ maxSliceNodeTime = Math.max(maxSliceNodeTime, currentSliceNodeTime);
+ minSliceNodeTime = Math.min(minSliceNodeTime, currentSliceNodeTime);
+ // total time at query level
+ totalSliceNodeTime += currentSliceNodeTime;
+ }
+ avgSliceNodeTime = totalSliceNodeTime / sliceCollectorsToLeaves.size();
+ return sliceLevelBreakdowns;
+ }
+
+ /**
+ * Computes the query level breakdownMap using the breakdown maps of all the slices. The query level breakdown map has the
+ * time/count stats for each breakdown type. Total time per breakdown type at the query level is computed by subtracting the min
+ * of slice start time from the max of slice end time for that type. Count for each breakdown type at the query level is the sum
+ * of counts of that type across slices.
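+ * <p>For example (hypothetical timestamps): if slice A runs {@code score} over the window [10, 15] and slice B over
+ * [12, 20], the query level {@code score} time is 20 - 10 = 10, while {@code score_count} is the sum of both slices'
+ * counts.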
Other than these, there are max/min/avg stats across slices for each breakdown type
+ *
+ * @param sliceLevelBreakdowns breakdown map for all the slices
+ * @param createWeightTime time for create weight
+ * @param createWeightStartTime start time for create weight
+ * @return breakdown map for entire query
+ */
+ public Map<String, Long> buildQueryBreakdownMap(
+ Map<Collector, Map<String, Long>> sliceLevelBreakdowns,
+ long createWeightTime,
+ long createWeightStartTime
+ ) {
+ final Map<String, Long> queryBreakdownMap = new HashMap<>();
+ long queryEndTime = Long.MIN_VALUE;
+ for (QueryTimingType queryTimingType : QueryTimingType.values()) {
+ final String timingTypeKey = queryTimingType.toString();
+ final String timingTypeCountKey = timingTypeKey + TIMING_TYPE_COUNT_SUFFIX;
+ final String sliceEndTimeForTimingType = timingTypeKey + SLICE_END_TIME_SUFFIX;
+ final String sliceStartTimeForTimingType = timingTypeKey + SLICE_START_TIME_SUFFIX;
+
+ final String maxBreakdownTypeTime = MAX_PREFIX + timingTypeKey;
+ final String minBreakdownTypeTime = MIN_PREFIX + timingTypeKey;
+ final String avgBreakdownTypeTime = AVG_PREFIX + timingTypeKey;
+ final String maxBreakdownTypeCount = MAX_PREFIX + timingTypeCountKey;
+ final String minBreakdownTypeCount = MIN_PREFIX + timingTypeCountKey;
+ final String avgBreakdownTypeCount = AVG_PREFIX + timingTypeCountKey;
+
+ long queryTimingTypeEndTime = Long.MIN_VALUE;
+ long queryTimingTypeStartTime = Long.MAX_VALUE;
+ long queryTimingTypeCount = 0L;
- for (final AbstractProfileBreakdown<QueryTimingType> context : contexts.values()) {
- for (final Map.Entry<String, Long> entry : buildBreakdownMap(context).entrySet()) {
- map.merge(entry.getKey(), entry.getValue(), Long::sum);
+ // create weight is called only once per query, so its time is computed at the query level
+ if (queryTimingType == QueryTimingType.CREATE_WEIGHT) {
+ queryBreakdownMap.put(timingTypeCountKey, 1L);
+ queryBreakdownMap.put(timingTypeKey, createWeightTime);
+ continue;
 }
+
+ // for all other timing types, we will compute min/max/avg/total across slices
+ for (Map.Entry<Collector, Map<String, Long>> sliceBreakdown : sliceLevelBreakdowns.entrySet()) {
+ long sliceBreakdownTypeTime = sliceBreakdown.getValue().getOrDefault(timingTypeKey, 0L);
+ long sliceBreakdownTypeCount = sliceBreakdown.getValue().getOrDefault(timingTypeCountKey, 0L);
+ // compute max/min/avg TimingType time across slices
+ queryBreakdownMap.compute(
+ maxBreakdownTypeTime,
+ (key, value) -> (value == null) ? sliceBreakdownTypeTime : Math.max(sliceBreakdownTypeTime, value)
+ );
+ queryBreakdownMap.compute(
+ minBreakdownTypeTime,
+ (key, value) -> (value == null) ? sliceBreakdownTypeTime : Math.min(sliceBreakdownTypeTime, value)
+ );
+ queryBreakdownMap.compute(
+ avgBreakdownTypeTime,
+ (key, value) -> (value == null) ? sliceBreakdownTypeTime : sliceBreakdownTypeTime + value
+ );
+
+ // compute max/min/avg TimingType count across slices
+ queryBreakdownMap.compute(
+ maxBreakdownTypeCount,
+ (key, value) -> (value == null) ? sliceBreakdownTypeCount : Math.max(sliceBreakdownTypeCount, value)
+ );
+ queryBreakdownMap.compute(
+ minBreakdownTypeCount,
+ (key, value) -> (value == null) ? sliceBreakdownTypeCount : Math.min(sliceBreakdownTypeCount, value)
+ );
+ queryBreakdownMap.compute(
+ avgBreakdownTypeCount,
+ (key, value) -> (value == null) ?
sliceBreakdownTypeCount : sliceBreakdownTypeCount + value
+ );
+
+ // query start/end time for a TimingType is min/max of start/end time across slices for that TimingType
+ queryTimingTypeEndTime = Math.max(
+ queryTimingTypeEndTime,
+ sliceBreakdown.getValue().getOrDefault(sliceEndTimeForTimingType, Long.MIN_VALUE)
+ );
+ queryTimingTypeStartTime = Math.min(
+ queryTimingTypeStartTime,
+ sliceBreakdown.getValue().getOrDefault(sliceStartTimeForTimingType, Long.MAX_VALUE)
+ );
+ queryTimingTypeCount += sliceBreakdownTypeCount;
+ }
+
+ if (queryTimingTypeStartTime == Long.MAX_VALUE || queryTimingTypeEndTime == Long.MIN_VALUE) {
+ throw new OpenSearchException(
+ "Unexpected timing type ["
+ + timingTypeKey
+ + "] start ["
+ + queryTimingTypeStartTime
+ + "] or end time ["
+ + queryTimingTypeEndTime
+ + "] computed across slices for profile results"
+ );
+ }
+ queryBreakdownMap.put(timingTypeKey, queryTimingTypeEndTime - queryTimingTypeStartTime);
+ queryBreakdownMap.put(timingTypeCountKey, queryTimingTypeCount);
+ queryBreakdownMap.compute(avgBreakdownTypeTime, (key, value) -> (value == null) ? 0L : value / sliceLevelBreakdowns.size());
+ queryBreakdownMap.compute(avgBreakdownTypeCount, (key, value) -> (value == null) ? 0L : value / sliceLevelBreakdowns.size());
+ // compute query end time using max of query end time across all timing types
+ queryEndTime = Math.max(queryEndTime, queryTimingTypeEndTime);
+ }
+ if (queryEndTime == Long.MIN_VALUE) {
+ throw new OpenSearchException("Unexpected error while computing the query end time across slices in profile result");
 }
+ queryNodeTime = queryEndTime - createWeightStartTime;
+ return queryBreakdownMap;
+ }
+
+ @Override
+ public long toNodeTime() {
+ return queryNodeTime;
+ }
+
+ @Override
+ public void associateCollectorToLeaves(Collector collector, LeafReaderContext leaf) {
+ // Each slice (or collector) is executed by a single thread. So the list for a key will always be updated by a single thread only
+ sliceCollectorsToLeaves.computeIfAbsent(collector, k -> new ArrayList<>()).add(leaf);
+ }
+
+ @Override
+ public void associateCollectorsToLeaves(Map<Collector, List<LeafReaderContext>> collectorsToLeaves) {
+ sliceCollectorsToLeaves.putAll(collectorsToLeaves);
+ }
+
+ Map<Collector, List<LeafReaderContext>> getSliceCollectorsToLeaves() {
+ return Collections.unmodifiableMap(sliceCollectorsToLeaves);
+ }
+
+ // used by tests
+ Map<Object, AbstractProfileBreakdown<QueryTimingType>> getContexts() {
+ return contexts;
+ }
+
+ long getMaxSliceNodeTime() {
+ return maxSliceNodeTime;
+ }
+
+ long getMinSliceNodeTime() {
+ return minSliceNodeTime;
+ }
- return map;
+ long getAvgSliceNodeTime() {
+ return avgSliceNodeTime;
 }
}
diff --git a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileTree.java b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileTree.java
new file mode 100644
index 0000000000000..4e54178c3b4fb
--- /dev/null
+++ b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileTree.java
@@ -0,0 +1,91 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.search.profile.query;
+
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.Collector;
+import org.opensearch.search.profile.ContextualProfileBreakdown;
+import org.opensearch.search.profile.ProfileResult;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * This class returns a list of {@link ProfileResult} that can be serialized back to the client in the concurrent execution.
+ *
+ * @opensearch.internal
+ */
+public class ConcurrentQueryProfileTree extends AbstractQueryProfileTree {
+
+ @Override
+ protected ContextualProfileBreakdown<QueryTimingType> createProfileBreakdown() {
+ return new ConcurrentQueryProfileBreakdown();
+ }
+
+ @Override
+ protected ProfileResult createProfileResult(
+ String type,
+ String description,
+ ContextualProfileBreakdown<QueryTimingType> breakdown,
+ List<ProfileResult> childrenProfileResults
+ ) {
+ assert breakdown instanceof ConcurrentQueryProfileBreakdown;
+ final ConcurrentQueryProfileBreakdown concurrentBreakdown = (ConcurrentQueryProfileBreakdown) breakdown;
+ return new ProfileResult(
+ type,
+ description,
+ concurrentBreakdown.toBreakdownMap(),
+ concurrentBreakdown.toDebugMap(),
+ concurrentBreakdown.toNodeTime(),
+ childrenProfileResults,
+ concurrentBreakdown.getMaxSliceNodeTime(),
+ concurrentBreakdown.getMinSliceNodeTime(),
+ concurrentBreakdown.getAvgSliceNodeTime()
+ );
+ }
+
+ /**
+ * For the concurrent query case, when there are nested queries (with children), the {@link ConcurrentQueryProfileBreakdown}
+ * created for the child query weights doesn't have the association of collector to leaves. This is because child query weights
+ * are not exposed by the {@link org.apache.lucene.search.Weight} interface. So after all collection has happened and before the
+ * result tree is created, we need to pass the association from the parent to the child breakdowns.
This will then be used to create the
+ * breakdown map at slice level for the child queries as well
+ *
+ * @return a hierarchical representation of the profiled query tree
+ */
+ @Override
+ public List<ProfileResult> getTree() {
+ for (Integer root : roots) {
+ final ContextualProfileBreakdown<QueryTimingType> parentBreakdown = breakdowns.get(root);
+ assert parentBreakdown instanceof ConcurrentQueryProfileBreakdown;
+ final Map<Collector, List<LeafReaderContext>> parentCollectorToLeaves = ((ConcurrentQueryProfileBreakdown) parentBreakdown)
+ .getSliceCollectorsToLeaves();
+ // update all the children with the parent collectorToLeaves association
+ updateCollectorToLeavesForChildBreakdowns(root, parentCollectorToLeaves);
+ }
+ // once the collector to leaves mapping is updated, get the result
+ return super.getTree();
+ }
+
+ /**
+ * Updates the children with the collector-to-leaves mapping as recorded by the parent breakdown
+ * @param parentToken parent token number in the tree
+ * @param collectorToLeaves collector to leaves mapping recorded by the parent
+ */
+ private void updateCollectorToLeavesForChildBreakdowns(Integer parentToken, Map<Collector, List<LeafReaderContext>> collectorToLeaves) {
+ final List<Integer> children = tree.get(parentToken);
+ if (children != null) {
+ for (Integer currentChild : children) {
+ final ContextualProfileBreakdown<QueryTimingType> currentChildBreakdown = breakdowns.get(currentChild);
+ currentChildBreakdown.associateCollectorsToLeaves(collectorToLeaves);
+ updateCollectorToLeavesForChildBreakdowns(currentChild, collectorToLeaves);
+ }
+ }
+ }
+}
diff --git a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfiler.java b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfiler.java
new file mode 100644
index 0000000000000..42bf23bb13fbe
--- /dev/null
+++ b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfiler.java
@@ -0,0 +1,134 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.search.profile.query;
+
+import org.apache.lucene.search.Query;
+import org.opensearch.search.profile.ContextualProfileBreakdown;
+import org.opensearch.search.profile.ProfileResult;
+import org.opensearch.search.profile.Timer;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * This class acts as thread-local storage for profiling a query with concurrent execution
+ *
+ * @opensearch.internal
+ */
+public final class ConcurrentQueryProfiler extends QueryProfiler {
+
+ private final Map<Long, ConcurrentQueryProfileTree> threadToProfileTree;
+ // The LinkedList does not need to be thread safe, as the map associates thread IDs with LinkedLists, and only
+ // one thread will access the LinkedList at a time.
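+ // For example (hypothetical thread IDs), this map could look like {12 -> [timerA, timerB], 34 -> [timerC]}
+ // after two search threads have each timed their own rewrites.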
+ private final Map<Long, LinkedList<Timer>> threadToRewriteTimers;
+
+ public ConcurrentQueryProfiler(AbstractQueryProfileTree profileTree) {
+ super(profileTree);
+ long threadId = getCurrentThreadId();
+ // We utilize LinkedHashMap to preserve the insertion order of the profiled queries
+ threadToProfileTree = Collections.synchronizedMap(new LinkedHashMap<>());
+ threadToProfileTree.put(threadId, (ConcurrentQueryProfileTree) profileTree);
+ threadToRewriteTimers = new ConcurrentHashMap<>();
+ threadToRewriteTimers.put(threadId, new LinkedList<>());
+ }
+
+ @Override
+ public ContextualProfileBreakdown<QueryTimingType> getQueryBreakdown(Query query) {
+ ConcurrentQueryProfileTree profileTree = threadToProfileTree.computeIfAbsent(
+ getCurrentThreadId(),
+ k -> new ConcurrentQueryProfileTree()
+ );
+ return profileTree.getProfileBreakdown(query);
+ }
+
+ /**
+ * Removes the last (i.e. most recent) element from the ConcurrentQueryProfileTree stack.
+ */
+ @Override
+ public void pollLastElement() {
+ ConcurrentQueryProfileTree concurrentProfileTree = threadToProfileTree.get(getCurrentThreadId());
+ if (concurrentProfileTree != null) {
+ concurrentProfileTree.pollLast();
+ }
+ }
+
+ /**
+ * @return a hierarchical representation of the profiled tree
+ */
+ @Override
+ public List<ProfileResult> getTree() {
+ List<ProfileResult> profileResults = new ArrayList<>();
+ for (Map.Entry<Long, ConcurrentQueryProfileTree> profile : threadToProfileTree.entrySet()) {
+ profileResults.addAll(profile.getValue().getTree());
+ }
+ return profileResults;
+ }
+
+ /**
+ * Begin timing the rewrite phase of a request
+ */
+ @Override
+ public void startRewriteTime() {
+ Timer rewriteTimer = new Timer();
+ threadToRewriteTimers.computeIfAbsent(getCurrentThreadId(), k -> new LinkedList<>()).add(rewriteTimer);
+ rewriteTimer.start();
+ }
+
+ /**
+ * Stop recording the current rewrite timer
+ */
+ public void stopAndAddRewriteTime() {
+ Timer rewriteTimer = threadToRewriteTimers.get(getCurrentThreadId()).getLast();
+ rewriteTimer.stop();
+ }
+
+ /**
+ * @return total time taken to rewrite all queries in this concurrent query profiler
+ */
+ @Override
+ public long getRewriteTime() {
+ long totalRewriteTime = 0L;
+ List<Timer> rewriteTimers = new LinkedList<>();
+ threadToRewriteTimers.values().forEach(rewriteTimers::addAll);
+ LinkedList<long[]> mergedIntervals = mergeRewriteTimeIntervals(rewriteTimers);
+ for (long[] interval : mergedIntervals) {
+ totalRewriteTime += interval[1] - interval[0];
+ }
+ return totalRewriteTime;
+ }
+
+ // package private for unit testing
+ LinkedList<long[]> mergeRewriteTimeIntervals(List<Timer> timers) {
+ LinkedList<long[]> mergedIntervals = new LinkedList<>();
+ timers.sort(Comparator.comparingLong(Timer::getEarliestTimerStartTime));
+ for (Timer timer : timers) {
+ long startTime = timer.getEarliestTimerStartTime();
+ long endTime = startTime + timer.getApproximateTiming();
+ if (mergedIntervals.isEmpty() || mergedIntervals.getLast()[1] < startTime) {
+ long[] interval = new long[2];
+ interval[0] = startTime;
+ interval[1] = endTime;
+ mergedIntervals.add(interval);
+ } else {
+ mergedIntervals.getLast()[1] = Math.max(mergedIntervals.getLast()[1], endTime);
+ }
+ }
+ return mergedIntervals;
+ }
+
+ private long getCurrentThreadId() {
+ return Thread.currentThread().getId();
+ }
+}
diff --git a/server/src/main/java/org/opensearch/search/profile/query/InternalProfileCollector.java b/server/src/main/java/org/opensearch/search/profile/query/InternalProfileCollector.java
index
8b860c3a58cea..024d91a8e2ed2 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/InternalProfileCollector.java +++ b/server/src/main/java/org/opensearch/search/profile/query/InternalProfileCollector.java @@ -44,11 +44,12 @@ /** * This class wraps a Lucene Collector and times the execution of: - * - setScorer() - * - collect() - * - doSetNextReader() - * - needsScores() - * + * <ul> + * <li> setScorer()</li> + * <li> collect()</li> + * <li> doSetNextReader()</li> + * <li> needsScores()</li> + * </ul> * InternalProfiler facilitates the linking of the Collector graph * * @opensearch.internal @@ -117,7 +118,7 @@ public Collector getCollector() { /** * Creates a human-friendly representation of the Collector name. - * + * <p> * InternalBucket Collectors use the aggregation name in their toString() method, * which makes the profiled output a bit nicer. * diff --git a/server/src/main/java/org/opensearch/search/profile/query/InternalProfileCollectorManager.java b/server/src/main/java/org/opensearch/search/profile/query/InternalProfileCollectorManager.java index 074738d2491ec..4156e442c9254 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/InternalProfileCollectorManager.java +++ b/server/src/main/java/org/opensearch/search/profile/query/InternalProfileCollectorManager.java @@ -10,6 +10,7 @@ import org.apache.lucene.search.Collector; import org.apache.lucene.search.CollectorManager; +import org.opensearch.search.aggregations.AggregationCollectorManager; import org.opensearch.search.query.EarlyTerminatingListener; import org.opensearch.search.query.ReduceableSearchResult; @@ -38,6 +39,7 @@ public class InternalProfileCollectorManager private long minSliceTime = Long.MAX_VALUE; private long avgSliceTime = 0; private int sliceCount = 0; + private String collectorManagerName; public InternalProfileCollectorManager( CollectorManager<? extends Collector, ReduceableSearchResult> manager, @@ -46,9 +48,29 @@ public InternalProfileCollectorManager( ) { this.manager = manager; this.reason = reason; + this.collectorManagerName = deriveCollectorManagerName(manager); this.children = children; } + /** + * Creates a human-friendly representation of the CollectorManager name. + * + * @param manager The CollectorManager to derive a name from + * @return A (hopefully) prettier name + */ + private String deriveCollectorManagerName(CollectorManager<? extends Collector, ReduceableSearchResult> manager) { + String name = manager.getClass().getSimpleName(); + if (name.equals("")) { + name = manager.getClass().getEnclosingClass().getSimpleName(); + } + + // Include the user-defined agg name + if (manager instanceof AggregationCollectorManager) { + name += ": [" + ((AggregationCollectorManager) manager).getCollectorName() + "]"; + } + return name; + } + @Override public InternalProfileCollector newCollector() throws IOException { return new InternalProfileCollector(manager.newCollector(), reason, children); @@ -117,7 +139,7 @@ public Collection<? 
extends InternalProfileComponent> children() { @Override public String getName() { - return manager.getClass().getSimpleName(); + return collectorManagerName; } @Override diff --git a/server/src/main/java/org/opensearch/search/profile/query/InternalProfileComponent.java b/server/src/main/java/org/opensearch/search/profile/query/InternalProfileComponent.java index 1752d76e27f06..4d8fbcb745a00 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/InternalProfileComponent.java +++ b/server/src/main/java/org/opensearch/search/profile/query/InternalProfileComponent.java @@ -8,13 +8,16 @@ package org.opensearch.search.profile.query; +import org.opensearch.common.annotation.PublicApi; + import java.util.Collection; /** * Container for an agg profiling component * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.9.0") public interface InternalProfileComponent { /** * @return profile component name diff --git a/server/src/main/java/org/opensearch/search/profile/query/InternalQueryProfileTree.java b/server/src/main/java/org/opensearch/search/profile/query/InternalQueryProfileTree.java index 40c6a29384491..1ed367f094fb7 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/InternalQueryProfileTree.java +++ b/server/src/main/java/org/opensearch/search/profile/query/InternalQueryProfileTree.java @@ -32,73 +32,18 @@ package org.opensearch.search.profile.query; -import org.apache.lucene.search.Query; -import org.opensearch.search.profile.AbstractInternalProfileTree; import org.opensearch.search.profile.ContextualProfileBreakdown; import org.opensearch.search.profile.ProfileResult; /** - * This class tracks the dependency tree for queries (scoring and rewriting) and - * generates {@link QueryProfileBreakdown} for each node in the tree. It also finalizes the tree - * and returns a list of {@link ProfileResult} that can be serialized back to the client + * This class returns a list of {@link ProfileResult} that can be serialized back to the client in the non-concurrent execution. * * @opensearch.internal */ -final class InternalQueryProfileTree extends AbstractInternalProfileTree<ContextualProfileBreakdown<QueryTimingType>, Query> { - - /** Rewrite time */ - private long rewriteTime; - private long rewriteScratch; - private final boolean concurrent; - - InternalQueryProfileTree(boolean concurrent) { - this.concurrent = concurrent; - } +public class InternalQueryProfileTree extends AbstractQueryProfileTree { @Override protected ContextualProfileBreakdown<QueryTimingType> createProfileBreakdown() { - return (concurrent) ? new ConcurrentQueryProfileBreakdown() : new QueryProfileBreakdown(); - } - - @Override - protected String getTypeFromElement(Query query) { - // Anonymous classes won't have a name, - // we need to get the super class - if (query.getClass().getSimpleName().isEmpty()) { - return query.getClass().getSuperclass().getSimpleName(); - } - return query.getClass().getSimpleName(); - } - - @Override - protected String getDescriptionFromElement(Query query) { - return query.toString(); - } - - /** - * Begin timing a query for a specific Timing context - */ - public void startRewriteTime() { - assert rewriteScratch == 0; - rewriteScratch = System.nanoTime(); - } - - /** - * Halt the timing process and add the elapsed rewriting time. 
- * startRewriteTime() must be called for a particular context prior to calling - * stopAndAddRewriteTime(), otherwise the elapsed time will be negative and - * nonsensical - * - * @return The elapsed time - */ - public long stopAndAddRewriteTime() { - long time = Math.max(1, System.nanoTime() - rewriteScratch); - rewriteTime += time; - rewriteScratch = 0; - return time; - } - - public long getRewriteTime() { - return rewriteTime; + return new QueryProfileBreakdown(); } } diff --git a/server/src/main/java/org/opensearch/search/profile/query/ProfileWeight.java b/server/src/main/java/org/opensearch/search/profile/query/ProfileWeight.java index 12f9a7184d84a..c7e70d8d88007 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/ProfileWeight.java +++ b/server/src/main/java/org/opensearch/search/profile/query/ProfileWeight.java @@ -34,6 +34,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.BulkScorer; +import org.apache.lucene.search.Collector; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.Query; import org.apache.lucene.search.Scorer; @@ -137,4 +138,7 @@ public boolean isCacheable(LeafReaderContext ctx) { return false; } + public void associateCollectorToLeaves(LeafReaderContext leaf, Collector collector) { + profile.associateCollectorToLeaves(collector, leaf); + } } diff --git a/server/src/main/java/org/opensearch/search/profile/query/QueryProfileShardResult.java b/server/src/main/java/org/opensearch/search/profile/query/QueryProfileShardResult.java index a9f3d4aaf7885..76d6229159698 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/QueryProfileShardResult.java +++ b/server/src/main/java/org/opensearch/search/profile/query/QueryProfileShardResult.java @@ -32,6 +32,7 @@ package org.opensearch.search.profile.query; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -51,8 +52,9 @@ * A container class to hold the profile results for a single shard in the request. * Contains a list of query profiles, a collector tree and a total rewrite tree. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class QueryProfileShardResult implements Writeable, ToXContentObject { public static final String COLLECTOR = "collector"; diff --git a/server/src/main/java/org/opensearch/search/profile/query/QueryProfiler.java b/server/src/main/java/org/opensearch/search/profile/query/QueryProfiler.java index 9527e010005c3..78e65c5bfa257 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/QueryProfiler.java +++ b/server/src/main/java/org/opensearch/search/profile/query/QueryProfiler.java @@ -33,6 +33,7 @@ package org.opensearch.search.profile.query; import org.apache.lucene.search.Query; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.search.profile.AbstractProfiler; import org.opensearch.search.profile.ContextualProfileBreakdown; @@ -44,22 +45,23 @@ * "online" as the weights are wrapped by ContextIndexSearcher. This allows us * to know the relationship between nodes in tree without explicitly * walking the tree or pre-wrapping everything - * + * <p> * A Profiler is associated with every Search, not per Search-Request. E.g. a * request may execute two searches (query + global agg). 
A Profiler just represents one of those
 *
- * @opensearch.internal
+ * @opensearch.api
 */
-public final class QueryProfiler extends AbstractProfiler<ContextualProfileBreakdown<QueryTimingType>, Query> {
+@PublicApi(since = "1.0.0")
+public class QueryProfiler extends AbstractProfiler<ContextualProfileBreakdown<QueryTimingType>, Query> {

 /**
 * The root Collector used in the search
 */
 private InternalProfileComponent collector;

- public QueryProfiler(boolean concurrent) {
- super(new InternalQueryProfileTree(concurrent));
+ public QueryProfiler(AbstractQueryProfileTree profileTree) {
+ super(profileTree);
 }

 /** Set the collector that is associated with this profiler. */
@@ -75,24 +77,24 @@ public void setCollector(InternalProfileComponent collector) {
 * single metric
 */
 public void startRewriteTime() {
- ((InternalQueryProfileTree) profileTree).startRewriteTime();
+ ((AbstractQueryProfileTree) profileTree).startRewriteTime();
 }

 /**
 * Stop recording the current rewrite and add its time to the total tally.
- *
- * @return cumulative rewrite time
 */
- public long stopAndAddRewriteTime() {
- return ((InternalQueryProfileTree) profileTree).stopAndAddRewriteTime();
+ public void stopAndAddRewriteTime() {
+ ((AbstractQueryProfileTree) profileTree).stopAndAddRewriteTime();
 }

 /**
+ * The rewriting process is complex and hard to display because queries can undergo significant changes.
+ * Instead of showing intermediate results, we display the cumulative time for the non-concurrent search case.
 * @return total time taken to rewrite all queries in this profile
 */
 public long getRewriteTime() {
- return ((InternalQueryProfileTree) profileTree).getRewriteTime();
+ return ((AbstractQueryProfileTree) profileTree).getRewriteTime();
 }

 /**
diff --git a/server/src/main/java/org/opensearch/search/query/ConcurrentQueryPhaseSearcher.java b/server/src/main/java/org/opensearch/search/query/ConcurrentQueryPhaseSearcher.java
index 4d3b2a124f64f..e22f766d3894c 100644
--- a/server/src/main/java/org/opensearch/search/query/ConcurrentQueryPhaseSearcher.java
+++ b/server/src/main/java/org/opensearch/search/query/ConcurrentQueryPhaseSearcher.java
@@ -13,6 +13,7 @@
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.CollectorManager;
 import org.apache.lucene.search.Query;
+import org.opensearch.OpenSearchException;
 import org.opensearch.search.aggregations.AggregationProcessor;
 import org.opensearch.search.aggregations.ConcurrentAggregationProcessor;
 import org.opensearch.search.internal.ContextIndexSearcher;
@@ -103,8 +104,8 @@ public AggregationProcessor aggregationProcessor(SearchContext searchContext) {
 }

 private static <T extends Exception> void rethrowCauseIfPossible(RuntimeException re, SearchContext searchContext) throws T {
- // Rethrow exception if cause is null
- if (re.getCause() == null) {
+ // Rethrow exception if cause is null or if it's an instance of OpenSearchException
+ if (re.getCause() == null || re instanceof OpenSearchException) {
 throw re;
 }
diff --git a/server/src/main/java/org/opensearch/search/query/QueryCollectorContext.java b/server/src/main/java/org/opensearch/search/query/QueryCollectorContext.java
index c611587e879d6..91762bee2ac08 100644
--- a/server/src/main/java/org/opensearch/search/query/QueryCollectorContext.java
+++ b/server/src/main/java/org/opensearch/search/query/QueryCollectorContext.java
@@ -40,8 +40,12 @@
 import org.apache.lucene.search.ScoreMode;
 import org.apache.lucene.search.SimpleCollector;
 import
org.apache.lucene.search.Weight; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lucene.MinimumScoreCollector; import org.opensearch.common.lucene.search.FilteredCollector; +import org.opensearch.search.aggregations.AggregationCollectorManager; +import org.opensearch.search.aggregations.BucketCollector; +import org.opensearch.search.profile.query.CollectorResult; import org.opensearch.search.profile.query.InternalProfileCollector; import org.opensearch.search.profile.query.InternalProfileCollectorManager; @@ -59,8 +63,9 @@ /** * The context used during query collection * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class QueryCollectorContext { private static final Collector EMPTY_COLLECTOR = new SimpleCollector() { @Override @@ -198,16 +203,46 @@ protected InternalProfileCollector createWithProfiler(InternalProfileCollector i for (CollectorManager<? extends Collector, ReduceableSearchResult> manager : subs) { final Collector collector = manager.newCollector(); - if (!(collector instanceof InternalProfileCollector)) { - throw new IllegalArgumentException("non-profiling collector"); + if (collector instanceof BucketCollector) { + subCollectors.add( + new InternalProfileCollector(collector, CollectorResult.REASON_AGGREGATION, Collections.emptyList()) + ); + } else { + subCollectors.add( + new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_MULTI, Collections.emptyList()) + ); } - subCollectors.add((InternalProfileCollector) collector); } final Collector collector = MultiCollector.wrap(subCollectors); return new InternalProfileCollector(collector, REASON_SEARCH_MULTI, subCollectors); } + @Override + protected InternalProfileCollectorManager createWithProfiler(InternalProfileCollectorManager in) { + final List<CollectorManager<?, ReduceableSearchResult>> managers = new ArrayList<>(); + final List<InternalProfileCollectorManager> children = new ArrayList<>(); + managers.add(in); + children.add(in); + for (CollectorManager<? extends Collector, ReduceableSearchResult> manager : subs) { + final InternalProfileCollectorManager subCollectorManager; + if (manager instanceof AggregationCollectorManager) { + subCollectorManager = new InternalProfileCollectorManager( + manager, + ((AggregationCollectorManager) manager).getCollectorReason(), + Collections.emptyList() + ); + } else { + subCollectorManager = new InternalProfileCollectorManager(manager, REASON_SEARCH_MULTI, Collections.emptyList()); + } + managers.add(subCollectorManager); + children.add(subCollectorManager); + } + CollectorManager<? extends Collector, ReduceableSearchResult> multiCollectorManager = QueryCollectorManagerContext + .createMultiCollectorManager(managers); + return new InternalProfileCollectorManager(multiCollectorManager, REASON_SEARCH_MULTI, children); + } + @Override CollectorManager<? extends Collector, ReduceableSearchResult> createManager( CollectorManager<? 
extends Collector, ReduceableSearchResult> in
diff --git a/server/src/main/java/org/opensearch/search/query/QueryPhase.java b/server/src/main/java/org/opensearch/search/query/QueryPhase.java
index 8418fdca2f777..8f98f0d9efbd4 100644
--- a/server/src/main/java/org/opensearch/search/query/QueryPhase.java
+++ b/server/src/main/java/org/opensearch/search/query/QueryPhase.java
@@ -48,9 +48,11 @@
 import org.apache.lucene.search.TotalHits;
 import org.opensearch.action.search.SearchShardTask;
 import org.opensearch.common.Booleans;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.lucene.Lucene;
 import org.opensearch.common.lucene.search.TopDocsAndMaxScore;
 import org.opensearch.common.util.concurrent.EWMATrackingThreadPoolExecutor;
+import org.opensearch.core.tasks.TaskCancelledException;
 import org.opensearch.lucene.queries.SearchAfterSortedDocQuery;
 import org.opensearch.search.DocValueFormat;
 import org.opensearch.search.SearchContextSourcePrinter;
@@ -67,7 +69,6 @@
 import org.opensearch.search.rescore.RescoreProcessor;
 import org.opensearch.search.sort.SortAndFormats;
 import org.opensearch.search.suggest.SuggestProcessor;
-import org.opensearch.tasks.TaskCancelledException;
 import org.opensearch.threadpool.ThreadPool;

 import java.io.IOException;
@@ -88,8 +89,9 @@
 * Query phase of a search request, used to run the query and get back from each shard information about the matching documents
 * (document ids and score or sort criteria) so that matches can be reduced on the coordinating node
 *
- * @opensearch.internal
+ * @opensearch.api
 */
+@PublicApi(since = "1.0.0")
 public class QueryPhase {
 private static final Logger LOGGER = LogManager.getLogger(QueryPhase.class);
 // TODO: remove this property
diff --git a/server/src/main/java/org/opensearch/search/query/QueryPhaseExecutionException.java b/server/src/main/java/org/opensearch/search/query/QueryPhaseExecutionException.java
index 43bd87183d0be..072c78c67b977 100644
--- a/server/src/main/java/org/opensearch/search/query/QueryPhaseExecutionException.java
+++ b/server/src/main/java/org/opensearch/search/query/QueryPhaseExecutionException.java
@@ -32,6 +32,7 @@
 package org.opensearch.search.query;

+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.search.SearchException;
 import org.opensearch.search.SearchShardTarget;
@@ -41,8 +42,9 @@
 /**
 * Thrown if there are any errors in the query phase
 *
- * @opensearch.internal
+ * @opensearch.api
 */
+@PublicApi(since = "1.0.0")
 public class QueryPhaseExecutionException extends SearchException {

 public QueryPhaseExecutionException(SearchShardTarget shardTarget, String msg, Throwable cause) {
diff --git a/server/src/main/java/org/opensearch/search/query/QueryPhaseSearcher.java b/server/src/main/java/org/opensearch/search/query/QueryPhaseSearcher.java
index 4518b5e71bdf3..38e45a5212c81 100644
--- a/server/src/main/java/org/opensearch/search/query/QueryPhaseSearcher.java
+++ b/server/src/main/java/org/opensearch/search/query/QueryPhaseSearcher.java
@@ -10,6 +10,7 @@
 import org.apache.lucene.search.CollectorManager;
 import org.apache.lucene.search.Query;
+import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.search.aggregations.AggregationProcessor;
 import org.opensearch.search.aggregations.DefaultAggregationProcessor;
 import org.opensearch.search.internal.ContextIndexSearcher;
@@ -22,8 +23,9 @@
 * The extension point which allows plugging in a custom search implementation to be
 * used at
{@link QueryPhase}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.0.0") public interface QueryPhaseSearcher { /** * Perform search using {@link CollectorManager} diff --git a/server/src/main/java/org/opensearch/search/query/QueryPhaseSearcherWrapper.java b/server/src/main/java/org/opensearch/search/query/QueryPhaseSearcherWrapper.java index 407603f00461e..19a59e9f7bebe 100644 --- a/server/src/main/java/org/opensearch/search/query/QueryPhaseSearcherWrapper.java +++ b/server/src/main/java/org/opensearch/search/query/QueryPhaseSearcherWrapper.java @@ -10,12 +10,11 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.lucene.search.CollectorManager; import org.apache.lucene.search.Query; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.AggregationProcessor; import org.opensearch.search.internal.ContextIndexSearcher; import org.opensearch.search.internal.SearchContext; -import org.apache.lucene.search.CollectorManager; import java.io.IOException; import java.util.LinkedList; @@ -33,9 +32,7 @@ public class QueryPhaseSearcherWrapper implements QueryPhaseSearcher { public QueryPhaseSearcherWrapper() { this.defaultQueryPhaseSearcher = new QueryPhase.DefaultQueryPhaseSearcher(); - this.concurrentQueryPhaseSearcher = FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH) - ? new ConcurrentQueryPhaseSearcher() - : null; + this.concurrentQueryPhaseSearcher = new ConcurrentQueryPhaseSearcher(); } /** @@ -57,8 +54,7 @@ public boolean searchWith( boolean hasFilterCollector, boolean hasTimeout ) throws IOException { - if (searchContext.isConcurrentSegmentSearchEnabled()) { - LOGGER.info("Using concurrent search over segments (experimental)"); + if (searchContext.shouldUseConcurrentSearch()) { return concurrentQueryPhaseSearcher.searchWith(searchContext, searcher, query, collectors, hasFilterCollector, hasTimeout); } else { return defaultQueryPhaseSearcher.searchWith(searchContext, searcher, query, collectors, hasFilterCollector, hasTimeout); @@ -72,8 +68,7 @@ public boolean searchWith( */ @Override public AggregationProcessor aggregationProcessor(SearchContext searchContext) { - if (searchContext.isConcurrentSegmentSearchEnabled()) { - LOGGER.info("Using concurrent search over segments (experimental)"); + if (searchContext.shouldUseConcurrentSearch()) { return concurrentQueryPhaseSearcher.aggregationProcessor(searchContext); } else { return defaultQueryPhaseSearcher.aggregationProcessor(searchContext); diff --git a/server/src/main/java/org/opensearch/search/query/QuerySearchRequest.java b/server/src/main/java/org/opensearch/search/query/QuerySearchRequest.java index 776b8fbb9fca2..5421745f1656e 100644 --- a/server/src/main/java/org/opensearch/search/query/QuerySearchRequest.java +++ b/server/src/main/java/org/opensearch/search/query/QuerySearchRequest.java @@ -37,14 +37,14 @@ import org.opensearch.action.search.SearchShardTask; import org.opensearch.action.support.IndicesOptions; import org.opensearch.common.Nullable; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.Strings; +import org.opensearch.core.tasks.TaskId; import org.opensearch.search.dfs.AggregatedDfs; import org.opensearch.search.internal.ShardSearchContextId; import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.tasks.Task; -import 
org.opensearch.tasks.TaskId; import org.opensearch.transport.TransportRequest; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java b/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java index dcf940a4edcb4..f3ac953ab9d1d 100644 --- a/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java +++ b/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java @@ -34,10 +34,11 @@ import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.TotalHits; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.io.stream.DelayableWriteable; +import org.opensearch.common.lucene.search.TopDocsAndMaxScore; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.lucene.search.TopDocsAndMaxScore; import org.opensearch.search.DocValueFormat; import org.opensearch.search.RescoreDocIds; import org.opensearch.search.SearchPhaseResult; @@ -58,8 +59,9 @@ /** * The result of the query search * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class QuerySearchResult extends SearchPhaseResult { private int from; @@ -123,7 +125,7 @@ public static QuerySearchResult nullInstance() { * Returns true if the result doesn't contain any useful information. * It is used by the search action to avoid creating an empty response on * shard request that rewrites to match_no_docs. - * + * <p> * TODO: Currently we need the concrete aggregators to build empty responses. This means that we cannot * build an empty response in the coordinating node so we rely on this hack to ensure that at least one shard * returns a valid empty response. We should move the ability to create empty responses to aggregation builders diff --git a/server/src/main/java/org/opensearch/search/query/ReduceableSearchResult.java b/server/src/main/java/org/opensearch/search/query/ReduceableSearchResult.java index dbfaa70e1968f..8f89b5bf5a3a7 100644 --- a/server/src/main/java/org/opensearch/search/query/ReduceableSearchResult.java +++ b/server/src/main/java/org/opensearch/search/query/ReduceableSearchResult.java @@ -8,13 +8,16 @@ package org.opensearch.search.query; +import org.opensearch.common.annotation.PublicApi; + import java.io.IOException; /** * The search result callback returned by reduce phase of the collector manager. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.0.0") public interface ReduceableSearchResult { /** * Apply the reduce operation to the query search results diff --git a/server/src/main/java/org/opensearch/search/query/TopDocsCollectorContext.java b/server/src/main/java/org/opensearch/search/query/TopDocsCollectorContext.java index 39c34f7c0d5d5..65d3948c8401e 100644 --- a/server/src/main/java/org/opensearch/search/query/TopDocsCollectorContext.java +++ b/server/src/main/java/org/opensearch/search/query/TopDocsCollectorContext.java @@ -183,7 +183,9 @@ CollectorManager<?, ReduceableSearchResult> createManager(CollectorManager<?, Re ); } else { if (hitCount == -1) { - if (trackTotalHitsUpTo != SearchContext.TRACK_TOTAL_HITS_ACCURATE) { + if (trackTotalHitsUpTo == SearchContext.TRACK_TOTAL_HITS_ACCURATE) { + manager = new TotalHitCountCollectorManager(sort); + } else { manager = new EarlyTerminatingCollectorManager<>( new TotalHitCountCollectorManager(sort), trackTotalHitsUpTo, @@ -530,7 +532,7 @@ public ReduceableSearchResult reduce(Collection<Collector> collectors) throws IO float score = collector.getMaxScore(); if (Float.isNaN(maxScore)) { maxScore = score; - } else { + } else if (!Float.isNaN(score)) { maxScore = Math.max(maxScore, score); } } diff --git a/server/src/main/java/org/opensearch/search/rescore/RescoreContext.java b/server/src/main/java/org/opensearch/search/rescore/RescoreContext.java index c0e4bbb874531..d4b87f004b58f 100644 --- a/server/src/main/java/org/opensearch/search/rescore/RescoreContext.java +++ b/server/src/main/java/org/opensearch/search/rescore/RescoreContext.java @@ -33,6 +33,7 @@ package org.opensearch.search.rescore; import org.apache.lucene.search.Query; +import org.opensearch.common.annotation.PublicApi; import java.util.Collections; import java.util.List; @@ -43,8 +44,9 @@ * implementations should extend this with any additional resources that * they will need while rescoring. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RescoreContext { private final int windowSize; private final Rescorer rescorer; diff --git a/server/src/main/java/org/opensearch/search/rescore/Rescorer.java b/server/src/main/java/org/opensearch/search/rescore/Rescorer.java index 33f8e5e7b535d..c7a9b077109cf 100644 --- a/server/src/main/java/org/opensearch/search/rescore/Rescorer.java +++ b/server/src/main/java/org/opensearch/search/rescore/Rescorer.java @@ -35,19 +35,21 @@ import org.apache.lucene.search.Explanation; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.TopDocs; +import org.opensearch.common.annotation.PublicApi; import java.io.IOException; /** * A query rescorer interface used to re-rank the Top-K results of a previously * executed search. - * + * <p> * Subclasses should borrow heavily from {@link QueryRescorer} because it is * fairly well behaved and documents that tradeoffs that it is making. There * is also an {@code ExampleRescorer} that is worth looking at. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface Rescorer { /** * Modifies the result of the previously executed search ({@link TopDocs}) diff --git a/server/src/main/java/org/opensearch/search/rescore/RescorerBuilder.java b/server/src/main/java/org/opensearch/search/rescore/RescorerBuilder.java index d4094298f0c5f..7ac7454f02e3b 100644 --- a/server/src/main/java/org/opensearch/search/rescore/RescorerBuilder.java +++ b/server/src/main/java/org/opensearch/search/rescore/RescorerBuilder.java @@ -32,13 +32,14 @@ package org.opensearch.search.rescore; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; -import org.opensearch.common.Strings; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.NamedWriteable; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.ParseField; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -51,8 +52,9 @@ /** * The abstract base builder for instances of {@link RescorerBuilder}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class RescorerBuilder<RB extends RescorerBuilder<RB>> implements NamedWriteable, @@ -170,6 +172,6 @@ public boolean equals(Object obj) { @Override public String toString() { - return Strings.toString(XContentType.JSON, this, true, true); + return Strings.toString(MediaTypeRegistry.JSON, this, true, true); } } diff --git a/server/src/main/java/org/opensearch/search/searchafter/SearchAfterBuilder.java b/server/src/main/java/org/opensearch/search/searchafter/SearchAfterBuilder.java index 516b388ce2186..a45b2bd40c03d 100644 --- a/server/src/main/java/org/opensearch/search/searchafter/SearchAfterBuilder.java +++ b/server/src/main/java/org/opensearch/search/searchafter/SearchAfterBuilder.java @@ -37,16 +37,15 @@ import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.SortedSetSortField; import org.opensearch.OpenSearchException; +import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; -import org.opensearch.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.common.text.Text; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.search.DocValueFormat; @@ -343,7 +342,7 @@ public String toString() { XContentBuilder builder = XContentFactory.jsonBuilder(); builder.prettyPrint(); toXContent(builder, EMPTY_PARAMS); - return Strings.toString(builder); + return builder.toString(); } catch (Exception e) { throw new OpenSearchException("Failed to build xcontent.", e); } diff --git a/server/src/main/java/org/opensearch/search/slice/DocValuesSliceQuery.java 
b/server/src/main/java/org/opensearch/search/slice/DocValuesSliceQuery.java index 85d003db5726f..856e103193463 100644 --- a/server/src/main/java/org/opensearch/search/slice/DocValuesSliceQuery.java +++ b/server/src/main/java/org/opensearch/search/slice/DocValuesSliceQuery.java @@ -32,8 +32,8 @@ package org.opensearch.search.slice; -import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.DocValues; +import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.search.ConstantScoreScorer; import org.apache.lucene.search.ConstantScoreWeight; @@ -49,7 +49,7 @@ /** * A {@link SliceQuery} that uses the numeric doc values of a field to do the slicing. - * + * <p> * <b>NOTE</b>: With deterministic field values this query can be used across different readers safely. * If updates are accepted on the field you must ensure that the same reader is used for all `slice` queries. * diff --git a/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java b/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java index 31e03f5ef511e..c9b8a896ed525 100644 --- a/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java +++ b/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java @@ -40,14 +40,14 @@ import org.opensearch.cluster.routing.GroupShardsIterator; import org.opensearch.cluster.routing.ShardIterator; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.util.set.Sets; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.logging.DeprecationLogger; -import org.opensearch.common.util.set.Sets; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.ParseField; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; @@ -76,12 +76,10 @@ * Otherwise the provided field must be a numeric and doc_values must be enabled. In that case a * {@link org.opensearch.search.slice.DocValuesSliceQuery} is used to filter the results. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SliceBuilder implements Writeable, ToXContentObject { - - private static final DeprecationLogger DEPRECATION_LOG = DeprecationLogger.getLogger(SliceBuilder.class); - public static final ParseField FIELD_FIELD = new ParseField("field"); public static final ParseField ID_FIELD = new ParseField("id"); public static final ParseField MAX_FIELD = new ParseField("max"); @@ -132,7 +130,7 @@ public void writeTo(StreamOutput out) throws IOException { } private SliceBuilder setField(String field) { - if (org.opensearch.core.common.Strings.isEmpty(field)) { + if (Strings.isEmpty(field)) { throw new IllegalArgumentException("field name is null or empty"); } this.field = field; @@ -328,6 +326,6 @@ private GroupShardsIterator<ShardIterator> buildShardIterator(ClusterService clu @Override public String toString() { - return Strings.toString(XContentType.JSON, this, true, true); + return Strings.toString(MediaTypeRegistry.JSON, this, true, true); } } diff --git a/server/src/main/java/org/opensearch/search/slice/TermsSliceQuery.java b/server/src/main/java/org/opensearch/search/slice/TermsSliceQuery.java index 630d17dfaed19..05f36b0d6f3cf 100644 --- a/server/src/main/java/org/opensearch/search/slice/TermsSliceQuery.java +++ b/server/src/main/java/org/opensearch/search/slice/TermsSliceQuery.java @@ -32,19 +32,19 @@ package org.opensearch.search.slice; -import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; -import org.apache.lucene.index.PostingsEnum; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.ConstantScoreScorer; +import org.apache.lucene.search.ConstantScoreWeight; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; -import org.apache.lucene.search.Weight; -import org.apache.lucene.search.ConstantScoreWeight; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; -import org.apache.lucene.search.ConstantScoreScorer; +import org.apache.lucene.search.Weight; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.DocIdSetBuilder; import org.apache.lucene.util.StringHelper; @@ -53,7 +53,7 @@ /** * A {@link SliceQuery} that uses the terms dictionary of a field to do the slicing. - * + * <p> * <b>NOTE</b>: The cost of this filter is O(N*M) where N is the number of unique terms in the dictionary * and M is the average number of documents per term. 
* For each segment this filter enumerates the terms dictionary, computes the hash code for each term and fills diff --git a/server/src/main/java/org/opensearch/search/sort/BucketedSort.java b/server/src/main/java/org/opensearch/search/sort/BucketedSort.java index cc82b85781097..4a7557740384b 100644 --- a/server/src/main/java/org/opensearch/search/sort/BucketedSort.java +++ b/server/src/main/java/org/opensearch/search/sort/BucketedSort.java @@ -34,16 +34,17 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Scorable; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.lucene.ScorerAware; -import org.opensearch.core.common.util.BigArray; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.BitArray; import org.opensearch.common.util.DoubleArray; import org.opensearch.common.util.FloatArray; import org.opensearch.common.util.IntArray; import org.opensearch.common.util.LongArray; -import org.opensearch.common.lease.Releasable; -import org.opensearch.common.lease.Releasables; +import org.opensearch.core.common.util.BigArray; import org.opensearch.search.DocValueFormat; import java.io.IOException; @@ -94,14 +95,16 @@ * of allocations and to play well with our paged arrays. * </p> * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class BucketedSort implements Releasable { /** * Callbacks for storing extra data along with competitive sorts. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface ExtraData { /** * Swap the position of two bits of extra data. @@ -121,9 +124,10 @@ public interface ExtraData { /** * Loader for extra data * - * @opensearch.internal + * @opensearch.api */ @FunctionalInterface + @PublicApi(since = "1.0.0") interface Loader { /** * Load extra data from a doc. @@ -194,9 +198,10 @@ public int getBucketSize() { * Used with {@link BucketedSort#getValues(long, ResultBuilder)} to * build results from the sorting operation. * - * @opensearch.internal + * @opensearch.api */ @FunctionalInterface + @PublicApi(since = "1.0.0") public interface ResultBuilder<T> { T build(long index, SortValue sortValue); } @@ -408,7 +413,10 @@ public final void close() { /** * Performs the actual collection against a {@linkplain LeafReaderContext}. 
+ * + * @opensearch.api */ + @PublicApi(since = "1.0.0") public abstract class Leaf implements ScorerAware { private final LeafReaderContext ctx; private ExtraData.Loader loader = null; diff --git a/server/src/main/java/org/opensearch/search/sort/FieldSortBuilder.java b/server/src/main/java/org/opensearch/search/sort/FieldSortBuilder.java index eb1c8639d7492..5cecda1346b90 100644 --- a/server/src/main/java/org/opensearch/search/sort/FieldSortBuilder.java +++ b/server/src/main/java/org/opensearch/search/sort/FieldSortBuilder.java @@ -40,18 +40,18 @@ import org.apache.lucene.index.Terms; import org.apache.lucene.search.SortField; import org.opensearch.OpenSearchParseException; +import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.common.time.DateMathParser; +import org.opensearch.common.time.DateUtils; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.logging.DeprecationLogger; -import org.opensearch.common.time.DateMathParser; -import org.opensearch.common.time.DateUtils; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ObjectParser.ValueType; +import org.opensearch.core.xcontent.XContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.xcontent.XContent; import org.opensearch.index.IndexSortConfig; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; @@ -611,7 +611,8 @@ public static FieldSortBuilder getPrimaryFieldSortOrNull(SearchSourceBuilder sou * and configurations return <code>null</code>. */ public static MinAndMax<?> getMinMaxOrNull(QueryShardContext context, FieldSortBuilder sortBuilder) throws IOException { - return getMinMaxOrNullInternal(context.getIndexReader(), context, sortBuilder); + final SortAndFormats sort = SortBuilder.buildSort(Collections.singletonList(sortBuilder), context).get(); + return getMinMaxOrNullInternal(context.getIndexReader(), context, sortBuilder, sort); } /** @@ -619,14 +620,21 @@ public static MinAndMax<?> getMinMaxOrNull(QueryShardContext context, FieldSortB * The value can be extracted on non-nested indexed mapped fields of type keyword, numeric or date, other fields * and configurations return <code>null</code>. 
*/ - public static MinAndMax<?> getMinMaxOrNullForSegment(QueryShardContext context, LeafReaderContext ctx, FieldSortBuilder sortBuilder) - throws IOException { - return getMinMaxOrNullInternal(ctx.reader(), context, sortBuilder); + public static MinAndMax<?> getMinMaxOrNullForSegment( + QueryShardContext context, + LeafReaderContext ctx, + FieldSortBuilder sortBuilder, + SortAndFormats sort + ) throws IOException { + return getMinMaxOrNullInternal(ctx.reader(), context, sortBuilder, sort); } - private static MinAndMax<?> getMinMaxOrNullInternal(IndexReader reader, QueryShardContext context, FieldSortBuilder sortBuilder) - throws IOException { - SortAndFormats sort = SortBuilder.buildSort(Collections.singletonList(sortBuilder), context).get(); + private static MinAndMax<?> getMinMaxOrNullInternal( + IndexReader reader, + QueryShardContext context, + FieldSortBuilder sortBuilder, + SortAndFormats sort + ) throws IOException { SortField sortField = sort.sort.getSort()[0]; if (sortField.getField() == null) { return null; diff --git a/server/src/main/java/org/opensearch/search/sort/GeoDistanceSortBuilder.java b/server/src/main/java/org/opensearch/search/sort/GeoDistanceSortBuilder.java index 76df59617414a..0499bba3245c6 100644 --- a/server/src/main/java/org/opensearch/search/sort/GeoDistanceSortBuilder.java +++ b/server/src/main/java/org/opensearch/search/sort/GeoDistanceSortBuilder.java @@ -38,24 +38,25 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.LeafFieldComparator; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.SortField; import org.apache.lucene.search.comparators.DoubleComparator; import org.apache.lucene.util.BitSet; import org.opensearch.OpenSearchParseException; -import org.opensearch.core.ParseField; -import org.opensearch.core.common.ParsingException; import org.opensearch.common.geo.GeoDistance; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.geo.GeoUtils; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.unit.DistanceUnit; import org.opensearch.common.util.BigArrays; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.XContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.xcontent.XContentParser.Token; -import org.opensearch.core.xcontent.XContent; import org.opensearch.index.fielddata.FieldData; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; @@ -734,8 +735,8 @@ private NumericDoubleValues getNumericDoubleValues(LeafReaderContext context) th } @Override - public FieldComparator<?> newComparator(String fieldname, int numHits, boolean enableSkipping, boolean reversed) { - return new DoubleComparator(numHits, null, null, reversed, enableSkipping) { + public FieldComparator<?> newComparator(String fieldname, int numHits, Pruning pruning, boolean reversed) { + return new DoubleComparator(numHits, null, null, reversed, filterPruning(pruning)) { @Override public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws 
IOException { return new DoubleLeafComparator(context) { diff --git a/server/src/main/java/org/opensearch/search/sort/MinAndMax.java b/server/src/main/java/org/opensearch/search/sort/MinAndMax.java index 16d8f0f6533bd..98213ec630e33 100644 --- a/server/src/main/java/org/opensearch/search/sort/MinAndMax.java +++ b/server/src/main/java/org/opensearch/search/sort/MinAndMax.java @@ -33,10 +33,10 @@ package org.opensearch.search.sort; import org.apache.lucene.util.BytesRef; +import org.opensearch.common.lucene.Lucene; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.lucene.Lucene; import java.io.IOException; import java.math.BigInteger; diff --git a/server/src/main/java/org/opensearch/search/sort/NestedSortBuilder.java b/server/src/main/java/org/opensearch/search/sort/NestedSortBuilder.java index feabdfce4cd21..d101837b8734b 100644 --- a/server/src/main/java/org/opensearch/search/sort/NestedSortBuilder.java +++ b/server/src/main/java/org/opensearch/search/sort/NestedSortBuilder.java @@ -32,6 +32,7 @@ package org.opensearch.search.sort; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -50,8 +51,9 @@ /** * Builds a sort on nested objects * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NestedSortBuilder implements Writeable, ToXContentObject { public static final ParseField NESTED_FIELD = new ParseField("nested"); public static final ParseField PATH_FIELD = new ParseField("path"); diff --git a/server/src/main/java/org/opensearch/search/sort/ScoreSortBuilder.java b/server/src/main/java/org/opensearch/search/sort/ScoreSortBuilder.java index 8bc819b520d2b..1be49e5ca81ce 100644 --- a/server/src/main/java/org/opensearch/search/sort/ScoreSortBuilder.java +++ b/server/src/main/java/org/opensearch/search/sort/ScoreSortBuilder.java @@ -38,9 +38,9 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ObjectParser; +import org.opensearch.core.xcontent.XContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.xcontent.XContent; import org.opensearch.index.query.QueryRewriteContext; import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.DocValueFormat; diff --git a/server/src/main/java/org/opensearch/search/sort/ScriptSortBuilder.java b/server/src/main/java/org/opensearch/search/sort/ScriptSortBuilder.java index 6cb847c5fb739..bb1930eb3a953 100644 --- a/server/src/main/java/org/opensearch/search/sort/ScriptSortBuilder.java +++ b/server/src/main/java/org/opensearch/search/sort/ScriptSortBuilder.java @@ -38,17 +38,17 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; +import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.common.util.BigArrays; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.logging.DeprecationLogger; -import org.opensearch.common.util.BigArrays; 
import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.ObjectParser.ValueType; +import org.opensearch.core.xcontent.XContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.xcontent.XContent; import org.opensearch.index.fielddata.AbstractBinaryDocValues; import org.opensearch.index.fielddata.FieldData; import org.opensearch.index.fielddata.IndexFieldData; @@ -69,8 +69,11 @@ import org.opensearch.search.MultiValueMode; import java.io.IOException; +import java.io.UncheckedIOException; import java.util.Locale; +import java.util.Map; import java.util.Objects; +import java.util.concurrent.ConcurrentHashMap; import static org.opensearch.core.xcontent.ConstructingObjectParser.constructorArg; import static org.opensearch.search.sort.FieldSortBuilder.validateMaxChildrenExistOnlyInTopLevelNestedSort; @@ -355,11 +358,19 @@ private IndexFieldData.XFieldComparatorSource fieldComparatorSource(QueryShardCo final StringSortScript.Factory factory = context.compile(script, StringSortScript.CONTEXT); final StringSortScript.LeafFactory searchScript = factory.newFactory(script.getParams(), context.lookup()); return new BytesRefFieldComparatorSource(null, null, valueMode, nested) { - StringSortScript leafScript; + // introducing a map to keep a mapping between the leaf reader context and leaf script + // such that the functions of the class are thread safe in case of concurrent search + final Map<LeafReaderContext, StringSortScript> leafContextSortScriptMap = new ConcurrentHashMap<>(); @Override protected SortedBinaryDocValues getValues(LeafReaderContext context) throws IOException { - leafScript = searchScript.newInstance(context); + final StringSortScript leafScript = leafContextSortScriptMap.computeIfAbsent(context, ctx -> { + try { + return searchScript.newInstance(ctx); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + }); final BinaryDocValues values = new AbstractBinaryDocValues() { final BytesRefBuilder spare = new BytesRefBuilder(); @@ -379,8 +390,8 @@ public BytesRef binaryValue() { } @Override - protected void setScorer(Scorable scorer) { - leafScript.setScorer(scorer); + protected void setScorer(Scorable scorer, LeafReaderContext context) { + leafContextSortScriptMap.get(context).setScorer(scorer); } @Override @@ -403,11 +414,19 @@ public BucketedSort newBucketedSort( final NumberSortScript.Factory numberSortFactory = context.compile(script, NumberSortScript.CONTEXT); final NumberSortScript.LeafFactory numberSortScript = numberSortFactory.newFactory(script.getParams(), context.lookup()); return new DoubleValuesComparatorSource(null, Double.MAX_VALUE, valueMode, nested) { - NumberSortScript leafScript; + // introducing a map to keep a mapping between the leaf reader context and leaf script + // such that the functions of the class are thread safe in case of concurrent search + final Map<LeafReaderContext, NumberSortScript> leafContextSortScriptMap = new ConcurrentHashMap<>(); @Override protected SortedNumericDoubleValues getValues(LeafReaderContext context) throws IOException { - leafScript = numberSortScript.newInstance(context); + final NumberSortScript leafScript = leafContextSortScriptMap.computeIfAbsent(context, ctx -> { + try { + return numberSortScript.newInstance(ctx); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + }); final NumericDoubleValues values = new NumericDoubleValues() { @Override public boolean advanceExact(int 
doc) throws IOException { @@ -424,8 +443,8 @@ public double doubleValue() { } @Override - protected void setScorer(Scorable scorer) { - leafScript.setScorer(scorer); + protected void setScorer(Scorable scorer, LeafReaderContext context) { + leafContextSortScriptMap.get(context).setScorer(scorer); } }; default: diff --git a/server/src/main/java/org/opensearch/search/sort/SortAndFormats.java b/server/src/main/java/org/opensearch/search/sort/SortAndFormats.java index 272b1e9c1dc8d..046faaa03f91f 100644 --- a/server/src/main/java/org/opensearch/search/sort/SortAndFormats.java +++ b/server/src/main/java/org/opensearch/search/sort/SortAndFormats.java @@ -32,13 +32,16 @@ package org.opensearch.search.sort; import org.apache.lucene.search.Sort; +import org.opensearch.cluster.metadata.DataStream; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.search.DocValueFormat; /** * Utility class to hold sort and doc value format instances * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class SortAndFormats { public final Sort sort; @@ -52,4 +55,13 @@ public SortAndFormats(Sort sort, DocValueFormat[] formats) { this.formats = formats; } + /** + * @return true: if sort is on timestamp field, false: otherwise + */ + public boolean isSortOnTimeSeriesField() { + return sort.getSort().length > 0 + && sort.getSort()[0].getField() != null + && sort.getSort()[0].getField().equals(DataStream.TIMESERIES_FIELDNAME); + } + } diff --git a/server/src/main/java/org/opensearch/search/sort/SortBuilder.java b/server/src/main/java/org/opensearch/search/sort/SortBuilder.java index 5bffb8a9ca56e..a8c21e7311061 100644 --- a/server/src/main/java/org/opensearch/search/sort/SortBuilder.java +++ b/server/src/main/java/org/opensearch/search/sort/SortBuilder.java @@ -36,12 +36,13 @@ import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.join.ToChildBlockJoinQuery; -import org.opensearch.core.common.ParsingException; -import org.opensearch.common.Strings; -import org.opensearch.core.common.io.stream.NamedWriteable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lucene.search.Queries; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.ParseField; +import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.NamedWriteable; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedObjectNotFoundException; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentParser; @@ -64,8 +65,9 @@ /** * Base class for sort object builders * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class SortBuilder<T extends SortBuilder<T>> implements NamedWriteable, ToXContentObject, Rewriteable<SortBuilder<?>> { protected SortOrder order = SortOrder.ASC; @@ -283,6 +285,6 @@ protected static QueryBuilder parseNestedFilter(XContentParser parser) { @Override public String toString() { - return Strings.toString(XContentType.JSON, this, true, true); + return Strings.toString(MediaTypeRegistry.JSON, this, true, true); } } diff --git a/server/src/main/java/org/opensearch/search/sort/SortFieldAndFormat.java b/server/src/main/java/org/opensearch/search/sort/SortFieldAndFormat.java index c1441288bf732..4734e76317b01 100644 --- 
a/server/src/main/java/org/opensearch/search/sort/SortFieldAndFormat.java +++ b/server/src/main/java/org/opensearch/search/sort/SortFieldAndFormat.java @@ -31,11 +31,11 @@ package org.opensearch.search.sort; -import java.util.Objects; - import org.apache.lucene.search.SortField; import org.opensearch.search.DocValueFormat; +import java.util.Objects; + /** * A holder for SortField and doc value format objects * diff --git a/server/src/main/java/org/opensearch/search/sort/SortOrder.java b/server/src/main/java/org/opensearch/search/sort/SortOrder.java index ed83a0667c5e7..f4b6701c91f58 100644 --- a/server/src/main/java/org/opensearch/search/sort/SortOrder.java +++ b/server/src/main/java/org/opensearch/search/sort/SortOrder.java @@ -32,6 +32,7 @@ package org.opensearch.search.sort; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -43,8 +44,9 @@ /** * A sorting order. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum SortOrder implements Writeable { /** * Ascending order. diff --git a/server/src/main/java/org/opensearch/search/sort/SortValue.java b/server/src/main/java/org/opensearch/search/sort/SortValue.java index 0c46428bc02d0..f521925695a0d 100644 --- a/server/src/main/java/org/opensearch/search/sort/SortValue.java +++ b/server/src/main/java/org/opensearch/search/sort/SortValue.java @@ -32,6 +32,7 @@ package org.opensearch.search.sort; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.NamedWriteable; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; @@ -46,8 +47,9 @@ /** * A {@link Comparable}, {@link DocValueFormat} aware wrapper around a sort value. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class SortValue implements NamedWriteable, Comparable<SortValue> { /** * Get a {@linkplain SortValue} for a double. 
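The ScriptSortBuilder hunks above replace a single mutable leafScript field with a ConcurrentHashMap keyed by LeafReaderContext, so that getValues and setScorer stay safe when concurrent segment search collects several leaves on different threads at once. Below is a minimal sketch of that caching pattern; LeafContext, LeafScript, and LeafScriptFactory are hypothetical stand-ins for the Lucene and script-service types, not the real OpenSearch classes.

import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical stand-ins for Lucene's LeafReaderContext and a compiled sort script.
final class LeafContext {}

interface LeafScript {
    double execute(int doc);
}

interface LeafScriptFactory {
    LeafScript newInstance(LeafContext ctx) throws IOException;
}

// Caches one script instance per segment so that concurrent segment search,
// where several threads collect different leaves simultaneously, never shares
// mutable per-leaf state across threads.
final class PerLeafScriptCache {
    private final LeafScriptFactory factory;
    // computeIfAbsent is atomic per key: each leaf gets exactly one instance.
    private final Map<LeafContext, LeafScript> scripts = new ConcurrentHashMap<>();

    PerLeafScriptCache(LeafScriptFactory factory) {
        this.factory = factory;
    }

    LeafScript forLeaf(LeafContext ctx) {
        return scripts.computeIfAbsent(ctx, c -> {
            try {
                return factory.newInstance(c);
            } catch (IOException e) {
                // The mapping function cannot throw checked exceptions, so
                // tunnel the IOException out unchecked, as the diff above does.
                throw new UncheckedIOException(e);
            }
        });
    }
}

Because computeIfAbsent is atomic per key, no extra locking is needed; the UncheckedIOException wrapper exists only because the mapping function's signature forbids checked exceptions.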
diff --git a/server/src/main/java/org/opensearch/search/sort/SortedWiderNumericSortField.java b/server/src/main/java/org/opensearch/search/sort/SortedWiderNumericSortField.java index bded2417ba6c1..10cc832fdb684 100644 --- a/server/src/main/java/org/opensearch/search/sort/SortedWiderNumericSortField.java +++ b/server/src/main/java/org/opensearch/search/sort/SortedWiderNumericSortField.java @@ -16,6 +16,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.LeafFieldComparator; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.comparators.NumericComparator; @@ -44,13 +45,13 @@ public SortedWiderNumericSortField(String field, Type type, boolean reverse) { * Creates and return a comparator, which always converts Numeric to double * and compare to support multi type comparison between numeric values * @param numHits number of top hits the queue will store - * @param enableSkipping true if the comparator can skip documents via {@link + * @param pruning controls how the comparator skips documents via {@link * LeafFieldComparator#competitiveIterator()} * @return NumericComparator */ @Override - public FieldComparator<?> getComparator(int numHits, boolean enableSkipping) { - return new NumericComparator<Number>(getField(), (Number) getMissingValue(), getReverse(), enableSkipping, Double.BYTES) { + public FieldComparator<?> getComparator(int numHits, Pruning pruning) { + return new NumericComparator<Number>(getField(), (Number) getMissingValue(), getReverse(), pruning, Double.BYTES) { @Override public int compare(int slot1, int slot2) { throw new UnsupportedOperationException(); diff --git a/server/src/main/java/org/opensearch/search/suggest/Suggest.java b/server/src/main/java/org/opensearch/search/suggest/Suggest.java index 9020a036ade0a..b8f2f9b7279cf 100644 --- a/server/src/main/java/org/opensearch/search/suggest/Suggest.java +++ b/server/src/main/java/org/opensearch/search/suggest/Suggest.java @@ -33,21 +33,22 @@ import org.apache.lucene.util.CollectionUtil; import org.opensearch.common.CheckedFunction; +import org.opensearch.common.SetOnce; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; -import org.opensearch.common.SetOnce; -import org.opensearch.common.Strings; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.NamedWriteable; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.common.text.Text; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.xcontent.XContentParserUtils; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.rest.action.search.RestSearchAction; import org.opensearch.search.aggregations.Aggregation; import org.opensearch.search.suggest.Suggest.Suggestion.Entry; @@ -70,8 +71,9 @@ /** * Top level suggest result, containing the result for each suggestion. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Suggest implements Iterable<Suggest.Suggestion<? extends Entry<? extends Option>>>, Writeable, ToXContentFragment { public static final String NAME = "suggest"; @@ -232,7 +234,9 @@ public int hashCode() { /** * The suggestion responses corresponding with the suggestions in the request. + * @opensearch.api */ + @PublicApi(since = "1.0.0") public abstract static class Suggestion<T extends Suggestion.Entry> implements Iterable<T>, NamedWriteable, ToXContentFragment { public static final int TYPE = 0; @@ -263,7 +267,7 @@ public void addTerm(T entry) { /** * Returns a integer representing the type of the suggestion. This is used for * internal serialization over the network. - * + * <p> * This class is now serialized as a NamedWriteable and this method only remains for backwards compatibility */ @Deprecated @@ -423,7 +427,10 @@ protected static <E extends Suggestion.Entry<?>> void parseEntries( /** * Represents a part from the suggest text with suggested options. + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") public abstract static class Entry<O extends Option> implements Iterable<O>, Writeable, ToXContentFragment { private static final String TEXT = "text"; @@ -611,7 +618,10 @@ protected static void declareCommonFields(ObjectParser<? extends Entry<? extends /** * Contains the suggested text with its document frequency and score. + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") public abstract static class Option implements Writeable, ToXContentFragment { public static final ParseField TEXT = new ParseField("text"); @@ -740,6 +750,6 @@ public int hashCode() { @Override public String toString() { - return Strings.toString(XContentType.JSON, this, true, true); + return Strings.toString(MediaTypeRegistry.JSON, this, true, true); } } diff --git a/server/src/main/java/org/opensearch/search/suggest/SuggestBuilder.java b/server/src/main/java/org/opensearch/search/suggest/SuggestBuilder.java index 3daa4ac019cd5..f90280da6ce83 100644 --- a/server/src/main/java/org/opensearch/search/suggest/SuggestBuilder.java +++ b/server/src/main/java/org/opensearch/search/suggest/SuggestBuilder.java @@ -32,14 +32,15 @@ package org.opensearch.search.suggest; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.lucene.BytesRefs; +import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; -import org.opensearch.common.Strings; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.lucene.BytesRefs; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.ParseField; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -59,8 +60,9 @@ * Suggesting works by suggesting terms/phrases that appear in the suggest text that are similar compared * to the terms in provided text. These suggestions are based on several options described in this class. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SuggestBuilder implements Writeable, ToXContentObject { protected static final ParseField GLOBAL_TEXT_FIELD = new ParseField("text"); @@ -218,6 +220,6 @@ public int hashCode() { @Override public String toString() { - return Strings.toString(XContentType.JSON, this, true, true); + return Strings.toString(MediaTypeRegistry.JSON, this, true, true); } } diff --git a/server/src/main/java/org/opensearch/search/suggest/Suggester.java b/server/src/main/java/org/opensearch/search/suggest/Suggester.java index 29efc6c4a3a6a..cde491a4966ad 100644 --- a/server/src/main/java/org/opensearch/search/suggest/Suggester.java +++ b/server/src/main/java/org/opensearch/search/suggest/Suggester.java @@ -34,14 +34,16 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.util.CharsRefBuilder; +import org.opensearch.common.annotation.PublicApi; import java.io.IOException; /** * Base class used for all suggester implementations * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class Suggester<T extends SuggestionSearchContext.SuggestionContext> { protected abstract Suggest.Suggestion<? extends Suggest.Suggestion.Entry<? extends Suggest.Suggestion.Entry.Option>> innerExecute( diff --git a/server/src/main/java/org/opensearch/search/suggest/SuggestionBuilder.java b/server/src/main/java/org/opensearch/search/suggest/SuggestionBuilder.java index 94dea8a65293a..ee465fc527ea3 100644 --- a/server/src/main/java/org/opensearch/search/suggest/SuggestionBuilder.java +++ b/server/src/main/java/org/opensearch/search/suggest/SuggestionBuilder.java @@ -34,12 +34,13 @@ import org.apache.lucene.analysis.Analyzer; import org.opensearch.OpenSearchParseException; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.lucene.BytesRefs; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.NamedWriteable; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.lucene.BytesRefs; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -54,8 +55,9 @@ /** * Base class for the different suggestion implementations. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class SuggestionBuilder<T extends SuggestionBuilder<T>> implements NamedWriteable, ToXContentFragment { protected final String field; diff --git a/server/src/main/java/org/opensearch/search/suggest/SuggestionSearchContext.java b/server/src/main/java/org/opensearch/search/suggest/SuggestionSearchContext.java index f0d8efc64b6b3..32f72d1115973 100644 --- a/server/src/main/java/org/opensearch/search/suggest/SuggestionSearchContext.java +++ b/server/src/main/java/org/opensearch/search/suggest/SuggestionSearchContext.java @@ -33,6 +33,7 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.util.BytesRef; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.query.QueryShardContext; import java.util.LinkedHashMap; @@ -41,8 +42,9 @@ /** * Context used for suggestion based search * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SuggestionSearchContext { private final Map<String, SuggestionContext> suggestions = new LinkedHashMap<>(4); @@ -58,8 +60,9 @@ public Map<String, SuggestionContext> suggestions() { /** * The suggestion context * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public abstract static class SuggestionContext { private BytesRef text; diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/CompletionStats.java b/server/src/main/java/org/opensearch/search/suggest/completion/CompletionStats.java index 2ca642292a7c0..ad91e75c591f3 100644 --- a/server/src/main/java/org/opensearch/search/suggest/completion/CompletionStats.java +++ b/server/src/main/java/org/opensearch/search/suggest/completion/CompletionStats.java @@ -33,10 +33,11 @@ import org.opensearch.common.FieldMemoryStats; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -45,8 +46,9 @@ /** * Stats for completion suggester * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class CompletionStats implements Writeable, ToXContentFragment { private static final String COMPLETION = "completion"; diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/CompletionSuggester.java b/server/src/main/java/org/opensearch/search/suggest/completion/CompletionSuggester.java index 175503cb94e3d..fabb67d7fb841 100644 --- a/server/src/main/java/org/opensearch/search/suggest/completion/CompletionSuggester.java +++ b/server/src/main/java/org/opensearch/search/suggest/completion/CompletionSuggester.java @@ -35,6 +35,7 @@ import org.apache.lucene.search.BulkScorer; import org.apache.lucene.search.CollectionTerminatedException; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.Weight; import org.apache.lucene.search.suggest.document.CompletionQuery; import org.apache.lucene.search.suggest.document.TopSuggestDocs; @@ -103,20 +104,26 @@ protected Suggest.Suggestion<? extends Suggest.Suggestion.Entry<? 
extends Sugges } private static void suggest(IndexSearcher searcher, CompletionQuery query, TopSuggestDocsCollector collector) throws IOException { - query = (CompletionQuery) query.rewrite(searcher.getIndexReader()); + query = (CompletionQuery) query.rewrite(searcher); Weight weight = query.createWeight(searcher, collector.scoreMode(), 1f); for (LeafReaderContext context : searcher.getIndexReader().leaves()) { BulkScorer scorer = weight.bulkScorer(context); if (scorer != null) { + LeafCollector leafCollector = null; try { - scorer.score(collector.getLeafCollector(context), context.reader().getLiveDocs()); + leafCollector = collector.getLeafCollector(context); + scorer.score(leafCollector, context.reader().getLiveDocs()); } catch (CollectionTerminatedException e) { // collection was terminated prematurely // continue with the following leaf } + // Note: this is called if collection ran successfully, including the above special cases of + // CollectionTerminatedException and TimeExceededException, but no other exception. + if (leafCollector != null) { + leafCollector.finish(); + } } } - collector.finish(); } @Override diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/CompletionSuggestion.java b/server/src/main/java/org/opensearch/search/suggest/completion/CompletionSuggestion.java index 9cd32db4e2a98..e3e6cad65be62 100644 --- a/server/src/main/java/org/opensearch/search/suggest/completion/CompletionSuggestion.java +++ b/server/src/main/java/org/opensearch/search/suggest/completion/CompletionSuggestion.java @@ -34,10 +34,10 @@ import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.util.PriorityQueue; +import org.opensearch.common.lucene.Lucene; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.lucene.Lucene; import org.opensearch.core.common.text.Text; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.XContentBuilder; @@ -63,14 +63,14 @@ /** * Suggestion response for {@link CompletionSuggester} results - * + * <p> * Response format for each entry: * { * "text" : STRING * "score" : FLOAT * "contexts" : CONTEXTS * } - * + * <p> * CONTEXTS : { * "CONTEXT_NAME" : ARRAY, * .. 
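The CompletionSuggester hunk above rewrites the query against the IndexSearcher and moves finish() from the collector level down to each LeafCollector, still invoking it when a leaf terminates early with CollectionTerminatedException. A sketch of that per-leaf collection loop follows, assuming a Lucene 9.x-style API; the class name and surrounding wiring are illustrative, not the shipped code.

import java.io.IOException;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.BulkScorer;
import org.apache.lucene.search.CollectionTerminatedException;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Weight;

final class PerLeafFinishExample {

    // Drives a collector segment by segment, calling LeafCollector#finish()
    // after each leaf completes -- including when the collector cuts collection
    // short with CollectionTerminatedException -- rather than once globally.
    static void collect(IndexSearcher searcher, Query query, Collector collector) throws IOException {
        Weight weight = searcher.createWeight(searcher.rewrite(query), collector.scoreMode(), 1f);
        for (LeafReaderContext leaf : searcher.getIndexReader().leaves()) {
            BulkScorer scorer = weight.bulkScorer(leaf);
            if (scorer == null) {
                continue; // the query matches nothing in this segment
            }
            LeafCollector leafCollector = null;
            try {
                leafCollector = collector.getLeafCollector(leaf);
                scorer.score(leafCollector, leaf.reader().getLiveDocs());
            } catch (CollectionTerminatedException e) {
                // Early termination is a normal control-flow signal;
                // fall through so finish() still runs for this leaf.
            }
            if (leafCollector != null) {
                leafCollector.finish();
            }
        }
    }
}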
diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/CompletionSuggestionBuilder.java b/server/src/main/java/org/opensearch/search/suggest/completion/CompletionSuggestionBuilder.java index 6f3c3f471b47c..6724a48c26a63 100644 --- a/server/src/main/java/org/opensearch/search/suggest/completion/CompletionSuggestionBuilder.java +++ b/server/src/main/java/org/opensearch/search/suggest/completion/CompletionSuggestionBuilder.java @@ -32,20 +32,20 @@ package org.opensearch.search.suggest.completion; import org.opensearch.OpenSearchParseException; +import org.opensearch.common.unit.Fuzziness; +import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.ParseField; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.Fuzziness; -import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.mapper.CompletionFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; @@ -72,7 +72,7 @@ */ public class CompletionSuggestionBuilder extends SuggestionBuilder<CompletionSuggestionBuilder> { - private static final XContentType CONTEXT_BYTES_XCONTENT_TYPE = XContentType.JSON; + private static final MediaType CONTEXT_BYTES_XCONTENT_TYPE = MediaTypeRegistry.JSON; static final ParseField CONTEXTS_FIELD = new ParseField("contexts", "context"); static final ParseField SKIP_DUPLICATES_FIELD = new ParseField("skip_duplicates"); @@ -111,7 +111,7 @@ public class CompletionSuggestionBuilder extends SuggestionBuilder<CompletionSug PARSER.declareInt(CompletionSuggestionBuilder.InnerBuilder::shardSize, SHARDSIZE_FIELD); PARSER.declareField((p, v, c) -> { // Copy the current structure. We will parse, once the mapping is provided - XContentBuilder builder = XContentFactory.contentBuilder(CONTEXT_BYTES_XCONTENT_TYPE); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(CONTEXT_BYTES_XCONTENT_TYPE); builder.copyCurrentStructure(p); v.contextBytes = BytesReference.bytes(builder); p.skipChildren(); @@ -216,7 +216,7 @@ public CompletionSuggestionBuilder regex(String regex, RegexOptions regexOptions public CompletionSuggestionBuilder contexts(Map<String, List<? extends ToXContent>> queryContexts) { Objects.requireNonNull(queryContexts, "contexts must not be null"); try { - XContentBuilder contentBuilder = XContentFactory.contentBuilder(CONTEXT_BYTES_XCONTENT_TYPE); + XContentBuilder contentBuilder = MediaTypeRegistry.contentBuilder(CONTEXT_BYTES_XCONTENT_TYPE); contentBuilder.startObject(); for (Map.Entry<String, List<? 
extends ToXContent>> contextEntry : queryContexts.entrySet()) { contentBuilder.startArray(contextEntry.getKey()); diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/FuzzyOptions.java b/server/src/main/java/org/opensearch/search/suggest/completion/FuzzyOptions.java index 22157817a7124..139742f84b80b 100644 --- a/server/src/main/java/org/opensearch/search/suggest/completion/FuzzyOptions.java +++ b/server/src/main/java/org/opensearch/search/suggest/completion/FuzzyOptions.java @@ -34,11 +34,11 @@ import org.apache.lucene.search.suggest.document.FuzzyCompletionQuery; import org.apache.lucene.util.automaton.Operations; +import org.opensearch.common.unit.Fuzziness; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.Fuzziness; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -142,7 +142,7 @@ public int getEditDistance() { /** * Returns if transpositions option is set - * + * <p> * if transpositions is set, then swapping one character for another counts as one edit instead of two. */ public boolean isTranspositions() { diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/TopSuggestGroupDocsCollector.java b/server/src/main/java/org/opensearch/search/suggest/completion/TopSuggestGroupDocsCollector.java index 4fbd661037aa9..fbc39536502de 100644 --- a/server/src/main/java/org/opensearch/search/suggest/completion/TopSuggestGroupDocsCollector.java +++ b/server/src/main/java/org/opensearch/search/suggest/completion/TopSuggestGroupDocsCollector.java @@ -43,7 +43,7 @@ /** * * Extension of the {@link TopSuggestDocsCollector} that returns top documents from the completion suggester. - * + * <p> * This collector groups suggestions coming from the same document but matching different contexts * or surface form together. When different contexts or surface forms match the same suggestion form only * the best one per document (sorted by weight) is kept. 
@@ -55,7 +55,7 @@ class TopSuggestGroupDocsCollector extends TopSuggestDocsCollector { /** * Sole constructor - * + * <p> * Collects at most <code>num</code> completions * with corresponding document and weight */ diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/context/ContextMapping.java b/server/src/main/java/org/opensearch/search/suggest/completion/context/ContextMapping.java index b4b50f43bae31..94707ff2b4569 100644 --- a/server/src/main/java/org/opensearch/search/suggest/completion/context/ContextMapping.java +++ b/server/src/main/java/org/opensearch/search/suggest/completion/context/ContextMapping.java @@ -34,13 +34,12 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.Version; -import org.opensearch.common.Strings; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.xcontent.XContentParser.Token; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.index.mapper.CompletionFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.ParseContext; @@ -55,7 +54,7 @@ /** * A {@link ContextMapping} defines criteria that can be used to * filter and/or boost suggestions at query time for {@link CompletionFieldMapper}. - * + * <p> * Implementations have to define how contexts are parsed at query/index time * * @opensearch.internal @@ -188,7 +187,7 @@ public int hashCode() { @Override public String toString() { try { - return Strings.toString(toXContent(JsonXContent.contentBuilder(), ToXContent.EMPTY_PARAMS)); + return toXContent(JsonXContent.contentBuilder(), ToXContent.EMPTY_PARAMS).toString(); } catch (IOException e) { return super.toString(); } diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/context/ContextMappings.java b/server/src/main/java/org/opensearch/search/suggest/completion/context/ContextMappings.java index 0f5781fefcf07..cff5a901a473f 100644 --- a/server/src/main/java/org/opensearch/search/suggest/completion/context/ContextMappings.java +++ b/server/src/main/java/org/opensearch/search/suggest/completion/context/ContextMappings.java @@ -124,11 +124,11 @@ public Iterator<ContextMapping<?>> iterator() { * Field prepends context values with a suggestion * Context values are associated with a type, denoted by * a type id, which is prepended to the context value. 
- * + * <p> * Every defined context mapping yields a unique type id (index of the * corresponding context mapping in the context mappings list) * for all its context values - * + * <p> * The type, context and suggestion values are encoded as follows: * <p> * TYPE_ID | CONTEXT_VALUE | CONTEXT_SEP | SUGGESTION_VALUE @@ -209,7 +209,7 @@ public ContextQuery toContextQuery(CompletionQuery query, Map<String, List<Conte /** * Maps an output context list to a map of context mapping names and their values - * + * <p> * see {@link org.opensearch.search.suggest.completion.context.ContextMappings.TypedContextField} * @return a map of context names and their values * @@ -232,7 +232,7 @@ public Map<String, Set<String>> getNamedContexts(List<CharSequence> contexts) { /** * Loads {@link ContextMappings} from configuration - * + * <p> * Expected configuration: * List of maps representing {@link ContextMapping} * [{"name": .., "type": .., ..}, {..}] @@ -286,7 +286,7 @@ private static String extractRequiredValue(Map<String, Object> contextConfig, St /** * Writes a list of objects specified by the defined {@link ContextMapping}s - * + * <p> * see {@link ContextMapping#toXContent(XContentBuilder, Params)} */ @Override diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoContextMapping.java b/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoContextMapping.java index 17f858b0b1302..0e29e928df760 100644 --- a/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoContextMapping.java +++ b/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoContextMapping.java @@ -71,7 +71,7 @@ * The suggestions can be boosted and/or filtered depending on * whether it falls within an area, represented by a query geo hash * with a specified precision - * + * <p> * {@link GeoQueryContext} defines the options for constructing * a unit of query context for this context type * diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoQueryContext.java b/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoQueryContext.java index b4521f99a341d..82c508e82964b 100644 --- a/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoQueryContext.java +++ b/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoQueryContext.java @@ -33,9 +33,9 @@ package org.opensearch.search.suggest.completion.context; import org.opensearch.OpenSearchParseException; -import org.opensearch.core.ParseField; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.geo.GeoUtils; +import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; diff --git a/server/src/main/java/org/opensearch/search/suggest/phrase/DirectCandidateGenerator.java b/server/src/main/java/org/opensearch/search/suggest/phrase/DirectCandidateGenerator.java index 6335629f61cf1..1a00cb9465771 100644 --- a/server/src/main/java/org/opensearch/search/suggest/phrase/DirectCandidateGenerator.java +++ b/server/src/main/java/org/opensearch/search/suggest/phrase/DirectCandidateGenerator.java @@ -147,9 +147,9 @@ public TermStats internalTermStats(BytesRef term) throws IOException { if (termsEnum.seekExact(term)) { return new TermStats( termsEnum.docFreq(), - /** - * We use the {@link TermsEnum#docFreq()} for fields that don't - * record the {@link TermsEnum#totalTermFreq()}. 
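The comment being converted here (from a javadoc /** to a plain /* block, since it sits inside an argument list) documents a fallback worth spelling out: fields that omit term frequencies report totalTermFreq() as -1, in which case docFreq() is substituted. A self-contained sketch of that guard against a Lucene TermsEnum:

import java.io.IOException;

import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;

public class TermStatsFallback {
    // Mirrors internalTermStats: totalTermFreq() is -1 when the field does not
    // record term frequencies, so docFreq() serves as the stand-in value.
    public static long effectiveTotalTermFreq(TermsEnum termsEnum, BytesRef term) throws IOException {
        if (termsEnum.seekExact(term) == false) {
            return 0L;
        }
        long ttf = termsEnum.totalTermFreq();
        return ttf == -1 ? termsEnum.docFreq() : ttf;
    }
}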
+ /* + We use the {@link TermsEnum#docFreq()} for fields that don't + record the {@link TermsEnum#totalTermFreq()}. */ termsEnum.totalTermFreq() == -1 ? termsEnum.docFreq() : termsEnum.totalTermFreq() ); @@ -168,10 +168,10 @@ public CandidateSet drawCandidates(CandidateSet set) throws IOException { float origThreshold = spellchecker.getThresholdFrequency(); try { if (suggestMode != SuggestMode.SUGGEST_ALWAYS) { - /** - * We use the {@link TermStats#docFreq} to compute the frequency threshold - * because that's what {@link DirectSpellChecker#suggestSimilar} expects - * when filtering terms. + /* + We use the {@link TermStats#docFreq} to compute the frequency threshold + because that's what {@link DirectSpellChecker#suggestSimilar} expects + when filtering terms. */ int threshold = thresholdTermFrequency(original.termStats.docFreq); if (threshold == Integer.MAX_VALUE) { diff --git a/server/src/main/java/org/opensearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java b/server/src/main/java/org/opensearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java index 337227821f688..2e53584384dd5 100644 --- a/server/src/main/java/org/opensearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java +++ b/server/src/main/java/org/opensearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java @@ -41,13 +41,12 @@ import org.apache.lucene.search.spell.SuggestMode; import org.apache.lucene.util.automaton.LevenshteinAutomata; import org.opensearch.ExceptionsHelper; +import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.ParseField; -import org.opensearch.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.index.mapper.MapperService; import org.opensearch.search.suggest.SortBy; import org.opensearch.search.suggest.phrase.PhraseSuggestionBuilder.CandidateGenerator; @@ -508,7 +507,7 @@ public String toString() { XContentBuilder builder = XContentFactory.jsonBuilder(); builder.prettyPrint(); toXContent(builder, EMPTY_PARAMS); - return Strings.toString(builder); + return builder.toString(); } catch (Exception e) { return "{ \"error\" : \"" + ExceptionsHelper.detailedMessage(e) + "\"}"; } diff --git a/server/src/main/java/org/opensearch/search/suggest/phrase/LinearInterpolation.java b/server/src/main/java/org/opensearch/search/suggest/phrase/LinearInterpolation.java index f1dba9793ba9e..bc942da738c7e 100644 --- a/server/src/main/java/org/opensearch/search/suggest/phrase/LinearInterpolation.java +++ b/server/src/main/java/org/opensearch/search/suggest/phrase/LinearInterpolation.java @@ -70,7 +70,7 @@ public final class LinearInterpolation extends SmoothingModel { /** * Creates a linear interpolation smoothing model. - * + * <p> * Note: the lambdas must sum up to one. 
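As the note says, the three lambdas must sum to one. A short sketch of constructing the model and attaching it to a phrase suggestion (the weights are illustrative):

import org.opensearch.search.suggest.SuggestBuilders;
import org.opensearch.search.suggest.phrase.LinearInterpolation;
import org.opensearch.search.suggest.phrase.PhraseSuggestionBuilder;

public class SmoothingExample {
    public static PhraseSuggestionBuilder withLinearInterpolation() {
        // 0.65 + 0.25 + 0.10 == 1.0, satisfying the "lambdas must sum up to one" contract;
        // arguments follow the (trigram, bigram, unigram) order of the constructor javadoc.
        LinearInterpolation smoothing = new LinearInterpolation(0.65d, 0.25d, 0.10d);
        return SuggestBuilders.phraseSuggestion("title").smoothingModel(smoothing);
    }
}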
* * @param trigramLambda diff --git a/server/src/main/java/org/opensearch/search/suggest/phrase/PhraseSuggester.java b/server/src/main/java/org/opensearch/search/suggest/phrase/PhraseSuggester.java index 4b1c43bf7a317..09862e42b5819 100644 --- a/server/src/main/java/org/opensearch/search/suggest/phrase/PhraseSuggester.java +++ b/server/src/main/java/org/opensearch/search/suggest/phrase/PhraseSuggester.java @@ -42,9 +42,9 @@ import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.CharsRefBuilder; import org.opensearch.common.lucene.Lucene; -import org.opensearch.core.common.text.Text; import org.opensearch.common.xcontent.LoggingDeprecationHandler; -import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.common.text.Text; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.query.AbstractQueryBuilder; import org.opensearch.index.query.ParsedQuery; @@ -160,7 +160,8 @@ public Suggestion<? extends Entry<? extends Option>> innerExecute( QueryShardContext shardContext = suggestion.getShardContext(); final String querySource = scriptFactory.newInstance(vars).execute(); try ( - XContentParser parser = XContentFactory.xContent(querySource) + XContentParser parser = MediaTypeRegistry.xContent(querySource) + .xContent() .createParser(shardContext.getXContentRegistry(), LoggingDeprecationHandler.INSTANCE, querySource) ) { QueryBuilder innerQueryBuilder = AbstractQueryBuilder.parseInnerQueryBuilder(parser); diff --git a/server/src/main/java/org/opensearch/search/suggest/phrase/PhraseSuggestionBuilder.java b/server/src/main/java/org/opensearch/search/suggest/phrase/PhraseSuggestionBuilder.java index 8621eb1704053..a6bfb880cf249 100644 --- a/server/src/main/java/org/opensearch/search/suggest/phrase/PhraseSuggestionBuilder.java +++ b/server/src/main/java/org/opensearch/search/suggest/phrase/PhraseSuggestionBuilder.java @@ -33,12 +33,12 @@ import org.apache.lucene.analysis.Analyzer; import org.opensearch.OpenSearchParseException; +import org.opensearch.common.lucene.BytesRefs; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.lucene.BytesRefs; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -222,7 +222,7 @@ public Integer gramSize() { * misspellings in order to form a correction. This method accepts a float * value in the range [0..1) as a fraction of the actual query terms a * number {@code >=1} as an absolute number of query terms. - * + * <p> * The default is set to {@code 1.0} which corresponds to that only * corrections with at most 1 misspelled term are returned. 
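The maxErrors contract above accepts two shapes: a fraction in [0..1) of the actual query terms, or an absolute count of at least 1, with 1.0 as the default. A sketch of both forms (values illustrative):

import org.opensearch.search.suggest.SuggestBuilders;
import org.opensearch.search.suggest.phrase.PhraseSuggestionBuilder;

public class MaxErrorsExample {
    public static PhraseSuggestionBuilder fractional() {
        // At most half of the query terms may be misspelled in any returned correction.
        return SuggestBuilders.phraseSuggestion("title").maxErrors(0.5f);
    }

    public static PhraseSuggestionBuilder absolute() {
        // At most two misspelled terms per correction.
        return SuggestBuilders.phraseSuggestion("title").maxErrors(2f);
    }
}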
*/ diff --git a/server/src/main/java/org/opensearch/snapshots/ConcurrentSnapshotExecutionException.java b/server/src/main/java/org/opensearch/snapshots/ConcurrentSnapshotExecutionException.java index e0b4d3bf49d2e..b2f07d4d62f59 100644 --- a/server/src/main/java/org/opensearch/snapshots/ConcurrentSnapshotExecutionException.java +++ b/server/src/main/java/org/opensearch/snapshots/ConcurrentSnapshotExecutionException.java @@ -58,6 +58,6 @@ public ConcurrentSnapshotExecutionException(StreamInput in) throws IOException { @Override public RestStatus status() { - return RestStatus.SERVICE_UNAVAILABLE; + return RestStatus.CONFLICT; } } diff --git a/server/src/main/java/org/opensearch/snapshots/InternalSnapshotsInfoService.java b/server/src/main/java/org/opensearch/snapshots/InternalSnapshotsInfoService.java index 3c7a14fb2854a..797a58f3b0d9b 100644 --- a/server/src/main/java/org/opensearch/snapshots/InternalSnapshotsInfoService.java +++ b/server/src/main/java/org/opensearch/snapshots/InternalSnapshotsInfoService.java @@ -35,7 +35,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateListener; @@ -46,10 +45,12 @@ import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Priority; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; import org.opensearch.repositories.IndexId; import org.opensearch.repositories.RepositoriesService; @@ -364,9 +365,9 @@ private static Set<SnapshotShard> listOfSnapshotShards(final ClusterState state) /** * A snapshot of a shard * - * @opensearch.internal + * @opensearch.api */ - + @PublicApi(since = "1.0.0") public static class SnapshotShard { private final Snapshot snapshot; diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreInfo.java b/server/src/main/java/org/opensearch/snapshots/RestoreInfo.java index 1ed805c567d99..2ccbf308705bc 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreInfo.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreInfo.java @@ -31,17 +31,18 @@ package org.opensearch.snapshots; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.ParseField; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.rest.RestStatus; import java.io.IOException; import java.util.Collections; @@ -53,8 +54,9 @@ * <p> * 
Returned as part of {@link org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse} * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RestoreInfo implements ToXContentObject, Writeable { private String name; @@ -215,6 +217,6 @@ public int hashCode() { @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } } diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index d7e89172c5837..bf2c7fc74be92 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -35,12 +35,11 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.StepListener; -import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; import org.opensearch.action.support.IndicesOptions; import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterInfo; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateApplier; import org.opensearch.cluster.ClusterStateTaskConfig; @@ -62,13 +61,12 @@ import org.opensearch.cluster.metadata.MetadataIndexUpgradeService; import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RecoverySource.SnapshotRecoverySource; -import org.opensearch.cluster.routing.RecoverySource.RemoteStoreRecoverySource; import org.opensearch.cluster.routing.RoutingChangesObserver; import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardsIterator; import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.service.ClusterManagerTaskKeys; @@ -82,11 +80,15 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.snapshots.IndexShardSnapshotStatus; +import org.opensearch.index.store.remote.filecache.FileCacheStats; +import org.opensearch.indices.IndicesService; import org.opensearch.indices.ShardLimitValidator; import org.opensearch.repositories.IndexId; import org.opensearch.repositories.RepositoriesService; @@ -104,6 +106,7 @@ import java.util.Set; import java.util.function.Function; import java.util.function.Predicate; +import java.util.function.Supplier; import java.util.stream.Collectors; import static java.util.Collections.unmodifiableSet; @@ -113,12 +116,15 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_INDEX_UUID; import 
static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_CREATED; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_UPGRADED; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; import static org.opensearch.common.util.FeatureFlags.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY; import static org.opensearch.common.util.set.Sets.newHashSet; +import static org.opensearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; import static org.opensearch.index.store.remote.directory.RemoteSnapshotDirectory.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY_MINIMUM_VERSION; +import static org.opensearch.index.store.remote.filecache.FileCache.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING; +import static org.opensearch.node.Node.NODE_SEARCH_CACHE_SIZE_SETTING; import static org.opensearch.snapshots.SnapshotUtils.filterIndices; /** @@ -126,7 +132,7 @@ * <p> * Restore operation is performed in several stages. * <p> - * First {@link #restoreSnapshot(RestoreSnapshotRequest, org.opensearch.action.ActionListener)} + * First {@link #restoreSnapshot(RestoreSnapshotRequest, ActionListener)} * method reads information about snapshot and metadata from repository. In update cluster state task it checks restore * preconditions, restores global state if needed, creates {@link RestoreInProgress} record with list of shards that needs * to be restored and adds this shard to the routing table using @@ -177,6 +183,10 @@ public class RestoreService implements ClusterStateApplier { private final ClusterSettings clusterSettings; + private final IndicesService indicesService; + + private final Supplier<ClusterInfo> clusterInfoSupplier; + private final ClusterManagerTaskThrottler.ThrottlingKey restoreSnapshotTaskKey; private static final CleanRestoreStateTaskExecutor cleanRestoreStateTaskExecutor = new CleanRestoreStateTaskExecutor(); @@ -187,8 +197,9 @@ public RestoreService( AllocationService allocationService, MetadataCreateIndexService createIndexService, MetadataIndexUpgradeService metadataIndexUpgradeService, - ClusterSettings clusterSettings, - ShardLimitValidator shardLimitValidator + ShardLimitValidator shardLimitValidator, + IndicesService indicesService, + Supplier<ClusterInfo> clusterInfoSupplier ) { this.clusterService = clusterService; this.repositoriesService = repositoriesService; @@ -200,113 +211,14 @@ public RestoreService( } this.clusterSettings = clusterService.getClusterSettings(); this.shardLimitValidator = shardLimitValidator; + this.indicesService = indicesService; + this.clusterInfoSupplier = clusterInfoSupplier; // Task is onboarded for throttling, it will get retried from associated TransportClusterManagerNodeAction. restoreSnapshotTaskKey = clusterService.registerClusterManagerTask(ClusterManagerTaskKeys.RESTORE_SNAPSHOT_KEY, true); } - /** - * Restores data from remote store for indices specified in the restore request. 
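Two things change in the constructor above: the explicit ClusterSettings parameter disappears (it is now read off clusterService), and IndicesService plus a Supplier<ClusterInfo> come in to feed the searchable-snapshot file-cache validation added later in this file. A wiring fragment under the new signature; every collaborator name here is assumed to come from the node's existing dependency graph:

// Sketch only: parameter order as shown in the new constructor.
RestoreService restoreService = new RestoreService(
    clusterService,
    repositoriesService,
    allocationService,
    createIndexService,
    metadataIndexUpgradeService,
    shardLimitValidator,
    indicesService,
    clusterInfoService::getClusterInfo   // Supplier<ClusterInfo> used by validateSearchableSnapshotRestorable
);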
- * - * @param request restore request - * @param listener restore listener - */ - public void restoreFromRemoteStore(RestoreRemoteStoreRequest request, final ActionListener<RestoreCompletionResponse> listener) { - clusterService.submitStateUpdateTask("restore[remote_store]", new ClusterStateUpdateTask() { - final String restoreUUID = UUIDs.randomBase64UUID(); - RestoreInfo restoreInfo = null; - - @Override - public ClusterState execute(ClusterState currentState) { - // Updating cluster state - ClusterState.Builder builder = ClusterState.builder(currentState); - Metadata.Builder mdBuilder = Metadata.builder(currentState.metadata()); - ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()); - RoutingTable.Builder rtBuilder = RoutingTable.builder(currentState.routingTable()); - - List<String> indicesToBeRestored = new ArrayList<>(); - int totalShards = 0; - for (String index : request.indices()) { - IndexMetadata currentIndexMetadata = currentState.metadata().index(index); - if (currentIndexMetadata == null) { - // ToDo: Handle index metadata does not exist case. (GitHub #3457) - logger.warn("Remote store restore is not supported for non-existent index. Skipping: {}", index); - continue; - } - if (currentIndexMetadata.getSettings().getAsBoolean(SETTING_REMOTE_STORE_ENABLED, false)) { - IndexMetadata updatedIndexMetadata = currentIndexMetadata; - Map<ShardId, ShardRouting> activeInitializingShards = new HashMap<>(); - if (request.restoreAllShards()) { - if (currentIndexMetadata.getState() != IndexMetadata.State.CLOSE) { - throw new IllegalStateException( - "cannot restore index [" - + index - + "] because an open index " - + "with same name already exists in the cluster. Close the existing index" - ); - } - updatedIndexMetadata = IndexMetadata.builder(currentIndexMetadata) - .state(IndexMetadata.State.OPEN) - .version(1 + currentIndexMetadata.getVersion()) - .mappingVersion(1 + currentIndexMetadata.getMappingVersion()) - .settingsVersion(1 + currentIndexMetadata.getSettingsVersion()) - .aliasesVersion(1 + currentIndexMetadata.getAliasesVersion()) - .build(); - } else { - activeInitializingShards = currentState.routingTable() - .index(index) - .shards() - .values() - .stream() - .map(IndexShardRoutingTable::primaryShard) - .filter(shardRouting -> shardRouting.unassigned() == false) - .collect(Collectors.toMap(ShardRouting::shardId, Function.identity())); - } - - IndexId indexId = new IndexId(index, updatedIndexMetadata.getIndexUUID()); - - RemoteStoreRecoverySource recoverySource = new RemoteStoreRecoverySource( - restoreUUID, - updatedIndexMetadata.getCreationVersion(), - indexId - ); - rtBuilder.addAsRemoteStoreRestore(updatedIndexMetadata, recoverySource, activeInitializingShards); - blocks.updateBlocks(updatedIndexMetadata); - mdBuilder.put(updatedIndexMetadata, true); - indicesToBeRestored.add(index); - totalShards += updatedIndexMetadata.getNumberOfShards(); - } else { - logger.warn("Remote store is not enabled for index: {}", index); - } - } - - restoreInfo = new RestoreInfo("remote_store", indicesToBeRestored, totalShards, totalShards); - - RoutingTable rt = rtBuilder.build(); - ClusterState updatedState = builder.metadata(mdBuilder).blocks(blocks).routingTable(rt).build(); - return allocationService.reroute(updatedState, "restored from remote store"); - } - - @Override - public void onFailure(String source, Exception e) { - logger.warn("failed to restore from remote store", e); - listener.onFailure(e); - } - - @Override - public TimeValue timeout() { - 
return request.masterNodeTimeout(); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - listener.onResponse(new RestoreCompletionResponse(restoreUUID, null, restoreInfo)); - } - }); - - } - /** * Restores snapshot specified in the restore request. * @@ -315,6 +227,16 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS */ public void restoreSnapshot(final RestoreSnapshotRequest request, final ActionListener<RestoreCompletionResponse> listener) { try { + // Setting INDEX_STORE_TYPE_SETTING as REMOTE_SNAPSHOT is intended to be a system-managed index setting that is configured when + // restoring a snapshot and should not be manually set by user. + String storeTypeSetting = request.indexSettings().get(INDEX_STORE_TYPE_SETTING.getKey()); + if (storeTypeSetting != null && storeTypeSetting.equals(RestoreSnapshotRequest.StorageType.REMOTE_SNAPSHOT.toString())) { + throw new SnapshotRestoreException( + request.repository(), + request.snapshot(), + "cannot restore remote snapshot with index settings \"index.store.type\" set to \"remote_snapshot\". Instead use \"storage_type\": \"remote_snapshot\" as argument to restore." + ); + } // Read snapshot info and metadata from the repository final String repositoryName = request.repository(); Repository repository = repositoriesService.repository(repositoryName); @@ -415,7 +337,6 @@ public ClusterManagerTaskThrottler.ThrottlingKey getClusterManagerThrottlingKey( @Override public ClusterState execute(ClusterState currentState) { - RestoreInProgress restoreInProgress = currentState.custom(RestoreInProgress.TYPE, RestoreInProgress.EMPTY); // Check if the snapshot to restore is currently being deleted SnapshotDeletionsInProgress deletionsInProgress = currentState.custom( SnapshotDeletionsInProgress.TYPE, @@ -436,7 +357,9 @@ public ClusterState execute(ClusterState currentState) { ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()); RoutingTable.Builder rtBuilder = RoutingTable.builder(currentState.routingTable()); final Map<ShardId, RestoreInProgress.ShardRestoreStatus> shards; + final boolean isRemoteSnapshot = IndexModule.Type.REMOTE_SNAPSHOT.match(request.storageType().toString()); Set<String> aliases = new HashSet<>(); + long totalRestorableRemoteIndexesSize = 0; if (indices.isEmpty() == false) { // We have some indices to restore @@ -447,17 +370,14 @@ public ClusterState execute(ClusterState currentState) { String index = indexEntry.getValue(); boolean partial = checkPartial(index); + IndexId snapshotIndexId = repositoryData.resolveIndexId(index); IndexMetadata snapshotIndexMetadata = updateIndexSettings( metadata.index(index), request.indexSettings(), request.ignoreIndexSettings() ); - if (IndexModule.Type.REMOTE_SNAPSHOT.match(request.storageType().toString())) { - snapshotIndexMetadata = addSnapshotToIndexSettings( - snapshotIndexMetadata, - snapshot, - repositoryData.resolveIndexId(index) - ); + if (isRemoteSnapshot) { + snapshotIndexMetadata = addSnapshotToIndexSettings(snapshotIndexMetadata, snapshot, snapshotIndexId); } final boolean isSearchableSnapshot = IndexModule.Type.REMOTE_SNAPSHOT.match( snapshotIndexMetadata.getSettings().get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey()) @@ -483,7 +403,7 @@ public ClusterState execute(ClusterState currentState) { restoreUUID, snapshot, snapshotInfo.version(), - repositoryData.resolveIndexId(index), + snapshotIndexId, isSearchableSnapshot, isRemoteStoreShallowCopy, 
request.getSourceRemoteStoreRepository() @@ -520,6 +440,10 @@ public ClusterState execute(ClusterState currentState) { createIndexService.validateIndexName(renamedIndexName, currentState); createIndexService.validateDotIndex(renamedIndexName, isHidden); createIndexService.validateIndexSettings(renamedIndexName, snapshotIndexMetadata.getSettings(), false); + MetadataCreateIndexService.validateRefreshIntervalSettings( + snapshotIndexMetadata.getSettings(), + clusterSettings + ); IndexMetadata.Builder indexMdBuilder = IndexMetadata.builder(snapshotIndexMetadata) .state(IndexMetadata.State.OPEN) .index(renamedIndexName); @@ -602,6 +526,14 @@ public ClusterState execute(ClusterState currentState) { } for (int shard = 0; shard < snapshotIndexMetadata.getNumberOfShards(); shard++) { + if (isRemoteSnapshot) { + IndexShardSnapshotStatus.Copy shardStatus = repository.getShardSnapshotStatus( + snapshotInfo.snapshotId(), + snapshotIndexId, + new ShardId(metadata.index(index).getIndex(), shard) + ).asCopy(); + totalRestorableRemoteIndexesSize += shardStatus.getTotalSize(); + } if (!ignoreShards.contains(shard)) { shardsBuilder.put( new ShardId(renamedIndex, shard), @@ -638,6 +570,9 @@ public ClusterState execute(ClusterState currentState) { } checkAliasNameConflicts(indices, aliases); + if (isRemoteSnapshot) { + validateSearchableSnapshotRestorable(totalRestorableRemoteIndexesSize); + } Map<String, DataStream> updatedDataStreams = new HashMap<>(currentState.metadata().dataStreams()); updatedDataStreams.putAll( @@ -658,6 +593,7 @@ public ClusterState execute(ClusterState currentState) { if (metadata.templates() != null) { // TODO: Should all existing templates be deleted first? for (final IndexTemplateMetadata cursor : metadata.templates().values()) { + MetadataCreateIndexService.validateRefreshIntervalSettings(cursor.settings(), clusterSettings); mdBuilder.put(cursor); } } @@ -837,6 +773,45 @@ private IndexMetadata updateIndexSettings( return builder.settings(settingsBuilder).build(); } + private void validateSearchableSnapshotRestorable(long totalRestorableRemoteIndexesSize) { + ClusterInfo clusterInfo = clusterInfoSupplier.get(); + double remoteDataToFileCacheRatio = DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING.get(clusterService.getSettings()); + Map<String, FileCacheStats> nodeFileCacheStats = clusterInfo.getNodeFileCacheStats(); + if (nodeFileCacheStats.isEmpty() || remoteDataToFileCacheRatio <= 0.01f) { + return; + } + + long totalNodeFileCacheSize = clusterInfo.getNodeFileCacheStats() + .values() + .stream() + .map(fileCacheStats -> fileCacheStats.getTotal().getBytes()) + .mapToLong(Long::longValue) + .sum(); + + Predicate<ShardRouting> isRemoteSnapshotShard = shardRouting -> shardRouting.primary() + && indicesService.indexService(shardRouting.index()).getIndexSettings().isRemoteSnapshot(); + + ShardsIterator shardsIterator = clusterService.state() + .routingTable() + .allShardsSatisfyingPredicate(isRemoteSnapshotShard); + + long totalRestoredRemoteIndexesSize = shardsIterator.getShardRoutings() + .stream() + .map(clusterInfo::getShardSize) + .mapToLong(Long::longValue) + .sum(); + + if (totalRestoredRemoteIndexesSize + totalRestorableRemoteIndexesSize > remoteDataToFileCacheRatio + * totalNodeFileCacheSize) { + throw new SnapshotRestoreException( + snapshot, + "Size of the indexes to be restored exceeds the file cache bounds. Increase the file cache capacity on the cluster nodes using " + + NODE_SEARCH_CACHE_SIZE_SETTING.getKey() + + " setting." 
+ ); + } + } + @Override public void onFailure(String source, Exception e) { logger.warn(() -> new ParameterizedMessage("[{}] failed to restore snapshot", snapshotId), e); @@ -923,7 +898,7 @@ public static final class RestoreCompletionResponse { private final Snapshot snapshot; private final RestoreInfo restoreInfo; - private RestoreCompletionResponse(final String uuid, final Snapshot snapshot, final RestoreInfo restoreInfo) { + public RestoreCompletionResponse(final String uuid, final Snapshot snapshot, final RestoreInfo restoreInfo) { this.uuid = uuid; this.snapshot = snapshot; this.restoreInfo = restoreInfo; diff --git a/server/src/main/java/org/opensearch/snapshots/Snapshot.java b/server/src/main/java/org/opensearch/snapshots/Snapshot.java index e7c92195eff08..4dd930c7b59c0 100644 --- a/server/src/main/java/org/opensearch/snapshots/Snapshot.java +++ b/server/src/main/java/org/opensearch/snapshots/Snapshot.java @@ -32,6 +32,7 @@ package org.opensearch.snapshots; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -42,8 +43,9 @@ /** * Basic information about a snapshot - a SnapshotId and the repository that the snapshot belongs to. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class Snapshot implements Writeable { private final String repository; diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotId.java b/server/src/main/java/org/opensearch/snapshots/SnapshotId.java index aec3aebd93585..4eeb956a0cb19 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotId.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotId.java @@ -32,6 +32,7 @@ package org.opensearch.snapshots; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -44,8 +45,9 @@ /** * SnapshotId - snapshot name + snapshot UUID * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class SnapshotId implements Comparable<SnapshotId>, Writeable, ToXContentObject { private static final String NAME = "name"; diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java b/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java index 489086cb782fe..191b872cdd563 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java @@ -32,23 +32,24 @@ package org.opensearch.snapshots; import org.opensearch.Version; -import org.opensearch.core.action.ShardOperationFailedException; import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.time.DateFormatter; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.ParseField; +import org.opensearch.core.action.ShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.time.DateFormatter; -import org.opensearch.common.unit.TimeValue; +import 
org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.xcontent.XContentParserUtils; import org.opensearch.repositories.IndexId; -import org.opensearch.core.rest.RestStatus; import java.io.IOException; import java.time.Instant; @@ -64,8 +65,9 @@ /** * Information about a snapshot * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent, Writeable { public static final String CONTEXT_MODE_PARAM = "context_mode"; diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotShardFailure.java b/server/src/main/java/org/opensearch/snapshots/SnapshotShardFailure.java index 3af508b62b55c..5efcd5f12e37b 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotShardFailure.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotShardFailure.java @@ -33,18 +33,19 @@ package org.opensearch.snapshots; import org.opensearch.OpenSearchParseException; -import org.opensearch.core.action.ShardOperationFailedException; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; +import org.opensearch.core.action.ShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.xcontent.ConstructingObjectParser; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.index.snapshots.IndexShardSnapshotFailedException; import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; import java.io.IOException; import java.util.Objects; @@ -52,8 +53,9 @@ /** * Stores information about failures that occurred during shard snapshotting process * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SnapshotShardFailure extends ShardOperationFailedException { @Nullable diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotShardSizeInfo.java b/server/src/main/java/org/opensearch/snapshots/SnapshotShardSizeInfo.java index b5818c5650b3c..6c456be8ca9e3 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotShardSizeInfo.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotShardSizeInfo.java @@ -34,6 +34,7 @@ import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.annotation.PublicApi; import java.util.Collections; import java.util.Map; @@ -41,8 +42,9 @@ /** * Information about a snapshot shard size * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SnapshotShardSizeInfo { public static final SnapshotShardSizeInfo EMPTY = new SnapshotShardSizeInfo(Map.of()); diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java index 0fcd3761c98ba..1c25d8c71f948 100644 --- 
a/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java @@ -37,7 +37,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.IndexCommit; import org.opensearch.Version; -import org.opensearch.action.ActionListener; +import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterStateListener; import org.opensearch.cluster.SnapshotsInProgress; @@ -47,19 +47,20 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; -import org.opensearch.common.component.AbstractLifecycleComponent; import org.opensearch.common.concurrent.GatedCloseable; -import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.index.snapshots.IndexShardSnapshotFailedException; import org.opensearch.index.IndexService; import org.opensearch.index.engine.Engine; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; -import org.opensearch.core.index.shard.ShardId; -import org.opensearch.core.index.snapshots.IndexShardSnapshotFailedException; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.index.snapshots.IndexShardSnapshotStatus.Stage; import org.opensearch.indices.IndicesService; @@ -73,6 +74,7 @@ import org.opensearch.transport.TransportService; import java.io.IOException; +import java.nio.file.NoSuchFileException; import java.util.HashMap; import java.util.Iterator; import java.util.Map; @@ -379,6 +381,12 @@ private void snapshot( if (indexShard.routingEntry().primary() == false) { throw new IndexShardSnapshotFailedException(shardId, "snapshot should be performed only on primary"); } + if (indexShard.indexSettings().isSegRepEnabled() && indexShard.isPrimaryMode() == false) { + throw new IndexShardSnapshotFailedException( + shardId, + "snapshot triggered on a new primary following failover and cannot proceed until promotion is complete" + ); + } if (indexShard.routingEntry().relocating()) { // do not snapshot when in the process of relocation of primaries so we won't get conflicts throw new IndexShardSnapshotFailedException(shardId, "cannot snapshot while relocating"); @@ -395,18 +403,32 @@ private void snapshot( try { if (remoteStoreIndexShallowCopy && indexShard.indexSettings().isRemoteStoreEnabled()) { long startTime = threadPool.relativeTimeInMillis(); + long primaryTerm = indexShard.getOperationPrimaryTerm(); // we flush first to make sure we get the latest writes snapshotted wrappedSnapshot = indexShard.acquireLastIndexCommitAndRefresh(true); - long primaryTerm = indexShard.getOperationPrimaryTerm(); - final IndexCommit snapshotIndexCommit = wrappedSnapshot.get(); + IndexCommit snapshotIndexCommit = wrappedSnapshot.get(); long commitGeneration = snapshotIndexCommit.getGeneration(); - indexShard.acquireLockOnCommitData(snapshot.getSnapshotId().getUUID(), primaryTerm, 
commitGeneration); + try { + indexShard.acquireLockOnCommitData(snapshot.getSnapshotId().getUUID(), primaryTerm, commitGeneration); + } catch (NoSuchFileException e) { + wrappedSnapshot.close(); + logger.warn( + "Exception while acquiring lock on primaryTerm = {} and generation = {}", + primaryTerm, + commitGeneration + ); + indexShard.flush(new FlushRequest(shardId.getIndexName()).force(true)); + wrappedSnapshot = indexShard.acquireLastIndexCommit(false); + snapshotIndexCommit = wrappedSnapshot.get(); + commitGeneration = snapshotIndexCommit.getGeneration(); + indexShard.acquireLockOnCommitData(snapshot.getSnapshotId().getUUID(), primaryTerm, commitGeneration); + } try { repository.snapshotRemoteStoreIndexShard( indexShard.store(), snapshot.getSnapshotId(), indexId, - wrappedSnapshot.get(), + snapshotIndexCommit, getShardStateId(indexShard, snapshotIndexCommit), snapshotStatus, primaryTerm, diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotState.java b/server/src/main/java/org/opensearch/snapshots/SnapshotState.java index dd1b3ebb8404d..7ad838f741a3f 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotState.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotState.java @@ -32,11 +32,14 @@ package org.opensearch.snapshots; +import org.opensearch.common.annotation.PublicApi; + /** * Represents the state that a snapshot can be in * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum SnapshotState { /** * Snapshot process has started diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotUtils.java b/server/src/main/java/org/opensearch/snapshots/SnapshotUtils.java index e8115a97ac98a..e7338a29cafeb 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotUtils.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotUtils.java @@ -42,11 +42,11 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.List; -import java.util.Set; -import java.util.HashMap; import java.util.Map; +import java.util.Set; import java.util.stream.Collectors; import java.util.stream.Stream; diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java index cd9c2c4ce325a..71918bc73b55a 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java @@ -37,7 +37,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.ExceptionsHelper; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.action.StepListener; import org.opensearch.action.admin.cluster.snapshots.clone.CloneSnapshotRequest; @@ -79,15 +78,16 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; import org.opensearch.common.Priority; -import org.opensearch.common.Strings; import org.opensearch.common.UUIDs; import org.opensearch.common.collect.Tuple; -import org.opensearch.common.component.AbstractLifecycleComponent; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.regex.Regex; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import 
org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; @@ -477,7 +477,7 @@ public ClusterState execute(ClusterState currentState) { "No indices in the source snapshot [" + sourceSnapshotId + "] matched requested pattern [" - + org.opensearch.core.common.Strings.arrayToCommaDelimitedString(request.indices()) + + Strings.arrayToCommaDelimitedString(request.indices()) + "]" ); } @@ -823,7 +823,7 @@ private static void validate(String repositoryName, String snapshotName, Cluster } private static void validate(final String repositoryName, final String snapshotName) { - if (org.opensearch.core.common.Strings.hasLength(snapshotName) == false) { + if (Strings.hasLength(snapshotName) == false) { throw new InvalidSnapshotNameException(repositoryName, snapshotName, "cannot be empty"); } if (snapshotName.contains(" ")) { @@ -1565,7 +1565,7 @@ private void runNextQueuedOperation(RepositoryData repositoryData, String reposi /** * Runs a cluster state update that checks whether we have outstanding snapshot deletions that can be executed and executes them. - * + * <p> * TODO: optimize this to execute in a single CS update together with finalizing the latest snapshot */ private void runReadyDeletions(RepositoryData repositoryData, String repository) { @@ -1806,7 +1806,7 @@ public void deleteSnapshots(final DeleteSnapshotRequest request, final ActionLis logger.info( () -> new ParameterizedMessage( "deleting snapshots [{}] from repository [{}]", - org.opensearch.core.common.Strings.arrayToCommaDelimitedString(snapshotNames), + Strings.arrayToCommaDelimitedString(snapshotNames), repoName ) ); @@ -2758,7 +2758,7 @@ public boolean assertAllListenersResolved() { * Every shard snapshot or clone state update can result in multiple snapshots being updated. In order to determine whether or not a * shard update has an effect we use an outer loop over all current executing snapshot operations that iterates over them in the order * they were started in and an inner loop over the list of shard update tasks. - * + * <p> * If the inner loop finds that a shard update task applies to a given snapshot and either a shard-snapshot or shard-clone operation in * it then it will update the state of the snapshot entry accordingly. If that update was a noop, then the task is removed from the * iteration as it was already applied before and likely just arrived on the cluster-manager node again due to retries upstream. @@ -2768,7 +2768,7 @@ public boolean assertAllListenersResolved() { * a task in the executed tasks collection applied to a shard it was waiting for to become available, then the shard snapshot operation * will be started for that snapshot entry and the task removed from the collection of tasks that need to be applied to snapshot * entries since it can not have any further effects. - * + * <p> * Package private to allow for tests. */ static final ClusterStateTaskExecutor<ShardSnapshotUpdate> SHARD_STATE_EXECUTOR = new ClusterStateTaskExecutor<ShardSnapshotUpdate>() { @@ -3058,7 +3058,7 @@ private static ShardSnapshotStatus startShardSnapshotAfterClone(ClusterState cur /** * An update to the snapshot state of a shard. 
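With org.opensearch.core.common.Strings imported directly above, the fully qualified call sites collapse; a sketch of the two call shapes this file uses:

import org.opensearch.core.common.Strings;

public class StringsUsageExample {
    public static String describePattern(String[] indices) {
        // Previously org.opensearch.core.common.Strings.arrayToCommaDelimitedString(...)
        return "requested pattern [" + Strings.arrayToCommaDelimitedString(indices) + "]";
    }

    public static boolean hasUsableName(String snapshotName) {
        // validate() rejects empty names (InvalidSnapshotNameException: "cannot be empty")
        // and, per the lines that follow, names containing spaces.
        return Strings.hasLength(snapshotName) && snapshotName.contains(" ") == false;
    }
}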
- * + * <p> * Package private for testing */ static final class ShardSnapshotUpdate { diff --git a/server/src/main/java/org/opensearch/snapshots/UpdateIndexShardSnapshotStatusRequest.java b/server/src/main/java/org/opensearch/snapshots/UpdateIndexShardSnapshotStatusRequest.java index 7db9fdac510ab..6ae1e0d45cb6e 100644 --- a/server/src/main/java/org/opensearch/snapshots/UpdateIndexShardSnapshotStatusRequest.java +++ b/server/src/main/java/org/opensearch/snapshots/UpdateIndexShardSnapshotStatusRequest.java @@ -34,9 +34,9 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.cluster.SnapshotsInProgress; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.TimeValue; import org.opensearch.core.index.shard.ShardId; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/snapshots/UpdateIndexShardSnapshotStatusResponse.java b/server/src/main/java/org/opensearch/snapshots/UpdateIndexShardSnapshotStatusResponse.java index 0ae7a8367d1a8..6bcbc932f3eff 100644 --- a/server/src/main/java/org/opensearch/snapshots/UpdateIndexShardSnapshotStatusResponse.java +++ b/server/src/main/java/org/opensearch/snapshots/UpdateIndexShardSnapshotStatusResponse.java @@ -31,7 +31,7 @@ package org.opensearch.snapshots; -import org.opensearch.action.ActionResponse; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamOutput; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/tasks/CancellableTask.java b/server/src/main/java/org/opensearch/tasks/CancellableTask.java index dc28c26700e6c..22806a749c686 100644 --- a/server/src/main/java/org/opensearch/tasks/CancellableTask.java +++ b/server/src/main/java/org/opensearch/tasks/CancellableTask.java @@ -35,6 +35,7 @@ import org.opensearch.common.Nullable; import org.opensearch.common.SetOnce; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.tasks.TaskId; import java.util.Map; diff --git a/server/src/main/java/org/opensearch/tasks/RawTaskStatus.java b/server/src/main/java/org/opensearch/tasks/RawTaskStatus.java index 28a09fd80b408..28950f5a713fc 100644 --- a/server/src/main/java/org/opensearch/tasks/RawTaskStatus.java +++ b/server/src/main/java/org/opensearch/tasks/RawTaskStatus.java @@ -32,13 +32,12 @@ package org.opensearch.tasks; -import org.opensearch.common.Strings; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.common.xcontent.XContentType; import java.io.IOException; import java.io.InputStream; @@ -76,7 +75,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { try (InputStream stream = status.streamInput()) { - return builder.rawValue(stream, XContentHelper.xContentType(status)); + return builder.rawValue(stream, MediaTypeRegistry.xContentType(status)); } } @@ -87,7 +86,7 @@ public String getWriteableName() { @Override public String toString() 
{ - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } /** diff --git a/server/src/main/java/org/opensearch/tasks/Task.java b/server/src/main/java/org/opensearch/tasks/Task.java index 3fa62ebbdcbed..a21a454a65d0e 100644 --- a/server/src/main/java/org/opensearch/tasks/Task.java +++ b/server/src/main/java/org/opensearch/tasks/Task.java @@ -32,13 +32,20 @@ package org.opensearch.tasks; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionResponse; -import org.opensearch.action.NotifyOnceListener; -import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.action.NotifyOnceListener; import org.opensearch.core.common.io.stream.NamedWriteable; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.core.tasks.resourcetracker.ResourceStats; +import org.opensearch.core.tasks.resourcetracker.ResourceStatsType; +import org.opensearch.core.tasks.resourcetracker.ResourceUsageInfo; +import org.opensearch.core.tasks.resourcetracker.ResourceUsageMetric; +import org.opensearch.core.tasks.resourcetracker.TaskResourceStats; +import org.opensearch.core.tasks.resourcetracker.TaskResourceUsage; +import org.opensearch.core.tasks.resourcetracker.TaskThreadUsage; +import org.opensearch.core.tasks.resourcetracker.ThreadResourceInfo; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentObject; @@ -54,12 +61,10 @@ /** * Current task information * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Task { - - private static final Logger logger = LogManager.getLogger(Task.class); - /** * The request header to mark tasks with specific ids */ @@ -73,8 +78,6 @@ public class Task { private static final String MAX = "max"; - public static final String THREAD_INFO = "thread_info"; - private final long id; private final String type; @@ -495,7 +498,10 @@ public boolean supportsResourceTracking() { * <b>can</b> change this on version upgrade but we should be careful * because some statuses (reindex) have become defacto standardized because * they are used by systems like Kibana. 
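Task.Status, annotated @PublicApi below, is simply ToXContentObject plus NamedWriteable, and the warning above about de facto standardization is the reason to keep its serialized form stable. A minimal illustrative implementation; the class and writeable name are hypothetical:

import java.io.IOException;

import org.opensearch.core.common.io.stream.StreamOutput;
import org.opensearch.core.xcontent.ToXContent;
import org.opensearch.core.xcontent.XContentBuilder;
import org.opensearch.tasks.Task;

// Hypothetical status carrying one progress counter.
public class ProgressStatus implements Task.Status {
    private final long processed;

    public ProgressStatus(long processed) {
        this.processed = processed;
    }

    @Override
    public String getWriteableName() {
        return "progress_status"; // hypothetical registration name
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeVLong(processed);
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
        return builder.startObject().field("processed", processed).endObject();
    }
}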
+ * + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface Status extends ToXContentObject, NamedWriteable {} /** @@ -505,13 +511,13 @@ public String getHeader(String header) { return headers.get(header); } - public TaskResult result(DiscoveryNode node, Exception error) throws IOException { - return new TaskResult(taskInfo(node.getId(), true, true), error); + public TaskResult result(final String nodeId, Exception error) throws IOException { + return new TaskResult(taskInfo(nodeId, true, true), error); } - public TaskResult result(DiscoveryNode node, ActionResponse response) throws IOException { + public TaskResult result(final String nodeId, ActionResponse response) throws IOException { if (response instanceof ToXContent) { - return new TaskResult(taskInfo(node.getId(), true, true), (ToXContent) response); + return new TaskResult(taskInfo(nodeId, true, true), (ToXContent) response); } else { throw new IllegalStateException("response has to implement ToXContent to be able to store the results"); } @@ -544,11 +550,11 @@ public int incrementResourceTrackingThreads() { * This method is called when threads finish execution, and also when the task is unregistered (to mark the task's * own thread as complete). When the active thread count becomes zero, the onTaskResourceTrackingCompleted method * is called exactly once on all registered listeners. - * + * <p> * Since a task is unregistered after the message is processed, it implies that the threads responsible to produce * the response must have started prior to it (i.e. startThreadResourceTracking called before unregister). * This ensures that the number of active threads doesn't drop to zero pre-maturely. - * + * <p> * Rarely, some threads may even start execution after the task is unregistered. As resource stats are piggy-backed * with the response, any thread usage info captured after the task is unregistered may be irrelevant. * diff --git a/server/src/main/java/org/opensearch/tasks/TaskAwareRequest.java b/server/src/main/java/org/opensearch/tasks/TaskAwareRequest.java index baf2d1d94ea04..6913f6cfc375c 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskAwareRequest.java +++ b/server/src/main/java/org/opensearch/tasks/TaskAwareRequest.java @@ -32,6 +32,8 @@ package org.opensearch.tasks; +import org.opensearch.core.tasks.TaskId; + import java.util.Map; /** diff --git a/server/src/main/java/org/opensearch/tasks/TaskCancellation.java b/server/src/main/java/org/opensearch/tasks/TaskCancellation.java index b718bd2395cc5..2d152e513f197 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskCancellation.java +++ b/server/src/main/java/org/opensearch/tasks/TaskCancellation.java @@ -9,7 +9,7 @@ package org.opensearch.tasks; import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionListener; +import org.opensearch.core.action.ActionListener; import java.util.ArrayList; import java.util.List; @@ -18,7 +18,7 @@ /** * TaskCancellation represents a task eligible for cancellation. * It doesn't guarantee that the task will actually get cancelled or not; that decision is left to the caller. - * + * <p> * It contains a list of cancellation reasons along with callbacks that are invoked when cancel() is called. * * @opensearch.internal @@ -87,7 +87,7 @@ private void runOnCancelCallbacks() { /** * Returns the sum of all cancellation scores. - * + * <p> * A zero score indicates no reason to cancel the task. * A task with a higher score suggests greater possibility of recovering the node when it is cancelled. 
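The scoring contract above is additive: a zero total means no reason to cancel, and a larger total means cancelling this task is more likely to relieve the node. An illustrative sketch with the reason type reduced to a score-bearing record (TaskCancellation's real reason type is not shown in this diff):

import java.util.List;

public class CancellationScoreExample {
    // Hypothetical stand-in for a cancellation reason; each reason carries a score.
    record Reason(String message, int score) {}

    public static int totalCancellationScore(List<Reason> reasons) {
        return reasons.stream().mapToInt(Reason::score).sum();
    }

    public static void main(String[] args) {
        List<Reason> reasons = List.of(
            new Reason("heap usage exceeded threshold", 3),
            new Reason("elapsed time exceeded threshold", 2)
        );
        System.out.println(totalCancellationScore(reasons)); // 5: a stronger case for cancelling than either reason alone
    }
}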
*/ diff --git a/server/src/main/java/org/opensearch/tasks/TaskCancellationMonitoringService.java b/server/src/main/java/org/opensearch/tasks/TaskCancellationMonitoringService.java index 5b512af56e195..343d4571593a7 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskCancellationMonitoringService.java +++ b/server/src/main/java/org/opensearch/tasks/TaskCancellationMonitoringService.java @@ -11,7 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.action.search.SearchShardTask; -import org.opensearch.common.component.AbstractLifecycleComponent; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.metrics.CounterMetric; import org.opensearch.threadpool.Scheduler; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/tasks/TaskCancellationService.java b/server/src/main/java/org/opensearch/tasks/TaskCancellationService.java index 7ff27fa1096dc..6955a5927ca23 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskCancellationService.java +++ b/server/src/main/java/org/opensearch/tasks/TaskCancellationService.java @@ -36,20 +36,21 @@ import org.apache.logging.log4j.Logger; import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchSecurityException; -import org.opensearch.action.ActionListener; import org.opensearch.action.StepListener; import org.opensearch.action.support.ChannelActionListener; import org.opensearch.action.support.GroupedActionListener; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.EmptyTransportResponseHandler; import org.opensearch.transport.TransportChannel; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestHandler; -import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportService; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/tasks/TaskInfo.java b/server/src/main/java/org/opensearch/tasks/TaskInfo.java index 2f259f62b6909..3a04e8e4072b2 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskInfo.java +++ b/server/src/main/java/org/opensearch/tasks/TaskInfo.java @@ -33,16 +33,19 @@ package org.opensearch.tasks; import org.opensearch.Version; -import org.opensearch.common.Strings; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.core.tasks.resourcetracker.TaskResourceStats; import org.opensearch.core.xcontent.ConstructingObjectParser; -import org.opensearch.common.xcontent.ObjectParserHelper; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.ParseField; +import org.opensearch.core.xcontent.MediaTypeRegistry; 
+import org.opensearch.core.xcontent.ObjectParserHelper; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -64,8 +67,9 @@ * and use in APIs. Instead, immutable and writeable TaskInfo objects are used to represent * snapshot information about currently running tasks. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class TaskInfo implements Writeable, ToXContentFragment { private final TaskId taskId; @@ -398,7 +402,7 @@ public static TaskInfo fromXContent(XContentParser parser) { @Override public String toString() { - return Strings.toString(XContentType.JSON, this, true, true); + return Strings.toString(MediaTypeRegistry.JSON, this, true, true); } // Implements equals and hashCode for testing diff --git a/server/src/main/java/org/opensearch/tasks/TaskManager.java b/server/src/main/java/org/opensearch/tasks/TaskManager.java index bcd3004188bb6..a49968ab85e89 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskManager.java +++ b/server/src/main/java/org/opensearch/tasks/TaskManager.java @@ -35,29 +35,31 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.core.Assertions; import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchTimeoutException; -import org.opensearch.action.ActionListener; -import org.opensearch.action.ActionResponse; -import org.opensearch.action.NotifyOnceListener; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterStateApplier; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.SetOnce; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.util.concurrent.ConcurrentMapLong; import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.common.lease.Releasable; -import org.opensearch.common.lease.Releasables; +import org.opensearch.core.Assertions; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.action.NotifyOnceListener; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.tasks.TaskCancelledException; +import org.opensearch.core.tasks.TaskId; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TcpChannel; @@ -364,7 +366,7 @@ public <Response extends ActionResponse> void storeResult(Task task, Exception e } final TaskResult taskResult; try { - taskResult = task.result(localNode, error); + taskResult = task.result(localNode.getId(), error); } catch (IOException ex) { logger.warn(() -> new ParameterizedMessage("couldn't store error {}", ExceptionsHelper.detailedMessage(error)), ex); listener.onFailure(ex); @@ -397,7 +399,7 @@ public <Response extends ActionResponse> void storeResult(Task task, Response re } final TaskResult 
taskResult; try { - taskResult = task.result(localNode, response); + taskResult = task.result(localNode.getId(), response); } catch (IOException ex) { logger.warn(() -> new ParameterizedMessage("couldn't store response {}", response), ex); listener.onFailure(ex); diff --git a/server/src/main/java/org/opensearch/tasks/TaskResourceTrackingService.java b/server/src/main/java/org/opensearch/tasks/TaskResourceTrackingService.java index b4806b531429e..f32559f6314c0 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskResourceTrackingService.java +++ b/server/src/main/java/org/opensearch/tasks/TaskResourceTrackingService.java @@ -9,6 +9,7 @@ package org.opensearch.tasks; import com.sun.management.ThreadMXBean; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -21,6 +22,9 @@ import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.util.concurrent.ConcurrentMapLong; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.tasks.resourcetracker.ResourceStats; +import org.opensearch.core.tasks.resourcetracker.ResourceUsageMetric; +import org.opensearch.core.tasks.resourcetracker.ThreadResourceInfo; import org.opensearch.threadpool.RunnableTaskExecutionListener; import org.opensearch.threadpool.ThreadPool; @@ -30,7 +34,7 @@ import java.util.List; import java.util.Map; -import static org.opensearch.tasks.ResourceStatsType.WORKER_STATS; +import static org.opensearch.core.tasks.resourcetracker.ResourceStatsType.WORKER_STATS; /** * Service that helps track resource usage of tasks running on a node. diff --git a/server/src/main/java/org/opensearch/tasks/TaskResult.java b/server/src/main/java/org/opensearch/tasks/TaskResult.java index b6006636da2e9..846fbde48ea59 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskResult.java +++ b/server/src/main/java/org/opensearch/tasks/TaskResult.java @@ -34,20 +34,20 @@ import org.opensearch.OpenSearchException; import org.opensearch.client.Requests; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; -import org.opensearch.common.Strings; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.xcontent.InstantiatingObjectParser; -import org.opensearch.common.xcontent.ObjectParserHelper; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.ObjectParserHelper; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.XContentHelper; import java.io.IOException; import java.util.Map; @@ -55,16 +55,18 @@ import static java.util.Collections.emptyMap; import static java.util.Objects.requireNonNull; +import static org.opensearch.common.xcontent.XContentHelper.convertToMap; +import static org.opensearch.common.xcontent.XContentHelper.writeRawField; import static org.opensearch.core.xcontent.ConstructingObjectParser.constructorArg; import static 
org.opensearch.core.xcontent.ConstructingObjectParser.optionalConstructorArg; -import static org.opensearch.common.xcontent.XContentHelper.convertToMap; /** * Information about a running task or a task that stored its result. Running tasks just have a {@link #getTask()} while * tasks with stored result will have either a {@link #getError()} or {@link #getResponse()}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class TaskResult implements Writeable, ToXContentObject { private final boolean completed; private final TaskInfo task; @@ -182,10 +184,10 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t task.toXContent(builder, params); builder.endObject(); if (error != null) { - XContentHelper.writeRawField("error", error, builder, params); + writeRawField("error", error, builder, params); } if (response != null) { - XContentHelper.writeRawField("response", response, builder, params); + writeRawField("response", response, builder, params); } return builder; } @@ -208,7 +210,7 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } // Implements equals and hashcode for testing @@ -238,7 +240,7 @@ public int hashCode() { } private static BytesReference toXContent(Exception error) throws IOException { - try (XContentBuilder builder = XContentFactory.contentBuilder(Requests.INDEX_CONTENT_TYPE)) { + try (XContentBuilder builder = MediaTypeRegistry.contentBuilder(Requests.INDEX_CONTENT_TYPE)) { builder.startObject(); OpenSearchException.generateThrowableXContent(builder, ToXContent.EMPTY_PARAMS, error); builder.endObject(); diff --git a/server/src/main/java/org/opensearch/tasks/TaskResultsService.java b/server/src/main/java/org/opensearch/tasks/TaskResultsService.java index 1feb115cb585a..d1ee04bd5cb25 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskResultsService.java +++ b/server/src/main/java/org/opensearch/tasks/TaskResultsService.java @@ -37,7 +37,6 @@ import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; import org.opensearch.ResourceAlreadyExistsException; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.create.CreateIndexRequest; import org.opensearch.action.admin.indices.create.CreateIndexResponse; import org.opensearch.action.bulk.BackoffPolicy; @@ -53,12 +52,12 @@ import org.opensearch.common.inject.Inject; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.io.Streams; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.util.io.Streams; import org.opensearch.threadpool.ThreadPool; import java.io.ByteArrayOutputStream; @@ -146,7 +145,7 @@ public void onFailure(Exception e) { client.admin() .indices() .preparePutMapping(TASK_INDEX) - .setSource(taskResultIndexMapping(), XContentType.JSON) + .setSource(taskResultIndexMapping(), MediaTypeRegistry.JSON) .execute(ActionListener.delegateFailure(listener, (l, r) -> 
doStoreResult(taskResult, listener))); } else { doStoreResult(taskResult, listener); @@ -169,7 +168,7 @@ private int getTaskResultMappingVersion(IndexMetadata metadata) { private void doStoreResult(TaskResult taskResult, ActionListener<Void> listener) { IndexRequestBuilder index = client.prepareIndex(TASK_INDEX).setId(taskResult.getTask().getTaskId().toString()); - try (XContentBuilder builder = XContentFactory.contentBuilder(Requests.INDEX_CONTENT_TYPE)) { + try (XContentBuilder builder = MediaTypeRegistry.contentBuilder(Requests.INDEX_CONTENT_TYPE)) { taskResult.toXContent(builder, ToXContent.EMPTY_PARAMS); index.setSource(builder); } catch (IOException e) { diff --git a/server/src/main/java/org/opensearch/tasks/TaskThreadContextStatePropagator.java b/server/src/main/java/org/opensearch/tasks/TaskThreadContextStatePropagator.java index ed111b34f048f..99559e45aaaee 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskThreadContextStatePropagator.java +++ b/server/src/main/java/org/opensearch/tasks/TaskThreadContextStatePropagator.java @@ -20,7 +20,9 @@ * Propagates TASK_ID across thread contexts */ public class TaskThreadContextStatePropagator implements ThreadContextStatePropagator { + @Override + @SuppressWarnings("removal") public Map<String, Object> transients(Map<String, Object> source) { final Map<String, Object> transients = new HashMap<>(); @@ -32,7 +34,18 @@ public Map<String, Object> transients(Map<String, Object> source) { } @Override + public Map<String, Object> transients(Map<String, Object> source, boolean isSystemContext) { + return transients(source); + } + + @Override + @SuppressWarnings("removal") public Map<String, String> headers(Map<String, Object> source) { return Collections.emptyMap(); } + + @Override + public Map<String, String> headers(Map<String, Object> source, boolean isSystemContext) { + return headers(source); + } } diff --git a/server/src/main/java/org/opensearch/tasks/consumer/TopNSearchTasksLogger.java b/server/src/main/java/org/opensearch/tasks/consumer/TopNSearchTasksLogger.java index 21cde24bd541d..6f977481852ec 100644 --- a/server/src/main/java/org/opensearch/tasks/consumer/TopNSearchTasksLogger.java +++ b/server/src/main/java/org/opensearch/tasks/consumer/TopNSearchTasksLogger.java @@ -16,7 +16,7 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.tasks.ResourceStats; +import org.opensearch.core.tasks.resourcetracker.ResourceStats; import org.opensearch.tasks.Task; import java.util.Comparator; diff --git a/server/src/main/java/org/opensearch/telemetry/TelemetrySettings.java b/server/src/main/java/org/opensearch/telemetry/TelemetrySettings.java index 7c9e0d5ac8097..4b8897a318531 100644 --- a/server/src/main/java/org/opensearch/telemetry/TelemetrySettings.java +++ b/server/src/main/java/org/opensearch/telemetry/TelemetrySettings.java @@ -8,13 +8,18 @@ package org.opensearch.telemetry; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; /** * Wrapper class to encapsulate tracing related settings + * + * @opensearch.experimental */ +@ExperimentalApi public class TelemetrySettings { public static final Setting<Boolean> TRACER_ENABLED_SETTING = Setting.boolSetting( "telemetry.tracer.enabled", @@ -23,12 +28,55 @@ public class 
TelemetrySettings { Setting.Property.Dynamic ); + public static final Setting<Boolean> TRACER_FEATURE_ENABLED_SETTING = Setting.boolSetting( + "telemetry.feature.tracer.enabled", + false, + Setting.Property.NodeScope, + Setting.Property.Final + ); + + public static final Setting<Boolean> METRICS_FEATURE_ENABLED_SETTING = Setting.boolSetting( + "telemetry.feature.metrics.enabled", + false, + Setting.Property.NodeScope, + Setting.Property.Final + ); + + /** + * Probability setting for the tracing sampler. + */ + public static final Setting<Double> TRACER_SAMPLER_PROBABILITY = Setting.doubleSetting( + "telemetry.tracer.sampler.probability", + 0.01d, + 0.00d, + 1.00d, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + /** + * Metrics publish interval; defaults to 60 seconds. + */ + public static final Setting<TimeValue> METRICS_PUBLISH_INTERVAL_SETTING = Setting.timeSetting( + "telemetry.otel.metrics.publish.interval", + TimeValue.timeValueSeconds(60), + Setting.Property.NodeScope, + Setting.Property.Final + ); + private volatile boolean tracingEnabled; + private volatile double samplingProbability; + private final boolean tracingFeatureEnabled; + private final boolean metricsFeatureEnabled; public TelemetrySettings(Settings settings, ClusterSettings clusterSettings) { this.tracingEnabled = TRACER_ENABLED_SETTING.get(settings); + this.samplingProbability = TRACER_SAMPLER_PROBABILITY.get(settings); + this.tracingFeatureEnabled = TRACER_FEATURE_ENABLED_SETTING.get(settings); + this.metricsFeatureEnabled = METRICS_FEATURE_ENABLED_SETTING.get(settings); clusterSettings.addSettingsUpdateConsumer(TRACER_ENABLED_SETTING, this::setTracingEnabled); + clusterSettings.addSettingsUpdateConsumer(TRACER_SAMPLER_PROBABILITY, this::setSamplingProbability); } public void setTracingEnabled(boolean tracingEnabled) { @@ -39,4 +87,28 @@ public boolean isTracingEnabled() { return tracingEnabled; } + /** + * Set sampling probability + * @param samplingProbability double + */ + public void setSamplingProbability(double samplingProbability) { + this.samplingProbability = samplingProbability; + } + + /** + * Get sampling probability + * @return double + */ + public double getSamplingProbability() { + return samplingProbability; + } + + public boolean isTracingFeatureEnabled() { + return tracingFeatureEnabled; + } + + public boolean isMetricsFeatureEnabled() { + return metricsFeatureEnabled; + } + } diff --git a/server/src/main/java/org/opensearch/telemetry/metrics/MetricsRegistryFactory.java b/server/src/main/java/org/opensearch/telemetry/metrics/MetricsRegistryFactory.java new file mode 100644 index 0000000000000..c7e2229c18437 --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/metrics/MetricsRegistryFactory.java @@ -0,0 +1,76 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.telemetry.Telemetry; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; +import org.opensearch.telemetry.tracing.Tracer; + +import java.io.Closeable; +import java.io.IOException; +import java.util.Optional; + +/** + * {@link MetricsRegistryFactory} represents a single global class that is used to access {@link MetricsRegistry}s.
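A quick sketch of reading the new telemetry settings; the keys and defaults are the ones defined above in `TelemetrySettings`, while the class name and the concrete values are hypothetical.

```java
import org.opensearch.common.settings.Settings;
import org.opensearch.telemetry.TelemetrySettings;

/** Hypothetical sketch: reading the new telemetry settings off node settings. */
final class TelemetrySettingsSketch {
    public static void main(String[] args) {
        Settings settings = Settings.builder()
            .put("telemetry.tracer.enabled", true)
            .put("telemetry.tracer.sampler.probability", 0.1d) // must lie within [0.00, 1.00]
            .build();
        // Dynamic setting: can also be updated later through cluster settings.
        double probability = TelemetrySettings.TRACER_SAMPLER_PROBABILITY.get(settings); // 0.1
        // Final, node-scoped feature flag; defaults to false when unset.
        boolean tracingFeature = TelemetrySettings.TRACER_FEATURE_ENABLED_SETTING.get(settings);
        System.out.println(probability + " " + tracingFeature);
    }
}
```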
+ * <p> + * The {@link MetricsRegistry} singleton object can be retrieved using MetricsRegistryFactory::getMetricsRegistry. The {@link MetricsRegistryFactory} object + * is created during class initialization and cannot subsequently be changed. + * + * @opensearch.internal + */ +@InternalApi +public class MetricsRegistryFactory implements Closeable { + + private static final Logger logger = LogManager.getLogger(MetricsRegistryFactory.class); + + private final TelemetrySettings telemetrySettings; + private final MetricsRegistry metricsRegistry; + + public MetricsRegistryFactory(TelemetrySettings telemetrySettings, Optional<Telemetry> telemetry) { + this.telemetrySettings = telemetrySettings; + this.metricsRegistry = metricsRegistry(telemetry); + } + + /** + * Returns the {@link MetricsRegistry} instance + * + * @return MetricsRegistry instance + */ + public MetricsRegistry getMetricsRegistry() { + return metricsRegistry; + } + + /** + * Closes the {@link MetricsRegistry} + */ + @Override + public void close() { + try { + metricsRegistry.close(); + } catch (IOException e) { + logger.warn("Error closing MetricsRegistry", e); + } + } + + private MetricsRegistry metricsRegistry(Optional<Telemetry> telemetry) { + MetricsRegistry metricsRegistry = telemetry.map(Telemetry::getMetricsTelemetry) + .map(metricsTelemetry -> createDefaultMetricsRegistry(metricsTelemetry)) + .orElse(NoopMetricsRegistry.INSTANCE); + return metricsRegistry; + } + + private MetricsRegistry createDefaultMetricsRegistry(MetricsTelemetry metricsTelemetry) { + return new DefaultMetricsRegistry(metricsTelemetry); + } + +} diff --git a/server/src/main/java/org/opensearch/telemetry/metrics/NoopMetricsRegistryFactory.java b/server/src/main/java/org/opensearch/telemetry/metrics/NoopMetricsRegistryFactory.java new file mode 100644 index 0000000000000..5137cb18e2cc0 --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/metrics/NoopMetricsRegistryFactory.java @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; + +import java.util.Optional; + +/** + * No-op implementation of {@link MetricsRegistryFactory} + * + * @opensearch.internal + */ +@InternalApi +public class NoopMetricsRegistryFactory extends MetricsRegistryFactory { + public NoopMetricsRegistryFactory() { + super(null, Optional.empty()); + } + + @Override + public MetricsRegistry getMetricsRegistry() { + return NoopMetricsRegistry.INSTANCE; + } + + @Override + public void close() { + + } +} diff --git a/server/src/main/java/org/opensearch/telemetry/metrics/package-info.java b/server/src/main/java/org/opensearch/telemetry/metrics/package-info.java new file mode 100644 index 0000000000000..ad4564e1d7773 --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/metrics/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains classes needed for telemetry.
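A minimal wiring sketch for `MetricsRegistryFactory`, assuming only what the class above shows: passing an empty `Optional<Telemetry>` yields the no-op registry. The helper class is hypothetical.

```java
import java.util.Optional;

import org.opensearch.telemetry.TelemetrySettings;
import org.opensearch.telemetry.metrics.MetricsRegistry;
import org.opensearch.telemetry.metrics.MetricsRegistryFactory;

/** Hypothetical wiring sketch for the metrics registry. */
final class MetricsRegistrySketch {
    static MetricsRegistry registryFor(TelemetrySettings telemetrySettings) {
        // With no Telemetry implementation plugged in, the factory falls back to
        // NoopMetricsRegistry.INSTANCE, so callers never have to null-check.
        return new MetricsRegistryFactory(telemetrySettings, Optional.empty()).getMetricsRegistry();
    }
}
```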
+ */ +package org.opensearch.telemetry.metrics; diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/AttributeNames.java b/server/src/main/java/org/opensearch/telemetry/tracing/AttributeNames.java new file mode 100644 index 0000000000000..212ef3c713d8e --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/AttributeNames.java @@ -0,0 +1,117 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +import org.opensearch.common.annotation.ExperimentalApi; + +/** + * Holds the attribute names, to avoid duplication and keep them consistent. + * + * @opensearch.experimental + */ +@ExperimentalApi +public final class AttributeNames { + + /** + * Constructor + */ + private AttributeNames() { + + } + + /** + * HTTP Protocol Version + */ + public static final String HTTP_PROTOCOL_VERSION = "http.version"; + + /** + * HTTP method + */ + public static final String HTTP_METHOD = "http.method"; + + /** + * HTTP Request URI. + */ + public static final String HTTP_URI = "http.uri"; + + /** + * Http Request Query Parameters. + */ + public static final String HTTP_REQ_QUERY_PARAMS = "url.query"; + + /** + * Rest Request ID. + */ + public static final String REST_REQ_ID = "rest.request_id"; + + /** + * Rest Request Raw Path. + */ + public static final String REST_REQ_RAW_PATH = "rest.raw_path"; + + /** + * Trace key. To be used for on-demand sampling. + */ + public static final String TRACE = "trace"; + + /** + * Transport Service send request target host. + */ + public static final String TRANSPORT_TARGET_HOST = "target_host"; + + /** + * Transport Service send request local host. + */ + public static final String TRANSPORT_HOST = "host"; + + /** + * Action Name.
+ */ + public static final String TRANSPORT_ACTION = "action"; + + /** + * Task id + */ + public static final String TASK_ID = "task_id"; + + /** + * Parent task id + */ + public static final String PARENT_TASK_ID = "parent_task_id"; + + /** + * Index Name + */ + public static final String INDEX = "index"; + + /** + * Shard ID + */ + public static final String SHARD_ID = "shard_id"; + + /** + * Number of request items in bulk request + */ + public static final String BULK_REQUEST_ITEMS = "bulk_request_items"; + + /** + * Node ID + */ + public static final String NODE_ID = "node_id"; + + /** + * Refresh Policy + */ + public static final String REFRESH_POLICY = "refresh_policy"; + + /** + * Search Response Total Hits + */ + public static final String TOTAL_HITS = "total_hits"; +} diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/NoopTracerFactory.java b/server/src/main/java/org/opensearch/telemetry/tracing/NoopTracerFactory.java index f82a390dc1754..87762f342a653 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/NoopTracerFactory.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/NoopTracerFactory.java @@ -8,6 +8,7 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.common.annotation.InternalApi; import org.opensearch.telemetry.tracing.noop.NoopTracer; import java.util.Optional; @@ -17,6 +18,7 @@ * * @opensearch.internal */ +@InternalApi public class NoopTracerFactory extends TracerFactory { public NoopTracerFactory() { super(null, Optional.empty(), null); diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java b/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java new file mode 100644 index 0000000000000..42e64109b72fd --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java @@ -0,0 +1,239 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +import org.opensearch.action.bulk.BulkShardRequest; +import org.opensearch.action.support.replication.ReplicatedWriteRequest; +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.common.collect.Tuple; +import org.opensearch.core.common.Strings; +import org.opensearch.http.HttpRequest; +import org.opensearch.rest.RestRequest; +import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.attributes.Attributes; +import org.opensearch.transport.TcpChannel; +import org.opensearch.transport.Transport; + +import java.util.Arrays; +import java.util.List; + +/** + * Utility class that helps create the {@link SpanCreationContext} for a span. + * + * @opensearch.internal + */ +@InternalApi +public final class SpanBuilder { + + private static final List<String> HEADERS_TO_BE_ADDED_AS_ATTRIBUTES = Arrays.asList(AttributeNames.TRACE); + /** + * Attribute name separator + */ + private static final String SEPARATOR = " "; + + /** + * Constructor + */ + private SpanBuilder() { + + } + + /** + * Creates {@link SpanCreationContext} from the {@link HttpRequest} + * @param request Http request. + * @return context.
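A small usage sketch combining `SpanBuilder` with the `Tracer` API used elsewhere in this patch; the helper class and the choice to start the span at the call site are illustrative assumptions.

```java
import org.opensearch.http.HttpRequest;
import org.opensearch.telemetry.tracing.Span;
import org.opensearch.telemetry.tracing.SpanBuilder;
import org.opensearch.telemetry.tracing.SpanCreationContext;
import org.opensearch.telemetry.tracing.Tracer;

/** Hypothetical sketch: starting an HTTP server span via SpanBuilder. */
final class SpanBuilderSketch {
    static Span startHttpSpan(Tracer tracer, HttpRequest request) {
        // The span name becomes "<METHOD> <path>"; uri, method, protocol version
        // (and the query string, when present) are attached as attributes.
        SpanCreationContext context = SpanBuilder.from(request);
        return tracer.startSpan(context);
    }
}
```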
+ */ + public static SpanCreationContext from(HttpRequest request) { + return SpanCreationContext.server().name(createSpanName(request)).attributes(buildSpanAttributes(request)); + } + + /** + * Creates {@link SpanCreationContext} from the {@link RestRequest} + * @param request Rest request + * @return context + */ + public static SpanCreationContext from(RestRequest request) { + return SpanCreationContext.client().name(createSpanName(request)).attributes(buildSpanAttributes(request)); + } + + /** + * Creates {@link SpanCreationContext} from Transport action and connection details. + * @param action action. + * @param connection transport connection. + * @return context + */ + public static SpanCreationContext from(String action, Transport.Connection connection) { + return SpanCreationContext.server().name(createSpanName(action, connection)).attributes(buildSpanAttributes(action, connection)); + } + + public static SpanCreationContext from(String spanName, String nodeId, ReplicatedWriteRequest request) { + return SpanCreationContext.server().name(spanName).attributes(buildSpanAttributes(nodeId, request)); + } + + private static String createSpanName(HttpRequest httpRequest) { + Tuple<String, String> uriParts = splitUri(httpRequest.uri()); + String path = uriParts.v1(); + return httpRequest.method().name() + SEPARATOR + path; + } + + private static Attributes buildSpanAttributes(HttpRequest httpRequest) { + Attributes attributes = Attributes.create() + .addAttribute(AttributeNames.HTTP_URI, httpRequest.uri()) + .addAttribute(AttributeNames.HTTP_METHOD, httpRequest.method().name()) + .addAttribute(AttributeNames.HTTP_PROTOCOL_VERSION, httpRequest.protocolVersion().name()); + populateHeader(httpRequest, attributes); + + Tuple<String, String> uriParts = splitUri(httpRequest.uri()); + String query = uriParts.v2(); + if (query.isBlank() == false) { + attributes.addAttribute(AttributeNames.HTTP_REQ_QUERY_PARAMS, query); + } + + return attributes; + } + + private static Tuple<String, String> splitUri(String uri) { + int index = uri.indexOf('?'); + if (index >= 0 && index < uri.length() - 1) { + String path = uri.substring(0, index); + String query = uri.substring(index + 1); + return new Tuple<>(path, query); + } + return new Tuple<>(uri, ""); + } + + private static void populateHeader(HttpRequest httpRequest, Attributes attributes) { + HEADERS_TO_BE_ADDED_AS_ATTRIBUTES.forEach(x -> { + if (httpRequest.getHeaders() != null + && httpRequest.getHeaders().get(x) != null + && (httpRequest.getHeaders().get(x).isEmpty() == false)) { + attributes.addAttribute(x, Strings.collectionToCommaDelimitedString(httpRequest.getHeaders().get(x))); + } + }); + } + + private static String createSpanName(RestRequest restRequest) { + String spanName = "rest_request"; + if (restRequest != null) { + try { + String methodName = restRequest.method().name(); + String rawPath = restRequest.rawPath(); + spanName = methodName + SEPARATOR + rawPath; + } catch (Exception e) { + // swallow the exception and keep the default name. 
+ } + } + return spanName; + } + + private static Attributes buildSpanAttributes(RestRequest restRequest) { + if (restRequest != null) { + Attributes attributes = Attributes.create() + .addAttribute(AttributeNames.REST_REQ_ID, restRequest.getRequestId()) + .addAttribute(AttributeNames.REST_REQ_RAW_PATH, restRequest.rawPath()); + + Tuple<String, String> uriParts = splitUri(restRequest.uri()); + String query = uriParts.v2(); + if (query.isBlank() == false) { + attributes.addAttribute(AttributeNames.HTTP_REQ_QUERY_PARAMS, query); + } + return attributes; + } else { + return Attributes.EMPTY; + } + } + + private static String createSpanName(String action, Transport.Connection connection) { + return action + SEPARATOR + (connection.getNode() != null ? connection.getNode().getHostAddress() : null); + } + + private static Attributes buildSpanAttributes(String action, Transport.Connection connection) { + Attributes attributes = Attributes.create().addAttribute(AttributeNames.TRANSPORT_ACTION, action); + if (connection != null && connection.getNode() != null) { + attributes.addAttribute(AttributeNames.TRANSPORT_TARGET_HOST, connection.getNode().getHostAddress()); + } + return attributes; + } + + /** + * Creates {@link SpanCreationContext} from Inbound Handler. + * @param action action. + * @param tcpChannel tcp channel. + * @return context + */ + public static SpanCreationContext from(String action, TcpChannel tcpChannel) { + return SpanCreationContext.server().name(createSpanName(action, tcpChannel)).attributes(buildSpanAttributes(action, tcpChannel)); + } + + private static String createSpanName(String action, TcpChannel tcpChannel) { + return action + SEPARATOR + (tcpChannel.getRemoteAddress() != null + ? tcpChannel.getRemoteAddress().getHostString() + : tcpChannel.getLocalAddress().getHostString()); + } + + private static Attributes buildSpanAttributes(String action, TcpChannel tcpChannel) { + Attributes attributes = Attributes.create().addAttribute(AttributeNames.TRANSPORT_ACTION, action); + attributes.addAttribute(AttributeNames.TRANSPORT_HOST, tcpChannel.getLocalAddress().getHostString()); + return attributes; + } + + private static Attributes buildSpanAttributes(String nodeId, ReplicatedWriteRequest request) { + Attributes attributes = Attributes.create() + .addAttribute(AttributeNames.NODE_ID, nodeId) + .addAttribute(AttributeNames.REFRESH_POLICY, request.getRefreshPolicy().getValue()); + if (request.shardId() != null) { + attributes.addAttribute(AttributeNames.INDEX, request.shardId().getIndexName()) + .addAttribute(AttributeNames.SHARD_ID, request.shardId().getId()); + } + if (request instanceof BulkShardRequest) { + attributes.addAttribute(AttributeNames.BULK_REQUEST_ITEMS, ((BulkShardRequest) request).items().length); + } + return attributes; + } + + /** + * Creates {@link SpanCreationContext} with parent set to specified SpanContext. + * @param spanName name of span. + * @param parentSpan target parent span. + * @return context + */ + public static SpanCreationContext from(String spanName, SpanContext parentSpan) { + return SpanCreationContext.server().name(spanName).parent(parentSpan); + } + + /** + * Creates {@link SpanCreationContext} with parent set to specified SpanContext. + * @param task search task. + * @param actionName action. 
+ * @return context + */ + public static SpanCreationContext from(Task task, String actionName) { + return SpanCreationContext.server().name(createSpanName(task, actionName)).attributes(buildSpanAttributes(task, actionName)); + } + + private static Attributes buildSpanAttributes(Task task, String actionName) { + Attributes attributes = Attributes.create().addAttribute(AttributeNames.TRANSPORT_ACTION, actionName); + if (task != null) { + attributes.addAttribute(AttributeNames.TASK_ID, task.getId()); + if (task.getParentTaskId() != null && task.getParentTaskId().isSet()) { + attributes.addAttribute(AttributeNames.PARENT_TASK_ID, task.getParentTaskId().getId()); + } + } + return attributes; + + } + + private static String createSpanName(Task task, String actionName) { + if (task != null) { + return task.getType() + SEPARATOR + task.getAction(); + } else { + return actionName; + } + } +} diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorage.java b/server/src/main/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorage.java index c009ab2391aab..908164d1935a7 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorage.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorage.java @@ -8,19 +8,21 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.common.annotation.InternalApi; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.concurrent.ThreadContextStatePropagator; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Objects; -import java.util.Optional; /** * Core's ThreadContext based TracerContextStorage implementation * * @opensearch.internal */ +@InternalApi public class ThreadContextBasedTracerContextStorage implements TracerContextStorage<String, Span>, ThreadContextStatePropagator { private final ThreadContext threadContext; @@ -40,9 +42,6 @@ public Span get(String key) { @Override public void put(String key, Span span) { - if (span == null) { - return; - } SpanReference currentSpanRef = threadContext.getTransient(key); if (currentSpanRef == null) { threadContext.putTransient(key, new SpanReference(span)); @@ -52,26 +51,35 @@ public void put(String key, Span span) { } @Override + @SuppressWarnings("removal") public Map<String, Object> transients(Map<String, Object> source) { final Map<String, Object> transients = new HashMap<>(); - if (source.containsKey(CURRENT_SPAN)) { final SpanReference current = (SpanReference) source.get(CURRENT_SPAN); if (current != null) { transients.put(CURRENT_SPAN, new SpanReference(current.getSpan())); } } - return transients; } @Override + public Map<String, Object> transients(Map<String, Object> source, boolean isSystemContext) { + if (isSystemContext == true) { + return Collections.emptyMap(); + } else { + return transients(source); + } + } + + @Override + @SuppressWarnings("removal") public Map<String, String> headers(Map<String, Object> source) { final Map<String, String> headers = new HashMap<>(); if (source.containsKey(CURRENT_SPAN)) { final SpanReference current = (SpanReference) source.get(CURRENT_SPAN); - if (current != null) { + if (current != null && current.getSpan() != null) { tracingTelemetry.getContextPropagator().inject(current.getSpan(), (key, value) -> headers.put(key, value)); } } @@ -79,17 +87,13 @@ public Map<String, String> headers(Map<String, Object> source) { return 
headers; } - Span getCurrentSpan(String key) { - Optional<Span> optionalSpanFromContext = spanFromThreadContext(key); - return optionalSpanFromContext.orElse(spanFromHeader()); + @Override + public Map<String, String> headers(Map<String, Object> source, boolean isSystemContext) { + return headers(source); } - private Optional<Span> spanFromThreadContext(String key) { + Span getCurrentSpan(String key) { SpanReference currentSpanRef = threadContext.getTransient(key); - return (currentSpanRef == null) ? Optional.empty() : Optional.ofNullable(currentSpanRef.getSpan()); - } - - private Span spanFromHeader() { - return tracingTelemetry.getContextPropagator().extract(threadContext.getHeaders()); + return (currentSpanRef == null) ? null : currentSpanRef.getSpan(); } } diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/TracerFactory.java b/server/src/main/java/org/opensearch/telemetry/tracing/TracerFactory.java index d8fe812c82f53..b0cecb0ee485d 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/TracerFactory.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/TracerFactory.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.opensearch.common.annotation.InternalApi; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.telemetry.Telemetry; import org.opensearch.telemetry.TelemetrySettings; @@ -24,7 +25,10 @@ * <p> * The Tracer singleton object can be retrieved using tracerManager.getTracer(). The TracerManager object * is created during class initialization and cannot subsequently be changed. + * + * @opensearch.internal */ +@InternalApi public class TracerFactory implements Closeable { private static final Logger logger = LogManager.getLogger(TracerFactory.class); @@ -58,6 +62,13 @@ public void close() { } } + protected TracerContextStorage<String, Span> createTracerContextStorage( + TracingTelemetry tracingTelemetry, + ThreadContext threadContext + ) { + return new ThreadContextBasedTracerContextStorage(threadContext, tracingTelemetry); + } + private Tracer tracer(Optional<Telemetry> telemetry, ThreadContext threadContext) { return telemetry.map(Telemetry::getTracingTelemetry) .map(tracingTelemetry -> createDefaultTracer(tracingTelemetry, threadContext)) @@ -66,10 +77,7 @@ private Tracer tracer(Optional<Telemetry> telemetry, ThreadContext threadContext } private Tracer createDefaultTracer(TracingTelemetry tracingTelemetry, ThreadContext threadContext) { - TracerContextStorage<String, Span> tracerContextStorage = new ThreadContextBasedTracerContextStorage( - threadContext, - tracingTelemetry - ); + TracerContextStorage<String, Span> tracerContextStorage = createTracerContextStorage(tracingTelemetry, threadContext); return new DefaultTracer(tracingTelemetry, tracerContextStorage); } diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/WrappedTracer.java b/server/src/main/java/org/opensearch/telemetry/tracing/WrappedTracer.java index 0ba9a8ea5fd88..dfe456a0a6784 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/WrappedTracer.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/WrappedTracer.java @@ -8,16 +8,20 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.common.annotation.InternalApi; import org.opensearch.telemetry.TelemetrySettings; import org.opensearch.telemetry.tracing.noop.NoopTracer; import java.io.IOException; +import java.util.Collection; +import java.util.Map; /** * Wrapper 
implementation of Tracer. This delegates call to right tracer based on the tracer settings * * @opensearch.internal */ +@InternalApi final class WrappedTracer implements Tracer { private final Tracer defaultTracer; @@ -35,9 +39,29 @@ public WrappedTracer(TelemetrySettings telemetrySettings, Tracer defaultTracer) } @Override - public SpanScope startSpan(String spanName) { + public Span startSpan(SpanCreationContext context) { + return getDelegateTracer().startSpan(context); + } + + @Override + public SpanContext getCurrentSpan() { Tracer delegateTracer = getDelegateTracer(); - return delegateTracer.startSpan(spanName); + return delegateTracer.getCurrentSpan(); + } + + @Override + public ScopedSpan startScopedSpan(SpanCreationContext spanCreationContext) { + return getDelegateTracer().startScopedSpan(spanCreationContext); + } + + @Override + public SpanScope withSpanInScope(Span span) { + return getDelegateTracer().withSpanInScope(span); + } + + @Override + public boolean isRecording() { + return getDelegateTracer().isRecording(); } @Override @@ -49,4 +73,9 @@ public void close() throws IOException { Tracer getDelegateTracer() { return telemetrySettings.isTracingEnabled() ? defaultTracer : NoopTracer.INSTANCE; } + + @Override + public Span startSpan(SpanCreationContext spanCreationContext, Map<String, Collection<String>> headers) { + return defaultTracer.startSpan(spanCreationContext, headers); + } } diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableHttpChannel.java b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableHttpChannel.java new file mode 100644 index 0000000000000..e0fb690bd29be --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableHttpChannel.java @@ -0,0 +1,100 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing.channels; + +import org.opensearch.core.action.ActionListener; +import org.opensearch.http.HttpChannel; +import org.opensearch.http.HttpResponse; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.telemetry.tracing.listener.TraceableActionListener; + +import java.net.InetSocketAddress; +import java.util.Objects; +import java.util.Optional; + +/** + * Tracer wrapped {@link HttpChannel} + */ +public class TraceableHttpChannel implements HttpChannel { + private final HttpChannel delegate; + private final Span span; + private final Tracer tracer; + + /** + * Constructor. + * + * @param delegate delegate + * @param span span + * @param tracer tracer + */ + private TraceableHttpChannel(HttpChannel delegate, Span span, Tracer tracer) { + this.span = Objects.requireNonNull(span); + this.delegate = Objects.requireNonNull(delegate); + this.tracer = Objects.requireNonNull(tracer); + } + + /** + * Factory method. 
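A sketch of how `WrappedTracer`'s delegation behaves at a call site, assuming a `Tracer` wired up via `TracerFactory`; the span name and helper class are hypothetical.

```java
import org.opensearch.telemetry.tracing.Span;
import org.opensearch.telemetry.tracing.SpanCreationContext;
import org.opensearch.telemetry.tracing.SpanScope;
import org.opensearch.telemetry.tracing.Tracer;

/** Hypothetical call site; the Tracer is whatever instance the node wires up. */
final class WrappedTracerSketch {
    static void runTraced(Tracer tracer, Runnable work) {
        // When telemetry.tracer.enabled is false, WrappedTracer routes every call
        // to NoopTracer.INSTANCE, so this whole block degrades to plain execution.
        Span span = tracer.startSpan(SpanCreationContext.server().name("my-operation"));
        try (SpanScope scope = tracer.withSpanInScope(span)) {
            work.run();
        } finally {
            span.endSpan();
        }
    }
}
```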
+ * + * @param delegate delegate + * @param span span + * @param tracer tracer + * @return http channel + */ + public static HttpChannel create(HttpChannel delegate, Span span, Tracer tracer) { + if (tracer.isRecording() == true) { + return new TraceableHttpChannel(delegate, span, tracer); + } else { + return delegate; + } + } + + @Override + public void handleException(Exception ex) { + span.addEvent("The HttpChannel was closed without sending the response"); + span.setError(ex); + span.endSpan(); + } + + @Override + public void close() { + delegate.close(); + } + + @Override + public void addCloseListener(ActionListener<Void> listener) { + delegate.addCloseListener(listener); + } + + @Override + public boolean isOpen() { + return delegate.isOpen(); + } + + @Override + public void sendResponse(HttpResponse response, ActionListener<Void> listener) { + delegate.sendResponse(response, TraceableActionListener.create(listener, span, tracer)); + } + + @Override + public InetSocketAddress getLocalAddress() { + return delegate.getLocalAddress(); + } + + @Override + public InetSocketAddress getRemoteAddress() { + return delegate.getRemoteAddress(); + } + + @Override + public <T> Optional<T> get(String name, Class<T> clazz) { + return delegate.get(name, clazz); + } +} diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableRestChannel.java b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableRestChannel.java new file mode 100644 index 0000000000000..32769dd1d848d --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableRestChannel.java @@ -0,0 +1,105 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing.channels; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.rest.RestChannel; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.RestResponse; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; + +import java.io.IOException; +import java.util.Objects; + +/** + * Tracer wrapped {@link RestChannel} + */ +public class TraceableRestChannel implements RestChannel { + + private final RestChannel delegate; + private final Span span; + + private final Tracer tracer; + + /** + * Constructor. + * + * @param delegate delegate + * @param span span + * @param tracer tracer + */ + private TraceableRestChannel(RestChannel delegate, Span span, Tracer tracer) { + this.span = Objects.requireNonNull(span); + this.delegate = Objects.requireNonNull(delegate); + this.tracer = Objects.requireNonNull(tracer); + } + + /** + * Factory method. 
+ * @param delegate delegate + * @param span span + * @param tracer tracer + * @return rest channel + */ + public static RestChannel create(RestChannel delegate, Span span, Tracer tracer) { + if (tracer.isRecording() == true) { + return new TraceableRestChannel(delegate, span, tracer); + } else { + return delegate; + } + } + + @Override + public XContentBuilder newBuilder() throws IOException { + return delegate.newBuilder(); + } + + @Override + public XContentBuilder newErrorBuilder() throws IOException { + return delegate.newErrorBuilder(); + } + + @Override + public XContentBuilder newBuilder(MediaType mediaType, boolean useFiltering) throws IOException { + return delegate.newBuilder(mediaType, useFiltering); + } + + @Override + public XContentBuilder newBuilder(MediaType mediaType, MediaType responseContentType, boolean useFiltering) throws IOException { + return delegate.newBuilder(mediaType, responseContentType, useFiltering); + } + + @Override + public BytesStreamOutput bytesOutput() { + return delegate.bytesOutput(); + } + + @Override + public RestRequest request() { + return delegate.request(); + } + + @Override + public boolean detailedErrorsEnabled() { + return delegate.detailedErrorsEnabled(); + } + + @Override + public void sendResponse(RestResponse response) { + try (SpanScope scope = tracer.withSpanInScope(span)) { + delegate.sendResponse(response); + } finally { + span.endSpan(); + } + } +} diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableTcpTransportChannel.java b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableTcpTransportChannel.java new file mode 100644 index 0000000000000..45268b4807cd9 --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableTcpTransportChannel.java @@ -0,0 +1,117 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing.channels; + +import org.opensearch.Version; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.transport.BaseTcpTransportChannel; +import org.opensearch.transport.TcpTransportChannel; +import org.opensearch.transport.TransportChannel; + +import java.io.IOException; +import java.util.Optional; + +/** + * Tracer wrapped {@link TransportChannel} + */ +public class TraceableTcpTransportChannel extends BaseTcpTransportChannel { + + private final TransportChannel delegate; + private final Span span; + private final Tracer tracer; + + /** + * Constructor. + * @param delegate delegate + * @param span span + * @param tracer tracer + */ + public TraceableTcpTransportChannel(TcpTransportChannel delegate, Span span, Tracer tracer) { + super(delegate.getChannel()); + this.delegate = delegate; + this.span = span; + this.tracer = tracer; + } + + /** + * Factory method. 
+ * + * @param delegate delegate + * @param span span + * @param tracer tracer + * @return transport channel + */ + public static TransportChannel create(TcpTransportChannel delegate, final Span span, final Tracer tracer) { + if (tracer.isRecording() == true) { + delegate.getChannel().addCloseListener(new ActionListener<Void>() { + @Override + public void onResponse(Void unused) { + onFailure(null); + } + + @Override + public void onFailure(Exception e) { + span.addEvent("The TransportChannel was closed without sending the response"); + span.setError(e); + span.endSpan(); + } + }); + + return new TraceableTcpTransportChannel(delegate, span, tracer); + } else { + return delegate; + } + } + + @Override + public String getProfileName() { + return delegate.getProfileName(); + } + + @Override + public String getChannelType() { + return delegate.getChannelType(); + } + + @Override + public void sendResponse(TransportResponse response) throws IOException { + try (SpanScope scope = tracer.withSpanInScope(span)) { + delegate.sendResponse(response); + } catch (final IOException ex) { + span.setError(ex); + throw ex; + } finally { + span.endSpan(); + } + } + + @Override + public void sendResponse(Exception exception) throws IOException { + try (SpanScope scope = tracer.withSpanInScope(span)) { + delegate.sendResponse(exception); + } finally { + span.setError(exception); + span.endSpan(); + } + } + + @Override + public Version getVersion() { + return delegate.getVersion(); + } + + @Override + public <T> Optional<T> get(String name, Class<T> clazz) { + return delegate.get(name, clazz); + } +} diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/channels/package-info.java b/server/src/main/java/org/opensearch/telemetry/tracing/channels/package-info.java new file mode 100644 index 0000000000000..ee4b675d5dc30 --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/channels/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains classes needed for tracing requests. + */ +package org.opensearch.telemetry.tracing.channels; diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/handler/TraceableTransportResponseHandler.java b/server/src/main/java/org/opensearch/telemetry/tracing/handler/TraceableTransportResponseHandler.java new file mode 100644 index 0000000000000..eb9d53d2df51b --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/handler/TraceableTransportResponseHandler.java @@ -0,0 +1,107 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
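A sketch of the intended inbound-handler usage of `TraceableTcpTransportChannel`, based only on the factory method above; the surrounding helper is hypothetical.

```java
import java.io.IOException;

import org.opensearch.core.transport.TransportResponse;
import org.opensearch.telemetry.tracing.Span;
import org.opensearch.telemetry.tracing.Tracer;
import org.opensearch.telemetry.tracing.channels.TraceableTcpTransportChannel;
import org.opensearch.transport.TcpTransportChannel;
import org.opensearch.transport.TransportChannel;

/** Hypothetical inbound-handler sketch around the traced channel. */
final class TracedChannelSketch {
    static void respond(TcpTransportChannel channel, Span span, Tracer tracer, TransportResponse response)
        throws IOException {
        // With a non-recording tracer, create() hands back the bare channel;
        // otherwise the span ends on sendResponse, or through the close listener
        // if the channel closes without ever sending a response.
        TransportChannel traced = TraceableTcpTransportChannel.create(channel, span, tracer);
        traced.sendResponse(response);
    }
}
```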
+ */ + +package org.opensearch.telemetry.tracing.handler; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.transport.TransportException; +import org.opensearch.transport.TransportResponseHandler; + +import java.io.IOException; +import java.util.Objects; + +/** + * Tracer wrapped {@link TransportResponseHandler} + * @param <T> TransportResponse + */ +public class TraceableTransportResponseHandler<T extends TransportResponse> implements TransportResponseHandler<T> { + + private final Span span; + private final TransportResponseHandler<T> delegate; + private final Tracer tracer; + + /** + * Constructor. + * + * @param delegate delegate + * @param span span + * @param tracer tracer + */ + private TraceableTransportResponseHandler(TransportResponseHandler<T> delegate, Span span, Tracer tracer) { + this.delegate = Objects.requireNonNull(delegate); + this.span = Objects.requireNonNull(span); + this.tracer = Objects.requireNonNull(tracer); + } + + /** + * Factory method. + * @param delegate delegate + * @param span span + * @param tracer tracer + * @return transportResponseHandler + */ + public static <S extends TransportResponse> TransportResponseHandler<S> create( + TransportResponseHandler<S> delegate, + Span span, + Tracer tracer + ) { + if (tracer.isRecording() == true) { + return new TraceableTransportResponseHandler<S>(delegate, span, tracer); + } else { + return delegate; + } + } + + @Override + public T read(StreamInput in) throws IOException { + return delegate.read(in); + } + + @Override + public void handleResponse(T response) { + try (SpanScope scope = tracer.withSpanInScope(span)) { + delegate.handleResponse(response); + } finally { + span.endSpan(); + } + } + + @Override + public void handleException(TransportException exp) { + try (SpanScope scope = tracer.withSpanInScope(span)) { + delegate.handleException(exp); + } finally { + span.setError(exp); + span.endSpan(); + } + } + + @Override + public String executor() { + return delegate.executor(); + } + + @Override + public String toString() { + return delegate.toString(); + } + + @Override + public void handleRejection(Exception exp) { + try (SpanScope scope = tracer.withSpanInScope(span)) { + delegate.handleRejection(exp); + } finally { + span.setError(exp); + span.endSpan(); + } + } +} diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/handler/package-info.java b/server/src/main/java/org/opensearch/telemetry/tracing/handler/package-info.java new file mode 100644 index 0000000000000..ff9f8f57dc07c --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/handler/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains classes needed for tracing requests. 
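A sender-side sketch for `TraceableTransportResponseHandler`: wrapping the handler before dispatching the request lets the span cover the full round trip. Helper names are hypothetical.

```java
import org.opensearch.core.transport.TransportResponse;
import org.opensearch.telemetry.tracing.Span;
import org.opensearch.telemetry.tracing.Tracer;
import org.opensearch.telemetry.tracing.handler.TraceableTransportResponseHandler;
import org.opensearch.transport.TransportResponseHandler;

/** Hypothetical sender-side sketch: wrap the handler before sending the request. */
final class TracedHandlerSketch {
    static <T extends TransportResponse> TransportResponseHandler<T> wrap(
        TransportResponseHandler<T> handler,
        Span span,
        Tracer tracer
    ) {
        // The span is ended in handleResponse/handleException/handleRejection,
        // so it covers the full round trip of the outbound request.
        return TraceableTransportResponseHandler.create(handler, span, tracer);
    }
}
```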
+ */ +package org.opensearch.telemetry.tracing.handler; diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/listener/TraceableActionListener.java b/server/src/main/java/org/opensearch/telemetry/tracing/listener/TraceableActionListener.java new file mode 100644 index 0000000000000..0cb4ce71d05f8 --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/listener/TraceableActionListener.java @@ -0,0 +1,76 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing.listener; + +import org.opensearch.core.action.ActionListener; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; + +import java.util.Objects; + +/** + * Tracer wrapped {@link ActionListener} + * @param <Response> response. + */ +public class TraceableActionListener<Response> implements ActionListener<Response> { + + private final ActionListener<Response> delegate; + private final Span span; + private final Tracer tracer; + + /** + * Constructor. + * + * @param delegate delegate + * @param span span + * @param tracer tracer + */ + private TraceableActionListener(ActionListener<Response> delegate, Span span, Tracer tracer) { + this.delegate = Objects.requireNonNull(delegate); + this.span = Objects.requireNonNull(span); + this.tracer = Objects.requireNonNull(tracer); + } + + /** + * Factory method. + * @param delegate delegate + * @param span span + * @param tracer tracer + * @return action listener + */ + public static <Response> ActionListener<Response> create(ActionListener<Response> delegate, Span span, Tracer tracer) { + if (tracer.isRecording() == true) { + return new TraceableActionListener<Response>(delegate, span, tracer); + } else { + return delegate; + } + } + + @Override + public void onResponse(Response response) { + try (SpanScope scope = tracer.withSpanInScope(span)) { + delegate.onResponse(response); + } finally { + span.endSpan(); + } + + } + + @Override + public void onFailure(Exception e) { + try (SpanScope scope = tracer.withSpanInScope(span)) { + delegate.onFailure(e); + } finally { + span.setError(e); + span.endSpan(); + } + + } +} diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/listener/TraceableSearchRequestOperationsListener.java b/server/src/main/java/org/opensearch/telemetry/tracing/listener/TraceableSearchRequestOperationsListener.java new file mode 100644 index 0000000000000..71fb59194c447 --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/listener/TraceableSearchRequestOperationsListener.java @@ -0,0 +1,75 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
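The same idiom applied to a plain ActionListener; a hypothetical caller (listener, span, tracer, client, and searchRequest are assumptions):

// Hypothetical: `traced` ends the span exactly once, on success or failure.
ActionListener<SearchResponse> traced = TraceableActionListener.create(listener, span, tracer);
client.search(searchRequest, traced);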
+ */ + +package org.opensearch.telemetry.tracing.listener; + +import org.opensearch.action.search.SearchPhaseContext; +import org.opensearch.action.search.SearchRequestContext; +import org.opensearch.action.search.SearchRequestOperationsListener; +import org.opensearch.telemetry.tracing.AttributeNames; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanContext; +import org.opensearch.telemetry.tracing.Tracer; + +/** + * SearchRequestOperationsListener subscriber for search request tracing + * + * @opensearch.internal + */ +public final class TraceableSearchRequestOperationsListener extends SearchRequestOperationsListener { + private final Tracer tracer; + private final Span requestSpan; + private SpanContext phaseSpanContext; + + public TraceableSearchRequestOperationsListener(final Tracer tracer, final Span requestSpan) { + this.tracer = tracer; + this.requestSpan = requestSpan; + this.phaseSpanContext = null; + } + + public static SearchRequestOperationsListener create(final Tracer tracer, final Span requestSpan) { + if (tracer.isRecording()) { + return new TraceableSearchRequestOperationsListener(tracer, requestSpan); + } else { + return SearchRequestOperationsListener.NOOP; + } + } + + @Override + protected void onPhaseStart(SearchPhaseContext context) { + assert phaseSpanContext == null : "There should be only one search phase active at a time"; + phaseSpanContext = tracer.getCurrentSpan(); + } + + @Override + protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + assert phaseSpanContext != null : "There should be a search phase active at that time"; + phaseSpanContext.endSpan(); + phaseSpanContext = null; + } + + @Override + protected void onPhaseFailure(SearchPhaseContext context, Throwable cause) { + assert phaseSpanContext != null : "There should be a search phase active at that time"; + phaseSpanContext.setError((Exception) cause); + phaseSpanContext.endSpan(); + phaseSpanContext = null; + } + + @Override + public void onRequestStart(SearchRequestContext searchRequestContext) {} + + @Override + public void onRequestEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { + // add response-related attributes on request end + requestSpan.addAttribute( + AttributeNames.TOTAL_HITS, + searchRequestContext.totalHits() == null ? 0 : searchRequestContext.totalHits().value + ); + } +} diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/listener/package-info.java b/server/src/main/java/org/opensearch/telemetry/tracing/listener/package-info.java new file mode 100644 index 0000000000000..5dcb570c2bb2e --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/listener/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains classes needed for tracing requests. 
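Because create(...) above degrades to SearchRequestOperationsListener.NOOP when the tracer is not recording, callers can register the listener unconditionally; a short sketch (requestSpan is assumed to exist):

// Sketch: safe to register even with tracing disabled.
SearchRequestOperationsListener listener =
    TraceableSearchRequestOperationsListener.create(tracer, requestSpan);
// When recording: onPhaseStart captures the current span, onPhaseEnd ends it,
// and onPhaseFailure marks it as errored before ending it, one phase at a time.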
*/ +package org.opensearch.telemetry.tracing.listener; diff --git a/server/src/main/java/org/opensearch/threadpool/ExecutorBuilder.java b/server/src/main/java/org/opensearch/threadpool/ExecutorBuilder.java index d26131484ba71..d57b74b3a45e0 100644 --- a/server/src/main/java/org/opensearch/threadpool/ExecutorBuilder.java +++ b/server/src/main/java/org/opensearch/threadpool/ExecutorBuilder.java @@ -32,6 +32,7 @@ package org.opensearch.threadpool; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.OpenSearchExecutors; @@ -44,8 +45,9 @@ * * @param <U> the underlying type of the executor settings * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class ExecutorBuilder<U extends ExecutorBuilder.ExecutorSettings> { private final String name; diff --git a/server/src/main/java/org/opensearch/threadpool/Scheduler.java b/server/src/main/java/org/opensearch/threadpool/Scheduler.java index 86c322ec89dd7..9733db29f5939 100644 --- a/server/src/main/java/org/opensearch/threadpool/Scheduler.java +++ b/server/src/main/java/org/opensearch/threadpool/Scheduler.java @@ -34,6 +34,7 @@ import org.opensearch.ExceptionsHelper; import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; @@ -60,7 +61,7 @@ public interface Scheduler { /** * Create a scheduler that can be used client side. Server side, please use <code>ThreadPool.schedule</code> instead. - * + * <p> * Notice that if any scheduled jobs fail with an exception, these will bubble up to the uncaught exception handler where they will * be logged as a warning. This includes jobs started using execute, submit and schedule. * @param settings the settings to use @@ -154,7 +155,10 @@ static ScheduledCancellable wrapAsScheduledCancellable(ScheduledFuture<?> schedu /** * This interface represents an object whose execution may be cancelled during runtime. + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") interface Cancellable { /** @@ -171,14 +175,17 @@ interface Cancellable { /** * A scheduled cancellable allows cancelling and reading the remaining delay of a scheduled task. + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") interface ScheduledCancellable extends Delayed, Cancellable {} /** * This class encapsulates the scheduling of a {@link Runnable} that needs to be repeated on an interval. For example, checking a value * for cleanup every second could be done by passing in a Runnable that can perform the check and the specified interval between * executions of this runnable. <em>NOTE:</em> the runnable is only rescheduled to run again after completion of the runnable. - * + * <p> * For this class, <i>completion</i> means that the call to {@link Runnable#run()} returned or an exception was thrown and caught. In * case of an exception, this class will log the exception and reschedule the runnable for its next execution.
This differs from the * {@link ScheduledThreadPoolExecutor#scheduleWithFixedDelay(Runnable, long, long, TimeUnit)} semantics as an exception there would diff --git a/server/src/main/java/org/opensearch/threadpool/TaskAwareRunnable.java b/server/src/main/java/org/opensearch/threadpool/TaskAwareRunnable.java index 183b9b2f4cf9a..90f50f78d84ad 100644 --- a/server/src/main/java/org/opensearch/threadpool/TaskAwareRunnable.java +++ b/server/src/main/java/org/opensearch/threadpool/TaskAwareRunnable.java @@ -25,7 +25,7 @@ /** * Responsible for wrapping the original task's runnable and sending updates on when it starts and finishes to * entities listening to the events. - * + * <p> * It's able to associate runnable with a task with the help of task Id available in thread context. */ public class TaskAwareRunnable extends AbstractRunnable implements WrappedRunnable { diff --git a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java index 0851677bcb13a..0b9026b81eb4e 100644 --- a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java @@ -37,23 +37,23 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.Version; import org.opensearch.common.Nullable; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.SizeValue; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.concurrent.XRejectedExecutionHandler; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.core.service.ReportingService; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.node.Node; -import org.opensearch.node.ReportingService; import java.io.IOException; import java.util.ArrayList; @@ -78,8 +78,9 @@ /** * The OpenSearch threadpool class * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ThreadPool implements ReportingService<ThreadPoolInfo>, Scheduler { private static final Logger logger = LogManager.getLogger(ThreadPool.class); @@ -113,14 +114,16 @@ public static class Names { public static final String TRANSLOG_SYNC = "translog_sync"; public static final String REMOTE_PURGE = "remote_purge"; public static final String REMOTE_REFRESH_RETRY = "remote_refresh_retry"; + public static final String REMOTE_RECOVERY = "remote_recovery"; public static final String INDEX_SEARCHER = "index_searcher"; } /** * The threadpool type. 
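Names above enumerates the built-in pools; since ExecutorBuilder is now @PublicApi, a plugin can contribute its own pool. A minimal sketch (the plugin class and pool name are hypothetical):

// Hypothetical plugin pool: scaling, sized like the remote_* pools in this change.
public class MyPlugin extends Plugin {
    @Override
    public List<ExecutorBuilder<?>> getExecutorBuilders(Settings settings) {
        int procs = OpenSearchExecutors.allocatedProcessors(settings);
        return Collections.singletonList(
            new ScalingExecutorBuilder("my_plugin_pool", 1, Math.max(1, (procs + 1) / 2), TimeValue.timeValueMinutes(5))
        );
    }
}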
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum ThreadPoolType { DIRECT("direct"), FIXED("fixed"), @@ -182,9 +185,8 @@ public static ThreadPoolType fromType(String type) { map.put(Names.TRANSLOG_SYNC, ThreadPoolType.FIXED); map.put(Names.REMOTE_PURGE, ThreadPoolType.SCALING); map.put(Names.REMOTE_REFRESH_RETRY, ThreadPoolType.SCALING); - if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { - map.put(Names.INDEX_SEARCHER, ThreadPoolType.RESIZABLE); - } + map.put(Names.REMOTE_RECOVERY, ThreadPoolType.SCALING); + map.put(Names.INDEX_SEARCHER, ThreadPoolType.RESIZABLE); THREAD_POOL_TYPES = Collections.unmodifiableMap(map); } @@ -226,6 +228,7 @@ public ThreadPool( final Map<String, ExecutorBuilder> builders = new HashMap<>(); final int allocatedProcessors = OpenSearchExecutors.allocatedProcessors(settings); + final int halfProc = halfAllocatedProcessors(allocatedProcessors); final int halfProcMaxAt5 = halfAllocatedProcessorsMaxFive(allocatedProcessors); final int halfProcMaxAt10 = halfAllocatedProcessorsMaxTen(allocatedProcessors); final int genericThreadPoolMax = boundedBy(4 * allocatedProcessors, 128, 512); @@ -259,20 +262,33 @@ public ThreadPool( builders.put(Names.SYSTEM_WRITE, new FixedExecutorBuilder(settings, Names.SYSTEM_WRITE, halfProcMaxAt5, 1000, false)); builders.put( Names.TRANSLOG_TRANSFER, - new ScalingExecutorBuilder(Names.TRANSLOG_TRANSFER, 1, halfProcMaxAt10, TimeValue.timeValueMinutes(5)) + new ScalingExecutorBuilder(Names.TRANSLOG_TRANSFER, 1, halfProc, TimeValue.timeValueMinutes(5)) ); builders.put(Names.TRANSLOG_SYNC, new FixedExecutorBuilder(settings, Names.TRANSLOG_SYNC, allocatedProcessors * 4, 10000)); - builders.put(Names.REMOTE_PURGE, new ScalingExecutorBuilder(Names.REMOTE_PURGE, 1, halfProcMaxAt5, TimeValue.timeValueMinutes(5))); + builders.put(Names.REMOTE_PURGE, new ScalingExecutorBuilder(Names.REMOTE_PURGE, 1, halfProc, TimeValue.timeValueMinutes(5))); builders.put( Names.REMOTE_REFRESH_RETRY, - new ScalingExecutorBuilder(Names.REMOTE_REFRESH_RETRY, 1, halfProcMaxAt10, TimeValue.timeValueMinutes(5)) + new ScalingExecutorBuilder(Names.REMOTE_REFRESH_RETRY, 1, halfProc, TimeValue.timeValueMinutes(5)) + ); + builders.put( + Names.REMOTE_RECOVERY, + new ScalingExecutorBuilder( + Names.REMOTE_RECOVERY, + 1, + twiceAllocatedProcessors(allocatedProcessors), + TimeValue.timeValueMinutes(5) + ) ); - if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { - builders.put( + builders.put( + Names.INDEX_SEARCHER, + new ResizableExecutorBuilder( + settings, Names.INDEX_SEARCHER, - new ResizableExecutorBuilder(settings, Names.INDEX_SEARCHER, allocatedProcessors, 1000, runnableTaskListener) - ); - } + twiceAllocatedProcessors(allocatedProcessors), + 1000, + runnableTaskListener + ) + ); for (final ExecutorBuilder<?> builder : customBuilders) { if (builders.containsKey(builder.name())) { @@ -312,7 +328,7 @@ public ThreadPool( /** * Returns a value of milliseconds that may be used for relative time calculations. - * + * <p> * This method should only be used for calculating time deltas. For an epoch based * timestamp, see {@link #absoluteTimeInMillis()}. */ @@ -322,7 +338,7 @@ public long relativeTimeInMillis() { /** * Returns a value of nanoseconds that may be used for relative time calculations. - * + * <p> * This method should only be used for calculating time deltas. For an epoch based * timestamp, see {@link #absoluteTimeInMillis()}. 
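In practice the split is: relative clocks for durations, the absolute clock for timestamps. A small sketch (doWork() is a placeholder):

// Durations: the cached relative clock is safe for deltas.
long startNanos = threadPool.relativeTimeInNanos();
doWork(); // placeholder
long tookMillis = TimeValue.nsecToMSec(threadPool.relativeTimeInNanos() - startNanos);
// Timestamps: the epoch clock can jump if the wall clock is adjusted.
long epochMillis = threadPool.absoluteTimeInMillis();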
*/ @@ -335,7 +351,7 @@ public long relativeTimeInNanos() { * that require the highest precision possible. Performance critical code must use * either {@link #relativeTimeInNanos()} or {@link #relativeTimeInMillis()} which * give better performance at the cost of lower precision. - * + * <p> * This method should only be used for calculating time deltas. For an epoch based * timestamp, see {@link #absoluteTimeInMillis()}. */ @@ -345,7 +361,7 @@ public long preciseRelativeTimeInNanos() { /** * Returns the value of milliseconds since UNIX epoch. - * + * <p> * This method should only be used for exact date/time formatting. For calculating * time deltas that should not suffer from negative deltas, which are possible with * this method, see {@link #relativeTimeInMillis()}. @@ -381,19 +397,21 @@ public ThreadPoolStats stats() { long rejected = -1; int largest = -1; long completed = -1; - if (holder.executor() instanceof ThreadPoolExecutor) { - ThreadPoolExecutor threadPoolExecutor = (ThreadPoolExecutor) holder.executor(); + long waitTimeNanos = -1; + if (holder.executor() instanceof OpenSearchThreadPoolExecutor) { + OpenSearchThreadPoolExecutor threadPoolExecutor = (OpenSearchThreadPoolExecutor) holder.executor(); threads = threadPoolExecutor.getPoolSize(); queue = threadPoolExecutor.getQueue().size(); active = threadPoolExecutor.getActiveCount(); largest = threadPoolExecutor.getLargestPoolSize(); completed = threadPoolExecutor.getCompletedTaskCount(); + waitTimeNanos = threadPoolExecutor.getPoolWaitTimeNanos(); RejectedExecutionHandler rejectedExecutionHandler = threadPoolExecutor.getRejectedExecutionHandler(); if (rejectedExecutionHandler instanceof XRejectedExecutionHandler) { rejected = ((XRejectedExecutionHandler) rejectedExecutionHandler).rejected(); } } - stats.add(new ThreadPoolStats.Stats(name, threads, queue, active, rejected, largest, completed)); + stats.add(new ThreadPoolStats.Stats(name, threads, queue, active, rejected, largest, completed, waitTimeNanos)); } return new ThreadPoolStats(stats); } @@ -539,6 +557,10 @@ static int boundedBy(int value, int min, int max) { return Math.min(max, Math.max(min, value)); } + static int halfAllocatedProcessors(int allocatedProcessors) { + return (allocatedProcessors + 1) / 2; + } + static int halfAllocatedProcessorsMaxFive(final int allocatedProcessors) { return boundedBy((allocatedProcessors + 1) / 2, 1, 5); } @@ -639,7 +661,7 @@ public String toString() { /** * A thread to cache millisecond time values from * {@link System#nanoTime()} and {@link System#currentTimeMillis()}. - * + * <p> * The values are updated at a specified interval. */ static class CachedTimeThread extends Thread { @@ -719,8 +741,9 @@ ExecutorService executor() { /** * The thread pool information. 
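A hypothetical consumer of the per-pool wait time collected in stats() above (the getters are added in the ThreadPoolStats change below; -1 marks pools that do not report it):

// Hypothetical: only pools backed by OpenSearchThreadPoolExecutor report wait time.
for (ThreadPoolStats.Stats s : threadPool.stats()) {
    if (s.getWaitTimeNanos() != -1) {
        logger.info("pool [{}] total wait time [{}]", s.getName(), s.getWaitTime());
    }
}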
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Info implements Writeable, ToXContentFragment { private final String name; diff --git a/server/src/main/java/org/opensearch/threadpool/ThreadPoolInfo.java b/server/src/main/java/org/opensearch/threadpool/ThreadPoolInfo.java index 5087624ec0b3f..49d961f0b506f 100644 --- a/server/src/main/java/org/opensearch/threadpool/ThreadPoolInfo.java +++ b/server/src/main/java/org/opensearch/threadpool/ThreadPoolInfo.java @@ -32,10 +32,11 @@ package org.opensearch.threadpool; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.service.ReportingService; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.node.ReportingService; import java.io.IOException; import java.util.Collections; @@ -45,8 +46,9 @@ /** * Information about a threadpool * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ThreadPoolInfo implements ReportingService.Info, Iterable<ThreadPool.Info> { private final List<ThreadPool.Info> infos; diff --git a/server/src/main/java/org/opensearch/threadpool/ThreadPoolStats.java b/server/src/main/java/org/opensearch/threadpool/ThreadPoolStats.java index b4d7e4a3fbf7a..968c2cc4c4887 100644 --- a/server/src/main/java/org/opensearch/threadpool/ThreadPoolStats.java +++ b/server/src/main/java/org/opensearch/threadpool/ThreadPoolStats.java @@ -32,6 +32,9 @@ package org.opensearch.threadpool; +import org.opensearch.Version; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -47,15 +50,17 @@ /** * Stats for a threadpool * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ThreadPoolStats implements Writeable, ToXContentFragment, Iterable<ThreadPoolStats.Stats> { /** * The statistics. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Stats implements Writeable, ToXContentFragment, Comparable<Stats> { private final String name; @@ -65,8 +70,9 @@ public static class Stats implements Writeable, ToXContentFragment, Comparable<S private final long rejected; private final int largest; private final long completed; + private final long waitTimeNanos; - public Stats(String name, int threads, int queue, int active, long rejected, int largest, long completed) { + public Stats(String name, int threads, int queue, int active, long rejected, int largest, long completed, long waitTimeNanos) { this.name = name; this.threads = threads; this.queue = queue; @@ -74,6 +80,7 @@ public Stats(String name, int threads, int queue, int active, long rejected, int this.rejected = rejected; this.largest = largest; this.completed = completed; + this.waitTimeNanos = waitTimeNanos; } public Stats(StreamInput in) throws IOException { @@ -84,6 +91,7 @@ public Stats(StreamInput in) throws IOException { rejected = in.readLong(); largest = in.readInt(); completed = in.readLong(); + waitTimeNanos = in.getVersion().onOrAfter(Version.V_2_11_0) ? 
in.readLong() : -1; } @Override @@ -95,6 +103,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(rejected); out.writeInt(largest); out.writeLong(completed); + if (out.getVersion().onOrAfter(Version.V_2_11_0)) { + out.writeLong(waitTimeNanos); + } } public String getName() { @@ -125,6 +136,14 @@ public long getCompleted() { return this.completed; } + public TimeValue getWaitTime() { + return TimeValue.timeValueNanos(waitTimeNanos); + } + + public long getWaitTimeNanos() { + return waitTimeNanos; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(name); @@ -146,6 +165,12 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (completed != -1) { builder.field(Fields.COMPLETED, completed); } + if (waitTimeNanos != -1) { + if (builder.humanReadable()) { + builder.field(Fields.WAIT_TIME, getWaitTime()); + } + builder.field(Fields.WAIT_TIME_NANOS, getWaitTimeNanos()); + } builder.endObject(); return builder; } @@ -197,6 +222,8 @@ static final class Fields { static final String REJECTED = "rejected"; static final String LARGEST = "largest"; static final String COMPLETED = "completed"; + static final String WAIT_TIME = "total_wait_time"; + static final String WAIT_TIME_NANOS = "total_wait_time_in_nanos"; } @Override diff --git a/server/src/main/java/org/opensearch/transport/ActionTransportException.java b/server/src/main/java/org/opensearch/transport/ActionTransportException.java index 97e9a986db7f4..fb5dd2c75dc75 100644 --- a/server/src/main/java/org/opensearch/transport/ActionTransportException.java +++ b/server/src/main/java/org/opensearch/transport/ActionTransportException.java @@ -34,7 +34,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/transport/BaseTcpTransportChannel.java b/server/src/main/java/org/opensearch/transport/BaseTcpTransportChannel.java new file mode 100644 index 0000000000000..14e065d3350c7 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/BaseTcpTransportChannel.java @@ -0,0 +1,33 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.transport; + +/** + * Base class TcpTransportChannel + */ +public abstract class BaseTcpTransportChannel implements TransportChannel { + private final TcpChannel channel; + + /** + * Constructor. 
+ * @param channel tcp channel + */ + public BaseTcpTransportChannel(TcpChannel channel) { + this.channel = channel; + } + + /** + * Returns {@link TcpChannel} + * @return TcpChannel + */ + public TcpChannel getChannel() { + return channel; + } + +} diff --git a/server/src/main/java/org/opensearch/transport/CloseableConnection.java b/server/src/main/java/org/opensearch/transport/CloseableConnection.java index b612f2bd17e0f..018575e15d8f1 100644 --- a/server/src/main/java/org/opensearch/transport/CloseableConnection.java +++ b/server/src/main/java/org/opensearch/transport/CloseableConnection.java @@ -32,8 +32,8 @@ package org.opensearch.transport; -import org.opensearch.action.ActionListener; import org.opensearch.common.concurrent.CompletableContext; +import org.opensearch.core.action.ActionListener; /** * Abstract Transport.Connection that provides common close logic. diff --git a/server/src/main/java/org/opensearch/transport/ClusterConnectionManager.java b/server/src/main/java/org/opensearch/transport/ClusterConnectionManager.java index 1a4854cfad8db..e634323d58269 100644 --- a/server/src/main/java/org/opensearch/transport/ClusterConnectionManager.java +++ b/server/src/main/java/org/opensearch/transport/ClusterConnectionManager.java @@ -33,15 +33,15 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.AbstractRefCounted; import org.opensearch.common.util.concurrent.ConcurrentCollections; -import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.ListenableFuture; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.RunOnce; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.action.ActionListener; import java.util.Collections; import java.util.Iterator; diff --git a/server/src/main/java/org/opensearch/transport/CompressibleBytesOutputStream.java b/server/src/main/java/org/opensearch/transport/CompressibleBytesOutputStream.java index 1deddf93b6252..57707d3b44477 100644 --- a/server/src/main/java/org/opensearch/transport/CompressibleBytesOutputStream.java +++ b/server/src/main/java/org/opensearch/transport/CompressibleBytesOutputStream.java @@ -32,12 +32,12 @@ package org.opensearch.transport; +import org.opensearch.common.io.Streams; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.compress.CompressorFactory; -import org.opensearch.common.io.Streams; import org.opensearch.core.common.io.stream.BytesStream; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.compress.CompressorRegistry; import java.io.IOException; import java.io.OutputStream; @@ -48,11 +48,11 @@ * requires that the underlying {@link DeflaterOutputStream} be closed to write EOS bytes. However, the * {@link BytesStream} should not be closed yet, as we have not used the bytes. This class handles these * intricacies. - * + * <p> * {@link CompressibleBytesOutputStream#materializeBytes()} should be called when all the bytes have been * written to this stream. If compression is enabled, the proper EOS bytes will be written at that point. * The underlying {@link BytesReference} will be returned. 
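The intended call sequence for the stream, sketched; the class is package-private, so this only compiles inside org.opensearch.transport, and `message` is a hypothetical Writeable. (As the paragraph below notes, close() deliberately leaves the underlying stream open.)

// Sketch: serialize, optionally compressing; materializeBytes() writes the
// deflater EOS bytes so the result is complete before it is read.
BytesStreamOutput bytesOut = new BytesStreamOutput();
CompressibleBytesOutputStream out = new CompressibleBytesOutputStream(bytesOut, shouldCompress);
message.writeTo(out);                               // hypothetical Writeable
BytesReference serialized = out.materializeBytes(); // bytes are usable from here
out.close();      // does NOT close bytesOut
bytesOut.close();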
- * + * <p> * {@link CompressibleBytesOutputStream#close()} will NOT close the underlying stream. The byte stream passed * in the constructor must be closed individually. * @@ -68,7 +68,7 @@ final class CompressibleBytesOutputStream extends StreamOutput { this.bytesStreamOutput = bytesStreamOutput; this.shouldCompress = shouldCompress; if (shouldCompress) { - this.stream = CompressorFactory.defaultCompressor().threadLocalOutputStream(Streams.flushOnCloseStream(bytesStreamOutput)); + this.stream = CompressorRegistry.defaultCompressor().threadLocalOutputStream(Streams.flushOnCloseStream(bytesStreamOutput)); } else { this.stream = bytesStreamOutput; } diff --git a/server/src/main/java/org/opensearch/transport/ConnectionManager.java b/server/src/main/java/org/opensearch/transport/ConnectionManager.java index d48f2eb4e0bb1..10cfc2907098f 100644 --- a/server/src/main/java/org/opensearch/transport/ConnectionManager.java +++ b/server/src/main/java/org/opensearch/transport/ConnectionManager.java @@ -32,8 +32,8 @@ package org.opensearch.transport; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.core.action.ActionListener; import java.io.Closeable; import java.util.Set; diff --git a/server/src/main/java/org/opensearch/transport/EmptyTransportResponseHandler.java b/server/src/main/java/org/opensearch/transport/EmptyTransportResponseHandler.java index 3b97a81faf192..1691b427ffca1 100644 --- a/server/src/main/java/org/opensearch/transport/EmptyTransportResponseHandler.java +++ b/server/src/main/java/org/opensearch/transport/EmptyTransportResponseHandler.java @@ -33,6 +33,7 @@ package org.opensearch.transport; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.threadpool.ThreadPool; /** diff --git a/server/src/main/java/org/opensearch/transport/Header.java b/server/src/main/java/org/opensearch/transport/Header.java index ba4a8cfef5534..a179cfb35288e 100644 --- a/server/src/main/java/org/opensearch/transport/Header.java +++ b/server/src/main/java/org/opensearch/transport/Header.java @@ -34,8 +34,8 @@ import org.opensearch.Version; import org.opensearch.common.collect.Tuple; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.common.io.stream.StreamInput; import java.io.IOException; import java.util.Arrays; diff --git a/server/src/main/java/org/opensearch/transport/InboundAggregator.java b/server/src/main/java/org/opensearch/transport/InboundAggregator.java index 60d45c8b3ad2b..e894331f3b64e 100644 --- a/server/src/main/java/org/opensearch/transport/InboundAggregator.java +++ b/server/src/main/java/org/opensearch/transport/InboundAggregator.java @@ -32,14 +32,14 @@ package org.opensearch.transport; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.breaker.CircuitBreakingException; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.core.common.bytes.CompositeBytesReference; import org.opensearch.common.bytes.ReleasableBytesReference; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.CircuitBreakingException; +import org.opensearch.core.common.bytes.BytesArray; +import 
org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.bytes.CompositeBytesReference; import java.io.IOException; import java.util.ArrayList; diff --git a/server/src/main/java/org/opensearch/transport/InboundDecoder.java b/server/src/main/java/org/opensearch/transport/InboundDecoder.java index 0dd39ed73d787..82fc09a985446 100644 --- a/server/src/main/java/org/opensearch/transport/InboundDecoder.java +++ b/server/src/main/java/org/opensearch/transport/InboundDecoder.java @@ -33,12 +33,12 @@ package org.opensearch.transport; import org.opensearch.Version; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.bytes.ReleasableBytesReference; -import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.common.util.io.IOUtils; -import org.opensearch.common.lease.Releasable; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; import java.io.IOException; import java.util.function.Consumer; diff --git a/server/src/main/java/org/opensearch/transport/InboundHandler.java b/server/src/main/java/org/opensearch/transport/InboundHandler.java index bb04f149d39a9..a8315c3cae4e0 100644 --- a/server/src/main/java/org/opensearch/transport/InboundHandler.java +++ b/server/src/main/java/org/opensearch/transport/InboundHandler.java @@ -37,20 +37,30 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.util.BytesRef; import org.opensearch.Version; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.core.common.io.stream.ByteBufferStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.transport.TransportAddress; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.concurrent.AbstractRunnable; -import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanBuilder; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.telemetry.tracing.channels.TraceableTcpTransportChannel; import org.opensearch.threadpool.ThreadPool; import java.io.EOFException; import java.io.IOException; import java.net.InetSocketAddress; import java.nio.ByteBuffer; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.stream.Collectors; /** * Handler for inbound data @@ -73,6 +83,8 @@ public class InboundHandler { private volatile long slowLogThresholdMs = Long.MAX_VALUE; + private final Tracer tracer; + InboundHandler( ThreadPool threadPool, OutboundHandler outboundHandler, @@ -80,7 +92,8 @@ public class InboundHandler { TransportHandshaker handshaker, TransportKeepAlive keepAlive, Transport.RequestHandlers requestHandlers, - Transport.ResponseHandlers responseHandlers + Transport.ResponseHandlers responseHandlers, + Tracer tracer ) { this.threadPool = threadPool; this.outboundHandler = 
outboundHandler; @@ -89,6 +102,7 @@ public class InboundHandler { this.keepAlive = keepAlive; this.requestHandlers = requestHandlers; this.responseHandlers = responseHandlers; + this.tracer = tracer; } void setMessageListener(TransportMessageListener listener) { @@ -107,7 +121,6 @@ void inboundMessage(TcpChannel channel, InboundMessage message) throws Exception final long startTime = threadPool.relativeTimeInMillis(); channel.getChannelStats().markAccessed(startTime); TransportLogger.logInboundMessage(channel, message); - if (message.isPing()) { keepAlive.receiveKeepAlive(channel); } else { @@ -122,7 +135,6 @@ private void messageReceived(TcpChannel channel, InboundMessage message, long st final InetSocketAddress remoteAddress = channel.getRemoteAddress(); final Header header = message.getHeader(); assert header.needsToReadVariableHeader() == false; - ThreadContext threadContext = threadPool.getThreadContext(); try (ThreadContext.StoredContext existing = threadContext.stashContext()) { // Place the context with the headers from the message @@ -164,6 +176,7 @@ private void messageReceived(TcpChannel channel, InboundMessage message, long st handleResponse(requestId, remoteAddress, EMPTY_STREAM_INPUT, handler); } } + } } finally { final long took = threadPool.relativeTimeInMillis() - startTime; @@ -179,84 +192,98 @@ private void messageReceived(TcpChannel channel, InboundMessage message, long st } } + private Map<String, Collection<String>> extractHeaders(Map<String, String> headers) { + return headers.entrySet().stream().collect(Collectors.toMap(e -> e.getKey(), e -> Collections.singleton(e.getValue()))); + } + private <T extends TransportRequest> void handleRequest(TcpChannel channel, Header header, InboundMessage message) throws IOException { final String action = header.getActionName(); final long requestId = header.getRequestId(); final Version version = header.getVersion(); - if (header.isHandshake()) { - messageListener.onRequestReceived(requestId, action); - // Cannot short circuit handshakes - assert message.isShortCircuit() == false; - final StreamInput stream = namedWriteableStream(message.openOrGetStreamInput()); - assertRemoteVersion(stream, header.getVersion()); - final TransportChannel transportChannel = new TcpTransportChannel( - outboundHandler, - channel, - action, - requestId, - version, - header.getFeatures(), - header.isCompressed(), - header.isHandshake(), - message.takeBreakerReleaseControl() - ); - try { - handshaker.handleHandshake(transportChannel, requestId, stream); - } catch (Exception e) { - if (Version.CURRENT.isCompatible(header.getVersion())) { - sendErrorResponse(action, transportChannel, e); - } else { - logger.warn( - new ParameterizedMessage( - "could not send error response to handshake received on [{}] using wire format version [{}], closing channel", - channel, - header.getVersion() - ), - e - ); - channel.close(); - } - } - } else { - final TransportChannel transportChannel = new TcpTransportChannel( - outboundHandler, - channel, - action, - requestId, - version, - header.getFeatures(), - header.isCompressed(), - header.isHandshake(), - message.takeBreakerReleaseControl() - ); - try { + final Map<String, Collection<String>> headers = extractHeaders(header.getHeaders().v1()); + Span span = tracer.startSpan(SpanBuilder.from(action, channel), headers); + try (SpanScope spanScope = tracer.withSpanInScope(span)) { + if (header.isHandshake()) { messageListener.onRequestReceived(requestId, action); - if (message.isShortCircuit()) { - sendErrorResponse(action, 
transportChannel, message.getException()); - } else { - final StreamInput stream = namedWriteableStream(message.openOrGetStreamInput()); - assertRemoteVersion(stream, header.getVersion()); - final RequestHandlerRegistry<T> reg = requestHandlers.getHandler(action); - assert reg != null; - - final T request = newRequest(requestId, action, stream, reg); - request.remoteAddress(new TransportAddress(channel.getRemoteAddress())); - checkStreamIsFullyConsumed(requestId, action, stream); - - final String executor = reg.getExecutor(); - if (ThreadPool.Names.SAME.equals(executor)) { - try { - reg.processMessageReceived(request, transportChannel); - } catch (Exception e) { - sendErrorResponse(reg.getAction(), transportChannel, e); - } + // Cannot short circuit handshakes + assert message.isShortCircuit() == false; + final StreamInput stream = namedWriteableStream(message.openOrGetStreamInput()); + assertRemoteVersion(stream, header.getVersion()); + final TcpTransportChannel transportChannel = new TcpTransportChannel( + outboundHandler, + channel, + action, + requestId, + version, + header.getFeatures(), + header.isCompressed(), + header.isHandshake(), + message.takeBreakerReleaseControl() + ); + TransportChannel traceableTransportChannel = TraceableTcpTransportChannel.create(transportChannel, span, tracer); + try { + handshaker.handleHandshake(traceableTransportChannel, requestId, stream); + } catch (Exception e) { + if (Version.CURRENT.isCompatible(header.getVersion())) { + sendErrorResponse(action, traceableTransportChannel, e); } else { - threadPool.executor(executor).execute(new RequestHandler<>(reg, request, transportChannel)); + logger.warn( + new ParameterizedMessage( + "could not send error response to handshake received on [{}] using wire format version [{}], closing channel", + channel, + header.getVersion() + ), + e + ); + channel.close(); } } - } catch (Exception e) { - sendErrorResponse(action, transportChannel, e); + } else { + final TcpTransportChannel transportChannel = new TcpTransportChannel( + outboundHandler, + channel, + action, + requestId, + version, + header.getFeatures(), + header.isCompressed(), + header.isHandshake(), + message.takeBreakerReleaseControl() + ); + TransportChannel traceableTransportChannel = TraceableTcpTransportChannel.create(transportChannel, span, tracer); + try { + messageListener.onRequestReceived(requestId, action); + if (message.isShortCircuit()) { + sendErrorResponse(action, traceableTransportChannel, message.getException()); + } else { + final StreamInput stream = namedWriteableStream(message.openOrGetStreamInput()); + assertRemoteVersion(stream, header.getVersion()); + final RequestHandlerRegistry<T> reg = requestHandlers.getHandler(action); + assert reg != null; + + final T request = newRequest(requestId, action, stream, reg); + request.remoteAddress(new TransportAddress(channel.getRemoteAddress())); + checkStreamIsFullyConsumed(requestId, action, stream); + + final String executor = reg.getExecutor(); + if (ThreadPool.Names.SAME.equals(executor)) { + try { + reg.processMessageReceived(request, traceableTransportChannel); + } catch (Exception e) { + sendErrorResponse(reg.getAction(), traceableTransportChannel, e); + } + } else { + threadPool.executor(executor).execute(new RequestHandler<>(reg, request, traceableTransportChannel)); + } + } + } catch (Exception e) { + sendErrorResponse(action, traceableTransportChannel, e); + } } + } catch (Exception e) { + span.setError(e); + span.endSpan(); + throw e; } } diff --git 
a/server/src/main/java/org/opensearch/transport/InboundMessage.java b/server/src/main/java/org/opensearch/transport/InboundMessage.java index e19e5b45025aa..a1ed682ff7d7f 100644 --- a/server/src/main/java/org/opensearch/transport/InboundMessage.java +++ b/server/src/main/java/org/opensearch/transport/InboundMessage.java @@ -33,10 +33,10 @@ package org.opensearch.transport; import org.opensearch.common.bytes.ReleasableBytesReference; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.util.io.IOUtils; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.io.stream.StreamInput; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/transport/InboundPipeline.java b/server/src/main/java/org/opensearch/transport/InboundPipeline.java index 4a4f928860819..dd4690e5e6abf 100644 --- a/server/src/main/java/org/opensearch/transport/InboundPipeline.java +++ b/server/src/main/java/org/opensearch/transport/InboundPipeline.java @@ -33,12 +33,12 @@ package org.opensearch.transport; import org.opensearch.Version; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.core.common.bytes.CompositeBytesReference; import org.opensearch.common.bytes.ReleasableBytesReference; -import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; +import org.opensearch.common.util.PageCacheRecycler; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.bytes.CompositeBytesReference; import java.io.IOException; import java.util.ArrayDeque; diff --git a/server/src/main/java/org/opensearch/transport/NetworkMessage.java b/server/src/main/java/org/opensearch/transport/NetworkMessage.java index a24dd282031fd..f02d664b65929 100644 --- a/server/src/main/java/org/opensearch/transport/NetworkMessage.java +++ b/server/src/main/java/org/opensearch/transport/NetworkMessage.java @@ -32,8 +32,8 @@ package org.opensearch.transport; import org.opensearch.Version; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.common.io.stream.Writeable; /** * Represents a transport message sent over the network. 
Subclasses implement serialization and diff --git a/server/src/main/java/org/opensearch/transport/OutboundHandler.java b/server/src/main/java/org/opensearch/transport/OutboundHandler.java index 3e493267242fb..b83dbdd0effe4 100644 --- a/server/src/main/java/org/opensearch/transport/OutboundHandler.java +++ b/server/src/main/java/org/opensearch/transport/OutboundHandler.java @@ -36,20 +36,21 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.Version; -import org.opensearch.action.ActionListener; -import org.opensearch.action.NotifyOnceListener; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.CheckedSupplier; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.stream.ReleasableBytesStreamOutput; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.network.CloseableChannel; import org.opensearch.common.transport.NetworkExceptionHelper; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.io.IOUtils; -import org.opensearch.common.lease.Releasable; -import org.opensearch.common.lease.Releasables; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.NotifyOnceListener; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.threadpool.ThreadPool; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/transport/OutboundMessage.java b/server/src/main/java/org/opensearch/transport/OutboundMessage.java index d969351425acd..3dafc6fb5eea9 100644 --- a/server/src/main/java/org/opensearch/transport/OutboundMessage.java +++ b/server/src/main/java/org/opensearch/transport/OutboundMessage.java @@ -32,13 +32,13 @@ package org.opensearch.transport; import org.opensearch.Version; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.bytes.CompositeBytesReference; -import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.util.concurrent.ThreadContext; import java.io.IOException; import java.util.Set; diff --git a/server/src/main/java/org/opensearch/transport/PlainTransportFuture.java b/server/src/main/java/org/opensearch/transport/PlainTransportFuture.java index 53d71c135a8dd..ff9ca8b189904 100644 --- a/server/src/main/java/org/opensearch/transport/PlainTransportFuture.java +++ b/server/src/main/java/org/opensearch/transport/PlainTransportFuture.java @@ -34,8 +34,9 @@ import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchTimeoutException; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.util.concurrent.BaseFuture; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; import java.util.concurrent.ExecutionException; diff --git 
a/server/src/main/java/org/opensearch/transport/ProxyConnectionStrategy.java b/server/src/main/java/org/opensearch/transport/ProxyConnectionStrategy.java index b1eefb9fac245..b4477edaba687 100644 --- a/server/src/main/java/org/opensearch/transport/ProxyConnectionStrategy.java +++ b/server/src/main/java/org/opensearch/transport/ProxyConnectionStrategy.java @@ -34,18 +34,18 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.util.concurrent.CountDown; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.xcontent.XContentBuilder; import java.io.IOException; @@ -309,7 +309,7 @@ public void onResponse(Void v) { @Override public void onFailure(Exception e) { - logger.debug( + logger.error( new ParameterizedMessage( "failed to open remote connection [remote cluster: {}, address: {}]", clusterAlias, diff --git a/server/src/main/java/org/opensearch/transport/RemoteClusterAwareClient.java b/server/src/main/java/org/opensearch/transport/RemoteClusterAwareClient.java index ac79b21d73f69..0389c76d1ade6 100644 --- a/server/src/main/java/org/opensearch/transport/RemoteClusterAwareClient.java +++ b/server/src/main/java/org/opensearch/transport/RemoteClusterAwareClient.java @@ -31,15 +31,15 @@ package org.opensearch.transport; -import org.opensearch.action.ActionType; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionListenerResponseHandler; import org.opensearch.action.ActionRequest; -import org.opensearch.action.ActionResponse; +import org.opensearch.action.ActionType; import org.opensearch.client.Client; import org.opensearch.client.support.AbstractClient; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; import org.opensearch.threadpool.ThreadPool; /** diff --git a/server/src/main/java/org/opensearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/opensearch/transport/RemoteClusterConnection.java index d95c0c647916b..8a5f6dfffb036 100644 --- a/server/src/main/java/org/opensearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/opensearch/transport/RemoteClusterConnection.java @@ -31,18 +31,18 @@ package org.opensearch.transport; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.state.ClusterStateAction; import org.opensearch.action.admin.cluster.state.ClusterStateRequest; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; import org.opensearch.action.support.ContextPreservingActionListener; import org.opensearch.cluster.node.DiscoveryNode; import 
org.opensearch.cluster.node.DiscoveryNodes; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.threadpool.ThreadPool; import java.io.Closeable; @@ -54,10 +54,10 @@ * current node is part of the cluster and it won't receive cluster state updates from the remote cluster. Remote clusters are also not * fully connected with the current node. From a connection perspective a local cluster forms a bi-directional star network while in the * remote case we only connect to a subset of the nodes in the cluster in a uni-directional fashion. - * + * <p> * This class also handles the discovery of nodes from the remote cluster. The initial list of seed nodes is only used to discover all nodes * in the remote cluster and connects to all eligible nodes, for details see {@link RemoteClusterService#REMOTE_NODE_ATTRIBUTE}. - * + * <p> * In the case of a disconnection, this class will issue a re-connect task to establish at most * {@link SniffConnectionStrategy#REMOTE_CONNECTIONS_PER_CLUSTER} until either all eligible nodes are exhausted or the maximum number of * connections per cluster has been reached. @@ -123,7 +123,7 @@ void ensureConnected(ActionListener<Void> listener) { /** * Collects all nodes on the connected cluster and returns / passes a nodeID to {@link DiscoveryNode} lookup function * that returns <code>null</code> if the node ID is not found. - * + * <p> * The requests to get cluster state on the connected cluster are made in the system context because logically * they are equivalent to checking a single detail in the local cluster state and should not require that the * user who made the request that is using this method in its implementation is authorized to view the entire diff --git a/server/src/main/java/org/opensearch/transport/RemoteClusterService.java b/server/src/main/java/org/opensearch/transport/RemoteClusterService.java index 60e166a4e300c..87786fb22f22e 100644 --- a/server/src/main/java/org/opensearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/opensearch/transport/RemoteClusterService.java @@ -34,14 +34,12 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; import org.opensearch.action.OriginalIndices; import org.opensearch.action.support.GroupedActionListener; import org.opensearch.action.support.IndicesOptions; import org.opensearch.client.Client; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; -import org.opensearch.common.Strings; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; @@ -49,6 +47,8 @@ import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.util.concurrent.CountDown; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.Strings; import org.opensearch.threadpool.ThreadPool; import java.io.Closeable; @@ -270,7 +270,7 @@ protected void updateRemoteCluster(String clusterAlias, Settings settings) { // are on the cluster state thread and our custom future
implementation will throw an // assertion. if (latch.await(10, TimeUnit.SECONDS) == false) { - logger.warn("failed to connect to new remote cluster {} within {}", clusterAlias, TimeValue.timeValueSeconds(10)); + logger.error("failed to connect to new remote cluster {} within {}", clusterAlias, TimeValue.timeValueSeconds(10)); } } catch (InterruptedException e) { Thread.currentThread().interrupt(); diff --git a/server/src/main/java/org/opensearch/transport/RemoteConnectionInfo.java b/server/src/main/java/org/opensearch/transport/RemoteConnectionInfo.java index 9271fd36b8cbe..280dd958358fd 100644 --- a/server/src/main/java/org/opensearch/transport/RemoteConnectionInfo.java +++ b/server/src/main/java/org/opensearch/transport/RemoteConnectionInfo.java @@ -32,10 +32,10 @@ package org.opensearch.transport; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.TimeValue; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; diff --git a/server/src/main/java/org/opensearch/transport/RemoteConnectionManager.java b/server/src/main/java/org/opensearch/transport/RemoteConnectionManager.java index 4da23175c4a0e..bd646f10df517 100644 --- a/server/src/main/java/org/opensearch/transport/RemoteConnectionManager.java +++ b/server/src/main/java/org/opensearch/transport/RemoteConnectionManager.java @@ -32,8 +32,8 @@ package org.opensearch.transport; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.core.action.ActionListener; import java.io.IOException; import java.util.ArrayList; diff --git a/server/src/main/java/org/opensearch/transport/RemoteConnectionStrategy.java b/server/src/main/java/org/opensearch/transport/RemoteConnectionStrategy.java index 76afeb20d18cf..f0b37c617725e 100644 --- a/server/src/main/java/org/opensearch/transport/RemoteConnectionStrategy.java +++ b/server/src/main/java/org/opensearch/transport/RemoteConnectionStrategy.java @@ -36,15 +36,15 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ContextPreservingActionListener; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/transport/RemoteTransportException.java b/server/src/main/java/org/opensearch/transport/RemoteTransportException.java index 041a70795b8de..de3d6bb9d775e 100644 --- a/server/src/main/java/org/opensearch/transport/RemoteTransportException.java +++ b/server/src/main/java/org/opensearch/transport/RemoteTransportException.java @@ -34,7 +34,7 @@ import org.opensearch.OpenSearchWrapperException; import 
org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java b/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java index bd1ccda2cbe30..98c182c562928 100644 --- a/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java +++ b/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java @@ -32,11 +32,11 @@ package org.opensearch.transport; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.Task; @@ -91,14 +91,14 @@ public void processMessageReceived(Request request, TransportChannel channel) th Releasable unregisterTask = () -> taskManager.unregister(task); try { - if (channel instanceof TcpTransportChannel && task instanceof CancellableTask) { + if (channel instanceof BaseTcpTransportChannel && task instanceof CancellableTask) { if (request instanceof ShardSearchRequest) { // on receiving request, update the inbound network time to reflect time spent in transit over the network ((ShardSearchRequest) request).setInboundNetworkTime( Math.max(0, System.currentTimeMillis() - ((ShardSearchRequest) request).getInboundNetworkTime()) ); } - final TcpChannel tcpChannel = ((TcpTransportChannel) channel).getChannel(); + final TcpChannel tcpChannel = ((BaseTcpTransportChannel) channel).getChannel(); final Releasable stopTracking = taskManager.startTrackingCancellableChannelTask(tcpChannel, (CancellableTask) task); unregisterTask = Releasables.wrap(unregisterTask, stopTracking); } diff --git a/server/src/main/java/org/opensearch/transport/SniffConnectionStrategy.java b/server/src/main/java/org/opensearch/transport/SniffConnectionStrategy.java index 5e00704b3baaf..07ba96b135189 100644 --- a/server/src/main/java/org/opensearch/transport/SniffConnectionStrategy.java +++ b/server/src/main/java/org/opensearch/transport/SniffConnectionStrategy.java @@ -34,7 +34,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.StepListener; import org.opensearch.action.admin.cluster.state.ClusterStateAction; import org.opensearch.action.admin.cluster.state.ClusterStateRequest; @@ -45,16 +44,17 @@ import org.opensearch.common.Booleans; import org.opensearch.common.SetOnce; import org.opensearch.common.UUIDs; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.action.ActionListener; import 
org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.util.io.IOUtils; import org.opensearch.threadpool.ThreadPool; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/transport/TaskTransportChannel.java b/server/src/main/java/org/opensearch/transport/TaskTransportChannel.java index 4dceee4c48d4d..4dab0039ec878 100644 --- a/server/src/main/java/org/opensearch/transport/TaskTransportChannel.java +++ b/server/src/main/java/org/opensearch/transport/TaskTransportChannel.java @@ -34,8 +34,10 @@ import org.opensearch.Version; import org.opensearch.common.lease.Releasable; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; +import java.util.Optional; /** * Transport channel for tasks @@ -88,4 +90,9 @@ public Version getVersion() { public TransportChannel getChannel() { return channel; } + + @Override + public <T> Optional<T> get(String name, Class<T> clazz) { + return getChannel().get(name, clazz); + } } diff --git a/server/src/main/java/org/opensearch/transport/TcpChannel.java b/server/src/main/java/org/opensearch/transport/TcpChannel.java index 2e13fb49dcd15..f98b65d0a4df1 100644 --- a/server/src/main/java/org/opensearch/transport/TcpChannel.java +++ b/server/src/main/java/org/opensearch/transport/TcpChannel.java @@ -32,12 +32,13 @@ package org.opensearch.transport; -import org.opensearch.action.ActionListener; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.network.CloseableChannel; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesReference; import java.net.InetSocketAddress; +import java.util.Optional; /** * This is a tcp channel representing a single channel connection to another node. It is the base channel @@ -96,6 +97,20 @@ public interface TcpChannel extends CloseableChannel { */ ChannelStats getChannelStats(); + /** + * Returns the contextual property associated with this specific TCP channel (the + * implementation of how such properties are managed depends on the particular + * transport engine). 
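As a rough sketch of the provider side of this new hook (the class and the "channel" property name below are illustrative, not part of this change), a transport engine could override the default, which always returns Optional.empty(), and expose an engine-specific object by name:

import java.util.Optional;

// Hypothetical TcpChannel-style implementation that manages one named property.
class SketchEngineChannel {
    private final Object engineChannel; // stand-in for an engine-specific type

    SketchEngineChannel(Object engineChannel) {
        this.engineChannel = engineChannel;
    }

    // Same shape as the get(String, Class) accessor added above: return the
    // property only when the name matches and the value has the expected type.
    <T> Optional<T> get(String name, Class<T> clazz) {
        if ("channel".equals(name) && clazz.isInstance(engineChannel)) {
            return Optional.of(clazz.cast(engineChannel));
        }
        return Optional.empty();
    }
}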
+ * + * @param name the name of the property + * @param clazz the expected type of the property + * + * @return the value of the property + */ + default <T> Optional<T> get(String name, Class<T> clazz) { + return Optional.empty(); + } + /** * Channel statistics * diff --git a/server/src/main/java/org/opensearch/transport/TcpTransport.java b/server/src/main/java/org/opensearch/transport/TcpTransport.java index 75676307f4026..d0e6516973382 100644 --- a/server/src/main/java/org/opensearch/transport/TcpTransport.java +++ b/server/src/main/java/org/opensearch/transport/TcpTransport.java @@ -36,17 +36,11 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.OpenSearchException; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ThreadedActionListener; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Booleans; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.component.AbstractLifecycleComponent; -import org.opensearch.common.component.Lifecycle; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; +import org.opensearch.common.lifecycle.Lifecycle; import org.opensearch.common.metrics.MeanMetric; import org.opensearch.common.network.CloseableChannel; import org.opensearch.common.network.NetworkAddress; @@ -54,20 +48,27 @@ import org.opensearch.common.network.NetworkUtils; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; import org.opensearch.common.transport.PortsRange; -import org.opensearch.common.transport.TransportAddress; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.util.concurrent.CountDown; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; -import org.opensearch.indices.breaker.CircuitBreakerService; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.indices.breaker.CircuitBreakerService; +import org.opensearch.core.rest.RestStatus; import org.opensearch.monitor.jvm.JvmInfo; import org.opensearch.node.Node; -import org.opensearch.core.rest.RestStatus; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import java.io.IOException; @@ -159,7 +160,8 @@ public TcpTransport( PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, - NetworkService networkService + NetworkService networkService, + Tracer tracer ) { this.settings = 
settings; this.profileSettings = getProfileSettings(settings); @@ -208,7 +210,8 @@ public TcpTransport( handshaker, keepAlive, requestHandlers, - responseHandlers + responseHandlers, + tracer ); } diff --git a/server/src/main/java/org/opensearch/transport/TcpTransportChannel.java b/server/src/main/java/org/opensearch/transport/TcpTransportChannel.java index e25003648794d..81de0af07ea7c 100644 --- a/server/src/main/java/org/opensearch/transport/TcpTransportChannel.java +++ b/server/src/main/java/org/opensearch/transport/TcpTransportChannel.java @@ -34,9 +34,11 @@ import org.opensearch.Version; import org.opensearch.common.lease.Releasable; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.search.query.QuerySearchResult; import java.io.IOException; +import java.util.Optional; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; @@ -45,11 +47,10 @@ * * @opensearch.internal */ -public final class TcpTransportChannel implements TransportChannel { +public final class TcpTransportChannel extends BaseTcpTransportChannel { private final AtomicBoolean released = new AtomicBoolean(); private final OutboundHandler outboundHandler; - private final TcpChannel channel; private final String action; private final long requestId; private final Version version; @@ -69,9 +70,9 @@ public final class TcpTransportChannel implements TransportChannel { boolean isHandshake, Releasable breakerRelease ) { + super(channel); this.version = version; this.features = features; - this.channel = channel; this.outboundHandler = outboundHandler; this.action = action; this.requestId = requestId; @@ -82,7 +83,7 @@ public final class TcpTransportChannel implements TransportChannel { @Override public String getProfileName() { - return channel.getProfile(); + return getChannel().getProfile(); } @Override @@ -92,7 +93,7 @@ public void sendResponse(TransportResponse response) throws IOException { // update outbound network time with current time before sending response over network ((QuerySearchResult) response).getShardSearchRequest().setOutboundNetworkTime(System.currentTimeMillis()); } - outboundHandler.sendResponse(version, features, channel, requestId, action, response, compressResponse, isHandshake); + outboundHandler.sendResponse(version, features, getChannel(), requestId, action, response, compressResponse, isHandshake); } finally { release(false); } @@ -101,7 +102,7 @@ public void sendResponse(TransportResponse response) throws IOException { @Override public void sendResponse(Exception exception) throws IOException { try { - outboundHandler.sendErrorResponse(version, features, channel, requestId, action, exception); + outboundHandler.sendErrorResponse(version, features, getChannel(), requestId, action, exception); } finally { release(true); } @@ -130,7 +131,8 @@ public Version getVersion() { return version; } - public TcpChannel getChannel() { - return channel; + @Override + public <T> Optional<T> get(String name, Class<T> clazz) { + return getChannel().get(name, clazz); } } diff --git a/server/src/main/java/org/opensearch/transport/Transport.java b/server/src/main/java/org/opensearch/transport/Transport.java index fbf6b3ef350d8..8abedff37db14 100644 --- a/server/src/main/java/org/opensearch/transport/Transport.java +++ b/server/src/main/java/org/opensearch/transport/Transport.java @@ -33,15 +33,17 @@ package org.opensearch.transport; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.node.DiscoveryNode; +import 
org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.MapBuilder; -import org.opensearch.common.component.LifecycleComponent; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.common.lifecycle.LifecycleComponent; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.util.concurrent.ConcurrentMapLong; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.transport.TransportResponse; import java.io.Closeable; import java.io.IOException; @@ -110,7 +112,10 @@ default boolean isSecure() { /** * A unidirectional connection to a {@link DiscoveryNode} + * + * @opensearch.api */ + @PublicApi(since = "1.0.0") interface Connection extends Closeable { /** * The node this connection is associated with diff --git a/server/src/main/java/org/opensearch/transport/TransportActionProxy.java b/server/src/main/java/org/opensearch/transport/TransportActionProxy.java index 8ad6010800ad8..a61aec8a34e20 100644 --- a/server/src/main/java/org/opensearch/transport/TransportActionProxy.java +++ b/server/src/main/java/org/opensearch/transport/TransportActionProxy.java @@ -35,6 +35,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/transport/TransportChannel.java b/server/src/main/java/org/opensearch/transport/TransportChannel.java index b660db029c0b2..f84ee5dc745c3 100644 --- a/server/src/main/java/org/opensearch/transport/TransportChannel.java +++ b/server/src/main/java/org/opensearch/transport/TransportChannel.java @@ -36,8 +36,10 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.Version; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; +import java.util.Optional; /** * A transport channel allows to send a response to a request on the channel. @@ -77,4 +79,18 @@ static void sendErrorResponse(TransportChannel channel, String actionName, Trans ); } } + + /** + * Returns the contextual property associated with this specific transport channel (the + * implementation of how such properties are managed depends on the particular + * transport engine). + * + * @param name the name of the property + * @param clazz the expected type of the property + * + * @return the value of the property. 
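On the consuming side, a minimal sketch (the "channel" property name and the helper class are assumptions for illustration): callers probe the channel for a named property and handle the empty case, since transports that manage no properties inherit the Optional.empty() default:

import java.util.Optional;

import org.opensearch.transport.TransportChannel;

// Hypothetical consumer: look up an engine-specific property on the channel.
final class ChannelPropertySketch {
    static void inspect(TransportChannel channel) {
        Optional<Object> property = channel.get("channel", Object.class);
        property.ifPresent(value -> {
            // engine-specific handling would go here
        });
    }
}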
+ */ + default <T> Optional<T> get(String name, Class<T> clazz) { + return Optional.empty(); + } } diff --git a/server/src/main/java/org/opensearch/transport/TransportDecompressor.java b/server/src/main/java/org/opensearch/transport/TransportDecompressor.java index 3e53c10c6a069..8fbc3b7ce6803 100644 --- a/server/src/main/java/org/opensearch/transport/TransportDecompressor.java +++ b/server/src/main/java/org/opensearch/transport/TransportDecompressor.java @@ -34,13 +34,13 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.bytes.ReleasableBytesReference; -import org.opensearch.common.compress.Compressor; -import org.opensearch.common.compress.CompressorFactory; import org.opensearch.common.recycler.Recycler; import org.opensearch.common.util.PageCacheRecycler; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.compress.Compressor; +import org.opensearch.core.compress.CompressorRegistry; import java.io.Closeable; import java.io.IOException; @@ -70,7 +70,7 @@ public TransportDecompressor(PageCacheRecycler recycler) { public int decompress(BytesReference bytesReference) throws IOException { int bytesConsumed = 0; if (hasReadHeader == false) { - final Compressor compressor = CompressorFactory.defaultCompressor(); + final Compressor compressor = CompressorRegistry.defaultCompressor(); if (compressor.isCompressed(bytesReference) == false) { int maxToRead = Math.min(bytesReference.length(), 10); StringBuilder sb = new StringBuilder("stream marked as compressed, but no compressor found, first [").append(maxToRead) @@ -137,7 +137,7 @@ public int decompress(BytesReference bytesReference) throws IOException { } public boolean canDecompress(int bytesAvailable) { - return hasReadHeader || bytesAvailable >= CompressorFactory.defaultCompressor().headerLength(); + return hasReadHeader || bytesAvailable >= CompressorRegistry.defaultCompressor().headerLength(); } public boolean isEOS() { diff --git a/server/src/main/java/org/opensearch/transport/TransportException.java b/server/src/main/java/org/opensearch/transport/TransportException.java index 1522f2378bca3..5e557a77ff704 100644 --- a/server/src/main/java/org/opensearch/transport/TransportException.java +++ b/server/src/main/java/org/opensearch/transport/TransportException.java @@ -33,6 +33,7 @@ package org.opensearch.transport; import org.opensearch.OpenSearchException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import java.io.IOException; @@ -40,8 +41,9 @@ /** * Thrown for any transport errors * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class TransportException extends OpenSearchException { public TransportException(Throwable cause) { super(cause); diff --git a/server/src/main/java/org/opensearch/transport/TransportHandshaker.java b/server/src/main/java/org/opensearch/transport/TransportHandshaker.java index 7b64b328469ad..d0b00ec9c59db 100644 --- a/server/src/main/java/org/opensearch/transport/TransportHandshaker.java +++ b/server/src/main/java/org/opensearch/transport/TransportHandshaker.java @@ -32,14 +32,15 @@ package org.opensearch.transport; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.node.DiscoveryNode; -import 
org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.metrics.CounterMetric; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.threadpool.ThreadPool; import java.io.EOFException; diff --git a/server/src/main/java/org/opensearch/transport/TransportInfo.java b/server/src/main/java/org/opensearch/transport/TransportInfo.java index 0ee8672e8df8c..75179f7932238 100644 --- a/server/src/main/java/org/opensearch/transport/TransportInfo.java +++ b/server/src/main/java/org/opensearch/transport/TransportInfo.java @@ -33,14 +33,14 @@ package org.opensearch.transport; import org.opensearch.common.Nullable; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.network.InetAddresses; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.service.ReportingService; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.node.ReportingService; import java.io.IOException; import java.util.HashMap; diff --git a/server/src/main/java/org/opensearch/transport/TransportInterceptor.java b/server/src/main/java/org/opensearch/transport/TransportInterceptor.java index f4b003cae4864..e8efbeb7de3f9 100644 --- a/server/src/main/java/org/opensearch/transport/TransportInterceptor.java +++ b/server/src/main/java/org/opensearch/transport/TransportInterceptor.java @@ -34,6 +34,8 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.core.common.io.stream.Writeable.Reader; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; /** * This interface allows plugins to intercept requests on both the sender and the receiver side. @@ -56,6 +58,19 @@ default <T extends TransportRequest> TransportRequestHandler<T> interceptHandler return actualHandler; } + /** + * This is called for handlers that need admission control support + */ + default <T extends TransportRequest> TransportRequestHandler<T> interceptHandler( + String action, + String executor, + boolean forceExecution, + TransportRequestHandler<T> actualHandler, + AdmissionControlActionType admissionControlActionType + ) { + return interceptHandler(action, executor, forceExecution, actualHandler); + } + /** * This is called up-front providing the actual low level {@link AsyncSender} that performs the low level send request. 
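To make the new five-argument overload concrete, a sketch (the interceptor class is made up; only the shape of the API comes from this diff): since the default implementation drops the action type and delegates to the existing four-argument interceptHandler, an admission-control-aware interceptor only has to override the new variant:

import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType;
import org.opensearch.transport.TransportInterceptor;
import org.opensearch.transport.TransportRequest;
import org.opensearch.transport.TransportRequestHandler;

// Hypothetical interceptor that would wrap opted-in handlers with
// resource-usage checks keyed on the admission control action type.
class SketchAdmissionControlInterceptor implements TransportInterceptor {
    @Override
    public <T extends TransportRequest> TransportRequestHandler<T> interceptHandler(
        String action,
        String executor,
        boolean forceExecution,
        TransportRequestHandler<T> actualHandler,
        AdmissionControlActionType admissionControlActionType
    ) {
        // A real implementation would evaluate resource-usage limits for
        // admissionControlActionType before letting the request through;
        // this sketch just falls back to the default interception path.
        return interceptHandler(action, executor, forceExecution, actualHandler);
    }
}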
* The returned sender is used to send all requests that come in via diff --git a/server/src/main/java/org/opensearch/transport/TransportKeepAlive.java b/server/src/main/java/org/opensearch/transport/TransportKeepAlive.java index e48267d943739..bbf4a9b668d5e 100644 --- a/server/src/main/java/org/opensearch/transport/TransportKeepAlive.java +++ b/server/src/main/java/org/opensearch/transport/TransportKeepAlive.java @@ -34,15 +34,15 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.common.AsyncBiFunction; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.component.Lifecycle; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.lifecycle.Lifecycle; import org.opensearch.common.metrics.CounterMetric; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractLifecycleRunnable; import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.threadpool.ThreadPool; import java.io.Closeable; diff --git a/server/src/main/java/org/opensearch/transport/TransportLogger.java b/server/src/main/java/org/opensearch/transport/TransportLogger.java index d7d00a20964f8..997b3bb5ba18e 100644 --- a/server/src/main/java/org/opensearch/transport/TransportLogger.java +++ b/server/src/main/java/org/opensearch/transport/TransportLogger.java @@ -34,12 +34,12 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.Version; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.compress.CompressorFactory; import org.opensearch.core.common.io.stream.InputStreamStreamInput; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.compress.CompressorRegistry; import java.io.IOException; @@ -179,7 +179,7 @@ private static String format(TcpChannel channel, InboundMessage message, String private static StreamInput decompressingStream(byte status, StreamInput streamInput) throws IOException { if (TransportStatus.isCompress(status) && streamInput.available() > 0) { try { - return new InputStreamStreamInput(CompressorFactory.defaultCompressor().threadLocalInputStream(streamInput)); + return new InputStreamStreamInput(CompressorRegistry.defaultCompressor().threadLocalInputStream(streamInput)); } catch (IllegalArgumentException e) { throw new IllegalStateException("stream marked as compressed, but is missing deflate header"); } diff --git a/server/src/main/java/org/opensearch/transport/TransportMessageListener.java b/server/src/main/java/org/opensearch/transport/TransportMessageListener.java index 8a7612d3bd99a..dfcd7acce3706 100644 --- a/server/src/main/java/org/opensearch/transport/TransportMessageListener.java +++ b/server/src/main/java/org/opensearch/transport/TransportMessageListener.java @@ -32,6 +32,7 @@ package org.opensearch.transport; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.core.transport.TransportResponse; /** * Listens for transport messages diff --git 
a/server/src/main/java/org/opensearch/transport/TransportRequest.java b/server/src/main/java/org/opensearch/transport/TransportRequest.java index 95b038303f530..c62cf59d3be2f 100644 --- a/server/src/main/java/org/opensearch/transport/TransportRequest.java +++ b/server/src/main/java/org/opensearch/transport/TransportRequest.java @@ -32,18 +32,21 @@ package org.opensearch.transport; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.core.transport.TransportMessage; import org.opensearch.tasks.TaskAwareRequest; -import org.opensearch.tasks.TaskId; import java.io.IOException; /** * A transport request * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class TransportRequest extends TransportMessage implements TaskAwareRequest { /** * Empty transport request diff --git a/server/src/main/java/org/opensearch/transport/TransportRequestDeduplicator.java b/server/src/main/java/org/opensearch/transport/TransportRequestDeduplicator.java index eec0310557690..9746c5f5cb94e 100644 --- a/server/src/main/java/org/opensearch/transport/TransportRequestDeduplicator.java +++ b/server/src/main/java/org/opensearch/transport/TransportRequestDeduplicator.java @@ -32,8 +32,8 @@ package org.opensearch.transport; -import org.opensearch.action.ActionListener; import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.core.action.ActionListener; import java.util.ArrayList; import java.util.List; diff --git a/server/src/main/java/org/opensearch/transport/TransportRequestOptions.java b/server/src/main/java/org/opensearch/transport/TransportRequestOptions.java index fe54d8185ea5f..9f44d93f0cd71 100644 --- a/server/src/main/java/org/opensearch/transport/TransportRequestOptions.java +++ b/server/src/main/java/org/opensearch/transport/TransportRequestOptions.java @@ -32,13 +32,15 @@ package org.opensearch.transport; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; /** * Options for transport requests * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class TransportRequestOptions { private final TimeValue timeout; @@ -62,8 +64,9 @@ public Type type() { /** * Type of transport request * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Type { RECOVERY, BULK, @@ -79,8 +82,9 @@ public static Builder builder() { /** * Builder for transport request options * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { private TimeValue timeout; private Type type = Type.REG; diff --git a/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java b/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java index 674a62fb75b7d..8992af18edb48 100644 --- a/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java +++ b/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java @@ -34,6 +34,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; import java.util.function.Function; @@ -51,6 +52,13 @@ public interface TransportResponseHandler<T extends TransportResponse> extends W String executor(); + /** + 
* This method handles the rejection/failure scenarios where the connection to the node is rejected or fails. + It should be used to clean up the resources held by the {@link TransportResponseHandler}. + @param exp exception + */ + default void handleRejection(Exception exp) {} + default <Q extends TransportResponse> TransportResponseHandler<Q> wrap(Function<Q, T> converter, Writeable.Reader<Q> reader) { final TransportResponseHandler<T> self = this; return new TransportResponseHandler<Q>() { diff --git a/server/src/main/java/org/opensearch/transport/TransportService.java b/server/src/main/java/org/opensearch/transport/TransportService.java index b8d7d130e846b..652d57f4c5348 100644 --- a/server/src/main/java/org/opensearch/transport/TransportService.java +++ b/server/src/main/java/org/opensearch/transport/TransportService.java @@ -37,36 +37,41 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.OpenSearchServerException; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionListenerResponseHandler; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Nullable; -import org.opensearch.common.component.AbstractLifecycleComponent; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.io.stream.Streamables; -import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.logging.Loggers; import org.opensearch.common.regex.Regex; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.io.IOUtils; -import org.opensearch.common.lease.Releasable; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; -import org.opensearch.core.xcontent.MediaTypeParserRegistry; +import org.opensearch.core.service.ReportingService; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.node.NodeClosedException; -import org.opensearch.node.ReportingService; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskManager; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanBuilder; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.telemetry.tracing.handler.TraceableTransportResponseHandler; import 
org.opensearch.threadpool.Scheduler; import org.opensearch.threadpool.ThreadPool; @@ -133,11 +138,11 @@ protected boolean removeEldestEntry(Map.Entry eldest) { // tracer log private final Logger tracerLog; - volatile String[] tracerLogInclude; volatile String[] tracerLogExclude; private final RemoteClusterService remoteClusterService; + private final Tracer tracer; /** if set will call requests sent to this id to shortcut and executed locally */ volatile DiscoveryNode localNode = null; @@ -166,15 +171,13 @@ public void close() {} }; static { - /** - * Registers server specific types as a streamables for serialization - * over the {@link StreamOutput} and {@link StreamInput} wire + /* + Registers server specific types as a streamables for serialization + over the {@link StreamOutput} and {@link StreamInput} wire */ Streamables.registerStreamables(); - /** Registers OpenSearch server specific exceptions (exceptions outside of core library) */ + /* Registers OpenSearch server specific exceptions (exceptions outside of core library) */ OpenSearchServerException.registerExceptions(); - // set the default media type to JSON (fallback if a media type is not specified) - MediaTypeParserRegistry.setDefaultMediaType(XContentType.JSON); } /** does nothing. easy way to ensure class is loaded so the above static block is called to register the streamables */ @@ -193,7 +196,8 @@ public TransportService( TransportInterceptor transportInterceptor, Function<BoundTransportAddress, DiscoveryNode> localNodeFactory, @Nullable ClusterSettings clusterSettings, - Set<String> taskHeaders + Set<String> taskHeaders, + Tracer tracer ) { this( settings, @@ -203,7 +207,8 @@ public TransportService( localNodeFactory, clusterSettings, taskHeaders, - new ClusterConnectionManager(settings, transport) + new ClusterConnectionManager(settings, transport), + tracer ); } @@ -215,7 +220,8 @@ public TransportService( Function<BoundTransportAddress, DiscoveryNode> localNodeFactory, @Nullable ClusterSettings clusterSettings, Set<String> taskHeaders, - ConnectionManager connectionManager + ConnectionManager connectionManager, + Tracer tracer ) { this.transport = transport; transport.setSlowLogThreshold(TransportSettings.SLOW_OPERATION_THRESHOLD_SETTING.get(settings)); @@ -230,6 +236,7 @@ public TransportService( this.interceptor = transportInterceptor; this.asyncSender = interceptor.interceptSender(this::sendRequestInternal); this.remoteClusterClient = DiscoveryNode.isRemoteClusterClient(settings); + this.tracer = tracer; remoteClusterService = new RemoteClusterService(settings, this); responseHandlers = transport.getResponseHandlers(); if (clusterSettings != null) { @@ -331,6 +338,7 @@ protected void doStop() { getExecutorService().execute(new AbstractRunnable() { @Override public void onRejection(Exception e) { + holderToNotify.handler().handleRejection(e); // if we get rejected during node shutdown we don't wanna bubble it up logger.debug( () -> new ParameterizedMessage( @@ -343,6 +351,7 @@ public void onRejection(Exception e) { @Override public void onFailure(Exception e) { + holderToNotify.handler().handleRejection(e); logger.warn( () -> new ParameterizedMessage( "failed to notify response handler on exception, action: {}", @@ -859,53 +868,10 @@ public final <T extends TransportResponse> void sendRequest( final TransportRequestOptions options, final TransportResponseHandler<T> handler ) { - try { - logger.debug("Action: " + action); - final TransportResponseHandler<T> delegate; - if (request.getParentTask().isSet()) { - // 
TODO: capture the connection instead so that we can cancel child tasks on the remote connections. - final Releasable unregisterChildNode = taskManager.registerChildNode(request.getParentTask().getId(), connection.getNode()); - delegate = new TransportResponseHandler<T>() { - @Override - public void handleResponse(T response) { - unregisterChildNode.close(); - handler.handleResponse(response); - } - - @Override - public void handleException(TransportException exp) { - unregisterChildNode.close(); - handler.handleException(exp); - } - - @Override - public String executor() { - return handler.executor(); - } - - @Override - public T read(StreamInput in) throws IOException { - return handler.read(in); - } - - @Override - public String toString() { - return getClass().getName() + "/[" + action + "]:" + handler.toString(); - } - }; - } else { - delegate = handler; - } - asyncSender.sendRequest(connection, action, request, options, delegate); - } catch (final Exception ex) { - // the caller might not handle this so we invoke the handler - final TransportException te; - if (ex instanceof TransportException) { - te = (TransportException) ex; - } else { - te = new TransportException("failure to send", ex); - } - handler.handleException(te); + final Span span = tracer.startSpan(SpanBuilder.from(action, connection)); + try (SpanScope spanScope = tracer.withSpanInScope(span)) { + TransportResponseHandler<T> traceableTransportResponseHandler = TraceableTransportResponseHandler.create(handler, span, tracer); + sendRequestAsync(connection, action, request, options, traceableTransportResponseHandler); } } @@ -1015,6 +981,7 @@ private <T extends TransportResponse> void sendRequestInternal( threadPool.executor(executor).execute(new AbstractRunnable() { @Override public void onRejection(Exception e) { + contextToNotify.handler().handleRejection(e); // if we get rejected during node shutdown we don't wanna bubble it up logger.debug( () -> new ParameterizedMessage( @@ -1027,6 +994,7 @@ public void onRejection(Exception e) { @Override public void onFailure(Exception e) { + contextToNotify.handler().handleRejection(e); logger.warn( () -> new ParameterizedMessage( "failed to notify response handler on exception, action: {}", @@ -1137,7 +1105,8 @@ public TransportAddress[] addressesFromString(String address) throws UnknownHost "cluster:admin", "cluster:monitor", "cluster:internal", - "internal:" + "internal:", + "views:" ) ) ); @@ -1224,6 +1193,40 @@ public <Request extends TransportRequest> void registerRequestHandler( transport.registerRequestHandler(reg); } + /** + * Registers a new request handler with admission control support + * + * @param action The action the request handler is associated with + * @param executor The executor the request handling will be executed on + * @param forceExecution Force execution on the executor queue and never reject it + * @param canTripCircuitBreaker Check the request size and raise an exception in case the limit is breached. 
+ * @param admissionControlActionType Admission control based on resource usage limits of provided action type + * @param requestReader The request class that will be used to construct new instances for streaming + * @param handler The handler itself that implements the request handling + */ + public <Request extends TransportRequest> void registerRequestHandler( + String action, + String executor, + boolean forceExecution, + boolean canTripCircuitBreaker, + AdmissionControlActionType admissionControlActionType, + Writeable.Reader<Request> requestReader, + TransportRequestHandler<Request> handler + ) { + validateActionName(action); + handler = interceptor.interceptHandler(action, executor, forceExecution, handler, admissionControlActionType); + RequestHandlerRegistry<Request> reg = new RequestHandlerRegistry<>( + action, + requestReader, + taskManager, + handler, + executor, + forceExecution, + canTripCircuitBreaker + ); + transport.registerRequestHandler(reg); + } + /** * called by the {@link Transport} implementation when an incoming request arrives but before * any parsing of it has happened (with the exception of the requestId and action) @@ -1673,4 +1676,61 @@ public void onResponseReceived(long requestId, Transport.ResponseContext holder) } } } + + private <T extends TransportResponse> void sendRequestAsync( + final Transport.Connection connection, + final String action, + final TransportRequest request, + final TransportRequestOptions options, + final TransportResponseHandler<T> handler + ) { + try { + logger.debug("Action: " + action); + final TransportResponseHandler<T> delegate; + if (request.getParentTask().isSet()) { + // TODO: capture the connection instead so that we can cancel child tasks on the remote connections. + final Releasable unregisterChildNode = taskManager.registerChildNode(request.getParentTask().getId(), connection.getNode()); + delegate = new TransportResponseHandler<T>() { + @Override + public void handleResponse(T response) { + unregisterChildNode.close(); + handler.handleResponse(response); + } + + @Override + public void handleException(TransportException exp) { + unregisterChildNode.close(); + handler.handleException(exp); + } + + @Override + public String executor() { + return handler.executor(); + } + + @Override + public T read(StreamInput in) throws IOException { + return handler.read(in); + } + + @Override + public String toString() { + return getClass().getName() + "/[" + action + "]:" + handler.toString(); + } + }; + } else { + delegate = handler; + } + asyncSender.sendRequest(connection, action, request, options, delegate); + } catch (final Exception ex) { + // the caller might not handle this so we invoke the handler + final TransportException te; + if (ex instanceof TransportException) { + te = (TransportException) ex; + } else { + te = new TransportException("failure to send", ex); + } + handler.handleException(te); + } + } } diff --git a/server/src/main/java/org/opensearch/transport/TransportSettings.java b/server/src/main/java/org/opensearch/transport/TransportSettings.java index b4d72ab40409b..23305c58ef834 100644 --- a/server/src/main/java/org/opensearch/transport/TransportSettings.java +++ b/server/src/main/java/org/opensearch/transport/TransportSettings.java @@ -35,8 +35,8 @@ import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; +import 
org.opensearch.core.common.unit.ByteSizeValue; import java.util.Arrays; import java.util.List; diff --git a/server/src/main/java/org/opensearch/transport/TransportStats.java b/server/src/main/java/org/opensearch/transport/TransportStats.java index 29544754a5729..e3c4773f4a472 100644 --- a/server/src/main/java/org/opensearch/transport/TransportStats.java +++ b/server/src/main/java/org/opensearch/transport/TransportStats.java @@ -35,7 +35,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; diff --git a/server/src/main/java/org/opensearch/watcher/FileWatcher.java b/server/src/main/java/org/opensearch/watcher/FileWatcher.java index c27ade408e3af..d773e3b5d7c9e 100644 --- a/server/src/main/java/org/opensearch/watcher/FileWatcher.java +++ b/server/src/main/java/org/opensearch/watcher/FileWatcher.java @@ -33,8 +33,8 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.core.util.FileSystemUtils; -import org.opensearch.common.util.CollectionUtils; import java.io.IOException; import java.nio.file.Files; @@ -44,7 +44,7 @@ /** * File resources watcher - * + * <p> * The file watcher checks directory and all its subdirectories for file changes and notifies its listeners accordingly * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/watcher/ResourceWatcher.java b/server/src/main/java/org/opensearch/watcher/ResourceWatcher.java index 0c656c52388df..a54ba60815883 100644 --- a/server/src/main/java/org/opensearch/watcher/ResourceWatcher.java +++ b/server/src/main/java/org/opensearch/watcher/ResourceWatcher.java @@ -31,6 +31,8 @@ package org.opensearch.watcher; +import org.opensearch.common.annotation.PublicApi; + import java.io.IOException; /** @@ -39,8 +41,9 @@ * Different resource watchers can be registered with {@link ResourceWatcherService} to be called * periodically in order to check for changes in different external resources. 
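A minimal sketch of that contract (the marker-file logic is invented for illustration): init() runs once when the watcher is added, and checkAndNotify() runs on every periodic check:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

import org.opensearch.watcher.ResourceWatcher;

// Hypothetical watcher: tracks whether a marker file exists and reacts to changes.
class SketchMarkerFileWatcher implements ResourceWatcher {
    private final Path marker;
    private boolean lastSeen;

    SketchMarkerFileWatcher(Path marker) {
        this.marker = marker;
    }

    @Override
    public void init() throws IOException {
        // called once when the watcher is registered with ResourceWatcherService
        lastSeen = Files.exists(marker);
    }

    @Override
    public void checkAndNotify() throws IOException {
        // called periodically at the service's configured interval
        boolean nowSeen = Files.exists(marker);
        if (nowSeen != lastSeen) {
            lastSeen = nowSeen;
            // notify interested listeners about the change here
        }
    }
}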
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ResourceWatcher { /** * Called once when the resource watcher is added to {@link ResourceWatcherService} diff --git a/server/src/main/java/org/opensearch/watcher/ResourceWatcherService.java b/server/src/main/java/org/opensearch/watcher/ResourceWatcherService.java index 5ac1b735f72ae..519d7c6e68a57 100644 --- a/server/src/main/java/org/opensearch/watcher/ResourceWatcherService.java +++ b/server/src/main/java/org/opensearch/watcher/ResourceWatcherService.java @@ -33,12 +33,13 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.Scheduler.Cancellable; +import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool.Names; import java.io.Closeable; @@ -48,22 +49,24 @@ /** * Generic resource watcher service - * + * <p> * Other opensearch services can register their resource watchers with this service using {@link #add(ResourceWatcher)} * method. This service will call {@link org.opensearch.watcher.ResourceWatcher#checkAndNotify()} method of all * registered watcher periodically. The frequency of checks can be specified using {@code resource.reload.interval} setting, which * defaults to {@code 60s}. The service can be disabled by setting {@code resource.reload.enabled} setting to {@code false}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ResourceWatcherService implements Closeable { private static final Logger logger = LogManager.getLogger(ResourceWatcherService.class); /** * Frequency level to watch. 
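For illustration, registration would then pick the cadence at add time (assuming the add overload that takes a frequency and an already-constructed watcher; both variable names are hypothetical):

// Hypothetical registration at an explicit check frequency; the returned
// handle can stop the periodic checks later.
WatcherHandle<SketchMarkerFileWatcher> handle = resourceWatcherService.add(watcher, ResourceWatcherService.Frequency.HIGH);
handle.stop();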
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Frequency { /** diff --git a/server/src/main/java/org/opensearch/watcher/WatcherHandle.java b/server/src/main/java/org/opensearch/watcher/WatcherHandle.java index 6890364cd0cd6..dd8f898e11860 100644 --- a/server/src/main/java/org/opensearch/watcher/WatcherHandle.java +++ b/server/src/main/java/org/opensearch/watcher/WatcherHandle.java @@ -32,11 +32,14 @@ package org.opensearch.watcher; +import org.opensearch.common.annotation.PublicApi; + /** * Handle to a watcher * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class WatcherHandle<W extends ResourceWatcher> { private final ResourceWatcherService.ResourceMonitor monitor; diff --git a/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec b/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec deleted file mode 100644 index 8b37d91cd8bc4..0000000000000 --- a/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec +++ /dev/null @@ -1,2 +0,0 @@ -org.opensearch.index.codec.customcodecs.ZstdCodec -org.opensearch.index.codec.customcodecs.ZstdNoDictCodec diff --git a/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat b/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat index 2c92f0ecd3f51..80b1d25064885 100644 --- a/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat +++ b/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat @@ -1 +1,2 @@ org.apache.lucene.search.suggest.document.Completion50PostingsFormat +org.opensearch.index.codec.fuzzy.FuzzyFilterPostingsFormat diff --git a/server/src/main/resources/META-INF/services/org.opensearch.core.compress.spi.CompressorProvider b/server/src/main/resources/META-INF/services/org.opensearch.core.compress.spi.CompressorProvider new file mode 100644 index 0000000000000..8d93d45035f3f --- /dev/null +++ b/server/src/main/resources/META-INF/services/org.opensearch.core.compress.spi.CompressorProvider @@ -0,0 +1,9 @@ +# +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. 
+# + +org.opensearch.common.compress.spi.ServerCompressorProvider diff --git a/server/src/main/resources/org/opensearch/bootstrap/security.policy b/server/src/main/resources/org/opensearch/bootstrap/security.policy index 77cd0ab05278e..e1226345ef961 100644 --- a/server/src/main/resources/org/opensearch/bootstrap/security.policy +++ b/server/src/main/resources/org/opensearch/bootstrap/security.policy @@ -187,5 +187,4 @@ grant { permission java.io.FilePermission "/sys/fs/cgroup/cpuacct/-", "read"; permission java.io.FilePermission "/sys/fs/cgroup/memory", "read"; permission java.io.FilePermission "/sys/fs/cgroup/memory/-", "read"; - }; diff --git a/server/src/test/java/org/opensearch/BuildTests.java b/server/src/test/java/org/opensearch/BuildTests.java index a7daa5d901109..8a09278f58559 100644 --- a/server/src/test/java/org/opensearch/BuildTests.java +++ b/server/src/test/java/org/opensearch/BuildTests.java @@ -32,12 +32,12 @@ package org.opensearch; -import org.opensearch.core.util.FileSystemUtils; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.core.util.FileSystemUtils; import org.opensearch.test.EqualsHashCodeTestUtils; +import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; import java.io.InputStream; diff --git a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java index 2656810af0379..d7026159d9ec0 100644 --- a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java @@ -40,6 +40,8 @@ import org.opensearch.action.OriginalIndices; import org.opensearch.action.RoutingMissingException; import org.opensearch.action.TimestampParsingException; +import org.opensearch.action.admin.indices.view.ViewAlreadyExistsException; +import org.opensearch.action.admin.indices.view.ViewNotFoundException; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.action.support.replication.ReplicationOperation; @@ -50,6 +52,7 @@ import org.opensearch.cluster.block.IndexCreateBlockException; import org.opensearch.cluster.coordination.CoordinationStateRejectedException; import org.opensearch.cluster.coordination.NoClusterManagerBlockService; +import org.opensearch.cluster.coordination.NodeHealthCheckFailureException; import org.opensearch.cluster.decommission.DecommissioningFailedException; import org.opensearch.cluster.decommission.NodeDecommissionedException; import org.opensearch.cluster.node.DiscoveryNode; @@ -61,28 +64,31 @@ import org.opensearch.cluster.routing.TestShardRouting; import org.opensearch.cluster.routing.UnsupportedWeightedRoutingStateException; import org.opensearch.cluster.service.ClusterManagerThrottlingException; -import org.opensearch.core.common.ParsingException; -import org.opensearch.common.Strings; import org.opensearch.common.UUIDs; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.breaker.CircuitBreakingException; import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.PathUtils; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.util.CancellableThreadsTests; +import org.opensearch.common.util.set.Sets; +import 
org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.CircuitBreakingException; import org.opensearch.core.common.io.stream.NotSerializableExceptionWrapper; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.transport.TransportAddress; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.common.util.CancellableThreadsTests; -import org.opensearch.common.util.set.Sets; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.index.snapshots.IndexShardSnapshotException; import org.opensearch.core.index.snapshots.IndexShardSnapshotFailedException; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentLocation; +import org.opensearch.crypto.CryptoRegistryException; import org.opensearch.discovery.MasterNotDiscoveredException; import org.opensearch.env.ShardLockObtainFailedException; -import org.opensearch.core.index.Index; import org.opensearch.index.engine.RecoveryEngineException; import org.opensearch.index.query.QueryShardException; import org.opensearch.index.seqno.RetentionLeaseAlreadyExistsException; @@ -91,7 +97,6 @@ import org.opensearch.index.shard.IllegalIndexShardStateException; import org.opensearch.index.shard.IndexShardState; import org.opensearch.index.shard.PrimaryShardClosedException; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.ShardNotInPrimaryModeException; import org.opensearch.indices.IndexTemplateMissingException; import org.opensearch.indices.InvalidIndexTemplateException; @@ -99,9 +104,7 @@ import org.opensearch.indices.recovery.RecoverFilesRecoveryException; import org.opensearch.indices.replication.common.ReplicationFailedException; import org.opensearch.ingest.IngestProcessorException; -import org.opensearch.cluster.coordination.NodeHealthCheckFailureException; import org.opensearch.repositories.RepositoryException; -import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.action.admin.indices.AliasesNotFoundException; import org.opensearch.search.SearchContextMissingException; import org.opensearch.search.SearchException; @@ -545,12 +548,12 @@ public void testNotSerializableExceptionWrapper() throws IOException { NotSerializableExceptionWrapper ex = serialize(new NotSerializableExceptionWrapper(new NullPointerException())); assertEquals( "{\"type\":\"null_pointer_exception\",\"reason\":\"null_pointer_exception: null\"}", - Strings.toString(XContentType.JSON, ex) + Strings.toString(MediaTypeRegistry.JSON, ex) ); ex = serialize(new NotSerializableExceptionWrapper(new IllegalArgumentException("nono!"))); assertEquals( "{\"type\":\"illegal_argument_exception\",\"reason\":\"illegal_argument_exception: nono!\"}", - Strings.toString(XContentType.JSON, ex) + Strings.toString(MediaTypeRegistry.JSON, ex) ); class UnknownException extends Exception { @@ -852,7 +855,7 @@ public void testIds() { ids.put(130, org.opensearch.action.NoShardAvailableActionException.class); ids.put(131, org.opensearch.action.UnavailableShardsException.class); ids.put(132, 
org.opensearch.index.engine.FlushFailedEngineException.class); - ids.put(133, org.opensearch.common.breaker.CircuitBreakingException.class); + ids.put(133, org.opensearch.core.common.breaker.CircuitBreakingException.class); ids.put(134, org.opensearch.transport.NodeNotConnectedException.class); ids.put(135, org.opensearch.index.mapper.StrictDynamicMappingException.class); ids.put(136, org.opensearch.action.support.replication.TransportReplicationAction.RetryOnReplicaException.class); @@ -865,7 +868,7 @@ public void testIds() { ids.put(143, org.opensearch.script.ScriptException.class); ids.put(144, org.opensearch.cluster.NotClusterManagerException.class); ids.put(145, org.opensearch.OpenSearchStatusException.class); - ids.put(146, org.opensearch.tasks.TaskCancelledException.class); + ids.put(146, org.opensearch.core.tasks.TaskCancelledException.class); ids.put(147, org.opensearch.env.ShardLockObtainFailedException.class); ids.put(148, null); ids.put(149, MultiBucketConsumerService.TooManyBucketsException.class); @@ -890,6 +893,9 @@ public void testIds() { ids.put(168, PreferenceBasedSearchNotAllowedException.class); ids.put(169, NodeWeighedAwayException.class); ids.put(170, SearchPipelineProcessingException.class); + ids.put(171, CryptoRegistryException.class); + ids.put(172, ViewNotFoundException.class); + ids.put(173, ViewAlreadyExistsException.class); ids.put(10001, IndexCreateBlockException.class); Map<Class<? extends OpenSearchException>, Integer> reverse = new HashMap<>(); diff --git a/server/src/test/java/org/opensearch/ExceptionsHelperTests.java b/server/src/test/java/org/opensearch/ExceptionsHelperTests.java index f5513271b8479..e2b22ccbba768 100644 --- a/server/src/test/java/org/opensearch/ExceptionsHelperTests.java +++ b/server/src/test/java/org/opensearch/ExceptionsHelperTests.java @@ -33,18 +33,19 @@ package org.opensearch; import com.fasterxml.jackson.core.JsonParseException; + import org.apache.commons.codec.DecoderException; import org.apache.lucene.index.CorruptIndexException; import org.opensearch.action.OriginalIndices; -import org.opensearch.core.action.ShardOperationFailedException; import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.core.action.ShardOperationFailedException; import org.opensearch.core.common.ParsingException; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.core.index.Index; -import org.opensearch.index.query.QueryShardException; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.rest.RestStatus; +import org.opensearch.index.query.QueryShardException; import org.opensearch.search.SearchShardTarget; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.transport.RemoteClusterAware; diff --git a/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java b/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java index ca94462160f23..d83539a2fef61 100644 --- a/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java +++ b/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java @@ -43,34 +43,35 @@ import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.coordination.NoClusterManagerBlockService; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.core.common.ParsingException; -import org.opensearch.common.Strings; import org.opensearch.common.UUIDs; +import org.opensearch.common.collect.Tuple; +import 
org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.collect.Tuple; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.xcontent.XContentLocation; import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.index.Index; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.query.QueryShardException; import org.opensearch.index.shard.IndexShardRecoveringException; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.node.NodeClosedException; import org.opensearch.repositories.RepositoryException; -import org.opensearch.core.rest.RestStatus; import org.opensearch.script.ScriptException; import org.opensearch.search.SearchContextMissingException; import org.opensearch.search.SearchParseException; import org.opensearch.search.SearchShardTarget; import org.opensearch.search.internal.ShardSearchContextId; +import org.opensearch.snapshots.ConcurrentSnapshotExecutionException; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.transport.RemoteTransportException; @@ -119,6 +120,9 @@ public void testStatus() { exception = new RemoteTransportException("test", new IllegalStateException("foobar")); assertThat(exception.status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR)); + + exception = new ConcurrentSnapshotExecutionException("testRepo", "testSnap", "test"); + assertSame(exception.status(), RestStatus.CONFLICT); } public void testGuessRootCause() { @@ -250,7 +254,7 @@ public void testDeduplicate() throws IOException { String expected = "{\"type\":\"search_phase_execution_exception\",\"reason\":\"all shards failed\",\"phase\":\"search\"," + "\"grouped\":true,\"failed_shards\":[{\"shard\":1,\"index\":\"foo\",\"node\":\"node_1\",\"reason\":" + "{\"type\":\"parsing_exception\",\"reason\":\"foobar\",\"line\":1,\"col\":2}}]}"; - assertEquals(expected, Strings.toString(builder)); + assertEquals(expected, builder.toString()); } { ShardSearchFailure failure = new ShardSearchFailure( @@ -279,7 +283,7 @@ public void testDeduplicate() throws IOException { + "\"reason\":{\"type\":\"parsing_exception\",\"reason\":\"foobar\",\"line\":1,\"col\":2}},{\"shard\":1," + "\"index\":\"foo1\",\"node\":\"node_1\",\"reason\":{\"type\":\"query_shard_exception\",\"reason\":\"foobar\"," + "\"index\":\"foo1\",\"index_uuid\":\"_na_\"}}]}"; - assertEquals(expected, Strings.toString(builder)); + assertEquals(expected, builder.toString()); } { ShardSearchFailure failure = new ShardSearchFailure( @@ -306,7 +310,7 @@ public void testDeduplicate() throws IOException { + "\"phase\":\"search\",\"grouped\":true,\"failed_shards\":[{\"shard\":1,\"index\":\"foo\",\"node\":\"node_1\"," + 
"\"reason\":{\"type\":\"parsing_exception\",\"reason\":\"foobar\",\"line\":1,\"col\":2}}]," + "\"caused_by\":{\"type\":\"null_pointer_exception\",\"reason\":null}}"; - assertEquals(expected, Strings.toString(builder)); + assertEquals(expected, builder.toString()); } } @@ -412,11 +416,11 @@ public void testToXContent() throws IOException { Collections.singletonMap(OpenSearchException.REST_EXCEPTION_SKIP_STACK_TRACE, "false") ); String actual; - try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { + try (XContentBuilder builder = XContentBuilder.builder(MediaTypeRegistry.JSON.xContent())) { builder.startObject(); e.toXContent(builder, params); builder.endObject(); - actual = Strings.toString(builder); + actual = builder.toString(); } assertThat( actual, @@ -449,8 +453,8 @@ public void testGenerateThrowableToXContent() throws IOException { { // test equivalence OpenSearchException ex = new RemoteTransportException("foobar", new FileNotFoundException("foo not found")); - String toXContentString = Strings.toString(XContentType.JSON, ex); - String throwableString = Strings.toString(XContentType.JSON, (builder, params) -> { + String toXContentString = Strings.toString(MediaTypeRegistry.JSON, ex); + String throwableString = Strings.toString(MediaTypeRegistry.JSON, (builder, params) -> { OpenSearchException.generateThrowableXContent(builder, params, ex); return builder; }); @@ -515,7 +519,7 @@ public void testToXContentWithHeadersAndMetadata() throws IOException { assertExceptionAsJson(e, expectedJson); OpenSearchException parsed; - try (XContentParser parser = createParser(XContentType.JSON.xContent(), expectedJson)) { + try (XContentParser parser = createParser(MediaTypeRegistry.JSON.xContent(), expectedJson)) { assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); parsed = OpenSearchException.fromXContent(parser); assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); @@ -972,13 +976,13 @@ public void testFailureToAndFromXContentWithDetails() throws IOException { /** * Builds a {@link ToXContent} using a JSON XContentBuilder and compares the result to the given json in string format. - * + * <p> * By default, the stack trace of the exception is not rendered. The parameter `errorTrace` forces the stack trace to * be rendered like the REST API does when the "error_trace" parameter is set to true. 
*/ private static void assertToXContentAsJson(ToXContent e, String expectedJson) throws IOException { - BytesReference actual = XContentHelper.toXContent(e, XContentType.JSON, randomBoolean()); - assertToXContentEquivalent(new BytesArray(expectedJson), actual, XContentType.JSON); + BytesReference actual = org.opensearch.core.xcontent.XContentHelper.toXContent(e, MediaTypeRegistry.JSON, randomBoolean()); + assertToXContentEquivalent(new BytesArray(expectedJson), actual, MediaTypeRegistry.JSON); } private static void assertExceptionAsJson(Exception e, String expectedJson) throws IOException { diff --git a/server/src/test/java/org/opensearch/VersionTests.java b/server/src/test/java/org/opensearch/VersionTests.java index 521736cc75cba..56fac9619e213 100644 --- a/server/src/test/java/org/opensearch/VersionTests.java +++ b/server/src/test/java/org/opensearch/VersionTests.java @@ -49,8 +49,8 @@ import java.util.Map; import java.util.Set; -import static org.opensearch.Version.V_2_3_0; import static org.opensearch.Version.MASK; +import static org.opensearch.Version.V_2_3_0; import static org.opensearch.test.VersionUtils.allVersions; import static org.opensearch.test.VersionUtils.randomOpenSearchVersion; import static org.opensearch.test.VersionUtils.randomVersion; diff --git a/server/src/test/java/org/opensearch/action/ActionModuleTests.java b/server/src/test/java/org/opensearch/action/ActionModuleTests.java index 109c60aa1e4f1..8479f011adf48 100644 --- a/server/src/test/java/org/opensearch/action/ActionModuleTests.java +++ b/server/src/test/java/org/opensearch/action/ActionModuleTests.java @@ -32,7 +32,6 @@ package org.opensearch.action; -import java.util.ArrayList; import org.opensearch.action.main.MainAction; import org.opensearch.action.main.TransportMainAction; import org.opensearch.action.support.ActionFilters; @@ -46,11 +45,12 @@ import org.opensearch.common.settings.SettingsFilter; import org.opensearch.common.settings.SettingsModule; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; import org.opensearch.extensions.ExtensionsManager; import org.opensearch.identity.IdentityService; import org.opensearch.plugins.ActionPlugin; import org.opensearch.plugins.ActionPlugin.ActionHandler; - import org.opensearch.rest.RestChannel; import org.opensearch.rest.RestController; import org.opensearch.rest.RestHandler; @@ -65,6 +65,7 @@ import org.opensearch.usage.UsageService; import java.io.IOException; +import java.util.ArrayList; import java.util.List; import java.util.Set; import java.util.function.Supplier; @@ -142,7 +143,7 @@ public void testSetupRestHandlerContainsKnownBuiltin() throws IOException { usageService, null, new IdentityService(Settings.EMPTY, new ArrayList<>()), - new ExtensionsManager(Set.of()) + new ExtensionsManager(Set.of(), new IdentityService(Settings.EMPTY, List.of())) ); actionModule.initRestHandlers(null); // At this point the easiest way to confirm that a handler is loaded is to try to register another one on top of it and to fail diff --git a/server/src/test/java/org/opensearch/action/ActionTests.java b/server/src/test/java/org/opensearch/action/ActionTests.java index 9ed9ecffb134d..49cf15065ff45 100644 --- a/server/src/test/java/org/opensearch/action/ActionTests.java +++ b/server/src/test/java/org/opensearch/action/ActionTests.java @@ -32,6 +32,7 @@ package org.opensearch.action; +import org.opensearch.core.action.ActionResponse; import 
org.opensearch.test.OpenSearchTestCase; public class ActionTests extends OpenSearchTestCase { diff --git a/server/src/test/java/org/opensearch/action/DocWriteResponseTests.java b/server/src/test/java/org/opensearch/action/DocWriteResponseTests.java index 46b4eae77f99b..f432d13b10604 100644 --- a/server/src/test/java/org/opensearch/action/DocWriteResponseTests.java +++ b/server/src/test/java/org/opensearch/action/DocWriteResponseTests.java @@ -34,14 +34,14 @@ import org.opensearch.action.DocWriteResponse.Result; import org.opensearch.action.support.replication.ReplicationResponse.ShardInfo; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.seqno.SequenceNumbers; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/action/DynamicActionRegistryTests.java b/server/src/test/java/org/opensearch/action/DynamicActionRegistryTests.java index c06791f0f0245..20c2b1f17124c 100644 --- a/server/src/test/java/org/opensearch/action/DynamicActionRegistryTests.java +++ b/server/src/test/java/org/opensearch/action/DynamicActionRegistryTests.java @@ -12,13 +12,15 @@ import org.opensearch.action.main.MainAction; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.TransportAction; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.extensions.action.ExtensionAction; import org.opensearch.extensions.action.ExtensionTransportAction; +import org.opensearch.extensions.rest.RestSendToExtensionAction; import org.opensearch.rest.NamedRoute; import org.opensearch.rest.RestRequest; -import org.opensearch.extensions.rest.RestSendToExtensionAction; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskManager; import org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java b/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java index 23b1a8d8462cb..c7e1039686cc9 100644 --- a/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java +++ b/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java @@ -8,28 +8,47 @@ package org.opensearch.action; -import org.junit.After; import org.opensearch.OpenSearchParseException; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.client.node.NodeClient; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsFilter; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.rest.BaseRestHandler; +import 
org.opensearch.rest.action.admin.cluster.RestCleanupRepositoryAction; +import org.opensearch.rest.action.admin.cluster.RestCloneSnapshotAction; import org.opensearch.rest.action.admin.cluster.RestClusterGetSettingsAction; import org.opensearch.rest.action.admin.cluster.RestClusterHealthAction; import org.opensearch.rest.action.admin.cluster.RestClusterRerouteAction; import org.opensearch.rest.action.admin.cluster.RestClusterStateAction; import org.opensearch.rest.action.admin.cluster.RestClusterUpdateSettingsAction; +import org.opensearch.rest.action.admin.cluster.RestCreateSnapshotAction; +import org.opensearch.rest.action.admin.cluster.RestDeleteRepositoryAction; +import org.opensearch.rest.action.admin.cluster.RestDeleteSnapshotAction; +import org.opensearch.rest.action.admin.cluster.RestDeleteStoredScriptAction; +import org.opensearch.rest.action.admin.cluster.RestGetRepositoriesAction; +import org.opensearch.rest.action.admin.cluster.RestGetSnapshotsAction; +import org.opensearch.rest.action.admin.cluster.RestGetStoredScriptAction; +import org.opensearch.rest.action.admin.cluster.RestPutRepositoryAction; +import org.opensearch.rest.action.admin.cluster.RestPutStoredScriptAction; +import org.opensearch.rest.action.admin.cluster.RestRestoreSnapshotAction; +import org.opensearch.rest.action.admin.cluster.RestSnapshotsStatusAction; +import org.opensearch.rest.action.admin.cluster.RestVerifyRepositoryAction; import org.opensearch.rest.action.admin.cluster.dangling.RestDeleteDanglingIndexAction; import org.opensearch.rest.action.admin.cluster.dangling.RestImportDanglingIndexAction; import org.opensearch.rest.action.admin.indices.RestAddIndexBlockAction; import org.opensearch.rest.action.admin.indices.RestCloseIndexAction; import org.opensearch.rest.action.admin.indices.RestCreateIndexAction; +import org.opensearch.rest.action.admin.indices.RestDeleteComponentTemplateAction; +import org.opensearch.rest.action.admin.indices.RestDeleteComposableIndexTemplateAction; import org.opensearch.rest.action.admin.indices.RestDeleteIndexAction; +import org.opensearch.rest.action.admin.indices.RestDeleteIndexTemplateAction; +import org.opensearch.rest.action.admin.indices.RestGetComponentTemplateAction; +import org.opensearch.rest.action.admin.indices.RestGetComposableIndexTemplateAction; +import org.opensearch.rest.action.admin.indices.RestGetIndexTemplateAction; import org.opensearch.rest.action.admin.indices.RestGetIndicesAction; import org.opensearch.rest.action.admin.indices.RestGetMappingAction; import org.opensearch.rest.action.admin.indices.RestGetSettingsAction; @@ -37,61 +56,42 @@ import org.opensearch.rest.action.admin.indices.RestIndexPutAliasAction; import org.opensearch.rest.action.admin.indices.RestIndicesAliasesAction; import org.opensearch.rest.action.admin.indices.RestOpenIndexAction; -import org.opensearch.rest.action.admin.indices.RestPutMappingAction; -import org.opensearch.rest.action.admin.indices.RestResizeHandler; -import org.opensearch.rest.action.admin.indices.RestRolloverIndexAction; -import org.opensearch.rest.action.admin.indices.RestUpdateSettingsAction; -import org.opensearch.rest.action.admin.indices.RestDeleteComponentTemplateAction; -import org.opensearch.rest.action.admin.indices.RestDeleteComposableIndexTemplateAction; -import org.opensearch.rest.action.admin.indices.RestDeleteIndexTemplateAction; -import org.opensearch.rest.action.admin.indices.RestGetComponentTemplateAction; -import org.opensearch.rest.action.admin.indices.RestGetComposableIndexTemplateAction; 
-import org.opensearch.rest.action.admin.indices.RestGetIndexTemplateAction; import org.opensearch.rest.action.admin.indices.RestPutComponentTemplateAction; import org.opensearch.rest.action.admin.indices.RestPutComposableIndexTemplateAction; import org.opensearch.rest.action.admin.indices.RestPutIndexTemplateAction; +import org.opensearch.rest.action.admin.indices.RestPutMappingAction; +import org.opensearch.rest.action.admin.indices.RestResizeHandler; +import org.opensearch.rest.action.admin.indices.RestRolloverIndexAction; import org.opensearch.rest.action.admin.indices.RestSimulateIndexTemplateAction; import org.opensearch.rest.action.admin.indices.RestSimulateTemplateAction; -import org.opensearch.rest.action.admin.cluster.RestCleanupRepositoryAction; -import org.opensearch.rest.action.admin.cluster.RestCloneSnapshotAction; -import org.opensearch.rest.action.admin.cluster.RestCreateSnapshotAction; -import org.opensearch.rest.action.admin.cluster.RestDeleteRepositoryAction; -import org.opensearch.rest.action.admin.cluster.RestDeleteSnapshotAction; -import org.opensearch.rest.action.admin.cluster.RestGetRepositoriesAction; -import org.opensearch.rest.action.admin.cluster.RestGetSnapshotsAction; -import org.opensearch.rest.action.admin.cluster.RestPutRepositoryAction; -import org.opensearch.rest.action.admin.cluster.RestRestoreSnapshotAction; -import org.opensearch.rest.action.admin.cluster.RestSnapshotsStatusAction; -import org.opensearch.rest.action.admin.cluster.RestVerifyRepositoryAction; -import org.opensearch.rest.action.admin.cluster.RestDeleteStoredScriptAction; -import org.opensearch.rest.action.admin.cluster.RestGetStoredScriptAction; -import org.opensearch.rest.action.admin.cluster.RestPutStoredScriptAction; +import org.opensearch.rest.action.admin.indices.RestUpdateSettingsAction; import org.opensearch.rest.action.cat.RestAllocationAction; -import org.opensearch.rest.action.cat.RestMasterAction; -import org.opensearch.rest.action.cat.RestRepositoriesAction; -import org.opensearch.rest.action.cat.RestThreadPoolAction; import org.opensearch.rest.action.cat.RestClusterManagerAction; -import org.opensearch.rest.action.cat.RestShardsAction; -import org.opensearch.rest.action.cat.RestPluginsAction; +import org.opensearch.rest.action.cat.RestIndicesAction; +import org.opensearch.rest.action.cat.RestMasterAction; import org.opensearch.rest.action.cat.RestNodeAttrsAction; import org.opensearch.rest.action.cat.RestNodesAction; -import org.opensearch.rest.action.cat.RestIndicesAction; -import org.opensearch.rest.action.cat.RestTemplatesAction; import org.opensearch.rest.action.cat.RestPendingClusterTasksAction; +import org.opensearch.rest.action.cat.RestPluginsAction; +import org.opensearch.rest.action.cat.RestRepositoriesAction; import org.opensearch.rest.action.cat.RestSegmentsAction; +import org.opensearch.rest.action.cat.RestShardsAction; import org.opensearch.rest.action.cat.RestSnapshotAction; +import org.opensearch.rest.action.cat.RestTemplatesAction; +import org.opensearch.rest.action.cat.RestThreadPoolAction; import org.opensearch.rest.action.ingest.RestDeletePipelineAction; import org.opensearch.rest.action.ingest.RestGetPipelineAction; import org.opensearch.rest.action.ingest.RestPutPipelineAction; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.rest.FakeRestRequest; import org.opensearch.threadpool.TestThreadPool; +import org.junit.After; import java.io.IOException; import java.util.Collections; -import static org.hamcrest.Matchers.containsString; 
import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_READ_ONLY_SETTING; +import static org.hamcrest.Matchers.containsString; /** * As of 2.0, the request parameter 'master_timeout' in all applicable REST APIs is deprecated, @@ -715,6 +715,6 @@ private FakeRestRequest getRestRequestWithBodyWithBothParams() { } private FakeRestRequest getFakeRestRequestWithBody() { - return new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withContent(new BytesArray("{}"), XContentType.JSON).build(); + return new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withContent(new BytesArray("{}"), MediaTypeRegistry.JSON).build(); } } diff --git a/server/src/test/java/org/opensearch/action/ShardOperationFailedExceptionTests.java b/server/src/test/java/org/opensearch/action/ShardOperationFailedExceptionTests.java index 8232a78a0aba0..2844fd6c837c1 100644 --- a/server/src/test/java/org/opensearch/action/ShardOperationFailedExceptionTests.java +++ b/server/src/test/java/org/opensearch/action/ShardOperationFailedExceptionTests.java @@ -35,8 +35,8 @@ import org.opensearch.common.Nullable; import org.opensearch.core.action.ShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/action/ShardValidateQueryRequestTests.java b/server/src/test/java/org/opensearch/action/ShardValidateQueryRequestTests.java index e24dc7f99cad9..3528e53ae786d 100644 --- a/server/src/test/java/org/opensearch/action/ShardValidateQueryRequestTests.java +++ b/server/src/test/java/org/opensearch/action/ShardValidateQueryRequestTests.java @@ -34,12 +34,12 @@ import org.opensearch.action.admin.indices.validate.query.ShardValidateQueryRequest; import org.opensearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.settings.Settings; -import org.opensearch.index.query.QueryBuilders; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.query.QueryBuilders; import org.opensearch.indices.IndicesModule; import org.opensearch.search.SearchModule; import org.opensearch.search.internal.AliasFilter; diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java index eb5a7df7a443c..d01265edbe874 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java @@ -44,10 +44,9 @@ import org.opensearch.cluster.routing.allocation.ShardAllocationDecision; import org.opensearch.cluster.routing.allocation.allocator.ShardsAllocator; import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; -import org.opensearch.common.Strings; +import org.opensearch.common.xcontent.XContentFactory; import 
org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.gateway.TestGatewayAllocator; @@ -138,7 +137,7 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing + "\"},\"explanation\":\"" + explanation + "\"}", - Strings.toString(builder) + builder.toString() ); } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplanationTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplanationTests.java index abc3e9c303e33..21bd4333dd881 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplanationTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplanationTests.java @@ -43,14 +43,13 @@ import org.opensearch.cluster.routing.allocation.MoveDecision; import org.opensearch.cluster.routing.allocation.ShardAllocationDecision; import org.opensearch.cluster.routing.allocation.decider.Decision; -import org.opensearch.common.Strings; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.test.OpenSearchTestCase; import static java.util.Collections.emptyMap; @@ -109,7 +108,7 @@ public void testExplanationToXContent() throws Exception { + "\",\"weight_ranking\":3},\"can_remain_on_current_node\":\"yes\",\"can_rebalance_cluster\":\"yes\"," + "\"can_rebalance_to_other_node\":\"no\",\"rebalance_explanation\":\"cannot rebalance as no target node exists " + "that can both allocate this shard and improve the cluster balance\"}", - Strings.toString(builder) + builder.toString() ); } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java index 59180539243f5..a015e671f4872 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java @@ -49,12 +49,13 @@ import org.opensearch.cluster.node.DiscoveryNodes.Builder; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.SetOnce; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransport; import 
org.opensearch.threadpool.TestThreadPool; @@ -139,7 +140,8 @@ public void setupForTest() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); final Settings.Builder nodeSettingsBuilder = Settings.builder(); diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsActionTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsActionTests.java index 4136d1cf31f3a..10e4ab6388be4 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsActionTests.java @@ -45,10 +45,11 @@ import org.opensearch.cluster.node.DiscoveryNodes.Builder; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.SetOnce; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransport; import org.opensearch.threadpool.TestThreadPool; @@ -111,7 +112,8 @@ public void setupForTest() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); new TransportClearVotingConfigExclusionsAction( diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/VotingConfigExclusionsHelperTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/VotingConfigExclusionsHelperTests.java index 55528cfb71a6a..c71d30ccac2b8 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/VotingConfigExclusionsHelperTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/VotingConfigExclusionsHelperTests.java @@ -8,7 +8,6 @@ package org.opensearch.action.admin.cluster.configuration; -import org.junit.BeforeClass; import org.opensearch.Version; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; @@ -20,6 +19,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.Strings; import org.opensearch.test.OpenSearchTestCase; +import org.junit.BeforeClass; import java.util.Set; diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequestTests.java index 66356d4916bf1..c740fccabfbc2 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequestTests.java @@ -37,8 +37,8 @@ import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.Priority; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.test.OpenSearchTestCase; import java.util.Locale; diff --git 
a/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java index cb199c664eb5a..bcd63dade191c 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java @@ -43,19 +43,18 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.core.rest.RestStatus; import org.opensearch.test.AbstractSerializingTestCase; import org.opensearch.test.OpenSearchTestCase; - import org.hamcrest.Matchers; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequestTests.java index 412b546e134b7..d0a75b007a218 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequestTests.java @@ -86,15 +86,18 @@ public void testRemoveSingleMetric() throws Exception { } /** - * Test that a newly constructed NodesInfoRequestObject requests all of the - * possible metrics defined in {@link NodesInfoRequest.Metric}. + * Test that a newly constructed NodesInfoRequest object does not request all of the + * possible metrics defined in {@link NodesInfoRequest.Metric}, but only the default metrics + * according to {@link NodesInfoRequest.Metric#defaultMetrics()}. 
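+ * A rough sketch of the contract under test (hypothetical node id, for illustration only):
+ * <pre>
+ * new NodesInfoRequest("node");                  // requests only the default metrics
+ * new NodesInfoRequest("node").defaultMetrics(); // the same set, selected explicitly
+ * new NodesInfoRequest("node").all();            // a strictly larger set
+ * </pre>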
*/ public void testNodesInfoRequestDefaults() { - NodesInfoRequest defaultNodesInfoRequest = new NodesInfoRequest(randomAlphaOfLength(8)); - NodesInfoRequest allMetricsNodesInfoRequest = new NodesInfoRequest(randomAlphaOfLength(8)); - allMetricsNodesInfoRequest.all(); + NodesInfoRequest requestOOTB = new NodesInfoRequest(randomAlphaOfLength(8)); + NodesInfoRequest requestAll = new NodesInfoRequest(randomAlphaOfLength(8)).all(); + NodesInfoRequest requestDefault = new NodesInfoRequest(randomAlphaOfLength(8)).defaultMetrics(); - assertThat(defaultNodesInfoRequest.requestedMetrics(), equalTo(allMetricsNodesInfoRequest.requestedMetrics())); + assertTrue(requestAll.requestedMetrics().size() > requestOOTB.requestedMetrics().size()); + assertEquals(requestDefault.requestedMetrics().size(), requestOOTB.requestedMetrics().size()); + assertThat(requestOOTB.requestedMetrics(), equalTo(requestDefault.requestedMetrics())); } /** @@ -107,6 +110,21 @@ public void testNodesInfoRequestAll() throws Exception { assertThat(request.requestedMetrics(), equalTo(NodesInfoRequest.Metric.allMetrics())); } + /** + * Test that the {@link NodesInfoRequest#defaultMetrics()} method enables default metrics. + */ + public void testNodesInfoRequestDefault() { + NodesInfoRequest request = new NodesInfoRequest("node"); + request.defaultMetrics(); + + assertEquals(11, request.requestedMetrics().size()); + assertThat(request.requestedMetrics(), equalTo(NodesInfoRequest.Metric.defaultMetrics())); + assertTrue(request.requestedMetrics().contains(NodesInfoRequest.Metric.JVM.metricName())); + assertTrue(request.requestedMetrics().contains(NodesInfoRequest.Metric.AGGREGATIONS.metricName())); + // search_pipelines metrics are not included + assertFalse(request.requestedMetrics().contains(NodesInfoRequest.Metric.SEARCH_PIPELINES.metricName())); + } + /** * Test that the {@link NodesInfoRequest#clear()} method disables all metrics. 
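* A minimal sketch of that behavior (hypothetical usage, not part of this change):
* <pre>
* NodesInfoRequest request = new NodesInfoRequest("node").all();
* request.clear();
* // requestedMetrics() should now be empty
* </pre>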
*/ diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java index cbf7032b50ca5..1b8b6243aa805 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -32,25 +32,48 @@ package org.opensearch.action.admin.cluster.node.stats; +import org.opensearch.action.admin.indices.stats.CommonStats; +import org.opensearch.action.admin.indices.stats.CommonStatsFlags; +import org.opensearch.action.search.SearchRequestStats; +import org.opensearch.cluster.coordination.PendingClusterStateStats; +import org.opensearch.cluster.coordination.PersistedStateStats; +import org.opensearch.cluster.coordination.PublishClusterStateStats; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.WeightedRoutingStats; import org.opensearch.cluster.service.ClusterManagerThrottlingStats; +import org.opensearch.cluster.service.ClusterStateStats; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.metrics.OperationStats; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.indices.breaker.AllCircuitBreakerStats; +import org.opensearch.core.indices.breaker.CircuitBreakerStats; import org.opensearch.discovery.DiscoveryStats; -import org.opensearch.cluster.coordination.PendingClusterStateStats; -import org.opensearch.cluster.coordination.PublishClusterStateStats; +import org.opensearch.gateway.remote.RemotePersistenceStats; import org.opensearch.http.HttpStats; -import org.opensearch.indices.breaker.AllCircuitBreakerStats; -import org.opensearch.indices.breaker.CircuitBreakerStats; +import org.opensearch.index.ReplicationStats; +import org.opensearch.index.SegmentReplicationRejectionStats; +import org.opensearch.index.remote.RemoteSegmentStats; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; +import org.opensearch.index.translog.RemoteTranslogStats; +import org.opensearch.indices.NodeIndicesStats; import org.opensearch.ingest.IngestStats; import org.opensearch.monitor.fs.FsInfo; import org.opensearch.monitor.jvm.JvmStats; import org.opensearch.monitor.os.OsStats; import org.opensearch.monitor.process.ProcessStats; import org.opensearch.node.AdaptiveSelectionStats; +import org.opensearch.node.IoUsageStats; +import org.opensearch.node.NodeResourceUsageStats; +import org.opensearch.node.NodesResourceUsageStats; import org.opensearch.node.ResponseCollectorService; +import org.opensearch.ratelimitting.admissioncontrol.controllers.AdmissionController; +import org.opensearch.ratelimitting.admissioncontrol.controllers.CpuBasedAdmissionController; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; +import org.opensearch.ratelimitting.admissioncontrol.stats.AdmissionControlStats; +import org.opensearch.ratelimitting.admissioncontrol.stats.AdmissionControllerStats; import org.opensearch.script.ScriptCacheStats; import org.opensearch.script.ScriptStats; import org.opensearch.test.OpenSearchTestCase; @@ -60,6 +83,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import 
java.util.Collections; import java.util.HashMap; import java.util.Iterator; @@ -76,7 +100,7 @@ public class NodeStatsTests extends OpenSearchTestCase { public void testSerialization() throws IOException { - NodeStats nodeStats = createNodeStats(); + NodeStats nodeStats = createNodeStats(true); try (BytesStreamOutput out = new BytesStreamOutput()) { nodeStats.writeTo(out); try (StreamInput in = out.bytes().streamInput()) { @@ -273,6 +297,10 @@ public void testSerialization() throws IOException { assertEquals(ioStats.getTotalReadOperations(), deserializedIoStats.getTotalReadOperations()); assertEquals(ioStats.getTotalWriteKilobytes(), deserializedIoStats.getTotalWriteKilobytes()); assertEquals(ioStats.getTotalWriteOperations(), deserializedIoStats.getTotalWriteOperations()); + assertEquals(ioStats.getTotalReadTime(), deserializedIoStats.getTotalReadTime()); + assertEquals(ioStats.getTotalWriteTime(), deserializedIoStats.getTotalWriteTime()); + assertEquals(ioStats.getTotalQueueSize(), deserializedIoStats.getTotalQueueSize()); + assertEquals(ioStats.getTotalIOTimeMillis(), deserializedIoStats.getTotalIOTimeMillis()); assertEquals(ioStats.getDevicesStats().length, deserializedIoStats.getDevicesStats().length); for (int i = 0; i < ioStats.getDevicesStats().length; i++) { FsInfo.DeviceStats deviceStats = ioStats.getDevicesStats()[i]; @@ -333,6 +361,26 @@ public void testSerialization() throws IOException { assertEquals(queueStats.getTotal(), deserializedDiscoveryStats.getQueueStats().getTotal()); assertEquals(queueStats.getPending(), deserializedDiscoveryStats.getQueueStats().getPending()); } + ClusterStateStats stateStats = discoveryStats.getClusterStateStats(); + if (stateStats == null) { + assertNull(deserializedDiscoveryStats.getClusterStateStats()); + } else { + assertEquals(stateStats.getUpdateFailed(), deserializedDiscoveryStats.getClusterStateStats().getUpdateFailed()); + assertEquals(stateStats.getUpdateSuccess(), deserializedDiscoveryStats.getClusterStateStats().getUpdateSuccess()); + assertEquals( + stateStats.getUpdateTotalTimeInMillis(), + deserializedDiscoveryStats.getClusterStateStats().getUpdateTotalTimeInMillis() + ); + assertEquals(1, deserializedDiscoveryStats.getClusterStateStats().getPersistenceStats().size()); + PersistedStateStats deserializedRemoteStateStats = deserializedDiscoveryStats.getClusterStateStats() + .getPersistenceStats() + .get(0); + PersistedStateStats remoteStateStats = stateStats.getPersistenceStats().get(0); + assertEquals(remoteStateStats.getStatsName(), deserializedRemoteStateStats.getStatsName()); + assertEquals(remoteStateStats.getFailedCount(), deserializedRemoteStateStats.getFailedCount()); + assertEquals(remoteStateStats.getSuccessCount(), deserializedRemoteStateStats.getSuccessCount()); + assertEquals(remoteStateStats.getTotalTimeInMillis(), deserializedRemoteStateStats.getTotalTimeInMillis()); + } } IngestStats ingestStats = nodeStats.getIngestStats(); IngestStats deserializedIngestStats = deserializedNodeStats.getIngestStats(); @@ -384,6 +432,35 @@ public void testSerialization() throws IOException { assertEquals(aStats.responseTime, bStats.responseTime, 0.01); }); } + NodesResourceUsageStats resourceUsageStats = nodeStats.getResourceUsageStats(); + NodesResourceUsageStats deserializedResourceUsageStats = deserializedNodeStats.getResourceUsageStats(); + if (resourceUsageStats == null) { + assertNull(deserializedResourceUsageStats); + } else { + resourceUsageStats.getNodeIdToResourceUsageStatsMap().forEach((k, v) -> { + NodeResourceUsageStats 
aResourceUsageStats = resourceUsageStats.getNodeIdToResourceUsageStatsMap().get(k); + NodeResourceUsageStats bResourceUsageStats = deserializedResourceUsageStats.getNodeIdToResourceUsageStatsMap() + .get(k); + assertEquals( + aResourceUsageStats.getMemoryUtilizationPercent(), + bResourceUsageStats.getMemoryUtilizationPercent(), + 0.0 + ); + assertEquals(aResourceUsageStats.getCpuUtilizationPercent(), bResourceUsageStats.getCpuUtilizationPercent(), 0.0); + assertEquals(aResourceUsageStats.getTimestamp(), bResourceUsageStats.getTimestamp()); + }); + } + SegmentReplicationRejectionStats segmentReplicationRejectionStats = nodeStats.getSegmentReplicationRejectionStats(); + SegmentReplicationRejectionStats deserializedSegmentReplicationRejectionStats = deserializedNodeStats + .getSegmentReplicationRejectionStats(); + if (segmentReplicationRejectionStats == null) { + assertNull(deserializedSegmentReplicationRejectionStats); + } else { + assertEquals( + segmentReplicationRejectionStats.getTotalRejectionCount(), + deserializedSegmentReplicationRejectionStats.getTotalRejectionCount() + ); + } ScriptCacheStats scriptCacheStats = nodeStats.getScriptCacheStats(); ScriptCacheStats deserializedScriptCacheStats = deserializedNodeStats.getScriptCacheStats(); if (scriptCacheStats == null) { @@ -436,11 +513,79 @@ public void testSerialization() throws IOException { assertEquals(weightedRoutingStats.getFailOpenCount(), deserializedWeightedRoutingStats.getFailOpenCount()); } + + NodeIndicesStats nodeIndicesStats = nodeStats.getIndices(); + NodeIndicesStats deserializedNodeIndicesStats = deserializedNodeStats.getIndices(); + if (nodeIndicesStats == null) { + assertNull(deserializedNodeIndicesStats); + } else { + RemoteSegmentStats remoteSegmentStats = nodeIndicesStats.getSegments().getRemoteSegmentStats(); + RemoteSegmentStats deserializedRemoteSegmentStats = deserializedNodeIndicesStats.getSegments().getRemoteSegmentStats(); + assertEquals(remoteSegmentStats.getDownloadBytesStarted(), deserializedRemoteSegmentStats.getDownloadBytesStarted()); + assertEquals( + remoteSegmentStats.getDownloadBytesSucceeded(), + deserializedRemoteSegmentStats.getDownloadBytesSucceeded() + ); + assertEquals(remoteSegmentStats.getDownloadBytesFailed(), deserializedRemoteSegmentStats.getDownloadBytesFailed()); + assertEquals(remoteSegmentStats.getUploadBytesStarted(), deserializedRemoteSegmentStats.getUploadBytesStarted()); + assertEquals(remoteSegmentStats.getUploadBytesSucceeded(), deserializedRemoteSegmentStats.getUploadBytesSucceeded()); + assertEquals(remoteSegmentStats.getUploadBytesFailed(), deserializedRemoteSegmentStats.getUploadBytesFailed()); + assertEquals(remoteSegmentStats.getMaxRefreshTimeLag(), deserializedRemoteSegmentStats.getMaxRefreshTimeLag()); + assertEquals(remoteSegmentStats.getMaxRefreshBytesLag(), deserializedRemoteSegmentStats.getMaxRefreshBytesLag()); + assertEquals(remoteSegmentStats.getTotalRefreshBytesLag(), deserializedRemoteSegmentStats.getTotalRefreshBytesLag()); + assertEquals(remoteSegmentStats.getTotalUploadTime(), deserializedRemoteSegmentStats.getTotalUploadTime()); + assertEquals(remoteSegmentStats.getTotalDownloadTime(), deserializedRemoteSegmentStats.getTotalDownloadTime()); + + RemoteTranslogStats remoteTranslogStats = nodeIndicesStats.getTranslog().getRemoteTranslogStats(); + RemoteTranslogStats deserializedRemoteTranslogStats = deserializedNodeIndicesStats.getTranslog() + .getRemoteTranslogStats(); + assertEquals(remoteTranslogStats, deserializedRemoteTranslogStats); + + ReplicationStats 
replicationStats = nodeIndicesStats.getSegments().getReplicationStats(); + + ReplicationStats deserializedReplicationStats = deserializedNodeIndicesStats.getSegments().getReplicationStats(); + assertEquals(replicationStats.getMaxBytesBehind(), deserializedReplicationStats.getMaxBytesBehind()); + assertEquals(replicationStats.getTotalBytesBehind(), deserializedReplicationStats.getTotalBytesBehind()); + assertEquals(replicationStats.getMaxReplicationLag(), deserializedReplicationStats.getMaxReplicationLag()); + } + AdmissionControlStats admissionControlStats = nodeStats.getAdmissionControlStats(); + AdmissionControlStats deserializedAdmissionControlStats = deserializedNodeStats.getAdmissionControlStats(); + if (admissionControlStats == null) { + assertNull(deserializedAdmissionControlStats); + } else { + assertEquals( + admissionControlStats.getAdmissionControllerStatsList().size(), + deserializedAdmissionControlStats.getAdmissionControllerStatsList().size() + ); + AdmissionControllerStats admissionControllerStats = admissionControlStats.getAdmissionControllerStatsList().get(0); + AdmissionControllerStats deserializedAdmissionControllerStats = deserializedAdmissionControlStats + .getAdmissionControllerStatsList() + .get(0); + assertEquals( + admissionControllerStats.getAdmissionControllerName(), + deserializedAdmissionControllerStats.getAdmissionControllerName() + ); + assertEquals(1, (long) admissionControllerStats.getRejectionCount().get(AdmissionControlActionType.SEARCH.getType())); + assertEquals( + admissionControllerStats.getRejectionCount().get(AdmissionControlActionType.SEARCH.getType()), + deserializedAdmissionControllerStats.getRejectionCount().get(AdmissionControlActionType.SEARCH.getType()) + ); + + assertEquals(2, (long) admissionControllerStats.getRejectionCount().get(AdmissionControlActionType.INDEXING.getType())); + assertEquals( + admissionControllerStats.getRejectionCount().get(AdmissionControlActionType.INDEXING.getType()), + deserializedAdmissionControllerStats.getRejectionCount().get(AdmissionControlActionType.INDEXING.getType()) + ); + } } } } - public static NodeStats createNodeStats() { + public static NodeStats createNodeStats() throws IOException { + return createNodeStats(false); + } + + public static NodeStats createNodeStats(boolean remoteStoreStats) throws IOException { DiscoveryNode node = new DiscoveryNode( "test_node", buildNewFakeTransportAddress(), @@ -555,7 +700,8 @@ public static NodeStats createNodeStats() { randomIntBetween(1, 1000), randomNonNegativeLong(), randomIntBetween(1, 1000), - randomIntBetween(1, 1000) + randomIntBetween(1, 1000), + randomIntBetween(-1, 10) ) ); } @@ -576,6 +722,10 @@ public static NodeStats createNodeStats() { randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), null ); deviceStatsArray[i] = new FsInfo.DeviceStats( @@ -586,6 +736,10 @@ public static NodeStats createNodeStats() { randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), previousDeviceStats ); } @@ -632,12 +786,16 @@ public static NodeStats createNodeStats() { ScriptStats scriptStats = frequently() ? 
new ScriptStats(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()) : null; + ClusterStateStats stateStats = new ClusterStateStats(); + RemotePersistenceStats remoteStateStats = new RemotePersistenceStats(); + stateStats.setPersistenceStats(Arrays.asList(remoteStateStats)); DiscoveryStats discoveryStats = frequently() ? new DiscoveryStats( randomBoolean() ? new PendingClusterStateStats(randomInt(), randomInt(), randomInt()) : null, randomBoolean() ? new PublishClusterStateStats(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()) - : null + : null, + randomBoolean() ? stateStats : null ) : null; IngestStats ingestStats = null; @@ -707,22 +865,75 @@ public static NodeStats createNodeStats() { } adaptiveSelectionStats = new AdaptiveSelectionStats(nodeConnections, nodeStats); } + NodesResourceUsageStats nodesResourceUsageStats = null; + if (frequently()) { + int numNodes = randomIntBetween(0, 10); + Map<String, Long> nodeConnections = new HashMap<>(); + Map<String, NodeResourceUsageStats> resourceUsageStatsMap = new HashMap<>(); + for (int i = 0; i < numNodes; i++) { + String nodeId = randomAlphaOfLengthBetween(3, 10); + // add outgoing connection info + if (frequently()) { + nodeConnections.put(nodeId, randomLongBetween(0, 100)); + } + // add node calculations + if (frequently()) { + NodeResourceUsageStats stats = new NodeResourceUsageStats( + nodeId, + System.currentTimeMillis(), + randomDoubleBetween(1.0, 100.0, true), + randomDoubleBetween(1.0, 100.0, true), + new IoUsageStats(100.0) + ); + resourceUsageStatsMap.put(nodeId, stats); + } + } + nodesResourceUsageStats = new NodesResourceUsageStats(resourceUsageStatsMap); + } + SegmentReplicationRejectionStats segmentReplicationRejectionStats = null; + if (frequently()) { + segmentReplicationRejectionStats = new SegmentReplicationRejectionStats(randomNonNegativeLong()); + } + ClusterManagerThrottlingStats clusterManagerThrottlingStats = null; if (frequently()) { clusterManagerThrottlingStats = new ClusterManagerThrottlingStats(); clusterManagerThrottlingStats.onThrottle("test-task", randomInt()); } + + AdmissionControlStats admissionControlStats = null; + if (frequently()) { + AdmissionController admissionController = new AdmissionController( + CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER, + null, + null + ) { + @Override + public void apply(String action, AdmissionControlActionType admissionControlActionType) { + return; + } + }; + admissionController.addRejectionCount(AdmissionControlActionType.SEARCH.getType(), 1); + admissionController.addRejectionCount(AdmissionControlActionType.INDEXING.getType(), 2); + AdmissionControllerStats stats = new AdmissionControllerStats(admissionController); + List<AdmissionControllerStats> statsList = new ArrayList<>(); + statsList.add(stats); + admissionControlStats = new AdmissionControlStats(statsList); + } ScriptCacheStats scriptCacheStats = scriptStats != null ? scriptStats.toScriptCacheStats() : null; WeightedRoutingStats weightedRoutingStats = null; weightedRoutingStats = WeightedRoutingStats.getInstance(); weightedRoutingStats.updateFailOpenCount(); - // TODO NodeIndicesStats are not tested here, way too complicated to create, also they need to be migrated to Writeable yet + NodeIndicesStats indicesStats = getNodeIndicesStats(remoteStoreStats); + + // TODO: Only remote_store-based aspects of NodeIndicesStats are being tested here. 
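+ // (Sketch of one possible round-trip, assuming NodeIndicesStats exposes a StreamInput constructor:
+ // indicesStats.writeTo(out); NodeIndicesStats copy = new NodeIndicesStats(out.bytes().streamInput());
+ // the remaining sub-stats could then be compared field by field.)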
+ // It is possible to test other metrics in NodeIndicesStats as well since it extends Writeable now return new NodeStats( node, randomNonNegativeLong(), - null, + indicesStats, osStats, processStats, jvmStats, @@ -735,6 +946,7 @@ public static NodeStats createNodeStats() { discoveryStats, ingestStats, adaptiveSelectionStats, + nodesResourceUsageStats, scriptCacheStats, null, null, @@ -743,7 +955,64 @@ public static NodeStats createNodeStats() { weightedRoutingStats, null, null, - null + null, + segmentReplicationRejectionStats, + null, + admissionControlStats + ); + } + + private static NodeIndicesStats getNodeIndicesStats(boolean remoteStoreStats) { + NodeIndicesStats indicesStats = null; + if (remoteStoreStats) { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + indicesStats = new NodeIndicesStats( + new CommonStats(CommonStatsFlags.ALL), + new HashMap<>(), + new SearchRequestStats(clusterSettings) + ); + RemoteSegmentStats remoteSegmentStats = indicesStats.getSegments().getRemoteSegmentStats(); + remoteSegmentStats.addUploadBytesStarted(10L); + remoteSegmentStats.addUploadBytesSucceeded(10L); + remoteSegmentStats.addUploadBytesFailed(1L); + remoteSegmentStats.addDownloadBytesStarted(10L); + remoteSegmentStats.addDownloadBytesSucceeded(10L); + remoteSegmentStats.addDownloadBytesFailed(1L); + remoteSegmentStats.addTotalRefreshBytesLag(5L); + remoteSegmentStats.addMaxRefreshBytesLag(2L); + remoteSegmentStats.setMaxRefreshTimeLag(2L); + remoteSegmentStats.addTotalUploadTime(20L); + remoteSegmentStats.addTotalDownloadTime(20L); + remoteSegmentStats.addTotalRejections(5L); + + RemoteTranslogStats remoteTranslogStats = indicesStats.getTranslog().getRemoteTranslogStats(); + RemoteTranslogStats otherRemoteTranslogStats = new RemoteTranslogStats(getRandomRemoteTranslogTransferTrackerStats()); + remoteTranslogStats.add(otherRemoteTranslogStats); + } + return indicesStats; + } + + private static RemoteTranslogTransferTracker.Stats getRandomRemoteTranslogTransferTrackerStats() { + return new RemoteTranslogTransferTracker.Stats( + new ShardId("test-idx", "test-idx", randomIntBetween(1, 10)), + 0L, + randomLongBetween(100, 500), + randomLongBetween(50, 100), + randomLongBetween(100, 200), + randomLongBetween(10000, 50000), + randomLongBetween(5000, 10000), + randomLongBetween(10000, 20000), + 0L, + 0D, + 0D, + 0D, + 0L, + 0L, + 0L, + 0L, + 0D, + 0D, + 0D ); } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequestTests.java index 130c60539b74b..fe9ddf6935948 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequestTests.java @@ -36,6 +36,7 @@ import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.test.OpenSearchTestCase; + import java.util.HashSet; import java.util.Set; diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksTests.java index c16584877c07d..7d706411b6f0d 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksTests.java +++ 
b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksTests.java @@ -33,8 +33,8 @@ import com.carrotsearch.randomizedtesting.RandomizedContext; import com.carrotsearch.randomizedtesting.generators.RandomNumbers; + import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksAction; import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; @@ -45,18 +45,20 @@ import org.opensearch.action.support.replication.ClusterStateCreationUtils; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.settings.Settings; +import org.opensearch.core.tasks.TaskCancelledException; +import org.opensearch.core.tasks.TaskId; import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskCancelledException; -import org.opensearch.tasks.TaskId; import org.opensearch.tasks.TaskInfo; import org.opensearch.tasks.TaskManager; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; + import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/RecordingTaskManagerListener.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/RecordingTaskManagerListener.java index 768a6c73af380..8102059a030a3 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/RecordingTaskManagerListener.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/RecordingTaskManagerListener.java @@ -34,10 +34,10 @@ import org.opensearch.common.collect.Tuple; import org.opensearch.common.regex.Regex; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.core.tasks.resourcetracker.ThreadResourceInfo; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; import org.opensearch.tasks.TaskInfo; -import org.opensearch.tasks.ThreadResourceInfo; import org.opensearch.test.tasks.MockTaskManagerListener; import java.util.ArrayList; diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java index 96f2365412e7b..6c4337d267c8d 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java @@ -9,27 +9,30 @@ package org.opensearch.action.admin.cluster.node.tasks; import com.sun.management.ThreadMXBean; + import org.apache.lucene.util.Constants; import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionListener; -import org.opensearch.action.NotifyOnceListener; import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; +import org.opensearch.action.admin.cluster.node.tasks.get.GetTaskRequest; +import org.opensearch.action.admin.cluster.node.tasks.get.GetTaskResponse; import 
org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.opensearch.action.support.ActionTestUtils; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.SuppressForbidden; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.NotifyOnceListener; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.tasks.TaskCancelledException; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.core.tasks.resourcetracker.TaskResourceUsage; import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskCancelledException; -import org.opensearch.tasks.TaskId; import org.opensearch.tasks.TaskInfo; -import org.opensearch.tasks.TaskResourceUsage; import org.opensearch.test.tasks.MockTaskManager; import org.opensearch.test.tasks.MockTaskManagerListener; import org.opensearch.threadpool.ThreadPool; @@ -49,9 +52,9 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; +import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.lessThanOrEqualTo; -import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; @SuppressForbidden(reason = "ThreadMXBean#getThreadAllocatedBytes") public class ResourceAwareTasksTests extends TaskManagerTestCase { @@ -562,8 +565,57 @@ public void testOnDemandRefreshWhileFetchingTasks() throws InterruptedException assertNotNull(taskInfo.getResourceStats()); assertNotNull(taskInfo.getResourceStats().getResourceUsageInfo()); - assertTrue(taskInfo.getResourceStats().getResourceUsageInfo().get("total") instanceof TaskResourceUsage); - TaskResourceUsage taskResourceUsage = (TaskResourceUsage) taskInfo.getResourceStats().getResourceUsageInfo().get("total"); + assertNotNull(taskInfo.getResourceStats().getResourceUsageInfo().get("total")); + TaskResourceUsage taskResourceUsage = taskInfo.getResourceStats().getResourceUsageInfo().get("total"); + assertCPUTime(taskResourceUsage.getCpuTimeInNanos()); + assertTrue(taskResourceUsage.getMemoryInBytes() > 0); + }; + + taskTestContext.operationFinishedValidator = (task, threadId) -> { assertEquals(0, resourceTasks.size()); }; + + startResourceAwareNodesAction(testNodes[0], false, taskTestContext, new ActionListener<NodesResponse>() { + @Override + public void onResponse(NodesResponse listTasksResponse) { + responseReference.set(listTasksResponse); + taskTestContext.requestCompleteLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + throwableReference.set(e); + taskTestContext.requestCompleteLatch.countDown(); + } + }); + + // Waiting for whole request to complete and return successfully till client + taskTestContext.requestCompleteLatch.await(); + + assertTasksRequestFinishedSuccessfully(responseReference.get(), throwableReference.get()); + } + + public void testOnDemandRefreshWhileGetTask() throws InterruptedException { + setup(true, false); + + final AtomicReference<Throwable> 
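// (Aside on the test harness used here: TaskTestContext drives each case through two
// hooks — operationStartValidator, which fires while the task is still registered and
// resource-tracked, and operationFinishedValidator, which fires after unregistration
// and can therefore assert that resourceTasks has been emptied. This is inferred from
// the assertions in this file, not from the harness itself.)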
throwableReference = new AtomicReference<>(); + final AtomicReference<NodesResponse> responseReference = new AtomicReference<>(); + + TaskTestContext taskTestContext = new TaskTestContext(); + + Map<Long, Task> resourceTasks = testNodes[0].taskResourceTrackingService.getResourceAwareTasks(); + + taskTestContext.operationStartValidator = (task, threadId) -> { + assertFalse(resourceTasks.isEmpty()); + GetTaskResponse getTaskResponse = ActionTestUtils.executeBlocking( + testNodes[0].transportGetTaskAction, + new GetTaskRequest().setTaskId(new TaskId(testNodes[0].getNodeId(), new ArrayList<>(resourceTasks.values()).get(0).getId())) + ); + + TaskInfo taskInfo = getTaskResponse.getTask().getTask(); + + assertNotNull(taskInfo.getResourceStats()); + assertNotNull(taskInfo.getResourceStats().getResourceUsageInfo()); + assertNotNull(taskInfo.getResourceStats().getResourceUsageInfo().get("total")); + TaskResourceUsage taskResourceUsage = taskInfo.getResourceStats().getResourceUsageInfo().get("total"); assertCPUTime(taskResourceUsage.getCpuTimeInNanos()); assertTrue(taskResourceUsage.getMemoryInBytes() > 0); }; @@ -672,8 +724,8 @@ private void assertTasksRequestFinishedSuccessfully(NodesResponse nodesResponse, } private void assertMemoryUsageWithinLimits(long actual, long expected) { - // 5% buffer up to 200 KB to account for classloading overhead. - long maxOverhead = Math.min(200000, expected * 5 / 100); + // 5% buffer up to 500 KB to account for classloading overhead. + long maxOverhead = Math.min(500000, expected * 5 / 100); assertThat(actual, lessThanOrEqualTo(expected + maxOverhead)); } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java index abbaf3a3f7b96..8d87fd5135663 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java @@ -34,6 +34,7 @@ import org.opensearch.Version; import org.opensearch.action.FailedNodeException; import org.opensearch.action.admin.cluster.node.tasks.cancel.TransportCancelTasksAction; +import org.opensearch.action.admin.cluster.node.tasks.get.TransportGetTaskAction; import org.opensearch.action.admin.cluster.node.tasks.list.TransportListTasksAction; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.nodes.BaseNodeResponse; @@ -41,25 +42,28 @@ import org.opensearch.action.support.nodes.BaseNodesResponse; import org.opensearch.action.support.nodes.TransportNodesAction; import org.opensearch.action.support.replication.ClusterStateCreationUtils; +import org.opensearch.client.Client; import org.opensearch.cluster.ClusterModule; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.SetOnce; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; import 
org.opensearch.common.util.PageCacheRecycler; -import org.opensearch.common.lease.Releasable; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.tasks.TaskCancellationService; import org.opensearch.tasks.TaskManager; import org.opensearch.tasks.TaskResourceTrackingService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.tasks.MockTaskManager; import org.opensearch.threadpool.RunnableTaskExecutionListener; @@ -84,6 +88,7 @@ import static java.util.Collections.emptySet; import static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.opensearch.test.ClusterServiceUtils.setState; +import static org.mockito.Mockito.mock; /** * The test case for unit testing task manager and related transport actions @@ -210,13 +215,15 @@ public TestNode(String name, ThreadPool threadPool, Settings settings) { new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(ClusterModule.getNamedWriteables()), - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ), threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddressDiscoveryNodeFunction, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ) { @Override protected TaskManager createTaskManager( @@ -246,6 +253,17 @@ protected TaskManager createTaskManager( taskResourceTrackingService ); transportCancelTasksAction = new TransportCancelTasksAction(clusterService, transportService, actionFilters); + Client mockClient = mock(Client.class); + NamedXContentRegistry namedXContentRegistry = mock(NamedXContentRegistry.class); + transportGetTaskAction = new TransportGetTaskAction( + threadPool, + transportService, + actionFilters, + clusterService, + mockClient, + namedXContentRegistry, + taskResourceTrackingService + ); transportService.acceptIncomingRequests(); } @@ -255,6 +273,7 @@ protected TaskManager createTaskManager( private final SetOnce<DiscoveryNode> discoveryNode = new SetOnce<>(); public final TransportListTasksAction transportListTasksAction; public final TransportCancelTasksAction transportCancelTasksAction; + public final TransportGetTaskAction transportGetTaskAction; @Override public void close() { diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskTests.java index 84a7fdff3dfa2..34387b0fc7b7d 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskTests.java @@ -32,14 +32,14 @@ package org.opensearch.action.admin.cluster.node.tasks; import org.opensearch.action.search.SearchAction; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.tasks.TaskId; +import 
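// (Aside on the TransportGetTaskAction wiring added above: the mock(Client.class) and
// mock(NamedXContentRegistry.class) are deliberate stand-ins — presumably only the
// live-task path through TaskResourceTrackingService is exercised by these tests,
// while the client/registry pair backs the fallback that fetches a finished task from
// the task results index. A hypothetical call against the wired action:
//     GetTaskResponse resp = ActionTestUtils.executeBlocking(
//         testNode.transportGetTaskAction,
//         new GetTaskRequest().setTaskId(new TaskId(testNode.getNodeId(), taskId))
//     );
// with testNode and taskId as placeholders.)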
org.opensearch.core.tasks.resourcetracker.ResourceStats; +import org.opensearch.core.tasks.resourcetracker.ResourceStatsType; +import org.opensearch.core.tasks.resourcetracker.ResourceUsageMetric; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; import org.opensearch.tasks.TaskInfo; -import org.opensearch.tasks.ResourceUsageMetric; -import org.opensearch.tasks.ResourceStats; -import org.opensearch.tasks.ResourceStatsType; import org.opensearch.test.OpenSearchTestCase; import java.nio.charset.StandardCharsets; diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TestTaskPlugin.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TestTaskPlugin.java index b46d4fcfea2c9..e793501ab7239 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TestTaskPlugin.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TestTaskPlugin.java @@ -32,13 +32,11 @@ package org.opensearch.action.admin.cluster.node.tasks; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestBuilder; -import org.opensearch.action.ActionResponse; -import org.opensearch.action.IndicesRequest; -import org.opensearch.action.FailedNodeException; import org.opensearch.action.ActionType; +import org.opensearch.action.FailedNodeException; +import org.opensearch.action.IndicesRequest; import org.opensearch.action.TaskOperationFailure; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.nodes.BaseNodeResponse; @@ -53,28 +51,30 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.plugins.ActionPlugin; -import org.opensearch.plugins.Plugin; import org.opensearch.plugins.NetworkPlugin; +import org.opensearch.plugins.Plugin; import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportInterceptor; import org.opensearch.transport.TransportRequest; -import org.opensearch.transport.TransportService; import org.opensearch.transport.TransportRequestOptions; -import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportResponseHandler; +import org.opensearch.transport.TransportService; import java.io.IOException; import java.util.ArrayList; diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java index 
6885278b08413..0759d9a0d4640 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java @@ -31,8 +31,6 @@ package org.opensearch.action.admin.cluster.node.tasks; -import org.opensearch.action.ActionFuture; -import org.opensearch.action.ActionListener; import org.opensearch.action.FailedNodeException; import org.opensearch.action.TaskOperationFailure; import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; @@ -50,19 +48,20 @@ import org.opensearch.action.support.tasks.TransportTasksAction; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.Strings; +import org.opensearch.common.action.ActionFuture; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.settings.Settings; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; import org.opensearch.tasks.TaskInfo; import org.opensearch.test.tasks.MockTaskManager; import org.opensearch.threadpool.ThreadPool; @@ -343,7 +342,7 @@ public void onFailure(Exception e) { "local tasks [{}]", localTasks.values() .stream() - .map(t -> Strings.toString(XContentType.JSON, t.taskInfo(testNodes[0].getNodeId(), true))) + .map(t -> Strings.toString(MediaTypeRegistry.JSON, t.taskInfo(testNodes[0].getNodeId(), true))) .collect(Collectors.joining(",")) ); assertEquals(2, localTasks.size()); // all node tasks + 1 coordinating task @@ -761,7 +760,7 @@ public void testTasksToXContentGrouping() throws Exception { } private Map<String, Object> serialize(ListTasksResponse response, boolean byParents) throws IOException { - XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder(); builder.startObject(); if (byParents) { DiscoveryNodes nodes = testNodes[0].clusterService.state().nodes(); @@ -771,7 +770,7 @@ private Map<String, Object> serialize(ListTasksResponse response, boolean byPare } builder.endObject(); builder.flush(); - logger.info(Strings.toString(builder)); + logger.info(builder.toString()); return XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2(); } } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestTests.java index 4c8f525541681..8a6cc2a8cc396 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestTests.java +++ 
b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestTests.java @@ -32,8 +32,8 @@ package org.opensearch.action.admin.cluster.node.tasks.cancel; +import org.opensearch.core.tasks.TaskId; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; import org.opensearch.test.OpenSearchTestCase; import java.util.Collections; diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreRequestTests.java index 2edfa23286658..7fff55f1d1259 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreRequestTests.java @@ -8,21 +8,21 @@ package org.opensearch.action.admin.cluster.remotestore.restore; +import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.test.AbstractWireSerializingTestCase; import java.io.IOException; -import java.util.List; import java.util.ArrayList; -import java.util.Map; import java.util.Collections; +import java.util.List; +import java.util.Map; public class RestoreRemoteStoreRequestTests extends AbstractWireSerializingTestCase<RestoreRemoteStoreRequest> { private RestoreRemoteStoreRequest randomState(RestoreRemoteStoreRequest instance) { @@ -70,7 +70,7 @@ protected RestoreRemoteStoreRequest mutateInstance(RestoreRemoteStoreRequest ins public void testSource() throws IOException { RestoreRemoteStoreRequest original = createTestInstance(); XContentBuilder builder = original.toXContent(XContentFactory.jsonBuilder(), new ToXContent.MapParams(Collections.emptyMap())); - XContentParser parser = XContentType.JSON.xContent() + XContentParser parser = MediaTypeRegistry.JSON.xContent() .createParser(NamedXContentRegistry.EMPTY, null, BytesReference.bytes(builder).streamInput()); Map<String, Object> map = parser.mapOrdered(); diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsResponseTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsResponseTests.java index a476b66719d3f..8cc0982c86233 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsResponseTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsResponseTests.java @@ -8,13 +8,15 @@ package org.opensearch.action.admin.cluster.remotestore.stats; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; -import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; -import 
org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.index.remote.RemoteRefreshSegmentTracker; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.remote.RemoteSegmentTransferTracker; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -23,7 +25,12 @@ import java.util.Map; import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.compareStatsResponse; -import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createPressureTrackerStats; +import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createEmptyTranslogStats; +import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createShardRouting; +import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createStatsForNewPrimary; +import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createStatsForNewReplica; +import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createStatsForRemoteStoreRestoredPrimary; +import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createTranslogStats; import static org.opensearch.core.xcontent.ToXContent.EMPTY_PARAMS; public class RemoteStoreStatsResponseTests extends OpenSearchTestCase { @@ -43,11 +50,13 @@ public void tearDown() throws Exception { threadPool.shutdownNow(); } - public void testSerialization() throws Exception { - RemoteRefreshSegmentTracker.Stats pressureTrackerStats = createPressureTrackerStats(shardId); - RemoteStoreStats stats = new RemoteStoreStats(pressureTrackerStats); + public void testSerializationForPrimary() throws Exception { + RemoteSegmentTransferTracker.Stats mockSegmentTrackerStats = createStatsForNewPrimary(shardId); + RemoteTranslogTransferTracker.Stats mockTranslogTrackerStats = createTranslogStats(shardId); + ShardRouting primaryShardRouting = createShardRouting(shardId, true); + RemoteStoreStats primaryShardStats = new RemoteStoreStats(mockSegmentTrackerStats, mockTranslogTrackerStats, primaryShardRouting); RemoteStoreStatsResponse statsResponse = new RemoteStoreStatsResponse( - new RemoteStoreStats[] { stats }, + new RemoteStoreStats[] { primaryShardStats }, 1, 1, 0, @@ -58,15 +67,116 @@ public void testSerialization() throws Exception { statsResponse.toXContent(builder, EMPTY_PARAMS); Map<String, Object> jsonResponseObject = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()) .v2(); + Map<String, Object> metadataShardsObject = (Map<String, Object>) jsonResponseObject.get("_shards"); + assertEquals(metadataShardsObject.get("total"), 1); + assertEquals(metadataShardsObject.get("successful"), 1); + assertEquals(metadataShardsObject.get("failed"), 0); + Map<String, Object> indicesObject = (Map<String, Object>) jsonResponseObject.get("indices"); + assertTrue(indicesObject.containsKey("index")); + Map<String, Object> shardsObject = (Map) ((Map) indicesObject.get("index")).get("shards"); + ArrayList<Map<String, Object>> perShardNumberObject = 
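// (For orientation, the response shape these map lookups walk — keys taken from the
// assertions, values illustrative:
//     {
//       "_shards": { "total": 1, "successful": 1, "failed": 0 },
//       "indices": {
//         "index": {
//           "shards": { "0": [ { "routing": {...}, "segment": {...}, "translog": {...} } ] }
//         }
//       }
//     }
// each entry under "0" is one shard copy, compared via compareStatsResponse.)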
(ArrayList<Map<String, Object>>) shardsObject.get("0"); + assertEquals(perShardNumberObject.size(), 1); + Map<String, Object> perShardCopyObject = perShardNumberObject.get(0); + compareStatsResponse(perShardCopyObject, mockSegmentTrackerStats, mockTranslogTrackerStats, primaryShardRouting); + } + + public void testSerializationForBothPrimaryAndReplica() throws Exception { + RemoteSegmentTransferTracker.Stats mockPrimarySegmentTrackerStats = createStatsForNewPrimary(shardId); + RemoteSegmentTransferTracker.Stats mockReplicaSegmentTrackerStats = createStatsForNewReplica(shardId); + RemoteTranslogTransferTracker.Stats mockPrimaryTranslogTrackerStats = createTranslogStats(shardId); + RemoteTranslogTransferTracker.Stats mockReplicaTranslogTrackerStats = createEmptyTranslogStats(shardId); + ShardRouting primaryShardRouting = createShardRouting(shardId, true); + ShardRouting replicaShardRouting = createShardRouting(shardId, false); + RemoteStoreStats primaryShardStats = new RemoteStoreStats( + mockPrimarySegmentTrackerStats, + mockPrimaryTranslogTrackerStats, + primaryShardRouting + ); + RemoteStoreStats replicaShardStats = new RemoteStoreStats( + mockReplicaSegmentTrackerStats, + mockReplicaTranslogTrackerStats, + replicaShardRouting + ); + RemoteStoreStatsResponse statsResponse = new RemoteStoreStatsResponse( + new RemoteStoreStats[] { primaryShardStats, replicaShardStats }, + 2, + 2, + 0, + new ArrayList<DefaultShardOperationFailedException>() + ); + + XContentBuilder builder = XContentFactory.jsonBuilder(); + statsResponse.toXContent(builder, EMPTY_PARAMS); + Map<String, Object> jsonResponseObject = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()) + .v2(); + Map<String, Object> metadataShardsObject = (Map<String, Object>) jsonResponseObject.get("_shards"); + assertEquals(2, metadataShardsObject.get("total")); + assertEquals(2, metadataShardsObject.get("successful")); + assertEquals(0, metadataShardsObject.get("failed")); + Map<String, Object> indicesObject = (Map<String, Object>) jsonResponseObject.get("indices"); + assertTrue(indicesObject.containsKey("index")); + Map<String, Object> shardsObject = (Map) ((Map) indicesObject.get("index")).get("shards"); + ArrayList<Map<String, Object>> perShardNumberObject = (ArrayList<Map<String, Object>>) shardsObject.get("0"); + assertEquals(2, perShardNumberObject.size()); + perShardNumberObject.forEach(shardObject -> { + boolean isPrimary = (boolean) ((Map) shardObject.get(RemoteStoreStats.Fields.ROUTING)).get( + RemoteStoreStats.RoutingFields.PRIMARY + ); + if (isPrimary) { + compareStatsResponse(shardObject, mockPrimarySegmentTrackerStats, mockPrimaryTranslogTrackerStats, primaryShardRouting); + } else { + compareStatsResponse(shardObject, mockReplicaSegmentTrackerStats, mockReplicaTranslogTrackerStats, replicaShardRouting); + } + }); + } - ArrayList<Map<String, Object>> statsObjectArray = (ArrayList<Map<String, Object>>) jsonResponseObject.get("stats"); - assertEquals(statsObjectArray.size(), 1); - Map<String, Object> statsObject = statsObjectArray.get(0); - Map<String, Object> shardsObject = (Map<String, Object>) jsonResponseObject.get("_shards"); + public void testSerializationForBothRemoteStoreRestoredPrimaryAndReplica() throws Exception { + RemoteSegmentTransferTracker.Stats mockPrimarySegmentTrackerStats = createStatsForRemoteStoreRestoredPrimary(shardId); + RemoteSegmentTransferTracker.Stats mockReplicaSegmentTrackerStats = createStatsForNewReplica(shardId); + RemoteTranslogTransferTracker.Stats 
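// (The forEach above keys each serialized shard copy off its routing.primary flag
// rather than array position — presumably because the order of the two copies under
// "shards"."0" is not guaranteed — and picks the matching expected tracker stats
// accordingly.)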
mockPrimaryTranslogTrackerStats = createTranslogStats(shardId); + RemoteTranslogTransferTracker.Stats mockReplicaTranslogTrackerStats = createEmptyTranslogStats(shardId); + ShardRouting primaryShardRouting = createShardRouting(shardId, true); + ShardRouting replicaShardRouting = createShardRouting(shardId, false); + RemoteStoreStats primaryShardStats = new RemoteStoreStats( + mockPrimarySegmentTrackerStats, + mockPrimaryTranslogTrackerStats, + primaryShardRouting + ); + RemoteStoreStats replicaShardStats = new RemoteStoreStats( + mockReplicaSegmentTrackerStats, + mockReplicaTranslogTrackerStats, + replicaShardRouting + ); + RemoteStoreStatsResponse statsResponse = new RemoteStoreStatsResponse( + new RemoteStoreStats[] { primaryShardStats, replicaShardStats }, + 2, + 2, + 0, + new ArrayList<DefaultShardOperationFailedException>() + ); - assertEquals(shardsObject.get("total"), 1); - assertEquals(shardsObject.get("successful"), 1); - assertEquals(shardsObject.get("failed"), 0); - compareStatsResponse(statsObject, pressureTrackerStats); + XContentBuilder builder = XContentFactory.jsonBuilder(); + statsResponse.toXContent(builder, EMPTY_PARAMS); + Map<String, Object> jsonResponseObject = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()) + .v2(); + Map<String, Object> metadataShardsObject = (Map<String, Object>) jsonResponseObject.get("_shards"); + assertEquals(2, metadataShardsObject.get("total")); + assertEquals(2, metadataShardsObject.get("successful")); + assertEquals(0, metadataShardsObject.get("failed")); + Map<String, Object> indicesObject = (Map<String, Object>) jsonResponseObject.get("indices"); + assertTrue(indicesObject.containsKey("index")); + Map<String, Object> shardsObject = (Map) ((Map) indicesObject.get("index")).get("shards"); + ArrayList<Map<String, Object>> perShardNumberObject = (ArrayList<Map<String, Object>>) shardsObject.get("0"); + assertEquals(2, perShardNumberObject.size()); + perShardNumberObject.forEach(shardObject -> { + boolean isPrimary = (boolean) ((Map) shardObject.get(RemoteStoreStats.Fields.ROUTING)).get( + RemoteStoreStats.RoutingFields.PRIMARY + ); + if (isPrimary) { + compareStatsResponse(shardObject, mockPrimarySegmentTrackerStats, mockPrimaryTranslogTrackerStats, primaryShardRouting); + } else { + compareStatsResponse(shardObject, mockReplicaSegmentTrackerStats, mockReplicaTranslogTrackerStats, replicaShardRouting); + } + }); } } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsTestHelper.java b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsTestHelper.java index 747dc692b1d5d..5886d47a7be24 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsTestHelper.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsTestHelper.java @@ -8,80 +8,410 @@ package org.opensearch.action.admin.cluster.remotestore.stats; -import org.opensearch.index.remote.RemoteRefreshSegmentTracker; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.cluster.routing.TestShardRouting; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.remote.RemoteSegmentTransferTracker; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; +import org.opensearch.index.store.DirectoryFileTransferTracker; import java.util.Map; import static 
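// (On the helper below: the long positional constructors mirror the Stats classes'
// wire order — presumably, since the fields are not named at the call sites — and the
// factory names encode the scenario: createStatsForNewPrimary populates the upload
// side with zeroed directory transfers, createStatsForNewReplica the download side
// only, and createStatsForRemoteStoreRestoredPrimary both, since a restored primary
// first pulls segments down from the remote store before it resumes uploading.)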
org.opensearch.test.OpenSearchTestCase.assertEquals; +import static org.opensearch.test.OpenSearchTestCase.randomAlphaOfLength; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; /** * Helper utilities for Remote Store stats tests */ public class RemoteStoreStatsTestHelper { - static RemoteRefreshSegmentTracker.Stats createPressureTrackerStats(ShardId shardId) { - return new RemoteRefreshSegmentTracker.Stats(shardId, 101, 102, 100, 3, 2, 10, 5, 5, 10, 5, 5, 3, 2, 5, 2, 3, 4, 9); + static RemoteSegmentTransferTracker.Stats createStatsForNewPrimary(ShardId shardId) { + return new RemoteSegmentTransferTracker.Stats( + shardId, + 101, + 102, + 100, + 0, + 10, + 2, + 10, + 5, + 5, + 0, + 0, + 0, + 5, + 5, + 5, + 0, + 0, + 0, + 10, + createZeroDirectoryFileTransferStats() + ); } - static void compareStatsResponse(Map<String, Object> statsObject, RemoteRefreshSegmentTracker.Stats pressureTrackerStats) { - assertEquals(statsObject.get(RemoteStoreStats.Fields.SHARD_ID), pressureTrackerStats.shardId.toString()); - assertEquals(statsObject.get(RemoteStoreStats.Fields.LOCAL_REFRESH_TIMESTAMP), (int) pressureTrackerStats.localRefreshClockTimeMs); - assertEquals( - statsObject.get(RemoteStoreStats.Fields.REMOTE_REFRESH_TIMESTAMP), - (int) pressureTrackerStats.remoteRefreshClockTimeMs - ); - assertEquals(statsObject.get(RemoteStoreStats.Fields.REFRESH_TIME_LAG_IN_MILLIS), (int) pressureTrackerStats.refreshTimeLagMs); - assertEquals( - statsObject.get(RemoteStoreStats.Fields.REFRESH_LAG), - (int) (pressureTrackerStats.localRefreshNumber - pressureTrackerStats.remoteRefreshNumber) + static RemoteSegmentTransferTracker.Stats createStatsForNewReplica(ShardId shardId) { + return new RemoteSegmentTransferTracker.Stats( + shardId, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + createSampleDirectoryFileTransferStats() ); - assertEquals(statsObject.get(RemoteStoreStats.Fields.BYTES_LAG), (int) pressureTrackerStats.bytesLag); + } - assertEquals(statsObject.get(RemoteStoreStats.Fields.BACKPRESSURE_REJECTION_COUNT), (int) pressureTrackerStats.rejectionCount); - assertEquals( - statsObject.get(RemoteStoreStats.Fields.CONSECUTIVE_FAILURE_COUNT), - (int) pressureTrackerStats.consecutiveFailuresCount + static RemoteSegmentTransferTracker.Stats createStatsForRemoteStoreRestoredPrimary(ShardId shardId) { + return new RemoteSegmentTransferTracker.Stats( + shardId, + 50, + 50, + 0, + 50, + 11, + 11, + 10, + 10, + 0, + 10, + 10, + 0, + 10, + 10, + 0, + 0, + 0, + 100, + 10, + createSampleDirectoryFileTransferStats() ); + } + static DirectoryFileTransferTracker.Stats createSampleDirectoryFileTransferStats() { + return new DirectoryFileTransferTracker.Stats(10, 0, 10, 12345, 5, 5, 5, 10); + } + + static DirectoryFileTransferTracker.Stats createZeroDirectoryFileTransferStats() { + return new DirectoryFileTransferTracker.Stats(0, 0, 0, 0, 0, 0, 0, 0); + } + + static ShardRouting createShardRouting(ShardId shardId, boolean isPrimary) { + return TestShardRouting.newShardRouting(shardId, randomAlphaOfLength(4), isPrimary, ShardRoutingState.STARTED); + } + + static RemoteTranslogTransferTracker.Stats createTranslogStats(ShardId shardId) { + return new RemoteTranslogTransferTracker.Stats(shardId, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9D, 10D, 11D, 1L, 2L, 3L, 4L, 9D, 10D, 11D); + } + + static RemoteTranslogTransferTracker.Stats createEmptyTranslogStats(ShardId shardId) { + return new RemoteTranslogTransferTracker.Stats(shardId, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 
0L, 0D, 0D, 0D, 0L, 0L, 0L, 0L, 0D, 0D, 0D); + } + + static void compareStatsResponse( + Map<String, Object> statsObject, + RemoteSegmentTransferTracker.Stats segmentTransferStats, + RemoteTranslogTransferTracker.Stats translogTransferStats, + ShardRouting routing + ) { + // Compare Remote Segment Store stats assertEquals( - ((Map) statsObject.get(RemoteStoreStats.Fields.TOTAL_UPLOADS_IN_BYTES)).get(RemoteStoreStats.SubFields.STARTED), - (int) pressureTrackerStats.uploadBytesStarted - ); - assertEquals( - ((Map) statsObject.get(RemoteStoreStats.Fields.TOTAL_UPLOADS_IN_BYTES)).get(RemoteStoreStats.SubFields.SUCCEEDED), - (int) pressureTrackerStats.uploadBytesSucceeded - ); - assertEquals( - ((Map) statsObject.get(RemoteStoreStats.Fields.TOTAL_UPLOADS_IN_BYTES)).get(RemoteStoreStats.SubFields.FAILED), - (int) pressureTrackerStats.uploadBytesFailed - ); - assertEquals( - ((Map) statsObject.get(RemoteStoreStats.Fields.REMOTE_REFRESH_SIZE_IN_BYTES)).get(RemoteStoreStats.SubFields.MOVING_AVG), - pressureTrackerStats.uploadBytesMovingAverage - ); - assertEquals( - ((Map) statsObject.get(RemoteStoreStats.Fields.REMOTE_REFRESH_SIZE_IN_BYTES)).get(RemoteStoreStats.SubFields.LAST_SUCCESSFUL), - (int) pressureTrackerStats.lastSuccessfulRemoteRefreshBytes - ); - assertEquals( - ((Map) statsObject.get(RemoteStoreStats.Fields.UPLOAD_LATENCY_IN_BYTES_PER_SEC)).get(RemoteStoreStats.SubFields.MOVING_AVG), - pressureTrackerStats.uploadBytesPerSecMovingAverage - ); - assertEquals( - ((Map) statsObject.get(RemoteStoreStats.Fields.TOTAL_REMOTE_REFRESH)).get(RemoteStoreStats.SubFields.STARTED), - (int) pressureTrackerStats.totalUploadsStarted - ); - assertEquals( - ((Map) statsObject.get(RemoteStoreStats.Fields.TOTAL_REMOTE_REFRESH)).get(RemoteStoreStats.SubFields.SUCCEEDED), - (int) pressureTrackerStats.totalUploadsSucceeded + ((Map) statsObject.get(RemoteStoreStats.Fields.ROUTING)).get(RemoteStoreStats.RoutingFields.NODE_ID), + routing.currentNodeId() ); assertEquals( - ((Map) statsObject.get(RemoteStoreStats.Fields.TOTAL_REMOTE_REFRESH)).get(RemoteStoreStats.SubFields.FAILED), - (int) pressureTrackerStats.totalUploadsFailed + ((Map) statsObject.get(RemoteStoreStats.Fields.ROUTING)).get(RemoteStoreStats.RoutingFields.STATE), + routing.state().toString() ); assertEquals( - ((Map) statsObject.get(RemoteStoreStats.Fields.REMOTE_REFRESH_LATENCY_IN_MILLIS)).get(RemoteStoreStats.SubFields.MOVING_AVG), - pressureTrackerStats.uploadTimeMovingAverage + ((Map) statsObject.get(RemoteStoreStats.Fields.ROUTING)).get(RemoteStoreStats.RoutingFields.PRIMARY), + routing.primary() ); + + Map<String, Object> segment = ((Map) statsObject.get(RemoteStoreStats.Fields.SEGMENT)); + Map<String, Object> segmentDownloads = ((Map) segment.get(RemoteStoreStats.SubFields.DOWNLOAD)); + Map<String, Object> segmentUploads = ((Map) segment.get(RemoteStoreStats.SubFields.UPLOAD)); + + if (segmentTransferStats.directoryFileTransferTrackerStats.transferredBytesStarted != 0) { + assertEquals( + segmentDownloads.get(RemoteStoreStats.DownloadStatsFields.LAST_SYNC_TIMESTAMP), + (int) segmentTransferStats.directoryFileTransferTrackerStats.lastTransferTimestampMs + ); + assertEquals( + ((Map) segmentDownloads.get(RemoteStoreStats.DownloadStatsFields.TOTAL_DOWNLOAD_SIZE)).get( + RemoteStoreStats.SubFields.STARTED_BYTES + ), + (int) segmentTransferStats.directoryFileTransferTrackerStats.transferredBytesStarted + ); + assertEquals( + ((Map) segmentDownloads.get(RemoteStoreStats.DownloadStatsFields.TOTAL_DOWNLOAD_SIZE)).get( + 
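// (Why these comparisons cast to (int) or go through Long.parseLong: the expected
// values come back from XContentHelper.convertToMap after a JSON round-trip, where
// numbers small enough for an int deserialize as Integer while larger ones arrive as
// Long — so the small segment-side test values are compared as int, and the translog
// fields are normalized through toString()/Long.parseLong instead.)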
RemoteStoreStats.SubFields.SUCCEEDED_BYTES + ), + (int) segmentTransferStats.directoryFileTransferTrackerStats.transferredBytesSucceeded + ); + assertEquals( + ((Map) segmentDownloads.get(RemoteStoreStats.DownloadStatsFields.TOTAL_DOWNLOAD_SIZE)).get( + RemoteStoreStats.SubFields.FAILED_BYTES + ), + (int) segmentTransferStats.directoryFileTransferTrackerStats.transferredBytesFailed + ); + assertEquals( + ((Map) segmentDownloads.get(RemoteStoreStats.DownloadStatsFields.DOWNLOAD_SIZE_IN_BYTES)).get( + RemoteStoreStats.SubFields.LAST_SUCCESSFUL + ), + (int) segmentTransferStats.directoryFileTransferTrackerStats.lastSuccessfulTransferInBytes + ); + assertEquals( + ((Map) segmentDownloads.get(RemoteStoreStats.DownloadStatsFields.DOWNLOAD_SIZE_IN_BYTES)).get( + RemoteStoreStats.SubFields.MOVING_AVG + ), + segmentTransferStats.directoryFileTransferTrackerStats.transferredBytesMovingAverage + ); + assertEquals( + ((Map) segmentDownloads.get(RemoteStoreStats.DownloadStatsFields.DOWNLOAD_SPEED_IN_BYTES_PER_SEC)).get( + RemoteStoreStats.SubFields.MOVING_AVG + ), + segmentTransferStats.directoryFileTransferTrackerStats.transferredBytesPerSecMovingAverage + ); + } else { + assertTrue(segmentDownloads.isEmpty()); + } + + if (segmentTransferStats.totalUploadsStarted != 0) { + assertEquals( + segmentUploads.get(RemoteStoreStats.UploadStatsFields.LOCAL_REFRESH_TIMESTAMP), + (int) segmentTransferStats.localRefreshClockTimeMs + ); + assertEquals( + segmentUploads.get(RemoteStoreStats.UploadStatsFields.REMOTE_REFRESH_TIMESTAMP), + (int) segmentTransferStats.remoteRefreshClockTimeMs + ); + assertEquals( + segmentUploads.get(RemoteStoreStats.UploadStatsFields.REFRESH_TIME_LAG_IN_MILLIS), + (int) segmentTransferStats.refreshTimeLagMs + ); + assertEquals( + segmentUploads.get(RemoteStoreStats.UploadStatsFields.REFRESH_LAG), + (int) (segmentTransferStats.localRefreshNumber - segmentTransferStats.remoteRefreshNumber) + ); + assertEquals(segmentUploads.get(RemoteStoreStats.UploadStatsFields.BYTES_LAG), (int) segmentTransferStats.bytesLag); + + assertEquals( + segmentUploads.get(RemoteStoreStats.UploadStatsFields.BACKPRESSURE_REJECTION_COUNT), + (int) segmentTransferStats.rejectionCount + ); + assertEquals( + segmentUploads.get(RemoteStoreStats.UploadStatsFields.CONSECUTIVE_FAILURE_COUNT), + (int) segmentTransferStats.consecutiveFailuresCount + ); + assertEquals( + ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOAD_SIZE)).get( + RemoteStoreStats.SubFields.STARTED_BYTES + ), + (int) segmentTransferStats.uploadBytesStarted + ); + assertEquals( + ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOAD_SIZE)).get( + RemoteStoreStats.SubFields.SUCCEEDED_BYTES + ), + (int) segmentTransferStats.uploadBytesSucceeded + ); + assertEquals( + ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOAD_SIZE)).get( + RemoteStoreStats.SubFields.FAILED_BYTES + ), + (int) segmentTransferStats.uploadBytesFailed + ); + assertEquals( + ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.REMOTE_REFRESH_SIZE_IN_BYTES)).get( + RemoteStoreStats.SubFields.MOVING_AVG + ), + segmentTransferStats.uploadBytesMovingAverage + ); + assertEquals( + ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.REMOTE_REFRESH_SIZE_IN_BYTES)).get( + RemoteStoreStats.SubFields.LAST_SUCCESSFUL + ), + (int) segmentTransferStats.lastSuccessfulRemoteRefreshBytes + ); + assertEquals( + ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.UPLOAD_SPEED_IN_BYTES_PER_SEC)).get( + 
RemoteStoreStats.SubFields.MOVING_AVG + ), + segmentTransferStats.uploadBytesPerSecMovingAverage + ); + assertEquals( + ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOADS)).get(RemoteStoreStats.SubFields.STARTED), + (int) segmentTransferStats.totalUploadsStarted + ); + assertEquals( + ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOADS)).get(RemoteStoreStats.SubFields.SUCCEEDED), + (int) segmentTransferStats.totalUploadsSucceeded + ); + assertEquals( + ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOADS)).get(RemoteStoreStats.SubFields.FAILED), + (int) segmentTransferStats.totalUploadsFailed + ); + assertEquals( + ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.REMOTE_REFRESH_LATENCY_IN_MILLIS)).get( + RemoteStoreStats.SubFields.MOVING_AVG + ), + segmentTransferStats.uploadTimeMovingAverage + ); + } else { + assertTrue(segmentUploads.isEmpty()); + } + + // Compare Remote Translog Store stats + Map<?, ?> tlogStatsObj = (Map<?, ?>) statsObject.get(RemoteStoreStats.Fields.TRANSLOG); + Map<?, ?> tlogUploadStatsObj = (Map<?, ?>) tlogStatsObj.get(RemoteStoreStats.SubFields.UPLOAD); + if (translogTransferStats.totalUploadsStarted > 0) { + assertEquals( + translogTransferStats.lastSuccessfulUploadTimestamp, + Long.parseLong(tlogUploadStatsObj.get(RemoteStoreStats.UploadStatsFields.LAST_SUCCESSFUL_UPLOAD_TIMESTAMP).toString()) + ); + + assertEquals( + translogTransferStats.totalUploadsStarted, + Long.parseLong( + ((Map<?, ?>) tlogUploadStatsObj.get(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOADS)).get( + RemoteStoreStats.SubFields.STARTED + ).toString() + ) + ); + assertEquals( + translogTransferStats.totalUploadsSucceeded, + Long.parseLong( + ((Map<?, ?>) tlogUploadStatsObj.get(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOADS)).get( + RemoteStoreStats.SubFields.SUCCEEDED + ).toString() + ) + ); + assertEquals( + translogTransferStats.totalUploadsFailed, + Long.parseLong( + ((Map<?, ?>) tlogUploadStatsObj.get(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOADS)).get( + RemoteStoreStats.SubFields.FAILED + ).toString() + ) + ); + + assertEquals( + translogTransferStats.uploadBytesStarted, + Long.parseLong( + ((Map<?, ?>) tlogUploadStatsObj.get(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOAD_SIZE)).get( + RemoteStoreStats.SubFields.STARTED_BYTES + ).toString() + ) + ); + assertEquals( + translogTransferStats.uploadBytesSucceeded, + Long.parseLong( + ((Map<?, ?>) tlogUploadStatsObj.get(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOAD_SIZE)).get( + RemoteStoreStats.SubFields.SUCCEEDED_BYTES + ).toString() + ) + ); + assertEquals( + translogTransferStats.uploadBytesFailed, + Long.parseLong( + ((Map<?, ?>) tlogUploadStatsObj.get(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOAD_SIZE)).get( + RemoteStoreStats.SubFields.FAILED_BYTES + ).toString() + ) + ); + + assertEquals( + translogTransferStats.totalUploadTimeInMillis, + Long.parseLong(tlogUploadStatsObj.get(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOAD_TIME_IN_MILLIS).toString()) + ); + + assertEquals( + translogTransferStats.uploadBytesMovingAverage, + ((Map<?, ?>) tlogUploadStatsObj.get(RemoteStoreStats.UploadStatsFields.UPLOAD_SIZE_IN_BYTES)).get( + RemoteStoreStats.SubFields.MOVING_AVG + ) + ); + assertEquals( + translogTransferStats.uploadBytesPerSecMovingAverage, + ((Map<?, ?>) tlogUploadStatsObj.get(RemoteStoreStats.UploadStatsFields.UPLOAD_SPEED_IN_BYTES_PER_SEC)).get( + RemoteStoreStats.SubFields.MOVING_AVG + ) + ); + assertEquals( + 
translogTransferStats.uploadTimeMovingAverage, + ((Map<?, ?>) tlogUploadStatsObj.get(RemoteStoreStats.UploadStatsFields.UPLOAD_TIME_IN_MILLIS)).get( + RemoteStoreStats.SubFields.MOVING_AVG + ) + ); + } else { + assertNull(tlogUploadStatsObj.get(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOADS)); + } + + Map<?, ?> tlogDownloadStatsObj = (Map<?, ?>) tlogStatsObj.get(RemoteStoreStats.SubFields.DOWNLOAD); + if (translogTransferStats.totalDownloadsSucceeded > 0) { + assertEquals( + translogTransferStats.lastSuccessfulDownloadTimestamp, + Long.parseLong(tlogDownloadStatsObj.get(RemoteStoreStats.DownloadStatsFields.LAST_SUCCESSFUL_DOWNLOAD_TIMESTAMP).toString()) + ); + assertEquals( + translogTransferStats.totalDownloadsSucceeded, + Long.parseLong( + ((Map<?, ?>) tlogDownloadStatsObj.get(RemoteStoreStats.DownloadStatsFields.TOTAL_DOWNLOADS)).get( + RemoteStoreStats.SubFields.SUCCEEDED + ).toString() + ) + ); + assertEquals( + translogTransferStats.downloadBytesSucceeded, + Long.parseLong( + ((Map<?, ?>) tlogDownloadStatsObj.get(RemoteStoreStats.DownloadStatsFields.TOTAL_DOWNLOAD_SIZE)).get( + RemoteStoreStats.SubFields.SUCCEEDED_BYTES + ).toString() + ) + ); + assertEquals( + translogTransferStats.totalDownloadTimeInMillis, + Long.parseLong(tlogDownloadStatsObj.get(RemoteStoreStats.DownloadStatsFields.TOTAL_DOWNLOAD_TIME_IN_MILLIS).toString()) + ); + + assertEquals( + translogTransferStats.downloadBytesMovingAverage, + ((Map<?, ?>) tlogDownloadStatsObj.get(RemoteStoreStats.DownloadStatsFields.DOWNLOAD_SIZE_IN_BYTES)).get( + RemoteStoreStats.SubFields.MOVING_AVG + ) + ); + assertEquals( + translogTransferStats.downloadBytesPerSecMovingAverage, + ((Map<?, ?>) tlogDownloadStatsObj.get(RemoteStoreStats.DownloadStatsFields.DOWNLOAD_SPEED_IN_BYTES_PER_SEC)).get( + RemoteStoreStats.SubFields.MOVING_AVG + ) + ); + assertEquals( + translogTransferStats.downloadTimeMovingAverage, + ((Map<?, ?>) tlogDownloadStatsObj.get(RemoteStoreStats.DownloadStatsFields.DOWNLOAD_TIME_IN_MILLIS)).get( + RemoteStoreStats.SubFields.MOVING_AVG + ) + ); + } else { + assertNull(tlogDownloadStatsObj.get(RemoteStoreStats.DownloadStatsFields.TOTAL_DOWNLOAD_SIZE)); + } } } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsTests.java index fc057b71b15f8..1c78539a00a07 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsTests.java @@ -8,14 +8,16 @@ package org.opensearch.action.admin.cluster.remotestore.stats; -import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.index.remote.RemoteRefreshSegmentTracker; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.remote.RemoteSegmentTransferTracker; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; import 
org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -24,7 +26,12 @@ import java.util.Map; import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.compareStatsResponse; -import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createPressureTrackerStats; +import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createEmptyTranslogStats; +import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createShardRouting; +import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createStatsForNewPrimary; +import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createStatsForNewReplica; +import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createStatsForRemoteStoreRestoredPrimary; +import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createTranslogStats; import static org.opensearch.core.xcontent.ToXContent.EMPTY_PARAMS; public class RemoteStoreStatsTests extends OpenSearchTestCase { @@ -44,43 +51,80 @@ public void tearDown() throws Exception { threadPool.shutdownNow(); } - public void testXContentBuilder() throws IOException { - RemoteRefreshSegmentTracker.Stats pressureTrackerStats = createPressureTrackerStats(shardId); - RemoteStoreStats stats = new RemoteStoreStats(pressureTrackerStats); + public void testXContentBuilderWithPrimaryShard() throws IOException { + RemoteSegmentTransferTracker.Stats segmentTransferStats = createStatsForNewPrimary(shardId); + RemoteTranslogTransferTracker.Stats translogTransferStats = createTranslogStats(shardId); + ShardRouting routing = createShardRouting(shardId, true); + RemoteStoreStats stats = new RemoteStoreStats(segmentTransferStats, translogTransferStats, routing); + + XContentBuilder builder = XContentFactory.jsonBuilder(); + stats.toXContent(builder, EMPTY_PARAMS); + Map<String, Object> jsonObject = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2(); + compareStatsResponse(jsonObject, segmentTransferStats, translogTransferStats, routing); + } + + public void testXContentBuilderWithReplicaShard() throws IOException { + RemoteSegmentTransferTracker.Stats segmentTransferStats = createStatsForNewReplica(shardId); + RemoteTranslogTransferTracker.Stats translogTransferStats = createEmptyTranslogStats(shardId); + ShardRouting routing = createShardRouting(shardId, false); + RemoteStoreStats stats = new RemoteStoreStats(segmentTransferStats, translogTransferStats, routing); + + XContentBuilder builder = XContentFactory.jsonBuilder(); + stats.toXContent(builder, EMPTY_PARAMS); + Map<String, Object> jsonObject = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2(); + compareStatsResponse(jsonObject, segmentTransferStats, translogTransferStats, routing); + } + + public void testXContentBuilderWithRemoteStoreRestoredShard() throws IOException { + RemoteSegmentTransferTracker.Stats segmentTransferStats = createStatsForRemoteStoreRestoredPrimary(shardId); + RemoteTranslogTransferTracker.Stats translogTransferStats = createTranslogStats(shardId); + ShardRouting routing = createShardRouting(shardId, true); + RemoteStoreStats stats = new RemoteStoreStats(segmentTransferStats, translogTransferStats, 
routing); XContentBuilder builder = XContentFactory.jsonBuilder(); stats.toXContent(builder, EMPTY_PARAMS); Map<String, Object> jsonObject = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2(); - compareStatsResponse(jsonObject, pressureTrackerStats); + compareStatsResponse(jsonObject, segmentTransferStats, translogTransferStats, routing); + } + + public void testSerializationForPrimaryShard() throws Exception { + RemoteSegmentTransferTracker.Stats segmentTransferStats = createStatsForNewPrimary(shardId); + RemoteTranslogTransferTracker.Stats translogTransferStats = createTranslogStats(shardId); + RemoteStoreStats stats = new RemoteStoreStats(segmentTransferStats, translogTransferStats, createShardRouting(shardId, true)); + try (BytesStreamOutput out = new BytesStreamOutput()) { + stats.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + RemoteStoreStats deserializedStats = new RemoteStoreStats(in); + assertEquals(stats.getSegmentStats(), deserializedStats.getSegmentStats()); + assertEquals(stats.getTranslogStats(), deserializedStats.getTranslogStats()); + } + } + } + + public void testSerializationForReplicaShard() throws Exception { + RemoteSegmentTransferTracker.Stats replicaShardStats = createStatsForNewReplica(shardId); + RemoteTranslogTransferTracker.Stats translogTransferStats = createEmptyTranslogStats(shardId); + RemoteStoreStats stats = new RemoteStoreStats(replicaShardStats, translogTransferStats, createShardRouting(shardId, false)); + try (BytesStreamOutput out = new BytesStreamOutput()) { + stats.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + RemoteStoreStats deserializedStats = new RemoteStoreStats(in); + assertEquals(stats.getSegmentStats(), deserializedStats.getSegmentStats()); + assertEquals(stats.getTranslogStats(), deserializedStats.getTranslogStats()); + } + } } - public void testSerialization() throws Exception { - RemoteRefreshSegmentTracker.Stats pressureTrackerStats = createPressureTrackerStats(shardId); - RemoteStoreStats stats = new RemoteStoreStats(pressureTrackerStats); + public void testSerializationForRemoteStoreRestoredPrimaryShard() throws Exception { + RemoteSegmentTransferTracker.Stats primaryShardStats = createStatsForRemoteStoreRestoredPrimary(shardId); + RemoteTranslogTransferTracker.Stats translogTransferStats = createTranslogStats(shardId); + RemoteStoreStats stats = new RemoteStoreStats(primaryShardStats, translogTransferStats, createShardRouting(shardId, true)); try (BytesStreamOutput out = new BytesStreamOutput()) { stats.writeTo(out); try (StreamInput in = out.bytes().streamInput()) { RemoteStoreStats deserializedStats = new RemoteStoreStats(in); - assertEquals(deserializedStats.getStats().shardId.toString(), stats.getStats().shardId.toString()); - assertEquals(deserializedStats.getStats().refreshTimeLagMs, stats.getStats().refreshTimeLagMs); - assertEquals(deserializedStats.getStats().localRefreshNumber, stats.getStats().localRefreshNumber); - assertEquals(deserializedStats.getStats().remoteRefreshNumber, stats.getStats().remoteRefreshNumber); - assertEquals(deserializedStats.getStats().uploadBytesStarted, stats.getStats().uploadBytesStarted); - assertEquals(deserializedStats.getStats().uploadBytesSucceeded, stats.getStats().uploadBytesSucceeded); - assertEquals(deserializedStats.getStats().uploadBytesFailed, stats.getStats().uploadBytesFailed); - assertEquals(deserializedStats.getStats().totalUploadsStarted, stats.getStats().totalUploadsStarted); - 
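// (The field-by-field checks being removed in this hunk are superseded by the
// equals()-based comparisons in the new tests above — assertEquals(stats.getSegmentStats(),
// deserializedStats.getSegmentStats()) and the translog counterpart — which relies on
// RemoteSegmentTransferTracker.Stats and RemoteTranslogTransferTracker.Stats defining
// value equality over every serialized field.)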
assertEquals(deserializedStats.getStats().totalUploadsFailed, stats.getStats().totalUploadsFailed); - assertEquals(deserializedStats.getStats().totalUploadsSucceeded, stats.getStats().totalUploadsSucceeded); - assertEquals(deserializedStats.getStats().rejectionCount, stats.getStats().rejectionCount); - assertEquals(deserializedStats.getStats().consecutiveFailuresCount, stats.getStats().consecutiveFailuresCount); - assertEquals(deserializedStats.getStats().uploadBytesMovingAverage, stats.getStats().uploadBytesMovingAverage, 0); - assertEquals( - deserializedStats.getStats().uploadBytesPerSecMovingAverage, - stats.getStats().uploadBytesPerSecMovingAverage, - 0 - ); - assertEquals(deserializedStats.getStats().uploadTimeMovingAverage, stats.getStats().uploadTimeMovingAverage, 0); - assertEquals(deserializedStats.getStats().bytesLag, stats.getStats().bytesLag); + assertEquals(stats.getSegmentStats(), deserializedStats.getSegmentStats()); + assertEquals(stats.getTranslogStats(), deserializedStats.getTranslogStats()); } } } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsActionTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsActionTests.java index 25e44884814a5..ed73c2ef6ace5 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsActionTests.java @@ -8,7 +8,6 @@ package org.opensearch.action.admin.cluster.remotestore.stats; -import org.mockito.Mockito; import org.opensearch.Version; import org.opensearch.action.support.ActionFilters; import org.opensearch.cluster.ClusterName; @@ -24,33 +23,36 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.index.Index; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; -import org.opensearch.index.remote.RemoteRefreshSegmentPressureService; -import org.opensearch.index.remote.RemoteRefreshSegmentTracker; +import org.opensearch.index.remote.RemoteSegmentTransferTracker; +import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; import org.opensearch.index.shard.IndexShardTestCase; import org.opensearch.indices.IndicesService; -import org.opensearch.test.FeatureFlagSetter; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.transport.MockTransport; import org.opensearch.transport.TransportService; import java.util.Collections; import java.util.stream.Collectors; +import org.mockito.Mockito; + +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_INDEX_UUID; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_INDEX_UUID; 
-import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED;
-import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY;
 
 public class TransportRemoteStoreStatsActionTests extends IndexShardTestCase {
     private IndicesService indicesService;
-    private RemoteRefreshSegmentPressureService pressureService;
+    private RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory;
     private IndexMetadata remoteStoreIndexMetadata;
     private TransportService transportService;
     private ClusterService clusterService;
@@ -64,12 +66,13 @@ public void setUp() throws Exception {
         indicesService = mock(IndicesService.class);
         IndexService indexService = mock(IndexService.class);
         clusterService = mock(ClusterService.class);
-        pressureService = mock(RemoteRefreshSegmentPressureService.class);
+        remoteStoreStatsTrackerFactory = mock(RemoteStoreStatsTrackerFactory.class);
         MockTransport mockTransport = new MockTransport();
         localNode = new DiscoveryNode("node0", buildNewFakeTransportAddress(), Version.CURRENT);
         remoteStoreIndexMetadata = IndexMetadata.builder(INDEX.getName())
             .settings(
                 settings(Version.CURRENT).put(SETTING_INDEX_UUID, INDEX.getUUID())
+                    .put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT)
                     .put(SETTING_REMOTE_STORE_ENABLED, true)
                     .put(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, "my-test-repo")
                     .build()
@@ -83,10 +86,11 @@ public void setUp() throws Exception {
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             x -> localNode,
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         );
 
-        when(pressureService.getRemoteRefreshSegmentTracker(any())).thenReturn(mock(RemoteRefreshSegmentTracker.class));
+        when(remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(any())).thenReturn(mock(RemoteSegmentTransferTracker.class));
         when(indicesService.indexService(INDEX)).thenReturn(indexService);
         when(indexService.getIndexSettings()).thenReturn(new IndexSettings(remoteStoreIndexMetadata, Settings.EMPTY));
         statsAction = new TransportRemoteStoreStatsAction(
@@ -95,7 +99,7 @@ public void setUp() throws Exception {
             indicesService,
             mock(ActionFilters.class),
             mock(IndexNameExpressionResolver.class),
-            pressureService
+            remoteStoreStatsTrackerFactory
         );
     }
 
@@ -108,8 +112,7 @@ public void tearDown() throws Exception {
         clusterService.close();
     }
 
-    public void testOnlyPrimaryShards() throws Exception {
-        FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE);
+    public void testAllShardCopies() throws Exception {
         RoutingTable routingTable = RoutingTable.builder().addAsNew(remoteStoreIndexMetadata).build();
         Metadata metadata = Metadata.builder().put(remoteStoreIndexMetadata, false).build();
         ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(routingTable).build();
@@ -125,11 +128,10 @@ public void testOnlyPrimaryShards() throws Exception {
             new String[] { INDEX.getName() }
         );
 
-        assertEquals(shardsIterator.size(), 2);
+        assertEquals(shardsIterator.size(), 4);
     }
 
     public void testOnlyLocalShards() throws Exception {
-        FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE);
         String[] concreteIndices = new String[] { INDEX.getName() };
         RoutingTable routingTable = spy(RoutingTable.builder().addAsNew(remoteStoreIndexMetadata).build());
         doReturn(new PlainShardsIterator(routingTable.allShards(INDEX.getName()).stream().map(Mockito::spy).collect(Collectors.toList())))
@@ -153,11 +155,10 @@ public void testOnlyLocalShards() throws Exception {
         remoteStoreStatsRequest.local(true);
         ShardsIterator shardsIterator = statsAction.shards(clusterService.state(), remoteStoreStatsRequest, concreteIndices);
 
-        assertEquals(shardsIterator.size(), 1);
+        assertEquals(shardsIterator.size(), 2);
     }
 
-    public void testOnlyRemoteStoreEnabledShards() throws Exception {
-        FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE);
+    public void testOnlyRemoteStoreEnabledShardCopies() throws Exception {
         Index NEW_INDEX = new Index("newIndex", "newUUID");
         IndexMetadata indexMetadataWithoutRemoteStore = IndexMetadata.builder(NEW_INDEX.getName())
             .settings(
@@ -189,6 +190,6 @@
             new String[] { INDEX.getName() }
         );
 
-        assertEquals(shardsIterator.size(), 2);
+        assertEquals(shardsIterator.size(), 4);
     }
 }
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestTests.java
index 3e9759411733e..51b3d9548c3a0 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestTests.java
@@ -31,11 +31,11 @@
 
 package org.opensearch.action.admin.cluster.repositories.put;
 
-import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.common.xcontent.XContentHelper;
+import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.common.xcontent.XContentHelper;
 import org.opensearch.repositories.fs.FsRepository;
 import org.opensearch.test.OpenSearchTestCase;
 
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java
index e92096f139d17..129475e0fa3fb 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java
@@ -32,25 +32,25 @@
 
 package org.opensearch.action.admin.cluster.reroute;
 
-import org.opensearch.action.support.master.AcknowledgedRequest;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest;
+import org.opensearch.action.support.master.AcknowledgedRequest;
 import org.opensearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand;
 import org.opensearch.cluster.routing.allocation.command.AllocateReplicaAllocationCommand;
 import org.opensearch.cluster.routing.allocation.command.AllocateStalePrimaryAllocationCommand;
 import org.opensearch.cluster.routing.allocation.command.AllocationCommand;
 import org.opensearch.cluster.routing.allocation.command.CancelAllocationCommand;
 import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand;
-import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.common.io.stream.BytesStreamOutput;
+import org.opensearch.common.network.NetworkModule;
+import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput;
 import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
 import org.opensearch.core.common.io.stream.StreamInput;
-import org.opensearch.common.network.NetworkModule;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.core.xcontent.NamedXContentRegistry;
 import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.common.xcontent.XContentFactory;
-import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.rest.RestRequest;
 import org.opensearch.rest.action.admin.cluster.RestClusterRerouteAction;
 import org.opensearch.test.OpenSearchTestCase;
@@ -212,7 +212,7 @@ private ClusterRerouteRequest roundTripThroughRestRequest(ClusterRerouteRequest
 
     private RestRequest toRestRequest(ClusterRerouteRequest original) throws IOException {
         Map<String, String> params = new HashMap<>();
-        XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values()));
+        XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values()));
         boolean hasBody = false;
         if (randomBoolean()) {
             builder.prettyPrint();
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java
index 5571eb020b9e0..d5a1d04bedc10 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java
@@ -43,12 +43,11 @@
 import org.opensearch.cluster.routing.allocation.RoutingExplanations;
 import org.opensearch.cluster.routing.allocation.command.AllocateReplicaAllocationCommand;
 import org.opensearch.cluster.routing.allocation.decider.Decision;
-import org.opensearch.common.Strings;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.transport.TransportAddress;
+import org.opensearch.common.xcontent.json.JsonXContent;
+import org.opensearch.core.common.transport.TransportAddress;
 import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.common.xcontent.json.JsonXContent;
 import org.opensearch.index.IndexSettings;
 import org.opensearch.test.OpenSearchTestCase;
 
@@ -165,7 +164,7 @@ public void testToXContent() throws IOException {
                     + " }\n"
                     + " }\n"
                     + "}",
-                Strings.toString(builder)
+                builder.toString()
             );
         }
 
@@ -204,7 +203,7 @@
                     + " }\n"
                     + " ]\n"
                     + "}",
-                Strings.toString(builder)
+                builder.toString()
             );
         }
         {
@@ -262,7 +261,7 @@
                     + " }\n"
                     + " }\n"
                     + "}",
-                Strings.toString(builder)
+                builder.toString()
             );
         }
     }
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteTests.java
index 5e535c77e80a7..bfa33628d822a 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteTests.java
@@ -32,10 +32,9 @@
 package org.opensearch.action.admin.cluster.reroute;
 
 import org.opensearch.Version;
-import org.opensearch.action.ActionListener;
 import org.opensearch.cluster.ClusterState;
-import org.opensearch.cluster.OpenSearchAllocationTestCase;
 import org.opensearch.cluster.EmptyClusterInfoService;
+import org.opensearch.cluster.OpenSearchAllocationTestCase;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.metadata.Metadata;
 import org.opensearch.cluster.node.DiscoveryNodes;
@@ -46,14 +45,15 @@
 import org.opensearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand;
 import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders;
 import org.opensearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;
-import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.common.io.stream.BytesStreamOutput;
-import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput;
-import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
-import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.common.network.NetworkModule;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.TimeValue;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.common.bytes.BytesReference;
+import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput;
+import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
+import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.snapshots.EmptySnapshotsInfoService;
 import org.opensearch.test.gateway.TestGatewayAllocator;
 
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestTests.java
index 6df0563ee1b4e..505ed8fe9f600 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestTests.java
@@ -33,10 +33,11 @@
 package org.opensearch.action.admin.cluster.settings;
 
 import org.opensearch.core.common.bytes.BytesReference;
+import org.opensearch.core.xcontent.MediaType;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.core.xcontent.XContentParseException;
 import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.XContentTestUtils;
 
@@ -59,14 +60,14 @@ public void testFromXContentWithRandomFields() throws IOException {
     private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws IOException {
         final ClusterUpdateSettingsRequest request = createTestItem();
         boolean humanReadable = randomBoolean();
-        final XContentType xContentType = XContentType.JSON;
-        BytesReference originalBytes = toShuffledXContent(request, xContentType, ToXContent.EMPTY_PARAMS, humanReadable);
+        final MediaType mediaType = MediaTypeRegistry.JSON;
+        BytesReference originalBytes = toShuffledXContent(request, mediaType, ToXContent.EMPTY_PARAMS, humanReadable);
 
         if (addRandomFields) {
             String unsupportedField = "unsupported_field";
             BytesReference mutated = BytesReference.bytes(
                 XContentTestUtils.insertIntoXContent(
-                    xContentType.xContent(),
+                    mediaType.xContent(),
                     originalBytes,
                     Collections.singletonList(""),
                     () -> unsupportedField,
@@ -75,11 +76,11 @@ private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws
             );
             XContentParseException iae = expectThrows(
                 XContentParseException.class,
-                () -> ClusterUpdateSettingsRequest.fromXContent(createParser(xContentType.xContent(), mutated))
+                () -> ClusterUpdateSettingsRequest.fromXContent(createParser(mediaType.xContent(), mutated))
             );
             assertThat(iae.getMessage(), containsString("[cluster_update_settings_request] unknown field [" + unsupportedField + "]"));
         } else {
-            try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) {
+            try (XContentParser parser = createParser(mediaType.xContent(), originalBytes)) {
                 ClusterUpdateSettingsRequest parsedRequest = ClusterUpdateSettingsRequest.fromXContent(parser);
 
                 assertNull(parser.nextToken());
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponseTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponseTests.java
index 6f01f45fc8108..3dbbd2a98bf08 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponseTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponseTests.java
@@ -32,11 +32,11 @@
 
 package org.opensearch.action.admin.cluster.settings;
 
-import org.opensearch.core.common.io.stream.Writeable;
 import org.opensearch.common.settings.ClusterSettings;
 import org.opensearch.common.settings.Setting;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.settings.Settings.Builder;
+import org.opensearch.core.common.io.stream.Writeable;
 import org.opensearch.core.xcontent.XContentParser;
 import org.opensearch.test.AbstractSerializingTestCase;
 import org.opensearch.test.VersionUtils;
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java
index a09a580de1475..2f911f30065bc 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java
@@ -38,14 +38,14 @@
 import org.opensearch.cluster.routing.ShardRoutingState;
 import org.opensearch.cluster.routing.TestShardRouting;
 import org.opensearch.common.io.stream.BytesStreamOutput;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput;
 import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
 import org.opensearch.core.common.io.stream.StreamInput;
-import org.opensearch.common.settings.Settings;
-import org.opensearch.common.transport.TransportAddress;
-import org.opensearch.core.common.Strings;
-import org.opensearch.index.query.RandomQueryBuilder;
+import org.opensearch.core.common.transport.TransportAddress;
 import org.opensearch.core.index.shard.ShardId;
+import org.opensearch.index.query.RandomQueryBuilder;
 import org.opensearch.search.SearchModule;
 import org.opensearch.search.internal.AliasFilter;
 import org.opensearch.test.OpenSearchTestCase;
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequestTests.java
index 3b50b0ff87428..21248b11e69e5 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequestTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequestTests.java
@@ -11,7 +11,7 @@
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.cluster.routing.WeightedRouting;
 import org.opensearch.core.common.bytes.BytesArray;
-import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.test.OpenSearchTestCase;
 
 import java.util.Map;
@@ -24,7 +24,7 @@ public void testSetWeightedRoutingWeight() {
         Map<String, Double> weights = Map.of("us-east-1a", 1.0, "us-east-1b", 1.0, "us-east-1c", 0.0);
         WeightedRouting weightedRouting = new WeightedRouting("zone", weights);
 
-        request.setWeightedRouting(new BytesArray(reqString), XContentType.JSON);
+        request.setWeightedRouting(new BytesArray(reqString), MediaTypeRegistry.JSON);
         assertEquals(weightedRouting, request.getWeightedRouting());
         assertEquals(1, request.getVersion());
     }
@@ -32,7 +32,7 @@ public void testSetWeightedRoutingWeight() {
     public void testValidate_ValuesAreProper() {
         String reqString = "{\"weights\":{\"us-east-1c\":\"0\",\"us-east-1b\":\"1\",\"us-east-1a\":\"1\"},\"_version\":1}";
         ClusterPutWeightedRoutingRequest request = new ClusterPutWeightedRoutingRequest("zone");
-        request.setWeightedRouting(new BytesArray(reqString), XContentType.JSON);
+        request.setWeightedRouting(new BytesArray(reqString), MediaTypeRegistry.JSON);
         ActionRequestValidationException actionRequestValidationException = request.validate();
         assertNull(actionRequestValidationException);
     }
@@ -40,7 +40,7 @@ public void testValidate_ValuesAreProper() {
     public void testValidate_MissingWeights() {
         String reqString = "{}";
         ClusterPutWeightedRoutingRequest request = new ClusterPutWeightedRoutingRequest("zone");
-        request.setWeightedRouting(new BytesArray(reqString), XContentType.JSON);
+        request.setWeightedRouting(new BytesArray(reqString), MediaTypeRegistry.JSON);
         ActionRequestValidationException actionRequestValidationException = request.validate();
         assertNotNull(actionRequestValidationException);
         assertTrue(actionRequestValidationException.getMessage().contains("Weights are missing"));
@@ -49,7 +49,7 @@ public void testValidate_MissingWeights() {
     public void testValidate_AttributeMissing() {
         String reqString = "{\"weights\":{\"us-east-1c\":\"0\",\"us-east-1b\":\"1\",\"us-east-1a\": \"1\"},\"_version\":1}";
         ClusterPutWeightedRoutingRequest request = new ClusterPutWeightedRoutingRequest();
-        request.setWeightedRouting(new BytesArray(reqString), XContentType.JSON);
+        request.setWeightedRouting(new BytesArray(reqString), MediaTypeRegistry.JSON);
         ActionRequestValidationException actionRequestValidationException = request.validate();
         assertNotNull(actionRequestValidationException);
         assertTrue(actionRequestValidationException.getMessage().contains("Attribute name is missing"));
@@ -58,7 +58,7 @@ public void testValidate_AttributeMissing() {
     public void testValidate_MoreThanHalfWithZeroWeight() {
         String reqString = "{\"weights\":{\"us-east-1c\":\"0\",\"us-east-1b\":\"0\",\"us-east-1a\": \"1\"}," + "\"_version\":1}";
         ClusterPutWeightedRoutingRequest request = new ClusterPutWeightedRoutingRequest("zone");
-        request.setWeightedRouting(new BytesArray(reqString), XContentType.JSON);
+        request.setWeightedRouting(new BytesArray(reqString), MediaTypeRegistry.JSON);
         ActionRequestValidationException actionRequestValidationException = request.validate();
         assertNotNull(actionRequestValidationException);
         assertTrue(
@@ -69,7 +69,7 @@ public void testValidate_MoreThanHalfWithZeroWeight() {
     public void testValidate_VersionMissing() {
         String reqString = "{\"weights\":{\"us-east-1c\": \"0\",\"us-east-1b\": \"1\",\"us-east-1a\": \"1\"}}";
         ClusterPutWeightedRoutingRequest request = new ClusterPutWeightedRoutingRequest("zone");
-        request.setWeightedRouting(new BytesArray(reqString), XContentType.JSON);
+        request.setWeightedRouting(new BytesArray(reqString), MediaTypeRegistry.JSON);
         ActionRequestValidationException actionRequestValidationException = request.validate();
         assertNotNull(actionRequestValidationException);
         assertTrue(actionRequestValidationException.getMessage().contains("Version is missing"));
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestTests.java
index 7c9d913951d36..818a5f4c2f502 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestTests.java
@@ -36,14 +36,14 @@
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.action.support.IndicesOptions.Option;
 import org.opensearch.action.support.IndicesOptions.WildcardStates;
-import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.common.xcontent.XContentFactory;
+import org.opensearch.core.common.bytes.BytesReference;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.core.xcontent.NamedXContentRegistry;
 import org.opensearch.core.xcontent.ToXContent.MapParams;
 import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.common.xcontent.XContentFactory;
 import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.test.OpenSearchTestCase;
 
 import java.io.IOException;
@@ -124,7 +124,7 @@ public void testToXContent() throws IOException {
         }
 
         XContentBuilder builder = original.toXContent(XContentFactory.jsonBuilder(), new MapParams(Collections.emptyMap()));
-        XContentParser parser = XContentType.JSON.xContent()
+        XContentParser parser = MediaTypeRegistry.JSON.xContent()
             .createParser(NamedXContentRegistry.EMPTY, null, BytesReference.bytes(builder).streamInput());
         Map<String, Object> map = parser.mapOrdered();
         CreateSnapshotRequest processed = new CreateSnapshotRequest((String) map.get("repository"), (String) map.get("snapshot"));
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotResponseTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotResponseTests.java
index 249c87f2eebad..274a548fd98ab 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotResponseTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotResponseTests.java
@@ -32,8 +32,8 @@
 
 package org.opensearch.action.admin.cluster.snapshots.create;
 
-import org.opensearch.core.xcontent.XContentParser;
 import org.opensearch.core.index.shard.ShardId;
+import org.opensearch.core.xcontent.XContentParser;
 import org.opensearch.snapshots.SnapshotId;
 import org.opensearch.snapshots.SnapshotInfo;
 import org.opensearch.snapshots.SnapshotInfoTests;
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsResponseTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsResponseTests.java
index 61c0173b8dec3..3ef143e36dab9 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsResponseTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsResponseTests.java
@@ -34,8 +34,8 @@
 
 import org.opensearch.common.UUIDs;
 import org.opensearch.core.common.io.stream.Writeable;
-import org.opensearch.core.xcontent.XContentParser;
 import org.opensearch.core.index.shard.ShardId;
+import org.opensearch.core.xcontent.XContentParser;
 import org.opensearch.snapshots.SnapshotId;
 import org.opensearch.snapshots.SnapshotInfo;
 import org.opensearch.snapshots.SnapshotInfoTests;
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java
index 5c20b3b262730..c3de3413edd13 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java
@@ -33,14 +33,14 @@
 package org.opensearch.action.admin.cluster.snapshots.restore;
 
 import org.opensearch.action.support.IndicesOptions;
+import org.opensearch.common.xcontent.XContentFactory;
 import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.core.common.io.stream.Writeable;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.core.xcontent.NamedXContentRegistry;
 import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.common.xcontent.XContentFactory;
 import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.test.AbstractWireSerializingTestCase;
 
 import java.io.IOException;
@@ -112,6 +112,10 @@ private RestoreSnapshotRequest randomState(RestoreSnapshotRequest instance) {
             instance.snapshotUuid(randomBoolean() ? null : randomAlphaOfLength(10));
         }
 
+        instance.storageType(
+            randomBoolean() ? RestoreSnapshotRequest.StorageType.LOCAL : RestoreSnapshotRequest.StorageType.REMOTE_SNAPSHOT
+        );
+
         if (randomBoolean()) {
             instance.setSourceRemoteStoreRepository(randomAlphaOfLengthBetween(5, 10));
         }
@@ -141,7 +145,7 @@ public void testSource() throws IOException {
         RestoreSnapshotRequest original = createTestInstance();
         original.snapshotUuid(null); // cannot be set via the REST API
         XContentBuilder builder = original.toXContent(XContentFactory.jsonBuilder(), new ToXContent.MapParams(Collections.emptyMap()));
-        XContentParser parser = XContentType.JSON.xContent()
+        XContentParser parser = MediaTypeRegistry.JSON.xContent()
             .createParser(NamedXContentRegistry.EMPTY, null, BytesReference.bytes(builder).streamInput());
         Map<String, Object> map = parser.mapOrdered();
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatusTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatusTests.java
index c6b429dee3b10..8ad973f6f819e 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatusTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatusTests.java
@@ -32,16 +32,16 @@
 
 package org.opensearch.action.admin.cluster.snapshots.status;
 
-import java.io.IOException;
-import java.util.function.Predicate;
-
 import org.opensearch.cluster.metadata.IndexMetadata;
-import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.core.xcontent.XContentParserUtils;
 import org.opensearch.core.index.Index;
 import org.opensearch.core.index.shard.ShardId;
+import org.opensearch.core.xcontent.XContentParser;
+import org.opensearch.core.xcontent.XContentParserUtils;
 import org.opensearch.test.AbstractXContentTestCase;
 
+import java.io.IOException;
+import java.util.function.Predicate;
+
 public class SnapshotIndexShardStatusTests extends AbstractXContentTestCase<SnapshotIndexShardStatus> {
 
     @Override
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexStatusTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexStatusTests.java
index 4b81bbf8b2710..52c7ca5933134 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexStatusTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotIndexStatusTests.java
@@ -32,15 +32,15 @@
 
 package org.opensearch.action.admin.cluster.snapshots.status;
 
+import org.opensearch.core.xcontent.XContentParser;
+import org.opensearch.core.xcontent.XContentParserUtils;
+import org.opensearch.test.AbstractXContentTestCase;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.function.Predicate;
 
-import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.core.xcontent.XContentParserUtils;
-import org.opensearch.test.AbstractXContentTestCase;
-
 public class SnapshotIndexStatusTests extends AbstractXContentTestCase<SnapshotIndexStatus> {
 
     @Override
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotShardsStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotShardsStatsTests.java
index 8fac317bbc83a..c5939e708d3a4 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotShardsStatsTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotShardsStatsTests.java
@@ -32,11 +32,11 @@
 
 package org.opensearch.action.admin.cluster.snapshots.status;
 
-import java.io.IOException;
-
 import org.opensearch.core.xcontent.XContentParser;
 import org.opensearch.test.AbstractXContentTestCase;
 
+import java.io.IOException;
+
 public class SnapshotShardsStatsTests extends AbstractXContentTestCase<SnapshotShardsStats> {
 
     @Override
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatsTests.java
index 3c87c90f3d6f8..262f33ec76256 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatsTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatsTests.java
@@ -32,11 +32,11 @@
 
 package org.opensearch.action.admin.cluster.snapshots.status;
 
-import java.io.IOException;
-
 import org.opensearch.core.xcontent.XContentParser;
 import org.opensearch.test.AbstractXContentTestCase;
 
+import java.io.IOException;
+
 public class SnapshotStatsTests extends AbstractXContentTestCase<SnapshotStats> {
 
     @Override
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatusTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatusTests.java
index a7d1e501cd8b4..3918e5d9b235c 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatusTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatusTests.java
@@ -34,8 +34,8 @@
 
 import org.opensearch.cluster.SnapshotsInProgress;
 import org.opensearch.common.UUIDs;
-import org.opensearch.core.xcontent.XContentParser;
 import org.opensearch.core.index.shard.ShardId;
+import org.opensearch.core.xcontent.XContentParser;
 import org.opensearch.snapshots.Snapshot;
 import org.opensearch.snapshots.SnapshotId;
 import org.opensearch.test.AbstractXContentTestCase;
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponseTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponseTests.java
index 052f67d933c05..103a13d30d836 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponseTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponseTests.java
@@ -32,14 +32,14 @@
 
 package org.opensearch.action.admin.cluster.snapshots.status;
 
+import org.opensearch.core.xcontent.XContentParser;
+import org.opensearch.test.AbstractXContentTestCase;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.function.Predicate;
 
-import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.test.AbstractXContentTestCase;
-
 public class SnapshotsStatusResponseTests extends AbstractXContentTestCase<SnapshotsStatusResponse> {
 
     @Override
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/state/ClusterStateApiTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/state/ClusterStateApiTests.java
index 039fde9455a2a..a87b50001b467 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/state/ClusterStateApiTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/state/ClusterStateApiTests.java
@@ -32,8 +32,8 @@
 
 package org.opensearch.action.admin.cluster.state;
 
-import org.opensearch.action.ActionFuture;
 import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
+import org.opensearch.common.action.ActionFuture;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.TimeValue;
 import org.opensearch.test.OpenSearchSingleNodeTestCase;
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestTests.java
index e1b8598ef9ccf..138201ccc54e1 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestTests.java
@@ -35,8 +35,8 @@
 import org.opensearch.Version;
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.common.io.stream.BytesStreamOutput;
-import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.common.unit.TimeValue;
+import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.VersionUtils;
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodesTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodesTests.java
index 627ada7092273..40a30342b86b9 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodesTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodesTests.java
@@ -38,9 +38,10 @@
 import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.common.network.NetworkModule;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.test.OpenSearchTestCase;
 
+import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.Iterator;
@@ -51,7 +52,7 @@
 
 import static java.util.Collections.emptyList;
 import static java.util.Collections.singletonList;
-import static org.opensearch.common.xcontent.XContentHelper.toXContent;
+import static org.opensearch.core.xcontent.XContentHelper.toXContent;
 import static org.hamcrest.Matchers.equalTo;
 
 public class ClusterStatsNodesTests extends OpenSearchTestCase {
@@ -62,11 +63,17 @@ public class ClusterStatsNodesTests extends OpenSearchTestCase {
      */
    public void testNetworkTypesToXContent() throws Exception {
        ClusterStatsNodes.NetworkTypes stats = new ClusterStatsNodes.NetworkTypes(emptyList());
-       assertEquals("{\"transport_types\":{},\"http_types\":{}}", toXContent(stats, XContentType.JSON, randomBoolean()).utf8ToString());
+       assertEquals(
+           "{\"transport_types\":{},\"http_types\":{}}",
+           toXContent(stats, MediaTypeRegistry.JSON, randomBoolean()).utf8ToString()
+       );
 
        List<NodeInfo> nodeInfos = singletonList(createNodeInfo("node_0", null, null));
        stats = new ClusterStatsNodes.NetworkTypes(nodeInfos);
-       assertEquals("{\"transport_types\":{},\"http_types\":{}}", toXContent(stats, XContentType.JSON, randomBoolean()).utf8ToString());
+       assertEquals(
+           "{\"transport_types\":{},\"http_types\":{}}",
+           toXContent(stats, MediaTypeRegistry.JSON, randomBoolean()).utf8ToString()
+       );
 
        nodeInfos = Arrays.asList(
            createNodeInfo("node_1", "", ""),
@@ -76,12 +83,18 @@ public void testNetworkTypesToXContent() throws Exception {
        stats = new ClusterStatsNodes.NetworkTypes(nodeInfos);
        assertEquals(
            "{" + "\"transport_types\":{\"custom\":1}," + "\"http_types\":{\"custom\":2}" + "}",
-           toXContent(stats, XContentType.JSON, randomBoolean()).utf8ToString()
+           toXContent(stats, MediaTypeRegistry.JSON, randomBoolean()).utf8ToString()
        );
    }
 
    public void testIngestStats() throws Exception {
-       NodeStats nodeStats = randomValueOtherThanMany(n -> n.getIngestStats() == null, NodeStatsTests::createNodeStats);
+       NodeStats nodeStats = randomValueOtherThanMany(n -> n.getIngestStats() == null, () -> {
+           try {
+               return NodeStatsTests.createNodeStats();
+           } catch (IOException e) {
+               throw new RuntimeException(e);
+           }
+       });
 
        SortedMap<String, long[]> processorStats = new TreeMap<>();
        nodeStats.getIngestStats().getProcessorStats().values().forEach(stats -> {
@@ -132,7 +145,7 @@ public void testIngestStats() throws Exception {
        }
        processorStatsString += "}";
        assertThat(
-           toXContent(stats, XContentType.JSON, false).utf8ToString(),
+           toXContent(stats, MediaTypeRegistry.JSON, false).utf8ToString(),
            equalTo(
                "{\"ingest\":{"
                    + "\"number_of_pipelines\":"
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptResponseTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptResponseTests.java
index 67358fade4b17..ebfd2a6761f46 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptResponseTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptResponseTests.java
@@ -33,8 +33,8 @@
 package org.opensearch.action.admin.cluster.storedscripts;
 
 import org.opensearch.core.common.io.stream.Writeable;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.script.Script;
 import org.opensearch.script.StoredScriptSource;
 import org.opensearch.test.AbstractSerializingTestCase;
@@ -70,7 +70,7 @@ private static StoredScriptSource randomScriptSource() {
         final String lang = randomFrom("lang", "painless", "mustache");
         final String source = randomAlphaOfLengthBetween(1, 10);
         final Map<String, String> options = randomBoolean()
-            ? Collections.singletonMap(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType())
+            ? Collections.singletonMap(Script.CONTENT_TYPE_OPTION, MediaTypeRegistry.JSON.mediaType())
             : Collections.emptyMap();
         return new StoredScriptSource(lang, source, options);
     }
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestTests.java
index cfdd776e60832..491ecd0b7947d 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestTests.java
@@ -32,13 +32,14 @@
 
 package org.opensearch.action.admin.cluster.storedscripts;
 
+import org.opensearch.common.io.stream.BytesStreamOutput;
+import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.core.common.bytes.BytesArray;
 import org.opensearch.core.common.bytes.BytesReference;
-import org.opensearch.common.io.stream.BytesStreamOutput;
 import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.script.StoredScriptSource;
 import org.opensearch.test.OpenSearchTestCase;
 
@@ -52,17 +53,17 @@ public void testSerialization() throws IOException {
             "bar",
             "context",
             new BytesArray("{}"),
-            XContentType.JSON,
+            MediaTypeRegistry.JSON,
            new StoredScriptSource("foo", "bar", Collections.emptyMap())
         );
 
-        assertEquals(XContentType.JSON, storedScriptRequest.mediaType());
+        assertEquals(MediaTypeRegistry.JSON, storedScriptRequest.mediaType());
 
         try (BytesStreamOutput output = new BytesStreamOutput()) {
             storedScriptRequest.writeTo(output);
 
             try (StreamInput in = output.bytes().streamInput()) {
                 PutStoredScriptRequest serialized = new PutStoredScriptRequest(in);
-                assertEquals(XContentType.JSON, serialized.mediaType());
+                assertEquals(MediaTypeRegistry.JSON, serialized.mediaType());
                 assertEquals(storedScriptRequest.id(), serialized.id());
                 assertEquals(storedScriptRequest.context(), serialized.context());
             }
diff --git a/server/src/test/java/org/opensearch/action/admin/indices/TransportAnalyzeActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/TransportAnalyzeActionTests.java
index f575be74a3e9b..479321ec633da 100644
--- a/server/src/test/java/org/opensearch/action/admin/indices/TransportAnalyzeActionTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/indices/TransportAnalyzeActionTests.java
@@ -31,9 +31,9 @@
 
 package org.opensearch.action.admin.indices;
 
+import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.tests.analysis.MockTokenFilter;
 import org.apache.lucene.tests.analysis.MockTokenizer;
-import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.util.automaton.Automata;
 import org.apache.lucene.util.automaton.CharacterRunAutomaton;
 import org.opensearch.Version;
@@ -59,8 +59,8 @@
 import org.opensearch.indices.analysis.AnalysisModule.AnalysisProvider;
 import org.opensearch.indices.analysis.AnalysisModuleTests.AppendCharFilter;
 import org.opensearch.plugins.AnalysisPlugin;
-import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.IndexSettingsModule;
+import org.opensearch.test.OpenSearchTestCase;
 
 import java.io.IOException;
 import java.io.Reader;
diff --git a/server/src/test/java/org/opensearch/action/admin/indices/alias/AliasActionsTests.java b/server/src/test/java/org/opensearch/action/admin/indices/alias/AliasActionsTests.java
index 265f02304ce00..d463782a70506 100644
--- a/server/src/test/java/org/opensearch/action/admin/indices/alias/AliasActionsTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/indices/alias/AliasActionsTests.java
@@ -33,16 +33,15 @@
 package org.opensearch.action.admin.indices.alias;
 
 import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;
-import org.opensearch.common.Strings;
-import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.common.io.stream.BytesStreamOutput;
+import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.common.xcontent.XContentFactory;
 import org.opensearch.core.xcontent.XContentParseException;
 import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.test.OpenSearchTestCase;
 
 import java.io.IOException;
@@ -195,7 +194,7 @@ public void testParseAdd() throws IOException {
             if (filter == null || filter.isEmpty()) {
                 assertNull(action.filter());
             } else {
-                assertEquals(Strings.toString(XContentFactory.contentBuilder(XContentType.JSON).map(filter)), action.filter());
+                assertEquals(MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON).map(filter).toString(), action.filter());
             }
             assertEquals(Objects.toString(searchRouting, null), action.searchRouting());
             assertEquals(Objects.toString(indexRouting, null), action.indexRouting());
@@ -242,6 +241,7 @@ public void testParseRemove() throws IOException {
         String[] indices = generateRandomStringArray(10, 5, false, false);
         String[] aliases = generateRandomStringArray(10, 5, false, false);
         XContentBuilder b = XContentBuilder.builder(randomFrom(XContentType.values()).xContent());
+        boolean mustExist = randomBoolean();
         b.startObject();
         {
             b.startObject("remove");
@@ -256,6 +256,9 @@ public void testParseRemove() throws IOException {
                 } else {
                     b.field("alias", aliases[0]);
                 }
+                if (mustExist) {
+                    b.field("must_exist", true);
+                }
             }
             b.endObject();
         }
@@ -266,6 +269,9 @@ public void testParseRemove() throws IOException {
             assertEquals(AliasActions.Type.REMOVE, action.actionType());
             assertThat(action.indices(), equalTo(indices));
             assertThat(action.aliases(), equalTo(aliases));
+            if (mustExist) {
+                assertThat(action.mustExist(), equalTo(true));
+            }
         }
     }
diff --git a/server/src/test/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequestTests.java
index d4b7075d280ba..844d98f13b6d9 100644
--- a/server/src/test/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequestTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequestTests.java
@@ -33,10 +33,10 @@
 package org.opensearch.action.admin.indices.alias;
 
 import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;
+import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.test.OpenSearchTestCase;
 
 import java.io.IOException;
diff --git a/server/src/test/java/org/opensearch/action/admin/indices/analyze/AnalyzeResponseTests.java b/server/src/test/java/org/opensearch/action/admin/indices/analyze/AnalyzeResponseTests.java
index 769b48450168c..4dfb901ce0cfd 100644
--- a/server/src/test/java/org/opensearch/action/admin/indices/analyze/AnalyzeResponseTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/indices/analyze/AnalyzeResponseTests.java
@@ -33,12 +33,12 @@
 package org.opensearch.action.admin.indices.analyze;
 
 import org.opensearch.action.admin.indices.analyze.AnalyzeAction.AnalyzeToken;
+import org.opensearch.common.xcontent.XContentHelper;
+import org.opensearch.common.xcontent.json.JsonXContent;
 import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.core.common.io.stream.Writeable.Reader;
 import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.common.xcontent.XContentHelper;
-import org.opensearch.common.xcontent.json.JsonXContent;
 import org.opensearch.test.AbstractWireSerializingTestCase;
 import org.opensearch.test.RandomObjects;
diff --git a/server/src/test/java/org/opensearch/action/admin/indices/cache/clear/TransportClearIndicesCacheActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/cache/clear/TransportClearIndicesCacheActionTests.java
index 030ee6649bba5..1abd4c06e50e7 100644
--- a/server/src/test/java/org/opensearch/action/admin/indices/cache/clear/TransportClearIndicesCacheActionTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/indices/cache/clear/TransportClearIndicesCacheActionTests.java
@@ -17,19 +17,19 @@
 import org.opensearch.cluster.metadata.IndexNameExpressionResolver;
 import org.opensearch.cluster.routing.ShardRouting;
 import org.opensearch.cluster.service.ClusterService;
-import org.opensearch.common.breaker.NoopCircuitBreaker;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.core.common.breaker.NoopCircuitBreaker;
+import org.opensearch.core.index.shard.ShardId;
+import org.opensearch.core.rest.RestStatus;
 import org.opensearch.env.Environment;
 import org.opensearch.env.NodeEnvironment;
 import org.opensearch.env.TestEnvironment;
-import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.index.shard.ShardPath;
 import org.opensearch.index.store.remote.filecache.FileCache;
 import org.opensearch.index.store.remote.filecache.FileCacheFactory;
 import org.opensearch.index.store.remote.filecache.FileCacheTests;
 import org.opensearch.indices.IndicesService;
 import org.opensearch.node.Node;
-import org.opensearch.core.rest.RestStatus;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.transport.TransportService;
diff --git a/server/src/test/java/org/opensearch/action/admin/indices/close/CloseIndexRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/close/CloseIndexRequestTests.java
index 15e04effe91c6..fec687f2faff4 100644
--- a/server/src/test/java/org/opensearch/action/admin/indices/close/CloseIndexRequestTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/indices/close/CloseIndexRequestTests.java
@@ -37,7 +37,7 @@
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.common.io.stream.BytesStreamOutput;
 import org.opensearch.core.common.io.stream.StreamInput;
-import org.opensearch.tasks.TaskId;
+import org.opensearch.core.tasks.TaskId;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.VersionUtils;
diff --git a/server/src/test/java/org/opensearch/action/admin/indices/close/CloseIndexResponseTests.java b/server/src/test/java/org/opensearch/action/admin/indices/close/CloseIndexResponseTests.java
index 8949dd65d540f..bc092da6e72be 100644
--- a/server/src/test/java/org/opensearch/action/admin/indices/close/CloseIndexResponseTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/indices/close/CloseIndexResponseTests.java
@@ -35,14 +35,15 @@
 import org.opensearch.OpenSearchException;
 import org.opensearch.action.NoShardAvailableActionException;
 import org.opensearch.action.admin.indices.close.CloseIndexResponse.IndexResult;
-import org.opensearch.common.Strings;
+import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.Writeable;
+import org.opensearch.core.index.Index;
+import org.opensearch.core.index.shard.ShardId;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.common.xcontent.XContentType;
-import org.opensearch.core.index.Index;
 import org.opensearch.index.IndexNotFoundException;
-import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.test.AbstractWireSerializingTestCase;
 import org.opensearch.transport.ActionNotFoundTransportException;
 
@@ -151,7 +152,7 @@ public void testToXContent() throws IOException {
         CloseIndexResponse closeIndexResponse = new CloseIndexResponse(true, true, Collections.singletonList(indexResult));
         assertEquals(
             "{\"acknowledged\":true,\"shards_acknowledged\":true,\"indices\":{\"test\":{\"closed\":true}}}",
-            Strings.toString(XContentType.JSON, closeIndexResponse)
+            Strings.toString(MediaTypeRegistry.JSON, closeIndexResponse)
         );
 
         CloseIndexResponse.ShardResult[] shards = new CloseIndexResponse.ShardResult[1];
@@ -168,7 +169,7 @@ public void testToXContent() throws IOException {
                 + "\"failures\":[{\"node\":\"nodeId\",\"shard\":0,\"index\":\"test\",\"status\":\"INTERNAL_SERVER_ERROR\","
                 + "\"reason\":{\"type\":\"action_not_found_transport_exception\","
                 + "\"reason\":\"No handler for action [test]\"}}]}}}}}",
-            Strings.toString(XContentType.JSON, closeIndexResponse)
+            Strings.toString(MediaTypeRegistry.JSON, closeIndexResponse)
         );
     }
diff --git a/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java
index 55e4efd8b10df..ef26bc225b0c7 100644
--- a/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java
@@ -31,12 +31,6 @@
 
 package org.opensearch.action.admin.indices.close;
 
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.mockito.ArgumentCaptor;
-import org.opensearch.action.ActionListener;
 import org.opensearch.action.admin.indices.flush.FlushRequest;
 import org.opensearch.action.support.ActionFilters;
 import org.opensearch.action.support.PlainActionFuture;
@@ -61,17 +55,23 @@
 import org.opensearch.common.SetOnce;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.TimeValue;
+import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ReplicationGroup; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.IndicesService; -import org.opensearch.tasks.TaskId; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; -import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportService; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; import java.util.Collections; import java.util.List; @@ -79,6 +79,11 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; +import org.mockito.ArgumentCaptor; + +import static org.opensearch.action.support.replication.ClusterStateCreationUtils.state; +import static org.opensearch.test.ClusterServiceUtils.createClusterService; +import static org.opensearch.test.ClusterServiceUtils.setState; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -91,9 +96,6 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import static org.opensearch.action.support.replication.ClusterStateCreationUtils.state; -import static org.opensearch.test.ClusterServiceUtils.createClusterService; -import static org.opensearch.test.ClusterServiceUtils.setState; public class TransportVerifyShardBeforeCloseActionTests extends OpenSearchTestCase { @@ -138,7 +140,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilderTests.java b/server/src/test/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilderTests.java index 8ecd60803b52d..49561d18f70f2 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilderTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilderTests.java @@ -34,9 +34,9 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.common.settings.Settings; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.client.NoOpClient; import org.junit.After; @@ -75,11 +75,11 @@ public void testSetSource() throws IOException { CreateIndexRequestBuilder builder = new CreateIndexRequestBuilder(this.testClient, CreateIndexAction.INSTANCE); OpenSearchParseException e = expectThrows(OpenSearchParseException.class, () -> { - builder.setSource("{\"" + KEY + "\" : \"" + VALUE + "\"}", XContentType.JSON); + 
builder.setSource("{\"" + KEY + "\" : \"" + VALUE + "\"}", MediaTypeRegistry.JSON); }); assertEquals(String.format(Locale.ROOT, "unknown key [%s] for create index", KEY), e.getMessage()); - builder.setSource("{\"settings\" : {\"" + KEY + "\" : \"" + VALUE + "\"}}", XContentType.JSON); + builder.setSource("{\"settings\" : {\"" + KEY + "\" : \"" + VALUE + "\"}}", MediaTypeRegistry.JSON); assertEquals(VALUE, builder.request().settings().get(KEY)); XContentBuilder xContent = XContentFactory.jsonBuilder() @@ -100,7 +100,7 @@ public void testSetSource() throws IOException { .endObject() .endObject(); doc.close(); - builder.setSource(docOut.toByteArray(), XContentType.JSON); + builder.setSource(docOut.toByteArray(), MediaTypeRegistry.JSON); assertEquals(VALUE, builder.request().settings().get(KEY)); Map<String, String> settingsMap = new HashMap<>(); @@ -117,7 +117,7 @@ public void testSetSettings() throws IOException { builder.setSettings(Settings.builder().put(KEY, VALUE)); assertEquals(VALUE, builder.request().settings().get(KEY)); - builder.setSettings("{\"" + KEY + "\" : \"" + VALUE + "\"}", XContentType.JSON); + builder.setSettings("{\"" + KEY + "\" : \"" + VALUE + "\"}", MediaTypeRegistry.JSON); assertEquals(VALUE, builder.request().settings().get(KEY)); builder.setSettings(Settings.builder().put(KEY, VALUE)); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/create/CreateIndexRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/create/CreateIndexRequestTests.java index e4114a622602c..89e072d783747 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/create/CreateIndexRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/create/CreateIndexRequestTests.java @@ -34,16 +34,15 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.action.admin.indices.alias.Alias; -import org.opensearch.common.Strings; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.index.mapper.MapperService; import org.opensearch.test.OpenSearchTestCase; @@ -51,15 +50,19 @@ import java.util.Map; import java.util.Set; +import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; public class CreateIndexRequestTests extends OpenSearchTestCase { public void testSerialization() throws IOException { CreateIndexRequest request = new CreateIndexRequest("foo"); - String mapping = Strings.toString( - JsonXContent.contentBuilder().startObject().startObject(MapperService.SINGLE_MAPPING_NAME).endObject().endObject() - ); + String mapping = JsonXContent.contentBuilder() + .startObject() + .startObject(MapperService.SINGLE_MAPPING_NAME) + .endObject() + .endObject() + .toString(); request.mapping(mapping); try (BytesStreamOutput output = new BytesStreamOutput()) { @@ -92,7 +95,7 @@ public void 
testTopLevelKeys() { CreateIndexRequest request = new CreateIndexRequest(); OpenSearchParseException e = expectThrows( OpenSearchParseException.class, - () -> { request.source(createIndex, XContentType.JSON); } + () -> { request.source(createIndex, MediaTypeRegistry.JSON); } ); assertEquals("unknown key [FOO_SHOULD_BE_ILLEGAL_HERE] for create index", e.getMessage()); } @@ -101,7 +104,7 @@ public void testMappingKeyedByType() throws IOException { CreateIndexRequest request1 = new CreateIndexRequest("foo"); CreateIndexRequest request2 = new CreateIndexRequest("bar"); { - XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); builder.startObject() .startObject("properties") .startObject("field1") @@ -117,7 +120,7 @@ public void testMappingKeyedByType() throws IOException { .endObject() .endObject(); request1.mapping(builder); - builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); builder.startObject() .startObject(MapperService.SINGLE_MAPPING_NAME) .startObject("properties") @@ -140,7 +143,7 @@ public void testMappingKeyedByType() throws IOException { } public void testSettingsType() throws IOException { - XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); builder.startObject().startArray("settings").endArray().endObject(); CreateIndexRequest parsedCreateIndexRequest = new CreateIndexRequest(); @@ -148,6 +151,20 @@ public void testSettingsType() throws IOException { assertThat(e.getMessage(), equalTo("key [settings] must be an object")); } + public void testToString() throws IOException { + CreateIndexRequest request = new CreateIndexRequest("foo"); + String mapping = JsonXContent.contentBuilder() + .startObject() + .startObject(MapperService.SINGLE_MAPPING_NAME) + .endObject() + .endObject() + .toString(); + request.mapping(mapping); + + assertThat(request.toString(), containsString("index='foo'")); + assertThat(request.toString(), containsString("mappings='{\"_doc\":{}}'")); + } + public static void assertMappingsEqual(Map<String, String> expected, Map<String, String> actual) throws IOException { assertEquals(expected.keySet(), actual.keySet()); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/create/CreateIndexResponseTests.java b/server/src/test/java/org/opensearch/action/admin/indices/create/CreateIndexResponseTests.java index d036a65bcd198..d989cbc2b3488 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/create/CreateIndexResponseTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/create/CreateIndexResponseTests.java @@ -32,10 +32,10 @@ package org.opensearch.action.admin.indices.create; -import org.opensearch.common.Strings; -import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.test.AbstractSerializingTestCase; @@ -84,13 +84,13 @@ protected CreateIndexResponse doParseInstance(XContentParser parser) { public void 
testToXContent() { CreateIndexResponse response = new CreateIndexResponse(true, false, "index_name"); - String output = Strings.toString(XContentType.JSON, response); + String output = Strings.toString(MediaTypeRegistry.JSON, response); assertEquals("{\"acknowledged\":true,\"shards_acknowledged\":false,\"index\":\"index_name\"}", output); } public void testToAndFromXContentIndexNull() throws IOException { CreateIndexResponse response = new CreateIndexResponse(true, false, null); - String output = Strings.toString(XContentType.JSON, response); + String output = Strings.toString(MediaTypeRegistry.JSON, response); assertEquals("{\"acknowledged\":true,\"shards_acknowledged\":false,\"index\":null}", output); try (XContentParser parser = createParser(JsonXContent.jsonXContent, output)) { CreateIndexResponse parsedResponse = CreateIndexResponse.fromXContent(parser); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/datastream/DataStreamsStatsResponseTests.java b/server/src/test/java/org/opensearch/action/admin/indices/datastream/DataStreamsStatsResponseTests.java index 5e4e2346a6e5b..c83263e4e22c6 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/datastream/DataStreamsStatsResponseTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/datastream/DataStreamsStatsResponseTests.java @@ -35,7 +35,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.test.AbstractWireSerializingTestCase; import java.util.ArrayList; diff --git a/server/src/test/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamRequestTests.java index 5c7dc04cc6753..25ab80ebf379f 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamRequestTests.java @@ -42,9 +42,9 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.MetadataDeleteIndexService; import org.opensearch.common.collect.Tuple; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.index.Index; import org.opensearch.snapshots.Snapshot; import org.opensearch.snapshots.SnapshotId; diff --git a/server/src/test/java/org/opensearch/action/admin/indices/datastream/GetDataStreamsRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/datastream/GetDataStreamsRequestTests.java index a8db27b1025eb..c1b1608d5f7a8 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/datastream/GetDataStreamsRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/datastream/GetDataStreamsRequestTests.java @@ -37,9 +37,9 @@ import org.opensearch.cluster.metadata.DataStream; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.common.collect.Tuple; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; +import 
org.opensearch.core.common.io.stream.Writeable; import org.opensearch.index.IndexNotFoundException; import org.opensearch.test.AbstractWireSerializingTestCase; diff --git a/server/src/test/java/org/opensearch/action/admin/indices/datastream/GetDataStreamsResponseTests.java b/server/src/test/java/org/opensearch/action/admin/indices/datastream/GetDataStreamsResponseTests.java index da38639650a61..20e77dc68da09 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/datastream/GetDataStreamsResponseTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/datastream/GetDataStreamsResponseTests.java @@ -36,6 +36,7 @@ import org.opensearch.cluster.metadata.DataStreamTests; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.test.AbstractWireSerializingTestCase; + import java.util.ArrayList; import java.util.List; diff --git a/server/src/test/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestTests.java index 87ba6110447c1..a80141c52b6b4 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/forcemerge/ForceMergeRequestTests.java @@ -31,21 +31,137 @@ package org.opensearch.action.admin.indices.forcemerge; +import org.opensearch.Version; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.VersionUtils; public class ForceMergeRequestTests extends OpenSearchTestCase { public void testDescription() { ForceMergeRequest request = new ForceMergeRequest(); - assertEquals("Force-merge indices [], maxSegments[-1], onlyExpungeDeletes[false], flush[true]", request.getDescription()); + assertEquals( + "Force-merge indices [], maxSegments[-1], onlyExpungeDeletes[false], flush[true], primaryOnly[false]", + request.getDescription() + ); request = new ForceMergeRequest("shop", "blog"); - assertEquals("Force-merge indices [shop, blog], maxSegments[-1], onlyExpungeDeletes[false], flush[true]", request.getDescription()); + assertEquals( + "Force-merge indices [shop, blog], maxSegments[-1], onlyExpungeDeletes[false], flush[true], primaryOnly[false]", + request.getDescription() + ); request = new ForceMergeRequest(); request.maxNumSegments(12); request.onlyExpungeDeletes(true); request.flush(false); - assertEquals("Force-merge indices [], maxSegments[12], onlyExpungeDeletes[true], flush[false]", request.getDescription()); + request.primaryOnly(true); + assertEquals( + "Force-merge indices [], maxSegments[12], onlyExpungeDeletes[true], flush[false], primaryOnly[true]", + request.getDescription() + ); + } + + public void testToString() { + ForceMergeRequest request = new ForceMergeRequest(); + assertEquals("ForceMergeRequest{maxNumSegments=-1, onlyExpungeDeletes=false, flush=true, primaryOnly=false}", request.toString()); + + request = new ForceMergeRequest(); + request.maxNumSegments(12); + request.onlyExpungeDeletes(true); + request.flush(false); + request.primaryOnly(true); + assertEquals("ForceMergeRequest{maxNumSegments=12, onlyExpungeDeletes=true, flush=false, primaryOnly=true}", request.toString()); + } + + public void testSerialization() throws Exception { + final ForceMergeRequest request = randomRequest(); + try (BytesStreamOutput out = new BytesStreamOutput()) { + request.writeTo(out); + 
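+ // Read the bytes written above back through ForceMergeRequest's
+ // StreamInput constructor; the assertions that follow verify each
+ // field survives the wire round trip, including the new primaryOnly flag.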
+ final ForceMergeRequest deserializedRequest; + try (StreamInput in = out.bytes().streamInput()) { + deserializedRequest = new ForceMergeRequest(in); + } + assertEquals(request.maxNumSegments(), deserializedRequest.maxNumSegments()); + assertEquals(request.onlyExpungeDeletes(), deserializedRequest.onlyExpungeDeletes()); + assertEquals(request.flush(), deserializedRequest.flush()); + assertEquals(request.primaryOnly(), deserializedRequest.primaryOnly()); + assertEquals(request.forceMergeUUID(), deserializedRequest.forceMergeUUID()); + } + } + + public void testBwcSerialization() throws Exception { + { + final ForceMergeRequest sample = randomRequest(); + final Version compatibleVersion = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.setVersion(compatibleVersion); + sample.writeTo(out); + + final ForceMergeRequest deserializedRequest; + try (StreamInput in = out.bytes().streamInput()) { + in.setVersion(Version.CURRENT); + deserializedRequest = new ForceMergeRequest(in); + } + + assertEquals(sample.maxNumSegments(), deserializedRequest.maxNumSegments()); + assertEquals(sample.onlyExpungeDeletes(), deserializedRequest.onlyExpungeDeletes()); + assertEquals(sample.flush(), deserializedRequest.flush()); + if (compatibleVersion.onOrAfter(Version.V_3_0_0)) { + assertEquals(sample.primaryOnly(), deserializedRequest.primaryOnly()); + assertEquals(sample.forceMergeUUID(), deserializedRequest.forceMergeUUID()); + } + } + } + + { + final ForceMergeRequest sample = randomRequest(); + final Version compatibleVersion = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.setVersion(Version.CURRENT); + sample.getParentTask().writeTo(out); + out.writeStringArray(sample.indices()); + sample.indicesOptions().writeIndicesOptions(out); + out.writeInt(sample.maxNumSegments()); + out.writeBoolean(sample.onlyExpungeDeletes()); + out.writeBoolean(sample.flush()); + if (compatibleVersion.onOrAfter(Version.V_3_0_0)) { + out.writeBoolean(sample.primaryOnly()); + } + if (compatibleVersion.onOrAfter(Version.V_3_0_0)) { + out.writeString(sample.forceMergeUUID()); + } else { + out.writeOptionalString(sample.forceMergeUUID()); + } + + final ForceMergeRequest deserializedRequest; + try (StreamInput in = out.bytes().streamInput()) { + in.setVersion(compatibleVersion); + deserializedRequest = new ForceMergeRequest(in); + } + + assertEquals(sample.maxNumSegments(), deserializedRequest.maxNumSegments()); + assertEquals(sample.onlyExpungeDeletes(), deserializedRequest.onlyExpungeDeletes()); + assertEquals(sample.flush(), deserializedRequest.flush()); + if (compatibleVersion.onOrAfter(Version.V_3_0_0)) { + assertEquals(sample.primaryOnly(), deserializedRequest.primaryOnly()); + } + assertEquals(sample.forceMergeUUID(), deserializedRequest.forceMergeUUID()); + + } + } + } + + private ForceMergeRequest randomRequest() { + ForceMergeRequest request = new ForceMergeRequest(); + if (randomBoolean()) { + request.maxNumSegments(randomIntBetween(1, 10)); + } + request.onlyExpungeDeletes(true); + request.flush(randomBoolean()); + request.primaryOnly(randomBoolean()); + return request; } } diff --git a/server/src/test/java/org/opensearch/action/admin/indices/forcemerge/RestForceMergeActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/forcemerge/RestForceMergeActionTests.java index 821fb60c82a25..b09e592922ed9 100644 --- 
a/server/src/test/java/org/opensearch/action/admin/indices/forcemerge/RestForceMergeActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/forcemerge/RestForceMergeActionTests.java @@ -33,10 +33,10 @@ package org.opensearch.action.admin.indices.forcemerge; import org.opensearch.client.node.NodeClient; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.admin.indices.RestForceMergeAction; import org.opensearch.test.rest.FakeRestChannel; @@ -62,7 +62,7 @@ public void testBodyRejection() throws Exception { String json = JsonXContent.contentBuilder().startObject().field("max_num_segments", 1).endObject().toString(); final FakeRestRequest request = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withContent( new BytesArray(json), - XContentType.JSON + MediaTypeRegistry.JSON ).withPath("/_forcemerge").build(); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, diff --git a/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexActionTests.java index 395cf375127a8..2d9ec2b6d3c02 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexActionTests.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.indices.get; -import org.opensearch.action.ActionListener; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.replication.ClusterStateCreationUtils; @@ -44,8 +43,10 @@ import org.opensearch.common.settings.SettingsFilter; import org.opensearch.common.settings.SettingsModule; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.Index; import org.opensearch.indices.IndicesService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; @@ -85,7 +86,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> clusterService.localNode(), null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexResponseTests.java b/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexResponseTests.java index 3719c09719081..89d47328a08ed 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexResponseTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexResponseTests.java @@ -37,9 +37,9 @@ import org.opensearch.action.admin.indices.mapping.get.GetMappingsResponseTests; import org.opensearch.cluster.metadata.AliasMetadata; import org.opensearch.cluster.metadata.MappingMetadata; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.settings.IndexScopedSettings; import 
org.opensearch.common.settings.Settings; +import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.index.RandomCreateIndexGenerator; import org.opensearch.test.AbstractWireSerializingTestCase; @@ -47,9 +47,9 @@ import java.util.Collections; import java.util.Comparator; import java.util.HashMap; -import java.util.Map; import java.util.List; import java.util.Locale; +import java.util.Map; public class GetIndexResponseTests extends AbstractWireSerializingTestCase<GetIndexResponse> { diff --git a/server/src/test/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponseTests.java b/server/src/test/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponseTests.java index 4c86b18201f1f..23cf0ce14a8bd 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponseTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponseTests.java @@ -33,12 +33,12 @@ package org.opensearch.action.admin.indices.mapping.get; import org.opensearch.action.admin.indices.mapping.get.GetFieldMappingsResponse.FieldMappingMetadata; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.test.AbstractWireSerializingTestCase; import java.io.IOException; @@ -69,7 +69,7 @@ public void testNullFieldMappingToXContent() { Map<String, Map<String, FieldMappingMetadata>> mappings = new HashMap<>(); mappings.put("index", Collections.emptyMap()); GetFieldMappingsResponse response = new GetFieldMappingsResponse(mappings); - assertEquals("{\"index\":{\"mappings\":{}}}", Strings.toString(XContentType.JSON, response)); + assertEquals("{\"index\":{\"mappings\":{}}}", Strings.toString(MediaTypeRegistry.JSON, response)); } @Override diff --git a/server/src/test/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestTests.java index 8ab4598e88af8..377e2bd0c9397 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestTests.java @@ -40,16 +40,17 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.metadata.Metadata; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import 
org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.RandomCreateIndexGenerator; import org.opensearch.test.OpenSearchTestCase; @@ -72,12 +73,12 @@ public void testValidation() { assertNotNull("source validation should fail", ex); assertTrue(ex.getMessage().contains("source is missing")); - r.source("", XContentType.JSON); + r.source("", MediaTypeRegistry.JSON); ex = r.validate(); assertNotNull("source validation should fail", ex); assertTrue(ex.getMessage().contains("source is empty")); - r.source("somevalidmapping", XContentType.JSON); + r.source("somevalidmapping", MediaTypeRegistry.JSON); ex = r.validate(); assertNull("validation should succeed", ex); @@ -113,7 +114,7 @@ public void testToXContent() throws IOException { mapping.endObject(); request.source(mapping); - String actualRequestBody = Strings.toString(XContentType.JSON, request); + String actualRequestBody = Strings.toString(MediaTypeRegistry.JSON, request); String expectedRequestBody = "{\"properties\":{\"email\":{\"type\":\"text\"}}}"; assertEquals(expectedRequestBody, actualRequestBody); } @@ -121,7 +122,7 @@ public void testToXContent() throws IOException { public void testToXContentWithEmptySource() throws IOException { PutMappingRequest request = new PutMappingRequest("foo"); - String actualRequestBody = Strings.toString(XContentType.JSON, request); + String actualRequestBody = Strings.toString(MediaTypeRegistry.JSON, request); String expectedRequestBody = "{}"; assertEquals(expectedRequestBody, actualRequestBody); } @@ -143,8 +144,8 @@ public void testToAndFromXContent() throws IOException { private void assertMappingsEqual(String expected, String actual) throws IOException { try ( - XContentParser expectedJson = createParser(XContentType.JSON.xContent(), expected); - XContentParser actualJson = createParser(XContentType.JSON.xContent(), actual) + XContentParser expectedJson = createParser(MediaTypeRegistry.JSON.xContent(), expected); + XContentParser actualJson = createParser(MediaTypeRegistry.JSON.xContent(), actual) ) { assertEquals(expectedJson.mapOrdered(), actualJson.mapOrdered()); } diff --git a/server/src/test/java/org/opensearch/action/admin/indices/resolve/ResolveIndexResponseTests.java b/server/src/test/java/org/opensearch/action/admin/indices/resolve/ResolveIndexResponseTests.java index 3e3042b58a8d5..d230c1e04ede0 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/resolve/ResolveIndexResponseTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/resolve/ResolveIndexResponseTests.java @@ -36,8 +36,8 @@ import org.opensearch.action.admin.indices.resolve.ResolveIndexAction.ResolvedDataStream; import org.opensearch.action.admin.indices.resolve.ResolveIndexAction.ResolvedIndex; import org.opensearch.action.admin.indices.resolve.ResolveIndexAction.Response; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.test.AbstractSerializingTestCase; diff --git a/server/src/test/java/org/opensearch/action/admin/indices/rollover/ConditionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/rollover/ConditionTests.java index db61cac534f89..3d32d2e8365e7 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/rollover/ConditionTests.java +++ 
b/server/src/test/java/org/opensearch/action/admin/indices/rollover/ConditionTests.java @@ -32,11 +32,11 @@ package org.opensearch.action.admin.indices.rollover; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; -import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.test.EqualsHashCodeTestUtils; +import org.opensearch.test.OpenSearchTestCase; import static org.hamcrest.Matchers.equalTo; diff --git a/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java b/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java index 89cdb84b252e3..da9a8b928a779 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java @@ -60,7 +60,6 @@ import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.CheckedFunction; -import org.opensearch.common.Strings; import org.opensearch.common.UUIDs; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.IndexScopedSettings; @@ -68,8 +67,8 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.env.Environment; import org.opensearch.core.index.Index; +import org.opensearch.env.Environment; import org.opensearch.index.IndexService; import org.opensearch.index.mapper.ContentPath; import org.opensearch.index.mapper.DateFieldMapper; @@ -111,10 +110,10 @@ import static org.hamcrest.Matchers.nullValue; import static org.mockito.AdditionalAnswers.returnsFirstArg; import static org.mockito.Mockito.any; -import static org.mockito.Mockito.same; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.same; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyNoMoreInteractions; import static org.mockito.Mockito.when; @@ -426,7 +425,7 @@ public void testDataStreamValidation() throws IOException { equalTo("aliases, mappings, and index settings may not be specified when rolling over a data stream") ); - String mapping = Strings.toString(JsonXContent.contentBuilder().startObject().startObject("_doc").endObject().endObject()); + String mapping = JsonXContent.contentBuilder().startObject().startObject("_doc").endObject().endObject().toString(); CreateIndexRequest mappingReq = new CreateIndexRequest().mapping(mapping); exception = expectThrows( IllegalArgumentException.class, diff --git a/server/src/test/java/org/opensearch/action/admin/indices/rollover/RolloverRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/rollover/RolloverRequestTests.java index 4b23a68f8b383..a410c5610dc69 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/rollover/RolloverRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/rollover/RolloverRequestTests.java @@ -34,27 +34,28 @@ import org.opensearch.action.ActionRequestValidationException; import 
org.opensearch.action.admin.indices.create.CreateIndexRequest; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.xcontent.XContentParseException; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.mapper.MapperService; import org.opensearch.indices.IndicesModule; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.XContentTestUtils; +import org.junit.Before; import java.io.IOException; -import org.junit.Before; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -146,7 +147,7 @@ public void testTypelessMappingParsing() throws Exception { String mapping = createIndexRequest.mappings(); assertNotNull(mapping); - Map<String, Object> parsedMapping = XContentHelper.convertToMap(new BytesArray(mapping), false, XContentType.JSON).v2(); + Map<String, Object> parsedMapping = XContentHelper.convertToMap(new BytesArray(mapping), false, MediaTypeRegistry.JSON).v2(); @SuppressWarnings("unchecked") Map<String, Object> properties = (Map<String, Object>) parsedMapping.get(MapperService.SINGLE_MAPPING_NAME); @@ -179,7 +180,7 @@ public void testSerialize() throws Exception { public void testUnknownFields() throws IOException { final RolloverRequest request = new RolloverRequest(); XContentType xContentType = randomFrom(XContentType.values()); - final XContentBuilder builder = XContentFactory.contentBuilder(xContentType); + final XContentBuilder builder = MediaTypeRegistry.contentBuilder(xContentType); builder.startObject(); { builder.startObject("conditions"); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/rollover/RolloverResponseTests.java b/server/src/test/java/org/opensearch/action/admin/indices/rollover/RolloverResponseTests.java index 233e6166d1caf..8f0d508f23ead 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/rollover/RolloverResponseTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/rollover/RolloverResponseTests.java @@ -32,9 +32,9 @@ package org.opensearch.action.admin.indices.rollover; -import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.test.AbstractSerializingTestCase; diff --git 
a/server/src/test/java/org/opensearch/action/admin/indices/rollover/TransportRolloverActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/rollover/TransportRolloverActionTests.java index 65d7c0fc62629..fb4e955e95ab9 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/rollover/TransportRolloverActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/rollover/TransportRolloverActionTests.java @@ -33,7 +33,6 @@ package org.opensearch.action.admin.indices.rollover; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; import org.opensearch.action.admin.indices.stats.CommonStats; import org.opensearch.action.admin.indices.stats.IndexStats; @@ -59,10 +58,12 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.set.Sets; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.cache.query.QueryCacheStats; import org.opensearch.index.cache.request.RequestCacheStats; import org.opensearch.index.engine.SegmentsStats; @@ -74,7 +75,6 @@ import org.opensearch.index.search.stats.SearchStats; import org.opensearch.index.shard.DocsStats; import org.opensearch.index.shard.IndexingStats; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.ShardPath; import org.opensearch.index.store.StoreStats; import org.opensearch.index.warmer.WarmerStats; @@ -83,7 +83,6 @@ import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; -import org.mockito.ArgumentCaptor; import java.nio.file.Path; import java.util.ArrayList; @@ -92,14 +91,16 @@ import java.util.Map; import java.util.Set; +import org.mockito.ArgumentCaptor; + import static java.util.Collections.emptyList; import static org.opensearch.action.admin.indices.rollover.TransportRolloverAction.evaluateConditions; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyBoolean; -import static org.mockito.Mockito.eq; import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; diff --git a/server/src/test/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java index 402d6439c4838..d35c821b41aa0 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java @@ -34,11 +34,11 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.common.settings.Settings; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.indices.IndexClosedException; import org.opensearch.plugins.Plugin; -import 
org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.junit.Before; import java.util.Collection; @@ -56,7 +56,7 @@ protected Collection<Class<? extends Plugin>> getPlugins() { public void setupIndex() { Settings settings = Settings.builder() // don't allow any merges so that the num docs is the expected segments - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .build(); createIndex("test", settings); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/settings/get/GetSettingsActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/settings/get/GetSettingsActionTests.java index 0d063c5969ff5..f2b6688716e70 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/settings/get/GetSettingsActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/settings/get/GetSettingsActionTests.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.indices.settings.get; -import org.opensearch.action.ActionListener; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.replication.ClusterStateCreationUtils; @@ -44,7 +43,9 @@ import org.opensearch.common.settings.SettingsFilter; import org.opensearch.common.settings.SettingsModule; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.Index; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; @@ -108,7 +109,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequestTests.java index f56bab4e5caf7..f3d3b6cff4b06 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequestTests.java @@ -32,8 +32,8 @@ package org.opensearch.action.admin.indices.settings.get; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/action/admin/indices/settings/get/GetSettingsResponseTests.java b/server/src/test/java/org/opensearch/action/admin/indices/settings/get/GetSettingsResponseTests.java index 154003e036958..a596f1f76d190 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/settings/get/GetSettingsResponseTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/settings/get/GetSettingsResponseTests.java @@ -32,9 +32,9 @@ package org.opensearch.action.admin.indices.settings.get; -import org.opensearch.core.common.io.stream.Writeable; import 
org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.RandomCreateIndexGenerator; import org.opensearch.test.AbstractSerializingTestCase; diff --git a/server/src/test/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequestSerializationTests.java b/server/src/test/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequestSerializationTests.java index ae632d22a3310..1b859923b7e22 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequestSerializationTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequestSerializationTests.java @@ -33,11 +33,11 @@ package org.opensearch.action.admin.indices.settings.put; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.Settings.Builder; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.CollectionUtils; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.test.AbstractWireSerializingTestCase; import org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java b/server/src/test/java/org/opensearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java index e1838f7605816..eb2089febd8a1 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java @@ -36,12 +36,12 @@ import org.opensearch.Version; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.UUIDs; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.transport.NodeDisconnectedException; diff --git a/server/src/test/java/org/opensearch/action/admin/indices/shrink/ResizeRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/shrink/ResizeRequestTests.java index 3d21af584ea04..6d83bebe4b994 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/shrink/ResizeRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/shrink/ResizeRequestTests.java @@ -36,11 +36,12 @@ import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.admin.indices.create.CreateIndexRequest; import org.opensearch.action.admin.indices.create.CreateIndexRequestTests; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.Strings; +import 
org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.RandomCreateIndexGenerator; import org.opensearch.test.OpenSearchTestCase; @@ -78,7 +79,7 @@ private void runTestCopySettingsValidation(final Boolean copySettings, final Con public void testToXContent() throws IOException { { ResizeRequest request = new ResizeRequest("target", "source"); - String actualRequestBody = Strings.toString(XContentType.JSON, request); + String actualRequestBody = Strings.toString(MediaTypeRegistry.JSON, request); assertEquals("{\"settings\":{},\"aliases\":{}}", actualRequestBody); } { @@ -93,7 +94,7 @@ public void testToXContent() throws IOException { settings.put(SETTING_NUMBER_OF_SHARDS, 10); target.settings(settings); request.setTargetIndex(target); - String actualRequestBody = Strings.toString(XContentType.JSON, request); + String actualRequestBody = Strings.toString(MediaTypeRegistry.JSON, request); String expectedRequestBody = "{\"settings\":{\"index\":{\"number_of_shards\":\"10\"}}," + "\"aliases\":{\"test_alias\":{\"filter\":{\"term\":{\"year\":2016}},\"routing\":\"1\",\"is_write_index\":true}}}"; assertEquals(expectedRequestBody, actualRequestBody); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/shrink/ResizeResponseTests.java b/server/src/test/java/org/opensearch/action/admin/indices/shrink/ResizeResponseTests.java index 73520f7329a00..f984c969e4b70 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/shrink/ResizeResponseTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/shrink/ResizeResponseTests.java @@ -32,9 +32,9 @@ package org.opensearch.action.admin.indices.shrink; -import org.opensearch.common.Strings; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.test.AbstractSerializingTestCase; @@ -42,7 +42,7 @@ public class ResizeResponseTests extends AbstractSerializingTestCase<ResizeRespo public void testToXContent() { ResizeResponse response = new ResizeResponse(true, false, "index_name"); - String output = Strings.toString(XContentType.JSON, response); + String output = Strings.toString(MediaTypeRegistry.JSON, response); assertEquals("{\"acknowledged\":true,\"shards_acknowledged\":false,\"index\":\"index_name\"}", output); } diff --git a/server/src/test/java/org/opensearch/action/admin/indices/shrink/TransportResizeActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/shrink/TransportResizeActionTests.java index ef49820192e9b..848df5f8e4979 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/shrink/TransportResizeActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/shrink/TransportResizeActionTests.java @@ -52,7 +52,7 @@ import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; import org.opensearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.index.shard.DocsStats; import org.opensearch.index.store.StoreStats; import 
org.opensearch.snapshots.EmptySnapshotsInfoService;
diff --git a/server/src/test/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponseTests.java b/server/src/test/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponseTests.java
index 4b1006a10af79..2b79e523fc620 100644
--- a/server/src/test/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponseTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponseTests.java
@@ -36,10 +36,10 @@
 import org.opensearch.cluster.routing.ShardRoutingState;
 import org.opensearch.cluster.routing.TestShardRouting;
 import org.opensearch.common.UUIDs;
-import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.common.xcontent.json.JsonXContent;
 import org.opensearch.core.index.Index;
 import org.opensearch.core.index.shard.ShardId;
+import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.index.shard.ShardPath;
 import org.opensearch.test.OpenSearchTestCase;
diff --git a/server/src/test/java/org/opensearch/action/admin/indices/stats/IndicesStatsTests.java b/server/src/test/java/org/opensearch/action/admin/indices/stats/IndicesStatsTests.java
index 8d16082e77533..0c4ad757efc1f 100644
--- a/server/src/test/java/org/opensearch/action/admin/indices/stats/IndicesStatsTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/indices/stats/IndicesStatsTests.java
@@ -32,15 +32,15 @@

 package org.opensearch.action.admin.indices.stats;

-import org.opensearch.action.ActionFuture;
 import org.opensearch.action.index.IndexResponse;
-import org.opensearch.core.action.support.DefaultShardOperationFailedException;
 import org.opensearch.action.support.WriteRequest.RefreshPolicy;
-import org.opensearch.common.Strings;
+import org.opensearch.common.action.ActionFuture;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.common.xcontent.XContentFactory;
-import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.action.support.DefaultShardOperationFailedException;
+import org.opensearch.core.common.Strings;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
+import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.index.IndexModule;
 import org.opensearch.index.engine.CommitStats;
 import org.opensearch.index.engine.SegmentsStats;
@@ -147,7 +147,7 @@ public void testRefreshListeners() throws Exception {
             }
             if (end - System.nanoTime() < 0) {
                 logger.info("timed out");
-                fail("didn't get a refresh listener in time: " + Strings.toString(XContentType.JSON, common));
+                fail("didn't get a refresh listener in time: " + Strings.toString(MediaTypeRegistry.JSON, common));
             }
         }
diff --git a/server/src/test/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesResponseTests.java b/server/src/test/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesResponseTests.java
index 8b41eeb90b9e4..505b89b678227 100644
--- a/server/src/test/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesResponseTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesResponseTests.java
@@ -34,8 +34,8 @@

 import org.opensearch.cluster.metadata.AliasMetadata;
 import org.opensearch.cluster.metadata.IndexTemplateMetadata;
-import org.opensearch.core.common.io.stream.Writeable;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.core.common.io.stream.Writeable;
 import org.opensearch.test.AbstractWireSerializingTestCase;

 import java.io.IOException;
diff --git a/server/src/test/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateRequestTests.java
index 077b8360edf47..501d17141bd6f 100644
--- a/server/src/test/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateRequestTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateRequestTests.java
@@ -34,12 +34,12 @@

 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.admin.indices.template.put.PutComposableIndexTemplateAction;
-import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.metadata.ComposableIndexTemplate;
 import org.opensearch.cluster.metadata.ComposableIndexTemplateTests;
+import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.metadata.Template;
-import org.opensearch.core.common.io.stream.Writeable;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.core.common.io.stream.Writeable;
 import org.opensearch.test.AbstractWireSerializingTestCase;

 import java.io.IOException;
diff --git a/server/src/test/java/org/opensearch/action/admin/indices/template/post/SimulateTemplateRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/template/post/SimulateTemplateRequestTests.java
index 5664a1909cceb..328e20a9ab508 100644
--- a/server/src/test/java/org/opensearch/action/admin/indices/template/post/SimulateTemplateRequestTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/indices/template/post/SimulateTemplateRequestTests.java
@@ -34,12 +34,12 @@

 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.admin.indices.template.put.PutComposableIndexTemplateAction;
-import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.metadata.ComposableIndexTemplate;
 import org.opensearch.cluster.metadata.ComposableIndexTemplateTests;
+import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.metadata.Template;
-import org.opensearch.core.common.io.stream.Writeable;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.core.common.io.stream.Writeable;
 import org.opensearch.test.AbstractWireSerializingTestCase;

 import java.io.IOException;
diff --git a/server/src/test/java/org/opensearch/action/admin/indices/template/put/PutComposableIndexTemplateRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/template/put/PutComposableIndexTemplateRequestTests.java
index 348015958a1e5..093228f5f1430 100644
--- a/server/src/test/java/org/opensearch/action/admin/indices/template/put/PutComposableIndexTemplateRequestTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/indices/template/put/PutComposableIndexTemplateRequestTests.java
@@ -33,12 +33,12 @@
 package org.opensearch.action.admin.indices.template.put;

 import org.opensearch.action.ActionRequestValidationException;
-import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.metadata.ComposableIndexTemplate;
 import org.opensearch.cluster.metadata.ComposableIndexTemplateTests;
+import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.metadata.Template;
-import org.opensearch.core.common.io.stream.Writeable;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.core.common.io.stream.Writeable;
 import org.opensearch.test.AbstractWireSerializingTestCase;

 import java.io.IOException;
diff --git a/server/src/test/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequestTests.java
index dbca883506b0d..d7876ea71045a 100644
--- a/server/src/test/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequestTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequestTests.java
@@ -35,10 +35,11 @@
 import org.opensearch.action.admin.indices.alias.Alias;
 import org.opensearch.common.collect.MapBuilder;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.common.xcontent.XContentFactory;
-import org.opensearch.core.xcontent.XContentParser;
 import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
+import org.opensearch.core.xcontent.XContentBuilder;
+import org.opensearch.core.xcontent.XContentParser;
 import org.opensearch.index.mapper.MapperService;
 import org.opensearch.test.AbstractXContentTestCase;
@@ -76,7 +77,7 @@ public void testMappingKeyedByType() throws IOException {
         PutIndexTemplateRequest request1 = new PutIndexTemplateRequest("foo");
         PutIndexTemplateRequest request2 = new PutIndexTemplateRequest("bar");
         {
-            XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values()));
+            XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values()));
             builder.startObject()
                 .startObject("properties")
                 .startObject("field1")
@@ -92,7 +93,7 @@ public void testMappingKeyedByType() throws IOException {
                 .endObject()
                 .endObject();
             request1.mapping(builder);
-            builder = XContentFactory.contentBuilder(randomFrom(XContentType.values()));
+            builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values()));
             builder.startObject()
                 .startObject("properties")
                 .startObject("field1")
@@ -114,8 +115,8 @@ public void testMappingKeyedByType() throws IOException {
             request1 = new PutIndexTemplateRequest("foo");
             request2 = new PutIndexTemplateRequest("bar");
             String nakedMapping = "{\"properties\": {\"foo\": {\"type\": \"integer\"}}}";
-            request1.mapping(nakedMapping, XContentType.JSON);
-            request2.mapping(nakedMapping, XContentType.JSON);
+            request1.mapping(nakedMapping, MediaTypeRegistry.JSON);
+            request2.mapping(nakedMapping, MediaTypeRegistry.JSON);
             assertEquals(request1.mappings(), request2.mappings());
         }
         {
diff --git a/server/src/test/java/org/opensearch/action/admin/indices/validate/query/TransportValidateQueryActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/validate/query/TransportValidateQueryActionTests.java
index c1c0e9517607a..40790d05c89f5 100644
--- a/server/src/test/java/org/opensearch/action/admin/indices/validate/query/TransportValidateQueryActionTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/indices/validate/query/TransportValidateQueryActionTests.java
@@ -32,7 +32,7 @@

 package org.opensearch.action.admin.indices.validate.query;

-import org.opensearch.action.ActionListener;
+import org.opensearch.core.action.ActionListener;
 import org.opensearch.test.OpenSearchSingleNodeTestCase;

 import java.util.concurrent.atomic.AtomicBoolean;
diff --git a/server/src/test/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryResponseTests.java b/server/src/test/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryResponseTests.java
index 554acd4f92ad0..4cea2c90cff33 100644
--- a/server/src/test/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryResponseTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/indices/validate/query/ValidateQueryResponseTests.java
@@ -34,8 +34,8 @@

 import org.opensearch.OpenSearchException;
 import org.opensearch.core.action.support.DefaultShardOperationFailedException;
-import org.opensearch.common.Strings;
-import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.common.Strings;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.core.xcontent.XContentParser;
 import org.opensearch.test.AbstractBroadcastResponseTestCase;
@@ -115,7 +115,7 @@ protected ValidateQueryResponse createTestInstance(
     @Override
     public void testToXContent() {
         ValidateQueryResponse response = createTestInstance(10, 10, 0, new ArrayList<>());
-        String output = Strings.toString(XContentType.JSON, response);
+        String output = Strings.toString(MediaTypeRegistry.JSON, response);
         assertEquals("{\"_shards\":{\"total\":10,\"successful\":10,\"failed\":0},\"valid\":true}", output);
     }
 }
diff --git a/server/src/test/java/org/opensearch/action/admin/indices/view/CreateViewRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/view/CreateViewRequestTests.java
new file mode 100644
index 0000000000000..e2211bb120366
--- /dev/null
+++ b/server/src/test/java/org/opensearch/action/admin/indices/view/CreateViewRequestTests.java
@@ -0,0 +1,98 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.admin.indices.view;
+
+import org.opensearch.action.ActionRequestValidationException;
+import org.opensearch.core.common.io.stream.Writeable;
+import org.opensearch.test.AbstractWireSerializingTestCase;
+import org.hamcrest.MatcherAssert;
+
+import java.util.List;
+
+import static org.hamcrest.Matchers.contains;
+import static org.hamcrest.Matchers.nullValue;
+
+public class CreateViewRequestTests extends AbstractWireSerializingTestCase<CreateViewAction.Request> {
+
+    @Override
+    protected Writeable.Reader<CreateViewAction.Request> instanceReader() {
+        return CreateViewAction.Request::new;
+    }
+
+    @Override
+    protected CreateViewAction.Request createTestInstance() {
+        return new CreateViewAction.Request(
+            randomAlphaOfLength(8),
+            randomAlphaOfLength(8),
+            randomList(5, () -> new CreateViewAction.Request.Target(randomAlphaOfLength(8)))
+        );
+    }
+
+    public void testValidateRequest() {
+        final CreateViewAction.Request request = new CreateViewAction.Request(
+            "my-view",
+            "this is a description",
+            List.of(new CreateViewAction.Request.Target("my-indices-*"))
+        );
+
+        MatcherAssert.assertThat(request.validate(), nullValue());
+    }
+
+    public void testValidateRequestWithoutName() {
+        final CreateViewAction.Request request = new CreateViewAction.Request("", null, null);
+        final ActionRequestValidationException e = request.validate();
+
+        MatcherAssert.assertThat(e.validationErrors(), contains("name cannot be empty or null", "targets cannot be empty"));
+    }
+
+    public void testSizeThresholds() {
+        final String validName = randomAlphaOfLength(8);
+        final String validDescription = randomAlphaOfLength(20);
+        final int validTargetLength = randomIntBetween(1, 5);
+        final String validIndexPattern = randomAlphaOfLength(8);
+
+        final CreateViewAction.Request requestNameTooBig = new CreateViewAction.Request(
+            randomAlphaOfLength(65),
+            validDescription,
+            randomList(1, validTargetLength, () -> new CreateViewAction.Request.Target(validIndexPattern))
+        );
+        MatcherAssert.assertThat(
+            requestNameTooBig.validate().validationErrors(),
+            contains("name must be less than 64 characters in length")
+        );
+
+        final CreateViewAction.Request requestDescriptionTooBig = new CreateViewAction.Request(
+            validName,
+            randomAlphaOfLength(257),
+            randomList(1, validTargetLength, () -> new CreateViewAction.Request.Target(validIndexPattern))
+        );
+        MatcherAssert.assertThat(
+            requestDescriptionTooBig.validate().validationErrors(),
+            contains("description must be less than 256 characters in length")
+        );
+
+        final CreateViewAction.Request requestTargetsSize = new CreateViewAction.Request(
+            validName,
+            validDescription,
+            randomList(26, 26, () -> new CreateViewAction.Request.Target(validIndexPattern))
+        );
+        MatcherAssert.assertThat(requestTargetsSize.validate().validationErrors(), contains("view cannot have more than 25 targets"));
+
+        final CreateViewAction.Request requestTargetsIndexPatternSize = new CreateViewAction.Request(
+            validName,
+            validDescription,
+            randomList(1, 1, () -> new CreateViewAction.Request.Target(randomAlphaOfLength(65)))
+        );
+        MatcherAssert.assertThat(
+            requestTargetsIndexPatternSize.validate().validationErrors(),
+            contains("target index pattern must be less than 64 characters in length")
+        );
+    }
+
+}
diff --git a/server/src/test/java/org/opensearch/action/admin/indices/view/DeleteViewRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/view/DeleteViewRequestTests.java
new file mode 100644
index 0000000000000..29305e3dfb92f
--- /dev/null
+++ b/server/src/test/java/org/opensearch/action/admin/indices/view/DeleteViewRequestTests.java
@@ -0,0 +1,44 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.admin.indices.view;
+
+import org.opensearch.action.ActionRequestValidationException;
+import org.opensearch.core.common.io.stream.Writeable;
+import org.opensearch.test.AbstractWireSerializingTestCase;
+import org.hamcrest.MatcherAssert;
+
+import static org.hamcrest.Matchers.contains;
+import static org.hamcrest.Matchers.nullValue;
+
+public class DeleteViewRequestTests extends AbstractWireSerializingTestCase<DeleteViewAction.Request> {
+
+    @Override
+    protected Writeable.Reader<DeleteViewAction.Request> instanceReader() {
+        return DeleteViewAction.Request::new;
+    }
+
+    @Override
+    protected DeleteViewAction.Request createTestInstance() {
+        return new DeleteViewAction.Request(randomAlphaOfLength(8));
+    }
+
+    public void testValidateRequest() {
+        final DeleteViewAction.Request request = new DeleteViewAction.Request("my-view");
+
+        MatcherAssert.assertThat(request.validate(), nullValue());
+    }
+
+    public void testValidateRequestWithoutName() {
+        final DeleteViewAction.Request request = new DeleteViewAction.Request("");
+        final ActionRequestValidationException e = request.validate();
+
+        MatcherAssert.assertThat(e.validationErrors(), contains("name cannot be empty or null"));
+    }
+
+}
diff --git a/server/src/test/java/org/opensearch/action/admin/indices/view/GetViewResponseTests.java b/server/src/test/java/org/opensearch/action/admin/indices/view/GetViewResponseTests.java
new file mode 100644
index 0000000000000..44dfbe5f1d781
--- /dev/null
+++ b/server/src/test/java/org/opensearch/action/admin/indices/view/GetViewResponseTests.java
@@ -0,0 +1,36 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.admin.indices.view;
+
+import org.opensearch.cluster.metadata.View;
+import org.opensearch.core.common.io.stream.Writeable;
+import org.opensearch.test.AbstractWireSerializingTestCase;
+
+import java.util.TreeSet;
+
+public class GetViewResponseTests extends AbstractWireSerializingTestCase<GetViewAction.Response> {
+
+    @Override
+    protected Writeable.Reader<GetViewAction.Response> instanceReader() {
+        return GetViewAction.Response::new;
+    }
+
+    @Override
+    protected GetViewAction.Response createTestInstance() {
+        return new GetViewAction.Response(
+            new View(
+                randomAlphaOfLength(8),
+                randomAlphaOfLength(8),
+                randomLong(),
+                randomLong(),
+                new TreeSet<>(randomList(5, () -> new View.Target(randomAlphaOfLength(8))))
+            )
+        );
+    }
+}
diff --git a/server/src/test/java/org/opensearch/action/admin/indices/view/ListViewNamesRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/view/ListViewNamesRequestTests.java
new file mode 100644
index 0000000000000..80a2827d158bb
--- /dev/null
+++ b/server/src/test/java/org/opensearch/action/admin/indices/view/ListViewNamesRequestTests.java
@@ -0,0 +1,35 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.admin.indices.view;
+
+import org.opensearch.core.common.io.stream.Writeable;
+import org.opensearch.test.AbstractWireSerializingTestCase;
+import org.hamcrest.MatcherAssert;
+
+import static org.hamcrest.Matchers.nullValue;
+
+public class ListViewNamesRequestTests extends AbstractWireSerializingTestCase<ListViewNamesAction.Request> {
+
+    @Override
+    protected Writeable.Reader<ListViewNamesAction.Request> instanceReader() {
+        return ListViewNamesAction.Request::new;
+    }
+
+    @Override
+    protected ListViewNamesAction.Request createTestInstance() {
+        return new ListViewNamesAction.Request();
+    }
+
+    public void testValidateRequest() {
+        final ListViewNamesAction.Request request = new ListViewNamesAction.Request();
+
+        MatcherAssert.assertThat(request.validate(), nullValue());
+    }
+
+}
diff --git a/server/src/test/java/org/opensearch/action/admin/indices/view/ListViewNamesResponseTests.java b/server/src/test/java/org/opensearch/action/admin/indices/view/ListViewNamesResponseTests.java
new file mode 100644
index 0000000000000..ee8409fe3c805
--- /dev/null
+++ b/server/src/test/java/org/opensearch/action/admin/indices/view/ListViewNamesResponseTests.java
@@ -0,0 +1,25 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.admin.indices.view;
+
+import org.opensearch.core.common.io.stream.Writeable;
+import org.opensearch.test.AbstractWireSerializingTestCase;
+
+public class ListViewNamesResponseTests extends AbstractWireSerializingTestCase<ListViewNamesAction.Response> {
+
+    @Override
+    protected Writeable.Reader<ListViewNamesAction.Response> instanceReader() {
+        return ListViewNamesAction.Response::new;
+    }
+
+    @Override
+    protected ListViewNamesAction.Response createTestInstance() {
+        return new ListViewNamesAction.Response(randomList(5, () -> randomAlphaOfLength(8)));
+    }
+}
diff --git a/server/src/test/java/org/opensearch/action/admin/indices/view/SearchViewRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/view/SearchViewRequestTests.java
new file mode 100644
index 0000000000000..d49c0c1a8f2bd
--- /dev/null
+++ b/server/src/test/java/org/opensearch/action/admin/indices/view/SearchViewRequestTests.java
@@ -0,0 +1,52 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.admin.indices.view;
+
+import org.opensearch.action.ActionRequestValidationException;
+import org.opensearch.action.search.SearchRequest;
+import org.opensearch.core.common.io.stream.Writeable;
+import org.opensearch.test.AbstractWireSerializingTestCase;
+import org.hamcrest.MatcherAssert;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+public class SearchViewRequestTests extends AbstractWireSerializingTestCase<SearchViewAction.Request> {
+
+    @Override
+    protected Writeable.Reader<SearchViewAction.Request> instanceReader() {
+        return SearchViewAction.Request::new;
+    }
+
+    @Override
+    protected SearchViewAction.Request createTestInstance() {
+        try {
+            return new SearchViewAction.Request(randomAlphaOfLength(8), new SearchRequest());
+        } catch (final Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    public void testValidateRequest() throws IOException {
+        final SearchViewAction.Request request = new SearchViewAction.Request("my-view", new SearchRequest());
+        MatcherAssert.assertThat(request.validate(), nullValue());
+    }
+
+    public void testValidateRequestWithoutName() {
+        final SearchViewAction.Request request = new SearchViewAction.Request((String) null, new SearchRequest());
+        final ActionRequestValidationException e = request.validate();
+
+        MatcherAssert.assertThat(e.validationErrors().size(), equalTo(1));
+        MatcherAssert.assertThat(e.validationErrors().get(0), containsString("View is required"));
+    }
+
+}
diff --git a/server/src/test/java/org/opensearch/action/admin/indices/view/ViewServiceTest.java b/server/src/test/java/org/opensearch/action/admin/indices/view/ViewServiceTest.java
new file mode 100644
index 0000000000000..91813e1336cf2
--- /dev/null
+++ b/server/src/test/java/org/opensearch/action/admin/indices/view/ViewServiceTest.java
@@ -0,0 +1,194 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.admin.indices.view;
+
+import org.opensearch.ResourceNotFoundException;
+import org.opensearch.action.search.SearchAction;
+import org.opensearch.action.search.SearchRequest;
+import org.opensearch.client.node.NodeClient;
+import org.opensearch.cluster.ClusterName;
+import org.opensearch.cluster.ClusterState;
+import org.opensearch.cluster.metadata.Metadata;
+import org.opensearch.cluster.metadata.View;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.core.action.ActionListener;
+import org.hamcrest.MatcherAssert;
+import org.junit.After;
+import org.junit.Before;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.LongSupplier;
+
+import static org.opensearch.test.OpenSearchTestCase.randomAlphaOfLength;
+import static org.hamcrest.Matchers.equalTo;
+import static org.junit.Assert.assertThrows;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.when;
+
+@SuppressWarnings("unchecked")
+public class ViewServiceTest {
+
+    private final View.Target typicalTarget = new View.Target(randomAlphaOfLength(8));
+    private final View typicalView = new View(
+        "view-" + randomAlphaOfLength(8),
+        "description " + randomAlphaOfLength(20),
+        -1L,
+        -1L,
+        Set.of(typicalTarget)
+    );
+
+    private ClusterService clusterService;
+    private NodeClient nodeClient;
+    private final AtomicLong currentTime = new AtomicLong(0);
+    private LongSupplier timeProvider = currentTime::longValue;
+    private ViewService viewService;
+
+    @Before
+    public void before() {
+        clusterService = mock(ClusterService.class);
+        nodeClient = mock(NodeClient.class);
+        timeProvider = mock(LongSupplier.class);
+        doAnswer(invocation -> currentTime.get()).when(timeProvider).getAsLong();
+        viewService = spy(new ViewService(clusterService, nodeClient, timeProvider));
+    }
+
+    @After
+    public void after() {
+        verifyNoMoreInteractions(timeProvider, clusterService, nodeClient);
+    }
+
+    private CreateViewAction.Request createTypicalViewRequest() {
+        return new CreateViewAction.Request(
+            randomAlphaOfLength(8),
+            randomAlphaOfLength(20),
+            List.of(new CreateViewAction.Request.Target(randomAlphaOfLength(8)))
+        );
+    }
+
+    public void createView() {
+        final var request = createTypicalViewRequest();
+        final var listener = mock(ActionListener.class);
+        setGetViewOrThrowExceptionToReturnTypicalView();
+
+        viewService.createView(request, listener);
+
+        verify(clusterService).submitStateUpdateTask(eq("create_view_task"), any());
+        verify(timeProvider).getAsLong();
+    }
+
+    public void updateView() {
+        final var request = createTypicalViewRequest();
+        final var listener = mock(ActionListener.class);
+        setGetViewOrThrowExceptionToReturnTypicalView();
+
+        viewService.updateView(request, listener);
+
+        verify(clusterService).submitStateUpdateTask(eq("update_view_task"), any());
+        verify(timeProvider).getAsLong();
+    }
+
+    public void updateView_doesNotExist() {
+        final var request = createTypicalViewRequest();
+        final var listener = mock(ActionListener.class);
+        doThrow(new ResourceNotFoundException("abc")).when(viewService).getViewOrThrowException(anyString());
+
+        final Exception ex = assertThrows(ResourceNotFoundException.class, () -> viewService.updateView(request, listener));
+        MatcherAssert.assertThat(ex.getMessage(), equalTo("abc"));
+    }
+
+    public void deleteView() {
+        final var request = new DeleteViewAction.Request(randomAlphaOfLength(8));
+        final var listener = mock(ActionListener.class);
+        setGetViewOrThrowExceptionToReturnTypicalView();
+
+        viewService.deleteView(request, listener);
+
+        verify(clusterService).submitStateUpdateTask(eq("delete_view_task"), any());
+    }
+
+    public void deleteView_doesNotExist() {
+        final var request = new DeleteViewAction.Request(randomAlphaOfLength(8));
+        final var listener = mock(ActionListener.class);
+        doThrow(new ResourceNotFoundException("abc")).when(viewService).getViewOrThrowException(anyString());
+
+        final ResourceNotFoundException ex = assertThrows(ResourceNotFoundException.class, () -> viewService.deleteView(request, listener));
+
+        MatcherAssert.assertThat(ex.getMessage(), equalTo("abc"));
+    }
+
+    public void getView() {
+        final var request = new GetViewAction.Request(randomAlphaOfLength(8));
+        final var listener = mock(ActionListener.class);
+        setGetViewOrThrowExceptionToReturnTypicalView();
+
+        viewService.getView(request, listener);
+
+        verify(listener).onResponse(any());
+    }
+
+    public void getView_doesNotExist() {
+        final var request = new GetViewAction.Request(randomAlphaOfLength(8));
+        final var listener = mock(ActionListener.class);
+        doThrow(new ResourceNotFoundException("abc")).when(viewService).getViewOrThrowException(anyString());
+
+        final ResourceNotFoundException ex = assertThrows(ResourceNotFoundException.class, () -> viewService.getView(request, listener));
+
+        MatcherAssert.assertThat(ex.getMessage(), equalTo("abc"));
+    }
+
+    public void listViewNames() {
+        final var clusterState = new ClusterState.Builder(new ClusterName(randomAlphaOfLength(8))).metadata(
+            new Metadata.Builder().views(Map.of(typicalView.getName(), typicalView)).build()
+        ).build();
+        final var listener = mock(ActionListener.class);
+        when(clusterService.state()).thenReturn(clusterState);
+
+        viewService.listViewNames(listener);
+
+        verify(clusterService).state();
+        verify(listener).onResponse(any());
+    }
+
+    public void listViewNames_noViews() {
+        final var clusterState = new ClusterState.Builder(new ClusterName(randomAlphaOfLength(8))).build();
+        final var listener = mock(ActionListener.class);
+        when(clusterService.state()).thenReturn(clusterState);
+
+        viewService.listViewNames(listener);
+
+        verify(clusterService).state();
+        verify(listener).onResponse(any());
+    }
+
+    public void searchView() {
+        final var request = spy(new SearchViewAction.Request(randomAlphaOfLength(8), new SearchRequest()));
+        final var listener = mock(ActionListener.class);
+        setGetViewOrThrowExceptionToReturnTypicalView();
+
+        viewService.searchView(request, listener);
+
+        verify(nodeClient).executeLocally(eq(SearchAction.INSTANCE), any(), any(ActionListener.class));
+        verify(request).indices(typicalTarget.getIndexPattern());
+    }
+
+    private void setGetViewOrThrowExceptionToReturnTypicalView() {
+        doAnswer(invocation -> typicalView).when(viewService).getViewOrThrowException(anyString());
+    }
+}
diff --git a/server/src/test/java/org/opensearch/action/bulk/BulkItemResponseTests.java b/server/src/test/java/org/opensearch/action/bulk/BulkItemResponseTests.java
index 708672193665b..cdf884ae988b3 100644
--- a/server/src/test/java/org/opensearch/action/bulk/BulkItemResponseTests.java
+++ b/server/src/test/java/org/opensearch/action/bulk/BulkItemResponseTests.java
@@ -41,11 +41,11 @@
 import org.opensearch.action.index.IndexResponseTests;
 import org.opensearch.action.update.UpdateResponse;
 import org.opensearch.action.update.UpdateResponseTests;
-import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.common.collect.Tuple;
+import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.test.OpenSearchTestCase;

 import java.io.IOException;
diff --git a/server/src/test/java/org/opensearch/action/bulk/BulkPrimaryExecutionContextTests.java b/server/src/test/java/org/opensearch/action/bulk/BulkPrimaryExecutionContextTests.java
index 4330dd68990dc..de096aee45bf9 100644
--- a/server/src/test/java/org/opensearch/action/bulk/BulkPrimaryExecutionContextTests.java
+++ b/server/src/test/java/org/opensearch/action/bulk/BulkPrimaryExecutionContextTests.java
@@ -40,9 +40,9 @@
 import org.opensearch.action.index.IndexRequest;
 import org.opensearch.action.support.WriteRequest;
 import org.opensearch.action.update.UpdateRequest;
+import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.index.engine.Engine;
 import org.opensearch.index.shard.IndexShard;
-import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.index.translog.Translog;
 import org.opensearch.test.OpenSearchTestCase;
diff --git a/server/src/test/java/org/opensearch/action/bulk/BulkProcessorTests.java b/server/src/test/java/org/opensearch/action/bulk/BulkProcessorTests.java
index 40d0df61cbb9f..6ff3ba473b5e9 100644
--- a/server/src/test/java/org/opensearch/action/bulk/BulkProcessorTests.java
+++ b/server/src/test/java/org/opensearch/action/bulk/BulkProcessorTests.java
@@ -35,14 +35,14 @@
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.opensearch.ExceptionsHelper;
-import org.opensearch.action.ActionListener;
 import org.opensearch.action.index.IndexRequest;
-import org.opensearch.core.common.bytes.BytesReference;
-import org.opensearch.common.unit.ByteSizeUnit;
-import org.opensearch.common.unit.ByteSizeValue;
 import org.opensearch.common.unit.TimeValue;
 import org.opensearch.common.util.concurrent.ThreadContext;
-import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.common.bytes.BytesReference;
+import org.opensearch.core.common.unit.ByteSizeUnit;
+import org.opensearch.core.common.unit.ByteSizeValue;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.threadpool.Scheduler;
 import org.opensearch.threadpool.TestThreadPool;
@@ -208,7 +208,7 @@ public void testConcurrentExecutions() throws Exception {
                     if (randomBoolean()) {
                         bulkProcessor.add(indexRequest);
                     } else {
-                        bulkProcessor.add(bytesReference, null, null, XContentType.JSON);
+                        bulkProcessor.add(bytesReference, null, null, MediaTypeRegistry.JSON);
                     }
                 } catch (Exception e) {
                     throw ExceptionsHelper.convertToRuntime(e);
@@ -334,7 +334,7 @@ public void testConcurrentExecutionsWithFlush() throws Exception {
                         if (randomBoolean()) {
                             bulkProcessor.add(indexRequest);
                         } else {
-                            bulkProcessor.add(bytesReference, null, null, XContentType.JSON);
+                            bulkProcessor.add(bytesReference, null, null, MediaTypeRegistry.JSON);
                         }
                     } catch (Exception e) {
                         throw ExceptionsHelper.convertToRuntime(e);
diff --git a/server/src/test/java/org/opensearch/action/bulk/BulkRequestModifierTests.java b/server/src/test/java/org/opensearch/action/bulk/BulkRequestModifierTests.java
index e68d7d7d0d447..fe6dfb7a58eae 100644
--- a/server/src/test/java/org/opensearch/action/bulk/BulkRequestModifierTests.java
+++ b/server/src/test/java/org/opensearch/action/bulk/BulkRequestModifierTests.java
@@ -32,12 +32,12 @@

 package org.opensearch.action.bulk;

-import org.opensearch.action.ActionListener;
 import org.opensearch.action.DocWriteRequest;
 import org.opensearch.action.index.IndexRequest;
 import org.opensearch.action.index.IndexResponse;
-import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.action.ActionListener;
 import org.opensearch.core.index.shard.ShardId;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.test.OpenSearchTestCase;
 import org.hamcrest.Matchers;
@@ -58,7 +58,7 @@ public void testBulkRequestModifier() {
         int numRequests = scaledRandomIntBetween(8, 64);
         BulkRequest bulkRequest = new BulkRequest();
         for (int i = 0; i < numRequests; i++) {
-            bulkRequest.add(new IndexRequest("_index").id(String.valueOf(i)).source("{}", XContentType.JSON));
+            bulkRequest.add(new IndexRequest("_index").id(String.valueOf(i)).source("{}", MediaTypeRegistry.JSON));
         }
         CaptureActionListener actionListener = new CaptureActionListener();
         TransportBulkAction.BulkRequestModifier bulkRequestModifier = new TransportBulkAction.BulkRequestModifier(bulkRequest);
diff --git a/server/src/test/java/org/opensearch/action/bulk/BulkRequestParserTests.java b/server/src/test/java/org/opensearch/action/bulk/BulkRequestParserTests.java
index e0e9ea73d9291..4f07c098b0869 100644
--- a/server/src/test/java/org/opensearch/action/bulk/BulkRequestParserTests.java
+++ b/server/src/test/java/org/opensearch/action/bulk/BulkRequestParserTests.java
@@ -34,7 +34,7 @@

 import org.opensearch.action.index.IndexRequest;
 import org.opensearch.core.common.bytes.BytesArray;
-import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.test.OpenSearchTestCase;
 import org.hamcrest.Matchers;
@@ -49,7 +49,7 @@ public void testIndexRequest() throws IOException {
         BytesArray request = new BytesArray("{ \"index\":{ \"_id\": \"bar\" } }\n{}\n");
         BulkRequestParser parser = new BulkRequestParser();
         final AtomicBoolean parsed = new AtomicBoolean();
-        parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, indexRequest -> {
+        parser.parse(request, "foo", null, null, null, null, false, MediaTypeRegistry.JSON, indexRequest -> {
             assertFalse(parsed.get());
             assertEquals("foo", indexRequest.index());
             assertEquals("bar", indexRequest.id());
@@ -58,17 +58,17 @@ public void testIndexRequest() throws IOException {
         }, req -> fail(), req -> fail());
         assertTrue(parsed.get());

-        parser.parse(request, "foo", null, null, null, true, false, XContentType.JSON, indexRequest -> {
+        parser.parse(request, "foo", null, null, null, true, false, MediaTypeRegistry.JSON, indexRequest -> {
             assertTrue(indexRequest.isRequireAlias());
         }, req -> fail(), req -> fail());

         request = new BytesArray("{ \"index\":{ \"_id\": \"bar\", \"require_alias\": true } }\n{}\n");
-        parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, indexRequest -> {
+        parser.parse(request, "foo", null, null, null, null, false, MediaTypeRegistry.JSON, indexRequest -> {
             assertTrue(indexRequest.isRequireAlias());
         }, req -> fail(), req -> fail());

         request = new BytesArray("{ \"index\":{ \"_id\": \"bar\", \"require_alias\": false } }\n{}\n");
-        parser.parse(request, "foo", null, null, null, true, false, XContentType.JSON, indexRequest -> {
+        parser.parse(request, "foo", null, null, null, true, false, MediaTypeRegistry.JSON, indexRequest -> {
             assertFalse(indexRequest.isRequireAlias());
         }, req -> fail(), req -> fail());
     }
@@ -77,7 +77,7 @@ public void testDeleteRequest() throws IOException {
         BytesArray request = new BytesArray("{ \"delete\":{ \"_id\": \"bar\" } }\n");
         BulkRequestParser parser = new BulkRequestParser();
         final AtomicBoolean parsed = new AtomicBoolean();
-        parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, req -> fail(), req -> fail(), deleteRequest -> {
+        parser.parse(request, "foo", null, null, null, null, false, MediaTypeRegistry.JSON, req -> fail(), req -> fail(), deleteRequest -> {
             assertFalse(parsed.get());
             assertEquals("foo", deleteRequest.index());
             assertEquals("bar", deleteRequest.id());
@@ -90,7 +90,7 @@ public void testUpdateRequest() throws IOException {
         BytesArray request = new BytesArray("{ \"update\":{ \"_id\": \"bar\" } }\n{}\n");
         BulkRequestParser parser = new BulkRequestParser();
         final AtomicBoolean parsed = new AtomicBoolean();
-        parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, req -> fail(), updateRequest -> {
+        parser.parse(request, "foo", null, null, null, null, false, MediaTypeRegistry.JSON, req -> fail(), updateRequest -> {
             assertFalse(parsed.get());
             assertEquals("foo", updateRequest.index());
             assertEquals("bar", updateRequest.id());
@@ -99,17 +99,17 @@ public void testUpdateRequest() throws IOException {
         }, req -> fail());
         assertTrue(parsed.get());

-        parser.parse(request, "foo", null, null, null, true, false, XContentType.JSON, req -> fail(), updateRequest -> {
+        parser.parse(request, "foo", null, null, null, true, false, MediaTypeRegistry.JSON, req -> fail(), updateRequest -> {
             assertTrue(updateRequest.isRequireAlias());
         }, req -> fail());

         request = new BytesArray("{ \"update\":{ \"_id\": \"bar\", \"require_alias\": true } }\n{}\n");
-        parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, req -> fail(), updateRequest -> {
+        parser.parse(request, "foo", null, null, null, null, false, MediaTypeRegistry.JSON, req -> fail(), updateRequest -> {
             assertTrue(updateRequest.isRequireAlias());
         }, req -> fail());

         request = new BytesArray("{ \"update\":{ \"_id\": \"bar\", \"require_alias\": false } }\n{}\n");
-        parser.parse(request, "foo", null, null, null, true, false, XContentType.JSON, req -> fail(), updateRequest -> {
+        parser.parse(request, "foo", null, null, null, true, false, MediaTypeRegistry.JSON, req -> fail(), updateRequest -> {
             assertFalse(updateRequest.isRequireAlias());
         }, req -> fail());
     }
@@ -127,7 +127,7 @@ public void testBarfOnLackOfTrailingNewline() {
             null,
             null,
             false,
-            XContentType.JSON,
+            MediaTypeRegistry.JSON,
             indexRequest -> fail(),
             req -> fail(),
             req -> fail()
@@ -142,7 +142,19 @@ public void testFailOnExplicitIndex() {

         IllegalArgumentException ex = expectThrows(
             IllegalArgumentException.class,
-            () -> parser.parse(request, null, null, null, null, null, false, XContentType.JSON, req -> fail(), req -> fail(), req -> fail())
+            () -> parser.parse(
+                request,
+                null,
+                null,
+                null,
+                null,
+                null,
+                false,
+                MediaTypeRegistry.JSON,
+                req -> fail(),
+                req -> fail(),
+                req -> fail()
+            )
         );
         assertEquals("explicit index in bulk is not allowed", ex.getMessage());
     }
@@ -162,7 +174,7 @@ public void testParseDeduplicatesParameterStrings() throws IOException {
             null,
             null,
             true,
-            XContentType.JSON,
+            MediaTypeRegistry.JSON,
             indexRequest -> indexRequests.add(indexRequest),
             req -> fail(),
             req -> fail()
@@ -189,7 +201,7 @@ public void testFailOnUnsupportedAction() {
             null,
             true,
             false,
-            XContentType.JSON,
+            MediaTypeRegistry.JSON,
             req -> fail(),
             req -> fail(),
             req -> fail()
diff --git a/server/src/test/java/org/opensearch/action/bulk/BulkRequestTests.java b/server/src/test/java/org/opensearch/action/bulk/BulkRequestTests.java
index c674be8dbba38..b3a5cff0041a7 100644
--- a/server/src/test/java/org/opensearch/action/bulk/BulkRequestTests.java
+++ b/server/src/test/java/org/opensearch/action/bulk/BulkRequestTests.java
@@ -39,14 +39,14 @@
 import org.opensearch.action.support.WriteRequest.RefreshPolicy;
 import org.opensearch.action.update.UpdateRequest;
 import org.opensearch.client.Requests;
+import org.opensearch.common.io.stream.BytesStreamOutput;
+import org.opensearch.common.xcontent.XContentHelper;
+import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.core.common.ParsingException;
 import org.opensearch.core.common.bytes.BytesArray;
 import org.opensearch.core.common.bytes.BytesReference;
-import org.opensearch.common.io.stream.BytesStreamOutput;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.common.xcontent.XContentFactory;
-import org.opensearch.common.xcontent.XContentHelper;
-import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.script.Script;
 import org.opensearch.test.OpenSearchTestCase;
@@ -71,7 +71,7 @@ public class BulkRequestTests extends OpenSearchTestCase {
     public void testSimpleBulk1() throws Exception {
         String bulkAction = copyToStringFromClasspath("/org/opensearch/action/bulk/simple-bulk.json");
         BulkRequest bulkRequest = new BulkRequest();
-        bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON);
+        bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, MediaTypeRegistry.JSON);
         assertThat(bulkRequest.numberOfActions(), equalTo(3));
         assertThat(((IndexRequest) bulkRequest.requests().get(0)).source(), equalTo(new BytesArray("{ \"field1\" : \"value1\" }")));
         assertThat(bulkRequest.requests().get(1), instanceOf(DeleteRequest.class));
@@ -81,13 +81,13 @@ public void testSimpleBulk1() throws Exception {
     public void testSimpleBulkWithCarriageReturn() throws Exception {
         String bulkAction = "{ \"index\":{\"_index\":\"test\",\"_id\":\"1\"} }\r\n{ \"field1\" : \"value1\" }\r\n";
         BulkRequest bulkRequest = new BulkRequest();
-        bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON);
+        bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, MediaTypeRegistry.JSON);
         assertThat(bulkRequest.numberOfActions(), equalTo(1));
         assertThat(((IndexRequest) bulkRequest.requests().get(0)).source(), equalTo(new BytesArray("{ \"field1\" : \"value1\" }")));
         Map<String, Object> sourceMap = XContentHelper.convertToMap(
             ((IndexRequest) bulkRequest.requests().get(0)).source(),
             false,
-            XContentType.JSON
+            MediaTypeRegistry.JSON
         ).v2();
         assertEquals("value1", sourceMap.get("field1"));
     }
@@ -95,21 +95,21 @@ public void testSimpleBulkWithCarriageReturn() throws Exception {
     public void testSimpleBulk2() throws Exception {
         String bulkAction = copyToStringFromClasspath("/org/opensearch/action/bulk/simple-bulk2.json");
         BulkRequest bulkRequest = new BulkRequest();
-        bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON);
+        bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, MediaTypeRegistry.JSON);
         assertThat(bulkRequest.numberOfActions(), equalTo(3));
     }

     public void testSimpleBulk3() throws Exception {
         String bulkAction = copyToStringFromClasspath("/org/opensearch/action/bulk/simple-bulk3.json");
         BulkRequest bulkRequest = new BulkRequest();
-        bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON);
+        bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, MediaTypeRegistry.JSON);
         assertThat(bulkRequest.numberOfActions(), equalTo(3));
     }

     public void testSimpleBulk4() throws Exception {
         String bulkAction = copyToStringFromClasspath("/org/opensearch/action/bulk/simple-bulk4.json");
         BulkRequest bulkRequest = new BulkRequest();
-        bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON);
+        bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, MediaTypeRegistry.JSON);
         assertThat(bulkRequest.numberOfActions(), equalTo(4));
         assertThat(bulkRequest.requests().get(0).id(), equalTo("1"));
         assertThat(((UpdateRequest) bulkRequest.requests().get(0)).retryOnConflict(), equalTo(2));
@@ -131,12 +131,12 @@ public void testBulkAllowExplicitIndex() throws Exception {
         String bulkAction1 = copyToStringFromClasspath("/org/opensearch/action/bulk/simple-bulk.json");
         Exception ex = expectThrows(
             Exception.class,
-            () -> new BulkRequest().add(new BytesArray(bulkAction1.getBytes(StandardCharsets.UTF_8)), null, false, XContentType.JSON)
+            () -> new BulkRequest().add(new BytesArray(bulkAction1.getBytes(StandardCharsets.UTF_8)), null, false, MediaTypeRegistry.JSON)
         );
         assertEquals("explicit index in bulk is not allowed", ex.getMessage());

         String bulkAction = copyToStringFromClasspath("/org/opensearch/action/bulk/simple-bulk5.json");
-        new BulkRequest().add(new BytesArray(bulkAction.getBytes(StandardCharsets.UTF_8)), "test", false, XContentType.JSON);
+        new BulkRequest().add(new BytesArray(bulkAction.getBytes(StandardCharsets.UTF_8)), "test", false, MediaTypeRegistry.JSON);
     }

     public void testBulkAddIterable() {
@@ -157,7 +157,7 @@ public void testSimpleBulk6() throws Exception {
         BulkRequest bulkRequest = new BulkRequest();
         ParsingException exc = expectThrows(
             ParsingException.class,
-            () -> bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON)
+            () -> bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, MediaTypeRegistry.JSON)
         );
         assertThat(exc.getMessage(), containsString("Unknown key for a VALUE_STRING in [hello]"));
     }
@@ -167,7 +167,7 @@ public void testSimpleBulk7() throws Exception {
         BulkRequest bulkRequest = new BulkRequest();
         IllegalArgumentException exc = expectThrows(
             IllegalArgumentException.class,
-            () -> bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON)
+            () -> bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, MediaTypeRegistry.JSON)
         );
         assertThat(
             exc.getMessage(),
@@ -180,7 +180,7 @@ public void testSimpleBulk8() throws Exception {
         BulkRequest bulkRequest = new BulkRequest();
         IllegalArgumentException exc = expectThrows(
             IllegalArgumentException.class,
-            () -> bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON)
+            () -> bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, MediaTypeRegistry.JSON)
         );
         assertThat(exc.getMessage(), containsString("Action/metadata line [3] contains an unknown parameter [_foo]"));
     }
@@ -190,7 +190,7 @@ public void testSimpleBulk9() throws Exception {
         BulkRequest bulkRequest = new BulkRequest();
         IllegalArgumentException exc = expectThrows(
             IllegalArgumentException.class,
-            () -> bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON)
+            () -> bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, MediaTypeRegistry.JSON)
         );
         assertThat(
             exc.getMessage(),
@@ -201,7 +201,7 @@ public void testSimpleBulk9() throws Exception {
     public void testSimpleBulk10() throws Exception {
         String bulkAction = copyToStringFromClasspath("/org/opensearch/action/bulk/simple-bulk10.json");
         BulkRequest bulkRequest = new BulkRequest();
-        bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON);
+        bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, MediaTypeRegistry.JSON);
         assertThat(bulkRequest.numberOfActions(), equalTo(9));
     }
@@ -210,7 +210,7 @@ public void testBulkActionShouldNotContainArray() throws Exception {
         BulkRequest bulkRequest = new BulkRequest();
         IllegalArgumentException exc = expectThrows(
             IllegalArgumentException.class,
-            () -> bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON)
+            () -> bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, MediaTypeRegistry.JSON)
         );
         assertEquals(
             exc.getMessage(),
@@ -240,7 +240,7 @@ public void testBulkEmptyObject() throws Exception {
         BulkRequest bulkRequest = new BulkRequest();
         IllegalArgumentException exc = expectThrows(
             IllegalArgumentException.class,
-            () -> bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON)
+            () -> bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, MediaTypeRegistry.JSON)
         );
         assertThat(
             exc.getMessage(),
@@ -256,8 +256,8 @@ public void testBulkRequestWithRefresh() throws Exception {
         // We force here a "type is missing" validation error
         bulkRequest.add(new DeleteRequest("index", "id"));
         bulkRequest.add(new DeleteRequest("index", "id").setRefreshPolicy(RefreshPolicy.IMMEDIATE));
-        bulkRequest.add(new UpdateRequest("index", "id").doc("{}", XContentType.JSON).setRefreshPolicy(RefreshPolicy.IMMEDIATE));
-        bulkRequest.add(new IndexRequest("index").id("id").source("{}", XContentType.JSON).setRefreshPolicy(RefreshPolicy.IMMEDIATE));
+        bulkRequest.add(new UpdateRequest("index", "id").doc("{}", MediaTypeRegistry.JSON).setRefreshPolicy(RefreshPolicy.IMMEDIATE));
+        bulkRequest.add(new IndexRequest("index").id("id").source("{}", MediaTypeRegistry.JSON).setRefreshPolicy(RefreshPolicy.IMMEDIATE));
         ActionRequestValidationException validate = bulkRequest.validate();
         assertThat(validate, notNullValue());
         assertThat(validate.validationErrors(), not(empty()));
@@ -340,7 +340,7 @@ public void testSmileIsSupported() throws IOException {
         XContentType xContentType = XContentType.SMILE;
         BytesReference data;
         try (BytesStreamOutput out = new BytesStreamOutput()) {
-            try (XContentBuilder builder = XContentFactory.contentBuilder(xContentType, out)) {
+            try (XContentBuilder builder = MediaTypeRegistry.contentBuilder(xContentType, out)) {
                 builder.startObject();
                 builder.startObject("index");
                 builder.field("_index", "index");
@@ -349,7 +349,7 @@ public void testSmileIsSupported() throws IOException {
                 builder.endObject();
             }
             out.write(xContentType.xContent().streamSeparator());
-            try (XContentBuilder builder = XContentFactory.contentBuilder(xContentType, out)) {
+            try (XContentBuilder builder = MediaTypeRegistry.contentBuilder(xContentType, out)) {
                 builder.startObject();
                 builder.field("field", "value");
                 builder.endObject();
@@ -375,7 +375,7 @@ public void testToValidateUpsertRequestAndCASInBulkRequest() throws IOException
         XContentType xContentType = XContentType.SMILE;
         BytesReference data;
         try (BytesStreamOutput out = new BytesStreamOutput()) {
-            try (XContentBuilder builder = XContentFactory.contentBuilder(xContentType, out)) {
+            try (XContentBuilder builder = MediaTypeRegistry.contentBuilder(xContentType, out)) {
                 builder.startObject();
                 builder.startObject("update");
                 builder.field("_index", "index");
@@ -386,7 +386,7 @@ public void testToValidateUpsertRequestAndCASInBulkRequest() throws IOException
                 builder.endObject();
             }
             out.write(xContentType.xContent().streamSeparator());
-            try (XContentBuilder builder = XContentFactory.contentBuilder(xContentType, out)) {
+            try (XContentBuilder builder = MediaTypeRegistry.contentBuilder(xContentType, out)) {
                 builder.startObject();
                 builder.startObject("doc").endObject();
                 Map<String, Object> values = new HashMap<>();
@@ -408,7 +408,7 @@ public void testBulkTerminatedByNewline() throws Exception {
         String bulkAction = copyToStringFromClasspath("/org/opensearch/action/bulk/simple-bulk11.json");
         IllegalArgumentException expectThrows = expectThrows(
             IllegalArgumentException.class,
-            () -> new BulkRequest().add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON)
+            () -> new BulkRequest().add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, MediaTypeRegistry.JSON)
         );
         assertEquals("The bulk request must be terminated by a newline [\\n]", expectThrows.getMessage());
@@ -419,7 +419,7 @@ public void testBulkTerminatedByNewline() throws Exception {
             0,
             bulkActionWithNewLine.length(),
             null,
-            XContentType.JSON
+            MediaTypeRegistry.JSON
         );
         assertEquals(3, bulkRequestWithNewLine.numberOfActions());
     }
diff --git a/server/src/test/java/org/opensearch/action/bulk/BulkResponseTests.java b/server/src/test/java/org/opensearch/action/bulk/BulkResponseTests.java
index 7c758fb25ce85..0627518bb60ce 100644
--- a/server/src/test/java/org/opensearch/action/bulk/BulkResponseTests.java
+++ b/server/src/test/java/org/opensearch/action/bulk/BulkResponseTests.java
@@ -39,11 +39,11 @@
 import org.opensearch.action.delete.DeleteResponseTests;
 import org.opensearch.action.index.IndexResponseTests;
 import org.opensearch.action.update.UpdateResponseTests;
-import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.common.collect.Tuple;
+import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.test.OpenSearchTestCase;

 import java.io.IOException;
@@ -51,7 +51,7 @@
 import static org.opensearch.OpenSearchExceptionTests.randomExceptions;
 import static org.opensearch.action.bulk.BulkItemResponseTests.assertBulkItemResponse;
 import static org.opensearch.action.bulk.BulkResponse.NO_INGEST_TOOK;
-import static org.opensearch.common.xcontent.XContentHelper.toXContent;
+import static org.opensearch.core.xcontent.XContentHelper.toXContent;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertToXContentEquivalent;

 public class BulkResponseTests extends OpenSearchTestCase {
diff --git a/server/src/test/java/org/opensearch/action/bulk/RetryTests.java b/server/src/test/java/org/opensearch/action/bulk/RetryTests.java
index ea02b964b1c58..aa33372239fed 100644
--- a/server/src/test/java/org/opensearch/action/bulk/RetryTests.java
+++ b/server/src/test/java/org/opensearch/action/bulk/RetryTests.java
@@ -32,13 +32,13 @@

 package org.opensearch.action.bulk;

-import org.opensearch.action.ActionFuture;
-import org.opensearch.action.ActionListener;
 import org.opensearch.action.DocWriteRequest.OpType;
 import org.opensearch.action.delete.DeleteResponse;
 import org.opensearch.action.support.PlainActionFuture;
 import org.opensearch.action.update.UpdateRequest;
+import org.opensearch.common.action.ActionFuture;
 import org.opensearch.common.unit.TimeValue;
+import org.opensearch.core.action.ActionListener;
 import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException;
 import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.test.OpenSearchTestCase;
diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java
index 0846a5f8dec5c..cf7080ab2fc06 100644
--- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java
+++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java
@@ -33,7 +33,6 @@
 package org.opensearch.action.bulk;

 import org.opensearch.Version;
-import org.opensearch.action.ActionListener;
 import org.opensearch.action.admin.indices.create.CreateIndexResponse;
 import org.opensearch.action.delete.DeleteRequest;
 import org.opensearch.action.index.IndexRequest;
@@ -49,11 +48,13 @@
 import org.opensearch.common.unit.TimeValue;
 import org.opensearch.common.util.concurrent.AtomicArray;
 import org.opensearch.common.util.concurrent.OpenSearchExecutors;
+import org.opensearch.core.action.ActionListener;
 import org.opensearch.index.IndexNotFoundException;
 import org.opensearch.index.IndexingPressureService;
 import org.opensearch.index.VersionType;
 import org.opensearch.indices.SystemIndices;
 import org.opensearch.tasks.Task;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.VersionUtils;
 import org.opensearch.threadpool.ThreadPool;
@@ -154,7 +155,9 @@ private void indicesThatCannotBeCreatedTestCase(
                 Settings.EMPTY,
                 new ClusterService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null)
             ),
-            new SystemIndices(emptyMap())
+            null,
+            new SystemIndices(emptyMap()),
+            NoopTracer.INSTANCE
         ) {
             @Override
             void executeBulk(
diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java
index 6a514b47e55a4..141c630b94020 100644
--- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java
+++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java
@@ -33,7 +33,6 @@
 package org.opensearch.action.bulk;

 import org.opensearch.Version;
-import org.opensearch.action.ActionListener;
 import org.opensearch.action.DocWriteRequest;
 import org.opensearch.action.admin.indices.create.CreateIndexResponse;
 import org.opensearch.action.index.IndexAction;
@@ -47,10 +46,10 @@
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.ClusterStateApplier;
 import org.opensearch.cluster.metadata.AliasMetadata;
+import org.opensearch.cluster.metadata.ComposableIndexTemplate;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.metadata.IndexNameExpressionResolver;
 import org.opensearch.cluster.metadata.IndexTemplateMetadata;
-import org.opensearch.cluster.metadata.ComposableIndexTemplate;
 import org.opensearch.cluster.metadata.Metadata;
 import org.opensearch.cluster.metadata.Template;
 import org.opensearch.cluster.node.DiscoveryNode;
@@ -64,12 +63,14 @@
 import org.opensearch.common.util.concurrent.AtomicArray;
 import org.opensearch.common.util.concurrent.OpenSearchExecutors;
 import org.opensearch.common.util.concurrent.ThreadContext;
+import org.opensearch.core.action.ActionListener;
 import org.opensearch.index.IndexNotFoundException;
 import org.opensearch.index.IndexSettings;
 import org.opensearch.index.IndexingPressureService;
 import org.opensearch.indices.SystemIndices;
 import org.opensearch.ingest.IngestService;
 import org.opensearch.tasks.Task;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.VersionUtils;
 import org.opensearch.threadpool.ThreadPool;
@@ -77,7 +78,6 @@
 import org.opensearch.transport.TransportResponseHandler;
 import org.opensearch.transport.TransportService;
 import org.junit.Before;
-import org.mockito.ArgumentCaptor;

 import java.util.Arrays;
 import java.util.Collections;
@@ -88,6 +88,8 @@
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.function.BiConsumer;

+import org.mockito.ArgumentCaptor;
+
 import static java.util.Collections.emptyMap;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.sameInstance;
@@ -95,8 +97,8 @@
 import static org.mockito.Mockito.any;
 import static org.mockito.Mockito.anyInt;
 import static org.mockito.Mockito.anyString;
-import static org.mockito.Mockito.eq;
 import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.eq;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.reset;
@@ -170,7 +172,9 @@ class TestTransportBulkAction extends TransportBulkAction {
                     SETTINGS,
                     new ClusterService(SETTINGS, new ClusterSettings(SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null)
                 ),
-                new SystemIndices(emptyMap())
+                null,
+                new SystemIndices(emptyMap()),
+                NoopTracer.INSTANCE
             );
         }
diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java
index d53b860e6524a..6bbd740df7f9c 100644
--- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java
+++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java
@@ -34,7 +34,6 @@

 import org.opensearch.ResourceAlreadyExistsException;
 import org.opensearch.Version;
-import org.opensearch.action.ActionListener;
 import org.opensearch.action.DocWriteRequest;
 import org.opensearch.action.admin.indices.create.CreateIndexResponse;
 import org.opensearch.action.bulk.TransportBulkActionTookTests.Resolver;
@@ -56,12 +55,15 @@
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.TimeValue;
+import org.opensearch.core.action.ActionListener;
 import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException;
 import org.opensearch.index.IndexNotFoundException;
 import org.opensearch.index.IndexingPressureService;
 import org.opensearch.index.VersionType;
+import org.opensearch.indices.IndicesService;
 import org.opensearch.indices.SystemIndexDescriptor;
 import org.opensearch.indices.SystemIndices;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.VersionUtils;
 import org.opensearch.test.transport.CapturingTransport;
@@ -87,6 +89,7 @@
 import static org.opensearch.test.ClusterServiceUtils.createClusterService;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.is;
+import static org.mockito.Mockito.mock;

 public class TransportBulkActionTests extends OpenSearchTestCase {
@@ -114,7 +117,9 @@ class TestTransportBulkAction extends TransportBulkAction {
                 new Resolver(),
                 new AutoCreateIndex(Settings.EMPTY, clusterService.getClusterSettings(), new Resolver(), new SystemIndices(emptyMap())),
                 new IndexingPressureService(Settings.EMPTY, clusterService),
-                new SystemIndices(emptyMap())
+                mock(IndicesService.class),
+                new SystemIndices(emptyMap()),
+                NoopTracer.INSTANCE
             );
         }
@@ -153,7 +158,8 @@ public void setUp() throws Exception {
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             boundAddress -> clusterService.localNode(),
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         );
         transportService.start();
         transportService.acceptIncomingRequests();
diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTookTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTookTests.java
index 2361b69e9b82c..9d5b4430ea395 100644
--- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTookTests.java
+++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTookTests.java
@@ -33,11 +33,9 @@
 package org.opensearch.action.bulk;

 import org.apache.lucene.util.Constants;
-import org.opensearch.action.ActionType;
 import org.opensearch.Version;
-import org.opensearch.action.ActionListener;
 import org.opensearch.action.ActionRequest;
-import org.opensearch.action.ActionResponse;
+import org.opensearch.action.ActionType;
 import org.opensearch.action.IndicesRequest;
 import org.opensearch.action.admin.indices.create.CreateIndexResponse;
 import org.opensearch.action.support.ActionFilters;
@@ -51,12 +49,15 @@
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.util.concurrent.AtomicArray;
 import org.opensearch.common.util.concurrent.ThreadContext;
-import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.action.ActionResponse;
 import org.opensearch.core.common.Strings;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.index.IndexNotFoundException;
 import org.opensearch.index.IndexingPressureService;
 import org.opensearch.indices.SystemIndices;
 import org.opensearch.tasks.Task;
org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; import org.opensearch.test.transport.CapturingTransport; @@ -125,7 +126,8 @@ private TransportBulkAction createAction(boolean controlled, AtomicLong expected TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); @@ -217,7 +219,7 @@ private void runTestTook(boolean controlled) throws Exception { bulkAction = Strings.replace(bulkAction, "\r\n", "\n"); } BulkRequest bulkRequest = new BulkRequest(); - bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON); + bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, MediaTypeRegistry.JSON); AtomicLong expected = new AtomicLong(); TransportBulkAction action = createAction(controlled, expected); action.doExecute(null, bulkRequest, new ActionListener<BulkResponse>() { @@ -278,8 +280,10 @@ static class TestTransportBulkAction extends TransportBulkAction { indexNameExpressionResolver, autoCreateIndex, new IndexingPressureService(Settings.EMPTY, clusterService), + null, new SystemIndices(emptyMap()), - relativeTimeProvider + relativeTimeProvider, + NoopTracer.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java index c0c35e8c22f4d..65b555649b2d0 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java @@ -35,7 +35,6 @@ import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchStatusException; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.DocWriteRequest; import org.opensearch.action.DocWriteResponse; import org.opensearch.action.LatchedActionListener; @@ -64,8 +63,12 @@ import org.opensearch.common.lucene.uid.Versions; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.index.IndexingPressureService; @@ -77,30 +80,32 @@ import org.opensearch.index.mapper.Mapping; import org.opensearch.index.mapper.MetadataFieldMapper; import org.opensearch.index.mapper.RootObjectMapper; -import org.opensearch.index.remote.RemoteRefreshSegmentPressureService; +import org.opensearch.index.remote.RemoteStorePressureService; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.ShardNotFoundException; import org.opensearch.index.translog.Translog; import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; -import org.opensearch.core.rest.RestStatus; 
+import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool.Names; import org.opensearch.transport.TestTransportChannel; import org.opensearch.transport.TransportChannel; -import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportService; import java.io.IOException; +import java.util.ArrayList; import java.util.Collections; +import java.util.List; import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.LongSupplier; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; @@ -946,6 +951,82 @@ public void testRetries() throws Exception { latch.await(); } + public void testUpdateWithRetryOnConflict() throws IOException, InterruptedException { + IndexSettings indexSettings = new IndexSettings(indexMetadata(), Settings.EMPTY); + + int nItems = randomIntBetween(2, 5); + List<BulkItemRequest> items = new ArrayList<>(nItems); + for (int i = 0; i < nItems; i++) { + int retryOnConflictCount = randomIntBetween(0, 3); + logger.debug("Setting retryCount for item {}: {}", i, retryOnConflictCount); + UpdateRequest updateRequest = new UpdateRequest("index", "id").doc(Requests.INDEX_CONTENT_TYPE, "field", "value") + .retryOnConflict(retryOnConflictCount); + items.add(new BulkItemRequest(i, updateRequest)); + } + + IndexRequest updateResponse = new IndexRequest("index").id("id").source(Requests.INDEX_CONTENT_TYPE, "field", "value"); + + Exception err = new VersionConflictEngineException(shardId, "id", "I'm conflicted <(;_;)>"); + Engine.IndexResult conflictedResult = new Engine.IndexResult(err, 0); + + IndexShard shard = mock(IndexShard.class); + when(shard.applyIndexOperationOnPrimary(anyLong(), any(), any(), anyLong(), anyLong(), anyLong(), anyBoolean())).thenAnswer( + ir -> conflictedResult + ); + when(shard.indexSettings()).thenReturn(indexSettings); + when(shard.shardId()).thenReturn(shardId); + when(shard.mapperService()).thenReturn(mock(MapperService.class)); + + UpdateHelper updateHelper = mock(UpdateHelper.class); + when(updateHelper.prepare(any(), eq(shard), any())).thenReturn( + new UpdateHelper.Result( + updateResponse, + randomBoolean() ? 
DocWriteResponse.Result.CREATED : DocWriteResponse.Result.UPDATED, + Collections.singletonMap("field", "value"), + Requests.INDEX_CONTENT_TYPE + ) + ); + + BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items.toArray(BulkItemRequest[]::new)); + + final CountDownLatch latch = new CountDownLatch(1); + Runnable runnable = () -> TransportShardBulkAction.performOnPrimary( + bulkShardRequest, + shard, + updateHelper, + threadPool::absoluteTimeInMillis, + new NoopMappingUpdatePerformer(), + listener -> listener.onResponse(null), + new LatchedActionListener<>(ActionTestUtils.assertNoFailureListener(result -> { + assertEquals(nItems, result.replicaRequest().items().length); + for (BulkItemRequest item : result.replicaRequest().items()) { + assertEquals(VersionConflictEngineException.class, item.getPrimaryResponse().getFailure().getCause().getClass()); + } + }), latch), + threadPool, + Names.WRITE + ); + + // execute the runnable on a separate thread so that the infinite loop can be detected + new Thread(runnable).start(); + + // timeout the request in 10 seconds if there is an infinite loop + assertTrue(latch.await(10, TimeUnit.SECONDS)); + + items.forEach(item -> { + assertEquals(VersionConflictEngineException.class, item.getPrimaryResponse().getFailure().getCause().getClass()); + + // this assertion is based on the assumption that all bulk item requests are updates and hence each one invokes + // UpdateHelper::prepare + UpdateRequest updateRequest = (UpdateRequest) item.request(); + verify(updateHelper, times(updateRequest.retryOnConflict() + 1)).prepare( + eq(updateRequest), + any(IndexShard.class), + any(LongSupplier.class) + ); + }); + } + public void testForceExecutionOnRejectionAfterMappingUpdate() throws Exception { TestThreadPool rejectingThreadPool = new TestThreadPool( "TransportShardBulkActionTests#testForceExecutionOnRejectionAfterMappingUpdate", @@ -1073,8 +1154,9 @@ public void testHandlePrimaryTermValidationRequestWithDifferentAllocationId() { mock(ActionFilters.class), mock(IndexingPressureService.class), mock(SegmentReplicationPressureService.class), - mock(RemoteRefreshSegmentPressureService.class), - mock(SystemIndices.class) + mock(RemoteStorePressureService.class), + mock(SystemIndices.class), + NoopTracer.INSTANCE ); action.handlePrimaryTermValidationRequest( new TransportShardBulkAction.PrimaryTermValidationRequest(aId + "-1", 1, shardId), @@ -1104,8 +1186,9 @@ public void testHandlePrimaryTermValidationRequestWithOlderPrimaryTerm() { mock(ActionFilters.class), mock(IndexingPressureService.class), mock(SegmentReplicationPressureService.class), - mock(RemoteRefreshSegmentPressureService.class), - mock(SystemIndices.class) + mock(RemoteStorePressureService.class), + mock(SystemIndices.class), + NoopTracer.INSTANCE ); action.handlePrimaryTermValidationRequest( new TransportShardBulkAction.PrimaryTermValidationRequest(aId, 1, shardId), @@ -1135,8 +1218,9 @@ public void testHandlePrimaryTermValidationRequestSuccess() { mock(ActionFilters.class), mock(IndexingPressureService.class), mock(SegmentReplicationPressureService.class), - mock(RemoteRefreshSegmentPressureService.class), - mock(SystemIndices.class) + mock(RemoteStorePressureService.class), + mock(SystemIndices.class), + NoopTracer.INSTANCE ); action.handlePrimaryTermValidationRequest( new TransportShardBulkAction.PrimaryTermValidationRequest(aId, 1, shardId), @@ -1177,8 +1261,9 @@ private TransportShardBulkAction createAction() { mock(ActionFilters.class), 
mock(IndexingPressureService.class), mock(SegmentReplicationPressureService.class), - mock(RemoteRefreshSegmentPressureService.class), - mock(SystemIndices.class) + mock(RemoteStorePressureService.class), + mock(SystemIndices.class), + NoopTracer.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/action/delete/DeleteResponseTests.java b/server/src/test/java/org/opensearch/action/delete/DeleteResponseTests.java index b5af0ad4c47c9..a13fbf01e270e 100644 --- a/server/src/test/java/org/opensearch/action/delete/DeleteResponseTests.java +++ b/server/src/test/java/org/opensearch/action/delete/DeleteResponseTests.java @@ -33,14 +33,15 @@ package org.opensearch.action.delete; import org.opensearch.action.support.replication.ReplicationResponse; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.seqno.SequenceNumbers; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.RandomObjects; @@ -56,7 +57,7 @@ public class DeleteResponseTests extends OpenSearchTestCase { public void testToXContent() { { DeleteResponse response = new DeleteResponse(new ShardId("index", "index_uuid", 0), "id", 3, 17, 5, true); - String output = Strings.toString(XContentType.JSON, response); + String output = Strings.toString(MediaTypeRegistry.JSON, response); assertEquals( "{\"_index\":\"index\",\"_id\":\"id\",\"_version\":5,\"result\":\"deleted\"," + "\"_shards\":null,\"_seq_no\":3,\"_primary_term\":17}", @@ -67,7 +68,7 @@ public void testToXContent() { DeleteResponse response = new DeleteResponse(new ShardId("index", "index_uuid", 0), "id", -1, 0, 7, true); response.setForcedRefresh(true); response.setShardInfo(new ReplicationResponse.ShardInfo(10, 5)); - String output = Strings.toString(XContentType.JSON, response); + String output = Strings.toString(MediaTypeRegistry.JSON, response); assertEquals( "{\"_index\":\"index\",\"_id\":\"id\",\"_version\":7,\"result\":\"deleted\"," + "\"forced_refresh\":true,\"_shards\":{\"total\":10,\"successful\":5,\"failed\":0}}", diff --git a/server/src/test/java/org/opensearch/action/explain/ExplainRequestTests.java b/server/src/test/java/org/opensearch/action/explain/ExplainRequestTests.java index 3d5f779aada0e..226ef7e39ed04 100644 --- a/server/src/test/java/org/opensearch/action/explain/ExplainRequestTests.java +++ b/server/src/test/java/org/opensearch/action/explain/ExplainRequestTests.java @@ -33,10 +33,10 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.settings.Settings; import org.opensearch.index.query.QueryBuilders; import org.opensearch.indices.IndicesModule; import org.opensearch.search.SearchModule; diff --git 
a/server/src/test/java/org/opensearch/action/explain/ExplainResponseTests.java b/server/src/test/java/org/opensearch/action/explain/ExplainResponseTests.java index 866e13c7faf0e..f8d10128a82ad 100644 --- a/server/src/test/java/org/opensearch/action/explain/ExplainResponseTests.java +++ b/server/src/test/java/org/opensearch/action/explain/ExplainResponseTests.java @@ -33,15 +33,14 @@ package org.opensearch.action.explain; import org.apache.lucene.search.Explanation; +import org.opensearch.common.document.DocumentField; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.document.DocumentField; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.get.GetResult; import org.opensearch.test.AbstractSerializingTestCase; import org.opensearch.test.RandomObjects; @@ -114,7 +113,7 @@ public void testToXContent() throws IOException { ); ExplainResponse response = new ExplainResponse(index, id, exist, explanation, getResult); - XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); response.toXContent(builder, ToXContent.EMPTY_PARAMS); String generatedResponse = BytesReference.bytes(builder).utf8ToString().replaceAll("\\s+", ""); diff --git a/server/src/test/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequestTests.java b/server/src/test/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequestTests.java index 576e5a68dfca6..b1602393c590c 100644 --- a/server/src/test/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequestTests.java +++ b/server/src/test/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequestTests.java @@ -34,8 +34,8 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.util.ArrayUtils; +import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.test.AbstractWireSerializingTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/action/fieldcaps/MergedFieldCapabilitiesResponseTests.java b/server/src/test/java/org/opensearch/action/fieldcaps/MergedFieldCapabilitiesResponseTests.java index 382425ce4e57c..f196ba16a2584 100644 --- a/server/src/test/java/org/opensearch/action/fieldcaps/MergedFieldCapabilitiesResponseTests.java +++ b/server/src/test/java/org/opensearch/action/fieldcaps/MergedFieldCapabilitiesResponseTests.java @@ -34,11 +34,10 @@ import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.test.AbstractSerializingTestCase; import java.io.IOException; @@ -125,7 +124,7 @@ protected Predicate<String> getRandomFieldsExcludeFilter() { public void 
testToXContent() throws IOException { FieldCapabilitiesResponse response = createSimpleResponse(); - XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); response.toXContent(builder, ToXContent.EMPTY_PARAMS); String generatedResponse = BytesReference.bytes(builder).utf8ToString(); diff --git a/server/src/test/java/org/opensearch/action/get/GetResponseTests.java b/server/src/test/java/org/opensearch/action/get/GetResponseTests.java index 00ae69c93f5d1..72190bcbe97b6 100644 --- a/server/src/test/java/org/opensearch/action/get/GetResponseTests.java +++ b/server/src/test/java/org/opensearch/action/get/GetResponseTests.java @@ -32,15 +32,16 @@ package org.opensearch.action.get; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.document.DocumentField; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.ParsingException; -import org.opensearch.common.Strings; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.collect.Tuple; -import org.opensearch.common.document.DocumentField; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.get.GetResult; import org.opensearch.test.OpenSearchTestCase; @@ -48,7 +49,7 @@ import java.util.Collections; import java.util.function.Predicate; -import static org.opensearch.common.xcontent.XContentHelper.toXContent; +import static org.opensearch.core.xcontent.XContentHelper.toXContent; import static org.opensearch.index.get.GetResultTests.copyGetResult; import static org.opensearch.index.get.GetResultTests.mutateGetResult; import static org.opensearch.index.get.GetResultTests.randomGetResult; @@ -118,7 +119,7 @@ public void testToXContent() { null ) ); - String output = Strings.toString(XContentType.JSON, getResponse); + String output = Strings.toString(MediaTypeRegistry.JSON, getResponse); assertEquals( "{\"_index\":\"index\",\"_id\":\"id\",\"_version\":1,\"_seq_no\":0,\"_primary_term\":1," + "\"found\":true,\"_source\":{ \"field1\" : \"value1\", \"field2\":\"value2\"},\"fields\":{\"field1\":[\"value1\"]}}", @@ -127,7 +128,7 @@ public void testToXContent() { } { GetResponse getResponse = new GetResponse(new GetResult("index", "id", UNASSIGNED_SEQ_NO, 0, 1, false, null, null, null)); - String output = Strings.toString(XContentType.JSON, getResponse); + String output = Strings.toString(MediaTypeRegistry.JSON, getResponse); assertEquals("{\"_index\":\"index\",\"_id\":\"id\",\"found\":false}", output); } } @@ -155,7 +156,7 @@ public void testToString() { public void testEqualsAndHashcode() { checkEqualsAndHashCode( - new GetResponse(randomGetResult(XContentType.JSON).v1()), + new GetResponse(randomGetResult(MediaTypeRegistry.JSON).v1()), GetResponseTests::copyGetResponse, GetResponseTests::mutateGetResponse ); diff --git a/server/src/test/java/org/opensearch/action/get/MultiGetRequestTests.java b/server/src/test/java/org/opensearch/action/get/MultiGetRequestTests.java index 5498d685dbb6c..c6e880fbd137e 100644 --- a/server/src/test/java/org/opensearch/action/get/MultiGetRequestTests.java +++ b/server/src/test/java/org/opensearch/action/get/MultiGetRequestTests.java @@ -32,13 +32,14 @@ package 
org.opensearch.action.get; +import org.opensearch.action.get.MultiGetRequest.Item; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.VersionType; import org.opensearch.search.fetch.subphase.FetchSourceContext; import org.opensearch.test.OpenSearchTestCase; @@ -126,7 +127,7 @@ public void testXContentSerialization() throws IOException { MultiGetRequest expected = createTestInstance(); XContentType xContentType = randomFrom(XContentType.values()); BytesReference shuffled = toShuffledXContent(expected, xContentType, ToXContent.EMPTY_PARAMS, false); - try (XContentParser parser = createParser(XContentFactory.xContent(xContentType), shuffled)) { + try (XContentParser parser = createParser(xContentType.xContent(), shuffled)) { MultiGetRequest actual = new MultiGetRequest(); actual.add(null, null, null, null, parser, true); assertThat(parser.nextToken(), nullValue()); @@ -141,6 +142,13 @@ public void testXContentSerialization() throws IOException { } } + public void testToString() { + MultiGetRequest req = createTestInstance(); + for (Item items : req.getItems()) { + assertThat(req.toString(), containsString(items.toString())); + } + } + private MultiGetRequest createTestInstance() { int numItems = randomIntBetween(0, 128); MultiGetRequest request = new MultiGetRequest(); diff --git a/server/src/test/java/org/opensearch/action/get/MultiGetResponseTests.java b/server/src/test/java/org/opensearch/action/get/MultiGetResponseTests.java index 71ada43bef116..1a626f5646c26 100644 --- a/server/src/test/java/org/opensearch/action/get/MultiGetResponseTests.java +++ b/server/src/test/java/org/opensearch/action/get/MultiGetResponseTests.java @@ -31,11 +31,10 @@ package org.opensearch.action.get; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.get.GetResult; import org.opensearch.test.OpenSearchTestCase; @@ -53,7 +52,7 @@ public void testFromXContent() throws IOException { XContentType xContentType = randomFrom(XContentType.values()); BytesReference shuffled = toShuffledXContent(expected, xContentType, ToXContent.EMPTY_PARAMS, false); MultiGetResponse parsed; - try (XContentParser parser = createParser(XContentFactory.xContent(xContentType), shuffled)) { + try (XContentParser parser = createParser(xContentType.xContent(), shuffled)) { parsed = MultiGetResponse.fromXContent(parser); assertNull(parser.nextToken()); } diff --git a/server/src/test/java/org/opensearch/action/get/TransportGetActionTests.java b/server/src/test/java/org/opensearch/action/get/TransportGetActionTests.java new file mode 100644 index 0000000000000..9565e219d1a78 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/get/TransportGetActionTests.java @@ -0,0 +1,91 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be 
licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.action.get; + +import org.opensearch.Version; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.routing.Preference; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.Index; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.test.OpenSearchTestCase; + +import static org.opensearch.common.UUIDs.randomBase64UUID; + +public class TransportGetActionTests extends OpenSearchTestCase { + + private static ClusterState clusterState(ReplicationType replicationType) { + final Index index1 = new Index("index1", randomBase64UUID()); + return ClusterState.builder(new ClusterName(TransportGetActionTests.class.getSimpleName())) + .metadata( + new Metadata.Builder().put( + new IndexMetadata.Builder(index1.getName()).settings( + Settings.builder() + .put("index.version.created", Version.CURRENT) + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 1) + .put(IndexMetadata.SETTING_INDEX_UUID, index1.getUUID()) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, replicationType) + ) + ) + ) + .build(); + } + + public void testShouldForcePrimaryRouting() { + + Metadata metadata = clusterState(ReplicationType.SEGMENT).getMetadata(); + + // should return false since preference is set for request + assertFalse(TransportGetAction.shouldForcePrimaryRouting(metadata, true, Preference.REPLICA.type(), "index1")); + + // should return false since request is not realtime + assertFalse(TransportGetAction.shouldForcePrimaryRouting(metadata, false, null, "index1")); + + // should return true since segment replication is enabled + assertTrue(TransportGetAction.shouldForcePrimaryRouting(metadata, true, null, "index1")); + + // should return false since index doesn't exist + assertFalse(TransportGetAction.shouldForcePrimaryRouting(metadata, true, null, "index3")); + + metadata = clusterState(ReplicationType.DOCUMENT).getMetadata(); + + // should return false since document replication is enabled + assertFalse(TransportGetAction.shouldForcePrimaryRouting(metadata, true, null, "index1")); + + } + +} diff --git a/server/src/test/java/org/opensearch/action/get/TransportMultiGetActionTests.java b/server/src/test/java/org/opensearch/action/get/TransportMultiGetActionTests.java index 9e467aff710df..52443e695e014 100644 --- 
a/server/src/test/java/org/opensearch/action/get/TransportMultiGetActionTests.java +++ b/server/src/test/java/org/opensearch/action/get/TransportMultiGetActionTests.java @@ -33,7 +33,6 @@ package org.opensearch.action.get; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.IndicesRequest; import org.opensearch.action.RoutingMissingException; import org.opensearch.action.support.ActionFilters; @@ -45,21 +44,25 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.OperationRouting; +import org.opensearch.cluster.routing.Preference; import org.opensearch.cluster.routing.ShardIterator; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.AtomicArray; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; import org.opensearch.tasks.TaskManager; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -68,6 +71,7 @@ import org.junit.AfterClass; import org.junit.BeforeClass; +import java.io.IOException; import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -91,32 +95,8 @@ public class TransportMultiGetActionTests extends OpenSearchTestCase { private static TransportMultiGetAction transportAction; private static TransportShardMultiGetAction shardAction; - @BeforeClass - public static void beforeClass() throws Exception { - threadPool = new TestThreadPool(TransportMultiGetActionTests.class.getSimpleName()); - - transportService = new TransportService( - Settings.EMPTY, - mock(Transport.class), - threadPool, - TransportService.NOOP_TRANSPORT_INTERCEPTOR, - boundAddress -> DiscoveryNode.createLocal( - Settings.builder().put("node.name", "node1").build(), - boundAddress.publishAddress(), - randomBase64UUID() - ), - null, - emptySet() - ) { - @Override - public TaskManager getTaskManager() { - return taskManager; - } - }; - - final Index index1 = new Index("index1", randomBase64UUID()); - final Index index2 = new Index("index2", randomBase64UUID()); - final ClusterState clusterState = ClusterState.builder(new ClusterName(TransportMultiGetActionTests.class.getSimpleName())) + private static ClusterState clusterState(ReplicationType replicationType, Index index1, Index index2) throws IOException { + return ClusterState.builder(new ClusterName(TransportMultiGetActionTests.class.getSimpleName())) .metadata( new Metadata.Builder().put( new IndexMetadata.Builder(index1.getName()).settings( @@ -124,6 +104,7 @@ public TaskManager getTaskManager() { .put("index.version.created", 
Version.CURRENT) .put("index.number_of_shards", 1) .put("index.number_of_replicas", 1) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, replicationType) .put(IndexMetadata.SETTING_INDEX_UUID, index1.getUUID()) ) .putMapping( @@ -139,7 +120,7 @@ public TaskManager getTaskManager() { .endObject() ), true, - XContentType.JSON + MediaTypeRegistry.JSON ) ) ) @@ -149,6 +130,7 @@ public TaskManager getTaskManager() { .put("index.version.created", Version.CURRENT) .put("index.number_of_shards", 1) .put("index.number_of_replicas", 1) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, replicationType) .put(IndexMetadata.SETTING_INDEX_UUID, index1.getUUID()) ) .putMapping( @@ -164,12 +146,41 @@ public TaskManager getTaskManager() { .endObject() ), true, - XContentType.JSON + MediaTypeRegistry.JSON ) ) ) ) .build(); + } + + @BeforeClass + public static void beforeClass() throws Exception { + threadPool = new TestThreadPool(TransportMultiGetActionTests.class.getSimpleName()); + + transportService = new TransportService( + Settings.EMPTY, + mock(Transport.class), + threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + boundAddress -> DiscoveryNode.createLocal( + Settings.builder().put("node.name", "node1").build(), + boundAddress.publishAddress(), + randomBase64UUID() + ), + null, + emptySet(), + NoopTracer.INSTANCE + ) { + @Override + public TaskManager getTaskManager() { + return taskManager; + } + }; + + final Index index1 = new Index("index1", randomBase64UUID()); + final Index index2 = new Index("index2", randomBase64UUID()); + ClusterState clusterState = clusterState(randomBoolean() ? ReplicationType.SEGMENT : ReplicationType.DOCUMENT, index1, index2); final ShardIterator index1ShardIterator = mock(ShardIterator.class); when(index1ShardIterator.shardId()).thenReturn(new ShardId(index1, randomInt())); @@ -285,6 +296,30 @@ protected void executeShardAction( } + public void testShouldForcePrimaryRouting() throws IOException { + final Index index1 = new Index("index1", randomBase64UUID()); + final Index index2 = new Index("index2", randomBase64UUID()); + Metadata metadata = clusterState(ReplicationType.SEGMENT, index1, index2).getMetadata(); + + // should return false since preference is set for request + assertFalse(TransportMultiGetAction.shouldForcePrimaryRouting(metadata, true, Preference.REPLICA.type(), "index1")); + + // should return false since request is not realtime + assertFalse(TransportMultiGetAction.shouldForcePrimaryRouting(metadata, false, null, "index2")); + + // should return true since segment replication is enabled + assertTrue(TransportMultiGetAction.shouldForcePrimaryRouting(metadata, true, null, "index1")); + + // should return false since index doesn't exist + assertFalse(TransportMultiGetAction.shouldForcePrimaryRouting(metadata, true, null, "index3")); + + metadata = clusterState(ReplicationType.DOCUMENT, index1, index2).getMetadata(); + + // should return false since document replication is enabled + assertFalse(TransportMultiGetAction.shouldForcePrimaryRouting(metadata, true, null, "index1")); + + } + private static Task createTask() { return new Task( randomLong(), diff --git a/server/src/test/java/org/opensearch/action/index/IndexRequestBuilderTests.java b/server/src/test/java/org/opensearch/action/index/IndexRequestBuilderTests.java index 5c3fba48d6215..456aab4b92e4e 100644 --- a/server/src/test/java/org/opensearch/action/index/IndexRequestBuilderTests.java +++ b/server/src/test/java/org/opensearch/action/index/IndexRequestBuilderTests.java @@ -32,10 +32,10 @@ package 
org.opensearch.action.index; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.client.NoOpClient; import org.junit.After; @@ -74,7 +74,7 @@ public void testSetSource() throws Exception { indexRequestBuilder.setSource(source); assertEquals(EXPECTED_SOURCE, XContentHelper.convertToJson(indexRequestBuilder.request().source(), true)); - indexRequestBuilder.setSource(source, XContentType.JSON); + indexRequestBuilder.setSource(source, MediaTypeRegistry.JSON); assertEquals(EXPECTED_SOURCE, XContentHelper.convertToJson(indexRequestBuilder.request().source(), true)); indexRequestBuilder.setSource("SomeKey", "SomeValue"); @@ -87,7 +87,7 @@ public void testSetSource() throws Exception { ByteArrayOutputStream docOut = new ByteArrayOutputStream(); XContentBuilder doc = XContentFactory.jsonBuilder(docOut).startObject().field("SomeKey", "SomeValue").endObject(); doc.close(); - indexRequestBuilder.setSource(docOut.toByteArray(), XContentType.JSON); + indexRequestBuilder.setSource(docOut.toByteArray(), MediaTypeRegistry.JSON); assertEquals( EXPECTED_SOURCE, XContentHelper.convertToJson(indexRequestBuilder.request().source(), true, indexRequestBuilder.request().getContentType()) diff --git a/server/src/test/java/org/opensearch/action/index/IndexRequestTests.java b/server/src/test/java/org/opensearch/action/index/IndexRequestTests.java index bda9a49bf797b..8a66c859bc99b 100644 --- a/server/src/test/java/org/opensearch/action/index/IndexRequestTests.java +++ b/server/src/test/java/org/opensearch/action/index/IndexRequestTests.java @@ -36,15 +36,15 @@ import org.opensearch.action.DocWriteRequest; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.replication.ReplicationResponse; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.index.VersionType; -import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.index.VersionType; +import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -99,19 +99,19 @@ public void testCreateOperationRejectsVersions() { public void testIndexingRejectsLongIds() { String id = randomAlphaOfLength(511); IndexRequest request = new IndexRequest("index").id(id); - request.source("{}", XContentType.JSON); + request.source("{}", MediaTypeRegistry.JSON); ActionRequestValidationException validate = request.validate(); assertNull(validate); id = randomAlphaOfLength(512); request = new IndexRequest("index").id(id); - request.source("{}", XContentType.JSON); + request.source("{}", MediaTypeRegistry.JSON); validate = request.validate(); assertNull(validate); id = randomAlphaOfLength(513); request = new IndexRequest("index").id(id); - 
request.source("{}", XContentType.JSON); + request.source("{}", MediaTypeRegistry.JSON); validate = request.validate(); assertThat(validate, notNullValue()); assertThat(validate.getMessage(), containsString("id [" + id + "] is too long, must be no longer than 512 bytes but was: 513")); @@ -182,15 +182,15 @@ public void testIndexResponse() { public void testIndexRequestXContentSerialization() throws IOException { IndexRequest indexRequest = new IndexRequest("foo").id("1"); boolean isRequireAlias = randomBoolean(); - indexRequest.source("{}", XContentType.JSON); + indexRequest.source("{}", MediaTypeRegistry.JSON); indexRequest.setRequireAlias(isRequireAlias); - assertEquals(XContentType.JSON, indexRequest.getContentType()); + assertEquals(MediaTypeRegistry.JSON, indexRequest.getContentType()); BytesStreamOutput out = new BytesStreamOutput(); indexRequest.writeTo(out); StreamInput in = StreamInput.wrap(out.bytes().toBytesRef().bytes); IndexRequest serialized = new IndexRequest(in); - assertEquals(XContentType.JSON, serialized.getContentType()); + assertEquals(MediaTypeRegistry.JSON, serialized.getContentType()); assertEquals(new BytesArray("{}"), serialized.source()); assertEquals(isRequireAlias, serialized.isRequireAlias()); } @@ -215,11 +215,11 @@ public void testToStringSizeLimit() throws UnsupportedEncodingException { IndexRequest request = new IndexRequest("index"); String source = "{\"name\":\"value\"}"; - request.source(source, XContentType.JSON); + request.source(source, MediaTypeRegistry.JSON); assertEquals("index {[index][null], source[" + source + "]}", request.toString()); source = "{\"name\":\"" + randomUnicodeOfLength(IndexRequest.MAX_SOURCE_LENGTH_IN_TOSTRING) + "\"}"; - request.source(source, XContentType.JSON); + request.source(source, MediaTypeRegistry.JSON); int actualBytes = source.getBytes("UTF-8").length; assertEquals( "index {[index][null], source[n/a, actual length: [" @@ -233,7 +233,7 @@ public void testToStringSizeLimit() throws UnsupportedEncodingException { public void testRejectsEmptyStringPipeline() { IndexRequest request = new IndexRequest("index"); - request.source("{}", XContentType.JSON); + request.source("{}", MediaTypeRegistry.JSON); request.setPipeline(""); ActionRequestValidationException validate = request.validate(); assertThat(validate, notNullValue()); diff --git a/server/src/test/java/org/opensearch/action/index/IndexResponseTests.java b/server/src/test/java/org/opensearch/action/index/IndexResponseTests.java index e52d6d600395e..8aeca8af8ce07 100644 --- a/server/src/test/java/org/opensearch/action/index/IndexResponseTests.java +++ b/server/src/test/java/org/opensearch/action/index/IndexResponseTests.java @@ -34,14 +34,15 @@ import org.opensearch.action.DocWriteResponse; import org.opensearch.action.support.replication.ReplicationResponse; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.seqno.SequenceNumbers; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.test.OpenSearchTestCase; import 
org.opensearch.test.RandomObjects; @@ -57,7 +58,7 @@ public class IndexResponseTests extends OpenSearchTestCase { public void testToXContent() { { IndexResponse indexResponse = new IndexResponse(new ShardId("index", "index_uuid", 0), "id", 3, 17, 5, true); - String output = Strings.toString(XContentType.JSON, indexResponse); + String output = Strings.toString(MediaTypeRegistry.JSON, indexResponse); assertEquals( "{\"_index\":\"index\",\"_id\":\"id\",\"_version\":5,\"result\":\"created\",\"_shards\":null," + "\"_seq_no\":3,\"_primary_term\":17}", @@ -68,7 +69,7 @@ public void testToXContent() { IndexResponse indexResponse = new IndexResponse(new ShardId("index", "index_uuid", 0), "id", -1, 17, 7, true); indexResponse.setForcedRefresh(true); indexResponse.setShardInfo(new ReplicationResponse.ShardInfo(10, 5)); - String output = Strings.toString(XContentType.JSON, indexResponse); + String output = Strings.toString(MediaTypeRegistry.JSON, indexResponse); assertEquals( "{\"_index\":\"index\",\"_id\":\"id\",\"_version\":7,\"result\":\"created\"," + "\"forced_refresh\":true,\"_shards\":{\"total\":10,\"successful\":5,\"failed\":0}}", diff --git a/server/src/test/java/org/opensearch/action/ingest/GetPipelineResponseTests.java b/server/src/test/java/org/opensearch/action/ingest/GetPipelineResponseTests.java index 4abd59def0f33..83ef9eb13d9bb 100644 --- a/server/src/test/java/org/opensearch/action/ingest/GetPipelineResponseTests.java +++ b/server/src/test/java/org/opensearch/action/ingest/GetPipelineResponseTests.java @@ -32,13 +32,13 @@ package org.opensearch.action.ingest; +import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.ingest.PipelineConfiguration; import org.opensearch.test.AbstractSerializingTestCase; diff --git a/server/src/test/java/org/opensearch/action/ingest/PutPipelineRequestTests.java b/server/src/test/java/org/opensearch/action/ingest/PutPipelineRequestTests.java index 336ec67546dc5..5f75e9e0551dc 100644 --- a/server/src/test/java/org/opensearch/action/ingest/PutPipelineRequestTests.java +++ b/server/src/test/java/org/opensearch/action/ingest/PutPipelineRequestTests.java @@ -32,13 +32,14 @@ package org.opensearch.action.ingest; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.ingest.Pipeline; import org.opensearch.test.OpenSearchTestCase; @@ -48,15 +49,19 @@ public class PutPipelineRequestTests extends OpenSearchTestCase { public void testSerializationWithXContent() throws IOException { - PutPipelineRequest request = new PutPipelineRequest("1", new BytesArray("{}".getBytes(StandardCharsets.UTF_8)), 
XContentType.JSON); - assertEquals(XContentType.JSON, request.getMediaType()); + PutPipelineRequest request = new PutPipelineRequest( + "1", + new BytesArray("{}".getBytes(StandardCharsets.UTF_8)), + MediaTypeRegistry.JSON + ); + assertEquals(MediaTypeRegistry.JSON, request.getMediaType()); BytesStreamOutput output = new BytesStreamOutput(); request.writeTo(output); StreamInput in = StreamInput.wrap(output.bytes().toBytesRef().bytes); PutPipelineRequest serialized = new PutPipelineRequest(in); - assertEquals(XContentType.JSON, serialized.getMediaType()); + assertEquals(MediaTypeRegistry.JSON, serialized.getMediaType()); assertEquals("{}", serialized.getSource().utf8ToString()); } diff --git a/server/src/test/java/org/opensearch/action/ingest/SimulateDocumentBaseResultTests.java b/server/src/test/java/org/opensearch/action/ingest/SimulateDocumentBaseResultTests.java index 15fbbf6e833bd..abb8cd59821c1 100644 --- a/server/src/test/java/org/opensearch/action/ingest/SimulateDocumentBaseResultTests.java +++ b/server/src/test/java/org/opensearch/action/ingest/SimulateDocumentBaseResultTests.java @@ -43,11 +43,11 @@ import java.util.function.Predicate; import java.util.function.Supplier; +import static org.opensearch.action.ingest.WriteableIngestDocumentTests.createRandomIngestDoc; import static org.opensearch.ingest.IngestDocumentMatcher.assertIngestDocument; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; -import static org.opensearch.action.ingest.WriteableIngestDocumentTests.createRandomIngestDoc; public class SimulateDocumentBaseResultTests extends AbstractXContentTestCase<SimulateDocumentBaseResult> { diff --git a/server/src/test/java/org/opensearch/action/ingest/SimulateExecutionServiceTests.java b/server/src/test/java/org/opensearch/action/ingest/SimulateExecutionServiceTests.java index ff7b0dddb33a3..a5a082286f123 100644 --- a/server/src/test/java/org/opensearch/action/ingest/SimulateExecutionServiceTests.java +++ b/server/src/test/java/org/opensearch/action/ingest/SimulateExecutionServiceTests.java @@ -32,7 +32,7 @@ package org.opensearch.action.ingest; -import org.opensearch.action.ActionListener; +import org.opensearch.core.action.ActionListener; import org.opensearch.index.VersionType; import org.opensearch.ingest.AbstractProcessor; import org.opensearch.ingest.CompoundProcessor; diff --git a/server/src/test/java/org/opensearch/action/ingest/SimulatePipelineRequestParsingTests.java b/server/src/test/java/org/opensearch/action/ingest/SimulatePipelineRequestParsingTests.java index 36b1f8089fdea..41f782e308785 100644 --- a/server/src/test/java/org/opensearch/action/ingest/SimulatePipelineRequestParsingTests.java +++ b/server/src/test/java/org/opensearch/action/ingest/SimulatePipelineRequestParsingTests.java @@ -55,12 +55,12 @@ import static org.opensearch.action.ingest.SimulatePipelineRequest.Fields; import static org.opensearch.action.ingest.SimulatePipelineRequest.SIMULATED_PIPELINE_ID; import static org.opensearch.ingest.IngestDocument.Metadata.ID; +import static org.opensearch.ingest.IngestDocument.Metadata.IF_PRIMARY_TERM; +import static org.opensearch.ingest.IngestDocument.Metadata.IF_SEQ_NO; import static org.opensearch.ingest.IngestDocument.Metadata.INDEX; import static org.opensearch.ingest.IngestDocument.Metadata.ROUTING; import static org.opensearch.ingest.IngestDocument.Metadata.VERSION; import static org.opensearch.ingest.IngestDocument.Metadata.VERSION_TYPE; -import 
static org.opensearch.ingest.IngestDocument.Metadata.IF_SEQ_NO; -import static org.opensearch.ingest.IngestDocument.Metadata.IF_PRIMARY_TERM; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; @@ -85,7 +85,7 @@ public void init() throws IOException { when(ingestService.getProcessorFactories()).thenReturn(registry); } - public void testParseUsingPipelineStore(boolean useExplicitType) throws Exception { + public void testParseUsingPipelineStore() throws Exception { int numDocs = randomIntBetween(1, 10); Map<String, Object> requestContent = new HashMap<>(); @@ -131,7 +131,7 @@ public void testParseUsingPipelineStore(boolean useExplicitType) throws Exceptio assertThat(actualRequest.getPipeline().getProcessors().size(), equalTo(1)); } - public void innerTestParseWithProvidedPipeline() throws Exception { + public void testParseWithProvidedPipeline() throws Exception { int numDocs = randomIntBetween(1, 10); Map<String, Object> requestContent = new HashMap<>(); @@ -144,17 +144,29 @@ public void innerTestParseWithProvidedPipeline() throws Exception { List<IngestDocument.Metadata> fields = Arrays.asList(INDEX, ID, ROUTING, VERSION, VERSION_TYPE, IF_SEQ_NO, IF_PRIMARY_TERM); for (IngestDocument.Metadata field : fields) { if (field == VERSION) { - Long value = randomLong(); - doc.put(field.getFieldName(), value); - expectedDoc.put(field.getFieldName(), value); + if (randomBoolean()) { + Long value = randomLong(); + doc.put(field.getFieldName(), value); + expectedDoc.put(field.getFieldName(), value); + } else { + int value = randomIntBetween(1, 1000000); + doc.put(field.getFieldName(), value); + expectedDoc.put(field.getFieldName(), (long) value); + } } else if (field == VERSION_TYPE) { String value = VersionType.toString(randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL, VersionType.EXTERNAL_GTE)); doc.put(field.getFieldName(), value); expectedDoc.put(field.getFieldName(), value); } else if (field == IF_SEQ_NO || field == IF_PRIMARY_TERM) { - Long value = randomNonNegativeLong(); - doc.put(field.getFieldName(), value); - expectedDoc.put(field.getFieldName(), value); + if (randomBoolean()) { + Long value = randomNonNegativeLong(); + doc.put(field.getFieldName(), value); + expectedDoc.put(field.getFieldName(), value); + } else { + int value = randomIntBetween(1, 1000000); + doc.put(field.getFieldName(), value); + expectedDoc.put(field.getFieldName(), (long) value); + } } else { if (randomBoolean()) { String value = randomAlphaOfLengthBetween(1, 10); @@ -282,4 +294,40 @@ public void testNotValidDocs() { ); assertThat(e3.getMessage(), containsString("required property is missing")); } + + public void testNotValidMetadataFields() { + List<IngestDocument.Metadata> fields = Arrays.asList(VERSION, IF_SEQ_NO, IF_PRIMARY_TERM); + for (IngestDocument.Metadata field : fields) { + String metadataFieldName = field.getFieldName(); + Map<String, Object> requestContent = new HashMap<>(); + List<Map<String, Object>> docs = new ArrayList<>(); + requestContent.put(Fields.DOCS, docs); + Map<String, Object> doc = new HashMap<>(); + doc.put(metadataFieldName, randomAlphaOfLengthBetween(1, 10)); + doc.put(Fields.SOURCE, Collections.singletonMap(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10))); + docs.add(doc); + + Map<String, Object> pipelineConfig = new HashMap<>(); + List<Map<String, Object>> processors = new ArrayList<>(); + Map<String, Object> processorConfig = new HashMap<>(); + List<Map<String, 
Object>> onFailureProcessors = new ArrayList<>(); + int numOnFailureProcessors = randomIntBetween(0, 1); + for (int j = 0; j < numOnFailureProcessors; j++) { + onFailureProcessors.add(Collections.singletonMap("mock_processor", Collections.emptyMap())); + } + if (numOnFailureProcessors > 0) { + processorConfig.put("on_failure", onFailureProcessors); + } + processors.add(Collections.singletonMap("mock_processor", processorConfig)); + pipelineConfig.put("processors", processors); + + requestContent.put(Fields.PIPELINE, pipelineConfig); + + assertThrows( + "Failed to parse parameter [" + metadataFieldName + "], only int or long is accepted", + IllegalArgumentException.class, + () -> SimulatePipelineRequest.parse(requestContent, false, ingestService) + ); + } + } } diff --git a/server/src/test/java/org/opensearch/action/ingest/SimulatePipelineRequestTests.java b/server/src/test/java/org/opensearch/action/ingest/SimulatePipelineRequestTests.java index 35cbc83661c8e..90832bc3570d5 100644 --- a/server/src/test/java/org/opensearch/action/ingest/SimulatePipelineRequestTests.java +++ b/server/src/test/java/org/opensearch/action/ingest/SimulatePipelineRequestTests.java @@ -32,10 +32,10 @@ package org.opensearch.action.ingest; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -46,7 +46,7 @@ public class SimulatePipelineRequestTests extends OpenSearchTestCase { public void testSerialization() throws IOException { - SimulatePipelineRequest request = new SimulatePipelineRequest(new BytesArray(""), XContentType.JSON); + SimulatePipelineRequest request = new SimulatePipelineRequest(new BytesArray(""), MediaTypeRegistry.JSON); // Sometimes we set an id if (randomBoolean()) { request.setId(randomAlphaOfLengthBetween(1, 10)); @@ -69,16 +69,16 @@ public void testSerialization() throws IOException { public void testSerializationWithXContent() throws IOException { SimulatePipelineRequest request = new SimulatePipelineRequest( new BytesArray("{}".getBytes(StandardCharsets.UTF_8)), - XContentType.JSON + MediaTypeRegistry.JSON ); - assertEquals(XContentType.JSON, request.getXContentType()); + assertEquals(MediaTypeRegistry.JSON, request.getXContentType()); BytesStreamOutput output = new BytesStreamOutput(); request.writeTo(output); StreamInput in = StreamInput.wrap(output.bytes().toBytesRef().bytes); SimulatePipelineRequest serialized = new SimulatePipelineRequest(in); - assertEquals(XContentType.JSON, serialized.getXContentType()); + assertEquals(MediaTypeRegistry.JSON, serialized.getXContentType()); assertEquals("{}", serialized.getSource().utf8ToString()); } } diff --git a/server/src/test/java/org/opensearch/action/ingest/WriteableIngestDocumentTests.java b/server/src/test/java/org/opensearch/action/ingest/WriteableIngestDocumentTests.java index 84f71b48eaf21..76aa82735ab3d 100644 --- a/server/src/test/java/org/opensearch/action/ingest/WriteableIngestDocumentTests.java +++ b/server/src/test/java/org/opensearch/action/ingest/WriteableIngestDocumentTests.java @@ -32,16 +32,16 @@ package org.opensearch.action.ingest; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.stream.BytesStreamOutput; -import 
org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentType; -import org.opensearch.ingest.RandomDocumentPicks; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; import org.opensearch.ingest.IngestDocument; +import org.opensearch.ingest.RandomDocumentPicks; import org.opensearch.test.AbstractXContentTestCase; import org.opensearch.test.RandomObjects; diff --git a/server/src/test/java/org/opensearch/action/main/MainActionTests.java b/server/src/test/java/org/opensearch/action/main/MainActionTests.java index b219e83524e7f..b43dc2a80cd37 100644 --- a/server/src/test/java/org/opensearch/action/main/MainActionTests.java +++ b/server/src/test/java/org/opensearch/action/main/MainActionTests.java @@ -32,7 +32,6 @@ package org.opensearch.action.main; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; @@ -42,8 +41,10 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.rest.RestStatus; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportService; @@ -109,7 +110,8 @@ public void testMainActionClusterAvailable() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); TransportMainAction action = new TransportMainAction(settings, transportService, mock(ActionFilters.class), clusterService); AtomicReference<MainResponse> responseRef = new AtomicReference<>(); diff --git a/server/src/test/java/org/opensearch/action/main/MainResponseTests.java b/server/src/test/java/org/opensearch/action/main/MainResponseTests.java index 83b52bb74a56b..e3a0f772eaf68 100644 --- a/server/src/test/java/org/opensearch/action/main/MainResponseTests.java +++ b/server/src/test/java/org/opensearch/action/main/MainResponseTests.java @@ -35,11 +35,10 @@ import org.opensearch.Build; import org.opensearch.Version; import org.opensearch.cluster.ClusterName; -import org.opensearch.common.Strings; +import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.test.AbstractSerializingTestCase; import org.opensearch.test.VersionUtils; @@ -133,7 +132,7 @@ public void testToXContent() throws IOException { + TAGLINE + "\"" + "}", - Strings.toString(builder) + builder.toString() ); } diff --git a/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java b/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java index 
61dd2ac8c14ae..da87a0a967f53 100644 --- a/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java +++ b/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java @@ -31,10 +31,7 @@ package org.opensearch.action.resync; -import org.junit.AfterClass; -import org.junit.BeforeClass; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.replication.PendingReplicationActions; @@ -48,30 +45,34 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.PageCacheRecycler; -import org.opensearch.common.lease.Releasable; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.index.IndexingPressureService; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ReplicationGroup; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.translog.Translog; import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; import org.opensearch.transport.nio.MockNioTransport; +import org.junit.AfterClass; +import org.junit.BeforeClass; import java.nio.charset.Charset; import java.util.Collections; @@ -82,6 +83,10 @@ import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; +import static org.opensearch.action.support.replication.ClusterStateCreationUtils.state; +import static org.opensearch.test.ClusterServiceUtils.createClusterService; +import static org.opensearch.test.ClusterServiceUtils.setState; +import static org.opensearch.transport.TransportService.NOOP_TRANSPORT_INTERCEPTOR; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; @@ -91,10 +96,6 @@ import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import static org.opensearch.action.support.replication.ClusterStateCreationUtils.state; -import static org.opensearch.test.ClusterServiceUtils.createClusterService; -import static org.opensearch.test.ClusterServiceUtils.setState; -import static org.opensearch.transport.TransportService.NOOP_TRANSPORT_INTERCEPTOR; public class TransportResyncReplicationActionTests extends OpenSearchTestCase { 
@@ -134,7 +135,8 @@ public void testResyncDoesNotBlockOnPrimaryAction() throws Exception { new NetworkService(emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(emptyList()), - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ) ) { @@ -145,7 +147,8 @@ public void testResyncDoesNotBlockOnPrimaryAction() throws Exception { NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); @@ -200,7 +203,8 @@ public void testResyncDoesNotBlockOnPrimaryAction() throws Exception { shardStateAction, new ActionFilters(new HashSet<>()), new IndexingPressureService(Settings.EMPTY, clusterService), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); assertThat(action.globalBlockLevel(), nullValue()); @@ -253,7 +257,8 @@ private TransportResyncReplicationAction createAction() { mock(ShardStateAction.class), new ActionFilters(new HashSet<>()), mock(IndexingPressureService.class), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); } } diff --git a/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java index d906c7be15a15..420289d3ff2e5 100644 --- a/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java @@ -32,19 +32,24 @@ package org.opensearch.action.search; -import org.junit.After; -import org.junit.Before; -import org.opensearch.action.ActionListener; +import org.apache.logging.log4j.LogManager; import org.opensearch.action.OriginalIndices; import org.opensearch.action.support.IndicesOptions; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.routing.GroupShardsIterator; import org.opensearch.common.UUIDs; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.AtomicArray; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.set.Sets; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.NoopCircuitBreaker; import org.opensearch.core.index.Index; -import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.index.shard.ShardNotFoundException; import org.opensearch.search.SearchPhaseResult; import org.opensearch.search.SearchShardTarget; @@ -53,8 +58,14 @@ import org.opensearch.search.internal.ShardSearchContextId; import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.search.query.QuerySearchResult; +import org.opensearch.telemetry.tracing.noop.NoopTracer; +import org.opensearch.test.InternalAggregationTestCase; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; +import org.junit.After; +import org.junit.Before; import java.util.ArrayList; import java.util.Arrays; @@ -65,6 +76,7 @@ import 
java.util.UUID; import java.util.concurrent.CopyOnWriteArraySet; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; @@ -83,12 +95,16 @@ public class AbstractSearchAsyncActionTests extends OpenSearchTestCase { private final List<Tuple<String, String>> resolvedNodes = new ArrayList<>(); private final Set<ShardSearchContextId> releasedContexts = new CopyOnWriteArraySet<>(); private ExecutorService executor; + private SearchRequestOperationsListenerAssertingListener assertingListener; + ThreadPool threadPool; @Before @Override public void setUp() throws Exception { super.setUp(); executor = Executors.newFixedThreadPool(1); + threadPool = new TestThreadPool(getClass().getName()); + assertingListener = new SearchRequestOperationsListenerAssertingListener(); } @After @@ -97,6 +113,8 @@ public void tearDown() throws Exception { super.tearDown(); executor.shutdown(); assertTrue(executor.awaitTermination(1, TimeUnit.SECONDS)); + ThreadPool.terminate(threadPool, 5, TimeUnit.SECONDS); + assertingListener.assertFinished(); } private AbstractSearchAsyncAction<SearchPhaseResult> createAction( @@ -112,6 +130,7 @@ private AbstractSearchAsyncAction<SearchPhaseResult> createAction( listener, controlled, false, + false, expected, new SearchShardIterator(null, null, Collections.emptyList(), null) ); @@ -123,9 +142,11 @@ private AbstractSearchAsyncAction<SearchPhaseResult> createAction( ActionListener<SearchResponse> listener, final boolean controlled, final boolean failExecutePhaseOnShard, + final boolean catchExceptionWhenExecutePhaseOnShard, final AtomicLong expected, final SearchShardIterator... shards ) { + final Runnable runnable; final TransportSearchAction.SearchTimeProvider timeProvider; if (controlled) { @@ -161,7 +182,12 @@ private AbstractSearchAsyncAction<SearchPhaseResult> createAction( null, results, request.getMaxConcurrentShardRequests(), - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(assertingListener), LogManager.getLogger()), + request + ), + NoopTracer.INSTANCE ) { @Override protected SearchPhase getNextPhase(final SearchPhaseResults<SearchPhaseResult> results, SearchPhaseContext context) { @@ -177,7 +203,15 @@ protected void executePhaseOnShard( if (failExecutePhaseOnShard) { listener.onFailure(new ShardNotFoundException(shardIt.shardId())); } else { - listener.onResponse(new QuerySearchResult()); + if (catchExceptionWhenExecutePhaseOnShard) { + try { + listener.onResponse(new QuerySearchResult()); + } catch (Exception e) { + listener.onFailure(e); + } + } else { + listener.onResponse(new QuerySearchResult()); + } } } @@ -313,6 +347,55 @@ public void testSendSearchResponseDisallowPartialFailures() { assertEquals(requestIds, releasedContexts); } + public void testOnPhaseFailureAndVerifyListeners() { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + SearchRequestStats testListener = new SearchRequestStats(clusterSettings); + + final List<SearchRequestOperationsListener> requestOperationListeners = List.of(testListener, assertingListener); + SearchQueryThenFetchAsyncAction action = createSearchQueryThenFetchAsyncAction(requestOperationListeners); + action.start(); + assertEquals(1, testListener.getPhaseCurrent(action.getSearchPhaseName())); + 
action.onPhaseFailure(new SearchPhase("test") { + @Override + public void run() { + + } + }, "message", null); + assertEquals(0, testListener.getPhaseCurrent(action.getSearchPhaseName())); + assertEquals(0, testListener.getPhaseTotal(action.getSearchPhaseName())); + + SearchDfsQueryThenFetchAsyncAction searchDfsQueryThenFetchAsyncAction = createSearchDfsQueryThenFetchAsyncAction( + requestOperationListeners + ); + searchDfsQueryThenFetchAsyncAction.start(); + assertEquals(1, testListener.getPhaseCurrent(searchDfsQueryThenFetchAsyncAction.getSearchPhaseName())); + searchDfsQueryThenFetchAsyncAction.onPhaseFailure(new SearchPhase("test") { + @Override + public void run() { + + } + }, "message", null); + assertEquals(0, testListener.getPhaseCurrent(action.getSearchPhaseName())); + assertEquals(0, testListener.getPhaseTotal(action.getSearchPhaseName())); + + FetchSearchPhase fetchPhase = createFetchSearchPhase(); + ShardId shardId = new ShardId(randomAlphaOfLengthBetween(5, 10), randomAlphaOfLength(10), randomInt()); + SearchShardIterator searchShardIterator = new SearchShardIterator(null, shardId, Collections.emptyList(), OriginalIndices.NONE); + searchShardIterator.resetAndSkip(); + action.skipShard(searchShardIterator); + action.start(); + action.executeNextPhase(action, fetchPhase); + assertEquals(1, testListener.getPhaseCurrent(fetchPhase.getSearchPhaseName())); + action.onPhaseFailure(new SearchPhase("test") { + @Override + public void run() { + + } + }, "message", null); + assertEquals(0, testListener.getPhaseCurrent(fetchPhase.getSearchPhaseName())); + assertEquals(0, testListener.getPhaseTotal(fetchPhase.getSearchPhaseName())); + } + public void testOnPhaseFailure() { SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(false); AtomicReference<Exception> exception = new AtomicReference<>(); @@ -321,6 +404,7 @@ public void testOnPhaseFailure() { List<Tuple<String, String>> nodeLookups = new ArrayList<>(); ArraySearchPhaseResults<SearchPhaseResult> phaseResults = phaseResults(requestIds, nodeLookups, 0); AbstractSearchAsyncAction<SearchPhaseResult> action = createAction(searchRequest, phaseResults, listener, false, new AtomicLong()); + action.onPhaseFailure(new SearchPhase("test") { @Override public void run() { @@ -416,6 +500,7 @@ public void onFailure(Exception e) { }, false, true, + false, new AtomicLong(), shards ); @@ -462,6 +547,7 @@ public void onFailure(Exception e) { }, false, false, + false, new AtomicLong(), shards ); @@ -477,7 +563,7 @@ public void onFailure(Exception e) { assertThat(searchResponse.getSuccessfulShards(), equalTo(shards.length)); } - public void testExecutePhaseOnShardFailure() throws InterruptedException { + private void innerTestExecutePhaseOnShardFailure(boolean catchExceptionWhenExecutePhaseOnShard) throws InterruptedException { final Index index = new Index("test", UUID.randomUUID().toString()); final SearchShardIterator[] shards = IntStream.range(0, 2 + randomInt(3)) @@ -513,6 +599,7 @@ public void onFailure(Exception e) { }, false, false, + catchExceptionWhenExecutePhaseOnShard, new AtomicLong(), shards ); @@ -528,6 +615,247 @@ public void onFailure(Exception e) { assertThat(searchResponse.getSuccessfulShards(), equalTo(shards.length)); } + public void testExecutePhaseOnShardFailure() throws InterruptedException { + innerTestExecutePhaseOnShardFailure(false); + } + + public void testExecutePhaseOnShardFailureAndThrowException() throws InterruptedException { + innerTestExecutePhaseOnShardFailure(true); + } + + public void 
testOnPhaseListenersWithQueryAndThenFetchType() throws InterruptedException { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + SearchRequestStats testListener = new SearchRequestStats(clusterSettings); + final List<SearchRequestOperationsListener> requestOperationListeners = new ArrayList<>(List.of(testListener, assertingListener)); + + long delay = (randomIntBetween(1, 5)); + delay = delay * 10; + + SearchQueryThenFetchAsyncAction action = createSearchQueryThenFetchAsyncAction(requestOperationListeners); + action.start(); + + // Verify queryPhase current metric + assertEquals(1, testListener.getPhaseCurrent(action.getSearchPhaseName())); + TimeUnit.MILLISECONDS.sleep(delay); + + FetchSearchPhase fetchPhase = createFetchSearchPhase(); + ShardId shardId = new ShardId(randomAlphaOfLengthBetween(5, 10), randomAlphaOfLength(10), randomInt()); + SearchShardIterator searchShardIterator = new SearchShardIterator(null, shardId, Collections.emptyList(), OriginalIndices.NONE); + searchShardIterator.resetAndSkip(); + action.skipShard(searchShardIterator); + action.executeNextPhase(action, fetchPhase); + + // Verify queryPhase total, current and latency metrics + assertEquals(0, testListener.getPhaseCurrent(action.getSearchPhaseName())); + assertThat(testListener.getPhaseMetric(action.getSearchPhaseName()), greaterThanOrEqualTo(delay)); + assertEquals(1, testListener.getPhaseTotal(action.getSearchPhaseName())); + + // Verify fetchPhase current metric + assertEquals(1, testListener.getPhaseCurrent(fetchPhase.getSearchPhaseName())); + TimeUnit.MILLISECONDS.sleep(delay); + + ExpandSearchPhase expandPhase = createExpandSearchPhase(); + action.executeNextPhase(fetchPhase, expandPhase); + TimeUnit.MILLISECONDS.sleep(delay); + + // Verify fetchPhase total, current and latency metrics + assertThat(testListener.getPhaseMetric(fetchPhase.getSearchPhaseName()), greaterThanOrEqualTo(delay)); + assertEquals(1, testListener.getPhaseTotal(fetchPhase.getSearchPhaseName())); + assertEquals(0, testListener.getPhaseCurrent(fetchPhase.getSearchPhaseName())); + + assertEquals(1, testListener.getPhaseCurrent(expandPhase.getSearchPhaseName())); + + action.executeNextPhase(expandPhase, fetchPhase); + action.onPhaseDone(); /* finish phase since we don't have a response being sent */ + + assertThat(testListener.getPhaseMetric(expandPhase.getSearchPhaseName()), greaterThanOrEqualTo(delay)); + assertEquals(1, testListener.getPhaseTotal(expandPhase.getSearchPhaseName())); + assertEquals(0, testListener.getPhaseCurrent(expandPhase.getSearchPhaseName())); + } + + public void testOnPhaseListenersWithDfsType() throws InterruptedException { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + SearchRequestStats testListener = new SearchRequestStats(clusterSettings); + final List<SearchRequestOperationsListener> requestOperationListeners = new ArrayList<>(List.of(testListener, assertingListener)); + + SearchDfsQueryThenFetchAsyncAction searchDfsQueryThenFetchAsyncAction = createSearchDfsQueryThenFetchAsyncAction( + requestOperationListeners + ); + long delay = (randomIntBetween(1, 5)); + + FetchSearchPhase fetchPhase = createFetchSearchPhase(); + searchDfsQueryThenFetchAsyncAction.start(); + assertEquals(1, testListener.getPhaseCurrent(searchDfsQueryThenFetchAsyncAction.getSearchPhaseName())); + TimeUnit.MILLISECONDS.sleep(delay); + ShardId shardId = new ShardId(randomAlphaOfLengthBetween(5, 10),
randomAlphaOfLength(10), randomInt()); + SearchShardIterator searchShardIterator = new SearchShardIterator(null, shardId, Collections.emptyList(), OriginalIndices.NONE); + searchShardIterator.resetAndSkip(); + + searchDfsQueryThenFetchAsyncAction.skipShard(searchShardIterator); + searchDfsQueryThenFetchAsyncAction.executeNextPhase(searchDfsQueryThenFetchAsyncAction, fetchPhase); + searchDfsQueryThenFetchAsyncAction.onPhaseFailure( + fetchPhase, + "Something went wrong", + null + ); /* finalizing the fetch phase since we do ad hoc phase lifecycle calls */ + + assertThat(testListener.getPhaseMetric(searchDfsQueryThenFetchAsyncAction.getSearchPhaseName()), greaterThanOrEqualTo(delay)); + assertEquals(1, testListener.getPhaseTotal(searchDfsQueryThenFetchAsyncAction.getSearchPhaseName())); + assertEquals(0, testListener.getPhaseCurrent(searchDfsQueryThenFetchAsyncAction.getSearchPhaseName())); + } + + private SearchDfsQueryThenFetchAsyncAction createSearchDfsQueryThenFetchAsyncAction( + List<SearchRequestOperationsListener> searchRequestOperationsListeners + ) { + SearchPhaseController controller = new SearchPhaseController( + writableRegistry(), + r -> InternalAggregationTestCase.emptyReduceContextBuilder() + ); + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + SearchTask task = new SearchTask(0, "n/a", "n/a", () -> "test", null, Collections.emptyMap()); + Executor executor = OpenSearchExecutors.newDirectExecutorService(); + SearchShardIterator shards = new SearchShardIterator(null, null, Collections.emptyList(), null); + GroupShardsIterator<SearchShardIterator> shardsIter = new GroupShardsIterator<>(List.of(shards)); + QueryPhaseResultConsumer resultConsumer = new QueryPhaseResultConsumer( + searchRequest, + executor, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + controller, + task.getProgressListener(), + writableRegistry(), + shardsIter.size(), + exc -> {} + ); + AtomicReference<Exception> exception = new AtomicReference<>(); + ActionListener<SearchResponse> listener = ActionListener.wrap(response -> fail("onResponse should not be called"), exception::set); + TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider( + 0, + System.nanoTime(), + System::nanoTime + ); + return new SearchDfsQueryThenFetchAsyncAction( + logger, + null, + null, + null, + null, + null, + controller, + executor, + resultConsumer, + searchRequest, + listener, + shardsIter, + timeProvider, + null, + task, + SearchResponse.Clusters.EMPTY, + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(searchRequestOperationsListeners, logger), + searchRequest + ), + NoopTracer.INSTANCE + ); + } + + private SearchQueryThenFetchAsyncAction createSearchQueryThenFetchAsyncAction( + List<SearchRequestOperationsListener> searchRequestOperationsListeners + ) { + SearchPhaseController controller = new SearchPhaseController( + writableRegistry(), + r -> InternalAggregationTestCase.emptyReduceContextBuilder() + ); + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + SearchTask task = new SearchTask(0, "n/a", "n/a", () -> "test", null, Collections.emptyMap()); + Executor executor = OpenSearchExecutors.newDirectExecutorService(); + SearchShardIterator shards = new SearchShardIterator(null, null, Collections.emptyList(), null); + GroupShardsIterator<SearchShardIterator> shardsIter = new GroupShardsIterator<>(List.of(shards)); + QueryPhaseResultConsumer resultConsumer = new
QueryPhaseResultConsumer( + searchRequest, + executor, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + controller, + task.getProgressListener(), + writableRegistry(), + shardsIter.size(), + exc -> {} + ); + AtomicReference<Exception> exception = new AtomicReference<>(); + ActionListener<SearchResponse> listener = ActionListener.wrap(response -> fail("onResponse should not be called"), exception::set); + TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider( + 0, + System.nanoTime(), + System::nanoTime + ); + return new SearchQueryThenFetchAsyncAction( + logger, + null, + null, + null, + null, + null, + null, + executor, + resultConsumer, + searchRequest, + listener, + shardsIter, + timeProvider, + null, + task, + SearchResponse.Clusters.EMPTY, + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(searchRequestOperationsListeners, logger), + searchRequest + ), + NoopTracer.INSTANCE + ) { + @Override + ShardSearchFailure[] buildShardFailures() { + return ShardSearchFailure.EMPTY_ARRAY; + } + + @Override + public void sendSearchResponse(InternalSearchResponse internalSearchResponse, AtomicArray<SearchPhaseResult> queryResults) { + start(); + } + }; + } + + private FetchSearchPhase createFetchSearchPhase() { + SearchPhaseController controller = new SearchPhaseController( + writableRegistry(), + r -> InternalAggregationTestCase.emptyReduceContextBuilder() + ); + MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(1); + QueryPhaseResultConsumer results = controller.newSearchPhaseResults( + OpenSearchExecutors.newDirectExecutorService(), + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + SearchProgressListener.NOOP, + mockSearchPhaseContext.getRequest(), + 1, + exc -> {} + ); + return new FetchSearchPhase( + results, + controller, + null, + mockSearchPhaseContext, + (searchResponse, scrollId) -> new SearchPhase("test") { + @Override + public void run() { + mockSearchPhaseContext.sendSearchResponse(searchResponse, null); + } + } + ); + } + + private ExpandSearchPhase createExpandSearchPhase() { + MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(1); + InternalSearchResponse internalSearchResponse = new InternalSearchResponse(null, null, null, null, false, null, 1); + return new ExpandSearchPhase(mockSearchPhaseContext, internalSearchResponse, null); + } + private static final class PhaseResult extends SearchPhaseResult { PhaseResult(ShardSearchContextId contextId) { this.contextId = contextId; diff --git a/server/src/test/java/org/opensearch/action/search/BottomSortValuesCollectorTests.java b/server/src/test/java/org/opensearch/action/search/BottomSortValuesCollectorTests.java index 67a151646c635..4f929a71429a6 100644 --- a/server/src/test/java/org/opensearch/action/search/BottomSortValuesCollectorTests.java +++ b/server/src/test/java/org/opensearch/action/search/BottomSortValuesCollectorTests.java @@ -34,6 +34,7 @@ import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.FieldDoc; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TopFieldDocs; import org.apache.lucene.search.TotalHits; @@ -46,10 +47,9 @@ import java.time.ZoneId; import java.util.Arrays; -import static org.apache.lucene.search.TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO; -import static org.opensearch.index.mapper.DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER; import static org.hamcrest.Matchers.equalTo; import 
static org.hamcrest.Matchers.instanceOf; +import static org.apache.lucene.search.TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO; public class BottomSortValuesCollectorTests extends OpenSearchTestCase { public void testWithStrings() { @@ -136,7 +136,11 @@ public void testWithDates() { for (boolean reverse : new boolean[] { true, false }) { SortField[] sortFields = new SortField[] { new SortField("foo", SortField.Type.LONG, reverse) }; DocValueFormat[] sortFormats = new DocValueFormat[] { - new DocValueFormat.DateTime(DEFAULT_DATE_TIME_FORMATTER, ZoneId.of("UTC"), DateFieldMapper.Resolution.MILLISECONDS) }; + new DocValueFormat.DateTime( + DateFieldMapper.getDefaultDateTimeFormatter(), + ZoneId.of("UTC"), + DateFieldMapper.Resolution.MILLISECONDS + ) }; BottomSortValuesCollector collector = new BottomSortValuesCollector(3, sortFields); collector.consumeTopDocs( createTopDocs(sortFields[0], 100, newDateArray("2017-06-01T12:18:20Z", "2018-04-03T15:10:27Z", "2013-06-01T13:10:20Z")), @@ -170,7 +174,11 @@ public void testWithDateNanos() { for (boolean reverse : new boolean[] { true, false }) { SortField[] sortFields = new SortField[] { new SortField("foo", SortField.Type.LONG, reverse) }; DocValueFormat[] sortFormats = new DocValueFormat[] { - new DocValueFormat.DateTime(DEFAULT_DATE_TIME_FORMATTER, ZoneId.of("UTC"), DateFieldMapper.Resolution.NANOSECONDS) }; + new DocValueFormat.DateTime( + DateFieldMapper.getDefaultDateTimeFormatter(), + ZoneId.of("UTC"), + DateFieldMapper.Resolution.NANOSECONDS + ) }; BottomSortValuesCollector collector = new BottomSortValuesCollector(3, sortFields); collector.consumeTopDocs( createTopDocs(sortFields[0], 100, newDateNanoArray("2017-06-01T12:18:20Z", "2018-04-03T15:10:27Z", "2013-06-01T13:10:20Z")), @@ -242,7 +250,7 @@ private Object[] newBytesArray(String... values) { private Object[] newDateArray(String... values) { Long[] longs = new Long[values.length]; for (int i = 0; i < values.length; i++) { - longs[i] = DEFAULT_DATE_TIME_FORMATTER.parseMillis(values[i]); + longs[i] = DateFieldMapper.getDefaultDateTimeFormatter().parseMillis(values[i]); } return longs; } @@ -250,14 +258,14 @@ private Object[] newDateArray(String... values) { private Object[] newDateNanoArray(String... 
values) { Long[] longs = new Long[values.length]; for (int i = 0; i < values.length; i++) { - longs[i] = DateUtils.toNanoSeconds(DEFAULT_DATE_TIME_FORMATTER.parseMillis(values[i])); + longs[i] = DateUtils.toNanoSeconds(DateFieldMapper.getDefaultDateTimeFormatter().parseMillis(values[i])); } return longs; } private TopFieldDocs createTopDocs(SortField sortField, int totalHits, Object[] values) { FieldDoc[] fieldDocs = new FieldDoc[values.length]; - FieldComparator cmp = sortField.getComparator(1, false); + FieldComparator cmp = sortField.getComparator(1, Pruning.NONE); for (int i = 0; i < values.length; i++) { fieldDocs[i] = new FieldDoc(i, Float.NaN, new Object[] { values[i] }); } diff --git a/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java b/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java index 0561f81f96ce6..1881c705fe6b3 100644 --- a/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java +++ b/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java @@ -31,27 +31,36 @@ package org.opensearch.action.search; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.BytesRef; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.OriginalIndices; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.GroupShardsIterator; import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.NoopCircuitBreaker; import org.opensearch.core.index.shard.ShardId; import org.opensearch.search.SearchPhaseResult; import org.opensearch.search.SearchService; import org.opensearch.search.SearchShardTarget; import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.search.dfs.DfsSearchResult; import org.opensearch.search.internal.AliasFilter; import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.search.sort.MinAndMax; import org.opensearch.search.sort.SortBuilders; import org.opensearch.search.sort.SortOrder; +import org.opensearch.telemetry.tracing.noop.NoopTracer; +import org.opensearch.test.InternalAggregationTestCase; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.transport.Transport; +import org.junit.After; +import org.junit.Before; import java.io.IOException; import java.util.ArrayList; @@ -63,14 +72,33 @@ import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiFunction; import java.util.stream.IntStream; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.Matchers.equalTo; public class CanMatchPreFilterSearchPhaseTests extends OpenSearchTestCase { + private SearchRequestOperationsListenerAssertingListener assertingListener; + + @Before + public void setUp() throws Exception { + super.setUp(); + + assertingListener = new SearchRequestOperationsListenerAssertingListener(); + } + + 
@After + public void tearDown() throws Exception { + super.tearDown(); + + assertingListener.assertFinished(); + } public void testFilterShards() throws InterruptedException { @@ -115,6 +143,10 @@ public void sendCanMatch( final SearchRequest searchRequest = new SearchRequest(); searchRequest.allowPartialSearchResults(true); + final SearchRequestOperationsListener searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener( + List.of(assertingListener), + LogManager.getLogger() + ); CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase( logger, searchTransportService, @@ -133,10 +165,13 @@ public void sendCanMatch( @Override public void run() throws IOException { result.set(iter); + searchRequestOperationsListener.onPhaseEnd(new MockSearchPhaseContext(1, searchRequest, this), null); latch.countDown(); } }, - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + new SearchRequestContext(searchRequestOperationsListener, searchRequest), + NoopTracer.INSTANCE ); canMatchPhase.start(); @@ -206,6 +241,10 @@ public void sendCanMatch( final SearchRequest searchRequest = new SearchRequest(); searchRequest.allowPartialSearchResults(true); + final SearchRequestOperationsListener searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener( + List.of(assertingListener), + LogManager.getLogger() + ); CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase( logger, searchTransportService, @@ -224,10 +263,13 @@ public void sendCanMatch( @Override public void run() throws IOException { result.set(iter); + searchRequestOperationsListener.onPhaseEnd(new MockSearchPhaseContext(1, searchRequest, this), null); latch.countDown(); } }, - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + new SearchRequestContext(searchRequestOperationsListener, searchRequest), + NoopTracer.INSTANCE ); canMatchPhase.start(); @@ -287,6 +329,10 @@ public void sendCanMatch( (e) -> { throw new AssertionError("unexpected", e); } ); Map<String, AliasFilter> aliasFilters = Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)); + final SearchRequestOperationsListener searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener( + List.of(assertingListener), + LogManager.getLogger() + ); final CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase( logger, searchTransportService, @@ -301,54 +347,62 @@ public void sendCanMatch( timeProvider, ClusterState.EMPTY_STATE, null, - (iter) -> new AbstractSearchAsyncAction<SearchPhaseResult>("test", logger, transportService, (cluster, node) -> { - assert cluster == null : "cluster was not null: " + cluster; - return lookup.get(node); - }, - aliasFilters, - Collections.emptyMap(), - Collections.emptyMap(), - executor, - searchRequest, - responseListener, - iter, - new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0), - ClusterState.EMPTY_STATE, - null, - new ArraySearchPhaseResults<>(iter.size()), - randomIntBetween(1, 32), - SearchResponse.Clusters.EMPTY - ) { - - @Override - protected SearchPhase getNextPhase(SearchPhaseResults<SearchPhaseResult> results, SearchPhaseContext context) { - return new SearchPhase("test") { + (iter) -> { + return new WrappingSearchAsyncActionPhase( + new AbstractSearchAsyncAction<SearchPhaseResult>("test", logger, transportService, (cluster, node) -> { + assert cluster == null : "cluster was not null: " + cluster; + return lookup.get(node); + }, + aliasFilters, + 
Collections.emptyMap(), + Collections.emptyMap(), + executor, + searchRequest, + responseListener, + iter, + new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0), + ClusterState.EMPTY_STATE, + null, + new ArraySearchPhaseResults<>(iter.size()), + randomIntBetween(1, 32), + SearchResponse.Clusters.EMPTY, + new SearchRequestContext(searchRequestOperationsListener, searchRequest), + NoopTracer.INSTANCE + ) { @Override - public void run() { - latch.countDown(); + protected SearchPhase getNextPhase(SearchPhaseResults<SearchPhaseResult> results, SearchPhaseContext context) { + return new WrappingSearchAsyncActionPhase(this) { + @Override + public void run() { + latch.countDown(); + } + }; } - }; - } - @Override - protected void executePhaseOnShard( - final SearchShardIterator shardIt, - final SearchShardTarget shard, - final SearchActionListener<SearchPhaseResult> listener - ) { - if (randomBoolean()) { - listener.onResponse(new SearchPhaseResult() { - }); - } else { - listener.onFailure(new Exception("failure")); + @Override + protected void executePhaseOnShard( + final SearchShardIterator shardIt, + final SearchShardTarget shard, + final SearchActionListener<SearchPhaseResult> listener + ) { + if (randomBoolean()) { + listener.onResponse(new SearchPhaseResult() { + }); + } else { + listener.onFailure(new Exception("failure")); + } + } } - } + ); }, - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + new SearchRequestContext(searchRequestOperationsListener, searchRequest), + NoopTracer.INSTANCE ); canMatchPhase.start(); latch.await(); + executor.shutdown(); } @@ -407,6 +461,10 @@ public void sendCanMatch( searchRequest.source(new SearchSourceBuilder().sort(SortBuilders.fieldSort("timestamp").order(order))); searchRequest.allowPartialSearchResults(true); + final SearchRequestOperationsListener searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener( + List.of(assertingListener), + LogManager.getLogger() + ); CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase( logger, searchTransportService, @@ -425,14 +483,18 @@ public void sendCanMatch( @Override public void run() { result.set(iter); + searchRequestOperationsListener.onPhaseEnd(new MockSearchPhaseContext(1, searchRequest, this), null); latch.countDown(); } }, - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + new SearchRequestContext(searchRequestOperationsListener, searchRequest), + NoopTracer.INSTANCE ); canMatchPhase.start(); latch.await(); + ShardId[] expected = IntStream.range(0, shardIds.size()) .boxed() .sorted(Comparator.comparing(minAndMaxes::get, MinAndMax.getComparator(order)).thenComparing(shardIds::get)) @@ -506,6 +568,10 @@ public void sendCanMatch( searchRequest.source(new SearchSourceBuilder().sort(SortBuilders.fieldSort("timestamp").order(order))); searchRequest.allowPartialSearchResults(true); + final SearchRequestOperationsListener searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener( + List.of(assertingListener), + LogManager.getLogger() + ); CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase( logger, searchTransportService, @@ -524,14 +590,18 @@ public void sendCanMatch( @Override public void run() { result.set(iter); + searchRequestOperationsListener.onPhaseEnd(new MockSearchPhaseContext(1, searchRequest, this), null); latch.countDown(); } }, - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + new SearchRequestContext(searchRequestOperationsListener, 
searchRequest), + NoopTracer.INSTANCE ); canMatchPhase.start(); latch.await(); + int shardId = 0; for (SearchShardIterator i : result.get()) { assertThat(i.shardId().id(), equalTo(shardId++)); @@ -540,4 +610,192 @@ public void run() { assertThat(result.get().size(), equalTo(numShards)); } } + + public void testAsyncAction() throws InterruptedException { + + final TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider( + 0, + System.nanoTime(), + System::nanoTime + ); + + Map<String, Transport.Connection> lookup = new ConcurrentHashMap<>(); + DiscoveryNode primaryNode = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNode replicaNode = new DiscoveryNode("node_2", buildNewFakeTransportAddress(), Version.CURRENT); + lookup.put("node_1", new SearchAsyncActionTests.MockConnection(primaryNode)); + lookup.put("node_2", new SearchAsyncActionTests.MockConnection(replicaNode)); + final boolean shard1 = randomBoolean(); + final boolean shard2 = randomBoolean(); + + SearchTransportService searchTransportService = new SearchTransportService(null, null) { + @Override + public void sendCanMatch( + Transport.Connection connection, + ShardSearchRequest request, + SearchTask task, + ActionListener<SearchService.CanMatchResponse> listener + ) { + new Thread( + () -> listener.onResponse(new SearchService.CanMatchResponse(request.shardId().id() == 0 ? shard1 : shard2, null)) + ).start(); + } + }; + + AtomicReference<GroupShardsIterator<SearchShardIterator>> result = new AtomicReference<>(); + CountDownLatch latch = new CountDownLatch(1); + GroupShardsIterator<SearchShardIterator> shardsIter = SearchAsyncActionTests.getShardsIter( + "idx", + new OriginalIndices(new String[] { "idx" }, SearchRequest.DEFAULT_INDICES_OPTIONS), + 2, + randomBoolean(), + primaryNode, + replicaNode + ); + final SearchRequest searchRequest = new SearchRequest(); + searchRequest.allowPartialSearchResults(true); + + SearchTask task = new SearchTask(0, "n/a", "n/a", () -> "test", null, Collections.emptyMap()); + ExecutorService executor = OpenSearchExecutors.newDirectExecutorService(); + SearchRequestContext searchRequestContext = new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(assertingListener), LogManager.getLogger()), + searchRequest + ); + + SearchPhaseController controller = new SearchPhaseController( + writableRegistry(), + r -> InternalAggregationTestCase.emptyReduceContextBuilder() + ); + + QueryPhaseResultConsumer resultConsumer = new QueryPhaseResultConsumer( + searchRequest, + executor, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + controller, + task.getProgressListener(), + writableRegistry(), + shardsIter.size(), + exc -> {} + ); + + CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase( + logger, + searchTransportService, + (clusterAlias, node) -> lookup.get(node), + Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)), + Collections.emptyMap(), + Collections.emptyMap(), + executor, + searchRequest, + null, + shardsIter, + timeProvider, + ClusterState.EMPTY_STATE, + null, + (iter) -> { + AbstractSearchAsyncAction<? 
extends SearchPhaseResult> action = new SearchDfsQueryAsyncAction( + logger, + searchTransportService, + (clusterAlias, node) -> lookup.get(node), + Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)), + Collections.emptyMap(), + Collections.emptyMap(), + controller, + executor, + resultConsumer, + searchRequest, + null, + shardsIter, + timeProvider, + ClusterState.EMPTY_STATE, + task, + SearchResponse.Clusters.EMPTY, + searchRequestContext + ); + return new WrappingSearchAsyncActionPhase(action) { + @Override + public void run() { + super.run(); + latch.countDown(); + } + }; + }, + SearchResponse.Clusters.EMPTY, + searchRequestContext, + NoopTracer.INSTANCE + ); + + canMatchPhase.start(); + latch.await(); + + assertThat(result.get(), is(nullValue())); + } + + private static final class SearchDfsQueryAsyncAction extends AbstractSearchAsyncAction<DfsSearchResult> { + private final SearchRequestOperationsListener listener; + + SearchDfsQueryAsyncAction( + final Logger logger, + final SearchTransportService searchTransportService, + final BiFunction<String, String, Transport.Connection> nodeIdToConnection, + final Map<String, AliasFilter> aliasFilter, + final Map<String, Float> concreteIndexBoosts, + final Map<String, Set<String>> indexRoutings, + final SearchPhaseController searchPhaseController, + final Executor executor, + final QueryPhaseResultConsumer queryPhaseResultConsumer, + final SearchRequest request, + final ActionListener<SearchResponse> listener, + final GroupShardsIterator<SearchShardIterator> shardsIts, + final TransportSearchAction.SearchTimeProvider timeProvider, + final ClusterState clusterState, + final SearchTask task, + SearchResponse.Clusters clusters, + SearchRequestContext searchRequestContext + ) { + super( + SearchPhaseName.DFS_PRE_QUERY.getName(), + logger, + searchTransportService, + nodeIdToConnection, + aliasFilter, + concreteIndexBoosts, + indexRoutings, + executor, + request, + listener, + shardsIts, + timeProvider, + clusterState, + task, + new ArraySearchPhaseResults<>(shardsIts.size()), + request.getMaxConcurrentShardRequests(), + clusters, + searchRequestContext, + NoopTracer.INSTANCE + ); + this.listener = searchRequestContext.getSearchRequestOperationsListener(); + } + + @Override + protected void executePhaseOnShard( + final SearchShardIterator shardIt, + final SearchShardTarget shard, + final SearchActionListener<DfsSearchResult> listener + ) { + final DfsSearchResult response = new DfsSearchResult(shardIt.getSearchContextId(), shard, null); + response.setShardIndex(shard.getShardId().getId()); + listener.innerOnResponse(response); + } + + @Override + protected SearchPhase getNextPhase(SearchPhaseResults<DfsSearchResult> results, SearchPhaseContext context) { + return new SearchPhase("last") { + @Override + public void run() throws IOException { + listener.onPhaseEnd(context, null); + } + }; + } + } + } diff --git a/server/src/test/java/org/opensearch/action/search/ClearScrollControllerTests.java b/server/src/test/java/org/opensearch/action/search/ClearScrollControllerTests.java index e3c7d4741d3ae..b171ee46dd22c 100644 --- a/server/src/test/java/org/opensearch/action/search/ClearScrollControllerTests.java +++ b/server/src/test/java/org/opensearch/action/search/ClearScrollControllerTests.java @@ -32,13 +32,14 @@ package org.opensearch.action.search; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.LatchedActionListener; import org.opensearch.cluster.node.DiscoveryNode; 
import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.UUIDs; import org.opensearch.common.util.concurrent.AtomicArray; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.search.SearchPhaseResult; import org.opensearch.search.SearchShardTarget; import org.opensearch.search.internal.ShardSearchContextId; @@ -46,7 +47,6 @@ import org.opensearch.test.VersionUtils; import org.opensearch.transport.NodeNotConnectedException; import org.opensearch.transport.Transport; -import org.opensearch.transport.TransportResponse; import java.io.IOException; import java.util.Arrays; diff --git a/server/src/test/java/org/opensearch/action/search/ClearScrollRequestTests.java b/server/src/test/java/org/opensearch/action/search/ClearScrollRequestTests.java index 11c96f6377c13..859711b2b778c 100644 --- a/server/src/test/java/org/opensearch/action/search/ClearScrollRequestTests.java +++ b/server/src/test/java/org/opensearch/action/search/ClearScrollRequestTests.java @@ -32,15 +32,14 @@ package org.opensearch.action.search; -import org.opensearch.common.Strings; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.xcontent.XContentHelper; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -94,7 +93,7 @@ public void testToXContent() throws IOException { clearScrollRequest.addScrollId("SCROLL_ID"); try (XContentBuilder builder = JsonXContent.contentBuilder()) { clearScrollRequest.toXContent(builder, ToXContent.EMPTY_PARAMS); - assertEquals("{\"scroll_id\":[\"SCROLL_ID\"]}", Strings.toString(builder)); + assertEquals("{\"scroll_id\":[\"SCROLL_ID\"]}", builder.toString()); } } diff --git a/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java index 2c585d152c5d4..6cbe458a35ef8 100644 --- a/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java +++ b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java @@ -9,9 +9,7 @@ package org.opensearch.action.search; import org.apache.lucene.search.TotalHits; -import org.junit.Before; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.LatchedActionListener; import org.opensearch.action.StepListener; import org.opensearch.client.node.NodeClient; @@ -20,9 +18,11 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.tasks.TaskId; 
import org.opensearch.index.query.IdsQueryBuilder; import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.index.query.QueryBuilder; @@ -32,13 +32,14 @@ import org.opensearch.search.aggregations.InternalAggregations; import org.opensearch.search.internal.InternalSearchResponse; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.RemoteClusterConnectionTests; import org.opensearch.transport.Transport; +import org.junit.Before; import java.util.ArrayList; import java.util.Arrays; @@ -48,9 +49,9 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import static org.opensearch.action.search.PitTestsUtil.getPitId; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import static org.opensearch.action.search.PitTestsUtil.getPitId; /** * Functional tests for various methods in create pit controller. Covers update pit phase specifically since @@ -180,7 +181,7 @@ public void testUpdatePitAfterCreatePitSuccess() throws InterruptedException { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -273,7 +274,7 @@ public void testUpdatePitAfterCreatePitFailure() throws InterruptedException { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -338,8 +339,8 @@ public void onFailure(Exception e) { createListener.onFailure(new Exception("Exception occurred in phase 1")); latch.await(); assertEquals(0, updateNodesInvoked.size()); - /** - * cleanup is not called on create pit phase one failure + /* + cleanup is not called on create pit phase one failure */ assertEquals(0, deleteNodesInvoked.size()); } @@ -366,7 +367,7 @@ public void testUpdatePitFailureForNodeDrop() throws InterruptedException { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -437,8 +438,8 @@ public void onFailure(Exception e) { createListener.onResponse(searchResponse); latch.await(); assertEquals(3, updateNodesInvoked.size()); - /** - * check if cleanup is called for all nodes in case of update pit failure + /* + check if cleanup is called for all nodes in case of update pit failure */ assertEquals(3, deleteNodesInvoked.size()); } @@ -462,7 +463,7 @@ public void testUpdatePitFailureWhereAllNodesDown() throws InterruptedException Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -525,8 +526,8 @@ public void onFailure(Exception e) { createListener.onResponse(searchResponse); latch.await(); assertEquals(3, updateNodesInvoked.size()); - /** - * check if cleanup is called for all nodes in case of update pit failure + /* + check if cleanup is called for all nodes in case of update pit failure */ assertEquals(3, deleteNodesInvoked.size()); } diff --git a/server/src/test/java/org/opensearch/action/search/DfsQueryPhaseTests.java b/server/src/test/java/org/opensearch/action/search/DfsQueryPhaseTests.java index db371eb9467ba..6952841c295e2 100644 --- a/server/src/test/java/org/opensearch/action/search/DfsQueryPhaseTests.java +++ b/server/src/test/java/org/opensearch/action/search/DfsQueryPhaseTests.java @@ -38,11 +38,11 @@ import 
org.apache.lucene.search.TotalHits; import org.apache.lucene.tests.store.MockDirectoryWrapper; import org.opensearch.action.OriginalIndices; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.breaker.NoopCircuitBreaker; import org.opensearch.common.lucene.search.TopDocsAndMaxScore; import org.opensearch.common.util.concurrent.AtomicArray; import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.NoopCircuitBreaker; import org.opensearch.core.index.shard.ShardId; import org.opensearch.search.DocValueFormat; import org.opensearch.search.SearchPhaseResult; @@ -51,8 +51,8 @@ import org.opensearch.search.internal.ShardSearchContextId; import org.opensearch.search.query.QuerySearchRequest; import org.opensearch.search.query.QuerySearchResult; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.InternalAggregationTestCase; +import org.opensearch.test.OpenSearchTestCase; import org.opensearch.transport.Transport; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/action/search/ExpandSearchPhaseTests.java b/server/src/test/java/org/opensearch/action/search/ExpandSearchPhaseTests.java index 8be2b9b203da6..1f5adafc5de0f 100644 --- a/server/src/test/java/org/opensearch/action/search/ExpandSearchPhaseTests.java +++ b/server/src/test/java/org/opensearch/action/search/ExpandSearchPhaseTests.java @@ -33,8 +33,8 @@ package org.opensearch.action.search; import org.apache.lucene.search.TotalHits; -import org.opensearch.action.ActionListener; import org.opensearch.common.document.DocumentField; +import org.opensearch.core.action.ActionListener; import org.opensearch.index.query.BoolQueryBuilder; import org.opensearch.index.query.InnerHitBuilder; import org.opensearch.index.query.QueryBuilder; diff --git a/server/src/test/java/org/opensearch/action/search/FetchSearchPhaseTests.java b/server/src/test/java/org/opensearch/action/search/FetchSearchPhaseTests.java index 24a018d05a16a..1eb3a44642806 100644 --- a/server/src/test/java/org/opensearch/action/search/FetchSearchPhaseTests.java +++ b/server/src/test/java/org/opensearch/action/search/FetchSearchPhaseTests.java @@ -37,10 +37,10 @@ import org.apache.lucene.tests.store.MockDirectoryWrapper; import org.opensearch.action.OriginalIndices; import org.opensearch.common.UUIDs; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.breaker.NoopCircuitBreaker; import org.opensearch.common.lucene.search.TopDocsAndMaxScore; import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.NoopCircuitBreaker; import org.opensearch.core.index.shard.ShardId; import org.opensearch.search.DocValueFormat; import org.opensearch.search.SearchHit; @@ -51,8 +51,8 @@ import org.opensearch.search.fetch.ShardFetchSearchRequest; import org.opensearch.search.internal.ShardSearchContextId; import org.opensearch.search.query.QuerySearchResult; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.InternalAggregationTestCase; +import org.opensearch.test.OpenSearchTestCase; import org.opensearch.transport.Transport; import java.util.concurrent.CountDownLatch; diff --git a/server/src/test/java/org/opensearch/action/search/GetAllPitNodesResponseTests.java b/server/src/test/java/org/opensearch/action/search/GetAllPitNodesResponseTests.java new file mode 
100644
index 0000000000000..882b397575e93
--- /dev/null
+++ b/server/src/test/java/org/opensearch/action/search/GetAllPitNodesResponseTests.java
@@ -0,0 +1,98 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.search;
+
+import org.opensearch.Version;
+import org.opensearch.action.FailedNodeException;
+import org.opensearch.cluster.ClusterName;
+import org.opensearch.cluster.node.DiscoveryNode;
+import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
+import org.opensearch.test.OpenSearchTestCase;
+import org.opensearch.transport.TransportException;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import static java.util.Collections.emptyMap;
+import static java.util.Collections.emptySet;
+
+public class GetAllPitNodesResponseTests extends OpenSearchTestCase {
+    protected void assertEqualInstances(GetAllPitNodesResponse expected, GetAllPitNodesResponse actual) {
+        assertNotSame(expected, actual);
+        Set<ListPitInfo> expectedPitInfos = new HashSet<>(expected.getPitInfos());
+        Set<ListPitInfo> actualPitInfos = new HashSet<>(actual.getPitInfos());
+        assertEquals(expectedPitInfos, actualPitInfos);
+
+        List<GetAllPitNodeResponse> expectedResponses = expected.getNodes();
+        List<GetAllPitNodeResponse> actualResponses = actual.getNodes();
+        assertEquals(expectedResponses.size(), actualResponses.size());
+        for (int i = 0; i < expectedResponses.size(); i++) {
+            assertEquals(expectedResponses.get(i).getNode(), actualResponses.get(i).getNode());
+            Set<ListPitInfo> expectedNodePitInfos = new HashSet<>(expectedResponses.get(i).getPitInfos());
+            Set<ListPitInfo> actualNodePitInfos = new HashSet<>(actualResponses.get(i).getPitInfos());
+            assertEquals(expectedNodePitInfos, actualNodePitInfos);
+        }
+
+        List<FailedNodeException> expectedFailures = expected.failures();
+        List<FailedNodeException> actualFailures = actual.failures();
+        assertEquals(expectedFailures.size(), actualFailures.size());
+        for (int i = 0; i < expectedFailures.size(); i++) {
+            assertEquals(expectedFailures.get(i).nodeId(), actualFailures.get(i).nodeId());
+            assertEquals(expectedFailures.get(i).getMessage(), actualFailures.get(i).getMessage());
+            assertEquals(expectedFailures.get(i).getCause().getClass(), actualFailures.get(i).getCause().getClass());
+        }
+    }
+
+    protected NamedWriteableRegistry getNamedWriteableRegistry() {
+        return new NamedWriteableRegistry(Collections.emptyList());
+    }
+
+    public void testSerialization() throws IOException {
+        GetAllPitNodesResponse response = createTestItem();
+        GetAllPitNodesResponse deserialized = copyWriteable(response, getNamedWriteableRegistry(), GetAllPitNodesResponse::new);
+        assertEqualInstances(response, deserialized);
+    }
+
+    private GetAllPitNodesResponse createTestItem() {
+        int numNodes = randomIntBetween(1, 10);
+        int numPits = randomInt(10);
+        List<ListPitInfo> candidatePitInfos = new ArrayList<>(numPits);
+        for (int i = 0; i < numPits; i++) {
+            candidatePitInfos.add(new ListPitInfo(randomAlphaOfLength(10), randomLong(), randomLong()));
+        }
+
+        List<GetAllPitNodeResponse> responses = new ArrayList<>();
+        List<FailedNodeException> failures = new ArrayList<>();
+        for (int i = 0; i < numNodes; i++) {
+            DiscoveryNode node = new DiscoveryNode(
+                randomAlphaOfLength(10),
+                buildNewFakeTransportAddress(),
+                emptyMap(),
+                emptySet(),
+                Version.CURRENT
+            );
+            if (randomBoolean()) {
+                List<ListPitInfo> nodePitInfos = new ArrayList<>();
+                for (int j = 0; j < randomInt(numPits); j++) {
+                    nodePitInfos.add(randomFrom(candidatePitInfos));
+                }
+                responses.add(new GetAllPitNodeResponse(node, nodePitInfos));
+            } else {
+                failures.add(
+                    new FailedNodeException(node.getId(), randomAlphaOfLength(10), new TransportException(randomAlphaOfLength(10)))
+                );
+            }
+        }
+        return new GetAllPitNodesResponse(new ClusterName("test"), responses, failures);
+    }
+}
diff --git a/server/src/test/java/org/opensearch/action/search/MockSearchPhaseContext.java b/server/src/test/java/org/opensearch/action/search/MockSearchPhaseContext.java
index f5a705c0e1033..cc10da8fc1f12 100644
--- a/server/src/test/java/org/opensearch/action/search/MockSearchPhaseContext.java
+++ b/server/src/test/java/org/opensearch/action/search/MockSearchPhaseContext.java
@@ -36,8 +36,8 @@
 import org.opensearch.Version;
 import org.opensearch.action.OriginalIndices;
 import org.opensearch.common.Nullable;
-import org.opensearch.common.util.concurrent.AtomicArray;
 import org.opensearch.common.lease.Releasable;
+import org.opensearch.common.util.concurrent.AtomicArray;
 import org.opensearch.search.SearchPhaseResult;
 import org.opensearch.search.SearchShardTarget;
 import org.opensearch.search.internal.InternalSearchResponse;
@@ -65,14 +65,29 @@ public final class MockSearchPhaseContext implements SearchPhaseContext {
     final List<ShardSearchFailure> failures = Collections.synchronizedList(new ArrayList<>());
     SearchTransportService searchTransport;
     final Set<ShardSearchContextId> releasedSearchContexts = new HashSet<>();
-    final SearchRequest searchRequest = new SearchRequest();
+    final SearchRequest searchRequest;
     final AtomicReference<SearchResponse> searchResponse = new AtomicReference<>();
+    final SearchPhase currentPhase;
 
     public MockSearchPhaseContext(int numShards) {
+        this(numShards, new SearchRequest());
+    }
+
+    public MockSearchPhaseContext(int numShards, SearchRequest searchRequest) {
+        this(numShards, searchRequest, null);
+    }
+
+    public MockSearchPhaseContext(int numShards, SearchRequest searchRequest, SearchPhase currentPhase) {
         this.numShards = numShards;
+        this.searchRequest = searchRequest;
+        this.currentPhase = currentPhase;
         numSuccess = new AtomicInteger(numShards);
     }
 
+    public MockSearchPhaseContext(int numShards, SearchPhase currentPhase) {
+        this(numShards, new SearchRequest(), currentPhase);
+    }
+
     public void assertNoFailure() {
         if (phaseFailure.get() != null) {
             throw new AssertionError(phaseFailure.get());
@@ -99,6 +114,11 @@ public SearchRequest getRequest() {
         return searchRequest;
     }
 
+    @Override
+    public SearchPhase getCurrentPhase() {
+        return currentPhase;
+    }
+
     @Override
     public void sendSearchResponse(InternalSearchResponse internalSearchResponse, AtomicArray<SearchPhaseResult> queryResults) {
         String scrollId = getRequest().scroll() != null ? TransportSearchHelper.buildScrollId(queryResults, Version.CURRENT) : null;
diff --git a/server/src/test/java/org/opensearch/action/search/MultiSearchActionTookTests.java b/server/src/test/java/org/opensearch/action/search/MultiSearchActionTookTests.java
index fc4bd287c5f18..94ba5b0a8768b 100644
--- a/server/src/test/java/org/opensearch/action/search/MultiSearchActionTookTests.java
+++ b/server/src/test/java/org/opensearch/action/search/MultiSearchActionTookTests.java
@@ -32,7 +32,6 @@
 
 package org.opensearch.action.search;
 
-import org.opensearch.action.ActionListener;
 import org.opensearch.action.IndicesRequest;
 import org.opensearch.action.support.ActionFilters;
 import org.opensearch.client.node.NodeClient;
@@ -46,9 +45,11 @@
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.util.concurrent.AtomicArray;
 import org.opensearch.common.util.concurrent.ThreadContext;
+import org.opensearch.core.action.ActionListener;
 import org.opensearch.search.internal.InternalSearchResponse;
 import org.opensearch.tasks.Task;
 import org.opensearch.tasks.TaskManager;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.threadpool.TestThreadPool;
 import org.opensearch.threadpool.ThreadPool;
@@ -153,7 +154,8 @@ private TransportMultiSearchAction createTransportMultiSearchAction(boolean cont
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             boundAddress -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), UUIDs.randomBase64UUID()),
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         ) {
             @Override
             public TaskManager getTaskManager() {
diff --git a/server/src/test/java/org/opensearch/action/search/MultiSearchRequestTests.java b/server/src/test/java/org/opensearch/action/search/MultiSearchRequestTests.java
index f9d163f251176..2577dfdc20698 100644
--- a/server/src/test/java/org/opensearch/action/search/MultiSearchRequestTests.java
+++ b/server/src/test/java/org/opensearch/action/search/MultiSearchRequestTests.java
@@ -36,19 +36,21 @@
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.common.CheckedBiConsumer;
 import org.opensearch.common.CheckedRunnable;
-import org.opensearch.core.common.bytes.BytesReference;
+import org.opensearch.common.logging.DeprecationLogger;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.common.xcontent.XContentHelper;
+import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.common.xcontent.json.JsonXContent;
 import org.opensearch.core.ParseField;
-import org.opensearch.common.Strings;
+import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.bytes.BytesArray;
+import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
-import org.opensearch.common.logging.DeprecationLogger;
-import org.opensearch.common.unit.TimeValue;
+import org.opensearch.core.xcontent.MediaType;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.core.xcontent.NamedXContentRegistry;
 import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.common.xcontent.XContentHelper;
 import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.common.xcontent.XContentType;
-import org.opensearch.common.xcontent.json.JsonXContent;
 import org.opensearch.index.query.MatchAllQueryBuilder;
 import org.opensearch.index.query.QueryBuilder;
 import org.opensearch.rest.RestRequest;
@@ -71,6 +73,7 @@
 import static java.util.Collections.singletonList;
 import static org.opensearch.search.RandomSearchRequestGenerator.randomSearchRequest;
 import static org.opensearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode;
+import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.is;
@@ -120,7 +123,7 @@ public void testFailWithUnknownKey() {
             + "{\"query\" : {\"match_all\" :{}}}\r\n";
         FakeRestRequest restRequest = new FakeRestRequest.Builder(xContentRegistry()).withContent(
             new BytesArray(requestContent),
-            XContentType.JSON
+            MediaTypeRegistry.JSON
         ).build();
         IllegalArgumentException ex = expectThrows(
             IllegalArgumentException.class,
@@ -134,7 +137,7 @@ public void testSimpleAddWithCarriageReturn() throws Exception {
             + "{\"query\" : {\"match_all\" :{}}}\r\n";
         FakeRestRequest restRequest = new FakeRestRequest.Builder(xContentRegistry()).withContent(
             new BytesArray(requestContent),
-            XContentType.JSON
+            MediaTypeRegistry.JSON
         ).build();
         MultiSearchRequest request = RestMultiSearchAction.parseRequest(restRequest, null, true);
         assertThat(request.requests().size(), equalTo(1));
@@ -152,7 +155,7 @@ public void testCancelAfterIntervalAtParentAndFewChildRequest() throws Exception
             + "{\"query\" : {\"match_all\" :{}}}\r\n";
         FakeRestRequest restRequest = new FakeRestRequest.Builder(xContentRegistry()).withContent(
             new BytesArray(requestContent),
-            XContentType.JSON
+            MediaTypeRegistry.JSON
         ).withParams(Collections.singletonMap("cancel_after_time_interval", "20s")).build();
         MultiSearchRequest request = RestMultiSearchAction.parseRequest(restRequest, null, true);
         assertThat(request.requests().size(), equalTo(2));
@@ -169,7 +172,7 @@ public void testOnlyParentMSearchRequestWithCancelAfterTimeIntervalParameter() t
             + "{\"query\" : {\"match_all\" :{}}}\r\n";
         FakeRestRequest restRequest = new FakeRestRequest.Builder(xContentRegistry()).withContent(
             new BytesArray(requestContent),
-            XContentType.JSON
+            MediaTypeRegistry.JSON
         ).withParams(Collections.singletonMap("cancel_after_time_interval", "20s")).build();
         MultiSearchRequest request = RestMultiSearchAction.parseRequest(restRequest, null, true);
         assertThat(request.requests().size(), equalTo(1));
@@ -182,7 +185,7 @@ public void testDefaultIndicesOptions() throws IOException {
             + "{\"query\" : {\"match_all\" :{}}}\r\n";
         FakeRestRequest restRequest = new FakeRestRequest.Builder(xContentRegistry()).withContent(
             new BytesArray(requestContent),
-            XContentType.JSON
+            MediaTypeRegistry.JSON
         ).withParams(Collections.singletonMap("ignore_unavailable", "true")).build();
         MultiSearchRequest request = RestMultiSearchAction.parseRequest(restRequest, null, true);
         assertThat(request.requests().size(), equalTo(1));
@@ -301,7 +304,7 @@ public void testResponseErrorToXContent() {
                 + "\"type\":\"illegal_state_exception\",\"reason\":\"baaaaaazzzz\"},\"status\":500"
                 + "}"
                 + "]}",
-            Strings.toString(XContentType.JSON, response)
+            Strings.toString(MediaTypeRegistry.JSON, response)
         );
     }
 
@@ -315,7 +318,7 @@ public void testMsearchTerminatedByNewline() throws Exception {
         String mserchAction = StreamsUtils.copyToStringFromClasspath("/org/opensearch/action/search/simple-msearch5.json");
         RestRequest restRequest = new FakeRestRequest.Builder(xContentRegistry()).withContent(
             new BytesArray(mserchAction.getBytes(StandardCharsets.UTF_8)),
-            XContentType.JSON
+            MediaTypeRegistry.JSON
         ).build();
         IllegalArgumentException expectThrows = expectThrows(
             IllegalArgumentException.class,
@@ -326,7 +329,7 @@ public void testMsearchTerminatedByNewline() throws Exception {
         String mserchActionWithNewLine = mserchAction + "\n";
         RestRequest restRequestWithNewLine = new FakeRestRequest.Builder(xContentRegistry()).withContent(
             new BytesArray(mserchActionWithNewLine.getBytes(StandardCharsets.UTF_8)),
-            XContentType.JSON
+            MediaTypeRegistry.JSON
         ).build();
         MultiSearchRequest msearchRequest = RestMultiSearchAction.parseRequest(restRequestWithNewLine, null, true);
         assertEquals(3, msearchRequest.requests().size());
@@ -334,14 +337,14 @@
 
     private MultiSearchRequest parseMultiSearchRequestFromString(String request) throws IOException {
         return parseMultiSearchRequest(
-            new FakeRestRequest.Builder(xContentRegistry()).withContent(new BytesArray(request), XContentType.JSON).build()
+            new FakeRestRequest.Builder(xContentRegistry()).withContent(new BytesArray(request), MediaTypeRegistry.JSON).build()
         );
     }
 
     private MultiSearchRequest parseMultiSearchRequestFromFile(String sample) throws IOException {
         byte[] data = StreamsUtils.copyToBytesFromClasspath(sample);
         return parseMultiSearchRequest(
-            new FakeRestRequest.Builder(xContentRegistry()).withContent(new BytesArray(data), XContentType.JSON).build()
+            new FakeRestRequest.Builder(xContentRegistry()).withContent(new BytesArray(data), MediaTypeRegistry.JSON).build()
         );
     }
 
@@ -379,10 +382,10 @@ public void testMultiLineSerialization() throws IOException {
         int iters = 16;
         for (int i = 0; i < iters; i++) {
             // The only formats that support stream separator
-            XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE);
+            MediaType mediaType = randomFrom(MediaTypeRegistry.JSON, XContentType.SMILE);
             MultiSearchRequest originalRequest = createMultiSearchRequest();
 
-            byte[] originalBytes = MultiSearchRequest.writeMultiLineFormat(originalRequest, xContentType.xContent());
+            byte[] originalBytes = MultiSearchRequest.writeMultiLineFormat(originalRequest, mediaType.xContent());
             MultiSearchRequest parsedRequest = new MultiSearchRequest();
             CheckedBiConsumer<SearchRequest, XContentParser, IOException> consumer = (r, p) -> {
                 SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(p, false);
@@ -393,7 +396,7 @@
             };
             MultiSearchRequest.readMultiLineFormat(
                 new BytesArray(originalBytes),
-                xContentType.xContent(),
+                mediaType.xContent(),
                 consumer,
                 null,
                 null,
@@ -413,7 +416,7 @@ public void testSerDeWithCancelAfterTimeIntervalParameterAndRandomVersion() thro
             + "\"cancel_after_time_interval\" : \"10s\"}\r\n{\"query\" : {\"match_all\" :{}}}\r\n";
         FakeRestRequest restRequest = new FakeRestRequest.Builder(xContentRegistry()).withContent(
             new BytesArray(requestContent),
-            XContentType.JSON
+            MediaTypeRegistry.JSON
         ).build();
         Version version = VersionUtils.randomVersion(random());
         MultiSearchRequest originalRequest = RestMultiSearchAction.parseRequest(restRequest, null, true);
@@ -545,7 +548,7 @@ private void assertExpandWildcardsValue(IndicesOptions options, String expectedV
         try (XContentBuilder builder = JsonXContent.contentBuilder()) {
             MultiSearchRequest.writeSearchRequestParams(request, builder);
             Map<String, Object> map = XContentHelper.convertToMap(
-                XContentType.JSON.xContent(),
+                MediaTypeRegistry.JSON.xContent(),
                 BytesReference.bytes(builder).streamInput(),
                 false
             );
@@ -558,6 +561,13 @@ public void testEqualsAndHashcode() {
         checkEqualsAndHashCode(createMultiSearchRequest(), MultiSearchRequestTests::copyRequest, MultiSearchRequestTests::mutate);
     }
 
+    public void testToString() {
+        MultiSearchRequest req = createMultiSearchRequest();
+        for (SearchRequest subReq : req.requests()) {
+            assertThat(req.toString(), containsString(subReq.toString()));
+        }
+    }
+
     private static MultiSearchRequest mutate(MultiSearchRequest searchRequest) throws IOException {
         MultiSearchRequest mutation = copyRequest(searchRequest);
         List<CheckedRunnable<IOException>> mutators = new ArrayList<>();
diff --git a/server/src/test/java/org/opensearch/action/search/PitTestsUtil.java b/server/src/test/java/org/opensearch/action/search/PitTestsUtil.java
index 07a0d3a1d97f2..d80e011c04332 100644
--- a/server/src/test/java/org/opensearch/action/search/PitTestsUtil.java
+++ b/server/src/test/java/org/opensearch/action/search/PitTestsUtil.java
@@ -8,9 +8,7 @@
 
 package org.opensearch.action.search;
 
-import org.junit.Assert;
 import org.opensearch.Version;
-import org.opensearch.action.ActionFuture;
 import org.opensearch.action.admin.cluster.state.ClusterStateRequest;
 import org.opensearch.action.admin.cluster.state.ClusterStateResponse;
 import org.opensearch.action.admin.indices.segments.IndicesSegmentResponse;
@@ -18,16 +16,18 @@
 import org.opensearch.action.admin.indices.segments.PitSegmentsRequest;
 import org.opensearch.client.Client;
 import org.opensearch.cluster.node.DiscoveryNode;
+import org.opensearch.common.action.ActionFuture;
 import org.opensearch.common.util.concurrent.AtomicArray;
+import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.index.query.IdsQueryBuilder;
 import org.opensearch.index.query.MatchAllQueryBuilder;
 import org.opensearch.index.query.QueryBuilder;
 import org.opensearch.index.query.TermQueryBuilder;
-import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.search.SearchPhaseResult;
 import org.opensearch.search.SearchShardTarget;
 import org.opensearch.search.internal.AliasFilter;
 import org.opensearch.search.internal.ShardSearchContextId;
+import org.junit.Assert;
 
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -36,11 +36,11 @@
 import java.util.Map;
 import java.util.concurrent.ExecutionException;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 import static org.opensearch.test.OpenSearchTestCase.between;
 import static org.opensearch.test.OpenSearchTestCase.randomAlphaOfLength;
 import static org.opensearch.test.OpenSearchTestCase.randomBoolean;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 /**
  * Helper class for common pit tests functions
diff --git a/server/src/test/java/org/opensearch/action/search/QueryPhaseResultConsumerTests.java b/server/src/test/java/org/opensearch/action/search/QueryPhaseResultConsumerTests.java
index 27d04cba204fb..283c9e2f238cc 100644
--- a/server/src/test/java/org/opensearch/action/search/QueryPhaseResultConsumerTests.java
+++ b/server/src/test/java/org/opensearch/action/search/QueryPhaseResultConsumerTests.java
@@ -36,12 +36,12 @@
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.search.TotalHits;
 import org.opensearch.action.OriginalIndices;
-import org.opensearch.common.breaker.CircuitBreaker;
-import org.opensearch.common.breaker.NoopCircuitBreaker;
 import org.opensearch.common.lucene.search.TopDocsAndMaxScore;
 import org.opensearch.common.util.BigArrays;
 import org.opensearch.common.util.concurrent.OpenSearchExecutors;
 import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor;
+import org.opensearch.core.common.breaker.CircuitBreaker;
+import org.opensearch.core.common.breaker.NoopCircuitBreaker;
 import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.search.DocValueFormat;
 import org.opensearch.search.SearchShardTarget;
diff --git a/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java
index 9ee3b11f05785..35e90ff662b19 100644
--- a/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java
+++ b/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java
@@ -31,8 +31,8 @@
 
 package org.opensearch.action.search;
 
+import org.apache.logging.log4j.LogManager;
 import org.opensearch.Version;
-import org.opensearch.action.ActionListener;
 import org.opensearch.action.OriginalIndices;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.node.DiscoveryNode;
@@ -41,8 +41,9 @@
 import org.opensearch.cluster.routing.ShardRouting;
 import org.opensearch.cluster.routing.UnassignedInfo;
 import org.opensearch.common.UUIDs;
-import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.action.ActionListener;
 import org.opensearch.core.common.Strings;
+import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.index.Index;
 import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.search.SearchPhaseResult;
@@ -50,17 +51,21 @@
 import org.opensearch.search.internal.AliasFilter;
 import org.opensearch.search.internal.InternalSearchResponse;
 import org.opensearch.search.internal.ShardSearchContextId;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.transport.Transport;
 import org.opensearch.transport.TransportException;
 import org.opensearch.transport.TransportRequest;
 import org.opensearch.transport.TransportRequestOptions;
+import org.junit.After;
+import org.junit.Before;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
@@ -77,6 +82,23 @@
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 
 public class SearchAsyncActionTests extends OpenSearchTestCase {
+    private SearchRequestOperationsListenerAssertingListener assertingListener;
+
+    @Before
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+
+        assertingListener = new SearchRequestOperationsListenerAssertingListener();
+    }
+
+    @After
+    @Override
+    public void tearDown() throws Exception {
+        super.tearDown();
+
+        assertingListener.assertFinished();
+    }
 
     public void testSkipSearchShards() throws InterruptedException {
         SearchRequest request = new SearchRequest();
@@ -115,6 +137,10 @@ public void testSkipSearchShards() throws InterruptedException {
         lookup.put(replicaNode.getId(), new MockConnection(replicaNode));
         Map<String, AliasFilter> aliasFilters = Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY));
         AtomicInteger numRequests = new AtomicInteger(0);
+        final SearchRequestOperationsListener searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener(
+            List.of(assertingListener),
+            LogManager.getLogger()
+        );
         AbstractSearchAsyncAction<TestSearchPhaseResult> asyncAction = new AbstractSearchAsyncAction<TestSearchPhaseResult>(
             "test",
             logger,
@@ -135,7 +161,9 @@ public void testSkipSearchShards() throws InterruptedException {
             null,
             new ArraySearchPhaseResults<>(shardsIter.size()),
             request.getMaxConcurrentShardRequests(),
-            SearchResponse.Clusters.EMPTY
+            SearchResponse.Clusters.EMPTY,
+            new SearchRequestContext(searchRequestOperationsListener, request),
+            NoopTracer.INSTANCE
         ) {
 
             @Override
@@ -166,6 +194,7 @@ protected SearchPhase getNextPhase(SearchPhaseResults<TestSearchPhaseResult> res
                     @Override
                     public void run() {
                         assertTrue(searchPhaseDidRun.compareAndSet(false, true));
+                        searchRequestOperationsListener.onPhaseEnd(new MockSearchPhaseContext(1, request, this), null);
                     }
                 };
             }
@@ -233,6 +262,10 @@ public void testLimitConcurrentShardRequests() throws InterruptedException {
         Map<String, AliasFilter> aliasFilters = Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY));
         CountDownLatch awaitInitialRequests = new CountDownLatch(1);
         AtomicInteger numRequests = new AtomicInteger(0);
+        final SearchRequestOperationsListener searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener(
+            List.of(assertingListener),
+            LogManager.getLogger()
+        );
         AbstractSearchAsyncAction<TestSearchPhaseResult> asyncAction = new AbstractSearchAsyncAction<TestSearchPhaseResult>(
             "test",
             logger,
@@ -253,7 +286,9 @@ public void testLimitConcurrentShardRequests() throws InterruptedException {
             null,
             new ArraySearchPhaseResults<>(shardsIter.size()),
             request.getMaxConcurrentShardRequests(),
-            SearchResponse.Clusters.EMPTY
+            SearchResponse.Clusters.EMPTY,
+            new SearchRequestContext(searchRequestOperationsListener, request),
+            NoopTracer.INSTANCE
         ) {
 
             @Override
@@ -291,6 +326,7 @@ protected SearchPhase getNextPhase(SearchPhaseResults<TestSearchPhaseResult> res
                 return new SearchPhase("test") {
                     @Override
                     public void run() {
+                        searchRequestOperationsListener.onPhaseEnd(new MockSearchPhaseContext(1, request, this), null);
                         assertTrue(searchPhaseDidRun.compareAndSet(false, true));
                     }
                 };
@@ -370,7 +406,12 @@ public void sendFreeContext(Transport.Connection connection, ShardSearchContextI
             null,
             new ArraySearchPhaseResults<>(shardsIter.size()),
             request.getMaxConcurrentShardRequests(),
-            SearchResponse.Clusters.EMPTY
+            SearchResponse.Clusters.EMPTY,
+            new SearchRequestContext(
+                new SearchRequestOperationsListener.CompositeListener(List.of(assertingListener), LogManager.getLogger()),
+                request
+            ),
+            NoopTracer.INSTANCE
         ) {
             TestSearchResponse response = new TestSearchResponse();
 
@@ -406,6 +447,7 @@ public void run() {
                         sendReleaseSearchContext(result.getContextId(), new MockConnection(result.node), OriginalIndices.NONE);
                     }
                     responseListener.onResponse(response);
+                    assertingListener.onPhaseEnd(new MockSearchPhaseContext(1, request, this), null);
                 }
             };
         }
@@ -492,7 +534,12 @@ public void sendFreeContext(Transport.Connection connection, ShardSearchContextI
             null,
             new ArraySearchPhaseResults<>(shardsIter.size()),
             request.getMaxConcurrentShardRequests(),
-            SearchResponse.Clusters.EMPTY
+            SearchResponse.Clusters.EMPTY,
+            new SearchRequestContext(
+                new SearchRequestOperationsListener.CompositeListener(List.of(assertingListener), LogManager.getLogger()),
+                request
+            ),
+            NoopTracer.INSTANCE
         ) {
             TestSearchResponse response = new TestSearchResponse();
 
@@ -585,6 +632,10 @@ public void testAllowPartialResults() throws InterruptedException {
         Map<String, AliasFilter> aliasFilters = Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY));
         AtomicInteger numRequests = new AtomicInteger(0);
         AtomicInteger numFailReplicas = new AtomicInteger(0);
+        final SearchRequestOperationsListener searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener(
+            List.of(assertingListener),
+            LogManager.getLogger()
+        );
         AbstractSearchAsyncAction<TestSearchPhaseResult> asyncAction = new AbstractSearchAsyncAction<TestSearchPhaseResult>(
             "test",
             logger,
@@ -605,9 +656,10 @@ public void testAllowPartialResults() throws InterruptedException {
             null,
             new ArraySearchPhaseResults<>(shardsIter.size()),
             request.getMaxConcurrentShardRequests(),
-            SearchResponse.Clusters.EMPTY
+            SearchResponse.Clusters.EMPTY,
+            new SearchRequestContext(searchRequestOperationsListener, request),
+            NoopTracer.INSTANCE
         ) {
-
             @Override
             protected void executePhaseOnShard(
                 SearchShardIterator shardIt,
@@ -639,6 +691,7 @@ protected SearchPhase getNextPhase(SearchPhaseResults<TestSearchPhaseResult> res
                     @Override
                     public void run() {
                         assertTrue(searchPhaseDidRun.compareAndSet(false, true));
+                        searchRequestOperationsListener.onPhaseEnd(new MockSearchPhaseContext(1, request, this), null);
                     }
                 };
             }
diff --git a/server/src/test/java/org/opensearch/action/search/SearchContextIdTests.java b/server/src/test/java/org/opensearch/action/search/SearchContextIdTests.java
index 65e419936760f..755f7fb4de742 100644
--- a/server/src/test/java/org/opensearch/action/search/SearchContextIdTests.java
+++ b/server/src/test/java/org/opensearch/action/search/SearchContextIdTests.java
@@ -33,13 +33,13 @@
 package org.opensearch.action.search;
 
 import org.opensearch.Version;
-import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
 import org.opensearch.common.util.concurrent.AtomicArray;
+import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
+import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.index.query.IdsQueryBuilder;
 import org.opensearch.index.query.MatchAllQueryBuilder;
 import org.opensearch.index.query.QueryBuilder;
 import org.opensearch.index.query.TermQueryBuilder;
-import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.search.SearchPhaseResult;
 import org.opensearch.search.internal.AliasFilter;
 import org.opensearch.test.OpenSearchTestCase;
diff --git a/server/src/test/java/org/opensearch/action/search/SearchPhaseControllerTests.java b/server/src/test/java/org/opensearch/action/search/SearchPhaseControllerTests.java
index 93f9f69e68de0..a927f733cc504 100644
--- a/server/src/test/java/org/opensearch/action/search/SearchPhaseControllerTests.java
+++ b/server/src/test/java/org/opensearch/action/search/SearchPhaseControllerTests.java
@@ -46,19 +46,19 @@
 import org.apache.lucene.util.BytesRef;
 import org.opensearch.action.OriginalIndices;
 import org.opensearch.common.UUIDs;
-import org.opensearch.common.breaker.CircuitBreaker;
-import org.opensearch.common.breaker.CircuitBreakingException;
-import org.opensearch.common.breaker.NoopCircuitBreaker;
-import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
 import org.opensearch.common.lucene.Lucene;
 import org.opensearch.common.lucene.search.TopDocsAndMaxScore;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.core.common.text.Text;
 import org.opensearch.common.util.BigArrays;
 import org.opensearch.common.util.concurrent.AtomicArray;
 import org.opensearch.common.util.concurrent.OpenSearchExecutors;
 import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor;
 import org.opensearch.core.common.Strings;
+import org.opensearch.core.common.breaker.CircuitBreaker;
+import org.opensearch.core.common.breaker.CircuitBreakingException;
+import org.opensearch.core.common.breaker.NoopCircuitBreaker;
+import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
+import org.opensearch.core.common.text.Text;
 import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.search.DocValueFormat;
 import org.opensearch.search.SearchHit;
@@ -83,8 +83,8 @@
 import org.opensearch.search.suggest.completion.CompletionSuggestion;
 import org.opensearch.search.suggest.phrase.PhraseSuggestion;
 import org.opensearch.search.suggest.term.TermSuggestion;
-import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.InternalAggregationTestCase;
+import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.threadpool.TestThreadPool;
 import org.opensearch.threadpool.ThreadPool;
 import org.junit.After;
diff --git a/server/src/test/java/org/opensearch/action/search/SearchPhaseExecutionExceptionTests.java b/server/src/test/java/org/opensearch/action/search/SearchPhaseExecutionExceptionTests.java
index 0ea7e503529e8..06b00a6a438c4 100644
--- a/server/src/test/java/org/opensearch/action/search/SearchPhaseExecutionExceptionTests.java
+++ b/server/src/test/java/org/opensearch/action/search/SearchPhaseExecutionExceptionTests.java
@@ -35,19 +35,20 @@
 import org.opensearch.OpenSearchException;
 import org.opensearch.action.OriginalIndices;
 import org.opensearch.action.TimestampParsingException;
+import org.opensearch.common.xcontent.XContentHelper;
+import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.core.common.ParsingException;
-import org.opensearch.common.Strings;
+import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException;
+import org.opensearch.core.index.shard.ShardId;
+import org.opensearch.core.rest.RestStatus;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.core.xcontent.XContent;
-import org.opensearch.common.xcontent.XContentHelper;
 import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.index.shard.IndexShardClosedException;
-import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.indices.InvalidIndexTemplateException;
-import org.opensearch.core.rest.RestStatus;
 import org.opensearch.search.SearchShardTarget;
 import org.opensearch.test.OpenSearchTestCase;
 
@@ -111,7 +112,7 @@ public void testToXContent() throws IOException {
             + "    ]"
             + "}"
         );
-        assertEquals(expectedJson, Strings.toString(XContentType.JSON, exception));
+        assertEquals(expectedJson, Strings.toString(MediaTypeRegistry.JSON, exception));
     }
 
     public void testToAndFromXContent() throws IOException {
diff --git a/server/src/test/java/org/opensearch/action/search/SearchQueryCategorizerTests.java b/server/src/test/java/org/opensearch/action/search/SearchQueryCategorizerTests.java
new file mode 100644
index 0000000000000..4878a463729f9
--- /dev/null
+++ b/server/src/test/java/org/opensearch/action/search/SearchQueryCategorizerTests.java
@@ -0,0 +1,245 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.search;
+
+import org.opensearch.index.query.BoolQueryBuilder;
+import org.opensearch.index.query.BoostingQueryBuilder;
+import org.opensearch.index.query.MatchNoneQueryBuilder;
+import org.opensearch.index.query.MatchQueryBuilder;
+import org.opensearch.index.query.MultiMatchQueryBuilder;
+import org.opensearch.index.query.QueryBuilders;
+import org.opensearch.index.query.QueryStringQueryBuilder;
+import org.opensearch.index.query.RangeQueryBuilder;
+import org.opensearch.index.query.RegexpQueryBuilder;
+import org.opensearch.index.query.TermQueryBuilder;
+import org.opensearch.index.query.WildcardQueryBuilder;
+import org.opensearch.index.query.functionscore.FunctionScoreQueryBuilder;
+import org.opensearch.search.aggregations.bucket.range.RangeAggregationBuilder;
+import org.opensearch.search.aggregations.bucket.terms.MultiTermsAggregationBuilder;
+import org.opensearch.search.aggregations.support.MultiTermsValuesSourceConfig;
+import org.opensearch.search.builder.SearchSourceBuilder;
+import org.opensearch.search.sort.ScoreSortBuilder;
+import org.opensearch.search.sort.SortOrder;
+import org.opensearch.telemetry.metrics.Counter;
+import org.opensearch.telemetry.metrics.MetricsRegistry;
+import org.opensearch.telemetry.metrics.tags.Tags;
+import org.opensearch.test.OpenSearchTestCase;
+import org.junit.Before;
+
+import java.util.Arrays;
+
+import org.mockito.ArgumentCaptor;
+
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+public final class SearchQueryCategorizerTests extends OpenSearchTestCase {
+
+    private static final String MULTI_TERMS_AGGREGATION = "multi_terms";
+
+    private MetricsRegistry metricsRegistry;
+
+    private SearchQueryCategorizer searchQueryCategorizer;
+
+    @Before
+    public void setup() {
+        metricsRegistry = mock(MetricsRegistry.class);
+        when(metricsRegistry.createCounter(any(String.class), any(String.class), any(String.class))).thenAnswer(
+            invocation -> mock(Counter.class)
+        );
+        searchQueryCategorizer = new SearchQueryCategorizer(metricsRegistry);
+    }
+
+    public void testAggregationsQuery() {
+        SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
+        sourceBuilder.aggregation(
+            new MultiTermsAggregationBuilder("agg1").terms(
+                Arrays.asList(
+                    new MultiTermsValuesSourceConfig.Builder().setFieldName("username").build(),
+                    new MultiTermsValuesSourceConfig.Builder().setFieldName("rating").build()
+                )
+            )
+        );
+        sourceBuilder.size(0);
+
+        searchQueryCategorizer.categorize(sourceBuilder);
+
+        verify(searchQueryCategorizer.searchQueryCounters.aggCounter).add(eq(1.0d), any(Tags.class));
+
+        // capture the arguments passed to the aggCounter.add method
+        ArgumentCaptor<Double> valueCaptor = ArgumentCaptor.forClass(Double.class);
+        ArgumentCaptor<Tags> tagsCaptor = ArgumentCaptor.forClass(Tags.class);
+
+        // Verify that aggCounter.add was called with the expected arguments
+        verify(searchQueryCategorizer.searchQueryCounters.aggCounter).add(valueCaptor.capture(), tagsCaptor.capture());
+
+        double actualValue = valueCaptor.getValue();
+        String actualTag = (String) tagsCaptor.getValue().getTagsMap().get("type");
+
+        assertEquals(1.0d, actualValue, 0.0001);
+        assertEquals(MULTI_TERMS_AGGREGATION, actualTag);
+    }
+
+    public void testBoolQuery() {
+        SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
+        sourceBuilder.size(50);
+        sourceBuilder.query(new BoolQueryBuilder().must(new MatchQueryBuilder("searchText", "fox")));
+
+        searchQueryCategorizer.categorize(sourceBuilder);
+
+        verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("bool")).add(eq(1.0d), any(Tags.class));
+        verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("match")).add(eq(1.0d), any(Tags.class));
+    }
+
+    public void testFunctionScoreQuery() {
+        SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
+        sourceBuilder.size(50);
+        sourceBuilder.query(new FunctionScoreQueryBuilder(QueryBuilders.prefixQuery("text", "bro")));
+
+        searchQueryCategorizer.categorize(sourceBuilder);
+
+        verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("function_score")).add(eq(1.0d), any(Tags.class));
+    }
+
+    public void testMatchQuery() {
+        SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
+        sourceBuilder.size(50);
+        sourceBuilder.query(QueryBuilders.matchQuery("tags", "php"));
+
+        searchQueryCategorizer.categorize(sourceBuilder);
+
+        verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("match")).add(eq(1.0d), any(Tags.class));
+    }
+
+    public void testMatchPhraseQuery() {
+        SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
+        sourceBuilder.size(50);
+        sourceBuilder.query(QueryBuilders.matchPhraseQuery("tags", "php"));
+
+        searchQueryCategorizer.categorize(sourceBuilder);
+
+        verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("match_phrase")).add(eq(1.0d), any(Tags.class));
+    }
+
+    public void testMultiMatchQuery() {
+        SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
+        sourceBuilder.size(50);
+        sourceBuilder.query(new MultiMatchQueryBuilder("foo bar", "myField"));
+
+        searchQueryCategorizer.categorize(sourceBuilder);
+
+        verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("multi_match")).add(eq(1.0d), any(Tags.class));
+    }
+
+    public void testOtherQuery() {
+        SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
+        sourceBuilder.size(50);
+        BoostingQueryBuilder queryBuilder = new BoostingQueryBuilder(
+            new TermQueryBuilder("unmapped_field", "foo"),
+            new MatchNoneQueryBuilder()
+        );
+        sourceBuilder.query(queryBuilder);
+
+        searchQueryCategorizer.categorize(sourceBuilder);
+
+        verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("boosting")).add(eq(1.0d), any(Tags.class));
+        verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("match_none")).add(eq(1.0d), any(Tags.class));
+        verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("term")).add(eq(1.0d), any(Tags.class));
+    }
+
+    public void testQueryStringQuery() {
+        SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
+        sourceBuilder.size(50);
+        QueryStringQueryBuilder queryBuilder = new QueryStringQueryBuilder("foo:*");
+        sourceBuilder.query(queryBuilder);
+
+        searchQueryCategorizer.categorize(sourceBuilder);
+
+        verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("query_string")).add(eq(1.0d), any(Tags.class));
+    }
+
+    public void testRangeQuery() {
+        SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
+        RangeQueryBuilder rangeQuery = new RangeQueryBuilder("date");
+        rangeQuery.gte("1970-01-01");
+        rangeQuery.lt("1982-01-01");
+        sourceBuilder.query(rangeQuery);
+
+        searchQueryCategorizer.categorize(sourceBuilder);
+
+        verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("range")).add(eq(1.0d), any(Tags.class));
+    }
+
+    public void testRegexQuery() {
+        SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
+        sourceBuilder.query(new RegexpQueryBuilder("field", "text"));
+
+        searchQueryCategorizer.categorize(sourceBuilder);
+
+        verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("regexp")).add(eq(1.0d), any(Tags.class));
+    }
+
+    public void testSortQuery() {
+        SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
+        sourceBuilder.query(QueryBuilders.matchQuery("tags", "ruby"));
+        sourceBuilder.sort("creationDate", SortOrder.DESC);
+        sourceBuilder.sort(new ScoreSortBuilder());
+
+        searchQueryCategorizer.categorize(sourceBuilder);
+
+        verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("match")).add(eq(1.0d), any(Tags.class));
+        verify(searchQueryCategorizer.searchQueryCounters.sortCounter, times(2)).add(eq(1.0d), any(Tags.class));
+    }
+
+    public void testTermQuery() {
+        SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
+        sourceBuilder.size(50);
+        sourceBuilder.query(QueryBuilders.termQuery("field", "value2"));
+
+        searchQueryCategorizer.categorize(sourceBuilder);
+
+        verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("term")).add(eq(1.0d), any(Tags.class));
+    }
+
+    public void testWildcardQuery() {
+        SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
+        sourceBuilder.size(50);
+        sourceBuilder.query(new WildcardQueryBuilder("field", "text"));
+
+        searchQueryCategorizer.categorize(sourceBuilder);
+
+        verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("wildcard")).add(eq(1.0d), any(Tags.class));
+    }
+
+    public void testComplexQuery() {
+        SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
+        sourceBuilder.size(50);
+
+        TermQueryBuilder termQueryBuilder = QueryBuilders.termQuery("field", "value2");
+        MatchQueryBuilder matchQueryBuilder = QueryBuilders.matchQuery("tags", "php");
+        RegexpQueryBuilder regexpQueryBuilder = new RegexpQueryBuilder("field", "text");
+        BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder().must(termQueryBuilder)
+            .filter(matchQueryBuilder)
+            .should(regexpQueryBuilder);
+        sourceBuilder.query(boolQueryBuilder);
+        sourceBuilder.aggregation(new RangeAggregationBuilder("agg1").field("num"));
+
+        searchQueryCategorizer.categorize(sourceBuilder);
+
+        verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("term")).add(eq(1.0d), any(Tags.class));
+        verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("match")).add(eq(1.0d), any(Tags.class));
+        verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("regexp")).add(eq(1.0d), any(Tags.class));
+        verify(searchQueryCategorizer.searchQueryCounters.nameToQueryTypeCounters.get("bool")).add(eq(1.0d), any(Tags.class));
+        verify(searchQueryCategorizer.searchQueryCounters.aggCounter).add(eq(1.0d), any(Tags.class));
+    }
+}
diff --git a/server/src/test/java/org/opensearch/action/search/SearchQueryThenFetchAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/SearchQueryThenFetchAsyncActionTests.java
index 1b67d805fe814..aefbbe80d5fa1 100644
--- a/server/src/test/java/org/opensearch/action/search/SearchQueryThenFetchAsyncActionTests.java
+++ b/server/src/test/java/org/opensearch/action/search/SearchQueryThenFetchAsyncActionTests.java
@@ -32,6 +32,7 @@
 
 package org.opensearch.action.search;
 
+import org.apache.logging.log4j.LogManager;
 import org.apache.lucene.search.FieldDoc;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.TopFieldDocs;
@@ -41,12 +42,12 @@
 import org.opensearch.action.OriginalIndices;
 import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.cluster.routing.GroupShardsIterator;
-import org.opensearch.common.breaker.CircuitBreaker;
-import org.opensearch.common.breaker.NoopCircuitBreaker;
 import org.opensearch.common.lucene.search.TopDocsAndMaxScore;
 import org.opensearch.common.unit.TimeValue;
 import org.opensearch.common.util.concurrent.OpenSearchExecutors;
 import org.opensearch.core.common.Strings;
+import org.opensearch.core.common.breaker.CircuitBreaker;
+import org.opensearch.core.common.breaker.NoopCircuitBreaker;
 import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.search.DocValueFormat;
 import org.opensearch.search.SearchPhaseResult;
@@ -58,11 +59,15 @@
 import org.opensearch.search.internal.ShardSearchRequest;
 import org.opensearch.search.query.QuerySearchResult;
 import org.opensearch.search.sort.SortBuilders;
-import org.opensearch.test.OpenSearchTestCase;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.InternalAggregationTestCase;
+import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.transport.Transport;
+import org.junit.After;
+import org.junit.Before;
 
 import java.util.Collections;
+import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CountDownLatch;
@@ -75,6 +80,24 @@
 import static org.hamcrest.Matchers.instanceOf;
 
 public class SearchQueryThenFetchAsyncActionTests extends OpenSearchTestCase {
+    private SearchRequestOperationsListenerAssertingListener assertingListener;
+
+    @Before
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+
+        assertingListener = new SearchRequestOperationsListenerAssertingListener();
+    }
+
+    @After
+    @Override
+    public void tearDown() throws Exception {
+        super.tearDown();
+
+        assertingListener.assertFinished();
+    }
+
     public void testBottomFieldSort() throws Exception {
         testCase(false, false);
     }
@@ -214,18 +237,25 @@ public void sendExecuteQuery(
             timeProvider,
             null,
             task,
-            SearchResponse.Clusters.EMPTY
+            SearchResponse.Clusters.EMPTY,
+            new SearchRequestContext(
+                new SearchRequestOperationsListener.CompositeListener(List.of(assertingListener), LogManager.getLogger()),
+                searchRequest
+            ),
+            NoopTracer.INSTANCE
         ) {
 
            @Override
            protected SearchPhase getNextPhase(SearchPhaseResults<SearchPhaseResult> results, SearchPhaseContext context) {
                return new SearchPhase("test") {
                    @Override
                    public void run() {
+                       assertingListener.onPhaseEnd(new MockSearchPhaseContext(1, searchRequest, this), null);
                        latch.countDown();
                    }
                };
            }
        };
+        action.start();
        latch.await();
        assertThat(successfulOps.get(), equalTo(numShards));
diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestBuilderTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestBuilderTests.java
index f72dfe04ed02a..798a9b25617ce 100644
--- a/server/src/test/java/org/opensearch/action/search/SearchRequestBuilderTests.java
+++ b/server/src/test/java/org/opensearch/action/search/SearchRequestBuilderTests.java
@@ -32,12 +32,13 @@
 
 package org.opensearch.action.search;
 
-import org.mockito.Mockito;
 import org.opensearch.client.OpenSearchClient;
 import org.opensearch.index.query.QueryBuilders;
 import org.opensearch.search.builder.SearchSourceBuilder;
 import org.opensearch.test.OpenSearchTestCase;
 
+import org.mockito.Mockito;
+
 import static org.hamcrest.CoreMatchers.equalTo;
 
 public class SearchRequestBuilderTests extends OpenSearchTestCase {
diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsCompositeListenerFactoryTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsCompositeListenerFactoryTests.java
new file mode 100644
index 0000000000000..845543fbd9f57
--- /dev/null
+++ b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsCompositeListenerFactoryTests.java
@@ -0,0 +1,131 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.search;
+
+import org.opensearch.index.query.QueryBuilders;
+import org.opensearch.search.builder.SearchSourceBuilder;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.util.List;
+
+public class SearchRequestOperationsCompositeListenerFactoryTests extends OpenSearchTestCase {
+    public void testAddAndGetListeners() {
+        SearchRequestOperationsListener testListener = createTestSearchRequestOperationsListener();
+        SearchRequestOperationsCompositeListenerFactory requestListeners = new SearchRequestOperationsCompositeListenerFactory(
+            testListener
+        );
+        assertEquals(1, requestListeners.getListeners().size());
+        assertEquals(testListener, requestListeners.getListeners().get(0));
+    }
+
+    public void testStandardListenersEnabled() {
+        SearchRequestOperationsListener testListener1 = createTestSearchRequestOperationsListener();
+        SearchRequestOperationsListener testListener2 = createTestSearchRequestOperationsListener();
+        testListener1.setEnabled(false);
+        SearchRequestOperationsCompositeListenerFactory requestListeners = new SearchRequestOperationsCompositeListenerFactory(
+            testListener1,
+            testListener2
+        );
+        SearchSourceBuilder source = SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery());
+        SearchRequest searchRequest = new SearchRequest().source(source);
+        SearchRequestOperationsListener.CompositeListener compositeListener = requestListeners.buildCompositeListener(
+            searchRequest,
+            logger
+        );
+        List<SearchRequestOperationsListener> listeners = compositeListener.getListeners();
+        assertEquals(1, listeners.size());
+        assertEquals(testListener2, listeners.get(0));
+        assertEquals(2, requestListeners.getListeners().size());
+        assertEquals(testListener1, requestListeners.getListeners().get(0));
+        assertEquals(testListener2, requestListeners.getListeners().get(1));
+    }
+
+    public void testStandardListenersAndPerRequestListener() {
+        SearchRequestOperationsListener testListener1 = createTestSearchRequestOperationsListener();
+        SearchRequestOperationsCompositeListenerFactory requestListeners = new SearchRequestOperationsCompositeListenerFactory(
+            testListener1
+        );
+        SearchRequestOperationsListener testListener2 = createTestSearchRequestOperationsListener();
+        testListener1.setEnabled(true);
+        testListener2.setEnabled(true);
+        SearchSourceBuilder source = SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery());
+        SearchRequest searchRequest = new SearchRequest().source(source);
+        searchRequest.setPhaseTook(true);
+        SearchRequestOperationsListener.CompositeListener compositeListener = requestListeners.buildCompositeListener(
+            searchRequest,
+            logger,
+            testListener2
+        );
+        List<SearchRequestOperationsListener> listeners = compositeListener.getListeners();
+        assertEquals(2, listeners.size());
+        assertEquals(testListener1, listeners.get(0));
+        assertEquals(testListener2, listeners.get(1));
+        assertEquals(1, requestListeners.getListeners().size());
+        assertEquals(testListener1, requestListeners.getListeners().get(0));
+    }
+
+    public void testStandardListenersDisabledAndPerRequestListener() {
+        SearchRequestOperationsListener testListener1 = createTestSearchRequestOperationsListener();
+        testListener1.setEnabled(false);
+        SearchRequestOperationsCompositeListenerFactory requestListeners = new SearchRequestOperationsCompositeListenerFactory(
+            testListener1
+        );
+        SearchRequestOperationsListener testListener2 = createTestSearchRequestOperationsListener();
+        SearchSourceBuilder source = SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery());
+        SearchRequest searchRequest = new SearchRequest().source(source);
+        SearchRequestOperationsListener.CompositeListener compositeListener = requestListeners.buildCompositeListener(
+            searchRequest,
+            logger,
+            testListener2
+        );
+        List<SearchRequestOperationsListener> listeners = compositeListener.getListeners();
+        assertEquals(1, listeners.size());
+        assertEquals(testListener2, listeners.get(0));
+        assertEquals(1, requestListeners.getListeners().size());
+        assertEquals(testListener1, requestListeners.getListeners().get(0));
+        assertFalse(requestListeners.getListeners().get(0).isEnabled());
+    }
+
+    public void testStandardListenerAndPerRequestListenerDisabled() {
+        SearchRequestOperationsListener testListener1 = createTestSearchRequestOperationsListener();
+        SearchRequestOperationsCompositeListenerFactory requestListeners = new SearchRequestOperationsCompositeListenerFactory(
+            testListener1
+        );
+        testListener1.setEnabled(true);
+        SearchRequestOperationsListener testListener2 = createTestSearchRequestOperationsListener();
+        testListener2.setEnabled(false);
+
+        SearchSourceBuilder source = SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery());
+        SearchRequest searchRequest = new SearchRequest().source(source);
+        searchRequest.setPhaseTook(false);
+        SearchRequestOperationsListener.CompositeListener compositeListener = requestListeners.buildCompositeListener(
+            searchRequest,
+            logger,
+            testListener2
+        );
+        List<SearchRequestOperationsListener> listeners = compositeListener.getListeners();
+        assertEquals(1, listeners.size());
+        assertEquals(testListener1, listeners.get(0));
+        assertEquals(1, requestListeners.getListeners().size());
+        assertEquals(testListener1, requestListeners.getListeners().get(0));
+    }
+
+    public SearchRequestOperationsListener createTestSearchRequestOperationsListener() {
+        return new SearchRequestOperationsListener() {
+            @Override
+            protected void onPhaseStart(SearchPhaseContext context) {}
+
+            @Override
+            protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {}
+
+            @Override
+            protected void onPhaseFailure(SearchPhaseContext context, Throwable cause) {}
+        };
+    }
+}
diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerAssertingListener.java b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerAssertingListener.java
new file mode 100644
index 0000000000000..327371ebcaf0b
--- /dev/null
+++ b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerAssertingListener.java
@@ -0,0 +1,39 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.search;
+
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.CoreMatchers.nullValue;
+import static org.hamcrest.MatcherAssert.assertThat;
+
+class SearchRequestOperationsListenerAssertingListener extends SearchRequestOperationsListener {
+    private volatile SearchPhase phase;
+
+    @Override
+    protected void onPhaseStart(SearchPhaseContext context) {
+        assertThat(phase, is(nullValue()));
+        phase = context.getCurrentPhase();
+    }
+
+    @Override
+    protected void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {
+        assertThat(phase, is(context.getCurrentPhase()));
+        phase = null;
+    }
+
+    @Override
+    protected void onPhaseFailure(SearchPhaseContext context, Throwable cause) {
+        assertThat(phase, is(context.getCurrentPhase()));
+        phase = null;
+    }
+
+    public void assertFinished() {
+        assertThat(phase, is(nullValue()));
+    }
+}
diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerSupport.java b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerSupport.java
new file mode 100644
index 0000000000000..0f737e00478cb
--- /dev/null
+++ b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerSupport.java
@@ -0,0 +1,32 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.search;
+
+import org.apache.logging.log4j.LogManager;
+
+import java.util.List;
+
+/**
+ * Helper interface to access package protected {@link SearchRequestOperationsListener} from test cases.
+ */
+public interface SearchRequestOperationsListenerSupport {
+    default void onPhaseStart(SearchRequestOperationsListener listener, SearchPhaseContext context) {
+        listener.onPhaseStart(context);
+    }
+
+    default void onPhaseEnd(SearchRequestOperationsListener listener, SearchPhaseContext context) {
+        listener.onPhaseEnd(
+            context,
+            new SearchRequestContext(
+                new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()),
+                new SearchRequest()
+            )
+        );
+    }
+}
diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerTests.java
new file mode 100644
index 0000000000000..990ed95f1aebc
--- /dev/null
+++ b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerTests.java
@@ -0,0 +1,69 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.search;
+
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class SearchRequestOperationsListenerTests extends OpenSearchTestCase {
+
+    public void testListenersAreExecuted() {
+        Map<SearchPhaseName, SearchRequestStats.StatsHolder> searchPhaseMap = new HashMap<>();
+
+        for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) {
+            searchPhaseMap.put(searchPhaseName, new SearchRequestStats.StatsHolder());
+        }
+        SearchRequestOperationsListener testListener = new SearchRequestOperationsListener() {
+
+            @Override
+            public void onPhaseStart(SearchPhaseContext context) {
+                searchPhaseMap.get(context.getCurrentPhase().getSearchPhaseName()).current.inc();
+            }
+
+            @Override
+            public void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {
+                searchPhaseMap.get(context.getCurrentPhase().getSearchPhaseName()).current.dec();
+                searchPhaseMap.get(context.getCurrentPhase().getSearchPhaseName()).total.inc();
+            }
+
+            @Override
+            public void onPhaseFailure(SearchPhaseContext context, Throwable cause) {
+                searchPhaseMap.get(context.getCurrentPhase().getSearchPhaseName()).current.dec();
+            }
+        };
+
+        int totalListeners = randomIntBetween(1, 10);
+        final List<SearchRequestOperationsListener> requestOperationListeners = new ArrayList<>();
+        for (int i = 0; i < totalListeners; i++) {
+            requestOperationListeners.add(testListener);
+        }
+
+        SearchRequestOperationsListener compositeListener = new SearchRequestOperationsListener.CompositeListener(
+            requestOperationListeners,
+            logger
+        );
+
+        SearchPhaseContext ctx = mock(SearchPhaseContext.class);
+        SearchPhase searchPhase = mock(SearchPhase.class);
+
+        for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) {
+            when(ctx.getCurrentPhase()).thenReturn(searchPhase);
+            when(searchPhase.getSearchPhaseName()).thenReturn(searchPhaseName);
+            compositeListener.onPhaseStart(ctx);
+            assertEquals(totalListeners, searchPhaseMap.get(searchPhaseName).current.count());
+        }
+    }
+}
diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestSlowLogTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestSlowLogTests.java
new file mode 100644
index 0000000000000..f009988ffae17
--- /dev/null
+++ b/server/src/test/java/org/opensearch/action/search/SearchRequestSlowLogTests.java
@@ -0,0 +1,424 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+package org.opensearch.action.search;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.core.LoggerContext;
+import org.apache.lucene.search.TotalHits;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.logging.Loggers;
+import org.opensearch.common.logging.MockAppender;
+import org.opensearch.common.logging.SlowLogLevel;
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.index.query.QueryBuilders;
+import org.opensearch.search.builder.SearchSourceBuilder;
+import org.opensearch.test.OpenSearchTestCase;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Phaser;
+
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.endsWith;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasToString;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.not;
+import static org.hamcrest.Matchers.startsWith;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+public class SearchRequestSlowLogTests extends OpenSearchTestCase {
+    static MockAppender appender;
+    static Logger logger = LogManager.getLogger(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_PREFIX + ".SearchRequestSlowLog");
+
+    @BeforeClass
+    public static void init() throws IllegalAccessException {
+        appender = new MockAppender("trace_appender");
+        appender.start();
+        Loggers.addAppender(logger, appender);
+    }
+
+    @AfterClass
+    public static void cleanup() {
+        Loggers.removeAppender(logger, appender);
+        appender.stop();
+    }
+
+    public void testMultipleSlowLoggersUseSingleLog4jLogger() {
+        LoggerContext context = (LoggerContext) LogManager.getContext(false);
+
+        SearchPhaseContext searchPhaseContext1 = new MockSearchPhaseContext(1);
+        ClusterService clusterService1 = new ClusterService(
+            Settings.EMPTY,
+            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
+            null
+        );
+        SearchRequestSlowLog searchRequestSlowLog1 = new SearchRequestSlowLog(clusterService1);
+        int numberOfLoggersBefore = context.getLoggers().size();
+
+        SearchPhaseContext searchPhaseContext2 = new MockSearchPhaseContext(1);
+        ClusterService clusterService2 = new ClusterService(
+            Settings.EMPTY,
+            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
+            null
+        );
+        SearchRequestOperationsCompositeListenerFactory searchRequestListeners = new SearchRequestOperationsCompositeListenerFactory();
+        SearchRequestSlowLog searchRequestSlowLog2 = new SearchRequestSlowLog(clusterService2);
+
+        int numberOfLoggersAfter = context.getLoggers().size();
+        assertThat(numberOfLoggersAfter, equalTo(numberOfLoggersBefore));
+    }
+
+    public void testOnRequestEnd() throws InterruptedException {
+        final Logger logger = mock(Logger.class);
+        final SearchRequestContext searchRequestContext = mock(SearchRequestContext.class);
+        final SearchPhaseContext searchPhaseContext = mock(SearchPhaseContext.class);
+        final SearchRequest searchRequest = mock(SearchRequest.class);
+        final SearchTask searchTask = mock(SearchTask.class);
+
+        Settings.Builder settingsBuilder = Settings.builder();
+        settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_INFO_SETTING.getKey(), "0ms");
+        settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_DEBUG_SETTING.getKey(), "0ms");
+        settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_TRACE_SETTING.getKey(), "0ms");
+        Settings settings = settingsBuilder.build();
+        ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+        ClusterService clusterService = new ClusterService(settings, clusterSettings, null);
+        SearchRequestSlowLog searchRequestSlowLog = new SearchRequestSlowLog(clusterService, logger);
+        final List<SearchRequestOperationsListener> searchListenersList = new ArrayList<>(List.of(searchRequestSlowLog));
+
+        when(searchRequestContext.getSearchRequestOperationsListener()).thenReturn(
+            new SearchRequestOperationsListener.CompositeListener(searchListenersList, logger)
+        );
+        when(searchRequestContext.getAbsoluteStartNanos()).thenReturn(System.nanoTime() - 1L);
+        when(searchPhaseContext.getRequest()).thenReturn(searchRequest);
+        when(searchPhaseContext.getTask()).thenReturn(searchTask);
+        when(searchRequest.searchType()).thenReturn(SearchType.QUERY_THEN_FETCH);
+
+        searchRequestContext.getSearchRequestOperationsListener().onRequestEnd(searchPhaseContext, searchRequestContext);
+
+        verify(logger, never()).warn(any(SearchRequestSlowLog.SearchRequestSlowLogMessage.class));
+        verify(logger, times(1)).info(any(SearchRequestSlowLog.SearchRequestSlowLogMessage.class));
+        verify(logger, never()).debug(any(SearchRequestSlowLog.SearchRequestSlowLogMessage.class));
+        verify(logger, never()).trace(any(SearchRequestSlowLog.SearchRequestSlowLogMessage.class));
+    }
+
+    public void testConcurrentOnRequestEnd() throws InterruptedException {
+        final Logger logger = mock(Logger.class);
+        final SearchPhaseContext searchPhaseContext = mock(SearchPhaseContext.class);
+        final SearchRequest searchRequest = mock(SearchRequest.class);
+        final SearchTask searchTask = mock(SearchTask.class);
+
+        Settings.Builder settingsBuilder = Settings.builder();
+        settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_WARN_SETTING.getKey(), "-1");
+        settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_INFO_SETTING.getKey(), "10s");
+        settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_DEBUG_SETTING.getKey(), "-1");
+        settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_TRACE_SETTING.getKey(), "-1");
+        Settings settings = settingsBuilder.build();
+        ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+        ClusterService clusterService = new ClusterService(settings, clusterSettings, null);
+        SearchRequestSlowLog searchRequestSlowLog = new SearchRequestSlowLog(clusterService, logger);
+        final List<SearchRequestOperationsListener> searchListenersList = new ArrayList<>(List.of(searchRequestSlowLog));
+
+        when(searchPhaseContext.getRequest()).thenReturn(searchRequest);
+        when(searchPhaseContext.getTask()).thenReturn(searchTask);
+
when(searchRequest.searchType()).thenReturn(SearchType.QUERY_THEN_FETCH); + + int numRequests = 50; + int numRequestsLogged = randomIntBetween(0, 50); + Thread[] threads = new Thread[numRequests]; + Phaser phaser = new Phaser(numRequests + 1); + CountDownLatch countDownLatch = new CountDownLatch(numRequests); + + // create a list of SearchRequestContexts + // each SearchRequestContext contains unique composite SearchRequestOperationsListener + ArrayList<SearchRequestContext> searchRequestContexts = new ArrayList<>(); + for (int i = 0; i < numRequests; i++) { + SearchRequestContext searchRequestContext = new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(searchListenersList, logger), + searchRequest + ); + searchRequestContext.setAbsoluteStartNanos((i < numRequestsLogged) ? 0 : System.nanoTime()); + searchRequestContexts.add(searchRequestContext); + } + + for (int i = 0; i < numRequests; i++) { + int finalI = i; + threads[i] = new Thread(() -> { + phaser.arriveAndAwaitAdvance(); + SearchRequestContext thisContext = searchRequestContexts.get(finalI); + thisContext.getSearchRequestOperationsListener().onRequestEnd(searchPhaseContext, thisContext); + countDownLatch.countDown(); + }); + threads[i].start(); + } + phaser.arriveAndAwaitAdvance(); + countDownLatch.await(); + + verify(logger, never()).warn(any(SearchRequestSlowLog.SearchRequestSlowLogMessage.class)); + verify(logger, times(numRequestsLogged)).info(any(SearchRequestSlowLog.SearchRequestSlowLogMessage.class)); + verify(logger, never()).debug(any(SearchRequestSlowLog.SearchRequestSlowLogMessage.class)); + verify(logger, never()).trace(any(SearchRequestSlowLog.SearchRequestSlowLogMessage.class)); + } + + public void testSearchRequestSlowLogHasJsonFields_EmptySearchRequestContext() throws IOException { + SearchSourceBuilder source = SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()); + SearchRequest searchRequest = new SearchRequest().source(source); + SearchPhaseContext searchPhaseContext = new MockSearchPhaseContext(1, searchRequest); + SearchRequestContext searchRequestContext = new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + searchRequest + ); + SearchRequestSlowLog.SearchRequestSlowLogMessage p = new SearchRequestSlowLog.SearchRequestSlowLogMessage( + searchPhaseContext, + 10, + searchRequestContext + ); + + assertThat(p.getValueFor("took"), equalTo("10nanos")); + assertThat(p.getValueFor("took_millis"), equalTo("0")); + assertThat(p.getValueFor("phase_took"), equalTo("{}")); + assertThat(p.getValueFor("total_hits"), equalTo("-1")); + assertThat(p.getValueFor("search_type"), equalTo("QUERY_THEN_FETCH")); + assertThat(p.getValueFor("shards"), equalTo("")); + assertThat(p.getValueFor("source"), equalTo("{\\\"query\\\":{\\\"match_all\\\":{\\\"boost\\\":1.0}}}")); + assertThat(p.getValueFor("id"), equalTo(null)); + } + + public void testSearchRequestSlowLogHasJsonFields_NotEmptySearchRequestContext() throws IOException { + SearchSourceBuilder source = SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()); + SearchRequest searchRequest = new SearchRequest().source(source); + SearchPhaseContext searchPhaseContext = new MockSearchPhaseContext(1, searchRequest); + SearchRequestContext searchRequestContext = new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + searchRequest + ); + 
searchRequestContext.updatePhaseTookMap(SearchPhaseName.FETCH.getName(), 10L); + searchRequestContext.updatePhaseTookMap(SearchPhaseName.QUERY.getName(), 50L); + searchRequestContext.updatePhaseTookMap(SearchPhaseName.EXPAND.getName(), 5L); + searchRequestContext.setTotalHits(new TotalHits(3L, TotalHits.Relation.EQUAL_TO)); + searchRequestContext.setShardStats(10, 8, 1, 1); + SearchRequestSlowLog.SearchRequestSlowLogMessage p = new SearchRequestSlowLog.SearchRequestSlowLogMessage( + searchPhaseContext, + 10, + searchRequestContext + ); + + assertThat(p.getValueFor("took"), equalTo("10nanos")); + assertThat(p.getValueFor("took_millis"), equalTo("0")); + assertThat(p.getValueFor("phase_took"), equalTo("{expand=5, fetch=10, query=50}")); + assertThat(p.getValueFor("total_hits"), equalTo("3 hits")); + assertThat(p.getValueFor("search_type"), equalTo("QUERY_THEN_FETCH")); + assertThat(p.getValueFor("shards"), equalTo("{total:10, successful:8, skipped:1, failed:1}")); + assertThat(p.getValueFor("source"), equalTo("{\\\"query\\\":{\\\"match_all\\\":{\\\"boost\\\":1.0}}}")); + assertThat(p.getValueFor("id"), equalTo(null)); + } + + public void testSearchRequestSlowLogHasJsonFields_PartialContext() throws IOException { + SearchSourceBuilder source = SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()); + SearchRequest searchRequest = new SearchRequest().source(source); + SearchPhaseContext searchPhaseContext = new MockSearchPhaseContext(1, searchRequest); + SearchRequestContext searchRequestContext = new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + searchRequest + ); + searchRequestContext.updatePhaseTookMap(SearchPhaseName.FETCH.getName(), 10L); + searchRequestContext.updatePhaseTookMap(SearchPhaseName.QUERY.getName(), 50L); + searchRequestContext.updatePhaseTookMap(SearchPhaseName.EXPAND.getName(), 5L); + searchRequestContext.setTotalHits(new TotalHits(3L, TotalHits.Relation.EQUAL_TO)); + searchRequestContext.setShardStats(5, 3, 1, 1); + SearchRequestSlowLog.SearchRequestSlowLogMessage p = new SearchRequestSlowLog.SearchRequestSlowLogMessage( + searchPhaseContext, + 10000000000L, + searchRequestContext + ); + + assertThat(p.getValueFor("took"), equalTo("10s")); + assertThat(p.getValueFor("took_millis"), equalTo("10000")); + assertThat(p.getValueFor("phase_took"), equalTo("{expand=5, fetch=10, query=50}")); + assertThat(p.getValueFor("total_hits"), equalTo("3 hits")); + assertThat(p.getValueFor("search_type"), equalTo("QUERY_THEN_FETCH")); + assertThat(p.getValueFor("shards"), equalTo("{total:5, successful:3, skipped:1, failed:1}")); + assertThat(p.getValueFor("source"), equalTo("{\\\"query\\\":{\\\"match_all\\\":{\\\"boost\\\":1.0}}}")); + assertThat(p.getValueFor("id"), equalTo(null)); + } + + public void testSearchRequestSlowLogSearchContextPrinterToLog() throws IOException { + SearchSourceBuilder source = SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()); + SearchRequest searchRequest = new SearchRequest().source(source); + SearchPhaseContext searchPhaseContext = new MockSearchPhaseContext(1, searchRequest); + SearchRequestContext searchRequestContext = new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + searchRequest + ); + searchRequestContext.updatePhaseTookMap(SearchPhaseName.FETCH.getName(), 10L); + searchRequestContext.updatePhaseTookMap(SearchPhaseName.QUERY.getName(), 50L); + 
searchRequestContext.updatePhaseTookMap(SearchPhaseName.EXPAND.getName(), 5L); + searchRequestContext.setTotalHits(new TotalHits(3L, TotalHits.Relation.EQUAL_TO)); + searchRequestContext.setShardStats(10, 8, 1, 1); + SearchRequestSlowLog.SearchRequestSlowLogMessage p = new SearchRequestSlowLog.SearchRequestSlowLogMessage( + searchPhaseContext, + 100000, + searchRequestContext + ); + + assertThat(p.getFormattedMessage(), startsWith("took[100micros]")); + assertThat(p.getFormattedMessage(), containsString("took_millis[0]")); + assertThat(p.getFormattedMessage(), containsString("phase_took_millis[{expand=5, fetch=10, query=50}]")); + assertThat(p.getFormattedMessage(), containsString("total_hits[3 hits]")); + assertThat(p.getFormattedMessage(), containsString("search_type[QUERY_THEN_FETCH]")); + assertThat(p.getFormattedMessage(), containsString("shards[{total:10, successful:8, skipped:1, failed:1}]")); + assertThat(p.getFormattedMessage(), containsString("source[{\"query\":{\"match_all\":{\"boost\":1.0}}}]")); + // Makes sure that output doesn't contain any new lines + assertThat(p.getFormattedMessage(), not(containsString("\n"))); + assertThat(p.getFormattedMessage(), endsWith("id[]")); + } + + public void testLevelSettingWarn() { + SlowLogLevel level = SlowLogLevel.WARN; + Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_LEVEL.getKey(), level); + Settings settings = settingsBuilder.build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + ClusterService clusterService = new ClusterService(settings, clusterSettings, null); + SearchRequestSlowLog searchRequestSlowLog = new SearchRequestSlowLog(clusterService); + assertEquals(level, searchRequestSlowLog.getLevel()); + } + + public void testLevelSettingDebug() { + String level = "DEBUG"; + Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_LEVEL.getKey(), level); + Settings settings = settingsBuilder.build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + ClusterService clusterService = new ClusterService(settings, clusterSettings, null); + SearchRequestSlowLog searchRequestSlowLog = new SearchRequestSlowLog(clusterService); + assertEquals(level, searchRequestSlowLog.getLevel().toString()); + } + + public void testLevelSettingFail() { + String level = "NOT A LEVEL"; + Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_LEVEL.getKey(), level); + Settings settings = settingsBuilder.build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + ClusterService clusterService = new ClusterService(settings, clusterSettings, null); + + try { + new SearchRequestSlowLog(clusterService); + fail(); + } catch (IllegalArgumentException ex) { + final String expected = "No enum constant org.opensearch.common.logging.SlowLogLevel.NOT A LEVEL"; + assertThat(ex, hasToString(containsString(expected))); + assertThat(ex, instanceOf(IllegalArgumentException.class)); + } + } + + public void testSetThresholds() { + Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_WARN_SETTING.getKey(), "400ms"); + 
settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_INFO_SETTING.getKey(), "300ms"); + settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_DEBUG_SETTING.getKey(), "200ms"); + settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_TRACE_SETTING.getKey(), "100ms"); + Settings settings = settingsBuilder.build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + ClusterService clusterService = new ClusterService(settings, clusterSettings, null); + SearchRequestSlowLog searchRequestSlowLog = new SearchRequestSlowLog(clusterService); + assertEquals(TimeValue.timeValueMillis(400).nanos(), searchRequestSlowLog.getWarnThreshold()); + assertEquals(TimeValue.timeValueMillis(300).nanos(), searchRequestSlowLog.getInfoThreshold()); + assertEquals(TimeValue.timeValueMillis(200).nanos(), searchRequestSlowLog.getDebugThreshold()); + assertEquals(TimeValue.timeValueMillis(100).nanos(), searchRequestSlowLog.getTraceThreshold()); + assertEquals(SlowLogLevel.TRACE, searchRequestSlowLog.getLevel()); + } + + public void testSetThresholdsUnits() { + Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_WARN_SETTING.getKey(), "400s"); + settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_INFO_SETTING.getKey(), "300ms"); + settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_DEBUG_SETTING.getKey(), "200micros"); + settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_TRACE_SETTING.getKey(), "100nanos"); + Settings settings = settingsBuilder.build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + ClusterService clusterService = new ClusterService(settings, clusterSettings, null); + SearchRequestSlowLog searchRequestSlowLog = new SearchRequestSlowLog(clusterService); + assertEquals(TimeValue.timeValueSeconds(400).nanos(), searchRequestSlowLog.getWarnThreshold()); + assertEquals(TimeValue.timeValueMillis(300).nanos(), searchRequestSlowLog.getInfoThreshold()); + assertEquals(TimeValue.timeValueNanos(200000).nanos(), searchRequestSlowLog.getDebugThreshold()); + assertEquals(TimeValue.timeValueNanos(100).nanos(), searchRequestSlowLog.getTraceThreshold()); + assertEquals(SlowLogLevel.TRACE, searchRequestSlowLog.getLevel()); + } + + public void testSetThresholdsDefaults() { + Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_WARN_SETTING.getKey(), "400ms"); + settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_DEBUG_SETTING.getKey(), "200ms"); + Settings settings = settingsBuilder.build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + ClusterService clusterService = new ClusterService(settings, clusterSettings, null); + SearchRequestSlowLog searchRequestSlowLog = new SearchRequestSlowLog(clusterService); + assertEquals(TimeValue.timeValueMillis(400).nanos(), searchRequestSlowLog.getWarnThreshold()); + assertEquals(TimeValue.timeValueMillis(-1).nanos(), searchRequestSlowLog.getInfoThreshold()); + assertEquals(TimeValue.timeValueMillis(200).nanos(), searchRequestSlowLog.getDebugThreshold()); + assertEquals(TimeValue.timeValueMillis(-1).nanos(), 
searchRequestSlowLog.getTraceThreshold()); + assertEquals(SlowLogLevel.TRACE, searchRequestSlowLog.getLevel()); + } + + public void testSetThresholdsError() { + Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.put(SearchRequestSlowLog.CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_WARN_SETTING.getKey(), "NOT A TIME VALUE"); + Settings settings = settingsBuilder.build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + ClusterService clusterService = new ClusterService(settings, clusterSettings, null); + + try { + new SearchRequestSlowLog(clusterService); + fail(); + } catch (IllegalArgumentException ex) { + final String expected = + "failed to parse setting [cluster.search.request.slowlog.threshold.warn] with value [NOT A TIME VALUE] as a time value"; + assertThat(ex, hasToString(containsString(expected))); + assertThat(ex, instanceOf(IllegalArgumentException.class)); + } + } +} diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java new file mode 100644 index 0000000000000..fb9b26e3f3ad1 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java @@ -0,0 +1,171 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.apache.logging.log4j.LogManager; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Phaser; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SearchRequestStatsTests extends OpenSearchTestCase { + public void testSearchRequestPhaseFailure() { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + SearchRequestStats testRequestStats = new SearchRequestStats(clusterSettings); + SearchPhaseContext ctx = mock(SearchPhaseContext.class); + SearchPhase mockSearchPhase = mock(SearchPhase.class); + when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + testRequestStats.onPhaseStart(ctx); + assertEquals(1, testRequestStats.getPhaseCurrent(searchPhaseName)); + testRequestStats.onPhaseFailure(ctx, new Throwable()); + assertEquals(0, testRequestStats.getPhaseCurrent(searchPhaseName)); + } + } + + public void testSearchRequestStats() { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + SearchRequestStats testRequestStats = new SearchRequestStats(clusterSettings); + + SearchPhaseContext ctx = mock(SearchPhaseContext.class); + SearchPhase mockSearchPhase = mock(SearchPhase.class); + when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + 
long tookTimeInMillis = randomIntBetween(1, 10); + testRequestStats.onPhaseStart(ctx); + long startTime = System.nanoTime() - TimeUnit.MILLISECONDS.toNanos(tookTimeInMillis); + when(mockSearchPhase.getStartTimeInNanos()).thenReturn(startTime); + assertEquals(1, testRequestStats.getPhaseCurrent(searchPhaseName)); + testRequestStats.onPhaseEnd( + ctx, + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequest() + ) + ); + assertEquals(0, testRequestStats.getPhaseCurrent(searchPhaseName)); + assertEquals(1, testRequestStats.getPhaseTotal(searchPhaseName)); + assertThat(testRequestStats.getPhaseMetric(searchPhaseName), greaterThanOrEqualTo(tookTimeInMillis)); + } + } + + public void testSearchRequestStatsOnPhaseStartConcurrently() throws InterruptedException { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + SearchRequestStats testRequestStats = new SearchRequestStats(clusterSettings); + int numTasks = randomIntBetween(5, 50); + Thread[] threads = new Thread[numTasks * SearchPhaseName.values().length]; + Phaser phaser = new Phaser(numTasks * SearchPhaseName.values().length + 1); + CountDownLatch countDownLatch = new CountDownLatch(numTasks * SearchPhaseName.values().length); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + SearchPhaseContext ctx = mock(SearchPhaseContext.class); + SearchPhase mockSearchPhase = mock(SearchPhase.class); + when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); + when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + for (int i = 0; i < numTasks; i++) { + threads[i] = new Thread(() -> { + phaser.arriveAndAwaitAdvance(); + testRequestStats.onPhaseStart(ctx); + countDownLatch.countDown(); + }); + threads[i].start(); + } + } + phaser.arriveAndAwaitAdvance(); + countDownLatch.await(); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + assertEquals(numTasks, testRequestStats.getPhaseCurrent(searchPhaseName)); + } + } + + public void testSearchRequestStatsOnPhaseEndConcurrently() throws InterruptedException { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + SearchRequestStats testRequestStats = new SearchRequestStats(clusterSettings); + int numTasks = randomIntBetween(5, 50); + Thread[] threads = new Thread[numTasks * SearchPhaseName.values().length]; + Phaser phaser = new Phaser(numTasks * SearchPhaseName.values().length + 1); + CountDownLatch countDownLatch = new CountDownLatch(numTasks * SearchPhaseName.values().length); + Map<SearchPhaseName, Long> searchPhaseNameLongMap = new HashMap<>(); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + SearchPhaseContext ctx = mock(SearchPhaseContext.class); + SearchPhase mockSearchPhase = mock(SearchPhase.class); + when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); + when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + long tookTimeInMillis = randomIntBetween(1, 10); + long startTime = System.nanoTime() - TimeUnit.MILLISECONDS.toNanos(tookTimeInMillis); + when(mockSearchPhase.getStartTimeInNanos()).thenReturn(startTime); + for (int i = 0; i < numTasks; i++) { + threads[i] = new Thread(() -> { + phaser.arriveAndAwaitAdvance(); + testRequestStats.onPhaseEnd( + ctx, + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequest() + 
) + ); + countDownLatch.countDown(); + }); + threads[i].start(); + } + searchPhaseNameLongMap.put(searchPhaseName, tookTimeInMillis); + } + phaser.arriveAndAwaitAdvance(); + countDownLatch.await(); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + assertEquals(numTasks, testRequestStats.getPhaseTotal(searchPhaseName)); + assertThat( + testRequestStats.getPhaseMetric(searchPhaseName), + greaterThanOrEqualTo((searchPhaseNameLongMap.get(searchPhaseName) * numTasks)) + ); + } + } + + public void testSearchRequestStatsOnPhaseFailureConcurrently() throws InterruptedException { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + SearchRequestStats testRequestStats = new SearchRequestStats(clusterSettings); + int numTasks = randomIntBetween(5, 50); + Thread[] threads = new Thread[numTasks * SearchPhaseName.values().length]; + Phaser phaser = new Phaser(numTasks * SearchPhaseName.values().length + 1); + CountDownLatch countDownLatch = new CountDownLatch(numTasks * SearchPhaseName.values().length); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + SearchPhaseContext ctx = mock(SearchPhaseContext.class); + SearchPhase mockSearchPhase = mock(SearchPhase.class); + when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); + when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + for (int i = 0; i < numTasks; i++) { + threads[i] = new Thread(() -> { + phaser.arriveAndAwaitAdvance(); + testRequestStats.onPhaseStart(ctx); + testRequestStats.onPhaseFailure(ctx, new Throwable()); + countDownLatch.countDown(); + }); + threads[i].start(); + } + } + phaser.arriveAndAwaitAdvance(); + countDownLatch.await(); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + assertEquals(0, testRequestStats.getPhaseCurrent(searchPhaseName)); + } + } +} diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java index ddce6d9772b05..f025e3a63b9bf 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java @@ -38,13 +38,13 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.ArrayUtils; import org.opensearch.core.common.Strings; +import org.opensearch.core.tasks.TaskId; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.AbstractSearchTestCase; import org.opensearch.search.Scroll; import org.opensearch.search.builder.PointInTimeBuilder; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.rescore.QueryRescorerBuilder; -import org.opensearch.tasks.TaskId; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; @@ -244,6 +244,9 @@ private SearchRequest mutate(SearchRequest searchRequest) { ); mutators.add(() -> mutation.source(randomValueOtherThan(searchRequest.source(), this::createSearchSourceBuilder))); mutators.add(() -> mutation.setCcsMinimizeRoundtrips(searchRequest.isCcsMinimizeRoundtrips() == false)); + mutators.add( + () -> mutation.setPhaseTook(searchRequest.isPhaseTook() == null ? 
randomBoolean() : searchRequest.isPhaseTook() == false) + ); mutators.add( () -> mutation.setCancelAfterTimeInterval( searchRequest.getCancelAfterTimeInterval() != null diff --git a/server/src/test/java/org/opensearch/action/search/SearchResponseMergerTests.java b/server/src/test/java/org/opensearch/action/search/SearchResponseMergerTests.java index 1004965c0d50e..ce4d5ca4f7091 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchResponseMergerTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchResponseMergerTests.java @@ -32,6 +32,7 @@ package org.opensearch.action.search; +import org.apache.logging.log4j.LogManager; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TotalHits; import org.opensearch.OpenSearchException; @@ -132,7 +133,13 @@ public void testMergeTookInMillis() throws InterruptedException { addResponse(merger, searchResponse); } awaitResponsesAdded(); - SearchResponse searchResponse = merger.getMergedResponse(SearchResponse.Clusters.EMPTY); + SearchResponse searchResponse = merger.getMergedResponse( + SearchResponse.Clusters.EMPTY, + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequest() + ) + ); assertEquals(TimeUnit.NANOSECONDS.toMillis(currentRelativeTime), searchResponse.getTook().millis()); } @@ -184,7 +191,13 @@ public void testMergeShardFailures() throws InterruptedException { awaitResponsesAdded(); assertEquals(numResponses, merger.numResponses()); SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); - SearchResponse mergedResponse = merger.getMergedResponse(clusters); + SearchResponse mergedResponse = merger.getMergedResponse( + clusters, + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequest() + ) + ); assertSame(clusters, mergedResponse.getClusters()); assertEquals(numResponses, mergedResponse.getTotalShards()); assertEquals(numResponses, mergedResponse.getSuccessfulShards()); @@ -235,7 +248,13 @@ public void testMergeShardFailuresNullShardTarget() throws InterruptedException awaitResponsesAdded(); assertEquals(numResponses, merger.numResponses()); SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); - SearchResponse mergedResponse = merger.getMergedResponse(clusters); + SearchResponse mergedResponse = merger.getMergedResponse( + clusters, + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequest() + ) + ); assertSame(clusters, mergedResponse.getClusters()); assertEquals(numResponses, mergedResponse.getTotalShards()); assertEquals(numResponses, mergedResponse.getSuccessfulShards()); @@ -281,7 +300,13 @@ public void testMergeShardFailuresNullShardId() throws InterruptedException { } awaitResponsesAdded(); assertEquals(numResponses, merger.numResponses()); - ShardSearchFailure[] shardFailures = merger.getMergedResponse(SearchResponse.Clusters.EMPTY).getShardFailures(); + ShardSearchFailure[] shardFailures = merger.getMergedResponse( + SearchResponse.Clusters.EMPTY, + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequest() + ) + ).getShardFailures(); assertThat(Arrays.asList(shardFailures), containsInAnyOrder(expectedFailures.toArray(ShardSearchFailure.EMPTY_ARRAY))); } @@ -315,7 +340,13 @@ public void testMergeProfileResults() throws 
InterruptedException { awaitResponsesAdded(); assertEquals(numResponses, merger.numResponses()); SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); - SearchResponse mergedResponse = merger.getMergedResponse(clusters); + SearchResponse mergedResponse = merger.getMergedResponse( + clusters, + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequest() + ) + ); assertSame(clusters, mergedResponse.getClusters()); assertEquals(numResponses, mergedResponse.getTotalShards()); assertEquals(numResponses, mergedResponse.getSuccessfulShards()); @@ -377,7 +408,13 @@ public void testMergeCompletionSuggestions() throws InterruptedException { awaitResponsesAdded(); assertEquals(numResponses, searchResponseMerger.numResponses()); SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); - SearchResponse mergedResponse = searchResponseMerger.getMergedResponse(clusters); + SearchResponse mergedResponse = searchResponseMerger.getMergedResponse( + clusters, + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequest() + ) + ); assertSame(clusters, mergedResponse.getClusters()); assertEquals(numResponses, mergedResponse.getTotalShards()); assertEquals(numResponses, mergedResponse.getSuccessfulShards()); @@ -449,7 +486,13 @@ public void testMergeCompletionSuggestionsTieBreak() throws InterruptedException awaitResponsesAdded(); assertEquals(numResponses, searchResponseMerger.numResponses()); SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); - SearchResponse mergedResponse = searchResponseMerger.getMergedResponse(clusters); + SearchResponse mergedResponse = searchResponseMerger.getMergedResponse( + clusters, + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequest() + ) + ); assertSame(clusters, mergedResponse.getClusters()); assertEquals(numResponses, mergedResponse.getTotalShards()); assertEquals(numResponses, mergedResponse.getSuccessfulShards()); @@ -523,7 +566,13 @@ public void testMergeAggs() throws InterruptedException { awaitResponsesAdded(); assertEquals(numResponses, searchResponseMerger.numResponses()); SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); - SearchResponse mergedResponse = searchResponseMerger.getMergedResponse(clusters); + SearchResponse mergedResponse = searchResponseMerger.getMergedResponse( + clusters, + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequest() + ) + ); assertSame(clusters, mergedResponse.getClusters()); assertEquals(numResponses, mergedResponse.getTotalShards()); assertEquals(numResponses, mergedResponse.getSuccessfulShards()); @@ -680,7 +729,13 @@ public void testMergeSearchHits() throws InterruptedException { awaitResponsesAdded(); assertEquals(numResponses, searchResponseMerger.numResponses()); final SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); - SearchResponse searchResponse = searchResponseMerger.getMergedResponse(clusters); + SearchResponse searchResponse = searchResponseMerger.getMergedResponse( + clusters, + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequest() + ) + ); assertEquals(TimeUnit.NANOSECONDS.toMillis(currentRelativeTime), 
searchResponse.getTook().millis()); assertEquals(expectedTotal, searchResponse.getTotalShards()); @@ -740,7 +795,13 @@ public void testMergeNoResponsesAdded() { SearchResponseMerger merger = new SearchResponseMerger(0, 10, Integer.MAX_VALUE, timeProvider, emptyReduceContextBuilder()); SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); assertEquals(0, merger.numResponses()); - SearchResponse response = merger.getMergedResponse(clusters); + SearchResponse response = merger.getMergedResponse( + clusters, + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequest() + ) + ); assertSame(clusters, response.getClusters()); assertEquals(TimeUnit.NANOSECONDS.toMillis(currentRelativeTime), response.getTook().millis()); assertEquals(0, response.getTotalShards()); @@ -813,7 +874,13 @@ public void testMergeEmptySearchHitsWithNonEmpty() { merger.add(searchResponse); } assertEquals(2, merger.numResponses()); - SearchResponse mergedResponse = merger.getMergedResponse(clusters); + SearchResponse mergedResponse = merger.getMergedResponse( + clusters, + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequest() + ) + ); assertEquals(10, mergedResponse.getHits().getTotalHits().value); assertEquals(10, mergedResponse.getHits().getHits().length); assertEquals(2, mergedResponse.getTotalShards()); @@ -855,7 +922,13 @@ public void testMergeOnlyEmptyHits() { ); merger.add(searchResponse); } - SearchResponse mergedResponse = merger.getMergedResponse(clusters); + SearchResponse mergedResponse = merger.getMergedResponse( + clusters, + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequest() + ) + ); assertEquals(expectedTotalHits, mergedResponse.getHits().getTotalHits()); } diff --git a/server/src/test/java/org/opensearch/action/search/SearchResponseTests.java b/server/src/test/java/org/opensearch/action/search/SearchResponseTests.java index d6983d111bcde..c9e59ab4ea04d 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchResponseTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchResponseTests.java @@ -35,17 +35,26 @@ import org.apache.lucene.search.TotalHits; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.Strings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.xcontent.XContentHelper; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.plugins.SearchPlugin; import 
org.opensearch.rest.action.search.RestSearchAction; +import org.opensearch.search.GenericSearchExtBuilder; +import org.opensearch.search.SearchExtBuilder; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; import org.opensearch.search.SearchHitsTests; @@ -57,17 +66,19 @@ import org.opensearch.search.profile.SearchProfileShardResultsTests; import org.opensearch.search.suggest.Suggest; import org.opensearch.search.suggest.SuggestTests; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.InternalAggregationTestCase; +import org.opensearch.test.OpenSearchTestCase; import org.junit.After; import org.junit.Before; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; +import java.util.UUID; -import static java.util.Collections.emptyList; import static java.util.Collections.singletonMap; import static org.opensearch.test.XContentTestUtils.insertRandomFields; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertToXContentEquivalent; @@ -78,11 +89,25 @@ public class SearchResponseTests extends OpenSearchTestCase { static { List<NamedXContentRegistry.Entry> namedXContents = new ArrayList<>(InternalAggregationTestCase.getDefaultNamedXContents()); namedXContents.addAll(SuggestTests.getDefaultNamedXContents()); + namedXContents.add( + new NamedXContentRegistry.Entry(SearchExtBuilder.class, DummySearchExtBuilder.DUMMY_FIELD, DummySearchExtBuilder::parse) + ); xContentRegistry = new NamedXContentRegistry(namedXContents); } private final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry( - new SearchModule(Settings.EMPTY, emptyList()).getNamedWriteables() + new SearchModule(Settings.EMPTY, List.of(new SearchPlugin() { + @Override + public List<SearchExtSpec<?>> getSearchExts() { + return List.of( + new SearchExtSpec<>( + DummySearchExtBuilder.DUMMY_FIELD, + DummySearchExtBuilder::new, + parser -> DummySearchExtBuilder.parse(parser) + ) + ); + } + })).getNamedWriteables() ); private AggregationsTests aggregationsTests = new AggregationsTests(); @@ -117,10 +142,23 @@ private SearchResponse createMinimalTestItem() { * if minimal is set, don't include search hits, aggregations, suggest etc... to make test simpler */ private SearchResponse createTestItem(boolean minimal, ShardSearchFailure... shardSearchFailures) { + return createTestItem(minimal, Collections.emptyList(), shardSearchFailures); + } + + public SearchResponse createTestItem( + boolean minimal, + List<SearchExtBuilder> searchExtBuilders, + ShardSearchFailure... shardSearchFailures + ) { boolean timedOut = randomBoolean(); Boolean terminatedEarly = randomBoolean() ? null : randomBoolean(); int numReducePhases = randomIntBetween(1, 10); long tookInMillis = randomNonNegativeLong(); + Map<String, Long> phaseTookMap = new HashMap<>(); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + phaseTookMap.put(searchPhaseName.getName(), randomNonNegativeLong()); + } + SearchResponse.PhaseTook phaseTook = new SearchResponse.PhaseTook(phaseTookMap); int totalShards = randomIntBetween(1, Integer.MAX_VALUE); int successfulShards = randomIntBetween(0, totalShards); int skippedShards = randomIntBetween(0, totalShards); @@ -137,7 +175,8 @@ private SearchResponse createTestItem(boolean minimal, ShardSearchFailure... 
sha profileShardResults, timedOut, terminatedEarly, - numReducePhases + numReducePhases, + searchExtBuilders ); } else { internalSearchResponse = InternalSearchResponse.empty(); @@ -150,8 +189,10 @@ private SearchResponse createTestItem(boolean minimal, ShardSearchFailure... sha successfulShards, skippedShards, tookInMillis, + phaseTook, shardSearchFailures, - randomBoolean() ? randomClusters() : SearchResponse.Clusters.EMPTY + randomBoolean() ? randomClusters() : SearchResponse.Clusters.EMPTY, + null ); } @@ -170,6 +211,32 @@ public void testFromXContent() throws IOException { doFromXContentTestWithRandomFields(createTestItem(), false); } + public void testFromXContentWithSearchExtBuilders() throws IOException { + doFromXContentTestWithRandomFields(createTestItem(false, List.of(new DummySearchExtBuilder(UUID.randomUUID().toString()))), false); + } + + public void testFromXContentWithUnregisteredSearchExtBuilders() throws IOException { + List<NamedXContentRegistry.Entry> namedXContents = new ArrayList<>(InternalAggregationTestCase.getDefaultNamedXContents()); + namedXContents.addAll(SuggestTests.getDefaultNamedXContents()); + String dummyId = UUID.randomUUID().toString(); + String fakeId = UUID.randomUUID().toString(); + List<SearchExtBuilder> extBuilders = List.of(new DummySearchExtBuilder(dummyId), new FakeSearchExtBuilder(fakeId)); + SearchResponse response = createTestItem(false, extBuilders); + MediaType xcontentType = randomFrom(XContentType.values()); + boolean humanReadable = randomBoolean(); + final ToXContent.Params params = new ToXContent.MapParams(singletonMap(RestSearchAction.TYPED_KEYS_PARAM, "true")); + BytesReference originalBytes = toShuffledXContent(response, xcontentType, params, humanReadable); + XContentParser parser = createParser(new NamedXContentRegistry(namedXContents), xcontentType.xContent(), originalBytes); + SearchResponse parsed = SearchResponse.fromXContent(parser); + assertEquals(extBuilders.size(), response.getInternalResponse().getSearchExtBuilders().size()); + + List<SearchExtBuilder> actual = parsed.getInternalResponse().getSearchExtBuilders(); + assertEquals(extBuilders.size(), actual.size()); + for (int i = 0; i < actual.size(); i++) { + assertTrue(actual.get(0) instanceof GenericSearchExtBuilder); + } + } + /** * This test adds random fields and objects to the xContent rendered out to * ensure we can parse it back to be forward compatible with additions to @@ -180,8 +247,8 @@ public void testFromXContentWithRandomFields() throws IOException { doFromXContentTestWithRandomFields(createMinimalTestItem(), true); } - private void doFromXContentTestWithRandomFields(SearchResponse response, boolean addRandomFields) throws IOException { - XContentType xcontentType = randomFrom(XContentType.values()); + public void doFromXContentTestWithRandomFields(SearchResponse response, boolean addRandomFields) throws IOException { + MediaType xcontentType = randomFrom(XContentType.values()); boolean humanReadable = randomBoolean(); final ToXContent.Params params = new ToXContent.MapParams(singletonMap(RestSearchAction.TYPED_KEYS_PARAM, "true")); BytesReference originalBytes = toShuffledXContent(response, xcontentType, params, humanReadable); @@ -243,6 +310,7 @@ public void testToXContent() { SearchHit hit = new SearchHit(1, "id1", Collections.emptyMap(), Collections.emptyMap()); hit.score(2.0f); SearchHit[] hits = new SearchHit[] { hit }; + String dummyId = UUID.randomUUID().toString(); { SearchResponse response = new SearchResponse( new InternalSearchResponse( @@ -252,7 
+320,8 @@ public void testToXContent() { null, false, null, - 1 + 1, + List.of(new DummySearchExtBuilder(dummyId)) ), null, 0, @@ -260,7 +329,8 @@ public void testToXContent() { 0, 0, ShardSearchFailure.EMPTY_ARRAY, - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ); StringBuilder expectedString = new StringBuilder(); expectedString.append("{"); @@ -278,13 +348,27 @@ public void testToXContent() { { expectedString.append("{\"total\":{\"value\":100,\"relation\":\"eq\"},"); expectedString.append("\"max_score\":1.5,"); - expectedString.append("\"hits\":[{\"_id\":\"id1\",\"_score\":2.0}]}"); + expectedString.append("\"hits\":[{\"_id\":\"id1\",\"_score\":2.0}]},"); + } + expectedString.append("\"ext\":"); + { + expectedString.append("{\"dummy\":\"" + dummyId + "\"}"); } } expectedString.append("}"); - assertEquals(expectedString.toString(), Strings.toString(XContentType.JSON, response)); + assertEquals(expectedString.toString(), Strings.toString(MediaTypeRegistry.JSON, response)); + List<SearchExtBuilder> searchExtBuilders = response.getInternalResponse().getSearchExtBuilders(); + assertEquals(1, searchExtBuilders.size()); } { + Map<String, Long> phaseTookMap = new HashMap<>(); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + phaseTookMap.put(searchPhaseName.getName(), 0L); + } + phaseTookMap.put(SearchPhaseName.QUERY.getName(), 50L); + phaseTookMap.put(SearchPhaseName.FETCH.getName(), 25L); + phaseTookMap.put(SearchPhaseName.EXPAND.getName(), 30L); + SearchResponse.PhaseTook phaseTook = new SearchResponse.PhaseTook(phaseTookMap); SearchResponse response = new SearchResponse( new InternalSearchResponse( new SearchHits(hits, new TotalHits(100, TotalHits.Relation.EQUAL_TO), 1.5f), @@ -300,13 +384,24 @@ public void testToXContent() { 0, 0, 0, + phaseTook, ShardSearchFailure.EMPTY_ARRAY, - new SearchResponse.Clusters(5, 3, 2) + new SearchResponse.Clusters(5, 3, 2), + null ); StringBuilder expectedString = new StringBuilder(); expectedString.append("{"); { expectedString.append("\"took\":0,"); + expectedString.append("\"phase_took\":"); + { + expectedString.append("{\"dfs_pre_query\":0,"); + expectedString.append("\"query\":50,"); + expectedString.append("\"fetch\":25,"); + expectedString.append("\"dfs_query\":0,"); + expectedString.append("\"expand\":30,"); + expectedString.append("\"can_match\":0},"); + } expectedString.append("\"timed_out\":false,"); expectedString.append("\"_shards\":"); { @@ -329,7 +424,7 @@ public void testToXContent() { } } expectedString.append("}"); - assertEquals(expectedString.toString(), Strings.toString(XContentType.JSON, response)); + assertEquals(expectedString.toString(), Strings.toString(MediaTypeRegistry.JSON, response)); } } @@ -350,6 +445,48 @@ public void testSerialization() throws IOException { assertEquals(searchResponse.getClusters(), deserialized.getClusters()); } + public void testSerializationWithSearchExtBuilders() throws IOException { + String id = UUID.randomUUID().toString(); + SearchResponse searchResponse = createTestItem(false, List.of(new DummySearchExtBuilder(id))); + SearchResponse deserialized = copyWriteable(searchResponse, namedWriteableRegistry, SearchResponse::new, Version.CURRENT); + if (searchResponse.getHits().getTotalHits() == null) { + assertNull(deserialized.getHits().getTotalHits()); + } else { + assertEquals(searchResponse.getHits().getTotalHits().value, deserialized.getHits().getTotalHits().value); + assertEquals(searchResponse.getHits().getTotalHits().relation, 
deserialized.getHits().getTotalHits().relation); + } + assertEquals(searchResponse.getHits().getHits().length, deserialized.getHits().getHits().length); + assertEquals(searchResponse.getNumReducePhases(), deserialized.getNumReducePhases()); + assertEquals(searchResponse.getFailedShards(), deserialized.getFailedShards()); + assertEquals(searchResponse.getTotalShards(), deserialized.getTotalShards()); + assertEquals(searchResponse.getSkippedShards(), deserialized.getSkippedShards()); + assertEquals(searchResponse.getClusters(), deserialized.getClusters()); + assertEquals( + searchResponse.getInternalResponse().getSearchExtBuilders().get(0), + deserialized.getInternalResponse().getSearchExtBuilders().get(0) + ); + } + + public void testSerializationWithSearchExtBuildersOnUnsupportedWriterVersion() throws IOException { + String id = UUID.randomUUID().toString(); + SearchResponse searchResponse = createTestItem(false, List.of(new DummySearchExtBuilder(id))); + SearchResponse deserialized = copyWriteable(searchResponse, namedWriteableRegistry, SearchResponse::new, Version.V_2_9_0); + if (searchResponse.getHits().getTotalHits() == null) { + assertNull(deserialized.getHits().getTotalHits()); + } else { + assertEquals(searchResponse.getHits().getTotalHits().value, deserialized.getHits().getTotalHits().value); + assertEquals(searchResponse.getHits().getTotalHits().relation, deserialized.getHits().getTotalHits().relation); + } + assertEquals(searchResponse.getHits().getHits().length, deserialized.getHits().getHits().length); + assertEquals(searchResponse.getNumReducePhases(), deserialized.getNumReducePhases()); + assertEquals(searchResponse.getFailedShards(), deserialized.getFailedShards()); + assertEquals(searchResponse.getTotalShards(), deserialized.getTotalShards()); + assertEquals(searchResponse.getSkippedShards(), deserialized.getSkippedShards()); + assertEquals(searchResponse.getClusters(), deserialized.getClusters()); + assertEquals(1, searchResponse.getInternalResponse().getSearchExtBuilders().size()); + assertTrue(deserialized.getInternalResponse().getSearchExtBuilders().isEmpty()); + } + public void testToXContentEmptyClusters() throws IOException { SearchResponse searchResponse = new SearchResponse( InternalSearchResponse.empty(), @@ -362,8 +499,111 @@ public void testToXContentEmptyClusters() throws IOException { SearchResponse.Clusters.EMPTY ); SearchResponse deserialized = copyWriteable(searchResponse, namedWriteableRegistry, SearchResponse::new, Version.CURRENT); - XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent()); + XContentBuilder builder = XContentBuilder.builder(MediaTypeRegistry.JSON.xContent()); deserialized.getClusters().toXContent(builder, ToXContent.EMPTY_PARAMS); - assertEquals(0, Strings.toString(builder).length()); + assertEquals(0, builder.toString().length()); + } + + public void testSearchResponsePhaseTookEquals() throws IOException { + SearchResponse.PhaseTook phaseTookA = new SearchResponse.PhaseTook(Map.of("foo", 0L, "bar", 1L)); + SearchResponse.PhaseTook phaseTookB = new SearchResponse.PhaseTook(Map.of("foo", 1L, "bar", 1L)); + SearchResponse.PhaseTook phaseTookC = new SearchResponse.PhaseTook(Map.of("foo", 0L)); + SearchResponse.PhaseTook phaseTookD = new SearchResponse.PhaseTook(Map.of()); + + assertNotEquals(phaseTookA, phaseTookB); + assertNotEquals(phaseTookB, phaseTookA); + assertNotEquals(phaseTookA, phaseTookC); + assertNotEquals(phaseTookC, phaseTookA); + assertNotEquals(phaseTookA, phaseTookD); + assertNotEquals(phaseTookD, 
phaseTookA); + assertEquals(phaseTookA, phaseTookA); + assertEquals(phaseTookB, phaseTookB); + assertEquals(phaseTookC, phaseTookC); + assertEquals(phaseTookD, phaseTookD); + } + + static class DummySearchExtBuilder extends SearchExtBuilder { + + static ParseField DUMMY_FIELD = new ParseField("dummy"); + + protected final String id; + + public DummySearchExtBuilder(String id) { + assertNotNull(id); + this.id = id; + } + + public DummySearchExtBuilder(StreamInput in) throws IOException { + this.id = in.readString(); + } + + public String getId() { + return this.id; + } + + @Override + public String getWriteableName() { + return DUMMY_FIELD.getPreferredName(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(this.id); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.field("dummy", id); + } + + @Override + public int hashCode() { + return 0; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + + if (!(obj instanceof DummySearchExtBuilder)) { + return false; + } + + return this.id.equals(((DummySearchExtBuilder) obj).getId()); + } + + public static DummySearchExtBuilder parse(XContentParser parser) throws IOException { + String id; + XContentParser.Token token = parser.currentToken(); + if (token == XContentParser.Token.VALUE_STRING) { + id = parser.text(); + } else { + throw new ParsingException(parser.getTokenLocation(), "Expected a VALUE_STRING but got " + token); + } + if (id == null) { + throw new ParsingException(parser.getTokenLocation(), "no id specified for " + DUMMY_FIELD.getPreferredName()); + } + return new DummySearchExtBuilder(id); + } + } + + static class FakeSearchExtBuilder extends DummySearchExtBuilder { + static ParseField DUMMY_FIELD = new ParseField("fake"); + + public FakeSearchExtBuilder(String id) { + super(id); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(DUMMY_FIELD.getPreferredName()); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.field(DUMMY_FIELD.getPreferredName(), id); + } } } diff --git a/server/src/test/java/org/opensearch/action/search/SearchScrollAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/SearchScrollAsyncActionTests.java index 7ae96e51df76a..3653f05936cf1 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchScrollAsyncActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchScrollAsyncActionTests.java @@ -32,13 +32,13 @@ package org.opensearch.action.search; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.OriginalIndices; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.UUIDs; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AtomicArray; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; import org.opensearch.search.Scroll; import org.opensearch.search.SearchShardTarget; diff --git a/server/src/test/java/org/opensearch/action/search/SearchScrollRequestTests.java b/server/src/test/java/org/opensearch/action/search/SearchScrollRequestTests.java index 9d59266242c90..f393b7e9eba05 100644 --- 
a/server/src/test/java/org/opensearch/action/search/SearchScrollRequestTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchScrollRequestTests.java @@ -32,19 +32,18 @@ package org.opensearch.action.search; -import org.opensearch.common.Strings; import org.opensearch.common.UUIDs; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.unit.TimeValue; -import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentHelper; +import org.opensearch.core.xcontent.XContentParser; import org.opensearch.search.internal.InternalScrollSearchRequest; import org.opensearch.search.internal.ShardSearchContextId; import org.opensearch.test.OpenSearchTestCase; @@ -120,7 +119,7 @@ public void testToXContent() throws IOException { searchScrollRequest.scroll("1m"); try (XContentBuilder builder = JsonXContent.contentBuilder()) { searchScrollRequest.toXContent(builder, ToXContent.EMPTY_PARAMS); - assertEquals("{\"scroll_id\":\"SCROLL_ID\",\"scroll\":\"1m\"}", Strings.toString(builder)); + assertEquals("{\"scroll_id\":\"SCROLL_ID\",\"scroll\":\"1m\"}", builder.toString()); } } diff --git a/server/src/test/java/org/opensearch/action/search/SearchShardIteratorTests.java b/server/src/test/java/org/opensearch/action/search/SearchShardIteratorTests.java index 489893dde0c0e..1d84b387d2051 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchShardIteratorTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchShardIteratorTests.java @@ -38,8 +38,8 @@ import org.opensearch.cluster.routing.GroupShardsIteratorTests; import org.opensearch.core.index.shard.ShardId; import org.opensearch.search.SearchShardTarget; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.EqualsHashCodeTestUtils; +import org.opensearch.test.OpenSearchTestCase; import org.hamcrest.Matchers; import java.util.ArrayList; diff --git a/server/src/test/java/org/opensearch/action/search/SearchShardTests.java b/server/src/test/java/org/opensearch/action/search/SearchShardTests.java index 770340240296e..15b44b4e2be29 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchShardTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchShardTests.java @@ -34,8 +34,8 @@ import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.EqualsHashCodeTestUtils; +import org.opensearch.test.OpenSearchTestCase; import java.util.ArrayList; import java.util.Collections; diff --git a/server/src/test/java/org/opensearch/action/search/ShardSearchFailureTests.java b/server/src/test/java/org/opensearch/action/search/ShardSearchFailureTests.java index 4fa555f0edddc..0ad3b5b0ed243 100644 --- a/server/src/test/java/org/opensearch/action/search/ShardSearchFailureTests.java 
+++ b/server/src/test/java/org/opensearch/action/search/ShardSearchFailureTests.java @@ -34,20 +34,21 @@ import org.opensearch.action.OriginalIndices; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentParser; import org.opensearch.search.SearchShardTarget; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; import java.io.IOException; -import static org.opensearch.common.xcontent.XContentHelper.toXContent; +import static org.opensearch.core.xcontent.XContentHelper.toXContent; import static org.opensearch.test.XContentTestUtils.insertRandomFields; public class ShardSearchFailureTests extends OpenSearchTestCase { @@ -124,7 +125,7 @@ public void testToXContent() throws IOException { new ParsingException(0, 0, "some message", null), new SearchShardTarget("nodeId", new ShardId(new Index("indexName", "indexUuid"), 123), null, OriginalIndices.NONE) ); - BytesReference xContent = toXContent(failure, XContentType.JSON, randomBoolean()); + BytesReference xContent = toXContent(failure, MediaTypeRegistry.JSON, randomBoolean()); assertEquals( "{\"shard\":123," + "\"index\":\"indexName\"," @@ -145,7 +146,7 @@ public void testToXContentWithClusterAlias() throws IOException { new ParsingException(0, 0, "some message", null), new SearchShardTarget("nodeId", new ShardId(new Index("indexName", "indexUuid"), 123), "cluster1", OriginalIndices.NONE) ); - BytesReference xContent = toXContent(failure, XContentType.JSON, randomBoolean()); + BytesReference xContent = toXContent(failure, MediaTypeRegistry.JSON, randomBoolean()); assertEquals( "{\"shard\":123," + "\"index\":\"cluster1:indexName\"," diff --git a/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java b/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java index 3d76e711788db..a1e3a2b03caf7 100644 --- a/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java @@ -7,9 +7,7 @@ */ package org.opensearch.action.search; -import org.junit.Before; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilter; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.PlainActionFuture; @@ -20,19 +18,24 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.tasks.TaskId; import org.opensearch.index.query.IdsQueryBuilder; import org.opensearch.index.query.MatchAllQueryBuilder; import 
org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.RemoteClusterConnectionTests; import org.opensearch.transport.Transport; +import org.opensearch.transport.TransportService; +import org.junit.Before; import java.util.ArrayList; import java.util.Arrays; @@ -42,11 +45,11 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; +import static org.opensearch.action.search.PitTestsUtil.getPitId; +import static org.opensearch.action.support.PlainActionFuture.newFuture; import static org.hamcrest.Matchers.containsString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import static org.opensearch.action.search.PitTestsUtil.getPitId; -import static org.opensearch.action.support.PlainActionFuture.newFuture; /** * Functional tests for transport delete pit action */ @@ -141,7 +144,7 @@ public void testDeletePitSuccess() throws InterruptedException, ExecutionExcepti Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -204,7 +207,7 @@ public void testDeleteAllPITSuccess() throws InterruptedException, ExecutionExce Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -261,6 +264,46 @@ public void getAllPits(ActionListener<GetAllPitNodesResponse> getAllPitsListener } } + public void testDeleteAllPITSuccessWhenNoPITsExist() throws InterruptedException, ExecutionException { + ActionFilters actionFilters = mock(ActionFilters.class); + when(actionFilters.filters()).thenReturn(new ActionFilter[0]); + List<DiscoveryNode> knownNodes = new CopyOnWriteArrayList<>(); + try (MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT)) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + TransportService mockTransportService = mock(TransportService.class); + PitService pitService = new PitService(clusterServiceMock, mock(SearchTransportService.class), mockTransportService, client) { + @Override + public void getAllPits(ActionListener<GetAllPitNodesResponse> getAllPitsListener) { + List<ListPitInfo> list = new ArrayList<>(); + GetAllPitNodeResponse getAllPitNodeResponse = new GetAllPitNodeResponse(cluster1Transport.getLocalDiscoNode(), list); + List<GetAllPitNodeResponse> nodeList = new ArrayList<>(); + nodeList.add(getAllPitNodeResponse); + getAllPitsListener.onResponse(new GetAllPitNodesResponse(new ClusterName("cn"), nodeList, new ArrayList<>())); + } + }; + TransportDeletePitAction action = new TransportDeletePitAction( + mockTransportService, + actionFilters, + namedWriteableRegistry, + pitService + ); + DeletePitRequest deletePITRequest = new DeletePitRequest("_all"); + ActionListener<DeletePitResponse> listener = new ActionListener<DeletePitResponse>() { + @Override + public void onResponse(DeletePitResponse deletePitResponse) { + assertEquals(RestStatus.OK, deletePitResponse.status()); + assertEquals(0, deletePitResponse.getDeletePitResults().size()); + } + + @Override + public void onFailure(Exception e) { + fail("Should not receive Exception"); + } + }; + action.execute(task, deletePITRequest, listener); + } + } + public void
testDeletePitWhenNodeIsDown() throws InterruptedException, ExecutionException { List<DiscoveryNode> deleteNodesInvoked = new CopyOnWriteArrayList<>(); ActionFilters actionFilters = mock(ActionFilters.class); @@ -279,7 +322,7 @@ public void testDeletePitWhenNodeIsDown() throws InterruptedException, Execution Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -342,7 +385,7 @@ public void testDeletePitWhenAllNodesAreDown() { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -400,7 +443,7 @@ public void testDeletePitFailure() { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -465,7 +508,7 @@ public void testDeleteAllPitWhenNodeIsDown() { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -542,7 +585,7 @@ public void testDeleteAllPitWhenAllNodesAreDown() { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -615,7 +658,7 @@ public void testDeleteAllPitFailure() { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); diff --git a/server/src/test/java/org/opensearch/action/search/TransportMultiSearchActionTests.java b/server/src/test/java/org/opensearch/action/search/TransportMultiSearchActionTests.java index 5fd5e7315e553..48970e2b96add 100644 --- a/server/src/test/java/org/opensearch/action/search/TransportMultiSearchActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/TransportMultiSearchActionTests.java @@ -33,7 +33,6 @@ package org.opensearch.action.search; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilter; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.ActionTestUtils; @@ -48,9 +47,11 @@ import org.opensearch.common.Randomness; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; import org.opensearch.search.internal.InternalSearchResponse; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskManager; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; @@ -87,7 +88,8 @@ public void testParentTaskId() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ) { @Override public TaskManager getTaskManager() { @@ -151,7 +153,8 @@ public void testBatchExecute() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ) { @Override public TaskManager getTaskManager() { diff --git a/server/src/test/java/org/opensearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/opensearch/action/search/TransportSearchActionTests.java index 413fff98de44c..da19c839f3826 100644 --- a/server/src/test/java/org/opensearch/action/search/TransportSearchActionTests.java +++ 
b/server/src/test/java/org/opensearch/action/search/TransportSearchActionTests.java @@ -32,9 +32,9 @@ package org.opensearch.action.search; +import org.apache.logging.log4j.LogManager; import org.apache.lucene.search.TotalHits; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.LatchedActionListener; import org.opensearch.action.OriginalIndices; import org.opensearch.action.OriginalIndicesTests; @@ -54,15 +54,16 @@ import org.opensearch.common.SetOnce; import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.rest.RestStatus; import org.opensearch.index.query.InnerHitBuilder; import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.query.TermsQueryBuilder; -import org.opensearch.core.index.shard.ShardId; -import org.opensearch.core.rest.RestStatus; import org.opensearch.search.Scroll; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; @@ -75,6 +76,7 @@ import org.opensearch.search.internal.InternalSearchResponse; import org.opensearch.search.internal.SearchContext; import org.opensearch.search.sort.SortBuilders; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; @@ -234,7 +236,14 @@ public void testMergeShardsIterators() { } public void testProcessRemoteShards() { - try (TransportService transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + try ( + TransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { RemoteClusterService service = transportService.getRemoteClusterService(); assertFalse(service.isCrossClusterSearchEnabled()); Map<String, ClusterSearchShardsResponse> searchShardsResponseMap = new HashMap<>(); @@ -451,7 +460,9 @@ public void testCCSRemoteReduceMergeFails() throws Exception { OriginalIndices localIndices = local ? 
new OriginalIndices(new String[] { "index" }, SearchRequest.DEFAULT_INDICES_OPTIONS) : null; TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0); Function<Boolean, InternalAggregation.ReduceContext> reduceContext = finalReduce -> null; - try (MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, NoopTracer.INSTANCE) + ) { service.start(); service.acceptIncomingRequests(); RemoteClusterService remoteClusterService = service.getRemoteClusterService(); @@ -473,7 +484,11 @@ public void testCCSRemoteReduceMergeFails() throws Exception { remoteClusterService, threadPool, listener, - (r, l) -> setOnce.set(Tuple.tuple(r, l)) + (r, l) -> setOnce.set(Tuple.tuple(r, l)), + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + searchRequest + ) ); if (localIndices == null) { assertNull(setOnce.get()); @@ -507,7 +522,9 @@ public void testCCSRemoteReduce() throws Exception { OriginalIndices localIndices = local ? new OriginalIndices(new String[] { "index" }, SearchRequest.DEFAULT_INDICES_OPTIONS) : null; int totalClusters = numClusters + (local ? 1 : 0); TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0); - try (MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, NoopTracer.INSTANCE) + ) { service.start(); service.acceptIncomingRequests(); RemoteClusterService remoteClusterService = service.getRemoteClusterService(); @@ -529,7 +546,11 @@ public void testCCSRemoteReduce() throws Exception { remoteClusterService, threadPool, listener, - (r, l) -> setOnce.set(Tuple.tuple(r, l)) + (r, l) -> setOnce.set(Tuple.tuple(r, l)), + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + searchRequest + ) ); if (localIndices == null) { assertNull(setOnce.get()); @@ -566,7 +587,11 @@ public void testCCSRemoteReduce() throws Exception { remoteClusterService, threadPool, listener, - (r, l) -> setOnce.set(Tuple.tuple(r, l)) + (r, l) -> setOnce.set(Tuple.tuple(r, l)), + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + searchRequest + ) ); if (localIndices == null) { assertNull(setOnce.get()); @@ -624,7 +649,11 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti remoteClusterService, threadPool, listener, - (r, l) -> setOnce.set(Tuple.tuple(r, l)) + (r, l) -> setOnce.set(Tuple.tuple(r, l)), + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + searchRequest + ) ); if (localIndices == null) { assertNull(setOnce.get()); @@ -664,7 +693,11 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti remoteClusterService, threadPool, listener, - (r, l) -> setOnce.set(Tuple.tuple(r, l)) + (r, l) -> setOnce.set(Tuple.tuple(r, l)), + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + searchRequest + ) ); if (localIndices == null) { assertNull(setOnce.get()); @@ 
-715,7 +748,11 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti remoteClusterService, threadPool, listener, - (r, l) -> setOnce.set(Tuple.tuple(r, l)) + (r, l) -> setOnce.set(Tuple.tuple(r, l)), + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + searchRequest + ) ); if (localIndices == null) { assertNull(setOnce.get()); @@ -748,7 +785,9 @@ public void testCollectSearchShards() throws Exception { Settings.Builder builder = Settings.builder(); MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder); Settings settings = builder.build(); - try (MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, NoopTracer.INSTANCE) + ) { service.start(); service.acceptIncomingRequests(); RemoteClusterService remoteClusterService = service.getRemoteClusterService(); diff --git a/server/src/test/java/org/opensearch/action/support/ActiveShardCountTests.java b/server/src/test/java/org/opensearch/action/support/ActiveShardCountTests.java index 991d118e5243b..89908894f2f30 100644 --- a/server/src/test/java/org/opensearch/action/support/ActiveShardCountTests.java +++ b/server/src/test/java/org/opensearch/action/support/ActiveShardCountTests.java @@ -42,8 +42,8 @@ import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.UUIDs; -import org.opensearch.core.common.io.stream.ByteBufferStreamInput; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.ByteBufferStreamInput; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/action/support/ContextPreservingActionListenerTests.java b/server/src/test/java/org/opensearch/action/support/ContextPreservingActionListenerTests.java index 6ca38b2a2fc4f..0b9f2c6707c02 100644 --- a/server/src/test/java/org/opensearch/action/support/ContextPreservingActionListenerTests.java +++ b/server/src/test/java/org/opensearch/action/support/ContextPreservingActionListenerTests.java @@ -31,9 +31,9 @@ package org.opensearch.action.support; -import org.opensearch.action.ActionListener; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.ActionListener; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/action/support/GroupedActionListenerTests.java b/server/src/test/java/org/opensearch/action/support/GroupedActionListenerTests.java index bbdd2351910d6..bac019264b7d3 100644 --- a/server/src/test/java/org/opensearch/action/support/GroupedActionListenerTests.java +++ b/server/src/test/java/org/opensearch/action/support/GroupedActionListenerTests.java @@ -31,7 +31,7 @@ package org.opensearch.action.support; -import org.opensearch.action.ActionListener; +import org.opensearch.core.action.ActionListener; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/action/support/IndicesOptionsTests.java b/server/src/test/java/org/opensearch/action/support/IndicesOptionsTests.java index 0e25091d845b5..b7a226002c712 100644 --- 
a/server/src/test/java/org/opensearch/action/support/IndicesOptionsTests.java +++ b/server/src/test/java/org/opensearch/action/support/IndicesOptionsTests.java @@ -35,17 +35,17 @@ import org.opensearch.Version; import org.opensearch.action.support.IndicesOptions.Option; import org.opensearch.action.support.IndicesOptions.WildcardStates; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent.MapParams; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.EqualsHashCodeTestUtils; +import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; import java.util.ArrayList; @@ -422,7 +422,7 @@ public void testFromXContentWithWildcardSpecialValues() throws IOException { final boolean allowNoIndices = randomBoolean(); BytesReference xContentBytes; - try (XContentBuilder builder = XContentFactory.contentBuilder(type)) { + try (XContentBuilder builder = MediaTypeRegistry.contentBuilder(type)) { builder.startObject(); builder.field("expand_wildcards", "all"); builder.field("ignore_unavailable", ignoreUnavailable); @@ -441,7 +441,7 @@ public void testFromXContentWithWildcardSpecialValues() throws IOException { assertTrue(fromXContentOptions.expandWildcardsHidden()); assertTrue(fromXContentOptions.expandWildcardsOpen()); - try (XContentBuilder builder = XContentFactory.contentBuilder(type)) { + try (XContentBuilder builder = MediaTypeRegistry.contentBuilder(type)) { builder.startObject(); builder.field("expand_wildcards", "none"); builder.field("ignore_unavailable", ignoreUnavailable); @@ -461,7 +461,7 @@ public void testFromXContentWithWildcardSpecialValues() throws IOException { } private BytesReference toXContentBytes(IndicesOptions indicesOptions, XContentType type) throws IOException { - try (XContentBuilder builder = XContentFactory.contentBuilder(type)) { + try (XContentBuilder builder = MediaTypeRegistry.contentBuilder(type)) { builder.startObject(); indicesOptions.toXContent(builder, new MapParams(Collections.emptyMap())); builder.endObject(); diff --git a/server/src/test/java/org/opensearch/action/support/ListenableActionFutureTests.java b/server/src/test/java/org/opensearch/action/support/ListenableActionFutureTests.java index b3fa03a3f873c..c69d357e51fc9 100644 --- a/server/src/test/java/org/opensearch/action/support/ListenableActionFutureTests.java +++ b/server/src/test/java/org/opensearch/action/support/ListenableActionFutureTests.java @@ -31,8 +31,8 @@ package org.opensearch.action.support; -import org.opensearch.action.ActionListener; import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.core.action.ActionListener; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/test/java/org/opensearch/action/support/ListenerTimeoutsTests.java b/server/src/test/java/org/opensearch/action/support/ListenerTimeoutsTests.java index 
9e9f27688e121..424a7c1d16895 100644 --- a/server/src/test/java/org/opensearch/action/support/ListenerTimeoutsTests.java +++ b/server/src/test/java/org/opensearch/action/support/ListenerTimeoutsTests.java @@ -33,10 +33,10 @@ package org.opensearch.action.support; import org.opensearch.OpenSearchTimeoutException; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.coordination.DeterministicTaskQueue; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; import org.junit.Before; diff --git a/server/src/test/java/org/opensearch/action/support/RetryableActionTests.java b/server/src/test/java/org/opensearch/action/support/RetryableActionTests.java index 85eda47e9b797..fe2994f9fe9bb 100644 --- a/server/src/test/java/org/opensearch/action/support/RetryableActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/RetryableActionTests.java @@ -33,10 +33,10 @@ package org.opensearch.action.support; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.coordination.DeterministicTaskQueue; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; diff --git a/server/src/test/java/org/opensearch/action/support/TransportActionFilterChainTests.java b/server/src/test/java/org/opensearch/action/support/TransportActionFilterChainTests.java index 35c346957ebd2..a4f40db365f9a 100644 --- a/server/src/test/java/org/opensearch/action/support/TransportActionFilterChainTests.java +++ b/server/src/test/java/org/opensearch/action/support/TransportActionFilterChainTests.java @@ -33,13 +33,13 @@ package org.opensearch.action.support; import org.opensearch.OpenSearchTimeoutException; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.ActionResponse; import org.opensearch.action.LatchedActionListener; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.node.Node; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskManager; diff --git a/server/src/test/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java b/server/src/test/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java index 23654c02f0901..4305151965ab6 100644 --- a/server/src/test/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java @@ -36,7 +36,6 @@ import org.opensearch.Version; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.ActionFilters; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.PlainActionFuture; import 
org.opensearch.action.support.broadcast.BroadcastRequest; import org.opensearch.action.support.broadcast.BroadcastResponse; @@ -60,20 +59,22 @@ import org.opensearch.cluster.routing.ShardsIterator; import org.opensearch.cluster.routing.TestShardRouting; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.ReceiveTimeoutTransportException; import org.opensearch.transport.TestTransportChannel; -import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportService; import org.junit.After; import org.junit.AfterClass; @@ -237,7 +238,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java index bcc4861db0a8d..9ae1310a8b15c 100644 --- a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java @@ -33,10 +33,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.Version; -import org.opensearch.action.ActionFuture; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.ActionResponse; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.ThreadedActionListener; @@ -54,15 +51,19 @@ import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.service.ClusterManagerThrottlingException; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.rest.RestStatus; import 
org.opensearch.discovery.ClusterManagerNotDiscoveredException; import org.opensearch.node.NodeClosedException; -import org.opensearch.core.rest.RestStatus; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; @@ -79,10 +80,10 @@ import java.util.HashSet; import java.util.Objects; import java.util.Set; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import static org.opensearch.test.ClusterServiceUtils.createClusterService; @@ -117,7 +118,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportMasterNodeActionUtils.java b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportMasterNodeActionUtils.java index b9abddc5622c9..ce44c4d2c5b48 100644 --- a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportMasterNodeActionUtils.java +++ b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportMasterNodeActionUtils.java @@ -32,9 +32,9 @@ package org.opensearch.action.support.clustermanager; -import org.opensearch.action.ActionListener; -import org.opensearch.action.ActionResponse; import org.opensearch.cluster.ClusterState; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; public class TransportMasterNodeActionUtils { diff --git a/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java b/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java index 28f01d0e6ea4a..445934b0ccdfd 100644 --- a/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java @@ -46,6 +46,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; @@ -209,7 +210,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/action/support/replication/BroadcastReplicationTests.java b/server/src/test/java/org/opensearch/action/support/replication/BroadcastReplicationTests.java index 330d43e791555..19a9918fa4561 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/BroadcastReplicationTests.java +++ 
b/server/src/test/java/org/opensearch/action/support/replication/BroadcastReplicationTests.java @@ -32,7 +32,6 @@ package org.opensearch.action.support.replication; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.NoShardAvailableActionException; import org.opensearch.action.UnavailableShardsException; import org.opensearch.action.admin.indices.flush.FlushRequest; @@ -40,7 +39,6 @@ import org.opensearch.action.admin.indices.flush.TransportFlushAction; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.ActionTestUtils; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.broadcast.BroadcastRequest; import org.opensearch.action.support.broadcast.BroadcastResponse; @@ -49,19 +47,22 @@ import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.collect.Tuple; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.shard.ShardId; -import org.opensearch.indices.breaker.CircuitBreakerService; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.indices.breaker.CircuitBreakerService; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.core.rest.RestStatus; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -115,7 +116,8 @@ public void setUp() throws Exception { new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(Collections.emptyList()), - circuitBreakerService + circuitBreakerService, + NoopTracer.INSTANCE ); clusterService = createClusterService(threadPool); transportService = new TransportService( @@ -125,7 +127,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/action/support/replication/PendingReplicationActionsTests.java b/server/src/test/java/org/opensearch/action/support/replication/PendingReplicationActionsTests.java index a26198a5602cc..8fa4441bbd5ec 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/PendingReplicationActionsTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/PendingReplicationActionsTests.java @@ -32,14 +32,14 @@ package 
org.opensearch.action.support.replication; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.RetryableAction; import org.opensearch.common.UUIDs; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.IndexShardClosedException; import org.opensearch.index.shard.PrimaryShardClosedException; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; diff --git a/server/src/test/java/org/opensearch/action/support/replication/ReplicationOperationTests.java b/server/src/test/java/org/opensearch/action/support/replication/ReplicationOperationTests.java index 78081a8f83ce3..6b54623b03164 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/ReplicationOperationTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/ReplicationOperationTests.java @@ -35,7 +35,6 @@ import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.store.AlreadyClosedException; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.UnavailableShardsException; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.PlainActionFuture; @@ -49,17 +48,18 @@ import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.breaker.CircuitBreakingException; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ConcurrentCollections; -import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.common.util.set.Sets; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.CircuitBreakingException; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.IndexShardNotStartedException; import org.opensearch.index.shard.IndexShardState; import org.opensearch.index.shard.ReplicationGroup; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.node.NodeClosedException; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; @@ -84,15 +84,15 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; +import static org.opensearch.action.support.replication.ClusterStateCreationUtils.state; +import static org.opensearch.action.support.replication.ClusterStateCreationUtils.stateWithActivePrimary; +import static org.opensearch.action.support.replication.ReplicationOperation.RetryOnPrimaryException; +import static org.opensearch.cluster.routing.TestShardRouting.newShardRouting; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -import static 
org.opensearch.action.support.replication.ClusterStateCreationUtils.state; -import static org.opensearch.action.support.replication.ClusterStateCreationUtils.stateWithActivePrimary; -import static org.opensearch.action.support.replication.ReplicationOperation.RetryOnPrimaryException; -import static org.opensearch.cluster.routing.TestShardRouting.newShardRouting; public class ReplicationOperationTests extends OpenSearchTestCase { diff --git a/server/src/test/java/org/opensearch/action/support/replication/ReplicationResponseTests.java b/server/src/test/java/org/opensearch/action/support/replication/ReplicationResponseTests.java index 5e32193415bd8..ee92126a3d72e 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/ReplicationResponseTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/ReplicationResponseTests.java @@ -34,16 +34,17 @@ import org.opensearch.OpenSearchException; import org.opensearch.action.support.replication.ReplicationResponse.ShardInfo; -import org.opensearch.common.Strings; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.breaker.CircuitBreakingException; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; -import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.CircuitBreakingException; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentParser; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.RandomObjects; @@ -51,7 +52,7 @@ import java.util.Locale; import static org.opensearch.OpenSearchExceptionTests.assertDeepEquals; -import static org.opensearch.common.xcontent.XContentHelper.toXContent; +import static org.opensearch.core.xcontent.XContentHelper.toXContent; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertToXContentEquivalent; public class ReplicationResponseTests extends OpenSearchTestCase { @@ -66,7 +67,7 @@ public void testShardInfoToString() { public void testShardInfoToXContent() throws IOException { { ShardInfo shardInfo = new ShardInfo(5, 3); - String output = Strings.toString(XContentType.JSON, shardInfo); + String output = Strings.toString(MediaTypeRegistry.JSON, shardInfo); assertEquals("{\"total\":5,\"successful\":3,\"failed\":0}", output); } { @@ -88,7 +89,7 @@ public void testShardInfoToXContent() throws IOException { true ) ); - String output = Strings.toString(XContentType.JSON, shardInfo); + String output = Strings.toString(MediaTypeRegistry.JSON, shardInfo); assertEquals( "{\"total\":6,\"successful\":4,\"failed\":2,\"failures\":[{\"_index\":\"index\",\"_shard\":3," + "\"_node\":\"_node_id\",\"reason\":{\"type\":\"illegal_argument_exception\",\"reason\":\"Wrong\"}," diff --git a/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java b/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java index 089bcf77afbae..dad0fa0efd3ec 100644 --- 
a/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java @@ -33,15 +33,8 @@ package org.opensearch.action.support.replication; import org.apache.lucene.store.AlreadyClosedException; -import org.hamcrest.Matcher; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; import org.opensearch.OpenSearchException; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.UnavailableShardsException; import org.opensearch.action.admin.indices.close.CloseIndexRequest; import org.opensearch.action.admin.indices.create.CreateIndexRequest; @@ -70,28 +63,31 @@ import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.PageCacheRecycler; -import org.opensearch.common.lease.Releasable; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.IndexService; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardClosedException; import org.opensearch.index.shard.IndexShardState; import org.opensearch.index.shard.ReplicationGroup; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.ShardNotFoundException; import org.opensearch.index.shard.ShardNotInPrimaryModeException; import org.opensearch.indices.IndexClosedException; import org.opensearch.indices.IndicesService; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.indices.cluster.ClusterStateChanges; -import org.opensearch.core.rest.RestStatus; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.test.transport.MockTransportService; @@ -102,9 +98,14 @@ import org.opensearch.transport.TransportChannel; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequest; -import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportService; import org.opensearch.transport.nio.MockNioTransport; +import org.hamcrest.Matcher; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; import java.io.IOException; import java.util.Collections; @@ -121,6 +122,11 @@ import java.util.stream.Collectors; import static 
java.util.Collections.singleton; +import static org.opensearch.action.support.replication.ClusterStateCreationUtils.state; +import static org.opensearch.action.support.replication.ClusterStateCreationUtils.stateWithActivePrimary; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_WAIT_FOR_ACTIVE_SHARDS; +import static org.opensearch.test.ClusterServiceUtils.createClusterService; +import static org.opensearch.test.ClusterServiceUtils.setState; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.equalTo; @@ -139,11 +145,6 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import static org.opensearch.action.support.replication.ClusterStateCreationUtils.state; -import static org.opensearch.action.support.replication.ClusterStateCreationUtils.stateWithActivePrimary; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_WAIT_FOR_ACTIVE_SHARDS; -import static org.opensearch.test.ClusterServiceUtils.createClusterService; -import static org.opensearch.test.ClusterServiceUtils.setState; public class TransportReplicationActionTests extends OpenSearchTestCase { @@ -152,7 +153,7 @@ public class TransportReplicationActionTests extends OpenSearchTestCase { /** * takes a request that was sent by a {@link TransportReplicationAction} and captured * and returns the underlying request if it's wrapped or the original (cast to the expected type). - * + * <p> * This will throw a {@link ClassCastException} if the request is of the wrong type. */ public static <R extends ReplicationRequest> R resolveRequest(TransportRequest requestOrWrappedRequest) { @@ -195,7 +196,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); @@ -1316,7 +1318,8 @@ public void testRetryOnReplicaWithRealTransport() throws Exception { new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, namedWriteableRegistry, - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ); transportService = new MockTransportService( Settings.EMPTY, @@ -1325,7 +1328,8 @@ public void testRetryOnReplicaWithRealTransport() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java b/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java index f7c26ed8fea03..cce8758ef1014 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java @@ -32,7 +32,6 @@ package org.opensearch.action.support.replication; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.ClusterState; @@ -54,17 
+53,19 @@ import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.SetOnce; -import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.lease.Releasable; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.rest.RestStatus; import org.opensearch.index.IndexService; import org.opensearch.index.engine.InternalEngineFactory; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.IndicesService; -import org.opensearch.core.rest.RestStatus; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.transport.MockTransport; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportException; @@ -113,7 +114,7 @@ * This test tests the concurrent execution of several transport replication actions. All of these actions (except one) acquire a single * permit during their execution on shards and are expected to fail if a global level or index level block is present in the cluster state. * These actions are all started at the same time, but some are delayed until one last action. - * + * <p> * This last action is special because it acquires all the permits on shards, adds the block to the cluster state and then "releases" the * previously delayed single permit actions. This way, there is a clear transition between the single permit actions executed before the * all permit action that sets the block and those executed afterwards that are doomed to fail because of the block. 
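The permit scheme that Javadoc describes can be sketched with a plain counting semaphore: routine operations each take one permit and may run concurrently, while the blocking action must drain every permit before it mutates cluster state. This is an illustrative model only, assuming a semaphore-based gate; the class and method names below are hypothetical, and OpenSearch's actual mechanism lives in IndexShardOperationPermits.

import java.util.concurrent.Semaphore;

/**
 * Illustrative sketch: "one permit per operation" vs. "acquire all permits
 * to block", modeled with a counting semaphore. Hypothetical names, not
 * the real IndexShardOperationPermits API.
 */
final class PermitGateSketch {
    private static final int TOTAL_PERMITS = Integer.MAX_VALUE;
    private final Semaphore permits = new Semaphore(TOTAL_PERMITS);

    /** A routine single-permit operation; many of these may run concurrently. */
    void runSinglePermitOperation(Runnable operation) throws InterruptedException {
        permits.acquire(1);
        try {
            operation.run();
        } finally {
            permits.release(1);
        }
    }

    /**
     * The all-permits operation: waits until every in-flight single-permit
     * operation has finished, runs exclusively (e.g. to install a cluster
     * block), then releases everything so later operations observe the block.
     */
    void runAllPermitsOperation(Runnable operation) throws InterruptedException {
        permits.acquire(TOTAL_PERMITS);
        try {
            operation.run();
        } finally {
            permits.release(TOTAL_PERMITS);
        }
    }
}

In this model, single-permit requests that arrive while all permits are held simply block on acquire(1) and proceed only after the all-permits action releases, which is exactly the clean before/after transition around the block that the test stages.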
@@ -232,7 +233,8 @@ public String executor() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, bta -> node1, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionForIndexingPressureTests.java b/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionForIndexingPressureTests.java index c978031103ff2..7212b1f5efe13 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionForIndexingPressureTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionForIndexingPressureTests.java @@ -5,7 +5,6 @@ package org.opensearch.action.support.replication; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.PlainActionFuture; @@ -18,30 +17,32 @@ import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; -import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.lease.Releasable; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.index.IndexService; import org.opensearch.index.IndexingPressureService; import org.opensearch.index.ShardIndexingPressureSettings; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; import org.opensearch.index.shard.ReplicationGroup; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.ShardNotFoundException; import org.opensearch.index.shard.ShardNotInPrimaryModeException; import org.opensearch.index.stats.IndexingPressurePerShardStats; import org.opensearch.index.translog.Translog; import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportChannel; -import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportService; import org.hamcrest.Matcher; import org.junit.After; @@ -102,7 +103,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); @@ -390,7 +392,8 @@ protected TestAction( ignore -> ThreadPool.Names.SAME, false, TransportWriteActionForIndexingPressureTests.this.indexingPressureService, - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java 
b/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java index 7a0d0f3814100..b4549f82230bf 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java @@ -32,9 +32,7 @@ package org.opensearch.action.support.replication; -import org.hamcrest.MatcherAssert; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.ActionTestUtils; import org.opensearch.action.support.PlainActionFuture; @@ -51,20 +49,23 @@ import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.TestShardRouting; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.settings.Settings; import org.opensearch.common.lease.Releasable; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.index.IndexService; import org.opensearch.index.IndexingPressureService; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.PrimaryShardClosedException; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.ShardNotFoundException; import org.opensearch.index.translog.Translog; import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; import org.opensearch.node.NodeClosedException; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; @@ -72,13 +73,12 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportException; -import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportService; +import org.hamcrest.MatcherAssert; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; -import org.mockito.ArgumentCaptor; import java.io.IOException; import java.util.Collections; @@ -92,10 +92,12 @@ import java.util.function.Consumer; import java.util.stream.Collectors; +import org.mockito.ArgumentCaptor; + import static java.util.Collections.emptyMap; -import static org.hamcrest.Matchers.emptyArray; import static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.hamcrest.Matchers.arrayWithSize; +import static org.hamcrest.Matchers.emptyArray; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.mockito.Mockito.any; @@ -287,7 +289,8 @@ public void testReplicaProxy() throws InterruptedException, ExecutionException { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); @@ -406,7 +409,8 @@ public void testPrimaryClosedDoesNotFailShard() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> 
clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); @@ -460,7 +464,8 @@ protected TestAction(boolean withDocumentFailureOnPrimary, boolean withDocumentF TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ), TransportWriteActionTests.this.clusterService, null, @@ -472,7 +477,8 @@ protected TestAction(boolean withDocumentFailureOnPrimary, boolean withDocumentF ignore -> ThreadPool.Names.SAME, false, new IndexingPressureService(Settings.EMPTY, TransportWriteActionTests.this.clusterService), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); this.withDocumentFailureOnPrimary = withDocumentFailureOnPrimary; this.withDocumentFailureOnReplica = withDocumentFailureOnReplica; @@ -500,7 +506,8 @@ protected TestAction( ignore -> ThreadPool.Names.SAME, false, new IndexingPressureService(settings, clusterService), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); this.withDocumentFailureOnPrimary = false; this.withDocumentFailureOnReplica = false; diff --git a/server/src/test/java/org/opensearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java b/server/src/test/java/org/opensearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java index 02f245e94ffdc..118b4e596fc66 100644 --- a/server/src/test/java/org/opensearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java @@ -33,8 +33,6 @@ package org.opensearch.action.support.single.instance; import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionListener; -import org.opensearch.action.ActionResponse; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.PlainActionFuture; @@ -49,14 +47,17 @@ import org.opensearch.cluster.routing.ShardIterator; import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.rest.RestStatus; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; @@ -188,7 +189,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git 
a/server/src/test/java/org/opensearch/action/termvectors/AbstractTermVectorsTestCase.java b/server/src/test/java/org/opensearch/action/termvectors/AbstractTermVectorsTestCase.java index 23153b5a45d4c..e84b5213be39e 100644 --- a/server/src/test/java/org/opensearch/action/termvectors/AbstractTermVectorsTestCase.java +++ b/server/src/test/java/org/opensearch/action/termvectors/AbstractTermVectorsTestCase.java @@ -32,6 +32,8 @@ package org.opensearch.action.termvectors; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.LowerCaseFilter; @@ -63,21 +65,36 @@ import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.Locale; import java.util.Map; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; -public abstract class AbstractTermVectorsTestCase extends OpenSearchIntegTestCase { +public abstract class AbstractTermVectorsTestCase extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public AbstractTermVectorsTestCase(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + protected static class TestFieldSetting { public final String name; public final boolean storedOffset; diff --git a/server/src/test/java/org/opensearch/action/termvectors/GetTermVectorsTests.java b/server/src/test/java/org/opensearch/action/termvectors/GetTermVectorsTests.java index 8f01fd40fe607..7dd73966bb079 100644 --- a/server/src/test/java/org/opensearch/action/termvectors/GetTermVectorsTests.java +++ b/server/src/test/java/org/opensearch/action/termvectors/GetTermVectorsTests.java @@ -31,7 +31,6 @@ package org.opensearch.action.termvectors; -import org.apache.lucene.tests.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenFilter; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.payloads.FloatEncoder; @@ -45,6 +44,7 @@ import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.tests.analysis.MockTokenizer; import org.apache.lucene.util.BytesRef; import org.opensearch.OpenSearchException; import org.opensearch.common.settings.Settings; diff --git a/server/src/test/java/org/opensearch/action/termvectors/TermVectorsUnitTests.java b/server/src/test/java/org/opensearch/action/termvectors/TermVectorsUnitTests.java index c7f14f7a22805..347670ffcdd00 100644 --- a/server/src/test/java/org/opensearch/action/termvectors/TermVectorsUnitTests.java +++ 
b/server/src/test/java/org/opensearch/action/termvectors/TermVectorsUnitTests.java @@ -51,16 +51,16 @@ import org.apache.lucene.store.Directory; import org.opensearch.LegacyESVersion; import org.opensearch.action.termvectors.TermVectorsRequest.Flag; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.InputStreamStreamInput; import org.opensearch.core.common.io.stream.OutputStreamStreamOutput; -import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentParser; import org.opensearch.rest.action.document.RestTermVectorsAction; -import org.opensearch.tasks.TaskId; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.StreamsUtils; import org.hamcrest.Matchers; @@ -247,7 +247,7 @@ public void testStreamRequest() throws IOException { request.termStatistics(random().nextBoolean()); String pref = random().nextBoolean() ? "somePreference" : null; request.preference(pref); - request.doc(new BytesArray("{}"), randomBoolean(), XContentType.JSON); + request.doc(new BytesArray("{}"), randomBoolean(), MediaTypeRegistry.JSON); // write ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); @@ -267,7 +267,7 @@ public void testStreamRequest() throws IOException { assertThat(request.preference(), equalTo(pref)); assertThat(request.routing(), equalTo(null)); assertEquals(new BytesArray("{}"), request.doc()); - assertEquals(XContentType.JSON, request.xContentType()); + assertEquals(MediaTypeRegistry.JSON, request.xContentType()); } } @@ -281,7 +281,7 @@ public void testStreamRequestLegacyVersion() throws IOException { request.termStatistics(random().nextBoolean()); String pref = random().nextBoolean() ? 
"somePreference" : null; request.preference(pref); - request.doc(new BytesArray("{}"), randomBoolean(), XContentType.JSON); + request.doc(new BytesArray("{}"), randomBoolean(), MediaTypeRegistry.JSON); // write using older version which contains types ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); @@ -313,7 +313,7 @@ public void testStreamRequestLegacyVersion() throws IOException { assertThat(request.preference(), equalTo(pref)); assertThat(request.routing(), equalTo(null)); assertEquals(new BytesArray("{}"), request.doc()); - assertEquals(XContentType.JSON, request.xContentType()); + assertEquals(MediaTypeRegistry.JSON, request.xContentType()); } } diff --git a/server/src/test/java/org/opensearch/action/termvectors/TransportMultiTermVectorsActionTests.java b/server/src/test/java/org/opensearch/action/termvectors/TransportMultiTermVectorsActionTests.java index cc4abc5343959..0868421fe1d41 100644 --- a/server/src/test/java/org/opensearch/action/termvectors/TransportMultiTermVectorsActionTests.java +++ b/server/src/test/java/org/opensearch/action/termvectors/TransportMultiTermVectorsActionTests.java @@ -33,7 +33,6 @@ package org.opensearch.action.termvectors; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.IndicesRequest; import org.opensearch.action.RoutingMissingException; import org.opensearch.action.get.TransportMultiGetActionTests; @@ -48,19 +47,21 @@ import org.opensearch.cluster.routing.OperationRouting; import org.opensearch.cluster.routing.ShardIterator; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.AtomicArray; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.indices.IndicesService; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; import org.opensearch.tasks.TaskManager; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -80,8 +81,8 @@ import static org.hamcrest.Matchers.instanceOf; import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.eq; -import static org.mockito.Mockito.nullable; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.nullable; import static org.mockito.Mockito.when; public class TransportMultiTermVectorsActionTests extends OpenSearchTestCase { @@ -107,7 +108,8 @@ public static void beforeClass() throws Exception { randomBase64UUID() ), null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ) { @Override public TaskManager getTaskManager() { @@ -140,7 +142,7 @@ public TaskManager getTaskManager() { .endObject() ), true, - XContentType.JSON + MediaTypeRegistry.JSON ) ) ) @@ -165,7 +167,7 @@ public TaskManager getTaskManager() { .endObject() ), true, - XContentType.JSON + MediaTypeRegistry.JSON ) ) ) diff --git 
a/server/src/test/java/org/opensearch/action/update/UpdateRequestTests.java b/server/src/test/java/org/opensearch/action/update/UpdateRequestTests.java index e0ee034f53821..b70fda0d86240 100644 --- a/server/src/test/java/org/opensearch/action/update/UpdateRequestTests.java +++ b/server/src/test/java/org/opensearch/action/update/UpdateRequestTests.java @@ -37,22 +37,23 @@ import org.opensearch.action.delete.DeleteRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.support.replication.ReplicationRequest; +import org.opensearch.common.document.DocumentField; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.document.DocumentField; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.env.Environment; import org.opensearch.index.get.GetResult; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.script.MockScriptEngine; import org.opensearch.script.Script; import org.opensearch.script.ScriptEngine; @@ -71,7 +72,7 @@ import static java.util.Collections.emptyMap; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.opensearch.common.xcontent.XContentHelper.toXContent; +import static org.opensearch.core.xcontent.XContentHelper.toXContent; import static org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; import static org.opensearch.script.MockScriptEngine.mockInlineScript; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertToXContentEquivalent; @@ -514,7 +515,7 @@ public void testToValidateUpsertRequestAndCAS() { UpdateRequest updateRequest = new UpdateRequest("index", "id"); updateRequest.setIfSeqNo(1L); updateRequest.setIfPrimaryTerm(1L); - updateRequest.doc("{}", XContentType.JSON); + updateRequest.doc("{}", MediaTypeRegistry.JSON); updateRequest.upsert(new IndexRequest("index").id("id")); assertThat( updateRequest.validate().validationErrors(), @@ -524,7 +525,7 @@ public void testToValidateUpsertRequestAndCAS() { public void testToValidateUpsertRequestWithVersion() { UpdateRequest updateRequest = new UpdateRequest("index", "id"); - updateRequest.doc("{}", XContentType.JSON); + updateRequest.doc("{}", MediaTypeRegistry.JSON); updateRequest.upsert(new IndexRequest("index").id("1").version(1L)); assertThat(updateRequest.validate().validationErrors(), contains("can't provide version in upsert request")); } @@ -532,7 +533,7 @@ public void testToValidateUpsertRequestWithVersion() { public void testValidate() { { UpdateRequest request = new UpdateRequest("index", "id"); - request.doc("{}", XContentType.JSON); + 
request.doc("{}", MediaTypeRegistry.JSON); ActionRequestValidationException validate = request.validate(); assertThat(validate, nullValue()); @@ -540,7 +541,7 @@ public void testValidate() { { // Null types are defaulted to "_doc" UpdateRequest request = new UpdateRequest("index", null); - request.doc("{}", XContentType.JSON); + request.doc("{}", MediaTypeRegistry.JSON); ActionRequestValidationException validate = request.validate(); assertThat(validate, not(nullValue())); diff --git a/server/src/test/java/org/opensearch/action/update/UpdateResponseTests.java b/server/src/test/java/org/opensearch/action/update/UpdateResponseTests.java index 9a215bfffb00c..f6e0c9ae7c61a 100644 --- a/server/src/test/java/org/opensearch/action/update/UpdateResponseTests.java +++ b/server/src/test/java/org/opensearch/action/update/UpdateResponseTests.java @@ -35,18 +35,19 @@ import org.opensearch.action.DocWriteResponse; import org.opensearch.action.index.IndexResponseTests; import org.opensearch.action.support.replication.ReplicationResponse; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; import org.opensearch.common.document.DocumentField; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.get.GetResult; import org.opensearch.index.get.GetResultTests; import org.opensearch.index.seqno.SequenceNumbers; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.RandomObjects; @@ -60,7 +61,7 @@ import static org.opensearch.action.DocWriteResponse.Result.NOT_FOUND; import static org.opensearch.action.DocWriteResponse.Result.UPDATED; import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_UUID_NA_VALUE; -import static org.opensearch.common.xcontent.XContentHelper.toXContent; +import static org.opensearch.core.xcontent.XContentHelper.toXContent; import static org.opensearch.test.XContentTestUtils.insertRandomFields; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertToXContentEquivalent; @@ -69,7 +70,7 @@ public class UpdateResponseTests extends OpenSearchTestCase { public void testToXContent() throws IOException { { UpdateResponse updateResponse = new UpdateResponse(new ShardId("index", "index_uuid", 0), "id", -2, 0, 0, NOT_FOUND); - String output = Strings.toString(XContentType.JSON, updateResponse); + String output = Strings.toString(MediaTypeRegistry.JSON, updateResponse); assertEquals( "{\"_index\":\"index\",\"_id\":\"id\",\"_version\":0,\"result\":\"not_found\"," + "\"_shards\":{\"total\":0,\"successful\":0,\"failed\":0}}", @@ -86,7 +87,7 @@ public void testToXContent() throws IOException { 1, DELETED ); - String output = Strings.toString(XContentType.JSON, updateResponse); + String output = Strings.toString(MediaTypeRegistry.JSON, updateResponse); assertEquals( "{\"_index\":\"index\",\"_id\":\"id\",\"_version\":1,\"result\":\"deleted\"," + "\"_shards\":{\"total\":10,\"successful\":6,\"failed\":0},\"_seq_no\":3,\"_primary_term\":17}", @@ -110,7 
+111,7 @@ public void testToXContent() throws IOException { ); updateResponse.setGetResult(new GetResult("books", "1", 0, 1, 2, true, source, fields, null)); - String output = Strings.toString(XContentType.JSON, updateResponse); + String output = Strings.toString(MediaTypeRegistry.JSON, updateResponse); assertEquals( "{\"_index\":\"books\",\"_id\":\"1\",\"_version\":2,\"result\":\"updated\"," + "\"_shards\":{\"total\":3,\"successful\":2,\"failed\":0},\"_seq_no\":7,\"_primary_term\":17,\"get\":{" @@ -136,13 +137,13 @@ public void testFromXContentWithRandomFields() throws IOException { } private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws IOException { - final XContentType xContentType = randomFrom(XContentType.JSON); - final Tuple<UpdateResponse, UpdateResponse> tuple = randomUpdateResponse(xContentType); + final MediaType mediaType = randomFrom(MediaTypeRegistry.JSON); + final Tuple<UpdateResponse, UpdateResponse> tuple = randomUpdateResponse(mediaType); UpdateResponse updateResponse = tuple.v1(); UpdateResponse expectedUpdateResponse = tuple.v2(); boolean humanReadable = randomBoolean(); - BytesReference originalBytes = toShuffledXContent(updateResponse, xContentType, ToXContent.EMPTY_PARAMS, humanReadable); + BytesReference originalBytes = toShuffledXContent(updateResponse, mediaType, ToXContent.EMPTY_PARAMS, humanReadable); BytesReference mutated; if (addRandomFields) { @@ -155,12 +156,12 @@ private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws // object since this is where GetResult's metadata fields are rendered out and they would be parsed back as // extra metadata fields. Predicate<String> excludeFilter = path -> path.contains("reason") || path.contains("get"); - mutated = insertRandomFields(xContentType, originalBytes, excludeFilter, random()); + mutated = insertRandomFields(mediaType, originalBytes, excludeFilter, random()); } else { mutated = originalBytes; } UpdateResponse parsedUpdateResponse; - try (XContentParser parser = createParser(xContentType.xContent(), mutated)) { + try (XContentParser parser = createParser(mediaType.xContent(), mutated)) { parsedUpdateResponse = UpdateResponse.fromXContent(parser); assertNull(parser.nextToken()); } @@ -173,9 +174,9 @@ private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws // Prints out the parsed UpdateResponse object to verify that it is the same as the expected output. // If random fields have been inserted, it checks that they have been filtered out and that they do // not alter the final output of the parsed object. - BytesReference parsedBytes = toXContent(parsedUpdateResponse, xContentType, humanReadable); - BytesReference expectedBytes = toXContent(expectedUpdateResponse, xContentType, humanReadable); - assertToXContentEquivalent(expectedBytes, parsedBytes, xContentType); + BytesReference parsedBytes = toXContent(parsedUpdateResponse, mediaType, humanReadable); + BytesReference expectedBytes = toXContent(expectedUpdateResponse, mediaType, humanReadable); + assertToXContentEquivalent(expectedBytes, parsedBytes, mediaType); } /** @@ -184,8 +185,8 @@ private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws * The left element is the actual {@link UpdateResponse} to serialize while the right element is the * expected {@link UpdateResponse} after parsing. 
*/ - public static Tuple<UpdateResponse, UpdateResponse> randomUpdateResponse(XContentType xContentType) { - Tuple<GetResult, GetResult> getResults = GetResultTests.randomGetResult(xContentType); + public static Tuple<UpdateResponse, UpdateResponse> randomUpdateResponse(MediaType mediaType) { + Tuple<GetResult, GetResult> getResults = GetResultTests.randomGetResult(mediaType); GetResult actualGetResult = getResults.v1(); GetResult expectedGetResult = getResults.v2(); diff --git a/server/src/test/java/org/opensearch/bootstrap/BootstrapChecksTests.java b/server/src/test/java/org/opensearch/bootstrap/BootstrapChecksTests.java index 15aacd25b30b1..69102d2e76bef 100644 --- a/server/src/test/java/org/opensearch/bootstrap/BootstrapChecksTests.java +++ b/server/src/test/java/org/opensearch/bootstrap/BootstrapChecksTests.java @@ -40,8 +40,8 @@ import org.opensearch.common.CheckedConsumer; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.discovery.DiscoveryModule; import org.opensearch.discovery.SettingsBasedSeedHostsProvider; import org.opensearch.env.Environment; diff --git a/server/src/test/java/org/opensearch/bootstrap/IdentityPluginTests.java b/server/src/test/java/org/opensearch/bootstrap/IdentityPluginTests.java index b84a9a87ec77e..2129810a99879 100644 --- a/server/src/test/java/org/opensearch/bootstrap/IdentityPluginTests.java +++ b/server/src/test/java/org/opensearch/bootstrap/IdentityPluginTests.java @@ -8,7 +8,6 @@ package org.opensearch.bootstrap; -import java.util.List; import org.opensearch.OpenSearchException; import org.opensearch.common.settings.Settings; import org.opensearch.identity.IdentityService; @@ -16,6 +15,9 @@ import org.opensearch.identity.noop.NoopTokenManager; import org.opensearch.plugins.IdentityPlugin; import org.opensearch.test.OpenSearchTestCase; + +import java.util.List; + import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; diff --git a/server/src/test/java/org/opensearch/bootstrap/MaxMapCountCheckTests.java b/server/src/test/java/org/opensearch/bootstrap/MaxMapCountCheckTests.java index f1a94096412f7..81a2b1334c39d 100644 --- a/server/src/test/java/org/opensearch/bootstrap/MaxMapCountCheckTests.java +++ b/server/src/test/java/org/opensearch/bootstrap/MaxMapCountCheckTests.java @@ -33,8 +33,8 @@ package org.opensearch.bootstrap; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.core.LogEvent; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.util.Constants; diff --git a/server/src/test/java/org/opensearch/bootstrap/OpenSearchPolicyTests.java b/server/src/test/java/org/opensearch/bootstrap/OpenSearchPolicyTests.java index b1e27ea9c66e3..2b4d2a755f543 100644 --- a/server/src/test/java/org/opensearch/bootstrap/OpenSearchPolicyTests.java +++ b/server/src/test/java/org/opensearch/bootstrap/OpenSearchPolicyTests.java @@ -49,6 +49,7 @@ public class OpenSearchPolicyTests extends OpenSearchTestCase { /** * test restricting privileges to no permissions actually works */ + @SuppressWarnings("removal") public void testRestrictPrivileges() { 
assumeTrue("test requires security manager", System.getSecurityManager() != null); try { diff --git a/server/src/test/java/org/opensearch/bootstrap/SecurityTests.java b/server/src/test/java/org/opensearch/bootstrap/SecurityTests.java index ea4ef96ec0f77..76353aea03257 100644 --- a/server/src/test/java/org/opensearch/bootstrap/SecurityTests.java +++ b/server/src/test/java/org/opensearch/bootstrap/SecurityTests.java @@ -35,8 +35,12 @@ import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; +import java.net.URL; import java.nio.file.Files; import java.nio.file.Path; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.Map; public class SecurityTests extends OpenSearchTestCase { @@ -72,6 +76,7 @@ public void testEnsureRegularFile() throws IOException { } /** can't execute processes */ + @SuppressWarnings("removal") public void testProcessExecution() throws Exception { assumeTrue("test requires security manager", System.getSecurityManager() != null); try { @@ -79,4 +84,23 @@ public void testProcessExecution() throws Exception { fail("didn't get expected exception"); } catch (SecurityException expected) {} } + + public void testReadPolicyWithCodebases() throws IOException { + final Map<String, URL> codebases = Map.of( + "test-netty-tcnative-boringssl-static-2.0.61.Final-linux-x86_64.jar", + new URL("file://test-netty-tcnative-boringssl-static-2.0.61.Final-linux-x86_64.jar"), + "test-kafka-server-common-3.6.1.jar", + new URL("file://test-kafka-server-common-3.6.1.jar"), + "test-kafka-server-common-3.6.1-test.jar", + new URL("file://test-kafka-server-common-3.6.1-test.jar"), + "test-lucene-core-9.11.0-snapshot-8a555eb.jar", + new URL("file://test-lucene-core-9.11.0-snapshot-8a555eb.jar"), + "test-zstd-jni-1.5.5-5.jar", + new URL("file://test-zstd-jni-1.5.5-5.jar") + ); + + AccessController.doPrivileged( + (PrivilegedAction<?>) () -> Security.readPolicy(SecurityTests.class.getResource("test-codebases.policy"), codebases) + ); + } } diff --git a/server/src/test/java/org/opensearch/client/AbstractClientHeadersTestCase.java b/server/src/test/java/org/opensearch/client/AbstractClientHeadersTestCase.java index 92a88aa7940ee..682ea395b0193 100644 --- a/server/src/test/java/org/opensearch/client/AbstractClientHeadersTestCase.java +++ b/server/src/test/java/org/opensearch/client/AbstractClientHeadersTestCase.java @@ -34,7 +34,6 @@ import org.opensearch.ExceptionsHelper; import org.opensearch.action.ActionType; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.reroute.ClusterRerouteAction; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotAction; import org.opensearch.action.admin.cluster.stats.ClusterStatsAction; @@ -49,7 +48,8 @@ import org.opensearch.action.search.SearchAction; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.env.Environment; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; @@ -130,7 +130,7 @@ public void testActions() { .execute(new AssertingActionListener<>(DeleteStoredScriptAction.NAME, client.threadPool())); client.prepareIndex("idx") .setId("id") - .setSource("source", XContentType.JSON) + .setSource("source", MediaTypeRegistry.JSON) .execute(new 
AssertingActionListener<>(IndexAction.NAME, client.threadPool())); // choosing arbitrary cluster admin actions to test diff --git a/server/src/test/java/org/opensearch/client/OriginSettingClientTests.java b/server/src/test/java/org/opensearch/client/OriginSettingClientTests.java index eca49516f42c2..3449d2d5ea51d 100644 --- a/server/src/test/java/org/opensearch/client/OriginSettingClientTests.java +++ b/server/src/test/java/org/opensearch/client/OriginSettingClientTests.java @@ -32,14 +32,14 @@ package org.opensearch.client; -import org.opensearch.action.ActionType; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; -import org.opensearch.action.ActionResponse; +import org.opensearch.action.ActionType; import org.opensearch.action.bulk.BulkRequest; import org.opensearch.action.search.ClearScrollRequest; import org.opensearch.action.search.SearchRequest; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.client.NoOpClient; diff --git a/server/src/test/java/org/opensearch/client/ParentTaskAssigningClientTests.java b/server/src/test/java/org/opensearch/client/ParentTaskAssigningClientTests.java index 93cd89ce31a08..ce956273ed3ae 100644 --- a/server/src/test/java/org/opensearch/client/ParentTaskAssigningClientTests.java +++ b/server/src/test/java/org/opensearch/client/ParentTaskAssigningClientTests.java @@ -32,14 +32,14 @@ package org.opensearch.client; -import org.opensearch.action.ActionType; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; -import org.opensearch.action.ActionResponse; +import org.opensearch.action.ActionType; import org.opensearch.action.bulk.BulkRequest; import org.opensearch.action.search.ClearScrollRequest; import org.opensearch.action.search.SearchRequest; -import org.opensearch.tasks.TaskId; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.tasks.TaskId; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.client.NoOpClient; diff --git a/server/src/test/java/org/opensearch/client/node/NodeClientHeadersTests.java b/server/src/test/java/org/opensearch/client/node/NodeClientHeadersTests.java index 36196d0d236c7..176c94b01c878 100644 --- a/server/src/test/java/org/opensearch/client/node/NodeClientHeadersTests.java +++ b/server/src/test/java/org/opensearch/client/node/NodeClientHeadersTests.java @@ -32,16 +32,16 @@ package org.opensearch.client.node; -import org.opensearch.action.ActionType; import org.opensearch.action.ActionModule.DynamicActionRegistry; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; +import org.opensearch.action.ActionType; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.TransportAction; import org.opensearch.client.AbstractClientHeadersTestCase; import org.opensearch.client.Client; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskManager; import org.opensearch.threadpool.ThreadPool; diff --git 
a/server/src/test/java/org/opensearch/cluster/ClusterChangedEventTests.java b/server/src/test/java/org/opensearch/cluster/ClusterChangedEventTests.java index d20a13f90b14f..963e25f29b399 100644 --- a/server/src/test/java/org/opensearch/cluster/ClusterChangedEventTests.java +++ b/server/src/test/java/org/opensearch/cluster/ClusterChangedEventTests.java @@ -43,8 +43,8 @@ import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; -import org.opensearch.gateway.GatewayService; import org.opensearch.core.index.Index; +import org.opensearch.gateway.GatewayService; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.TestCustomMetadata; diff --git a/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java b/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java index 23c2506bf6143..457bdac1809ef 100644 --- a/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java +++ b/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java @@ -49,15 +49,14 @@ import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.TestShardRouting; -import org.opensearch.common.Strings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; -import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.TestCustomMetadata; @@ -74,6 +73,7 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static java.util.Collections.singletonMap; +import static org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_CREATED; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -85,7 +85,7 @@ public void testSupersedes() { final DiscoveryNode node1 = new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), version); final DiscoveryNode node2 = new DiscoveryNode("node2", buildNewFakeTransportAddress(), emptyMap(), emptySet(), version); final DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).add(node2).build(); - ClusterName name = ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY); + ClusterName name = CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY); ClusterState noClusterManager1 = ClusterState.builder(name).version(randomInt(5)).nodes(nodes).build(); ClusterState noClusterManager2 = ClusterState.builder(name).version(randomInt(5)).nodes(nodes).build(); ClusterState withClusterManager1a = ClusterState.builder(name) @@ -327,7 +327,7 @@ public void testToXContent() throws IOException { + " }\n" + " }\n" + "}", - Strings.toString(builder) + builder.toString() ); } @@ -527,7 +527,7 @@ public void testToXContent_FlatSettingTrue_ReduceMappingFalse() throws IOExcepti + " }\n" + " }\n" + "}", - Strings.toString(builder) + builder.toString() ); } @@ -734,7 +734,7 @@ public void 
testToXContent_FlatSettingFalse_ReduceMappingTrue() throws IOExcepti + " }\n" + " }\n" + "}", - Strings.toString(builder) + builder.toString() ); } @@ -844,7 +844,7 @@ public void testToXContentSameTypeName() throws IOException { + " \"nodes\" : { }\n" + " }\n" + "}", - Strings.toString(builder) + builder.toString() ); } diff --git a/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java b/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java index 8ba965b3df1ab..ff47ec3015697 100644 --- a/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java +++ b/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java @@ -190,6 +190,10 @@ public void testFillDiskUsage() { null, null, null, + null, + null, + null, + null, null ), new NodeStats( @@ -216,6 +220,10 @@ public void testFillDiskUsage() { null, null, null, + null, + null, + null, + null, null ), new NodeStats( @@ -242,6 +250,10 @@ public void testFillDiskUsage() { null, null, null, + null, + null, + null, + null, null ) ); @@ -299,6 +311,10 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null, + null, + null, + null, + null, null ), new NodeStats( @@ -325,6 +341,10 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null, + null, + null, + null, + null, null ), new NodeStats( @@ -351,6 +371,10 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null, + null, + null, + null, + null, null ) ); diff --git a/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java b/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java index 1cd21482566ef..47dbf85c13b1f 100644 --- a/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java +++ b/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java @@ -33,9 +33,7 @@ package org.opensearch.cluster; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; -import org.opensearch.action.ActionResponse; import org.opensearch.action.ActionType; import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.opensearch.action.admin.indices.stats.IndicesStatsRequest; @@ -47,12 +45,14 @@ import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.service.ClusterApplier; import org.opensearch.cluster.service.ClusterApplierService; +import org.opensearch.cluster.service.ClusterManagerService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.cluster.service.FakeThreadPoolClusterManagerService; -import org.opensearch.cluster.service.ClusterManagerService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; import org.opensearch.node.Node; import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/cluster/NodeConnectionsServiceTests.java b/server/src/test/java/org/opensearch/cluster/NodeConnectionsServiceTests.java index 511c1555f1159..4cf82f1dabab3 100644 --- a/server/src/test/java/org/opensearch/cluster/NodeConnectionsServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/NodeConnectionsServiceTests.java @@ -37,7 +37,6 @@ import 
org.apache.logging.log4j.Logger; import org.opensearch.OpenSearchTimeoutException; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.coordination.DeterministicTaskQueue; import org.opensearch.cluster.node.DiscoveryNode; @@ -45,13 +44,15 @@ import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.CheckedRunnable; import org.opensearch.common.UUIDs; -import org.opensearch.common.component.Lifecycle; -import org.opensearch.common.component.LifecycleListener; +import org.opensearch.common.lifecycle.Lifecycle; +import org.opensearch.common.lifecycle.LifecycleListener; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; -import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.MockLogAppender; +import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.junit.annotations.TestLogging; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -552,7 +553,8 @@ private TestTransportService(Transport transport, ThreadPool threadPool) { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> DiscoveryNode.createLocal(Settings.EMPTY, buildNewFakeTransportAddress(), UUIDs.randomBase64UUID()), null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/cluster/action/index/MappingUpdatedActionTests.java b/server/src/test/java/org/opensearch/cluster/action/index/MappingUpdatedActionTests.java index c3ea1608f1275..e8833f12e5e6e 100644 --- a/server/src/test/java/org/opensearch/cluster/action/index/MappingUpdatedActionTests.java +++ b/server/src/test/java/org/opensearch/cluster/action/index/MappingUpdatedActionTests.java @@ -32,7 +32,6 @@ package org.opensearch.cluster.action.index; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.mapping.put.AutoPutMappingAction; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.client.AdminClient; @@ -46,6 +45,7 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.Index; import org.opensearch.index.mapper.ContentPath; import org.opensearch.index.mapper.Mapper; diff --git a/server/src/test/java/org/opensearch/cluster/action/shard/ShardStartedClusterStateTaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/action/shard/ShardStartedClusterStateTaskExecutorTests.java index b423a0f3dc8de..3dee3507ae71a 100644 --- a/server/src/test/java/org/opensearch/cluster/action/shard/ShardStartedClusterStateTaskExecutorTests.java +++ b/server/src/test/java/org/opensearch/cluster/action/shard/ShardStartedClusterStateTaskExecutorTests.java @@ -32,7 +32,6 @@ package org.opensearch.cluster.action.shard; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateTaskExecutor; import 
org.opensearch.cluster.OpenSearchAllocationTestCase; @@ -45,6 +44,7 @@ import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; import java.util.ArrayList; diff --git a/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java b/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java index 2688aaa145dc0..efe91de1ae1a8 100644 --- a/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java +++ b/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java @@ -34,7 +34,6 @@ import org.apache.lucene.index.CorruptIndexException; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.replication.ClusterStateCreationUtils; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateObserver; @@ -51,11 +50,14 @@ import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.SetOnce; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; @@ -64,7 +66,6 @@ import org.opensearch.transport.NodeNotConnectedException; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequest; -import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportService; import org.junit.After; import org.junit.AfterClass; @@ -155,7 +156,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/cluster/action/shard/routing/weighted/get/TransportGetWeightedRoutingActionTests.java b/server/src/test/java/org/opensearch/cluster/action/shard/routing/weighted/get/TransportGetWeightedRoutingActionTests.java index a0ac9d94c8c37..775d113f986ca 100644 --- a/server/src/test/java/org/opensearch/cluster/action/shard/routing/weighted/get/TransportGetWeightedRoutingActionTests.java +++ b/server/src/test/java/org/opensearch/cluster/action/shard/routing/weighted/get/TransportGetWeightedRoutingActionTests.java @@ -8,8 +8,6 @@ package org.opensearch.cluster.action.shard.routing.weighted.get; -import org.junit.After; -import org.junit.Before; import org.opensearch.Version; import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingAction; import org.opensearch.action.admin.cluster.shards.routing.weighted.get.ClusterGetWeightedRoutingRequestBuilder; @@ -32,12 +30,15 @@ import org.opensearch.cluster.service.ClusterService; import 
org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransport; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; +import org.junit.After; +import org.junit.Before; import java.util.Collections; import java.util.HashSet; @@ -90,8 +91,8 @@ public void setUpService() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> clusterService.state().nodes().get("nodes1"), null, - Collections.emptySet() - + Collections.emptySet(), + NoopTracer.INSTANCE ); Settings.Builder settingsBuilder = Settings.builder() diff --git a/server/src/test/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributesHealthTests.java b/server/src/test/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributesHealthTests.java index 176efa727718c..7910daebb00de 100644 --- a/server/src/test/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributesHealthTests.java +++ b/server/src/test/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributesHealthTests.java @@ -8,10 +8,6 @@ package org.opensearch.cluster.awarenesshealth; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; import org.opensearch.Version; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; @@ -20,11 +16,16 @@ import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Settings; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; import java.util.Arrays; import java.util.Collections; @@ -64,7 +65,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessHealthTests.java b/server/src/test/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessHealthTests.java index 59f6edbadcecd..b68f0f2375354 100644 --- a/server/src/test/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessHealthTests.java +++ b/server/src/test/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessHealthTests.java @@ -8,10 +8,6 @@ package org.opensearch.cluster.awarenesshealth; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; import org.opensearch.Version; import org.opensearch.action.support.IndicesOptions; import org.opensearch.cluster.ClusterName; @@ -29,15 +25,20 @@ import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.io.stream.BytesStreamOutput; -import 
org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; import java.io.IOException; import java.util.Arrays; @@ -82,7 +83,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceDeprecatedMasterTests.java b/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceDeprecatedMasterTests.java index 137de1355f11b..0b84eb19f4264 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceDeprecatedMasterTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceDeprecatedMasterTests.java @@ -31,16 +31,17 @@ package org.opensearch.cluster.coordination; -import org.junit.Before; import org.opensearch.Version; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.common.settings.Settings; import org.opensearch.discovery.DiscoveryModule; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransport; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; +import org.junit.Before; import java.util.Collections; import java.util.concurrent.atomic.AtomicBoolean; @@ -100,7 +101,8 @@ protected void onSendRequest(long requestId, String action, TransportRequest req TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java b/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java index be7b32d4aef11..bfb225854979b 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java @@ -37,6 +37,7 @@ import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.common.settings.Settings; import org.opensearch.discovery.DiscoveryModule; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransport; import org.opensearch.transport.TransportRequest; @@ -101,7 +102,8 @@ protected void onSendRequest(long requestId, String action, TransportRequest req TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); } diff --git 
a/server/src/test/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelperTests.java b/server/src/test/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelperTests.java index b091130db0b98..efdf4fb4b92f5 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelperTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelperTests.java @@ -41,7 +41,7 @@ import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.gateway.GatewayMetaState; import org.opensearch.monitor.StatusInfo; import org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationCheckerSettingsTests.java b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationCheckerSettingsTests.java new file mode 100644 index 0000000000000..56bd2d94dce84 --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationCheckerSettingsTests.java @@ -0,0 +1,113 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.coordination; + +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.test.OpenSearchSingleNodeTestCase; + +import static org.opensearch.cluster.coordination.FollowersChecker.FOLLOWER_CHECK_TIMEOUT_SETTING; +import static org.opensearch.cluster.coordination.LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING; +import static org.opensearch.common.unit.TimeValue.timeValueSeconds; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +public class CoordinationCheckerSettingsTests extends OpenSearchSingleNodeTestCase { + public void testFollowerCheckTimeoutValueUpdate() { + Setting<TimeValue> setting1 = FOLLOWER_CHECK_TIMEOUT_SETTING; + Settings timeSettings1 = Settings.builder().put(setting1.getKey(), "60s").build(); + try { + ClusterUpdateSettingsResponse response = client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(timeSettings1) + .execute() + .actionGet(); + + assertAcked(response); + assertEquals(timeValueSeconds(60), setting1.get(response.getPersistentSettings())); + } finally { + // cleanup + timeSettings1 = Settings.builder().putNull(setting1.getKey()).build(); + client().admin().cluster().prepareUpdateSettings().setPersistentSettings(timeSettings1).execute().actionGet(); + } + } + + public void testFollowerCheckTimeoutMaxValue() { + Setting<TimeValue> setting1 = FOLLOWER_CHECK_TIMEOUT_SETTING; + Settings timeSettings1 = Settings.builder().put(setting1.getKey(), "61s").build(); + + assertThrows( + "failed to parse value [61s] for setting [" + setting1.getKey() + "], must be <= [60000ms]", + IllegalArgumentException.class, + () -> { + client().admin().cluster().prepareUpdateSettings().setPersistentSettings(timeSettings1).execute().actionGet(); + } + ); + } + + public void testFollowerCheckTimeoutMinValue() { + Setting<TimeValue> setting1 = 
FOLLOWER_CHECK_TIMEOUT_SETTING; + Settings timeSettings1 = Settings.builder().put(setting1.getKey(), "0s").build(); + + assertThrows( + "failed to parse value [0s] for setting [" + setting1.getKey() + "], must be >= [1ms]", + IllegalArgumentException.class, + () -> { + client().admin().cluster().prepareUpdateSettings().setPersistentSettings(timeSettings1).execute().actionGet(); + } + ); + } + + public void testLeaderCheckTimeoutValueUpdate() { + Setting<TimeValue> setting1 = LEADER_CHECK_TIMEOUT_SETTING; + Settings timeSettings1 = Settings.builder().put(setting1.getKey(), "60s").build(); + try { + ClusterUpdateSettingsResponse response = client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(timeSettings1) + .execute() + .actionGet(); + assertAcked(response); + assertEquals(timeValueSeconds(60), setting1.get(response.getPersistentSettings())); + } finally { + // cleanup + timeSettings1 = Settings.builder().putNull(setting1.getKey()).build(); + client().admin().cluster().prepareUpdateSettings().setPersistentSettings(timeSettings1).execute().actionGet(); + } + } + + public void testLeaderCheckTimeoutMaxValue() { + Setting<TimeValue> setting1 = LEADER_CHECK_TIMEOUT_SETTING; + Settings timeSettings1 = Settings.builder().put(setting1.getKey(), "61s").build(); + + assertThrows( + "failed to parse value [61s] for setting [" + setting1.getKey() + "], must be <= [60000ms]", + IllegalArgumentException.class, + () -> { + client().admin().cluster().prepareUpdateSettings().setPersistentSettings(timeSettings1).execute().actionGet(); + } + ); + } + + public void testLeaderCheckTimeoutMinValue() { + Setting<TimeValue> setting1 = LEADER_CHECK_TIMEOUT_SETTING; + Settings timeSettings1 = Settings.builder().put(setting1.getKey(), "0s").build(); + + assertThrows( + "failed to parse value [0s] for setting [" + setting1.getKey() + "], must be >= [1ms]", + IllegalArgumentException.class, + () -> { + client().admin().cluster().prepareUpdateSettings().setPersistentSettings(timeSettings1).execute().actionGet(); + } + ); + } +} diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationMetadataTests.java b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationMetadataTests.java index 15887d6759458..290479941aaa9 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationMetadataTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationMetadataTests.java @@ -33,16 +33,16 @@ import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfigExclusion; import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfiguration; +import org.opensearch.common.util.set.Sets; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.common.util.set.Sets; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.EqualsHashCodeTestUtils; import org.opensearch.test.EqualsHashCodeTestUtils.CopyFunction; +import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; import java.util.Collections; diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java 
b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java index 23087e6dd2ba4..1c0dc7fc1ca2d 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java @@ -31,32 +31,49 @@ package org.opensearch.cluster.coordination; -import org.opensearch.core.Assertions; import org.opensearch.Version; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfiguration; import org.opensearch.cluster.coordination.CoordinationState.PersistedState; +import org.opensearch.cluster.coordination.PersistedStateRegistry.PersistedStateType; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; -import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.core.Assertions; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.gateway.GatewayMetaState.RemotePersistedState; +import org.opensearch.gateway.remote.ClusterMetadataManifest; +import org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.repositories.fs.FsRepository; import org.opensearch.test.EqualsHashCodeTestUtils; +import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; +import java.io.IOException; import java.util.Collections; +import java.util.Locale; import java.util.Optional; import java.util.stream.Collectors; import java.util.stream.IntStream; +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; + import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; public class CoordinationStateTests extends OpenSearchTestCase { @@ -67,6 +84,7 @@ public class CoordinationStateTests extends OpenSearchTestCase { private ClusterState initialStateNode1; private PersistedState ps1; + private PersistedStateRegistry psr1; private CoordinationState cs1; private CoordinationState cs2; @@ -97,10 +115,12 @@ public void setupNodes() { ); ps1 = new InMemoryPersistedState(0L, initialStateNode1); + psr1 = persistedStateRegistry(); + psr1.addPersistedState(PersistedStateType.LOCAL, ps1); - cs1 = createCoordinationState(ps1, node1); - cs2 = createCoordinationState(new InMemoryPersistedState(0L, initialStateNode2), node2); - cs3 = createCoordinationState(new InMemoryPersistedState(0L, initialStateNode3), node3); + cs1 = createCoordinationState(psr1, node1, Settings.EMPTY); + cs2 = createCoordinationState(createPersistedStateRegistry(initialStateNode2), node2, 
Settings.EMPTY); + cs3 = createCoordinationState(createPersistedStateRegistry(initialStateNode3), node3, Settings.EMPTY); } public static DiscoveryNode createNode(String id) { @@ -200,7 +220,7 @@ public void testJoinBeforeBootstrap() { public void testJoinWithNoStartJoinAfterReboot() { StartJoinRequest startJoinRequest1 = new StartJoinRequest(node1, randomLongBetween(1, 5)); Join v1 = cs1.handleStartJoin(startJoinRequest1); - cs1 = createCoordinationState(ps1, node1); + cs1 = createCoordinationState(psr1, node1, Settings.EMPTY); assertThat( expectThrows(CoordinationStateRejectedException.class, () -> cs1.handleJoin(v1)).getMessage(), containsString("ignored join as term has not been incremented yet after reboot") @@ -886,8 +906,104 @@ public void testSafety() { ).runRandomly(); } - public static CoordinationState createCoordinationState(PersistedState storage, DiscoveryNode localNode) { - return new CoordinationState(localNode, storage, ElectionStrategy.DEFAULT_INSTANCE); + public void testHandlePrePublishAndCommitWhenRemoteStateDisabled() { + final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); + persistedStateRegistry.addPersistedState(PersistedStateType.LOCAL, ps1); + final PersistedStateRegistry persistedStateRegistrySpy = Mockito.spy(persistedStateRegistry); + final CoordinationState coordinationState = createCoordinationState(persistedStateRegistrySpy, node1, Settings.EMPTY); + final VotingConfiguration initialConfig = VotingConfiguration.of(node1); + final ClusterState clusterState = clusterState(0L, 0L, node1, initialConfig, initialConfig, 42L); + coordinationState.handlePrePublish(clusterState); + Mockito.verify(persistedStateRegistrySpy, Mockito.times(0)).getPersistedState(PersistedStateType.REMOTE); + assertThat(persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE), nullValue()); + final ClusterState clusterState2 = clusterState(0L, 1L, node1, initialConfig, initialConfig, 42L); + coordinationState.handlePrePublish(clusterState2); + Mockito.verify(persistedStateRegistrySpy, Mockito.times(0)).getPersistedState(PersistedStateType.REMOTE); + assertThat(persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE), nullValue()); + coordinationState.handlePreCommit(); + Mockito.verify(persistedStateRegistrySpy, Mockito.times(0)).getPersistedState(PersistedStateType.REMOTE); + assertThat(persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE), nullValue()); + } + + public void testHandlePrePublishAndCommitWhenRemoteStateEnabled() throws IOException { + final RemoteClusterStateService remoteClusterStateService = Mockito.mock(RemoteClusterStateService.class); + final VotingConfiguration initialConfig = VotingConfiguration.of(node1); + final ClusterState clusterState = clusterState(0L, 0L, node1, initialConfig, initialConfig, 42L); + final String previousClusterUUID = "prev-cluster-uuid"; + final ClusterMetadataManifest manifest = new ClusterMetadataManifest( + 0L, + 0L, + randomAlphaOfLength(10), + randomAlphaOfLength(10), + Version.CURRENT, + randomAlphaOfLength(10), + false, + 1, + randomAlphaOfLength(10), + Collections.emptyList(), + randomAlphaOfLength(10), + true + ); + Mockito.when(remoteClusterStateService.writeFullMetadata(clusterState, previousClusterUUID)).thenReturn(manifest); + + final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); + persistedStateRegistry.addPersistedState(PersistedStateType.LOCAL, ps1); + persistedStateRegistry.addPersistedState( + PersistedStateType.REMOTE, + new 
RemotePersistedState(remoteClusterStateService, previousClusterUUID) + ); + + String randomRepoName = "randomRepoName"; + String stateRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + randomRepoName + ); + String stateRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + randomRepoName + ); + + Settings settings = Settings.builder() + .put("node.attr." + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, randomRepoName) + .put(stateRepoTypeAttributeKey, FsRepository.TYPE) + .put(stateRepoSettingsAttributeKeyPrefix + "location", "randomRepoPath") + .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) + .build(); + + final CoordinationState coordinationState = createCoordinationState(persistedStateRegistry, node1, settings); + coordinationState.handlePrePublish(clusterState); + Mockito.verify(remoteClusterStateService, Mockito.times(1)).writeFullMetadata(clusterState, previousClusterUUID); + assertThat(persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE).getLastAcceptedState(), equalTo(clusterState)); + + coordinationState.handlePreCommit(); + ClusterState committedClusterState = ClusterState.builder(clusterState) + .metadata(Metadata.builder(clusterState.metadata()).clusterUUIDCommitted(true).build()) + .build(); + // Mockito.verify(remoteClusterStateService, Mockito.times(1)).markLastStateAsCommitted(committedClusterState, manifest); + ArgumentCaptor<ClusterState> clusterStateCaptor = ArgumentCaptor.forClass(ClusterState.class); + verify(remoteClusterStateService, times(1)).markLastStateAsCommitted(clusterStateCaptor.capture(), any()); + assertThat(clusterStateCaptor.getValue().metadata().indices(), equalTo(committedClusterState.metadata().indices())); + assertThat(clusterStateCaptor.getValue().metadata().clusterUUID(), equalTo(committedClusterState.metadata().clusterUUID())); + assertThat(clusterStateCaptor.getValue().stateUUID(), equalTo(committedClusterState.stateUUID())); + assertThat( + clusterStateCaptor.getValue().coordinationMetadata().term(), + equalTo(committedClusterState.coordinationMetadata().term()) + ); + assertThat(clusterStateCaptor.getValue().version(), equalTo(committedClusterState.version())); + assertThat( + clusterStateCaptor.getValue().metadata().clusterUUIDCommitted(), + equalTo(committedClusterState.metadata().clusterUUIDCommitted()) + ); + } + + public static CoordinationState createCoordinationState( + PersistedStateRegistry persistedStateRegistry, + DiscoveryNode localNode, + Settings settings + ) { + return new CoordinationState(localNode, persistedStateRegistry, ElectionStrategy.DEFAULT_INSTANCE, settings); } public static ClusterState clusterState( @@ -950,4 +1066,10 @@ public static ClusterState setValue(ClusterState clusterState, long value) { public static long value(ClusterState clusterState) { return clusterState.metadata().persistentSettings().getAsLong("value", 0L); } + + private static PersistedStateRegistry createPersistedStateRegistry(ClusterState clusterState) { + final PersistedStateRegistry persistedStateRegistry = new PersistedStateRegistry(); + persistedStateRegistry.addPersistedState(PersistedStateType.LOCAL, new InMemoryPersistedState(0L, clusterState)); + return persistedStateRegistry; + } } diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java 
b/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java index 8c9095cd78914..a3129655148ab 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java @@ -46,12 +46,12 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.regex.Regex; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.Settings.Builder; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.set.Sets; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.discovery.DiscoveryModule; import org.opensearch.gateway.GatewayService; @@ -73,8 +73,8 @@ import static java.util.Collections.singleton; import static org.opensearch.cluster.coordination.AbstractCoordinatorTestCase.Cluster.DEFAULT_DELAY_VARIABILITY; import static org.opensearch.cluster.coordination.AbstractCoordinatorTestCase.Cluster.EXTREME_DELAY_VARIABILITY; -import static org.opensearch.cluster.coordination.Coordinator.PUBLISH_TIMEOUT_SETTING; import static org.opensearch.cluster.coordination.Coordinator.Mode.CANDIDATE; +import static org.opensearch.cluster.coordination.Coordinator.PUBLISH_TIMEOUT_SETTING; import static org.opensearch.cluster.coordination.ElectionSchedulerFactory.ELECTION_INITIAL_TIMEOUT_SETTING; import static org.opensearch.cluster.coordination.FollowersChecker.FOLLOWER_CHECK_INTERVAL_SETTING; import static org.opensearch.cluster.coordination.FollowersChecker.FOLLOWER_CHECK_RETRY_COUNT_SETTING; @@ -1256,7 +1256,7 @@ public void testCannotJoinClusterWithDifferentUUID() throws IllegalAccessExcepti final ClusterNode newNode = cluster1.new ClusterNode( nextNodeIndex.getAndIncrement(), nodeInOtherCluster.getLocalNode(), n -> cluster1.new MockPersistedState( - n, nodeInOtherCluster.persistedState, Function.identity(), Function.identity() + n, nodeInOtherCluster.persistedStateRegistry, Function.identity(), Function.identity() ), nodeInOtherCluster.nodeSettings, () -> new StatusInfo(StatusInfo.Status.HEALTHY, "healthy-info") ); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/ElectionSchedulerFactoryTests.java b/server/src/test/java/org/opensearch/cluster/coordination/ElectionSchedulerFactoryTests.java index 997bdc2cf362b..164f802fdae38 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/ElectionSchedulerFactoryTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/ElectionSchedulerFactoryTests.java @@ -32,10 +32,10 @@ package org.opensearch.cluster.coordination; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.Settings.Builder; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.lease.Releasable; import org.opensearch.test.OpenSearchTestCase; import java.util.concurrent.atomic.AtomicBoolean; diff --git a/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java b/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java index be211a16cdd72..a106706c00732 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java +++ 
b/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java @@ -39,22 +39,24 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; -import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.Settings.Builder; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse.Empty; import org.opensearch.monitor.NodeHealthService; import org.opensearch.monitor.StatusInfo; -import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.EqualsHashCodeTestUtils; import org.opensearch.test.EqualsHashCodeTestUtils.CopyFunction; +import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.test.transport.MockTransport; import org.opensearch.threadpool.ThreadPool.Names; import org.opensearch.transport.ConnectTransportException; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequest; -import org.opensearch.transport.TransportResponse; -import org.opensearch.transport.TransportResponse.Empty; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; @@ -95,7 +97,7 @@ public class FollowersCheckerTests extends OpenSearchTestCase { public void testChecksExpectedNodes() { final DiscoveryNode localNode = new DiscoveryNode("local-node", buildNewFakeTransportAddress(), Version.CURRENT); final Settings settings = Settings.builder().put(NODE_NAME_SETTING.getKey(), localNode.getName()).build(); - + final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); final DiscoveryNodes[] discoveryNodesHolder = new DiscoveryNodes[] { DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).build() }; @@ -123,13 +125,15 @@ protected void onSendRequest(long requestId, String action, TransportRequest req TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); final FollowersChecker followersChecker = new FollowersChecker( settings, + clusterSettings, transportService, fcr -> { assert false : fcr; }, (node, reason) -> { @@ -255,6 +259,7 @@ public void testFailsNodeThatDisconnects() { final DiscoveryNode localNode = new DiscoveryNode("local-node", buildNewFakeTransportAddress(), Version.CURRENT); final DiscoveryNode otherNode = new DiscoveryNode("other-node", buildNewFakeTransportAddress(), Version.CURRENT); final Settings settings = Settings.builder().put(NODE_NAME_SETTING.getKey(), localNode.getName()).build(); + final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); final DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue(settings, random()); final MockTransport mockTransport = new MockTransport() { @@ -285,7 +290,8 @@ public String toString() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); transportService.start(); 
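The ClusterSettings instance now threaded through these tests exists because the follower-check timeout became a dynamic, bounded setting (see CoordinationCheckerSettingsTests above, which asserts the [1ms, 60000ms] range). A minimal sketch of how such a setting is plausibly declared; the key and default value here are assumptions, and only the bounds are taken from the test expectations:

    public static final Setting<TimeValue> FOLLOWER_CHECK_TIMEOUT_SETTING = Setting.timeSetting(
        "cluster.fault_detection.follower_check.timeout", // assumed key
        TimeValue.timeValueMillis(10_000),                // assumed default
        TimeValue.timeValueMillis(1),                     // lower bound asserted by the tests
        TimeValue.timeValueMillis(60_000),                // upper bound asserted by the tests
        Setting.Property.Dynamic,                         // updatable via the cluster settings API
        Setting.Property.NodeScope
    );

The checker would then pick up runtime updates through a consumer registered on the ClusterSettings it is constructed with, e.g. clusterSettings.addSettingsUpdateConsumer(FOLLOWER_CHECK_TIMEOUT_SETTING, ...), which is why every FollowersChecker construction in this file now takes the extra argument.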
transportService.acceptIncomingRequests(); @@ -294,6 +300,7 @@ public String toString() { final FollowersChecker followersChecker = new FollowersChecker( settings, + clusterSettings, transportService, fcr -> { assert false : fcr; }, (node, reason) -> { @@ -333,6 +340,7 @@ private void testBehaviourOfFailingNode( final DiscoveryNode localNode = new DiscoveryNode("local-node", buildNewFakeTransportAddress(), Version.CURRENT); final DiscoveryNode otherNode = new DiscoveryNode("other-node", buildNewFakeTransportAddress(), Version.CURRENT); final Settings settings = Settings.builder().put(NODE_NAME_SETTING.getKey(), localNode.getName()).put(testSettings).build(); + final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); final DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue(settings, random()); final MockTransport mockTransport = new MockTransport() { @@ -371,7 +379,8 @@ public String toString() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); @@ -380,6 +389,7 @@ public String toString() { final FollowersChecker followersChecker = new FollowersChecker( settings, + clusterSettings, transportService, fcr -> { assert false : fcr; }, (node, reason) -> { @@ -460,6 +470,7 @@ public void testUnhealthyNodeRejectsImmediately() { final DiscoveryNode leader = new DiscoveryNode("leader", buildNewFakeTransportAddress(), Version.CURRENT); final DiscoveryNode follower = new DiscoveryNode("follower", buildNewFakeTransportAddress(), Version.CURRENT); final Settings settings = Settings.builder().put(NODE_NAME_SETTING.getKey(), follower.getName()).build(); + final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); final DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue(settings, random()); final MockTransport mockTransport = new MockTransport() { @@ -475,7 +486,8 @@ protected void onSendRequest(long requestId, String action, TransportRequest req TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> follower, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); @@ -483,7 +495,7 @@ protected void onSendRequest(long requestId, String action, TransportRequest req final AtomicBoolean calledCoordinator = new AtomicBoolean(); final AtomicReference<RuntimeException> coordinatorException = new AtomicReference<>(); - final FollowersChecker followersChecker = new FollowersChecker(settings, transportService, fcr -> { + final FollowersChecker followersChecker = new FollowersChecker(settings, clusterSettings, transportService, fcr -> { assertTrue(calledCoordinator.compareAndSet(false, true)); final RuntimeException exception = coordinatorException.get(); if (exception != null) { @@ -531,6 +543,7 @@ public void testResponder() { final DiscoveryNode leader = new DiscoveryNode("leader", buildNewFakeTransportAddress(), Version.CURRENT); final DiscoveryNode follower = new DiscoveryNode("follower", buildNewFakeTransportAddress(), Version.CURRENT); final Settings settings = Settings.builder().put(NODE_NAME_SETTING.getKey(), follower.getName()).build(); + final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); final DeterministicTaskQueue deterministicTaskQueue = new 
DeterministicTaskQueue(settings, random()); final MockTransport mockTransport = new MockTransport() { @@ -546,7 +559,8 @@ protected void onSendRequest(long requestId, String action, TransportRequest req TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> follower, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); @@ -554,7 +568,7 @@ protected void onSendRequest(long requestId, String action, TransportRequest req final AtomicBoolean calledCoordinator = new AtomicBoolean(); final AtomicReference<RuntimeException> coordinatorException = new AtomicReference<>(); - final FollowersChecker followersChecker = new FollowersChecker(settings, transportService, fcr -> { + final FollowersChecker followersChecker = new FollowersChecker(settings, clusterSettings, transportService, fcr -> { assertTrue(calledCoordinator.compareAndSet(false, true)); final RuntimeException exception = coordinatorException.get(); if (exception != null) { @@ -694,6 +708,7 @@ public void testPreferClusterManagerNodes() { DiscoveryNodes discoveryNodes = discoNodesBuilder.localNodeId(nodes.get(0).getId()).build(); CapturingTransport capturingTransport = new CapturingTransport(); final Settings settings = Settings.builder().put(NODE_NAME_SETTING.getKey(), nodes.get(0).getName()).build(); + final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); final DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue(settings, random()); TransportService transportService = capturingTransport.createTransportService( Settings.EMPTY, @@ -701,17 +716,12 @@ public void testPreferClusterManagerNodes() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> nodes.get(0), null, - emptySet() - ); - final FollowersChecker followersChecker = new FollowersChecker( - Settings.EMPTY, - transportService, - fcr -> { assert false : fcr; }, - (node, reason) -> { - assert false : node; - }, - () -> new StatusInfo(HEALTHY, "healthy-info") + emptySet(), + NoopTracer.INSTANCE ); + final FollowersChecker followersChecker = new FollowersChecker(Settings.EMPTY, clusterSettings, transportService, fcr -> { + assert false : fcr; + }, (node, reason) -> { assert false : node; }, () -> new StatusInfo(HEALTHY, "healthy-info")); followersChecker.setCurrentNodes(discoveryNodes); List<DiscoveryNode> followerTargets = Stream.of(capturingTransport.getCapturedRequestsAndClear()) .map(cr -> cr.node) @@ -747,7 +757,7 @@ private static Settings randomSettings() { settingsBuilder.put(FOLLOWER_CHECK_INTERVAL_SETTING.getKey(), randomIntBetween(100, 100000) + "ms"); } if (randomBoolean()) { - settingsBuilder.put(FOLLOWER_CHECK_TIMEOUT_SETTING.getKey(), randomIntBetween(1, 100000) + "ms"); + settingsBuilder.put(FOLLOWER_CHECK_TIMEOUT_SETTING.getKey(), randomIntBetween(1, 60000) + "ms"); } return settingsBuilder.build(); } diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java index 27146829ad8da..78c3b5d45a9ab 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java @@ -33,7 +33,6 @@ import org.apache.logging.log4j.Level; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionListenerResponseHandler; import 
org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.ClusterName; @@ -41,19 +40,26 @@ import org.opensearch.cluster.NotClusterManagerException; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.SetOnce; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.common.settings.Settings; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.monitor.StatusInfo; +import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.test.transport.CapturingTransport.CapturedRequest; import org.opensearch.test.transport.MockTransport; +import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.BytesTransportRequest; import org.opensearch.transport.RemoteTransportException; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequest; -import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -73,6 +79,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.core.Is.is; +import static org.mockito.Mockito.mock; public class JoinHelperTests extends OpenSearchTestCase { private final NamedWriteableRegistry namedWriteableRegistry = DEFAULT_NAMED_WRITABLE_REGISTRY; @@ -90,13 +97,15 @@ public void testJoinDeduplication() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> localNode, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); JoinHelper joinHelper = new JoinHelper( Settings.EMPTY, null, null, transportService, + buildRemoteStoreNodeService(transportService, deterministicTaskQueue.getThreadPool()), () -> 0L, () -> null, (joinRequest, joinCallback) -> { @@ -274,7 +283,8 @@ public void testJoinFailureOnUnhealthyNodes() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> localNode, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); AtomicReference<StatusInfo> nodeHealthServiceStatus = new AtomicReference<>(new StatusInfo(UNHEALTHY, "unhealthy-info")); JoinHelper joinHelper = new JoinHelper( @@ -282,6 +292,7 @@ public void testJoinFailureOnUnhealthyNodes() { null, null, transportService, + buildRemoteStoreNodeService(transportService, deterministicTaskQueue.getThreadPool()), () -> 0L, () -> null, (joinRequest, joinCallback) -> { @@ -464,7 +475,8 @@ private TestClusterSetup getTestClusterSetup(Version version, boolean isCapturin TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> localNode, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); } else { transportService = mockTransport.createTransportService( @@ -473,7 +485,8 @@ private TestClusterSetup getTestClusterSetup(Version version, boolean isCapturin TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> localNode, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); } JoinHelper joinHelper = new JoinHelper( @@ -481,6 +494,7 @@ private 
TestClusterSetup getTestClusterSetup(Version version, boolean isCapturin null, null, transportService, + buildRemoteStoreNodeService(transportService, deterministicTaskQueue.getThreadPool()), () -> 0L, () -> localClusterState, (joinRequest, joinCallback) -> { @@ -500,6 +514,18 @@ private TestClusterSetup getTestClusterSetup(Version version, boolean isCapturin return new TestClusterSetup(deterministicTaskQueue, localNode, transportService, localClusterState, joinHelper, capturingTransport); } + private RemoteStoreNodeService buildRemoteStoreNodeService(TransportService transportService, ThreadPool threadPool) { + RepositoriesService repositoriesService = new RepositoriesService( + Settings.EMPTY, + mock(ClusterService.class), + transportService, + Collections.emptyMap(), + Collections.emptyMap(), + threadPool + ); + return new RemoteStoreNodeService(new SetOnce<>(repositoriesService)::get, threadPool); + } + private static class TestClusterSetup { public final DeterministicTaskQueue deterministicTaskQueue; public final DiscoveryNode localNode; diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java index d57fd6c1abd7a..be25bee5fe7b1 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java @@ -42,22 +42,40 @@ import org.opensearch.cluster.decommission.NodeDecommissionedException; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.RepositoriesMetadata; +import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.RerouteService; import org.opensearch.cluster.routing.allocation.AllocationService; +import org.opensearch.common.SetOnce; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; +import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Locale; import java.util.Map; - -import static org.hamcrest.Matchers.is; +import java.util.stream.Collectors; + +import static org.opensearch.common.util.FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static 
org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; import static org.opensearch.test.VersionUtils.allVersions; import static org.opensearch.test.VersionUtils.maxCompatibleVersion; import static org.opensearch.test.VersionUtils.randomCompatibleVersion; @@ -65,6 +83,7 @@ import static org.opensearch.test.VersionUtils.randomVersionBetween; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -109,14 +128,20 @@ public void testPreventJoinClusterWithUnsupportedNodeVersions() { builder.add(new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), randomCompatibleVersion(random(), version))); DiscoveryNodes nodes = builder.build(); + Metadata metadata = Metadata.EMPTY_METADATA; + final Version maxNodeVersion = nodes.getMaxNodeVersion(); final Version minNodeVersion = nodes.getMinNodeVersion(); - final Version tooLow = LegacyESVersion.fromString("6.7.0"); + final DiscoveryNode tooLowJoiningNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + LegacyESVersion.fromString("6.7.0") + ); expectThrows(IllegalStateException.class, () -> { if (randomBoolean()) { - JoinTaskExecutor.ensureNodesCompatibility(tooLow, nodes); + JoinTaskExecutor.ensureNodesCompatibility(tooLowJoiningNode, nodes, metadata); } else { - JoinTaskExecutor.ensureNodesCompatibility(tooLow, minNodeVersion, maxNodeVersion); + JoinTaskExecutor.ensureNodesCompatibility(tooLowJoiningNode, nodes, metadata, minNodeVersion, maxNodeVersion); } }); @@ -134,11 +159,11 @@ public void testPreventJoinClusterWithUnsupportedNodeVersions() { minGoodVersion = minCompatVersion.before(allVersions().get(0)) ? 
allVersions().get(0) : minCompatVersion; } final Version justGood = randomVersionBetween(random(), minGoodVersion, maxCompatibleVersion(minNodeVersion)); - + final DiscoveryNode justGoodJoiningNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), justGood); if (randomBoolean()) { - JoinTaskExecutor.ensureNodesCompatibility(justGood, nodes); + JoinTaskExecutor.ensureNodesCompatibility(justGoodJoiningNode, nodes, metadata); } else { - JoinTaskExecutor.ensureNodesCompatibility(justGood, minNodeVersion, maxNodeVersion); + JoinTaskExecutor.ensureNodesCompatibility(justGoodJoiningNode, nodes, metadata, minNodeVersion, maxNodeVersion); } } @@ -174,8 +199,16 @@ public void testUpdatesNodeWithNewRoles() throws Exception { final AllocationService allocationService = mock(AllocationService.class); when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]); final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); - - final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor(Settings.EMPTY, allocationService, logger, rerouteService); + final RemoteStoreNodeService remoteStoreNodeService = mock(RemoteStoreNodeService.class); + when(remoteStoreNodeService.updateRepositoriesMetadata(any(), any())).thenReturn(new RepositoriesMetadata(Collections.emptyList())); + + final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor( + Settings.EMPTY, + allocationService, + logger, + rerouteService, + remoteStoreNodeService + ); final DiscoveryNode clusterManagerNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); @@ -271,8 +304,15 @@ public void testJoinFailedForDecommissionedNode() throws Exception { final AllocationService allocationService = mock(AllocationService.class); when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]); final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); - - final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor(Settings.EMPTY, allocationService, logger, rerouteService); + final RemoteStoreNodeService remoteStoreNodeService = mock(RemoteStoreNodeService.class); + + final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor( + Settings.EMPTY, + allocationService, + logger, + rerouteService, + remoteStoreNodeService + ); final DiscoveryNode clusterManagerNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); @@ -327,6 +367,467 @@ public void testJoinClusterWithDecommissionFailed() { JoinTaskExecutor.ensureNodeCommissioned(discoveryNode, metadata); } + public void testJoinClusterWithNonRemoteStoreNodeJoining() { + DiscoveryNode joiningNode = newDiscoveryNode(Collections.emptyMap()); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(joiningNode).build()) + .build(); + + JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()); + } + + public void testJoinClusterWithRemoteStoreNodeJoining() { + DiscoveryNode joiningNode = newDiscoveryNode(remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO)); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(joiningNode).build()) + .build(); + + JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()); + } + + public void 
testJoinClusterWithNonRemoteStoreNodeJoiningNonRemoteStoreCluster() { + final DiscoveryNode existingNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) + .build(); + + DiscoveryNode joiningNode = newDiscoveryNode(Collections.emptyMap()); + + JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()); + } + + public void testPreventJoinClusterWithRemoteStoreNodeJoiningNonRemoteStoreCluster() { + + final DiscoveryNode existingNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) + .build(); + + DiscoveryNode joiningNode = newDiscoveryNode(remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO)); + Exception e = assertThrows( + IllegalStateException.class, + () -> JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()) + ); + assertTrue(e.getMessage().equals("a remote store node [" + joiningNode + "] is trying to join a non remote " + "store cluster")); + } + + public void testRemoteStoreNodeJoiningNonRemoteStoreClusterMixedMode() { + final DiscoveryNode existingNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); + final Settings settings = Settings.builder() + .put(MIGRATION_DIRECTION_SETTING.getKey(), RemoteStoreNodeService.Direction.REMOTE_STORE) + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") + .build(); + final Settings nodeSettings = Settings.builder().put(REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build(); + FeatureFlags.initializeFeatureFlags(nodeSettings); + Metadata metadata = Metadata.builder().persistentSettings(settings).build(); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) + .metadata(metadata) + .build(); + + DiscoveryNode joiningNode = newDiscoveryNode(remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO)); + JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()); + } + + public void testAllTypesNodeJoiningRemoteStoreClusterMixedMode() { + final DiscoveryNode docrepNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNode remoteNode = newDiscoveryNode(remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO)); + final Settings settings = Settings.builder() + .put(MIGRATION_DIRECTION_SETTING.getKey(), RemoteStoreNodeService.Direction.REMOTE_STORE) + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") + .build(); + final Settings nodeSettings = Settings.builder().put(REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build(); + FeatureFlags.initializeFeatureFlags(nodeSettings); + Metadata metadata = Metadata.builder().persistentSettings(settings).build(); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes( + DiscoveryNodes.builder() + .add(docrepNode) + .localNodeId(docrepNode.getId()) + .add(remoteNode) + .localNodeId(remoteNode.getId()) + .build() + ) + .metadata(metadata) + .build(); + + // compatible remote node should be able to join a mixed mode
having a remote node + DiscoveryNode goodRemoteNode = newDiscoveryNode(remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO)); + JoinTaskExecutor.ensureNodesCompatibility(goodRemoteNode, currentState.getNodes(), currentState.metadata()); + + // incompatible node should not be able to join a mixed mode + DiscoveryNode badRemoteNode = newDiscoveryNode(remoteStoreNodeAttributes(TRANSLOG_REPO, TRANSLOG_REPO)); + assertThrows( + IllegalStateException.class, + () -> JoinTaskExecutor.ensureNodesCompatibility(badRemoteNode, currentState.getNodes(), currentState.metadata()) + ); + + // DocRep node should be able to join a mixed mode + DiscoveryNode docrepNode2 = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); + JoinTaskExecutor.ensureNodesCompatibility(docrepNode2, currentState.getNodes(), currentState.metadata()); + } + + public void testJoinClusterWithRemoteStoreNodeJoiningRemoteStoreCluster() { + final DiscoveryNode existingNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) + .build(); + + DiscoveryNode joiningNode = newDiscoveryNode(remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO)); + JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()); + } + + public void testPreventJoinClusterWithRemoteStoreNodeWithDifferentAttributesJoiningRemoteStoreCluster() { + Map<String, String> existingNodeAttributes = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO); + Map<String, String> remoteStoreNodeAttributes = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO); + final DiscoveryNode existingNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + existingNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) + .build(); + + for (Map.Entry<String, String> nodeAttribute : existingNodeAttributes.entrySet()) { + if (nodeAttribute.getKey() != REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY + && nodeAttribute.getKey() != REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY + && nodeAttribute.getKey() != REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY) { + remoteStoreNodeAttributes.put(nodeAttribute.getKey(), nodeAttribute.getValue() + "-new"); + validateAttributes(remoteStoreNodeAttributes, currentState, existingNode); + remoteStoreNodeAttributes.put(nodeAttribute.getKey(), nodeAttribute.getValue()); + } + } + } + + public void testPreventJoinClusterWithRemoteStoreNodeWithDifferentNameAttributesJoiningRemoteStoreCluster() { + Map<String, String> existingNodeAttributes = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO); + final DiscoveryNode existingNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + existingNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) + .build(); + + for (Map.Entry<String, String> nodeAttribute : existingNodeAttributes.entrySet()) { 
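+ // For each repository name attribute (segment, translog, or cluster state), swap in a
+ // different repository name and expect validateAttributes to reject the joining node.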
+ if (REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY.equals(nodeAttribute.getKey())) { + Map<String, String> remoteStoreNodeAttributes = remoteStoreNodeAttributes(SEGMENT_REPO + "new", TRANSLOG_REPO); + validateAttributes(remoteStoreNodeAttributes, currentState, existingNode); + } else if (REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY.equals(nodeAttribute.getKey())) { + Map<String, String> remoteStoreNodeAttributes = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO + "new"); + validateAttributes(remoteStoreNodeAttributes, currentState, existingNode); + } else if (REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY.equals(nodeAttribute.getKey())) { + Map<String, String> remoteStoreNodeAttributes = remoteStoreNodeAttributes( + SEGMENT_REPO, + TRANSLOG_REPO, + CLUSTER_STATE_REPO + "new" + ); + validateAttributes(remoteStoreNodeAttributes, currentState, existingNode); + } + } + } + + public void testPreventJoinClusterWithNonRemoteStoreNodeJoiningRemoteStoreCluster() { + final DiscoveryNode existingNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) + .build(); + + DiscoveryNode joiningNode = newDiscoveryNode(Collections.emptyMap()); + Exception e = assertThrows( + IllegalStateException.class, + () -> JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()) + ); + assertTrue(e.getMessage().equals("a non remote store node [" + joiningNode + "] is trying to join a remote " + "store cluster")); + } + + public void testPreventJoinClusterWithRemoteStoreNodeWithPartialAttributesJoiningRemoteStoreCluster() { + Map<String, String> existingNodeAttributes = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO); + Map<String, String> remoteStoreNodeAttributes = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO); + final DiscoveryNode existingNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + existingNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) + .build(); + + for (Map.Entry<String, String> nodeAttribute : existingNodeAttributes.entrySet()) { + remoteStoreNodeAttributes.put(nodeAttribute.getKey(), null); + DiscoveryNode joiningNode = newDiscoveryNode(remoteStoreNodeAttributes); + Exception e = assertThrows( + IllegalStateException.class, + () -> JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()) + ); + assertTrue( + e.getMessage().equals("joining node [" + joiningNode + "] doesn't have the node attribute [" + nodeAttribute.getKey() + "]") + ); + + remoteStoreNodeAttributes.put(nodeAttribute.getKey(), nodeAttribute.getValue()); + } + } + + public void testUpdatesClusterStateWithSingleNodeCluster() throws Exception { + Map<String, String> remoteStoreNodeAttributes = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO); + final AllocationService allocationService = mock(AllocationService.class); + when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]); + final RerouteService rerouteService 
= (reason, priority, listener) -> listener.onResponse(null); + final RemoteStoreNodeService remoteStoreNodeService = new RemoteStoreNodeService( + new SetOnce<>(mock(RepositoriesService.class))::get, + null + ); + + final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor( + Settings.EMPTY, + allocationService, + logger, + rerouteService, + remoteStoreNodeService + ); + + final DiscoveryNode clusterManagerNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes( + DiscoveryNodes.builder() + .add(clusterManagerNode) + .localNodeId(clusterManagerNode.getId()) + .clusterManagerNodeId(clusterManagerNode.getId()) + ) + .build(); + + final ClusterStateTaskExecutor.ClusterTasksResult<JoinTaskExecutor.Task> result = joinTaskExecutor.execute( + clusterState, + List.of(new JoinTaskExecutor.Task(clusterManagerNode, "elect leader")) + ); + assertThat(result.executionResults.entrySet(), hasSize(1)); + final ClusterStateTaskExecutor.TaskResult taskResult = result.executionResults.values().iterator().next(); + assertTrue(taskResult.isSuccess()); + validateRepositoryMetadata(result.resultingState, clusterManagerNode, 3); + } + + public void testUpdatesClusterStateWithMultiNodeCluster() throws Exception { + Map<String, String> remoteStoreNodeAttributes = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO); + final AllocationService allocationService = mock(AllocationService.class); + when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]); + final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); + final RemoteStoreNodeService remoteStoreNodeService = new RemoteStoreNodeService( + new SetOnce<>(mock(RepositoriesService.class))::get, + null + ); + + final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor( + Settings.EMPTY, + allocationService, + logger, + rerouteService, + remoteStoreNodeService + ); + + final DiscoveryNode clusterManagerNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + final RepositoryMetadata segmentRepositoryMetadata = buildRepositoryMetadata(clusterManagerNode, SEGMENT_REPO); + final RepositoryMetadata translogRepositoryMetadata = buildRepositoryMetadata(clusterManagerNode, TRANSLOG_REPO); + List<RepositoryMetadata> repositoriesMetadata = new ArrayList<>() { + { + add(segmentRepositoryMetadata); + add(translogRepositoryMetadata); + } + }; + + final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes( + DiscoveryNodes.builder() + .add(clusterManagerNode) + .localNodeId(clusterManagerNode.getId()) + .clusterManagerNodeId(clusterManagerNode.getId()) + ) + .metadata(Metadata.builder().putCustom(RepositoriesMetadata.TYPE, new RepositoriesMetadata(repositoriesMetadata))) + .build(); + + final DiscoveryNode joiningNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + final ClusterStateTaskExecutor.ClusterTasksResult<JoinTaskExecutor.Task> result = joinTaskExecutor.execute( + clusterState, + List.of(new JoinTaskExecutor.Task(joiningNode, "test")) + ); + assertThat(result.executionResults.entrySet(), hasSize(1)); + 
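// Since the joining node carries the same remote store attributes as the existing node, the join should succeed and the repositories already present in the cluster state should not be duplicated. +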
final ClusterStateTaskExecutor.TaskResult taskResult = result.executionResults.values().iterator().next(); + assertTrue(taskResult.isSuccess()); + validateRepositoryMetadata(result.resultingState, clusterManagerNode, 3); + } + + public void testUpdatesClusterStateWithSingleNodeClusterAndSameRepository() throws Exception { + Map<String, String> remoteStoreNodeAttributes = remoteStoreNodeAttributes(COMMON_REPO, COMMON_REPO); + final AllocationService allocationService = mock(AllocationService.class); + when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]); + final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); + final RemoteStoreNodeService remoteStoreNodeService = new RemoteStoreNodeService( + new SetOnce<>(mock(RepositoriesService.class))::get, + null + ); + + final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor( + Settings.EMPTY, + allocationService, + logger, + rerouteService, + remoteStoreNodeService + ); + + final DiscoveryNode clusterManagerNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes( + DiscoveryNodes.builder() + .add(clusterManagerNode) + .localNodeId(clusterManagerNode.getId()) + .clusterManagerNodeId(clusterManagerNode.getId()) + ) + .build(); + + final ClusterStateTaskExecutor.ClusterTasksResult<JoinTaskExecutor.Task> result = joinTaskExecutor.execute( + clusterState, + List.of(new JoinTaskExecutor.Task(clusterManagerNode, "elect leader")) + ); + assertThat(result.executionResults.entrySet(), hasSize(1)); + final ClusterStateTaskExecutor.TaskResult taskResult = result.executionResults.values().iterator().next(); + assertTrue(taskResult.isSuccess()); + validateRepositoryMetadata(result.resultingState, clusterManagerNode, 2); + } + + public void testUpdatesClusterStateWithMultiNodeClusterAndSameRepository() throws Exception { + Map<String, String> remoteStoreNodeAttributes = remoteStoreNodeAttributes(COMMON_REPO, COMMON_REPO); + final AllocationService allocationService = mock(AllocationService.class); + when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]); + final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); + final RemoteStoreNodeService remoteStoreNodeService = new RemoteStoreNodeService( + new SetOnce<>(mock(RepositoriesService.class))::get, + null + ); + + final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor( + Settings.EMPTY, + allocationService, + logger, + rerouteService, + remoteStoreNodeService + ); + + final DiscoveryNode clusterManagerNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + final RepositoryMetadata segmentRepositoryMetadata = buildRepositoryMetadata(clusterManagerNode, COMMON_REPO); + List<RepositoryMetadata> repositoriesMetadata = new ArrayList<>() { + { + add(segmentRepositoryMetadata); + } + }; + + final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes( + DiscoveryNodes.builder() + .add(clusterManagerNode) + .localNodeId(clusterManagerNode.getId()) + .clusterManagerNodeId(clusterManagerNode.getId()) + ) + 
.metadata(Metadata.builder().putCustom(RepositoriesMetadata.TYPE, new RepositoriesMetadata(repositoriesMetadata))) + .build(); + + final DiscoveryNode joiningNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + final ClusterStateTaskExecutor.ClusterTasksResult<JoinTaskExecutor.Task> result = joinTaskExecutor.execute( + clusterState, + List.of(new JoinTaskExecutor.Task(joiningNode, "test")) + ); + assertThat(result.executionResults.entrySet(), hasSize(1)); + final ClusterStateTaskExecutor.TaskResult taskResult = result.executionResults.values().iterator().next(); + assertTrue(taskResult.isSuccess()); + validateRepositoryMetadata(result.resultingState, clusterManagerNode, 2); + } + + // Asserts that the updated cluster state contains exactly the expected system repositories, ignoring repository generations. + private void validateRepositoryMetadata(ClusterState updatedState, DiscoveryNode existingNode, int expectedRepositories) + throws Exception { + + final RepositoriesMetadata repositoriesMetadata = updatedState.metadata().custom(RepositoriesMetadata.TYPE); + assertTrue(repositoriesMetadata.repositories().size() == expectedRepositories); + if (repositoriesMetadata.repositories().size() == 2 || repositoriesMetadata.repositories().size() == 3) { + final RepositoryMetadata segmentRepositoryMetadata = buildRepositoryMetadata(existingNode, SEGMENT_REPO); + final RepositoryMetadata translogRepositoryMetadata = buildRepositoryMetadata(existingNode, TRANSLOG_REPO); + for (RepositoryMetadata repositoryMetadata : repositoriesMetadata.repositories()) { + if (repositoryMetadata.name().equals(segmentRepositoryMetadata.name())) { + assertTrue(segmentRepositoryMetadata.equalsIgnoreGenerations(repositoryMetadata)); + } else if (repositoryMetadata.name().equals(translogRepositoryMetadata.name())) { + assertTrue(translogRepositoryMetadata.equalsIgnoreGenerations(repositoryMetadata)); + } else if (repositoriesMetadata.repositories().size() == 3) { + final RepositoryMetadata clusterStateRepoMetadata = buildRepositoryMetadata(existingNode, CLUSTER_STATE_REPO); + assertTrue(clusterStateRepoMetadata.equalsIgnoreGenerations(repositoryMetadata)); + } + } + } else if (repositoriesMetadata.repositories().size() == 1) { + final RepositoryMetadata repositoryMetadata = buildRepositoryMetadata(existingNode, COMMON_REPO); + assertTrue(repositoryMetadata.equalsIgnoreGenerations(repositoriesMetadata.repositories().get(0))); + } else { + throw new Exception("Unexpected number of repositories [" + repositoriesMetadata.repositories().size() + "] in the updated cluster state"); + } + } + private DiscoveryNode newDiscoveryNode(Map<String, String> attributes) { return new DiscoveryNode( randomAlphaOfLength(10), @@ -337,4 +838,99 @@ private DiscoveryNode newDiscoveryNode(Map<String, String> attributes) { Version.CURRENT ); } + + private static final String SEGMENT_REPO = "segment-repo"; + private static final String TRANSLOG_REPO = "translog-repo"; + private static final String CLUSTER_STATE_REPO = "cluster-state-repo"; + private static final String COMMON_REPO = "remote-repo"; + + private Map<String, String> remoteStoreNodeAttributes(String segmentRepoName, String translogRepoName) { + return remoteStoreNodeAttributes(segmentRepoName, translogRepoName, CLUSTER_STATE_REPO); + } + + private Map<String, String> remoteStoreNodeAttributes(String segmentRepoName, String translogRepoName, String clusterStateRepo) { + String segmentRepositoryTypeAttributeKey = String.format( + Locale.getDefault(), + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + segmentRepoName + ); + String
segmentRepositorySettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + segmentRepoName + ); + String translogRepositoryTypeAttributeKey = String.format( + Locale.getDefault(), + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + translogRepoName + ); + String translogRepositorySettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + translogRepoName + ); + String clusterStateRepositoryTypeAttributeKey = String.format( + Locale.getDefault(), + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + clusterStateRepo + ); + String clusterStateRepositorySettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + clusterStateRepo + ); + + return new HashMap<>() { + { + put(REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, segmentRepoName); + put(segmentRepositoryTypeAttributeKey, "s3"); + put(segmentRepositorySettingsAttributeKeyPrefix + "bucket", "segment_bucket"); + put(segmentRepositorySettingsAttributeKeyPrefix + "base_path", "/segment/path"); + put(REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, translogRepoName); + putIfAbsent(translogRepositoryTypeAttributeKey, "s3"); + putIfAbsent(translogRepositorySettingsAttributeKeyPrefix + "bucket", "translog_bucket"); + putIfAbsent(translogRepositorySettingsAttributeKeyPrefix + "base_path", "/translog/path"); + put(REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, clusterStateRepo); + putIfAbsent(clusterStateRepositoryTypeAttributeKey, "s3"); + putIfAbsent(clusterStateRepositorySettingsAttributeKeyPrefix + "bucket", "state_bucket"); + putIfAbsent(clusterStateRepositorySettingsAttributeKeyPrefix + "base_path", "/state/path"); + } + }; + } + + private void validateAttributes(Map<String, String> remoteStoreNodeAttributes, ClusterState currentState, DiscoveryNode existingNode) { + DiscoveryNode joiningNode = newDiscoveryNode(remoteStoreNodeAttributes); + Exception e = assertThrows( + IllegalStateException.class, + () -> JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()) + ); + assertEquals( + e.getMessage(), + "a remote store node [" + + joiningNode + + "] is trying to join a remote store cluster with incompatible node attributes in " + + "comparison with existing node [" + + existingNode + + "]" + ); + } + + private RepositoryMetadata buildRepositoryMetadata(DiscoveryNode node, String name) { + Map<String, String> nodeAttributes = node.getAttributes(); + String type = nodeAttributes.get(String.format(Locale.getDefault(), REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, name)); + + String settingsAttributeKeyPrefix = String.format(Locale.getDefault(), REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, name); + Map<String, String> settingsMap = node.getAttributes() + .keySet() + .stream() + .filter(key -> key.startsWith(settingsAttributeKeyPrefix)) + .collect(Collectors.toMap(key -> key.replace(settingsAttributeKeyPrefix, ""), key -> node.getAttributes().get(key))); + + Settings.Builder settings = Settings.builder(); + settingsMap.entrySet().forEach(entry -> settings.put(entry.getKey(), entry.getValue())); + + settings.put(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING.getKey(), true); + + return new RepositoryMetadata(name, type, settings.build()); + } } diff --git a/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java 
b/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java index ac6d885229f9e..fe65058333116 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java @@ -38,20 +38,22 @@ import org.opensearch.cluster.coordination.LeaderChecker.LeaderCheckRequest; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; -import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse.Empty; import org.opensearch.monitor.StatusInfo; -import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.EqualsHashCodeTestUtils; import org.opensearch.test.EqualsHashCodeTestUtils.CopyFunction; +import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.test.transport.MockTransport; import org.opensearch.threadpool.ThreadPool.Names; import org.opensearch.transport.ConnectTransportException; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequest; -import org.opensearch.transport.TransportResponse; -import org.opensearch.transport.TransportResponse.Empty; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; @@ -116,6 +118,7 @@ public void testFollowerBehaviour() { final AtomicBoolean allResponsesFail = new AtomicBoolean(); final Settings settings = settingsBuilder.build(); + final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); logger.info("--> using {}", settings); final DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue(settings, random()); @@ -165,14 +168,15 @@ public String toString() { NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); final AtomicBoolean leaderFailed = new AtomicBoolean(); - final LeaderChecker leaderChecker = new LeaderChecker(settings, transportService, e -> { + final LeaderChecker leaderChecker = new LeaderChecker(settings, clusterSettings, transportService, e -> { assertThat(e.getMessage(), matchesRegex("node \\[.*\\] failed \\[[1-9][0-9]*\\] consecutive checks")); assertTrue(leaderFailed.compareAndSet(false, true)); }, () -> new StatusInfo(StatusInfo.Status.HEALTHY, "healthy-info")); @@ -240,6 +244,7 @@ public void testFollowerFailsImmediatelyOnDisconnection() { final Response[] responseHolder = new Response[] { Response.SUCCESS }; final Settings settings = Settings.builder().put(NODE_NAME_SETTING.getKey(), localNode.getId()).build(); + final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); final DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue(settings, random()); final MockTransport mockTransport = new MockTransport() { @Override @@ -281,13 +286,14 @@ public String toString() { NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); 
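+ // The transport service is constructed with NoopTracer.INSTANCE since these tests do not exercise telemetry; the LeaderChecker below likewise now receives ClusterSettings in addition to Settings.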
transportService.start(); transportService.acceptIncomingRequests(); final AtomicBoolean leaderFailed = new AtomicBoolean(); - final LeaderChecker leaderChecker = new LeaderChecker(settings, transportService, e -> { + final LeaderChecker leaderChecker = new LeaderChecker(settings, clusterSettings, transportService, e -> { assertThat(e.getMessage(), anyOf(endsWith("disconnected"), endsWith("disconnected during check"))); assertTrue(leaderFailed.compareAndSet(false, true)); }, () -> new StatusInfo(StatusInfo.Status.HEALTHY, "healthy-info")); @@ -354,6 +360,7 @@ public void testFollowerFailsImmediatelyOnHealthCheckFailure() { final Response[] responseHolder = new Response[] { Response.SUCCESS }; final Settings settings = Settings.builder().put(NODE_NAME_SETTING.getKey(), localNode.getId()).build(); + final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); final DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue(settings, random()); final MockTransport mockTransport = new MockTransport() { @Override @@ -393,13 +400,14 @@ public String toString() { NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); final AtomicBoolean leaderFailed = new AtomicBoolean(); - final LeaderChecker leaderChecker = new LeaderChecker(settings, transportService, e -> { + final LeaderChecker leaderChecker = new LeaderChecker(settings, clusterSettings, transportService, e -> { assertThat(e.getMessage(), endsWith("failed health checks")); assertTrue(leaderFailed.compareAndSet(false, true)); }, () -> new StatusInfo(StatusInfo.Status.HEALTHY, "healthy-info")); @@ -428,6 +436,7 @@ public void testLeaderBehaviour() { final DiscoveryNode localNode = new DiscoveryNode("local-node", buildNewFakeTransportAddress(), Version.CURRENT); final DiscoveryNode otherNode = new DiscoveryNode("other-node", buildNewFakeTransportAddress(), Version.CURRENT); final Settings settings = Settings.builder().put(NODE_NAME_SETTING.getKey(), localNode.getId()).build(); + final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); final DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue(settings, random()); final CapturingTransport capturingTransport = new CapturingTransport(); AtomicReference<StatusInfo> nodeHealthServiceStatus = new AtomicReference<>(new StatusInfo(UNHEALTHY, "unhealthy-info")); @@ -438,13 +447,15 @@ public void testLeaderBehaviour() { NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); final LeaderChecker leaderChecker = new LeaderChecker( settings, + clusterSettings, transportService, e -> fail("shouldn't be checking anything"), () -> nodeHealthServiceStatus.get() diff --git a/server/src/test/java/org/opensearch/cluster/coordination/MessagesTests.java b/server/src/test/java/org/opensearch/cluster/coordination/MessagesTests.java index c6d97c641e2e1..58ca768edbb46 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/MessagesTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/MessagesTests.java @@ -35,9 +35,9 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.util.set.Sets; -import 
org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.EqualsHashCodeTestUtils; import org.opensearch.test.EqualsHashCodeTestUtils.CopyFunction; +import org.opensearch.test.OpenSearchTestCase; import java.util.Optional; diff --git a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java index fb2e7cd73d3bf..d94f3fb304fe2 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java @@ -33,12 +33,12 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfiguration; +import org.opensearch.cluster.coordination.PersistedStateRegistry.PersistedStateType; import org.opensearch.cluster.decommission.DecommissionAttribute; import org.opensearch.cluster.decommission.DecommissionAttributeMetadata; import org.opensearch.cluster.decommission.DecommissionStatus; @@ -47,17 +47,21 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; -import org.opensearch.cluster.service.FakeThreadPoolClusterManagerService; import org.opensearch.cluster.service.ClusterManagerService; +import org.opensearch.cluster.service.FakeThreadPoolClusterManagerService; import org.opensearch.cluster.service.MasterServiceTests; import org.opensearch.common.Randomness; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.BaseFuture; import org.opensearch.common.util.concurrent.FutureUtils; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.monitor.NodeHealthService; import org.opensearch.monitor.StatusInfo; import org.opensearch.node.Node; +import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; @@ -68,7 +72,6 @@ import org.opensearch.transport.Transport; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestOptions; -import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportService; import org.junit.After; import org.junit.AfterClass; @@ -89,6 +92,8 @@ import java.util.stream.IntStream; import java.util.stream.Stream; +import org.mockito.Mockito; + import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static java.util.Collections.singleton; @@ -243,8 +248,11 @@ protected void onSendRequest( TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> initialState.nodes().getLocalNode(), clusterSettings, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); + final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); + persistedStateRegistry.addPersistedState(PersistedStateType.LOCAL, new InMemoryPersistedState(term, 
initialState)); coordinator = new Coordinator( "test_node", Settings.EMPTY, @@ -253,14 +261,16 @@ protected void onSendRequest( writableRegistry(), OpenSearchAllocationTestCase.createAllocationService(Settings.EMPTY), clusterManagerService, - () -> new InMemoryPersistedState(term, initialState), + () -> persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL), r -> emptyList(), new NoOpClusterApplier(), Collections.emptyList(), random, (s, p, r) -> {}, ElectionStrategy.DEFAULT_INSTANCE, - nodeHealthService + nodeHealthService, + persistedStateRegistry, + Mockito.mock(RemoteStoreNodeService.class) ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/OpenSearchNodeCommandTests.java b/server/src/test/java/org/opensearch/cluster/coordination/OpenSearchNodeCommandTests.java index 47ec3b2c4e9f0..8358a397bf1b7 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/OpenSearchNodeCommandTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/OpenSearchNodeCommandTests.java @@ -38,12 +38,12 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.UUIDs; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.index.Index; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.core.index.Index; import org.opensearch.indices.IndicesModule; import org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/cluster/coordination/PreVoteCollectorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/PreVoteCollectorTests.java index 97bd683cab9eb..5ddf614db3334 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/PreVoteCollectorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/PreVoteCollectorTests.java @@ -35,11 +35,13 @@ import org.opensearch.Version; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfiguration; +import org.opensearch.cluster.coordination.PersistedStateRegistry.PersistedStateType; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.settings.Settings; import org.opensearch.common.lease.Releasable; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.monitor.StatusInfo; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransport; import org.opensearch.transport.ConnectTransportException; @@ -135,7 +137,8 @@ public void handleRemoteError(long requestId, Throwable t) { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); @@ -290,10 +293,16 @@ public void testPrevotingIndicatesElectionSuccess() { DiscoveryNode[] votingNodes = votingNodesSet.toArray(new DiscoveryNode[0]); startAndRunCollector(votingNodes); + PersistedStateRegistry persistedStateRegistry = 
persistedStateRegistry(); + persistedStateRegistry.addPersistedState( + PersistedStateType.LOCAL, + new InMemoryPersistedState(currentTerm, makeClusterState(votingNodes)) + ); final CoordinationState coordinationState = new CoordinationState( localNode, - new InMemoryPersistedState(currentTerm, makeClusterState(votingNodes)), - ElectionStrategy.DEFAULT_INSTANCE + persistedStateRegistry, + ElectionStrategy.DEFAULT_INSTANCE, + Settings.EMPTY ); final long newTerm = randomLongBetween(currentTerm + 1, Long.MAX_VALUE); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/PublicationTests.java b/server/src/test/java/org/opensearch/cluster/coordination/PublicationTests.java index 517456f54b785..4d18ff95887dd 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/PublicationTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/PublicationTests.java @@ -33,9 +33,9 @@ package org.opensearch.cluster.coordination; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfiguration; +import org.opensearch.cluster.coordination.PersistedStateRegistry.PersistedStateType; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; @@ -44,10 +44,11 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.set.Sets; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.discovery.Discovery; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.transport.TransportException; -import org.opensearch.transport.TransportResponse; import java.util.ArrayList; import java.util.Arrays; @@ -91,11 +92,9 @@ class MockNode { CoordinationMetadata.VotingConfiguration.EMPTY_CONFIG, 0L ); - coordinationState = new CoordinationState( - localNode, - new InMemoryPersistedState(0L, initialState), - ElectionStrategy.DEFAULT_INSTANCE - ); + PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); + persistedStateRegistry.addPersistedState(PersistedStateType.LOCAL, new InMemoryPersistedState(0L, initialState)); + coordinationState = new CoordinationState(localNode, persistedStateRegistry, ElectionStrategy.DEFAULT_INSTANCE, Settings.EMPTY); } final DiscoveryNode localNode; diff --git a/server/src/test/java/org/opensearch/cluster/coordination/PublicationTransportHandlerTests.java b/server/src/test/java/org/opensearch/cluster/coordination/PublicationTransportHandlerTests.java index 4f5bf0f292b5d..6d94054afdea2 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/PublicationTransportHandlerTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/PublicationTransportHandlerTests.java @@ -39,10 +39,11 @@ import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfiguration; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.node.Node; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import 
org.opensearch.test.transport.CapturingTransport; import org.opensearch.transport.TransportService; @@ -68,7 +69,8 @@ public void testDiffSerializationFailure() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> localNode, clusterSettings, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); final PublicationTransportHandler handler = new PublicationTransportHandler( transportService, diff --git a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java index 7108e06fe39fc..627f31502a417 100644 --- a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java +++ b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java @@ -8,12 +8,8 @@ package org.opensearch.cluster.decommission; -import org.hamcrest.MatcherAssert; -import org.junit.After; -import org.junit.Before; import org.opensearch.OpenSearchTimeoutException; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.configuration.TransportAddVotingConfigExclusionsAction; import org.opensearch.action.admin.cluster.configuration.TransportClearVotingConfigExclusionsAction; import org.opensearch.action.support.ActionFilters; @@ -31,11 +27,16 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransport; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; +import org.hamcrest.MatcherAssert; +import org.junit.After; +import org.junit.Before; import java.util.Arrays; import java.util.Collections; @@ -51,13 +52,13 @@ import static java.util.Collections.emptySet; import static java.util.Collections.singletonMap; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.notNullValue; import static org.opensearch.cluster.ClusterState.builder; import static org.opensearch.cluster.OpenSearchAllocationTestCase.createAllocationService; import static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.opensearch.test.ClusterServiceUtils.setState; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.notNullValue; public class DecommissionControllerTests extends OpenSearchTestCase { @@ -91,7 +92,8 @@ public void setTransportServiceAndDefaultClusterState() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> clusterService.state().nodes().get("node1"), null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); final Settings.Builder nodeSettingsBuilder = Settings.builder(); diff --git a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionHelperTests.java b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionHelperTests.java index 94833e15f55d0..15821cdb3335b 100644 --- a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionHelperTests.java +++ b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionHelperTests.java @@ -8,7 
+8,6 @@ package org.opensearch.cluster.decommission; -import org.junit.BeforeClass; import org.opensearch.Version; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; @@ -19,6 +18,7 @@ import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.unit.TimeValue; import org.opensearch.test.OpenSearchTestCase; +import org.junit.BeforeClass; import java.util.Set; diff --git a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java index 5509a238f700f..6c15d1dc54aea 100644 --- a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java @@ -8,13 +8,7 @@ package org.opensearch.cluster.decommission; -import org.hamcrest.MatcherAssert; -import org.hamcrest.Matchers; -import org.junit.After; -import org.junit.Before; -import org.mockito.Mockito; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateResponse; import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequest; import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse; @@ -33,12 +27,18 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransport; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; +import org.hamcrest.MatcherAssert; +import org.hamcrest.Matchers; +import org.junit.After; +import org.junit.Before; import java.util.Collections; import java.util.HashSet; @@ -49,15 +49,17 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; +import org.mockito.Mockito; + import static java.util.Collections.emptySet; import static java.util.Collections.singletonMap; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.notNullValue; import static org.opensearch.cluster.ClusterState.builder; import static org.opensearch.cluster.OpenSearchAllocationTestCase.createAllocationService; import static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.opensearch.test.ClusterServiceUtils.setState; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.notNullValue; public class DecommissionServiceTests extends OpenSearchTestCase { @@ -102,7 +104,8 @@ public void setUpService() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> clusterService.state().nodes().get("node1"), null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); final Settings.Builder nodeSettingsBuilder = Settings.builder() diff --git a/server/src/test/java/org/opensearch/cluster/health/ClusterStateHealthTests.java b/server/src/test/java/org/opensearch/cluster/health/ClusterStateHealthTests.java index 0e8af8bb0a9a8..795dc8a624e38 100644 
--- a/server/src/test/java/org/opensearch/cluster/health/ClusterStateHealthTests.java +++ b/server/src/test/java/org/opensearch/cluster/health/ClusterStateHealthTests.java @@ -55,10 +55,11 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.set.Sets; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.gateway.TestGatewayAllocator; import org.opensearch.test.transport.CapturingTransport; @@ -117,7 +118,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/cluster/metadata/AliasMetadataTests.java b/server/src/test/java/org/opensearch/cluster/metadata/AliasMetadataTests.java index e89eccd2734cb..2d57626917247 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/AliasMetadataTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/AliasMetadataTests.java @@ -34,8 +34,8 @@ import org.opensearch.cluster.metadata.AliasMetadata.Builder; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.util.set.Sets; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.test.AbstractXContentTestCase; diff --git a/server/src/test/java/org/opensearch/cluster/metadata/ComponentTemplateTests.java b/server/src/test/java/org/opensearch/cluster/metadata/ComponentTemplateTests.java index 9964550d2f8a0..67e84a27f2eb9 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/ComponentTemplateTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/ComponentTemplateTests.java @@ -34,8 +34,8 @@ import org.opensearch.cluster.Diff; import org.opensearch.common.compress.CompressedXContent; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.test.AbstractDiffableSerializationTestCase; import org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/cluster/metadata/ComposableIndexTemplateTests.java b/server/src/test/java/org/opensearch/cluster/metadata/ComposableIndexTemplateTests.java index f1888a5a980d4..1e56970ea5bb5 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/ComposableIndexTemplateTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/ComposableIndexTemplateTests.java @@ -34,8 +34,8 @@ import org.opensearch.cluster.Diff; import org.opensearch.common.compress.CompressedXContent; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.test.AbstractDiffableSerializationTestCase; import 
org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/cluster/metadata/DataStreamTests.java b/server/src/test/java/org/opensearch/cluster/metadata/DataStreamTests.java index 5b31eb3c2abf7..a70438307f94a 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/DataStreamTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/DataStreamTests.java @@ -33,8 +33,8 @@ import org.opensearch.common.UUIDs; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.XContentParser; import org.opensearch.test.AbstractSerializingTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/cluster/metadata/IndexGraveyardTests.java b/server/src/test/java/org/opensearch/cluster/metadata/IndexGraveyardTests.java index ebe5884ac5347..770145a0ebf37 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/IndexGraveyardTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/IndexGraveyardTests.java @@ -32,21 +32,21 @@ package org.opensearch.cluster.metadata; -import org.opensearch.common.Strings; import org.opensearch.common.UUIDs; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentOpenSearchExtension; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.core.index.Index; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; +import java.time.Instant; import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; @@ -80,20 +80,22 @@ public void testSerialization() throws IOException { public void testXContent() throws IOException { final IndexGraveyard graveyard = createRandom(); - final XContentBuilder builder = JsonXContent.contentBuilder(); + final XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder(); builder.startObject(); graveyard.toXContent(builder, ToXContent.EMPTY_PARAMS); builder.endObject(); if (graveyard.getTombstones().size() > 0) { // check that date properly printed assertThat( - Strings.toString(XContentType.JSON, graveyard, false, true), + Strings.toString(MediaTypeRegistry.JSON, graveyard, false, true), containsString( - XContentOpenSearchExtension.DEFAULT_DATE_PRINTER.print(graveyard.getTombstones().get(0).getDeleteDateInMillis()) + XContentOpenSearchExtension.DEFAULT_FORMATTER.format( + Instant.ofEpochMilli(graveyard.getTombstones().get(0).getDeleteDateInMillis()) + ) ) ); } - XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); + XContentParser parser = createParser(MediaTypeRegistry.JSON.xContent(), BytesReference.bytes(builder)); parser.nextToken(); // the beginning of the parser assertThat(IndexGraveyard.fromXContent(parser), equalTo(graveyard)); } diff --git a/server/src/test/java/org/opensearch/cluster/metadata/IndexMetadataTests.java 
b/server/src/test/java/org/opensearch/cluster/metadata/IndexMetadataTests.java index ea615c7cfc09a..393a652952771 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/IndexMetadataTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/IndexMetadataTests.java @@ -36,22 +36,22 @@ import org.opensearch.action.admin.indices.rollover.MaxDocsCondition; import org.opensearch.action.admin.indices.rollover.MaxSizeCondition; import org.opensearch.action.admin.indices.rollover.RolloverInfo; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.set.Sets; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.IndicesModule; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; @@ -129,9 +129,9 @@ public void testIndexMetadataSerialization() throws IOException { final IndexMetadata fromXContentMeta = IndexMetadata.fromXContent(parser); assertEquals( "expected: " - + Strings.toString(XContentType.JSON, metadata) + + Strings.toString(MediaTypeRegistry.JSON, metadata) + "\nactual : " - + Strings.toString(XContentType.JSON, fromXContentMeta), + + Strings.toString(MediaTypeRegistry.JSON, fromXContentMeta), metadata, fromXContentMeta ); diff --git a/server/src/test/java/org/opensearch/cluster/metadata/IndexTemplateMetadataTests.java b/server/src/test/java/org/opensearch/cluster/metadata/IndexTemplateMetadataTests.java index 4648eaa6e3852..0ea2834cc3024 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/IndexTemplateMetadataTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/IndexTemplateMetadataTests.java @@ -31,17 +31,17 @@ package org.opensearch.cluster.metadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.DeprecationHandler; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentHelper; import 
org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -73,7 +73,7 @@ public void testIndexTemplateMetadataXContentRoundTrip() throws Exception { NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, templateBytes, - XContentType.JSON + MediaTypeRegistry.JSON ) ) { indexTemplateMetadata = IndexTemplateMetadata.Builder.fromXContent(parser, "test"); @@ -93,7 +93,7 @@ public void testIndexTemplateMetadataXContentRoundTrip() throws Exception { NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, templateBytesRoundTrip, - XContentType.JSON + MediaTypeRegistry.JSON ) ) { indexTemplateMetadataRoundTrip = IndexTemplateMetadata.Builder.fromXContent(parser, "test"); @@ -142,7 +142,7 @@ public void testValidateInvalidIndexPatterns() throws Exception { NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, new BytesArray(templateWithEmptyPattern), - XContentType.JSON + MediaTypeRegistry.JSON ) ) { final IllegalArgumentException ex = expectThrows( @@ -166,7 +166,7 @@ public void testValidateInvalidIndexPatterns() throws Exception { NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, new BytesArray(templateWithoutPattern), - XContentType.JSON + MediaTypeRegistry.JSON ) ) { final IllegalArgumentException ex = expectThrows( @@ -184,7 +184,7 @@ public void testParseTemplateWithAliases() throws Exception { NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, new BytesArray(templateInJSON), - XContentType.JSON + MediaTypeRegistry.JSON ) ) { IndexTemplateMetadata template = IndexTemplateMetadata.Builder.fromXContent(parser, randomAlphaOfLengthBetween(1, 100)); @@ -222,7 +222,7 @@ public void testFromToXContent() throws Exception { templateBuilder.putMapping("doc", "{\"doc\":{\"properties\":{\"type\":\"text\"}}}"); } IndexTemplateMetadata template = templateBuilder.build(); - XContentBuilder builder = XContentBuilder.builder(randomFrom(XContentType.JSON.xContent())); + XContentBuilder builder = XContentBuilder.builder(randomFrom(MediaTypeRegistry.JSON.xContent())); builder.startObject(); IndexTemplateMetadata.Builder.toXContentWithTypes(template, builder, ToXContent.EMPTY_PARAMS); builder.endObject(); diff --git a/server/src/test/java/org/opensearch/cluster/metadata/ManifestTests.java b/server/src/test/java/org/opensearch/cluster/metadata/ManifestTests.java index 534bcd0aed212..5b1a96c02ac4e 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/ManifestTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/ManifestTests.java @@ -33,12 +33,12 @@ package org.opensearch.cluster.metadata; import org.opensearch.common.UUIDs; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java 
b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index e52237c8dba99..6d1f359d210ac 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -32,11 +32,10 @@ package org.opensearch.cluster.metadata; -import org.hamcrest.Matchers; -import org.junit.Before; import org.opensearch.ExceptionsHelper; import org.opensearch.ResourceAlreadyExistsException; import org.opensearch.Version; +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest; import org.opensearch.action.admin.indices.shrink.ResizeType; @@ -56,7 +55,6 @@ import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.Strings; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.IndexScopedSettings; @@ -64,16 +62,18 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; -import org.opensearch.common.util.FeatureFlags; -import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.env.Environment; import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.env.Environment; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.translog.Translog; +import org.opensearch.indices.IndexCreationException; +import org.opensearch.indices.IndicesService; import org.opensearch.indices.InvalidAliasNameException; import org.opensearch.indices.InvalidIndexNameException; import org.opensearch.indices.ShardLimitValidator; @@ -82,12 +82,14 @@ import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.snapshots.EmptySnapshotsInfoService; import org.opensearch.test.ClusterServiceUtils; -import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; import org.opensearch.test.gateway.TestGatewayAllocator; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.hamcrest.Matchers; +import org.junit.After; +import org.junit.Before; import java.io.IOException; import java.util.ArrayList; @@ -98,6 +100,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Optional; import java.util.Set; @@ -113,25 +116,15 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.singleton; import static java.util.Collections.singletonList; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.endsWith; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasKey; -import static 
org.hamcrest.Matchers.hasValue; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; -import static org.hamcrest.Matchers.startsWith; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING; import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING; import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_READ_ONLY_BLOCK; +import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_REPLICATION_TYPE_SETTING; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_READ_ONLY; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_CREATED; @@ -141,19 +134,51 @@ import static org.opensearch.cluster.metadata.MetadataCreateIndexService.getIndexNumberOfRoutingShards; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.parseV1Mappings; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.resolveAndValidateAliases; +import static org.opensearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; +import static org.opensearch.index.IndexSettings.INDEX_REFRESH_INTERVAL_SETTING; import static org.opensearch.index.IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; import static org.opensearch.index.IndexSettings.INDEX_SOFT_DELETES_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_REPOSITORY_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_ENABLED_SETTING; +import static org.opensearch.index.IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; import static org.opensearch.indices.ShardLimitValidatorTests.createTestShardLimitService; +import static org.opensearch.node.Node.NODE_ATTRIBUTES; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.endsWith; +import static org.hamcrest.Matchers.equalTo; +import static 
org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.hasValue; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.startsWith; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class MetadataCreateIndexServiceTests extends OpenSearchTestCase { private AliasValidator aliasValidator; private CreateIndexClusterStateUpdateRequest request; private QueryShardContext queryShardContext; + private ClusterSettings clusterSettings; + private static final String segmentRepositoryNameAttributeKey = NODE_ATTRIBUTES.getKey() + + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; + private static final String translogRepositoryNameAttributeKey = NODE_ATTRIBUTES.getKey() + + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; + + final String REPLICATION_MISMATCH_VALIDATION_ERROR = + "Validation Failed: 1: index setting [index.replication.type] is not allowed to be set as [cluster.index.restrict.replication.type=true];"; + + @Before + public void setup() throws Exception { + super.setUp(); + clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + } @Before public void setupCreateIndexRequestAndAliasValidator() { @@ -671,7 +696,11 @@ public void testValidateIndexName() throws Exception { false, new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) ); - validateIndexName(checkerService, "index?name", "must not contain the following characters " + Strings.INVALID_FILENAME_CHARS); + validateIndexName( + checkerService, + "index?name", + "must not contain the following characters " + org.opensearch.core.common.Strings.INVALID_FILENAME_CHARS + ); validateIndexName(checkerService, "index#name", "must not contain '#'"); @@ -815,7 +844,8 @@ public void testAggregateSettingsAppliesSettingsFromTemplatesAndRequest() { Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet() + Collections.emptySet(), + clusterSettings ); assertThat(aggregatedIndexSettings.get("template_setting"), equalTo("value1")); @@ -877,7 +907,8 @@ public void testRequestDataHavePriorityOverTemplateData() throws Exception { Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet() + Collections.emptySet(), + clusterSettings ); assertThat(resolvedAliases.get(0).getSearchRouting(), equalTo("fromRequest")); @@ -899,7 +930,8 @@ public void testDefaultSettings() { Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet() + Collections.emptySet(), + clusterSettings ); assertThat(aggregatedIndexSettings.get(SETTING_NUMBER_OF_SHARDS), equalTo("1")); @@ -914,7 +946,8 @@ public void testSettingsFromClusterState() { Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 15).build(), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet() + Collections.emptySet(), + clusterSettings ); assertThat(aggregatedIndexSettings.get(SETTING_NUMBER_OF_SHARDS), equalTo("15")); @@ -951,7 +984,8 @@ public void testTemplateOrder() throws Exception { Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet() + Collections.emptySet(), + clusterSettings ); List<AliasMetadata> resolvedAliases = resolveAndValidateAliases( request.index(), @@ -990,7 +1024,8 @@ public void 
testAggregateIndexSettingsIgnoresTemplatesOnCreateFromSourceIndex() Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet() + Collections.emptySet(), + clusterSettings ); assertThat(aggregatedIndexSettings.get("templateSetting"), is(nullValue())); @@ -1191,110 +1226,136 @@ public void testvalidateIndexSettings() { threadPool.shutdown(); } - public void testRemoteStoreNoUserOverrideConflictingReplicationTypeIndexSettings() { + public void testIndexTemplateReplicationType() { + Settings templateSettings = Settings.builder().put(INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT).build(); + + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); + final Settings.Builder requestSettings = Settings.builder(); + request.settings(requestSettings.build()); + Settings indexSettings = aggregateIndexSettings( + ClusterState.EMPTY_STATE, + request, + templateSettings, + null, + Settings.EMPTY, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Collections.emptySet(), + clusterSettings + ); + assertNotEquals(ReplicationType.SEGMENT, clusterSettings.get(CLUSTER_REPLICATION_TYPE_SETTING)); + assertEquals(ReplicationType.SEGMENT.toString(), indexSettings.get(INDEX_REPLICATION_TYPE_SETTING.getKey())); + } + + public void testClusterForceReplicationTypeInAggregateSettings() { Settings settings = Settings.builder() - .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.DOCUMENT) - .put(CLUSTER_REMOTE_STORE_ENABLED_SETTING.getKey(), true) - .put(CLUSTER_REMOTE_STORE_REPOSITORY_SETTING.getKey(), "my-segment-repo-1") - .put(CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey(), "my-translog-repo-1") + .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT) + .put(CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING.getKey(), true) + .build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + Settings nonMatchingReplicationIndexSettings = Settings.builder() + .put(INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.DOCUMENT) .build(); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); - IllegalArgumentException exc = expectThrows( - IllegalArgumentException.class, + request.settings(nonMatchingReplicationIndexSettings); + IndexCreationException exception = expectThrows( + IndexCreationException.class, () -> aggregateIndexSettings( ClusterState.EMPTY_STATE, request, Settings.EMPTY, null, - settings, + Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet() + Collections.emptySet(), + clusterSettings ) ); - assertThat( - exc.getMessage(), - containsString("Cannot enable [index.remote_store.enabled] when [index.replication.type] is DOCUMENT") - ); - } + assertEquals(REPLICATION_MISMATCH_VALIDATION_ERROR, exception.getCause().getMessage()); - public void testRemoteStoreNoUserOverrideExceptReplicationTypeSegmentIndexSettings() { - Settings settings = Settings.builder() - .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.DOCUMENT) - .put(CLUSTER_REMOTE_STORE_ENABLED_SETTING.getKey(), true) - .put(CLUSTER_REMOTE_STORE_REPOSITORY_SETTING.getKey(), "my-segment-repo-1") - .put(CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey(), "my-translog-repo-1") + Settings matchingReplicationIndexSettings = Settings.builder() + 
.put(INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT) .build(); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - - request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); - final Settings.Builder requestSettings = Settings.builder(); - requestSettings.put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT); - request.settings(requestSettings.build()); - Settings indexSettings = aggregateIndexSettings( + request.settings(matchingReplicationIndexSettings); + Settings aggregateIndexSettings = aggregateIndexSettings( ClusterState.EMPTY_STATE, request, Settings.EMPTY, null, - settings, + Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet() - ); - verifyRemoteStoreIndexSettings( - indexSettings, - "true", - "my-segment-repo-1", - "my-translog-repo-1", - ReplicationType.SEGMENT.toString(), - IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL + Collections.emptySet(), + clusterSettings ); + assertEquals(ReplicationType.SEGMENT.toString(), aggregateIndexSettings.get(INDEX_REPLICATION_TYPE_SETTING.getKey())); } - public void testRemoteStoreNoUserOverrideIndexSettings() { - Settings settings = Settings.builder() + public void testClusterForceReplicationTypeInValidateIndexSettings() { + ClusterService clusterService = mock(ClusterService.class); + Metadata metadata = Metadata.builder() + .transientSettings(Settings.builder().put(Metadata.DEFAULT_REPLICA_COUNT_SETTING.getKey(), 1).build()) + .build(); + ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .build(); + ThreadPool threadPool = new TestThreadPool(getTestName()); + // Enforce cluster level replication type setting + final Settings forceClusterSettingEnabled = Settings.builder() .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT) - .put(CLUSTER_REMOTE_STORE_ENABLED_SETTING.getKey(), true) - .put(CLUSTER_REMOTE_STORE_REPOSITORY_SETTING.getKey(), "my-segment-repo-1") - .put(CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey(), "my-translog-repo-1") + .put(CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING.getKey(), true) .build(); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); + ClusterSettings clusterSettings = new ClusterSettings(forceClusterSettingEnabled, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + when(clusterService.getSettings()).thenReturn(forceClusterSettingEnabled); + when(clusterService.getClusterSettings()).thenReturn(clusterSettings); + when(clusterService.state()).thenReturn(clusterState); - request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); - Settings indexSettings = aggregateIndexSettings( - ClusterState.EMPTY_STATE, - request, - Settings.EMPTY, + final MetadataCreateIndexService checkerService = new MetadataCreateIndexService( + forceClusterSettingEnabled, + clusterService, null, - settings, + null, + null, + createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), + new Environment(Settings.builder().put("path.home", "dummy").build(), null), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, - randomShardLimitService(), - Collections.emptySet() + threadPool, + null, + new SystemIndices(Collections.emptyMap()), + true, + new AwarenessReplicaBalance(forceClusterSettingEnabled, clusterService.getClusterSettings()) ); - verifyRemoteStoreIndexSettings( - indexSettings, - "true", - "my-segment-repo-1", - "my-translog-repo-1", - 
ReplicationType.SEGMENT.toString(), - IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL + // Use DOCUMENT replication type setting for index creation + final Settings indexSettings = Settings.builder().put(INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.DOCUMENT).build(); + + IndexCreationException exception = expectThrows( + IndexCreationException.class, + () -> checkerService.validateIndexSettings("test", indexSettings, false) ); + assertEquals(REPLICATION_MISMATCH_VALIDATION_ERROR, exception.getCause().getMessage()); + + // Cluster level replication type setting not enforced + final Settings forceClusterSettingDisabled = Settings.builder() + .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT) + .put(CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING.getKey(), false) + .build(); + clusterSettings = new ClusterSettings(forceClusterSettingDisabled, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + when(clusterService.getClusterSettings()).thenReturn(clusterSettings); + checkerService.validateIndexSettings("test", indexSettings, false); + threadPool.shutdown(); } - public void testRemoteStoreDisabledByUserIndexSettings() { + public void testRemoteStoreNoUserOverrideExceptReplicationTypeSegmentIndexSettings() { Settings settings = Settings.builder() - .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT) - .put(CLUSTER_REMOTE_STORE_ENABLED_SETTING.getKey(), true) - .put(CLUSTER_REMOTE_STORE_REPOSITORY_SETTING.getKey(), "my-segment-repo-1") - .put(CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey(), "my-translog-repo-1") + .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.DOCUMENT) + .put(segmentRepositoryNameAttributeKey, "my-segment-repo-1") + .put(translogRepositoryNameAttributeKey, "my-translog-repo-1") .build(); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); final Settings.Builder requestSettings = Settings.builder(); - requestSettings.put(SETTING_REMOTE_STORE_ENABLED, false); + requestSettings.put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT); request.settings(requestSettings.build()); Settings indexSettings = aggregateIndexSettings( ClusterState.EMPTY_STATE, @@ -1304,32 +1365,27 @@ public void testRemoteStoreDisabledByUserIndexSettings() { settings, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet() + Collections.emptySet(), + clusterSettings ); verifyRemoteStoreIndexSettings( indexSettings, - "false", - null, - null, + "true", + "my-segment-repo-1", + "my-translog-repo-1", ReplicationType.SEGMENT.toString(), IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL ); } - public void testRemoteStoreOverrideSegmentRepoIndexSettings() { + public void testRemoteStoreImplicitOverrideReplicationTypeToSegmentForRemoteStore() { Settings settings = Settings.builder() - .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT) - .put(CLUSTER_REMOTE_STORE_ENABLED_SETTING.getKey(), true) - .put(CLUSTER_REMOTE_STORE_REPOSITORY_SETTING.getKey(), "my-segment-repo-1") - .put(CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey(), "my-translog-repo-1") + .put(segmentRepositoryNameAttributeKey, "my-segment-repo-1") + .put(translogRepositoryNameAttributeKey, "my-translog-repo-1") .build(); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); final Settings.Builder requestSettings = Settings.builder(); - 
requestSettings.put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) - .put(SETTING_REMOTE_STORE_ENABLED, true) - .put(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, "my-custom-repo"); request.settings(requestSettings.build()); Settings indexSettings = aggregateIndexSettings( ClusterState.EMPTY_STATE, @@ -1339,31 +1395,27 @@ public void testRemoteStoreOverrideSegmentRepoIndexSettings() { settings, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet() + Collections.emptySet(), + clusterSettings ); verifyRemoteStoreIndexSettings( indexSettings, "true", - "my-custom-repo", + "my-segment-repo-1", "my-translog-repo-1", ReplicationType.SEGMENT.toString(), IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL ); } - public void testRemoteStoreOverrideTranslogRepoIndexSettings() { + public void testRemoteStoreNoUserOverrideIndexSettings() { Settings settings = Settings.builder() .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT) - .put(CLUSTER_REMOTE_STORE_ENABLED_SETTING.getKey(), true) - .put(CLUSTER_REMOTE_STORE_REPOSITORY_SETTING.getKey(), "my-segment-repo-1") - .put(CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey(), "my-translog-repo-1") + .put(segmentRepositoryNameAttributeKey, "my-segment-repo-1") + .put(translogRepositoryNameAttributeKey, "my-translog-repo-1") .build(); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); - final Settings.Builder requestSettings = Settings.builder(); - requestSettings.put(SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, "my-custom-repo"); - request.settings(requestSettings.build()); Settings indexSettings = aggregateIndexSettings( ClusterState.EMPTY_STATE, request, @@ -1372,49 +1424,129 @@ public void testRemoteStoreOverrideTranslogRepoIndexSettings() { settings, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet() + Collections.emptySet(), + clusterSettings ); verifyRemoteStoreIndexSettings( indexSettings, "true", "my-segment-repo-1", - "my-custom-repo", + "my-translog-repo-1", ReplicationType.SEGMENT.toString(), IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL ); } - public void testRemoteStoreOverrideReplicationTypeIndexSettings() { - Settings settings = Settings.builder() - .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT) - .put(CLUSTER_REMOTE_STORE_ENABLED_SETTING.getKey(), true) - .put(CLUSTER_REMOTE_STORE_REPOSITORY_SETTING.getKey(), "my-segment-repo-1") - .put(CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey(), "my-translog-repo-1") - .build(); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); + public void testRemoteStoreDisabledByUserIndexSettings() { + final Settings.Builder requestSettings = Settings.builder(); + requestSettings.put(SETTING_REMOTE_STORE_ENABLED, false); + withTemporaryClusterService(((clusterService, threadPool) -> { + MetadataCreateIndexService checkerService = new MetadataCreateIndexService( + Settings.EMPTY, + clusterService, + null, + null, + null, + createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), + new Environment(Settings.builder().put("path.home", "dummy").build(), null), + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + threadPool, + null, + new SystemIndices(Collections.emptyMap()), + true, + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + ); - request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); + final 
List<String> validationErrors = checkerService.getIndexSettingsValidationErrors( + requestSettings.build(), + true, + Optional.empty() + ); + assertThat(validationErrors.size(), is(1)); + assertThat( + validationErrors.get(0), + is(String.format(Locale.ROOT, "private index setting [%s] can not be set explicitly", SETTING_REMOTE_STORE_ENABLED)) + ); + })); + } + + public void testRemoteStoreOverrideSegmentRepoIndexSettings() { final Settings.Builder requestSettings = Settings.builder(); - requestSettings.put(SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT); - request.settings(requestSettings.build()); - Settings indexSettings = aggregateIndexSettings( - ClusterState.EMPTY_STATE, - request, - Settings.EMPTY, - null, - settings, - IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, - randomShardLimitService(), - Collections.emptySet() - ); - verifyRemoteStoreIndexSettings( - indexSettings, - null, - null, - null, - ReplicationType.DOCUMENT.toString(), - IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL - ); + requestSettings.put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, "my-custom-repo"); + withTemporaryClusterService(((clusterService, threadPool) -> { + MetadataCreateIndexService checkerService = new MetadataCreateIndexService( + Settings.EMPTY, + clusterService, + null, + null, + null, + createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), + new Environment(Settings.builder().put("path.home", "dummy").build(), null), + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + threadPool, + null, + new SystemIndices(Collections.emptyMap()), + true, + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + ); + + final List<String> validationErrors = checkerService.getIndexSettingsValidationErrors( + requestSettings.build(), + true, + Optional.empty() + ); + assertThat(validationErrors.size(), is(1)); + assertThat( + validationErrors.get(0), + is( + String.format( + Locale.ROOT, + "private index setting [%s] can not be set explicitly", + SETTING_REMOTE_SEGMENT_STORE_REPOSITORY + ) + ) + ); + })); + } + + public void testRemoteStoreOverrideTranslogRepoIndexSettings() { + final Settings.Builder requestSettings = Settings.builder(); + requestSettings.put(SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, "my-custom-repo"); + withTemporaryClusterService(((clusterService, threadPool) -> { + MetadataCreateIndexService checkerService = new MetadataCreateIndexService( + Settings.EMPTY, + clusterService, + null, + null, + null, + createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), + new Environment(Settings.builder().put("path.home", "dummy").build(), null), + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + threadPool, + null, + new SystemIndices(Collections.emptyMap()), + true, + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + ); + + final List<String> validationErrors = checkerService.getIndexSettingsValidationErrors( + requestSettings.build(), + true, + Optional.empty() + ); + assertThat(validationErrors.size(), is(1)); + assertThat( + validationErrors.get(0), + is( + String.format( + Locale.ROOT, + "private index setting [%s] can not be set explicitly", + SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY + ) + ) + ); + })); } public void testBuildIndexMetadata() { @@ -1486,7 +1618,8 @@ public void testSoftDeletesDisabledIsRejected() { Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet() + 
Collections.emptySet(), + clusterSettings ); }); assertThat( @@ -1515,7 +1648,8 @@ public void testValidateTranslogRetentionSettings() { Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet() + Collections.emptySet(), + clusterSettings ); assertWarnings( "Translog retention settings [index.translog.retention.age] " @@ -1562,7 +1696,8 @@ public void testDeprecatedSimpleFSStoreSettings() { Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet() + Collections.emptySet(), + clusterSettings ); assertWarnings( "[simplefs] is deprecated and will be removed in 2.0. Use [niofs], which offers equal " @@ -1581,7 +1716,8 @@ public void testClusterReplicationSetting() { settings, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet() + Collections.emptySet(), + clusterSettings ); assertEquals(ReplicationType.SEGMENT.toString(), indexSettings.get(SETTING_REPLICATION_TYPE)); } @@ -1601,12 +1737,237 @@ public void testIndexSettingOverridesClusterReplicationSetting() { settings, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet() + Collections.emptySet(), + clusterSettings ); // Verify if index setting overrides cluster replication setting assertEquals(ReplicationType.DOCUMENT.toString(), indexSettings.get(SETTING_REPLICATION_TYPE)); } + public void testRefreshIntervalValidationWithNoIndexSetting() { + // This checks that aggregateIndexSettings works for the case where there is no index setting + // `index.refresh_interval` in the cluster state update request. + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); + aggregateIndexSettings( + ClusterState.EMPTY_STATE, + request, + Settings.EMPTY, + null, + Settings.EMPTY, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Collections.emptySet(), + clusterSettings + ); + } + + public void testRefreshIntervalValidationSuccessWithIndexSettingEqualToClusterMinimum() { + // This checks that aggregateIndexSettings works for the case when the index setting `index.refresh_interval` + // is set to a value that is equal to the `cluster.minimum.index.refresh_interval` value. 
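+ // Illustrative sketch with hypothetical concrete values matching this test (not asserted verbatim below): + // cluster.minimum.index.refresh_interval = 10s (lower bound enforced while aggregating index settings) + // cluster.default.index.refresh_interval = 10s (applied only when the request omits index.refresh_interval) + // index.refresh_interval = 10s (equal to the minimum, so index creation succeeds)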
+ TimeValue refreshInterval = TimeValue.timeValueSeconds(10); + Settings settings = Settings.builder() + .put(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), refreshInterval) + .put(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.getKey(), refreshInterval) + .build(); + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); + final Settings.Builder requestSettings = Settings.builder(); + // Set index setting refresh interval the same value as the cluster minimum refresh interval + requestSettings.put(INDEX_REFRESH_INTERVAL_SETTING.getKey(), refreshInterval); + request.settings(requestSettings.build()); + Settings indexSettings = aggregateIndexSettings( + ClusterState.EMPTY_STATE, + request, + Settings.EMPTY, + null, + settings, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Collections.emptySet(), + clusterSettings + ); + // Verify that the value is the same as set earlier and that the validation was successful + assertEquals(refreshInterval, INDEX_REFRESH_INTERVAL_SETTING.get(indexSettings)); + } + + public void testRefreshIntervalValidationSuccessWithIndexSettingGreaterThanClusterMinimum() { + // This checks that aggregateIndexSettings works for the case when the index setting `index.refresh_interval` + // is set to a value that is greater than the `cluster.minimum.index.refresh_interval` value. + int clusterMinRefreshTimeMs = 10 * 1000; + TimeValue clusterMinRefreshTime = TimeValue.timeValueMillis(clusterMinRefreshTimeMs); + Settings settings = Settings.builder() + .put(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), clusterMinRefreshTime) + .put(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.getKey(), clusterMinRefreshTime) + .build(); + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); + final Settings.Builder requestSettings = Settings.builder(); + // Set index setting refresh interval to a value greater than the cluster minimum refresh interval + TimeValue indexRefreshTime = TimeValue.timeValueMillis(clusterMinRefreshTimeMs + randomNonNegativeLong()); + requestSettings.put(INDEX_REFRESH_INTERVAL_SETTING.getKey(), indexRefreshTime); + request.settings(requestSettings.build()); + Settings indexSettings = aggregateIndexSettings( + ClusterState.EMPTY_STATE, + request, + Settings.EMPTY, + null, + settings, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Collections.emptySet(), + clusterSettings + ); + // Verify that the value is the same as set earlier and that the validation was successful + assertEquals(indexRefreshTime, INDEX_REFRESH_INTERVAL_SETTING.get(indexSettings)); + } + + public void testRefreshIntervalValidationFailureWithIndexSetting() { + // This checks that aggregateIndexSettings fails validation for the case when the index setting `index.refresh_interval` + // is set to a value that is below the `cluster.minimum.index.refresh_interval` value. 
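+ // For example, a hypothetical request consistent with the randomized values below: + // creating index "test" with "index.refresh_interval": "5s" while cluster.minimum.index.refresh_interval is 10s + // is rejected with: invalid index.refresh_interval [5s]: cannot be smaller than cluster.minimum.index.refresh_interval [10s]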
+ int clusterMinRefreshTimeMs = 10 * 1000; + TimeValue clusterMinRefreshTime = TimeValue.timeValueMillis(clusterMinRefreshTimeMs); + Settings settings = Settings.builder() + .put(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), clusterMinRefreshTime) + .put(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.getKey(), clusterMinRefreshTime) + .build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); + final Settings.Builder requestSettings = Settings.builder(); + // Set index setting refresh interval to a value below the cluster minimum refresh interval + TimeValue indexRefreshTime = TimeValue.timeValueMillis(clusterMinRefreshTimeMs - randomIntBetween(1, clusterMinRefreshTimeMs - 1)); + requestSettings.put(INDEX_REFRESH_INTERVAL_SETTING.getKey(), indexRefreshTime); + request.settings(requestSettings.build()); + IllegalArgumentException exception = assertThrows( + IllegalArgumentException.class, + () -> aggregateIndexSettings( + ClusterState.EMPTY_STATE, + request, + Settings.EMPTY, + null, + Settings.EMPTY, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Collections.emptySet(), + clusterSettings + ) + ); + // Verify that the message is as expected + assertEquals( + "invalid index.refresh_interval [" + + indexRefreshTime + + "]: cannot be smaller than cluster.minimum.index.refresh_interval [10s]", + exception.getMessage() + ); + } + + public void testAnyTranslogDurabilityWhenRestrictSettingFalse() { + // This checks that aggregateIndexSettings works for the case when the cluster setting + // cluster.remote_store.index.restrict.async-durability is false or not set; all durability modes are allowed + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); + final Settings.Builder requestSettings = Settings.builder(); + Translog.Durability durability = randomFrom(Translog.Durability.values()); + requestSettings.put(INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), durability); + request.settings(requestSettings.build()); + if (randomBoolean()) { + Settings settings = Settings.builder().put(CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING.getKey(), false).build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + } + Settings indexSettings = aggregateIndexSettings( + ClusterState.EMPTY_STATE, + request, + Settings.EMPTY, + null, + Settings.EMPTY, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Collections.emptySet(), + clusterSettings + ); + assertFalse(clusterSettings.get(IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING)); + assertEquals(durability, INDEX_TRANSLOG_DURABILITY_SETTING.get(indexSettings)); + } + + public void testAsyncDurabilityThrowsExceptionWhenRestrictSettingTrue() { + // This checks that aggregateIndexSettings throws an exception for the case when the cluster setting + // cluster.remote_store.index.restrict.async-durability is true and the index setting requests async durability + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); + final Settings.Builder requestSettings = Settings.builder(); + requestSettings.put(INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC); + request.settings(requestSettings.build()); + Settings settings = Settings.builder().put(CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING.getKey(), true).build(); + 
clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + IllegalArgumentException exception = assertThrows( + IllegalArgumentException.class, + () -> aggregateIndexSettings( + ClusterState.EMPTY_STATE, + request, + Settings.EMPTY, + null, + Settings.builder().put("node.attr.remote_store.setting", "test").build(), + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Collections.emptySet(), + clusterSettings + ) + ); + // Verify that the message is as expected + assertEquals( + "index setting [index.translog.durability=async] is not allowed as cluster setting [cluster.remote_store.index.restrict.async-durability=true]", + exception.getMessage() + ); + } + + public void testRequestDurabilityWhenRestrictSettingTrue() { + // This checks that aggregateIndexSettings works for the case when the cluster setting + // cluster.remote_store.index.restrict.async-durability is true; request durability is still allowed + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); + final Settings.Builder requestSettings = Settings.builder(); + requestSettings.put(INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.REQUEST); + request.settings(requestSettings.build()); + Settings settings = Settings.builder().put(CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING.getKey(), true).build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + Settings indexSettings = aggregateIndexSettings( + ClusterState.EMPTY_STATE, + request, + Settings.EMPTY, + null, + Settings.EMPTY, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Collections.emptySet(), + clusterSettings + ); + assertTrue(clusterSettings.get(IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING)); + assertEquals(Translog.Durability.REQUEST, INDEX_TRANSLOG_DURABILITY_SETTING.get(indexSettings)); + } + + public void testIndexCreationWithIndexStoreTypeRemoteStoreThrowsException() { + // This checks that aggregateIndexSettings throws an exception for the case when the index setting + // index.store.type is set to remote_snapshot + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); + final Settings.Builder requestSettings = Settings.builder(); + requestSettings.put(INDEX_STORE_TYPE_SETTING.getKey(), RestoreSnapshotRequest.StorageType.REMOTE_SNAPSHOT); + request.settings(requestSettings.build()); + final IllegalArgumentException error = expectThrows( + IllegalArgumentException.class, + () -> aggregateIndexSettings( + ClusterState.EMPTY_STATE, + request, + Settings.EMPTY, + null, + Settings.EMPTY, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Collections.emptySet(), + clusterSettings + ) + ); + assertThat( + error.getMessage(), + containsString( + "cannot create index with index setting \"index.store.type\" set to \"remote_snapshot\". 
Store type can be set to \"remote_snapshot\" only when restoring a remote snapshot by using \"storage_type\": \"remote_snapshot\"" + ) + ); + } + private IndexTemplateMetadata addMatchingTemplate(Consumer<IndexTemplateMetadata.Builder> configurator) { IndexTemplateMetadata.Builder builder = templateMetadataBuilder("template1", "te*"); configurator.accept(builder); @@ -1619,18 +1980,17 @@ private IndexTemplateMetadata.Builder templateMetadataBuilder(String name, Strin private CompressedXContent createMapping(String fieldName, String fieldType) { try { - final String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject(MapperService.SINGLE_MAPPING_NAME) - .startObject("properties") - .startObject(fieldName) - .field("type", fieldType) - .endObject() - .endObject() - .endObject() - .endObject() - ); + final String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject(MapperService.SINGLE_MAPPING_NAME) + .startObject("properties") + .startObject(fieldName) + .field("type", fieldType) + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); return new CompressedXContent(mapping); } catch (IOException e) { @@ -1667,4 +2027,9 @@ private void verifyRemoteStoreIndexSettings( assertEquals(translogBufferInterval, INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(indexSettings)); } + @After + public void shutdown() throws Exception { + clusterSettings = null; + } + } diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataDeleteIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataDeleteIndexServiceTests.java index 5633b874eb423..ccce338e388ae 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataDeleteIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataDeleteIndexServiceTests.java @@ -54,11 +54,11 @@ import org.hamcrest.core.IsNull; import org.junit.Before; -import java.util.HashSet; import java.util.Collections; +import java.util.HashSet; import java.util.List; -import java.util.Map; import java.util.Locale; +import java.util.Map; import java.util.Set; import java.util.stream.Collectors; import java.util.stream.IntStream; diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexAliasesServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexAliasesServiceTests.java index bf66f577e182b..9fb4551f106ec 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexAliasesServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexAliasesServiceTests.java @@ -40,6 +40,7 @@ import org.opensearch.common.util.set.Sets; import org.opensearch.core.index.Index; import org.opensearch.index.IndexNotFoundException; +import org.opensearch.rest.action.admin.indices.AliasesNotFoundException; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; @@ -164,11 +165,11 @@ public void testMustExist() { // Show that removing non-existing alias with mustExist == true fails final ClusterState finalCS = after; - final IllegalArgumentException iae = expectThrows( - IllegalArgumentException.class, + final AliasesNotFoundException iae = expectThrows( + AliasesNotFoundException.class, () -> service.applyAliasActions(finalCS, singletonList(new AliasAction.Remove(index, "test_2", true))) ); - assertThat(iae.getMessage(), containsString("required alias [test_2] does not exist")); + assertThat(iae.getMessage(), 
containsString("aliases [test_2] missing")); } public void testMultipleIndices() { diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceTests.java index 1863d45af8f2e..8c42b6ef1cca6 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceTests.java @@ -55,8 +55,8 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.common.Strings; import org.opensearch.core.index.Index; -import org.opensearch.index.IndexNotFoundException; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.IndexNotFoundException; import org.opensearch.repositories.IndexId; import org.opensearch.snapshots.Snapshot; import org.opensearch.snapshots.SnapshotId; diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java index 4442f192a37ea..0b8e64e31a523 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java @@ -33,14 +33,12 @@ package org.opensearch.cluster.metadata; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.MetadataIndexTemplateService.PutRequest; import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.Strings; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.IndexScopedSettings; @@ -48,12 +46,13 @@ import org.opensearch.common.settings.SettingsException; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.LoggingDeprecationHandler; -import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.env.Environment; -import org.opensearch.core.index.Index; import org.opensearch.index.mapper.MapperParsingException; import org.opensearch.index.mapper.MapperService; import org.opensearch.indices.IndexTemplateMissingException; @@ -77,6 +76,10 @@ import java.util.stream.Collectors; import static java.util.Collections.singletonList; +import static org.opensearch.common.settings.Settings.builder; +import static org.opensearch.env.Environment.PATH_HOME_SETTING; +import static org.opensearch.index.mapper.DataStreamFieldMapper.Defaults.TIMESTAMP_FIELD; +import static org.opensearch.indices.ShardLimitValidatorTests.createTestShardLimitService; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.containsStringIgnoringCase; import static 
org.hamcrest.CoreMatchers.equalTo; @@ -89,10 +92,6 @@ import static org.hamcrest.Matchers.matchesRegex; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import static org.opensearch.common.settings.Settings.builder; -import static org.opensearch.env.Environment.PATH_HOME_SETTING; -import static org.opensearch.index.mapper.DataStreamFieldMapper.Defaults.TIMESTAMP_FIELD; -import static org.opensearch.indices.ShardLimitValidatorTests.createTestShardLimitService; public class MetadataIndexTemplateServiceTests extends OpenSearchSingleNodeTestCase { @@ -191,17 +190,16 @@ public void testIndexTemplateWithValidateMapping() throws Exception { PutRequest request = new PutRequest("api", "validate_template"); request.patterns(singletonList("te*")); request.mappings( - Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("field2") - .field("type", "text") - .field("analyzer", "custom_1") - .endObject() - .endObject() - .endObject() - ) + XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("field2") + .field("type", "text") + .field("analyzer", "custom_1") + .endObject() + .endObject() + .endObject() + .toString() ); List<Throwable> errors = putTemplateDetail(request); @@ -2148,7 +2146,7 @@ public static void assertTemplatesEqual(ComposableIndexTemplate actual, Composab Map<String, Object> actualMappings; Map<String, Object> expectedMappings; try ( - XContentParser parser = XContentType.JSON.xContent() + XContentParser parser = MediaTypeRegistry.JSON.xContent() .createParser( new NamedXContentRegistry(Collections.emptyList()), LoggingDeprecationHandler.INSTANCE, @@ -2160,7 +2158,7 @@ public static void assertTemplatesEqual(ComposableIndexTemplate actual, Composab throw new AssertionError(e); } try ( - XContentParser parser = XContentType.JSON.xContent() + XContentParser parser = MediaTypeRegistry.JSON.xContent() .createParser( new NamedXContentRegistry(Collections.emptyList()), LoggingDeprecationHandler.INSTANCE, diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataMappingServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataMappingServiceTests.java index 3574187a2fa01..d738aa50137bb 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataMappingServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataMappingServiceTests.java @@ -40,8 +40,8 @@ import org.opensearch.core.index.Index; import org.opensearch.index.IndexService; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchSingleNodeTestCase; import java.util.Collection; import java.util.Collections; diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataTests.java index f8c706e639445..618fcb923bc60 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataTests.java @@ -38,20 +38,21 @@ import org.opensearch.cluster.DataStreamTestHelper; import org.opensearch.cluster.coordination.CoordinationMetadata; import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfigExclusion; -import org.opensearch.common.Strings; import org.opensearch.common.UUIDs; -import org.opensearch.core.common.bytes.BytesReference; 
import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.set.Sets; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.plugins.MapperPlugin; import org.opensearch.test.OpenSearchTestCase; @@ -626,6 +627,39 @@ public void testGlobalStateEqualsCoordinationMetadata() { assertFalse(Metadata.isGlobalStateEquals(metadata1, metadata2)); } + public void testGlobalResourcesStateEqualsCoordinationMetadata() { + CoordinationMetadata coordinationMetadata1 = new CoordinationMetadata( + randomNonNegativeLong(), + randomVotingConfig(), + randomVotingConfig(), + randomVotingConfigExclusions() + ); + Metadata metadata1 = Metadata.builder() + .coordinationMetadata(coordinationMetadata1) + .clusterUUID(randomAlphaOfLength(10)) + .clusterUUIDCommitted(false) + .hashesOfConsistentSettings(Map.of("a", "b")) + .persistentSettings(Settings.builder().put(Metadata.SETTING_READ_ONLY_SETTING.getKey(), true).build()) + .build(); + CoordinationMetadata coordinationMetadata2 = new CoordinationMetadata( + randomNonNegativeLong(), + randomVotingConfig(), + randomVotingConfig(), + randomVotingConfigExclusions() + ); + Metadata metadata2 = Metadata.builder() + .coordinationMetadata(coordinationMetadata2) + .clusterUUIDCommitted(true) + .clusterUUID(randomAlphaOfLength(11)) + .hashesOfConsistentSettings(Map.of("b", "a")) + .persistentSettings(Settings.builder().put(Metadata.SETTING_READ_ONLY_SETTING.getKey(), true).build()) + .build(); + + assertTrue(Metadata.isGlobalStateEquals(metadata1, metadata1)); + assertFalse(Metadata.isGlobalStateEquals(metadata1, metadata2)); + assertTrue(Metadata.isGlobalResourcesMetadataEquals(metadata1, metadata2)); + } + public void testSerializationWithIndexGraveyard() throws IOException { final IndexGraveyard graveyard = IndexGraveyardTests.createRandom(); final Metadata originalMeta = Metadata.builder().indexGraveyard(graveyard).build(); @@ -724,7 +758,7 @@ public void testFindMappingsWithFilters() throws IOException { Map<String, Object> doc = (Map<String, Object>) stringObjectMap.get("_doc"); try (XContentBuilder builder = JsonXContent.contentBuilder()) { builder.map(doc); - mapping = Strings.toString(builder); + mapping = builder.toString(); } } @@ -1425,6 +1459,29 @@ public void testMetadataBuildInvocations() { compareMetadata(previousMetadata, builtMetadata, false, true, true); } + public void testIsSegmentReplicationEnabled() { + final String indexName = "test"; + Settings.Builder builder = settings(Version.CURRENT).put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT); + IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(indexName) + 
.settings(builder) + .numberOfShards(1) + .numberOfReplicas(1); + Metadata.Builder metadataBuilder = Metadata.builder().put(indexMetadataBuilder); + Metadata metadata = metadataBuilder.build(); + assertTrue(metadata.isSegmentReplicationEnabled(indexName)); + } + + public void testIsSegmentReplicationDisabled() { + final String indexName = "test"; + IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(indexName) + .settings(settings(Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(1); + Metadata.Builder metadataBuilder = Metadata.builder().put(indexMetadataBuilder); + Metadata metadata = metadataBuilder.build(); + assertFalse(metadata.isSegmentReplicationEnabled(indexName)); + } + public static Metadata randomMetadata() { Metadata.Builder md = Metadata.builder() .put(buildIndexMetadata("index", "alias", randomBoolean() ? null : randomBoolean()).build(), randomBoolean()) diff --git a/server/src/test/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceTests.java index c0f0fc699ba78..36d984b7eb99b 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceTests.java @@ -33,7 +33,6 @@ package org.opensearch.cluster.metadata; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest; import org.opensearch.action.admin.indices.template.put.PutIndexTemplateRequest; import org.opensearch.action.support.master.AcknowledgedResponse; @@ -46,10 +45,11 @@ import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/test/java/org/opensearch/cluster/metadata/ToAndFromJsonMetadataTests.java b/server/src/test/java/org/opensearch/cluster/metadata/ToAndFromJsonMetadataTests.java index b87412a6caadd..6d8439b7b249c 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/ToAndFromJsonMetadataTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/ToAndFromJsonMetadataTests.java @@ -35,12 +35,11 @@ import org.opensearch.Version; import org.opensearch.action.admin.indices.rollover.RolloverInfo; import org.opensearch.cluster.coordination.CoordinationMetadata; -import org.opensearch.common.Strings; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.TestCustomMetadata; @@ -275,7 +274,7 @@ public void testToXContentGateway_FlatSettingTrue_ReduceMappingFalse() throws IO + " }\n" + " }\n" + 
"}", - Strings.toString(builder) + builder.toString() ); } @@ -371,7 +370,7 @@ public void testToXContentAPI_SameTypeName() throws IOException { + " }\n" + " }\n" + "}", - Strings.toString(builder) + builder.toString() ); } @@ -441,7 +440,7 @@ public void testToXContentGateway_FlatSettingFalse_ReduceMappingTrue() throws IO + " }\n" + " }\n" + "}", - Strings.toString(builder) + builder.toString() ); } @@ -546,7 +545,7 @@ public void testToXContentAPI_FlatSettingTrue_ReduceMappingFalse() throws IOExce + " }\n" + " }\n" + "}", - Strings.toString(builder) + builder.toString() ); } @@ -657,7 +656,7 @@ public void testToXContentAPI_FlatSettingFalse_ReduceMappingTrue() throws IOExce + " }\n" + " }\n" + "}", - Strings.toString(builder) + builder.toString() ); } diff --git a/server/src/test/java/org/opensearch/cluster/metadata/ViewTests.java b/server/src/test/java/org/opensearch/cluster/metadata/ViewTests.java new file mode 100644 index 0000000000000..ad39e2b103087 --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/metadata/ViewTests.java @@ -0,0 +1,80 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.metadata; + +import org.opensearch.cluster.metadata.View.Target; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.test.AbstractSerializingTestCase; +import org.hamcrest.MatcherAssert; + +import java.io.IOException; +import java.util.Set; +import java.util.TreeSet; + +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; + +public class ViewTests extends AbstractSerializingTestCase<View> { + + private static Set<Target> randomTargets() { + int numTargets = randomIntBetween(1, 25); + return new TreeSet<>(randomList(1, numTargets, () -> new View.Target(randomAlphaOfLength(8)))); + } + + private static View randomInstance() { + final Set<Target> targets = randomTargets(); + final String viewName = randomAlphaOfLength(10); + final String description = randomAlphaOfLength(100); + return new View(viewName, description, Math.abs(randomLong()), Math.abs(randomLong()), targets); + } + + @Override + protected View doParseInstance(XContentParser parser) throws IOException { + return View.fromXContent(parser); + } + + @Override + protected Writeable.Reader<View> instanceReader() { + return View::new; + } + + @Override + protected View createTestInstance() { + return randomInstance(); + } + + public void testNullName() { + final NullPointerException npe = assertThrows(NullPointerException.class, () -> new View(null, null, null, null, null)); + + MatcherAssert.assertThat(npe.getMessage(), equalTo("Name must be provided")); + } + + public void testNullTargets() { + final NullPointerException npe = assertThrows(NullPointerException.class, () -> new View("name", null, null, null, null)); + + MatcherAssert.assertThat(npe.getMessage(), equalTo("Targets are required on a view")); + } + + public void testNullTargetIndexPattern() { + final NullPointerException npe = assertThrows(NullPointerException.class, () -> new View.Target((String) null)); + + MatcherAssert.assertThat(npe.getMessage(), equalTo("IndexPattern is required")); + } + + public void testDefaultValues() { + final View view = new View("myName", null, null, null, Set.of()); + + MatcherAssert.assertThat(view.getName(), equalTo("myName")); + 
MatcherAssert.assertThat(view.getDescription(), equalTo(null)); + MatcherAssert.assertThat(view.getCreatedAt(), equalTo(-1L)); + MatcherAssert.assertThat(view.getModifiedAt(), equalTo(-1L)); + MatcherAssert.assertThat(view.getTargets(), empty()); + } +} diff --git a/server/src/test/java/org/opensearch/cluster/metadata/WeightedRoutingMetadataTests.java b/server/src/test/java/org/opensearch/cluster/metadata/WeightedRoutingMetadataTests.java index b78d1b56364eb..e19bde5d53d8a 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/WeightedRoutingMetadataTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/WeightedRoutingMetadataTests.java @@ -8,29 +8,60 @@ package org.opensearch.cluster.metadata; +import org.opensearch.cluster.ClusterModule; +import org.opensearch.cluster.Diff; import org.opensearch.cluster.routing.WeightedRouting; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.test.AbstractXContentTestCase; +import org.opensearch.test.AbstractDiffableSerializationTestCase; import java.io.IOException; +import java.util.HashMap; import java.util.Map; -public class WeightedRoutingMetadataTests extends AbstractXContentTestCase<WeightedRoutingMetadata> { +public class WeightedRoutingMetadataTests extends AbstractDiffableSerializationTestCase<Metadata.Custom> { + + @Override + protected Writeable.Reader<Metadata.Custom> instanceReader() { + return WeightedRoutingMetadata::new; + } + @Override protected WeightedRoutingMetadata createTestInstance() { + String attributeName = "zone"; Map<String, Double> weights = Map.of("a", 1.0, "b", 1.0, "c", 0.0); - WeightedRouting weightedRouting = new WeightedRouting("zone", weights); + if (randomBoolean()) { + weights = new HashMap<>(); + attributeName = ""; + } + WeightedRouting weightedRouting = new WeightedRouting(attributeName, weights); WeightedRoutingMetadata weightedRoutingMetadata = new WeightedRoutingMetadata(weightedRouting, -1); + return weightedRoutingMetadata; } + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(ClusterModule.getNamedWriteables()); + } + @Override protected WeightedRoutingMetadata doParseInstance(XContentParser parser) throws IOException { return WeightedRoutingMetadata.fromXContent(parser); } @Override - protected boolean supportsUnknownFields() { - return false; + protected Metadata.Custom makeTestChanges(Metadata.Custom testInstance) { + + WeightedRouting weightedRouting = new WeightedRouting("", new HashMap<>()); + WeightedRoutingMetadata weightedRoutingMetadata = new WeightedRoutingMetadata(weightedRouting, -1); + return weightedRoutingMetadata; } + + @Override + protected Writeable.Reader<Diff<Metadata.Custom>> diffReader() { + return WeightedRoutingMetadata::readDiffFrom; + } + } diff --git a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeFiltersTests.java b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeFiltersTests.java index 54f9c46e999d7..691a3cb418f94 100644 --- a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeFiltersTests.java +++ b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeFiltersTests.java @@ -35,7 +35,7 @@ import org.opensearch.Version; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import 
diff --git a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeFiltersTests.java b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeFiltersTests.java
index 54f9c46e999d7..691a3cb418f94 100644
--- a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeFiltersTests.java
+++ b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeFiltersTests.java
@@ -35,7 +35,7 @@
 import org.opensearch.Version;
 import org.opensearch.common.settings.Setting;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.transport.TransportAddress;
+import org.opensearch.core.common.transport.TransportAddress;
 import org.opensearch.test.OpenSearchTestCase;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
diff --git a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeRoleTests.java b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeRoleTests.java
index f906a0f937d28..5f0658f7f110b 100644
--- a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeRoleTests.java
+++ b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeRoleTests.java
@@ -33,8 +33,8 @@
 package org.opensearch.cluster.node;

 import org.opensearch.common.settings.Setting;
-import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.EqualsHashCodeTestUtils;
+import org.opensearch.test.OpenSearchTestCase;

 import java.util.Arrays;
 import java.util.HashSet;
diff --git a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java
index 14f6880419286..c8a6fc76ce820 100644
--- a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java
+++ b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java
@@ -34,30 +34,33 @@
 import org.opensearch.Version;
 import org.opensearch.common.io.stream.BytesStreamOutput;
-import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.common.settings.Setting;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.transport.TransportAddress;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.transport.TransportAddress;
+import org.opensearch.node.remotestore.RemoteStoreNodeAttribute;
 import org.opensearch.test.NodeRoles;
 import org.opensearch.test.OpenSearchTestCase;

 import java.net.InetAddress;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Locale;
+import java.util.Map;
 import java.util.Set;
 import java.util.stream.Collectors;

 import static java.util.Collections.emptyMap;
 import static java.util.Collections.emptySet;
+import static org.opensearch.test.NodeRoles.nonRemoteClusterClientNode;
+import static org.opensearch.test.NodeRoles.nonSearchNode;
+import static org.opensearch.test.NodeRoles.remoteClusterClientNode;
+import static org.opensearch.test.NodeRoles.searchNode;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 import static org.hamcrest.Matchers.hasItem;
 import static org.hamcrest.Matchers.not;
-import static org.opensearch.test.NodeRoles.nonRemoteClusterClientNode;
-import static org.opensearch.test.NodeRoles.remoteClusterClientNode;
-import static org.opensearch.test.NodeRoles.searchNode;
-import static org.opensearch.test.NodeRoles.nonSearchNode;

 public class DiscoveryNodeTests extends OpenSearchTestCase {
@@ -81,6 +84,22 @@ public void testRolesAreSorted() {

     }

+    public void testRemoteStoreRedactionInToString() {
+        final Set<DiscoveryNodeRole> roles = new HashSet<>(randomSubsetOf(DiscoveryNodeRole.BUILT_IN_ROLES));
+        Map<String, String> attributes = new HashMap<>();
+        attributes.put(RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, "test-repo");
+        attributes.put(RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, "test-repo");
+        final DiscoveryNode node = new DiscoveryNode(
+            "name",
+            "id",
+            new TransportAddress(TransportAddress.META_ADDRESS, 9200),
+            attributes,
+            roles,
+            Version.CURRENT
+        );
+        assertFalse(node.toString().contains(RemoteStoreNodeAttribute.REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX));
+    }
+
     public void testDiscoveryNodeIsCreatedWithHostFromInetAddress() throws Exception {
         InetAddress inetAddress = randomBoolean()
             ? InetAddress.getByName("192.0.2.1")
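The testRemoteStoreRedactionInToString addition above pins down that DiscoveryNode#toString() filters out attribute keys carrying the remote-store prefix, so repository names do not leak into logs. A hypothetical illustration of the observable behavior, assuming the redaction applies only to the string rendering and not to getAttributes():

    // the attribute remains programmatically accessible...
    assertTrue(node.getAttributes().containsKey(RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY));
    // ...but the human-readable rendering omits every remote-store-prefixed entry
    assertFalse(node.toString().contains(RemoteStoreNodeAttribute.REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX));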
diff --git a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java
index 47676ecc13f5e..d2450859dfcd4 100644
--- a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java
+++ b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java
@@ -33,10 +33,11 @@
 package org.opensearch.cluster.node;

 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+
 import org.opensearch.LegacyESVersion;
 import org.opensearch.Version;
 import org.opensearch.common.settings.Setting;
-import org.opensearch.common.transport.TransportAddress;
+import org.opensearch.core.common.transport.TransportAddress;
 import org.opensearch.test.OpenSearchTestCase;

 import java.util.ArrayList;
diff --git a/server/src/test/java/org/opensearch/cluster/routing/AllocationIdTests.java b/server/src/test/java/org/opensearch/cluster/routing/AllocationIdTests.java
index 40ce062d6e6e2..20b2a80a3f4ce 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/AllocationIdTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/AllocationIdTests.java
@@ -33,11 +33,11 @@
 package org.opensearch.cluster.routing;

 import org.opensearch.cluster.routing.RecoverySource.ExistingStoreRecoverySource;
-import org.opensearch.core.common.bytes.BytesReference;
-import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.common.xcontent.XContentFactory;
 import org.opensearch.common.xcontent.json.JsonXContent;
+import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.core.index.shard.ShardId;
+import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.test.OpenSearchTestCase;

 import java.io.IOException;
diff --git a/server/src/test/java/org/opensearch/cluster/routing/BatchedRerouteServiceTests.java b/server/src/test/java/org/opensearch/cluster/routing/BatchedRerouteServiceTests.java
index 796d73aa715de..a22369c92bd35 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/BatchedRerouteServiceTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/BatchedRerouteServiceTests.java
@@ -32,7 +32,6 @@
 package org.opensearch.cluster.routing;

 import org.opensearch.OpenSearchException;
-import org.opensearch.action.ActionListener;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.ClusterStateUpdateTask;
 import org.opensearch.cluster.coordination.FailedToCommitClusterStateException;
@@ -40,6 +39,7 @@
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.Priority;
 import org.opensearch.common.Randomness;
+import org.opensearch.core.action.ActionListener;
 import org.opensearch.test.ClusterServiceUtils;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.threadpool.TestThreadPool;
diff --git a/server/src/test/java/org/opensearch/cluster/routing/DelayedAllocationServiceTests.java b/server/src/test/java/org/opensearch/cluster/routing/DelayedAllocationServiceTests.java
index a3878f5cbeb25..6611554004639 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/DelayedAllocationServiceTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/DelayedAllocationServiceTests.java
@@ -64,8 +64,8 @@
 import static org.opensearch.common.unit.TimeValue.timeValueSeconds;
 import static org.hamcrest.Matchers.equalTo;
 import static org.mockito.Mockito.any;
-import static org.mockito.Mockito.eq;
 import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.eq;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.verifyNoMoreInteractions;
diff --git a/server/src/test/java/org/opensearch/cluster/routing/MovePrimaryFirstTests.java b/server/src/test/java/org/opensearch/cluster/routing/MovePrimaryFirstTests.java
deleted file mode 100644
index a30581e2576e2..0000000000000
--- a/server/src/test/java/org/opensearch/cluster/routing/MovePrimaryFirstTests.java
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.cluster.routing;
-
-import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
-import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
-import org.opensearch.cluster.ClusterStateListener;
-import org.opensearch.common.settings.Settings;
-import org.opensearch.test.InternalTestCluster;
-import org.opensearch.test.OpenSearchIntegTestCase;
-
-import java.util.Iterator;
-import java.util.concurrent.CountDownLatch;
-
-import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
-
-@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
-@ThreadLeakScope(ThreadLeakScope.Scope.NONE)
-public class MovePrimaryFirstTests extends OpenSearchIntegTestCase {
-
-    protected String startDataOnlyNode(final String zone) {
-        final Settings settings = Settings.builder().put("node.attr.zone", zone).build();
-        return internalCluster().startDataOnlyNode(settings);
-    }
-
-    protected void createAndIndex(String index, int replicaCount, int shardCount) {
-        assertAcked(
-            prepareCreate(
-                index,
-                -1,
-                Settings.builder()
-                    .put("number_of_shards", shardCount)
-                    .put("number_of_replicas", replicaCount)
-                    .put("max_result_window", 20000)
-            )
-        );
-        int startDocCountId = 0;
-        for (int i = 0; i < 10; i++) {
-            index(index, "_doc", Integer.toString(startDocCountId), "foo", "bar" + startDocCountId);
-            ++startDocCountId;
-        }
-        flushAndRefresh(index);
-    }
-
-    /**
-     * Creates two nodes each in two zones and shuts down nodes in zone1 after
-     * relocating half the number of shards. Shards per node constraint ensures
-     * that exactly 50% of shards relocate to nodes in zone2 giving time to shut down
-     * nodes in zone1. Since primaries are relocated first as movePrimaryFirst is
-     * enabled, cluster should not become red and zone2 nodes have all the primaries
-     */
-    public void testClusterGreenAfterPartialRelocation() throws InterruptedException {
-        internalCluster().startClusterManagerOnlyNodes(1);
-        final String z1 = "zone-1", z2 = "zone-2";
-        final int primaryShardCount = 6;
-        assertTrue("Primary shard count must be even for equal distribution across two nodes", primaryShardCount % 2 == 0);
-        final String z1n1 = startDataOnlyNode(z1);
-        ensureGreen();
-        createAndIndex("foo", 1, primaryShardCount);
-        ensureYellow();
-        // Start second node in same zone only after yellow cluster to ensure
-        // that one gets all primaries and other all secondaries
-        final String z1n2 = startDataOnlyNode(z1);
-        ensureGreen();
-
-        // Enable cluster level setting for moving primaries first and keep new
-        // zone nodes excluded to prevent any shard relocation
-        ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest();
-        settingsRequest.persistentSettings(
-            Settings.builder().put("cluster.routing.allocation.move.primary_first", true).put("cluster.routing.allocation.exclude.zone", z2)
-        );
-        client().admin().cluster().updateSettings(settingsRequest).actionGet();
-
-        final String z2n1 = startDataOnlyNode(z2);
-        final String z2n2 = startDataOnlyNode(z2);
-
-        // Create cluster state listener to compute number of shards on new zone
-        // nodes before counting down the latch
-        final CountDownLatch primaryMoveLatch = new CountDownLatch(1);
-        final ClusterStateListener listener = event -> {
-            if (event.routingTableChanged()) {
-                final RoutingNodes routingNodes = event.state().getRoutingNodes();
-                int startedCount = 0;
-                for (Iterator<RoutingNode> it = routingNodes.iterator(); it.hasNext();) {
-                    RoutingNode routingNode = it.next();
-                    final String nodeName = routingNode.node().getName();
-                    if (nodeName.equals(z2n1) || nodeName.equals(z2n2)) {
-                        startedCount += routingNode.numberOfShardsWithState(ShardRoutingState.STARTED);
-                    }
-                }
-
-                // Count down the latch once all the primary shards have initialized on nodes in zone-2
-                if (startedCount == primaryShardCount) {
-                    primaryMoveLatch.countDown();
-                }
-            }
-        };
-        internalCluster().clusterService().addListener(listener);
-
-        // Exclude zone1 nodes for allocation and await latch count down
-        settingsRequest = new ClusterUpdateSettingsRequest();
-        settingsRequest.persistentSettings(
-            Settings.builder()
-                .put("cluster.routing.allocation.exclude.zone", z1)
-                // Total shards per node constraint is added to pause the relocation after primary shards
-                // have relocated to allow time for node shutdown and validate yellow cluster
-                .put("cluster.routing.allocation.total_shards_per_node", primaryShardCount / 2)
-        );
-        client().admin().cluster().updateSettings(settingsRequest);
-        primaryMoveLatch.await();
-
-        // Shutdown both nodes in zone 1 and ensure cluster does not become red
-        try {
-            internalCluster().stopRandomNode(InternalTestCluster.nameFilter(z1n1));
-            internalCluster().stopRandomNode(InternalTestCluster.nameFilter(z1n2));
-        } catch (Exception e) {}
-        // Due to shards per node constraint cluster cannot be green
-        // Since yellow suffices for this test, not removing shards constraint
-        ensureYellow();
-    }
-}
diff --git a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingAwarenessTests.java b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingAwarenessTests.java
index d9675a548cc08..de787df8c08a9 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingAwarenessTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingAwarenessTests.java
@@ -8,13 +8,13 @@
 package org.opensearch.cluster.routing;

-import org.junit.After;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.test.OpenSearchIntegTestCase;
+import org.junit.After;

-import static org.hamcrest.Matchers.equalTo;
 import static org.opensearch.cluster.routing.OperationRouting.IGNORE_AWARENESS_ATTRIBUTES;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;

 public class OperationRoutingAwarenessTests extends OpenSearchIntegTestCase {

diff --git a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java
index fd1cb63d61d82..4f3e50eebb9c6 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java
@@ -46,8 +46,8 @@
 import org.opensearch.common.unit.TimeValue;
 import org.opensearch.common.util.io.IOUtils;
 import org.opensearch.core.index.Index;
-import org.opensearch.index.IndexModule;
 import org.opensearch.core.index.shard.ShardId;
+import org.opensearch.index.IndexModule;
 import org.opensearch.node.ResponseCollectorService;
 import org.opensearch.test.ClusterServiceUtils;
 import org.opensearch.test.OpenSearchTestCase;
@@ -66,15 +66,15 @@
 import java.util.TreeMap;

 import static java.util.Collections.singletonMap;
+import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_CREATION_DATE;
+import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS;
+import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS;
+import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_CREATED;
 import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.Matchers.containsInAnyOrder;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.object.HasToString.hasToString;
-import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_CREATION_DATE;
-import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS;
-import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS;
-import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_CREATED;

 public class OperationRoutingTests extends OpenSearchTestCase {
     public void testGenerateShardId() {
diff --git a/server/src/test/java/org/opensearch/cluster/routing/PlainShardIteratorTests.java b/server/src/test/java/org/opensearch/cluster/routing/PlainShardIteratorTests.java
index 7aa61ad8d9819..e9d5d377d0a70 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/PlainShardIteratorTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/PlainShardIteratorTests.java
@@ -33,8 +33,8 @@
 package org.opensearch.cluster.routing;

 import org.opensearch.core.index.shard.ShardId;
-import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.EqualsHashCodeTestUtils;
+import org.opensearch.test.OpenSearchTestCase;
 import org.hamcrest.Matchers;

 import java.util.ArrayList;
diff --git a/server/src/test/java/org/opensearch/cluster/routing/RoutingNodeTests.java b/server/src/test/java/org/opensearch/cluster/routing/RoutingNodeTests.java
index 578d537653684..cc4f2e510cb31 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/RoutingNodeTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/RoutingNodeTests.java
@@ -35,7 +35,7 @@
 import org.opensearch.Version;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.node.DiscoveryNode;
-import org.opensearch.common.transport.TransportAddress;
+import org.opensearch.core.common.transport.TransportAddress;
 import org.opensearch.core.index.Index;
 import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.test.OpenSearchTestCase;
diff --git a/server/src/test/java/org/opensearch/cluster/routing/RoutingNodesTests.java b/server/src/test/java/org/opensearch/cluster/routing/RoutingNodesTests.java
index 73136a71bc12a..7a0fd76b0fbd9 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/RoutingNodesTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/RoutingNodesTests.java
@@ -32,8 +32,6 @@
 package org.opensearch.cluster.routing;

-import org.junit.Before;
-import org.mockito.Mockito;
 import org.opensearch.Version;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.OpenSearchAllocationTestCase;
@@ -43,6 +41,7 @@
 import org.opensearch.cluster.routing.allocation.AllocationService;
 import org.opensearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
 import org.opensearch.common.settings.Settings;
+import org.junit.Before;

 import java.util.Iterator;
 import java.util.List;
@@ -124,7 +123,7 @@ private IndexMetadata.Builder createIndexMetadata(String indexName) {
             .numberOfShards(this.numberOfShards);
     }

-    public void testInterleavedShardIterator() {
+    public void testInterleavedShardIteratorPrimaryFirst() {
         // Initialize all the shards for test index 1 and 2
         initPrimaries();
         startInitializingShards(TEST_INDEX_1);
@@ -147,7 +146,8 @@ public void testInterleavedShardIterator() {
         }

         // Get primary first shard iterator and assert primary shards are iterated over first
-        final Iterator<ShardRouting> iterator = this.clusterState.getRoutingNodes().nodeInterleavedShardIterator(true);
+        final Iterator<ShardRouting> iterator = this.clusterState.getRoutingNodes()
+            .nodeInterleavedShardIterator(ShardMovementStrategy.PRIMARY_FIRST);
         boolean iteratingPrimary = true;
         int shardCount = 0;
         while (iterator.hasNext()) {
@@ -155,14 +155,14 @@ public void testInterleavedShardIterator() {
             if (iteratingPrimary) {
                 iteratingPrimary = shard.primary();
             } else {
-                assert shard.primary() == false;
+                assertFalse(shard.primary());
             }
             shardCount++;
         }
-        assert shardCount == this.totalNumberOfShards;
+        assertEquals(this.totalNumberOfShards, shardCount);
     }

-    public void testSwapPrimaryWithReplica() {
+    public void testInterleavedShardIteratorNoPreference() {
         // Initialize all the shards for test index 1 and 2
         initPrimaries();
         startInitializingShards(TEST_INDEX_1);
@@ -170,31 +170,38 @@ public void testSwapPrimaryWithReplica() {
         startInitializingShards(TEST_INDEX_2);
         startInitializingShards(TEST_INDEX_2);

-        // Create primary shard count imbalance between two nodes
-        final RoutingNodes routingNodes = this.clusterState.getRoutingNodes();
-        final RoutingNode node0 = routingNodes.node("node0");
-        final RoutingNode node1 = routingNodes.node("node1");
-        final List<ShardRouting> shardRoutingList = node0.shardsWithState(TEST_INDEX_1, ShardRoutingState.STARTED);
-        final RoutingChangesObserver routingChangesObserver = Mockito.mock(RoutingChangesObserver.class);
-        int swaps = 0;
-
-        for (ShardRouting routing : shardRoutingList) {
-            if (routing.primary()) {
-                ShardRouting swap = node1.getByShardId(routing.shardId());
-                routingNodes.swapPrimaryWithReplica(logger, routing, swap, routingChangesObserver);
-                swaps++;
-            }
+        final Iterator<ShardRouting> iterator = this.clusterState.getRoutingNodes()
+            .nodeInterleavedShardIterator(ShardMovementStrategy.NO_PREFERENCE);
+        int shardCount = 0;
+        while (iterator.hasNext()) {
+            iterator.next();
+            shardCount++;
+        }
         }
-        Mockito.verify(routingChangesObserver, Mockito.times(swaps)).replicaPromoted(Mockito.any());
+        assertEquals(this.totalNumberOfShards, shardCount);
+    }

-        final List<ShardRouting> shards = node1.shardsWithState(TEST_INDEX_1, ShardRoutingState.STARTED);
+    public void testInterleavedShardIteratorReplicaFirst() {
+        // Initialize all the shards for test index 1 and 2
+        initPrimaries();
+        startInitializingShards(TEST_INDEX_1);
+        startInitializingShards(TEST_INDEX_1);
+        startInitializingShards(TEST_INDEX_2);
+        startInitializingShards(TEST_INDEX_2);
+
+        // Get replica first shard iterator and assert replica shards are iterated over first
+        final Iterator<ShardRouting> iterator = this.clusterState.getRoutingNodes()
+            .nodeInterleavedShardIterator(ShardMovementStrategy.REPLICA_FIRST);
+        boolean iteratingReplica = true;
         int shardCount = 0;
-        for (ShardRouting shard : shards) {
-            if (shard.primary()) {
-                shardCount++;
+        while (iterator.hasNext()) {
+            final ShardRouting shard = iterator.next();
+            if (iteratingReplica) {
+                iteratingReplica = shard.primary() == false;
+            } else {
+                assertTrue(shard.primary());
             }
+            shardCount++;
         }
-
-        assertTrue(shardCount >= swaps);
+        assertEquals(this.totalNumberOfShards, shardCount);
     }
 }
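The three tests above pin down the contract of nodeInterleavedShardIterator for each ShardMovementStrategy value. A small usage sketch of that contract (hedged: clusterState stands for any state with started shards, as built by the test fixtures):

    Iterator<ShardRouting> it = clusterState.getRoutingNodes()
        .nodeInterleavedShardIterator(ShardMovementStrategy.REPLICA_FIRST);
    while (it.hasNext()) {
        ShardRouting shard = it.next();
        // REPLICA_FIRST yields every replica before the first primary;
        // PRIMARY_FIRST inverts that order; NO_PREFERENCE only guarantees node interleaving
    }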
diff --git a/server/src/test/java/org/opensearch/cluster/routing/RoutingTableGenerator.java b/server/src/test/java/org/opensearch/cluster/routing/RoutingTableGenerator.java
index 7c6752eed418a..5d217d270a30e 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/RoutingTableGenerator.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/RoutingTableGenerator.java
@@ -32,6 +32,7 @@
 package org.opensearch.cluster.routing;

 import com.carrotsearch.randomizedtesting.RandomizedContext;
+
 import org.opensearch.OpenSearchException;
 import org.opensearch.cluster.health.ClusterHealthStatus;
 import org.opensearch.cluster.metadata.IndexMetadata;
diff --git a/server/src/test/java/org/opensearch/cluster/routing/RoutingTableTests.java b/server/src/test/java/org/opensearch/cluster/routing/RoutingTableTests.java
index 0ff9d6f07751a..97283f561d6d4 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/RoutingTableTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/RoutingTableTests.java
@@ -40,31 +40,36 @@
 import org.opensearch.cluster.metadata.MetadataIndexStateService;
 import org.opensearch.cluster.node.DiscoveryNodes;
 import org.opensearch.cluster.node.DiscoveryNodes.Builder;
+import org.opensearch.cluster.routing.RecoverySource.RemoteStoreRecoverySource;
 import org.opensearch.cluster.routing.allocation.AllocationService;
 import org.opensearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.core.index.Index;
-import org.opensearch.index.IndexNotFoundException;
 import org.opensearch.core.index.shard.ShardId;
+import org.opensearch.index.IndexNotFoundException;
+import org.opensearch.repositories.IndexId;
 import org.junit.Before;

+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.stream.Collectors;
 import java.util.function.Predicate;
+import java.util.stream.Collectors;

-import static org.mockito.Mockito.mock;
+import static org.opensearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.opensearch.cluster.routing.ShardRoutingState.RELOCATING;
+import static org.opensearch.cluster.routing.ShardRoutingState.STARTED;
 import static org.opensearch.cluster.routing.ShardRoutingState.UNASSIGNED;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.not;
 import static org.hamcrest.Matchers.nullValue;
-
-import org.opensearch.cluster.routing.RecoverySource.RemoteStoreRecoverySource;
-import org.opensearch.repositories.IndexId;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;

 public class RoutingTableTests extends OpenSearchAllocationTestCase {

@@ -231,6 +236,74 @@ public void testShardsMatchingPredicateCount() {
         assertThat(clusterState.routingTable().shardsMatchingPredicateCount(predicate), is(2));
     }

+    public void testAllShardsMatchingPredicate() {
+        MockAllocationService allocation = createAllocationService(Settings.EMPTY, new DelayedShardsMockGatewayAllocator());
+        Metadata metadata = Metadata.builder()
+            .put(IndexMetadata.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+            .put(IndexMetadata.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+            .build();
+        ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
+            .metadata(metadata)
+            .routingTable(RoutingTable.builder().addAsNew(metadata.index("test1")).addAsNew(metadata.index("test2")).build())
+            .build();
+        clusterState = ClusterState.builder(clusterState)
+            .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")))
+            .build();
+        clusterState = allocation.reroute(clusterState, "reroute");
+
+        Predicate<ShardRouting> predicate = s -> s.state() == ShardRoutingState.UNASSIGNED && s.unassignedInfo().isDelayed();
+        assertThat(clusterState.routingTable().allShardsSatisfyingPredicate(predicate).size(), is(0));
+
+        // starting primaries
+        clusterState = startInitializingShardsAndReroute(allocation, clusterState);
+        // starting replicas
+        clusterState = startInitializingShardsAndReroute(allocation, clusterState);
+        // remove node2 and reroute
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node2")).build();
+        // make sure both replicas are marked as delayed (i.e. not reallocated)
+        clusterState = allocation.disassociateDeadNodes(clusterState, true, "reroute");
+        assertThat(clusterState.routingTable().allShardsSatisfyingPredicate(predicate).size(), is(2));
+
+        // Verifies true against all shards on the node (active/inactive)
+        assertThat(clusterState.routingTable().allShardsSatisfyingPredicate(shard -> true).size(), is(4));
+        // Verifies false against all shards on the node (active/inactive)
+        assertThat(clusterState.routingTable().allShardsSatisfyingPredicate(shard -> false).size(), is(0));
+        // Verifies against all primary shards on the node
+        assertThat(clusterState.routingTable().allShardsSatisfyingPredicate(ShardRouting::primary).size(), is(2));
+        // Verifies a predicate which tests for inactive replicas
+        assertThat(
+            clusterState.routingTable()
+                .allShardsSatisfyingPredicate(shardRouting -> !shardRouting.primary() && !shardRouting.active())
+                .size(),
+            is(2)
+        );
+    }
+
+    public void testAllShardsMatchingPredicateWithSpecificIndices() {
+        MockAllocationService allocation = createAllocationService(Settings.EMPTY, new DelayedShardsMockGatewayAllocator());
+        Metadata metadata = Metadata.builder()
+            .put(IndexMetadata.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+            .put(IndexMetadata.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+            .build();
+        ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
+            .metadata(metadata)
+            .routingTable(RoutingTable.builder().addAsNew(metadata.index("test1")).addAsNew(metadata.index("test2")).build())
+            .build();
+        clusterState = ClusterState.builder(clusterState)
+            .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")))
+            .build();
+        clusterState = allocation.reroute(clusterState, "reroute");
+
+        String[] indices = new String[] { "test1", "test2" };
+        // Verifies against all primary shards on the node
+        assertThat(clusterState.routingTable().allShardsSatisfyingPredicate(indices, ShardRouting::primary).size(), is(2));
+        // Verifies against all replica shards on the node
+        assertThat(
+            clusterState.routingTable().allShardsSatisfyingPredicate(indices, shardRouting -> !shardRouting.primary()).size(),
+            is(2)
+        );
+    }
+
     public void testActivePrimaryShardsGrouped() {
         assertThat(this.emptyRoutingTable.activePrimaryShardsGrouped(new String[0], true).size(), is(0));
         assertThat(this.emptyRoutingTable.activePrimaryShardsGrouped(new String[0], false).size(), is(0));
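allShardsSatisfyingPredicate is the new query surface exercised above: it scans the routing table, optionally narrowed to specific indices, and returns every ShardRouting matching the predicate. A minimal usage sketch, assuming the two overloads and the collection return type implied by the .size() assertions:

    // all unassigned replicas across the whole routing table
    var unassignedReplicas = clusterState.routingTable()
        .allShardsSatisfyingPredicate(shard -> shard.primary() == false && shard.unassigned());

    // primaries of selected indices only
    var primaries = clusterState.routingTable()
        .allShardsSatisfyingPredicate(new String[] { "test1" }, ShardRouting::primary);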
@@ -498,8 +571,47 @@ public void testAddAsRecovery() {
         }
     }

-    public void testAddAsRemoteStoreRestore() {
-        final IndexMetadata indexMetadata = createIndexMetadata(TEST_INDEX_1).state(IndexMetadata.State.OPEN).build();
+    private Map<ShardId, IndexShardRoutingTable> getIndexShardRoutingTableMap(Index index, boolean allUnassigned, int numberOfReplicas) {
+        Map<ShardId, IndexShardRoutingTable> indexShardRoutingTableMap = new HashMap<>();
+        List<ShardRoutingState> activeInitializingStates = List.of(INITIALIZING, STARTED, RELOCATING);
+        for (int i = 0; i < this.numberOfShards; i++) {
+            IndexShardRoutingTable indexShardRoutingTable = mock(IndexShardRoutingTable.class);
+            ShardRouting primaryShardRouting = mock(ShardRouting.class);
+            Boolean primaryUnassigned = allUnassigned || randomBoolean();
+            when(primaryShardRouting.unassigned()).thenReturn(primaryUnassigned);
+            if (primaryUnassigned) {
+                when(primaryShardRouting.state()).thenReturn(UNASSIGNED);
+            } else {
+                when(primaryShardRouting.state()).thenReturn(
+                    activeInitializingStates.get(randomIntBetween(0, activeInitializingStates.size() - 1))
+                );
+            }
+            when(indexShardRoutingTable.primaryShard()).thenReturn(primaryShardRouting);
+            List<ShardRouting> replicaShards = new ArrayList<>();
+            for (int j = 0; j < numberOfReplicas; j++) {
+                ShardRouting replicaShardRouting = mock(ShardRouting.class);
+                Boolean replicaUnassigned = allUnassigned || randomBoolean();
+                when(replicaShardRouting.unassigned()).thenReturn(replicaUnassigned);
+                if (replicaUnassigned) {
+                    when(replicaShardRouting.state()).thenReturn(UNASSIGNED);
+                } else {
+                    when(replicaShardRouting.state()).thenReturn(
+                        activeInitializingStates.get(randomIntBetween(0, activeInitializingStates.size() - 1))
+                    );
+                }
+                replicaShards.add(replicaShardRouting);
+            }
+            when(indexShardRoutingTable.replicaShards()).thenReturn(replicaShards);
+            indexShardRoutingTableMap.put(new ShardId(index, i), indexShardRoutingTable);
+        }
+        return indexShardRoutingTableMap;
+    }
+
+    public void testAddAsRemoteStoreRestoreAllUnassigned() {
+        int numberOfReplicas = randomIntBetween(0, 5);
+        final IndexMetadata indexMetadata = createIndexMetadata(TEST_INDEX_1).state(IndexMetadata.State.OPEN)
+            .numberOfReplicas(numberOfReplicas)
+            .build();
         final RemoteStoreRecoverySource remoteStoreRecoverySource = new RemoteStoreRecoverySource(
             "restore_uuid",
             Version.CURRENT,
@@ -508,34 +620,78 @@
             new IndexId(TEST_INDEX_1, "1")
         );
         final RoutingTable routingTable = new RoutingTable.Builder().addAsRemoteStoreRestore(
             indexMetadata,
             remoteStoreRecoverySource,
-            new HashMap<>()
+            getIndexShardRoutingTableMap(indexMetadata.getIndex(), true, numberOfReplicas),
+            false
         ).build();
         assertTrue(routingTable.hasIndex(TEST_INDEX_1));
-        assertEquals(this.numberOfShards, routingTable.allShards(TEST_INDEX_1).size());
-        assertEquals(this.numberOfShards, routingTable.index(TEST_INDEX_1).shardsWithState(UNASSIGNED).size());
+        int numberOfShards = this.numberOfShards * (numberOfReplicas + 1);
+        assertEquals(numberOfShards, routingTable.allShards(TEST_INDEX_1).size());
+        assertEquals(numberOfShards, routingTable.index(TEST_INDEX_1).shardsWithState(UNASSIGNED).size());
     }

     public void testAddAsRemoteStoreRestoreWithActiveShards() {
-        final IndexMetadata indexMetadata = createIndexMetadata(TEST_INDEX_1).state(IndexMetadata.State.OPEN).build();
+        int numberOfReplicas = randomIntBetween(0, 5);
+        final IndexMetadata indexMetadata = createIndexMetadata(TEST_INDEX_1).state(IndexMetadata.State.OPEN)
+            .numberOfReplicas(numberOfReplicas)
+            .build();
         final RemoteStoreRecoverySource remoteStoreRecoverySource = new RemoteStoreRecoverySource(
             "restore_uuid",
             Version.CURRENT,
             new IndexId(TEST_INDEX_1, "1")
         );
-        Map<ShardId, ShardRouting> activeInitializingShards = new HashMap<>();
-        for (int i = 0; i < randomIntBetween(1, this.numberOfShards); i++) {
-            activeInitializingShards.put(new ShardId(indexMetadata.getIndex(), i), mock(ShardRouting.class));
-        }
+        Map<ShardId, IndexShardRoutingTable> indexShardRoutingTableMap = getIndexShardRoutingTableMap(
+            indexMetadata.getIndex(),
+            false,
+            numberOfReplicas
+        );
         final RoutingTable routingTable = new RoutingTable.Builder().addAsRemoteStoreRestore(
             indexMetadata,
             remoteStoreRecoverySource,
-            activeInitializingShards
+            indexShardRoutingTableMap,
+            false
         ).build();
         assertTrue(routingTable.hasIndex(TEST_INDEX_1));
-        assertEquals(this.numberOfShards, routingTable.allShards(TEST_INDEX_1).size());
-        assertEquals(
-            this.numberOfShards - activeInitializingShards.size(),
-            routingTable.index(TEST_INDEX_1).shardsWithState(UNASSIGNED).size()
+        int numberOfShards = this.numberOfShards * (numberOfReplicas + 1);
+        assertEquals(numberOfShards, routingTable.allShards(TEST_INDEX_1).size());
+        int unassignedShards = 0;
+        for (IndexShardRoutingTable indexShardRoutingTable : indexShardRoutingTableMap.values()) {
+            if (indexShardRoutingTable.primaryShard().unassigned()) {
+                unassignedShards += indexShardRoutingTable.replicaShards().size() + 1;
+            } else {
+                for (ShardRouting replicaShardRouting : indexShardRoutingTable.replicaShards()) {
+                    if (replicaShardRouting.unassigned()) {
+                        unassignedShards += 1;
+                    }
+                }
+            }
+        }
+        assertEquals(unassignedShards, routingTable.index(TEST_INDEX_1).shardsWithState(UNASSIGNED).size());
+    }
+
+    public void testAddAsRemoteStoreRestoreShardMismatch() {
+        int numberOfReplicas = randomIntBetween(0, 5);
+        final IndexMetadata indexMetadata = createIndexMetadata(TEST_INDEX_1).state(IndexMetadata.State.OPEN)
+            .numberOfReplicas(numberOfReplicas)
+            .build();
+        final RemoteStoreRecoverySource remoteStoreRecoverySource = new RemoteStoreRecoverySource(
+            "restore_uuid",
+            Version.CURRENT,
+            new IndexId(TEST_INDEX_1, "1")
+        );
+        Map<ShardId, IndexShardRoutingTable> indexShardRoutingTableMap = getIndexShardRoutingTableMap(
+            indexMetadata.getIndex(),
+            true,
+            numberOfReplicas
+        );
+        indexShardRoutingTableMap.remove(indexShardRoutingTableMap.keySet().iterator().next());
+        assertThrows(
+            IllegalStateException.class,
+            () -> new RoutingTable.Builder().addAsRemoteStoreRestore(
+                indexMetadata,
+                remoteStoreRecoverySource,
+                indexShardRoutingTableMap,
+                false
+            ).build()
        );
    }

diff --git a/server/src/test/java/org/opensearch/cluster/routing/ShardMovementStrategyTests.java b/server/src/test/java/org/opensearch/cluster/routing/ShardMovementStrategyTests.java
new file mode 100644
index 0000000000000..7483e69fb0b0e
--- /dev/null
+++ b/server/src/test/java/org/opensearch/cluster/routing/ShardMovementStrategyTests.java
@@ -0,0 +1,171 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.cluster.routing;
+
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
+
+import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
+import org.opensearch.cluster.ClusterStateListener;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.test.InternalTestCluster;
+import org.opensearch.test.OpenSearchIntegTestCase;
+
+import java.util.Iterator;
+import java.util.concurrent.CountDownLatch;
+
+import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
+
+@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
+@ThreadLeakScope(ThreadLeakScope.Scope.NONE)
+public class ShardMovementStrategyTests extends OpenSearchIntegTestCase {
+
+    protected String startDataOnlyNode(final String zone) {
+        final Settings settings = Settings.builder().put("node.attr.zone", zone).build();
+        return internalCluster().startDataOnlyNode(settings);
+    }
+
+    protected void createAndIndex(String index, int replicaCount, int shardCount) {
+        assertAcked(
+            prepareCreate(
+                index,
+                -1,
+                Settings.builder()
+                    .put("number_of_shards", shardCount)
+                    .put("number_of_replicas", replicaCount)
+                    .put("max_result_window", 20000)
+            )
+        );
+        int startDocCountId = 0;
+        for (int i = 0; i < 10; i++) {
+            index(index, "_doc", Integer.toString(startDocCountId), "foo", "bar" + startDocCountId);
+            ++startDocCountId;
+        }
+        flushAndRefresh(index);
+    }
+
+    private static Settings.Builder getSettings(ShardMovementStrategy shardMovementStrategy, boolean movePrimaryFirst) {
+        return Settings.builder()
+            .put("cluster.routing.allocation.shard_movement_strategy", shardMovementStrategy)
+            .put("cluster.routing.allocation.move.primary_first", movePrimaryFirst);
+    }
+
+    public void testClusterRelocationPrimaryFirstShardMovementMovePrimarySettingEnabled() throws InterruptedException {
+        testClusterGreenAfterPartialRelocation(ShardMovementStrategy.PRIMARY_FIRST, true);
+    }
+
+    public void testClusterRelocationPrimaryFirstShardMovementMovePrimarySettingDisabled() throws InterruptedException {
+        testClusterGreenAfterPartialRelocation(ShardMovementStrategy.PRIMARY_FIRST, false);
+    }
+
+    public void testClusterRelocationReplicaFirstShardMovementPrimaryFirstEnabled() throws InterruptedException {
+        testClusterGreenAfterPartialRelocation(ShardMovementStrategy.REPLICA_FIRST, true);
+    }
+
+    public void testClusterRelocationReplicaFirstShardMovementPrimaryFirstDisabled() throws InterruptedException {
+        testClusterGreenAfterPartialRelocation(ShardMovementStrategy.REPLICA_FIRST, false);
+    }
+
+    public void testClusterRelocationNoPreferenceShardMovementPrimaryFirstEnabled() throws InterruptedException {
+        testClusterGreenAfterPartialRelocation(ShardMovementStrategy.NO_PREFERENCE, true);
+    }
+
+    private boolean shouldMovePrimaryShardsFirst(ShardMovementStrategy shardMovementStrategy, boolean movePrimaryFirst) {
+        if (shardMovementStrategy == ShardMovementStrategy.NO_PREFERENCE && movePrimaryFirst) {
+            return true;
+        }
+        return shardMovementStrategy == ShardMovementStrategy.PRIMARY_FIRST;
+    }
+
+    /**
+     * Creates two nodes in each of two zones and shuts down the zone1 nodes after
+     * relocating half of the shards. A shards-per-node constraint ensures that exactly
+     * 50% of the shards relocate to the zone2 nodes, leaving time to shut down the
+     * zone1 nodes. Depending on the shard movement strategy, we check whether primary
+     * or replica shards are moved first, and that the zone2 nodes end up with all the shards.
+     */
+    private void testClusterGreenAfterPartialRelocation(ShardMovementStrategy shardMovementStrategy, boolean movePrimaryFirst)
+        throws InterruptedException {
+        internalCluster().startClusterManagerOnlyNodes(1);
+        final String z1 = "zone-1", z2 = "zone-2";
+        final int primaryShardCount = 6;
+        assertTrue("Primary shard count must be even for equal distribution across two nodes", primaryShardCount % 2 == 0);
+        final String z1n1 = startDataOnlyNode(z1);
+        ensureGreen();
+        createAndIndex("foo", 1, primaryShardCount);
+        ensureYellow();
+        // Start second node in same zone only after yellow cluster to ensure
+        // that one node gets all primaries and the other all replicas
+        final String z1n2 = startDataOnlyNode(z1);
+        ensureGreen();
+
+        // Apply the shard movement strategy settings and keep the new zone's
+        // nodes excluded to prevent any shard relocation for now
+        ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest();
+        settingsRequest.persistentSettings(
+            getSettings(shardMovementStrategy, movePrimaryFirst).put("cluster.routing.allocation.exclude.zone", z2)
+        );
+        client().admin().cluster().updateSettings(settingsRequest).actionGet();
+
+        final String z2n1 = startDataOnlyNode(z2);
+        final String z2n2 = startDataOnlyNode(z2);
+
+        // Create cluster state listener to compute number of shards on new zone
+        // nodes before counting down the latch
+        final CountDownLatch shardMoveLatch = new CountDownLatch(1);
+        final ClusterStateListener listener = event -> {
+            if (event.routingTableChanged()) {
+                final RoutingNodes routingNodes = event.state().getRoutingNodes();
+                int startedCount = 0;
+                for (Iterator<RoutingNode> it = routingNodes.iterator(); it.hasNext();) {
+                    RoutingNode routingNode = it.next();
+                    final String nodeName = routingNode.node().getName();
+                    if (nodeName.equals(z2n1) || nodeName.equals(z2n2)) {
+                        int count = 0;
+                        for (ShardRouting shardEntry : routingNode) {
+                            // If the strategy moves primaries first, count started primaries;
+                            // otherwise count started replicas
+                            if ((shardEntry.primary() == shouldMovePrimaryShardsFirst(shardMovementStrategy, movePrimaryFirst))
+                                && shardEntry.state() == ShardRoutingState.STARTED) {
+                                count++;
+                            }
+                        }
+                        startedCount += count;
+                    }
+                }
+
+                // Count down the latch once all the expected shards have started on nodes in zone-2
+                if (startedCount == primaryShardCount) {
+                    shardMoveLatch.countDown();
+                }
+            }
+        };
+        internalCluster().clusterService().addListener(listener);
+
+        // Exclude zone1 nodes for allocation and await latch count down
+        settingsRequest = new ClusterUpdateSettingsRequest();
+        settingsRequest.persistentSettings(
+            Settings.builder()
+                .put("cluster.routing.allocation.exclude.zone", z1)
+                // Total shards per node constraint is added to pause the relocation after shards
+                // have relocated to allow time for node shutdown and validate yellow cluster
+                .put("cluster.routing.allocation.total_shards_per_node", primaryShardCount / 2)
+        );
+        client().admin().cluster().updateSettings(settingsRequest);
+        shardMoveLatch.await();
+
+        // Shutdown both nodes in zone 1 and ensure cluster does not become red
+        try {
+            internalCluster().stopRandomNode(InternalTestCluster.nameFilter(z1n1));
+            internalCluster().stopRandomNode(InternalTestCluster.nameFilter(z1n2));
+        } catch (Exception e) {}
+        // Due to the shards per node constraint the cluster cannot become green;
+        // since yellow suffices for this test, the constraint is not removed
+        ensureYellow();
+    }
+}
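The shouldMovePrimaryShardsFirst helper above encodes the expected precedence between the new shard_movement_strategy setting and the legacy move.primary_first flag. A hypothetical restatement of that rule as a standalone helper (this mirrors the test's assumption, not production code):

    // shard_movement_strategy wins outright; only when it is NO_PREFERENCE does the
    // legacy move.primary_first flag (true => PRIMARY_FIRST) take effect.
    static ShardMovementStrategy effectiveStrategy(ShardMovementStrategy strategy, boolean movePrimaryFirst) {
        if (strategy == ShardMovementStrategy.NO_PREFERENCE && movePrimaryFirst) {
            return ShardMovementStrategy.PRIMARY_FIRST;
        }
        return strategy;
    }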
diff --git a/server/src/test/java/org/opensearch/cluster/routing/ShardRoutingTests.java b/server/src/test/java/org/opensearch/cluster/routing/ShardRoutingTests.java
index 72425eaad11b9..63c7b5f70f85c 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/ShardRoutingTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/ShardRoutingTests.java
@@ -34,12 +34,12 @@
 import org.opensearch.Version;
 import org.opensearch.common.UUIDs;
+import org.opensearch.common.io.stream.BytesStreamOutput;
 import org.opensearch.core.index.Index;
 import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.repositories.IndexId;
-import org.opensearch.snapshots.SnapshotId;
-import org.opensearch.common.io.stream.BytesStreamOutput;
 import org.opensearch.snapshots.Snapshot;
+import org.opensearch.snapshots.SnapshotId;
 import org.opensearch.test.OpenSearchTestCase;

 import java.io.IOException;
diff --git a/server/src/test/java/org/opensearch/cluster/routing/UnassignedInfoTests.java b/server/src/test/java/org/opensearch/cluster/routing/UnassignedInfoTests.java
index 741bf8e34c0e3..8800394431a42 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/UnassignedInfoTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/UnassignedInfoTests.java
@@ -33,6 +33,7 @@
 package org.opensearch.cluster.routing;

 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+
 import org.opensearch.Version;
 import org.opensearch.cluster.ClusterName;
 import org.opensearch.cluster.ClusterState;
@@ -45,10 +46,10 @@
 import org.opensearch.cluster.routing.allocation.AllocationService;
 import org.opensearch.cluster.routing.allocation.FailedShard;
 import org.opensearch.common.UUIDs;
-import org.opensearch.core.common.io.stream.ByteBufferStreamInput;
 import org.opensearch.common.io.stream.BytesStreamOutput;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.TimeValue;
+import org.opensearch.core.common.io.stream.ByteBufferStreamInput;
 import org.opensearch.core.index.Index;
 import org.opensearch.repositories.IndexId;
 import org.opensearch.snapshots.Snapshot;
diff --git a/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java b/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java
index 81464fcd2610d..5c0bdc8547f8b 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java
@@ -8,13 +8,9 @@
 package org.opensearch.cluster.routing;

-import org.hamcrest.MatcherAssert;
-import org.junit.After;
-import org.junit.Before;
 import org.opensearch.Version;
-import org.opensearch.action.ActionListener;
-import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingRequest;
 import org.opensearch.action.ActionRequestValidationException;
+import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingRequest;
 import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingResponse;
 import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterAddWeightedRoutingAction;
 import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingRequestBuilder;
@@ -34,12 +30,17 @@
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.settings.ClusterSettings;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.ClusterServiceUtils;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.transport.MockTransport;
 import org.opensearch.threadpool.TestThreadPool;
 import org.opensearch.threadpool.ThreadPool;
 import org.opensearch.transport.TransportService;
+import org.hamcrest.MatcherAssert;
+import org.junit.After;
+import org.junit.Before;

 import java.util.Collections;
 import java.util.HashSet;
@@ -94,8 +95,8 @@ public void setUpService() {
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             boundTransportAddress -> clusterService.state().nodes().get("nodes1"),
             null,
-            Collections.emptySet()
-
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         );

         Settings.Builder settingsBuilder = Settings.builder()
diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/AddIncrementallyTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/AddIncrementallyTests.java
index 16ff5f753d286..89cf5fe4f76c3 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/allocation/AddIncrementallyTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/AddIncrementallyTests.java
@@ -46,7 +46,7 @@
 import org.opensearch.cluster.routing.RoutingTable;
 import org.opensearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.util.CollectionUtils;
+import org.opensearch.core.common.util.CollectionUtils;
 import org.hamcrest.Matcher;
 import org.hamcrest.Matchers;

diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationCommandsTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationCommandsTests.java
index 98614c517c811..951bf971eedc4 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationCommandsTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationCommandsTests.java
@@ -59,17 +59,17 @@
 import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders;
 import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider;
 import org.opensearch.common.io.stream.BytesStreamOutput;
+import org.opensearch.common.network.NetworkModule;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.xcontent.json.JsonXContent;
 import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput;
 import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
 import org.opensearch.core.common.io.stream.StreamInput;
-import org.opensearch.common.network.NetworkModule;
-import org.opensearch.common.settings.Settings;
+import org.opensearch.core.index.Index;
+import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.core.xcontent.NamedXContentRegistry;
 import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.common.xcontent.json.JsonXContent;
-import org.opensearch.core.index.Index;
 import org.opensearch.index.IndexNotFoundException;
-import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.index.shard.ShardNotFoundException;
 import org.opensearch.snapshots.SnapshotShardSizeInfo;

diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationConstraintsTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationConstraintsTests.java
index 55250a81f4247..90546620e9e3e 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationConstraintsTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationConstraintsTests.java
@@ -15,13 +15,13 @@
 import org.opensearch.common.settings.ClusterSettings;
 import org.opensearch.common.settings.Settings;

-import static org.mockito.Mockito.anyString;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
 import static org.opensearch.cluster.routing.allocation.ConstraintTypes.CLUSTER_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID;
 import static org.opensearch.cluster.routing.allocation.ConstraintTypes.CONSTRAINT_WEIGHT;
-import static org.opensearch.cluster.routing.allocation.ConstraintTypes.INDEX_SHARD_PER_NODE_BREACH_CONSTRAINT_ID;
 import static org.opensearch.cluster.routing.allocation.ConstraintTypes.INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID;
+import static org.opensearch.cluster.routing.allocation.ConstraintTypes.INDEX_SHARD_PER_NODE_BREACH_CONSTRAINT_ID;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;

 public class AllocationConstraintsTests extends OpenSearchAllocationTestCase {

diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationPriorityTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationPriorityTests.java
index f9f181402da1b..8cd664c8c13fc 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationPriorityTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationPriorityTests.java
@@ -50,7 +50,7 @@ public class AllocationPriorityTests extends OpenSearchAllocationTestCase {

     /**
      * Tests that higher prioritized primaries and replicas are allocated first even on the balanced shard allocator
-     * See https://github.com/elastic/elasticsearch/issues/13249 for details
+     * See <a href="https://github.com/elastic/elasticsearch/issues/13249">elasticsearch issue #13249</a> for details
      */
     public void testPrioritizedIndicesAllocatedFirst() {
         AllocationService allocation = createAllocationService(
diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationServiceTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationServiceTests.java
index 38b71db27e02f..64d9c243304d8 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationServiceTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationServiceTests.java
@@ -73,7 +73,6 @@
 import static org.opensearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING;
 import static org.opensearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING;
 import static org.opensearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_REPLICAS_RECOVERIES_SETTING;
-
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.equalTo;
b/server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessAllocationTests.java index c0cec7e3201bb..02966b835fae0 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessAllocationTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessAllocationTests.java @@ -54,7 +54,6 @@ import org.opensearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.Decision; import org.opensearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; - import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; @@ -64,11 +63,11 @@ import java.util.Map; import static java.util.Collections.singletonMap; -import static org.hamcrest.MatcherAssert.assertThat; import static org.opensearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.opensearch.cluster.routing.ShardRoutingState.RELOCATING; import static org.opensearch.cluster.routing.ShardRoutingState.STARTED; import static org.opensearch.cluster.routing.ShardRoutingState.UNASSIGNED; +import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.sameInstance; diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalanceTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalanceTests.java index f7b1b8694f91a..019db47e74cc3 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalanceTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalanceTests.java @@ -16,8 +16,8 @@ import java.util.Optional; -import static org.hamcrest.Matchers.equalTo; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS; +import static org.hamcrest.Matchers.equalTo; public class AwarenessReplicaBalanceTests extends OpenSearchAllocationTestCase { diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java index 06acfceedd30a..62dce9c4edeb5 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java @@ -38,8 +38,8 @@ import org.opensearch.Version; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.EmptyClusterInfoService; +import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; @@ -247,13 +247,13 @@ public void testPrimaryBalanceWithPreferPrimaryBalanceSetting() { /** * This test verifies the allocation logic when nodes breach multiple constraints and ensure node breaching min * constraints chosen for allocation. - * + * <p> * This test mimics a cluster state containing four nodes, where one node breaches two constraints while one breaches * only one. 
In order to have nodes breach constraints, the test excludes two nodes (node2, node3) from allocation so * that the other two nodes (node0, node1) receive all shard assignments, resulting in constraint breaches. The test asserts that * the new primary shard assignment lands on the node breaching one constraint (node1), while the replica lands on the other * (node0). Final shard allocation state: - * + * <p> routing_nodes: -----node_id[node2][V] -----node_id[node3][V] @@ -384,13 +384,13 @@ public void testGlobalPrimaryBalance() throws Exception { * This test mimics a cluster state which cannot be rebalanced due to the * {@link org.opensearch.cluster.routing.allocation.decider.SameShardAllocationDecider} * allocation decider, which prevents shard relocation, leaving the cluster unbalanced on primaries. - * + * <p> * There are two nodes (N1, N2) where all primaries land on N1 while replicas land on N2. * N1 N2 * ------ -------- * P1 R1 * P2 R2 - * + * <p> * -----node_id[node_0][V] * --------[test][1], node[node_0], [P], s[STARTED], a[id=xqfZSToVSQaff2xvuxh_yA] * --------[test][0], node[node_0], [P], s[STARTED], a[id=VGjOeBGdSmu3pJR6T7v29A] @@ -454,14 +454,14 @@ public void testPrimaryBalance_NotSolved_1() { * This test mimics a cluster state where re-balancing is not possible due to an existing limitation of the re-balancing * logic, which applies at the index level, i.e. it balances the shards of a single index across all nodes. This will be solved * when a primary shard count constraint across indices is added. - * + * <p> * Please note, P1 and P2 belong to different indices. - * + * <p> * N1 N2 * ------ -------- * P1 R1 * P2 R2 - * + * <p> * -----node_id[node_0][V] * --------[test1][0], node[node_0], [P], s[STARTED], a[id=u7qtyy5AR42hgEa-JpeArg] * --------[test0][0], node[node_0], [P], s[STARTED], a[id=BQrLSo6sQyGlcLdVvGgqLQ] diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/CatAllocationTestCase.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/CatAllocationTestCase.java index 10271cad33fec..8f90882c21804 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/CatAllocationTestCase.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/CatAllocationTestCase.java @@ -66,7 +66,7 @@ * A base testcase that allows running tests based on the output of the CAT API. * The input is a line based cat/shards output like: * kibana-int 0 p STARTED 2 24.8kb 10.202.245.2 r5-9-35 - * + * <p> * the test builds up a cluster state from the cat input and optionally runs a full balance on it. * This can be used to debug cluster allocation decisions.
*/ diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/DecisionsImpactOnClusterHealthTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/DecisionsImpactOnClusterHealthTests.java index 35c6aea425b88..fbcfbd6082fd6 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/DecisionsImpactOnClusterHealthTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/DecisionsImpactOnClusterHealthTests.java @@ -35,8 +35,8 @@ import org.opensearch.Version; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.EmptyClusterInfoService; +import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.cluster.health.ClusterStateHealth; import org.opensearch.cluster.metadata.IndexMetadata; diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitorTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitorTests.java index a8f8296cf9591..6ab57d10b05c1 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitorTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitorTests.java @@ -34,7 +34,6 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterInfo; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; @@ -51,14 +50,17 @@ import org.opensearch.common.Priority; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; import org.opensearch.test.MockLogAppender; import org.opensearch.test.junit.annotations.TestLogging; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; @@ -67,6 +69,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.LongSupplier; +import static org.opensearch.cluster.routing.allocation.DiskThresholdSettings.CLUSTER_CREATE_INDEX_BLOCK_AUTO_RELEASE; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; @@ -581,12 +584,16 @@ protected void setIndexCreateBlock(ActionListener<Void> listener, boolean indexC ); advanceTime.set(false); // will do one reroute and emit warnings, but subsequent reroutes and associated messages are delayed - assertSingleWarningMessage( - monitor, - aboveHighWatermark, + final List<String> messages = new ArrayList<>(); + messages.add( "high disk watermark [90%] exceeded on * shards will be relocated away from this node* " + "the node is expected to continue to exceed the high disk watermark when these relocations are complete" ); + messages.add( + "Putting index create block on cluster as all nodes are breaching high disk watermark. " + + "Number of nodes above high watermark: 1." 
+ ); + assertMultipleWarningMessages(monitor, aboveHighWatermark, messages); advanceTime.set(true); assertRepeatedWarningMessages( @@ -605,22 +612,11 @@ protected void setIndexCreateBlock(ActionListener<Void> listener, boolean indexC relocatingShardSizeRef.set(-5L); advanceTime.set(true); - assertSingleInfoMessage( - monitor, - aboveHighWatermark, - "high disk watermark [90%] exceeded on * shards will be relocated away from this node* " - + "the node is expected to be below the high disk watermark when these relocations are complete" - ); relocatingShardSizeRef.set(0L); timeSupplier.getAsLong(); // advance time long enough to do another reroute advanceTime.set(false); // will do one reroute and emit warnings, but subsequent reroutes and associated messages are delayed - assertSingleWarningMessage( - monitor, - aboveHighWatermark, - "high disk watermark [90%] exceeded on * shards will be relocated away from this node* " - + "the node is expected to continue to exceed the high disk watermark when these relocations are complete" - ); + assertMultipleWarningMessages(monitor, aboveHighWatermark, messages); advanceTime.set(true); assertRepeatedWarningMessages( @@ -722,6 +718,113 @@ protected void setIndexCreateBlock(ActionListener<Void> listener, boolean indexC assertTrue(countBlocksCalled.get() == 0); } + public void testIndexCreateBlockRemovedOnlyWhenAnyNodeAboveHighWatermark() { + AllocationService allocation = createAllocationService( + Settings.builder() + .put("cluster.routing.allocation.node_concurrent_recoveries", 10) + .put("cluster.blocks.create_index.enabled", false) + .build() + ); + Metadata metadata = Metadata.builder() + .put( + IndexMetadata.builder("test") + .settings(settings(Version.CURRENT).put("index.routing.allocation.require._id", "node2")) + .numberOfShards(1) + .numberOfReplicas(0) + ) + .put( + IndexMetadata.builder("test_1") + .settings(settings(Version.CURRENT).put("index.routing.allocation.require._id", "node1")) + .numberOfShards(1) + .numberOfReplicas(0) + ) + .put( + IndexMetadata.builder("test_2") + .settings(settings(Version.CURRENT).put("index.routing.allocation.require._id", "node1")) + .numberOfShards(1) + .numberOfReplicas(0) + ) + .build(); + RoutingTable routingTable = RoutingTable.builder() + .addAsNew(metadata.index("test")) + .addAsNew(metadata.index("test_1")) + .addAsNew(metadata.index("test_2")) + .build(); + + final ClusterState clusterState = applyStartedShardsUntilNoChange( + ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(routingTable) + .blocks(ClusterBlocks.builder().addGlobalBlock(Metadata.CLUSTER_CREATE_INDEX_BLOCK).build()) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))) + .build(), + allocation + ); + AtomicReference<Set<String>> indices = new AtomicReference<>(); + AtomicInteger countBlocksCalled = new AtomicInteger(); + AtomicInteger countUnblockBlocksCalled = new AtomicInteger(); + AtomicLong currentTime = new AtomicLong(); + Settings settings = Settings.builder().put(CLUSTER_CREATE_INDEX_BLOCK_AUTO_RELEASE.getKey(), true).build(); + DiskThresholdMonitor monitor = new DiskThresholdMonitor( + settings, + () -> clusterState, + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + null, + currentTime::get, + (reason, priority, listener) -> { + listener.onResponse(null); + } + ) { + + @Override + protected void updateIndicesReadOnly(Set<String> indicesToMarkReadOnly, ActionListener<Void> listener, boolean readOnly) { + 
assertTrue(indices.compareAndSet(null, indicesToMarkReadOnly)); + assertTrue(readOnly); + listener.onResponse(null); + } + + @Override + protected void setIndexCreateBlock(ActionListener<Void> listener, boolean indexCreateBlock) { + if (indexCreateBlock) { + countBlocksCalled.set(countBlocksCalled.get() + 1); + } else { + countUnblockBlocksCalled.set(countUnblockBlocksCalled.get() + 1); + } + + listener.onResponse(null); + } + }; + + Map<String, DiskUsage> builder = new HashMap<>(); + + // Initially all the nodes are breaching the high watermark and the index create block is already present on the cluster. + // Since the block is already present, DiskThresholdMonitor should not try to apply it again. + builder.put("node1", new DiskUsage("node1", "node1", "/foo/bar", 100, 9)); + builder.put("node2", new DiskUsage("node2", "node2", "/foo/bar", 100, 9)); + monitor.onNewInfo(clusterInfo(builder)); + // Since the block is already present and all nodes are still breaching the high watermark, neither block nor unblock will be called. + assertEquals(countBlocksCalled.get(), 0); + assertEquals(countUnblockBlocksCalled.get(), 0); + + // Ensure DiskThresholdMonitor does not try to remove the block in the next iteration if all nodes are still breaching the high watermark. + monitor.onNewInfo(clusterInfo(builder)); + assertEquals(countBlocksCalled.get(), 0); + assertEquals(countUnblockBlocksCalled.get(), 0); + + builder = new HashMap<>(); + + // If any node is no longer breaching the high watermark, DiskThresholdMonitor should remove the index create block. + builder.put("node1", new DiskUsage("node1", "node1", "/foo/bar", 100, 19)); + builder.put("node2", new DiskUsage("node2", "node2", "/foo/bar", 100, 1)); + // Need to advance the current time to allow nodes to be removed from the high watermark list. + currentTime.addAndGet(randomLongBetween(60001, 120000)); + + monitor.onNewInfo(clusterInfo(builder)); + // The block will be removed once any node is no longer breaching the high watermark.
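+ // (Illustrative note, not in the original patch: DiskUsage takes total and free bytes, so (100, 9) means 91% used, breaching the 90% high watermark; (100, 19) means 81% used, taking node1 below the watermark; and (100, 1) keeps node2 breaching at 99% used.)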
+ assertEquals(countBlocksCalled.get(), 0); + assertEquals(countUnblockBlocksCalled.get(), 1); + } + private void assertNoLogging(DiskThresholdMonitor monitor, final Map<String, DiskUsage> diskUsages) throws IllegalAccessException { try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(DiskThresholdMonitor.class))) { mockAppender.addExpectation( @@ -756,10 +859,11 @@ private void assertRepeatedWarningMessages(DiskThresholdMonitor monitor, final M } } - private void assertSingleWarningMessage(DiskThresholdMonitor monitor, final Map<String, DiskUsage> diskUsages, String message) + private void assertMultipleWarningMessages(DiskThresholdMonitor monitor, final Map<String, DiskUsage> diskUsages, List<String> messages) throws IllegalAccessException { - assertLogging(monitor, diskUsages, Level.WARN, message); - assertNoLogging(monitor, diskUsages); + for (int index = 0; index < messages.size(); index++) { + assertLogging(monitor, diskUsages, Level.WARN, messages.get(index)); + } } private void assertSingleInfoMessage(DiskThresholdMonitor monitor, final Map<String, DiskUsage> diskUsages, String message) diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettingsTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettingsTests.java index d23b079e35ef9..22f7b9ecd5206 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettingsTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettingsTests.java @@ -34,7 +34,7 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.test.OpenSearchTestCase; import java.util.Locale; diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedNodeRoutingTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedNodeRoutingTests.java index 80afc1d9b0b0f..c245e608edbec 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedNodeRoutingTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedNodeRoutingTests.java @@ -53,6 +53,7 @@ import org.opensearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; import org.opensearch.common.settings.Settings; import org.opensearch.indices.cluster.ClusterStateChanges; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.VersionUtils; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -69,6 +70,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; import static org.opensearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.opensearch.cluster.routing.ShardRoutingState.STARTED; import static org.hamcrest.Matchers.equalTo; @@ -137,7 +139,15 @@ public void testSimpleFailedNodeTest() { } } + public void testRandomClusterPromotesOldestReplica() throws InterruptedException { + testRandomClusterPromotesReplica(true); + } + public void testRandomClusterPromotesNewestReplica() throws InterruptedException { + testRandomClusterPromotesReplica(false); + } + + 
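For context on what these two wrappers exercise: with segment replication, failover promotes the replica on the oldest node version rather than the newest, because replicas copy segment files from the primary, and newer Lucene versions can read segments written by older versions but not the reverse. A minimal sketch of the selection rule, built on the RoutingNodes helpers this patch uses (illustrative only, not part of the patch):

    // Chooses the replica to promote when a primary fails; the preference flips under segment replication.
    static ShardRouting promotableReplica(RoutingNodes routingNodes, ShardId shardId, boolean segrepEnabled) {
        return segrepEnabled
            // Oldest version first: replicas on same-or-newer nodes can still read the segments it produces.
            ? routingNodes.activeReplicaWithOldestVersion(shardId)
            // Document replication keeps the legacy newest-version preference.
            : routingNodes.activeReplicaWithHighestVersion(shardId);
    }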
void testRandomClusterPromotesReplica(boolean isSegmentReplicationEnabled) throws InterruptedException { ThreadPool threadPool = new TestThreadPool(getClass().getName()); ClusterStateChanges cluster = new ClusterStateChanges(xContentRegistry(), threadPool); @@ -164,6 +174,9 @@ public void testRandomClusterPromotesNewestReplica() throws InterruptedException Settings.Builder settingsBuilder = Settings.builder() .put(SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 4)) .put(SETTING_NUMBER_OF_REPLICAS, randomIntBetween(2, 4)); + if (isSegmentReplicationEnabled) { + settingsBuilder.put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT); + } CreateIndexRequest request = new CreateIndexRequest(name, settingsBuilder.build()).waitForActiveShards(ActiveShardCount.NONE); state = cluster.createIndex(state, request); assertTrue(state.metadata().hasIndex(name)); @@ -206,13 +219,23 @@ public void testRandomClusterPromotesNewestReplica() throws InterruptedException Version candidateVer = getNodeVersion(sr, compareState); if (candidateVer != null) { logger.info("--> candidate on {} node; shard routing: {}", candidateVer, sr); - assertTrue( - "candidate was not on the newest version, new primary is on " - + newPrimaryVersion - + " and there is a candidate on " - + candidateVer, - candidateVer.onOrBefore(newPrimaryVersion) - ); + if (isSegmentReplicationEnabled) { + assertTrue( + "candidate was not on the oldest version, new primary is on " + + newPrimaryVersion + + " and there is a candidate on " + + candidateVer, + candidateVer.onOrAfter(newPrimaryVersion) + ); + } else { + assertTrue( + "candidate was not on the newest version, new primary is on " + + newPrimaryVersion + + " and there is a candidate on " + + candidateVer, + candidateVer.onOrBefore(newPrimaryVersion) + ); + } } }); }); diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedShardsRoutingTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedShardsRoutingTests.java index f2dc745ad33bf..db4cedbbbe7b5 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedShardsRoutingTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedShardsRoutingTests.java @@ -49,6 +49,7 @@ import org.opensearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; import org.opensearch.common.settings.Settings; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.VersionUtils; import java.util.ArrayList; @@ -647,10 +648,21 @@ public void testFailAllReplicasInitializingOnPrimaryFailWhileHavingAReplicaToEle } public void testReplicaOnNewestVersionIsPromoted() { + testReplicaIsPromoted(false); + } + + public void testReplicaOnOldestVersionIsPromoted() { + testReplicaIsPromoted(true); + } + + private void testReplicaIsPromoted(boolean isSegmentReplicationEnabled) { AllocationService allocation = createAllocationService(Settings.builder().build()); + Settings.Builder settingsBuilder = isSegmentReplicationEnabled + ? 
settings(Version.CURRENT).put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + : settings(Version.CURRENT); Metadata metadata = Metadata.builder() - .put(IndexMetadata.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(3)) + .put(IndexMetadata.builder("test").settings(settingsBuilder).numberOfShards(1).numberOfReplicas(3)) .build(); RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build(); @@ -714,7 +726,12 @@ public void testReplicaOnNewestVersionIsPromoted() { assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(4)); assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(0)); - ShardRouting startedReplica = clusterState.getRoutingNodes().activeReplicaWithHighestVersion(shardId); + ShardRouting startedReplica; + if (isSegmentReplicationEnabled) { + startedReplica = clusterState.getRoutingNodes().activeReplicaWithOldestVersion(shardId); + } else { + startedReplica = clusterState.getRoutingNodes().activeReplicaWithHighestVersion(shardId); + } logger.info("--> all shards allocated, replica that should be promoted: {}", startedReplica); // fail the primary shard again and make sure the correct replica is promoted @@ -739,13 +756,24 @@ public void testReplicaOnNewestVersionIsPromoted() { continue; } Version nodeVer = cursor.getVersion(); - assertTrue( - "expected node [" + cursor.getId() + "] with version " + nodeVer + " to be before " + replicaNodeVersion, - replicaNodeVersion.onOrAfter(nodeVer) - ); + if (isSegmentReplicationEnabled) { + assertTrue( + "expected node [" + cursor.getId() + "] with version " + nodeVer + " to be after " + replicaNodeVersion, + replicaNodeVersion.onOrBefore(nodeVer) + ); + } else { + assertTrue( + "expected node [" + cursor.getId() + "] with version " + nodeVer + " to be before " + replicaNodeVersion, + replicaNodeVersion.onOrAfter(nodeVer) + ); + } } - startedReplica = clusterState.getRoutingNodes().activeReplicaWithHighestVersion(shardId); + if (isSegmentReplicationEnabled) { + startedReplica = clusterState.getRoutingNodes().activeReplicaWithOldestVersion(shardId); + } else { + startedReplica = clusterState.getRoutingNodes().activeReplicaWithHighestVersion(shardId); + } logger.info("--> failing primary shard a second time, should select: {}", startedReplica); // fail the primary shard again, and ensure the same thing happens @@ -771,10 +799,17 @@ public void testReplicaOnNewestVersionIsPromoted() { continue; } Version nodeVer = cursor.getVersion(); - assertTrue( - "expected node [" + cursor.getId() + "] with version " + nodeVer + " to be before " + replicaNodeVersion, - replicaNodeVersion.onOrAfter(nodeVer) - ); + if (isSegmentReplicationEnabled) { + assertTrue( + "expected node [" + cursor.getId() + "] with version " + nodeVer + " to be after " + replicaNodeVersion, + replicaNodeVersion.onOrBefore(nodeVer) + ); + } else { + assertTrue( + "expected node [" + cursor.getId() + "] with version " + nodeVer + " to be before " + replicaNodeVersion, + replicaNodeVersion.onOrAfter(nodeVer) + ); + } } } } diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardConstraintDeciderOverlapTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardConstraintDeciderOverlapTests.java index cf32d2b3cf00f..7f2f048485318 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardConstraintDeciderOverlapTests.java +++ 
b/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardConstraintDeciderOverlapTests.java @@ -30,7 +30,7 @@ public class IndexShardConstraintDeciderOverlapTests extends OpenSearchAllocatio /** * High watermark breach blocks new shard allocations to affected nodes. If shard count on such * nodes is low, this will cause IndexShardPerNodeConstraint to breach. - * + * <p> * This test verifies that this doesn't lead to unassigned shards, and there are no hot spots in eligible * nodes. */ diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardHotSpotTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardHotSpotTests.java index 2efbb256e36bc..617c9b4701722 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardHotSpotTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardHotSpotTests.java @@ -58,7 +58,7 @@ public void testUnderReplicatedClusterScaleOut() { /** * Test cluster scale in scenario, when nodes are gracefully excluded from * cluster before termination. - * + * <p> * During moveShards(), shards are picked from across indexes in an interleaved manner. * This prevents hot spots by evenly picking up shards. Since shard order can change * in subsequent runs. diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java index 440471b787e8c..669a8a5da295e 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java @@ -35,8 +35,8 @@ import org.opensearch.Version; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.EmptyClusterInfoService; +import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNodes; diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/NodeLoadAwareAllocationTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/NodeLoadAwareAllocationTests.java index 0d53e4bf8c4ed..5f7964e6666e4 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/NodeLoadAwareAllocationTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/NodeLoadAwareAllocationTests.java @@ -32,11 +32,11 @@ import java.util.Map; import static java.util.Collections.singletonMap; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.sameInstance; import static org.opensearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.opensearch.cluster.routing.ShardRoutingState.STARTED; import static org.opensearch.cluster.routing.ShardRoutingState.UNASSIGNED; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.sameInstance; public class NodeLoadAwareAllocationTests extends OpenSearchAllocationTestCase { diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java index 
557d7db142671..ac16c5db05a99 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java @@ -36,8 +36,8 @@ import org.apache.logging.log4j.Logger; import org.opensearch.Version; import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.EmptyClusterInfoService; +import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; @@ -67,6 +67,7 @@ import org.opensearch.common.util.set.Sets; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.repositories.IndexId; import org.opensearch.snapshots.EmptySnapshotsInfoService; import org.opensearch.snapshots.InternalSnapshotsInfoService; @@ -439,7 +440,9 @@ public void testRebalanceDoesNotAllocatePrimaryAndReplicasOnDifferentVersionNode .routingTable(routingTable) .nodes(DiscoveryNodes.builder().add(newNode).add(oldNode1).add(oldNode2)) .build(); - AllocationDeciders allocationDeciders = new AllocationDeciders(Collections.singleton(new NodeVersionAllocationDecider())); + AllocationDeciders allocationDeciders = new AllocationDeciders( + Collections.singleton(new NodeVersionAllocationDecider(Settings.EMPTY)) + ); AllocationService strategy = new MockAllocationService( allocationDeciders, new TestGatewayAllocator(), @@ -509,7 +512,7 @@ public void testRestoreDoesNotAllocateSnapshotOnOlderNodes() { .nodes(DiscoveryNodes.builder().add(newNode).add(oldNode1).add(oldNode2)) .build(); AllocationDeciders allocationDeciders = new AllocationDeciders( - Arrays.asList(new ReplicaAfterPrimaryActiveAllocationDecider(), new NodeVersionAllocationDecider()) + Arrays.asList(new ReplicaAfterPrimaryActiveAllocationDecider(), new NodeVersionAllocationDecider(Settings.EMPTY)) ); AllocationService strategy = new MockAllocationService( allocationDeciders, @@ -526,6 +529,148 @@ public void testRestoreDoesNotAllocateSnapshotOnOlderNodes() { } } + public void testRebalanceDoesNotAllocatePrimaryOnHigherVersionNodesSegrepEnabled() { + ShardId shard1 = new ShardId("test1", "_na_", 0); + ShardId shard2 = new ShardId("test2", "_na_", 0); + final DiscoveryNode newNode1 = new DiscoveryNode( + "newNode1", + buildNewFakeTransportAddress(), + emptyMap(), + CLUSTER_MANAGER_DATA_ROLES, + Version.CURRENT + ); + final DiscoveryNode newNode2 = new DiscoveryNode( + "newNode2", + buildNewFakeTransportAddress(), + emptyMap(), + CLUSTER_MANAGER_DATA_ROLES, + Version.CURRENT + ); + final DiscoveryNode oldNode1 = new DiscoveryNode( + "oldNode1", + buildNewFakeTransportAddress(), + emptyMap(), + CLUSTER_MANAGER_DATA_ROLES, + VersionUtils.getPreviousVersion() + ); + final DiscoveryNode oldNode2 = new DiscoveryNode( + "oldNode2", + buildNewFakeTransportAddress(), + emptyMap(), + CLUSTER_MANAGER_DATA_ROLES, + VersionUtils.getPreviousVersion() + ); + AllocationId allocationId1P = AllocationId.newInitializing(); + AllocationId allocationId1R = AllocationId.newInitializing(); + AllocationId allocationId2P = AllocationId.newInitializing(); + AllocationId allocationId2R = AllocationId.newInitializing(); + + Settings segmentReplicationSettings = Settings.builder() + 
.put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build(); + Metadata metadata = Metadata.builder() + .put( + IndexMetadata.builder(shard1.getIndexName()) + .settings(settings(Version.CURRENT).put(segmentReplicationSettings)) + .numberOfShards(1) + .numberOfReplicas(1) + .putInSyncAllocationIds(0, Sets.newHashSet(allocationId1P.getId(), allocationId1R.getId())) + ) + .put( + IndexMetadata.builder(shard2.getIndexName()) + .settings(settings(Version.CURRENT).put(segmentReplicationSettings)) + .numberOfShards(1) + .numberOfReplicas(1) + .putInSyncAllocationIds(0, Sets.newHashSet(allocationId2P.getId(), allocationId2R.getId())) + ) + .build(); + RoutingTable routingTable = RoutingTable.builder() + .add( + IndexRoutingTable.builder(shard1.getIndex()) + .addIndexShard( + new IndexShardRoutingTable.Builder(shard1).addShard( + TestShardRouting.newShardRouting( + shard1.getIndexName(), + shard1.getId(), + oldNode1.getId(), + null, + true, + ShardRoutingState.STARTED, + allocationId1P + ) + ) + .addShard( + TestShardRouting.newShardRouting( + shard1.getIndexName(), + shard1.getId(), + oldNode2.getId(), + null, + false, + ShardRoutingState.STARTED, + allocationId1R + ) + ) + .build() + ) + ) + .add( + IndexRoutingTable.builder(shard2.getIndex()) + .addIndexShard( + new IndexShardRoutingTable.Builder(shard2).addShard( + TestShardRouting.newShardRouting( + shard2.getIndexName(), + shard2.getId(), + oldNode2.getId(), + null, + true, + ShardRoutingState.STARTED, + allocationId2P + ) + ) + .addShard( + TestShardRouting.newShardRouting( + shard2.getIndexName(), + shard2.getId(), + oldNode1.getId(), + null, + false, + ShardRoutingState.STARTED, + allocationId2R + ) + ) + .build() + ) + ) + .build(); + ClusterState state = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(routingTable) + .nodes(DiscoveryNodes.builder().add(newNode1).add(newNode2).add(oldNode1).add(oldNode2)) + .build(); + AllocationDeciders allocationDeciders = new AllocationDeciders( + Collections.singleton(new NodeVersionAllocationDecider(segmentReplicationSettings)) + ); + AllocationService strategy = new MockAllocationService( + allocationDeciders, + new TestGatewayAllocator(), + new BalancedShardsAllocator(Settings.EMPTY), + EmptyClusterInfoService.INSTANCE, + EmptySnapshotsInfoService.INSTANCE + ); + state = strategy.reroute(state, new AllocationCommands(), true, false).getClusterState(); + // the two indices must stay as is, the replicas cannot move to oldNode2 because versions don't match + assertThat(state.routingTable().index(shard2.getIndex()).shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(1)); + assertThat( + state.routingTable().index(shard2.getIndex()).shardsWithState(ShardRoutingState.RELOCATING).get(0).primary(), + equalTo(false) + ); + assertThat(state.routingTable().index(shard1.getIndex()).shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(1)); + assertThat( + state.routingTable().index(shard1.getIndex()).shardsWithState(ShardRoutingState.RELOCATING).get(0).primary(), + equalTo(false) + ); + } + private ClusterState stabilize(ClusterState clusterState, AllocationService service) { logger.trace("RoutingNodes: {}", clusterState.getRoutingNodes()); @@ -626,7 +771,7 @@ public void testMessages() { RoutingAllocation routingAllocation = new RoutingAllocation(null, clusterState.getRoutingNodes(), clusterState, null, null, 0); routingAllocation.debugDecision(true); - final 
NodeVersionAllocationDecider allocationDecider = new NodeVersionAllocationDecider(); + final NodeVersionAllocationDecider allocationDecider = new NodeVersionAllocationDecider(Settings.EMPTY); Decision decision = allocationDecider.canAllocate(primaryShard, newNode, routingAllocation); assertThat(decision.type(), is(Decision.Type.YES)); assertThat(decision.getExplanation(), is("the primary shard is new or already existed on the node")); diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/RandomAllocationDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/RandomAllocationDeciderTests.java index 3f7a998accafe..b5e067de4ed23 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/RandomAllocationDeciderTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/RandomAllocationDeciderTests.java @@ -34,8 +34,8 @@ import org.opensearch.Version; import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.EmptyClusterInfoService; +import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.Metadata.Builder; diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsAllocateUnassignedTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsAllocateUnassignedTests.java index ed178ed7e1526..0be1a1f36118d 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsAllocateUnassignedTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsAllocateUnassignedTests.java @@ -13,11 +13,16 @@ import org.opensearch.cluster.routing.RoutingNodes; import org.opensearch.cluster.routing.RoutingPool; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.routing.allocation.allocator.RemoteShardsBalancer; import java.util.HashMap; import java.util.Map; +import static org.opensearch.cluster.routing.UnassignedInfo.AllocationStatus.DECIDERS_NO; +import static org.opensearch.cluster.routing.UnassignedInfo.AllocationStatus.DECIDERS_THROTTLED; +import static org.opensearch.cluster.routing.UnassignedInfo.AllocationStatus.NO_ATTEMPT; + public class RemoteShardsAllocateUnassignedTests extends RemoteShardsBalancerBaseTestCase { /** @@ -89,6 +94,38 @@ public void testPrimaryAllocation() { } } + /** + * Test remote unassigned shard allocation when deciders make NO or THROTTLED decision. 
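+ * As the assertions below verify, only primaries record the decider verdict (DECIDERS_THROTTLED or DECIDERS_NO), while replicas remain at NO_ATTEMPT, since replica allocation is not attempted while the primary is unassigned.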
+ */ + public void testNoRemoteAllocation() { + final int localOnlyNodes = 10; + final int remoteCapableNodes = 5; + final int localIndices = 2; + final int remoteIndices = 1; + final ClusterState oldState = createInitialCluster(localOnlyNodes, remoteCapableNodes, localIndices, remoteIndices); + final boolean throttle = randomBoolean(); + final AllocationService service = this.createRejectRemoteAllocationService(throttle); + final ClusterState newState = allocateShardsAndBalance(oldState, service); + final RoutingNodes routingNodes = newState.getRoutingNodes(); + final RoutingAllocation allocation = getRoutingAllocation(newState, routingNodes); + + assertEquals(totalShards(remoteIndices), routingNodes.unassigned().size()); + + for (ShardRouting shard : newState.getRoutingTable().allShards()) { + if (RoutingPool.getShardPool(shard, allocation) == RoutingPool.REMOTE_CAPABLE) { + assertTrue(shard.unassigned()); + if (shard.primary()) { + final UnassignedInfo.AllocationStatus expect = throttle ? DECIDERS_THROTTLED : DECIDERS_NO; + assertEquals(expect, shard.unassignedInfo().getLastAllocationStatus()); + } else { + assertEquals(NO_ATTEMPT, shard.unassignedInfo().getLastAllocationStatus()); + } + } else { + assertFalse(shard.unassigned()); + } + } + } + /** * Test remote unassigned shard allocation when remote capable nodes fail to come up. */ diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java index dbb08a999877d..6a03a1f79bcde 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java @@ -20,7 +20,9 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.RoutingNodes; +import org.opensearch.cluster.routing.RoutingPool; import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.UnassignedInfo; @@ -28,6 +30,7 @@ import org.opensearch.cluster.routing.allocation.allocator.ShardsAllocator; import org.opensearch.cluster.routing.allocation.decider.AllocationDecider; import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.opensearch.cluster.routing.allocation.decider.Decision; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; @@ -201,6 +204,41 @@ public AllocationService createRemoteCapableAllocationService(String excludeNode ); } + public AllocationService createRejectRemoteAllocationService(boolean throttle) { + Settings settings = Settings.Builder.EMPTY_SETTINGS; + return new OpenSearchAllocationTestCase.MockAllocationService( + createRejectRemoteAllocationDeciders(throttle), + new TestGatewayAllocator(), + createShardAllocator(settings), + EmptyClusterInfoService.INSTANCE, + SNAPSHOT_INFO_SERVICE_WITH_NO_SHARD_SIZES + ); + } + + public AllocationDeciders createRejectRemoteAllocationDeciders(boolean throttle) { + Settings settings = Settings.Builder.EMPTY_SETTINGS; + List<AllocationDecider> deciders = new ArrayList<>( + ClusterModule.createAllocationDeciders(settings, 
EMPTY_CLUSTER_SETTINGS, Collections.emptyList()) + ); + deciders.add(new AllocationDecider() { + @Override + public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { + if (RoutingPool.REMOTE_CAPABLE.equals(RoutingPool.getShardPool(shardRouting, allocation))) { + return throttle ? Decision.THROTTLE : Decision.NO; + } else { + return Decision.ALWAYS; + } + } + + @Override + public Decision canAllocateAnyShardToNode(RoutingNode node, RoutingAllocation allocation) { + return throttle ? Decision.THROTTLE : Decision.YES; + } + }); + Collections.shuffle(deciders, random()); + return new AllocationDeciders(deciders); + } + public AllocationDeciders createAllocationDeciders() { Settings settings = Settings.Builder.EMPTY_SETTINGS; return randomAllocationDeciders(settings, EMPTY_CLUSTER_SETTINGS, random()); diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsMoveShardsTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsMoveShardsTests.java index f2e79b319d0dd..b840b78eff448 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsMoveShardsTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsMoveShardsTests.java @@ -47,7 +47,7 @@ public void testExcludeNodeIdMoveBlocked() { /** * Test move operations for index level allocation settings. - * Supported for local indices, not supported for remote indices. + * Supported for local indices and remote indices. */ public void testIndexLevelExclusions() throws InterruptedException { int localOnlyNodes = 7; @@ -102,8 +102,9 @@ public void testIndexLevelExclusions() throws InterruptedException { // No shard of updated local index should be on excluded local capable node assertTrue(routingTable.allShards(localIndex).stream().noneMatch(shard -> shard.currentNodeId().equals(excludedLocalOnlyNode))); - // Since remote index shards are untouched, at least one shard should - // continue to stay on the excluded remote capable node - assertTrue(routingTable.allShards(remoteIndex).stream().anyMatch(shard -> shard.currentNodeId().equals(excludedRemoteCapableNode))); + // No shard of updated remote index should be on excluded remote capable node + assertTrue( + routingTable.allShards(remoteIndex).stream().noneMatch(shard -> shard.currentNodeId().equals(excludedRemoteCapableNode)) + ); } } diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsRebalanceShardsTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsRebalanceShardsTests.java index ef9ae90e18bb5..e1c0a7eff1f6e 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsRebalanceShardsTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsRebalanceShardsTests.java @@ -21,7 +21,7 @@ public class RemoteShardsRebalanceShardsTests extends RemoteShardsBalancerBaseTe /** * Test remote shard allocation and balancing for standard new cluster setup. - * + * <p> * Post rebalance primaries should be balanced across all the nodes. */ public void testShardAllocationAndRebalance() { @@ -72,7 +72,7 @@ private int getTotalShardCountAcrossNodes(final Map<String, Integer> nodePrimari /** * Asserts that the expected value is within the variance range. - * + * <p> * Being used to assert the average number of shards per node. 
* Variance is required in case of non-absolute mean values; * for example, total number of remote capable nodes in a cluster. diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java index 8afb133cbf248..e2ad8b1cdbb7d 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java @@ -36,13 +36,13 @@ import org.apache.logging.log4j.Logger; import org.opensearch.Version; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.RoutingNodes; import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.common.settings.Settings; -import org.opensearch.cluster.OpenSearchAllocationTestCase; import static org.opensearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.opensearch.cluster.routing.ShardRoutingState.STARTED; diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java index afc899469a19c..50c2b55e28f82 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java @@ -34,8 +34,8 @@ import org.opensearch.Version; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.EmptyClusterInfoService; +import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNodes; diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java index 22d148247a854..85865ce048723 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java @@ -49,9 +49,9 @@ import static org.opensearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.opensearch.cluster.routing.ShardRoutingState.STARTED; +import static org.opensearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_REPLICAS_RECOVERIES_SETTING; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; -import static org.opensearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_REPLICAS_RECOVERIES_SETTING; public class RoutingNodesIntegrityTests extends OpenSearchAllocationTestCase { private final Logger logger = LogManager.getLogger(IndexBalanceTests.class); diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/ShardsLimitAllocationTests.java 
b/server/src/test/java/org/opensearch/cluster/routing/allocation/ShardsLimitAllocationTests.java index 36b55a0954d32..7917d21635562 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/ShardsLimitAllocationTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/ShardsLimitAllocationTests.java @@ -34,7 +34,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.hamcrest.Matcher; import org.opensearch.Version; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.OpenSearchAllocationTestCase; @@ -46,16 +45,16 @@ import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.opensearch.common.settings.Settings; - -import static org.opensearch.cluster.routing.ShardRoutingState.RELOCATING; -import static org.opensearch.cluster.routing.ShardRoutingState.STARTED; +import org.hamcrest.Matcher; import java.util.Set; import java.util.stream.Collectors; import java.util.stream.Stream; -import static org.hamcrest.Matchers.equalTo; +import static org.opensearch.cluster.routing.ShardRoutingState.RELOCATING; +import static org.opensearch.cluster.routing.ShardRoutingState.STARTED; import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.equalTo; public class ShardsLimitAllocationTests extends OpenSearchAllocationTestCase { private final Logger logger = LogManager.getLogger(ShardsLimitAllocationTests.class); diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java index 2180a14f5bf30..bde8a45359814 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java @@ -69,6 +69,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.index.store.remote.filecache.FileCacheStats; import org.opensearch.repositories.IndexId; import org.opensearch.snapshots.EmptySnapshotsInfoService; @@ -405,6 +406,7 @@ public void testFileCacheRemoteShardsDecisions() { DiskThresholdDecider diskThresholdDecider = makeDecider(diskSettings); Metadata metadata = Metadata.builder() .put(IndexMetadata.builder("test").settings(remoteIndexSettings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(0)) + .persistentSettings(Settings.builder().put(FileCache.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING.getKey(), 5).build()) .build(); RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build(); diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/EnableAllocationShortCircuitTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/EnableAllocationShortCircuitTests.java index d688c5a7edfb3..592de8215cd8f 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/EnableAllocationShortCircuitTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/EnableAllocationShortCircuitTests.java @@ -35,8 +35,8 @@ import org.opensearch.cluster.ClusterModule; import org.opensearch.cluster.ClusterName; import 
org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.EmptyClusterInfoService; +import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNodes; diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java index 0b00d26182346..a8282faaddced 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java @@ -33,8 +33,8 @@ import org.opensearch.Version; import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.EmptyClusterInfoService; +import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/TargetPoolAllocationDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/TargetPoolAllocationDeciderTests.java index 8f2db5db969d2..052c7877404a8 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/TargetPoolAllocationDeciderTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/TargetPoolAllocationDeciderTests.java @@ -200,4 +200,88 @@ public void testTargetPoolDedicatedSearchNodeAllocationDecisions() { assertEquals(Decision.YES.type(), deciders.shouldAutoExpandToNode(localIdx, localOnlyNode.node(), globalAllocation).type()); assertEquals(Decision.YES.type(), deciders.shouldAutoExpandToNode(remoteIdx, remoteCapableNode.node(), globalAllocation).type()); } + + public void testDebugMessage() { + ClusterState clusterState = createInitialCluster(3, 3, true, 2, 2); + AllocationService service = this.createRemoteCapableAllocationService(); + clusterState = allocateShardsAndBalance(clusterState, service); + + // Add an unassigned primary shard for force allocation checks + Metadata metadata = Metadata.builder(clusterState.metadata()) + .put(IndexMetadata.builder("test_local_unassigned").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1)) + .build(); + RoutingTable routingTable = RoutingTable.builder(clusterState.routingTable()) + .addAsNew(metadata.index("test_local_unassigned")) + .build(); + clusterState = ClusterState.builder(clusterState).metadata(metadata).routingTable(routingTable).build(); + + // Add remote index unassigned primary + clusterState = createRemoteIndex(clusterState, "test_remote_unassigned"); + + RoutingNodes defaultRoutingNodes = clusterState.getRoutingNodes(); + RoutingAllocation globalAllocation = getRoutingAllocation(clusterState, defaultRoutingNodes); + globalAllocation.setDebugMode(RoutingAllocation.DebugMode.ON); + + ShardRouting localShard = clusterState.routingTable() + .allShards(getIndexName(0, false)) + .stream() + .filter(ShardRouting::primary) + .collect(Collectors.toList()) + .get(0); + ShardRouting remoteShard = clusterState.routingTable() + .allShards(getIndexName(0, true)) + .stream() + 
.filter(ShardRouting::primary) + .collect(Collectors.toList()) + .get(0); + ShardRouting unassignedLocalShard = clusterState.routingTable() + .allShards("test_local_unassigned") + .stream() + .filter(ShardRouting::primary) + .collect(Collectors.toList()) + .get(0); + ShardRouting unassignedRemoteShard = clusterState.routingTable() + .allShards("test_remote_unassigned") + .stream() + .filter(ShardRouting::primary) + .collect(Collectors.toList()) + .get(0); + IndexMetadata localIdx = globalAllocation.metadata().getIndexSafe(localShard.index()); + IndexMetadata remoteIdx = globalAllocation.metadata().getIndexSafe(remoteShard.index()); + String localNodeId = LOCAL_NODE_PREFIX; + for (RoutingNode routingNode : globalAllocation.routingNodes()) { + if (routingNode.nodeId().startsWith(LOCAL_NODE_PREFIX)) { + localNodeId = routingNode.nodeId(); + break; + } + } + String remoteNodeId = remoteShard.currentNodeId(); + RoutingNode localOnlyNode = defaultRoutingNodes.node(localNodeId); + RoutingNode remoteCapableNode = defaultRoutingNodes.node(remoteNodeId); + + TargetPoolAllocationDecider targetPoolAllocationDecider = new TargetPoolAllocationDecider(); + Decision decision = targetPoolAllocationDecider.canAllocate(localShard, remoteCapableNode, globalAllocation); + assertEquals( + "Routing pools are incompatible. Shard pool: [LOCAL_ONLY], node pool: [REMOTE_CAPABLE] without [data] role", + decision.getExplanation() + ); + + decision = targetPoolAllocationDecider.canAllocate(remoteShard, localOnlyNode, globalAllocation); + assertEquals("Routing pools are incompatible. Shard pool: [REMOTE_CAPABLE], node pool: [LOCAL_ONLY]", decision.getExplanation()); + + decision = targetPoolAllocationDecider.canAllocate(remoteShard, remoteCapableNode, globalAllocation); + assertEquals("Routing pools are compatible. Shard pool: [REMOTE_CAPABLE], node pool: [REMOTE_CAPABLE]", decision.getExplanation()); + + decision = targetPoolAllocationDecider.canAllocate(localIdx, remoteCapableNode, globalAllocation); + assertEquals( + "Routing pools are incompatible. Index pool: [LOCAL_ONLY], node pool: [REMOTE_CAPABLE] without [data] role", + decision.getExplanation() + ); + + decision = targetPoolAllocationDecider.canAllocate(remoteIdx, localOnlyNode, globalAllocation); + assertEquals("Routing pools are incompatible. Index pool: [REMOTE_CAPABLE], node pool: [LOCAL_ONLY]", decision.getExplanation()); + + decision = targetPoolAllocationDecider.canAllocate(remoteIdx, remoteCapableNode, globalAllocation); + assertEquals("Routing pools are compatible. 
Index pool: [REMOTE_CAPABLE], node pool: [REMOTE_CAPABLE]", decision.getExplanation()); + } } diff --git a/server/src/test/java/org/opensearch/cluster/serialization/ClusterSerializationTests.java b/server/src/test/java/org/opensearch/cluster/serialization/ClusterSerializationTests.java index 85cedef39b42a..b6140b0764191 100644 --- a/server/src/test/java/org/opensearch/cluster/serialization/ClusterSerializationTests.java +++ b/server/src/test/java/org/opensearch/cluster/serialization/ClusterSerializationTests.java @@ -39,8 +39,8 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterState.Custom; import org.opensearch.cluster.Diff; -import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.NamedDiff; +import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.RestoreInProgress; import org.opensearch.cluster.SnapshotDeletionsInProgress; import org.opensearch.cluster.metadata.IndexMetadata; @@ -51,11 +51,11 @@ import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.common.UUIDs; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.snapshots.Snapshot; import org.opensearch.snapshots.SnapshotId; diff --git a/server/src/test/java/org/opensearch/cluster/serialization/ClusterStateToStringTests.java b/server/src/test/java/org/opensearch/cluster/serialization/ClusterStateToStringTests.java index e44964e713710..c978325917fe9 100644 --- a/server/src/test/java/org/opensearch/cluster/serialization/ClusterStateToStringTests.java +++ b/server/src/test/java/org/opensearch/cluster/serialization/ClusterStateToStringTests.java @@ -42,9 +42,9 @@ import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.cluster.routing.allocation.AllocationService; -import org.opensearch.common.Strings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.Strings; +import org.opensearch.core.xcontent.MediaTypeRegistry; import java.util.Arrays; @@ -80,7 +80,7 @@ public void testClusterStateSerialization() throws Exception { AllocationService strategy = createAllocationService(); clusterState = ClusterState.builder(clusterState).routingTable(strategy.reroute(clusterState, "reroute").routingTable()).build(); - String clusterStateString = Strings.toString(XContentType.JSON, clusterState); + String clusterStateString = Strings.toString(MediaTypeRegistry.JSON, clusterState); assertNotNull(clusterStateString); assertThat(clusterStateString, containsString("test_idx")); diff --git a/server/src/test/java/org/opensearch/cluster/serialization/DiffableTests.java b/server/src/test/java/org/opensearch/cluster/serialization/DiffableTests.java index ac2470e940916..968d6beedd802 100644 --- a/server/src/test/java/org/opensearch/cluster/serialization/DiffableTests.java +++ b/server/src/test/java/org/opensearch/cluster/serialization/DiffableTests.java @@ -37,9 +37,9 @@ import org.opensearch.cluster.DiffableUtils; import 
org.opensearch.cluster.DiffableUtils.MapDiff; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.util.set.Sets; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.util.set.Sets; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java index e6da768650088..c5ed505e6bbf2 100644 --- a/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java @@ -51,8 +51,8 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.MockLogAppender; +import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.junit.annotations.TestLogging; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java b/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java index cc5debfae56f5..e25a0e0b2c3bf 100644 --- a/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java @@ -22,17 +22,16 @@ import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.concurrent.TimeUnit; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; - import static org.opensearch.test.ClusterServiceUtils.setState; /** diff --git a/server/src/test/java/org/opensearch/cluster/service/ClusterServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/ClusterServiceTests.java index ed1131a898ad9..4d88683826af7 100644 --- a/server/src/test/java/org/opensearch/cluster/service/ClusterServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/ClusterServiceTests.java @@ -8,11 +8,11 @@ package org.opensearch.cluster.service; -import org.junit.After; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; +import org.junit.After; import static org.hamcrest.Matchers.equalTo; diff --git a/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java index 3c27748daa87d..85f6c129944fa 100644 --- a/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java @@ -32,6 +32,8 @@ package org.opensearch.cluster.service; +import com.carrotsearch.randomizedtesting.annotations.Timeout; + import org.apache.logging.log4j.Level; import 
org.apache.logging.log4j.LogManager; import org.opensearch.OpenSearchException; @@ -59,8 +61,8 @@ import org.opensearch.common.util.concurrent.BaseFuture; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.node.Node; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.MockLogAppender; +import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.junit.annotations.TestLogging; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -485,6 +487,9 @@ public void onFailure(String source, Exception e) { } }); assertBusy(mockAppender::assertAllExpectationsMatched); + // verify stats values after state is published + assertEquals(1, clusterManagerService.getClusterStateStats().getUpdateSuccess()); + assertEquals(0, clusterManagerService.getClusterStateStats().getUpdateFailed()); } } } @@ -863,6 +868,7 @@ public ClusterManagerTaskThrottler.ThrottlingKey getClusterManagerThrottlingKey( AtomicInteger throttledTask3 = new AtomicInteger(); AtomicInteger succeededTask1 = new AtomicInteger(); AtomicInteger succeededTask2 = new AtomicInteger(); + AtomicInteger succeededTask3 = new AtomicInteger(); AtomicInteger timedOutTask3 = new AtomicInteger(); final ClusterStateTaskListener listener = new ClusterStateTaskListener() { @@ -880,6 +886,8 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS succeededTask1.incrementAndGet(); } else if (source.equals(task2)) { succeededTask2.incrementAndGet(); + } else if (source.equals(task3)) { + succeededTask3.incrementAndGet(); } latch.countDown(); } @@ -955,7 +963,7 @@ public void run() { assertEquals(numberOfTask1, throttledTask1.get() + succeededTask1.get()); assertEquals(numberOfTask2, succeededTask2.get()); assertEquals(0, throttledTask2.get()); - assertEquals(numberOfTask3, throttledTask3.get() + timedOutTask3.get()); + assertEquals(numberOfTask3, throttledTask3.get() + timedOutTask3.get() + succeededTask3.get()); masterService.close(); } @@ -1378,6 +1386,76 @@ public void testDeprecatedMasterServiceUpdateTaskThreadName() { assertThrows(AssertionError.class, () -> MasterService.assertClusterManagerUpdateThread()); } + @Timeout(millis = 5_000) + public void testTaskTimeout() throws InterruptedException { + try (ClusterManagerService clusterManagerService = createClusterManagerService(true)) { + final AtomicInteger failureCount = new AtomicInteger(); + final AtomicInteger successCount = new AtomicInteger(); + final CountDownLatch taskStartLatch = new CountDownLatch(1); + final CountDownLatch blockingTaskLatch = new CountDownLatch(1); + final CountDownLatch timeoutLatch = new CountDownLatch(1); + final ClusterStateTaskListener blockingListener = new ClusterStateTaskListener() { + @Override + public void onFailure(String source, Exception e) { + fail("Unexpected failure"); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + successCount.incrementAndGet(); + taskStartLatch.countDown(); + try { + blockingTaskLatch.await(); + } catch (InterruptedException e) { + fail("Interrupted"); + } + } + }; + final ClusterStateTaskListener timeoutListener = new ClusterStateTaskListener() { + @Override + public void onFailure(String source, Exception e) { + assertEquals("timeout", source); + failureCount.incrementAndGet(); + timeoutLatch.countDown(); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + 
fail("Unexpected success"); + } + }; + + final ClusterStateTaskExecutor<Object> executor = (currentState, tasks) -> ClusterStateTaskExecutor.ClusterTasksResult.builder() + .successes(tasks) + .build(currentState); + + // start a task and wait for it to start and block on the clusterStateProcessed callback + clusterManagerService.submitStateUpdateTask( + "success", + new Object(), + ClusterStateTaskConfig.build(randomFrom(Priority.values())), + executor, + blockingListener + ); + taskStartLatch.await(); + + // start a second task that is guaranteed to timeout as the first task is still running + clusterManagerService.submitStateUpdateTask( + "timeout", + new Object(), + ClusterStateTaskConfig.build(randomFrom(Priority.values()), TimeValue.timeValueMillis(1L)), + executor, + timeoutListener + ); + + // wait for the timeout to happen, then unblock and assert one success and one failure + timeoutLatch.await(); + blockingTaskLatch.countDown(); + assertEquals(1, failureCount.get()); + assertEquals(1, successCount.get()); + } + } + /** * Returns the cluster state that the cluster-manager service uses (and that is provided by the discovery layer) */ diff --git a/server/src/test/java/org/opensearch/cluster/service/TaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/service/TaskExecutorTests.java index 64a828446cf6f..28905c570ebfe 100644 --- a/server/src/test/java/org/opensearch/cluster/service/TaskExecutorTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/TaskExecutorTests.java @@ -35,11 +35,11 @@ import org.opensearch.cluster.metadata.ProcessClusterEventTimeoutException; import org.opensearch.common.Nullable; import org.opensearch.common.Priority; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor; -import org.opensearch.common.lease.Releasable; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/test/java/org/opensearch/cluster/structure/RoutingIteratorTests.java b/server/src/test/java/org/opensearch/cluster/structure/RoutingIteratorTests.java index 954be34695009..86adfc1279d9a 100644 --- a/server/src/test/java/org/opensearch/cluster/structure/RoutingIteratorTests.java +++ b/server/src/test/java/org/opensearch/cluster/structure/RoutingIteratorTests.java @@ -56,8 +56,8 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.index.IndexModule; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.IndexModule; import org.opensearch.test.ClusterServiceUtils; import org.opensearch.threadpool.TestThreadPool; @@ -71,13 +71,13 @@ import static java.util.Collections.singletonMap; import static java.util.Collections.unmodifiableMap; +import static org.opensearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.sameInstance; -import static org.opensearch.cluster.routing.ShardRoutingState.INITIALIZING; public class 
RoutingIteratorTests extends OpenSearchAllocationTestCase { public void testEmptyIterator() { diff --git a/server/src/test/java/org/opensearch/common/BooleansTests.java b/server/src/test/java/org/opensearch/common/BooleansTests.java deleted file mode 100644 index 7e4a0ad8e456b..0000000000000 --- a/server/src/test/java/org/opensearch/common/BooleansTests.java +++ /dev/null @@ -1,164 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.common; - -import org.opensearch.test.OpenSearchTestCase; - -import java.util.Locale; - -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.nullValue; - -public class BooleansTests extends OpenSearchTestCase { - private static final String[] NON_BOOLEANS = new String[] { - "11", - "00", - "sdfsdfsf", - "F", - "T", - "on", - "off", - "yes", - "no", - "0", - "1", - "True", - "False" }; - private static final String[] BOOLEANS = new String[] { "true", "false" }; - - public void testIsBoolean() { - for (String b : BOOLEANS) { - String t = "prefix" + b + "suffix"; - assertTrue("failed to recognize [" + b + "] as boolean", Booleans.isBoolean(t.toCharArray(), "prefix".length(), b.length())); - assertTrue("failed to recognize [" + b + "] as boolean", Booleans.isBoolean(b)); - } - } - - public void testIsNonBoolean() { - assertThat(Booleans.isBoolean(null, 0, 1), is(false)); - - for (String nb : NON_BOOLEANS) { - String t = "prefix" + nb + "suffix"; - assertFalse("recognized [" + nb + "] as boolean", Booleans.isBoolean(t.toCharArray(), "prefix".length(), nb.length())); - assertFalse("recognized [" + nb + "] as boolean", Booleans.isBoolean(t)); - } - } - - public void testParseBooleanWithFallback() { - assertFalse(Booleans.parseBoolean(null, false)); - assertTrue(Booleans.parseBoolean(null, true)); - assertNull(Booleans.parseBoolean(null, null)); - assertFalse(Booleans.parseBoolean(null, Boolean.FALSE)); - assertTrue(Booleans.parseBoolean(null, Boolean.TRUE)); - - assertTrue(Booleans.parseBoolean("true", randomFrom(Boolean.TRUE, Boolean.FALSE, null))); - assertFalse(Booleans.parseBoolean("false", randomFrom(Boolean.TRUE, Boolean.FALSE, null))); - } - - public void testParseNonBooleanWithFallback() { - for (String nonBoolean : NON_BOOLEANS) { - boolean defaultValue = randomFrom(Boolean.TRUE, Boolean.FALSE); - - expectThrows(IllegalArgumentException.class, () -> Booleans.parseBoolean(nonBoolean, defaultValue)); - expectThrows( - IllegalArgumentException.class, - () -> 
Booleans.parseBoolean(nonBoolean.toCharArray(), 0, nonBoolean.length(), defaultValue) - ); - } - } - - public void testParseBoolean() { - assertTrue(Booleans.parseBoolean("true")); - assertFalse(Booleans.parseBoolean("false")); - } - - public void testParseNonBoolean() { - expectThrows(IllegalArgumentException.class, () -> Booleans.parseBoolean(null)); - for (String nonBoolean : NON_BOOLEANS) { - expectThrows(IllegalArgumentException.class, () -> Booleans.parseBoolean(nonBoolean)); - } - } - - public void testIsBooleanLenient() { - String[] booleans = new String[] { "true", "false", "on", "off", "yes", "no", "0", "1" }; - String[] notBooleans = new String[] { "11", "00", "sdfsdfsf", "F", "T" }; - assertThat(Booleans.isBooleanLenient(null, 0, 1), is(false)); - - for (String b : booleans) { - String t = "prefix" + b + "suffix"; - assertTrue( - "failed to recognize [" + b + "] as boolean", - Booleans.isBooleanLenient(t.toCharArray(), "prefix".length(), b.length()) - ); - } - - for (String nb : notBooleans) { - String t = "prefix" + nb + "suffix"; - assertFalse("recognized [" + nb + "] as boolean", Booleans.isBooleanLenient(t.toCharArray(), "prefix".length(), nb.length())); - } - } - - public void testParseBooleanLenient() { - assertThat(Booleans.parseBooleanLenient(randomFrom("true", "on", "yes", "1"), randomBoolean()), is(true)); - assertThat(Booleans.parseBooleanLenient(randomFrom("false", "off", "no", "0"), randomBoolean()), is(false)); - assertThat(Booleans.parseBooleanLenient(randomFrom("true", "on", "yes").toUpperCase(Locale.ROOT), randomBoolean()), is(true)); - assertThat(Booleans.parseBooleanLenient(null, false), is(false)); - assertThat(Booleans.parseBooleanLenient(null, true), is(true)); - - assertThat( - Booleans.parseBooleanLenient(randomFrom("true", "on", "yes", "1"), randomFrom(Boolean.TRUE, Boolean.FALSE, null)), - is(true) - ); - assertThat( - Booleans.parseBooleanLenient(randomFrom("false", "off", "no", "0"), randomFrom(Boolean.TRUE, Boolean.FALSE, null)), - is(false) - ); - assertThat( - Booleans.parseBooleanLenient( - randomFrom("true", "on", "yes").toUpperCase(Locale.ROOT), - randomFrom(Boolean.TRUE, Boolean.FALSE, null) - ), - is(true) - ); - assertThat(Booleans.parseBooleanLenient(null, Boolean.FALSE), is(false)); - assertThat(Booleans.parseBooleanLenient(null, Boolean.TRUE), is(true)); - assertThat(Booleans.parseBooleanLenient(null, null), nullValue()); - - char[] chars = randomFrom("true", "on", "yes", "1").toCharArray(); - assertThat(Booleans.parseBooleanLenient(chars, 0, chars.length, randomBoolean()), is(true)); - chars = randomFrom("false", "off", "no", "0").toCharArray(); - assertThat(Booleans.parseBooleanLenient(chars, 0, chars.length, randomBoolean()), is(false)); - chars = randomFrom("true", "on", "yes").toUpperCase(Locale.ROOT).toCharArray(); - assertThat(Booleans.parseBooleanLenient(chars, 0, chars.length, randomBoolean()), is(true)); - } -} diff --git a/server/src/test/java/org/opensearch/common/ChannelsTests.java b/server/src/test/java/org/opensearch/common/ChannelsTests.java index 801ac5bae48c5..23d3784b89d66 100644 --- a/server/src/test/java/org/opensearch/common/ChannelsTests.java +++ b/server/src/test/java/org/opensearch/common/ChannelsTests.java @@ -32,9 +32,9 @@ package org.opensearch.common; +import org.opensearch.common.io.Channels; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.io.Channels; import org.opensearch.test.OpenSearchTestCase; import 
org.hamcrest.Matchers; import org.junit.After; diff --git a/server/src/test/java/org/opensearch/common/NumbersTests.java b/server/src/test/java/org/opensearch/common/NumbersTests.java index 37518e373618a..7990ba74f162a 100644 --- a/server/src/test/java/org/opensearch/common/NumbersTests.java +++ b/server/src/test/java/org/opensearch/common/NumbersTests.java @@ -33,6 +33,7 @@ package org.opensearch.common; import com.carrotsearch.randomizedtesting.annotations.Timeout; + import org.opensearch.test.OpenSearchTestCase; import java.math.BigDecimal; @@ -220,4 +221,25 @@ public void testToUnsignedBigInteger() { assertEquals(random, Numbers.toUnsignedBigInteger(random.longValue())); assertEquals(Numbers.MAX_UNSIGNED_LONG_VALUE, Numbers.toUnsignedBigInteger(Numbers.MAX_UNSIGNED_LONG_VALUE.longValue())); } + + public void testNextPowerOfTwo() { + // Negative values: + for (int i = 0; i < 1000; i++) { + long value = randomLongBetween(-500000, -1); + assertEquals(1, Numbers.nextPowerOfTwo(value)); + } + + // Zero value: + assertEquals(1, Numbers.nextPowerOfTwo(0L)); + + // Positive values: + for (int i = 0; i < 1000; i++) { + long value = randomLongBetween(1, 500000); + long nextPowerOfTwo = Numbers.nextPowerOfTwo(value); + + assertTrue(nextPowerOfTwo > value); // must be strictly greater + assertTrue((nextPowerOfTwo >>> 1) <= value); // must be greater by no more than one power of two + assertEquals(0, nextPowerOfTwo & (nextPowerOfTwo - 1)); // must be a power of two + } + } } diff --git a/server/src/test/java/org/opensearch/common/ReleasablesTests.java b/server/src/test/java/org/opensearch/common/ReleasablesTests.java index 4b5699418d430..d9f561d079054 100644 --- a/server/src/test/java/org/opensearch/common/ReleasablesTests.java +++ b/server/src/test/java/org/opensearch/common/ReleasablesTests.java @@ -31,9 +31,9 @@ package org.opensearch.common; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; +import org.opensearch.test.OpenSearchTestCase; import java.util.concurrent.atomic.AtomicInteger; diff --git a/server/src/test/java/org/opensearch/common/RoundingTests.java b/server/src/test/java/org/opensearch/common/RoundingTests.java index e0c44e3516e7b..9a6e6a6ac54d0 100644 --- a/server/src/test/java/org/opensearch/common/RoundingTests.java +++ b/server/src/test/java/org/opensearch/common/RoundingTests.java @@ -33,7 +33,6 @@ package org.opensearch.common; import org.opensearch.common.collect.Tuple; -import org.opensearch.common.rounding.DateTimeUnit; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.time.DateFormatters; import org.opensearch.common.unit.TimeValue; @@ -236,10 +235,10 @@ public void testOffsetRounding() { /** * Randomized test on TimeUnitRounding. Test uses random - * {@link DateTimeUnit} and {@link ZoneId} and often (50% of the time) + * {@link org.opensearch.common.Rounding.DateTimeUnit} and {@link ZoneId} and often (50% of the time) * chooses test dates that are exactly on or close to offset changes (e.g. * DST) in the chosen time zone. - * + * <p> * It rounds the test date down and up and performs various checks on the * rounding unit interval that is defined by this. 
Assumptions tested are * described in diff --git a/server/src/test/java/org/opensearch/common/RoundingWireTests.java b/server/src/test/java/org/opensearch/common/RoundingWireTests.java index 6f6e2a95950e5..4e51c0457b564 100644 --- a/server/src/test/java/org/opensearch/common/RoundingWireTests.java +++ b/server/src/test/java/org/opensearch/common/RoundingWireTests.java @@ -33,8 +33,8 @@ package org.opensearch.common; import org.opensearch.common.Rounding.DateTimeUnit; -import org.opensearch.core.common.io.stream.Writeable.Reader; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.io.stream.Writeable.Reader; import org.opensearch.test.AbstractWireSerializingTestCase; public class RoundingWireTests extends AbstractWireSerializingTestCase<Rounding> { diff --git a/server/src/test/java/org/opensearch/common/StringsTests.java b/server/src/test/java/org/opensearch/common/StringsTests.java deleted file mode 100644 index 50f7be8be170d..0000000000000 --- a/server/src/test/java/org/opensearch/common/StringsTests.java +++ /dev/null @@ -1,127 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.common; - -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.core.xcontent.ToXContentObject; -import org.opensearch.test.OpenSearchTestCase; - -import java.util.Collections; - -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.is; - -public class StringsTests extends OpenSearchTestCase { - - public void testIsAllOrWildCardString() { - assertThat(Strings.isAllOrWildcard("_all"), is(true)); - assertThat(Strings.isAllOrWildcard("*"), is(true)); - assertThat(Strings.isAllOrWildcard("foo"), is(false)); - assertThat(Strings.isAllOrWildcard(""), is(false)); - assertThat(Strings.isAllOrWildcard((String) null), is(false)); - } - - public void testSubstring() { - assertEquals(null, Strings.substring(null, 0, 1000)); - assertEquals("foo", Strings.substring("foo", 0, 1000)); - assertEquals("foo", Strings.substring("foo", 0, 3)); - assertEquals("oo", Strings.substring("foo", 1, 3)); - assertEquals("oo", Strings.substring("foo", 1, 100)); - assertEquals("f", Strings.substring("foo", 0, 1)); - } - - public void testCleanTruncate() { - assertEquals(null, Strings.cleanTruncate(null, 10)); - assertEquals("foo", Strings.cleanTruncate("foo", 10)); - assertEquals("foo", Strings.cleanTruncate("foo", 3)); - // Throws out high surrogates - assertEquals("foo", Strings.cleanTruncate("foo\uD83D\uDEAB", 4)); - // But will keep the whole character - assertEquals("foo\uD83D\uDEAB", Strings.cleanTruncate("foo\uD83D\uDEAB", 5)); - /* - * Doesn't take care around combining marks. This example has its - * meaning changed because that last codepoint is supposed to combine - * backwards into the find "o" and be represented as the "o" with a - * circle around it with a slash through it. As in "no 'o's allowed - * here. 
- */ - assertEquals("o", Strings.cleanTruncate("o\uD83D\uDEAB", 1)); - assertEquals("", Strings.cleanTruncate("foo", 0)); - } - - public void testToStringToXContent() { - final ToXContent toXContent; - final boolean error; - if (randomBoolean()) { - if (randomBoolean()) { - error = false; - toXContent = (builder, params) -> builder.field("ok", "here").field("catastrophe", ""); - } else { - error = true; - toXContent = (builder, params) -> builder.startObject().field("ok", "here").field("catastrophe", "").endObject(); - } - } else { - if (randomBoolean()) { - error = false; - toXContent = (ToXContentObject) (builder, params) -> builder.startObject() - .field("ok", "here") - .field("catastrophe", "") - .endObject(); - } else { - error = true; - toXContent = (ToXContentObject) (builder, params) -> builder.field("ok", "here").field("catastrophe", ""); - } - } - - String toString = Strings.toString(XContentType.JSON, toXContent); - if (error) { - assertThat(toString, containsString("\"error\":\"error building toString out of XContent:")); - assertThat(toString, containsString("\"stack_trace\":")); - } else { - assertThat(toString, containsString("\"ok\":\"here\"")); - assertThat(toString, containsString("\"catastrophe\":\"\"")); - } - } - - public void testToStringToXContentWithOrWithoutParams() { - ToXContent toXContent = (builder, params) -> builder.field("color_from_param", params.param("color", "red")); - // Rely on the default value of "color" param when params are not passed - assertThat(Strings.toString(XContentType.JSON, toXContent), containsString("\"color_from_param\":\"red\"")); - // Pass "color" param explicitly - assertThat( - Strings.toString(XContentType.JSON, toXContent, new ToXContent.MapParams(Collections.singletonMap("color", "blue"))), - containsString("\"color_from_param\":\"blue\"") - ); - } -} diff --git a/server/src/test/java/org/opensearch/common/UUIDTests.java b/server/src/test/java/org/opensearch/common/UUIDTests.java index a71b68bfe859e..1db6d2b415147 100644 --- a/server/src/test/java/org/opensearch/common/UUIDTests.java +++ b/server/src/test/java/org/opensearch/common/UUIDTests.java @@ -45,8 +45,8 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.tests.util.TestUtil; import org.opensearch.common.lucene.Lucene; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.test.OpenSearchTestCase; import org.hamcrest.Matchers; diff --git a/server/src/test/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainerTests.java b/server/src/test/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainerTests.java new file mode 100644 index 0000000000000..1780819390052 --- /dev/null +++ b/server/src/test/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainerTests.java @@ -0,0 +1,125 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.blobstore; + +import org.opensearch.common.Randomness; +import org.opensearch.common.blobstore.stream.read.ReadContext; +import org.opensearch.common.blobstore.stream.read.listener.ListenerTestUtils; +import org.opensearch.common.crypto.CryptoHandler; +import org.opensearch.common.crypto.DecryptedRangedStreamProvider; +import org.opensearch.common.io.InputStreamContainer; +import org.opensearch.core.action.ActionListener; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.function.UnaryOperator; + +import org.mockito.Mockito; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class AsyncMultiStreamEncryptedBlobContainerTests extends OpenSearchTestCase { + + // Tests the happy path scenario for decrypting a read context + @SuppressWarnings("unchecked") + public void testReadBlobAsync() throws Exception { + String testBlobName = "testBlobName"; + int size = 100; + + // Mock objects needed for the test + AsyncMultiStreamBlobContainer blobContainer = mock(AsyncMultiStreamBlobContainer.class); + CryptoHandler<Object, Object> cryptoHandler = mock(CryptoHandler.class); + Object cryptoContext = mock(Object.class); + when(cryptoHandler.loadEncryptionMetadata(any())).thenReturn(cryptoContext); + when(cryptoHandler.estimateDecryptedLength(any(), anyLong())).thenReturn((long) size); + long[] adjustedRanges = { 0, size - 1 }; + DecryptedRangedStreamProvider rangedStreamProvider = new DecryptedRangedStreamProvider(adjustedRanges, UnaryOperator.identity()); + when(cryptoHandler.createDecryptingStreamOfRange(eq(cryptoContext), anyLong(), anyLong())).thenReturn(rangedStreamProvider); + + // Objects needed for API call + final byte[] data = new byte[size]; + Randomness.get().nextBytes(data); + + final InputStreamContainer inputStreamContainer = new InputStreamContainer(new ByteArrayInputStream(data), data.length, 0); + final ListenerTestUtils.CountingCompletionListener<ReadContext> completionListener = + new ListenerTestUtils.CountingCompletionListener<>(); + final CompletableFuture<InputStreamContainer> streamContainerFuture = CompletableFuture.completedFuture(inputStreamContainer); + final ReadContext readContext = new ReadContext(size, List.of(() -> streamContainerFuture), null); + + Mockito.doAnswer(invocation -> { + ActionListener<ReadContext> readContextActionListener = invocation.getArgument(1); + readContextActionListener.onResponse(readContext); + return null; + }).when(blobContainer).readBlobAsync(eq(testBlobName), any()); + + AsyncMultiStreamEncryptedBlobContainer<Object, Object> asyncMultiStreamEncryptedBlobContainer = + new AsyncMultiStreamEncryptedBlobContainer<>(blobContainer, cryptoHandler); + asyncMultiStreamEncryptedBlobContainer.readBlobAsync(testBlobName, completionListener); + + // Assert results + ReadContext response = completionListener.getResponse(); + assertEquals(0, completionListener.getFailureCount()); + assertEquals(1, completionListener.getResponseCount()); + assertNull(completionListener.getException()); + + assertTrue(response instanceof AsyncMultiStreamEncryptedBlobContainer.DecryptedReadContext); + assertEquals(1, response.getNumberOfParts()); + assertEquals(size, response.getBlobSize()); + + 
InputStreamContainer responseContainer = response.getPartStreams().get(0).get().join(); + assertEquals(0, responseContainer.getOffset()); + assertEquals(size, responseContainer.getContentLength()); + assertEquals(100, responseContainer.getInputStream().available()); + } + + // Tests the exception scenario for decrypting a read context + @SuppressWarnings("unchecked") + public void testReadBlobAsyncException() throws Exception { + String testBlobName = "testBlobName"; + int size = 100; + + // Mock objects needed for the test + AsyncMultiStreamBlobContainer blobContainer = mock(AsyncMultiStreamBlobContainer.class); + CryptoHandler<Object, Object> cryptoHandler = mock(CryptoHandler.class); + when(cryptoHandler.loadEncryptionMetadata(any())).thenThrow(new IOException()); + + // Objects needed for API call + final byte[] data = new byte[size]; + Randomness.get().nextBytes(data); + final InputStreamContainer inputStreamContainer = new InputStreamContainer(new ByteArrayInputStream(data), data.length, 0); + final ListenerTestUtils.CountingCompletionListener<ReadContext> completionListener = + new ListenerTestUtils.CountingCompletionListener<>(); + final CompletableFuture<InputStreamContainer> streamContainerFuture = CompletableFuture.completedFuture(inputStreamContainer); + final ReadContext readContext = new ReadContext(size, List.of(() -> streamContainerFuture), null); + + Mockito.doAnswer(invocation -> { + ActionListener<ReadContext> readContextActionListener = invocation.getArgument(1); + readContextActionListener.onResponse(readContext); + return null; + }).when(blobContainer).readBlobAsync(eq(testBlobName), any()); + + AsyncMultiStreamEncryptedBlobContainer<Object, Object> asyncMultiStreamEncryptedBlobContainer = + new AsyncMultiStreamEncryptedBlobContainer<>(blobContainer, cryptoHandler); + asyncMultiStreamEncryptedBlobContainer.readBlobAsync(testBlobName, completionListener); + + // Assert results + assertEquals(1, completionListener.getFailureCount()); + assertEquals(0, completionListener.getResponseCount()); + assertNull(completionListener.getResponse()); + assertTrue(completionListener.getException() instanceof IOException); + } + +} diff --git a/server/src/test/java/org/opensearch/common/blobstore/fs/FsBlobContainerTests.java b/server/src/test/java/org/opensearch/common/blobstore/fs/FsBlobContainerTests.java index 4a2eeabeb7e58..f8a8f9cefacf5 100644 --- a/server/src/test/java/org/opensearch/common/blobstore/fs/FsBlobContainerTests.java +++ b/server/src/test/java/org/opensearch/common/blobstore/fs/FsBlobContainerTests.java @@ -34,7 +34,6 @@ import org.apache.lucene.tests.mockfile.FilterFileSystemProvider; import org.apache.lucene.tests.mockfile.FilterSeekableByteChannel; import org.apache.lucene.tests.util.LuceneTestCase; -import org.opensearch.action.ActionListener; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; @@ -42,6 +41,7 @@ import org.opensearch.common.io.PathUtilsForTesting; import org.opensearch.common.io.Streams; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.action.ActionListener; import org.opensearch.test.OpenSearchTestCase; import org.junit.After; import org.junit.Before; diff --git a/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/FilePartWriterTests.java b/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/FilePartWriterTests.java new file mode 100644 index 0000000000000..f2a758b9bbe10 --- 
/dev/null +++ b/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/FilePartWriterTests.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.blobstore.stream.read.listener; + +import org.opensearch.common.io.InputStreamContainer; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.io.ByteArrayInputStream; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.UUID; +import java.util.function.UnaryOperator; + +public class FilePartWriterTests extends OpenSearchTestCase { + + private Path path; + + @Before + public void init() throws Exception { + path = createTempDir("FilePartWriterTests"); + } + + public void testFilePartWriter() throws Exception { + Path segmentFilePath = path.resolve(UUID.randomUUID().toString()); + int contentLength = 100; + InputStream inputStream = new ByteArrayInputStream(randomByteArrayOfLength(contentLength)); + InputStreamContainer inputStreamContainer = new InputStreamContainer(inputStream, inputStream.available(), 0); + + FilePartWriter.write(segmentFilePath, inputStreamContainer, UnaryOperator.identity()); + + assertTrue(Files.exists(segmentFilePath)); + assertEquals(contentLength, Files.size(segmentFilePath)); + } + + public void testFilePartWriterWithOffset() throws Exception { + Path segmentFilePath = path.resolve(UUID.randomUUID().toString()); + int contentLength = 100; + int offset = 10; + InputStream inputStream = new ByteArrayInputStream(randomByteArrayOfLength(contentLength)); + InputStreamContainer inputStreamContainer = new InputStreamContainer(inputStream, inputStream.available(), offset); + + FilePartWriter.write(segmentFilePath, inputStreamContainer, UnaryOperator.identity()); + + assertTrue(Files.exists(segmentFilePath)); + assertEquals(contentLength + offset, Files.size(segmentFilePath)); + } + + public void testFilePartWriterLargeInput() throws Exception { + Path segmentFilePath = path.resolve(UUID.randomUUID().toString()); + int contentLength = 20 * 1024 * 1024; + InputStream inputStream = new ByteArrayInputStream(randomByteArrayOfLength(contentLength)); + InputStreamContainer inputStreamContainer = new InputStreamContainer(inputStream, contentLength, 0); + + FilePartWriter.write(segmentFilePath, inputStreamContainer, UnaryOperator.identity()); + + assertTrue(Files.exists(segmentFilePath)); + assertEquals(contentLength, Files.size(segmentFilePath)); + } +} diff --git a/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ListenerTestUtils.java b/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ListenerTestUtils.java new file mode 100644 index 0000000000000..a3a32f6db2148 --- /dev/null +++ b/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ListenerTestUtils.java @@ -0,0 +1,56 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.blobstore.stream.read.listener; + +import org.opensearch.core.action.ActionListener; + +/** + * Utility class containing common functionality for read-listener-based tests + */ +public class ListenerTestUtils { + + /** + * CountingCompletionListener is a verification listener that wraps listener-based calls. + * It keeps track of the last response and failure, and counts response and failure invocations. + */ + public static class CountingCompletionListener<T> implements ActionListener<T> { + private int responseCount; + private int failureCount; + private T response; + private Exception exception; + + @Override + public void onResponse(T response) { + this.response = response; + responseCount++; + } + + @Override + public void onFailure(Exception e) { + exception = e; + failureCount++; + } + + public int getResponseCount() { + return responseCount; + } + + public int getFailureCount() { + return failureCount; + } + + public T getResponse() { + return response; + } + + public Exception getException() { + return exception; + } + } +} diff --git a/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListenerTests.java b/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListenerTests.java new file mode 100644 index 0000000000000..0163c2275e7f4 --- /dev/null +++ b/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListenerTests.java @@ -0,0 +1,229 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.blobstore.stream.read.listener; + +import org.apache.lucene.tests.util.LuceneTestCase.SuppressFileSystems; +import org.opensearch.action.LatchedActionListener; +import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.common.blobstore.stream.read.ReadContext; +import org.opensearch.common.io.InputStreamContainer; +import org.opensearch.core.action.ActionListener; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; +import java.util.function.UnaryOperator; + +import static org.opensearch.common.blobstore.stream.read.listener.ListenerTestUtils.CountingCompletionListener; + +/* + WindowsFS simulates file handles on a best-effort basis. + On an actual Windows system, the deletion of an open file is performed as soon as the last handle + is closed, which this simulation does not account for. We therefore prevent the use of WindowsFS for these tests. 
+ */ +@SuppressFileSystems("WindowsFS") +public class ReadContextListenerTests extends OpenSearchTestCase { + + private Path path; + private static ThreadPool threadPool; + private static final int NUMBER_OF_PARTS = 5; + private static final int PART_SIZE = 10; + private static final String TEST_SEGMENT_FILE = "test_segment_file"; + private static final int MAX_CONCURRENT_STREAMS = 10; + + @BeforeClass + public static void setup() { + threadPool = new TestThreadPool(ReadContextListenerTests.class.getName()); + } + + @AfterClass + public static void cleanup() { + threadPool.shutdown(); + } + + @Before + public void init() throws Exception { + path = createTempDir("ReadContextListenerTests"); + } + + public void testReadContextListener() throws InterruptedException, IOException { + Path fileLocation = path.resolve(UUID.randomUUID().toString()); + List<ReadContext.StreamPartCreator> blobPartStreams = initializeBlobPartStreams(); + CountDownLatch countDownLatch = new CountDownLatch(1); + ActionListener<String> completionListener = new LatchedActionListener<>(new PlainActionFuture<>(), countDownLatch); + ReadContextListener readContextListener = new ReadContextListener( + TEST_SEGMENT_FILE, + fileLocation, + completionListener, + threadPool, + UnaryOperator.identity(), + MAX_CONCURRENT_STREAMS + ); + ReadContext readContext = new ReadContext((long) PART_SIZE * NUMBER_OF_PARTS, blobPartStreams, null); + readContextListener.onResponse(readContext); + + countDownLatch.await(); + + assertTrue(Files.exists(fileLocation)); + assertEquals(NUMBER_OF_PARTS * PART_SIZE, Files.size(fileLocation)); + } + + public void testReadContextListenerFailure() throws Exception { + Path fileLocation = path.resolve(UUID.randomUUID().toString()); + List<ReadContext.StreamPartCreator> blobPartStreams = initializeBlobPartStreams(); + CountDownLatch countDownLatch = new CountDownLatch(1); + ActionListener<String> completionListener = new LatchedActionListener<>(new PlainActionFuture<>(), countDownLatch); + ReadContextListener readContextListener = new ReadContextListener( + TEST_SEGMENT_FILE, + fileLocation, + completionListener, + threadPool, + UnaryOperator.identity(), + MAX_CONCURRENT_STREAMS + ); + InputStream badInputStream = new InputStream() { + + @Override + public int read(byte[] b, int off, int len) throws IOException { + return read(); + } + + @Override + public int read() throws IOException { + throw new IOException(); + } + + @Override + public int available() { + return PART_SIZE; + } + }; + + blobPartStreams.add( + NUMBER_OF_PARTS, + () -> CompletableFuture.supplyAsync( + () -> new InputStreamContainer(badInputStream, PART_SIZE, PART_SIZE * NUMBER_OF_PARTS), + threadPool.generic() + ) + ); + ReadContext readContext = new ReadContext((long) (PART_SIZE + 1) * NUMBER_OF_PARTS, blobPartStreams, null); + readContextListener.onResponse(readContext); + + countDownLatch.await(); + assertFalse(Files.exists(fileLocation)); + assertFalse(Files.exists(readContextListener.getTmpFileLocation())); + } + + public void testReadContextListenerException() { + Path fileLocation = path.resolve(UUID.randomUUID().toString()); + CountingCompletionListener<String> listener = new CountingCompletionListener<String>(); + ReadContextListener readContextListener = new ReadContextListener( + TEST_SEGMENT_FILE, + fileLocation, + listener, + threadPool, + UnaryOperator.identity(), + MAX_CONCURRENT_STREAMS + ); + IOException exception = new IOException(); + readContextListener.onFailure(exception); + assertEquals(1, 
listener.getFailureCount()); + assertEquals(exception, listener.getException()); + } + + public void testWriteToTempFile() throws Exception { + final String fileName = UUID.randomUUID().toString(); + Path fileLocation = path.resolve(fileName); + List<ReadContext.StreamPartCreator> blobPartStreams = initializeBlobPartStreams(); + CountDownLatch countDownLatch = new CountDownLatch(1); + ActionListener<String> completionListener = new LatchedActionListener<>(new PlainActionFuture<>(), countDownLatch); + ReadContextListener readContextListener = new ReadContextListener( + TEST_SEGMENT_FILE, + fileLocation, + completionListener, + threadPool, + UnaryOperator.identity(), + MAX_CONCURRENT_STREAMS + ); + ByteArrayInputStream assertingStream = new ByteArrayInputStream(randomByteArrayOfLength(PART_SIZE)) { + @Override + public int read(byte[] b) throws IOException { + assertTrue("parts written to temp file location", Files.exists(readContextListener.getTmpFileLocation())); + return super.read(b); + } + }; + blobPartStreams.add( + NUMBER_OF_PARTS, + () -> CompletableFuture.supplyAsync( + () -> new InputStreamContainer(assertingStream, PART_SIZE, PART_SIZE * NUMBER_OF_PARTS), + threadPool.generic() + ) + ); + ReadContext readContext = new ReadContext((long) (PART_SIZE + 1) * NUMBER_OF_PARTS + 1, blobPartStreams, null); + readContextListener.onResponse(readContext); + + countDownLatch.await(); + assertTrue(Files.exists(fileLocation)); + assertFalse(Files.exists(readContextListener.getTmpFileLocation())); + } + + public void testWriteToTempFile_alreadyExists_replacesFile() throws Exception { + final String fileName = UUID.randomUUID().toString(); + Path fileLocation = path.resolve(fileName); + // create an empty file at location. + Files.createFile(fileLocation); + assertEquals(0, Files.readAllBytes(fileLocation).length); + List<ReadContext.StreamPartCreator> blobPartStreams = initializeBlobPartStreams(); + CountDownLatch countDownLatch = new CountDownLatch(1); + ActionListener<String> completionListener = new LatchedActionListener<>(new PlainActionFuture<>(), countDownLatch); + ReadContextListener readContextListener = new ReadContextListener( + TEST_SEGMENT_FILE, + fileLocation, + completionListener, + threadPool, + UnaryOperator.identity(), + MAX_CONCURRENT_STREAMS + ); + ReadContext readContext = new ReadContext((long) (PART_SIZE + 1) * NUMBER_OF_PARTS, blobPartStreams, null); + readContextListener.onResponse(readContext); + + countDownLatch.await(); + assertTrue(Files.exists(fileLocation)); + assertEquals(50, Files.readAllBytes(fileLocation).length); + assertFalse(Files.exists(readContextListener.getTmpFileLocation())); + } + + private List<ReadContext.StreamPartCreator> initializeBlobPartStreams() { + List<ReadContext.StreamPartCreator> blobPartStreams = new ArrayList<>(); + for (int partNumber = 0; partNumber < NUMBER_OF_PARTS; partNumber++) { + InputStream testStream = new ByteArrayInputStream(randomByteArrayOfLength(PART_SIZE)); + int finalPartNumber = partNumber; + blobPartStreams.add( + () -> CompletableFuture.supplyAsync( + () -> new InputStreamContainer(testStream, PART_SIZE, (long) finalPartNumber * PART_SIZE), + threadPool.generic() + ) + ); + } + return blobPartStreams; + } +} diff --git a/server/src/test/java/org/opensearch/common/blobstore/transfer/RemoteTransferContainerTests.java b/server/src/test/java/org/opensearch/common/blobstore/transfer/RemoteTransferContainerTests.java index 48940a0d401fd..074f659850c7b 100644 --- 
a/server/src/test/java/org/opensearch/common/blobstore/transfer/RemoteTransferContainerTests.java +++ b/server/src/test/java/org/opensearch/common/blobstore/transfer/RemoteTransferContainerTests.java @@ -8,21 +8,36 @@ package org.opensearch.common.blobstore.transfer; -import org.junit.Before; -import org.opensearch.common.blobstore.stream.write.WriteContext; -import org.opensearch.common.io.InputStreamContainer; +import org.apache.lucene.store.AlreadyClosedException; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.RateLimiter; import org.opensearch.common.StreamContext; +import org.opensearch.common.blobstore.stream.write.WriteContext; import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.common.blobstore.transfer.stream.OffsetRangeFileInputStream; +import org.opensearch.common.blobstore.transfer.stream.OffsetRangeIndexInputStream; import org.opensearch.common.blobstore.transfer.stream.OffsetRangeInputStream; +import org.opensearch.common.blobstore.transfer.stream.RateLimitingOffsetRangeInputStream; import org.opensearch.common.blobstore.transfer.stream.ResettableCheckedInputStream; +import org.opensearch.common.io.InputStreamContainer; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; import java.io.IOException; +import java.io.InputStream; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; +import java.util.Arrays; import java.util.UUID; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Supplier; + +import org.mockito.Mockito; +import org.mockito.stubbing.Answer; public class RemoteTransferContainerTests extends OpenSearchTestCase { @@ -92,25 +107,37 @@ private void testSupplyStreamContext( int partCount = streamContext.getNumberOfParts(); assertEquals(expectedPartCount, partCount); Thread[] threads = new Thread[partCount]; + InputStream[] streams = new InputStream[partCount]; long totalContentLength = remoteTransferContainer.getContentLength(); assert partSize * (partCount - 1) + lastPartSize == totalContentLength : "part sizes and last part size don't add up to total content length"; logger.info("partSize: {}, lastPartSize: {}, partCount: {}", partSize, lastPartSize, streamContext.getNumberOfParts()); - for (int partIdx = 0; partIdx < partCount; partIdx++) { - int finalPartIdx = partIdx; - long expectedPartSize = (partIdx == partCount - 1) ? lastPartSize : partSize; - threads[partIdx] = new Thread(() -> { + try { + for (int partIdx = 0; partIdx < partCount; partIdx++) { + int finalPartIdx = partIdx; + long expectedPartSize = (partIdx == partCount - 1) ? 
lastPartSize : partSize; + threads[partIdx] = new Thread(() -> { + try { + InputStreamContainer inputStreamContainer = streamContext.provideStream(finalPartIdx); + streams[finalPartIdx] = inputStreamContainer.getInputStream(); + assertEquals(expectedPartSize, inputStreamContainer.getContentLength()); + } catch (IOException e) { + fail("IOException during stream creation"); + } + }); + threads[partIdx].start(); + } + for (int i = 0; i < partCount; i++) { + threads[i].join(); + } + } finally { + Arrays.stream(streams).forEach(stream -> { try { - InputStreamContainer inputStreamContainer = streamContext.provideStream(finalPartIdx); - assertEquals(expectedPartSize, inputStreamContainer.getContentLength()); + stream.close(); } catch (IOException e) { - fail("IOException during stream creation"); + throw new RuntimeException(e); } }); - threads[partIdx].start(); - } - for (int i = 0; i < partCount; i++) { - threads[i].join(); } } @@ -182,6 +209,7 @@ public OffsetRangeInputStream get(long size, long position) throws IOException { } private void testTypeOfProvidedStreams(boolean isRemoteDataIntegritySupported) throws IOException { + InputStream inputStream = null; try ( RemoteTransferContainer remoteTransferContainer = new RemoteTransferContainer( testFile.getFileName().toString(), @@ -201,12 +229,132 @@ public OffsetRangeInputStream get(long size, long position) throws IOException { ) { StreamContext streamContext = remoteTransferContainer.supplyStreamContext(16); InputStreamContainer inputStreamContainer = streamContext.provideStream(0); + inputStream = inputStreamContainer.getInputStream(); if (shouldOffsetInputStreamsBeChecked(isRemoteDataIntegritySupported)) { assertTrue(inputStreamContainer.getInputStream() instanceof ResettableCheckedInputStream); } else { assertTrue(inputStreamContainer.getInputStream() instanceof OffsetRangeInputStream); } assertThrows(RuntimeException.class, () -> remoteTransferContainer.supplyStreamContext(16)); + } finally { + if (inputStream != null) { + inputStream.close(); + } + } + } + + public void testCloseDuringOngoingReadOnStream() throws IOException, InterruptedException { + Supplier<RateLimiter> rateLimiterSupplier = Mockito.mock(Supplier.class); + Mockito.when(rateLimiterSupplier.get()).thenReturn(null); + CountDownLatch readInvokedLatch = new CountDownLatch(1); + AtomicBoolean readAfterClose = new AtomicBoolean(); + CountDownLatch streamClosed = new CountDownLatch(1); + AtomicBoolean indexInputClosed = new AtomicBoolean(); + AtomicInteger closedCount = new AtomicInteger(); + try ( + RemoteTransferContainer remoteTransferContainer = new RemoteTransferContainer( + testFile.getFileName().toString(), + testFile.getFileName().toString(), + TEST_FILE_SIZE_BYTES, + true, + WritePriority.NORMAL, + new RemoteTransferContainer.OffsetRangeInputStreamSupplier() { + @Override + public OffsetRangeInputStream get(long size, long position) throws IOException { + IndexInput indexInput = Mockito.mock(IndexInput.class); + Mockito.doAnswer(invocation -> { + indexInputClosed.set(true); + closedCount.incrementAndGet(); + return null; + }).when(indexInput).close(); + Mockito.when(indexInput.getFilePointer()).thenAnswer((Answer<Long>) invocation -> { + if (readAfterClose.get() == false) { + return 0L; + } + readInvokedLatch.countDown(); + boolean closedSuccess = streamClosed.await(30, TimeUnit.SECONDS); + assertTrue(closedSuccess); + assertFalse(indexInputClosed.get()); + return 0L; + }); + + OffsetRangeIndexInputStream offsetRangeIndexInputStream = new 
OffsetRangeIndexInputStream( + indexInput, + size, + position + ); + return new RateLimitingOffsetRangeInputStream(offsetRangeIndexInputStream, rateLimiterSupplier, null); + } + }, + 0, + true + ) + ) { + StreamContext streamContext = remoteTransferContainer.supplyStreamContext(16); + InputStreamContainer inputStreamContainer = streamContext.provideStream(0); + assertTrue(inputStreamContainer.getInputStream() instanceof RateLimitingOffsetRangeInputStream); + CountDownLatch latch = new CountDownLatch(1); + new Thread(() -> { + try { + readAfterClose.set(true); + inputStreamContainer.getInputStream().readAllBytes(); + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + latch.countDown(); + } + }).start(); + boolean successReadWait = readInvokedLatch.await(30, TimeUnit.SECONDS); + assertTrue(successReadWait); + // Close the stream here. Multiple invocations of close shouldn't throw any exception. + inputStreamContainer.getInputStream().close(); + inputStreamContainer.getInputStream().close(); + inputStreamContainer.getInputStream().close(); + streamClosed.countDown(); + boolean processed = latch.await(30, TimeUnit.SECONDS); + assertTrue(processed); + assertTrue(readAfterClose.get()); + assertTrue(indexInputClosed.get()); + + // Test multiple invocations of close; the close count should always be 1. + inputStreamContainer.getInputStream().close(); + inputStreamContainer.getInputStream().close(); + inputStreamContainer.getInputStream().close(); + assertEquals(1, closedCount.get()); + + } + } + + public void testReadAccessWhenStreamClosed() throws IOException { + Supplier<RateLimiter> rateLimiterSupplier = Mockito.mock(Supplier.class); + Mockito.when(rateLimiterSupplier.get()).thenReturn(null); + try ( + RemoteTransferContainer remoteTransferContainer = new RemoteTransferContainer( + testFile.getFileName().toString(), + testFile.getFileName().toString(), + TEST_FILE_SIZE_BYTES, + true, + WritePriority.NORMAL, + new RemoteTransferContainer.OffsetRangeInputStreamSupplier() { + @Override + public OffsetRangeInputStream get(long size, long position) throws IOException { + IndexInput indexInput = Mockito.mock(IndexInput.class); + OffsetRangeIndexInputStream offsetRangeIndexInputStream = new OffsetRangeIndexInputStream( + indexInput, + size, + position + ); + return new RateLimitingOffsetRangeInputStream(offsetRangeIndexInputStream, rateLimiterSupplier, null); + } + }, + 0, + true + ) + ) { + StreamContext streamContext = remoteTransferContainer.supplyStreamContext(16); + InputStreamContainer inputStreamContainer = streamContext.provideStream(0); + inputStreamContainer.getInputStream().close(); + assertThrows(AlreadyClosedException.class, () -> inputStreamContainer.getInputStream().readAllBytes()); } } diff --git a/server/src/test/java/org/opensearch/common/blobstore/transfer/stream/RateLimitingOffsetRangeInputStreamTests.java b/server/src/test/java/org/opensearch/common/blobstore/transfer/stream/RateLimitingOffsetRangeInputStreamTests.java new file mode 100644 index 0000000000000..fc2eba4c35e2a --- /dev/null +++ b/server/src/test/java/org/opensearch/common/blobstore/transfer/stream/RateLimitingOffsetRangeInputStreamTests.java @@ -0,0 +1,46 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.blobstore.transfer.stream; + +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.NIOFSDirectory; +import org.apache.lucene.store.RateLimiter; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; + +public class RateLimitingOffsetRangeInputStreamTests extends ResettableCheckedInputStreamBaseTest { + + private Directory directory; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + directory = new NIOFSDirectory(testFile.getParent()); + } + + @Override + protected OffsetRangeInputStream getOffsetRangeInputStream(long size, long position) throws IOException { + return new RateLimitingOffsetRangeInputStream( + new OffsetRangeIndexInputStream(directory.openInput(testFile.getFileName().toString(), IOContext.DEFAULT), size, position), + () -> new RateLimiter.SimpleRateLimiter(randomIntBetween(10, 20)), + (t) -> {} + ); + } + + @Override + @After + public void tearDown() throws Exception { + directory.close(); + super.tearDown(); + } +} diff --git a/server/src/test/java/org/opensearch/common/blobstore/transfer/stream/ResettableCheckedInputStreamBaseTest.java b/server/src/test/java/org/opensearch/common/blobstore/transfer/stream/ResettableCheckedInputStreamBaseTest.java index 07e86cd64524f..77763bfc78f26 100644 --- a/server/src/test/java/org/opensearch/common/blobstore/transfer/stream/ResettableCheckedInputStreamBaseTest.java +++ b/server/src/test/java/org/opensearch/common/blobstore/transfer/stream/ResettableCheckedInputStreamBaseTest.java @@ -8,9 +8,9 @@ package org.opensearch.common.blobstore.transfer.stream; +import org.opensearch.test.OpenSearchTestCase; import org.junit.After; import org.junit.Before; -import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; import java.nio.file.Files; diff --git a/server/src/test/java/org/opensearch/common/bytes/ByteBuffersBytesReferenceTests.java b/server/src/test/java/org/opensearch/common/bytes/ByteBuffersBytesReferenceTests.java index 02b99c3c4f649..f05e293c33269 100644 --- a/server/src/test/java/org/opensearch/common/bytes/ByteBuffersBytesReferenceTests.java +++ b/server/src/test/java/org/opensearch/common/bytes/ByteBuffersBytesReferenceTests.java @@ -10,9 +10,9 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.hamcrest.Matchers; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; +import org.hamcrest.Matchers; import java.io.IOException; import java.nio.ByteBuffer; diff --git a/server/src/test/java/org/opensearch/common/bytes/BytesArrayTests.java b/server/src/test/java/org/opensearch/common/bytes/BytesArrayTests.java index 754e51cb5d130..b7b03d2292e92 100644 --- a/server/src/test/java/org/opensearch/common/bytes/BytesArrayTests.java +++ b/server/src/test/java/org/opensearch/common/bytes/BytesArrayTests.java @@ -32,9 +32,9 @@ package org.opensearch.common.bytes; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.hamcrest.Matchers; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; +import org.hamcrest.Matchers; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/common/bytes/CompositeBytesReferenceTests.java b/server/src/test/java/org/opensearch/common/bytes/CompositeBytesReferenceTests.java index 2a692d345b4f9..2b9294873d012 100644 --- 
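The new RateLimitingOffsetRangeInputStreamTests class above wires a RateLimiter.SimpleRateLimiter into the offset-range stream. As a rough sketch of how such a decorator typically meters reads against Lucene's RateLimiter — a hypothetical class, inferred from the test setup rather than copied from the server module's RateLimitingOffsetRangeInputStream:

import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.function.Supplier;

import org.apache.lucene.store.RateLimiter;

// Sketch: count bytes as they are read and pause once the limiter's
// minimum pause-check threshold is crossed. A null limiter disables throttling,
// matching the tests above that stub the supplier to return null.
class RateLimitedInputStreamSketch extends FilterInputStream {
    private final Supplier<RateLimiter> limiterSupplier;
    private long bytesSinceLastPause;

    RateLimitedInputStreamSketch(InputStream in, Supplier<RateLimiter> limiterSupplier) {
        super(in);
        this.limiterSupplier = limiterSupplier;
    }

    @Override
    public int read(byte[] b, int off, int len) throws IOException {
        int n = super.read(b, off, len);
        RateLimiter limiter = limiterSupplier.get();
        if (n > 0 && limiter != null) {
            bytesSinceLastPause += n;
            if (bytesSinceLastPause > limiter.getMinPauseCheckBytes()) {
                limiter.pause(bytesSinceLastPause);
                bytesSinceLastPause = 0;
            }
        }
        return n;
    }
}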
a/server/src/test/java/org/opensearch/common/bytes/CompositeBytesReferenceTests.java +++ b/server/src/test/java/org/opensearch/common/bytes/CompositeBytesReferenceTests.java @@ -36,10 +36,10 @@ import org.apache.lucene.util.BytesRefIterator; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.io.stream.ReleasableBytesStreamOutput; -import org.hamcrest.Matchers; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.bytes.CompositeBytesReference; +import org.hamcrest.Matchers; import java.io.IOException; import java.util.ArrayList; diff --git a/server/src/test/java/org/opensearch/common/bytes/PagedBytesReferenceTests.java b/server/src/test/java/org/opensearch/common/bytes/PagedBytesReferenceTests.java index 4ea35b2a0175f..5cbc1ea9577c4 100644 --- a/server/src/test/java/org/opensearch/common/bytes/PagedBytesReferenceTests.java +++ b/server/src/test/java/org/opensearch/common/bytes/PagedBytesReferenceTests.java @@ -33,11 +33,11 @@ package org.opensearch.common.bytes; import org.apache.lucene.util.BytesRef; -import org.opensearch.core.common.util.ByteArray; -import org.hamcrest.Matchers; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.bytes.PagedBytesReference; +import org.opensearch.core.common.util.ByteArray; +import org.hamcrest.Matchers; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/common/bytes/RecyclingBytesStreamOutputTests.java b/server/src/test/java/org/opensearch/common/bytes/RecyclingBytesStreamOutputTests.java index 920684f06f83c..ede5658f0decd 100644 --- a/server/src/test/java/org/opensearch/common/bytes/RecyclingBytesStreamOutputTests.java +++ b/server/src/test/java/org/opensearch/common/bytes/RecyclingBytesStreamOutputTests.java @@ -36,7 +36,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.util.MockBigArrays; import org.opensearch.common.util.MockPageCacheRecycler; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/common/bytes/ReleasableBytesReferenceTests.java b/server/src/test/java/org/opensearch/common/bytes/ReleasableBytesReferenceTests.java index 0310ccb283635..169c17c96f817 100644 --- a/server/src/test/java/org/opensearch/common/bytes/ReleasableBytesReferenceTests.java +++ b/server/src/test/java/org/opensearch/common/bytes/ReleasableBytesReferenceTests.java @@ -34,11 +34,11 @@ import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.io.stream.ReleasableBytesStreamOutput; -import org.opensearch.core.common.util.ByteArray; -import org.hamcrest.Matchers; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.bytes.CompositeBytesReference; +import org.opensearch.core.common.util.ByteArray; +import org.hamcrest.Matchers; import java.io.IOException; import java.util.ArrayList; diff --git a/server/src/test/java/org/opensearch/common/cache/module/CacheModuleTests.java b/server/src/test/java/org/opensearch/common/cache/module/CacheModuleTests.java new file mode 100644 index 0000000000000..35d7877343909 --- /dev/null +++ 
b/server/src/test/java/org/opensearch/common/cache/module/CacheModuleTests.java @@ -0,0 +1,55 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache.module; + +import org.opensearch.common.cache.ICache; +import org.opensearch.common.settings.Settings; +import org.opensearch.plugins.CachePlugin; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.List; +import java.util.Map; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class CacheModuleTests extends OpenSearchTestCase { + + public void testWithMultiplePlugins() { + CachePlugin mockPlugin1 = mock(CachePlugin.class); + ICache.Factory factory1 = mock(ICache.Factory.class); + CachePlugin mockPlugin2 = mock(CachePlugin.class); + ICache.Factory factory2 = mock(ICache.Factory.class); + when(mockPlugin1.getCacheFactoryMap()).thenReturn(Map.of("cache1", factory1)); + when(mockPlugin2.getCacheFactoryMap()).thenReturn(Map.of("cache2", factory2)); + + CacheModule cacheModule = new CacheModule(List.of(mockPlugin1, mockPlugin2), Settings.EMPTY); + + Map<String, ICache.Factory> factoryMap = cacheModule.getCacheStoreTypeFactories(); + assertEquals(factoryMap.get("cache1"), factory1); + assertEquals(factoryMap.get("cache2"), factory2); + } + + public void testWithSameCacheStoreTypeAndName() { + CachePlugin mockPlugin1 = mock(CachePlugin.class); + ICache.Factory factory1 = mock(ICache.Factory.class); + CachePlugin mockPlugin2 = mock(CachePlugin.class); + ICache.Factory factory2 = mock(ICache.Factory.class); + when(factory1.getCacheName()).thenReturn("cache"); + when(factory2.getCacheName()).thenReturn("cache"); + when(mockPlugin1.getCacheFactoryMap()).thenReturn(Map.of("cache", factory1)); + when(mockPlugin2.getCacheFactoryMap()).thenReturn(Map.of("cache", factory2)); + + IllegalArgumentException ex = assertThrows( + IllegalArgumentException.class, + () -> new CacheModule(List.of(mockPlugin1, mockPlugin2), Settings.EMPTY) + ); + assertEquals("Cache name: cache is already registered", ex.getMessage()); + } +} diff --git a/server/src/test/java/org/opensearch/common/cache/service/CacheServiceTests.java b/server/src/test/java/org/opensearch/common/cache/service/CacheServiceTests.java new file mode 100644 index 0000000000000..b355161f6f310 --- /dev/null +++ b/server/src/test/java/org/opensearch/common/cache/service/CacheServiceTests.java @@ -0,0 +1,154 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.cache.service; + +import org.opensearch.common.cache.CacheType; +import org.opensearch.common.cache.ICache; +import org.opensearch.common.cache.RemovalListener; +import org.opensearch.common.cache.module.CacheModule; +import org.opensearch.common.cache.settings.CacheSettings; +import org.opensearch.common.cache.store.OpenSearchOnHeapCache; +import org.opensearch.common.cache.store.config.CacheConfig; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.plugins.CachePlugin; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.List; +import java.util.Map; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class CacheServiceTests extends OpenSearchTestCase { + + public void testWithCreateCacheForIndicesRequestCacheType() { + CachePlugin mockPlugin1 = mock(CachePlugin.class); + ICache.Factory factory1 = mock(ICache.Factory.class); + ICache.Factory onHeapCacheFactory = mock(OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.class); + Map<String, ICache.Factory> factoryMap = Map.of( + "cache1", + factory1, + OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME, + onHeapCacheFactory + ); + when(mockPlugin1.getCacheFactoryMap()).thenReturn(factoryMap); + + Setting<String> indicesRequestCacheSetting = CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE); + CacheService cacheService = new CacheService( + factoryMap, + Settings.builder().put(indicesRequestCacheSetting.getKey(), "cache1").build() + ); + CacheConfig<String, String> config = mock(CacheConfig.class); + ICache<String, String> mockOnHeapCache = mock(OpenSearchOnHeapCache.class); + when(onHeapCacheFactory.create(eq(config), eq(CacheType.INDICES_REQUEST_CACHE), any(Map.class))).thenReturn(mockOnHeapCache); + + ICache<String, String> ircCache = cacheService.createCache(config, CacheType.INDICES_REQUEST_CACHE); + assertEquals(mockOnHeapCache, ircCache); + } + + public void testWithCreateCacheForIndicesRequestCacheTypeWithFeatureFlagTrue() { + CachePlugin mockPlugin1 = mock(CachePlugin.class); + ICache.Factory factory1 = mock(ICache.Factory.class); + ICache.Factory onHeapCacheFactory = mock(OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.class); + Map<String, ICache.Factory> factoryMap = Map.of( + "cache1", + factory1, + OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME, + onHeapCacheFactory + ); + when(mockPlugin1.getCacheFactoryMap()).thenReturn(factoryMap); + + Setting<String> indicesRequestCacheSetting = CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE); + CacheService cacheService = new CacheService( + factoryMap, + Settings.builder().put(indicesRequestCacheSetting.getKey(), "cache1").put(FeatureFlags.PLUGGABLE_CACHE, "true").build() + ); + CacheConfig<String, String> config = mock(CacheConfig.class); + ICache<String, String> mockOnHeapCache = mock(OpenSearchOnHeapCache.class); + when(factory1.create(eq(config), eq(CacheType.INDICES_REQUEST_CACHE), any(Map.class))).thenReturn(mockOnHeapCache); + + ICache<String, String> ircCache = cacheService.createCache(config, CacheType.INDICES_REQUEST_CACHE); + assertEquals(mockOnHeapCache, ircCache); + } + + public void testWithCreateCacheForIndicesRequestCacheTypeWithFeatureFlagTrueAndStoreNameIsNull() { + 
CachePlugin mockPlugin1 = mock(CachePlugin.class); + ICache.Factory factory1 = mock(ICache.Factory.class); + ICache.Factory onHeapCacheFactory = mock(OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.class); + Map<String, ICache.Factory> factoryMap = Map.of( + "cache1", + factory1, + OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME, + onHeapCacheFactory + ); + when(mockPlugin1.getCacheFactoryMap()).thenReturn(factoryMap); + + CacheService cacheService = new CacheService(factoryMap, Settings.builder().put(FeatureFlags.PLUGGABLE_CACHE, "true").build()); + CacheConfig<String, String> config = mock(CacheConfig.class); + ICache<String, String> mockOnHeapCache = mock(OpenSearchOnHeapCache.class); + when(onHeapCacheFactory.create(eq(config), eq(CacheType.INDICES_REQUEST_CACHE), any(Map.class))).thenReturn(mockOnHeapCache); + + ICache<String, String> ircCache = cacheService.createCache(config, CacheType.INDICES_REQUEST_CACHE); + assertEquals(mockOnHeapCache, ircCache); + } + + public void testWithCreateCacheWithNoStoreNamePresentForCacheType() { + ICache.Factory factory1 = mock(ICache.Factory.class); + Map<String, ICache.Factory> factoryMap = Map.of("cache1", factory1); + CacheService cacheService = new CacheService(factoryMap, Settings.builder().build()); + + CacheConfig<String, String> config = mock(CacheConfig.class); + IllegalArgumentException ex = assertThrows( + IllegalArgumentException.class, + () -> cacheService.createCache(config, CacheType.INDICES_REQUEST_CACHE) + ); + assertEquals("No store name: [opensearch_onheap] is registered for cache type: INDICES_REQUEST_CACHE", ex.getMessage()); + } + + public void testWithCreateCacheWithDefaultStoreNameForIRC() { + CachePlugin mockPlugin1 = mock(CachePlugin.class); + ICache.Factory factory1 = mock(ICache.Factory.class); + Map<String, ICache.Factory> factoryMap = Map.of("cache1", factory1); + when(mockPlugin1.getCacheFactoryMap()).thenReturn(factoryMap); + + CacheModule cacheModule = new CacheModule(List.of(mockPlugin1), Settings.EMPTY); + CacheConfig<String, String> config = mock(CacheConfig.class); + when(config.getSettings()).thenReturn(Settings.EMPTY); + when(config.getWeigher()).thenReturn((k, v) -> 100); + when(config.getRemovalListener()).thenReturn(mock(RemovalListener.class)); + + CacheService cacheService = cacheModule.getCacheService(); + ICache<String, String> iCache = cacheService.createCache(config, CacheType.INDICES_REQUEST_CACHE); + assertTrue(iCache instanceof OpenSearchOnHeapCache); + } + + public void testWithCreateCacheWithInvalidStoreNameAssociatedForCacheType() { + ICache.Factory factory1 = mock(ICache.Factory.class); + Setting<String> indicesRequestCacheSetting = CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE); + Map<String, ICache.Factory> factoryMap = Map.of("cache1", factory1); + CacheService cacheService = new CacheService( + factoryMap, + Settings.builder().put(indicesRequestCacheSetting.getKey(), "cache").build() + ); + + CacheConfig<String, String> config = mock(CacheConfig.class); + ICache<String, String> onHeapCache = mock(OpenSearchOnHeapCache.class); + when(factory1.create(config, CacheType.INDICES_REQUEST_CACHE, factoryMap)).thenReturn(onHeapCache); + + IllegalArgumentException ex = assertThrows( + IllegalArgumentException.class, + () -> cacheService.createCache(config, CacheType.INDICES_REQUEST_CACHE) + ); + assertEquals("No store name: [opensearch_onheap] is registered for cache type: INDICES_REQUEST_CACHE", ex.getMessage()); + } +} diff --git 
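Taken together, the CacheService tests above encode one lookup rule: use the store name from the per-cache-type setting only when the pluggable-cache feature flag is enabled, fall back to the default on-heap store otherwise (or when no name is configured), and reject any name with no registered factory. A condensed sketch of that resolution logic — hypothetical types and method, inferred from the assertions above rather than copied from CacheService:

import java.util.Map;
import java.util.function.Supplier;

// Hypothetical condensation of the factory-resolution rule the tests assert.
final class CacheStoreResolverSketch {
    private final Map<String, Supplier<Object>> factories; // store name -> factory

    CacheStoreResolverSketch(Map<String, Supplier<Object>> factories) {
        this.factories = factories;
    }

    Object createCache(String configuredStoreName, String defaultStoreName, boolean pluggableCacheEnabled) {
        // Feature flag off, or no explicit setting: use the default on-heap store.
        String storeName = (pluggableCacheEnabled && configuredStoreName != null)
            ? configuredStoreName
            : defaultStoreName;
        Supplier<Object> factory = factories.get(storeName);
        if (factory == null) {
            // Mirrors the error message asserted in the tests above.
            throw new IllegalArgumentException("No store name: [" + storeName + "] is registered");
        }
        return factory.get();
    }
}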
a/server/src/test/java/org/opensearch/common/collect/EvictingQueueTests.java b/server/src/test/java/org/opensearch/common/collect/EvictingQueueTests.java index 9a9e0efcdb190..e976697bd59c8 100644 --- a/server/src/test/java/org/opensearch/common/collect/EvictingQueueTests.java +++ b/server/src/test/java/org/opensearch/common/collect/EvictingQueueTests.java @@ -29,7 +29,7 @@ package org.opensearch.common.collect; -import org.opensearch.common.util.CollectionUtils; +import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.test.OpenSearchTestCase; import java.util.Collections; diff --git a/server/src/test/java/org/opensearch/common/compress/AbstractCompressorTests.java b/server/src/test/java/org/opensearch/common/compress/AbstractCompressorTests.java deleted file mode 100644 index 67ab4e240a4ef..0000000000000 --- a/server/src/test/java/org/opensearch/common/compress/AbstractCompressorTests.java +++ /dev/null @@ -1,408 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.common.compress; - -import org.apache.lucene.tests.util.LineFileDocs; -import org.apache.lucene.tests.util.TestUtil; -import org.opensearch.test.OpenSearchTestCase; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.nio.charset.StandardCharsets; -import java.util.Random; -import java.util.concurrent.CountDownLatch; - -abstract class AbstractCompressorTests extends OpenSearchTestCase { - - public void testRandom() throws IOException { - Random r = random(); - for (int i = 0; i < 10; i++) { - final byte[] bytes = new byte[TestUtil.nextInt(r, 1, 100000)]; - r.nextBytes(bytes); - doTest(bytes); - } - } - - public void testRandomThreads() throws Exception { - final Random r = random(); - int threadCount = TestUtil.nextInt(r, 2, 6); - Thread[] threads = new Thread[threadCount]; - final CountDownLatch startingGun = new CountDownLatch(1); - for (int tid = 0; tid < threadCount; tid++) { - final long seed = r.nextLong(); - threads[tid] = new Thread() { - @Override - public void run() { - try { - Random r = new Random(seed); - startingGun.await(); - for (int i = 0; i < 10; i++) { - byte bytes[] = new byte[TestUtil.nextInt(r, 1, 100000)]; - r.nextBytes(bytes); - doTest(bytes); - } - } catch (Exception e) { - throw new RuntimeException(e); - } - } - }; - threads[tid].start(); - } - startingGun.countDown(); - for (Thread t : threads) { - t.join(); - } - } - - public void testLineDocs() throws IOException { - Random r = random(); - LineFileDocs lineFileDocs = new LineFileDocs(r); - for (int i = 0; i < 10; i++) { - int numDocs = TestUtil.nextInt(r, 1, 200); - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - for (int j = 0; j < numDocs; j++) { - String s = lineFileDocs.nextDoc().get("body"); - bos.write(s.getBytes(StandardCharsets.UTF_8)); - } - doTest(bos.toByteArray()); - } - lineFileDocs.close(); - } - - public void testLineDocsThreads() throws Exception { - final Random r = random(); - int threadCount = TestUtil.nextInt(r, 2, 6); - Thread[] threads = new Thread[threadCount]; - final CountDownLatch startingGun = new CountDownLatch(1); - for (int tid = 0; tid < threadCount; tid++) { - final long seed = r.nextLong(); - threads[tid] = new Thread() { - @Override - public void run() { - try { - Random 
r = new Random(seed); - startingGun.await(); - LineFileDocs lineFileDocs = new LineFileDocs(r); - for (int i = 0; i < 10; i++) { - int numDocs = TestUtil.nextInt(r, 1, 200); - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - for (int j = 0; j < numDocs; j++) { - String s = lineFileDocs.nextDoc().get("body"); - bos.write(s.getBytes(StandardCharsets.UTF_8)); - } - doTest(bos.toByteArray()); - } - lineFileDocs.close(); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - }; - threads[tid].start(); - } - startingGun.countDown(); - for (Thread t : threads) { - t.join(); - } - } - - public void testRepetitionsL() throws IOException { - Random r = random(); - for (int i = 0; i < 10; i++) { - int numLongs = TestUtil.nextInt(r, 1, 10000); - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - long theValue = r.nextLong(); - for (int j = 0; j < numLongs; j++) { - if (r.nextInt(10) == 0) { - theValue = r.nextLong(); - } - bos.write((byte) (theValue >>> 56)); - bos.write((byte) (theValue >>> 48)); - bos.write((byte) (theValue >>> 40)); - bos.write((byte) (theValue >>> 32)); - bos.write((byte) (theValue >>> 24)); - bos.write((byte) (theValue >>> 16)); - bos.write((byte) (theValue >>> 8)); - bos.write((byte) theValue); - } - doTest(bos.toByteArray()); - } - } - - public void testRepetitionsLThreads() throws Exception { - final Random r = random(); - int threadCount = TestUtil.nextInt(r, 2, 6); - Thread[] threads = new Thread[threadCount]; - final CountDownLatch startingGun = new CountDownLatch(1); - for (int tid = 0; tid < threadCount; tid++) { - final long seed = r.nextLong(); - threads[tid] = new Thread() { - @Override - public void run() { - try { - Random r = new Random(seed); - startingGun.await(); - for (int i = 0; i < 10; i++) { - int numLongs = TestUtil.nextInt(r, 1, 10000); - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - long theValue = r.nextLong(); - for (int j = 0; j < numLongs; j++) { - if (r.nextInt(10) == 0) { - theValue = r.nextLong(); - } - bos.write((byte) (theValue >>> 56)); - bos.write((byte) (theValue >>> 48)); - bos.write((byte) (theValue >>> 40)); - bos.write((byte) (theValue >>> 32)); - bos.write((byte) (theValue >>> 24)); - bos.write((byte) (theValue >>> 16)); - bos.write((byte) (theValue >>> 8)); - bos.write((byte) theValue); - } - doTest(bos.toByteArray()); - } - } catch (Exception e) { - throw new RuntimeException(e); - } - } - }; - threads[tid].start(); - } - startingGun.countDown(); - for (Thread t : threads) { - t.join(); - } - } - - public void testRepetitionsI() throws IOException { - Random r = random(); - for (int i = 0; i < 10; i++) { - int numInts = TestUtil.nextInt(r, 1, 20000); - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - int theValue = r.nextInt(); - for (int j = 0; j < numInts; j++) { - if (r.nextInt(10) == 0) { - theValue = r.nextInt(); - } - bos.write((byte) (theValue >>> 24)); - bos.write((byte) (theValue >>> 16)); - bos.write((byte) (theValue >>> 8)); - bos.write((byte) theValue); - } - doTest(bos.toByteArray()); - } - } - - public void testRepetitionsIThreads() throws Exception { - final Random r = random(); - int threadCount = TestUtil.nextInt(r, 2, 6); - Thread[] threads = new Thread[threadCount]; - final CountDownLatch startingGun = new CountDownLatch(1); - for (int tid = 0; tid < threadCount; tid++) { - final long seed = r.nextLong(); - threads[tid] = new Thread() { - @Override - public void run() { - try { - Random r = new Random(seed); - startingGun.await(); - for (int i = 0; i < 10; i++) { - 
int numInts = TestUtil.nextInt(r, 1, 20000); - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - int theValue = r.nextInt(); - for (int j = 0; j < numInts; j++) { - if (r.nextInt(10) == 0) { - theValue = r.nextInt(); - } - bos.write((byte) (theValue >>> 24)); - bos.write((byte) (theValue >>> 16)); - bos.write((byte) (theValue >>> 8)); - bos.write((byte) theValue); - } - doTest(bos.toByteArray()); - } - } catch (Exception e) { - throw new RuntimeException(e); - } - } - }; - threads[tid].start(); - } - startingGun.countDown(); - for (Thread t : threads) { - t.join(); - } - } - - public void testRepetitionsS() throws IOException { - Random r = random(); - for (int i = 0; i < 10; i++) { - int numShorts = TestUtil.nextInt(r, 1, 40000); - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - short theValue = (short) r.nextInt(65535); - for (int j = 0; j < numShorts; j++) { - if (r.nextInt(10) == 0) { - theValue = (short) r.nextInt(65535); - } - bos.write((byte) (theValue >>> 8)); - bos.write((byte) theValue); - } - doTest(bos.toByteArray()); - } - } - - public void testMixed() throws IOException { - Random r = random(); - LineFileDocs lineFileDocs = new LineFileDocs(r); - for (int i = 0; i < 2; ++i) { - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - int prevInt = r.nextInt(); - long prevLong = r.nextLong(); - while (bos.size() < 400000) { - switch (r.nextInt(4)) { - case 0: - addInt(r, prevInt, bos); - break; - case 1: - addLong(r, prevLong, bos); - break; - case 2: - addString(lineFileDocs, bos); - break; - case 3: - addBytes(r, bos); - break; - default: - throw new IllegalStateException("Random is broken"); - } - } - doTest(bos.toByteArray()); - } - } - - private void addLong(Random r, long prev, ByteArrayOutputStream bos) { - long theValue = prev; - if (r.nextInt(10) != 0) { - theValue = r.nextLong(); - } - bos.write((byte) (theValue >>> 56)); - bos.write((byte) (theValue >>> 48)); - bos.write((byte) (theValue >>> 40)); - bos.write((byte) (theValue >>> 32)); - bos.write((byte) (theValue >>> 24)); - bos.write((byte) (theValue >>> 16)); - bos.write((byte) (theValue >>> 8)); - bos.write((byte) theValue); - } - - private void addInt(Random r, int prev, ByteArrayOutputStream bos) { - int theValue = prev; - if (r.nextInt(10) != 0) { - theValue = r.nextInt(); - } - bos.write((byte) (theValue >>> 24)); - bos.write((byte) (theValue >>> 16)); - bos.write((byte) (theValue >>> 8)); - bos.write((byte) theValue); - } - - private void addString(LineFileDocs lineFileDocs, ByteArrayOutputStream bos) throws IOException { - String s = lineFileDocs.nextDoc().get("body"); - bos.write(s.getBytes(StandardCharsets.UTF_8)); - } - - private void addBytes(Random r, ByteArrayOutputStream bos) throws IOException { - byte bytes[] = new byte[TestUtil.nextInt(r, 1, 10000)]; - r.nextBytes(bytes); - bos.write(bytes); - } - - public void testRepetitionsSThreads() throws Exception { - final Random r = random(); - int threadCount = TestUtil.nextInt(r, 2, 6); - Thread[] threads = new Thread[threadCount]; - final CountDownLatch startingGun = new CountDownLatch(1); - for (int tid = 0; tid < threadCount; tid++) { - final long seed = r.nextLong(); - threads[tid] = new Thread() { - @Override - public void run() { - try { - Random r = new Random(seed); - startingGun.await(); - for (int i = 0; i < 10; i++) { - int numShorts = TestUtil.nextInt(r, 1, 40000); - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - short theValue = (short) r.nextInt(65535); - for (int j = 0; j < numShorts; j++) { - if 
(r.nextInt(10) == 0) { - theValue = (short) r.nextInt(65535); - } - bos.write((byte) (theValue >>> 8)); - bos.write((byte) theValue); - } - doTest(bos.toByteArray()); - } - } catch (Exception e) { - throw new RuntimeException(e); - } - } - }; - threads[tid].start(); - } - startingGun.countDown(); - for (Thread t : threads) { - t.join(); - } - } - - private void doTest(byte bytes[]) throws IOException { - InputStream rawIn = new ByteArrayInputStream(bytes); - Compressor c = compressor(); - - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - final Random r = random(); - int bufferSize = r.nextBoolean() ? 65535 : TestUtil.nextInt(random(), 1, 70000); - int prepadding = r.nextInt(70000); - int postpadding = r.nextInt(70000); - byte[] buffer = new byte[prepadding + bufferSize + postpadding]; - int len; - try (OutputStream os = c.threadLocalOutputStream(bos)) { - r.nextBytes(buffer); // fill block completely with junk - while ((len = rawIn.read(buffer, prepadding, bufferSize)) != -1) { - os.write(buffer, prepadding, len); - } - } - rawIn.close(); - - // now we have compressed byte array - InputStream in = c.threadLocalInputStream(new ByteArrayInputStream(bos.toByteArray())); - - // randomize constants again - bufferSize = r.nextBoolean() ? 65535 : TestUtil.nextInt(random(), 1, 70000); - prepadding = r.nextInt(70000); - postpadding = r.nextInt(70000); - buffer = new byte[prepadding + bufferSize + postpadding]; - r.nextBytes(buffer); // fill block completely with junk - - ByteArrayOutputStream uncompressedOut = new ByteArrayOutputStream(); - while ((len = in.read(buffer, prepadding, bufferSize)) != -1) { - uncompressedOut.write(buffer, prepadding, len); - } - uncompressedOut.close(); - - assertArrayEquals(bytes, uncompressedOut.toByteArray()); - } - - abstract Compressor compressor(); - -} diff --git a/server/src/test/java/org/opensearch/common/compress/DeflateCompressTests.java b/server/src/test/java/org/opensearch/common/compress/DeflateCompressTests.java index 6178dcac9a390..262a7ec40a8f0 100644 --- a/server/src/test/java/org/opensearch/common/compress/DeflateCompressTests.java +++ b/server/src/test/java/org/opensearch/common/compress/DeflateCompressTests.java @@ -32,15 +32,18 @@ package org.opensearch.common.compress; +import org.opensearch.core.compress.Compressor; +import org.opensearch.test.core.compress.AbstractCompressorTestCase; + /** * Test streaming compression (e.g. 
used for recovery) */ -public class DeflateCompressTests extends AbstractCompressorTests { +public class DeflateCompressTests extends AbstractCompressorTestCase { private final Compressor compressor = new DeflateCompressor(); @Override - Compressor compressor() { + protected Compressor compressor() { return compressor; } } diff --git a/server/src/test/java/org/opensearch/common/compress/DeflateCompressedXContentTests.java b/server/src/test/java/org/opensearch/common/compress/DeflateCompressedXContentTests.java index eacbcaced2dcd..5c9353d15e24a 100644 --- a/server/src/test/java/org/opensearch/common/compress/DeflateCompressedXContentTests.java +++ b/server/src/test/java/org/opensearch/common/compress/DeflateCompressedXContentTests.java @@ -33,8 +33,9 @@ package org.opensearch.common.compress; import org.apache.lucene.tests.util.TestUtil; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.compress.Compressor; import org.opensearch.test.OpenSearchTestCase; import org.junit.Assert; diff --git a/server/src/test/java/org/opensearch/common/compress/ZstdCompressTests.java b/server/src/test/java/org/opensearch/common/compress/ZstdCompressTests.java deleted file mode 100644 index b8de4a4e4bb1b..0000000000000 --- a/server/src/test/java/org/opensearch/common/compress/ZstdCompressTests.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.common.compress; - -/** - * Test streaming compression - */ -public class ZstdCompressTests extends AbstractCompressorTests { - - private final Compressor compressor = new ZstdCompressor(); - - @Override - Compressor compressor() { - return compressor; - } -} diff --git a/server/src/test/java/org/opensearch/common/concurrent/AutoCloseableRefCountedTests.java b/server/src/test/java/org/opensearch/common/concurrent/AutoCloseableRefCountedTests.java index 344368988f5ff..30ed6781d7bfb 100644 --- a/server/src/test/java/org/opensearch/common/concurrent/AutoCloseableRefCountedTests.java +++ b/server/src/test/java/org/opensearch/common/concurrent/AutoCloseableRefCountedTests.java @@ -13,9 +13,9 @@ package org.opensearch.common.concurrent; -import org.junit.Before; import org.opensearch.common.util.concurrent.RefCounted; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; import static org.mockito.Mockito.atMostOnce; import static org.mockito.Mockito.mock; diff --git a/server/src/test/java/org/opensearch/common/concurrent/GatedCloseableTests.java b/server/src/test/java/org/opensearch/common/concurrent/GatedCloseableTests.java index 0645f971b8d63..c8cabf391abfa 100644 --- a/server/src/test/java/org/opensearch/common/concurrent/GatedCloseableTests.java +++ b/server/src/test/java/org/opensearch/common/concurrent/GatedCloseableTests.java @@ -13,8 +13,8 @@ package org.opensearch.common.concurrent; -import org.junit.Before; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; import java.io.IOException; import java.nio.file.FileSystem; diff --git a/server/src/test/java/org/opensearch/common/concurrent/OneWayGateTests.java b/server/src/test/java/org/opensearch/common/concurrent/OneWayGateTests.java index 357bf3ae321f8..87f6600a57483 100644 --- 
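The deleted AbstractCompressorTests (and the ZstdCompressTests subclass) were consolidated into a shared AbstractCompressorTestCase in the test framework; every concrete compressor test still reduces to the same invariant — a byte-exact compress/decompress round trip across random buffer sizes and paddings. A self-contained, JDK-only illustration of that invariant, using plain java.util.zip DEFLATE as a stand-in for the OpenSearch Compressor interface:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.InflaterInputStream;

// Sketch of the round-trip invariant the compressor test base class asserts.
final class RoundTripSketch {
    static byte[] roundTrip(byte[] input) throws IOException {
        // Compress.
        ByteArrayOutputStream compressed = new ByteArrayOutputStream();
        try (DeflaterOutputStream out = new DeflaterOutputStream(compressed)) {
            out.write(input);
        }
        // Decompress.
        ByteArrayOutputStream restored = new ByteArrayOutputStream();
        try (InflaterInputStream in = new InflaterInputStream(new ByteArrayInputStream(compressed.toByteArray()))) {
            in.transferTo(restored);
        }
        return restored.toByteArray(); // must equal input byte-for-byte
    }
}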
a/server/src/test/java/org/opensearch/common/concurrent/OneWayGateTests.java +++ b/server/src/test/java/org/opensearch/common/concurrent/OneWayGateTests.java @@ -13,8 +13,8 @@ package org.opensearch.common.concurrent; -import org.junit.Before; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; public class OneWayGateTests extends OpenSearchTestCase { diff --git a/server/src/test/java/org/opensearch/common/concurrent/RefCountedReleasableTests.java b/server/src/test/java/org/opensearch/common/concurrent/RefCountedReleasableTests.java index 63c0873f1593d..d32cfbdf3b7b6 100644 --- a/server/src/test/java/org/opensearch/common/concurrent/RefCountedReleasableTests.java +++ b/server/src/test/java/org/opensearch/common/concurrent/RefCountedReleasableTests.java @@ -13,8 +13,8 @@ package org.opensearch.common.concurrent; -import org.junit.Before; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; import java.util.concurrent.atomic.AtomicInteger; diff --git a/server/src/test/java/org/opensearch/common/geo/BaseGeoParsingTestCase.java b/server/src/test/java/org/opensearch/common/geo/BaseGeoParsingTestCase.java index 32d3629a39366..2c8b8e6d9f964 100644 --- a/server/src/test/java/org/opensearch/common/geo/BaseGeoParsingTestCase.java +++ b/server/src/test/java/org/opensearch/common/geo/BaseGeoParsingTestCase.java @@ -38,11 +38,6 @@ import org.opensearch.index.mapper.GeoShapeIndexer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.hamcrest.OpenSearchGeoAssertions; -import org.locationtech.jts.geom.Geometry; -import org.locationtech.jts.geom.GeometryFactory; -import org.locationtech.spatial4j.shape.Shape; -import org.locationtech.spatial4j.shape.ShapeCollection; -import org.locationtech.spatial4j.shape.jts.JtsGeometry; import java.io.IOException; import java.text.ParseException; @@ -50,6 +45,12 @@ import java.util.Arrays; import java.util.List; +import org.locationtech.jts.geom.Geometry; +import org.locationtech.jts.geom.GeometryFactory; +import org.locationtech.spatial4j.shape.Shape; +import org.locationtech.spatial4j.shape.ShapeCollection; +import org.locationtech.spatial4j.shape.jts.JtsGeometry; + import static org.opensearch.common.geo.builders.ShapeBuilder.SPATIAL_CONTEXT; /** Base class for all geo parsing tests */ diff --git a/server/src/test/java/org/opensearch/common/geo/GeoBoundingBoxTests.java b/server/src/test/java/org/opensearch/common/geo/GeoBoundingBoxTests.java index 507ffcc83eb1a..34451ca407163 100644 --- a/server/src/test/java/org/opensearch/common/geo/GeoBoundingBoxTests.java +++ b/server/src/test/java/org/opensearch/common/geo/GeoBoundingBoxTests.java @@ -34,8 +34,8 @@ import org.apache.lucene.geo.GeoEncodingUtils; import org.opensearch.OpenSearchParseException; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.geo.GeometryTestUtils; import org.opensearch.geometry.Rectangle; diff --git a/server/src/test/java/org/opensearch/common/geo/GeoDistanceTests.java b/server/src/test/java/org/opensearch/common/geo/GeoDistanceTests.java index d78fd27a8172b..0fad75d556be6 100644 --- a/server/src/test/java/org/opensearch/common/geo/GeoDistanceTests.java +++ b/server/src/test/java/org/opensearch/common/geo/GeoDistanceTests.java @@ -32,8 +32,8 @@ package org.opensearch.common.geo; import org.opensearch.common.io.stream.BytesStreamOutput; -import 
org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.unit.DistanceUnit; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/common/geo/GeoJsonParserTests.java b/server/src/test/java/org/opensearch/common/geo/GeoJsonParserTests.java index 9ab0f41f33762..c7a4a9e4865b7 100644 --- a/server/src/test/java/org/opensearch/common/geo/GeoJsonParserTests.java +++ b/server/src/test/java/org/opensearch/common/geo/GeoJsonParserTests.java @@ -32,7 +32,6 @@ package org.opensearch.common.geo; -import org.opensearch.common.Strings; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.xcontent.XContentBuilder; @@ -479,85 +478,84 @@ public void testParseInvalidMultipoint() throws IOException { public void testParseInvalidDimensionalMultiPolygon() throws IOException { // test invalid multipolygon (an "accidental" polygon with inner rings outside outer ring) - String multiPolygonGeoJson = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .field("type", "MultiPolygon") - .startArray("coordinates") - .startArray()// first poly (without holes) - .startArray() - .startArray() - .value(102.0) - .value(2.0) - .endArray() - .startArray() - .value(103.0) - .value(2.0) - .endArray() - .startArray() - .value(103.0) - .value(3.0) - .endArray() - .startArray() - .value(102.0) - .value(3.0) - .endArray() - .startArray() - .value(102.0) - .value(2.0) - .endArray() - .endArray() - .endArray() - .startArray()// second poly (with hole) - .startArray() - .startArray() - .value(100.0) - .value(0.0) - .endArray() - .startArray() - .value(101.0) - .value(0.0) - .endArray() - .startArray() - .value(101.0) - .value(1.0) - .endArray() - .startArray() - .value(100.0) - .value(1.0) - .endArray() - .startArray() - .value(100.0) - .value(0.0) - .endArray() - .endArray() - .startArray()// hole - .startArray() - .value(100.2) - .value(0.8) - .endArray() - .startArray() - .value(100.2) - .value(0.2) - .value(10.0) - .endArray() - .startArray() - .value(100.8) - .value(0.2) - .endArray() - .startArray() - .value(100.8) - .value(0.8) - .endArray() - .startArray() - .value(100.2) - .value(0.8) - .endArray() - .endArray() - .endArray() - .endArray() - .endObject() - ); + String multiPolygonGeoJson = XContentFactory.jsonBuilder() + .startObject() + .field("type", "MultiPolygon") + .startArray("coordinates") + .startArray()// first poly (without holes) + .startArray() + .startArray() + .value(102.0) + .value(2.0) + .endArray() + .startArray() + .value(103.0) + .value(2.0) + .endArray() + .startArray() + .value(103.0) + .value(3.0) + .endArray() + .startArray() + .value(102.0) + .value(3.0) + .endArray() + .startArray() + .value(102.0) + .value(2.0) + .endArray() + .endArray() + .endArray() + .startArray()// second poly (with hole) + .startArray() + .startArray() + .value(100.0) + .value(0.0) + .endArray() + .startArray() + .value(101.0) + .value(0.0) + .endArray() + .startArray() + .value(101.0) + .value(1.0) + .endArray() + .startArray() + .value(100.0) + .value(1.0) + .endArray() + .startArray() + .value(100.0) + .value(0.0) + .endArray() + .endArray() + .startArray()// hole + .startArray() + .value(100.2) + .value(0.8) + .endArray() + .startArray() + .value(100.2) + .value(0.2) + .value(10.0) + .endArray() + .startArray() + .value(100.8) + .value(0.2) + .endArray() + .startArray() + 
.value(100.8) + .value(0.8) + .endArray() + .startArray() + .value(100.2) + .value(0.8) + .endArray() + .endArray() + .endArray() + .endArray() + .endObject() + .toString(); try (XContentParser parser = createParser(JsonXContent.jsonXContent, multiPolygonGeoJson)) { parser.nextToken(); @@ -572,24 +570,23 @@ public void testParseInvalidPolygon() throws IOException { * per the GeoJSON specification */ // test case 1: create an invalid polygon with only 2 points - String invalidPoly = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .field("type", "polygon") - .startArray("coordinates") - .startArray() - .startArray() - .value(-74.011) - .value(40.753) - .endArray() - .startArray() - .value(-75.022) - .value(41.783) - .endArray() - .endArray() - .endArray() - .endObject() - ); + String invalidPoly = XContentFactory.jsonBuilder() + .startObject() + .field("type", "polygon") + .startArray("coordinates") + .startArray() + .startArray() + .value(-74.011) + .value(40.753) + .endArray() + .startArray() + .value(-75.022) + .value(41.783) + .endArray() + .endArray() + .endArray() + .endObject() + .toString(); try (XContentParser parser = createParser(JsonXContent.jsonXContent, invalidPoly)) { parser.nextToken(); expectThrows(XContentParseException.class, () -> new GeoJson(true, false, new GeographyValidator(false)).fromXContent(parser)); @@ -597,20 +594,19 @@ public void testParseInvalidPolygon() throws IOException { } // test case 2: create an invalid polygon with only 1 point - invalidPoly = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .field("type", "polygon") - .startArray("coordinates") - .startArray() - .startArray() - .value(-74.011) - .value(40.753) - .endArray() - .endArray() - .endArray() - .endObject() - ); + invalidPoly = XContentFactory.jsonBuilder() + .startObject() + .field("type", "polygon") + .startArray("coordinates") + .startArray() + .startArray() + .value(-74.011) + .value(40.753) + .endArray() + .endArray() + .endArray() + .endObject() + .toString(); try (XContentParser parser = createParser(JsonXContent.jsonXContent, invalidPoly)) { parser.nextToken(); @@ -619,18 +615,17 @@ public void testParseInvalidPolygon() throws IOException { } // test case 3: create an invalid polygon with 0 points - invalidPoly = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .field("type", "polygon") - .startArray("coordinates") - .startArray() - .startArray() - .endArray() - .endArray() - .endArray() - .endObject() - ); + invalidPoly = XContentFactory.jsonBuilder() + .startObject() + .field("type", "polygon") + .startArray("coordinates") + .startArray() + .startArray() + .endArray() + .endArray() + .endArray() + .endObject() + .toString(); try (XContentParser parser = createParser(JsonXContent.jsonXContent, invalidPoly)) { parser.nextToken(); @@ -639,20 +634,19 @@ public void testParseInvalidPolygon() throws IOException { } // test case 4: create an invalid polygon with null value points - invalidPoly = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .field("type", "polygon") - .startArray("coordinates") - .startArray() - .startArray() - .nullValue() - .nullValue() - .endArray() - .endArray() - .endArray() - .endObject() - ); + invalidPoly = XContentFactory.jsonBuilder() + .startObject() + .field("type", "polygon") + .startArray("coordinates") + .startArray() + .startArray() + .nullValue() + .nullValue() + .endArray() + .endArray() + .endArray() + .endObject() + .toString(); try (XContentParser parser = 
createParser(JsonXContent.jsonXContent, invalidPoly)) { parser.nextToken(); @@ -661,16 +655,15 @@ public void testParseInvalidPolygon() throws IOException { } // test case 5: create an invalid polygon with 1 invalid LinearRing - invalidPoly = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .field("type", "polygon") - .startArray("coordinates") - .nullValue() - .nullValue() - .endArray() - .endObject() - ); + invalidPoly = XContentFactory.jsonBuilder() + .startObject() + .field("type", "polygon") + .startArray("coordinates") + .nullValue() + .nullValue() + .endArray() + .endObject() + .toString(); try (XContentParser parser = createParser(JsonXContent.jsonXContent, invalidPoly)) { parser.nextToken(); @@ -679,9 +672,13 @@ public void testParseInvalidPolygon() throws IOException { } // test case 6: create an invalid polygon with 0 LinearRings - invalidPoly = Strings.toString( - XContentFactory.jsonBuilder().startObject().field("type", "polygon").startArray("coordinates").endArray().endObject() - ); + invalidPoly = XContentFactory.jsonBuilder() + .startObject() + .field("type", "polygon") + .startArray("coordinates") + .endArray() + .endObject() + .toString(); try (XContentParser parser = createParser(JsonXContent.jsonXContent, invalidPoly)) { parser.nextToken(); @@ -690,18 +687,17 @@ public void testParseInvalidPolygon() throws IOException { } // test case 7: create an invalid polygon with 0 LinearRings - invalidPoly = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .field("type", "polygon") - .startArray("coordinates") - .startArray() - .value(-74.011) - .value(40.753) - .endArray() - .endArray() - .endObject() - ); + invalidPoly = XContentFactory.jsonBuilder() + .startObject() + .field("type", "polygon") + .startArray("coordinates") + .startArray() + .value(-74.011) + .value(40.753) + .endArray() + .endArray() + .endObject() + .toString(); try (XContentParser parser = createParser(JsonXContent.jsonXContent, invalidPoly)) { parser.nextToken(); diff --git a/server/src/test/java/org/opensearch/common/geo/GeoJsonSerializationTests.java b/server/src/test/java/org/opensearch/common/geo/GeoJsonSerializationTests.java index 23855c8f8dc91..480d8dba8f78f 100644 --- a/server/src/test/java/org/opensearch/common/geo/GeoJsonSerializationTests.java +++ b/server/src/test/java/org/opensearch/common/geo/GeoJsonSerializationTests.java @@ -32,16 +32,16 @@ package org.opensearch.common.geo; +import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.geo.GeometryTestUtils; import org.opensearch.geometry.Geometry; import org.opensearch.geometry.utils.GeographyValidator; @@ -153,7 +153,7 @@ public void testToMap() throws IOException { StreamInput input = BytesReference.bytes(builder).streamInput(); try ( - XContentParser parser = XContentType.JSON.xContent() + XContentParser parser = 
MediaTypeRegistry.JSON.xContent() .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, input) ) { Map<String, Object> map = GeoJson.toMap(geometry); diff --git a/server/src/test/java/org/opensearch/common/geo/GeoJsonShapeParserTests.java b/server/src/test/java/org/opensearch/common/geo/GeoJsonShapeParserTests.java index f504841b01be9..e16dd859ec14c 100644 --- a/server/src/test/java/org/opensearch/common/geo/GeoJsonShapeParserTests.java +++ b/server/src/test/java/org/opensearch/common/geo/GeoJsonShapeParserTests.java @@ -35,14 +35,13 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.Strings; import org.opensearch.common.UUIDs; import org.opensearch.common.geo.parsers.ShapeParser; import org.opensearch.common.settings.Settings; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; import org.opensearch.geometry.Geometry; import org.opensearch.geometry.GeometryCollection; import org.opensearch.geometry.Line; @@ -54,6 +53,14 @@ import org.opensearch.index.mapper.Mapper; import org.opensearch.test.VersionUtils; import org.opensearch.test.hamcrest.OpenSearchGeoAssertions; + +import java.io.IOException; +import java.text.ParseException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + import org.locationtech.jts.geom.Coordinate; import org.locationtech.jts.geom.LineString; import org.locationtech.jts.geom.LinearRing; @@ -68,13 +75,6 @@ import org.locationtech.spatial4j.shape.ShapeCollection; import org.locationtech.spatial4j.shape.jts.JtsPoint; -import java.io.IOException; -import java.text.ParseException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; - import static org.opensearch.common.geo.builders.ShapeBuilder.SPATIAL_CONTEXT; /** @@ -553,82 +553,81 @@ public void testParseInvalidMultipoint() throws IOException { public void testParseInvalidMultiPolygon() throws IOException { // test invalid multipolygon (an "accidental" polygon with inner rings outside outer ring) - String multiPolygonGeoJson = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .field("type", "MultiPolygon") - .startArray("coordinates") - .startArray()// one poly (with two holes) - .startArray() - .startArray() - .value(102.0) - .value(2.0) - .endArray() - .startArray() - .value(103.0) - .value(2.0) - .endArray() - .startArray() - .value(103.0) - .value(3.0) - .endArray() - .startArray() - .value(102.0) - .value(3.0) - .endArray() - .startArray() - .value(102.0) - .value(2.0) - .endArray() - .endArray() - .startArray()// first hole - .startArray() - .value(100.0) - .value(0.0) - .endArray() - .startArray() - .value(101.0) - .value(0.0) - .endArray() - .startArray() - .value(101.0) - .value(1.0) - .endArray() - .startArray() - .value(100.0) - .value(1.0) - .endArray() - .startArray() - .value(100.0) - .value(0.0) - .endArray() - .endArray() - .startArray()// second hole - .startArray() - .value(100.2) - .value(0.8) - .endArray() - .startArray() - .value(100.2) - .value(0.2) - .endArray() - .startArray() - .value(100.8) - .value(0.2) - .endArray() - .startArray() - .value(100.8) 
- .value(0.8) - .endArray() - .startArray() - .value(100.2) - .value(0.8) - .endArray() - .endArray() - .endArray() - .endArray() - .endObject() - ); + String multiPolygonGeoJson = XContentFactory.jsonBuilder() + .startObject() + .field("type", "MultiPolygon") + .startArray("coordinates") + .startArray()// one poly (with two holes) + .startArray() + .startArray() + .value(102.0) + .value(2.0) + .endArray() + .startArray() + .value(103.0) + .value(2.0) + .endArray() + .startArray() + .value(103.0) + .value(3.0) + .endArray() + .startArray() + .value(102.0) + .value(3.0) + .endArray() + .startArray() + .value(102.0) + .value(2.0) + .endArray() + .endArray() + .startArray()// first hole + .startArray() + .value(100.0) + .value(0.0) + .endArray() + .startArray() + .value(101.0) + .value(0.0) + .endArray() + .startArray() + .value(101.0) + .value(1.0) + .endArray() + .startArray() + .value(100.0) + .value(1.0) + .endArray() + .startArray() + .value(100.0) + .value(0.0) + .endArray() + .endArray() + .startArray()// second hole + .startArray() + .value(100.2) + .value(0.8) + .endArray() + .startArray() + .value(100.2) + .value(0.2) + .endArray() + .startArray() + .value(100.8) + .value(0.2) + .endArray() + .startArray() + .value(100.8) + .value(0.8) + .endArray() + .startArray() + .value(100.2) + .value(0.8) + .endArray() + .endArray() + .endArray() + .endArray() + .endObject() + .toString(); try (XContentParser parser = createParser(JsonXContent.jsonXContent, multiPolygonGeoJson)) { parser.nextToken(); @@ -639,85 +638,84 @@ public void testParseInvalidMultiPolygon() throws IOException { public void testParseInvalidDimensionalMultiPolygon() throws IOException { // test invalid multipolygon (an "accidental" polygon with inner rings outside outer ring) - String multiPolygonGeoJson = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .field("type", "MultiPolygon") - .startArray("coordinates") - .startArray()// first poly (without holes) - .startArray() - .startArray() - .value(102.0) - .value(2.0) - .endArray() - .startArray() - .value(103.0) - .value(2.0) - .endArray() - .startArray() - .value(103.0) - .value(3.0) - .endArray() - .startArray() - .value(102.0) - .value(3.0) - .endArray() - .startArray() - .value(102.0) - .value(2.0) - .endArray() - .endArray() - .endArray() - .startArray()// second poly (with hole) - .startArray() - .startArray() - .value(100.0) - .value(0.0) - .endArray() - .startArray() - .value(101.0) - .value(0.0) - .endArray() - .startArray() - .value(101.0) - .value(1.0) - .endArray() - .startArray() - .value(100.0) - .value(1.0) - .endArray() - .startArray() - .value(100.0) - .value(0.0) - .endArray() - .endArray() - .startArray()// hole - .startArray() - .value(100.2) - .value(0.8) - .endArray() - .startArray() - .value(100.2) - .value(0.2) - .value(10.0) - .endArray() - .startArray() - .value(100.8) - .value(0.2) - .endArray() - .startArray() - .value(100.8) - .value(0.8) - .endArray() - .startArray() - .value(100.2) - .value(0.8) - .endArray() - .endArray() - .endArray() - .endArray() - .endObject() - ); + String multiPolygonGeoJson = XContentFactory.jsonBuilder() + .startObject() + .field("type", "MultiPolygon") + .startArray("coordinates") + .startArray()// first poly (without holes) + .startArray() + .startArray() + .value(102.0) + .value(2.0) + .endArray() + .startArray() + .value(103.0) + .value(2.0) + .endArray() + .startArray() + .value(103.0) + .value(3.0) + .endArray() + .startArray() + .value(102.0) + .value(3.0) + .endArray() + .startArray() + 
.value(102.0) + .value(2.0) + .endArray() + .endArray() + .endArray() + .startArray()// second poly (with hole) + .startArray() + .startArray() + .value(100.0) + .value(0.0) + .endArray() + .startArray() + .value(101.0) + .value(0.0) + .endArray() + .startArray() + .value(101.0) + .value(1.0) + .endArray() + .startArray() + .value(100.0) + .value(1.0) + .endArray() + .startArray() + .value(100.0) + .value(0.0) + .endArray() + .endArray() + .startArray()// hole + .startArray() + .value(100.2) + .value(0.8) + .endArray() + .startArray() + .value(100.2) + .value(0.2) + .value(10.0) + .endArray() + .startArray() + .value(100.8) + .value(0.2) + .endArray() + .startArray() + .value(100.8) + .value(0.8) + .endArray() + .startArray() + .value(100.2) + .value(0.8) + .endArray() + .endArray() + .endArray() + .endArray() + .endObject() + .toString(); try (XContentParser parser = createParser(JsonXContent.jsonXContent, multiPolygonGeoJson)) { parser.nextToken(); @@ -728,40 +726,39 @@ public void testParseInvalidDimensionalMultiPolygon() throws IOException { public void testParseOGCPolygonWithoutHoles() throws IOException, ParseException { // test 1: ccw poly not crossing dateline - String polygonGeoJson = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .field("type", "Polygon") - .startArray("coordinates") - .startArray() - .startArray() - .value(176.0) - .value(15.0) - .endArray() - .startArray() - .value(-177.0) - .value(10.0) - .endArray() - .startArray() - .value(-177.0) - .value(-10.0) - .endArray() - .startArray() - .value(176.0) - .value(-15.0) - .endArray() - .startArray() - .value(172.0) - .value(0.0) - .endArray() - .startArray() - .value(176.0) - .value(15.0) - .endArray() - .endArray() - .endArray() - .endObject() - ); + String polygonGeoJson = XContentFactory.jsonBuilder() + .startObject() + .field("type", "Polygon") + .startArray("coordinates") + .startArray() + .startArray() + .value(176.0) + .value(15.0) + .endArray() + .startArray() + .value(-177.0) + .value(10.0) + .endArray() + .startArray() + .value(-177.0) + .value(-10.0) + .endArray() + .startArray() + .value(176.0) + .value(-15.0) + .endArray() + .startArray() + .value(172.0) + .value(0.0) + .endArray() + .startArray() + .value(176.0) + .value(15.0) + .endArray() + .endArray() + .endArray() + .endObject() + .toString(); try (XContentParser parser = createParser(JsonXContent.jsonXContent, polygonGeoJson)) { parser.nextToken(); @@ -775,40 +772,39 @@ public void testParseOGCPolygonWithoutHoles() throws IOException, ParseException } // test 2: ccw poly crossing dateline - polygonGeoJson = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .field("type", "Polygon") - .startArray("coordinates") - .startArray() - .startArray() - .value(-177.0) - .value(10.0) - .endArray() - .startArray() - .value(176.0) - .value(15.0) - .endArray() - .startArray() - .value(172.0) - .value(0.0) - .endArray() - .startArray() - .value(176.0) - .value(-15.0) - .endArray() - .startArray() - .value(-177.0) - .value(-10.0) - .endArray() - .startArray() - .value(-177.0) - .value(10.0) - .endArray() - .endArray() - .endArray() - .endObject() - ); + polygonGeoJson = XContentFactory.jsonBuilder() + .startObject() + .field("type", "Polygon") + .startArray("coordinates") + .startArray() + .startArray() + .value(-177.0) + .value(10.0) + .endArray() + .startArray() + .value(176.0) + .value(15.0) + .endArray() + .startArray() + .value(172.0) + .value(0.0) + .endArray() + .startArray() + .value(176.0) + .value(-15.0) + .endArray() 
+ .startArray() + .value(-177.0) + .value(-10.0) + .endArray() + .startArray() + .value(-177.0) + .value(10.0) + .endArray() + .endArray() + .endArray() + .endObject() + .toString(); try (XContentParser parser = createParser(JsonXContent.jsonXContent, polygonGeoJson)) { parser.nextToken(); @@ -822,154 +818,151 @@ public void testParseOGCPolygonWithoutHoles() throws IOException, ParseException } // test 3: cw poly not crossing dateline - polygonGeoJson = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .field("type", "Polygon") - .startArray("coordinates") - .startArray() - .startArray() - .value(176.0) - .value(15.0) - .endArray() - .startArray() - .value(180.0) - .value(10.0) - .endArray() - .startArray() - .value(180.0) - .value(-10.0) - .endArray() - .startArray() - .value(176.0) - .value(-15.0) - .endArray() - .startArray() - .value(172.0) - .value(0.0) - .endArray() - .startArray() - .value(176.0) - .value(15.0) - .endArray() - .endArray() - .endArray() - .endObject() - ); - - try (XContentParser parser = createParser(JsonXContent.jsonXContent, polygonGeoJson)) { - parser.nextToken(); - Shape shape = ShapeParser.parse(parser).buildS4J(); - OpenSearchGeoAssertions.assertPolygon(shape, true); - } - - try (XContentParser parser = createParser(JsonXContent.jsonXContent, polygonGeoJson)) { - parser.nextToken(); - OpenSearchGeoAssertions.assertPolygon(parse(parser), false); - } - - // test 4: cw poly crossing dateline - polygonGeoJson = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .field("type", "Polygon") - .startArray("coordinates") - .startArray() - .startArray() - .value(176.0) - .value(15.0) - .endArray() - .startArray() - .value(184.0) - .value(15.0) - .endArray() - .startArray() - .value(184.0) - .value(0.0) - .endArray() - .startArray() - .value(176.0) - .value(-15.0) - .endArray() - .startArray() - .value(174.0) - .value(-10.0) - .endArray() - .startArray() - .value(176.0) - .value(15.0) - .endArray() - .endArray() - .endArray() - .endObject() - ); - - try (XContentParser parser = createParser(JsonXContent.jsonXContent, polygonGeoJson)) { - parser.nextToken(); - Shape shape = ShapeParser.parse(parser).buildS4J(); - OpenSearchGeoAssertions.assertMultiPolygon(shape, true); - } - - try (XContentParser parser = createParser(JsonXContent.jsonXContent, polygonGeoJson)) { - parser.nextToken(); + polygonGeoJson = XContentFactory.jsonBuilder() + .startObject() + .field("type", "Polygon") + .startArray("coordinates") + .startArray() + .startArray() + .value(176.0) + .value(15.0) + .endArray() + .startArray() + .value(180.0) + .value(10.0) + .endArray() + .startArray() + .value(180.0) + .value(-10.0) + .endArray() + .startArray() + .value(176.0) + .value(-15.0) + .endArray() + .startArray() + .value(172.0) + .value(0.0) + .endArray() + .startArray() + .value(176.0) + .value(15.0) + .endArray() + .endArray() + .endArray() + .endObject() + .toString(); + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, polygonGeoJson)) { + parser.nextToken(); + Shape shape = ShapeParser.parse(parser).buildS4J(); + OpenSearchGeoAssertions.assertPolygon(shape, true); + } + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, polygonGeoJson)) { + parser.nextToken(); + OpenSearchGeoAssertions.assertPolygon(parse(parser), false); + } + + // test 4: cw poly crossing dateline + polygonGeoJson = XContentFactory.jsonBuilder() + .startObject() + .field("type", "Polygon") + .startArray("coordinates") + .startArray() + .startArray() + 
.value(176.0) + .value(15.0) + .endArray() + .startArray() + .value(184.0) + .value(15.0) + .endArray() + .startArray() + .value(184.0) + .value(0.0) + .endArray() + .startArray() + .value(176.0) + .value(-15.0) + .endArray() + .startArray() + .value(174.0) + .value(-10.0) + .endArray() + .startArray() + .value(176.0) + .value(15.0) + .endArray() + .endArray() + .endArray() + .endObject() + .toString(); + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, polygonGeoJson)) { + parser.nextToken(); + Shape shape = ShapeParser.parse(parser).buildS4J(); + OpenSearchGeoAssertions.assertMultiPolygon(shape, true); + } + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, polygonGeoJson)) { + parser.nextToken(); OpenSearchGeoAssertions.assertMultiPolygon(parse(parser), false); } } public void testParseOGCPolygonWithHoles() throws IOException, ParseException { // test 1: ccw poly not crossing dateline - String polygonGeoJson = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .field("type", "Polygon") - .startArray("coordinates") - .startArray() - .startArray() - .value(176.0) - .value(15.0) - .endArray() - .startArray() - .value(-177.0) - .value(10.0) - .endArray() - .startArray() - .value(-177.0) - .value(-10.0) - .endArray() - .startArray() - .value(176.0) - .value(-15.0) - .endArray() - .startArray() - .value(172.0) - .value(0.0) - .endArray() - .startArray() - .value(176.0) - .value(15.0) - .endArray() - .endArray() - .startArray() - .startArray() - .value(-172.0) - .value(8.0) - .endArray() - .startArray() - .value(174.0) - .value(10.0) - .endArray() - .startArray() - .value(-172.0) - .value(-8.0) - .endArray() - .startArray() - .value(-172.0) - .value(8.0) - .endArray() - .endArray() - .endArray() - .endObject() - ); + String polygonGeoJson = XContentFactory.jsonBuilder() + .startObject() + .field("type", "Polygon") + .startArray("coordinates") + .startArray() + .startArray() + .value(176.0) + .value(15.0) + .endArray() + .startArray() + .value(-177.0) + .value(10.0) + .endArray() + .startArray() + .value(-177.0) + .value(-10.0) + .endArray() + .startArray() + .value(176.0) + .value(-15.0) + .endArray() + .startArray() + .value(172.0) + .value(0.0) + .endArray() + .startArray() + .value(176.0) + .value(15.0) + .endArray() + .endArray() + .startArray() + .startArray() + .value(-172.0) + .value(8.0) + .endArray() + .startArray() + .value(174.0) + .value(10.0) + .endArray() + .startArray() + .value(-172.0) + .value(-8.0) + .endArray() + .startArray() + .value(-172.0) + .value(8.0) + .endArray() + .endArray() + .endArray() + .endObject() + .toString(); try (XContentParser parser = createParser(JsonXContent.jsonXContent, polygonGeoJson)) { parser.nextToken(); @@ -983,58 +976,57 @@ public void testParseOGCPolygonWithHoles() throws IOException, ParseException { } // test 2: ccw poly crossing dateline - polygonGeoJson = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .field("type", "Polygon") - .startArray("coordinates") - .startArray() - .startArray() - .value(-177.0) - .value(10.0) - .endArray() - .startArray() - .value(176.0) - .value(15.0) - .endArray() - .startArray() - .value(172.0) - .value(0.0) - .endArray() - .startArray() - .value(176.0) - .value(-15.0) - .endArray() - .startArray() - .value(-177.0) - .value(-10.0) - .endArray() - .startArray() - .value(-177.0) - .value(10.0) - .endArray() - .endArray() - .startArray() - .startArray() - .value(178.0) - .value(8.0) - .endArray() - .startArray() - .value(-178.0) - 
.value(8.0) - .endArray() - .startArray() - .value(-180.0) - .value(-8.0) - .endArray() - .startArray() - .value(178.0) - .value(8.0) - .endArray() - .endArray() - .endArray() - .endObject() - ); + polygonGeoJson = XContentFactory.jsonBuilder() + .startObject() + .field("type", "Polygon") + .startArray("coordinates") + .startArray() + .startArray() + .value(-177.0) + .value(10.0) + .endArray() + .startArray() + .value(176.0) + .value(15.0) + .endArray() + .startArray() + .value(172.0) + .value(0.0) + .endArray() + .startArray() + .value(176.0) + .value(-15.0) + .endArray() + .startArray() + .value(-177.0) + .value(-10.0) + .endArray() + .startArray() + .value(-177.0) + .value(10.0) + .endArray() + .endArray() + .startArray() + .startArray() + .value(178.0) + .value(8.0) + .endArray() + .startArray() + .value(-178.0) + .value(8.0) + .endArray() + .startArray() + .value(-180.0) + .value(-8.0) + .endArray() + .startArray() + .value(178.0) + .value(8.0) + .endArray() + .endArray() + .endArray() + .endObject() + .toString(); try (XContentParser parser = createParser(JsonXContent.jsonXContent, polygonGeoJson)) { parser.nextToken(); @@ -1048,58 +1040,57 @@ public void testParseOGCPolygonWithHoles() throws IOException, ParseException { } // test 3: cw poly not crossing dateline - polygonGeoJson = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .field("type", "Polygon") - .startArray("coordinates") - .startArray() - .startArray() - .value(176.0) - .value(15.0) - .endArray() - .startArray() - .value(180.0) - .value(10.0) - .endArray() - .startArray() - .value(179.0) - .value(-10.0) - .endArray() - .startArray() - .value(176.0) - .value(-15.0) - .endArray() - .startArray() - .value(172.0) - .value(0.0) - .endArray() - .startArray() - .value(176.0) - .value(15.0) - .endArray() - .endArray() - .startArray() - .startArray() - .value(177.0) - .value(8.0) - .endArray() - .startArray() - .value(179.0) - .value(10.0) - .endArray() - .startArray() - .value(179.0) - .value(-8.0) - .endArray() - .startArray() - .value(177.0) - .value(8.0) - .endArray() - .endArray() - .endArray() - .endObject() - ); + polygonGeoJson = XContentFactory.jsonBuilder() + .startObject() + .field("type", "Polygon") + .startArray("coordinates") + .startArray() + .startArray() + .value(176.0) + .value(15.0) + .endArray() + .startArray() + .value(180.0) + .value(10.0) + .endArray() + .startArray() + .value(179.0) + .value(-10.0) + .endArray() + .startArray() + .value(176.0) + .value(-15.0) + .endArray() + .startArray() + .value(172.0) + .value(0.0) + .endArray() + .startArray() + .value(176.0) + .value(15.0) + .endArray() + .endArray() + .startArray() + .startArray() + .value(177.0) + .value(8.0) + .endArray() + .startArray() + .value(179.0) + .value(10.0) + .endArray() + .startArray() + .value(179.0) + .value(-8.0) + .endArray() + .startArray() + .value(177.0) + .value(8.0) + .endArray() + .endArray() + .endArray() + .endObject() + .toString(); try (XContentParser parser = createParser(JsonXContent.jsonXContent, polygonGeoJson)) { parser.nextToken(); @@ -1113,58 +1104,57 @@ public void testParseOGCPolygonWithHoles() throws IOException, ParseException { } // test 4: cw poly crossing dateline - polygonGeoJson = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .field("type", "Polygon") - .startArray("coordinates") - .startArray() - .startArray() - .value(183.0) - .value(10.0) - .endArray() - .startArray() - .value(183.0) - .value(-10.0) - .endArray() - .startArray() - .value(176.0) - .value(-15.0) 
- .endArray() - .startArray() - .value(172.0) - .value(0.0) - .endArray() - .startArray() - .value(176.0) - .value(15.0) - .endArray() - .startArray() - .value(183.0) - .value(10.0) - .endArray() - .endArray() - .startArray() - .startArray() - .value(178.0) - .value(8.0) - .endArray() - .startArray() - .value(182.0) - .value(8.0) - .endArray() - .startArray() - .value(180.0) - .value(-8.0) - .endArray() - .startArray() - .value(178.0) - .value(8.0) - .endArray() - .endArray() - .endArray() - .endObject() - ); + polygonGeoJson = XContentFactory.jsonBuilder() + .startObject() + .field("type", "Polygon") + .startArray("coordinates") + .startArray() + .startArray() + .value(183.0) + .value(10.0) + .endArray() + .startArray() + .value(183.0) + .value(-10.0) + .endArray() + .startArray() + .value(176.0) + .value(-15.0) + .endArray() + .startArray() + .value(172.0) + .value(0.0) + .endArray() + .startArray() + .value(176.0) + .value(15.0) + .endArray() + .startArray() + .value(183.0) + .value(10.0) + .endArray() + .endArray() + .startArray() + .startArray() + .value(178.0) + .value(8.0) + .endArray() + .startArray() + .value(182.0) + .value(8.0) + .endArray() + .startArray() + .value(180.0) + .value(-8.0) + .endArray() + .startArray() + .value(178.0) + .value(8.0) + .endArray() + .endArray() + .endArray() + .endObject() + .toString(); try (XContentParser parser = createParser(JsonXContent.jsonXContent, polygonGeoJson)) { parser.nextToken(); @@ -1179,29 +1169,28 @@ public void testParseOGCPolygonWithHoles() throws IOException, ParseException { } public void testParseInvalidPolygon() throws IOException { - /** - * The following 3 test cases ensure proper error handling of invalid polygons - * per the GeoJSON specification + /* + The following 3 test cases ensure proper error handling of invalid polygons + per the GeoJSON specification */ // test case 1: create an invalid polygon with only 2 points - String invalidPoly = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .field("type", "polygon") - .startArray("coordinates") - .startArray() - .startArray() - .value(-74.011) - .value(40.753) - .endArray() - .startArray() - .value(-75.022) - .value(41.783) - .endArray() - .endArray() - .endArray() - .endObject() - ); + String invalidPoly = XContentFactory.jsonBuilder() + .startObject() + .field("type", "polygon") + .startArray("coordinates") + .startArray() + .startArray() + .value(-74.011) + .value(40.753) + .endArray() + .startArray() + .value(-75.022) + .value(41.783) + .endArray() + .endArray() + .endArray() + .endObject() + .toString(); try (XContentParser parser = createParser(JsonXContent.jsonXContent, invalidPoly)) { parser.nextToken(); OpenSearchGeoAssertions.assertValidException(parser, OpenSearchParseException.class); @@ -1209,20 +1198,19 @@ public void testParseInvalidPolygon() throws IOException { } // test case 2: create an invalid polygon with only 1 point - invalidPoly = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .field("type", "polygon") - .startArray("coordinates") - .startArray() - .startArray() - .value(-74.011) - .value(40.753) - .endArray() - .endArray() - .endArray() - .endObject() - ); + invalidPoly = XContentFactory.jsonBuilder() + .startObject() + .field("type", "polygon") + .startArray("coordinates") + .startArray() + .startArray() + .value(-74.011) + .value(40.753) + .endArray() + .endArray() + .endArray() + .endObject() + .toString(); try (XContentParser parser = createParser(JsonXContent.jsonXContent, invalidPoly)) { 
parser.nextToken(); @@ -1231,18 +1219,17 @@ public void testParseInvalidPolygon() throws IOException { } // test case 3: create an invalid polygon with 0 points - invalidPoly = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .field("type", "polygon") - .startArray("coordinates") - .startArray() - .startArray() - .endArray() - .endArray() - .endArray() - .endObject() - ); + invalidPoly = XContentFactory.jsonBuilder() + .startObject() + .field("type", "polygon") + .startArray("coordinates") + .startArray() + .startArray() + .endArray() + .endArray() + .endArray() + .endObject() + .toString(); try (XContentParser parser = createParser(JsonXContent.jsonXContent, invalidPoly)) { parser.nextToken(); @@ -1251,20 +1238,19 @@ public void testParseInvalidPolygon() throws IOException { } // test case 4: create an invalid polygon with null value points - invalidPoly = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .field("type", "polygon") - .startArray("coordinates") - .startArray() - .startArray() - .nullValue() - .nullValue() - .endArray() - .endArray() - .endArray() - .endObject() - ); + invalidPoly = XContentFactory.jsonBuilder() + .startObject() + .field("type", "polygon") + .startArray("coordinates") + .startArray() + .startArray() + .nullValue() + .nullValue() + .endArray() + .endArray() + .endArray() + .endObject() + .toString(); try (XContentParser parser = createParser(JsonXContent.jsonXContent, invalidPoly)) { parser.nextToken(); @@ -1273,16 +1259,15 @@ public void testParseInvalidPolygon() throws IOException { } // test case 5: create an invalid polygon with 1 invalid LinearRing - invalidPoly = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .field("type", "polygon") - .startArray("coordinates") - .nullValue() - .nullValue() - .endArray() - .endObject() - ); + invalidPoly = XContentFactory.jsonBuilder() + .startObject() + .field("type", "polygon") + .startArray("coordinates") + .nullValue() + .nullValue() + .endArray() + .endObject() + .toString(); try (XContentParser parser = createParser(JsonXContent.jsonXContent, invalidPoly)) { parser.nextToken(); @@ -1291,9 +1276,13 @@ public void testParseInvalidPolygon() throws IOException { } // test case 6: create an invalid polygon with 0 LinearRings - invalidPoly = Strings.toString( - XContentFactory.jsonBuilder().startObject().field("type", "polygon").startArray("coordinates").endArray().endObject() - ); + invalidPoly = XContentFactory.jsonBuilder() + .startObject() + .field("type", "polygon") + .startArray("coordinates") + .endArray() + .endObject() + .toString(); try (XContentParser parser = createParser(JsonXContent.jsonXContent, invalidPoly)) { parser.nextToken(); @@ -1302,18 +1291,17 @@ public void testParseInvalidPolygon() throws IOException { } // test case 7: create an invalid polygon with 0 LinearRings - invalidPoly = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .field("type", "polygon") - .startArray("coordinates") - .startArray() - .value(-74.011) - .value(40.753) - .endArray() - .endArray() - .endObject() - ); + invalidPoly = XContentFactory.jsonBuilder() + .startObject() + .field("type", "polygon") + .startArray("coordinates") + .startArray() + .value(-74.011) + .value(40.753) + .endArray() + .endArray() + .endObject() + .toString(); try (XContentParser parser = createParser(JsonXContent.jsonXContent, invalidPoly)) { parser.nextToken(); @@ -1408,44 +1396,43 @@ public void testParsePolygonWithHole() throws IOException, ParseException { public 
void testParseSelfCrossingPolygon() throws IOException { // test self crossing ccw poly not crossing dateline - String polygonGeoJson = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .field("type", "Polygon") - .startArray("coordinates") - .startArray() - .startArray() - .value(176.0) - .value(15.0) - .endArray() - .startArray() - .value(-177.0) - .value(10.0) - .endArray() - .startArray() - .value(-177.0) - .value(-10.0) - .endArray() - .startArray() - .value(176.0) - .value(-15.0) - .endArray() - .startArray() - .value(-177.0) - .value(15.0) - .endArray() - .startArray() - .value(172.0) - .value(0.0) - .endArray() - .startArray() - .value(176.0) - .value(15.0) - .endArray() - .endArray() - .endArray() - .endObject() - ); + String polygonGeoJson = XContentFactory.jsonBuilder() + .startObject() + .field("type", "Polygon") + .startArray("coordinates") + .startArray() + .startArray() + .value(176.0) + .value(15.0) + .endArray() + .startArray() + .value(-177.0) + .value(10.0) + .endArray() + .startArray() + .value(-177.0) + .value(-10.0) + .endArray() + .startArray() + .value(176.0) + .value(-15.0) + .endArray() + .startArray() + .value(-177.0) + .value(15.0) + .endArray() + .startArray() + .value(172.0) + .value(0.0) + .endArray() + .startArray() + .value(176.0) + .value(15.0) + .endArray() + .endArray() + .endArray() + .endObject() + .toString(); try (XContentParser parser = createParser(JsonXContent.jsonXContent, polygonGeoJson)) { parser.nextToken(); diff --git a/server/src/test/java/org/opensearch/common/geo/GeoUtilTests.java b/server/src/test/java/org/opensearch/common/geo/GeoUtilTests.java index 3960498445138..9768b4ede7277 100644 --- a/server/src/test/java/org/opensearch/common/geo/GeoUtilTests.java +++ b/server/src/test/java/org/opensearch/common/geo/GeoUtilTests.java @@ -32,10 +32,10 @@ package org.opensearch.common.geo; import org.opensearch.common.CheckedConsumer; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/common/geo/GeoWKTShapeParserTests.java b/server/src/test/java/org/opensearch/common/geo/GeoWKTShapeParserTests.java index a53d4bda9edde..5b5f053572b8b 100644 --- a/server/src/test/java/org/opensearch/common/geo/GeoWKTShapeParserTests.java +++ b/server/src/test/java/org/opensearch/common/geo/GeoWKTShapeParserTests.java @@ -50,8 +50,8 @@ import org.opensearch.common.geo.parsers.GeoWKTParser; import org.opensearch.common.geo.parsers.ShapeParser; import org.opensearch.common.settings.Settings; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.geometry.Geometry; import org.opensearch.geometry.GeometryCollection; @@ -64,6 +64,14 @@ import org.opensearch.index.mapper.LegacyGeoShapeFieldMapper; import org.opensearch.index.mapper.Mapper; import org.opensearch.test.geo.RandomShapeGenerator; + +import java.io.IOException; +import java.text.ParseException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + import org.locationtech.jts.geom.Coordinate; import 
org.locationtech.jts.geom.LineString; import org.locationtech.jts.geom.LinearRing; @@ -76,13 +84,6 @@ import org.locationtech.spatial4j.shape.ShapeCollection; import org.locationtech.spatial4j.shape.jts.JtsPoint; -import java.io.IOException; -import java.text.ParseException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; - import static org.opensearch.common.geo.builders.ShapeBuilder.SPATIAL_CONTEXT; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasToString; diff --git a/server/src/test/java/org/opensearch/common/geo/GeometryIndexerTests.java b/server/src/test/java/org/opensearch/common/geo/GeometryIndexerTests.java index 7b462833041ca..7fc95c2316aef 100644 --- a/server/src/test/java/org/opensearch/common/geo/GeometryIndexerTests.java +++ b/server/src/test/java/org/opensearch/common/geo/GeometryIndexerTests.java @@ -32,20 +32,9 @@ package org.opensearch.common.geo; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.not; - -import java.io.IOException; -import java.text.ParseException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; - import org.apache.lucene.index.IndexableField; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.geo.GeometryTestUtils; import org.opensearch.geometry.Circle; @@ -61,8 +50,20 @@ import org.opensearch.geometry.Rectangle; import org.opensearch.index.mapper.GeoShapeIndexer; import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.text.ParseException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + import org.locationtech.spatial4j.exception.InvalidShapeException; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.not; + public class GeometryIndexerTests extends OpenSearchTestCase { GeoShapeIndexer indexer = new GeoShapeIndexer(true, "test"); @@ -224,7 +225,7 @@ public static MultiPoint remove180s(MultiPoint points) { /** * A randomized test that generates a random lines crossing anti-merdian and checks that the decomposed segments of this line * have the same total length (measured using Euclidean distances between neighboring points) as the original line. - * + * <p> * It also extracts all points from these lines, performs normalization of these points and then compares that the resulting * points of line normalization match the points of points normalization with the exception of points that were created on the * antimeridian as the result of line decomposition. 
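[Editor's note] The geo parser test hunks above all apply one mechanical refactor: the GeoJSON fixtures stop wrapping the builder in the removed org.opensearch.common.Strings.toString(...) helper and call toString() on the XContentBuilder itself. Below is a minimal sketch of that pattern in isolation, not part of the diff; the class name GeoJsonBuilderSketch is hypothetical, and it assumes an OpenSearch version (as in this PR) where XContentBuilder#toString() renders the built content, which the GeometryParserTests assertions in this diff rely on.

    import org.opensearch.common.xcontent.XContentFactory;
    import org.opensearch.core.xcontent.XContentBuilder;

    import java.io.IOException;

    public class GeoJsonBuilderSketch {
        public static void main(String[] args) throws IOException {
            // Same fluent chain as the polygon fixtures above, reduced to a Point.
            XContentBuilder builder = XContentFactory.jsonBuilder()
                .startObject()
                .field("type", "Point")
                .startArray("coordinates")
                .value(100.0)
                .value(0.0)
                .endArray()
                .endObject();

            // Before this change: String json = Strings.toString(builder);
            // After: the builder renders itself.
            String json = builder.toString();
            System.out.println(json); // {"type":"Point","coordinates":[100.0,0.0]}
        }
    }

[End editor's note]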
diff --git a/server/src/test/java/org/opensearch/common/geo/GeometryParserTests.java b/server/src/test/java/org/opensearch/common/geo/GeometryParserTests.java index 8caa9d50b2fb0..dded3739086aa 100644 --- a/server/src/test/java/org/opensearch/common/geo/GeometryParserTests.java +++ b/server/src/test/java/org/opensearch/common/geo/GeometryParserTests.java @@ -33,10 +33,9 @@ package org.opensearch.common.geo; import org.opensearch.OpenSearchParseException; -import org.opensearch.common.Strings; +import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.geometry.Geometry; @@ -73,7 +72,7 @@ public void testGeoJsonParsing() throws Exception { assertEquals(new Point(100, 0), format.fromXContent(parser)); XContentBuilder newGeoJson = XContentFactory.jsonBuilder(); format.toXContent(new Point(100, 10), newGeoJson, ToXContent.EMPTY_PARAMS); - assertEquals("{\"type\":\"Point\",\"coordinates\":[100.0,10.0]}", Strings.toString(newGeoJson)); + assertEquals("{\"type\":\"Point\",\"coordinates\":[100.0,10.0]}", newGeoJson.toString()); } XContentBuilder pointGeoJsonWithZ = XContentFactory.jsonBuilder() @@ -147,7 +146,7 @@ public void testWKTParsing() throws Exception { XContentBuilder newGeoJson = XContentFactory.jsonBuilder().startObject().field("val"); format.toXContent(new Point(100, 10), newGeoJson, ToXContent.EMPTY_PARAMS); newGeoJson.endObject(); - assertEquals("{\"val\":\"POINT (100.0 10.0)\"}", Strings.toString(newGeoJson)); + assertEquals("{\"val\":\"POINT (100.0 10.0)\"}", newGeoJson.toString()); } // Make sure we can parse values outside the normal lat lon boundaries @@ -178,12 +177,12 @@ public void testNullParsing() throws Exception { // if we serialize non-null value - it should be serialized as geojson format.toXContent(new Point(100, 10), newGeoJson, ToXContent.EMPTY_PARAMS); newGeoJson.endObject(); - assertEquals("{\"val\":{\"type\":\"Point\",\"coordinates\":[100.0,10.0]}}", Strings.toString(newGeoJson)); + assertEquals("{\"val\":{\"type\":\"Point\",\"coordinates\":[100.0,10.0]}}", newGeoJson.toString()); newGeoJson = XContentFactory.jsonBuilder().startObject().field("val"); format.toXContent(null, newGeoJson, ToXContent.EMPTY_PARAMS); newGeoJson.endObject(); - assertEquals("{\"val\":null}", Strings.toString(newGeoJson)); + assertEquals("{\"val\":null}", newGeoJson.toString()); } } diff --git a/server/src/test/java/org/opensearch/common/geo/ShapeBuilderTests.java b/server/src/test/java/org/opensearch/common/geo/ShapeBuilderTests.java index ef87786b0b921..e98ac7e0dccc5 100644 --- a/server/src/test/java/org/opensearch/common/geo/ShapeBuilderTests.java +++ b/server/src/test/java/org/opensearch/common/geo/ShapeBuilderTests.java @@ -43,6 +43,7 @@ import org.opensearch.geometry.LinearRing; import org.opensearch.index.mapper.GeoShapeIndexer; import org.opensearch.test.OpenSearchTestCase; + import org.locationtech.jts.geom.Coordinate; import org.locationtech.jts.geom.LineString; import org.locationtech.jts.geom.Polygon; diff --git a/server/src/test/java/org/opensearch/common/geo/builders/AbstractShapeBuilderTestCase.java b/server/src/test/java/org/opensearch/common/geo/builders/AbstractShapeBuilderTestCase.java index 417a79da68ae1..72505a6c02ca5 100644 --- 
a/server/src/test/java/org/opensearch/common/geo/builders/AbstractShapeBuilderTestCase.java +++ b/server/src/test/java/org/opensearch/common/geo/builders/AbstractShapeBuilderTestCase.java @@ -34,14 +34,14 @@ import org.opensearch.common.geo.GeoShapeType; import org.opensearch.common.geo.parsers.ShapeParser; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.io.stream.NamedWriteable; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.Writeable.Reader; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.test.OpenSearchTestCase; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -86,7 +86,7 @@ public static void afterClass() throws Exception { public void testFromXContent() throws IOException { for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) { SB testShape = createTestShapeBuilder(); - XContentBuilder contentBuilder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder contentBuilder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); if (randomBoolean()) { contentBuilder.prettyPrint(); } diff --git a/server/src/test/java/org/opensearch/common/geo/builders/CircleBuilderTests.java b/server/src/test/java/org/opensearch/common/geo/builders/CircleBuilderTests.java index 039164881e3e8..71a26db856798 100644 --- a/server/src/test/java/org/opensearch/common/geo/builders/CircleBuilderTests.java +++ b/server/src/test/java/org/opensearch/common/geo/builders/CircleBuilderTests.java @@ -33,10 +33,11 @@ package org.opensearch.common.geo.builders; import org.opensearch.common.unit.DistanceUnit; -import org.locationtech.jts.geom.Coordinate; import java.io.IOException; +import org.locationtech.jts.geom.Coordinate; + public class CircleBuilderTests extends AbstractShapeBuilderTestCase<CircleBuilder> { @Override diff --git a/server/src/test/java/org/opensearch/common/geo/builders/EnvelopeBuilderTests.java b/server/src/test/java/org/opensearch/common/geo/builders/EnvelopeBuilderTests.java index 03f4a47fd2722..7ef1e6e6b9a46 100644 --- a/server/src/test/java/org/opensearch/common/geo/builders/EnvelopeBuilderTests.java +++ b/server/src/test/java/org/opensearch/common/geo/builders/EnvelopeBuilderTests.java @@ -32,13 +32,13 @@ package org.opensearch.common.geo.builders; -import org.locationtech.jts.geom.Coordinate; - import org.opensearch.test.geo.RandomShapeGenerator; -import org.locationtech.spatial4j.shape.Rectangle; import java.io.IOException; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.spatial4j.shape.Rectangle; + public class EnvelopeBuilderTests extends AbstractShapeBuilderTestCase<EnvelopeBuilder> { public void testInvalidConstructorArgs() { diff --git a/server/src/test/java/org/opensearch/common/geo/builders/LineStringBuilderTests.java b/server/src/test/java/org/opensearch/common/geo/builders/LineStringBuilderTests.java index 316274c53b134..4e8b6b827842a 100644 --- a/server/src/test/java/org/opensearch/common/geo/builders/LineStringBuilderTests.java +++ b/server/src/test/java/org/opensearch/common/geo/builders/LineStringBuilderTests.java @@ -34,11 +34,12 @@ import org.opensearch.test.geo.RandomShapeGenerator; import 
org.opensearch.test.geo.RandomShapeGenerator.ShapeType; -import org.locationtech.jts.geom.Coordinate; import java.io.IOException; import java.util.List; +import org.locationtech.jts.geom.Coordinate; + public class LineStringBuilderTests extends AbstractShapeBuilderTestCase<LineStringBuilder> { public void testInvalidConstructorArgs() { diff --git a/server/src/test/java/org/opensearch/common/geo/builders/MultiLineStringBuilderTests.java b/server/src/test/java/org/opensearch/common/geo/builders/MultiLineStringBuilderTests.java index 63e7478403677..01f247a061faa 100644 --- a/server/src/test/java/org/opensearch/common/geo/builders/MultiLineStringBuilderTests.java +++ b/server/src/test/java/org/opensearch/common/geo/builders/MultiLineStringBuilderTests.java @@ -32,12 +32,13 @@ package org.opensearch.common.geo.builders; -import org.locationtech.jts.geom.Coordinate; import org.opensearch.test.geo.RandomShapeGenerator; import org.opensearch.test.geo.RandomShapeGenerator.ShapeType; import java.io.IOException; +import org.locationtech.jts.geom.Coordinate; + public class MultiLineStringBuilderTests extends AbstractShapeBuilderTestCase<MultiLineStringBuilder> { @Override diff --git a/server/src/test/java/org/opensearch/common/geo/builders/MultiPointBuilderTests.java b/server/src/test/java/org/opensearch/common/geo/builders/MultiPointBuilderTests.java index 87c41db53c1cf..90fdefa8ea128 100644 --- a/server/src/test/java/org/opensearch/common/geo/builders/MultiPointBuilderTests.java +++ b/server/src/test/java/org/opensearch/common/geo/builders/MultiPointBuilderTests.java @@ -32,14 +32,14 @@ package org.opensearch.common.geo.builders; -import org.locationtech.jts.geom.Coordinate; - import org.opensearch.test.geo.RandomShapeGenerator; import org.opensearch.test.geo.RandomShapeGenerator.ShapeType; import java.io.IOException; import java.util.List; +import org.locationtech.jts.geom.Coordinate; + public class MultiPointBuilderTests extends AbstractShapeBuilderTestCase<MultiPointBuilder> { public void testInvalidBuilderException() { diff --git a/server/src/test/java/org/opensearch/common/geo/builders/PointBuilderTests.java b/server/src/test/java/org/opensearch/common/geo/builders/PointBuilderTests.java index cd74bb6adef15..d9dc010cb5675 100644 --- a/server/src/test/java/org/opensearch/common/geo/builders/PointBuilderTests.java +++ b/server/src/test/java/org/opensearch/common/geo/builders/PointBuilderTests.java @@ -32,12 +32,13 @@ package org.opensearch.common.geo.builders; -import org.locationtech.jts.geom.Coordinate; import org.opensearch.test.geo.RandomShapeGenerator; import org.opensearch.test.geo.RandomShapeGenerator.ShapeType; import java.io.IOException; +import org.locationtech.jts.geom.Coordinate; + public class PointBuilderTests extends AbstractShapeBuilderTestCase<PointBuilder> { @Override diff --git a/server/src/test/java/org/opensearch/common/geo/builders/PolygonBuilderTests.java b/server/src/test/java/org/opensearch/common/geo/builders/PolygonBuilderTests.java index 14af7fc29687f..db9cf815784a9 100644 --- a/server/src/test/java/org/opensearch/common/geo/builders/PolygonBuilderTests.java +++ b/server/src/test/java/org/opensearch/common/geo/builders/PolygonBuilderTests.java @@ -32,14 +32,15 @@ package org.opensearch.common.geo.builders; -import org.locationtech.jts.geom.Coordinate; import org.opensearch.common.geo.builders.ShapeBuilder.Orientation; import org.opensearch.test.geo.RandomShapeGenerator; import org.opensearch.test.geo.RandomShapeGenerator.ShapeType; -import 
org.locationtech.spatial4j.exception.InvalidShapeException; import java.io.IOException; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.spatial4j.exception.InvalidShapeException; + import static org.hamcrest.Matchers.equalTo; public class PolygonBuilderTests extends AbstractShapeBuilderTestCase<PolygonBuilder> { diff --git a/server/src/test/java/org/opensearch/common/io/VersionedCodecStreamWrapperTests.java b/server/src/test/java/org/opensearch/common/io/VersionedCodecStreamWrapperTests.java index 2cca12ca38fa6..938337fc5146e 100644 --- a/server/src/test/java/org/opensearch/common/io/VersionedCodecStreamWrapperTests.java +++ b/server/src/test/java/org/opensearch/common/io/VersionedCodecStreamWrapperTests.java @@ -16,11 +16,11 @@ import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; import org.apache.lucene.store.OutputStreamIndexOutput; -import org.junit.Before; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.lucene.store.ByteArrayIndexInput; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/common/io/stream/BytesStreamsTests.java b/server/src/test/java/org/opensearch/common/io/stream/BytesStreamsTests.java index 40b3bb05df974..370c691daf401 100644 --- a/server/src/test/java/org/opensearch/common/io/stream/BytesStreamsTests.java +++ b/server/src/test/java/org/opensearch/common/io/stream/BytesStreamsTests.java @@ -35,27 +35,25 @@ import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Constants; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.geo.GeoPoint; -import org.opensearch.common.joda.Joda; import org.opensearch.common.lucene.BytesRefs; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.PageCacheRecycler; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.NamedWriteable; import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.script.JodaCompatibleZonedDateTime; import org.opensearch.test.OpenSearchTestCase; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; import java.io.EOFException; import java.io.IOException; +import java.time.Instant; import java.time.ZoneId; +import java.time.ZonedDateTime; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -327,10 +325,9 @@ public void testSimpleStreams() throws Exception { out.writeOptionalBytesReference(new BytesArray("test")); out.writeOptionalDouble(null); out.writeOptionalDouble(1.2); - Joda.writeTimeZone(out, DateTimeZone.forID("CET")); - Joda.writeOptionalTimeZone(out, DateTimeZone.getDefault()); - Joda.writeOptionalTimeZone(out, null); - out.writeGenericValue(new DateTime(123456, DateTimeZone.forID("America/Los_Angeles"))); + out.writeZoneId(ZoneId.of("CET")); + out.writeOptionalZoneId(ZoneId.systemDefault()); + 
out.writeGenericValue(ZonedDateTime.ofInstant(Instant.ofEpochMilli(123456), ZoneId.of("America/Los_Angeles"))); final byte[] bytes = BytesReference.toBytes(out.bytes()); StreamInput in = StreamInput.wrap(BytesReference.toBytes(out.bytes())); assertEquals(in.available(), bytes.length); @@ -360,14 +357,13 @@ public void testSimpleStreams() throws Exception { assertThat(in.readOptionalBytesReference(), equalTo(new BytesArray("test"))); assertNull(in.readOptionalDouble()); assertThat(in.readOptionalDouble(), closeTo(1.2, 0.0001)); - assertEquals(DateTimeZone.forID("CET"), Joda.readTimeZone(in)); - assertEquals(DateTimeZone.getDefault(), Joda.readOptionalTimeZone(in)); - assertNull(Joda.readOptionalTimeZone(in)); + assertEquals(ZoneId.of("CET"), in.readZoneId()); + assertEquals(ZoneId.systemDefault(), in.readOptionalZoneId()); Object dt = in.readGenericValue(); - assertThat(dt, instanceOf(JodaCompatibleZonedDateTime.class)); - JodaCompatibleZonedDateTime jdt = (JodaCompatibleZonedDateTime) dt; - assertThat(jdt.getZonedDateTime().toInstant().toEpochMilli(), equalTo(123456L)); - assertThat(jdt.getZonedDateTime().getZone(), equalTo(ZoneId.of("America/Los_Angeles"))); + assertThat(dt, instanceOf(ZonedDateTime.class)); + ZonedDateTime zdt = (ZonedDateTime) dt; + assertThat(zdt.toInstant().toEpochMilli(), equalTo(123456L)); + assertThat(zdt.getZone(), equalTo(ZoneId.of("America/Los_Angeles"))); assertEquals(0, in.available()); IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> out.writeGenericValue(new Object() { @Override diff --git a/server/src/test/java/org/opensearch/common/io/stream/DelayableWriteableTests.java b/server/src/test/java/org/opensearch/common/io/stream/DelayableWriteableTests.java index ed85d1f33aade..2fa56c5bbf770 100644 --- a/server/src/test/java/org/opensearch/common/io/stream/DelayableWriteableTests.java +++ b/server/src/test/java/org/opensearch/common/io/stream/DelayableWriteableTests.java @@ -32,11 +32,6 @@ package org.opensearch.common.io.stream; -import static java.util.Collections.singletonList; -import static org.hamcrest.Matchers.equalTo; - -import java.io.IOException; - import org.opensearch.Version; import org.opensearch.core.common.io.stream.NamedWriteable; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; @@ -46,6 +41,11 @@ import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; +import java.io.IOException; + +import static java.util.Collections.singletonList; +import static org.hamcrest.Matchers.equalTo; + public class DelayableWriteableTests extends OpenSearchTestCase { // NOTE: we don't use AbstractWireSerializingTestCase because we don't implement equals and hashCode. 
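[Editor's note] The BytesStreamsTests hunk above swaps the removed Joda stream helpers for java.time methods on StreamOutput/StreamInput. A minimal round-trip sketch of that replacement follows, assuming only the writeZoneId/readZoneId, writeOptionalZoneId/readOptionalZoneId, and ZonedDateTime generic-value support shown in the hunk; the class name ZoneIdStreamSketch is hypothetical.

    import org.opensearch.common.io.stream.BytesStreamOutput;
    import org.opensearch.core.common.io.stream.StreamInput;

    import java.time.Instant;
    import java.time.ZoneId;
    import java.time.ZonedDateTime;

    public class ZoneIdStreamSketch {
        public static void main(String[] args) throws Exception {
            try (BytesStreamOutput out = new BytesStreamOutput()) {
                // Replaces Joda.writeTimeZone(out, DateTimeZone.forID("CET")):
                out.writeZoneId(ZoneId.of("CET"));
                // Replaces Joda.writeOptionalTimeZone(...):
                out.writeOptionalZoneId(ZoneId.systemDefault());
                // Generic values now travel as plain ZonedDateTime instead of
                // the JodaCompatibleZonedDateTime shim:
                out.writeGenericValue(ZonedDateTime.ofInstant(Instant.ofEpochMilli(123456), ZoneId.of("America/Los_Angeles")));

                StreamInput in = out.bytes().streamInput();
                ZoneId cet = in.readZoneId();            // ZoneId.of("CET")
                ZoneId system = in.readOptionalZoneId(); // ZoneId.systemDefault()
                ZonedDateTime zdt = (ZonedDateTime) in.readGenericValue();
                System.out.println(cet + " " + system + " " + zdt.toInstant().toEpochMilli()); // CET ... 123456
            }
        }
    }

[End editor's note]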
private static class Example implements NamedWriteable { diff --git a/server/src/test/java/org/opensearch/common/io/stream/ReleasableBytesStreamOutputTests.java b/server/src/test/java/org/opensearch/common/io/stream/ReleasableBytesStreamOutputTests.java index 20f202149b17d..92ed8fac5aae0 100644 --- a/server/src/test/java/org/opensearch/common/io/stream/ReleasableBytesStreamOutputTests.java +++ b/server/src/test/java/org/opensearch/common/io/stream/ReleasableBytesStreamOutputTests.java @@ -35,7 +35,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.util.MockBigArrays; import org.opensearch.common.util.MockPageCacheRecycler; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java b/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java index 02bc6c58ad233..4fd8986d0b428 100644 --- a/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java +++ b/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java @@ -61,8 +61,8 @@ protected boolean enableWarningsCheck() { } public void testTimezoneParsing() { - /** this testcase won't work in joda. See comment in {@link #testPartialTimeParsing()} - * assertSameDateAs("2016-11-30T+01", "strict_date_optional_time", "strict_date_optional_time"); + /* this testcase won't work in joda. See comment in {@link #testPartialTimeParsing()} + assertSameDateAs("2016-11-30T+01", "strict_date_optional_time", "strict_date_optional_time"); */ assertSameDateAs("2016-11-30T00+01", "strict_date_optional_time", "strict_date_optional_time"); assertSameDateAs("2016-11-30T00+0100", "strict_date_optional_time", "strict_date_optional_time"); @@ -779,7 +779,9 @@ public void testSamePrinterOutput() { DateTime jodaDate = new DateTime(year, month, day, hour, minute, second, DateTimeZone.UTC); for (FormatNames format : FormatNames.values()) { - if (format == FormatNames.ISO8601 || format == FormatNames.STRICT_DATE_OPTIONAL_TIME_NANOS) { + if (format == FormatNames.ISO8601 + || format == FormatNames.STRICT_DATE_OPTIONAL_TIME_NANOS + || format == FormatNames.RFC3339_LENIENT) { // Nanos aren't supported by joda continue; } diff --git a/server/src/test/java/org/opensearch/common/logging/HeaderWarningTests.java b/server/src/test/java/org/opensearch/common/logging/HeaderWarningTests.java index b1ff70104d2cc..f2f3b1be2d9a3 100644 --- a/server/src/test/java/org/opensearch/common/logging/HeaderWarningTests.java +++ b/server/src/test/java/org/opensearch/common/logging/HeaderWarningTests.java @@ -32,6 +32,7 @@ package org.opensearch.common.logging; import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator; + import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/common/logging/LoggersTests.java b/server/src/test/java/org/opensearch/common/logging/LoggersTests.java index 17c4f9d0fe13d..d9db57aef15b6 100644 --- a/server/src/test/java/org/opensearch/common/logging/LoggersTests.java +++ b/server/src/test/java/org/opensearch/common/logging/LoggersTests.java @@ -53,40 +53,45 @@ public void testParameterizedMessageLambda() throws Exception { appender.start(); final Logger testLogger = 
LogManager.getLogger(LoggersTests.class); Loggers.addAppender(testLogger, appender); - Loggers.setLevel(testLogger, Level.TRACE); + try { + Loggers.setLevel(testLogger, Level.TRACE); - Throwable ex = randomException(); - testLogger.error(() -> new ParameterizedMessage("an error message"), ex); - assertThat(appender.lastEvent.getLevel(), equalTo(Level.ERROR)); - assertThat(appender.lastEvent.getThrown(), equalTo(ex)); - assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("an error message")); + Throwable ex = randomException(); + testLogger.error(() -> new ParameterizedMessage("an error message"), ex); + assertThat(appender.lastEvent.getLevel(), equalTo(Level.ERROR)); + assertThat(appender.lastEvent.getThrown(), equalTo(ex)); + assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("an error message")); - ex = randomException(); - testLogger.warn(() -> new ParameterizedMessage("a warn message: [{}]", "long gc"), ex); - assertThat(appender.lastEvent.getLevel(), equalTo(Level.WARN)); - assertThat(appender.lastEvent.getThrown(), equalTo(ex)); - assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("a warn message: [long gc]")); - assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining("long gc")); + ex = randomException(); + testLogger.warn(() -> new ParameterizedMessage("a warn message: [{}]", "long gc"), ex); + assertThat(appender.lastEvent.getLevel(), equalTo(Level.WARN)); + assertThat(appender.lastEvent.getThrown(), equalTo(ex)); + assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("a warn message: [long gc]")); + assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining("long gc")); - testLogger.info(() -> new ParameterizedMessage("an info message a=[{}], b=[{}], c=[{}]", 1, 2, 3)); - assertThat(appender.lastEvent.getLevel(), equalTo(Level.INFO)); - assertThat(appender.lastEvent.getThrown(), nullValue()); - assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("an info message a=[1], b=[2], c=[3]")); - assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining(1, 2, 3)); + testLogger.info(() -> new ParameterizedMessage("an info message a=[{}], b=[{}], c=[{}]", 1, 2, 3)); + assertThat(appender.lastEvent.getLevel(), equalTo(Level.INFO)); + assertThat(appender.lastEvent.getThrown(), nullValue()); + assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("an info message a=[1], b=[2], c=[3]")); + assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining(1, 2, 3)); - ex = randomException(); - testLogger.debug(() -> new ParameterizedMessage("a debug message options = {}", Arrays.asList("yes", "no")), ex); - assertThat(appender.lastEvent.getLevel(), equalTo(Level.DEBUG)); - assertThat(appender.lastEvent.getThrown(), equalTo(ex)); - assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("a debug message options = [yes, no]")); - assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining(Arrays.asList("yes", "no"))); + ex = randomException(); + testLogger.debug(() -> new ParameterizedMessage("a debug message options = {}", Arrays.asList("yes", "no")), ex); + assertThat(appender.lastEvent.getLevel(), equalTo(Level.DEBUG)); + assertThat(appender.lastEvent.getThrown(), equalTo(ex)); + assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("a debug message options = [yes, no]")); + 
assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining(Arrays.asList("yes", "no"))); - ex = randomException(); - testLogger.trace(() -> new ParameterizedMessage("a trace message; element = [{}]", new Object[] { null }), ex); - assertThat(appender.lastEvent.getLevel(), equalTo(Level.TRACE)); - assertThat(appender.lastEvent.getThrown(), equalTo(ex)); - assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("a trace message; element = [null]")); - assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining(new Object[] { null })); + ex = randomException(); + testLogger.trace(() -> new ParameterizedMessage("a trace message; element = [{}]", new Object[] { null }), ex); + assertThat(appender.lastEvent.getLevel(), equalTo(Level.TRACE)); + assertThat(appender.lastEvent.getThrown(), equalTo(ex)); + assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("a trace message; element = [null]")); + assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining(new Object[] { null })); + } finally { + Loggers.removeAppender(testLogger, appender); + appender.stop(); + } } private Throwable randomException() { diff --git a/server/src/test/java/org/opensearch/common/logging/RateLimitingFilterTests.java b/server/src/test/java/org/opensearch/common/logging/RateLimitingFilterTests.java index be952712f261f..e95f47bef864c 100644 --- a/server/src/test/java/org/opensearch/common/logging/RateLimitingFilterTests.java +++ b/server/src/test/java/org/opensearch/common/logging/RateLimitingFilterTests.java @@ -38,8 +38,8 @@ import org.junit.After; import org.junit.Before; -import static org.apache.logging.log4j.core.Filter.Result; import static org.hamcrest.Matchers.equalTo; +import static org.apache.logging.log4j.core.Filter.Result; public class RateLimitingFilterTests extends OpenSearchTestCase { diff --git a/server/src/test/java/org/opensearch/common/lucene/LuceneTests.java b/server/src/test/java/org/opensearch/common/lucene/LuceneTests.java index f7be2c4876e6f..3325495699e1a 100644 --- a/server/src/test/java/org/opensearch/common/lucene/LuceneTests.java +++ b/server/src/test/java/org/opensearch/common/lucene/LuceneTests.java @@ -31,30 +31,28 @@ package org.opensearch.common.lucene; -import org.apache.lucene.document.LatLonPoint; -import org.apache.lucene.index.IndexCommit; -import org.apache.lucene.index.IndexFormatTooOldException; -import org.apache.lucene.index.StandardDirectoryReader; -import org.apache.lucene.index.StoredFields; -import org.apache.lucene.tests.analysis.MockAnalyzer; import org.apache.lucene.analysis.core.KeywordAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.Field.Store; import org.apache.lucene.document.LatLonDocValuesField; +import org.apache.lucene.document.LatLonPoint; import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.StringField; import org.apache.lucene.document.TextField; import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexCommit; +import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NoDeletionPolicy; import org.apache.lucene.index.NoMergePolicy; -import org.apache.lucene.tests.index.RandomIndexWriter; import 
org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.SoftDeletesRetentionMergePolicy; +import org.apache.lucene.index.StandardDirectoryReader; +import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.IndexOrDocValuesQuery; @@ -75,6 +73,8 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.store.Directory; import org.apache.lucene.store.MMapDirectory; +import org.apache.lucene.tests.analysis.MockAnalyzer; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.tests.store.MockDirectoryWrapper; import org.apache.lucene.tests.util.TestUtil; import org.apache.lucene.util.Bits; @@ -82,8 +82,8 @@ import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.collect.Tuple; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource; import org.opensearch.index.fielddata.fieldcomparator.DoubleValuesComparatorSource; diff --git a/server/src/test/java/org/opensearch/common/lucene/ShardCoreKeyMapTests.java b/server/src/test/java/org/opensearch/common/lucene/ShardCoreKeyMapTests.java index a54047ac69cba..323ceeb95e157 100644 --- a/server/src/test/java/org/opensearch/common/lucene/ShardCoreKeyMapTests.java +++ b/server/src/test/java/org/opensearch/common/lucene/ShardCoreKeyMapTests.java @@ -37,9 +37,9 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; import org.opensearch.core.index.shard.ShardId; import org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/common/lucene/index/FreqTermsEnumTests.java b/server/src/test/java/org/opensearch/common/lucene/index/FreqTermsEnumTests.java index 9423d3c17c98a..5dbbba570b758 100644 --- a/server/src/test/java/org/opensearch/common/lucene/index/FreqTermsEnumTests.java +++ b/server/src/test/java/org/opensearch/common/lucene/index/FreqTermsEnumTests.java @@ -45,13 +45,13 @@ import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.StoredFields; import org.apache.lucene.index.Term; -import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; -import org.opensearch.common.util.io.IOUtils; import org.opensearch.common.lucene.search.Queries; import org.opensearch.common.util.BigArrays; +import org.opensearch.common.util.io.IOUtils; import org.opensearch.test.OpenSearchTestCase; import org.junit.After; import org.junit.Before; diff --git a/server/src/test/java/org/opensearch/common/lucene/search/morelikethis/XMoreLikeThisTests.java b/server/src/test/java/org/opensearch/common/lucene/search/morelikethis/XMoreLikeThisTests.java index e6a621037ef6d..b907d739a435b 100644 --- 
a/server/src/test/java/org/opensearch/common/lucene/search/morelikethis/XMoreLikeThisTests.java +++ b/server/src/test/java/org/opensearch/common/lucene/search/morelikethis/XMoreLikeThisTests.java @@ -32,18 +32,18 @@ package org.opensearch.common.lucene.search.morelikethis; -import org.apache.lucene.tests.analysis.MockAnalyzer; -import org.apache.lucene.tests.analysis.MockTokenizer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.queries.mlt.MoreLikeThis; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.analysis.MockAnalyzer; +import org.apache.lucene.tests.analysis.MockTokenizer; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/common/lucene/store/ByteArrayIndexInputTests.java b/server/src/test/java/org/opensearch/common/lucene/store/ByteArrayIndexInputTests.java index 827f9dd992294..ee71cfef7d925 100644 --- a/server/src/test/java/org/opensearch/common/lucene/store/ByteArrayIndexInputTests.java +++ b/server/src/test/java/org/opensearch/common/lucene/store/ByteArrayIndexInputTests.java @@ -32,6 +32,8 @@ package org.opensearch.common.lucene.store; +import org.apache.lucene.store.IndexInput; + import java.io.EOFException; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -153,4 +155,34 @@ public void testRandomAccessReads() throws IOException { // 10001001 00100101 10001001 00110000 11100111 00100100 10110001 00101110 assertEquals(-8564288273245753042L, indexInput.readLong(1)); } + + public void testReadBytesWithSlice() throws IOException { + int inputLength = randomIntBetween(100, 1000); + + byte[] input = randomUnicodeOfLength(inputLength).getBytes(StandardCharsets.UTF_8); + ByteArrayIndexInput indexInput = new ByteArrayIndexInput("test", input); + + int sliceOffset = randomIntBetween(1, inputLength - 10); + int sliceLength = randomIntBetween(2, inputLength - sliceOffset); + IndexInput slice = indexInput.slice("slice", sliceOffset, sliceLength); + + // read a byte from sliced index input and verify if the read value is correct + assertEquals(input[sliceOffset], slice.readByte()); + + // read few more bytes into a byte array + int bytesToRead = randomIntBetween(1, sliceLength - 1); + slice.readBytes(new byte[bytesToRead], 0, bytesToRead); + + // now try to read beyond the boundary of the slice, but within the + // boundary of the original IndexInput. 
We've already read a few bytes + // so this is expected to fail + assertThrows(EOFException.class, () -> slice.readBytes(new byte[sliceLength], 0, sliceLength)); + + // seek to EOF and then try to read + slice.seek(sliceLength); + assertThrows(EOFException.class, () -> slice.readBytes(new byte[1], 0, 1)); + + slice.close(); + indexInput.close(); + } } diff --git a/server/src/test/java/org/opensearch/common/lucene/uid/VersionsTests.java b/server/src/test/java/org/opensearch/common/lucene/uid/VersionsTests.java index 88627b45efa3c..4a68918587c23 100644 --- a/server/src/test/java/org/opensearch/common/lucene/uid/VersionsTests.java +++ b/server/src/test/java/org/opensearch/common/lucene/uid/VersionsTests.java @@ -43,10 +43,10 @@ import org.opensearch.Version; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.SeqNoFieldMapper; import org.opensearch.index.mapper.VersionFieldMapper; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; diff --git a/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java b/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java index 48a8e5799993d..de4bdcac6c2b2 100644 --- a/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java +++ b/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java @@ -32,21 +32,23 @@ package org.opensearch.common.network; -import org.opensearch.common.component.AbstractLifecycleComponent; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.http.HttpInfo; import org.opensearch.http.HttpServerTransport; import org.opensearch.http.HttpStats; import org.opensearch.http.NullDispatcher; -import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.plugins.NetworkPlugin; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -55,6 +57,7 @@ import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestHandler; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; @@ -116,12 +119,13 @@ public Map<String, Supplier<Transport>> getTransports( PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, - NetworkService networkService + NetworkService networkService, + Tracer tracer ) { return Collections.singletonMap("custom", custom); } }; - 
NetworkModule module = newNetworkModule(settings, plugin); + NetworkModule module = newNetworkModule(settings, null, plugin); assertSame(custom, module.getTransportSupplier()); } @@ -132,7 +136,7 @@ public void testRegisterHttpTransport() { .build(); Supplier<HttpServerTransport> custom = FakeHttpTransport::new; - NetworkModule module = newNetworkModule(settings, new NetworkPlugin() { + NetworkModule module = newNetworkModule(settings, null, new NetworkPlugin() { @Override public Map<String, Supplier<HttpServerTransport>> getHttpTransports( Settings settings, @@ -143,7 +147,8 @@ public Map<String, Supplier<HttpServerTransport>> getHttpTransports( NamedXContentRegistry xContentRegistry, NetworkService networkService, HttpServerTransport.Dispatcher requestDispatcher, - ClusterSettings clusterSettings + ClusterSettings clusterSettings, + Tracer tracer ) { return Collections.singletonMap("custom", custom); } @@ -151,7 +156,7 @@ public Map<String, Supplier<HttpServerTransport>> getHttpTransports( assertSame(custom, module.getHttpServerTransportSupplier()); settings = Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build(); - NetworkModule newModule = newNetworkModule(settings); + NetworkModule newModule = newNetworkModule(settings, null); expectThrows(IllegalStateException.class, () -> newModule.getHttpServerTransportSupplier()); } @@ -165,7 +170,7 @@ public void testOverrideDefault() { Supplier<Transport> customTransport = () -> null; // content doesn't matter we check reference equality Supplier<HttpServerTransport> custom = FakeHttpTransport::new; Supplier<HttpServerTransport> def = FakeHttpTransport::new; - NetworkModule module = newNetworkModule(settings, new NetworkPlugin() { + NetworkModule module = newNetworkModule(settings, null, new NetworkPlugin() { @Override public Map<String, Supplier<Transport>> getTransports( Settings settings, @@ -173,7 +178,8 @@ public Map<String, Supplier<Transport>> getTransports( PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, - NetworkService networkService + NetworkService networkService, + Tracer tracer ) { return Collections.singletonMap("default_custom", customTransport); } @@ -188,7 +194,8 @@ public Map<String, Supplier<HttpServerTransport>> getHttpTransports( NamedXContentRegistry xContentRegistry, NetworkService networkService, HttpServerTransport.Dispatcher requestDispatcher, - ClusterSettings clusterSettings + ClusterSettings clusterSettings, + Tracer tracer ) { Map<String, Supplier<HttpServerTransport>> supplierMap = new HashMap<>(); supplierMap.put("custom", custom); @@ -208,7 +215,7 @@ public void testDefaultKeys() { Supplier<HttpServerTransport> custom = FakeHttpTransport::new; Supplier<HttpServerTransport> def = FakeHttpTransport::new; Supplier<Transport> customTransport = () -> null; - NetworkModule module = newNetworkModule(settings, new NetworkPlugin() { + NetworkModule module = newNetworkModule(settings, null, new NetworkPlugin() { @Override public Map<String, Supplier<Transport>> getTransports( Settings settings, @@ -216,7 +223,8 @@ public Map<String, Supplier<Transport>> getTransports( PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, - NetworkService networkService + NetworkService networkService, + Tracer tracer ) { return Collections.singletonMap("default_custom", customTransport); } @@ -231,7 +239,8 @@ public Map<String, Supplier<HttpServerTransport>> 
getHttpTransports( NamedXContentRegistry xContentRegistry, NetworkService networkService, HttpServerTransport.Dispatcher requestDispatcher, - ClusterSettings clusterSettings + ClusterSettings clusterSettings, + Tracer tracer ) { Map<String, Supplier<HttpServerTransport>> supplierMap = new HashMap<>(); supplierMap.put("custom", custom); @@ -265,7 +274,7 @@ public <T extends TransportRequest> TransportRequestHandler<T> interceptHandler( return actualHandler; } }; - NetworkModule module = newNetworkModule(settings, new NetworkPlugin() { + NetworkModule module = newNetworkModule(settings, null, new NetworkPlugin() { @Override public List<TransportInterceptor> getTransportInterceptors( NamedWriteableRegistry namedWriteableRegistry, @@ -287,7 +296,7 @@ public List<TransportInterceptor> getTransportInterceptors( assertSame(((NetworkModule.CompositeTransportInterceptor) transportInterceptor).transportInterceptors.get(0), interceptor); NullPointerException nullPointerException = expectThrows(NullPointerException.class, () -> { - newNetworkModule(settings, new NetworkPlugin() { + newNetworkModule(settings, null, new NetworkPlugin() { @Override public List<TransportInterceptor> getTransportInterceptors( NamedWriteableRegistry namedWriteableRegistry, @@ -301,7 +310,202 @@ public List<TransportInterceptor> getTransportInterceptors( assertEquals("interceptor must not be null", nullPointerException.getMessage()); } - private NetworkModule newNetworkModule(Settings settings, NetworkPlugin... plugins) { + public void testRegisterCoreInterceptor() { + Settings settings = Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build(); + AtomicInteger called = new AtomicInteger(0); + + TransportInterceptor interceptor = new TransportInterceptor() { + @Override + public <T extends TransportRequest> TransportRequestHandler<T> interceptHandler( + String action, + String executor, + boolean forceExecution, + TransportRequestHandler<T> actualHandler + ) { + called.incrementAndGet(); + if ("foo/bar/boom".equals(action)) { + assertTrue(forceExecution); + } else { + assertFalse(forceExecution); + } + return actualHandler; + } + }; + + List<TransportInterceptor> coreTransportInterceptors = new ArrayList<>(); + coreTransportInterceptors.add(interceptor); + + NetworkModule module = newNetworkModule(settings, coreTransportInterceptors); + + TransportInterceptor transportInterceptor = module.getTransportInterceptor(); + assertEquals(0, called.get()); + transportInterceptor.interceptHandler("foo/bar/boom", null, true, null); + assertEquals(1, called.get()); + transportInterceptor.interceptHandler("foo/baz/boom", null, false, null); + assertEquals(2, called.get()); + assertTrue(transportInterceptor instanceof NetworkModule.CompositeTransportInterceptor); + assertEquals(((NetworkModule.CompositeTransportInterceptor) transportInterceptor).transportInterceptors.size(), 1); + assertSame(((NetworkModule.CompositeTransportInterceptor) transportInterceptor).transportInterceptors.get(0), interceptor); + } + + public void testInterceptorOrder() { + Settings settings = Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build(); + AtomicInteger called = new AtomicInteger(0); + AtomicInteger called1 = new AtomicInteger(0); + + TransportInterceptor interceptor = new TransportInterceptor() { + @Override + public <T extends TransportRequest> TransportRequestHandler<T> interceptHandler( + String action, + String executor, + boolean forceExecution, + TransportRequestHandler<T> actualHandler + ) { + 
called.incrementAndGet(); + if ("foo/bar/boom".equals(action)) { + assertTrue(forceExecution); + } else { + assertFalse(forceExecution); + } + return actualHandler; + } + }; + + TransportInterceptor interceptor1 = new TransportInterceptor() { + @Override + public <T extends TransportRequest> TransportRequestHandler<T> interceptHandler( + String action, + String executor, + boolean forceExecution, + TransportRequestHandler<T> actualHandler + ) { + called1.incrementAndGet(); + if ("foo/bar/boom".equals(action)) { + assertTrue(forceExecution); + } else { + assertFalse(forceExecution); + } + return actualHandler; + } + }; + + List<TransportInterceptor> coreTransportInterceptors = new ArrayList<>(); + coreTransportInterceptors.add(interceptor1); + + NetworkModule module = newNetworkModule(settings, coreTransportInterceptors, new NetworkPlugin() { + @Override + public List<TransportInterceptor> getTransportInterceptors( + NamedWriteableRegistry namedWriteableRegistry, + ThreadContext threadContext + ) { + assertNotNull(threadContext); + return Collections.singletonList(interceptor); + } + }); + + TransportInterceptor transportInterceptor = module.getTransportInterceptor(); + assertEquals(((NetworkModule.CompositeTransportInterceptor) transportInterceptor).transportInterceptors.size(), 2); + + assertEquals(0, called.get()); + assertEquals(0, called1.get()); + transportInterceptor.interceptHandler("foo/bar/boom", null, true, null); + assertEquals(1, called.get()); + assertEquals(1, called1.get()); + transportInterceptor.interceptHandler("foo/baz/boom", null, false, null); + assertEquals(2, called.get()); + assertEquals(2, called1.get()); + } + + public void testInterceptorOrderException() { + Settings settings = Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build(); + AtomicInteger called = new AtomicInteger(0); + AtomicInteger called1 = new AtomicInteger(0); + + TransportInterceptor interceptor = new TransportInterceptor() { + @Override + public <T extends TransportRequest> TransportRequestHandler<T> interceptHandler( + String action, + String executor, + boolean forceExecution, + TransportRequestHandler<T> actualHandler + ) { + called.incrementAndGet(); + if ("foo/bar/boom".equals(action)) { + assertTrue(forceExecution); + } else { + assertFalse(forceExecution); + } + return actualHandler; + } + }; + + TransportInterceptor interceptor1 = new TransportInterceptor() { + @Override + public <T extends TransportRequest> TransportRequestHandler<T> interceptHandler( + String action, + String executor, + boolean forceExecution, + TransportRequestHandler<T> actualHandler + ) { + called1.incrementAndGet(); + throw new RuntimeException("Handler Invoke Failed"); + } + }; + + List<TransportInterceptor> coreTransportInterceptors = new ArrayList<>(); + coreTransportInterceptors.add(interceptor1); + + NetworkModule module = newNetworkModule(settings, coreTransportInterceptors, new NetworkPlugin() { + @Override + public List<TransportInterceptor> getTransportInterceptors( + NamedWriteableRegistry namedWriteableRegistry, + ThreadContext threadContext + ) { + assertNotNull(threadContext); + return Collections.singletonList(interceptor); + } + }); + + TransportInterceptor transportInterceptor = module.getTransportInterceptor(); + assertEquals(((NetworkModule.CompositeTransportInterceptor) transportInterceptor).transportInterceptors.size(), 2); + + assertEquals(0, called.get()); + assertEquals(0, called1.get()); + try { + transportInterceptor.interceptHandler("foo/bar/boom", null, true, null); + 
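// interceptor1 rethrows unconditionally, so this call is expected to fail; if it ever + // completed normally, the assertions in the catch block below would be skipped silently. + 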
} catch (Exception e) { + assertEquals(1, called.get()); + assertEquals(1, called1.get()); + } + + coreTransportInterceptors = new ArrayList<>(); + coreTransportInterceptors.add(interceptor); + module = newNetworkModule(settings, coreTransportInterceptors, new NetworkPlugin() { + @Override + public List<TransportInterceptor> getTransportInterceptors( + NamedWriteableRegistry namedWriteableRegistry, + ThreadContext threadContext + ) { + assertNotNull(threadContext); + return Collections.singletonList(interceptor1); + } + }); + + transportInterceptor = module.getTransportInterceptor(); + + try { + transportInterceptor.interceptHandler("foo/baz/boom", null, false, null); + } catch (Exception e) { + assertEquals(1, called.get()); + assertEquals(2, called1.get()); + } + } + + private NetworkModule newNetworkModule( + Settings settings, + List<TransportInterceptor> coreTransportInterceptors, + NetworkPlugin... plugins + ) { return new NetworkModule( settings, Arrays.asList(plugins), @@ -313,7 +517,9 @@ private NetworkModule newNetworkModule(Settings settings, NetworkPlugin... plugi xContentRegistry(), null, new NullDispatcher(), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + NoopTracer.INSTANCE, + coreTransportInterceptors ); } } diff --git a/server/src/test/java/org/opensearch/common/path/PathTrieTests.java b/server/src/test/java/org/opensearch/common/path/PathTrieTests.java index e366972feeaf2..2f0618ee299b4 100644 --- a/server/src/test/java/org/opensearch/common/path/PathTrieTests.java +++ b/server/src/test/java/org/opensearch/common/path/PathTrieTests.java @@ -36,8 +36,10 @@ import org.opensearch.rest.RestUtils; import org.opensearch.test.OpenSearchTestCase; +import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; +import java.util.List; import java.util.Map; import static org.hamcrest.Matchers.equalTo; @@ -286,4 +288,33 @@ public void testEscapedSlashWithinUrl() { assertThat(params.get("type"), equalTo("type")); assertThat(params.get("id"), equalTo("id")); } + + public void testRetrieveAllEmpty() { + PathTrie<String> trie = new PathTrie<>(NO_DECODER); + Iterator<String> allPaths = trie.retrieveAll(); + assertFalse(allPaths.hasNext()); + } + + public void testRetrieveAll() { + PathTrie<String> trie = new PathTrie<>(NO_DECODER); + trie.insert("{testA}", "test1"); + trie.insert("{testA}/{testB}", "test2"); + trie.insert("a/{testB}", "test3"); + trie.insert("{testA}/b", "test4"); + trie.insert("{testA}/b/c", "test5"); + + Iterator<String> iterator = trie.retrieveAll(); + assertTrue(iterator.hasNext()); + List<String> paths = new ArrayList<>(); + iterator.forEachRemaining(paths::add); + assertEquals(paths, List.of("test1", "test4", "test5", "test2", "test3")); + assertFalse(iterator.hasNext()); + } + + public void testRetrieveAllWithNullValue() { + PathTrie<String> trie = new PathTrie<>(NO_DECODER); + trie.insert("{testA}", null); + Iterator<String> iterator = trie.retrieveAll(); + assertFalse(iterator.hasNext()); + } } diff --git a/server/src/test/java/org/opensearch/common/regex/RegexTests.java b/server/src/test/java/org/opensearch/common/regex/RegexTests.java index b92fcdad56d74..21d3cb2df8f61 100644 --- a/server/src/test/java/org/opensearch/common/regex/RegexTests.java +++ b/server/src/test/java/org/opensearch/common/regex/RegexTests.java @@ -96,7 +96,22 @@ public void testDoubleWildcardMatch() { assertFalse(Regex.simpleMatch("fff**ddd", "fffabcdd"));
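+ // A run of consecutive '*' wildcards behaves like a single '*': the pattern matches + // when its literal segments appear in order, and the trailing boolean selects + // case-insensitive matching.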
assertTrue(Regex.simpleMatch("fff*******ddd", "fffabcddd")); assertTrue(Regex.simpleMatch("fff*******ddd", "FffAbcdDd", true)); + assertFalse(Regex.simpleMatch("fff*******ddd", "FffAbcdDd", false)); assertFalse(Regex.simpleMatch("fff******ddd", "fffabcdd")); + assertTrue(Regex.simpleMatch("abCDefGH******ddd", "abCDefGHddd", false)); + assertTrue(Regex.simpleMatch("******", "a")); + assertTrue(Regex.simpleMatch("***WILDcard***", "aaaaaaaaWILDcardZZZZZZ", false)); + assertFalse(Regex.simpleMatch("***xxxxx123456789xxxxxx***", "xxxxxabcdxxxxx", false)); + assertFalse(Regex.simpleMatch("***xxxxxabcdxxxxx***", "xxxxxABCDxxxxx", false)); + assertTrue(Regex.simpleMatch("***xxxxxabcdxxxxx***", "xxxxxABCDxxxxx", true)); + assertTrue(Regex.simpleMatch("**stephenIsSuperCool**", "ItIsTrueThatStephenIsSuperCoolSoYouShouldLetThisIn", true)); + assertTrue( + Regex.simpleMatch( + "**w**X**y**Z**", + "abcdeFGHIJKLMNOPqrstuvwabcdeFGHIJKLMNOPqrstuvwXabcdeFGHIJKLMNOPqrstuvwXyabcdeFGHIJKLMNOPqrstuvwXyZ", + false + ) + ); } public void testSimpleMatch() { diff --git a/server/src/test/java/org/opensearch/common/rounding/DateTimeUnitTests.java b/server/src/test/java/org/opensearch/common/rounding/DateTimeUnitTests.java deleted file mode 100644 index 7b87e136c5f38..0000000000000 --- a/server/src/test/java/org/opensearch/common/rounding/DateTimeUnitTests.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.common.rounding; - -import org.opensearch.test.OpenSearchTestCase; - -import static org.opensearch.common.rounding.DateTimeUnit.DAY_OF_MONTH; -import static org.opensearch.common.rounding.DateTimeUnit.HOUR_OF_DAY; -import static org.opensearch.common.rounding.DateTimeUnit.MINUTES_OF_HOUR; -import static org.opensearch.common.rounding.DateTimeUnit.MONTH_OF_YEAR; -import static org.opensearch.common.rounding.DateTimeUnit.QUARTER; -import static org.opensearch.common.rounding.DateTimeUnit.SECOND_OF_MINUTE; -import static org.opensearch.common.rounding.DateTimeUnit.WEEK_OF_WEEKYEAR; -import static org.opensearch.common.rounding.DateTimeUnit.YEAR_OF_CENTURY; - -public class DateTimeUnitTests extends OpenSearchTestCase { - - /** - * test that we don't accidentally change enum ids - */ - public void testEnumIds() { - assertEquals(1, WEEK_OF_WEEKYEAR.id()); - assertEquals(WEEK_OF_WEEKYEAR, DateTimeUnit.resolve((byte) 1)); - - assertEquals(2, YEAR_OF_CENTURY.id()); - assertEquals(YEAR_OF_CENTURY, DateTimeUnit.resolve((byte) 2)); - - assertEquals(3, QUARTER.id()); - assertEquals(QUARTER, DateTimeUnit.resolve((byte) 3)); - - assertEquals(4, MONTH_OF_YEAR.id()); - assertEquals(MONTH_OF_YEAR, DateTimeUnit.resolve((byte) 4)); - - assertEquals(5, DAY_OF_MONTH.id()); - assertEquals(DAY_OF_MONTH, DateTimeUnit.resolve((byte) 5)); - - assertEquals(6, HOUR_OF_DAY.id()); - assertEquals(HOUR_OF_DAY, DateTimeUnit.resolve((byte) 6)); - - assertEquals(7, MINUTES_OF_HOUR.id()); - assertEquals(MINUTES_OF_HOUR, DateTimeUnit.resolve((byte) 7)); - - assertEquals(8, SECOND_OF_MINUTE.id()); - assertEquals(SECOND_OF_MINUTE, DateTimeUnit.resolve((byte) 8)); - } -} diff --git a/server/src/test/java/org/opensearch/common/rounding/RoundingDuelTests.java b/server/src/test/java/org/opensearch/common/rounding/RoundingDuelTests.java deleted file mode 100644 index 40f3224d89ff2..0000000000000 --- a/server/src/test/java/org/opensearch/common/rounding/RoundingDuelTests.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.common.rounding; - -import org.joda.time.DateTimeZone; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.test.OpenSearchTestCase; - -import java.time.ZoneOffset; - -import static org.hamcrest.Matchers.is; - -public class RoundingDuelTests extends OpenSearchTestCase { - - // dont include nano/micro seconds as rounding would become zero then and throw an exception - private static final String[] ALLOWED_TIME_SUFFIXES = new String[] { "d", "h", "ms", "s", "m" }; - - public void testDuellingImplementations() { - org.opensearch.common.Rounding.DateTimeUnit randomDateTimeUnit = randomFrom(org.opensearch.common.Rounding.DateTimeUnit.values()); - org.opensearch.common.Rounding.Prepared rounding; - Rounding roundingJoda; - - if (randomBoolean()) { - rounding = org.opensearch.common.Rounding.builder(randomDateTimeUnit).timeZone(ZoneOffset.UTC).build().prepareForUnknown(); - DateTimeUnit dateTimeUnit = DateTimeUnit.resolve(randomDateTimeUnit.getId()); - roundingJoda = Rounding.builder(dateTimeUnit).timeZone(DateTimeZone.UTC).build(); - } else { - TimeValue interval = timeValue(); - rounding = org.opensearch.common.Rounding.builder(interval).timeZone(ZoneOffset.UTC).build().prepareForUnknown(); - roundingJoda = Rounding.builder(interval).timeZone(DateTimeZone.UTC).build(); - } - - long roundValue = randomLong(); - assertThat(roundingJoda.round(roundValue), is(rounding.round(roundValue))); - } - - static TimeValue timeValue() { - return TimeValue.parseTimeValue(randomIntBetween(1, 1000) + randomFrom(ALLOWED_TIME_SUFFIXES), "settingName"); - } -} diff --git a/server/src/test/java/org/opensearch/common/rounding/TimeZoneRoundingTests.java b/server/src/test/java/org/opensearch/common/rounding/TimeZoneRoundingTests.java deleted file mode 100644 index 0f9aa51b7b26e..0000000000000 --- a/server/src/test/java/org/opensearch/common/rounding/TimeZoneRoundingTests.java +++ /dev/null @@ -1,822 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.common.rounding; - -import org.opensearch.common.collect.Tuple; -import org.opensearch.common.rounding.Rounding.TimeIntervalRounding; -import org.opensearch.common.rounding.Rounding.TimeUnitRounding; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.test.OpenSearchTestCase; -import org.hamcrest.Description; -import org.hamcrest.Matcher; -import org.hamcrest.TypeSafeMatcher; -import org.joda.time.DateTime; -import org.joda.time.DateTimeConstants; -import org.joda.time.DateTimeZone; -import org.joda.time.format.DateTimeFormat; -import org.joda.time.format.DateTimeFormatter; -import org.joda.time.format.ISODateTimeFormat; - -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.TimeUnit; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.lessThan; -import static org.hamcrest.Matchers.lessThanOrEqualTo; -import static org.hamcrest.Matchers.startsWith; - -public class TimeZoneRoundingTests extends OpenSearchTestCase { - - public void testUTCTimeUnitRounding() { - Rounding tzRounding = Rounding.builder(DateTimeUnit.MONTH_OF_YEAR).build(); - DateTimeZone tz = DateTimeZone.UTC; - assertThat(tzRounding.round(time("2009-02-03T01:01:01")), isDate(time("2009-02-01T00:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-01T00:00:00.000Z")), isDate(time("2009-03-01T00:00:00.000Z"), tz)); - - tzRounding = Rounding.builder(DateTimeUnit.WEEK_OF_WEEKYEAR).build(); - assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-09T00:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-01-16T00:00:00.000Z"), tz)); - - tzRounding = Rounding.builder(DateTimeUnit.QUARTER).build(); - assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-01T00:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-04-01T00:00:00.000Z"), tz)); - - tzRounding = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).build(); - assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-10T01:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-01-09T01:00:00.000Z"), tz)); - - tzRounding = Rounding.builder(DateTimeUnit.DAY_OF_MONTH).build(); - assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-10T00:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-01-10T00:00:00.000Z"), tz)); - - tzRounding = Rounding.builder(DateTimeUnit.YEAR_OF_CENTURY).build(); - assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-01T00:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2013-01-01T00:00:00.000Z"), tz)); - - tzRounding = Rounding.builder(DateTimeUnit.MINUTES_OF_HOUR).build(); - assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-10T01:01:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-01-09T00:01:00.000Z"), tz)); - - tzRounding = Rounding.builder(DateTimeUnit.SECOND_OF_MINUTE).build(); - assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-10T01:01:01.000Z"), tz)); - 
assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-01-09T00:00:01.000Z"), tz)); - } - - public void testUTCIntervalRounding() { - Rounding tzRounding = Rounding.builder(TimeValue.timeValueHours(12)).build(); - DateTimeZone tz = DateTimeZone.UTC; - assertThat(tzRounding.round(time("2009-02-03T01:01:01")), isDate(time("2009-02-03T00:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-03T00:00:00.000Z")), isDate(time("2009-02-03T12:00:00.000Z"), tz)); - assertThat(tzRounding.round(time("2009-02-03T13:01:01")), isDate(time("2009-02-03T12:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-03T12:00:00.000Z")), isDate(time("2009-02-04T00:00:00.000Z"), tz)); - - tzRounding = Rounding.builder(TimeValue.timeValueHours(48)).build(); - assertThat(tzRounding.round(time("2009-02-03T01:01:01")), isDate(time("2009-02-03T00:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-03T00:00:00.000Z")), isDate(time("2009-02-05T00:00:00.000Z"), tz)); - assertThat(tzRounding.round(time("2009-02-05T13:01:01")), isDate(time("2009-02-05T00:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-05T00:00:00.000Z")), isDate(time("2009-02-07T00:00:00.000Z"), tz)); - } - - /** - * test TimeIntervalRounding, (interval < 12h) with time zone shift - */ - public void testTimeIntervalRounding() { - DateTimeZone tz = DateTimeZone.forOffsetHours(-1); - Rounding tzRounding = Rounding.builder(TimeValue.timeValueHours(6)).timeZone(tz).build(); - assertThat(tzRounding.round(time("2009-02-03T00:01:01")), isDate(time("2009-02-02T19:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-02T19:00:00.000Z")), isDate(time("2009-02-03T01:00:00.000Z"), tz)); - - assertThat(tzRounding.round(time("2009-02-03T13:01:01")), isDate(time("2009-02-03T13:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-03T13:00:00.000Z")), isDate(time("2009-02-03T19:00:00.000Z"), tz)); - } - - /** - * test DayIntervalRounding, (interval >= 12h) with time zone shift - */ - public void testDayIntervalRounding() { - DateTimeZone tz = DateTimeZone.forOffsetHours(-8); - Rounding tzRounding = Rounding.builder(TimeValue.timeValueHours(12)).timeZone(tz).build(); - assertThat(tzRounding.round(time("2009-02-03T00:01:01")), isDate(time("2009-02-02T20:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-02T20:00:00.000Z")), isDate(time("2009-02-03T08:00:00.000Z"), tz)); - - assertThat(tzRounding.round(time("2009-02-03T13:01:01")), isDate(time("2009-02-03T08:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-03T08:00:00.000Z")), isDate(time("2009-02-03T20:00:00.000Z"), tz)); - } - - public void testDayRounding() { - int timezoneOffset = -2; - Rounding tzRounding = Rounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(DateTimeZone.forOffsetHours(timezoneOffset)).build(); - assertThat(tzRounding.round(0), equalTo(0L - TimeValue.timeValueHours(24 + timezoneOffset).millis())); - assertThat( - tzRounding.nextRoundingValue(0L - TimeValue.timeValueHours(24 + timezoneOffset).millis()), - equalTo(TimeValue.timeValueHours(-timezoneOffset).millis()) - ); - - DateTimeZone tz = DateTimeZone.forID("-08:00"); - tzRounding = Rounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build(); - assertThat(tzRounding.round(time("2012-04-01T04:15:30Z")), isDate(time("2012-03-31T08:00:00Z"), tz)); - - tzRounding = 
Rounding.builder(DateTimeUnit.MONTH_OF_YEAR).timeZone(tz).build(); - assertThat(tzRounding.round(time("2012-04-01T04:15:30Z")), equalTo(time("2012-03-01T08:00:00Z"))); - - // date in Feb-3rd, but still in Feb-2nd in -02:00 timezone - tz = DateTimeZone.forID("-02:00"); - tzRounding = Rounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build(); - assertThat(tzRounding.round(time("2009-02-03T01:01:01")), isDate(time("2009-02-02T02:00:00"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-02T02:00:00")), isDate(time("2009-02-03T02:00:00"), tz)); - - // date in Feb-3rd, also in -02:00 timezone - tzRounding = Rounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build(); - assertThat(tzRounding.round(time("2009-02-03T02:01:01")), isDate(time("2009-02-03T02:00:00"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-03T02:00:00")), isDate(time("2009-02-04T02:00:00"), tz)); - } - - public void testTimeRounding() { - // hour unit - DateTimeZone tz = DateTimeZone.forOffsetHours(-2); - Rounding tzRounding = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(tz).build(); - assertThat(tzRounding.round(0), equalTo(0L)); - assertThat(tzRounding.nextRoundingValue(0L), equalTo(TimeValue.timeValueHours(1L).getMillis())); - - assertThat(tzRounding.round(time("2009-02-03T01:01:01")), isDate(time("2009-02-03T01:00:00"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-03T01:00:00")), isDate(time("2009-02-03T02:00:00"), tz)); - } - - public void testTimeUnitRoundingDST() { - Rounding tzRounding; - // testing savings to non savings switch - DateTimeZone cet = DateTimeZone.forID("CET"); - tzRounding = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(cet).build(); - assertThat(tzRounding.round(time("2014-10-26T01:01:01", cet)), isDate(time("2014-10-26T01:00:00+02:00"), cet)); - assertThat(tzRounding.nextRoundingValue(time("2014-10-26T01:00:00", cet)), isDate(time("2014-10-26T02:00:00+02:00"), cet)); - assertThat(tzRounding.nextRoundingValue(time("2014-10-26T02:00:00", cet)), isDate(time("2014-10-26T02:00:00+01:00"), cet)); - - // testing non savings to savings switch - tzRounding = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(cet).build(); - assertThat(tzRounding.round(time("2014-03-30T01:01:01", cet)), isDate(time("2014-03-30T01:00:00+01:00"), cet)); - assertThat(tzRounding.nextRoundingValue(time("2014-03-30T01:00:00", cet)), isDate(time("2014-03-30T03:00:00", cet), cet)); - assertThat(tzRounding.nextRoundingValue(time("2014-03-30T03:00:00", cet)), isDate(time("2014-03-30T04:00:00", cet), cet)); - - // testing non savings to savings switch (America/Chicago) - DateTimeZone chg = DateTimeZone.forID("America/Chicago"); - Rounding tzRounding_utc = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.UTC).build(); - assertThat(tzRounding.round(time("2014-03-09T03:01:01", chg)), isDate(time("2014-03-09T03:00:00", chg), chg)); - - Rounding tzRounding_chg = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(chg).build(); - assertThat(tzRounding_chg.round(time("2014-03-09T03:01:01", chg)), isDate(time("2014-03-09T03:00:00", chg), chg)); - - // testing savings to non savings switch 2013 (America/Chicago) - assertThat(tzRounding_utc.round(time("2013-11-03T06:01:01", chg)), isDate(time("2013-11-03T06:00:00", chg), chg)); - assertThat(tzRounding_chg.round(time("2013-11-03T06:01:01", chg)), isDate(time("2013-11-03T06:00:00", chg), chg)); - - // testing savings to non savings switch 2014 (America/Chicago) - 
assertThat(tzRounding_utc.round(time("2014-11-02T06:01:01", chg)), isDate(time("2014-11-02T06:00:00", chg), chg)); - assertThat(tzRounding_chg.round(time("2014-11-02T06:01:01", chg)), isDate(time("2014-11-02T06:00:00", chg), chg)); - } - - /** - * Randomized test on TimeUnitRounding. Test uses random - * {@link DateTimeUnit} and {@link DateTimeZone} and often (50% of the time) - * chooses test dates that are exactly on or close to offset changes (e.g. - * DST) in the chosen time zone. - * - * It rounds the test date down and up and performs various checks on the - * rounding unit interval that is defined by this. Assumptions tested are - * described in - * {@link #assertInterval(long, long, long, Rounding, DateTimeZone)} - */ - public void testRoundingRandom() { - for (int i = 0; i < 1000; ++i) { - DateTimeUnit timeUnit = randomTimeUnit(); - DateTimeZone tz = randomDateTimeZone(); - Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz); - long date = Math.abs(randomLong() % (2 * (long) 10e11)); // 1970-01-01T00:00:00Z - 2033-05-18T05:33:20.000+02:00 - long unitMillis = timeUnit.field(tz).getDurationField().getUnitMillis(); - if (randomBoolean()) { - nastyDate(date, tz, unitMillis); - } - final long roundedDate = rounding.round(date); - final long nextRoundingValue = rounding.nextRoundingValue(roundedDate); - - assertInterval(roundedDate, date, nextRoundingValue, rounding, tz); - - // check correct unit interval width for units smaller than a day, they should be fixed size except for transitions - if (unitMillis <= DateTimeConstants.MILLIS_PER_DAY) { - // if the interval defined didn't cross timezone offset transition, it should cover unitMillis width - if (tz.getOffset(roundedDate - 1) == tz.getOffset(nextRoundingValue + 1)) { - assertThat( - "unit interval width not as expected for [" + timeUnit + "], [" + tz + "] at " + new DateTime(roundedDate), - nextRoundingValue - roundedDate, - equalTo(unitMillis) - ); - } - } - } - } - - /** - * To be even more nasty, go to a transition in the selected time zone. 
- * In one third of the cases stay there, otherwise go half a unit back or forth - */ - private static long nastyDate(long initialDate, DateTimeZone timezone, long unitMillis) { - long date = timezone.nextTransition(initialDate); - if (randomBoolean()) { - return date + (randomLong() % unitMillis); // positive and negative offset possible - } else { - return date; - } - } - - /** - * test DST end with interval rounding - * CET: 25 October 2015, 03:00:00 clocks were turned backward 1 hour to 25 October 2015, 02:00:00 local standard time - */ - public void testTimeIntervalCET_DST_End() { - long interval = TimeUnit.MINUTES.toMillis(20); - DateTimeZone tz = DateTimeZone.forID("CET"); - Rounding rounding = new TimeIntervalRounding(interval, tz); - - assertThat(rounding.round(time("2015-10-25T01:55:00+02:00")), isDate(time("2015-10-25T01:40:00+02:00"), tz)); - assertThat(rounding.round(time("2015-10-25T02:15:00+02:00")), isDate(time("2015-10-25T02:00:00+02:00"), tz)); - assertThat(rounding.round(time("2015-10-25T02:35:00+02:00")), isDate(time("2015-10-25T02:20:00+02:00"), tz)); - assertThat(rounding.round(time("2015-10-25T02:55:00+02:00")), isDate(time("2015-10-25T02:40:00+02:00"), tz)); - // after DST shift - assertThat(rounding.round(time("2015-10-25T02:15:00+01:00")), isDate(time("2015-10-25T02:00:00+01:00"), tz)); - assertThat(rounding.round(time("2015-10-25T02:35:00+01:00")), isDate(time("2015-10-25T02:20:00+01:00"), tz)); - assertThat(rounding.round(time("2015-10-25T02:55:00+01:00")), isDate(time("2015-10-25T02:40:00+01:00"), tz)); - assertThat(rounding.round(time("2015-10-25T03:15:00+01:00")), isDate(time("2015-10-25T03:00:00+01:00"), tz)); - } - - /** - * test DST start with interval rounding - * CET: 27 March 2016, 02:00:00 clocks were turned forward 1 hour to 27 March 2016, 03:00:00 local daylight time - */ - public void testTimeIntervalCET_DST_Start() { - long interval = TimeUnit.MINUTES.toMillis(20); - DateTimeZone tz = DateTimeZone.forID("CET"); - Rounding rounding = new TimeIntervalRounding(interval, tz); - // test DST start - assertThat(rounding.round(time("2016-03-27T01:55:00+01:00")), isDate(time("2016-03-27T01:40:00+01:00"), tz)); - assertThat(rounding.round(time("2016-03-27T02:00:00+01:00")), isDate(time("2016-03-27T03:00:00+02:00"), tz)); - assertThat(rounding.round(time("2016-03-27T03:15:00+02:00")), isDate(time("2016-03-27T03:00:00+02:00"), tz)); - assertThat(rounding.round(time("2016-03-27T03:35:00+02:00")), isDate(time("2016-03-27T03:20:00+02:00"), tz)); - } - - /** - * test DST start with offset not fitting interval, e.g. 
Asia/Kathmandu - * adding 15min on 1986-01-01T00:00:00 the interval from - * 1986-01-01T00:15:00+05:45 to 1986-01-01T00:20:00+05:45 to only be 5min - * long - */ - public void testTimeInterval_Kathmandu_DST_Start() { - long interval = TimeUnit.MINUTES.toMillis(20); - DateTimeZone tz = DateTimeZone.forID("Asia/Kathmandu"); - Rounding rounding = new TimeIntervalRounding(interval, tz); - assertThat(rounding.round(time("1985-12-31T23:55:00+05:30")), isDate(time("1985-12-31T23:40:00+05:30"), tz)); - assertThat(rounding.round(time("1986-01-01T00:16:00+05:45")), isDate(time("1986-01-01T00:15:00+05:45"), tz)); - assertThat(time("1986-01-01T00:15:00+05:45") - time("1985-12-31T23:40:00+05:30"), equalTo(TimeUnit.MINUTES.toMillis(20))); - assertThat(rounding.round(time("1986-01-01T00:26:00+05:45")), isDate(time("1986-01-01T00:20:00+05:45"), tz)); - assertThat(time("1986-01-01T00:20:00+05:45") - time("1986-01-01T00:15:00+05:45"), equalTo(TimeUnit.MINUTES.toMillis(5))); - assertThat(rounding.round(time("1986-01-01T00:46:00+05:45")), isDate(time("1986-01-01T00:40:00+05:45"), tz)); - assertThat(time("1986-01-01T00:40:00+05:45") - time("1986-01-01T00:20:00+05:45"), equalTo(TimeUnit.MINUTES.toMillis(20))); - } - - /** - * Special test for intervals that don't fit evenly into rounding interval. - * In this case, when interval crosses DST transition point, rounding in local - * time can land in a DST gap which results in wrong UTC rounding values. - */ - public void testIntervalRounding_NotDivisibleInteval() { - DateTimeZone tz = DateTimeZone.forID("CET"); - long interval = TimeUnit.MINUTES.toMillis(14); - Rounding rounding = new Rounding.TimeIntervalRounding(interval, tz); - - assertThat(rounding.round(time("2016-03-27T01:41:00+01:00")), isDate(time("2016-03-27T01:30:00+01:00"), tz)); - assertThat(rounding.round(time("2016-03-27T01:51:00+01:00")), isDate(time("2016-03-27T01:44:00+01:00"), tz)); - assertThat(rounding.round(time("2016-03-27T01:59:00+01:00")), isDate(time("2016-03-27T01:58:00+01:00"), tz)); - assertThat(rounding.round(time("2016-03-27T03:05:00+02:00")), isDate(time("2016-03-27T03:00:00+02:00"), tz)); - assertThat(rounding.round(time("2016-03-27T03:12:00+02:00")), isDate(time("2016-03-27T03:08:00+02:00"), tz)); - assertThat(rounding.round(time("2016-03-27T03:25:00+02:00")), isDate(time("2016-03-27T03:22:00+02:00"), tz)); - assertThat(rounding.round(time("2016-03-27T03:39:00+02:00")), isDate(time("2016-03-27T03:36:00+02:00"), tz)); - } - - /** - * Test for half day rounding intervals scrossing DST. 
- */ - public void testIntervalRounding_HalfDay_DST() { - DateTimeZone tz = DateTimeZone.forID("CET"); - long interval = TimeUnit.HOURS.toMillis(12); - Rounding rounding = new Rounding.TimeIntervalRounding(interval, tz); - - assertThat(rounding.round(time("2016-03-26T01:00:00+01:00")), isDate(time("2016-03-26T00:00:00+01:00"), tz)); - assertThat(rounding.round(time("2016-03-26T13:00:00+01:00")), isDate(time("2016-03-26T12:00:00+01:00"), tz)); - assertThat(rounding.round(time("2016-03-27T01:00:00+01:00")), isDate(time("2016-03-27T00:00:00+01:00"), tz)); - assertThat(rounding.round(time("2016-03-27T13:00:00+02:00")), isDate(time("2016-03-27T12:00:00+02:00"), tz)); - assertThat(rounding.round(time("2016-03-28T01:00:00+02:00")), isDate(time("2016-03-28T00:00:00+02:00"), tz)); - assertThat(rounding.round(time("2016-03-28T13:00:00+02:00")), isDate(time("2016-03-28T12:00:00+02:00"), tz)); - } - - /** - * randomized test on {@link TimeIntervalRounding} with random interval and time zone offsets - */ - public void testIntervalRoundingRandom() { - for (int i = 0; i < 1000; i++) { - TimeUnit unit = randomFrom(new TimeUnit[] { TimeUnit.MINUTES, TimeUnit.HOURS, TimeUnit.DAYS }); - long interval = unit.toMillis(randomIntBetween(1, 365)); - DateTimeZone tz = randomDateTimeZone(); - Rounding rounding = new Rounding.TimeIntervalRounding(interval, tz); - long mainDate = Math.abs(randomLong() % (2 * (long) 10e11)); // 1970-01-01T00:00:00Z - 2033-05-18T05:33:20.000+02:00 - if (randomBoolean()) { - mainDate = nastyDate(mainDate, tz, interval); - } - // check two intervals around date - long previousRoundedValue = Long.MIN_VALUE; - for (long date = mainDate - 2 * interval; date < mainDate + 2 * interval; date += interval / 2) { - try { - final long roundedDate = rounding.round(date); - final long nextRoundingValue = rounding.nextRoundingValue(roundedDate); - assertThat("Rounding should be idempotent", roundedDate, equalTo(rounding.round(roundedDate))); - assertThat("Rounded value smaller or equal than unrounded", roundedDate, lessThanOrEqualTo(date)); - assertThat( - "Values smaller than rounded value should round further down", - rounding.round(roundedDate - 1), - lessThan(roundedDate) - ); - assertThat("Rounding should be >= previous rounding value", roundedDate, greaterThanOrEqualTo(previousRoundedValue)); - - if (tz.isFixed()) { - assertThat("NextRounding value should be greater than date", nextRoundingValue, greaterThan(roundedDate)); - assertThat( - "NextRounding value should be interval from rounded value", - nextRoundingValue - roundedDate, - equalTo(interval) - ); - assertThat( - "NextRounding value should be a rounded date", - nextRoundingValue, - equalTo(rounding.round(nextRoundingValue)) - ); - } - previousRoundedValue = roundedDate; - } catch (AssertionError e) { - logger.error("Rounding error at {}, timezone {}, interval: {},", new DateTime(date, tz), tz, interval); - throw e; - } - } - } - } - - /** - * Test that rounded values are always greater or equal to last rounded value if date is increasing. 
- * The example covers an interval around 2011-10-30T02:10:00+01:00, time zone CET, interval: 2700000ms - */ - public void testIntervalRoundingMonotonic_CET() { - long interval = TimeUnit.MINUTES.toMillis(45); - DateTimeZone tz = DateTimeZone.forID("CET"); - Rounding rounding = new Rounding.TimeIntervalRounding(interval, tz); - List<Tuple<String, String>> expectedDates = new ArrayList<>(); - // first date is the date to be rounded, second the expected result - expectedDates.add(new Tuple<>("2011-10-30T01:40:00.000+02:00", "2011-10-30T01:30:00.000+02:00")); - expectedDates.add(new Tuple<>("2011-10-30T02:02:30.000+02:00", "2011-10-30T01:30:00.000+02:00")); - expectedDates.add(new Tuple<>("2011-10-30T02:25:00.000+02:00", "2011-10-30T02:15:00.000+02:00")); - expectedDates.add(new Tuple<>("2011-10-30T02:47:30.000+02:00", "2011-10-30T02:15:00.000+02:00")); - expectedDates.add(new Tuple<>("2011-10-30T02:10:00.000+01:00", "2011-10-30T02:15:00.000+02:00")); - expectedDates.add(new Tuple<>("2011-10-30T02:32:30.000+01:00", "2011-10-30T02:15:00.000+01:00")); - expectedDates.add(new Tuple<>("2011-10-30T02:55:00.000+01:00", "2011-10-30T02:15:00.000+01:00")); - expectedDates.add(new Tuple<>("2011-10-30T03:17:30.000+01:00", "2011-10-30T03:00:00.000+01:00")); - - long previousDate = Long.MIN_VALUE; - for (Tuple<String, String> dates : expectedDates) { - final long roundedDate = rounding.round(time(dates.v1())); - assertThat(roundedDate, isDate(time(dates.v2()), tz)); - assertThat(roundedDate, greaterThanOrEqualTo(previousDate)); - previousDate = roundedDate; - } - // here's what this means for interval widths - assertEquals(TimeUnit.MINUTES.toMillis(45), time("2011-10-30T02:15:00.000+02:00") - time("2011-10-30T01:30:00.000+02:00")); - assertEquals(TimeUnit.MINUTES.toMillis(60), time("2011-10-30T02:15:00.000+01:00") - time("2011-10-30T02:15:00.000+02:00")); - assertEquals(TimeUnit.MINUTES.toMillis(45), time("2011-10-30T03:00:00.000+01:00") - time("2011-10-30T02:15:00.000+01:00")); - } - - /** - * special test for DST switch from #9491 - */ - public void testAmbiguousHoursAfterDSTSwitch() { - Rounding tzRounding; - final DateTimeZone tz = DateTimeZone.forID("Asia/Jerusalem"); - tzRounding = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(tz).build(); - assertThat(tzRounding.round(time("2014-10-26T00:30:00+03:00")), isDate(time("2014-10-26T00:00:00+03:00"), tz)); - assertThat(tzRounding.round(time("2014-10-26T01:30:00+03:00")), isDate(time("2014-10-26T01:00:00+03:00"), tz)); - // the utc date for "2014-10-25T03:00:00+03:00" and "2014-10-25T03:00:00+02:00" is the same, local time turns back 1h here - assertThat(time("2014-10-26T03:00:00+03:00"), isDate(time("2014-10-26T02:00:00+02:00"), tz)); - assertThat(tzRounding.round(time("2014-10-26T01:30:00+02:00")), isDate(time("2014-10-26T01:00:00+02:00"), tz)); - assertThat(tzRounding.round(time("2014-10-26T02:30:00+02:00")), isDate(time("2014-10-26T02:00:00+02:00"), tz)); - - // Day interval - tzRounding = Rounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build(); - assertThat(tzRounding.round(time("2014-11-11T17:00:00", tz)), isDate(time("2014-11-11T00:00:00", tz), tz)); - // DST on - assertThat(tzRounding.round(time("2014-08-11T17:00:00", tz)), isDate(time("2014-08-11T00:00:00", tz), tz)); - // Day of switching DST on -> off - assertThat(tzRounding.round(time("2014-10-26T17:00:00", tz)), isDate(time("2014-10-26T00:00:00", tz), tz)); - // Day of switching DST off -> on - assertThat(tzRounding.round(time("2015-03-27T17:00:00", tz)), 
isDate(time("2015-03-27T00:00:00", tz), tz)); - - // Month interval - tzRounding = Rounding.builder(DateTimeUnit.MONTH_OF_YEAR).timeZone(tz).build(); - assertThat(tzRounding.round(time("2014-11-11T17:00:00", tz)), isDate(time("2014-11-01T00:00:00", tz), tz)); - // DST on - assertThat(tzRounding.round(time("2014-10-10T17:00:00", tz)), isDate(time("2014-10-01T00:00:00", tz), tz)); - - // Year interval - tzRounding = Rounding.builder(DateTimeUnit.YEAR_OF_CENTURY).timeZone(tz).build(); - assertThat(tzRounding.round(time("2014-11-11T17:00:00", tz)), isDate(time("2014-01-01T00:00:00", tz), tz)); - - // Two timestamps in same year and different timezone offset ("Double buckets" issue - #9491) - tzRounding = Rounding.builder(DateTimeUnit.YEAR_OF_CENTURY).timeZone(tz).build(); - assertThat(tzRounding.round(time("2014-11-11T17:00:00", tz)), isDate(tzRounding.round(time("2014-08-11T17:00:00", tz)), tz)); - } - - /** - * test for #10025, strict local to UTC conversion can cause joda exceptions - * on DST start - */ - public void testLenientConversionDST() { - DateTimeZone tz = DateTimeZone.forID("America/Sao_Paulo"); - long start = time("2014-10-18T20:50:00.000", tz); - long end = time("2014-10-19T01:00:00.000", tz); - Rounding tzRounding = new Rounding.TimeUnitRounding(DateTimeUnit.MINUTES_OF_HOUR, tz); - Rounding dayTzRounding = new Rounding.TimeIntervalRounding(60000, tz); - for (long time = start; time < end; time = time + 60000) { - assertThat(tzRounding.nextRoundingValue(time), greaterThan(time)); - assertThat(dayTzRounding.nextRoundingValue(time), greaterThan(time)); - } - } - - public void testEdgeCasesTransition() { - { - // standard +/-1 hour DST transition, CET - DateTimeUnit timeUnit = DateTimeUnit.HOUR_OF_DAY; - DateTimeZone tz = DateTimeZone.forID("CET"); - Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz); - - // 29 Mar 2015 - Daylight Saving Time Started - // at 02:00:00 clocks were turned forward 1 hour to 03:00:00 - assertInterval(time("2015-03-29T00:00:00.000+01:00"), time("2015-03-29T01:00:00.000+01:00"), rounding, 60, tz); - assertInterval(time("2015-03-29T01:00:00.000+01:00"), time("2015-03-29T03:00:00.000+02:00"), rounding, 60, tz); - assertInterval(time("2015-03-29T03:00:00.000+02:00"), time("2015-03-29T04:00:00.000+02:00"), rounding, 60, tz); - - // 25 Oct 2015 - Daylight Saving Time Ended - // at 03:00:00 clocks were turned backward 1 hour to 02:00:00 - assertInterval(time("2015-10-25T01:00:00.000+02:00"), time("2015-10-25T02:00:00.000+02:00"), rounding, 60, tz); - assertInterval(time("2015-10-25T02:00:00.000+02:00"), time("2015-10-25T02:00:00.000+01:00"), rounding, 60, tz); - assertInterval(time("2015-10-25T02:00:00.000+01:00"), time("2015-10-25T03:00:00.000+01:00"), rounding, 60, tz); - } - - { - // time zone "Asia/Kathmandu" - // 1 Jan 1986 - Time Zone Change (IST → NPT), at 00:00:00 clocks were turned forward 00:15 minutes - // - // hour rounding is stable before 1985-12-31T23:00:00.000 and after 1986-01-01T01:00:00.000+05:45 - // the interval between is 105 minutes long because the hour after transition starts at 00:15 - // which is not a round value for hourly rounding - DateTimeUnit timeUnit = DateTimeUnit.HOUR_OF_DAY; - DateTimeZone tz = DateTimeZone.forID("Asia/Kathmandu"); - Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz); - - assertInterval(time("1985-12-31T22:00:00.000+05:30"), time("1985-12-31T23:00:00.000+05:30"), rounding, 60, tz); - assertInterval(time("1985-12-31T23:00:00.000+05:30"), time("1986-01-01T01:00:00.000+05:45"), 
rounding, 105, tz); - assertInterval(time("1986-01-01T01:00:00.000+05:45"), time("1986-01-01T02:00:00.000+05:45"), rounding, 60, tz); - } - - { - // time zone "Australia/Lord_Howe" - // 3 Mar 1991 - Daylight Saving Time Ended - // at 02:00:00 clocks were turned backward 0:30 hours to Sunday, 3 March 1991, 01:30:00 - DateTimeUnit timeUnit = DateTimeUnit.HOUR_OF_DAY; - DateTimeZone tz = DateTimeZone.forID("Australia/Lord_Howe"); - Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz); - - assertInterval(time("1991-03-03T00:00:00.000+11:00"), time("1991-03-03T01:00:00.000+11:00"), rounding, 60, tz); - assertInterval(time("1991-03-03T01:00:00.000+11:00"), time("1991-03-03T02:00:00.000+10:30"), rounding, 90, tz); - assertInterval(time("1991-03-03T02:00:00.000+10:30"), time("1991-03-03T03:00:00.000+10:30"), rounding, 60, tz); - - // 27 Oct 1991 - Daylight Saving Time Started - // at 02:00:00 clocks were turned forward 0:30 hours to 02:30:00 - assertInterval(time("1991-10-27T00:00:00.000+10:30"), time("1991-10-27T01:00:00.000+10:30"), rounding, 60, tz); - // the interval containing the switch time is 90 minutes long - assertInterval(time("1991-10-27T01:00:00.000+10:30"), time("1991-10-27T03:00:00.000+11:00"), rounding, 90, tz); - assertInterval(time("1991-10-27T03:00:00.000+11:00"), time("1991-10-27T04:00:00.000+11:00"), rounding, 60, tz); - } - - { - // time zone "Pacific/Chatham" - // 5 Apr 2015 - Daylight Saving Time Ended - // at 03:45:00 clocks were turned backward 1 hour to 02:45:00 - DateTimeUnit timeUnit = DateTimeUnit.HOUR_OF_DAY; - DateTimeZone tz = DateTimeZone.forID("Pacific/Chatham"); - Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz); - - assertInterval(time("2015-04-05T02:00:00.000+13:45"), time("2015-04-05T03:00:00.000+13:45"), rounding, 60, tz); - assertInterval(time("2015-04-05T03:00:00.000+13:45"), time("2015-04-05T03:00:00.000+12:45"), rounding, 60, tz); - assertInterval(time("2015-04-05T03:00:00.000+12:45"), time("2015-04-05T04:00:00.000+12:45"), rounding, 60, tz); - - // 27 Sep 2015 - Daylight Saving Time Started - // at 02:45:00 clocks were turned forward 1 hour to 03:45:00 - - assertInterval(time("2015-09-27T01:00:00.000+12:45"), time("2015-09-27T02:00:00.000+12:45"), rounding, 60, tz); - assertInterval(time("2015-09-27T02:00:00.000+12:45"), time("2015-09-27T04:00:00.000+13:45"), rounding, 60, tz); - assertInterval(time("2015-09-27T04:00:00.000+13:45"), time("2015-09-27T05:00:00.000+13:45"), rounding, 60, tz); - } - } - - public void testDST_Europe_Rome() { - // time zone "Europe/Rome", rounding to days. Rome had two midnights on the day the clocks went back in 1978, and - // timeZone.convertLocalToUTC() gives the later of the two because Rome is east of UTC, whereas we want the earlier. 
- - DateTimeUnit timeUnit = DateTimeUnit.DAY_OF_MONTH; - DateTimeZone tz = DateTimeZone.forID("Europe/Rome"); - Rounding rounding = new TimeUnitRounding(timeUnit, tz); - - { - long timeBeforeFirstMidnight = time("1978-09-30T23:59:00+02:00"); - long floor = rounding.round(timeBeforeFirstMidnight); - assertThat(floor, isDate(time("1978-09-30T00:00:00+02:00"), tz)); - } - - { - long timeBetweenMidnights = time("1978-10-01T00:30:00+02:00"); - long floor = rounding.round(timeBetweenMidnights); - assertThat(floor, isDate(time("1978-10-01T00:00:00+02:00"), tz)); - } - - { - long timeAfterSecondMidnight = time("1978-10-01T00:30:00+01:00"); - long floor = rounding.round(timeAfterSecondMidnight); - assertThat(floor, isDate(time("1978-10-01T00:00:00+02:00"), tz)); - - long prevFloor = rounding.round(floor - 1); - assertThat(prevFloor, lessThan(floor)); - assertThat(prevFloor, isDate(time("1978-09-30T00:00:00+02:00"), tz)); - } - } - - /** - * Test for a time zone whose days overlap because the clocks are set back across midnight at the end of DST. - */ - public void testDST_America_St_Johns() { - // time zone "America/St_Johns", rounding to days. - DateTimeUnit timeUnit = DateTimeUnit.DAY_OF_MONTH; - DateTimeZone tz = DateTimeZone.forID("America/St_Johns"); - Rounding rounding = new TimeUnitRounding(timeUnit, tz); - - // 29 October 2006 - Daylight Saving Time ended, changing the UTC offset from -02:30 to -03:30. - // This happened at 02:31 UTC, 00:01 local time, so the clocks were set back 1 hour to 23:01 on the 28th. - // This means that 2006-10-29 has _two_ midnights, one in the -02:30 offset and one in the -03:30 offset. - // Only the first of these is considered "rounded". Moreover, the extra time between 23:01 and 23:59 - // should be considered as part of the 28th even though it comes after midnight on the 29th. - - { - // Times before the first midnight should be rounded up to the first midnight. - long timeBeforeFirstMidnight = time("2006-10-28T23:30:00.000-02:30"); - long floor = rounding.round(timeBeforeFirstMidnight); - assertThat(floor, isDate(time("2006-10-28T00:00:00.000-02:30"), tz)); - long ceiling = rounding.nextRoundingValue(timeBeforeFirstMidnight); - assertThat(ceiling, isDate(time("2006-10-29T00:00:00.000-02:30"), tz)); - assertInterval(floor, timeBeforeFirstMidnight, ceiling, rounding, tz); - } - - { - // Times between the two midnights which are on the later day should be rounded down to the later day's midnight. - long timeBetweenMidnights = time("2006-10-29T00:00:30.000-02:30"); - // (this is halfway through the last minute before the clocks changed, in which local time was ambiguous) - - long floor = rounding.round(timeBetweenMidnights); - assertThat(floor, isDate(time("2006-10-29T00:00:00.000-02:30"), tz)); - - long ceiling = rounding.nextRoundingValue(timeBetweenMidnights); - assertThat(ceiling, isDate(time("2006-10-30T00:00:00.000-03:30"), tz)); - - assertInterval(floor, timeBetweenMidnights, ceiling, rounding, tz); - } - - { - // Times between the two midnights which are on the earlier day should be rounded down to the earlier day's midnight. 
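// [editor's note] In the block below, the -03:30 offset pins the timestamp to the second pass through
// the repeated 23:xx hour; the test still attributes that hour to the 28th, so the floor is the 28th's
// midnight (expressed with the pre-transition -02:30 offset).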
- long timeBetweenMidnights = time("2006-10-28T23:30:00.000-03:30"); - // (this is halfway through the hour after the clocks changed, in which local time was ambiguous) - - long floor = rounding.round(timeBetweenMidnights); - assertThat(floor, isDate(time("2006-10-28T00:00:00.000-02:30"), tz)); - - long ceiling = rounding.nextRoundingValue(timeBetweenMidnights); - assertThat(ceiling, isDate(time("2006-10-29T00:00:00.000-02:30"), tz)); - - assertInterval(floor, timeBetweenMidnights, ceiling, rounding, tz); - } - - { - // Times after the second midnight should be rounded down to the first midnight. - long timeAfterSecondMidnight = time("2006-10-29T06:00:00.000-03:30"); - long floor = rounding.round(timeAfterSecondMidnight); - assertThat(floor, isDate(time("2006-10-29T00:00:00.000-02:30"), tz)); - long ceiling = rounding.nextRoundingValue(timeAfterSecondMidnight); - assertThat(ceiling, isDate(time("2006-10-30T00:00:00.000-03:30"), tz)); - assertInterval(floor, timeAfterSecondMidnight, ceiling, rounding, tz); - } - } - - /** - * tests for dst transition with overlaps and day roundings. - */ - public void testDST_END_Edgecases() { - // First case, dst happens at 1am local time, switching back one hour. - // We want the overlapping hour to count for the next day, making it a 25h interval - - DateTimeUnit timeUnit = DateTimeUnit.DAY_OF_MONTH; - DateTimeZone tz = DateTimeZone.forID("Atlantic/Azores"); - Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz); - - // Sunday, 29 October 2000, 01:00:00 clocks were turned backward 1 hour - // to Sunday, 29 October 2000, 00:00:00 local standard time instead - // which means there were two midnights that day. - - long midnightBeforeTransition = time("2000-10-29T00:00:00", tz); - long midnightOfTransition = time("2000-10-29T00:00:00-01:00"); - assertEquals(60L * 60L * 1000L, midnightOfTransition - midnightBeforeTransition); - long nextMidnight = time("2000-10-30T00:00:00", tz); - - assertInterval(midnightBeforeTransition, nextMidnight, rounding, 25 * 60, tz); - - assertThat(rounding.round(time("2000-10-29T06:00:00-01:00")), isDate(time("2000-10-29T00:00:00Z"), tz)); - - // Second case, dst happens at 0am local time, switching back one hour to 11pm local time. - // We want the overlapping hour to count for the previous day here - - tz = DateTimeZone.forID("America/Lima"); - rounding = new Rounding.TimeUnitRounding(timeUnit, tz); - - // Sunday, 1 April 1990, 00:00:00 clocks were turned backward 1 hour to - // Saturday, 31 March 1990, 23:00:00 local standard time instead - - midnightBeforeTransition = time("1990-03-31T00:00:00.000-04:00"); - nextMidnight = time("1990-04-01T00:00:00.000-05:00"); - assertInterval(midnightBeforeTransition, nextMidnight, rounding, 25 * 60, tz); - - // make sure the next interval is 24h long again - long midnightAfterTransition = time("1990-04-01T00:00:00.000-05:00"); - nextMidnight = time("1990-04-02T00:00:00.000-05:00"); - assertInterval(midnightAfterTransition, nextMidnight, rounding, 24 * 60, tz); - } - - /** - * Test that time zones are correctly parsed. There is a bug with - * Joda 2.9.4 (see https://github.com/JodaOrg/joda-time/issues/373) - */ - public void testsTimeZoneParsing() { - final DateTime expected = new DateTime(2016, 11, 10, 5, 37, 59, randomDateTimeZone()); - - // Formatter used to print and parse the sample date. 
- // Printing the date works but parsing it back fails - // with Joda 2.9.4 - DateTimeFormatter formatter = DateTimeFormat.forPattern("YYYY-MM-dd'T'HH:mm:ss " + randomFrom("ZZZ", "[ZZZ]", "'['ZZZ']'")); - - String dateTimeAsString = formatter.print(expected); - assertThat(dateTimeAsString, startsWith("2016-11-10T05:37:59 ")); - - DateTime parsedDateTime = formatter.parseDateTime(dateTimeAsString); - assertThat(parsedDateTime.getZone(), equalTo(expected.getZone())); - } - - private static void assertInterval(long rounded, long nextRoundingValue, Rounding rounding, int minutes, DateTimeZone tz) { - assertInterval(rounded, dateBetween(rounded, nextRoundingValue), nextRoundingValue, rounding, tz); - assertEquals(DateTimeConstants.MILLIS_PER_MINUTE * minutes, nextRoundingValue - rounded); - } - - /** - * perform a number of assertions and checks on {@link TimeUnitRounding} intervals - * @param rounded the expected low end of the rounding interval - * @param unrounded a date in the interval to be checked for rounding - * @param nextRoundingValue the expected upper end of the rounding interval - * @param rounding the rounding instance - */ - private static void assertInterval(long rounded, long unrounded, long nextRoundingValue, Rounding rounding, DateTimeZone tz) { - assertThat("rounding should be idempotent ", rounding.round(rounded), isDate(rounded, tz)); - assertThat("rounded value smaller or equal than unrounded" + rounding, rounded, lessThanOrEqualTo(unrounded)); - assertThat("values less than rounded should round further down" + rounding, rounding.round(rounded - 1), lessThan(rounded)); - assertThat("nextRounding value should be a rounded date", rounding.round(nextRoundingValue), isDate(nextRoundingValue, tz)); - assertThat( - "values above nextRounding should round down there", - rounding.round(nextRoundingValue + 1), - isDate(nextRoundingValue, tz) - ); - - if (isTimeWithWellDefinedRounding(tz, unrounded)) { - assertThat("nextRounding value should be greater than date" + rounding, nextRoundingValue, greaterThan(unrounded)); - - long dateBetween = dateBetween(rounded, nextRoundingValue); - assertThat( - "dateBetween [" + new DateTime(dateBetween, tz) + "] should round down to roundedDate", - rounding.round(dateBetween), - isDate(rounded, tz) - ); - assertThat( - "dateBetween [" + new DateTime(dateBetween, tz) + "] should round up to nextRoundingValue", - rounding.nextRoundingValue(dateBetween), - isDate(nextRoundingValue, tz) - ); - } - } - - private static boolean isTimeWithWellDefinedRounding(DateTimeZone tz, long t) { - if (tz.getID().equals("America/St_Johns") - || tz.getID().equals("America/Goose_Bay") - || tz.getID().equals("America/Moncton") - || tz.getID().equals("Canada/Newfoundland")) { - - // Clocks went back at 00:01 between 1987 and 2010, causing overlapping days. - // These timezones are otherwise uninteresting, so just skip this period. - - return t <= time("1987-10-01T00:00:00Z") || t >= time("2010-12-01T00:00:00Z"); - } - - if (tz.getID().equals("Antarctica/Casey")) { - - // Clocks went back 3 hours at 02:00 on 2010-03-05, causing overlapping days. 
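// [editor's note] An editorial aside, not part of the diff: the overlap windows special-cased above can
// be located programmatically from java.time's zone rules, e.g.:
import java.time.ZoneId;
import java.time.zone.ZoneOffsetTransition;

class OverlapFinder {
    public static void main(String[] args) {
        for (String id : new String[] { "Antarctica/Casey", "America/St_Johns" }) {
            for (ZoneOffsetTransition t : ZoneId.of(id).getRules().getTransitions()) {
                if (t.isOverlap()) { // clocks were set back, so some local times occur twice
                    System.out.println(id + ": set back by " + t.getDuration().abs() + " at " + t.getInstant());
                }
            }
        }
    }
}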
- - return t <= time("2010-03-03T00:00:00Z") || t >= time("2010-03-07T00:00:00Z"); - } - - return true; - } - - private static long dateBetween(long lower, long upper) { - long dateBetween = randomLongBetween(lower, upper - 1); - assert lower <= dateBetween && dateBetween < upper; - return dateBetween; - } - - private static DateTimeUnit randomTimeUnit() { - byte id = (byte) randomIntBetween(1, 8); - return DateTimeUnit.resolve(id); - } - - private static long time(String time) { - return time(time, DateTimeZone.UTC); - } - - private static long time(String time, DateTimeZone zone) { - return ISODateTimeFormat.dateOptionalTimeParser().withZone(zone).parseMillis(time); - } - - private static Matcher<Long> isDate(final long expected, DateTimeZone tz) { - return new TypeSafeMatcher<Long>() { - @Override - public boolean matchesSafely(final Long item) { - return expected == item.longValue(); - } - - @Override - public void describeTo(Description description) { - description.appendText(new DateTime(expected, tz) + " [" + expected + "] "); - } - - @Override - protected void describeMismatchSafely(final Long actual, final Description mismatchDescription) { - mismatchDescription.appendText(" was ").appendValue(new DateTime(actual, tz) + " [" + actual + "]"); - } - }; - } -} diff --git a/server/src/test/java/org/opensearch/common/settings/ConsistentSettingsServiceTests.java b/server/src/test/java/org/opensearch/common/settings/ConsistentSettingsServiceTests.java index e7873723bec22..86bc124007829 100644 --- a/server/src/test/java/org/opensearch/common/settings/ConsistentSettingsServiceTests.java +++ b/server/src/test/java/org/opensearch/common/settings/ConsistentSettingsServiceTests.java @@ -37,15 +37,16 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; -import org.mockito.Mockito; -import org.mockito.stubbing.Answer; import java.util.Arrays; import java.util.Locale; import java.util.concurrent.atomic.AtomicReference; -import static org.mockito.Mockito.mock; +import org.mockito.Mockito; +import org.mockito.stubbing.Answer; + import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; public class ConsistentSettingsServiceTests extends OpenSearchTestCase { diff --git a/server/src/test/java/org/opensearch/common/settings/InsecureSettingTests.java b/server/src/test/java/org/opensearch/common/settings/InsecureSettingTests.java index cca8deda2964d..9358013826a1c 100644 --- a/server/src/test/java/org/opensearch/common/settings/InsecureSettingTests.java +++ b/server/src/test/java/org/opensearch/common/settings/InsecureSettingTests.java @@ -8,25 +8,24 @@ package org.opensearch.common.settings; -import java.util.ArrayList; -import java.util.List; - import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.core.LogEvent; import org.apache.logging.log4j.core.appender.AbstractAppender; import org.apache.logging.log4j.core.config.Property; +import org.opensearch.common.logging.Loggers; +import org.opensearch.test.OpenSearchTestCase; import org.junit.After; import org.junit.Assert; import org.junit.Before; -import org.opensearch.common.logging.Loggers; -import org.opensearch.test.OpenSearchTestCase; +import java.util.ArrayList; +import java.util.List; public class InsecureSettingTests extends OpenSearchTestCase { private List<String> rootLogMsgs = new ArrayList<>(); private AbstractAppender rootAppender; - protected void assertSettingWarning() { + private void assertSettingWarning() { 
assertWarnings( "[setting.name] setting was deprecated in OpenSearch and will be removed in a future release! See the breaking changes documentation for the next major version." ); @@ -51,13 +50,14 @@ public void append(LogEvent event) { InsecureSettingTests.this.rootLogMsgs.add(message); } }; - Loggers.addAppender(LogManager.getRootLogger(), rootAppender); rootAppender.start(); + Loggers.addAppender(LogManager.getLogger(SecureSetting.class), rootAppender); } @After public void removeInsecureSettingsAppender() { - Loggers.removeAppender(LogManager.getRootLogger(), rootAppender); + Loggers.removeAppender(LogManager.getLogger(SecureSetting.class), rootAppender); + rootAppender.stop(); } public void testShouldRaiseExceptionByDefault() { diff --git a/server/src/test/java/org/opensearch/common/settings/MemorySizeSettingsTests.java b/server/src/test/java/org/opensearch/common/settings/MemorySizeSettingsTests.java index 2c7251818e2bc..95db7c2cfacaa 100644 --- a/server/src/test/java/org/opensearch/common/settings/MemorySizeSettingsTests.java +++ b/server/src/test/java/org/opensearch/common/settings/MemorySizeSettingsTests.java @@ -33,8 +33,8 @@ package org.opensearch.common.settings; import org.opensearch.common.settings.Setting.Property; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.util.PageCacheRecycler; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.indices.IndexingMemoryController; import org.opensearch.indices.IndicesQueryCache; import org.opensearch.indices.IndicesRequestCache; @@ -43,9 +43,9 @@ import org.opensearch.monitor.jvm.JvmInfo; import org.opensearch.test.OpenSearchTestCase; -import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.notNullValue; public class MemorySizeSettingsTests extends OpenSearchTestCase { diff --git a/server/src/test/java/org/opensearch/common/settings/PropertyPlaceholderTests.java b/server/src/test/java/org/opensearch/common/settings/PropertyPlaceholderTests.java index c3c8666b83959..c4fbb27cb80e0 100644 --- a/server/src/test/java/org/opensearch/common/settings/PropertyPlaceholderTests.java +++ b/server/src/test/java/org/opensearch/common/settings/PropertyPlaceholderTests.java @@ -32,11 +32,11 @@ package org.opensearch.common.settings; +import org.opensearch.test.OpenSearchTestCase; + import java.util.LinkedHashMap; import java.util.Map; -import org.opensearch.test.OpenSearchTestCase; - import static org.hamcrest.Matchers.is; public class PropertyPlaceholderTests extends OpenSearchTestCase { diff --git a/server/src/test/java/org/opensearch/common/settings/SettingTests.java b/server/src/test/java/org/opensearch/common/settings/SettingTests.java index 3d5a5090cdc82..c6da96b521276 100644 --- a/server/src/test/java/org/opensearch/common/settings/SettingTests.java +++ b/server/src/test/java/org/opensearch/common/settings/SettingTests.java @@ -36,9 +36,7 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.core.LogEvent; import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; -import org.opensearch.core.common.io.stream.BytesStreamInput; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.settings.AbstractScopedSettings.SettingUpdater; import org.opensearch.common.settings.Setting.ByteSizeValueParser; @@ -51,13 +49,15 @@ import 
org.opensearch.common.settings.Setting.MinTimeValueParser; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Setting.RegexValidator; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.BytesStreamInput; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.index.IndexSettings; import org.opensearch.monitor.jvm.JvmInfo; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.MockLogAppender; +import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.junit.annotations.TestLogging; import java.util.Arrays; @@ -909,6 +909,18 @@ public void testDynamicKeySetting() { } } + public void testAffixKeySettingWithDynamicPrefix() { + Setting.AffixSetting<Boolean> setting = Setting.suffixKeySetting( + "enable", + (key) -> Setting.boolSetting(key, false, Property.NodeScope) + ); + Setting<Boolean> concreteSetting = setting.getConcreteSettingForNamespace("foo.bar"); + assertEquals("foo.bar.enable", concreteSetting.getKey()); + + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> setting.getConcreteSettingForNamespace("foo.")); + assertEquals("key [foo..enable] must match [*.enable] but didn't.", ex.getMessage()); + } + public void testAffixKeySetting() { Setting<Boolean> setting = Setting.affixKeySetting("foo.", "enable", (key) -> Setting.boolSetting(key, false, Property.NodeScope)); assertTrue(setting.hasComplexMatcher()); diff --git a/server/src/test/java/org/opensearch/common/settings/SettingsFilterTests.java b/server/src/test/java/org/opensearch/common/settings/SettingsFilterTests.java index 5dfb5c07ba081..75b6078fc4ea1 100644 --- a/server/src/test/java/org/opensearch/common/settings/SettingsFilterTests.java +++ b/server/src/test/java/org/opensearch/common/settings/SettingsFilterTests.java @@ -32,15 +32,14 @@ package org.opensearch.common.settings; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; -import org.opensearch.common.Strings; +import org.apache.logging.log4j.Logger; import org.opensearch.common.settings.Setting.Property; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.rest.RestRequest; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.MockLogAppender; +import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.rest.FakeRestRequest; import java.io.IOException; @@ -163,7 +162,7 @@ private void testFiltering(Settings source, Settings filtered, String... 
pattern xContentBuilder.startObject(); source.toXContent(xContentBuilder, request); xContentBuilder.endObject(); - String filteredSettingsString = Strings.toString(xContentBuilder); + String filteredSettingsString = xContentBuilder.toString(); filteredSettings = Settings.builder().loadFromSource(filteredSettingsString, xContentBuilder.contentType()).build(); assertThat(filteredSettings, equalTo(filtered)); } diff --git a/server/src/test/java/org/opensearch/common/settings/SettingsModuleTests.java b/server/src/test/java/org/opensearch/common/settings/SettingsModuleTests.java index 4490f6b39996f..66c9801d16598 100644 --- a/server/src/test/java/org/opensearch/common/settings/SettingsModuleTests.java +++ b/server/src/test/java/org/opensearch/common/settings/SettingsModuleTests.java @@ -34,11 +34,11 @@ import org.opensearch.common.inject.ModuleTestCase; import org.opensearch.common.settings.Setting.Property; -import org.hamcrest.Matchers; import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexSettings; import org.opensearch.search.SearchService; import org.opensearch.test.FeatureFlagSetter; +import org.hamcrest.Matchers; import java.util.Arrays; @@ -286,24 +286,9 @@ public void testDynamicIndexSettingsRegistration() { } public void testConcurrentSegmentSearchClusterSettings() { - // Test that we throw an exception without the feature flag - Settings settings = Settings.builder().put(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build(); - SettingsException ex = expectThrows(SettingsException.class, () -> new SettingsModule(settings)); - assertEquals( - "unknown setting [" - + SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey() - + "] please check that any required plugins are installed, or check the breaking " - + "changes documentation for removed settings", - ex.getMessage() - ); - - // Test that the settings updates correctly with the feature flag - FeatureFlagSetter.set(FeatureFlags.CONCURRENT_SEGMENT_SEARCH); boolean settingValue = randomBoolean(); - Settings settingsWithFeatureFlag = Settings.builder() - .put(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), settingValue) - .build(); - SettingsModule settingsModule = new SettingsModule(settingsWithFeatureFlag); + Settings settings = Settings.builder().put(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), settingValue).build(); + SettingsModule settingsModule = new SettingsModule(settings); assertEquals(settingValue, SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(settingsModule.getSettings())); } @@ -311,28 +296,33 @@ public void testConcurrentSegmentSearchIndexSettings() { Settings.Builder target = Settings.builder().put(Settings.EMPTY); Settings.Builder update = Settings.builder(); - // Test that we throw an exception without the feature flag SettingsModule module = new SettingsModule(Settings.EMPTY); IndexScopedSettings indexScopedSettings = module.getIndexScopedSettings(); - expectThrows( - SettingsException.class, - () -> indexScopedSettings.updateDynamicSettings( - Settings.builder().put(IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build(), - target, - update, - "node" - ) - ); - - // Test that the settings updates correctly with the feature flag - FeatureFlagSetter.set(FeatureFlags.CONCURRENT_SEGMENT_SEARCH); - SettingsModule moduleWithFeatureFlag = new SettingsModule(Settings.EMPTY); - IndexScopedSettings indexScopedSettingsWithFeatureFlag = moduleWithFeatureFlag.getIndexScopedSettings(); - 
indexScopedSettingsWithFeatureFlag.updateDynamicSettings( + indexScopedSettings.updateDynamicSettings( Settings.builder().put(IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build(), target, update, "node" ); } + + public void testMaxSliceCountClusterSettingsForConcurrentSearch() { + int settingValue = randomIntBetween(0, 10); + Settings settings = Settings.builder() + .put(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey(), settingValue) + .build(); + SettingsModule settingsModule = new SettingsModule(settings); + assertEquals( + settingValue, + (int) SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.get(settingsModule.getSettings()) + ); + + // Test that negative value is not allowed + settingValue = -1; + final Settings settings_2 = Settings.builder() + .put(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey(), settingValue) + .build(); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> new SettingsModule(settings_2)); + assertTrue(iae.getMessage().contains(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey())); + } } diff --git a/server/src/test/java/org/opensearch/common/settings/SettingsTests.java b/server/src/test/java/org/opensearch/common/settings/SettingsTests.java index 0c6352d118be2..669d40f40bb2c 100644 --- a/server/src/test/java/org/opensearch/common/settings/SettingsTests.java +++ b/server/src/test/java/org/opensearch/common/settings/SettingsTests.java @@ -34,22 +34,23 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.Version; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.settings.SecureString; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.test.OpenSearchTestCase; import java.io.ByteArrayInputStream; import java.io.IOException; +import java.io.InputStream; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; @@ -64,7 +65,9 @@ import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -93,6 +96,15 @@ public void testReplacePropertiesPlaceholderSystemPropertyList() { assertThat(settings.getAsList("setting1"), contains(hostname, hostip)); } + public void testReplacePropertiesPlaceholderSystemPropertyEmptyList() { + final Settings settings = 
Settings.builder() + .put("setting1", "${HOSTNAMES}") + .replacePropertyPlaceholders(name -> name.equals("HOSTNAMES") ? "[]" : null) + .build(); + assertThat(settings.getAsList("setting1"), empty()); + assertThat(settings.get("setting1"), equalTo("[]")); + } + public void testReplacePropertiesPlaceholderSystemVariablesHaveNoEffect() { final String value = System.getProperty("java.home"); assertNotNull(value); @@ -301,6 +313,20 @@ public void testPrefixNormalization() { assertThat(settings.get("foo.test"), equalTo("test")); } + public void testPrefixNormalizationArchived() { + Settings settings = Settings.builder().put("archived.foo.bar", "baz").normalizePrefix("foo.").build(); + + assertThat(settings.size(), equalTo(1)); + assertThat(settings.get("foo.archived.foo.bar"), nullValue()); + assertThat(settings.get("archived.foo.bar"), equalTo("baz")); + + settings = Settings.builder().put("archived.foo.*", "baz").normalizePrefix("foo.").build(); + + assertThat(settings.size(), equalTo(1)); + assertThat(settings.get("foo.archived.foo.*"), nullValue()); + assertThat(settings.get("archived.foo.*"), equalTo("baz")); + } + public void testFilteredMap() { Settings.Builder builder = Settings.builder(); builder.put("a", "a1"); @@ -515,7 +541,7 @@ public void testToAndFromXContent() throws IOException { .putNull("foo.null.baz") .build(); final boolean flatSettings = randomBoolean(); - XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent()); + XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder(); builder.startObject(); settings.toXContent(builder, new ToXContent.MapParams(Collections.singletonMap("flat_settings", "" + flatSettings))); builder.endObject(); @@ -548,24 +574,24 @@ public void testSimpleJsonSettings() throws Exception { public void testToXContent() throws IOException { // this is just terrible but it's the existing behavior! 
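// [editor's note] "Terrible" because keys sharing a prefix are only partially merged: the list under
// "foo.bar" and the scalar under "foo.bar.baz" serialize as sibling entries {"bar.baz":"test","bar":[...]}
// inside "foo" rather than as one nested tree; the assertions below pin down that legacy shape.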
Settings test = Settings.builder().putList("foo.bar", "1", "2", "3").put("foo.bar.baz", "test").build(); - XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent()); + XContentBuilder builder = XContentBuilder.builder(MediaTypeRegistry.JSON.xContent()); builder.startObject(); test.toXContent(builder, new ToXContent.MapParams(Collections.emptyMap())); builder.endObject(); - assertEquals("{\"foo\":{\"bar.baz\":\"test\",\"bar\":[\"1\",\"2\",\"3\"]}}", Strings.toString(builder)); + assertEquals("{\"foo\":{\"bar.baz\":\"test\",\"bar\":[\"1\",\"2\",\"3\"]}}", builder.toString()); test = Settings.builder().putList("foo.bar", "1", "2", "3").build(); - builder = XContentBuilder.builder(XContentType.JSON.xContent()); + builder = XContentBuilder.builder(MediaTypeRegistry.JSON.xContent()); builder.startObject(); test.toXContent(builder, new ToXContent.MapParams(Collections.emptyMap())); builder.endObject(); - assertEquals("{\"foo\":{\"bar\":[\"1\",\"2\",\"3\"]}}", Strings.toString(builder)); + assertEquals("{\"foo\":{\"bar\":[\"1\",\"2\",\"3\"]}}", builder.toString()); - builder = XContentBuilder.builder(XContentType.JSON.xContent()); + builder = XContentBuilder.builder(MediaTypeRegistry.JSON.xContent()); builder.startObject(); test.toXContent(builder, new ToXContent.MapParams(Collections.singletonMap("flat_settings", "true"))); builder.endObject(); - assertEquals("{\"foo.bar\":[\"1\",\"2\",\"3\"]}", Strings.toString(builder)); + assertEquals("{\"foo.bar\":[\"1\",\"2\",\"3\"]}", builder.toString()); } public void testLoadEmptyStream() throws IOException { @@ -589,6 +615,18 @@ public void testSimpleYamlSettings() throws Exception { assertThat(settings.getAsList("test1.test3").size(), equalTo(2)); assertThat(settings.getAsList("test1.test3").get(0), equalTo("test3-1")); assertThat(settings.getAsList("test1.test3").get(1), equalTo("test3-2")); + assertThat(settings.getAsList("test1.test4"), empty()); + } + + public void testYamlPlaceholder() throws IOException { + try (InputStream in = new ByteArrayInputStream("hosts: ${HOSTNAMES}".getBytes(StandardCharsets.UTF_8))) { + Settings settings = Settings.builder() + .loadFromStream("foo.yml", in, false) + .replacePropertyPlaceholders(name -> name.equals("HOSTNAMES") ? 
"[\"h1\", \"h2\"]" : null) + .build(); + assertThat(settings.getAsList("hosts"), hasSize(2)); + assertThat(settings.getAsList("hosts"), containsInAnyOrder("h1", "h2")); + } } public void testYamlLegacyList() throws IOException { @@ -708,18 +746,18 @@ public void testSetByTimeUnit() { public void testProcessSetting() throws IOException { Settings test = Settings.builder().put("ant", "value1").put("ant.bee.cat", "value2").put("bee.cat", "value3").build(); - XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent()); + XContentBuilder builder = XContentBuilder.builder(MediaTypeRegistry.JSON.xContent()); builder.startObject(); test.toXContent(builder, new ToXContent.MapParams(Collections.emptyMap())); builder.endObject(); - assertEquals("{\"ant.bee\":{\"cat\":\"value2\"},\"ant\":\"value1\",\"bee\":{\"cat\":\"value3\"}}", Strings.toString(builder)); + assertEquals("{\"ant.bee\":{\"cat\":\"value2\"},\"ant\":\"value1\",\"bee\":{\"cat\":\"value3\"}}", builder.toString()); test = Settings.builder().put("ant", "value1").put("ant.bee.cat", "value2").put("ant.bee.cat.dog.ewe", "value3").build(); - builder = XContentBuilder.builder(XContentType.JSON.xContent()); + builder = XContentBuilder.builder(MediaTypeRegistry.JSON.xContent()); builder.startObject(); test.toXContent(builder, new ToXContent.MapParams(Collections.emptyMap())); builder.endObject(); - assertEquals("{\"ant.bee\":{\"cat.dog\":{\"ewe\":\"value3\"},\"cat\":\"value2\"},\"ant\":\"value1\"}", Strings.toString(builder)); + assertEquals("{\"ant.bee\":{\"cat.dog\":{\"ewe\":\"value3\"},\"cat\":\"value2\"},\"ant\":\"value1\"}", builder.toString()); } } diff --git a/server/src/test/java/org/opensearch/common/settings/WriteableSettingTests.java b/server/src/test/java/org/opensearch/common/settings/WriteableSettingTests.java index 804a53d687c95..391f3d3116430 100644 --- a/server/src/test/java/org/opensearch/common/settings/WriteableSettingTests.java +++ b/server/src/test/java/org/opensearch/common/settings/WriteableSettingTests.java @@ -8,15 +8,15 @@ package org.opensearch.common.settings; -import org.junit.Before; import org.opensearch.Version; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.core.common.io.stream.BytesStreamInput; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.BytesStreamInput; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; import java.io.IOException; import java.util.EnumMap; diff --git a/server/src/test/java/org/opensearch/common/time/DateFormattersTests.java b/server/src/test/java/org/opensearch/common/time/DateFormattersTests.java index 681daf1755890..85c9919275c3a 100644 --- a/server/src/test/java/org/opensearch/common/time/DateFormattersTests.java +++ b/server/src/test/java/org/opensearch/common/time/DateFormattersTests.java @@ -249,10 +249,19 @@ public void testEpochSecondParserWithFraction() { } public void testEpochMilliParsersWithDifferentFormatters() { - DateFormatter formatter = DateFormatter.forPattern("strict_date_optional_time||epoch_millis"); - TemporalAccessor accessor = formatter.parse("123"); - assertThat(DateFormatters.from(accessor).toInstant().toEpochMilli(), is(123L)); - 
assertThat(formatter.pattern(), is("strict_date_optional_time||epoch_millis")); + { + DateFormatter formatter = DateFormatter.forPattern("strict_date_optional_time||epoch_millis"); + TemporalAccessor accessor = formatter.parse("123"); + assertThat(DateFormatters.from(accessor).toInstant().toEpochMilli(), is(123L)); + assertThat(formatter.pattern(), is("strict_date_optional_time||epoch_millis")); + } + + { + DateFormatter formatter = DateFormatter.forPattern("rfc3339_lenient||epoch_millis"); + TemporalAccessor accessor = formatter.parse("123"); + assertThat(DateFormatters.from(accessor).toInstant().toEpochMilli(), is(123L)); + assertThat(formatter.pattern(), is("rfc3339_lenient||epoch_millis")); + } } public void testParsersWithMultipleInternalFormats() throws Exception { @@ -317,6 +326,11 @@ public void testEqualsAndHashcode() { assertThat(epochMillisFormatter.hashCode(), is(DateFormatters.forPattern("epoch_millis").hashCode())); assertThat(epochMillisFormatter, sameInstance(DateFormatters.forPattern("epoch_millis"))); assertThat(epochMillisFormatter, equalTo(DateFormatters.forPattern("epoch_millis"))); + + DateFormatter rfc339Formatter = DateFormatters.forPattern("rfc3339_lenient"); + assertThat(rfc339Formatter.hashCode(), is(DateFormatters.forPattern("rfc3339_lenient").hashCode())); + assertThat(rfc339Formatter, sameInstance(DateFormatters.forPattern("rfc3339_lenient"))); + assertThat(rfc339Formatter, equalTo(DateFormatters.forPattern("rfc3339_lenient"))); } public void testSupportBackwardsJava8Format() { @@ -461,6 +475,132 @@ public void testIso8601Parsing() { formatter.format(formatter.parse("2018-05-15T17:14:56,123456789+01:00")); } + public void testRFC3339Parsing() { + DateFormatter formatter = DateFormatters.forPattern("rfc3339_lenient"); + + // timezone not allowed with just date + formatter.format(formatter.parse("2018")); + formatter.format(formatter.parse("2018-05")); + formatter.format(formatter.parse("2018-05-15")); + + formatter.format(formatter.parse("2018-05-15T17:14Z")); + formatter.format(formatter.parse("2018-05-15T17:14z")); + formatter.format(formatter.parse("2018-05-15T17:14+01:00")); + formatter.format(formatter.parse("2018-05-15T17:14-01:00")); + + formatter.format(formatter.parse("2018-05-15T17:14:56Z")); + formatter.format(formatter.parse("2018-05-15T17:14:56z")); + formatter.format(formatter.parse("2018-05-15T17:14:56+01:00")); + formatter.format(formatter.parse("2018-05-15T17:14:56-01:00")); + + // milliseconds can be separated using comma or decimal point + formatter.format(formatter.parse("2018-05-15T17:14:56.123Z")); + formatter.format(formatter.parse("2018-05-15T17:14:56.123z")); + formatter.format(formatter.parse("2018-05-15T17:14:56.123-01:00")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123Z")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123z")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123+01:00")); + + // microseconds can be separated using comma or decimal point + formatter.format(formatter.parse("2018-05-15T17:14:56.123456Z")); + formatter.format(formatter.parse("2018-05-15T17:14:56.123456z")); + formatter.format(formatter.parse("2018-05-15T17:14:56.123456+01:00")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123456Z")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123456z")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123456-01:00")); + + // nanoseconds can be separated using comma or decimal point + formatter.format(formatter.parse("2018-05-15T17:14:56.123456789Z")); + 
formatter.format(formatter.parse("2018-05-15T17:14:56.123456789-01:00")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123456789Z")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123456789z")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123456789+01:00")); + + // 1994-11-05T08:15:30-05:00 corresponds to November 5, 1994, 8:15:30 am, US Eastern Standard Time/ + // 1994-11-05T13:15:30Z corresponds to the same instant. + final Instant instantA = DateFormatters.from(formatter.parse("1994-11-05T08:15:30-05:00")).toInstant(); + final Instant instantB = DateFormatters.from(formatter.parse("1994-11-05T13:15:30Z")).toInstant(); + assertThat(instantA, is(instantB)); + + // Invalid dates should throw an exception + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("abc")); + assertThat(e.getMessage(), is("failed to parse date field [abc] with format [rfc3339_lenient]")); + // Invalid offset + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("2018-05-15T17:14:56-00:00")); + assertThat(e.getMessage(), is("failed to parse date field [2018-05-15T17:14:56-00:00] with format [rfc3339_lenient]")); + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("2018-05-15T17:14:56.+00:00")); + assertThat(e.getMessage(), is("failed to parse date field [2018-05-15T17:14:56.+00:00] with format [rfc3339_lenient]")); + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("2018-05-15T17:14:56_00:00")); + assertThat(e.getMessage(), is("failed to parse date field [2018-05-15T17:14:56_00:00] with format [rfc3339_lenient]")); + // No offset + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("2018-05-15T17:14:56")); + assertThat(e.getMessage(), is("failed to parse date field [2018-05-15T17:14:56] with format [rfc3339_lenient]")); + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("2018-05-15T17:14:56.123")); + assertThat(e.getMessage(), is("failed to parse date field [2018-05-15T17:14:56.123] with format [rfc3339_lenient]")); + // No end of fraction + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("2018-05-15T17:14:56.123")); + assertThat(e.getMessage(), is("failed to parse date field [2018-05-15T17:14:56.123] with format [rfc3339_lenient]")); + // Invalid fraction + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("2018-05-15T17:14:56.abcZ")); + assertThat(e.getMessage(), is("failed to parse date field [2018-05-15T17:14:56.abcZ] with format [rfc3339_lenient]")); + // Invalid date + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("201805-15T17:14:56.123456+0000")); + assertThat(e.getMessage(), is("failed to parse date field [201805-15T17:14:56.123456+0000] with format [rfc3339_lenient]")); + // More than 9 digits of nanosecond resolution + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("2018-05-15T17:14:56.1234567891Z")); + assertThat(e.getMessage(), is("failed to parse date field [2018-05-15T17:14:56.1234567891Z] with format [rfc3339_lenient]")); + } + + public void testRFC3339ParserWithDifferentFormatters() { + { + DateFormatter formatter = DateFormatter.forPattern("strict_date_optional_time||rfc3339_lenient"); + TemporalAccessor accessor = formatter.parse("2018-05-15T17:14:56+0100"); + assertThat(DateFormatters.from(accessor).toInstant().toEpochMilli(), is(1526400896000L)); + assertThat(formatter.pattern(), 
is("strict_date_optional_time||rfc3339_lenient")); + } + + { + DateFormatter formatter = DateFormatter.forPattern("rfc3339_lenient||strict_date_optional_time"); + TemporalAccessor accessor = formatter.parse("2018-05-15T17:14:56.123+0100"); + assertThat(DateFormatters.from(accessor).toInstant().toEpochMilli(), is(1526400896123L)); + assertThat(formatter.pattern(), is("rfc3339_lenient||strict_date_optional_time")); + } + + { + DateFormatter formatter = DateFormatter.forPattern("rfc3339_lenient||strict_date_optional_time"); + TemporalAccessor accessor = formatter.parse("2018-05-15T17:14:56.123456789+0100"); + assertThat(DateFormatters.from(accessor).toInstant().getNano(), is(123456789)); + assertThat(formatter.pattern(), is("rfc3339_lenient||strict_date_optional_time")); + } + } + + public void testRFC3339ParserAgainstDifferentFormatters() { + DateFormatter rfc3339Formatter = DateFormatter.forPattern("rfc3339_lenient"); + { + DateFormatter isoFormatter = DateFormatter.forPattern("strict_date_optional_time"); + + assertDateTimeEquals("2018-05-15T17:14Z", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14+01:00", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14-01:00", rfc3339Formatter, isoFormatter); + + assertDateTimeEquals("2018-05-15T17:14:56Z", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14:56+01:00", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14:56-01:00", rfc3339Formatter, isoFormatter); + + assertDateTimeEquals("2018-05-15T17:14:56.123Z", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14:56.123+01:00", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14:56.123-01:00", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14:56,123+01:00", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14:56,123-01:00", rfc3339Formatter, isoFormatter); + + assertDateTimeEquals("2018-05-15T17:14:56.123456Z", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14:56.123456789+01:00", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14:56.123456789-01:00", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14:56,123456789+01:00", rfc3339Formatter, isoFormatter); + assertDateTimeEquals("2018-05-15T17:14:56,123456789-01:00", rfc3339Formatter, isoFormatter); + } + } + public void testRoundupFormatterWithEpochDates() { assertRoundupFormatter("epoch_millis", "1234567890", 1234567890L); // also check nanos of the epoch_millis formatter if it is rounded up to the nano second @@ -683,4 +823,10 @@ public void testCamelCaseDeprecation() { } } } + + void assertDateTimeEquals(String toTest, DateFormatter candidateParser, DateFormatter baselineParser) { + Instant gotInstant = DateFormatters.from(candidateParser.parse(toTest)).toInstant(); + Instant expectedInstant = DateFormatters.from(baselineParser.parse(toTest)).toInstant(); + assertThat(gotInstant, is(expectedInstant)); + } } diff --git a/server/src/test/java/org/opensearch/common/transport/BoundTransportAddressTests.java b/server/src/test/java/org/opensearch/common/transport/BoundTransportAddressTests.java index a177840059f8f..dae16d805efbf 100644 --- a/server/src/test/java/org/opensearch/common/transport/BoundTransportAddressTests.java +++ b/server/src/test/java/org/opensearch/common/transport/BoundTransportAddressTests.java @@ -34,6 +34,8 @@ import org.opensearch.common.io.stream.BytesStreamOutput; import 
org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.test.OpenSearchTestCase; import java.net.InetAddress; diff --git a/server/src/test/java/org/opensearch/common/util/BigArraysTests.java b/server/src/test/java/org/opensearch/common/util/BigArraysTests.java index c973a76623452..a12967f257610 100644 --- a/server/src/test/java/org/opensearch/common/util/BigArraysTests.java +++ b/server/src/test/java/org/opensearch/common/util/BigArraysTests.java @@ -33,15 +33,15 @@ package org.opensearch.common.util; import org.apache.lucene.util.BytesRef; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.breaker.CircuitBreakingException; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.CircuitBreakingException; +import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.common.util.BigArray; import org.opensearch.core.common.util.ByteArray; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.indices.breaker.HierarchyCircuitBreakerService; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; diff --git a/server/src/test/java/org/opensearch/common/util/BinarySearcherTests.java b/server/src/test/java/org/opensearch/common/util/BinarySearcherTests.java index bc64a4980947c..b1b29097d56bf 100644 --- a/server/src/test/java/org/opensearch/common/util/BinarySearcherTests.java +++ b/server/src/test/java/org/opensearch/common/util/BinarySearcherTests.java @@ -32,9 +32,9 @@ package org.opensearch.common.util; -import org.opensearch.common.settings.Settings; import org.opensearch.common.lease.Releasables; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; diff --git a/server/src/test/java/org/opensearch/common/util/BitArrayTests.java b/server/src/test/java/org/opensearch/common/util/BitArrayTests.java index 16e89b81c695c..e88e2b20c6155 100644 --- a/server/src/test/java/org/opensearch/common/util/BitArrayTests.java +++ b/server/src/test/java/org/opensearch/common/util/BitArrayTests.java @@ -32,12 +32,12 @@ package org.opensearch.common.util; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.breaker.CircuitBreakingException; -import org.opensearch.common.breaker.NoopCircuitBreaker; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.indices.breaker.CircuitBreakerService; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.CircuitBreakingException; +import org.opensearch.core.common.breaker.NoopCircuitBreaker; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.test.OpenSearchTestCase; import java.util.ArrayList; diff --git a/server/src/test/java/org/opensearch/common/util/BytesRefHashTests.java 
b/server/src/test/java/org/opensearch/common/util/BytesRefHashTests.java index 8b719283ed71d..adcec8f07f702 100644 --- a/server/src/test/java/org/opensearch/common/util/BytesRefHashTests.java +++ b/server/src/test/java/org/opensearch/common/util/BytesRefHashTests.java @@ -32,11 +32,12 @@ package org.opensearch.common.util; +import org.apache.lucene.tests.util.TestUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; -import org.apache.lucene.tests.util.TestUtil; +import org.opensearch.common.hash.T1ha1; import org.opensearch.common.settings.Settings; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.test.OpenSearchTestCase; import java.util.HashMap; @@ -44,6 +45,7 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Set; +import java.util.stream.Stream; public class BytesRefHashTests extends OpenSearchTestCase { @@ -57,9 +59,13 @@ private void newHash() { if (hash != null) { hash.close(); } - // Test high load factors to make sure that collision resolution works fine - final float maxLoadFactor = 0.6f + randomFloat() * 0.39f; - hash = new BytesRefHash(randomIntBetween(0, 100), maxLoadFactor, randomBigArrays()); + long seed = randomLong(); + hash = new BytesRefHash( + randomIntBetween(1, 100), // random capacity + 0.6f + randomFloat() * 0.39f, // random load factor to verify collision resolution + key -> T1ha1.hash(key.bytes, key.offset, key.length, seed), + randomBigArrays() + ); } @Override @@ -68,39 +74,34 @@ public void setUp() throws Exception { newHash(); } - public void testDuel() { - final int len = randomIntBetween(1, 100000); - final BytesRef[] values = new BytesRef[len]; - for (int i = 0; i < values.length; ++i) { - values[i] = new BytesRef(randomAlphaOfLength(5)); - } - final Map<BytesRef, Integer> valueToId = new HashMap<>(); - final BytesRef[] idToValue = new BytesRef[values.length]; - final int iters = randomInt(1000000); - for (int i = 0; i < iters; ++i) { - final BytesRef value = randomFrom(values); - if (valueToId.containsKey(value)) { - assertEquals(-1 - valueToId.get(value), hash.add(value, value.hashCode())); + public void testFuzzy() { + Map<BytesRef, Long> reference = new HashMap<>(); + BytesRef[] keys = Stream.generate(() -> new BytesRef(randomAlphaOfLength(20))) + .limit(randomIntBetween(1000, 2000)) + .toArray(BytesRef[]::new); + + // Verify the behaviour of "add" and "find". + for (int i = 0; i < keys.length * 10; i++) { + BytesRef key = keys[i % keys.length]; + if (reference.containsKey(key)) { + long expectedOrdinal = reference.get(key); + assertEquals(-1 - expectedOrdinal, hash.add(key)); + assertEquals(expectedOrdinal, hash.find(key)); } else { - assertEquals(valueToId.size(), hash.add(value, value.hashCode())); - idToValue[valueToId.size()] = value; - valueToId.put(value, valueToId.size()); + assertEquals(-1, hash.find(key)); + reference.put(key, (long) reference.size()); + assertEquals((long) reference.get(key), hash.add(key)); } } - assertEquals(valueToId.size(), hash.size()); - for (final var next : valueToId.entrySet()) { - assertEquals(next.getValue().longValue(), hash.find(next.getKey(), next.getKey().hashCode())); + // Verify the behaviour of "get". 
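// [editor's note] get(ordinal, scratch) fills the caller-supplied BytesRef from the hash's internal byte
// pool rather than allocating; reusing one scratch instance keeps the verification loop allocation-free.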
+ BytesRef scratch = new BytesRef(); + for (Map.Entry<BytesRef, Long> entry : reference.entrySet()) { + assertEquals(entry.getKey(), hash.get(entry.getValue(), scratch)); } - for (long i = 0; i < hash.capacity(); ++i) { - final long id = hash.id(i); - BytesRef spare = new BytesRef(); - if (id >= 0) { - hash.get(id, spare); - assertEquals(idToValue[(int) id], spare); - } - } + // Verify the behaviour of "size". + assertEquals(reference.size(), hash.size()); hash.close(); } diff --git a/server/src/test/java/org/opensearch/common/util/CollectionUtilsTests.java b/server/src/test/java/org/opensearch/common/util/CollectionUtilsTests.java index f0a0d41aa7aab..1c629bd77fe93 100644 --- a/server/src/test/java/org/opensearch/common/util/CollectionUtilsTests.java +++ b/server/src/test/java/org/opensearch/common/util/CollectionUtilsTests.java @@ -32,6 +32,7 @@ package org.opensearch.common.util; +import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.test.OpenSearchTestCase; import java.util.ArrayList; @@ -45,7 +46,7 @@ import java.util.Map; import static java.util.Collections.emptyMap; -import static org.opensearch.common.util.CollectionUtils.eagerPartition; +import static org.opensearch.core.common.util.CollectionUtils.eagerPartition; import static org.hamcrest.Matchers.containsString; public class CollectionUtilsTests extends OpenSearchTestCase { diff --git a/server/src/test/java/org/opensearch/common/util/LongHashTests.java b/server/src/test/java/org/opensearch/common/util/LongHashTests.java index 295497b9f188c..3716c7ad277b3 100644 --- a/server/src/test/java/org/opensearch/common/util/LongHashTests.java +++ b/server/src/test/java/org/opensearch/common/util/LongHashTests.java @@ -33,7 +33,7 @@ package org.opensearch.common.util; import org.opensearch.common.settings.Settings; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.test.OpenSearchTestCase; import java.util.HashMap; diff --git a/server/src/test/java/org/opensearch/common/util/LongLongHashTests.java b/server/src/test/java/org/opensearch/common/util/LongLongHashTests.java index fd0f1b5bad4a7..e0b0182216c27 100644 --- a/server/src/test/java/org/opensearch/common/util/LongLongHashTests.java +++ b/server/src/test/java/org/opensearch/common/util/LongLongHashTests.java @@ -33,7 +33,7 @@ package org.opensearch.common.util; import org.opensearch.common.settings.Settings; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.test.OpenSearchTestCase; import java.util.ArrayList; diff --git a/server/src/test/java/org/opensearch/common/util/LongObjectHashMapTests.java b/server/src/test/java/org/opensearch/common/util/LongObjectHashMapTests.java index 73a6fc5d54883..dc4743189f4cf 100644 --- a/server/src/test/java/org/opensearch/common/util/LongObjectHashMapTests.java +++ b/server/src/test/java/org/opensearch/common/util/LongObjectHashMapTests.java @@ -33,7 +33,7 @@ package org.opensearch.common.util; import org.opensearch.common.settings.Settings; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.test.OpenSearchTestCase; import java.util.HashMap; diff --git a/server/src/test/java/org/opensearch/common/util/concurrent/AbstractLifecycleRunnableTests.java 
b/server/src/test/java/org/opensearch/common/util/concurrent/AbstractLifecycleRunnableTests.java index d771fdbef38d6..056cfb301198d 100644 --- a/server/src/test/java/org/opensearch/common/util/concurrent/AbstractLifecycleRunnableTests.java +++ b/server/src/test/java/org/opensearch/common/util/concurrent/AbstractLifecycleRunnableTests.java @@ -33,12 +33,13 @@ import org.apache.logging.log4j.Logger; import org.opensearch.common.SuppressLoggerChecks; -import org.opensearch.common.component.Lifecycle; +import org.opensearch.common.lifecycle.Lifecycle; import org.opensearch.test.OpenSearchTestCase; -import org.mockito.InOrder; import java.util.concurrent.Callable; +import org.mockito.InOrder; + import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.inOrder; import static org.mockito.Mockito.mock; diff --git a/server/src/test/java/org/opensearch/common/util/concurrent/AbstractRunnableTests.java b/server/src/test/java/org/opensearch/common/util/concurrent/AbstractRunnableTests.java index 286b90f391b3c..d288ec7726e0a 100644 --- a/server/src/test/java/org/opensearch/common/util/concurrent/AbstractRunnableTests.java +++ b/server/src/test/java/org/opensearch/common/util/concurrent/AbstractRunnableTests.java @@ -33,10 +33,10 @@ import org.opensearch.test.OpenSearchTestCase; -import org.mockito.InOrder; - import java.util.concurrent.Callable; +import org.mockito.InOrder; + import static org.mockito.Mockito.inOrder; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; diff --git a/server/src/test/java/org/opensearch/common/util/concurrent/BufferedAsyncIOProcessorTests.java b/server/src/test/java/org/opensearch/common/util/concurrent/BufferedAsyncIOProcessorTests.java index c38956f965ac3..29d7104370049 100644 --- a/server/src/test/java/org/opensearch/common/util/concurrent/BufferedAsyncIOProcessorTests.java +++ b/server/src/test/java/org/opensearch/common/util/concurrent/BufferedAsyncIOProcessorTests.java @@ -8,14 +8,14 @@ package org.opensearch.common.util.concurrent; -import org.junit.After; -import org.junit.Before; import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; import java.io.IOException; import java.util.Collections; diff --git a/server/src/test/java/org/opensearch/common/util/concurrent/ListenableFutureTests.java b/server/src/test/java/org/opensearch/common/util/concurrent/ListenableFutureTests.java index 32807cf3b88c8..5b7da3b0f95e7 100644 --- a/server/src/test/java/org/opensearch/common/util/concurrent/ListenableFutureTests.java +++ b/server/src/test/java/org/opensearch/common/util/concurrent/ListenableFutureTests.java @@ -33,8 +33,8 @@ package org.opensearch.common.util.concurrent; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; import org.opensearch.test.OpenSearchTestCase; import org.junit.After; diff --git a/server/src/test/java/org/opensearch/common/util/concurrent/OpenSearchExecutorsTests.java b/server/src/test/java/org/opensearch/common/util/concurrent/OpenSearchExecutorsTests.java index 2063cd26a9e8e..4823ce7a238e3 100644 --- 
a/server/src/test/java/org/opensearch/common/util/concurrent/OpenSearchExecutorsTests.java +++ b/server/src/test/java/org/opensearch/common/util/concurrent/OpenSearchExecutorsTests.java @@ -49,6 +49,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.lessThanOrEqualTo; /** * Tests for OpenSearchExecutors and its components like OpenSearchAbortPolicy. @@ -279,6 +280,41 @@ public void testScaleDown() throws Exception { terminate(pool); } + /** + * The test case is adapted from https://bugs.openjdk.org/browse/JDK-8323659 reproducer. + */ + public void testScaleUpWithSpawningTask() throws Exception { + ThreadPoolExecutor pool = OpenSearchExecutors.newScaling( + getClass().getName() + "/" + getTestName(), + 0, + 1, + between(1, 100), + randomTimeUnit(), + OpenSearchExecutors.daemonThreadFactory("test"), + threadContext + ); + assertThat("Min property", pool.getCorePoolSize(), equalTo(0)); + assertThat("Max property", pool.getMaximumPoolSize(), equalTo(1)); + + final CountDownLatch latch = new CountDownLatch(10); + class TestTask implements Runnable { + @Override + public void run() { + latch.countDown(); + if (latch.getCount() > 0) { + pool.execute(TestTask.this); + } + } + } + pool.execute(new TestTask()); + latch.await(); + + assertThat("wrong pool size", pool.getPoolSize(), lessThanOrEqualTo(1)); + assertThat("wrong active size", pool.getActiveCount(), lessThanOrEqualTo(1)); + + terminate(pool); + } + public void testRejectionMessageAndShuttingDownFlag() throws InterruptedException { int pool = between(1, 10); int queue = between(0, 100); diff --git a/server/src/test/java/org/opensearch/common/util/concurrent/ReleasableLockTests.java b/server/src/test/java/org/opensearch/common/util/concurrent/ReleasableLockTests.java index d88a281a66d89..7f7667f4db9a5 100644 --- a/server/src/test/java/org/opensearch/common/util/concurrent/ReleasableLockTests.java +++ b/server/src/test/java/org/opensearch/common/util/concurrent/ReleasableLockTests.java @@ -32,8 +32,8 @@ package org.opensearch.common.util.concurrent; -import org.opensearch.common.unit.TimeValue; import org.opensearch.common.lease.Releasable; +import org.opensearch.common.unit.TimeValue; import org.opensearch.test.OpenSearchTestCase; import java.util.ArrayList; diff --git a/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java b/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java index dfa239757513e..10669ca1a805b 100644 --- a/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java +++ b/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java @@ -44,11 +44,13 @@ import java.util.Map; import java.util.function.Supplier; +import org.mockito.Mockito; + +import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.sameInstance; -import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; public class ThreadContextTests extends OpenSearchTestCase { @@ -740,6 +742,71 @@ public void testMarkAsSystemContext() throws IOException { assertFalse(threadContext.isSystemContext()); } + public void testSystemContextWithPropagator() { + Settings build = Settings.builder().put("request.headers.default", "1").build(); + 
Map<String, Object> transientHeaderMap = Collections.singletonMap("test_transient_propagation_key", "test"); + Map<String, Object> transientHeaderTransformedMap = Collections.singletonMap("test_transient_propagation_key", "test"); + Map<String, Object> headerMap = Collections.singletonMap("test_transient_propagation_key", "test"); + Map<String, String> headerTransformedMap = Collections.singletonMap("test_transient_propagation_key", "test"); + ThreadContext threadContext = new ThreadContext(build); + ThreadContextStatePropagator mockPropagator = Mockito.mock(ThreadContextStatePropagator.class); + Mockito.when(mockPropagator.transients(transientHeaderMap, true)).thenReturn(Collections.emptyMap()); + Mockito.when(mockPropagator.transients(transientHeaderMap, false)).thenReturn(transientHeaderTransformedMap); + + Mockito.when(mockPropagator.headers(headerMap, true)).thenReturn(headerTransformedMap); + Mockito.when(mockPropagator.headers(headerMap, false)).thenReturn(headerTransformedMap); + threadContext.registerThreadContextStatePropagator(mockPropagator); + threadContext.putHeader("foo", "bar"); + threadContext.putTransient("test_transient_propagation_key", 1); + assertEquals(Integer.valueOf(1), threadContext.getTransient("test_transient_propagation_key")); + assertEquals("bar", threadContext.getHeader("foo")); + try (ThreadContext.StoredContext ctx = threadContext.stashContext()) { + threadContext.markAsSystemContext(); + assertNull(threadContext.getHeader("foo")); + assertNull(threadContext.getTransient("test_transient_propagation_key")); + assertEquals("1", threadContext.getHeader("default")); + } + + assertEquals("bar", threadContext.getHeader("foo")); + assertEquals(Integer.valueOf(1), threadContext.getTransient("test_transient_propagation_key")); + assertEquals("1", threadContext.getHeader("default")); + } + + public void testSerializeSystemContext() throws IOException { + Settings build = Settings.builder().put("request.headers.default", "1").build(); + Map<String, Object> transientHeaderMap = Collections.singletonMap("test_transient_propagation_key", "test"); + Map<String, Object> transientHeaderTransformedMap = Collections.singletonMap("test_transient_propagation_key", "test"); + Map<String, Object> headerMap = Collections.singletonMap("test_transient_propagation_key", "test"); + Map<String, String> headerTransformedMap = Collections.singletonMap("test_transient_propagation_key", "test"); + ThreadContext threadContext = new ThreadContext(build); + ThreadContextStatePropagator mockPropagator = Mockito.mock(ThreadContextStatePropagator.class); + Mockito.when(mockPropagator.transients(transientHeaderMap, true)).thenReturn(Collections.emptyMap()); + Mockito.when(mockPropagator.transients(transientHeaderMap, false)).thenReturn(transientHeaderTransformedMap); + + Mockito.when(mockPropagator.headers(headerMap, true)).thenReturn(headerTransformedMap); + Mockito.when(mockPropagator.headers(headerMap, false)).thenReturn(headerTransformedMap); + threadContext.registerThreadContextStatePropagator(mockPropagator); + threadContext.putHeader("foo", "bar"); + threadContext.putTransient("test_transient_propagation_key", "test"); + BytesStreamOutput out = new BytesStreamOutput(); + BytesStreamOutput outFromSystemContext = new BytesStreamOutput(); + threadContext.writeTo(out); + try (ThreadContext.StoredContext ctx = threadContext.stashContext()) { + assertEquals("test", threadContext.getTransient("test_transient_propagation_key")); + threadContext.markAsSystemContext(); + 
threadContext.writeTo(outFromSystemContext); + assertNull(threadContext.getHeader("foo")); + assertNull(threadContext.getTransient("test_transient_propagation_key")); + threadContext.readHeaders(outFromSystemContext.bytes().streamInput()); + assertNull(threadContext.getHeader("test_transient_propagation_key")); + } + assertEquals("test", threadContext.getTransient("test_transient_propagation_key")); + threadContext.readHeaders(out.bytes().streamInput()); + assertEquals("bar", threadContext.getHeader("foo")); + assertEquals("test", threadContext.getHeader("test_transient_propagation_key")); + assertEquals("1", threadContext.getHeader("default")); + } + public void testPutHeaders() { Settings build = Settings.builder().put("request.headers.default", "1").build(); ThreadContext threadContext = new ThreadContext(build); diff --git a/server/src/test/java/org/opensearch/common/xcontent/BaseXContentTestCase.java b/server/src/test/java/org/opensearch/common/xcontent/BaseXContentTestCase.java index 274b6477e3fbd..930c3415168a7 100644 --- a/server/src/test/java/org/opensearch/common/xcontent/BaseXContentTestCase.java +++ b/server/src/test/java/org/opensearch/common/xcontent/BaseXContentTestCase.java @@ -39,16 +39,17 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Constants; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.geo.GeoPoint; +import org.opensearch.common.io.PathUtils; +import org.opensearch.common.unit.DistanceUnit; import org.opensearch.core.ParseField; -import org.opensearch.common.Strings; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.geo.GeoPoint; -import org.opensearch.common.io.PathUtils; import org.opensearch.core.common.text.Text; -import org.opensearch.common.unit.DistanceUnit; -import org.opensearch.common.util.CollectionUtils; +import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.core.xcontent.DeprecationHandler; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedObjectNotFoundException; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; @@ -62,12 +63,6 @@ import org.opensearch.test.OpenSearchTestCase; import org.hamcrest.Matcher; import org.hamcrest.Matchers; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; -import org.joda.time.Instant; -import org.joda.time.ReadableInstant; -import org.joda.time.format.DateTimeFormatter; -import org.joda.time.format.ISODateTimeFormat; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; @@ -88,14 +83,15 @@ import java.time.Year; import java.time.ZoneOffset; import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; import java.util.ArrayList; import java.util.Arrays; import java.util.Calendar; import java.util.Collections; import java.util.Date; +import java.util.GregorianCalendar; import java.util.HashMap; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -126,11 +122,11 @@ public void testContentType() throws IOException { public void testStartEndObject() throws IOException { expectUnclosedException(() -> BytesReference.bytes(builder().startObject())); expectUnclosedException(() -> builder().startObject().close()); - expectUnclosedException(() -> 
Strings.toString(builder().startObject())); + expectUnclosedException(() -> builder().startObject().toString()); expectObjectException(() -> BytesReference.bytes(builder().endObject())); expectObjectException(() -> builder().endObject().close()); - expectObjectException(() -> Strings.toString(builder().endObject())); + expectObjectException(() -> builder().endObject().toString()); expectValueException(() -> builder().startObject("foo").endObject()); expectNonNullFieldException(() -> builder().startObject().startObject(null)); @@ -147,11 +143,11 @@ public void testStartEndObject() throws IOException { public void testStartEndArray() throws IOException { expectUnclosedException(() -> BytesReference.bytes(builder().startArray())); expectUnclosedException(() -> builder().startArray().close()); - expectUnclosedException(() -> Strings.toString(builder().startArray())); + expectUnclosedException(() -> builder().startArray().toString()); expectArrayException(() -> BytesReference.bytes(builder().endArray())); expectArrayException(() -> builder().endArray().close()); - expectArrayException(() -> Strings.toString(builder().endArray())); + expectArrayException(() -> builder().endArray().toString()); expectValueException(() -> builder().startArray("foo").endObject()); expectFieldException(() -> builder().startObject().startArray().endArray().endObject()); @@ -420,78 +416,21 @@ public void testText() throws Exception { } } - public void testReadableInstant() throws Exception { - assertResult("{'instant':null}", () -> builder().startObject().timeField("instant", (ReadableInstant) null).endObject()); - assertResult("{'instant':null}", () -> builder().startObject().field("instant").timeValue((ReadableInstant) null).endObject()); - - final DateTime t1 = new DateTime(2016, 1, 1, 0, 0, DateTimeZone.UTC); - - String expected = "{'t1':'2016-01-01T00:00:00.000Z'}"; - assertResult(expected, () -> builder().startObject().timeField("t1", t1).endObject()); - assertResult(expected, () -> builder().startObject().field("t1").timeValue(t1).endObject()); - - final DateTime t2 = new DateTime(2016, 12, 25, 7, 59, 42, 213, DateTimeZone.UTC); - - expected = "{'t2':'2016-12-25T07:59:42.213Z'}"; - assertResult(expected, () -> builder().startObject().timeField("t2", t2).endObject()); - assertResult(expected, () -> builder().startObject().field("t2").timeValue(t2).endObject()); - - final DateTimeFormatter formatter = randomFrom(ISODateTimeFormat.basicDate(), ISODateTimeFormat.dateTimeNoMillis()); - final DateTime t3 = DateTime.now(); - - expected = "{'t3':'" + formatter.print(t3) + "'}"; - assertResult(expected, () -> builder().startObject().timeField("t3", formatter.print(t3)).endObject()); - assertResult(expected, () -> builder().startObject().field("t3").value(formatter.print(t3)).endObject()); - - final DateTime t4 = new DateTime(randomDateTimeZone()); - - expected = "{'t4':'" + formatter.print(t4) + "'}"; - assertResult(expected, () -> builder().startObject().timeField("t4", formatter.print(t4)).endObject()); - assertResult(expected, () -> builder().startObject().field("t4").value(formatter.print(t4)).endObject()); - - long date = Math.abs(randomLong() % (2 * (long) 10e11)); // 1970-01-01T00:00:00Z - 2033-05-18T05:33:20.000+02:00 - final DateTime t5 = new DateTime(date, randomDateTimeZone()); - - expected = "{'t5':'" + XContentOpenSearchExtension.DEFAULT_DATE_PRINTER.print(t5) + "'}"; - assertResult(expected, () -> builder().startObject().timeField("t5", t5).endObject()); - assertResult(expected, () -> 
builder().startObject().field("t5").timeValue(t5).endObject()); - - expected = "{'t5':'" + formatter.print(t5) + "'}"; - assertResult(expected, () -> builder().startObject().timeField("t5", formatter.print(t5)).endObject()); - assertResult(expected, () -> builder().startObject().field("t5").value(formatter.print(t5)).endObject()); - - Instant i1 = new Instant(1451606400000L); // 2016-01-01T00:00:00.000Z - expected = "{'i1':'2016-01-01T00:00:00.000Z'}"; - assertResult(expected, () -> builder().startObject().timeField("i1", i1).endObject()); - assertResult(expected, () -> builder().startObject().field("i1").timeValue(i1).endObject()); - - Instant i2 = new Instant(1482652782213L); // 2016-12-25T07:59:42.213Z - expected = "{'i2':'" + formatter.print(i2) + "'}"; - assertResult(expected, () -> builder().startObject().timeField("i2", formatter.print(i2)).endObject()); - assertResult(expected, () -> builder().startObject().field("i2").value(formatter.print(i2)).endObject()); - } - public void testDate() throws Exception { assertResult("{'date':null}", () -> builder().startObject().timeField("date", (Date) null).endObject()); assertResult("{'date':null}", () -> builder().startObject().field("date").timeValue((Date) null).endObject()); - final Date d1 = new DateTime(2016, 1, 1, 0, 0, DateTimeZone.UTC).toDate(); + final Date d1 = Date.from(ZonedDateTime.of(2016, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC).toInstant()); assertResult("{'d1':'2016-01-01T00:00:00.000Z'}", () -> builder().startObject().timeField("d1", d1).endObject()); assertResult("{'d1':'2016-01-01T00:00:00.000Z'}", () -> builder().startObject().field("d1").timeValue(d1).endObject()); - final Date d2 = new DateTime(2016, 12, 25, 7, 59, 42, 213, DateTimeZone.UTC).toDate(); + final Date d2 = Date.from(ZonedDateTime.of(2016, 12, 25, 7, 59, 42, 213000000, ZoneOffset.UTC).toInstant()); assertResult("{'d2':'2016-12-25T07:59:42.213Z'}", () -> builder().startObject().timeField("d2", d2).endObject()); assertResult("{'d2':'2016-12-25T07:59:42.213Z'}", () -> builder().startObject().field("d2").timeValue(d2).endObject()); - - final DateTimeFormatter formatter = randomFrom(ISODateTimeFormat.basicDate(), ISODateTimeFormat.dateTimeNoMillis()); - final Date d3 = DateTime.now().toDate(); - - String expected = "{'d3':'" + formatter.print(d3.getTime()) + "'}"; - assertResult(expected, () -> builder().startObject().field("d3").value(formatter.print(d3.getTime())).endObject()); } public void testDateField() throws Exception { - final Date d = new DateTime(2016, 1, 1, 0, 0, DateTimeZone.UTC).toDate(); + final Date d = Date.from(ZonedDateTime.of(2016, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC).toInstant()); assertResult( "{'date_in_millis':1451606400000}", @@ -504,7 +443,7 @@ public void testDateField() throws Exception { } public void testCalendar() throws Exception { - Calendar calendar = new DateTime(2016, 1, 1, 0, 0, DateTimeZone.UTC).toCalendar(Locale.ROOT); + Calendar calendar = GregorianCalendar.from(ZonedDateTime.of(2016, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)); assertResult( "{'calendar':'2016-01-01T00:00:00.000Z'}", () -> builder().startObject().field("calendar").timeValue(calendar).endObject() @@ -672,20 +611,13 @@ public void testObjects() throws Exception { final String paths = Constants.WINDOWS ? 
"{'objects':['a\\\\b\\\\c','d\\\\e']}" : "{'objects':['a/b/c','d/e']}"; objects.put(paths, new Object[] { PathUtils.get("a", "b", "c"), PathUtils.get("d", "e") }); - final DateTimeFormatter formatter = XContentOpenSearchExtension.DEFAULT_DATE_PRINTER; - final Date d1 = new DateTime(2016, 1, 1, 0, 0, DateTimeZone.UTC).toDate(); - final Date d2 = new DateTime(2015, 1, 1, 0, 0, DateTimeZone.UTC).toDate(); - objects.put( - "{'objects':['" + formatter.print(d1.getTime()) + "','" + formatter.print(d2.getTime()) + "']}", - new Object[] { d1, d2 } - ); - - final DateTime dt1 = DateTime.now(); - final DateTime dt2 = new DateTime(2016, 12, 25, 7, 59, 42, 213, DateTimeZone.UTC); - objects.put("{'objects':['" + formatter.print(dt1) + "','2016-12-25T07:59:42.213Z']}", new Object[] { dt1, dt2 }); + final DateTimeFormatter formatter = DateTimeFormatter.ISO_INSTANT; + final Date d1 = Date.from(ZonedDateTime.of(2016, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC).toInstant()); + final Date d2 = Date.from(ZonedDateTime.of(2015, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC).toInstant()); + objects.put("{'objects':['2016-01-01T00:00:00.000Z','2015-01-01T00:00:00.000Z']}", new Object[] { d1, d2 }); - final Calendar c1 = new DateTime(2012, 7, 7, 10, 23, DateTimeZone.UTC).toCalendar(Locale.ROOT); - final Calendar c2 = new DateTime(2014, 11, 16, 19, 36, DateTimeZone.UTC).toCalendar(Locale.ROOT); + final Calendar c1 = GregorianCalendar.from(ZonedDateTime.of(2012, 7, 7, 10, 23, 0, 0, ZoneOffset.UTC)); + final Calendar c2 = GregorianCalendar.from(ZonedDateTime.of(2014, 11, 16, 19, 36, 0, 0, ZoneOffset.UTC)); objects.put("{'objects':['2012-07-07T10:23:00.000Z','2014-11-16T19:36:00.000Z']}", new Object[] { c1, c2 }); final ToXContent x1 = (builder, params) -> builder.startObject().field("f1", "v1").field("f2", 2).array("f3", 3, 4, 5).endObject(); @@ -726,14 +658,10 @@ public void testObject() throws Exception { final String path = Constants.WINDOWS ? 
"{'object':'a\\\\b\\\\c'}" : "{'object':'a/b/c'}"; object.put(path, PathUtils.get("a", "b", "c")); - final DateTimeFormatter formatter = XContentOpenSearchExtension.DEFAULT_DATE_PRINTER; - final Date d1 = new DateTime(2016, 1, 1, 0, 0, DateTimeZone.UTC).toDate(); - object.put("{'object':'" + formatter.print(d1.getTime()) + "'}", d1); - - final DateTime d2 = DateTime.now(); - object.put("{'object':'" + formatter.print(d2) + "'}", d2); + final Date d1 = Date.from(ZonedDateTime.of(2016, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC).toInstant()); + object.put("{'object':'" + "2016-01-01T00:00:00.000Z" + "'}", d1); - final Calendar c1 = new DateTime(2010, 1, 1, 0, 0, DateTimeZone.UTC).toCalendar(Locale.ROOT); + final Calendar c1 = GregorianCalendar.from(ZonedDateTime.of(2010, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)); object.put("{'object':'2010-01-01T00:00:00.000Z'}", c1); final ToXContent x1 = (builder, params) -> builder.startObject().field("f1", "v1").field("f2", 2).array("f3", 3, 4, 5).endObject(); @@ -865,7 +793,7 @@ public void testBasics() throws IOException { generator.writeEndObject(); } byte[] data = os.toByteArray(); - assertEquals(xcontentType(), XContentFactory.xContentType(data)); + assertEquals(xcontentType(), MediaTypeRegistry.xContent(data)); } public void testMissingEndObject() throws IOException { diff --git a/server/src/test/java/org/opensearch/common/xcontent/JsonToStringXContentParserTests.java b/server/src/test/java/org/opensearch/common/xcontent/JsonToStringXContentParserTests.java new file mode 100644 index 0000000000000..0feb7bcd1ceec --- /dev/null +++ b/server/src/test/java/org/opensearch/common/xcontent/JsonToStringXContentParserTests.java @@ -0,0 +1,113 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.xcontent; + +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.xcontent.DeprecationHandler; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class JsonToStringXContentParserTests extends OpenSearchTestCase { + + private String flattenJsonString(String fieldName, String in) throws IOException { + String transformed; + try ( + XContentParser parser = JsonXContent.jsonXContent.createParser( + xContentRegistry(), + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + in + ) + ) { + JsonToStringXContentParser jsonToStringXContentParser = new JsonToStringXContentParser( + xContentRegistry(), + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + parser, + fieldName + ); + // Skip the START_OBJECT token: + jsonToStringXContentParser.nextToken(); + + XContentParser transformedParser = jsonToStringXContentParser.parseObject(); + try (XContentBuilder jsonBuilder = XContentFactory.jsonBuilder()) { + jsonBuilder.copyCurrentStructure(transformedParser); + return jsonBuilder.toString(); + } + } + } + + public void testNestedObjects() throws IOException { + String jsonExample = "{" + "\"first\" : \"1\"," + "\"second\" : {" + " \"inner\": \"2.0\"" + "}," + "\"third\": \"three\"" + "}"; + + assertEquals( + "{" + + "\"flat\":[\"first\",\"second\",\"inner\",\"third\"]," + + "\"flat._value\":[\"1\",\"2.0\",\"three\"]," + + "\"flat._valueAndPath\":[\"flat.first=1\",\"flat.second.inner=2.0\",\"flat.third=three\"]" + + "}", + flattenJsonString("flat", jsonExample) + ); + } + + public void testChildHasDots() throws IOException { + // This should be exactly the same as testNestedObjects. We're just using the "flat" notation for the inner + // object. 
+ String jsonExample = "{" + "\"first\" : \"1\"," + "\"second.inner\" : \"2.0\"," + "\"third\": \"three\"" + "}"; + + assertEquals( + "{" + + "\"flat\":[\"first\",\"second\",\"inner\",\"third\"]," + + "\"flat._value\":[\"1\",\"2.0\",\"three\"]," + + "\"flat._valueAndPath\":[\"flat.first=1\",\"flat.second.inner=2.0\",\"flat.third=three\"]" + + "}", + flattenJsonString("flat", jsonExample) + ); + } + + public void testNestChildObjectWithDots() throws IOException { + String jsonExample = "{" + + "\"first\" : \"1\"," + + "\"second.inner\" : {" + + " \"really_inner\" : \"2.0\"" + + "}," + + "\"third\": \"three\"" + + "}"; + + assertEquals( + "{" + + "\"flat\":[\"first\",\"second\",\"inner\",\"really_inner\",\"third\"]," + + "\"flat._value\":[\"1\",\"2.0\",\"three\"]," + + "\"flat._valueAndPath\":[\"flat.first=1\",\"flat.second.inner.really_inner=2.0\",\"flat.third=three\"]" + + "}", + flattenJsonString("flat", jsonExample) + ); + } + + public void testNestChildObjectWithDotsAndFieldWithDots() throws IOException { + String jsonExample = "{" + + "\"first\" : \"1\"," + + "\"second.inner\" : {" + + " \"totally.absolutely.inner\" : \"2.0\"" + + "}," + + "\"third\": \"three\"" + + "}"; + + assertEquals( + "{" + + "\"flat\":[\"first\",\"second\",\"inner\",\"totally\",\"absolutely\",\"inner\",\"third\"]," + + "\"flat._value\":[\"1\",\"2.0\",\"three\"]," + + "\"flat._valueAndPath\":[\"flat.first=1\",\"flat.second.inner.totally.absolutely.inner=2.0\",\"flat.third=three\"]" + + "}", + flattenJsonString("flat", jsonExample) + ); + } + +} diff --git a/server/src/test/java/org/opensearch/common/xcontent/XContentFactoryTests.java b/server/src/test/java/org/opensearch/common/xcontent/XContentFactoryTests.java index 866def49af991..fdc115281e7aa 100644 --- a/server/src/test/java/org/opensearch/common/xcontent/XContentFactoryTests.java +++ b/server/src/test/java/org/opensearch/common/xcontent/XContentFactoryTests.java @@ -34,9 +34,11 @@ import com.fasterxml.jackson.dataformat.cbor.CBORConstants; import com.fasterxml.jackson.dataformat.smile.SmileConstants; -import org.opensearch.common.Strings; + import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.test.OpenSearchTestCase; @@ -48,7 +50,7 @@ public class XContentFactoryTests extends OpenSearchTestCase { public void testGuessJson() throws IOException { - testGuessType(XContentType.JSON); + testGuessType(MediaTypeRegistry.JSON); } public void testGuessSmile() throws IOException { @@ -63,16 +65,16 @@ public void testGuessCbor() throws IOException { testGuessType(XContentType.CBOR); } - private void testGuessType(XContentType type) throws IOException { - XContentBuilder builder = XContentFactory.contentBuilder(type); + private void testGuessType(MediaType type) throws IOException { + XContentBuilder builder = MediaTypeRegistry.contentBuilder(type); builder.startObject(); builder.field("field1", "value1"); builder.endObject(); final BytesReference bytes; - if (type == XContentType.JSON && randomBoolean()) { - final int length = randomIntBetween(0, 8 * XContentFactory.GUESS_HEADER_LENGTH); - final String content = Strings.toString(builder); + if (type == MediaTypeRegistry.JSON && randomBoolean()) { + final int length = randomIntBetween(0, 8 * MediaTypeRegistry.GUESS_HEADER_LENGTH); + final String content = builder.toString(); final 
StringBuilder sb = new StringBuilder(length + content.length()); final char[] chars = new char[length]; Arrays.fill(chars, ' '); @@ -82,24 +84,24 @@ private void testGuessType(XContentType type) throws IOException { bytes = BytesReference.bytes(builder); } - assertThat(XContentHelper.xContentType(bytes), equalTo(type)); - assertThat(XContentFactory.xContentType(bytes.streamInput()), equalTo(type)); + assertThat(MediaTypeRegistry.xContentType(bytes), equalTo(type)); + assertThat(MediaTypeRegistry.xContentType(bytes.streamInput()), equalTo(type)); // CBOR is binary, cannot use String if (type != XContentType.CBOR && type != XContentType.SMILE) { - assertThat(XContentFactory.xContentType(Strings.toString(builder)), equalTo(type)); + assertThat(MediaTypeRegistry.xContentType(builder.toString()), equalTo(type)); } } public void testCBORBasedOnMajorObjectDetection() { // for {"foo" => 5} a perl encoder, for example, generates: byte[] bytes = new byte[] { (byte) 0xA1, (byte) 0x43, (byte) 0x66, (byte) 0x6f, (byte) 0x6f, (byte) 0x5 }; - assertThat(XContentFactory.xContentType(bytes), equalTo(XContentType.CBOR)); + assertThat(MediaTypeRegistry.xContent(bytes), equalTo(XContentType.CBOR)); // assertThat(((Number) XContentHelper.convertToMap(bytes, true).v2().get("foo")).intValue(), equalTo(5)); // this is for {"foo" : 5} in python CBOR bytes = new byte[] { (byte) 0xA1, (byte) 0x63, (byte) 0x66, (byte) 0x6f, (byte) 0x6f, (byte) 0x5 }; - assertThat(XContentFactory.xContentType(bytes), equalTo(XContentType.CBOR)); + assertThat(MediaTypeRegistry.xContent(bytes), equalTo(XContentType.CBOR)); assertThat(((Number) XContentHelper.convertToMap(new BytesArray(bytes), true).v2().get("foo")).intValue(), equalTo(5)); // also make sure major type check doesn't collide with SMILE and JSON, just in case @@ -111,36 +113,36 @@ public void testCBORBasedOnMajorObjectDetection() { public void testCBORBasedOnMagicHeaderDetection() { byte[] bytes = new byte[] { (byte) 0xd9, (byte) 0xd9, (byte) 0xf7 }; - assertThat(XContentFactory.xContentType(bytes), equalTo(XContentType.CBOR)); + assertThat(MediaTypeRegistry.xContent(bytes), equalTo(XContentType.CBOR)); } public void testEmptyStream() throws Exception { ByteArrayInputStream is = new ByteArrayInputStream(new byte[0]); - assertNull(XContentFactory.xContentType(is)); + assertNull(MediaTypeRegistry.xContentType(is)); is = new ByteArrayInputStream(new byte[] { (byte) 1 }); - assertNull(XContentFactory.xContentType(is)); + assertNull(MediaTypeRegistry.xContentType(is)); } public void testInvalidStream() throws Exception { byte[] bytes = new byte[] { (byte) '"' }; - assertNull(XContentFactory.xContentType(bytes)); + assertNull(MediaTypeRegistry.mediaTypeFromBytes(bytes, 0, bytes.length)); bytes = new byte[] { (byte) 'x' }; - assertNull(XContentFactory.xContentType(bytes)); + assertNull(MediaTypeRegistry.mediaTypeFromBytes(bytes, 0, bytes.length)); } public void testJsonFromBytesOptionallyPrecededByUtf8Bom() throws Exception { byte[] bytes = new byte[] { (byte) '{', (byte) '}' }; - assertThat(XContentFactory.xContentType(bytes), equalTo(XContentType.JSON)); + assertThat(MediaTypeRegistry.xContent(bytes), equalTo(MediaTypeRegistry.JSON)); bytes = new byte[] { (byte) 0x20, (byte) '{', (byte) '}' }; - assertThat(XContentFactory.xContentType(bytes), equalTo(XContentType.JSON)); + assertThat(MediaTypeRegistry.xContent(bytes), equalTo(MediaTypeRegistry.JSON)); bytes = new byte[] { (byte) 0xef, (byte) 0xbb, (byte) 0xbf, (byte) '{', (byte) '}' }; -
assertThat(XContentFactory.xContentType(bytes), equalTo(XContentType.JSON)); + assertThat(MediaTypeRegistry.xContent(bytes), equalTo(MediaTypeRegistry.JSON)); bytes = new byte[] { (byte) 0xef, (byte) 0xbb, (byte) 0xbf, (byte) 0x20, (byte) '{', (byte) '}' }; - assertThat(XContentFactory.xContentType(bytes), equalTo(XContentType.JSON)); + assertThat(MediaTypeRegistry.xContent(bytes), equalTo(MediaTypeRegistry.JSON)); } } diff --git a/server/src/test/java/org/opensearch/common/xcontent/XContentParserUtilsTests.java b/server/src/test/java/org/opensearch/common/xcontent/XContentParserUtilsTests.java index 4f0d4b7cb63dc..60b078be9d639 100644 --- a/server/src/test/java/org/opensearch/common/xcontent/XContentParserUtilsTests.java +++ b/server/src/test/java/org/opensearch/common/xcontent/XContentParserUtilsTests.java @@ -34,19 +34,20 @@ import org.opensearch.common.CheckedBiConsumer; import org.opensearch.common.CheckedConsumer; +import org.opensearch.common.SetOnce; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; -import org.opensearch.common.SetOnce; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.core.xcontent.XContentParserUtils; import org.opensearch.core.xcontent.DeprecationHandler; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedObjectNotFoundException; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ObjectParser; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.core.xcontent.XContentParserUtils; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -55,7 +56,7 @@ import java.util.List; import java.util.Map; -import static org.opensearch.common.xcontent.XContentHelper.toXContent; +import static org.opensearch.core.xcontent.XContentHelper.toXContent; import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; import static org.opensearch.core.xcontent.XContentParserUtils.ensureFieldName; import static org.opensearch.core.xcontent.XContentParserUtils.parseTypedKeysObject; @@ -113,7 +114,7 @@ public void testStoredFieldsValueBoolean() throws IOException { public void testStoredFieldsValueBinary() throws IOException { final byte[] value = randomUnicodeOfLength(scaledRandomIntBetween(10, 1000)).getBytes("UTF-8"); assertParseFieldsSimpleValue(value, (xcontentType, result) -> { - if (xcontentType == XContentType.JSON) { + if (xcontentType == MediaTypeRegistry.JSON) { // binary values will be parsed back and returned as base64 strings when reading from json assertArrayEquals(value, Base64.getDecoder().decode((String) result)); } else { diff --git a/server/src/test/java/org/opensearch/common/xcontent/XContentTypeTests.java b/server/src/test/java/org/opensearch/common/xcontent/XContentTypeTests.java index 8c53d7edebca8..53fbcb4659f72 100644 --- a/server/src/test/java/org/opensearch/common/xcontent/XContentTypeTests.java +++ b/server/src/test/java/org/opensearch/common/xcontent/XContentTypeTests.java @@ -32,6 +32,7 @@ package org.opensearch.common.xcontent; import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.test.OpenSearchTestCase; import java.util.Locale; @@ -43,7 +44,7 @@ 
public class XContentTypeTests extends OpenSearchTestCase { public void testFromJson() throws Exception { String mediaType = "application/json"; - XContentType expectedXContentType = XContentType.JSON; + MediaType expectedXContentType = MediaTypeRegistry.JSON; assertThat(MediaType.fromMediaType(mediaType), equalTo(expectedXContentType)); assertThat(MediaType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType)); assertThat(MediaType.fromMediaType(mediaType + "; charset=UTF-8"), equalTo(expectedXContentType)); @@ -51,7 +52,7 @@ public void testFromJson() throws Exception { public void testFromNdJson() throws Exception { String mediaType = "application/x-ndjson"; - XContentType expectedXContentType = XContentType.JSON; + MediaType expectedXContentType = MediaTypeRegistry.JSON; assertThat(MediaType.fromMediaType(mediaType), equalTo(expectedXContentType)); assertThat(MediaType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType)); assertThat(MediaType.fromMediaType(mediaType + "; charset=UTF-8"), equalTo(expectedXContentType)); @@ -59,7 +60,7 @@ public void testFromNdJson() throws Exception { public void testFromJsonUppercase() throws Exception { String mediaType = "application/json".toUpperCase(Locale.ROOT); - XContentType expectedXContentType = XContentType.JSON; + MediaType expectedXContentType = MediaTypeRegistry.JSON; assertThat(MediaType.fromMediaType(mediaType), equalTo(expectedXContentType)); assertThat(MediaType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType)); assertThat(MediaType.fromMediaType(mediaType + "; charset=UTF-8"), equalTo(expectedXContentType)); @@ -89,14 +90,14 @@ public void testFromCbor() throws Exception { public void testFromWildcard() throws Exception { String mediaType = "application/*"; - XContentType expectedXContentType = XContentType.JSON; + MediaType expectedXContentType = MediaTypeRegistry.JSON; assertThat(MediaType.fromMediaType(mediaType), equalTo(expectedXContentType)); assertThat(MediaType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType)); } public void testFromWildcardUppercase() throws Exception { String mediaType = "APPLICATION/*"; - XContentType expectedXContentType = XContentType.JSON; + MediaType expectedXContentType = MediaTypeRegistry.JSON; assertThat(MediaType.fromMediaType(mediaType), equalTo(expectedXContentType)); assertThat(MediaType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType)); } @@ -109,15 +110,15 @@ public void testFromRubbish() throws Exception { } public void testVersionedMediaType() throws Exception { - assertThat(MediaType.fromMediaType("application/vnd.opensearch+json;compatible-with=7"), equalTo(XContentType.JSON)); + assertThat(MediaType.fromMediaType("application/vnd.opensearch+json;compatible-with=7"), equalTo(MediaTypeRegistry.JSON)); assertThat(MediaType.fromMediaType("application/vnd.opensearch+yaml;compatible-with=7"), equalTo(XContentType.YAML)); assertThat(MediaType.fromMediaType("application/vnd.opensearch+cbor;compatible-with=7"), equalTo(XContentType.CBOR)); assertThat(MediaType.fromMediaType("application/vnd.opensearch+smile;compatible-with=7"), equalTo(XContentType.SMILE)); - assertThat(MediaType.fromMediaType("application/vnd.opensearch+json ;compatible-with=7"), equalTo(XContentType.JSON)); + assertThat(MediaType.fromMediaType("application/vnd.opensearch+json ;compatible-with=7"), equalTo(MediaTypeRegistry.JSON)); String mthv = "application/vnd.opensearch+json ;compatible-with=7;charset=utf-8"; - assertThat(MediaType.fromMediaType(mthv), 
equalTo(XContentType.JSON)); - assertThat(MediaType.fromMediaType(mthv.toUpperCase(Locale.ROOT)), equalTo(XContentType.JSON)); + assertThat(MediaType.fromMediaType(mthv), equalTo(MediaTypeRegistry.JSON)); + assertThat(MediaType.fromMediaType(mthv.toUpperCase(Locale.ROOT)), equalTo(MediaTypeRegistry.JSON)); } } diff --git a/server/src/test/java/org/opensearch/common/xcontent/builder/XContentBuilderTests.java b/server/src/test/java/org/opensearch/common/xcontent/builder/XContentBuilderTests.java index 6c97297ce43e7..36e61a32525c1 100644 --- a/server/src/test/java/org/opensearch/common/xcontent/builder/XContentBuilderTests.java +++ b/server/src/test/java/org/opensearch/common/xcontent/builder/XContentBuilderTests.java @@ -33,19 +33,18 @@ package org.opensearch.common.xcontent.builder; import org.apache.lucene.util.BytesRef; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.io.PathUtils; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentOpenSearchExtension; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.core.xcontent.XContentGenerator; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentGenerator; +import org.opensearch.core.xcontent.XContentParser; import org.opensearch.test.OpenSearchTestCase; import java.io.ByteArrayOutputStream; @@ -68,7 +67,7 @@ public class XContentBuilderTests extends OpenSearchTestCase { public void testPrettyWithLfAtEnd() throws Exception { ByteArrayOutputStream os = new ByteArrayOutputStream(); - XContentGenerator generator = XContentFactory.xContent(XContentType.JSON).createGenerator(os); + XContentGenerator generator = MediaTypeRegistry.JSON.xContent().createGenerator(os); generator.usePrettyPrint(); generator.usePrintLineFeedAtEnd(); @@ -87,7 +86,7 @@ public void testPrettyWithLfAtEnd() throws Exception { public void testReuseJsonGenerator() throws Exception { ByteArrayOutputStream os = new ByteArrayOutputStream(); - XContentGenerator generator = XContentFactory.xContent(XContentType.JSON).createGenerator(os); + XContentGenerator generator = MediaTypeRegistry.JSON.xContent().createGenerator(os); generator.writeStartObject(); generator.writeStringField("test", "value"); generator.writeEndObject(); @@ -107,42 +106,39 @@ public void testReuseJsonGenerator() throws Exception { public void testRaw() throws IOException { { - XContentBuilder xContentBuilder = XContentFactory.contentBuilder(XContentType.JSON); + XContentBuilder xContentBuilder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); xContentBuilder.startObject(); xContentBuilder.rawField("foo", new BytesArray("{\"test\":\"value\"}").streamInput()); xContentBuilder.endObject(); - assertThat(Strings.toString(xContentBuilder), equalTo("{\"foo\":{\"test\":\"value\"}}")); + assertThat(xContentBuilder.toString(), equalTo("{\"foo\":{\"test\":\"value\"}}")); } { - XContentBuilder xContentBuilder = 
XContentFactory.contentBuilder(XContentType.JSON); + XContentBuilder xContentBuilder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); xContentBuilder.startObject(); xContentBuilder.rawField("foo", new BytesArray("{\"test\":\"value\"}").streamInput()); xContentBuilder.rawField("foo1", new BytesArray("{\"test\":\"value\"}").streamInput()); xContentBuilder.endObject(); - assertThat(Strings.toString(xContentBuilder), equalTo("{\"foo\":{\"test\":\"value\"},\"foo1\":{\"test\":\"value\"}}")); + assertThat(xContentBuilder.toString(), equalTo("{\"foo\":{\"test\":\"value\"},\"foo1\":{\"test\":\"value\"}}")); } { - XContentBuilder xContentBuilder = XContentFactory.contentBuilder(XContentType.JSON); + XContentBuilder xContentBuilder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); xContentBuilder.startObject(); xContentBuilder.field("test", "value"); xContentBuilder.rawField("foo", new BytesArray("{\"test\":\"value\"}").streamInput()); xContentBuilder.endObject(); - assertThat(Strings.toString(xContentBuilder), equalTo("{\"test\":\"value\",\"foo\":{\"test\":\"value\"}}")); + assertThat(xContentBuilder.toString(), equalTo("{\"test\":\"value\",\"foo\":{\"test\":\"value\"}}")); } { - XContentBuilder xContentBuilder = XContentFactory.contentBuilder(XContentType.JSON); + XContentBuilder xContentBuilder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); xContentBuilder.startObject(); xContentBuilder.field("test", "value"); xContentBuilder.rawField("foo", new BytesArray("{\"test\":\"value\"}").streamInput()); xContentBuilder.field("test1", "value1"); xContentBuilder.endObject(); - assertThat( - Strings.toString(xContentBuilder), - equalTo("{\"test\":\"value\",\"foo\":{\"test\":\"value\"},\"test1\":\"value1\"}") - ); + assertThat(xContentBuilder.toString(), equalTo("{\"test\":\"value\",\"foo\":{\"test\":\"value\"},\"test1\":\"value1\"}")); } { - XContentBuilder xContentBuilder = XContentFactory.contentBuilder(XContentType.JSON); + XContentBuilder xContentBuilder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); xContentBuilder.startObject(); xContentBuilder.field("test", "value"); xContentBuilder.rawField("foo", new BytesArray("{\"test\":\"value\"}").streamInput()); @@ -150,32 +146,32 @@ public void testRaw() throws IOException { xContentBuilder.field("test1", "value1"); xContentBuilder.endObject(); assertThat( - Strings.toString(xContentBuilder), + xContentBuilder.toString(), equalTo("{\"test\":\"value\",\"foo\":{\"test\":\"value\"},\"foo1\":{\"test\":\"value\"},\"test1\":\"value1\"}") ); } } public void testSimpleGenerator() throws Exception { - XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); builder.startObject().field("test", "value").endObject(); - assertThat(Strings.toString(builder), equalTo("{\"test\":\"value\"}")); + assertThat(builder.toString(), equalTo("{\"test\":\"value\"}")); - builder = XContentFactory.contentBuilder(XContentType.JSON); + builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); builder.startObject().field("test", "value").endObject(); - assertThat(Strings.toString(builder), equalTo("{\"test\":\"value\"}")); + assertThat(builder.toString(), equalTo("{\"test\":\"value\"}")); } public void testOverloadedList() throws Exception { - XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); 
builder.startObject().field("test", Arrays.asList("1", "2")).endObject(); - assertThat(Strings.toString(builder), equalTo("{\"test\":[\"1\",\"2\"]}")); + assertThat(builder.toString(), equalTo("{\"test\":[\"1\",\"2\"]}")); } public void testWritingBinaryToStream() throws Exception { BytesStreamOutput bos = new BytesStreamOutput(); - XContentGenerator gen = XContentFactory.xContent(XContentType.JSON).createGenerator(bos); + XContentGenerator gen = MediaTypeRegistry.JSON.xContent().createGenerator(bos); gen.writeStartObject(); gen.writeStringField("name", "something"); gen.flush(); @@ -189,39 +185,39 @@ public void testWritingBinaryToStream() throws Exception { } public void testByteConversion() throws Exception { - XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); builder.startObject().field("test_name", (Byte) (byte) 120).endObject(); assertThat(BytesReference.bytes(builder).utf8ToString(), equalTo("{\"test_name\":120}")); } public void testDateTypesConversion() throws Exception { Date date = new Date(); - String expectedDate = XContentOpenSearchExtension.DEFAULT_DATE_PRINTER.print(date.getTime()); + String expectedDate = XContentOpenSearchExtension.DEFAULT_FORMATTER.format(date.toInstant()); Calendar calendar = new GregorianCalendar(TimeZone.getTimeZone("UTC"), Locale.ROOT); - String expectedCalendar = XContentOpenSearchExtension.DEFAULT_DATE_PRINTER.print(calendar.getTimeInMillis()); - XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + String expectedCalendar = XContentOpenSearchExtension.DEFAULT_FORMATTER.format(calendar.toInstant()); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); builder.startObject().timeField("date", date).endObject(); - assertThat(Strings.toString(builder), equalTo("{\"date\":\"" + expectedDate + "\"}")); + assertThat(builder.toString(), equalTo("{\"date\":\"" + expectedDate + "\"}")); - builder = XContentFactory.contentBuilder(XContentType.JSON); + builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); builder.startObject().field("calendar", calendar).endObject(); - assertThat(Strings.toString(builder), equalTo("{\"calendar\":\"" + expectedCalendar + "\"}")); + assertThat(builder.toString(), equalTo("{\"calendar\":\"" + expectedCalendar + "\"}")); - builder = XContentFactory.contentBuilder(XContentType.JSON); + builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); Map<String, Object> map = new HashMap<>(); map.put("date", date); builder.map(map); - assertThat(Strings.toString(builder), equalTo("{\"date\":\"" + expectedDate + "\"}")); + assertThat(builder.toString(), equalTo("{\"date\":\"" + expectedDate + "\"}")); - builder = XContentFactory.contentBuilder(XContentType.JSON); + builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); map = new HashMap<>(); map.put("calendar", calendar); builder.map(map); - assertThat(Strings.toString(builder), equalTo("{\"calendar\":\"" + expectedCalendar + "\"}")); + assertThat(builder.toString(), equalTo("{\"calendar\":\"" + expectedCalendar + "\"}")); } public void testCopyCurrentStructure() throws Exception { - XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); builder.startObject().field("test", "test field").startObject("filter").startObject("terms"); // up to 20k random terms @@ -248,7 +244,7 @@ 
public void testCopyCurrentStructure() throws Exception { } } else if (token == XContentParser.Token.START_OBJECT) { if ("filter".equals(currentFieldName)) { - filterBuilder = XContentFactory.contentBuilder(parser.contentType()); + filterBuilder = MediaTypeRegistry.contentBuilder(parser.contentType()); filterBuilder.copyCurrentStructure(parser); } } @@ -288,61 +284,61 @@ public void testHandlingOfPath_absolute() throws IOException { } private void checkPathSerialization(Path path) throws IOException { - XContentBuilder pathBuilder = XContentFactory.contentBuilder(XContentType.JSON); + XContentBuilder pathBuilder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); pathBuilder.startObject().field("file", path).endObject(); - XContentBuilder stringBuilder = XContentFactory.contentBuilder(XContentType.JSON); + XContentBuilder stringBuilder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); stringBuilder.startObject().field("file", path.toString()).endObject(); - assertThat(Strings.toString(pathBuilder), equalTo(Strings.toString(stringBuilder))); + assertThat(pathBuilder.toString(), equalTo(stringBuilder.toString())); } public void testHandlingOfPath_StringName() throws IOException { Path path = PathUtils.get("path"); String name = new String("file"); - XContentBuilder pathBuilder = XContentFactory.contentBuilder(XContentType.JSON); + XContentBuilder pathBuilder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); pathBuilder.startObject().field(name, path).endObject(); - XContentBuilder stringBuilder = XContentFactory.contentBuilder(XContentType.JSON); + XContentBuilder stringBuilder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); stringBuilder.startObject().field(name, path.toString()).endObject(); - assertThat(Strings.toString(pathBuilder), equalTo(Strings.toString(stringBuilder))); + assertThat(pathBuilder.toString(), equalTo(stringBuilder.toString())); } public void testHandlingOfCollectionOfPaths() throws IOException { Path path = PathUtils.get("path"); - XContentBuilder pathBuilder = XContentFactory.contentBuilder(XContentType.JSON); + XContentBuilder pathBuilder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); pathBuilder.startObject().field("file", Arrays.asList(path)).endObject(); - XContentBuilder stringBuilder = XContentFactory.contentBuilder(XContentType.JSON); + XContentBuilder stringBuilder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); stringBuilder.startObject().field("file", Arrays.asList(path.toString())).endObject(); - assertThat(Strings.toString(pathBuilder), equalTo(Strings.toString(stringBuilder))); + assertThat(pathBuilder.toString(), equalTo(stringBuilder.toString())); } public void testIndentIsPlatformIndependent() throws IOException { - XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON).prettyPrint(); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON).prettyPrint(); builder.startObject().field("test", "foo").startObject("foo").field("foobar", "boom").endObject().endObject(); - String string = Strings.toString(builder); + String string = builder.toString(); assertEquals("{\n" + " \"test\" : \"foo\",\n" + " \"foo\" : {\n" + " \"foobar\" : \"boom\"\n" + " }\n" + "}", string); - builder = XContentFactory.contentBuilder(XContentType.YAML).prettyPrint(); + builder = MediaTypeRegistry.contentBuilder(XContentType.YAML).prettyPrint(); builder.startObject().field("test", "foo").startObject("foo").field("foobar", "boom").endObject().endObject(); - string = 
Strings.toString(builder); + string = builder.toString(); assertEquals("---\n" + "test: \"foo\"\n" + "foo:\n" + " foobar: \"boom\"\n", string); } public void testRenderGeoPoint() throws IOException { - XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON).prettyPrint(); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON).prettyPrint(); builder.startObject().field("foo").value(new GeoPoint(1, 2)).endObject(); - String string = Strings.toString(builder); + String string = builder.toString(); assertEquals("{\n" + " \"foo\" : {\n" + " \"lat\" : 1.0,\n" + " \"lon\" : 2.0\n" + " }\n" + "}", string.trim()); } public void testWriteMapWithNullKeys() throws IOException { - XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); try { builder.map(Collections.singletonMap(null, "test")); fail("write map should have failed"); @@ -352,7 +348,7 @@ public void testWriteMapWithNullKeys() throws IOException { } public void testWriteMapValueWithNullKeys() throws IOException { - XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); try { builder.map(Collections.singletonMap(null, "test")); fail("write map should have failed"); @@ -362,7 +358,7 @@ public void testWriteMapValueWithNullKeys() throws IOException { } public void testWriteFieldMapWithNullKeys() throws IOException { - XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); try { builder.startObject(); builder.field("map", Collections.singletonMap(null, "test")); @@ -374,7 +370,7 @@ public void testWriteFieldMapWithNullKeys() throws IOException { public void testMissingEndObject() throws IOException { IllegalStateException e = expectThrows(IllegalStateException.class, () -> { - try (XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values()))) { + try (XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values()))) { builder.startObject(); builder.field("foo", true); } @@ -385,7 +381,7 @@ public void testMissingEndObject() throws IOException { public void testMissingEndArray() throws IOException { IllegalStateException e = expectThrows(IllegalStateException.class, () -> { - try (XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values()))) { + try (XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values()))) { builder.startObject(); builder.startArray("foo"); builder.value(0); diff --git a/server/src/test/java/org/opensearch/common/xcontent/cbor/CborXContentParserTests.java b/server/src/test/java/org/opensearch/common/xcontent/cbor/CborXContentParserTests.java index 5b53862f98772..d471b8674be24 100644 --- a/server/src/test/java/org/opensearch/common/xcontent/cbor/CborXContentParserTests.java +++ b/server/src/test/java/org/opensearch/common/xcontent/cbor/CborXContentParserTests.java @@ -32,8 +32,8 @@ package org.opensearch.common.xcontent.cbor; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.XContentParser; import 
org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/common/xcontent/cbor/JsonVsCborTests.java b/server/src/test/java/org/opensearch/common/xcontent/cbor/JsonVsCborTests.java index e304798a0ff0c..f4ca3c5117196 100644 --- a/server/src/test/java/org/opensearch/common/xcontent/cbor/JsonVsCborTests.java +++ b/server/src/test/java/org/opensearch/common/xcontent/cbor/JsonVsCborTests.java @@ -33,11 +33,11 @@ package org.opensearch.common.xcontent.cbor; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.core.xcontent.XContentGenerator; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentGenerator; +import org.opensearch.core.xcontent.XContentParser; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -48,10 +48,10 @@ public class JsonVsCborTests extends OpenSearchTestCase { public void testCompareParsingTokens() throws IOException { BytesStreamOutput xsonOs = new BytesStreamOutput(); - XContentGenerator xsonGen = XContentFactory.xContent(XContentType.CBOR).createGenerator(xsonOs); + XContentGenerator xsonGen = XContentType.CBOR.xContent().createGenerator(xsonOs); BytesStreamOutput jsonOs = new BytesStreamOutput(); - XContentGenerator jsonGen = XContentFactory.xContent(XContentType.JSON).createGenerator(jsonOs); + XContentGenerator jsonGen = MediaTypeRegistry.JSON.xContent().createGenerator(jsonOs); xsonGen.writeStartObject(); jsonGen.writeStartObject(); diff --git a/server/src/test/java/org/opensearch/common/xcontent/smile/JsonVsSmileTests.java b/server/src/test/java/org/opensearch/common/xcontent/smile/JsonVsSmileTests.java index 4f26bdc7e9c26..5b5871e9b5084 100644 --- a/server/src/test/java/org/opensearch/common/xcontent/smile/JsonVsSmileTests.java +++ b/server/src/test/java/org/opensearch/common/xcontent/smile/JsonVsSmileTests.java @@ -33,11 +33,11 @@ package org.opensearch.common.xcontent.smile; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.core.xcontent.XContentGenerator; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentGenerator; +import org.opensearch.core.xcontent.XContentParser; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -48,10 +48,10 @@ public class JsonVsSmileTests extends OpenSearchTestCase { public void testCompareParsingTokens() throws IOException { BytesStreamOutput xsonOs = new BytesStreamOutput(); - XContentGenerator xsonGen = XContentFactory.xContent(XContentType.SMILE).createGenerator(xsonOs); + XContentGenerator xsonGen = XContentType.SMILE.xContent().createGenerator(xsonOs); BytesStreamOutput jsonOs = new BytesStreamOutput(); - XContentGenerator jsonGen = XContentFactory.xContent(XContentType.JSON).createGenerator(jsonOs); + XContentGenerator jsonGen = MediaTypeRegistry.JSON.xContent().createGenerator(jsonOs); xsonGen.writeStartObject(); jsonGen.writeStartObject(); diff --git a/server/src/test/java/org/opensearch/common/xcontent/support/AbstractFilteringTestCase.java 
b/server/src/test/java/org/opensearch/common/xcontent/support/AbstractFilteringTestCase.java index 8a8e40fc6167a..e3eeeaaf62f18 100644 --- a/server/src/test/java/org/opensearch/common/xcontent/support/AbstractFilteringTestCase.java +++ b/server/src/test/java/org/opensearch/common/xcontent/support/AbstractFilteringTestCase.java @@ -33,8 +33,8 @@ package org.opensearch.common.xcontent.support; import org.opensearch.common.CheckedFunction; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.util.set.Sets; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.XContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/common/xcontent/support/XContentHelperTests.java b/server/src/test/java/org/opensearch/common/xcontent/support/XContentHelperTests.java index 652e11c2fd99f..297b9f4b39751 100644 --- a/server/src/test/java/org/opensearch/common/xcontent/support/XContentHelperTests.java +++ b/server/src/test/java/org/opensearch/common/xcontent/support/XContentHelperTests.java @@ -32,16 +32,17 @@ package org.opensearch.common.xcontent.support; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.compress.CompressedXContent; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.DeprecationHandler; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.test.OpenSearchTestCase; import org.hamcrest.Matchers; @@ -211,7 +212,7 @@ public void testChildBytes() throws IOException { } public void testEmbeddedObject() throws IOException { - // Need to test this separately as XContentType.JSON never produces VALUE_EMBEDDED_OBJECT + // Need to test this separately as MediaTypeRegistry.JSON never produces VALUE_EMBEDDED_OBJECT XContentBuilder builder = XContentBuilder.builder(XContentType.CBOR.xContent()); builder.startObject().startObject("root"); CompressedXContent embedded = new CompressedXContent("{\"field\":\"value\"}"); @@ -252,7 +253,7 @@ public void testEmptyChildBytes() throws IOException { String inputJson = "{ \"mappings\" : {} }"; try ( - XContentParser parser = XContentType.JSON.xContent() + XContentParser parser = MediaTypeRegistry.JSON.xContent() .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, inputJson) ) { diff --git a/server/src/test/java/org/opensearch/common/xcontent/support/XContentMapValuesTests.java b/server/src/test/java/org/opensearch/common/xcontent/support/XContentMapValuesTests.java index 3077f30c5f57c..be194c070135a 100644 --- a/server/src/test/java/org/opensearch/common/xcontent/support/XContentMapValuesTests.java +++ b/server/src/test/java/org/opensearch/common/xcontent/support/XContentMapValuesTests.java @@ -32,12 +32,12 @@ package org.opensearch.common.xcontent.support; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesReference; import 
org.opensearch.common.collect.Tuple; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; @@ -55,7 +55,7 @@ import static java.util.Collections.emptySet; import static java.util.Collections.singleton; import static org.opensearch.common.xcontent.XContentHelper.convertToMap; -import static org.opensearch.common.xcontent.XContentHelper.toXContent; +import static org.opensearch.core.xcontent.XContentHelper.toXContent; import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.hasSize; @@ -95,7 +95,7 @@ public void testExtractValue() throws Exception { XContentBuilder builder = XContentFactory.jsonBuilder().startObject().field("test", "value").endObject(); Map<String, Object> map; - try (XContentParser parser = createParser(JsonXContent.jsonXContent, Strings.toString(builder))) { + try (XContentParser parser = createParser(JsonXContent.jsonXContent, builder.toString())) { map = parser.map(); } assertThat(XContentMapValues.extractValue("test", map).toString(), equalTo("value")); @@ -111,7 +111,7 @@ public void testExtractValue() throws Exception { .endObject() .endObject(); - try (XContentParser parser = createParser(JsonXContent.jsonXContent, Strings.toString(builder))) { + try (XContentParser parser = createParser(JsonXContent.jsonXContent, builder.toString())) { map = parser.map(); } assertThat(XContentMapValues.extractValue("path1.path2.test", map).toString(), equalTo("value")); @@ -136,7 +136,7 @@ public void testExtractValue() throws Exception { .endObject() .endObject(); - try (XContentParser parser = createParser(JsonXContent.jsonXContent, Strings.toString(builder))) { + try (XContentParser parser = createParser(JsonXContent.jsonXContent, builder.toString())) { map = parser.map(); } @@ -160,7 +160,7 @@ public void testExtractValue() throws Exception { .endObject() .endObject(); - try (XContentParser parser = createParser(JsonXContent.jsonXContent, Strings.toString(builder))) { + try (XContentParser parser = createParser(JsonXContent.jsonXContent, builder.toString())) { map = parser.map(); } @@ -174,7 +174,7 @@ public void testExtractValue() throws Exception { // fields with . 
in them builder = XContentFactory.jsonBuilder().startObject().field("xxx.yyy", "value").endObject(); - try (XContentParser parser = createParser(JsonXContent.jsonXContent, Strings.toString(builder))) { + try (XContentParser parser = createParser(JsonXContent.jsonXContent, builder.toString())) { map = parser.map(); } assertThat(XContentMapValues.extractValue("xxx.yyy", map).toString(), equalTo("value")); @@ -188,7 +188,7 @@ public void testExtractValue() throws Exception { .endObject() .endObject(); - try (XContentParser parser = createParser(JsonXContent.jsonXContent, Strings.toString(builder))) { + try (XContentParser parser = createParser(JsonXContent.jsonXContent, builder.toString())) { map = parser.map(); } assertThat(XContentMapValues.extractValue("path1.xxx.path2.yyy.test", map).toString(), equalTo("value")); @@ -216,7 +216,7 @@ public void testExtractValueWithNullValue() throws Exception { .endObject(); Map<String, Object> map; - try (XContentParser parser = createParser(JsonXContent.jsonXContent, Strings.toString(builder))) { + try (XContentParser parser = createParser(JsonXContent.jsonXContent, builder.toString())) { map = parser.map(); } assertEquals("value", XContentMapValues.extractValue("field", map, "NULL")); @@ -234,14 +234,14 @@ public void testExtractRawValue() throws Exception { XContentBuilder builder = XContentFactory.jsonBuilder().startObject().field("test", "value").endObject(); Map<String, Object> map; - try (XContentParser parser = createParser(JsonXContent.jsonXContent, Strings.toString(builder))) { + try (XContentParser parser = createParser(JsonXContent.jsonXContent, builder.toString())) { map = parser.map(); } assertThat(XContentMapValues.extractRawValues("test", map).get(0).toString(), equalTo("value")); builder = XContentFactory.jsonBuilder().startObject().field("test.me", "value").endObject(); - try (XContentParser parser = createParser(JsonXContent.jsonXContent, Strings.toString(builder))) { + try (XContentParser parser = createParser(JsonXContent.jsonXContent, builder.toString())) { map = parser.map(); } assertThat(XContentMapValues.extractRawValues("test.me", map).get(0).toString(), equalTo("value")); @@ -255,7 +255,7 @@ public void testExtractRawValue() throws Exception { .endObject() .endObject(); - try (XContentParser parser = createParser(JsonXContent.jsonXContent, Strings.toString(builder))) { + try (XContentParser parser = createParser(JsonXContent.jsonXContent, builder.toString())) { map = parser.map(); } assertThat(XContentMapValues.extractRawValues("path1.path2.test", map).get(0).toString(), equalTo("value")); @@ -269,7 +269,7 @@ public void testExtractRawValue() throws Exception { .endObject() .endObject(); - try (XContentParser parser = createParser(JsonXContent.jsonXContent, Strings.toString(builder))) { + try (XContentParser parser = createParser(JsonXContent.jsonXContent, builder.toString())) { map = parser.map(); } assertThat(XContentMapValues.extractRawValues("path1.xxx.path2.yyy.test", map).get(0).toString(), equalTo("value")); diff --git a/server/src/test/java/org/opensearch/common/xcontent/support/filtering/AbstractXContentFilteringTestCase.java b/server/src/test/java/org/opensearch/common/xcontent/support/filtering/AbstractXContentFilteringTestCase.java index c318dd89f573c..4ff1140d5fe4d 100644 --- a/server/src/test/java/org/opensearch/common/xcontent/support/filtering/AbstractXContentFilteringTestCase.java +++ b/server/src/test/java/org/opensearch/common/xcontent/support/filtering/AbstractXContentFilteringTestCase.java @@ -32,17 +32,15 
@@ package org.opensearch.common.xcontent.support.filtering; -import org.junit.Assert; -import org.opensearch.common.Strings; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.xcontent.support.AbstractFilteringTestCase; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.support.AbstractFilteringTestCase; +import org.junit.Assert; import java.io.IOException; import java.util.Set; @@ -85,11 +83,11 @@ public void testSingleFieldObject() throws IOException { } static void assertXContentBuilderAsString(final XContentBuilder expected, final XContentBuilder actual) { - Assert.assertThat(Strings.toString(actual), is(Strings.toString(expected))); + Assert.assertThat(actual.toString(), is(expected.toString())); } static void assertXContentBuilderAsBytes(final XContentBuilder expected, final XContentBuilder actual) { - XContent xContent = XContentFactory.xContent(actual.contentType()); + XContent xContent = actual.contentType().xContent(); try ( XContentParser jsonParser = xContent.createParser( NamedXContentRegistry.EMPTY, diff --git a/server/src/test/java/org/opensearch/common/xcontent/support/filtering/CborXContentFilteringTests.java b/server/src/test/java/org/opensearch/common/xcontent/support/filtering/CborXContentFilteringTests.java index 844d32b1003c3..a728f3c1be766 100644 --- a/server/src/test/java/org/opensearch/common/xcontent/support/filtering/CborXContentFilteringTests.java +++ b/server/src/test/java/org/opensearch/common/xcontent/support/filtering/CborXContentFilteringTests.java @@ -32,8 +32,8 @@ package org.opensearch.common.xcontent.support.filtering; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.XContentBuilder; public class CborXContentFilteringTests extends AbstractXContentFilteringTestCase { diff --git a/server/src/test/java/org/opensearch/common/xcontent/support/filtering/FilterPathGeneratorFilteringTests.java b/server/src/test/java/org/opensearch/common/xcontent/support/filtering/FilterPathGeneratorFilteringTests.java index 88cec303b4754..3dc209877214a 100644 --- a/server/src/test/java/org/opensearch/common/xcontent/support/filtering/FilterPathGeneratorFilteringTests.java +++ b/server/src/test/java/org/opensearch/common/xcontent/support/filtering/FilterPathGeneratorFilteringTests.java @@ -35,6 +35,7 @@ import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.filter.FilteringGeneratorDelegate; + import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.xcontent.filtering.FilterPathBasedFilter; import org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/common/xcontent/support/filtering/JsonXContentFilteringTests.java b/server/src/test/java/org/opensearch/common/xcontent/support/filtering/JsonXContentFilteringTests.java index 93affce9cf256..62adad8e593a9 100644 --- a/server/src/test/java/org/opensearch/common/xcontent/support/filtering/JsonXContentFilteringTests.java +++ 
b/server/src/test/java/org/opensearch/common/xcontent/support/filtering/JsonXContentFilteringTests.java @@ -32,8 +32,8 @@ package org.opensearch.common.xcontent.support.filtering; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.test.OpenSearchTestCase; public class JsonXContentFilteringTests extends AbstractXContentFilteringTestCase { diff --git a/server/src/test/java/org/opensearch/common/xcontent/support/filtering/SmileFilteringGeneratorTests.java b/server/src/test/java/org/opensearch/common/xcontent/support/filtering/SmileFilteringGeneratorTests.java index 1acc35f21b14a..8340fae311464 100644 --- a/server/src/test/java/org/opensearch/common/xcontent/support/filtering/SmileFilteringGeneratorTests.java +++ b/server/src/test/java/org/opensearch/common/xcontent/support/filtering/SmileFilteringGeneratorTests.java @@ -32,8 +32,8 @@ package org.opensearch.common.xcontent.support.filtering; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.XContentBuilder; public class SmileFilteringGeneratorTests extends AbstractXContentFilteringTestCase { diff --git a/server/src/test/java/org/opensearch/common/xcontent/support/filtering/YamlFilteringGeneratorTests.java b/server/src/test/java/org/opensearch/common/xcontent/support/filtering/YamlFilteringGeneratorTests.java index 29262b51990c3..c33b285067e08 100644 --- a/server/src/test/java/org/opensearch/common/xcontent/support/filtering/YamlFilteringGeneratorTests.java +++ b/server/src/test/java/org/opensearch/common/xcontent/support/filtering/YamlFilteringGeneratorTests.java @@ -32,8 +32,8 @@ package org.opensearch.common.xcontent.support.filtering; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.XContentBuilder; public class YamlFilteringGeneratorTests extends AbstractXContentFilteringTestCase { diff --git a/server/src/test/java/org/opensearch/core/RestStatusTests.java b/server/src/test/java/org/opensearch/core/RestStatusTests.java new file mode 100644 index 0000000000000..f8dba99aa8b60 --- /dev/null +++ b/server/src/test/java/org/opensearch/core/RestStatusTests.java @@ -0,0 +1,91 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.core;
+
+import org.opensearch.common.io.stream.BytesStreamOutput;
+import org.opensearch.core.action.ShardOperationFailedException;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.rest.RestStatus;
+import org.opensearch.core.xcontent.XContentBuilder;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.io.IOException;
+import java.util.PriorityQueue;
+
+public class RestStatusTests extends OpenSearchTestCase {
+
+    public void testStatusReturns200ForNoFailures() {
+        int totalShards = randomIntBetween(1, 100);
+        int successfulShards = randomIntBetween(1, totalShards);
+
+        assertEquals(RestStatus.OK, RestStatus.status(successfulShards, totalShards));
+    }
+
+    public void testStatusReturns503ForUnavailableShards() {
+        int totalShards = randomIntBetween(1, 100);
+        int successfulShards = 0;
+
+        assertEquals(RestStatus.SERVICE_UNAVAILABLE, RestStatus.status(successfulShards, totalShards));
+    }
+
+    public void testStatusReturnsFailureStatusWhenFailuresExist() {
+        int totalShards = randomIntBetween(1, 100);
+        int successfulShards = 0;
+
+        TestException[] failures = new TestException[totalShards];
+        PriorityQueue<TestException> heapOfFailures = new PriorityQueue<>((x, y) -> y.status().compareTo(x.status()));
+
+        for (int i = 0; i < totalShards; ++i) {
+            /*
+             * Status here doesn't need to convey a real failure and need not follow
+             * the REST contract. We're not testing the contract, but whether status()
+             * returns the greatest REST code among the supplied failures.
+             */
+            RestStatus status = randomFrom(RestStatus.values());
+            TestException failure = new TestException(status);
+
+            failures[i] = failure;
+            heapOfFailures.add(failure);
+        }
+
+        assertEquals(heapOfFailures.peek().status(), RestStatus.status(successfulShards, totalShards, failures));
+    }
+
+    public void testSerialization() throws IOException {
+        final RestStatus status = randomFrom(RestStatus.values());
+
+        try (BytesStreamOutput out = new BytesStreamOutput()) {
+            RestStatus.writeTo(out, status);
+
+            try (StreamInput in = out.bytes().streamInput()) {
+                RestStatus deserializedStatus = RestStatus.readFrom(in);
+
+                assertEquals(status, deserializedStatus);
+            }
+        }
+    }
+
+    private static class TestException extends ShardOperationFailedException {
+        TestException(final RestStatus status) {
+            super("super-idx", randomInt(), "gone-fishing", status, new Throwable("cake"));
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            throw new IOException("not implemented");
+        }
+
+        @Override
+        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+            throw new IOException("not implemented");
+        }
+    }
+
+}
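A note on what the new RestStatusTests pin down: RestStatus.status(successful, total, failures...) rolls shard-level outcomes up into a single HTTP code. A minimal sketch of that rule, using only the RestStatus API exercised by the tests above (the class name is illustrative and not part of the patch):

```java
import org.opensearch.core.rest.RestStatus;

// Hedged sketch (not part of the patch): the roll-up rule RestStatusTests asserts.
public class RestStatusRollUpDemo {
    public static void main(String[] args) {
        // Every shard succeeded and no failures were passed -> 200 OK.
        System.out.println(RestStatus.status(5, 5));   // OK
        // Nothing succeeded and no failure details are available -> 503.
        System.out.println(RestStatus.status(0, 5));   // SERVICE_UNAVAILABLE
        // When failures are supplied, status(successful, total, failures) returns
        // the most severe RestStatus among them, which is what the PriorityQueue in
        // testStatusReturnsFailureStatusWhenFailuresExist verifies.
    }
}
```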
diff --git a/server/src/test/java/org/opensearch/crypto/CryptoHandlerRegistryTests.java b/server/src/test/java/org/opensearch/crypto/CryptoHandlerRegistryTests.java
new file mode 100644
index 0000000000000..93a7b3d3eb4b9
--- /dev/null
+++ b/server/src/test/java/org/opensearch/crypto/CryptoHandlerRegistryTests.java
@@ -0,0 +1,168 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.crypto;
+
+import org.opensearch.cluster.metadata.CryptoMetadata;
+import org.opensearch.common.crypto.CryptoHandler;
+import org.opensearch.common.crypto.MasterKeyProvider;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.plugins.CryptoKeyProviderPlugin;
+import org.opensearch.plugins.CryptoPlugin;
+import org.opensearch.test.OpenSearchTestCase;
+import org.junit.Before;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+
+import org.mockito.ArgumentMatchers;
+import org.mockito.Mockito;
+
+public class CryptoHandlerRegistryTests extends OpenSearchTestCase {
+
+    private TestCryptoHandlerRegistry cryptoManagerRegistry;
+    private String pluginTypeWithCreationFailure;
+    private CryptoKeyProviderPlugin cryptoPlugin1;
+    private CryptoKeyProviderPlugin cryptoPlugin2;
+
+    @Before
+    public void setup() {
+        List<CryptoKeyProviderPlugin> cryptoKPPlugins = new ArrayList<>();
+        CryptoKeyProviderPlugin cryptoPlugin1 = Mockito.mock(CryptoKeyProviderPlugin.class);
+        String pluginType1 = UUID.randomUUID().toString();
+        Mockito.when(cryptoPlugin1.type()).thenReturn(pluginType1);
+        MasterKeyProvider masterKeyProvider1 = Mockito.mock(MasterKeyProvider.class);
+        Mockito.when(cryptoPlugin1.createKeyProvider(ArgumentMatchers.any())).thenReturn(masterKeyProvider1);
+        this.cryptoPlugin1 = cryptoPlugin1;
+        cryptoKPPlugins.add(cryptoPlugin1);
+
+        CryptoKeyProviderPlugin cryptoPlugin2 = Mockito.mock(CryptoKeyProviderPlugin.class);
+        String pluginType2 = UUID.randomUUID().toString();
+        Mockito.when(cryptoPlugin2.type()).thenReturn(pluginType2);
+        MasterKeyProvider masterKeyProvider2 = Mockito.mock(MasterKeyProvider.class);
+        Mockito.when(cryptoPlugin2.createKeyProvider(ArgumentMatchers.any())).thenReturn(masterKeyProvider2);
+        cryptoKPPlugins.add(cryptoPlugin2);
+        this.cryptoPlugin2 = cryptoPlugin2;
+
+        CryptoKeyProviderPlugin cryptoPluginCreationFailure = Mockito.mock(CryptoKeyProviderPlugin.class);
+        pluginTypeWithCreationFailure = UUID.randomUUID().toString();
+        Mockito.when(cryptoPluginCreationFailure.type()).thenReturn(pluginTypeWithCreationFailure);
+        Mockito.when(cryptoPluginCreationFailure.createKeyProvider(ArgumentMatchers.any()))
+            .thenThrow(new RuntimeException("Injected failure"));
+        cryptoKPPlugins.add(cryptoPluginCreationFailure);
+
+        cryptoManagerRegistry = new TestCryptoHandlerRegistry(new TestCryptoPlugin(), cryptoKPPlugins, Settings.EMPTY);
+    }
+
+    static class TestCryptoPlugin implements CryptoPlugin {
+
+        @Override
+        public CryptoHandler getOrCreateCryptoHandler(
+            MasterKeyProvider keyProvider,
+            String keyProviderName,
+            String keyProviderType,
+            Runnable onClose
+        ) {
+            return Mockito.mock(CryptoHandler.class);
+        }
+    }
+
+    static class TestCryptoHandlerRegistry extends CryptoHandlerRegistry {
+
+        protected TestCryptoHandlerRegistry(CryptoPlugin cryptoPlugin, List<CryptoKeyProviderPlugin> cryptoPlugins, Settings settings) {
+            super(List.of(cryptoPlugin), cryptoPlugins, settings);
+        }
+
+        @Override
+        public Map<String, CryptoKeyProviderPlugin> loadCryptoFactories(List<CryptoKeyProviderPlugin> cryptoKPPlugins) {
+            return super.loadCryptoFactories(cryptoKPPlugins);
+        }
+    }
+
+    public void testInitRegistryWithDuplicateKPType() {
+        List<CryptoKeyProviderPlugin> cryptoPlugins = new ArrayList<>();
+        CryptoKeyProviderPlugin cryptoPlugin1 = Mockito.mock(CryptoKeyProviderPlugin.class);
+        String pluginType = UUID.randomUUID().toString();
+        Mockito.when(cryptoPlugin1.type()).thenReturn(pluginType);
+        cryptoPlugins.add(cryptoPlugin1);
+        CryptoKeyProviderPlugin cryptoPlugin2 = Mockito.mock(CryptoKeyProviderPlugin.class);
+        Mockito.when(cryptoPlugin2.type()).thenReturn(pluginType);
+        cryptoPlugins.add(cryptoPlugin2);
+        expectThrows(IllegalArgumentException.class, () -> cryptoManagerRegistry.loadCryptoFactories(cryptoPlugins));
+    }
+
+    public void testRegistry() {
+        List<CryptoKeyProviderPlugin> cryptoPlugins = new ArrayList<>();
+        CryptoKeyProviderPlugin cryptoPlugin1 = Mockito.mock(CryptoKeyProviderPlugin.class);
+        String pluginType1 = UUID.randomUUID().toString();
+        Mockito.when(cryptoPlugin1.type()).thenReturn(pluginType1);
+        MasterKeyProvider masterKeyProvider1 = Mockito.mock(MasterKeyProvider.class);
+        Mockito.when(cryptoPlugin1.createKeyProvider(Mockito.any())).thenReturn(masterKeyProvider1);
+        cryptoPlugins.add(cryptoPlugin1);
+
+        CryptoKeyProviderPlugin cryptoPlugin2 = Mockito.mock(CryptoKeyProviderPlugin.class);
+        String pluginType2 = UUID.randomUUID().toString();
+        Mockito.when(cryptoPlugin2.type()).thenReturn(pluginType2);
+        MasterKeyProvider masterKeyProvider2 = Mockito.mock(MasterKeyProvider.class);
+        Mockito.when(cryptoPlugin2.createKeyProvider(Mockito.any())).thenReturn(masterKeyProvider2);
+        cryptoPlugins.add(cryptoPlugin2);
+
+        Map<String, CryptoKeyProviderPlugin> loadedPlugins = cryptoManagerRegistry.loadCryptoFactories(cryptoPlugins);
+
+        CryptoKeyProviderPlugin keyProviderPlugin = loadedPlugins.get(pluginType1);
+        assertNotNull(keyProviderPlugin);
+        assertEquals(cryptoPlugin1, keyProviderPlugin);
+
+        keyProviderPlugin = loadedPlugins.get(pluginType2);
+        assertNotNull(keyProviderPlugin);
+        assertEquals(cryptoPlugin2, keyProviderPlugin);
+    }
+
+    public void testCryptoManagerMissing() {
+        String pluginName = UUID.randomUUID().toString();
+        String pluginType = UUID.randomUUID().toString();
+        CryptoMetadata cryptoMetadata = new CryptoMetadata(pluginName, pluginType, Settings.EMPTY);
+        expectThrows(CryptoRegistryException.class, () -> cryptoManagerRegistry.fetchCryptoHandler(cryptoMetadata));
+    }
+
+    public void testCryptoManagerCreationFailure() {
+        String pluginName = UUID.randomUUID().toString();
+        CryptoMetadata cryptoMetadata = new CryptoMetadata(pluginName, pluginTypeWithCreationFailure, Settings.EMPTY);
+        expectThrows(CryptoRegistryException.class, () -> cryptoManagerRegistry.fetchCryptoHandler(cryptoMetadata));
+    }
+
+    public void testCryptoManagerCreationSuccess() {
+
+        String pluginName1 = UUID.randomUUID().toString();
+        CryptoMetadata cryptoMetadata = new CryptoMetadata(pluginName1, cryptoPlugin1.type(), Settings.EMPTY);
+        CryptoHandler cryptoHandler = cryptoManagerRegistry.fetchCryptoHandler(cryptoMetadata);
+        assertNotNull(cryptoHandler);
+
+        String pluginName2 = UUID.randomUUID().toString();
+        CryptoHandler cryptoHandler2 = cryptoManagerRegistry.fetchCryptoHandler(
+            new CryptoMetadata(pluginName2, cryptoPlugin2.type(), Settings.EMPTY)
+        );
+        assertNotNull(cryptoHandler2);
+        CryptoHandler cryptoHandler3 = cryptoManagerRegistry.fetchCryptoHandler(
+            new CryptoMetadata(pluginName1, cryptoPlugin1.type(), Settings.EMPTY)
+        );
+        assertNotNull(cryptoHandler3);
+        assertEquals(cryptoHandler, cryptoHandler3);
+        assertNotEquals(cryptoHandler2, cryptoHandler);
+
+        CryptoHandler cryptoHandlerNewType = cryptoManagerRegistry.fetchCryptoHandler(
+            new CryptoMetadata(pluginName1, cryptoPlugin2.type(), Settings.EMPTY)
+        );
+        assertNotNull(cryptoHandlerNewType);
+        assertNotEquals(cryptoHandler, cryptoHandlerNewType);
+        assertNotEquals(cryptoHandler2, cryptoHandlerNewType);
+        assertNotEquals(cryptoHandler3, cryptoHandlerNewType);
+    }
+}
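The last test above also fixes the registry's caching contract: fetching with the same key-provider name and type yields the cached handler, while a different name or type yields a distinct one. A hedged sketch of that contract, reusing only calls that appear in the test (the class, method, and string names here are illustrative):

```java
import org.opensearch.cluster.metadata.CryptoMetadata;
import org.opensearch.common.crypto.CryptoHandler;
import org.opensearch.common.settings.Settings;
import org.opensearch.crypto.CryptoHandlerRegistry;

// Hedged sketch (not part of the patch): the caching behavior asserted by
// testCryptoManagerCreationSuccess. "registry" and "kpType" are assumed to be
// set up exactly as in the test above.
class CryptoHandlerCachingDemo {
    static void demo(CryptoHandlerRegistry registry, String kpType) {
        CryptoHandler first = registry.fetchCryptoHandler(new CryptoMetadata("store-a", kpType, Settings.EMPTY));
        CryptoHandler again = registry.fetchCryptoHandler(new CryptoMetadata("store-a", kpType, Settings.EMPTY));
        // Same key-provider name and type -> the cached handler is handed back.
        assert first.equals(again);

        // A different name (or type) produces a distinct handler.
        CryptoHandler other = registry.fetchCryptoHandler(new CryptoMetadata("store-b", kpType, Settings.EMPTY));
        assert !first.equals(other);
    }
}
```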
diff --git a/server/src/test/java/org/opensearch/crypto/CryptoRegistryExceptionTests.java b/server/src/test/java/org/opensearch/crypto/CryptoRegistryExceptionTests.java
new file mode 100644
index 0000000000000..91f86a2b4104a
--- /dev/null
+++ b/server/src/test/java/org/opensearch/crypto/CryptoRegistryExceptionTests.java
@@ -0,0 +1,90 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.crypto;
+
+import org.opensearch.core.common.io.stream.InputStreamStreamInput;
+import org.opensearch.core.common.io.stream.OutputStreamStreamOutput;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.rest.RestStatus;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+
+public class CryptoRegistryExceptionTests extends OpenSearchTestCase {
+
+    public void testConstructorWithClientNameAndType() {
+        String clientName = "test-client";
+        String clientType = "test-type";
+        CryptoRegistryException exception = new CryptoRegistryException(clientName, clientType);
+
+        assertEquals(RestStatus.NOT_FOUND, exception.status());
+        assertEquals(clientName, exception.getName());
+        assertEquals(clientType, exception.getType());
+    }
+
+    public void testConstructorWithClientNameTypeAndCause() {
+        String clientName = "test-client";
+        String clientType = "test-type";
+        String causeMessage = "Something went wrong.";
+        Throwable cause = new Throwable(causeMessage);
+        CryptoRegistryException exception = new CryptoRegistryException(clientName, clientType, cause);
+
+        assertEquals(RestStatus.INTERNAL_SERVER_ERROR, exception.status());
+        assertEquals(clientName, exception.getName());
+        assertEquals(clientType, exception.getType());
+        assertEquals(cause, exception.getCause());
+    }
+
+    public void testConstructorWithClientNameTypeAndIllegalArgsCause() {
+        String clientName = "test-client";
+        String clientType = "test-type";
+        String causeMessage = "Bad arguments.";
+        IllegalArgumentException cause = new IllegalArgumentException(causeMessage);
+        CryptoRegistryException exception = new CryptoRegistryException(clientName, clientType, cause);
+
+        assertEquals(RestStatus.BAD_REQUEST, exception.status());
+        assertEquals(clientName, exception.getName());
+        assertEquals(clientType, exception.getType());
+        assertEquals(cause, exception.getCause());
+    }
+
+    public void testConstructorWithClientNameTypeAndCustomMessage() {
+        String clientName = "TestClient";
+        String clientType = "TestType";
+        String customMessage = "Invalid client data.";
+        CryptoRegistryException exception = new CryptoRegistryException(clientName, clientType, customMessage);
+
+        assertEquals(RestStatus.INTERNAL_SERVER_ERROR, exception.status());
+        assertEquals(clientName, exception.getName());
+        assertEquals(clientType, exception.getType());
+    }
+
+    public void testSerializationAndDeserialization() throws IOException {
+        String clientName = "TestClient";
+        String clientType = "TestType";
+        CryptoRegistryException originalException = new CryptoRegistryException(clientName, clientType);
+        ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
+        StreamOutput streamOutput = new OutputStreamStreamOutput(outputStream);
+        originalException.writeTo(streamOutput);
+
+        byte[] byteArray = outputStream.toByteArray();
+        ByteArrayInputStream inputStream = new ByteArrayInputStream(byteArray);
+        StreamInput streamInput = new InputStreamStreamInput(inputStream);
+        CryptoRegistryException deserializedException = new CryptoRegistryException(streamInput);
+
+        assertEquals(originalException.getMessage(), deserializedException.getMessage());
+        assertEquals(originalException.status(), deserializedException.status());
+        assertEquals(originalException.getName(), deserializedException.getName());
+        assertEquals(originalException.getType(), deserializedException.getType());
+    }
+}
diff --git a/server/src/test/java/org/opensearch/deps/jackson/JacksonLocationTests.java b/server/src/test/java/org/opensearch/deps/jackson/JacksonLocationTests.java
index 8da37f1e10a3a..6a57c3bf3b7ba 100644
--- a/server/src/test/java/org/opensearch/deps/jackson/JacksonLocationTests.java
+++ b/server/src/test/java/org/opensearch/deps/jackson/JacksonLocationTests.java
@@ -36,6 +36,7 @@
 import com.fasterxml.jackson.core.JsonGenerator;
 import com.fasterxml.jackson.core.JsonParser;
 import com.fasterxml.jackson.core.JsonToken;
+
 import org.opensearch.common.io.stream.BytesStreamOutput;
 import org.opensearch.test.OpenSearchTestCase;
diff --git a/server/src/test/java/org/opensearch/deps/lucene/VectorHighlighterTests.java b/server/src/test/java/org/opensearch/deps/lucene/VectorHighlighterTests.java
index 0f831ff28af79..880eaeca99bb0 100644
--- a/server/src/test/java/org/opensearch/deps/lucene/VectorHighlighterTests.java
+++ b/server/src/test/java/org/opensearch/deps/lucene/VectorHighlighterTests.java
@@ -125,7 +125,7 @@ public void testVectorHighlighterPrefixQuery() throws Exception {
         assertThat(fragment, nullValue());
 
         prefixQuery = new PrefixQuery(new Term("content", "ba"), PrefixQuery.SCORING_BOOLEAN_REWRITE);
-        Query rewriteQuery = prefixQuery.rewrite(reader);
+        Query rewriteQuery = prefixQuery.rewrite(searcher);
 
         fragment = highlighter.getBestFragment(highlighter.getFieldQuery(rewriteQuery), reader, topDocs.scoreDocs[0].doc, "content", 30);
         assertThat(fragment, notNullValue());
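The one functional change above tracks a Lucene API move: multi-term queries are now rewritten against an IndexSearcher rather than an IndexReader, hence prefixQuery.rewrite(searcher). A small sketch of the updated call shape, assuming a searcher over the same reader (the class and method names are illustrative):

```java
import java.io.IOException;

import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;

// Hedged sketch (not part of the patch): rewriting a multi-term query in
// newer Lucene takes the searcher, not the raw reader.
class RewriteDemo {
    static Query rewritten(IndexSearcher searcher) throws IOException {
        PrefixQuery prefix = new PrefixQuery(new Term("content", "ba"), PrefixQuery.SCORING_BOOLEAN_REWRITE);
        // Expands the prefix into concrete term queries using the searcher's reader.
        return prefix.rewrite(searcher);
    }
}
```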
diff --git a/server/src/test/java/org/opensearch/discovery/AbstractDisruptionTestCase.java b/server/src/test/java/org/opensearch/discovery/AbstractDisruptionTestCase.java
index fda1e0022c754..d5e51efbb534f 100644
--- a/server/src/test/java/org/opensearch/discovery/AbstractDisruptionTestCase.java
+++ b/server/src/test/java/org/opensearch/discovery/AbstractDisruptionTestCase.java
@@ -44,9 +44,9 @@
 import org.opensearch.common.unit.TimeValue;
 import org.opensearch.index.IndexService;
 import org.opensearch.plugins.Plugin;
-import org.opensearch.test.OpenSearchIntegTestCase;
 import org.opensearch.test.InternalSettingsPlugin;
 import org.opensearch.test.InternalTestCluster;
+import org.opensearch.test.OpenSearchIntegTestCase;
 import org.opensearch.test.disruption.NetworkDisruption;
 import org.opensearch.test.disruption.NetworkDisruption.Bridge;
 import org.opensearch.test.disruption.NetworkDisruption.DisruptedLinks;
diff --git a/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java b/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java
index ff29bfb8894db..b33ebf8333b36 100644
--- a/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java
+++ b/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java
@@ -34,18 +34,21 @@
 import org.opensearch.Version;
 import org.opensearch.cluster.ClusterState;
 import
org.opensearch.cluster.coordination.Coordinator; +import org.opensearch.cluster.coordination.PersistedStateRegistry; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.RerouteService; import org.opensearch.cluster.service.ClusterApplier; import org.opensearch.cluster.service.ClusterManagerService; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.gateway.GatewayMetaState; +import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.plugins.DiscoveryPlugin; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.ThreadPool; @@ -76,6 +79,8 @@ public class DiscoveryModuleTests extends OpenSearchTestCase { private ClusterSettings clusterSettings; private GatewayMetaState gatewayMetaState; + private RemoteStoreNodeService remoteStoreNodeService; + public interface DummyHostsProviderPlugin extends DiscoveryPlugin { Map<String, Supplier<SeedHostsProvider>> impl(); @@ -92,12 +97,13 @@ default Map<String, Supplier<SeedHostsProvider>> getSeedHostProviders( public void setupDummyServices() { threadPool = mock(ThreadPool.class); when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); - transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null); + transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, NoopTracer.INSTANCE); clusterManagerService = mock(ClusterManagerService.class); namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); clusterApplier = mock(ClusterApplier.class); clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); gatewayMetaState = mock(GatewayMetaState.class); + remoteStoreNodeService = mock(RemoteStoreNodeService.class); } @After @@ -120,7 +126,9 @@ private DiscoveryModule newModule(Settings settings, List<DiscoveryPlugin> plugi createTempDir().toAbsolutePath(), gatewayMetaState, mock(RerouteService.class), - null + null, + new PersistedStateRegistry(), + remoteStoreNodeService ); } diff --git a/server/src/test/java/org/opensearch/discovery/FileBasedSeedHostsProviderTests.java b/server/src/test/java/org/opensearch/discovery/FileBasedSeedHostsProviderTests.java index 61aac8247647a..f4515361a89b8 100644 --- a/server/src/test/java/org/opensearch/discovery/FileBasedSeedHostsProviderTests.java +++ b/server/src/test/java/org/opensearch/discovery/FileBasedSeedHostsProviderTests.java @@ -33,15 +33,16 @@ package org.opensearch.discovery; import org.opensearch.Version; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.util.PageCacheRecycler; -import 
org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; @@ -99,7 +100,8 @@ private void createTransportSvc() { new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ) { @Override public BoundTransportAddress boundAddress() { @@ -114,7 +116,8 @@ public BoundTransportAddress boundAddress() { transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - null + null, + NoopTracer.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/discovery/HandshakingTransportAddressConnectorTests.java b/server/src/test/java/org/opensearch/discovery/HandshakingTransportAddressConnectorTests.java index 5d252168c7b28..0d694bcfa135b 100644 --- a/server/src/test/java/org/opensearch/discovery/HandshakingTransportAddressConnectorTests.java +++ b/server/src/test/java/org/opensearch/discovery/HandshakingTransportAddressConnectorTests.java @@ -37,15 +37,16 @@ import org.apache.logging.log4j.Logger; import org.opensearch.OpenSearchException; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Nullable; import org.opensearch.common.SetOnce; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; -import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.MockLogAppender; +import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.junit.annotations.TestLogging; import org.opensearch.test.transport.MockTransport; import org.opensearch.threadpool.TestThreadPool; @@ -122,7 +123,8 @@ protected void onSendRequest(long requestId, String action, TransportRequest req TransportService.NOOP_TRANSPORT_INTERCEPTOR, address -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); transportService.start(); diff --git a/server/src/test/java/org/opensearch/discovery/InitializeExtensionRequestTests.java b/server/src/test/java/org/opensearch/discovery/InitializeExtensionRequestTests.java index 63b79c9b53081..daa42d3abbc50 100644 --- a/server/src/test/java/org/opensearch/discovery/InitializeExtensionRequestTests.java +++ b/server/src/test/java/org/opensearch/discovery/InitializeExtensionRequestTests.java @@ -10,10 +10,10 @@ import org.opensearch.Version; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.BytesStreamInput; -import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.common.transport.TransportAddress; +import 
org.opensearch.core.common.transport.TransportAddress; import org.opensearch.extensions.DiscoveryExtensionNode; import org.opensearch.extensions.ExtensionDependency; import org.opensearch.test.OpenSearchTestCase; @@ -27,6 +27,7 @@ public class InitializeExtensionRequestTests extends OpenSearchTestCase { public void testInitializeExtensionRequest() throws Exception { String expectedUniqueId = "test uniqueid"; Version expectedVersion = Version.fromString("2.0.0"); + String expectedServiceAccountHeader = "test"; ExtensionDependency expectedDependency = new ExtensionDependency(expectedUniqueId, expectedVersion); DiscoveryExtensionNode expectedExtensionNode = new DiscoveryExtensionNode( "firstExtension", @@ -46,9 +47,14 @@ public void testInitializeExtensionRequest() throws Exception { Version.CURRENT ); - InitializeExtensionRequest initializeExtensionRequest = new InitializeExtensionRequest(expectedSourceNode, expectedExtensionNode); + InitializeExtensionRequest initializeExtensionRequest = new InitializeExtensionRequest( + expectedSourceNode, + expectedExtensionNode, + expectedServiceAccountHeader + ); assertEquals(expectedExtensionNode, initializeExtensionRequest.getExtension()); assertEquals(expectedSourceNode, initializeExtensionRequest.getSourceNode()); + assertEquals(expectedServiceAccountHeader, initializeExtensionRequest.getServiceAccountHeader()); try (BytesStreamOutput out = new BytesStreamOutput()) { initializeExtensionRequest.writeTo(out); @@ -58,6 +64,7 @@ public void testInitializeExtensionRequest() throws Exception { assertEquals(expectedExtensionNode, initializeExtensionRequest.getExtension()); assertEquals(expectedSourceNode, initializeExtensionRequest.getSourceNode()); + assertEquals(expectedServiceAccountHeader, initializeExtensionRequest.getServiceAccountHeader()); } } } diff --git a/server/src/test/java/org/opensearch/discovery/InitializeExtensionResponseTests.java b/server/src/test/java/org/opensearch/discovery/InitializeExtensionResponseTests.java index dce868bd395ad..2cc42957cf120 100644 --- a/server/src/test/java/org/opensearch/discovery/InitializeExtensionResponseTests.java +++ b/server/src/test/java/org/opensearch/discovery/InitializeExtensionResponseTests.java @@ -8,9 +8,9 @@ package org.opensearch.discovery; +import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.BytesStreamInput; -import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.test.OpenSearchTestCase; import java.util.ArrayList; diff --git a/server/src/test/java/org/opensearch/discovery/PeerFinderMessagesTests.java b/server/src/test/java/org/opensearch/discovery/PeerFinderMessagesTests.java index bcf41ebf65a04..3e6786baf91ef 100644 --- a/server/src/test/java/org/opensearch/discovery/PeerFinderMessagesTests.java +++ b/server/src/test/java/org/opensearch/discovery/PeerFinderMessagesTests.java @@ -35,9 +35,9 @@ import org.opensearch.Version; import org.opensearch.cluster.coordination.PeersResponse; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.EqualsHashCodeTestUtils; import org.opensearch.test.EqualsHashCodeTestUtils.CopyFunction; +import org.opensearch.test.OpenSearchTestCase; import java.util.ArrayList; import java.util.Arrays; diff --git a/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java b/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java index 
91eec3d2edfaf..f861ab90896db 100644 --- a/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java +++ b/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java @@ -34,16 +34,17 @@ import org.opensearch.OpenSearchException; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.coordination.DeterministicTaskQueue; import org.opensearch.cluster.coordination.PeersResponse; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.node.DiscoveryNodes.Builder; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.discovery.PeerFinder.TransportAddressConnector; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.test.transport.CapturingTransport.CapturedRequest; @@ -242,7 +243,8 @@ public void setup() { boundTransportAddress -> localNode, null, emptySet(), - connectionManager + connectionManager, + NoopTracer.INSTANCE ); transportService.start(); diff --git a/server/src/test/java/org/opensearch/discovery/SeedHostsResolverTests.java b/server/src/test/java/org/opensearch/discovery/SeedHostsResolverTests.java index 95f84a70986e1..421f6c6fe279b 100644 --- a/server/src/test/java/org/opensearch/discovery/SeedHostsResolverTests.java +++ b/server/src/test/java/org/opensearch/discovery/SeedHostsResolverTests.java @@ -35,19 +35,20 @@ import org.apache.logging.log4j.Logger; import org.opensearch.Version; import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.network.NetworkAddress; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.util.PageCacheRecycler; -import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.FutureUtils; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.io.IOUtils; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -56,7 +57,6 @@ import org.opensearch.transport.nio.MockNioTransport; import org.junit.After; import org.junit.Before; -import org.mockito.Mockito; import java.io.Closeable; import java.io.IOException; @@ -77,6 +77,8 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; +import org.mockito.Mockito; + import 
static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; @@ -183,7 +185,8 @@ public void testRemovingLocalAddresses() { networkService, PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ) { @Override @@ -202,7 +205,8 @@ public BoundTransportAddress boundAddress() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); closeables.push(transportService); final List<TransportAddress> transportAddresses = SeedHostsResolver.resolveHostsLists( @@ -234,7 +238,8 @@ public void testUnknownHost() { networkService, PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ) { @Override @@ -260,7 +265,8 @@ public TransportAddress[] addressesFromString(String address) throws UnknownHost TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); closeables.push(transportService); @@ -288,7 +294,8 @@ public void testResolveTimeout() { networkService, PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ) { @Override @@ -325,7 +332,8 @@ public TransportAddress[] addressesFromString(String address) throws UnknownHost TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); closeables.push(transportService); final TimeValue resolveTimeout = TimeValue.timeValueSeconds(randomIntBetween(3, 5)); @@ -363,7 +371,8 @@ public void testCancellationOnClose() throws InterruptedException { networkService, PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ) { @Override @@ -401,7 +410,8 @@ public TransportAddress[] addressesFromString(String address) throws UnknownHost TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); closeables.push(transportService); recreateSeedHostsResolver( @@ -426,7 +436,8 @@ public void testInvalidHosts() throws IllegalAccessException { new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ) { @Override public BoundTransportAddress boundAddress() { @@ -445,7 +456,8 @@ public BoundTransportAddress boundAddress() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); closeables.push(transportService); final List<TransportAddress> transportAddresses = SeedHostsResolver.resolveHostsLists( diff --git a/server/src/test/java/org/opensearch/discovery/SettingsBasedSeedHostsProviderTests.java b/server/src/test/java/org/opensearch/discovery/SettingsBasedSeedHostsProviderTests.java index 4648c98bec5df..9a828e6708d41 100644 --- a/server/src/test/java/org/opensearch/discovery/SettingsBasedSeedHostsProviderTests.java +++ 
b/server/src/test/java/org/opensearch/discovery/SettingsBasedSeedHostsProviderTests.java
@@ -32,8 +32,8 @@ package org.opensearch.discovery;
 
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.transport.TransportAddress;
 import org.opensearch.common.util.set.Sets;
+import org.opensearch.core.common.transport.TransportAddress;
 import org.opensearch.discovery.SeedHostsProvider.HostsResolver;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.transport.TransportService;
diff --git a/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java b/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java
index 46f98b7a2755e..962eb743dca6e 100644
--- a/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java
+++ b/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java
@@ -39,16 +39,16 @@ import org.opensearch.common.settings.Setting;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.util.concurrent.AbstractRunnable;
-import org.opensearch.common.util.set.Sets;
 import org.opensearch.common.util.io.IOUtils;
-import org.opensearch.gateway.MetadataStateFormat;
+import org.opensearch.common.util.set.Sets;
 import org.opensearch.core.index.Index;
-import org.opensearch.index.IndexSettings;
 import org.opensearch.core.index.shard.ShardId;
+import org.opensearch.gateway.MetadataStateFormat;
+import org.opensearch.index.IndexSettings;
 import org.opensearch.node.Node;
-import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.IndexSettingsModule;
 import org.opensearch.test.NodeRoles;
+import org.opensearch.test.OpenSearchTestCase;
 
 import java.io.IOException;
 import java.nio.file.Files;
@@ -65,8 +65,8 @@ import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
 
-import static org.opensearch.test.NodeRoles.nonDataNode;
 import static org.opensearch.test.NodeRoles.nonClusterManagerNode;
+import static org.opensearch.test.NodeRoles.nonDataNode;
 import static org.hamcrest.CoreMatchers.equalTo;
 import static org.hamcrest.Matchers.arrayWithSize;
 import static org.hamcrest.Matchers.containsString;
@@ -359,6 +359,57 @@ protected void doRun() throws Exception {
         env.close();
     }
 
+    public void testIndexStoreListener() throws Exception {
+        final AtomicInteger shardCounter = new AtomicInteger(0);
+        final AtomicInteger indexCounter = new AtomicInteger(0);
+        final Index index = new Index("foo", "fooUUID");
+        final ShardId shardId = new ShardId(index, 0);
+        final NodeEnvironment.IndexStoreListener listener = new NodeEnvironment.IndexStoreListener() {
+            @Override
+            public void beforeShardPathDeleted(ShardId inShardId, IndexSettings indexSettings, NodeEnvironment env) {
+                assertEquals(shardId, inShardId);
+                shardCounter.incrementAndGet();
+            }
+
+            @Override
+            public void beforeIndexPathDeleted(Index inIndex, IndexSettings indexSettings, NodeEnvironment env) {
+                assertEquals(index, inIndex);
+                indexCounter.incrementAndGet();
+            }
+        };
+        final NodeEnvironment env = newNodeEnvironment(listener);
+
+        for (Path path : env.indexPaths(index)) {
+            Files.createDirectories(path.resolve("0"));
+        }
+
+        for (Path path : env.indexPaths(index)) {
+            assertTrue(Files.exists(path.resolve("0")));
+        }
+        assertEquals(0, shardCounter.get());
+
+        env.deleteShardDirectorySafe(new ShardId(index, 0), idxSettings);
+
+        for (Path path : env.indexPaths(index)) {
+            assertFalse(Files.exists(path.resolve("0")));
+        }
+        assertEquals(1, shardCounter.get());
+
+        for (Path path : env.indexPaths(index)) {
+            assertTrue(Files.exists(path));
+        }
+        assertEquals(0, indexCounter.get());
+
+        env.deleteIndexDirectorySafe(index, 5000, idxSettings);
+
+        for (Path path : env.indexPaths(index)) {
+            assertFalse(Files.exists(path));
+        }
+        assertEquals(1, indexCounter.get());
+        assertTrue("LockedShards: " + env.lockedShards(), env.lockedShards().isEmpty());
+        env.close();
+    }
+
 public void testStressShardLock() throws IOException, InterruptedException {
         class Int {
             int value = 0;
@@ -629,6 +680,11 @@ public NodeEnvironment newNodeEnvironment() throws IOException {
         return newNodeEnvironment(Settings.EMPTY);
     }
 
+    public NodeEnvironment newNodeEnvironment(NodeEnvironment.IndexStoreListener listener) throws IOException {
+        Settings build = buildEnvSettings(Settings.EMPTY);
+        return new NodeEnvironment(build, TestEnvironment.newEnvironment(build), listener);
+    }
+
     @Override
     public NodeEnvironment newNodeEnvironment(Settings settings) throws IOException {
         Settings build = buildEnvSettings(settings);
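The new testIndexStoreListener exercises the IndexStoreListener hook added to NodeEnvironment: callbacks fire just before a shard path or an index path is deleted. A minimal sketch of an implementation, assuming only the interface shape shown in this diff (the class name and logging are illustrative):

```java
import org.opensearch.core.index.Index;
import org.opensearch.core.index.shard.ShardId;
import org.opensearch.env.NodeEnvironment;
import org.opensearch.index.IndexSettings;

// Hedged sketch (not part of the patch): a listener that observes store
// deletions, wired in via the NodeEnvironment constructor overload that the
// test's newNodeEnvironment(listener) helper uses.
class LoggingIndexStoreListener implements NodeEnvironment.IndexStoreListener {
    @Override
    public void beforeShardPathDeleted(ShardId shardId, IndexSettings indexSettings, NodeEnvironment env) {
        // Called before deleteShardDirectorySafe removes the shard's on-disk path.
        System.out.println("about to delete shard path for " + shardId);
    }

    @Override
    public void beforeIndexPathDeleted(Index index, IndexSettings indexSettings, NodeEnvironment env) {
        // Called before deleteIndexDirectorySafe removes the index directory.
        System.out.println("about to delete index path for " + index);
    }
}
```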
diff --git a/server/src/test/java/org/opensearch/env/NodeMetadataTests.java b/server/src/test/java/org/opensearch/env/NodeMetadataTests.java
index 92ec33d7e78e0..86ea4546c63b9 100644
--- a/server/src/test/java/org/opensearch/env/NodeMetadataTests.java
+++ b/server/src/test/java/org/opensearch/env/NodeMetadataTests.java
@@ -33,8 +33,8 @@
 import org.opensearch.Version;
 import org.opensearch.common.collect.Tuple;
-import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.EqualsHashCodeTestUtils;
+import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.VersionUtils;
 
 import java.nio.file.Path;
diff --git a/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java b/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java
index 382698a300591..2a3525143c01f 100644
--- a/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java
+++ b/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java
@@ -46,11 +46,11 @@
 import org.opensearch.common.CheckedRunnable;
 import org.opensearch.common.settings.ClusterSettings;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.unit.ByteSizeUnit;
-import org.opensearch.common.unit.ByteSizeValue;
 import org.opensearch.common.util.BigArrays;
-import org.opensearch.gateway.PersistedClusterStateService;
+import org.opensearch.core.common.unit.ByteSizeUnit;
+import org.opensearch.core.common.unit.ByteSizeValue;
 import org.opensearch.core.index.Index;
+import org.opensearch.gateway.PersistedClusterStateService;
 import org.opensearch.test.OpenSearchTestCase;
 import org.hamcrest.Matcher;
 import org.junit.Before;
@@ -69,8 +69,8 @@ import static org.opensearch.node.Node.NODE_SEARCH_CACHE_SIZE_SETTING;
 import static org.opensearch.test.NodeRoles.addRoles;
 import static org.opensearch.test.NodeRoles.clusterManagerNode;
-import static org.opensearch.test.NodeRoles.nonDataNode;
 import static org.opensearch.test.NodeRoles.nonClusterManagerNode;
+import static org.opensearch.test.NodeRoles.nonDataNode;
 import static org.opensearch.test.NodeRoles.onlyRole;
 import static org.opensearch.test.NodeRoles.removeRoles;
 import static org.hamcrest.Matchers.allOf;
diff --git a/server/src/test/java/org/opensearch/extensions/DiscoveryExtensionNodeTests.java b/server/src/test/java/org/opensearch/extensions/DiscoveryExtensionNodeTests.java
index 578e7503b76a9..8146062c15b73 100644
--- a/server/src/test/java/org/opensearch/extensions/DiscoveryExtensionNodeTests.java
+++
b/server/src/test/java/org/opensearch/extensions/DiscoveryExtensionNodeTests.java @@ -10,7 +10,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.Version; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.test.OpenSearchTestCase; import java.net.InetAddress; diff --git a/server/src/test/java/org/opensearch/extensions/ExtensionResponseTests.java b/server/src/test/java/org/opensearch/extensions/ExtensionResponseTests.java index 5b438f6130c76..84e13a1ddf5a7 100644 --- a/server/src/test/java/org/opensearch/extensions/ExtensionResponseTests.java +++ b/server/src/test/java/org/opensearch/extensions/ExtensionResponseTests.java @@ -8,9 +8,9 @@ package org.opensearch.extensions; +import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.BytesStreamInput; -import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.test.OpenSearchTestCase; public class ExtensionResponseTests extends OpenSearchTestCase { diff --git a/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java b/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java index 41b841aec7be8..3c25dbdff3342 100644 --- a/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java +++ b/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java @@ -8,33 +8,8 @@ package org.opensearch.extensions; -import static java.util.Collections.emptyList; -import static java.util.Collections.emptyMap; -import static java.util.Collections.emptySet; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.mock; -import static org.opensearch.test.ClusterServiceUtils.createClusterService; - -import java.io.IOException; -import java.net.InetAddress; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Set; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; - import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; -import org.junit.After; -import org.junit.Before; import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.action.ActionModule; @@ -42,33 +17,36 @@ import org.opensearch.action.admin.cluster.state.ClusterStateResponse; import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.ClusterSettingsResponse; -import org.opensearch.common.util.FeatureFlags; -import org.opensearch.env.EnvironmentSettingsResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; +import org.opensearch.common.settings.SettingsModule; import org.opensearch.common.settings.WriteableSetting; -import 
org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.WriteableSetting.SettingType; -import org.opensearch.common.settings.SettingsModule; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.BytesStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.discovery.InitializeExtensionRequest; import org.opensearch.env.Environment; +import org.opensearch.env.EnvironmentSettingsResponse; +import org.opensearch.extensions.ExtensionsSettings.Extension; import org.opensearch.extensions.proto.ExtensionRequestProto; import org.opensearch.extensions.rest.RegisterRestActionsRequest; import org.opensearch.extensions.settings.RegisterCustomSettingsRequest; -import org.opensearch.extensions.ExtensionsSettings.Extension; import org.opensearch.identity.IdentityService; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.plugins.ExtensionAwarePlugin; import org.opensearch.rest.RestController; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.MockLogAppender; import org.opensearch.test.OpenSearchTestCase; @@ -77,10 +55,35 @@ import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; -import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportService; import org.opensearch.transport.nio.MockNioTransport; import org.opensearch.usage.UsageService; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.net.InetAddress; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; +import static org.opensearch.test.ClusterServiceUtils.createClusterService; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class ExtensionsManagerTests extends OpenSearchTestCase { private TransportService transportService; @@ -93,6 +96,8 @@ public class ExtensionsManagerTests extends OpenSearchTestCase { private Setting customSetting = Setting.simpleString("custom_extension_setting", "none", Property.ExtensionScope); private NodeClient client; private MockNioTransport transport; + private IdentityService identityService; + private final ThreadPool threadPool = new TestThreadPool(ExtensionsManagerTests.class.getSimpleName()); private final Settings settings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) @@ -112,7 
+117,8 @@ public void setup() throws Exception { new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ); transportService = new MockTransportService( settings, @@ -128,7 +134,8 @@ public void setup() throws Exception { Version.CURRENT ), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); actionModule = mock(ActionModule.class); extAwarePlugin = new ExtensionAwarePlugin() { @@ -164,6 +171,7 @@ public List<Setting<?>> getExtensionSettings() { Collections.emptyList() ); client = new NoOpNodeClient(this.getTestName()); + identityService = new IdentityService(Settings.EMPTY, List.of()); } @Override @@ -179,7 +187,7 @@ public void testLoadExtensions() throws Exception { Set<Setting<?>> additionalSettings = extAwarePlugin.getExtensionSettings().stream().collect(Collectors.toSet()); ExtensionScopedSettings extensionScopedSettings = new ExtensionScopedSettings(additionalSettings); - ExtensionsManager extensionsManager = new ExtensionsManager(additionalSettings); + ExtensionsManager extensionsManager = new ExtensionsManager(additionalSettings, identityService); ExtensionDependency dependentExtension = new ExtensionDependency("uniqueid0", Version.fromString("2.0.0")); Extension firstExtension = new Extension( @@ -275,7 +283,7 @@ public void testNonUniqueLoadedExtensions() throws Exception { null, null ); - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); extensionsManager.loadExtension(firstExtension); IOException exception = expectThrows(IOException.class, () -> extensionsManager.loadExtension(secondExtension)); assertEquals( @@ -314,7 +322,7 @@ public void testNonUniqueLoadedExtensions() throws Exception { public void testMissingRequiredFieldsWhileLoadingExtension() throws Exception { Extension firstExtension = new Extension("firstExtension", "uniqueid1", "127.0.0.0", "9300", "0.0.7", "3.0.0", "", null, null); - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); IOException exception = expectThrows(IOException.class, () -> extensionsManager.loadExtension(firstExtension)); assertEquals("Required field [minimum opensearch version] is missing in the request", exception.getMessage()); @@ -371,7 +379,7 @@ public void testExtensionDependency() throws Exception { } public void testInitialize() throws Exception { - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); @@ -404,19 +412,94 @@ public void testInitialize() throws Exception { ) ); - // Test needs to be changed to mock the connection between the local node and an extension. Assert statment is commented out for - // now. + // Test needs to be changed to mock the connection between the local node and an extension. 
// Link to issue: https://github.com/opensearch-project/OpenSearch/issues/4045 // mockLogAppender.assertAllExpectationsMatched(); } } + public void testInitializeExtension() throws Exception { + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); + + TransportService mockTransportService = spy( + new TransportService( + Settings.EMPTY, + mock(Transport.class), + threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, + null, + Collections.emptySet(), + NoopTracer.INSTANCE + ) + ); + + doNothing().when(mockTransportService).connectToExtensionNode(any(DiscoveryExtensionNode.class)); + + doNothing().when(mockTransportService) + .sendRequest(any(DiscoveryExtensionNode.class), anyString(), any(InitializeExtensionRequest.class), any()); + + extensionsManager.initializeServicesAndRestHandler( + actionModule, + settingsModule, + mockTransportService, + clusterService, + settings, + client, + identityService + ); + + Extension firstExtension = new Extension( + "firstExtension", + "uniqueid1", + "127.0.0.0", + "9301", + "0.0.7", + "2.0.0", + "2.0.0", + List.of(), + null + ); + + extensionsManager.initializeExtension(firstExtension); + + Extension secondExtension = new Extension( + "secondExtension", + "uniqueid2", + "127.0.0.0", + "9301", + "0.0.7", + "2.0.0", + "2.0.0", + List.of(), + null + ); + + extensionsManager.initializeExtension(secondExtension); + + ThreadPool.terminate(threadPool, 3, TimeUnit.SECONDS); + + verify(mockTransportService, times(2)).connectToExtensionNode(any(DiscoveryExtensionNode.class)); + + verify(mockTransportService, times(2)).sendRequest( + any(DiscoveryExtensionNode.class), + anyString(), + any(InitializeExtensionRequest.class), + any() + ); + } + public void testHandleRegisterRestActionsRequest() throws Exception { - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; + + extensionsManager.loadExtension( + new Extension("firstExtension", uniqueIdStr, "127.0.0.0", "9300", "0.0.7", "3.0.0", "3.0.0", List.of(), null) + ); + List<String> actionsList = List.of("GET /foo foo", "PUT /bar bar", "POST /baz baz"); List<String> deprecatedActionsList = List.of("GET /deprecated/foo foo_deprecated", "It's deprecated!"); RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(uniqueIdStr, actionsList, deprecatedActionsList); @@ -426,8 +509,60 @@ public void testHandleRegisterRestActionsRequest() throws Exception { assertTrue(((AcknowledgedResponse) response).getStatus()); } + public void testHandleRegisterRestActionsRequestRequiresDiscoveryNode() throws Exception { + + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); + initialize(extensionsManager); + + RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest("uniqueId1", List.of(), List.of()); + + expectThrows( + IllegalStateException.class, + () -> extensionsManager.getRestActionsRequestHandler() + .handleRegisterRestActionsRequest(registerActionsRequest, actionModule.getDynamicActionRegistry()) + ); + } + + public void testHandleRegisterRestActionsRequestMultiple() throws Exception { + + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); + initialize(extensionsManager); + + List<String> actionsList = List.of("GET /foo foo", "PUT /bar bar", "POST /baz baz"); + List<String> 
deprecatedActionsList = List.of("GET /deprecated/foo foo_deprecated", "It's deprecated!"); + for (int i = 0; i < 2; i++) { + String uniqueIdStr = "uniqueid-" + i; + + Set<Setting<?>> additionalSettings = extAwarePlugin.getExtensionSettings().stream().collect(Collectors.toSet()); + ExtensionScopedSettings extensionScopedSettings = new ExtensionScopedSettings(additionalSettings); + Extension firstExtension = new Extension( + "Extension " + i, + uniqueIdStr, + "127.0.0.0", + "9300", + "0.0.7", + "3.0.0", + "3.0.0", + List.of(), + extensionScopedSettings + ); + + extensionsManager.loadExtension(firstExtension); + + RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest( + uniqueIdStr, + actionsList, + deprecatedActionsList + ); + TransportResponse response = extensionsManager.getRestActionsRequestHandler() + .handleRegisterRestActionsRequest(registerActionsRequest, actionModule.getDynamicActionRegistry()); + assertEquals(AcknowledgedResponse.class, response.getClass()); + assertTrue(((AcknowledgedResponse) response).getStatus()); + } + } + public void testHandleRegisterSettingsRequest() throws Exception { - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; @@ -443,10 +578,13 @@ public void testHandleRegisterSettingsRequest() throws Exception { } public void testHandleRegisterRestActionsRequestWithInvalidMethod() throws Exception { - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; + extensionsManager.loadExtension( + new Extension("firstExtension", uniqueIdStr, "127.0.0.0", "9300", "0.0.7", "3.0.0", "3.0.0", List.of(), null) + ); List<String> actionsList = List.of("FOO /foo", "PUT /bar", "POST /baz"); List<String> deprecatedActionsList = List.of("GET /deprecated/foo", "It's deprecated!"); RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(uniqueIdStr, actionsList, deprecatedActionsList); @@ -458,10 +596,13 @@ public void testHandleRegisterRestActionsRequestWithInvalidMethod() throws Excep } public void testHandleRegisterRestActionsRequestWithInvalidDeprecatedMethod() throws Exception { - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; + extensionsManager.loadExtension( + new Extension("firstExtension", uniqueIdStr, "127.0.0.0", "9300", "0.0.7", "3.0.0", "3.0.0", List.of(), null) + ); List<String> actionsList = List.of("GET /foo", "PUT /bar", "POST /baz"); List<String> deprecatedActionsList = List.of("FOO /deprecated/foo", "It's deprecated!"); RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(uniqueIdStr, actionsList, deprecatedActionsList); @@ -473,9 +614,12 @@ public void testHandleRegisterRestActionsRequestWithInvalidDeprecatedMethod() th } public void testHandleRegisterRestActionsRequestWithInvalidUri() throws Exception { - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; + 
extensionsManager.loadExtension( + new Extension("firstExtension", uniqueIdStr, "127.0.0.0", "9300", "0.0.7", "3.0.0", "3.0.0", List.of(), null) + ); List<String> actionsList = List.of("GET", "PUT /bar", "POST /baz"); List<String> deprecatedActionsList = List.of("GET /deprecated/foo", "It's deprecated!"); RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(uniqueIdStr, actionsList, deprecatedActionsList); @@ -487,9 +631,12 @@ public void testHandleRegisterRestActionsRequestWithInvalidUri() throws Exceptio } public void testHandleRegisterRestActionsRequestWithInvalidDeprecatedUri() throws Exception { - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; + extensionsManager.loadExtension( + new Extension("firstExtension", uniqueIdStr, "127.0.0.0", "9300", "0.0.7", "3.0.0", "3.0.0", List.of(), null) + ); List<String> actionsList = List.of("GET /foo", "PUT /bar", "POST /baz"); List<String> deprecatedActionsList = List.of("GET", "It's deprecated!"); RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(uniqueIdStr, actionsList, deprecatedActionsList); @@ -501,7 +648,7 @@ public void testHandleRegisterRestActionsRequestWithInvalidDeprecatedUri() throw } public void testHandleExtensionRequest() throws Exception { - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); ExtensionRequest clusterStateRequest = new ExtensionRequest(ExtensionRequestProto.RequestType.REQUEST_EXTENSION_CLUSTER_STATE); @@ -655,7 +802,7 @@ public void testEnvironmentSettingsDefaultValue() throws Exception { } public void testAddSettingsUpdateConsumerRequest() throws Exception { - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); List<Setting<?>> componentSettings = List.of( @@ -699,7 +846,7 @@ public void testAddSettingsUpdateConsumerRequest() throws Exception { } public void testHandleAddSettingsUpdateConsumerRequest() throws Exception { - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); List<Setting<?>> componentSettings = List.of( @@ -719,7 +866,7 @@ public void testHandleAddSettingsUpdateConsumerRequest() throws Exception { } public void testUpdateSettingsRequest() throws Exception { - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); initialize(extensionsManager); Setting<?> componentSetting = Setting.boolSetting("falseSetting", false, Property.Dynamic); @@ -748,7 +895,7 @@ public void testUpdateSettingsRequest() throws Exception { public void testRegisterHandler() throws Exception { - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); TransportService mockTransportService = spy( new TransportService( @@ -758,7 +905,8 @@ public void testRegisterHandler() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, - 
Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ) ); extensionsManager.initializeServicesAndRestHandler( @@ -767,14 +915,15 @@ public void testRegisterHandler() throws Exception { mockTransportService, clusterService, settings, - client + client, + identityService ); verify(mockTransportService, times(9)).registerRequestHandler(anyString(), anyString(), anyBoolean(), anyBoolean(), any(), any()); } public void testIncompatibleExtensionRegistration() throws IOException { - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), identityService); Extension firstExtension = new Extension( "firstExtension", "uniqueid1", @@ -815,7 +964,7 @@ public List<Setting<?>> getExtensionSettings() { extensionScopedSettings ); - ExtensionsManager extensionsManager = new ExtensionsManager(additionalSettings); + ExtensionsManager extensionsManager = new ExtensionsManager(additionalSettings, identityService); extensionsManager.loadExtension(firstExtension); DiscoveryExtensionNode extension = new DiscoveryExtensionNode( @@ -853,7 +1002,7 @@ public void testAdditionalExtensionSettingsForExtensionWithoutCustomSettingSet() extensionScopedSettings ); - ExtensionsManager extensionsManager = new ExtensionsManager(additionalSettings); + ExtensionsManager extensionsManager = new ExtensionsManager(additionalSettings, identityService); extensionsManager.loadExtension(firstExtension); DiscoveryExtensionNode extension = new DiscoveryExtensionNode( @@ -884,7 +1033,8 @@ private void initialize(ExtensionsManager extensionsManager) { transportService, clusterService, settings, - client + client, + identityService ); } } diff --git a/server/src/test/java/org/opensearch/extensions/action/ExtensionActionRequestTests.java b/server/src/test/java/org/opensearch/extensions/action/ExtensionActionRequestTests.java index 57ff3b74f586f..33ec97cd0f112 100644 --- a/server/src/test/java/org/opensearch/extensions/action/ExtensionActionRequestTests.java +++ b/server/src/test/java/org/opensearch/extensions/action/ExtensionActionRequestTests.java @@ -9,9 +9,9 @@ package org.opensearch.extensions.action; import com.google.protobuf.ByteString; +import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.BytesStreamInput; -import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.test.OpenSearchTestCase; public class ExtensionActionRequestTests extends OpenSearchTestCase { diff --git a/server/src/test/java/org/opensearch/extensions/action/ExtensionActionResponseTests.java b/server/src/test/java/org/opensearch/extensions/action/ExtensionActionResponseTests.java index 07eb721e1bb7f..4ef95d36fb51f 100644 --- a/server/src/test/java/org/opensearch/extensions/action/ExtensionActionResponseTests.java +++ b/server/src/test/java/org/opensearch/extensions/action/ExtensionActionResponseTests.java @@ -8,9 +8,9 @@ package org.opensearch.extensions.action; +import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.BytesStreamInput; -import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.test.OpenSearchTestCase; import java.nio.charset.StandardCharsets; diff --git a/server/src/test/java/org/opensearch/extensions/action/ExtensionActionUtilTests.java 
b/server/src/test/java/org/opensearch/extensions/action/ExtensionActionUtilTests.java index d9274a4c45e5b..293051e03d883 100644 --- a/server/src/test/java/org/opensearch/extensions/action/ExtensionActionUtilTests.java +++ b/server/src/test/java/org/opensearch/extensions/action/ExtensionActionUtilTests.java @@ -8,8 +8,6 @@ package org.opensearch.extensions.action; -import org.junit.Before; -import org.mockito.Mockito; import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.common.io.stream.BytesStreamOutput; @@ -18,11 +16,14 @@ import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; +import org.mockito.Mockito; + import static org.opensearch.extensions.action.ExtensionActionUtil.UNIT_SEPARATOR; import static org.opensearch.extensions.action.ExtensionActionUtil.createProxyRequestBytes; diff --git a/server/src/test/java/org/opensearch/extensions/action/ExtensionHandleTransportRequestTests.java b/server/src/test/java/org/opensearch/extensions/action/ExtensionHandleTransportRequestTests.java index b8a54392f7388..77a86d772a6ca 100644 --- a/server/src/test/java/org/opensearch/extensions/action/ExtensionHandleTransportRequestTests.java +++ b/server/src/test/java/org/opensearch/extensions/action/ExtensionHandleTransportRequestTests.java @@ -9,9 +9,9 @@ package org.opensearch.extensions.action; import com.google.protobuf.ByteString; +import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.BytesStreamInput; -import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.test.OpenSearchTestCase; public class ExtensionHandleTransportRequestTests extends OpenSearchTestCase { diff --git a/server/src/test/java/org/opensearch/extensions/action/ExtensionTransportActionsHandlerTests.java b/server/src/test/java/org/opensearch/extensions/action/ExtensionTransportActionsHandlerTests.java index e46e05cd2dbec..c4d2f81f7cf79 100644 --- a/server/src/test/java/org/opensearch/extensions/action/ExtensionTransportActionsHandlerTests.java +++ b/server/src/test/java/org/opensearch/extensions/action/ExtensionTransportActionsHandlerTests.java @@ -9,23 +9,22 @@ package org.opensearch.extensions.action; import com.google.protobuf.ByteString; -import org.junit.After; -import org.junit.Before; import org.opensearch.Version; import org.opensearch.action.ActionModule; import org.opensearch.action.ActionModule.DynamicActionRegistry; import org.opensearch.action.support.ActionFilters; import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.util.PageCacheRecycler; -import org.opensearch.extensions.DiscoveryExtensionNode; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.extensions.AcknowledgedResponse; +import 
org.opensearch.extensions.DiscoveryExtensionNode; import org.opensearch.extensions.rest.RestSendToExtensionActionTests; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.client.NoOpNodeClient; import org.opensearch.test.transport.MockTransportService; @@ -35,6 +34,8 @@ import org.opensearch.transport.NodeNotConnectedException; import org.opensearch.transport.TransportService; import org.opensearch.transport.nio.MockNioTransport; +import org.junit.After; +import org.junit.Before; import java.net.InetAddress; import java.util.Collections; @@ -43,11 +44,10 @@ import java.util.Set; import java.util.concurrent.TimeUnit; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class ExtensionTransportActionsHandlerTests extends OpenSearchTestCase { private static final ActionFilters EMPTY_FILTERS = new ActionFilters(Collections.emptySet()); @@ -68,7 +68,8 @@ public void setup() throws Exception { new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ); transportService = new MockTransportService( settings, @@ -84,7 +85,8 @@ public void setup() throws Exception { Version.CURRENT ), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); discoveryExtensionNode = new DiscoveryExtensionNode( "firstExtension", diff --git a/server/src/test/java/org/opensearch/extensions/action/RegisterTransportActionsRequestTests.java b/server/src/test/java/org/opensearch/extensions/action/RegisterTransportActionsRequestTests.java index b03f87a5057e0..d60bdad8ac6b0 100644 --- a/server/src/test/java/org/opensearch/extensions/action/RegisterTransportActionsRequestTests.java +++ b/server/src/test/java/org/opensearch/extensions/action/RegisterTransportActionsRequestTests.java @@ -8,10 +8,10 @@ package org.opensearch.extensions.action; -import org.junit.Before; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; import java.io.IOException; import java.util.List; diff --git a/server/src/test/java/org/opensearch/extensions/action/RemoteExtensionActionResponseTests.java b/server/src/test/java/org/opensearch/extensions/action/RemoteExtensionActionResponseTests.java index 7ade7a695ebdd..a20405975e6b7 100644 --- a/server/src/test/java/org/opensearch/extensions/action/RemoteExtensionActionResponseTests.java +++ b/server/src/test/java/org/opensearch/extensions/action/RemoteExtensionActionResponseTests.java @@ -8,9 +8,9 @@ package org.opensearch.extensions.action; +import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.BytesStreamInput; -import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.test.OpenSearchTestCase; import java.nio.charset.StandardCharsets; diff --git a/server/src/test/java/org/opensearch/extensions/action/TransportActionRequestFromExtensionTests.java 
b/server/src/test/java/org/opensearch/extensions/action/TransportActionRequestFromExtensionTests.java index ade96ac51ab2b..6e9e827fdaf9e 100644 --- a/server/src/test/java/org/opensearch/extensions/action/TransportActionRequestFromExtensionTests.java +++ b/server/src/test/java/org/opensearch/extensions/action/TransportActionRequestFromExtensionTests.java @@ -9,9 +9,9 @@ package org.opensearch.extensions.action; import com.google.protobuf.ByteString; +import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.BytesStreamInput; -import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.test.OpenSearchTestCase; public class TransportActionRequestFromExtensionTests extends OpenSearchTestCase { diff --git a/server/src/test/java/org/opensearch/extensions/rest/ExtensionRestRequestTests.java b/server/src/test/java/org/opensearch/extensions/rest/ExtensionRestRequestTests.java index e12549b93ab53..8b73f2e81972f 100644 --- a/server/src/test/java/org/opensearch/extensions/rest/ExtensionRestRequestTests.java +++ b/server/src/test/java/org/opensearch/extensions/rest/ExtensionRestRequestTests.java @@ -8,20 +8,26 @@ package org.opensearch.extensions.rest; -import org.opensearch.core.rest.RestStatus; import org.opensearch.OpenSearchParseException; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.BytesStreamInput; -import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; +import org.opensearch.http.HttpRequest; +import org.opensearch.identity.IdentityService; +import org.opensearch.identity.Subject; +import org.opensearch.identity.tokens.OnBehalfOfClaims; +import org.opensearch.identity.tokens.TokenManager; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestRequest.Method; -import org.opensearch.http.HttpRequest; import org.opensearch.test.OpenSearchTestCase; import java.nio.charset.StandardCharsets; @@ -30,6 +36,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; + import static java.util.Map.entry; public class ExtensionRestRequestTests extends OpenSearchTestCase { @@ -39,15 +46,15 @@ public class ExtensionRestRequestTests extends OpenSearchTestCase { private String expectedUri; Map<String, String> expectedParams; Map<String, List<String>> expectedHeaders; - XContentType expectedContentType; + MediaType expectedContentType; BytesReference expectedContent; String extensionUniqueId1; Principal userPrincipal; HttpRequest.HttpVersion expectedHttpVersion; - // Will be replaced with ExtensionTokenProcessor and PrincipalIdentifierToken classes from feature/identity String extensionTokenProcessor; String expectedRequestIssuerIdentity; NamedWriteableRegistry registry; + private IdentityService identityService; 
public void setUp() throws Exception { super.setUp(); @@ -59,13 +66,19 @@ public void setUp() throws Exception { entry("Content-Type", Arrays.asList("application/json")), entry("foo", Arrays.asList("hello", "world")) ); - expectedContentType = XContentType.JSON; + expectedContentType = MediaTypeRegistry.JSON; expectedContent = new BytesArray("{\"key\": \"value\"}".getBytes(StandardCharsets.UTF_8)); extensionUniqueId1 = "ext_1"; userPrincipal = () -> "user1"; expectedHttpVersion = HttpRequest.HttpVersion.HTTP_1_1; extensionTokenProcessor = "placeholder_extension_token_processor"; - expectedRequestIssuerIdentity = "placeholder_request_issuer_identity"; + identityService = new IdentityService(Settings.EMPTY, List.of()); + TokenManager tokenManager = identityService.getTokenManager(); + Subject subject = identityService.getSubject(); + OnBehalfOfClaims claims = new OnBehalfOfClaims("testID", subject.getPrincipal().getName()); + expectedRequestIssuerIdentity = tokenManager + .issueOnBehalfOfToken(subject, claims) + .asAuthHeaderValue(); } public void testExtensionRestRequest() throws Exception { diff --git a/server/src/test/java/org/opensearch/extensions/rest/ExtensionRestResponseTests.java b/server/src/test/java/org/opensearch/extensions/rest/ExtensionRestResponseTests.java index e76e6b98811f7..b2d9f698e199c 100644 --- a/server/src/test/java/org/opensearch/extensions/rest/ExtensionRestResponseTests.java +++ b/server/src/test/java/org/opensearch/extensions/rest/ExtensionRestResponseTests.java @@ -8,26 +8,26 @@ package org.opensearch.extensions.rest; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.List; -import java.util.Map; - import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.http.HttpRequest; import org.opensearch.http.HttpResponse; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.rest.RestRequest; -import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.RestRequest.Method; import org.opensearch.test.OpenSearchTestCase; -import static org.opensearch.rest.BytesRestResponse.TEXT_CONTENT_TYPE; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.List; +import java.util.Map; + import static org.opensearch.core.rest.RestStatus.ACCEPTED; import static org.opensearch.core.rest.RestStatus.OK; +import static org.opensearch.rest.BytesRestResponse.TEXT_CONTENT_TYPE; public class ExtensionRestResponseTests extends OpenSearchTestCase { @@ -110,7 +110,7 @@ public HttpRequest releaseAndCopy() { } public void testConstructorWithBuilder() throws IOException { - XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent()); + XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder(); builder.startObject(); builder.field("status", ACCEPTED); builder.endObject(); diff --git a/server/src/test/java/org/opensearch/extensions/rest/RegisterRestActionsTests.java b/server/src/test/java/org/opensearch/extensions/rest/RegisterRestActionsTests.java index 8cb29bf264db9..de40347b8b391 100644 --- a/server/src/test/java/org/opensearch/extensions/rest/RegisterRestActionsTests.java +++ b/server/src/test/java/org/opensearch/extensions/rest/RegisterRestActionsTests.java @@ -8,13 
+8,13 @@ package org.opensearch.extensions.rest; -import java.util.List; - +import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.BytesStreamInput; -import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.test.OpenSearchTestCase; +import java.util.List; + public class RegisterRestActionsTests extends OpenSearchTestCase { public void testRegisterRestActionsRequest() throws Exception { diff --git a/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java b/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java index 030df865c4e4a..0dae0ae1b4e0b 100644 --- a/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java +++ b/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java @@ -8,37 +8,23 @@ package org.opensearch.extensions.rest; -import java.util.Collections; -import java.util.List; -import java.util.Optional; -import java.util.Set; -import java.util.concurrent.TimeUnit; -import java.util.function.Function; - -import static java.util.Collections.emptyMap; -import static java.util.Collections.emptySet; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.when; - -import org.junit.After; -import org.junit.Before; -import org.mockito.Mockito; import org.opensearch.Version; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.PageCacheRecycler; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.extensions.ExtensionsManager; -import org.opensearch.extensions.ExtensionsSettings; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; -import org.opensearch.rest.RestRequest; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.extensions.DiscoveryExtensionNode; +import org.opensearch.extensions.ExtensionsManager; +import org.opensearch.extensions.ExtensionsSettings.Extension; +import org.opensearch.identity.IdentityService; +import org.opensearch.rest.RestRequest; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.rest.FakeRestChannel; import org.opensearch.test.rest.FakeRestRequest; @@ -47,6 +33,24 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; import org.opensearch.transport.nio.MockNioTransport; +import org.junit.After; +import org.junit.Before; + +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; + +import org.mockito.Mockito; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; 
public class RestInitializeExtensionActionTests extends OpenSearchTestCase { @@ -64,7 +68,8 @@ public void setup() throws Exception { new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ); transportService = new MockTransportService( settings, @@ -80,7 +85,8 @@ public void setup() throws Exception { Version.CURRENT ), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); } @@ -103,7 +109,7 @@ public void testRestInitializeExtensionActionResponse() throws Exception { + "\"minimumCompatibleVersion\":\"" + Version.CURRENT.minimumCompatibilityVersion().toString() + "\"}"; - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent(new BytesArray(content), XContentType.JSON) + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent(new BytesArray(content), MediaTypeRegistry.JSON) .withMethod(RestRequest.Method.POST) .build(); @@ -115,7 +121,7 @@ public void testRestInitializeExtensionActionResponse() throws Exception { } public void testRestInitializeExtensionActionFailure() throws Exception { - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(), new IdentityService(Settings.EMPTY, List.of())); RestInitializeExtensionAction restInitializeExtensionAction = new RestInitializeExtensionAction(extensionsManager); final String content = "{\"name\":\"ad-extension\",\"uniqueId\":\"\",\"hostAddress\":\"127.0.0.1\"," @@ -125,7 +131,7 @@ public void testRestInitializeExtensionActionFailure() throws Exception { + "\"minimumCompatibleVersion\":\"" + Version.CURRENT.minimumCompatibilityVersion().toString() + "\"}"; - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent(new BytesArray(content), XContentType.JSON) + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent(new BytesArray(content), MediaTypeRegistry.JSON) .withMethod(RestRequest.Method.POST) .build(); @@ -148,13 +154,16 @@ public void testRestInitializeExtensionActionResponseWithAdditionalSettings() th Function.identity(), Setting.Property.ExtensionScope ); - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(boolSetting, stringSetting, intSetting, listSetting)); + ExtensionsManager extensionsManager = new ExtensionsManager( + Set.of(boolSetting, stringSetting, intSetting, listSetting), + new IdentityService(Settings.EMPTY, List.of()) + ); ExtensionsManager spy = spy(extensionsManager); // optionally, you can stub out some methods: when(spy.getAdditionalSettings()).thenCallRealMethod(); - Mockito.doCallRealMethod().when(spy).loadExtension(any(ExtensionsSettings.Extension.class)); - Mockito.doNothing().when(spy).initialize(); + Mockito.doCallRealMethod().when(spy).loadExtension(any(Extension.class)); + Mockito.doNothing().when(spy).initializeExtensionNode(any(DiscoveryExtensionNode.class)); RestInitializeExtensionAction restInitializeExtensionAction = new RestInitializeExtensionAction(spy); final String content = "{\"name\":\"ad-extension\",\"uniqueId\":\"ad-extension\",\"hostAddress\":\"127.0.0.1\"," + "\"port\":\"4532\",\"version\":\"1.0\",\"opensearchVersion\":\"" @@ -163,17 +172,17 @@ public void testRestInitializeExtensionActionResponseWithAdditionalSettings() th + "\"minimumCompatibleVersion\":\"" + 
Version.CURRENT.minimumCompatibilityVersion().toString() + "\",\"boolSetting\":true,\"stringSetting\":\"customSetting\",\"intSetting\":5,\"listSetting\":[\"one\",\"two\",\"three\"]}"; - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent(new BytesArray(content), XContentType.JSON) + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent(new BytesArray(content), MediaTypeRegistry.JSON) .withMethod(RestRequest.Method.POST) .build(); FakeRestChannel channel = new FakeRestChannel(request, false, 0); restInitializeExtensionAction.handleRequest(request, channel, null); - assertEquals(channel.capturedResponse().status(), RestStatus.ACCEPTED); + assertEquals(RestStatus.ACCEPTED, channel.capturedResponse().status()); assertTrue(channel.capturedResponse().content().utf8ToString().contains("A request to initialize an extension has been sent.")); - Optional<ExtensionsSettings.Extension> extension = spy.lookupExtensionSettingsById("ad-extension"); + Optional<Extension> extension = spy.lookupExtensionSettingsById("ad-extension"); assertTrue(extension.isPresent()); assertEquals(true, extension.get().getAdditionalSettings().get(boolSetting)); assertEquals("customSetting", extension.get().getAdditionalSettings().get(stringSetting)); @@ -195,13 +204,16 @@ public void testRestInitializeExtensionActionResponseWithAdditionalSettingsUsing Function.identity(), Setting.Property.ExtensionScope ); - ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(boolSetting, stringSetting, intSetting, listSetting)); + ExtensionsManager extensionsManager = new ExtensionsManager( + Set.of(boolSetting, stringSetting, intSetting, listSetting), + new IdentityService(Settings.EMPTY, List.of()) + ); ExtensionsManager spy = spy(extensionsManager); // optionally, you can stub out some methods: when(spy.getAdditionalSettings()).thenCallRealMethod(); - Mockito.doCallRealMethod().when(spy).loadExtension(any(ExtensionsSettings.Extension.class)); - Mockito.doNothing().when(spy).initialize(); + Mockito.doCallRealMethod().when(spy).loadExtension(any(Extension.class)); + Mockito.doNothing().when(spy).initializeExtensionNode(any(DiscoveryExtensionNode.class)); RestInitializeExtensionAction restInitializeExtensionAction = new RestInitializeExtensionAction(spy); final String content = "{\"name\":\"ad-extension\",\"uniqueId\":\"ad-extension\",\"hostAddress\":\"127.0.0.1\"," + "\"port\":\"4532\",\"version\":\"1.0\",\"opensearchVersion\":\"" @@ -210,17 +222,17 @@ public void testRestInitializeExtensionActionResponseWithAdditionalSettingsUsing + "\"minimumCompatibleVersion\":\"" + Version.CURRENT.minimumCompatibilityVersion().toString() + "\"}"; - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent(new BytesArray(content), XContentType.JSON) + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent(new BytesArray(content), MediaTypeRegistry.JSON) .withMethod(RestRequest.Method.POST) .build(); FakeRestChannel channel = new FakeRestChannel(request, false, 0); restInitializeExtensionAction.handleRequest(request, channel, null); - assertEquals(channel.capturedResponse().status(), RestStatus.ACCEPTED); + assertEquals(RestStatus.ACCEPTED, channel.capturedResponse().status()); assertTrue(channel.capturedResponse().content().utf8ToString().contains("A request to initialize an extension has been sent.")); - Optional<ExtensionsSettings.Extension> extension = spy.lookupExtensionSettingsById("ad-extension"); + Optional<Extension> 
extension = spy.lookupExtensionSettingsById("ad-extension"); assertTrue(extension.isPresent()); assertEquals(false, extension.get().getAdditionalSettings().get(boolSetting)); assertEquals("default", extension.get().getAdditionalSettings().get(stringSetting)); diff --git a/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java b/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java index 8695e409de0bc..9da976de7d7f6 100644 --- a/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java +++ b/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java @@ -8,24 +8,6 @@ package org.opensearch.extensions.rest; -import java.net.InetAddress; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Set; -import java.util.Map; -import java.util.Arrays; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; - -import static java.util.Collections.emptyList; -import static java.util.Collections.emptyMap; -import static java.util.Collections.emptySet; -import static org.mockito.Mockito.mock; - -import org.junit.After; -import org.junit.Before; import org.opensearch.Version; import org.opensearch.action.ActionModule; import org.opensearch.action.ActionModule.DynamicActionRegistry; @@ -34,22 +16,23 @@ import org.opensearch.action.support.ActionFilters; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsModule; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.extensions.DiscoveryExtensionNode; import org.opensearch.extensions.ExtensionsManager; import org.opensearch.extensions.action.ExtensionAction; import org.opensearch.extensions.action.ExtensionTransportAction; import org.opensearch.identity.IdentityService; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.rest.NamedRoute; import org.opensearch.rest.RestHandler.Route; import org.opensearch.rest.RestRequest.Method; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; @@ -57,6 +40,24 @@ import org.opensearch.transport.TransportService; import org.opensearch.transport.nio.MockNioTransport; import org.opensearch.usage.UsageService; +import org.junit.After; +import org.junit.Before; + +import java.net.InetAddress; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; +import static 
org.mockito.Mockito.mock; public class RestSendToExtensionActionTests extends OpenSearchTestCase { @@ -65,6 +66,7 @@ public class RestSendToExtensionActionTests extends OpenSearchTestCase { private DiscoveryExtensionNode discoveryExtensionNode; private ActionModule actionModule; private DynamicActionRegistry dynamicActionRegistry; + private IdentityService identityService; private final ThreadPool threadPool = new TestThreadPool(RestSendToExtensionActionTests.class.getSimpleName()); @Before @@ -77,7 +79,8 @@ public void setup() throws Exception { new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ); transportService = new MockTransportService( settings, @@ -93,7 +96,8 @@ public void setup() throws Exception { Version.CURRENT ), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); discoveryExtensionNode = new DiscoveryExtensionNode( "firstExtension", @@ -119,8 +123,9 @@ public void setup() throws Exception { usageService, null, new IdentityService(Settings.EMPTY, new ArrayList<>()), - new ExtensionsManager(Set.of()) + new ExtensionsManager(Set.of(), new IdentityService(Settings.EMPTY, List.of())) ); + identityService = new IdentityService(Settings.EMPTY, new ArrayList<>()); dynamicActionRegistry = actionModule.getDynamicActionRegistry(); } @@ -142,10 +147,11 @@ public void testRestSendToExtensionAction() throws Exception { registerRestActionRequest, discoveryExtensionNode, transportService, - dynamicActionRegistry + dynamicActionRegistry, + identityService ); - assertEquals("send_to_extension_action", restSendToExtensionAction.getName()); + assertEquals("uniqueid1:send_to_extension_action", restSendToExtensionAction.getName()); List<Route> expected = new ArrayList<>(); String uriPrefix = "/_extensions/_uniqueid1"; expected.add(new Route(Method.GET, uriPrefix + "/foo")); @@ -174,10 +180,11 @@ public void testRestSendToExtensionActionWithNamedRoute() throws Exception { registerRestActionRequest, discoveryExtensionNode, transportService, - dynamicActionRegistry + dynamicActionRegistry, + identityService ); - assertEquals("send_to_extension_action", restSendToExtensionAction.getName()); + assertEquals("uniqueid1:send_to_extension_action", restSendToExtensionAction.getName()); List<NamedRoute> expected = new ArrayList<>(); String uriPrefix = "/_extensions/_uniqueid1"; NamedRoute nr1 = new NamedRoute.Builder().method(Method.GET).path(uriPrefix + "/foo").uniqueName("foo").build(); @@ -219,10 +226,11 @@ public void testRestSendToExtensionActionWithNamedRouteAndLegacyActionName() thr registerRestActionRequest, discoveryExtensionNode, transportService, - dynamicActionRegistry + dynamicActionRegistry, + identityService ); - assertEquals("send_to_extension_action", restSendToExtensionAction.getName()); + assertEquals("uniqueid1:send_to_extension_action", restSendToExtensionAction.getName()); List<NamedRoute> expected = new ArrayList<>(); String uriPrefix = "/_extensions/_uniqueid1"; NamedRoute nr1 = new NamedRoute.Builder().method(Method.GET) @@ -271,7 +279,13 @@ public void testRestSendToExtensionActionWithoutUniqueNameShouldFail() { ); expectThrows( IllegalArgumentException.class, - () -> new RestSendToExtensionAction(registerRestActionRequest, discoveryExtensionNode, transportService, dynamicActionRegistry) + () -> new RestSendToExtensionAction( + registerRestActionRequest, + 
discoveryExtensionNode, + transportService, + dynamicActionRegistry, + identityService + ) ); } @@ -283,7 +297,13 @@ public void testRestSendToExtensionMultipleNamedRoutesWithSameName() throws Exce ); expectThrows( IllegalArgumentException.class, - () -> new RestSendToExtensionAction(registerRestActionRequest, discoveryExtensionNode, transportService, dynamicActionRegistry) + () -> new RestSendToExtensionAction( + registerRestActionRequest, + discoveryExtensionNode, + transportService, + dynamicActionRegistry, + identityService + ) ); } @@ -295,7 +315,13 @@ public void testRestSendToExtensionMultipleNamedRoutesWithSameLegacyActionName() ); expectThrows( IllegalArgumentException.class, - () -> new RestSendToExtensionAction(registerRestActionRequest, discoveryExtensionNode, transportService, dynamicActionRegistry) + () -> new RestSendToExtensionAction( + registerRestActionRequest, + discoveryExtensionNode, + transportService, + dynamicActionRegistry, + identityService + ) ); } @@ -307,7 +333,13 @@ public void testRestSendToExtensionMultipleRoutesWithSameMethodAndPath() throws ); expectThrows( IllegalArgumentException.class, - () -> new RestSendToExtensionAction(registerRestActionRequest, discoveryExtensionNode, transportService, dynamicActionRegistry) + () -> new RestSendToExtensionAction( + registerRestActionRequest, + discoveryExtensionNode, + transportService, + dynamicActionRegistry, + identityService + ) ); } @@ -319,7 +351,13 @@ public void testRestSendToExtensionMultipleRoutesWithSameMethodAndPathWithDiffer ); expectThrows( IllegalArgumentException.class, - () -> new RestSendToExtensionAction(registerRestActionRequest, discoveryExtensionNode, transportService, dynamicActionRegistry) + () -> new RestSendToExtensionAction( + registerRestActionRequest, + discoveryExtensionNode, + transportService, + dynamicActionRegistry, + identityService + ) ); } @@ -331,7 +369,13 @@ public void testRestSendToExtensionMultipleRoutesWithSameMethodAndPathWithPathPa ); try { - new RestSendToExtensionAction(registerRestActionRequest, discoveryExtensionNode, transportService, dynamicActionRegistry); + new RestSendToExtensionAction( + registerRestActionRequest, + discoveryExtensionNode, + transportService, + dynamicActionRegistry, + identityService + ); } catch (IllegalArgumentException e) { fail("IllegalArgumentException should not be thrown for different paths"); } @@ -353,7 +397,13 @@ public void testRestSendToExtensionWithNamedRouteCollidingWithDynamicTransportAc expectThrows( IllegalArgumentException.class, - () -> new RestSendToExtensionAction(registerRestActionRequest, discoveryExtensionNode, transportService, dynamicActionRegistry) + () -> new RestSendToExtensionAction( + registerRestActionRequest, + discoveryExtensionNode, + transportService, + dynamicActionRegistry, + identityService + ) ); } @@ -367,7 +417,13 @@ public void testRestSendToExtensionWithNamedRouteCollidingWithNativeTransportAct ); expectThrows( IllegalArgumentException.class, - () -> new RestSendToExtensionAction(registerRestActionRequest, discoveryExtensionNode, transportService, dynamicActionRegistry) + () -> new RestSendToExtensionAction( + registerRestActionRequest, + discoveryExtensionNode, + transportService, + dynamicActionRegistry, + identityService + ) ); } @@ -381,7 +437,8 @@ public void testRestSendToExtensionActionFilterHeaders() throws Exception { registerRestActionRequest, discoveryExtensionNode, transportService, - dynamicActionRegistry + dynamicActionRegistry, + identityService ); Map<String, List<String>> headers = 
new HashMap<>(); @@ -407,7 +464,13 @@ public void testRestSendToExtensionActionBadMethod() throws Exception { ); expectThrows( IllegalArgumentException.class, - () -> new RestSendToExtensionAction(registerRestActionRequest, discoveryExtensionNode, transportService, dynamicActionRegistry) + () -> new RestSendToExtensionAction( + registerRestActionRequest, + discoveryExtensionNode, + transportService, + dynamicActionRegistry, + identityService + ) ); } @@ -419,7 +482,13 @@ public void testRestSendToExtensionActionBadDeprecatedMethod() throws Exception ); expectThrows( IllegalArgumentException.class, - () -> new RestSendToExtensionAction(registerRestActionRequest, discoveryExtensionNode, transportService, dynamicActionRegistry) + () -> new RestSendToExtensionAction( + registerRestActionRequest, + discoveryExtensionNode, + transportService, + dynamicActionRegistry, + identityService + ) ); } @@ -431,7 +500,13 @@ public void testRestSendToExtensionActionMissingUri() throws Exception { ); expectThrows( IllegalArgumentException.class, - () -> new RestSendToExtensionAction(registerRestActionRequest, discoveryExtensionNode, transportService, dynamicActionRegistry) + () -> new RestSendToExtensionAction( + registerRestActionRequest, + discoveryExtensionNode, + transportService, + dynamicActionRegistry, + identityService + ) ); } @@ -443,7 +518,13 @@ public void testRestSendToExtensionActionMissingDeprecatedUri() throws Exception ); expectThrows( IllegalArgumentException.class, - () -> new RestSendToExtensionAction(registerRestActionRequest, discoveryExtensionNode, transportService, dynamicActionRegistry) + () -> new RestSendToExtensionAction( + registerRestActionRequest, + discoveryExtensionNode, + transportService, + dynamicActionRegistry, + identityService + ) ); } } diff --git a/server/src/test/java/org/opensearch/extensions/settings/RegisterCustomSettingsTests.java b/server/src/test/java/org/opensearch/extensions/settings/RegisterCustomSettingsTests.java index bf9234a7f129d..ec752073a9055 100644 --- a/server/src/test/java/org/opensearch/extensions/settings/RegisterCustomSettingsTests.java +++ b/server/src/test/java/org/opensearch/extensions/settings/RegisterCustomSettingsTests.java @@ -8,19 +8,19 @@ package org.opensearch.extensions.settings; -import java.util.List; -import java.util.concurrent.TimeUnit; - -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.core.common.io.stream.BytesStreamInput; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.BytesStreamInput; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.test.OpenSearchTestCase; +import java.util.List; +import java.util.concurrent.TimeUnit; + public class RegisterCustomSettingsTests extends OpenSearchTestCase { public void testRegisterCustomSettingsRequest() throws Exception { diff --git a/server/src/test/java/org/opensearch/gateway/AsyncShardFetchTests.java b/server/src/test/java/org/opensearch/gateway/AsyncShardFetchTests.java index 31a27503069d7..4e5e9c71e1fe4 100644 --- a/server/src/test/java/org/opensearch/gateway/AsyncShardFetchTests.java +++ 
b/server/src/test/java/org/opensearch/gateway/AsyncShardFetchTests.java @@ -39,6 +39,7 @@ import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.indices.store.ShardAttributes; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -46,12 +47,13 @@ import org.junit.Before; import java.util.Collections; +import java.util.HashMap; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger; -import static java.util.Collections.emptySet; +import static java.util.Collections.emptyMap; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.sameInstance; @@ -84,7 +86,16 @@ public class AsyncShardFetchTests extends OpenSearchTestCase { public void setUp() throws Exception { super.setUp(); this.threadPool = new TestThreadPool(getTestName()); - this.test = new TestFetch(threadPool); + if (randomBoolean()) { + this.test = new TestFetch(threadPool); + } else { + HashMap<ShardId, ShardAttributes> shardToCustomDataPath = new HashMap<>(); + ShardId shardId0 = new ShardId("index1", "index_uuid1", 0); + ShardId shardId1 = new ShardId("index2", "index_uuid2", 0); + shardToCustomDataPath.put(shardId0, new ShardAttributes(shardId0, "")); + shardToCustomDataPath.put(shardId1, new ShardAttributes(shardId1, "")); + this.test = new TestFetch(threadPool, shardToCustomDataPath); + } } @After @@ -97,7 +108,7 @@ public void testClose() throws Exception { test.addSimulation(node1.getId(), response1); // first fetch, no data, still on going - AsyncShardFetch.FetchResult<Response> fetchData = test.fetchData(nodes, emptySet()); + AsyncShardFetch.FetchResult<Response> fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); assertThat(test.reroute.get(), equalTo(0)); @@ -107,7 +118,7 @@ public void testClose() throws Exception { assertThat(test.reroute.get(), equalTo(1)); test.close(); try { - test.fetchData(nodes, emptySet()); + test.fetchData(nodes, emptyMap()); fail("fetch data should fail when closed"); } catch (IllegalStateException e) { // all is well @@ -119,7 +130,7 @@ public void testFullCircleSingleNodeSuccess() throws Exception { test.addSimulation(node1.getId(), response1); // first fetch, no data, still on going - AsyncShardFetch.FetchResult<Response> fetchData = test.fetchData(nodes, emptySet()); + AsyncShardFetch.FetchResult<Response> fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); assertThat(test.reroute.get(), equalTo(0)); @@ -127,7 +138,7 @@ public void testFullCircleSingleNodeSuccess() throws Exception { test.fireSimulationAndWait(node1.getId()); // verify we get back the data node assertThat(test.reroute.get(), equalTo(1)); - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); assertThat(fetchData.getData().size(), equalTo(1)); assertThat(fetchData.getData().get(node1), sameInstance(response1)); @@ -139,7 +150,7 @@ public void testFullCircleSingleNodeFailure() throws Exception { test.addSimulation(node1.getId(), failure1); // first fetch, no data, still on going - AsyncShardFetch.FetchResult<Response> fetchData = test.fetchData(nodes, emptySet()); + AsyncShardFetch.FetchResult<Response> 
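// Editor's note: the new setUp() above randomly exercises both TestFetch constructors, so
// every test in this class runs against the legacy single-shard fetcher on some seeds and
// against the new ShardAttributes-driven fetcher on others. A sketch of that pattern with
// a hypothetical fixture; the real tests use the reproducibly seeded randomBoolean() from
// OpenSearchTestCase rather than a plain java.util.Random.
import java.util.HashMap;
import java.util.Map;
import java.util.Random;

class RandomizedFixtureSketch {
    private static final Random RANDOM = new Random(); // stand-in for the seeded test random

    static FakeFetcher newFetcher() {
        if (RANDOM.nextBoolean()) {
            return new FakeFetcher(); // legacy, single-shard constructor
        }
        Map<String, String> shardAttributes = new HashMap<>();
        shardAttributes.put("index1[0]", ""); // custom data path left empty, as in the hunk above
        shardAttributes.put("index2[0]", "");
        return new FakeFetcher(shardAttributes); // new map-driven constructor
    }

    static class FakeFetcher {
        FakeFetcher() {}

        FakeFetcher(Map<String, String> shardAttributes) {}
    }
}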
fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); assertThat(test.reroute.get(), equalTo(0)); @@ -147,19 +158,19 @@ public void testFullCircleSingleNodeFailure() throws Exception { test.fireSimulationAndWait(node1.getId()); // failure, fetched data exists, but has no data assertThat(test.reroute.get(), equalTo(1)); - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); assertThat(fetchData.getData().size(), equalTo(0)); // on failure, we reset the failure on a successive call to fetchData, and try again afterwards test.addSimulation(node1.getId(), response1); - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); test.fireSimulationAndWait(node1.getId()); // 2 reroutes, cause we have a failure that we clear assertThat(test.reroute.get(), equalTo(3)); - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); assertThat(fetchData.getData().size(), equalTo(1)); assertThat(fetchData.getData().get(node1), sameInstance(response1)); @@ -170,7 +181,7 @@ public void testIgnoreResponseFromDifferentRound() throws Exception { test.addSimulation(node1.getId(), response1); // first fetch, no data, still on going - AsyncShardFetch.FetchResult<Response> fetchData = test.fetchData(nodes, emptySet()); + AsyncShardFetch.FetchResult<Response> fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); assertThat(test.reroute.get(), equalTo(0)); @@ -183,7 +194,7 @@ public void testIgnoreResponseFromDifferentRound() throws Exception { test.fireSimulationAndWait(node1.getId()); // verify we get back the data node assertThat(test.reroute.get(), equalTo(2)); - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); assertThat(fetchData.getData().size(), equalTo(1)); assertThat(fetchData.getData().get(node1), sameInstance(response1)); @@ -195,7 +206,7 @@ public void testIgnoreFailureFromDifferentRound() throws Exception { test.addSimulation(node1.getId(), failure1); // first fetch, no data, still on going - AsyncShardFetch.FetchResult<Response> fetchData = test.fetchData(nodes, emptySet()); + AsyncShardFetch.FetchResult<Response> fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); assertThat(test.reroute.get(), equalTo(0)); @@ -212,7 +223,7 @@ public void testIgnoreFailureFromDifferentRound() throws Exception { test.fireSimulationAndWait(node1.getId()); // failure, fetched data exists, but has no data assertThat(test.reroute.get(), equalTo(2)); - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); assertThat(fetchData.getData().size(), equalTo(0)); } @@ -223,7 +234,7 @@ public void testTwoNodesOnSetup() throws Exception { test.addSimulation(node2.getId(), response2); // no fetched data, 2 requests still on going - AsyncShardFetch.FetchResult<Response> fetchData = test.fetchData(nodes, emptySet()); + AsyncShardFetch.FetchResult<Response> fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); assertThat(test.reroute.get(), equalTo(0)); @@ -231,14 +242,14 @@ public void testTwoNodesOnSetup() throws Exception { 
test.fireSimulationAndWait(node1.getId()); // there is still another on going request, so no data assertThat(test.getNumberOfInFlightFetches(), equalTo(1)); - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); // fire the second simulation, this should allow us to get the data test.fireSimulationAndWait(node2.getId()); // no more ongoing requests, we should fetch the data assertThat(test.reroute.get(), equalTo(2)); - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); assertThat(fetchData.getData().size(), equalTo(2)); assertThat(fetchData.getData().get(node1), sameInstance(response1)); @@ -251,21 +262,21 @@ public void testTwoNodesOnSetupAndFailure() throws Exception { test.addSimulation(node2.getId(), failure2); // no fetched data, 2 requests still on going - AsyncShardFetch.FetchResult<Response> fetchData = test.fetchData(nodes, emptySet()); + AsyncShardFetch.FetchResult<Response> fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); assertThat(test.reroute.get(), equalTo(0)); // fire the first response, it should trigger a reroute test.fireSimulationAndWait(node1.getId()); assertThat(test.reroute.get(), equalTo(1)); - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); // fire the second simulation, this should allow us to get the data test.fireSimulationAndWait(node2.getId()); assertThat(test.reroute.get(), equalTo(2)); // since one of those failed, we should only have one entry - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); assertThat(fetchData.getData().size(), equalTo(1)); assertThat(fetchData.getData().get(node1), sameInstance(response1)); @@ -276,7 +287,7 @@ public void testTwoNodesAddedInBetween() throws Exception { test.addSimulation(node1.getId(), response1); // no fetched data, 2 requests still on going - AsyncShardFetch.FetchResult<Response> fetchData = test.fetchData(nodes, emptySet()); + AsyncShardFetch.FetchResult<Response> fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); assertThat(test.reroute.get(), equalTo(0)); @@ -287,14 +298,14 @@ public void testTwoNodesAddedInBetween() throws Exception { nodes = DiscoveryNodes.builder(nodes).add(node2).build(); test.addSimulation(node2.getId(), response2); // no fetch data, has a new node introduced - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); // fire the second simulation, this should allow us to get the data test.fireSimulationAndWait(node2.getId()); // since one of those failed, we should only have one entry - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); assertThat(fetchData.getData().size(), equalTo(2)); assertThat(fetchData.getData().get(node1), sameInstance(response1)); @@ -309,7 +320,7 @@ public void testClearCache() throws Exception { test.clearCacheForNode(node1.getId()); // no fetched data, request still on going - AsyncShardFetch.FetchResult<Response> fetchData = test.fetchData(nodes, emptySet()); + AsyncShardFetch.FetchResult<Response> fetchData = 
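// Editor's note: the mechanical emptySet() -> emptyMap() churn through these hunks tracks
// an AsyncShardFetch API change: fetchData now takes per-shard state keyed by shard id
// rather than one flat set of ignored nodes, in line with the batch-style constructor and
// the reroute(String, String) override further down. A reduced sketch of that migration,
// with plain String shard ids standing in for ShardId:
import java.util.Collections;
import java.util.Map;
import java.util.Set;

class FetchSignatureSketch {
    // before: a single set of ignored nodes, implicitly scoped to the one tracked shard
    static String fetchDataLegacy(Set<String> ignoredNodes) {
        return "shards=1, ignoredNodes=" + ignoredNodes.size();
    }

    // after: ignored nodes are carried per shard, so one fetcher can cover a batch
    static String fetchData(Map<String, Set<String>> ignoredNodesByShard) {
        return "shards=" + ignoredNodesByShard.size();
    }

    public static void main(String[] args) {
        System.out.println(fetchDataLegacy(Collections.emptySet())); // old call shape
        System.out.println(fetchData(Collections.emptyMap()));       // mirrors fetchData(nodes, emptyMap())
    }
}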
test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); assertThat(test.reroute.get(), equalTo(0)); @@ -317,13 +328,13 @@ public void testClearCache() throws Exception { assertThat(test.reroute.get(), equalTo(1)); // verify we get back right data from node - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); assertThat(fetchData.getData().size(), equalTo(1)); assertThat(fetchData.getData().get(node1), sameInstance(response1)); // second fetch gets same data - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); assertThat(fetchData.getData().size(), equalTo(1)); assertThat(fetchData.getData().get(node1), sameInstance(response1)); @@ -334,14 +345,14 @@ public void testClearCache() throws Exception { test.addSimulation(node1.getId(), response1_2); // no fetched data, new request on going - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); test.fireSimulationAndWait(node1.getId()); assertThat(test.reroute.get(), equalTo(2)); // verify we get new data back - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); assertThat(fetchData.getData().size(), equalTo(1)); assertThat(fetchData.getData().get(node1), sameInstance(response1_2)); @@ -352,7 +363,7 @@ public void testConcurrentRequestAndClearCache() throws Exception { test.addSimulation(node1.getId(), response1); // no fetched data, request still on going - AsyncShardFetch.FetchResult<Response> fetchData = test.fetchData(nodes, emptySet()); + AsyncShardFetch.FetchResult<Response> fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); assertThat(test.reroute.get(), equalTo(0)); @@ -366,14 +377,14 @@ public void testConcurrentRequestAndClearCache() throws Exception { test.addSimulation(node1.getId(), response1_2); // verify still no fetched data, request still on going - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); test.fireSimulationAndWait(node1.getId()); assertThat(test.reroute.get(), equalTo(2)); // verify we get new data back - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); assertThat(fetchData.getData().size(), equalTo(1)); assertThat(fetchData.getData().get(node1), sameInstance(response1_2)); @@ -403,6 +414,11 @@ static class Entry { this.threadPool = threadPool; } + TestFetch(ThreadPool threadPool, Map<ShardId, ShardAttributes> shardAttributesMap) { + super(LogManager.getLogger(TestFetch.class), "test", shardAttributesMap, null, "test-batch"); + this.threadPool = threadPool; + } + public void addSimulation(String nodeId, Response response) { simulations.put(nodeId, new Entry(response, null)); } @@ -418,7 +434,7 @@ public void fireSimulationAndWait(String nodeId) throws InterruptedException { } @Override - protected void reroute(ShardId shardId, String reason) { + protected void reroute(String shardId, String reason) { reroute.incrementAndGet(); } diff --git a/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java b/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java index 
c83da46b23fb1..1c43bb565ef69 100644 --- a/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java +++ b/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java @@ -40,6 +40,11 @@ import org.opensearch.cluster.metadata.MetadataIndexStateService; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.cluster.routing.RecoverySource; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.ClusterSettings; @@ -48,10 +53,14 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.util.set.Sets; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.repositories.IndexId; import org.opensearch.test.OpenSearchTestCase; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; +import java.util.Map; import java.util.Set; import java.util.function.BiConsumer; import java.util.function.Function; @@ -269,6 +278,108 @@ public void testUpdateRoutingTable() { } } + public void testRoutingTableUpdateWhenRemoteStateRecovery() { + final int numOfShards = randomIntBetween(1, 10); + + final IndexMetadata remoteMetadata = createIndexMetadata( + "test-remote", + Settings.builder() + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numOfShards) + .build() + ); + + // Test remote index routing table is generated with ExistingStoreRecoverySource + { + final Index index = remoteMetadata.getIndex(); + final ClusterState initialState = ClusterState.builder(ClusterState.EMPTY_STATE) + .metadata(Metadata.builder().put(remoteMetadata, false).build()) + .build(); + final ClusterState newState = updateRoutingTable(initialState); + IndexRoutingTable newRemoteIndexRoutingTable = newState.routingTable().index(remoteMetadata.getIndex()); + assertTrue(newState.routingTable().hasIndex(index)); + assertEquals( + 0, + newRemoteIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.INDEX_CREATED) + ) + ); + assertEquals( + numOfShards, + newRemoteIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.CLUSTER_RECOVERED) + ) + ); + assertEquals( + 0, + newRemoteIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.RemoteStoreRecoverySource + ) + ); + assertEquals( + numOfShards, + newRemoteIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.EmptyStoreRecoverySource + ) + ); + + } + + // Test remote index routing table is overridden if recovery source is RemoteStoreRecoverySource + { + final Index index = remoteMetadata.getIndex(); + Map<ShardId, IndexShardRoutingTable> routingTableMap = new HashMap<>(); + for (int shardNumber = 0; shardNumber < remoteMetadata.getNumberOfShards(); shardNumber++) { + ShardId shardId = new ShardId(index, shardNumber); + routingTableMap.put(shardId, new IndexShardRoutingTable.Builder(new ShardId(remoteMetadata.getIndex(), 1)).build()); + } + 
IndexRoutingTable.Builder remoteBuilderWithRemoteRecovery = new IndexRoutingTable.Builder(remoteMetadata.getIndex()) + .initializeAsRemoteStoreRestore( + remoteMetadata, + new RecoverySource.RemoteStoreRecoverySource( + UUIDs.randomBase64UUID(), + remoteMetadata.getCreationVersion(), + new IndexId(remoteMetadata.getIndex().getName(), remoteMetadata.getIndexUUID()) + ), + routingTableMap, + true + ); + final ClusterState initialState = ClusterState.builder(ClusterState.EMPTY_STATE) + .metadata(Metadata.builder().put(remoteMetadata, false).build()) + .routingTable(new RoutingTable.Builder().add(remoteBuilderWithRemoteRecovery.build()).build()) + .build(); + assertTrue(initialState.routingTable().hasIndex(index)); + final ClusterState newState = updateRoutingTable(initialState); + IndexRoutingTable newRemoteIndexRoutingTable = newState.routingTable().index(remoteMetadata.getIndex()); + assertTrue(newState.routingTable().hasIndex(index)); + assertEquals( + 0, + newRemoteIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.INDEX_CREATED) + ) + ); + assertEquals( + numOfShards, + newRemoteIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.CLUSTER_RECOVERED) + ) + ); + assertEquals( + 0, + newRemoteIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.RemoteStoreRecoverySource + ) + ); + assertEquals( + numOfShards, + newRemoteIndexRoutingTable.shardsMatchingPredicateCount( + shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.EmptyStoreRecoverySource + ) + ); + + } + } + public void testMixCurrentAndRecoveredState() { final ClusterState currentState = ClusterState.builder(ClusterState.EMPTY_STATE) .blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK).build()) diff --git a/server/src/test/java/org/opensearch/gateway/DanglingIndicesStateTests.java b/server/src/test/java/org/opensearch/gateway/DanglingIndicesStateTests.java index d10ba879fc18e..3f62abfc17037 100644 --- a/server/src/test/java/org/opensearch/gateway/DanglingIndicesStateTests.java +++ b/server/src/test/java/org/opensearch/gateway/DanglingIndicesStateTests.java @@ -38,8 +38,8 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Settings; -import org.opensearch.env.NodeEnvironment; import org.opensearch.core.index.Index; +import org.opensearch.env.NodeEnvironment; import org.opensearch.test.OpenSearchTestCase; import org.hamcrest.Matchers; diff --git a/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java b/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java index 830a8a9ad8ab7..74bae7b5eb7cf 100644 --- a/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java +++ b/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java @@ -35,12 +35,16 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.tests.store.MockDirectoryWrapper; import org.opensearch.ExceptionsHelper; +import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.CoordinationMetadata; import 
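// Editor's note: testRoutingTableUpdateWhenRemoteStateRecovery above asserts shard counts
// with shardsMatchingPredicateCount(...), checking that every shard ends up with the
// CLUSTER_RECOVERED unassigned reason and an EmptyStoreRecoverySource. The counting idiom
// is a predicate filter over routing entries; a reduced sketch with a hypothetical
// ShardEntry record (the real method takes a predicate over ShardRouting):
import java.util.List;
import java.util.function.Predicate;

class PredicateCountSketch {
    record ShardEntry(String unassignedReason, String recoverySource) {}

    static int shardsMatchingPredicateCount(List<ShardEntry> shards, Predicate<ShardEntry> predicate) {
        return (int) shards.stream().filter(predicate).count();
    }

    public static void main(String[] args) {
        List<ShardEntry> shards = List.of(
            new ShardEntry("CLUSTER_RECOVERED", "EmptyStore"),
            new ShardEntry("CLUSTER_RECOVERED", "EmptyStore")
        );
        // mirrors: assertEquals(numOfShards, table.shardsMatchingPredicateCount(r -> ...))
        System.out.println(shardsMatchingPredicateCount(shards, s -> "CLUSTER_RECOVERED".equals(s.unassignedReason())));
        System.out.println(shardsMatchingPredicateCount(shards, s -> "RemoteStore".equals(s.recoverySource())));
    }
}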
org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfigExclusion; import org.opensearch.cluster.coordination.CoordinationState; +import org.opensearch.cluster.coordination.CoordinationState.PersistedState; +import org.opensearch.cluster.coordination.PersistedStateRegistry; +import org.opensearch.cluster.coordination.PersistedStateRegistry.PersistedStateType; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Manifest; import org.opensearch.cluster.metadata.Metadata; @@ -48,18 +52,28 @@ import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.MockBigArrays; import org.opensearch.common.util.MockPageCacheRecycler; -import org.opensearch.common.util.set.Sets; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.common.util.set.Sets; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; import org.opensearch.env.TestEnvironment; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.gateway.GatewayMetaState.RemotePersistedState; +import org.opensearch.gateway.PersistedClusterStateService.Writer; +import org.opensearch.gateway.remote.ClusterMetadataManifest; +import org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.gateway.remote.RemotePersistenceStats; +import org.opensearch.index.recovery.RemoteStoreRestoreService; +import org.opensearch.index.recovery.RemoteStoreRestoreService.RemoteRestoreResult; import org.opensearch.node.Node; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.fs.FsRepository; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -72,23 +86,44 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Locale; +import java.util.Optional; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Supplier; + +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_INDEX_UUID; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled; import static org.opensearch.test.NodeRoles.nonClusterManagerNode; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.Mockito.doCallRealMethod; import static org.mockito.Mockito.mock; +import static 
org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; import static org.mockito.Mockito.when; public class GatewayMetaStatePersistedStateTests extends OpenSearchTestCase { + private NodeEnvironment nodeEnvironment; private ClusterName clusterName; private Settings settings; private DiscoveryNode localNode; private BigArrays bigArrays; + private MockGatewayMetaState gateway; + @Override public void setUp() throws Exception { bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); @@ -108,14 +143,22 @@ public void setUp() throws Exception { @Override public void tearDown() throws Exception { nodeEnvironment.close(); + IOUtils.close(gateway); super.tearDown(); } - private CoordinationState.PersistedState newGatewayPersistedState() { - final MockGatewayMetaState gateway = new MockGatewayMetaState(localNode, bigArrays); - gateway.start(settings, nodeEnvironment, xContentRegistry()); + private CoordinationState.PersistedState newGatewayPersistedState() throws IOException { + IOUtils.close(gateway); + gateway = new MockGatewayMetaState(localNode, bigArrays); + final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); + gateway.start(settings, nodeEnvironment, xContentRegistry(), persistedStateRegistry); final CoordinationState.PersistedState persistedState = gateway.getPersistedState(); assertThat(persistedState, instanceOf(GatewayMetaState.LucenePersistedState.class)); + assertThat( + persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL), + instanceOf(GatewayMetaState.LucenePersistedState.class) + ); + assertThat(persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE), nullValue()); return persistedState; } @@ -412,7 +455,10 @@ public void testDataOnlyNodePersistence() throws Exception { cleanup.add(gateway); final TransportService transportService = mock(TransportService.class); TestThreadPool threadPool = new TestThreadPool("testMarkAcceptedConfigAsCommittedOnDataOnlyNode"); - cleanup.add(() -> ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS)); + cleanup.add(() -> { + ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); + threadPool.shutdown(); + }); when(transportService.getThreadPool()).thenReturn(threadPool); ClusterService clusterService = mock(ClusterService.class); when(clusterService.getClusterSettings()).thenReturn( @@ -425,6 +471,27 @@ public void testDataOnlyNodePersistence() throws Exception { new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L ); + Supplier<RemoteClusterStateService> remoteClusterStateServiceSupplier = () -> { + if (isRemoteStoreClusterStateEnabled(settings)) { + return new RemoteClusterStateService( + nodeEnvironment.nodeId(), + () -> new RepositoriesService( + settings, + clusterService, + transportService, + Collections.emptyMap(), + Collections.emptyMap(), + transportService.getThreadPool() + ), + settings, + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + () -> 0L, + threadPool + ); + } else { + return null; + } + }; gateway.start( settings, transportService, @@ -432,7 +499,10 @@ public void testDataOnlyNodePersistence() throws Exception { new MetaStateService(nodeEnvironment, xContentRegistry()), null, null, - persistedClusterStateService + persistedClusterStateService, + remoteClusterStateServiceSupplier.get(), + new PersistedStateRegistry(), + null ); final CoordinationState.PersistedState persistedState = 
gateway.getPersistedState(); assertThat(persistedState, instanceOf(GatewayMetaState.AsyncLucenePersistedState.class)); @@ -647,6 +717,415 @@ Directory createDirectory(Path path) { } } + public void testRemotePersistedState() throws IOException { + final RemoteClusterStateService remoteClusterStateService = Mockito.mock(RemoteClusterStateService.class); + final ClusterMetadataManifest manifest = ClusterMetadataManifest.builder().clusterTerm(1L).stateVersion(5L).build(); + final String previousClusterUUID = "prev-cluster-uuid"; + Mockito.when(remoteClusterStateService.writeFullMetadata(Mockito.any(), Mockito.any())).thenReturn(manifest); + + Mockito.when(remoteClusterStateService.writeIncrementalMetadata(Mockito.any(), Mockito.any(), Mockito.any())).thenReturn(manifest); + CoordinationState.PersistedState remotePersistedState = new RemotePersistedState(remoteClusterStateService, previousClusterUUID); + + assertThat(remotePersistedState.getLastAcceptedState(), nullValue()); + assertThat(remotePersistedState.getCurrentTerm(), equalTo(0L)); + + final long clusterTerm = randomNonNegativeLong(); + final ClusterState clusterState = createClusterState( + randomNonNegativeLong(), + Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(clusterTerm).build()).build() + ); + + remotePersistedState.setLastAcceptedState(clusterState); + Mockito.verify(remoteClusterStateService).writeFullMetadata(clusterState, previousClusterUUID); + + assertThat(remotePersistedState.getLastAcceptedState(), equalTo(clusterState)); + assertThat(remotePersistedState.getCurrentTerm(), equalTo(clusterTerm)); + + final ClusterState secondClusterState = createClusterState( + randomNonNegativeLong(), + Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(clusterTerm).build()).build() + ); + + remotePersistedState.setLastAcceptedState(secondClusterState); + Mockito.verify(remoteClusterStateService, times(1)).writeFullMetadata(secondClusterState, previousClusterUUID); + + assertThat(remotePersistedState.getLastAcceptedState(), equalTo(secondClusterState)); + assertThat(remotePersistedState.getCurrentTerm(), equalTo(clusterTerm)); + + remotePersistedState.markLastAcceptedStateAsCommitted(); + Mockito.verify(remoteClusterStateService, times(1)).markLastStateAsCommitted(Mockito.any(), Mockito.any()); + + assertThat(remotePersistedState.getLastAcceptedState(), equalTo(secondClusterState)); + assertThat(remotePersistedState.getCurrentTerm(), equalTo(clusterTerm)); + assertThat(remotePersistedState.getLastAcceptedState().metadata().clusterUUIDCommitted(), equalTo(false)); + + final ClusterState thirdClusterState = ClusterState.builder(secondClusterState) + .metadata(Metadata.builder(secondClusterState.getMetadata()).clusterUUID(randomAlphaOfLength(10)).build()) + .build(); + remotePersistedState.setLastAcceptedState(thirdClusterState); + remotePersistedState.markLastAcceptedStateAsCommitted(); + assertThat(remotePersistedState.getLastAcceptedState().metadata().clusterUUIDCommitted(), equalTo(true)); + } + + public void testRemotePersistedStateNotCommitted() throws IOException { + final RemoteClusterStateService remoteClusterStateService = Mockito.mock(RemoteClusterStateService.class); + final String previousClusterUUID = "prev-cluster-uuid"; + final ClusterMetadataManifest manifest = ClusterMetadataManifest.builder() + .previousClusterUUID(previousClusterUUID) + .clusterTerm(1L) + .stateVersion(5L) + .build(); + 
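// Editor's note: testRemotePersistedState above drives RemotePersistedState against a
// mocked RemoteClusterStateService, stubbing writeFullMetadata/writeIncrementalMetadata
// with thenReturn(manifest) and then verifying exactly one full write with times(1).
// That stub-then-verify shape, reduced to a hypothetical two-method collaborator:
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

class StubAndVerifySketch {
    interface StateService {
        String writeFull(String state);

        String writeIncremental(String previous, String next);
    }

    public static void main(String[] args) {
        StateService service = mock(StateService.class);
        when(service.writeFull("s1")).thenReturn("manifest-1"); // stub the expensive path

        String manifest = service.writeFull("s1"); // first accepted state: full upload
        service.writeIncremental("s1", "s2");      // subsequent states: incremental upload

        verify(service, times(1)).writeFull("s1"); // exactly one full write, as asserted above
        verify(service).writeIncremental("s1", "s2");
        System.out.println(manifest);
    }
}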
Mockito.when(remoteClusterStateService.getLatestClusterMetadataManifest(Mockito.any(), Mockito.any())) + .thenReturn(Optional.of(manifest)); + Mockito.when(remoteClusterStateService.writeFullMetadata(Mockito.any(), Mockito.any())).thenReturn(manifest); + + Mockito.when(remoteClusterStateService.writeIncrementalMetadata(Mockito.any(), Mockito.any(), Mockito.any())).thenReturn(manifest); + CoordinationState.PersistedState remotePersistedState = new RemotePersistedState( + remoteClusterStateService, + ClusterState.UNKNOWN_UUID + ); + + assertThat(remotePersistedState.getLastAcceptedState(), nullValue()); + assertThat(remotePersistedState.getCurrentTerm(), equalTo(0L)); + + final long clusterTerm = randomNonNegativeLong(); + ClusterState clusterState = createClusterState( + randomNonNegativeLong(), + Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(clusterTerm).build()).build() + ); + clusterState = ClusterState.builder(clusterState) + .metadata(Metadata.builder(clusterState.getMetadata()).clusterUUID(randomAlphaOfLength(10)).clusterUUIDCommitted(false).build()) + .build(); + + remotePersistedState.setLastAcceptedState(clusterState); + ArgumentCaptor<String> previousClusterUUIDCaptor = ArgumentCaptor.forClass(String.class); + ArgumentCaptor<ClusterState> clusterStateCaptor = ArgumentCaptor.forClass(ClusterState.class); + Mockito.verify(remoteClusterStateService).writeFullMetadata(clusterStateCaptor.capture(), previousClusterUUIDCaptor.capture()); + assertEquals(previousClusterUUID, previousClusterUUIDCaptor.getValue()); + } + + public void testRemotePersistedStateExceptionOnFullStateUpload() throws IOException { + final RemoteClusterStateService remoteClusterStateService = Mockito.mock(RemoteClusterStateService.class); + final String previousClusterUUID = "prev-cluster-uuid"; + Mockito.doThrow(IOException.class).when(remoteClusterStateService).writeFullMetadata(Mockito.any(), Mockito.any()); + + CoordinationState.PersistedState remotePersistedState = new RemotePersistedState(remoteClusterStateService, previousClusterUUID); + + final long clusterTerm = randomNonNegativeLong(); + final ClusterState clusterState = createClusterState( + randomNonNegativeLong(), + Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(clusterTerm).build()).build() + ); + + assertThrows(OpenSearchException.class, () -> remotePersistedState.setLastAcceptedState(clusterState)); + } + + public void testRemotePersistedStateFailureStats() throws IOException { + RemotePersistenceStats remoteStateStats = new RemotePersistenceStats(); + final RemoteClusterStateService remoteClusterStateService = Mockito.mock(RemoteClusterStateService.class); + final String previousClusterUUID = "prev-cluster-uuid"; + Mockito.doThrow(IOException.class).when(remoteClusterStateService).writeFullMetadata(Mockito.any(), Mockito.any()); + when(remoteClusterStateService.getStats()).thenReturn(remoteStateStats); + doCallRealMethod().when(remoteClusterStateService).writeMetadataFailed(); + CoordinationState.PersistedState remotePersistedState = new RemotePersistedState(remoteClusterStateService, previousClusterUUID); + + final long clusterTerm = randomNonNegativeLong(); + final ClusterState clusterState = createClusterState( + randomNonNegativeLong(), + Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(clusterTerm).build()).build() + ); + + assertThrows(OpenSearchException.class, () -> remotePersistedState.setLastAcceptedState(clusterState)); + assertEquals(1, 
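// Editor's note: testRemotePersistedStateNotCommitted above uses ArgumentCaptor to check
// *which* previous-cluster-UUID actually reached writeFullMetadata, not merely that the
// call happened. The captor idiom, reduced to a hypothetical one-method collaborator:
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;

import org.mockito.ArgumentCaptor;

class CaptorSketch {
    interface Uploader {
        void upload(String payload, String previousUuid);
    }

    public static void main(String[] args) {
        Uploader uploader = mock(Uploader.class);
        uploader.upload("state-bytes", "prev-cluster-uuid");

        ArgumentCaptor<String> uuidCaptor = ArgumentCaptor.forClass(String.class);
        // capture() acts as a matcher during verification and records the real argument
        verify(uploader).upload(anyString(), uuidCaptor.capture());
        if (!"prev-cluster-uuid".equals(uuidCaptor.getValue())) {
            throw new AssertionError("unexpected previous UUID: " + uuidCaptor.getValue());
        }
    }
}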
remoteClusterStateService.getStats().getFailedCount()); + assertEquals(0, remoteClusterStateService.getStats().getSuccessCount()); + } + + public void testGatewayForRemoteState() throws IOException { + MockGatewayMetaState gateway = null; + try { + RemoteClusterStateService remoteClusterStateService = mock(RemoteClusterStateService.class); + when(remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster")).thenReturn("test-cluster-uuid"); + RemoteStoreRestoreService remoteStoreRestoreService = mock(RemoteStoreRestoreService.class); + when(remoteStoreRestoreService.restore(any(), any(), anyBoolean(), any())).thenReturn( + RemoteRestoreResult.build("test-cluster-uuid", null, ClusterState.EMPTY_STATE) + ); + gateway = new MockGatewayMetaState(localNode, bigArrays, remoteClusterStateService, remoteStoreRestoreService); + final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); + + String stateRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + "randomRepoName" + ); + String stateRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + "randomRepoName" + ); + + Settings settings = Settings.builder() + .put("node.attr." + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, "randomRepoName") + .put(stateRepoTypeAttributeKey, FsRepository.TYPE) + .put(stateRepoSettingsAttributeKeyPrefix + "location", "randomRepoPath") + .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) + .build(); + gateway.start(settings, nodeEnvironment, xContentRegistry(), persistedStateRegistry); + + final CoordinationState.PersistedState persistedState = gateway.getPersistedState(); + assertThat(persistedState, instanceOf(GatewayMetaState.LucenePersistedState.class)); + assertThat( + persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL), + instanceOf(GatewayMetaState.LucenePersistedState.class) + ); + assertThat( + persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE), + instanceOf(GatewayMetaState.RemotePersistedState.class) + ); + } finally { + IOUtils.close(gateway); + } + } + + public void testGatewayForRemoteStateForInitialBootstrap() throws IOException { + MockGatewayMetaState gateway = null; + try { + final RemoteClusterStateService remoteClusterStateService = mock(RemoteClusterStateService.class); + when(remoteClusterStateService.getLastKnownUUIDFromRemote(clusterName.value())).thenReturn(ClusterState.UNKNOWN_UUID); + + final RemoteStoreRestoreService remoteStoreRestoreService = mock(RemoteStoreRestoreService.class); + when(remoteStoreRestoreService.restore(any(), any(), anyBoolean(), any())).thenReturn( + RemoteRestoreResult.build("test-cluster-uuid", null, ClusterState.EMPTY_STATE) + ); + final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); + gateway = newGatewayForRemoteState( + remoteClusterStateService, + remoteStoreRestoreService, + persistedStateRegistry, + ClusterState.EMPTY_STATE, + false + ); + final CoordinationState.PersistedState lucenePersistedState = gateway.getPersistedState(); + PersistedState remotePersistedState = persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE); + verify(remoteClusterStateService).getLastKnownUUIDFromRemote(Mockito.any()); // change this + verifyNoInteractions(remoteStoreRestoreService); + assertThat(remotePersistedState.getLastAcceptedState(), nullValue()); + 
assertThat(lucenePersistedState.getLastAcceptedState().metadata(), equalTo(ClusterState.EMPTY_STATE.metadata())); + } finally { + IOUtils.close(gateway); + } + } + + public void testGatewayForRemoteStateForNodeReplacement() throws IOException { + MockGatewayMetaState gateway = null; + try { + final RemoteClusterStateService remoteClusterStateService = mock(RemoteClusterStateService.class); + when(remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster")).thenReturn("test-cluster-uuid"); + final ClusterState previousState = createClusterState( + randomNonNegativeLong(), + Metadata.builder() + .coordinationMetadata(CoordinationMetadata.builder().term(randomLong()).build()) + .put( + IndexMetadata.builder("test-index1") + .settings(settings(Version.CURRENT).put(SETTING_INDEX_UUID, randomAlphaOfLength(10))) + .numberOfShards(5) + .numberOfReplicas(1) + .build(), + false + ) + .clusterUUID(randomAlphaOfLength(10)) + .build() + ); + when(remoteClusterStateService.getLastKnownUUIDFromRemote(clusterName.value())).thenReturn( + previousState.metadata().clusterUUID() + ); + + final RemoteStoreRestoreService remoteStoreRestoreService = mock(RemoteStoreRestoreService.class); + when(remoteStoreRestoreService.restore(any(), any(), anyBoolean(), any())).thenReturn( + RemoteRestoreResult.build("test-cluster-uuid", null, previousState) + ); + final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); + gateway = newGatewayForRemoteState( + remoteClusterStateService, + remoteStoreRestoreService, + persistedStateRegistry, + ClusterState.EMPTY_STATE, + false + ); + final CoordinationState.PersistedState lucenePersistedState = gateway.getPersistedState(); + PersistedState remotePersistedState = persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE); + verify(remoteClusterStateService).getLastKnownUUIDFromRemote(Mockito.any()); + verify(remoteStoreRestoreService).restore(any(), any(), anyBoolean(), any()); + assertThat(remotePersistedState.getLastAcceptedState(), nullValue()); + assertThat(lucenePersistedState.getLastAcceptedState().metadata(), equalTo(previousState.metadata())); + } finally { + IOUtils.close(gateway); + } + } + + public void testGatewayForRemoteStateForNodeReboot() throws IOException { + MockGatewayMetaState gateway = null; + try { + final RemoteClusterStateService remoteClusterStateService = mock(RemoteClusterStateService.class); + final RemoteStoreRestoreService remoteStoreRestoreService = mock(RemoteStoreRestoreService.class); + final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); + final IndexMetadata indexMetadata = IndexMetadata.builder("test-index1") + .settings(settings(Version.CURRENT).put(SETTING_INDEX_UUID, randomAlphaOfLength(10))) + .numberOfShards(5) + .numberOfReplicas(1) + .build(); + final ClusterState clusterState = createClusterState( + randomNonNegativeLong(), + Metadata.builder() + .coordinationMetadata(CoordinationMetadata.builder().term(randomLong()).build()) + .put(indexMetadata, false) + .clusterUUID(randomAlphaOfLength(10)) + .build() + ); + gateway = newGatewayForRemoteState( + remoteClusterStateService, + remoteStoreRestoreService, + persistedStateRegistry, + clusterState, + false + ); + final CoordinationState.PersistedState lucenePersistedState = gateway.getPersistedState(); + PersistedState remotePersistedState = persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE); + verifyNoInteractions(remoteClusterStateService); + verifyNoInteractions(remoteStoreRestoreService); + 
assertThat(remotePersistedState.getLastAcceptedState(), nullValue()); + logger.info("lucene state metadata: {}", lucenePersistedState.getLastAcceptedState().toString()); + logger.info("initial metadata: {}", clusterState.toString()); + assertThat(lucenePersistedState.getLastAcceptedState().metadata().indices().size(), equalTo(1)); + assertThat(lucenePersistedState.getLastAcceptedState().metadata().indices().get("test-index1"), equalTo(indexMetadata)); + } finally { + IOUtils.close(gateway); + } + } + + public void testGatewayForRemoteStateForInitialBootstrapBlocksApplied() throws IOException { + MockGatewayMetaState gateway = null; + try { + final RemoteClusterStateService remoteClusterStateService = mock(RemoteClusterStateService.class); + when(remoteClusterStateService.getLastKnownUUIDFromRemote(clusterName.value())).thenReturn("test-cluster-uuid"); + + final IndexMetadata indexMetadata = IndexMetadata.builder("test-index1") + .settings( + settings(Version.CURRENT).put(SETTING_INDEX_UUID, randomAlphaOfLength(10)) + .put(IndexMetadata.INDEX_READ_ONLY_SETTING.getKey(), true) + ) + .numberOfShards(5) + .numberOfReplicas(1) + .build(); + + final ClusterState clusterState = ClusterState.builder( + createClusterState( + randomNonNegativeLong(), + Metadata.builder() + .coordinationMetadata(CoordinationMetadata.builder().term(randomLong()).build()) + .put(indexMetadata, false) + .clusterUUID(ClusterState.UNKNOWN_UUID) + .persistentSettings(Settings.builder().put(Metadata.SETTING_READ_ONLY_SETTING.getKey(), true).build()) + .build() + ) + ).nodes(DiscoveryNodes.EMPTY_NODES).build(); + + final RemoteStoreRestoreService remoteStoreRestoreService = mock(RemoteStoreRestoreService.class); + when(remoteStoreRestoreService.restore(any(), any(), anyBoolean(), any())).thenReturn( + RemoteRestoreResult.build("test-cluster-uuid", null, clusterState) + ); + final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); + gateway = newGatewayForRemoteState( + remoteClusterStateService, + remoteStoreRestoreService, + persistedStateRegistry, + ClusterState.EMPTY_STATE, + true + ); + PersistedState remotePersistedState = persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE); + PersistedState lucenePersistedState = persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL); + verify(remoteClusterStateService).getLastKnownUUIDFromRemote(clusterName.value()); // change this + verify(remoteStoreRestoreService).restore(any(ClusterState.class), any(String.class), anyBoolean(), any(String[].class)); + assertThat(remotePersistedState.getLastAcceptedState(), nullValue()); + assertThat( + Metadata.isGlobalStateEquals(lucenePersistedState.getLastAcceptedState().metadata(), clusterState.metadata()), + equalTo(true) + ); + assertThat( + lucenePersistedState.getLastAcceptedState().blocks().hasGlobalBlock(Metadata.CLUSTER_READ_ONLY_BLOCK), + equalTo(true) + ); + assertThat( + IndexMetadata.INDEX_READ_ONLY_SETTING.get( + lucenePersistedState.getLastAcceptedState().metadata().index("test-index1").getSettings() + ), + equalTo(true) + ); + } finally { + IOUtils.close(gateway); + } + } + + private MockGatewayMetaState newGatewayForRemoteState( + RemoteClusterStateService remoteClusterStateService, + RemoteStoreRestoreService remoteStoreRestoreService, + PersistedStateRegistry persistedStateRegistry, + ClusterState currentState, + boolean prepareFullState + ) throws IOException { + MockGatewayMetaState gateway = new MockGatewayMetaState(localNode, bigArrays, prepareFullState); + String 
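// Editor's note: the newGatewayForRemoteState helper starting above assembles its
// remote-state node attributes by String.format-ing a repository name into key templates
// (REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT and friends). The actual template
// values are not visible in this diff, so the ones below are hypothetical placeholders
// showing the shape of the construction only:
import java.util.Locale;

class RepoSettingsKeySketch {
    // hypothetical stand-ins for the REMOTE_STORE_REPOSITORY_*_ATTRIBUTE_KEY_* constants
    private static final String TYPE_KEY_FORMAT = "remote_store.repository.%s.type";
    private static final String SETTINGS_PREFIX_FORMAT = "remote_store.repository.%s.settings.";

    public static void main(String[] args) {
        String repoName = "randomRepoName";
        String typeKey = String.format(Locale.getDefault(), "node.attr." + TYPE_KEY_FORMAT, repoName);
        String settingsPrefix = String.format(Locale.getDefault(), "node.attr." + SETTINGS_PREFIX_FORMAT, repoName);
        // the test then puts typeKey -> FsRepository.TYPE and settingsPrefix + "location" -> a path
        System.out.println(typeKey);
        System.out.println(settingsPrefix + "location");
    }
}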
randomRepoName = "randomRepoName"; + String stateRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + randomRepoName + ); + String stateRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + randomRepoName + ); + Settings settingWithRemoteStateEnabled = Settings.builder() + .put(settings) + .put("node.attr." + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, randomRepoName) + .put(stateRepoTypeAttributeKey, FsRepository.TYPE) + .put(stateRepoSettingsAttributeKeyPrefix + "location", "randomRepoPath") + .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) + .build(); + final TransportService transportService = mock(TransportService.class); + ClusterService clusterService = mock(ClusterService.class); + when(clusterService.getClusterSettings()).thenReturn( + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + when(transportService.getLocalNode()).thenReturn(mock(DiscoveryNode.class)); + final PersistedClusterStateService persistedClusterStateService = new PersistedClusterStateService( + nodeEnvironment, + xContentRegistry(), + getBigArrays(), + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + () -> 0L + ); + if (!ClusterState.EMPTY_STATE.equals(currentState)) { + Writer writer = persistedClusterStateService.createWriter(); + writer.writeFullStateAndCommit(currentState.term(), currentState); + writer.close(); + } + final MetaStateService metaStateService = mock(MetaStateService.class); + when(metaStateService.loadFullState()).thenReturn(new Tuple<>(Manifest.empty(), ClusterState.EMPTY_STATE.metadata())); + gateway.start( + settingWithRemoteStateEnabled, + transportService, + clusterService, + metaStateService, + null, + null, + persistedClusterStateService, + remoteClusterStateService, + persistedStateRegistry, + remoteStoreRestoreService + ); + return gateway; + } + private static BigArrays getBigArrays() { return usually() ? 
BigArrays.NON_RECYCLING_INSTANCE diff --git a/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java b/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java index 2d8a26f8bbe87..c448c4b07e03b 100644 --- a/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java @@ -50,8 +50,8 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.snapshots.EmptySnapshotsInfoService; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.gateway.TestGatewayAllocator; diff --git a/server/src/test/java/org/opensearch/gateway/IncrementalClusterStateWriterTests.java b/server/src/test/java/org/opensearch/gateway/IncrementalClusterStateWriterTests.java index f3c1108c163b5..8e67c1cdf4fd0 100644 --- a/server/src/test/java/org/opensearch/gateway/IncrementalClusterStateWriterTests.java +++ b/server/src/test/java/org/opensearch/gateway/IncrementalClusterStateWriterTests.java @@ -53,14 +53,13 @@ import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.Index; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.env.NodeEnvironment; -import org.opensearch.core.index.Index; import org.opensearch.test.MockLogAppender; import org.opensearch.test.junit.annotations.TestLogging; -import org.mockito.ArgumentCaptor; import java.io.IOException; import java.nio.file.Path; @@ -72,6 +71,8 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicLong; +import org.mockito.ArgumentCaptor; + import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; diff --git a/server/src/test/java/org/opensearch/gateway/MetaStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/MetaStateServiceTests.java index 4e8f48cc4c1cc..885280c13c209 100644 --- a/server/src/test/java/org/opensearch/gateway/MetaStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/MetaStateServiceTests.java @@ -38,8 +38,8 @@ import org.opensearch.common.UUIDs; import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; -import org.opensearch.env.NodeEnvironment; import org.opensearch.core.index.Index; +import org.opensearch.env.NodeEnvironment; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/gateway/MetadataStateFormatTests.java b/server/src/test/java/org/opensearch/gateway/MetadataStateFormatTests.java index 67d99b9880a59..53f370471c91f 100644 --- a/server/src/test/java/org/opensearch/gateway/MetadataStateFormatTests.java +++ b/server/src/test/java/org/opensearch/gateway/MetadataStateFormatTests.java @@ -37,8 +37,8 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; -import org.apache.lucene.tests.store.MockDirectoryWrapper; import org.apache.lucene.store.NIOFSDirectory; +import 
org.apache.lucene.tests.store.MockDirectoryWrapper; import org.apache.lucene.tests.util.LuceneTestCase; import org.opensearch.cluster.ClusterModule; import org.opensearch.cluster.metadata.Metadata; diff --git a/server/src/test/java/org/opensearch/gateway/PersistedClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/PersistedClusterStateServiceTests.java index 211c13e00e6db..9255b0086c073 100644 --- a/server/src/test/java/org/opensearch/gateway/PersistedClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/PersistedClusterStateServiceTests.java @@ -57,14 +57,14 @@ import org.opensearch.common.util.MockBigArrays; import org.opensearch.common.util.MockPageCacheRecycler; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.index.Index; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; import org.opensearch.env.NodeMetadata; import org.opensearch.gateway.PersistedClusterStateService.Writer; -import org.opensearch.core.index.Index; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.MockLogAppender; +import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.junit.annotations.TestLogging; import java.io.IOError; diff --git a/server/src/test/java/org/opensearch/gateway/PrimaryShardAllocatorTests.java b/server/src/test/java/org/opensearch/gateway/PrimaryShardAllocatorTests.java index 61bf5f347c2d5..dceda6433575c 100644 --- a/server/src/test/java/org/opensearch/gateway/PrimaryShardAllocatorTests.java +++ b/server/src/test/java/org/opensearch/gateway/PrimaryShardAllocatorTests.java @@ -60,18 +60,18 @@ import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.set.Sets; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.env.Environment; import org.opensearch.env.ShardLockObtainFailedException; import org.opensearch.index.IndexSettings; import org.opensearch.index.codec.CodecService; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.repositories.IndexId; import org.opensearch.snapshots.Snapshot; import org.opensearch.snapshots.SnapshotId; import org.opensearch.snapshots.SnapshotShardSizeInfo; -import org.junit.Before; import org.opensearch.test.IndexSettingsModule; +import org.junit.Before; import java.util.Arrays; import java.util.Collections; @@ -857,7 +857,11 @@ protected AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.Nod ShardRouting shard, RoutingAllocation allocation ) { - return new AsyncShardFetch.FetchResult<>(shardId, data, Collections.<String>emptySet()); + return new AsyncShardFetch.FetchResult<>(data, new HashMap<>() { + { + put(shardId, Collections.<String>emptySet()); + } + }); } } } diff --git a/server/src/test/java/org/opensearch/gateway/ReplicaShardAllocatorTests.java b/server/src/test/java/org/opensearch/gateway/ReplicaShardAllocatorTests.java index 968a0474051f7..ae56bc0f8b3d2 100644 --- a/server/src/test/java/org/opensearch/gateway/ReplicaShardAllocatorTests.java +++ b/server/src/test/java/org/opensearch/gateway/ReplicaShardAllocatorTests.java @@ -33,9 +33,11 @@ package org.opensearch.gateway; import com.carrotsearch.randomizedtesting.generators.RandomPicks; + import org.opensearch.Version; import 
org.opensearch.cluster.ClusterInfo; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; @@ -59,14 +61,14 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.set.Sets; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.engine.Engine; import org.opensearch.index.seqno.ReplicationTracker; import org.opensearch.index.seqno.RetentionLease; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.store.TransportNodesListShardStoreMetadata; -import org.opensearch.cluster.OpenSearchAllocationTestCase; +import org.opensearch.indices.store.TransportNodesListShardStoreMetadataHelper.StoreFilesMetadata; import org.opensearch.snapshots.SnapshotShardSizeInfo; import org.junit.Before; @@ -664,7 +666,7 @@ static String randomSyncId() { class TestAllocator extends ReplicaShardAllocator { - private Map<DiscoveryNode, TransportNodesListShardStoreMetadata.StoreFilesMetadata> data = null; + private Map<DiscoveryNode, StoreFilesMetadata> data = null; private AtomicBoolean fetchDataCalled = new AtomicBoolean(false); public void clean() { @@ -702,7 +704,7 @@ TestAllocator addData( } data.put( node, - new TransportNodesListShardStoreMetadata.StoreFilesMetadata( + new StoreFilesMetadata( shardId, new Store.MetadataSnapshot(unmodifiableMap(filesAsMap), unmodifiableMap(commitData), randomInt()), peerRecoveryRetentionLeases @@ -720,14 +722,18 @@ protected AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetadata.NodeS Map<DiscoveryNode, TransportNodesListShardStoreMetadata.NodeStoreFilesMetadata> tData = null; if (data != null) { tData = new HashMap<>(); - for (Map.Entry<DiscoveryNode, TransportNodesListShardStoreMetadata.StoreFilesMetadata> entry : data.entrySet()) { + for (Map.Entry<DiscoveryNode, StoreFilesMetadata> entry : data.entrySet()) { tData.put( entry.getKey(), new TransportNodesListShardStoreMetadata.NodeStoreFilesMetadata(entry.getKey(), entry.getValue()) ); } } - return new AsyncShardFetch.FetchResult<>(shardId, tData, Collections.emptySet()); + return new AsyncShardFetch.FetchResult<>(tData, new HashMap<>() { + { + put(shardId, Collections.emptySet()); + } + }); } @Override diff --git a/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java b/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java new file mode 100644 index 0000000000000..6c9a3201656d7 --- /dev/null +++ b/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java @@ -0,0 +1,281 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.gateway.remote; + +import org.opensearch.Version; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedIndexMetadata; +import org.opensearch.test.EqualsHashCodeTestUtils; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public class ClusterMetadataManifestTests extends OpenSearchTestCase { + + public void testClusterMetadataManifestXContentV0() throws IOException { + UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "test-uuid", "/test/upload/path"); + ClusterMetadataManifest originalManifest = new ClusterMetadataManifest( + 1L, + 1L, + "test-cluster-uuid", + "test-state-uuid", + Version.CURRENT, + "test-node-id", + false, + ClusterMetadataManifest.CODEC_V0, + null, + Collections.singletonList(uploadedIndexMetadata), + "prev-cluster-uuid", + true + ); + final XContentBuilder builder = JsonXContent.contentBuilder(); + builder.startObject(); + originalManifest.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) { + final ClusterMetadataManifest fromXContentManifest = ClusterMetadataManifest.fromXContentV0(parser); + assertEquals(originalManifest, fromXContentManifest); + } + } + + public void testClusterMetadataManifestXContent() throws IOException { + UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "test-uuid", "/test/upload/path"); + ClusterMetadataManifest originalManifest = new ClusterMetadataManifest( + 1L, + 1L, + "test-cluster-uuid", + "test-state-uuid", + Version.CURRENT, + "test-node-id", + false, + ClusterMetadataManifest.CODEC_V1, + "test-global-metadata-file", + Collections.singletonList(uploadedIndexMetadata), + "prev-cluster-uuid", + true + ); + final XContentBuilder builder = JsonXContent.contentBuilder(); + builder.startObject(); + originalManifest.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) { + final ClusterMetadataManifest fromXContentManifest = ClusterMetadataManifest.fromXContent(parser); + assertEquals(originalManifest, fromXContentManifest); + } + } + + public void testClusterMetadataManifestSerializationEqualsHashCode() { + ClusterMetadataManifest initialManifest = new ClusterMetadataManifest( + 1337L, + 7L, + "HrYF3kP5SmSPWtKlWhnNSA", + "6By9p9G0Rv2MmFYJcPAOgA", + Version.CURRENT, + "B10RX1f5RJenMQvYccCgSQ", + true, + 1, + "test-global-metadata-file", + randomUploadedIndexMetadataList(), + "yfObdx8KSMKKrXf8UyHhM", + true + ); + { // Mutate Cluster Term + EqualsHashCodeTestUtils.checkEqualsAndHashCode( + initialManifest, + orig -> OpenSearchTestCase.copyWriteable( + orig, + new NamedWriteableRegistry(Collections.emptyList()), + ClusterMetadataManifest::new + ), + manifest -> { + ClusterMetadataManifest.Builder builder = ClusterMetadataManifest.builder(manifest); + builder.clusterTerm(1338L); + return builder.build(); + } + ); + } + { // Mutate State 
Version + EqualsHashCodeTestUtils.checkEqualsAndHashCode( + initialManifest, + orig -> OpenSearchTestCase.copyWriteable( + orig, + new NamedWriteableRegistry(Collections.emptyList()), + ClusterMetadataManifest::new + ), + manifest -> { + ClusterMetadataManifest.Builder builder = ClusterMetadataManifest.builder(manifest); + builder.stateVersion(8L); + return builder.build(); + } + ); + } + { // Mutate Cluster UUID + EqualsHashCodeTestUtils.checkEqualsAndHashCode( + initialManifest, + orig -> OpenSearchTestCase.copyWriteable( + orig, + new NamedWriteableRegistry(Collections.emptyList()), + ClusterMetadataManifest::new + ), + manifest -> { + ClusterMetadataManifest.Builder builder = ClusterMetadataManifest.builder(manifest); + builder.clusterUUID("efOkMiPbQZCUQQgtFWdbPw"); + return builder.build(); + } + ); + } + { // Mutate State UUID + EqualsHashCodeTestUtils.checkEqualsAndHashCode( + initialManifest, + orig -> OpenSearchTestCase.copyWriteable( + orig, + new NamedWriteableRegistry(Collections.emptyList()), + ClusterMetadataManifest::new + ), + manifest -> { + ClusterMetadataManifest.Builder builder = ClusterMetadataManifest.builder(manifest); + builder.stateUUID("efOkMiPbQZCUQQgtFWdbPw"); + return builder.build(); + } + ); + } + { // Mutate OpenSearch Version + EqualsHashCodeTestUtils.checkEqualsAndHashCode( + initialManifest, + orig -> OpenSearchTestCase.copyWriteable( + orig, + new NamedWriteableRegistry(Collections.emptyList()), + ClusterMetadataManifest::new + ), + manifest -> { + ClusterMetadataManifest.Builder builder = ClusterMetadataManifest.builder(manifest); + builder.opensearchVersion(Version.V_EMPTY); + return builder.build(); + } + ); + } + { // Mutate Committed State + EqualsHashCodeTestUtils.checkEqualsAndHashCode( + initialManifest, + orig -> OpenSearchTestCase.copyWriteable( + orig, + new NamedWriteableRegistry(Collections.emptyList()), + ClusterMetadataManifest::new + ), + manifest -> { + ClusterMetadataManifest.Builder builder = ClusterMetadataManifest.builder(manifest); + builder.committed(false); + return builder.build(); + } + ); + } + { // Mutate Indices + EqualsHashCodeTestUtils.checkEqualsAndHashCode( + initialManifest, + orig -> OpenSearchTestCase.copyWriteable( + orig, + new NamedWriteableRegistry(Collections.emptyList()), + ClusterMetadataManifest::new + ), + manifest -> { + ClusterMetadataManifest.Builder builder = ClusterMetadataManifest.builder(manifest); + builder.indices(randomUploadedIndexMetadataList()); + return builder.build(); + } + ); + } + { // Mutate Previous cluster UUID + EqualsHashCodeTestUtils.checkEqualsAndHashCode( + initialManifest, + orig -> OpenSearchTestCase.copyWriteable( + orig, + new NamedWriteableRegistry(Collections.emptyList()), + ClusterMetadataManifest::new + ), + manifest -> { + ClusterMetadataManifest.Builder builder = ClusterMetadataManifest.builder(manifest); + builder.previousClusterUUID("vZX62DCQEOzGXlxXCrEu"); + return builder.build(); + } + ); + + } + { // Mutate cluster uuid committed + EqualsHashCodeTestUtils.checkEqualsAndHashCode( + initialManifest, + orig -> OpenSearchTestCase.copyWriteable( + orig, + new NamedWriteableRegistry(Collections.emptyList()), + ClusterMetadataManifest::new + ), + manifest -> { + ClusterMetadataManifest.Builder builder = ClusterMetadataManifest.builder(manifest); + builder.clusterUUIDCommitted(false); + return builder.build(); + } + ); + } + } + + private List<UploadedIndexMetadata> randomUploadedIndexMetadataList() { + final int size = randomIntBetween(1, 10); + final 
List<UploadedIndexMetadata> uploadedIndexMetadataList = new ArrayList<>(size); + while (uploadedIndexMetadataList.size() < size) { + assertTrue(uploadedIndexMetadataList.add(randomUploadedIndexMetadata())); + } + return uploadedIndexMetadataList; + } + + private UploadedIndexMetadata randomUploadedIndexMetadata() { + return new UploadedIndexMetadata(randomAlphaOfLength(10), randomAlphaOfLength(10), randomAlphaOfLength(10)); + } + + public void testUploadedIndexMetadataSerializationEqualsHashCode() { + UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "test-uuid", "/test/upload/path"); + EqualsHashCodeTestUtils.checkEqualsAndHashCode( + uploadedIndexMetadata, + orig -> OpenSearchTestCase.copyWriteable(orig, new NamedWriteableRegistry(Collections.emptyList()), UploadedIndexMetadata::new), + metadata -> randomlyChangingUploadedIndexMetadata(uploadedIndexMetadata) + ); + } + + private UploadedIndexMetadata randomlyChangingUploadedIndexMetadata(UploadedIndexMetadata uploadedIndexMetadata) { + switch (randomInt(2)) { + case 0: + return new UploadedIndexMetadata( + randomAlphaOfLength(10), + uploadedIndexMetadata.getIndexUUID(), + uploadedIndexMetadata.getUploadedFilename() + ); + case 1: + return new UploadedIndexMetadata( + uploadedIndexMetadata.getIndexName(), + randomAlphaOfLength(10), + uploadedIndexMetadata.getUploadedFilename() + ); + case 2: + return new UploadedIndexMetadata( + uploadedIndexMetadata.getIndexName(), + uploadedIndexMetadata.getIndexUUID(), + randomAlphaOfLength(10) + ); + } + return uploadedIndexMetadata; + } +} diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java new file mode 100644 index 0000000000000..65477051cdb30 --- /dev/null +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -0,0 +1,1540 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.gateway.remote; + +import org.opensearch.Version; +import org.opensearch.cluster.ClusterModule; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.coordination.CoordinationMetadata; +import org.opensearch.cluster.metadata.IndexGraveyard; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobMetadata; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.blobstore.BlobStore; +import org.opensearch.common.blobstore.stream.write.WriteContext; +import org.opensearch.common.blobstore.stream.write.WritePriority; +import org.opensearch.common.blobstore.support.PlainBlobMetadata; +import org.opensearch.common.blobstore.transfer.RemoteTransferContainer; +import org.opensearch.common.compress.DeflateCompressor; +import org.opensearch.common.lucene.store.ByteArrayIndexInput; +import org.opensearch.common.network.NetworkModule; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedIndexMetadata; +import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.indices.IndicesModule; +import org.opensearch.repositories.FilterRepository; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.RepositoryMissingException; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.repositories.fs.FsRepository; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.VersionUtils; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; + +import java.io.ByteArrayInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Stream; + +import org.mockito.ArgumentCaptor; +import org.mockito.ArgumentMatchers; + +import static java.util.stream.Collectors.toList; +import static org.opensearch.gateway.remote.RemoteClusterStateService.DELIMITER; +import static org.opensearch.gateway.remote.RemoteClusterStateService.FORMAT_PARAMS; +import static org.opensearch.gateway.remote.RemoteClusterStateService.INDEX_METADATA_CURRENT_CODEC_VERSION; +import static org.opensearch.gateway.remote.RemoteClusterStateService.MANIFEST_CURRENT_CODEC_VERSION; +import static 
org.opensearch.gateway.remote.RemoteClusterStateService.MANIFEST_FILE_PREFIX; +import static org.opensearch.gateway.remote.RemoteClusterStateService.METADATA_FILE_PREFIX; +import static org.opensearch.gateway.remote.RemoteClusterStateService.RETAINED_MANIFESTS; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class RemoteClusterStateServiceTests extends OpenSearchTestCase { + + private RemoteClusterStateService remoteClusterStateService; + private ClusterSettings clusterSettings; + private Supplier<RepositoriesService> repositoriesServiceSupplier; + private RepositoriesService repositoriesService; + private BlobStoreRepository blobStoreRepository; + private BlobStore blobStore; + private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); + + @Before + public void setup() { + repositoriesServiceSupplier = mock(Supplier.class); + repositoriesService = mock(RepositoriesService.class); + when(repositoriesServiceSupplier.get()).thenReturn(repositoriesService); + + String stateRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + "remote_store_repository" + ); + String stateRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + "remote_store_repository" + ); + + Settings settings = Settings.builder() + .put("node.attr." 
+ REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, "remote_store_repository") + .put(stateRepoTypeAttributeKey, FsRepository.TYPE) + .put(stateRepoSettingsAttributeKeyPrefix + "location", "randomRepoPath") + .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) + .build(); + + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + NamedXContentRegistry xContentRegistry = new NamedXContentRegistry( + Stream.of( + NetworkModule.getNamedXContents().stream(), + IndicesModule.getNamedXContents().stream(), + ClusterModule.getNamedXWriteables().stream() + ).flatMap(Function.identity()).collect(toList()) + ); + + blobStoreRepository = mock(BlobStoreRepository.class); + blobStore = mock(BlobStore.class); + when(blobStoreRepository.blobStore()).thenReturn(blobStore); + when(repositoriesService.repository("remote_store_repository")).thenReturn(blobStoreRepository); + when(blobStoreRepository.getNamedXContentRegistry()).thenReturn(xContentRegistry); + remoteClusterStateService = new RemoteClusterStateService( + "test-node-id", + repositoriesServiceSupplier, + settings, + clusterSettings, + () -> 0L, + threadPool + ); + } + + @After + public void teardown() throws Exception { + super.tearDown(); + remoteClusterStateService.close(); + threadPool.shutdown(); + } + + public void testFailWriteFullMetadataNonClusterManagerNode() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().build(); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState, randomAlphaOfLength(10)); + Assert.assertThat(manifest, nullValue()); + } + + public void testFailInitializationWhenRemoteStateDisabled() { + final Settings settings = Settings.builder().build(); + assertThrows( + AssertionError.class, + () -> new RemoteClusterStateService( + "test-node-id", + repositoriesServiceSupplier, + settings, + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + () -> 0L, + threadPool + ) + ); + } + + public void testFailInitializeWhenRepositoryNotSet() { + doThrow(new RepositoryMissingException("repository missing")).when(repositoriesService).repository("remote_store_repository"); + assertThrows(RepositoryMissingException.class, () -> remoteClusterStateService.start()); + } + + public void testFailWriteFullMetadataWhenNotBlobRepository() { + final FilterRepository filterRepository = mock(FilterRepository.class); + when(repositoriesService.repository("remote_store_repository")).thenReturn(filterRepository); + assertThrows(AssertionError.class, () -> remoteClusterStateService.start()); + } + + public void testWriteFullMetadataSuccess() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + mockBlobStoreObjects(); + remoteClusterStateService.start(); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState, "prev-cluster-uuid"); + final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename"); + List<UploadedIndexMetadata> indices = List.of(uploadedIndexMetadata); + + final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() + .indices(indices) + .clusterTerm(1L) + .stateVersion(1L) + .stateUUID("state-uuid") + .clusterUUID("cluster-uuid") + .previousClusterUUID("prev-cluster-uuid") + .build(); + + assertThat(manifest.getIndices().size(), 
is(1)); + assertThat(manifest.getIndices().get(0).getIndexName(), is(uploadedIndexMetadata.getIndexName())); + assertThat(manifest.getIndices().get(0).getIndexUUID(), is(uploadedIndexMetadata.getIndexUUID())); + assertThat(manifest.getIndices().get(0).getUploadedFilename(), notNullValue()); + assertThat(manifest.getClusterTerm(), is(expectedManifest.getClusterTerm())); + assertThat(manifest.getStateVersion(), is(expectedManifest.getStateVersion())); + assertThat(manifest.getClusterUUID(), is(expectedManifest.getClusterUUID())); + assertThat(manifest.getStateUUID(), is(expectedManifest.getStateUUID())); + assertThat(manifest.getPreviousClusterUUID(), is(expectedManifest.getPreviousClusterUUID())); + } + + public void testWriteFullMetadataInParallelSuccess() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + AsyncMultiStreamBlobContainer container = (AsyncMultiStreamBlobContainer) mockBlobStoreObjects(AsyncMultiStreamBlobContainer.class); + + ArgumentCaptor<ActionListener<Void>> actionListenerArgumentCaptor = ArgumentCaptor.forClass(ActionListener.class); + ArgumentCaptor<WriteContext> writeContextArgumentCaptor = ArgumentCaptor.forClass(WriteContext.class); + AtomicReference<WriteContext> capturedWriteContext = new AtomicReference<>(); + doAnswer((i) -> { + actionListenerArgumentCaptor.getValue().onResponse(null); + return null; + }).doAnswer((i) -> { + actionListenerArgumentCaptor.getValue().onResponse(null); + capturedWriteContext.set(writeContextArgumentCaptor.getValue()); + return null; + }).doAnswer((i) -> { + actionListenerArgumentCaptor.getValue().onResponse(null); + return null; + }).when(container).asyncBlobUpload(writeContextArgumentCaptor.capture(), actionListenerArgumentCaptor.capture()); + + remoteClusterStateService.start(); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState, "prev-cluster-uuid"); + + final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename"); + List<UploadedIndexMetadata> indices = List.of(uploadedIndexMetadata); + + final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() + .indices(indices) + .clusterTerm(1L) + .stateVersion(1L) + .stateUUID("state-uuid") + .clusterUUID("cluster-uuid") + .previousClusterUUID("prev-cluster-uuid") + .build(); + + assertThat(manifest.getIndices().size(), is(1)); + assertThat(manifest.getIndices().get(0).getIndexName(), is(uploadedIndexMetadata.getIndexName())); + assertThat(manifest.getIndices().get(0).getIndexUUID(), is(uploadedIndexMetadata.getIndexUUID())); + assertThat(manifest.getIndices().get(0).getUploadedFilename(), notNullValue()); + assertThat(manifest.getGlobalMetadataFileName(), notNullValue()); + assertThat(manifest.getClusterTerm(), is(expectedManifest.getClusterTerm())); + assertThat(manifest.getStateVersion(), is(expectedManifest.getStateVersion())); + assertThat(manifest.getClusterUUID(), is(expectedManifest.getClusterUUID())); + assertThat(manifest.getStateUUID(), is(expectedManifest.getStateUUID())); + assertThat(manifest.getPreviousClusterUUID(), is(expectedManifest.getPreviousClusterUUID())); + + assertEquals(actionListenerArgumentCaptor.getAllValues().size(), 3); + assertEquals(writeContextArgumentCaptor.getAllValues().size(), 3); + + byte[] writtenBytes = capturedWriteContext.get() + .getStreamProvider(Integer.MAX_VALUE) + .provideStream(0) + .getInputStream() + 
.readAllBytes(); + IndexMetadata writtenIndexMetadata = RemoteClusterStateService.INDEX_METADATA_FORMAT.deserialize( + capturedWriteContext.get().getFileName(), + blobStoreRepository.getNamedXContentRegistry(), + new BytesArray(writtenBytes) + ); + + assertEquals(capturedWriteContext.get().getWritePriority(), WritePriority.URGENT); + assertEquals(writtenIndexMetadata.getNumberOfShards(), 1); + assertEquals(writtenIndexMetadata.getNumberOfReplicas(), 0); + assertEquals(writtenIndexMetadata.getIndex().getName(), "test-index"); + assertEquals(writtenIndexMetadata.getIndex().getUUID(), "index-uuid"); + long expectedChecksum = RemoteTransferContainer.checksumOfChecksum(new ByteArrayIndexInput("metadata-filename", writtenBytes), 8); + if (capturedWriteContext.get().doRemoteDataIntegrityCheck()) { + assertEquals(capturedWriteContext.get().getExpectedChecksum().longValue(), expectedChecksum); + } else { + assertEquals(capturedWriteContext.get().getExpectedChecksum(), null); + } + + } + + public void testWriteFullMetadataFailureForGlobalMetadata() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + AsyncMultiStreamBlobContainer container = (AsyncMultiStreamBlobContainer) mockBlobStoreObjects(AsyncMultiStreamBlobContainer.class); + + ArgumentCaptor<ActionListener<Void>> actionListenerArgumentCaptor = ArgumentCaptor.forClass(ActionListener.class); + + doAnswer((i) -> { + // For async write action listener will be called from different thread, replicating same behaviour here. + new Thread(new Runnable() { + @Override + public void run() { + actionListenerArgumentCaptor.getValue().onFailure(new RuntimeException("Cannot upload to remote")); + } + }).start(); + return null; + }).when(container).asyncBlobUpload(any(WriteContext.class), actionListenerArgumentCaptor.capture()); + + remoteClusterStateService.start(); + assertThrows( + RemoteClusterStateService.RemoteStateTransferException.class, + () -> remoteClusterStateService.writeFullMetadata(clusterState, randomAlphaOfLength(10)) + ); + } + + public void testTimeoutWhileWritingManifestFile() throws IOException { + // verify update metadata manifest upload timeout + int metadataManifestUploadTimeout = 2; + Settings newSettings = Settings.builder() + .put("cluster.remote_store.state.metadata_manifest.upload_timeout", metadataManifestUploadTimeout + "s") + .build(); + clusterSettings.applySettings(newSettings); + + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + AsyncMultiStreamBlobContainer container = (AsyncMultiStreamBlobContainer) mockBlobStoreObjects(AsyncMultiStreamBlobContainer.class); + + ArgumentCaptor<ActionListener<Void>> actionListenerArgumentCaptor = ArgumentCaptor.forClass(ActionListener.class); + + doAnswer((i) -> { // For Global Metadata + actionListenerArgumentCaptor.getValue().onResponse(null); + return null; + }).doAnswer((i) -> { // For Index Metadata + actionListenerArgumentCaptor.getValue().onResponse(null); + return null; + }).doAnswer((i) -> { + // For Manifest file perform No Op, so latch in code will timeout + return null; + }).when(container).asyncBlobUpload(any(WriteContext.class), actionListenerArgumentCaptor.capture()); + + remoteClusterStateService.start(); + try { + remoteClusterStateService.writeFullMetadata(clusterState, randomAlphaOfLength(10)); + } catch (Exception e) { + assertTrue(e instanceof RemoteClusterStateService.RemoteStateTransferException); + 
assertTrue(e.getMessage().contains("Timed out waiting for transfer of manifest file to complete")); + } + } + + public void testWriteFullMetadataInParallelFailureForIndexMetadata() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + AsyncMultiStreamBlobContainer container = (AsyncMultiStreamBlobContainer) mockBlobStoreObjects(AsyncMultiStreamBlobContainer.class); + + ArgumentCaptor<ActionListener<Void>> actionListenerArgumentCaptor = ArgumentCaptor.forClass(ActionListener.class); + + doAnswer((i) -> { + actionListenerArgumentCaptor.getValue().onResponse(null); + return null; + }).doAnswer((i) -> { + actionListenerArgumentCaptor.getValue().onFailure(new RuntimeException("Cannot upload to remote")); + return null; + }).when(container).asyncBlobUpload(any(WriteContext.class), actionListenerArgumentCaptor.capture()); + + remoteClusterStateService.start(); + assertThrows( + RemoteClusterStateService.RemoteStateTransferException.class, + () -> remoteClusterStateService.writeFullMetadata(clusterState, randomAlphaOfLength(10)) + ); + assertEquals(0, remoteClusterStateService.getStats().getSuccessCount()); + } + + public void testFailWriteIncrementalMetadataNonClusterManagerNode() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().build(); + remoteClusterStateService.start(); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeIncrementalMetadata(clusterState, clusterState, null); + Assert.assertThat(manifest, nullValue()); + assertEquals(0, remoteClusterStateService.getStats().getSuccessCount()); + } + + public void testFailWriteIncrementalMetadataWhenTermChanged() { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder().term(2L).build(); + final ClusterState previousClusterState = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().coordinationMetadata(coordinationMetadata)) + .build(); + assertThrows( + AssertionError.class, + () -> remoteClusterStateService.writeIncrementalMetadata(previousClusterState, clusterState, null) + ); + } + + public void testWriteIncrementalMetadataSuccess() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + mockBlobStoreObjects(); + final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder().term(1L).build(); + final ClusterState previousClusterState = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().coordinationMetadata(coordinationMetadata)) + .build(); + + final ClusterMetadataManifest previousManifest = ClusterMetadataManifest.builder().indices(Collections.emptyList()).build(); + + remoteClusterStateService.start(); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeIncrementalMetadata( + previousClusterState, + clusterState, + previousManifest + ); + final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename"); + final List<UploadedIndexMetadata> indices = List.of(uploadedIndexMetadata); + + final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() + .indices(indices) + .clusterTerm(1L) + .stateVersion(1L) + .stateUUID("state-uuid") + .clusterUUID("cluster-uuid") + 
.previousClusterUUID("prev-cluster-uuid") + .build(); + + assertThat(manifest.getIndices().size(), is(1)); + assertThat(manifest.getIndices().get(0).getIndexName(), is(uploadedIndexMetadata.getIndexName())); + assertThat(manifest.getIndices().get(0).getIndexUUID(), is(uploadedIndexMetadata.getIndexUUID())); + assertThat(manifest.getIndices().get(0).getUploadedFilename(), notNullValue()); + assertThat(manifest.getClusterTerm(), is(expectedManifest.getClusterTerm())); + assertThat(manifest.getStateVersion(), is(expectedManifest.getStateVersion())); + assertThat(manifest.getClusterUUID(), is(expectedManifest.getClusterUUID())); + assertThat(manifest.getStateUUID(), is(expectedManifest.getStateUUID())); + } + + /* + * Here we will verify the migration of manifest file from codec V0 and V1. + * + * Initially codec version is 0 and global metadata is also null, we will perform index metadata update. + * In final manifest codec version should be 1 and + * global metadata should be updated, even if it was not changed in this cluster state update + */ + public void testMigrationFromCodecV0ManifestToCodecV1Manifest() throws IOException { + mockBlobStoreObjects(); + final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder().term(1L).build(); + final ClusterState previousClusterState = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().coordinationMetadata(coordinationMetadata)) + .nodes(nodesWithLocalNodeClusterManager()) + .build(); + + // Update only index metadata + final IndexMetadata indexMetadata = new IndexMetadata.Builder("test").settings( + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, "uuid") + .build() + ).numberOfShards(1).numberOfReplicas(0).build(); + Metadata newMetadata = Metadata.builder(previousClusterState.metadata()).put(indexMetadata, true).build(); + ClusterState newClusterState = ClusterState.builder(previousClusterState).metadata(newMetadata).build(); + + // previous manifest with codec 0 and null global metadata + final ClusterMetadataManifest previousManifest = ClusterMetadataManifest.builder() + .codecVersion(ClusterMetadataManifest.CODEC_V0) + .globalMetadataFileName(null) + .indices(Collections.emptyList()) + .build(); + + remoteClusterStateService.start(); + final ClusterMetadataManifest manifestAfterUpdate = remoteClusterStateService.writeIncrementalMetadata( + previousClusterState, + newClusterState, + previousManifest + ); + + // global metadata is updated + assertThat(manifestAfterUpdate.getGlobalMetadataFileName(), notNullValue()); + // Manifest file with codec version with 1 is updated. 
+ assertThat(manifestAfterUpdate.getCodecVersion(), is(ClusterMetadataManifest.CODEC_V1)); + } + + public void testWriteIncrementalGlobalMetadataSuccess() throws IOException { + final ClusterState clusterState = generateClusterStateWithGlobalMetadata().nodes(nodesWithLocalNodeClusterManager()).build(); + mockBlobStoreObjects(); + final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder().term(1L).build(); + final ClusterState previousClusterState = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().coordinationMetadata(coordinationMetadata)) + .build(); + + final ClusterMetadataManifest previousManifest = ClusterMetadataManifest.builder() + .codecVersion(2) + .globalMetadataFileName("global-metadata-file") + .indices(Collections.emptyList()) + .build(); + + remoteClusterStateService.start(); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeIncrementalMetadata( + previousClusterState, + clusterState, + previousManifest + ); + + final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() + .indices(Collections.emptyList()) + .globalMetadataFileName("mock-filename") + .clusterTerm(1L) + .stateVersion(1L) + .stateUUID("state-uuid") + .clusterUUID("cluster-uuid") + .previousClusterUUID("prev-cluster-uuid") + .build(); + + assertThat(manifest.getGlobalMetadataFileName(), notNullValue()); + assertThat(manifest.getClusterTerm(), is(expectedManifest.getClusterTerm())); + assertThat(manifest.getStateVersion(), is(expectedManifest.getStateVersion())); + assertThat(manifest.getClusterUUID(), is(expectedManifest.getClusterUUID())); + assertThat(manifest.getStateUUID(), is(expectedManifest.getStateUUID())); + } + + /* + * Here we will verify that index metadata is not uploaded again if the change is only in global metadata + */ + public void testGlobalMetadataOnlyUpdated() throws IOException { + // setup + mockBlobStoreObjects(); + final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder().term(1L).build(); + final ClusterState initialClusterState = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().coordinationMetadata(coordinationMetadata).version(randomNonNegativeLong())) + .build(); + final ClusterMetadataManifest initialManifest = ClusterMetadataManifest.builder() + .codecVersion(2) + .globalMetadataFileName("global-metadata-file") + .indices(Collections.emptyList()) + .build(); + remoteClusterStateService.start(); + + // Initial cluster state with index. 
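+ // generateClusterStateWithOneIndex() is a helper defined further down in this class; it builds a cluster state containing the single index "test-index" used throughout these tests.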
+ final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + // Updating remote cluster state with changing index metadata + final ClusterMetadataManifest manifestAfterIndexMetadataUpdate = remoteClusterStateService.writeIncrementalMetadata( + initialClusterState, + clusterState, + initialManifest + ); + + // new cluster state where only global metadata is different + Metadata newMetadata = Metadata.builder(clusterState.metadata()) + .persistentSettings(Settings.builder().put("cluster.blocks.read_only", true).build()) + .version(randomNonNegativeLong()) + .build(); + ClusterState newClusterState = ClusterState.builder(clusterState).metadata(newMetadata).build(); + + // updating remote cluster state with global metadata + final ClusterMetadataManifest manifestAfterGlobalMetadataUpdate = remoteClusterStateService.writeIncrementalMetadata( + clusterState, + newClusterState, + manifestAfterIndexMetadataUpdate + ); + + // Verify that the index metadata information is the same in both manifest files + assertThat(manifestAfterIndexMetadataUpdate.getIndices().size(), is(manifestAfterGlobalMetadataUpdate.getIndices().size())); + assertThat( + manifestAfterIndexMetadataUpdate.getIndices().get(0).getIndexName(), + is(manifestAfterGlobalMetadataUpdate.getIndices().get(0).getIndexName()) + ); + assertThat( + manifestAfterIndexMetadataUpdate.getIndices().get(0).getIndexUUID(), + is(manifestAfterGlobalMetadataUpdate.getIndices().get(0).getIndexUUID()) + ); + + // since the timestamp is part of the file name, an unchanged file name confirms the file was not updated during the global metadata update + assertThat( + manifestAfterIndexMetadataUpdate.getIndices().get(0).getUploadedFilename(), + is(manifestAfterGlobalMetadataUpdate.getIndices().get(0).getUploadedFilename()) + ); + + // global metadata file would have changed + assertFalse( + manifestAfterIndexMetadataUpdate.getGlobalMetadataFileName() + .equalsIgnoreCase(manifestAfterGlobalMetadataUpdate.getGlobalMetadataFileName()) + ); + } + + /* + * Here we will verify that global metadata is not uploaded again if the change is only in index metadata + */ + public void testIndexMetadataOnlyUpdated() throws IOException { + // setup + mockBlobStoreObjects(); + final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder().term(1L).build(); + final ClusterState initialClusterState = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().coordinationMetadata(coordinationMetadata)) + .build(); + final ClusterMetadataManifest initialManifest = ClusterMetadataManifest.builder() + .codecVersion(2) + .indices(Collections.emptyList()) + .build(); + remoteClusterStateService.start(); + + // Initial cluster state with global metadata. 
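+ // generateClusterStateWithGlobalMetadata() is a helper defined further down in this class; it builds a state carrying global metadata but no index metadata, which the assertions below rely on.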
+ final ClusterState clusterState = generateClusterStateWithGlobalMetadata().nodes(nodesWithLocalNodeClusterManager()).build(); + + // Updating remote cluster state with changing global metadata + final ClusterMetadataManifest manifestAfterGlobalMetadataUpdate = remoteClusterStateService.writeIncrementalMetadata( + initialClusterState, + clusterState, + initialManifest + ); + + // new cluster state where only index metadata is different + final IndexMetadata indexMetadata = new IndexMetadata.Builder("test").settings( + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, "uuid") + .build() + ).numberOfShards(1).numberOfReplicas(0).build(); + Metadata newMetadata = Metadata.builder(clusterState.metadata()).put(indexMetadata, true).build(); + ClusterState newClusterState = ClusterState.builder(clusterState).metadata(newMetadata).build(); + + // updating remote cluster state with index metadata + final ClusterMetadataManifest manifestAfterIndexMetadataUpdate = remoteClusterStateService.writeIncrementalMetadata( + clusterState, + newClusterState, + manifestAfterGlobalMetadataUpdate + ); + + // Verify that the global metadata information is the same in both manifest files after updating the index metadata + // since the timestamp is part of the file name, an unchanged file name confirms the file was not updated during the index metadata update + assertThat( + manifestAfterIndexMetadataUpdate.getGlobalMetadataFileName(), + is(manifestAfterGlobalMetadataUpdate.getGlobalMetadataFileName()) + ); + + // Index metadata would have changed + assertThat(manifestAfterGlobalMetadataUpdate.getIndices().size(), is(0)); + assertThat(manifestAfterIndexMetadataUpdate.getIndices().size(), is(1)); + } + + public void testReadLatestMetadataManifestFailedIOException() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + + BlobContainer blobContainer = mockBlobStoreObjects(); + when(blobContainer.listBlobsByPrefixInSortedOrder("manifest" + DELIMITER, 1, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC)) + .thenThrow(IOException.class); + + remoteClusterStateService.start(); + Exception e = assertThrows( + IllegalStateException.class, + () -> remoteClusterStateService.getLatestClusterMetadataManifest( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() + ) + ); + assertEquals(e.getMessage(), "Error while fetching latest manifest file for remote cluster state"); + } + + public void testReadLatestMetadataManifestFailedNoManifestFileInRemote() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + + BlobContainer blobContainer = mockBlobStoreObjects(); + when(blobContainer.listBlobsByPrefixInSortedOrder("manifest" + DELIMITER, 1, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC)) + .thenReturn(List.of()); + + remoteClusterStateService.start(); + Optional<ClusterMetadataManifest> manifest = remoteClusterStateService.getLatestClusterMetadataManifest( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() + ); + assertEquals(manifest, Optional.empty()); + } + + public void testReadLatestMetadataManifestFailedManifestFileRemoveAfterFetchInRemote() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + + BlobContainer blobContainer = mockBlobStoreObjects(); + 
BlobMetadata blobMetadata = new PlainBlobMetadata("manifestFileName", 1); + when(blobContainer.listBlobsByPrefixInSortedOrder("manifest" + DELIMITER, 1, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC)) + .thenReturn(Arrays.asList(blobMetadata)); + when(blobContainer.readBlob("manifestFileName")).thenThrow(FileNotFoundException.class); + + remoteClusterStateService.start(); + Exception e = assertThrows( + IllegalStateException.class, + () -> remoteClusterStateService.getLatestClusterMetadataManifest( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() + ) + ); + assertEquals(e.getMessage(), "Error while downloading cluster metadata - manifestFileName"); + } + + public void testReadLatestMetadataManifestSuccessButNoIndexMetadata() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() + .indices(List.of()) + .clusterTerm(1L) + .stateVersion(1L) + .stateUUID("state-uuid") + .clusterUUID("cluster-uuid") + .nodeId("nodeA") + .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .previousClusterUUID("prev-cluster-uuid") + .codecVersion(ClusterMetadataManifest.CODEC_V0) + .build(); + + BlobContainer blobContainer = mockBlobStoreObjects(); + mockBlobContainer(blobContainer, expectedManifest, Map.of()); + + remoteClusterStateService.start(); + assertEquals( + remoteClusterStateService.getLatestClusterState(clusterState.getClusterName().value(), clusterState.metadata().clusterUUID()) + .getMetadata() + .getIndices() + .size(), + 0 + ); + } + + public void testReadLatestMetadataManifestSuccessButIndexMetadataFetchIOException() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename"); + final List<UploadedIndexMetadata> indices = List.of(uploadedIndexMetadata); + final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() + .indices(indices) + .clusterTerm(1L) + .stateVersion(1L) + .stateUUID("state-uuid") + .clusterUUID("cluster-uuid") + .nodeId("nodeA") + .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .previousClusterUUID("prev-cluster-uuid") + .build(); + + BlobContainer blobContainer = mockBlobStoreObjects(); + mockBlobContainer(blobContainer, expectedManifest, Map.of()); + when(blobContainer.readBlob(uploadedIndexMetadata.getUploadedFilename() + ".dat")).thenThrow(FileNotFoundException.class); + + remoteClusterStateService.start(); + Exception e = assertThrows( + IllegalStateException.class, + () -> remoteClusterStateService.getLatestClusterState( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() + ).getMetadata().getIndices() + ); + assertEquals(e.getMessage(), "Error while downloading IndexMetadata - " + uploadedIndexMetadata.getUploadedFilename()); + } + + public void testReadLatestMetadataManifestSuccess() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename"); + final List<UploadedIndexMetadata> indices = List.of(uploadedIndexMetadata); + + final ClusterMetadataManifest 
expectedManifest = ClusterMetadataManifest.builder() + .indices(indices) + .clusterTerm(1L) + .stateVersion(1L) + .stateUUID("state-uuid") + .clusterUUID("cluster-uuid") + .nodeId("nodeA") + .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .codecVersion(ClusterMetadataManifest.CODEC_V0) + .previousClusterUUID("prev-cluster-uuid") + .build(); + + mockBlobContainer(mockBlobStoreObjects(), expectedManifest, new HashMap<>()); + remoteClusterStateService.start(); + final ClusterMetadataManifest manifest = remoteClusterStateService.getLatestClusterMetadataManifest( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() + ).get(); + + assertThat(manifest.getIndices().size(), is(1)); + assertThat(manifest.getIndices().get(0).getIndexName(), is(uploadedIndexMetadata.getIndexName())); + assertThat(manifest.getIndices().get(0).getIndexUUID(), is(uploadedIndexMetadata.getIndexUUID())); + assertThat(manifest.getIndices().get(0).getUploadedFilename(), notNullValue()); + assertThat(manifest.getClusterTerm(), is(expectedManifest.getClusterTerm())); + assertThat(manifest.getStateVersion(), is(expectedManifest.getStateVersion())); + assertThat(manifest.getClusterUUID(), is(expectedManifest.getClusterUUID())); + assertThat(manifest.getStateUUID(), is(expectedManifest.getStateUUID())); + } + + public void testReadGlobalMetadata() throws IOException { + when(blobStoreRepository.getNamedXContentRegistry()).thenReturn(new NamedXContentRegistry( + List.of(new NamedXContentRegistry.Entry(Metadata.Custom.class, new ParseField(IndexGraveyard.TYPE), IndexGraveyard::fromXContent)))); + final ClusterState clusterState = generateClusterStateWithGlobalMetadata().nodes(nodesWithLocalNodeClusterManager()).build(); + remoteClusterStateService.start(); + + long prevClusterStateVersion = 13L; + final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() + .indices(List.of()) + .clusterTerm(1L) + .stateVersion(prevClusterStateVersion) + .stateUUID("state-uuid") + .clusterUUID("cluster-uuid") + .codecVersion(MANIFEST_CURRENT_CODEC_VERSION) + .globalMetadataFileName("global-metadata-file") + .nodeId("nodeA") + .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .previousClusterUUID("prev-cluster-uuid") + .build(); + + Metadata expectedMetadata = Metadata.builder().persistentSettings(Settings.builder().put("readonly", true).build()).build(); + mockBlobContainerForGlobalMetadata(mockBlobStoreObjects(), expectedManifest, expectedMetadata); + + ClusterState newClusterState = remoteClusterStateService.getLatestClusterState( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() + ); + + assertTrue(Metadata.isGlobalStateEquals(newClusterState.getMetadata(), expectedMetadata)); + + long newClusterStateVersion = newClusterState.getVersion(); + assert prevClusterStateVersion == newClusterStateVersion : String.format( + Locale.ROOT, + "ClusterState version is not restored. 
previousClusterVersion: [%s] is not equal to current [%s]", + prevClusterStateVersion, + newClusterStateVersion + ); + } + + public void testReadGlobalMetadataIOException() throws IOException { + final ClusterState clusterState = generateClusterStateWithGlobalMetadata().nodes(nodesWithLocalNodeClusterManager()).build(); + remoteClusterStateService.start(); + String globalIndexMetadataName = "global-metadata-file"; + final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() + .indices(List.of()) + .clusterTerm(1L) + .stateVersion(1L) + .stateUUID("state-uuid") + .clusterUUID("cluster-uuid") + .codecVersion(MANIFEST_CURRENT_CODEC_VERSION) + .globalMetadataFileName(globalIndexMetadataName) + .nodeId("nodeA") + .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .previousClusterUUID("prev-cluster-uuid") + .build(); + + Metadata expectedMetadata = Metadata.builder().persistentSettings(Settings.builder().put("readonly", true).build()).build(); + + BlobContainer blobContainer = mockBlobStoreObjects(); + mockBlobContainerForGlobalMetadata(blobContainer, expectedManifest, expectedMetadata); + + when(blobContainer.readBlob(RemoteClusterStateService.GLOBAL_METADATA_FORMAT.blobName(globalIndexMetadataName))).thenThrow( + FileNotFoundException.class + ); + + remoteClusterStateService.start(); + Exception e = assertThrows( + IllegalStateException.class, + () -> remoteClusterStateService.getLatestClusterState( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() + ) + ); + assertEquals(e.getMessage(), "Error while downloading Global Metadata - " + globalIndexMetadataName); + } + + public void testReadLatestIndexMetadataSuccess() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + remoteClusterStateService.start(); + + final Index index = new Index("test-index", "index-uuid"); + String fileName = "metadata-" + index.getUUID(); + final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata(index.getName(), index.getUUID(), fileName); + final Settings idxSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + .build(); + final IndexMetadata indexMetadata = new IndexMetadata.Builder(index.getName()).settings(idxSettings) + .numberOfShards(11) + .numberOfReplicas(10) + .build(); + + final List<UploadedIndexMetadata> indices = List.of(uploadedIndexMetadata); + final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() + .indices(indices) + .clusterTerm(1L) + .stateVersion(1L) + .stateUUID("state-uuid") + .clusterUUID("cluster-uuid") + .nodeId("nodeA") + .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .previousClusterUUID("prev-cluster-uuid") + .codecVersion(ClusterMetadataManifest.CODEC_V0) + .build(); + + mockBlobContainer(mockBlobStoreObjects(), expectedManifest, Map.of(index.getUUID(), indexMetadata)); + + Map<String, IndexMetadata> indexMetadataMap = remoteClusterStateService.getLatestClusterState( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() + ).getMetadata().getIndices(); + + assertEquals(indexMetadataMap.size(), 1); + assertEquals(indexMetadataMap.get(index.getName()).getIndex().getName(), index.getName()); + assertEquals(indexMetadataMap.get(index.getName()).getNumberOfShards(), indexMetadata.getNumberOfShards()); + 
assertEquals(indexMetadataMap.get(index.getName()).getNumberOfReplicas(), indexMetadata.getNumberOfReplicas()); + } + + public void testMarkLastStateAsCommittedSuccess() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + mockBlobStoreObjects(); + remoteClusterStateService.start(); + final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename"); + List<UploadedIndexMetadata> indices = List.of(uploadedIndexMetadata); + final ClusterMetadataManifest previousManifest = ClusterMetadataManifest.builder().indices(indices).build(); + + final ClusterMetadataManifest manifest = remoteClusterStateService.markLastStateAsCommitted(clusterState, previousManifest); + + final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() + .indices(indices) + .clusterTerm(1L) + .stateVersion(1L) + .stateUUID("state-uuid") + .clusterUUID("cluster-uuid") + .nodeId("nodeA") + .previousClusterUUID("prev-cluster-uuid") + .build(); + + assertThat(manifest.getIndices().size(), is(1)); + assertThat(manifest.getIndices().get(0).getIndexName(), is(uploadedIndexMetadata.getIndexName())); + assertThat(manifest.getIndices().get(0).getIndexUUID(), is(uploadedIndexMetadata.getIndexUUID())); + assertThat(manifest.getIndices().get(0).getUploadedFilename(), notNullValue()); + assertThat(manifest.getClusterTerm(), is(expectedManifest.getClusterTerm())); + assertThat(manifest.getStateVersion(), is(expectedManifest.getStateVersion())); + assertThat(manifest.getClusterUUID(), is(expectedManifest.getClusterUUID())); + assertThat(manifest.getStateUUID(), is(expectedManifest.getStateUUID())); + } + + public void testGetValidPreviousClusterUUID() throws IOException { + Map<String, String> clusterUUIDsPointers = Map.of( + "cluster-uuid1", + ClusterState.UNKNOWN_UUID, + "cluster-uuid2", + "cluster-uuid1", + "cluster-uuid3", + "cluster-uuid2" + ); + mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers); + + remoteClusterStateService.start(); + String previousClusterUUID = remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster"); + assertThat(previousClusterUUID, equalTo("cluster-uuid3")); + } + + public void testGetValidPreviousClusterUUIDForInvalidChain() throws IOException { + Map<String, String> clusterUUIDsPointers = Map.of( + "cluster-uuid2", + "cluster-uuid1", + "cluster-uuid3", + "cluster-uuid2", + "cluster-uuid5", + "cluster-uuid4" + ); + mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers); + + remoteClusterStateService.start(); + assertThrows(IllegalStateException.class, () -> remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster")); + } + + public void testGetValidPreviousClusterUUIDWithMultipleChains() throws IOException { + Map<String, String> clusterUUIDsPointers = Map.of( + "cluster-uuid2", + "cluster-uuid1", + "cluster-uuid1", + ClusterState.UNKNOWN_UUID, + "cluster-uuid3", + "cluster-uuid1" + ); + mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers, randomBoolean(), Collections.emptyMap()); + + remoteClusterStateService.start(); + String previousClusterUUID = remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster"); + assertThat(previousClusterUUID, equalTo("cluster-uuid3")); + } + + public void testGetValidPreviousClusterUUIDWithInvalidMultipleChains() throws IOException { + Map<String, String> clusterUUIDsPointers = Map.of( + "cluster-uuid1", + ClusterState.UNKNOWN_UUID, + 
"cluster-uuid2", + "cluster-uuid1", + "cluster-uuid3", + ClusterState.UNKNOWN_UUID + ); + mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers); + + remoteClusterStateService.start(); + assertThrows(IllegalStateException.class, () -> remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster")); + } + + public void testGetValidPreviousClusterUUIDWhenLastUUIDUncommitted() throws IOException { + Map<String, String> clusterUUIDsPointers = Map.of( + "cluster-uuid1", + ClusterState.UNKNOWN_UUID, + "cluster-uuid2", + "cluster-uuid1", + "cluster-uuid3", + "cluster-uuid2" + ); + Map<String, Boolean> clusterUUIDCommitted = Map.of("cluster-uuid1", true, "cluster-uuid2", true, "cluster-uuid3", false); + mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers, clusterUUIDCommitted); + + remoteClusterStateService.start(); + String previousClusterUUID = remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster"); + assertThat(previousClusterUUID, equalTo("cluster-uuid2")); + } + + public void testDeleteStaleClusterUUIDs() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + ClusterMetadataManifest clusterMetadataManifest = ClusterMetadataManifest.builder() + .indices(List.of()) + .clusterTerm(1L) + .stateVersion(1L) + .stateUUID(randomAlphaOfLength(10)) + .clusterUUID("cluster-uuid1") + .nodeId("nodeA") + .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .previousClusterUUID(ClusterState.UNKNOWN_UUID) + .committed(true) + .build(); + + BlobPath blobPath = new BlobPath().add("random-path"); + when((blobStoreRepository.basePath())).thenReturn(blobPath); + BlobContainer uuidContainerContainer = mock(BlobContainer.class); + BlobContainer manifest2Container = mock(BlobContainer.class); + BlobContainer manifest3Container = mock(BlobContainer.class); + when(blobStore.blobContainer(any())).then(invocation -> { + BlobPath blobPath1 = invocation.getArgument(0); + if (blobPath1.buildAsString().endsWith("cluster-state/")) { + return uuidContainerContainer; + } else if (blobPath1.buildAsString().contains("cluster-state/cluster-uuid2/")) { + return manifest2Container; + } else if (blobPath1.buildAsString().contains("cluster-state/cluster-uuid3/")) { + return manifest3Container; + } else { + throw new IllegalArgumentException("Unexpected blob path " + blobPath1); + } + }); + Map<String, BlobContainer> blobMetadataMap = Map.of( + "cluster-uuid1", + mock(BlobContainer.class), + "cluster-uuid2", + mock(BlobContainer.class), + "cluster-uuid3", + mock(BlobContainer.class) + ); + when(uuidContainerContainer.children()).thenReturn(blobMetadataMap); + when( + manifest2Container.listBlobsByPrefixInSortedOrder( + MANIFEST_FILE_PREFIX + DELIMITER, + Integer.MAX_VALUE, + BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC + ) + ).thenReturn(List.of(new PlainBlobMetadata("mainfest2", 1L))); + when( + manifest3Container.listBlobsByPrefixInSortedOrder( + MANIFEST_FILE_PREFIX + DELIMITER, + Integer.MAX_VALUE, + BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC + ) + ).thenReturn(List.of(new PlainBlobMetadata("mainfest3", 1L))); + remoteClusterStateService.start(); + remoteClusterStateService.deleteStaleClusterUUIDs(clusterState, clusterMetadataManifest); + try { + assertBusy(() -> { + verify(manifest2Container, times(1)).delete(); + verify(manifest3Container, times(1)).delete(); + }); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public void testRemoteStateStats() throws 
IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + mockBlobStoreObjects(); + remoteClusterStateService.start(); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState, "prev-cluster-uuid"); + + assertTrue(remoteClusterStateService.getStats() != null); + assertEquals(1, remoteClusterStateService.getStats().getSuccessCount()); + assertEquals(0, remoteClusterStateService.getStats().getCleanupAttemptFailedCount()); + assertEquals(0, remoteClusterStateService.getStats().getFailedCount()); + } + + public void testRemoteStateCleanupFailureStats() throws IOException { + BlobContainer blobContainer = mock(BlobContainer.class); + doThrow(IOException.class).when(blobContainer).delete(); + when(blobStore.blobContainer(any())).thenReturn(blobContainer); + BlobPath blobPath = new BlobPath().add("random-path"); + when((blobStoreRepository.basePath())).thenReturn(blobPath); + remoteClusterStateService.start(); + remoteClusterStateService.deleteStaleUUIDsClusterMetadata("cluster1", Arrays.asList("cluster-uuid1")); + try { + assertBusy(() -> { + // wait for stats to get updated + assertTrue(remoteClusterStateService.getStats() != null); + assertEquals(0, remoteClusterStateService.getStats().getSuccessCount()); + assertEquals(1, remoteClusterStateService.getStats().getCleanupAttemptFailedCount()); + }); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public void testFileNames() { + final Index index = new Index("test-index", "index-uuid"); + final Settings idxSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + .build(); + final IndexMetadata indexMetadata = new IndexMetadata.Builder(index.getName()).settings(idxSettings) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); + + String indexMetadataFileName = RemoteClusterStateService.indexMetadataFileName(indexMetadata); + String[] splittedIndexMetadataFileName = indexMetadataFileName.split(DELIMITER); + assertThat(indexMetadataFileName.split(DELIMITER).length, is(4)); + assertThat(splittedIndexMetadataFileName[0], is(METADATA_FILE_PREFIX)); + assertThat(splittedIndexMetadataFileName[1], is(RemoteStoreUtils.invertLong(indexMetadata.getVersion()))); + assertThat(splittedIndexMetadataFileName[3], is(String.valueOf(INDEX_METADATA_CURRENT_CODEC_VERSION))); + + int term = randomIntBetween(5, 10); + int version = randomIntBetween(5, 10); + String manifestFileName = RemoteClusterStateService.getManifestFileName(term, version, true); + assertThat(manifestFileName.split(DELIMITER).length, is(6)); + String[] splittedName = manifestFileName.split(DELIMITER); + assertThat(splittedName[0], is(MANIFEST_FILE_PREFIX)); + assertThat(splittedName[1], is(RemoteStoreUtils.invertLong(term))); + assertThat(splittedName[2], is(RemoteStoreUtils.invertLong(version))); + assertThat(splittedName[3], is("C")); + assertThat(splittedName[5], is(String.valueOf(MANIFEST_CURRENT_CODEC_VERSION))); + + manifestFileName = RemoteClusterStateService.getManifestFileName(term, version, false); + splittedName = manifestFileName.split(DELIMITER); + assertThat(splittedName[3], is("P")); + } + + public void testSingleConcurrentExecutionOfStaleManifestCleanup() throws Exception { + BlobContainer blobContainer = mock(BlobContainer.class); + BlobPath blobPath = new BlobPath().add("random-path"); + 
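
The testFileNames checks a few lines up assert that the term and version components of manifest and index-metadata blob names go through RemoteStoreUtils.invertLong. The point of the inversion is that blob stores list keys lexicographically, so encoding a descending number makes the newest file the first listing result, which is why a `listBlobsByPrefixInSortedOrder(..., 1, LEXICOGRAPHIC)` call in the mocks below is enough to locate the latest manifest. A self-contained sketch of the trick; the zero-padded subtract-from-Long.MAX_VALUE encoding is an assumption for illustration, not necessarily the exact production implementation:

```java
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.LongStream;

public class InvertLongSketch {
    // Hypothetical re-implementation of the inversion trick: subtracting from
    // Long.MAX_VALUE and zero-padding to a fixed width makes higher versions
    // sort *earlier* lexicographically.
    static String invertLong(long value) {
        return String.format("%019d", Long.MAX_VALUE - value);
    }

    public static void main(String[] args) {
        List<String> names = LongStream.of(7, 42, 9)
            .mapToObj(v -> "manifest__" + invertLong(v))
            .sorted() // plain lexicographic sort, as a blob store listing would return
            .collect(Collectors.toList());
        System.out.println(names); // the name built from version 42 comes first
    }
}
```
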
when((blobStoreRepository.basePath())).thenReturn(blobPath); + when(blobStore.blobContainer(any())).thenReturn(blobContainer); + + CountDownLatch latch = new CountDownLatch(1); + AtomicInteger callCount = new AtomicInteger(0); + doAnswer(invocation -> { + callCount.incrementAndGet(); + if (latch.await(5000, TimeUnit.SECONDS) == false) { + throw new Exception("Timed out waiting for delete task queuing to complete"); + } + return null; + }).when(blobContainer) + .listBlobsByPrefixInSortedOrder( + any(String.class), + any(int.class), + any(BlobContainer.BlobNameSortOrder.class), + any(ActionListener.class) + ); + + remoteClusterStateService.start(); + remoteClusterStateService.deleteStaleClusterMetadata("cluster-name", "cluster-uuid", RETAINED_MANIFESTS); + remoteClusterStateService.deleteStaleClusterMetadata("cluster-name", "cluster-uuid", RETAINED_MANIFESTS); + + latch.countDown(); + assertBusy(() -> assertEquals(1, callCount.get())); + } + + public void testIndexMetadataUploadWaitTimeSetting() { + // verify default value + assertEquals( + RemoteClusterStateService.INDEX_METADATA_UPLOAD_TIMEOUT_DEFAULT, + remoteClusterStateService.getIndexMetadataUploadTimeout() + ); + + // verify update index metadata upload timeout + int indexMetadataUploadTimeout = randomIntBetween(1, 10); + Settings newSettings = Settings.builder() + .put("cluster.remote_store.state.index_metadata.upload_timeout", indexMetadataUploadTimeout + "s") + .build(); + clusterSettings.applySettings(newSettings); + assertEquals(indexMetadataUploadTimeout, remoteClusterStateService.getIndexMetadataUploadTimeout().seconds()); + } + + public void testMetadataManifestUploadWaitTimeSetting() { + // verify default value + assertEquals( + RemoteClusterStateService.METADATA_MANIFEST_UPLOAD_TIMEOUT_DEFAULT, + remoteClusterStateService.getMetadataManifestUploadTimeout() + ); + + // verify update metadata manifest upload timeout + int metadataManifestUploadTimeout = randomIntBetween(1, 10); + Settings newSettings = Settings.builder() + .put("cluster.remote_store.state.metadata_manifest.upload_timeout", metadataManifestUploadTimeout + "s") + .build(); + clusterSettings.applySettings(newSettings); + assertEquals(metadataManifestUploadTimeout, remoteClusterStateService.getMetadataManifestUploadTimeout().seconds()); + } + + public void testGlobalMetadataUploadWaitTimeSetting() { + // verify default value + assertEquals( + RemoteClusterStateService.GLOBAL_METADATA_UPLOAD_TIMEOUT_DEFAULT, + remoteClusterStateService.getGlobalMetadataUploadTimeout() + ); + + // verify update global metadata upload timeout + int globalMetadataUploadTimeout = randomIntBetween(1, 10); + Settings newSettings = Settings.builder() + .put("cluster.remote_store.state.global_metadata.upload_timeout", globalMetadataUploadTimeout + "s") + .build(); + clusterSettings.applySettings(newSettings); + assertEquals(globalMetadataUploadTimeout, remoteClusterStateService.getGlobalMetadataUploadTimeout().seconds()); + } + + private void mockObjectsForGettingPreviousClusterUUID(Map<String, String> clusterUUIDsPointers) throws IOException { + mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers, false, Collections.emptyMap()); + } + + private void mockObjectsForGettingPreviousClusterUUID( + Map<String, String> clusterUUIDsPointers, + Map<String, Boolean> clusterUUIDCommitted + ) throws IOException { + mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers, false, clusterUUIDCommitted); + } + + private void mockObjectsForGettingPreviousClusterUUID( + Map<String, String> 
clusterUUIDsPointers, + boolean differGlobalMetadata, + Map<String, Boolean> clusterUUIDCommitted + ) throws IOException { + final BlobPath blobPath = mock(BlobPath.class); + when((blobStoreRepository.basePath())).thenReturn(blobPath); + when(blobPath.add(anyString())).thenReturn(blobPath); + when(blobPath.buildAsString()).thenReturn("/blob/path/"); + BlobContainer blobContainer1 = mock(BlobContainer.class); + BlobContainer blobContainer2 = mock(BlobContainer.class); + BlobContainer blobContainer3 = mock(BlobContainer.class); + BlobContainer uuidBlobContainer = mock(BlobContainer.class); + when(blobContainer1.path()).thenReturn(blobPath); + when(blobContainer2.path()).thenReturn(blobPath); + when(blobContainer3.path()).thenReturn(blobPath); + + mockBlobContainerForClusterUUIDs(uuidBlobContainer, clusterUUIDsPointers.keySet()); + List<UploadedIndexMetadata> uploadedIndexMetadataList1 = List.of( + new UploadedIndexMetadata("index1", "index-uuid1", "key1"), + new UploadedIndexMetadata("index2", "index-uuid2", "key2") + ); + final ClusterMetadataManifest clusterManifest1 = generateClusterMetadataManifest( + "cluster-uuid1", + clusterUUIDsPointers.get("cluster-uuid1"), + randomAlphaOfLength(10), + uploadedIndexMetadataList1, + "test-metadata1", + clusterUUIDCommitted.getOrDefault("cluster-uuid1", true) + ); + Settings indexSettings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT).build(); + IndexMetadata indexMetadata1 = IndexMetadata.builder("index1") + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + IndexMetadata indexMetadata2 = IndexMetadata.builder("index2") + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + Metadata metadata1 = Metadata.builder() + .persistentSettings(Settings.builder().put(Metadata.SETTING_READ_ONLY_SETTING.getKey(), true).build()) + .build(); + Map<String, IndexMetadata> indexMetadataMap1 = Map.of("index-uuid1", indexMetadata1, "index-uuid2", indexMetadata2); + mockBlobContainerForGlobalMetadata(blobContainer1, clusterManifest1, metadata1); + mockBlobContainer(blobContainer1, clusterManifest1, indexMetadataMap1, ClusterMetadataManifest.CODEC_V1); + + List<UploadedIndexMetadata> uploadedIndexMetadataList2 = List.of( + new UploadedIndexMetadata("index1", "index-uuid1", "key1"), + new UploadedIndexMetadata("index2", "index-uuid2", "key2") + ); + final ClusterMetadataManifest clusterManifest2 = generateClusterMetadataManifest( + "cluster-uuid2", + clusterUUIDsPointers.get("cluster-uuid2"), + randomAlphaOfLength(10), + uploadedIndexMetadataList2, + "test-metadata2", + clusterUUIDCommitted.getOrDefault("cluster-uuid2", true) + ); + IndexMetadata indexMetadata3 = IndexMetadata.builder("index1") + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + IndexMetadata indexMetadata4 = IndexMetadata.builder("index2") + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + Metadata metadata2 = Metadata.builder() + .persistentSettings(Settings.builder().put(Metadata.SETTING_READ_ONLY_SETTING.getKey(), true).build()) + .build(); + Map<String, IndexMetadata> indexMetadataMap2 = Map.of("index-uuid1", indexMetadata3, "index-uuid2", indexMetadata4); + mockBlobContainerForGlobalMetadata(blobContainer2, clusterManifest2, metadata2); + mockBlobContainer(blobContainer2, clusterManifest2, indexMetadataMap2, ClusterMetadataManifest.CODEC_V1); + + // differGlobalMetadata controls which one of IndexMetadata or Metadata object would be 
different
+        // when comparing the cluster-uuid3 and cluster-uuid1 state.
+        // If set to true, only Metadata differs between cluster-uuid1 and cluster-uuid3.
+        // If set to false, only IndexMetadata differs.
+        // Introducing a difference in exactly one of these, chosen randomly, verifies that the
+        // UUID-trimming logic compares both IndexMetadata and Metadata when deciding whether the
+        // remote state of two different cluster UUIDs is the same.
+        List<UploadedIndexMetadata> uploadedIndexMetadataList3 = differGlobalMetadata
+            ? new ArrayList<>(uploadedIndexMetadataList1)
+            : List.of(new UploadedIndexMetadata("index1", "index-uuid1", "key1"));
+        IndexMetadata indexMetadata5 = IndexMetadata.builder("index1")
+            .settings(indexSettings)
+            .numberOfShards(1)
+            .numberOfReplicas(1)
+            .build();
+        Map<String, IndexMetadata> indexMetadataMap3 = differGlobalMetadata
+            ? new HashMap<>(indexMetadataMap1)
+            : Map.of("index-uuid1", indexMetadata5);
+        Metadata metadata3 = Metadata.builder()
+            .persistentSettings(Settings.builder().put(Metadata.SETTING_READ_ONLY_SETTING.getKey(), !differGlobalMetadata).build())
+            .build();
+
+        final ClusterMetadataManifest clusterManifest3 = generateClusterMetadataManifest(
+            "cluster-uuid3",
+            clusterUUIDsPointers.get("cluster-uuid3"),
+            randomAlphaOfLength(10),
+            uploadedIndexMetadataList3,
+            "test-metadata3",
+            clusterUUIDCommitted.getOrDefault("cluster-uuid3", true)
+        );
+        mockBlobContainerForGlobalMetadata(blobContainer3, clusterManifest3, metadata3);
+        mockBlobContainer(blobContainer3, clusterManifest3, indexMetadataMap3, ClusterMetadataManifest.CODEC_V1);
+
+        ArrayList<BlobContainer> mockBlobContainerOrderedList = new ArrayList<>(
+            List.of(blobContainer1, blobContainer1, blobContainer3, blobContainer3, blobContainer2, blobContainer2)
+        );
+
+        if (differGlobalMetadata) {
+            mockBlobContainerOrderedList.addAll(
+                List.of(blobContainer3, blobContainer1, blobContainer3, blobContainer1, blobContainer1, blobContainer3)
+            );
+        }
+        mockBlobContainerOrderedList.addAll(
+            List.of(blobContainer2, blobContainer1, blobContainer2, blobContainer1, blobContainer1, blobContainer2)
+        );
+        BlobContainer[] mockBlobContainerOrderedArray = new BlobContainer[mockBlobContainerOrderedList.size()];
+        mockBlobContainerOrderedList.toArray(mockBlobContainerOrderedArray);
+        when(blobStore.blobContainer(ArgumentMatchers.any())).thenReturn(uuidBlobContainer, mockBlobContainerOrderedArray);
+        when(blobStoreRepository.getCompressor()).thenReturn(new DeflateCompressor());
+    }
+
+    private ClusterMetadataManifest generateClusterMetadataManifest(
+        String clusterUUID,
+        String previousClusterUUID,
+        String stateUUID,
+        List<UploadedIndexMetadata> uploadedIndexMetadata,
+        String globalMetadataFileName,
+        Boolean isUUIDCommitted
+    ) {
+        return ClusterMetadataManifest.builder()
+            .indices(uploadedIndexMetadata)
+            .clusterTerm(1L)
+            .stateVersion(1L)
+            .stateUUID(stateUUID)
+            .clusterUUID(clusterUUID)
+            .nodeId("nodeA")
+            .opensearchVersion(VersionUtils.randomOpenSearchVersion(random()))
+            .previousClusterUUID(previousClusterUUID)
+            .committed(true)
+            .clusterUUIDCommitted(isUUIDCommitted)
+            .globalMetadataFileName(globalMetadataFileName)
+            .codecVersion(ClusterMetadataManifest.CODEC_V1)
+            .build();
+    }
+
+    private BlobContainer mockBlobStoreObjects() {
+        return mockBlobStoreObjects(BlobContainer.class);
+    }
+
+    private BlobContainer mockBlobStoreObjects(Class<? 
extends BlobContainer> blobContainerClazz) { + final BlobPath blobPath = mock(BlobPath.class); + when((blobStoreRepository.basePath())).thenReturn(blobPath); + when(blobPath.add(anyString())).thenReturn(blobPath); + when(blobPath.buildAsString()).thenReturn("/blob/path/"); + final BlobContainer blobContainer = mock(blobContainerClazz); + when(blobContainer.path()).thenReturn(blobPath); + when(blobStore.blobContainer(any())).thenReturn(blobContainer); + when(blobStoreRepository.getCompressor()).thenReturn(new DeflateCompressor()); + return blobContainer; + } + + private void mockBlobContainerForClusterUUIDs(BlobContainer blobContainer, Set<String> clusterUUIDs) throws IOException { + Map<String, BlobContainer> blobContainerMap = new HashMap<>(); + for (String clusterUUID : clusterUUIDs) { + blobContainerMap.put(clusterUUID, mockBlobStoreObjects()); + } + when(blobContainer.children()).thenReturn(blobContainerMap); + } + + private void mockBlobContainer( + BlobContainer blobContainer, + ClusterMetadataManifest clusterMetadataManifest, + Map<String, IndexMetadata> indexMetadataMap + ) throws IOException { + mockBlobContainer(blobContainer, clusterMetadataManifest, indexMetadataMap, ClusterMetadataManifest.CODEC_V0); + } + + private void mockBlobContainer( + BlobContainer blobContainer, + ClusterMetadataManifest clusterMetadataManifest, + Map<String, IndexMetadata> indexMetadataMap, + int codecVersion + ) throws IOException { + String manifestFileName = codecVersion >= ClusterMetadataManifest.CODEC_V1 + ? "manifest__manifestFileName__abcd__abcd__abcd__1" + : "manifestFileName"; + BlobMetadata blobMetadata = new PlainBlobMetadata(manifestFileName, 1); + when(blobContainer.listBlobsByPrefixInSortedOrder("manifest" + DELIMITER, 1, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC)) + .thenReturn(Arrays.asList(blobMetadata)); + + BytesReference bytes = RemoteClusterStateService.CLUSTER_METADATA_MANIFEST_FORMAT.serialize( + clusterMetadataManifest, + manifestFileName, + blobStoreRepository.getCompressor(), + FORMAT_PARAMS + ); + when(blobContainer.readBlob(manifestFileName)).thenReturn(new ByteArrayInputStream(bytes.streamInput().readAllBytes())); + + clusterMetadataManifest.getIndices().forEach(uploadedIndexMetadata -> { + try { + IndexMetadata indexMetadata = indexMetadataMap.get(uploadedIndexMetadata.getIndexUUID()); + if (indexMetadata == null) { + return; + } + String fileName = uploadedIndexMetadata.getUploadedFilename(); + when(blobContainer.readBlob(fileName + ".dat")).thenAnswer((invocationOnMock) -> { + BytesReference bytesIndexMetadata = RemoteClusterStateService.INDEX_METADATA_FORMAT.serialize( + indexMetadata, + fileName, + blobStoreRepository.getCompressor(), + FORMAT_PARAMS + ); + return new ByteArrayInputStream(bytesIndexMetadata.streamInput().readAllBytes()); + }); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + } + + private void mockBlobContainerForGlobalMetadata( + BlobContainer blobContainer, + ClusterMetadataManifest clusterMetadataManifest, + Metadata metadata + ) throws IOException { + String mockManifestFileName = "manifest__1__2__C__456__1"; + BlobMetadata blobMetadata = new PlainBlobMetadata(mockManifestFileName, 1); + when( + blobContainer.listBlobsByPrefixInSortedOrder( + "manifest" + RemoteClusterStateService.DELIMITER, + 1, + BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC + ) + ).thenReturn(Arrays.asList(blobMetadata)); + + BytesReference bytes = RemoteClusterStateService.CLUSTER_METADATA_MANIFEST_FORMAT.serialize( + clusterMetadataManifest, + 
mockManifestFileName, + blobStoreRepository.getCompressor(), + FORMAT_PARAMS + ); + when(blobContainer.readBlob(mockManifestFileName)).thenReturn(new ByteArrayInputStream(bytes.streamInput().readAllBytes())); + + String[] splitPath = clusterMetadataManifest.getGlobalMetadataFileName().split("/"); + when(blobContainer.readBlob(RemoteClusterStateService.GLOBAL_METADATA_FORMAT.blobName(splitPath[splitPath.length - 1]))).thenAnswer( + (invocationOnMock) -> { + BytesReference bytesGlobalMetadata = RemoteClusterStateService.GLOBAL_METADATA_FORMAT.serialize( + metadata, + "global-metadata-file", + blobStoreRepository.getCompressor(), + FORMAT_PARAMS + ); + return new ByteArrayInputStream(bytesGlobalMetadata.streamInput().readAllBytes()); + } + ); + } + + private static ClusterState.Builder generateClusterStateWithGlobalMetadata() { + final Settings clusterSettings = Settings.builder().put("cluster.blocks.read_only", true).build(); + final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder().term(1L).build(); + + return ClusterState.builder(ClusterName.DEFAULT) + .version(1L) + .stateUUID("state-uuid") + .metadata( + Metadata.builder() + .persistentSettings(clusterSettings) + .clusterUUID("cluster-uuid") + .coordinationMetadata(coordinationMetadata) + .build() + ); + } + + private static ClusterState.Builder generateClusterStateWithOneIndex() { + final Index index = new Index("test-index", "index-uuid"); + final Settings idxSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + .build(); + final IndexMetadata indexMetadata = new IndexMetadata.Builder(index.getName()).settings(idxSettings) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); + final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder().term(1L).build(); + + return ClusterState.builder(ClusterName.DEFAULT) + .version(1L) + .stateUUID("state-uuid") + .metadata( + Metadata.builder() + .version(randomNonNegativeLong()) + .put(indexMetadata, true) + .clusterUUID("cluster-uuid") + .coordinationMetadata(coordinationMetadata) + .build() + ); + } + + private static DiscoveryNodes nodesWithLocalNodeClusterManager() { + return DiscoveryNodes.builder().clusterManagerNodeId("cluster-manager-id").localNodeId("cluster-manager-id").build(); + } + +} diff --git a/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java b/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java index b36b3110b2123..c34f13041cb11 100644 --- a/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java +++ b/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java @@ -35,25 +35,26 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.opensearch.common.UUIDs; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.network.NetworkService; import org.opensearch.common.network.NetworkUtils; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.util.MockBigArrays; import org.opensearch.common.util.MockPageCacheRecycler; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; 
+import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.rest.RestChannel; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; -import org.opensearch.core.rest.RestStatus; import org.opensearch.tasks.Task; -import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.MockLogAppender; +import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.junit.annotations.TestLogging; import org.opensearch.test.rest.FakeRestRequest; import org.opensearch.threadpool.TestThreadPool; @@ -173,7 +174,8 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th threadPool, xContentRegistry(), dispatcher, - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + NoopTracer.INSTANCE ) { @Override @@ -238,7 +240,8 @@ public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, channel.sendResponse(emptyResponse(RestStatus.BAD_REQUEST)); } }, - clusterSettings + clusterSettings, + NoopTracer.INSTANCE ) { @Override protected HttpServerChannel bind(InetSocketAddress hostAddress) { diff --git a/server/src/test/java/org/opensearch/http/CorsHandlerTests.java b/server/src/test/java/org/opensearch/http/CorsHandlerTests.java index a6be3ef0828fc..7dbb55c29d66c 100644 --- a/server/src/test/java/org/opensearch/http/CorsHandlerTests.java +++ b/server/src/test/java/org/opensearch/http/CorsHandlerTests.java @@ -32,12 +32,12 @@ package org.opensearch.http; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsException; import org.opensearch.core.common.Strings; -import org.opensearch.rest.RestRequest; +import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.rest.RestStatus; +import org.opensearch.rest.RestRequest; import org.opensearch.test.OpenSearchTestCase; import java.util.Arrays; diff --git a/server/src/test/java/org/opensearch/http/DefaultRestChannelTests.java b/server/src/test/java/org/opensearch/http/DefaultRestChannelTests.java index 1ffe0538edaad..e94efdd5ea045 100644 --- a/server/src/test/java/org/opensearch/http/DefaultRestChannelTests.java +++ b/server/src/test/java/org/opensearch/http/DefaultRestChannelTests.java @@ -33,33 +33,32 @@ package org.opensearch.http; import org.opensearch.Build; -import org.opensearch.action.ActionListener; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.bytes.ReleasableBytesReference; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.io.stream.ReleasableBytesStreamOutput; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.BigArrays; -import org.opensearch.core.common.util.ByteArray; import org.opensearch.common.util.MockBigArrays; import org.opensearch.common.util.MockPageCacheRecycler; import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.common.lease.Releasable; +import org.opensearch.core.action.ActionListener; 
+import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.util.ByteArray; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestChannel; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; -import org.opensearch.core.rest.RestStatus; import org.opensearch.tasks.Task; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.junit.After; import org.junit.Before; -import org.mockito.ArgumentCaptor; import java.io.IOException; import java.nio.channels.ClosedChannelException; @@ -69,6 +68,8 @@ import java.util.Map; import java.util.Objects; +import org.mockito.ArgumentCaptor; + import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; diff --git a/server/src/test/java/org/opensearch/http/HttpInfoTests.java b/server/src/test/java/org/opensearch/http/HttpInfoTests.java index d03ae9a2a1ccb..8c5710af39661 100644 --- a/server/src/test/java/org/opensearch/http/HttpInfoTests.java +++ b/server/src/test/java/org/opensearch/http/HttpInfoTests.java @@ -32,17 +32,18 @@ package org.opensearch.http; -import java.io.IOException; -import java.net.InetAddress; -import java.util.Map; import org.opensearch.common.network.NetworkAddress; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.test.OpenSearchTestCase; +import java.io.IOException; +import java.net.InetAddress; +import java.util.Map; + public class HttpInfoTests extends OpenSearchTestCase { public void testCorrectlyDisplayPublishedCname() throws Exception { diff --git a/server/src/test/java/org/opensearch/http/TestHttpRequest.java b/server/src/test/java/org/opensearch/http/TestHttpRequest.java index 57b107c5300ea..0a40cd858eb82 100644 --- a/server/src/test/java/org/opensearch/http/TestHttpRequest.java +++ b/server/src/test/java/org/opensearch/http/TestHttpRequest.java @@ -34,8 +34,8 @@ import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.rest.RestRequest; import org.opensearch.core.rest.RestStatus; +import org.opensearch.rest.RestRequest; import java.util.Arrays; import java.util.HashMap; diff --git a/server/src/test/java/org/opensearch/index/IndexModuleTests.java b/server/src/test/java/org/opensearch/index/IndexModuleTests.java index 95d0ae706b286..97bc822be7d51 100644 --- a/server/src/test/java/org/opensearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/opensearch/index/IndexModuleTests.java @@ -33,7 +33,6 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.standard.StandardTokenizer; -import org.apache.lucene.tests.index.AssertingDirectoryReader; import 
org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.FieldInvertState; import org.apache.lucene.index.Term; @@ -44,6 +43,7 @@ import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.Similarity; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.AssertingDirectoryReader; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; @@ -56,17 +56,20 @@ import org.opensearch.common.SetOnce; import org.opensearch.common.SetOnce.AlreadySetException; import org.opensearch.common.UUIDs; -import org.opensearch.common.breaker.CircuitBreaker; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsException; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.PageCacheRecycler; -import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.indices.breaker.CircuitBreakerService; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; import org.opensearch.env.ShardLock; @@ -84,10 +87,10 @@ import org.opensearch.index.fielddata.IndexFieldDataCache; import org.opensearch.index.mapper.ParsedDocument; import org.opensearch.index.mapper.Uid; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.IndexingOperationListener; import org.opensearch.index.shard.SearchOperationListener; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.ShardPath; import org.opensearch.index.similarity.NonNegativeScoresSimilarity; import org.opensearch.index.similarity.SimilarityService; @@ -99,25 +102,25 @@ import org.opensearch.indices.IndicesModule; import org.opensearch.indices.IndicesQueryCache; import org.opensearch.indices.analysis.AnalysisModule; -import org.opensearch.indices.breaker.CircuitBreakerService; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.indices.mapper.MapperRegistry; +import org.opensearch.indices.recovery.DefaultRecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.plugins.IndexStorePlugin; import org.opensearch.repositories.RepositoriesService; import org.opensearch.script.ScriptService; import org.opensearch.search.internal.ReaderContext; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.ClusterServiceUtils; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.engine.MockEngineFactory; import org.opensearch.threadpool.TestThreadPool; import 
org.opensearch.threadpool.ThreadPool; -import org.hamcrest.Matchers; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportService; +import org.hamcrest.Matchers; import java.io.IOException; import java.util.Collections; @@ -203,7 +206,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); repositoriesService = new RepositoriesService( settings, @@ -231,7 +235,8 @@ private IndexService newIndexService(IndexModule module) throws IOException { return new RemoteBlobStoreInternalTranslogFactory( repositoriesServiceReference::get, threadPool, - indexSettings.getRemoteStoreTranslogRepository() + indexSettings.getRemoteStoreTranslogRepository(), + new RemoteTranslogTransferTracker(shardRouting.shardId(), 10) ); } return new InternalTranslogFactory(); @@ -254,7 +259,10 @@ private IndexService newIndexService(IndexModule module) throws IOException { () -> false, null, new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService, threadPool), - translogFactorySupplier + translogFactorySupplier, + () -> IndexSettings.DEFAULT_REFRESH_INTERVAL, + () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, + DefaultRecoverySettings.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/index/IndexServiceTestUtils.java b/server/src/test/java/org/opensearch/index/IndexServiceTestUtils.java new file mode 100644 index 0000000000000..736c0c0786fe9 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/IndexServiceTestUtils.java @@ -0,0 +1,20 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index; + +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.AbstractAsyncTask; + +public final class IndexServiceTestUtils { + private IndexServiceTestUtils() {} + + public static void setTrimTranslogTaskInterval(IndexService indexService, TimeValue interval) { + ((AbstractAsyncTask) indexService.getTrimTranslogTask()).setInterval(interval); + } +} diff --git a/server/src/test/java/org/opensearch/index/IndexServiceTests.java b/server/src/test/java/org/opensearch/index/IndexServiceTests.java index 99207040207fc..14451ef21726e 100644 --- a/server/src/test/java/org/opensearch/index/IndexServiceTests.java +++ b/server/src/test/java/org/opensearch/index/IndexServiceTests.java @@ -33,18 +33,19 @@ package org.opensearch.index; import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.SortField; import org.apache.lucene.search.TopDocs; +import org.opensearch.Version; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.Strings; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.engine.Engine; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.shard.IndexShard; @@ -52,8 +53,8 @@ import org.opensearch.index.translog.Translog; import org.opensearch.indices.IndicesService; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.threadpool.ThreadPool; import java.io.IOException; @@ -80,7 +81,7 @@ public static CompressedXContent filter(QueryBuilder filterBuilder) throws IOExc XContentBuilder builder = XContentFactory.jsonBuilder(); filterBuilder.toXContent(builder, ToXContent.EMPTY_PARAMS); builder.close(); - return new CompressedXContent(Strings.toString(builder)); + return new CompressedXContent(builder.toString()); } public void testBaseAsyncTask() throws Exception { @@ -301,7 +302,7 @@ public void testRefreshActuallyWorks() throws Exception { assertEquals(1000, refreshTask.getInterval().millis()); assertTrue(indexService.getRefreshTask().mustReschedule()); IndexShard shard = indexService.getShard(0); - client().prepareIndex("test").setId("0").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); + client().prepareIndex("test").setId("0").setSource("{\"foo\": \"bar\"}", MediaTypeRegistry.JSON).get(); // now disable the refresh client().admin() .indices() @@ -322,7 +323,7 @@ public void testRefreshActuallyWorks() throws Exception { }); assertFalse(refreshTask.isClosed()); // refresh every millisecond - client().prepareIndex("test").setId("1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); + client().prepareIndex("test").setId("1").setSource("{\"foo\": \"bar\"}", MediaTypeRegistry.JSON).get(); client().admin() .indices() .prepareUpdateSettings("test") @@ -336,7 +337,7 @@ public void testRefreshActuallyWorks() throws Exception { 
assertEquals(2, search.totalHits.value); } }); - client().prepareIndex("test").setId("2").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); + client().prepareIndex("test").setId("2").setSource("{\"foo\": \"bar\"}", MediaTypeRegistry.JSON).get(); assertBusy(() -> { // this one becomes visible due to the scheduled refresh try (Engine.Searcher searcher = shard.acquireSearcher("test")) { @@ -354,7 +355,7 @@ public void testAsyncFsyncActuallyWorks() throws Exception { IndexService indexService = createIndex("test", settings); ensureGreen("test"); assertTrue(indexService.getRefreshTask().mustReschedule()); - client().prepareIndex("test").setId("1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); + client().prepareIndex("test").setId("1").setSource("{\"foo\": \"bar\"}", MediaTypeRegistry.JSON).get(); IndexShard shard = indexService.getShard(0); assertBusy(() -> assertFalse(shard.isSyncNeeded())); } @@ -376,7 +377,7 @@ public void testRescheduleAsyncFsync() throws Exception { assertNotNull(indexService.getFsyncTask()); assertTrue(indexService.getFsyncTask().mustReschedule()); - client().prepareIndex("test").setId("1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); + client().prepareIndex("test").setId("1").setSource("{\"foo\": \"bar\"}", MediaTypeRegistry.JSON).get(); assertNotNull(indexService.getFsyncTask()); final IndexShard shard = indexService.getShard(0); assertBusy(() -> assertFalse(shard.isSyncNeeded())); @@ -403,7 +404,7 @@ public void testAsyncTranslogTrimActuallyWorks() throws Exception { IndexService indexService = createIndex("test", settings); ensureGreen("test"); assertTrue(indexService.getTrimTranslogTask().mustReschedule()); - client().prepareIndex("test").setId("1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); + client().prepareIndex("test").setId("1").setSource("{\"foo\": \"bar\"}", MediaTypeRegistry.JSON).get(); client().admin().indices().prepareFlush("test").get(); client().admin() .indices() @@ -430,7 +431,11 @@ public void testAsyncTranslogTrimTaskOnClosedIndex() throws Exception { int translogOps = 0; final int numDocs = scaledRandomIntBetween(10, 100); for (int i = 0; i < numDocs; i++) { - client().prepareIndex().setIndex(indexName).setId(String.valueOf(i)).setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); + client().prepareIndex() + .setIndex(indexName) + .setId(String.valueOf(i)) + .setSource("{\"foo\": \"bar\"}", MediaTypeRegistry.JSON) + .get(); translogOps++; if (randomBoolean()) { client().admin().indices().prepareFlush(indexName).get(); @@ -447,12 +452,7 @@ public void testAsyncTranslogTrimTaskOnClosedIndex() throws Exception { assertTrue(indexService.getTrimTranslogTask().mustReschedule()); final Engine readOnlyEngine = getEngine(indexService.getShard(0)); - assertBusy( - () -> assertThat( - readOnlyEngine.translogManager().getTranslogStats().getTranslogSizeInBytes(), - equalTo((long) Translog.DEFAULT_HEADER_SIZE_IN_BYTES) - ) - ); + assertBusy(() -> assertTrue(isTranslogEmpty(readOnlyEngine))); assertAcked(client().admin().indices().prepareOpen("test").setWaitForActiveShards(ActiveShardCount.DEFAULT)); @@ -462,6 +462,12 @@ public void testAsyncTranslogTrimTaskOnClosedIndex() throws Exception { assertThat(translog.stats().estimatedNumberOfOperations(), equalTo(0)); } + boolean isTranslogEmpty(Engine engine) { + long tlogSize = engine.translogManager().getTranslogStats().getTranslogSizeInBytes(); + // translog contains 1(or 2 in some corner cases) empty readers. 
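+        // Each translog generation file starts with a fixed-size header even when it contains no
+        // operations, so an otherwise-empty translog still reports one or two headers' worth of bytes.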
+        return tlogSize == Translog.DEFAULT_HEADER_SIZE_IN_BYTES || tlogSize == 2 * Translog.DEFAULT_HEADER_SIZE_IN_BYTES;
+    }
+
     public void testIllegalFsyncInterval() {
         Settings settings = Settings.builder()
             .put(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey(), "0ms") // disable
@@ -523,4 +529,76 @@ public void testUpdateRemoteTranslogBufferIntervalDynamically() {
         indexMetadata = client().admin().cluster().prepareState().execute().actionGet().getState().metadata().index("test");
         assertEquals("20s", indexMetadata.getSettings().get(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey()));
     }
+
+    public void testIndexSort() {
+        Settings settings = Settings.builder()
+            .put(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey(), "0ms") // disable
+            .putList("index.sort.field", "sortfield")
+            .build();
+        try {
+            // An integer sort field should keep the INT sort type
+            IndexService index = createIndex("test", settings, createTestMapping("integer"));
+            assertTrue(index.getIndexSortSupplier().get().getSort()[0].getType() == SortField.Type.INT);
+
+            // A long sort field should keep the LONG sort type
+            index = createIndex("test", settings, createTestMapping("long"));
+            assertTrue(index.getIndexSortSupplier().get().getSort()[0].getType() == SortField.Type.LONG);
+
+            // A float sort field should keep the FLOAT sort type
+            index = createIndex("test", settings, createTestMapping("float"));
+            assertTrue(index.getIndexSortSupplier().get().getSort()[0].getType() == SortField.Type.FLOAT);
+
+            // A double sort field should keep the DOUBLE sort type
+            index = createIndex("test", settings, createTestMapping("double"));
+            assertTrue(index.getIndexSortSupplier().get().getSort()[0].getType() == SortField.Type.DOUBLE);
+
+            // A string sort field should keep the STRING sort type
+            index = createIndex("test", settings, createTestMapping("string"));
+            assertTrue(index.getIndexSortSupplier().get().getSort()[0].getType() == SortField.Type.STRING);
+        } catch (IllegalArgumentException ex) {
+            assertEquals("failed to parse value [0ms] for setting [index.translog.sync_interval], must be >= [100ms]", ex.getMessage());
+        }
+    }
+
+    public void testIndexSortBackwardCompatible() {
+        Settings settings = Settings.builder()
+            .put(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey(), "0ms") // disable
+            .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), Version.V_2_6_1)
+            .putList("index.sort.field", "sortfield")
+            .build();
+        try {
+            // On an index created before the fix, an integer sort field is converted to the LONG sort type
+            IndexService index = createIndex("test", settings, createTestMapping("integer"));
+            assertTrue(index.getIndexSortSupplier().get().getSort()[0].getType() == SortField.Type.LONG);
+
+            // A long sort field should keep the LONG sort type
+            index = createIndex("test", settings, createTestMapping("long"));
+            assertTrue(index.getIndexSortSupplier().get().getSort()[0].getType() == SortField.Type.LONG);
+
+            // A float sort field should keep the FLOAT sort type
+            index = createIndex("test", settings, createTestMapping("float"));
+            assertTrue(index.getIndexSortSupplier().get().getSort()[0].getType() == SortField.Type.FLOAT);
+
+            // A double sort field should keep the DOUBLE sort type
+            index = createIndex("test", settings, createTestMapping("double"));
+            assertTrue(index.getIndexSortSupplier().get().getSort()[0].getType() == SortField.Type.DOUBLE);
+
+            // A string sort field should keep the STRING sort type
+            index = createIndex("test", settings, createTestMapping("string"));
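
Taken together, the two sort tests document a version gate: integer sort fields keep SortField.Type.INT on indices created at current versions, but are widened to LONG when the index was created on an older version (the test pins V_2_6_1), so sort definitions written by old nodes remain readable. A minimal standalone illustration of that gate, in which the boolean flag is a stand-in for the real created-version check (whose location isn't shown in this diff); the test's final STRING assertion follows the sketch.

```java
import org.apache.lucene.search.SortField;

public class IndexSortCompatSketch {
    // Version gate as the tests above describe it: old indices (e.g. created
    // on 2.6.1) widen integer sort fields to LONG; newer indices keep INT.
    static SortField.Type integerSortType(boolean createdBeforeFix) {
        return createdBeforeFix ? SortField.Type.LONG : SortField.Type.INT;
    }

    public static void main(String[] args) {
        System.out.println(integerSortType(true));  // LONG, matching testIndexSortBackwardCompatible
        System.out.println(integerSortType(false)); // INT, matching testIndexSort
    }
}
```
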
+            assertTrue(index.getIndexSortSupplier().get().getSort()[0].getType() == SortField.Type.STRING);
+        } catch (IllegalArgumentException ex) {
+            assertEquals("failed to parse value [0ms] for setting [index.translog.sync_interval], must be >= [100ms]", ex.getMessage());
+        }
+    }
+
+    private static String createTestMapping(String type) {
+        return "    \"properties\": {\n"
+            + "        \"test\": {\n"
+            + "            \"type\": \"text\"\n"
+            + "        },\n"
+            + "        \"sortfield\": {\n"
+            + "            \"type\": \"" + type + "\"\n"
+            + "        }\n"
+            + "    }";
+    }
 }
diff --git a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java
index e42e9b4970081..e4ce879a5ec5e 100644
--- a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java
+++ b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java
@@ -41,9 +41,9 @@
 import org.opensearch.common.settings.Setting.Property;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.settings.SettingsException;
-import org.opensearch.common.unit.ByteSizeValue;
 import org.opensearch.common.unit.TimeValue;
 import org.opensearch.common.util.FeatureFlags;
+import org.opensearch.core.common.unit.ByteSizeValue;
 import org.opensearch.index.translog.Translog;
 import org.opensearch.indices.replication.common.ReplicationType;
 import org.opensearch.search.pipeline.SearchPipelineService;
@@ -60,10 +60,10 @@
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.Function;
 
+import static org.opensearch.index.store.remote.directory.RemoteSnapshotDirectory.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY_MINIMUM_VERSION;
 import static org.hamcrest.CoreMatchers.equalTo;
 import static org.hamcrest.core.StringContains.containsString;
 import static org.hamcrest.object.HasToString.hasToString;
-import static org.opensearch.index.store.remote.directory.RemoteSnapshotDirectory.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY_MINIMUM_VERSION;
 
 public class IndexSettingsTests extends OpenSearchTestCase {
 
@@ -779,6 +779,7 @@ public void testRemoteStoreExplicitSetting() {
             "index",
             Settings.builder()
                 .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
+                .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT)
                 .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true)
                 .build()
         );
@@ -795,22 +796,6 @@ public void testRemoteTranslogStoreDefaultSetting() {
         assertFalse(settings.isRemoteTranslogStoreEnabled());
     }
 
-    public void testUpdateRemoteStoreFails() {
-        Set<Setting<?>> remoteStoreSettingSet = new HashSet<>();
-        remoteStoreSettingSet.add(IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING);
-        IndexScopedSettings settings = new IndexScopedSettings(Settings.EMPTY, remoteStoreSettingSet);
-        SettingsException error = expectThrows(
-            SettingsException.class,
-            () -> settings.updateSettings(
-                Settings.builder().put("index.remote_store.enabled", randomBoolean()).build(),
-                Settings.builder(),
-                Settings.builder(),
-                "index"
-            )
-        );
-        assertEquals(error.getMessage(), "final index setting [index.remote_store.enabled], not updateable");
-    }
-
     public void testEnablingRemoteStoreFailsWhenReplicationTypeIsDocument() {
         Settings indexSettings = Settings.builder()
             .put("index.replication.type", ReplicationType.DOCUMENT)
@@ -846,6 +831,7 @@ public void testRemoteRepositoryExplicitSetting() {
             "index",
             Settings.builder()
                 .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
+                .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT)
                 .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true)
.put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, "repo1") .build() @@ -854,25 +840,6 @@ public void testRemoteRepositoryExplicitSetting() { assertEquals("repo1", settings.getRemoteStoreRepository()); } - public void testUpdateRemoteRepositoryFails() { - Set<Setting<?>> remoteStoreSettingSet = new HashSet<>(); - remoteStoreSettingSet.add(IndexMetadata.INDEX_REMOTE_STORE_REPOSITORY_SETTING); - IndexScopedSettings settings = new IndexScopedSettings(Settings.EMPTY, remoteStoreSettingSet); - SettingsException error = expectThrows( - SettingsException.class, - () -> settings.updateSettings( - Settings.builder().put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, randomUnicodeOfLength(10)).build(), - Settings.builder(), - Settings.builder(), - "index" - ) - ); - assertEquals( - error.getMessage(), - String.format(Locale.ROOT, "final index setting [%s], not updateable", IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY) - ); - } - public void testSetRemoteRepositoryFailsWhenRemoteStoreIsNotEnabled() { Settings indexSettings = Settings.builder() .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) @@ -881,7 +848,7 @@ public void testSetRemoteRepositoryFailsWhenRemoteStoreIsNotEnabled() { .build(); IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, - () -> IndexMetadata.INDEX_REMOTE_STORE_REPOSITORY_SETTING.get(indexSettings) + () -> IndexMetadata.INDEX_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING.get(indexSettings) ); assertEquals( String.format( @@ -902,7 +869,7 @@ public void testSetRemoteRepositoryFailsWhenEmptyString() { .build(); IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, - () -> IndexMetadata.INDEX_REMOTE_STORE_REPOSITORY_SETTING.get(indexSettings) + () -> IndexMetadata.INDEX_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING.get(indexSettings) ); assertEquals( String.format( diff --git a/server/src/test/java/org/opensearch/index/IndexTests.java b/server/src/test/java/org/opensearch/index/IndexTests.java index 5a6bcab775ad4..aa4661b04a25f 100644 --- a/server/src/test/java/org/opensearch/index/IndexTests.java +++ b/server/src/test/java/org/opensearch/index/IndexTests.java @@ -34,20 +34,20 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.common.UUIDs; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.index.Index; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; -import static org.apache.lucene.tests.util.TestUtil.randomSimpleString; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; +import static org.apache.lucene.tests.util.TestUtil.randomSimpleString; public class IndexTests extends OpenSearchTestCase { public void testToString() { diff --git a/server/src/test/java/org/opensearch/index/IndexingPressureServiceTests.java b/server/src/test/java/org/opensearch/index/IndexingPressureServiceTests.java index 2220434f9b9ed..0b657c1c9745f 100644 --- a/server/src/test/java/org/opensearch/index/IndexingPressureServiceTests.java +++ b/server/src/test/java/org/opensearch/index/IndexingPressureServiceTests.java @@ -8,7 +8,6 @@ package org.opensearch.index; -import org.junit.Before; import 
org.opensearch.action.DocWriteRequest; import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.action.bulk.BulkItemRequest; @@ -18,14 +17,15 @@ import org.opensearch.action.support.WriteRequest; import org.opensearch.client.Requests; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.lease.Releasable; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.stats.IndexingPressurePerShardStats; import org.opensearch.index.stats.IndexingPressureStats; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; public class IndexingPressureServiceTests extends OpenSearchTestCase { diff --git a/server/src/test/java/org/opensearch/index/IndexingPressureTests.java b/server/src/test/java/org/opensearch/index/IndexingPressureTests.java index dd2f381a7fc64..1aa20506222d1 100644 --- a/server/src/test/java/org/opensearch/index/IndexingPressureTests.java +++ b/server/src/test/java/org/opensearch/index/IndexingPressureTests.java @@ -32,8 +32,8 @@ package org.opensearch.index; -import org.opensearch.common.settings.Settings; import org.opensearch.common.lease.Releasable; +import org.opensearch.common.settings.Settings; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.index.stats.IndexingPressureStats; import org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/index/IndexingSlowLogTests.java b/server/src/test/java/org/opensearch/index/IndexingSlowLogTests.java index 6823bb5a8225f..2d1c633f95ccb 100644 --- a/server/src/test/java/org/opensearch/index/IndexingSlowLogTests.java +++ b/server/src/test/java/org/opensearch/index/IndexingSlowLogTests.java @@ -33,6 +33,7 @@ package org.opensearch.index; import com.fasterxml.jackson.core.JsonParseException; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.core.LoggerContext; @@ -41,30 +42,32 @@ import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.UUIDs; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.logging.Loggers; import org.opensearch.common.logging.MockAppender; +import org.opensearch.common.logging.SlowLogLevel; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexingSlowLog.IndexingSlowLogMessage; import org.opensearch.index.engine.Engine; import org.opensearch.index.engine.InternalEngineTests; import org.opensearch.index.mapper.ParsedDocument; import org.opensearch.index.mapper.SeqNoFieldMapper; import org.opensearch.index.mapper.Uid; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.test.OpenSearchTestCase; import org.junit.AfterClass; import org.junit.BeforeClass; -import org.mockito.Mockito; import java.io.IOException; 
import java.io.UncheckedIOException; +import org.mockito.Mockito; + import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.emptyOrNullString; import static org.hamcrest.Matchers.equalTo; @@ -227,7 +230,7 @@ public void testSlowLogMessageHasJsonFields() throws IOException { "routingValue", null, source, - XContentType.JSON, + MediaTypeRegistry.JSON, null ); Index index = new Index("foo", "123"); @@ -255,7 +258,7 @@ public void testSlowLogParsedDocumentPrinterSourceToLog() throws IOException { null, null, source, - XContentType.JSON, + MediaTypeRegistry.JSON, null ); Index index = new Index("foo", "123"); @@ -285,7 +288,7 @@ public void testSlowLogParsedDocumentPrinterSourceToLog() throws IOException { null, null, source, - XContentType.JSON, + MediaTypeRegistry.JSON, null ); @@ -410,7 +413,7 @@ public void testLevelSetting() { assertNotNull(ex.getCause()); assertThat(ex.getCause(), instanceOf(IllegalArgumentException.class)); final IllegalArgumentException cause = (IllegalArgumentException) ex.getCause(); - assertThat(cause, hasToString(containsString("No enum constant org.opensearch.index.SlowLogLevel.NOT A LEVEL"))); + assertThat(cause, hasToString(containsString("No enum constant org.opensearch.common.logging.SlowLogLevel.NOT A LEVEL"))); } assertEquals(SlowLogLevel.TRACE, log.getLevel()); diff --git a/server/src/test/java/org/opensearch/index/MergePolicySettingsTests.java b/server/src/test/java/org/opensearch/index/MergePolicySettingsTests.java index b1dba0c22c075..32c4c048d77ba 100644 --- a/server/src/test/java/org/opensearch/index/MergePolicySettingsTests.java +++ b/server/src/test/java/org/opensearch/index/MergePolicySettingsTests.java @@ -31,10 +31,11 @@ package org.opensearch.index; +import org.apache.lucene.index.LogByteSizeMergePolicy; import org.apache.lucene.index.NoMergePolicy; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.index.shard.ShardId; import org.opensearch.test.OpenSearchTestCase; @@ -49,17 +50,17 @@ public class MergePolicySettingsTests extends OpenSearchTestCase { protected final ShardId shardId = new ShardId("index", "_na_", 1); public void testCompoundFileSettings() throws IOException { - assertThat(new MergePolicyConfig(logger, indexSettings(Settings.EMPTY)).getMergePolicy().getNoCFSRatio(), equalTo(0.1)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(true))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(0.5))).getMergePolicy().getNoCFSRatio(), equalTo(0.5)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(1.0))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build("true"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build("True"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build("False"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build("false"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(false))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); - assertThat(new MergePolicyConfig(logger, 
indexSettings(build(0))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(0.0))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(Settings.EMPTY)).getMergePolicy().getNoCFSRatio(), equalTo(0.1)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(true))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(0.5))).getMergePolicy().getNoCFSRatio(), equalTo(0.5)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(1.0))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build("true"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build("True"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build("False"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build("false"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(false))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(0))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(0.0))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); } private static IndexSettings indexSettings(Settings settings) { @@ -67,33 +68,197 @@ private static IndexSettings indexSettings(Settings settings) { } public void testNoMerges() { - MergePolicyConfig mp = new MergePolicyConfig( + TieredMergePolicyProvider tmp = new TieredMergePolicyProvider( logger, - indexSettings(Settings.builder().put(MergePolicyConfig.INDEX_MERGE_ENABLED, false).build()) + indexSettings(Settings.builder().put(MergePolicyProvider.INDEX_MERGE_ENABLED, false).build()) ); - assertTrue(mp.getMergePolicy() instanceof NoMergePolicy); + LogByteSizeMergePolicyProvider lbsmp = new LogByteSizeMergePolicyProvider( + logger, + indexSettings(Settings.builder().put(MergePolicyProvider.INDEX_MERGE_ENABLED, false).build()) + ); + assertTrue(tmp.getMergePolicy() instanceof NoMergePolicy); + assertTrue(lbsmp.getMergePolicy() instanceof NoMergePolicy); } public void testUpdateSettings() throws IOException { - IndexSettings indexSettings = indexSettings(EMPTY_SETTINGS); - assertThat(indexSettings.getMergePolicy().getNoCFSRatio(), equalTo(0.1)); + Settings settings = Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.TIERED.getValue()) + .build(); + IndexSettings indexSettings = indexSettings(settings); + assertThat(indexSettings.getMergePolicy(false).getNoCFSRatio(), equalTo(0.1)); indexSettings = indexSettings(build(0.9)); - assertThat((indexSettings.getMergePolicy()).getNoCFSRatio(), equalTo(0.9)); + assertThat((indexSettings.getMergePolicy(false)).getNoCFSRatio(), equalTo(0.9)); indexSettings.updateIndexMetadata(newIndexMeta("index", build(0.1))); - assertThat((indexSettings.getMergePolicy()).getNoCFSRatio(), equalTo(0.1)); + assertThat((indexSettings.getMergePolicy(false)).getNoCFSRatio(), equalTo(0.1)); indexSettings.updateIndexMetadata(newIndexMeta("index", build(0.0))); - assertThat((indexSettings.getMergePolicy()).getNoCFSRatio(), equalTo(0.0)); + 
assertThat((indexSettings.getMergePolicy(false)).getNoCFSRatio(), equalTo(0.0)); indexSettings.updateIndexMetadata(newIndexMeta("index", build("true"))); - assertThat((indexSettings.getMergePolicy()).getNoCFSRatio(), equalTo(1.0)); + assertThat((indexSettings.getMergePolicy(false)).getNoCFSRatio(), equalTo(1.0)); indexSettings.updateIndexMetadata(newIndexMeta("index", build("false"))); - assertThat((indexSettings.getMergePolicy()).getNoCFSRatio(), equalTo(0.0)); + assertThat((indexSettings.getMergePolicy(false)).getNoCFSRatio(), equalTo(0.0)); + } + + public void testDefaultMergePolicy() throws IOException { + IndexSettings indexSettings = indexSettings(EMPTY_SETTINGS); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof OpenSearchTieredMergePolicy); + } + + public void testMergePolicyPrecedence() throws IOException { + // 1. INDEX_MERGE_POLICY is not set + // assert defaults + IndexSettings indexSettings = indexSettings(EMPTY_SETTINGS); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof OpenSearchTieredMergePolicy); + + // 1.1 node setting TIME_SERIES_INDEX_MERGE_POLICY is set as log_byte_size + // assert index policy is tiered whereas time series index policy is log_byte_size + Settings nodeSettings = Settings.builder() + .put(IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build(); + indexSettings = new IndexSettings(newIndexMeta("test", Settings.EMPTY), nodeSettings); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof LogByteSizeMergePolicy); + + // 1.2 node setting TIME_SERIES_INDEX_MERGE_POLICY is set as tiered + // assert both index and time series index policy is tiered + nodeSettings = Settings.builder() + .put(IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.TIERED.getValue()) + .build(); + indexSettings = new IndexSettings(newIndexMeta("test", Settings.EMPTY), nodeSettings); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof OpenSearchTieredMergePolicy); + + // 2. 
INDEX_MERGE_POLICY set as tiered + // assert both index and time-series-index merge policy is set as tiered + indexSettings = indexSettings( + Settings.builder().put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.TIERED.getValue()).build() + ); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof OpenSearchTieredMergePolicy); + + // 2.1 node setting TIME_SERIES_INDEX_MERGE_POLICY is set as log_byte_size + // assert both index and time-series-index merge policy is set as tiered + nodeSettings = Settings.builder() + .put(IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build(); + indexSettings = new IndexSettings( + newIndexMeta( + "test", + Settings.builder().put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.TIERED.getValue()).build() + ), + nodeSettings + ); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof OpenSearchTieredMergePolicy); + + // 3. INDEX_MERGE_POLICY set as log_byte_size + // assert both index and time-series-index merge policy is set as log_byte_size + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build() + ); + assertTrue(indexSettings.getMergePolicy(false) instanceof LogByteSizeMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof LogByteSizeMergePolicy); + + // 3.1 node setting TIME_SERIES_INDEX_MERGE_POLICY is set as tiered + // assert both index and time-series-index merge policy is set as log_byte_size + nodeSettings = Settings.builder() + .put(IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.TIERED.getValue()) + .build(); + indexSettings = new IndexSettings( + newIndexMeta( + "test", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build() + ), + nodeSettings + ); + assertTrue(indexSettings.getMergePolicy(false) instanceof LogByteSizeMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof LogByteSizeMergePolicy); + + } + + public void testInvalidMergePolicy() throws IOException { + + final Settings invalidSettings = Settings.builder().put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "invalid").build(); + IllegalArgumentException exc1 = expectThrows( + IllegalArgumentException.class, + () -> IndexSettings.INDEX_MERGE_POLICY.get(invalidSettings) + ); + assertThat(exc1.getMessage(), containsString(" has unsupported policy specified: ")); + IllegalArgumentException exc2 = expectThrows( + IllegalArgumentException.class, + () -> indexSettings(invalidSettings).getMergePolicy(false) + ); + assertThat(exc2.getMessage(), containsString(" has unsupported policy specified: ")); + + final Settings invalidSettings2 = Settings.builder().put(IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.getKey(), "invalid").build(); + IllegalArgumentException exc3 = expectThrows( + IllegalArgumentException.class, + () -> IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.get(invalidSettings2) + ); + assertThat(exc3.getMessage(), containsString(" has unsupported policy specified: ")); + + IllegalArgumentException exc4 = expectThrows( + IllegalArgumentException.class, + () -> new IndexSettings(newIndexMeta("test", Settings.EMPTY), 
invalidSettings2).getMergePolicy(true) + ); + assertThat(exc4.getMessage(), containsString(" has unsupported policy specified: ")); + } + + public void testUpdateSettingsForLogByteSizeMergePolicy() throws IOException { + IndexSettings indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build() + ); + assertTrue(indexSettings.getMergePolicy(true) instanceof LogByteSizeMergePolicy); + assertThat(indexSettings.getMergePolicy(true).getNoCFSRatio(), equalTo(0.1)); + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), 0.9) + .build() + ); + assertThat((indexSettings.getMergePolicy(true)).getNoCFSRatio(), equalTo(0.9)); + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), 0.1) + .build() + ); + assertThat((indexSettings.getMergePolicy(true)).getNoCFSRatio(), equalTo(0.1)); + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), 0.0) + .build() + ); + assertThat((indexSettings.getMergePolicy(true)).getNoCFSRatio(), equalTo(0.0)); + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), "true") + .build() + ); + assertThat((indexSettings.getMergePolicy(true)).getNoCFSRatio(), equalTo(1.0)); + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), "false") + .build() + ); + assertThat((indexSettings.getMergePolicy(true)).getNoCFSRatio(), equalTo(0.0)); } public void testTieredMergePolicySettingsUpdate() throws IOException { IndexSettings indexSettings = indexSettings(Settings.EMPTY); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), - MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getForceMergeDeletesPctAllowed(), + TieredMergePolicyProvider.DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d ); @@ -102,21 +267,21 @@ public void testTieredMergePolicySettingsUpdate() throws IOException { "index", Settings.builder() .put( - MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING.getKey(), - MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d + TieredMergePolicyProvider.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING.getKey(), + TieredMergePolicyProvider.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d ) .build() ) ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), - MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getForceMergeDeletesPctAllowed(), + TieredMergePolicyProvider.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d, 0.0d ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), - MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMbFrac(), + ((OpenSearchTieredMergePolicy) 
indexSettings.getMergePolicy(false)).getFloorSegmentMB(), + TieredMergePolicyProvider.DEFAULT_FLOOR_SEGMENT.getMbFrac(), 0 ); indexSettings.updateIndexMetadata( @@ -124,41 +289,41 @@ public void testTieredMergePolicySettingsUpdate() throws IOException { "index", Settings.builder() .put( - MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB) + TieredMergePolicyProvider.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB) ) .build() ) ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB).getMbFrac(), + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getFloorSegmentMB(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB).getMbFrac(), 0.001 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), - MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergeAtOnce(), + TieredMergePolicyProvider.DEFAULT_MAX_MERGE_AT_ONCE ); indexSettings.updateIndexMetadata( newIndexMeta( "index", Settings.builder() .put( - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), - MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE - 1 + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), + TieredMergePolicyProvider.DEFAULT_MAX_MERGE_AT_ONCE - 1 ) .build() ) ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), - MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE - 1 + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergeAtOnce(), + TieredMergePolicyProvider.DEFAULT_MAX_MERGE_AT_ONCE - 1 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), - MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getMbFrac(), + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergedSegmentMB(), + TieredMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getMbFrac(), 0.0001 ); indexSettings.updateIndexMetadata( @@ -166,21 +331,21 @@ public void testTieredMergePolicySettingsUpdate() throws IOException { "index", Settings.builder() .put( - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING.getKey(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1) + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING.getKey(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1) ) .build() ) ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergedSegmentMB(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), 0.0001 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), - MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getSegmentsPerTier(), + 
TieredMergePolicyProvider.DEFAULT_SEGMENTS_PER_TIER, 0 ); indexSettings.updateIndexMetadata( @@ -188,37 +353,37 @@ public void testTieredMergePolicySettingsUpdate() throws IOException { "index", Settings.builder() .put( - MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), - MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER + 1 + TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), + TieredMergePolicyProvider.DEFAULT_SEGMENTS_PER_TIER + 1 ) .build() ) ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), - MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER + 1, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getSegmentsPerTier(), + TieredMergePolicyProvider.DEFAULT_SEGMENTS_PER_TIER + 1, 0 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(), - MergePolicyConfig.DEFAULT_DELETES_PCT_ALLOWED, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getDeletesPctAllowed(), + TieredMergePolicyProvider.DEFAULT_DELETES_PCT_ALLOWED, 0 ); indexSettings.updateIndexMetadata( newIndexMeta( "index", - Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 22).build() + Settings.builder().put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 22).build() ) ); - assertEquals(((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(), 22, 0); + assertEquals(((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getDeletesPctAllowed(), 22, 0); IllegalArgumentException exc = expectThrows( IllegalArgumentException.class, () -> indexSettings.updateIndexMetadata( newIndexMeta( "index", - Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 53).build() + Settings.builder().put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 53).build() ) ) ); @@ -226,50 +391,162 @@ public void testTieredMergePolicySettingsUpdate() throws IOException { assertThat(cause.getMessage(), containsString("must be <= 50.0")); indexSettings.updateIndexMetadata(newIndexMeta("index", EMPTY_SETTINGS)); // see if defaults are restored assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), - MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getForceMergeDeletesPctAllowed(), + TieredMergePolicyProvider.DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb(), ByteSizeUnit.MB).getMbFrac(), + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getFloorSegmentMB(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_FLOOR_SEGMENT.getMb(), ByteSizeUnit.MB).getMbFrac(), 0.00 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), - MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergeAtOnce(), + TieredMergePolicyProvider.DEFAULT_MAX_MERGE_AT_ONCE ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), + ((OpenSearchTieredMergePolicy) 
indexSettings.getMergePolicy(false)).getMaxMergedSegmentMB(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), 0.0001 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), - MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getSegmentsPerTier(), + TieredMergePolicyProvider.DEFAULT_SEGMENTS_PER_TIER, 0 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(), - MergePolicyConfig.DEFAULT_DELETES_PCT_ALLOWED, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getDeletesPctAllowed(), + TieredMergePolicyProvider.DEFAULT_DELETES_PCT_ALLOWED, 0 ); } + public void testLogByteSizeMergePolicySettingsUpdate() throws IOException { + + IndexSettings indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build() + ); + assertEquals( + ((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMergeFactor(), + LogByteSizeMergePolicyProvider.DEFAULT_MERGE_FACTOR + ); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put( + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MERGE_FACTOR_SETTING.getKey(), + LogByteSizeMergePolicyProvider.DEFAULT_MERGE_FACTOR + 1 + ) + .build() + ) + ); + assertEquals( + ((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMergeFactor(), + LogByteSizeMergePolicyProvider.DEFAULT_MERGE_FACTOR + 1 + ); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put( + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MIN_MERGE_SETTING.getKey(), + new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MIN_MERGE.getMb() + 1, ByteSizeUnit.MB) + ) + .build() + ) + ); + + assertEquals( + ((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMinMergeMB(), + new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MIN_MERGE.getMb() + 1, ByteSizeUnit.MB).getMbFrac(), + 0.001 + ); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put( + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_SETTING.getKey(), + new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getMb() + 100, ByteSizeUnit.MB) + ) + .build() + ) + ); + + assertEquals( + ((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMaxMergeMB(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getMb() + 100, ByteSizeUnit.MB).getMbFrac(), + 0.001 + ); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put( + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_FOR_FORCED_MERGE_SETTING.getKey(), + new ByteSizeValue( + LogByteSizeMergePolicyProvider.DEFAULT_MAX_MERGE_SEGMENT_FORCE_MERGE.getMb() - 100, + ByteSizeUnit.MB + ) + ) + .build() + ) + ); + assertEquals( + ((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMaxMergeMBForForcedMerge(), + new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MAX_MERGE_SEGMENT_FORCE_MERGE.getMb() - 100, ByteSizeUnit.MB) + 
.getMbFrac(), + 0.001 + ); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGED_DOCS_SETTING.getKey(), 10000000) + .build() + ) + ); + assertEquals(((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMaxMergeDocs(), 10000000); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), 0.1) + .build() + ) + ); + assertEquals(indexSettings.getMergePolicy(true).getNoCFSRatio(), 0.1, 0.0); + } + public Settings build(String value) { - return Settings.builder().put(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); + return Settings.builder().put(TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); } public Settings build(double value) { - return Settings.builder().put(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); + return Settings.builder().put(TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); } public Settings build(int value) { - return Settings.builder().put(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); + return Settings.builder().put(TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); } public Settings build(boolean value) { - return Settings.builder().put(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); + return Settings.builder().put(TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); } } diff --git a/server/src/test/java/org/opensearch/index/MergeSchedulerSettingsTests.java b/server/src/test/java/org/opensearch/index/MergeSchedulerSettingsTests.java index b909846d3b681..baaf584702f78 100644 --- a/server/src/test/java/org/opensearch/index/MergeSchedulerSettingsTests.java +++ b/server/src/test/java/org/opensearch/index/MergeSchedulerSettingsTests.java @@ -33,8 +33,8 @@ package org.opensearch.index; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.core.LogEvent; import org.apache.logging.log4j.core.appender.AbstractAppender; import org.apache.logging.log4j.core.filter.RegexFilter; @@ -92,8 +92,8 @@ public void testUpdateAutoThrottleSettings() throws Exception { .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") .put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "1") .put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "2") .put(MergeSchedulerConfig.AUTO_THROTTLE_SETTING.getKey(), "true"); @@ -123,8 +123,8 @@ public void testUpdateMergeMaxThreadCount() throws Exception { .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") 
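The precedence rule asserted by MergePolicySettingsTests#testMergePolicyPrecedence above can be condensed as follows. This is a sketch reusing the APIs exactly as they appear in this diff, including the newIndexMeta test helper, so it is not standalone code:

```java
// Precedence sketch: index.merge.policy, when set, wins for both paths;
// the node-level time-series default only affects getMergePolicy(true),
// and only when the index-level setting is absent.
Settings nodeSettings = Settings.builder()
    .put(IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.getKey(),
         IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue())
    .build();

// No index-level override: the regular path stays tiered, the
// time-series path follows the node default.
IndexSettings byDefault = new IndexSettings(newIndexMeta("test", Settings.EMPTY), nodeSettings);
assert byDefault.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy;
assert byDefault.getMergePolicy(true) instanceof LogByteSizeMergePolicy;

// Index-level override: tiered applies to both paths despite the node default.
Settings indexLevel = Settings.builder()
    .put(IndexSettings.INDEX_MERGE_POLICY.getKey(),
         IndexSettings.IndexMergePolicy.TIERED.getValue())
    .build();
IndexSettings overridden = new IndexSettings(newIndexMeta("test", indexLevel), nodeSettings);
assert overridden.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy;
assert overridden.getMergePolicy(true) instanceof OpenSearchTieredMergePolicy;
```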
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") .put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "10000") .put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "10000"); IndexSettings settings = new IndexSettings(newIndexMeta("index", builder.build()), Settings.EMPTY); diff --git a/server/src/test/java/org/opensearch/index/OpenSearchTieredMergePolicyTests.java b/server/src/test/java/org/opensearch/index/OpenSearchTieredMergePolicyTests.java index cbc439041666f..614dacd457782 100644 --- a/server/src/test/java/org/opensearch/index/OpenSearchTieredMergePolicyTests.java +++ b/server/src/test/java/org/opensearch/index/OpenSearchTieredMergePolicyTests.java @@ -32,9 +32,18 @@ package org.opensearch.index; +import org.apache.lucene.index.MergePolicy; +import org.apache.lucene.index.SegmentCommitInfo; +import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.TieredMergePolicy; +import org.apache.lucene.util.InfoStream; +import org.apache.lucene.util.Version; import org.opensearch.test.OpenSearchTestCase; +import java.io.IOException; +import java.util.Collections; +import java.util.Set; + public class OpenSearchTieredMergePolicyTests extends OpenSearchTestCase { public void testDefaults() { @@ -80,4 +89,32 @@ public void testSetDeletesPctAllowed() { policy.setDeletesPctAllowed(42); assertEquals(42, policy.regularMergePolicy.getDeletesPctAllowed(), 0); } + + public void testFindDeleteMergesReturnsNullOnEmptySegmentInfos() throws IOException { + MergePolicy.MergeSpecification mergeSpecification = new OpenSearchTieredMergePolicy().findForcedDeletesMerges( + new SegmentInfos(Version.LATEST.major), + new MergePolicy.MergeContext() { + @Override + public int numDeletesToMerge(SegmentCommitInfo info) { + return 0; + } + + @Override + public int numDeletedDocs(SegmentCommitInfo info) { + return 0; + } + + @Override + public InfoStream getInfoStream() { + return InfoStream.NO_OUTPUT; + } + + @Override + public Set<SegmentCommitInfo> getMergingSegments() { + return Collections.emptySet(); + } + } + ); + assertNull(mergeSpecification); + } } diff --git a/server/src/test/java/org/opensearch/index/SearchSlowLogTests.java b/server/src/test/java/org/opensearch/index/SearchSlowLogTests.java index fe902a09a8c7c..0c0dec29c9dbf 100644 --- a/server/src/test/java/org/opensearch/index/SearchSlowLogTests.java +++ b/server/src/test/java/org/opensearch/index/SearchSlowLogTests.java @@ -41,11 +41,12 @@ import org.opensearch.common.UUIDs; import org.opensearch.common.logging.Loggers; import org.opensearch.common.logging.MockAppender; +import org.opensearch.common.logging.SlowLogLevel; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; -import org.opensearch.index.query.QueryBuilders; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.internal.SearchContext; import org.opensearch.search.internal.ShardSearchRequest; @@ -337,7 +338,7 @@ public void testLevelSetting() { assertNotNull(ex.getCause()); 
assertThat(ex.getCause(), instanceOf(IllegalArgumentException.class)); final IllegalArgumentException cause = (IllegalArgumentException) ex.getCause(); - assertThat(cause, hasToString(containsString("No enum constant org.opensearch.index.SlowLogLevel.NOT A LEVEL"))); + assertThat(cause, hasToString(containsString("No enum constant org.opensearch.common.logging.SlowLogLevel.NOT A LEVEL"))); } assertEquals(SlowLogLevel.TRACE, log.getLevel()); diff --git a/server/src/test/java/org/opensearch/index/SegmentReplicationPressureServiceTests.java b/server/src/test/java/org/opensearch/index/SegmentReplicationPressureServiceTests.java index 827c304ba9bd9..a9725f638cc53 100644 --- a/server/src/test/java/org/opensearch/index/SegmentReplicationPressureServiceTests.java +++ b/server/src/test/java/org/opensearch/index/SegmentReplicationPressureServiceTests.java @@ -8,8 +8,6 @@ package org.opensearch.index; -import org.mockito.Mockito; -import org.mockito.stubbing.Answer; import org.opensearch.cluster.action.shard.ShardStateAction; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.service.ClusterService; @@ -17,10 +15,10 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.replication.OpenSearchIndexLevelReplicationTestCase; import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.threadpool.ThreadPool; @@ -31,17 +29,22 @@ import java.util.Set; import java.util.concurrent.TimeUnit; +import org.mockito.Mockito; +import org.mockito.stubbing.Answer; + import static java.util.Arrays.asList; +import static org.opensearch.index.SegmentReplicationPressureService.MAX_REPLICATION_LIMIT_STALE_REPLICA_SETTING; +import static org.opensearch.index.SegmentReplicationPressureService.MAX_REPLICATION_TIME_BACKPRESSURE_SETTING; +import static org.opensearch.index.SegmentReplicationPressureService.SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; -import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; -import static org.opensearch.index.SegmentReplicationPressureService.MAX_REPLICATION_TIME_SETTING; -import static org.opensearch.index.SegmentReplicationPressureService.SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class SegmentReplicationPressureServiceTests extends OpenSearchIndexLevelReplicationTestCase { @@ -49,7 +52,7 @@ public class SegmentReplicationPressureServiceTests extends OpenSearchIndexLevel private static final Settings settings = Settings.builder() .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED.getKey(), true) - 
.put(MAX_REPLICATION_TIME_SETTING.getKey(), TimeValue.timeValueSeconds(5)) + .put(MAX_REPLICATION_TIME_BACKPRESSURE_SETTING.getKey(), TimeValue.timeValueSeconds(5)) .build(); public void testIsSegrepLimitBreached() throws Exception { @@ -111,7 +114,7 @@ public void testIsSegrepLimitBreached_onlyCheckpointLimitBreached() throws Excep indexInBatches(5, shards, primaryShard); - Set<SegmentReplicationShardStats> replicationStats = primaryShard.getReplicationStats(); + Set<SegmentReplicationShardStats> replicationStats = primaryShard.getReplicationStatsForTrackedReplicas(); assertEquals(1, replicationStats.size()); SegmentReplicationShardStats shardStats = replicationStats.stream().findFirst().get(); assertEquals(5, shardStats.getCheckpointsBehindCount()); @@ -139,7 +142,7 @@ public void testIsSegrepLimitBreached_onlyTimeLimitBreached() throws Exception { indexInBatches(1, shards, primaryShard); assertBusy(() -> { - Set<SegmentReplicationShardStats> replicationStats = primaryShard.getReplicationStats(); + Set<SegmentReplicationShardStats> replicationStats = primaryShard.getReplicationStatsForTrackedReplicas(); assertEquals(1, replicationStats.size()); SegmentReplicationShardStats shardStats = replicationStats.stream().findFirst().get(); assertTrue(shardStats.getCurrentReplicationTimeMillis() > TimeValue.timeValueSeconds(5).millis()); @@ -161,7 +164,7 @@ public void testIsSegrepLimitBreached_underStaleNodeLimit() throws Exception { SegmentReplicationPressureService service = buildPressureService(settings, primaryShard); assertBusy(() -> { - Set<SegmentReplicationShardStats> replicationStats = primaryShard.getReplicationStats(); + Set<SegmentReplicationShardStats> replicationStats = primaryShard.getReplicationStatsForTrackedReplicas(); assertEquals(3, replicationStats.size()); SegmentReplicationShardStats shardStats = replicationStats.stream().findFirst().get(); assertTrue(shardStats.getCurrentReplicationTimeMillis() > TimeValue.timeValueSeconds(5).millis()); @@ -195,7 +198,8 @@ public void testFailStaleReplicaTask() throws Exception { final Settings settings = Settings.builder() .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED.getKey(), true) - .put(MAX_REPLICATION_TIME_SETTING.getKey(), TimeValue.timeValueMillis(10)) + .put(MAX_REPLICATION_TIME_BACKPRESSURE_SETTING.getKey(), TimeValue.timeValueMillis(10)) + .put(MAX_REPLICATION_LIMIT_STALE_REPLICA_SETTING.getKey(), TimeValue.timeValueMillis(20)) .build(); try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { @@ -207,12 +211,14 @@ public void testFailStaleReplicaTask() throws Exception { indexInBatches(5, shards, primaryShard); // assert that replica shard is few checkpoints behind primary - Set<SegmentReplicationShardStats> replicationStats = primaryShard.getReplicationStats(); + Set<SegmentReplicationShardStats> replicationStats = primaryShard.getReplicationStatsForTrackedReplicas(); assertEquals(1, replicationStats.size()); SegmentReplicationShardStats shardStats = replicationStats.stream().findFirst().get(); assertEquals(5, shardStats.getCheckpointsBehindCount()); // call the background task + assertTrue(service.getFailStaleReplicaTask().mustReschedule()); + assertTrue(service.getFailStaleReplicaTask().isScheduled()); service.getFailStaleReplicaTask().runInternal(); // verify that remote shard failed method is called which fails the replica shards falling behind. 
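The backpressure hunks above split one knob into two: MAX_REPLICATION_TIME_SETTING becomes MAX_REPLICATION_TIME_BACKPRESSURE_SETTING (how far a replica may lag before writes are rejected), and MAX_REPLICATION_LIMIT_STALE_REPLICA_SETTING is added (how far it may lag before the background task fails it). A settings sketch mirroring the builders in these tests; the durations are illustrative only:

```java
// Sketch only: keys as used in this diff, values illustrative.
Settings settings = Settings.builder()
    .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT)
    .put(SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED.getKey(), true)
    // Lag threshold for rejecting writes (backpressure).
    .put(MAX_REPLICATION_TIME_BACKPRESSURE_SETTING.getKey(), TimeValue.timeValueMillis(10))
    // Lag threshold for the fail-stale-replica task; per the tests that
    // follow, a zero value disables the task (mustReschedule() is false).
    .put(MAX_REPLICATION_LIMIT_STALE_REPLICA_SETTING.getKey(), TimeValue.timeValueMillis(20))
    .build();
```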
@@ -221,6 +227,73 @@ public void testFailStaleReplicaTask() throws Exception { } } + public void testFailStaleReplicaTaskDisabled() throws Exception { + final Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED.getKey(), true) + .put(MAX_REPLICATION_TIME_BACKPRESSURE_SETTING.getKey(), TimeValue.timeValueMillis(10)) + .put(MAX_REPLICATION_LIMIT_STALE_REPLICA_SETTING.getKey(), TimeValue.timeValueMillis(0)) + .build(); + + try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + shards.startAll(); + final IndexShard primaryShard = shards.getPrimary(); + SegmentReplicationPressureService service = buildPressureService(settings, primaryShard); + Mockito.reset(shardStateAction); + + // index docs in batches without refreshing + indexInBatches(5, shards, primaryShard); + + // assert that replica shard is few checkpoints behind primary + Set<SegmentReplicationShardStats> replicationStats = primaryShard.getReplicationStatsForTrackedReplicas(); + assertEquals(1, replicationStats.size()); + SegmentReplicationShardStats shardStats = replicationStats.stream().findFirst().get(); + assertEquals(5, shardStats.getCheckpointsBehindCount()); + + // call the background task + service.getFailStaleReplicaTask().runInternal(); + + // verify that remote shard failed method is never called as it is disabled. + verify(shardStateAction, never()).remoteShardFailed(any(), anyString(), anyLong(), anyBoolean(), anyString(), any(), any()); + replicateSegments(primaryShard, shards.getReplicas()); + } + } + + public void testFailStaleReplicaTaskToggleOnOff() throws Exception { + final Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED.getKey(), true) + .put(MAX_REPLICATION_TIME_BACKPRESSURE_SETTING.getKey(), TimeValue.timeValueMillis(10)) + .put(MAX_REPLICATION_LIMIT_STALE_REPLICA_SETTING.getKey(), TimeValue.timeValueMillis(1)) + .build(); + + try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + shards.startAll(); + final IndexShard primaryShard = shards.getPrimary(); + SegmentReplicationPressureService service = buildPressureService(settings, primaryShard); + + // index docs in batches without refreshing + indexInBatches(5, shards, primaryShard); + + // assert that replica shard is few checkpoints behind primary + Set<SegmentReplicationShardStats> replicationStats = primaryShard.getReplicationStatsForTrackedReplicas(); + assertEquals(1, replicationStats.size()); + SegmentReplicationShardStats shardStats = replicationStats.stream().findFirst().get(); + assertEquals(5, shardStats.getCheckpointsBehindCount()); + + assertTrue(service.getFailStaleReplicaTask().mustReschedule()); + assertTrue(service.getFailStaleReplicaTask().isScheduled()); + replicateSegments(primaryShard, shards.getReplicas()); + + service.setReplicationTimeLimitFailReplica(TimeValue.ZERO); + assertFalse(service.getFailStaleReplicaTask().mustReschedule()); + assertFalse(service.getFailStaleReplicaTask().isScheduled()); + service.setReplicationTimeLimitFailReplica(TimeValue.timeValueMillis(1)); + assertTrue(service.getFailStaleReplicaTask().mustReschedule()); + assertTrue(service.getFailStaleReplicaTask().isScheduled()); + } + } + private int indexInBatches(int count, ReplicationGroup shards, IndexShard primaryShard) throws Exception { int totalDocs = 0; for 
(int i = 0; i < count; i++) { @@ -242,6 +315,13 @@ private SegmentReplicationPressureService buildPressureService(Settings settings ClusterService clusterService = mock(ClusterService.class); when(clusterService.getClusterSettings()).thenReturn(new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); - return new SegmentReplicationPressureService(settings, clusterService, indicesService, shardStateAction, mock(ThreadPool.class)); + return new SegmentReplicationPressureService( + settings, + clusterService, + indicesService, + shardStateAction, + new SegmentReplicationStatsTracker(indicesService), + mock(ThreadPool.class) + ); } } diff --git a/server/src/test/java/org/opensearch/index/SegmentReplicationStatsTrackerTests.java b/server/src/test/java/org/opensearch/index/SegmentReplicationStatsTrackerTests.java new file mode 100644 index 0000000000000..04423d583e8f9 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/SegmentReplicationStatsTrackerTests.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index; + +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.indices.IndicesService; +import org.opensearch.test.OpenSearchTestCase; + +import org.mockito.Mockito; + +import static org.mockito.Mockito.mock; + +public class SegmentReplicationStatsTrackerTests extends OpenSearchTestCase { + + private IndicesService indicesService = mock(IndicesService.class); + + public void testRejectedCount() { + SegmentReplicationStatsTracker segmentReplicationStatsTracker = new SegmentReplicationStatsTracker(indicesService); + + // Verify that total rejection count is 0 on an empty rejectionCount map in statsTracker. + assertTrue(segmentReplicationStatsTracker.getRejectionCount().isEmpty()); + assertEquals(segmentReplicationStatsTracker.getTotalRejectionStats().getTotalRejectionCount(), 0L); + + // Verify that total rejection count is 1 after incrementing rejectionCount. 
+ segmentReplicationStatsTracker.incrementRejectionCount(Mockito.mock(ShardId.class)); + assertEquals(segmentReplicationStatsTracker.getTotalRejectionStats().getTotalRejectionCount(), 1L); + } + +} diff --git a/server/src/test/java/org/opensearch/index/ShardIndexingPressureConcurrentExecutionTests.java b/server/src/test/java/org/opensearch/index/ShardIndexingPressureConcurrentExecutionTests.java index eedb45e8e96b6..ce719a18898f8 100644 --- a/server/src/test/java/org/opensearch/index/ShardIndexingPressureConcurrentExecutionTests.java +++ b/server/src/test/java/org/opensearch/index/ShardIndexingPressureConcurrentExecutionTests.java @@ -8,14 +8,11 @@ package org.opensearch.index; -import org.hamcrest.Matcher; -import org.hamcrest.MatcherAssert; -import org.hamcrest.Matchers; import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.lease.Releasable; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; @@ -23,6 +20,9 @@ import org.opensearch.index.stats.IndexingPressureStats; import org.opensearch.index.stats.ShardIndexingPressureStats; import org.opensearch.test.OpenSearchTestCase; +import org.hamcrest.Matcher; +import org.hamcrest.MatcherAssert; +import org.hamcrest.Matchers; import java.util.concurrent.atomic.AtomicInteger; diff --git a/server/src/test/java/org/opensearch/index/ShardIndexingPressureStoreTests.java b/server/src/test/java/org/opensearch/index/ShardIndexingPressureStoreTests.java index b871c8fb8f364..46f9801035ac3 100644 --- a/server/src/test/java/org/opensearch/index/ShardIndexingPressureStoreTests.java +++ b/server/src/test/java/org/opensearch/index/ShardIndexingPressureStoreTests.java @@ -8,12 +8,12 @@ package org.opensearch.index; -import org.junit.Before; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.core.index.shard.ShardId; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; import java.util.Map; diff --git a/server/src/test/java/org/opensearch/index/ShardIndexingPressureTrackerTests.java b/server/src/test/java/org/opensearch/index/ShardIndexingPressureTrackerTests.java index 686844794809c..129e61316e8bc 100644 --- a/server/src/test/java/org/opensearch/index/ShardIndexingPressureTrackerTests.java +++ b/server/src/test/java/org/opensearch/index/ShardIndexingPressureTrackerTests.java @@ -8,12 +8,12 @@ package org.opensearch.index; -import org.opensearch.index.ShardIndexingPressureTracker.OperationTracker; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.ShardIndexingPressureTracker.CommonOperationTracker; -import org.opensearch.index.ShardIndexingPressureTracker.StatsTracker; -import org.opensearch.index.ShardIndexingPressureTracker.RejectionTracker; +import org.opensearch.index.ShardIndexingPressureTracker.OperationTracker; import org.opensearch.index.ShardIndexingPressureTracker.PerformanceTracker; -import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.ShardIndexingPressureTracker.RejectionTracker; +import org.opensearch.index.ShardIndexingPressureTracker.StatsTracker; import org.opensearch.test.OpenSearchTestCase; 
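Most hunks from here on are pure import reorders with no behavior change. The grouping they converge on, inferred from the reordered files in this excerpt (the enforcing build rule itself is not part of this patch, and the file below assumes the test classpath):

```java
// Inferred ordering, illustrated with imports taken verbatim from the hunks above:
import com.carrotsearch.randomizedtesting.generators.RandomPicks; // third-party libs first

import org.apache.lucene.analysis.Analyzer;                       // grouped per top-level package

import org.opensearch.common.settings.Settings;                   // org.opensearch.* together,
import org.opensearch.test.OpenSearchTestCase;                    // sorted alphabetically
import org.junit.Before;                                          // JUnit after OpenSearch

import java.io.IOException;                                       // java.* after the frameworks

import org.mockito.Mockito;                                       // Mockito in its own group

// statics last: java.*, then org.opensearch.*, then the remaining frameworks
import static org.opensearch.index.SegmentReplicationPressureService.SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED;
import static org.hamcrest.Matchers.containsString;
import static org.mockito.Mockito.mock;

public class ImportOrderExample {} // trivial body so the file is well-formed
```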
public class ShardIndexingPressureTrackerTests extends OpenSearchTestCase { diff --git a/server/src/test/java/org/opensearch/index/SlowLogLevelTests.java b/server/src/test/java/org/opensearch/index/SlowLogLevelTests.java index 290af7360ce52..d0da07f7a7018 100644 --- a/server/src/test/java/org/opensearch/index/SlowLogLevelTests.java +++ b/server/src/test/java/org/opensearch/index/SlowLogLevelTests.java @@ -32,6 +32,7 @@ package org.opensearch.index; +import org.opensearch.common.logging.SlowLogLevel; import org.opensearch.test.OpenSearchTestCase; public class SlowLogLevelTests extends OpenSearchTestCase { diff --git a/server/src/test/java/org/opensearch/index/analysis/AnalysisRegistryTests.java b/server/src/test/java/org/opensearch/index/analysis/AnalysisRegistryTests.java index 739e26ed3d677..2c7f11063cf0f 100644 --- a/server/src/test/java/org/opensearch/index/analysis/AnalysisRegistryTests.java +++ b/server/src/test/java/org/opensearch/index/analysis/AnalysisRegistryTests.java @@ -33,8 +33,8 @@ package org.opensearch.index.analysis; import com.carrotsearch.randomizedtesting.generators.RandomPicks; + import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.tests.analysis.MockTokenFilter; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.en.EnglishAnalyzer; @@ -42,6 +42,7 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.analysis.standard.StandardTokenizer; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; +import org.apache.lucene.tests.analysis.MockTokenFilter; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; @@ -54,8 +55,8 @@ import org.opensearch.indices.analysis.PreBuiltAnalyzers; import org.opensearch.plugins.AnalysisPlugin; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/index/analysis/PreBuiltAnalyzerTests.java b/server/src/test/java/org/opensearch/index/analysis/PreBuiltAnalyzerTests.java index d92165ee8b62e..6f9a662caff46 100644 --- a/server/src/test/java/org/opensearch/index/analysis/PreBuiltAnalyzerTests.java +++ b/server/src/test/java/org/opensearch/index/analysis/PreBuiltAnalyzerTests.java @@ -36,14 +36,14 @@ import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; import org.opensearch.indices.analysis.PreBuiltAnalyzers; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.test.VersionUtils; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/index/analysis/PreConfiguredTokenFilterTests.java b/server/src/test/java/org/opensearch/index/analysis/PreConfiguredTokenFilterTests.java index 67c865be70ac5..9f997889a438a 100644 --- 
a/server/src/test/java/org/opensearch/index/analysis/PreConfiguredTokenFilterTests.java +++ b/server/src/test/java/org/opensearch/index/analysis/PreConfiguredTokenFilterTests.java @@ -38,8 +38,8 @@ import org.opensearch.env.Environment; import org.opensearch.env.TestEnvironment; import org.opensearch.index.IndexSettings; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/index/analysis/ReloadableCustomAnalyzerTests.java b/server/src/test/java/org/opensearch/index/analysis/ReloadableCustomAnalyzerTests.java index 4cbaef5ab82df..fd5271dffa290 100644 --- a/server/src/test/java/org/opensearch/index/analysis/ReloadableCustomAnalyzerTests.java +++ b/server/src/test/java/org/opensearch/index/analysis/ReloadableCustomAnalyzerTests.java @@ -40,8 +40,8 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.core.index.Index; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTestCase; import org.junit.BeforeClass; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/index/analysis/ShingleTokenFilterFactoryTests.java b/server/src/test/java/org/opensearch/index/analysis/ShingleTokenFilterFactoryTests.java index 110d705794d6a..41d64320cfd72 100644 --- a/server/src/test/java/org/opensearch/index/analysis/ShingleTokenFilterFactoryTests.java +++ b/server/src/test/java/org/opensearch/index/analysis/ShingleTokenFilterFactoryTests.java @@ -32,8 +32,8 @@ package org.opensearch.index.analysis; -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope; import org.apache.lucene.analysis.StopFilter; import org.apache.lucene.analysis.TokenStream; diff --git a/server/src/test/java/org/opensearch/index/analysis/StopAnalyzerTests.java b/server/src/test/java/org/opensearch/index/analysis/StopAnalyzerTests.java index f71edbcdcff66..68b042fa0101f 100644 --- a/server/src/test/java/org/opensearch/index/analysis/StopAnalyzerTests.java +++ b/server/src/test/java/org/opensearch/index/analysis/StopAnalyzerTests.java @@ -37,8 +37,8 @@ import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; -import org.opensearch.test.OpenSearchTokenStreamTestCase; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTokenStreamTestCase; import static org.opensearch.test.OpenSearchTestCase.createTestAnalysis; diff --git a/server/src/test/java/org/opensearch/index/analysis/StopTokenFilterTests.java b/server/src/test/java/org/opensearch/index/analysis/StopTokenFilterTests.java index 006c40f945ea1..7e4e0747cdd4a 100644 --- a/server/src/test/java/org/opensearch/index/analysis/StopTokenFilterTests.java +++ b/server/src/test/java/org/opensearch/index/analysis/StopTokenFilterTests.java @@ -38,8 +38,8 @@ import org.apache.lucene.analysis.core.WhitespaceTokenizer; import org.apache.lucene.search.suggest.analyzing.SuggestStopFilter; import org.apache.lucene.util.Version; -import org.opensearch.common.settings.Settings.Builder; import org.opensearch.common.settings.Settings; +import 
org.opensearch.common.settings.Settings.Builder; import org.opensearch.env.Environment; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.OpenSearchTokenStreamTestCase; diff --git a/server/src/test/java/org/opensearch/index/cache/bitset/BitSetFilterCacheTests.java b/server/src/test/java/org/opensearch/index/cache/bitset/BitSetFilterCacheTests.java index 501a8b01c3899..f3cac6abd6ced 100644 --- a/server/src/test/java/org/opensearch/index/cache/bitset/BitSetFilterCacheTests.java +++ b/server/src/test/java/org/opensearch/index/cache/bitset/BitSetFilterCacheTests.java @@ -53,10 +53,10 @@ import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.io.IOUtils; -import org.opensearch.index.IndexSettings; import org.opensearch.core.index.shard.ShardId; -import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.index.IndexSettings; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; import java.util.concurrent.atomic.AtomicInteger; diff --git a/server/src/test/java/org/opensearch/index/codec/CodecTests.java b/server/src/test/java/org/opensearch/index/codec/CodecTests.java index b0d904392407c..b31edd79411d0 100644 --- a/server/src/test/java/org/opensearch/index/codec/CodecTests.java +++ b/server/src/test/java/org/opensearch/index/codec/CodecTests.java @@ -34,8 +34,8 @@ import org.apache.logging.log4j.LogManager; import org.apache.lucene.codecs.Codec; -import org.apache.lucene.codecs.lucene95.Lucene95Codec; import org.apache.lucene.codecs.lucene90.Lucene90StoredFieldsFormat; +import org.apache.lucene.codecs.lucene99.Lucene99Codec; import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; @@ -43,22 +43,23 @@ import org.apache.lucene.index.SegmentReader; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.util.LuceneTestCase.SuppressCodecs; +import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; import org.opensearch.index.analysis.IndexAnalyzers; -import org.opensearch.index.codec.customcodecs.Lucene95CustomCodec; -import org.opensearch.index.codec.customcodecs.Lucene95CustomStoredFieldsFormat; +import org.opensearch.index.engine.EngineConfig; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.similarity.SimilarityService; import org.opensearch.indices.mapper.MapperRegistry; import org.opensearch.plugins.MapperPlugin; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; import java.util.Collections; +import static org.opensearch.index.engine.EngineConfig.INDEX_CODEC_COMPRESSION_LEVEL_SETTING; import static org.hamcrest.Matchers.instanceOf; @SuppressCodecs("*") // we test against default codec so never get a random one here! 
@@ -67,71 +68,69 @@ public class CodecTests extends OpenSearchTestCase { public void testResolveDefaultCodecs() throws Exception { CodecService codecService = createCodecService(false); assertThat(codecService.codec("default"), instanceOf(PerFieldMappingPostingFormatCodec.class)); - assertThat(codecService.codec("default"), instanceOf(Lucene95Codec.class)); + assertThat(codecService.codec("default"), instanceOf(Lucene99Codec.class)); } public void testDefault() throws Exception { Codec codec = createCodecService(false).codec("default"); - assertStoredFieldsCompressionEquals(Lucene95Codec.Mode.BEST_SPEED, codec); + assertStoredFieldsCompressionEquals(Lucene99Codec.Mode.BEST_SPEED, codec); } public void testBestCompression() throws Exception { Codec codec = createCodecService(false).codec("best_compression"); - assertStoredFieldsCompressionEquals(Lucene95Codec.Mode.BEST_COMPRESSION, codec); + assertStoredFieldsCompressionEquals(Lucene99Codec.Mode.BEST_COMPRESSION, codec); } - public void testZstd() throws Exception { - Codec codec = createCodecService(false).codec("zstd"); - assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD, codec); - Lucene95CustomStoredFieldsFormat storedFieldsFormat = (Lucene95CustomStoredFieldsFormat) codec.storedFieldsFormat(); - assertEquals(Lucene95CustomCodec.DEFAULT_COMPRESSION_LEVEL, storedFieldsFormat.getCompressionLevel()); + public void testLZ4() throws Exception { + Codec codec = createCodecService(false).codec("lz4"); + assertStoredFieldsCompressionEquals(Lucene99Codec.Mode.BEST_SPEED, codec); + assert codec instanceof PerFieldMappingPostingFormatCodec; } - public void testZstdNoDict() throws Exception { - Codec codec = createCodecService(false).codec("zstd_no_dict"); - assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD_NO_DICT, codec); - Lucene95CustomStoredFieldsFormat storedFieldsFormat = (Lucene95CustomStoredFieldsFormat) codec.storedFieldsFormat(); - assertEquals(Lucene95CustomCodec.DEFAULT_COMPRESSION_LEVEL, storedFieldsFormat.getCompressionLevel()); + public void testZlib() throws Exception { + Codec codec = createCodecService(false).codec("zlib"); + assertStoredFieldsCompressionEquals(Lucene99Codec.Mode.BEST_COMPRESSION, codec); + assert codec instanceof PerFieldMappingPostingFormatCodec; } - public void testZstdWithCompressionLevel() throws Exception { - int randomCompressionLevel = randomIntBetween(1, 6); - Codec codec = createCodecService(randomCompressionLevel).codec("zstd"); - assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD, codec); - Lucene95CustomStoredFieldsFormat storedFieldsFormat = (Lucene95CustomStoredFieldsFormat) codec.storedFieldsFormat(); - assertEquals(randomCompressionLevel, storedFieldsFormat.getCompressionLevel()); + public void testBestCompressionWithCompressionLevel() { + final Settings settings = Settings.builder() + .put(INDEX_CODEC_COMPRESSION_LEVEL_SETTING.getKey(), randomIntBetween(1, 6)) + .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), randomFrom(CodecService.DEFAULT_CODEC, CodecService.BEST_COMPRESSION_CODEC)) + .build(); + final IndexScopedSettings indexScopedSettings = new IndexScopedSettings(settings, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS); + + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> indexScopedSettings.validate(settings, true)); + assertTrue(e.getMessage().startsWith("Compression level cannot be set")); } - public void testZstdNoDictWithCompressionLevel() throws Exception { - int randomCompressionLevel = 
randomIntBetween(1, 6); - Codec codec = createCodecService(randomCompressionLevel).codec("zstd_no_dict"); - assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD_NO_DICT, codec); - Lucene95CustomStoredFieldsFormat storedFieldsFormat = (Lucene95CustomStoredFieldsFormat) codec.storedFieldsFormat(); - assertEquals(randomCompressionLevel, storedFieldsFormat.getCompressionLevel()); + public void testLuceneCodecsWithCompressionLevel() { + String codecName = randomFrom(Codec.availableCodecs()); + Codec codec = Codec.forName(codecName); + + final Settings settings = Settings.builder() + .put(INDEX_CODEC_COMPRESSION_LEVEL_SETTING.getKey(), randomIntBetween(1, 6)) + .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), codecName) + .build(); + final IndexScopedSettings indexScopedSettings = new IndexScopedSettings(settings, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS); + + if (!(codec instanceof CodecSettings && ((CodecSettings) codec).supports(INDEX_CODEC_COMPRESSION_LEVEL_SETTING))) { + final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> indexScopedSettings.validate(settings, true) + ); + assertTrue(e.getMessage().startsWith("Compression level cannot be set")); + } } public void testDefaultMapperServiceNull() throws Exception { Codec codec = createCodecService(true).codec("default"); - assertStoredFieldsCompressionEquals(Lucene95Codec.Mode.BEST_SPEED, codec); + assertStoredFieldsCompressionEquals(Lucene99Codec.Mode.BEST_SPEED, codec); } public void testBestCompressionMapperServiceNull() throws Exception { Codec codec = createCodecService(true).codec("best_compression"); - assertStoredFieldsCompressionEquals(Lucene95Codec.Mode.BEST_COMPRESSION, codec); - } - - public void testZstdMapperServiceNull() throws Exception { - Codec codec = createCodecService(true).codec("zstd"); - assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD, codec); - Lucene95CustomStoredFieldsFormat storedFieldsFormat = (Lucene95CustomStoredFieldsFormat) codec.storedFieldsFormat(); - assertEquals(Lucene95CustomCodec.DEFAULT_COMPRESSION_LEVEL, storedFieldsFormat.getCompressionLevel()); - } - - public void testZstdNoDictMapperServiceNull() throws Exception { - Codec codec = createCodecService(true).codec("zstd_no_dict"); - assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD_NO_DICT, codec); - Lucene95CustomStoredFieldsFormat storedFieldsFormat = (Lucene95CustomStoredFieldsFormat) codec.storedFieldsFormat(); - assertEquals(Lucene95CustomCodec.DEFAULT_COMPRESSION_LEVEL, storedFieldsFormat.getCompressionLevel()); + assertStoredFieldsCompressionEquals(Lucene99Codec.Mode.BEST_COMPRESSION, codec); } public void testExceptionCodecNull() { @@ -143,18 +142,11 @@ public void testExceptionIndexSettingsNull() { } // write some docs with it, inspect .si to see this was the used compression - private void assertStoredFieldsCompressionEquals(Lucene95Codec.Mode expected, Codec actual) throws Exception { + private void assertStoredFieldsCompressionEquals(Lucene99Codec.Mode expected, Codec actual) throws Exception { SegmentReader sr = getSegmentReader(actual); String v = sr.getSegmentInfo().info.getAttribute(Lucene90StoredFieldsFormat.MODE_KEY); assertNotNull(v); - assertEquals(expected, Lucene95Codec.Mode.valueOf(v)); - } - - private void assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode expected, Codec actual) throws Exception { - SegmentReader sr = getSegmentReader(actual); - String v = 
sr.getSegmentInfo().info.getAttribute(Lucene95CustomStoredFieldsFormat.MODE_KEY); - assertNotNull(v); - assertEquals(expected, Lucene95CustomCodec.Mode.valueOf(v)); + assertEquals(expected, Lucene99Codec.Mode.valueOf(v)); } private CodecService createCodecService(boolean isMapperServiceNull) throws IOException { @@ -165,14 +157,6 @@ private CodecService createCodecService(boolean isMapperServiceNull) throws IOEx return buildCodecService(nodeSettings); } - private CodecService createCodecService(int randomCompressionLevel) throws IOException { - Settings nodeSettings = Settings.builder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .put("index.codec.compression_level", randomCompressionLevel) - .build(); - return buildCodecService(nodeSettings); - } - private CodecService buildCodecService(Settings nodeSettings) throws IOException { IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("_na", nodeSettings); diff --git a/server/src/test/java/org/opensearch/index/codec/customcodecs/AbstractCompressorTests.java b/server/src/test/java/org/opensearch/index/codec/customcodecs/AbstractCompressorTests.java deleted file mode 100644 index cc794eb2c48f1..0000000000000 --- a/server/src/test/java/org/opensearch/index/codec/customcodecs/AbstractCompressorTests.java +++ /dev/null @@ -1,219 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.codec.customcodecs; - -import org.apache.lucene.codecs.compressing.Compressor; -import org.apache.lucene.codecs.compressing.Decompressor; -import org.apache.lucene.store.ByteArrayDataInput; -import org.apache.lucene.store.ByteBuffersDataInput; -import org.apache.lucene.store.ByteBuffersDataOutput; -import org.apache.lucene.tests.util.LineFileDocs; -import org.apache.lucene.tests.util.TestUtil; -import org.apache.lucene.util.BytesRef; -import org.opensearch.test.OpenSearchTestCase; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.charset.StandardCharsets; -import java.util.List; -import java.util.Random; - -/** - * Test cases for compressors (based on {@See org.opensearch.common.compress.DeflateCompressTests}). 
- */ -public abstract class AbstractCompressorTests extends OpenSearchTestCase { - - abstract Compressor compressor(); - - abstract Decompressor decompressor(); - - public void testEmpty() throws IOException { - final byte[] bytes = "".getBytes(StandardCharsets.UTF_8); - doTest(bytes); - } - - public void testShortLiterals() throws IOException { - final byte[] bytes = "1234567345673456745608910123".getBytes(StandardCharsets.UTF_8); - doTest(bytes); - } - - public void testRandom() throws IOException { - Random r = random(); - for (int i = 0; i < 10; i++) { - final byte[] bytes = new byte[TestUtil.nextInt(r, 1, 100000)]; - r.nextBytes(bytes); - doTest(bytes); - } - } - - public void testLineDocs() throws IOException { - Random r = random(); - LineFileDocs lineFileDocs = new LineFileDocs(r); - for (int i = 0; i < 10; i++) { - int numDocs = TestUtil.nextInt(r, 1, 200); - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - for (int j = 0; j < numDocs; j++) { - String s = lineFileDocs.nextDoc().get("body"); - bos.write(s.getBytes(StandardCharsets.UTF_8)); - } - doTest(bos.toByteArray()); - } - lineFileDocs.close(); - } - - public void testRepetitionsL() throws IOException { - Random r = random(); - for (int i = 0; i < 10; i++) { - int numLongs = TestUtil.nextInt(r, 1, 10000); - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - long theValue = r.nextLong(); - for (int j = 0; j < numLongs; j++) { - if (r.nextInt(10) == 0) { - theValue = r.nextLong(); - } - bos.write((byte) (theValue >>> 56)); - bos.write((byte) (theValue >>> 48)); - bos.write((byte) (theValue >>> 40)); - bos.write((byte) (theValue >>> 32)); - bos.write((byte) (theValue >>> 24)); - bos.write((byte) (theValue >>> 16)); - bos.write((byte) (theValue >>> 8)); - bos.write((byte) theValue); - } - doTest(bos.toByteArray()); - } - } - - public void testRepetitionsI() throws IOException { - Random r = random(); - for (int i = 0; i < 10; i++) { - int numInts = TestUtil.nextInt(r, 1, 20000); - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - int theValue = r.nextInt(); - for (int j = 0; j < numInts; j++) { - if (r.nextInt(10) == 0) { - theValue = r.nextInt(); - } - bos.write((byte) (theValue >>> 24)); - bos.write((byte) (theValue >>> 16)); - bos.write((byte) (theValue >>> 8)); - bos.write((byte) theValue); - } - doTest(bos.toByteArray()); - } - } - - public void testRepetitionsS() throws IOException { - Random r = random(); - for (int i = 0; i < 10; i++) { - int numShorts = TestUtil.nextInt(r, 1, 40000); - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - short theValue = (short) r.nextInt(65535); - for (int j = 0; j < numShorts; j++) { - if (r.nextInt(10) == 0) { - theValue = (short) r.nextInt(65535); - } - bos.write((byte) (theValue >>> 8)); - bos.write((byte) theValue); - } - doTest(bos.toByteArray()); - } - } - - public void testMixed() throws IOException { - Random r = random(); - LineFileDocs lineFileDocs = new LineFileDocs(r); - for (int i = 0; i < 2; ++i) { - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - int prevInt = r.nextInt(); - long prevLong = r.nextLong(); - while (bos.size() < 400000) { - switch (r.nextInt(4)) { - case 0: - addInt(r, prevInt, bos); - break; - case 1: - addLong(r, prevLong, bos); - break; - case 2: - addString(lineFileDocs, bos); - break; - case 3: - addBytes(r, bos); - break; - default: - throw new IllegalStateException("Random is broken"); - } - } - doTest(bos.toByteArray()); - } - } - - private void addLong(Random r, long prev, ByteArrayOutputStream bos) { - 
long theValue = prev; - if (r.nextInt(10) != 0) { - theValue = r.nextLong(); - } - bos.write((byte) (theValue >>> 56)); - bos.write((byte) (theValue >>> 48)); - bos.write((byte) (theValue >>> 40)); - bos.write((byte) (theValue >>> 32)); - bos.write((byte) (theValue >>> 24)); - bos.write((byte) (theValue >>> 16)); - bos.write((byte) (theValue >>> 8)); - bos.write((byte) theValue); - } - - private void addInt(Random r, int prev, ByteArrayOutputStream bos) { - int theValue = prev; - if (r.nextInt(10) != 0) { - theValue = r.nextInt(); - } - bos.write((byte) (theValue >>> 24)); - bos.write((byte) (theValue >>> 16)); - bos.write((byte) (theValue >>> 8)); - bos.write((byte) theValue); - } - - private void addString(LineFileDocs lineFileDocs, ByteArrayOutputStream bos) throws IOException { - String s = lineFileDocs.nextDoc().get("body"); - bos.write(s.getBytes(StandardCharsets.UTF_8)); - } - - private void addBytes(Random r, ByteArrayOutputStream bos) throws IOException { - byte bytes[] = new byte[TestUtil.nextInt(r, 1, 10000)]; - r.nextBytes(bytes); - bos.write(bytes); - } - - private void doTest(byte[] bytes) throws IOException { - final int length = bytes.length; - - ByteBuffersDataInput in = new ByteBuffersDataInput(List.of(ByteBuffer.wrap(bytes))); - ByteBuffersDataOutput out = new ByteBuffersDataOutput(); - - // let's compress - Compressor compressor = compressor(); - compressor.compress(in, out); - byte[] compressed = out.toArrayCopy(); - - // let's decompress - BytesRef outbytes = new BytesRef(); - Decompressor decompressor = decompressor(); - decompressor.decompress(new ByteArrayDataInput(compressed), length, 0, length, outbytes); - - // get the uncompressed array out of outbytes - byte[] restored = new byte[outbytes.length]; - System.arraycopy(outbytes.bytes, 0, restored, 0, outbytes.length); - - assertArrayEquals(bytes, restored); - } - -} diff --git a/server/src/test/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormatTests.java b/server/src/test/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormatTests.java deleted file mode 100644 index e87fb56770e4c..0000000000000 --- a/server/src/test/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormatTests.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.index.codec.customcodecs; - -import org.opensearch.test.OpenSearchTestCase; - -public class Lucene95CustomStoredFieldsFormatTests extends OpenSearchTestCase { - - public void testDefaultLucene95CustomCodecMode() { - Lucene95CustomStoredFieldsFormat lucene95CustomStoredFieldsFormat = new Lucene95CustomStoredFieldsFormat(); - assertEquals(Lucene95CustomCodec.Mode.ZSTD, lucene95CustomStoredFieldsFormat.getMode()); - } - - public void testZstdNoDictLucene95CustomCodecMode() { - Lucene95CustomStoredFieldsFormat lucene95CustomStoredFieldsFormat = new Lucene95CustomStoredFieldsFormat( - Lucene95CustomCodec.Mode.ZSTD_NO_DICT - ); - assertEquals(Lucene95CustomCodec.Mode.ZSTD_NO_DICT, lucene95CustomStoredFieldsFormat.getMode()); - } - - public void testZstdModeWithCompressionLevel() { - int randomCompressionLevel = randomIntBetween(1, 6); - Lucene95CustomStoredFieldsFormat lucene95CustomStoredFieldsFormat = new Lucene95CustomStoredFieldsFormat( - Lucene95CustomCodec.Mode.ZSTD, - randomCompressionLevel - ); - assertEquals(Lucene95CustomCodec.Mode.ZSTD, lucene95CustomStoredFieldsFormat.getMode()); - assertEquals(randomCompressionLevel, lucene95CustomStoredFieldsFormat.getCompressionLevel()); - } - - public void testZstdNoDictLucene95CustomCodecModeWithCompressionLevel() { - int randomCompressionLevel = randomIntBetween(1, 6); - Lucene95CustomStoredFieldsFormat lucene95CustomStoredFieldsFormat = new Lucene95CustomStoredFieldsFormat( - Lucene95CustomCodec.Mode.ZSTD_NO_DICT, - randomCompressionLevel - ); - assertEquals(Lucene95CustomCodec.Mode.ZSTD_NO_DICT, lucene95CustomStoredFieldsFormat.getMode()); - assertEquals(randomCompressionLevel, lucene95CustomStoredFieldsFormat.getCompressionLevel()); - } - -} diff --git a/server/src/test/java/org/opensearch/index/codec/customcodecs/ZstdCompressorTests.java b/server/src/test/java/org/opensearch/index/codec/customcodecs/ZstdCompressorTests.java deleted file mode 100644 index 78cf62c08f889..0000000000000 --- a/server/src/test/java/org/opensearch/index/codec/customcodecs/ZstdCompressorTests.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ -package org.opensearch.index.codec.customcodecs; - -import org.apache.lucene.codecs.compressing.Compressor; -import org.apache.lucene.codecs.compressing.Decompressor; - -/** - * Test ZSTD compression (with dictionary enabled) - */ -public class ZstdCompressorTests extends AbstractCompressorTests { - - private final Compressor compressor = new ZstdCompressionMode().newCompressor(); - private final Decompressor decompressor = new ZstdCompressionMode().newDecompressor(); - - @Override - Compressor compressor() { - return compressor; - } - - @Override - Decompressor decompressor() { - return decompressor; - } -} diff --git a/server/src/test/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressorTests.java b/server/src/test/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressorTests.java deleted file mode 100644 index 2eda81a6af2ab..0000000000000 --- a/server/src/test/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressorTests.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ -package org.opensearch.index.codec.customcodecs; - -import org.apache.lucene.codecs.compressing.Compressor; -import org.apache.lucene.codecs.compressing.Decompressor; - -/** - * Test ZSTD compression (with no dictionary). - */ -public class ZstdNoDictCompressorTests extends AbstractCompressorTests { - - private final Compressor compressor = new ZstdNoDictCompressionMode().newCompressor(); - private final Decompressor decompressor = new ZstdNoDictCompressionMode().newDecompressor(); - - @Override - Compressor compressor() { - return compressor; - } - - @Override - Decompressor decompressor() { - return decompressor; - } -} diff --git a/server/src/test/java/org/opensearch/index/codec/fuzzy/BloomFilterTests.java b/server/src/test/java/org/opensearch/index/codec/fuzzy/BloomFilterTests.java new file mode 100644 index 0000000000000..92669d5bc1d92 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/codec/fuzzy/BloomFilterTests.java @@ -0,0 +1,82 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.fuzzy; + +import org.apache.lucene.store.ByteArrayDataOutput; +import org.apache.lucene.util.BytesRef; +import org.opensearch.common.lucene.store.ByteArrayIndexInput; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.Iterator; +import java.util.List; + +public class BloomFilterTests extends OpenSearchTestCase { + + public void testBloomFilterSerializationDeserialization() throws IOException { + int elementCount = randomIntBetween(1, 100); + long maxDocs = elementCount * 10L; // Keeping this high so that it ensures some bits are not set. 
+ BloomFilter filter = new BloomFilter(maxDocs, getFpp(), () -> idIterator(elementCount)); + byte[] buffer = new byte[(int) maxDocs * 5]; + ByteArrayDataOutput out = new ByteArrayDataOutput(buffer); + + // Write in the format readable through factory + out.writeString(filter.setType().getSetName()); + filter.writeTo(out); + + FuzzySet reconstructedFilter = FuzzySetFactory.deserializeFuzzySet(new ByteArrayIndexInput("filter", buffer)); + assertEquals(FuzzySet.SetType.BLOOM_FILTER_V1, reconstructedFilter.setType()); + + Iterator<BytesRef> idIterator = idIterator(elementCount); + while (idIterator.hasNext()) { + BytesRef element = idIterator.next(); + assertEquals(FuzzySet.Result.MAYBE, reconstructedFilter.contains(element)); + assertEquals(FuzzySet.Result.MAYBE, filter.contains(element)); + } + } + + public void testBloomFilterIsSaturated_returnsTrue() throws IOException { + BloomFilter bloomFilter = new BloomFilter(1L, getFpp(), () -> idIterator(1000)); + assertEquals(FuzzySet.SetType.BLOOM_FILTER_V1, bloomFilter.setType()); + assertEquals(true, bloomFilter.isSaturated()); + } + + public void testBloomFilterIsSaturated_returnsFalse() throws IOException { + int elementCount = randomIntBetween(1, 100); + BloomFilter bloomFilter = new BloomFilter(20000, getFpp(), () -> idIterator(elementCount)); + assertEquals(FuzzySet.SetType.BLOOM_FILTER_V1, bloomFilter.setType()); + assertEquals(false, bloomFilter.isSaturated()); + } + + public void testBloomFilterWithLargeCapacity() throws IOException { + long maxDocs = randomLongBetween(Integer.MAX_VALUE, 5L * Integer.MAX_VALUE); + BloomFilter bloomFilter = new BloomFilter(maxDocs, getFpp(), () -> List.of(new BytesRef("bar")).iterator()); + assertEquals(FuzzySet.SetType.BLOOM_FILTER_V1, bloomFilter.setType()); + } + + private double getFpp() { + return randomDoubleBetween(0.01, 0.50, true); + } + + private Iterator<BytesRef> idIterator(int count) { + return new Iterator<BytesRef>() { + int cnt = count; + + @Override + public boolean hasNext() { + return cnt-- > 0; + } + + @Override + public BytesRef next() { + return new BytesRef(Integer.toString(cnt)); + } + }; + } +} diff --git a/server/src/test/java/org/opensearch/index/codec/fuzzy/FuzzyFilterPostingsFormatTests.java b/server/src/test/java/org/opensearch/index/codec/fuzzy/FuzzyFilterPostingsFormatTests.java new file mode 100644 index 0000000000000..868c2175d0689 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/codec/fuzzy/FuzzyFilterPostingsFormatTests.java @@ -0,0 +1,34 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.codec.fuzzy; + +import org.apache.lucene.codecs.Codec; +import org.apache.lucene.tests.index.BasePostingsFormatTestCase; +import org.apache.lucene.tests.util.TestUtil; + +import java.util.TreeMap; + +public class FuzzyFilterPostingsFormatTests extends BasePostingsFormatTestCase { + + private TreeMap<String, FuzzySetParameters> params = new TreeMap<>() { + @Override + public FuzzySetParameters get(Object k) { + return new FuzzySetParameters(() -> FuzzySetParameters.DEFAULT_FALSE_POSITIVE_PROBABILITY); + } + }; + + private Codec fuzzyFilterCodec = TestUtil.alwaysPostingsFormat( + new FuzzyFilterPostingsFormat(TestUtil.getDefaultPostingsFormat(), new FuzzySetFactory(params)) + ); + + @Override + protected Codec getCodec() { + return fuzzyFilterCodec; + } +} diff --git a/server/src/test/java/org/opensearch/index/engine/CompletionStatsCacheTests.java b/server/src/test/java/org/opensearch/index/engine/CompletionStatsCacheTests.java index b746d0ba8a56d..0c87c384e0749 100644 --- a/server/src/test/java/org/opensearch/index/engine/CompletionStatsCacheTests.java +++ b/server/src/test/java/org/opensearch/index/engine/CompletionStatsCacheTests.java @@ -32,14 +32,14 @@ package org.opensearch.index.engine; import org.apache.lucene.codecs.PostingsFormat; -import org.apache.lucene.codecs.lucene95.Lucene95Codec; +import org.apache.lucene.codecs.lucene99.Lucene99Codec; import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryCachingPolicy; -import org.apache.lucene.search.suggest.document.Completion90PostingsFormat; +import org.apache.lucene.search.suggest.document.Completion99PostingsFormat; import org.apache.lucene.search.suggest.document.SuggestField; import org.apache.lucene.store.Directory; import org.opensearch.OpenSearchException; @@ -69,8 +69,8 @@ public void testExceptionsAreNotCached() { public void testCompletionStatsCache() throws IOException, InterruptedException { final IndexWriterConfig indexWriterConfig = newIndexWriterConfig(); - final PostingsFormat postingsFormat = new Completion90PostingsFormat(); - indexWriterConfig.setCodec(new Lucene95Codec() { + final PostingsFormat postingsFormat = new Completion99PostingsFormat(); + indexWriterConfig.setCodec(new Lucene99Codec() { @Override public PostingsFormat getPostingsFormatForField(String field) { return postingsFormat; // all fields are suggest fields diff --git a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java index c2ac8b0e1d3b3..cc927a19fd01a 100644 --- a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java @@ -33,6 +33,7 @@ package org.opensearch.index.engine; import com.carrotsearch.randomizedtesting.generators.RandomNumbers; + import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -77,15 +78,14 @@ import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; import org.apache.lucene.store.Lock; +import org.apache.lucene.tests.index.ForceMergePolicy; +import org.apache.lucene.tests.mockfile.ExtrasFS; import org.apache.lucene.tests.store.MockDirectoryWrapper; import org.apache.lucene.util.Bits; 
import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.FixedBitSet; -import org.hamcrest.MatcherAssert; -import org.hamcrest.Matchers; import org.opensearch.OpenSearchException; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.support.TransportActions; import org.opensearch.cluster.metadata.IndexMetadata; @@ -99,8 +99,6 @@ import org.opensearch.common.SetOnce; import org.opensearch.common.TriFunction; import org.opensearch.common.UUIDs; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.logging.Loggers; @@ -111,15 +109,20 @@ import org.opensearch.common.lucene.uid.VersionsAndSeqNoResolver; import org.opensearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndSeqNo; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.util.concurrent.ReleasableLock; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexSettings; import org.opensearch.index.VersionType; import org.opensearch.index.codec.CodecService; @@ -139,7 +142,6 @@ import org.opensearch.index.seqno.RetentionLeases; import org.opensearch.index.seqno.SeqNoStats; import org.opensearch.index.seqno.SequenceNumbers; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.ShardUtils; import org.opensearch.index.store.Store; import org.opensearch.index.translog.DefaultTranslogDeletionPolicy; @@ -151,10 +153,14 @@ import org.opensearch.index.translog.TranslogDeletionPolicyFactory; import org.opensearch.index.translog.TranslogException; import org.opensearch.index.translog.listener.TranslogEventListener; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.test.DummyShardLock; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.MockLogAppender; import org.opensearch.test.VersionUtils; import org.opensearch.threadpool.ThreadPool; +import org.hamcrest.MatcherAssert; +import org.hamcrest.Matchers; +import org.junit.Assert; import java.io.Closeable; import java.io.IOException; @@ -164,6 +170,7 @@ import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; @@ -194,6 +201,15 @@ import java.util.stream.LongStream; import static java.util.Collections.shuffle; +import static org.opensearch.index.engine.Engine.Operation.Origin.LOCAL_RESET; +import static org.opensearch.index.engine.Engine.Operation.Origin.LOCAL_TRANSLOG_RECOVERY; +import 
static org.opensearch.index.engine.Engine.Operation.Origin.PEER_RECOVERY; +import static org.opensearch.index.engine.Engine.Operation.Origin.PRIMARY; +import static org.opensearch.index.engine.Engine.Operation.Origin.REPLICA; +import static org.opensearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; +import static org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; +import static org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; +import static org.opensearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.sameInstance; import static org.hamcrest.Matchers.contains; @@ -218,18 +234,8 @@ import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import static org.opensearch.index.engine.Engine.Operation.Origin.LOCAL_RESET; -import static org.opensearch.index.engine.Engine.Operation.Origin.LOCAL_TRANSLOG_RECOVERY; -import static org.opensearch.index.engine.Engine.Operation.Origin.PEER_RECOVERY; -import static org.opensearch.index.engine.Engine.Operation.Origin.PRIMARY; -import static org.opensearch.index.engine.Engine.Operation.Origin.REPLICA; -import static org.opensearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; -import static org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; -import static org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; -import static org.opensearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; public class InternalEngineTests extends EngineTestCase { @@ -3228,6 +3234,304 @@ public void testFailStart() throws IOException { } } + public void testUnreferencedFileCleanUpOnSegmentMergeFailureWithCleanUpEnabled() throws Exception { + MockDirectoryWrapper wrapper = newMockDirectory(); + final CountDownLatch cleanupCompleted = new CountDownLatch(1); + MockDirectoryWrapper.Failure fail = new MockDirectoryWrapper.Failure() { + @Override + public void eval(MockDirectoryWrapper dir) throws IOException { + // Fail segment merge with a disk-full error during merging terms + if (callStackContainsAnyOf("mergeTerms")) { + throw new IOException("No space left on device"); + } + } + }; + + wrapper.failOn(fail); + try { + Store store = createStore(wrapper); + final Engine.EventListener eventListener = new Engine.EventListener() { + @Override + public void onFailedEngine(String reason, Exception e) { + try { + // extra0 file is added as a part of + // https://lucene.apache.org/core/7_2_1/test-framework/org/apache/lucene/mockfile/ExtrasFS.html + // Safe to remove from file count along with write.lock without impacting the test. + long fileCount = Arrays.stream(store.directory().listAll()) + .filter(file -> file.equals("write.lock") == false && ExtrasFS.isExtra(file) == false) + .count(); + + // Since only one document is committed and unreferenced files are cleaned up, + // there are 4 files (*cfs, *cfe, *si and segments_*).
+ assertThat(fileCount, equalTo(4L)); + wrapper.close(); + store.close(); + engine.close(); + cleanupCompleted.countDown(); + } catch (IOException ex) { + throw new AssertionError(ex); + } + } + }; + + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + final long primaryTerm = randomLongBetween(1, Long.MAX_VALUE); + final AtomicLong retentionLeasesVersion = new AtomicLong(); + final AtomicReference<RetentionLeases> retentionLeasesHolder = new AtomicReference<>( + new RetentionLeases(primaryTerm, retentionLeasesVersion.get(), Collections.emptyList()) + ); + + // Just allow force merge so that regular merge does not close the shard first before any other operation + // + InternalEngine engine = createEngine( + config( + defaultSettings, + store, + createTempDir(), + newForceMergePolicy(), + null, + null, + null, + globalCheckpoint::get, + retentionLeasesHolder::get, + new NoneCircuitBreakerService(), + eventListener + ) + ); + + List<Segment> segments = engine.segments(true); + assertThat(segments.isEmpty(), equalTo(true)); + + ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), B_1, null); + engine.index(indexForDoc(doc)); + engine.refresh("test"); + engine.flush(); + + segments = engine.segments(false); + assertThat(segments.size(), equalTo(1)); + + ParsedDocument doc2 = testParsedDocument("2", null, testDocumentWithTextField(), B_2, null); + engine.index(indexForDoc(doc2)); + engine.refresh("test"); + + segments = engine.segments(false); + assertThat(segments.size(), equalTo(2)); + + // IndexWriter can throw either IOException or IllegalStateException depending on whether tragedy is set or not. + expectThrowsAnyOf( + Arrays.asList(IOException.class, IllegalStateException.class), + () -> engine.forceMerge(true, 1, false, false, false, UUIDs.randomBase64UUID()) + ); + + assertTrue(cleanupCompleted.await(10, TimeUnit.SECONDS)); + // Cleanup count will be incremented whenever cleanup is performed correctly. + long unreferencedFileCleanUpsPerformed = engine.unreferencedFileCleanUpsPerformed(); + assertThat(unreferencedFileCleanUpsPerformed, equalTo(1L)); + } catch (Exception ex) { + throw new AssertionError(ex); + } + } + + public void testUnreferencedFileCleanUpOnSegmentMergeFailureWithCleanUpDisabled() throws Exception { + MockDirectoryWrapper wrapper = newMockDirectory(); + final CountDownLatch cleanupCompleted = new CountDownLatch(1); + MockDirectoryWrapper.Failure fail = new MockDirectoryWrapper.Failure() { + + @Override + public void eval(MockDirectoryWrapper dir) throws IOException { + if (callStackContainsAnyOf("mergeTerms")) { + throw new IOException("No space left on device"); + } + } + }; + + wrapper.failOn(fail); + try { + Store store = createStore(wrapper); + final Engine.EventListener eventListener = new Engine.EventListener() { + @Override + public void onFailedEngine(String reason, Exception e) { + try { + // extra0 file is added as a part of + // https://lucene.apache.org/core/7_2_1/test-framework/org/apache/lucene/mockfile/ExtrasFS.html + // Safe to remove from file count along with write.lock without impacting the test + long fileCount = Arrays.stream(store.directory().listAll()) + .filter(file -> file.equals("write.lock") == false && ExtrasFS.isExtra(file) == false) + .count(); + + // Since cleanup is not happening now, all unreferenced files will be present as well.
+ assertThat(fileCount, equalTo(13L)); + wrapper.close(); + store.close(); + engine.close(); + cleanupCompleted.countDown(); + } catch (IOException ex) { + throw new AssertionError(ex); + } + } + }; + + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + final long primaryTerm = randomLongBetween(1, Long.MAX_VALUE); + final AtomicLong retentionLeasesVersion = new AtomicLong(); + final AtomicReference<RetentionLeases> retentionLeasesHolder = new AtomicReference<>( + new RetentionLeases(primaryTerm, retentionLeasesVersion.get(), Collections.emptyList()) + ); + InternalEngine engine = createEngine( + config( + defaultSettings, + store, + createTempDir(), + newForceMergePolicy(), + null, + null, + null, + globalCheckpoint::get, + retentionLeasesHolder::get, + new NoneCircuitBreakerService(), + eventListener + ) + ); + + // Disable cleanup + final IndexSettings indexSettings = engine.config().getIndexSettings(); + final IndexMetadata indexMetadata = IndexMetadata.builder(indexSettings.getIndexMetadata()) + .settings( + Settings.builder().put(indexSettings.getSettings()).put(IndexSettings.INDEX_UNREFERENCED_FILE_CLEANUP.getKey(), false) + ) + .build(); + indexSettings.updateIndexMetadata(indexMetadata); + + List<Segment> segments = engine.segments(true); + assertThat(segments.isEmpty(), equalTo(true)); + + ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), B_1, null); + engine.index(indexForDoc(doc)); + engine.refresh("test"); + engine.flush(); + + segments = engine.segments(false); + assertThat(segments.size(), equalTo(1)); + + ParsedDocument doc2 = testParsedDocument("2", null, testDocumentWithTextField(), B_2, null); + engine.index(indexForDoc(doc2)); + engine.refresh("test"); + + segments = engine.segments(false); + assertThat(segments.size(), equalTo(2)); + + // IndexWriter can throw either IOException or IllegalStateException depending on whether tragedy is set or not. + expectThrowsAnyOf( + Arrays.asList(IOException.class, IllegalStateException.class), + () -> engine.forceMerge(true, 1, false, false, false, UUIDs.randomBase64UUID()) + ); + + assertTrue(cleanupCompleted.await(10, TimeUnit.SECONDS)); + // Cleanup count will not be incremented whenever cleanup is disabled. + long unreferencedFileCleanUpsPerformed = engine.unreferencedFileCleanUpsPerformed(); + assertThat(unreferencedFileCleanUpsPerformed, equalTo(0L)); + } catch (Exception ex) { + throw new AssertionError(ex); + } + } + + public void testUnreferencedFileCleanUpFailsOnSegmentMergeFailureWhenDirectoryClosed() throws Exception { + MockDirectoryWrapper wrapper = newMockDirectory(); + final CountDownLatch cleanupCompleted = new CountDownLatch(1); + MockDirectoryWrapper.Failure fail = new MockDirectoryWrapper.Failure() { + + @Override + public void eval(MockDirectoryWrapper dir) throws IOException { + if (callStackContainsAnyOf("mergeTerms")) { + throw new IOException("No space left on device"); + } + } + }; + + wrapper.failOn(fail); + MockLogAppender mockAppender = MockLogAppender.createForLoggers(Loggers.getLogger(Engine.class, shardId)); + try { + // Create a store where directory is closed during unreferenced file cleanup. 
+ Store store = createFailingDirectoryStore(wrapper); + final Engine.EventListener eventListener = new Engine.EventListener() { + @Override + public void onFailedEngine(String reason, Exception e) { + try { + store.close(); + engine.close(); + mockAppender.assertAllExpectationsMatched(); + mockAppender.close(); + cleanupCompleted.countDown(); + } catch (IOException ex) { + throw new RuntimeException(ex); + } + } + }; + + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + final long primaryTerm = randomLongBetween(1, Long.MAX_VALUE); + final AtomicLong retentionLeasesVersion = new AtomicLong(); + final AtomicReference<RetentionLeases> retentionLeasesHolder = new AtomicReference<>( + new RetentionLeases(primaryTerm, retentionLeasesVersion.get(), Collections.emptyList()) + ); + InternalEngine engine = createEngine( + config( + defaultSettings, + store, + createTempDir(), + newForceMergePolicy(), + null, + null, + null, + globalCheckpoint::get, + retentionLeasesHolder::get, + new NoneCircuitBreakerService(), + eventListener + ) + ); + + List<Segment> segments = engine.segments(true); + assertThat(segments.isEmpty(), equalTo(true)); + + ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), B_1, null); + engine.index(indexForDoc(doc)); + engine.refresh("test"); + engine.flush(); + + segments = engine.segments(false); + assertThat(segments.size(), equalTo(1)); + + ParsedDocument doc2 = testParsedDocument("2", null, testDocumentWithTextField(), B_2, null); + engine.index(indexForDoc(doc2)); + engine.refresh("test"); + + segments = engine.segments(false); + assertThat(segments.size(), equalTo(2)); + + // Close the store so that unreferenced file cleanup will fail. + store.close(); + + final String message = "Error while deleting unreferenced file *"; + mockAppender.start(); + mockAppender.addExpectation( + new MockLogAppender.SeenEventExpectation("expected message", Engine.class.getCanonicalName(), Level.ERROR, message) + ); + + // IndexWriter can throw either IOException or IllegalStateException depending on whether tragedy is set or not. + expectThrowsAnyOf( + Arrays.asList(IOException.class, IllegalStateException.class), + () -> engine.forceMerge(true, 1, false, false, false, UUIDs.randomBase64UUID()) + ); + + assertTrue(cleanupCompleted.await(10, TimeUnit.SECONDS)); + // Cleanup count will not be incremented when cleanup itself fails. + long unreferencedFileCleanUpsPerformed = engine.unreferencedFileCleanUpsPerformed(); + assertThat(unreferencedFileCleanUpsPerformed, equalTo(0L)); + } catch (Exception ex) { + throw new AssertionError(ex); + } + } + public void testSettings() { CodecService codecService = new CodecService(null, engine.config().getIndexSettings(), logger); LiveIndexWriterConfig currentIndexWriterConfig = engine.getCurrentIndexWriterConfig(); @@ -3236,6 +3540,33 @@ public void testSettings() { assertEquals(currentIndexWriterConfig.getCodec().getName(), codecService.codec(codecName).getName()); } + /** + * Creates a merge policy which only supports force merge. + * @return a merge policy which only supports force merge. + */ + private MergePolicy newForceMergePolicy() { + return new ForceMergePolicy(new TieredMergePolicy()); + } + + /** + * Creates a store whose directory is closed when it is accessed during unreferenced file cleanup. + * + * @param directory directory used for creating the store. + * @return a store whose directory is closed when it is accessed during unreferenced file cleanup.
+ */ + private Store createFailingDirectoryStore(final Directory directory) { + return new Store(shardId, INDEX_SETTINGS, directory, new DummyShardLock(shardId)) { + @Override + public Directory directory() { + if (callStackContainsAnyOf("cleanUpUnreferencedFiles")) { + throw new AlreadyClosedException("store is already closed"); + } + + return super.directory(); + } + }; + } + public void testCurrentTranslogUUIIDIsCommitted() throws IOException { final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); try (Store store = createStore()) { @@ -3671,7 +4002,7 @@ public void testRecoverFromForeignTranslog() throws IOException { final Path badTranslogLog = createTempDir(); final String badUUID = Translog.createEmptyTranslog(badTranslogLog, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); Translog translog = new LocalTranslog( - new TranslogConfig(shardId, badTranslogLog, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), + new TranslogConfig(shardId, badTranslogLog, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), badUUID, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbers.NO_OPS_PERFORMED, @@ -3688,7 +4019,8 @@ public void testRecoverFromForeignTranslog() throws IOException { shardId, translog.location(), config.getIndexSettings(), - BigArrays.NON_RECYCLING_INSTANCE + BigArrays.NON_RECYCLING_INSTANCE, + "" ); EngineConfig brokenConfig = new EngineConfig.Builder().shardId(shardId) @@ -5712,7 +6044,7 @@ public void testSeqNoGenerator() throws IOException { "routing", Collections.singletonList(document), source, - XContentType.JSON, + MediaTypeRegistry.JSON, null ); @@ -6921,7 +7253,11 @@ public void testMaxSeqNoInCommitUserData() throws Exception { engine.ensureOpen(); while (running.get() && assertAndGetInternalTranslogManager(engine.translogManager()).getTranslog().currentFileGeneration() < 500) { - engine.translogManager().rollTranslogGeneration(); // make adding operations to translog slower + try { + engine.translogManager().rollTranslogGeneration(); // make adding operations to translog slower + } catch (IOException e) { + fail("io exception not expected"); + } } }); rollTranslog.start(); @@ -7377,7 +7713,8 @@ public void testNotWarmUpSearcherInEngineCtor() throws Exception { config.getTranslogConfig().getShardId(), createTempDir(), config.getTranslogConfig().getIndexSettings(), - config.getTranslogConfig().getBigArrays() + config.getTranslogConfig().getBigArrays(), + "" ); EngineConfig configWithWarmer = new EngineConfig.Builder().shardId(config.getShardId()) .threadPool(config.getThreadPool()) @@ -7497,7 +7834,9 @@ public void testMaxDocsOnPrimary() throws Exception { assertNotNull(result.getFailure()); assertThat( result.getFailure().getMessage(), - containsString("Number of documents in the index can't exceed [" + maxDocs + "]") + containsString( + "Number of documents in shard " + shardId + " exceeds the limit of [" + maxDocs + "] documents per shard" + ) ); assertThat(result.getSeqNo(), equalTo(UNASSIGNED_SEQ_NO)); assertThat(engine.getLocalCheckpointTracker().getMaxSeqNo(), equalTo(maxSeqNo)); @@ -7529,16 +7868,86 @@ public void testMaxDocsOnReplica() throws Exception { } } - public void testGetSegmentInfosSnapshot() throws IOException { + public void testGetSegmentInfosSnapshot_AllSnapshotFilesPreservedAcrossCommit() throws Exception { IOUtils.close(store, engine); - Store store = createStore(); - InternalEngine engine = spy(createEngine(store, createTempDir())); - GatedCloseable<SegmentInfos> segmentInfosSnapshot = 
engine.getSegmentInfosSnapshot(); - assertNotNull(segmentInfosSnapshot); - assertNotNull(segmentInfosSnapshot.get()); - verify(engine, times(1)).getLatestSegmentInfos(); - store.close(); - engine.close(); + store = createStore(); + engine = createEngine(store, createTempDir()); + List<Engine.Operation> operations = generateHistoryOnReplica( + randomIntBetween(1, 100), + randomBoolean(), + randomBoolean(), + randomBoolean() + ); + for (Engine.Operation op : operations) { + applyOperation(engine, op); + } + engine.refresh("test"); + try (GatedCloseable<SegmentInfos> snapshot = engine.getSegmentInfosSnapshot()) { + Collection<String> files = snapshot.get().files(true); + Set<String> localFiles = Set.of(store.directory().listAll()); + for (String file : files) { + assertTrue("Local directory contains file " + file, localFiles.contains(file)); + } + + engine.flush(true, true); + + try ( + final GatedCloseable<SegmentInfos> snapshotAfterFlush = engine.getSegmentInfosSnapshot(); + final GatedCloseable<IndexCommit> commit = engine.acquireLastIndexCommit(false) + ) { + final SegmentInfos segmentInfos = snapshotAfterFlush.get(); + assertNotEquals(segmentInfos.getSegmentsFileName(), snapshot.get().getSegmentsFileName()); + assertEquals(commit.get().getSegmentsFileName(), segmentInfos.getSegmentsFileName()); + } + + // original files are preserved. + localFiles = Set.of(store.directory().listAll()); + for (String file : files) { + assertTrue("Local directory contains file " + file, localFiles.contains(file)); + } + } + } + + public void testGetSegmentInfosSnapshot_LatestCommitOnDiskHasHigherGenThanReader() throws Exception { + IOUtils.close(store, engine); + store = createStore(); + engine = createEngine(store, createTempDir()); + // to simulate this we need concurrent flush/refresh. 
+ AtomicBoolean run = new AtomicBoolean(true); + AtomicInteger docId = new AtomicInteger(0); + Thread refresher = new Thread(() -> { + while (run.get()) { + try { + engine.index(indexForDoc(createParsedDoc(Integer.toString(docId.getAndIncrement()), null))); + engine.refresh("test"); + getSnapshotAndAssertFilesExistLocally(); + } catch (Exception e) { + Assert.fail(); + } + } + }); + refresher.start(); + try { + for (int i = 0; i < 10; i++) { + engine.flush(true, true); + getSnapshotAndAssertFilesExistLocally(); + } + } catch (Exception e) { + Assert.fail(); + } finally { + run.set(false); + refresher.join(); + } + } + + private void getSnapshotAndAssertFilesExistLocally() throws IOException { + try (GatedCloseable<SegmentInfos> snapshot = engine.getSegmentInfosSnapshot()) { + Collection<String> files = snapshot.get().files(true); + Set<String> localFiles = Set.of(store.directory().listAll()); + for (String file : files) { + assertTrue("Local directory contains file " + file, localFiles.contains(file)); + } + } } public void testGetProcessedLocalCheckpoint() throws IOException { diff --git a/server/src/test/java/org/opensearch/index/engine/LiveVersionMapTests.java b/server/src/test/java/org/opensearch/index/engine/LiveVersionMapTests.java index b1e033232420b..8ac584b3fa923 100644 --- a/server/src/test/java/org/opensearch/index/engine/LiveVersionMapTests.java +++ b/server/src/test/java/org/opensearch/index/engine/LiveVersionMapTests.java @@ -32,11 +32,11 @@ package org.opensearch.index.engine; +import org.apache.lucene.tests.util.RamUsageTester; +import org.apache.lucene.tests.util.TestUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.Constants; -import org.apache.lucene.tests.util.RamUsageTester; -import org.apache.lucene.tests.util.TestUtil; import org.opensearch.common.lease.Releasable; import org.opensearch.index.translog.Translog; import org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java index 64fe42493c686..57509c5daa2b1 100644 --- a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java @@ -12,7 +12,10 @@ import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.search.ReferenceManager; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.util.Version; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.UUIDs; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; @@ -28,17 +31,20 @@ import java.io.IOException; import java.nio.file.Path; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; -import static org.hamcrest.Matchers.equalTo; -import static org.opensearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; import static org.opensearch.index.seqno.SequenceNumbers.LOCAL_CHECKPOINT_KEY; import static org.opensearch.index.seqno.SequenceNumbers.MAX_SEQ_NO; +import static 
org.opensearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; +import static org.hamcrest.Matchers.equalTo; public class NRTReplicationEngineTests extends EngineTestCase { @@ -47,14 +53,6 @@ public class NRTReplicationEngineTests extends EngineTestCase { Settings.builder().put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build() ); - private static final IndexSettings REMOTE_STORE_INDEX_SETTINGS = IndexSettingsModule.newIndexSettings( - "index", - Settings.builder() - .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) - .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") - .build() - ); - public void testCreateEngine() throws IOException { final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); try ( @@ -78,6 +76,28 @@ public void testCreateEngine() throws IOException { } } + public void testCreateEngineWithException() throws IOException { + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + final Store nrtEngineStore = createStore(INDEX_SETTINGS, newDirectory()); + try { + // Passing null translogPath to induce failure + final EngineConfig replicaConfig = config( + defaultSettings, + nrtEngineStore, + null, + NoMergePolicy.INSTANCE, + null, + null, + globalCheckpoint::get + ); + new NRTReplicationEngine(replicaConfig); + } catch (Exception e) { + // Ignore as engine creation will fail + } + assertEquals(1, nrtEngineStore.refCount()); + nrtEngineStore.close(); + } + public void testEngineWritesOpsToTranslog() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); @@ -140,29 +160,6 @@ public void testUpdateSegments_replicaReceivesSISWithHigherGen() throws IOExcept } } - public void testUpdateSegments_replicaReceivesSISWithHigherGen_remoteStoreEnabled() throws IOException { - final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); - - try ( - final Store nrtEngineStore = createStore(REMOTE_STORE_INDEX_SETTINGS, newDirectory()); - final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore, REMOTE_STORE_INDEX_SETTINGS) - ) { - // assume we start at the same gen. - assertEquals(2, nrtEngine.getLatestSegmentInfos().getGeneration()); - assertEquals(nrtEngine.getLatestSegmentInfos().getGeneration(), nrtEngine.getLastCommittedSegmentInfos().getGeneration()); - assertEquals(engine.getLatestSegmentInfos().getGeneration(), nrtEngine.getLatestSegmentInfos().getGeneration()); - - // flush the primary engine - we don't need any segments, just force a new commit point. - engine.flush(true, true); - assertEquals(3, engine.getLatestSegmentInfos().getGeneration()); - - // When remote store is enabled, we don't commit on replicas since all segments are durably persisted in the store - nrtEngine.updateSegments(engine.getLatestSegmentInfos()); - assertEquals(2, nrtEngine.getLastCommittedSegmentInfos().getGeneration()); - assertEquals(2, nrtEngine.getLatestSegmentInfos().getGeneration()); - } - } - public void testUpdateSegments_replicaReceivesSISWithLowerGen() throws IOException { // if the replica is already at segments_N that is received, it will commit segments_N+1. 
final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); @@ -171,16 +168,21 @@ public void testUpdateSegments_replicaReceivesSISWithLowerGen() throws IOExcepti final Store nrtEngineStore = createStore(INDEX_SETTINGS, newDirectory()); final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore) ) { + assertEquals(5, nrtEngine.getLatestSegmentInfos().getVersion()); nrtEngine.getLatestSegmentInfos().changed(); nrtEngine.getLatestSegmentInfos().changed(); + assertEquals(7, nrtEngine.getLatestSegmentInfos().getVersion()); + assertEquals(2, nrtEngine.getLastCommittedSegmentInfos().getGeneration()); + // commit the infos to push us to segments_3. - nrtEngine.commitSegmentInfos(); + nrtEngine.flush(); assertEquals(3, nrtEngine.getLastCommittedSegmentInfos().getGeneration()); assertEquals(3, nrtEngine.getLatestSegmentInfos().getGeneration()); // update the replica with segments_2 from the primary. final SegmentInfos primaryInfos = engine.getLatestSegmentInfos(); assertEquals(2, primaryInfos.getGeneration()); + assertEquals(5, primaryInfos.getVersion()); nrtEngine.updateSegments(primaryInfos); assertEquals(4, nrtEngine.getLastCommittedSegmentInfos().getGeneration()); assertEquals(4, nrtEngine.getLatestSegmentInfos().getGeneration()); @@ -304,7 +306,7 @@ public void testTrimTranslogOps() throws Exception { } } - public void testCommitSegmentInfos() throws Exception { + public void testFlush() throws Exception { // This test asserts that NRTReplication#commitSegmentInfos creates a new commit point with the latest checkpoints // stored in user data. final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); @@ -325,7 +327,7 @@ public void testCommitSegmentInfos() throws Exception { LocalCheckpointTracker localCheckpointTracker = nrtEngine.getLocalCheckpointTracker(); final long maxSeqNo = localCheckpointTracker.getMaxSeqNo(); final long processedCheckpoint = localCheckpointTracker.getProcessedCheckpoint(); - nrtEngine.commitSegmentInfos(); + nrtEngine.flush(); // ensure getLatestSegmentInfos returns an updated infos ref with correct userdata. final SegmentInfos latestSegmentInfos = nrtEngine.getLatestSegmentInfos(); @@ -343,6 +345,10 @@ public void testCommitSegmentInfos() throws Exception { userData = committedInfos.getUserData(); assertEquals(processedCheckpoint, Long.parseLong(userData.get(LOCAL_CHECKPOINT_KEY))); assertEquals(maxSeqNo, Long.parseLong(userData.get(MAX_SEQ_NO))); + + try (final GatedCloseable<IndexCommit> indexCommit = nrtEngine.acquireLastIndexCommit(true)) { + assertEquals(committedInfos.getGeneration() + 1, indexCommit.get().getGeneration()); + } } } @@ -367,4 +373,272 @@ private NRTReplicationEngine buildNrtReplicaEngine(AtomicLong globalCheckpoint, private NRTReplicationEngine buildNrtReplicaEngine(AtomicLong globalCheckpoint, Store store) throws IOException { return buildNrtReplicaEngine(globalCheckpoint, store, defaultSettings); } + + public void testGetSegmentInfosSnapshotPreservesFilesUntilRelease() throws Exception { + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + + try ( + final Store nrtEngineStore = createStore(INDEX_SETTINGS, newDirectory()); + final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore, INDEX_SETTINGS) + ) { + // only index 2 docs here, this will create segments _0 and _1 and after forcemerge into _2. 
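+            // the snapshot taken below must keep every file it references alive until it is
+            // closed, even after the primary force-merges those segments away.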
+            final int docCount = 2;
+            List<Engine.Operation> operations = generateHistoryOnReplica(docCount, randomBoolean(), randomBoolean(), randomBoolean());
+            for (Engine.Operation op : operations) {
+                applyOperation(engine, op);
+                applyOperation(nrtEngine, op);
+                // refresh after each operation to create a new segment per op.
+                engine.refresh("test");
+            }
+            assertEquals(2, engine.segmentsStats(false, false).getCount());
+            // wipe the nrt directory initially so we can sync with primary.
+            Lucene.cleanLuceneIndex(nrtEngineStore.directory());
+            assertFalse(
+                Arrays.stream(nrtEngineStore.directory().listAll())
+                    .anyMatch(file -> file.equals("write.lock") == false && file.equals("extra0") == false)
+            );
+            for (String file : engine.getLatestSegmentInfos().files(true)) {
+                nrtEngineStore.directory().copyFrom(store.directory(), file, file, IOContext.DEFAULT);
+            }
+            nrtEngine.updateSegments(engine.getLatestSegmentInfos());
+            assertEquals(engine.getLatestSegmentInfos(), nrtEngine.getLatestSegmentInfos());
+            final GatedCloseable<SegmentInfos> snapshot = nrtEngine.getSegmentInfosSnapshot();
+            final Collection<String> replicaSnapshotFiles = snapshot.get().files(false);
+            List<String> replicaFiles = List.of(nrtEngine.store.directory().listAll());
+
+            // merge primary down to 1 segment
+            engine.forceMerge(true, 1, false, false, false, UUIDs.randomBase64UUID());
+            // we expect a 3rd segment to be created after merge.
+            assertEquals(3, engine.segmentsStats(false, false).getCount());
+            final Collection<String> latestPrimaryFiles = engine.getLatestSegmentInfos().files(false);
+
+            // copy new segments in and load reader.
+            for (String file : latestPrimaryFiles) {
+                if (replicaFiles.contains(file) == false) {
+                    nrtEngineStore.directory().copyFrom(store.directory(), file, file, IOContext.DEFAULT);
+                }
+            }
+            nrtEngine.updateSegments(engine.getLatestSegmentInfos());
+
+            replicaFiles = List.of(nrtEngine.store.directory().listAll());
+            assertTrue(replicaFiles.containsAll(replicaSnapshotFiles));
+
+            // close snapshot, files should be cleaned up
+            snapshot.close();
+
+            replicaFiles = List.of(nrtEngine.store.directory().listAll());
+            assertFalse(replicaFiles.containsAll(replicaSnapshotFiles));
+
+            // Ensure we still have all the active files. Note - we exclude the infos file here because we aren't committing:
+            // the nrt reader will still reference segments_n-1 after being loaded until a local commit occurs.
+            assertTrue(replicaFiles.containsAll(nrtEngine.getLatestSegmentInfos().files(false)));
+        }
+    }
+
+    public void testRemoveExtraSegmentsOnStartup() throws Exception {
+        final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
+        List<Engine.Operation> operations = generateHistoryOnReplica(2, randomBoolean(), randomBoolean(), randomBoolean());
+        for (Engine.Operation op : operations) {
+            applyOperation(engine, op);
+            // refresh after each operation to create a new segment per op.
+            engine.refresh("test");
+        }
+        try (final Store nrtEngineStore = createStore(INDEX_SETTINGS, newDirectory())) {
+            nrtEngineStore.createEmpty(Version.LATEST);
+            final Collection<String> extraSegments = engine.getLatestSegmentInfos().files(false);
+            for (String file : extraSegments) {
+                nrtEngineStore.directory().copyFrom(store.directory(), file, file, IOContext.DEFAULT);
+            }
+            List<String> replicaFiles = List.of(nrtEngineStore.directory().listAll());
+            for (String file : extraSegments) {
+                assertTrue(replicaFiles.contains(file));
+            }
+            try (NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore, INDEX_SETTINGS)) {
+                assertUnreferenced(nrtEngine, extraSegments);
+            }
+        }
+    }
+
+    public void testPreserveLatestCommit() throws Exception {
+        final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
+
+        try (
+            final Store nrtEngineStore = createStore(INDEX_SETTINGS, newDirectory());
+            final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore, INDEX_SETTINGS)
+        ) {
+            final int docCount = 4;
+            List<Engine.Operation> operations = generateHistoryOnReplica(docCount, randomBoolean(), randomBoolean(), randomBoolean());
+            indexOperations(nrtEngine, operations.subList(0, 2));
+            // wipe the nrt directory initially so we can sync with primary.
+            cleanAndCopySegmentsFromPrimary(nrtEngine);
+            SegmentInfos primaryInfos;
+
+            final SegmentInfos lastCommittedSegmentInfos = nrtEngine.getLastCommittedSegmentInfos();
+            final Collection<String> lastCommittedFiles = lastCommittedSegmentInfos.files(true);
+            assertRefCounted(nrtEngine, lastCommittedFiles);
+
+            // get and close a snapshot - this will decref files when closed.
+            final GatedCloseable<SegmentInfos> segmentInfosSnapshot = nrtEngine.getSegmentInfosSnapshot();
+            segmentInfosSnapshot.close();
+            assertRefCounted(nrtEngine, lastCommittedFiles);
+
+            // index more docs and refresh the reader - this will incref/decref files again
+            indexOperations(nrtEngine, operations.subList(2, 4));
+            primaryInfos = engine.getLatestSegmentInfos();
+            copySegments(primaryInfos.files(false), nrtEngine);
+            nrtEngine.updateSegments(primaryInfos);
+
+            // get the additional segments that are only on the reader - not part of a commit.
+            final Collection<String> readerOnlySegments = primaryInfos.files(false);
+            readerOnlySegments.removeAll(lastCommittedFiles);
+            assertRefCounted(nrtEngine, readerOnlySegments);
+            // re-read the last commit from disk here in case the primary engine has flushed.
+            assertRefCounted(nrtEngine, nrtEngine.getLastCommittedSegmentInfos().files(true));
+
+            // flush the primary
+            engine.flush(true, true);
+            final Collection<String> latestPrimaryInfos = engine.getLatestSegmentInfos().files(false);
+            final Collection<String> mergedAwayFiles = nrtEngine.getLastCommittedSegmentInfos().files(false);
+            // remove files still part of latest commit.
+            mergedAwayFiles.removeAll(latestPrimaryInfos);
+            copySegments(latestPrimaryInfos, nrtEngine);
+            nrtEngine.updateSegments(engine.getLatestSegmentInfos());
+            // after flush our original segment_n is removed but some segments may remain.
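+            // the old segments_N and any file dropped from both the new commit and the
+            // reader should now be fully decref'd and deleted from the store.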
+            assertUnreferenced(nrtEngine, List.of(lastCommittedSegmentInfos.getSegmentsFileName()));
+            assertUnreferenced(nrtEngine, mergedAwayFiles);
+            // close the engine - ensure we preserved the last commit
+            final SegmentInfos infosBeforeClose = nrtEngine.getLatestSegmentInfos();
+            nrtEngine.close();
+            assertRefCounted(nrtEngine, infosBeforeClose.files(false));
+        }
+    }
+
+    private void assertRefCounted(NRTReplicationEngine nrtEngine, Collection<String> files) throws IOException {
+        List<String> storeFiles = List.of(nrtEngine.store.directory().listAll());
+        for (String file : files) {
+            // a referenced file has a refCount of at least 1 - files active on the reader and in the latest commit may be incref'd more than once
+            assertTrue("Expected: " + file + " to be referenced", nrtEngine.replicaFileTracker.refCount(file) >= 1);
+            assertTrue(storeFiles.contains(file));
+        }
+    }
+
+    private void assertUnreferenced(NRTReplicationEngine nrtEngine, Collection<String> files) throws IOException {
+        List<String> storeFiles = List.of(nrtEngine.store.directory().listAll());
+        for (String file : files) {
+            // an unreferenced file has a refCount of 0 and must have been deleted from the store
+            assertEquals("Expected: " + file + " to be unreferenced", 0, nrtEngine.replicaFileTracker.refCount(file));
+            assertFalse(storeFiles.contains(file));
+        }
+    }
+
+    private void cleanAndCopySegmentsFromPrimary(NRTReplicationEngine nrtEngine) throws IOException {
+        Lucene.cleanLuceneIndex(nrtEngine.store.directory());
+        assertFalse(
+            Arrays.stream(nrtEngine.store.directory().listAll())
+                .anyMatch(file -> file.equals("write.lock") == false && file.equals("extra0") == false)
+        );
+        SegmentInfos primaryInfos = engine.getLatestSegmentInfos();
+        copySegments(primaryInfos.files(false), nrtEngine);
+        nrtEngine.updateSegments(primaryInfos);
+    }
+
+    private void indexOperations(NRTReplicationEngine nrtEngine, List<Engine.Operation> operations) throws IOException {
+        for (Engine.Operation op : operations) {
+            applyOperation(engine, op);
+            applyOperation(nrtEngine, op);
+            engine.refresh("test");
+        }
+    }
+
+    public void testDecrefToZeroRemovesFile() throws IOException {
+        final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
+
+        try (
+            final Store nrtEngineStore = createStore(INDEX_SETTINGS, newDirectory());
+            final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore, INDEX_SETTINGS)
+        ) {
+            Lucene.cleanLuceneIndex(nrtEngineStore.directory());
+            copySegments(engine.getLatestSegmentInfos().files(true), nrtEngine);
+            nrtEngine.updateSegments(engine.getLatestSegmentInfos());
+            final SegmentInfos lastCommittedSegmentInfos = nrtEngine.getLastCommittedSegmentInfos();
+            assertEquals(
+                "Segments_N is incref'd to 1",
+                1,
+                nrtEngine.replicaFileTracker.refCount(lastCommittedSegmentInfos.getSegmentsFileName())
+            );
+            // create a new commit and update infos
+            engine.flush(true, true);
+            nrtEngine.updateSegments(engine.getLatestSegmentInfos());
+            assertEquals(
+                "Segments_N is removed",
+                0,
+                nrtEngine.replicaFileTracker.refCount(lastCommittedSegmentInfos.getSegmentsFileName())
+            );
+            assertFalse(List.of(nrtEngineStore.directory().listAll()).contains(lastCommittedSegmentInfos.getSegmentsFileName()));
+        }
+    }
+
+    public void testCommitOnCloseThrowsException_decRefStore() throws Exception {
+        final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
+
+        final Store nrtEngineStore = createStore(INDEX_SETTINGS, newDirectory());
+        final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, 
nrtEngineStore, INDEX_SETTINGS); + List<Engine.Operation> operations = generateHistoryOnReplica( + randomIntBetween(1, 10), + randomBoolean(), + randomBoolean(), + randomBoolean() + ); + indexOperations(nrtEngine, operations); + // wipe the nrt directory initially so we can sync with primary. + cleanAndCopySegmentsFromPrimary(nrtEngine); + final Optional<String> toDelete = Set.of(nrtEngineStore.directory().listAll()).stream().filter(f -> f.endsWith(".si")).findAny(); + assertTrue(toDelete.isPresent()); + nrtEngineStore.directory().deleteFile(toDelete.get()); + assertEquals(2, nrtEngineStore.refCount()); + nrtEngine.close(); + assertEquals(1, nrtEngineStore.refCount()); + assertTrue(nrtEngineStore.isMarkedCorrupted()); + // store will throw when eventually closed, not handled here. + assertThrows(RuntimeException.class, nrtEngineStore::close); + } + + public void testFlushThrowsFlushFailedExceptionOnCorruption() throws Exception { + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + + final Store nrtEngineStore = createStore(INDEX_SETTINGS, newDirectory()); + final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore, INDEX_SETTINGS); + List<Engine.Operation> operations = generateHistoryOnReplica( + randomIntBetween(1, 10), + randomBoolean(), + randomBoolean(), + randomBoolean() + ); + indexOperations(nrtEngine, operations); + // wipe the nrt directory initially so we can sync with primary. + cleanAndCopySegmentsFromPrimary(nrtEngine); + final Optional<String> toDelete = Set.of(nrtEngineStore.directory().listAll()).stream().filter(f -> f.endsWith(".si")).findAny(); + assertTrue(toDelete.isPresent()); + nrtEngineStore.directory().deleteFile(toDelete.get()); + assertThrows(FlushFailedEngineException.class, nrtEngine::flush); + nrtEngine.close(); + if (nrtEngineStore.isMarkedCorrupted()) { + assertThrows(RuntimeException.class, nrtEngineStore::close); + } else { + // With certain mock directories a NoSuchFileException is thrown which is not treated as a + // corruption Exception. In these cases we don't expect any issue on store close. + nrtEngineStore.close(); + } + } + + private void copySegments(Collection<String> latestPrimaryFiles, Engine nrtEngine) throws IOException { + final Store store = nrtEngine.store; + final List<String> replicaFiles = List.of(store.directory().listAll()); + // copy new segments in and load reader. 
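+ // segment files are write-once, so any file already present on the replica is
+ // identical to the primary's copy and can safely be skipped.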
+ for (String file : latestPrimaryFiles) { + if (replicaFiles.contains(file) == false) { + store.directory().copyFrom(this.store.directory(), file, file, IOContext.DEFAULT); + } + } + } } diff --git a/server/src/test/java/org/opensearch/index/engine/NRTReplicationReaderManagerTests.java b/server/src/test/java/org/opensearch/index/engine/NRTReplicationReaderManagerTests.java index 98f1a416731e4..d635b38e811c4 100644 --- a/server/src/test/java/org/opensearch/index/engine/NRTReplicationReaderManagerTests.java +++ b/server/src/test/java/org/opensearch/index/engine/NRTReplicationReaderManagerTests.java @@ -46,43 +46,4 @@ public void testCreateNRTreaderManager() throws IOException { } } } - - public void testUpdateSegmentsWhileRefreshing() throws IOException, InterruptedException { - try (final Store store = createStore()) { - store.createEmpty(Version.LATEST); - final DirectoryReader reader = DirectoryReader.open(store.directory()); - NRTReplicationReaderManager readerManager = new NRTReplicationReaderManager( - OpenSearchDirectoryReader.wrap(reader, shardId), - (files) -> {}, - (files) -> {} - ); - - final SegmentInfos infos_2 = readerManager.getSegmentInfos().clone(); - infos_2.changed(); - - Thread refreshThread = new Thread(() -> { - try { - readerManager.maybeRefresh(); - } catch (IOException e) { - throw new RuntimeException(e); - } - }); - Thread updateThread = new Thread(() -> { - try { - readerManager.updateSegments(infos_2); - } catch (IOException e) { - throw new RuntimeException(e); - } - }); - refreshThread.start(); - updateThread.start(); - refreshThread.join(); - updateThread.join(); - try (final OpenSearchDirectoryReader acquire = readerManager.acquire()) { - final StandardDirectoryReader standardReader = NRTReplicationReaderManager.unwrapStandardReader(acquire); - assertEquals(infos_2.version, standardReader.getSegmentInfos().version); - } - assertEquals(infos_2, readerManager.getSegmentInfos()); - } - } } diff --git a/server/src/test/java/org/opensearch/index/engine/NoOpEngineTests.java b/server/src/test/java/org/opensearch/index/engine/NoOpEngineTests.java index b9fe69c282471..423d246115a9a 100644 --- a/server/src/test/java/org/opensearch/index/engine/NoOpEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/NoOpEngineTests.java @@ -44,9 +44,9 @@ import org.opensearch.cluster.routing.TestShardRouting; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.ParsedDocument; import org.opensearch.index.seqno.ReplicationTracker; diff --git a/server/src/test/java/org/opensearch/index/engine/ReadOnlyEngineTests.java b/server/src/test/java/org/opensearch/index/engine/ReadOnlyEngineTests.java index 808bf2056dabe..7c9a08d69d3c2 100644 --- a/server/src/test/java/org/opensearch/index/engine/ReadOnlyEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/ReadOnlyEngineTests.java @@ -38,11 +38,11 @@ import org.apache.lucene.tests.util.TestUtil; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; import org.opensearch.common.settings.Settings; import 
org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.ParsedDocument; diff --git a/server/src/test/java/org/opensearch/index/engine/RecoverySourcePruneMergePolicyTests.java b/server/src/test/java/org/opensearch/index/engine/RecoverySourcePruneMergePolicyTests.java index fed521e2d5ed9..34c0dd3478578 100644 --- a/server/src/test/java/org/opensearch/index/engine/RecoverySourcePruneMergePolicyTests.java +++ b/server/src/test/java/org/opensearch/index/engine/RecoverySourcePruneMergePolicyTests.java @@ -56,8 +56,8 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; -import org.apache.lucene.util.InfoStream; import org.apache.lucene.tests.util.NullInfoStream; +import org.apache.lucene.util.InfoStream; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/index/engine/SegmentTests.java b/server/src/test/java/org/opensearch/index/engine/SegmentTests.java index 77dfccd353ede..840bfe37b979d 100644 --- a/server/src/test/java/org/opensearch/index/engine/SegmentTests.java +++ b/server/src/test/java/org/opensearch/index/engine/SegmentTests.java @@ -33,11 +33,11 @@ package org.opensearch.index.engine; import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; import org.apache.lucene.search.SortedNumericSelector; import org.apache.lucene.search.SortedNumericSortField; -import org.apache.lucene.search.SortedSetSortField; import org.apache.lucene.search.SortedSetSelector; -import org.apache.lucene.search.SortField; +import org.apache.lucene.search.SortedSetSortField; import org.apache.lucene.util.Version; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; diff --git a/server/src/test/java/org/opensearch/index/fielddata/AbstractFieldDataImplTestCase.java b/server/src/test/java/org/opensearch/index/fielddata/AbstractFieldDataImplTestCase.java index 1ffacf98a6836..2b44e759f4ff9 100644 --- a/server/src/test/java/org/opensearch/index/fielddata/AbstractFieldDataImplTestCase.java +++ b/server/src/test/java/org/opensearch/index/fielddata/AbstractFieldDataImplTestCase.java @@ -39,6 +39,7 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; +import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.TopFieldDocs; import org.apache.lucene.util.BytesRef; import org.opensearch.core.common.Strings; @@ -144,6 +145,27 @@ public void testSingleValueAllSet() throws Exception { } } + public void testWideSortField() throws Exception { + if (this instanceof NoOrdinalsStringFieldDataTests || this instanceof PagedBytesStringFieldDataTests) { + return; // Numeric types are not supported there. 
+ } + // integer to long widening should happen + IndexFieldData<?> indexFieldData = getForField("int", "value"); + SortField sortField = indexFieldData.wideSortField(null, MultiValueMode.MIN, null, false); + assertTrue(((SortedNumericSortField) sortField).getNumericType() == SortField.Type.LONG); + + // long to long no widening should happen + indexFieldData = getForField("long", "value"); + sortField = indexFieldData.wideSortField(null, MultiValueMode.MIN, null, false); + assertTrue(((SortedNumericSortField) sortField).getNumericType() == SortField.Type.LONG); + + // float to float no widening should happen + indexFieldData = getForField("float", "value"); + sortField = indexFieldData.wideSortField(null, MultiValueMode.MIN, null, false); + assertTrue(((SortedNumericSortField) sortField).getNumericType() == SortField.Type.FLOAT); + + } + protected abstract void fillSingleValueWithMissing() throws Exception; public void assertValues(SortedBinaryDocValues values, int docId, BytesRef... actualValues) throws IOException { diff --git a/server/src/test/java/org/opensearch/index/fielddata/AbstractFieldDataTestCase.java b/server/src/test/java/org/opensearch/index/fielddata/AbstractFieldDataTestCase.java index a2eea22e920d9..63c74b3cfa64f 100644 --- a/server/src/test/java/org/opensearch/index/fielddata/AbstractFieldDataTestCase.java +++ b/server/src/test/java/org/opensearch/index/fielddata/AbstractFieldDataTestCase.java @@ -46,6 +46,7 @@ import org.apache.lucene.store.ByteBuffersDirectory; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexService; import org.opensearch.index.cache.bitset.BitsetFilterCache; import org.opensearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; @@ -60,12 +61,11 @@ import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.index.mapper.TextFieldMapper; import org.opensearch.index.query.QueryShardContext; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.IndicesService; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.junit.After; import org.junit.Before; diff --git a/server/src/test/java/org/opensearch/index/fielddata/AbstractStringFieldDataTestCase.java b/server/src/test/java/org/opensearch/index/fielddata/AbstractStringFieldDataTestCase.java index 9bf88913a2895..3aa698260686d 100644 --- a/server/src/test/java/org/opensearch/index/fielddata/AbstractStringFieldDataTestCase.java +++ b/server/src/test/java/org/opensearch/index/fielddata/AbstractStringFieldDataTestCase.java @@ -56,17 +56,17 @@ import org.apache.lucene.search.join.QueryBitSetProducer; import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.search.join.ToParentBlockJoinQuery; +import org.apache.lucene.tests.util.TestUtil; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.FixedBitSet; -import org.apache.lucene.tests.util.TestUtil; import org.apache.lucene.util.UnicodeUtil; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; import org.opensearch.common.lucene.search.Queries; +import org.opensearch.core.index.shard.ShardId; import 
org.opensearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.opensearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource; import org.opensearch.index.fielddata.ordinals.GlobalOrdinalsIndexFieldData; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.search.MultiValueMode; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/index/fielddata/BinaryDVFieldDataTests.java b/server/src/test/java/org/opensearch/index/fielddata/BinaryDVFieldDataTests.java index 66c785c3d4334..ce5b93b34a89e 100644 --- a/server/src/test/java/org/opensearch/index/fielddata/BinaryDVFieldDataTests.java +++ b/server/src/test/java/org/opensearch/index/fielddata/BinaryDVFieldDataTests.java @@ -34,12 +34,11 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.util.BytesRef; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.compress.CompressedXContent; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.mapper.DocumentMapper; import org.opensearch.index.mapper.ParsedDocument; import org.opensearch.index.mapper.SourceToParse; @@ -54,19 +53,18 @@ protected boolean hasDocValues() { } public void testDocValue() throws Exception { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("test") - .startObject("properties") - .startObject("field") - .field("type", "binary") - .field("doc_values", true) - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("test") + .startObject("properties") + .startObject("field") + .field("type", "binary") + .field("doc_values", true) + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); final DocumentMapper mapper = mapperService.documentMapperParser().parse("test", new CompressedXContent(mapping)); @@ -81,16 +79,16 @@ public void testDocValue() throws Exception { doc.endArray(); } doc.endObject(); - ParsedDocument d = mapper.parse(new SourceToParse("test", "1", BytesReference.bytes(doc), XContentType.JSON)); + ParsedDocument d = mapper.parse(new SourceToParse("test", "1", BytesReference.bytes(doc), MediaTypeRegistry.JSON)); writer.addDocument(d.rootDoc()); BytesRef bytes1 = randomBytes(); doc = XContentFactory.jsonBuilder().startObject().field("field", bytes1.bytes, bytes1.offset, bytes1.length).endObject(); - d = mapper.parse(new SourceToParse("test", "2", BytesReference.bytes(doc), XContentType.JSON)); + d = mapper.parse(new SourceToParse("test", "2", BytesReference.bytes(doc), MediaTypeRegistry.JSON)); writer.addDocument(d.rootDoc()); doc = XContentFactory.jsonBuilder().startObject().endObject(); - d = mapper.parse(new SourceToParse("test", "3", BytesReference.bytes(doc), XContentType.JSON)); + d = mapper.parse(new SourceToParse("test", "3", BytesReference.bytes(doc), MediaTypeRegistry.JSON)); writer.addDocument(d.rootDoc()); // test remove duplicate value @@ -106,7 +104,7 @@ public void testDocValue() throws Exception { doc.endArray(); } doc.endObject(); - d = mapper.parse(new SourceToParse("test", "4", BytesReference.bytes(doc), XContentType.JSON)); + d 
= mapper.parse(new SourceToParse("test", "4", BytesReference.bytes(doc), MediaTypeRegistry.JSON)); writer.addDocument(d.rootDoc()); IndexFieldData<?> indexFieldData = getForField("field"); diff --git a/server/src/test/java/org/opensearch/index/fielddata/FieldDataCacheTests.java b/server/src/test/java/org/opensearch/index/fielddata/FieldDataCacheTests.java index 48ab9d60727b7..a49391013dcd4 100644 --- a/server/src/test/java/org/opensearch/index/fielddata/FieldDataCacheTests.java +++ b/server/src/test/java/org/opensearch/index/fielddata/FieldDataCacheTests.java @@ -44,15 +44,15 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.index.fielddata.plain.AbstractLeafOrdinalsFieldData; import org.opensearch.index.fielddata.plain.PagedBytesIndexFieldData; import org.opensearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; import org.opensearch.index.mapper.TextFieldMapper; -import org.opensearch.core.index.shard.ShardId; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.search.aggregations.support.CoreValuesSourceType; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.FieldMaskingReader; +import org.opensearch.test.OpenSearchTestCase; import static org.hamcrest.Matchers.equalTo; diff --git a/server/src/test/java/org/opensearch/index/fielddata/IndexFieldDataServiceTests.java b/server/src/test/java/org/opensearch/index/fielddata/IndexFieldDataServiceTests.java index ac8691855db1a..3fb43b7dbdc4e 100644 --- a/server/src/test/java/org/opensearch/index/fielddata/IndexFieldDataServiceTests.java +++ b/server/src/test/java/org/opensearch/index/fielddata/IndexFieldDataServiceTests.java @@ -46,6 +46,7 @@ import org.opensearch.common.SetOnce; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexService; import org.opensearch.index.fielddata.plain.SortedNumericIndexFieldData; import org.opensearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; @@ -56,17 +57,15 @@ import org.opensearch.index.mapper.Mapper.BuilderContext; import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.index.mapper.TextFieldMapper; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.IndicesService; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.plugins.Plugin; import org.opensearch.search.lookup.SearchLookup; -import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.test.IndexSettingsModule; import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; -import org.mockito.Mockito; import java.util.Arrays; import java.util.Collection; @@ -74,6 +73,8 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Supplier; +import org.mockito.Mockito; + import static org.hamcrest.Matchers.containsString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -137,13 +138,15 @@ public void testGetForFieldRuntimeField() { ); final SetOnce<Supplier<SearchLookup>> searchLookupSetOnce = new 
SetOnce<>(); MappedFieldType ft = mock(MappedFieldType.class); + final int shardId = randomInt(); when(ft.fielddataBuilder(Mockito.any(), Mockito.any())).thenAnswer(invocationOnMock -> { @SuppressWarnings("unchecked") Supplier<SearchLookup> searchLookup = (Supplier<SearchLookup>) invocationOnMock.getArguments()[1]; searchLookupSetOnce.set(searchLookup); + assertEquals(searchLookup.get().shardId(), shardId); return (IndexFieldData.Builder) (cache, breakerService) -> null; }); - SearchLookup searchLookup = new SearchLookup(null, null); + SearchLookup searchLookup = new SearchLookup(null, null, shardId); ifdService.getForField(ft, "qualified", () -> searchLookup); assertSame(searchLookup, searchLookupSetOnce.get().get()); } diff --git a/server/src/test/java/org/opensearch/index/fielddata/ScriptDocValuesGeoPointsTests.java b/server/src/test/java/org/opensearch/index/fielddata/ScriptDocValuesGeoPointsTests.java index 2ca5facc608ed..7af96344dd0eb 100644 --- a/server/src/test/java/org/opensearch/index/fielddata/ScriptDocValuesGeoPointsTests.java +++ b/server/src/test/java/org/opensearch/index/fielddata/ScriptDocValuesGeoPointsTests.java @@ -32,9 +32,9 @@ package org.opensearch.index.fielddata; -import org.opensearch.index.fielddata.ScriptDocValues.GeoPoints; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.geo.GeoUtils; +import org.opensearch.index.fielddata.ScriptDocValues.GeoPoints; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/index/fielddata/plain/HalfFloatFielddataTests.java b/server/src/test/java/org/opensearch/index/fielddata/plain/HalfFloatFielddataTests.java index 62c475c88045a..b5d936910b84c 100644 --- a/server/src/test/java/org/opensearch/index/fielddata/plain/HalfFloatFielddataTests.java +++ b/server/src/test/java/org/opensearch/index/fielddata/plain/HalfFloatFielddataTests.java @@ -39,8 +39,8 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.LeafReader; import org.apache.lucene.store.Directory; -import org.opensearch.common.util.io.IOUtils; import org.apache.lucene.tests.util.TestUtil; +import org.opensearch.common.util.io.IOUtils; import org.opensearch.index.fielddata.FieldData; import org.opensearch.index.fielddata.SortedNumericDoubleValues; import org.opensearch.index.mapper.NumberFieldMapper; diff --git a/server/src/test/java/org/opensearch/index/fieldstats/FieldStatsProviderRefreshTests.java b/server/src/test/java/org/opensearch/index/fieldstats/FieldStatsProviderRefreshTests.java index 286c2a805692d..16ce40d588a1b 100644 --- a/server/src/test/java/org/opensearch/index/fieldstats/FieldStatsProviderRefreshTests.java +++ b/server/src/test/java/org/opensearch/index/fieldstats/FieldStatsProviderRefreshTests.java @@ -38,9 +38,9 @@ import org.opensearch.action.search.SearchType; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; +import org.opensearch.core.rest.RestStatus; import org.opensearch.index.query.QueryBuilders; import org.opensearch.indices.IndicesRequestCache; -import org.opensearch.core.rest.RestStatus; import org.opensearch.test.OpenSearchSingleNodeTestCase; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; diff --git a/server/src/test/java/org/opensearch/index/get/DocumentFieldTests.java b/server/src/test/java/org/opensearch/index/get/DocumentFieldTests.java index d3f6fcbe5b54c..254a609f7b9e5 100644 --- 
a/server/src/test/java/org/opensearch/index/get/DocumentFieldTests.java +++ b/server/src/test/java/org/opensearch/index/get/DocumentFieldTests.java @@ -32,13 +32,15 @@ package org.opensearch.index.get; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; import org.opensearch.common.document.DocumentField; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.mapper.IgnoredFieldMapper; import org.opensearch.indices.IndicesModule; import org.opensearch.test.OpenSearchTestCase; @@ -52,7 +54,7 @@ import java.util.function.Predicate; import java.util.function.Supplier; -import static org.opensearch.common.xcontent.XContentHelper.toXContent; +import static org.opensearch.core.xcontent.XContentHelper.toXContent; import static org.opensearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertToXContentEquivalent; @@ -60,13 +62,13 @@ public class DocumentFieldTests extends OpenSearchTestCase { public void testToXContent() { DocumentField documentField = new DocumentField("field", Arrays.asList("value1", "value2")); - String output = Strings.toString(XContentType.JSON, documentField); + String output = Strings.toString(MediaTypeRegistry.JSON, documentField); assertEquals("{\"field\":[\"value1\",\"value2\"]}", output); } public void testEqualsAndHashcode() { checkEqualsAndHashCode( - randomDocumentField(XContentType.JSON).v1(), + randomDocumentField(MediaTypeRegistry.JSON).v1(), DocumentFieldTests::copyDocumentField, DocumentFieldTests::mutateDocumentField ); @@ -102,7 +104,7 @@ private static DocumentField copyDocumentField(DocumentField documentField) { private static DocumentField mutateDocumentField(DocumentField documentField) { List<Supplier<DocumentField>> mutations = new ArrayList<>(); mutations.add(() -> new DocumentField(randomUnicodeOfCodepointLength(15), documentField.getValues())); - mutations.add(() -> new DocumentField(documentField.getName(), randomDocumentField(XContentType.JSON).v1().getValues())); + mutations.add(() -> new DocumentField(documentField.getName(), randomDocumentField(MediaTypeRegistry.JSON).v1().getValues())); final int index = randomFrom(0, 1); final DocumentField randomCandidate = mutations.get(index).get(); if (!documentField.equals(randomCandidate)) { @@ -115,12 +117,12 @@ private static DocumentField mutateDocumentField(DocumentField documentField) { } } - public static Tuple<DocumentField, DocumentField> randomDocumentField(XContentType xContentType) { - return randomDocumentField(xContentType, randomBoolean(), fieldName -> false); // don't exclude any meta-fields + public static Tuple<DocumentField, DocumentField> randomDocumentField(MediaType mediaType) { + return randomDocumentField(mediaType, randomBoolean(), fieldName -> false); // don't exclude any meta-fields } public static Tuple<DocumentField, DocumentField> randomDocumentField( - XContentType xContentType, + MediaType mediaType, boolean isMetafield, Predicate<String> excludeMetaFieldFilter ) { @@ -143,7 +145,7 @@ public static Tuple<DocumentField, DocumentField> 
randomDocumentField( switch (randomIntBetween(0, 2)) { case 0: String fieldName = randomAlphaOfLengthBetween(3, 10); - Tuple<List<Object>, List<Object>> tuple = RandomObjects.randomStoredFieldValues(random(), xContentType); + Tuple<List<Object>, List<Object>> tuple = RandomObjects.randomStoredFieldValues(random(), mediaType); DocumentField input = new DocumentField(fieldName, tuple.v1()); DocumentField expected = new DocumentField(fieldName, tuple.v2()); return Tuple.tuple(input, expected); diff --git a/server/src/test/java/org/opensearch/index/get/GetResultTests.java b/server/src/test/java/org/opensearch/index/get/GetResultTests.java index 3796a71bf58bb..64b14744a40d2 100644 --- a/server/src/test/java/org/opensearch/index/get/GetResultTests.java +++ b/server/src/test/java/org/opensearch/index/get/GetResultTests.java @@ -32,16 +32,17 @@ package org.opensearch.index.get; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; import org.opensearch.common.document.DocumentField; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.IndexFieldMapper; import org.opensearch.index.mapper.SeqNoFieldMapper; @@ -61,7 +62,7 @@ import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; -import static org.opensearch.common.xcontent.XContentHelper.toXContent; +import static org.opensearch.core.xcontent.XContentHelper.toXContent; import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; import static org.opensearch.index.get.DocumentFieldTests.randomDocumentField; import static org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; @@ -105,7 +106,7 @@ public void testToXContent() throws IOException { singletonMap("field1", new DocumentField("field1", singletonList("value1"))), singletonMap("field1", new DocumentField("metafield", singletonList("metavalue"))) ); - String output = Strings.toString(XContentType.JSON, getResult); + String output = Strings.toString(MediaTypeRegistry.JSON, getResult); assertEquals( "{\"_index\":\"index\",\"_id\":\"id\",\"_version\":1,\"_seq_no\":0,\"_primary_term\":1," + "\"metafield\":\"metavalue\",\"found\":true,\"_source\":{ \"field1\" : \"value1\", \"field2\":\"value2\"}," @@ -115,7 +116,7 @@ public void testToXContent() throws IOException { } { GetResult getResult = new GetResult("index", "id", UNASSIGNED_SEQ_NO, 0, 1, false, null, null, null); - String output = Strings.toString(XContentType.JSON, getResult); + String output = Strings.toString(MediaTypeRegistry.JSON, getResult); assertEquals("{\"_index\":\"index\",\"_id\":\"id\",\"found\":false}", output); } } @@ -173,7 +174,7 @@ public void testToXContentEmbedded() throws IOException { null ); - BytesReference originalBytes = toXContentEmbedded(getResult, XContentType.JSON, false); + BytesReference 
originalBytes = toXContentEmbedded(getResult, MediaTypeRegistry.JSON, false); assertEquals( "{\"_seq_no\":0,\"_primary_term\":1,\"found\":true,\"_source\":{\"foo\":\"bar\",\"baz\":[\"baz_0\",\"baz_1\"]}," + "\"fields\":{\"foo\":[\"bar\"],\"baz\":[\"baz_0\",\"baz_1\"]}}", @@ -184,7 +185,7 @@ public void testToXContentEmbedded() throws IOException { public void testToXContentEmbeddedNotFound() throws IOException { GetResult getResult = new GetResult("index", "id", UNASSIGNED_SEQ_NO, 0, 1, false, null, null, null); - BytesReference originalBytes = toXContentEmbedded(getResult, XContentType.JSON, false); + BytesReference originalBytes = toXContentEmbedded(getResult, MediaTypeRegistry.JSON, false); assertEquals("{\"found\":false}", originalBytes.utf8ToString()); } @@ -196,7 +197,7 @@ public void testSerializationNotFound() throws IOException { getResult.writeTo(out); getResult = new GetResult(out.bytes().streamInput()); - BytesReference originalBytes = toXContentEmbedded(getResult, XContentType.JSON, false); + BytesReference originalBytes = toXContentEmbedded(getResult, MediaTypeRegistry.JSON, false); assertEquals("{\"found\":false}", originalBytes.utf8ToString()); } @@ -212,7 +213,11 @@ public void testGetSourceAsBytes() { } public void testEqualsAndHashcode() { - checkEqualsAndHashCode(randomGetResult(XContentType.JSON).v1(), GetResultTests::copyGetResult, GetResultTests::mutateGetResult); + checkEqualsAndHashCode( + randomGetResult(MediaTypeRegistry.JSON).v1(), + GetResultTests::copyGetResult, + GetResultTests::mutateGetResult + ); } public static GetResult copyGetResult(GetResult getResult) { @@ -305,14 +310,14 @@ public static GetResult mutateGetResult(GetResult getResult) { getResult.getVersion(), getResult.isExists(), getResult.internalSourceRef(), - randomDocumentFields(XContentType.JSON, randomBoolean()).v1(), + randomDocumentFields(MediaTypeRegistry.JSON, randomBoolean()).v1(), null ) ); return randomFrom(mutations).get(); } - public static Tuple<GetResult, GetResult> randomGetResult(XContentType xContentType) { + public static Tuple<GetResult, GetResult> randomGetResult(MediaType mediaType) { final String index = randomAlphaOfLengthBetween(3, 10); final String type = randomAlphaOfLengthBetween(3, 10); final String id = randomAlphaOfLengthBetween(3, 10); @@ -334,11 +339,11 @@ public static Tuple<GetResult, GetResult> randomGetResult(XContentType xContentT source = RandomObjects.randomSource(random()); } if (randomBoolean()) { - Tuple<Map<String, DocumentField>, Map<String, DocumentField>> tuple = randomDocumentFields(xContentType, false); + Tuple<Map<String, DocumentField>, Map<String, DocumentField>> tuple = randomDocumentFields(mediaType, false); docFields = tuple.v1(); expectedDocFields = tuple.v2(); - tuple = randomDocumentFields(xContentType, true); + tuple = randomDocumentFields(mediaType, true); metaFields = tuple.v1(); expectedMetaFields = tuple.v2(); } @@ -364,7 +369,7 @@ public static Tuple<GetResult, GetResult> randomGetResult(XContentType xContentT } public static Tuple<Map<String, DocumentField>, Map<String, DocumentField>> randomDocumentFields( - XContentType xContentType, + MediaType mediaType, boolean isMetaFields ) { int numFields = isMetaFields ? 
randomIntBetween(1, 3) : randomIntBetween(2, 10); @@ -378,7 +383,7 @@ public static Tuple<Map<String, DocumentField>, Map<String, DocumentField>> rand || field.equals(SourceFieldMapper.NAME) || field.equals(SeqNoFieldMapper.NAME); while (fields.size() < numFields) { - Tuple<DocumentField, DocumentField> tuple = randomDocumentField(xContentType, isMetaFields, excludeMetaFieldFilter); + Tuple<DocumentField, DocumentField> tuple = randomDocumentField(mediaType, isMetaFields, excludeMetaFieldFilter); DocumentField getField = tuple.v1(); DocumentField expectedGetField = tuple.v2(); if (fields.putIfAbsent(getField.getName(), getField) == null) { @@ -388,8 +393,7 @@ public static Tuple<Map<String, DocumentField>, Map<String, DocumentField>> rand return Tuple.tuple(fields, expectedFields); } - private static BytesReference toXContentEmbedded(GetResult getResult, XContentType xContentType, boolean humanReadable) - throws IOException { - return XContentHelper.toXContent(getResult::toXContentEmbedded, xContentType, humanReadable); + private static BytesReference toXContentEmbedded(GetResult getResult, MediaType mediaType, boolean humanReadable) throws IOException { + return org.opensearch.core.xcontent.XContentHelper.toXContent(getResult::toXContentEmbedded, mediaType, humanReadable); } } diff --git a/server/src/test/java/org/opensearch/index/mapper/BinaryFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/BinaryFieldMapperTests.java index 40e7786e829c6..87b5ad3434944 100644 --- a/server/src/test/java/org/opensearch/index/mapper/BinaryFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/BinaryFieldMapperTests.java @@ -33,10 +33,10 @@ package org.opensearch.index.mapper; import org.apache.lucene.util.BytesRef; +import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.compress.CompressorFactory; -import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.compress.CompressorRegistry; import org.opensearch.core.xcontent.XContentBuilder; import java.io.IOException; @@ -119,11 +119,11 @@ public void testStoredValue() throws IOException { // case 2: a value that looks compressed: this used to fail in 1.x BytesStreamOutput out = new BytesStreamOutput(); - try (OutputStream compressed = CompressorFactory.defaultCompressor().threadLocalOutputStream(out)) { + try (OutputStream compressed = CompressorRegistry.defaultCompressor().threadLocalOutputStream(out)) { new BytesArray(binaryValue1).writeTo(compressed); } final byte[] binaryValue2 = BytesReference.toBytes(out.bytes()); - assertTrue(CompressorFactory.isCompressed(new BytesArray(binaryValue2))); + assertTrue(CompressorRegistry.isCompressed(new BytesArray(binaryValue2))); for (byte[] value : Arrays.asList(binaryValue1, binaryValue2)) { ParsedDocument doc = mapperService.documentMapper().parse(source(b -> b.field("field", value))); diff --git a/server/src/test/java/org/opensearch/index/mapper/BooleanFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/BooleanFieldMapperTests.java index 5e58e6c3c127e..8dec03a353d16 100644 --- a/server/src/test/java/org/opensearch/index/mapper/BooleanFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/BooleanFieldMapperTests.java @@ -40,10 +40,9 @@ import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; 
-import org.opensearch.common.Strings;
+import org.opensearch.common.xcontent.XContentFactory;
 import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.common.xcontent.XContentFactory;
 import org.opensearch.index.mapper.ParseContext.Document;
 
 import java.io.IOException;
@@ -105,7 +104,7 @@ public void testSerialization() throws IOException {
         XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
         mapper.toXContent(builder, ToXContent.EMPTY_PARAMS);
         builder.endObject();
-        assertEquals("{\"field\":{\"type\":\"boolean\"}}", Strings.toString(builder));
+        assertEquals("{\"field\":{\"type\":\"boolean\"}}", builder.toString());
 
         // now change some parameters
         defaultMapper = createDocumentMapper(fieldMapping(b -> {
@@ -117,7 +116,7 @@ public void testSerialization() throws IOException {
         builder = XContentFactory.jsonBuilder().startObject();
         mapper.toXContent(builder, ToXContent.EMPTY_PARAMS);
         builder.endObject();
-        assertEquals("{\"field\":{\"type\":\"boolean\",\"doc_values\":false,\"null_value\":true}}", Strings.toString(builder));
+        assertEquals("{\"field\":{\"type\":\"boolean\",\"doc_values\":false,\"null_value\":true}}", builder.toString());
     }
 
     public void testParsesBooleansStrict() throws IOException {
diff --git a/server/src/test/java/org/opensearch/index/mapper/CompletionFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/CompletionFieldMapperTests.java
index a5e3e7bd363d7..b1785f5d7b14c 100644
--- a/server/src/test/java/org/opensearch/index/mapper/CompletionFieldMapperTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/CompletionFieldMapperTests.java
@@ -46,11 +46,11 @@
 import org.apache.lucene.util.CharsRefBuilder;
 import org.apache.lucene.util.automaton.Operations;
 import org.apache.lucene.util.automaton.RegExp;
-import org.opensearch.common.Strings;
-import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.common.unit.Fuzziness;
-import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.common.xcontent.json.JsonXContent;
+import org.opensearch.core.common.Strings;
+import org.opensearch.core.common.bytes.BytesReference;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.index.IndexSettings;
@@ -187,7 +187,7 @@ public void testCompletionAnalyzerSettings() throws Exception {
         assertEquals(
             "{\"field\":{\"type\":\"completion\",\"analyzer\":\"simple\",\"search_analyzer\":\"standard\","
                 + "\"preserve_separators\":false,\"preserve_position_increments\":true,\"max_input_length\":50}}",
-            Strings.toString(XContentType.JSON, fieldMapper)
+            Strings.toString(MediaTypeRegistry.JSON, fieldMapper)
         );
     }
 
diff --git a/server/src/test/java/org/opensearch/index/mapper/CopyToMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/CopyToMapperTests.java
index ce91d45029517..b274cf28429e8 100644
--- a/server/src/test/java/org/opensearch/index/mapper/CopyToMapperTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/CopyToMapperTests.java
@@ -33,11 +33,11 @@
 package org.opensearch.index.mapper;
 
 import org.apache.lucene.index.IndexableField;
+import org.opensearch.common.xcontent.json.JsonXContent;
 import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.common.xcontent.json.JsonXContent;
 import org.opensearch.index.mapper.ParseContext.Document;
 
 import org.hamcrest.Matchers;
diff --git a/server/src/test/java/org/opensearch/index/mapper/DataStreamFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/DataStreamFieldMapperTests.java
index 50edd29b4b4c9..8fdc1b8a62be6 100644
--- a/server/src/test/java/org/opensearch/index/mapper/DataStreamFieldMapperTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/DataStreamFieldMapperTests.java
@@ -8,11 +8,10 @@
 
 package org.opensearch.index.mapper;
 
-import org.opensearch.common.Strings;
-import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.common.compress.CompressedXContent;
 import org.opensearch.common.xcontent.XContentFactory;
-import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.common.bytes.BytesReference;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.test.OpenSearchSingleNodeTestCase;
 
 import static org.hamcrest.Matchers.containsString;
@@ -21,16 +20,15 @@ public class DataStreamFieldMapperTests extends OpenSearchSingleNodeTestCase {
 
     public void testDefaultTimestampField() throws Exception {
-        String mapping = Strings.toString(
-            XContentFactory.jsonBuilder()
-                .startObject()
-                .startObject("_doc")
-                .startObject("_data_stream_timestamp")
-                .field("enabled", true)
-                .endObject()
-                .endObject()
-                .endObject()
-        );
+        String mapping = XContentFactory.jsonBuilder()
+            .startObject()
+            .startObject("_doc")
+            .startObject("_data_stream_timestamp")
+            .field("enabled", true)
+            .endObject()
+            .endObject()
+            .endObject()
+            .toString();
 
         assertDataStreamFieldMapper(mapping, "@timestamp");
     }
@@ -38,37 +36,35 @@ public void testDefaultTimestampField() throws Exception {
     public void testCustomTimestampField() throws Exception {
         String timestampFieldName = "timestamp_" + randomAlphaOfLength(5);
 
-        String mapping = Strings.toString(
-            XContentFactory.jsonBuilder()
-                .startObject()
-                .startObject("_doc")
-                .startObject("_data_stream_timestamp")
-                .field("enabled", true)
-                .startObject("timestamp_field")
-                .field("name", timestampFieldName)
-                .endObject()
-                .endObject()
-                .endObject()
-                .endObject()
-        );
+        String mapping = XContentFactory.jsonBuilder()
+            .startObject()
+            .startObject("_doc")
+            .startObject("_data_stream_timestamp")
+            .field("enabled", true)
+            .startObject("timestamp_field")
+            .field("name", timestampFieldName)
+            .endObject()
+            .endObject()
+            .endObject()
+            .endObject()
+            .toString();
 
         assertDataStreamFieldMapper(mapping, timestampFieldName);
     }
 
     public void testDeeplyNestedCustomTimestampField() throws Exception {
-        String mapping = Strings.toString(
-            XContentFactory.jsonBuilder()
-                .startObject()
-                .startObject("_doc")
-                .startObject("_data_stream_timestamp")
-                .field("enabled", true)
-                .startObject("timestamp_field")
-                .field("name", "event.meta.created_at")
-                .endObject()
-                .endObject()
-                .endObject()
-                .endObject()
-        );
+        String mapping = XContentFactory.jsonBuilder()
+            .startObject()
+            .startObject("_doc")
+            .startObject("_data_stream_timestamp")
+            .field("enabled", true)
+            .startObject("timestamp_field")
+            .field("name", "event.meta.created_at")
+            .endObject()
+            .endObject()
+            .endObject()
+            .endObject()
+            .toString();
 
         DocumentMapper mapper = createIndex("test").mapperService()
             .merge("_doc", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE);
@@ -87,7 +83,7 @@ public void testDeeplyNestedCustomTimestampField() throws Exception {
                         .endObject()
                         .endObject()
                 ),
-                XContentType.JSON
+                MediaTypeRegistry.JSON
             )
         );
         assertThat(doc.rootDoc().getFields("event.meta.created_at").length, equalTo(2));
@@ -107,7 +103,7 @@ public void testDeeplyNestedCustomTimestampField() throws Exception {
                             .endObject()
                             .endObject()
                     ),
-                    XContentType.JSON
+                    MediaTypeRegistry.JSON
                 )
             );
         });
@@ -129,7 +125,7 @@ private void assertDataStreamFieldMapper(String mapping, String timestampFieldNa
                 BytesReference.bytes(
                     XContentFactory.jsonBuilder().startObject().field(timestampFieldName, "2020-12-06T11:04:05.000Z").endObject()
                 ),
-                XContentType.JSON
+                MediaTypeRegistry.JSON
             )
         );
 
@@ -147,7 +143,7 @@ private void assertDataStreamFieldMapper(String mapping, String timestampFieldNa
                     BytesReference.bytes(
                         XContentFactory.jsonBuilder().startObject().field("invalid-field-name", "2020-12-06T11:04:05.000Z").endObject()
                    ),
-                    XContentType.JSON
+                    MediaTypeRegistry.JSON
                 )
             );
         });
@@ -168,7 +164,7 @@ private void assertDataStreamFieldMapper(String mapping, String timestampFieldNa
                             .array(timestampFieldName, "2020-12-06T11:04:05.000Z", "2020-12-07T11:04:05.000Z")
                             .endObject()
                     ),
-                    XContentType.JSON
+                    MediaTypeRegistry.JSON
                 )
             );
         });
diff --git a/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java
index 2afd6773b15d4..054d3956596af 100644
--- a/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java
@@ -66,6 +66,7 @@ protected void registerParameters(ParameterChecker checker) throws IOException {
         checker.registerConflictCheck("index", b -> b.field("index", false));
         checker.registerConflictCheck("store", b -> b.field("store", true));
         checker.registerConflictCheck("format", b -> b.field("format", "yyyy-MM-dd"));
+        checker.registerConflictCheck("print_format", b -> b.field("print_format", "yyyy-MM-dd"));
         checker.registerConflictCheck("locale", b -> b.field("locale", "es"));
         checker.registerConflictCheck("null_value", b -> b.field("null_value", "34500000"));
         checker.registerUpdateCheck(b -> b.field("ignore_malformed", true), m -> assertTrue(((DateFieldMapper) m).getIgnoreMalformed()));
@@ -148,7 +149,7 @@ public void testStore() throws Exception {
     public void testIgnoreMalformed() throws IOException {
         testIgnoreMalformedForValue(
             "2016-03-99",
-            "failed to parse date field [2016-03-99] with format [strict_date_optional_time||epoch_millis]"
+            "failed to parse date field [2016-03-99] with format [strict_date_time_no_millis||strict_date_optional_time||epoch_millis]"
         );
         testIgnoreMalformedForValue("-2147483648", "Invalid value for Year (valid values -999999999 - 999999999): -2147483648");
         testIgnoreMalformedForValue("-522000000", "long overflow");
diff --git a/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java
index d360a2a767e8a..ab53ae81ab0ce 100644
--- a/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java
@@ -40,9 +40,10 @@
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.MultiReader;
 import org.apache.lucene.index.SortedNumericDocValues;
-import org.apache.lucene.search.IndexSortSortedNumericDocValuesRangeQuery;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.IndexOrDocValuesQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.IndexSortSortedNumericDocValuesRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.store.Directory;
 import org.opensearch.Version;
@@ -106,7 +107,7 @@ public void isFieldWithinRangeTestCase(DateFieldType ft) throws IOException {
         w.addDocument(doc);
         DirectoryReader reader = DirectoryReader.open(w);
 
-        DateMathParser alternateFormat = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.toDateMathParser();
+        DateMathParser alternateFormat = DateFieldMapper.getDefaultDateTimeFormatter().toDateMathParser();
         doTestIsFieldWithinQuery(ft, reader, null, null);
         doTestIsFieldWithinQuery(ft, reader, null, alternateFormat);
         doTestIsFieldWithinQuery(ft, reader, DateTimeZone.UTC, null);
@@ -157,7 +158,7 @@ private void doTestIsFieldWithinQuery(DateFieldType ft, DirectoryReader reader,
 
     public void testValueFormat() {
         MappedFieldType ft = new DateFieldType("field");
-        long instant = DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse("2015-10-12T14:10:55"))
+        long instant = DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse("2015-10-12T14:10:55"))
             .toInstant()
             .toEpochMilli();
 
@@ -166,14 +167,14 @@ public void testValueFormat() {
         assertEquals("2015", new DateFieldType("field").docValueFormat("YYYY", ZoneOffset.UTC).format(instant));
         assertEquals(instant, ft.docValueFormat(null, ZoneOffset.UTC).parseLong("2015-10-12T14:10:55", false, null));
         assertEquals(instant + 999, ft.docValueFormat(null, ZoneOffset.UTC).parseLong("2015-10-12T14:10:55", true, null));
-        long i = DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse("2015-10-13")).toInstant().toEpochMilli();
+        long i = DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse("2015-10-13")).toInstant().toEpochMilli();
         assertEquals(i - 1, ft.docValueFormat(null, ZoneOffset.UTC).parseLong("2015-10-12||/d", true, null));
     }
 
     public void testValueForSearch() {
         MappedFieldType ft = new DateFieldType("field");
         String date = "2015-10-12T12:09:55.000Z";
-        long instant = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis(date);
+        long instant = DateFieldMapper.getDefaultDateTimeFormatter().parseMillis(date);
         assertEquals(date, ft.valueForDisplay(instant));
     }
 
@@ -204,7 +205,7 @@ public void testTermQuery() {
         );
         MappedFieldType ft = new DateFieldType("field");
         String date = "2015-10-12T14:10:55";
-        long instant = DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(date)).toInstant().toEpochMilli();
+        long instant = DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date)).toInstant().toEpochMilli();
         Query expected = new IndexOrDocValuesQuery(
             LongPoint.newRangeQuery("field", instant, instant + 999),
             SortedNumericDocValuesField.newSlowRangeQuery("field", instant, instant + 999)
@@ -216,7 +217,7 @@ public void testTermQuery() {
             false,
             false,
             true,
-            DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER,
+            DateFieldMapper.getDefaultDateTimeFormatter(),
             Resolution.MILLISECONDS,
             null,
             Collections.emptyMap()
@@ -253,13 +254,16 @@ public void testRangeQuery() throws IOException {
         MappedFieldType ft = new DateFieldType("field");
         String date1 = "2015-10-12T14:10:55";
         String date2 = "2016-04-28T11:33:52";
-        long instant1 = DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(date1)).toInstant().toEpochMilli();
-        long instant2 = DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(date2)).toInstant().toEpochMilli() + 999;
+        long instant1 = DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date1)).toInstant().toEpochMilli();
+        long instant2 = DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date2)).toInstant().toEpochMilli() + 999;
         Query expected = new IndexOrDocValuesQuery(
             LongPoint.newRangeQuery("field", instant1, instant2),
             SortedNumericDocValuesField.newSlowRangeQuery("field", instant1, instant2)
         );
-        assertEquals(expected, ft.rangeQuery(date1, date2, true, true, null, null, null, context).rewrite(new MultiReader()));
+        assertEquals(
+            expected,
+            ft.rangeQuery(date1, date2, true, true, null, null, null, context).rewrite(new IndexSearcher(new MultiReader()))
+        );
 
         instant1 = nowInMillis;
         instant2 = instant1 + 100;
@@ -276,7 +280,7 @@ public void testRangeQuery() throws IOException {
             false,
             false,
             true,
-            DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER,
+            DateFieldMapper.getDefaultDateTimeFormatter(),
             Resolution.MILLISECONDS,
             null,
             Collections.emptyMap()
@@ -322,8 +326,8 @@ public void testRangeQueryWithIndexSort() {
         MappedFieldType ft = new DateFieldType("field");
         String date1 = "2015-10-12T14:10:55";
         String date2 = "2016-04-28T11:33:52";
-        long instant1 = DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(date1)).toInstant().toEpochMilli();
-        long instant2 = DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(date2)).toInstant().toEpochMilli() + 999;
+        long instant1 = DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date1)).toInstant().toEpochMilli();
+        long instant2 = DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date2)).toInstant().toEpochMilli() + 999;
 
         Query pointQuery = LongPoint.newRangeQuery("field", instant1, instant2);
         Query dvQuery = SortedNumericDocValuesField.newSlowRangeQuery("field", instant1, instant2);
diff --git a/server/src/test/java/org/opensearch/index/mapper/DocumentParserTests.java b/server/src/test/java/org/opensearch/index/mapper/DocumentParserTests.java
index a9ac83790226f..ecab9da8c6b6c 100644
--- a/server/src/test/java/org/opensearch/index/mapper/DocumentParserTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/DocumentParserTests.java
@@ -36,12 +36,11 @@
 import org.apache.lucene.util.BytesRef;
 import org.opensearch.Version;
 import org.opensearch.cluster.metadata.IndexMetadata;
-import org.opensearch.common.Strings;
-import org.opensearch.core.common.bytes.BytesArray;
-import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.xcontent.XContentFactory;
-import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.common.bytes.BytesArray;
+import org.opensearch.core.common.bytes.BytesReference;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.index.IndexSettings;
 import org.opensearch.index.mapper.ParseContext.Document;
 import org.opensearch.plugins.Plugin;
@@ -1063,7 +1062,7 @@ public void testParseToJsonAndParse() throws Exception {
         // reparse it
         DocumentMapper builtDocMapper = createDocumentMapper(MapperService.SINGLE_MAPPING_NAME, builtMapping);
         BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/opensearch/index/mapper/simple/test1.json"));
-        Document doc = builtDocMapper.parse(new SourceToParse("test", "1", json, XContentType.JSON)).rootDoc();
+        Document doc = builtDocMapper.parse(new SourceToParse("test", "1", json, MediaTypeRegistry.JSON)).rootDoc();
         assertThat(doc.getBinaryValue(builtDocMapper.idFieldMapper().name()), equalTo(Uid.encodeId("1")));
         assertThat(doc.get(builtDocMapper.mappers().getMapper("name.first").name()), equalTo("fred"));
     }
@@ -1075,7 +1074,7 @@ public void testSimpleParser() throws Exception {
         assertThat((String) docMapper.meta().get("param1"), equalTo("value1"));
 
         BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/opensearch/index/mapper/simple/test1.json"));
-        Document doc = docMapper.parse(new SourceToParse("test", "1", json, XContentType.JSON)).rootDoc();
+        Document doc = docMapper.parse(new SourceToParse("test", "1", json, MediaTypeRegistry.JSON)).rootDoc();
         assertThat(doc.getBinaryValue(docMapper.idFieldMapper().name()), equalTo(Uid.encodeId("1")));
         assertThat(doc.get(docMapper.mappers().getMapper("name.first").name()), equalTo("fred"));
     }
@@ -1084,7 +1083,7 @@ public void testSimpleParserNoTypeNoId() throws Exception {
         String mapping = copyToStringFromClasspath("/org/opensearch/index/mapper/simple/test-mapping.json");
         DocumentMapper docMapper = createDocumentMapper(MapperService.SINGLE_MAPPING_NAME, mapping);
         BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/opensearch/index/mapper/simple/test1-notype-noid.json"));
-        Document doc = docMapper.parse(new SourceToParse("test", "1", json, XContentType.JSON)).rootDoc();
+        Document doc = docMapper.parse(new SourceToParse("test", "1", json, MediaTypeRegistry.JSON)).rootDoc();
         assertThat(doc.getBinaryValue(docMapper.idFieldMapper().name()), equalTo(Uid.encodeId("1")));
         assertThat(doc.get(docMapper.mappers().getMapper("name.first").name()), equalTo("fred"));
     }
@@ -1106,7 +1105,7 @@ public void testNoDocumentSent() throws Exception {
         BytesReference json = new BytesArray("".getBytes(StandardCharsets.UTF_8));
         MapperParsingException e = expectThrows(
             MapperParsingException.class,
-            () -> docMapper.parse(new SourceToParse("test", "1", json, XContentType.JSON))
+            () -> docMapper.parse(new SourceToParse("test", "1", json, MediaTypeRegistry.JSON))
         );
         assertThat(e.getMessage(), equalTo("failed to parse, document is empty"));
     }
@@ -1469,18 +1468,17 @@ public void testDynamicDottedFieldNameWithFieldAlias() throws Exception {
     }
 
     public void testTypeless() throws IOException {
-        String mapping = Strings.toString(
-            XContentFactory.jsonBuilder()
-                .startObject()
-                .startObject(MapperService.SINGLE_MAPPING_NAME)
-                .startObject("properties")
-                .startObject("foo")
-                .field("type", "keyword")
-                .endObject()
-                .endObject()
-                .endObject()
-                .endObject()
-        );
+        String mapping = XContentFactory.jsonBuilder()
+            .startObject()
+            .startObject(MapperService.SINGLE_MAPPING_NAME)
+            .startObject("properties")
+            .startObject("foo")
+            .field("type", "keyword")
+            .endObject()
+            .endObject()
+            .endObject()
+            .endObject()
+            .toString();
         DocumentMapper mapper = createDocumentMapper(MapperService.SINGLE_MAPPING_NAME, mapping);
 
         ParsedDocument doc = mapper.parse(source(b -> b.field("foo", "1234")));
diff --git a/server/src/test/java/org/opensearch/index/mapper/DynamicMappingTests.java b/server/src/test/java/org/opensearch/index/mapper/DynamicMappingTests.java
index 0e04e57a290b6..6cf9600f74341 100644
--- a/server/src/test/java/org/opensearch/index/mapper/DynamicMappingTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/DynamicMappingTests.java
@@ -32,11 +32,11 @@
 package org.opensearch.index.mapper;
 
 import org.opensearch.common.CheckedConsumer;
-import org.opensearch.common.Strings;
+import org.opensearch.common.xcontent.XContentFactory;
+import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.bytes.BytesReference;
-import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.common.xcontent.XContentFactory;
 
 import java.io.IOException;
 import java.time.Instant;
@@ -184,7 +184,7 @@ public void testField() throws Exception {
         assertEquals(
             "{\"_doc\":{\"properties\":{\"foo\":{\"type\":\"text\",\"fields\":"
                 + "{\"keyword\":{\"type\":\"keyword\",\"ignore_above\":256}}}}}}",
-            Strings.toString(XContentType.JSON, doc.dynamicMappingsUpdate())
+            Strings.toString(MediaTypeRegistry.JSON, doc.dynamicMappingsUpdate())
         );
     }
 
@@ -200,9 +200,9 @@ public void testIncremental() throws Exception {
         }));
 
         assertNotNull(doc.dynamicMappingsUpdate());
-        assertThat(Strings.toString(XContentType.JSON, doc.dynamicMappingsUpdate()), containsString("{\"bar\":"));
+        assertThat(Strings.toString(MediaTypeRegistry.JSON, doc.dynamicMappingsUpdate()), containsString("{\"bar\":"));
         // field is NOT in the update
-        assertThat(Strings.toString(XContentType.JSON, doc.dynamicMappingsUpdate()), not(containsString("{\"field\":")));
+        assertThat(Strings.toString(MediaTypeRegistry.JSON, doc.dynamicMappingsUpdate()), not(containsString("{\"field\":")));
     }
 
     public void testIntroduceTwoFields() throws Exception {
@@ -214,8 +214,8 @@ public void testIntroduceTwoFields() throws Exception {
         }));
 
         assertNotNull(doc.dynamicMappingsUpdate());
-        assertThat(Strings.toString(XContentType.JSON, doc.dynamicMappingsUpdate()), containsString("\"foo\":{"));
-        assertThat(Strings.toString(XContentType.JSON, doc.dynamicMappingsUpdate()), containsString("\"bar\":{"));
+        assertThat(Strings.toString(MediaTypeRegistry.JSON, doc.dynamicMappingsUpdate()), containsString("\"foo\":{"));
+        assertThat(Strings.toString(MediaTypeRegistry.JSON, doc.dynamicMappingsUpdate()), containsString("\"bar\":{"));
     }
 
     public void testObject() throws Exception {
@@ -230,7 +230,7 @@ public void testObject() throws Exception {
 
         assertNotNull(doc.dynamicMappingsUpdate());
         assertThat(
-            Strings.toString(XContentType.JSON, doc.dynamicMappingsUpdate()),
+            Strings.toString(MediaTypeRegistry.JSON, doc.dynamicMappingsUpdate()),
             containsString("{\"foo\":{\"properties\":{\"bar\":{\"properties\":{\"baz\":{\"type\":\"text\"")
         );
     }
@@ -241,7 +241,7 @@ public void testArray() throws Exception {
         ParsedDocument doc = mapper.parse(source(b -> b.startArray("foo").value("bar").value("baz").endArray()));
 
         assertNotNull(doc.dynamicMappingsUpdate());
-        assertThat(Strings.toString(XContentType.JSON, doc.dynamicMappingsUpdate()), containsString("{\"foo\":{\"type\":\"text\""));
+        assertThat(Strings.toString(MediaTypeRegistry.JSON, doc.dynamicMappingsUpdate()), containsString("{\"foo\":{\"type\":\"text\""));
     }
 
     public void testInnerDynamicMapping() throws Exception {
@@ -257,7 +257,7 @@ public void testInnerDynamicMapping() throws Exception {
 
         assertNotNull(doc.dynamicMappingsUpdate());
         assertThat(
-            Strings.toString(XContentType.JSON, doc.dynamicMappingsUpdate()),
+            Strings.toString(MediaTypeRegistry.JSON, doc.dynamicMappingsUpdate()),
             containsString("{\"field\":{\"properties\":{\"bar\":{\"properties\":{\"baz\":{\"type\":\"text\"")
         );
     }
@@ -277,7 +277,7 @@ public void testComplexArray() throws Exception {
         assertEquals(
             "{\"_doc\":{\"properties\":{\"foo\":{\"properties\":{\"bar\":{\"type\":\"text\",\"fields\":{"
                 + "\"keyword\":{\"type\":\"keyword\",\"ignore_above\":256}}},\"baz\":{\"type\":\"long\"}}}}}}",
-            Strings.toString(XContentType.JSON, doc.dynamicMappingsUpdate())
+            Strings.toString(MediaTypeRegistry.JSON, doc.dynamicMappingsUpdate())
         );
     }
 
diff --git a/server/src/test/java/org/opensearch/index/mapper/DynamicTemplateTests.java b/server/src/test/java/org/opensearch/index/mapper/DynamicTemplateTests.java
index 7fbdf349c1bc1..3d53f3fbebd87 100644
--- a/server/src/test/java/org/opensearch/index/mapper/DynamicTemplateTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/DynamicTemplateTests.java
@@ -32,10 +32,9 @@
 
 package org.opensearch.index.mapper;
 
-import org.opensearch.common.Strings;
+import org.opensearch.common.xcontent.json.JsonXContent;
 import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.common.xcontent.json.JsonXContent;
 import org.opensearch.index.mapper.DynamicTemplate.XContentFieldType;
 import org.opensearch.test.OpenSearchTestCase;
 
@@ -107,7 +106,7 @@ public void testSerialization() throws Exception {
         DynamicTemplate template = DynamicTemplate.parse("my_template", templateDef);
         XContentBuilder builder = JsonXContent.contentBuilder();
         template.toXContent(builder, ToXContent.EMPTY_PARAMS);
-        assertEquals("{\"match_mapping_type\":\"string\",\"mapping\":{\"store\":true}}", Strings.toString(builder));
+        assertEquals("{\"match_mapping_type\":\"string\",\"mapping\":{\"store\":true}}", builder.toString());
 
         // name-based template
         templateDef = new HashMap<>();
@@ -117,7 +116,7 @@ public void testSerialization() throws Exception {
         template = DynamicTemplate.parse("my_template", templateDef);
         builder = JsonXContent.contentBuilder();
         template.toXContent(builder, ToXContent.EMPTY_PARAMS);
-        assertEquals("{\"match\":\"*name\",\"unmatch\":\"first_name\",\"mapping\":{\"store\":true}}", Strings.toString(builder));
+        assertEquals("{\"match\":\"*name\",\"unmatch\":\"first_name\",\"mapping\":{\"store\":true}}", builder.toString());
 
         // path-based template
         templateDef = new HashMap<>();
@@ -127,7 +126,7 @@ public void testSerialization() throws Exception {
         template = DynamicTemplate.parse("my_template", templateDef);
         builder = JsonXContent.contentBuilder();
         template.toXContent(builder, ToXContent.EMPTY_PARAMS);
-        assertEquals("{\"path_match\":\"*name\",\"path_unmatch\":\"first_name\",\"mapping\":{\"store\":true}}", Strings.toString(builder));
+        assertEquals("{\"path_match\":\"*name\",\"path_unmatch\":\"first_name\",\"mapping\":{\"store\":true}}", builder.toString());
 
         // regex matching
         templateDef = new HashMap<>();
@@ -137,6 +136,6 @@ public void testSerialization() throws Exception {
         template = DynamicTemplate.parse("my_template", templateDef);
         builder = JsonXContent.contentBuilder();
         template.toXContent(builder, ToXContent.EMPTY_PARAMS);
-        assertEquals("{\"match\":\"^a$\",\"match_pattern\":\"regex\",\"mapping\":{\"store\":true}}", Strings.toString(builder));
+        assertEquals("{\"match\":\"^a$\",\"match_pattern\":\"regex\",\"mapping\":{\"store\":true}}", builder.toString());
     }
 }
diff --git a/server/src/test/java/org/opensearch/index/mapper/ExternalMapperPlugin.java b/server/src/test/java/org/opensearch/index/mapper/ExternalMapperPlugin.java
index dc179acaca324..147ad63e0c4f3 100644
--- a/server/src/test/java/org/opensearch/index/mapper/ExternalMapperPlugin.java
+++ b/server/src/test/java/org/opensearch/index/mapper/ExternalMapperPlugin.java
@@ -32,13 +32,13 @@
 
 package org.opensearch.index.mapper;
 
+import org.opensearch.plugins.MapperPlugin;
+import org.opensearch.plugins.Plugin;
+
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 
-import org.opensearch.plugins.MapperPlugin;
-import org.opensearch.plugins.Plugin;
-
 public class ExternalMapperPlugin extends Plugin implements MapperPlugin {
 
     public static final String EXTERNAL = "external";
diff --git a/server/src/test/java/org/opensearch/index/mapper/FieldAliasMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/FieldAliasMapperTests.java
index 7a10646b5497a..1952e455b1c78 100644
--- a/server/src/test/java/org/opensearch/index/mapper/FieldAliasMapperTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/FieldAliasMapperTests.java
@@ -32,7 +32,6 @@
 
 package org.opensearch.index.mapper;
 
-import org.opensearch.common.Strings;
 import org.opensearch.common.xcontent.XContentFactory;
 
 import java.io.IOException;
@@ -40,22 +39,21 @@
 
 public class FieldAliasMapperTests extends MapperServiceTestCase {
     public void testParsing() throws IOException {
-        String mapping = Strings.toString(
-            XContentFactory.jsonBuilder()
-                .startObject()
-                .startObject("_doc")
-                .startObject("properties")
-                .startObject("alias-field")
-                .field("type", "alias")
-                .field("path", "concrete-field")
-                .endObject()
-                .startObject("concrete-field")
-                .field("type", "keyword")
-                .endObject()
-                .endObject()
-                .endObject()
-                .endObject()
-        );
+        String mapping = XContentFactory.jsonBuilder()
+            .startObject()
+            .startObject("_doc")
+            .startObject("properties")
+            .startObject("alias-field")
+            .field("type", "alias")
+            .field("path", "concrete-field")
+            .endObject()
+            .startObject("concrete-field")
+            .field("type", "keyword")
+            .endObject()
+            .endObject()
+            .endObject()
+            .endObject()
+            .toString();
         DocumentMapper mapper = createDocumentMapper("_doc", mapping);
         assertEquals(mapping, mapper.mappingSource().toString());
     }
diff --git a/server/src/test/java/org/opensearch/index/mapper/FieldFilterMapperPluginTests.java b/server/src/test/java/org/opensearch/index/mapper/FieldFilterMapperPluginTests.java
index 2c825401ab94a..be5fc84a2bfc7 100644
--- a/server/src/test/java/org/opensearch/index/mapper/FieldFilterMapperPluginTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/FieldFilterMapperPluginTests.java
@@ -40,7 +40,7 @@
 import org.opensearch.action.fieldcaps.FieldCapabilitiesRequest;
 import org.opensearch.action.fieldcaps.FieldCapabilitiesResponse;
 import org.opensearch.cluster.metadata.MappingMetadata;
-import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.indices.IndicesModule;
 import org.opensearch.plugins.MapperPlugin;
 import org.opensearch.plugins.Plugin;
@@ -73,7 +73,7 @@ protected Collection<Class<? extends Plugin>> getPlugins() {
     public void putMappings() {
         assertAcked(client().admin().indices().prepareCreate("index1"));
         assertAcked(client().admin().indices().prepareCreate("filtered"));
-        assertAcked(client().admin().indices().preparePutMapping("index1", "filtered").setSource(TEST_ITEM, XContentType.JSON));
+        assertAcked(client().admin().indices().preparePutMapping("index1", "filtered").setSource(TEST_ITEM, MediaTypeRegistry.JSON));
     }
 
     public void testGetMappings() {
diff --git a/server/src/test/java/org/opensearch/index/mapper/FieldNamesFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/FieldNamesFieldMapperTests.java
index d3001c5c9c890..0ca1e8890ca3e 100644
--- a/server/src/test/java/org/opensearch/index/mapper/FieldNamesFieldMapperTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/FieldNamesFieldMapperTests.java
@@ -34,11 +34,10 @@
 
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexableField;
-import org.opensearch.common.Strings;
-import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.common.compress.CompressedXContent;
 import org.opensearch.common.xcontent.XContentFactory;
-import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.common.bytes.BytesReference;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.test.OpenSearchSingleNodeTestCase;
 
 import java.util.ArrayList;
@@ -85,9 +84,14 @@ public void testExtractFieldNames() {
     }
 
     public void testFieldType() throws Exception {
-        String mapping = Strings.toString(
-            XContentFactory.jsonBuilder().startObject().startObject("type").startObject("_field_names").endObject().endObject().endObject()
-        );
+        String mapping = XContentFactory.jsonBuilder()
+            .startObject()
+            .startObject("type")
+            .startObject("_field_names")
+            .endObject()
+            .endObject()
+            .endObject()
+            .toString();
 
         DocumentMapper docMapper = createIndex("test").mapperService()
             .documentMapperParser()
@@ -102,7 +106,7 @@ public void testFieldType() throws Exception {
     }
 
     public void testInjectIntoDocDuringParsing() throws Exception {
-        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject());
+        String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().toString();
         DocumentMapper defaultMapper = createIndex("test").mapperService()
             .documentMapperParser()
             .parse("type", new CompressedXContent(mapping));
@@ -114,7 +118,7 @@ public void testInjectIntoDocDuringParsing() throws Exception {
                 BytesReference.bytes(
                     XContentFactory.jsonBuilder().startObject().field("a", "100").startObject("b").field("c", 42).endObject().endObject()
                 ),
-                XContentType.JSON
+                MediaTypeRegistry.JSON
             )
         );
 
@@ -122,22 +126,21 @@ public void testInjectIntoDocDuringParsing() throws Exception {
     }
 
     public void testExplicitEnabled() throws Exception {
-        String mapping = Strings.toString(
-            XContentFactory.jsonBuilder()
-                .startObject()
-                .startObject("type")
-                .startObject("_field_names")
-                .field("enabled", true)
-                .endObject()
-                .startObject("properties")
-                .startObject("field")
-                .field("type", "keyword")
-                .field("doc_values", false)
-                .endObject()
-                .endObject()
-                .endObject()
-                .endObject()
-        );
+        String mapping = XContentFactory.jsonBuilder()
+            .startObject()
+            .startObject("type")
+            .startObject("_field_names")
+            .field("enabled", true)
+            .endObject()
+            .startObject("properties")
+            .startObject("field")
+            .field("type", "keyword")
+            .field("doc_values", false)
+            .endObject()
+            .endObject()
+            .endObject()
+            .endObject()
+            .toString();
         DocumentMapper docMapper = createIndex("test").mapperService()
             .documentMapperParser()
             .parse("type", new CompressedXContent(mapping));
@@ -149,7 +152,7 @@ public void testExplicitEnabled() throws Exception {
                 "test",
                 "1",
                 BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("field", "value").endObject()),
-                XContentType.JSON
+                MediaTypeRegistry.JSON
             )
         );
 
@@ -158,16 +161,15 @@ public void testExplicitEnabled() throws Exception {
     }
 
     public void testDisabled() throws Exception {
-        String mapping = Strings.toString(
-            XContentFactory.jsonBuilder()
-                .startObject()
-                .startObject("type")
-                .startObject("_field_names")
-                .field("enabled", false)
-                .endObject()
-                .endObject()
-                .endObject()
-        );
+        String mapping = XContentFactory.jsonBuilder()
+            .startObject()
+            .startObject("type")
+            .startObject("_field_names")
+            .field("enabled", false)
+            .endObject()
+            .endObject()
+            .endObject()
+            .toString();
         DocumentMapper docMapper = createIndex("test").mapperService()
             .documentMapperParser()
             .parse("type", new CompressedXContent(mapping));
@@ -179,7 +181,7 @@ public void testDisabled() throws Exception {
                 "test",
                 "1",
                 BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("field", "value").endObject()),
-                XContentType.JSON
+                MediaTypeRegistry.JSON
             )
         );
 
@@ -188,26 +190,24 @@ public void testDisabled() throws Exception {
     }
 
     public void testMergingMappings() throws Exception {
-        String enabledMapping = Strings.toString(
-            XContentFactory.jsonBuilder()
-                .startObject()
-                .startObject("type")
-                .startObject("_field_names")
-                .field("enabled", true)
-                .endObject()
-                .endObject()
-                .endObject()
-        );
-        String disabledMapping = Strings.toString(
-            XContentFactory.jsonBuilder()
-                .startObject()
-                .startObject("type")
-                .startObject("_field_names")
-                .field("enabled", false)
-                .endObject()
-                .endObject()
-                .endObject()
-        );
+        String enabledMapping = XContentFactory.jsonBuilder()
+            .startObject()
+            .startObject("type")
+            .startObject("_field_names")
+            .field("enabled", true)
+            .endObject()
+            .endObject()
+            .endObject()
+            .toString();
+        String disabledMapping = XContentFactory.jsonBuilder()
+            .startObject()
+            .startObject("type")
+            .startObject("_field_names")
+            .field("enabled", false)
+            .endObject()
+            .endObject()
+            .endObject()
+            .toString();
         MapperService mapperService = createIndex("test").mapperService();
 
         mapperService.merge("type", new CompressedXContent(enabledMapping), MapperService.MergeReason.MAPPING_UPDATE);
diff --git a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldDataTests.java b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldDataTests.java
index a2edff295d8f2..e318ca5e953a3 100644
--- a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldDataTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldDataTests.java
@@ -8,11 +8,10 @@
 package org.opensearch.index.mapper;
 
 import org.apache.lucene.index.LeafReaderContext;
-import org.opensearch.common.Strings;
-import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.common.compress.CompressedXContent;
 import org.opensearch.common.xcontent.XContentFactory;
-import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.common.bytes.BytesReference;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.index.fielddata.AbstractFieldDataTestCase;
 import org.opensearch.index.fielddata.IndexFieldData;
@@ -28,22 +27,21 @@ protected boolean hasDocValues() {
     }
 
     public void testDocValue() throws Exception {
-        String mapping = Strings.toString(
-            XContentFactory.jsonBuilder()
-                .startObject()
-                .startObject("test")
-                .startObject("properties")
-                .startObject("field")
-                .field("type", FIELD_TYPE)
-                .endObject()
-                .endObject()
-                .endObject()
-                .endObject()
-        );
+        String mapping = XContentFactory.jsonBuilder()
+            .startObject()
+            .startObject("test")
+            .startObject("properties")
+            .startObject("field")
+            .field("type", FIELD_TYPE)
+            .endObject()
+            .endObject()
+            .endObject()
+            .endObject()
+            .toString();
         final DocumentMapper mapper = mapperService.documentMapperParser().parse("test", new CompressedXContent(mapping));
 
         XContentBuilder json = XContentFactory.jsonBuilder().startObject().startObject("field").field("foo", "bar").endObject().endObject();
-        ParsedDocument d = mapper.parse(new SourceToParse("test", "1", BytesReference.bytes(json), XContentType.JSON));
+        ParsedDocument d = mapper.parse(new SourceToParse("test", "1", BytesReference.bytes(json), MediaTypeRegistry.JSON));
        writer.addDocument(d.rootDoc());
        writer.commit();
 
diff --git a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java
index 309b150f11748..637072c8886c1 100644
--- a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java
@@ -15,11 +15,10 @@
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.util.BytesRef;
-import org.opensearch.common.Strings;
-import org.opensearch.core.xcontent.ToXContent;
-import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.common.xcontent.XContentFactory;
 import org.opensearch.common.xcontent.json.JsonXContent;
+import org.opensearch.core.xcontent.ToXContent;
+import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.index.query.QueryShardContext;
 
 import java.io.IOException;
@@ -92,18 +91,22 @@ public void testMinimalToMaximal() throws IOException {
         XContentBuilder parsedFromOrig = JsonXContent.contentBuilder().startObject();
         createMapperService(orig).documentMapper().mapping().toXContent(parsedFromOrig, ToXContent.EMPTY_PARAMS);
         parsedFromOrig.endObject();
-        assertEquals(Strings.toString(orig), Strings.toString(parsedFromOrig));
+        assertEquals(orig.toString(), parsedFromOrig.toString());
         assertParseMaximalWarnings();
     }
 
     public void testDefaults() throws Exception {
         XContentBuilder mapping = fieldMapping(this::minimalMapping);
         DocumentMapper mapper = createDocumentMapper(mapping);
-        assertEquals(Strings.toString(mapping), mapper.mappingSource().toString());
-
-        String json = Strings.toString(
-            XContentFactory.jsonBuilder().startObject().startObject("field").field("foo", "bar").endObject().endObject()
-        );
+        assertEquals(mapping.toString(), mapper.mappingSource().toString());
+
+        String json = XContentFactory.jsonBuilder()
+            .startObject()
+            .startObject("field")
+            .field("foo", "bar")
+            .endObject()
+            .endObject()
+            .toString();
 
         ParsedDocument doc = mapper.parse(source(json));
         IndexableField[] fields = doc.rootDoc().getFields("field");
diff --git a/server/src/test/java/org/opensearch/index/mapper/GenericStoreDynamicTemplateTests.java b/server/src/test/java/org/opensearch/index/mapper/GenericStoreDynamicTemplateTests.java
index 1834b087e3bdd..6d816c6aab69c 100644
--- a/server/src/test/java/org/opensearch/index/mapper/GenericStoreDynamicTemplateTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/GenericStoreDynamicTemplateTests.java
@@ -34,7 +34,7 @@
 
 import org.apache.lucene.index.IndexableField;
 import org.opensearch.core.common.bytes.BytesArray;
-import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.index.IndexService;
 import org.opensearch.index.mapper.ParseContext.Document;
 import org.opensearch.test.OpenSearchSingleNodeTestCase;
@@ -47,17 +47,17 @@ public class GenericStoreDynamicTemplateTests extends OpenSearchSingleNodeTestCa
     public void testSimple() throws Exception {
         String mapping = copyToStringFromClasspath("/org/opensearch/index/mapper/dynamictemplate/genericstore/test-mapping.json");
         IndexService index = createIndex("test");
-        client().admin().indices().preparePutMapping("test").setSource(mapping, XContentType.JSON).get();
+        client().admin().indices().preparePutMapping("test").setSource(mapping, MediaTypeRegistry.JSON).get();
 
         MapperService mapperService = index.mapperService();
 
         byte[] json = copyToBytesFromClasspath("/org/opensearch/index/mapper/dynamictemplate/genericstore/test-data.json");
         ParsedDocument parsedDoc = mapperService.documentMapper()
-            .parse(new SourceToParse("test", "1", new BytesArray(json), XContentType.JSON));
+            .parse(new SourceToParse("test", "1", new BytesArray(json), MediaTypeRegistry.JSON));
         client().admin()
             .indices()
             .preparePutMapping("test")
-            .setSource(parsedDoc.dynamicMappingsUpdate().toString(), XContentType.JSON)
+            .setSource(parsedDoc.dynamicMappingsUpdate().toString(), MediaTypeRegistry.JSON)
             .get();
 
         Document doc = parsedDoc.rootDoc();
diff --git a/server/src/test/java/org/opensearch/index/mapper/GeoPointFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/GeoPointFieldMapperTests.java
index 4112d792aa087..cbb5fc8ce5a22 100644
--- a/server/src/test/java/org/opensearch/index/mapper/GeoPointFieldMapperTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/GeoPointFieldMapperTests.java
@@ -34,8 +34,8 @@
 import org.apache.lucene.util.BytesRef;
 import org.opensearch.common.geo.GeoPoint;
 import org.opensearch.common.geo.GeoUtils;
-import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.common.xcontent.XContentFactory;
+import org.opensearch.core.xcontent.XContentBuilder;
 import org.hamcrest.CoreMatchers;
 
 import java.io.IOException;
diff --git a/server/src/test/java/org/opensearch/index/mapper/GeoShapeFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/GeoShapeFieldMapperTests.java
index 4a5c342883cc8..016862e3ffabc 100644
--- a/server/src/test/java/org/opensearch/index/mapper/GeoShapeFieldMapperTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/GeoShapeFieldMapperTests.java
@@ -32,9 +32,9 @@
 package org.opensearch.index.mapper;
 
 import org.opensearch.common.Explicit;
-import org.opensearch.common.Strings;
 import org.opensearch.common.geo.builders.ShapeBuilder;
-import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.common.Strings;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.plugins.Plugin;
@@ -229,7 +229,7 @@ public void testSerializeDefaults() throws Exception {
         DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping));
assertThat( Strings.toString( - XContentType.JSON, + MediaTypeRegistry.JSON, mapper.mappers().getMapper("field"), new ToXContent.MapParams(Collections.singletonMap("include_defaults", "true")) ), diff --git a/server/src/test/java/org/opensearch/index/mapper/IdFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/IdFieldMapperTests.java index ec30b014c9359..e6ef5a9069af6 100644 --- a/server/src/test/java/org/opensearch/index/mapper/IdFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/IdFieldMapperTests.java @@ -34,19 +34,18 @@ import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexService; import org.opensearch.index.mapper.MapperService.MergeReason; import org.opensearch.indices.IndicesService; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchSingleNodeTestCase; import java.io.IOException; import java.util.Collection; @@ -63,7 +62,7 @@ protected Collection<Class<? extends Plugin>> getPlugins() { } public void testIncludeInObjectNotAllowed() throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject()); + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().toString(); DocumentMapper docMapper = createIndex("test").mapperService() .documentMapperParser() .parse("type", new CompressedXContent(mapping)); @@ -74,7 +73,7 @@ public void testIncludeInObjectNotAllowed() throws Exception { "test", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("_id", "1").endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ); fail("Expected failure to parse metadata field"); @@ -90,7 +89,7 @@ public void testDefaults() throws IOException { Settings indexSettings = Settings.EMPTY; MapperService mapperService = createIndex("test", indexSettings).mapperService(); DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE); - ParsedDocument document = mapper.parse(new SourceToParse("index", "id", new BytesArray("{}"), XContentType.JSON)); + ParsedDocument document = mapper.parse(new SourceToParse("index", "id", new BytesArray("{}"), MediaTypeRegistry.JSON)); IndexableField[] fields = document.rootDoc().getFields(IdFieldMapper.NAME); assertEquals(1, fields.length); assertEquals(IndexOptions.DOCS, fields[0].fieldType().indexOptions()); diff --git a/server/src/test/java/org/opensearch/index/mapper/IdFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/IdFieldTypeTests.java index 8a37a72ab7be4..3fb6be2203cd1 100644 --- a/server/src/test/java/org/opensearch/index/mapper/IdFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/IdFieldTypeTests.java @@ -40,6 +40,7 @@ import 
org.opensearch.index.IndexSettings; import org.opensearch.index.query.QueryShardContext; import org.opensearch.test.OpenSearchTestCase; + import org.mockito.Mockito; public class IdFieldTypeTests extends OpenSearchTestCase { diff --git a/server/src/test/java/org/opensearch/index/mapper/IndexFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/IndexFieldMapperTests.java index 1d1ff0405e2ee..b194e08d17813 100644 --- a/server/src/test/java/org/opensearch/index/mapper/IndexFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/IndexFieldMapperTests.java @@ -32,21 +32,20 @@ package org.opensearch.index.mapper; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.test.InternalSettingsPlugin; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.nullValue; +import org.opensearch.test.OpenSearchSingleNodeTestCase; import java.io.IOException; import java.util.Collection; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; + public class IndexFieldMapperTests extends OpenSearchSingleNodeTestCase { @Override @@ -55,7 +54,7 @@ protected Collection<Class<? extends Plugin>> getPlugins() { } public void testDefaultDisabledIndexMapper() throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject()); + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().toString(); DocumentMapper docMapper = createIndex("test").mapperService() .documentMapperParser() .parse("type", new CompressedXContent(mapping)); @@ -65,7 +64,7 @@ public void testDefaultDisabledIndexMapper() throws Exception { "test", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("field", "value").endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ); @@ -74,9 +73,14 @@ public void testDefaultDisabledIndexMapper() throws Exception { } public void testIndexNotConfigurable() throws IOException { - String mapping = Strings.toString( - XContentFactory.jsonBuilder().startObject().startObject("type").startObject("_index").endObject().endObject().endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("_index") + .endObject() + .endObject() + .endObject() + .toString(); DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); MapperParsingException e = expectThrows(MapperParsingException.class, () -> parser.parse("type", new CompressedXContent(mapping))); assertEquals("_index is not configurable", e.getMessage()); diff --git a/server/src/test/java/org/opensearch/index/mapper/IpFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/IpFieldTypeTests.java index 1a66037d98d71..0a2435553b19e 100644 --- a/server/src/test/java/org/opensearch/index/mapper/IpFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/IpFieldTypeTests.java @@ -32,10 +32,14 @@ package org.opensearch.index.mapper; import 
org.apache.lucene.document.InetAddressPoint; +import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; +import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.PointRangeQuery; +import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; @@ -75,22 +79,41 @@ public void testTermQuery() { MappedFieldType ft = new IpFieldMapper.IpFieldType("field"); String ip = "2001:db8::2:1"; - assertEquals(InetAddressPoint.newExactQuery("field", InetAddresses.forString(ip)), ft.termQuery(ip, null)); + + Query query = InetAddressPoint.newExactQuery("field", InetAddresses.forString(ip)); + + assertEquals( + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowExactQuery("field", new BytesRef(((PointRangeQuery) query).getLowerPoint())) + ), + ft.termQuery(ip, null) + ); ip = "192.168.1.7"; - assertEquals(InetAddressPoint.newExactQuery("field", InetAddresses.forString(ip)), ft.termQuery(ip, null)); + query = InetAddressPoint.newExactQuery("field", InetAddresses.forString(ip)); + assertEquals( + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowExactQuery("field", new BytesRef(((PointRangeQuery) query).getLowerPoint())) + ), + ft.termQuery(ip, null) + ); ip = "2001:db8::2:1"; String prefix = ip + "/64"; - assertEquals(InetAddressPoint.newPrefixQuery("field", InetAddresses.forString(ip), 64), ft.termQuery(prefix, null)); + + query = InetAddressPoint.newPrefixQuery("field", InetAddresses.forString(ip), 64); + assertEquals(query, ft.termQuery(prefix, null)); ip = "192.168.1.7"; prefix = ip + "/16"; - assertEquals(InetAddressPoint.newPrefixQuery("field", InetAddresses.forString(ip), 16), ft.termQuery(prefix, null)); + query = InetAddressPoint.newPrefixQuery("field", InetAddresses.forString(ip), 16); + assertEquals(query, ft.termQuery(prefix, null)); - MappedFieldType unsearchable = new IpFieldMapper.IpFieldType("field", false, false, true, null, Collections.emptyMap()); + MappedFieldType unsearchable = new IpFieldMapper.IpFieldType("field", false, false, false, null, Collections.emptyMap()); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> unsearchable.termQuery("::1", null)); - assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); + assertEquals("Cannot search on field [field] since it is both not indexed, and does not have doc_values enabled.", e.getMessage()); } public void testTermsQuery() { @@ -118,44 +141,123 @@ public void testTermsQuery() { public void testRangeQuery() { MappedFieldType ft = new IpFieldMapper.IpFieldType("field"); - + Query query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddressPoint.MAX_VALUE); assertEquals( - InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddressPoint.MAX_VALUE), + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ), ft.rangeQuery(null, null, randomBoolean(), randomBoolean(), null, null, null, null) ); + query = InetAddressPoint.newRangeQuery("field", 
InetAddresses.forString("::"), InetAddresses.forString("192.168.2.0")); assertEquals( - InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddresses.forString("192.168.2.0")), + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ), ft.rangeQuery(null, "192.168.2.0", randomBoolean(), true, null, null, null, null) ); + query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddresses.forString("192.168.1.255")); assertEquals( - InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddresses.forString("192.168.1.255")), + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ), ft.rangeQuery(null, "192.168.2.0", randomBoolean(), false, null, null, null, null) ); + query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::"), InetAddressPoint.MAX_VALUE); assertEquals( - InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::"), InetAddressPoint.MAX_VALUE), + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ), ft.rangeQuery("2001:db8::", null, true, randomBoolean(), null, null, null, null) ); + query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::1"), InetAddressPoint.MAX_VALUE); assertEquals( - InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::1"), InetAddressPoint.MAX_VALUE), + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ), ft.rangeQuery("2001:db8::", null, false, randomBoolean(), null, null, null, null) ); + query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::"), InetAddresses.forString("2001:db8::ffff")); assertEquals( - InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::"), InetAddresses.forString("2001:db8::ffff")), + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ), ft.rangeQuery("2001:db8::", "2001:db8::ffff", true, true, null, null, null, null) ); + query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::1"), InetAddresses.forString("2001:db8::fffe")); assertEquals( - InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::1"), InetAddresses.forString("2001:db8::fffe")), + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ), ft.rangeQuery("2001:db8::", "2001:db8::ffff", false, 
false, null, null, null, null) ); + query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::2"), InetAddresses.forString("2001:db8::")); assertEquals( - InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::2"), InetAddresses.forString("2001:db8::")), + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ), // same lo/hi values but inclusive=false so this won't match anything ft.rangeQuery("2001:db8::1", "2001:db8::1", false, false, null, null, null, null) ); @@ -178,30 +280,60 @@ public void testRangeQuery() { ) ); + query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddresses.forString("::fffe:ffff:ffff")); assertEquals( - InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddresses.forString("::fffe:ffff:ffff")), + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ), // same lo/hi values but inclusive=false so this won't match anything ft.rangeQuery("::", "0.0.0.0", true, false, null, null, null, null) ); + query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::1:0:0:0"), InetAddressPoint.MAX_VALUE); assertEquals( - InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::1:0:0:0"), InetAddressPoint.MAX_VALUE), + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ), // same lo/hi values but inclusive=false so this won't match anything ft.rangeQuery("255.255.255.255", "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", false, true, null, null, null, null) ); + query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("192.168.1.7"), InetAddresses.forString("2001:db8::")); assertEquals( // lower bound is ipv4, upper bound is ipv6 - InetAddressPoint.newRangeQuery("field", InetAddresses.forString("192.168.1.7"), InetAddresses.forString("2001:db8::")), + new IndexOrDocValuesQuery( + query, + SortedSetDocValuesField.newSlowRangeQuery( + ((PointRangeQuery) query).getField(), + new BytesRef(((PointRangeQuery) query).getLowerPoint()), + new BytesRef(((PointRangeQuery) query).getUpperPoint()), + true, + true + ) + ), ft.rangeQuery("::ffff:c0a8:107", "2001:db8::", true, true, null, null, null, null) ); - MappedFieldType unsearchable = new IpFieldMapper.IpFieldType("field", false, false, true, null, Collections.emptyMap()); + MappedFieldType unsearchable = new IpFieldMapper.IpFieldType("field", false, false, false, null, Collections.emptyMap()); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> unsearchable.rangeQuery("::1", "2001::", true, true, null, null, null, null) ); - assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); + assertEquals("Cannot search on field [field] since it is both not indexed, and does not have doc_values enabled.", e.getMessage()); } public void testFetchSourceValue() throws IOException { diff --git 
a/server/src/test/java/org/opensearch/index/mapper/IpRangeFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/IpRangeFieldMapperTests.java index a805f1b235856..8de78a99c5e8a 100644 --- a/server/src/test/java/org/opensearch/index/mapper/IpRangeFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/IpRangeFieldMapperTests.java @@ -34,14 +34,13 @@ import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexableField; import org.opensearch.common.CheckedConsumer; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.network.InetAddresses; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexService; import org.opensearch.index.termvectors.TermVectorsService; import org.opensearch.test.OpenSearchSingleNodeTestCase; @@ -73,8 +72,8 @@ public void testStoreCidr() throws Exception { .field("type", "ip_range") .field("store", true); mapping = mapping.endObject().endObject().endObject().endObject(); - DocumentMapper mapper = parser.parse("type", new CompressedXContent(Strings.toString(mapping))); - assertEquals(Strings.toString(mapping), mapper.mappingSource().toString()); + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping.toString())); + assertEquals(mapping.toString(), mapper.mappingSource().toString()); final Map<String, String> cases = new HashMap<>(); cases.put("192.168.0.0/15", "192.169.255.255"); cases.put("192.168.0.0/16", "192.168.255.255"); @@ -119,18 +118,17 @@ public void testIgnoreMalformed() throws Exception { final DocumentMapper mapper = parser.parse( "type", new CompressedXContent( - Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("properties") - .startObject("field") - .field("type", "ip_range") - .endObject() - .endObject() - .endObject() - .endObject() - ) + XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("properties") + .startObject("field") + .field("type", "ip_range") + .endObject() + .endObject() + .endObject() + .endObject() + .toString() ) ); @@ -141,19 +139,18 @@ public void testIgnoreMalformed() throws Exception { final DocumentMapper mapper2 = parser.parse( "type", new CompressedXContent( - Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("properties") - .startObject("field") - .field("type", "ip_range") - .field("ignore_malformed", true) - .endObject() - .endObject() - .endObject() - .endObject() - ) + XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("properties") + .startObject("field") + .field("type", "ip_range") + .field("ignore_malformed", true) + .endObject() + .endObject() + .endObject() + .endObject() + .toString() ) ); @@ -176,6 +173,6 @@ private final SourceToParse source(CheckedConsumer<XContentBuilder, IOException> XContentBuilder builder = JsonXContent.contentBuilder().startObject(); build.accept(builder); builder.endObject(); - return new SourceToParse("test", "1", BytesReference.bytes(builder), 
XContentType.JSON); + return new SourceToParse("test", "1", BytesReference.bytes(builder), MediaTypeRegistry.JSON); } } diff --git a/server/src/test/java/org/opensearch/index/mapper/JavaMultiFieldMergeTests.java b/server/src/test/java/org/opensearch/index/mapper/JavaMultiFieldMergeTests.java index f0bc7c9f8c616..93a6b0a59b864 100644 --- a/server/src/test/java/org/opensearch/index/mapper/JavaMultiFieldMergeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/JavaMultiFieldMergeTests.java @@ -33,10 +33,10 @@ package org.opensearch.index.mapper; import org.apache.lucene.index.IndexableField; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.mapper.ParseContext.Document; import org.opensearch.test.OpenSearchSingleNodeTestCase; @@ -56,7 +56,7 @@ public void testMergeMultiField() throws Exception { assertThat(mapperService.fieldType("name.indexed"), nullValue()); BytesReference json = BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("name", "some name").endObject()); - Document doc = mapperService.documentMapper().parse(new SourceToParse("test", "1", json, XContentType.JSON)).rootDoc(); + Document doc = mapperService.documentMapper().parse(new SourceToParse("test", "1", json, MediaTypeRegistry.JSON)).rootDoc(); IndexableField f = doc.getField("name"); assertThat(f, notNullValue()); f = doc.getField("name.indexed"); @@ -72,7 +72,7 @@ public void testMergeMultiField() throws Exception { assertThat(mapperService.fieldType("name.not_indexed2"), nullValue()); assertThat(mapperService.fieldType("name.not_indexed3"), nullValue()); - doc = mapperService.documentMapper().parse(new SourceToParse("test", "1", json, XContentType.JSON)).rootDoc(); + doc = mapperService.documentMapper().parse(new SourceToParse("test", "1", json, MediaTypeRegistry.JSON)).rootDoc(); f = doc.getField("name"); assertThat(f, notNullValue()); f = doc.getField("name.indexed"); @@ -109,7 +109,7 @@ public void testUpgradeFromMultiFieldTypeToMultiFields() throws Exception { assertThat(mapperService.fieldType("name.indexed"), nullValue()); BytesReference json = BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("name", "some name").endObject()); - Document doc = mapperService.documentMapper().parse(new SourceToParse("test", "1", json, XContentType.JSON)).rootDoc(); + Document doc = mapperService.documentMapper().parse(new SourceToParse("test", "1", json, MediaTypeRegistry.JSON)).rootDoc(); IndexableField f = doc.getField("name"); assertThat(f, notNullValue()); f = doc.getField("name.indexed"); @@ -125,7 +125,7 @@ public void testUpgradeFromMultiFieldTypeToMultiFields() throws Exception { assertThat(mapperService.fieldType("name.not_indexed2"), nullValue()); assertThat(mapperService.fieldType("name.not_indexed3"), nullValue()); - doc = mapperService.documentMapper().parse(new SourceToParse("test", "1", json, XContentType.JSON)).rootDoc(); + doc = mapperService.documentMapper().parse(new SourceToParse("test", "1", json, MediaTypeRegistry.JSON)).rootDoc(); f = doc.getField("name"); assertThat(f, notNullValue()); f = doc.getField("name.indexed"); diff --git a/server/src/test/java/org/opensearch/index/mapper/KeywordFieldMapperTests.java 
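
Two mechanical migrations recur through this file and the ones below: `org.opensearch.common.Strings.toString(builder)` becomes plain `builder.toString()` on the `XContentBuilder`, and `XContentType.JSON` gives way to `MediaTypeRegistry.JSON` from `org.opensearch.core.xcontent` (with the serialization helper moving to `org.opensearch.core.common.Strings`). A hedged before/after sketch; the index name, id, and field are illustrative:

```java
import java.io.IOException;

import org.opensearch.common.xcontent.XContentFactory;
import org.opensearch.core.common.bytes.BytesReference;
import org.opensearch.core.xcontent.MediaTypeRegistry;
import org.opensearch.core.xcontent.XContentBuilder;
import org.opensearch.index.mapper.SourceToParse;

final class XContentMigrationExample {
    static SourceToParse jsonSource() throws IOException {
        XContentBuilder builder = XContentFactory.jsonBuilder().startObject().field("name", "some name").endObject();

        // Before: String json = Strings.toString(builder);
        String json = builder.toString(); // XContentBuilder now renders itself

        assert json.startsWith("{");
        // Before: new SourceToParse("test", "1", BytesReference.bytes(builder), XContentType.JSON)
        return new SourceToParse("test", "1", BytesReference.bytes(builder), MediaTypeRegistry.JSON);
    }
}
```
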
b/server/src/test/java/org/opensearch/index/mapper/KeywordFieldMapperTests.java index 71eee7b89693c..4da21da40e0d8 100644 --- a/server/src/test/java/org/opensearch/index/mapper/KeywordFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/KeywordFieldMapperTests.java @@ -33,8 +33,6 @@ package org.opensearch.index.mapper; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.tests.analysis.MockLowerCaseFilter; -import org.apache.lucene.tests.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.core.LowerCaseFilter; import org.apache.lucene.analysis.core.WhitespaceTokenizer; @@ -43,8 +41,9 @@ import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.IndexableFieldType; +import org.apache.lucene.tests.analysis.MockLowerCaseFilter; +import org.apache.lucene.tests.analysis.MockTokenizer; import org.apache.lucene.util.BytesRef; -import org.opensearch.common.Strings; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexSettings; import org.opensearch.index.analysis.AnalyzerScope; @@ -69,10 +68,10 @@ import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; -import static org.apache.lucene.tests.analysis.BaseTokenStreamTestCase.assertTokenStreamContents; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; +import static org.apache.lucene.tests.analysis.BaseTokenStreamTestCase.assertTokenStreamContents; public class KeywordFieldMapperTests extends MapperTestCase { @@ -208,7 +207,7 @@ protected void registerParameters(ParameterChecker checker) throws IOException { public void testDefaults() throws Exception { XContentBuilder mapping = fieldMapping(this::minimalMapping); DocumentMapper mapper = createDocumentMapper(mapping); - assertEquals(Strings.toString(mapping), mapper.mappingSource().toString()); + assertEquals(mapping.toString(), mapper.mappingSource().toString()); ParsedDocument doc = mapper.parse(source(b -> b.field("field", "1234"))); IndexableField[] fields = doc.rootDoc().getFields("field"); diff --git a/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java index 626af63fc968d..393c448330142 100644 --- a/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java @@ -32,6 +32,7 @@ package org.opensearch.index.mapper; import com.carrotsearch.randomizedtesting.generators.RandomStrings; + import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.LowerCaseFilter; import org.apache.lucene.analysis.TokenFilter; @@ -43,13 +44,17 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.FuzzyQuery; +import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.NormsFieldExistsQuery; +import org.apache.lucene.search.Query; import org.apache.lucene.search.RegexpQuery; import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermRangeQuery; +import org.apache.lucene.search.WildcardQuery; import org.apache.lucene.util.BytesRef; +import 
org.apache.lucene.util.automaton.Operations; import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; @@ -127,14 +132,29 @@ public void testTermsQuery() { List<BytesRef> terms = new ArrayList<>(); terms.add(new BytesRef("foo")); terms.add(new BytesRef("bar")); - assertEquals(new TermInSetQuery("field", terms), ft.termsQuery(Arrays.asList("foo", "bar"), null)); + Query expected = new IndexOrDocValuesQuery( + new TermInSetQuery("field", terms), + new TermInSetQuery(MultiTermQuery.DOC_VALUES_REWRITE, "field", terms) + ); + assertEquals(expected, ft.termsQuery(Arrays.asList("foo", "bar"), null)); - MappedFieldType unsearchable = new KeywordFieldType("field", false, true, Collections.emptyMap()); + MappedFieldType onlyIndexed = new KeywordFieldType("field", true, false, Collections.emptyMap()); + Query expectedIndex = new TermInSetQuery("field", terms); + assertEquals(expectedIndex, onlyIndexed.termsQuery(Arrays.asList("foo", "bar"), null)); + + MappedFieldType onlyDocValues = new KeywordFieldType("field", false, true, Collections.emptyMap()); + Query expectedDocValues = new TermInSetQuery(MultiTermQuery.DOC_VALUES_REWRITE, "field", terms); + assertEquals(expectedDocValues, onlyDocValues.termsQuery(Arrays.asList("foo", "bar"), null)); + + MappedFieldType unsearchable = new KeywordFieldType("field", false, false, Collections.emptyMap()); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> unsearchable.termsQuery(Arrays.asList("foo", "bar"), null) ); - assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); + assertEquals( + "Cannot search on field [field] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); } public void testExistsQuery() { @@ -156,9 +176,36 @@ public void testExistsQuery() { public void testRangeQuery() { MappedFieldType ft = new KeywordFieldType("field"); + + Query indexExpected = new TermRangeQuery("field", BytesRefs.toBytesRef("foo"), BytesRefs.toBytesRef("bar"), true, false); + Query dvExpected = new TermRangeQuery( + "field", + BytesRefs.toBytesRef("foo"), + BytesRefs.toBytesRef("bar"), + true, + false, + MultiTermQuery.DOC_VALUES_REWRITE + ); + + Query expected = new IndexOrDocValuesQuery(indexExpected, dvExpected); + Query actual = ft.rangeQuery("foo", "bar", true, false, null, null, null, MOCK_QSC); + assertEquals(expected, actual); + + MappedFieldType onlyIndexed = new KeywordFieldType("field", true, false, Collections.emptyMap()); + assertEquals(indexExpected, onlyIndexed.rangeQuery("foo", "bar", true, false, null, null, null, MOCK_QSC)); + + MappedFieldType onlyDocValues = new KeywordFieldType("field", false, true, Collections.emptyMap()); + assertEquals(dvExpected, onlyDocValues.rangeQuery("foo", "bar", true, false, null, null, null, MOCK_QSC)); + + MappedFieldType unsearchable = new KeywordFieldType("field", false, false, Collections.emptyMap()); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> unsearchable.rangeQuery("foo", "bar", true, false, null, null, null, MOCK_QSC) + ); + assertEquals( - new TermRangeQuery("field", BytesRefs.toBytesRef("foo"), BytesRefs.toBytesRef("bar"), true, false), - ft.rangeQuery("foo", "bar", true, false, null, null, null, MOCK_QSC) + "Cannot search on field [field] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() ); OpenSearchException ee = expectThrows( @@ -174,16 +221,37 @@ 
public void testRangeQuery() { public void testRegexpQuery() { MappedFieldType ft = new KeywordFieldType("field"); assertEquals( - new RegexpQuery(new Term("field", "foo.*")), + new IndexOrDocValuesQuery( + new RegexpQuery(new Term("field", "foo.*")), + new RegexpQuery(new Term("field", "foo.*"), 0, 0, RegexpQuery.DEFAULT_PROVIDER, 10, MultiTermQuery.DOC_VALUES_REWRITE) + ), ft.regexpQuery("foo.*", 0, 0, 10, MultiTermQuery.CONSTANT_SCORE_BLENDED_REWRITE, MOCK_QSC) ); - MappedFieldType unsearchable = new KeywordFieldType("field", false, true, Collections.emptyMap()); + Query indexExpected = new RegexpQuery(new Term("field", "foo.*")); + MappedFieldType onlyIndexed = new KeywordFieldType("field", true, false, Collections.emptyMap()); + assertEquals(indexExpected, onlyIndexed.regexpQuery("foo.*", 0, 0, 10, MultiTermQuery.CONSTANT_SCORE_BLENDED_REWRITE, MOCK_QSC)); + + Query dvExpected = new RegexpQuery( + new Term("field", "foo.*"), + 0, + 0, + RegexpQuery.DEFAULT_PROVIDER, + 10, + MultiTermQuery.DOC_VALUES_REWRITE + ); + MappedFieldType onlyDocValues = new KeywordFieldType("field", false, true, Collections.emptyMap()); + assertEquals(dvExpected, onlyDocValues.regexpQuery("foo.*", 0, 0, 10, MultiTermQuery.DOC_VALUES_REWRITE, MOCK_QSC)); + + MappedFieldType unsearchable = new KeywordFieldType("field", false, false, Collections.emptyMap()); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> unsearchable.regexpQuery("foo.*", 0, 0, 10, null, MOCK_QSC) ); - assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); + assertEquals( + "Cannot search on field [field] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); OpenSearchException ee = expectThrows( OpenSearchException.class, @@ -199,12 +267,26 @@ public void testFuzzyQuery() { ft.fuzzyQuery("foo", Fuzziness.fromEdits(2), 1, 50, true, MOCK_QSC) ); - MappedFieldType unsearchable = new KeywordFieldType("field", false, true, Collections.emptyMap()); + Query indexExpected = new FuzzyQuery(new Term("field", "foo"), 2, 1, 50, true); + MappedFieldType onlyIndexed = new KeywordFieldType("field", true, false, Collections.emptyMap()); + assertEquals(indexExpected, onlyIndexed.fuzzyQuery("foo", Fuzziness.fromEdits(2), 1, 50, true, MOCK_QSC)); + + Query dvExpected = new FuzzyQuery(new Term("field", "foo"), 2, 1, 50, true, MultiTermQuery.DOC_VALUES_REWRITE); + MappedFieldType onlyDocValues = new KeywordFieldType("field", false, true, Collections.emptyMap()); + assertEquals( + dvExpected, + onlyDocValues.fuzzyQuery("foo", Fuzziness.fromEdits(2), 1, 50, true, MultiTermQuery.DOC_VALUES_REWRITE, MOCK_QSC) + ); + + MappedFieldType unsearchable = new KeywordFieldType("field", false, false, Collections.emptyMap()); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> unsearchable.fuzzyQuery("foo", Fuzziness.fromEdits(2), 1, 50, true, MOCK_QSC) + () -> unsearchable.fuzzyQuery("foo", Fuzziness.fromEdits(2), 1, 50, true, MultiTermQuery.DOC_VALUES_REWRITE, MOCK_QSC) + ); + assertEquals( + "Cannot search on field [field] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() ); - assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); OpenSearchException ee = expectThrows( OpenSearchException.class, @@ -213,6 +295,47 @@ public void testFuzzyQuery() { assertEquals("[fuzzy] queries cannot be executed when 'search.allow_expensive_queries' is set to false.", 
ee.getMessage()); } + public void testWildCardQuery() { + MappedFieldType ft = new KeywordFieldType("field"); + Query expected = new IndexOrDocValuesQuery( + new WildcardQuery(new Term("field", new BytesRef("foo*"))), + new WildcardQuery( + new Term("field", new BytesRef("foo*")), + Operations.DEFAULT_DETERMINIZE_WORK_LIMIT, + MultiTermQuery.DOC_VALUES_REWRITE + ) + ); + assertEquals(expected, ft.wildcardQuery("foo*", MultiTermQuery.CONSTANT_SCORE_BLENDED_REWRITE, MOCK_QSC)); + + Query indexExpected = new WildcardQuery(new Term("field", new BytesRef("foo*"))); + MappedFieldType onlyIndexed = new KeywordFieldType("field", true, false, Collections.emptyMap()); + assertEquals(indexExpected, onlyIndexed.wildcardQuery("foo*", MultiTermQuery.CONSTANT_SCORE_BLENDED_REWRITE, MOCK_QSC)); + + Query dvExpected = new WildcardQuery( + new Term("field", new BytesRef("foo*")), + Operations.DEFAULT_DETERMINIZE_WORK_LIMIT, + MultiTermQuery.DOC_VALUES_REWRITE + ); + MappedFieldType onlyDocValues = new KeywordFieldType("field", false, true, Collections.emptyMap()); + assertEquals(dvExpected, onlyDocValues.wildcardQuery("foo*", MultiTermQuery.DOC_VALUES_REWRITE, MOCK_QSC)); + + MappedFieldType unsearchable = new KeywordFieldType("field", false, false, Collections.emptyMap()); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> unsearchable.wildcardQuery("foo*", MultiTermQuery.CONSTANT_SCORE_BLENDED_REWRITE, MOCK_QSC) + ); + assertEquals( + "Cannot search on field [field] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); + + OpenSearchException ee = expectThrows( + OpenSearchException.class, + () -> ft.wildcardQuery("foo*", MultiTermQuery.CONSTANT_SCORE_BLENDED_REWRITE, MOCK_QSC_DISALLOW_EXPENSIVE) + ); + assertEquals("[wildcard] queries cannot be executed when 'search.allow_expensive_queries' is set to false.", ee.getMessage()); + } + public void testNormalizeQueries() { MappedFieldType ft = new KeywordFieldType("field"); assertEquals(new TermQuery(new Term("field", new BytesRef("FOO"))), ft.termQuery("FOO", null)); diff --git a/server/src/test/java/org/opensearch/index/mapper/LegacyGeoShapeFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/LegacyGeoShapeFieldMapperTests.java index 82482141d15f8..048b51d39ca6c 100644 --- a/server/src/test/java/org/opensearch/index/mapper/LegacyGeoShapeFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/LegacyGeoShapeFieldMapperTests.java @@ -38,12 +38,12 @@ import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree; import org.opensearch.OpenSearchException; import org.opensearch.common.Explicit; -import org.opensearch.common.Strings; import org.opensearch.common.geo.GeoUtils; import org.opensearch.common.geo.ShapeRelation; import org.opensearch.common.geo.SpatialStrategy; import org.opensearch.common.geo.builders.ShapeBuilder; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.Strings; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.geometry.Point; @@ -139,7 +139,7 @@ public void testDefaultConfiguration() throws IOException { DocumentMapper mapper = createDocumentMapper(mapping); Mapper fieldMapper = mapper.mappers().getMapper("field"); assertThat(fieldMapper, instanceOf(LegacyGeoShapeFieldMapper.class)); - assertEquals(Strings.toString(mapping), mapper.mappingSource().toString()); 
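
Taken together, the keyword `terms`/`range`/`regexp`/`fuzzy` cases and the new `testWildCardQuery` above encode one decision table: indexed plus doc_values yields an `IndexOrDocValuesQuery`; index-only or doc-values-only yields the corresponding single-sided query (the latter via `MultiTermQuery.DOC_VALUES_REWRITE`); neither throws. A sketch of the wildcard branch under that reading — the helper and its boolean flags are illustrative, not the mapper's actual signature:

```java
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexOrDocValuesQuery;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.WildcardQuery;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.automaton.Operations;

final class KeywordWildcardExample {
    static Query wildcard(String field, String pattern, boolean indexed, boolean docValues) {
        Query onIndex = new WildcardQuery(new Term(field, new BytesRef(pattern)));
        Query onDocValues = new WildcardQuery(
            new Term(field, new BytesRef(pattern)),
            Operations.DEFAULT_DETERMINIZE_WORK_LIMIT,
            MultiTermQuery.DOC_VALUES_REWRITE
        );
        if (indexed && docValues) return new IndexOrDocValuesQuery(onIndex, onDocValues);
        if (indexed) return onIndex;
        if (docValues) return onDocValues;
        throw new IllegalArgumentException(
            "Cannot search on field [" + field + "] since it is both not indexed, and does not have doc_values enabled."
        );
    }
}
```
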
+ assertEquals(mapping.toString(), mapper.mappingSource().toString()); LegacyGeoShapeFieldMapper geoShapeFieldMapper = (LegacyGeoShapeFieldMapper) fieldMapper; assertThat(geoShapeFieldMapper.fieldType().tree(), equalTo(LegacyGeoShapeFieldMapper.DeprecatedParameters.Defaults.TREE)); @@ -539,13 +539,13 @@ public void testSerializeDefaults() throws Exception { ToXContent.Params includeDefaults = new ToXContent.MapParams(singletonMap("include_defaults", "true")); { DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", "geo_shape").field("tree", "quadtree"))); - String serialized = Strings.toString(XContentType.JSON, mapper.mappers().getMapper("field"), includeDefaults); + String serialized = Strings.toString(MediaTypeRegistry.JSON, mapper.mappers().getMapper("field"), includeDefaults); assertTrue(serialized, serialized.contains("\"precision\":\"50.0m\"")); assertTrue(serialized, serialized.contains("\"tree_levels\":21")); } { DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", "geo_shape").field("tree", "geohash"))); - String serialized = Strings.toString(XContentType.JSON, mapper.mappers().getMapper("field"), includeDefaults); + String serialized = Strings.toString(MediaTypeRegistry.JSON, mapper.mappers().getMapper("field"), includeDefaults); assertTrue(serialized, serialized.contains("\"precision\":\"50.0m\"")); assertTrue(serialized, serialized.contains("\"tree_levels\":9")); } @@ -553,7 +553,7 @@ public void testSerializeDefaults() throws Exception { DocumentMapper mapper = createDocumentMapper( fieldMapping(b -> b.field("type", "geo_shape").field("tree", "quadtree").field("tree_levels", "6")) ); - String serialized = Strings.toString(XContentType.JSON, mapper.mappers().getMapper("field"), includeDefaults); + String serialized = Strings.toString(MediaTypeRegistry.JSON, mapper.mappers().getMapper("field"), includeDefaults); assertFalse(serialized, serialized.contains("\"precision\":")); assertTrue(serialized, serialized.contains("\"tree_levels\":6")); } @@ -561,7 +561,7 @@ public void testSerializeDefaults() throws Exception { DocumentMapper mapper = createDocumentMapper( fieldMapping(b -> b.field("type", "geo_shape").field("tree", "quadtree").field("precision", "6")) ); - String serialized = Strings.toString(XContentType.JSON, mapper.mappers().getMapper("field"), includeDefaults); + String serialized = Strings.toString(MediaTypeRegistry.JSON, mapper.mappers().getMapper("field"), includeDefaults); assertTrue(serialized, serialized.contains("\"precision\":\"6.0m\"")); assertFalse(serialized, serialized.contains("\"tree_levels\":")); } @@ -569,7 +569,7 @@ public void testSerializeDefaults() throws Exception { DocumentMapper mapper = createDocumentMapper( fieldMapping(b -> b.field("type", "geo_shape").field("tree", "quadtree").field("precision", "6m").field("tree_levels", "5")) ); - String serialized = Strings.toString(XContentType.JSON, mapper.mappers().getMapper("field"), includeDefaults); + String serialized = Strings.toString(MediaTypeRegistry.JSON, mapper.mappers().getMapper("field"), includeDefaults); assertTrue(serialized, serialized.contains("\"precision\":\"6.0m\"")); assertTrue(serialized, serialized.contains("\"tree_levels\":5")); } @@ -591,7 +591,7 @@ public void testPointsOnlyDefaultsWithTermStrategy() throws IOException { assertThat(strategy.getGrid().getMaxLevels(), equalTo(23)); assertThat(strategy.isPointsOnly(), equalTo(true)); // term strategy changes the default for points_only, check that we handle it correctly - 
assertThat(Strings.toString(XContentType.JSON, geoShapeFieldMapper), not(containsString("points_only"))); + assertThat(Strings.toString(MediaTypeRegistry.JSON, geoShapeFieldMapper), not(containsString("points_only"))); assertFieldWarnings("tree", "precision", "strategy"); } diff --git a/server/src/test/java/org/opensearch/index/mapper/MapperServiceTests.java b/server/src/test/java/org/opensearch/index/mapper/MapperServiceTests.java index c1c1b6d9925b5..adcfc9d7b17fc 100644 --- a/server/src/test/java/org/opensearch/index/mapper/MapperServiceTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/MapperServiceTests.java @@ -34,12 +34,12 @@ import org.apache.lucene.analysis.TokenStream; import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.Settings; -import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentContraints; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.env.Environment; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; @@ -56,16 +56,19 @@ import org.opensearch.indices.analysis.AnalysisModule.AnalysisProvider; import org.opensearch.plugins.AnalysisPlugin; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchSingleNodeTestCase; import java.io.IOException; import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.List; import java.util.Map; +import java.util.function.Function; import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -159,6 +162,26 @@ public void testMappingDepthExceedsLimit() throws Throwable { assertThat(e.getMessage(), containsString("Limit of mapping depth [1] has been exceeded")); } + public void testMappingDepthExceedsXContentLimit() throws Throwable { + final IllegalArgumentException ex = assertThrows( + IllegalArgumentException.class, + () -> createIndex( + "test1", + Settings.builder() + .put(MapperService.INDEX_MAPPING_DEPTH_LIMIT_SETTING.getKey(), XContentContraints.DEFAULT_MAX_DEPTH + 1) + .build() + ) + ); + + assertThat( + ex.getMessage(), + is( + "The provided value 1001 of the index setting 'index.mapping.depth.limit' exceeds per-JVM configured limit of 1000. " + + "Please change the setting value or increase per-JVM limit using 'opensearch.xcontent.depth.max' system property." 
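
`testMappingDepthExceedsXContentLimit` here (and `testFieldNameLengthExceedsXContentLimit` further down) pin a new invariant: a per-index mapping limit may not be raised past the per-JVM XContent parser limit, which is itself tunable only through system properties. A rough sketch of the validation the assertions imply; the validator method is hypothetical, while the setting key, the `XContentContraints` constant, and the `opensearch.xcontent.depth.max` property all come from the diff:

```java
import org.opensearch.common.xcontent.XContentContraints;

final class MappingDepthLimitExample {
    // Hypothetical check mirroring the asserted error message.
    static void validateDepthLimit(long requested) {
        final long jvmLimit = XContentContraints.DEFAULT_MAX_DEPTH; // overridable via -Dopensearch.xcontent.depth.max
        if (requested > jvmLimit) {
            throw new IllegalArgumentException(
                "The provided value " + requested
                    + " of the index setting 'index.mapping.depth.limit' exceeds per-JVM configured limit of " + jvmLimit
                    + ". Please change the setting value or increase per-JVM limit using "
                    + "'opensearch.xcontent.depth.max' system property."
            );
        }
    }
}
```
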
+ ) + ); + } + public void testUnmappedFieldType() { MapperService mapperService = createIndex("index").mapperService(); assertThat(mapperService.unmappedFieldType("keyword"), instanceOf(KeywordFieldType.class)); @@ -267,22 +290,21 @@ public void testFieldAliasWithMismatchedNestedScope() throws Throwable { } public void testTotalFieldsLimitWithFieldAlias() throws Throwable { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("properties") - .startObject("alias") - .field("type", "alias") - .field("path", "field") - .endObject() - .startObject("field") - .field("type", "text") - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("properties") + .startObject("alias") + .field("type", "alias") + .field("path", "field") + .endObject() + .startObject("field") + .field("type", "text") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); int numberOfFieldsIncludingAlias = 2; createIndex( @@ -302,6 +324,26 @@ public void testTotalFieldsLimitWithFieldAlias() throws Throwable { assertEquals("Limit of total fields [" + numberOfNonAliasFields + "] has been exceeded", e.getMessage()); } + public void testFieldNameLengthExceedsXContentLimit() throws Throwable { + final IllegalArgumentException ex = assertThrows( + IllegalArgumentException.class, + () -> createIndex( + "test1", + Settings.builder() + .put(MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING.getKey(), XContentContraints.DEFAULT_MAX_NAME_LEN + 1) + .build() + ) + ); + + assertThat( + ex.getMessage(), + is( + "The provided value 50001 of the index setting 'index.mapping.field_name_length.limit' exceeds per-JVM configured limit of 50000. " + + "Please change the setting value or increase per-JVM limit using 'opensearch.xcontent.name.length.max' system property." + ) + ); + } + public void testFieldNameLengthLimit() throws Throwable { int maxFieldNameLength = randomIntBetween(25, 30); String testString = new String(new char[maxFieldNameLength + 1]).replace("\0", "a"); @@ -501,6 +543,28 @@ public void testReloadSearchAnalyzers() throws IOException { ); } + public void testMapperDynamicAllowedIgnored() { + final List<Function<Settings.Builder, Settings.Builder>> scenarios = List.of( + (builder) -> builder.putNull(MapperService.INDEX_MAPPER_DYNAMIC_SETTING.getKey()), + (builder) -> builder.put(MapperService.INDEX_MAPPER_DYNAMIC_SETTING.getKey(), true), + (builder) -> builder.put(MapperService.INDEX_MAPPER_DYNAMIC_SETTING.getKey(), false) + ); + + for (int i = 0; i < scenarios.size(); i++) { + final Settings.Builder defaultSettingsBuilder = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1); + + final Settings settings = scenarios.get(i).apply(defaultSettingsBuilder).build(); + + createIndex("test" + i, settings).mapperService(); + } + + assertWarnings( + "[index.mapper.dynamic] setting was deprecated in OpenSearch and will be removed in a future release! See the breaking changes documentation for the next major version." 
+ ); + } + private boolean assertSameContainedFilters(TokenFilterFactory[] originalTokenFilter, NamedAnalyzer updatedAnalyzer) { ReloadableCustomAnalyzer updatedReloadableAnalyzer = (ReloadableCustomAnalyzer) updatedAnalyzer.analyzer(); TokenFilterFactory[] newTokenFilters = updatedReloadableAnalyzer.getComponents().getTokenFilters(); diff --git a/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldAnalyzerModeTests.java b/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldAnalyzerModeTests.java new file mode 100644 index 0000000000000..13cb279418fa8 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldAnalyzerModeTests.java @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.mapper; + +public class MatchOnlyTextFieldAnalyzerModeTests extends TextFieldAnalyzerModeTests { + @Override + ParametrizedFieldMapper.TypeParser getTypeParser() { + return MatchOnlyTextFieldMapper.PARSER; + } +} diff --git a/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapperTests.java new file mode 100644 index 0000000000000..580f8cccc9af5 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapperTests.java @@ -0,0 +1,450 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.IndexableFieldType; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.MultiPhraseQuery; +import org.apache.lucene.search.PhraseQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.tests.analysis.MockSynonymAnalyzer; +import org.opensearch.common.lucene.search.MultiPhrasePrefixQuery; +import org.opensearch.core.common.Strings; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.query.MatchPhrasePrefixQueryBuilder; +import org.opensearch.index.query.MatchPhraseQueryBuilder; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.query.SourceFieldMatchQuery; +import org.opensearch.index.search.MatchQuery; +import org.junit.Before; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.core.Is.is; + +public class MatchOnlyTextFieldMapperTests extends TextFieldMapperTests { + + @Before + public void setupMatchOnlyTextFieldMapper() { + textFieldName = "match_only_text"; + } + + @Override + public void testDefaults() throws IOException { + DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + assertEquals(fieldMapping(this::minimalMapping).toString(), 
mapper.mappingSource().toString()); + + ParsedDocument doc = mapper.parse(source(b -> b.field("field", "1234"))); + IndexableField[] fields = doc.rootDoc().getFields("field"); + assertEquals(1, fields.length); + assertEquals("1234", fields[0].stringValue()); + IndexableFieldType fieldType = fields[0].fieldType(); + assertThat(fieldType.omitNorms(), equalTo(true)); + assertTrue(fieldType.tokenized()); + assertFalse(fieldType.stored()); + assertThat(fieldType.indexOptions(), equalTo(IndexOptions.DOCS)); + assertThat(fieldType.storeTermVectors(), equalTo(false)); + assertThat(fieldType.storeTermVectorOffsets(), equalTo(false)); + assertThat(fieldType.storeTermVectorPositions(), equalTo(false)); + assertThat(fieldType.storeTermVectorPayloads(), equalTo(false)); + assertEquals(DocValuesType.NONE, fieldType.docValuesType()); + } + + @Override + public void testEnableStore() throws IOException { + DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", textFieldName).field("store", true))); + ParsedDocument doc = mapper.parse(source(b -> b.field("field", "1234"))); + IndexableField[] fields = doc.rootDoc().getFields("field"); + assertEquals(1, fields.length); + assertTrue(fields[0].fieldType().stored()); + } + + @Override + public void testIndexOptions() throws IOException { + Map<String, IndexOptions> supportedOptions = new HashMap<>(); + supportedOptions.put("docs", IndexOptions.DOCS); + + Map<String, IndexOptions> unsupportedOptions = new HashMap<>(); + unsupportedOptions.put("freqs", IndexOptions.DOCS_AND_FREQS); + unsupportedOptions.put("positions", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS); + unsupportedOptions.put("offsets", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS); + + for (String option : supportedOptions.keySet()) { + XContentBuilder mapping = MediaTypeRegistry.JSON.contentBuilder().startObject().startObject("_doc").startObject("properties"); + mapping.startObject(option).field("type", textFieldName).field("index_options", option).endObject(); + mapping.endObject().endObject().endObject(); + + DocumentMapper mapper = createDocumentMapper(mapping); + String serialized = Strings.toString(MediaTypeRegistry.JSON, mapper); + assertThat(serialized, containsString("\"docs\":{\"type\":\"match_only_text\"}")); + + ParsedDocument doc = mapper.parse(source(b -> { b.field(option, "1234"); })); + + IndexOptions options = supportedOptions.get(option); + IndexableField[] fields = doc.rootDoc().getFields(option); + assertEquals(1, fields.length); + assertEquals(options, fields[0].fieldType().indexOptions()); + } + + for (String option : unsupportedOptions.keySet()) { + XContentBuilder mapping = MediaTypeRegistry.JSON.contentBuilder().startObject().startObject("_doc").startObject("properties"); + mapping.startObject(option).field("type", textFieldName).field("index_options", option).endObject(); + mapping.endObject().endObject().endObject(); + MapperParsingException e = expectThrows(MapperParsingException.class, () -> createDocumentMapper(mapping)); + assertThat( + e.getMessage(), + containsString( + "Failed to parse mapping [_doc]: Unknown value [" + option + "] for field [index_options] - accepted values are [docs]" + ) + ); + } + } + + @Override + public void testAnalyzedFieldPositionIncrementWithoutPositions() { + for (String indexOptions : List.of("docs")) { + try { + createDocumentMapper( + fieldMapping( + b -> b.field("type", textFieldName).field("index_options", indexOptions).field("position_increment_gap", 10) + ) + ); + } catch (IOException e) { + throw 
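
`testDefaults` above is the clearest statement of what `match_only_text` trades away relative to `text`: norms are omitted and postings carry only document ids (`IndexOptions.DOCS`), so positional and scoring features must be recovered elsewhere. A minimal mapping sketch in the same builder style these tests use (the field name is illustrative):

```java
import java.io.IOException;

import org.opensearch.core.xcontent.MediaTypeRegistry;
import org.opensearch.core.xcontent.XContentBuilder;

final class MatchOnlyTextMappingExample {
    static String minimalMapping() throws IOException {
        XContentBuilder mapping = MediaTypeRegistry.JSON.contentBuilder()
            .startObject()
            .startObject("_doc")
            .startObject("properties")
            .startObject("message")
            .field("type", "match_only_text") // docs-only postings, no norms
            .endObject()
            .endObject()
            .endObject()
            .endObject();
        return mapping.toString();
    }
}
```
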
new RuntimeException(e); + } + } + } + + @Override + public void testBWCSerialization() throws IOException {} + + @Override + public void testPositionIncrementGap() throws IOException {} + + @Override + public void testDefaultPositionIncrementGap() throws IOException {} + + @Override + public void testMinimalToMaximal() throws IOException {} + + @Override + public void testIndexPrefixMapping() throws IOException { + MapperParsingException e = expectThrows( + MapperParsingException.class, + () -> createDocumentMapper( + fieldMapping( + b -> b.field("type", textFieldName) + .field("analyzer", "standard") + .startObject("index_prefixes") + .field("min_chars", 2) + .field("max_chars", 10) + .endObject() + ) + ) + ); + assertEquals( + "Failed to parse mapping [_doc]: Index prefixes cannot be enabled on for match_only_text field. Use text field instead", + e.getMessage() + ); + } + + @Override + public void testIndexPrefixIndexTypes() throws IOException { + // not supported and asserted the expected behavior in testIndexPrefixMapping + } + + @Override + public void testFastPhrasePrefixes() throws IOException { + // not supported and asserted the expected behavior in testIndexPrefixMapping + } + + public void testPhrasePrefixes() throws IOException { + MapperService mapperService = createMapperService(mapping(b -> { + b.startObject("field"); + { + b.field("type", textFieldName); + b.field("analyzer", "my_stop_analyzer"); // "standard" will be replaced with MockSynonymAnalyzer + } + b.endObject(); + b.startObject("synfield"); + { + b.field("type", textFieldName); + b.field("analyzer", "standard"); // "standard" will be replaced with MockSynonymAnalyzer + } + b.endObject(); + })); + QueryShardContext queryShardContext = createQueryShardContext(mapperService); + + { + Query q = new MatchPhrasePrefixQueryBuilder("field", "two words").toQuery(queryShardContext); + MultiPhrasePrefixQuery mqb = new MultiPhrasePrefixQuery("field"); + mqb.add(new Term("field", "words")); + MultiPhrasePrefixQuery mqbFilter = new MultiPhrasePrefixQuery("field"); + mqbFilter.add(new Term("field", "two")); + mqbFilter.add(new Term("field", "words")); + Query expected = new SourceFieldMatchQuery( + new BooleanQuery.Builder().add(new TermQuery(new Term("field", "two")), BooleanClause.Occur.FILTER) + .add(mqb, BooleanClause.Occur.FILTER) + .build(), + mqbFilter, + mapperService.fieldType("field"), + queryShardContext + ); + assertThat(q, equalTo(expected)); + } + + { + Query q = new MatchPhrasePrefixQueryBuilder("field", "three words here").toQuery(queryShardContext); + MultiPhrasePrefixQuery mqb = new MultiPhrasePrefixQuery("field"); + mqb.add(new Term("field", "here")); + MultiPhrasePrefixQuery mqbFilter = new MultiPhrasePrefixQuery("field"); + mqbFilter.add(new Term("field", "three")); + mqbFilter.add(new Term("field", "words")); + mqbFilter.add(new Term("field", "here")); + Query expected = new SourceFieldMatchQuery( + new BooleanQuery.Builder().add(new TermQuery(new Term("field", "three")), BooleanClause.Occur.FILTER) + .add(new TermQuery(new Term("field", "words")), BooleanClause.Occur.FILTER) + .add(mqb, BooleanClause.Occur.FILTER) + .build(), + mqbFilter, + mapperService.fieldType("field"), + queryShardContext + ); + assertThat(q, equalTo(expected)); + } + + { + Query q = new MatchPhrasePrefixQueryBuilder("field", "two words").slop(1).toQuery(queryShardContext); + MultiPhrasePrefixQuery mqb = new MultiPhrasePrefixQuery("field"); + mqb.add(new Term("field", "words")); + MultiPhrasePrefixQuery mqbFilter = new 
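
Because `match_only_text` indexes no positions, the phrase-prefix expectations being built in `testPhrasePrefixes` all share one shape: a conjunction of cheap term `FILTER` clauses (plus the prefix expansion) approximates the match at the index level, and a `SourceFieldMatchQuery` then re-verifies the real positional query against `_source`. A sketch of the expected query for the two-term case, using only constructors that appear in the diff (`mapperService` and `queryShardContext` are assumed in scope as in the tests):

```java
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.opensearch.common.lucene.search.MultiPhrasePrefixQuery;
import org.opensearch.index.mapper.MapperService;
import org.opensearch.index.query.QueryShardContext;
import org.opensearch.index.query.SourceFieldMatchQuery;

final class PhrasePrefixExpectationExample {
    static Query expectedFor(MapperService mapperService, QueryShardContext context) {
        MultiPhrasePrefixQuery prefix = new MultiPhrasePrefixQuery("field");
        prefix.add(new Term("field", "words")); // last position expands as a prefix

        MultiPhrasePrefixQuery verifier = new MultiPhrasePrefixQuery("field");
        verifier.add(new Term("field", "two"));
        verifier.add(new Term("field", "words"));

        Query approximation = new BooleanQuery.Builder().add(new TermQuery(new Term("field", "two")), BooleanClause.Occur.FILTER)
            .add(prefix, BooleanClause.Occur.FILTER)
            .build();

        // Index-level filter first; positions are then checked against _source.
        return new SourceFieldMatchQuery(approximation, verifier, mapperService.fieldType("field"), context);
    }
}
```
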
MultiPhrasePrefixQuery("field"); + mqbFilter.setSlop(1); + mqbFilter.add(new Term("field", "two")); + mqbFilter.add(new Term("field", "words")); + Query expected = new SourceFieldMatchQuery( + new BooleanQuery.Builder().add(new TermQuery(new Term("field", "two")), BooleanClause.Occur.FILTER) + .add(mqb, BooleanClause.Occur.FILTER) + .build(), + mqbFilter, + mapperService.fieldType("field"), + queryShardContext + ); + assertThat(q, equalTo(expected)); + } + + { + Query q = new MatchPhrasePrefixQueryBuilder("field", "singleton").toQuery(queryShardContext); + MultiPhrasePrefixQuery mqb = new MultiPhrasePrefixQuery("field"); + mqb.add(new Term("field", "singleton")); + Query expected = new SourceFieldMatchQuery( + new BooleanQuery.Builder().add(mqb, BooleanClause.Occur.FILTER).build(), + mqb, + mapperService.fieldType("field"), + queryShardContext + ); + assertThat(q, is(expected)); + } + + { + Query q = new MatchPhrasePrefixQueryBuilder("field", "sparkle a stopword").toQuery(queryShardContext); + MultiPhrasePrefixQuery mqb = new MultiPhrasePrefixQuery("field"); + mqb.add(new Term("field", "stopword")); + MultiPhrasePrefixQuery mqbFilter = new MultiPhrasePrefixQuery("field"); + mqbFilter.add(new Term("field", "sparkle")); + mqbFilter.add(new Term[] { new Term("field", "stopword") }, 2); + Query expected = new SourceFieldMatchQuery( + new BooleanQuery.Builder().add(new TermQuery(new Term("field", "sparkle")), BooleanClause.Occur.FILTER) + .add(mqb, BooleanClause.Occur.FILTER) + .build(), + mqbFilter, + mapperService.fieldType("field"), + queryShardContext + ); + assertThat(q, equalTo(expected)); + } + + { + MatchQuery matchQuery = new MatchQuery(queryShardContext); + matchQuery.setAnalyzer(new MockSynonymAnalyzer()); + Query q = matchQuery.parse(MatchQuery.Type.PHRASE_PREFIX, "synfield", "motor dogs"); + MultiPhrasePrefixQuery mqb = new MultiPhrasePrefixQuery("synfield"); + mqb.add(new Term[] { new Term("synfield", "dogs"), new Term("synfield", "dog") }); + MultiPhrasePrefixQuery mqbFilter = new MultiPhrasePrefixQuery("synfield"); + mqbFilter.add(new Term("synfield", "motor")); + mqbFilter.add(new Term[] { new Term("synfield", "dogs"), new Term("synfield", "dog") }); + Query expected = new SourceFieldMatchQuery( + new BooleanQuery.Builder().add(new TermQuery(new Term("synfield", "motor")), BooleanClause.Occur.FILTER) + .add(mqb, BooleanClause.Occur.FILTER) + .build(), + mqbFilter, + mapperService.fieldType("synfield"), + queryShardContext + ); + assertThat(q, equalTo(expected)); + } + + { + MatchQuery matchQuery = new MatchQuery(queryShardContext); + matchQuery.setPhraseSlop(1); + matchQuery.setAnalyzer(new MockSynonymAnalyzer()); + Query q = matchQuery.parse(MatchQuery.Type.PHRASE_PREFIX, "synfield", "two dogs"); + MultiPhrasePrefixQuery mqb = new MultiPhrasePrefixQuery("synfield"); + mqb.add(new Term[] { new Term("synfield", "dogs"), new Term("synfield", "dog") }); + MultiPhrasePrefixQuery mqbFilter = new MultiPhrasePrefixQuery("synfield"); + mqbFilter.add(new Term("synfield", "two")); + mqbFilter.add(new Term[] { new Term("synfield", "dogs"), new Term("synfield", "dog") }); + mqbFilter.setSlop(1); + Query expected = new SourceFieldMatchQuery( + new BooleanQuery.Builder().add(new TermQuery(new Term("synfield", "two")), BooleanClause.Occur.FILTER) + .add(mqb, BooleanClause.Occur.FILTER) + .build(), + mqbFilter, + mapperService.fieldType("synfield"), + queryShardContext + ); + assertThat(q, equalTo(expected)); + } + + { + MatchQuery matchQuery = new MatchQuery(queryShardContext); + 
matchQuery.setAnalyzer(new MockSynonymAnalyzer()); + Query q = matchQuery.parse(MatchQuery.Type.PHRASE_PREFIX, "synfield", "three dogs word"); + MultiPhrasePrefixQuery mqb = new MultiPhrasePrefixQuery("synfield"); + mqb.add(new Term("synfield", "word")); + MultiPhrasePrefixQuery mqbFilter = new MultiPhrasePrefixQuery("synfield"); + mqbFilter.add(new Term("synfield", "three")); + mqbFilter.add(new Term[] { new Term("synfield", "dogs"), new Term("synfield", "dog") }); + mqbFilter.add(new Term("synfield", "word")); + Query expected = new SourceFieldMatchQuery( + new BooleanQuery.Builder().add(new TermQuery(new Term("synfield", "three")), BooleanClause.Occur.FILTER) + .add( + new BooleanQuery.Builder().add(new TermQuery(new Term("synfield", "dogs")), BooleanClause.Occur.SHOULD) + .add(new TermQuery(new Term("synfield", "dog")), BooleanClause.Occur.SHOULD) + .build(), + BooleanClause.Occur.FILTER + ) + .add(mqb, BooleanClause.Occur.FILTER) + .build(), + mqbFilter, + mapperService.fieldType("synfield"), + queryShardContext + ); + assertThat(q, equalTo(expected)); + } + } + + @Override + public void testFastPhraseMapping() throws IOException { + MapperParsingException e = expectThrows(MapperParsingException.class, () -> createMapperService(mapping(b -> { + b.startObject("field") + .field("type", textFieldName) + .field("analyzer", "my_stop_analyzer") + .field("index_phrases", true) + .endObject(); + // "standard" will be replaced with MockSynonymAnalyzer + b.startObject("synfield").field("type", textFieldName).field("analyzer", "standard").field("index_phrases", true).endObject(); + }))); + assertEquals( + "Failed to parse mapping [_doc]: Index phrases cannot be enabled on for match_only_text field. Use text field instead", + e.getMessage() + ); + } + + @Override + public void testSimpleMerge() throws IOException {} + + public void testPhraseQuery() throws IOException { + MapperService mapperService = createMapperService(mapping(b -> { + b.startObject("field").field("type", textFieldName).field("analyzer", "my_stop_analyzer").endObject(); + // "standard" will be replaced with MockSynonymAnalyzer + b.startObject("synfield").field("type", textFieldName).field("analyzer", "standard").endObject(); + })); + QueryShardContext queryShardContext = createQueryShardContext(mapperService); + + Query q = new MatchPhraseQueryBuilder("field", "two words").toQuery(queryShardContext); + Query expectedQuery = new SourceFieldMatchQuery( + new BooleanQuery.Builder().add(new TermQuery(new Term("field", "two")), BooleanClause.Occur.FILTER) + .add(new TermQuery(new Term("field", "words")), BooleanClause.Occur.FILTER) + .build(), + new PhraseQuery("field", "two", "words"), + mapperService.fieldType("field"), + queryShardContext + ); + + assertThat(q, is(expectedQuery)); + Query q4 = new MatchPhraseQueryBuilder("field", "singleton").toQuery(queryShardContext); + assertThat(q4, is(new TermQuery(new Term("field", "singleton")))); + + Query q2 = new MatchPhraseQueryBuilder("field", "three words here").toQuery(queryShardContext); + expectedQuery = new SourceFieldMatchQuery( + new BooleanQuery.Builder().add(new TermQuery(new Term("field", "three")), BooleanClause.Occur.FILTER) + .add(new TermQuery(new Term("field", "words")), BooleanClause.Occur.FILTER) + .add(new TermQuery(new Term("field", "here")), BooleanClause.Occur.FILTER) + .build(), + new PhraseQuery("field", "three", "words", "here"), + mapperService.fieldType("field"), + queryShardContext + ); + assertThat(q2, is(expectedQuery)); + + Query q3 = new 
MatchPhraseQueryBuilder("field", "two words").slop(2).toQuery(queryShardContext); + expectedQuery = new SourceFieldMatchQuery( + new BooleanQuery.Builder().add(new TermQuery(new Term("field", "two")), BooleanClause.Occur.FILTER) + .add(new TermQuery(new Term("field", "words")), BooleanClause.Occur.FILTER) + .build(), + new PhraseQuery(2, "field", "two", "words"), + mapperService.fieldType("field"), + queryShardContext + ); + assertThat(q3, is(expectedQuery)); + + Query q5 = new MatchPhraseQueryBuilder("field", "sparkle a stopword").toQuery(queryShardContext); + expectedQuery = new SourceFieldMatchQuery( + new BooleanQuery.Builder().add(new TermQuery(new Term("field", "sparkle")), BooleanClause.Occur.FILTER) + .add(new TermQuery(new Term("field", "stopword")), BooleanClause.Occur.FILTER) + .build(), + new PhraseQuery.Builder().add(new Term("field", "sparkle")).add(new Term("field", "stopword"), 2).build(), + mapperService.fieldType("field"), + queryShardContext + ); + assertThat(q5, is(expectedQuery)); + + MatchQuery matchQuery = new MatchQuery(queryShardContext); + matchQuery.setAnalyzer(new MockSynonymAnalyzer()); + Query q6 = matchQuery.parse(MatchQuery.Type.PHRASE, "synfield", "motor dogs"); + expectedQuery = new SourceFieldMatchQuery( + new BooleanQuery.Builder().add(new TermQuery(new Term("synfield", "motor")), BooleanClause.Occur.FILTER) + .add( + new BooleanQuery.Builder().add(new TermQuery(new Term("synfield", "dogs")), BooleanClause.Occur.SHOULD) + .add(new TermQuery(new Term("synfield", "dog")), BooleanClause.Occur.SHOULD) + .build(), + BooleanClause.Occur.FILTER + ) + .build(), + new MultiPhraseQuery.Builder().add(new Term("synfield", "motor")) + .add(new Term[] { new Term("synfield", "dogs"), new Term("synfield", "dog") }, 1) + .build(), + mapperService.fieldType("synfield"), + queryShardContext + ); + assertThat(q6, is(expectedQuery)); + } +} diff --git a/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldTypeTests.java new file mode 100644 index 0000000000000..51234fa04ddc2 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldTypeTests.java @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.mapper; + +import org.opensearch.common.lucene.Lucene; + +public class MatchOnlyTextFieldTypeTests extends TextFieldTypeTests { + + @Override + TextFieldMapper.TextFieldType createFieldType(boolean searchable) { + TextSearchInfo tsi = new TextSearchInfo( + TextFieldMapper.Defaults.FIELD_TYPE, + null, + Lucene.STANDARD_ANALYZER, + Lucene.STANDARD_ANALYZER + ); + return new MatchOnlyTextFieldMapper.MatchOnlyTextFieldType( + "field", + searchable, + false, + tsi, + ParametrizedFieldMapper.Parameter.metaParam().get() + ); + } +} diff --git a/server/src/test/java/org/opensearch/index/mapper/MultiFieldTests.java b/server/src/test/java/org/opensearch/index/mapper/MultiFieldTests.java index 76110a8a23e1f..92ff57aa72e51 100644 --- a/server/src/test/java/org/opensearch/index/mapper/MultiFieldTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/MultiFieldTests.java @@ -35,14 +35,13 @@ import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.apache.lucene.util.BytesRef; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.compress.CompressedXContent; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.support.XContentMapValues; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexService; import org.opensearch.index.mapper.ParseContext.Document; import org.opensearch.index.mapper.TextFieldMapper.TextFieldType; @@ -76,7 +75,7 @@ private void testMultiField(String mapping) throws Exception { .merge(MapperService.SINGLE_MAPPING_NAME, new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/opensearch/index/mapper/multifield/test-data.json")); - Document doc = mapperService.documentMapper().parse(new SourceToParse("test", "1", json, XContentType.JSON)).rootDoc(); + Document doc = mapperService.documentMapper().parse(new SourceToParse("test", "1", json, MediaTypeRegistry.JSON)).rootDoc(); IndexableField f = doc.getField("name"); assertThat(f.name(), equalTo("name")); @@ -155,7 +154,7 @@ public void testBuildThenParse() throws Exception { .parse(MapperService.SINGLE_MAPPING_NAME, new CompressedXContent(builtMapping)); BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/opensearch/index/mapper/multifield/test-data.json")); - Document doc = docMapper.parse(new SourceToParse("test", "1", json, XContentType.JSON)).rootDoc(); + Document doc = docMapper.parse(new SourceToParse("test", "1", json, MediaTypeRegistry.JSON)).rootDoc(); IndexableField f = doc.getField("name"); assertThat(f.name(), equalTo("name")); @@ -196,7 +195,7 @@ public void testMultiFieldsInConsistentOrder() throws Exception { builder = builder.startObject(multiFieldName).field("type", "text").endObject(); } builder = builder.endObject().endObject().endObject().endObject().endObject(); - String mapping = Strings.toString(builder); + String mapping = builder.toString(); DocumentMapper docMapper = createIndex("test").mapperService() .documentMapperParser() .parse("type", new 
CompressedXContent(mapping)); @@ -222,22 +221,21 @@ public void testMultiFieldsInConsistentOrder() throws Exception { } public void testObjectFieldNotAllowed() throws Exception { - String mapping = Strings.toString( - jsonBuilder().startObject() - .startObject("type") - .startObject("properties") - .startObject("my_field") - .field("type", "text") - .startObject("fields") - .startObject("multi") - .field("type", "object") - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = jsonBuilder().startObject() + .startObject("type") + .startObject("properties") + .startObject("my_field") + .field("type", "text") + .startObject("fields") + .startObject("multi") + .field("type", "object") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); final DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); try { parser.parse("type", new CompressedXContent(mapping)); @@ -248,22 +246,21 @@ public void testObjectFieldNotAllowed() throws Exception { } public void testNestedFieldNotAllowed() throws Exception { - String mapping = Strings.toString( - jsonBuilder().startObject() - .startObject("type") - .startObject("properties") - .startObject("my_field") - .field("type", "text") - .startObject("fields") - .startObject("multi") - .field("type", "nested") - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = jsonBuilder().startObject() + .startObject("type") + .startObject("properties") + .startObject("my_field") + .field("type", "text") + .startObject("fields") + .startObject("multi") + .field("type", "nested") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); final DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); try { parser.parse("type", new CompressedXContent(mapping)); @@ -293,7 +290,7 @@ public void testMultiFieldWithDot() throws IOException { MapperService mapperService = createIndex("test").mapperService(); try { - mapperService.documentMapperParser().parse("my_type", new CompressedXContent(Strings.toString(mapping))); + mapperService.documentMapperParser().parse("my_type", new CompressedXContent(mapping.toString())); fail("this should throw an exception because one field contains a dot"); } catch (MapperParsingException e) { assertThat(e.getMessage(), equalTo("Field name [raw.foo] which is a multi field of [city] cannot contain '.'")); diff --git a/server/src/test/java/org/opensearch/index/mapper/NestedObjectMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/NestedObjectMapperTests.java index d5b22ae64cd54..9a0d34c916f5c 100644 --- a/server/src/test/java/org/opensearch/index/mapper/NestedObjectMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/NestedObjectMapperTests.java @@ -33,18 +33,17 @@ package org.opensearch.index.mapper; import org.apache.lucene.index.IndexableField; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.Settings; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import 
org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.mapper.MapperService.MergeReason; import org.opensearch.index.mapper.ObjectMapper.Dynamic; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchSingleNodeTestCase; import java.io.IOException; import java.io.UncheckedIOException; @@ -66,18 +65,17 @@ protected Collection<Class<? extends Plugin>> getPlugins() { } public void testEmptyNested() throws Exception { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("properties") - .startObject("nested1") - .field("type", "nested") - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("properties") + .startObject("nested1") + .field("type", "nested") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); DocumentMapper docMapper = createIndex("test").mapperService() .documentMapperParser() @@ -88,7 +86,7 @@ public void testEmptyNested() throws Exception { "test", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("field", "value").nullField("nested1").endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ); @@ -101,7 +99,7 @@ public void testEmptyNested() throws Exception { BytesReference.bytes( XContentFactory.jsonBuilder().startObject().field("field", "value").startArray("nested").endArray().endObject() ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); @@ -109,18 +107,17 @@ public void testEmptyNested() throws Exception { } public void testSingleNested() throws Exception { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("properties") - .startObject("nested1") - .field("type", "nested") - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("properties") + .startObject("nested1") + .field("type", "nested") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); DocumentMapper docMapper = createIndex("test").mapperService() .documentMapperParser() @@ -144,7 +141,7 @@ public void testSingleNested() throws Exception { .endObject() .endObject() ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); @@ -175,7 +172,7 @@ public void testSingleNested() throws Exception { .endArray() .endObject() ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); @@ -191,23 +188,22 @@ public void testSingleNested() throws Exception { } public void testMultiNested() throws Exception { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("properties") - .startObject("nested1") - .field("type", "nested") - .startObject("properties") - .startObject("nested2") - .field("type", "nested") - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("properties") + .startObject("nested1") + .field("type", "nested") + .startObject("properties") + .startObject("nested2") + .field("type", "nested") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); DocumentMapper docMapper = 
createIndex("test").mapperService() .documentMapperParser() @@ -257,7 +253,7 @@ public void testMultiNested() throws Exception { .endArray() .endObject() ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); @@ -284,24 +280,23 @@ public void testMultiNested() throws Exception { } public void testMultiObjectAndNested1() throws Exception { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("properties") - .startObject("nested1") - .field("type", "nested") - .startObject("properties") - .startObject("nested2") - .field("type", "nested") - .field("include_in_parent", true) - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("properties") + .startObject("nested1") + .field("type", "nested") + .startObject("properties") + .startObject("nested2") + .field("type", "nested") + .field("include_in_parent", true) + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); DocumentMapper docMapper = createIndex("test").mapperService() .documentMapperParser() @@ -351,7 +346,7 @@ public void testMultiObjectAndNested1() throws Exception { .endArray() .endObject() ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); @@ -378,25 +373,24 @@ public void testMultiObjectAndNested1() throws Exception { } public void testMultiObjectAndNested2() throws Exception { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("properties") - .startObject("nested1") - .field("type", "nested") - .field("include_in_parent", true) - .startObject("properties") - .startObject("nested2") - .field("type", "nested") - .field("include_in_parent", true) - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("properties") + .startObject("nested1") + .field("type", "nested") + .field("include_in_parent", true) + .startObject("properties") + .startObject("nested2") + .field("type", "nested") + .field("include_in_parent", true) + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); DocumentMapper docMapper = createIndex("test").mapperService() .documentMapperParser() @@ -446,7 +440,7 @@ public void testMultiObjectAndNested2() throws Exception { .endArray() .endObject() ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); @@ -473,24 +467,23 @@ public void testMultiObjectAndNested2() throws Exception { } public void testMultiRootAndNested1() throws Exception { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("properties") - .startObject("nested1") - .field("type", "nested") - .startObject("properties") - .startObject("nested2") - .field("type", "nested") - .field("include_in_root", true) - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("properties") + .startObject("nested1") + .field("type", "nested") + .startObject("properties") + .startObject("nested2") + .field("type", "nested") + .field("include_in_root", true) + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + 
.endObject() + .toString(); DocumentMapper docMapper = createIndex("test").mapperService() .documentMapperParser() @@ -540,7 +533,7 @@ public void testMultiRootAndNested1() throws Exception { .endArray() .endObject() ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); @@ -574,27 +567,26 @@ public void testMultiRootAndNested1() throws Exception { public void testMultipleLevelsIncludeRoot1() throws Exception { MapperService mapperService = createIndex("test").mapperService(); - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject(MapperService.SINGLE_MAPPING_NAME) - .startObject("properties") - .startObject("nested1") - .field("type", "nested") - .field("include_in_root", true) - .field("include_in_parent", true) - .startObject("properties") - .startObject("nested2") - .field("type", "nested") - .field("include_in_root", true) - .field("include_in_parent", true) - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject(MapperService.SINGLE_MAPPING_NAME) + .startObject("properties") + .startObject("nested1") + .field("type", "nested") + .field("include_in_root", true) + .field("include_in_parent", true) + .startObject("properties") + .startObject("nested2") + .field("type", "nested") + .field("include_in_root", true) + .field("include_in_parent", true) + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); MergeReason mergeReason = randomFrom(MergeReason.MAPPING_UPDATE, MergeReason.INDEX_TEMPLATE); mapperService.merge(MapperService.SINGLE_MAPPING_NAME, new CompressedXContent(mapping), mergeReason); @@ -618,7 +610,7 @@ public void testMultipleLevelsIncludeRoot1() throws Exception { .endArray() .endObject() ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); @@ -636,34 +628,33 @@ public void testMultipleLevelsIncludeRoot1() throws Exception { public void testMultipleLevelsIncludeRoot2() throws Exception { MapperService mapperService = createIndex("test").mapperService(); - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject(MapperService.SINGLE_MAPPING_NAME) - .startObject("properties") - .startObject("nested1") - .field("type", "nested") - .field("include_in_root", true) - .field("include_in_parent", true) - .startObject("properties") - .startObject("nested2") - .field("type", "nested") - .field("include_in_root", true) - .field("include_in_parent", false) - .startObject("properties") - .startObject("nested3") - .field("type", "nested") - .field("include_in_root", true) - .field("include_in_parent", true) - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject(MapperService.SINGLE_MAPPING_NAME) + .startObject("properties") + .startObject("nested1") + .field("type", "nested") + .field("include_in_root", true) + .field("include_in_parent", true) + .startObject("properties") + .startObject("nested2") + .field("type", "nested") + .field("include_in_root", true) + .field("include_in_parent", false) + .startObject("properties") + .startObject("nested3") + .field("type", "nested") + .field("include_in_root", true) + .field("include_in_parent", true) + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); MergeReason 
mergeReason = randomFrom(MergeReason.MAPPING_UPDATE, MergeReason.INDEX_TEMPLATE); mapperService.merge(MapperService.SINGLE_MAPPING_NAME, new CompressedXContent(mapping), mergeReason); @@ -691,7 +682,7 @@ public void testMultipleLevelsIncludeRoot2() throws Exception { .endArray() .endObject() ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); @@ -707,48 +698,46 @@ public void testMultipleLevelsIncludeRoot2() throws Exception { public void testMultipleLevelsIncludeRootWithMerge() throws Exception { MapperService mapperService = createIndex("test").mapperService(); - String firstMapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject(MapperService.SINGLE_MAPPING_NAME) - .startObject("properties") - .startObject("nested1") - .field("type", "nested") - .field("include_in_root", true) - .startObject("properties") - .startObject("nested2") - .field("type", "nested") - .field("include_in_root", true) - .field("include_in_parent", true) - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - ); + String firstMapping = XContentFactory.jsonBuilder() + .startObject() + .startObject(MapperService.SINGLE_MAPPING_NAME) + .startObject("properties") + .startObject("nested1") + .field("type", "nested") + .field("include_in_root", true) + .startObject("properties") + .startObject("nested2") + .field("type", "nested") + .field("include_in_root", true) + .field("include_in_parent", true) + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); mapperService.merge(MapperService.SINGLE_MAPPING_NAME, new CompressedXContent(firstMapping), MergeReason.INDEX_TEMPLATE); - String secondMapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject(MapperService.SINGLE_MAPPING_NAME) - .startObject("properties") - .startObject("nested1") - .field("type", "nested") - .field("include_in_root", true) - .field("include_in_parent", true) - .startObject("properties") - .startObject("nested2") - .field("type", "nested") - .field("include_in_root", true) - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - ); + String secondMapping = XContentFactory.jsonBuilder() + .startObject() + .startObject(MapperService.SINGLE_MAPPING_NAME) + .startObject("properties") + .startObject("nested1") + .field("type", "nested") + .field("include_in_root", true) + .field("include_in_parent", true) + .startObject("properties") + .startObject("nested2") + .field("type", "nested") + .field("include_in_root", true) + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); mapperService.merge(MapperService.SINGLE_MAPPING_NAME, new CompressedXContent(secondMapping), MergeReason.INDEX_TEMPLATE); DocumentMapper docMapper = mapperService.documentMapper(); @@ -771,7 +760,7 @@ public void testMultipleLevelsIncludeRootWithMerge() throws Exception { .endArray() .endObject() ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); @@ -780,24 +769,23 @@ public void testMultipleLevelsIncludeRootWithMerge() throws Exception { } public void testNestedArrayStrict() throws Exception { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("properties") - .startObject("nested1") - .field("type", "nested") - .field("dynamic", "strict") - .startObject("properties") - .startObject("field1") - .field("type", "text") - .endObject() - .endObject() - .endObject() - .endObject() 
- .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("properties") + .startObject("nested1") + .field("type", "nested") + .field("dynamic", "strict") + .startObject("properties") + .startObject("field1") + .field("type", "text") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); DocumentMapper docMapper = createIndex("test").mapperService() .documentMapperParser() @@ -826,7 +814,7 @@ public void testNestedArrayStrict() throws Exception { .endArray() .endObject() ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); @@ -841,23 +829,22 @@ public void testNestedArrayStrict() throws Exception { public void testLimitOfNestedFieldsPerIndex() throws Exception { Function<String, String> mapping = type -> { try { - return Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject(type) - .startObject("properties") - .startObject("nested1") - .field("type", "nested") - .startObject("properties") - .startObject("nested2") - .field("type", "nested") - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - ); + return XContentFactory.jsonBuilder() + .startObject() + .startObject(type) + .startObject("properties") + .startObject("nested1") + .field("type", "nested") + .startObject("properties") + .startObject("nested2") + .field("type", "nested") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); } catch (IOException e) { throw new UncheckedIOException(e); } @@ -935,18 +922,17 @@ public void testParentObjectMapperAreNested() throws Exception { public void testLimitNestedDocsDefaultSettings() throws Exception { Settings settings = Settings.builder().build(); MapperService mapperService = createIndex("test1", settings).mapperService(); - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("properties") - .startObject("nested1") - .field("type", "nested") - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("properties") + .startObject("nested1") + .field("type", "nested") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); DocumentMapper docMapper = mapperService.documentMapperParser().parse("type", new CompressedXContent(mapping)); long defaultMaxNoNestedDocs = MapperService.INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING.get(settings); @@ -963,7 +949,7 @@ public void testLimitNestedDocsDefaultSettings() throws Exception { docBuilder.endArray(); } docBuilder.endObject(); - SourceToParse source1 = new SourceToParse("test1", "1", BytesReference.bytes(docBuilder), XContentType.JSON); + SourceToParse source1 = new SourceToParse("test1", "1", BytesReference.bytes(docBuilder), MediaTypeRegistry.JSON); MapperParsingException e = expectThrows(MapperParsingException.class, () -> docMapper.parse(source1)); assertEquals( "The number of nested documents has exceeded the allowed limit of [" @@ -982,18 +968,17 @@ public void testLimitNestedDocs() throws Exception { "test1", Settings.builder().put(MapperService.INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING.getKey(), maxNoNestedDocs).build() ).mapperService(); - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("properties") - .startObject("nested1") - 
.field("type", "nested") - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("properties") + .startObject("nested1") + .field("type", "nested") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); DocumentMapper docMapper = mapperService.documentMapperParser().parse("type", new CompressedXContent(mapping)); // parsing a doc with 2 nested objects succeeds @@ -1008,7 +993,7 @@ public void testLimitNestedDocs() throws Exception { docBuilder.endArray(); } docBuilder.endObject(); - SourceToParse source1 = new SourceToParse("test1", "1", BytesReference.bytes(docBuilder), XContentType.JSON); + SourceToParse source1 = new SourceToParse("test1", "1", BytesReference.bytes(docBuilder), MediaTypeRegistry.JSON); ParsedDocument doc = docMapper.parse(source1); assertThat(doc.docs().size(), equalTo(3)); @@ -1025,7 +1010,7 @@ public void testLimitNestedDocs() throws Exception { docBuilder2.endArray(); } docBuilder2.endObject(); - SourceToParse source2 = new SourceToParse("test1", "2", BytesReference.bytes(docBuilder2), XContentType.JSON); + SourceToParse source2 = new SourceToParse("test1", "2", BytesReference.bytes(docBuilder2), MediaTypeRegistry.JSON); MapperParsingException e = expectThrows(MapperParsingException.class, () -> docMapper.parse(source2)); assertEquals( "The number of nested documents has exceeded the allowed limit of [" @@ -1044,21 +1029,20 @@ public void testLimitNestedDocsMultipleNestedFields() throws Exception { "test1", Settings.builder().put(MapperService.INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING.getKey(), maxNoNestedDocs).build() ).mapperService(); - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("properties") - .startObject("nested1") - .field("type", "nested") - .endObject() - .startObject("nested2") - .field("type", "nested") - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("properties") + .startObject("nested1") + .field("type", "nested") + .endObject() + .startObject("nested2") + .field("type", "nested") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); DocumentMapper docMapper = mapperService.documentMapperParser().parse("type", new CompressedXContent(mapping)); // parsing a doc with 2 nested objects succeeds @@ -1077,7 +1061,7 @@ public void testLimitNestedDocsMultipleNestedFields() throws Exception { docBuilder.endArray(); } docBuilder.endObject(); - SourceToParse source1 = new SourceToParse("test1", "1", BytesReference.bytes(docBuilder), XContentType.JSON); + SourceToParse source1 = new SourceToParse("test1", "1", BytesReference.bytes(docBuilder), MediaTypeRegistry.JSON); ParsedDocument doc = docMapper.parse(source1); assertThat(doc.docs().size(), equalTo(3)); @@ -1099,7 +1083,7 @@ public void testLimitNestedDocsMultipleNestedFields() throws Exception { } docBuilder2.endObject(); - SourceToParse source2 = new SourceToParse("test1", "2", BytesReference.bytes(docBuilder2), XContentType.JSON); + SourceToParse source2 = new SourceToParse("test1", "2", BytesReference.bytes(docBuilder2), MediaTypeRegistry.JSON); MapperParsingException e = expectThrows(MapperParsingException.class, () -> docMapper.parse(source2)); assertEquals( "The number of nested documents has exceeded the allowed limit of [" @@ -1113,8 +1097,8 @@ public void 
testLimitNestedDocsMultipleNestedFields() throws Exception { @Override protected boolean forbidPrivateIndexSettings() { - /** - * This is needed to force the index version with {@link IndexMetadata.SETTING_INDEX_VERSION_CREATED}. + /* + This is needed to force the index version with {@link IndexMetadata.SETTING_INDEX_VERSION_CREATED}. */ return false; } @@ -1133,19 +1117,18 @@ public void testMergeNestedMappings() throws IOException { .endObject() ).mapperService(); - String mapping1 = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject(MapperService.SINGLE_MAPPING_NAME) - .startObject("properties") - .startObject("nested1") - .field("type", "nested") - .field("include_in_parent", true) - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping1 = XContentFactory.jsonBuilder() + .startObject() + .startObject(MapperService.SINGLE_MAPPING_NAME) + .startObject("properties") + .startObject("nested1") + .field("type", "nested") + .field("include_in_parent", true) + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); // cannot update `include_in_parent` dynamically MapperException e1 = expectThrows( @@ -1154,19 +1137,18 @@ public void testMergeNestedMappings() throws IOException { ); assertEquals("the [include_in_parent] parameter can't be updated on a nested object mapping", e1.getMessage()); - String mapping2 = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject(MapperService.SINGLE_MAPPING_NAME) - .startObject("properties") - .startObject("nested1") - .field("type", "nested") - .field("include_in_root", true) - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping2 = XContentFactory.jsonBuilder() + .startObject() + .startObject(MapperService.SINGLE_MAPPING_NAME) + .startObject("properties") + .startObject("nested1") + .field("type", "nested") + .field("include_in_root", true) + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); // cannot update `include_in_root` dynamically MapperException e2 = expectThrows( diff --git a/server/src/test/java/org/opensearch/index/mapper/NestedPathFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/NestedPathFieldMapperTests.java index 9823c54b4ab13..06086e1c38466 100644 --- a/server/src/test/java/org/opensearch/index/mapper/NestedPathFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/NestedPathFieldMapperTests.java @@ -9,10 +9,10 @@ package org.opensearch.index.mapper; import org.apache.lucene.index.IndexableField; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.test.OpenSearchSingleNodeTestCase; import java.io.IOException; @@ -30,7 +30,7 @@ public void testDefaultConfig() throws IOException { new CompressedXContent("{\"" + MapperService.SINGLE_MAPPING_NAME + "\":{}}"), MapperService.MergeReason.MAPPING_UPDATE ); - ParsedDocument document = mapper.parse(new SourceToParse("index", "id", new BytesArray("{}"), XContentType.JSON)); + ParsedDocument document = mapper.parse(new SourceToParse("index", "id", new BytesArray("{}"), MediaTypeRegistry.JSON)); assertEquals(Collections.<IndexableField>emptyList(), Arrays.asList(document.rootDoc().getFields(NestedPathFieldMapper.NAME))); } 
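The hunks on either side of this point repeat two mechanical migrations: Strings.toString(XContentBuilder) is replaced by calling toString() directly on the builder, and the XContentType.JSON constant gives way to MediaTypeRegistry.JSON from org.opensearch.core.xcontent. A minimal sketch of the post-migration pattern, using only APIs that already appear in the surrounding hunks (the wrapper class and method name here are illustrative, not part of the patch):

import java.io.IOException;

import org.opensearch.common.xcontent.XContentFactory;
import org.opensearch.core.common.bytes.BytesReference;
import org.opensearch.core.xcontent.MediaTypeRegistry;
import org.opensearch.core.xcontent.XContentBuilder;
import org.opensearch.index.mapper.SourceToParse;

class MediaTypeMigrationSketch {
    static SourceToParse buildSource() throws IOException {
        // Previously: String doc = Strings.toString(builder);
        // after the migration the builder renders its own JSON via toString().
        XContentBuilder builder = XContentFactory.jsonBuilder()
            .startObject()
            .field("field", "value")
            .endObject();
        String doc = builder.toString(); // {"field":"value"}

        // Previously the media type argument was the XContentType.JSON enum constant;
        // these hunks swap in the equivalent core constant MediaTypeRegistry.JSON.
        return new SourceToParse("test", "1", BytesReference.bytes(builder), MediaTypeRegistry.JSON);
    }
}

Both substitutions appear intended to be behavior-preserving: the builder's toString() emits the same JSON that the removed Strings helper produced, so the assertions in these tests change only at the call site.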
diff --git a/server/src/test/java/org/opensearch/index/mapper/NullValueObjectMappingTests.java b/server/src/test/java/org/opensearch/index/mapper/NullValueObjectMappingTests.java index 16c4a172e46ba..eb0ef25f793ef 100644 --- a/server/src/test/java/org/opensearch/index/mapper/NullValueObjectMappingTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/NullValueObjectMappingTests.java @@ -32,11 +32,10 @@ package org.opensearch.index.mapper; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.test.OpenSearchSingleNodeTestCase; import java.io.IOException; @@ -45,18 +44,17 @@ public class NullValueObjectMappingTests extends OpenSearchSingleNodeTestCase { public void testNullValueObject() throws IOException { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("properties") - .startObject("obj1") - .field("type", "object") - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("properties") + .startObject("obj1") + .field("type", "object") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); DocumentMapper defaultMapper = createIndex("test").mapperService() .documentMapperParser() @@ -69,7 +67,7 @@ public void testNullValueObject() throws IOException { BytesReference.bytes( XContentFactory.jsonBuilder().startObject().startObject("obj1").endObject().field("value1", "test1").endObject() ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); @@ -80,7 +78,7 @@ public void testNullValueObject() throws IOException { "test", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().nullField("obj1").field("value1", "test1").endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ); @@ -99,7 +97,7 @@ public void testNullValueObject() throws IOException { .field("value1", "test1") .endObject() ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); diff --git a/server/src/test/java/org/opensearch/index/mapper/NullValueTests.java b/server/src/test/java/org/opensearch/index/mapper/NullValueTests.java index a48d0c5fb07d9..641edeb46354c 100644 --- a/server/src/test/java/org/opensearch/index/mapper/NullValueTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/NullValueTests.java @@ -32,10 +32,9 @@ package org.opensearch.index.mapper; -import org.opensearch.common.Strings; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.json.JsonXContent; import java.util.Collections; @@ -56,7 +55,7 @@ public void testNullNullValue() throws Exception { XContentBuilder b = JsonXContent.contentBuilder().startObject(); mapper.mapping().toXContent(b, params); b.endObject(); - assertThat(Strings.toString(b), containsString("\"null_value\":null")); + assertThat(b.toString(), containsString("\"null_value\":null")); } } } diff --git a/server/src/test/java/org/opensearch/index/mapper/NumberFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/NumberFieldMapperTests.java index 
ee2380ff2c04d..610b69a7fdf88 100644 --- a/server/src/test/java/org/opensearch/index/mapper/NumberFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/NumberFieldMapperTests.java @@ -34,10 +34,9 @@ import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexableField; -import org.opensearch.common.Strings; import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.mapper.NumberFieldMapper.NumberType; import org.opensearch.index.mapper.NumberFieldTypeTests.OutOfRangeSpec; import org.opensearch.index.termvectors.TermVectorsService; @@ -95,7 +94,7 @@ public void testExistsQueryDocValuesDisabled() throws IOException { public void doTestDefaults(String type) throws Exception { XContentBuilder mapping = fieldMapping(b -> b.field("type", type)); DocumentMapper mapper = createDocumentMapper(mapping); - assertEquals(Strings.toString(mapping), mapper.mappingSource().toString()); + assertEquals(mapping.toString(), mapper.mappingSource().toString()); ParsedDocument doc = mapper.parse(source(b -> b.field("field", 123))); @@ -316,7 +315,7 @@ public void testOutOfRangeValues() throws IOException { public void testLongIndexingOutOfRange() throws Exception { DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", "long").field("ignore_malformed", true))); ParsedDocument doc = mapper.parse( - source(b -> b.rawField("field", new BytesArray("9223372036854775808").streamInput(), XContentType.JSON)) + source(b -> b.rawField("field", new BytesArray("9223372036854775808").streamInput(), MediaTypeRegistry.JSON)) ); assertEquals(0, doc.rootDoc().getFields("field").length); } diff --git a/server/src/test/java/org/opensearch/index/mapper/NumberFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/NumberFieldTypeTests.java index 547c4696ce3ed..af852b12e7a30 100644 --- a/server/src/test/java/org/opensearch/index/mapper/NumberFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/NumberFieldTypeTests.java @@ -33,6 +33,7 @@ package org.opensearch.index.mapper; import com.carrotsearch.randomizedtesting.generators.RandomPicks; + import org.apache.lucene.document.Document; import org.apache.lucene.document.DoublePoint; import org.apache.lucene.document.FloatPoint; @@ -44,27 +45,28 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.sandbox.document.BigIntegerPoint; import org.apache.lucene.sandbox.document.HalfFloatPoint; -import org.apache.lucene.search.IndexSortSortedNumericDocValuesRangeQuery; import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.IndexSortSortedNumericDocValuesRangeQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.util.TestUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; -import org.apache.lucene.tests.util.TestUtil; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Numbers; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.BigArrays; -import 
org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexSettings; import org.opensearch.index.document.SortedUnsignedLongDocValuesRangeQuery; +import org.opensearch.index.document.SortedUnsignedLongDocValuesSetQuery; import org.opensearch.index.fielddata.IndexNumericFieldData; import org.opensearch.index.mapper.MappedFieldType.Relation; import org.opensearch.index.mapper.NumberFieldMapper.NumberFieldType; @@ -117,15 +119,27 @@ public void testIsFieldWithinQuery() throws IOException { public void testIntegerTermsQueryWithDecimalPart() { MappedFieldType ft = new NumberFieldMapper.NumberFieldType("field", NumberType.INTEGER); - assertEquals(IntPoint.newSetQuery("field", 1), ft.termsQuery(Arrays.asList(1, 2.1), null)); - assertEquals(IntPoint.newSetQuery("field", 1), ft.termsQuery(Arrays.asList(1.0, 2.1), null)); + assertEquals( + new IndexOrDocValuesQuery(IntPoint.newSetQuery("field", 1), SortedNumericDocValuesField.newSlowSetQuery("field", 1)), + ft.termsQuery(Arrays.asList(1, 2.1), null) + ); + assertEquals( + new IndexOrDocValuesQuery(IntPoint.newSetQuery("field", 1), SortedNumericDocValuesField.newSlowSetQuery("field", 1)), + ft.termsQuery(Arrays.asList(1.0, 2.1), null) + ); assertTrue(ft.termsQuery(Arrays.asList(1.1, 2.1), null) instanceof MatchNoDocsQuery); } public void testLongTermsQueryWithDecimalPart() { MappedFieldType ft = new NumberFieldMapper.NumberFieldType("field", NumberType.LONG); - assertEquals(LongPoint.newSetQuery("field", 1), ft.termsQuery(Arrays.asList(1, 2.1), null)); - assertEquals(LongPoint.newSetQuery("field", 1), ft.termsQuery(Arrays.asList(1.0, 2.1), null)); + assertEquals( + new IndexOrDocValuesQuery(LongPoint.newSetQuery("field", 1), SortedNumericDocValuesField.newSlowSetQuery("field", 1)), + ft.termsQuery(Arrays.asList(1, 2.1), null) + ); + assertEquals( + new IndexOrDocValuesQuery(LongPoint.newSetQuery("field", 1), SortedNumericDocValuesField.newSlowSetQuery("field", 1)), + ft.termsQuery(Arrays.asList(1.0, 2.1), null) + ); assertTrue(ft.termsQuery(Arrays.asList(1.1, 2.1), null) instanceof MatchNoDocsQuery); } @@ -150,16 +164,18 @@ public void testLongTermQueryWithDecimalPart() { } private static MappedFieldType unsearchable() { - return new NumberFieldType("field", NumberType.LONG, false, false, true, true, null, Collections.emptyMap()); + return new NumberFieldType("field", NumberType.LONG, false, false, false, true, null, Collections.emptyMap()); } public void testTermQuery() { MappedFieldType ft = new NumberFieldMapper.NumberFieldType("field", NumberFieldMapper.NumberType.LONG); - assertEquals(LongPoint.newExactQuery("field", 42), ft.termQuery("42", null)); + Query dvQuery = SortedNumericDocValuesField.newSlowExactQuery("field", 42); + Query query = new IndexOrDocValuesQuery(LongPoint.newExactQuery("field", 42), dvQuery); + assertEquals(query, ft.termQuery("42", null)); MappedFieldType unsearchable = unsearchable(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> unsearchable.termQuery("42", null)); - assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); + assertEquals("Cannot search on field [field] since it is both not indexed, and does not have doc_values enabled.", e.getMessage()); } public void testRangeQueryWithNegativeBounds() { @@ -379,7 +395,7 @@ public 
void testLongRangeQuery() { IllegalArgumentException.class, () -> unsearchable.rangeQuery("1", "3", true, true, null, null, null, MOCK_QSC) ); - assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); + assertEquals("Cannot search on field [field] since it is both not indexed, and does not have doc_values enabled.", e.getMessage()); } public void testUnsignedLongRangeQuery() { @@ -395,7 +411,23 @@ public void testUnsignedLongRangeQuery() { IllegalArgumentException.class, () -> unsearchable.rangeQuery("1", "3", true, true, null, null, null, MOCK_QSC) ); - assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); + assertEquals("Cannot search on field [field] since it is both not indexed, and does not have doc_values enabled.", e.getMessage()); + } + + public void testUnsignedLongTermsQuery() { + MappedFieldType ft = new NumberFieldMapper.NumberFieldType("field", NumberFieldMapper.NumberType.UNSIGNED_LONG); + Query expected = new IndexOrDocValuesQuery( + BigIntegerPoint.newSetQuery("field", BigInteger.valueOf(1), BigInteger.valueOf(3)), + SortedUnsignedLongDocValuesSetQuery.newSlowSetQuery("field", BigInteger.valueOf(1), BigInteger.valueOf(3)) + ); + assertEquals(expected, ft.termsQuery(List.of("1", "3"), MOCK_QSC)); + + MappedFieldType unsearchable = unsearchable(); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> unsearchable.termsQuery(List.of("1", "3"), MOCK_QSC) + ); + assertEquals("Cannot search on field [field] since it is both not indexed, and does not have doc_values enabled.", e.getMessage()); } public void testDoubleRangeQuery() { @@ -415,7 +447,7 @@ public void testDoubleRangeQuery() { IllegalArgumentException.class, () -> unsearchable.rangeQuery("1", "3", true, true, null, null, null, MOCK_QSC) ); - assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); + assertEquals("Cannot search on field [field] since it is both not indexed, and does not have doc_values enabled.", e.getMessage()); } public void testConversions() { @@ -517,8 +549,8 @@ public void testHalfFloatRange() throws IOException { float u = (randomFloat() * 2 - 1) * 65504; boolean includeLower = randomBoolean(); boolean includeUpper = randomBoolean(); - Query floatQ = NumberType.FLOAT.rangeQuery("float", l, u, includeLower, includeUpper, false, MOCK_QSC); - Query halfFloatQ = NumberType.HALF_FLOAT.rangeQuery("half_float", l, u, includeLower, includeUpper, false, MOCK_QSC); + Query floatQ = NumberType.FLOAT.rangeQuery("float", l, u, includeLower, includeUpper, false, true, MOCK_QSC); + Query halfFloatQ = NumberType.HALF_FLOAT.rangeQuery("half_float", l, u, includeLower, includeUpper, false, true, MOCK_QSC); assertEquals(searcher.count(floatQ), searcher.count(halfFloatQ)); } IOUtils.close(reader, dir); @@ -548,8 +580,17 @@ public void testUnsignedLongRange() throws IOException { BigInteger u = randomUnsignedLong(); boolean includeLower = randomBoolean(); boolean includeUpper = randomBoolean(); - Query unsignedLongQ = NumberType.UNSIGNED_LONG.rangeQuery("unsigned_long", l, u, includeLower, includeUpper, false, MOCK_QSC); - Query doubleQ = NumberType.DOUBLE.rangeQuery("double", l, u, includeLower, includeUpper, false, MOCK_QSC); + Query unsignedLongQ = NumberType.UNSIGNED_LONG.rangeQuery( + "unsigned_long", + l, + u, + includeLower, + includeUpper, + false, + true, + MOCK_QSC + ); + Query doubleQ = NumberType.DOUBLE.rangeQuery("double", l, u, includeLower, includeUpper, false, true, 
MOCK_QSC); assertEquals(searcher.count(doubleQ), searcher.count(unsignedLongQ)); } IOUtils.close(reader, dir); @@ -557,21 +598,23 @@ public void testUnsignedLongRange() throws IOException { public void testNegativeZero() { assertEquals( - NumberType.DOUBLE.rangeQuery("field", null, -0d, true, true, false, MOCK_QSC), - NumberType.DOUBLE.rangeQuery("field", null, +0d, true, false, false, MOCK_QSC) + NumberType.DOUBLE.rangeQuery("field", null, -0d, true, true, false, true, MOCK_QSC), + NumberType.DOUBLE.rangeQuery("field", null, +0d, true, false, false, true, MOCK_QSC) ); assertEquals( - NumberType.FLOAT.rangeQuery("field", null, -0f, true, true, false, MOCK_QSC), - NumberType.FLOAT.rangeQuery("field", null, +0f, true, false, false, MOCK_QSC) + NumberType.FLOAT.rangeQuery("field", null, -0f, true, true, false, true, MOCK_QSC), + NumberType.FLOAT.rangeQuery("field", null, +0f, true, false, false, true, MOCK_QSC) ); assertEquals( - NumberType.HALF_FLOAT.rangeQuery("field", null, -0f, true, true, false, MOCK_QSC), - NumberType.HALF_FLOAT.rangeQuery("field", null, +0f, true, false, false, MOCK_QSC) + NumberType.HALF_FLOAT.rangeQuery("field", null, -0f, true, true, false, true, MOCK_QSC), + NumberType.HALF_FLOAT.rangeQuery("field", null, +0f, true, false, false, true, MOCK_QSC) ); - assertFalse(NumberType.DOUBLE.termQuery("field", -0d).equals(NumberType.DOUBLE.termQuery("field", +0d))); - assertFalse(NumberType.FLOAT.termQuery("field", -0f).equals(NumberType.FLOAT.termQuery("field", +0f))); - assertFalse(NumberType.HALF_FLOAT.termQuery("field", -0f).equals(NumberType.HALF_FLOAT.termQuery("field", +0f))); + assertFalse(NumberType.DOUBLE.termQuery("field", -0d, true, true).equals(NumberType.DOUBLE.termQuery("field", +0d, true, true))); + assertFalse(NumberType.FLOAT.termQuery("field", -0f, true, true).equals(NumberType.FLOAT.termQuery("field", +0f, true, true))); + assertFalse( + NumberType.HALF_FLOAT.termQuery("field", -0f, true, true).equals(NumberType.HALF_FLOAT.termQuery("field", +0f, true, true)) + ); } // Make sure we construct the IndexOrDocValuesQuery objects with queries that match @@ -627,6 +670,7 @@ public void doTestDocValueRangeQueries(NumberType type, Supplier<Number> valueSu randomBoolean(), randomBoolean(), true, + true, MOCK_QSC ); assertThat(query, instanceOf(IndexOrDocValuesQuery.class)); @@ -707,6 +751,7 @@ public void doTestIndexSortRangeQueries(NumberType type, Supplier<Number> valueS randomBoolean(), randomBoolean(), true, + true, context ); assertThat(query, instanceOf(IndexSortSortedNumericDocValuesRangeQuery.class)); @@ -798,7 +843,7 @@ static OutOfRangeSpec of(NumberType t, Object v, String m) { public void write(XContentBuilder b) throws IOException { if (value instanceof BigInteger) { - b.rawField("field", new ByteArrayInputStream(value.toString().getBytes("UTF-8")), XContentType.JSON); + b.rawField("field", new ByteArrayInputStream(value.toString().getBytes("UTF-8")), MediaTypeRegistry.JSON); } else { b.field("field", value); } diff --git a/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java index 403b157ecd22c..be947935dc4ea 100644 --- a/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java @@ -32,16 +32,15 @@ package org.opensearch.index.mapper; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesArray; import 
org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.mapper.MapperService.MergeReason; import org.opensearch.index.mapper.ObjectMapper.Dynamic; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchSingleNodeTestCase; import java.io.IOException; import java.util.Collection; @@ -50,7 +49,7 @@ public class ObjectMapperTests extends OpenSearchSingleNodeTestCase { public void testDifferentInnerObjectTokenFailure() throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject()); + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().toString(); DocumentMapper defaultMapper = createIndex("test").mapperService() .documentMapperParser() @@ -75,7 +74,7 @@ public void testDifferentInnerObjectTokenFailure() throws Exception { + " \"value\":\"value\"\n" + " }" ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); }); @@ -83,51 +82,54 @@ public void testDifferentInnerObjectTokenFailure() throws Exception { } public void testEmptyArrayProperties() throws Exception { - String mapping = Strings.toString( - XContentFactory.jsonBuilder().startObject().startObject("type").startArray("properties").endArray().endObject().endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startArray("properties") + .endArray() + .endObject() + .endObject() + .toString(); createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); } public void testEmptyFieldsArrayMultiFields() throws Exception { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("tweet") - .startObject("properties") - .startObject("name") - .field("type", "text") - .startArray("fields") - .endArray() - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("tweet") + .startObject("properties") + .startObject("name") + .field("type", "text") + .startArray("fields") + .endArray() + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); createIndex("test").mapperService().documentMapperParser().parse("tweet", new CompressedXContent(mapping)); } public void testFieldsArrayMultiFieldsShouldThrowException() throws Exception { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("tweet") - .startObject("properties") - .startObject("name") - .field("type", "text") - .startArray("fields") - .startObject() - .field("test", "string") - .endObject() - .startObject() - .field("test2", "string") - .endObject() - .endArray() - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("tweet") + .startObject("properties") + .startObject("name") + .field("type", "text") + .startArray("fields") + .startObject() + .field("test", "string") + .endObject() + .startObject() + .field("test2", "string") + .endObject() + .endArray() + .endObject() + .endObject() + .endObject() + .endObject() + 
.toString(); try { createIndex("test").mapperService().documentMapperParser().parse("tweet", new CompressedXContent(mapping)); fail("Expected MapperParsingException"); @@ -138,38 +140,36 @@ public void testFieldsArrayMultiFieldsShouldThrowException() throws Exception { } public void testEmptyFieldsArray() throws Exception { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("tweet") - .startObject("properties") - .startArray("fields") - .endArray() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("tweet") + .startObject("properties") + .startArray("fields") + .endArray() + .endObject() + .endObject() + .endObject() + .toString(); createIndex("test").mapperService().documentMapperParser().parse("tweet", new CompressedXContent(mapping)); } public void testFieldsWithFilledArrayShouldThrowException() throws Exception { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("tweet") - .startObject("properties") - .startArray("fields") - .startObject() - .field("test", "string") - .endObject() - .startObject() - .field("test2", "string") - .endObject() - .endArray() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("tweet") + .startObject("properties") + .startArray("fields") + .startObject() + .field("test", "string") + .endObject() + .startObject() + .field("test2", "string") + .endObject() + .endArray() + .endObject() + .endObject() + .endObject() + .toString(); try { createIndex("test").mapperService().documentMapperParser().parse("tweet", new CompressedXContent(mapping)); fail("Expected MapperParsingException"); @@ -179,16 +179,15 @@ public void testFieldsWithFilledArrayShouldThrowException() throws Exception { } public void testDotAsFieldName() throws Exception { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject(".") - .field("type", "text") - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject(".") + .field("type", "text") + .endObject() + .endObject() + .endObject() + .toString(); try { createIndex("test").mapperService().documentMapperParser().parse("tweet", new CompressedXContent(mapping)); @@ -199,78 +198,78 @@ public void testDotAsFieldName() throws Exception { } public void testFieldPropertiesArray() throws Exception { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("tweet") - .startObject("properties") - .startObject("name") - .field("type", "text") - .startObject("fields") - .startObject("raw") - .field("type", "keyword") - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("tweet") + .startObject("properties") + .startObject("name") + .field("type", "text") + .startObject("fields") + .startObject("raw") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); createIndex("test").mapperService().documentMapperParser().parse("tweet", new CompressedXContent(mapping)); } public void testMerge() throws IOException { MergeReason reason = randomFrom(MergeReason.values()); - String mapping = 
Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("properties") - .startObject("foo") - .field("type", "keyword") - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("properties") + .startObject("foo") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); MapperService mapperService = createIndex("test").mapperService(); DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), reason); assertNull(mapper.root().dynamic()); - String update = Strings.toString( - XContentFactory.jsonBuilder().startObject().startObject("type").field("dynamic", "strict").endObject().endObject() - ); + String update = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .field("dynamic", "strict") + .endObject() + .endObject() + .toString(); mapper = mapperService.merge("type", new CompressedXContent(update), reason); assertEquals(Dynamic.STRICT, mapper.root().dynamic()); } public void testMergeEnabledForIndexTemplates() throws IOException { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("object") - .field("type", "object") - .field("enabled", false) - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("object") + .field("type", "object") + .field("enabled", false) + .endObject() + .endObject() + .endObject() + .toString(); MapperService mapperService = createIndex("test").mapperService(); DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.INDEX_TEMPLATE); assertNull(mapper.root().dynamic()); // If we don't explicitly set 'enabled', then the mapping should not change. - String update = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("object") - .field("type", "object") - .field("dynamic", false) - .endObject() - .endObject() - .endObject() - ); + String update = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("object") + .field("type", "object") + .field("dynamic", false) + .endObject() + .endObject() + .endObject() + .toString(); mapper = mapperService.merge("type", new CompressedXContent(update), MergeReason.INDEX_TEMPLATE); ObjectMapper objectMapper = mapper.objectMappers().get("object"); @@ -278,17 +277,16 @@ public void testMergeEnabledForIndexTemplates() throws IOException { assertFalse(objectMapper.isEnabled()); // Setting 'enabled' to true is allowed, and updates the mapping. 
- update = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("object") - .field("type", "object") - .field("enabled", true) - .endObject() - .endObject() - .endObject() - ); + update = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("object") + .field("type", "object") + .field("enabled", true) + .endObject() + .endObject() + .endObject() + .toString(); mapper = mapperService.merge("type", new CompressedXContent(update), MergeReason.INDEX_TEMPLATE); objectMapper = mapper.objectMappers().get("object"); @@ -298,129 +296,123 @@ public void testMergeEnabledForIndexTemplates() throws IOException { public void testFieldReplacementForIndexTemplates() throws IOException { MapperService mapperService = createIndex("test").mapperService(); - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("object") - .startObject("properties") - .startObject("field1") - .field("type", "keyword") - .endObject() - .startObject("field2") - .field("type", "text") - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("object") + .startObject("properties") + .startObject("field1") + .field("type", "keyword") + .endObject() + .startObject("field2") + .field("type", "text") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); mapperService.merge(MapperService.SINGLE_MAPPING_NAME, new CompressedXContent(mapping), MergeReason.INDEX_TEMPLATE); - String update = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("object") - .startObject("properties") - .startObject("field2") - .field("type", "integer") - .endObject() - .startObject("field3") - .field("type", "text") - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - ); + String update = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("object") + .startObject("properties") + .startObject("field2") + .field("type", "integer") + .endObject() + .startObject("field3") + .field("type", "text") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); DocumentMapper mapper = mapperService.merge( MapperService.SINGLE_MAPPING_NAME, new CompressedXContent(update), MergeReason.INDEX_TEMPLATE ); - String expected = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject(MapperService.SINGLE_MAPPING_NAME) - .startObject("properties") - .startObject("object") - .startObject("properties") - .startObject("field1") - .field("type", "keyword") - .endObject() - .startObject("field2") - .field("type", "integer") - .endObject() - .startObject("field3") - .field("type", "text") - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - ); + String expected = XContentFactory.jsonBuilder() + .startObject() + .startObject(MapperService.SINGLE_MAPPING_NAME) + .startObject("properties") + .startObject("object") + .startObject("properties") + .startObject("field1") + .field("type", "keyword") + .endObject() + .startObject("field2") + .field("type", "integer") + .endObject() + .startObject("field3") + .field("type", "text") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); 
assertEquals(expected, mapper.mappingSource().toString()); } public void testDisallowFieldReplacementForIndexTemplates() throws IOException { MapperService mapperService = createIndex("test").mapperService(); - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("object") - .startObject("properties") - .startObject("field1") - .field("type", "object") - .endObject() - .startObject("field2") - .field("type", "text") - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("object") + .startObject("properties") + .startObject("field1") + .field("type", "object") + .endObject() + .startObject("field2") + .field("type", "text") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); mapperService.merge(MapperService.SINGLE_MAPPING_NAME, new CompressedXContent(mapping), MergeReason.INDEX_TEMPLATE); - String firstUpdate = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("object") - .startObject("properties") - .startObject("field2") - .field("type", "nested") - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - ); + String firstUpdate = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("object") + .startObject("properties") + .startObject("field2") + .field("type", "nested") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> mapperService.merge(MapperService.SINGLE_MAPPING_NAME, new CompressedXContent(firstUpdate), MergeReason.INDEX_TEMPLATE) ); assertThat(e.getMessage(), containsString("can't merge a non object mapping [object.field2] with an object mapping")); - String secondUpdate = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("object") - .startObject("properties") - .startObject("field1") - .field("type", "text") - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - ); + String secondUpdate = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("object") + .startObject("properties") + .startObject("field1") + .field("type", "text") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); e = expectThrows( IllegalArgumentException.class, () -> mapperService.merge(MapperService.SINGLE_MAPPING_NAME, new CompressedXContent(secondUpdate), MergeReason.INDEX_TEMPLATE) @@ -429,18 +421,17 @@ public void testDisallowFieldReplacementForIndexTemplates() throws IOException { } public void testEmptyName() throws Exception { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("") - .startObject("properties") - .startObject("name") - .field("type", "text") - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("") + .startObject("properties") + .startObject("name") + .field("type", "text") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); // Empty name not allowed in index created after 5.0 IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { diff --git 
a/server/src/test/java/org/opensearch/index/mapper/ParametrizedMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/ParametrizedMapperTests.java index 9c135c806ddc4..b7c65e09bcce3 100644 --- a/server/src/test/java/org/opensearch/index/mapper/ParametrizedMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/ParametrizedMapperTests.java @@ -34,15 +34,15 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.opensearch.Version; -import org.opensearch.common.Strings; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.settings.Settings; -import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.Strings; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.analysis.AnalyzerScope; import org.opensearch.index.analysis.IndexAnalyzers; import org.opensearch.index.analysis.NamedAnalyzer; @@ -253,7 +253,7 @@ private String toStringWithDefaults(ToXContent value) throws IOException { builder.startObject(); value.toXContent(builder, params); builder.endObject(); - return Strings.toString(builder); + return builder.toString(); } @@ -265,7 +265,7 @@ public void testDefaults() throws IOException { assertTrue(mapper.fixed); assertEquals("default", mapper.variable); - assertEquals("{\"field\":" + mapping + "}", Strings.toString(XContentType.JSON, mapper)); + assertEquals("{\"field\":" + mapping + "}", Strings.toString(MediaTypeRegistry.JSON, mapper)); assertEquals( "{\"field\":{\"type\":\"test_mapper\",\"fixed\":true," + "\"fixed2\":false,\"variable\":\"default\",\"index\":true," @@ -279,7 +279,7 @@ public void testDefaults() throws IOException { public void testMerging() { String mapping = "{\"type\":\"test_mapper\",\"fixed\":false,\"required\":\"value\"}"; TestMapper mapper = fromMapping(mapping); - assertEquals("{\"field\":" + mapping + "}", Strings.toString(XContentType.JSON, mapper)); + assertEquals("{\"field\":" + mapping + "}", Strings.toString(MediaTypeRegistry.JSON, mapper)); TestMapper badMerge = fromMapping("{\"type\":\"test_mapper\",\"fixed\":true,\"fixed2\":true,\"required\":\"value\"}"); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> mapper.merge(badMerge)); @@ -288,16 +288,20 @@ public void testMerging() { + "\tCannot update parameter [fixed2] from [false] to [true]"; assertEquals(expectedError, e.getMessage()); - assertEquals("{\"field\":" + mapping + "}", Strings.toString(XContentType.JSON, mapper)); // original mapping is unaffected + assertEquals("{\"field\":" + mapping + "}", Strings.toString(MediaTypeRegistry.JSON, mapper)); // original + // mapping is + // unaffected // TODO: should we have to include 'fixed' here? Or should updates take as 'defaults' the existing values? 
TestMapper goodMerge = fromMapping("{\"type\":\"test_mapper\",\"fixed\":false,\"variable\":\"updated\",\"required\":\"value\"}"); TestMapper merged = (TestMapper) mapper.merge(goodMerge); - assertEquals("{\"field\":" + mapping + "}", Strings.toString(XContentType.JSON, mapper)); // original mapping is unaffected + assertEquals("{\"field\":" + mapping + "}", Strings.toString(MediaTypeRegistry.JSON, mapper)); // original + // mapping is + // unaffected assertEquals( "{\"field\":{\"type\":\"test_mapper\",\"fixed\":false,\"variable\":\"updated\",\"required\":\"value\"}}", - Strings.toString(XContentType.JSON, merged) + Strings.toString(MediaTypeRegistry.JSON, merged) ); } @@ -306,7 +310,7 @@ public void testMultifields() { String mapping = "{\"type\":\"test_mapper\",\"variable\":\"foo\",\"required\":\"value\"," + "\"fields\":{\"sub\":{\"type\":\"keyword\"}}}"; TestMapper mapper = fromMapping(mapping); - assertEquals("{\"field\":" + mapping + "}", Strings.toString(XContentType.JSON, mapper)); + assertEquals("{\"field\":" + mapping + "}", Strings.toString(MediaTypeRegistry.JSON, mapper)); String addSubField = "{\"type\":\"test_mapper\",\"variable\":\"foo\",\"required\":\"value\"" + ",\"fields\":{\"sub2\":{\"type\":\"keyword\"}}}"; @@ -315,7 +319,7 @@ public void testMultifields() { assertEquals( "{\"field\":{\"type\":\"test_mapper\",\"variable\":\"foo\",\"required\":\"value\"," + "\"fields\":{\"sub\":{\"type\":\"keyword\"},\"sub2\":{\"type\":\"keyword\"}}}}", - Strings.toString(XContentType.JSON, merged) + Strings.toString(MediaTypeRegistry.JSON, merged) ); String badSubField = "{\"type\":\"test_mapper\",\"variable\":\"foo\",\"required\":\"value\"," @@ -329,7 +333,7 @@ public void testMultifields() { public void testCopyTo() { String mapping = "{\"type\":\"test_mapper\",\"variable\":\"foo\",\"required\":\"value\",\"copy_to\":[\"other\"]}"; TestMapper mapper = fromMapping(mapping); - assertEquals("{\"field\":" + mapping + "}", Strings.toString(XContentType.JSON, mapper)); + assertEquals("{\"field\":" + mapping + "}", Strings.toString(MediaTypeRegistry.JSON, mapper)); // On update, copy_to is completely replaced @@ -339,14 +343,14 @@ public void testCopyTo() { TestMapper merged = (TestMapper) mapper.merge(toMerge); assertEquals( "{\"field\":{\"type\":\"test_mapper\",\"variable\":\"updated\",\"required\":\"value\"," + "\"copy_to\":[\"foo\",\"bar\"]}}", - Strings.toString(XContentType.JSON, merged) + Strings.toString(MediaTypeRegistry.JSON, merged) ); TestMapper removeCopyTo = fromMapping("{\"type\":\"test_mapper\",\"variable\":\"updated\",\"required\":\"value\"}"); TestMapper noCopyTo = (TestMapper) merged.merge(removeCopyTo); assertEquals( "{\"field\":{\"type\":\"test_mapper\",\"variable\":\"updated\",\"required\":\"value\"}}", - Strings.toString(XContentType.JSON, noCopyTo) + Strings.toString(MediaTypeRegistry.JSON, noCopyTo) ); } @@ -357,7 +361,7 @@ public void testNullables() { String fine = "{\"type\":\"test_mapper\",\"variable\":null,\"required\":\"value\"}"; TestMapper mapper = fromMapping(fine); - assertEquals("{\"field\":" + fine + "}", Strings.toString(XContentType.JSON, mapper)); + assertEquals("{\"field\":" + fine + "}", Strings.toString(MediaTypeRegistry.JSON, mapper)); } public void testObjectSerialization() throws IOException { @@ -372,10 +376,10 @@ public void testObjectSerialization() throws IOException { + "\"is_interim\":{\"type\":\"boolean\"}}}}}}"; MapperService mapperService = createMapperService("_doc", mapping); - assertEquals(mapping, Strings.toString(XContentType.JSON, 
mapperService.documentMapper())); + assertEquals(mapping, Strings.toString(MediaTypeRegistry.JSON, mapperService.documentMapper())); mapperService.merge("_doc", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); - assertEquals(mapping, Strings.toString(XContentType.JSON, mapperService.documentMapper())); + assertEquals(mapping, Strings.toString(MediaTypeRegistry.JSON, mapperService.documentMapper())); } // test custom serializer @@ -383,7 +387,7 @@ public void testCustomSerialization() { String mapping = "{\"type\":\"test_mapper\",\"wrapper\":\"wrapped value\",\"required\":\"value\"}"; TestMapper mapper = fromMapping(mapping); assertEquals("wrapped value", mapper.wrapper.name); - assertEquals("{\"field\":" + mapping + "}", Strings.toString(XContentType.JSON, mapper)); + assertEquals("{\"field\":" + mapping + "}", Strings.toString(MediaTypeRegistry.JSON, mapper)); String conflict = "{\"type\":\"test_mapper\",\"wrapper\":\"new value\",\"required\":\"value\"}"; TestMapper toMerge = fromMapping(conflict); @@ -400,7 +404,7 @@ public void testParameterValidation() { String mapping = "{\"type\":\"test_mapper\",\"int_value\":10,\"required\":\"value\"}"; TestMapper mapper = fromMapping(mapping); assertEquals(10, mapper.intValue); - assertEquals("{\"field\":" + mapping + "}", Strings.toString(XContentType.JSON, mapper)); + assertEquals("{\"field\":" + mapping + "}", Strings.toString(MediaTypeRegistry.JSON, mapper)); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, @@ -417,7 +421,7 @@ public void testDeprecatedParameterName() { assertWarnings("Parameter [fixed2_old] on mapper [field] is deprecated, use [fixed2]"); assertEquals( "{\"field\":{\"type\":\"test_mapper\",\"fixed2\":true,\"required\":\"value\"}}", - Strings.toString(XContentType.JSON, mapper) + Strings.toString(MediaTypeRegistry.JSON, mapper) ); } @@ -425,13 +429,13 @@ public void testAnalyzers() { String mapping = "{\"type\":\"test_mapper\",\"analyzer\":\"_standard\",\"required\":\"value\"}"; TestMapper mapper = fromMapping(mapping); assertEquals(mapper.analyzer, Lucene.STANDARD_ANALYZER); - assertEquals("{\"field\":" + mapping + "}", Strings.toString(XContentType.JSON, mapper)); + assertEquals("{\"field\":" + mapping + "}", Strings.toString(MediaTypeRegistry.JSON, mapper)); String withDef = "{\"type\":\"test_mapper\",\"analyzer\":\"default\",\"required\":\"value\"}"; mapper = fromMapping(withDef); assertEquals(mapper.analyzer.name(), "default"); assertThat(mapper.analyzer.analyzer(), instanceOf(StandardAnalyzer.class)); - assertEquals("{\"field\":" + withDef + "}", Strings.toString(XContentType.JSON, mapper)); + assertEquals("{\"field\":" + withDef + "}", Strings.toString(MediaTypeRegistry.JSON, mapper)); String badAnalyzer = "{\"type\":\"test_mapper\",\"analyzer\":\"wibble\",\"required\":\"value\"}"; IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> fromMapping(badAnalyzer)); @@ -454,7 +458,7 @@ public void testDeprecatedParameters() { assertFalse(mapper.index); assertEquals( "{\"field\":{\"type\":\"test_mapper\",\"index\":false,\"required\":\"value\"}}", - Strings.toString(XContentType.JSON, mapper) + Strings.toString(MediaTypeRegistry.JSON, mapper) ); } @@ -463,20 +467,20 @@ public void testLinkedAnalyzers() throws IOException { TestMapper mapper = fromMapping(mapping); assertEquals("_standard", mapper.analyzer.name()); assertEquals("_standard", mapper.searchAnalyzer.name()); - assertEquals("{\"field\":" + mapping + "}", Strings.toString(XContentType.JSON, 
mapper)); + assertEquals("{\"field\":" + mapping + "}", Strings.toString(MediaTypeRegistry.JSON, mapper)); String mappingWithSA = "{\"type\":\"test_mapper\",\"search_analyzer\":\"_standard\",\"required\":\"value\"}"; mapper = fromMapping(mappingWithSA); assertEquals("_keyword", mapper.analyzer.name()); assertEquals("_standard", mapper.searchAnalyzer.name()); - assertEquals("{\"field\":" + mappingWithSA + "}", Strings.toString(XContentType.JSON, mapper)); + assertEquals("{\"field\":" + mappingWithSA + "}", Strings.toString(MediaTypeRegistry.JSON, mapper)); String mappingWithBoth = "{\"type\":\"test_mapper\",\"analyzer\":\"default\"," + "\"search_analyzer\":\"_standard\",\"required\":\"value\"}"; mapper = fromMapping(mappingWithBoth); assertEquals("default", mapper.analyzer.name()); assertEquals("_standard", mapper.searchAnalyzer.name()); - assertEquals("{\"field\":" + mappingWithBoth + "}", Strings.toString(XContentType.JSON, mapper)); + assertEquals("{\"field\":" + mappingWithBoth + "}", Strings.toString(MediaTypeRegistry.JSON, mapper)); // we've configured things so that search_analyzer is only output when different from // analyzer, no matter what the value of `include_defaults` is @@ -487,7 +491,7 @@ public void testLinkedAnalyzers() throws IOException { assertEquals("default", mapper.searchAnalyzer.name()); assertEquals( "{\"field\":{\"type\":\"test_mapper\",\"analyzer\":\"default\",\"required\":\"value\"}}", - Strings.toString(XContentType.JSON, mapper) + Strings.toString(MediaTypeRegistry.JSON, mapper) ); assertEquals( diff --git a/server/src/test/java/org/opensearch/index/mapper/PathMatchDynamicTemplateTests.java b/server/src/test/java/org/opensearch/index/mapper/PathMatchDynamicTemplateTests.java index 36f63f9d90ab0..1fed040a449e8 100644 --- a/server/src/test/java/org/opensearch/index/mapper/PathMatchDynamicTemplateTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/PathMatchDynamicTemplateTests.java @@ -34,7 +34,7 @@ import org.apache.lucene.index.IndexableField; import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexService; import org.opensearch.index.mapper.ParseContext.Document; import org.opensearch.test.OpenSearchSingleNodeTestCase; @@ -47,17 +47,17 @@ public class PathMatchDynamicTemplateTests extends OpenSearchSingleNodeTestCase public void testSimple() throws Exception { String mapping = copyToStringFromClasspath("/org/opensearch/index/mapper/dynamictemplate/pathmatch/test-mapping.json"); IndexService index = createIndex("test"); - client().admin().indices().preparePutMapping("test").setSource(mapping, XContentType.JSON).get(); + client().admin().indices().preparePutMapping("test").setSource(mapping, MediaTypeRegistry.JSON).get(); MapperService mapperService = index.mapperService(); byte[] json = copyToBytesFromClasspath("/org/opensearch/index/mapper/dynamictemplate/pathmatch/test-data.json"); ParsedDocument parsedDoc = mapperService.documentMapper() - .parse(new SourceToParse("test", "1", new BytesArray(json), XContentType.JSON)); + .parse(new SourceToParse("test", "1", new BytesArray(json), MediaTypeRegistry.JSON)); client().admin() .indices() .preparePutMapping("test") - .setSource(parsedDoc.dynamicMappingsUpdate().toString(), XContentType.JSON) + .setSource(parsedDoc.dynamicMappingsUpdate().toString(), MediaTypeRegistry.JSON) .get(); Document doc = parsedDoc.rootDoc(); diff --git 
a/server/src/test/java/org/opensearch/index/mapper/RangeFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/RangeFieldMapperTests.java index 448bb272872ee..331bfb7b2ddf4 100644 --- a/server/src/test/java/org/opensearch/index/mapper/RangeFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/RangeFieldMapperTests.java @@ -36,11 +36,10 @@ import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexableField; import org.opensearch.common.CheckedConsumer; -import org.opensearch.common.Strings; import org.opensearch.common.network.InetAddresses; +import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.index.mapper.MapperService.MergeReason; import java.io.IOException; @@ -161,7 +160,7 @@ private XContentBuilder rangeFieldMapping(String type, CheckedConsumer<XContentB public void doTestDefaults(String type) throws Exception { XContentBuilder mapping = rangeFieldMapping(type, b -> {}); DocumentMapper mapper = createDocumentMapper(mapping); - assertEquals(Strings.toString(mapping), mapper.mappingSource().toString()); + assertEquals(mapping.toString(), mapper.mappingSource().toString()); ParsedDocument doc = mapper.parse( source(b -> b.startObject("field").field(getFromField(), getFrom(type)).field(getToField(), getTo(type)).endObject()) @@ -358,11 +357,16 @@ public void testSerializeDefaults() throws Exception { RangeFieldMapper mapper = (RangeFieldMapper) docMapper.root().getMapper("field"); XContentBuilder builder = XContentFactory.jsonBuilder().startObject(); mapper.doXContentBody(builder, true, ToXContent.EMPTY_PARAMS); - String got = Strings.toString(builder.endObject()); + String got = builder.endObject().toString(); // if type is date_range we check that the mapper contains the default format and locale // otherwise it should not contain a locale or format - assertTrue(got, got.contains("\"format\":\"strict_date_optional_time||epoch_millis\"") == type.equals("date_range")); + assertTrue( + got, + got.contains("\"format\":\"strict_date_time_no_millis||strict_date_optional_time||epoch_millis\"") == type.equals( + "date_range" + ) + ); assertTrue(got, got.contains("\"locale\":" + "\"" + Locale.ROOT + "\"") == type.equals("date_range")); } } diff --git a/server/src/test/java/org/opensearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java index a8245627c6930..9dea7e13ac45e 100644 --- a/server/src/test/java/org/opensearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java @@ -44,7 +44,6 @@ import org.apache.lucene.search.PointRangeQuery; import org.apache.lucene.search.Query; import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest; -import org.opensearch.common.Strings; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.network.InetAddresses; import org.opensearch.common.time.DateMathParser; @@ -72,22 +71,20 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws mapperService.merge( "_doc", new CompressedXContent( - Strings.toString( - PutMappingRequest.simpleMapping( - INTEGER_RANGE_FIELD_NAME, - "type=integer_range", - LONG_RANGE_FIELD_NAME, - "type=long_range", - 
FLOAT_RANGE_FIELD_NAME, - "type=float_range", - DOUBLE_RANGE_FIELD_NAME, - "type=double_range", - DATE_RANGE_FIELD_NAME, - "type=date_range", - IP_RANGE_FIELD_NAME, - "type=ip_range" - ) - ) + PutMappingRequest.simpleMapping( + INTEGER_RANGE_FIELD_NAME, + "type=integer_range", + LONG_RANGE_FIELD_NAME, + "type=long_range", + FLOAT_RANGE_FIELD_NAME, + "type=float_range", + DOUBLE_RANGE_FIELD_NAME, + "type=double_range", + DATE_RANGE_FIELD_NAME, + "type=date_range", + IP_RANGE_FIELD_NAME, + "type=ip_range" + ).toString() ), MapperService.MergeReason.MAPPING_UPDATE ); diff --git a/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java index 668666a53cd7c..00b48240d0567 100644 --- a/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java @@ -265,7 +265,9 @@ public void testDateRangeQueryUsingMappingFormat() { ); assertThat( ex.getMessage(), - containsString("failed to parse date field [2016-15-06T15:29:50+08:00] with format [strict_date_optional_time||epoch_millis]") + containsString( + "failed to parse date field [2016-15-06T15:29:50+08:00] with format [strict_date_time_no_millis||strict_date_optional_time||epoch_millis]" + ) ); // setting mapping format which is compatible with those dates @@ -275,12 +277,12 @@ public void testDateRangeQueryUsingMappingFormat() { RangeFieldType fieldType = new RangeFieldType("field", formatter); final Query query = fieldType.rangeQuery(from, to, true, true, relation, null, fieldType.dateMathParser(), context); - assertEquals("field:<ranges:[1465975790000 : 1466062190999]>", query.toString()); + assertEquals("field:<ranges:[1465975790000 : 1466062190999]>", ((IndexOrDocValuesQuery) query).getIndexQuery().toString()); // compare lower and upper bounds with what we would get on a `date` field DateFieldType dateFieldType = new DateFieldType("field", DateFieldMapper.Resolution.MILLISECONDS, formatter); final Query queryOnDateField = dateFieldType.rangeQuery(from, to, true, true, relation, null, fieldType.dateMathParser(), context); - assertEquals("field:[1465975790000 TO 1466062190999]", queryOnDateField.toString()); + assertEquals("field:[1465975790000 TO 1466062190999]", ((IndexOrDocValuesQuery) queryOnDateField).getIndexQuery().toString()); } /** diff --git a/server/src/test/java/org/opensearch/index/mapper/RootObjectMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/RootObjectMapperTests.java index d6c0bd0f2e5fd..054ff2ff8bbc6 100644 --- a/server/src/test/java/org/opensearch/index/mapper/RootObjectMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/RootObjectMapperTests.java @@ -32,10 +32,9 @@ package org.opensearch.index.mapper; -import org.opensearch.common.Strings; import org.opensearch.common.compress.CompressedXContent; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.mapper.MapperService.MergeReason; import org.opensearch.test.OpenSearchSingleNodeTestCase; @@ -48,156 +47,166 @@ public class RootObjectMapperTests extends OpenSearchSingleNodeTestCase { public void testNumericDetection() throws Exception { MergeReason reason = randomFrom(MergeReason.MAPPING_UPDATE, MergeReason.INDEX_TEMPLATE); - String mapping = Strings.toString( - 
XContentFactory.jsonBuilder().startObject().startObject("type").field("numeric_detection", false).endObject().endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .field("numeric_detection", false) + .endObject() + .endObject() + .toString(); MapperService mapperService = createIndex("test").mapperService(); DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), reason); assertEquals(mapping, mapper.mappingSource().toString()); // update with a different explicit value - String mapping2 = Strings.toString( - XContentFactory.jsonBuilder().startObject().startObject("type").field("numeric_detection", true).endObject().endObject() - ); + String mapping2 = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .field("numeric_detection", true) + .endObject() + .endObject() + .toString(); mapper = mapperService.merge("type", new CompressedXContent(mapping2), reason); assertEquals(mapping2, mapper.mappingSource().toString()); // update with an implicit value: no change - String mapping3 = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject()); + String mapping3 = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().toString(); mapper = mapperService.merge("type", new CompressedXContent(mapping3), reason); assertEquals(mapping2, mapper.mappingSource().toString()); } public void testDateDetection() throws Exception { MergeReason reason = randomFrom(MergeReason.MAPPING_UPDATE, MergeReason.INDEX_TEMPLATE); - String mapping = Strings.toString( - XContentFactory.jsonBuilder().startObject().startObject("type").field("date_detection", true).endObject().endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .field("date_detection", true) + .endObject() + .endObject() + .toString(); MapperService mapperService = createIndex("test").mapperService(); DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), reason); assertEquals(mapping, mapper.mappingSource().toString()); // update with a different explicit value - String mapping2 = Strings.toString( - XContentFactory.jsonBuilder().startObject().startObject("type").field("date_detection", false).endObject().endObject() - ); + String mapping2 = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .field("date_detection", false) + .endObject() + .endObject() + .toString(); mapper = mapperService.merge("type", new CompressedXContent(mapping2), reason); assertEquals(mapping2, mapper.mappingSource().toString()); // update with an implicit value: no change - String mapping3 = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject()); + String mapping3 = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().toString(); mapper = mapperService.merge("type", new CompressedXContent(mapping3), reason); assertEquals(mapping2, mapper.mappingSource().toString()); } public void testDateFormatters() throws Exception { MergeReason reason = randomFrom(MergeReason.MAPPING_UPDATE, MergeReason.INDEX_TEMPLATE); - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .field("dynamic_date_formats", Arrays.asList("yyyy-MM-dd")) - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + 
.field("dynamic_date_formats", Arrays.asList("yyyy-MM-dd")) + .endObject() + .endObject() + .toString(); MapperService mapperService = createIndex("test").mapperService(); DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), reason); assertEquals(mapping, mapper.mappingSource().toString()); // no update if formatters are not set explicitly - String mapping2 = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject()); + String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().toString(); mapper = mapperService.merge("type", new CompressedXContent(mapping2), reason); assertEquals(mapping, mapper.mappingSource().toString()); - String mapping3 = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .field("dynamic_date_formats", Arrays.asList()) - .endObject() - .endObject() - ); + String mapping3 = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .field("dynamic_date_formats", Arrays.asList()) + .endObject() + .endObject() + .toString(); mapper = mapperService.merge("type", new CompressedXContent(mapping3), reason); assertEquals(mapping3, mapper.mappingSource().toString()); } public void testDynamicTemplates() throws Exception { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startArray("dynamic_templates") - .startObject() - .startObject("my_template") - .field("match_mapping_type", "string") - .startObject("mapping") - .field("type", "keyword") - .endObject() - .endObject() - .endObject() - .endArray() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startArray("dynamic_templates") + .startObject() + .startObject("my_template") + .field("match_mapping_type", "string") + .startObject("mapping") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .endArray() + .endObject() + .endObject() + .toString(); MapperService mapperService = createIndex("test").mapperService(); DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); assertEquals(mapping, mapper.mappingSource().toString()); // no update if templates are not set explicitly - String mapping2 = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject()); + String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().toString(); mapper = mapperService.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE); assertEquals(mapping, mapper.mappingSource().toString()); - String mapping3 = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .field("dynamic_templates", Arrays.asList()) - .endObject() - .endObject() - ); + String mapping3 = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .field("dynamic_templates", Arrays.asList()) + .endObject() + .endObject() + .toString(); mapper = mapperService.merge("type", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE); assertEquals(mapping3, mapper.mappingSource().toString()); } public void testDynamicTemplatesForIndexTemplate() throws IOException { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startArray("dynamic_templates") - .startObject() - .startObject("first_template") 
- .field("path_match", "first") - .startObject("mapping") - .field("type", "keyword") - .endObject() - .endObject() - .endObject() - .startObject() - .startObject("second_template") - .field("path_match", "second") - .startObject("mapping") - .field("type", "keyword") - .endObject() - .endObject() - .endObject() - .endArray() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startArray("dynamic_templates") + .startObject() + .startObject("first_template") + .field("path_match", "first") + .startObject("mapping") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .startObject() + .startObject("second_template") + .field("path_match", "second") + .startObject("mapping") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .endArray() + .endObject() + .toString(); MapperService mapperService = createIndex("test").mapperService(); mapperService.merge(MapperService.SINGLE_MAPPING_NAME, new CompressedXContent(mapping), MergeReason.INDEX_TEMPLATE); // There should be no update if templates are not set. - mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("field") - .field("type", "integer") - .endObject() - .endObject() - .endObject() - ); + mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("field") + .field("type", "integer") + .endObject() + .endObject() + .endObject() + .toString(); DocumentMapper mapper = mapperService.merge( MapperService.SINGLE_MAPPING_NAME, new CompressedXContent(mapping), @@ -212,29 +221,28 @@ public void testDynamicTemplatesForIndexTemplate() throws IOException { assertEquals("second", templates[1].pathMatch()); // Dynamic templates should be appended and deduplicated. 
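Aside: the other mechanical change repeated through RootObjectMapperTests is dropping the Strings.toString(...) wrapper in favor of XContentBuilder#toString(). A minimal sketch, assuming (as these hunks do) that toString() on a builder renders the content it has buffered; the class and method names are illustrative only.

import java.io.IOException;

import org.opensearch.common.xcontent.XContentFactory;

final class MappingSource {
    // was: Strings.toString(XContentFactory.jsonBuilder()...endObject())
    static String numericDetection(boolean enabled) throws IOException {
        return XContentFactory.jsonBuilder()
            .startObject()
            .startObject("type")
            .field("numeric_detection", enabled)
            .endObject()
            .endObject()
            .toString();
    }
}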
- mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startArray("dynamic_templates") - .startObject() - .startObject("third_template") - .field("path_match", "third") - .startObject("mapping") - .field("type", "integer") - .endObject() - .endObject() - .endObject() - .startObject() - .startObject("second_template") - .field("path_match", "second_updated") - .startObject("mapping") - .field("type", "double") - .endObject() - .endObject() - .endObject() - .endArray() - .endObject() - ); + mapping = XContentFactory.jsonBuilder() + .startObject() + .startArray("dynamic_templates") + .startObject() + .startObject("third_template") + .field("path_match", "third") + .startObject("mapping") + .field("type", "integer") + .endObject() + .endObject() + .endObject() + .startObject() + .startObject("second_template") + .field("path_match", "second_updated") + .startObject("mapping") + .field("type", "double") + .endObject() + .endObject() + .endObject() + .endArray() + .endObject() + .toString(); mapper = mapperService.merge(MapperService.SINGLE_MAPPING_NAME, new CompressedXContent(mapping), MergeReason.INDEX_TEMPLATE); templates = mapper.root().dynamicTemplates(); @@ -248,30 +256,28 @@ public void testDynamicTemplatesForIndexTemplate() throws IOException { } public void testIllegalFormatField() throws Exception { - String dynamicMapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startArray("dynamic_date_formats") - .startArray() - .value("test_format") - .endArray() - .endArray() - .endObject() - .endObject() - ); - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startArray("date_formats") - .startArray() - .value("test_format") - .endArray() - .endArray() - .endObject() - .endObject() - ); + String dynamicMapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startArray("dynamic_date_formats") + .startArray() + .value("test_format") + .endArray() + .endArray() + .endObject() + .endObject() + .toString(); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startArray("date_formats") + .startArray() + .value("test_format") + .endArray() + .endArray() + .endObject() + .endObject() + .toString(); DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); for (String m : Arrays.asList(mapping, dynamicMapping)) { @@ -284,15 +290,14 @@ public void testIllegalFormatField() throws Exception { } public void testIllegalDynamicTemplates() throws Exception { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("dynamic_templates") - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("dynamic_templates") + .endObject() + .endObject() + .endObject() + .toString(); DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); MapperParsingException e = expectThrows(MapperParsingException.class, () -> parser.parse("type", new CompressedXContent(mapping))); @@ -320,7 +325,7 @@ public void testIllegalDynamicTemplateUnknownFieldType() throws Exception { } mapping.endObject(); MapperService mapperService = createIndex("test").mapperService(); - DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(Strings.toString(mapping)), MergeReason.MAPPING_UPDATE); + 
DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping.toString()), MergeReason.MAPPING_UPDATE); assertThat(mapper.mappingSource().toString(), containsString("\"type\":\"string\"")); assertWarnings( "dynamic template [my_template1] has invalid content [{\"match_mapping_type\":\"string\",\"mapping\":{\"type\":" @@ -350,7 +355,7 @@ public void testIllegalDynamicTemplateUnknownAttribute() throws Exception { } mapping.endObject(); MapperService mapperService = createIndex("test").mapperService(); - DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(Strings.toString(mapping)), MergeReason.MAPPING_UPDATE); + DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping.toString()), MergeReason.MAPPING_UPDATE); assertThat(mapper.mappingSource().toString(), containsString("\"foo\":\"bar\"")); assertWarnings( "dynamic template [my_template2] has invalid content [{\"match_mapping_type\":\"string\",\"mapping\":{" @@ -381,7 +386,7 @@ public void testIllegalDynamicTemplateInvalidAttribute() throws Exception { } mapping.endObject(); MapperService mapperService = createIndex("test").mapperService(); - DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(Strings.toString(mapping)), MergeReason.MAPPING_UPDATE); + DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping.toString()), MergeReason.MAPPING_UPDATE); assertThat(mapper.mappingSource().toString(), containsString("\"analyzer\":\"foobar\"")); assertWarnings( "dynamic template [my_template3] has invalid content [{\"match_mapping_type\":\"string\",\"mapping\":{" @@ -418,11 +423,7 @@ public void testIllegalDynamicTemplateNoMappingType() throws Exception { } mapping.endObject(); mapperService = createIndex("test").mapperService(); - DocumentMapper mapper = mapperService.merge( - "type", - new CompressedXContent(Strings.toString(mapping)), - MergeReason.MAPPING_UPDATE - ); + DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping.toString()), MergeReason.MAPPING_UPDATE); assertThat(mapper.mappingSource().toString(), containsString("\"index_phrases\":true")); } { @@ -452,11 +453,7 @@ public void testIllegalDynamicTemplateNoMappingType() throws Exception { } mapping.endObject(); - DocumentMapper mapper = mapperService.merge( - "type", - new CompressedXContent(Strings.toString(mapping)), - MergeReason.MAPPING_UPDATE - ); + DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping.toString()), MergeReason.MAPPING_UPDATE); assertThat(mapper.mappingSource().toString(), containsString("\"foo\":\"bar\"")); if (useMatchMappingType) { assertWarnings( diff --git a/server/src/test/java/org/opensearch/index/mapper/RoutingFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/RoutingFieldMapperTests.java index 58029df33a2ce..561a35efc6d18 100644 --- a/server/src/test/java/org/opensearch/index/mapper/RoutingFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/RoutingFieldMapperTests.java @@ -32,11 +32,10 @@ package org.opensearch.index.mapper; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; import 
org.opensearch.test.OpenSearchSingleNodeTestCase; import static org.hamcrest.Matchers.containsString; @@ -45,7 +44,7 @@ public class RoutingFieldMapperTests extends OpenSearchSingleNodeTestCase { public void testRoutingMapper() throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject()); + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().toString(); DocumentMapper docMapper = createIndex("test").mapperService() .documentMapperParser() .parse("type", new CompressedXContent(mapping)); @@ -55,7 +54,7 @@ public void testRoutingMapper() throws Exception { "test", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("field", "value").endObject()), - XContentType.JSON, + MediaTypeRegistry.JSON, "routing_value" ) ); @@ -65,7 +64,7 @@ public void testRoutingMapper() throws Exception { } public void testIncludeInObjectNotAllowed() throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject()); + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().toString(); DocumentMapper docMapper = createIndex("test").mapperService() .documentMapperParser() .parse("type", new CompressedXContent(mapping)); @@ -76,7 +75,7 @@ public void testIncludeInObjectNotAllowed() throws Exception { "test", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("_routing", "foo").endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ); fail("Expected failure to parse metadata field"); diff --git a/server/src/test/java/org/opensearch/index/mapper/SourceFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/SourceFieldMapperTests.java index 518e5e880f4f8..83d42fd423f08 100644 --- a/server/src/test/java/org/opensearch/index/mapper/SourceFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/SourceFieldMapperTests.java @@ -33,18 +33,17 @@ package org.opensearch.index.mapper; import org.apache.lucene.index.IndexableField; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentParser; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchSingleNodeTestCase; import java.util.Collection; import java.util.Map; @@ -60,9 +59,14 @@ protected Collection<Class<? 
extends Plugin>> getPlugins() { } public void testNoFormat() throws Exception { - String mapping = Strings.toString( - XContentFactory.jsonBuilder().startObject().startObject("type").startObject("_source").endObject().endObject().endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("_source") + .endObject() + .endObject() + .endObject() + .toString(); DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); DocumentMapper documentMapper = parser.parse("type", new CompressedXContent(mapping)); @@ -71,11 +75,11 @@ public void testNoFormat() throws Exception { "test", "1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().field("field", "value").endObject()), - XContentType.JSON + MediaTypeRegistry.JSON ) ); - assertThat(XContentFactory.xContentType(doc.source().toBytesRef().bytes), equalTo(XContentType.JSON)); + assertThat(MediaTypeRegistry.xContent(doc.source().toBytesRef().bytes), equalTo(MediaTypeRegistry.JSON)); documentMapper = parser.parse("type", new CompressedXContent(mapping)); doc = documentMapper.parse( @@ -87,20 +91,19 @@ public void testNoFormat() throws Exception { ) ); - assertThat(XContentHelper.xContentType(doc.source()), equalTo(XContentType.SMILE)); + assertThat(MediaTypeRegistry.xContentType(doc.source()), equalTo(XContentType.SMILE)); } public void testIncludes() throws Exception { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("_source") - .array("includes", new String[] { "path1*" }) - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("_source") + .array("includes", new String[] { "path1*" }) + .endObject() + .endObject() + .endObject() + .toString(); DocumentMapper documentMapper = createIndex("test").mapperService() .documentMapperParser() @@ -121,7 +124,7 @@ public void testIncludes() throws Exception { .endObject() .endObject() ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); @@ -135,16 +138,15 @@ public void testIncludes() throws Exception { } public void testExcludes() throws Exception { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("_source") - .array("excludes", "path1*") - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("_source") + .array("excludes", "path1*") + .endObject() + .endObject() + .endObject() + .toString(); DocumentMapper documentMapper = createIndex("test").mapperService() .documentMapperParser() @@ -165,7 +167,7 @@ public void testExcludes() throws Exception { .endObject() .endObject() ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); @@ -181,59 +183,55 @@ public void testExcludes() throws Exception { public void testEnabledNotUpdateable() throws Exception { DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); // using default of true - String mapping1 = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject()); - String mapping2 = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("_source") - .field("enabled", false) - .endObject() - .endObject() - .endObject() - ); + String mapping1 = 
XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().toString(); + String mapping2 = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("_source") + .field("enabled", false) + .endObject() + .endObject() + .endObject() + .toString(); assertConflicts(mapping1, mapping2, parser, "Cannot update parameter [enabled] from [true] to [false]"); // not changing is ok - String mapping3 = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("_source") - .field("enabled", true) - .endObject() - .endObject() - .endObject() - ); + String mapping3 = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("_source") + .field("enabled", true) + .endObject() + .endObject() + .endObject() + .toString(); assertConflicts(mapping1, mapping3, parser); } public void testIncludesNotUpdateable() throws Exception { DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); - String defaultMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject()); - String mapping1 = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("_source") - .array("includes", "foo.*") - .endObject() - .endObject() - .endObject() - ); + String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().toString(); + String mapping1 = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("_source") + .array("includes", "foo.*") + .endObject() + .endObject() + .endObject() + .toString(); assertConflicts(defaultMapping, mapping1, parser, "Cannot update parameter [includes] from [[]] to [[foo.*]]"); assertConflicts(mapping1, defaultMapping, parser, "Cannot update parameter [includes] from [[foo.*]] to [[]]"); - String mapping2 = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("_source") - .array("includes", "foo.*", "bar.*") - .endObject() - .endObject() - .endObject() - ); + String mapping2 = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("_source") + .array("includes", "foo.*", "bar.*") + .endObject() + .endObject() + .endObject() + .toString(); assertConflicts(mapping1, mapping2, parser, "Cannot update parameter [includes] from [[foo.*]] to [[foo.*, bar.*]]"); // not changing is ok @@ -242,30 +240,28 @@ public void testIncludesNotUpdateable() throws Exception { public void testExcludesNotUpdateable() throws Exception { DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); - String defaultMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject()); - String mapping1 = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("_source") - .array("excludes", "foo.*") - .endObject() - .endObject() - .endObject() - ); + String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().toString(); + String mapping1 = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("_source") + .array("excludes", "foo.*") + .endObject() + .endObject() + .endObject() + .toString(); assertConflicts(defaultMapping, mapping1, parser, "Cannot update parameter [excludes] from [[]] to [[foo.*]]"); 
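Aside: besides the builder refactor, the testNoFormat hunk earlier in this file moves content-type sniffing from XContentFactory.xContentType and XContentHelper.xContentType onto MediaTypeRegistry. The two call shapes below are taken verbatim from the diff; the MediaType return type is inferred from the equalTo(MediaTypeRegistry.JSON) assertions, and the wrapper class is illustrative.

import org.opensearch.core.common.bytes.BytesReference;
import org.opensearch.core.xcontent.MediaType;
import org.opensearch.core.xcontent.MediaTypeRegistry;

final class SourceSniffing {
    // was: XContentFactory.xContentType(bytes)
    static MediaType detect(byte[] bytes) {
        return MediaTypeRegistry.xContent(bytes);
    }

    // was: XContentHelper.xContentType(source)
    static MediaType detect(BytesReference source) {
        return MediaTypeRegistry.xContentType(source);
    }
}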
assertConflicts(mapping1, defaultMapping, parser, "Cannot update parameter [excludes] from [[foo.*]] to [[]]"); - String mapping2 = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("_source") - .array("excludes", "foo.*", "bar.*") - .endObject() - .endObject() - .endObject() - ); + String mapping2 = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("_source") + .array("excludes", "foo.*", "bar.*") + .endObject() + .endObject() + .endObject() + .toString(); assertConflicts(mapping1, mapping2, parser, "Cannot update parameter [excludes]"); // not changing is ok @@ -274,55 +270,52 @@ public void testExcludesNotUpdateable() throws Exception { public void testComplete() throws Exception { DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject()); + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().toString(); assertTrue(parser.parse("type", new CompressedXContent(mapping)).sourceMapper().isComplete()); - mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("_source") - .field("enabled", false) - .endObject() - .endObject() - .endObject() - ); + mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("_source") + .field("enabled", false) + .endObject() + .endObject() + .endObject() + .toString(); assertFalse(parser.parse("type", new CompressedXContent(mapping)).sourceMapper().isComplete()); - mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("_source") - .array("includes", "foo.*") - .endObject() - .endObject() - .endObject() - ); + mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("_source") + .array("includes", "foo.*") + .endObject() + .endObject() + .endObject() + .toString(); assertFalse(parser.parse("type", new CompressedXContent(mapping)).sourceMapper().isComplete()); - mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("_source") - .array("excludes", "foo.*") - .endObject() - .endObject() - .endObject() - ); + mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("_source") + .array("excludes", "foo.*") + .endObject() + .endObject() + .endObject() + .toString(); assertFalse(parser.parse("type", new CompressedXContent(mapping)).sourceMapper().isComplete()); } public void testSourceObjectContainsExtraTokens() throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject()); + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().toString(); DocumentMapper documentMapper = createIndex("test").mapperService() .documentMapperParser() .parse("type", new CompressedXContent(mapping)); try { - documentMapper.parse(new SourceToParse("test", "1", new BytesArray("{}}"), XContentType.JSON)); // extra end object - // (invalid JSON) + documentMapper.parse(new SourceToParse("test", "1", new BytesArray("{}}"), MediaTypeRegistry.JSON)); // extra end object + // (invalid JSON) fail("Expected parse exception"); } catch (MapperParsingException e) { 
assertNotNull(e.getRootCause()); diff --git a/server/src/test/java/org/opensearch/index/mapper/StoredNumericValuesTests.java b/server/src/test/java/org/opensearch/index/mapper/StoredNumericValuesTests.java index 0984cfb2d1755..cebd92a280556 100644 --- a/server/src/test/java/org/opensearch/index/mapper/StoredNumericValuesTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/StoredNumericValuesTests.java @@ -37,13 +37,12 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.store.ByteBuffersDirectory; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.util.set.Sets; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.fieldvisitor.CustomFieldsVisitor; import org.opensearch.index.mapper.MapperService.MergeReason; import org.opensearch.test.OpenSearchSingleNodeTestCase; @@ -57,59 +56,58 @@ public class StoredNumericValuesTests extends OpenSearchSingleNodeTestCase { public void testBytesAndNumericRepresentation() throws Exception { IndexWriter writer = new IndexWriter(new ByteBuffersDirectory(), new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("properties") - .startObject("field1") - .field("type", "byte") - .field("store", true) - .endObject() - .startObject("field2") - .field("type", "short") - .field("store", true) - .endObject() - .startObject("field3") - .field("type", "integer") - .field("store", true) - .endObject() - .startObject("field4") - .field("type", "float") - .field("store", true) - .endObject() - .startObject("field5") - .field("type", "long") - .field("store", true) - .endObject() - .startObject("field6") - .field("type", "double") - .field("store", true) - .endObject() - .startObject("field7") - .field("type", "ip") - .field("store", true) - .endObject() - .startObject("field8") - .field("type", "ip") - .field("store", true) - .endObject() - .startObject("field9") - .field("type", "date") - .field("store", true) - .endObject() - .startObject("field10") - .field("type", "boolean") - .field("store", true) - .endObject() - .startObject("field11") - .field("type", "unsigned_long") - .field("store", true) - .endObject() - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("properties") + .startObject("field1") + .field("type", "byte") + .field("store", true) + .endObject() + .startObject("field2") + .field("type", "short") + .field("store", true) + .endObject() + .startObject("field3") + .field("type", "integer") + .field("store", true) + .endObject() + .startObject("field4") + .field("type", "float") + .field("store", true) + .endObject() + .startObject("field5") + .field("type", "long") + .field("store", true) + .endObject() + .startObject("field6") + .field("type", "double") + .field("store", true) + .endObject() + .startObject("field7") + .field("type", "ip") + .field("store", true) + .endObject() + .startObject("field8") + .field("type", "ip") + .field("store", true) + .endObject() + .startObject("field9") + 
.field("type", "date") + .field("store", true) + .endObject() + .startObject("field10") + .field("type", "boolean") + .field("store", true) + .endObject() + .startObject("field11") + .field("type", "unsigned_long") + .field("store", true) + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); MapperService mapperService = createIndex("test").mapperService(); DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); @@ -137,7 +135,7 @@ public void testBytesAndNumericRepresentation() throws Exception { .field("field11", "1") .endObject() ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); diff --git a/server/src/test/java/org/opensearch/index/mapper/TextFieldAnalyzerModeTests.java b/server/src/test/java/org/opensearch/index/mapper/TextFieldAnalyzerModeTests.java index 93bed729f0974..83a3bdc580ae6 100644 --- a/server/src/test/java/org/opensearch/index/mapper/TextFieldAnalyzerModeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/TextFieldAnalyzerModeTests.java @@ -59,6 +59,9 @@ import static org.mockito.Mockito.when; public class TextFieldAnalyzerModeTests extends OpenSearchTestCase { + ParametrizedFieldMapper.TypeParser getTypeParser() { + return TextFieldMapper.PARSER; + } private static Map<String, NamedAnalyzer> defaultAnalyzers() { Map<String, NamedAnalyzer> analyzers = new HashMap<>(); @@ -101,7 +104,7 @@ public void testParseTextFieldCheckAnalyzerAnalysisMode() { IndexAnalyzers indexAnalyzers = new IndexAnalyzers(analyzers, Collections.emptyMap(), Collections.emptyMap()); when(parserContext.getIndexAnalyzers()).thenReturn(indexAnalyzers); - TextFieldMapper.PARSER.parse("field", fieldNode, parserContext); + getTypeParser().parse("field", fieldNode, parserContext); // check that "analyzer" set to something that only supports AnalysisMode.SEARCH_TIME or AnalysisMode.INDEX_TIME is blocked AnalysisMode mode = randomFrom(AnalysisMode.SEARCH_TIME, AnalysisMode.INDEX_TIME); @@ -110,7 +113,7 @@ public void testParseTextFieldCheckAnalyzerAnalysisMode() { indexAnalyzers = new IndexAnalyzers(analyzers, Collections.emptyMap(), Collections.emptyMap()); when(parserContext.getIndexAnalyzers()).thenReturn(indexAnalyzers); fieldNode.put("analyzer", "my_analyzer"); - MapperException ex = expectThrows(MapperException.class, () -> { TextFieldMapper.PARSER.parse("name", fieldNode, parserContext); }); + MapperException ex = expectThrows(MapperException.class, () -> { getTypeParser().parse("name", fieldNode, parserContext); }); assertThat( ex.getMessage(), containsString("analyzer [my_named_analyzer] contains filters [my_analyzer] that are not allowed to run") @@ -136,7 +139,7 @@ public void testParseTextFieldCheckSearchAnalyzerAnalysisMode() { IndexAnalyzers indexAnalyzers = new IndexAnalyzers(analyzers, Collections.emptyMap(), Collections.emptyMap()); when(parserContext.getIndexAnalyzers()).thenReturn(indexAnalyzers); - TextFieldMapper.PARSER.parse("textField", fieldNode, parserContext); + getTypeParser().parse("textField", fieldNode, parserContext); // check that "analyzer" set to AnalysisMode.INDEX_TIME is blocked mode = AnalysisMode.INDEX_TIME; @@ -151,10 +154,7 @@ public void testParseTextFieldCheckSearchAnalyzerAnalysisMode() { if (settingToTest.equals("search_quote_analyzer")) { fieldNode.put("search_analyzer", "standard"); } - MapperException ex = expectThrows( - MapperException.class, - () -> { TextFieldMapper.PARSER.parse("field", fieldNode, parserContext); } - ); + MapperException ex = 
expectThrows(MapperException.class, () -> { getTypeParser().parse("field", fieldNode, parserContext); }); assertEquals( "analyzer [my_named_analyzer] contains filters [my_analyzer] that are not allowed to run in search time mode.", ex.getMessage() @@ -174,10 +174,7 @@ public void testParseTextFieldCheckAnalyzerWithSearchAnalyzerAnalysisMode() { analyzers.put("my_analyzer", new NamedAnalyzer("my_named_analyzer", AnalyzerScope.INDEX, createAnalyzerWithMode(mode))); IndexAnalyzers indexAnalyzers = new IndexAnalyzers(analyzers, Collections.emptyMap(), Collections.emptyMap()); when(parserContext.getIndexAnalyzers()).thenReturn(indexAnalyzers); - MapperException ex = expectThrows( - MapperException.class, - () -> { TextFieldMapper.PARSER.parse("field", fieldNode, parserContext); } - ); + MapperException ex = expectThrows(MapperException.class, () -> { getTypeParser().parse("field", fieldNode, parserContext); }); assertThat( ex.getMessage(), containsString("analyzer [my_named_analyzer] contains filters [my_analyzer] that are not allowed to run") @@ -193,7 +190,6 @@ public void testParseTextFieldCheckAnalyzerWithSearchAnalyzerAnalysisMode() { indexAnalyzers = new IndexAnalyzers(analyzers, Collections.emptyMap(), Collections.emptyMap()); when(parserContext.getIndexAnalyzers()).thenReturn(indexAnalyzers); - TextFieldMapper.PARSER.parse("field", fieldNode, parserContext); + getTypeParser().parse("field", fieldNode, parserContext); } - } diff --git a/server/src/test/java/org/opensearch/index/mapper/TextFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/TextFieldMapperTests.java index 0bd47f9e7e4c1..a22bfa5e845b1 100644 --- a/server/src/test/java/org/opensearch/index/mapper/TextFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/TextFieldMapperTests.java @@ -33,10 +33,7 @@ package org.opensearch.index.mapper; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.tests.analysis.CannedTokenStream; -import org.apache.lucene.tests.analysis.MockSynonymAnalyzer; import org.apache.lucene.analysis.StopFilter; -import org.apache.lucene.tests.analysis.Token; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.core.KeywordAnalyzer; import org.apache.lucene.analysis.core.WhitespaceAnalyzer; @@ -62,13 +59,15 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.SynonymQuery; import org.apache.lucene.search.TermQuery; +import org.apache.lucene.tests.analysis.CannedTokenStream; +import org.apache.lucene.tests.analysis.MockSynonymAnalyzer; +import org.apache.lucene.tests.analysis.Token; import org.apache.lucene.util.BytesRef; -import org.opensearch.common.Strings; import org.opensearch.common.lucene.search.MultiPhrasePrefixQuery; +import org.opensearch.core.common.Strings; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.IndexSettings; import org.opensearch.index.analysis.AnalyzerScope; import org.opensearch.index.analysis.CharFilterFactory; @@ -82,6 +81,7 @@ import org.opensearch.index.query.MatchPhraseQueryBuilder; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.search.MatchQuery; +import org.junit.Before; import java.io.IOException; import java.util.Arrays; @@ -96,6 +96,13 @@ public class TextFieldMapperTests extends MapperTestCase 
{ + public String textFieldName = "text"; + + @Before + public void setup() { + textFieldName = "text"; + } + @Override protected void writeFieldValue(XContentBuilder builder) throws IOException { builder.value(1234); @@ -170,30 +177,34 @@ protected void registerParameters(ParameterChecker checker) throws IOException { checker.registerConflictCheck("index", b -> b.field("index", false)); checker.registerConflictCheck("store", b -> b.field("store", true)); - checker.registerConflictCheck("index_phrases", b -> b.field("index_phrases", true)); - checker.registerConflictCheck("index_prefixes", b -> b.startObject("index_prefixes").endObject()); - checker.registerConflictCheck("index_options", b -> b.field("index_options", "docs")); + if (!textFieldName.equals("match_only_text")) { + checker.registerConflictCheck("index_phrases", b -> b.field("index_phrases", true)); + checker.registerConflictCheck("index_prefixes", b -> b.startObject("index_prefixes").endObject()); + checker.registerConflictCheck("index_options", b -> b.field("index_options", "docs")); + } checker.registerConflictCheck("similarity", b -> b.field("similarity", "boolean")); checker.registerConflictCheck("analyzer", b -> b.field("analyzer", "keyword")); checker.registerConflictCheck("term_vector", b -> b.field("term_vector", "yes")); checker.registerConflictCheck("position_increment_gap", b -> b.field("position_increment_gap", 10)); - // norms can be set from true to false, but not vice versa - checker.registerConflictCheck("norms", fieldMapping(b -> { - b.field("type", "text"); - b.field("norms", false); - }), fieldMapping(b -> { - b.field("type", "text"); - b.field("norms", true); - })); - checker.registerUpdateCheck(b -> { - b.field("type", "text"); - b.field("norms", true); - }, b -> { - b.field("type", "text"); - b.field("norms", false); - }, m -> assertFalse(m.fieldType().getTextSearchInfo().hasNorms())); + if (!textFieldName.equals(MatchOnlyTextFieldMapper.CONTENT_TYPE)) { + // norms can be set from true to false, but not vice versa + checker.registerConflictCheck("norms", fieldMapping(b -> { + b.field("type", textFieldName); + b.field("norms", false); + }), fieldMapping(b -> { + b.field("type", textFieldName); + b.field("norms", true); + })); + checker.registerUpdateCheck(b -> { + b.field("type", textFieldName); + b.field("norms", true); + }, b -> { + b.field("type", textFieldName); + b.field("norms", false); + }, m -> assertFalse(m.fieldType().getTextSearchInfo().hasNorms())); + } checker.registerUpdateCheck(b -> b.field("boost", 2.0), m -> assertEquals(m.fieldType().boost(), 2.0, 0)); @@ -238,12 +249,12 @@ public TokenStream create(TokenStream tokenStream) { @Override protected void minimalMapping(XContentBuilder b) throws IOException { - b.field("type", "text"); + b.field("type", textFieldName); } public void testDefaults() throws IOException { DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); - assertEquals(Strings.toString(fieldMapping(this::minimalMapping)), mapper.mappingSource().toString()); + assertEquals(fieldMapping(this::minimalMapping).toString(), mapper.mappingSource().toString()); ParsedDocument doc = mapper.parse(source(b -> b.field("field", "1234"))); IndexableField[] fields = doc.rootDoc().getFields("field"); @@ -263,7 +274,7 @@ public void testDefaults() throws IOException { public void testBWCSerialization() throws IOException { MapperService mapperService = createMapperService(fieldMapping(b -> { - b.field("type", "text"); + b.field("type", textFieldName); 
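// Note on the textFieldName indirection used above and throughout this class: it defaults to "text" (and is reset in setup()), and the guards on MatchOnlyTextFieldMapper.CONTENT_TYPE suggest a subclass can point it at "match_only_text" so these same cases exercise that field type as well.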
b.field("fielddata", true); b.startObject("fields"); { @@ -308,12 +319,12 @@ public void testBWCSerialization() throws IOException { + "\"similarity\":\"BM25\",\"eager_global_ordinals\":true,\"position_increment_gap\":0," + "\"fielddata\":true,\"fielddata_frequency_filter\":{\"min\":0.001,\"max\":0.1,\"min_segment_size\":500}," + "\"index_prefixes\":{\"min_chars\":1,\"max_chars\":10},\"index_phrases\":true}}}}", - Strings.toString(XContentType.JSON, mapperService.documentMapper()) + Strings.toString(MediaTypeRegistry.JSON, mapperService.documentMapper()) ); } public void testEnableStore() throws IOException { - DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", "text").field("store", true))); + DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", textFieldName).field("store", true))); ParsedDocument doc = mapper.parse(source(b -> b.field("field", "1234"))); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(1, fields.length); @@ -321,14 +332,14 @@ public void testEnableStore() throws IOException { } public void testDisableIndex() throws IOException { - DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", "text").field("index", false))); + DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", textFieldName).field("index", false))); ParsedDocument doc = mapper.parse(source(b -> b.field("field", "1234"))); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(0, fields.length); } public void testDisableNorms() throws IOException { - DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", "text").field("norms", false))); + DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", textFieldName).field("norms", false))); ParsedDocument doc = mapper.parse(source(b -> b.field("field", "1234"))); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(1, fields.length); @@ -342,14 +353,14 @@ public void testIndexOptions() throws IOException { supportedOptions.put("positions", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS); supportedOptions.put("offsets", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS); - XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("_doc").startObject("properties"); + XContentBuilder mapping = MediaTypeRegistry.JSON.contentBuilder().startObject().startObject("_doc").startObject("properties"); for (String option : supportedOptions.keySet()) { - mapping.startObject(option).field("type", "text").field("index_options", option).endObject(); + mapping.startObject(option).field("type", textFieldName).field("index_options", option).endObject(); } mapping.endObject().endObject().endObject(); DocumentMapper mapper = createDocumentMapper(mapping); - String serialized = Strings.toString(XContentType.JSON, mapper); + String serialized = Strings.toString(MediaTypeRegistry.JSON, mapper); assertThat(serialized, containsString("\"offsets\":{\"type\":\"text\",\"index_options\":\"offsets\"}")); assertThat(serialized, containsString("\"freqs\":{\"type\":\"text\",\"index_options\":\"freqs\"}")); assertThat(serialized, containsString("\"docs\":{\"type\":\"text\",\"index_options\":\"docs\"}")); @@ -390,7 +401,7 @@ public void testDefaultPositionIncrementGap() throws IOException { public void testPositionIncrementGap() throws IOException { final int positionIncrementGap = randomIntBetween(1, 1000); MapperService mapperService = createMapperService( - 
fieldMapping(b -> b.field("type", "text").field("position_increment_gap", positionIncrementGap)) + fieldMapping(b -> b.field("type", textFieldName).field("position_increment_gap", positionIncrementGap)) ); ParsedDocument doc = mapperService.documentMapper().parse(source(b -> b.array("field", new String[] { "a", "b" }))); @@ -410,26 +421,26 @@ public void testPositionIncrementGap() throws IOException { public void testSearchAnalyzerSerialization() throws IOException { XContentBuilder mapping = fieldMapping( - b -> b.field("type", "text").field("analyzer", "standard").field("search_analyzer", "keyword") + b -> b.field("type", textFieldName).field("analyzer", "standard").field("search_analyzer", "keyword") ); - assertEquals(Strings.toString(mapping), createDocumentMapper(mapping).mappingSource().toString()); + assertEquals(mapping.toString(), createDocumentMapper(mapping).mappingSource().toString()); // special case: default index analyzer - mapping = fieldMapping(b -> b.field("type", "text").field("analyzer", "default").field("search_analyzer", "keyword")); - assertEquals(Strings.toString(mapping), createDocumentMapper(mapping).mappingSource().toString()); + mapping = fieldMapping(b -> b.field("type", textFieldName).field("analyzer", "default").field("search_analyzer", "keyword")); + assertEquals(mapping.toString(), createDocumentMapper(mapping).mappingSource().toString()); // special case: default search analyzer - mapping = fieldMapping(b -> b.field("type", "text").field("analyzer", "keyword").field("search_analyzer", "default")); - assertEquals(Strings.toString(mapping), createDocumentMapper(mapping).mappingSource().toString()); + mapping = fieldMapping(b -> b.field("type", textFieldName).field("analyzer", "keyword").field("search_analyzer", "default")); + assertEquals(mapping.toString(), createDocumentMapper(mapping).mappingSource().toString()); - XContentBuilder builder = XContentFactory.jsonBuilder(); + XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder(); builder.startObject(); createDocumentMapper(fieldMapping(this::minimalMapping)).toXContent( builder, new ToXContent.MapParams(Collections.singletonMap("include_defaults", "true")) ); builder.endObject(); - String mappingString = Strings.toString(builder); + String mappingString = builder.toString(); assertTrue(mappingString.contains("analyzer")); assertTrue(mappingString.contains("search_analyzer")); assertTrue(mappingString.contains("search_quote_analyzer")); @@ -437,47 +448,47 @@ public void testSearchAnalyzerSerialization() throws IOException { public void testSearchQuoteAnalyzerSerialization() throws IOException { XContentBuilder mapping = fieldMapping( - b -> b.field("type", "text") + b -> b.field("type", textFieldName) .field("analyzer", "standard") .field("search_analyzer", "standard") .field("search_quote_analyzer", "keyword") ); - assertEquals(Strings.toString(mapping), createDocumentMapper(mapping).mappingSource().toString()); + assertEquals(mapping.toString(), createDocumentMapper(mapping).mappingSource().toString()); // special case: default index/search analyzer mapping = fieldMapping( - b -> b.field("type", "text") + b -> b.field("type", textFieldName) .field("analyzer", "default") .field("search_analyzer", "default") .field("search_quote_analyzer", "keyword") ); - assertEquals(Strings.toString(mapping), createDocumentMapper(mapping).mappingSource().toString()); + assertEquals(mapping.toString(), createDocumentMapper(mapping).mappingSource().toString()); } public void testTermVectors() throws IOException { 
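// term_vector accepts six settings: "no", "yes", "with_offsets", "with_positions", "with_positions_offsets", and "with_positions_offsets_payloads"; field1 through field6 below are mapped to them in that order so each resulting Lucene term-vector flag combination can be asserted individually.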
XContentBuilder mapping = mapping( b -> b.startObject("field1") - .field("type", "text") + .field("type", textFieldName) .field("term_vector", "no") .endObject() .startObject("field2") - .field("type", "text") + .field("type", textFieldName) .field("term_vector", "yes") .endObject() .startObject("field3") - .field("type", "text") + .field("type", textFieldName) .field("term_vector", "with_offsets") .endObject() .startObject("field4") - .field("type", "text") + .field("type", textFieldName) .field("term_vector", "with_positions") .endObject() .startObject("field5") - .field("type", "text") + .field("type", textFieldName) .field("term_vector", "with_positions_offsets") .endObject() .startObject("field6") - .field("type", "text") + .field("type", textFieldName) .field("term_vector", "with_positions_offsets_payloads") .endObject() ); @@ -527,7 +538,9 @@ public void testTermVectors() throws IOException { } public void testEagerGlobalOrdinals() throws IOException { - DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", "text").field("eager_global_ordinals", true))); + DocumentMapper mapper = createDocumentMapper( + fieldMapping(b -> b.field("type", textFieldName).field("eager_global_ordinals", true)) + ); FieldMapper fieldMapper = (FieldMapper) mapper.mappers().getMapper("field"); assertTrue(fieldMapper.fieldType().eagerGlobalOrdinals()); @@ -540,13 +553,13 @@ public void testFielddata() throws IOException { })); assertThat(e.getMessage(), containsString("Text fields are not optimised for operations that require per-document field data")); - MapperService enabledMapper = createMapperService(fieldMapping(b -> b.field("type", "text").field("fielddata", true))); + MapperService enabledMapper = createMapperService(fieldMapping(b -> b.field("type", textFieldName).field("fielddata", true))); enabledMapper.fieldType("field").fielddataBuilder("test", () -> { throw new UnsupportedOperationException(); }); // no exception // this time e = expectThrows( MapperParsingException.class, - () -> createMapperService(fieldMapping(b -> b.field("type", "text").field("index", false).field("fielddata", true))) + () -> createMapperService(fieldMapping(b -> b.field("type", textFieldName).field("index", false).field("fielddata", true))) ); assertThat(e.getMessage(), containsString("Cannot enable fielddata on a [text] field that is not indexed")); } @@ -554,7 +567,7 @@ public void testFielddata() throws IOException { public void testFrequencyFilter() throws IOException { MapperService mapperService = createMapperService( fieldMapping( - b -> b.field("type", "text") + b -> b.field("type", textFieldName) .field("fielddata", true) .startObject("fielddata_frequency_filter") .field("min", 2d) @@ -572,17 +585,22 @@ public void testFrequencyFilter() throws IOException { public void testNullConfigValuesFail() throws MapperParsingException { Exception e = expectThrows( MapperParsingException.class, - () -> createDocumentMapper(fieldMapping(b -> b.field("type", "text").field("analyzer", (String) null))) + () -> createDocumentMapper(fieldMapping(b -> b.field("type", textFieldName).field("analyzer", (String) null))) + ); + assertThat( + e.getMessage(), + containsString("[analyzer] on mapper [field] of type [" + textFieldName + "] must not have a [null] value") ); - assertThat(e.getMessage(), containsString("[analyzer] on mapper [field] of type [text] must not have a [null] value")); } public void testNotIndexedFieldPositionIncrement() { Exception e = expectThrows( MapperParsingException.class, - () -> 
createDocumentMapper(fieldMapping(b -> b.field("type", "text").field("index", false).field("position_increment_gap", 10))) + () -> createDocumentMapper( + fieldMapping(b -> b.field("type", textFieldName).field("index", false).field("position_increment_gap", 10)) + ) ); - assertThat(e.getMessage(), containsString("Cannot set position_increment_gap on field [field] without positions enabled")); + assertThat(e.getMessage(), containsString("Cannot set position_increment_gap on field [field]")); } public void testAnalyzedFieldPositionIncrementWithoutPositions() { @@ -590,7 +608,9 @@ public void testAnalyzedFieldPositionIncrementWithoutPositions() { Exception e = expectThrows( MapperParsingException.class, () -> createDocumentMapper( - fieldMapping(b -> b.field("type", "text").field("index_options", indexOptions).field("position_increment_gap", 10)) + fieldMapping( + b -> b.field("type", textFieldName).field("index_options", indexOptions).field("position_increment_gap", 10) + ) ) ); assertThat(e.getMessage(), containsString("Cannot set position_increment_gap on field [field] without positions enabled")); @@ -601,7 +621,7 @@ public void testIndexPrefixIndexTypes() throws IOException { { DocumentMapper mapper = createDocumentMapper( fieldMapping( - b -> b.field("type", "text") + b -> b.field("type", textFieldName) .field("analyzer", "standard") .startObject("index_prefixes") .endObject() @@ -616,7 +636,7 @@ public void testIndexPrefixIndexTypes() throws IOException { { DocumentMapper mapper = createDocumentMapper( fieldMapping( - b -> b.field("type", "text") + b -> b.field("type", textFieldName) .field("analyzer", "standard") .startObject("index_prefixes") .endObject() @@ -633,7 +653,7 @@ public void testIndexPrefixIndexTypes() throws IOException { { DocumentMapper mapper = createDocumentMapper( fieldMapping( - b -> b.field("type", "text") + b -> b.field("type", textFieldName) .field("analyzer", "standard") .startObject("index_prefixes") .endObject() @@ -650,7 +670,7 @@ public void testIndexPrefixIndexTypes() throws IOException { { DocumentMapper mapper = createDocumentMapper( fieldMapping( - b -> b.field("type", "text") + b -> b.field("type", textFieldName) .field("analyzer", "standard") .startObject("index_prefixes") .endObject() @@ -667,7 +687,7 @@ public void testIndexPrefixIndexTypes() throws IOException { { DocumentMapper mapper = createDocumentMapper( fieldMapping( - b -> b.field("type", "text") + b -> b.field("type", textFieldName) .field("analyzer", "standard") .startObject("index_prefixes") .endObject() @@ -683,62 +703,18 @@ public void testIndexPrefixIndexTypes() throws IOException { } public void testNestedIndexPrefixes() throws IOException { - { - MapperService mapperService = createMapperService( - mapping( - b -> b.startObject("object") - .field("type", "object") - .startObject("properties") - .startObject("field") - .field("type", "text") - .startObject("index_prefixes") - .endObject() - .endObject() - .endObject() - .endObject() - ) - ); - MappedFieldType textField = mapperService.fieldType("object.field"); - assertNotNull(textField); - assertThat(textField, instanceOf(TextFieldType.class)); - MappedFieldType prefix = ((TextFieldType) textField).getPrefixFieldType(); - assertEquals(prefix.name(), "object.field._index_prefix"); - FieldMapper mapper = (FieldMapper) mapperService.documentMapper().mappers().getMapper("object.field._index_prefix"); - assertEquals(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, mapper.fieldType.indexOptions()); - 
assertFalse(mapper.fieldType.storeTermVectorOffsets()); - } - { - MapperService mapperService = createMapperService( - mapping( - b -> b.startObject("body") - .field("type", "text") - .startObject("fields") - .startObject("with_prefix") - .field("type", "text") - .startObject("index_prefixes") - .endObject() - .endObject() - .endObject() - .endObject() - ) - ); - MappedFieldType textField = mapperService.fieldType("body.with_prefix"); - assertNotNull(textField); - assertThat(textField, instanceOf(TextFieldType.class)); - MappedFieldType prefix = ((TextFieldType) textField).getPrefixFieldType(); - assertEquals(prefix.name(), "body.with_prefix._index_prefix"); - FieldMapper mapper = (FieldMapper) mapperService.documentMapper().mappers().getMapper("body.with_prefix._index_prefix"); - assertEquals(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, mapper.fieldType.indexOptions()); - assertFalse(mapper.fieldType.storeTermVectorOffsets()); - } } public void testFastPhraseMapping() throws IOException { MapperService mapperService = createMapperService(mapping(b -> { - b.startObject("field").field("type", "text").field("analyzer", "my_stop_analyzer").field("index_phrases", true).endObject(); + b.startObject("field") + .field("type", textFieldName) + .field("analyzer", "my_stop_analyzer") + .field("index_phrases", true) + .endObject(); // "standard" will be replaced with MockSynonymAnalyzer - b.startObject("synfield").field("type", "text").field("analyzer", "standard").field("index_phrases", true).endObject(); + b.startObject("synfield").field("type", textFieldName).field("analyzer", "standard").field("index_phrases", true).endObject(); })); QueryShardContext queryShardContext = createQueryShardContext(mapperService); @@ -809,14 +785,16 @@ protected TokenStreamComponents createComponents(String fieldName) { Exception e = expectThrows( MapperParsingException.class, - () -> createMapperService(fieldMapping(b -> b.field("type", "text").field("index", "false").field("index_phrases", true))) + () -> createMapperService( + fieldMapping(b -> b.field("type", textFieldName).field("index", "false").field("index_phrases", true)) + ) ); assertThat(e.getMessage(), containsString("Cannot set index_phrases on unindexed field [field]")); e = expectThrows( MapperParsingException.class, () -> createMapperService( - fieldMapping(b -> b.field("type", "text").field("index_options", "freqs").field("index_phrases", true)) + fieldMapping(b -> b.field("type", textFieldName).field("index_options", "freqs").field("index_phrases", true)) ) ); assertThat(e.getMessage(), containsString("Cannot set index_phrases on field [field] if positions are not enabled")); @@ -827,7 +805,7 @@ public void testIndexPrefixMapping() throws IOException { { DocumentMapper mapper = createDocumentMapper( fieldMapping( - b -> b.field("type", "text") + b -> b.field("type", textFieldName) .field("analyzer", "standard") .startObject("index_prefixes") .field("min_chars", 2) @@ -845,29 +823,29 @@ public void testIndexPrefixMapping() throws IOException { { DocumentMapper mapper = createDocumentMapper( - fieldMapping(b -> b.field("type", "text").field("analyzer", "standard").startObject("index_prefixes").endObject()) + fieldMapping(b -> b.field("type", textFieldName).field("analyzer", "standard").startObject("index_prefixes").endObject()) ); assertThat(mapper.mappers().getMapper("field._index_prefix").toString(), containsString("prefixChars=2:5")); } { - DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", 
"text").nullField("index_prefixes"))); + DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", textFieldName).nullField("index_prefixes"))); assertNull(mapper.mappers().getMapper("field._index_prefix")); } { MapperParsingException e = expectThrows(MapperParsingException.class, () -> createMapperService(fieldMapping(b -> { - b.field("type", "text").field("analyzer", "standard"); + b.field("type", textFieldName).field("analyzer", "standard"); b.startObject("index_prefixes").field("min_chars", 1).field("max_chars", 10).endObject(); - b.startObject("fields").startObject("_index_prefix").field("type", "text").endObject().endObject(); + b.startObject("fields").startObject("_index_prefix").field("type", textFieldName).endObject().endObject(); }))); assertThat(e.getMessage(), containsString("Field [field._index_prefix] is defined more than once")); } { MapperParsingException e = expectThrows(MapperParsingException.class, () -> createMapperService(fieldMapping(b -> { - b.field("type", "text").field("analyzer", "standard"); + b.field("type", textFieldName).field("analyzer", "standard"); b.startObject("index_prefixes").field("min_chars", 11).field("max_chars", 10).endObject(); }))); assertThat(e.getMessage(), containsString("min_chars [11] must be less than max_chars [10]")); @@ -875,7 +853,7 @@ public void testIndexPrefixMapping() throws IOException { { MapperParsingException e = expectThrows(MapperParsingException.class, () -> createMapperService(fieldMapping(b -> { - b.field("type", "text").field("analyzer", "standard"); + b.field("type", textFieldName).field("analyzer", "standard"); b.startObject("index_prefixes").field("min_chars", 0).field("max_chars", 10).endObject(); }))); assertThat(e.getMessage(), containsString("min_chars [0] must be greater than zero")); @@ -883,7 +861,7 @@ public void testIndexPrefixMapping() throws IOException { { MapperParsingException e = expectThrows(MapperParsingException.class, () -> createMapperService(fieldMapping(b -> { - b.field("type", "text").field("analyzer", "standard"); + b.field("type", textFieldName).field("analyzer", "standard"); b.startObject("index_prefixes").field("min_chars", 1).field("max_chars", 25).endObject(); }))); assertThat(e.getMessage(), containsString("max_chars [25] must be less than 20")); @@ -891,7 +869,7 @@ public void testIndexPrefixMapping() throws IOException { { MapperParsingException e = expectThrows(MapperParsingException.class, () -> createMapperService(fieldMapping(b -> { - b.field("type", "text").field("analyzer", "standard").field("index", false); + b.field("type", textFieldName).field("analyzer", "standard").field("index", false); b.startObject("index_prefixes").endObject(); }))); assertThat(e.getMessage(), containsString("Cannot set index_prefixes on unindexed field [field]")); @@ -902,14 +880,14 @@ public void testFastPhrasePrefixes() throws IOException { MapperService mapperService = createMapperService(mapping(b -> { b.startObject("field"); { - b.field("type", "text"); + b.field("type", textFieldName); b.field("analyzer", "my_stop_analyzer"); b.startObject("index_prefixes").field("min_chars", 2).field("max_chars", 10).endObject(); } b.endObject(); b.startObject("synfield"); { - b.field("type", "text"); + b.field("type", textFieldName); b.field("analyzer", "standard"); // "standard" will be replaced with MockSynonymAnalyzer b.field("index_phrases", true); b.startObject("index_prefixes").field("min_chars", 2).field("max_chars", 10).endObject(); @@ -1000,7 +978,7 @@ public void 
testFastPhrasePrefixes() throws IOException { public void testSimpleMerge() throws IOException { XContentBuilder startingMapping = fieldMapping( - b -> b.field("type", "text").startObject("index_prefixes").endObject().field("index_phrases", true) + b -> b.field("type", textFieldName).startObject("index_prefixes").endObject().field("index_phrases", true) ); MapperService mapperService = createMapperService(startingMapping); assertThat(mapperService.documentMapper().mappers().getMapper("field"), instanceOf(TextFieldMapper.class)); @@ -1009,19 +987,28 @@ public void testSimpleMerge() throws IOException { assertThat(mapperService.documentMapper().mappers().getMapper("field"), instanceOf(TextFieldMapper.class)); XContentBuilder differentPrefix = fieldMapping( - b -> b.field("type", "text").startObject("index_prefixes").field("min_chars", "3").endObject().field("index_phrases", true) + b -> b.field("type", textFieldName) + .startObject("index_prefixes") + .field("min_chars", "3") + .endObject() + .field("index_phrases", true) ); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> merge(mapperService, differentPrefix)); assertThat(e.getMessage(), containsString("Cannot update parameter [index_prefixes]")); XContentBuilder differentPhrases = fieldMapping( - b -> b.field("type", "text").startObject("index_prefixes").endObject().field("index_phrases", false) + b -> b.field("type", textFieldName).startObject("index_prefixes").endObject().field("index_phrases", false) ); e = expectThrows(IllegalArgumentException.class, () -> merge(mapperService, differentPhrases)); assertThat(e.getMessage(), containsString("Cannot update parameter [index_phrases]")); XContentBuilder newField = mapping(b -> { - b.startObject("field").field("type", "text").startObject("index_prefixes").endObject().field("index_phrases", true).endObject(); + b.startObject("field") + .field("type", textFieldName) + .startObject("index_prefixes") + .endObject() + .field("index_phrases", true) + .endObject(); b.startObject("other_field").field("type", "keyword").endObject(); }); merge(mapperService, newField); diff --git a/server/src/test/java/org/opensearch/index/mapper/TextFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/TextFieldTypeTests.java index b8d470e6bdf3e..9c177bbec61fd 100644 --- a/server/src/test/java/org/opensearch/index/mapper/TextFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/TextFieldTypeTests.java @@ -60,41 +60,45 @@ import java.util.Collections; import java.util.List; -import static org.apache.lucene.search.MultiTermQuery.CONSTANT_SCORE_REWRITE; -import static org.apache.lucene.search.MultiTermQuery.CONSTANT_SCORE_BLENDED_REWRITE; import static org.hamcrest.Matchers.equalTo; +import static org.apache.lucene.search.MultiTermQuery.CONSTANT_SCORE_BLENDED_REWRITE; +import static org.apache.lucene.search.MultiTermQuery.CONSTANT_SCORE_REWRITE; public class TextFieldTypeTests extends FieldTypeTestCase { - private static TextFieldType createFieldType() { - return new TextFieldType("field"); + TextFieldType createFieldType(boolean searchable) { + if (searchable) { + return new TextFieldType("field"); + } else { + return new TextFieldType("field", false, false, Collections.emptyMap()); + } } public void testIsAggregatableDependsOnFieldData() { - TextFieldType ft = createFieldType(); + TextFieldType ft = createFieldType(true); assertFalse(ft.isAggregatable()); ft.setFielddata(true); assertTrue(ft.isAggregatable()); } public void testTermQuery() { -
MappedFieldType ft = createFieldType(); + MappedFieldType ft = createFieldType(true); assertEquals(new TermQuery(new Term("field", "foo")), ft.termQuery("foo", null)); assertEquals(AutomatonQueries.caseInsensitiveTermQuery(new Term("field", "fOo")), ft.termQueryCaseInsensitive("fOo", null)); - MappedFieldType unsearchable = new TextFieldType("field", false, false, Collections.emptyMap()); + MappedFieldType unsearchable = createFieldType(false); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> unsearchable.termQuery("bar", null)); assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); } public void testTermsQuery() { - MappedFieldType ft = createFieldType(); + MappedFieldType ft = createFieldType(true); List<BytesRef> terms = new ArrayList<>(); terms.add(new BytesRef("foo")); terms.add(new BytesRef("bar")); assertEquals(new TermInSetQuery("field", terms), ft.termsQuery(Arrays.asList("foo", "bar"), null)); - MappedFieldType unsearchable = new TextFieldType("field", false, false, Collections.emptyMap()); + MappedFieldType unsearchable = createFieldType(false); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> unsearchable.termsQuery(Arrays.asList("foo", "bar"), null) @@ -103,7 +107,7 @@ public void testTermsQuery() { } public void testRangeQuery() { - MappedFieldType ft = createFieldType(); + MappedFieldType ft = createFieldType(true); assertEquals( new TermRangeQuery("field", BytesRefs.toBytesRef("foo"), BytesRefs.toBytesRef("bar"), true, false), ft.rangeQuery("foo", "bar", true, false, null, null, null, MOCK_QSC) @@ -120,13 +124,13 @@ public void testRangeQuery() { } public void testRegexpQuery() { - MappedFieldType ft = createFieldType(); + MappedFieldType ft = createFieldType(true); assertEquals( new RegexpQuery(new Term("field", "foo.*")), ft.regexpQuery("foo.*", 0, 0, 10, CONSTANT_SCORE_BLENDED_REWRITE, MOCK_QSC) ); - MappedFieldType unsearchable = new TextFieldType("field", false, false, Collections.emptyMap()); + MappedFieldType unsearchable = createFieldType(false); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> unsearchable.regexpQuery("foo.*", 0, 0, 10, null, MOCK_QSC) @@ -141,13 +145,13 @@ public void testRegexpQuery() { } public void testFuzzyQuery() { - MappedFieldType ft = createFieldType(); + MappedFieldType ft = createFieldType(true); assertEquals( new FuzzyQuery(new Term("field", "foo"), 2, 1, 50, true), ft.fuzzyQuery("foo", Fuzziness.fromEdits(2), 1, 50, true, MOCK_QSC) ); - MappedFieldType unsearchable = new TextFieldType("field", false, false, Collections.emptyMap()); + MappedFieldType unsearchable = createFieldType(false); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> unsearchable.fuzzyQuery("foo", Fuzziness.fromEdits(2), 1, 50, true, MOCK_QSC) @@ -162,7 +166,7 @@ public void testFuzzyQuery() { } public void testIndexPrefixes() { - TextFieldType ft = createFieldType(); + TextFieldType ft = createFieldType(true); ft.setPrefixFieldType(new TextFieldMapper.PrefixFieldType(ft, "field._index_prefix", 2, 10)); Query q = ft.prefixQuery("goin", CONSTANT_SCORE_REWRITE, false, randomMockShardContext()); @@ -222,7 +226,7 @@ public void testIndexPrefixes() { } public void testFetchSourceValue() throws IOException { - TextFieldType fieldType = createFieldType(); + TextFieldType fieldType = createFieldType(true); fieldType.setIndexAnalyzer(Lucene.STANDARD_ANALYZER); assertEquals(List.of("value"), 
fetchSourceValue(fieldType, "value")); diff --git a/server/src/test/java/org/opensearch/index/mapper/TypeParsersTests.java b/server/src/test/java/org/opensearch/index/mapper/TypeParsersTests.java index 98493fe87ff07..5187242f5fdac 100644 --- a/server/src/test/java/org/opensearch/index/mapper/TypeParsersTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/TypeParsersTests.java @@ -33,10 +33,10 @@ package org.opensearch.index.mapper; import org.opensearch.Version; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.analysis.AnalyzerScope; import org.opensearch.index.analysis.IndexAnalyzers; import org.opensearch.index.analysis.NamedAnalyzer; @@ -47,8 +47,8 @@ import java.util.Collections; import java.util.HashMap; import java.util.Map; -import java.util.stream.IntStream; import java.util.stream.Collectors; +import java.util.stream.IntStream; import static org.opensearch.index.analysis.AnalysisRegistry.DEFAULT_ANALYZER_NAME; import static org.opensearch.index.analysis.AnalysisRegistry.DEFAULT_SEARCH_ANALYZER_NAME; diff --git a/server/src/test/java/org/opensearch/index/mapper/UidTests.java b/server/src/test/java/org/opensearch/index/mapper/UidTests.java index a7253d9a6a7c4..9ce737358e252 100644 --- a/server/src/test/java/org/opensearch/index/mapper/UidTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/UidTests.java @@ -31,8 +31,8 @@ package org.opensearch.index.mapper; -import org.apache.lucene.util.BytesRef; import org.apache.lucene.tests.util.TestUtil; +import org.apache.lucene.util.BytesRef; import org.opensearch.test.OpenSearchTestCase; import java.util.Arrays; diff --git a/server/src/test/java/org/opensearch/index/mapper/UpdateMappingTests.java b/server/src/test/java/org/opensearch/index/mapper/UpdateMappingTests.java index 8651f973b3317..7e40354eb7f29 100644 --- a/server/src/test/java/org/opensearch/index/mapper/UpdateMappingTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/UpdateMappingTests.java @@ -34,17 +34,16 @@ import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.Settings; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexService; import org.opensearch.index.mapper.MapperService.MergeReason; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.hamcrest.Matchers; import java.io.IOException; @@ -128,7 +127,7 @@ public void testConflictSameType() throws Exception { IllegalArgumentException.class, () -> mapperService.merge( MapperService.SINGLE_MAPPING_NAME, - new CompressedXContent(Strings.toString(update)), + new CompressedXContent(update.toString()), MapperService.MergeReason.MAPPING_UPDATE ) ); @@ -138,7 +137,7 @@ 
public void testConflictSameType() throws Exception { IllegalArgumentException.class, () -> mapperService.merge( MapperService.SINGLE_MAPPING_NAME, - new CompressedXContent(Strings.toString(update)), + new CompressedXContent(update.toString()), MapperService.MergeReason.MAPPING_UPDATE ) ); @@ -175,7 +174,7 @@ public void testConflictNewType() throws Exception { IllegalArgumentException.class, () -> mapperService.merge( MapperService.SINGLE_MAPPING_NAME, - new CompressedXContent(Strings.toString(update)), + new CompressedXContent(update.toString()), MapperService.MergeReason.MAPPING_UPDATE ) ); @@ -202,7 +201,7 @@ public void testReuseMetaField() throws IOException { MapperParsingException.class, () -> mapperService.merge( MapperService.SINGLE_MAPPING_NAME, - new CompressedXContent(Strings.toString(mapping)), + new CompressedXContent(mapping.toString()), MapperService.MergeReason.MAPPING_UPDATE ) ); @@ -212,7 +211,7 @@ public void testReuseMetaField() throws IOException { MapperParsingException.class, () -> mapperService.merge( MapperService.SINGLE_MAPPING_NAME, - new CompressedXContent(Strings.toString(mapping)), + new CompressedXContent(mapping.toString()), MapperService.MergeReason.MAPPING_UPDATE ) ); @@ -220,26 +219,24 @@ public void testReuseMetaField() throws IOException { } public void testRejectFieldDefinedTwice() throws IOException { - String mapping1 = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("foo") - .field("type", "object") - .endObject() - .endObject() - .endObject() - ); - String mapping2 = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("foo") - .field("type", "long") - .endObject() - .endObject() - .endObject() - ); + String mapping1 = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("foo") + .field("type", "object") + .endObject() + .endObject() + .endObject() + .toString(); + String mapping2 = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("foo") + .field("type", "long") + .endObject() + .endObject() + .endObject() + .toString(); MapperService mapperService1 = createIndex("test1").mapperService(); mapperService1.merge(MapperService.SINGLE_MAPPING_NAME, new CompressedXContent(mapping1), MergeReason.MAPPING_UPDATE); diff --git a/server/src/test/java/org/opensearch/index/query/AbstractQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/AbstractQueryBuilderTests.java index 8902143b708e3..fdbb2ef43aa8e 100644 --- a/server/src/test/java/org/opensearch/index/query/AbstractQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/AbstractQueryBuilderTests.java @@ -32,11 +32,11 @@ package org.opensearch.index.query; -import org.opensearch.core.common.ParsingException; import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.ParsingException; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.search.SearchModule; import org.opensearch.test.OpenSearchTestCase; import org.junit.AfterClass; diff --git a/server/src/test/java/org/opensearch/index/query/AbstractTermQueryTestCase.java b/server/src/test/java/org/opensearch/index/query/AbstractTermQueryTestCase.java index 34a6484bbe481..f7a4c1b48e3d1 100644 --- 
a/server/src/test/java/org/opensearch/index/query/AbstractTermQueryTestCase.java +++ b/server/src/test/java/org/opensearch/index/query/AbstractTermQueryTestCase.java @@ -33,6 +33,7 @@ package org.opensearch.index.query; import com.fasterxml.jackson.core.io.JsonStringEncoder; + import org.opensearch.test.AbstractQueryTestCase; import java.util.HashMap; diff --git a/server/src/test/java/org/opensearch/index/query/BoolQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/BoolQueryBuilderTests.java index 92efd19732ef0..d0f26f3026789 100644 --- a/server/src/test/java/org/opensearch/index/query/BoolQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/BoolQueryBuilderTests.java @@ -37,20 +37,23 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.test.AbstractQueryTestCase; import org.hamcrest.Matchers; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Set; import static org.opensearch.index.query.QueryBuilders.boolQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; @@ -181,7 +184,7 @@ public void testIllegalArguments() { // https://github.com/elastic/elasticsearch/issues/7240 public void testEmptyBooleanQuery() throws Exception { - XContentBuilder contentBuilder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder contentBuilder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); contentBuilder.startObject().startObject("bool").endObject().endObject(); try (XContentParser xParser = createParser(contentBuilder)) { Query parsedQuery = parseQuery(xParser).toQuery(createShardContext()); @@ -456,4 +459,26 @@ public void testMustRewrite() throws IOException { IllegalStateException e = expectThrows(IllegalStateException.class, () -> boolQuery.toQuery(context)); assertEquals("Rewrite first", e.getMessage()); } + + public void testVisit() { + BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder(); + boolQueryBuilder.should(new TermQueryBuilder(TEXT_FIELD_NAME, "should")); + boolQueryBuilder.must(new TermQueryBuilder(TEXT_FIELD_NAME, "must1")); + boolQueryBuilder.must(new TermQueryBuilder(TEXT_FIELD_NAME, "must2")); // Add a second one to confirm that they both get visited + boolQueryBuilder.mustNot(new TermQueryBuilder(TEXT_FIELD_NAME, "mustNot")); + boolQueryBuilder.filter(new TermQueryBuilder(TEXT_FIELD_NAME, "filter")); + List<QueryBuilder> visitedQueries = new ArrayList<>(); + boolQueryBuilder.visit(createTestVisitor(visitedQueries)); + assertEquals(6, visitedQueries.size()); + Set<Object> set = new HashSet<>(Arrays.asList("should", "must1", "must2", "mustNot", "filter")); + + for (QueryBuilder qb : visitedQueries) { + if (qb instanceof TermQueryBuilder) { + set.remove(((TermQueryBuilder) qb).value()); + } + } + + assertEquals(0, set.size()); + + } } diff --git 
a/server/src/test/java/org/opensearch/index/query/BoostingQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/BoostingQueryBuilderTests.java index 94ded24975be4..66a02a02d4e5b 100644 --- a/server/src/test/java/org/opensearch/index/query/BoostingQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/BoostingQueryBuilderTests.java @@ -38,6 +38,8 @@ import org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.nullValue; @@ -153,4 +155,16 @@ public void testMustRewrite() throws IOException { e = expectThrows(IllegalStateException.class, () -> queryBuilder2.toQuery(context)); assertEquals("Rewrite first", e.getMessage()); } + + public void testVisit() { + BoostingQueryBuilder builder = new BoostingQueryBuilder( + new TermQueryBuilder("unmapped_field", "value"), + new TermQueryBuilder(KEYWORD_FIELD_NAME, "other_value") + ); + + List<QueryBuilder> visitedQueries = new ArrayList<>(); + builder.visit(createTestVisitor(visitedQueries)); + + assertEquals(3, visitedQueries.size()); + } } diff --git a/server/src/test/java/org/opensearch/index/query/CombineFunctionTests.java b/server/src/test/java/org/opensearch/index/query/CombineFunctionTests.java index d7243f4c024f7..516a4c7f34517 100644 --- a/server/src/test/java/org/opensearch/index/query/CombineFunctionTests.java +++ b/server/src/test/java/org/opensearch/index/query/CombineFunctionTests.java @@ -33,8 +33,8 @@ package org.opensearch.index.query; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.lucene.search.function.CombineFunction; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.test.OpenSearchTestCase; import static org.hamcrest.Matchers.equalTo; diff --git a/server/src/test/java/org/opensearch/index/query/ConstantScoreQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/ConstantScoreQueryBuilderTests.java index 2bfe964ce7259..527413d2513d0 100644 --- a/server/src/test/java/org/opensearch/index/query/ConstantScoreQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/ConstantScoreQueryBuilderTests.java @@ -39,6 +39,8 @@ import org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.nullValue; @@ -133,4 +135,12 @@ public void testMustRewrite() throws IOException { IllegalStateException e = expectThrows(IllegalStateException.class, () -> queryBuilder.toQuery(context)); assertEquals("Rewrite first", e.getMessage()); } + + public void testVisit() { + ConstantScoreQueryBuilder queryBuilder = new ConstantScoreQueryBuilder(new TermQueryBuilder("unmapped_field", "foo")); + List<QueryBuilder> visitorQueries = new ArrayList<>(); + queryBuilder.visit(createTestVisitor(visitorQueries)); + + assertEquals(2, visitorQueries.size()); + } } diff --git a/server/src/test/java/org/opensearch/index/query/DisMaxQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/DisMaxQueryBuilderTests.java index 8d589bd76f2bb..cb0df38de5c02 100644 --- a/server/src/test/java/org/opensearch/index/query/DisMaxQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/DisMaxQueryBuilderTests.java @@ -41,6 +41,7 @@ import 
org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; +import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.List; @@ -156,4 +157,14 @@ public void testRewriteMultipleTimes() throws IOException { assertEquals(rewrittenAgain, expected); assertEquals(Rewriteable.rewrite(dismax, createShardContext()), expected); } + + public void testVisit() { + DisMaxQueryBuilder dismax = new DisMaxQueryBuilder(); + dismax.add(new WrapperQueryBuilder(new WrapperQueryBuilder(new MatchAllQueryBuilder().toString()).toString())); + + List<QueryBuilder> visitedQueries = new ArrayList<>(); + dismax.visit(createTestVisitor(visitedQueries)); + + assertEquals(2, visitedQueries.size()); + } } diff --git a/server/src/test/java/org/opensearch/index/query/FieldMaskingSpanQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/FieldMaskingSpanQueryBuilderTests.java index fb5fc4192d02c..db0a7bf1795ff 100644 --- a/server/src/test/java/org/opensearch/index/query/FieldMaskingSpanQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/FieldMaskingSpanQueryBuilderTests.java @@ -33,14 +33,16 @@ package org.opensearch.index.query; import org.apache.lucene.index.Term; +import org.apache.lucene.queries.spans.FieldMaskingSpanQuery; import org.apache.lucene.queries.spans.SpanTermQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.Query; -import org.apache.lucene.queries.spans.FieldMaskingSpanQuery; import org.opensearch.core.common.ParsingException; import org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import static org.opensearch.index.query.FieldMaskingSpanQueryBuilder.SPAN_FIELD_MASKING_FIELD; import static org.hamcrest.CoreMatchers.equalTo; @@ -147,4 +149,10 @@ public void testDeprecatedName() throws IOException { "Deprecated field [field_masking_span] used, expected [" + SPAN_FIELD_MASKING_FIELD.getPreferredName() + "] instead" ); } + + public void testVisit() { + List<QueryBuilder> visitedQueries = new ArrayList<>(); + doCreateTestQueryBuilder().visit(createTestVisitor(visitedQueries)); + assertEquals(2, visitedQueries.size()); + } } diff --git a/server/src/test/java/org/opensearch/index/query/FuzzyIntervalsSourceProviderTests.java b/server/src/test/java/org/opensearch/index/query/FuzzyIntervalsSourceProviderTests.java index d25d34ed80be2..c084658c64551 100644 --- a/server/src/test/java/org/opensearch/index/query/FuzzyIntervalsSourceProviderTests.java +++ b/server/src/test/java/org/opensearch/index/query/FuzzyIntervalsSourceProviderTests.java @@ -32,8 +32,8 @@ package org.opensearch.index.query; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.unit.Fuzziness; +import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.query.IntervalsSourceProvider.Fuzzy; import org.opensearch.test.AbstractSerializingTestCase; diff --git a/server/src/test/java/org/opensearch/index/query/FuzzyQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/FuzzyQueryBuilderTests.java index b98f3f561d673..42c905301c390 100644 --- a/server/src/test/java/org/opensearch/index/query/FuzzyQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/FuzzyQueryBuilderTests.java @@ -37,8 +37,8 @@ import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.Query; import 
org.opensearch.OpenSearchParseException; -import org.opensearch.core.common.ParsingException; import org.opensearch.common.unit.Fuzziness; +import org.opensearch.core.common.ParsingException; import org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/index/query/GeoBoundingBoxQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/GeoBoundingBoxQueryBuilderTests.java index 94cdebdcdf59e..1800beddad2ae 100644 --- a/server/src/test/java/org/opensearch/index/query/GeoBoundingBoxQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/GeoBoundingBoxQueryBuilderTests.java @@ -45,11 +45,12 @@ import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.test.AbstractQueryTestCase; import org.opensearch.test.geo.RandomShapeGenerator; -import org.locationtech.spatial4j.io.GeohashUtils; -import org.locationtech.spatial4j.shape.Rectangle; import java.io.IOException; +import org.locationtech.spatial4j.io.GeohashUtils; +import org.locationtech.spatial4j.shape.Rectangle; + import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.notNullValue; diff --git a/server/src/test/java/org/opensearch/index/query/GeoDistanceQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/GeoDistanceQueryBuilderTests.java index 5140f5b5d466f..0dfe47e83fedc 100644 --- a/server/src/test/java/org/opensearch/index/query/GeoDistanceQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/GeoDistanceQueryBuilderTests.java @@ -37,19 +37,20 @@ import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; -import org.opensearch.core.common.ParsingException; import org.opensearch.common.geo.GeoDistance; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.unit.DistanceUnit; +import org.opensearch.core.common.ParsingException; import org.opensearch.index.mapper.GeoPointFieldMapper; import org.opensearch.index.mapper.GeoShapeFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.test.AbstractQueryTestCase; import org.opensearch.test.geo.RandomShapeGenerator; -import org.locationtech.spatial4j.shape.Point; import java.io.IOException; +import org.locationtech.spatial4j.shape.Point; + import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.notNullValue; @@ -357,7 +358,10 @@ private void assertGeoDistanceRangeQuery(String query, double lat, double lon, d Query parsedQuery = parseQuery(query).toQuery(createShardContext()); // The parsedQuery contains IndexOrDocValuesQuery, which wraps LatLonPointDistanceQuery which in turn has default visibility, // so we cannot access its fields directly to check and have to use toString() here instead. 
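// The rewritten assertion below therefore unwraps the index-side query with IndexOrDocValuesQuery#getIndexQuery(), so the expected "field:lat,lon +/- d meters" string no longer depends on how the wrapper itself chooses to render toString().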
- assertEquals(parsedQuery.toString(), "mapped_geo_point:" + lat + "," + lon + " +/- " + distanceUnit.toMeters(distance) + " meters"); + assertEquals( + ((IndexOrDocValuesQuery) parsedQuery).getIndexQuery().toString(), + "mapped_geo_point:" + lat + "," + lon + " +/- " + distanceUnit.toMeters(distance) + " meters" + ); } public void testFromJson() throws IOException { diff --git a/server/src/test/java/org/opensearch/index/query/GeoPolygonQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/GeoPolygonQueryBuilderTests.java index ea65395fbea13..f1d16a0159965 100644 --- a/server/src/test/java/org/opensearch/index/query/GeoPolygonQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/GeoPolygonQueryBuilderTests.java @@ -37,21 +37,22 @@ import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; -import org.opensearch.core.common.ParsingException; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.geo.builders.ShapeBuilder; +import org.opensearch.core.common.ParsingException; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.test.AbstractQueryTestCase; import org.opensearch.test.geo.RandomShapeGenerator; import org.opensearch.test.geo.RandomShapeGenerator.ShapeType; -import org.locationtech.jts.geom.Coordinate; -import org.locationtech.spatial4j.shape.jts.JtsGeometry; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.spatial4j.shape.jts.JtsGeometry; + import static org.opensearch.test.StreamsUtils.copyToStringFromClasspath; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.instanceOf; diff --git a/server/src/test/java/org/opensearch/index/query/GeoShapeQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/GeoShapeQueryBuilderTests.java index e73f2e3f26928..8ad69774ff5b8 100644 --- a/server/src/test/java/org/opensearch/index/query/GeoShapeQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/GeoShapeQueryBuilderTests.java @@ -40,26 +40,26 @@ import org.opensearch.action.get.GetRequest; import org.opensearch.action.get.GetResponse; import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.geo.builders.EnvelopeBuilder; import org.opensearch.common.geo.builders.ShapeBuilder; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.settings.Settings; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.get.GetResult; import org.opensearch.test.AbstractQueryTestCase; import org.opensearch.test.VersionUtils; import org.opensearch.test.geo.RandomShapeGenerator; import org.opensearch.test.geo.RandomShapeGenerator.ShapeType; import org.junit.After; -import org.locationtech.jts.geom.Coordinate; import java.io.IOException; +import org.locationtech.jts.geom.Coordinate; + import static org.hamcrest.CoreMatchers.instanceOf; import static 
org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.Matchers.anyOf; @@ -107,7 +107,7 @@ protected GetResponse executeGet(GetRequest getRequest) { builder.field(expectedShapePath, indexedShapeToReturn); builder.field(randomAlphaOfLengthBetween(10, 20), "something"); builder.endObject(); - json = Strings.toString(builder); + json = builder.toString(); } catch (IOException ex) { throw new OpenSearchException("boom", ex); } diff --git a/server/src/test/java/org/opensearch/index/query/InnerHitBuilderTests.java b/server/src/test/java/org/opensearch/index/query/InnerHitBuilderTests.java index cb75889a0aa24..7c7598e5dc3c4 100644 --- a/server/src/test/java/org/opensearch/index/query/InnerHitBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/InnerHitBuilderTests.java @@ -32,11 +32,11 @@ package org.opensearch.index.query; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; @@ -109,7 +109,7 @@ public void testSerialization() throws Exception { /** * Test that if we serialize and deserialize an object, further * serialization leads to identical bytes representation. - * + * <p> * This is necessary to ensure because we use the serialized BytesReference * of this builder as part of the cacheKey in * {@link ShardSearchRequest} (via @@ -133,7 +133,7 @@ public void testSerializationOrder() throws Exception { public void testFromAndToXContent() throws Exception { for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) { InnerHitBuilder innerHit = randomInnerHits(); - XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); innerHit.toXContent(builder, ToXContent.EMPTY_PARAMS); // fields is printed out as an object but parsed into a List where order matters, we disable shuffling XContentBuilder shuffled = shuffleXContent(builder, "fields"); diff --git a/server/src/test/java/org/opensearch/index/query/IntervalBuilderTests.java b/server/src/test/java/org/opensearch/index/query/IntervalBuilderTests.java index b31bcc10a28cc..3503552ee442b 100644 --- a/server/src/test/java/org/opensearch/index/query/IntervalBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/IntervalBuilderTests.java @@ -33,11 +33,11 @@ package org.opensearch.index.query; import org.apache.lucene.analysis.CachingTokenFilter; -import org.apache.lucene.tests.analysis.CannedTokenStream; -import org.apache.lucene.tests.analysis.Token; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.queries.intervals.Intervals; import org.apache.lucene.queries.intervals.IntervalsSource; +import org.apache.lucene.tests.analysis.CannedTokenStream; +import org.apache.lucene.tests.analysis.Token; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/index/query/IntervalQueryBuilderTests.java 
diff --git a/server/src/test/java/org/opensearch/index/query/IntervalQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/IntervalQueryBuilderTests.java
index 23d6df398a8b2..660b8a3aa8994 100644
--- a/server/src/test/java/org/opensearch/index/query/IntervalQueryBuilderTests.java
+++ b/server/src/test/java/org/opensearch/index/query/IntervalQueryBuilderTests.java
@@ -44,12 +44,11 @@
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.automaton.CompiledAutomaton;
 import org.apache.lucene.util.automaton.RegExp;
-import org.opensearch.core.common.ParsingException;
-import org.opensearch.common.Strings;
 import org.opensearch.common.compress.CompressedXContent;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.Fuzziness;
 import org.opensearch.common.util.BigArrays;
+import org.opensearch.core.common.ParsingException;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.index.mapper.MapperService;
 import org.opensearch.script.Script;
@@ -109,7 +108,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws
             .endObject()
             .endObject();
 
-        mapperService.merge("_doc", new CompressedXContent(Strings.toString(mapping)), MapperService.MergeReason.MAPPING_UPDATE);
+        mapperService.merge("_doc", new CompressedXContent(mapping.toString()), MapperService.MergeReason.MAPPING_UPDATE);
     }
 
     private static IntervalsSourceProvider createRandomSource(int depth, boolean useScripts) {
diff --git a/server/src/test/java/org/opensearch/index/query/MatchBoolPrefixQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/MatchBoolPrefixQueryBuilderTests.java
index 1a311448b7fb5..46a0192164817 100644
--- a/server/src/test/java/org/opensearch/index/query/MatchBoolPrefixQueryBuilderTests.java
+++ b/server/src/test/java/org/opensearch/index/query/MatchBoolPrefixQueryBuilderTests.java
@@ -32,7 +32,6 @@
 
 package org.opensearch.index.query;
 
-import org.apache.lucene.tests.analysis.MockSynonymAnalyzer;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
@@ -42,8 +41,9 @@
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.SynonymQuery;
 import org.apache.lucene.search.TermQuery;
-import org.opensearch.core.common.ParsingException;
+import org.apache.lucene.tests.analysis.MockSynonymAnalyzer;
 import org.opensearch.common.lucene.search.Queries;
+import org.opensearch.core.common.ParsingException;
 import org.opensearch.index.search.MatchQuery;
 import org.opensearch.test.AbstractQueryTestCase;
 
diff --git a/server/src/test/java/org/opensearch/index/query/MatchPhrasePrefixQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/MatchPhrasePrefixQueryBuilderTests.java
index 93826b52ca179..39aa5efa4a83e 100644
--- a/server/src/test/java/org/opensearch/index/query/MatchPhrasePrefixQueryBuilderTests.java
+++ b/server/src/test/java/org/opensearch/index/query/MatchPhrasePrefixQueryBuilderTests.java
@@ -36,8 +36,8 @@
 import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.SynonymQuery;
-import org.opensearch.core.common.ParsingException;
 import org.opensearch.common.lucene.search.MultiPhrasePrefixQuery;
+import org.opensearch.core.common.ParsingException;
 import org.opensearch.index.search.MatchQuery.ZeroTermsQuery;
 import org.opensearch.test.AbstractQueryTestCase;
 
diff --git a/server/src/test/java/org/opensearch/index/query/MatchQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/MatchQueryBuilderTests.java
index 6855a14ac5a8d..04dcacbd86ac5 100644
--- a/server/src/test/java/org/opensearch/index/query/MatchQueryBuilderTests.java
+++ b/server/src/test/java/org/opensearch/index/query/MatchQueryBuilderTests.java
@@ -34,8 +34,6 @@
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.tests.analysis.CannedBinaryTokenStream;
-import org.apache.lucene.tests.analysis.MockSynonymAnalyzer;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queries.ExtendedCommonTermsQuery;
 import org.apache.lucene.queries.spans.SpanNearQuery;
@@ -52,14 +50,15 @@
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.SynonymQuery;
 import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.tests.analysis.CannedBinaryTokenStream;
+import org.apache.lucene.tests.analysis.MockSynonymAnalyzer;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.graph.GraphTokenStreamFiniteStrings;
 import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest;
-import org.opensearch.core.common.ParsingException;
-import org.opensearch.common.Strings;
 import org.opensearch.common.compress.CompressedXContent;
 import org.opensearch.common.lucene.search.MultiPhrasePrefixQuery;
 import org.opensearch.common.lucene.search.Queries;
+import org.opensearch.core.common.ParsingException;
 import org.opensearch.index.mapper.MappedFieldType;
 import org.opensearch.index.mapper.MapperService;
 import org.opensearch.index.search.MatchQuery;
@@ -71,10 +70,10 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
-import java.util.Iterator;
 
 import static org.hamcrest.CoreMatchers.either;
 import static org.hamcrest.CoreMatchers.instanceOf;
@@ -411,9 +410,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws
         mapperService.merge(
             "_doc",
             new CompressedXContent(
-                Strings.toString(
-                    PutMappingRequest.simpleMapping("string_boost", "type=text", "string_no_pos", "type=text,index_options=docs")
-                )
+                PutMappingRequest.simpleMapping("string_boost", "type=text", "string_no_pos", "type=text,index_options=docs").toString()
             ),
             MapperService.MergeReason.MAPPING_UPDATE
         );
diff --git a/server/src/test/java/org/opensearch/index/query/MoreLikeThisQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/MoreLikeThisQueryBuilderTests.java
index 2d950f0994976..b6449ef0332e1 100644
--- a/server/src/test/java/org/opensearch/index/query/MoreLikeThisQueryBuilderTests.java
+++ b/server/src/test/java/org/opensearch/index/query/MoreLikeThisQueryBuilderTests.java
@@ -45,17 +45,16 @@
 import org.opensearch.action.termvectors.TermVectorsRequest;
 import org.opensearch.action.termvectors.TermVectorsResponse;
 import org.opensearch.cluster.metadata.IndexMetadata;
-import org.opensearch.common.Strings;
-import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.common.io.stream.BytesStreamOutput;
 import org.opensearch.common.lucene.search.MoreLikeThisQuery;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.util.CollectionUtils;
+import org.opensearch.common.xcontent.XContentFactory;
+import org.opensearch.common.xcontent.json.JsonXContent;
+import org.opensearch.core.common.bytes.BytesReference;
+import org.opensearch.core.common.util.CollectionUtils;
 import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.common.xcontent.XContentFactory;
 import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.common.xcontent.json.JsonXContent;
 import org.opensearch.index.VersionType;
 import org.opensearch.index.query.MoreLikeThisQueryBuilder.Item;
 import org.opensearch.test.AbstractQueryTestCase;
@@ -394,7 +393,7 @@ public void testItemCopy() throws IOException {
 
     public void testItemFromXContent() throws IOException {
         Item expectedItem = generateRandomItem();
-        String json = Strings.toString(expectedItem.toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS));
+        String json = expectedItem.toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS).toString();
         XContentParser parser = createParser(JsonXContent.jsonXContent, json);
         Item newItem = Item.parse(parser, new Item());
         assertEquals(expectedItem, newItem);
diff --git a/server/src/test/java/org/opensearch/index/query/MultiMatchQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/MultiMatchQueryBuilderTests.java
index c71bbba93a4fb..39f5bb313fe9e 100644
--- a/server/src/test/java/org/opensearch/index/query/MultiMatchQueryBuilderTests.java
+++ b/server/src/test/java/org/opensearch/index/query/MultiMatchQueryBuilderTests.java
@@ -48,10 +48,10 @@
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
 import org.opensearch.cluster.metadata.IndexMetadata;
-import org.opensearch.core.common.ParsingException;
 import org.opensearch.common.lucene.search.MultiPhrasePrefixQuery;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.Fuzziness;
+import org.opensearch.core.common.ParsingException;
 import org.opensearch.index.query.MultiMatchQueryBuilder.Type;
 import org.opensearch.index.search.MatchQuery;
 import org.opensearch.test.AbstractQueryTestCase;
@@ -304,10 +304,16 @@ public void testToQueryBooleanPrefixMultipleFields() throws IOException {
             } else if (disjunct instanceof PrefixQuery) {
                 final PrefixQuery secondDisjunct = (PrefixQuery) disjunct;
                 assertThat(secondDisjunct.getPrefix(), equalTo(new Term(KEYWORD_FIELD_NAME, "foo bar")));
+            } else if (disjunct instanceof IndexOrDocValuesQuery) {
+                final IndexOrDocValuesQuery iodvqDisjunct = (IndexOrDocValuesQuery) disjunct;
+                assertThat(iodvqDisjunct.getIndexQuery().toString(), equalTo("mapped_string_2:foo bar*"));
             } else {
                 throw new AssertionError();
             }
-            assertThat(disjunct, either(instanceOf(BooleanQuery.class)).or(instanceOf(PrefixQuery.class)));
+            assertThat(
+                disjunct,
+                either(instanceOf(BooleanQuery.class)).or(instanceOf(PrefixQuery.class)).or(instanceOf(IndexOrDocValuesQuery.class))
+            );
         }
     }
 }
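Note: several assertions in this section now unwrap an IndexOrDocValuesQuery before comparing query strings. A hedged sketch of the Lucene API involved (the field name and bounds are illustrative): the query carries an index-structure leg and a doc-values leg for the same predicate, and getIndexQuery() exposes the leg these tests assert on.

    import org.apache.lucene.document.LongPoint;
    import org.apache.lucene.document.SortedNumericDocValuesField;
    import org.apache.lucene.search.IndexOrDocValuesQuery;
    import org.apache.lucene.search.Query;

    public class IndexOrDocValuesSketch {
        public static void main(String[] args) {
            Query indexQuery = LongPoint.newRangeQuery("price", 10L, 20L); // points-based execution
            Query dvQuery = SortedNumericDocValuesField.newSlowRangeQuery("price", 10L, 20L); // doc-values execution
            IndexOrDocValuesQuery query = new IndexOrDocValuesQuery(indexQuery, dvQuery);
            System.out.println(query.getIndexQuery()); // the leg the assertions above inspect
        }
    }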
diff --git a/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java
index 91cfc90a9cebb..29efd64e5c751 100644
--- a/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java
+++ b/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java
@@ -34,13 +34,16 @@
 
 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
 
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.join.ScoreMode;
 import org.opensearch.OpenSearchException;
 import org.opensearch.Version;
 import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest;
-import org.opensearch.common.Strings;
+import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.common.compress.CompressedXContent;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.index.IndexSettings;
@@ -59,6 +62,7 @@
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.Optional;
 
 import static org.opensearch.index.IndexSettingsTests.newIndexMeta;
 import static org.opensearch.index.query.InnerHitBuilderTests.randomNestedInnerHits;
@@ -67,6 +71,8 @@
 import static org.hamcrest.CoreMatchers.instanceOf;
 import static org.hamcrest.CoreMatchers.notNullValue;
 import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
 public class NestedQueryBuilderTests extends AbstractQueryTestCase<NestedQueryBuilder> {
@@ -76,26 +82,24 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws
         mapperService.merge(
             "_doc",
             new CompressedXContent(
-                Strings.toString(
-                    PutMappingRequest.simpleMapping(
-                        TEXT_FIELD_NAME,
-                        "type=text",
-                        INT_FIELD_NAME,
-                        "type=integer",
-                        DOUBLE_FIELD_NAME,
-                        "type=double",
-                        BOOLEAN_FIELD_NAME,
-                        "type=boolean",
-                        DATE_FIELD_NAME,
-                        "type=date",
-                        OBJECT_FIELD_NAME,
-                        "type=object",
-                        GEO_POINT_FIELD_NAME,
-                        "type=geo_point",
-                        "nested1",
-                        "type=nested"
-                    )
-                )
+                PutMappingRequest.simpleMapping(
+                    TEXT_FIELD_NAME,
+                    "type=text",
+                    INT_FIELD_NAME,
+                    "type=integer",
+                    DOUBLE_FIELD_NAME,
+                    "type=double",
+                    BOOLEAN_FIELD_NAME,
+                    "type=boolean",
+                    DATE_FIELD_NAME,
+                    "type=date",
+                    OBJECT_FIELD_NAME,
+                    "type=object",
+                    GEO_POINT_FIELD_NAME,
+                    "type=geo_point",
+                    "nested1",
+                    "type=nested"
+                ).toString()
             ),
             MapperService.MergeReason.MAPPING_UPDATE
        );
@@ -414,4 +418,114 @@ public void testDisallowExpensiveQueries() {
         OpenSearchException e = expectThrows(OpenSearchException.class, () -> queryBuilder.toQuery(queryShardContext));
         assertEquals("[joining] queries cannot be executed when 'search.allow_expensive_queries' is set to false.", e.getMessage());
     }
+
+    public void testSetParentFilterInContext() throws Exception {
+        QueryShardContext queryShardContext = createShardContext();
+        QueryBuilder innerQueryBuilder = spy(new MatchAllQueryBuilderTests().createTestQueryBuilder());
+        when(innerQueryBuilder.toQuery(queryShardContext)).thenAnswer(invoke -> {
+            QueryShardContext context = invoke.getArgument(0);
+            if (context.getParentFilter() == null) {
+                throw new Exception("Expect parent filter to be non-null");
+            }
+            return invoke.callRealMethod();
+        });
+        NestedQueryBuilder nqb = new NestedQueryBuilder("nested1", innerQueryBuilder, RandomPicks.randomFrom(random(), ScoreMode.values()));
+
+        assertNull(queryShardContext.getParentFilter());
+        nqb.rewrite(queryShardContext).toQuery(queryShardContext);
+        assertNull(queryShardContext.getParentFilter());
+        verify(innerQueryBuilder).toQuery(queryShardContext);
+    }
+
+    public void testNestedDepthProhibited() throws Exception {
+        assertThrows(IllegalArgumentException.class, () -> doWithDepth(0, context -> fail("won't call")));
+    }
+
+    public void testNestedDepthAllowed() throws Exception {
+        ThrowingConsumer<QueryShardContext> check = (context) -> {
+            NestedQueryBuilder queryBuilder = new NestedQueryBuilder("nested1", new MatchAllQueryBuilder(), ScoreMode.None);
+            OpenSearchToParentBlockJoinQuery blockJoinQuery = (OpenSearchToParentBlockJoinQuery) queryBuilder.toQuery(context);
+            Optional<BooleanClause> childLeg = ((BooleanQuery) blockJoinQuery.getChildQuery()).clauses()
+                .stream()
+                .filter(c -> c.getOccur() == BooleanClause.Occur.MUST)
+                .findFirst();
+            assertTrue(childLeg.isPresent());
+            assertEquals(new MatchAllDocsQuery(), childLeg.get().getQuery());
+        };
+        check.accept(createShardContext());
+        doWithDepth(randomIntBetween(1, 20), check);
+    }
+
+    public void testNestedDepthOnceOnly() throws Exception {
+        doWithDepth(1, this::checkOnceNested);
+    }
+
+    public void testNestedDepthDefault() throws Exception {
+        assertEquals(20, createShardContext().getIndexSettings().getMaxNestedQueryDepth());
+    }
+
+    private void checkOnceNested(QueryShardContext ctx) throws Exception {
+        {
+            NestedQueryBuilder depth2 = new NestedQueryBuilder(
+                "nested1",
+                new NestedQueryBuilder("nested1", new MatchAllQueryBuilder(), ScoreMode.None),
+                ScoreMode.None
+            );
+            IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> depth2.toQuery(ctx));
+            assertEquals(
+                "The depth of Nested Query is [2] has exceeded the allowed maximum of [1]. This maximum can be set by changing the [index.query.max_nested_depth] index level setting.",
+                e.getMessage()
+            );
+        }
+        {
+            QueryBuilder mustBjqMustBjq = new BoolQueryBuilder().must(
+                new NestedQueryBuilder("nested1", new MatchAllQueryBuilder(), ScoreMode.None)
+            ).must(new NestedQueryBuilder("nested1", new MatchAllQueryBuilder(), ScoreMode.None));
+            BooleanQuery bool = (BooleanQuery) mustBjqMustBjq.toQuery(ctx);
+            assertEquals(
+                "Can parse joins one by one without breaching depth limit",
+                2,
+                bool.clauses().stream().filter(c -> c.getQuery() instanceof OpenSearchToParentBlockJoinQuery).count()
+            );
+        }
+    }
+
+    public void testUpdateMaxDepthSettings() throws Exception {
+        doWithDepth(2, (ctx) -> {
+            assertEquals(ctx.getIndexSettings().getMaxNestedQueryDepth(), 2);
+            NestedQueryBuilder depth2 = new NestedQueryBuilder(
+                "nested1",
+                new NestedQueryBuilder("nested1", new MatchAllQueryBuilder(), ScoreMode.None),
+                ScoreMode.None
+            );
+            Query depth2Query = depth2.toQuery(ctx);
+            assertTrue(depth2Query instanceof OpenSearchToParentBlockJoinQuery);
+        });
+    }
+
+    void doWithDepth(int depth, ThrowingConsumer<QueryShardContext> test) throws Exception {
+        QueryShardContext context = createShardContext();
+        int defLimit = context.getIndexSettings().getMaxNestedQueryDepth();
+        assertTrue(defLimit > 0);
+        Settings updateSettings = Settings.builder()
+            .put(context.getIndexSettings().getSettings())
+            .put("index.query.max_nested_depth", depth)
+            .build();
+        context.getIndexSettings().updateIndexMetadata(IndexMetadata.builder("index").settings(updateSettings).build());
+        try {
+            test.accept(context);
+        } finally {
+            context.getIndexSettings()
+                .updateIndexMetadata(
+                    IndexMetadata.builder("index")
+                        .settings(
+                            Settings.builder()
+                                .put(context.getIndexSettings().getSettings())
+                                .put("index.query.max_nested_depth", defLimit)
+                                .build()
+                        )
+                        .build()
+                );
+        }
+    }
 }
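Note: the tests above pin down the new index.query.max_nested_depth setting: it is index-level, dynamically updatable, and defaults to 20 (see testNestedDepthDefault). A minimal sketch of flipping it, mirroring doWithDepth() above; the class name is illustrative:

    import org.opensearch.common.settings.Settings;

    public class NestedDepthSettingSketch {
        public static void main(String[] args) {
            Settings settings = Settings.builder()
                .put("index.query.max_nested_depth", 5) // permit at most 5 levels of nested queries
                .build();
            System.out.println(settings.get("index.query.max_nested_depth")); // 5
        }
    }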
diff --git a/server/src/test/java/org/opensearch/index/query/QueryBuilderVisitorTests.java b/server/src/test/java/org/opensearch/index/query/QueryBuilderVisitorTests.java
new file mode 100644
index 0000000000000..7849d3985ca59
--- /dev/null
+++ b/server/src/test/java/org/opensearch/index/query/QueryBuilderVisitorTests.java
@@ -0,0 +1,33 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.query;
+
+import org.apache.lucene.search.BooleanClause;
+import org.opensearch.test.AbstractBuilderTestCase;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class QueryBuilderVisitorTests extends AbstractBuilderTestCase {
+
+    public void testNoOpsVisitor() {
+        BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder();
+
+        List<QueryBuilder> visitedQueries = new ArrayList<>();
+        QueryBuilderVisitor qbv = createTestVisitor(visitedQueries);
+        boolQueryBuilder.visit(qbv);
+        QueryBuilderVisitor subQbv = qbv.getChildVisitor(BooleanClause.Occur.MUST_NOT);
+        assertEquals(0, visitedQueries.size());
+        assertEquals(qbv, subQbv);
+    }
+
+    protected static QueryBuilderVisitor createTestVisitor(List<QueryBuilder> visitedQueries) {
+        return QueryBuilderVisitor.NO_OP_VISITOR;
+    }
+}
diff --git a/server/src/test/java/org/opensearch/index/query/QueryShapeVisitorTests.java b/server/src/test/java/org/opensearch/index/query/QueryShapeVisitorTests.java
new file mode 100644
index 0000000000000..18b814aec61c2
--- /dev/null
+++ b/server/src/test/java/org/opensearch/index/query/QueryShapeVisitorTests.java
@@ -0,0 +1,31 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.query;
+
+import org.opensearch.test.OpenSearchTestCase;
+
+import static org.junit.Assert.assertEquals;
+
+public final class QueryShapeVisitorTests extends OpenSearchTestCase {
+    public void testQueryShapeVisitor() {
+        QueryBuilder builder = new BoolQueryBuilder().must(new TermQueryBuilder("foo", "bar"))
+            .filter(new ConstantScoreQueryBuilder(new RangeQueryBuilder("timestamp").from("12345677").to("2345678")))
+            .should(
+                new BoolQueryBuilder().must(new MatchQueryBuilder("text", "this is some text"))
+                    .mustNot(new RegexpQueryBuilder("color", "red.*"))
+            )
+            .must(new TermsQueryBuilder("genre", "action", "drama", "romance"));
+        QueryShapeVisitor shapeVisitor = new QueryShapeVisitor();
+        builder.visit(shapeVisitor);
+        assertEquals(
+            "{\"type\":\"bool\",\"must\"[{\"type\":\"term\"},{\"type\":\"terms\"}],\"filter\"[{\"type\":\"constant_score\",\"filter\"[{\"type\":\"range\"}]}],\"should\"[{\"type\":\"bool\",\"must\"[{\"type\":\"match\"}],\"must_not\"[{\"type\":\"regexp\"}]}]}",
+            shapeVisitor.toJson()
+        );
+    }
+}
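Note: both new test files exercise the QueryBuilder visitor API. A hedged sketch of a collecting visitor, assuming the interface consists of the two callbacks these tests use (accept, plus getChildVisitor keyed by the boolean clause); the class is illustrative, not part of this PR:

    import java.util.List;

    import org.apache.lucene.search.BooleanClause;
    import org.opensearch.index.query.QueryBuilder;
    import org.opensearch.index.query.QueryBuilderVisitor;

    public class CollectingVisitor implements QueryBuilderVisitor {
        private final List<QueryBuilder> visited;

        public CollectingVisitor(List<QueryBuilder> visited) {
            this.visited = visited;
        }

        @Override
        public void accept(QueryBuilder qb) {
            visited.add(qb); // record every builder the traversal reaches
        }

        @Override
        public QueryBuilderVisitor getChildVisitor(BooleanClause.Occur occur) {
            return this; // reuse the same collector for every child clause
        }
    }

Usage would be someQueryBuilder.visit(new CollectingVisitor(new ArrayList<>())), which is the shape the testVisit() methods later in this diff rely on.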
diff --git a/server/src/test/java/org/opensearch/index/query/QueryShardContextTests.java b/server/src/test/java/org/opensearch/index/query/QueryShardContextTests.java
index 2e499103540fe..1a2ad49a3f334 100644
--- a/server/src/test/java/org/opensearch/index/query/QueryShardContextTests.java
+++ b/server/src/test/java/org/opensearch/index/query/QueryShardContextTests.java
@@ -35,7 +35,6 @@
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.tests.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.IndexSearcher;
@@ -46,13 +45,14 @@
 import org.apache.lucene.search.ScoreMode;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.tests.index.RandomIndexWriter;
 import org.opensearch.Version;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.common.TriFunction;
-import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
-import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.util.BigArrays;
+import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
+import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.xcontent.NamedXContentRegistry;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.index.IndexSettings;
@@ -91,6 +91,8 @@
 
 public class QueryShardContextTests extends OpenSearchTestCase {
 
+    private static final int SHARD_ID = 0;
+
     public void testFailIfFieldMappingNotFound() {
         QueryShardContext context = createQueryShardContext(IndexMetadata.INDEX_UUID_NA_VALUE, null);
         context.setAllowUnmappedFields(false);
@@ -307,6 +309,11 @@ public void testFielddataLookupOneFieldManyReferences() throws IOException {
         assertEquals(Arrays.asList(expectedFirstDoc.toString(), expectedSecondDoc.toString()), collect("field", queryShardContext));
     }
 
+    public void testSearchLookupShardId() {
+        SearchLookup searchLookup = createQueryShardContext("uuid", null, null).lookup();
+        assertEquals(SHARD_ID, searchLookup.shardId());
+    }
+
     public static QueryShardContext createQueryShardContext(String indexUuid, String clusterAlias) {
         return createQueryShardContext(indexUuid, clusterAlias, null);
     }
@@ -343,7 +350,7 @@ private static QueryShardContext createQueryShardContext(
         }
         final long nowInMillis = randomNonNegativeLong();
         return new QueryShardContext(
-            0,
+            SHARD_ID,
             indexSettings,
             BigArrays.NON_RECYCLING_INSTANCE,
             null,
diff --git a/server/src/test/java/org/opensearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/QueryStringQueryBuilderTests.java
index 03ddb625c78a8..af4a34aa98116 100644
--- a/server/src/test/java/org/opensearch/index/query/QueryStringQueryBuilderTests.java
+++ b/server/src/test/java/org/opensearch/index/query/QueryStringQueryBuilderTests.java
@@ -32,7 +32,6 @@
 
 package org.opensearch.index.query;
 
-import org.apache.lucene.tests.analysis.MockSynonymAnalyzer;
 import org.apache.lucene.document.LongPoint;
 import org.apache.lucene.document.SortedNumericDocValuesField;
 import org.apache.lucene.index.Term;
@@ -61,6 +60,7 @@
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TermRangeQuery;
 import org.apache.lucene.search.WildcardQuery;
+import org.apache.lucene.tests.analysis.MockSynonymAnalyzer;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.automaton.Automata;
 import org.apache.lucene.util.automaton.Automaton;
@@ -68,12 +68,11 @@
 import org.apache.lucene.util.automaton.TooComplexToDeterminizeException;
 import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest;
 import org.opensearch.cluster.metadata.IndexMetadata;
-import org.opensearch.common.Strings;
 import org.opensearch.common.compress.CompressedXContent;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.Fuzziness;
-import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.common.xcontent.json.JsonXContent;
+import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.index.mapper.FieldNamesFieldMapper;
 import org.opensearch.index.mapper.MapperService;
 import org.opensearch.index.search.QueryStringQueryParser;
@@ -116,7 +115,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws
             .endObject()
             .endObject();
 
-        mapperService.merge("_doc", new CompressedXContent(Strings.toString(mapping)), MapperService.MergeReason.MAPPING_UPDATE);
+        mapperService.merge("_doc", new CompressedXContent(mapping.toString()), MapperService.MergeReason.MAPPING_UPDATE);
     }
 
     @Override
@@ -1113,9 +1112,7 @@ public void testDisabledFieldNamesField() throws Exception {
             context.getMapperService()
                 .merge(
                     "_doc",
-                    new CompressedXContent(
-                        Strings.toString(PutMappingRequest.simpleMapping("foo", "type=text", "_field_names", "enabled=false"))
-                    ),
+                    new CompressedXContent(PutMappingRequest.simpleMapping("foo", "type=text", "_field_names", "enabled=false").toString()),
                     MapperService.MergeReason.MAPPING_UPDATE
                 );
@@ -1129,9 +1126,7 @@
             context.getMapperService()
                 .merge(
                     "_doc",
-                    new CompressedXContent(
-                        Strings.toString(PutMappingRequest.simpleMapping("foo", "type=text", "_field_names", "enabled=true"))
-                    ),
+                    new CompressedXContent(PutMappingRequest.simpleMapping("foo", "type=text", "_field_names", "enabled=true").toString()),
                     MapperService.MergeReason.MAPPING_UPDATE
                 );
         }
     }
diff --git a/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java
index 0e38f6ad94d26..e72be29b85b63 100644
--- a/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java
+++ b/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java
@@ -45,9 +45,9 @@
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TermRangeQuery;
 import org.opensearch.OpenSearchParseException;
-import org.opensearch.core.common.ParsingException;
 import org.opensearch.common.geo.ShapeRelation;
 import org.opensearch.common.lucene.BytesRefs;
+import org.opensearch.core.common.ParsingException;
 import org.opensearch.index.mapper.DateFieldMapper;
 import org.opensearch.index.mapper.FieldNamesFieldMapper;
 import org.opensearch.index.mapper.MappedFieldType;
@@ -87,8 +87,8 @@ protected RangeQueryBuilder doCreateTestQueryBuilder() {
                 ZonedDateTime start = now.minusMillis(randomIntBetween(0, 1000000)).atZone(ZoneOffset.UTC);
                 ZonedDateTime end = now.plusMillis(randomIntBetween(0, 1000000)).atZone(ZoneOffset.UTC);
                 query = new RangeQueryBuilder(randomFrom(DATE_FIELD_NAME, DATE_RANGE_FIELD_NAME, DATE_ALIAS_FIELD_NAME));
-                query.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(start));
-                query.to(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(end));
+                query.from(DateFieldMapper.getDefaultDateTimeFormatter().format(start));
+                query.to(DateFieldMapper.getDefaultDateTimeFormatter().format(end));
                 // Create timestamp option only then we have a date mapper,
                 // otherwise we could trigger exception.
                 if (createShardContext().getMapperService().fieldType(DATE_FIELD_NAME) != null) {
diff --git a/server/src/test/java/org/opensearch/index/query/RangeQueryRewriteTests.java b/server/src/test/java/org/opensearch/index/query/RangeQueryRewriteTests.java
index 171fe8ef794f9..fc550cfd1916e 100644
--- a/server/src/test/java/org/opensearch/index/query/RangeQueryRewriteTests.java
+++ b/server/src/test/java/org/opensearch/index/query/RangeQueryRewriteTests.java
@@ -35,7 +35,6 @@
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.MultiReader;
 import org.apache.lucene.search.IndexSearcher;
-import org.opensearch.common.Strings;
 import org.opensearch.common.compress.CompressedXContent;
 import org.opensearch.common.util.BigArrays;
 import org.opensearch.common.xcontent.XContentFactory;
@@ -77,18 +76,17 @@ public void testRewriteMissingField() throws Exception {
 
     public void testRewriteMissingReader() throws Exception {
         IndexService indexService = createIndex("test");
-        String mapping = Strings.toString(
-            XContentFactory.jsonBuilder()
-                .startObject()
-                .startObject("type")
-                .startObject("properties")
-                .startObject("foo")
-                .field("type", "date")
-                .endObject()
-                .endObject()
-                .endObject()
-                .endObject()
-        );
+        String mapping = XContentFactory.jsonBuilder()
+            .startObject()
+            .startObject("type")
+            .startObject("properties")
+            .startObject("foo")
+            .field("type", "date")
+            .endObject()
+            .endObject()
+            .endObject()
+            .endObject()
+            .toString();
         indexService.mapperService().merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE);
         QueryRewriteContext context = new QueryShardContext(
             0,
@@ -116,18 +114,17 @@
 
     public void testRewriteEmptyReader() throws Exception {
         IndexService indexService = createIndex("test");
-        String mapping = Strings.toString(
-            XContentFactory.jsonBuilder()
-                .startObject()
-                .startObject("type")
-                .startObject("properties")
-                .startObject("foo")
-                .field("type", "date")
-                .endObject()
-                .endObject()
-                .endObject()
-                .endObject()
-        );
+        String mapping = XContentFactory.jsonBuilder()
+            .startObject()
+            .startObject("type")
+            .startObject("properties")
+            .startObject("foo")
+            .field("type", "date")
+            .endObject()
+            .endObject()
+            .endObject()
+            .endObject()
+            .toString();
         indexService.mapperService().merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE);
         IndexReader reader = new MultiReader();
         QueryRewriteContext context = new QueryShardContext(
diff --git a/server/src/test/java/org/opensearch/index/query/RegexpIntervalsSourceProviderTests.java b/server/src/test/java/org/opensearch/index/query/RegexpIntervalsSourceProviderTests.java
index 0eb5ea7589133..ee9839bf96668 100644
--- a/server/src/test/java/org/opensearch/index/query/RegexpIntervalsSourceProviderTests.java
+++ b/server/src/test/java/org/opensearch/index/query/RegexpIntervalsSourceProviderTests.java
@@ -8,9 +8,6 @@
 
 package org.opensearch.index.query;
 
-import static org.opensearch.index.query.IntervalsSourceProvider.Regexp;
-import static org.opensearch.index.query.IntervalsSourceProvider.fromXContent;
-
 import org.opensearch.core.common.io.stream.Writeable;
 import org.opensearch.core.xcontent.XContentParser;
 import org.opensearch.test.AbstractSerializingTestCase;
@@ -19,6 +16,9 @@
 import java.util.Arrays;
 import java.util.List;
 
+import static org.opensearch.index.query.IntervalsSourceProvider.Regexp;
+import static org.opensearch.index.query.IntervalsSourceProvider.fromXContent;
+
 public class RegexpIntervalsSourceProviderTests extends AbstractSerializingTestCase<Regexp> {
 
     private static final List<String> FLAGS = Arrays.asList("INTERSECTION", "COMPLEMENT", "EMPTY", "ANYSTRING", "INTERVAL", "NONE");
diff --git a/server/src/test/java/org/opensearch/index/query/ScoreModeTests.java b/server/src/test/java/org/opensearch/index/query/ScoreModeTests.java
index 6b699f2d40d1f..a71b8bea24b2f 100644
--- a/server/src/test/java/org/opensearch/index/query/ScoreModeTests.java
+++ b/server/src/test/java/org/opensearch/index/query/ScoreModeTests.java
@@ -33,8 +33,8 @@
 package org.opensearch.index.query;
 
 import org.opensearch.common.io.stream.BytesStreamOutput;
-import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.common.lucene.search.function.FunctionScoreQuery;
+import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.test.OpenSearchTestCase;
 
 import static org.hamcrest.Matchers.equalTo;
diff --git a/server/src/test/java/org/opensearch/index/query/ScriptScoreQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/ScriptScoreQueryBuilderTests.java
index 320ff20b2ef5d..29d45fb42ffcc 100644
--- a/server/src/test/java/org/opensearch/index/query/ScriptScoreQueryBuilderTests.java
+++ b/server/src/test/java/org/opensearch/index/query/ScriptScoreQueryBuilderTests.java
@@ -43,7 +43,9 @@
 import org.opensearch.test.AbstractQueryTestCase;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collections;
+import java.util.List;
 
 import static org.opensearch.index.query.QueryBuilders.matchAllQuery;
 import static org.hamcrest.CoreMatchers.instanceOf;
@@ -140,4 +142,11 @@ public void testDisallowExpensiveQueries() {
         OpenSearchException e = expectThrows(OpenSearchException.class, () -> queryBuilder.toQuery(queryShardContext));
         assertEquals("[script score] queries cannot be executed when 'search.allow_expensive_queries' is set to false.", e.getMessage());
     }
+
+    public void testVisit() {
+        ScriptScoreQueryBuilder scriptQueryBuilder = doCreateTestQueryBuilder();
+        List<QueryBuilder> visitedQueries = new ArrayList<>();
+        scriptQueryBuilder.visit(createTestVisitor(visitedQueries));
+        assertEquals(2, visitedQueries.size());
+    }
 }
diff --git a/server/src/test/java/org/opensearch/index/query/SimpleQueryStringBuilderTests.java b/server/src/test/java/org/opensearch/index/query/SimpleQueryStringBuilderTests.java
index 7e86edb1edefe..7688772173b08 100644
--- a/server/src/test/java/org/opensearch/index/query/SimpleQueryStringBuilderTests.java
+++ b/server/src/test/java/org/opensearch/index/query/SimpleQueryStringBuilderTests.java
@@ -32,7 +32,6 @@
 
 package org.opensearch.index.query;
 
-import org.apache.lucene.tests.analysis.MockSynonymAnalyzer;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queries.spans.SpanNearQuery;
@@ -51,6 +50,7 @@
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.SynonymQuery;
 import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.tests.analysis.MockSynonymAnalyzer;
 import org.apache.lucene.tests.util.TestUtil;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.common.settings.Settings;
diff --git a/server/src/test/java/org/opensearch/index/query/SourceFieldMatchQueryTests.java b/server/src/test/java/org/opensearch/index/query/SourceFieldMatchQueryTests.java
new file mode 100644
index 0000000000000..6af717a97b328
--- /dev/null
+++ b/server/src/test/java/org/opensearch/index/query/SourceFieldMatchQueryTests.java
@@ -0,0 +1,173 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.query;
+
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.opensearch.core.index.Index;
+import org.opensearch.index.mapper.MapperService;
+import org.opensearch.index.mapper.MapperServiceTestCase;
+import org.opensearch.index.mapper.ParsedDocument;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+
+import static org.mockito.Mockito.when;
+
+public class SourceFieldMatchQueryTests extends MapperServiceTestCase {
+
+    public void testAllPossibleScenarios() throws IOException {
+        MapperService mapperService = createMapperService(mapping(b -> {
+            b.startObject("dessert");
+            {
+                b.field("type", "match_only_text");
+            }
+            b.endObject();
+        }));
+
+        QueryShardContext queryShardContext = createQueryShardContext(mapperService);
+        when(queryShardContext.sourcePath("dessert")).thenReturn(Set.of("dessert"));
+        when(queryShardContext.index()).thenReturn(new Index("test_index", "uuid"));
+
+        String[] desserts = new String[] { "apple pie pie", "banana split pie", "chocolate cake" };
+        List<ParsedDocument> docs = new ArrayList<>();
+        for (String dessert : desserts) {
+            docs.add(mapperService.documentMapper().parse(source(b -> b.field("dessert", dessert))));
+        }
+        SourceFieldMatchQuery matchBoth = new SourceFieldMatchQuery(
+            QueryBuilders.matchQuery("dessert", "apple").doToQuery(queryShardContext), // Delegate query
+            QueryBuilders.matchQuery("dessert", "pie").doToQuery(queryShardContext), // Filter query
+            queryShardContext.getFieldType("dessert"),
+            queryShardContext
+        );
+
+        SourceFieldMatchQuery matchDelegate = new SourceFieldMatchQuery(
+            QueryBuilders.matchQuery("dessert", "apple").doToQuery(queryShardContext), // Delegate query
+            QueryBuilders.matchQuery("dessert", "juice").doToQuery(queryShardContext), // Filter query
+            queryShardContext.getFieldType("dessert"),
+            queryShardContext
+        );
+
+        SourceFieldMatchQuery matchFilter = new SourceFieldMatchQuery(
+            QueryBuilders.matchQuery("dessert", "tart").doToQuery(queryShardContext), // Delegate query
+            QueryBuilders.matchQuery("dessert", "pie").doToQuery(queryShardContext), // Filter query
+            queryShardContext.getFieldType("dessert"),
+            queryShardContext
+        );
+
+        SourceFieldMatchQuery matchNone = new SourceFieldMatchQuery(
+            QueryBuilders.matchQuery("dessert", "gulab").doToQuery(queryShardContext), // Delegate query
+            QueryBuilders.matchQuery("dessert", "jamun").doToQuery(queryShardContext), // Filter query
+            queryShardContext.getFieldType("dessert"),
+            queryShardContext
+        );
+
+        SourceFieldMatchQuery matchMultipleDocs = new SourceFieldMatchQuery(
+            QueryBuilders.matchAllQuery().toQuery(queryShardContext), // Delegate query
+            QueryBuilders.matchQuery("dessert", "pie").doToQuery(queryShardContext), // Filter query
+            queryShardContext.getFieldType("dessert"),
+            queryShardContext
+        );
+        try (Directory dir = newDirectory()) {
+            IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(mapperService.indexAnalyzer()));
+            for (ParsedDocument d : docs) {
+                iw.addDocument(d.rootDoc());
+            }
+            try (IndexReader reader = DirectoryReader.open(iw)) {
+                iw.close();
+                IndexSearcher searcher = new IndexSearcher(reader);
+                TopDocs topDocs = searcher.search(matchBoth, 10);
+                assertEquals(topDocs.totalHits.value, 1);
+                assertEquals(topDocs.scoreDocs[0].doc, 0);
+
+                topDocs = searcher.search(matchDelegate, 10);
+                assertEquals(topDocs.totalHits.value, 0);
+
+                topDocs = searcher.search(matchFilter, 10);
+                assertEquals(topDocs.totalHits.value, 0);
+
+                topDocs = searcher.search(matchNone, 10);
+                assertEquals(topDocs.totalHits.value, 0);
+
+                topDocs = searcher.search(matchMultipleDocs, 10);
+                assertEquals(topDocs.totalHits.value, 2);
+                // assert constant score
+                for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
+                    assertEquals(scoreDoc.score, 1.0, 0.00000000001);
+                }
+            }
+        }
+    }
+
+    public void testSourceDisabled() throws IOException {
+        MapperService mapperService = createMapperService(topMapping(b -> b.startObject("_source").field("enabled", false).endObject()));
+        QueryShardContext queryShardContext = createQueryShardContext(mapperService);
+        when(queryShardContext.sourcePath("dessert")).thenReturn(Set.of("dessert"));
+        when(queryShardContext.index()).thenReturn(new Index("test_index", "uuid"));
+        IllegalArgumentException e = expectThrows(
+            IllegalArgumentException.class,
+            () -> new SourceFieldMatchQuery(
+                QueryBuilders.matchQuery("dessert", "apple").doToQuery(queryShardContext), // Delegate query
+                QueryBuilders.matchQuery("dessert", "pie").doToQuery(queryShardContext), // Filter query
+                queryShardContext.getFieldType("dessert"),
+                queryShardContext
+            )
+        );
+        assertEquals(
+            "SourceFieldMatchQuery error: unable to fetch fields from _source field: "
+                + "_source is disabled in the mappings for index [test_index]",
+            e.getMessage()
+        );
+    }
+
+    public void testMissingField() throws IOException {
+        MapperService mapperService = createMapperService(mapping(b -> {
+            b.startObject("dessert");
+            {
+                b.field("type", "match_only_text");
+            }
+            b.endObject();
+        }));
+        QueryShardContext queryShardContext = createQueryShardContext(mapperService);
+        when(queryShardContext.sourcePath("dessert")).thenReturn(Set.of("dessert"));
+        when(queryShardContext.index()).thenReturn(new Index("test_index", "uuid"));
+
+        String[] desserts = new String[] { "apple pie pie", "banana split pie", "chocolate cake" };
+        List<ParsedDocument> docs = new ArrayList<>();
+        for (String dessert : desserts) {
+            docs.add(mapperService.documentMapper().parse(source(b -> b.field("dessert", dessert))));
+        }
+        SourceFieldMatchQuery matchDelegate = new SourceFieldMatchQuery(
+            QueryBuilders.matchQuery("dessert", "apple").doToQuery(queryShardContext), // Delegate query
+            QueryBuilders.matchQuery("username", "pie").doToQuery(queryShardContext), // Filter query missing field
+            queryShardContext.getFieldType("dessert"),
+            queryShardContext
+        );
+        try (Directory dir = newDirectory()) {
+            IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(mapperService.indexAnalyzer()));
+            for (ParsedDocument d : docs) {
+                iw.addDocument(d.rootDoc());
+            }
+            try (IndexReader reader = DirectoryReader.open(iw)) {
+                iw.close();
+                IndexSearcher searcher = new IndexSearcher(reader);
+                TopDocs topDocs = searcher.search(matchDelegate, 10);
+                assertEquals(topDocs.totalHits.value, 0);
+            }
+        }
+    }
+}
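Note: SourceFieldMatchQuery is exercised against the new match_only_text field type, a text variant that drops positional data from the index and re-verifies matches from _source, which is what the constant-score assertions above check. A minimal sketch of the mapping these tests build programmatically (the field name is taken from the tests; the class name is illustrative):

    import org.opensearch.common.xcontent.XContentFactory;
    import org.opensearch.core.xcontent.XContentBuilder;

    public class MatchOnlyTextMappingSketch {
        public static void main(String[] args) throws Exception {
            XContentBuilder mapping = XContentFactory.jsonBuilder()
                .startObject()
                .startObject("properties")
                .startObject("dessert")
                .field("type", "match_only_text")
                .endObject()
                .endObject()
                .endObject();
            System.out.println(mapping.toString()); // {"properties":{"dessert":{"type":"match_only_text"}}}
        }
    }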
diff --git a/server/src/test/java/org/opensearch/index/query/SpanContainingQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/SpanContainingQueryBuilderTests.java
index fff4369e155b3..2a5441b0ea932 100644
--- a/server/src/test/java/org/opensearch/index/query/SpanContainingQueryBuilderTests.java
+++ b/server/src/test/java/org/opensearch/index/query/SpanContainingQueryBuilderTests.java
@@ -38,6 +38,8 @@
 import org.opensearch.test.AbstractQueryTestCase;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 
 import static org.hamcrest.CoreMatchers.equalTo;
 import static org.hamcrest.CoreMatchers.instanceOf;
@@ -192,4 +194,10 @@ public void testFromJsonWithNonDefaultBoostInLittleQuery() {
             equalTo("span_containing [little] as a nested span clause can't have non-default boost value [2.0]")
         );
     }
+
+    public void testVisit() {
+        List<QueryBuilder> visitorQueries = new ArrayList<>();
+        doCreateTestQueryBuilder().visit(createTestVisitor(visitorQueries));
+        assertEquals(3, visitorQueries.size());
+    }
 }
diff --git a/server/src/test/java/org/opensearch/index/query/SpanFirstQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/SpanFirstQueryBuilderTests.java
index 2733a500fd464..82bb77726b037 100644
--- a/server/src/test/java/org/opensearch/index/query/SpanFirstQueryBuilderTests.java
+++ b/server/src/test/java/org/opensearch/index/query/SpanFirstQueryBuilderTests.java
@@ -34,13 +34,14 @@
 
 import org.apache.lucene.queries.spans.SpanFirstQuery;
 import org.apache.lucene.search.Query;
+import org.opensearch.common.xcontent.XContentFactory;
 import org.opensearch.core.common.ParsingException;
-import org.opensearch.common.Strings;
 import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.common.xcontent.XContentFactory;
 import org.opensearch.test.AbstractQueryTestCase;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 
 import static org.opensearch.index.query.QueryBuilders.spanTermQuery;
 import static org.hamcrest.CoreMatchers.equalTo;
@@ -71,7 +72,7 @@ public void testParseEnd() throws IOException {
             builder.endObject();
             builder.endObject();
 
-            ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(Strings.toString(builder)));
+            ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(builder.toString()));
             assertTrue(e.getMessage().contains("span_first must have [end] set"));
         }
         {
@@ -82,7 +83,7 @@ public void testParseEnd() throws IOException {
             builder.endObject();
             builder.endObject();
 
-            ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(Strings.toString(builder)));
+            ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(builder.toString()));
             assertTrue(e.getMessage().contains("span_first must have [match] span query clause"));
         }
     }
@@ -129,4 +130,10 @@ public void testFromJsonWithNonDefaultBoostInMatchQuery() {
         Exception exception = expectThrows(ParsingException.class, () -> parseQuery(json));
         assertThat(exception.getMessage(), equalTo("span_first [match] as a nested span clause can't have non-default boost value [2.0]"));
     }
+
+    public void testVisit() {
+        List<QueryBuilder> visitorQueries = new ArrayList<>();
+        doCreateTestQueryBuilder().visit(createTestVisitor(visitorQueries));
+        assertEquals(2, visitorQueries.size());
+    }
 }
diff --git a/server/src/test/java/org/opensearch/index/query/SpanMultiTermQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/SpanMultiTermQueryBuilderTests.java
index bc514208c803d..b4abff118802e 100644
--- a/server/src/test/java/org/opensearch/index/query/SpanMultiTermQueryBuilderTests.java
+++ b/server/src/test/java/org/opensearch/index/query/SpanMultiTermQueryBuilderTests.java
@@ -36,7 +36,6 @@
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.tests.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queries.SpanMatchNoDocsQuery;
 import org.apache.lucene.queries.spans.FieldMaskingSpanQuery;
@@ -52,15 +51,17 @@
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopTermsRewrite;
 import org.apache.lucene.store.Directory;
-import org.opensearch.common.Strings;
+import org.apache.lucene.tests.index.RandomIndexWriter;
 import org.opensearch.common.compress.CompressedXContent;
-import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.common.lucene.search.SpanBooleanQueryRewriteWithMaxClause;
+import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.index.mapper.MapperService;
 import org.opensearch.test.AbstractQueryTestCase;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 
 import static java.util.Collections.singleton;
 import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;
@@ -92,7 +93,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws
             .endObject()
             .endObject();
 
-        mapperService.merge("_doc", new CompressedXContent(Strings.toString(mapping)), MapperService.MergeReason.MAPPING_UPDATE);
+        mapperService.merge("_doc", new CompressedXContent(mapping.toString()), MapperService.MergeReason.MAPPING_UPDATE);
     }
 
     @Override
@@ -284,8 +285,9 @@ public void testTermExpansionExceptionOnSpanFailure() throws Exception {
             BooleanQuery.setMaxClauseCount(1);
             try {
                 QueryBuilder queryBuilder = new SpanMultiTermQueryBuilder(QueryBuilders.prefixQuery("body", "bar"));
-                Query query = queryBuilder.toQuery(createShardContext(new IndexSearcher(reader)));
-                RuntimeException exc = expectThrows(RuntimeException.class, () -> query.rewrite(reader));
+                IndexSearcher searcher = new IndexSearcher(reader);
+                Query query = queryBuilder.toQuery(createShardContext(searcher));
+                RuntimeException exc = expectThrows(RuntimeException.class, () -> query.rewrite(searcher));
                 assertThat(exc.getMessage(), containsString("maxClauseCount"));
             } finally {
                 BooleanQuery.setMaxClauseCount(origBoolMaxClauseCount);
@@ -306,4 +308,12 @@ public void testTopNMultiTermsRewriteInsideSpan() throws Exception {
 
         }
     }
+
+    public void testVisit() {
+        MultiTermQueryBuilder multiTermQueryBuilder = new PrefixQueryBuilderTests().createTestQueryBuilder();
+        SpanMultiTermQueryBuilder spanMultiTermQueryBuilder = new SpanMultiTermQueryBuilder(multiTermQueryBuilder);
+        List<QueryBuilder> visitorQueries = new ArrayList<>();
+        spanMultiTermQueryBuilder.visit(createTestVisitor(visitorQueries));
+        assertEquals(2, visitorQueries.size());
+    }
 }
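Note: the hunk above swaps query.rewrite(reader) for query.rewrite(searcher), tracking the Lucene 9.x API where Query#rewrite takes an IndexSearcher instead of an IndexReader. A small sketch of the rewrite-to-fixpoint loop under the new signature (the helper name is illustrative):

    import java.io.IOException;

    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;

    public class RewriteSketch {
        // Rewrite until the query stops changing, as Lucene does before creating weights.
        public static Query fullyRewrite(Query query, IndexSearcher searcher) throws IOException {
            Query rewritten = query.rewrite(searcher); // was: query.rewrite(reader)
            while (rewritten != query) {
                query = rewritten;
                rewritten = query.rewrite(searcher);
            }
            return rewritten;
        }
    }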
diff --git a/server/src/test/java/org/opensearch/index/query/SpanNearQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/SpanNearQueryBuilderTests.java
index 8c9130d4b7bbd..c97b541da2199 100644
--- a/server/src/test/java/org/opensearch/index/query/SpanNearQueryBuilderTests.java
+++ b/server/src/test/java/org/opensearch/index/query/SpanNearQueryBuilderTests.java
@@ -41,7 +41,9 @@
 import org.opensearch.test.AbstractQueryTestCase;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Iterator;
+import java.util.List;
 
 import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.CoreMatchers.either;
@@ -231,4 +233,12 @@ public void testFromJsonWithNonDefaultBoostInInnerQuery() {
         Exception exception = expectThrows(ParsingException.class, () -> parseQuery(json));
         assertThat(exception.getMessage(), equalTo("span_near [clauses] as a nested span clause can't have non-default boost value [2.0]"));
     }
+
+    public void testVisit() {
+        SpanTermQueryBuilder[] spanTermQueries = new SpanTermQueryBuilderTests().createSpanTermQueryBuilders(1);
+        SpanNearQueryBuilder spanNearQueryBuilder = new SpanNearQueryBuilder(spanTermQueries[0], 1);
+        List<QueryBuilder> visitorQueries = new ArrayList<>();
+        spanNearQueryBuilder.visit(createTestVisitor(visitorQueries));
+        assertEquals(2, visitorQueries.size());
+    }
 }
diff --git a/server/src/test/java/org/opensearch/index/query/SpanNotQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/SpanNotQueryBuilderTests.java
index 8847aa9882a20..ff42b271ffca5 100644
--- a/server/src/test/java/org/opensearch/index/query/SpanNotQueryBuilderTests.java
+++ b/server/src/test/java/org/opensearch/index/query/SpanNotQueryBuilderTests.java
@@ -34,13 +34,14 @@
 
 import org.apache.lucene.queries.spans.SpanNotQuery;
 import org.apache.lucene.search.Query;
+import org.opensearch.common.xcontent.XContentFactory;
 import org.opensearch.core.common.ParsingException;
-import org.opensearch.common.Strings;
 import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.common.xcontent.XContentFactory;
 import org.opensearch.test.AbstractQueryTestCase;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 
 import static org.opensearch.index.query.QueryBuilders.spanNearQuery;
 import static org.opensearch.index.query.QueryBuilders.spanTermQuery;
@@ -126,7 +127,7 @@ public void testParseDist() throws IOException {
         builder.field("dist", 3);
         builder.endObject();
         builder.endObject();
-        SpanNotQueryBuilder query = (SpanNotQueryBuilder) parseQuery(Strings.toString(builder));
+        SpanNotQueryBuilder query = (SpanNotQueryBuilder) parseQuery(builder.toString());
         assertThat(query.pre(), equalTo(3));
         assertThat(query.post(), equalTo(3));
         assertNotNull(query.includeQuery());
@@ -147,7 +148,7 @@ public void testParserExceptions() throws IOException {
             builder.endObject();
             builder.endObject();
 
-            ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(Strings.toString(builder)));
+            ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(builder.toString()));
             assertThat(e.getDetailedMessage(), containsString("span_not must have [include]"));
         }
         {
@@ -162,7 +163,7 @@ public void testParserExceptions() throws IOException {
             builder.endObject();
             builder.endObject();
 
-            ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(Strings.toString(builder)));
+            ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(builder.toString()));
             assertThat(e.getDetailedMessage(), containsString("span_not must have [exclude]"));
         }
         {
@@ -180,7 +181,7 @@ public void testParserExceptions() throws IOException {
             builder.endObject();
             builder.endObject();
 
-            ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(Strings.toString(builder)));
+            ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(builder.toString()));
             assertThat(e.getDetailedMessage(), containsString("span_not can either use [dist] or [pre] & [post] (or none)"));
         }
     }
@@ -317,4 +318,10 @@ public void testFromJsonWithNonDefaultBoostInExcludeQuery() {
         Exception exception = expectThrows(ParsingException.class, () -> parseQuery(json));
         assertThat(exception.getMessage(), equalTo("span_not [exclude] as a nested span clause can't have non-default boost value [2.0]"));
     }
+
+    public void testVisit() {
+        List<QueryBuilder> visitedQueries = new ArrayList<>();
+        doCreateTestQueryBuilder().visit(createTestVisitor(visitedQueries));
+        assertEquals(3, visitedQueries.size());
+    }
 }
diff --git a/server/src/test/java/org/opensearch/index/query/SpanOrQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/SpanOrQueryBuilderTests.java
index 45323b5df74df..eb4b8fd486cb0 100644
--- a/server/src/test/java/org/opensearch/index/query/SpanOrQueryBuilderTests.java
+++ b/server/src/test/java/org/opensearch/index/query/SpanOrQueryBuilderTests.java
@@ -39,7 +39,9 @@
 import org.opensearch.test.AbstractQueryTestCase;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Iterator;
+import java.util.List;
 
 import static org.hamcrest.CoreMatchers.equalTo;
 import static org.hamcrest.CoreMatchers.instanceOf;
@@ -137,4 +139,12 @@ public void testFromJsonWithNonDefaultBoostInInnerQuery() {
         Exception exception = expectThrows(ParsingException.class, () -> parseQuery(json));
         assertThat(exception.getMessage(), equalTo("span_or [clauses] as a nested span clause can't have non-default boost value [2.0]"));
     }
+
+    public void testVisit() {
+        SpanTermQueryBuilder spanTermQueryBuilder = new SpanTermQueryBuilder("demo", "demo");
+        SpanOrQueryBuilder spanOrQueryBuilder = new SpanOrQueryBuilder(spanTermQueryBuilder);
+        List<QueryBuilder> visitedQueries = new ArrayList<>();
+        spanOrQueryBuilder.visit(createTestVisitor(visitedQueries));
+        assertEquals(2, visitedQueries.size());
+    }
 }
diff --git a/server/src/test/java/org/opensearch/index/query/SpanTermQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/SpanTermQueryBuilderTests.java
index bacaaa150ec93..aa864f620b8b5 100644
--- a/server/src/test/java/org/opensearch/index/query/SpanTermQueryBuilderTests.java
+++ b/server/src/test/java/org/opensearch/index/query/SpanTermQueryBuilderTests.java
@@ -33,12 +33,13 @@
 package org.opensearch.index.query;
 
 import com.fasterxml.jackson.core.io.JsonStringEncoder;
-import org.apache.lucene.queries.spans.SpanTermQuery;
+
 import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.spans.SpanTermQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
-import org.opensearch.core.common.ParsingException;
 import org.opensearch.common.lucene.BytesRefs;
+import org.opensearch.core.common.ParsingException;
 import org.opensearch.index.mapper.MappedFieldType;
 
 import java.io.IOException;
diff --git a/server/src/test/java/org/opensearch/index/query/SpanWithinQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/SpanWithinQueryBuilderTests.java
index e8b6a21254ff8..74b955e872d51 100644
--- a/server/src/test/java/org/opensearch/index/query/SpanWithinQueryBuilderTests.java
+++ b/server/src/test/java/org/opensearch/index/query/SpanWithinQueryBuilderTests.java
@@ -38,6 +38,8 @@
 import org.opensearch.test.AbstractQueryTestCase;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 
 import static org.hamcrest.CoreMatchers.equalTo;
 import static org.hamcrest.CoreMatchers.instanceOf;
@@ -189,4 +191,10 @@ public void testFromJsonWithNonDefaultBoostInLittleQuery() {
equalTo("span_within [little] as a nested span clause can't have non-default boost value [2.0]") ); } + + public void testVisit() { + List<QueryBuilder> visitedQueries = new ArrayList<>(); + doCreateTestQueryBuilder().visit(createTestVisitor(visitedQueries)); + assertEquals(3, visitedQueries.size()); + } } diff --git a/server/src/test/java/org/opensearch/index/query/TermQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/TermQueryBuilderTests.java index b42ac35ac9407..c5bdf9b586df1 100644 --- a/server/src/test/java/org/opensearch/index/query/TermQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/TermQueryBuilderTests.java @@ -33,8 +33,10 @@ package org.opensearch.index.query; import com.fasterxml.jackson.core.io.JsonStringEncoder; + import org.apache.lucene.index.Term; import org.apache.lucene.search.AutomatonQuery; +import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.PointRangeQuery; import org.apache.lucene.search.Query; @@ -111,6 +113,7 @@ protected void doAssertLuceneQuery(TermQueryBuilder queryBuilder, Query query, Q either(instanceOf(TermQuery.class)).or(instanceOf(PointRangeQuery.class)) .or(instanceOf(MatchNoDocsQuery.class)) .or(instanceOf(AutomatonQuery.class)) + .or(instanceOf(IndexOrDocValuesQuery.class)) ); MappedFieldType mapper = context.fieldMapper(queryBuilder.fieldName()); if (query instanceof TermQuery) { diff --git a/server/src/test/java/org/opensearch/index/query/TermsQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/TermsQueryBuilderTests.java index 3ee04450e4e73..32bf290627b63 100644 --- a/server/src/test/java/org/opensearch/index/query/TermsQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/TermsQueryBuilderTests.java @@ -34,6 +34,7 @@ import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; +import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.PointInSetQuery; import org.apache.lucene.search.Query; @@ -42,12 +43,11 @@ import org.opensearch.OpenSearchException; import org.opensearch.action.get.GetRequest; import org.opensearch.action.get.GetResponse; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.ParsingException; -import org.opensearch.common.Strings; import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.get.GetResult; import org.opensearch.indices.TermsLookup; @@ -136,6 +136,7 @@ protected void doAssertLuceneQuery(TermsQueryBuilder queryBuilder, Query query, either(instanceOf(TermInSetQuery.class)).or(instanceOf(PointInSetQuery.class)) .or(instanceOf(ConstantScoreQuery.class)) .or(instanceOf(MatchNoDocsQuery.class)) + .or(instanceOf(IndexOrDocValuesQuery.class)) ); if (query instanceof ConstantScoreQuery) { assertThat(((ConstantScoreQuery) query).getQuery(), instanceOf(BooleanQuery.class)); @@ -234,7 +235,7 @@ public GetResponse executeGet(GetRequest getRequest) { builder.startObject(); builder.array(termsPath, randomTerms.toArray(new Object[0])); builder.endObject(); - json = Strings.toString(builder); + json = 
builder.toString(); } catch (IOException ex) { throw new OpenSearchException("boom", ex); } @@ -269,9 +270,14 @@ public void testNumeric() throws IOException { } public void testTermsQueryWithMultipleFields() throws IOException { - String query = Strings.toString( - XContentFactory.jsonBuilder().startObject().startObject("terms").array("foo", 123).array("bar", 456).endObject().endObject() - ); + String query = XContentFactory.jsonBuilder() + .startObject() + .startObject("terms") + .array("foo", 123) + .array("bar", 456) + .endObject() + .endObject() + .toString(); ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(query)); assertEquals("[" + TermsQueryBuilder.NAME + "] query does not support multiple fields", e.getMessage()); } diff --git a/server/src/test/java/org/opensearch/index/query/TermsSetQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/TermsSetQueryBuilderTests.java index 43ca3139e30cb..63ea71458f1e7 100644 --- a/server/src/test/java/org/opensearch/index/query/TermsSetQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/TermsSetQueryBuilderTests.java @@ -52,7 +52,6 @@ import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest; -import org.opensearch.common.Strings; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.index.mapper.MapperService; @@ -93,7 +92,7 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws String docType = "_doc"; mapperService.merge( docType, - new CompressedXContent(Strings.toString(PutMappingRequest.simpleMapping("m_s_m", "type=long"))), + new CompressedXContent(PutMappingRequest.simpleMapping("m_s_m", "type=long").toString()), MapperService.MergeReason.MAPPING_UPDATE ); } diff --git a/server/src/test/java/org/opensearch/index/query/WrapperQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/WrapperQueryBuilderTests.java index 09605bcd0a8dd..4135f6e0ef049 100644 --- a/server/src/test/java/org/opensearch/index/query/WrapperQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/WrapperQueryBuilderTests.java @@ -40,8 +40,7 @@ import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; @@ -70,7 +69,7 @@ protected WrapperQueryBuilder doCreateTestQueryBuilder() { QueryBuilder wrappedQuery = RandomQueryBuilder.createQuery(random()); BytesReference bytes; try { - bytes = XContentHelper.toXContent(wrappedQuery, XContentType.JSON, false); + bytes = org.opensearch.core.xcontent.XContentHelper.toXContent(wrappedQuery, MediaTypeRegistry.JSON, false); } catch (IOException e) { throw new UncheckedIOException(e); } diff --git a/server/src/test/java/org/opensearch/index/query/functionscore/FieldValueFactorFunctionModifierTests.java b/server/src/test/java/org/opensearch/index/query/functionscore/FieldValueFactorFunctionModifierTests.java index b283062de11b7..49cdd6de63189 100644 --- a/server/src/test/java/org/opensearch/index/query/functionscore/FieldValueFactorFunctionModifierTests.java +++ 
b/server/src/test/java/org/opensearch/index/query/functionscore/FieldValueFactorFunctionModifierTests.java @@ -33,8 +33,8 @@ package org.opensearch.index.query.functionscore; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.lucene.search.function.FieldValueFactorFunction; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.test.OpenSearchTestCase; import static org.hamcrest.Matchers.equalTo; diff --git a/server/src/test/java/org/opensearch/index/query/functionscore/FunctionScoreEquivalenceTests.java b/server/src/test/java/org/opensearch/index/query/functionscore/FunctionScoreEquivalenceTests.java index a78cccbbc8083..95a92af65fc8b 100644 --- a/server/src/test/java/org/opensearch/index/query/functionscore/FunctionScoreEquivalenceTests.java +++ b/server/src/test/java/org/opensearch/index/query/functionscore/FunctionScoreEquivalenceTests.java @@ -34,9 +34,9 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermQuery; import org.apache.lucene.tests.search.RandomApproximationQuery; import org.apache.lucene.tests.search.SearchEquivalenceTestBase; -import org.apache.lucene.search.TermQuery; import org.opensearch.bootstrap.BootstrapForTesting; import org.opensearch.common.lucene.search.function.FunctionScoreQuery; diff --git a/server/src/test/java/org/opensearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java index 8e4ae9c6cfede..4e64a1ec03688 100644 --- a/server/src/test/java/org/opensearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/functionscore/FunctionScoreQueryBuilderTests.java @@ -39,19 +39,19 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; -import org.opensearch.core.common.ParsingException; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.geo.GeoPoint; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.lucene.search.function.CombineFunction; import org.opensearch.common.lucene.search.function.FieldValueFactorFunction; import org.opensearch.common.lucene.search.function.FunctionScoreQuery; import org.opensearch.common.lucene.search.function.WeightFactorFunction; import org.opensearch.common.unit.DistanceUnit; -import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.XContentHelper; +import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.mapper.SeqNoFieldMapper; import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.index.query.MatchNoneQueryBuilder; @@ -69,10 +69,10 @@ import org.opensearch.search.MultiValueMode; import org.opensearch.test.AbstractQueryTestCase; import org.opensearch.test.TestGeoShapeFieldMapperPlugin; -import org.hamcrest.CoreMatchers; -import org.hamcrest.Matcher; 
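// The testVisit() additions in the hunks above exercise the QueryBuilder.visit()
// traversal: visit() walks a builder tree and hands each node to a
// QueryBuilderVisitor, which is how these tests count visited clauses (e.g. 3 for
// a span-within builder plus its two inner span clauses). A minimal sketch of the
// list-collecting visitor that createTestVisitor(visitedQueries) presumably
// returns is shown below; the two-method visitor shape is an assumption for
// illustration, not something this diff confirms.
import java.util.List;

import org.apache.lucene.search.BooleanClause;
import org.opensearch.index.query.QueryBuilder;
import org.opensearch.index.query.QueryBuilderVisitor;

class CollectingVisitorSketch {
    // Records every builder the traversal reaches and reuses itself for nested
    // clauses, which is all the clause-counting assertions in testVisit() need.
    static QueryBuilderVisitor collectingVisitor(List<QueryBuilder> visited) {
        return new QueryBuilderVisitor() {
            @Override
            public void accept(QueryBuilder qb) {
                visited.add(qb);
            }

            @Override
            public QueryBuilderVisitor getChildVisitor(BooleanClause.Occur occur) {
                return this;
            }
        };
    }
}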
import org.joda.time.DateTime; import org.joda.time.DateTimeZone; +import org.hamcrest.CoreMatchers; +import org.hamcrest.Matcher; import java.io.IOException; import java.util.Arrays; @@ -360,7 +360,7 @@ public void testParseFunctionsArray() throws IOException { * given that we copy part of the decay functions as bytes, we test that fromXContent and toXContent both work no matter what the * initial format was */ - for (XContentType xContentType : XContentType.values()) { + for (MediaType xContentType : XContentType.values()) { assertThat(queryBuilder, instanceOf(FunctionScoreQueryBuilder.class)); FunctionScoreQueryBuilder functionScoreQueryBuilder = (FunctionScoreQueryBuilder) queryBuilder; assertThat(functionScoreQueryBuilder.query(), instanceOf(TermQueryBuilder.class)); @@ -521,20 +521,19 @@ public void testProperErrorMessageWhenMissingFunction() throws IOException { } public void testWeight1fStillProducesWeightFunction() throws IOException { - String queryString = Strings.toString( - jsonBuilder().startObject() - .startObject("function_score") - .startArray("functions") - .startObject() - .startObject("field_value_factor") - .field("field", INT_FIELD_NAME) - .endObject() - .field("weight", 1.0) - .endObject() - .endArray() - .endObject() - .endObject() - ); + String queryString = jsonBuilder().startObject() + .startObject("function_score") + .startArray("functions") + .startObject() + .startObject("field_value_factor") + .field("field", INT_FIELD_NAME) + .endObject() + .field("weight", 1.0) + .endObject() + .endArray() + .endObject() + .endObject() + .toString(); QueryBuilder query = parseQuery(queryString); assertThat(query, instanceOf(FunctionScoreQueryBuilder.class)); FunctionScoreQueryBuilder functionScoreQueryBuilder = (FunctionScoreQueryBuilder) query; @@ -561,36 +560,34 @@ public void testWeight1fStillProducesWeightFunction() throws IOException { } public void testProperErrorMessagesForMisplacedWeightsAndFunctions() throws IOException { - String query = Strings.toString( - jsonBuilder().startObject() - .startObject("function_score") - .startArray("functions") - .startObject() - .startObject("script_score") - .field("script", "3") - .endObject() - .endObject() - .endArray() - .field("weight", 2) - .endObject() - .endObject() - ); + String query = jsonBuilder().startObject() + .startObject("function_score") + .startArray("functions") + .startObject() + .startObject("script_score") + .field("script", "3") + .endObject() + .endObject() + .endArray() + .field("weight", 2) + .endObject() + .endObject() + .toString(); expectParsingException( query, "[you can either define [functions] array or a single function, not both. already " + "found [functions] array, now encountering [weight].]" ); - query = Strings.toString( - jsonBuilder().startObject() - .startObject("function_score") - .field("weight", 2) - .startArray("functions") - .startObject() - .endObject() - .endArray() - .endObject() - .endObject() - ); + query = jsonBuilder().startObject() + .startObject("function_score") + .field("weight", 2) + .startArray("functions") + .startObject() + .endObject() + .endArray() + .endObject() + .endObject() + .toString(); expectParsingException( query, "[you can either define [functions] array or a single function, not both. 
already found " diff --git a/server/src/test/java/org/opensearch/index/query/functionscore/FunctionScoreTests.java b/server/src/test/java/org/opensearch/index/query/functionscore/FunctionScoreTests.java index 3bcda9f5e762f..0ea91efc568d0 100644 --- a/server/src/test/java/org/opensearch/index/query/functionscore/FunctionScoreTests.java +++ b/server/src/test/java/org/opensearch/index/query/functionscore/FunctionScoreTests.java @@ -45,13 +45,13 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; -import org.apache.lucene.tests.search.RandomApproximationQuery; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.Weight; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.search.RandomApproximationQuery; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.BytesRef; import org.opensearch.OpenSearchException; diff --git a/server/src/test/java/org/opensearch/index/query/functionscore/ScoreFunctionBuilderTests.java b/server/src/test/java/org/opensearch/index/query/functionscore/ScoreFunctionBuilderTests.java index 9badef6af8b04..db51bc89b041f 100644 --- a/server/src/test/java/org/opensearch/index/query/functionscore/ScoreFunctionBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/functionscore/ScoreFunctionBuilderTests.java @@ -43,6 +43,7 @@ import org.opensearch.index.query.QueryShardContext; import org.opensearch.script.Script; import org.opensearch.test.OpenSearchTestCase; + import org.mockito.Mockito; public class ScoreFunctionBuilderTests extends OpenSearchTestCase { diff --git a/server/src/test/java/org/opensearch/index/reindex/AbstractBulkByScrollRequestTestCase.java b/server/src/test/java/org/opensearch/index/reindex/AbstractBulkByScrollRequestTestCase.java index 67969f2870f08..1d22d9c19cf05 100644 --- a/server/src/test/java/org/opensearch/index/reindex/AbstractBulkByScrollRequestTestCase.java +++ b/server/src/test/java/org/opensearch/index/reindex/AbstractBulkByScrollRequestTestCase.java @@ -34,8 +34,8 @@ import org.opensearch.action.search.SearchRequest; import org.opensearch.action.support.ActiveShardCount; +import org.opensearch.core.tasks.TaskId; import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.tasks.TaskId; import org.opensearch.test.AbstractXContentTestCase; import org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/index/reindex/BulkByScrollResponseTests.java b/server/src/test/java/org/opensearch/index/reindex/BulkByScrollResponseTests.java index 1677416249bd2..1d4c3c57f73bb 100644 --- a/server/src/test/java/org/opensearch/index/reindex/BulkByScrollResponseTests.java +++ b/server/src/test/java/org/opensearch/index/reindex/BulkByScrollResponseTests.java @@ -51,8 +51,8 @@ import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; -import static org.apache.lucene.tests.util.TestUtil.randomSimpleString; import static org.opensearch.common.unit.TimeValue.timeValueMillis; +import static org.apache.lucene.tests.util.TestUtil.randomSimpleString; public class BulkByScrollResponseTests extends AbstractXContentTestCase<BulkByScrollResponse> { diff --git a/server/src/test/java/org/opensearch/index/reindex/BulkByScrollTaskStatusOrExceptionTests.java 
b/server/src/test/java/org/opensearch/index/reindex/BulkByScrollTaskStatusOrExceptionTests.java index 8e28580fc9296..ee9b422aa8178 100644 --- a/server/src/test/java/org/opensearch/index/reindex/BulkByScrollTaskStatusOrExceptionTests.java +++ b/server/src/test/java/org/opensearch/index/reindex/BulkByScrollTaskStatusOrExceptionTests.java @@ -36,8 +36,8 @@ import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.test.AbstractXContentTestCase; import org.opensearch.index.reindex.BulkByScrollTask.StatusOrException; +import org.opensearch.test.AbstractXContentTestCase; import java.io.IOException; import java.util.function.Supplier; diff --git a/server/src/test/java/org/opensearch/index/reindex/BulkByScrollTaskStatusTests.java b/server/src/test/java/org/opensearch/index/reindex/BulkByScrollTaskStatusTests.java index 8d2c24ff5320d..4942d31f4ca53 100644 --- a/server/src/test/java/org/opensearch/index/reindex/BulkByScrollTaskStatusTests.java +++ b/server/src/test/java/org/opensearch/index/reindex/BulkByScrollTaskStatusTests.java @@ -41,9 +41,9 @@ import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.index.reindex.BulkByScrollTask.Status; import org.opensearch.test.AbstractXContentTestCase; import org.hamcrest.Matchers; -import org.opensearch.index.reindex.BulkByScrollTask.Status; import java.io.IOException; import java.util.HashMap; @@ -55,8 +55,8 @@ import static java.lang.Math.abs; import static java.util.stream.Collectors.toList; -import static org.apache.lucene.tests.util.TestUtil.randomSimpleString; import static org.hamcrest.Matchers.equalTo; +import static org.apache.lucene.tests.util.TestUtil.randomSimpleString; public class BulkByScrollTaskStatusTests extends AbstractXContentTestCase<BulkByScrollTask.Status> { diff --git a/server/src/test/java/org/opensearch/index/reindex/BulkByScrollTaskTests.java b/server/src/test/java/org/opensearch/index/reindex/BulkByScrollTaskTests.java index 732d6eb8b1f6f..c66d858fff2b4 100644 --- a/server/src/test/java/org/opensearch/index/reindex/BulkByScrollTaskTests.java +++ b/server/src/test/java/org/opensearch/index/reindex/BulkByScrollTaskTests.java @@ -32,11 +32,10 @@ package org.opensearch.index.reindex; -import org.opensearch.common.Strings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -122,7 +121,7 @@ public void testXContentRepresentationOfUnlimitedRequestsPerSecond() throws IOEx timeValueMillis(0) ); status.toXContent(builder, ToXContent.EMPTY_PARAMS); - assertThat(Strings.toString(builder), containsString("\"requests_per_second\":-1")); + assertThat(builder.toString(), containsString("\"requests_per_second\":-1")); } public void testXContentRepresentationOfUnfinishedSlices() throws IOException { @@ -148,7 +147,7 @@ public void testXContentRepresentationOfUnfinishedSlices() throws IOException { null ); status.toXContent(builder, ToXContent.EMPTY_PARAMS); - assertThat(Strings.toString(builder), containsString("\"slices\":[null,null,{\"slice_id\":2")); + assertThat(builder.toString(), containsString("\"slices\":[null,null,{\"slice_id\":2")); } public 
void testXContentRepresentationOfSliceFailures() throws IOException { @@ -159,7 +158,7 @@ public void testXContentRepresentationOfSliceFailures() throws IOException { null ); status.toXContent(builder, ToXContent.EMPTY_PARAMS); - assertThat(Strings.toString(builder), containsString("\"slices\":[null,null,{\"type\":\"exception\"")); + assertThat(builder.toString(), containsString("\"slices\":[null,null,{\"type\":\"exception\"")); } public void testMergeStatuses() { diff --git a/server/src/test/java/org/opensearch/index/reindex/DeleteByQueryRequestTests.java b/server/src/test/java/org/opensearch/index/reindex/DeleteByQueryRequestTests.java index 1b6e7af6c32a0..eec60bec23a98 100644 --- a/server/src/test/java/org/opensearch/index/reindex/DeleteByQueryRequestTests.java +++ b/server/src/test/java/org/opensearch/index/reindex/DeleteByQueryRequestTests.java @@ -40,11 +40,11 @@ import java.io.IOException; -import static org.apache.lucene.tests.util.TestUtil.randomSimpleString; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; +import static org.apache.lucene.tests.util.TestUtil.randomSimpleString; public class DeleteByQueryRequestTests extends AbstractBulkByScrollRequestTestCase<DeleteByQueryRequest> { public void testDeleteteByQueryRequestImplementsIndicesRequestReplaceable() { diff --git a/server/src/test/java/org/opensearch/index/reindex/LeaderBulkByScrollTaskStateTests.java b/server/src/test/java/org/opensearch/index/reindex/LeaderBulkByScrollTaskStateTests.java index d24f720273586..5dcf429085f45 100644 --- a/server/src/test/java/org/opensearch/index/reindex/LeaderBulkByScrollTaskStateTests.java +++ b/server/src/test/java/org/opensearch/index/reindex/LeaderBulkByScrollTaskStateTests.java @@ -32,16 +32,17 @@ package org.opensearch.index.reindex; -import org.opensearch.action.ActionListener; -import org.opensearch.tasks.TaskId; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.tasks.TaskId; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; -import org.mockito.ArgumentCaptor; import java.util.Arrays; import java.util.Collections; import java.util.List; +import org.mockito.ArgumentCaptor; + import static java.util.Collections.emptyList; import static org.opensearch.common.unit.TimeValue.timeValueMillis; import static org.mockito.Mockito.atMost; diff --git a/server/src/test/java/org/opensearch/index/reindex/ReindexRequestTests.java b/server/src/test/java/org/opensearch/index/reindex/ReindexRequestTests.java index f65aaa0a30d96..fa8f9a2e99e1e 100644 --- a/server/src/test/java/org/opensearch/index/reindex/ReindexRequestTests.java +++ b/server/src/test/java/org/opensearch/index/reindex/ReindexRequestTests.java @@ -33,15 +33,15 @@ package org.opensearch.index.reindex; import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; 
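// The recurring Strings.toString(builder) -> builder.toString() rewrites in this
// diff lean on XContentBuilder.toString() rendering the built content directly,
// which is what lets the hunks above drop the org.opensearch.common.Strings
// import. A self-contained sketch of the pattern follows; treat it as
// illustrative of the migration, not as part of this change set.
import org.opensearch.common.xcontent.XContentFactory;
import org.opensearch.core.xcontent.XContentBuilder;

class ToStringMigrationSketch {
    static String statusJson() throws java.io.IOException {
        XContentBuilder builder = XContentFactory.jsonBuilder()
            .startObject()
            .field("requests_per_second", -1) // mirrors the assertion above
            .endObject();
        return builder.toString(); // was: Strings.toString(builder)
    }
}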
import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.search.SearchModule; import org.opensearch.search.slice.SliceBuilder; diff --git a/server/src/test/java/org/opensearch/index/reindex/WorkerBulkByScrollTaskStateTests.java b/server/src/test/java/org/opensearch/index/reindex/WorkerBulkByScrollTaskStateTests.java index 1724f552da845..08ca8fd3ced90 100644 --- a/server/src/test/java/org/opensearch/index/reindex/WorkerBulkByScrollTaskStateTests.java +++ b/server/src/test/java/org/opensearch/index/reindex/WorkerBulkByScrollTaskStateTests.java @@ -34,7 +34,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; -import org.opensearch.tasks.TaskId; +import org.opensearch.core.tasks.TaskId; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteRefreshSegmentPressureServiceTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteRefreshSegmentPressureServiceTests.java deleted file mode 100644 index 5ccacd4048596..0000000000000 --- a/server/src/test/java/org/opensearch/index/remote/RemoteRefreshSegmentPressureServiceTests.java +++ /dev/null @@ -1,161 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.remote; - -import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Settings; -import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; -import org.opensearch.index.IndexSettings; -import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; -import org.opensearch.test.IndexSettingsModule; -import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.threadpool.TestThreadPool; -import org.opensearch.threadpool.ThreadPool; - -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.atomic.AtomicLong; -import java.util.stream.IntStream; - -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -public class RemoteRefreshSegmentPressureServiceTests extends OpenSearchTestCase { - - private ClusterService clusterService; - - private ThreadPool threadPool; - - private ShardId shardId; - - private RemoteRefreshSegmentPressureService pressureService; - - @Override - public void setUp() throws Exception { - super.setUp(); - threadPool = new TestThreadPool("remote_refresh_segment_pressure_settings_test"); - clusterService = new ClusterService( - Settings.EMPTY, - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - threadPool - ); - shardId = new ShardId("index", "uuid", 0); - } - - @Override - public void tearDown() throws Exception { - super.tearDown(); - threadPool.shutdownNow(); - } - - public void testIsSegmentsUploadBackpressureEnabled() { - pressureService = new RemoteRefreshSegmentPressureService(clusterService, Settings.EMPTY); - assertFalse(pressureService.isSegmentsUploadBackpressureEnabled()); - - Settings newSettings = Settings.builder() - .put(RemoteRefreshSegmentPressureSettings.REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED.getKey(), "true") - .build(); - 
clusterService.getClusterSettings().applySettings(newSettings); - - assertTrue(pressureService.isSegmentsUploadBackpressureEnabled()); - } - - public void testAfterIndexShardCreatedForRemoteBackedIndex() { - IndexShard indexShard = createIndexShard(shardId, true); - pressureService = new RemoteRefreshSegmentPressureService(clusterService, Settings.EMPTY); - pressureService.afterIndexShardCreated(indexShard); - assertNotNull(pressureService.getRemoteRefreshSegmentTracker(indexShard.shardId())); - } - - public void testAfterIndexShardCreatedForNonRemoteBackedIndex() { - IndexShard indexShard = createIndexShard(shardId, false); - pressureService = new RemoteRefreshSegmentPressureService(clusterService, Settings.EMPTY); - pressureService.afterIndexShardCreated(indexShard); - assertNull(pressureService.getRemoteRefreshSegmentTracker(indexShard.shardId())); - } - - public void testAfterIndexShardClosed() { - IndexShard indexShard = createIndexShard(shardId, true); - pressureService = new RemoteRefreshSegmentPressureService(clusterService, Settings.EMPTY); - pressureService.afterIndexShardCreated(indexShard); - assertNotNull(pressureService.getRemoteRefreshSegmentTracker(shardId)); - - pressureService.afterIndexShardClosed(shardId, indexShard, indexShard.indexSettings().getSettings()); - assertNull(pressureService.getRemoteRefreshSegmentTracker(shardId)); - } - - public void testValidateSegmentUploadLag() { - // Create the pressure tracker - IndexShard indexShard = createIndexShard(shardId, true); - pressureService = new RemoteRefreshSegmentPressureService(clusterService, Settings.EMPTY); - pressureService.afterIndexShardCreated(indexShard); - - RemoteRefreshSegmentTracker pressureTracker = pressureService.getRemoteRefreshSegmentTracker(shardId); - pressureTracker.updateLocalRefreshSeqNo(6); - - // 1. time lag more than dynamic threshold - pressureTracker.updateRemoteRefreshSeqNo(3); - AtomicLong sum = new AtomicLong(); - IntStream.range(0, 20).forEach(i -> { - pressureTracker.addUploadTimeMs(i); - sum.addAndGet(i); - }); - double avg = (double) sum.get() / 20; - long currentMs = System.nanoTime() / 1_000_000; - pressureTracker.updateLocalRefreshTimeMs((long) (currentMs + 12 * avg)); - pressureTracker.updateRemoteRefreshTimeMs(currentMs); - Exception e = assertThrows(OpenSearchRejectedExecutionException.class, () -> pressureService.validateSegmentsUploadLag(shardId)); - assertTrue(e.getMessage().contains("due to remote segments lagging behind local segments")); - assertTrue(e.getMessage().contains("time_lag:114 ms dynamic_time_lag_threshold:95.0 ms")); - - pressureTracker.updateRemoteRefreshTimeMs((long) (currentMs + 2 * avg)); - pressureService.validateSegmentsUploadLag(shardId); - - // 2. 
bytes lag more than dynamic threshold - sum.set(0); - IntStream.range(0, 20).forEach(i -> { - pressureTracker.addUploadBytes(i); - sum.addAndGet(i); - }); - avg = (double) sum.get() / 20; - Map<String, Long> nameSizeMap = new HashMap<>(); - nameSizeMap.put("a", (long) (12 * avg)); - pressureTracker.setLatestLocalFileNameLengthMap(nameSizeMap); - e = assertThrows(OpenSearchRejectedExecutionException.class, () -> pressureService.validateSegmentsUploadLag(shardId)); - assertTrue(e.getMessage().contains("due to remote segments lagging behind local segments")); - assertTrue(e.getMessage().contains("bytes_lag:114 dynamic_bytes_lag_threshold:95.0")); - - nameSizeMap.put("a", (long) (2 * avg)); - pressureTracker.setLatestLocalFileNameLengthMap(nameSizeMap); - pressureService.validateSegmentsUploadLag(shardId); - - // 3. Consecutive failures more than the limit - IntStream.range(0, 5).forEach(ignore -> pressureTracker.incrementTotalUploadsFailed()); - pressureService.validateSegmentsUploadLag(shardId); - pressureTracker.incrementTotalUploadsFailed(); - e = assertThrows(OpenSearchRejectedExecutionException.class, () -> pressureService.validateSegmentsUploadLag(shardId)); - assertTrue(e.getMessage().contains("due to remote segments lagging behind local segments")); - assertTrue(e.getMessage().contains("failure_streak_count:6 min_consecutive_failure_threshold:5")); - pressureTracker.incrementTotalUploadsSucceeded(); - pressureService.validateSegmentsUploadLag(shardId); - } - - private static IndexShard createIndexShard(ShardId shardId, boolean remoteStoreEnabled) { - Settings settings = Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, String.valueOf(remoteStoreEnabled)).build(); - IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test_index", settings); - IndexShard indexShard = mock(IndexShard.class); - when(indexShard.indexSettings()).thenReturn(indexSettings); - when(indexShard.shardId()).thenReturn(shardId); - return indexShard; - } - -} diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteRefreshSegmentPressureSettingsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteRefreshSegmentPressureSettingsTests.java deleted file mode 100644 index 75b5b946e8bf8..0000000000000 --- a/server/src/test/java/org/opensearch/index/remote/RemoteRefreshSegmentPressureSettingsTests.java +++ /dev/null @@ -1,251 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.index.remote; - -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Settings; -import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.threadpool.TestThreadPool; -import org.opensearch.threadpool.ThreadPool; - -import java.util.concurrent.atomic.AtomicInteger; - -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.mock; - -public class RemoteRefreshSegmentPressureSettingsTests extends OpenSearchTestCase { - - private ClusterService clusterService; - - private ThreadPool threadPool; - - @Override - public void setUp() throws Exception { - super.setUp(); - threadPool = new TestThreadPool("remote_refresh_segment_pressure_settings_test"); - clusterService = new ClusterService( - Settings.EMPTY, - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - threadPool - ); - } - - @Override - public void tearDown() throws Exception { - super.tearDown(); - threadPool.shutdownNow(); - } - - public void testGetDefaultSettings() { - RemoteRefreshSegmentPressureSettings pressureSettings = new RemoteRefreshSegmentPressureSettings( - clusterService, - Settings.EMPTY, - mock(RemoteRefreshSegmentPressureService.class) - ); - - // Check remote refresh segment pressure enabled is false - assertFalse(pressureSettings.isRemoteRefreshSegmentPressureEnabled()); - - // Check bytes lag variance threshold default value - assertEquals(10.0, pressureSettings.getBytesLagVarianceFactor(), 0.0d); - - // Check time lag variance threshold default value - assertEquals(10.0, pressureSettings.getUploadTimeLagVarianceFactor(), 0.0d); - - // Check minimum consecutive failures limit default value - assertEquals(5, pressureSettings.getMinConsecutiveFailuresLimit()); - - // Check upload bytes moving average window size default value - assertEquals(20, pressureSettings.getUploadBytesMovingAverageWindowSize()); - - // Check upload bytes per sec moving average window size default value - assertEquals(20, pressureSettings.getUploadBytesPerSecMovingAverageWindowSize()); - - // Check upload time moving average window size default value - assertEquals(20, pressureSettings.getUploadTimeMovingAverageWindowSize()); - } - - public void testGetConfiguredSettings() { - Settings settings = Settings.builder() - .put(RemoteRefreshSegmentPressureSettings.REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED.getKey(), true) - .put(RemoteRefreshSegmentPressureSettings.BYTES_LAG_VARIANCE_FACTOR.getKey(), 50.0) - .put(RemoteRefreshSegmentPressureSettings.UPLOAD_TIME_LAG_VARIANCE_FACTOR.getKey(), 60.0) - .put(RemoteRefreshSegmentPressureSettings.MIN_CONSECUTIVE_FAILURES_LIMIT.getKey(), 121) - .put(RemoteRefreshSegmentPressureSettings.UPLOAD_BYTES_MOVING_AVERAGE_WINDOW_SIZE.getKey(), 102) - .put(RemoteRefreshSegmentPressureSettings.UPLOAD_BYTES_PER_SEC_MOVING_AVERAGE_WINDOW_SIZE.getKey(), 103) - .put(RemoteRefreshSegmentPressureSettings.UPLOAD_TIME_MOVING_AVERAGE_WINDOW_SIZE.getKey(), 104) - .build(); - RemoteRefreshSegmentPressureSettings pressureSettings = new RemoteRefreshSegmentPressureSettings( - clusterService, - settings, - mock(RemoteRefreshSegmentPressureService.class) - ); - - // Check remote refresh segment pressure enabled is true - assertTrue(pressureSettings.isRemoteRefreshSegmentPressureEnabled()); - - // Check bytes lag variance threshold configured value - assertEquals(50.0, 
pressureSettings.getBytesLagVarianceFactor(), 0.0d); - - // Check time lag variance threshold configured value - assertEquals(60.0, pressureSettings.getUploadTimeLagVarianceFactor(), 0.0d); - - // Check minimum consecutive failures limit configured value - assertEquals(121, pressureSettings.getMinConsecutiveFailuresLimit()); - - // Check upload bytes moving average window size configured value - assertEquals(102, pressureSettings.getUploadBytesMovingAverageWindowSize()); - - // Check upload bytes per sec moving average window size configured value - assertEquals(103, pressureSettings.getUploadBytesPerSecMovingAverageWindowSize()); - - // Check upload time moving average window size configured value - assertEquals(104, pressureSettings.getUploadTimeMovingAverageWindowSize()); - } - - public void testUpdateAfterGetDefaultSettings() { - RemoteRefreshSegmentPressureSettings pressureSettings = new RemoteRefreshSegmentPressureSettings( - clusterService, - Settings.EMPTY, - mock(RemoteRefreshSegmentPressureService.class) - ); - - Settings newSettings = Settings.builder() - .put(RemoteRefreshSegmentPressureSettings.REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED.getKey(), true) - .put(RemoteRefreshSegmentPressureSettings.BYTES_LAG_VARIANCE_FACTOR.getKey(), 50.0) - .put(RemoteRefreshSegmentPressureSettings.UPLOAD_TIME_LAG_VARIANCE_FACTOR.getKey(), 60.0) - .put(RemoteRefreshSegmentPressureSettings.MIN_CONSECUTIVE_FAILURES_LIMIT.getKey(), 121) - .put(RemoteRefreshSegmentPressureSettings.UPLOAD_BYTES_MOVING_AVERAGE_WINDOW_SIZE.getKey(), 102) - .put(RemoteRefreshSegmentPressureSettings.UPLOAD_BYTES_PER_SEC_MOVING_AVERAGE_WINDOW_SIZE.getKey(), 103) - .put(RemoteRefreshSegmentPressureSettings.UPLOAD_TIME_MOVING_AVERAGE_WINDOW_SIZE.getKey(), 104) - .build(); - clusterService.getClusterSettings().applySettings(newSettings); - - // Check updated remote refresh segment pressure enabled is false - assertTrue(pressureSettings.isRemoteRefreshSegmentPressureEnabled()); - - // Check bytes lag variance threshold updated - assertEquals(50.0, pressureSettings.getBytesLagVarianceFactor(), 0.0d); - - // Check time lag variance threshold updated - assertEquals(60.0, pressureSettings.getUploadTimeLagVarianceFactor(), 0.0d); - - // Check minimum consecutive failures limit updated - assertEquals(121, pressureSettings.getMinConsecutiveFailuresLimit()); - - // Check upload bytes moving average window size updated - assertEquals(102, pressureSettings.getUploadBytesMovingAverageWindowSize()); - - // Check upload bytes per sec moving average window size updated - assertEquals(103, pressureSettings.getUploadBytesPerSecMovingAverageWindowSize()); - - // Check upload time moving average window size updated - assertEquals(104, pressureSettings.getUploadTimeMovingAverageWindowSize()); - } - - public void testUpdateAfterGetConfiguredSettings() { - Settings settings = Settings.builder() - .put(RemoteRefreshSegmentPressureSettings.REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED.getKey(), true) - .put(RemoteRefreshSegmentPressureSettings.BYTES_LAG_VARIANCE_FACTOR.getKey(), 50.0) - .put(RemoteRefreshSegmentPressureSettings.UPLOAD_TIME_LAG_VARIANCE_FACTOR.getKey(), 60.0) - .put(RemoteRefreshSegmentPressureSettings.MIN_CONSECUTIVE_FAILURES_LIMIT.getKey(), 121) - .put(RemoteRefreshSegmentPressureSettings.UPLOAD_BYTES_MOVING_AVERAGE_WINDOW_SIZE.getKey(), 102) - .put(RemoteRefreshSegmentPressureSettings.UPLOAD_BYTES_PER_SEC_MOVING_AVERAGE_WINDOW_SIZE.getKey(), 103) - .put(RemoteRefreshSegmentPressureSettings.UPLOAD_TIME_MOVING_AVERAGE_WINDOW_SIZE.getKey(), 
104) - .build(); - RemoteRefreshSegmentPressureSettings pressureSettings = new RemoteRefreshSegmentPressureSettings( - clusterService, - settings, - mock(RemoteRefreshSegmentPressureService.class) - ); - - Settings newSettings = Settings.builder() - .put(RemoteRefreshSegmentPressureSettings.BYTES_LAG_VARIANCE_FACTOR.getKey(), 40.0) - .put(RemoteRefreshSegmentPressureSettings.UPLOAD_TIME_LAG_VARIANCE_FACTOR.getKey(), 50.0) - .put(RemoteRefreshSegmentPressureSettings.MIN_CONSECUTIVE_FAILURES_LIMIT.getKey(), 111) - .put(RemoteRefreshSegmentPressureSettings.UPLOAD_BYTES_MOVING_AVERAGE_WINDOW_SIZE.getKey(), 112) - .put(RemoteRefreshSegmentPressureSettings.UPLOAD_BYTES_PER_SEC_MOVING_AVERAGE_WINDOW_SIZE.getKey(), 113) - .put(RemoteRefreshSegmentPressureSettings.UPLOAD_TIME_MOVING_AVERAGE_WINDOW_SIZE.getKey(), 114) - .build(); - - clusterService.getClusterSettings().applySettings(newSettings); - - // Check updated remote refresh segment pressure enabled is true - assertTrue(pressureSettings.isRemoteRefreshSegmentPressureEnabled()); - - // Check bytes lag variance threshold updated - assertEquals(40.0, pressureSettings.getBytesLagVarianceFactor(), 0.0d); - - // Check time lag variance threshold updated - assertEquals(50.0, pressureSettings.getUploadTimeLagVarianceFactor(), 0.0d); - - // Check minimum consecutive failures limit updated - assertEquals(111, pressureSettings.getMinConsecutiveFailuresLimit()); - - // Check upload bytes moving average window size updated - assertEquals(112, pressureSettings.getUploadBytesMovingAverageWindowSize()); - - // Check upload bytes per sec moving average window size updated - assertEquals(113, pressureSettings.getUploadBytesPerSecMovingAverageWindowSize()); - - // Check upload time moving average window size updated - assertEquals(114, pressureSettings.getUploadTimeMovingAverageWindowSize()); - } - - public void testUpdateTriggeredInRemotePressureServiceOnUpdateSettings() { - - int toUpdateVal1 = 1121, toUpdateVal2 = 1123, toUpdateVal3 = 1125; - - AtomicInteger updatedUploadBytesWindowSize = new AtomicInteger(); - AtomicInteger updatedUploadBytesPerSecWindowSize = new AtomicInteger(); - AtomicInteger updatedUploadTimeWindowSize = new AtomicInteger(); - - RemoteRefreshSegmentPressureService pressureService = mock(RemoteRefreshSegmentPressureService.class); - - // Upload bytes - doAnswer(invocation -> { - updatedUploadBytesWindowSize.set(invocation.getArgument(0)); - return null; - }).when(pressureService).updateUploadBytesMovingAverageWindowSize(anyInt()); - - // Upload bytes per sec - doAnswer(invocation -> { - updatedUploadBytesPerSecWindowSize.set(invocation.getArgument(0)); - return null; - }).when(pressureService).updateUploadBytesPerSecMovingAverageWindowSize(anyInt()); - - // Upload time - doAnswer(invocation -> { - updatedUploadTimeWindowSize.set(invocation.getArgument(0)); - return null; - }).when(pressureService).updateUploadTimeMsMovingAverageWindowSize(anyInt()); - - RemoteRefreshSegmentPressureSettings pressureSettings = new RemoteRefreshSegmentPressureSettings( - clusterService, - Settings.EMPTY, - pressureService - ); - Settings newSettings = Settings.builder() - .put(RemoteRefreshSegmentPressureSettings.UPLOAD_BYTES_MOVING_AVERAGE_WINDOW_SIZE.getKey(), toUpdateVal1) - .put(RemoteRefreshSegmentPressureSettings.UPLOAD_BYTES_PER_SEC_MOVING_AVERAGE_WINDOW_SIZE.getKey(), toUpdateVal2) - .put(RemoteRefreshSegmentPressureSettings.UPLOAD_TIME_MOVING_AVERAGE_WINDOW_SIZE.getKey(), toUpdateVal3) - .build(); - 
clusterService.getClusterSettings().applySettings(newSettings); - - // Assertions - assertEquals(toUpdateVal1, pressureSettings.getUploadBytesMovingAverageWindowSize()); - assertEquals(toUpdateVal1, updatedUploadBytesWindowSize.get()); - assertEquals(toUpdateVal2, pressureSettings.getUploadBytesPerSecMovingAverageWindowSize()); - assertEquals(toUpdateVal2, updatedUploadBytesPerSecWindowSize.get()); - assertEquals(toUpdateVal3, pressureSettings.getUploadTimeMovingAverageWindowSize()); - assertEquals(toUpdateVal3, updatedUploadTimeWindowSize.get()); - } -} diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteRefreshSegmentTrackerTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteRefreshSegmentTrackerTests.java deleted file mode 100644 index badfeb0d67c05..0000000000000 --- a/server/src/test/java/org/opensearch/index/remote/RemoteRefreshSegmentTrackerTests.java +++ /dev/null @@ -1,480 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.remote; - -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Settings; -import org.opensearch.core.index.shard.ShardId; -import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.threadpool.TestThreadPool; -import org.opensearch.threadpool.ThreadPool; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -import static org.mockito.Mockito.mock; - -public class RemoteRefreshSegmentTrackerTests extends OpenSearchTestCase { - - private RemoteRefreshSegmentPressureSettings pressureSettings; - - private ClusterService clusterService; - - private ThreadPool threadPool; - - private ShardId shardId; - - private RemoteRefreshSegmentTracker pressureTracker; - - @Override - public void setUp() throws Exception { - super.setUp(); - threadPool = new TestThreadPool("remote_refresh_segment_pressure_settings_test"); - clusterService = new ClusterService( - Settings.EMPTY, - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - threadPool - ); - pressureSettings = new RemoteRefreshSegmentPressureSettings( - clusterService, - Settings.EMPTY, - mock(RemoteRefreshSegmentPressureService.class) - ); - shardId = new ShardId("index", "uuid", 0); - } - - @Override - public void tearDown() throws Exception { - super.tearDown(); - threadPool.shutdownNow(); - } - - public void testGetShardId() { - pressureTracker = new RemoteRefreshSegmentTracker( - shardId, - pressureSettings.getUploadBytesMovingAverageWindowSize(), - pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), - pressureSettings.getUploadTimeMovingAverageWindowSize() - ); - assertEquals(shardId, pressureTracker.getShardId()); - } - - public void testUpdateLocalRefreshSeqNo() { - pressureTracker = new RemoteRefreshSegmentTracker( - shardId, - pressureSettings.getUploadBytesMovingAverageWindowSize(), - pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), - pressureSettings.getUploadTimeMovingAverageWindowSize() - ); - long refreshSeqNo = 2; - pressureTracker.updateLocalRefreshSeqNo(refreshSeqNo); - assertEquals(refreshSeqNo, pressureTracker.getLocalRefreshSeqNo()); - } - - public void 
testUpdateRemoteRefreshSeqNo() { - pressureTracker = new RemoteRefreshSegmentTracker( - shardId, - pressureSettings.getUploadBytesMovingAverageWindowSize(), - pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), - pressureSettings.getUploadTimeMovingAverageWindowSize() - ); - long refreshSeqNo = 4; - pressureTracker.updateRemoteRefreshSeqNo(refreshSeqNo); - assertEquals(refreshSeqNo, pressureTracker.getRemoteRefreshSeqNo()); - } - - public void testUpdateLocalRefreshTimeMs() { - pressureTracker = new RemoteRefreshSegmentTracker( - shardId, - pressureSettings.getUploadBytesMovingAverageWindowSize(), - pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), - pressureSettings.getUploadTimeMovingAverageWindowSize() - ); - long refreshTimeMs = System.nanoTime() / 1_000_000L + randomIntBetween(10, 100); - pressureTracker.updateLocalRefreshTimeMs(refreshTimeMs); - assertEquals(refreshTimeMs, pressureTracker.getLocalRefreshTimeMs()); - } - - public void testUpdateRemoteRefreshTimeMs() { - pressureTracker = new RemoteRefreshSegmentTracker( - shardId, - pressureSettings.getUploadBytesMovingAverageWindowSize(), - pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), - pressureSettings.getUploadTimeMovingAverageWindowSize() - ); - long refreshTimeMs = System.nanoTime() / 1_000_000 + randomIntBetween(10, 100); - pressureTracker.updateRemoteRefreshTimeMs(refreshTimeMs); - assertEquals(refreshTimeMs, pressureTracker.getRemoteRefreshTimeMs()); - } - - public void testComputeSeqNoLagOnUpdate() { - pressureTracker = new RemoteRefreshSegmentTracker( - shardId, - pressureSettings.getUploadBytesMovingAverageWindowSize(), - pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), - pressureSettings.getUploadTimeMovingAverageWindowSize() - ); - int localRefreshSeqNo = randomIntBetween(50, 100); - int remoteRefreshSeqNo = randomIntBetween(20, 50); - pressureTracker.updateLocalRefreshSeqNo(localRefreshSeqNo); - assertEquals(localRefreshSeqNo, pressureTracker.getRefreshSeqNoLag()); - pressureTracker.updateRemoteRefreshSeqNo(remoteRefreshSeqNo); - assertEquals(localRefreshSeqNo - remoteRefreshSeqNo, pressureTracker.getRefreshSeqNoLag()); - } - - public void testComputeTimeLagOnUpdate() { - pressureTracker = new RemoteRefreshSegmentTracker( - shardId, - pressureSettings.getUploadBytesMovingAverageWindowSize(), - pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), - pressureSettings.getUploadTimeMovingAverageWindowSize() - ); - long currentLocalRefreshTimeMs = pressureTracker.getLocalRefreshTimeMs(); - long currentTimeMs = System.nanoTime() / 1_000_000L; - long localRefreshTimeMs = currentTimeMs + randomIntBetween(100, 500); - long remoteRefreshTimeMs = currentTimeMs + randomIntBetween(50, 99); - pressureTracker.updateLocalRefreshTimeMs(localRefreshTimeMs); - assertEquals(localRefreshTimeMs - currentLocalRefreshTimeMs, pressureTracker.getTimeMsLag()); - pressureTracker.updateRemoteRefreshTimeMs(remoteRefreshTimeMs); - assertEquals(localRefreshTimeMs - remoteRefreshTimeMs, pressureTracker.getTimeMsLag()); - } - - public void testAddUploadBytesStarted() { - pressureTracker = new RemoteRefreshSegmentTracker( - shardId, - pressureSettings.getUploadBytesMovingAverageWindowSize(), - pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), - pressureSettings.getUploadTimeMovingAverageWindowSize() - ); - long bytesToAdd = randomLongBetween(1000, 1000000); - pressureTracker.addUploadBytesStarted(bytesToAdd); - assertEquals(bytesToAdd, 
pressureTracker.getUploadBytesStarted()); - long moreBytesToAdd = randomLongBetween(1000, 10000); - pressureTracker.addUploadBytesStarted(moreBytesToAdd); - assertEquals(bytesToAdd + moreBytesToAdd, pressureTracker.getUploadBytesStarted()); - } - - public void testAddUploadBytesFailed() { - pressureTracker = new RemoteRefreshSegmentTracker( - shardId, - pressureSettings.getUploadBytesMovingAverageWindowSize(), - pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), - pressureSettings.getUploadTimeMovingAverageWindowSize() - ); - long bytesToAdd = randomLongBetween(1000, 1000000); - pressureTracker.addUploadBytesFailed(bytesToAdd); - assertEquals(bytesToAdd, pressureTracker.getUploadBytesFailed()); - long moreBytesToAdd = randomLongBetween(1000, 10000); - pressureTracker.addUploadBytesFailed(moreBytesToAdd); - assertEquals(bytesToAdd + moreBytesToAdd, pressureTracker.getUploadBytesFailed()); - } - - public void testAddUploadBytesSucceeded() { - pressureTracker = new RemoteRefreshSegmentTracker( - shardId, - pressureSettings.getUploadBytesMovingAverageWindowSize(), - pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), - pressureSettings.getUploadTimeMovingAverageWindowSize() - ); - long bytesToAdd = randomLongBetween(1000, 1000000); - pressureTracker.addUploadBytesSucceeded(bytesToAdd); - assertEquals(bytesToAdd, pressureTracker.getUploadBytesSucceeded()); - long moreBytesToAdd = randomLongBetween(1000, 10000); - pressureTracker.addUploadBytesSucceeded(moreBytesToAdd); - assertEquals(bytesToAdd + moreBytesToAdd, pressureTracker.getUploadBytesSucceeded()); - } - - public void testGetInflightUploadBytes() { - pressureTracker = new RemoteRefreshSegmentTracker( - shardId, - pressureSettings.getUploadBytesMovingAverageWindowSize(), - pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), - pressureSettings.getUploadTimeMovingAverageWindowSize() - ); - long bytesStarted = randomLongBetween(10000, 100000); - long bytesSucceeded = randomLongBetween(1000, 10000); - long bytesFailed = randomLongBetween(100, 1000); - pressureTracker.addUploadBytesStarted(bytesStarted); - pressureTracker.addUploadBytesSucceeded(bytesSucceeded); - pressureTracker.addUploadBytesFailed(bytesFailed); - assertEquals(bytesStarted - bytesSucceeded - bytesFailed, pressureTracker.getInflightUploadBytes()); - } - - public void testIncrementTotalUploadsStarted() { - pressureTracker = new RemoteRefreshSegmentTracker( - shardId, - pressureSettings.getUploadBytesMovingAverageWindowSize(), - pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), - pressureSettings.getUploadTimeMovingAverageWindowSize() - ); - pressureTracker.incrementTotalUploadsStarted(); - assertEquals(1, pressureTracker.getTotalUploadsStarted()); - pressureTracker.incrementTotalUploadsStarted(); - assertEquals(2, pressureTracker.getTotalUploadsStarted()); - } - - public void testIncrementTotalUploadsFailed() { - pressureTracker = new RemoteRefreshSegmentTracker( - shardId, - pressureSettings.getUploadBytesMovingAverageWindowSize(), - pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), - pressureSettings.getUploadTimeMovingAverageWindowSize() - ); - pressureTracker.incrementTotalUploadsFailed(); - assertEquals(1, pressureTracker.getTotalUploadsFailed()); - pressureTracker.incrementTotalUploadsFailed(); - assertEquals(2, pressureTracker.getTotalUploadsFailed()); - } - - public void testIncrementTotalUploadSucceeded() { - pressureTracker = new RemoteRefreshSegmentTracker( - shardId, - 
pressureSettings.getUploadBytesMovingAverageWindowSize(), - pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), - pressureSettings.getUploadTimeMovingAverageWindowSize() - ); - pressureTracker.incrementTotalUploadsSucceeded(); - assertEquals(1, pressureTracker.getTotalUploadsSucceeded()); - pressureTracker.incrementTotalUploadsSucceeded(); - assertEquals(2, pressureTracker.getTotalUploadsSucceeded()); - } - - public void testGetInflightUploads() { - pressureTracker = new RemoteRefreshSegmentTracker( - shardId, - pressureSettings.getUploadBytesMovingAverageWindowSize(), - pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), - pressureSettings.getUploadTimeMovingAverageWindowSize() - ); - pressureTracker.incrementTotalUploadsStarted(); - assertEquals(1, pressureTracker.getInflightUploads()); - pressureTracker.incrementTotalUploadsStarted(); - assertEquals(2, pressureTracker.getInflightUploads()); - pressureTracker.incrementTotalUploadsSucceeded(); - assertEquals(1, pressureTracker.getInflightUploads()); - pressureTracker.incrementTotalUploadsFailed(); - assertEquals(0, pressureTracker.getInflightUploads()); - } - - public void testIncrementRejectionCount() { - pressureTracker = new RemoteRefreshSegmentTracker( - shardId, - pressureSettings.getUploadBytesMovingAverageWindowSize(), - pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), - pressureSettings.getUploadTimeMovingAverageWindowSize() - ); - pressureTracker.incrementRejectionCount(); - assertEquals(1, pressureTracker.getRejectionCount()); - pressureTracker.incrementRejectionCount(); - assertEquals(2, pressureTracker.getRejectionCount()); - } - - public void testGetConsecutiveFailureCount() { - pressureTracker = new RemoteRefreshSegmentTracker( - shardId, - pressureSettings.getUploadBytesMovingAverageWindowSize(), - pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), - pressureSettings.getUploadTimeMovingAverageWindowSize() - ); - pressureTracker.incrementTotalUploadsFailed(); - assertEquals(1, pressureTracker.getConsecutiveFailureCount()); - pressureTracker.incrementTotalUploadsFailed(); - assertEquals(2, pressureTracker.getConsecutiveFailureCount()); - pressureTracker.incrementTotalUploadsSucceeded(); - assertEquals(0, pressureTracker.getConsecutiveFailureCount()); - } - - public void testComputeBytesLag() { - pressureTracker = new RemoteRefreshSegmentTracker( - shardId, - pressureSettings.getUploadBytesMovingAverageWindowSize(), - pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), - pressureSettings.getUploadTimeMovingAverageWindowSize() - ); - - // Create local file size map - Map<String, Long> fileSizeMap = new HashMap<>(); - fileSizeMap.put("a", 100L); - fileSizeMap.put("b", 105L); - pressureTracker.setLatestLocalFileNameLengthMap(fileSizeMap); - assertEquals(205L, pressureTracker.getBytesLag()); - - pressureTracker.addToLatestUploadedFiles("a"); - assertEquals(105L, pressureTracker.getBytesLag()); - - fileSizeMap.put("c", 115L); - pressureTracker.setLatestLocalFileNameLengthMap(fileSizeMap); - assertEquals(220L, pressureTracker.getBytesLag()); - - pressureTracker.addToLatestUploadedFiles("b"); - assertEquals(115L, pressureTracker.getBytesLag()); - - pressureTracker.addToLatestUploadedFiles("c"); - assertEquals(0L, pressureTracker.getBytesLag()); - } - - public void testIsUploadBytesAverageReady() { - pressureTracker = new RemoteRefreshSegmentTracker( - shardId, - pressureSettings.getUploadBytesMovingAverageWindowSize(), - 
pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), - pressureSettings.getUploadTimeMovingAverageWindowSize() - ); - assertFalse(pressureTracker.isUploadBytesAverageReady()); - - long sum = 0; - for (int i = 1; i < 20; i++) { - pressureTracker.addUploadBytes(i); - sum += i; - assertFalse(pressureTracker.isUploadBytesAverageReady()); - assertEquals((double) sum / i, pressureTracker.getUploadBytesAverage(), 0.0d); - } - - pressureTracker.addUploadBytes(20); - sum += 20; - assertTrue(pressureTracker.isUploadBytesAverageReady()); - assertEquals((double) sum / 20, pressureTracker.getUploadBytesAverage(), 0.0d); - - pressureTracker.addUploadBytes(100); - sum = sum + 100 - 1; - assertEquals((double) sum / 20, pressureTracker.getUploadBytesAverage(), 0.0d); - } - - public void testIsUploadBytesPerSecAverageReady() { - pressureTracker = new RemoteRefreshSegmentTracker( - shardId, - pressureSettings.getUploadBytesMovingAverageWindowSize(), - pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), - pressureSettings.getUploadTimeMovingAverageWindowSize() - ); - assertFalse(pressureTracker.isUploadBytesPerSecAverageReady()); - - long sum = 0; - for (int i = 1; i < 20; i++) { - pressureTracker.addUploadBytesPerSec(i); - sum += i; - assertFalse(pressureTracker.isUploadBytesPerSecAverageReady()); - assertEquals((double) sum / i, pressureTracker.getUploadBytesPerSecAverage(), 0.0d); - } - - pressureTracker.addUploadBytesPerSec(20); - sum += 20; - assertTrue(pressureTracker.isUploadBytesPerSecAverageReady()); - assertEquals((double) sum / 20, pressureTracker.getUploadBytesPerSecAverage(), 0.0d); - - pressureTracker.addUploadBytesPerSec(100); - sum = sum + 100 - 1; - assertEquals((double) sum / 20, pressureTracker.getUploadBytesPerSecAverage(), 0.0d); - } - - public void testIsUploadTimeMsAverageReady() { - pressureTracker = new RemoteRefreshSegmentTracker( - shardId, - pressureSettings.getUploadBytesMovingAverageWindowSize(), - pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), - pressureSettings.getUploadTimeMovingAverageWindowSize() - ); - assertFalse(pressureTracker.isUploadTimeMsAverageReady()); - - long sum = 0; - for (int i = 1; i < 20; i++) { - pressureTracker.addUploadTimeMs(i); - sum += i; - assertFalse(pressureTracker.isUploadTimeMsAverageReady()); - assertEquals((double) sum / i, pressureTracker.getUploadTimeMsAverage(), 0.0d); - } - - pressureTracker.addUploadTimeMs(20); - sum += 20; - assertTrue(pressureTracker.isUploadTimeMsAverageReady()); - assertEquals((double) sum / 20, pressureTracker.getUploadTimeMsAverage(), 0.0d); - - pressureTracker.addUploadTimeMs(100); - sum = sum + 100 - 1; - assertEquals((double) sum / 20, pressureTracker.getUploadTimeMsAverage(), 0.0d); - } - - /** - * Tests whether RemoteRefreshSegmentTracker.Stats object generated correctly from RemoteRefreshSegmentTracker. 
- * */ - public void testStatsObjectCreation() { - pressureTracker = constructTracker(); - RemoteRefreshSegmentTracker.Stats pressureTrackerStats = pressureTracker.stats(); - assertEquals(pressureTracker.getShardId(), pressureTrackerStats.shardId); - assertEquals(pressureTracker.getTimeMsLag(), (int) pressureTrackerStats.refreshTimeLagMs); - assertEquals(pressureTracker.getLocalRefreshSeqNo(), (int) pressureTrackerStats.localRefreshNumber); - assertEquals(pressureTracker.getRemoteRefreshSeqNo(), (int) pressureTrackerStats.remoteRefreshNumber); - assertEquals(pressureTracker.getBytesLag(), (int) pressureTrackerStats.bytesLag); - assertEquals(pressureTracker.getRejectionCount(), (int) pressureTrackerStats.rejectionCount); - assertEquals(pressureTracker.getConsecutiveFailureCount(), (int) pressureTrackerStats.consecutiveFailuresCount); - assertEquals(pressureTracker.getUploadBytesStarted(), (int) pressureTrackerStats.uploadBytesStarted); - assertEquals(pressureTracker.getUploadBytesSucceeded(), (int) pressureTrackerStats.uploadBytesSucceeded); - assertEquals(pressureTracker.getUploadBytesFailed(), (int) pressureTrackerStats.uploadBytesFailed); - assertEquals(pressureTracker.getUploadBytesAverage(), pressureTrackerStats.uploadBytesMovingAverage, 0); - assertEquals(pressureTracker.getUploadBytesPerSecAverage(), pressureTrackerStats.uploadBytesPerSecMovingAverage, 0); - assertEquals(pressureTracker.getUploadTimeMsAverage(), pressureTrackerStats.uploadTimeMovingAverage, 0); - assertEquals(pressureTracker.getTotalUploadsStarted(), (int) pressureTrackerStats.totalUploadsStarted); - assertEquals(pressureTracker.getTotalUploadsSucceeded(), (int) pressureTrackerStats.totalUploadsSucceeded); - assertEquals(pressureTracker.getTotalUploadsFailed(), (int) pressureTrackerStats.totalUploadsFailed); - } - - /** - * Tests whether RemoteRefreshSegmentTracker.Stats object serialize and deserialize is working fine. - * This comes into play during internode data transfer. 
- * */ - public void testStatsObjectCreationViaStream() throws IOException { - pressureTracker = constructTracker(); - RemoteRefreshSegmentTracker.Stats pressureTrackerStats = pressureTracker.stats(); - try (BytesStreamOutput out = new BytesStreamOutput()) { - pressureTrackerStats.writeTo(out); - try (StreamInput in = out.bytes().streamInput()) { - RemoteRefreshSegmentTracker.Stats deserializedStats = new RemoteRefreshSegmentTracker.Stats(in); - assertEquals(deserializedStats.shardId, pressureTrackerStats.shardId); - assertEquals((int) deserializedStats.refreshTimeLagMs, (int) pressureTrackerStats.refreshTimeLagMs); - assertEquals((int) deserializedStats.localRefreshNumber, (int) pressureTrackerStats.localRefreshNumber); - assertEquals((int) deserializedStats.remoteRefreshNumber, (int) pressureTrackerStats.remoteRefreshNumber); - assertEquals((int) deserializedStats.bytesLag, (int) pressureTrackerStats.bytesLag); - assertEquals((int) deserializedStats.rejectionCount, (int) pressureTrackerStats.rejectionCount); - assertEquals((int) deserializedStats.consecutiveFailuresCount, (int) pressureTrackerStats.consecutiveFailuresCount); - assertEquals((int) deserializedStats.uploadBytesStarted, (int) pressureTrackerStats.uploadBytesStarted); - assertEquals((int) deserializedStats.uploadBytesSucceeded, (int) pressureTrackerStats.uploadBytesSucceeded); - assertEquals((int) deserializedStats.uploadBytesFailed, (int) pressureTrackerStats.uploadBytesFailed); - assertEquals((int) deserializedStats.uploadBytesMovingAverage, pressureTrackerStats.uploadBytesMovingAverage, 0); - assertEquals( - (int) deserializedStats.uploadBytesPerSecMovingAverage, - pressureTrackerStats.uploadBytesPerSecMovingAverage, - 0 - ); - assertEquals((int) deserializedStats.uploadTimeMovingAverage, pressureTrackerStats.uploadTimeMovingAverage, 0); - assertEquals((int) deserializedStats.totalUploadsStarted, (int) pressureTrackerStats.totalUploadsStarted); - assertEquals((int) deserializedStats.totalUploadsSucceeded, (int) pressureTrackerStats.totalUploadsSucceeded); - assertEquals((int) deserializedStats.totalUploadsFailed, (int) pressureTrackerStats.totalUploadsFailed); - } - } - } - - private RemoteRefreshSegmentTracker constructTracker() { - RemoteRefreshSegmentTracker segmentPressureTracker = new RemoteRefreshSegmentTracker( - shardId, - pressureSettings.getUploadBytesMovingAverageWindowSize(), - pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), - pressureSettings.getUploadTimeMovingAverageWindowSize() - ); - segmentPressureTracker.incrementTotalUploadsFailed(); - segmentPressureTracker.addUploadTimeMs(System.nanoTime() / 1_000_000L + randomIntBetween(10, 100)); - segmentPressureTracker.addUploadBytes(99); - segmentPressureTracker.updateRemoteRefreshTimeMs(System.nanoTime() / 1_000_000L + randomIntBetween(10, 100)); - segmentPressureTracker.incrementRejectionCount(); - return segmentPressureTracker; - } -} diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java new file mode 100644 index 0000000000000..ccdd1fe4ab609 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java @@ -0,0 +1,616 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.remote; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.store.DirectoryFileTransferTracker; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.opensearch.index.remote.RemoteSegmentTransferTracker.currentTimeMsUsingSystemNanos; + +public class RemoteSegmentTransferTrackerTests extends OpenSearchTestCase { + private RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory; + private ClusterService clusterService; + + private ThreadPool threadPool; + + private ShardId shardId; + + private RemoteSegmentTransferTracker transferTracker; + + private DirectoryFileTransferTracker directoryFileTransferTracker; + + @Override + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool("remote_refresh_segment_pressure_settings_test"); + clusterService = new ClusterService( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ); + remoteStoreStatsTrackerFactory = new RemoteStoreStatsTrackerFactory(clusterService, Settings.EMPTY); + shardId = new ShardId("index", "uuid", 0); + directoryFileTransferTracker = new DirectoryFileTransferTracker(); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdownNow(); + } + + public void testGetShardId() { + transferTracker = new RemoteSegmentTransferTracker( + shardId, + directoryFileTransferTracker, + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() + ); + assertEquals(shardId, transferTracker.getShardId()); + } + + public void testUpdateLocalRefreshSeqNo() { + transferTracker = new RemoteSegmentTransferTracker( + shardId, + directoryFileTransferTracker, + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() + ); + long refreshSeqNo = 2; + transferTracker.updateLocalRefreshSeqNo(refreshSeqNo); + assertEquals(refreshSeqNo, transferTracker.getLocalRefreshSeqNo()); + } + + public void testUpdateRemoteRefreshSeqNo() { + transferTracker = new RemoteSegmentTransferTracker( + shardId, + directoryFileTransferTracker, + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() + ); + long refreshSeqNo = 4; + transferTracker.updateRemoteRefreshSeqNo(refreshSeqNo); + assertEquals(refreshSeqNo, transferTracker.getRemoteRefreshSeqNo()); + } + + public void testUpdateLocalRefreshTimeMs() { + transferTracker = new RemoteSegmentTransferTracker( + shardId, + directoryFileTransferTracker, + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() + ); + long refreshTimeMs = currentTimeMsUsingSystemNanos() + randomIntBetween(10, 100); + transferTracker.updateLocalRefreshTimeMs(refreshTimeMs); + assertEquals(refreshTimeMs, transferTracker.getLocalRefreshTimeMs()); + } + + public void testUpdateRemoteRefreshTimeMs() { + transferTracker = new RemoteSegmentTransferTracker( + shardId, + directoryFileTransferTracker, + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() + ); + long refreshTimeMs = currentTimeMsUsingSystemNanos() + randomIntBetween(10, 100); + 
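// The getter should return exactly the remote refresh timestamp that was set +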
transferTracker.updateRemoteRefreshTimeMs(refreshTimeMs); + assertEquals(refreshTimeMs, transferTracker.getRemoteRefreshTimeMs()); + } + + public void testLastDownloadTimestampMs() { + transferTracker = new RemoteSegmentTransferTracker( + shardId, + directoryFileTransferTracker, + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() + ); + long currentTimeInMs = System.currentTimeMillis(); + transferTracker.getDirectoryFileTransferTracker().updateLastTransferTimestampMs(currentTimeInMs); + assertEquals(currentTimeInMs, transferTracker.getDirectoryFileTransferTracker().getLastTransferTimestampMs()); + } + + public void testComputeSeqNoLagOnUpdate() { + transferTracker = new RemoteSegmentTransferTracker( + shardId, + directoryFileTransferTracker, + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() + ); + int localRefreshSeqNo = randomIntBetween(50, 100); + int remoteRefreshSeqNo = randomIntBetween(20, 50); + transferTracker.updateLocalRefreshSeqNo(localRefreshSeqNo); + assertEquals(localRefreshSeqNo, transferTracker.getRefreshSeqNoLag()); + transferTracker.updateRemoteRefreshSeqNo(remoteRefreshSeqNo); + assertEquals(localRefreshSeqNo - remoteRefreshSeqNo, transferTracker.getRefreshSeqNoLag()); + } + + public void testComputeTimeLagOnUpdate() throws InterruptedException { + transferTracker = new RemoteSegmentTransferTracker( + shardId, + directoryFileTransferTracker, + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() + ); + + // No lag if there is a remote upload corresponding to a local refresh + assertEquals(0, transferTracker.getTimeMsLag()); + + // Set a local refresh time that is higher than remote refresh time + Thread.sleep(1); + transferTracker.updateLocalRefreshTimeMs(currentTimeMsUsingSystemNanos()); + + transferTracker.updateLatestLocalFileNameLengthMap(List.of("test"), k -> 1L); + // Sleep for 100ms and then the lag should be within 100ms +/- 20ms + Thread.sleep(100); + assertTrue(Math.abs(transferTracker.getTimeMsLag() - 100) <= 20); + + transferTracker.updateRemoteRefreshTimeMs(transferTracker.getLocalRefreshTimeMs()); + transferTracker.updateLocalRefreshTimeMs(currentTimeMsUsingSystemNanos()); + long random = randomIntBetween(50, 200); + Thread.sleep(random); + assertTrue(Math.abs(transferTracker.getTimeMsLag() - random) <= 20); + } + + public void testAddUploadBytesStarted() { + transferTracker = new RemoteSegmentTransferTracker( + shardId, + directoryFileTransferTracker, + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() + ); + long bytesToAdd = randomLongBetween(1000, 1000000); + transferTracker.addUploadBytesStarted(bytesToAdd); + assertEquals(bytesToAdd, transferTracker.getUploadBytesStarted()); + long moreBytesToAdd = randomLongBetween(1000, 10000); + transferTracker.addUploadBytesStarted(moreBytesToAdd); + assertEquals(bytesToAdd + moreBytesToAdd, transferTracker.getUploadBytesStarted()); + } + + public void testAddUploadBytesFailed() { + transferTracker = new RemoteSegmentTransferTracker( + shardId, + directoryFileTransferTracker, + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() + ); + long bytesToAdd = randomLongBetween(1000, 1000000); + long moreBytesToAdd = randomLongBetween(1000, 10000); + transferTracker.addUploadBytesStarted(bytesToAdd + moreBytesToAdd); + transferTracker.addUploadBytesFailed(bytesToAdd); + assertEquals(bytesToAdd, transferTracker.getUploadBytesFailed()); + transferTracker.addUploadBytesFailed(moreBytesToAdd); + assertEquals(bytesToAdd + moreBytesToAdd, transferTracker.getUploadBytesFailed()); + 
} + + public void testAddUploadBytesSucceeded() { + transferTracker = new RemoteSegmentTransferTracker( + shardId, + directoryFileTransferTracker, + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() + ); + long bytesToAdd = randomLongBetween(1000, 1000000); + long moreBytesToAdd = randomLongBetween(1000, 10000); + transferTracker.addUploadBytesStarted(bytesToAdd + moreBytesToAdd); + transferTracker.addUploadBytesSucceeded(bytesToAdd); + assertEquals(bytesToAdd, transferTracker.getUploadBytesSucceeded()); + transferTracker.addUploadBytesSucceeded(moreBytesToAdd); + assertEquals(bytesToAdd + moreBytesToAdd, transferTracker.getUploadBytesSucceeded()); + } + + public void testAddDownloadBytesStarted() { + transferTracker = new RemoteSegmentTransferTracker( + shardId, + directoryFileTransferTracker, + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() + ); + long bytesToAdd = randomLongBetween(1000, 1000000); + transferTracker.getDirectoryFileTransferTracker().addTransferredBytesStarted(bytesToAdd); + assertEquals(bytesToAdd, transferTracker.getDirectoryFileTransferTracker().getTransferredBytesStarted()); + long moreBytesToAdd = randomLongBetween(1000, 10000); + transferTracker.getDirectoryFileTransferTracker().addTransferredBytesStarted(moreBytesToAdd); + assertEquals(bytesToAdd + moreBytesToAdd, transferTracker.getDirectoryFileTransferTracker().getTransferredBytesStarted()); + } + + public void testAddDownloadBytesFailed() { + transferTracker = new RemoteSegmentTransferTracker( + shardId, + directoryFileTransferTracker, + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() + ); + long bytesToAdd = randomLongBetween(1000, 1000000); + transferTracker.getDirectoryFileTransferTracker().addTransferredBytesFailed(bytesToAdd, System.currentTimeMillis()); + assertEquals(bytesToAdd, transferTracker.getDirectoryFileTransferTracker().getTransferredBytesFailed()); + long moreBytesToAdd = randomLongBetween(1000, 10000); + transferTracker.getDirectoryFileTransferTracker().addTransferredBytesFailed(moreBytesToAdd, System.currentTimeMillis()); + assertEquals(bytesToAdd + moreBytesToAdd, transferTracker.getDirectoryFileTransferTracker().getTransferredBytesFailed()); + } + + public void testAddDownloadBytesSucceeded() { + transferTracker = new RemoteSegmentTransferTracker( + shardId, + directoryFileTransferTracker, + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() + ); + long bytesToAdd = randomLongBetween(1000, 1000000); + transferTracker.getDirectoryFileTransferTracker().addTransferredBytesSucceeded(bytesToAdd, System.currentTimeMillis()); + assertEquals(bytesToAdd, transferTracker.getDirectoryFileTransferTracker().getTransferredBytesSucceeded()); + long moreBytesToAdd = randomLongBetween(1000, 10000); + transferTracker.getDirectoryFileTransferTracker().addTransferredBytesSucceeded(moreBytesToAdd, System.currentTimeMillis()); + assertEquals(bytesToAdd + moreBytesToAdd, transferTracker.getDirectoryFileTransferTracker().getTransferredBytesSucceeded()); + } + + public void testGetInflightUploadBytes() { + transferTracker = new RemoteSegmentTransferTracker( + shardId, + directoryFileTransferTracker, + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() + ); + long bytesStarted = randomLongBetween(10000, 100000); + long bytesSucceeded = randomLongBetween(1000, 10000); + long bytesFailed = randomLongBetween(100, 1000); + transferTracker.addUploadBytesStarted(bytesStarted); + transferTracker.addUploadBytesSucceeded(bytesSucceeded); + 
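// Inflight bytes are the bytes started but not yet succeeded or failed +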
transferTracker.addUploadBytesFailed(bytesFailed); + assertEquals(bytesStarted - bytesSucceeded - bytesFailed, transferTracker.getInflightUploadBytes()); + } + + public void testIncrementTotalUploadsStarted() { + transferTracker = new RemoteSegmentTransferTracker( + shardId, + directoryFileTransferTracker, + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() + ); + transferTracker.incrementTotalUploadsStarted(); + assertEquals(1, transferTracker.getTotalUploadsStarted()); + transferTracker.incrementTotalUploadsStarted(); + assertEquals(2, transferTracker.getTotalUploadsStarted()); + } + + public void testIncrementTotalUploadsFailed() { + transferTracker = new RemoteSegmentTransferTracker( + shardId, + directoryFileTransferTracker, + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() + ); + transferTracker.incrementTotalUploadsStarted(); + transferTracker.incrementTotalUploadsStarted(); + transferTracker.incrementTotalUploadsFailed(); + assertEquals(1, transferTracker.getTotalUploadsFailed()); + transferTracker.incrementTotalUploadsFailed(); + assertEquals(2, transferTracker.getTotalUploadsFailed()); + } + + public void testIncrementTotalUploadSucceeded() { + transferTracker = new RemoteSegmentTransferTracker( + shardId, + directoryFileTransferTracker, + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() + ); + transferTracker.incrementTotalUploadsStarted(); + transferTracker.incrementTotalUploadsStarted(); + transferTracker.incrementTotalUploadsSucceeded(); + assertEquals(1, transferTracker.getTotalUploadsSucceeded()); + transferTracker.incrementTotalUploadsSucceeded(); + assertEquals(2, transferTracker.getTotalUploadsSucceeded()); + } + + public void testGetInflightUploads() { + transferTracker = new RemoteSegmentTransferTracker( + shardId, + directoryFileTransferTracker, + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() + ); + transferTracker.incrementTotalUploadsStarted(); + assertEquals(1, transferTracker.getInflightUploads()); + transferTracker.incrementTotalUploadsStarted(); + assertEquals(2, transferTracker.getInflightUploads()); + transferTracker.incrementTotalUploadsSucceeded(); + assertEquals(1, transferTracker.getInflightUploads()); + transferTracker.incrementTotalUploadsFailed(); + assertEquals(0, transferTracker.getInflightUploads()); + } + + public void testIncrementRejectionCount() { + transferTracker = new RemoteSegmentTransferTracker( + shardId, + directoryFileTransferTracker, + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() + ); + transferTracker.incrementRejectionCount(); + assertEquals(1, transferTracker.getRejectionCount()); + transferTracker.incrementRejectionCount(); + assertEquals(2, transferTracker.getRejectionCount()); + } + + public void testGetConsecutiveFailureCount() { + transferTracker = new RemoteSegmentTransferTracker( + shardId, + directoryFileTransferTracker, + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() + ); + transferTracker.incrementTotalUploadsStarted(); + transferTracker.incrementTotalUploadsStarted(); + transferTracker.incrementTotalUploadsStarted(); + transferTracker.incrementTotalUploadsFailed(); + assertEquals(1, transferTracker.getConsecutiveFailureCount()); + transferTracker.incrementTotalUploadsFailed(); + assertEquals(2, transferTracker.getConsecutiveFailureCount()); + transferTracker.incrementTotalUploadsSucceeded(); + assertEquals(0, transferTracker.getConsecutiveFailureCount()); + } + + public void testComputeBytesLag() { + transferTracker = new 
RemoteSegmentTransferTracker( + shardId, + directoryFileTransferTracker, + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() + ); + + // Create local file size map + Map<String, Long> fileSizeMap = new HashMap<>(); + fileSizeMap.put("a", 100L); + fileSizeMap.put("b", 105L); + transferTracker.updateLatestLocalFileNameLengthMap(fileSizeMap.keySet(), fileSizeMap::get); + assertEquals(205L, transferTracker.getBytesLag()); + + transferTracker.addToLatestUploadedFiles("a"); + assertEquals(105L, transferTracker.getBytesLag()); + + fileSizeMap.put("c", 115L); + transferTracker.updateLatestLocalFileNameLengthMap(fileSizeMap.keySet(), fileSizeMap::get); + assertEquals(220L, transferTracker.getBytesLag()); + + transferTracker.addToLatestUploadedFiles("b"); + assertEquals(115L, transferTracker.getBytesLag()); + + transferTracker.addToLatestUploadedFiles("c"); + assertEquals(0L, transferTracker.getBytesLag()); + } + + public void testIsUploadBytesMovingAverageReady() { + int movingAverageWindowSize = remoteStoreStatsTrackerFactory.getMovingAverageWindowSize(); + transferTracker = new RemoteSegmentTransferTracker(shardId, directoryFileTransferTracker, movingAverageWindowSize); + assertFalse(transferTracker.isUploadBytesMovingAverageReady()); + + long sum = 0; + for (int i = 1; i < movingAverageWindowSize; i++) { + transferTracker.updateUploadBytesMovingAverage(i); + sum += i; + assertFalse(transferTracker.isUploadBytesMovingAverageReady()); + assertEquals((double) sum / i, transferTracker.getUploadBytesMovingAverage(), 0.0d); + } + + transferTracker.updateUploadBytesMovingAverage(movingAverageWindowSize); + sum += movingAverageWindowSize; + assertTrue(transferTracker.isUploadBytesMovingAverageReady()); + assertEquals((double) sum / movingAverageWindowSize, transferTracker.getUploadBytesMovingAverage(), 0.0d); + + transferTracker.updateUploadBytesMovingAverage(100); + sum = sum + 100 - 1; + assertEquals((double) sum / movingAverageWindowSize, transferTracker.getUploadBytesMovingAverage(), 0.0d); + } + + public void testIsUploadBytesPerSecAverageReady() { + int movingAverageWindowSize = remoteStoreStatsTrackerFactory.getMovingAverageWindowSize(); + transferTracker = new RemoteSegmentTransferTracker(shardId, directoryFileTransferTracker, movingAverageWindowSize); + assertFalse(transferTracker.isUploadBytesPerSecMovingAverageReady()); + + long sum = 0; + for (int i = 1; i < movingAverageWindowSize; i++) { + transferTracker.updateUploadBytesPerSecMovingAverage(i); + sum += i; + assertFalse(transferTracker.isUploadBytesPerSecMovingAverageReady()); + assertEquals((double) sum / i, transferTracker.getUploadBytesPerSecMovingAverage(), 0.0d); + } + + transferTracker.updateUploadBytesPerSecMovingAverage(movingAverageWindowSize); + sum += movingAverageWindowSize; + assertTrue(transferTracker.isUploadBytesPerSecMovingAverageReady()); + assertEquals((double) sum / movingAverageWindowSize, transferTracker.getUploadBytesPerSecMovingAverage(), 0.0d); + + transferTracker.updateUploadBytesPerSecMovingAverage(100); + sum = sum + 100 - 1; + assertEquals((double) sum / movingAverageWindowSize, transferTracker.getUploadBytesPerSecMovingAverage(), 0.0d); + } + + public void testIsUploadTimeMsAverageReady() { + int movingAverageWindowSize = remoteStoreStatsTrackerFactory.getMovingAverageWindowSize(); + transferTracker = new RemoteSegmentTransferTracker(shardId, directoryFileTransferTracker, movingAverageWindowSize); + assertFalse(transferTracker.isUploadTimeMovingAverageReady()); + + long sum = 0; + for (int i = 1; i
< movingAverageWindowSize; i++) { + transferTracker.updateUploadTimeMovingAverage(i); + sum += i; + assertFalse(transferTracker.isUploadTimeMovingAverageReady()); + assertEquals((double) sum / i, transferTracker.getUploadTimeMovingAverage(), 0.0d); + } + + transferTracker.updateUploadTimeMovingAverage(movingAverageWindowSize); + sum += movingAverageWindowSize; + assertTrue(transferTracker.isUploadTimeMovingAverageReady()); + assertEquals((double) sum / movingAverageWindowSize, transferTracker.getUploadTimeMovingAverage(), 0.0d); + + transferTracker.updateUploadTimeMovingAverage(100); + sum = sum + 100 - 1; + assertEquals((double) sum / movingAverageWindowSize, transferTracker.getUploadTimeMovingAverage(), 0.0d); + } + + public void testIsDownloadBytesAverageReady() { + transferTracker = new RemoteSegmentTransferTracker( + shardId, + directoryFileTransferTracker, + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() + ); + assertFalse(transferTracker.getDirectoryFileTransferTracker().isTransferredBytesAverageReady()); + + long sum = 0; + for (int i = 1; i < 20; i++) { + transferTracker.getDirectoryFileTransferTracker().updateSuccessfulTransferSize(i); + sum += i; + assertFalse(transferTracker.getDirectoryFileTransferTracker().isTransferredBytesAverageReady()); + assertEquals((double) sum / i, transferTracker.getDirectoryFileTransferTracker().getTransferredBytesAverage(), 0.0d); + } + + transferTracker.getDirectoryFileTransferTracker().updateSuccessfulTransferSize(20); + sum += 20; + assertTrue(transferTracker.getDirectoryFileTransferTracker().isTransferredBytesAverageReady()); + assertEquals((double) sum / 20, transferTracker.getDirectoryFileTransferTracker().getTransferredBytesAverage(), 0.0d); + + transferTracker.getDirectoryFileTransferTracker().updateSuccessfulTransferSize(100); + sum = sum + 100 - 1; + assertEquals((double) sum / 20, transferTracker.getDirectoryFileTransferTracker().getTransferredBytesAverage(), 0.0d); + } + + public void testIsDownloadBytesPerSecAverageReady() { + transferTracker = new RemoteSegmentTransferTracker( + shardId, + directoryFileTransferTracker, + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() + ); + assertFalse(transferTracker.getDirectoryFileTransferTracker().isTransferredBytesPerSecAverageReady()); + + long sum = 0; + for (int i = 1; i < 20; i++) { + transferTracker.getDirectoryFileTransferTracker().addTransferredBytesPerSec(i); + sum += i; + assertFalse(transferTracker.getDirectoryFileTransferTracker().isTransferredBytesPerSecAverageReady()); + assertEquals((double) sum / i, transferTracker.getDirectoryFileTransferTracker().getTransferredBytesPerSecAverage(), 0.0d); + } + + transferTracker.getDirectoryFileTransferTracker().addTransferredBytesPerSec(20); + sum += 20; + assertTrue(transferTracker.getDirectoryFileTransferTracker().isTransferredBytesPerSecAverageReady()); + assertEquals((double) sum / 20, transferTracker.getDirectoryFileTransferTracker().getTransferredBytesPerSecAverage(), 0.0d); + + transferTracker.getDirectoryFileTransferTracker().addTransferredBytesPerSec(100); + sum = sum + 100 - 1; + assertEquals((double) sum / 20, transferTracker.getDirectoryFileTransferTracker().getTransferredBytesPerSecAverage(), 0.0d); + } + + public void testAddTotalUploadTimeInMs() { + transferTracker = new RemoteSegmentTransferTracker( + shardId, + directoryFileTransferTracker, + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() + ); + long timeToAdd = randomLongBetween(100, 200); + 
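// Total upload time is an accumulating counter, so two adds should sum +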
transferTracker.addUploadTimeInMillis(timeToAdd); + assertEquals(timeToAdd, transferTracker.getTotalUploadTimeInMillis()); + long moreTimeToAdd = randomLongBetween(100, 200); + transferTracker.addUploadTimeInMillis(moreTimeToAdd); + assertEquals(timeToAdd + moreTimeToAdd, transferTracker.getTotalUploadTimeInMillis()); + } + + public void testAddTotalTransferTimeMs() { + transferTracker = new RemoteSegmentTransferTracker( + shardId, + directoryFileTransferTracker, + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() + ); + long timeToAdd = randomLongBetween(100, 200); + transferTracker.getDirectoryFileTransferTracker().addTotalTransferTimeInMs(timeToAdd); + assertEquals(timeToAdd, transferTracker.getDirectoryFileTransferTracker().getTotalTransferTimeInMs()); + long moreTimeToAdd = randomLongBetween(100, 200); + transferTracker.getDirectoryFileTransferTracker().addTotalTransferTimeInMs(moreTimeToAdd); + assertEquals(timeToAdd + moreTimeToAdd, transferTracker.getDirectoryFileTransferTracker().getTotalTransferTimeInMs()); + } + + /** + * Tests whether the RemoteSegmentTransferTracker.Stats object is generated correctly from the RemoteSegmentTransferTracker. + */ + public void testStatsObjectCreation() { + transferTracker = constructTracker(); + RemoteSegmentTransferTracker.Stats transferTrackerStats = transferTracker.stats(); + assertEquals(transferTracker.getShardId(), transferTrackerStats.shardId); + assertTrue(Math.abs(transferTracker.getTimeMsLag() - transferTrackerStats.refreshTimeLagMs) <= 20); + assertEquals(transferTracker.getLocalRefreshSeqNo(), (int) transferTrackerStats.localRefreshNumber); + assertEquals(transferTracker.getRemoteRefreshSeqNo(), (int) transferTrackerStats.remoteRefreshNumber); + assertEquals(transferTracker.getBytesLag(), (int) transferTrackerStats.bytesLag); + assertEquals(transferTracker.getRejectionCount(), (int) transferTrackerStats.rejectionCount); + assertEquals(transferTracker.getConsecutiveFailureCount(), (int) transferTrackerStats.consecutiveFailuresCount); + assertEquals(transferTracker.getUploadBytesStarted(), (int) transferTrackerStats.uploadBytesStarted); + assertEquals(transferTracker.getUploadBytesSucceeded(), (int) transferTrackerStats.uploadBytesSucceeded); + assertEquals(transferTracker.getUploadBytesFailed(), (int) transferTrackerStats.uploadBytesFailed); + assertEquals(transferTracker.getUploadBytesMovingAverage(), transferTrackerStats.uploadBytesMovingAverage, 0); + assertEquals(transferTracker.getUploadBytesPerSecMovingAverage(), transferTrackerStats.uploadBytesPerSecMovingAverage, 0); + assertEquals(transferTracker.getUploadTimeMovingAverage(), transferTrackerStats.uploadTimeMovingAverage, 0); + assertEquals(transferTracker.getTotalUploadsStarted(), (int) transferTrackerStats.totalUploadsStarted); + assertEquals(transferTracker.getTotalUploadsSucceeded(), (int) transferTrackerStats.totalUploadsSucceeded); + assertEquals(transferTracker.getTotalUploadsFailed(), (int) transferTrackerStats.totalUploadsFailed); + } + + /** + * Tests that serialization and deserialization of the RemoteSegmentTransferTracker.Stats object work correctly. + * This comes into play during internode data transfer.
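+ * The deserialized copy is expected to match the original stats object field by field.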
+ */ + public void testStatsObjectCreationViaStream() throws IOException { + transferTracker = constructTracker(); + RemoteSegmentTransferTracker.Stats transferTrackerStats = transferTracker.stats(); + try (BytesStreamOutput out = new BytesStreamOutput()) { + transferTrackerStats.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + RemoteSegmentTransferTracker.Stats deserializedStats = new RemoteSegmentTransferTracker.Stats(in); + assertEquals(deserializedStats.shardId, transferTrackerStats.shardId); + assertEquals((int) deserializedStats.refreshTimeLagMs, (int) transferTrackerStats.refreshTimeLagMs); + assertEquals((int) deserializedStats.localRefreshNumber, (int) transferTrackerStats.localRefreshNumber); + assertEquals((int) deserializedStats.remoteRefreshNumber, (int) transferTrackerStats.remoteRefreshNumber); + assertEquals((int) deserializedStats.bytesLag, (int) transferTrackerStats.bytesLag); + assertEquals((int) deserializedStats.rejectionCount, (int) transferTrackerStats.rejectionCount); + assertEquals((int) deserializedStats.consecutiveFailuresCount, (int) transferTrackerStats.consecutiveFailuresCount); + assertEquals((int) deserializedStats.uploadBytesStarted, (int) transferTrackerStats.uploadBytesStarted); + assertEquals((int) deserializedStats.uploadBytesSucceeded, (int) transferTrackerStats.uploadBytesSucceeded); + assertEquals((int) deserializedStats.uploadBytesFailed, (int) transferTrackerStats.uploadBytesFailed); + assertEquals((int) deserializedStats.uploadBytesMovingAverage, transferTrackerStats.uploadBytesMovingAverage, 0); + assertEquals( + (int) deserializedStats.uploadBytesPerSecMovingAverage, + transferTrackerStats.uploadBytesPerSecMovingAverage, + 0 + ); + assertEquals((int) deserializedStats.uploadTimeMovingAverage, transferTrackerStats.uploadTimeMovingAverage, 0); + assertEquals((int) deserializedStats.totalUploadsStarted, (int) transferTrackerStats.totalUploadsStarted); + assertEquals((int) deserializedStats.totalUploadsSucceeded, (int) transferTrackerStats.totalUploadsSucceeded); + assertEquals((int) deserializedStats.totalUploadsFailed, (int) transferTrackerStats.totalUploadsFailed); + assertEquals( + (int) deserializedStats.directoryFileTransferTrackerStats.transferredBytesStarted, + (int) transferTrackerStats.directoryFileTransferTrackerStats.transferredBytesStarted + ); + assertEquals( + (int) deserializedStats.directoryFileTransferTrackerStats.transferredBytesSucceeded, + (int) transferTrackerStats.directoryFileTransferTrackerStats.transferredBytesSucceeded + ); + assertEquals( + (int) deserializedStats.directoryFileTransferTrackerStats.transferredBytesPerSecMovingAverage, + (int) transferTrackerStats.directoryFileTransferTrackerStats.transferredBytesPerSecMovingAverage + ); + } + } + } + + private RemoteSegmentTransferTracker constructTracker() { + RemoteSegmentTransferTracker transferTracker = new RemoteSegmentTransferTracker( + shardId, + new DirectoryFileTransferTracker(), + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() + ); + transferTracker.incrementTotalUploadsStarted(); + transferTracker.incrementTotalUploadsFailed(); + transferTracker.updateUploadTimeMovingAverage(currentTimeMsUsingSystemNanos() + randomIntBetween(10, 100)); + transferTracker.updateUploadBytesMovingAverage(99); + transferTracker.updateRemoteRefreshTimeMs(currentTimeMsUsingSystemNanos() + randomIntBetween(10, 100)); + transferTracker.incrementRejectionCount(); + transferTracker.getDirectoryFileTransferTracker().addTransferredBytesStarted(10); + 
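// Seed download-side stats as well so the Stats round-trip tests have non-zero values to compare +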
transferTracker.getDirectoryFileTransferTracker().addTransferredBytesSucceeded(10, System.currentTimeMillis()); + transferTracker.getDirectoryFileTransferTracker().addTransferredBytesPerSec(5); + return transferTracker; + } +} diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java new file mode 100644 index 0000000000000..9d00cf9f2be46 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java @@ -0,0 +1,150 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.remote; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.IntStream; + +import static org.opensearch.index.remote.RemoteSegmentTransferTracker.currentTimeMsUsingSystemNanos; +import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexShard; + +public class RemoteStorePressureServiceTests extends OpenSearchTestCase { + + private ClusterService clusterService; + + private ThreadPool threadPool; + + private ShardId shardId; + + private RemoteStorePressureService pressureService; + + private RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory; + + @Override + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool("remote_refresh_segment_pressure_settings_test"); + clusterService = new ClusterService( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ); + shardId = new ShardId("index", "uuid", 0); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdownNow(); + } + + public void testIsSegmentsUploadBackpressureEnabled() { + remoteStoreStatsTrackerFactory = new RemoteStoreStatsTrackerFactory(clusterService, Settings.EMPTY); + pressureService = new RemoteStorePressureService(clusterService, Settings.EMPTY, remoteStoreStatsTrackerFactory); + assertTrue(pressureService.isSegmentsUploadBackpressureEnabled()); + + Settings newSettings = Settings.builder() + .put(RemoteStorePressureSettings.REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED.getKey(), "true") + .build(); + clusterService.getClusterSettings().applySettings(newSettings); + + assertTrue(pressureService.isSegmentsUploadBackpressureEnabled()); + } + + public void testValidateSegmentUploadLag() throws InterruptedException { + // Create the pressure tracker + IndexShard indexShard = createIndexShard(shardId, true); + remoteStoreStatsTrackerFactory = new RemoteStoreStatsTrackerFactory(clusterService, Settings.EMPTY); + pressureService = new RemoteStorePressureService(clusterService, Settings.EMPTY, remoteStoreStatsTrackerFactory); 
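+ // Registering the shard with the factory creates the per-shard transfer tracker fetched below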
+ remoteStoreStatsTrackerFactory.afterIndexShardCreated(indexShard); + + RemoteSegmentTransferTracker pressureTracker = remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(shardId); + pressureTracker.updateLocalRefreshSeqNo(6); + + // 1. time lag more than dynamic threshold + pressureTracker.updateRemoteRefreshSeqNo(3); + AtomicLong sum = new AtomicLong(); + IntStream.range(0, 20).forEach(i -> { + pressureTracker.updateUploadTimeMovingAverage(i); + sum.addAndGet(i); + }); + double avg = (double) sum.get() / 20; + + // We run this to ensure that the local and remote refresh time are no longer the same + while (pressureTracker.getLocalRefreshTimeMs() == currentTimeMsUsingSystemNanos()) { + Thread.sleep(10); + } + long localRefreshTimeMs = currentTimeMsUsingSystemNanos(); + pressureTracker.updateLocalRefreshTimeMs(localRefreshTimeMs); + + while (currentTimeMsUsingSystemNanos() - localRefreshTimeMs <= 20 * avg) { + Thread.sleep((long) (4 * avg)); + } + pressureTracker.updateLatestLocalFileNameLengthMap(List.of("test"), k -> 1L); + Exception e = assertThrows(OpenSearchRejectedExecutionException.class, () -> pressureService.validateSegmentsUploadLag(shardId)); + String regex = "^rejected execution on primary shard:\\[index]\\[0] due to remote segments lagging behind " + "local segments.time_lag:[0-9]{2,3} ms dynamic_time_lag_threshold:95\\.0 ms$"; + Pattern pattern = Pattern.compile(regex); + Matcher matcher = pattern.matcher(e.getMessage()); + assertTrue(matcher.matches()); + + pressureTracker.updateRemoteRefreshTimeMs(pressureTracker.getLocalRefreshTimeMs()); + pressureTracker.updateLocalRefreshTimeMs(currentTimeMsUsingSystemNanos()); + Thread.sleep((long) (2 * avg)); + pressureService.validateSegmentsUploadLag(shardId); + + // 2. bytes lag more than dynamic threshold + sum.set(0); + IntStream.range(0, 20).forEach(i -> { + pressureTracker.updateUploadBytesMovingAverage(i); + sum.addAndGet(i); + }); + avg = (double) sum.get() / 20; + Map<String, Long> nameSizeMap = new HashMap<>(); + nameSizeMap.put("a", (long) (12 * avg)); + pressureTracker.updateLatestLocalFileNameLengthMap(nameSizeMap.keySet(), nameSizeMap::get); + e = assertThrows(OpenSearchRejectedExecutionException.class, () -> pressureService.validateSegmentsUploadLag(shardId)); + assertTrue(e.getMessage().contains("due to remote segments lagging behind local segments")); + assertTrue(e.getMessage().contains("bytes_lag:114 dynamic_bytes_lag_threshold:95.0")); + + nameSizeMap.clear(); + nameSizeMap.put("b", (long) (2 * avg)); + pressureTracker.updateLatestLocalFileNameLengthMap(nameSizeMap.keySet(), nameSizeMap::get); + pressureService.validateSegmentsUploadLag(shardId); + + // 3. Consecutive failures more than the limit + IntStream.range(0, 5).forEach(ignore -> pressureTracker.incrementTotalUploadsStarted()); + IntStream.range(0, 5).forEach(ignore -> pressureTracker.incrementTotalUploadsFailed()); + pressureService.validateSegmentsUploadLag(shardId); + pressureTracker.incrementTotalUploadsStarted(); + pressureTracker.incrementTotalUploadsFailed(); + e = assertThrows(OpenSearchRejectedExecutionException.class, () -> pressureService.validateSegmentsUploadLag(shardId)); + assertTrue(e.getMessage().contains("due to remote segments lagging behind local segments")); + assertTrue(e.getMessage().contains("failure_streak_count:6 min_consecutive_failure_threshold:5")); + pressureTracker.incrementTotalUploadsStarted(); + pressureTracker.incrementTotalUploadsSucceeded(); + pressureService.validateSegmentsUploadLag(shardId); + } + +} diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureSettingsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureSettingsTests.java new file mode 100644 index 0000000000000..064c6c10eba02 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureSettingsTests.java @@ -0,0 +1,150 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.remote; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +import static org.mockito.Mockito.mock; + +public class RemoteStorePressureSettingsTests extends OpenSearchTestCase { + + private ClusterService clusterService; + + private ThreadPool threadPool; + + @Override + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool("remote_refresh_segment_pressure_settings_test"); + clusterService = new ClusterService( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdownNow(); + } + + public void testGetDefaultSettings() { + RemoteStorePressureSettings pressureSettings = new RemoteStorePressureSettings( + clusterService, + Settings.EMPTY, + mock(RemoteStorePressureService.class) + ); + + // Check remote refresh segment pressure is enabled by default + assertTrue(pressureSettings.isRemoteRefreshSegmentPressureEnabled()); + + // Check bytes lag variance threshold default value + assertEquals(10.0, pressureSettings.getBytesLagVarianceFactor(), 0.0d); + + // Check time lag variance threshold default value + assertEquals(10.0, pressureSettings.getUploadTimeLagVarianceFactor(), 0.0d); + + // Check minimum consecutive failures limit default value + assertEquals(5, pressureSettings.getMinConsecutiveFailuresLimit()); + } + + public void testGetConfiguredSettings() { + Settings settings = Settings.builder() + .put(RemoteStorePressureSettings.REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED.getKey(), true) + .put(RemoteStorePressureSettings.BYTES_LAG_VARIANCE_FACTOR.getKey(), 50.0) + .put(RemoteStorePressureSettings.UPLOAD_TIME_LAG_VARIANCE_FACTOR.getKey(), 60.0) + .put(RemoteStorePressureSettings.MIN_CONSECUTIVE_FAILURES_LIMIT.getKey(), 121) + .build(); + RemoteStorePressureSettings pressureSettings = new RemoteStorePressureSettings( + clusterService, + settings, + mock(RemoteStorePressureService.class) + ); + + // Check remote refresh segment pressure enabled is true + assertTrue(pressureSettings.isRemoteRefreshSegmentPressureEnabled()); + + // Check bytes lag variance threshold configured value + assertEquals(50.0, pressureSettings.getBytesLagVarianceFactor(), 0.0d); + + // Check time lag variance threshold configured value + assertEquals(60.0, pressureSettings.getUploadTimeLagVarianceFactor(), 0.0d); + + // Check minimum consecutive failures limit configured value + assertEquals(121, pressureSettings.getMinConsecutiveFailuresLimit()); + } + + public void testUpdateAfterGetDefaultSettings() { + RemoteStorePressureSettings pressureSettings = new RemoteStorePressureSettings( + clusterService, + Settings.EMPTY, + mock(RemoteStorePressureService.class) + ); + + Settings newSettings = Settings.builder() + .put(RemoteStorePressureSettings.REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED.getKey(), true) + .put(RemoteStorePressureSettings.BYTES_LAG_VARIANCE_FACTOR.getKey(), 50.0) + .put(RemoteStorePressureSettings.UPLOAD_TIME_LAG_VARIANCE_FACTOR.getKey(), 60.0) + .put(RemoteStorePressureSettings.MIN_CONSECUTIVE_FAILURES_LIMIT.getKey(), 121) + .build(); + clusterService.getClusterSettings().applySettings(newSettings); + + // Check remote refresh segment pressure is enabled after the settings update + assertTrue(pressureSettings.isRemoteRefreshSegmentPressureEnabled()); + + // Check bytes lag variance threshold updated + assertEquals(50.0, pressureSettings.getBytesLagVarianceFactor(), 0.0d); + + // Check time lag variance threshold updated + assertEquals(60.0, pressureSettings.getUploadTimeLagVarianceFactor(), 0.0d); + + // Check minimum consecutive failures limit updated + assertEquals(121, pressureSettings.getMinConsecutiveFailuresLimit()); + } + + public void testUpdateAfterGetConfiguredSettings() { + Settings settings = Settings.builder() + .put(RemoteStorePressureSettings.REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED.getKey(), true) + .put(RemoteStorePressureSettings.BYTES_LAG_VARIANCE_FACTOR.getKey(), 50.0) + .put(RemoteStorePressureSettings.UPLOAD_TIME_LAG_VARIANCE_FACTOR.getKey(), 60.0) + .put(RemoteStorePressureSettings.MIN_CONSECUTIVE_FAILURES_LIMIT.getKey(), 121) + .build(); + RemoteStorePressureSettings pressureSettings = new RemoteStorePressureSettings( + clusterService, + settings, + mock(RemoteStorePressureService.class) + ); + + Settings newSettings = Settings.builder() + .put(RemoteStorePressureSettings.BYTES_LAG_VARIANCE_FACTOR.getKey(), 40.0) + .put(RemoteStorePressureSettings.UPLOAD_TIME_LAG_VARIANCE_FACTOR.getKey(), 50.0) + .put(RemoteStorePressureSettings.MIN_CONSECUTIVE_FAILURES_LIMIT.getKey(), 111) + .build(); + + clusterService.getClusterSettings().applySettings(newSettings); + + // Check remote refresh segment pressure remains enabled + assertTrue(pressureSettings.isRemoteRefreshSegmentPressureEnabled()); + + // Check bytes lag variance threshold updated + assertEquals(40.0, pressureSettings.getBytesLagVarianceFactor(), 0.0d); + + // Check time lag variance threshold updated + assertEquals(50.0, pressureSettings.getUploadTimeLagVarianceFactor(), 0.0d); + + // Check minimum consecutive failures limit updated + assertEquals(111, pressureSettings.getMinConsecutiveFailuresLimit()); + } +} diff --git
a/server/src/test/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactoryTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactoryTests.java new file mode 100644 index 0000000000000..c300f316ac633 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactoryTests.java @@ -0,0 +1,119 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.remote; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexShard; + +public class RemoteStoreStatsTrackerFactoryTests extends OpenSearchTestCase { + private ThreadPool threadPool; + private ClusterService clusterService; + private Settings settings; + private ShardId shardId; + private IndexShard indexShard; + private RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory; + + @Override + public void setUp() throws Exception { + super.setUp(); + shardId = new ShardId("index", "uuid", 0); + indexShard = createIndexShard(shardId, true); + threadPool = new TestThreadPool(getTestName()); + settings = Settings.builder() + .put( + RemoteStoreStatsTrackerFactory.MOVING_AVERAGE_WINDOW_SIZE.getKey(), + RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE + ) + .build(); + clusterService = new ClusterService(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool); + remoteStoreStatsTrackerFactory = new RemoteStoreStatsTrackerFactory(clusterService, settings); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdownNow(); + } + + public void testAfterIndexShardCreatedForRemoteBackedIndex() { + remoteStoreStatsTrackerFactory.afterIndexShardCreated(indexShard); + assertNotNull(remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(indexShard.shardId())); + } + + public void testAfterIndexShardCreatedForNonRemoteBackedIndex() { + indexShard = createIndexShard(shardId, false); + remoteStoreStatsTrackerFactory.afterIndexShardCreated(indexShard); + assertNull(remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(indexShard.shardId())); + } + + public void testAfterIndexShardClosed() { + remoteStoreStatsTrackerFactory.afterIndexShardCreated(indexShard); + assertNotNull(remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(shardId)); + remoteStoreStatsTrackerFactory.afterIndexShardClosed(shardId, indexShard, indexShard.indexSettings().getSettings()); + assertNull(remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(shardId)); + } + + public void testGetConfiguredSettings() { + assertEquals( + RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE, + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() + ); + } + + public void testInvalidMovingAverageWindowSize() { + Settings settings = Settings.builder() + .put( + RemoteStoreStatsTrackerFactory.MOVING_AVERAGE_WINDOW_SIZE.getKey(), + 
RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE - 1 + ) + .build(); + assertThrows( + "Failed to parse value", + IllegalArgumentException.class, + () -> new RemoteStoreStatsTrackerFactory( + new ClusterService(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool), + settings + ) + ); + } + + public void testUpdateAfterGetConfiguredSettings() { + assertEquals( + RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE, + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() + ); + + Settings newSettings = Settings.builder().put(RemoteStoreStatsTrackerFactory.MOVING_AVERAGE_WINDOW_SIZE.getKey(), 102).build(); + + clusterService.getClusterSettings().applySettings(newSettings); + + // Check moving average window size updated + assertEquals(102, remoteStoreStatsTrackerFactory.getMovingAverageWindowSize()); + } + + public void testGetDefaultSettings() { + remoteStoreStatsTrackerFactory = new RemoteStoreStatsTrackerFactory( + new ClusterService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool), + Settings.EMPTY + ); + // Check moving average window size default value + assertEquals( + RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE, + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() + ); + } +} diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreTestsHelper.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreTestsHelper.java new file mode 100644 index 0000000000000..e072d3037caad --- /dev/null +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreTestsHelper.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.index.remote; + +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.store.Store; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.test.IndexSettingsModule; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Helper functions for Remote Store tests + */ +public class RemoteStoreTestsHelper { + static IndexShard createIndexShard(ShardId shardId, boolean remoteStoreEnabled) { + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, String.valueOf(remoteStoreEnabled)) + .build(); + IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test_index", settings); + Store store = mock(Store.class); + IndexShard indexShard = mock(IndexShard.class); + when(indexShard.indexSettings()).thenReturn(indexSettings); + when(indexShard.shardId()).thenReturn(shardId); + when(indexShard.store()).thenReturn(store); + return indexShard; + } +} diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java index 9afa75dd601b2..d3c7d754d6b61 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java @@ -8,10 +8,85 @@ package org.opensearch.index.remote; +import org.opensearch.common.blobstore.BlobMetadata; +import org.opensearch.common.blobstore.support.PlainBlobMetadata; +import org.opensearch.index.store.RemoteSegmentStoreDirectory; +import org.opensearch.index.translog.transfer.TranslogTransferMetadata; import org.opensearch.test.OpenSearchTestCase; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.stream.Collectors; + +import static org.opensearch.index.remote.RemoteStoreUtils.verifyNoMultipleWriters; +import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX; +import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.SEPARATOR; +import static org.opensearch.index.translog.transfer.TranslogTransferMetadata.METADATA_SEPARATOR; + public class RemoteStoreUtilsTests extends OpenSearchTestCase { + private final String metadataFilename = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 12, + 23, + 34, + 1, + 1, + "node-1" + ); + + private final String metadataFilenameDup = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 12, + 23, + 34, + 2, + 1, + "node-2" + ); + private final String metadataFilename2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 12, + 13, + 34, + 1, + 1, + "node-1" + ); + + private final String oldMetadataFilename = getOldSegmentMetadataFilename(12, 23, 34, 1, 1); + + /* + Gives segment metadata filename for <2.11 version + */ + public static String getOldSegmentMetadataFilename( + long primaryTerm, + long generation, + long translogGeneration, + long uploadCounter, + int metadataVersion + ) { + return String.join( + SEPARATOR, + METADATA_PREFIX, + RemoteStoreUtils.invertLong(primaryTerm), + RemoteStoreUtils.invertLong(generation), 
+ RemoteStoreUtils.invertLong(translogGeneration), + RemoteStoreUtils.invertLong(uploadCounter), + RemoteStoreUtils.invertLong(System.currentTimeMillis()), + String.valueOf(metadataVersion) + ); + } + + public static String getOldTranslogMetadataFilename(long primaryTerm, long generation, int metadataVersion) { + return String.join( + METADATA_SEPARATOR, + METADATA_PREFIX, + RemoteStoreUtils.invertLong(primaryTerm), + RemoteStoreUtils.invertLong(generation), + RemoteStoreUtils.invertLong(System.currentTimeMillis()), + String.valueOf(metadataVersion) + ); + } + public void testInvertToStrInvalid() { assertThrows(IllegalArgumentException.class, () -> RemoteStoreUtils.invertLong(-1)); } @@ -60,4 +135,48 @@ public void testGetSegmentNameUnderscoreDelimiterOverrides() { public void testGetSegmentNameException() { assertThrows(IllegalArgumentException.class, () -> RemoteStoreUtils.getSegmentName("dvd")); } + + public void testVerifyMultipleWriters_Segment() { + List<String> mdFiles = new ArrayList<>(); + mdFiles.add(metadataFilename); + mdFiles.add(metadataFilename2); + mdFiles.add(oldMetadataFilename); + verifyNoMultipleWriters(mdFiles, RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen); + + mdFiles.add(metadataFilenameDup); + assertThrows( + IllegalStateException.class, + () -> verifyNoMultipleWriters(mdFiles, RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen) + ); + } + + public void testVerifyMultipleWriters_Translog() throws InterruptedException { + TranslogTransferMetadata tm = new TranslogTransferMetadata(1, 1, 1, 2, "node--1"); + String mdFilename = tm.getFileName(); + Thread.sleep(1); + TranslogTransferMetadata tm2 = new TranslogTransferMetadata(1, 1, 1, 2, "node--1"); + String mdFilename2 = tm2.getFileName(); + List<BlobMetadata> bmList = new LinkedList<>(); + bmList.add(new PlainBlobMetadata(mdFilename, 1)); + bmList.add(new PlainBlobMetadata(mdFilename2, 1)); + bmList.add(new PlainBlobMetadata(getOldTranslogMetadataFilename(1, 1, 1), 1)); + RemoteStoreUtils.verifyNoMultipleWriters( + bmList.stream().map(BlobMetadata::name).collect(Collectors.toList()), + TranslogTransferMetadata::getNodeIdByPrimaryTermAndGen + ); + + bmList = new LinkedList<>(); + bmList.add(new PlainBlobMetadata(mdFilename, 1)); + TranslogTransferMetadata tm3 = new TranslogTransferMetadata(1, 1, 1, 2, "node--2"); + bmList.add(new PlainBlobMetadata(tm3.getFileName(), 1)); + List<BlobMetadata> finalBmList = bmList; + assertThrows( + IllegalStateException.class, + () -> RemoteStoreUtils.verifyNoMultipleWriters( + finalBmList.stream().map(BlobMetadata::name).collect(Collectors.toList()), + TranslogTransferMetadata::getNodeIdByPrimaryTermAndGen + ) + ); + } + } diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteTranslogTransferTrackerTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteTranslogTransferTrackerTests.java new file mode 100644 index 0000000000000..6b6d388f725f7 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/remote/RemoteTranslogTransferTrackerTests.java @@ -0,0 +1,383 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.index.remote;
+
+import org.opensearch.common.io.stream.BytesStreamOutput;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.index.shard.ShardId;
+import org.opensearch.test.OpenSearchTestCase;
+import org.junit.Before;
+
+import java.io.IOException;
+
+public class RemoteTranslogTransferTrackerTests extends OpenSearchTestCase {
+    private ShardId shardId;
+    private RemoteTranslogTransferTracker tracker;
+
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        shardId = new ShardId("index", "uuid", 0);
+    }
+
+    @Before
+    public void initTracker() {
+        tracker = new RemoteTranslogTransferTracker(shardId, RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE);
+    }
+
+    public void testGetShardId() {
+        assertEquals(shardId, tracker.getShardId());
+    }
+
+    public void testAddUploadsStarted() {
+        populateUploadsStarted();
+    }
+
+    public void testAddUploadsFailed() {
+        populateUploadsStarted();
+        assertEquals(0L, tracker.getTotalUploadsFailed());
+        tracker.incrementTotalUploadsFailed();
+        assertEquals(1L, tracker.getTotalUploadsFailed());
+        tracker.incrementTotalUploadsFailed();
+        assertEquals(2L, tracker.getTotalUploadsFailed());
+    }
+
+    public void testInvalidAddUploadsFailed() {
+        populateUploadsStarted();
+        for (int i = 0; i < tracker.getTotalUploadsStarted(); i++) {
+            tracker.incrementTotalUploadsSucceeded();
+        }
+
+        AssertionError error = assertThrows(AssertionError.class, () -> tracker.incrementTotalUploadsFailed());
+        assertTrue(error.getMessage().contains("Sum of failure count ("));
+    }
+
+    public void testAddUploadsSucceeded() {
+        populateUploadsStarted();
+        assertEquals(0L, tracker.getTotalUploadsSucceeded());
+        tracker.incrementTotalUploadsSucceeded();
+        assertEquals(1L, tracker.getTotalUploadsSucceeded());
+        tracker.incrementTotalUploadsSucceeded();
+        assertEquals(2L, tracker.getTotalUploadsSucceeded());
+    }
+
+    public void testInvalidAddUploadsSucceeded() {
+        populateUploadsStarted();
+        for (int i = 0; i < tracker.getTotalUploadsStarted(); i++) {
+            tracker.incrementTotalUploadsFailed();
+        }
+
+        AssertionError error = assertThrows(AssertionError.class, () -> tracker.incrementTotalUploadsSucceeded());
+        assertTrue(error.getMessage().contains("Sum of failure count ("));
+    }
+
+    public void testAddUploadBytesStarted() {
+        populateUploadBytesStarted();
+    }
+
+    public void testAddUploadBytesFailed() {
+        populateUploadBytesStarted();
+        assertEquals(0L, tracker.getUploadBytesFailed());
+        long count1 = randomIntBetween(1, (int) tracker.getUploadBytesStarted() / 4);
+        tracker.addUploadBytesFailed(count1);
+        assertEquals(count1, tracker.getUploadBytesFailed());
+        long count2 = randomIntBetween(1, (int) tracker.getUploadBytesStarted() / 4);
+        tracker.addUploadBytesFailed(count2);
+        assertEquals(count1 + count2, tracker.getUploadBytesFailed());
+    }
+
+    public void testInvalidAddUploadBytesFailed() {
+        populateUploadBytesStarted();
+        tracker.addUploadBytesSucceeded(tracker.getUploadBytesStarted());
+        AssertionError error = assertThrows(AssertionError.class, () -> tracker.addUploadBytesFailed(1L));
+        assertTrue(error.getMessage().contains("Sum of failure count ("));
+    }
+
+    public void testAddUploadBytesSucceeded() {
+        populateUploadBytesStarted();
+        assertEquals(0L, tracker.getUploadBytesSucceeded());
+        long count1 = randomIntBetween(1, (int) tracker.getUploadBytesStarted() / 4);
+        tracker.addUploadBytesSucceeded(count1);
+        assertEquals(count1, tracker.getUploadBytesSucceeded());
+        long count2 = randomIntBetween(1, (int) tracker.getUploadBytesStarted() / 4);
+        tracker.addUploadBytesSucceeded(count2);
+        assertEquals(count1 + count2, tracker.getUploadBytesSucceeded());
+    }
+
+    public void testInvalidAddUploadBytesSucceeded() {
+        populateUploadBytesStarted();
+        tracker.addUploadBytesFailed(tracker.getUploadBytesStarted());
+        AssertionError error = assertThrows(AssertionError.class, () -> tracker.addUploadBytesSucceeded(1L));
+        assertTrue(error.getMessage().contains("Sum of failure count ("));
+    }
+
+    public void testAddUploadTimeInMillis() {
+        assertEquals(0L, tracker.getTotalUploadTimeInMillis());
+        int duration1 = randomIntBetween(10, 50);
+        tracker.addUploadTimeInMillis(duration1);
+        assertEquals(duration1, tracker.getTotalUploadTimeInMillis());
+        int duration2 = randomIntBetween(10, 50);
+        tracker.addUploadTimeInMillis(duration2);
+        assertEquals(duration1 + duration2, tracker.getTotalUploadTimeInMillis());
+    }
+
+    public void testSetLastSuccessfulUploadTimestamp() {
+        assertEquals(0, tracker.getLastSuccessfulUploadTimestamp());
+        long lastUploadTimestamp = System.currentTimeMillis() + randomIntBetween(10, 100);
+        tracker.setLastSuccessfulUploadTimestamp(lastUploadTimestamp);
+        assertEquals(lastUploadTimestamp, tracker.getLastSuccessfulUploadTimestamp());
+    }
+
+    public void testUpdateUploadBytesMovingAverage() {
+        int movingAverageWindowSize = randomIntBetween(
+            RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE,
+            RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE + 5
+        );
+        tracker = new RemoteTranslogTransferTracker(shardId, movingAverageWindowSize);
+        assertFalse(tracker.isUploadBytesMovingAverageReady());
+
+        long sum = 0;
+        for (int i = 1; i < movingAverageWindowSize; i++) {
+            tracker.updateUploadBytesMovingAverage(i);
+            sum += i;
+            assertFalse(tracker.isUploadBytesMovingAverageReady());
+            assertEquals((double) sum / i, tracker.getUploadBytesMovingAverage(), 0.0d);
+        }
+
+        tracker.updateUploadBytesMovingAverage(movingAverageWindowSize);
+        sum += movingAverageWindowSize;
+        assertTrue(tracker.isUploadBytesMovingAverageReady());
+        assertEquals((double) sum / movingAverageWindowSize, tracker.getUploadBytesMovingAverage(), 0.0d);
+
+        tracker.updateUploadBytesMovingAverage(100);
+        sum = sum + 100 - 1;
+        assertEquals((double) sum / movingAverageWindowSize, tracker.getUploadBytesMovingAverage(), 0.0d);
+    }
+
+    public void testUpdateUploadBytesPerSecMovingAverage() {
+        int movingAverageWindowSize = randomIntBetween(
+            RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE,
+            RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE + 5
+        );
+        tracker = new RemoteTranslogTransferTracker(shardId, movingAverageWindowSize);
+        assertFalse(tracker.isUploadBytesPerSecMovingAverageReady());
+
+        long sum = 0;
+        for (int i = 1; i < movingAverageWindowSize; i++) {
+            tracker.updateUploadBytesPerSecMovingAverage(i);
+            sum += i;
+            assertFalse(tracker.isUploadBytesPerSecMovingAverageReady());
+            assertEquals((double) sum / i, tracker.getUploadBytesPerSecMovingAverage(), 0.0d);
+        }
+
+        tracker.updateUploadBytesPerSecMovingAverage(movingAverageWindowSize);
+        sum += movingAverageWindowSize;
+        assertTrue(tracker.isUploadBytesPerSecMovingAverageReady());
+        assertEquals((double) sum / movingAverageWindowSize, tracker.getUploadBytesPerSecMovingAverage(), 0.0d);
+
+        tracker.updateUploadBytesPerSecMovingAverage(100);
+        sum = sum + 100 - 1;
+        assertEquals((double) sum / movingAverageWindowSize, tracker.getUploadBytesPerSecMovingAverage(), 0.0d);
+    }
+
+    public void testUpdateUploadTimeMovingAverage() {
+        int movingAverageWindowSize = randomIntBetween(
+            RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE,
+            RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE + 5
+        );
+        tracker = new RemoteTranslogTransferTracker(shardId, movingAverageWindowSize);
+        assertFalse(tracker.isUploadTimeMovingAverageReady());
+
+        long sum = 0;
+        for (int i = 1; i < movingAverageWindowSize; i++) {
+            tracker.updateUploadTimeMovingAverage(i);
+            sum += i;
+            assertFalse(tracker.isUploadTimeMovingAverageReady());
+            assertEquals((double) sum / i, tracker.getUploadTimeMovingAverage(), 0.0d);
+        }
+
+        tracker.updateUploadTimeMovingAverage(movingAverageWindowSize);
+        sum += movingAverageWindowSize;
+        assertTrue(tracker.isUploadTimeMovingAverageReady());
+        assertEquals((double) sum / movingAverageWindowSize, tracker.getUploadTimeMovingAverage(), 0.0d);
+
+        tracker.updateUploadTimeMovingAverage(100);
+        sum = sum + 100 - 1;
+        assertEquals((double) sum / movingAverageWindowSize, tracker.getUploadTimeMovingAverage(), 0.0d);
+    }
+
+    public void testAddDownloadsSucceeded() {
+        assertEquals(0L, tracker.getTotalDownloadsSucceeded());
+        tracker.incrementDownloadsSucceeded();
+        assertEquals(1L, tracker.getTotalDownloadsSucceeded());
+        tracker.incrementDownloadsSucceeded();
+        assertEquals(2L, tracker.getTotalDownloadsSucceeded());
+    }
+
+    public void testAddDownloadBytesSucceeded() {
+        assertEquals(0L, tracker.getDownloadBytesSucceeded());
+        long count1 = randomIntBetween(1, 500);
+        tracker.addDownloadBytesSucceeded(count1);
+        assertEquals(count1, tracker.getDownloadBytesSucceeded());
+        long count2 = randomIntBetween(1, 500);
+        tracker.addDownloadBytesSucceeded(count2);
+        assertEquals(count1 + count2, tracker.getDownloadBytesSucceeded());
+    }
+
+    public void testAddDownloadTimeInMillis() {
+        assertEquals(0L, tracker.getTotalDownloadTimeInMillis());
+        int duration1 = randomIntBetween(10, 50);
+        tracker.addDownloadTimeInMillis(duration1);
+        assertEquals(duration1, tracker.getTotalDownloadTimeInMillis());
+        int duration2 = randomIntBetween(10, 50);
+        tracker.addDownloadTimeInMillis(duration2);
+        assertEquals(duration1 + duration2, tracker.getTotalDownloadTimeInMillis());
+    }
+
+    public void testSetLastSuccessfulDownloadTimestamp() {
+        assertEquals(0, tracker.getLastSuccessfulDownloadTimestamp());
+        long lastSuccessfulDownloadTimestamp = System.currentTimeMillis() + randomIntBetween(10, 100);
+        tracker.setLastSuccessfulDownloadTimestamp(lastSuccessfulDownloadTimestamp);
+        assertEquals(lastSuccessfulDownloadTimestamp, tracker.getLastSuccessfulDownloadTimestamp());
+    }
+
+    public void testUpdateDownloadBytesMovingAverage() {
+        int movingAverageWindowSize = randomIntBetween(
+            RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE,
+            RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE + 5
+        );
+        tracker = new RemoteTranslogTransferTracker(shardId, movingAverageWindowSize);
+        assertFalse(tracker.isDownloadBytesMovingAverageReady());
+
+        long sum = 0;
+        for (int i = 1; i < movingAverageWindowSize; i++) {
+            tracker.updateDownloadBytesMovingAverage(i);
+            sum += i;
+            assertFalse(tracker.isDownloadBytesMovingAverageReady());
+            assertEquals((double) sum / i, tracker.getDownloadBytesMovingAverage(), 0.0d);
+        }
+
+        tracker.updateDownloadBytesMovingAverage(movingAverageWindowSize);
+        sum += movingAverageWindowSize;
+        assertTrue(tracker.isDownloadBytesMovingAverageReady());
+        assertEquals((double) sum / movingAverageWindowSize, tracker.getDownloadBytesMovingAverage(), 0.0d);
+
+        tracker.updateDownloadBytesMovingAverage(100);
+        sum = sum + 100 - 1;
+        assertEquals((double) sum / movingAverageWindowSize, tracker.getDownloadBytesMovingAverage(), 0.0d);
+    }
+
+    public void testUpdateDownloadBytesPerSecMovingAverage() {
+        int movingAverageWindowSize = randomIntBetween(
+            RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE,
+            RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE + 5
+        );
+        tracker = new RemoteTranslogTransferTracker(shardId, movingAverageWindowSize);
+        assertFalse(tracker.isDownloadBytesPerSecMovingAverageReady());
+
+        long sum = 0;
+        for (int i = 1; i < movingAverageWindowSize; i++) {
+            tracker.updateDownloadBytesPerSecMovingAverage(i);
+            sum += i;
+            assertFalse(tracker.isDownloadBytesPerSecMovingAverageReady());
+            assertEquals((double) sum / i, tracker.getDownloadBytesPerSecMovingAverage(), 0.0d);
+        }
+
+        tracker.updateDownloadBytesPerSecMovingAverage(movingAverageWindowSize);
+        sum += movingAverageWindowSize;
+        assertTrue(tracker.isDownloadBytesPerSecMovingAverageReady());
+        assertEquals((double) sum / movingAverageWindowSize, tracker.getDownloadBytesPerSecMovingAverage(), 0.0d);
+
+        tracker.updateDownloadBytesPerSecMovingAverage(100);
+        sum = sum + 100 - 1;
+        assertEquals((double) sum / movingAverageWindowSize, tracker.getDownloadBytesPerSecMovingAverage(), 0.0d);
+    }
+
+    public void testUpdateDownloadTimeMovingAverage() {
+        int movingAverageWindowSize = randomIntBetween(
+            RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE,
+            RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE + 5
+        );
+        tracker = new RemoteTranslogTransferTracker(shardId, movingAverageWindowSize);
+        assertFalse(tracker.isDownloadTimeMovingAverageReady());
+
+        long sum = 0;
+        for (int i = 1; i < movingAverageWindowSize; i++) {
+            tracker.updateDownloadTimeMovingAverage(i);
+            sum += i;
+            assertFalse(tracker.isDownloadTimeMovingAverageReady());
+            assertEquals((double) sum / i, tracker.getDownloadTimeMovingAverage(), 0.0d);
+        }
+
+        tracker.updateDownloadTimeMovingAverage(movingAverageWindowSize);
+        sum += movingAverageWindowSize;
+        assertTrue(tracker.isDownloadTimeMovingAverageReady());
+        assertEquals((double) sum / movingAverageWindowSize, tracker.getDownloadTimeMovingAverage(), 0.0d);
+
+        tracker.updateDownloadTimeMovingAverage(100);
+        sum = sum + 100 - 1;
+        assertEquals((double) sum / movingAverageWindowSize, tracker.getDownloadTimeMovingAverage(), 0.0d);
+    }
+
+    public void testStatsObjectCreation() {
+        populateDummyStats();
+        RemoteTranslogTransferTracker.Stats actualStats = tracker.stats();
+        assertTrue(tracker.hasSameStatsAs(actualStats));
+    }
+
+    public void testStatsObjectCreationViaStream() throws IOException {
+        populateDummyStats();
+        RemoteTranslogTransferTracker.Stats expectedStats = tracker.stats();
+        try (BytesStreamOutput out = new BytesStreamOutput()) {
+            expectedStats.writeTo(out);
+            try (StreamInput in = out.bytes().streamInput()) {
+                RemoteTranslogTransferTracker.Stats deserializedStats = new RemoteTranslogTransferTracker.Stats(in);
+                assertTrue(tracker.hasSameStatsAs(deserializedStats));
+            }
+        }
+    }
+
+    private void populateUploadsStarted() {
+        assertEquals(0L, tracker.getTotalUploadsStarted());
+        tracker.incrementTotalUploadsStarted();
+        assertEquals(1L, tracker.getTotalUploadsStarted());
+        tracker.incrementTotalUploadsStarted();
+        assertEquals(2L, tracker.getTotalUploadsStarted());
+    }
+
+    private void populateUploadBytesStarted() {
+        assertEquals(0L, tracker.getUploadBytesStarted());
+        long count1 = randomIntBetween(500, 1000);
+        tracker.addUploadBytesStarted(count1);
+        assertEquals(count1, tracker.getUploadBytesStarted());
+        long count2 = randomIntBetween(500, 1000);
+        tracker.addUploadBytesStarted(count2);
+        assertEquals(count1 + count2, tracker.getUploadBytesStarted());
+    }
+
+    private void populateDummyStats() {
+        int startedBytesUpload = randomIntBetween(10, 100);
+        tracker.addUploadBytesStarted(startedBytesUpload);
+        tracker.addUploadBytesFailed(randomIntBetween(1, startedBytesUpload / 2));
+        tracker.addUploadBytesSucceeded(randomIntBetween(1, startedBytesUpload / 2));
+
+        tracker.addUploadTimeInMillis(randomIntBetween(10, 100));
+        tracker.setLastSuccessfulUploadTimestamp(System.currentTimeMillis() + randomIntBetween(10, 100));
+
+        tracker.incrementTotalUploadsStarted();
+        tracker.incrementTotalUploadsStarted();
+        tracker.incrementTotalUploadsFailed();
+        tracker.incrementTotalUploadsSucceeded();
+
+        tracker.addDownloadBytesSucceeded(randomIntBetween(10, 100));
+        tracker.addDownloadTimeInMillis(randomIntBetween(10, 100));
+        tracker.setLastSuccessfulDownloadTimestamp(System.currentTimeMillis() + randomIntBetween(10, 100));
+        tracker.incrementDownloadsSucceeded();
+    }
+}
diff --git a/server/src/test/java/org/opensearch/index/replication/IndexLevelReplicationTests.java b/server/src/test/java/org/opensearch/index/replication/IndexLevelReplicationTests.java
index d262b5abec0f3..33e08a482b9c3 100644
--- a/server/src/test/java/org/opensearch/index/replication/IndexLevelReplicationTests.java
+++ b/server/src/test/java/org/opensearch/index/replication/IndexLevelReplicationTests.java
@@ -37,7 +37,6 @@
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopDocs;
 import org.opensearch.Version;
-import org.opensearch.action.ActionListener;
 import org.opensearch.action.DocWriteResponse;
 import org.opensearch.action.bulk.BulkItemResponse;
 import org.opensearch.action.bulk.BulkShardRequest;
@@ -48,7 +47,8 @@
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.TimeValue;
 import org.opensearch.common.util.iterable.Iterables;
-import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.index.IndexSettings;
 import org.opensearch.index.engine.Engine;
 import org.opensearch.index.engine.EngineFactory;
@@ -173,7 +173,7 @@ public void cleanFiles(
     public void testRetryAppendOnlyAfterRecovering() throws Exception {
         try (ReplicationGroup shards = createGroup(0)) {
             shards.startAll();
-            final IndexRequest originalRequest = new IndexRequest(index.getName()).source("{}", XContentType.JSON);
+            final IndexRequest originalRequest = new IndexRequest(index.getName()).source("{}", MediaTypeRegistry.JSON);
             originalRequest.process(Version.CURRENT, null, index.getName());
             final IndexRequest retryRequest = copyIndexRequest(originalRequest);
             retryRequest.onRetry();
@@ -214,7 +214,7 @@ public IndexResult index(Index op) throws IOException {
         }) {
             shards.startAll();
             Thread thread = new Thread(() -> {
-                IndexRequest indexRequest = new IndexRequest(index.getName()).source("{}", XContentType.JSON);
+                IndexRequest indexRequest = new IndexRequest(index.getName()).source("{}", MediaTypeRegistry.JSON);
                 try {
                     shards.index(indexRequest);
                 } catch (Exception e) {
@@ -244,7 +244,7 @@ public void prepareForTranslogOperations(int totalTranslogOps, ActionListener<Vo
     public void testInheritMaxValidAutoIDTimestampOnRecovery() throws Exception {
         try (ReplicationGroup shards = createGroup(0)) {
             shards.startAll();
-            final IndexRequest indexRequest = new IndexRequest(index.getName()).source("{}", XContentType.JSON);
+            final IndexRequest indexRequest = new IndexRequest(index.getName()).source("{}", MediaTypeRegistry.JSON);
             indexRequest.onRetry(); // force an update of the timestamp
             final BulkItemResponse response = shards.index(indexRequest);
             assertEquals(DocWriteResponse.Result.CREATED, response.getResponse().getResult());
@@ -320,7 +320,7 @@ public void testConflictingOpsOnReplica() throws Exception {
             shards.startAll();
             List<IndexShard> replicas = shards.getReplicas();
             IndexShard replica1 = replicas.get(0);
-            IndexRequest indexRequest = new IndexRequest(index.getName()).id("1").source("{ \"f\": \"1\"}", XContentType.JSON);
+            IndexRequest indexRequest = new IndexRequest(index.getName()).id("1").source("{ \"f\": \"1\"}", MediaTypeRegistry.JSON);
             logger.info("--> isolated replica " + replica1.routingEntry());
             BulkShardRequest replicationRequest = indexOnPrimary(indexRequest, shards.getPrimary());
             for (int i = 1; i < replicas.size(); i++) {
@@ -329,7 +329,7 @@ public void testConflictingOpsOnReplica() throws Exception {
             logger.info("--> promoting replica to primary " + replica1.routingEntry());
             shards.promoteReplicaToPrimary(replica1).get();
 
-            indexRequest = new IndexRequest(index.getName()).id("1").source("{ \"f\": \"2\"}", XContentType.JSON);
+            indexRequest = new IndexRequest(index.getName()).id("1").source("{ \"f\": \"2\"}", MediaTypeRegistry.JSON);
             shards.index(indexRequest);
             shards.refresh("test");
             for (IndexShard shard : shards) {
@@ -356,7 +356,7 @@ public void testReplicaTermIncrementWithConcurrentPrimaryPromotion() throws Exce
             assertEquals(primaryPrimaryTerm, replica2.getPendingPrimaryTerm());
 
-            IndexRequest indexRequest = new IndexRequest(index.getName()).id("1").source("{ \"f\": \"1\"}", XContentType.JSON);
+            IndexRequest indexRequest = new IndexRequest(index.getName()).id("1").source("{ \"f\": \"1\"}", MediaTypeRegistry.JSON);
             BulkShardRequest replicationRequest = indexOnPrimary(indexRequest, replica1);
 
             CyclicBarrier barrier = new CyclicBarrier(2);
@@ -396,7 +396,7 @@ public void testReplicaOperationWithConcurrentPrimaryPromotion() throws Exceptio
         try (ReplicationGroup shards = new ReplicationGroup(buildIndexMetadata(1, mappings))) {
             shards.startAll();
             long primaryPrimaryTerm = shards.getPrimary().getPendingPrimaryTerm();
-            IndexRequest indexRequest = new IndexRequest(index.getName()).id("1").source("{ \"f\": \"1\"}", XContentType.JSON);
+            IndexRequest indexRequest = new IndexRequest(index.getName()).id("1").source("{ \"f\": \"1\"}", MediaTypeRegistry.JSON);
             BulkShardRequest replicationRequest = indexOnPrimary(indexRequest, shards.getPrimary());
             List<IndexShard> replicas = shards.getReplicas();
@@ -476,7 +476,7 @@ protected EngineFactory getEngineFactory(ShardRouting routing) {
             shards.startPrimary();
             long primaryTerm = shards.getPrimary().getPendingPrimaryTerm();
             List<Translog.Operation> expectedTranslogOps = new ArrayList<>();
-            BulkItemResponse indexResp = shards.index(new IndexRequest(index.getName()).id("1").source("{}", XContentType.JSON));
+            BulkItemResponse indexResp = shards.index(new IndexRequest(index.getName()).id("1").source("{}", MediaTypeRegistry.JSON));
             assertThat(indexResp.isFailed(), equalTo(true));
             assertThat(indexResp.getFailure().getCause(), equalTo(indexException));
             expectedTranslogOps.add(new Translog.NoOp(0, primaryTerm, indexException.toString()));
@@ -504,7 +504,7 @@ protected EngineFactory getEngineFactory(ShardRouting routing) {
                 }
             }
             // the failure replicated directly from the replication channel.
-            indexResp = shards.index(new IndexRequest(index.getName()).id("any").source("{}", XContentType.JSON));
+            indexResp = shards.index(new IndexRequest(index.getName()).id("any").source("{}", MediaTypeRegistry.JSON));
             assertThat(indexResp.getFailure().getCause(), equalTo(indexException));
             Translog.NoOp noop2 = new Translog.NoOp(1, primaryTerm, indexException.toString());
             expectedTranslogOps.add(noop2);
@@ -531,7 +531,9 @@ protected EngineFactory getEngineFactory(ShardRouting routing) {
     public void testRequestFailureReplication() throws Exception {
         try (ReplicationGroup shards = createGroup(0)) {
             shards.startAll();
-            BulkItemResponse response = shards.index(new IndexRequest(index.getName()).id("1").source("{}", XContentType.JSON).version(2));
+            BulkItemResponse response = shards.index(
+                new IndexRequest(index.getName()).id("1").source("{}", MediaTypeRegistry.JSON).version(2)
+            );
             assertTrue(response.isFailed());
             assertThat(response.getFailure().getCause(), instanceOf(VersionConflictEngineException.class));
             shards.assertAllEqual(0);
@@ -549,7 +551,7 @@ public void testRequestFailureReplication() throws Exception {
                 shards.addReplica();
             }
             shards.startReplicas(nReplica);
-            response = shards.index(new IndexRequest(index.getName()).id("1").source("{}", XContentType.JSON).version(2));
+            response = shards.index(new IndexRequest(index.getName()).id("1").source("{}", MediaTypeRegistry.JSON).version(2));
             assertTrue(response.isFailed());
             assertThat(response.getFailure().getCause(), instanceOf(VersionConflictEngineException.class));
             shards.assertAllEqual(0);
@@ -582,7 +584,7 @@ public void testSeqNoCollision() throws Exception {
             shards.syncGlobalCheckpoint();
 
             logger.info("--> Isolate replica1");
-            IndexRequest indexDoc1 = new IndexRequest(index.getName()).id("d1").source("{}", XContentType.JSON);
+            IndexRequest indexDoc1 = new IndexRequest(index.getName()).id("d1").source("{}", MediaTypeRegistry.JSON);
             BulkShardRequest replicationRequest = indexOnPrimary(indexDoc1, shards.getPrimary());
             indexOnReplica(replicationRequest, shards, replica2);
 
@@ -602,7 +604,7 @@ public void testSeqNoCollision() throws Exception {
             }
             logger.info("--> Promote replica1 as the primary");
             shards.promoteReplicaToPrimary(replica1).get(); // wait until resync completed.
-            shards.index(new IndexRequest(index.getName()).id("d2").source("{}", XContentType.JSON));
+            shards.index(new IndexRequest(index.getName()).id("d2").source("{}", MediaTypeRegistry.JSON));
             final Translog.Operation op2;
             try (Translog.Snapshot snapshot = getTranslog(replica2).newSnapshot()) {
                 assertThat(snapshot.totalOperations(), equalTo(1));
@@ -652,7 +654,7 @@ public void testLateDeliveryAfterGCTriggeredOnReplica() throws Exception {
             updateGCDeleteCycle(replica, gcInterval);
 
             final BulkShardRequest indexRequest = indexOnPrimary(
-                new IndexRequest(index.getName()).id("d1").source("{}", XContentType.JSON),
+                new IndexRequest(index.getName()).id("d1").source("{}", MediaTypeRegistry.JSON),
                 primary
             );
             final BulkShardRequest deleteRequest = deleteOnPrimary(new DeleteRequest(index.getName()).id("d1"), primary);
@@ -689,7 +691,7 @@ public void testOutOfOrderDeliveryForAppendOnlyOperations() throws Exception {
             final IndexShard replica = shards.getReplicas().get(0);
             // Append-only request - without id
             final BulkShardRequest indexRequest = indexOnPrimary(
-                new IndexRequest(index.getName()).id(null).source("{}", XContentType.JSON),
+                new IndexRequest(index.getName()).id(null).source("{}", MediaTypeRegistry.JSON),
                 primary
             );
             final String docId = Iterables.get(getShardDocUIDs(primary), 0);
@@ -709,7 +711,7 @@ public void testIndexingOptimizationUsingSequenceNumbers() throws Exception {
         for (int i = 0; i < numDocs; i++) {
             String id = Integer.toString(randomIntBetween(1, 100));
             if (randomBoolean()) {
-                group.index(new IndexRequest(index.getName()).id(id).source("{}", XContentType.JSON));
+                group.index(new IndexRequest(index.getName()).id(id).source("{}", MediaTypeRegistry.JSON));
                 if (liveDocs.add(id) == false) {
                     versionLookups++;
                 }
diff --git a/server/src/test/java/org/opensearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/opensearch/index/replication/RecoveryDuringReplicationTests.java
index b61150a9a81e2..17b5440ab5424 100644
--- a/server/src/test/java/org/opensearch/index/replication/RecoveryDuringReplicationTests.java
+++ b/server/src/test/java/org/opensearch/index/replication/RecoveryDuringReplicationTests.java
@@ -37,7 +37,6 @@
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.opensearch.Version;
-import org.opensearch.action.ActionListener;
 import org.opensearch.action.admin.indices.flush.FlushRequest;
 import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest;
 import org.opensearch.action.bulk.BulkShardRequest;
@@ -46,11 +45,12 @@
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.cluster.routing.ShardRouting;
-import org.opensearch.core.common.bytes.BytesArray;
+import org.opensearch.common.lease.Releasable;
 import org.opensearch.common.lucene.uid.Versions;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.xcontent.XContentType;
-import org.opensearch.common.lease.Releasable;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.common.bytes.BytesArray;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.index.IndexSettings;
 import org.opensearch.index.VersionType;
 import org.opensearch.index.engine.DocIdSeqNoAndSource;
@@ -141,7 +141,7 @@ public void testRecoveryToReplicaThatReceivedExtraDocument() throws Exception {
         shards.startAll();
         final int docs = randomIntBetween(0, 16);
         for (int i = 0; i < docs; i++) {
-            shards.index(new IndexRequest("index").id(Integer.toString(i)).source("{}", XContentType.JSON));
+            shards.index(new IndexRequest("index").id(Integer.toString(i)).source("{}", MediaTypeRegistry.JSON));
         }
         shards.flush();
@@ -158,7 +158,7 @@ public void testRecoveryToReplicaThatReceivedExtraDocument() throws Exception {
                 1,
                 randomNonNegativeLong(),
                 false,
-                new SourceToParse("index", "replica", new BytesArray("{}"), XContentType.JSON)
+                new SourceToParse("index", "replica", new BytesArray("{}"), MediaTypeRegistry.JSON)
             );
             shards.promoteReplicaToPrimary(promotedReplica).get();
             oldPrimary.close("demoted", randomBoolean(), false);
@@ -172,7 +172,7 @@ public void testRecoveryToReplicaThatReceivedExtraDocument() throws Exception {
             promotedReplica.applyIndexOperationOnPrimary(
                 Versions.MATCH_ANY,
                 VersionType.INTERNAL,
-                new SourceToParse("index", "primary", new BytesArray("{}"), XContentType.JSON),
+                new SourceToParse("index", "primary", new BytesArray("{}"), MediaTypeRegistry.JSON),
                 SequenceNumbers.UNASSIGNED_SEQ_NO,
                 0,
                 IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP,
@@ -209,7 +209,8 @@ public void testRecoveryAfterPrimaryPromotion() throws Exception {
             final int rollbackDocs = randomIntBetween(1, 5);
             logger.info("--> indexing {} rollback docs", rollbackDocs);
             for (int i = 0; i < rollbackDocs; i++) {
-                final IndexRequest indexRequest = new IndexRequest(index.getName()).id("rollback_" + i).source("{}", XContentType.JSON);
+                final IndexRequest indexRequest = new IndexRequest(index.getName()).id("rollback_" + i)
+                    .source("{}", MediaTypeRegistry.JSON);
                 final BulkShardRequest bulkShardRequest = indexOnPrimary(indexRequest, oldPrimary);
                 indexOnReplica(bulkShardRequest, shards, replica);
             }
@@ -327,7 +328,7 @@ public void testReplicaRollbackStaleDocumentsInPeerRecovery() throws Exception {
             int staleDocs = scaledRandomIntBetween(1, 10);
             logger.info("--> indexing {} stale docs", staleDocs);
             for (int i = 0; i < staleDocs; i++) {
-                final IndexRequest indexRequest = new IndexRequest(index.getName()).id("stale_" + i).source("{}", XContentType.JSON);
+                final IndexRequest indexRequest = new IndexRequest(index.getName()).id("stale_" + i).source("{}", MediaTypeRegistry.JSON);
                 final BulkShardRequest bulkShardRequest = indexOnPrimary(indexRequest, oldPrimary);
                 indexOnReplica(bulkShardRequest, shards, replica);
             }
@@ -364,7 +365,7 @@ public void testResyncAfterPrimaryPromotion() throws Exception {
         for (int i = 0; i < initialDocs; i++) {
             final IndexRequest indexRequest = new IndexRequest(index.getName()).id("initial_doc_" + i)
-                .source("{ \"f\": \"normal\"}", XContentType.JSON);
+                .source("{ \"f\": \"normal\"}", MediaTypeRegistry.JSON);
             shards.index(indexRequest);
         }
@@ -382,7 +383,7 @@ public void testResyncAfterPrimaryPromotion() throws Exception {
             logger.info("--> indexing {} extra docs", extraDocs);
             for (int i = 0; i < extraDocs; i++) {
                 final IndexRequest indexRequest = new IndexRequest(index.getName()).id("extra_doc_" + i)
-                    .source("{ \"f\": \"normal\"}", XContentType.JSON);
+                    .source("{ \"f\": \"normal\"}", MediaTypeRegistry.JSON);
                 final BulkShardRequest bulkShardRequest = indexOnPrimary(indexRequest, oldPrimary);
                 indexOnReplica(bulkShardRequest, shards, newPrimary);
             }
@@ -391,7 +392,7 @@ public void testResyncAfterPrimaryPromotion() throws Exception {
             logger.info("--> indexing {} extra docs to be trimmed", extraDocsToBeTrimmed);
             for (int i = 0; i < extraDocsToBeTrimmed; i++) {
                 final IndexRequest indexRequest = new IndexRequest(index.getName()).id("extra_trimmed_" + i)
-                    .source("{ \"f\": \"trimmed\"}", XContentType.JSON);
+                    .source("{ \"f\": \"trimmed\"}", MediaTypeRegistry.JSON);
                 final BulkShardRequest bulkShardRequest = indexOnPrimary(indexRequest, oldPrimary);
                 // have to replicate to another replica != newPrimary one - the subject to trim
                 indexOnReplica(bulkShardRequest, shards, justReplica);
@@ -459,7 +460,7 @@ protected EngineFactory getEngineFactory(ShardRouting routing) {
             final String id = "pending_" + i;
             threadPool.generic().submit(() -> {
                 try {
-                    shards.index(new IndexRequest(index.getName()).id(id).source("{}", XContentType.JSON));
+                    shards.index(new IndexRequest(index.getName()).id(id).source("{}", MediaTypeRegistry.JSON));
                 } catch (Exception e) {
                     throw new AssertionError(e);
                 } finally {
@@ -550,7 +551,7 @@ public void indexTranslogOperations(
             replicaEngineFactory.latchIndexers(1);
             threadPool.generic().submit(() -> {
                 try {
-                    shards.index(new IndexRequest(index.getName()).id("pending").source("{}", XContentType.JSON));
+                    shards.index(new IndexRequest(index.getName()).id("pending").source("{}", MediaTypeRegistry.JSON));
                 } catch (final Exception e) {
                     throw new RuntimeException(e);
                 } finally {
@@ -562,7 +563,7 @@ public void indexTranslogOperations(
             replicaEngineFactory.awaitIndexersLatch();
             // unblock indexing for the next doc
             replicaEngineFactory.allowIndexing();
-            shards.index(new IndexRequest(index.getName()).id("completed").source("{}", XContentType.JSON));
+            shards.index(new IndexRequest(index.getName()).id("completed").source("{}", MediaTypeRegistry.JSON));
             pendingDocActiveWithExtraDocIndexed.countDown();
         } catch (final Exception e) {
             throw new AssertionError(e);
@@ -602,7 +603,7 @@ public void indexTranslogOperations(
         // wait for the translog phase to complete and the recovery to block global checkpoint advancement
         assertBusy(() -> assertTrue(shards.getPrimary().pendingInSync()));
         {
-            shards.index(new IndexRequest(index.getName()).id("last").source("{}", XContentType.JSON));
+            shards.index(new IndexRequest(index.getName()).id("last").source("{}", MediaTypeRegistry.JSON));
             final long expectedDocs = docs + 3L;
             assertThat(shards.getPrimary().getLocalCheckpoint(), equalTo(expectedDocs - 1));
             // recovery is now in the process of being completed, therefore the global checkpoint can not have advanced on the primary
@@ -637,7 +638,7 @@ public void testTransferMaxSeenAutoIdTimestampOnResync() throws Exception {
         long maxTimestampOnReplica2 = -1;
         List<IndexRequest> replicationRequests = new ArrayList<>();
         for (int numDocs = between(1, 10), i = 0; i < numDocs; i++) {
-            final IndexRequest indexRequest = new IndexRequest(index.getName()).source("{}", XContentType.JSON);
+            final IndexRequest indexRequest = new IndexRequest(index.getName()).source("{}", MediaTypeRegistry.JSON);
             indexRequest.process(Version.CURRENT, null, index.getName());
             final IndexRequest copyRequest;
             if (randomBoolean()) {
@@ -695,10 +696,10 @@ public void testAddNewReplicas() throws Exception {
                     int nextId = docId.incrementAndGet();
                     if (appendOnly) {
                         String id = randomBoolean() ? Integer.toString(nextId) : null;
-                        shards.index(new IndexRequest(index.getName()).id(id).source("{}", XContentType.JSON));
+                        shards.index(new IndexRequest(index.getName()).id(id).source("{}", MediaTypeRegistry.JSON));
                     } else if (frequently()) {
                         String id = Integer.toString(frequently() ? nextId : between(0, nextId));
-                        shards.index(new IndexRequest(index.getName()).id(id).source("{}", XContentType.JSON));
+                        shards.index(new IndexRequest(index.getName()).id(id).source("{}", MediaTypeRegistry.JSON));
                     } else {
                         String id = Integer.toString(between(0, nextId));
                         shards.delete(new DeleteRequest(index.getName()).id(id));
@@ -736,7 +737,7 @@ public void testRollbackOnPromotion() throws Exception {
         int inFlightOps = scaledRandomIntBetween(10, 200);
         for (int i = 0; i < inFlightOps; i++) {
             String id = "extra-" + i;
-            IndexRequest primaryRequest = new IndexRequest(index.getName()).id(id).source("{}", XContentType.JSON);
+            IndexRequest primaryRequest = new IndexRequest(index.getName()).id(id).source("{}", MediaTypeRegistry.JSON);
             BulkShardRequest replicationRequest = indexOnPrimary(primaryRequest, shards.getPrimary());
             for (IndexShard replica : shards.getReplicas()) {
                 if (randomBoolean()) {
diff --git a/server/src/test/java/org/opensearch/index/replication/RetentionLeasesReplicationTests.java b/server/src/test/java/org/opensearch/index/replication/RetentionLeasesReplicationTests.java
index b4d21a360bc9c..8c59e92a3fe8a 100644
--- a/server/src/test/java/org/opensearch/index/replication/RetentionLeasesReplicationTests.java
+++ b/server/src/test/java/org/opensearch/index/replication/RetentionLeasesReplicationTests.java
@@ -32,19 +32,19 @@
 
 package org.opensearch.index.replication;
 
-import org.opensearch.action.ActionListener;
 import org.opensearch.action.support.PlainActionFuture;
 import org.opensearch.action.support.replication.ReplicationResponse;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.common.Randomness;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.index.IndexSettings;
 import org.opensearch.index.seqno.RetentionLease;
 import org.opensearch.index.seqno.RetentionLeaseSyncAction;
 import org.opensearch.index.seqno.RetentionLeaseUtils;
 import org.opensearch.index.seqno.RetentionLeases;
 import org.opensearch.index.shard.IndexShard;
-import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.test.VersionUtils;
 
 import java.util.ArrayList;
diff --git a/server/src/test/java/org/opensearch/index/search/MultiMatchQueryTests.java b/server/src/test/java/org/opensearch/index/search/MultiMatchQueryTests.java
index 48d04e2aaa3a6..0f173e1ac171e 100644
--- a/server/src/test/java/org/opensearch/index/search/MultiMatchQueryTests.java
+++ b/server/src/test/java/org/opensearch/index/search/MultiMatchQueryTests.java
@@ -32,7 +32,6 @@
 
 package org.opensearch.index.search;
 
-import org.apache.lucene.tests.analysis.MockSynonymAnalyzer;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queries.BlendedTermQuery;
 import org.apache.lucene.search.BooleanClause;
@@ -44,8 +43,8 @@
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.SynonymQuery;
 import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.tests.analysis.MockSynonymAnalyzer;
 import org.apache.lucene.util.BytesRef;
-import org.opensearch.common.Strings;
 import org.opensearch.common.compress.CompressedXContent;
 import org.opensearch.common.lucene.search.Queries;
 import org.opensearch.common.settings.Settings;
@@ -58,8 +57,8 @@
 import org.opensearch.index.query.QueryShardContext;
 import org.opensearch.index.search.MultiMatchQuery.FieldAndBoost;
 import org.opensearch.plugins.Plugin;
-import org.opensearch.test.OpenSearchSingleNodeTestCase;
 import org.opensearch.test.MockKeywordPlugin;
+import org.opensearch.test.OpenSearchSingleNodeTestCase;
 import org.junit.Before;
 
 import java.io.IOException;
@@ -285,31 +284,30 @@ public void testKeywordSplitQueriesOnWhitespace() throws IOException {
                 .build()
         );
         MapperService mapperService = indexService.mapperService();
-        String mapping = Strings.toString(
-            XContentFactory.jsonBuilder()
-                .startObject()
-                .startObject("type")
-                .startObject("properties")
-                .startObject("field")
-                .field("type", "keyword")
-                .endObject()
-                .startObject("field_normalizer")
-                .field("type", "keyword")
-                .field("normalizer", "my_lowercase")
-                .endObject()
-                .startObject("field_split")
-                .field("type", "keyword")
-                .field("split_queries_on_whitespace", true)
-                .endObject()
-                .startObject("field_split_normalizer")
-                .field("type", "keyword")
-                .field("normalizer", "my_lowercase")
-                .field("split_queries_on_whitespace", true)
-                .endObject()
-                .endObject()
-                .endObject()
-                .endObject()
-        );
+        String mapping = XContentFactory.jsonBuilder()
+            .startObject()
+            .startObject("type")
+            .startObject("properties")
+            .startObject("field")
+            .field("type", "keyword")
+            .endObject()
+            .startObject("field_normalizer")
+            .field("type", "keyword")
+            .field("normalizer", "my_lowercase")
+            .endObject()
+            .startObject("field_split")
+            .field("type", "keyword")
+            .field("split_queries_on_whitespace", true)
+            .endObject()
+            .startObject("field_split_normalizer")
+            .field("type", "keyword")
+            .field("normalizer", "my_lowercase")
+            .field("split_queries_on_whitespace", true)
+            .endObject()
+            .endObject()
+            .endObject()
+            .endObject()
+            .toString();
         mapperService.merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE);
         QueryShardContext queryShardContext = indexService.newQueryShardContext(randomInt(20), null, () -> {
             throw new UnsupportedOperationException();
diff --git a/server/src/test/java/org/opensearch/index/search/NestedHelperTests.java b/server/src/test/java/org/opensearch/index/search/NestedHelperTests.java
index 35e4447b39622..7ffcc0fb7437a 100644
--- a/server/src/test/java/org/opensearch/index/search/NestedHelperTests.java
+++ b/server/src/test/java/org/opensearch/index/search/NestedHelperTests.java
@@ -43,8 +43,8 @@
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.join.ScoreMode;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.common.xcontent.XContentFactory;
+import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.index.IndexService;
 import org.opensearch.index.mapper.MapperService;
 import org.opensearch.index.mapper.NestedPathFieldMapper;
diff --git a/server/src/test/java/org/opensearch/index/search/OpenSearchToParentBlockJoinQueryTests.java b/server/src/test/java/org/opensearch/index/search/OpenSearchToParentBlockJoinQueryTests.java
index 39160b1ac49bc..45172c946ab36 100644
--- a/server/src/test/java/org/opensearch/index/search/OpenSearchToParentBlockJoinQueryTests.java
+++ b/server/src/test/java/org/opensearch/index/search/OpenSearchToParentBlockJoinQueryTests.java
@@ -34,6 +34,7 @@
 
 import org.apache.lucene.index.MultiReader;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
@@ -112,7 +113,7 @@ public void testRewrite() throws IOException {
             ScoreMode.Avg,
             "nested"
         );
-        Query rewritten = q.rewrite(new MultiReader());
+        Query rewritten = q.rewrite(new IndexSearcher(new MultiReader()));
         assertEquals(expected, rewritten);
     }
 }
diff --git a/server/src/test/java/org/opensearch/index/search/geo/GeoPointParsingTests.java b/server/src/test/java/org/opensearch/index/search/geo/GeoPointParsingTests.java
index 93075cefa5570..097a6827a71a1 100644
--- a/server/src/test/java/org/opensearch/index/search/geo/GeoPointParsingTests.java
+++ b/server/src/test/java/org/opensearch/index/search/geo/GeoPointParsingTests.java
@@ -33,12 +33,12 @@
 package org.opensearch.index.search.geo;
 
 import org.opensearch.OpenSearchParseException;
-import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.common.geo.GeoPoint;
 import org.opensearch.common.geo.GeoUtils;
+import org.opensearch.common.xcontent.json.JsonXContent;
+import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.common.xcontent.json.JsonXContent;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.geo.RandomGeoGenerator;
 
diff --git a/server/src/test/java/org/opensearch/index/search/geo/GeoUtilsTests.java b/server/src/test/java/org/opensearch/index/search/geo/GeoUtilsTests.java
index 11a3ca68c1370..f29b4a25c29d4 100644
--- a/server/src/test/java/org/opensearch/index/search/geo/GeoUtilsTests.java
+++ b/server/src/test/java/org/opensearch/index/search/geo/GeoUtilsTests.java
@@ -35,8 +35,6 @@
 import org.apache.lucene.spatial.prefix.tree.Cell;
 import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree;
 import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree;
-import org.locationtech.spatial4j.context.SpatialContext;
-import org.locationtech.spatial4j.distance.DistanceUtils;
 import org.opensearch.OpenSearchParseException;
 import org.opensearch.common.geo.GeoPoint;
 import org.opensearch.common.geo.GeoUtils;
@@ -49,6 +47,10 @@
 
 import java.io.IOException;
 
+import org.locationtech.spatial4j.context.SpatialContext;
+import org.locationtech.spatial4j.distance.DistanceUtils;
+
+import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.allOf;
 import static org.hamcrest.Matchers.closeTo;
 import static org.hamcrest.Matchers.containsString;
@@ -57,7 +59,6 @@
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.lessThanOrEqualTo;
 import static org.hamcrest.Matchers.not;
-import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;
 
 public class GeoUtilsTests extends OpenSearchTestCase {
     private static final String ERR_MSG_INVALID_FIELDS = "field must be either [lon|lat], [type|coordinates], or [geohash]";
diff --git a/server/src/test/java/org/opensearch/index/search/nested/AbstractNumberNestedSortingTestCase.java b/server/src/test/java/org/opensearch/index/search/nested/AbstractNumberNestedSortingTestCase.java
index 85cefbb920a5b..525c1303ab46b 100644
--- a/server/src/test/java/org/opensearch/index/search/nested/AbstractNumberNestedSortingTestCase.java
+++ b/server/src/test/java/org/opensearch/index/search/nested/AbstractNumberNestedSortingTestCase.java
@@ -51,11 +51,11 @@
 import org.apache.lucene.search.join.ToParentBlockJoinQuery;
 import org.opensearch.common.lucene.index.OpenSearchDirectoryReader;
 import org.opensearch.common.lucene.search.Queries;
+import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.index.fielddata.AbstractFieldDataTestCase;
 import org.opensearch.index.fielddata.IndexFieldData;
 import org.opensearch.index.fielddata.IndexFieldData.XFieldComparatorSource;
 import org.opensearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;
-import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.search.MultiValueMode;
 
 import java.io.IOException;
diff --git a/server/src/test/java/org/opensearch/index/search/nested/NestedSortingTests.java b/server/src/test/java/org/opensearch/index/search/nested/NestedSortingTests.java
index f3ae66bdbf1ab..8f1a9afa243a3 100644
--- a/server/src/test/java/org/opensearch/index/search/nested/NestedSortingTests.java
+++ b/server/src/test/java/org/opensearch/index/search/nested/NestedSortingTests.java
@@ -55,13 +55,14 @@
 import org.apache.lucene.search.join.QueryBitSetProducer;
 import org.apache.lucene.search.join.ScoreMode;
 import org.apache.lucene.search.join.ToParentBlockJoinQuery;
-import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.tests.util.TestUtil;
+import org.apache.lucene.util.BytesRef;
 import org.opensearch.common.lucene.index.OpenSearchDirectoryReader;
 import org.opensearch.common.lucene.search.Queries;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.common.xcontent.XContentFactory;
+import org.opensearch.core.index.shard.ShardId;
+import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.index.IndexService;
 import org.opensearch.index.fielddata.AbstractFieldDataTestCase;
 import org.opensearch.index.fielddata.IndexFieldData;
@@ -75,7 +76,6 @@
 import org.opensearch.index.query.QueryShardContext;
 import org.opensearch.index.query.RangeQueryBuilder;
 import org.opensearch.index.query.TermQueryBuilder;
-import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.search.MultiValueMode;
 import org.opensearch.search.sort.FieldSortBuilder;
 import org.opensearch.search.sort.NestedSortBuilder;
diff --git a/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java b/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java
index 7d2d8e38d066e..5656b77445772 100644
--- a/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java
+++ b/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java
@@ -32,22 +32,34 @@
 
 package org.opensearch.index.search.stats;
 
+import org.opensearch.action.search.SearchPhase;
+import org.opensearch.action.search.SearchPhaseContext;
+import org.opensearch.action.search.SearchPhaseName;
+import org.opensearch.action.search.SearchRequestOperationsListenerSupport;
+import org.opensearch.action.search.SearchRequestStats;
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Settings;
 import org.opensearch.index.search.stats.SearchStats.Stats;
 import org.opensearch.test.OpenSearchTestCase;
 
 import java.util.HashMap;
 import java.util.Map;
+import java.util.concurrent.TimeUnit;
 
-public class SearchStatsTests extends OpenSearchTestCase {
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class SearchStatsTests extends OpenSearchTestCase implements SearchRequestOperationsListenerSupport {
 
     // https://github.com/elastic/elasticsearch/issues/7644
     public void testShardLevelSearchGroupStats() throws Exception {
         // let's create two dummy search stats with groups
         Map<String, Stats> groupStats1 = new HashMap<>();
         Map<String, Stats> groupStats2 = new HashMap<>();
-        groupStats2.put("group1", new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1));
-        SearchStats searchStats1 = new SearchStats(new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 0, groupStats1);
-        SearchStats searchStats2 = new SearchStats(new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 0, groupStats2);
+        groupStats2.put("group1", new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1));
+        SearchStats searchStats1 = new SearchStats(new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 0, groupStats1);
+        SearchStats searchStats2 = new SearchStats(new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 0, groupStats2);
 
         // adding these two search stats and checking group stats are correct
         searchStats1.add(searchStats2);
@@ -63,12 +75,47 @@ public void testShardLevelSearchGroupStats() throws Exception {
         // adding again would then return wrong search stats (would return 4! instead of 3)
         searchStats1.add(searchStats2);
         assertStats(groupStats1.get("group1"), 3);
+
+        long paramValue = randomIntBetween(2, 50);
+
+        // Testing for request stats
+        ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+        SearchRequestStats testRequestStats = new SearchRequestStats(clusterSettings);
+        SearchPhaseContext ctx = mock(SearchPhaseContext.class);
+        for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) {
+            SearchPhase mockSearchPhase = mock(SearchPhase.class);
+            when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase);
+            when(mockSearchPhase.getStartTimeInNanos()).thenReturn(System.nanoTime() - TimeUnit.SECONDS.toNanos(paramValue));
+            when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName);
+            for (int iterator = 0; iterator < paramValue; iterator++) {
+                onPhaseStart(testRequestStats, ctx);
+                onPhaseEnd(testRequestStats, ctx);
+            }
+        }
+        searchStats1.setSearchRequestStats(testRequestStats);
+        for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) {
+            assertEquals(
+                0,
+                searchStats1.getTotal().getRequestStatsLongHolder().getRequestStatsHolder().get(searchPhaseName.getName()).current
+            );
+            assertEquals(
+                paramValue,
+                searchStats1.getTotal().getRequestStatsLongHolder().getRequestStatsHolder().get(searchPhaseName.getName()).total
+            );
+            assertThat(
+                searchStats1.getTotal().getRequestStatsLongHolder().getRequestStatsHolder().get(searchPhaseName.getName()).timeInMillis,
+                greaterThanOrEqualTo(paramValue)
+            );
+        }
     }
 
     private static void assertStats(Stats stats, long equalTo) {
         assertEquals(equalTo, stats.getQueryCount());
         assertEquals(equalTo, stats.getQueryTimeInMillis());
         assertEquals(equalTo, stats.getQueryCurrent());
+        assertEquals(equalTo, stats.getConcurrentQueryCount());
+        assertEquals(equalTo, stats.getConcurrentQueryTimeInMillis());
+        assertEquals(equalTo, stats.getConcurrentQueryCurrent());
         assertEquals(equalTo, stats.getFetchCount());
         assertEquals(equalTo, stats.getFetchTimeInMillis());
         assertEquals(equalTo, stats.getFetchCurrent());
@@ -81,6 +128,7 @@ private static void assertStats(Stats stats, long equalTo) {
         assertEquals(equalTo, stats.getSuggestCount());
         assertEquals(equalTo, stats.getSuggestTimeInMillis());
         assertEquals(equalTo, stats.getSuggestCurrent());
+        // avg_concurrency is not summed up across stats
+        assertEquals(1, stats.getConcurrentAvgSliceCount(), 0);
     }
-
 }
diff --git a/server/src/test/java/org/opensearch/index/seqno/GlobalCheckpointSyncActionTests.java b/server/src/test/java/org/opensearch/index/seqno/GlobalCheckpointSyncActionTests.java
index 4832625b315c1..8363ea3757a2b 100644
--- a/server/src/test/java/org/opensearch/index/seqno/GlobalCheckpointSyncActionTests.java
+++ b/server/src/test/java/org/opensearch/index/seqno/GlobalCheckpointSyncActionTests.java
@@ -40,11 +40,12 @@
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.util.io.IOUtils;
 import org.opensearch.core.index.Index;
+import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.index.IndexService;
 import org.opensearch.index.shard.IndexShard;
-import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.index.translog.Translog;
 import org.opensearch.indices.IndicesService;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.transport.CapturingTransport;
 import org.opensearch.threadpool.TestThreadPool;
@@ -53,11 +54,11 @@
 
 import java.util.Collections;
 
+import static org.opensearch.test.ClusterServiceUtils.createClusterService;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
-import static org.opensearch.test.ClusterServiceUtils.createClusterService;
 
 public class GlobalCheckpointSyncActionTests extends OpenSearchTestCase {
 
@@ -78,7 +79,8 @@ public void setUp() throws Exception {
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             boundAddress -> clusterService.localNode(),
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         );
         transportService.start();
         transportService.acceptIncomingRequests();
diff --git a/server/src/test/java/org/opensearch/index/seqno/PeerRecoveryRetentionLeaseExpiryTests.java b/server/src/test/java/org/opensearch/index/seqno/PeerRecoveryRetentionLeaseExpiryTests.java
index 4d9c73cd8cb0e..ca80c7b9c4884 100644
--- a/server/src/test/java/org/opensearch/index/seqno/PeerRecoveryRetentionLeaseExpiryTests.java
+++ b/server/src/test/java/org/opensearch/index/seqno/PeerRecoveryRetentionLeaseExpiryTests.java
@@ -31,7 +31,6 @@
 
 package org.opensearch.index.seqno;
 
-import org.opensearch.action.ActionListener;
 import org.opensearch.action.support.replication.ReplicationResponse;
 import org.opensearch.cluster.routing.AllocationId;
 import org.opensearch.cluster.routing.IndexShardRoutingTable;
@@ -39,9 +38,10 @@
 import org.opensearch.common.collect.Tuple;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.TimeValue;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.index.IndexSettings;
 import org.opensearch.index.engine.SafeCommitInfo;
-import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.test.IndexSettingsModule;
 import org.junit.Before;
 
diff --git a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java
index 6c9bf879e725a..3cd60ac973709 100644
--- a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java
+++ b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java
@@ -32,17 +32,17 @@
 
 package org.opensearch.index.seqno;
 
-import org.opensearch.action.ActionListener;
 import org.opensearch.action.support.PlainActionFuture;
 import org.opensearch.action.support.replication.ReplicationResponse;
 import org.opensearch.cluster.routing.AllocationId;
 import org.opensearch.common.collect.Tuple;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.TimeValue;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.core.xcontent.NamedXContentRegistry;
 import org.opensearch.gateway.WriteStateException;
 import org.opensearch.index.IndexSettings;
-import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.test.IndexSettingsModule;
 
 import java.io.IOException;
diff --git a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTestCase.java b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTestCase.java
index bac70cddec85c..e61d27695a5e5 100644
--- a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTestCase.java
+++ b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTestCase.java
@@ -38,10 +38,10 @@
 import org.opensearch.cluster.routing.ShardRoutingState;
 import org.opensearch.cluster.routing.TestShardRouting;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.index.engine.SafeCommitInfo;
 import org.opensearch.core.index.shard.ShardId;
-import org.opensearch.test.OpenSearchTestCase;
+import org.opensearch.index.engine.SafeCommitInfo;
 import org.opensearch.test.IndexSettingsModule;
+import org.opensearch.test.OpenSearchTestCase;
 
 import java.util.Collections;
 import java.util.Set;
diff --git a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java
index 03a6fc3df824d..7971591e82bab 100644
--- a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java
+++ b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java
@@ -33,7 +33,7 @@
 package org.opensearch.index.seqno;
 
 import org.apache.lucene.codecs.Codec;
-import org.opensearch.action.ActionListener;
+import org.apache.lucene.util.Version;
 import org.opensearch.action.support.replication.ReplicationResponse;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.routing.AllocationId;
@@ -44,14 +44,17 @@
 import org.opensearch.common.Randomness;
 import org.opensearch.common.collect.Tuple;
 import org.opensearch.common.io.stream.BytesStreamOutput;
-import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.util.set.Sets;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.index.IndexSettings;
 import org.opensearch.index.SegmentReplicationShardStats;
-import org.opensearch.core.index.shard.ShardId;
+import org.opensearch.index.store.StoreFileMetadata;
 import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint;
 import org.opensearch.indices.replication.common.ReplicationType;
+import org.opensearch.indices.replication.common.SegmentReplicationLagTimer;
 import org.opensearch.test.IndexSettingsModule;
 
 import java.io.IOException;
@@ -1293,7 +1296,10 @@ public void testGlobalCheckpointUpdateWithRemoteTranslogEnabled() {
         assertThat(allocations.size(), equalTo(active.size() + initializing.size()));
 
         final AllocationId primaryId = active.iterator().next();
-        Settings settings = Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true").build();
+        Settings settings = Settings.builder()
+            .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT)
+            .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true")
+            .build();
         final ReplicationTracker tracker = newTracker(primaryId, settings);
         assertThat(tracker.getGlobalCheckpoint(), equalTo(UNASSIGNED_SEQ_NO));
 
@@ -1368,7 +1374,10 @@ public void testUpdateFromClusterManagerWithRemoteTranslogEnabled() {
         assertThat(allocations.size(), equalTo(active.size() + initializing.size()));
 
         final AllocationId primaryId = active.iterator().next();
-        Settings settings = Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true").build();
+        Settings settings = Settings.builder()
+            .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT)
+            .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true")
+            .build();
         final ReplicationTracker tracker = newTracker(primaryId, settings);
         assertThat(tracker.getGlobalCheckpoint(), equalTo(UNASSIGNED_SEQ_NO));
 
@@ -1438,7 +1447,10 @@ public void testUpdateFromClusterManagerWithRemoteTranslogEnabled() {
      */
    public void testUpdateGlobalCheckpointOnReplicaWithRemoteTranslogEnabled() {
        final AllocationId active = AllocationId.newInitializing();
-        Settings settings = Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true").build();
+        Settings settings = Settings.builder()
+            .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT)
+            .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true")
+            .build();
        final ReplicationTracker tracker = newTracker(active, settings);
        final long globalCheckpoint = randomLongBetween(NO_OPS_PERFORMED, Long.MAX_VALUE - 1);
        tracker.updateGlobalCheckpointOnReplica(globalCheckpoint, "test");
@@ -1460,7 +1472,10 @@ public void testMarkAllocationIdAsInSyncWithRemoteTranslogEnabled() throws Excep
         Set<AllocationId> initializing = new HashSet<>(initializingWithCheckpoints.keySet());
         final AllocationId primaryId = active.iterator().next();
         final AllocationId replicaId = initializing.iterator().next();
-        Settings settings = Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true").build();
+        Settings settings = Settings.builder()
+            .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT)
+            .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true")
+            .build();
         final ReplicationTracker tracker = newTracker(primaryId, settings);
         tracker.updateFromClusterManager(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId));
         final long localCheckpoint = randomLongBetween(0, Long.MAX_VALUE - 1);
@@ -1485,7 +1500,10 @@ public void testMissingActiveIdsDoesNotPreventAdvanceWithRemoteTranslogEnabled()
         assigned.putAll(active);
         assigned.putAll(initializing);
         AllocationId primaryId = active.keySet().iterator().next();
-        Settings settings = Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true").build();
+        Settings settings = Settings.builder()
+            .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT)
+            .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true")
+            .build();
         final ReplicationTracker tracker = newTracker(primaryId, settings);
         tracker.updateFromClusterManager(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId));
         tracker.activatePrimaryMode(NO_OPS_PERFORMED);
@@ -1515,7 +1533,10 @@ public void testMissingInSyncIdsDoesNotPreventAdvanceWithRemoteTranslogEnabled()
         logger.info("active: {}, initializing: {}", active, initializing);
 
         AllocationId primaryId = active.keySet().iterator().next();
-        Settings settings = Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true").build();
+        Settings settings = Settings.builder()
+            .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT)
+            .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true")
+            .build();
         final ReplicationTracker tracker = newTracker(primaryId, settings);
         tracker.updateFromClusterManager(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId));
         tracker.activatePrimaryMode(NO_OPS_PERFORMED);
@@ -1540,7 +1561,10 @@ public void testInSyncIdsAreIgnoredIfNotValidatedByClusterManagerWithRemoteTrans
         final Map<AllocationId, Long> initializing = randomAllocationsWithLocalCheckpoints(1, 5);
         final Map<AllocationId, Long> nonApproved = randomAllocationsWithLocalCheckpoints(1, 5);
         final AllocationId primaryId = active.keySet().iterator().next();
-        Settings settings = Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true").build();
+        Settings settings = Settings.builder()
+            .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT)
+            .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true")
+            .build();
         final ReplicationTracker tracker = newTracker(primaryId, settings);
         tracker.updateFromClusterManager(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId));
         tracker.activatePrimaryMode(NO_OPS_PERFORMED);
@@ -1578,7 +1602,10 @@ public void testInSyncIdsAreRemovedIfNotValidatedByClusterManagerWithRemoteTrans
         if (randomBoolean()) {
             allocations.putAll(initializingToBeRemoved);
         }
-        Settings settings = Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true").build();
+        Settings settings = Settings.builder()
+            .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT)
+            .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true")
+            .build();
         final ReplicationTracker tracker = newTracker(primaryId, settings);
         tracker.updateFromClusterManager(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId));
         tracker.activatePrimaryMode(NO_OPS_PERFORMED);
@@ -1624,7 +1651,10 @@ public void testUpdateAllocationIdsFromClusterManagerWithRemoteTranslogEnabled()
         final Set<AllocationId> initializingIds = activeAndInitializingAllocationIds.v2();
         AllocationId primaryId = activeAllocationIds.iterator().next();
         IndexShardRoutingTable routingTable = routingTable(initializingIds, primaryId);
-        Settings settings = Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true").build();
+        Settings settings = Settings.builder()
+            .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT)
+            .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true")
+            .build();
         final ReplicationTracker tracker = newTracker(primaryId, settings);
         tracker.updateFromClusterManager(initialClusterStateVersion, ids(activeAllocationIds), routingTable);
         tracker.activatePrimaryMode(NO_OPS_PERFORMED);
@@ -1798,34 +1828,43 @@ public void testSegmentReplicationCheckpointTracking() {
         initializingIds.forEach(aId -> markAsTrackingAndInSyncQuietly(tracker, aId.getId(), NO_OPS_PERFORMED));
 
+        final StoreFileMetadata segment_1 = new StoreFileMetadata("segment_1", 1L, "abcd", Version.LATEST);
+        final StoreFileMetadata segment_2 = new StoreFileMetadata("segment_2", 50L, "abcd", Version.LATEST);
+        final StoreFileMetadata segment_3 = new StoreFileMetadata("segment_3", 100L, "abcd", Version.LATEST);
         final ReplicationCheckpoint initialCheckpoint = new ReplicationCheckpoint(
             tracker.shardId(),
             0L,
             1,
             1,
             1L,
-            Codec.getDefault().getName()
+            Codec.getDefault().getName(),
+            Map.of("segment_1", segment_1)
         );
         final
ReplicationCheckpoint secondCheckpoint = new ReplicationCheckpoint( tracker.shardId(), 0L, 2, 2, - 50L, - Codec.getDefault().getName() + 51L, + Codec.getDefault().getName(), + Map.of("segment_1", segment_1, "segment_2", segment_2) ); final ReplicationCheckpoint thirdCheckpoint = new ReplicationCheckpoint( tracker.shardId(), 0L, 2, 3, - 100L, - Codec.getDefault().getName() + 151L, + Codec.getDefault().getName(), + Map.of("segment_1", segment_1, "segment_2", segment_2, "segment_3", segment_3) ); tracker.setLatestReplicationCheckpoint(initialCheckpoint); + tracker.startReplicationLagTimers(initialCheckpoint); tracker.setLatestReplicationCheckpoint(secondCheckpoint); + tracker.startReplicationLagTimers(secondCheckpoint); tracker.setLatestReplicationCheckpoint(thirdCheckpoint); + tracker.startReplicationLagTimers(thirdCheckpoint); final Set<String> expectedIds = ids(initializingIds); @@ -1833,7 +1872,8 @@ public void testSegmentReplicationCheckpointTracking() { assertEquals(expectedIds.size(), groupStats.size()); for (SegmentReplicationShardStats shardStat : groupStats) { assertEquals(3, shardStat.getCheckpointsBehindCount()); - assertEquals(100L, shardStat.getBytesBehindCount()); + assertEquals(151L, shardStat.getBytesBehindCount()); + assertTrue(shardStat.getCurrentReplicationLagMillis() >= shardStat.getCurrentReplicationTimeMillis()); } // simulate replicas moved up to date. @@ -1849,7 +1889,7 @@ public void testSegmentReplicationCheckpointTracking() { assertEquals(expectedIds.size(), groupStats.size()); for (SegmentReplicationShardStats shardStat : groupStats) { assertEquals(2, shardStat.getCheckpointsBehindCount()); - assertEquals(99L, shardStat.getBytesBehindCount()); + assertEquals(150L, shardStat.getBytesBehindCount()); } for (String id : expectedIds) { @@ -1867,6 +1907,86 @@ public void testSegmentReplicationCheckpointTracking() { } } + public void testSegmentReplicationCheckpointForRelocatingPrimary() { + Settings settings = Settings.builder().put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build(); + final long initialClusterStateVersion = randomNonNegativeLong(); + final int numberOfActiveAllocationsIds = randomIntBetween(2, 2); + final int numberOfInitializingIds = randomIntBetween(2, 2); + final Tuple<Set<AllocationId>, Set<AllocationId>> activeAndInitializingAllocationIds = randomActiveAndInitializingAllocationIds( + numberOfActiveAllocationsIds, + numberOfInitializingIds + ); + final Set<AllocationId> activeAllocationIds = activeAndInitializingAllocationIds.v1(); + final Set<AllocationId> initializingIds = activeAndInitializingAllocationIds.v2(); + + AllocationId targetAllocationId = initializingIds.iterator().next(); + AllocationId primaryId = activeAllocationIds.iterator().next(); + String relocatingToNodeId = nodeIdFromAllocationId(targetAllocationId); + + logger.info("--> activeAllocationIds {} Primary {}", activeAllocationIds, primaryId.getId()); + logger.info("--> initializingIds {} Target {}", initializingIds, targetAllocationId); + + final ShardId shardId = new ShardId("test", "_na_", 0); + final IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder(shardId); + for (final AllocationId initializingId : initializingIds) { + boolean primaryRelocationTarget = initializingId.equals(targetAllocationId); + builder.addShard( + TestShardRouting.newShardRouting( + shardId, + nodeIdFromAllocationId(initializingId), + null, + primaryRelocationTarget, + ShardRoutingState.INITIALIZING, + initializingId + ) + ); + } + builder.addShard( + 
TestShardRouting.newShardRouting( + shardId, + nodeIdFromAllocationId(primaryId), + relocatingToNodeId, + true, + ShardRoutingState.STARTED, + primaryId + ) + ); + IndexShardRoutingTable routingTable = builder.build(); + final ReplicationTracker tracker = newTracker(primaryId, settings); + tracker.updateFromClusterManager(initialClusterStateVersion, ids(activeAllocationIds), routingTable); + tracker.activatePrimaryMode(NO_OPS_PERFORMED); + assertThat(tracker.getReplicationGroup().getInSyncAllocationIds(), equalTo(ids(activeAllocationIds))); + assertThat(tracker.getReplicationGroup().getRoutingTable(), equalTo(routingTable)); + assertTrue(activeAllocationIds.stream().allMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).inSync)); + initializingIds.forEach(aId -> markAsTrackingAndInSyncQuietly(tracker, aId.getId(), NO_OPS_PERFORMED)); + + final StoreFileMetadata segment_1 = new StoreFileMetadata("segment_1", 5L, "abcd", Version.LATEST); + final ReplicationCheckpoint initialCheckpoint = new ReplicationCheckpoint( + tracker.shardId(), + 0L, + 1, + 1, + 5L, + Codec.getDefault().getName(), + Map.of("segment_1", segment_1) + ); + tracker.setLatestReplicationCheckpoint(initialCheckpoint); + tracker.startReplicationLagTimers(initialCheckpoint); + + final Set<String> expectedIds = initializingIds.stream() + .filter(id -> id.equals(targetAllocationId)) + .map(AllocationId::getId) + .collect(Collectors.toSet()); + + Set<SegmentReplicationShardStats> groupStats = tracker.getSegmentReplicationStats(); + assertEquals(expectedIds.size(), groupStats.size()); + for (SegmentReplicationShardStats shardStat : groupStats) { + assertEquals(1, shardStat.getCheckpointsBehindCount()); + assertEquals(5L, shardStat.getBytesBehindCount()); + assertTrue(shardStat.getCurrentReplicationLagMillis() >= shardStat.getCurrentReplicationTimeMillis()); + } + } + public void testSegmentReplicationCheckpointTrackingInvalidAllocationIDs() { Settings settings = Settings.builder().put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build(); final long initialClusterStateVersion = randomNonNegativeLong(); @@ -1906,9 +2026,11 @@ public void testSegmentReplicationCheckpointTrackingInvalidAllocationIDs() { 1, 1, 1L, - Codec.getDefault().getName() + Codec.getDefault().getName(), + Collections.emptyMap() ); tracker.setLatestReplicationCheckpoint(initialCheckpoint); + tracker.startReplicationLagTimers(initialCheckpoint); // we expect that the only returned ids from getSegmentReplicationStats will be the initializing ids we marked with // markAsTrackingAndInSyncQuietly. 
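The checkpoint-tracking hunks above attach a Map<String, StoreFileMetadata> to each ReplicationCheckpoint, which is what makes the per-replica bytes-behind figures asserted in these tests (151L, then 150L once segment_1 is synced) computable from segment file sizes. Below is a minimal sketch of that arithmetic; the bytesBehind helper is hypothetical, not the actual ReplicationTracker internals, and only the StoreFileMetadata type and the test's file sizes are taken from the diff.

```java
import org.apache.lucene.util.Version;
import org.opensearch.index.store.StoreFileMetadata;

import java.util.Map;

public class BytesBehindSketch {
    // Hypothetical helper: a replica is "bytes behind" by the total length of files
    // present in the primary's latest checkpoint but absent from the checkpoint the
    // replica has last seen.
    static long bytesBehind(Map<String, StoreFileMetadata> latest, Map<String, StoreFileMetadata> visible) {
        return latest.values()
            .stream()
            .filter(md -> visible.containsKey(md.name()) == false)
            .mapToLong(StoreFileMetadata::length)
            .sum();
    }

    public static void main(String[] args) {
        // Same sizes as the test fixtures above: 1, 50, and 100 bytes.
        StoreFileMetadata segment1 = new StoreFileMetadata("segment_1", 1L, "abcd", Version.LATEST);
        StoreFileMetadata segment2 = new StoreFileMetadata("segment_2", 50L, "abcd", Version.LATEST);
        StoreFileMetadata segment3 = new StoreFileMetadata("segment_3", 100L, "abcd", Version.LATEST);
        Map<String, StoreFileMetadata> third = Map.of("segment_1", segment1, "segment_2", segment2, "segment_3", segment3);

        // A replica that has seen no checkpoint lags by 1 + 50 + 100 = 151 bytes ...
        System.out.println(bytesBehind(third, Map.of())); // 151
        // ... and by 50 + 100 = 150 bytes once it has caught up to segment_1.
        System.out.println(bytesBehind(third, Map.of("segment_1", segment1))); // 150
    }
}
```

The point the test data makes is that lag is now measured in real segment bytes outstanding rather than in checkpoint counts alone, which is why the expected value moves from 100L to 151L once file metadata rides along with the checkpoint.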
@@ -1936,7 +2058,10 @@ public void testSegmentReplicationCheckpointTrackingInvalidAllocationIDs() { } public void testPrimaryContextHandoffWithRemoteTranslogEnabled() throws IOException { - Settings settings = Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true").build(); + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") + .build(); final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", settings); final ShardId shardId = new ShardId("test", "_na_", 0); @@ -2115,7 +2240,10 @@ public void testPrimaryContextHandoffWithRemoteTranslogEnabled() throws IOExcept public void testIllegalStateExceptionIfUnknownAllocationIdWithRemoteTranslogEnabled() { final AllocationId active = AllocationId.newInitializing(); final AllocationId initializing = AllocationId.newInitializing(); - Settings settings = Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true").build(); + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") + .build(); final ReplicationTracker tracker = newTracker(active, settings); tracker.updateFromClusterManager( randomNonNegativeLong(), @@ -2128,4 +2256,15 @@ public void testIllegalStateExceptionIfUnknownAllocationIdWithRemoteTranslogEnab expectThrows(IllegalStateException.class, () -> tracker.markAllocationIdAsInSync(randomAlphaOfLength(10), randomNonNegativeLong())); } + public void testSegRepTimer() throws Throwable { + SegmentReplicationLagTimer timer = new SegmentReplicationLagTimer(); + Thread.sleep(100); + timer.start(); + Thread.sleep(100); + timer.stop(); + assertTrue("Total time since timer started should be greater than 100", timer.time() >= 100); + assertTrue("Total time since timer was created should be greater than 200", timer.totalElapsedTime() >= 200); + assertTrue("Total elapsed time should be greater than time since timer start", timer.totalElapsedTime() - timer.time() >= 100); + } + } diff --git a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseActionsTests.java b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseActionsTests.java index 2a19a98c1d63a..861ef5194fc27 100644 --- a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseActionsTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseActionsTests.java @@ -32,16 +32,16 @@ package org.opensearch.index.seqno; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.stats.IndicesStatsAction; import org.opensearch.action.admin.indices.stats.IndicesStatsRequest; import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.lease.Releasable; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.node.Node; import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java 
b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java index baffd41d68dea..ed04d9a20f18e 100644 --- a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java @@ -32,7 +32,6 @@ package org.opensearch.index.seqno; -import org.opensearch.action.ActionListener; import org.opensearch.action.LatchedActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.ActionTestUtils; @@ -43,12 +42,14 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.io.IOUtils; -import org.opensearch.gateway.WriteStateException; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.gateway.WriteStateException; import org.opensearch.index.IndexService; import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.IndicesService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; @@ -85,7 +86,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseStatsTests.java b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseStatsTests.java index 45bbc92c65aa5..f5fca93436962 100644 --- a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseStatsTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseStatsTests.java @@ -32,13 +32,13 @@ package org.opensearch.index.seqno; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; import org.opensearch.action.support.replication.ReplicationResponse; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.IndicesService; import org.opensearch.test.OpenSearchSingleNodeTestCase; diff --git a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java index 6a87ea1088492..63a9ac2f2e8ec 100644 --- a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java @@ -32,7 +32,6 @@ package org.opensearch.index.seqno; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.ActionTestUtils; import org.opensearch.action.support.PlainActionFuture; @@ -42,13 +41,15 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.io.IOUtils; +import 
org.opensearch.core.action.ActionListener; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexService; import org.opensearch.index.IndexingPressureService; import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; @@ -84,7 +85,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); @@ -123,7 +125,8 @@ public void testRetentionLeaseSyncActionOnPrimary() { shardStateAction, new ActionFilters(Collections.emptySet()), new IndexingPressureService(Settings.EMPTY, clusterService), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); final RetentionLeases retentionLeases = mock(RetentionLeases.class); final RetentionLeaseSyncAction.Request request = new RetentionLeaseSyncAction.Request(indexShard.shardId(), retentionLeases); @@ -160,7 +163,8 @@ public void testRetentionLeaseSyncActionOnReplica() throws Exception { shardStateAction, new ActionFilters(Collections.emptySet()), new IndexingPressureService(Settings.EMPTY, clusterService), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); final RetentionLeases retentionLeases = mock(RetentionLeases.class); final RetentionLeaseSyncAction.Request request = new RetentionLeaseSyncAction.Request(indexShard.shardId(), retentionLeases); @@ -201,7 +205,8 @@ public void testBlocks() { shardStateAction, new ActionFilters(Collections.emptySet()), new IndexingPressureService(Settings.EMPTY, clusterService), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); assertNull(action.indexBlockLevel()); @@ -231,7 +236,8 @@ private RetentionLeaseSyncAction createAction() { shardStateAction, new ActionFilters(Collections.emptySet()), new IndexingPressureService(Settings.EMPTY, clusterService), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/index/seqno/SequenceNumbersTests.java b/server/src/test/java/org/opensearch/index/seqno/SequenceNumbersTests.java index e5a0ab4158cfd..8abdb3c540a6f 100644 --- a/server/src/test/java/org/opensearch/index/seqno/SequenceNumbersTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/SequenceNumbersTests.java @@ -32,8 +32,8 @@ package org.opensearch.index.seqno; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.EqualsHashCodeTestUtils; +import org.opensearch.test.OpenSearchTestCase; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; diff --git a/server/src/test/java/org/opensearch/index/shard/CloseableRetryableRefreshListenerTests.java b/server/src/test/java/org/opensearch/index/shard/CloseableRetryableRefreshListenerTests.java deleted file mode 100644 index b9df9ed5a13d8..0000000000000 --- a/server/src/test/java/org/opensearch/index/shard/CloseableRetryableRefreshListenerTests.java +++ /dev/null @@ -1,307 +0,0 @@ -/* - * 
SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.shard; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.junit.After; -import org.junit.Before; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.threadpool.TestThreadPool; -import org.opensearch.threadpool.ThreadPool; - -import java.io.IOException; -import java.util.concurrent.CountDownLatch; - -public class CloseableRetryableRefreshListenerTests extends OpenSearchTestCase { - - private static final Logger logger = LogManager.getLogger(CloseableRetryableRefreshListenerTests.class); - - private ThreadPool threadPool; - - @Before - public void init() { - threadPool = new TestThreadPool(getTestName()); - } - - /** - * This tests that the performAfterRefresh method is being invoked when the afterRefresh method is invoked. We check that the countDownLatch is decreasing as intended to validate that the performAfterRefresh is being invoked. - */ - public void testPerformAfterRefresh() throws IOException { - - CountDownLatch countDownLatch = new CountDownLatch(2); - CloseableRetryableRefreshListener testRefreshListener = new CloseableRetryableRefreshListener(null) { - @Override - protected boolean performAfterRefresh(boolean didRefresh, boolean isRetry) { - countDownLatch.countDown(); - return false; - } - - @Override - public void beforeRefresh() {} - - @Override - protected Logger getLogger() { - return logger; - } - }; - - // First invocation of afterRefresh method - testRefreshListener.afterRefresh(true); - assertEquals(1, countDownLatch.getCount()); - - // Second invocation of afterRefresh method - testRefreshListener.afterRefresh(true); - assertEquals(0, countDownLatch.getCount()); - testRefreshListener.close(); - } - - /** - * This tests that close is acquiring all permits and even if the afterRefresh method is called, it is no-op. - */ - public void testCloseAfterRefresh() throws IOException { - final int initialCount = randomIntBetween(10, 100); - final CountDownLatch countDownLatch = new CountDownLatch(initialCount); - CloseableRetryableRefreshListener testRefreshListener = new CloseableRetryableRefreshListener(null) { - @Override - protected boolean performAfterRefresh(boolean didRefresh, boolean isRetry) { - countDownLatch.countDown(); - return false; - } - - @Override - public void beforeRefresh() {} - - @Override - protected Logger getLogger() { - return logger; - } - }; - - int refreshCount = randomIntBetween(1, initialCount); - for (int i = 0; i < refreshCount; i++) { - testRefreshListener.afterRefresh(true); - } - assertEquals(initialCount - refreshCount, countDownLatch.getCount()); - - // Closing the refresh listener so that no further afterRefreshes are executed going forward - testRefreshListener.close(); - - for (int i = 0; i < initialCount - refreshCount; i++) { - testRefreshListener.afterRefresh(true); - } - assertEquals(initialCount - refreshCount, countDownLatch.getCount()); - } - - /** - * This tests that the retry does not get triggered when there are missing configurations or method overrides that empowers the retry to happen. 
- */ - public void testNoRetry() throws IOException { - int initialCount = randomIntBetween(10, 100); - final CountDownLatch countDownLatch = new CountDownLatch(initialCount); - CloseableRetryableRefreshListener testRefreshListener = new CloseableRetryableRefreshListener(null) { - @Override - protected boolean performAfterRefresh(boolean didRefresh, boolean isRetry) { - countDownLatch.countDown(); - return countDownLatch.getCount() == 0; - } - - @Override - public void beforeRefresh() {} - - @Override - protected Logger getLogger() { - return logger; - } - }; - testRefreshListener.afterRefresh(true); - assertEquals(initialCount - 1, countDownLatch.getCount()); - testRefreshListener.close(); - - testRefreshListener = new CloseableRetryableRefreshListener(threadPool) { - @Override - protected boolean performAfterRefresh(boolean didRefresh, boolean isRetry) { - countDownLatch.countDown(); - return countDownLatch.getCount() == 0; - } - - @Override - public void beforeRefresh() {} - - @Override - protected Logger getLogger() { - return logger; - } - }; - testRefreshListener.afterRefresh(true); - assertEquals(initialCount - 2, countDownLatch.getCount()); - testRefreshListener.close(); - - testRefreshListener = new CloseableRetryableRefreshListener(threadPool) { - @Override - protected boolean performAfterRefresh(boolean didRefresh, boolean isRetry) { - countDownLatch.countDown(); - return countDownLatch.getCount() == 0; - } - - @Override - public void beforeRefresh() {} - - @Override - protected String getRetryThreadPoolName() { - return ThreadPool.Names.REMOTE_REFRESH_RETRY; - } - - @Override - protected Logger getLogger() { - return logger; - } - }; - testRefreshListener.afterRefresh(true); - assertEquals(initialCount - 3, countDownLatch.getCount()); - testRefreshListener.close(); - - testRefreshListener = new CloseableRetryableRefreshListener(threadPool) { - @Override - protected boolean performAfterRefresh(boolean didRefresh, boolean isRetry) { - countDownLatch.countDown(); - return countDownLatch.getCount() == 0; - } - - @Override - public void beforeRefresh() {} - - @Override - protected TimeValue getNextRetryInterval() { - return TimeValue.timeValueMillis(100); - } - - @Override - protected Logger getLogger() { - return logger; - } - }; - testRefreshListener.afterRefresh(true); - assertEquals(initialCount - 4, countDownLatch.getCount()); - testRefreshListener.close(); - } - - /** - * This tests that retry gets scheduled and executed when the configurations and method overrides are done properly. 
- */ - public void testRetry() throws Exception { - int initialCount = randomIntBetween(10, 20); - final CountDownLatch countDownLatch = new CountDownLatch(initialCount); - CloseableRetryableRefreshListener testRefreshListener = new CloseableRetryableRefreshListener(threadPool) { - @Override - protected boolean performAfterRefresh(boolean didRefresh, boolean isRetry) { - countDownLatch.countDown(); - return countDownLatch.getCount() == 0; - } - - @Override - public void beforeRefresh() {} - - @Override - protected String getRetryThreadPoolName() { - return ThreadPool.Names.REMOTE_REFRESH_RETRY; - } - - @Override - protected TimeValue getNextRetryInterval() { - return TimeValue.timeValueMillis(100); - } - - @Override - protected Logger getLogger() { - return logger; - } - }; - testRefreshListener.afterRefresh(true); - assertBusy(() -> assertEquals(0, countDownLatch.getCount())); - testRefreshListener.close(); - } - - /** - * This tests that once close method is invoked, then even the retries would become no-op. - */ - public void testCloseWithRetryPending() throws IOException { - int initialCount = randomIntBetween(10, 20); - final CountDownLatch countDownLatch = new CountDownLatch(initialCount); - CloseableRetryableRefreshListener testRefreshListener = new CloseableRetryableRefreshListener(threadPool) { - @Override - protected boolean performAfterRefresh(boolean didRefresh, boolean isRetry) { - countDownLatch.countDown(); - return countDownLatch.getCount() == 0; - } - - @Override - public void beforeRefresh() {} - - @Override - protected String getRetryThreadPoolName() { - return ThreadPool.Names.REMOTE_REFRESH_RETRY; - } - - @Override - protected TimeValue getNextRetryInterval() { - return TimeValue.timeValueMillis(100); - } - - @Override - protected Logger getLogger() { - return logger; - } - }; - testRefreshListener.afterRefresh(randomBoolean()); - testRefreshListener.close(); - assertNotEquals(0, countDownLatch.getCount()); - } - - public void testCloseWaitsForAcquiringAllPermits() throws Exception { - final CountDownLatch countDownLatch = new CountDownLatch(1); - CloseableRetryableRefreshListener testRefreshListener = new CloseableRetryableRefreshListener(threadPool) { - @Override - protected boolean performAfterRefresh(boolean didRefresh, boolean isRetry) { - try { - Thread.sleep(5000); - } catch (InterruptedException e) { - throw new AssertionError(e); - } - countDownLatch.countDown(); - return false; - } - - @Override - public void beforeRefresh() {} - - @Override - protected Logger getLogger() { - return logger; - } - }; - Thread thread = new Thread(() -> { - try { - testRefreshListener.afterRefresh(randomBoolean()); - } catch (IOException e) { - throw new AssertionError(e); - } - }); - thread.start(); - assertBusy(() -> assertEquals(0, countDownLatch.getCount())); - testRefreshListener.close(); - } - - @After - public void tearDown() throws Exception { - super.tearDown(); - terminate(threadPool); - } -} diff --git a/server/src/test/java/org/opensearch/index/shard/DocsStatsTests.java b/server/src/test/java/org/opensearch/index/shard/DocsStatsTests.java index 189996d877c4f..c534f8e0a00be 100644 --- a/server/src/test/java/org/opensearch/index/shard/DocsStatsTests.java +++ b/server/src/test/java/org/opensearch/index/shard/DocsStatsTests.java @@ -32,8 +32,8 @@ package org.opensearch.index.shard; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.bytes.BytesReference; import 
org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/index/shard/GlobalCheckpointListenersTests.java b/server/src/test/java/org/opensearch/index/shard/GlobalCheckpointListenersTests.java index 68ec1bd9f2814..db575d7d90bbf 100644 --- a/server/src/test/java/org/opensearch/index/shard/GlobalCheckpointListenersTests.java +++ b/server/src/test/java/org/opensearch/index/shard/GlobalCheckpointListenersTests.java @@ -34,16 +34,15 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.core.Assertions; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.OpenSearchExecutors; -import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.Assertions; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.Scheduler; import org.junit.After; -import org.mockito.ArgumentCaptor; import java.io.IOException; import java.io.UncheckedIOException; @@ -65,6 +64,8 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; +import org.mockito.ArgumentCaptor; + import static org.opensearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; import static org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; import static org.hamcrest.Matchers.containsString; diff --git a/server/src/test/java/org/opensearch/index/shard/IndexReaderWrapperTests.java b/server/src/test/java/org/opensearch/index/shard/IndexReaderWrapperTests.java index b9dc75adec974..4134f1bd91985 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexReaderWrapperTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexReaderWrapperTests.java @@ -36,7 +36,6 @@ import org.apache.lucene.document.StringField; import org.apache.lucene.document.TextField; import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.tests.index.FieldFilterLeafReader; import org.apache.lucene.index.FilterDirectoryReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; @@ -46,6 +45,7 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.FieldFilterLeafReader; import org.opensearch.common.CheckedFunction; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; import org.opensearch.common.util.io.IOUtils; diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardOperationPermitsTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardOperationPermitsTests.java index 0296465352836..0097a615f5e91 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardOperationPermitsTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardOperationPermitsTests.java @@ -31,13 +31,13 @@ package org.opensearch.index.shard; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.common.CheckedRunnable; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor; import org.opensearch.common.util.concurrent.ThreadContext; -import 
org.opensearch.common.lease.Releasable; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.core.index.shard.ShardId; import org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardRetentionLeaseTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardRetentionLeaseTests.java index 3de5175bac7c5..e6297c0f239e9 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardRetentionLeaseTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardRetentionLeaseTests.java @@ -32,12 +32,12 @@ package org.opensearch.index.shard; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.ShardRoutingHelper; import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; import org.opensearch.index.IndexSettings; import org.opensearch.index.engine.InternalEngineFactory; import org.opensearch.index.seqno.ReplicationTracker; diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index 915a15da6cb1d..46be10ce62840 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -36,8 +36,8 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexableField; -import org.apache.lucene.index.Term; import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.index.Term; import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; @@ -49,12 +49,8 @@ import org.apache.lucene.tests.store.BaseDirectoryWrapper; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Constants; -import org.junit.Assert; -import org.opensearch.common.io.PathUtils; -import org.opensearch.core.Assertions; import org.opensearch.OpenSearchException; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.opensearch.action.admin.indices.stats.CommonStats; @@ -75,13 +71,13 @@ import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.common.CheckedFunction; import org.opensearch.common.Randomness; -import org.opensearch.common.Strings; import org.opensearch.common.UUIDs; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.collect.Tuple; import org.opensearch.common.concurrent.GatedCloseable; +import org.opensearch.common.io.PathUtils; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; @@ -89,10 +85,13 @@ import org.opensearch.common.util.concurrent.AtomicArray; import org.opensearch.common.util.concurrent.ConcurrentCollections; import 
org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.lease.Releasable; -import org.opensearch.common.lease.Releasables; +import org.opensearch.core.Assertions; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.env.NodeEnvironment; @@ -106,8 +105,8 @@ import org.opensearch.index.engine.EngineTestCase; import org.opensearch.index.engine.InternalEngine; import org.opensearch.index.engine.InternalEngineFactory; -import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.engine.NRTReplicationEngine; +import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.engine.ReadOnlyEngine; import org.opensearch.index.fielddata.FieldDataStats; import org.opensearch.index.fielddata.IndexFieldData; @@ -124,6 +123,9 @@ import org.opensearch.index.mapper.SourceToParse; import org.opensearch.index.mapper.Uid; import org.opensearch.index.mapper.VersionFieldMapper; +import org.opensearch.index.remote.RemoteSegmentStats; +import org.opensearch.index.remote.RemoteSegmentTransferTracker; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.index.seqno.ReplicationTracker; import org.opensearch.index.seqno.RetentionLease; import org.opensearch.index.seqno.RetentionLeaseSyncer; @@ -136,12 +138,12 @@ import org.opensearch.index.store.StoreUtils; import org.opensearch.index.translog.InternalTranslogFactory; import org.opensearch.index.translog.RemoteBlobStoreInternalTranslogFactory; +import org.opensearch.index.translog.RemoteTranslogStats; import org.opensearch.index.translog.TestTranslog; import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.TranslogStats; import org.opensearch.index.translog.listener.TranslogEventListener; import org.opensearch.indices.IndicesQueryCache; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.recovery.RecoveryTarget; @@ -158,6 +160,7 @@ import org.opensearch.test.VersionUtils; import org.opensearch.test.store.MockFSDirectoryFactory; import org.opensearch.threadpool.ThreadPool; +import org.junit.Assert; import java.io.IOException; import java.nio.charset.Charset; @@ -168,6 +171,7 @@ import java.nio.file.attribute.BasicFileAttributes; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.List; @@ -192,9 +196,15 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.stream.Stream; -import java.util.Collection; + import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; +import static org.opensearch.cluster.routing.TestShardRouting.newShardRouting; +import static org.opensearch.common.lucene.Lucene.cleanLuceneIndex; +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static 
org.opensearch.core.xcontent.ToXContent.EMPTY_PARAMS; +import static org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; +import static org.opensearch.test.hamcrest.RegexMatcher.matches; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.either; @@ -215,12 +225,6 @@ import static org.hamcrest.Matchers.oneOf; import static org.hamcrest.Matchers.sameInstance; import static org.mockito.Mockito.mock; -import static org.opensearch.cluster.routing.TestShardRouting.newShardRouting; -import static org.opensearch.common.lucene.Lucene.cleanLuceneIndex; -import static org.opensearch.core.xcontent.ToXContent.EMPTY_PARAMS; -import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; -import static org.opensearch.test.hamcrest.RegexMatcher.matches; /** * Simple unit-test IndexShard related operations. @@ -1260,6 +1264,7 @@ public void testGetChangesSnapshotThrowsAssertForRemoteStore() throws IOExceptio .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) .build(); final IndexMetadata.Builder indexMetadata = IndexMetadata.builder(shardRouting.getIndexName()).settings(settings).primaryTerm(0, 1); @@ -1411,7 +1416,7 @@ public void testRestoreLocalHistoryFromTranslogOnPromotion() throws IOException, indexShard, indexShard.getPendingPrimaryTerm() + 1, globalCheckpoint, - randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, maxSeqNo), + randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, maxSeqNoOfUpdatesOrDeletesBeforeRollback), new ActionListener<Releasable>() { @Override public void onResponse(Releasable releasable) { @@ -1704,7 +1709,7 @@ public void testShardStats() throws IOException { builder.startObject(); stats.toXContent(builder, EMPTY_PARAMS); builder.endObject(); - String xContent = Strings.toString(builder); + String xContent = builder.toString(); StringBuilder expectedSubSequence = new StringBuilder("\"shard_path\":{\"state_path\":\""); expectedSubSequence.append(shard.shardPath().getRootStatePath().toString()); expectedSubSequence.append("\",\"data_path\":\""); @@ -1775,7 +1780,7 @@ public Set<String> getPendingDeletions() throws IOException { } }; - try (Store store = createStore(shardId, new IndexSettings(metadata, Settings.EMPTY), directory)) { + try (Store store = createStore(shardId, new IndexSettings(metadata, Settings.EMPTY), directory, shardPath)) { IndexShard shard = newShard( shardRouting, shardPath, @@ -1811,6 +1816,34 @@ public Set<String> getPendingDeletions() throws IOException { } } + public void testShardStatsWithRemoteStoreEnabled() throws IOException { + IndexShard shard = newStartedShard( + Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, "SEGMENT") + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) + .build() + ); + RemoteSegmentTransferTracker remoteSegmentTransferTracker = shard.getRemoteStoreStatsTrackerFactory() + .getRemoteSegmentTransferTracker(shard.shardId); + RemoteTranslogTransferTracker remoteTranslogTransferTracker = shard.getRemoteStoreStatsTrackerFactory() + .getRemoteTranslogTransferTracker(shard.shardId); + populateSampleRemoteSegmentStats(remoteSegmentTransferTracker); + 
populateSampleRemoteTranslogStats(remoteTranslogTransferTracker); + ShardStats shardStats = new ShardStats( + shard.routingEntry(), + shard.shardPath(), + new CommonStats(new IndicesQueryCache(Settings.EMPTY), shard, new CommonStatsFlags()), + shard.commitStats(), + shard.seqNoStats(), + shard.getRetentionLeaseStats() + ); + RemoteSegmentStats remoteSegmentStats = shardStats.getStats().getSegments().getRemoteSegmentStats(); + assertRemoteSegmentStats(remoteSegmentTransferTracker, remoteSegmentStats); + RemoteTranslogStats remoteTranslogStats = shardStats.getStats().getTranslog().getRemoteTranslogStats(); + assertRemoteTranslogStats(remoteTranslogTransferTracker, remoteTranslogStats); + closeShards(shard); + } + public void testRefreshMetric() throws IOException { IndexShard shard = newStartedShard(); // refresh on: finalize and end of recovery @@ -2308,7 +2341,7 @@ public void testRecoverFromStoreWithOutOfOrderDelete() throws IOException { 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, - new SourceToParse(shard.shardId().getIndexName(), "id", new BytesArray("{}"), XContentType.JSON) + new SourceToParse(shard.shardId().getIndexName(), "id", new BytesArray("{}"), MediaTypeRegistry.JSON) ); shard.applyIndexOperationOnReplica( UUID.randomUUID().toString(), @@ -2317,7 +2350,7 @@ public void testRecoverFromStoreWithOutOfOrderDelete() throws IOException { 3, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, - new SourceToParse(shard.shardId().getIndexName(), "id-3", new BytesArray("{}"), XContentType.JSON) + new SourceToParse(shard.shardId().getIndexName(), "id-3", new BytesArray("{}"), MediaTypeRegistry.JSON) ); // Flushing a new commit with local checkpoint=1 allows to skip the translog gen #1 in recovery. shard.flush(new FlushRequest().force(true).waitIfOngoing(true)); @@ -2328,7 +2361,7 @@ public void testRecoverFromStoreWithOutOfOrderDelete() throws IOException { 3, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, - new SourceToParse(shard.shardId().getIndexName(), "id-2", new BytesArray("{}"), XContentType.JSON) + new SourceToParse(shard.shardId().getIndexName(), "id-2", new BytesArray("{}"), MediaTypeRegistry.JSON) ); shard.applyIndexOperationOnReplica( UUID.randomUUID().toString(), @@ -2337,7 +2370,7 @@ public void testRecoverFromStoreWithOutOfOrderDelete() throws IOException { 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, - new SourceToParse(shard.shardId().getIndexName(), "id-5", new BytesArray("{}"), XContentType.JSON) + new SourceToParse(shard.shardId().getIndexName(), "id-5", new BytesArray("{}"), MediaTypeRegistry.JSON) ); shard.sync(); // advance local checkpoint @@ -2477,7 +2510,7 @@ public void testRecoverFromStoreWithNoOps() throws IOException { // start a replica shard and index the second doc final IndexShard otherShard = newStartedShard(false); updateMappings(otherShard, shard.indexSettings().getIndexMetadata()); - SourceToParse sourceToParse = new SourceToParse(shard.shardId().getIndexName(), "1", new BytesArray("{}"), XContentType.JSON); + SourceToParse sourceToParse = new SourceToParse(shard.shardId().getIndexName(), "1", new BytesArray("{}"), MediaTypeRegistry.JSON); otherShard.applyIndexOperationOnReplica( UUID.randomUUID().toString(), 1, @@ -2613,7 +2646,7 @@ public void testRecoverFromStoreRemoveStaleOperations() throws Exception { 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, - new SourceToParse(indexName, "doc-0", new BytesArray("{}"), XContentType.JSON) + new SourceToParse(indexName, "doc-0", new BytesArray("{}"), 
MediaTypeRegistry.JSON) ); flushShard(shard); shard.updateGlobalCheckpointOnReplica(0, "test"); // stick the global checkpoint here. @@ -2624,7 +2657,7 @@ public void testRecoverFromStoreRemoveStaleOperations() throws Exception { 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, - new SourceToParse(indexName, "doc-1", new BytesArray("{}"), XContentType.JSON) + new SourceToParse(indexName, "doc-1", new BytesArray("{}"), MediaTypeRegistry.JSON) ); flushShard(shard); assertThat(getShardDocUIDs(shard), containsInAnyOrder("doc-0", "doc-1")); @@ -2637,7 +2670,7 @@ public void testRecoverFromStoreRemoveStaleOperations() throws Exception { 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, - new SourceToParse(indexName, "doc-2", new BytesArray("{}"), XContentType.JSON) + new SourceToParse(indexName, "doc-2", new BytesArray("{}"), MediaTypeRegistry.JSON) ); flushShard(shard); assertThat(getShardDocUIDs(shard), containsInAnyOrder("doc-0", "doc-1", "doc-2")); @@ -2712,6 +2745,7 @@ public void testRelocatedForRemoteTranslogBackedIndexWithAsyncDurability() throw AllocationId.newRelocation(routing.allocationId()) ); IndexShardTestCase.updateRoutingEntry(indexShard, routing); + indexDoc(indexShard, "_doc", "0"); assertTrue(indexShard.isSyncNeeded()); try { indexShard.relocated(routing.getTargetRelocatingShard().allocationId().getId(), primaryContext -> {}, () -> {}); @@ -2816,6 +2850,7 @@ public void testSyncSegmentsFromGivenRemoteSegmentStore() throws IOException { indexDoc(source, "_doc", "1"); indexDoc(source, "_doc", "2"); source.refresh("test"); + assertTrue("At least one remote sync should have been completed", source.isRemoteSegmentStoreInSync()); assertDocs(source, "1", "2"); indexDoc(source, "_doc", "3"); source.refresh("test"); @@ -2885,13 +2920,14 @@ public void testCommitLevelRestoreShardFromRemoteStore() throws IOException { } public void testRestoreShardFromRemoteStore(boolean performFlush) throws IOException { + String remoteStorePath = createTempDir().toString(); IndexShard target = newStartedShard( true, Settings.builder() .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) - .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, "temp-fs") - .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, "temp-fs") + .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, remoteStorePath + "__test") + .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, remoteStorePath + "__test") .build(), new InternalEngineFactory() ); @@ -2956,7 +2992,6 @@ public void testRestoreShardFromRemoteStore(boolean performFlush) throws IOExcep final PlainActionFuture<Boolean> future = PlainActionFuture.newFuture(); target.restoreFromRemoteStore(future); target.remoteStore().decRef(); - assertTrue(future.actionGet()); assertDocs(target, "1", "2"); @@ -3632,14 +3667,13 @@ public void testEstimateTotalDocSize() throws Exception { int numDoc = randomIntBetween(100, 200); for (int i = 0; i < numDoc; i++) { - String doc = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .field("count", randomInt()) - .field("point", randomFloat()) - .field("description", randomUnicodeOfCodepointLength(100)) - .endObject() - ); + String doc = XContentFactory.jsonBuilder() + .startObject() + .field("count", randomInt()) + .field("point", randomFloat()) + .field("description", randomUnicodeOfCodepointLength(100)) + .endObject() + .toString(); indexDoc(indexShard, "_doc", Integer.toString(i), doc); } @@ -3751,7 +3785,7 
@@ public void testReadSnapshotConcurrently() throws IOException, InterruptedExcept */ public void testCheckpointRefreshListener() throws IOException { final SegmentReplicationCheckpointPublisher mock = mock(SegmentReplicationCheckpointPublisher.class); - IndexShard shard = newStartedShard(p -> newShard(mock), true); + IndexShard shard = newStartedShard(p -> newShard(true, mock), true); List<ReferenceManager.RefreshListener> refreshListeners = shard.getEngine().config().getInternalRefreshListener(); assertTrue(refreshListeners.stream().anyMatch(e -> e instanceof CheckpointRefreshListener)); closeShards(shard); @@ -3761,58 +3795,13 @@ public void testCheckpointRefreshListener() throws IOException { * here we are passing null in place of SegmentReplicationCheckpointPublisher and testing on index shard if CheckpointRefreshListener is not added to the InternalrefreshListerners List */ public void testCheckpointRefreshListenerWithNull() throws IOException { - IndexShard shard = newStartedShard(p -> newShard(null), true); + final SegmentReplicationCheckpointPublisher publisher = null; + IndexShard shard = newStartedShard(p -> newShard(true, publisher), true); List<ReferenceManager.RefreshListener> refreshListeners = shard.getEngine().config().getInternalRefreshListener(); assertFalse(refreshListeners.stream().anyMatch(e -> e instanceof CheckpointRefreshListener)); closeShards(shard); } - /** - * creates a new initializing shard. The shard will be put in its proper path under the - * current node id the shard is assigned to. - * @param checkpointPublisher Segment Replication Checkpoint Publisher to publish checkpoint - */ - private IndexShard newShard(SegmentReplicationCheckpointPublisher checkpointPublisher) throws IOException { - final ShardId shardId = new ShardId("index", "_na_", 0); - final ShardRouting shardRouting = TestShardRouting.newShardRouting( - shardId, - randomAlphaOfLength(10), - true, - ShardRoutingState.INITIALIZING, - RecoverySource.EmptyStoreRecoverySource.INSTANCE - ); - final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(createTempDir()); - ShardPath shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId); - - Settings indexSettings = Settings.builder() - .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_REPLICATION_TYPE, "SEGMENT") - .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), between(0, 1000)) - .put(Settings.EMPTY) - .build(); - IndexMetadata metadata = IndexMetadata.builder(shardRouting.getIndexName()) - .settings(indexSettings) - .primaryTerm(0, primaryTerm) - .putMapping("{ \"properties\": {} }") - .build(); - return newShard( - shardRouting, - shardPath, - metadata, - null, - null, - new InternalEngineFactory(), - new EngineConfigFactory(new IndexSettings(metadata, metadata.getSettings())), - () -> {}, - RetentionLeaseSyncer.EMPTY, - EMPTY_EVENT_LISTENER, - checkpointPublisher, - null - ); - } - public void testIndexCheckOnStartup() throws Exception { final IndexShard indexShard = newStartedShard(true); @@ -4096,7 +4085,7 @@ private Result indexOnReplicaWithGaps(final IndexShard indexShard, final int ope indexShard.shardId().getIndexName(), id, new BytesArray("{}"), - XContentType.JSON + MediaTypeRegistry.JSON ); indexShard.applyIndexOperationOnReplica( UUID.randomUUID().toString(), @@ -4730,7 +4719,7 @@ public void 
testDoNotTrimCommitsWhenOpenReadOnlyEngine() throws Exception { 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, - new SourceToParse(shard.shardId.getIndexName(), Long.toString(i), new BytesArray("{}"), XContentType.JSON) + new SourceToParse(shard.shardId.getIndexName(), Long.toString(i), new BytesArray("{}"), MediaTypeRegistry.JSON) ); shard.updateGlobalCheckpointOnReplica(shard.getLocalCheckpoint(), "test"); if (randomInt(100) < 10) { @@ -4839,12 +4828,13 @@ public void testTranslogFactoryForRemoteTranslogBackedReplicaShard() throws IOEx .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, "seg-test") .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, "txlog-test") .build(); final IndexShard replicaShard = newStartedShard(false, primarySettings, new NRTReplicationEngineFactory()); - assertEquals(replicaShard.getEngine().getClass(), InternalEngine.class); + assertEquals(replicaShard.getEngine().getClass(), NRTReplicationEngine.class); assertEquals(replicaShard.getEngine().config().getTranslogFactory().getClass(), InternalTranslogFactory.class); closeShards(replicaShard); } @@ -4917,4 +4907,47 @@ public void testRecordsForceMerges() throws IOException { assertThat(thirdForceMergeUUID, equalTo(secondForceMergeRequest.forceMergeUUID())); closeShards(shard); } + + private void populateSampleRemoteSegmentStats(RemoteSegmentTransferTracker tracker) { + tracker.addUploadBytesStarted(30L); + tracker.addUploadBytesSucceeded(10L); + tracker.addUploadBytesFailed(10L); + tracker.incrementRejectionCount(); + tracker.incrementRejectionCount(); + } + + private void populateSampleRemoteTranslogStats(RemoteTranslogTransferTracker tracker) { + tracker.incrementTotalUploadsStarted(); + tracker.incrementTotalUploadsStarted(); + tracker.incrementTotalUploadsStarted(); + tracker.incrementTotalUploadsSucceeded(); + tracker.incrementTotalUploadsFailed(); + int bytesStarted = randomIntBetween(100, 1000); + tracker.addUploadBytesStarted(bytesStarted); + tracker.addUploadBytesSucceeded(randomIntBetween(1, bytesStarted / 2)); + tracker.addUploadBytesFailed(randomIntBetween(1, bytesStarted / 2)); + } + + private static void assertRemoteTranslogStats( + RemoteTranslogTransferTracker remoteTranslogTransferTracker, + RemoteTranslogStats remoteTranslogStats + ) { + assertEquals(remoteTranslogTransferTracker.getTotalUploadsStarted(), remoteTranslogStats.getTotalUploadsStarted()); + assertEquals(remoteTranslogTransferTracker.getTotalUploadsSucceeded(), remoteTranslogStats.getTotalUploadsSucceeded()); + assertEquals(remoteTranslogTransferTracker.getTotalUploadsFailed(), remoteTranslogStats.getTotalUploadsFailed()); + assertEquals(remoteTranslogTransferTracker.getUploadBytesStarted(), remoteTranslogStats.getUploadBytesStarted()); + assertEquals(remoteTranslogTransferTracker.getUploadBytesSucceeded(), remoteTranslogStats.getUploadBytesSucceeded()); + assertEquals(remoteTranslogTransferTracker.getUploadBytesFailed(), remoteTranslogStats.getUploadBytesFailed()); + } + + private static void assertRemoteSegmentStats( + RemoteSegmentTransferTracker remoteSegmentTransferTracker, + RemoteSegmentStats remoteSegmentStats + ) { + assertEquals(remoteSegmentTransferTracker.getUploadBytesStarted(), 
remoteSegmentStats.getUploadBytesStarted()); + assertEquals(remoteSegmentTransferTracker.getUploadBytesSucceeded(), remoteSegmentStats.getUploadBytesSucceeded()); + assertEquals(remoteSegmentTransferTracker.getUploadBytesFailed(), remoteSegmentStats.getUploadBytesFailed()); + assertTrue(remoteSegmentStats.getTotalRejections() > 0); + assertEquals(remoteSegmentTransferTracker.getRejectionCount(), remoteSegmentStats.getTotalRejections()); + } } diff --git a/server/src/test/java/org/opensearch/index/shard/IndexingOperationListenerTests.java b/server/src/test/java/org/opensearch/index/shard/IndexingOperationListenerTests.java index 3ba22b255a109..a62b38a6b0063 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexingOperationListenerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexingOperationListenerTests.java @@ -32,8 +32,8 @@ package org.opensearch.index.shard; import org.apache.lucene.index.Term; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.engine.Engine; import org.opensearch.index.engine.InternalEngineTests; import org.opensearch.index.mapper.ParsedDocument; diff --git a/server/src/test/java/org/opensearch/index/shard/IndexingStatsTests.java b/server/src/test/java/org/opensearch/index/shard/IndexingStatsTests.java new file mode 100644 index 0000000000000..acf482552c260 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/shard/IndexingStatsTests.java @@ -0,0 +1,141 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.shard; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Comparator; +import java.util.concurrent.atomic.AtomicLong; + +public class IndexingStatsTests extends OpenSearchTestCase { + + public void testSerialization() throws IOException { + IndexingStats stats = createTestInstance(); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + stats.writeTo(out); + + try (StreamInput in = out.bytes().streamInput()) { + IndexingStats deserializedStats = new IndexingStats(in); + + if (stats.getTotal() == null) { + assertNull(deserializedStats.getTotal()); + return; + } + + IndexingStats.Stats totalStats = stats.getTotal(); + IndexingStats.Stats deserializedTotalStats = deserializedStats.getTotal(); + + assertEquals(totalStats.getIndexCount(), deserializedTotalStats.getIndexCount()); + assertEquals(totalStats.getIndexTime(), deserializedTotalStats.getIndexTime()); + assertEquals(totalStats.getIndexCurrent(), deserializedTotalStats.getIndexCurrent()); + assertEquals(totalStats.getIndexFailedCount(), deserializedTotalStats.getIndexFailedCount()); + assertEquals(totalStats.getDeleteCount(), deserializedTotalStats.getDeleteCount()); + assertEquals(totalStats.getDeleteTime(), deserializedTotalStats.getDeleteTime()); + assertEquals(totalStats.getDeleteCurrent(), deserializedTotalStats.getDeleteCurrent()); + 
assertEquals(totalStats.getNoopUpdateCount(), deserializedTotalStats.getNoopUpdateCount()); + assertEquals(totalStats.isThrottled(), deserializedTotalStats.isThrottled()); + assertEquals(totalStats.getThrottleTime(), deserializedTotalStats.getThrottleTime()); + + if (totalStats.getDocStatusStats() == null) { + assertNull(deserializedTotalStats.getDocStatusStats()); + return; + } + + IndexingStats.Stats.DocStatusStats docStatusStats = totalStats.getDocStatusStats(); + IndexingStats.Stats.DocStatusStats deserializedDocStatusStats = deserializedTotalStats.getDocStatusStats(); + + assertTrue( + Arrays.equals( + docStatusStats.getDocStatusCounter(), + deserializedDocStatusStats.getDocStatusCounter(), + Comparator.comparingLong(AtomicLong::longValue) + ) + ); + } + } + } + + public void testToXContentForIndexingStats() throws IOException { + IndexingStats stats = createTestInstance(); + IndexingStats.Stats totalStats = stats.getTotal(); + AtomicLong[] counter = totalStats.getDocStatusStats().getDocStatusCounter(); + + String expected = "{\"indexing\":{\"index_total\":" + + totalStats.getIndexCount() + + ",\"index_time_in_millis\":" + + totalStats.getIndexTime().getMillis() + + ",\"index_current\":" + + totalStats.getIndexCurrent() + + ",\"index_failed\":" + + totalStats.getIndexFailedCount() + + ",\"delete_total\":" + + totalStats.getDeleteCount() + + ",\"delete_time_in_millis\":" + + totalStats.getDeleteTime().getMillis() + + ",\"delete_current\":" + + totalStats.getDeleteCurrent() + + ",\"noop_update_total\":" + + totalStats.getNoopUpdateCount() + + ",\"is_throttled\":" + + totalStats.isThrottled() + + ",\"throttle_time_in_millis\":" + + totalStats.getThrottleTime().getMillis() + + ",\"doc_status\":{\"1xx\":" + + counter[0] + + ",\"2xx\":" + + counter[1] + + ",\"3xx\":" + + counter[2] + + ",\"4xx\":" + + counter[3] + + ",\"5xx\":" + + counter[4] + + "}}}"; + + XContentBuilder xContentBuilder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); + xContentBuilder.startObject(); + xContentBuilder = stats.toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS); + xContentBuilder.endObject(); + + assertEquals(expected, xContentBuilder.toString()); + } + + private IndexingStats createTestInstance() { + IndexingStats.Stats.DocStatusStats docStatusStats = new IndexingStats.Stats.DocStatusStats(); + for (int i = 1; i < 6; ++i) { + docStatusStats.add(RestStatus.fromCode(i * 100), randomNonNegativeLong()); + } + + IndexingStats.Stats stats = new IndexingStats.Stats( + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomBoolean(), + randomNonNegativeLong(), + docStatusStats + ); + + return new IndexingStats(stats); + } + +} diff --git a/server/src/test/java/org/opensearch/index/shard/NewPathForShardTests.java b/server/src/test/java/org/opensearch/index/shard/NewPathForShardTests.java index 36b2504159a19..3f85256a1094e 100644 --- a/server/src/test/java/org/opensearch/index/shard/NewPathForShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/NewPathForShardTests.java @@ -42,8 +42,8 @@ import org.opensearch.env.NodeEnvironment.NodePath; import org.opensearch.env.TestEnvironment; import org.opensearch.index.IndexSettings; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTestCase; import org.junit.AfterClass; import org.junit.BeforeClass; 
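The doc_status assertions in IndexingStatsTests above hinge on the five counters being bucketed by the hundreds digit of the HTTP status code; createTestInstance() seeds exactly RestStatus.fromCode(i * 100) for i in 1..5, one representative status per slot. A minimal sketch of that bucketing, assuming a five-slot AtomicLong array like the one the test reads back (class and method names here are illustrative, not the PR's API):

import org.opensearch.core.rest.RestStatus;
import java.util.concurrent.atomic.AtomicLong;

final class DocStatusBucketSketch {
    // One slot per status class: index 0 -> "1xx" ... index 4 -> "5xx".
    private final AtomicLong[] slots = { new AtomicLong(), new AtomicLong(), new AtomicLong(), new AtomicLong(), new AtomicLong() };

    void add(RestStatus status, long count) {
        int idx = status.getStatus() / 100 - 1; // e.g. 201 -> index 1, the "2xx" slot
        if (idx >= 0 && idx < slots.length) {
            slots[idx].addAndGet(count);
        }
    }
}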
diff --git a/server/src/test/java/org/opensearch/index/shard/PrimaryReplicaSyncerTests.java b/server/src/test/java/org/opensearch/index/shard/PrimaryReplicaSyncerTests.java index 282f4c09b982d..b1bcaac2c1947 100644 --- a/server/src/test/java/org/opensearch/index/shard/PrimaryReplicaSyncerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/PrimaryReplicaSyncerTests.java @@ -37,21 +37,20 @@ import org.opensearch.action.resync.ResyncReplicationResponse; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.routing.IndexShardRoutingTable; -import org.opensearch.common.Strings; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.lucene.uid.Versions; +import org.opensearch.common.network.NetworkModule; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.io.stream.ByteBufferStreamInput; -import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.lucene.uid.Versions; -import org.opensearch.common.network.NetworkModule; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.VersionType; import org.opensearch.index.mapper.SourceToParse; import org.opensearch.index.seqno.SequenceNumbers; @@ -94,7 +93,7 @@ public void testSyncerSendsOffCorrectDocuments() throws Exception { shard.applyIndexOperationOnPrimary( Versions.MATCH_ANY, VersionType.INTERNAL, - new SourceToParse(shard.shardId().getIndexName(), Integer.toString(i), new BytesArray("{}"), XContentType.JSON), + new SourceToParse(shard.shardId().getIndexName(), Integer.toString(i), new BytesArray("{}"), MediaTypeRegistry.JSON), SequenceNumbers.UNASSIGNED_SEQ_NO, 0, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, @@ -176,7 +175,7 @@ public void testSyncerOnClosingShard() throws Exception { shard.applyIndexOperationOnPrimary( Versions.MATCH_ANY, VersionType.INTERNAL, - new SourceToParse(shard.shardId().getIndexName(), Integer.toString(i), new BytesArray("{}"), XContentType.JSON), + new SourceToParse(shard.shardId().getIndexName(), Integer.toString(i), new BytesArray("{}"), MediaTypeRegistry.JSON), SequenceNumbers.UNASSIGNED_SEQ_NO, 0, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, @@ -315,7 +314,7 @@ public void testStatusReportsCorrectNumbers() throws IOException { PrimaryReplicaSyncer.ResyncTask.Status status = task.getStatus(); XContentBuilder jsonBuilder = XContentFactory.jsonBuilder(); status.toXContent(jsonBuilder, ToXContent.EMPTY_PARAMS); - String jsonString = Strings.toString(jsonBuilder); + String jsonString = jsonBuilder.toString(); assertThat(jsonString, containsString("\"phase\":\"" + task.getPhase() + "\"")); assertThat(jsonString, containsString("\"totalOperations\":" + task.getTotalOperations())); assertThat(jsonString, containsString("\"resyncedOperations\":" + task.getResyncedOperations())); diff --git 
a/server/src/test/java/org/opensearch/index/shard/RefreshListenersTests.java b/server/src/test/java/org/opensearch/index/shard/RefreshListenersTests.java index 1be6c07539cb7..a45b25f04060b 100644 --- a/server/src/test/java/org/opensearch/index/shard/RefreshListenersTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RefreshListenersTests.java @@ -43,19 +43,20 @@ import org.opensearch.Version; import org.opensearch.common.Nullable; import org.opensearch.common.UUIDs; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.common.metrics.MeanMetric; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.util.io.IOUtils; -import org.opensearch.common.lease.Releasable; -import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexSettings; import org.opensearch.index.codec.CodecService; import org.opensearch.index.engine.Engine; @@ -71,10 +72,9 @@ import org.opensearch.index.store.Store; import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.TranslogConfig; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.test.DummyShardLock; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.Scheduler.Cancellable; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -133,7 +133,8 @@ public void setupListeners() throws Exception { shardId, createTempDir("translog"), indexSettings, - BigArrays.NON_RECYCLING_INSTANCE + BigArrays.NON_RECYCLING_INSTANCE, + "" ); Engine.EventListener eventListener = new Engine.EventListener() { @Override @@ -438,7 +439,16 @@ private Engine.IndexResult index(String id, String testFieldValue) throws IOExce document.add(seqID.seqNoDocValue); document.add(seqID.primaryTerm); BytesReference source = new BytesArray(new byte[] { 1 }); - ParsedDocument doc = new ParsedDocument(versionField, seqID, id, null, Arrays.asList(document), source, XContentType.JSON, null); + ParsedDocument doc = new ParsedDocument( + versionField, + seqID, + id, + null, + Arrays.asList(document), + source, + MediaTypeRegistry.JSON, + null + ); Engine.Index index = new Engine.Index(new Term("_id", doc.id()), engine.config().getPrimaryTermSupplier().getAsLong(), doc); return engine.index(index); } diff --git a/server/src/test/java/org/opensearch/index/shard/ReleasableRetryableRefreshListenerTests.java b/server/src/test/java/org/opensearch/index/shard/ReleasableRetryableRefreshListenerTests.java new file mode 100644 index 0000000000000..a0641c365a2a1 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/shard/ReleasableRetryableRefreshListenerTests.java @@ -0,0 +1,590 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The 
OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.shard; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class ReleasableRetryableRefreshListenerTests extends OpenSearchTestCase { + + private static final Logger logger = LogManager.getLogger(ReleasableRetryableRefreshListenerTests.class); + + private ThreadPool threadPool; + + @Before + public void init() { + threadPool = new TestThreadPool(getTestName()); + } + + /** + * This tests that the performAfterRefresh method is invoked each time the afterRefresh method is invoked; the countDownLatch decreasing as intended validates that performAfterRefresh actually ran. + */ + public void testPerformAfterRefresh() throws IOException { + + CountDownLatch countDownLatch = new CountDownLatch(2); + ReleasableRetryableRefreshListener testRefreshListener = new ReleasableRetryableRefreshListener(mock(ThreadPool.class)) { + @Override + protected boolean performAfterRefreshWithPermit(boolean didRefresh) { + countDownLatch.countDown(); + return false; + } + + @Override + public void beforeRefresh() {} + + @Override + protected Logger getLogger() { + return logger; + } + }; + + // First invocation of afterRefresh method + testRefreshListener.afterRefresh(true); + assertEquals(1, countDownLatch.getCount()); + + // Second invocation of afterRefresh method + testRefreshListener.afterRefresh(true); + assertEquals(0, countDownLatch.getCount()); + testRefreshListener.drainRefreshes(); + } + + /** + * This tests that close acquires all permits, and that if the afterRefresh method is called afterwards, it is a no-op.
+ */ + public void testCloseAfterRefresh() throws IOException { + final int initialCount = randomIntBetween(10, 100); + final CountDownLatch countDownLatch = new CountDownLatch(initialCount); + ReleasableRetryableRefreshListener testRefreshListener = new ReleasableRetryableRefreshListener(mock(ThreadPool.class)) { + @Override + protected boolean performAfterRefreshWithPermit(boolean didRefresh) { + countDownLatch.countDown(); + return false; + } + + @Override + public void beforeRefresh() {} + + @Override + protected Logger getLogger() { + return logger; + } + }; + + int refreshCount = randomIntBetween(1, initialCount); + for (int i = 0; i < refreshCount; i++) { + testRefreshListener.afterRefresh(true); + } + assertEquals(initialCount - refreshCount, countDownLatch.getCount()); + + // Closing the refresh listener so that no further afterRefreshes are executed going forward + testRefreshListener.drainRefreshes(); + + for (int i = 0; i < initialCount - refreshCount; i++) { + testRefreshListener.afterRefresh(true); + } + assertEquals(initialCount - refreshCount, countDownLatch.getCount()); + } + + /** + * This tests that the retry does not get triggered when the configurations or method overrides that enable the retry are missing. + */ + public void testNoRetry() throws IOException { + int initialCount = randomIntBetween(10, 100); + final CountDownLatch countDownLatch = new CountDownLatch(initialCount); + ReleasableRetryableRefreshListener testRefreshListener = new ReleasableRetryableRefreshListener(mock(ThreadPool.class)) { + @Override + protected boolean performAfterRefreshWithPermit(boolean didRefresh) { + countDownLatch.countDown(); + return countDownLatch.getCount() == 0; + } + + @Override + public void beforeRefresh() {} + + @Override + protected Logger getLogger() { + return logger; + } + }; + testRefreshListener.afterRefresh(true); + assertEquals(initialCount - 1, countDownLatch.getCount()); + testRefreshListener.drainRefreshes(); + + testRefreshListener = new ReleasableRetryableRefreshListener(threadPool) { + @Override + protected boolean performAfterRefreshWithPermit(boolean didRefresh) { + countDownLatch.countDown(); + return countDownLatch.getCount() == 0; + } + + @Override + public void beforeRefresh() {} + + @Override + protected Logger getLogger() { + return logger; + } + }; + testRefreshListener.afterRefresh(true); + assertEquals(initialCount - 2, countDownLatch.getCount()); + testRefreshListener.drainRefreshes(); + + testRefreshListener = new ReleasableRetryableRefreshListener(threadPool) { + @Override + protected boolean performAfterRefreshWithPermit(boolean didRefresh) { + countDownLatch.countDown(); + return countDownLatch.getCount() == 0; + } + + @Override + public void beforeRefresh() {} + + @Override + protected String getRetryThreadPoolName() { + return ThreadPool.Names.REMOTE_REFRESH_RETRY; + } + + @Override + protected Logger getLogger() { + return logger; + } + }; + testRefreshListener.afterRefresh(true); + assertEquals(initialCount - 3, countDownLatch.getCount()); + testRefreshListener.drainRefreshes(); + + testRefreshListener = new ReleasableRetryableRefreshListener(threadPool) { + @Override + protected boolean performAfterRefreshWithPermit(boolean didRefresh) { + countDownLatch.countDown(); + return countDownLatch.getCount() == 0; + } + + @Override + public void beforeRefresh() {} + + @Override + protected TimeValue getNextRetryInterval() { + return TimeValue.timeValueMillis(100); + } + + @Override + protected Logger getLogger() { + return logger; +
} + }; + testRefreshListener.afterRefresh(true); + assertEquals(initialCount - 4, countDownLatch.getCount()); + testRefreshListener.drainRefreshes(); + } + + /** + * This tests that the retry gets scheduled and executed when the required configurations and method overrides are in place. + */ + public void testRetry() throws Exception { + int initialCount = randomIntBetween(10, 20); + final CountDownLatch countDownLatch = new CountDownLatch(initialCount); + ReleasableRetryableRefreshListener testRefreshListener = new ReleasableRetryableRefreshListener(threadPool) { + @Override + protected boolean performAfterRefreshWithPermit(boolean didRefresh) { + countDownLatch.countDown(); + return countDownLatch.getCount() == 0; + } + + @Override + public void beforeRefresh() {} + + @Override + protected String getRetryThreadPoolName() { + return ThreadPool.Names.REMOTE_REFRESH_RETRY; + } + + @Override + protected TimeValue getNextRetryInterval() { + return TimeValue.timeValueMillis(100); + } + + @Override + protected Logger getLogger() { + return logger; + } + + @Override + protected boolean isRetryEnabled() { + return true; + } + }; + testRefreshListener.afterRefresh(true); + assertBusy(() -> assertEquals(0, countDownLatch.getCount())); + testRefreshListener.drainRefreshes(); + } + + /** + * This tests that once the close method is invoked, even pending retries become no-ops. + */ + public void testCloseWithRetryPending() throws IOException { + int initialCount = randomIntBetween(10, 20); + final CountDownLatch countDownLatch = new CountDownLatch(initialCount); + ReleasableRetryableRefreshListener testRefreshListener = new ReleasableRetryableRefreshListener(threadPool) { + @Override + protected boolean performAfterRefreshWithPermit(boolean didRefresh) { + countDownLatch.countDown(); + return countDownLatch.getCount() == 0; + } + + @Override + public void beforeRefresh() {} + + @Override + protected String getRetryThreadPoolName() { + return ThreadPool.Names.REMOTE_REFRESH_RETRY; + } + + @Override + protected TimeValue getNextRetryInterval() { + return TimeValue.timeValueMillis(100); + } + + @Override + protected Logger getLogger() { + return logger; + } + }; + testRefreshListener.afterRefresh(randomBoolean()); + testRefreshListener.drainRefreshes(); + assertNotEquals(0, countDownLatch.getCount()); + assertRefreshListenerClosed(testRefreshListener); + } + + public void testCloseWaitsForAcquiringAllPermits() throws Exception { + final CountDownLatch countDownLatch = new CountDownLatch(1); + ReleasableRetryableRefreshListener testRefreshListener = new ReleasableRetryableRefreshListener(threadPool) { + @Override + protected boolean performAfterRefreshWithPermit(boolean didRefresh) { + try { + Thread.sleep(5000); + } catch (InterruptedException e) { + throw new AssertionError(e); + } + countDownLatch.countDown(); + return false; + } + + @Override + public void beforeRefresh() {} + + @Override + protected Logger getLogger() { + return logger; + } + }; + Thread thread = new Thread(() -> { + try { + testRefreshListener.afterRefresh(randomBoolean()); + } catch (IOException e) { + throw new AssertionError(e); + } + }); + thread.start(); + assertBusy(() -> assertEquals(0, countDownLatch.getCount())); + testRefreshListener.drainRefreshes(); + assertRefreshListenerClosed(testRefreshListener); + } + + public void testScheduleRetryAfterClose() throws Exception { + // This tests that once the listener has been closed, no further retries are scheduled.
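+ // getNextRetryInterval() below deliberately sleeps before returning, giving the drain thread (which + // starts 500ms in) time to close the listener first; runCount staying at 1 shows the retry never ran.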
+ final AtomicLong runCount = new AtomicLong(); + ReleasableRetryableRefreshListener testRefreshListener = new ReleasableRetryableRefreshListener(threadPool) { + @Override + protected boolean performAfterRefreshWithPermit(boolean didRefresh) { + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + throw new AssertionError(e); + } + runCount.incrementAndGet(); + return false; + } + + @Override + public void beforeRefresh() {} + + @Override + protected Logger getLogger() { + return logger; + } + + @Override + protected String getRetryThreadPoolName() { + return ThreadPool.Names.REMOTE_REFRESH_RETRY; + } + + @Override + protected TimeValue getNextRetryInterval() { + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + throw new AssertionError(e); + } + return TimeValue.timeValueMillis(100); + } + }; + Thread thread1 = new Thread(() -> { + try { + testRefreshListener.afterRefresh(true); + } catch (IOException e) { + throw new AssertionError(e); + } + }); + Thread thread2 = new Thread(() -> { + try { + Thread.sleep(500); + testRefreshListener.drainRefreshes(); + } catch (InterruptedException e) { + throw new AssertionError(e); + } + }); + thread1.start(); + thread2.start(); + thread1.join(); + thread2.join(); + assertBusy(() -> assertEquals(1, runCount.get())); + assertRefreshListenerClosed(testRefreshListener); + } + + public void testConcurrentScheduleRetry() throws Exception { + // This tests that there can be only 1 retry that can be scheduled at a time. + final AtomicLong runCount = new AtomicLong(); + final AtomicInteger retryCount = new AtomicInteger(0); + ReleasableRetryableRefreshListener testRefreshListener = new ReleasableRetryableRefreshListener(threadPool) { + @Override + protected boolean performAfterRefreshWithPermit(boolean didRefresh) { + retryCount.incrementAndGet(); + runCount.incrementAndGet(); + return retryCount.get() >= 2; + } + + @Override + public void beforeRefresh() {} + + @Override + protected Logger getLogger() { + return logger; + } + + @Override + protected String getRetryThreadPoolName() { + return ThreadPool.Names.REMOTE_REFRESH_RETRY; + } + + @Override + protected TimeValue getNextRetryInterval() { + return TimeValue.timeValueMillis(5000); + } + + @Override + protected boolean isRetryEnabled() { + return true; + } + }; + testRefreshListener.afterRefresh(true); + testRefreshListener.afterRefresh(true); + assertBusy(() -> assertEquals(3, runCount.get())); + testRefreshListener.drainRefreshes(); + assertRefreshListenerClosed(testRefreshListener); + } + + public void testExceptionDuringThreadPoolSchedule() throws Exception { + // This tests that if there are exceptions while scheduling the task in the threadpool, the retrySchedule boolean + // is reset properly to allow future scheduling to happen. 
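+ // The mocked ThreadPool below always throws from schedule(), so the retry is never queued; the test + // then verifies via getRetryScheduledStatus() that the flag resets so a later retry could still be scheduled.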
+ AtomicInteger runCount = new AtomicInteger(); + ThreadPool mockThreadPool = mock(ThreadPool.class); + when(mockThreadPool.schedule(any(), any(), any())).thenThrow(new RuntimeException()); + ReleasableRetryableRefreshListener testRefreshListener = new ReleasableRetryableRefreshListener(mockThreadPool) { + @Override + protected boolean performAfterRefreshWithPermit(boolean didRefresh) { + runCount.incrementAndGet(); + return false; + } + + @Override + public void beforeRefresh() {} + + @Override + protected Logger getLogger() { + return logger; + } + + @Override + protected String getRetryThreadPoolName() { + return ThreadPool.Names.REMOTE_REFRESH_RETRY; + } + + @Override + protected TimeValue getNextRetryInterval() { + return TimeValue.timeValueMillis(100); + } + + @Override + protected boolean isRetryEnabled() { + return true; + } + }; + assertThrows(RuntimeException.class, () -> testRefreshListener.afterRefresh(true)); + assertBusy(() -> assertFalse(testRefreshListener.getRetryScheduledStatus())); + assertEquals(1, runCount.get()); + testRefreshListener.drainRefreshes(); + assertRefreshListenerClosed(testRefreshListener); + } + + public void testTimeoutDuringClose() throws Exception { + // This test checks the expected behaviour when the drainRefreshes times out. + ReleasableRetryableRefreshListener testRefreshListener = new ReleasableRetryableRefreshListener(mock(ThreadPool.class)) { + @Override + protected boolean performAfterRefreshWithPermit(boolean didRefresh) { + try { + Thread.sleep(TimeValue.timeValueSeconds(2).millis()); + } catch (InterruptedException e) { + throw new AssertionError(e); + } + return true; + } + + @Override + public void beforeRefresh() {} + + @Override + protected Logger getLogger() { + return logger; + } + + @Override + TimeValue getDrainTimeout() { + return TimeValue.timeValueSeconds(1); + } + }; + Thread thread1 = new Thread(() -> { + try { + testRefreshListener.afterRefresh(true); + } catch (IOException e) { + throw new AssertionError(e); + } + }); + thread1.start(); + assertBusy(() -> assertEquals(0, testRefreshListener.availablePermits())); + RuntimeException ex = assertThrows(RuntimeException.class, testRefreshListener::drainRefreshes); + assertEquals("Failed to acquire all permits", ex.getMessage()); + thread1.join(); + } + + public void testThreadInterruptDuringClose() throws Exception { + // This test checks the expected behaviour when the thread performing the drainRefresh is interrupted. 
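+ // The latch carries two counts: one released when the refresh thread completes, and one released when + // the interrupted drain thread observes the expected "Failed to acquire all permits" failure.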
+ CountDownLatch latch = new CountDownLatch(2); + ReleasableRetryableRefreshListener testRefreshListener = new ReleasableRetryableRefreshListener(mock(ThreadPool.class)) { + @Override + protected boolean performAfterRefreshWithPermit(boolean didRefresh) { + try { + Thread.sleep(TimeValue.timeValueSeconds(2).millis()); + } catch (InterruptedException e) { + throw new AssertionError(e); + } + return true; + } + + @Override + public void beforeRefresh() {} + + @Override + protected Logger getLogger() { + return logger; + } + + @Override + TimeValue getDrainTimeout() { + return TimeValue.timeValueSeconds(2); + } + }; + Thread thread1 = new Thread(() -> { + try { + testRefreshListener.afterRefresh(true); + latch.countDown(); + } catch (IOException e) { + throw new AssertionError(e); + } + }); + Thread thread2 = new Thread(() -> { + RuntimeException ex = assertThrows(RuntimeException.class, testRefreshListener::drainRefreshes); + assertEquals("Failed to acquire all permits", ex.getMessage()); + latch.countDown(); + }); + thread1.start(); + assertBusy(() -> assertEquals(0, testRefreshListener.availablePermits())); + thread2.start(); + thread2.interrupt(); + thread1.join(); + thread2.join(); + assertEquals(0, latch.getCount()); + } + + public void testResumeRefreshesAfterDrainRefreshes() { + // This test checks the expected behaviour when the refresh listener is drained, but then refreshes are resumed again + // by closing the releasables acquired by calling the drainRefreshes method. + ReleasableRetryableRefreshListener testRefreshListener = new ReleasableRetryableRefreshListener(mock(ThreadPool.class)) { + @Override + protected boolean performAfterRefreshWithPermit(boolean didRefresh) { + return true; + } + + @Override + public void beforeRefresh() {} + + @Override + protected Logger getLogger() { + return logger; + } + }; + assertRefreshListenerOpen(testRefreshListener); + Releasable releasable = testRefreshListener.drainRefreshes(); + assertRefreshListenerClosed(testRefreshListener); + releasable.close(); + assertRefreshListenerOpen(testRefreshListener); + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + terminate(threadPool); + } + + private void assertRefreshListenerClosed(ReleasableRetryableRefreshListener testRefreshListener) { + assertTrue(testRefreshListener.isClosed()); + assertEquals(0, testRefreshListener.availablePermits()); + } + + private void assertRefreshListenerOpen(ReleasableRetryableRefreshListener testRefreshListener) { + assertFalse(testRefreshListener.isClosed()); + assertEquals(1, testRefreshListener.availablePermits()); + } +} diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardCorruptionTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardCorruptionTests.java new file mode 100644 index 0000000000000..21bf580712761 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardCorruptionTests.java @@ -0,0 +1,75 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.shard; + +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.tests.util.LuceneTestCase; +import org.opensearch.core.util.FileSystemUtils; +import org.opensearch.test.CorruptionUtils; + +import java.io.IOException; +import java.nio.channels.FileChannel; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.stream.Stream; + +@LuceneTestCase.SuppressFileSystems("WindowsFS") +public class RemoteIndexShardCorruptionTests extends IndexShardTestCase { + + public void testLocalDirectoryContains() throws IOException { + IndexShard indexShard = newStartedShard(true); + int numDocs = between(1, 10); + for (int i = 0; i < numDocs; i++) { + indexDoc(indexShard, "_doc", Integer.toString(i)); + } + flushShard(indexShard); + indexShard.store().incRef(); + Directory localDirectory = indexShard.store().directory(); + Path shardPath = indexShard.shardPath().getDataPath().resolve(ShardPath.INDEX_FOLDER_NAME); + Path tempDir = createTempDir(); + for (String file : localDirectory.listAll()) { + if (file.equals("write.lock") || file.startsWith("extra")) { + continue; + } + boolean corrupted = randomBoolean(); + long checksum = 0; + try (IndexInput indexInput = localDirectory.openInput(file, IOContext.DEFAULT)) { + checksum = CodecUtil.retrieveChecksum(indexInput); + } + if (corrupted) { + Files.copy(shardPath.resolve(file), tempDir.resolve(file)); + try (FileChannel raf = FileChannel.open(shardPath.resolve(file), StandardOpenOption.READ, StandardOpenOption.WRITE)) { + CorruptionUtils.corruptAt(shardPath.resolve(file), raf, (int) (raf.size() - 8)); + } + } + if (corrupted == false) { + assertTrue(indexShard.localDirectoryContains(localDirectory, file, checksum)); + } else { + assertFalse(indexShard.localDirectoryContains(localDirectory, file, checksum)); + assertFalse(Files.exists(shardPath.resolve(file))); + } + } + try (Stream<Path> files = Files.list(tempDir)) { + files.forEach(p -> { + try { + Files.copy(p, shardPath.resolve(p.getFileName())); + } catch (IOException e) { + // Ignore + } + }); + } + FileSystemUtils.deleteSubDirectories(tempDir); + indexShard.store().decRef(); + closeShards(indexShard); + } +} diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java new file mode 100644 index 0000000000000..57a561bc8f2a3 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java @@ -0,0 +1,601 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.shard; + +import org.apache.lucene.index.IndexFileNames; +import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.util.Version; +import org.opensearch.action.StepListener; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.concurrent.GatedCloseable; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.index.engine.DocIdSeqNoAndSource; +import org.opensearch.index.engine.Engine; +import org.opensearch.index.engine.InternalEngine; +import org.opensearch.index.engine.NRTReplicationEngineFactory; +import org.opensearch.index.store.Store; +import org.opensearch.index.store.StoreFileMetadata; +import org.opensearch.indices.replication.CheckpointInfoResponse; +import org.opensearch.indices.replication.GetSegmentFilesResponse; +import org.opensearch.indices.replication.RemoteStoreReplicationSource; +import org.opensearch.indices.replication.SegmentReplicationSourceFactory; +import org.opensearch.indices.replication.SegmentReplicationState; +import org.opensearch.indices.replication.SegmentReplicationTarget; +import org.opensearch.indices.replication.SegmentReplicationTargetService; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.replication.common.ReplicationFailedException; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.test.CorruptionUtils; +import org.opensearch.test.junit.annotations.TestLogging; +import org.hamcrest.MatcherAssert; +import org.junit.Assert; + +import java.io.IOException; +import java.nio.channels.FileChannel; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.function.BiConsumer; +import java.util.stream.Collectors; + +import static org.opensearch.index.engine.EngineTestCase.assertAtMostOneLuceneDocumentPerSequenceNumber; +import static org.opensearch.index.shard.RemoteStoreRefreshListener.EXCLUDE_FILES; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class RemoteIndexShardTests extends SegmentReplicationIndexShardTests { + + private static final String REPOSITORY_NAME = "temp-fs"; + private static final Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) + .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, REPOSITORY_NAME) + .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, REPOSITORY_NAME) + .build(); + + protected Settings getIndexSettings() { + return settings; + } + + protected ReplicationGroup getReplicationGroup(int numberOfReplicas) throws IOException { + return createGroup(numberOfReplicas, settings, indexMapping, new NRTReplicationEngineFactory(), createTempDir()); + } + + public void testNRTReplicaWithRemoteStorePromotedAsPrimaryRefreshRefresh() throws Exception { + testNRTReplicaWithRemoteStorePromotedAsPrimary(false, false); + } + + public void testNRTReplicaWithRemoteStorePromotedAsPrimaryRefreshCommit() throws Exception { + testNRTReplicaWithRemoteStorePromotedAsPrimary(false, 
true); + } + + public void testNRTReplicaWithRemoteStorePromotedAsPrimaryCommitRefresh() throws Exception { + testNRTReplicaWithRemoteStorePromotedAsPrimary(true, false); + } + + public void testNRTReplicaWithRemoteStorePromotedAsPrimaryCommitCommit() throws Exception { + testNRTReplicaWithRemoteStorePromotedAsPrimary(true, true); + } + + public void testNRTReplicaWithRemoteStorePromotedAsPrimary(boolean performFlushFirst, boolean performFlushSecond) throws Exception { + try ( + ReplicationGroup shards = createGroup(1, getIndexSettings(), indexMapping, new NRTReplicationEngineFactory(), createTempDir()) + ) { + shards.startAll(); + IndexShard oldPrimary = shards.getPrimary(); + final IndexShard nextPrimary = shards.getReplicas().get(0); + + // 1. Create ops that are in the index and xlog of both shards but not yet part of a commit point. + final int numDocs = shards.indexDocs(randomInt(10)); + + // refresh but do not copy the segments over. + if (performFlushFirst) { + flushShard(oldPrimary, true); + } else { + oldPrimary.refresh("Test"); + } + // replicateSegments(primary, shards.getReplicas()); + + // at this point both shards should have numDocs persisted and searchable. + assertDocCounts(oldPrimary, numDocs, numDocs); + for (IndexShard shard : shards.getReplicas()) { + assertDocCounts(shard, numDocs, 0); + } + + // 2. Create ops that are in the replica's xlog, not in the index. + // index some more into both but don't replicate. replica will have only numDocs searchable, but should have totalDocs + // persisted. + final int additionalDocs = shards.indexDocs(randomInt(10)); + final int totalDocs = numDocs + additionalDocs; + + if (performFlushSecond) { + flushShard(oldPrimary, true); + } else { + oldPrimary.refresh("Test"); + } + assertDocCounts(oldPrimary, totalDocs, totalDocs); + for (IndexShard shard : shards.getReplicas()) { + assertDocCounts(shard, totalDocs, 0); + } + assertTrue(nextPrimary.translogStats().estimatedNumberOfOperations() >= additionalDocs); + assertTrue(nextPrimary.translogStats().getUncommittedOperations() >= additionalDocs); + + int prevOperationCount = nextPrimary.translogStats().estimatedNumberOfOperations(); + + // promote the replica + shards.promoteReplicaToPrimary(nextPrimary).get(); + + // close oldPrimary. + oldPrimary.close("demoted", false, false); + oldPrimary.store().close(); + + assertEquals(InternalEngine.class, nextPrimary.getEngine().getClass()); + assertDocCounts(nextPrimary, totalDocs, totalDocs); + + // refresh and push segments to our other replica.
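+ // with the remote store enabled, this refresh also triggers the remote store refresh listener to + // upload the new primary's segments.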
+ nextPrimary.refresh("test"); + + for (IndexShard shard : shards) { + assertConsistentHistoryBetweenTranslogAndLucene(shard); + } + final List<DocIdSeqNoAndSource> docsAfterRecovery = getDocIdAndSeqNos(shards.getPrimary()); + for (IndexShard shard : shards.getReplicas()) { + assertThat(shard.routingEntry().toString(), getDocIdAndSeqNos(shard), equalTo(docsAfterRecovery)); + } + } + } + + public void testNoDuplicateSeqNo() throws Exception { + Settings settings = Settings.builder().put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build(); + ReplicationGroup shards = createGroup(1, settings, indexMapping, new NRTReplicationEngineFactory(), createTempDir()); + final IndexShard primaryShard = shards.getPrimary(); + final IndexShard replicaShard = shards.getReplicas().get(0); + shards.startPrimary(); + shards.startAll(); + shards.indexDocs(10); + replicateSegments(primaryShard, shards.getReplicas()); + + flushShard(primaryShard); + shards.indexDocs(10); + replicateSegments(primaryShard, shards.getReplicas()); + + shards.indexDocs(10); + primaryShard.refresh("test"); + replicateSegments(primaryShard, shards.getReplicas()); + + CountDownLatch latch = new CountDownLatch(1); + shards.promoteReplicaToPrimary(replicaShard, (shard, listener) -> { + try { + assertAtMostOneLuceneDocumentPerSequenceNumber(replicaShard.getEngine()); + } catch (IOException e) { + throw new RuntimeException(e); + } + latch.countDown(); + }); + latch.await(); + for (IndexShard shard : shards) { + if (shard != null) { + closeShard(shard, false); + } + } + } + + public void testReplicaCommitsInfosBytesOnRecovery() throws Exception { + final Path remotePath = createTempDir(); + try (ReplicationGroup shards = createGroup(0, getIndexSettings(), indexMapping, new NRTReplicationEngineFactory(), remotePath)) { + shards.startAll(); + // ensure primary has uploaded something + shards.indexDocs(10); + shards.refresh("test"); + + final IndexShard primary = shards.getPrimary(); + final Engine primaryEngine = getEngine(primary); + assertNotNull(primaryEngine); + final SegmentInfos latestCommit = SegmentInfos.readLatestCommit(primary.store().directory()); + assertEquals("On-disk commit references no segments", Set.of("segments_3"), latestCommit.files(true)); + assertEquals( + "Latest remote commit references no segments", + Set.of("segments_3"), + primary.remoteStore().readLastCommittedSegmentsInfo().files(true) + ); + + try (final GatedCloseable<SegmentInfos> segmentInfosSnapshot = primaryEngine.getSegmentInfosSnapshot()) { + MatcherAssert.assertThat( + "Segments are referenced in memory only", + segmentInfosSnapshot.get().files(false), + containsInAnyOrder("_0.cfe", "_0.si", "_0.cfs") + ); + } + + final IndexShard replica = shards.addReplica(remotePath); + replica.store().createEmpty(Version.LATEST); + assertEquals( + "Replica starts at empty segment 2", + Set.of("segments_1"), + replica.store().readLastCommittedSegmentsInfo().files(true) + ); + // commit replica infos so it has a conflicting commit with remote.
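+ // each commit() below bumps the on-disk segments_N generation, so two commits take the replica from + // segments_1 to segments_3, colliding with the generation of the latest remote commit.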
+ final SegmentInfos segmentCommitInfos = replica.store().readLastCommittedSegmentsInfo(); + segmentCommitInfos.commit(replica.store().directory()); + segmentCommitInfos.commit(replica.store().directory()); + assertEquals( + "Replica starts recovery at empty segment 3", + Set.of("segments_3"), + replica.store().readLastCommittedSegmentsInfo().files(true) + ); + + shards.recoverReplica(replica); + + final Engine replicaEngine = getEngine(replica); + assertNotNull(replicaEngine); + final SegmentInfos latestReplicaCommit = SegmentInfos.readLatestCommit(replica.store().directory()); + logger.info(List.of(replica.store().directory().listAll())); + MatcherAssert.assertThat( + "Replica commits infos bytes referencing latest refresh point", + latestReplicaCommit.files(true), + containsInAnyOrder("_0.cfe", "_0.si", "_0.cfs", "segments_6") + ); + + try (final GatedCloseable<SegmentInfos> segmentInfosSnapshot = replicaEngine.getSegmentInfosSnapshot()) { + MatcherAssert.assertThat( + "Segments are referenced in memory", + segmentInfosSnapshot.get().files(false), + containsInAnyOrder("_0.cfe", "_0.si", "_0.cfs") + ); + } + + final Store.RecoveryDiff recoveryDiff = Store.segmentReplicationDiff( + primary.getSegmentMetadataMap(), + replica.getSegmentMetadataMap() + ); + assertTrue(recoveryDiff.missing.isEmpty()); + assertTrue(recoveryDiff.different.isEmpty()); + } + } + + public void testPrimaryRestart_PrimaryHasExtraCommits() throws Exception { + final Path remotePath = createTempDir(); + try (ReplicationGroup shards = createGroup(0, getIndexSettings(), indexMapping, new NRTReplicationEngineFactory(), remotePath)) { + shards.startAll(); + // ensure primary has uploaded something + shards.indexDocs(10); + IndexShard primary = shards.getPrimary(); + if (randomBoolean()) { + flushShard(primary); + } else { + primary.refresh("test"); + } + assertDocCount(primary, 10); + // get a metadata map - we'll use segrep diff to ensure segments on reader are identical after restart. 
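+ // the two extra commit() calls below advance the on-disk commit generation before recovery runs, + // mimicking a primary whose local commits are ahead of the last uploaded refresh point.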
+ final Map<String, StoreFileMetadata> metadataBeforeRestart = primary.getSegmentMetadataMap(); + // restart the primary + shards.reinitPrimaryShard(remotePath); + // the store is open at this point but the shard has not yet run through recovery + primary = shards.getPrimary(); + SegmentInfos latestPrimaryCommit = SegmentInfos.readLatestCommit(primary.store().directory()); + latestPrimaryCommit.commit(primary.store().directory()); + latestPrimaryCommit = SegmentInfos.readLatestCommit(primary.store().directory()); + latestPrimaryCommit.commit(primary.store().directory()); + shards.startPrimary(); + assertDocCount(primary, 10); + final Store.RecoveryDiff diff = Store.segmentReplicationDiff(metadataBeforeRestart, primary.getSegmentMetadataMap()); + assertTrue(diff.missing.isEmpty()); + assertTrue(diff.different.isEmpty()); + } + } + + @TestLogging(reason = "Getting trace logs from replication package", value = "org.opensearch.indices.replication:TRACE") + public void testReplicaCleansUpOldCommitsWhenReceivingNew() throws Exception { + final Path remotePath = createTempDir(); + try (ReplicationGroup shards = createGroup(1, getIndexSettings(), indexMapping, new NRTReplicationEngineFactory(), remotePath)) { + shards.startAll(); + final IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + final Store store = replica.store(); + final SegmentInfos initialCommit = store.readLastCommittedSegmentsInfo(); + shards.indexDocs(1); + flushShard(primary); + replicateSegments(primary, shards.getReplicas()); + + assertDocCount(primary, 1); + assertDocCount(replica, 1); + assertSingleSegmentFile(replica); + final SegmentInfos secondCommit = store.readLastCommittedSegmentsInfo(); + assertTrue(secondCommit.getGeneration() > initialCommit.getGeneration()); + + shards.indexDocs(1); + primary.refresh("test"); + replicateSegments(primary, shards.getReplicas()); + assertDocCount(replica, 2); + assertSingleSegmentFile(replica); + assertEquals(store.readLastCommittedSegmentsInfo().getGeneration(), secondCommit.getGeneration()); + + shards.indexDocs(1); + flushShard(primary); + replicateSegments(primary, shards.getReplicas()); + assertDocCount(replica, 3); + assertSingleSegmentFile(replica); + final SegmentInfos thirdCommit = store.readLastCommittedSegmentsInfo(); + assertTrue(thirdCommit.getGeneration() > secondCommit.getGeneration()); + + final Store.RecoveryDiff diff = Store.segmentReplicationDiff(primary.getSegmentMetadataMap(), replica.getSegmentMetadataMap()); + assertTrue(diff.missing.isEmpty()); + assertTrue(diff.different.isEmpty()); + } + } + + public void testPrimaryRestart() throws Exception { + final Path remotePath = createTempDir(); + try (ReplicationGroup shards = createGroup(0, getIndexSettings(), indexMapping, new NRTReplicationEngineFactory(), remotePath)) { + shards.startAll(); + // ensure primary has uploaded something + shards.indexDocs(10); + IndexShard primary = shards.getPrimary(); + if (randomBoolean()) { + flushShard(primary); + } else { + primary.refresh("test"); + } + assertDocCount(primary, 10); + // get a metadata map - we'll use segrep diff to ensure segments on reader are identical after restart.
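+ // unlike the PrimaryHasExtraCommits variant above, no extra commits are written before recovery here; + // this covers the plain restart-and-recover path.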
+ final Map<String, StoreFileMetadata> metadataBeforeRestart = primary.getSegmentMetadataMap(); + // restart the primary + shards.reinitPrimaryShard(remotePath); + // the store is open at this point but the shard has not yet run through recovery + primary = shards.getPrimary(); + shards.startPrimary(); + assertDocCount(primary, 10); + final Store.RecoveryDiff diff = Store.segmentReplicationDiff(metadataBeforeRestart, primary.getSegmentMetadataMap()); + assertTrue(diff.missing.isEmpty()); + logger.info("DIFF FILE {}", diff.different); + assertTrue(diff.different.isEmpty()); + } + } + + /** + * This test validates that unreferenced on-disk files are ignored when requesting files from the replication source, to + * prevent FileAlreadyExistsException. It does so by only copying files in the first round of segment replication without + * committing locally, so that in the next round of segment replication those files are not considered for download again. + */ + public void testSegRepSucceedsOnPreviousCopiedFiles() throws Exception { + try (ReplicationGroup shards = createGroup(1, getIndexSettings(), new NRTReplicationEngineFactory())) { + shards.startAll(); + IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + + shards.indexDocs(10); + primary.refresh("Test"); + + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); + when(sourceFactory.get(any())).thenReturn( + getRemoteStoreReplicationSource(replica, () -> { throw new RuntimeException("Simulated"); }) + ); + CountDownLatch latch = new CountDownLatch(1); + + logger.info("--> Starting first round of replication"); + // Start first round of segment replication. This should fail with simulated error but with replica having + // files in its local store but not in active reader.
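+ // the RuntimeException("Simulated") injected through the replication source fires only after the segment + // files have been copied, leaving them on disk without a local commit or an updated reader.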
+ final SegmentReplicationTarget target = targetService.startReplication( + replica, + primary.getLatestReplicationCheckpoint(), + new SegmentReplicationTargetService.SegmentReplicationListener() { + @Override + public void onReplicationDone(SegmentReplicationState state) { + latch.countDown(); + Assert.fail("Replication should fail with simulated error"); + } + + @Override + public void onReplicationFailure( + SegmentReplicationState state, + ReplicationFailedException e, + boolean sendShardFailure + ) { + latch.countDown(); + assertFalse(sendShardFailure); + logger.error("Replication error", e); + } + } + ); + latch.await(); + Set<String> onDiskFiles = new HashSet<>(Arrays.asList(replica.store().directory().listAll())); + onDiskFiles.removeIf(name -> EXCLUDE_FILES.contains(name) || name.startsWith(IndexFileNames.SEGMENTS)); + List<String> activeFiles = replica.getSegmentMetadataMap() + .values() + .stream() + .map(metadata -> metadata.name()) + .collect(Collectors.toList()); + assertTrue("Files should not be committed", activeFiles.isEmpty()); + assertEquals("Files should be copied to disk", false, onDiskFiles.isEmpty()); + assertEquals(target.state().getStage(), SegmentReplicationState.Stage.GET_FILES); + + // Start the next round of segment replication without throwing an exception, resulting in a commit on the replica + when(sourceFactory.get(any())).thenReturn(getRemoteStoreReplicationSource(replica, () -> {})); + CountDownLatch waitForSecondRound = new CountDownLatch(1); + logger.info("--> Starting second round of replication"); + final SegmentReplicationTarget newTarget = targetService.startReplication( + replica, + primary.getLatestReplicationCheckpoint(), + new SegmentReplicationTargetService.SegmentReplicationListener() { + @Override + public void onReplicationDone(SegmentReplicationState state) { + waitForSecondRound.countDown(); + } + + @Override + public void onReplicationFailure( + SegmentReplicationState state, + ReplicationFailedException e, + boolean sendShardFailure + ) { + waitForSecondRound.countDown(); + logger.error("Replication error", e); + Assert.fail("Replication should not fail"); + } + } + ); + waitForSecondRound.await(); + assertEquals(newTarget.state().getStage(), SegmentReplicationState.Stage.DONE); + activeFiles = replica.getSegmentMetadataMap().values().stream().map(metadata -> metadata.name()).collect(Collectors.toList()); + assertTrue("Replica should have consistent disk & reader", activeFiles.containsAll(onDiskFiles)); + shards.removeReplica(replica); + closeShards(replica); + } + } + + /** + * This test validates that local non-readable (corrupt or partially written) files on disk are deleted rather than failing the + * replication event. It mimics local files (not referenced by the reader) by throwing an exception after file copy and + * blocking the reader update. Once this is done, it corrupts one segment file and ensures that the file is deleted in the next + * round of segment replication by verifying the doc count.
+ */ + public void testNoFailuresOnFileReads() throws Exception { + try (ReplicationGroup shards = createGroup(1, getIndexSettings(), new NRTReplicationEngineFactory())) { + shards.startAll(); + IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + + final int docCount = 10; + shards.indexDocs(docCount); + primary.refresh("Test"); + + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); + when(sourceFactory.get(any())).thenReturn( + getRemoteStoreReplicationSource(replica, () -> { throw new RuntimeException("Simulated"); }) + ); + CountDownLatch waitOnReplicationCompletion = new CountDownLatch(1); + + // Start first round of segment replication. This should fail with simulated error but with replica having + // files in its local store but not in active reader. + SegmentReplicationTarget segmentReplicationTarget = targetService.startReplication( + replica, + primary.getLatestReplicationCheckpoint(), + new SegmentReplicationTargetService.SegmentReplicationListener() { + @Override + public void onReplicationDone(SegmentReplicationState state) { + waitOnReplicationCompletion.countDown(); + Assert.fail("Replication should fail with simulated error"); + } + + @Override + public void onReplicationFailure( + SegmentReplicationState state, + ReplicationFailedException e, + boolean sendShardFailure + ) { + waitOnReplicationCompletion.countDown(); + assertFalse(sendShardFailure); + } + } + ); + waitOnReplicationCompletion.await(); + assertBusy(() -> { assertEquals("Target should be closed", 0, segmentReplicationTarget.refCount()); }); + String fileToCorrupt = null; + // Corrupt one data file + Path shardPath = replica.shardPath().getDataPath().resolve(ShardPath.INDEX_FOLDER_NAME); + for (String file : replica.store().directory().listAll()) { + if (file.equals("write.lock") || file.startsWith("extra") || file.startsWith("segment")) { + continue; + } + fileToCorrupt = file; + logger.info("--> Corrupting file {}", fileToCorrupt); + try (FileChannel raf = FileChannel.open(shardPath.resolve(file), StandardOpenOption.READ, StandardOpenOption.WRITE)) { + CorruptionUtils.corruptAt(shardPath.resolve(file), raf, (int) (raf.size() - 8)); + } + break; + } + Assert.assertNotNull(fileToCorrupt); + + // Ingest more data and start next round of segment replication + shards.indexDocs(docCount); + primary.refresh("Post corruption"); + replicateSegments(primary, List.of(replica)); + + assertDocCount(primary, 2 * docCount); + assertDocCount(replica, 2 * docCount); + + final Store.RecoveryDiff diff = Store.segmentReplicationDiff(primary.getSegmentMetadataMap(), replica.getSegmentMetadataMap()); + assertTrue(diff.missing.isEmpty()); + assertTrue(diff.different.isEmpty()); + + // clean up + shards.removeReplica(replica); + closeShards(replica); + } + } + + private RemoteStoreReplicationSource getRemoteStoreReplicationSource(IndexShard shard, Runnable postGetFilesRunnable) { + return new RemoteStoreReplicationSource(shard) { + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener<CheckpointInfoResponse> listener + ) { + super.getCheckpointMetadata(replicationId, checkpoint, listener); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List<StoreFileMetadata> filesToFetch, + IndexShard indexShard, + BiConsumer<String, 
Long> fileProgressTracker, + ActionListener<GetSegmentFilesResponse> listener + ) { + StepListener<GetSegmentFilesResponse> waitForCopyFilesListener = new StepListener(); + super.getSegmentFiles( + replicationId, + checkpoint, + filesToFetch, + indexShard, + (fileName, bytesRecovered) -> {}, + waitForCopyFilesListener + ); + waitForCopyFilesListener.whenComplete(response -> { + postGetFilesRunnable.run(); + listener.onResponse(response); + }, listener::onFailure); + } + + @Override + public String getDescription() { + return "TestRemoteStoreReplicationSource"; + } + }; + } + + @Override + protected void validateShardIdleWithNoReplicas(IndexShard primary) { + // ensure search idle conditions are met. + assertFalse(primary.isSearchIdleSupported()); + assertTrue(primary.isSearchIdle()); + assertTrue(primary.scheduledRefresh()); + assertFalse(primary.hasRefreshPending()); + } + + private void assertSingleSegmentFile(IndexShard shard) throws IOException { + final Set<String> segmentsFileNames = Arrays.stream(shard.store().directory().listAll()) + .filter(file -> file.startsWith(IndexFileNames.SEGMENTS)) + .collect(Collectors.toSet()); + assertEquals("Expected a single segment file", 1, segmentsFileNames.size()); + } +} diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java index 7c119bfbbc573..85878cc2e1c9d 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java @@ -10,11 +10,10 @@ import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.tests.store.BaseDirectoryWrapper; -import org.junit.After; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.ShardRouting; @@ -24,14 +23,22 @@ import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.engine.InternalEngineFactory; -import org.opensearch.index.remote.RemoteRefreshSegmentPressureService; -import org.opensearch.index.remote.RemoteRefreshSegmentTracker; +import org.opensearch.index.engine.NRTReplicationEngineFactory; +import org.opensearch.index.remote.RemoteSegmentTransferTracker; +import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; +import org.opensearch.index.store.RemoteDirectory; import org.opensearch.index.store.RemoteSegmentStoreDirectory; +import org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils; import org.opensearch.index.store.Store; +import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; +import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.threadpool.ThreadPool; +import org.junit.After; import java.io.IOException; import java.util.Collections; @@ -40,18 +47,23 @@ import 
java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicLong; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; +import static org.opensearch.index.store.RemoteSegmentStoreDirectory.METADATA_FILES_TO_FETCH; +import static org.opensearch.test.RemoteStoreTestUtils.createMetadataFileBytes; +import static org.opensearch.test.RemoteStoreTestUtils.getDummyMetadata; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; public class RemoteStoreRefreshListenerTests extends IndexShardTestCase { private IndexShard indexShard; private ClusterService clusterService; private RemoteStoreRefreshListener remoteStoreRefreshListener; - private RemoteRefreshSegmentPressureService remoteRefreshSegmentPressureService; + private RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory; public void setup(boolean primary, int numberOfDocs) throws IOException { indexShard = newStartedShard( @@ -75,13 +87,10 @@ public void setup(boolean primary, int numberOfDocs) throws IOException { new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool ); - remoteRefreshSegmentPressureService = new RemoteRefreshSegmentPressureService(clusterService, Settings.EMPTY); - remoteRefreshSegmentPressureService.afterIndexShardCreated(indexShard); - remoteStoreRefreshListener = new RemoteStoreRefreshListener( - indexShard, - SegmentReplicationCheckpointPublisher.EMPTY, - remoteRefreshSegmentPressureService.getRemoteRefreshSegmentTracker(indexShard.shardId()) - ); + remoteStoreStatsTrackerFactory = new RemoteStoreStatsTrackerFactory(clusterService, Settings.EMPTY); + remoteStoreStatsTrackerFactory.afterIndexShardCreated(indexShard); + RemoteSegmentTransferTracker tracker = remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(indexShard.shardId()); + remoteStoreRefreshListener = new RemoteStoreRefreshListener(indexShard, SegmentReplicationCheckpointPublisher.EMPTY, tracker); } private void indexDocs(int startDocId, int numberOfDocs) throws IOException { @@ -94,10 +103,89 @@ private void indexDocs(int startDocId, int numberOfDocs) throws IOException { public void tearDown() throws Exception { Directory storeDirectory = ((FilterDirectory) ((FilterDirectory) indexShard.store().directory()).getDelegate()).getDelegate(); ((BaseDirectoryWrapper) storeDirectory).setCheckIndexOnClose(false); + + for (ReferenceManager.RefreshListener refreshListener : indexShard.getEngine().config().getInternalRefreshListener()) { + if (refreshListener instanceof ReleasableRetryableRefreshListener) { + ((ReleasableRetryableRefreshListener) refreshListener).drainRefreshes(); + } + } + if (remoteStoreRefreshListener != null) { + remoteStoreRefreshListener.drainRefreshes(); + } + closeShards(indexShard); super.tearDown(); } + public void testRemoteDirectoryInitThrowsException() throws IOException { + // Methods used in the constructor of RemoteStoreRefreshListener have been mocked to reproduce specific exceptions + // to test the failure modes possible during construction of the RemoteStoreRefreshListener object.
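Taken out of context, the counter-gated doAnswer idiom that the mocking below relies on (first call succeeds, every later call throws) looks like this. A sketch under assumed names: Repo stands in for the mocked remote metadata directory.

import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;

class CounterStubSketch {
    interface Repo { List<String> listByPrefix(String prefix) throws IOException; }

    static Repo succeedOnceThenThrow() throws IOException {
        Repo repo = mock(Repo.class);
        AtomicLong calls = new AtomicLong();
        doAnswer(invocation -> {
            if (calls.incrementAndGet() <= 1) {
                return Collections.singletonList("dummy string"); // first call succeeds
            }
            throw new IOException(); // every subsequent call fails
        }).when(repo).listByPrefix(anyString());
        return repo;
    }
}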
+ Settings indexSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, org.opensearch.Version.CURRENT) + .put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build(); + indexShard = newStartedShard(false, indexSettings, new NRTReplicationEngineFactory()); + + // Mocking the IndexShard methods and dependent classes. + ShardId shardId = new ShardId("index1", "_na_", 1); + IndexShard shard = mock(IndexShard.class); + Store store = mock(Store.class); + Directory directory = mock(Directory.class); + ShardRouting shardRouting = mock(ShardRouting.class); + when(shard.store()).thenReturn(store); + when(store.directory()).thenReturn(directory); + when(shard.shardId()).thenReturn(shardId); + when(shard.routingEntry()).thenReturn(shardRouting); + when(shardRouting.primary()).thenReturn(true); + when(shard.getThreadPool()).thenReturn(mock(ThreadPool.class)); + + // Mock the Store, Directory and RemoteSegmentStoreDirectory classes + Store remoteStore = mock(Store.class); + when(shard.remoteStore()).thenReturn(remoteStore); + RemoteDirectory remoteMetadataDirectory = mock(RemoteDirectory.class); + AtomicLong listFilesCounter = new AtomicLong(); + + // Below we are trying to get the IOException thrown in the constructor of the RemoteSegmentStoreDirectory. + doAnswer(invocation -> { + if (listFilesCounter.incrementAndGet() <= 1) { + return Collections.singletonList("dummy string"); + } + throw new IOException(); + }).when(remoteMetadataDirectory) + .listFilesByPrefixInLexicographicOrder(MetadataFilenameUtils.METADATA_PREFIX, METADATA_FILES_TO_FETCH); + + SegmentInfos segmentInfos; + try (Store indexShardStore = indexShard.store()) { + segmentInfos = indexShardStore.readLastCommittedSegmentsInfo(); + } + + when(remoteMetadataDirectory.getBlobStream(any())).thenAnswer( + I -> createMetadataFileBytes(getDummyMetadata("_0", 1), indexShard.getLatestReplicationCheckpoint(), segmentInfos) + ); + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = new RemoteSegmentStoreDirectory( + mock(RemoteDirectory.class), + remoteMetadataDirectory, + mock(RemoteStoreLockManager.class), + mock(ThreadPool.class), + shardId + ); + FilterDirectory remoteStoreFilterDirectory = new RemoteStoreRefreshListenerTests.TestFilterDirectory( + new RemoteStoreRefreshListenerTests.TestFilterDirectory(remoteSegmentStoreDirectory) + ); + when(remoteStore.directory()).thenReturn(remoteStoreFilterDirectory); + + // Since the thrown IOException is caught in the constructor, ctor should be invoked successfully. + new RemoteStoreRefreshListener(shard, SegmentReplicationCheckpointPublisher.EMPTY, mock(RemoteSegmentTransferTracker.class)); + + // Validate that the stream of metadata file of remoteMetadataDirectory has been opened only once and the + // listFilesByPrefixInLexicographicOrder has been called twice. 
+ verify(remoteMetadataDirectory, times(1)).getBlobStream(any()); + verify(remoteMetadataDirectory, times(2)).listFilesByPrefixInLexicographicOrder( + MetadataFilenameUtils.METADATA_PREFIX, + METADATA_FILES_TO_FETCH + ); + } + public void testAfterRefresh() throws IOException { setup(true, 3); assertDocs(indexShard, "1", "2", "3"); @@ -161,7 +249,7 @@ public void testAfterMultipleCommits() throws IOException { setup(true, 3); assertDocs(indexShard, "1", "2", "3"); - for (int i = 0; i < RemoteStoreRefreshListener.LAST_N_METADATA_FILES_TO_KEEP + 3; i++) { + for (int i = 0; i < indexShard.getRecoverySettings().getMinRemoteSegmentMetadataFiles() + 3; i++) { indexDocs(4 * (i + 1), 4); flushShard(indexShard); } @@ -191,6 +279,7 @@ public void testReplica() throws IOException { } } + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9773") public void testReplicaPromotion() throws IOException, InterruptedException { setup(false, 3); remoteStoreRefreshListener.afterRefresh(true); @@ -242,41 +331,42 @@ public void testRefreshSuccessOnFirstAttempt() throws Exception { // This is the case of isRetry=false, shouldRetry=false // Succeed on 1st attempt int succeedOnAttempt = 1; - // We spy on IndexShard.getReplicationTracker() to validate that we have tried running remote time as per the expectation. + // We spy on IndexShard.isPrimaryStarted() to validate that we have tried running the remote upload as many times as expected. CountDownLatch refreshCountLatch = new CountDownLatch(succeedOnAttempt); // We spy on IndexShard.getEngine() to validate that we have successfully hit the terminal code for ascertaining successful upload. // Value has been set as 3 as during a successful upload IndexShard.getEngine() is hit thrice and with mockito we are counting down CountDownLatch successLatch = new CountDownLatch(3); - Tuple<RemoteStoreRefreshListener, RemoteRefreshSegmentPressureService> tuple = mockIndexShardWithRetryAndScheduleRefresh( + Tuple<RemoteStoreRefreshListener, RemoteStoreStatsTrackerFactory> tuple = mockIndexShardWithRetryAndScheduleRefresh( succeedOnAttempt, refreshCountLatch, successLatch ); assertBusy(() -> assertEquals(0, refreshCountLatch.getCount())); assertBusy(() -> assertEquals(0, successLatch.getCount())); - RemoteRefreshSegmentPressureService pressureService = tuple.v2(); - RemoteRefreshSegmentTracker segmentTracker = pressureService.getRemoteRefreshSegmentTracker(indexShard.shardId()); + RemoteStoreStatsTrackerFactory trackerFactory = tuple.v2(); + RemoteSegmentTransferTracker segmentTracker = trackerFactory.getRemoteSegmentTransferTracker(indexShard.shardId()); assertNoLagAndTotalUploadsFailed(segmentTracker, 0); + assertTrue("remote store in sync", tuple.v1().isRemoteSegmentStoreInSync()); } public void testRefreshSuccessOnSecondAttempt() throws Exception { // This covers 2 cases - 1) isRetry=false, shouldRetry=true 2) isRetry=true, shouldRetry=false // Succeed on 2nd attempt int succeedOnAttempt = 2; - // We spy on IndexShard.getReplicationTracker() to validate that we have tried running remote time as per the expectation. + // We spy on IndexShard.isPrimaryStarted() to validate that we have tried running the remote upload as many times as expected. CountDownLatch refreshCountLatch = new CountDownLatch(succeedOnAttempt); // We spy on IndexShard.getEngine() to validate that we have successfully hit the terminal code for ascertaining successful upload.
// Value has been set as 3 as during a successful upload IndexShard.getEngine() is hit thrice and with mockito we are counting down CountDownLatch successLatch = new CountDownLatch(3); - Tuple<RemoteStoreRefreshListener, RemoteRefreshSegmentPressureService> tuple = mockIndexShardWithRetryAndScheduleRefresh( + Tuple<RemoteStoreRefreshListener, RemoteStoreStatsTrackerFactory> tuple = mockIndexShardWithRetryAndScheduleRefresh( succeedOnAttempt, refreshCountLatch, successLatch ); assertBusy(() -> assertEquals(0, refreshCountLatch.getCount())); assertBusy(() -> assertEquals(0, successLatch.getCount())); - RemoteRefreshSegmentPressureService pressureService = tuple.v2(); - RemoteRefreshSegmentTracker segmentTracker = pressureService.getRemoteRefreshSegmentTracker(indexShard.shardId()); + RemoteStoreStatsTrackerFactory trackerFactory = tuple.v2(); + RemoteSegmentTransferTracker segmentTracker = trackerFactory.getRemoteSegmentTransferTracker(indexShard.shardId()); assertNoLagAndTotalUploadsFailed(segmentTracker, 1); } @@ -287,7 +377,7 @@ public void testRefreshSuccessOnSecondAttempt() throws Exception { public void testRefreshSuccessAfterFailureInFirstAttemptAfterSnapshotAndMetadataUpload() throws Exception { int succeedOnAttempt = 1; int checkpointPublishSucceedOnAttempt = 2; - // We spy on IndexShard.getReplicationTracker() to validate that we have tried running remote time as per the expectation. + // We spy on IndexShard.isPrimaryStarted() to validate that we have tried running the remote upload as many times as expected. CountDownLatch refreshCountLatch = new CountDownLatch(succeedOnAttempt); // We spy on IndexShard.getEngine() to validate that we have successfully hit the terminal code for ascertaining successful upload. // Value has been set as 6 as during a successful upload IndexShard.getEngine() is hit thrice and here we are running the flow twice @@ -309,24 +399,38 @@ public void testRefreshSuccessOnThirdAttempt() throws Exception { // This covers 3 cases - 1) isRetry=false, shouldRetry=true 2) isRetry=true, shouldRetry=false 3) isRetry=True, shouldRetry=true // Succeed on 3rd attempt int succeedOnAttempt = 3; - // We spy on IndexShard.getReplicationTracker() to validate that we have tried running remote time as per the expectation. + // We spy on IndexShard.isPrimaryStarted() to validate that we have tried running the remote upload as many times as expected. CountDownLatch refreshCountLatch = new CountDownLatch(succeedOnAttempt); // We spy on IndexShard.getEngine() to validate that we have successfully hit the terminal code for ascertaining successful upload.
// Value has been set as 3 as during a successful upload IndexShard.getEngine() is hit thrice and with mockito we are counting down CountDownLatch successLatch = new CountDownLatch(3); - Tuple<RemoteStoreRefreshListener, RemoteRefreshSegmentPressureService> tuple = mockIndexShardWithRetryAndScheduleRefresh( + Tuple<RemoteStoreRefreshListener, RemoteStoreStatsTrackerFactory> tuple = mockIndexShardWithRetryAndScheduleRefresh( succeedOnAttempt, refreshCountLatch, successLatch ); assertBusy(() -> assertEquals(0, refreshCountLatch.getCount())); assertBusy(() -> assertEquals(0, successLatch.getCount())); - RemoteRefreshSegmentPressureService pressureService = tuple.v2(); - RemoteRefreshSegmentTracker segmentTracker = pressureService.getRemoteRefreshSegmentTracker(indexShard.shardId()); + RemoteStoreStatsTrackerFactory trackerFactory = tuple.v2(); + RemoteSegmentTransferTracker segmentTracker = trackerFactory.getRemoteSegmentTransferTracker(indexShard.shardId()); assertNoLagAndTotalUploadsFailed(segmentTracker, 2); } - private void assertNoLagAndTotalUploadsFailed(RemoteRefreshSegmentTracker segmentTracker, long totalUploadsFailed) throws Exception { + public void testRefreshPersistentFailure() throws Exception { + int succeedOnAttempt = 10; + CountDownLatch refreshCountLatch = new CountDownLatch(1); + CountDownLatch successLatch = new CountDownLatch(10); + Tuple<RemoteStoreRefreshListener, RemoteStoreStatsTrackerFactory> tuple = mockIndexShardWithRetryAndScheduleRefresh( + succeedOnAttempt, + refreshCountLatch, + successLatch + ); + // Giving 10ms for some iterations of remote refresh upload + Thread.sleep(10); + assertFalse("remote store should not be in sync", tuple.v1().isRemoteSegmentStoreInSync()); + } + + private void assertNoLagAndTotalUploadsFailed(RemoteSegmentTransferTracker segmentTracker, long totalUploadsFailed) throws Exception { assertBusy(() -> { assertEquals(0, segmentTracker.getBytesLag()); assertEquals(0, segmentTracker.getRefreshSeqNoLag()); @@ -336,18 +440,45 @@ private void assertNoLagAndTotalUploadsFailed(RemoteRefreshSegmentTracker segmen } public void testTrackerData() throws Exception { - Tuple<RemoteStoreRefreshListener, RemoteRefreshSegmentPressureService> tuple = mockIndexShardWithRetryAndScheduleRefresh(1); + Tuple<RemoteStoreRefreshListener, RemoteStoreStatsTrackerFactory> tuple = mockIndexShardWithRetryAndScheduleRefresh(1); RemoteStoreRefreshListener listener = tuple.v1(); - RemoteRefreshSegmentPressureService pressureService = tuple.v2(); - RemoteRefreshSegmentTracker tracker = pressureService.getRemoteRefreshSegmentTracker(indexShard.shardId()); - assertNoLag(tracker); + RemoteStoreStatsTrackerFactory trackerFactory = tuple.v2(); + RemoteSegmentTransferTracker tracker = trackerFactory.getRemoteSegmentTransferTracker(indexShard.shardId()); + assertBusy(() -> assertNoLag(tracker)); indexDocs(100, randomIntBetween(100, 200)); indexShard.refresh("test"); listener.afterRefresh(true); assertBusy(() -> assertNoLag(tracker)); } - private void assertNoLag(RemoteRefreshSegmentTracker tracker) { + /** + * Tests that segment upload fails when the replication checkpoint and the replication tracker primary terms mismatch + */ + public void testRefreshFailedDueToPrimaryTermMisMatch() throws Exception { + int totalAttempt = 1; + int checkpointPublishSucceedOnAttempt = 0; + // We spy on IndexShard.isPrimaryStarted() to validate that we have tried running the remote upload as many times as expected.
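Stripped of the shard machinery, the latch bookkeeping these retry tests depend on reduces to a simple pattern: a spy counts a latch down once per attempted upload, and the test asserts the latch drained (or, for expected failures, that it did not). A standalone sketch, not the production retry loop:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

class RetryLatchSketch {
    public static void main(String[] args) throws InterruptedException {
        int succeedOnAttempt = 3;
        CountDownLatch attempts = new CountDownLatch(succeedOnAttempt);
        for (int attempt = 1; attempt <= succeedOnAttempt; attempt++) {
            attempts.countDown(); // one countdown per attempted upload, as the spy would do
            boolean succeeded = (attempt == succeedOnAttempt); // earlier attempts simulate failures
            if (succeeded) {
                break;
            }
        }
        // the assertion side: all expected attempts must have happened
        if (!attempts.await(1, TimeUnit.SECONDS)) {
            throw new AssertionError("expected " + succeedOnAttempt + " upload attempts");
        }
    }
}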
+ CountDownLatch refreshCountLatch = new CountDownLatch(totalAttempt); + + // the success latch should not count down, since the primary term validation will fail. + CountDownLatch successLatch = new CountDownLatch(1); + CountDownLatch reachedCheckpointPublishLatch = new CountDownLatch(0); + Tuple<RemoteStoreRefreshListener, RemoteStoreStatsTrackerFactory> tuple = mockIndexShardWithRetryAndScheduleRefresh( + totalAttempt, + refreshCountLatch, + successLatch, + checkpointPublishSucceedOnAttempt, + reachedCheckpointPublishLatch, + false + ); + + assertBusy(() -> assertEquals(1, tuple.v2().getRemoteSegmentTransferTracker(indexShard.shardId()).getTotalUploadsFailed())); + assertBusy(() -> assertEquals(0, refreshCountLatch.getCount())); + assertBusy(() -> assertEquals(1, successLatch.getCount())); + assertBusy(() -> assertEquals(0, reachedCheckpointPublishLatch.getCount())); + } + + private void assertNoLag(RemoteSegmentTransferTracker tracker) { assertEquals(0, tracker.getRefreshSeqNoLag()); assertEquals(0, tracker.getBytesLag()); assertEquals(0, tracker.getTimeMsLag()); @@ -361,13 +492,13 @@ private void assertNoLag(RemoteRefreshSegmentTracker tracker) { assertEquals(0, tracker.getTotalUploadsFailed()); } - private Tuple<RemoteStoreRefreshListener, RemoteRefreshSegmentPressureService> mockIndexShardWithRetryAndScheduleRefresh( + private Tuple<RemoteStoreRefreshListener, RemoteStoreStatsTrackerFactory> mockIndexShardWithRetryAndScheduleRefresh( int succeedOnAttempt ) throws IOException { return mockIndexShardWithRetryAndScheduleRefresh(succeedOnAttempt, null, null); } - private Tuple<RemoteStoreRefreshListener, RemoteRefreshSegmentPressureService> mockIndexShardWithRetryAndScheduleRefresh( + private Tuple<RemoteStoreRefreshListener, RemoteStoreStatsTrackerFactory> mockIndexShardWithRetryAndScheduleRefresh( int succeedOnAttempt, CountDownLatch refreshCountLatch, CountDownLatch successLatch @@ -376,12 +507,30 @@ private Tuple<RemoteStoreRefreshListener, RemoteRefreshSegmentPressureService> m return mockIndexShardWithRetryAndScheduleRefresh(succeedOnAttempt, refreshCountLatch, successLatch, 1, noOpLatch); } - private Tuple<RemoteStoreRefreshListener, RemoteRefreshSegmentPressureService> mockIndexShardWithRetryAndScheduleRefresh( + private Tuple<RemoteStoreRefreshListener, RemoteStoreStatsTrackerFactory> mockIndexShardWithRetryAndScheduleRefresh( int succeedOnAttempt, CountDownLatch refreshCountLatch, CountDownLatch successLatch, int succeedCheckpointPublishOnAttempt, CountDownLatch reachedCheckpointPublishLatch + ) throws IOException { + return mockIndexShardWithRetryAndScheduleRefresh( + succeedOnAttempt, + refreshCountLatch, + successLatch, + succeedCheckpointPublishOnAttempt, + reachedCheckpointPublishLatch, + true + ); + } + + private Tuple<RemoteStoreRefreshListener, RemoteStoreStatsTrackerFactory> mockIndexShardWithRetryAndScheduleRefresh( + int succeedOnAttempt, + CountDownLatch refreshCountLatch, + CountDownLatch successLatch, + int succeedCheckpointPublishOnAttempt, + CountDownLatch reachedCheckpointPublishLatch, + boolean mockPrimaryTerm ) throws IOException { // Create index shard that we will be using to mock different methods in IndexShard for the unit test indexShard = newStartedShard( @@ -395,12 +544,21 @@ private Tuple<RemoteStoreRefreshListener, RemoteRefreshSegmentPressureService> m new InternalEngineFactory() ); + RemoteSegmentTransferTracker tracker = indexShard.getRemoteStoreStatsTrackerFactory() + .getRemoteSegmentTransferTracker(indexShard.shardId()); + try { + assertBusy(() ->
assertTrue(tracker.getTotalUploadsSucceeded() > 0)); + } catch (Exception e) { + assert false; + } + indexDocs(1, randomIntBetween(1, 100)); // Mock indexShard.store().directory() IndexShard shard = mock(IndexShard.class); Store store = mock(Store.class); when(shard.store()).thenReturn(store); + when(shard.state()).thenReturn(IndexShardState.STARTED); when(store.directory()).thenReturn(indexShard.store().directory()); // Mock (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) indexShard.remoteStore().directory()) @@ -413,7 +571,10 @@ private Tuple<RemoteStoreRefreshListener, RemoteRefreshSegmentPressureService> m when(remoteStore.directory()).thenReturn(remoteStoreFilterDirectory); // Mock indexShard.getOperationPrimaryTerm() - when(shard.getOperationPrimaryTerm()).thenReturn(indexShard.getOperationPrimaryTerm()); + if (mockPrimaryTerm) { + when(shard.getOperationPrimaryTerm()).thenReturn(indexShard.getOperationPrimaryTerm()); + } + when(shard.getLatestReplicationCheckpoint()).thenReturn(indexShard.getLatestReplicationCheckpoint()); // Mock indexShard.routingEntry().primary() when(shard.routingEntry()).thenReturn(indexShard.routingEntry()); @@ -422,26 +583,34 @@ when(shard.getThreadPool()).thenReturn(threadPool); // Mock indexShard.getReplicationTracker().isPrimaryMode() - doAnswer(invocation -> { if (Objects.nonNull(refreshCountLatch)) { refreshCountLatch.countDown(); } - return indexShard.getReplicationTracker(); - }).when(shard).getReplicationTracker(); + return true; + }).when(shard).isStartedPrimary(); AtomicLong counter = new AtomicLong(); // Mock indexShard.getSegmentInfosSnapshot() doAnswer(invocation -> { - if (counter.incrementAndGet() <= succeedOnAttempt - 1) { + if (counter.incrementAndGet() <= succeedOnAttempt) { + logger.error("Failing in get segment info {}", counter.get()); throw new RuntimeException("Inducing failure in upload"); } return indexShard.getSegmentInfosSnapshot(); }).when(shard).getSegmentInfosSnapshot(); + doAnswer((invocation -> { + if (counter.incrementAndGet() <= succeedOnAttempt) { + throw new RuntimeException("Inducing failure in upload"); + } + return indexShard.getLatestReplicationCheckpoint(); + })).when(shard).computeReplicationCheckpoint(any()); + doAnswer(invocation -> { if (Objects.nonNull(successLatch)) { successLatch.countDown(); + logger.info("Value of latch {}", successLatch.getCount()); } return indexShard.getEngine(); }).when(shard).getEngine(); @@ -463,20 +632,15 @@ private Tuple<RemoteStoreRefreshListener, RemoteRefreshSegmentPressureService> m new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool ); - RemoteRefreshSegmentPressureService remoteRefreshSegmentPressureService = new RemoteRefreshSegmentPressureService( - clusterService, - Settings.EMPTY - ); + RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory = indexShard.getRemoteStoreStatsTrackerFactory(); when(shard.indexSettings()).thenReturn(indexShard.indexSettings()); when(shard.shardId()).thenReturn(indexShard.shardId()); - remoteRefreshSegmentPressureService.afterIndexShardCreated(shard); - RemoteStoreRefreshListener refreshListener = new RemoteStoreRefreshListener( - shard, - emptyCheckpointPublisher, - remoteRefreshSegmentPressureService.getRemoteRefreshSegmentTracker(indexShard.shardId()) - ); + RecoverySettings recoverySettings = mock(RecoverySettings.class); + when(recoverySettings.getMinRemoteSegmentMetadataFiles()).thenReturn(10); + 
when(shard.getRecoverySettings()).thenReturn(recoverySettings); + RemoteStoreRefreshListener refreshListener = new RemoteStoreRefreshListener(shard, emptyCheckpointPublisher, tracker); refreshListener.afterRefresh(true); - return Tuple.tuple(refreshListener, remoteRefreshSegmentPressureService); + return Tuple.tuple(refreshListener, remoteStoreStatsTrackerFactory); } public static class TestFilterDirectory extends FilterDirectory { @@ -506,5 +670,31 @@ private void verifyUploadedSegments(RemoteSegmentStoreDirectory remoteSegmentSto } } } + assertTrue(remoteStoreRefreshListener.isRemoteSegmentStoreInSync()); + } + + public void testRemoteSegmentStoreNotInSync() throws IOException { + setup(true, 3); + remoteStoreRefreshListener.afterRefresh(true); + try (Store remoteStore = indexShard.remoteStore()) { + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = + (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) remoteStore.directory()).getDelegate()).getDelegate(); + verifyUploadedSegments(remoteSegmentStoreDirectory); + remoteStoreRefreshListener.isRemoteSegmentStoreInSync(); + boolean oneFileDeleted = false; + // Delete any one file from remote store + try (GatedCloseable<SegmentInfos> segmentInfosGatedCloseable = indexShard.getSegmentInfosSnapshot()) { + SegmentInfos segmentInfos = segmentInfosGatedCloseable.get(); + for (String file : segmentInfos.files(true)) { + if (oneFileDeleted == false && RemoteStoreRefreshListener.EXCLUDE_FILES.contains(file) == false) { + remoteSegmentStoreDirectory.deleteFile(file); + oneFileDeleted = true; + break; + } + } + } + assertFalse(remoteStoreRefreshListener.isRemoteSegmentStoreInSync()); + } } + } diff --git a/server/src/test/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandTests.java b/server/src/test/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandTests.java index 040c47fae2858..c88c86d51be08 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandTests.java @@ -58,8 +58,9 @@ import org.opensearch.env.TestEnvironment; import org.opensearch.gateway.PersistedClusterStateService; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.engine.EngineConfigFactory; +import org.opensearch.index.engine.EngineCreationFailureException; import org.opensearch.index.engine.InternalEngineFactory; import org.opensearch.index.seqno.RetentionLeaseSyncer; import org.opensearch.index.store.Store; @@ -133,7 +134,7 @@ public void setup() throws IOException { final Settings settings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexMetadata.SETTING_INDEX_UUID, shardId.getIndex().getUUID()) .build(); @@ -292,7 +293,10 @@ public void testCorruptedTranslog() throws Exception { allowShardFailures(); // it has to fail on start up due to index.shard.check_on_startup = checksum final Exception exception = expectThrows(Exception.class, () -> newStartedShard(p -> corruptedShard, true)); - final Throwable cause = exception.getCause() instanceof TranslogException ? 
exception.getCause().getCause() : exception.getCause(); + // if corruption is in engine UUID in header, the TranslogCorruptedException is caught and rethrown as + // EngineCreationFailureException rather than TranslogException + final Throwable cause = exception.getCause() instanceof TranslogException + || exception.getCause() instanceof EngineCreationFailureException ? exception.getCause().getCause() : exception.getCause(); assertThat(cause, instanceOf(TranslogCorruptedException.class)); closeShard(corruptedShard, false); // translog is corrupted already - do not check consistency diff --git a/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java b/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java index 690c7955ff338..4f5cad70fd643 100644 --- a/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java +++ b/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java @@ -8,7 +8,6 @@ package org.opensearch.index.shard; -import org.junit.Assert; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.ShardRouting; @@ -18,13 +17,16 @@ import org.opensearch.index.engine.DocIdSeqNoAndSource; import org.opensearch.index.engine.NRTReplicationEngine; import org.opensearch.index.engine.NRTReplicationEngineFactory; +import org.opensearch.index.mapper.MapperService; import org.opensearch.index.replication.OpenSearchIndexLevelReplicationTestCase; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.translog.WriteOnlyTranslogManager; import org.opensearch.indices.recovery.RecoveryTarget; import org.opensearch.indices.replication.common.ReplicationType; +import org.junit.Assert; import java.io.IOException; +import java.nio.file.Path; import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; @@ -40,14 +42,15 @@ public class ReplicaRecoveryWithRemoteTranslogOnPrimaryTests extends OpenSearchI .build(); public void testStartSequenceForReplicaRecovery() throws Exception { - try (ReplicationGroup shards = createGroup(0, settings, new NRTReplicationEngineFactory())) { - + final Path remoteDir = createTempDir(); + final String indexMapping = "{ \"" + MapperService.SINGLE_MAPPING_NAME + "\": {} }"; + try (ReplicationGroup shards = createGroup(0, settings, indexMapping, new NRTReplicationEngineFactory(), remoteDir)) { shards.startPrimary(); final IndexShard primary = shards.getPrimary(); int numDocs = shards.indexDocs(randomIntBetween(10, 100)); shards.flush(); - final IndexShard replica = shards.addReplica(); + final IndexShard replica = shards.addReplica(remoteDir); shards.startAll(); allowShardFailures(); @@ -62,7 +65,6 @@ public void testStartSequenceForReplicaRecovery() throws Exception { int moreDocs = shards.indexDocs(randomIntBetween(20, 100)); shards.flush(); - IndexShard newReplicaShard = newShard( newShardRouting( replicaRouting.shardId(), @@ -80,7 +82,7 @@ public void testStartSequenceForReplicaRecovery() throws Exception { replica.getGlobalCheckpointSyncer(), replica.getRetentionLeaseSyncer(), EMPTY_EVENT_LISTENER, - null + remoteDir ); shards.addReplica(newReplicaShard); AtomicBoolean assertDone = new AtomicBoolean(false); @@ -103,7 +105,6 @@ public IndexShard indexShard() { return idxShard; } }); - shards.flush(); replicateSegments(primary, shards.getReplicas()); 
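The corrupted-translog assertion in RemoveCorruptedShardDataCommandTests above peels one level off the cause chain by hand, because the corruption may surface wrapped in either a TranslogException or an EngineCreationFailureException. The general form of that unwrapping, similar in spirit to what OpenSearch's ExceptionsHelper offers, can be sketched as:

class UnwrapSketch {
    // Walk the cause chain until an instance of the target type is found, else return null.
    // (A production version would also guard against cycles in the chain.)
    static <T extends Throwable> T unwrap(Throwable t, Class<T> target) {
        for (Throwable cause = t; cause != null; cause = cause.getCause()) {
            if (target.isInstance(cause)) {
                return target.cast(cause);
            }
        }
        return null;
    }
}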
shards.assertAllEqual(numDocs + moreDocs); @@ -111,7 +112,9 @@ public IndexShard indexShard() { } public void testNoTranslogHistoryTransferred() throws Exception { - try (ReplicationGroup shards = createGroup(0, settings, new NRTReplicationEngineFactory())) { + final Path remoteDir = createTempDir(); + final String indexMapping = "{ \"" + MapperService.SINGLE_MAPPING_NAME + "\": {} }"; + try (ReplicationGroup shards = createGroup(0, settings, indexMapping, new NRTReplicationEngineFactory(), remoteDir)) { // Step1 - Start primary, index docs, flush, index more docs, check translog in primary as expected shards.startPrimary(); @@ -123,7 +126,7 @@ public void testNoTranslogHistoryTransferred() throws Exception { assertEquals(numDocs + moreDocs, getTranslog(primary).totalOperations()); // Step 2 - Start replica, recovery happens, check docs recovered till last flush - final IndexShard replica = shards.addReplica(); + final IndexShard replica = shards.addReplica(remoteDir); shards.startAll(); assertEquals(docIdAndSeqNosAfterFlush, getDocIdAndSeqNos(replica)); assertDocCount(replica, numDocs); diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java index cc4fa6f28bafc..7caff3e5f5479 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java @@ -11,35 +11,41 @@ import org.apache.lucene.codecs.Codec; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.store.AlreadyClosedException; -import org.junit.Assert; import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest; -import org.opensearch.action.delete.DeleteRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingHelper; import org.opensearch.common.collect.Tuple; import org.opensearch.common.concurrent.GatedCloseable; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.CancellableThreads; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.lease.Releasable; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; +import org.opensearch.index.SegmentReplicationShardStats; import org.opensearch.index.engine.DocIdSeqNoAndSource; import org.opensearch.index.engine.Engine; -import org.opensearch.index.engine.InternalEngine; import org.opensearch.index.engine.InternalEngineFactory; import org.opensearch.index.engine.NRTReplicationEngine; import org.opensearch.index.engine.NRTReplicationEngineFactory; +import 
org.opensearch.index.engine.ReadOnlyEngine; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.replication.OpenSearchIndexLevelReplicationTestCase; import org.opensearch.index.replication.TestReplicationSource; +import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.index.translog.SnapshotMatchers; @@ -54,37 +60,45 @@ import org.opensearch.indices.replication.SegmentReplicationState; import org.opensearch.indices.replication.SegmentReplicationTarget; import org.opensearch.indices.replication.SegmentReplicationTargetService; -import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.CopyState; import org.opensearch.indices.replication.common.ReplicationFailedException; import org.opensearch.indices.replication.common.ReplicationListener; import org.opensearch.indices.replication.common.ReplicationState; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.repositories.IndexId; +import org.opensearch.snapshots.Snapshot; +import org.opensearch.snapshots.SnapshotId; +import org.opensearch.snapshots.SnapshotInfoTests; +import org.opensearch.snapshots.SnapshotShardsService; +import org.opensearch.test.VersionUtils; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; +import org.junit.Assert; import java.io.IOException; +import java.io.UncheckedIOException; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Consumer; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiConsumer; import java.util.function.Function; -import java.util.stream.Collectors; -import java.util.stream.Stream; -import static java.util.Arrays.asList; +import static org.opensearch.index.engine.EngineTestCase.assertAtMostOneLuceneDocumentPerSequenceNumber; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doThrow; @@ -100,6 +114,40 @@ public class SegmentReplicationIndexShardTests extends OpenSearchIndexLevelRepli .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .build(); + protected ReplicationGroup getReplicationGroup(int numberOfReplicas) throws IOException { + return createGroup(numberOfReplicas, getIndexSettings(), indexMapping, new NRTReplicationEngineFactory()); + } + + protected ReplicationGroup getReplicationGroup(int numberOfReplicas, String indexMapping) throws IOException { + return createGroup(numberOfReplicas, getIndexSettings(), indexMapping, new NRTReplicationEngineFactory()); + } + + protected Settings getIndexSettings() { 
+ return settings; + } + + /** + * Validates the happy path of segment replication, where docs indexed on the primary are replicated to replica shards. + * Assertions are made on the doc count on both primary and replica. + */ + public void testReplication() throws Exception { + try (ReplicationGroup shards = createGroup(1, getIndexSettings(), indexMapping, new NRTReplicationEngineFactory());) { + shards.startAll(); + final IndexShard primaryShard = shards.getPrimary(); + final IndexShard replicaShard = shards.getReplicas().get(0); + + // index and replicate segments to replica. + int numDocs = randomIntBetween(10, 20); + shards.indexDocs(numDocs); + primaryShard.refresh("test"); + flushShard(primaryShard); + replicateSegments(primaryShard, List.of(replicaShard)); + + // Assertions + shards.assertAllEqual(numDocs); + } + } + /** * Test that latestReplicationCheckpoint returns null only for docrep enabled indices */ @@ -114,7 +162,7 @@ public void testReplicationCheckpointNullForDocRep() throws IOException { * Test that latestReplicationCheckpoint returns ReplicationCheckpoint for segrep enabled indices */ public void testReplicationCheckpointNotNullForSegRep() throws IOException { - final IndexShard indexShard = newStartedShard(randomBoolean(), settings, new NRTReplicationEngineFactory()); + final IndexShard indexShard = newStartedShard(randomBoolean(), getIndexSettings(), new NRTReplicationEngineFactory()); final ReplicationCheckpoint replicationCheckpoint = indexShard.getLatestReplicationCheckpoint(); assertNotNull(replicationCheckpoint); closeShards(indexShard); @@ -127,7 +175,7 @@ public void testNRTReplicasDoNotAcceptRefreshListeners() throws IOException { } public void testSegmentInfosAndReplicationCheckpointTuple() throws Exception { - try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + try (ReplicationGroup shards = createGroup(1, getIndexSettings(), indexMapping, new NRTReplicationEngineFactory())) { shards.startAll(); final IndexShard primary = shards.getPrimary(); final IndexShard replica = shards.getReplicas().get(0); @@ -149,7 +197,7 @@ public void testSegmentInfosAndReplicationCheckpointTuple() throws Exception { assertEquals(1, primary.getLatestReplicationCheckpoint().compareTo(replica.getLatestReplicationCheckpoint())); // index and copy segments to replica.
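The compareTo assertion above (expecting 1) relies on replication checkpoints being comparable, ordered by primary term first and then by segment version. A toy model of that ordering; the field names are assumed for illustration, and the real ReplicationCheckpoint carries more state:

class CheckpointSketch implements Comparable<CheckpointSketch> {
    final long primaryTerm;
    final long segmentInfosVersion;

    CheckpointSketch(long primaryTerm, long segmentInfosVersion) {
        this.primaryTerm = primaryTerm;
        this.segmentInfosVersion = segmentInfosVersion;
    }

    @Override
    public int compareTo(CheckpointSketch other) {
        // e.g. same term, version 5 vs 4 -> 1: the primary is ahead of the replica
        int byTerm = Long.compare(primaryTerm, other.primaryTerm);
        return byTerm != 0 ? byTerm : Long.compare(segmentInfosVersion, other.segmentInfosVersion);
    }
}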
- int numDocs = randomIntBetween(10, 100); + int numDocs = randomIntBetween(10, 20); shards.indexDocs(numDocs); primary.refresh("test"); replicateSegments(primary, List.of(replica)); @@ -172,6 +220,51 @@ public void testSegmentInfosAndReplicationCheckpointTuple() throws Exception { } } + public void testPrimaryRelocationWithSegRepFailure() throws Exception { + final IndexShard primarySource = newStartedShard(true, getIndexSettings()); + int totalOps = randomInt(10); + for (int i = 0; i < totalOps; i++) { + indexDoc(primarySource, "_doc", Integer.toString(i)); + } + IndexShardTestCase.updateRoutingEntry(primarySource, primarySource.routingEntry().relocate(randomAlphaOfLength(10), -1)); + final IndexShard primaryTarget = newShard( + primarySource.routingEntry().getTargetRelocatingShard(), + getIndexSettings(), + new NRTReplicationEngineFactory() + ); + updateMappings(primaryTarget, primarySource.indexSettings().getIndexMetadata()); + + Function<List<IndexShard>, List<SegmentReplicationTarget>> replicatePrimaryFunction = (shardList) -> { + try { + throw new IOException("Expected failure"); + } catch (IOException e) { + throw new RuntimeException(e); + } + }; + Exception e = expectThrows( + Exception.class, + () -> recoverReplica( + primaryTarget, + primarySource, + (primary, sourceNode) -> new RecoveryTarget(primary, sourceNode, new ReplicationListener() { + @Override + public void onDone(ReplicationState state) { + throw new AssertionError("recovery must fail"); + } + + @Override + public void onFailure(ReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { + assertEquals(ExceptionsHelper.unwrap(e, IOException.class).getMessage(), "Expected failure"); + } + }), + true, + true, + replicatePrimaryFunction + ) + ); + closeShards(primarySource, primaryTarget); + } + private void assertReplicationCheckpoint(IndexShard shard, SegmentInfos segmentInfos, ReplicationCheckpoint checkpoint) throws IOException { assertNotNull(segmentInfos); @@ -180,7 +273,7 @@ private void assertReplicationCheckpoint(IndexShard shard, SegmentInfos segmentI } public void testIsSegmentReplicationAllowed_WrongEngineType() throws IOException { - final IndexShard indexShard = newShard(false, settings, new InternalEngineFactory()); + final IndexShard indexShard = newShard(false, getIndexSettings(), new InternalEngineFactory()); assertFalse(indexShard.isSegmentReplicationAllowed()); closeShards(indexShard); } @@ -193,16 +286,16 @@ public void testIsSegmentReplicationAllowed_WrongEngineType() throws IOException */ public void testSegmentReplication_With_ReaderClosedConcurrently() throws Exception { String mappings = "{ \"" + MapperService.SINGLE_MAPPING_NAME + "\": { \"properties\": { \"foo\": { \"type\": \"keyword\"} }}}"; - try (ReplicationGroup shards = createGroup(1, settings, mappings, new NRTReplicationEngineFactory())) { + try (ReplicationGroup shards = createGroup(1, getIndexSettings(), mappings, new NRTReplicationEngineFactory())) { shards.startAll(); IndexShard primaryShard = shards.getPrimary(); final IndexShard replicaShard = shards.getReplicas().get(0); // Step 1. 
Ingest numDocs documents & replicate to replica shard - final int numDocs = randomIntBetween(100, 200); + final int numDocs = randomIntBetween(10, 20); logger.info("--> Inserting documents {}", numDocs); for (int i = 0; i < numDocs; i++) { - shards.index(new IndexRequest(index.getName()).id(String.valueOf(i)).source("{\"foo\": \"bar\"}", XContentType.JSON)); + shards.index(new IndexRequest(index.getName()).id(String.valueOf(i)).source("{\"foo\": \"bar\"}", MediaTypeRegistry.JSON)); } assertEqualTranslogOperations(shards, primaryShard); primaryShard.refresh("Test"); @@ -216,7 +309,7 @@ public void testSegmentReplication_With_ReaderClosedConcurrently() throws Except // Step 2. Ingest numDocs documents again & replicate to replica shard logger.info("--> Ingest {} docs again", numDocs); for (int i = 0; i < numDocs; i++) { - shards.index(new IndexRequest(index.getName()).id(String.valueOf(i)).source("{\"foo\": \"bar\"}", XContentType.JSON)); + shards.index(new IndexRequest(index.getName()).id(String.valueOf(i)).source("{\"foo\": \"bar\"}", MediaTypeRegistry.JSON)); } assertEqualTranslogOperations(shards, primaryShard); primaryShard.flush(new FlushRequest().waitIfOngoing(true).force(true)); @@ -242,16 +335,16 @@ public void testSegmentReplication_With_ReaderClosedConcurrently() throws Except */ public void testSegmentReplication_With_EngineClosedConcurrently() throws Exception { String mappings = "{ \"" + MapperService.SINGLE_MAPPING_NAME + "\": { \"properties\": { \"foo\": { \"type\": \"keyword\"} }}}"; - try (ReplicationGroup shards = createGroup(1, settings, mappings, new NRTReplicationEngineFactory())) { + try (ReplicationGroup shards = createGroup(1, getIndexSettings(), mappings, new NRTReplicationEngineFactory())) { shards.startAll(); IndexShard primaryShard = shards.getPrimary(); final IndexShard replicaShard = shards.getReplicas().get(0); // Step 1. Ingest numDocs documents - final int numDocs = randomIntBetween(100, 200); + final int numDocs = randomIntBetween(10, 20); logger.info("--> Inserting documents {}", numDocs); for (int i = 0; i < numDocs; i++) { - shards.index(new IndexRequest(index.getName()).id(String.valueOf(i)).source("{\"foo\": \"bar\"}", XContentType.JSON)); + shards.index(new IndexRequest(index.getName()).id(String.valueOf(i)).source("{\"foo\": \"bar\"}", MediaTypeRegistry.JSON)); } assertEqualTranslogOperations(shards, primaryShard); primaryShard.refresh("Test"); @@ -262,7 +355,7 @@ public void testSegmentReplication_With_EngineClosedConcurrently() throws Except // Step 2. 
Ingest numDocs documents again to create a new commit logger.info("--> Ingest {} docs again", numDocs); for (int i = 0; i < numDocs; i++) { - shards.index(new IndexRequest(index.getName()).id(String.valueOf(i)).source("{\"foo\": \"bar\"}", XContentType.JSON)); + shards.index(new IndexRequest(index.getName()).id(String.valueOf(i)).source("{\"foo\": \"bar\"}", MediaTypeRegistry.JSON)); } assertEqualTranslogOperations(shards, primaryShard); primaryShard.flush(new FlushRequest().waitIfOngoing(true).force(true)); @@ -284,137 +377,9 @@ public void testSegmentReplication_With_EngineClosedConcurrently() throws Except } } - /** - * Verifies that commits on replica engine resulting from engine or reader close does not cleanup the temporary - * replication files from ongoing round of segment replication - */ - public void testTemporaryFilesNotCleanup() throws Exception { - String mappings = "{ \"" + MapperService.SINGLE_MAPPING_NAME + "\": { \"properties\": { \"foo\": { \"type\": \"keyword\"} }}}"; - try (ReplicationGroup shards = createGroup(1, settings, mappings, new NRTReplicationEngineFactory())) { - shards.startAll(); - IndexShard primaryShard = shards.getPrimary(); - final IndexShard replica = shards.getReplicas().get(0); - - // Step 1. Ingest numDocs documents, commit to create commit point on primary & replicate - final int numDocs = randomIntBetween(100, 200); - logger.info("--> Inserting documents {}", numDocs); - for (int i = 0; i < numDocs; i++) { - shards.index(new IndexRequest(index.getName()).id(String.valueOf(i)).source("{\"foo\": \"bar\"}", XContentType.JSON)); - } - assertEqualTranslogOperations(shards, primaryShard); - primaryShard.flush(new FlushRequest().waitIfOngoing(true).force(true)); - replicateSegments(primaryShard, shards.getReplicas()); - shards.assertAllEqual(numDocs); - - // Step 2. Ingest numDocs documents again to create a new commit on primary - logger.info("--> Ingest {} docs again", numDocs); - for (int i = 0; i < numDocs; i++) { - shards.index(new IndexRequest(index.getName()).id(String.valueOf(i)).source("{\"foo\": \"bar\"}", XContentType.JSON)); - } - assertEqualTranslogOperations(shards, primaryShard); - primaryShard.flush(new FlushRequest().waitIfOngoing(true).force(true)); - - // Step 3. Copy segment files to replica shard but prevent commit - final CountDownLatch countDownLatch = new CountDownLatch(1); - Map<String, StoreFileMetadata> primaryMetadata; - try (final GatedCloseable<SegmentInfos> segmentInfosSnapshot = primaryShard.getSegmentInfosSnapshot()) { - final SegmentInfos primarySegmentInfos = segmentInfosSnapshot.get(); - primaryMetadata = primaryShard.store().getSegmentMetadataMap(primarySegmentInfos); - } - final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); - final IndicesService indicesService = mock(IndicesService.class); - when(indicesService.getShardOrNull(replica.shardId)).thenReturn(replica); - final SegmentReplicationTargetService targetService = new SegmentReplicationTargetService( - threadPool, - new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), - mock(TransportService.class), - sourceFactory, - indicesService, - clusterService - ); - final Consumer<IndexShard> runnablePostGetFiles = (indexShard) -> { - try { - Collection<String> temporaryFiles = Stream.of(indexShard.store().directory().listAll()) - .filter(name -> name.startsWith(SegmentReplicationTarget.REPLICATION_PREFIX)) - .collect(Collectors.toList()); - - // Step 4. 
Perform a commit on replica shard. - NRTReplicationEngine engine = (NRTReplicationEngine) indexShard.getEngine(); - engine.updateSegments(engine.getSegmentInfosSnapshot().get()); - - // Step 5. Validate temporary files are not deleted from store. - Collection<String> replicaStoreFiles = List.of(indexShard.store().directory().listAll()); - assertTrue(replicaStoreFiles.containsAll(temporaryFiles)); - } catch (IOException e) { - throw new RuntimeException(e); - } - }; - SegmentReplicationSource segmentReplicationSource = getSegmentReplicationSource( - primaryShard, - (repId) -> targetService.get(repId), - runnablePostGetFiles - ); - when(sourceFactory.get(any())).thenReturn(segmentReplicationSource); - targetService.startReplication(replica, getTargetListener(primaryShard, replica, primaryMetadata, countDownLatch)); - countDownLatch.await(30, TimeUnit.SECONDS); - assertEquals("Replication failed", 0, countDownLatch.getCount()); - shards.assertAllEqual(numDocs); - } - } - - public void testSegmentReplication_Index_Update_Delete() throws Exception { - String mappings = "{ \"" + MapperService.SINGLE_MAPPING_NAME + "\": { \"properties\": { \"foo\": { \"type\": \"keyword\"} }}}"; - try (ReplicationGroup shards = createGroup(2, settings, mappings, new NRTReplicationEngineFactory())) { - shards.startAll(); - final IndexShard primaryShard = shards.getPrimary(); - - final int numDocs = randomIntBetween(100, 200); - for (int i = 0; i < numDocs; i++) { - shards.index(new IndexRequest(index.getName()).id(String.valueOf(i)).source("{\"foo\": \"bar\"}", XContentType.JSON)); - } - - assertEqualTranslogOperations(shards, primaryShard); - primaryShard.refresh("Test"); - replicateSegments(primaryShard, shards.getReplicas()); - - shards.assertAllEqual(numDocs); - - for (int i = 0; i < numDocs; i++) { - // randomly update docs. - if (randomBoolean()) { - shards.index( - new IndexRequest(index.getName()).id(String.valueOf(i)).source("{ \"foo\" : \"baz\" }", XContentType.JSON) - ); - } - } - assertEqualTranslogOperations(shards, primaryShard); - primaryShard.refresh("Test"); - replicateSegments(primaryShard, shards.getReplicas()); - shards.assertAllEqual(numDocs); - - final List<DocIdSeqNoAndSource> docs = getDocIdAndSeqNos(primaryShard); - for (IndexShard shard : shards.getReplicas()) { - assertEquals(getDocIdAndSeqNos(shard), docs); - } - for (int i = 0; i < numDocs; i++) { - // randomly delete. 
- if (randomBoolean()) { - shards.delete(new DeleteRequest(index.getName()).id(String.valueOf(i))); - } - } - assertEqualTranslogOperations(shards, primaryShard); - primaryShard.refresh("Test"); - replicateSegments(primaryShard, shards.getReplicas()); - final List<DocIdSeqNoAndSource> docsAfterDelete = getDocIdAndSeqNos(primaryShard); - for (IndexShard shard : shards.getReplicas()) { - assertEquals(getDocIdAndSeqNos(shard), docsAfterDelete); - } - } - } - public void testIgnoreShardIdle() throws Exception { Settings updatedSettings = Settings.builder() - .put(settings) + .put(getIndexSettings()) .put(IndexSettings.INDEX_SEARCH_IDLE_AFTER.getKey(), TimeValue.ZERO) .build(); try (ReplicationGroup shards = createGroup(1, updatedSettings, new NRTReplicationEngineFactory())) { @@ -464,24 +429,91 @@ public void testShardIdle_Docrep() throws Exception { public void testShardIdleWithNoReplicas() throws Exception { Settings updatedSettings = Settings.builder() - .put(settings) + .put(getIndexSettings()) .put(IndexSettings.INDEX_SEARCH_IDLE_AFTER.getKey(), TimeValue.ZERO) .build(); try (ReplicationGroup shards = createGroup(0, updatedSettings, new NRTReplicationEngineFactory())) { shards.startAll(); final IndexShard primary = shards.getPrimary(); shards.indexDocs(randomIntBetween(1, 10)); - // ensure search idle conditions are met. - assertTrue(primary.isSearchIdle()); - assertFalse(primary.scheduledRefresh()); - assertTrue(primary.hasRefreshPending()); + validateShardIdleWithNoReplicas(primary); } } + protected void validateShardIdleWithNoReplicas(IndexShard primary) { + // ensure search idle conditions are met. + assertTrue(primary.isSearchIdle()); + assertFalse(primary.scheduledRefresh()); + assertTrue(primary.hasRefreshPending()); + } + /** * here we are starting a new primary shard in PrimaryMode and testing if the shard publishes checkpoint after refresh. 
*/ - public void testPublishCheckpointOnPrimaryMode() throws IOException { + public void testPublishCheckpointOnPrimaryMode() throws IOException, InterruptedException { + final SegmentReplicationCheckpointPublisher mock = mock(SegmentReplicationCheckpointPublisher.class); + IndexShard shard = newStartedShard(p -> newShard(false, mock, settings), false); + + final ShardRouting shardRouting = shard.routingEntry(); + promoteReplica( + shard, + Collections.singleton(shardRouting.allocationId().getId()), + new IndexShardRoutingTable.Builder(shardRouting.shardId()).addShard(shardRouting).build() + ); + + final CountDownLatch latch = new CountDownLatch(1); + shard.acquirePrimaryOperationPermit(new ActionListener<Releasable>() { + @Override + public void onResponse(Releasable releasable) { + releasable.close(); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + throw new RuntimeException(e); + } + }, ThreadPool.Names.GENERIC, ""); + + latch.await(); + // verify checkpoint is published + verify(mock, times(1)).publish(any(), any()); + closeShards(shard); + } + + public void testPublishCheckpointOnPrimaryMode_segrep_off() throws IOException, InterruptedException { + final SegmentReplicationCheckpointPublisher mock = mock(SegmentReplicationCheckpointPublisher.class); + final Settings settings = Settings.builder().put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT).build(); + IndexShard shard = newStartedShard(p -> newShard(false, mock, settings), false); + + final ShardRouting shardRouting = shard.routingEntry(); + promoteReplica( + shard, + Collections.singleton(shardRouting.allocationId().getId()), + new IndexShardRoutingTable.Builder(shardRouting.shardId()).addShard(shardRouting).build() + ); + + final CountDownLatch latch = new CountDownLatch(1); + shard.acquirePrimaryOperationPermit(new ActionListener<Releasable>() { + @Override + public void onResponse(Releasable releasable) { + releasable.close(); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + throw new RuntimeException(e); + } + }, ThreadPool.Names.GENERIC, ""); + + latch.await(); + // verify checkpoint is not published since segment replication is off + verify(mock, times(0)).publish(any(), any()); + closeShards(shard); + } + + public void testPublishCheckpointPostFailover() throws IOException { final SegmentReplicationCheckpointPublisher mock = mock(SegmentReplicationCheckpointPublisher.class); IndexShard shard = newStartedShard(true); CheckpointRefreshListener refreshListener = new CheckpointRefreshListener(shard, mock); @@ -540,136 +572,14 @@ public void testRejectCheckpointOnShardRoutingPrimary() throws IOException { spy.onNewCheckpoint(new ReplicationCheckpoint(primaryShard.shardId(), 0L, 0L, 0L, Codec.getDefault().getName()), spyShard); // Verify that checkpoint is not processed as shard routing is primary.
- verify(spy, times(0)).startReplication(any(), any()); + verify(spy, times(0)).startReplication(any(), any(), any()); closeShards(primaryShard); } - public void testReplicaReceivesGenIncrease() throws Exception { - try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { - shards.startAll(); - final IndexShard primary = shards.getPrimary(); - final IndexShard replica = shards.getReplicas().get(0); - final int numDocs = randomIntBetween(10, 100); - shards.indexDocs(numDocs); - assertEquals(numDocs, primary.translogStats().estimatedNumberOfOperations()); - assertEquals(numDocs, replica.translogStats().estimatedNumberOfOperations()); - assertEquals(numDocs, primary.translogStats().getUncommittedOperations()); - assertEquals(numDocs, replica.translogStats().getUncommittedOperations()); - flushShard(primary, true); - replicateSegments(primary, shards.getReplicas()); - assertEquals(0, primary.translogStats().estimatedNumberOfOperations()); - assertEquals(0, replica.translogStats().estimatedNumberOfOperations()); - assertEquals(0, primary.translogStats().getUncommittedOperations()); - assertEquals(0, replica.translogStats().getUncommittedOperations()); - - final int additionalDocs = shards.indexDocs(randomIntBetween(numDocs + 1, numDocs + 10)); - - final int totalDocs = numDocs + additionalDocs; - primary.refresh("test"); - replicateSegments(primary, shards.getReplicas()); - assertEquals(additionalDocs, primary.translogStats().estimatedNumberOfOperations()); - assertEquals(additionalDocs, replica.translogStats().estimatedNumberOfOperations()); - assertEquals(additionalDocs, primary.translogStats().getUncommittedOperations()); - assertEquals(additionalDocs, replica.translogStats().getUncommittedOperations()); - flushShard(primary, true); - replicateSegments(primary, shards.getReplicas()); - - assertEqualCommittedSegments(primary, replica); - assertDocCount(primary, totalDocs); - assertDocCount(replica, totalDocs); - assertEquals(0, primary.translogStats().estimatedNumberOfOperations()); - assertEquals(0, replica.translogStats().estimatedNumberOfOperations()); - assertEquals(0, primary.translogStats().getUncommittedOperations()); - assertEquals(0, replica.translogStats().getUncommittedOperations()); - } - } - - public void testPrimaryRelocation() throws Exception { - final IndexShard primarySource = newStartedShard(true, settings); - int totalOps = randomInt(10); - for (int i = 0; i < totalOps; i++) { - indexDoc(primarySource, "_doc", Integer.toString(i)); - } - IndexShardTestCase.updateRoutingEntry(primarySource, primarySource.routingEntry().relocate(randomAlphaOfLength(10), -1)); - final IndexShard primaryTarget = newShard( - primarySource.routingEntry().getTargetRelocatingShard(), - settings, - new NRTReplicationEngineFactory() - ); - updateMappings(primaryTarget, primarySource.indexSettings().getIndexMetadata()); - - Function<List<IndexShard>, List<SegmentReplicationTarget>> replicatePrimaryFunction = (shardList) -> { - try { - assert shardList.size() >= 2; - final IndexShard primary = shardList.get(0); - return replicateSegments(primary, shardList.subList(1, shardList.size())); - } catch (IOException | InterruptedException e) { - throw new RuntimeException(e); - } - }; - recoverReplica(primaryTarget, primarySource, true, replicatePrimaryFunction); - - // check that local checkpoint of new primary is properly tracked after primary relocation - assertThat(primaryTarget.getLocalCheckpoint(), equalTo(totalOps - 1L)); - assertThat( - 
primaryTarget.getReplicationTracker() - .getTrackedLocalCheckpointForShard(primaryTarget.routingEntry().allocationId().getId()) - .getLocalCheckpoint(), - equalTo(totalOps - 1L) - ); - assertDocCount(primaryTarget, totalOps); - closeShards(primarySource, primaryTarget); - } - - public void testPrimaryRelocationWithSegRepFailure() throws Exception { - final IndexShard primarySource = newStartedShard(true, settings); - int totalOps = randomInt(10); - for (int i = 0; i < totalOps; i++) { - indexDoc(primarySource, "_doc", Integer.toString(i)); - } - IndexShardTestCase.updateRoutingEntry(primarySource, primarySource.routingEntry().relocate(randomAlphaOfLength(10), -1)); - final IndexShard primaryTarget = newShard( - primarySource.routingEntry().getTargetRelocatingShard(), - settings, - new NRTReplicationEngineFactory() - ); - updateMappings(primaryTarget, primarySource.indexSettings().getIndexMetadata()); - - Function<List<IndexShard>, List<SegmentReplicationTarget>> replicatePrimaryFunction = (shardList) -> { - try { - throw new IOException("Expected failure"); - } catch (IOException e) { - throw new RuntimeException(e); - } - }; - Exception e = expectThrows( - Exception.class, - () -> recoverReplica( - primaryTarget, - primarySource, - (primary, sourceNode) -> new RecoveryTarget(primary, sourceNode, new ReplicationListener() { - @Override - public void onDone(ReplicationState state) { - throw new AssertionError("recovery must fail"); - } - - @Override - public void onFailure(ReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { - assertEquals(ExceptionsHelper.unwrap(e, IOException.class).getMessage(), "Expected failure"); - } - }), - true, - true, - replicatePrimaryFunction - ) - ); - closeShards(primarySource, primaryTarget); - } - // Todo: Remove this test when there is a better mechanism to test a functionality passing in different replication // strategy. public void testLockingBeforeAndAfterRelocated() throws Exception { - final IndexShard shard = newStartedShard(true, settings); + final IndexShard shard = newStartedShard(true, getIndexSettings()); final ShardRouting routing = ShardRoutingHelper.relocate(shard.routingEntry(), "other_node"); IndexShardTestCase.updateRoutingEntry(shard, routing); CountDownLatch latch = new CountDownLatch(1); @@ -702,7 +612,7 @@ public void testLockingBeforeAndAfterRelocated() throws Exception { // Todo: Remove this test when there is a better mechanism to test a functionality passing in different replication // strategy. public void testDelayedOperationsBeforeAndAfterRelocated() throws Exception { - final IndexShard shard = newStartedShard(true, settings); + final IndexShard shard = newStartedShard(true, getIndexSettings()); final ShardRouting routing = ShardRoutingHelper.relocate(shard.routingEntry(), "other_node"); IndexShardTestCase.updateRoutingEntry(shard, routing); final CountDownLatch startRecovery = new CountDownLatch(1); @@ -776,294 +686,42 @@ public void onFailure(Exception e) { closeShards(shard); } - public void testReplicaReceivesLowerGeneration() throws Exception { - // when a replica gets incoming segments that are lower than what it currently has on disk. - - // start 3 nodes Gens: P [2], R [2], R[2] - // index some docs and flush twice, push to only 1 replica. - // State Gens: P [4], R-1 [3], R-2 [2] - // Promote R-2 as the new primary and demote the old primary. - // State Gens: R[4], R-1 [3], P [4] - *commit on close of NRTEngine, xlog replayed and commit made. 
- // index docs on new primary and flush - // replicate to all. - // Expected result: State Gens: P[4], R-1 [4], R-2 [4] - try (ReplicationGroup shards = createGroup(2, settings, new NRTReplicationEngineFactory())) { + public void testCloseShardDuringFinalize() throws Exception { + try (ReplicationGroup shards = createGroup(1, getIndexSettings(), new NRTReplicationEngineFactory())) { shards.startAll(); - final IndexShard primary = shards.getPrimary(); - final IndexShard replica_1 = shards.getReplicas().get(0); - final IndexShard replica_2 = shards.getReplicas().get(1); - int numDocs = randomIntBetween(10, 100); - shards.indexDocs(numDocs); - flushShard(primary, false); - replicateSegments(primary, List.of(replica_1)); - numDocs = randomIntBetween(numDocs + 1, numDocs + 10); - shards.indexDocs(numDocs); - flushShard(primary, false); - replicateSegments(primary, List.of(replica_1)); + IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + final IndexShard replicaSpy = spy(replica); - assertEqualCommittedSegments(primary, replica_1); + primary.refresh("Test"); - shards.promoteReplicaToPrimary(replica_2).get(); - primary.close("demoted", false, false); - primary.store().close(); - IndexShard oldPrimary = shards.addReplicaWithExistingPath(primary.shardPath(), primary.routingEntry().currentNodeId()); - shards.recoverReplica(oldPrimary); + doThrow(AlreadyClosedException.class).when(replicaSpy).finalizeReplication(any()); - numDocs = randomIntBetween(numDocs + 1, numDocs + 10); - shards.indexDocs(numDocs); - flushShard(replica_2, false); - replicateSegments(replica_2, shards.getReplicas()); - assertEqualCommittedSegments(replica_2, oldPrimary, replica_1); + replicateSegments(primary, List.of(replicaSpy)); } } - public void testReplicaRestarts() throws Exception { - try (ReplicationGroup shards = createGroup(3, settings, new NRTReplicationEngineFactory())) { + public void testBeforeIndexShardClosedWhileCopyingFiles() throws Exception { + try (ReplicationGroup shards = createGroup(1, getIndexSettings(), new NRTReplicationEngineFactory())) { shards.startAll(); IndexShard primary = shards.getPrimary(); - // 1. Create ops that are in the index and xlog of both shards but not yet part of a commit point. - final int numDocs = shards.indexDocs(randomInt(10)); + final IndexShard replica = shards.getReplicas().get(0); - // refresh and copy the segments over. - if (randomBoolean()) { - flushShard(primary); - } primary.refresh("Test"); - replicateSegments(primary, shards.getReplicas()); - - // at this point both shards should have numDocs persisted and searchable. - assertDocCounts(primary, numDocs, numDocs); - for (IndexShard shard : shards.getReplicas()) { - assertDocCounts(shard, numDocs, numDocs); - } - - final int i1 = randomInt(5); - for (int i = 0; i < i1; i++) { - shards.indexDocs(randomInt(10)); - - // randomly resetart a replica - final IndexShard replicaToRestart = getRandomReplica(shards); - replicaToRestart.close("restart", false, false); - replicaToRestart.store().close(); - shards.removeReplica(replicaToRestart); - final IndexShard newReplica = shards.addReplicaWithExistingPath( - replicaToRestart.shardPath(), - replicaToRestart.routingEntry().currentNodeId() - ); - shards.recoverReplica(newReplica); - - // refresh and push segments to our other replicas. 
- if (randomBoolean()) { - failAndPromoteRandomReplica(shards); - } - flushShard(shards.getPrimary()); - replicateSegments(shards.getPrimary(), shards.getReplicas()); - } - primary = shards.getPrimary(); - - // refresh and push segments to our other replica. - flushShard(primary); - replicateSegments(primary, shards.getReplicas()); - - for (IndexShard shard : shards) { - assertConsistentHistoryBetweenTranslogAndLucene(shard); - } - final List<DocIdSeqNoAndSource> docsAfterReplication = getDocIdAndSeqNos(shards.getPrimary()); - for (IndexShard shard : shards.getReplicas()) { - assertThat(shard.routingEntry().toString(), getDocIdAndSeqNos(shard), equalTo(docsAfterReplication)); - } - } - } - - public void testNRTReplicaWithRemoteStorePromotedAsPrimaryRefreshRefresh() throws Exception { - testNRTReplicaWithRemoteStorePromotedAsPrimary(false, false); - } - - public void testNRTReplicaWithRemoteStorePromotedAsPrimaryRefreshCommit() throws Exception { - testNRTReplicaWithRemoteStorePromotedAsPrimary(false, true); - } - - public void testNRTReplicaWithRemoteStorePromotedAsPrimaryCommitRefresh() throws Exception { - testNRTReplicaWithRemoteStorePromotedAsPrimary(true, false); - } - - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8817") - public void testNRTReplicaWithRemoteStorePromotedAsPrimaryCommitCommit() throws Exception { - testNRTReplicaWithRemoteStorePromotedAsPrimary(true, true); - } - - private void testNRTReplicaWithRemoteStorePromotedAsPrimary(boolean performFlushFirst, boolean performFlushSecond) throws Exception { - Settings settings = Settings.builder() - .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) - .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) - .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, "temp-fs") - .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, "temp-fs") - .build(); - - try (ReplicationGroup shards = createGroup(1, settings, indexMapping, new NRTReplicationEngineFactory(), createTempDir())) { - shards.startAll(); - IndexShard oldPrimary = shards.getPrimary(); - final IndexShard nextPrimary = shards.getReplicas().get(0); - - // 1. Create ops that are in the index and xlog of both shards but not yet part of a commit point. - final int numDocs = shards.indexDocs(randomInt(10)); - - // refresh but do not copy the segments over. - if (performFlushFirst) { - flushShard(oldPrimary, true); - } else { - oldPrimary.refresh("Test"); - } - // replicateSegments(primary, shards.getReplicas()); - - // at this point both shards should have numDocs persisted and searchable. - assertDocCounts(oldPrimary, numDocs, numDocs); - for (IndexShard shard : shards.getReplicas()) { - assertDocCounts(shard, numDocs, 0); - } - - // 2. Create ops that are in the replica's xlog, not in the index. - // index some more into both but don't replicate. replica will have only numDocs searchable, but should have totalDocs - // persisted. 
- final int additonalDocs = shards.indexDocs(randomInt(10)); - final int totalDocs = numDocs + additonalDocs; - - if (performFlushSecond) { - flushShard(oldPrimary, true); - } else { - oldPrimary.refresh("Test"); - } - assertDocCounts(oldPrimary, totalDocs, totalDocs); - for (IndexShard shard : shards.getReplicas()) { - assertDocCounts(shard, totalDocs, 0); - } - assertTrue(nextPrimary.translogStats().estimatedNumberOfOperations() >= additonalDocs); - assertTrue(nextPrimary.translogStats().getUncommittedOperations() >= additonalDocs); - - int prevOperationCount = nextPrimary.translogStats().estimatedNumberOfOperations(); - - // promote the replica - shards.promoteReplicaToPrimary(nextPrimary).get(); - - // close oldPrimary. - oldPrimary.close("demoted", false, false); - oldPrimary.store().close(); - - assertEquals(InternalEngine.class, nextPrimary.getEngine().getClass()); - assertDocCounts(nextPrimary, totalDocs, totalDocs); - - // As we are downloading segments from remote segment store on failover, there should not be - // any operations replayed from translog - assertEquals(prevOperationCount, nextPrimary.translogStats().estimatedNumberOfOperations()); - - // refresh and push segments to our other replica. - nextPrimary.refresh("test"); - - for (IndexShard shard : shards) { - assertConsistentHistoryBetweenTranslogAndLucene(shard); - } - final List<DocIdSeqNoAndSource> docsAfterRecovery = getDocIdAndSeqNos(shards.getPrimary()); - for (IndexShard shard : shards.getReplicas()) { - assertThat(shard.routingEntry().toString(), getDocIdAndSeqNos(shard), equalTo(docsAfterRecovery)); - } - } - } - - public void testNRTReplicaPromotedAsPrimary() throws Exception { - try (ReplicationGroup shards = createGroup(2, settings, new NRTReplicationEngineFactory())) { - shards.startAll(); - IndexShard oldPrimary = shards.getPrimary(); - final IndexShard nextPrimary = shards.getReplicas().get(0); - final IndexShard replica = shards.getReplicas().get(1); - - // 1. Create ops that are in the index and xlog of both shards but not yet part of a commit point. - final int numDocs = shards.indexDocs(randomInt(10)); - - // refresh and copy the segments over. - oldPrimary.refresh("Test"); - replicateSegments(oldPrimary, shards.getReplicas()); - - // at this point both shards should have numDocs persisted and searchable. - assertDocCounts(oldPrimary, numDocs, numDocs); - for (IndexShard shard : shards.getReplicas()) { - assertDocCounts(shard, numDocs, numDocs); - } - assertEqualTranslogOperations(shards, oldPrimary); - - // 2. Create ops that are in the replica's xlog, not in the index. - // index some more into both but don't replicate. replica will have only numDocs searchable, but should have totalDocs - // persisted. 
- final int additonalDocs = shards.indexDocs(randomInt(10)); - final int totalDocs = numDocs + additonalDocs; - - assertDocCounts(oldPrimary, totalDocs, totalDocs); - assertEqualTranslogOperations(shards, oldPrimary); - for (IndexShard shard : shards.getReplicas()) { - assertDocCounts(shard, totalDocs, numDocs); - } - assertEquals(totalDocs, oldPrimary.translogStats().estimatedNumberOfOperations()); - assertEquals(totalDocs, oldPrimary.translogStats().estimatedNumberOfOperations()); - assertEquals(totalDocs, nextPrimary.translogStats().estimatedNumberOfOperations()); - assertEquals(totalDocs, replica.translogStats().estimatedNumberOfOperations()); - assertEquals(totalDocs, nextPrimary.translogStats().getUncommittedOperations()); - assertEquals(totalDocs, replica.translogStats().getUncommittedOperations()); - - // promote the replica - shards.syncGlobalCheckpoint(); - shards.promoteReplicaToPrimary(nextPrimary); - - // close and start the oldPrimary as a replica. - oldPrimary.close("demoted", false, false); - oldPrimary.store().close(); - oldPrimary = shards.addReplicaWithExistingPath(oldPrimary.shardPath(), oldPrimary.routingEntry().currentNodeId()); - shards.recoverReplica(oldPrimary); - - assertEquals(NRTReplicationEngine.class, oldPrimary.getEngine().getClass()); - assertEquals(InternalEngine.class, nextPrimary.getEngine().getClass()); - assertDocCounts(nextPrimary, totalDocs, totalDocs); - assertEquals(0, nextPrimary.translogStats().estimatedNumberOfOperations()); - - // refresh and push segments to our other replica. - nextPrimary.refresh("test"); - replicateSegments(nextPrimary, asList(replica)); - - for (IndexShard shard : shards) { - assertConsistentHistoryBetweenTranslogAndLucene(shard); - } - final List<DocIdSeqNoAndSource> docsAfterRecovery = getDocIdAndSeqNos(shards.getPrimary()); - for (IndexShard shard : shards.getReplicas()) { - assertThat(shard.routingEntry().toString(), getDocIdAndSeqNos(shard), equalTo(docsAfterRecovery)); - } - } - } - - public void testReplicaPromotedWhileReplicating() throws Exception { - try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { - shards.startAll(); - final IndexShard oldPrimary = shards.getPrimary(); - final IndexShard nextPrimary = shards.getReplicas().get(0); - - final int numDocs = shards.indexDocs(randomInt(10)); - oldPrimary.refresh("Test"); - shards.syncGlobalCheckpoint(); final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); SegmentReplicationSource source = new TestReplicationSource() { + + ActionListener<GetSegmentFilesResponse> listener; + @Override public void getCheckpointMetadata( long replicationId, ReplicationCheckpoint checkpoint, ActionListener<CheckpointInfoResponse> listener ) { - resolveCheckpointInfoResponseListener(listener, oldPrimary); - ShardRouting oldRouting = nextPrimary.shardRouting; - try { - shards.promoteReplicaToPrimary(nextPrimary); - } catch (IOException e) { - Assert.fail("Promotion should not fail"); - } - targetService.shardRoutingChanged(nextPrimary, oldRouting, nextPrimary.shardRouting); + resolveCheckpointInfoResponseListener(listener, primary); } @Override @@ -1072,330 +730,325 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List<StoreFileMetadata> filesToFetch, IndexShard indexShard, + BiConsumer<String, Long> fileProgressTracker, ActionListener<GetSegmentFilesResponse> listener ) { - listener.onResponse(new 
GetSegmentFilesResponse(Collections.emptyList())); - } - }; - when(sourceFactory.get(any())).thenReturn(source); - startReplicationAndAssertCancellation(nextPrimary, targetService); - // wait for replica to finish being promoted, and assert doc counts. - final CountDownLatch latch = new CountDownLatch(1); - nextPrimary.acquirePrimaryOperationPermit(new ActionListener<>() { - @Override - public void onResponse(Releasable releasable) { - latch.countDown(); - } - - @Override - public void onFailure(Exception e) { - throw new AssertionError(e); - } - }, ThreadPool.Names.GENERIC, ""); - latch.await(); - assertEquals(nextPrimary.getEngine().getClass(), InternalEngine.class); - nextPrimary.refresh("test"); - - oldPrimary.close("demoted", false, false); - oldPrimary.store().close(); - IndexShard newReplica = shards.addReplicaWithExistingPath(oldPrimary.shardPath(), oldPrimary.routingEntry().currentNodeId()); - shards.recoverReplica(newReplica); - - assertDocCount(nextPrimary, numDocs); - assertDocCount(newReplica, numDocs); - - nextPrimary.refresh("test"); - replicateSegments(nextPrimary, shards.getReplicas()); - final List<DocIdSeqNoAndSource> docsAfterRecovery = getDocIdAndSeqNos(shards.getPrimary()); - for (IndexShard shard : shards.getReplicas()) { - assertThat(shard.routingEntry().toString(), getDocIdAndSeqNos(shard), equalTo(docsAfterRecovery)); - } - } - } - - public void testReplicaClosesWhileReplicating_AfterGetCheckpoint() throws Exception { - try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { - shards.startAll(); - IndexShard primary = shards.getPrimary(); - final IndexShard replica = shards.getReplicas().get(0); - - final int numDocs = shards.indexDocs(randomInt(10)); - primary.refresh("Test"); - - final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); - final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); - SegmentReplicationSource source = new TestReplicationSource() { - @Override - public void getCheckpointMetadata( - long replicationId, - ReplicationCheckpoint checkpoint, - ActionListener<CheckpointInfoResponse> listener - ) { - // trigger a cancellation by closing the replica. + // set the listener, we will only fail it once we cancel the source. + this.listener = listener; + // shard is closing while we are copying files. targetService.beforeIndexShardClosed(replica.shardId, replica, Settings.EMPTY); - resolveCheckpointInfoResponseListener(listener, primary); } @Override - public void getSegmentFiles( - long replicationId, - ReplicationCheckpoint checkpoint, - List<StoreFileMetadata> filesToFetch, - IndexShard indexShard, - ActionListener<GetSegmentFilesResponse> listener - ) { - Assert.fail("Should not be reached"); + public void cancel() { + // simulate listener resolving, but only after we have issued a cancel from beforeIndexShardClosed . 
+ final RuntimeException exception = new CancellableThreads.ExecutionCancelledException("retryable action was cancelled"); + listener.onFailure(exception); } }; when(sourceFactory.get(any())).thenReturn(source); - startReplicationAndAssertCancellation(replica, targetService); + startReplicationAndAssertCancellation(replica, primary, targetService); shards.removeReplica(replica); closeShards(replica); } } - public void testReplicaClosesWhileReplicating_AfterGetSegmentFiles() throws Exception { - try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { - shards.startAll(); - IndexShard primary = shards.getPrimary(); - final IndexShard replica = shards.getReplicas().get(0); + protected SegmentReplicationTargetService newTargetService(SegmentReplicationSourceFactory sourceFactory) { + return new SegmentReplicationTargetService( + threadPool, + new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), + mock(TransportService.class), + sourceFactory, + null, + null + ); + } - final int numDocs = shards.indexDocs(randomInt(10)); - primary.refresh("Test"); + public void testNoDuplicateSeqNo() throws Exception { + Settings settings = Settings.builder().put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build(); + ReplicationGroup shards = createGroup(1, settings, indexMapping, new NRTReplicationEngineFactory(), createTempDir()); + final IndexShard primaryShard = shards.getPrimary(); + final IndexShard replicaShard = shards.getReplicas().get(0); + shards.startPrimary(); + shards.startAll(); + shards.indexDocs(10); + replicateSegments(primaryShard, shards.getReplicas()); - final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); - final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); - SegmentReplicationSource source = new TestReplicationSource() { - @Override - public void getCheckpointMetadata( - long replicationId, - ReplicationCheckpoint checkpoint, - ActionListener<CheckpointInfoResponse> listener - ) { - resolveCheckpointInfoResponseListener(listener, primary); - } + flushShard(primaryShard); + shards.indexDocs(10); + replicateSegments(primaryShard, shards.getReplicas()); - @Override - public void getSegmentFiles( - long replicationId, - ReplicationCheckpoint checkpoint, - List<StoreFileMetadata> filesToFetch, - IndexShard indexShard, - ActionListener<GetSegmentFilesResponse> listener - ) { - // randomly resolve the listener, indicating the source has resolved. 
- listener.onResponse(new GetSegmentFilesResponse(Collections.emptyList())); - targetService.beforeIndexShardClosed(replica.shardId, replica, Settings.EMPTY); - } - }; - when(sourceFactory.get(any())).thenReturn(source); - startReplicationAndAssertCancellation(replica, targetService); + shards.indexDocs(10); + primaryShard.refresh("test"); + replicateSegments(primaryShard, shards.getReplicas()); - shards.removeReplica(replica); - closeShards(replica); + CountDownLatch latch = new CountDownLatch(1); + shards.promoteReplicaToPrimary(replicaShard, (shard, listener) -> { + try { + assertAtMostOneLuceneDocumentPerSequenceNumber(replicaShard.getEngine()); + } catch (IOException e) { + throw new RuntimeException(e); + } + latch.countDown(); + }); + latch.await(); + for (IndexShard shard : shards) { + if (shard != null) { + closeShard(shard, false); + } } } - public void testCloseShardDuringFinalize() throws Exception { - try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + public void testQueryDuringEngineResetShowsDocs() throws Exception { + final NRTReplicationEngineFactory engineFactory = new NRTReplicationEngineFactory(); + final NRTReplicationEngineFactory spy = spy(engineFactory); + try (ReplicationGroup shards = createGroup(1, settings, indexMapping, spy, createTempDir())) { + final IndexShard primaryShard = shards.getPrimary(); + final IndexShard replicaShard = shards.getReplicas().get(0); shards.startAll(); - IndexShard primary = shards.getPrimary(); - final IndexShard replica = shards.getReplicas().get(0); - final IndexShard replicaSpy = spy(replica); - - primary.refresh("Test"); - - doThrow(AlreadyClosedException.class).when(replicaSpy).finalizeReplication(any()); + shards.indexDocs(10); + shards.refresh("test"); + replicateSegments(primaryShard, shards.getReplicas()); + shards.assertAllEqual(10); - replicateSegments(primary, List.of(replicaSpy)); + final AtomicReference<Throwable> failed = new AtomicReference<>(); + doAnswer(ans -> { + try { + final Engine engineOrNull = replicaShard.getEngineOrNull(); + assertNotNull(engineOrNull); + assertTrue(engineOrNull instanceof ReadOnlyEngine); + shards.assertAllEqual(10); + } catch (Throwable e) { + failed.set(e); + } + return ans.callRealMethod(); + }).when(spy).newReadWriteEngine(any()); + shards.promoteReplicaToPrimary(replicaShard).get(); + assertNull("Expected correct doc count during engine reset", failed.get()); } } - public void testCloseShardWhileGettingCheckpoint() throws Exception { - try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + public void testSegmentReplicationStats() throws Exception { + final NRTReplicationEngineFactory engineFactory = new NRTReplicationEngineFactory(); + final NRTReplicationEngineFactory spy = spy(engineFactory); + try (ReplicationGroup shards = createGroup(1, settings, indexMapping, spy, createTempDir())) { + final IndexShard primaryShard = shards.getPrimary(); + final IndexShard replicaShard = shards.getReplicas().get(0); shards.startAll(); - IndexShard primary = shards.getPrimary(); - final IndexShard replica = shards.getReplicas().get(0); - primary.refresh("Test"); + assertReplicaCaughtUp(primaryShard); - final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); - final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); - SegmentReplicationSource source = new TestReplicationSource() { + shards.indexDocs(10); + shards.refresh("test"); - 
ActionListener<CheckpointInfoResponse> listener; + final ReplicationCheckpoint primaryCheckpoint = primaryShard.getLatestReplicationCheckpoint(); + final long initialCheckpointSize = primaryCheckpoint.getMetadataMap() + .values() + .stream() + .mapToLong(StoreFileMetadata::length) + .sum(); + + Set<SegmentReplicationShardStats> postRefreshStats = primaryShard.getReplicationStatsForTrackedReplicas(); + SegmentReplicationShardStats shardStats = postRefreshStats.stream().findFirst().get(); + assertEquals(1, shardStats.getCheckpointsBehindCount()); + assertEquals(initialCheckpointSize, shardStats.getBytesBehindCount()); + replicateSegments(primaryShard, shards.getReplicas()); + assertReplicaCaughtUp(primaryShard); + shards.assertAllEqual(10); + + final List<DocIdSeqNoAndSource> docIdAndSeqNos = getDocIdAndSeqNos(primaryShard); + for (DocIdSeqNoAndSource docIdAndSeqNo : docIdAndSeqNos.subList(0, 5)) { + deleteDoc(primaryShard, docIdAndSeqNo.getId()); + // delete on replica for xlog. + deleteDoc(replicaShard, docIdAndSeqNo.getId()); + } + primaryShard.forceMerge(new ForceMergeRequest().maxNumSegments(1).flush(true)); - @Override - public void getCheckpointMetadata( - long replicationId, - ReplicationCheckpoint checkpoint, - ActionListener<CheckpointInfoResponse> listener - ) { - // set the listener, we will only fail it once we cancel the source. - this.listener = listener; - // shard is closing while we are copying files. - targetService.beforeIndexShardClosed(replica.shardId, replica, Settings.EMPTY); - } + final Map<String, StoreFileMetadata> segmentMetadataMap = primaryShard.getSegmentMetadataMap(); + final Store.RecoveryDiff diff = Store.segmentReplicationDiff(segmentMetadataMap, replicaShard.getSegmentMetadataMap()); + final long sizeAfterDeleteAndCommit = diff.missing.stream().mapToLong(StoreFileMetadata::length).sum(); - @Override - public void getSegmentFiles( - long replicationId, - ReplicationCheckpoint checkpoint, - List<StoreFileMetadata> filesToFetch, - IndexShard indexShard, - ActionListener<GetSegmentFilesResponse> listener - ) { - Assert.fail("Unreachable"); - } + final Set<SegmentReplicationShardStats> statsAfterFlush = primaryShard.getReplicationStatsForTrackedReplicas(); + shardStats = statsAfterFlush.stream().findFirst().get(); + assertEquals(sizeAfterDeleteAndCommit, shardStats.getBytesBehindCount()); + assertEquals(1, shardStats.getCheckpointsBehindCount()); - @Override - public void cancel() { - // simulate listener resolving, but only after we have issued a cancel from beforeIndexShardClosed . 
- final RuntimeException exception = new CancellableThreads.ExecutionCancelledException("retryable action was cancelled"); - listener.onFailure(exception); - } - }; - when(sourceFactory.get(any())).thenReturn(source); - startReplicationAndAssertCancellation(replica, targetService); - - shards.removeReplica(replica); - closeShards(replica); + replicateSegments(primaryShard, shards.getReplicas()); + assertReplicaCaughtUp(primaryShard); + shards.assertAllEqual(5); } } - public void testBeforeIndexShardClosedWhileCopyingFiles() throws Exception { - try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + public void testSnapshotWhileFailoverIncomplete() throws Exception { + final NRTReplicationEngineFactory engineFactory = new NRTReplicationEngineFactory(); + final NRTReplicationEngineFactory spy = spy(engineFactory); + try (ReplicationGroup shards = createGroup(1, settings, indexMapping, spy, createTempDir())) { + final IndexShard primaryShard = shards.getPrimary(); + final IndexShard replicaShard = shards.getReplicas().get(0); shards.startAll(); - IndexShard primary = shards.getPrimary(); - final IndexShard replica = shards.getReplicas().get(0); - - primary.refresh("Test"); - - final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); - final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); - SegmentReplicationSource source = new TestReplicationSource() { - - ActionListener<GetSegmentFilesResponse> listener; - - @Override - public void getCheckpointMetadata( - long replicationId, - ReplicationCheckpoint checkpoint, - ActionListener<CheckpointInfoResponse> listener - ) { - resolveCheckpointInfoResponseListener(listener, primary); - } - - @Override - public void getSegmentFiles( - long replicationId, - ReplicationCheckpoint checkpoint, - List<StoreFileMetadata> filesToFetch, - IndexShard indexShard, - ActionListener<GetSegmentFilesResponse> listener - ) { - // set the listener, we will only fail it once we cancel the source. - this.listener = listener; - // shard is closing while we are copying files. - targetService.beforeIndexShardClosed(replica.shardId, replica, Settings.EMPTY); - } - - @Override - public void cancel() { - // simulate listener resolving, but only after we have issued a cancel from beforeIndexShardClosed . 
- final RuntimeException exception = new CancellableThreads.ExecutionCancelledException("retryable action was cancelled"); - listener.onFailure(exception); - } - }; - when(sourceFactory.get(any())).thenReturn(source); - startReplicationAndAssertCancellation(replica, targetService); - - shards.removeReplica(replica); - closeShards(replica); + shards.indexDocs(10); + shards.refresh("test"); + replicateSegments(primaryShard, shards.getReplicas()); + shards.assertAllEqual(10); + + final SnapshotShardsService shardsService = getSnapshotShardsService(replicaShard); + final Snapshot snapshot = new Snapshot(randomAlphaOfLength(10), new SnapshotId(randomAlphaOfLength(5), randomAlphaOfLength(5))); + + final ClusterState initState = addSnapshotIndex(clusterService.state(), snapshot, replicaShard, SnapshotsInProgress.State.INIT); + shardsService.clusterChanged(new ClusterChangedEvent("test", initState, clusterService.state())); + + CountDownLatch latch = new CountDownLatch(1); + doAnswer(ans -> { + final Engine engineOrNull = replicaShard.getEngineOrNull(); + assertNotNull(engineOrNull); + assertTrue(engineOrNull instanceof ReadOnlyEngine); + shards.assertAllEqual(10); + shardsService.clusterChanged( + new ClusterChangedEvent( + "test", + addSnapshotIndex(clusterService.state(), snapshot, replicaShard, SnapshotsInProgress.State.STARTED), + initState + ) + ); + latch.countDown(); + return ans.callRealMethod(); + }).when(spy).newReadWriteEngine(any()); + shards.promoteReplicaToPrimary(replicaShard).get(); + latch.await(); + assertBusy(() -> { + final IndexShardSnapshotStatus.Copy copy = shardsService.currentSnapshotShards(snapshot).get(replicaShard.shardId).asCopy(); + final IndexShardSnapshotStatus.Stage stage = copy.getStage(); + assertEquals(IndexShardSnapshotStatus.Stage.FAILURE, stage); + assertNotNull(copy.getFailure()); + assertTrue( + copy.getFailure() + .contains("snapshot triggered on a new primary following failover and cannot proceed until promotion is complete") + ); + }); } } - public void testPrimaryCancelsExecution() throws Exception { - try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + public void testReuseReplicationCheckpointWhenLatestInfosIsUnChanged() throws Exception { + try (ReplicationGroup shards = createGroup(1, settings, indexMapping, new NRTReplicationEngineFactory(), createTempDir())) { + final IndexShard primaryShard = shards.getPrimary(); shards.startAll(); - IndexShard primary = shards.getPrimary(); - final IndexShard replica = shards.getReplicas().get(0); - - final int numDocs = shards.indexDocs(randomInt(10)); - primary.refresh("Test"); - - final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); - final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); - SegmentReplicationSource source = new TestReplicationSource() { - @Override - public void getCheckpointMetadata( - long replicationId, - ReplicationCheckpoint checkpoint, - ActionListener<CheckpointInfoResponse> listener - ) { - listener.onFailure(new CancellableThreads.ExecutionCancelledException("Cancelled")); - } - - @Override - public void getSegmentFiles( - long replicationId, - ReplicationCheckpoint checkpoint, - List<StoreFileMetadata> filesToFetch, - IndexShard indexShard, - ActionListener<GetSegmentFilesResponse> listener - ) {} - }; - when(sourceFactory.get(any())).thenReturn(source); - startReplicationAndAssertCancellation(replica, targetService); + shards.indexDocs(10); + 
shards.refresh("test"); + replicateSegments(primaryShard, shards.getReplicas()); + shards.assertAllEqual(10); + final ReplicationCheckpoint latestReplicationCheckpoint = primaryShard.getLatestReplicationCheckpoint(); + try (GatedCloseable<SegmentInfos> segmentInfosSnapshot = primaryShard.getSegmentInfosSnapshot()) { + assertEquals(latestReplicationCheckpoint, primaryShard.computeReplicationCheckpoint(segmentInfosSnapshot.get())); + } + final Tuple<GatedCloseable<SegmentInfos>, ReplicationCheckpoint> latestSegmentInfosAndCheckpoint = primaryShard + .getLatestSegmentInfosAndCheckpoint(); + try (final GatedCloseable<SegmentInfos> closeable = latestSegmentInfosAndCheckpoint.v1()) { + assertEquals(latestReplicationCheckpoint, primaryShard.computeReplicationCheckpoint(closeable.get())); + } + } + } - shards.removeReplica(replica); - closeShards(replica); + public void testComputeReplicationCheckpointNullInfosReturnsEmptyCheckpoint() throws Exception { + try (ReplicationGroup shards = createGroup(1, settings, indexMapping, new NRTReplicationEngineFactory(), createTempDir())) { + final IndexShard primaryShard = shards.getPrimary(); + assertEquals(ReplicationCheckpoint.empty(primaryShard.shardId), primaryShard.computeReplicationCheckpoint(null)); } } - private SegmentReplicationTargetService newTargetService(SegmentReplicationSourceFactory sourceFactory) { - return new SegmentReplicationTargetService( - threadPool, - new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), - mock(TransportService.class), - sourceFactory, + private SnapshotShardsService getSnapshotShardsService(IndexShard replicaShard) { + final TransportService transportService = mock(TransportService.class); + when(transportService.getThreadPool()).thenReturn(threadPool); + final IndicesService indicesService = mock(IndicesService.class); + final IndexService indexService = mock(IndexService.class); + when(indicesService.indexServiceSafe(any())).thenReturn(indexService); + when(indexService.getShardOrNull(anyInt())).thenReturn(replicaShard); + return new SnapshotShardsService(settings, clusterService, createRepositoriesService(), transportService, indicesService); + } + + private ClusterState addSnapshotIndex( + ClusterState state, + Snapshot snapshot, + IndexShard shard, + SnapshotsInProgress.State snapshotState + ) { + final Map<ShardId, SnapshotsInProgress.ShardSnapshotStatus> shardsBuilder = new HashMap<>(); + ShardRouting shardRouting = shard.shardRouting; + shardsBuilder.put( + shardRouting.shardId(), + new SnapshotsInProgress.ShardSnapshotStatus(state.getNodes().getLocalNode().getId(), "1") + ); + final SnapshotsInProgress.Entry entry = new SnapshotsInProgress.Entry( + snapshot, + randomBoolean(), + false, + snapshotState, + Collections.singletonList(new IndexId(index.getName(), index.getUUID())), + Collections.emptyList(), + randomNonNegativeLong(), + randomLong(), + shardsBuilder, null, - null + SnapshotInfoTests.randomUserMetadata(), + VersionUtils.randomVersion(random()), + false ); + return ClusterState.builder(state) + .putCustom(SnapshotsInProgress.TYPE, SnapshotsInProgress.of(Collections.singletonList(entry))) + .build(); + } + + private void assertReplicaCaughtUp(IndexShard primaryShard) { + Set<SegmentReplicationShardStats> initialStats = primaryShard.getReplicationStatsForTrackedReplicas(); + assertEquals(initialStats.size(), 1); + SegmentReplicationShardStats shardStats = initialStats.stream().findFirst().get(); + assertEquals(0, 
shardStats.getCheckpointsBehindCount()); + assertEquals(0, shardStats.getBytesBehindCount()); } /** * Assert persisted and searchable doc counts. This method should not be used while docs are concurrently indexed because * it asserts point in time seqNos are relative to the doc counts. */ - private void assertDocCounts(IndexShard indexShard, int expectedPersistedDocCount, int expectedSearchableDocCount) throws IOException { + protected void assertDocCounts(IndexShard indexShard, int expectedPersistedDocCount, int expectedSearchableDocCount) + throws IOException { assertDocCount(indexShard, expectedSearchableDocCount); // assigned seqNos start at 0, so assert max & local seqNos are 1 less than our persisted doc count. assertEquals(expectedPersistedDocCount - 1, indexShard.seqNoStats().getMaxSeqNo()); assertEquals(expectedPersistedDocCount - 1, indexShard.seqNoStats().getLocalCheckpoint()); } - private void resolveCheckpointInfoResponseListener(ActionListener<CheckpointInfoResponse> listener, IndexShard primary) { + protected void resolveCheckpointInfoResponseListener(ActionListener<CheckpointInfoResponse> listener, IndexShard primary) { + final CopyState copyState; try { - final CopyState copyState = new CopyState( + copyState = new CopyState( ReplicationCheckpoint.empty(primary.shardId, primary.getLatestReplicationCheckpoint().getCodec()), primary ); - listener.onResponse( - new CheckpointInfoResponse(copyState.getCheckpoint(), copyState.getMetadataMap(), copyState.getInfosBytes()) - ); } catch (IOException e) { logger.error("Unexpected error computing CopyState", e); Assert.fail("Failed to compute copyState"); + throw new UncheckedIOException(e); + } + + try { + listener.onResponse( + new CheckpointInfoResponse(copyState.getCheckpoint(), copyState.getMetadataMap(), copyState.getInfosBytes()) + ); + } finally { + copyState.decRef(); } } - private void startReplicationAndAssertCancellation(IndexShard replica, SegmentReplicationTargetService targetService) - throws InterruptedException { + protected void startReplicationAndAssertCancellation( + IndexShard replica, + IndexShard primary, + SegmentReplicationTargetService targetService + ) throws InterruptedException { CountDownLatch latch = new CountDownLatch(1); final SegmentReplicationTarget target = targetService.startReplication( replica, + primary.getLatestReplicationCheckpoint(), new SegmentReplicationTargetService.SegmentReplicationListener() { @Override public void onReplicationDone(SegmentReplicationState state) { @@ -1415,11 +1068,11 @@ public void onReplicationFailure(SegmentReplicationState state, ReplicationFaile assertNull(targetService.get(target.getId())); } - private IndexShard getRandomReplica(ReplicationGroup shards) { + protected IndexShard getRandomReplica(ReplicationGroup shards) { return shards.getReplicas().get(randomInt(shards.getReplicas().size() - 1)); } - private IndexShard failAndPromoteRandomReplica(ReplicationGroup shards) throws IOException { + protected IndexShard failAndPromoteRandomReplica(ReplicationGroup shards) throws IOException { IndexShard primary = shards.getPrimary(); final IndexShard newPrimary = getRandomReplica(shards); shards.promoteReplicaToPrimary(newPrimary); @@ -1430,7 +1083,7 @@ private IndexShard failAndPromoteRandomReplica(ReplicationGroup shards) throws I return newPrimary; } - private void assertEqualCommittedSegments(IndexShard primary, IndexShard... replicas) throws IOException { + protected void assertEqualCommittedSegments(IndexShard primary, IndexShard... 
replicas) throws IOException { for (IndexShard replica : replicas) { final SegmentInfos replicaInfos = replica.store().readLastCommittedSegmentsInfo(); final SegmentInfos primaryInfos = primary.store().readLastCommittedSegmentsInfo(); @@ -1442,7 +1095,7 @@ private void assertEqualCommittedSegments(IndexShard primary, IndexShard... repl } } - private void assertEqualTranslogOperations(ReplicationGroup shards, IndexShard primaryShard) throws IOException { + protected void assertEqualTranslogOperations(ReplicationGroup shards, IndexShard primaryShard) throws IOException { try (final Translog.Snapshot snapshot = getTranslog(primaryShard).newSnapshot()) { List<Translog.Operation> operations = new ArrayList<>(); Translog.Operation op; diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationWithNodeToNodeIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationWithNodeToNodeIndexShardTests.java new file mode 100644 index 0000000000000..f0950fe5392de --- /dev/null +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationWithNodeToNodeIndexShardTests.java @@ -0,0 +1,718 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.shard; + +import org.apache.lucene.index.SegmentInfos; +import org.opensearch.action.admin.indices.flush.FlushRequest; +import org.opensearch.action.delete.DeleteRequest; +import org.opensearch.action.index.IndexRequest; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.concurrent.GatedCloseable; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.CancellableThreads; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.index.engine.DocIdSeqNoAndSource; +import org.opensearch.index.engine.InternalEngine; +import org.opensearch.index.engine.NRTReplicationEngine; +import org.opensearch.index.engine.NRTReplicationEngineFactory; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.replication.TestReplicationSource; +import org.opensearch.index.store.StoreFileMetadata; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.indices.replication.CheckpointInfoResponse; +import org.opensearch.indices.replication.GetSegmentFilesResponse; +import org.opensearch.indices.replication.SegmentReplicationSource; +import org.opensearch.indices.replication.SegmentReplicationSourceFactory; +import org.opensearch.indices.replication.SegmentReplicationTarget; +import org.opensearch.indices.replication.SegmentReplicationTargetService; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; +import org.junit.Assert; + +import java.io.IOException; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.stream.Collectors; +import 
java.util.stream.Stream; + +import static java.util.Arrays.asList; +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SegmentReplicationWithNodeToNodeIndexShardTests extends SegmentReplicationIndexShardTests { + + public void testReplicaClosesWhileReplicating_AfterGetSegmentFiles() throws Exception { + try (ReplicationGroup shards = createGroup(1, getIndexSettings(), new NRTReplicationEngineFactory())) { + shards.startAll(); + IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + + final int numDocs = shards.indexDocs(randomInt(10)); + primary.refresh("Test"); + + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); + SegmentReplicationSource source = new TestReplicationSource() { + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener<CheckpointInfoResponse> listener + ) { + resolveCheckpointInfoResponseListener(listener, primary); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List<StoreFileMetadata> filesToFetch, + IndexShard indexShard, + BiConsumer<String, Long> fileProgressTracker, + ActionListener<GetSegmentFilesResponse> listener + ) { + // randomly resolve the listener, indicating the source has resolved. + listener.onResponse(new GetSegmentFilesResponse(Collections.emptyList())); + targetService.beforeIndexShardClosed(replica.shardId, replica, Settings.EMPTY); + } + }; + when(sourceFactory.get(any())).thenReturn(source); + startReplicationAndAssertCancellation(replica, primary, targetService); + + shards.removeReplica(replica); + closeShards(replica); + } + } + + public void testReplicaClosesWhileReplicating_AfterGetCheckpoint() throws Exception { + try (ReplicationGroup shards = createGroup(1, getIndexSettings(), new NRTReplicationEngineFactory())) { + shards.startAll(); + IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + + final int numDocs = shards.indexDocs(randomInt(10)); + primary.refresh("Test"); + + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); + SegmentReplicationSource source = new TestReplicationSource() { + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener<CheckpointInfoResponse> listener + ) { + // trigger a cancellation by closing the replica. 
+ targetService.beforeIndexShardClosed(replica.shardId, replica, Settings.EMPTY); + resolveCheckpointInfoResponseListener(listener, primary); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List<StoreFileMetadata> filesToFetch, + IndexShard indexShard, + BiConsumer<String, Long> fileProgressTracker, + ActionListener<GetSegmentFilesResponse> listener + ) { + Assert.fail("Should not be reached"); + } + }; + when(sourceFactory.get(any())).thenReturn(source); + startReplicationAndAssertCancellation(replica, primary, targetService); + + shards.removeReplica(replica); + closeShards(replica); + } + } + + public void testCloseShardWhileGettingCheckpoint() throws Exception { + try (ReplicationGroup shards = createGroup(1, getIndexSettings(), new NRTReplicationEngineFactory())) { + shards.startAll(); + IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + + primary.refresh("Test"); + + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); + SegmentReplicationSource source = new TestReplicationSource() { + + ActionListener<CheckpointInfoResponse> listener; + + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener<CheckpointInfoResponse> listener + ) { + // set the listener; we will only fail it once we cancel the source. + this.listener = listener; + // shard is closing while we are copying files. + targetService.beforeIndexShardClosed(replica.shardId, replica, Settings.EMPTY); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List<StoreFileMetadata> filesToFetch, + IndexShard indexShard, + BiConsumer<String, Long> fileProgressTracker, + ActionListener<GetSegmentFilesResponse> listener + ) { + Assert.fail("Unreachable"); + } + + @Override + public void cancel() { + // simulate the listener resolving, but only after we have issued a cancel from beforeIndexShardClosed.
+ final RuntimeException exception = new CancellableThreads.ExecutionCancelledException("retryable action was cancelled"); + listener.onFailure(exception); + } + }; + when(sourceFactory.get(any())).thenReturn(source); + startReplicationAndAssertCancellation(replica, primary, targetService); + + shards.removeReplica(replica); + closeShards(replica); + } + } + + public void testPrimaryCancelsExecution() throws Exception { + try (ReplicationGroup shards = createGroup(1, getIndexSettings(), new NRTReplicationEngineFactory())) { + shards.startAll(); + IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + + final int numDocs = shards.indexDocs(randomInt(10)); + primary.refresh("Test"); + + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); + SegmentReplicationSource source = new TestReplicationSource() { + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener<CheckpointInfoResponse> listener + ) { + listener.onFailure(new CancellableThreads.ExecutionCancelledException("Cancelled")); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List<StoreFileMetadata> filesToFetch, + IndexShard indexShard, + BiConsumer<String, Long> fileProgressTracker, + ActionListener<GetSegmentFilesResponse> listener + ) {} + }; + when(sourceFactory.get(any())).thenReturn(source); + startReplicationAndAssertCancellation(replica, primary, targetService); + + shards.removeReplica(replica); + closeShards(replica); + } + } + + public void testReplicaPromotedWhileReplicating() throws Exception { + try (ReplicationGroup shards = createGroup(1, getIndexSettings(), new NRTReplicationEngineFactory())) { + shards.startAll(); + final IndexShard oldPrimary = shards.getPrimary(); + final IndexShard nextPrimary = shards.getReplicas().get(0); + + final int numDocs = shards.indexDocs(randomInt(10)); + oldPrimary.refresh("Test"); + shards.syncGlobalCheckpoint(); + + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); + SegmentReplicationSource source = new TestReplicationSource() { + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener<CheckpointInfoResponse> listener + ) { + resolveCheckpointInfoResponseListener(listener, oldPrimary); + ShardRouting oldRouting = nextPrimary.shardRouting; + try { + shards.promoteReplicaToPrimary(nextPrimary); + } catch (IOException e) { + Assert.fail("Promotion should not fail"); + } + targetService.shardRoutingChanged(nextPrimary, oldRouting, nextPrimary.shardRouting); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List<StoreFileMetadata> filesToFetch, + IndexShard indexShard, + BiConsumer<String, Long> fileProgressTracker, + ActionListener<GetSegmentFilesResponse> listener + ) { + listener.onResponse(new GetSegmentFilesResponse(Collections.emptyList())); + } + }; + when(sourceFactory.get(any())).thenReturn(source); + startReplicationAndAssertCancellation(nextPrimary, oldPrimary, targetService); + // wait for replica to finish being promoted, and assert doc counts. 
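+            // a primary operation permit is only granted once the promotion has fully completed, so the latch below
+            // doubles as a barrier for the promotion triggered inside getCheckpointMetadata above.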
+            final CountDownLatch latch = new CountDownLatch(1);
+            nextPrimary.acquirePrimaryOperationPermit(new ActionListener<>() {
+                @Override
+                public void onResponse(Releasable releasable) {
+                    latch.countDown();
+                }
+
+                @Override
+                public void onFailure(Exception e) {
+                    throw new AssertionError(e);
+                }
+            }, ThreadPool.Names.GENERIC, "");
+            latch.await();
+            assertEquals(InternalEngine.class, nextPrimary.getEngine().getClass());
+            nextPrimary.refresh("test");
+
+            oldPrimary.close("demoted", false, false);
+            oldPrimary.store().close();
+            IndexShard newReplica = shards.addReplicaWithExistingPath(oldPrimary.shardPath(), oldPrimary.routingEntry().currentNodeId());
+            shards.recoverReplica(newReplica);
+
+            assertDocCount(nextPrimary, numDocs);
+            assertDocCount(newReplica, numDocs);
+
+            nextPrimary.refresh("test");
+            replicateSegments(nextPrimary, shards.getReplicas());
+            final List<DocIdSeqNoAndSource> docsAfterRecovery = getDocIdAndSeqNos(shards.getPrimary());
+            for (IndexShard shard : shards.getReplicas()) {
+                assertThat(shard.routingEntry().toString(), getDocIdAndSeqNos(shard), equalTo(docsAfterRecovery));
+            }
+        }
+    }
+
+    public void testReplicaReceivesGenIncrease() throws Exception {
+        try (ReplicationGroup shards = createGroup(1, getIndexSettings(), new NRTReplicationEngineFactory())) {
+            shards.startAll();
+            final IndexShard primary = shards.getPrimary();
+            final IndexShard replica = shards.getReplicas().get(0);
+            final int numDocs = randomIntBetween(10, 100);
+            shards.indexDocs(numDocs);
+            assertEquals(numDocs, primary.translogStats().estimatedNumberOfOperations());
+            assertEquals(numDocs, replica.translogStats().estimatedNumberOfOperations());
+            assertEquals(numDocs, primary.translogStats().getUncommittedOperations());
+            assertEquals(numDocs, replica.translogStats().getUncommittedOperations());
+            flushShard(primary, true);
+            replicateSegments(primary, shards.getReplicas());
+            assertEquals(0, primary.translogStats().estimatedNumberOfOperations());
+            assertEquals(0, replica.translogStats().estimatedNumberOfOperations());
+            assertEquals(0, primary.translogStats().getUncommittedOperations());
+            assertEquals(0, replica.translogStats().getUncommittedOperations());
+
+            final int additionalDocs = shards.indexDocs(randomIntBetween(numDocs + 1, numDocs + 10));
+
+            final int totalDocs = numDocs + additionalDocs;
+            primary.refresh("test");
+            replicateSegments(primary, shards.getReplicas());
+            assertEquals(additionalDocs, primary.translogStats().estimatedNumberOfOperations());
+            assertEquals(additionalDocs, replica.translogStats().estimatedNumberOfOperations());
+            assertEquals(additionalDocs, primary.translogStats().getUncommittedOperations());
+            assertEquals(additionalDocs, replica.translogStats().getUncommittedOperations());
+            flushShard(primary, true);
+            replicateSegments(primary, shards.getReplicas());
+
+            assertEqualCommittedSegments(primary, replica);
+            assertDocCount(primary, totalDocs);
+            assertDocCount(replica, totalDocs);
+            assertEquals(0, primary.translogStats().estimatedNumberOfOperations());
+            assertEquals(0, replica.translogStats().estimatedNumberOfOperations());
+            assertEquals(0, primary.translogStats().getUncommittedOperations());
+            assertEquals(0, replica.translogStats().getUncommittedOperations());
+        }
+    }
+
+    /**
+     * Verifies that commits on the replica engine resulting from an engine or reader close do not clean up the temporary
+     * replication files from an ongoing round of segment replication.
+     */
+    public void testTemporaryFilesNotCleanup() throws Exception {
+        String mappings = "{ \"" +
MapperService.SINGLE_MAPPING_NAME + "\": { \"properties\": { \"foo\": { \"type\": \"keyword\"} }}}"; + try (ReplicationGroup shards = createGroup(1, getIndexSettings(), mappings, new NRTReplicationEngineFactory())) { + shards.startAll(); + IndexShard primaryShard = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + + // Step 1. Ingest numDocs documents, commit to create commit point on primary & replicate + final int numDocs = randomIntBetween(100, 200); + logger.info("--> Inserting documents {}", numDocs); + for (int i = 0; i < numDocs; i++) { + shards.index(new IndexRequest(index.getName()).id(String.valueOf(i)).source("{\"foo\": \"bar\"}", MediaTypeRegistry.JSON)); + } + assertEqualTranslogOperations(shards, primaryShard); + primaryShard.flush(new FlushRequest().waitIfOngoing(true).force(true)); + replicateSegments(primaryShard, shards.getReplicas()); + shards.assertAllEqual(numDocs); + + // Step 2. Ingest numDocs documents again to create a new commit on primary + logger.info("--> Ingest {} docs again", numDocs); + for (int i = 0; i < numDocs; i++) { + shards.index(new IndexRequest(index.getName()).id(String.valueOf(i)).source("{\"foo\": \"bar\"}", MediaTypeRegistry.JSON)); + } + assertEqualTranslogOperations(shards, primaryShard); + primaryShard.flush(new FlushRequest().waitIfOngoing(true).force(true)); + + // Step 3. Copy segment files to replica shard but prevent commit + final CountDownLatch countDownLatch = new CountDownLatch(1); + Map<String, StoreFileMetadata> primaryMetadata; + try (final GatedCloseable<SegmentInfos> segmentInfosSnapshot = primaryShard.getSegmentInfosSnapshot()) { + final SegmentInfos primarySegmentInfos = segmentInfosSnapshot.get(); + primaryMetadata = primaryShard.store().getSegmentMetadataMap(primarySegmentInfos); + } + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final IndicesService indicesService = mock(IndicesService.class); + when(indicesService.getShardOrNull(replica.shardId)).thenReturn(replica); + final SegmentReplicationTargetService targetService = new SegmentReplicationTargetService( + threadPool, + new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), + mock(TransportService.class), + sourceFactory, + indicesService, + clusterService + ); + final Consumer<IndexShard> runnablePostGetFiles = (indexShard) -> { + try { + Collection<String> temporaryFiles = Stream.of(indexShard.store().directory().listAll()) + .filter(name -> name.startsWith(SegmentReplicationTarget.REPLICATION_PREFIX)) + .collect(Collectors.toList()); + + // Step 4. Perform a commit on replica shard. + NRTReplicationEngine engine = (NRTReplicationEngine) indexShard.getEngine(); + engine.updateSegments(engine.getSegmentInfosSnapshot().get()); + + // Step 5. Validate temporary files are not deleted from store. 
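+                    // (the REPLICATION_PREFIX files captured above must survive the commit performed in Step 4.)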
+                    Collection<String> replicaStoreFiles = List.of(indexShard.store().directory().listAll());
+                    assertTrue(replicaStoreFiles.containsAll(temporaryFiles));
+                } catch (IOException e) {
+                    throw new RuntimeException(e);
+                }
+            };
+            SegmentReplicationSource segmentReplicationSource = getSegmentReplicationSource(
+                primaryShard,
+                (repId) -> targetService.get(repId),
+                runnablePostGetFiles
+            );
+            when(sourceFactory.get(any())).thenReturn(segmentReplicationSource);
+            targetService.startReplication(
+                replica,
+                primaryShard.getLatestReplicationCheckpoint(),
+                getTargetListener(primaryShard, replica, primaryMetadata, countDownLatch)
+            );
+            countDownLatch.await(30, TimeUnit.SECONDS);
+            assertEquals("Replication failed", 0, countDownLatch.getCount());
+            shards.assertAllEqual(numDocs);
+        }
+    }
+
+    public void testReplicaReceivesLowerGeneration() throws Exception {
+        // A replica can receive incoming segments with a generation lower than the one it currently has on disk.
+        // This can happen when a replica that is further behind the other replicas is promoted to primary.
+        try (ReplicationGroup shards = createGroup(2, getIndexSettings(), new NRTReplicationEngineFactory())) {
+            shards.startAll();
+            final IndexShard primary = shards.getPrimary();
+            final IndexShard behindReplicaBeforeRestart = shards.getReplicas().get(0);
+            final IndexShard replica_2 = shards.getReplicas().get(1);
+            int numDocs = randomIntBetween(10, 100);
+            int totalDocs = numDocs;
+            shards.indexDocs(numDocs);
+            flushShard(primary, true);
+            replicateSegments(primary, List.of(behindReplicaBeforeRestart));
+            numDocs = randomIntBetween(numDocs + 1, numDocs + 10);
+            totalDocs += numDocs;
+            shards.indexDocs(numDocs);
+            flushShard(primary, true);
+            flushShard(primary, true);
+            flushShard(primary, true);
+            replicateSegments(primary, List.of(behindReplicaBeforeRestart));
+
+            // behindReplicaBeforeRestart is closed and re-opened further below, after replica_2 is promoted as the new primary.
+ assertEqualCommittedSegments(primary, behindReplicaBeforeRestart); + + assertDocCount(behindReplicaBeforeRestart, totalDocs); + assertDocCount(replica_2, 0); + + shards.promoteReplicaToPrimary(replica_2).get(); + primary.close("demoted", randomBoolean(), false); + primary.store().close(); + IndexShard oldPrimary = shards.addReplicaWithExistingPath(primary.shardPath(), primary.routingEntry().currentNodeId()); + shards.recoverReplica(oldPrimary); + + behindReplicaBeforeRestart.close("restart", false, false); + behindReplicaBeforeRestart.store().close(); + shards.removeReplica(behindReplicaBeforeRestart); + final IndexShard behindReplicaAfterRestart = shards.addReplicaWithExistingPath( + behindReplicaBeforeRestart.shardPath(), + behindReplicaBeforeRestart.routingEntry().currentNodeId() + ); + shards.recoverReplica(behindReplicaAfterRestart); + + numDocs = randomIntBetween(numDocs + 1, numDocs + 10); + totalDocs += numDocs; + shards.indexDocs(numDocs); + flushShard(replica_2, false); + replicateSegments(replica_2, shards.getReplicas()); + assertEqualCommittedSegments(replica_2, oldPrimary, behindReplicaAfterRestart); + assertDocCount(replica_2, totalDocs); + assertDocCount(oldPrimary, totalDocs); + assertDocCount(behindReplicaAfterRestart, totalDocs); + } + } + + // Todo: Move this test to SegmentReplicationIndexShardTests so that it runs for both node-node & remote store + public void testPrimaryRelocation() throws Exception { + final IndexShard primarySource = newStartedShard(true, getIndexSettings()); + int totalOps = randomInt(10); + for (int i = 0; i < totalOps; i++) { + indexDoc(primarySource, "_doc", Integer.toString(i)); + } + IndexShardTestCase.updateRoutingEntry(primarySource, primarySource.routingEntry().relocate(randomAlphaOfLength(10), -1)); + final IndexShard primaryTarget = newShard( + primarySource.routingEntry().getTargetRelocatingShard(), + getIndexSettings(), + new NRTReplicationEngineFactory() + ); + updateMappings(primaryTarget, primarySource.indexSettings().getIndexMetadata()); + + Function<List<IndexShard>, List<SegmentReplicationTarget>> replicatePrimaryFunction = (shardList) -> { + try { + assert shardList.size() >= 2; + final IndexShard primary = shardList.get(0); + return replicateSegments(primary, shardList.subList(1, shardList.size())); + } catch (IOException | InterruptedException e) { + throw new RuntimeException(e); + } + }; + recoverReplica(primaryTarget, primarySource, true, replicatePrimaryFunction); + + // check that local checkpoint of new primary is properly tracked after primary relocation + assertThat(primaryTarget.getLocalCheckpoint(), equalTo(totalOps - 1L)); + assertThat( + primaryTarget.getReplicationTracker() + .getTrackedLocalCheckpointForShard(primaryTarget.routingEntry().allocationId().getId()) + .getLocalCheckpoint(), + equalTo(totalOps - 1L) + ); + assertDocCount(primaryTarget, totalOps); + closeShards(primarySource, primaryTarget); + } + + // Todo: Move this test to SegmentReplicationIndexShardTests so that it runs for both node-node & remote store + public void testNRTReplicaPromotedAsPrimary() throws Exception { + try (ReplicationGroup shards = createGroup(2, getIndexSettings(), new NRTReplicationEngineFactory())) { + shards.startAll(); + IndexShard oldPrimary = shards.getPrimary(); + final IndexShard nextPrimary = shards.getReplicas().get(0); + final IndexShard replica = shards.getReplicas().get(1); + + // 1. Create ops that are in the index and xlog of both shards but not yet part of a commit point. 
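+            // ("xlog" refers to the translog; replicas receive these operations into their translogs even though no
+            // segments have been copied over yet.)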
+            final int numDocs = shards.indexDocs(randomInt(10));
+
+            // refresh and copy the segments over.
+            oldPrimary.refresh("Test");
+            replicateSegments(oldPrimary, shards.getReplicas());
+
+            // at this point all shards should have numDocs persisted and searchable.
+            assertDocCounts(oldPrimary, numDocs, numDocs);
+            for (IndexShard shard : shards.getReplicas()) {
+                assertDocCounts(shard, numDocs, numDocs);
+            }
+            assertEqualTranslogOperations(shards, oldPrimary);
+
+            // 2. Create ops that are in the replica's xlog, not in the index.
+            // Index some more into both, but don't replicate; the replica will have only numDocs searchable but should
+            // have totalDocs persisted.
+            final int additionalDocs = shards.indexDocs(randomInt(10));
+            final int totalDocs = numDocs + additionalDocs;
+
+            assertDocCounts(oldPrimary, totalDocs, totalDocs);
+            assertEqualTranslogOperations(shards, oldPrimary);
+            for (IndexShard shard : shards.getReplicas()) {
+                assertDocCounts(shard, totalDocs, numDocs);
+            }
+            assertEquals(totalDocs, oldPrimary.translogStats().estimatedNumberOfOperations());
+            assertEquals(totalDocs, nextPrimary.translogStats().estimatedNumberOfOperations());
+            assertEquals(totalDocs, replica.translogStats().estimatedNumberOfOperations());
+            assertEquals(totalDocs, nextPrimary.translogStats().getUncommittedOperations());
+            assertEquals(totalDocs, replica.translogStats().getUncommittedOperations());
+
+            // promote the replica
+            shards.syncGlobalCheckpoint();
+            shards.promoteReplicaToPrimary(nextPrimary);
+
+            // close and start the oldPrimary as a replica.
+            oldPrimary.close("demoted", false, false);
+            oldPrimary.store().close();
+            oldPrimary = shards.addReplicaWithExistingPath(oldPrimary.shardPath(), oldPrimary.routingEntry().currentNodeId());
+            shards.recoverReplica(oldPrimary);
+
+            assertEquals(NRTReplicationEngine.class, oldPrimary.getEngine().getClass());
+            assertEquals(InternalEngine.class, nextPrimary.getEngine().getClass());
+            assertDocCounts(nextPrimary, totalDocs, totalDocs);
+            assertEquals(0, nextPrimary.translogStats().estimatedNumberOfOperations());
+
+            // refresh and push segments to the other replica.
+            nextPrimary.refresh("test");
+            replicateSegments(nextPrimary, asList(replica));
+
+            for (IndexShard shard : shards) {
+                assertConsistentHistoryBetweenTranslogAndLucene(shard);
+            }
+            final List<DocIdSeqNoAndSource> docsAfterRecovery = getDocIdAndSeqNos(shards.getPrimary());
+            for (IndexShard shard : shards.getReplicas()) {
+                assertThat(shard.routingEntry().toString(), getDocIdAndSeqNos(shard), equalTo(docsAfterRecovery));
+            }
+        }
+    }
+
+    // Todo: Move this test to SegmentReplicationIndexShardTests so that it runs for both node-node & remote store
+    public void testReplicaRestarts() throws Exception {
+        try (ReplicationGroup shards = createGroup(3, getIndexSettings(), new NRTReplicationEngineFactory())) {
+            shards.startAll();
+            IndexShard primary = shards.getPrimary();
+            // 1. Create ops that are in the index and xlog of all shards but not yet part of a commit point.
+            final int numDocs = shards.indexDocs(randomInt(10));
+            logger.info("--> Index {} documents on primary", numDocs);
+
+            // refresh and copy the segments over.
+            if (randomBoolean()) {
+                flushShard(primary);
+            }
+            primary.refresh("Test");
+            logger.info("--> Replicate segments");
+            replicateSegments(primary, shards.getReplicas());
+
+            // at this point all shards should have numDocs persisted and searchable.
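+            // (assertDocCounts checks the persisted and searchable doc counts separately; until segments are copied a
+            // replica can have more docs persisted than searchable.)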
+            logger.info("--> Verify doc count");
+            assertDocCounts(primary, numDocs, numDocs);
+            for (IndexShard shard : shards.getReplicas()) {
+                assertDocCounts(shard, numDocs, numDocs);
+            }
+
+            final int rounds = randomInt(5);
+            logger.info("--> Run {} rounds of indexing and random replica restarts", rounds);
+            for (int i = 0; i < rounds; i++) {
+                shards.indexDocs(randomInt(10));
+
+                // randomly restart a replica
+                final IndexShard replicaToRestart = getRandomReplica(shards);
+                logger.info("--> Restarting replica {}", replicaToRestart.shardId);
+                replicaToRestart.close("restart", false, false);
+                replicaToRestart.store().close();
+                shards.removeReplica(replicaToRestart);
+                final IndexShard newReplica = shards.addReplicaWithExistingPath(
+                    replicaToRestart.shardPath(),
+                    replicaToRestart.routingEntry().currentNodeId()
+                );
+                logger.info("--> Recover newReplica {}", newReplica.shardId);
+                shards.recoverReplica(newReplica);
+
+                // flush and push segments to the other replicas, optionally failing and promoting a random replica first.
+                if (randomBoolean()) {
+                    failAndPromoteRandomReplica(shards);
+                }
+                flushShard(shards.getPrimary());
+                replicateSegments(shards.getPrimary(), shards.getReplicas());
+            }
+            primary = shards.getPrimary();
+
+            // flush and push segments to the replicas.
+            flushShard(primary);
+            replicateSegments(primary, shards.getReplicas());
+
+            for (IndexShard shard : shards) {
+                assertConsistentHistoryBetweenTranslogAndLucene(shard);
+            }
+            final List<DocIdSeqNoAndSource> docsAfterReplication = getDocIdAndSeqNos(shards.getPrimary());
+            for (IndexShard shard : shards.getReplicas()) {
+                assertThat(shard.routingEntry().toString(), getDocIdAndSeqNos(shard), equalTo(docsAfterReplication));
+            }
+        }
+    }
+
+    // Todo: Move this test to SegmentReplicationIndexShardTests so that it runs for both node-node & remote store
+    public void testSegmentReplication_Index_Update_Delete() throws Exception {
+        String mappings = "{ \"" + MapperService.SINGLE_MAPPING_NAME + "\": { \"properties\": { \"foo\": { \"type\": \"keyword\"} }}}";
+        try (ReplicationGroup shards = createGroup(2, getIndexSettings(), mappings, new NRTReplicationEngineFactory())) {
+            shards.startAll();
+            final IndexShard primaryShard = shards.getPrimary();
+
+            final int numDocs = randomIntBetween(100, 200);
+            for (int i = 0; i < numDocs; i++) {
+                shards.index(new IndexRequest(index.getName()).id(String.valueOf(i)).source("{\"foo\": \"bar\"}", MediaTypeRegistry.JSON));
+            }
+
+            assertEqualTranslogOperations(shards, primaryShard);
+            primaryShard.refresh("Test");
+            replicateSegments(primaryShard, shards.getReplicas());
+
+            shards.assertAllEqual(numDocs);
+
+            for (int i = 0; i < numDocs; i++) {
+                // randomly update docs.
+                if (randomBoolean()) {
+                    shards.index(
+                        new IndexRequest(index.getName()).id(String.valueOf(i)).source("{ \"foo\" : \"baz\" }", MediaTypeRegistry.JSON)
+                    );
+                }
+            }
+            assertEqualTranslogOperations(shards, primaryShard);
+            primaryShard.refresh("Test");
+            replicateSegments(primaryShard, shards.getReplicas());
+            shards.assertAllEqual(numDocs);
+
+            final List<DocIdSeqNoAndSource> docs = getDocIdAndSeqNos(primaryShard);
+            for (IndexShard shard : shards.getReplicas()) {
+                assertEquals(getDocIdAndSeqNos(shard), docs);
+            }
+            for (int i = 0; i < numDocs; i++) {
+                // randomly delete.
+ if (randomBoolean()) { + shards.delete(new DeleteRequest(index.getName()).id(String.valueOf(i))); + } + } + assertEqualTranslogOperations(shards, primaryShard); + primaryShard.refresh("Test"); + replicateSegments(primaryShard, shards.getReplicas()); + final List<DocIdSeqNoAndSource> docsAfterDelete = getDocIdAndSeqNos(primaryShard); + for (IndexShard shard : shards.getReplicas()) { + assertEquals(getDocIdAndSeqNos(shard), docsAfterDelete); + } + } + } + +} diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationWithRemoteIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationWithRemoteIndexShardTests.java deleted file mode 100644 index a67b60d6128d1..0000000000000 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationWithRemoteIndexShardTests.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.shard; - -import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.settings.Settings; -import org.opensearch.index.engine.NRTReplicationEngineFactory; -import org.opensearch.index.replication.OpenSearchIndexLevelReplicationTestCase; -import org.opensearch.indices.replication.common.ReplicationType; - -import java.io.IOException; - -public class SegmentReplicationWithRemoteIndexShardTests extends OpenSearchIndexLevelReplicationTestCase { - private static final Settings settings = Settings.builder() - .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) - .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) - .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, "temp-fs") - .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, "temp-fs") - .build(); - - public void testReplicaSyncingFromRemoteStore() throws IOException { - ReplicationGroup shards = createGroup(1, settings, indexMapping, new NRTReplicationEngineFactory(), createTempDir()); - final IndexShard primaryShard = shards.getPrimary(); - final IndexShard replicaShard = shards.getReplicas().get(0); - shards.startPrimary(); - shards.startAll(); - indexDoc(primaryShard, "_doc", "1"); - indexDoc(primaryShard, "_doc", "2"); - primaryShard.refresh("test"); - assertDocs(primaryShard, "1", "2"); - flushShard(primaryShard); - - replicaShard.syncSegmentsFromRemoteSegmentStore(true, true, false); - assertDocs(replicaShard, "1", "2"); - closeShards(primaryShard, replicaShard); - } -} diff --git a/server/src/test/java/org/opensearch/index/shard/ShardGetServiceTests.java b/server/src/test/java/org/opensearch/index/shard/ShardGetServiceTests.java index 34d6233c8202f..0f27bc2bd126b 100644 --- a/server/src/test/java/org/opensearch/index/shard/ShardGetServiceTests.java +++ b/server/src/test/java/org/opensearch/index/shard/ShardGetServiceTests.java @@ -34,8 +34,8 @@ import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.VersionType; import org.opensearch.index.engine.Engine; import org.opensearch.index.engine.VersionConflictEngineException; @@ -74,7 +74,7 @@ public void testGetForUpdate() throws IOException { 
assertEquals(searcher.getIndexReader().maxDoc(), 1); // we refreshed } - Engine.IndexResult test1 = indexDoc(primary, "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar"); + Engine.IndexResult test1 = indexDoc(primary, "1", "{\"foo\" : \"baz\"}", MediaTypeRegistry.JSON, "foobar"); assertTrue(primary.getEngine().refreshNeeded()); GetResult testGet1 = primary.getService().getForUpdate("1", UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); @@ -89,7 +89,7 @@ public void testGetForUpdate() throws IOException { } // now again from the reader - Engine.IndexResult test2 = indexDoc(primary, "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar"); + Engine.IndexResult test2 = indexDoc(primary, "1", "{\"foo\" : \"baz\"}", MediaTypeRegistry.JSON, "foobar"); assertTrue(primary.getEngine().refreshNeeded()); testGet1 = primary.getService().getForUpdate("1", UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); @@ -157,7 +157,7 @@ private void runGetFromTranslogWithOptions( assertEquals(searcher.getIndexReader().maxDoc(), 1); // we refreshed } - Engine.IndexResult test1 = indexDoc(primary, "1", docToIndex, XContentType.JSON, "foobar"); + Engine.IndexResult test1 = indexDoc(primary, "1", docToIndex, MediaTypeRegistry.JSON, "foobar"); assertTrue(primary.getEngine().refreshNeeded()); GetResult testGet1 = primary.getService().getForUpdate("1", UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); assertEquals(new String(testGet1.source() == null ? new byte[0] : testGet1.source(), StandardCharsets.UTF_8), expectedResult); @@ -171,7 +171,7 @@ private void runGetFromTranslogWithOptions( assertEquals(searcher.getIndexReader().maxDoc(), 2); } - Engine.IndexResult test2 = indexDoc(primary, "2", docToIndex, XContentType.JSON, "foobar"); + Engine.IndexResult test2 = indexDoc(primary, "2", docToIndex, MediaTypeRegistry.JSON, "foobar"); assertTrue(primary.getEngine().refreshNeeded()); GetResult testGet2 = primary.getService() .get("2", new String[] { "foo" }, true, 1, VersionType.INTERNAL, FetchSourceContext.FETCH_SOURCE); diff --git a/server/src/test/java/org/opensearch/index/shard/ShardIdTests.java b/server/src/test/java/org/opensearch/index/shard/ShardIdTests.java index b124fd119e7e6..3a925602d7821 100644 --- a/server/src/test/java/org/opensearch/index/shard/ShardIdTests.java +++ b/server/src/test/java/org/opensearch/index/shard/ShardIdTests.java @@ -33,8 +33,8 @@ package org.opensearch.index.shard; import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.test.OpenSearchTestCase; public class ShardIdTests extends OpenSearchTestCase { diff --git a/server/src/test/java/org/opensearch/index/shard/ShardPathTests.java b/server/src/test/java/org/opensearch/index/shard/ShardPathTests.java index 5bb74d7334113..3f9ab703d2336 100644 --- a/server/src/test/java/org/opensearch/index/shard/ShardPathTests.java +++ b/server/src/test/java/org/opensearch/index/shard/ShardPathTests.java @@ -33,18 +33,18 @@ import org.opensearch.cluster.routing.AllocationId; import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.env.NodeEnvironment; import org.opensearch.gateway.WriteStateException; -import 
org.opensearch.core.index.Index; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; import java.nio.file.Path; +import static org.opensearch.env.Environment.PATH_SHARED_DATA_SETTING; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; -import static org.opensearch.env.Environment.PATH_SHARED_DATA_SETTING; public class ShardPathTests extends OpenSearchTestCase { public void testLoadShardPath() throws IOException { diff --git a/server/src/test/java/org/opensearch/index/shard/ShardSplittingQueryTests.java b/server/src/test/java/org/opensearch/index/shard/ShardSplittingQueryTests.java index 940d9a4ead5f9..03b4ab684e763 100644 --- a/server/src/test/java/org/opensearch/index/shard/ShardSplittingQueryTests.java +++ b/server/src/test/java/org/opensearch/index/shard/ShardSplittingQueryTests.java @@ -38,7 +38,6 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.IndexSearcher; @@ -46,6 +45,7 @@ import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; diff --git a/server/src/test/java/org/opensearch/index/shard/StoreRecoveryTests.java b/server/src/test/java/org/opensearch/index/shard/StoreRecoveryTests.java index c1a51bb780f61..846b975a9520e 100644 --- a/server/src/test/java/org/opensearch/index/shard/StoreRecoveryTests.java +++ b/server/src/test/java/org/opensearch/index/shard/StoreRecoveryTests.java @@ -270,6 +270,7 @@ public void testStatsDirWrapper() throws IOException { IOUtils.close(dir, target); } + @SuppressWarnings("removal") public boolean hardLinksSupported(Path path) throws IOException { try { Files.createFile(path.resolve("foo.bar")); diff --git a/server/src/test/java/org/opensearch/index/similarity/SimilarityServiceTests.java b/server/src/test/java/org/opensearch/index/similarity/SimilarityServiceTests.java index 1d7b749433c65..7fb0a57c7d540 100644 --- a/server/src/test/java/org/opensearch/index/similarity/SimilarityServiceTests.java +++ b/server/src/test/java/org/opensearch/index/similarity/SimilarityServiceTests.java @@ -40,8 +40,8 @@ import org.opensearch.Version; import org.opensearch.common.settings.Settings; import org.opensearch.index.IndexSettings; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTestCase; import org.hamcrest.Matchers; import java.util.Collections; diff --git a/server/src/test/java/org/opensearch/index/similarity/SimilarityTests.java b/server/src/test/java/org/opensearch/index/similarity/SimilarityTests.java index 9bbdeab7e59af..1203e15fbebd6 100644 --- a/server/src/test/java/org/opensearch/index/similarity/SimilarityTests.java +++ b/server/src/test/java/org/opensearch/index/similarity/SimilarityTests.java @@ -45,18 +45,17 @@ import org.apache.lucene.search.similarities.LMJelinekMercerSimilarity; import org.apache.lucene.search.similarities.LambdaTTF; import org.apache.lucene.search.similarities.NormalizationH2; -import org.opensearch.common.Strings; import 
org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.Settings; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexService; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperParsingException; import org.opensearch.index.mapper.MapperService; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchSingleNodeTestCase; import java.io.IOException; import java.util.Collection; @@ -275,17 +274,16 @@ public void testResolveSimilaritiesFromMapping_LMJelinekMercer() throws IOExcept } public void testResolveSimilaritiesFromMapping_Unknown() throws IOException { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("field1") - .field("type", "text") - .field("similarity", "unknown_similarity") - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("field1") + .field("type", "text") + .field("similarity", "unknown_similarity") + .endObject() + .endObject() + .endObject() + .toString(); IndexService indexService = createIndex("foo"); try { diff --git a/server/src/test/java/org/opensearch/index/snapshots/blobstore/FileInfoTests.java b/server/src/test/java/org/opensearch/index/snapshots/blobstore/FileInfoTests.java index 0f957517d48e1..3db9aeb9264a7 100644 --- a/server/src/test/java/org/opensearch/index/snapshots/blobstore/FileInfoTests.java +++ b/server/src/test/java/org/opensearch/index/snapshots/blobstore/FileInfoTests.java @@ -34,14 +34,13 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Version; import org.opensearch.OpenSearchParseException; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.test.OpenSearchTestCase; @@ -73,7 +72,7 @@ public void testToFromXContent() throws IOException { ); ByteSizeValue size = new ByteSizeValue(Math.abs(randomLong())); BlobStoreIndexShardSnapshot.FileInfo info = new BlobStoreIndexShardSnapshot.FileInfo("_foobar", meta, size); - XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON).prettyPrint(); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON).prettyPrint(); BlobStoreIndexShardSnapshot.FileInfo.toXContent(info, builder, ToXContent.EMPTY_PARAMS); byte[] xcontent = BytesReference.toBytes(BytesReference.bytes(shuffleXContent(builder))); @@ -126,7 +125,7 @@ public void testInvalidFieldsInFromXContent() throws IOException { fail("shouldn't be here"); } - XContentBuilder 
builder = XContentFactory.contentBuilder(XContentType.JSON); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); builder.startObject(); builder.field(FileInfo.NAME, name); builder.field(FileInfo.PHYSICAL_NAME, physicalName); diff --git a/server/src/test/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshotTests.java b/server/src/test/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshotTests.java index ccb89ec3d1547..38c4bb781ce06 100644 --- a/server/src/test/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshotTests.java +++ b/server/src/test/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshotTests.java @@ -8,11 +8,9 @@ package org.opensearch.index.snapshots.blobstore; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -57,11 +55,11 @@ public void testToXContent() throws IOException { fileNames ); String actual; - try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { + try (XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder()) { builder.startObject(); shardShallowCopySnapshot.toXContent(builder, ToXContent.EMPTY_PARAMS); builder.endObject(); - actual = Strings.toString(builder); + actual = builder.toString(); } String expectedXContent = "{\"version\":\"1\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" @@ -177,7 +175,7 @@ public void testFromXContentInvalid() throws IOException { fail("shouldn't be here"); } - XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); builder.startObject(); builder.field(RemoteStoreShardShallowCopySnapshot.VERSION, version); builder.field(RemoteStoreShardShallowCopySnapshot.NAME, snapshot); diff --git a/server/src/test/java/org/opensearch/index/snapshots/blobstore/SlicedInputStreamTests.java b/server/src/test/java/org/opensearch/index/snapshots/blobstore/SlicedInputStreamTests.java index 76fb8f62b5468..bf269e3951a74 100644 --- a/server/src/test/java/org/opensearch/index/snapshots/blobstore/SlicedInputStreamTests.java +++ b/server/src/test/java/org/opensearch/index/snapshots/blobstore/SlicedInputStreamTests.java @@ -33,8 +33,8 @@ import com.carrotsearch.randomizedtesting.generators.RandomNumbers; -import org.hamcrest.MatcherAssert; import org.opensearch.test.OpenSearchTestCase; +import org.hamcrest.MatcherAssert; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; diff --git a/server/src/test/java/org/opensearch/index/store/FsDirectoryFactoryTests.java b/server/src/test/java/org/opensearch/index/store/FsDirectoryFactoryTests.java index 56d67820797a2..2fffebbcf5f1f 100644 --- a/server/src/test/java/org/opensearch/index/store/FsDirectoryFactoryTests.java +++ 
b/server/src/test/java/org/opensearch/index/store/FsDirectoryFactoryTests.java @@ -43,12 +43,12 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.ShardPath; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTestCase; import org.hamcrest.Matchers; import java.io.IOException; @@ -72,7 +72,8 @@ public void testPreload() throws IOException { try (Directory directory = newDirectory(build)) { assertTrue(FsDirectoryFactory.isHybridFs(directory)); FsDirectoryFactory.HybridDirectory hybridDirectory = (FsDirectoryFactory.HybridDirectory) directory; - // test default hybrid mmap extensions + // test default hybrid extensions + // true->mmap, false->nio assertTrue(hybridDirectory.useDelegate("foo.nvd")); assertTrue(hybridDirectory.useDelegate("foo.dvd")); assertTrue(hybridDirectory.useDelegate("foo.tim")); @@ -82,6 +83,7 @@ public void testPreload() throws IOException { assertTrue(hybridDirectory.useDelegate("foo.kdi")); assertTrue(hybridDirectory.useDelegate("foo.cfs")); assertTrue(hybridDirectory.useDelegate("foo.doc")); + assertTrue(hybridDirectory.useDelegate("foo.new")); assertFalse(hybridDirectory.useDelegate("foo.pos")); assertFalse(hybridDirectory.useDelegate("foo.pay")); MMapDirectory delegate = hybridDirectory.getDelegate(); @@ -94,23 +96,25 @@ public void testPreload() throws IOException { build = Settings.builder() .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.HYBRIDFS.name().toLowerCase(Locale.ROOT)) .putList(IndexModule.INDEX_STORE_PRE_LOAD_SETTING.getKey(), "nvd", "dvd", "cfs") - .putList(IndexModule.INDEX_STORE_HYBRID_MMAP_EXTENSIONS.getKey(), "nvd", "dvd", "tim", "pos", "pay") + .putList(IndexModule.INDEX_STORE_HYBRID_NIO_EXTENSIONS.getKey(), "tip", "dim", "kdd", "kdi", "cfs", "doc") .build(); try (Directory directory = newDirectory(build)) { assertTrue(FsDirectoryFactory.isHybridFs(directory)); FsDirectoryFactory.HybridDirectory hybridDirectory = (FsDirectoryFactory.HybridDirectory) directory; - // test custom hybrid mmap extensions + // test custom hybrid nio extensions + // true->mmap, false->nio assertTrue(hybridDirectory.useDelegate("foo.nvd")); assertTrue(hybridDirectory.useDelegate("foo.dvd")); assertTrue(hybridDirectory.useDelegate("foo.tim")); + assertTrue(hybridDirectory.useDelegate("foo.pos")); + assertTrue(hybridDirectory.useDelegate("foo.pay")); + assertTrue(hybridDirectory.useDelegate("foo.new")); assertFalse(hybridDirectory.useDelegate("foo.tip")); assertFalse(hybridDirectory.useDelegate("foo.dim")); assertFalse(hybridDirectory.useDelegate("foo.kdd")); assertFalse(hybridDirectory.useDelegate("foo.kdi")); assertFalse(hybridDirectory.useDelegate("foo.cfs")); assertFalse(hybridDirectory.useDelegate("foo.doc")); - assertTrue(hybridDirectory.useDelegate("foo.pos")); - assertTrue(hybridDirectory.useDelegate("foo.pay")); MMapDirectory delegate = hybridDirectory.getDelegate(); assertThat(delegate, Matchers.instanceOf(FsDirectoryFactory.PreLoadMMapDirectory.class)); FsDirectoryFactory.PreLoadMMapDirectory preLoadMMapDirectory = (FsDirectoryFactory.PreLoadMMapDirectory) delegate; @@ -119,6 +123,99 @@ public void testPreload() throws IOException { 
assertTrue(preLoadMMapDirectory.useDelegate("foo.cfs"));
             assertTrue(preLoadMMapDirectory.useDelegate("foo.nvd"));
         }
+        build = Settings.builder()
+            .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.HYBRIDFS.name().toLowerCase(Locale.ROOT))
+            .putList(IndexModule.INDEX_STORE_PRE_LOAD_SETTING.getKey(), "nvd", "dvd", "cfs")
+            .putList(IndexModule.INDEX_STORE_HYBRID_MMAP_EXTENSIONS.getKey(), "nvd", "dvd", "tim", "pos")
+            .build();
+        try (Directory directory = newDirectory(build)) {
+            assertTrue(FsDirectoryFactory.isHybridFs(directory));
+            FsDirectoryFactory.HybridDirectory hybridDirectory = (FsDirectoryFactory.HybridDirectory) directory;
+            // test custom hybrid mmap extensions
+            // true->mmap, false->nio
+            assertTrue(hybridDirectory.useDelegate("foo.nvd"));
+            assertTrue(hybridDirectory.useDelegate("foo.dvd"));
+            assertTrue(hybridDirectory.useDelegate("foo.tim"));
+            assertTrue(hybridDirectory.useDelegate("foo.pos"));
+            assertTrue(hybridDirectory.useDelegate("foo.new"));
+            assertFalse(hybridDirectory.useDelegate("foo.pay"));
+            assertFalse(hybridDirectory.useDelegate("foo.tip"));
+            assertFalse(hybridDirectory.useDelegate("foo.dim"));
+            assertFalse(hybridDirectory.useDelegate("foo.kdd"));
+            assertFalse(hybridDirectory.useDelegate("foo.kdi"));
+            assertFalse(hybridDirectory.useDelegate("foo.cfs"));
+            assertFalse(hybridDirectory.useDelegate("foo.doc"));
+            MMapDirectory delegate = hybridDirectory.getDelegate();
+            assertThat(delegate, Matchers.instanceOf(FsDirectoryFactory.PreLoadMMapDirectory.class));
+            assertWarnings(
+                "[index.store.hybrid.mmap.extensions] setting was deprecated in OpenSearch and will be removed in a future release!"
+                    + " See the breaking changes documentation for the next major version."
+            );
+        }
+        build = Settings.builder()
+            .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.HYBRIDFS.name().toLowerCase(Locale.ROOT))
+            .putList(IndexModule.INDEX_STORE_PRE_LOAD_SETTING.getKey(), "nvd", "dvd", "cfs")
+            .putList(IndexModule.INDEX_STORE_HYBRID_MMAP_EXTENSIONS.getKey(), "nvd", "dvd", "tim", "pos")
+            .putList(IndexModule.INDEX_STORE_HYBRID_NIO_EXTENSIONS.getKey(), "nvd", "dvd", "tim", "pos")
+            .build();
+        try {
+            newDirectory(build);
+            fail("expected an exception: the nio and mmap extension settings cannot both be set");
+        } catch (final Exception e) {
+            assertEquals(
+                "Settings index.store.hybrid.nio.extensions & index.store.hybrid.mmap.extensions cannot both be set. Use index.store.hybrid.nio.extensions only.",
+                e.getMessage()
+            );
+        }
+        build = Settings.builder()
+            .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.HYBRIDFS.name().toLowerCase(Locale.ROOT))
+            .putList(IndexModule.INDEX_STORE_PRE_LOAD_SETTING.getKey(), "nvd", "dvd", "cfs")
+            .putList(IndexModule.INDEX_STORE_HYBRID_NIO_EXTENSIONS.getKey(), "nvd", "dvd", "tim", "pos")
+            .putList(IndexModule.INDEX_STORE_HYBRID_MMAP_EXTENSIONS.getKey(), "nvd", "dvd", "tim", "pos")
+            .build();
+        try {
+            newDirectory(build);
+            fail("expected an exception: the nio and mmap extension settings cannot both be set");
+        } catch (final Exception e) {
+            assertEquals(
+                "Settings index.store.hybrid.nio.extensions & index.store.hybrid.mmap.extensions cannot both be set. Use index.store.hybrid.nio.extensions only.",
+                e.getMessage()
+            );
+        }
+        build = Settings.builder()
+            .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.HYBRIDFS.name().toLowerCase(Locale.ROOT))
+            .putList(IndexModule.INDEX_STORE_PRE_LOAD_SETTING.getKey(), "nvd", "dvd", "cfs")
+            .putList(IndexModule.INDEX_STORE_HYBRID_NIO_EXTENSIONS.getKey())
+            .build();
+        try (Directory directory = newDirectory(build)) {
+            assertTrue(FsDirectoryFactory.isHybridFs(directory));
+            FsDirectoryFactory.HybridDirectory hybridDirectory = (FsDirectoryFactory.HybridDirectory) directory;
+            // test empty hybrid nio extensions
+            // true->mmap, false->nio
+            assertTrue(hybridDirectory.useDelegate("foo.new"));
+            assertTrue(hybridDirectory.useDelegate("foo.nvd"));
+            assertTrue(hybridDirectory.useDelegate("foo.dvd"));
+            assertTrue(hybridDirectory.useDelegate("foo.cfs"));
+            assertTrue(hybridDirectory.useDelegate("foo.doc"));
+            MMapDirectory delegate = hybridDirectory.getDelegate();
+            assertThat(delegate, Matchers.instanceOf(FsDirectoryFactory.PreLoadMMapDirectory.class));
+        }
+        build = Settings.builder()
+            .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.HYBRIDFS.name().toLowerCase(Locale.ROOT))
+            .putList(IndexModule.INDEX_STORE_PRE_LOAD_SETTING.getKey(), "nvd", "dvd", "cfs")
+            .putList(IndexModule.INDEX_STORE_HYBRID_MMAP_EXTENSIONS.getKey())
+            .build();
+        try (Directory directory = newDirectory(build)) {
+            assertTrue(FsDirectoryFactory.isHybridFs(directory));
+            FsDirectoryFactory.HybridDirectory hybridDirectory = (FsDirectoryFactory.HybridDirectory) directory;
+            // test empty hybrid mmap extensions
+            // true->mmap, false->nio
+            assertTrue(hybridDirectory.useDelegate("foo.new"));
+            assertFalse(hybridDirectory.useDelegate("foo.nvd"));
+            assertFalse(hybridDirectory.useDelegate("foo.dvd"));
+            assertFalse(hybridDirectory.useDelegate("foo.cfs"));
+            assertFalse(hybridDirectory.useDelegate("foo.doc"));
+            MMapDirectory delegate = hybridDirectory.getDelegate();
+            assertThat(delegate, Matchers.instanceOf(FsDirectoryFactory.PreLoadMMapDirectory.class));
+        }
     }

     private Directory newDirectory(Settings settings) throws IOException {
diff --git a/server/src/test/java/org/opensearch/index/store/RemoteBufferedIndexOutputTests.java b/server/src/test/java/org/opensearch/index/store/RemoteBufferedIndexOutputTests.java
index e78f18d764746..b2d2809e12972 100644
--- a/server/src/test/java/org/opensearch/index/store/RemoteBufferedIndexOutputTests.java
+++ b/server/src/test/java/org/opensearch/index/store/RemoteBufferedIndexOutputTests.java
@@ -10,25 +10,25 @@
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.OutputStreamIndexOutput;
-import org.junit.After;
-import org.junit.Before;
 import org.opensearch.common.blobstore.BlobContainer;
-import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.common.io.stream.BytesStreamOutput;
-import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.common.lucene.store.ByteArrayIndexInput;
+import org.opensearch.core.common.bytes.BytesReference;
+import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.test.OpenSearchTestCase;
+import org.junit.After;
+import org.junit.Before;

 import java.io.IOException;
 import java.nio.charset.StandardCharsets;

 import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyLong;
+import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
 import static
org.mockito.Mockito.verify; -import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.when; -import static org.mockito.Mockito.any; -import static org.mockito.Mockito.anyLong; public class RemoteBufferedIndexOutputTests extends OpenSearchTestCase { diff --git a/server/src/test/java/org/opensearch/index/store/RemoteBufferedOutputDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteBufferedOutputDirectoryTests.java index 4fec8c9bc37af..0595bb0001640 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteBufferedOutputDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteBufferedOutputDirectoryTests.java @@ -9,9 +9,9 @@ package org.opensearch.index.store; import org.apache.lucene.store.IOContext; -import org.junit.Before; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; import static org.mockito.Mockito.mock; diff --git a/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java index 8ee5fcf0da9d7..9e38e1749d434 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java @@ -8,36 +8,47 @@ package org.opensearch.index.store; +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; -import org.junit.Before; -import org.opensearch.action.ActionListener; +import org.apache.lucene.tests.util.LuceneTestCase; import org.opensearch.action.LatchedActionListener; +import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; +import org.opensearch.common.blobstore.stream.write.WriteContext; import org.opensearch.common.blobstore.support.PlainBlobMetadata; +import org.opensearch.core.action.ActionListener; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; import java.io.IOException; import java.io.InputStream; import java.nio.file.NoSuchFileException; import java.util.Collection; import java.util.Collections; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import java.util.stream.Stream; +import org.mockito.Mockito; + +import static org.opensearch.common.blobstore.BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.when; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.when; public class RemoteDirectoryTests extends OpenSearchTestCase { private BlobContainer blobContainer; @@ -58,6 +69,85 @@ public void testListAllEmpty() throws IOException { assertArrayEquals(expectedFileName, actualFileNames); } + public void testCopyFrom() throws 
IOException, InterruptedException { + AtomicReference<Boolean> postUploadInvoked = new AtomicReference<>(false); + String filename = "_100.si"; + AsyncMultiStreamBlobContainer blobContainer = mock(AsyncMultiStreamBlobContainer.class); + Mockito.doAnswer(invocation -> { + ActionListener<Void> completionListener = invocation.getArgument(1); + completionListener.onResponse(null); + return null; + }).when(blobContainer).asyncBlobUpload(any(WriteContext.class), any()); + + Directory storeDirectory = LuceneTestCase.newDirectory(); + IndexOutput indexOutput = storeDirectory.createOutput(filename, IOContext.DEFAULT); + indexOutput.writeString("Hello World!"); + CodecUtil.writeFooter(indexOutput); + indexOutput.close(); + storeDirectory.sync(List.of(filename)); + + CountDownLatch countDownLatch = new CountDownLatch(1); + RemoteDirectory remoteDirectory = new RemoteDirectory(blobContainer); + remoteDirectory.copyFrom( + storeDirectory, + filename, + filename, + IOContext.READ, + () -> postUploadInvoked.set(true), + new ActionListener<>() { + @Override + public void onResponse(Void t) { + countDownLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + fail("Listener responded with exception" + e); + } + } + ); + assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); + assertTrue(postUploadInvoked.get()); + storeDirectory.close(); + } + + public void testCopyFromWithException() throws IOException, InterruptedException { + AtomicReference<Boolean> postUploadInvoked = new AtomicReference<>(false); + String filename = "_100.si"; + AsyncMultiStreamBlobContainer blobContainer = mock(AsyncMultiStreamBlobContainer.class); + Mockito.doAnswer(invocation -> { + ActionListener<Void> completionListener = invocation.getArgument(1); + completionListener.onResponse(null); + return null; + }).when(blobContainer).asyncBlobUpload(any(WriteContext.class), any()); + + Directory storeDirectory = LuceneTestCase.newDirectory(); + + CountDownLatch countDownLatch = new CountDownLatch(1); + RemoteDirectory remoteDirectory = new RemoteDirectory(blobContainer); + remoteDirectory.copyFrom( + storeDirectory, + filename, + filename, + IOContext.READ, + () -> postUploadInvoked.set(true), + new ActionListener<>() { + @Override + public void onResponse(Void t) { + fail("Listener responded with success"); + } + + @Override + public void onFailure(Exception e) { + countDownLatch.countDown(); + } + } + ); + assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); + assertFalse(postUploadInvoked.get()); + storeDirectory.close(); + } + public void testListAll() throws IOException { Map<String, BlobMetadata> fileNames = Stream.of("abc", "xyz", "pqr", "lmn", "jkl") .collect(Collectors.toMap(filename -> filename, filename -> new PlainBlobMetadata(filename, 100))); @@ -115,13 +205,29 @@ public void testCreateOutput() { public void testOpenInput() throws IOException { InputStream mockInputStream = mock(InputStream.class); when(blobContainer.readBlob("segment_1")).thenReturn(mockInputStream); - Map<String, BlobMetadata> fileInfo = new HashMap<>(); - fileInfo.put("segment_1", new PlainBlobMetadata("segment_1", 100)); - when(blobContainer.listBlobsByPrefix("segment_1")).thenReturn(fileInfo); + + BlobMetadata blobMetadata = new PlainBlobMetadata("segment_1", 100); + + when(blobContainer.listBlobsByPrefixInSortedOrder("segment_1", 1, LEXICOGRAPHIC)).thenReturn(List.of(blobMetadata)); IndexInput indexInput = remoteDirectory.openInput("segment_1", IOContext.DEFAULT); assertTrue(indexInput instanceof RemoteIndexInput); 
assertEquals(100, indexInput.length()); + verify(blobContainer).listBlobsByPrefixInSortedOrder("segment_1", 1, LEXICOGRAPHIC); + } + + public void testOpenInputWithLength() throws IOException { + InputStream mockInputStream = mock(InputStream.class); + when(blobContainer.readBlob("segment_1")).thenReturn(mockInputStream); + + BlobMetadata blobMetadata = new PlainBlobMetadata("segment_1", 100); + + when(blobContainer.listBlobsByPrefixInSortedOrder("segment_1", 1, LEXICOGRAPHIC)).thenReturn(List.of(blobMetadata)); + + IndexInput indexInput = remoteDirectory.openInput("segment_1", 100, IOContext.DEFAULT); + assertTrue(indexInput instanceof RemoteIndexInput); + assertEquals(100, indexInput.length()); + verify(blobContainer, times(0)).listBlobsByPrefixInSortedOrder("segment_1", 1, LEXICOGRAPHIC); } public void testOpenInputIOException() throws IOException { @@ -139,9 +245,8 @@ public void testOpenInputNoSuchFileException() throws IOException { } public void testFileLength() throws IOException { - Map<String, BlobMetadata> fileInfo = new HashMap<>(); - fileInfo.put("segment_1", new PlainBlobMetadata("segment_1", 100)); - when(blobContainer.listBlobsByPrefix("segment_1")).thenReturn(fileInfo); + BlobMetadata blobMetadata = new PlainBlobMetadata("segment_1", 100); + when(blobContainer.listBlobsByPrefixInSortedOrder("segment_1", 1, LEXICOGRAPHIC)).thenReturn(List.of(blobMetadata)); assertEquals(100, remoteDirectory.fileLength("segment_1")); } @@ -157,13 +262,7 @@ public void testListFilesByPrefixInLexicographicOrder() throws IOException { LatchedActionListener<List<BlobMetadata>> latchedActionListener = invocation.getArgument(3); latchedActionListener.onResponse(List.of(new PlainBlobMetadata("metadata_1", 1))); return null; - }).when(blobContainer) - .listBlobsByPrefixInSortedOrder( - eq("metadata"), - eq(1), - eq(BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC), - any(ActionListener.class) - ); + }).when(blobContainer).listBlobsByPrefixInSortedOrder(eq("metadata"), eq(1), eq(LEXICOGRAPHIC), any(ActionListener.class)); assertEquals(List.of("metadata_1"), remoteDirectory.listFilesByPrefixInLexicographicOrder("metadata", 1)); } @@ -173,13 +272,7 @@ public void testListFilesByPrefixInLexicographicOrderEmpty() throws IOException LatchedActionListener<List<BlobMetadata>> latchedActionListener = invocation.getArgument(3); latchedActionListener.onResponse(List.of()); return null; - }).when(blobContainer) - .listBlobsByPrefixInSortedOrder( - eq("metadata"), - eq(1), - eq(BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC), - any(ActionListener.class) - ); + }).when(blobContainer).listBlobsByPrefixInSortedOrder(eq("metadata"), eq(1), eq(LEXICOGRAPHIC), any(ActionListener.class)); assertEquals(List.of(), remoteDirectory.listFilesByPrefixInLexicographicOrder("metadata", 1)); } @@ -189,13 +282,7 @@ public void testListFilesByPrefixInLexicographicOrderException() { LatchedActionListener<List<BlobMetadata>> latchedActionListener = invocation.getArgument(3); latchedActionListener.onFailure(new IOException("Error")); return null; - }).when(blobContainer) - .listBlobsByPrefixInSortedOrder( - eq("metadata"), - eq(1), - eq(BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC), - any(ActionListener.class) - ); + }).when(blobContainer).listBlobsByPrefixInSortedOrder(eq("metadata"), eq(1), eq(LEXICOGRAPHIC), any(ActionListener.class)); assertThrows(IOException.class, () -> remoteDirectory.listFilesByPrefixInLexicographicOrder("metadata", 1)); } diff --git a/server/src/test/java/org/opensearch/index/store/RemoteIndexInputTests.java 
b/server/src/test/java/org/opensearch/index/store/RemoteIndexInputTests.java index cd35349e33b59..16f983253d058 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteIndexInputTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteIndexInputTests.java @@ -8,20 +8,20 @@ package org.opensearch.index.store; -import org.junit.Before; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; import java.io.IOException; import java.io.InputStream; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.when; -import static org.mockito.Mockito.verify; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyInt; -import static org.mockito.Mockito.eq; import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class RemoteIndexInputTests extends OpenSearchTestCase { diff --git a/server/src/test/java/org/opensearch/index/store/RemoteIndexOutputTests.java b/server/src/test/java/org/opensearch/index/store/RemoteIndexOutputTests.java index e7eb3231bf87d..d440dfca24f84 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteIndexOutputTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteIndexOutputTests.java @@ -9,18 +9,18 @@ package org.opensearch.index.store; import org.apache.lucene.store.IndexInput; -import org.junit.Before; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.lucene.store.InputStreamIndexInput; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; import java.io.IOException; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.doThrow; public class RemoteIndexOutputTests extends OpenSearchTestCase { private static final String FILENAME = "segment_1"; diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java index 682c9c53d10a0..cad5e47531cc6 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java @@ -9,9 +9,6 @@ package org.opensearch.index.store; import org.apache.lucene.store.Directory; -import org.junit.Before; -import org.mockito.ArgumentCaptor; -import org.opensearch.action.ActionListener; import org.opensearch.action.LatchedActionListener; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.blobstore.BlobContainer; @@ -19,8 +16,9 @@ import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; import org.opensearch.common.settings.Settings; -import org.opensearch.index.IndexSettings; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.ShardPath; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.RepositoryMissingException; @@ -28,19 +26,23 @@ import 
org.opensearch.test.IndexSettingsModule; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; +import org.junit.Before; import java.io.IOException; import java.nio.file.Path; import java.util.List; import java.util.function.Supplier; +import org.mockito.ArgumentCaptor; + +import static org.opensearch.index.store.RemoteSegmentStoreDirectory.METADATA_FILES_TO_FETCH; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; -import static org.mockito.Mockito.verify; import static org.mockito.Mockito.times; -import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class RemoteSegmentStoreDirectoryFactoryTests extends OpenSearchTestCase { @@ -77,7 +79,12 @@ public void testNewDirectory() throws IOException { latchedActionListener.onResponse(List.of()); return null; }).when(blobContainer) - .listBlobsByPrefixInSortedOrder(any(), eq(1), eq(BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC), any(ActionListener.class)); + .listBlobsByPrefixInSortedOrder( + any(), + eq(METADATA_FILES_TO_FETCH), + eq(BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC), + any(ActionListener.class) + ); when(repositoriesService.repository("remote_store_repository")).thenReturn(repository); @@ -92,7 +99,7 @@ public void testNewDirectory() throws IOException { verify(blobContainer).listBlobsByPrefixInSortedOrder( eq(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX), - eq(1), + eq(METADATA_FILES_TO_FETCH), eq(BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC), any() ); diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java index 7c765cf5df0be..8b69c15dac9d3 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java @@ -8,13 +8,14 @@ package org.opensearch.index.store; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.index.SegmentInfos; -import org.apache.lucene.store.ByteBuffersDataOutput; -import org.apache.lucene.store.ByteBuffersIndexOutput; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; @@ -22,20 +23,19 @@ import org.apache.lucene.store.OutputStreamIndexOutput; import org.apache.lucene.tests.util.LuceneTestCase; import org.apache.lucene.util.Version; -import org.junit.After; -import org.junit.Before; -import org.mockito.Mockito; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.UUIDs; -import org.opensearch.common.blobstore.VerifyingMultiStreamBlobContainer; +import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; import org.opensearch.common.blobstore.stream.write.WriteContext; import org.opensearch.common.io.VersionedCodecStreamWrapper; import org.opensearch.common.io.stream.BytesStreamOutput; 
import org.opensearch.common.lucene.store.ByteArrayIndexInput; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.remote.RemoteStoreUtils; import org.opensearch.index.shard.IndexShard; @@ -43,35 +43,49 @@ import org.opensearch.index.store.lockmanager.RemoteStoreMetadataLockManager; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadataHandler; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.test.MockLogAppender; +import org.opensearch.test.junit.annotations.TestLogging; import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; +import java.io.ByteArrayInputStream; import java.io.IOException; import java.nio.file.NoSuchFileException; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import java.util.HashMap; -import java.util.Collection; import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.startsWith; -import static org.mockito.Mockito.eq; -import static org.mockito.Mockito.times; +import org.mockito.Mockito; + +import static org.opensearch.index.store.RemoteSegmentStoreDirectory.METADATA_FILES_TO_FETCH; +import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.SEPARATOR; +import static org.opensearch.test.RemoteStoreTestUtils.createMetadataFileBytes; +import static org.opensearch.test.RemoteStoreTestUtils.getDummyMetadata; +import static org.hamcrest.CoreMatchers.is; +import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.doReturn; -import static org.hamcrest.CoreMatchers.is; +import static org.mockito.Mockito.startsWith; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class RemoteSegmentStoreDirectoryTests extends IndexShardTestCase { + private static final Logger logger = LogManager.getLogger(RemoteSegmentStoreDirectoryTests.class); private RemoteDirectory remoteDataDirectory; private RemoteDirectory remoteMetadataDirectory; private RemoteStoreMetadataLockManager mdLockManager; @@ -82,9 +96,47 @@ public class RemoteSegmentStoreDirectoryTests extends IndexShardTestCase { private SegmentInfos segmentInfos; private ThreadPool threadPool; - private final String metadataFilename = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(12, 23, 34, 1, 1); - private final String metadataFilename2 = 
RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(12, 13, 34, 1, 1); - private final String metadataFilename3 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(10, 38, 34, 1, 1); + private final String metadataFilename = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 12, + 23, + 34, + 1, + 1, + "node-1" + ); + + private final String metadataFilenameDup = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 12, + 23, + 34, + 2, + 1, + "node-2" + ); + private final String metadataFilename2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 12, + 13, + 34, + 1, + 1, + "node-1" + ); + private final String metadataFilename3 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 10, + 38, + 34, + 1, + 1, + "node-1" + ); + private final String metadataFilename4 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 10, + 36, + 34, + 1, + 1, + "node-1" + ); @Before public void setup() throws IOException { @@ -92,24 +144,29 @@ public void setup() throws IOException { remoteMetadataDirectory = mock(RemoteDirectory.class); mdLockManager = mock(RemoteStoreMetadataLockManager.class); threadPool = mock(ThreadPool.class); + testUploadTracker = new TestUploadListener(); + Settings indexSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, org.opensearch.Version.CURRENT) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build(); + ExecutorService executorService = OpenSearchExecutors.newDirectExecutorService(); + + indexShard = newStartedShard(false, indexSettings, new NRTReplicationEngineFactory()); remoteSegmentStoreDirectory = new RemoteSegmentStoreDirectory( remoteDataDirectory, remoteMetadataDirectory, mdLockManager, - threadPool + threadPool, + indexShard.shardId() ); - testUploadTracker = new TestUploadListener(); - - Settings indexSettings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, org.opensearch.Version.CURRENT).build(); - ExecutorService executorService = OpenSearchExecutors.newDirectExecutorService(); - - indexShard = newStartedShard(false, indexSettings, new NRTReplicationEngineFactory()); try (Store store = indexShard.store()) { segmentInfos = store.readLastCommittedSegmentsInfo(); } when(threadPool.executor(ThreadPool.Names.REMOTE_PURGE)).thenReturn(executorService); + when(threadPool.executor(ThreadPool.Names.REMOTE_RECOVERY)).thenReturn(executorService); + when(threadPool.executor(ThreadPool.Names.SAME)).thenReturn(executorService); } @After @@ -164,15 +221,13 @@ public void testUploadedSegmentMetadataFromStringException() { } public void testGetPrimaryTermGenerationUuid() { - String[] filenameTokens = "abc__9223372036854775795__9223372036854775784__uuid_xyz".split( - RemoteSegmentStoreDirectory.MetadataFilenameUtils.SEPARATOR - ); + String[] filenameTokens = "abc__9223372036854775795__9223372036854775784__uuid_xyz".split(SEPARATOR); assertEquals(12, RemoteSegmentStoreDirectory.MetadataFilenameUtils.getPrimaryTerm(filenameTokens)); assertEquals(23, RemoteSegmentStoreDirectory.MetadataFilenameUtils.getGeneration(filenameTokens)); } public void testInitException() throws IOException { - when(remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, 1)).thenThrow( + when(remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, 
METADATA_FILES_TO_FETCH)).thenThrow( new IOException("Error") ); @@ -191,92 +246,11 @@ public void testInitNoMetadataFile() throws IOException { assertEquals(Set.of(), actualCache.keySet()); } - private Map<String, String> getDummyMetadata(String prefix, int commitGeneration) { - Map<String, String> metadata = new HashMap<>(); - - metadata.put( - prefix + ".cfe", - prefix - + ".cfe::" - + prefix - + ".cfe__" - + UUIDs.base64UUID() - + "::" - + randomIntBetween(1000, 5000) - + "::" - + randomIntBetween(512000, 1024000) - + "::" - + Version.MIN_SUPPORTED_MAJOR - ); - metadata.put( - prefix + ".cfs", - prefix - + ".cfs::" - + prefix - + ".cfs__" - + UUIDs.base64UUID() - + "::" - + randomIntBetween(1000, 5000) - + "::" - + randomIntBetween(512000, 1024000) - + "::" - + Version.MIN_SUPPORTED_MAJOR - ); - metadata.put( - prefix + ".si", - prefix - + ".si::" - + prefix - + ".si__" - + UUIDs.base64UUID() - + "::" - + randomIntBetween(1000, 5000) - + "::" - + randomIntBetween(512000, 1024000) - + "::" - + Version.LATEST.major + public void testInitMultipleMetadataFile() throws IOException { + when(remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, METADATA_FILES_TO_FETCH)).thenReturn( + List.of(metadataFilename, metadataFilenameDup) ); - metadata.put( - "segments_" + commitGeneration, - "segments_" - + commitGeneration - + "::segments_" - + commitGeneration - + "__" - + UUIDs.base64UUID() - + "::" - + randomIntBetween(1000, 5000) - + "::" - + randomIntBetween(1024, 5120) - + "::" - + Version.LATEST.major - ); - return metadata; - } - - /** - * Prepares metadata file bytes with header and footer - * @param segmentFilesMap: actual metadata content - * @return ByteArrayIndexInput: metadata file bytes with header and footer - * @throws IOException IOException - */ - private ByteArrayIndexInput createMetadataFileBytes(Map<String, String> segmentFilesMap, long generation, long primaryTerm) - throws IOException { - ByteBuffersDataOutput byteBuffersIndexOutput = new ByteBuffersDataOutput(); - segmentInfos.write(new ByteBuffersIndexOutput(byteBuffersIndexOutput, "", "")); - byte[] byteArray = byteBuffersIndexOutput.toArrayCopy(); - - BytesStreamOutput output = new BytesStreamOutput(); - OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput("segment metadata", "metadata output stream", output, 4096); - CodecUtil.writeHeader(indexOutput, RemoteSegmentMetadata.METADATA_CODEC, RemoteSegmentMetadata.CURRENT_VERSION); - indexOutput.writeMapOfStrings(segmentFilesMap); - indexOutput.writeLong(generation); - indexOutput.writeLong(primaryTerm); - indexOutput.writeLong(byteArray.length); - indexOutput.writeBytes(byteArray, byteArray.length); - CodecUtil.writeFooter(indexOutput); - indexOutput.close(); - return new ByteArrayIndexInput("segment metadata", BytesReference.toBytes(output.bytes())); + assertThrows(IllegalStateException.class, () -> remoteSegmentStoreDirectory.init()); } private Map<String, Map<String, String>> populateMetadata() throws IOException { @@ -289,7 +263,7 @@ private Map<String, Map<String, String>> populateMetadata() throws IOException { when( remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(List.of(metadataFilename)); when( @@ -308,14 +282,26 @@ private Map<String, Map<String, String>> populateMetadata() throws IOException { getDummyMetadata("_0", 1) ); - 
when(remoteMetadataDirectory.openInput(metadataFilename, IOContext.DEFAULT)).thenAnswer( - I -> createMetadataFileBytes(metadataFilenameContentMapping.get(metadataFilename), 23, 12) + when(remoteMetadataDirectory.getBlobStream(metadataFilename)).thenAnswer( + I -> createMetadataFileBytes( + metadataFilenameContentMapping.get(metadataFilename), + indexShard.getLatestReplicationCheckpoint(), + segmentInfos + ) ); - when(remoteMetadataDirectory.openInput(metadataFilename2, IOContext.DEFAULT)).thenAnswer( - I -> createMetadataFileBytes(metadataFilenameContentMapping.get(metadataFilename2), 13, 12) + when(remoteMetadataDirectory.getBlobStream(metadataFilename2)).thenAnswer( + I -> createMetadataFileBytes( + metadataFilenameContentMapping.get(metadataFilename2), + indexShard.getLatestReplicationCheckpoint(), + segmentInfos + ) ); - when(remoteMetadataDirectory.openInput(metadataFilename3, IOContext.DEFAULT)).thenAnswer( - I -> createMetadataFileBytes(metadataFilenameContentMapping.get(metadataFilename3), 38, 10) + when(remoteMetadataDirectory.getBlobStream(metadataFilename3)).thenAnswer( + I -> createMetadataFileBytes( + metadataFilenameContentMapping.get(metadataFilename3), + indexShard.getLatestReplicationCheckpoint(), + segmentInfos + ) ); return metadataFilenameContentMapping; @@ -327,7 +313,7 @@ public void testInit() throws IOException { when( remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(List.of(metadataFilename)); @@ -383,7 +369,7 @@ public void testFileLength() throws IOException { assertEquals(uploadedSegments.get("_0.si").getLength(), remoteSegmentStoreDirectory.fileLength("_0.si")); } - public void testFileLenghtNoSuchFile() throws IOException { + public void testFileLengthNoSuchFile() throws IOException { populateMetadata(); remoteSegmentStoreDirectory.init(); @@ -412,7 +398,7 @@ public void testOpenInput() throws IOException { remoteSegmentStoreDirectory.init(); IndexInput indexInput = mock(IndexInput.class); - when(remoteDataDirectory.openInput(startsWith("_0.si"), eq(IOContext.DEFAULT))).thenReturn(indexInput); + when(remoteDataDirectory.openInput(startsWith("_0.si"), anyLong(), eq(IOContext.DEFAULT))).thenReturn(indexInput); assertEquals(indexInput, remoteSegmentStoreDirectory.openInput("_0.si", IOContext.DEFAULT)); } @@ -425,7 +411,7 @@ public void testOpenInputException() throws IOException { populateMetadata(); remoteSegmentStoreDirectory.init(); - when(remoteDataDirectory.openInput(startsWith("_0.si"), eq(IOContext.DEFAULT))).thenThrow(new IOException("Error")); + when(remoteDataDirectory.openInput(startsWith("_0.si"), anyLong(), eq(IOContext.DEFAULT))).thenThrow(new IOException("Error")); assertThrows(IOException.class, () -> remoteSegmentStoreDirectory.openInput("_0.si", IOContext.DEFAULT)); } @@ -516,6 +502,75 @@ public void testIsAcquiredException() throws IOException { assertThrows(NoSuchFileException.class, () -> remoteSegmentStoreDirectory.isLockAcquired(testPrimaryTerm, testGeneration)); } + private List<String> getDummyMetadataFiles(int count) { + List<String> sortedMetadataFiles = new ArrayList<>(); + for (int counter = 0; counter < count; counter++) { + sortedMetadataFiles.add(RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(counter, 23, 34, 1, 1, "node-1")); + } + return sortedMetadataFiles; + } + + public void testGetMetadataFilesForActiveSegments() throws IOException { + populateMetadata(); + 
remoteSegmentStoreDirectory.init(); + + // scenario 1: if activeSegments([[0, 1, 2], 3(l), 4(l), 5(l), 6(l), 7(l), 8(l), 9]) => [8] + List<String> sortedMdFiles = getDummyMetadataFiles(10); + Set<String> lockedMdFiles = new HashSet<>(); + for (int idx = 3; idx <= 8; idx++) { + lockedMdFiles.add(sortedMdFiles.get(idx)); + } + Set<String> expectedMdFilesForActiveSegments = Set.of(sortedMdFiles.get(8)); + assertEquals( + "scenario 1 failed", + expectedMdFilesForActiveSegments, + remoteSegmentStoreDirectory.getMetadataFilesToFilterActiveSegments(3, sortedMdFiles, lockedMdFiles) + ); + + // scenario 2: if activeSegments([[0, 1, 2], 3, 4, 5, 6(l), 7(l), 8(l), 9]) => [2, 6, 8] + lockedMdFiles.clear(); + lockedMdFiles.add(sortedMdFiles.get(6)); + lockedMdFiles.add(sortedMdFiles.get(7)); + lockedMdFiles.add(sortedMdFiles.get(8)); + expectedMdFilesForActiveSegments = Set.of(sortedMdFiles.get(2), sortedMdFiles.get(6), sortedMdFiles.get(8)); + assertEquals( + "scenario 2 failed", + expectedMdFilesForActiveSegments, + remoteSegmentStoreDirectory.getMetadataFilesToFilterActiveSegments(3, sortedMdFiles, lockedMdFiles) + ); + + // scenario 3: if activeSegments([[0, 1, 2], 3, 4, 5(l), 6, 7(l), 8(l), 9]) => [2, 5, 7, 8] + lockedMdFiles.clear(); + lockedMdFiles.add(sortedMdFiles.get(5)); + lockedMdFiles.add(sortedMdFiles.get(7)); + lockedMdFiles.add(sortedMdFiles.get(8)); + expectedMdFilesForActiveSegments = Set.of(sortedMdFiles.get(2), sortedMdFiles.get(5), sortedMdFiles.get(7), sortedMdFiles.get(8)); + assertEquals( + "scenario 3 failed", + expectedMdFilesForActiveSegments, + remoteSegmentStoreDirectory.getMetadataFilesToFilterActiveSegments(3, sortedMdFiles, lockedMdFiles) + ); + + // scenario 4: if activeSegments([[0(l), 1(l), 2(l), 3(l), 4(l), 5(l), 6(l), 7(l), 8(l), 9(l)]]) => [] + lockedMdFiles.addAll(sortedMdFiles); + expectedMdFilesForActiveSegments = Set.of(); + assertEquals( + "scenario 4 failed", + expectedMdFilesForActiveSegments, + remoteSegmentStoreDirectory.getMetadataFilesToFilterActiveSegments(0, sortedMdFiles, lockedMdFiles) + ); + + // scenario 5: if activeSegments([[0, 1, 2, 3]]) => [] + sortedMdFiles = sortedMdFiles.subList(0, 4); + lockedMdFiles.clear(); + expectedMdFilesForActiveSegments = Set.of(); + assertEquals( + "scenario 5 failed", + expectedMdFilesForActiveSegments, + remoteSegmentStoreDirectory.getMetadataFilesToFilterActiveSegments(4, sortedMdFiles, lockedMdFiles) + ); + } + public void testGetMetadataFileForCommit() throws IOException { long testPrimaryTerm = 2; long testGeneration = 3; @@ -528,7 +583,6 @@ public void testGetMetadataFileForCommit() throws IOException { String output = remoteSegmentStoreDirectory.getMetadataFileForCommit(testPrimaryTerm, testGeneration); assertEquals("metadata__" + testPrimaryTerm + "__" + testGeneration + "__pqr", output); - } public void testCopyFrom() throws IOException { @@ -564,7 +618,7 @@ public void testCopyFilesFromMultipart() throws Exception { assertFalse(remoteSegmentStoreDirectory.getSegmentsUploadedToRemoteStore().containsKey(filename)); - VerifyingMultiStreamBlobContainer blobContainer = mock(VerifyingMultiStreamBlobContainer.class); + AsyncMultiStreamBlobContainer blobContainer = mock(AsyncMultiStreamBlobContainer.class); when(remoteDataDirectory.getBlobContainer()).thenReturn(blobContainer); Mockito.doAnswer(invocation -> { ActionListener<Void> completionListener = invocation.getArgument(1); @@ -590,6 +644,16 @@ public void onFailure(Exception e) {} public void testCopyFilesFromMultipartIOException() throws Exception { String
filename = "_100.si"; + AsyncMultiStreamBlobContainer blobContainer = mock(AsyncMultiStreamBlobContainer.class); + remoteDataDirectory = new RemoteDirectory(blobContainer); + remoteSegmentStoreDirectory = new RemoteSegmentStoreDirectory( + remoteDataDirectory, + remoteMetadataDirectory, + mdLockManager, + threadPool, + indexShard.shardId() + ); + populateMetadata(); remoteSegmentStoreDirectory.init(); @@ -601,9 +665,6 @@ public void testCopyFilesFromMultipartIOException() throws Exception { storeDirectory.sync(List.of(filename)); assertFalse(remoteSegmentStoreDirectory.getSegmentsUploadedToRemoteStore().containsKey(filename)); - - VerifyingMultiStreamBlobContainer blobContainer = mock(VerifyingMultiStreamBlobContainer.class); - when(remoteDataDirectory.getBlobContainer()).thenReturn(blobContainer); Mockito.doAnswer(invocation -> { ActionListener<Void> completionListener = invocation.getArgument(1); completionListener.onFailure(new Exception("Test exception")); @@ -626,6 +687,29 @@ public void onFailure(Exception e) { storeDirectory.close(); } + public void testCleanupAsync() throws Exception { + populateMetadata(); + RemoteSegmentStoreDirectoryFactory remoteSegmentStoreDirectoryFactory = mock(RemoteSegmentStoreDirectoryFactory.class); + RemoteSegmentStoreDirectory remoteSegmentDirectory = new RemoteSegmentStoreDirectory( + remoteDataDirectory, + remoteMetadataDirectory, + mdLockManager, + threadPool, + indexShard.shardId() + ); + when(remoteSegmentStoreDirectoryFactory.newDirectory(any(), any(), any())).thenReturn(remoteSegmentDirectory); + String repositoryName = "test-repository"; + String indexUUID = "test-idx-uuid"; + ShardId shardId = new ShardId(Index.UNKNOWN_INDEX_NAME, indexUUID, Integer.parseInt("0")); + + RemoteSegmentStoreDirectory.remoteDirectoryCleanup(remoteSegmentStoreDirectoryFactory, repositoryName, indexUUID, shardId); + verify(remoteSegmentStoreDirectoryFactory).newDirectory(repositoryName, indexUUID, shardId); + verify(threadPool, times(0)).executor(ThreadPool.Names.REMOTE_PURGE); + verify(remoteMetadataDirectory).delete(); + verify(remoteDataDirectory).delete(); + verify(mdLockManager).delete(); + } + public void testCopyFromException() throws IOException { String filename = "_100.si"; Directory storeDirectory = LuceneTestCase.newDirectory(); @@ -643,7 +727,7 @@ public void testContainsFile() throws IOException { when( remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(metadataFiles); @@ -651,7 +735,9 @@ public void testContainsFile() throws IOException { metadata.put("_0.cfe", "_0.cfe::_0.cfe__" + UUIDs.base64UUID() + "::1234::512::" + Version.LATEST.major); metadata.put("_0.cfs", "_0.cfs::_0.cfs__" + UUIDs.base64UUID() + "::2345::1024::" + Version.LATEST.major); - when(remoteMetadataDirectory.openInput(metadataFilename, IOContext.DEFAULT)).thenReturn(createMetadataFileBytes(metadata, 1, 5)); + when(remoteMetadataDirectory.getBlobStream(metadataFilename)).thenReturn( + createMetadataFileBytes(metadata, indexShard.getLatestReplicationCheckpoint(), segmentInfos) + ); remoteSegmentStoreDirectory.init(); @@ -676,12 +762,20 @@ public void testContainsFile() throws IOException { public void testUploadMetadataEmpty() throws IOException { Directory storeDirectory = mock(Directory.class); IndexOutput indexOutput = mock(IndexOutput.class); - when(storeDirectory.createOutput(startsWith("metadata__12__o"), eq(IOContext.DEFAULT))).thenReturn(indexOutput); + 
final long primaryTerm = indexShard.getOperationPrimaryTerm(); + when(storeDirectory.createOutput(startsWith("metadata__" + primaryTerm + "__o"), eq(IOContext.DEFAULT))).thenReturn(indexOutput); Collection<String> segmentFiles = List.of("_s1.si", "_s1.cfe", "_s3.cfs"); assertThrows( NoSuchFileException.class, - () -> remoteSegmentStoreDirectory.uploadMetadata(segmentFiles, segmentInfos, storeDirectory, 12L, 34L) + () -> remoteSegmentStoreDirectory.uploadMetadata( + segmentFiles, + segmentInfos, + storeDirectory, + 34L, + indexShard.getLatestReplicationCheckpoint(), + "" + ) ); } @@ -689,7 +783,7 @@ public void testUploadMetadataNonEmpty() throws IOException { indexDocs(142364, 5); flushShard(indexShard, true); SegmentInfos segInfos = indexShard.store().readLastCommittedSegmentsInfo(); - long primaryTerm = 12; + long primaryTerm = indexShard.getLatestReplicationCheckpoint().getPrimaryTerm(); String primaryTermLong = RemoteStoreUtils.invertLong(primaryTerm); long generation = segInfos.getGeneration(); String generationLong = RemoteStoreUtils.invertLong(generation); @@ -698,15 +792,19 @@ public void testUploadMetadataNonEmpty() throws IOException { when( remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(metadataFiles); Map<String, Map<String, String>> metadataFilenameContentMapping = Map.of( latestMetadataFileName, getDummyMetadata("_0", (int) generation) ); - when(remoteMetadataDirectory.openInput(latestMetadataFileName, IOContext.DEFAULT)).thenReturn( - createMetadataFileBytes(metadataFilenameContentMapping.get(latestMetadataFileName), generation, primaryTerm) + when(remoteMetadataDirectory.getBlobStream(latestMetadataFileName)).thenReturn( + createMetadataFileBytes( + metadataFilenameContentMapping.get(latestMetadataFileName), + indexShard.getLatestReplicationCheckpoint(), + segmentInfos + ) ); remoteSegmentStoreDirectory.init(); @@ -717,7 +815,14 @@ public void testUploadMetadataNonEmpty() throws IOException { when(storeDirectory.createOutput(startsWith("metadata__" + primaryTermLong + "__" + generationLong), eq(IOContext.DEFAULT))) .thenReturn(indexOutput); - remoteSegmentStoreDirectory.uploadMetadata(segInfos.files(true), segInfos, storeDirectory, primaryTerm, generation); + remoteSegmentStoreDirectory.uploadMetadata( + segInfos.files(true), + segInfos, + storeDirectory, + generation, + indexShard.getLatestReplicationCheckpoint(), + "" + ); verify(remoteMetadataDirectory).copyFrom( eq(storeDirectory), @@ -741,6 +846,36 @@ public void testUploadMetadataNonEmpty() throws IOException { } } + public void testUploadMetadataMissingSegment() throws IOException { + populateMetadata(); + remoteSegmentStoreDirectory.init(); + + Directory storeDirectory = mock(Directory.class); + IndexOutput indexOutput = mock(IndexOutput.class); + + String generation = RemoteStoreUtils.invertLong(segmentInfos.getGeneration()); + long primaryTermLong = indexShard.getLatestReplicationCheckpoint().getPrimaryTerm(); + String primaryTerm = RemoteStoreUtils.invertLong(primaryTermLong); + when(storeDirectory.createOutput(startsWith("metadata__" + primaryTerm + "__" + generation), eq(IOContext.DEFAULT))).thenReturn( + indexOutput + ); + + Collection<String> segmentFiles = List.of("_123.si"); + assertThrows( + NoSuchFileException.class, + () -> remoteSegmentStoreDirectory.uploadMetadata( + segmentFiles, + segmentInfos, + storeDirectory, + 12L, + indexShard.getLatestReplicationCheckpoint(), + "" + ) 
+ ); + verify(indexOutput).close(); + verify(storeDirectory).deleteFile(startsWith("metadata__" + primaryTerm + "__" + generation)); + } + public void testUploadMetadataNoSegmentCommitInfos() throws IOException { SegmentInfos segInfos = indexShard.store().readLastCommittedSegmentsInfo(); int numSegCommitInfos = segInfos.size(); @@ -757,7 +892,7 @@ public void testNoMetadataHeaderCorruptIndexException() throws IOException { when( remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(metadataFiles); @@ -769,8 +904,8 @@ public void testNoMetadataHeaderCorruptIndexException() throws IOException { OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput("segment metadata", "metadata output stream", output, 4096); indexOutput.writeMapOfStrings(metadata); indexOutput.close(); - ByteArrayIndexInput byteArrayIndexInput = new ByteArrayIndexInput("segment metadata", BytesReference.toBytes(output.bytes())); - when(remoteMetadataDirectory.openInput(metadataFilename, IOContext.DEFAULT)).thenReturn(byteArrayIndexInput); + ByteArrayInputStream inputStream = new ByteArrayInputStream(BytesReference.toBytes(output.bytes())); + when(remoteMetadataDirectory.getBlobStream(metadataFilename)).thenReturn(inputStream); assertThrows(CorruptIndexException.class, () -> remoteSegmentStoreDirectory.init()); } @@ -780,7 +915,7 @@ public void testInvalidCodecHeaderCorruptIndexException() throws IOException { when( remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(metadataFiles); @@ -794,8 +929,8 @@ public void testInvalidCodecHeaderCorruptIndexException() throws IOException { indexOutput.writeMapOfStrings(metadata); CodecUtil.writeFooter(indexOutput); indexOutput.close(); - ByteArrayIndexInput byteArrayIndexInput = new ByteArrayIndexInput("segment metadata", BytesReference.toBytes(output.bytes())); - when(remoteMetadataDirectory.openInput(metadataFilename, IOContext.DEFAULT)).thenReturn(byteArrayIndexInput); + ByteArrayInputStream inputStream = new ByteArrayInputStream(BytesReference.toBytes(output.bytes())); + when(remoteMetadataDirectory.getBlobStream(metadataFilename)).thenReturn(inputStream); assertThrows(CorruptIndexException.class, () -> remoteSegmentStoreDirectory.init()); } @@ -805,7 +940,7 @@ public void testHeaderMinVersionCorruptIndexException() throws IOException { when( remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(metadataFiles); @@ -819,8 +954,8 @@ public void testHeaderMinVersionCorruptIndexException() throws IOException { indexOutput.writeMapOfStrings(metadata); CodecUtil.writeFooter(indexOutput); indexOutput.close(); - ByteArrayIndexInput byteArrayIndexInput = new ByteArrayIndexInput("segment metadata", BytesReference.toBytes(output.bytes())); - when(remoteMetadataDirectory.openInput(metadataFilename, IOContext.DEFAULT)).thenReturn(byteArrayIndexInput); + ByteArrayInputStream inputStream = new ByteArrayInputStream(BytesReference.toBytes(output.bytes())); + when(remoteMetadataDirectory.getBlobStream(metadataFilename)).thenReturn(inputStream); assertThrows(IndexFormatTooOldException.class, () -> remoteSegmentStoreDirectory.init()); } @@ -830,7 +965,7 @@ public void testHeaderMaxVersionCorruptIndexException() throws IOException { 
when( remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(metadataFiles); @@ -844,8 +979,8 @@ public void testHeaderMaxVersionCorruptIndexException() throws IOException { indexOutput.writeMapOfStrings(metadata); CodecUtil.writeFooter(indexOutput); indexOutput.close(); - ByteArrayIndexInput byteArrayIndexInput = new ByteArrayIndexInput("segment metadata", BytesReference.toBytes(output.bytes())); - when(remoteMetadataDirectory.openInput(metadataFilename, IOContext.DEFAULT)).thenReturn(byteArrayIndexInput); + ByteArrayInputStream inputStream = new ByteArrayInputStream(BytesReference.toBytes(output.bytes())); + when(remoteMetadataDirectory.getBlobStream(metadataFilename)).thenReturn(inputStream); assertThrows(IndexFormatTooNewException.class, () -> remoteSegmentStoreDirectory.init()); } @@ -855,7 +990,7 @@ public void testIncorrectChecksumCorruptIndexException() throws IOException { when( remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(metadataFiles); @@ -873,8 +1008,8 @@ public void testIncorrectChecksumCorruptIndexException() throws IOException { CodecUtil.writeFooter(indexOutputSpy); indexOutputSpy.close(); - ByteArrayIndexInput byteArrayIndexInput = new ByteArrayIndexInput("segment metadata", BytesReference.toBytes(output.bytes())); - when(remoteMetadataDirectory.openInput(metadataFilename, IOContext.DEFAULT)).thenReturn(byteArrayIndexInput); + ByteArrayInputStream inputStream = new ByteArrayInputStream(BytesReference.toBytes(output.bytes())); + when(remoteMetadataDirectory.getBlobStream(metadataFilename)).thenReturn(inputStream); assertThrows(CorruptIndexException.class, () -> remoteSegmentStoreDirectory.init()); } @@ -934,21 +1069,130 @@ public void testDeleteStaleCommitsWithinThreshold() throws Exception { verify(remoteMetadataDirectory, times(0)).openInput(any(String.class), eq(IOContext.DEFAULT)); } + @TestLogging(value = "_root:debug", reason = "Validate logging output") public void testDeleteStaleCommitsActualDelete() throws Exception { + try (final MockLogAppender appender = MockLogAppender.createForLoggers(LogManager.getRootLogger())) { + appender.addExpectation( + new MockLogAppender.PatternSeenWithLoggerPrefixExpectation( + "Metadata files to delete message", + "org.opensearch.index.store.RemoteSegmentStoreDirectory", + Level.DEBUG, + "metadataFilesEligibleToDelete=\\[" + metadataFilename3 + "\\] metadataFilesToBeDeleted=\\[" + metadataFilename3 + "\\]" + ) + ); + + final Map<String, Map<String, String>> metadataFilenameContentMapping = populateMetadata(); + final List<String> filesToBeDeleted = metadataFilenameContentMapping.get(metadataFilename3) + .values() + .stream() + .map(metadata -> metadata.split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1]) + .collect(Collectors.toList()); + + remoteSegmentStoreDirectory.init(); + + // populateMetadata() adds stub to return 3 metadata files + // We are passing lastNMetadataFilesToKeep=2 here so that the oldest metadata file will be deleted + remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(2); + + for (final String file : filesToBeDeleted) { + verify(remoteDataDirectory).deleteFile(file); + } + assertBusy(() -> assertThat(remoteSegmentStoreDirectory.canDeleteStaleCommits.get(), is(true))); + verify(remoteMetadataDirectory).deleteFile(metadataFilename3); +
appender.assertAllExpectationsMatched(); + } + } + + public void testDeleteStaleCommitsActualDeleteWithLocks() throws Exception { Map<String, Map<String, String>> metadataFilenameContentMapping = populateMetadata(); remoteSegmentStoreDirectory.init(); + // Locking one of the metadata files to ensure that it is not getting deleted. + when(mdLockManager.fetchLockedMetadataFiles(any())).thenReturn(Set.of(metadataFilename2)); + // populateMetadata() adds stub to return 3 metadata files - // We are passing lastNMetadataFilesToKeep=2 here so that oldest 1 metadata file will be deleted - remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(2); + // We are passing lastNMetadataFilesToKeep=1 here so that the 2 oldest metadata files are eligible for deletion + remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(1); for (String metadata : metadataFilenameContentMapping.get(metadataFilename3).values()) { String uploadedFilename = metadata.split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1]; verify(remoteDataDirectory).deleteFile(uploadedFilename); } - ; assertBusy(() -> assertThat(remoteSegmentStoreDirectory.canDeleteStaleCommits.get(), is(true))); verify(remoteMetadataDirectory).deleteFile(metadataFilename3); + verify(remoteMetadataDirectory, times(0)).deleteFile(metadataFilename2); + } + + public void testDeleteStaleCommitsNoDeletesDueToLocks() throws Exception { + remoteSegmentStoreDirectory.init(); + + // Locking all the old metadata files to ensure that none of the segment files are getting deleted. + when(mdLockManager.fetchLockedMetadataFiles(any())).thenReturn(Set.of(metadataFilename2, metadataFilename3)); + + // populateMetadata() adds stub to return 3 metadata files + // We are passing lastNMetadataFilesToKeep=1 here, but both older metadata files are locked, so nothing should be deleted + remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(1); + + assertBusy(() -> assertThat(remoteSegmentStoreDirectory.canDeleteStaleCommits.get(), is(true))); + verify(remoteMetadataDirectory, times(0)).deleteFile(any()); + } + + public void testDeleteStaleCommitsExceptionWhileFetchingLocks() throws Exception { + remoteSegmentStoreDirectory.init(); + + // Simulate a failure while fetching the locked metadata files.
+ when(mdLockManager.fetchLockedMetadataFiles(any())).thenThrow(new RuntimeException("Rate limit exceeded")); + + // populateMetadata() adds stub to return 3 metadata files + // We are passing lastNMetadataFilesToKeep=1 here, but fetching the locked files fails, so nothing should be deleted + remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(1); + + verify(remoteMetadataDirectory, times(0)).deleteFile(any()); + } + + public void testDeleteStaleCommitsDeleteDedup() throws Exception { + Map<String, Map<String, String>> metadataFilenameContentMapping = new HashMap<>(populateMetadata()); + metadataFilenameContentMapping.put(metadataFilename4, metadataFilenameContentMapping.get(metadataFilename3)); + + when( + remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( + RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, + Integer.MAX_VALUE + ) + ).thenReturn(new ArrayList<>(List.of(metadataFilename, metadataFilename2, metadataFilename3, metadataFilename4))); + + when(remoteMetadataDirectory.getBlobStream(metadataFilename4)).thenAnswer( + I -> createMetadataFileBytes( + metadataFilenameContentMapping.get(metadataFilename4), + indexShard.getLatestReplicationCheckpoint(), + segmentInfos + ) + ); + + remoteSegmentStoreDirectory.init(); + + // populateMetadata() adds stub to return 4 metadata files + // We are passing lastNMetadataFilesToKeep=2 here so that the 2 oldest metadata files will be deleted + remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(2); + + Set<String> staleSegmentFiles = new HashSet<>(); + for (String metadata : metadataFilenameContentMapping.get(metadataFilename3).values()) { + staleSegmentFiles.add(metadata.split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1]); + } + for (String metadata : metadataFilenameContentMapping.get(metadataFilename4).values()) { + staleSegmentFiles.add(metadata.split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1]); + } + staleSegmentFiles.forEach(file -> { + try { + // Even with the same files in 2 stale metadata files, delete should be called only once.
+ verify(remoteDataDirectory, times(1)).deleteFile(file); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + assertBusy(() -> assertThat(remoteSegmentStoreDirectory.canDeleteStaleCommits.get(), is(true))); + verify(remoteMetadataDirectory).deleteFile(metadataFilename3); + verify(remoteMetadataDirectory).deleteFile(metadataFilename4); } public void testDeleteStaleCommitsActualDeleteIOException() throws Exception { @@ -1016,17 +1260,21 @@ private void indexDocs(int startDocId, int numberOfDocs) throws IOException { } public void testMetadataFileNameOrder() { - String file1 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 21, 23, 1, 1); - String file2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 38, 1, 1); - String file3 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(18, 12, 26, 1, 1); - String file4 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 32, 10, 1); - String file5 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 32, 1, 1); - String file6 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 32, 5, 1); + String file1 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 21, 23, 1, 1, ""); + String file2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 38, 1, 1, ""); + String file3 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(18, 12, 26, 1, 1, ""); + String file4 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 32, 10, 1, ""); + String file5 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 32, 1, 1, ""); + String file6 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 32, 5, 1, ""); List<String> actualList = new ArrayList<>(List.of(file1, file2, file3, file4, file5, file6)); actualList.sort(String::compareTo); assertEquals(List.of(file3, file2, file4, file6, file5, file1), actualList); + + long count = file1.chars().filter(ch -> ch == SEPARATOR.charAt(0)).count(); + // There should not be any stray `_` in the mdFile name as `_` is used as the separator. + assertEquals(14, count); } private static class WrapperIndexOutput extends IndexOutput { diff --git a/server/src/test/java/org/opensearch/index/store/RemoteStoreFileDownloaderTests.java b/server/src/test/java/org/opensearch/index/store/RemoteStoreFileDownloaderTests.java new file mode 100644 index 0000000000000..6d8b3fe4d69fb --- /dev/null +++ b/server/src/test/java/org/opensearch/index/store/RemoteStoreFileDownloaderTests.java @@ -0,0 +1,228 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.index.store; + +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.FilterDirectory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.store.NIOFSDirectory; +import org.opensearch.OpenSearchTimeoutException; +import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.CancellableThreads; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.io.EOFException; +import java.io.IOException; +import java.nio.file.NoSuchFileException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +public class RemoteStoreFileDownloaderTests extends OpenSearchTestCase { + + private ThreadPool threadPool; + private Directory source; + private Directory destination; + private Directory secondDestination; + private RemoteStoreFileDownloader fileDownloader; + private Map<String, Integer> files = new HashMap<>(); + + @Before + public void setup() throws IOException { + final int streamLimit = randomIntBetween(1, 20); + final RecoverySettings recoverySettings = new RecoverySettings( + Settings.builder().put("indices.recovery.max_concurrent_remote_store_streams", streamLimit).build(), + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + threadPool = new TestThreadPool(getTestName()); + source = new NIOFSDirectory(createTempDir()); + destination = new NIOFSDirectory(createTempDir()); + secondDestination = new NIOFSDirectory(createTempDir()); + for (int i = 0; i < 10; i++) { + final String filename = "file_" + i; + final int content = randomInt(); + try (IndexOutput output = source.createOutput(filename, IOContext.DEFAULT)) { + output.writeInt(content); + } + files.put(filename, content); + } + fileDownloader = new RemoteStoreFileDownloader( + ShardId.fromString("[RemoteStoreFileDownloaderTests][0]"), + threadPool, + recoverySettings + ); + } + + @After + public void stopThreadPool() throws Exception { + threadPool.shutdown(); + assertTrue(threadPool.awaitTermination(5, TimeUnit.SECONDS)); + } + + public void testDownload() throws IOException { + final PlainActionFuture<Void> l = new PlainActionFuture<>(); + fileDownloader.downloadAsync(new CancellableThreads(), source, destination, files.keySet(), l); + l.actionGet(); + assertContent(files, destination); + } + + public void testDownloadWithSecondDestination() throws IOException, InterruptedException { + fileDownloader.download(source, destination, secondDestination, files.keySet(), () -> {}); + assertContent(files, destination); + assertContent(files, secondDestination); + } + + public void testDownloadWithFileCompletionHandler() throws IOException, InterruptedException { + final AtomicInteger counter = new AtomicInteger(0); + 
fileDownloader.download(source, destination, null, files.keySet(), counter::incrementAndGet); + assertContent(files, destination); + assertEquals(files.size(), counter.get()); + } + + public void testDownloadNonExistentFile() throws InterruptedException { + final CountDownLatch latch = new CountDownLatch(1); + fileDownloader.downloadAsync(new CancellableThreads(), source, destination, Set.of("not real"), new ActionListener<>() { + @Override + public void onResponse(Void unused) {} + + @Override + public void onFailure(Exception e) { + assertEquals(NoSuchFileException.class, e.getClass()); + latch.countDown(); + } + }); + assertTrue(latch.await(10, TimeUnit.SECONDS)); + } + + public void testDownloadExtraNonExistentFile() throws InterruptedException { + final CountDownLatch latch = new CountDownLatch(1); + final List<String> filesWithExtra = new ArrayList<>(files.keySet()); + filesWithExtra.add("not real"); + fileDownloader.downloadAsync(new CancellableThreads(), source, destination, filesWithExtra, new ActionListener<>() { + @Override + public void onResponse(Void unused) {} + + @Override + public void onFailure(Exception e) { + assertEquals(NoSuchFileException.class, e.getClass()); + latch.countDown(); + } + }); + assertTrue(latch.await(10, TimeUnit.SECONDS)); + } + + public void testCancellable() { + final CancellableThreads cancellableThreads = new CancellableThreads(); + final PlainActionFuture<Void> blockingListener = new PlainActionFuture<>(); + final Directory blockingDestination = new FilterDirectory(destination) { + @Override + public void copyFrom(Directory from, String src, String dest, IOContext context) { + try { + Thread.sleep(60_000); // Will be interrupted + fail("Expected to be interrupted"); + } catch (InterruptedException e) { + throw new RuntimeException("Failed due to interrupt", e); + } + } + }; + fileDownloader.downloadAsync(cancellableThreads, source, blockingDestination, files.keySet(), blockingListener); + assertThrows( + "Expected to timeout due to blocking directory", + OpenSearchTimeoutException.class, + () -> blockingListener.actionGet(TimeValue.timeValueMillis(500)) + ); + cancellableThreads.cancel("test"); + assertThrows( + "Expected to complete with cancellation failure", + CancellableThreads.ExecutionCancelledException.class, + blockingListener::actionGet + ); + } + + public void testBlockingCallCanBeInterrupted() throws Exception { + final Directory blockingDestination = new FilterDirectory(destination) { + @Override + public void copyFrom(Directory from, String src, String dest, IOContext context) { + try { + Thread.sleep(60_000); // Will be interrupted + fail("Expected to be interrupted"); + } catch (InterruptedException e) { + throw new RuntimeException("Failed due to interrupt", e); + } + } + }; + final AtomicReference<Exception> capturedException = new AtomicReference<>(); + final Thread thread = new Thread(() -> { + try { + fileDownloader.download(source, blockingDestination, null, files.keySet(), () -> {}); + } catch (Exception e) { + capturedException.set(e); + } + }); + thread.start(); + thread.interrupt(); + thread.join(); + assertEquals(InterruptedException.class, capturedException.get().getClass()); + } + + public void testIOException() throws IOException, InterruptedException { + final Directory failureDirectory = new FilterDirectory(destination) { + @Override + public void copyFrom(Directory from, String src, String dest, IOContext context) throws IOException { + throw new IOException("test"); + } + }; + assertThrows(IOException.class, () -> 
fileDownloader.download(source, failureDirectory, null, files.keySet(), () -> {})); + + final CountDownLatch latch = new CountDownLatch(1); + fileDownloader.downloadAsync(new CancellableThreads(), source, failureDirectory, files.keySet(), new ActionListener<>() { + @Override + public void onResponse(Void unused) {} + + @Override + public void onFailure(Exception e) { + assertEquals(IOException.class, e.getClass()); + latch.countDown(); + } + }); + assertTrue(latch.await(10, TimeUnit.SECONDS)); + } + + private static void assertContent(Map<String, Integer> expected, Directory destination) throws IOException { + // Note that Lucene will randomly write extra files (see org.apache.lucene.tests.mockfile.ExtraFS) + // so we just need to check that all the expected files are present but not that _only_ the expected + // files are present + final Set<String> actualFiles = Set.of(destination.listAll()); + for (String file : expected.keySet()) { + assertTrue(actualFiles.contains(file)); + try (IndexInput input = destination.openInput(file, IOContext.DEFAULT)) { + assertEquals(expected.get(file), Integer.valueOf(input.readInt())); + assertThrows(EOFException.class, input::readByte); + } + } + } +} diff --git a/server/src/test/java/org/opensearch/index/store/StoreTests.java b/server/src/test/java/org/opensearch/index/store/StoreTests.java index 9043dcce1b779..ab30a4c1c435f 100644 --- a/server/src/test/java/org/opensearch/index/store/StoreTests.java +++ b/server/src/test/java/org/opensearch/index/store/StoreTests.java @@ -64,33 +64,35 @@ import org.apache.lucene.tests.util.TestUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Version; -import org.hamcrest.Matchers; import org.opensearch.ExceptionsHelper; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.UUIDs; -import org.opensearch.core.common.io.stream.InputStreamStreamInput; -import org.opensearch.core.common.io.stream.OutputStreamStreamOutput; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.io.IOUtils; -import org.opensearch.env.ShardLock; +import org.opensearch.core.common.io.stream.InputStreamStreamInput; +import org.opensearch.core.common.io.stream.OutputStreamStreamOutput; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.env.ShardLock; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; import org.opensearch.index.engine.Engine; import org.opensearch.index.seqno.ReplicationTracker; import org.opensearch.index.seqno.RetentionLease; import org.opensearch.index.seqno.SequenceNumbers; -import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.shard.ShardPath; +import org.opensearch.index.translog.Translog; import org.opensearch.indices.replication.common.ReplicationType; -import org.opensearch.indices.store.TransportNodesListShardStoreMetadata; +import org.opensearch.indices.store.TransportNodesListShardStoreMetadataHelper.StoreFilesMetadata; import org.opensearch.test.DummyShardLock; import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.IndexSettingsModule; import org.opensearch.test.OpenSearchTestCase; +import org.hamcrest.Matchers; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; @@ -100,7 +102,6 @@ import 
java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.Date; import java.util.HashMap; @@ -109,10 +110,11 @@ import java.util.Map; import java.util.Random; import java.util.concurrent.atomic.AtomicInteger; -import java.util.stream.Collectors; -import java.util.stream.Stream; import static java.util.Collections.unmodifiableMap; +import static org.opensearch.index.seqno.SequenceNumbers.LOCAL_CHECKPOINT_KEY; +import static org.opensearch.index.store.remote.directory.RemoteSnapshotDirectory.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY_MINIMUM_VERSION; +import static org.opensearch.test.VersionUtils.randomVersion; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; @@ -124,9 +126,6 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; -import static org.opensearch.index.seqno.SequenceNumbers.LOCAL_CHECKPOINT_KEY; -import static org.opensearch.index.store.remote.directory.RemoteSnapshotDirectory.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY_MINIMUM_VERSION; -import static org.opensearch.test.VersionUtils.randomVersion; public class StoreTests extends OpenSearchTestCase { @@ -800,7 +799,7 @@ public void testOnCloseCallback() throws IOException { assertEquals(shardId, theLock.getShardId()); assertEquals(lock, theLock); count.incrementAndGet(); - }); + }, null); assertEquals(count.get(), 0); final int iters = randomIntBetween(1, 10); @@ -811,6 +810,26 @@ public void testOnCloseCallback() throws IOException { assertEquals(count.get(), 1); } + public void testStoreShardPath() { + final ShardId shardId = new ShardId("index", "_na_", 1); + final Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, org.opensearch.Version.CURRENT) + .put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.timeValueMinutes(0)) + .build(); + final Path path = createTempDir().resolve(shardId.getIndex().getUUID()).resolve(String.valueOf(shardId.id())); + final ShardPath shardPath = new ShardPath(false, path, path, shardId); + final Store store = new Store( + shardId, + IndexSettingsModule.newIndexSettings("index", settings), + StoreTests.newDirectory(random()), + new DummyShardLock(shardId), + Store.OnClose.EMPTY, + shardPath + ); + assertEquals(shardPath, store.shardPath()); + store.close(); + } + public void testStoreStats() throws IOException { final ShardId shardId = new ShardId("index", "_na_", 1); Settings settings = Settings.builder() @@ -961,12 +980,11 @@ public void testStreamStoreFilesMetadata() throws Exception { ) ); } - TransportNodesListShardStoreMetadata.StoreFilesMetadata outStoreFileMetadata = - new TransportNodesListShardStoreMetadata.StoreFilesMetadata( - new ShardId("test", "_na_", 0), - metadataSnapshot, - peerRecoveryRetentionLeases - ); + StoreFilesMetadata outStoreFileMetadata = new StoreFilesMetadata( + new ShardId("test", "_na_", 0), + metadataSnapshot, + peerRecoveryRetentionLeases + ); ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer); org.opensearch.Version targetNodeVersion = randomVersion(random()); @@ -975,8 +993,7 @@ public void testStreamStoreFilesMetadata() throws Exception { ByteArrayInputStream inBuffer = new ByteArrayInputStream(outBuffer.toByteArray()); InputStreamStreamInput in = new 
InputStreamStreamInput(inBuffer); in.setVersion(targetNodeVersion); - TransportNodesListShardStoreMetadata.StoreFilesMetadata inStoreFileMetadata = - new TransportNodesListShardStoreMetadata.StoreFilesMetadata(in); + StoreFilesMetadata inStoreFileMetadata = new StoreFilesMetadata(in); Iterator<StoreFileMetadata> outFiles = outStoreFileMetadata.iterator(); for (StoreFileMetadata inFile : inStoreFileMetadata) { assertThat(inFile.name(), equalTo(outFiles.next().name())); @@ -1169,47 +1186,40 @@ public void testGetMetadataWithSegmentInfos() throws IOException { store.close(); } - public void testCleanupAndPreserveLatestCommitPoint() throws IOException { + public void testCreateEmptyStore() throws IOException { final ShardId shardId = new ShardId("index", "_na_", 1); - Store store = new Store( - shardId, - SEGMENT_REPLICATION_INDEX_SETTINGS, - StoreTests.newDirectory(random()), - new DummyShardLock(shardId) - ); - commitRandomDocs(store); - - Store.MetadataSnapshot commitMetadata = store.getMetadata(); - - // index more docs but only IW.flush, this will create additional files we'll clean up. - final IndexWriter writer = indexRandomDocs(store); - writer.flush(); - writer.close(); - - final List<String> additionalSegments = new ArrayList<>(); - for (String file : store.directory().listAll()) { - if (commitMetadata.contains(file) == false) { - additionalSegments.add(file); - } - } - assertFalse(additionalSegments.isEmpty()); + Store store = new Store(shardId, INDEX_SETTINGS, new NIOFSDirectory(createTempDir()), new DummyShardLock(shardId)); + store.createEmpty(Version.LATEST); + SegmentInfos segmentInfos = Lucene.readSegmentInfos(store.directory()); + assertFalse(segmentInfos.getUserData().containsKey(Translog.TRANSLOG_UUID_KEY)); + testDefaultUserData(segmentInfos); + store.close(); + } - Collection<String> filesToConsiderForCleanUp = Stream.of(store.readLastCommittedSegmentsInfo().files(true), additionalSegments) - .flatMap(Collection::stream) - .collect(Collectors.toList()); + public void testCreateEmptyStoreWithTranlogUUID() throws IOException { + final ShardId shardId = new ShardId("index", "_na_", 1); + Store store = new Store(shardId, INDEX_SETTINGS, new NIOFSDirectory(createTempDir()), new DummyShardLock(shardId)); + store.createEmpty(Version.LATEST, "dummy-translog-UUID"); + SegmentInfos segmentInfos = Lucene.readSegmentInfos(store.directory()); + assertEquals("dummy-translog-UUID", segmentInfos.getUserData().get(Translog.TRANSLOG_UUID_KEY)); + testDefaultUserData(segmentInfos); + store.close(); + } - // clean up everything not in the latest commit point. 
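
The StoreFilesMetadata test above uses the standard wire-serialization round-trip pattern: write with an explicit target-node version, read back at the same version, then compare. A minimal sketch of that pattern, assuming the OpenSearch test classpath; `MyWriteable` is a hypothetical Writeable (constructor reads from a StreamInput, `writeTo` writes to a StreamOutput) used only for illustration:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import org.opensearch.core.common.io.stream.InputStreamStreamInput;
    import org.opensearch.core.common.io.stream.OutputStreamStreamOutput;

    // Serialize and deserialize at the same wire version, as the test above does.
    static MyWriteable roundTrip(MyWriteable original, org.opensearch.Version wireVersion) throws IOException {
        ByteArrayOutputStream outBuffer = new ByteArrayOutputStream();
        OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer);
        out.setVersion(wireVersion);        // serialize as the target node would receive it
        original.writeTo(out);

        InputStreamStreamInput in = new InputStreamStreamInput(new ByteArrayInputStream(outBuffer.toByteArray()));
        in.setVersion(wireVersion);         // deserialize at the same wire version
        return new MyWriteable(in);         // caller asserts field-by-field equality with `original`
    }

Picking the version with `randomVersion(random())`, as above, exercises the version-dependent branches of the serialization code across test runs.
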
- store.cleanupAndPreserveLatestCommitPoint(filesToConsiderForCleanUp, "test"); + public void testCreateEmptyWithNullTranlogUUID() throws IOException { + final ShardId shardId = new ShardId("index", "_na_", 1); + Store store = new Store(shardId, INDEX_SETTINGS, new NIOFSDirectory(createTempDir()), new DummyShardLock(shardId)); + store.createEmpty(Version.LATEST, null); + SegmentInfos segmentInfos = Lucene.readSegmentInfos(store.directory()); + assertFalse(segmentInfos.getUserData().containsKey(Translog.TRANSLOG_UUID_KEY)); + testDefaultUserData(segmentInfos); + store.close(); + } - // we want to ensure commitMetadata files are preserved after calling cleanup - for (String existingFile : store.directory().listAll()) { - if (!IndexWriter.WRITE_LOCK_NAME.equals(existingFile)) { - assertTrue(commitMetadata.contains(existingFile)); - assertFalse(additionalSegments.contains(existingFile)); - } - } - deleteContent(store.directory()); - IOUtils.close(store); + private void testDefaultUserData(SegmentInfos segmentInfos) { + assertEquals("-1", segmentInfos.getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)); + assertEquals("-1", segmentInfos.getUserData().get(SequenceNumbers.MAX_SEQ_NO)); + assertEquals("-1", segmentInfos.getUserData().get(Engine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID)); } public void testGetSegmentMetadataMap() throws IOException { diff --git a/server/src/test/java/org/opensearch/index/store/StoreUtils.java b/server/src/test/java/org/opensearch/index/store/StoreUtils.java index 757ae6825c0bf..f8c80bdc44c9f 100644 --- a/server/src/test/java/org/opensearch/index/store/StoreUtils.java +++ b/server/src/test/java/org/opensearch/index/store/StoreUtils.java @@ -34,8 +34,8 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.env.NodeEnvironment; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.env.NodeEnvironment; import java.nio.file.Path; diff --git a/server/src/test/java/org/opensearch/index/store/lockmanager/FileLockInfoTests.java b/server/src/test/java/org/opensearch/index/store/lockmanager/FileLockInfoTests.java index f3a2f1859923e..80413d4cb6612 100644 --- a/server/src/test/java/org/opensearch/index/store/lockmanager/FileLockInfoTests.java +++ b/server/src/test/java/org/opensearch/index/store/lockmanager/FileLockInfoTests.java @@ -15,10 +15,24 @@ public class FileLockInfoTests extends OpenSearchTestCase { String testMetadata = "testMetadata"; String testAcquirerId = "testAcquirerId"; + String testAcquirerId2 = "ZxZ4Wh89SXyEPmSYAHrIrQ"; + String testAcquirerId3 = "ZxZ4Wh89SXyEPmSYAHrItS"; + String testMetadata1 = "metadata__9223372036854775806__9223372036854775803__9223372036854775790" + + "__9223372036854775800___Hf3Dbw2QQagfGLlVBOUrg__9223370340398865071__1"; + + String oldLock = testMetadata1 + RemoteStoreLockManagerUtils.PRE_OS210_LOCK_SEPARATOR + testAcquirerId2 + + RemoteStoreLockManagerUtils.PRE_OS210_LOCK_FILE_EXTENSION; + String newLock = testMetadata1 + RemoteStoreLockManagerUtils.SEPARATOR + testAcquirerId3 + + RemoteStoreLockManagerUtils.LOCK_FILE_EXTENSION; public void testGenerateLockName() { FileLockInfo fileLockInfo = FileLockInfo.getLockInfoBuilder().withFileToLock(testMetadata).withAcquirerId(testAcquirerId).build(); assertEquals(fileLockInfo.generateLockName(), FileLockInfo.LockFileUtils.generateLockName(testMetadata, testAcquirerId)); + + // validate that lock generated will be the new version lock + fileLockInfo = 
FileLockInfo.getLockInfoBuilder().withFileToLock(testMetadata1).withAcquirerId(testAcquirerId3).build(); + assertEquals(fileLockInfo.generateLockName(), newLock); + } public void testGenerateLockNameFailureCase1() { @@ -41,13 +55,33 @@ public void testGetLockPrefixFailureCase() { assertThrows(IllegalArgumentException.class, fileLockInfo::getLockPrefix); } + public void testGetFileToLockNameFromLock() { + assertEquals(testMetadata1, FileLockInfo.LockFileUtils.getFileToLockNameFromLock(oldLock)); + assertEquals(testMetadata1, FileLockInfo.LockFileUtils.getFileToLockNameFromLock(newLock)); + } + + public void testGetAcquirerIdFromLock() { + assertEquals(testAcquirerId2, FileLockInfo.LockFileUtils.getAcquirerIdFromLock(oldLock)); + assertEquals(testAcquirerId3, FileLockInfo.LockFileUtils.getAcquirerIdFromLock(newLock)); + } + public void testGetLocksForAcquirer() throws NoSuchFileException { + String[] locks = new String[] { FileLockInfo.LockFileUtils.generateLockName(testMetadata, testAcquirerId), - FileLockInfo.LockFileUtils.generateLockName(testMetadata, "acquirerId2") }; + FileLockInfo.LockFileUtils.generateLockName(testMetadata, "acquirerId2"), + oldLock, + newLock }; FileLockInfo fileLockInfo = FileLockInfo.getLockInfoBuilder().withAcquirerId(testAcquirerId).build(); - assertEquals(fileLockInfo.getLockForAcquirer(locks), FileLockInfo.LockFileUtils.generateLockName(testMetadata, testAcquirerId)); + + // validate old lock + fileLockInfo = FileLockInfo.getLockInfoBuilder().withAcquirerId(testAcquirerId2).build(); + assertEquals(fileLockInfo.getLockForAcquirer(locks), oldLock); + + // validate new lock + fileLockInfo = FileLockInfo.getLockInfoBuilder().withAcquirerId(testAcquirerId3).build(); + assertEquals(fileLockInfo.getLockForAcquirer(locks), newLock); } } diff --git a/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactoryTests.java b/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactoryTests.java index 61b4cc2176134..897785849cf7b 100644 --- a/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactoryTests.java @@ -8,25 +8,26 @@ package org.opensearch.index.store.lockmanager; -import org.junit.Before; -import org.mockito.ArgumentCaptor; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; import java.io.IOException; import java.util.Collections; import java.util.List; import java.util.function.Supplier; +import org.mockito.ArgumentCaptor; + import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class RemoteStoreLockManagerFactoryTests extends OpenSearchTestCase { diff --git a/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManagerTests.java b/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManagerTests.java index 2a3851514db3c..299100b65a43e 100644 --- 
a/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManagerTests.java +++ b/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManagerTests.java @@ -8,21 +8,23 @@ package org.opensearch.index.store.lockmanager; -import junit.framework.TestCase; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexOutput; -import org.junit.Before; import org.opensearch.index.store.RemoteBufferedOutputDirectory; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; import java.io.IOException; import java.util.Arrays; import java.util.Collection; +import java.util.Set; + +import junit.framework.TestCase; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class RemoteStoreMetadataLockManagerTests extends OpenSearchTestCase { private RemoteBufferedOutputDirectory lockDirectory; @@ -95,4 +97,26 @@ public void testIsAcquiredExceptionCase() { // metadata file is not passed durin FileLockInfo testLockInfo = FileLockInfo.getLockInfoBuilder().withAcquirerId(testAcquirerId).build(); assertThrows(IllegalArgumentException.class, () -> remoteStoreMetadataLockManager.isAcquired(testLockInfo)); } + + public void testFetchLocksEmpty() throws IOException { + when(lockDirectory.listFilesByPrefix("metadata")).thenReturn(Set.of()); + assertEquals(0, remoteStoreMetadataLockManager.fetchLockedMetadataFiles("metadata").size()); + } + + public void testFetchLocksNonEmpty() throws IOException { + String metadata1 = "metadata_1_2_3"; + String metadata2 = "metadata_4_5_6"; + when(lockDirectory.listFilesByPrefix("metadata")).thenReturn( + Set.of( + FileLockInfo.LockFileUtils.generateLockName(metadata1, "snapshot1"), + FileLockInfo.LockFileUtils.generateLockName(metadata2, "snapshot2") + ) + ); + assertEquals(Set.of(metadata1, metadata2), remoteStoreMetadataLockManager.fetchLockedMetadataFiles("metadata")); + } + + public void testFetchLocksException() throws IOException { + when(lockDirectory.listFilesByPrefix("metadata")).thenThrow(new IOException("Something went wrong")); + assertThrows(IOException.class, () -> remoteStoreMetadataLockManager.fetchLockedMetadataFiles("metadata")); + } } diff --git a/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockIndexInputLifecycleTests.java b/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockIndexInputLifecycleTests.java index 4c6138d66d2f0..b6e8c9c1b536a 100644 --- a/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockIndexInputLifecycleTests.java +++ b/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockIndexInputLifecycleTests.java @@ -8,18 +8,18 @@ package org.opensearch.index.store.remote.file; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.TimeUnit; -import java.util.function.Supplier; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; import org.apache.lucene.store.IndexInput; +import org.opensearch.test.OpenSearchTestCase; import org.hamcrest.MatcherAssert; import org.junit.After; -import org.opensearch.test.OpenSearchTestCase; -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; +import 
java.util.function.Supplier; import static org.hamcrest.Matchers.hasSize; diff --git a/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInputTests.java b/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInputTests.java index ab0046c9554f5..a135802c5f49c 100644 --- a/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInputTests.java +++ b/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInputTests.java @@ -8,6 +8,8 @@ package org.opensearch.index.store.remote.file; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + import org.apache.lucene.store.FSDirectory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; @@ -17,23 +19,26 @@ import org.apache.lucene.store.SimpleFSLockFactory; import org.apache.lucene.util.Constants; import org.apache.lucene.util.Version; -import org.junit.Before; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.common.lucene.store.ByteArrayIndexInput; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.index.store.remote.utils.BlobFetchRequest; import org.opensearch.index.store.remote.utils.TransferManager; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; import java.io.EOFException; import java.io.IOException; import java.nio.file.Path; -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; - +import static org.mockito.ArgumentMatchers.argThat; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; @ThreadLeakFilters(filters = CleanerDaemonThreadLeakFilter.class) public class OnDemandBlockSnapshotIndexInputTests extends OpenSearchTestCase { @@ -43,7 +48,6 @@ public class OnDemandBlockSnapshotIndexInputTests extends OpenSearchTestCase { private static final String FILE_NAME = "File_Name"; private static final String BLOCK_FILE_PREFIX = FILE_NAME; private static final boolean IS_CLONE = false; - private static final ByteSizeValue BYTE_SIZE_VALUE = new ByteSizeValue(1L); private static final int FILE_SIZE = 29360128; private TransferManager transferManager; private LockFactory lockFactory; @@ -74,12 +78,65 @@ public void test4MBBlock() throws Exception { runAllTestsFor(22); } - public void runAllTestsFor(int blockSizeShift) throws Exception { + public void testChunkedRepositoryWithBlockSizeGreaterThanChunkSize() throws IOException { + verifyChunkedRepository( + new ByteSizeValue(8, ByteSizeUnit.KB).getBytes(), // block Size + new ByteSizeValue(2, ByteSizeUnit.KB).getBytes(), // repository chunk size + new ByteSizeValue(15, ByteSizeUnit.KB).getBytes() // file size + ); + } + + public void testChunkedRepositoryWithBlockSizeLessThanChunkSize() throws IOException { + verifyChunkedRepository( + new ByteSizeValue(1, ByteSizeUnit.KB).getBytes(), // block Size + new ByteSizeValue(2, ByteSizeUnit.KB).getBytes(), // repository chunk size + new ByteSizeValue(3, ByteSizeUnit.KB).getBytes() // file size + ); + } + + public void testChunkedRepositoryWithBlockSizeEqualToChunkSize() throws IOException { + verifyChunkedRepository( + new ByteSizeValue(2, 
ByteSizeUnit.KB).getBytes(), // block Size + new ByteSizeValue(2, ByteSizeUnit.KB).getBytes(), // repository chunk size + new ByteSizeValue(15, ByteSizeUnit.KB).getBytes() // file size + ); + } + + private void verifyChunkedRepository(long blockSize, long repositoryChunkSize, long fileSize) throws IOException { + when(transferManager.fetchBlob(any())).thenReturn(new ByteArrayIndexInput("test", new byte[(int) blockSize])); + try ( + FSDirectory directory = new MMapDirectory(path, lockFactory); + IndexInput indexInput = new OnDemandBlockSnapshotIndexInput( + OnDemandBlockIndexInput.builder() + .resourceDescription(RESOURCE_DESCRIPTION) + .offset(BLOCK_SNAPSHOT_FILE_OFFSET) + .length(FILE_SIZE) + .blockSizeShift((int) (Math.log(blockSize) / Math.log(2))) + .isClone(IS_CLONE), + new BlobStoreIndexShardSnapshot.FileInfo( + FILE_NAME, + new StoreFileMetadata(FILE_NAME, fileSize, "", Version.LATEST), + new ByteSizeValue(repositoryChunkSize) + ), + directory, + transferManager + ) + ) { + // Seek to the position past the first repository chunk + indexInput.seek(repositoryChunkSize); + } + + // Verify all the chunks related to block are added to the fetchBlob request + verify(transferManager).fetchBlob(argThat(request -> request.getBlobLength() == blockSize)); + } + + private void runAllTestsFor(int blockSizeShift) throws Exception { final OnDemandBlockSnapshotIndexInput blockedSnapshotFile = createOnDemandBlockSnapshotIndexInput(blockSizeShift); final int blockSize = 1 << blockSizeShift; TestGroup.testGetBlock(blockedSnapshotFile, blockSize, FILE_SIZE); TestGroup.testGetBlockOffset(blockedSnapshotFile, blockSize, FILE_SIZE); TestGroup.testGetBlockStart(blockedSnapshotFile, blockSize); + TestGroup.testGetBlobParts(blockedSnapshotFile); TestGroup.testCurrentBlockStart(blockedSnapshotFile, blockSize); TestGroup.testCurrentBlockPosition(blockedSnapshotFile, blockSize); TestGroup.testClone(blockedSnapshotFile, blockSize); @@ -106,7 +163,7 @@ private OnDemandBlockSnapshotIndexInput createOnDemandBlockSnapshotIndexInput(in fileInfo = new BlobStoreIndexShardSnapshot.FileInfo( FILE_NAME, new StoreFileMetadata(FILE_NAME, FILE_SIZE, "", Version.LATEST), - BYTE_SIZE_VALUE + null ); int blockSize = 1 << blockSizeShift; @@ -182,7 +239,7 @@ private void initBlockFiles(int blockSize, FSDirectory fsDirectory) { } - public static class TestGroup { + private static class TestGroup { public static void testGetBlock(OnDemandBlockSnapshotIndexInput blockedSnapshotFile, int blockSize, int fileSize) { // block 0 @@ -217,6 +274,35 @@ public static void testGetBlockStart(OnDemandBlockSnapshotIndexInput blockedSnap assertEquals(blockSize * 2, blockedSnapshotFile.getBlockStart(2)); } + public static void testGetBlobParts(OnDemandBlockSnapshotIndexInput blockedSnapshotFile) { + // block id 0 + int blockId = 0; + long blockStart = blockedSnapshotFile.getBlockStart(blockId); + long blockEnd = blockStart + blockedSnapshotFile.getActualBlockSize(blockId); + assertEquals( + (blockEnd - blockStart), + blockedSnapshotFile.getBlobParts(blockStart, blockEnd).stream().mapToLong(o -> o.getLength()).sum() + ); + + // block 1 + blockId = 1; + blockStart = blockedSnapshotFile.getBlockStart(blockId); + blockEnd = blockStart + blockedSnapshotFile.getActualBlockSize(blockId); + assertEquals( + (blockEnd - blockStart), + blockedSnapshotFile.getBlobParts(blockStart, blockEnd).stream().mapToLong(o -> o.getLength()).sum() + ); + + // block 2 + blockId = 2; + blockStart = blockedSnapshotFile.getBlockStart(blockId); + blockEnd = blockStart + 
blockedSnapshotFile.getActualBlockSize(blockId); + assertEquals( + (blockEnd - blockStart), + blockedSnapshotFile.getBlobParts(blockStart, blockEnd).stream().mapToLong(o -> o.getLength()).sum() + ); + } + public static void testCurrentBlockStart(OnDemandBlockSnapshotIndexInput blockedSnapshotFile, int blockSize) throws IOException { // block 0 blockedSnapshotFile.seek(blockSize - 1); diff --git a/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheCleanerTests.java b/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheCleanerTests.java index eec9666cc06ba..e2a6a4011a6b7 100644 --- a/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheCleanerTests.java +++ b/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheCleanerTests.java @@ -8,30 +8,29 @@ package org.opensearch.index.store.remote.filecache; -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.HashMap; -import java.util.Map; - import org.apache.lucene.store.IndexInput; -import org.hamcrest.MatcherAssert; -import org.junit.After; -import org.junit.Before; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.breaker.NoopCircuitBreaker; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.NoopCircuitBreaker; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexSettings; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.ShardPath; -import org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.test.OpenSearchTestCase; +import org.hamcrest.MatcherAssert; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.HashMap; +import java.util.Map; -import static org.hamcrest.Matchers.equalTo; import static org.opensearch.index.store.remote.directory.RemoteSnapshotDirectoryFactory.LOCAL_STORE_LOCATION; +import static org.hamcrest.Matchers.equalTo; public class FileCacheCleanerTests extends OpenSearchTestCase { private static final ShardId SHARD_0 = new ShardId("index", "uuid-0", 0); @@ -59,7 +58,7 @@ public class FileCacheCleanerTests extends OpenSearchTestCase { @Before public void setUpFileCache() throws IOException { env = newNodeEnvironment(SETTINGS); - cleaner = new FileCacheCleaner(env, fileCache); + cleaner = new FileCacheCleaner(() -> fileCache); files.put(SHARD_0, addFile(fileCache, env, SHARD_0)); files.put(SHARD_1, addFile(fileCache, env, SHARD_1)); MatcherAssert.assertThat(fileCache.size(), equalTo(2L)); @@ -103,12 +102,11 @@ public void testShardRemoved() { final Path cachePath = ShardPath.loadFileCachePath(env, SHARD_0).getDataPath(); assertTrue(Files.exists(cachePath)); - cleaner.beforeIndexShardDeleted(SHARD_0, SETTINGS); + cleaner.beforeShardPathDeleted(SHARD_0, INDEX_SETTINGS, env); MatcherAssert.assertThat(fileCache.size(), equalTo(1L)); assertNull(fileCache.get(files.get(SHARD_0))); assertFalse(Files.exists(files.get(SHARD_0))); assertTrue(Files.exists(files.get(SHARD_1))); - cleaner.afterIndexShardDeleted(SHARD_0, SETTINGS); assertFalse(Files.exists(cachePath)); } @@ -116,15 +114,9 @@ public void testIndexRemoved() { final Path indexCachePath = 
env.fileCacheNodePath().fileCachePath.resolve(SHARD_0.getIndex().getUUID()); assertTrue(Files.exists(indexCachePath)); - cleaner.beforeIndexShardDeleted(SHARD_0, SETTINGS); - cleaner.afterIndexShardDeleted(SHARD_0, SETTINGS); - cleaner.beforeIndexShardDeleted(SHARD_1, SETTINGS); - cleaner.afterIndexShardDeleted(SHARD_1, SETTINGS); - cleaner.afterIndexRemoved( - SHARD_0.getIndex(), - INDEX_SETTINGS, - IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.DELETED - ); + cleaner.beforeShardPathDeleted(SHARD_0, INDEX_SETTINGS, env); + cleaner.beforeShardPathDeleted(SHARD_1, INDEX_SETTINGS, env); + cleaner.beforeIndexPathDeleted(SHARD_0.getIndex(), INDEX_SETTINGS, env); MatcherAssert.assertThat(fileCache.size(), equalTo(0L)); assertFalse(Files.exists(indexCachePath)); } diff --git a/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheTests.java b/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheTests.java index 02b6a48b6f48e..c1e3636cc9928 100644 --- a/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheTests.java +++ b/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheTests.java @@ -9,16 +9,16 @@ package org.opensearch.index.store.remote.filecache; import org.apache.lucene.store.IndexInput; -import org.junit.Before; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.breaker.TestCircuitBreaker; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.CircuitBreakingException; +import org.opensearch.core.common.breaker.NoopCircuitBreaker; import org.opensearch.env.NodeEnvironment; import org.opensearch.index.store.remote.directory.RemoteSnapshotDirectoryFactory; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.breaker.CircuitBreakingException; -import org.opensearch.common.breaker.NoopCircuitBreaker; import org.opensearch.index.store.remote.utils.cache.CacheUsage; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; import java.io.IOException; import java.nio.file.Files; diff --git a/server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerTests.java b/server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerTests.java index 2fee77ab563c0..f5d54dc790e76 100644 --- a/server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerTests.java +++ b/server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerTests.java @@ -13,18 +13,20 @@ import org.apache.lucene.store.ByteBuffersIndexOutput; import org.apache.lucene.store.OutputStreamIndexOutput; import org.apache.lucene.util.Version; -import org.junit.After; -import org.junit.Before; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.UUIDs; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.lucene.store.ByteArrayIndexInput; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; import org.opensearch.index.store.Store; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.replication.common.ReplicationType; 
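
The FileCacheCleaner changes above replace the index-shard lifecycle callbacks (beforeIndexShardDeleted/afterIndexShardDeleted/afterIndexRemoved) with explicit path-deletion hooks and a lazy FileCache supplier. A minimal sketch of the new call sequence, with names taken directly from the updated tests; the surrounding setup (node environment, settings, cache population) is elided:

    // The cleaner now takes a Supplier so the FileCache can be constructed lazily.
    FileCacheCleaner cleaner = new FileCacheCleaner(() -> fileCache);

    // Evict a shard's FileCache entries and remove its cache path before the shard path is deleted.
    cleaner.beforeShardPathDeleted(shardId, indexSettings, nodeEnvironment);

    // Once every shard of the index is gone, remove the per-index cache directory as well.
    cleaner.beforeIndexPathDeleted(shardId.getIndex(), indexSettings, nodeEnvironment);
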
+import org.junit.After; +import org.junit.Before; import java.io.IOException; import java.util.HashMap; @@ -38,16 +40,23 @@ public class RemoteSegmentMetadataHandlerTests extends IndexShardTestCase { private IndexShard indexShard; private SegmentInfos segmentInfos; + private ReplicationCheckpoint replicationCheckpoint; + @Before public void setup() throws IOException { remoteSegmentMetadataHandler = new RemoteSegmentMetadataHandler(); - Settings indexSettings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, org.opensearch.Version.CURRENT).build(); + Settings indexSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, org.opensearch.Version.CURRENT) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) + .build(); indexShard = newStartedShard(false, indexSettings, new NRTReplicationEngineFactory()); try (Store store = indexShard.store()) { segmentInfos = store.readLastCommittedSegmentsInfo(); } + replicationCheckpoint = indexShard.getLatestReplicationCheckpoint(); } @After @@ -61,8 +70,7 @@ public void testReadContentNoSegmentInfos() throws IOException { OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput("dummy bytes", "dummy stream", output, 4096); Map<String, String> expectedOutput = getDummyData(); indexOutput.writeMapOfStrings(expectedOutput); - indexOutput.writeLong(1234); - indexOutput.writeLong(1234); + RemoteSegmentMetadata.writeCheckpointToIndexOutput(replicationCheckpoint, indexOutput); indexOutput.writeLong(0); indexOutput.writeBytes(new byte[0], 0); indexOutput.close(); @@ -70,7 +78,7 @@ public void testReadContentNoSegmentInfos() throws IOException { new ByteArrayIndexInput("dummy bytes", BytesReference.toBytes(output.bytes())) ); assertEquals(expectedOutput, metadata.toMapOfStrings()); - assertEquals(1234, metadata.getGeneration()); + assertEquals(replicationCheckpoint.getSegmentsGen(), metadata.getGeneration()); } public void testReadContentWithSegmentInfos() throws IOException { @@ -78,8 +86,7 @@ public void testReadContentWithSegmentInfos() throws IOException { OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput("dummy bytes", "dummy stream", output, 4096); Map<String, String> expectedOutput = getDummyData(); indexOutput.writeMapOfStrings(expectedOutput); - indexOutput.writeLong(1234); - indexOutput.writeLong(1234); + RemoteSegmentMetadata.writeCheckpointToIndexOutput(replicationCheckpoint, indexOutput); ByteBuffersIndexOutput segmentInfosOutput = new ByteBuffersIndexOutput(new ByteBuffersDataOutput(), "test", "resource"); segmentInfos.write(segmentInfosOutput); byte[] segmentInfosBytes = segmentInfosOutput.toArrayCopy(); @@ -90,7 +97,7 @@ public void testReadContentWithSegmentInfos() throws IOException { new ByteArrayIndexInput("dummy bytes", BytesReference.toBytes(output.bytes())) ); assertEquals(expectedOutput, metadata.toMapOfStrings()); - assertEquals(1234, metadata.getGeneration()); + assertEquals(replicationCheckpoint.getSegmentsGen(), metadata.getGeneration()); assertArrayEquals(segmentInfosBytes, metadata.getSegmentInfosBytes()); } @@ -106,8 +113,7 @@ public void testWriteContent() throws IOException { RemoteSegmentMetadata remoteSegmentMetadata = new RemoteSegmentMetadata( RemoteSegmentMetadata.fromMapOfStrings(expectedOutput), segmentInfosBytes, - 1234, - 1234 + indexShard.getLatestReplicationCheckpoint() ); remoteSegmentMetadataHandler.writeContent(indexOutput, remoteSegmentMetadata); indexOutput.close(); @@ -116,8 +122,8 
@@ public void testWriteContent() throws IOException { new ByteArrayIndexInput("dummy bytes", BytesReference.toBytes(output.bytes())) ); assertEquals(expectedOutput, metadata.toMapOfStrings()); - assertEquals(1234, metadata.getGeneration()); - assertEquals(1234, metadata.getPrimaryTerm()); + assertEquals(replicationCheckpoint.getSegmentsGen(), metadata.getGeneration()); + assertEquals(replicationCheckpoint.getPrimaryTerm(), metadata.getPrimaryTerm()); assertArrayEquals(segmentInfosBytes, metadata.getSegmentInfosBytes()); } diff --git a/server/src/test/java/org/opensearch/index/store/remote/utils/TransferManagerTests.java b/server/src/test/java/org/opensearch/index/store/remote/utils/TransferManagerTests.java index 962b3a58c0658..7ae3944eb6944 100644 --- a/server/src/test/java/org/opensearch/index/store/remote/utils/TransferManagerTests.java +++ b/server/src/test/java/org/opensearch/index/store/remote/utils/TransferManagerTests.java @@ -8,31 +8,31 @@ package org.opensearch.index.store.remote.utils; -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.MMapDirectory; import org.apache.lucene.store.SimpleFSLockFactory; -import org.hamcrest.MatcherAssert; -import org.junit.After; -import org.junit.Before; import org.opensearch.common.blobstore.BlobContainer; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.breaker.NoopCircuitBreaker; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.NoopCircuitBreaker; import org.opensearch.index.store.remote.file.CleanerDaemonThreadLeakFilter; import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.index.store.remote.filecache.FileCacheFactory; import org.opensearch.test.OpenSearchTestCase; +import org.hamcrest.MatcherAssert; +import org.junit.After; +import org.junit.Before; -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -163,17 +163,11 @@ public void testUsageExceedsCapacity() throws Exception { public void testDownloadFails() throws Exception { doThrow(new IOException("Expected test exception")).when(blobContainer).readBlob(eq("failure-blob"), anyLong(), anyLong()); + List<BlobFetchRequest.BlobPart> blobParts = new ArrayList<>(); + blobParts.add(new BlobFetchRequest.BlobPart("failure-blob", 0, EIGHT_MB)); expectThrows( IOException.class, - () -> transferManager.fetchBlob( - BlobFetchRequest.builder() - .blobName("failure-blob") - .position(0) - .fileName("file") - .directory(directory) - .length(EIGHT_MB) - .build() - ) + () -> transferManager.fetchBlob(BlobFetchRequest.builder().fileName("file").directory(directory).blobParts(blobParts).build()) ); 
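
The TransferManager change above replaces the single blobName/position/length request with a list of BlobFetchRequest.BlobPart entries, so one logical fetch can span several repository chunks (the behavior the earlier OnDemandBlockSnapshotIndexInput chunked-repository tests verify). A sketch of building a multi-part request, assuming a TransferManager and Directory like those in this test; the blob names and sizes are hypothetical:

    // One logical fetch composed of two parts, e.g. a block that spans two repository chunks.
    final long chunkSize = 8 * 1024;
    List<BlobFetchRequest.BlobPart> blobParts = new ArrayList<>();
    blobParts.add(new BlobFetchRequest.BlobPart("blob_chunk_0", 0, chunkSize)); // (blob name, position, length)
    blobParts.add(new BlobFetchRequest.BlobPart("blob_chunk_1", 0, chunkSize));

    // The parts are written sequentially into one local file in `directory`.
    IndexInput input = transferManager.fetchBlob(
        BlobFetchRequest.builder()
            .fileName("local-file-name")
            .directory(directory)
            .blobParts(blobParts)
            .build()
    );
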
MatcherAssert.assertThat(fileCache.usage().activeUsage(), equalTo(0L)); MatcherAssert.assertThat(fileCache.usage().usage(), equalTo(0L)); @@ -187,16 +181,13 @@ public void testFetchesToDifferentBlobsDoNotBlockOnEachOther() throws Exception latch.await(); return new ByteArrayInputStream(createData()); }).when(blobContainer).readBlob(eq("blocking-blob"), anyLong(), anyLong()); + List<BlobFetchRequest.BlobPart> blobParts = new ArrayList<>(); + blobParts.add(new BlobFetchRequest.BlobPart("blocking-blob", 0, EIGHT_MB)); + final Thread blockingThread = new Thread(() -> { try { transferManager.fetchBlob( - BlobFetchRequest.builder() - .blobName("blocking-blob") - .position(0) - .fileName("blocking-file") - .directory(directory) - .length(EIGHT_MB) - .build() + BlobFetchRequest.builder().fileName("blocking-file").directory(directory).blobParts(blobParts).build() ); } catch (IOException e) { throw new RuntimeException(e); @@ -216,9 +207,9 @@ public void testFetchesToDifferentBlobsDoNotBlockOnEachOther() throws Exception } private IndexInput fetchBlobWithName(String blobname) throws IOException { - return transferManager.fetchBlob( - BlobFetchRequest.builder().blobName("blob").position(0).fileName(blobname).directory(directory).length(EIGHT_MB).build() - ); + List<BlobFetchRequest.BlobPart> blobParts = new ArrayList<>(); + blobParts.add(new BlobFetchRequest.BlobPart("blob", 0, EIGHT_MB)); + return transferManager.fetchBlob(BlobFetchRequest.builder().fileName(blobname).directory(directory).blobParts(blobParts).build()); } private static void assertIndexInputIsFunctional(IndexInput indexInput) throws IOException { diff --git a/server/src/test/java/org/opensearch/index/translog/InternalTranslogManagerTests.java b/server/src/test/java/org/opensearch/index/translog/InternalTranslogManagerTests.java index 4b2cd23a677a0..c098d11a3487f 100644 --- a/server/src/test/java/org/opensearch/index/translog/InternalTranslogManagerTests.java +++ b/server/src/test/java/org/opensearch/index/translog/InternalTranslogManagerTests.java @@ -23,9 +23,9 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.ReentrantReadWriteLock; -import static org.hamcrest.Matchers.equalTo; import static org.opensearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; import static org.opensearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; +import static org.hamcrest.Matchers.equalTo; public class InternalTranslogManagerTests extends TranslogManagerTestCase { @@ -38,7 +38,7 @@ public void testRecoveryFromTranslog() throws IOException { LocalCheckpointTracker tracker = new LocalCheckpointTracker(NO_OPS_PERFORMED, NO_OPS_PERFORMED); try { translogManager = new InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), primaryTerm, globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), @@ -68,7 +68,7 @@ public void testRecoveryFromTranslog() throws IOException { translogManager.syncTranslog(); translogManager.close(); translogManager = new InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), primaryTerm, globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), @@ -117,7 +117,7 @@ public void 
testTranslogRollsGeneration() throws IOException { LocalCheckpointTracker tracker = new LocalCheckpointTracker(NO_OPS_PERFORMED, NO_OPS_PERFORMED); try { translogManager = new InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), primaryTerm, globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), @@ -147,7 +147,7 @@ public void testTranslogRollsGeneration() throws IOException { translogManager.syncTranslog(); translogManager.close(); translogManager = new InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), primaryTerm, globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), @@ -182,7 +182,7 @@ public void testTrimOperationsFromTranslog() throws IOException { LocalCheckpointTracker tracker = new LocalCheckpointTracker(NO_OPS_PERFORMED, NO_OPS_PERFORMED); try { translogManager = new InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), primaryTerm, globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), @@ -214,7 +214,7 @@ public void testTrimOperationsFromTranslog() throws IOException { translogManager.close(); translogManager = new InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), primaryTerm, globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), @@ -253,7 +253,7 @@ public void testTranslogSync() throws IOException { ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), B_1, null); AtomicReference<InternalTranslogManager> translogManagerAtomicReference = new AtomicReference<>(); translogManager = new InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), primaryTerm, globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), diff --git a/server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java index 788391f0e42c0..4997067b75198 100644 --- a/server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java @@ -33,6 +33,7 @@ package org.opensearch.index.translog; import com.carrotsearch.randomizedtesting.generators.RandomPicks; + import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.backward_codecs.store.EndiannessReverserUtil; import org.apache.lucene.codecs.CodecUtil; @@ -41,38 +42,38 @@ import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.index.Term; -import org.apache.lucene.tests.mockfile.FilterFileChannel; -import org.apache.lucene.tests.mockfile.FilterFileSystemProvider; import org.apache.lucene.store.AlreadyClosedException; import 
org.apache.lucene.store.ByteArrayDataOutput; import org.apache.lucene.store.DataOutput; +import org.apache.lucene.tests.mockfile.FilterFileChannel; +import org.apache.lucene.tests.mockfile.FilterFileSystemProvider; import org.apache.lucene.tests.store.MockDirectoryWrapper; import org.apache.lucene.tests.util.LineFileDocs; import org.apache.lucene.tests.util.LuceneTestCase; -import org.opensearch.core.Assertions; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Randomness; -import org.opensearch.common.Strings; import org.opensearch.common.UUIDs; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.bytes.ReleasableBytesReference; import org.opensearch.common.collect.Tuple; -import org.opensearch.core.util.FileSystemUtils; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.util.concurrent.ReleasableLock; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.Assertions; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.util.FileSystemUtils; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.util.io.IOUtils; import org.opensearch.index.IndexSettings; import org.opensearch.index.VersionType; import org.opensearch.index.engine.Engine; @@ -86,10 +87,9 @@ import org.opensearch.index.seqno.LocalCheckpointTracker; import org.opensearch.index.seqno.LocalCheckpointTrackerTests; import org.opensearch.index.seqno.SequenceNumbers; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.translog.Translog.Location; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; import org.hamcrest.Matchers; import org.junit.After; @@ -291,7 +291,7 @@ private TranslogConfig getTranslogConfig(final Path path, final Settings setting ); final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings); - return new TranslogConfig(shardId, path, indexSettings, NON_RECYCLING_INSTANCE, bufferSize); + return new TranslogConfig(shardId, path, indexSettings, NON_RECYCLING_INSTANCE, bufferSize, ""); } private Location addToTranslogAndList(Translog translog, List<Translog.Operation> list, Translog.Operation op) throws IOException { @@ -519,17 +519,18 @@ public void testStats() throws IOException { builder.startObject(); copy.toXContent(builder, ToXContent.EMPTY_PARAMS); builder.endObject(); - assertThat( - 
Strings.toString(builder), - equalTo( - "{\"translog\":{\"operations\":4,\"size_in_bytes\":" - + 326 - + ",\"uncommitted_operations\":4,\"uncommitted_size_in_bytes\":" - + 271 - + ",\"earliest_last_modified_age\":" - + stats.getEarliestLastModifiedAge() - + "}}" - ) + assertEquals( + "{\"translog\":{\"operations\":4,\"size_in_bytes\":" + + 326 + + ",\"uncommitted_operations\":4,\"uncommitted_size_in_bytes\":" + + 271 + + ",\"earliest_last_modified_age\":" + + stats.getEarliestLastModifiedAge() + + ",\"remote_store\":{\"upload\":{" + + "\"total_uploads\":{\"started\":0,\"failed\":0,\"succeeded\":0}," + + "\"total_upload_size\":{\"started_bytes\":0,\"failed_bytes\":0,\"succeeded_bytes\":0}" + + "}}}}", + builder.toString() ); } } @@ -1451,7 +1452,8 @@ public void testTranslogWriterCanFlushInAddOrReadCall() throws IOException { temp.getTranslogPath(), temp.getIndexSettings(), temp.getBigArrays(), - new ByteSizeValue(1, ByteSizeUnit.KB) + new ByteSizeValue(1, ByteSizeUnit.KB), + "" ); final Set<Long> persistedSeqNos = new HashSet<>(); @@ -1549,7 +1551,8 @@ public void testTranslogWriterFsyncedWithLocalTranslog() throws IOException { temp.getTranslogPath(), temp.getIndexSettings(), temp.getBigArrays(), - new ByteSizeValue(1, ByteSizeUnit.KB) + new ByteSizeValue(1, ByteSizeUnit.KB), + "" ); final Set<Long> persistedSeqNos = new HashSet<>(); @@ -3506,7 +3509,7 @@ public void testTranslogOpSerialization() throws Exception { document.add(seqID.seqNo); document.add(seqID.seqNoDocValue); document.add(seqID.primaryTerm); - ParsedDocument doc = new ParsedDocument(versionField, seqID, "1", null, Arrays.asList(document), B_1, XContentType.JSON, null); + ParsedDocument doc = new ParsedDocument(versionField, seqID, "1", null, Arrays.asList(document), B_1, MediaTypeRegistry.JSON, null); Engine.Index eIndex = new Engine.Index( newUid(doc), diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFSTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFSTranslogTests.java deleted file mode 100644 index d26379eaefa5c..0000000000000 --- a/server/src/test/java/org/opensearch/index/translog/RemoteFSTranslogTests.java +++ /dev/null @@ -1,1529 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
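
Throughout the translog tests above, TranslogConfig gains an additional trailing String argument (these tests pass ""; the parameter's role is not visible in this diff, so treat it only as "the new trailing argument"), and translog stats XContent now includes a remote_store.upload section. A sketch of the two updated constructor shapes, using the argument order shown in the diff:

    // Unbuffered form: the new trailing String argument is appended last.
    TranslogConfig config = new TranslogConfig(
        shardId,
        translogPath,
        indexSettings,
        BigArrays.NON_RECYCLING_INSTANCE,
        ""  // new trailing argument (empty in these tests)
    );

    // Buffered form gains the same trailing argument after the buffer size.
    TranslogConfig buffered = new TranslogConfig(
        shardId,
        translogPath,
        indexSettings,
        BigArrays.NON_RECYCLING_INSTANCE,
        new ByteSizeValue(1, ByteSizeUnit.KB),
        ""
    );
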
- */ - -package org.opensearch.index.translog; - -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.lucene.backward_codecs.store.EndiannessReverserUtil; -import org.apache.lucene.store.AlreadyClosedException; -import org.apache.lucene.store.ByteArrayDataOutput; -import org.apache.lucene.store.DataOutput; -import org.apache.lucene.tests.mockfile.FilterFileChannel; -import org.apache.lucene.tests.util.LuceneTestCase; -import org.junit.After; -import org.junit.Before; -import org.opensearch.OpenSearchException; -import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.cluster.metadata.RepositoryMetadata; -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.blobstore.BlobContainer; -import org.opensearch.common.blobstore.BlobPath; -import org.opensearch.common.blobstore.BlobStore; -import org.opensearch.common.blobstore.fs.FsBlobContainer; -import org.opensearch.common.blobstore.fs.FsBlobStore; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.common.bytes.ReleasableBytesReference; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.common.util.concurrent.AbstractRunnable; -import org.opensearch.common.util.concurrent.ConcurrentCollections; -import org.opensearch.common.util.io.IOUtils; -import org.opensearch.core.util.FileSystemUtils; -import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.env.Environment; -import org.opensearch.env.TestEnvironment; -import org.opensearch.index.IndexSettings; -import org.opensearch.index.engine.MissingHistoryOperationsException; -import org.opensearch.index.seqno.LocalCheckpointTracker; -import org.opensearch.index.seqno.LocalCheckpointTrackerTests; -import org.opensearch.index.seqno.SequenceNumbers; -import org.opensearch.core.index.shard.ShardId; -import org.opensearch.index.translog.transfer.BlobStoreTransferService; -import org.opensearch.indices.recovery.RecoverySettings; -import org.opensearch.repositories.blobstore.BlobStoreRepository; -import org.opensearch.repositories.blobstore.BlobStoreTestUtil; -import org.opensearch.repositories.fs.FsRepository; -import org.opensearch.test.IndexSettingsModule; -import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.threadpool.TestThreadPool; -import org.opensearch.threadpool.ThreadPool; - -import java.io.Closeable; -import java.io.EOFException; -import java.io.IOException; -import java.io.InputStream; -import java.nio.ByteBuffer; -import java.nio.channels.FileChannel; -import java.nio.charset.Charset; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Random; -import java.util.Set; -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.BrokenBarrierException; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.CyclicBarrier; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; -import 
java.util.concurrent.atomic.AtomicReference; -import java.util.function.LongConsumer; -import java.util.zip.CRC32; -import java.util.zip.CheckedInputStream; - -import static org.hamcrest.Matchers.contains; -import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.opensearch.common.util.BigArrays.NON_RECYCLING_INSTANCE; -import static org.opensearch.index.translog.RemoteFsTranslog.TRANSLOG; -import static org.opensearch.index.translog.SnapshotMatchers.containsOperationsInAnyOrder; -import static org.opensearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; - -@LuceneTestCase.SuppressFileSystems("ExtrasFS") - -public class RemoteFSTranslogTests extends OpenSearchTestCase { - - protected final ShardId shardId = new ShardId("index", "_na_", 1); - - protected RemoteFsTranslog translog; - private AtomicLong globalCheckpoint; - protected Path translogDir; - // A default primary term is used by translog instances created in this test. - private final AtomicLong primaryTerm = new AtomicLong(); - private final AtomicBoolean primaryMode = new AtomicBoolean(true); - private final AtomicReference<LongConsumer> persistedSeqNoConsumer = new AtomicReference<>(); - private ThreadPool threadPool; - private final static String METADATA_DIR = "metadata"; - private final static String DATA_DIR = "data"; - BlobStoreRepository repository; - - BlobStoreTransferService blobStoreTransferService; - - TestTranslog.FailSwitch fail; - - private LongConsumer getPersistedSeqNoConsumer() { - return seqNo -> { - final LongConsumer consumer = persistedSeqNoConsumer.get(); - if (consumer != null) { - consumer.accept(seqNo); - } - }; - } - - @Override - @Before - public void setUp() throws Exception { - super.setUp(); - primaryTerm.set(randomLongBetween(1, Integer.MAX_VALUE)); - // if a previous test failed we clean up things here - translogDir = createTempDir(); - translog = create(translogDir); - } - - @Override - @After - public void tearDown() throws Exception { - try { - translog.getDeletionPolicy().assertNoOpenTranslogRefs(); - translog.close(); - } finally { - super.tearDown(); - terminate(threadPool); - } - } - - private RemoteFsTranslog create(Path path) throws IOException { - final String translogUUID = Translog.createEmptyTranslog(path, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); - return create(path, createRepository(), translogUUID); - } - - private RemoteFsTranslog create(Path path, BlobStoreRepository repository, String translogUUID) throws IOException { - this.repository = repository; - globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); - final TranslogConfig translogConfig = getTranslogConfig(path); - final TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(translogConfig.getIndexSettings()); - threadPool = new TestThreadPool(getClass().getName()); - blobStoreTransferService = new BlobStoreTransferService(repository.blobStore(), threadPool); - return new RemoteFsTranslog( - translogConfig, - translogUUID, - deletionPolicy, - () -> globalCheckpoint.get(), - primaryTerm::get, - getPersistedSeqNoConsumer(), - repository, - threadPool, - primaryMode::get - ); - - } - - private TranslogConfig getTranslogConfig(final Path path) { - final Settings settings = Settings.builder() - .put(IndexMetadata.SETTING_VERSION_CREATED, org.opensearch.Version.CURRENT) - // only randomize between nog age retention and a long one, so failures will have a 
chance of reproducing - .put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), randomBoolean() ? "-1ms" : "1h") - .put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), randomIntBetween(-1, 2048) + "b") - .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) - .build(); - return getTranslogConfig(path, settings); - } - - private TranslogConfig getTranslogConfig(final Path path, final Settings settings) { - final ByteSizeValue bufferSize = randomFrom( - TranslogConfig.DEFAULT_BUFFER_SIZE, - new ByteSizeValue(8, ByteSizeUnit.KB), - new ByteSizeValue(10 + randomInt(128 * 1024), ByteSizeUnit.BYTES) - ); - - final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings); - return new TranslogConfig(shardId, path, indexSettings, NON_RECYCLING_INSTANCE, bufferSize); - } - - private BlobStoreRepository createRepository() { - Settings settings = Settings.builder().put("location", randomAlphaOfLength(10)).build(); - RepositoryMetadata repositoryMetadata = new RepositoryMetadata(randomAlphaOfLength(10), FsRepository.TYPE, settings); - final ClusterService clusterService = BlobStoreTestUtil.mockClusterService(repositoryMetadata); - fail = new TestTranslog.FailSwitch(); - fail.failNever(); - final FsRepository repository = new ThrowingBlobRepository( - repositoryMetadata, - createEnvironment(), - xContentRegistry(), - clusterService, - new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), - fail - ) { - @Override - protected void assertSnapshotOrGenericThread() { - // eliminate thread name check as we create repo manually - } - }; - clusterService.addStateApplier(event -> repository.updateState(event.state())); - // Apply state once to initialize repo properly like RepositoriesService would - repository.updateState(clusterService.state()); - repository.start(); - return repository; - } - - /** Create a {@link Environment} with random path.home and path.repo **/ - private Environment createEnvironment() { - Path home = createTempDir(); - return TestEnvironment.newEnvironment( - Settings.builder() - .put(Environment.PATH_HOME_SETTING.getKey(), home.toAbsolutePath()) - .put(Environment.PATH_REPO_SETTING.getKey(), home.resolve("repo").toAbsolutePath()) - .build() - ); - } - - private Translog.Location addToTranslogAndList(Translog translog, List<Translog.Operation> list, Translog.Operation op) - throws IOException { - Translog.Location loc = translog.add(op); - Random random = random(); - if (random.nextBoolean()) { - translog.ensureSynced(loc); - } - list.add(op); - return loc; - } - - private Translog.Location addToTranslogAndListAndUpload(Translog translog, List<Translog.Operation> list, Translog.Operation op) - throws IOException { - Translog.Location loc = translog.add(op); - translog.ensureSynced(loc); - list.add(op); - return loc; - } - - public void testUploadWithPrimaryModeFalse() { - // Test setup - primaryMode.set(false); - - // Validate - assertTrue(translog.syncNeeded()); - assertFalse(primaryMode.get()); - try { - translog.sync(); - } catch (IOException e) { - throw new RuntimeException(e); - } - assertTrue(translog.syncNeeded()); - } - - public void testUploadWithPrimaryModeTrue() { - // Validate - assertTrue(translog.syncNeeded()); - assertTrue(primaryMode.get()); - try { - translog.sync(); - } catch (IOException e) { - throw new RuntimeException(e); - } - assertFalse(translog.syncNeeded()); - } - - public void testSimpleOperations() throws IOException { - 
ArrayList<Translog.Operation> ops = new ArrayList<>(); - try (Translog.Snapshot snapshot = translog.newSnapshot()) { - assertThat(snapshot, SnapshotMatchers.size(0)); - } - - addToTranslogAndList(translog, ops, new Translog.Index("1", 0, primaryTerm.get(), new byte[] { 1 })); - try (Translog.Snapshot snapshot = translog.newSnapshot()) { - assertThat(snapshot, SnapshotMatchers.equalsTo(ops)); - assertThat(snapshot.totalOperations(), equalTo(ops.size())); - } - - addToTranslogAndList(translog, ops, new Translog.Delete("2", 1, primaryTerm.get())); - try (Translog.Snapshot snapshot = translog.newSnapshot()) { - assertThat(snapshot.totalOperations(), equalTo(ops.size())); - assertThat(snapshot, containsOperationsInAnyOrder(ops)); - } - - final long seqNo = randomLongBetween(0, Integer.MAX_VALUE); - final String reason = randomAlphaOfLength(16); - final long noopTerm = randomLongBetween(1, primaryTerm.get()); - addToTranslogAndList(translog, ops, new Translog.NoOp(seqNo, noopTerm, reason)); - - try (Translog.Snapshot snapshot = translog.newSnapshot()) { - assertThat(snapshot, containsOperationsInAnyOrder(ops)); - assertThat(snapshot.totalOperations(), equalTo(ops.size())); - } - - try (Translog.Snapshot snapshot = translog.newSnapshot(seqNo + 1, randomLongBetween(seqNo + 1, Long.MAX_VALUE))) { - assertThat(snapshot, SnapshotMatchers.size(0)); - assertThat(snapshot.totalOperations(), equalTo(0)); - } - - } - - public void testReadLocation() throws IOException { - ArrayList<Translog.Operation> ops = new ArrayList<>(); - ArrayList<Translog.Location> locs = new ArrayList<>(); - locs.add(addToTranslogAndList(translog, ops, new Translog.Index("1", 0, primaryTerm.get(), new byte[] { 1 }))); - locs.add(addToTranslogAndList(translog, ops, new Translog.Index("2", 1, primaryTerm.get(), new byte[] { 1 }))); - locs.add(addToTranslogAndList(translog, ops, new Translog.Index("3", 2, primaryTerm.get(), new byte[] { 1 }))); - translog.sync(); - int i = 0; - for (Translog.Operation op : ops) { - assertEquals(op, translog.readOperation(locs.get(i++))); - } - assertNull(translog.readOperation(new Translog.Location(100, 0, 0))); - } - - public void testReadLocationDownload() throws IOException { - ArrayList<Translog.Operation> ops = new ArrayList<>(); - ArrayList<Translog.Location> locs = new ArrayList<>(); - locs.add(addToTranslogAndListAndUpload(translog, ops, new Translog.Index("1", 0, primaryTerm.get(), new byte[] { 1 }))); - locs.add(addToTranslogAndListAndUpload(translog, ops, new Translog.Index("2", 1, primaryTerm.get(), new byte[] { 1 }))); - locs.add(addToTranslogAndListAndUpload(translog, ops, new Translog.Index("3", 2, primaryTerm.get(), new byte[] { 1 }))); - translog.sync(); - int i = 0; - for (Translog.Operation op : ops) { - assertEquals(op, translog.readOperation(locs.get(i++))); - } - - String translogUUID = translog.translogUUID; - try { - translog.getDeletionPolicy().assertNoOpenTranslogRefs(); - translog.close(); - } finally { - terminate(threadPool); - } - - // Delete translog files to test download flow - for (Path file : FileSystemUtils.files(translogDir)) { - Files.delete(file); - } - - // Creating RemoteFsTranslog with the same location - Translog newTranslog = create(translogDir, repository, translogUUID); - i = 0; - for (Translog.Operation op : ops) { - assertEquals(op, newTranslog.readOperation(locs.get(i++))); - } - try { - newTranslog.close(); - } catch (Exception e) { - // Ignoring this exception for now. 
Once the download flow populates FileTracker, - // we can remove this try-catch block - } - } - - public void testSnapshotWithNewTranslog() throws IOException { - List<Closeable> toClose = new ArrayList<>(); - try { - ArrayList<Translog.Operation> ops = new ArrayList<>(); - Translog.Snapshot snapshot = translog.newSnapshot(); - toClose.add(snapshot); - assertThat(snapshot, SnapshotMatchers.size(0)); - - addToTranslogAndList(translog, ops, new Translog.Index("1", 0, primaryTerm.get(), new byte[] { 1 })); - Translog.Snapshot snapshot1 = translog.newSnapshot(); - toClose.add(snapshot1); - - addToTranslogAndList(translog, ops, new Translog.Index("2", 1, primaryTerm.get(), new byte[] { 2 })); - - assertThat(snapshot1, SnapshotMatchers.equalsTo(ops.get(0))); - - translog.rollGeneration(); - addToTranslogAndList(translog, ops, new Translog.Index("3", 2, primaryTerm.get(), new byte[] { 3 })); - - Translog.Snapshot snapshot2 = translog.newSnapshot(); - toClose.add(snapshot2); - translog.getDeletionPolicy().setLocalCheckpointOfSafeCommit(2); - assertThat(snapshot2, containsOperationsInAnyOrder(ops)); - assertThat(snapshot2.totalOperations(), equalTo(ops.size())); - } finally { - IOUtils.closeWhileHandlingException(toClose); - } - } - - public void testSnapshotOnClosedTranslog() throws IOException { - assertTrue(Files.exists(translogDir.resolve(Translog.getFilename(1)))); - translog.add(new Translog.Index("1", 0, primaryTerm.get(), new byte[] { 1 })); - translog.close(); - AlreadyClosedException ex = expectThrows(AlreadyClosedException.class, () -> translog.newSnapshot()); - assertEquals(ex.getMessage(), "translog is already closed"); - } - - public void testRangeSnapshot() throws Exception { - long minSeqNo = SequenceNumbers.NO_OPS_PERFORMED; - long maxSeqNo = SequenceNumbers.NO_OPS_PERFORMED; - final int generations = between(2, 20); - Map<Long, List<Translog.Operation>> operationsByGen = new HashMap<>(); - for (int gen = 0; gen < generations; gen++) { - Set<Long> seqNos = new HashSet<>(); - int numOps = randomIntBetween(1, 100); - for (int i = 0; i < numOps; i++) { - final long seqNo = randomValueOtherThanMany(seqNos::contains, () -> randomLongBetween(0, 1000)); - minSeqNo = SequenceNumbers.min(minSeqNo, seqNo); - maxSeqNo = SequenceNumbers.max(maxSeqNo, seqNo); - seqNos.add(seqNo); - } - List<Translog.Operation> ops = new ArrayList<>(seqNos.size()); - for (long seqNo : seqNos) { - Translog.Index op = new Translog.Index(randomAlphaOfLength(10), seqNo, primaryTerm.get(), new byte[] { randomByte() }); - translog.add(op); - ops.add(op); - } - operationsByGen.put(translog.currentFileGeneration(), ops); - translog.rollGeneration(); - if (rarely()) { - translog.rollGeneration(); // empty generation - } - } - - if (minSeqNo > 0) { - long fromSeqNo = randomLongBetween(0, minSeqNo - 1); - long toSeqNo = randomLongBetween(fromSeqNo, minSeqNo - 1); - try (Translog.Snapshot snapshot = translog.newSnapshot(fromSeqNo, toSeqNo)) { - assertThat(snapshot.totalOperations(), equalTo(0)); - assertNull(snapshot.next()); - } - } - - long fromSeqNo = randomLongBetween(maxSeqNo + 1, Long.MAX_VALUE); - long toSeqNo = randomLongBetween(fromSeqNo, Long.MAX_VALUE); - try (Translog.Snapshot snapshot = translog.newSnapshot(fromSeqNo, toSeqNo)) { - assertThat(snapshot.totalOperations(), equalTo(0)); - assertNull(snapshot.next()); - } - - fromSeqNo = randomLongBetween(0, 2000); - toSeqNo = randomLongBetween(fromSeqNo, 2000); - try (Translog.Snapshot snapshot = translog.newSnapshot(fromSeqNo, toSeqNo)) { - Set<Long> seenSeqNos = 
new HashSet<>(); - List<Translog.Operation> expectedOps = new ArrayList<>(); - for (long gen = translog.currentFileGeneration(); gen > 0; gen--) { - for (Translog.Operation op : operationsByGen.getOrDefault(gen, Collections.emptyList())) { - if (fromSeqNo <= op.seqNo() && op.seqNo() <= toSeqNo && seenSeqNos.add(op.seqNo())) { - expectedOps.add(op); - } - } - } - assertThat(TestTranslog.drainSnapshot(snapshot, false), equalTo(expectedOps)); - } - } - - public void testSimpleOperationsUpload() throws Exception { - ArrayList<Translog.Operation> ops = new ArrayList<>(); - try (Translog.Snapshot snapshot = translog.newSnapshot()) { - assertThat(snapshot, SnapshotMatchers.size(0)); - } - - addToTranslogAndListAndUpload(translog, ops, new Translog.Index("1", 0, primaryTerm.get(), new byte[] { 1 })); - try (Translog.Snapshot snapshot = translog.newSnapshot()) { - assertThat(snapshot, SnapshotMatchers.equalsTo(ops)); - assertThat(snapshot.totalOperations(), equalTo(ops.size())); - } - - assertEquals(4, translog.allUploaded().size()); - - addToTranslogAndListAndUpload(translog, ops, new Translog.Index("1", 1, primaryTerm.get(), new byte[] { 1 })); - assertEquals(6, translog.allUploaded().size()); - - translog.rollGeneration(); - assertEquals(6, translog.allUploaded().size()); - - Set<String> mdFiles = blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)); - assertEquals(2, mdFiles.size()); - logger.info("All md files {}", mdFiles); - - Set<String> tlogFiles = blobStoreTransferService.listAll( - getTranslogDirectory().add(DATA_DIR).add(String.valueOf(primaryTerm.get())) - ); - logger.info("All data files {}", tlogFiles); - - // assert content of ckp and tlog files - BlobPath path = getTranslogDirectory().add(DATA_DIR).add(String.valueOf(primaryTerm.get())); - for (TranslogReader reader : translog.readers) { - final long readerGeneration = reader.getGeneration(); - logger.error("Asserting content of {}", readerGeneration); - Path translogPath = reader.path(); - try ( - InputStream stream = new CheckedInputStream(Files.newInputStream(translogPath), new CRC32()); - InputStream tlogStream = blobStoreTransferService.downloadBlob(path, Translog.getFilename(readerGeneration)); - ) { - byte[] content = stream.readAllBytes(); - byte[] tlog = tlogStream.readAllBytes(); - assertArrayEquals(tlog, content); - } - - Path checkpointPath = translog.location().resolve(Translog.getCommitCheckpointFileName(readerGeneration)); - try ( - CheckedInputStream stream = new CheckedInputStream(Files.newInputStream(checkpointPath), new CRC32()); - InputStream ckpStream = blobStoreTransferService.downloadBlob(path, Translog.getCommitCheckpointFileName(readerGeneration)) - ) { - byte[] content = stream.readAllBytes(); - byte[] ckp = ckpStream.readAllBytes(); - assertArrayEquals(ckp, content); - } - } - - // expose the new checkpoint (simulating a commit), before we trim the translog - // simulating the remote segment upload . 
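// (Inferred from the assertions in this test: each uploaded generation contributes a
// translog-<gen>.tlog plus a translog-<gen>.ckp to the remote store, so allUploaded()
// moves in steps of two, e.g. two uploaded generations -> 4 remote files. Local trimming
// below is synchronous, while the matching remote deletions run asynchronously, which is
// why the remote-side counts are polled with assertBusy() rather than asserted inline.)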
- translog.setMinSeqNoToKeep(0); - // This should not trim anything from local - translog.trimUnreferencedReaders(); - assertEquals(2, translog.readers.size()); - assertBusy(() -> { - assertEquals(4, translog.allUploaded().size()); - assertEquals( - 4, - blobStoreTransferService.listAll(getTranslogDirectory().add(DATA_DIR).add(String.valueOf(primaryTerm.get()))).size() - ); - }); - - // This should trim tlog-2 from local - // This should not trim tlog-2.* files from remote as we are not uploading any more translog to remote - translog.setMinSeqNoToKeep(1); - translog.trimUnreferencedReaders(); - assertEquals(1, translog.readers.size()); - assertBusy(() -> { - assertEquals(4, translog.allUploaded().size()); - assertEquals( - 4, - blobStoreTransferService.listAll(getTranslogDirectory().add(DATA_DIR).add(String.valueOf(primaryTerm.get()))).size() - ); - }); - - // This should now trim tlog-2 files from remote, but not tlog-3 and tlog-4 - addToTranslogAndListAndUpload(translog, ops, new Translog.Index("2", 2, primaryTerm.get(), new byte[] { 1 })); - assertEquals(2, translog.stats().estimatedNumberOfOperations()); - - translog.setMinSeqNoToKeep(2); - - translog.trimUnreferencedReaders(); - assertEquals(1, translog.readers.size()); - assertEquals(1, translog.stats().estimatedNumberOfOperations()); - assertBusy(() -> assertEquals(4, translog.allUploaded().size())); - } - - public void testMetadataFileDeletion() throws Exception { - ArrayList<Translog.Operation> ops = new ArrayList<>(); - // Test deletion of metadata files - int numDocs = randomIntBetween(6, 10); - for (int i = 0; i < numDocs; i++) { - addToTranslogAndListAndUpload(translog, ops, new Translog.Index(String.valueOf(i), i, primaryTerm.get(), new byte[] { 1 })); - translog.setMinSeqNoToKeep(i); - translog.trimUnreferencedReaders(); - assertEquals(1, translog.readers.size()); - } - assertBusy(() -> assertEquals(4, translog.allUploaded().size())); - assertBusy(() -> assertEquals(1, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size())); - int moreDocs = randomIntBetween(3, 10); - logger.info("numDocs={} moreDocs={}", numDocs, moreDocs); - for (int i = numDocs; i < numDocs + moreDocs; i++) { - addToTranslogAndListAndUpload(translog, ops, new Translog.Index(String.valueOf(i), i, primaryTerm.get(), new byte[] { 1 })); - } - translog.trimUnreferencedReaders(); - assertEquals(1 + moreDocs, translog.readers.size()); - assertBusy(() -> assertEquals(2 + 2L * moreDocs, translog.allUploaded().size())); - assertBusy(() -> assertEquals(1, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size())); - - int totalDocs = numDocs + moreDocs; - translog.setMinSeqNoToKeep(totalDocs - 1); - translog.trimUnreferencedReaders(); - - addToTranslogAndListAndUpload( - translog, - ops, - new Translog.Index(String.valueOf(totalDocs), totalDocs, primaryTerm.get(), new byte[] { 1 }) - ); - translog.setMinSeqNoToKeep(totalDocs); - translog.trimUnreferencedReaders(); - assertBusy(() -> assertEquals(1, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size())); - - // Change primary term and test the deletion of older primaries - String translogUUID = translog.translogUUID; - try { - translog.getDeletionPolicy().assertNoOpenTranslogRefs(); - translog.close(); - } finally { - terminate(threadPool); - } - - // Increase primary term - long oldPrimaryTerm = primaryTerm.get(); - long newPrimaryTerm = primaryTerm.incrementAndGet(); - - // Creating RemoteFsTranslog with the same location - Translog
newTranslog = create(translogDir, repository, translogUUID); - int newPrimaryTermDocs = randomIntBetween(5, 10); - for (int i = totalDocs + 1; i <= totalDocs + newPrimaryTermDocs; i++) { - addToTranslogAndListAndUpload(newTranslog, ops, new Translog.Index(String.valueOf(i), i, primaryTerm.get(), new byte[] { 1 })); - // newTranslog.deletionPolicy.setLocalCheckpointOfSafeCommit(i - 1); - newTranslog.setMinSeqNoToKeep(i); - newTranslog.trimUnreferencedReaders(); - } - - try { - newTranslog.close(); - } catch (Exception e) { - // Ignoring this exception for now. Once the download flow populates FileTracker, - // we can remove this try-catch block - } - } - - private BlobPath getTranslogDirectory() { - return repository.basePath().add(shardId.getIndex().getUUID()).add(String.valueOf(shardId.id())).add(TRANSLOG); - } - - private Long populateTranslogOps(boolean withMissingOps) throws IOException { - long minSeqNo = SequenceNumbers.NO_OPS_PERFORMED; - long maxSeqNo = SequenceNumbers.NO_OPS_PERFORMED; - final int generations = between(2, 20); - long currentSeqNo = 0L; - List<Translog.Operation> firstGenOps = null; - Map<Long, List<Translog.Operation>> operationsByGen = new HashMap<>(); - for (int gen = 0; gen < generations; gen++) { - List<Long> seqNos = new ArrayList<>(); - int numOps = randomIntBetween(4, 10); - for (int i = 0; i < numOps; i++, currentSeqNo++) { - minSeqNo = SequenceNumbers.min(minSeqNo, currentSeqNo); - maxSeqNo = SequenceNumbers.max(maxSeqNo, currentSeqNo); - seqNos.add(currentSeqNo); - } - Collections.shuffle(seqNos, new Random(100)); - List<Translog.Operation> ops = new ArrayList<>(seqNos.size()); - for (long seqNo : seqNos) { - Translog.Index op = new Translog.Index(randomAlphaOfLength(10), seqNo, primaryTerm.get(), new byte[] { randomByte() }); - boolean shouldAdd = !withMissingOps || seqNo % 4 != 0; - if (shouldAdd) { - translog.add(op); - ops.add(op); - } - } - operationsByGen.put(translog.currentFileGeneration(), ops); - if (firstGenOps == null) { - firstGenOps = ops; - } - translog.rollGeneration(); - if (rarely()) { - translog.rollGeneration(); // empty generation - } - } - return currentSeqNo; - } - - public void testFullRangeSnapshot() throws Exception { - // Successful snapshot - long nextSeqNo = populateTranslogOps(false); - long fromSeqNo = 0L; - long toSeqNo = Math.min(nextSeqNo - 1, fromSeqNo + 15); - try (Translog.Snapshot snapshot = translog.newSnapshot(fromSeqNo, toSeqNo, true)) { - int totOps = 0; - for (Translog.Operation op = snapshot.next(); op != null; op = snapshot.next()) { - totOps++; - } - assertEquals(totOps, toSeqNo - fromSeqNo + 1); - } - } - - public void testFullRangeSnapshotWithFailures() throws Exception { - long nextSeqNo = populateTranslogOps(true); - long fromSeqNo = 0L; - long toSeqNo = Math.min(nextSeqNo - 1, fromSeqNo + 15); - try (Translog.Snapshot snapshot = translog.newSnapshot(fromSeqNo, toSeqNo, true)) { - int totOps = 0; - for (Translog.Operation op = snapshot.next(); op != null; op = snapshot.next()) { - totOps++; - } - fail("Should throw exception for missing operations"); - } catch (MissingHistoryOperationsException e) { - assertTrue(e.getMessage().contains("Not all operations between from_seqno")); - } - } - - public void testConcurrentWritesWithVaryingSize() throws Throwable { - final int opsPerThread = randomIntBetween(10, 200); - int threadCount = 2 + randomInt(5); - - logger.info("testing with [{}] threads, each doing [{}] ops", threadCount, opsPerThread); - final BlockingQueue<TestTranslog.LocationOperation> 
writtenOperations = new ArrayBlockingQueue<>(threadCount * opsPerThread); - - Thread[] threads = new Thread[threadCount]; - final Exception[] threadExceptions = new Exception[threadCount]; - final AtomicLong seqNoGenerator = new AtomicLong(); - final CountDownLatch downLatch = new CountDownLatch(1); - for (int i = 0; i < threadCount; i++) { - final int threadId = i; - threads[i] = new TranslogThread( - translog, - downLatch, - opsPerThread, - threadId, - writtenOperations, - seqNoGenerator, - threadExceptions - ); - threads[i].setDaemon(true); - threads[i].start(); - } - - downLatch.countDown(); - - for (int i = 0; i < threadCount; i++) { - if (threadExceptions[i] != null) { - throw threadExceptions[i]; - } - threads[i].join(60 * 1000); - } - - List<TestTranslog.LocationOperation> collect = new ArrayList<>(writtenOperations); - collect.sort(Comparator.comparing(op -> op.operation.seqNo())); - - List<Translog.Operation> opsList = new ArrayList<>(threadCount * opsPerThread); - try (Translog.Snapshot snapshot = translog.newSnapshot()) { - for (Translog.Operation op = snapshot.next(); op != null; op = snapshot.next()) { - opsList.add(op); - } - } - opsList.sort(Comparator.comparing(op -> op.seqNo())); - - for (int i = 0; i < threadCount * opsPerThread; i++) { - assertEquals(opsList.get(i), collect.get(i).operation); - } - } - - /** - * Tests that concurrent readers and writes maintain view and snapshot semantics - */ - public void testConcurrentWriteViewsAndSnapshot() throws Throwable { - final Thread[] writers = new Thread[randomIntBetween(1, 3)]; - final Thread[] readers = new Thread[randomIntBetween(1, 3)]; - final int flushEveryOps = randomIntBetween(5, 10); - final int maxOps = randomIntBetween(20, 100); - final Object signalReaderSomeDataWasIndexed = new Object(); - final AtomicLong idGenerator = new AtomicLong(); - final CyclicBarrier barrier = new CyclicBarrier(writers.length + readers.length + 1); - - // a map of all written ops and their returned location. - final Map<Translog.Operation, Translog.Location> writtenOps = ConcurrentCollections.newConcurrentMap(); - - // a signal for all threads to stop - final AtomicBoolean run = new AtomicBoolean(true); - - final Object flushMutex = new Object(); - final AtomicLong lastCommittedLocalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); - final LocalCheckpointTracker tracker = LocalCheckpointTrackerTests.createEmptyTracker(); - final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); - // any errors on threads - final List<Exception> errors = new CopyOnWriteArrayList<>(); - logger.info("using [{}] readers. [{}] writers. 
flushing every ~[{}] ops.", readers.length, writers.length, flushEveryOps); - for (int i = 0; i < writers.length; i++) { - final String threadName = "writer_" + i; - final int threadId = i; - writers[i] = new Thread(new AbstractRunnable() { - @Override - public void doRun() throws BrokenBarrierException, InterruptedException, IOException { - barrier.await(); - int counter = 0; - while (run.get() && idGenerator.get() < maxOps) { - long id = idGenerator.getAndIncrement(); - final Translog.Operation op; - final Translog.Operation.Type type = Translog.Operation.Type.values()[((int) (id % Translog.Operation.Type - .values().length))]; - switch (type) { - case CREATE: - case INDEX: - op = new Translog.Index("" + id, id, primaryTerm.get(), new byte[] { (byte) id }); - break; - case DELETE: - op = new Translog.Delete(Long.toString(id), id, primaryTerm.get()); - break; - case NO_OP: - op = new Translog.NoOp(id, 1, Long.toString(id)); - break; - default: - throw new AssertionError("unsupported operation type [" + type + "]"); - } - Translog.Location location = translog.add(op); - tracker.markSeqNoAsProcessed(id); - Translog.Location existing = writtenOps.put(op, location); - if (existing != null) { - fail("duplicate op [" + op + "], old entry at " + location); - } - if (id % writers.length == threadId) { - translog.ensureSynced(location); - } - if (id % flushEveryOps == 0) { - synchronized (flushMutex) { - // we need not do this concurrently as we need to make sure that the generation - // we're committing - is still present when we're committing - long localCheckpoint = tracker.getProcessedCheckpoint(); - translog.rollGeneration(); - // expose the new checkpoint (simulating a commit), before we trim the translog - lastCommittedLocalCheckpoint.set(localCheckpoint); - // deletionPolicy.setLocalCheckpointOfSafeCommit(localCheckpoint); - translog.setMinSeqNoToKeep(localCheckpoint + 1); - translog.trimUnreferencedReaders(); - } - } - if (id % 7 == 0) { - synchronized (signalReaderSomeDataWasIndexed) { - signalReaderSomeDataWasIndexed.notifyAll(); - } - } - counter++; - } - logger.info("--> [{}] done. 
wrote [{}] ops.", threadName, counter); - } - - @Override - public void onFailure(Exception e) { - logger.error(() -> new ParameterizedMessage("--> writer [{}] had an error", threadName), e); - errors.add(e); - } - }, threadName); - writers[i].start(); - } - - for (int i = 0; i < readers.length; i++) { - final String threadId = "reader_" + i; - readers[i] = new Thread(new AbstractRunnable() { - Closeable retentionLock = null; - long committedLocalCheckpointAtView; - - @Override - public void onFailure(Exception e) { - logger.error(() -> new ParameterizedMessage("--> reader [{}] had an error", threadId), e); - errors.add(e); - try { - closeRetentionLock(); - } catch (IOException inner) { - inner.addSuppressed(e); - logger.error("unexpected error while closing view, after failure", inner); - } - } - - void closeRetentionLock() throws IOException { - if (retentionLock != null) { - retentionLock.close(); - } - } - - void acquireRetentionLock() throws IOException { - closeRetentionLock(); - retentionLock = translog.acquireRetentionLock(); - // captures the last committed checkpoint, while holding the view, simulating - // recovery logic which captures a view and gets a lucene commit - committedLocalCheckpointAtView = lastCommittedLocalCheckpoint.get(); - logger.info("--> [{}] min gen after acquiring lock [{}]", threadId, translog.getMinFileGeneration()); - } - - @Override - protected void doRun() throws Exception { - barrier.await(); - int iter = 0; - while (idGenerator.get() < maxOps) { - if (iter++ % 10 == 0) { - acquireRetentionLock(); - } - - // captures all views that are written since the view was created (with a small caveat, see below) - // these are what we expect the snapshot to return (and potentially some more). - Set<Translog.Operation> expectedOps = new HashSet<>(writtenOps.keySet()); - expectedOps.removeIf(op -> op.seqNo() <= committedLocalCheckpointAtView); - try (Translog.Snapshot snapshot = translog.newSnapshot(committedLocalCheckpointAtView + 1L, Long.MAX_VALUE)) { - Translog.Operation op; - while ((op = snapshot.next()) != null) { - expectedOps.remove(op); - } - } - if (expectedOps.isEmpty() == false) { - StringBuilder missed = new StringBuilder("missed ").append(expectedOps.size()) - .append(" operations from [") - .append(committedLocalCheckpointAtView + 1L) - .append("]"); - boolean failed = false; - for (Translog.Operation expectedOp : expectedOps) { - final Translog.Location loc = writtenOps.get(expectedOp); - failed = true; - missed.append("\n --> [").append(expectedOp).append("] written at ").append(loc); - } - if (failed) { - fail(missed.toString()); - } - } - // slow down things a bit and spread out testing.. - synchronized (signalReaderSomeDataWasIndexed) { - if (idGenerator.get() < maxOps) { - signalReaderSomeDataWasIndexed.wait(); - } - } - } - closeRetentionLock(); - logger.info("--> [{}] done.
tested [{}] snapshots", threadId, iter); - } - }, threadId); - readers[i].start(); - } - - barrier.await(); - logger.debug("--> waiting for threads to stop"); - for (Thread thread : writers) { - thread.join(); - } - logger.debug("--> waiting for readers to stop"); - // force stopping, if all writers crashed - synchronized (signalReaderSomeDataWasIndexed) { - idGenerator.set(Long.MAX_VALUE); - signalReaderSomeDataWasIndexed.notifyAll(); - } - for (Thread thread : readers) { - thread.join(); - } - if (errors.size() > 0) { - Throwable e = errors.get(0); - for (Throwable suppress : errors.subList(1, errors.size())) { - e.addSuppressed(suppress); - } - throw e; - } - logger.info("--> test done. total ops written [{}]", writtenOps.size()); - } - - public void testSyncUpTo() throws IOException { - int translogOperations = randomIntBetween(10, 100); - int count = 0; - for (int op = 0; op < translogOperations; op++) { - int seqNo = ++count; - final Translog.Location location = translog.add( - new Translog.Index("" + op, seqNo, primaryTerm.get(), Integer.toString(seqNo).getBytes(Charset.forName("UTF-8"))) - ); - if (randomBoolean()) { - assertTrue("at least one operation pending", translog.syncNeeded()); - assertTrue("this operation has not been synced", translog.ensureSynced(location)); - // we are the last location so everything should be synced - assertFalse("the last call to ensureSynced synced all previous ops", translog.syncNeeded()); - seqNo = ++count; - translog.add( - new Translog.Index("" + op, seqNo, primaryTerm.get(), Integer.toString(seqNo).getBytes(Charset.forName("UTF-8"))) - ); - assertTrue("one pending operation", translog.syncNeeded()); - assertFalse("this op has been synced before", translog.ensureSynced(location)); // not syncing now - assertTrue("we only synced a previous operation yet", translog.syncNeeded()); - } - if (rarely()) { - translog.rollGeneration(); - assertFalse("location is from a previous translog - already synced", translog.ensureSynced(location)); // not syncing now - assertFalse("no sync needed since no operations in current translog", translog.syncNeeded()); - } - - if (randomBoolean()) { - translog.sync(); - assertFalse("translog has been synced already", translog.ensureSynced(location)); - } - } - } - - public void testSyncUpFailure() throws IOException { - int translogOperations = randomIntBetween(1, 20); - int count = 0; - fail.failAlways(); - ArrayList<Translog.Location> locations = new ArrayList<>(); - for (int op = 0; op < translogOperations; op++) { - int seqNo = ++count; - final Translog.Location location = translog.add( - new Translog.Index("" + op, seqNo, primaryTerm.get(), Integer.toString(seqNo).getBytes(Charset.forName("UTF-8"))) - ); - if (randomBoolean()) { - fail.failAlways(); - try { - translog.ensureSynced(location); - fail("io exception expected"); - } catch (IOException e) { - assertTrue("at least one operation pending", translog.syncNeeded()); - } - } else { - fail.failNever(); - translog.ensureSynced(location); - assertFalse("no sync needed since no operations in current translog", translog.syncNeeded()); - } - locations.add(location); - - } - // clean up - fail.failNever(); - - // writes should get synced up now - translog.sync(); - assertFalse(translog.syncNeeded()); - for (Translog.Location location : locations) { - assertFalse("all of the locations should be synced: " + location, translog.ensureSynced(location)); - } - - } - - public void testSyncUpToStream() throws IOException { - int iters = randomIntBetween(5, 10); - for (int i =
0; i < iters; i++) { - int translogOperations = randomIntBetween(10, 100); - int count = 0; - ArrayList<Translog.Location> locations = new ArrayList<>(); - for (int op = 0; op < translogOperations; op++) { - if (rarely()) { - translog.rollGeneration(); - } - final Translog.Location location = translog.add( - new Translog.Index("" + op, op, primaryTerm.get(), Integer.toString(++count).getBytes(Charset.forName("UTF-8"))) - ); - locations.add(location); - } - Collections.shuffle(locations, random()); - if (randomBoolean()) { - assertTrue("at least one operation pending", translog.syncNeeded()); - assertTrue("this operation has not been synced", translog.ensureSynced(locations.stream())); - // we are the last location so everything should be synced - assertFalse("the last call to ensureSynced synced all previous ops", translog.syncNeeded()); - } else if (rarely()) { - translog.rollGeneration(); - // not syncing now - assertFalse("location is from a previous translog - already synced", translog.ensureSynced(locations.stream())); - assertFalse("no sync needed since no operations in current translog", translog.syncNeeded()); - } else { - translog.sync(); - assertFalse("translog has been synced already", translog.ensureSynced(locations.stream())); - } - for (Translog.Location location : locations) { - assertFalse("all of the locations should be synced: " + location, translog.ensureSynced(location)); - } - } - } - - public void testLocationComparison() throws IOException { - List<Translog.Location> locations = new ArrayList<>(); - int translogOperations = randomIntBetween(10, 100); - int count = 0; - for (int op = 0; op < translogOperations; op++) { - locations.add( - translog.add( - new Translog.Index("" + op, op, primaryTerm.get(), Integer.toString(++count).getBytes(Charset.forName("UTF-8"))) - ) - ); - if (randomBoolean()) { - translog.ensureSynced(locations.get(op)); - } - if (rarely() && translogOperations > op + 1) { - translog.rollGeneration(); - } - } - Collections.shuffle(locations, random()); - Translog.Location max = locations.get(0); - for (Translog.Location location : locations) { - max = max(max, location); - } - - try (Translog.Snapshot snap = new TestTranslog.SortedSnapshot(translog.newSnapshot())) { - Translog.Operation next; - Translog.Operation maxOp = null; - while ((next = snap.next()) != null) { - maxOp = next; - } - assertNotNull(maxOp); - assertEquals(maxOp.getSource().source.utf8ToString(), Integer.toString(count)); - } - } - - public static Translog.Location max(Translog.Location a, Translog.Location b) { - if (a.compareTo(b) > 0) { - return a; - } - return b; - } - - public void testTranslogWriter() throws IOException { - final TranslogWriter writer = translog.createWriter(translog.currentFileGeneration() + 1); - final Set<Long> persistedSeqNos = new HashSet<>(); - persistedSeqNoConsumer.set(persistedSeqNos::add); - final int numOps = scaledRandomIntBetween(8, 250000); - final Set<Long> seenSeqNos = new HashSet<>(); - boolean opsHaveValidSequenceNumbers = randomBoolean(); - for (int i = 0; i < numOps; i++) { - byte[] bytes = new byte[4]; - DataOutput out = EndiannessReverserUtil.wrapDataOutput(new ByteArrayDataOutput(bytes)); - out.writeInt(i); - long seqNo; - do { - seqNo = opsHaveValidSequenceNumbers ?
randomNonNegativeLong() : SequenceNumbers.UNASSIGNED_SEQ_NO; - opsHaveValidSequenceNumbers = opsHaveValidSequenceNumbers || !rarely(); - } while (seenSeqNos.contains(seqNo)); - if (seqNo != SequenceNumbers.UNASSIGNED_SEQ_NO) { - seenSeqNos.add(seqNo); - } - writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), seqNo); - } - assertThat(persistedSeqNos, empty()); - writer.sync(); - persistedSeqNos.remove(SequenceNumbers.UNASSIGNED_SEQ_NO); - assertEquals(seenSeqNos, persistedSeqNos); - - final BaseTranslogReader reader = randomBoolean() - ? writer - : translog.openReader(writer.path(), Checkpoint.read(translog.location().resolve(Translog.CHECKPOINT_FILE_NAME))); - for (int i = 0; i < numOps; i++) { - ByteBuffer buffer = ByteBuffer.allocate(4); - reader.readBytes(buffer, reader.getFirstOperationOffset() + 4 * i); - buffer.flip(); - final int value = buffer.getInt(); - assertEquals(i, value); - } - final long minSeqNo = seenSeqNos.stream().min(Long::compareTo).orElse(SequenceNumbers.NO_OPS_PERFORMED); - final long maxSeqNo = seenSeqNos.stream().max(Long::compareTo).orElse(SequenceNumbers.NO_OPS_PERFORMED); - assertThat(reader.getCheckpoint().minSeqNo, equalTo(minSeqNo)); - assertThat(reader.getCheckpoint().maxSeqNo, equalTo(maxSeqNo)); - - byte[] bytes = new byte[4]; - DataOutput out = EndiannessReverserUtil.wrapDataOutput(new ByteArrayDataOutput(bytes)); - out.writeInt(2048); - writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), randomNonNegativeLong()); - - if (reader instanceof TranslogReader) { - ByteBuffer buffer = ByteBuffer.allocate(4); - try { - reader.readBytes(buffer, reader.getFirstOperationOffset() + 4 * numOps); - fail("read past EOF?"); - } catch (EOFException ex) { - // expected - } - ((TranslogReader) reader).close(); - } else { - // live reader! 
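// (A live TranslogWriter flushes its in-memory buffer before serving a read, so the op
// added above is readable here without an explicit sync; the same flush-on-read path is
// exercised by testTranslogWriterCanFlushInAddOrReadCall below.)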
- ByteBuffer buffer = ByteBuffer.allocate(4); - final long pos = reader.getFirstOperationOffset() + 4 * numOps; - reader.readBytes(buffer, pos); - buffer.flip(); - final int value = buffer.getInt(); - assertEquals(2048, value); - } - IOUtils.close(writer); - } - - public void testTranslogWriterCanFlushInAddOrReadCall() throws IOException { - Path tempDir = createTempDir(); - final TranslogConfig temp = getTranslogConfig(tempDir); - final TranslogConfig config = new TranslogConfig( - temp.getShardId(), - temp.getTranslogPath(), - temp.getIndexSettings(), - temp.getBigArrays(), - new ByteSizeValue(1, ByteSizeUnit.KB) - ); - - final Set<Long> persistedSeqNos = new HashSet<>(); - final AtomicInteger writeCalls = new AtomicInteger(); - - final ChannelFactory channelFactory = (file, openOption) -> { - FileChannel delegate = FileChannel.open(file, openOption); - boolean success = false; - try { - // don't do partial writes for checkpoints we rely on the fact that the bytes are written as an atomic operation - final boolean isCkpFile = file.getFileName().toString().endsWith(".ckp"); - - final FileChannel channel; - if (isCkpFile) { - channel = delegate; - } else { - channel = new FilterFileChannel(delegate) { - - @Override - public int write(ByteBuffer src) throws IOException { - writeCalls.incrementAndGet(); - return super.write(src); - } - }; - } - success = true; - return channel; - } finally { - if (success == false) { - IOUtils.closeWhileHandlingException(delegate); - } - } - }; - - String translogUUID = Translog.createEmptyTranslog( - config.getTranslogPath(), - SequenceNumbers.NO_OPS_PERFORMED, - shardId, - channelFactory, - primaryTerm.get() - ); - - try ( - Translog translog = new RemoteFsTranslog( - config, - translogUUID, - new DefaultTranslogDeletionPolicy(-1, -1, 0), - () -> SequenceNumbers.NO_OPS_PERFORMED, - primaryTerm::get, - persistedSeqNos::add, - repository, - threadPool, - () -> Boolean.TRUE - ) { - @Override - ChannelFactory getChannelFactory() { - return channelFactory; - } - } - ) { - TranslogWriter writer = translog.getCurrent(); - int initialWriteCalls = writeCalls.get(); - byte[] bytes = new byte[256]; - writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), 1); - writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), 2); - writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), 3); - writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), 4); - assertThat(persistedSeqNos, empty()); - assertEquals(initialWriteCalls, writeCalls.get()); - - if (randomBoolean()) { - // Since the buffer is full, this will flush before performing the add. 
- writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), 5); - assertThat(persistedSeqNos, empty()); - assertThat(writeCalls.get(), greaterThan(initialWriteCalls)); - } else { - // Will flush on read - writer.readBytes(ByteBuffer.allocate(256), 0); - assertThat(persistedSeqNos, empty()); - assertThat(writeCalls.get(), greaterThan(initialWriteCalls)); - - // Add after the read flushed the buffer - writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), 5); - } - - writer.sync(); - - // Sequence numbers are marked as persisted after sync - assertThat(persistedSeqNos, contains(1L, 2L, 3L, 4L, 5L)); - } - } - - public void testTranslogWriterFsyncDisabledInRemoteFsTranslog() throws IOException { - Path tempDir = createTempDir(); - final TranslogConfig temp = getTranslogConfig(tempDir); - final TranslogConfig config = new TranslogConfig( - temp.getShardId(), - temp.getTranslogPath(), - temp.getIndexSettings(), - temp.getBigArrays(), - new ByteSizeValue(1, ByteSizeUnit.KB) - ); - - final Set<Long> persistedSeqNos = new HashSet<>(); - final AtomicInteger translogFsyncCalls = new AtomicInteger(); - final AtomicInteger checkpointFsyncCalls = new AtomicInteger(); - - final ChannelFactory channelFactory = (file, openOption) -> { - FileChannel delegate = FileChannel.open(file, openOption); - boolean success = false; - try { - // don't do partial writes for checkpoints we rely on the fact that the bytes are written as an atomic operation - final boolean isCkpFile = file.getFileName().toString().endsWith(".ckp"); - - final FileChannel channel; - if (isCkpFile) { - channel = new FilterFileChannel(delegate) { - @Override - public void force(boolean metaData) throws IOException { - checkpointFsyncCalls.incrementAndGet(); - } - }; - } else { - channel = new FilterFileChannel(delegate) { - - @Override - public void force(boolean metaData) throws IOException { - translogFsyncCalls.incrementAndGet(); - } - }; - } - success = true; - return channel; - } finally { - if (success == false) { - IOUtils.closeWhileHandlingException(delegate); - } - } - }; - - String translogUUID = Translog.createEmptyTranslog( - config.getTranslogPath(), - SequenceNumbers.NO_OPS_PERFORMED, - shardId, - channelFactory, - primaryTerm.get() - ); - - try ( - Translog translog = new RemoteFsTranslog( - config, - translogUUID, - new DefaultTranslogDeletionPolicy(-1, -1, 0), - () -> SequenceNumbers.NO_OPS_PERFORMED, - primaryTerm::get, - persistedSeqNos::add, - repository, - threadPool, - () -> Boolean.TRUE - ) { - @Override - ChannelFactory getChannelFactory() { - return channelFactory; - } - } - ) { - TranslogWriter writer = translog.getCurrent(); - byte[] bytes = new byte[256]; - writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), 1); - writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), 2); - writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), 3); - writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), 4); - writer.sync(); - // Fsync is still enabled during empty translog creation.
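// (A reading of this test's intent: the two checkpoint fsyncs and one translog fsync
// asserted below come from the empty-translog creation above; the writer.add() and
// writer.sync() calls issue no further fsyncs because the remote translog takes its
// durability from the remote store upload instead.)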
- assertEquals(2, checkpointFsyncCalls.get()); - assertEquals(1, translogFsyncCalls.get()); - // Sequence numbers are marked as persisted after sync - assertThat(persistedSeqNos, contains(1L, 2L, 3L, 4L)); - } - } - - public void testCloseIntoReader() throws IOException { - try (TranslogWriter writer = translog.createWriter(translog.currentFileGeneration() + 1)) { - final int numOps = randomIntBetween(8, 128); - for (int i = 0; i < numOps; i++) { - final byte[] bytes = new byte[4]; - final DataOutput out = EndiannessReverserUtil.wrapDataOutput(new ByteArrayDataOutput(bytes)); - out.writeInt(i); - writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), randomNonNegativeLong()); - } - writer.sync(); - final Checkpoint writerCheckpoint = writer.getCheckpoint(); - TranslogReader reader = writer.closeIntoReader(); - try { - if (randomBoolean()) { - reader.close(); - reader = translog.openReader(reader.path(), writerCheckpoint); - } - for (int i = 0; i < numOps; i++) { - final ByteBuffer buffer = ByteBuffer.allocate(4); - reader.readBytes(buffer, reader.getFirstOperationOffset() + 4 * i); - buffer.flip(); - final int value = buffer.getInt(); - assertEquals(i, value); - } - final Checkpoint readerCheckpoint = reader.getCheckpoint(); - assertThat(readerCheckpoint, equalTo(writerCheckpoint)); - } finally { - IOUtils.close(reader); - } - } - } - - public class ThrowingBlobRepository extends FsRepository { - private final Environment environment; - - private TestTranslog.FailSwitch fail; - - public ThrowingBlobRepository( - RepositoryMetadata metadata, - Environment environment, - NamedXContentRegistry namedXContentRegistry, - ClusterService clusterService, - RecoverySettings recoverySettings, - TestTranslog.FailSwitch fail - ) { - super(metadata, environment, namedXContentRegistry, clusterService, recoverySettings); - this.environment = environment; - this.fail = fail; - } - - protected BlobStore createBlobStore() throws Exception { - final String location = REPOSITORIES_LOCATION_SETTING.get(getMetadata().settings()); - final Path locationFile = environment.resolveRepoFile(location); - return new ThrowingBlobStore(bufferSize, locationFile, isReadOnly(), fail); - } - } - - private class ThrowingBlobStore extends FsBlobStore { - - private TestTranslog.FailSwitch fail; - - public ThrowingBlobStore(int bufferSizeInBytes, Path path, boolean readonly, TestTranslog.FailSwitch fail) throws IOException { - super(bufferSizeInBytes, path, readonly); - this.fail = fail; - } - - @Override - public BlobContainer blobContainer(BlobPath path) { - try { - return new ThrowingBlobContainer(this, path, buildAndCreate(path), fail); - } catch (IOException ex) { - throw new OpenSearchException("failed to create blob container", ex); - } - } - } - - private class ThrowingBlobContainer extends FsBlobContainer { - - private TestTranslog.FailSwitch fail; - - public ThrowingBlobContainer(FsBlobStore blobStore, BlobPath blobPath, Path path, TestTranslog.FailSwitch fail) { - super(blobStore, blobPath, path); - this.fail = fail; - } - - public void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize, boolean failIfAlreadyExists) - throws IOException { - if (fail.fail()) { - throw new IOException("blob container throwing error"); - } - super.writeBlobAtomic(blobName, inputStream, blobSize, failIfAlreadyExists); - } - } - - class TranslogThread extends Thread { - private final CountDownLatch downLatch; - private final int opsPerThread; - private final int threadId; - private final 
Collection<TestTranslog.LocationOperation> writtenOperations; - private final Exception[] threadExceptions; - private final Translog translog; - private final AtomicLong seqNoGenerator; - - TranslogThread( - Translog translog, - CountDownLatch downLatch, - int opsPerThread, - int threadId, - Collection<TestTranslog.LocationOperation> writtenOperations, - AtomicLong seqNoGenerator, - Exception[] threadExceptions - ) { - this.translog = translog; - this.downLatch = downLatch; - this.opsPerThread = opsPerThread; - this.threadId = threadId; - this.writtenOperations = writtenOperations; - this.seqNoGenerator = seqNoGenerator; - this.threadExceptions = threadExceptions; - } - - @Override - public void run() { - try { - downLatch.await(); - for (int opCount = 0; opCount < opsPerThread; opCount++) { - Translog.Operation op; - final Translog.Operation.Type type = randomFrom(Translog.Operation.Type.values()); - switch (type) { - case CREATE: - case INDEX: - op = new Translog.Index( - threadId + "_" + opCount, - seqNoGenerator.getAndIncrement(), - primaryTerm.get(), - randomUnicodeOfLengthBetween(1, 20 * 1024).getBytes("UTF-8") - ); - break; - case DELETE: - op = new Translog.Delete( - threadId + "_" + opCount, - seqNoGenerator.getAndIncrement(), - primaryTerm.get(), - 1 + randomInt(100000) - ); - break; - case NO_OP: - op = new Translog.NoOp(seqNoGenerator.getAndIncrement(), primaryTerm.get(), randomAlphaOfLength(16)); - break; - default: - throw new AssertionError("unsupported operation type [" + type + "]"); - } - - Translog.Location loc = add(op); - writtenOperations.add(new TestTranslog.LocationOperation(op, loc)); - if (rarely()) { // lets verify we can concurrently read this - assertEquals(op, translog.readOperation(loc)); - } - afterAdd(); - } - } catch (Exception t) { - threadExceptions[threadId] = t; - } - } - - protected Translog.Location add(Translog.Operation op) throws IOException { - Translog.Location location = translog.add(op); - if (randomBoolean()) { - translog.ensureSynced(location); - } - return location; - } - - protected void afterAdd() {} - } - -} diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java new file mode 100644 index 0000000000000..a83e737dc25c1 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java @@ -0,0 +1,1884 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.translog; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.backward_codecs.store.EndiannessReverserUtil; +import org.apache.lucene.store.AlreadyClosedException; +import org.apache.lucene.store.ByteArrayDataOutput; +import org.apache.lucene.store.DataOutput; +import org.apache.lucene.tests.mockfile.FilterFileChannel; +import org.apache.lucene.tests.util.LuceneTestCase; +import org.opensearch.OpenSearchException; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.blobstore.BlobStore; +import org.opensearch.common.blobstore.fs.FsBlobContainer; +import org.opensearch.common.blobstore.fs.FsBlobStore; +import org.opensearch.common.bytes.ReleasableBytesReference; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.util.FileSystemUtils; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.env.Environment; +import org.opensearch.env.TestEnvironment; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.engine.MissingHistoryOperationsException; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; +import org.opensearch.index.seqno.LocalCheckpointTracker; +import org.opensearch.index.seqno.LocalCheckpointTrackerTests; +import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.index.translog.transfer.BlobStoreTransferService; +import org.opensearch.index.translog.transfer.TranslogTransferManager; +import org.opensearch.index.translog.transfer.TranslogTransferMetadata; +import org.opensearch.index.translog.transfer.TranslogUploadFailedException; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.repositories.blobstore.BlobStoreTestUtil; +import org.opensearch.repositories.fs.FsRepository; +import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.io.Closeable; +import java.io.EOFException; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.NoSuchFileException; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import 
java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.Set; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.LongConsumer; +import java.util.zip.CRC32; +import java.util.zip.CheckedInputStream; + +import static org.opensearch.common.util.BigArrays.NON_RECYCLING_INSTANCE; +import static org.opensearch.index.IndexSettings.INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING; +import static org.opensearch.index.translog.RemoteFsTranslog.TRANSLOG; +import static org.opensearch.index.translog.SnapshotMatchers.containsOperationsInAnyOrder; +import static org.opensearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +@LuceneTestCase.SuppressFileSystems("ExtrasFS") +public class RemoteFsTranslogTests extends OpenSearchTestCase { + + protected final ShardId shardId = new ShardId("index", "_na_", 1); + + protected RemoteFsTranslog translog; + private AtomicLong globalCheckpoint; + protected Path translogDir; + // A default primary term is used by translog instances created in this test. 
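// (setUp() randomizes this term into [1, Integer.MAX_VALUE]; tests that exercise
// primary-term changes, e.g. testMetadataFileDeletion, bump it further with
// primaryTerm.incrementAndGet().)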
+ private final AtomicLong primaryTerm = new AtomicLong(); + private final AtomicBoolean primaryMode = new AtomicBoolean(true); + private final AtomicReference<LongConsumer> persistedSeqNoConsumer = new AtomicReference<>(); + private ThreadPool threadPool; + private final static String METADATA_DIR = "metadata"; + private final static String DATA_DIR = "data"; + AtomicInteger writeCalls = new AtomicInteger(); + BlobStoreRepository repository; + + BlobStoreTransferService blobStoreTransferService; + + TestTranslog.FailSwitch fail; + + TestTranslog.SlowDownWriteSwitch slowDown; + + private LongConsumer getPersistedSeqNoConsumer() { + return seqNo -> { + final LongConsumer consumer = persistedSeqNoConsumer.get(); + if (consumer != null) { + consumer.accept(seqNo); + } + }; + } + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + primaryTerm.set(randomLongBetween(1, Integer.MAX_VALUE)); + // if a previous test failed we clean up things here + translogDir = createTempDir(); + translog = create(translogDir); + } + + @Override + @After + public void tearDown() throws Exception { + try { + translog.getDeletionPolicy().assertNoOpenTranslogRefs(); + translog.close(); + } finally { + super.tearDown(); + terminate(threadPool); + } + } + + private RemoteFsTranslog create(Path path) throws IOException { + final String translogUUID = Translog.createEmptyTranslog(path, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); + return create(path, createRepository(), translogUUID, 0); + } + + private RemoteFsTranslog create(Path path, BlobStoreRepository repository, String translogUUID, int extraGenToKeep) throws IOException { + this.repository = repository; + globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + final TranslogConfig translogConfig = getTranslogConfig(path, extraGenToKeep); + final TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(translogConfig.getIndexSettings()); + threadPool = new TestThreadPool(getClass().getName()); + blobStoreTransferService = new BlobStoreTransferService(repository.blobStore(), threadPool); + return new RemoteFsTranslog( + translogConfig, + translogUUID, + deletionPolicy, + () -> globalCheckpoint.get(), + primaryTerm::get, + getPersistedSeqNoConsumer(), + repository, + threadPool, + primaryMode::get, + new RemoteTranslogTransferTracker(shardId, 10) + ); + } + + private RemoteFsTranslog create(Path path, BlobStoreRepository repository, String translogUUID) throws IOException { + return create(path, repository, translogUUID, 0); + } + + private TranslogConfig getTranslogConfig(final Path path) { + return getTranslogConfig(path, 0); + } + + private TranslogConfig getTranslogConfig(final Path path, int gensToKeep) { + final Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, org.opensearch.Version.CURRENT) + // only randomize between no age retention and a long one, so failures will have a chance of reproducing + .put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), randomBoolean() ?
"-1ms" : "1h") + .put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), randomIntBetween(-1, 2048) + "b") + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) + .put(INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING.getKey(), gensToKeep) + .build(); + return getTranslogConfig(path, settings); + } + + private TranslogConfig getTranslogConfig(final Path path, final Settings settings) { + final ByteSizeValue bufferSize = randomFrom( + TranslogConfig.DEFAULT_BUFFER_SIZE, + new ByteSizeValue(8, ByteSizeUnit.KB), + new ByteSizeValue(10 + randomInt(128 * 1024), ByteSizeUnit.BYTES) + ); + + final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings); + return new TranslogConfig(shardId, path, indexSettings, NON_RECYCLING_INSTANCE, bufferSize, ""); + } + + private BlobStoreRepository createRepository() { + Settings settings = Settings.builder().put("location", randomAlphaOfLength(10)).build(); + RepositoryMetadata repositoryMetadata = new RepositoryMetadata(randomAlphaOfLength(10), FsRepository.TYPE, settings); + final ClusterService clusterService = BlobStoreTestUtil.mockClusterService(repositoryMetadata); + fail = new TestTranslog.FailSwitch(); + fail.failNever(); + slowDown = new TestTranslog.SlowDownWriteSwitch(); + final FsRepository repository = new ThrowingBlobRepository( + repositoryMetadata, + createEnvironment(), + xContentRegistry(), + clusterService, + new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), + fail, + slowDown + ) { + @Override + protected void assertSnapshotOrGenericThread() { + // eliminate thread name check as we create repo manually + } + }; + clusterService.addStateApplier(event -> repository.updateState(event.state())); + // Apply state once to initialize repo properly like RepositoriesService would + repository.updateState(clusterService.state()); + repository.start(); + return repository; + } + + /** Create a {@link Environment} with random path.home and path.repo **/ + private Environment createEnvironment() { + Path home = createTempDir(); + return TestEnvironment.newEnvironment( + Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), home.toAbsolutePath()) + .put(Environment.PATH_REPO_SETTING.getKey(), home.resolve("repo").toAbsolutePath()) + .build() + ); + } + + private Translog.Location addToTranslogAndList(Translog translog, List<Translog.Operation> list, Translog.Operation op) + throws IOException { + Translog.Location loc = translog.add(op); + Random random = random(); + if (random.nextBoolean()) { + translog.ensureSynced(loc); + } + list.add(op); + return loc; + } + + private Translog.Location addToTranslogAndListAndUpload(Translog translog, List<Translog.Operation> list, Translog.Operation op) + throws IOException { + Translog.Location loc = translog.add(op); + translog.ensureSynced(loc); + list.add(op); + return loc; + } + + private static void assertUploadStatsNoFailures(RemoteTranslogTransferTracker statsTracker) { + assertTrue(statsTracker.getUploadBytesStarted() > 0); + assertTrue(statsTracker.getTotalUploadsStarted() > 0); + assertEquals(0, statsTracker.getUploadBytesFailed()); + assertEquals(0, statsTracker.getTotalUploadsFailed()); + assertTrue(statsTracker.getUploadBytesSucceeded() > 0); + assertTrue(statsTracker.getTotalUploadsSucceeded() > 0); + assertTrue(statsTracker.getTotalUploadTimeInMillis() > 0); + 
assertTrue(statsTracker.getLastSuccessfulUploadTimestamp() > 0); + } + + private static void assertUploadStatsNoUploads(RemoteTranslogTransferTracker statsTracker) { + assertEquals(0, statsTracker.getUploadBytesStarted()); + assertEquals(0, statsTracker.getUploadBytesFailed()); + assertEquals(0, statsTracker.getUploadBytesSucceeded()); + assertEquals(0, statsTracker.getTotalUploadsStarted()); + assertEquals(0, statsTracker.getTotalUploadsFailed()); + assertEquals(0, statsTracker.getTotalUploadsSucceeded()); + assertEquals(0, statsTracker.getTotalUploadTimeInMillis()); + assertEquals(0, statsTracker.getLastSuccessfulUploadTimestamp()); + } + + private static void assertDownloadStatsPopulated(RemoteTranslogTransferTracker statsTracker) { + assertTrue(statsTracker.getDownloadBytesSucceeded() > 0); + assertTrue(statsTracker.getTotalDownloadsSucceeded() > 0); + // TODO: Need to simulate a delay for this assertion to avoid flakiness + // assertTrue(statsTracker.getTotalDownloadTimeInMillis() > 0); + assertTrue(statsTracker.getLastSuccessfulDownloadTimestamp() > 0); + } + + private static void assertDownloadStatsNoDownloads(RemoteTranslogTransferTracker statsTracker) { + assertEquals(0, statsTracker.getDownloadBytesSucceeded()); + assertEquals(0, statsTracker.getTotalDownloadsSucceeded()); + assertEquals(0, statsTracker.getTotalDownloadTimeInMillis()); + assertEquals(0, statsTracker.getLastSuccessfulDownloadTimestamp()); + } + + public void testUploadWithPrimaryModeFalse() { + // Test setup + primaryMode.set(false); + + // Validate + assertTrue(translog.syncNeeded()); + assertFalse(primaryMode.get()); + try { + translog.sync(); + } catch (IOException e) { + throw new RuntimeException(e); + } + assertTrue(translog.syncNeeded()); + RemoteTranslogTransferTracker statsTracker = translog.getRemoteTranslogTracker(); + assertUploadStatsNoUploads(statsTracker); + assertDownloadStatsNoDownloads(statsTracker); + } + + public void testUploadWithPrimaryModeTrue() { + // Validate + assertTrue(translog.syncNeeded()); + assertTrue(primaryMode.get()); + try { + translog.sync(); + } catch (IOException e) { + throw new RuntimeException(e); + } + assertFalse(translog.syncNeeded()); + RemoteTranslogTransferTracker statsTracker = translog.getRemoteTranslogTracker(); + assertUploadStatsNoFailures(statsTracker); + assertDownloadStatsNoDownloads(statsTracker); + } + + public void testSimpleOperations() throws IOException { + ArrayList<Translog.Operation> ops = new ArrayList<>(); + try (Translog.Snapshot snapshot = translog.newSnapshot()) { + assertThat(snapshot, SnapshotMatchers.size(0)); + } + + addToTranslogAndList(translog, ops, new Translog.Index("1", 0, primaryTerm.get(), new byte[] { 1 })); + try (Translog.Snapshot snapshot = translog.newSnapshot()) { + assertThat(snapshot, SnapshotMatchers.equalsTo(ops)); + assertThat(snapshot.totalOperations(), equalTo(ops.size())); + } + + addToTranslogAndList(translog, ops, new Translog.Delete("2", 1, primaryTerm.get())); + try (Translog.Snapshot snapshot = translog.newSnapshot()) { + assertThat(snapshot.totalOperations(), equalTo(ops.size())); + assertThat(snapshot, containsOperationsInAnyOrder(ops)); + } + + final long seqNo = randomLongBetween(0, Integer.MAX_VALUE); + final String reason = randomAlphaOfLength(16); + final long noopTerm = randomLongBetween(1, primaryTerm.get()); + addToTranslogAndList(translog, ops, new Translog.NoOp(seqNo, noopTerm, reason)); + + try (Translog.Snapshot snapshot = translog.newSnapshot()) { + assertThat(snapshot, 
containsOperationsInAnyOrder(ops)); + assertThat(snapshot.totalOperations(), equalTo(ops.size())); + } + + try (Translog.Snapshot snapshot = translog.newSnapshot(seqNo + 1, randomLongBetween(seqNo + 1, Long.MAX_VALUE))) { + assertThat(snapshot, SnapshotMatchers.size(0)); + assertThat(snapshot.totalOperations(), equalTo(0)); + } + + } + + private TranslogConfig getConfig(int gensToKeep) { + Path tempDir = createTempDir(); + final TranslogConfig temp = getTranslogConfig(tempDir, gensToKeep); + final TranslogConfig config = new TranslogConfig( + temp.getShardId(), + temp.getTranslogPath(), + temp.getIndexSettings(), + temp.getBigArrays(), + new ByteSizeValue(1, ByteSizeUnit.KB), + "" + ); + return config; + } + + private ChannelFactory getChannelFactory() { + writeCalls = new AtomicInteger(); + final ChannelFactory channelFactory = (file, openOption) -> { + FileChannel delegate = FileChannel.open(file, openOption); + boolean success = false; + try { + // don't do partial writes for checkpoints; we rely on the fact that the bytes are written as an atomic operation + final boolean isCkpFile = file.getFileName().toString().endsWith(".ckp"); + + final FileChannel channel; + if (isCkpFile) { + channel = delegate; + } else { + channel = new FilterFileChannel(delegate) { + + @Override + public int write(ByteBuffer src) throws IOException { + writeCalls.incrementAndGet(); + return super.write(src); + } + }; + } + success = true; + return channel; + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(delegate); + } + } + }; + return channelFactory; + } + + public void testExtraGenToKeep() throws Exception { + TranslogConfig config = getConfig(1); + ChannelFactory channelFactory = getChannelFactory(); + final Set<Long> persistedSeqNos = new HashSet<>(); + String translogUUID = Translog.createEmptyTranslog( + config.getTranslogPath(), + SequenceNumbers.NO_OPS_PERFORMED, + shardId, + channelFactory, + primaryTerm.get() + ); + TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(config.getIndexSettings()); + ArrayList<Translog.Operation> ops = new ArrayList<>(); + try ( + RemoteFsTranslog translog = new RemoteFsTranslog( + config, + translogUUID, + deletionPolicy, + () -> SequenceNumbers.NO_OPS_PERFORMED, + primaryTerm::get, + persistedSeqNos::add, + repository, + threadPool, + () -> Boolean.TRUE, + new RemoteTranslogTransferTracker(shardId, 10) + ) { + @Override + ChannelFactory getChannelFactory() { + return channelFactory; + } + } + ) { + addToTranslogAndListAndUpload(translog, ops, new Translog.Index("1", 0, primaryTerm.get(), new byte[] { 1 })); + + addToTranslogAndListAndUpload(translog, ops, new Translog.Index("2", 1, primaryTerm.get(), new byte[] { 1 })); + + addToTranslogAndListAndUpload(translog, ops, new Translog.Index("3", 2, primaryTerm.get(), new byte[] { 1 })); + + // expose the new checkpoint (simulating a commit), before we trim the translog + translog.setMinSeqNoToKeep(2); + + // Trims from local + translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); + + addToTranslogAndListAndUpload(translog, ops, new Translog.Index("4", 3, primaryTerm.get(), new byte[] { 1 })); + + // Trims from remote now + translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); + assertEquals( + 6, + blobStoreTransferService.listAll(getTranslogDirectory().add(DATA_DIR).add(String.valueOf(primaryTerm.get()))).size() + ); + + } + } + + public
void testReadLocation() throws IOException { + ArrayList<Translog.Operation> ops = new ArrayList<>(); + ArrayList<Translog.Location> locs = new ArrayList<>(); + locs.add(addToTranslogAndList(translog, ops, new Translog.Index("1", 0, primaryTerm.get(), new byte[] { 1 }))); + locs.add(addToTranslogAndList(translog, ops, new Translog.Index("2", 1, primaryTerm.get(), new byte[] { 1 }))); + locs.add(addToTranslogAndList(translog, ops, new Translog.Index("3", 2, primaryTerm.get(), new byte[] { 1 }))); + translog.sync(); + int i = 0; + for (Translog.Operation op : ops) { + assertEquals(op, translog.readOperation(locs.get(i++))); + } + assertNull(translog.readOperation(new Translog.Location(100, 0, 0))); + RemoteTranslogTransferTracker statsTracker = translog.getRemoteTranslogTracker(); + assertUploadStatsNoFailures(statsTracker); + assertDownloadStatsNoDownloads(statsTracker); + } + + public void testReadLocationDownload() throws IOException { + ArrayList<Translog.Operation> ops = new ArrayList<>(); + ArrayList<Translog.Location> locs = new ArrayList<>(); + locs.add(addToTranslogAndListAndUpload(translog, ops, new Translog.Index("1", 0, primaryTerm.get(), new byte[] { 1 }))); + locs.add(addToTranslogAndListAndUpload(translog, ops, new Translog.Index("2", 1, primaryTerm.get(), new byte[] { 1 }))); + locs.add(addToTranslogAndListAndUpload(translog, ops, new Translog.Index("3", 2, primaryTerm.get(), new byte[] { 1 }))); + + translog.sync(); + int i = 0; + for (Translog.Operation op : ops) { + assertEquals(op, translog.readOperation(locs.get(i++))); + } + + RemoteTranslogTransferTracker statsTracker = translog.getRemoteTranslogTracker(); + assertUploadStatsNoFailures(statsTracker); + assertDownloadStatsNoDownloads(statsTracker); + + String translogUUID = translog.translogUUID; + try { + translog.getDeletionPolicy().assertNoOpenTranslogRefs(); + translog.close(); + } finally { + terminate(threadPool); + } + + // Delete translog files to test download flow + for (Path file : FileSystemUtils.files(translogDir)) { + Files.delete(file); + } + + // Creating RemoteFsTranslog with the same location + RemoteFsTranslog newTranslog = create(translogDir, repository, translogUUID); + i = 0; + for (Translog.Operation op : ops) { + assertEquals(op, newTranslog.readOperation(locs.get(i++))); + } + + statsTracker = newTranslog.getRemoteTranslogTracker(); + assertUploadStatsNoUploads(statsTracker); + assertDownloadStatsPopulated(statsTracker); + + try { + newTranslog.close(); + } catch (Exception e) { + // Ignoring this exception for now. 
Once the download flow populates FileTracker, + // we can remove this try-catch block + } + } + + public void testSnapshotWithNewTranslog() throws IOException { + List<Closeable> toClose = new ArrayList<>(); + try { + ArrayList<Translog.Operation> ops = new ArrayList<>(); + Translog.Snapshot snapshot = translog.newSnapshot(); + toClose.add(snapshot); + assertThat(snapshot, SnapshotMatchers.size(0)); + + addToTranslogAndList(translog, ops, new Translog.Index("1", 0, primaryTerm.get(), new byte[] { 1 })); + Translog.Snapshot snapshot1 = translog.newSnapshot(); + toClose.add(snapshot1); + + addToTranslogAndList(translog, ops, new Translog.Index("2", 1, primaryTerm.get(), new byte[] { 2 })); + + assertThat(snapshot1, SnapshotMatchers.equalsTo(ops.get(0))); + + translog.rollGeneration(); + addToTranslogAndList(translog, ops, new Translog.Index("3", 2, primaryTerm.get(), new byte[] { 3 })); + + Translog.Snapshot snapshot2 = translog.newSnapshot(); + toClose.add(snapshot2); + translog.getDeletionPolicy().setLocalCheckpointOfSafeCommit(2); + assertThat(snapshot2, containsOperationsInAnyOrder(ops)); + assertThat(snapshot2.totalOperations(), equalTo(ops.size())); + } finally { + IOUtils.closeWhileHandlingException(toClose); + } + } + + public void testSnapshotOnClosedTranslog() throws IOException { + assertTrue(Files.exists(translogDir.resolve(Translog.getFilename(1)))); + translog.add(new Translog.Index("1", 0, primaryTerm.get(), new byte[] { 1 })); + translog.close(); + AlreadyClosedException ex = expectThrows(AlreadyClosedException.class, () -> translog.newSnapshot()); + assertEquals(ex.getMessage(), "translog is already closed"); + } + + public void testRangeSnapshot() throws Exception { + long minSeqNo = SequenceNumbers.NO_OPS_PERFORMED; + long maxSeqNo = SequenceNumbers.NO_OPS_PERFORMED; + final int generations = between(2, 20); + Map<Long, List<Translog.Operation>> operationsByGen = new HashMap<>(); + for (int gen = 0; gen < generations; gen++) { + Set<Long> seqNos = new HashSet<>(); + int numOps = randomIntBetween(1, 100); + for (int i = 0; i < numOps; i++) { + final long seqNo = randomValueOtherThanMany(seqNos::contains, () -> randomLongBetween(0, 1000)); + minSeqNo = SequenceNumbers.min(minSeqNo, seqNo); + maxSeqNo = SequenceNumbers.max(maxSeqNo, seqNo); + seqNos.add(seqNo); + } + List<Translog.Operation> ops = new ArrayList<>(seqNos.size()); + for (long seqNo : seqNos) { + Translog.Index op = new Translog.Index(randomAlphaOfLength(10), seqNo, primaryTerm.get(), new byte[] { randomByte() }); + translog.add(op); + ops.add(op); + } + operationsByGen.put(translog.currentFileGeneration(), ops); + translog.rollGeneration(); + if (rarely()) { + translog.rollGeneration(); // empty generation + } + } + + if (minSeqNo > 0) { + long fromSeqNo = randomLongBetween(0, minSeqNo - 1); + long toSeqNo = randomLongBetween(fromSeqNo, minSeqNo - 1); + try (Translog.Snapshot snapshot = translog.newSnapshot(fromSeqNo, toSeqNo)) { + assertThat(snapshot.totalOperations(), equalTo(0)); + assertNull(snapshot.next()); + } + } + + long fromSeqNo = randomLongBetween(maxSeqNo + 1, Long.MAX_VALUE); + long toSeqNo = randomLongBetween(fromSeqNo, Long.MAX_VALUE); + try (Translog.Snapshot snapshot = translog.newSnapshot(fromSeqNo, toSeqNo)) { + assertThat(snapshot.totalOperations(), equalTo(0)); + assertNull(snapshot.next()); + } + + fromSeqNo = randomLongBetween(0, 2000); + toSeqNo = randomLongBetween(fromSeqNo, 2000); + try (Translog.Snapshot snapshot = translog.newSnapshot(fromSeqNo, toSeqNo)) { + Set<Long> seenSeqNos = 
new HashSet<>(); + List<Translog.Operation> expectedOps = new ArrayList<>(); + for (long gen = translog.currentFileGeneration(); gen > 0; gen--) { + for (Translog.Operation op : operationsByGen.getOrDefault(gen, Collections.emptyList())) { + if (fromSeqNo <= op.seqNo() && op.seqNo() <= toSeqNo && seenSeqNos.add(op.seqNo())) { + expectedOps.add(op); + } + } + } + assertThat(TestTranslog.drainSnapshot(snapshot, false), equalTo(expectedOps)); + } + } + + public void testSimpleOperationsUpload() throws Exception { + ArrayList<Translog.Operation> ops = new ArrayList<>(); + try (Translog.Snapshot snapshot = translog.newSnapshot()) { + assertThat(snapshot, SnapshotMatchers.size(0)); + } + + addToTranslogAndListAndUpload(translog, ops, new Translog.Index("1", 0, primaryTerm.get(), new byte[] { 1 })); + try (Translog.Snapshot snapshot = translog.newSnapshot()) { + assertThat(snapshot, SnapshotMatchers.equalsTo(ops)); + assertThat(snapshot.totalOperations(), equalTo(ops.size())); + } + + assertEquals(4, translog.allUploaded().size()); + + addToTranslogAndListAndUpload(translog, ops, new Translog.Index("1", 1, primaryTerm.get(), new byte[] { 1 })); + assertEquals(6, translog.allUploaded().size()); + + translog.rollGeneration(); + assertEquals(6, translog.allUploaded().size()); + + Set<String> mdFiles = blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)); + assertEquals(2, mdFiles.size()); + logger.info("All md files {}", mdFiles); + + Set<String> tlogFiles = blobStoreTransferService.listAll( + getTranslogDirectory().add(DATA_DIR).add(String.valueOf(primaryTerm.get())) + ); + logger.info("All data files {}", tlogFiles); + + // assert content of ckp and tlog files + BlobPath path = getTranslogDirectory().add(DATA_DIR).add(String.valueOf(primaryTerm.get())); + for (TranslogReader reader : translog.readers) { + final long readerGeneration = reader.getGeneration(); + logger.error("Asserting content of {}", readerGeneration); + Path translogPath = reader.path(); + try ( + InputStream stream = new CheckedInputStream(Files.newInputStream(translogPath), new CRC32()); + InputStream tlogStream = blobStoreTransferService.downloadBlob(path, Translog.getFilename(readerGeneration)); + ) { + byte[] content = stream.readAllBytes(); + byte[] tlog = tlogStream.readAllBytes(); + assertArrayEquals(tlog, content); + } + + Path checkpointPath = translog.location().resolve(Translog.getCommitCheckpointFileName(readerGeneration)); + try ( + CheckedInputStream stream = new CheckedInputStream(Files.newInputStream(checkpointPath), new CRC32()); + InputStream ckpStream = blobStoreTransferService.downloadBlob(path, Translog.getCommitCheckpointFileName(readerGeneration)) + ) { + byte[] content = stream.readAllBytes(); + byte[] ckp = ckpStream.readAllBytes(); + assertArrayEquals(ckp, content); + } + } + + // expose the new checkpoint (simulating a commit), before we trim the translog + // simulating the remote segment upload . 
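+ // setMinSeqNoToKeep marks everything below the given seq_no as committed; trimUnreferencedReaders can then drop generations whose ops are all below it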
+ translog.setMinSeqNoToKeep(0); + // This should not trim anything from local + translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); + assertEquals(2, translog.readers.size()); + assertBusy(() -> { + assertEquals(4, translog.allUploaded().size()); + assertEquals( + 4, + blobStoreTransferService.listAll(getTranslogDirectory().add(DATA_DIR).add(String.valueOf(primaryTerm.get()))).size() + ); + }); + + // This should trim tlog-2 from local + // This should not trim tlog-2.* files from remote as we are not uploading any more translog to remote + translog.setMinSeqNoToKeep(1); + translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); + assertEquals(1, translog.readers.size()); + assertBusy(() -> { + assertEquals(4, translog.allUploaded().size()); + assertEquals( + 4, + blobStoreTransferService.listAll(getTranslogDirectory().add(DATA_DIR).add(String.valueOf(primaryTerm.get()))).size() + ); + }); + + // this should now trim tlog-2 files from remote, but not tlog-3 and tlog-4 + addToTranslogAndListAndUpload(translog, ops, new Translog.Index("2", 2, primaryTerm.get(), new byte[] { 1 })); + assertEquals(2, translog.stats().estimatedNumberOfOperations()); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); + + translog.setMinSeqNoToKeep(2); + // this should now trim tlog-2 files from remote, but not tlog-3 and tlog-4 + translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); + assertEquals(1, translog.readers.size()); + assertEquals(1, translog.stats().estimatedNumberOfOperations()); + assertBusy(() -> { + assertEquals(4, translog.allUploaded().size()); + assertEquals( + 4, + blobStoreTransferService.listAll(getTranslogDirectory().add(DATA_DIR).add(String.valueOf(primaryTerm.get()))).size() + ); + }); + + } + + public void testMetadataFileDeletion() throws Exception { + ArrayList<Translog.Operation> ops = new ArrayList<>(); + // Test deletion of metadata files + int numDocs = randomIntBetween(6, 10); + for (int i = 0; i < numDocs; i++) { + addToTranslogAndListAndUpload(translog, ops, new Translog.Index(String.valueOf(i), i, primaryTerm.get(), new byte[] { 1 })); + translog.setMinSeqNoToKeep(i); + translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); + assertEquals(1, translog.readers.size()); + } + assertBusy(() -> assertEquals(4, translog.allUploaded().size())); + assertBusy(() -> assertEquals(1, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size())); + int moreDocs = randomIntBetween(3, 10); + logger.info("numDocs={} moreDocs={}", numDocs, moreDocs); + for (int i = numDocs; i < numDocs + moreDocs; i++) { + addToTranslogAndListAndUpload(translog, ops, new Translog.Index(String.valueOf(i), i, primaryTerm.get(), new byte[] { 1 })); + } + translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); + assertEquals(1 + moreDocs, translog.readers.size()); + assertBusy(() -> assertEquals(2 + 2L * moreDocs, translog.allUploaded().size())); + assertBusy(() -> assertEquals(1, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size())); + + int totalDocs = numDocs + moreDocs; + translog.setMinSeqNoToKeep(totalDocs - 1); + translog.trimUnreferencedReaders(); + assertBusy(() ->
assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); + + addToTranslogAndListAndUpload( + translog, + ops, + new Translog.Index(String.valueOf(totalDocs), totalDocs, primaryTerm.get(), new byte[] { 1 }) + ); + translog.setMinSeqNoToKeep(totalDocs); + translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); + assertBusy(() -> assertEquals(1, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size())); + + // Change primary term and test the deletion of older primaries + String translogUUID = translog.translogUUID; + try { + translog.getDeletionPolicy().assertNoOpenTranslogRefs(); + translog.close(); + } finally { + terminate(threadPool); + } + + // Increase primary term + long oldPrimaryTerm = primaryTerm.get(); + long newPrimaryTerm = primaryTerm.incrementAndGet(); + + // Creating RemoteFsTranslog with the same location + Translog newTranslog = create(translogDir, repository, translogUUID); + int newPrimaryTermDocs = randomIntBetween(5, 10); + for (int i = totalDocs + 1; i <= totalDocs + newPrimaryTermDocs; i++) { + addToTranslogAndListAndUpload(newTranslog, ops, new Translog.Index(String.valueOf(i), i, primaryTerm.get(), new byte[] { 1 })); + // newTranslog.deletionPolicy.setLocalCheckpointOfSafeCommit(i - 1); + newTranslog.setMinSeqNoToKeep(i); + newTranslog.trimUnreferencedReaders(); + } + + try { + newTranslog.close(); + } catch (Exception e) { + // Ignoring this exception for now. Once the download flow populates FileTracker, + // we can remove this try-catch block + } + } + + public void testDrainSync() throws Exception { + // This test checks the following scenarios - + // 1. During ongoing uploads, the available permits are 0. + // 2. If drainSync is called during an upload, it waits for the upload to finish, and the available permits stay 0. + // 3. After drainSync, if trimUnreferencedReaders is attempted, we do not delete from remote store. + // 4. After drainSync, if an upload is attempted, we do not upload to remote store. + ArrayList<Translog.Operation> ops = new ArrayList<>(); + assertEquals(0, translog.allUploaded().size()); + assertEquals(1, translog.readers.size()); + + addToTranslogAndListAndUpload(translog, ops, new Translog.Index(String.valueOf(0), 0, primaryTerm.get(), new byte[] { 1 })); + assertEquals(4, translog.allUploaded().size()); + assertEquals(2, translog.readers.size()); + assertBusy(() -> assertEquals(1, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size())); + + translog.setMinSeqNoToKeep(0); + translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); + assertEquals(1, translog.readers.size()); + + // Case 1 - During ongoing uploads, the available permits are 0. + slowDown.setSleepSeconds(2); + CountDownLatch latch = new CountDownLatch(1); + Thread thread1 = new Thread(() -> { + try { + addToTranslogAndListAndUpload(translog, ops, new Translog.Index(String.valueOf(1), 1, primaryTerm.get(), new byte[] { 1 })); + assertEquals(2, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size()); + latch.countDown(); + } catch (IOException e) { + throw new AssertionError(e); + } + }); + thread1.start(); + assertBusy(() -> assertEquals(0, translog.availablePermits())); + // Case 2 - If drainSync is called during an upload, it waits for the upload to finish, and the available permits stay 0.
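+ // drainSync acquires all available permits, so it has to wait for the upload running in thread1 to release its permit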
+ Releasable releasable = translog.drainSync(); + assertBusy(() -> assertEquals(0, latch.getCount())); + assertEquals(0, translog.availablePermits()); + slowDown.setSleepSeconds(0); + assertEquals(6, translog.allUploaded().size()); + assertEquals(2, translog.readers.size()); + Set<String> mdFiles = blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)); + + // Case 3 - After drainSync, if trimUnreferencedReaders is attempted, we do not delete from remote store. + translog.setMinSeqNoToKeep(1); + translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); + assertEquals(1, translog.readers.size()); + assertEquals(6, translog.allUploaded().size()); + assertEquals(mdFiles, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR))); + + // Case 4 - After drainSync, if an upload is attempted, we do not upload to remote store. + Translog.Location loc = addToTranslogAndListAndUpload( + translog, + ops, + new Translog.Index(String.valueOf(2), 2, primaryTerm.get(), new byte[] { 1 }) + ); + assertEquals(1, translog.readers.size()); + assertEquals(6, translog.allUploaded().size()); + assertEquals(mdFiles, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR))); + + // Refill the permits + Releasables.close(releasable); + addToTranslogAndListAndUpload(translog, ops, new Translog.Index(String.valueOf(3), 3, primaryTerm.get(), new byte[] { 1 })); + assertEquals(2, translog.readers.size()); + assertEquals(8, translog.allUploaded().size()); + assertEquals(3, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size()); + + translog.setMinSeqNoToKeep(3); + translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); + assertEquals(1, translog.readers.size()); + assertBusy(() -> assertEquals(4, translog.allUploaded().size())); + assertBusy(() -> assertEquals(1, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size())); + } + + private BlobPath getTranslogDirectory() { + return repository.basePath().add(shardId.getIndex().getUUID()).add(String.valueOf(shardId.id())).add(TRANSLOG); + } + + private Long populateTranslogOps(boolean withMissingOps) throws IOException { + long minSeqNo = SequenceNumbers.NO_OPS_PERFORMED; + long maxSeqNo = SequenceNumbers.NO_OPS_PERFORMED; + final int generations = between(2, 20); + long currentSeqNo = 0L; + List<Translog.Operation> firstGenOps = null; + Map<Long, List<Translog.Operation>> operationsByGen = new HashMap<>(); + for (int gen = 0; gen < generations; gen++) { + List<Long> seqNos = new ArrayList<>(); + int numOps = randomIntBetween(4, 10); + for (int i = 0; i < numOps; i++, currentSeqNo++) { + minSeqNo = SequenceNumbers.min(minSeqNo, currentSeqNo); + maxSeqNo = SequenceNumbers.max(maxSeqNo, currentSeqNo); + seqNos.add(currentSeqNo); + } + Collections.shuffle(seqNos, new Random(100)); + List<Translog.Operation> ops = new ArrayList<>(seqNos.size()); + for (long seqNo : seqNos) { + Translog.Index op = new Translog.Index(randomAlphaOfLength(10), seqNo, primaryTerm.get(), new byte[] { randomByte() }); + boolean shouldAdd = !withMissingOps || seqNo % 4 != 0; + if (shouldAdd) { + translog.add(op); + ops.add(op); + } + } + operationsByGen.put(translog.currentFileGeneration(), ops); + if (firstGenOps == null) { + firstGenOps = ops; + } + translog.rollGeneration(); + if (rarely()) { + translog.rollGeneration(); // empty generation + } + }
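+ // currentSeqNo is now one past the highest sequence number handed out above +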
return currentSeqNo; + } + + public void testFullRangeSnapshot() throws Exception { + // Successful snapshot + long nextSeqNo = populateTranslogOps(false); + long fromSeqNo = 0L; + long toSeqNo = Math.min(nextSeqNo - 1, fromSeqNo + 15); + try (Translog.Snapshot snapshot = translog.newSnapshot(fromSeqNo, toSeqNo, true)) { + int totOps = 0; + for (Translog.Operation op = snapshot.next(); op != null; op = snapshot.next()) { + totOps++; + } + assertEquals(totOps, toSeqNo - fromSeqNo + 1); + } + } + + public void testFullRangeSnapshotWithFailures() throws Exception { + long nextSeqNo = populateTranslogOps(true); + long fromSeqNo = 0L; + long toSeqNo = Math.min(nextSeqNo - 1, fromSeqNo + 15); + try (Translog.Snapshot snapshot = translog.newSnapshot(fromSeqNo, toSeqNo, true)) { + int totOps = 0; + for (Translog.Operation op = snapshot.next(); op != null; op = snapshot.next()) { + totOps++; + } + fail("Should throw exception for missing operations"); + } catch (MissingHistoryOperationsException e) { + assertTrue(e.getMessage().contains("Not all operations between from_seqno")); + } + } + + public void testConcurrentWritesWithVaryingSize() throws Throwable { + final int opsPerThread = randomIntBetween(10, 200); + int threadCount = 2 + randomInt(5); + + logger.info("testing with [{}] threads, each doing [{}] ops", threadCount, opsPerThread); + final BlockingQueue<TestTranslog.LocationOperation> writtenOperations = new ArrayBlockingQueue<>(threadCount * opsPerThread); + + Thread[] threads = new Thread[threadCount]; + final Exception[] threadExceptions = new Exception[threadCount]; + final AtomicLong seqNoGenerator = new AtomicLong(); + final CountDownLatch downLatch = new CountDownLatch(1); + for (int i = 0; i < threadCount; i++) { + final int threadId = i; + threads[i] = new TranslogThread( + translog, + downLatch, + opsPerThread, + threadId, + writtenOperations, + seqNoGenerator, + threadExceptions + ); + threads[i].setDaemon(true); + threads[i].start(); + } + + downLatch.countDown(); + + for (int i = 0; i < threadCount; i++) { + if (threadExceptions[i] != null) { + throw threadExceptions[i]; + } + threads[i].join(60 * 1000); + } + + List<TestTranslog.LocationOperation> collect = new ArrayList<>(writtenOperations); + collect.sort(Comparator.comparing(op -> op.operation.seqNo())); + + List<Translog.Operation> opsList = new ArrayList<>(threadCount * opsPerThread); + try (Translog.Snapshot snapshot = translog.newSnapshot()) { + for (Translog.Operation op = snapshot.next(); op != null; op = snapshot.next()) { + opsList.add(op); + } + } + opsList.sort(Comparator.comparing(op -> op.seqNo())); + + for (int i = 0; i < threadCount * opsPerThread; i++) { + assertEquals(opsList.get(i), collect.get(i).operation); + } + } + + /** + * Tests that concurrent readers and writes maintain view and snapshot semantics + */ + public void testConcurrentWriteViewsAndSnapshot() throws Throwable { + final Thread[] writers = new Thread[randomIntBetween(1, 3)]; + final Thread[] readers = new Thread[randomIntBetween(1, 3)]; + final int flushEveryOps = randomIntBetween(5, 10); + final int maxOps = randomIntBetween(20, 100); + final Object signalReaderSomeDataWasIndexed = new Object(); + final AtomicLong idGenerator = new AtomicLong(); + final CyclicBarrier barrier = new CyclicBarrier(writers.length + readers.length + 1); + + // a map of all written ops and their returned location. 
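+ // reader threads verify below that every op recorded here above their captured checkpoint shows up in a snapshot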
+ final Map<Translog.Operation, Translog.Location> writtenOps = ConcurrentCollections.newConcurrentMap(); + + // a signal for all threads to stop + final AtomicBoolean run = new AtomicBoolean(true); + + final Object flushMutex = new Object(); + final AtomicLong lastCommittedLocalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + final LocalCheckpointTracker tracker = LocalCheckpointTrackerTests.createEmptyTracker(); + final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); + // any errors on threads + final List<Exception> errors = new CopyOnWriteArrayList<>(); + logger.info("using [{}] readers. [{}] writers. flushing every ~[{}] ops.", readers.length, writers.length, flushEveryOps); + for (int i = 0; i < writers.length; i++) { + final String threadName = "writer_" + i; + final int threadId = i; + writers[i] = new Thread(new AbstractRunnable() { + @Override + public void doRun() throws Exception { + barrier.await(); + int counter = 0; + while (run.get() && idGenerator.get() < maxOps) { + long id = idGenerator.getAndIncrement(); + final Translog.Operation op; + final Translog.Operation.Type type = Translog.Operation.Type.values()[((int) (id % Translog.Operation.Type + .values().length))]; + switch (type) { + case CREATE: + case INDEX: + op = new Translog.Index("" + id, id, primaryTerm.get(), new byte[] { (byte) id }); + break; + case DELETE: + op = new Translog.Delete(Long.toString(id), id, primaryTerm.get()); + break; + case NO_OP: + op = new Translog.NoOp(id, 1, Long.toString(id)); + break; + default: + throw new AssertionError("unsupported operation type [" + type + "]"); + } + Translog.Location location = translog.add(op); + tracker.markSeqNoAsProcessed(id); + Translog.Location existing = writtenOps.put(op, location); + if (existing != null) { + fail("duplicate op [" + op + "], old entry at " + location); + } + if (id % writers.length == threadId) { + translog.ensureSynced(location); + } + if (id % flushEveryOps == 0) { + synchronized (flushMutex) { + // we need not do this concurrently as we need to make sure that the generation + // we're committing - is still present when we're committing + long localCheckpoint = tracker.getProcessedCheckpoint(); + translog.rollGeneration(); + // expose the new checkpoint (simulating a commit), before we trim the translog + lastCommittedLocalCheckpoint.set(localCheckpoint); + // deletionPolicy.setLocalCheckpointOfSafeCommit(localCheckpoint); + translog.setMinSeqNoToKeep(localCheckpoint + 1); + translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); + } + } + if (id % 7 == 0) { + synchronized (signalReaderSomeDataWasIndexed) { + signalReaderSomeDataWasIndexed.notifyAll(); + } + } + counter++; + } + logger.info("--> [{}] done. 
wrote [{}] ops.", threadName, counter); + } + + @Override + public void onFailure(Exception e) { + logger.error(() -> new ParameterizedMessage("--> writer [{}] had an error", threadName), e); + errors.add(e); + } + }, threadName); + writers[i].start(); + } + + for (int i = 0; i < readers.length; i++) { + final String threadId = "reader_" + i; + readers[i] = new Thread(new AbstractRunnable() { + Closeable retentionLock = null; + long committedLocalCheckpointAtView; + + @Override + public void onFailure(Exception e) { + logger.error(() -> new ParameterizedMessage("--> reader [{}] had an error", threadId), e); + errors.add(e); + try { + closeRetentionLock(); + } catch (IOException inner) { + inner.addSuppressed(e); + logger.error("unexpected error while closing view, after failure", inner); + } + } + + void closeRetentionLock() throws IOException { + if (retentionLock != null) { + retentionLock.close(); + } + } + + void acquireRetentionLock() throws IOException { + closeRetentionLock(); + retentionLock = translog.acquireRetentionLock(); + // captures the last committed checkpoint, while holding the view, simulating + // recovery logic which captures a view and gets a lucene commit + committedLocalCheckpointAtView = lastCommittedLocalCheckpoint.get(); + logger.info("--> [{}] min gen after acquiring lock [{}]", threadId, translog.getMinFileGeneration()); + } + + @Override + protected void doRun() throws Exception { + barrier.await(); + int iter = 0; + while (idGenerator.get() < maxOps) { + if (iter++ % 10 == 0) { + acquireRetentionLock(); + } + + // captures all ops that were written since the view was created (with a small caveat, see below) + // these are what we expect the snapshot to return (and potentially some more). + Set<Translog.Operation> expectedOps = new HashSet<>(writtenOps.keySet()); + expectedOps.removeIf(op -> op.seqNo() <= committedLocalCheckpointAtView); + try (Translog.Snapshot snapshot = translog.newSnapshot(committedLocalCheckpointAtView + 1L, Long.MAX_VALUE)) { + Translog.Operation op; + while ((op = snapshot.next()) != null) { + expectedOps.remove(op); + } + } + if (expectedOps.isEmpty() == false) { + StringBuilder missed = new StringBuilder("missed ").append(expectedOps.size()) + .append(" operations from [") + .append(committedLocalCheckpointAtView + 1L) + .append("]"); + boolean failed = false; + for (Translog.Operation expectedOp : expectedOps) { + final Translog.Location loc = writtenOps.get(expectedOp); + failed = true; + missed.append("\n --> [").append(expectedOp).append("] written at ").append(loc); + } + if (failed) { + fail(missed.toString()); + } + } + // slow down things a bit and spread out testing. + synchronized (signalReaderSomeDataWasIndexed) { + if (idGenerator.get() < maxOps) { + signalReaderSomeDataWasIndexed.wait(); + } + } + } + closeRetentionLock(); + logger.info("--> [{}] done. 
tested [{}] snapshots", threadId, iter); + } + }, threadId); + readers[i].start(); + } + + barrier.await(); + logger.debug("--> waiting for threads to stop"); + for (Thread thread : writers) { + thread.join(); + } + logger.debug("--> waiting for readers to stop"); + // force stopping, if all writers crashed + synchronized (signalReaderSomeDataWasIndexed) { + idGenerator.set(Long.MAX_VALUE); + signalReaderSomeDataWasIndexed.notifyAll(); + } + for (Thread thread : readers) { + thread.join(); + } + if (errors.size() > 0) { + Throwable e = errors.get(0); + for (Throwable suppress : errors.subList(1, errors.size())) { + e.addSuppressed(suppress); + } + throw e; + } + logger.info("--> test done. total ops written [{}]", writtenOps.size()); + } + + public void testSyncUpTo() throws IOException { + int translogOperations = randomIntBetween(10, 100); + int count = 0; + for (int op = 0; op < translogOperations; op++) { + int seqNo = ++count; + final Translog.Location location = translog.add( + new Translog.Index("" + op, seqNo, primaryTerm.get(), Integer.toString(seqNo).getBytes(Charset.forName("UTF-8"))) + ); + if (randomBoolean()) { + assertTrue("at least one operation pending", translog.syncNeeded()); + assertTrue("this operation has not been synced", translog.ensureSynced(location)); + // we are the last location so everything should be synced + assertFalse("the last call to ensureSynced synced all previous ops", translog.syncNeeded()); + seqNo = ++count; + translog.add( + new Translog.Index("" + op, seqNo, primaryTerm.get(), Integer.toString(seqNo).getBytes(Charset.forName("UTF-8"))) + ); + assertTrue("one pending operation", translog.syncNeeded()); + assertFalse("this op has been synced before", translog.ensureSynced(location)); // not syncing now + assertTrue("we only synced a previous operation", translog.syncNeeded()); + } + if (rarely()) { + translog.rollGeneration(); + assertFalse("location is from a previous translog - already synced", translog.ensureSynced(location)); // not syncing now + assertFalse("no sync needed since no operations in current translog", translog.syncNeeded()); + } + + if (randomBoolean()) { + translog.sync(); + assertFalse("translog has been synced already", translog.ensureSynced(location)); + RemoteTranslogTransferTracker statsTracker = translog.getRemoteTranslogTracker(); + assertUploadStatsNoFailures(statsTracker); + assertDownloadStatsNoDownloads(statsTracker); + } + } + } + + public void testSyncUpLocationFailure() throws IOException { + int translogOperations = randomIntBetween(1, 20); + int count = 0; + fail.failAlways(); + ArrayList<Translog.Location> locations = new ArrayList<>(); + boolean shouldFailAlways = randomBoolean(); + for (int op = 0; op < translogOperations; op++) { + int seqNo = ++count; + final Translog.Location location = translog.add( + new Translog.Index("" + op, seqNo, primaryTerm.get(), Integer.toString(seqNo).getBytes(Charset.forName("UTF-8"))) + ); + if (shouldFailAlways) { + fail.failAlways(); + try { + translog.ensureSynced(location); + fail("io exception expected"); + } catch (IOException e) { + assertTrue("at least one operation pending", translog.syncNeeded()); + } + } else { + fail.failNever(); + translog.ensureSynced(location); + assertFalse("no sync needed since no operations in current translog", translog.syncNeeded()); + } + locations.add(location); + + } + // clean up + fail.failNever(); + + // writes should get synced up now + translog.sync(); + assertFalse(translog.syncNeeded()); + for (Translog.Location location : 
locations) { + assertFalse("all of the locations should be synced: " + location, translog.ensureSynced(location)); + } + + RemoteTranslogTransferTracker statsTracker = translog.getRemoteTranslogTracker(); + assertTrue(statsTracker.getUploadBytesStarted() > 0); + assertTrue(statsTracker.getTotalUploadsStarted() > 0); + + if (shouldFailAlways) { + assertTrue(statsTracker.getTotalUploadsFailed() > 0); + } else { + assertEquals(0, statsTracker.getTotalUploadsFailed()); + } + + assertTrue(statsTracker.getTotalUploadsSucceeded() > 0); + assertTrue(statsTracker.getLastSuccessfulUploadTimestamp() > 0); + assertDownloadStatsNoDownloads(statsTracker); + } + + public void testSyncUpAlwaysFailure() throws IOException { + int translogOperations = randomIntBetween(1, 20); + int count = 0; + fail.failAlways(); + for (int op = 0; op < translogOperations; op++) { + translog.add( + new Translog.Index(String.valueOf(op), count, primaryTerm.get(), Integer.toString(count).getBytes(StandardCharsets.UTF_8)) + ); + try { + translog.sync(); + fail("io exception expected"); + } catch (TranslogUploadFailedException e) { + assertTrue("at least one operation pending", translog.syncNeeded()); + } + } + assertTrue(translog.isOpen()); + fail.failNever(); + translog.sync(); + } + + public void testSyncUpToStream() throws IOException { + int iters = randomIntBetween(5, 10); + for (int i = 0; i < iters; i++) { + int translogOperations = randomIntBetween(10, 100); + int count = 0; + ArrayList<Translog.Location> locations = new ArrayList<>(); + for (int op = 0; op < translogOperations; op++) { + if (rarely()) { + translog.rollGeneration(); + } + final Translog.Location location = translog.add( + new Translog.Index("" + op, op, primaryTerm.get(), Integer.toString(++count).getBytes(Charset.forName("UTF-8"))) + ); + locations.add(location); + } + Collections.shuffle(locations, random()); + if (randomBoolean()) { + assertTrue("at least one operation pending", translog.syncNeeded()); + assertTrue("this operation has not been synced", translog.ensureSynced(locations.stream())); + // we are the last location so everything should be synced + assertFalse("the last call to ensureSynced synced all previous ops", translog.syncNeeded()); + } else if (rarely()) { + translog.rollGeneration(); + // not syncing now + assertFalse("location is from a previous translog - already synced", translog.ensureSynced(locations.stream())); + assertFalse("no sync needed since no operations in current translog", translog.syncNeeded()); + } else { + translog.sync(); + assertFalse("translog has been synced already", translog.ensureSynced(locations.stream())); + } + + RemoteTranslogTransferTracker statsTracker = translog.getRemoteTranslogTracker(); + assertUploadStatsNoFailures(statsTracker); + assertDownloadStatsNoDownloads(statsTracker); + + for (Translog.Location location : locations) { + assertFalse("all of the locations should be synced: " + location, translog.ensureSynced(location)); + } + } + } + + public void testLocationComparison() throws IOException { + List<Translog.Location> locations = new ArrayList<>(); + int translogOperations = randomIntBetween(10, 100); + int count = 0; + for (int op = 0; op < translogOperations; op++) { + locations.add( + translog.add( + new Translog.Index("" + op, op, primaryTerm.get(), Integer.toString(++count).getBytes(Charset.forName("UTF-8"))) + ) + ); + if (randomBoolean()) { + translog.ensureSynced(locations.get(op)); + } + if (rarely() && translogOperations > op + 1) { + translog.rollGeneration(); + } + }
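+ // shuffle so that computing the max location below does not depend on insertion order +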
Collections.shuffle(locations, random()); + Translog.Location max = locations.get(0); + for (Translog.Location location : locations) { + max = max(max, location); + } + + try (Translog.Snapshot snap = new TestTranslog.SortedSnapshot(translog.newSnapshot())) { + Translog.Operation next; + Translog.Operation maxOp = null; + while ((next = snap.next()) != null) { + maxOp = next; + } + assertNotNull(maxOp); + assertEquals(maxOp.getSource().source.utf8ToString(), Integer.toString(count)); + } + } + + public static Translog.Location max(Translog.Location a, Translog.Location b) { + if (a.compareTo(b) > 0) { + return a; + } + return b; + } + + public void testTranslogWriter() throws IOException { + final TranslogWriter writer = translog.createWriter(translog.currentFileGeneration() + 1); + final Set<Long> persistedSeqNos = new HashSet<>(); + persistedSeqNoConsumer.set(persistedSeqNos::add); + final int numOps = scaledRandomIntBetween(8, 250000); + final Set<Long> seenSeqNos = new HashSet<>(); + boolean opsHaveValidSequenceNumbers = randomBoolean(); + for (int i = 0; i < numOps; i++) { + byte[] bytes = new byte[4]; + DataOutput out = EndiannessReverserUtil.wrapDataOutput(new ByteArrayDataOutput(bytes)); + out.writeInt(i); + long seqNo; + do { + seqNo = opsHaveValidSequenceNumbers ? randomNonNegativeLong() : SequenceNumbers.UNASSIGNED_SEQ_NO; + opsHaveValidSequenceNumbers = opsHaveValidSequenceNumbers || !rarely(); + } while (seenSeqNos.contains(seqNo)); + if (seqNo != SequenceNumbers.UNASSIGNED_SEQ_NO) { + seenSeqNos.add(seqNo); + } + writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), seqNo); + } + assertThat(persistedSeqNos, empty()); + writer.sync(); + persistedSeqNos.remove(SequenceNumbers.UNASSIGNED_SEQ_NO); + assertEquals(seenSeqNos, persistedSeqNos); + + final BaseTranslogReader reader = randomBoolean() + ? writer + : translog.openReader(writer.path(), Checkpoint.read(translog.location().resolve(Translog.CHECKPOINT_FILE_NAME))); + for (int i = 0; i < numOps; i++) { + ByteBuffer buffer = ByteBuffer.allocate(4); + reader.readBytes(buffer, reader.getFirstOperationOffset() + 4 * i); + buffer.flip(); + final int value = buffer.getInt(); + assertEquals(i, value); + } + final long minSeqNo = seenSeqNos.stream().min(Long::compareTo).orElse(SequenceNumbers.NO_OPS_PERFORMED); + final long maxSeqNo = seenSeqNos.stream().max(Long::compareTo).orElse(SequenceNumbers.NO_OPS_PERFORMED); + assertThat(reader.getCheckpoint().minSeqNo, equalTo(minSeqNo)); + assertThat(reader.getCheckpoint().maxSeqNo, equalTo(maxSeqNo)); + + byte[] bytes = new byte[4]; + DataOutput out = EndiannessReverserUtil.wrapDataOutput(new ByteArrayDataOutput(bytes)); + out.writeInt(2048); + writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), randomNonNegativeLong()); + + if (reader instanceof TranslogReader) { + ByteBuffer buffer = ByteBuffer.allocate(4); + try { + reader.readBytes(buffer, reader.getFirstOperationOffset() + 4 * numOps); + fail("read past EOF?"); + } catch (EOFException ex) { + // expected + } + ((TranslogReader) reader).close(); + } else { + // live reader! 
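+ // a writer-backed reader can also serve reads of bytes added after the last sync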
+ ByteBuffer buffer = ByteBuffer.allocate(4); + final long pos = reader.getFirstOperationOffset() + 4 * numOps; + reader.readBytes(buffer, pos); + buffer.flip(); + final int value = buffer.getInt(); + assertEquals(2048, value); + } + IOUtils.close(writer); + } + + public void testTranslogWriterCanFlushInAddOrReadCall() throws IOException { + final TranslogConfig config = getConfig(1); + final Set<Long> persistedSeqNos = new HashSet<>(); + writeCalls = new AtomicInteger(); + final ChannelFactory channelFactory = getChannelFactory(); + String translogUUID = Translog.createEmptyTranslog( + config.getTranslogPath(), + SequenceNumbers.NO_OPS_PERFORMED, + shardId, + channelFactory, + primaryTerm.get() + ); + + try ( + Translog translog = new RemoteFsTranslog( + config, + translogUUID, + new DefaultTranslogDeletionPolicy(-1, -1, 0), + () -> SequenceNumbers.NO_OPS_PERFORMED, + primaryTerm::get, + persistedSeqNos::add, + repository, + threadPool, + () -> Boolean.TRUE, + new RemoteTranslogTransferTracker(shardId, 10) + ) { + @Override + ChannelFactory getChannelFactory() { + return channelFactory; + } + } + ) { + TranslogWriter writer = translog.getCurrent(); + int initialWriteCalls = writeCalls.get(); + byte[] bytes = new byte[256]; + writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), 1); + writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), 2); + writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), 3); + writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), 4); + assertThat(persistedSeqNos, empty()); + assertEquals(initialWriteCalls, writeCalls.get()); + + if (randomBoolean()) { + // Since the buffer is full, this will flush before performing the add. + writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), 5); + assertThat(persistedSeqNos, empty()); + assertThat(writeCalls.get(), greaterThan(initialWriteCalls)); + } else { + // Will flush on read + writer.readBytes(ByteBuffer.allocate(256), 0); + assertThat(persistedSeqNos, empty()); + assertThat(writeCalls.get(), greaterThan(initialWriteCalls)); + + // Add after the read flushed the buffer + writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), 5); + } + + writer.sync(); + + // Sequence numbers are marked as persisted after sync + assertThat(persistedSeqNos, contains(1L, 2L, 3L, 4L, 5L)); + } + } + + public void testTranslogWriterFsyncDisabledInRemoteFsTranslog() throws IOException { + Path tempDir = createTempDir(); + final TranslogConfig temp = getTranslogConfig(tempDir); + final TranslogConfig config = new TranslogConfig( + temp.getShardId(), + temp.getTranslogPath(), + temp.getIndexSettings(), + temp.getBigArrays(), + new ByteSizeValue(1, ByteSizeUnit.KB), + "" + ); + + final Set<Long> persistedSeqNos = new HashSet<>(); + final AtomicInteger translogFsyncCalls = new AtomicInteger(); + final AtomicInteger checkpointFsyncCalls = new AtomicInteger(); + + final ChannelFactory channelFactory = (file, openOption) -> { + FileChannel delegate = FileChannel.open(file, openOption); + boolean success = false; + try { + // don't do partial writes for checkpoints; we rely on the fact that the bytes are written as an atomic operation + final boolean isCkpFile = file.getFileName().toString().endsWith(".ckp"); + + final FileChannel channel; + if (isCkpFile) { + channel = new FilterFileChannel(delegate) { + @Override + public void force(boolean metaData) throws IOException { + checkpointFsyncCalls.incrementAndGet(); + } + }; + } else { + channel = new FilterFileChannel(delegate) { +
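+ // non-checkpoint (.tlog) channel: count fsync calls instead of forwarding them +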
@Override + public void force(boolean metaData) throws IOException { + translogFsyncCalls.incrementAndGet(); + } + }; + } + success = true; + return channel; + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(delegate); + } + } + }; + + String translogUUID = Translog.createEmptyTranslog( + config.getTranslogPath(), + SequenceNumbers.NO_OPS_PERFORMED, + shardId, + channelFactory, + primaryTerm.get() + ); + + try ( + Translog translog = new RemoteFsTranslog( + config, + translogUUID, + new DefaultTranslogDeletionPolicy(-1, -1, 0), + () -> SequenceNumbers.NO_OPS_PERFORMED, + primaryTerm::get, + persistedSeqNos::add, + repository, + threadPool, + () -> Boolean.TRUE, + new RemoteTranslogTransferTracker(shardId, 10) + ) { + @Override + ChannelFactory getChannelFactory() { + return channelFactory; + } + } + ) { + TranslogWriter writer = translog.getCurrent(); + byte[] bytes = new byte[256]; + writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), 1); + writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), 2); + writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), 3); + writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), 4); + writer.sync(); + // Fsync is still enabled during empty translog creation. + assertEquals(2, checkpointFsyncCalls.get()); + assertEquals(1, translogFsyncCalls.get()); + // Sequence numbers are marked as persisted after sync + assertThat(persistedSeqNos, contains(1L, 2L, 3L, 4L)); + } + } + + public void testCloseIntoReader() throws IOException { + try (TranslogWriter writer = translog.createWriter(translog.currentFileGeneration() + 1)) { + final int numOps = randomIntBetween(8, 128); + for (int i = 0; i < numOps; i++) { + final byte[] bytes = new byte[4]; + final DataOutput out = EndiannessReverserUtil.wrapDataOutput(new ByteArrayDataOutput(bytes)); + out.writeInt(i); + writer.add(ReleasableBytesReference.wrap(new BytesArray(bytes)), randomNonNegativeLong()); + } + writer.sync(); + final Checkpoint writerCheckpoint = writer.getCheckpoint(); + TranslogReader reader = writer.closeIntoReader(); + try { + if (randomBoolean()) { + reader.close(); + reader = translog.openReader(reader.path(), writerCheckpoint); + } + for (int i = 0; i < numOps; i++) { + final ByteBuffer buffer = ByteBuffer.allocate(4); + reader.readBytes(buffer, reader.getFirstOperationOffset() + 4 * i); + buffer.flip(); + final int value = buffer.getInt(); + assertEquals(i, value); + } + final Checkpoint readerCheckpoint = reader.getCheckpoint(); + assertThat(readerCheckpoint, equalTo(writerCheckpoint)); + } finally { + IOUtils.close(reader); + } + } + } + + public void testDownloadWithRetries() throws IOException { + long generation = 1, primaryTerm = 1; + Path location = createTempDir(); + TranslogTransferMetadata translogTransferMetadata = new TranslogTransferMetadata(primaryTerm, generation, generation, 1); + Map<String, String> generationToPrimaryTermMapper = new HashMap<>(); + generationToPrimaryTermMapper.put(String.valueOf(generation), String.valueOf(primaryTerm)); + translogTransferMetadata.setGenerationToPrimaryTermMapper(generationToPrimaryTermMapper); + + TranslogTransferManager mockTransfer = mock(TranslogTransferManager.class); + RemoteTranslogTransferTracker remoteTranslogTransferTracker = mock(RemoteTranslogTransferTracker.class); + when(mockTransfer.readMetadata()).thenReturn(translogTransferMetadata); + when(mockTransfer.getRemoteTranslogTransferTracker()).thenReturn(remoteTranslogTransferTracker); + + // Always File not 
found + when(mockTransfer.downloadTranslog(any(), any(), any())).thenThrow(new NoSuchFileException("File not found")); + TranslogTransferManager finalMockTransfer = mockTransfer; + assertThrows(NoSuchFileException.class, () -> RemoteFsTranslog.download(finalMockTransfer, location, logger)); + + // File not found in first attempt . File found in second attempt. + mockTransfer = mock(TranslogTransferManager.class); + when(mockTransfer.readMetadata()).thenReturn(translogTransferMetadata); + when(mockTransfer.getRemoteTranslogTransferTracker()).thenReturn(remoteTranslogTransferTracker); + String msg = "File not found"; + Exception toThrow = randomBoolean() ? new NoSuchFileException(msg) : new FileNotFoundException(msg); + when(mockTransfer.downloadTranslog(any(), any(), any())).thenThrow(toThrow).thenReturn(true); + + AtomicLong downloadCounter = new AtomicLong(); + doAnswer(invocation -> { + if (downloadCounter.incrementAndGet() <= 1) { + throw new NoSuchFileException("File not found"); + } else if (downloadCounter.get() == 2) { + Files.createFile(location.resolve(Translog.getCommitCheckpointFileName(generation))); + } + return true; + }).when(mockTransfer).downloadTranslog(any(), any(), any()); + + // no exception thrown + RemoteFsTranslog.download(mockTransfer, location, logger); + } + + public class ThrowingBlobRepository extends FsRepository { + + private final Environment environment; + private final TestTranslog.FailSwitch fail; + private final TestTranslog.SlowDownWriteSwitch slowDown; + + public ThrowingBlobRepository( + RepositoryMetadata metadata, + Environment environment, + NamedXContentRegistry namedXContentRegistry, + ClusterService clusterService, + RecoverySettings recoverySettings, + TestTranslog.FailSwitch fail, + TestTranslog.SlowDownWriteSwitch slowDown + ) { + super(metadata, environment, namedXContentRegistry, clusterService, recoverySettings); + this.environment = environment; + this.fail = fail; + this.slowDown = slowDown; + } + + protected BlobStore createBlobStore() throws Exception { + final String location = REPOSITORIES_LOCATION_SETTING.get(getMetadata().settings()); + final Path locationFile = environment.resolveRepoFile(location); + return new ThrowingBlobStore(bufferSize, locationFile, isReadOnly(), fail, slowDown); + } + } + + private class ThrowingBlobStore extends FsBlobStore { + + private final TestTranslog.FailSwitch fail; + private final TestTranslog.SlowDownWriteSwitch slowDown; + + public ThrowingBlobStore( + int bufferSizeInBytes, + Path path, + boolean readonly, + TestTranslog.FailSwitch fail, + TestTranslog.SlowDownWriteSwitch slowDown + ) throws IOException { + super(bufferSizeInBytes, path, readonly); + this.fail = fail; + this.slowDown = slowDown; + } + + @Override + public BlobContainer blobContainer(BlobPath path) { + try { + return new ThrowingBlobContainer(this, path, buildAndCreate(path), fail, slowDown); + } catch (IOException ex) { + throw new OpenSearchException("failed to create blob container", ex); + } + } + } + + private class ThrowingBlobContainer extends FsBlobContainer { + + private TestTranslog.FailSwitch fail; + private final TestTranslog.SlowDownWriteSwitch slowDown; + + public ThrowingBlobContainer( + FsBlobStore blobStore, + BlobPath blobPath, + Path path, + TestTranslog.FailSwitch fail, + TestTranslog.SlowDownWriteSwitch slowDown + ) { + super(blobStore, blobPath, path); + this.fail = fail; + this.slowDown = slowDown; + } + + @Override + public void writeBlobAtomic(final String blobName, final InputStream inputStream, final 
long blobSize, boolean failIfAlreadyExists) + throws IOException { + if (fail.fail()) { + throw new IOException("blob container throwing error"); + } + if (slowDown.getSleepSeconds() > 0) { + try { + Thread.sleep(slowDown.getSleepSeconds() * 1000L); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + super.writeBlobAtomic(blobName, inputStream, blobSize, failIfAlreadyExists); + } + } + + class TranslogThread extends Thread { + private final CountDownLatch downLatch; + private final int opsPerThread; + private final int threadId; + private final Collection<TestTranslog.LocationOperation> writtenOperations; + private final Exception[] threadExceptions; + private final Translog translog; + private final AtomicLong seqNoGenerator; + + TranslogThread( + Translog translog, + CountDownLatch downLatch, + int opsPerThread, + int threadId, + Collection<TestTranslog.LocationOperation> writtenOperations, + AtomicLong seqNoGenerator, + Exception[] threadExceptions + ) { + this.translog = translog; + this.downLatch = downLatch; + this.opsPerThread = opsPerThread; + this.threadId = threadId; + this.writtenOperations = writtenOperations; + this.seqNoGenerator = seqNoGenerator; + this.threadExceptions = threadExceptions; + } + + @Override + public void run() { + try { + downLatch.await(); + for (int opCount = 0; opCount < opsPerThread; opCount++) { + Translog.Operation op; + final Translog.Operation.Type type = randomFrom(Translog.Operation.Type.values()); + switch (type) { + case CREATE: + case INDEX: + op = new Translog.Index( + threadId + "_" + opCount, + seqNoGenerator.getAndIncrement(), + primaryTerm.get(), + randomUnicodeOfLengthBetween(1, 20 * 1024).getBytes("UTF-8") + ); + break; + case DELETE: + op = new Translog.Delete( + threadId + "_" + opCount, + seqNoGenerator.getAndIncrement(), + primaryTerm.get(), + 1 + randomInt(100000) + ); + break; + case NO_OP: + op = new Translog.NoOp(seqNoGenerator.getAndIncrement(), primaryTerm.get(), randomAlphaOfLength(16)); + break; + default: + throw new AssertionError("unsupported operation type [" + type + "]"); + } + + Translog.Location loc = add(op); + writtenOperations.add(new TestTranslog.LocationOperation(op, loc)); + if (rarely()) { // let's verify we can concurrently read this + assertEquals(op, translog.readOperation(loc)); + } + afterAdd(); + } + } catch (Exception t) { + threadExceptions[threadId] = t; + } + } + + protected Translog.Location add(Translog.Operation op) throws IOException { + Translog.Location location = translog.add(op); + if (randomBoolean()) { + translog.ensureSynced(location); + } + return location; + } + + protected void afterAdd() {} + } + +} diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteTranslogStatsTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteTranslogStatsTests.java new file mode 100644 index 0000000000000..aa390cdba1275 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/translog/RemoteTranslogStatsTests.java @@ -0,0 +1,104 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.index.translog; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class RemoteTranslogStatsTests extends OpenSearchTestCase { + RemoteTranslogTransferTracker.Stats transferTrackerStats; + RemoteTranslogStats remoteTranslogStats; + + @Override + public void setUp() throws Exception { + super.setUp(); + transferTrackerStats = getRandomTransferTrackerStats(); + remoteTranslogStats = new RemoteTranslogStats(transferTrackerStats); + } + + public void testRemoteTranslogStatsCreationFromTransferTrackerStats() { + assertEquals(transferTrackerStats.totalUploadsStarted, remoteTranslogStats.getTotalUploadsStarted()); + assertEquals(transferTrackerStats.totalUploadsSucceeded, remoteTranslogStats.getTotalUploadsSucceeded()); + assertEquals(transferTrackerStats.totalUploadsFailed, remoteTranslogStats.getTotalUploadsFailed()); + assertEquals(transferTrackerStats.uploadBytesStarted, remoteTranslogStats.getUploadBytesStarted()); + assertEquals(transferTrackerStats.uploadBytesSucceeded, remoteTranslogStats.getUploadBytesSucceeded()); + assertEquals(transferTrackerStats.uploadBytesFailed, remoteTranslogStats.getUploadBytesFailed()); + } + + public void testRemoteTranslogStatsSerialization() throws IOException { + try (BytesStreamOutput out = new BytesStreamOutput()) { + remoteTranslogStats.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + RemoteTranslogStats remoteTranslogStatsFromStream = new RemoteTranslogStats(in); + assertEquals(remoteTranslogStats, remoteTranslogStatsFromStream); + } + } + } + + public void testAdd() { + RemoteTranslogTransferTracker.Stats otherTransferTrackerStats = getRandomTransferTrackerStats(); + RemoteTranslogStats otherRemoteTranslogStats = new RemoteTranslogStats(otherTransferTrackerStats); + + otherRemoteTranslogStats.add(remoteTranslogStats); + + assertEquals( + otherRemoteTranslogStats.getTotalUploadsStarted(), + otherTransferTrackerStats.totalUploadsStarted + remoteTranslogStats.getTotalUploadsStarted() + ); + assertEquals( + otherRemoteTranslogStats.getTotalUploadsSucceeded(), + otherTransferTrackerStats.totalUploadsSucceeded + remoteTranslogStats.getTotalUploadsSucceeded() + ); + assertEquals( + otherRemoteTranslogStats.getTotalUploadsFailed(), + otherTransferTrackerStats.totalUploadsFailed + remoteTranslogStats.getTotalUploadsFailed() + ); + assertEquals( + otherRemoteTranslogStats.getUploadBytesStarted(), + otherTransferTrackerStats.uploadBytesStarted + remoteTranslogStats.getUploadBytesStarted() + ); + assertEquals( + otherRemoteTranslogStats.getUploadBytesSucceeded(), + otherTransferTrackerStats.uploadBytesSucceeded + remoteTranslogStats.getUploadBytesSucceeded() + ); + assertEquals( + otherRemoteTranslogStats.getUploadBytesFailed(), + otherTransferTrackerStats.uploadBytesFailed + remoteTranslogStats.getUploadBytesFailed() + ); + } + + private static RemoteTranslogTransferTracker.Stats getRandomTransferTrackerStats() { + return new RemoteTranslogTransferTracker.Stats( + new ShardId("test-idx", "test-idx", randomIntBetween(1, 10)), + 0L, + randomLongBetween(100, 500), + randomLongBetween(50, 100), + randomLongBetween(100, 200), + randomLongBetween(10000, 50000), + randomLongBetween(5000, 10000), + randomLongBetween(10000, 20000), + 0L, + 0D, + 0D, + 
0D, + 0L, + 0L, + 0L, + 0L, + 0D, + 0D, + 0D + ); + } +} diff --git a/server/src/test/java/org/opensearch/index/translog/TestTranslog.java b/server/src/test/java/org/opensearch/index/translog/TestTranslog.java index 7a8ff88079200..01c8844b51b02 100644 --- a/server/src/test/java/org/opensearch/index/translog/TestTranslog.java +++ b/server/src/test/java/org/opensearch/index/translog/TestTranslog.java @@ -34,10 +34,11 @@ import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomPicks; + import org.apache.logging.log4j.Logger; import org.apache.lucene.tests.util.LuceneTestCase; -import org.opensearch.core.common.io.stream.InputStreamStreamInput; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.io.stream.InputStreamStreamInput; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -334,6 +335,18 @@ public void onceFailedFailAlways() { } } + static class SlowDownWriteSwitch { + private volatile int sleepSeconds; + + public void setSleepSeconds(int sleepSeconds) { + this.sleepSeconds = sleepSeconds; + } + + public int getSleepSeconds() { + return sleepSeconds; + } + } + static class SortedSnapshot implements Translog.Snapshot { private final Translog.Snapshot snapshot; private List<Translog.Operation> operations = null; diff --git a/server/src/test/java/org/opensearch/index/translog/TranslogDeletionPolicyTests.java b/server/src/test/java/org/opensearch/index/translog/TranslogDeletionPolicyTests.java index d086732343bd2..91d51ffd105f0 100644 --- a/server/src/test/java/org/opensearch/index/translog/TranslogDeletionPolicyTests.java +++ b/server/src/test/java/org/opensearch/index/translog/TranslogDeletionPolicyTests.java @@ -36,12 +36,11 @@ import org.opensearch.common.bytes.ReleasableBytesReference; import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.io.IOUtils; -import org.opensearch.common.lease.Releasable; import org.opensearch.core.index.shard.ShardId; import org.opensearch.test.OpenSearchTestCase; -import org.mockito.Mockito; import java.io.IOException; import java.nio.channels.FileChannel; @@ -50,6 +49,8 @@ import java.util.ArrayList; import java.util.List; +import org.mockito.Mockito; + import static java.lang.Math.min; import static org.hamcrest.Matchers.equalTo; diff --git a/server/src/test/java/org/opensearch/index/translog/TranslogHeaderTests.java b/server/src/test/java/org/opensearch/index/translog/TranslogHeaderTests.java index 4441e30ea639d..a5d6ee7a06e23 100644 --- a/server/src/test/java/org/opensearch/index/translog/TranslogHeaderTests.java +++ b/server/src/test/java/org/opensearch/index/translog/TranslogHeaderTests.java @@ -132,6 +132,49 @@ public void testHeaderWithoutPrimaryTerm() throws Exception { }); } + public void testCurrentHeaderVersionWithoutUUIDComparison() throws Exception { + final String translogUUID = UUIDs.randomBase64UUID(); + final TranslogHeader outHeader = new TranslogHeader(translogUUID, randomNonNegativeLong()); + final long generation = randomNonNegativeLong(); + final Path translogFile = createTempDir().resolve(Translog.getFilename(generation)); + try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE)) { + outHeader.write(channel, true); + assertThat(outHeader.sizeInBytes(), equalTo((int) 
channel.position())); + } + try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) { + final TranslogHeader inHeader = TranslogHeader.read(translogFile, channel); + assertThat(inHeader.getTranslogUUID(), equalTo(translogUUID)); + assertThat(inHeader.getPrimaryTerm(), equalTo(outHeader.getPrimaryTerm())); + assertThat(inHeader.sizeInBytes(), equalTo((int) channel.position())); + } + + TestTranslog.corruptFile(logger, random(), translogFile, false); + final TranslogCorruptedException corruption = expectThrows(TranslogCorruptedException.class, () -> { + try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) { + final TranslogHeader translogHeader = TranslogHeader.read(translogFile, channel); + assertThat( + "version " + TranslogHeader.VERSION_CHECKPOINTS + " translog", + translogHeader.getPrimaryTerm(), + equalTo(SequenceNumbers.UNASSIGNED_PRIMARY_TERM) + ); + throw new TranslogCorruptedException(translogFile.toString(), "adjusted translog version"); + } catch (IllegalStateException e) { + // corruption corrupted the version byte making this look like a v2, v1 or v0 translog + assertThat( + "version " + TranslogHeader.VERSION_CHECKPOINTS + "-or-earlier translog", + e.getMessage(), + anyOf( + containsString("pre-2.0 translog found"), + containsString("pre-1.4 translog found"), + containsString("pre-6.3 translog found") + ) + ); + throw new TranslogCorruptedException(translogFile.toString(), "adjusted translog version", e); + } + }); + assertThat(corruption.getMessage(), not(containsString("this translog file belongs to a different translog"))); + } + static void writeHeaderWithoutTerm(FileChannel channel, String translogUUID) throws IOException { final OutputStreamStreamOutput out = new OutputStreamStreamOutput(Channels.newOutputStream(channel)); CodecUtil.writeHeader(new OutputStreamDataOutput(out), TranslogHeader.TRANSLOG_CODEC, TranslogHeader.VERSION_CHECKPOINTS); diff --git a/server/src/test/java/org/opensearch/index/translog/TranslogManagerTestCase.java b/server/src/test/java/org/opensearch/index/translog/TranslogManagerTestCase.java index 91694060617ab..e17d2770f014a 100644 --- a/server/src/test/java/org/opensearch/index/translog/TranslogManagerTestCase.java +++ b/server/src/test/java/org/opensearch/index/translog/TranslogManagerTestCase.java @@ -14,34 +14,34 @@ import org.apache.lucene.document.TextField; import org.apache.lucene.index.Term; import org.apache.lucene.util.BytesRef; -import org.junit.After; -import org.junit.Before; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.AllocationId; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.BigArrays; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexSettings; import org.opensearch.index.engine.Engine; import org.opensearch.index.engine.EngineConfig; -import org.opensearch.index.mapper.ParseContext; +import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.Mapping; +import 
org.opensearch.index.mapper.ParseContext; import org.opensearch.index.mapper.ParsedDocument; -import org.opensearch.index.mapper.SourceFieldMapper; import org.opensearch.index.mapper.SeqNoFieldMapper; +import org.opensearch.index.mapper.SourceFieldMapper; import org.opensearch.index.mapper.Uid; -import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.seqno.SequenceNumbers; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.test.IndexSettingsModule; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; import java.io.IOException; import java.nio.charset.Charset; @@ -74,7 +74,7 @@ protected Translog createTranslog(LongSupplier primaryTermSupplier) throws IOExc } protected Translog createTranslog(Path translogPath, LongSupplier primaryTermSupplier) throws IOException { - TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE); + TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""); String translogUUID = Translog.createEmptyTranslog( translogPath, SequenceNumbers.NO_OPS_PERFORMED, @@ -182,7 +182,7 @@ protected static ParsedDocument testParsedDocument( } else { document.add(new StoredField(SourceFieldMapper.NAME, ref.bytes, ref.offset, ref.length)); } - return new ParsedDocument(versionField, seqID, id, routing, List.of(document), source, XContentType.JSON, mappingUpdate); + return new ParsedDocument(versionField, seqID, id, routing, List.of(document), source, MediaTypeRegistry.JSON, mappingUpdate); } protected static ParseContext.Document testDocumentWithTextField() { diff --git a/server/src/test/java/org/opensearch/index/translog/listener/TranslogListenerTests.java b/server/src/test/java/org/opensearch/index/translog/listener/TranslogListenerTests.java index 68dc63aa4b52e..dc56417ea9720 100644 --- a/server/src/test/java/org/opensearch/index/translog/listener/TranslogListenerTests.java +++ b/server/src/test/java/org/opensearch/index/translog/listener/TranslogListenerTests.java @@ -13,11 +13,11 @@ import org.opensearch.test.OpenSearchTestCase; import java.lang.reflect.Proxy; -import java.util.List; -import java.util.Collections; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.LinkedList; +import java.util.List; import java.util.concurrent.atomic.AtomicInteger; public class TranslogListenerTests extends OpenSearchTestCase { diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceMockRepositoryTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceMockRepositoryTests.java index 1175716679d0f..a806eea381297 100644 --- a/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceMockRepositoryTests.java +++ b/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceMockRepositoryTests.java @@ -8,14 +8,13 @@ package org.opensearch.index.translog.transfer; -import org.mockito.Mockito; -import org.opensearch.action.ActionListener; import org.opensearch.action.LatchedActionListener; +import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; -import 
org.opensearch.common.blobstore.VerifyingMultiStreamBlobContainer; import org.opensearch.common.blobstore.stream.write.WriteContext; import org.opensearch.common.blobstore.stream.write.WritePriority; +import org.opensearch.core.action.ActionListener; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -31,6 +30,8 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; +import org.mockito.Mockito; + import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; @@ -59,7 +60,7 @@ public void testUploadBlobs() throws Exception { 0L ); - VerifyingMultiStreamBlobContainer blobContainer = mock(VerifyingMultiStreamBlobContainer.class); + AsyncMultiStreamBlobContainer blobContainer = mock(AsyncMultiStreamBlobContainer.class); Mockito.doAnswer(invocation -> { ActionListener<Void> completionListener = invocation.getArgument(1); completionListener.onResponse(null); @@ -106,7 +107,7 @@ public void testUploadBlobsIOException() throws Exception { 0L ); - VerifyingMultiStreamBlobContainer blobContainer = mock(VerifyingMultiStreamBlobContainer.class); + AsyncMultiStreamBlobContainer blobContainer = mock(AsyncMultiStreamBlobContainer.class); doThrow(new IOException()).when(blobContainer).asyncBlobUpload(any(WriteContext.class), any()); when(blobStore.blobContainer(any(BlobPath.class))).thenReturn(blobContainer); @@ -145,7 +146,7 @@ public void testUploadBlobsUploadFutureCompletedExceptionally() throws Exception 0L ); - VerifyingMultiStreamBlobContainer blobContainer = mock(VerifyingMultiStreamBlobContainer.class); + AsyncMultiStreamBlobContainer blobContainer = mock(AsyncMultiStreamBlobContainer.class); Mockito.doAnswer(invocation -> { ActionListener<Void> completionListener = invocation.getArgument(1); completionListener.onFailure(new Exception("Test exception")); diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceTests.java index 5502dc3089c62..e4f5a454b15f6 100644 --- a/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceTests.java +++ b/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceTests.java @@ -8,13 +8,13 @@ package org.opensearch.index.translog.transfer; -import org.opensearch.action.ActionListener; import org.opensearch.action.LatchedActionListener; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; import org.opensearch.env.Environment; import org.opensearch.env.TestEnvironment; import org.opensearch.indices.recovery.RecoverySettings; diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/FileSnapshotTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/FileSnapshotTests.java index 8d07af5927135..2d75851e888a5 100644 --- a/server/src/test/java/org/opensearch/index/translog/transfer/FileSnapshotTests.java +++ b/server/src/test/java/org/opensearch/index/translog/transfer/FileSnapshotTests.java @@ -8,8 +8,8 @@ package org.opensearch.index.translog.transfer; -import 
org.junit.After; import org.opensearch.test.OpenSearchTestCase; +import org.junit.After; import java.io.IOException; import java.nio.file.Files; diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/FileTransferTrackerTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/FileTransferTrackerTests.java index 1914790ac58d2..b96ada1f6bbff 100644 --- a/server/src/test/java/org/opensearch/index/translog/transfer/FileTransferTrackerTests.java +++ b/server/src/test/java/org/opensearch/index/translog/transfer/FileTransferTrackerTests.java @@ -9,27 +9,33 @@ package org.opensearch.index.translog.transfer; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; +import java.util.HashSet; import java.util.List; +import java.util.Set; public class FileTransferTrackerTests extends OpenSearchTestCase { protected final ShardId shardId = new ShardId("index", "_na_", 1); FileTransferTracker fileTransferTracker; + RemoteTranslogTransferTracker remoteTranslogTransferTracker; @Override public void setUp() throws Exception { super.setUp(); + remoteTranslogTransferTracker = new RemoteTranslogTransferTracker(shardId, 20); + fileTransferTracker = new FileTransferTracker(shardId, remoteTranslogTransferTracker); } public void testOnSuccess() throws IOException { - fileTransferTracker = new FileTransferTracker(shardId); Path testFile = createTempFile(); + int fileSize = 128; Files.write(testFile, randomByteArrayOfLength(128), StandardOpenOption.APPEND); try ( FileSnapshot.TransferFileSnapshot transferFileSnapshot = new FileSnapshot.TransferFileSnapshot( @@ -38,11 +44,17 @@ public void testOnSuccess() throws IOException { null ) ) { + Set<FileSnapshot.TransferFileSnapshot> toUpload = new HashSet<>(2); + toUpload.add(transferFileSnapshot); + fileTransferTracker.recordBytesForFiles(toUpload); + remoteTranslogTransferTracker.addUploadBytesStarted(fileSize); fileTransferTracker.onSuccess(transferFileSnapshot); // idempotent + remoteTranslogTransferTracker.addUploadBytesStarted(fileSize); fileTransferTracker.onSuccess(transferFileSnapshot); assertEquals(fileTransferTracker.allUploaded().size(), 1); try { + remoteTranslogTransferTracker.addUploadBytesStarted(fileSize); fileTransferTracker.onFailure(transferFileSnapshot, new IOException("random exception")); fail("failure after success is invalid"); } catch (IllegalStateException ex) { @@ -52,10 +64,10 @@ } public void testOnFailure() throws IOException { - fileTransferTracker = new FileTransferTracker(shardId); Path testFile = createTempFile(); Path testFile2 = createTempFile(); - Files.write(testFile, randomByteArrayOfLength(128), StandardOpenOption.APPEND); + int fileSize = 128; + Files.write(testFile, randomByteArrayOfLength(fileSize), StandardOpenOption.APPEND); try ( FileSnapshot.TransferFileSnapshot transferFileSnapshot = new FileSnapshot.TransferFileSnapshot( testFile, @@ -66,30 +78,37 @@ public void testOnFailure() throws IOException { testFile2, randomNonNegativeLong(), null - ) + ); ) { - + Set<FileSnapshot.TransferFileSnapshot> toUpload = new HashSet<>(2); + toUpload.add(transferFileSnapshot); + toUpload.add(transferFileSnapshot2); + fileTransferTracker.recordBytesForFiles(toUpload); + remoteTranslogTransferTracker.addUploadBytesStarted(fileSize);
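+ // The first attempt for transferFileSnapshot is recorded as a failure below; only transferFileSnapshot2 counts as uploaded until the later retry succeeds.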
fileTransferTracker.onFailure(transferFileSnapshot, new IOException("random exception")); fileTransferTracker.onSuccess(transferFileSnapshot2); assertEquals(fileTransferTracker.allUploaded().size(), 1); - + remoteTranslogTransferTracker.addUploadBytesStarted(fileSize); fileTransferTracker.onSuccess(transferFileSnapshot); assertEquals(fileTransferTracker.allUploaded().size(), 2); } } public void testUploaded() throws IOException { - fileTransferTracker = new FileTransferTracker(shardId); Path testFile = createTempFile(); - Files.write(testFile, randomByteArrayOfLength(128), StandardOpenOption.APPEND); + int fileSize = 128; + Files.write(testFile, randomByteArrayOfLength(fileSize), StandardOpenOption.APPEND); try ( FileSnapshot.TransferFileSnapshot transferFileSnapshot = new FileSnapshot.TransferFileSnapshot( testFile, randomNonNegativeLong(), null ); - ) { + Set<FileSnapshot.TransferFileSnapshot> toUpload = new HashSet<>(2); + toUpload.add(transferFileSnapshot); + fileTransferTracker.recordBytesForFiles(toUpload); + remoteTranslogTransferTracker.addUploadBytesStarted(fileSize); fileTransferTracker.onSuccess(transferFileSnapshot); String fileName = String.valueOf(testFile.getFileName()); assertTrue(fileTransferTracker.uploaded(fileName)); diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java index b7091f3f4f8a6..e34bc078896f9 100644 --- a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java +++ b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java @@ -9,17 +9,19 @@ package org.opensearch.index.translog.transfer; import org.apache.lucene.tests.util.LuceneTestCase; -import org.mockito.Mockito; -import org.opensearch.action.ActionListener; import org.opensearch.action.LatchedActionListener; +import org.opensearch.common.SetOnce; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; -import org.opensearch.common.blobstore.support.PlainBlobMetadata; import org.opensearch.common.blobstore.stream.write.WritePriority; +import org.opensearch.common.blobstore.support.PlainBlobMetadata; +import org.opensearch.common.collect.Tuple; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.transfer.FileSnapshot.CheckpointFileSnapshot; import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot; @@ -34,13 +36,19 @@ import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.LinkedList; import java.util.List; +import java.util.Objects; import java.util.Set; +import java.util.UUID; import java.util.concurrent.atomic.AtomicInteger; +import org.mockito.Mockito; + +import static org.opensearch.index.translog.transfer.TranslogTransferMetadata.METADATA_SEPARATOR; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyMap; import static org.mockito.ArgumentMatchers.anySet; @@ -62,6 +70,12 @@ public class 
TranslogTransferManagerTests extends OpenSearchTestCase { private long primaryTerm; private long generation; private long minTranslogGeneration; + private RemoteTranslogTransferTracker remoteTranslogTransferTracker; + byte[] tlogBytes; + byte[] ckpBytes; + FileTransferTracker tracker; + TranslogTransferManager translogTransferManager; + long delayForBlobDownload; @Override public void setUp() throws Exception { @@ -74,6 +88,28 @@ public void setUp() throws Exception { remoteBaseTransferPath = new BlobPath().add("base_path"); transferService = mock(TransferService.class); threadPool = new TestThreadPool(getClass().getName()); + remoteTranslogTransferTracker = new RemoteTranslogTransferTracker(shardId, 20); + tlogBytes = "Hello Translog".getBytes(StandardCharsets.UTF_8); + ckpBytes = "Hello Checkpoint".getBytes(StandardCharsets.UTF_8); + tracker = new FileTransferTracker(new ShardId("index", "indexUuid", 0), remoteTranslogTransferTracker); + translogTransferManager = new TranslogTransferManager( + shardId, + transferService, + remoteBaseTransferPath, + tracker, + remoteTranslogTransferTracker + ); + + delayForBlobDownload = 1; + when(transferService.downloadBlob(any(BlobPath.class), eq("translog-23.tlog"))).thenAnswer(invocation -> { + Thread.sleep(delayForBlobDownload); + return new ByteArrayInputStream(tlogBytes); + }); + + when(transferService.downloadBlob(any(BlobPath.class), eq("translog-23.ckp"))).thenAnswer(invocation -> { + Thread.sleep(delayForBlobDownload); + return new ByteArrayInputStream(ckpBytes); + }); } @Override @@ -102,7 +138,10 @@ public void testTransferSnapshot() throws Exception { return null; }).when(transferService).uploadBlobs(anySet(), anyMap(), any(ActionListener.class), any(WritePriority.class)); - FileTransferTracker fileTransferTracker = new FileTransferTracker(new ShardId("index", "indexUUid", 0)) { + FileTransferTracker fileTransferTracker = new FileTransferTracker( + new ShardId("index", "indexUUid", 0), + remoteTranslogTransferTracker + ) { @Override public void onSuccess(TransferFileSnapshot fileSnapshot) { fileTransferSucceeded.incrementAndGet(); @@ -121,7 +160,8 @@ public void onFailure(TransferFileSnapshot fileSnapshot, Exception e) { shardId, transferService, remoteBaseTransferPath, - fileTransferTracker + fileTransferTracker, + remoteTranslogTransferTracker ); assertTrue(translogTransferManager.transferSnapshot(createTransferSnapshot(), new TranslogTransferListener() { @@ -142,6 +182,93 @@ public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) { assertEquals(4, fileTransferTracker.allUploaded().size()); } + public void testTransferSnapshotOnUploadTimeout() throws Exception { + doAnswer(invocationOnMock -> { + Thread.sleep(31 * 1000); + return null; + }).when(transferService).uploadBlobs(anySet(), anyMap(), any(ActionListener.class), any(WritePriority.class)); + FileTransferTracker fileTransferTracker = new FileTransferTracker( + new ShardId("index", "indexUUid", 0), + remoteTranslogTransferTracker + ); + TranslogTransferManager translogTransferManager = new TranslogTransferManager( + shardId, + transferService, + remoteBaseTransferPath, + fileTransferTracker, + remoteTranslogTransferTracker + ); + SetOnce<Exception> exception = new SetOnce<>(); + translogTransferManager.transferSnapshot(createTransferSnapshot(), new TranslogTransferListener() { + @Override + public void onUploadComplete(TransferSnapshot transferSnapshot) {} + + @Override + public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) { + 
exception.set(ex); + } + }); + assertNotNull(exception.get()); + assertTrue(exception.get() instanceof TranslogUploadFailedException); + assertEquals("Timed out waiting for transfer of snapshot test-to-string to complete", exception.get().getMessage()); + } + + public void testTransferSnapshotOnThreadInterrupt() throws Exception { + SetOnce<Thread> uploadThread = new SetOnce<>(); + doAnswer(invocationOnMock -> { + uploadThread.set(new Thread(() -> { + ActionListener<TransferFileSnapshot> listener = invocationOnMock.getArgument(2); + try { + Thread.sleep(31 * 1000); + } catch (InterruptedException ignore) { + List<TransferFileSnapshot> list = new ArrayList<>(invocationOnMock.getArgument(0)); + listener.onFailure(new FileTransferException(list.get(0), ignore)); + } + })); + uploadThread.get().start(); + return null; + }).when(transferService).uploadBlobs(anySet(), anyMap(), any(ActionListener.class), any(WritePriority.class)); + FileTransferTracker fileTransferTracker = new FileTransferTracker( + new ShardId("index", "indexUUid", 0), + remoteTranslogTransferTracker + ); + TranslogTransferManager translogTransferManager = new TranslogTransferManager( + shardId, + transferService, + remoteBaseTransferPath, + fileTransferTracker, + remoteTranslogTransferTracker + ); + SetOnce<Exception> exception = new SetOnce<>(); + + Thread thread = new Thread(() -> { + try { + translogTransferManager.transferSnapshot(createTransferSnapshot(), new TranslogTransferListener() { + @Override + public void onUploadComplete(TransferSnapshot transferSnapshot) {} + + @Override + public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) { + exception.set(ex); + } + }); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + thread.start(); + + Thread.sleep(1000); + // Interrupt the thread + thread.interrupt(); + assertBusy(() -> { + assertNotNull(exception.get()); + assertTrue(exception.get() instanceof TranslogUploadFailedException); + assertEquals("Failed to upload test-to-string", exception.get().getMessage()); + }); + uploadThread.get().interrupt(); + } + private TransferSnapshot createTransferSnapshot() { return new TransferSnapshot() { @Override @@ -194,6 +321,11 @@ public Set<TransferFileSnapshot> getTranslogFileSnapshots() { public TranslogTransferMetadata getTranslogTransferMetadata() { return new TranslogTransferMetadata(primaryTerm, generation, minTranslogGeneration, randomInt(5)); } + + @Override + public String toString() { + return "test-to-string"; + } }; } @@ -202,42 +334,56 @@ public void testReadMetadataNoFile() throws IOException { shardId, transferService, remoteBaseTransferPath, - null + null, + remoteTranslogTransferTracker ); doAnswer(invocation -> { - LatchedActionListener<List<BlobMetadata>> latchedActionListener = invocation.getArgument(2); + LatchedActionListener<List<BlobMetadata>> latchedActionListener = invocation.getArgument(3); List<BlobMetadata> bmList = new LinkedList<>(); latchedActionListener.onResponse(bmList); return null; - }).when(transferService).listAllInSortedOrder(any(BlobPath.class), anyInt(), any(ActionListener.class)); + }).when(transferService) + .listAllInSortedOrder(any(BlobPath.class), eq(TranslogTransferMetadata.METADATA_PREFIX), anyInt(), any(ActionListener.class)); assertNull(translogTransferManager.readMetadata()); + assertNoDownloadStats(false); } - // This should happen most of the time - Just a single metadata file - public void testReadMetadataSingleFile() throws IOException { + // This should happen most of the time. + 
public void testReadMetadataFile() throws IOException { TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, transferService, remoteBaseTransferPath, - null + null, + remoteTranslogTransferTracker ); - TranslogTransferMetadata tm = new TranslogTransferMetadata(1, 1, 1, 2); - String mdFilename = tm.getFileName(); + TranslogTransferMetadata metadata1 = new TranslogTransferMetadata(1, 1, 1, 2); + String mdFilename1 = metadata1.getFileName(); + + TranslogTransferMetadata metadata2 = new TranslogTransferMetadata(1, 0, 1, 2); + String mdFilename2 = metadata2.getFileName(); doAnswer(invocation -> { - LatchedActionListener<List<BlobMetadata>> latchedActionListener = invocation.getArgument(2); + LatchedActionListener<List<BlobMetadata>> latchedActionListener = invocation.getArgument(3); List<BlobMetadata> bmList = new LinkedList<>(); - bmList.add(new PlainBlobMetadata(mdFilename, 1)); + bmList.add(new PlainBlobMetadata(mdFilename1, 1)); + bmList.add(new PlainBlobMetadata(mdFilename2, 1)); latchedActionListener.onResponse(bmList); return null; - }).when(transferService).listAllInSortedOrder(any(BlobPath.class), anyInt(), any(ActionListener.class)); + }).when(transferService) + .listAllInSortedOrder(any(BlobPath.class), eq(TranslogTransferMetadata.METADATA_PREFIX), anyInt(), any(ActionListener.class)); TranslogTransferMetadata metadata = createTransferSnapshot().getTranslogTransferMetadata(); - when(transferService.downloadBlob(any(BlobPath.class), eq(mdFilename))).thenReturn( - new ByteArrayInputStream(translogTransferManager.getMetadataBytes(metadata)) - ); + long delayForMdDownload = 1; + when(transferService.downloadBlob(any(BlobPath.class), eq(mdFilename1))).thenAnswer(invocation -> { + Thread.sleep(delayForMdDownload); + return new ByteArrayInputStream(translogTransferManager.getMetadataBytes(metadata)); + }); assertEquals(metadata, translogTransferManager.readMetadata()); + + assertEquals(translogTransferManager.getMetadataBytes(metadata).length, remoteTranslogTransferTracker.getDownloadBytesSucceeded()); + assertTrue(remoteTranslogTransferTracker.getTotalDownloadTimeInMillis() >= delayForMdDownload); } public void testReadMetadataReadException() throws IOException { @@ -245,23 +391,25 @@ public void testReadMetadataReadException() throws IOException { shardId, transferService, remoteBaseTransferPath, - null + null, + remoteTranslogTransferTracker ); - TranslogTransferMetadata tm = new TranslogTransferMetadata(1, 1, 1, 2); String mdFilename = tm.getFileName(); doAnswer(invocation -> { - LatchedActionListener<List<BlobMetadata>> latchedActionListener = invocation.getArgument(2); + LatchedActionListener<List<BlobMetadata>> latchedActionListener = invocation.getArgument(3); List<BlobMetadata> bmList = new LinkedList<>(); bmList.add(new PlainBlobMetadata(mdFilename, 1)); latchedActionListener.onResponse(bmList); return null; - }).when(transferService).listAllInSortedOrder(any(BlobPath.class), anyInt(), any(ActionListener.class)); + }).when(transferService) + .listAllInSortedOrder(any(BlobPath.class), eq(TranslogTransferMetadata.METADATA_PREFIX), anyInt(), any(ActionListener.class)); when(transferService.downloadBlob(any(BlobPath.class), eq(mdFilename))).thenThrow(new IOException("Something went wrong")); assertThrows(IOException.class, translogTransferManager::readMetadata); + assertNoDownloadStats(true); } public void testMetadataFileNameOrder() throws IOException { @@ -279,93 +427,53 @@ public void testReadMetadataListException() throws IOException { shardId, 
transferService, remoteBaseTransferPath, - null + null, + remoteTranslogTransferTracker ); doAnswer(invocation -> { - LatchedActionListener<List<BlobMetadata>> latchedActionListener = invocation.getArgument(2); + LatchedActionListener<List<BlobMetadata>> latchedActionListener = invocation.getArgument(3); latchedActionListener.onFailure(new IOException("Issue while listing")); return null; - }).when(transferService).listAllInSortedOrder(any(BlobPath.class), anyInt(), any(ActionListener.class)); + }).when(transferService) + .listAllInSortedOrder(any(BlobPath.class), eq(TranslogTransferMetadata.METADATA_PREFIX), anyInt(), any(ActionListener.class)); when(transferService.downloadBlob(any(BlobPath.class), any(String.class))).thenThrow(new IOException("Something went wrong")); assertThrows(IOException.class, translogTransferManager::readMetadata); + assertNoDownloadStats(false); } public void testDownloadTranslog() throws IOException { Path location = createTempDir(); - TranslogTransferManager translogTransferManager = new TranslogTransferManager( - shardId, - transferService, - remoteBaseTransferPath, - new FileTransferTracker(new ShardId("index", "indexUuid", 0)) - ); - - when(transferService.downloadBlob(any(BlobPath.class), eq("translog-23.tlog"))).thenReturn( - new ByteArrayInputStream("Hello Translog".getBytes(StandardCharsets.UTF_8)) - ); - - when(transferService.downloadBlob(any(BlobPath.class), eq("translog-23.ckp"))).thenReturn( - new ByteArrayInputStream("Hello Checkpoint".getBytes(StandardCharsets.UTF_8)) - ); - assertFalse(Files.exists(location.resolve("translog-23.tlog"))); assertFalse(Files.exists(location.resolve("translog-23.ckp"))); translogTransferManager.downloadTranslog("12", "23", location); assertTrue(Files.exists(location.resolve("translog-23.tlog"))); assertTrue(Files.exists(location.resolve("translog-23.ckp"))); + assertTlogCkpDownloadStats(); } public void testDownloadTranslogAlreadyExists() throws IOException { - FileTransferTracker tracker = new FileTransferTracker(new ShardId("index", "indexUuid", 0)); Path location = createTempDir(); Files.createFile(location.resolve("translog-23.tlog")); Files.createFile(location.resolve("translog-23.ckp")); - TranslogTransferManager translogTransferManager = new TranslogTransferManager( - shardId, - transferService, - remoteBaseTransferPath, - tracker - ); - - when(transferService.downloadBlob(any(BlobPath.class), eq("translog-23.tlog"))).thenReturn( - new ByteArrayInputStream("Hello Translog".getBytes(StandardCharsets.UTF_8)) - ); - when(transferService.downloadBlob(any(BlobPath.class), eq("translog-23.ckp"))).thenReturn( - new ByteArrayInputStream("Hello Checkpoint".getBytes(StandardCharsets.UTF_8)) - ); - translogTransferManager.downloadTranslog("12", "23", location); verify(transferService).downloadBlob(any(BlobPath.class), eq("translog-23.tlog")); verify(transferService).downloadBlob(any(BlobPath.class), eq("translog-23.ckp")); assertTrue(Files.exists(location.resolve("translog-23.tlog"))); assertTrue(Files.exists(location.resolve("translog-23.ckp"))); + assertTlogCkpDownloadStats(); } public void testDownloadTranslogWithTrackerUpdated() throws IOException { - FileTransferTracker tracker = new FileTransferTracker(new ShardId("index", "indexUuid", 0)); Path location = createTempDir(); String translogFile = "translog-23.tlog", checkpointFile = "translog-23.ckp"; Files.createFile(location.resolve(translogFile)); Files.createFile(location.resolve(checkpointFile)); - TranslogTransferManager translogTransferManager = new 
TranslogTransferManager( - shardId, - transferService, - remoteBaseTransferPath, - tracker - ); - - when(transferService.downloadBlob(any(BlobPath.class), eq(translogFile))).thenReturn( - new ByteArrayInputStream("Hello Translog".getBytes(StandardCharsets.UTF_8)) - ); - when(transferService.downloadBlob(any(BlobPath.class), eq(checkpointFile))).thenReturn( - new ByteArrayInputStream("Hello Checkpoint".getBytes(StandardCharsets.UTF_8)) - ); - translogTransferManager.downloadTranslog("12", "23", location); verify(transferService).downloadBlob(any(BlobPath.class), eq(translogFile)); @@ -380,10 +488,10 @@ public void testDownloadTranslogWithTrackerUpdated() throws IOException { // Since the tracker already holds the files with success state, adding them with success state is allowed tracker.add(translogFile, true); tracker.add(checkpointFile, true); + assertTlogCkpDownloadStats(); } public void testDeleteTranslogSuccess() throws Exception { - FileTransferTracker tracker = new FileTransferTracker(new ShardId("index", "indexUuid", 0)); BlobStore blobStore = mock(BlobStore.class); BlobContainer blobContainer = mock(BlobContainer.class); when(blobStore.blobContainer(any(BlobPath.class))).thenReturn(blobContainer); @@ -392,7 +500,8 @@ public void testDeleteTranslogSuccess() throws Exception { shardId, blobStoreTransferService, remoteBaseTransferPath, - tracker + tracker, + remoteTranslogTransferTracker ); String translogFile = "translog-19.tlog", checkpointFile = "translog-19.ckp"; tracker.add(translogFile, true); @@ -410,13 +519,14 @@ public void testDeleteStaleTranslogMetadata() { shardId, transferService, remoteBaseTransferPath, - null + null, + remoteTranslogTransferTracker ); String tm1 = new TranslogTransferMetadata(1, 1, 1, 2).getFileName(); String tm2 = new TranslogTransferMetadata(1, 2, 1, 2).getFileName(); String tm3 = new TranslogTransferMetadata(2, 3, 1, 2).getFileName(); doAnswer(invocation -> { - ActionListener<List<BlobMetadata>> actionListener = invocation.getArgument(3); + ActionListener<List<BlobMetadata>> actionListener = invocation.getArgument(4); List<BlobMetadata> bmList = new LinkedList<>(); bmList.add(new PlainBlobMetadata(tm1, 1)); bmList.add(new PlainBlobMetadata(tm2, 1)); @@ -424,12 +534,19 @@ public void testDeleteStaleTranslogMetadata() { actionListener.onResponse(bmList); return null; }).when(transferService) - .listAllInSortedOrderAsync(eq(ThreadPool.Names.REMOTE_PURGE), any(BlobPath.class), anyInt(), any(ActionListener.class)); + .listAllInSortedOrderAsync( + eq(ThreadPool.Names.REMOTE_PURGE), + any(BlobPath.class), + eq(TranslogTransferMetadata.METADATA_PREFIX), + anyInt(), + any(ActionListener.class) + ); List<String> files = List.of(tm2, tm3); translogTransferManager.deleteStaleTranslogMetadataFilesAsync(() -> { verify(transferService).listAllInSortedOrderAsync( eq(ThreadPool.Names.REMOTE_PURGE), any(BlobPath.class), + eq(TranslogTransferMetadata.METADATA_PREFIX), eq(Integer.MAX_VALUE), any() ); @@ -443,7 +560,7 @@ public void testDeleteStaleTranslogMetadata() { } public void testDeleteTranslogFailure() throws Exception { - FileTransferTracker tracker = new FileTransferTracker(new ShardId("index", "indexUuid", 0)); + FileTransferTracker tracker = new FileTransferTracker(new ShardId("index", "indexUuid", 0), remoteTranslogTransferTracker); BlobStore blobStore = mock(BlobStore.class); BlobContainer blobContainer = mock(BlobContainer.class); doAnswer(invocation -> { throw new IOException("test exception"); }).when(blobStore).blobContainer(any(BlobPath.class)); @@ 
-453,7 +570,8 @@ public void testDeleteTranslogFailure() throws Exception { shardId, blobStoreTransferService, remoteBaseTransferPath, - tracker + tracker, + remoteTranslogTransferTracker ); String translogFile = "translog-19.tlog", checkpointFile = "translog-19.ckp"; tracker.add(translogFile, true); @@ -463,4 +581,60 @@ public void testDeleteTranslogFailure() throws Exception { translogTransferManager.deleteGenerationAsync(primaryTerm, Set.of(19L), () -> {}); assertEquals(2, tracker.allUploaded().size()); } + + private void assertNoDownloadStats(boolean nonZeroUploadTime) { + assertEquals(0, remoteTranslogTransferTracker.getDownloadBytesSucceeded()); + assertEquals(0, remoteTranslogTransferTracker.getTotalDownloadsSucceeded()); + assertEquals(0, remoteTranslogTransferTracker.getLastSuccessfulDownloadTimestamp()); + if (nonZeroUploadTime) { + assertNotEquals(0, remoteTranslogTransferTracker.getTotalDownloadTimeInMillis()); + } else { + assertEquals(0, remoteTranslogTransferTracker.getTotalDownloadTimeInMillis()); + } + } + + private void assertTlogCkpDownloadStats() { + assertEquals(tlogBytes.length + ckpBytes.length, remoteTranslogTransferTracker.getDownloadBytesSucceeded()); + // Expect delay for both tlog and ckp file + assertTrue(remoteTranslogTransferTracker.getTotalDownloadTimeInMillis() >= 2 * delayForBlobDownload); + } + + public void testGetPrimaryTermAndGeneration() { + String nodeId = UUID.randomUUID().toString(); + String tm = new TranslogTransferMetadata(1, 2, 1, 2, nodeId).getFileName(); + Tuple<Tuple<Long, Long>, String> actualOutput = TranslogTransferMetadata.getNodeIdByPrimaryTermAndGeneration(tm); + assertEquals(1L, (long) (actualOutput.v1().v1())); + assertEquals(2L, (long) (actualOutput.v1().v2())); + assertEquals(String.valueOf(Objects.hash(nodeId)), actualOutput.v2()); + } + + public void testMetadataConflict() throws InterruptedException { + TranslogTransferManager translogTransferManager = new TranslogTransferManager( + shardId, + transferService, + remoteBaseTransferPath, + null, + remoteTranslogTransferTracker + ); + TranslogTransferMetadata tm = new TranslogTransferMetadata(1, 1, 1, 2, "node--1"); + String mdFilename = tm.getFileName(); + long count = mdFilename.chars().filter(ch -> ch == METADATA_SEPARATOR.charAt(0)).count(); + // None of the metadata file name's components should contain `_`, since it is used as the separator. 
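+ // All 10 underscores in the name therefore come from the separators themselves; the node id ("node--1") deliberately avoids `_`.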
+ assertEquals(10, count); + Thread.sleep(1); + TranslogTransferMetadata tm2 = new TranslogTransferMetadata(1, 1, 1, 2, "node--2"); + String mdFilename2 = tm2.getFileName(); + + doAnswer(invocation -> { + LatchedActionListener<List<BlobMetadata>> latchedActionListener = invocation.getArgument(3); + List<BlobMetadata> bmList = new LinkedList<>(); + bmList.add(new PlainBlobMetadata(mdFilename, 1)); + bmList.add(new PlainBlobMetadata(mdFilename2, 1)); + latchedActionListener.onResponse(bmList); + return null; + }).when(transferService) + .listAllInSortedOrder(any(BlobPath.class), eq(TranslogTransferMetadata.METADATA_PREFIX), anyInt(), any(ActionListener.class)); + + assertThrows(RuntimeException.class, translogTransferManager::readMetadata); + } } diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferMetadataHandlerTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferMetadataHandlerTests.java index 9b27e8ee8f3f5..b99479df9c15e 100644 --- a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferMetadataHandlerTests.java +++ b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferMetadataHandlerTests.java @@ -10,11 +10,11 @@ import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.OutputStreamIndexOutput; -import org.junit.Before; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.lucene.store.ByteArrayIndexInput; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; import java.io.IOException; import java.util.HashMap; diff --git a/server/src/test/java/org/opensearch/indices/IndexingMemoryControllerTests.java b/server/src/test/java/org/opensearch/indices/IndexingMemoryControllerTests.java index 62984f43744c8..c91ed00547bff 100644 --- a/server/src/test/java/org/opensearch/indices/IndexingMemoryControllerTests.java +++ b/server/src/test/java/org/opensearch/indices/IndexingMemoryControllerTests.java @@ -36,9 +36,9 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.SetOnce; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.codec.CodecService; import org.opensearch.index.engine.EngineConfig; import org.opensearch.index.engine.InternalEngine; @@ -367,7 +367,7 @@ public void testThrottling() throws Exception { public void testTranslogRecoveryWorksWithIMC() throws IOException { IndexShard shard = newStartedShard(true); for (int i = 0; i < 100; i++) { - indexDoc(shard, Integer.toString(i), "{\"foo\" : \"bar\"}", XContentType.JSON, null); + indexDoc(shard, Integer.toString(i), "{\"foo\" : \"bar\"}", MediaTypeRegistry.JSON, null); } shard.close("simon says", false, false); AtomicReference<IndexShard> shardRef = new AtomicReference<>(); diff --git a/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java b/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java index c9bb263bf785e..5e6398da6fa1b 100644 --- 
a/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java @@ -40,13 +40,13 @@ import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.common.settings.Settings; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.index.seqno.RetentionLeaseSyncer; import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; diff --git a/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java b/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java index 9c8ad3917c23f..c54dfa0fab277 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java @@ -32,8 +32,8 @@ package org.opensearch.indices; -import org.opensearch.index.mapper.DocCountFieldMapper; import org.opensearch.index.mapper.DataStreamFieldMapper; +import org.opensearch.index.mapper.DocCountFieldMapper; import org.opensearch.index.mapper.FieldNamesFieldMapper; import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.IgnoredFieldMapper; diff --git a/server/src/test/java/org/opensearch/indices/IndicesQueryCacheTests.java b/server/src/test/java/org/opensearch/indices/IndicesQueryCacheTests.java index 5590c529cc9fd..ba40343fb2130 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesQueryCacheTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesQueryCacheTests.java @@ -51,10 +51,10 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.store.Directory; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; -import org.opensearch.common.util.io.IOUtils; import org.opensearch.common.settings.Settings; -import org.opensearch.index.cache.query.QueryCacheStats; +import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.cache.query.QueryCacheStats; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -151,7 +151,7 @@ public void testBasics() throws IOException { assertEquals(1L, stats.getCacheSize()); assertEquals(1L, stats.getCacheCount()); assertEquals(0L, stats.getHitCount()); - assertEquals(1L, stats.getMissCount()); + assertEquals(2L, stats.getMissCount()); assertTrue(stats.getMemorySizeInBytes() > 0L && stats.getMemorySizeInBytes() < Long.MAX_VALUE); for (int i = 1; i < 20; ++i) { @@ -162,7 +162,7 @@ public void testBasics() throws IOException { assertEquals(10L, stats.getCacheSize()); assertEquals(20L, stats.getCacheCount()); assertEquals(0L, stats.getHitCount()); - assertEquals(20L, stats.getMissCount()); + assertEquals(40L, stats.getMissCount()); assertTrue(stats.getMemorySizeInBytes() > 0L && stats.getMemorySizeInBytes() < Long.MAX_VALUE); s.count(new DummyQuery(10)); @@ -171,7 +171,7 @@ public void testBasics() throws IOException { assertEquals(10L, stats.getCacheSize()); assertEquals(20L, 
stats.getCacheCount()); assertEquals(1L, stats.getHitCount()); - assertEquals(20L, stats.getMissCount()); + assertEquals(40L, stats.getMissCount()); assertTrue(stats.getMemorySizeInBytes() > 0L && stats.getMemorySizeInBytes() < Long.MAX_VALUE); IOUtils.close(r, dir); @@ -181,7 +181,7 @@ public void testBasics() throws IOException { assertEquals(0L, stats.getCacheSize()); assertEquals(20L, stats.getCacheCount()); assertEquals(1L, stats.getHitCount()); - assertEquals(20L, stats.getMissCount()); + assertEquals(40L, stats.getMissCount()); assertTrue(stats.getMemorySizeInBytes() > 0L && stats.getMemorySizeInBytes() < Long.MAX_VALUE); cache.onClose(shard); @@ -232,7 +232,7 @@ public void testTwoShards() throws IOException { assertEquals(1L, stats1.getCacheSize()); assertEquals(1L, stats1.getCacheCount()); assertEquals(0L, stats1.getHitCount()); - assertEquals(1L, stats1.getMissCount()); + assertEquals(2L, stats1.getMissCount()); assertTrue(stats1.getMemorySizeInBytes() >= 0L && stats1.getMemorySizeInBytes() < Long.MAX_VALUE); QueryCacheStats stats2 = cache.getStats(shard2); @@ -248,14 +248,14 @@ public void testTwoShards() throws IOException { assertEquals(1L, stats1.getCacheSize()); assertEquals(1L, stats1.getCacheCount()); assertEquals(0L, stats1.getHitCount()); - assertEquals(1L, stats1.getMissCount()); + assertEquals(2L, stats1.getMissCount()); assertTrue(stats1.getMemorySizeInBytes() >= 0L && stats1.getMemorySizeInBytes() < Long.MAX_VALUE); stats2 = cache.getStats(shard2); assertEquals(1L, stats2.getCacheSize()); assertEquals(1L, stats2.getCacheCount()); assertEquals(0L, stats2.getHitCount()); - assertEquals(1L, stats2.getMissCount()); + assertEquals(2L, stats2.getMissCount()); assertTrue(stats2.getMemorySizeInBytes() >= 0L && stats2.getMemorySizeInBytes() < Long.MAX_VALUE); for (int i = 0; i < 20; ++i) { @@ -266,14 +266,14 @@ public void testTwoShards() throws IOException { assertEquals(0L, stats1.getCacheSize()); // evicted assertEquals(1L, stats1.getCacheCount()); assertEquals(0L, stats1.getHitCount()); - assertEquals(1L, stats1.getMissCount()); + assertEquals(2L, stats1.getMissCount()); assertTrue(stats1.getMemorySizeInBytes() >= 0L && stats1.getMemorySizeInBytes() < Long.MAX_VALUE); stats2 = cache.getStats(shard2); assertEquals(10L, stats2.getCacheSize()); assertEquals(20L, stats2.getCacheCount()); assertEquals(1L, stats2.getHitCount()); - assertEquals(20L, stats2.getMissCount()); + assertEquals(40L, stats2.getMissCount()); assertTrue(stats2.getMemorySizeInBytes() >= 0L && stats2.getMemorySizeInBytes() < Long.MAX_VALUE); IOUtils.close(r1, dir1); @@ -283,14 +283,14 @@ public void testTwoShards() throws IOException { assertEquals(0L, stats1.getCacheSize()); assertEquals(1L, stats1.getCacheCount()); assertEquals(0L, stats1.getHitCount()); - assertEquals(1L, stats1.getMissCount()); + assertEquals(2L, stats1.getMissCount()); assertTrue(stats1.getMemorySizeInBytes() >= 0L && stats1.getMemorySizeInBytes() < Long.MAX_VALUE); stats2 = cache.getStats(shard2); assertEquals(10L, stats2.getCacheSize()); assertEquals(20L, stats2.getCacheCount()); assertEquals(1L, stats2.getHitCount()); - assertEquals(20L, stats2.getMissCount()); + assertEquals(40L, stats2.getMissCount()); assertTrue(stats2.getMemorySizeInBytes() >= 0L && stats2.getMemorySizeInBytes() < Long.MAX_VALUE); cache.onClose(shard1); @@ -307,7 +307,7 @@ public void testTwoShards() throws IOException { assertEquals(10L, stats2.getCacheSize()); assertEquals(20L, stats2.getCacheCount()); assertEquals(1L, stats2.getHitCount()); - 
assertEquals(20L, stats2.getMissCount()); + assertEquals(40L, stats2.getMissCount()); assertTrue(stats2.getMemorySizeInBytes() >= 0L && stats2.getMemorySizeInBytes() < Long.MAX_VALUE); IOUtils.close(r2, dir2); @@ -388,8 +388,10 @@ public void testStatsOnEviction() throws IOException { private static class DummyWeight extends Weight { private final Weight weight; + private final int randCount = randomIntBetween(0, Integer.MAX_VALUE); private boolean scorerCalled; private boolean scorerSupplierCalled; + private boolean countCalled; DummyWeight(Weight weight) { super(weight.getQuery()); @@ -413,6 +415,12 @@ public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOExcepti return weight.scorerSupplier(context); } + @Override + public int count(LeafReaderContext context) throws IOException { + countCalled = true; + return randCount; + } + @Override public boolean isCacheable(LeafReaderContext ctx) { return true; @@ -458,4 +466,26 @@ public void onUse(Query query) {} cache.onClose(shard); cache.close(); } + + public void testDelegatesCount() throws Exception { + Directory dir = newDirectory(); + IndexWriter w = new IndexWriter(dir, newIndexWriterConfig()); + w.addDocument(new Document()); + DirectoryReader r = DirectoryReader.open(w); + w.close(); + ShardId shard = new ShardId("index", "_na_", 0); + r = OpenSearchDirectoryReader.wrap(r, shard); + IndexSearcher s = new IndexSearcher(r); + IndicesQueryCache cache = new IndicesQueryCache(Settings.EMPTY); + s.setQueryCache(cache); + Query query = new MatchAllDocsQuery(); + final DummyWeight weight = new DummyWeight(s.createWeight(s.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f)); + final Weight cached = cache.doCache(weight, s.getQueryCachingPolicy()); + assertFalse(weight.countCalled); + assertEquals(weight.randCount, cached.count(s.getIndexReader().leaves().get(0))); + assertTrue(weight.countCalled); + IOUtils.close(r, dir); + cache.onClose(shard); + cache.close(); + } } diff --git a/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java b/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java index a06688150a38a..b9cbbb2c65162 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java @@ -46,43 +46,61 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.opensearch.common.CheckedSupplier; -import org.opensearch.core.common.bytes.AbstractBytesReference; -import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.common.cache.module.CacheModule; +import org.opensearch.common.cache.service.CacheService; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.bytes.AbstractBytesReference; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentHelper; +import 
org.opensearch.index.IndexNotFoundException; +import org.opensearch.index.IndexService; +import org.opensearch.index.cache.request.RequestCacheStats; import org.opensearch.index.cache.request.ShardRequestCache; import org.opensearch.index.query.TermQueryBuilder; -import org.opensearch.core.index.shard.ShardId; -import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.IndexShardState; +import org.opensearch.test.OpenSearchSingleNodeTestCase; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; -import java.util.concurrent.atomic.AtomicBoolean; +import java.util.Optional; +import java.util.UUID; -public class IndicesRequestCacheTests extends OpenSearchTestCase { +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class IndicesRequestCacheTests extends OpenSearchSingleNodeTestCase { public void testBasicOperationsCache() throws Exception { - ShardRequestCache requestCacheStats = new ShardRequestCache(); - IndicesRequestCache cache = new IndicesRequestCache(Settings.EMPTY); + IndexShard indexShard = createIndex("test").getShard(0); + IndicesRequestCache cache = new IndicesRequestCache( + Settings.EMPTY, + (shardId -> Optional.of(new IndicesService.IndexShardCacheEntity(indexShard))), + new CacheModule(new ArrayList<>(), Settings.EMPTY).getCacheService() + ); Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); writer.addDocument(newDoc(0, "foo")); DirectoryReader reader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); - BytesReference termBytes = XContentHelper.toXContent(termQuery, XContentType.JSON, false); - AtomicBoolean indexShard = new AtomicBoolean(true); + BytesReference termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false); // initial cache - TestEntity entity = new TestEntity(requestCacheStats, indexShard); + IndicesService.IndexShardCacheEntity entity = new IndicesService.IndexShardCacheEntity(indexShard); Loader loader = new Loader(reader, 0); BytesReference value = cache.getOrCompute(entity, loader, reader, termBytes); assertEquals("foo", value.streamInput().readString()); + ShardRequestCache requestCacheStats = indexShard.requestCache(); assertEquals(0, requestCacheStats.stats().getHitCount()); assertEquals(1, requestCacheStats.stats().getMissCount()); assertEquals(0, requestCacheStats.stats().getEvictions()); @@ -90,10 +108,11 @@ public void testBasicOperationsCache() throws Exception { assertEquals(1, cache.count()); // cache hit - entity = new TestEntity(requestCacheStats, indexShard); + entity = new IndicesService.IndexShardCacheEntity(indexShard); loader = new Loader(reader, 0); value = cache.getOrCompute(entity, loader, reader, termBytes); assertEquals("foo", value.streamInput().readString()); + requestCacheStats = indexShard.requestCache(); assertEquals(1, requestCacheStats.stats().getHitCount()); assertEquals(1, requestCacheStats.stats().getMissCount()); assertEquals(0, requestCacheStats.stats().getEvictions()); @@ -106,7 +125,68 @@ public void testBasicOperationsCache() throws Exception { if (randomBoolean()) { reader.close(); } else { - indexShard.set(false); // closed shard but reader is still open + indexShard.close("test", true, true); // closed shard but reader is still open + cache.clear(entity); + } + cache.cleanCache(); + assertEquals(1, 
requestCacheStats.stats().getHitCount()); + assertEquals(1, requestCacheStats.stats().getMissCount()); + assertEquals(0, requestCacheStats.stats().getEvictions()); + assertTrue(loader.loadedFromCache); + assertEquals(0, cache.count()); + assertEquals(0, requestCacheStats.stats().getMemorySize().bytesAsInt()); + + IOUtils.close(reader, writer, dir, cache); + assertEquals(0, cache.numRegisteredCloseListeners()); + } + + public void testBasicOperationsCacheWithFeatureFlag() throws Exception { + IndexShard indexShard = createIndex("test").getShard(0); + CacheService cacheService = new CacheModule(new ArrayList<>(), Settings.EMPTY).getCacheService(); + IndicesRequestCache cache = new IndicesRequestCache( + Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.PLUGGABLE_CACHE, "true").build(), + (shardId -> Optional.of(new IndicesService.IndexShardCacheEntity(indexShard))), + cacheService + ); + Directory dir = newDirectory(); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); + + writer.addDocument(newDoc(0, "foo")); + DirectoryReader reader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); + TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); + BytesReference termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false); + + // initial cache + IndicesService.IndexShardCacheEntity entity = new IndicesService.IndexShardCacheEntity(indexShard); + Loader loader = new Loader(reader, 0); + BytesReference value = cache.getOrCompute(entity, loader, reader, termBytes); + assertEquals("foo", value.streamInput().readString()); + ShardRequestCache requestCacheStats = indexShard.requestCache(); + assertEquals(0, requestCacheStats.stats().getHitCount()); + assertEquals(1, requestCacheStats.stats().getMissCount()); + assertEquals(0, requestCacheStats.stats().getEvictions()); + assertFalse(loader.loadedFromCache); + assertEquals(1, cache.count()); + + // cache hit + entity = new IndicesService.IndexShardCacheEntity(indexShard); + loader = new Loader(reader, 0); + value = cache.getOrCompute(entity, loader, reader, termBytes); + assertEquals("foo", value.streamInput().readString()); + requestCacheStats = indexShard.requestCache(); + assertEquals(1, requestCacheStats.stats().getHitCount()); + assertEquals(1, requestCacheStats.stats().getMissCount()); + assertEquals(0, requestCacheStats.stats().getEvictions()); + assertTrue(loader.loadedFromCache); + assertEquals(1, cache.count()); + assertTrue(requestCacheStats.stats().getMemorySize().bytesAsInt() > value.length()); + assertEquals(1, cache.numRegisteredCloseListeners()); + + // Closing the cache doesn't modify an already returned CacheEntity + if (randomBoolean()) { + reader.close(); + } else { + indexShard.close("test", true, true); // closed shard but reader is still open cache.clear(entity); } cache.cleanCache(); @@ -122,16 +202,24 @@ public void testBasicOperationsCache() throws Exception { } public void testCacheDifferentReaders() throws Exception { - IndicesRequestCache cache = new IndicesRequestCache(Settings.EMPTY); - AtomicBoolean indexShard = new AtomicBoolean(true); - ShardRequestCache requestCacheStats = new ShardRequestCache(); + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexShard indexShard = createIndex("test").getShard(0); + IndicesRequestCache cache = new IndicesRequestCache(Settings.EMPTY, (shardId -> { + IndexService indexService = null; + try { + indexService = 
indicesService.indexServiceSafe(shardId.getIndex()); + } catch (IndexNotFoundException ex) { + return Optional.empty(); + } + return Optional.of(new IndicesService.IndexShardCacheEntity(indexService.getShard(shardId.id()))); + }), new CacheModule(new ArrayList<>(), Settings.EMPTY).getCacheService()); Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); writer.addDocument(newDoc(0, "foo")); DirectoryReader reader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); - BytesReference termBytes = XContentHelper.toXContent(termQuery, XContentType.JSON, false); + BytesReference termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false); if (randomBoolean()) { writer.flush(); IOUtils.close(writer); @@ -141,9 +229,10 @@ public void testCacheDifferentReaders() throws Exception { DirectoryReader secondReader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); // initial cache - TestEntity entity = new TestEntity(requestCacheStats, indexShard); + IndicesService.IndexShardCacheEntity entity = new IndicesService.IndexShardCacheEntity(indexShard); Loader loader = new Loader(reader, 0); BytesReference value = cache.getOrCompute(entity, loader, reader, termBytes); + ShardRequestCache requestCacheStats = entity.stats(); assertEquals("foo", value.streamInput().readString()); assertEquals(0, requestCacheStats.stats().getHitCount()); assertEquals(1, requestCacheStats.stats().getMissCount()); @@ -155,9 +244,10 @@ public void testCacheDifferentReaders() throws Exception { assertEquals(1, cache.numRegisteredCloseListeners()); // cache the second - TestEntity secondEntity = new TestEntity(requestCacheStats, indexShard); + IndicesService.IndexShardCacheEntity secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); loader = new Loader(secondReader, 0); value = cache.getOrCompute(entity, loader, secondReader, termBytes); + requestCacheStats = entity.stats(); assertEquals("bar", value.streamInput().readString()); assertEquals(0, requestCacheStats.stats().getHitCount()); assertEquals(2, requestCacheStats.stats().getMissCount()); @@ -167,9 +257,10 @@ public void testCacheDifferentReaders() throws Exception { assertTrue(requestCacheStats.stats().getMemorySize().bytesAsInt() > cacheSize + value.length()); assertEquals(2, cache.numRegisteredCloseListeners()); - secondEntity = new TestEntity(requestCacheStats, indexShard); + secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); loader = new Loader(secondReader, 0); value = cache.getOrCompute(secondEntity, loader, secondReader, termBytes); + requestCacheStats = entity.stats(); assertEquals("bar", value.streamInput().readString()); assertEquals(1, requestCacheStats.stats().getHitCount()); assertEquals(2, requestCacheStats.stats().getMissCount()); @@ -177,10 +268,11 @@ public void testCacheDifferentReaders() throws Exception { assertTrue(loader.loadedFromCache); assertEquals(2, cache.count()); - entity = new TestEntity(requestCacheStats, indexShard); + entity = new IndicesService.IndexShardCacheEntity(indexShard); loader = new Loader(reader, 0); value = cache.getOrCompute(entity, loader, reader, termBytes); assertEquals("foo", value.streamInput().readString()); + requestCacheStats = entity.stats(); assertEquals(2, requestCacheStats.stats().getHitCount()); assertEquals(2, requestCacheStats.stats().getMissCount()); assertEquals(0, 
requestCacheStats.stats().getEvictions()); @@ -201,7 +293,7 @@ public void testCacheDifferentReaders() throws Exception { if (randomBoolean()) { secondReader.close(); } else { - indexShard.set(false); // closed shard but reader is still open + indexShard.close("test", true, true); // closed shard but reader is still open cache.clear(secondEntity); } cache.cleanCache(); @@ -218,110 +310,125 @@ public void testCacheDifferentReaders() throws Exception { public void testEviction() throws Exception { final ByteSizeValue size; { - IndicesRequestCache cache = new IndicesRequestCache(Settings.EMPTY); - AtomicBoolean indexShard = new AtomicBoolean(true); - ShardRequestCache requestCacheStats = new ShardRequestCache(); + IndexShard indexShard = createIndex("test").getShard(0); + IndicesRequestCache cache = new IndicesRequestCache( + Settings.EMPTY, + (shardId -> Optional.of(new IndicesService.IndexShardCacheEntity(indexShard))), + new CacheModule(new ArrayList<>(), Settings.EMPTY).getCacheService() + ); Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); writer.addDocument(newDoc(0, "foo")); DirectoryReader reader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); - BytesReference termBytes = XContentHelper.toXContent(termQuery, XContentType.JSON, false); - TestEntity entity = new TestEntity(requestCacheStats, indexShard); + BytesReference termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false); + IndicesService.IndexShardCacheEntity entity = new IndicesService.IndexShardCacheEntity(indexShard); Loader loader = new Loader(reader, 0); writer.updateDocument(new Term("id", "0"), newDoc(0, "bar")); DirectoryReader secondReader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); - TestEntity secondEntity = new TestEntity(requestCacheStats, indexShard); + IndicesService.IndexShardCacheEntity secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); Loader secondLoader = new Loader(secondReader, 0); BytesReference value1 = cache.getOrCompute(entity, loader, reader, termBytes); assertEquals("foo", value1.streamInput().readString()); BytesReference value2 = cache.getOrCompute(secondEntity, secondLoader, secondReader, termBytes); assertEquals("bar", value2.streamInput().readString()); - size = requestCacheStats.stats().getMemorySize(); + size = indexShard.requestCache().stats().getMemorySize(); IOUtils.close(reader, secondReader, writer, dir, cache); } + IndexShard indexShard = createIndex("test1").getShard(0); IndicesRequestCache cache = new IndicesRequestCache( - Settings.builder().put(IndicesRequestCache.INDICES_CACHE_QUERY_SIZE.getKey(), size.getBytes() + 1 + "b").build() + Settings.builder().put(IndicesRequestCache.INDICES_CACHE_QUERY_SIZE.getKey(), size.getBytes() + 1 + "b").build(), + (shardId -> Optional.of(new IndicesService.IndexShardCacheEntity(indexShard))), + new CacheModule(new ArrayList<>(), Settings.EMPTY).getCacheService() ); - AtomicBoolean indexShard = new AtomicBoolean(true); - ShardRequestCache requestCacheStats = new ShardRequestCache(); Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); writer.addDocument(newDoc(0, "foo")); DirectoryReader reader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); - 
BytesReference termBytes = XContentHelper.toXContent(termQuery, XContentType.JSON, false); - TestEntity entity = new TestEntity(requestCacheStats, indexShard); + BytesReference termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false); + IndicesService.IndexShardCacheEntity entity = new IndicesService.IndexShardCacheEntity(indexShard); Loader loader = new Loader(reader, 0); writer.updateDocument(new Term("id", "0"), newDoc(0, "bar")); DirectoryReader secondReader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); - TestEntity secondEntity = new TestEntity(requestCacheStats, indexShard); + IndicesService.IndexShardCacheEntity secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); Loader secondLoader = new Loader(secondReader, 0); writer.updateDocument(new Term("id", "0"), newDoc(0, "baz")); DirectoryReader thirdReader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); - TestEntity thirddEntity = new TestEntity(requestCacheStats, indexShard); + IndicesService.IndexShardCacheEntity thirddEntity = new IndicesService.IndexShardCacheEntity(indexShard); Loader thirdLoader = new Loader(thirdReader, 0); BytesReference value1 = cache.getOrCompute(entity, loader, reader, termBytes); assertEquals("foo", value1.streamInput().readString()); BytesReference value2 = cache.getOrCompute(secondEntity, secondLoader, secondReader, termBytes); assertEquals("bar", value2.streamInput().readString()); - logger.info("Memory size: {}", requestCacheStats.stats().getMemorySize()); + logger.info("Memory size: {}", indexShard.requestCache().stats().getMemorySize()); BytesReference value3 = cache.getOrCompute(thirddEntity, thirdLoader, thirdReader, termBytes); assertEquals("baz", value3.streamInput().readString()); assertEquals(2, cache.count()); - assertEquals(1, requestCacheStats.stats().getEvictions()); + assertEquals(1, indexShard.requestCache().stats().getEvictions()); IOUtils.close(reader, secondReader, thirdReader, writer, dir, cache); } public void testClearAllEntityIdentity() throws Exception { - IndicesRequestCache cache = new IndicesRequestCache(Settings.EMPTY); - AtomicBoolean indexShard = new AtomicBoolean(true); + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexShard indexShard = createIndex("test").getShard(0); + IndicesRequestCache cache = new IndicesRequestCache(Settings.EMPTY, (shardId -> { + IndexService indexService = null; + try { + indexService = indicesService.indexServiceSafe(shardId.getIndex()); + } catch (IndexNotFoundException ex) { + return Optional.empty(); + } + return Optional.of(new IndicesService.IndexShardCacheEntity(indexService.getShard(shardId.id()))); + }), new CacheModule(new ArrayList<>(), Settings.EMPTY).getCacheService()); - ShardRequestCache requestCacheStats = new ShardRequestCache(); Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); writer.addDocument(newDoc(0, "foo")); DirectoryReader reader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); - BytesReference termBytes = XContentHelper.toXContent(termQuery, XContentType.JSON, false); - TestEntity entity = new TestEntity(requestCacheStats, indexShard); + BytesReference termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false); + IndicesService.IndexShardCacheEntity entity = new 
IndicesService.IndexShardCacheEntity(indexShard); Loader loader = new Loader(reader, 0); writer.updateDocument(new Term("id", "0"), newDoc(0, "bar")); DirectoryReader secondReader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); - TestEntity secondEntity = new TestEntity(requestCacheStats, indexShard); + IndicesService.IndexShardCacheEntity secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); Loader secondLoader = new Loader(secondReader, 0); writer.updateDocument(new Term("id", "0"), newDoc(0, "baz")); DirectoryReader thirdReader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); - AtomicBoolean differentIdentity = new AtomicBoolean(true); - TestEntity thirddEntity = new TestEntity(requestCacheStats, differentIdentity); + IndicesService.IndexShardCacheEntity thirddEntity = new IndicesService.IndexShardCacheEntity(createIndex("test1").getShard(0)); Loader thirdLoader = new Loader(thirdReader, 0); BytesReference value1 = cache.getOrCompute(entity, loader, reader, termBytes); assertEquals("foo", value1.streamInput().readString()); BytesReference value2 = cache.getOrCompute(secondEntity, secondLoader, secondReader, termBytes); assertEquals("bar", value2.streamInput().readString()); - logger.info("Memory size: {}", requestCacheStats.stats().getMemorySize()); + logger.info("Memory size: {}", indexShard.requestCache().stats().getMemorySize()); BytesReference value3 = cache.getOrCompute(thirddEntity, thirdLoader, thirdReader, termBytes); assertEquals("baz", value3.streamInput().readString()); assertEquals(3, cache.count()); - final long hitCount = requestCacheStats.stats().getHitCount(); + RequestCacheStats requestCacheStats = entity.stats().stats(); + requestCacheStats.add(thirddEntity.stats().stats()); + final long hitCount = requestCacheStats.getHitCount(); // clear all for the indexShard identity even though it's still open cache.clear(randomFrom(entity, secondEntity)); cache.cleanCache(); assertEquals(1, cache.count()); // third has not been validated since it's a different identity value3 = cache.getOrCompute(thirddEntity, thirdLoader, thirdReader, termBytes); - assertEquals(hitCount + 1, requestCacheStats.stats().getHitCount()); + requestCacheStats = entity.stats().stats(); + requestCacheStats.add(thirddEntity.stats().stats()); + assertEquals(hitCount + 1, requestCacheStats.getHitCount()); assertEquals("baz", value3.streamInput().readString()); IOUtils.close(reader, secondReader, thirdReader, writer, dir, cache); @@ -365,22 +472,31 @@ public BytesReference get() { } public void testInvalidate() throws Exception { - ShardRequestCache requestCacheStats = new ShardRequestCache(); - IndicesRequestCache cache = new IndicesRequestCache(Settings.EMPTY); + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexShard indexShard = createIndex("test").getShard(0); + IndicesRequestCache cache = new IndicesRequestCache(Settings.EMPTY, (shardId -> { + IndexService indexService = null; + try { + indexService = indicesService.indexServiceSafe(shardId.getIndex()); + } catch (IndexNotFoundException ex) { + return Optional.empty(); + } + return Optional.of(new IndicesService.IndexShardCacheEntity(indexService.getShard(shardId.id()))); + }), new CacheModule(new ArrayList<>(), Settings.EMPTY).getCacheService()); Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); writer.addDocument(newDoc(0, "foo")); DirectoryReader reader =
OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); - BytesReference termBytes = XContentHelper.toXContent(termQuery, XContentType.JSON, false); - AtomicBoolean indexShard = new AtomicBoolean(true); + BytesReference termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false); // initial cache - TestEntity entity = new TestEntity(requestCacheStats, indexShard); + IndicesService.IndexShardCacheEntity entity = new IndicesService.IndexShardCacheEntity(indexShard); Loader loader = new Loader(reader, 0); BytesReference value = cache.getOrCompute(entity, loader, reader, termBytes); assertEquals("foo", value.streamInput().readString()); + ShardRequestCache requestCacheStats = entity.stats(); assertEquals(0, requestCacheStats.stats().getHitCount()); assertEquals(1, requestCacheStats.stats().getMissCount()); assertEquals(0, requestCacheStats.stats().getEvictions()); @@ -388,10 +504,11 @@ public void testInvalidate() throws Exception { assertEquals(1, cache.count()); // cache hit - entity = new TestEntity(requestCacheStats, indexShard); + entity = new IndicesService.IndexShardCacheEntity(indexShard); loader = new Loader(reader, 0); value = cache.getOrCompute(entity, loader, reader, termBytes); assertEquals("foo", value.streamInput().readString()); + requestCacheStats = entity.stats(); assertEquals(1, requestCacheStats.stats().getHitCount()); assertEquals(1, requestCacheStats.stats().getMissCount()); assertEquals(0, requestCacheStats.stats().getEvictions()); @@ -401,11 +518,12 @@ public void testInvalidate() throws Exception { assertEquals(1, cache.numRegisteredCloseListeners()); // load again after invalidate - entity = new TestEntity(requestCacheStats, indexShard); + entity = new IndicesService.IndexShardCacheEntity(indexShard); loader = new Loader(reader, 0); cache.invalidate(entity, reader, termBytes); value = cache.getOrCompute(entity, loader, reader, termBytes); assertEquals("foo", value.streamInput().readString()); + requestCacheStats = entity.stats(); assertEquals(1, requestCacheStats.stats().getHitCount()); assertEquals(2, requestCacheStats.stats().getMissCount()); assertEquals(0, requestCacheStats.stats().getEvictions()); @@ -418,7 +536,7 @@ public void testInvalidate() throws Exception { if (randomBoolean()) { reader.close(); } else { - indexShard.set(false); // closed shard but reader is still open + indexShard.close("test", true, true); // closed shard but reader is still open cache.clear(entity); } cache.cleanCache(); @@ -433,22 +551,25 @@ public void testInvalidate() throws Exception { } public void testEqualsKey() throws IOException { - AtomicBoolean trueBoolean = new AtomicBoolean(true); - AtomicBoolean falseBoolean = new AtomicBoolean(false); + IndicesService indicesService = getInstanceFromNode(IndicesService.class); Directory dir = newDirectory(); IndexWriterConfig config = newIndexWriterConfig(); IndexWriter writer = new IndexWriter(dir, config); - IndexReader reader1 = DirectoryReader.open(writer); - IndexReader.CacheKey rKey1 = reader1.getReaderCacheHelper().getKey(); + ShardId shardId = new ShardId("foo", "bar", 1); + ShardId shardId1 = new ShardId("foo1", "bar1", 2); + IndexReader reader1 = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), shardId); + String rKey1 = ((OpenSearchDirectoryReader) reader1).getDelegatingCacheHelper().getDelegatingCacheKey().getId(); writer.addDocument(new Document()); - IndexReader reader2 = 
DirectoryReader.open(writer); - IndexReader.CacheKey rKey2 = reader2.getReaderCacheHelper().getKey(); + IndexReader reader2 = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), shardId); + String rKey2 = ((OpenSearchDirectoryReader) reader2).getDelegatingCacheHelper().getDelegatingCacheKey().getId(); IOUtils.close(reader1, reader2, writer, dir); - IndicesRequestCache.Key key1 = new IndicesRequestCache.Key(new TestEntity(null, trueBoolean), rKey1, new TestBytesReference(1)); - IndicesRequestCache.Key key2 = new IndicesRequestCache.Key(new TestEntity(null, trueBoolean), rKey1, new TestBytesReference(1)); - IndicesRequestCache.Key key3 = new IndicesRequestCache.Key(new TestEntity(null, falseBoolean), rKey1, new TestBytesReference(1)); - IndicesRequestCache.Key key4 = new IndicesRequestCache.Key(new TestEntity(null, trueBoolean), rKey2, new TestBytesReference(1)); - IndicesRequestCache.Key key5 = new IndicesRequestCache.Key(new TestEntity(null, trueBoolean), rKey1, new TestBytesReference(2)); + IndexShard indexShard = mock(IndexShard.class); + when(indexShard.state()).thenReturn(IndexShardState.STARTED); + IndicesRequestCache.Key key1 = new IndicesRequestCache.Key(shardId, new TestBytesReference(1), rKey1); + IndicesRequestCache.Key key2 = new IndicesRequestCache.Key(shardId, new TestBytesReference(1), rKey1); + IndicesRequestCache.Key key3 = new IndicesRequestCache.Key(shardId1, new TestBytesReference(1), rKey1); + IndicesRequestCache.Key key4 = new IndicesRequestCache.Key(shardId, new TestBytesReference(1), rKey2); + IndicesRequestCache.Key key5 = new IndicesRequestCache.Key(shardId, new TestBytesReference(2), rKey2); String s = "Some other random object"; assertEquals(key1, key1); assertEquals(key1, key2); @@ -459,6 +580,29 @@ public void testEqualsKey() throws IOException { assertNotEquals(key1, key5); } + public void testSerializationDeserializationOfCacheKey() throws Exception { + TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); + BytesReference termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false); + IndexService indexService = createIndex("test"); + IndexShard indexShard = indexService.getShard(0); + IndicesService.IndexShardCacheEntity shardCacheEntity = new IndicesService.IndexShardCacheEntity(indexShard); + String readerCacheKeyId = UUID.randomUUID().toString(); + IndicesRequestCache.Key key1 = new IndicesRequestCache.Key(indexShard.shardId(), termBytes, readerCacheKeyId); + BytesReference bytesReference = null; + try (BytesStreamOutput out = new BytesStreamOutput()) { + key1.writeTo(out); + bytesReference = out.bytes(); + } + StreamInput in = bytesReference.streamInput(); + + IndicesRequestCache.Key key2 = new IndicesRequestCache.Key(in); + + assertEquals(readerCacheKeyId, key2.readerCacheKeyId); + assertEquals(((IndexShard) shardCacheEntity.getCacheIdentity()).shardId(), key2.shardId); + assertEquals(termBytes, key2.value); + + } + private class TestBytesReference extends AbstractBytesReference { int dummyValue; @@ -509,34 +653,4 @@ public boolean isFragment() { return false; } } - - private class TestEntity extends AbstractIndexShardCacheEntity { - private final AtomicBoolean standInForIndexShard; - private final ShardRequestCache shardRequestCache; - - private TestEntity(ShardRequestCache shardRequestCache, AtomicBoolean standInForIndexShard) { - this.standInForIndexShard = standInForIndexShard; - this.shardRequestCache = shardRequestCache; - } - - @Override - protected ShardRequestCache stats() { - return shardRequestCache; - } - 
- @Override - public boolean isOpen() { - return standInForIndexShard.get(); - } - - @Override - public Object getCacheIdentity() { - return standInForIndexShard; - } - - @Override - public long ramBytesUsed() { - return 42; - } - } } diff --git a/server/src/test/java/org/opensearch/indices/IndicesServiceCloseTests.java b/server/src/test/java/org/opensearch/indices/IndicesServiceCloseTests.java index 280a9ff2acf21..8a00cd2db21c9 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesServiceCloseTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesServiceCloseTests.java @@ -32,33 +32,39 @@ package org.opensearch.indices; -import org.apache.lucene.document.LongPoint; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.ConstantScoreScorer; +import org.apache.lucene.search.ConstantScoreWeight; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryVisitor; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.routing.allocation.DiskThresholdSettings; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.cache.RemovalNotification; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexService; import org.opensearch.index.engine.Engine; import org.opensearch.index.shard.IndexShard; -import org.opensearch.indices.IndicesRequestCache.Key; import org.opensearch.indices.breaker.HierarchyCircuitBreakerService; import org.opensearch.node.MockNode; import org.opensearch.node.Node; import org.opensearch.node.NodeValidationException; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.InternalSettingsPlugin; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.MockHttpTransport; +import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; import org.opensearch.transport.nio.MockNioTransportPlugin; +import java.io.IOException; import java.nio.file.Path; import java.util.Arrays; import java.util.Collections; @@ -73,6 +79,56 @@ public class IndicesServiceCloseTests extends OpenSearchTestCase { + private static class DummyQuery extends Query { + + private final int id; + + DummyQuery(int id) { + this.id = id; + } + + @Override + public void visit(QueryVisitor visitor) { + visitor.visitLeaf(this); + } + + @Override + public boolean equals(Object obj) { + return sameClassAs(obj) && id == ((IndicesServiceCloseTests.DummyQuery) obj).id; + } + + @Override + public int hashCode() { + return 31 * classHash() + id; + } + + @Override + public String toString(String field) { + return "dummy"; + } + + @Override + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + return new ConstantScoreWeight(this, boost) { + @Override + public Scorer scorer(LeafReaderContext context) throws IOException { + return new ConstantScoreScorer(this, score(), scoreMode, DocIdSetIterator.all(context.reader().maxDoc())); + } + + 
@Override + public int count(LeafReaderContext context) { + return -1; + } + + @Override + public boolean isCacheable(LeafReaderContext ctx) { + return true; + } + }; + } + + } + private Node startNode() throws NodeValidationException { final Path tempDir = createTempDir(); String nodeName = "node_s_0"; @@ -225,7 +281,7 @@ public void testCloseAfterRequestHasUsedQueryCache() throws Exception { Engine.Searcher searcher = shard.acquireSearcher("test"); assertEquals(1, searcher.getIndexReader().maxDoc()); - Query query = LongPoint.newRangeQuery("foo", 0, 5); + Query query = new DummyQuery(1); assertEquals(0L, cache.getStats(shard.shardId()).getCacheSize()); searcher.count(query); assertEquals(1L, cache.getStats(shard.shardId()).getCacheSize()); @@ -271,7 +327,7 @@ public void testCloseWhileOngoingRequestUsesQueryCache() throws Exception { node.close(); assertEquals(1, indicesService.indicesRefCount.refCount()); - Query query = LongPoint.newRangeQuery("foo", 0, 5); + Query query = new DummyQuery(1); assertEquals(0L, cache.getStats(shard.shardId()).getCacheSize()); searcher.count(query); assertEquals(1L, cache.getStats(shard.shardId()).getCacheSize()); @@ -314,15 +370,12 @@ public void testCloseWhileOngoingRequestUsesRequestCache() throws Exception { assertEquals(1, indicesService.indicesRefCount.refCount()); assertEquals(0L, cache.count()); - IndicesRequestCache.CacheEntity cacheEntity = new IndicesRequestCache.CacheEntity() { + IndicesService.IndexShardCacheEntity cacheEntity = new IndicesService.IndexShardCacheEntity(shard) { @Override public long ramBytesUsed() { return 42; } - @Override - public void onCached(Key key, BytesReference value) {} - @Override public boolean isOpen() { return true; @@ -330,17 +383,8 @@ public boolean isOpen() { @Override public Object getCacheIdentity() { - return this; + return shard; } - - @Override - public void onHit() {} - - @Override - public void onMiss() {} - - @Override - public void onRemoval(RemovalNotification<Key, BytesReference> notification) {} }; cache.getOrCompute(cacheEntity, () -> new BytesArray("bar"), searcher.getDirectoryReader(), new BytesArray("foo")); assertEquals(1L, cache.count()); diff --git a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java index 2c0f061f13e58..742dbdeba8c5b 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java @@ -35,7 +35,6 @@ import org.apache.lucene.search.similarities.Similarity; import org.apache.lucene.store.AlreadyClosedException; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.action.admin.indices.stats.IndexShardStats; import org.opensearch.cluster.ClusterName; @@ -45,16 +44,18 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; -import org.opensearch.core.util.FileSystemUtils; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.util.FileSystemUtils; import org.opensearch.env.NodeEnvironment; import org.opensearch.env.ShardLockObtainFailedException; import 
org.opensearch.gateway.GatewayMetaState; import org.opensearch.gateway.LocalAllocateDangledIndices; import org.opensearch.gateway.MetaStateService; -import org.opensearch.core.index.Index; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; @@ -69,15 +70,14 @@ import org.opensearch.index.shard.IllegalIndexShardStateException; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.ShardPath; import org.opensearch.index.similarity.NonNegativeScoresSimilarity; import org.opensearch.indices.IndicesService.ShardDeletionCheckResult; import org.opensearch.plugins.EnginePlugin; import org.opensearch.plugins.MapperPlugin; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.test.hamcrest.RegexMatcher; import java.io.IOException; @@ -619,4 +619,9 @@ public void testConflictingEngineFactories() { ".*multiple engine factories provided for \\[foobar/.*\\]: \\[.*FooEngineFactory\\],\\[.*BarEngineFactory\\].*"; assertThat(e, hasToString(new RegexMatcher(pattern))); } + + public void testClusterRemoteTranslogBufferIntervalDefault() { + IndicesService indicesService = getIndicesService(); + assertEquals(IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, indicesService.getClusterRemoteTranslogBufferInterval()); + } } diff --git a/server/src/test/java/org/opensearch/indices/NodeIndicesStatsTests.java b/server/src/test/java/org/opensearch/indices/NodeIndicesStatsTests.java index 9be45d4e77940..2424e38636466 100644 --- a/server/src/test/java/org/opensearch/indices/NodeIndicesStatsTests.java +++ b/server/src/test/java/org/opensearch/indices/NodeIndicesStatsTests.java @@ -32,6 +32,10 @@ package org.opensearch.indices; +import org.opensearch.action.admin.indices.stats.CommonStats; +import org.opensearch.action.search.SearchRequestStats; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.test.OpenSearchTestCase; @@ -43,7 +47,10 @@ public class NodeIndicesStatsTests extends OpenSearchTestCase { public void testInvalidLevel() { - final NodeIndicesStats stats = new NodeIndicesStats(null, Collections.emptyMap()); + CommonStats oldStats = new CommonStats(); + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + SearchRequestStats requestStats = new SearchRequestStats(clusterSettings); + final NodeIndicesStats stats = new NodeIndicesStats(oldStats, Collections.emptyMap(), requestStats); final String level = randomAlphaOfLength(16); final ToXContent.Params params = new ToXContent.MapParams(Collections.singletonMap("level", level)); final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> stats.toXContent(null, params)); diff --git a/server/src/test/java/org/opensearch/indices/ShardLimitValidatorTests.java b/server/src/test/java/org/opensearch/indices/ShardLimitValidatorTests.java index 6f3f6180e6ae0..040632ea3ed8d 100644 --- a/server/src/test/java/org/opensearch/indices/ShardLimitValidatorTests.java +++ b/server/src/test/java/org/opensearch/indices/ShardLimitValidatorTests.java @@ -54,17 +54,17 @@ import java.util.stream.Collectors; import 
static java.util.Collections.emptyMap; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_CREATED; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_CREATED; import static org.opensearch.cluster.metadata.MetadataIndexStateServiceTests.addClosedIndex; import static org.opensearch.cluster.metadata.MetadataIndexStateServiceTests.addOpenedIndex; import static org.opensearch.cluster.shards.ShardCounts.forDataNodeCount; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; import static org.opensearch.indices.ShardLimitValidator.SETTING_CLUSTER_IGNORE_DOT_INDEXES; import static org.opensearch.indices.ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE; import static org.opensearch.indices.ShardLimitValidator.SETTING_MAX_SHARDS_PER_CLUSTER_KEY; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class ShardLimitValidatorTests extends OpenSearchTestCase { diff --git a/server/src/test/java/org/opensearch/indices/TermsLookupTests.java b/server/src/test/java/org/opensearch/indices/TermsLookupTests.java index b37b6eb219950..8a7867729f2c1 100644 --- a/server/src/test/java/org/opensearch/indices/TermsLookupTests.java +++ b/server/src/test/java/org/opensearch/indices/TermsLookupTests.java @@ -33,9 +33,9 @@ package org.opensearch.indices; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/indices/analysis/AnalysisModuleTests.java b/server/src/test/java/org/opensearch/indices/analysis/AnalysisModuleTests.java index 38398a204491a..c9e26d6d6159a 100644 --- a/server/src/test/java/org/opensearch/indices/analysis/AnalysisModuleTests.java +++ b/server/src/test/java/org/opensearch/indices/analysis/AnalysisModuleTests.java @@ -34,7 +34,6 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.CharFilter; -import org.apache.lucene.tests.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenFilter; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; @@ -42,6 +41,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.store.Directory; import org.apache.lucene.store.NIOFSDirectory; +import org.apache.lucene.tests.analysis.MockTokenizer; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.io.Streams; @@ -55,18 +55,18 @@ import org.opensearch.index.analysis.CharFilterFactory; import org.opensearch.index.analysis.CustomAnalyzer; import org.opensearch.index.analysis.IndexAnalyzers; +import org.opensearch.index.analysis.MyFilterTokenFilterFactory; import org.opensearch.index.analysis.PreConfiguredCharFilter; import org.opensearch.index.analysis.PreConfiguredTokenFilter; import org.opensearch.index.analysis.PreConfiguredTokenizer; import org.opensearch.index.analysis.StandardTokenizerFactory; import 
org.opensearch.index.analysis.StopTokenFilterFactory; import org.opensearch.index.analysis.TokenFilterFactory; -import org.opensearch.index.analysis.MyFilterTokenFilterFactory; import org.opensearch.index.analysis.TokenizerFactory; import org.opensearch.indices.analysis.AnalysisModule.AnalysisProvider; import org.opensearch.plugins.AnalysisPlugin; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; import org.hamcrest.MatcherAssert; @@ -86,10 +86,10 @@ import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; -import static org.apache.lucene.tests.analysis.BaseTokenStreamTestCase.assertTokenStreamContents; import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; +import static org.apache.lucene.tests.analysis.BaseTokenStreamTestCase.assertTokenStreamContents; public class AnalysisModuleTests extends OpenSearchTestCase { private final Settings emptyNodeSettings = Settings.builder() diff --git a/server/src/test/java/org/opensearch/indices/breaker/BreakerSettingsTests.java b/server/src/test/java/org/opensearch/indices/breaker/BreakerSettingsTests.java index 93c3b0f48315d..c158d9add83f4 100644 --- a/server/src/test/java/org/opensearch/indices/breaker/BreakerSettingsTests.java +++ b/server/src/test/java/org/opensearch/indices/breaker/BreakerSettingsTests.java @@ -32,8 +32,8 @@ package org.opensearch.indices.breaker; -import org.opensearch.common.breaker.CircuitBreaker; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.breaker.CircuitBreaker; import org.opensearch.test.OpenSearchTestCase; import static org.hamcrest.Matchers.equalTo; diff --git a/server/src/test/java/org/opensearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java b/server/src/test/java/org/opensearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java index 7bdff59e7c334..591f8f15a1441 100644 --- a/server/src/test/java/org/opensearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java @@ -33,26 +33,27 @@ package org.opensearch.indices.breaker; import org.opensearch.common.breaker.ChildMemoryCircuitBreaker; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.breaker.CircuitBreakingException; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.CircuitBreakingException; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.monitor.jvm.JvmInfo; import org.opensearch.search.aggregations.MultiBucketConsumerService; import org.opensearch.test.OpenSearchTestCase; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.TimeUnit; import 
java.util.concurrent.TimeoutException; -import java.util.Arrays; -import java.util.Collections; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; diff --git a/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java b/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java index 65a70b9f059f7..c455101ff4549 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java +++ b/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java @@ -32,8 +32,6 @@ package org.opensearch.indices.cluster; -import org.junit.Before; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; @@ -43,16 +41,17 @@ import org.opensearch.common.Nullable; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; -import org.opensearch.index.remote.RemoteRefreshSegmentPressureService; +import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; import org.opensearch.index.seqno.RetentionLeaseSyncer; import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; import org.opensearch.index.shard.PrimaryReplicaSyncer.ResyncTask; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.IndicesService; import org.opensearch.indices.cluster.IndicesClusterStateService.AllocatedIndex; import org.opensearch.indices.cluster.IndicesClusterStateService.AllocatedIndices; @@ -63,6 +62,7 @@ import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.repositories.RepositoriesService; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; import java.io.IOException; import java.util.HashMap; @@ -76,9 +76,9 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; +import static org.opensearch.common.collect.MapBuilder.newMapBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.opensearch.common.collect.MapBuilder.newMapBuilder; /** * Abstract base class for tests against {@link IndicesClusterStateService} @@ -264,7 +264,7 @@ public MockIndexShard createShard( final RetentionLeaseSyncer retentionLeaseSyncer, final DiscoveryNode targetNode, final DiscoveryNode sourceNode, - final RemoteRefreshSegmentPressureService remoteRefreshSegmentPressureService + final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory ) throws IOException { failRandomly(); RecoveryState recoveryState = new RecoveryState(shardRouting, targetNode, sourceNode); diff --git a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java index 16f046306dc7c..dc4dca80ea110 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java +++ 
b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java @@ -36,7 +36,6 @@ import org.apache.logging.log4j.Logger; import org.opensearch.ExceptionsHelper; import org.opensearch.Version; -import org.opensearch.action.ActionResponse; import org.opensearch.action.admin.cluster.reroute.ClusterRerouteRequest; import org.opensearch.action.admin.cluster.reroute.TransportClusterRerouteAction; import org.opensearch.action.admin.indices.close.CloseIndexRequest; @@ -90,24 +89,29 @@ import org.opensearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.SameShardAllocationDecider; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.Priority; import org.opensearch.common.CheckedFunction; +import org.opensearch.common.Priority; +import org.opensearch.common.SetOnce; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.index.Index; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; import org.opensearch.env.TestEnvironment; -import org.opensearch.core.index.Index; import org.opensearch.index.IndexService; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.shard.IndexEventListener; import org.opensearch.indices.IndicesService; import org.opensearch.indices.ShardLimitValidator; import org.opensearch.indices.SystemIndices; +import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.repositories.RepositoriesService; import org.opensearch.snapshots.EmptySnapshotsInfoService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.gateway.TestGatewayAllocator; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; @@ -123,10 +127,10 @@ import java.util.function.Function; import java.util.stream.Collectors; -import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom; import static java.util.Collections.emptyMap; import static org.opensearch.env.Environment.PATH_HOME_SETTING; import static org.hamcrest.Matchers.notNullValue; +import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom; import static org.junit.Assert.assertThat; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyString; @@ -150,6 +154,8 @@ public class ClusterStateChanges { private final TransportUpdateSettingsAction transportUpdateSettingsAction; private final TransportClusterRerouteAction transportClusterRerouteAction; private final TransportCreateIndexAction transportCreateIndexAction; + private final RepositoriesService repositoriesService; + private final RemoteStoreNodeService remoteStoreNodeService; private final NodeRemovalClusterStateTaskExecutor nodeRemovalExecutor; private final JoinTaskExecutor joinTaskExecutor; @@ -232,7 +238,8 @@ public ClusterStateChanges(NamedXContentRegistry xContentRegistry, ThreadPool th TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> DiscoveryNode.createLocal(SETTINGS, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), clusterSettings, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); MetadataIndexUpgradeService 
metadataIndexUpgradeService = new MetadataIndexUpgradeService( SETTINGS, @@ -362,8 +369,19 @@ public IndexMetadata upgradeIndexMetadata(IndexMetadata indexMetadata, Version m indexNameExpressionResolver ); + repositoriesService = new RepositoriesService( + Settings.EMPTY, + clusterService, + transportService, + Collections.emptyMap(), + Collections.emptyMap(), + threadPool + ); + + remoteStoreNodeService = new RemoteStoreNodeService(new SetOnce<>(repositoriesService)::get, threadPool); + nodeRemovalExecutor = new NodeRemovalClusterStateTaskExecutor(allocationService, logger); - joinTaskExecutor = new JoinTaskExecutor(Settings.EMPTY, allocationService, logger, (s, p, r) -> {}); + joinTaskExecutor = new JoinTaskExecutor(Settings.EMPTY, allocationService, logger, (s, p, r) -> {}, remoteStoreNodeService); } public ClusterState createIndex(ClusterState state, CreateIndexRequest request) { diff --git a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index 6b1e4c8ebbed6..22bf337b05598 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -62,14 +62,15 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.util.set.Sets; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.seqno.RetentionLeaseSyncer; import org.opensearch.index.shard.PrimaryReplicaSyncer; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.replication.SegmentReplicationSourceService; import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.repositories.RepositoriesService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; @@ -545,7 +546,8 @@ private IndicesClusterStateService createIndicesClusterStateService( TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); final ClusterService clusterService = mock(ClusterService.class); final RepositoriesService repositoriesService = new RepositoriesService( diff --git a/server/src/test/java/org/opensearch/indices/memory/breaker/CircuitBreakerUnitTests.java b/server/src/test/java/org/opensearch/indices/memory/breaker/CircuitBreakerUnitTests.java index a2705a0730aaf..aeadcf91e3e21 100644 --- a/server/src/test/java/org/opensearch/indices/memory/breaker/CircuitBreakerUnitTests.java +++ b/server/src/test/java/org/opensearch/indices/memory/breaker/CircuitBreakerUnitTests.java @@ -32,8 +32,8 @@ package org.opensearch.indices.memory.breaker; -import org.opensearch.common.breaker.CircuitBreaker; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.breaker.CircuitBreaker; import org.opensearch.indices.breaker.BreakerSettings; import org.opensearch.indices.breaker.HierarchyCircuitBreakerService; import 
org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/indices/recovery/LocalStorePeerRecoverySourceHandlerTests.java b/server/src/test/java/org/opensearch/indices/recovery/LocalStorePeerRecoverySourceHandlerTests.java index 0b2de2acbb1d0..4685a7196b85a 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/LocalStorePeerRecoverySourceHandlerTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/LocalStorePeerRecoverySourceHandlerTests.java @@ -40,16 +40,13 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.index.Term; -import org.apache.lucene.tests.store.BaseDirectoryWrapper; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; -import org.junit.After; -import org.junit.Before; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.tests.store.BaseDirectoryWrapper; import org.opensearch.ExceptionsHelper; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.LatchedActionListener; import org.opensearch.action.StepListener; import org.opensearch.action.support.PlainActionFuture; @@ -59,10 +56,8 @@ import org.opensearch.common.Randomness; import org.opensearch.common.SetOnce; import org.opensearch.common.UUIDs; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.concurrent.GatedCloseable; -import org.opensearch.core.util.FileSystemUtils; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.lucene.store.IndexOutputOutputStream; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.common.settings.ClusterSettings; @@ -70,9 +65,13 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.util.concurrent.ConcurrentCollections; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.util.io.IOUtils; -import org.opensearch.common.lease.Releasable; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.util.FileSystemUtils; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexSettings; import org.opensearch.index.engine.Engine; import org.opensearch.index.engine.RecoveryEngineException; @@ -90,7 +89,6 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardRelocatedException; import org.opensearch.index.shard.IndexShardState; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.index.translog.Translog; @@ -104,6 +102,8 @@ import org.opensearch.threadpool.FixedExecutorBuilder; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; import java.io.IOException; import java.io.OutputStream; @@ -487,7 +487,7 @@ private Engine.Index getIndex(final String id) { null, Arrays.asList(document), source, - XContentType.JSON, + MediaTypeRegistry.JSON, null ); return new Engine.Index(new Term("_id", 
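A recurring substitution in these test hunks is `MediaTypeRegistry.JSON` (from `org.opensearch.core.xcontent`) in place of the old `org.opensearch.common.xcontent.XContentType.JSON`, part of relocating shared types into the core library. The call sites keep the same shape; a small sketch of the updated usage, with hypothetical index and document id values:

```java
import org.opensearch.core.common.bytes.BytesArray;
import org.opensearch.core.xcontent.MediaTypeRegistry;
import org.opensearch.index.mapper.SourceToParse;

class SourceToParseExample {
    // Builds an empty JSON source for a hypothetical index/doc id, using the
    // relocated MediaTypeRegistry constant instead of XContentType.JSON.
    static SourceToParse emptyJsonSource(String indexName, String id) {
        return new SourceToParse(indexName, id, new BytesArray("{}"), MediaTypeRegistry.JSON);
    }
}
```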
Uid.encodeId(doc.id())), randomNonNegativeLong(), doc); diff --git a/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java index 67811e24b03c4..34f854cae56ba 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java @@ -35,7 +35,6 @@ import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.support.PlainActionFuture; @@ -45,10 +44,11 @@ import org.opensearch.cluster.routing.ShardRoutingHelper; import org.opensearch.common.Randomness; import org.opensearch.common.UUIDs; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.engine.EngineConfigFactory; import org.opensearch.index.engine.NoOpEngine; import org.opensearch.index.mapper.SourceToParse; @@ -189,7 +189,7 @@ private SeqNoStats populateRandomData(IndexShard shard) throws IOException { shard.getOperationPrimaryTerm(), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, - new SourceToParse(shard.shardId().getIndexName(), UUIDs.randomBase64UUID(), new BytesArray("{}"), XContentType.JSON) + new SourceToParse(shard.shardId().getIndexName(), UUIDs.randomBase64UUID(), new BytesArray("{}"), MediaTypeRegistry.JSON) ); if (randomInt(100) < 5) { shard.flush(new FlushRequest().waitIfOngoing(true)); diff --git a/server/src/test/java/org/opensearch/indices/recovery/RecoverySettingsDynamicUpdateTests.java b/server/src/test/java/org/opensearch/indices/recovery/RecoverySettingsDynamicUpdateTests.java index 75639661f539d..18e7dfb375132 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/RecoverySettingsDynamicUpdateTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/RecoverySettingsDynamicUpdateTests.java @@ -96,4 +96,49 @@ public void testInternalLongActionTimeout() { ); assertEquals(new TimeValue(duration, timeUnit), recoverySettings.internalActionLongTimeout()); } + + public void testSegmentMetadataRetention() { + // Default value + assertEquals(10, recoverySettings.getMinRemoteSegmentMetadataFiles()); + + // Setting value < default (10) + clusterSettings.applySettings( + Settings.builder().put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), 5).build() + ); + assertEquals(5, recoverySettings.getMinRemoteSegmentMetadataFiles()); + + // Setting min value + clusterSettings.applySettings( + Settings.builder().put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), -1).build() + ); + assertEquals(-1, recoverySettings.getMinRemoteSegmentMetadataFiles()); + + // Setting value > default (10) + clusterSettings.applySettings( + Settings.builder().put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), 15).build() + ); + assertEquals(15, 
recoverySettings.getMinRemoteSegmentMetadataFiles()); + + // Setting value to 0 should fail and retain the existing value + assertThrows( + IllegalArgumentException.class, + () -> clusterSettings.applySettings( + Settings.builder() + .put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), 0) + .build() + ) + ); + assertEquals(15, recoverySettings.getMinRemoteSegmentMetadataFiles()); + + // Setting value < -1 should fail and retain the existing value + assertThrows( + IllegalArgumentException.class, + () -> clusterSettings.applySettings( + Settings.builder() + .put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), -5) + .build() + ) + ); + assertEquals(15, recoverySettings.getMinRemoteSegmentMetadataFiles()); + } } diff --git a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTargetTests.java b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTargetTests.java index 7b171af4570d9..aac715cca095e 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTargetTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTargetTests.java @@ -41,11 +41,11 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.index.shard.ShardId; -import org.opensearch.indices.replication.common.ReplicationLuceneIndex.FileMetadata; -import org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.indices.recovery.RecoveryState.Stage; import org.opensearch.indices.recovery.RecoveryState.Translog; import org.opensearch.indices.recovery.RecoveryState.VerifyIndex; +import org.opensearch.indices.replication.common.ReplicationLuceneIndex; +import org.opensearch.indices.replication.common.ReplicationLuceneIndex.FileMetadata; import org.opensearch.indices.replication.common.ReplicationTimer; import org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java index 2c45c9e177c52..ad90255a3cc3f 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java @@ -33,6 +33,7 @@ package org.opensearch.indices.recovery; import com.carrotsearch.randomizedtesting.generators.RandomNumbers; + import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexWriter; @@ -41,19 +42,19 @@ import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.store.AlreadyClosedException; import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.bulk.BulkShardRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.UUIDs; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexSettings; -import 
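The retention test above pins down the accepted range of `CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING`: the default is 10, values of 1 or more are accepted, `-1` appears to act as a disabled/unbounded sentinel (the test accepts it below the otherwise-enforced minimum), and `0` or anything below `-1` is rejected while the previously applied value survives. A minimal sketch of that validation rule (assumed shape, not the production validator):

```java
final class RetentionValidation {
    // -1 (disabled) or any value >= 1 is accepted; 0 and values below -1 throw,
    // which is why the test expects the previously applied value to be retained.
    static int validate(int value) {
        if (value == -1 || value >= 1) {
            return value;
        }
        throw new IllegalArgumentException(
            "retention count must be -1 (disabled) or >= 1, got " + value);
    }
}
```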
org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.VersionType; import org.opensearch.index.engine.DocIdSeqNoAndSource; import org.opensearch.index.engine.Engine; @@ -167,7 +168,7 @@ public void testRecoveryWithOutOfOrderDeleteWithSoftDeletes() throws Exception { .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 10) // If soft-deletes is enabled, delete#1 will be reclaimed because its segment (segment_1) is fully deleted // index#0 will be retained if merge is disabled; otherwise it will be reclaimed because gcp=3 and retained_ops=0 - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .build(); try (ReplicationGroup shards = createGroup(1, settings)) { shards.startAll(); @@ -188,7 +189,7 @@ public void testRecoveryWithOutOfOrderDeleteWithSoftDeletes() throws Exception { 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, - new SourceToParse(indexName, "id", new BytesArray("{}"), XContentType.JSON) + new SourceToParse(indexName, "id", new BytesArray("{}"), MediaTypeRegistry.JSON) ); // index #3 orgReplica.applyIndexOperationOnReplica( @@ -198,7 +199,7 @@ public void testRecoveryWithOutOfOrderDeleteWithSoftDeletes() throws Exception { 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, - new SourceToParse(indexName, "id-3", new BytesArray("{}"), XContentType.JSON) + new SourceToParse(indexName, "id-3", new BytesArray("{}"), MediaTypeRegistry.JSON) ); // Flushing a new commit with local checkpoint=1 allows to delete the translog gen #1. orgReplica.flush(new FlushRequest().force(true).waitIfOngoing(true)); @@ -210,7 +211,7 @@ public void testRecoveryWithOutOfOrderDeleteWithSoftDeletes() throws Exception { 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, - new SourceToParse(indexName, "id-2", new BytesArray("{}"), XContentType.JSON) + new SourceToParse(indexName, "id-2", new BytesArray("{}"), MediaTypeRegistry.JSON) ); orgReplica.sync(); // advance local checkpoint orgReplica.updateGlobalCheckpointOnReplica(3L, "test"); @@ -222,7 +223,7 @@ public void testRecoveryWithOutOfOrderDeleteWithSoftDeletes() throws Exception { 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, - new SourceToParse(indexName, "id-5", new BytesArray("{}"), XContentType.JSON) + new SourceToParse(indexName, "id-5", new BytesArray("{}"), MediaTypeRegistry.JSON) ); if (randomBoolean()) { @@ -331,7 +332,7 @@ public void testPeerRecoverySendSafeCommitInFileBased() throws Exception { Engine.IndexResult result = primaryShard.applyIndexOperationOnPrimary( Versions.MATCH_ANY, VersionType.INTERNAL, - new SourceToParse(primaryShard.shardId().getIndexName(), Integer.toString(i), new BytesArray("{}"), XContentType.JSON), + new SourceToParse(primaryShard.shardId().getIndexName(), Integer.toString(i), new BytesArray("{}"), MediaTypeRegistry.JSON), SequenceNumbers.UNASSIGNED_SEQ_NO, 0, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, @@ -498,7 +499,7 @@ public void testRecoveryTrimsLocalTranslog() throws Exception { } int inflightDocs = scaledRandomIntBetween(1, 100); for (int i = 0; i < inflightDocs; i++) { - final IndexRequest indexRequest = new IndexRequest(index.getName()).id("extra_" + i).source("{}", XContentType.JSON); + final IndexRequest indexRequest = new IndexRequest(index.getName()).id("extra_" + i).source("{}", MediaTypeRegistry.JSON); final BulkShardRequest bulkShardRequest = indexOnPrimary(indexRequest, oldPrimary); for (IndexShard replica : 
randomSubsetOf(shards.getReplicas())) { indexOnReplica(bulkShardRequest, shards, replica); diff --git a/server/src/test/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandlerTests.java b/server/src/test/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandlerTests.java index 8135d9cd3718e..131514eb019b3 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandlerTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandlerTests.java @@ -12,11 +12,14 @@ import org.opensearch.common.settings.Settings; import org.opensearch.index.IndexSettings; import org.opensearch.index.engine.NRTReplicationEngineFactory; +import org.opensearch.index.mapper.MapperService; import org.opensearch.index.replication.OpenSearchIndexLevelReplicationTestCase; import org.opensearch.index.seqno.ReplicationTracker; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.replication.common.ReplicationType; +import java.nio.file.Path; + public class RemoteStorePeerRecoverySourceHandlerTests extends OpenSearchIndexLevelReplicationTestCase { private static final Settings settings = Settings.builder() @@ -27,22 +30,27 @@ public class RemoteStorePeerRecoverySourceHandlerTests extends OpenSearchIndexLe .build(); public void testReplicaShardRecoveryUptoLastFlushedCommit() throws Exception { - try (ReplicationGroup shards = createGroup(0, settings, new NRTReplicationEngineFactory())) { + final Path remoteDir = createTempDir(); + final String indexMapping = "{ \"" + MapperService.SINGLE_MAPPING_NAME + "\": {} }"; + try (ReplicationGroup shards = createGroup(0, settings, indexMapping, new NRTReplicationEngineFactory(), remoteDir)) { // Step1 - Start primary, index docs and flush shards.startPrimary(); final IndexShard primary = shards.getPrimary(); - int numDocs = shards.indexDocs(randomIntBetween(10, 100)); + int numDocs = shards.indexDocs(randomIntBetween(10, 20)); + logger.info("--> Index numDocs {} and flush", numDocs); shards.flush(); // Step 2 - Start replica for recovery to happen, check both has same number of docs - final IndexShard replica1 = shards.addReplica(); + final IndexShard replica1 = shards.addReplica(remoteDir); + logger.info("--> Added and started replica {}", replica1.routingEntry()); shards.startAll(); assertEquals(getDocIdAndSeqNos(primary), getDocIdAndSeqNos(replica1)); // Step 3 - Index more docs, run segment replication, check both have same number of docs - int moreDocs = shards.indexDocs(randomIntBetween(10, 100)); + int moreDocs = shards.indexDocs(randomIntBetween(10, 20)); primary.refresh("test"); + logger.info("--> Index more docs {} and replicate segments", moreDocs); replicateSegments(primary, shards.getReplicas()); assertEquals(getDocIdAndSeqNos(primary), getDocIdAndSeqNos(replica1)); @@ -55,7 +63,8 @@ public void testReplicaShardRecoveryUptoLastFlushedCommit() throws Exception { assertFalse(primary.getRetentionLeases().contains(ReplicationTracker.getPeerRecoveryRetentionLeaseId(replica1.routingEntry()))); // Step 6 - Start new replica, recovery happens, and check that new replica has all docs - final IndexShard replica2 = shards.addReplica(); + final IndexShard replica2 = shards.addReplica(remoteDir); + logger.info("--> Added and started replica {}", replica2.routingEntry()); shards.startAll(); shards.assertAllEqual(numDocs + moreDocs); diff --git a/server/src/test/java/org/opensearch/indices/recovery/ReplicationRequestTrackerTests.java 
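The reworked `RemoteStorePeerRecoverySourceHandlerTests` above routes the primary and every replica through one shared temp directory standing in for the remote segment store, and adds step-by-step logging. The setup pattern, sketched as a fragment using the test-framework helpers shown in the hunk (`createGroup`, `addReplica`, `createTempDir` are assumed from the enclosing test class):

```java
// Fragment: one shared "remote store" directory for the whole replication group.
final Path remoteDir = createTempDir();
final String indexMapping = "{ \"" + MapperService.SINGLE_MAPPING_NAME + "\": {} }";
try (ReplicationGroup shards = createGroup(0, settings, indexMapping, new NRTReplicationEngineFactory(), remoteDir)) {
    shards.startPrimary();
    final IndexShard replica = shards.addReplica(remoteDir); // recovers from the same remote dir
    shards.startAll();
}
```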
b/server/src/test/java/org/opensearch/indices/recovery/ReplicationRequestTrackerTests.java index afad385deabe4..fb221728f03d7 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/ReplicationRequestTrackerTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/ReplicationRequestTrackerTests.java @@ -33,9 +33,9 @@ package org.opensearch.indices.recovery; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.core.action.ActionListener; import org.opensearch.indices.replication.common.ReplicationRequestTracker; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; diff --git a/server/src/test/java/org/opensearch/indices/recovery/StartRecoveryRequestTests.java b/server/src/test/java/org/opensearch/indices/recovery/StartRecoveryRequestTests.java index a944c0c2de8ed..b76f830fe8431 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/StartRecoveryRequestTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/StartRecoveryRequestTests.java @@ -37,9 +37,9 @@ import org.opensearch.common.UUIDs; import org.opensearch.core.common.io.stream.InputStreamStreamInput; import org.opensearch.core.common.io.stream.OutputStreamStreamOutput; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.engine.Engine; import org.opensearch.index.seqno.SequenceNumbers; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.store.Store; import org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java b/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java index 3b289114f5ca1..44e2653cf01da 100644 --- a/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java @@ -8,20 +8,19 @@ package org.opensearch.indices.replication; -import org.junit.Assert; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.CancellableThreads; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexService; import org.opensearch.index.codec.CodecService; import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.IndicesService; import org.opensearch.indices.recovery.FileChunkWriter; import org.opensearch.indices.recovery.RecoverySettings; @@ -29,6 +28,7 @@ import org.opensearch.indices.replication.common.CopyState; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.transport.TransportService; +import org.junit.Assert; import java.io.IOException; import java.util.ArrayList; @@ -94,7 +94,7 @@ public void tearDown() throws Exception { } public void 
testPrepareAndSendSegments() throws IOException { - indexDoc(primary, "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar"); + indexDoc(primary, "1", "{\"foo\" : \"baz\"}", MediaTypeRegistry.JSON, "foobar"); primary.refresh("Test"); OngoingSegmentReplications replications = spy(new OngoingSegmentReplications(mockIndicesService, recoverySettings)); final CheckpointInfoRequest request = new CheckpointInfoRequest( @@ -162,7 +162,7 @@ public void testCancelReplication_AfterSendFilesStarts() throws IOException, Int CountDownLatch latch = new CountDownLatch(1); OngoingSegmentReplications replications = new OngoingSegmentReplications(mockIndicesService, recoverySettings); // add a doc and refresh so primary has more than one segment. - indexDoc(primary, "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar"); + indexDoc(primary, "1", "{\"foo\" : \"baz\"}", MediaTypeRegistry.JSON, "foobar"); primary.refresh("Test"); final CheckpointInfoRequest request = new CheckpointInfoRequest( 1L, @@ -403,4 +403,38 @@ public void testCancelForMissingIds() throws IOException { assertEquals(0, replications.cachedCopyStateSize()); closeShards(replica_2); } + + public void testPrepareForReplicationAlreadyReplicating() throws IOException { + OngoingSegmentReplications replications = new OngoingSegmentReplications(mockIndicesService, recoverySettings); + final String replicaAllocationId = replica.routingEntry().allocationId().getId(); + final CheckpointInfoRequest request = new CheckpointInfoRequest(1L, replicaAllocationId, primaryDiscoveryNode, testCheckpoint); + + final CopyState copyState = replications.prepareForReplication(request, mock(FileChunkWriter.class)); + + final SegmentReplicationSourceHandler handler = replications.getHandlers().get(replicaAllocationId); + assertEquals(handler.getCopyState(), copyState); + assertEquals(1, copyState.refCount()); + + ReplicationCheckpoint secondCheckpoint = new ReplicationCheckpoint( + testCheckpoint.getShardId(), + testCheckpoint.getPrimaryTerm(), + testCheckpoint.getSegmentsGen(), + testCheckpoint.getSegmentInfosVersion() + 1, + testCheckpoint.getCodec() + ); + + final CheckpointInfoRequest secondRequest = new CheckpointInfoRequest( + 1L, + replicaAllocationId, + primaryDiscoveryNode, + secondCheckpoint + ); + + final CopyState secondCopyState = replications.prepareForReplication(secondRequest, mock(FileChunkWriter.class)); + final SegmentReplicationSourceHandler secondHandler = replications.getHandlers().get(replicaAllocationId); + assertEquals(secondHandler.getCopyState(), secondCopyState); + assertEquals("New copy state is incref'd", 1, secondCopyState.refCount()); + assertEquals("Old copy state is cleaned up", 0, copyState.refCount()); + + } } diff --git a/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java b/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java index 4d273c71e7861..2531790ede4af 100644 --- a/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java @@ -10,28 +10,25 @@ import org.apache.lucene.codecs.Codec; import org.apache.lucene.util.Version; -import org.junit.Assert; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.service.ClusterService; import 
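The new `testPrepareForReplicationAlreadyReplicating` above pins down the `CopyState` lifecycle: when a second checkpoint request arrives for the same allocation id, the replacement copy state is incref'd to 1 and the superseded one is released to 0. A toy sketch of that reference-counting contract (not the real `CopyState`):

```java
import java.util.concurrent.atomic.AtomicInteger;

// Toy ref-counted resource mirroring the contract the test asserts.
final class RefCountedState {
    private final AtomicInteger refs = new AtomicInteger(1); // born referenced

    void incRef()  { refs.incrementAndGet(); }
    void decRef()  { refs.decrementAndGet(); } // at 0, the real class frees its resources
    int refCount() { return refs.get(); }
}

// Replacing an in-flight state: the new one starts at 1, the old drops to 0.
//   RefCountedState oldState = ..., newState = new RefCountedState();
//   oldState.decRef();  // oldState.refCount() == 0, newState.refCount() == 1
```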
org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.action.ActionListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.transport.TransportService; import java.util.Arrays; import java.util.Collections; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; import static org.mockito.Mockito.mock; @@ -68,7 +65,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); @@ -123,6 +121,7 @@ public void testGetSegmentFiles() { checkpoint, Arrays.asList(testMetadata), mock(IndexShard.class), + (fileName, bytesRecovered) -> {}, mock(ActionListener.class) ); CapturingTransport.CapturedRequest[] requestList = transport.getCapturedRequestsAndClear(); @@ -151,6 +150,7 @@ public void testTransportTimeoutForGetSegmentFilesAction() { checkpoint, Arrays.asList(testMetadata), mock(IndexShard.class), + (fileName, bytesRecovered) -> {}, mock(ActionListener.class) ); CapturingTransport.CapturedRequest[] requestList = transport.getCapturedRequestsAndClear(); @@ -161,39 +161,6 @@ public void testTransportTimeoutForGetSegmentFilesAction() { assertEquals(recoverySettings.internalActionLongTimeout(), capturedRequest.options.timeout()); } - public void testGetSegmentFiles_CancelWhileRequestOpen() throws InterruptedException { - CountDownLatch latch = new CountDownLatch(1); - final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint( - indexShard.shardId(), - PRIMARY_TERM, - SEGMENTS_GEN, - VERSION, - Codec.getDefault().getName() - ); - StoreFileMetadata testMetadata = new StoreFileMetadata("testFile", 1L, "checksum", Version.LATEST); - replicationSource.getSegmentFiles( - REPLICATION_ID, - checkpoint, - Arrays.asList(testMetadata), - mock(IndexShard.class), - new ActionListener<>() { - @Override - public void onResponse(GetSegmentFilesResponse getSegmentFilesResponse) { - Assert.fail("onFailure response expected."); - } - - @Override - public void onFailure(Exception e) { - assertEquals(e.getClass(), CancellableThreads.ExecutionCancelledException.class); - latch.countDown(); - } - } - ); - replicationSource.cancel(); - latch.await(2, TimeUnit.SECONDS); - assertEquals("listener should have resolved in a failure", 0, latch.getCount()); - } - private DiscoveryNode newDiscoveryNode(String nodeName) { return new DiscoveryNode( nodeName, diff --git a/server/src/test/java/org/opensearch/indices/replication/RemoteStoreReplicationSourceTests.java b/server/src/test/java/org/opensearch/indices/replication/RemoteStoreReplicationSourceTests.java index 04b5aa58ea485..287962b158c79 100644 --- a/server/src/test/java/org/opensearch/indices/replication/RemoteStoreReplicationSourceTests.java +++ 
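Several hunks in this section thread a new `BiConsumer<String, Long>` file-progress callback through `getSegmentFiles`; the tests pass a no-op `(fileName, bytesRecovered) -> {}`. A caller that actually wants progress could aggregate per-file byte counts, for example (a sketch, not the production tracker):

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.BiConsumer;

final class FileProgress {
    private final Map<String, Long> bytesPerFile = new ConcurrentHashMap<>();

    // Matches the (fileName, bytesRecovered) shape passed to getSegmentFiles.
    BiConsumer<String, Long> tracker() {
        return (fileName, bytesRecovered) -> bytesPerFile.merge(fileName, bytesRecovered, Long::sum);
    }

    long totalBytes() {
        return bytesPerFile.values().stream().mapToLong(Long::longValue).sum();
    }
}
```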
b/server/src/test/java/org/opensearch/indices/replication/RemoteStoreReplicationSourceTests.java @@ -8,43 +8,38 @@ package org.opensearch.indices.replication; -import org.apache.lucene.codecs.Codec; import org.apache.lucene.store.FilterDirectory; -import org.mockito.Mockito; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.index.engine.InternalEngineFactory; +import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.replication.OpenSearchIndexLevelReplicationTestCase; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; import org.opensearch.index.shard.RemoteStoreRefreshListenerTests; import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.index.store.Store; +import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.ReplicationType; import java.io.IOException; import java.util.Collections; +import java.util.List; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class RemoteStoreReplicationSourceTests extends OpenSearchIndexLevelReplicationTestCase { - - private static final long PRIMARY_TERM = 1L; - private static final long SEGMENTS_GEN = 2L; - private static final long VERSION = 4L; private static final long REPLICATION_ID = 123L; private RemoteStoreReplicationSource replicationSource; - private IndexShard indexShard; - - private IndexShard mockShard; - - private Store remoteStore; + private IndexShard primaryShard; + private IndexShard replicaShard; private final Settings settings = Settings.builder() .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, "my-repo") @@ -55,146 +50,124 @@ public class RemoteStoreReplicationSourceTests extends OpenSearchIndexLevelRepli @Override public void setUp() throws Exception { super.setUp(); - - indexShard = newStartedShard(true, settings, new InternalEngineFactory()); - - indexDoc(indexShard, "_doc", "1"); - indexDoc(indexShard, "_doc", "2"); - indexShard.refresh("test"); - - // mock shard - mockShard = mock(IndexShard.class); - Store store = mock(Store.class); - when(mockShard.store()).thenReturn(store); - when(store.directory()).thenReturn(indexShard.store().directory()); - remoteStore = mock(Store.class); - when(mockShard.remoteStore()).thenReturn(remoteStore); - RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = - (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) indexShard.remoteStore().directory()).getDelegate()) - .getDelegate(); - FilterDirectory remoteStoreFilterDirectory = new RemoteStoreRefreshListenerTests.TestFilterDirectory( - new RemoteStoreRefreshListenerTests.TestFilterDirectory(remoteSegmentStoreDirectory) - ); - when(remoteStore.directory()).thenReturn(remoteStoreFilterDirectory); - replicationSource = new RemoteStoreReplicationSource(mockShard); + primaryShard = newStartedShard(true, settings, new InternalEngineFactory()); + indexDoc(primaryShard, "_doc", "1"); + indexDoc(primaryShard, "_doc", "2"); + primaryShard.refresh("test"); + replicaShard = newStartedShard(false, settings, new 
NRTReplicationEngineFactory()); } @Override public void tearDown() throws Exception { - closeShards(indexShard); + closeShards(primaryShard, replicaShard); super.tearDown(); } public void testGetCheckpointMetadata() throws ExecutionException, InterruptedException { - when(mockShard.getSegmentInfosSnapshot()).thenReturn(indexShard.getSegmentInfosSnapshot()); - final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint( - indexShard.shardId(), - PRIMARY_TERM, - SEGMENTS_GEN, - VERSION, - Codec.getDefault().getName() - ); - + final ReplicationCheckpoint checkpoint = primaryShard.getLatestReplicationCheckpoint(); final PlainActionFuture<CheckpointInfoResponse> res = PlainActionFuture.newFuture(); + replicationSource = new RemoteStoreReplicationSource(primaryShard); replicationSource.getCheckpointMetadata(REPLICATION_ID, checkpoint, res); CheckpointInfoResponse response = res.get(); assert (response.getCheckpoint().equals(checkpoint)); - assert (!response.getMetadataMap().isEmpty()); + assert (response.getMetadataMap().isEmpty() == false); } public void testGetCheckpointMetadataFailure() { - final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint( - indexShard.shardId(), - PRIMARY_TERM, - SEGMENTS_GEN, - VERSION, - Codec.getDefault().getName() - ); - + IndexShard mockShard = mock(IndexShard.class); + final ReplicationCheckpoint checkpoint = primaryShard.getLatestReplicationCheckpoint(); when(mockShard.getSegmentInfosSnapshot()).thenThrow(new RuntimeException("test")); - assertThrows(RuntimeException.class, () -> { + replicationSource = new RemoteStoreReplicationSource(mockShard); final PlainActionFuture<CheckpointInfoResponse> res = PlainActionFuture.newFuture(); replicationSource.getCheckpointMetadata(REPLICATION_ID, checkpoint, res); res.get(); }); } - public void testGetCheckpointMetadataEmpty() throws ExecutionException, InterruptedException, IOException { - when(mockShard.getSegmentInfosSnapshot()).thenReturn(indexShard.getSegmentInfosSnapshot()); - final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint( - indexShard.shardId(), - PRIMARY_TERM, - SEGMENTS_GEN, - VERSION, - Codec.getDefault().getName() - ); - IndexShard emptyIndexShard = null; + public void testGetSegmentFiles() throws ExecutionException, InterruptedException, IOException { + final ReplicationCheckpoint checkpoint = primaryShard.getLatestReplicationCheckpoint(); + List<StoreFileMetadata> filesToFetch = primaryShard.getSegmentMetadataMap().values().stream().collect(Collectors.toList()); + final PlainActionFuture<GetSegmentFilesResponse> res = PlainActionFuture.newFuture(); + replicationSource = new RemoteStoreReplicationSource(primaryShard); + replicationSource.getSegmentFiles(REPLICATION_ID, checkpoint, filesToFetch, replicaShard, (fileName, bytesRecovered) -> {}, res); + GetSegmentFilesResponse response = res.get(); + assertEquals(response.files.size(), filesToFetch.size()); + assertTrue(response.files.containsAll(filesToFetch)); + closeShards(replicaShard); + } + + public void testGetSegmentFilesAlreadyExists() throws IOException, InterruptedException { + final ReplicationCheckpoint checkpoint = primaryShard.getLatestReplicationCheckpoint(); + List<StoreFileMetadata> filesToFetch = primaryShard.getSegmentMetadataMap().values().stream().collect(Collectors.toList()); + CountDownLatch latch = new CountDownLatch(1); try { - emptyIndexShard = newStartedShard( - true, - settings, - new InternalEngineFactory() - ); - RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = - 
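These rewritten tests lean on `PlainActionFuture`, which is both an `ActionListener` and a `Future`: the future is handed to the async source call, and the test then blocks on `res.get()` to make assertions. The bridge in isolation (a sketch; the `String` payload is a placeholder for the real response types):

```java
import org.opensearch.action.support.PlainActionFuture;
import org.opensearch.core.action.ActionListener;

final class FutureBridgeExample {
    static String await() throws Exception {
        PlainActionFuture<String> future = PlainActionFuture.newFuture();
        ActionListener<String> listener = future; // PlainActionFuture is an ActionListener
        listener.onResponse("done");              // would normally fire asynchronously
        return future.get();                      // test thread blocks until resolved
    }
}
```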
(RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) emptyIndexShard.remoteStore().directory()).getDelegate()) - .getDelegate(); - FilterDirectory remoteStoreFilterDirectory = new RemoteStoreRefreshListenerTests.TestFilterDirectory( - new RemoteStoreRefreshListenerTests.TestFilterDirectory(remoteSegmentStoreDirectory) + final PlainActionFuture<GetSegmentFilesResponse> res = PlainActionFuture.newFuture(); + replicationSource = new RemoteStoreReplicationSource(primaryShard); + replicationSource.getSegmentFiles( + REPLICATION_ID, + checkpoint, + filesToFetch, + primaryShard, + (fileName, bytesRecovered) -> {}, + res ); - when(remoteStore.directory()).thenReturn(remoteStoreFilterDirectory); - - final PlainActionFuture<CheckpointInfoResponse> res = PlainActionFuture.newFuture(); - when(mockShard.state()).thenReturn(IndexShardState.RECOVERING); - // Recovering shard should just do a noop and return empty metadata map. - replicationSource.getCheckpointMetadata(REPLICATION_ID, checkpoint, res); - CheckpointInfoResponse response = res.get(); - assert (response.getCheckpoint().equals(checkpoint)); - assert (response.getMetadataMap().isEmpty()); - - when(mockShard.state()).thenReturn(IndexShardState.STARTED); - // Started shard should fail with assertion error. - expectThrows(AssertionError.class, () -> { - final PlainActionFuture<CheckpointInfoResponse> res2 = PlainActionFuture.newFuture(); - replicationSource.getCheckpointMetadata(REPLICATION_ID, checkpoint, res2); - }); - } finally { - closeShards(emptyIndexShard); + res.get(); + } catch (AssertionError | ExecutionException ex) { + latch.countDown(); + assertTrue(ex instanceof AssertionError); + assertTrue(ex.getMessage().startsWith("Local store already contains the file")); } + latch.await(); } - public void testGetSegmentFiles() throws ExecutionException, InterruptedException { - final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint( - indexShard.shardId(), - PRIMARY_TERM, - SEGMENTS_GEN, - VERSION, - Codec.getDefault().getName() - ); - + public void testGetSegmentFilesReturnEmptyResponse() throws ExecutionException, InterruptedException { + final ReplicationCheckpoint checkpoint = primaryShard.getLatestReplicationCheckpoint(); final PlainActionFuture<GetSegmentFilesResponse> res = PlainActionFuture.newFuture(); - replicationSource.getSegmentFiles(REPLICATION_ID, checkpoint, Collections.emptyList(), indexShard, res); + replicationSource = new RemoteStoreReplicationSource(primaryShard); + replicationSource.getSegmentFiles( + REPLICATION_ID, + checkpoint, + Collections.emptyList(), + primaryShard, + (fileName, bytesRecovered) -> {}, + res + ); GetSegmentFilesResponse response = res.get(); assert (response.files.isEmpty()); - assertEquals("remote store", replicationSource.getDescription()); - } - public void testGetSegmentFilesFailure() throws IOException { - final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint( - indexShard.shardId(), - PRIMARY_TERM, - SEGMENTS_GEN, - VERSION, - Codec.getDefault().getName() - ); - Mockito.doThrow(new RuntimeException("testing")) - .when(mockShard) - .syncSegmentsFromRemoteSegmentStore(Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.anyBoolean()); - assertThrows(ExecutionException.class, () -> { - final PlainActionFuture<GetSegmentFilesResponse> res = PlainActionFuture.newFuture(); - replicationSource.getSegmentFiles(REPLICATION_ID, checkpoint, Collections.emptyList(), mockShard, res); - res.get(10, TimeUnit.SECONDS); + public void testGetCheckpointMetadataEmpty() 
throws ExecutionException, InterruptedException, IOException { + IndexShard mockShard = mock(IndexShard.class); + // Build mockShard to return replicaShard directory so that empty metadata file is returned. + buildIndexShardBehavior(mockShard, replicaShard); + replicationSource = new RemoteStoreReplicationSource(mockShard); + + // Mock replica shard state to RECOVERING so that getCheckpointInfo return empty map + final ReplicationCheckpoint checkpoint = replicaShard.getLatestReplicationCheckpoint(); + final PlainActionFuture<CheckpointInfoResponse> res = PlainActionFuture.newFuture(); + when(mockShard.state()).thenReturn(IndexShardState.RECOVERING); + replicationSource = new RemoteStoreReplicationSource(mockShard); + // Recovering shard should just do a noop and return empty metadata map. + replicationSource.getCheckpointMetadata(REPLICATION_ID, checkpoint, res); + CheckpointInfoResponse response = res.get(); + assert (response.getCheckpoint().equals(checkpoint)); + assert (response.getMetadataMap().isEmpty()); + + // Started shard should fail with assertion error. + when(mockShard.state()).thenReturn(IndexShardState.STARTED); + expectThrows(AssertionError.class, () -> { + final PlainActionFuture<CheckpointInfoResponse> res2 = PlainActionFuture.newFuture(); + replicationSource.getCheckpointMetadata(REPLICATION_ID, checkpoint, res2); }); } + + private void buildIndexShardBehavior(IndexShard mockShard, IndexShard indexShard) { + when(mockShard.getSegmentInfosSnapshot()).thenReturn(indexShard.getSegmentInfosSnapshot()); + Store remoteStore = mock(Store.class); + when(mockShard.remoteStore()).thenReturn(remoteStore); + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) indexShard.remoteStore().directory()).getDelegate()).getDelegate(); + FilterDirectory remoteStoreFilterDirectory = new RemoteStoreRefreshListenerTests.TestFilterDirectory(new RemoteStoreRefreshListenerTests.TestFilterDirectory(remoteSegmentStoreDirectory)); + when(remoteStore.directory()).thenReturn(remoteStoreFilterDirectory); + } } diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentFileTransferHandlerTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentFileTransferHandlerTests.java index d0065bfe013bd..7628a9ffc9ded 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentFileTransferHandlerTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentFileTransferHandlerTests.java @@ -10,19 +10,19 @@ import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexFileNames; -import org.junit.Assert; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.recovery.FileChunkWriter; import org.opensearch.indices.recovery.MultiChunkTransfer; +import org.junit.Assert; import java.io.IOException; import java.util.concurrent.CountDownLatch; @@ -31,17 +31,17 @@ import static 
java.util.Collections.emptyMap; import static java.util.Collections.emptySet; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyBoolean; +import static org.mockito.Mockito.anyInt; import static org.mockito.Mockito.anyLong; import static org.mockito.Mockito.anyString; -import static org.mockito.Mockito.anyInt; -import static org.mockito.Mockito.verifyNoInteractions; import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; public class SegmentFileTransferHandlerTests extends IndexShardTestCase { diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java index b4e9166f377ec..d586767290797 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java @@ -8,18 +8,14 @@ package org.opensearch.indices.replication; -import org.hamcrest.MatcherAssert; -import org.hamcrest.Matchers; -import org.junit.Assert; -import org.mockito.Mockito; import org.opensearch.OpenSearchException; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.CancellableThreads; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; @@ -27,6 +23,9 @@ import org.opensearch.indices.recovery.FileChunkWriter; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.CopyState; +import org.hamcrest.MatcherAssert; +import org.hamcrest.Matchers; +import org.junit.Assert; import java.io.IOException; import java.util.Collections; @@ -34,9 +33,11 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import org.mockito.Mockito; + import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; public class SegmentReplicationSourceHandlerTests extends IndexShardTestCase { @@ -140,7 +141,7 @@ public void onFailure(Exception e) { public void testSendFileFails() throws IOException { // index some docs on the primary so a segment is created. 
- indexDoc(primary, "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar"); + indexDoc(primary, "1", "{\"foo\" : \"baz\"}", MediaTypeRegistry.JSON, "foobar"); primary.refresh("Test"); chunkWriter = (fileMetadata, position, content, lastChunk, totalTranslogOps, listener) -> listener.onFailure( new OpenSearchException("Test") @@ -179,6 +180,7 @@ public void onFailure(Exception e) { assertEquals(e.getClass(), OpenSearchException.class); } }); + copyState.decRef(); } public void testReplicationAlreadyRunning() throws IOException { diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java index bee1bedc892d8..8f84053f2618e 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java @@ -10,31 +10,32 @@ import org.apache.lucene.codecs.Codec; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ReplicationGroup; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.IndicesService; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.CopyStateTests; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.IndexSettingsModule; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportException; -import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; @@ -43,13 +44,13 @@ import java.util.List; import java.util.concurrent.TimeUnit; +import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_REPLICATION_TYPE_SETTING; import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_REPLICATION_TYPE_SETTING; public class SegmentReplicationSourceServiceTests extends OpenSearchTestCase { @@ -102,7 +103,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> localNode, null, - Collections.emptySet() + Collections.emptySet(), + 
NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index 4643615d45d7e..3c72dda2d8b5d 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -9,20 +9,29 @@ package org.opensearch.indices.replication; import org.apache.lucene.store.AlreadyClosedException; -import org.junit.Assert; +import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; import org.opensearch.Version; -import org.opensearch.action.ActionListener; +import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.CancellableThreads; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.index.IndexService; import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.replication.TestReplicationSource; import org.opensearch.index.shard.IndexShard; @@ -32,23 +41,30 @@ import org.opensearch.indices.recovery.ForceSyncRequest; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.replication.common.CopyState; import org.opensearch.indices.replication.common.ReplicationCollection; import org.opensearch.indices.replication.common.ReplicationFailedException; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.telemetry.tracing.noop.NoopTracer; +import org.opensearch.test.junit.annotations.TestLogging; +import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.EmptyTransportResponseHandler; import org.opensearch.transport.TransportRequestOptions; -import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportService; -import org.opensearch.test.transport.CapturingTransport; +import org.junit.Assert; + import java.io.IOException; import java.util.Collections; import java.util.List; +import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.function.BiConsumer; +import static org.opensearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; import static org.mockito.ArgumentMatchers.any; 
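Across nearly every test in this section the `TransportService` construction gains a trailing `NoopTracer.INSTANCE` argument: the constructor now requires a `Tracer`, and unit tests opt out of telemetry with the no-op implementation. The recurring shape, as a fragment assuming the surrounding test fixtures (`settings`, `transport`, `threadPool`, `localNode`):

```java
// Recurring construction from the hunks above; the final Tracer argument is new.
TransportService transportService = new TransportService(
    settings,
    transport,
    threadPool,
    TransportService.NOOP_TRANSPORT_INTERCEPTOR,
    boundAddress -> localNode,
    null,                   // cluster settings are not needed in these tests
    Collections.emptySet(),
    NoopTracer.INSTANCE     // telemetry disabled for unit tests
);
transportService.start();
transportService.acceptIncomingRequests();
```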
import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.atLeastOnce; @@ -70,10 +86,7 @@ public class SegmentReplicationTargetServiceTests extends IndexShardTestCase { private IndexShard replicaShard; private IndexShard primaryShard; private ReplicationCheckpoint checkpoint; - private SegmentReplicationSource replicationSource; private SegmentReplicationTargetService sut; - - private ReplicationCheckpoint initialCheckpoint; private ReplicationCheckpoint aheadCheckpoint; private ReplicationCheckpoint newPrimaryCheckpoint; @@ -83,11 +96,13 @@ public class SegmentReplicationTargetServiceTests extends IndexShardTestCase { private DiscoveryNode localNode; private IndicesService indicesService; - private ClusterService clusterService; private SegmentReplicationState state; + private ReplicationCheckpoint initialCheckpoint; - private static long TRANSPORT_TIMEOUT = 30000;// 30sec + private ClusterState clusterState; + + private static final long TRANSPORT_TIMEOUT = 30000;// 30sec @Override public void setUp() throws Exception { @@ -107,9 +122,6 @@ public void setUp() throws Exception { 0L, replicaShard.getLatestReplicationCheckpoint().getCodec() ); - SegmentReplicationSourceFactory replicationSourceFactory = mock(SegmentReplicationSourceFactory.class); - replicationSource = mock(SegmentReplicationSource.class); - when(replicationSourceFactory.get(replicaShard)).thenReturn(replicationSource); testThreadPool = new TestThreadPool("test", Settings.EMPTY); localNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), Version.CURRENT); @@ -120,14 +132,15 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> localNode, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); indicesService = mock(IndicesService.class); - clusterService = mock(ClusterService.class); - ClusterState clusterState = mock(ClusterState.class); + ClusterService clusterService = mock(ClusterService.class); + clusterState = mock(ClusterState.class); RoutingTable mockRoutingTable = mock(RoutingTable.class); when(clusterService.state()).thenReturn(clusterState); when(clusterState.routingTable()).thenReturn(mockRoutingTable); @@ -135,7 +148,7 @@ public void setUp() throws Exception { when(clusterState.nodes()).thenReturn(DiscoveryNodes.builder().add(localNode).build()); sut = prepareForReplication(primaryShard, replicaShard, transportService, indicesService, clusterService); - initialCheckpoint = replicaShard.getLatestReplicationCheckpoint(); + initialCheckpoint = primaryShard.getLatestReplicationCheckpoint(); aheadCheckpoint = new ReplicationCheckpoint( initialCheckpoint.getShardId(), initialCheckpoint.getPrimaryTerm(), @@ -170,19 +183,23 @@ public void tearDown() throws Exception { public void testsSuccessfulReplication_listenerCompletes() throws InterruptedException { CountDownLatch latch = new CountDownLatch(1); - sut.startReplication(replicaShard, new SegmentReplicationTargetService.SegmentReplicationListener() { - @Override - public void onReplicationDone(SegmentReplicationState state) { - assertEquals(SegmentReplicationState.Stage.DONE, state.getStage()); - latch.countDown(); - } + sut.startReplication( + replicaShard, + primaryShard.getLatestReplicationCheckpoint(), + new SegmentReplicationTargetService.SegmentReplicationListener() { + @Override + public void onReplicationDone(SegmentReplicationState state) { + 
assertEquals(SegmentReplicationState.Stage.DONE, state.getStage()); + latch.countDown(); + } - @Override - public void onReplicationFailure(SegmentReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { - logger.error("Unexpected error", e); - Assert.fail("Test should succeed"); + @Override + public void onReplicationFailure(SegmentReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { + logger.error("Unexpected error", e); + Assert.fail("Test should succeed"); + } } - }); + ); latch.await(2, TimeUnit.SECONDS); assertEquals(0, latch.getCount()); } @@ -207,6 +224,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List<StoreFileMetadata> filesToFetch, IndexShard indexShard, + BiConsumer<String, Long> fileProgressTracker, ActionListener<GetSegmentFilesResponse> listener ) { Assert.fail("Should not be called"); @@ -214,6 +232,7 @@ public void getSegmentFiles( }; final SegmentReplicationTarget target = new SegmentReplicationTarget( replicaShard, + primaryShard.getLatestReplicationCheckpoint(), source, new SegmentReplicationTargetService.SegmentReplicationListener() { @Override @@ -238,23 +257,64 @@ public void onReplicationFailure(SegmentReplicationState state, ReplicationFaile public void testAlreadyOnNewCheckpoint() { SegmentReplicationTargetService spy = spy(sut); spy.onNewCheckpoint(replicaShard.getLatestReplicationCheckpoint(), replicaShard); - verify(spy, times(0)).startReplication(any(), any()); + verify(spy, times(0)).startReplication(any(), any(), any()); + verify(spy, times(1)).updateVisibleCheckpoint(NO_OPS_PERFORMED, replicaShard); } + @TestLogging(reason = "Getting trace logs from replication package", value = "org.opensearch.indices.replication:TRACE") public void testShardAlreadyReplicating() { - sut.startReplication(replicaShard, mock(SegmentReplicationTargetService.SegmentReplicationListener.class)); - sut.startReplication(replicaShard, new SegmentReplicationTargetService.SegmentReplicationListener() { + CountDownLatch blockGetCheckpointMetadata = new CountDownLatch(1); + SegmentReplicationSource source = new TestReplicationSource() { @Override - public void onReplicationDone(SegmentReplicationState state) { - Assert.fail("Should not succeed"); + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener<CheckpointInfoResponse> listener + ) { + try { + blockGetCheckpointMetadata.await(); + final CopyState copyState = new CopyState( + ReplicationCheckpoint.empty(primaryShard.shardId(), primaryShard.getLatestReplicationCheckpoint().getCodec()), + primaryShard + ); + listener.onResponse( + new CheckpointInfoResponse(copyState.getCheckpoint(), copyState.getMetadataMap(), copyState.getInfosBytes()) + ); + } catch (InterruptedException | IOException e) { + throw new RuntimeException(e); + } } @Override - public void onReplicationFailure(SegmentReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { - assertEquals("Shard " + replicaShard.shardId() + " is already replicating", e.getMessage()); - assertFalse(sendShardFailure); + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List<StoreFileMetadata> filesToFetch, + IndexShard indexShard, + BiConsumer<String, Long> fileProgressTracker, + ActionListener<GetSegmentFilesResponse> listener + ) { + listener.onResponse(new GetSegmentFilesResponse(Collections.emptyList())); } - }); + }; + final SegmentReplicationTarget target = spy( + new 
SegmentReplicationTarget( + replicaShard, + primaryShard.getLatestReplicationCheckpoint(), + source, + mock(SegmentReplicationTargetService.SegmentReplicationListener.class) + ) + ); + + final SegmentReplicationTargetService spy = spy(sut); + doReturn(false).when(spy).processLatestReceivedCheckpoint(eq(replicaShard), any()); + // Start first round of segment replication. + spy.startReplication(target); + + // Start a second round of segment replication; this should fail to start because the first round is still in progress + spy.onNewCheckpoint(newPrimaryCheckpoint, replicaShard); + verify(spy, times(1)).processLatestReceivedCheckpoint(eq(replicaShard), any()); + blockGetCheckpointMetadata.countDown(); } public void testOnNewCheckpointFromNewPrimaryCancelOngoingReplication() throws InterruptedException { @@ -289,6 +349,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List<StoreFileMetadata> filesToFetch, IndexShard indexShard, + BiConsumer<String, Long> fileProgressTracker, ActionListener<GetSegmentFilesResponse> listener ) { Assert.fail("Unreachable"); @@ -302,8 +363,21 @@ public void cancel() { } }; + final ReplicationCheckpoint updatedCheckpoint = new ReplicationCheckpoint( + initialCheckpoint.getShardId(), + initialCheckpoint.getPrimaryTerm(), + initialCheckpoint.getSegmentsGen(), + initialCheckpoint.getSegmentInfosVersion() + 1, + primaryShard.getDefaultCodecName() + ); + final SegmentReplicationTarget targetSpy = spy( - new SegmentReplicationTarget(replicaShard, source, mock(SegmentReplicationTargetService.SegmentReplicationListener.class)) + new SegmentReplicationTarget( + replicaShard, + updatedCheckpoint, + source, + mock(SegmentReplicationTargetService.SegmentReplicationListener.class) + ) ); // start replication. This adds the target to the ongoing replication collection @@ -317,20 +391,20 @@ public void cancel() { // ensure the old target is cancelled and a new iteration kicks off. verify(targetSpy, times(1)).cancel("Cancelling stuck target after new primary"); - verify(serviceSpy, times(1)).startReplication(eq(replicaShard), any()); + verify(serviceSpy, times(1)).startReplication(eq(replicaShard), any(), any()); } public void testNewCheckpointBehindCurrentCheckpoint() { SegmentReplicationTargetService spy = spy(sut); spy.onNewCheckpoint(checkpoint, replicaShard); - verify(spy, times(0)).startReplication(any(), any()); + verify(spy, times(0)).startReplication(any(), any(), any()); } public void testShardNotStarted() throws IOException { SegmentReplicationTargetService spy = spy(sut); IndexShard shard = newShard(false); spy.onNewCheckpoint(checkpoint, shard); - verify(spy, times(0)).startReplication(any(), any()); + verify(spy, times(0)).startReplication(any(), any(), any()); closeShards(shard); } @@ -346,7 +420,7 @@ public void testRejectCheckpointOnShardPrimaryMode() throws IOException { spy.onNewCheckpoint(aheadCheckpoint, spyShard); // Verify that checkpoint is not processed as the shard is in PrimaryMode.
- verify(spy, times(0)).startReplication(any(), any()); + verify(spy, times(0)).startReplication(any(), any(), any()); closeShards(primaryShard); } @@ -371,10 +445,10 @@ public void testStartReplicationListenerSuccess() throws InterruptedException { SegmentReplicationTargetService spy = spy(sut); CountDownLatch latch = new CountDownLatch(1); doAnswer(i -> { - ((SegmentReplicationTargetService.SegmentReplicationListener) i.getArgument(1)).onReplicationDone(state); + ((SegmentReplicationTargetService.SegmentReplicationListener) i.getArgument(2)).onReplicationDone(state); latch.countDown(); return null; - }).when(spy).startReplication(any(), any()); + }).when(spy).startReplication(any(), any(), any()); doNothing().when(spy).updateVisibleCheckpoint(eq(0L), any()); spy.afterIndexShardStarted(replicaShard); @@ -387,14 +461,14 @@ public void testStartReplicationListenerFailure() throws InterruptedException { SegmentReplicationTargetService spy = spy(sut); CountDownLatch latch = new CountDownLatch(1); doAnswer(i -> { - ((SegmentReplicationTargetService.SegmentReplicationListener) i.getArgument(1)).onReplicationFailure( + ((SegmentReplicationTargetService.SegmentReplicationListener) i.getArgument(2)).onReplicationFailure( state, new ReplicationFailedException(replicaShard, null), false ); latch.countDown(); return null; - }).when(spy).startReplication(any(), any()); + }).when(spy).startReplication(any(), any(), any()); doNothing().when(spy).updateVisibleCheckpoint(eq(0L), any()); spy.afterIndexShardStarted(replicaShard); @@ -402,9 +476,22 @@ public void testStartReplicationListenerFailure() throws InterruptedException { verify(spy, (never())).updateVisibleCheckpoint(eq(0L), eq(replicaShard)); } - public void testDoNotProcessLatestCheckpointIfItIsbehind() { - sut.updateLatestReceivedCheckpoint(replicaShard.getLatestReplicationCheckpoint(), replicaShard); - assertFalse(sut.processLatestReceivedCheckpoint(replicaShard, null)); + public void testDoNotProcessLatestCheckpointIfCheckpointIsBehind() { + SegmentReplicationTargetService service = spy(sut); + doReturn(mock(SegmentReplicationTarget.class)).when(service).startReplication(any(), any(), any()); + ReplicationCheckpoint checkpoint = replicaShard.getLatestReplicationCheckpoint(); + service.updateLatestReceivedCheckpoint(checkpoint, replicaShard); + service.processLatestReceivedCheckpoint(replicaShard, null); + verify(service, times(0)).startReplication(eq(replicaShard), eq(checkpoint), any()); + } + + public void testProcessLatestCheckpointIfCheckpointAhead() { + SegmentReplicationTargetService service = spy(sut); + doNothing().when(service).startReplication(any()); + doReturn(mock(SegmentReplicationTarget.class)).when(service).startReplication(any(), any(), any()); + service.updateLatestReceivedCheckpoint(aheadCheckpoint, replicaShard); + service.processLatestReceivedCheckpoint(replicaShard, null); + verify(service, times(1)).startReplication(eq(replicaShard), eq(aheadCheckpoint), any()); } public void testOnNewCheckpointInvokedOnClosedShardDoesNothing() throws IOException { @@ -497,7 +584,7 @@ public void testForceSegmentSyncHandlerWithFailure() throws Exception { ).txGet(); }); Throwable nestedException = finalizeException.getCause().getCause(); - assertTrue(nestedException instanceof IOException); + assertNotNull(ExceptionsHelper.unwrap(finalizeException, IOException.class)); assertTrue(nestedException.getMessage().contains("dummy failure")); } @@ -533,8 +620,10 @@ public void testForceSegmentSyncHandlerWithFailure_AlreadyClosedException_swallo 
} public void testTargetCancelledBeforeStartInvoked() { + final String cancelReason = "test"; final SegmentReplicationTarget target = new SegmentReplicationTarget( replicaShard, + primaryShard.getLatestReplicationCheckpoint(), mock(SegmentReplicationSource.class), new SegmentReplicationTargetService.SegmentReplicationListener() { @Override @@ -545,12 +634,54 @@ public void onReplicationDone(SegmentReplicationState state) { @Override public void onReplicationFailure(SegmentReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { // failures leave the state object in the last entered stage. - assertEquals(SegmentReplicationState.Stage.GET_CHECKPOINT_INFO, state.getStage()); - assertTrue(e.getCause() instanceof CancellableThreads.ExecutionCancelledException); + assertEquals(SegmentReplicationState.Stage.INIT, state.getStage()); + assertEquals(cancelReason, e.getMessage()); } } ); - target.cancel("test"); + target.cancel(cancelReason); sut.startReplication(target); } + + public void testProcessCheckpointOnClusterStateUpdate() { + // set up mocks on indices & index service to return our replica's index & shard. + IndexService indexService = mock(IndexService.class); + when(indexService.iterator()).thenReturn(Set.of(replicaShard).iterator()); + when(indexService.getIndexSettings()).thenReturn(replicaShard.indexSettings()); + when(indexService.index()).thenReturn(replicaShard.routingEntry().index()); + when(indicesService.iterator()).thenReturn(Set.of(indexService).iterator()); + + // create old & new cluster states + final String targetNodeId = "targetNodeId"; + ShardRouting initialRouting = primaryShard.routingEntry().relocate(targetNodeId, 0L); + assertEquals(ShardRoutingState.RELOCATING, initialRouting.state()); + + ShardRouting targetRouting = ShardRouting.newUnassigned( + primaryShard.shardId(), + true, + RecoverySource.PeerRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.REINITIALIZED, "test") + ).initialize(targetNodeId, initialRouting.allocationId().getId(), 0L).moveToStarted(); + assertEquals(targetNodeId, targetRouting.currentNodeId()); + assertEquals(ShardRoutingState.STARTED, targetRouting.state()); + ClusterState oldState = ClusterState.builder(ClusterName.DEFAULT) + .routingTable( + RoutingTable.builder() + .add(IndexRoutingTable.builder(primaryShard.shardId().getIndex()).addShard(initialRouting).build()) + .build() + ) + .build(); + ClusterState newState = ClusterState.builder(ClusterName.DEFAULT) + .routingTable( + RoutingTable.builder() + .add(IndexRoutingTable.builder(primaryShard.shardId().getIndex()).addShard(targetRouting).build()) + .build() + ) + .build(); + + // spy so we can verify processLatestReceivedCheckpoint is invoked + SegmentReplicationTargetService spy = spy(sut); + spy.clusterChanged(new ClusterChangedEvent("ignored", oldState, newState)); + verify(spy, times(1)).processLatestReceivedCheckpoint(eq(replicaShard), any()); + } } diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java index 176954b6d6b3d..8b4b3aff701b4 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java @@ -12,48 +12,51 @@ import org.apache.lucene.document.Field; import org.apache.lucene.document.StringField; import org.apache.lucene.document.TextField; +import
org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.Term; -import org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.store.ByteBuffersDataOutput; import org.apache.lucene.store.ByteBuffersIndexOutput; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.analysis.MockAnalyzer; import org.apache.lucene.tests.util.TestUtil; import org.apache.lucene.util.Version; -import org.junit.Assert; -import org.mockito.Mockito; import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchCorruptionException; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexSettings; import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.replication.TestReplicationSource; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.index.store.StoreTests; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.ReplicationFailedException; +import org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.DummyShardLock; import org.opensearch.test.IndexSettingsModule; +import org.junit.Assert; import java.io.FileNotFoundException; import java.io.IOException; import java.nio.file.NoSuchFileException; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.Random; -import java.util.Arrays; +import java.util.function.BiConsumer; + +import org.mockito.Mockito; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doThrow; @@ -130,10 +133,12 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List<StoreFileMetadata> filesToFetch, IndexShard indexShard, + BiConsumer<String, Long> fileProgressTracker, ActionListener<GetSegmentFilesResponse> listener ) { assertEquals(1, filesToFetch.size()); assert (filesToFetch.contains(SEGMENT_FILE)); + filesToFetch.forEach(storeFileMetadata -> fileProgressTracker.accept(storeFileMetadata.name(), storeFileMetadata.length())); listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); } }; @@ -141,13 +146,26 @@ public void getSegmentFiles( SegmentReplicationTargetService.SegmentReplicationListener segRepListener = mock( SegmentReplicationTargetService.SegmentReplicationListener.class ); - segrepTarget = new SegmentReplicationTarget(spyIndexShard, segrepSource, segRepListener); + segrepTarget = new SegmentReplicationTarget(spyIndexShard, repCheckpoint, segrepSource, segRepListener); segrepTarget.startReplication(new ActionListener<Void>() { @Override public void onResponse(Void replicationResponse) { try { verify(spyIndexShard, times(1)).finalizeReplication(any()); + assertEquals( + 1, + segrepTarget.state() + .getIndex() + .fileDetails() + .stream() + 
.filter(ReplicationLuceneIndex.FileMetadata::fullyRecovered) + .count() + ); + assertEquals( + 0, + segrepTarget.state().getIndex().fileDetails().stream().filter(file -> file.fullyRecovered() == false).count() + ); segrepTarget.markAsDone(); } catch (IOException ex) { Assert.fail(); @@ -181,6 +199,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List<StoreFileMetadata> filesToFetch, IndexShard indexShard, + BiConsumer<String, Long> fileProgressTracker, ActionListener<GetSegmentFilesResponse> listener ) { listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); @@ -189,7 +208,7 @@ public void getSegmentFiles( SegmentReplicationTargetService.SegmentReplicationListener segRepListener = mock( SegmentReplicationTargetService.SegmentReplicationListener.class ); - segrepTarget = new SegmentReplicationTarget(spyIndexShard, segrepSource, segRepListener); + segrepTarget = new SegmentReplicationTarget(spyIndexShard, repCheckpoint, segrepSource, segRepListener); segrepTarget.startReplication(new ActionListener<Void>() { @Override @@ -199,6 +218,15 @@ public void onResponse(Void replicationResponse) { @Override public void onFailure(Exception e) { + assertEquals( + 0, + segrepTarget.state() + .getIndex() + .fileDetails() + .stream() + .filter(ReplicationLuceneIndex.FileMetadata::fullyRecovered) + .count() + ); assertEquals(exception, e.getCause().getCause()); segrepTarget.fail(new ReplicationFailedException(e), false); } @@ -224,6 +252,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List<StoreFileMetadata> filesToFetch, IndexShard indexShard, + BiConsumer<String, Long> fileProgressTracker, ActionListener<GetSegmentFilesResponse> listener ) { listener.onFailure(exception); @@ -232,7 +261,7 @@ public void getSegmentFiles( SegmentReplicationTargetService.SegmentReplicationListener segRepListener = mock( SegmentReplicationTargetService.SegmentReplicationListener.class ); - segrepTarget = new SegmentReplicationTarget(spyIndexShard, segrepSource, segRepListener); + segrepTarget = new SegmentReplicationTarget(spyIndexShard, repCheckpoint, segrepSource, segRepListener); segrepTarget.startReplication(new ActionListener<Void>() { @Override @@ -242,13 +271,22 @@ public void onResponse(Void replicationResponse) { @Override public void onFailure(Exception e) { + assertEquals( + 0, + segrepTarget.state() + .getIndex() + .fileDetails() + .stream() + .filter(ReplicationLuceneIndex.FileMetadata::fullyRecovered) + .count() + ); assertEquals(exception, e.getCause().getCause()); segrepTarget.fail(new ReplicationFailedException(e), false); } }); } - public void testFailure_finalizeReplication_IOException() throws IOException { + public void testFailure_finalizeReplication_NonCorruptionException() throws IOException { IOException exception = new IOException("dummy failure"); SegmentReplicationSource segrepSource = new TestReplicationSource() { @@ -267,6 +305,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List<StoreFileMetadata> filesToFetch, IndexShard indexShard, + BiConsumer<String, Long> fileProgressTracker, ActionListener<GetSegmentFilesResponse> listener ) { listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); @@ -275,7 +314,7 @@ public void getSegmentFiles( SegmentReplicationTargetService.SegmentReplicationListener segRepListener = mock( SegmentReplicationTargetService.SegmentReplicationListener.class ); - segrepTarget = new SegmentReplicationTarget(spyIndexShard, segrepSource, segRepListener); + segrepTarget = new 
SegmentReplicationTarget(spyIndexShard, repCheckpoint, segrepSource, segRepListener); doThrow(exception).when(spyIndexShard).finalizeReplication(any()); @@ -287,6 +326,7 @@ public void onResponse(Void replicationResponse) { @Override public void onFailure(Exception e) { + assertEquals(ReplicationFailedException.class, e.getClass()); assertEquals(exception, e.getCause()); segrepTarget.fail(new ReplicationFailedException(e), false); } @@ -312,6 +352,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List<StoreFileMetadata> filesToFetch, IndexShard indexShard, + BiConsumer<String, Long> fileProgressTracker, ActionListener<GetSegmentFilesResponse> listener ) { listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); @@ -320,7 +361,7 @@ public void getSegmentFiles( SegmentReplicationTargetService.SegmentReplicationListener segRepListener = mock( SegmentReplicationTargetService.SegmentReplicationListener.class ); - segrepTarget = new SegmentReplicationTarget(spyIndexShard, segrepSource, segRepListener); + segrepTarget = new SegmentReplicationTarget(spyIndexShard, repCheckpoint, segrepSource, segRepListener); doThrow(exception).when(spyIndexShard).finalizeReplication(any()); @@ -356,6 +397,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List<StoreFileMetadata> filesToFetch, IndexShard indexShard, + BiConsumer<String, Long> fileProgressTracker, ActionListener<GetSegmentFilesResponse> listener ) { listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); @@ -364,7 +406,7 @@ public void getSegmentFiles( SegmentReplicationTargetService.SegmentReplicationListener segRepListener = mock( SegmentReplicationTargetService.SegmentReplicationListener.class ); - segrepTarget = new SegmentReplicationTarget(spyIndexShard, segrepSource, segRepListener); + segrepTarget = new SegmentReplicationTarget(spyIndexShard, repCheckpoint, segrepSource, segRepListener); when(spyIndexShard.getSegmentMetadataMap()).thenReturn(SI_SNAPSHOT_DIFFERENT); segrepTarget.startReplication(new ActionListener<Void>() { @Override @@ -374,6 +416,15 @@ public void onResponse(Void replicationResponse) { @Override public void onFailure(Exception e) { + assertEquals( + 0, + segrepTarget.state() + .getIndex() + .fileDetails() + .stream() + .filter(ReplicationLuceneIndex.FileMetadata::fullyRecovered) + .count() + ); assertTrue(e instanceof OpenSearchCorruptionException); assertTrue(e.getMessage().contains("has local copies of segments that differ from the primary")); segrepTarget.fail(new ReplicationFailedException(e), false); @@ -408,6 +459,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List<StoreFileMetadata> filesToFetch, IndexShard indexShard, + BiConsumer<String, Long> fileProgressTracker, ActionListener<GetSegmentFilesResponse> listener ) { listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); @@ -417,7 +469,7 @@ public void getSegmentFiles( SegmentReplicationTargetService.SegmentReplicationListener.class ); - segrepTarget = new SegmentReplicationTarget(spyIndexShard, segrepSource, segRepListener); + segrepTarget = new SegmentReplicationTarget(spyIndexShard, repCheckpoint, segrepSource, segRepListener); when(spyIndexShard.getSegmentMetadataMap()).thenReturn(storeMetadataSnapshots.get(0).asMap()); segrepTarget.startReplication(new ActionListener<Void>() { @Override diff --git a/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java 
b/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java index 48427ab7e5cd4..2cf006176022d 100644 --- a/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java @@ -9,7 +9,6 @@ package org.opensearch.indices.replication.checkpoint; import org.apache.lucene.codecs.Codec; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.ActionTestUtils; import org.opensearch.action.support.PlainActionFuture; @@ -19,12 +18,14 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexService; import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.SegmentReplicationTargetService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; @@ -34,12 +35,12 @@ import java.util.Collections; import java.util.concurrent.atomic.AtomicBoolean; +import static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.hamcrest.Matchers.sameInstance; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; -import static org.mockito.Mockito.verify; import static org.mockito.Mockito.times; -import static org.opensearch.test.ClusterServiceUtils.createClusterService; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class PublishCheckpointActionTests extends OpenSearchTestCase { @@ -61,7 +62,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java b/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java index 13ab40203ff2b..df180a8ab1007 100644 --- a/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java @@ -16,11 +16,11 @@ import org.opensearch.common.collect.Tuple; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.env.Environment; import org.opensearch.index.codec.CodecService; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; diff --git a/server/src/test/java/org/opensearch/indices/settings/InternalOrPrivateSettingsPlugin.java 
b/server/src/test/java/org/opensearch/indices/settings/InternalOrPrivateSettingsPlugin.java index 7e79551605a6e..b990e64bf0f18 100644 --- a/server/src/test/java/org/opensearch/indices/settings/InternalOrPrivateSettingsPlugin.java +++ b/server/src/test/java/org/opensearch/indices/settings/InternalOrPrivateSettingsPlugin.java @@ -32,10 +32,8 @@ package org.opensearch.indices.settings; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.ActionResponse; import org.opensearch.action.ActionType; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; @@ -48,10 +46,12 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.plugins.ActionPlugin; import org.opensearch.plugins.Plugin; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/test/java/org/opensearch/indices/store/ShardAttributesTests.java b/server/src/test/java/org/opensearch/indices/store/ShardAttributesTests.java new file mode 100644 index 0000000000000..7fa95fefe72fd --- /dev/null +++ b/server/src/test/java/org/opensearch/indices/store/ShardAttributesTests.java @@ -0,0 +1,49 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.store; + +import org.opensearch.core.common.io.stream.DataOutputStreamOutput; +import org.opensearch.core.common.io.stream.InputStreamStreamInput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataOutputStream; +import java.io.IOException; + +public class ShardAttributesTests extends OpenSearchTestCase { + + Index index = new Index("index", "test-uid"); + ShardId shardId = new ShardId(index, 0); + String customDataPath = "/path/to/data"; + + public void testShardAttributesConstructor() { + ShardAttributes attributes = new ShardAttributes(shardId, customDataPath); + assertEquals(shardId, attributes.getShardId()); + assertEquals(customDataPath, attributes.getCustomDataPath()); + } + + public void testSerialization() throws IOException { + ShardAttributes attributes1 = new ShardAttributes(shardId, customDataPath); + ByteArrayOutputStream bytes = new ByteArrayOutputStream(); + StreamOutput output = new DataOutputStreamOutput(new DataOutputStream(bytes)); + attributes1.writeTo(output); + output.close(); + StreamInput input = new InputStreamStreamInput(new ByteArrayInputStream(bytes.toByteArray())); + ShardAttributes attributes2 = new ShardAttributes(input); + input.close(); + assertEquals(attributes1.getShardId(), attributes2.getShardId()); + assertEquals(attributes1.getCustomDataPath(), attributes2.getCustomDataPath()); + } + +} diff --git a/server/src/test/java/org/opensearch/ingest/ConfigurationUtilsTests.java b/server/src/test/java/org/opensearch/ingest/ConfigurationUtilsTests.java index cf67c8af139fb..e18e5887c949e 100644 --- a/server/src/test/java/org/opensearch/ingest/ConfigurationUtilsTests.java +++ b/server/src/test/java/org/opensearch/ingest/ConfigurationUtilsTests.java @@ -179,6 +179,16 @@ public void testReadProcessors() throws Exception { assertThat(e2.getMetadata("opensearch.processor_tag"), equalTo(Collections.singletonList("my_second_unknown"))); assertThat(e2.getMetadata("opensearch.processor_type"), equalTo(Collections.singletonList("second_unknown_processor"))); assertThat(e2.getMetadata("opensearch.property_name"), is(nullValue())); + + // test null config + List<Map<String, Object>> config3 = new ArrayList<>(); + config3.add(Collections.singletonMap("null_processor", null)); + + OpenSearchParseException ex = expectThrows( + OpenSearchParseException.class, + () -> ConfigurationUtils.readProcessorConfigs(config3, scriptService, registry) + ); + assertEquals("the config of processor [null_processor] cannot be null", ex.getMessage()); } public void testReadProcessorNullDescription() throws Exception { @@ -235,6 +245,12 @@ public void testReadProcessorFromObjectOrMap() throws Exception { () -> ConfigurationUtils.readProcessor(registry, scriptService, "unknown_processor", invalidConfig) ); assertThat(ex.getMessage(), equalTo("property isn't a map, but of type [" + invalidConfig.getClass().getName() + "]")); + + ex = expectThrows( + OpenSearchParseException.class, + () -> ConfigurationUtils.readProcessor(registry, scriptService, "null_processor", null) + ); + assertEquals("expect the config of processor [null_processor] to be map, but is null", ex.getMessage()); } public void testNoScriptCompilation() { diff --git 
a/server/src/test/java/org/opensearch/ingest/IngestDocumentTests.java b/server/src/test/java/org/opensearch/ingest/IngestDocumentTests.java index 8358dadf9cc3a..be035bc6ef7ea 100644 --- a/server/src/test/java/org/opensearch/ingest/IngestDocumentTests.java +++ b/server/src/test/java/org/opensearch/ingest/IngestDocumentTests.java @@ -95,6 +95,12 @@ public void setTestIngestDocument() { ingestDocument = new IngestDocument("index", "id", null, null, null, document); } + public void testSelfReferencingSource() { + Map<String, Object> value = new HashMap<>(); + value.put("foo", value); + expectThrows(IllegalArgumentException.class, () -> IngestDocument.deepCopyMap(value)); + } + public void testSimpleGetFieldValue() { assertThat(ingestDocument.getFieldValue("foo", String.class), equalTo("bar")); assertThat(ingestDocument.getFieldValue("int", Integer.class), equalTo(123)); diff --git a/server/src/test/java/org/opensearch/ingest/IngestMetadataTests.java b/server/src/test/java/org/opensearch/ingest/IngestMetadataTests.java index fdaf6d145da64..77444e43af910 100644 --- a/server/src/test/java/org/opensearch/ingest/IngestMetadataTests.java +++ b/server/src/test/java/org/opensearch/ingest/IngestMetadataTests.java @@ -34,13 +34,13 @@ import org.opensearch.cluster.DiffableUtils; import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -57,18 +57,18 @@ public void testFromXContent() throws IOException { PipelineConfiguration pipeline = new PipelineConfiguration( "1", new BytesArray("{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"), - XContentType.JSON + MediaTypeRegistry.JSON ); PipelineConfiguration pipeline2 = new PipelineConfiguration( "2", new BytesArray("{\"processors\": [{\"set\" : {\"field\": \"_field1\", \"value\": \"_value1\"}}]}"), - XContentType.JSON + MediaTypeRegistry.JSON ); Map<String, PipelineConfiguration> map = new HashMap<>(); map.put(pipeline.getId(), pipeline); map.put(pipeline2.getId(), pipeline2); IngestMetadata ingestMetadata = new IngestMetadata(map); - XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); builder.prettyPrint(); builder.startObject(); ingestMetadata.toXContent(builder, ToXContent.EMPTY_PARAMS); @@ -90,14 +90,14 @@ public void testDiff() throws Exception { BytesReference pipelineConfig = new BytesArray("{}"); Map<String, PipelineConfiguration> pipelines = new HashMap<>(); - pipelines.put("1", new PipelineConfiguration("1", pipelineConfig, XContentType.JSON)); - pipelines.put("2", new PipelineConfiguration("2", pipelineConfig, XContentType.JSON)); + pipelines.put("1", new PipelineConfiguration("1", pipelineConfig, MediaTypeRegistry.JSON)); + pipelines.put("2", new PipelineConfiguration("2", pipelineConfig, MediaTypeRegistry.JSON)); IngestMetadata ingestMetadata1 = new IngestMetadata(pipelines); pipelines = new HashMap<>(); - pipelines.put("1", new 
PipelineConfiguration("1", pipelineConfig, XContentType.JSON)); - pipelines.put("3", new PipelineConfiguration("3", pipelineConfig, XContentType.JSON)); - pipelines.put("4", new PipelineConfiguration("4", pipelineConfig, XContentType.JSON)); + pipelines.put("1", new PipelineConfiguration("1", pipelineConfig, MediaTypeRegistry.JSON)); + pipelines.put("3", new PipelineConfiguration("3", pipelineConfig, MediaTypeRegistry.JSON)); + pipelines.put("4", new PipelineConfiguration("4", pipelineConfig, MediaTypeRegistry.JSON)); IngestMetadata ingestMetadata2 = new IngestMetadata(pipelines); IngestMetadata.IngestMetadataDiff diff = (IngestMetadata.IngestMetadataDiff) ingestMetadata2.diff(ingestMetadata1); @@ -110,13 +110,13 @@ public void testDiff() throws Exception { IngestMetadata endResult = (IngestMetadata) diff.apply(ingestMetadata2); assertThat(endResult, not(equalTo(ingestMetadata1))); assertThat(endResult.getPipelines().size(), equalTo(3)); - assertThat(endResult.getPipelines().get("1"), equalTo(new PipelineConfiguration("1", pipelineConfig, XContentType.JSON))); - assertThat(endResult.getPipelines().get("3"), equalTo(new PipelineConfiguration("3", pipelineConfig, XContentType.JSON))); - assertThat(endResult.getPipelines().get("4"), equalTo(new PipelineConfiguration("4", pipelineConfig, XContentType.JSON))); + assertThat(endResult.getPipelines().get("1"), equalTo(new PipelineConfiguration("1", pipelineConfig, MediaTypeRegistry.JSON))); + assertThat(endResult.getPipelines().get("3"), equalTo(new PipelineConfiguration("3", pipelineConfig, MediaTypeRegistry.JSON))); + assertThat(endResult.getPipelines().get("4"), equalTo(new PipelineConfiguration("4", pipelineConfig, MediaTypeRegistry.JSON))); pipelines = new HashMap<>(); - pipelines.put("1", new PipelineConfiguration("1", new BytesArray("{}"), XContentType.JSON)); - pipelines.put("2", new PipelineConfiguration("2", new BytesArray("{}"), XContentType.JSON)); + pipelines.put("1", new PipelineConfiguration("1", new BytesArray("{}"), MediaTypeRegistry.JSON)); + pipelines.put("2", new PipelineConfiguration("2", new BytesArray("{}"), MediaTypeRegistry.JSON)); IngestMetadata ingestMetadata3 = new IngestMetadata(pipelines); diff = (IngestMetadata.IngestMetadataDiff) ingestMetadata3.diff(ingestMetadata1); @@ -126,12 +126,12 @@ public void testDiff() throws Exception { endResult = (IngestMetadata) diff.apply(ingestMetadata3); assertThat(endResult, equalTo(ingestMetadata1)); assertThat(endResult.getPipelines().size(), equalTo(2)); - assertThat(endResult.getPipelines().get("1"), equalTo(new PipelineConfiguration("1", pipelineConfig, XContentType.JSON))); - assertThat(endResult.getPipelines().get("2"), equalTo(new PipelineConfiguration("2", pipelineConfig, XContentType.JSON))); + assertThat(endResult.getPipelines().get("1"), equalTo(new PipelineConfiguration("1", pipelineConfig, MediaTypeRegistry.JSON))); + assertThat(endResult.getPipelines().get("2"), equalTo(new PipelineConfiguration("2", pipelineConfig, MediaTypeRegistry.JSON))); pipelines = new HashMap<>(); - pipelines.put("1", new PipelineConfiguration("1", new BytesArray("{}"), XContentType.JSON)); - pipelines.put("2", new PipelineConfiguration("2", new BytesArray("{\"key\" : \"value\"}"), XContentType.JSON)); + pipelines.put("1", new PipelineConfiguration("1", new BytesArray("{}"), MediaTypeRegistry.JSON)); + pipelines.put("2", new PipelineConfiguration("2", new BytesArray("{\"key\" : \"value\"}"), MediaTypeRegistry.JSON)); IngestMetadata ingestMetadata4 = new IngestMetadata(pipelines); diff = 
(IngestMetadata.IngestMetadataDiff) ingestMetadata4.diff(ingestMetadata1); @@ -141,10 +141,10 @@ public void testDiff() throws Exception { endResult = (IngestMetadata) diff.apply(ingestMetadata4); assertThat(endResult, not(equalTo(ingestMetadata1))); assertThat(endResult.getPipelines().size(), equalTo(2)); - assertThat(endResult.getPipelines().get("1"), equalTo(new PipelineConfiguration("1", pipelineConfig, XContentType.JSON))); + assertThat(endResult.getPipelines().get("1"), equalTo(new PipelineConfiguration("1", pipelineConfig, MediaTypeRegistry.JSON))); assertThat( endResult.getPipelines().get("2"), - equalTo(new PipelineConfiguration("2", new BytesArray("{\"key\" : \"value\"}"), XContentType.JSON)) + equalTo(new PipelineConfiguration("2", new BytesArray("{\"key\" : \"value\"}"), MediaTypeRegistry.JSON)) ); } } diff --git a/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java b/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java index 7f5ec52c3620c..2edfe87387c92 100644 --- a/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java +++ b/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java @@ -60,25 +60,25 @@ import org.opensearch.common.metrics.OperationStats; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.OpenSearchExecutors; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.cbor.CborXContent; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexSettings; import org.opensearch.index.VersionType; +import org.opensearch.indices.IndicesService; import org.opensearch.plugins.IngestPlugin; import org.opensearch.script.MockScriptEngine; import org.opensearch.script.Script; import org.opensearch.script.ScriptModule; import org.opensearch.script.ScriptService; import org.opensearch.script.ScriptType; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.MockLogAppender; +import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool.Names; import org.junit.Before; -import org.mockito.ArgumentMatcher; -import org.mockito.invocation.InvocationOnMock; import java.nio.charset.StandardCharsets; import java.util.Arrays; @@ -97,6 +97,9 @@ import java.util.function.LongSupplier; import java.util.stream.Collectors; +import org.mockito.ArgumentMatcher; +import org.mockito.invocation.InvocationOnMock; + import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static org.hamcrest.Matchers.equalTo; @@ -110,9 +113,9 @@ import static org.mockito.Mockito.anyInt; import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.argThat; -import static org.mockito.Mockito.eq; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; @@ -147,7 +150,8 @@ public void testIngestPlugin() { null, null, Collections.singletonList(DUMMY_PLUGIN), - client + client, + mock(IndicesService.class) ); Map<String, Processor.Factory> factories = ingestService.getProcessorFactories(); 
assertTrue(factories.containsKey("foo")); @@ -165,7 +169,8 @@ public void testIngestPluginDuplicate() { null, null, Arrays.asList(DUMMY_PLUGIN, DUMMY_PLUGIN), - client + client, + mock(IndicesService.class) ) ); assertTrue(e.getMessage(), e.getMessage().contains("already registered")); @@ -180,7 +185,8 @@ public void testExecuteIndexPipelineDoesNotExist() { null, null, Collections.singletonList(DUMMY_PLUGIN), - client + client, + mock(IndicesService.class) ); final IndexRequest indexRequest = new IndexRequest("_index").id("_id") .source(emptyMap()) @@ -221,7 +227,7 @@ public void testUpdatePipelines() { PipelineConfiguration pipeline = new PipelineConfiguration( "_id", new BytesArray("{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"), - XContentType.JSON + MediaTypeRegistry.JSON ); IngestMetadata ingestMetadata = new IngestMetadata(Collections.singletonMap("_id", pipeline)); clusterState = ClusterState.builder(clusterState) @@ -239,7 +245,7 @@ public void testInnerUpdatePipelines() { IngestService ingestService = createWithProcessors(); assertThat(ingestService.pipelines().size(), is(0)); - PipelineConfiguration pipeline1 = new PipelineConfiguration("_id1", new BytesArray("{\"processors\": []}"), XContentType.JSON); + PipelineConfiguration pipeline1 = new PipelineConfiguration("_id1", new BytesArray("{\"processors\": []}"), MediaTypeRegistry.JSON); IngestMetadata ingestMetadata = new IngestMetadata(mapOf("_id1", pipeline1)); ingestService.innerUpdatePipelines(ingestMetadata); @@ -247,7 +253,7 @@ public void testInnerUpdatePipelines() { assertThat(ingestService.pipelines().get("_id1").pipeline.getId(), equalTo("_id1")); assertThat(ingestService.pipelines().get("_id1").pipeline.getProcessors().size(), equalTo(0)); - PipelineConfiguration pipeline2 = new PipelineConfiguration("_id2", new BytesArray("{\"processors\": []}"), XContentType.JSON); + PipelineConfiguration pipeline2 = new PipelineConfiguration("_id2", new BytesArray("{\"processors\": []}"), MediaTypeRegistry.JSON); ingestMetadata = new IngestMetadata(mapOf("_id1", pipeline1, "_id2", pipeline2)); ingestService.innerUpdatePipelines(ingestMetadata); @@ -257,7 +263,7 @@ public void testInnerUpdatePipelines() { assertThat(ingestService.pipelines().get("_id2").pipeline.getId(), equalTo("_id2")); assertThat(ingestService.pipelines().get("_id2").pipeline.getProcessors().size(), equalTo(0)); - PipelineConfiguration pipeline3 = new PipelineConfiguration("_id3", new BytesArray("{\"processors\": []}"), XContentType.JSON); + PipelineConfiguration pipeline3 = new PipelineConfiguration("_id3", new BytesArray("{\"processors\": []}"), MediaTypeRegistry.JSON); ingestMetadata = new IngestMetadata(mapOf("_id1", pipeline1, "_id2", pipeline2, "_id3", pipeline3)); ingestService.innerUpdatePipelines(ingestMetadata); @@ -281,7 +287,7 @@ public void testInnerUpdatePipelines() { pipeline3 = new PipelineConfiguration( "_id3", new BytesArray("{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"), - XContentType.JSON + MediaTypeRegistry.JSON ); ingestMetadata = new IngestMetadata(mapOf("_id1", pipeline1, "_id3", pipeline3)); @@ -323,7 +329,7 @@ public void testDelete() { PipelineConfiguration config = new PipelineConfiguration( "_id", new BytesArray("{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"), - XContentType.JSON + MediaTypeRegistry.JSON ); IngestMetadata ingestMetadata = new IngestMetadata(Collections.singletonMap("_id", config)); ClusterState clusterState = 
ClusterState.builder(new ClusterName("_name")).build(); @@ -355,7 +361,7 @@ public void testValidateNoIngestInfo() throws Exception { PutPipelineRequest putRequest = new PutPipelineRequest( "_id", new BytesArray("{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"), - XContentType.JSON + MediaTypeRegistry.JSON ); Exception e = expectThrows(IllegalStateException.class, () -> ingestService.validatePipeline(emptyMap(), putRequest)); assertEquals("Ingest info is empty", e.getMessage()); @@ -384,7 +390,7 @@ public void testGetProcessorsInPipeline() throws Exception { "{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\", \"tag\": \"tag1\"}}," + "{\"remove\" : {\"field\": \"_field\", \"tag\": \"tag2\"}}]}" ), - XContentType.JSON + MediaTypeRegistry.JSON ); ClusterState previousClusterState = clusterState; clusterState = IngestService.innerPut(putRequest, clusterState); @@ -450,7 +456,7 @@ public void testGetProcessorsInPipelineComplexConditional() throws Exception { PutPipelineRequest putRequest = new PutPipelineRequest( id, new BytesArray("{\"processors\": [{\"complexSet\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"), - XContentType.JSON + MediaTypeRegistry.JSON ); ClusterState previousClusterState = clusterState; clusterState = IngestService.innerPut(putRequest, clusterState); @@ -476,7 +482,7 @@ public void testCrud() throws Exception { PutPipelineRequest putRequest = new PutPipelineRequest( id, new BytesArray("{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"), - XContentType.JSON + MediaTypeRegistry.JSON ); ClusterState previousClusterState = clusterState; clusterState = IngestService.innerPut(putRequest, clusterState); @@ -504,7 +510,7 @@ public void testPut() { ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // add a new pipeline: - PutPipelineRequest putRequest = new PutPipelineRequest(id, new BytesArray("{\"processors\": []}"), XContentType.JSON); + PutPipelineRequest putRequest = new PutPipelineRequest(id, new BytesArray("{\"processors\": []}"), MediaTypeRegistry.JSON); ClusterState previousClusterState = clusterState; clusterState = IngestService.innerPut(putRequest, clusterState); ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); @@ -518,7 +524,7 @@ public void testPut() { putRequest = new PutPipelineRequest( id, new BytesArray("{\"processors\": [], \"description\": \"_description\"}"), - XContentType.JSON + MediaTypeRegistry.JSON ); previousClusterState = clusterState; clusterState = IngestService.innerPut(putRequest, clusterState); @@ -540,7 +546,7 @@ public void testPutWithErrorResponse() throws IllegalAccessException { PutPipelineRequest putRequest = new PutPipelineRequest( id, new BytesArray("{\"description\": \"empty processors\"}"), - XContentType.JSON + MediaTypeRegistry.JSON ); ClusterState previousClusterState = clusterState; clusterState = IngestService.innerPut(putRequest, clusterState); @@ -572,9 +578,9 @@ public void testDeleteUsingWildcard() { IngestService ingestService = createWithProcessors(); HashMap<String, PipelineConfiguration> pipelines = new HashMap<>(); BytesArray definition = new BytesArray("{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"); - pipelines.put("p1", new PipelineConfiguration("p1", definition, XContentType.JSON)); - pipelines.put("p2", new PipelineConfiguration("p2", definition, XContentType.JSON)); - pipelines.put("q1", new 
PipelineConfiguration("q1", definition, XContentType.JSON)); + pipelines.put("p1", new PipelineConfiguration("p1", definition, MediaTypeRegistry.JSON)); + pipelines.put("p2", new PipelineConfiguration("p2", definition, MediaTypeRegistry.JSON)); + pipelines.put("q1", new PipelineConfiguration("q1", definition, MediaTypeRegistry.JSON)); IngestMetadata ingestMetadata = new IngestMetadata(pipelines); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); ClusterState previousClusterState = clusterState; @@ -620,7 +626,7 @@ public void testDeleteWithExistingUnmatchedPipelines() { IngestService ingestService = createWithProcessors(); HashMap<String, PipelineConfiguration> pipelines = new HashMap<>(); BytesArray definition = new BytesArray("{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\"}}]}"); - pipelines.put("p1", new PipelineConfiguration("p1", definition, XContentType.JSON)); + pipelines.put("p1", new PipelineConfiguration("p1", definition, MediaTypeRegistry.JSON)); IngestMetadata ingestMetadata = new IngestMetadata(pipelines); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); ClusterState previousClusterState = clusterState; @@ -641,8 +647,8 @@ public void testDeleteWithExistingUnmatchedPipelines() { public void testGetPipelines() { Map<String, PipelineConfiguration> configs = new HashMap<>(); - configs.put("_id1", new PipelineConfiguration("_id1", new BytesArray("{\"processors\": []}"), XContentType.JSON)); - configs.put("_id2", new PipelineConfiguration("_id2", new BytesArray("{\"processors\": []}"), XContentType.JSON)); + configs.put("_id1", new PipelineConfiguration("_id1", new BytesArray("{\"processors\": []}"), MediaTypeRegistry.JSON)); + configs.put("_id2", new PipelineConfiguration("_id2", new BytesArray("{\"processors\": []}"), MediaTypeRegistry.JSON)); assertThat(IngestService.innerGetPipelines(null, "_id1").isEmpty(), is(true)); @@ -684,7 +690,7 @@ public void testValidate() throws Exception { "{\"processors\": [{\"set\" : {\"field\": \"_field\", \"value\": \"_value\", \"tag\": \"tag1\"}}," + "{\"remove\" : {\"field\": \"_field\", \"tag\": \"tag2\"}}]}" ), - XContentType.JSON + MediaTypeRegistry.JSON ); DiscoveryNode node1 = new DiscoveryNode("_node_id1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); @@ -724,7 +730,7 @@ public String getType() { PutPipelineRequest putRequest = new PutPipelineRequest( id, new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), - XContentType.JSON + MediaTypeRegistry.JSON ); ClusterState previousClusterState = clusterState; clusterState = IngestService.innerPut(putRequest, clusterState); @@ -770,7 +776,7 @@ public void testExecuteBulkPipelineDoesNotExist() { PutPipelineRequest putRequest = new PutPipelineRequest( "_id", new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), - XContentType.JSON + MediaTypeRegistry.JSON ); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty ClusterState previousClusterState = clusterState; @@ -817,7 +823,7 @@ public void testExecuteSuccess() { PutPipelineRequest putRequest = new PutPipelineRequest( "_id", new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), - XContentType.JSON + MediaTypeRegistry.JSON ); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty ClusterState previousClusterState = clusterState; @@ -848,7 +854,7 @@ public void testExecuteEmptyPipeline() throws Exception { PutPipelineRequest 
putRequest = new PutPipelineRequest( "_id", new BytesArray("{\"processors\": [], \"description\": \"_description\"}"), - XContentType.JSON + MediaTypeRegistry.JSON ); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty ClusterState previousClusterState = clusterState; @@ -882,7 +888,7 @@ public void testExecutePropagateAllMetadataUpdates() throws Exception { PutPipelineRequest putRequest = new PutPipelineRequest( "_id", new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), - XContentType.JSON + MediaTypeRegistry.JSON ); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty ClusterState previousClusterState = clusterState; @@ -949,7 +955,7 @@ public void testExecuteFailure() throws Exception { PutPipelineRequest putRequest = new PutPipelineRequest( "_id", new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), - XContentType.JSON + MediaTypeRegistry.JSON ); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty ClusterState previousClusterState = clusterState; @@ -1009,7 +1015,7 @@ public void testExecuteSuccessWithOnFailure() throws Exception { PutPipelineRequest putRequest = new PutPipelineRequest( "_id", new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), - XContentType.JSON + MediaTypeRegistry.JSON ); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty ClusterState previousClusterState = clusterState; @@ -1052,7 +1058,7 @@ public void testExecuteFailureWithNestedOnFailure() throws Exception { PutPipelineRequest putRequest = new PutPipelineRequest( "_id", new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), - XContentType.JSON + MediaTypeRegistry.JSON ); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty ClusterState previousClusterState = clusterState; @@ -1123,7 +1129,7 @@ public void testBulkRequestExecutionWithFailures() throws Exception { PutPipelineRequest putRequest = new PutPipelineRequest( "_id", new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), - XContentType.JSON + MediaTypeRegistry.JSON ); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty ClusterState previousClusterState = clusterState; @@ -1181,7 +1187,7 @@ public void testBulkRequestExecution() throws Exception { PutPipelineRequest putRequest = new PutPipelineRequest( "_id", new BytesArray("{\"processors\": [{\"mock\": {}}], \"description\": \"_description\"}"), - XContentType.JSON + MediaTypeRegistry.JSON ); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); ClusterState previousClusterState = clusterState; @@ -1241,13 +1247,13 @@ public void testStats() throws Exception { PutPipelineRequest putRequest = new PutPipelineRequest( "_id1", new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), - XContentType.JSON + MediaTypeRegistry.JSON ); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty ClusterState previousClusterState = clusterState; clusterState = IngestService.innerPut(putRequest, clusterState); ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); - putRequest = new PutPipelineRequest("_id2", new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), XContentType.JSON); + putRequest = new PutPipelineRequest("_id2", new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), MediaTypeRegistry.JSON); previousClusterState 
= clusterState; clusterState = IngestService.innerPut(putRequest, clusterState); ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); @@ -1307,7 +1313,7 @@ public void testStats() throws Exception { putRequest = new PutPipelineRequest( "_id1", new BytesArray("{\"processors\": [{\"mock\" : {}}, {\"mock\" : {}}]}"), - XContentType.JSON + MediaTypeRegistry.JSON ); previousClusterState = clusterState; clusterState = IngestService.innerPut(putRequest, clusterState); @@ -1340,7 +1346,7 @@ public void testStats() throws Exception { putRequest = new PutPipelineRequest( "_id1", new BytesArray("{\"processors\": [{\"failure-mock\" : { \"on_failure\": [{\"mock\" : {}}]}}, {\"mock\" : {}}]}"), - XContentType.JSON + MediaTypeRegistry.JSON ); previousClusterState = clusterState; clusterState = IngestService.innerPut(putRequest, clusterState); @@ -1418,7 +1424,7 @@ public String getDescription() { PutPipelineRequest putRequest = new PutPipelineRequest( "_id", new BytesArray("{\"processors\": [{\"drop\" : {}}, {\"mock\" : {}}]}"), - XContentType.JSON + MediaTypeRegistry.JSON ); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty ClusterState previousClusterState = clusterState; @@ -1483,7 +1489,8 @@ public Map<String, Processor.Factory> getProcessors(Processor.Parameters paramet null, null, Arrays.asList(testPlugin), - client + client, + mock(IndicesService.class) ); ingestService.addIngestClusterStateListener(ingestClusterStateListener); @@ -1491,7 +1498,7 @@ public Map<String, Processor.Factory> getProcessors(Processor.Parameters paramet PutPipelineRequest putRequest = new PutPipelineRequest( "_id", new BytesArray("{\"processors\": [{\"test\" : {}}]}"), - XContentType.JSON + MediaTypeRegistry.JSON ); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty ClusterState previousClusterState = clusterState; @@ -1514,7 +1521,7 @@ public void testCBORParsing() throws Exception { PutPipelineRequest putRequest = new PutPipelineRequest( "_id", new BytesArray("{\"processors\": [{\"foo\" : {}}]}"), - XContentType.JSON + MediaTypeRegistry.JSON ); clusterState = IngestService.innerPut(putRequest, clusterState); ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); @@ -1700,7 +1707,7 @@ private static IngestService createWithProcessors(Map<String, Processor.Factory> public Map<String, Processor.Factory> getProcessors(final Processor.Parameters parameters) { return processors; } - }), client); + }), client, mock(IndicesService.class)); } private CompoundProcessor mockCompoundProcessor() { diff --git a/server/src/test/java/org/opensearch/ingest/PipelineConfigurationTests.java b/server/src/test/java/org/opensearch/ingest/PipelineConfigurationTests.java index aed08c2d1875a..7bcb29addce78 100644 --- a/server/src/test/java/org/opensearch/ingest/PipelineConfigurationTests.java +++ b/server/src/test/java/org/opensearch/ingest/PipelineConfigurationTests.java @@ -32,18 +32,19 @@ package org.opensearch.ingest; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.xcontent.ContextParser; import 
org.opensearch.core.xcontent.DeprecationHandler; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.test.AbstractXContentTestCase; import java.io.IOException; @@ -56,15 +57,15 @@ public void testSerialization() throws IOException { PipelineConfiguration configuration = new PipelineConfiguration( "1", new BytesArray("{}".getBytes(StandardCharsets.UTF_8)), - XContentType.JSON + MediaTypeRegistry.JSON ); - assertEquals(XContentType.JSON, configuration.getMediaType()); + assertEquals(MediaTypeRegistry.JSON, configuration.getMediaType()); BytesStreamOutput out = new BytesStreamOutput(); configuration.writeTo(out); StreamInput in = StreamInput.wrap(out.bytes().toBytesRef().bytes); PipelineConfiguration serialized = PipelineConfiguration.readFrom(in); - assertEquals(XContentType.JSON, serialized.getMediaType()); + assertEquals(MediaTypeRegistry.JSON, serialized.getMediaType()); assertEquals("{}", serialized.getConfig().utf8ToString()); } @@ -73,7 +74,7 @@ public void testParser() throws IOException { XContentType xContentType = randomFrom(XContentType.values()); final BytesReference bytes; try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) { - new PipelineConfiguration("1", new BytesArray("{}".getBytes(StandardCharsets.UTF_8)), XContentType.JSON).toXContent( + new PipelineConfiguration("1", new BytesArray("{}".getBytes(StandardCharsets.UTF_8)), MediaTypeRegistry.JSON).toXContent( builder, ToXContent.EMPTY_PARAMS ); @@ -96,7 +97,7 @@ protected PipelineConfiguration createTestInstance() { } else { config = new BytesArray("{\"foo\": \"bar\"}".getBytes(StandardCharsets.UTF_8)); } - return new PipelineConfiguration(randomAlphaOfLength(4), config, XContentType.JSON); + return new PipelineConfiguration(randomAlphaOfLength(4), config, MediaTypeRegistry.JSON); } @Override diff --git a/server/src/test/java/org/opensearch/ingest/TrackingResultProcessorTests.java b/server/src/test/java/org/opensearch/ingest/TrackingResultProcessorTests.java index 16e60cec8c941..af18727336960 100644 --- a/server/src/test/java/org/opensearch/ingest/TrackingResultProcessorTests.java +++ b/server/src/test/java/org/opensearch/ingest/TrackingResultProcessorTests.java @@ -41,7 +41,6 @@ import org.opensearch.script.ScriptType; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; -import org.mockito.Mockito; import java.util.ArrayList; import java.util.Arrays; @@ -50,6 +49,8 @@ import java.util.List; import java.util.Map; +import org.mockito.Mockito; + import static org.opensearch.ingest.CompoundProcessor.ON_FAILURE_MESSAGE_FIELD; import static org.opensearch.ingest.CompoundProcessor.ON_FAILURE_PROCESSOR_TAG_FIELD; import static org.opensearch.ingest.CompoundProcessor.ON_FAILURE_PROCESSOR_TYPE_FIELD;
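The recurring XContentType.JSON -> MediaTypeRegistry.JSON substitution in the hunks above reflects the move of the media-type constants into org.opensearch.core.xcontent.MediaTypeRegistry, which these files now import. A minimal sketch of the migrated call pattern, assuming placeholder pipeline id and body rather than values from these tests:

import org.opensearch.action.ingest.PutPipelineRequest;
import org.opensearch.core.common.bytes.BytesArray;
import org.opensearch.core.xcontent.MediaTypeRegistry;

// The media-type argument now comes from the core registry; the former
// XContentType.JSON constant is replaced one-for-one.
static PutPipelineRequest jsonPutPipelineRequest() {
    return new PutPipelineRequest(
        "_id",                                  // placeholder pipeline id
        new BytesArray("{\"processors\": []}"), // placeholder pipeline definition
        MediaTypeRegistry.JSON                  // was: XContentType.JSON
    );
}

diff --git a/server/src/test/java/org/opensearch/lucene/analysis/miscellaneous/DeDuplicatingTokenFilterTests.java b/server/src/test/java/org/opensearch/lucene/analysis/miscellaneous/DeDuplicatingTokenFilterTests.java index 1bcb17a81efee..6e8b7ef5f5f42 100644 --- a/server/src/test/java/org/opensearch/lucene/analysis/miscellaneous/DeDuplicatingTokenFilterTests.java +++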
b/server/src/test/java/org/opensearch/lucene/analysis/miscellaneous/DeDuplicatingTokenFilterTests.java @@ -33,13 +33,13 @@ package org.opensearch.lucene.analysis.miscellaneous; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.tests.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.miscellaneous.DeDuplicatingTokenFilter; import org.apache.lucene.analysis.miscellaneous.DuplicateByteSequenceSpotter; import org.apache.lucene.analysis.miscellaneous.DuplicateSequenceAttribute; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; +import org.apache.lucene.tests.analysis.MockTokenizer; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/lucene/analysis/miscellaneous/TruncateTokenFilterTests.java b/server/src/test/java/org/opensearch/lucene/analysis/miscellaneous/TruncateTokenFilterTests.java index bce9073f85bbe..f3a7436290a3b 100644 --- a/server/src/test/java/org/opensearch/lucene/analysis/miscellaneous/TruncateTokenFilterTests.java +++ b/server/src/test/java/org/opensearch/lucene/analysis/miscellaneous/TruncateTokenFilterTests.java @@ -33,11 +33,11 @@ package org.opensearch.lucene.analysis.miscellaneous; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.tests.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.miscellaneous.TruncateTokenFilter; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; +import org.apache.lucene.tests.analysis.MockTokenizer; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/lucene/grouping/CollapsingTopDocsCollectorTests.java b/server/src/test/java/org/opensearch/lucene/grouping/CollapsingTopDocsCollectorTests.java index 9a2a44abbb4ef..f425423d1cd82 100644 --- a/server/src/test/java/org/opensearch/lucene/grouping/CollapsingTopDocsCollectorTests.java +++ b/server/src/test/java/org/opensearch/lucene/grouping/CollapsingTopDocsCollectorTests.java @@ -40,8 +40,6 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.tests.index.RandomIndexWriter; -import org.apache.lucene.tests.search.CheckHits; import org.apache.lucene.search.Collector; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.IndexSearcher; @@ -59,6 +57,8 @@ import org.apache.lucene.search.grouping.CollapseTopFieldDocs; import org.apache.lucene.search.grouping.CollapsingTopDocsCollector; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.tests.search.CheckHits; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; import org.opensearch.index.mapper.MappedFieldType; diff --git a/server/src/test/java/org/opensearch/lucene/index/ShuffleForcedMergePolicyTests.java b/server/src/test/java/org/opensearch/lucene/index/ShuffleForcedMergePolicyTests.java index 8842b7db5c6b4..2f6581bfde8d4 100644 --- a/server/src/test/java/org/opensearch/lucene/index/ShuffleForcedMergePolicyTests.java +++ b/server/src/test/java/org/opensearch/lucene/index/ShuffleForcedMergePolicyTests.java @@ -36,7 +36,6 @@ import org.apache.lucene.document.Field; import 
org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.StringField; -import org.apache.lucene.tests.index.BaseMergePolicyTestCase; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; @@ -48,6 +47,7 @@ import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.BaseMergePolicyTestCase; import java.io.IOException; import java.util.function.Consumer; diff --git a/server/src/test/java/org/opensearch/lucene/misc/search/similarity/LegacyBM25SimilarityTests.java b/server/src/test/java/org/opensearch/lucene/misc/search/similarity/LegacyBM25SimilarityTests.java index c58847bb56a3b..580789525522d 100644 --- a/server/src/test/java/org/opensearch/lucene/misc/search/similarity/LegacyBM25SimilarityTests.java +++ b/server/src/test/java/org/opensearch/lucene/misc/search/similarity/LegacyBM25SimilarityTests.java @@ -25,12 +25,12 @@ package org.opensearch.lucene.misc.search.similarity; -import java.util.Random; - import org.apache.lucene.misc.search.similarity.LegacyBM25Similarity; import org.apache.lucene.search.similarities.BM25Similarity; -import org.apache.lucene.tests.search.similarities.BaseSimilarityTestCase; import org.apache.lucene.search.similarities.Similarity; +import org.apache.lucene.tests.search.similarities.BaseSimilarityTestCase; + +import java.util.Random; @Deprecated public class LegacyBM25SimilarityTests extends BaseSimilarityTestCase { diff --git a/server/src/test/java/org/opensearch/lucene/queries/BaseRandomBinaryDocValuesRangeQueryTestCase.java b/server/src/test/java/org/opensearch/lucene/queries/BaseRandomBinaryDocValuesRangeQueryTestCase.java index 4e12c946cf353..5d49fbe2fbfa1 100644 --- a/server/src/test/java/org/opensearch/lucene/queries/BaseRandomBinaryDocValuesRangeQueryTestCase.java +++ b/server/src/test/java/org/opensearch/lucene/queries/BaseRandomBinaryDocValuesRangeQueryTestCase.java @@ -33,8 +33,8 @@ import org.apache.lucene.document.BinaryDocValuesField; import org.apache.lucene.document.Field; -import org.apache.lucene.tests.search.BaseRangeFieldQueryTestCase; import org.apache.lucene.search.Query; +import org.apache.lucene.tests.search.BaseRangeFieldQueryTestCase; import org.apache.lucene.util.BytesRef; import org.opensearch.index.mapper.RangeFieldMapper; import org.opensearch.index.mapper.RangeType; diff --git a/server/src/test/java/org/opensearch/lucene/queries/BinaryDocValuesRangeQueryTests.java b/server/src/test/java/org/opensearch/lucene/queries/BinaryDocValuesRangeQueryTests.java index faf84b53e0b16..0a1b38ed6f5ba 100644 --- a/server/src/test/java/org/opensearch/lucene/queries/BinaryDocValuesRangeQueryTests.java +++ b/server/src/test/java/org/opensearch/lucene/queries/BinaryDocValuesRangeQueryTests.java @@ -34,10 +34,10 @@ import org.apache.lucene.document.BinaryDocValuesField; import org.apache.lucene.document.Document; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; import org.opensearch.index.mapper.RangeFieldMapper; import org.opensearch.index.mapper.RangeType; diff --git a/server/src/test/java/org/opensearch/lucene/queries/BlendedTermQueryTests.java 
b/server/src/test/java/org/opensearch/lucene/queries/BlendedTermQueryTests.java index 6844742759883..ca420bfbf8fbb 100644 --- a/server/src/test/java/org/opensearch/lucene/queries/BlendedTermQueryTests.java +++ b/server/src/test/java/org/opensearch/lucene/queries/BlendedTermQueryTests.java @@ -31,7 +31,6 @@ package org.opensearch.lucene.queries; -import org.apache.lucene.tests.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; @@ -47,7 +46,6 @@ import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; -import org.apache.lucene.tests.search.QueryUtils; import org.apache.lucene.search.QueryVisitor; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TermQuery; @@ -56,10 +54,12 @@ import org.apache.lucene.search.similarities.ClassicSimilarity; import org.apache.lucene.search.similarities.Similarity; import org.apache.lucene.store.Directory; -import org.opensearch.test.OpenSearchTestCase; +import org.apache.lucene.tests.analysis.MockAnalyzer; +import org.apache.lucene.tests.search.QueryUtils; import org.opensearch.test.EqualsHashCodeTestUtils; import org.opensearch.test.EqualsHashCodeTestUtils.CopyFunction; import org.opensearch.test.EqualsHashCodeTestUtils.MutateFunction; +import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; import java.util.Arrays; diff --git a/server/src/test/java/org/opensearch/lucene/queries/MinDocQueryTests.java b/server/src/test/java/org/opensearch/lucene/queries/MinDocQueryTests.java index a75a1d5b2fdb7..fffb1b33fb563 100644 --- a/server/src/test/java/org/opensearch/lucene/queries/MinDocQueryTests.java +++ b/server/src/test/java/org/opensearch/lucene/queries/MinDocQueryTests.java @@ -35,11 +35,11 @@ import org.apache.lucene.document.Document; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.MultiReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; -import org.apache.lucene.tests.search.QueryUtils; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.tests.search.QueryUtils; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -61,10 +61,11 @@ public void testBasics() { public void testRewrite() throws Exception { IndexReader reader = new MultiReader(); + IndexSearcher searcher = new IndexSearcher(reader); MinDocQuery query = new MinDocQuery(42); - Query rewritten = query.rewrite(reader); + Query rewritten = query.rewrite(searcher); QueryUtils.checkUnequal(query, rewritten); - Query rewritten2 = rewritten.rewrite(reader); + Query rewritten2 = rewritten.rewrite(searcher); assertSame(rewritten, rewritten2); } diff --git a/server/src/test/java/org/opensearch/lucene/queries/SearchAfterSortedDocQueryTests.java b/server/src/test/java/org/opensearch/lucene/queries/SearchAfterSortedDocQueryTests.java index 4920e252e7a6b..7d7aecbd4f08c 100644 --- a/server/src/test/java/org/opensearch/lucene/queries/SearchAfterSortedDocQueryTests.java +++ b/server/src/test/java/org/opensearch/lucene/queries/SearchAfterSortedDocQueryTests.java @@ -38,12 +38,10 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReaderContext; -import 
org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; -import org.apache.lucene.tests.search.QueryUtils; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; @@ -51,6 +49,8 @@ import org.apache.lucene.search.SortedSetSortField; import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.tests.search.QueryUtils; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.FixedBitSet; import org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/lucene/queries/SpanMatchNoDocsQueryTests.java b/server/src/test/java/org/opensearch/lucene/queries/SpanMatchNoDocsQueryTests.java index 727377c9a5d53..834c94b04475a 100644 --- a/server/src/test/java/org/opensearch/lucene/queries/SpanMatchNoDocsQueryTests.java +++ b/server/src/test/java/org/opensearch/lucene/queries/SpanMatchNoDocsQueryTests.java @@ -33,7 +33,6 @@ package org.opensearch.lucene.queries; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.tests.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.DirectoryReader; @@ -47,28 +46,21 @@ import org.apache.lucene.queries.spans.SpanTermQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; -import org.apache.lucene.tests.search.QueryUtils; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.analysis.MockAnalyzer; +import org.apache.lucene.tests.search.QueryUtils; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; public class SpanMatchNoDocsQueryTests extends OpenSearchTestCase { public void testSimple() throws Exception { - Directory dir = newDirectory(); - IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig()); - IndexReader ir = DirectoryReader.open(iw); - SpanMatchNoDocsQuery query = new SpanMatchNoDocsQuery("field", "a good reason"); assertEquals(query.toString(), "SpanMatchNoDocsQuery(\"a good reason\")"); - Query rewrite = query.rewrite(ir); + Query rewrite = query.rewrite((IndexSearcher) null); assertTrue(rewrite instanceof SpanMatchNoDocsQuery); assertEquals(rewrite.toString(), "SpanMatchNoDocsQuery(\"a good reason\")"); - - iw.close(); - ir.close(); - dir.close(); } public void testQuery() throws Exception { @@ -101,7 +93,7 @@ public void testQuery() throws Exception { assertEquals(searcher.count(orQuery), 1); hits = searcher.search(orQuery, 1000).scoreDocs; assertEquals(1, hits.length); - Query rewrite = orQuery.rewrite(ir); + Query rewrite = orQuery.rewrite(searcher); assertEquals(rewrite, orQuery); SpanNearQuery nearQuery = new SpanNearQuery( @@ -112,7 +104,7 @@ public void testQuery() throws Exception { assertEquals(searcher.count(nearQuery), 0); hits = searcher.search(nearQuery, 1000).scoreDocs; assertEquals(0, hits.length); - rewrite = nearQuery.rewrite(ir); + rewrite = nearQuery.rewrite(searcher); assertEquals(rewrite, nearQuery); iw.close();
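The rewrite call sites in MinDocQueryTests and SpanMatchNoDocsQueryTests change because newer Lucene resolves Query#rewrite against an IndexSearcher rather than an IndexReader. A minimal sketch of the updated pattern, where the query argument stands for an arbitrary Query and the reader could be, for example, an empty MultiReader as in testRewrite above:

import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;

// Wrap the reader in a searcher and rewrite against it; once a query has
// reached its fully rewritten form, rewriting again returns the same
// instance, which testRewrite verifies with assertSame.
static Query rewriteAgainstSearcher(Query query, IndexReader reader) throws IOException {
    IndexSearcher searcher = new IndexSearcher(reader);
    return query.rewrite(searcher); // was: query.rewrite(reader)
}

diff --git a/server/src/test/java/org/opensearch/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java b/server/src/test/java/org/opensearch/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java index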
ac1c67eda0d91..1282b0152b57f 100644 --- a/server/src/test/java/org/opensearch/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java +++ b/server/src/test/java/org/opensearch/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java @@ -70,8 +70,8 @@ import java.text.BreakIterator; import java.util.Locale; -import static org.apache.lucene.search.uhighlight.CustomUnifiedHighlighter.MULTIVAL_SEP_CHAR; import static org.hamcrest.CoreMatchers.equalTo; +import static org.apache.lucene.search.uhighlight.CustomUnifiedHighlighter.MULTIVAL_SEP_CHAR; public class CustomUnifiedHighlighterTests extends OpenSearchTestCase { private void assertHighlightOneDoc( diff --git a/server/src/test/java/org/opensearch/monitor/fs/DeviceStatsTests.java b/server/src/test/java/org/opensearch/monitor/fs/DeviceStatsTests.java index 0fd039b84e887..0059f8e215f2e 100644 --- a/server/src/test/java/org/opensearch/monitor/fs/DeviceStatsTests.java +++ b/server/src/test/java/org/opensearch/monitor/fs/DeviceStatsTests.java @@ -46,7 +46,10 @@ public void testDeviceStats() { final int sectorsRead = randomIntBetween(8 * readsCompleted, 16 * readsCompleted); final int writesCompleted = randomIntBetween(1, 1 << 16); final int sectorsWritten = randomIntBetween(8 * writesCompleted, 16 * writesCompleted); - + final int readTime = randomIntBetween(1, 1 << 16); + final int writeTime = randomIntBetween(1, 1 << 16); + final int queueSize = randomIntBetween(1, 1 << 16); + final int ioTime = randomIntBetween(1, 1 << 16); FsInfo.DeviceStats previous = new FsInfo.DeviceStats( majorDeviceNumber, minorDeviceNumber, @@ -55,6 +60,10 @@ public void testDeviceStats() { sectorsRead, writesCompleted, sectorsWritten, + readTime, + writeTime, + queueSize, + ioTime, null ); FsInfo.DeviceStats current = new FsInfo.DeviceStats( @@ -65,6 +74,10 @@ public void testDeviceStats() { sectorsRead + 16384, writesCompleted + 2048, sectorsWritten + 32768, + readTime + 500, + writeTime + 100, + queueSize + 20, + ioTime + 8192, previous ); assertThat(current.operations(), equalTo(1024L + 2048L)); @@ -72,6 +85,10 @@ public void testDeviceStats() { assertThat(current.writeOperations(), equalTo(2048L)); assertThat(current.readKilobytes(), equalTo(16384L / 2)); assertThat(current.writeKilobytes(), equalTo(32768L / 2)); + assertEquals(500, current.readTime()); + assertEquals(100, current.writeTime()); + assertEquals(20, current.queueSize()); + assertEquals(8192, current.ioTimeInMillis()); } } diff --git a/server/src/test/java/org/opensearch/monitor/fs/FsHealthServiceTests.java b/server/src/test/java/org/opensearch/monitor/fs/FsHealthServiceTests.java index 0246c8a85e97a..48b2941fe3b7e 100644 --- a/server/src/test/java/org/opensearch/monitor/fs/FsHealthServiceTests.java +++ b/server/src/test/java/org/opensearch/monitor/fs/FsHealthServiceTests.java @@ -43,8 +43,8 @@ import org.opensearch.common.settings.Settings; import org.opensearch.env.NodeEnvironment; import org.opensearch.monitor.StatusInfo; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.MockLogAppender; +import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.junit.annotations.TestLogging; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -54,18 +54,18 @@ import java.io.OutputStream; import java.nio.channels.FileChannel; import java.nio.file.FileSystem; -import java.nio.file.Path; import java.nio.file.OpenOption; +import java.nio.file.Path; import java.nio.file.attribute.FileAttribute; import java.util.Set;
import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import static org.hamcrest.Matchers.equalTo; import static org.opensearch.monitor.StatusInfo.Status.HEALTHY; import static org.opensearch.monitor.StatusInfo.Status.UNHEALTHY; import static org.opensearch.node.Node.NODE_NAME_SETTING; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; public class FsHealthServiceTests extends OpenSearchTestCase { @@ -185,7 +185,7 @@ public void testLoggingOnHungIO() throws Exception { } public void testFailsHealthOnHungIOBeyondHealthyTimeout() throws Exception { - long healthyTimeoutThreshold = randomLongBetween(500, 1000); + long healthyTimeoutThreshold = randomLongBetween(1500, 2000); long refreshInterval = randomLongBetween(500, 1000); long slowLogThreshold = randomLongBetween(100, 200); long delayBetweenChecks = 100; diff --git a/server/src/test/java/org/opensearch/monitor/fs/FsProbeTests.java b/server/src/test/java/org/opensearch/monitor/fs/FsProbeTests.java index 216594f24e2ea..59a888c665be7 100644 --- a/server/src/test/java/org/opensearch/monitor/fs/FsProbeTests.java +++ b/server/src/test/java/org/opensearch/monitor/fs/FsProbeTests.java @@ -33,12 +33,12 @@ package org.opensearch.monitor.fs; import org.apache.lucene.util.Constants; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.breaker.NoopCircuitBreaker; import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.NoopCircuitBreaker; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.env.NodeEnvironment; import org.opensearch.env.NodeEnvironment.NodePath; import org.opensearch.index.store.remote.filecache.FileCache; @@ -91,6 +91,14 @@ public void testFsInfo() throws IOException { assertThat(deviceStats.previousWritesCompleted, equalTo(-1L)); assertThat(deviceStats.currentSectorsWritten, greaterThanOrEqualTo(0L)); assertThat(deviceStats.previousSectorsWritten, equalTo(-1L)); + assertThat(deviceStats.currentReadTime, greaterThanOrEqualTo(0L)); + assertThat(deviceStats.previousReadTime, greaterThanOrEqualTo(-1L)); + assertThat(deviceStats.currentWriteTime, greaterThanOrEqualTo(0L)); + assertThat(deviceStats.previousWriteTime, greaterThanOrEqualTo(-1L)); + assertThat(deviceStats.currentQueueSize, greaterThanOrEqualTo(0L)); + assertThat(deviceStats.previousQueueSize, greaterThanOrEqualTo(-1L)); + assertThat(deviceStats.currentIOTime, greaterThanOrEqualTo(0L)); + assertThat(deviceStats.previousIOTime, greaterThanOrEqualTo(-1L)); } } else { assertNull(stats.getIoStats()); @@ -243,6 +251,16 @@ List<String> readProcDiskStats() throws IOException { assertThat(first.devicesStats[0].previousWritesCompleted, equalTo(-1L)); assertThat(first.devicesStats[0].currentSectorsWritten, equalTo(118857776L)); assertThat(first.devicesStats[0].previousSectorsWritten, equalTo(-1L)); + + assertEquals(33457, first.devicesStats[0].currentReadTime); + assertEquals(-1, first.devicesStats[0].previousReadTime); + assertEquals(18730966, first.devicesStats[0].currentWriteTime); + assertEquals(-1, first.devicesStats[0].previousWriteTime); + assertEquals(18767169, first.devicesStats[0].currentQueueSize); + assertEquals(-1, 
first.devicesStats[0].previousQueueSize); + assertEquals(1918440, first.devicesStats[0].currentIOTime); + assertEquals(-1, first.devicesStats[0].previousIOTime); + + assertThat(first.devicesStats[1].majorDeviceNumber, equalTo(253)); assertThat(first.devicesStats[1].minorDeviceNumber, equalTo(2)); assertThat(first.devicesStats[1].deviceName, equalTo("dm-2")); @@ -255,6 +273,15 @@ List<String> readProcDiskStats() throws IOException { assertThat(first.devicesStats[1].currentSectorsWritten, equalTo(64126096L)); assertThat(first.devicesStats[1].previousSectorsWritten, equalTo(-1L)); + assertEquals(49312, first.devicesStats[1].currentReadTime); + assertEquals(-1, first.devicesStats[1].previousReadTime); + assertEquals(33730596, first.devicesStats[1].currentWriteTime); + assertEquals(-1, first.devicesStats[1].previousWriteTime); + assertEquals(33781827, first.devicesStats[1].currentQueueSize); + assertEquals(-1, first.devicesStats[1].previousQueueSize); + assertEquals(1058193, first.devicesStats[1].currentIOTime); + assertEquals(-1, first.devicesStats[1].previousIOTime); + diskStats.set( Arrays.asList( " 259 0 nvme0n1 336870 0 7928397 82876 10264393 0 182986405 52451610 0 2971042 52536492", @@ -281,6 +308,16 @@ List<String> readProcDiskStats() throws IOException { assertThat(second.devicesStats[0].previousWritesCompleted, equalTo(8398869L)); assertThat(second.devicesStats[0].currentSectorsWritten, equalTo(118857776L)); assertThat(second.devicesStats[0].previousSectorsWritten, equalTo(118857776L)); + + assertEquals(33464, second.devicesStats[0].currentReadTime); + assertEquals(33457, second.devicesStats[0].previousReadTime); + assertEquals(18730966, second.devicesStats[0].currentWriteTime); + assertEquals(18730966, second.devicesStats[0].previousWriteTime); + assertEquals(18767176, second.devicesStats[0].currentQueueSize); + assertEquals(18767169, second.devicesStats[0].previousQueueSize); + assertEquals(1918444, second.devicesStats[0].currentIOTime); + assertEquals(1918440, second.devicesStats[0].previousIOTime); + assertThat(second.devicesStats[1].majorDeviceNumber, equalTo(253)); assertThat(second.devicesStats[1].minorDeviceNumber, equalTo(2)); assertThat(second.devicesStats[1].deviceName, equalTo("dm-2")); @@ -293,11 +330,25 @@ List<String> readProcDiskStats() throws IOException { assertThat(second.devicesStats[1].currentSectorsWritten, equalTo(64128568L)); assertThat(second.devicesStats[1].previousSectorsWritten, equalTo(64126096L)); + assertEquals(49369, second.devicesStats[1].currentReadTime); + assertEquals(49312, second.devicesStats[1].previousReadTime); + assertEquals(33730766, second.devicesStats[1].currentWriteTime); + assertEquals(33730596, second.devicesStats[1].previousWriteTime); + assertEquals(33781827, first.devicesStats[1].currentQueueSize); + assertEquals(-1L, first.devicesStats[1].previousQueueSize); + assertEquals(1058193, first.devicesStats[1].currentIOTime); + assertEquals(-1L, first.devicesStats[1].previousIOTime); + assertThat(second.totalOperations, equalTo(575L)); assertThat(second.totalReadOperations, equalTo(261L)); assertThat(second.totalWriteOperations, equalTo(314L)); assertThat(second.totalReadKilobytes, equalTo(2392L)); assertThat(second.totalWriteKilobytes, equalTo(1236L)); + + assertEquals(64, second.totalReadTime); + assertEquals(170, second.totalWriteTime); + assertEquals(236, second.totalQueueSize); + assertEquals(158, second.totalIOTimeInMillis); } public void testAdjustForHugeFilesystems() throws Exception {
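The added read/write/queue/IO-time assertions follow the same delta convention as the existing operation and kilobyte counters: each device-level accessor reports the difference between the current /proc/diskstats sample and the previous one, with -1 standing in for a missing previous sample, and sector counts converting to kilobytes at two 512-byte sectors per KiB. The asserted totals are consistent with the per-device deltas above; a condensed illustration using values from the test (variable names are illustrative, not production field names):

// Read-time deltas between the two sampled snapshots, per device:
long device0ReadTime = 33464L - 33457L; // 7 ms for devicesStats[0]
long device1ReadTime = 49369L - 49312L; // 57 ms for devicesStats[1]
long totalReadTime = device0ReadTime + device1ReadTime; // 64, as asserted

// Sector counters use 512-byte sectors, so a delta of 16384 sectors read
// is reported as 16384 / 2 = 8192 kilobytes by readKilobytes(),
// matching the DeviceStatsTests assertion above.
long readKilobytes = 16384L / 2;

diff --git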
a/server/src/test/java/org/opensearch/monitor/jvm/JvmGcMonitorServiceTests.java b/server/src/test/java/org/opensearch/monitor/jvm/JvmGcMonitorServiceTests.java index 3ea53bd0ea811..dbd8074da721c 100644 --- a/server/src/test/java/org/opensearch/monitor/jvm/JvmGcMonitorServiceTests.java +++ b/server/src/test/java/org/opensearch/monitor/jvm/JvmGcMonitorServiceTests.java @@ -33,8 +33,8 @@ package org.opensearch.monitor.jvm; import org.apache.logging.log4j.Logger; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.test.OpenSearchTestCase; import static org.mockito.Mockito.mock; diff --git a/server/src/test/java/org/opensearch/monitor/jvm/JvmStatsTests.java b/server/src/test/java/org/opensearch/monitor/jvm/JvmStatsTests.java index 6a7d3afdc5dad..f9c39daef8aa8 100644 --- a/server/src/test/java/org/opensearch/monitor/jvm/JvmStatsTests.java +++ b/server/src/test/java/org/opensearch/monitor/jvm/JvmStatsTests.java @@ -32,8 +32,8 @@ package org.opensearch.monitor.jvm; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/monitor/os/OsProbeTests.java b/server/src/test/java/org/opensearch/monitor/os/OsProbeTests.java index 575ab02bd6f07..73f6ef8916a20 100644 --- a/server/src/test/java/org/opensearch/monitor/os/OsProbeTests.java +++ b/server/src/test/java/org/opensearch/monitor/os/OsProbeTests.java @@ -32,6 +32,18 @@ package org.opensearch.monitor.os; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.Constants; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.math.BigInteger; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.stream.Collectors; + import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.Matchers.allOf; @@ -44,19 +56,6 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; import static org.junit.Assume.assumeThat; - -import java.io.IOException; -import java.math.BigInteger; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Locale; -import java.util.stream.Collectors; - -import org.apache.logging.log4j.Logger; -import org.apache.lucene.util.Constants; -import org.opensearch.test.OpenSearchTestCase; - import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; diff --git a/server/src/test/java/org/opensearch/node/InternalSettingsPreparerTests.java b/server/src/test/java/org/opensearch/node/InternalSettingsPreparerTests.java index 24560fb84878e..5a8de1fca0d31 100644 --- a/server/src/test/java/org/opensearch/node/InternalSettingsPreparerTests.java +++ b/server/src/test/java/org/opensearch/node/InternalSettingsPreparerTests.java @@ -35,10 +35,10 @@ import org.opensearch.cluster.ClusterName; import org.opensearch.common.settings.MockSecureSettings; import org.opensearch.common.settings.SecureSetting; -import org.opensearch.core.common.settings.SecureString; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import 
org.opensearch.common.settings.SettingsException; +import org.opensearch.core.common.settings.SecureString; import org.opensearch.env.Environment; import org.opensearch.test.OpenSearchTestCase; import org.junit.After; diff --git a/server/src/test/java/org/opensearch/node/IoUsageStatsTests.java b/server/src/test/java/org/opensearch/node/IoUsageStatsTests.java new file mode 100644 index 0000000000000..4a4de44e3acea --- /dev/null +++ b/server/src/test/java/org/opensearch/node/IoUsageStatsTests.java @@ -0,0 +1,57 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node; + +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.Locale; + +public class IoUsageStatsTests extends OpenSearchTestCase { + IoUsageStats ioUsageStats; + + @Override + public void setUp() throws Exception { + super.setUp(); + ioUsageStats = new IoUsageStats(10); + } + + public void testDefaultsIoUsageStats() { + assertEquals(ioUsageStats.getIoUtilisationPercent(), 10.0, 0); + } + + public void testUpdateIoUsageStats() { + assertEquals(ioUsageStats.getIoUtilisationPercent(), 10.0, 0); + ioUsageStats.setIoUtilisationPercent(20); + assertEquals(ioUsageStats.getIoUtilisationPercent(), 20.0, 0); + } + + public void testIoUsageStats() throws IOException { + XContentBuilder builder = JsonXContent.contentBuilder(); + builder = ioUsageStats.toXContent(builder, ToXContent.EMPTY_PARAMS); + String response = builder.toString(); + assertEquals(response, "{\"max_io_utilization_percent\":\"10.0\"}"); + ioUsageStats.setIoUtilisationPercent(20); + builder = JsonXContent.contentBuilder(); + builder = ioUsageStats.toXContent(builder, ToXContent.EMPTY_PARAMS); + response = builder.toString(); + assertEquals(response, "{\"max_io_utilization_percent\":\"20.0\"}"); + } + + public void testIoUsageStatsToString() { + String expected = "IO utilization percent: " + String.format(Locale.ROOT, "%.1f", 10.0); + assertEquals(expected, ioUsageStats.toString()); + ioUsageStats.setIoUtilisationPercent(20); + expected = "IO utilization percent: " + String.format(Locale.ROOT, "%.1f", 20.0); + assertEquals(expected, ioUsageStats.toString()); + } +} diff --git a/server/src/test/java/org/opensearch/node/NodeRoleSettingsTests.java b/server/src/test/java/org/opensearch/node/NodeRoleSettingsTests.java index 0a3af34bc12f4..b2bb6897fe164 100644 --- a/server/src/test/java/org/opensearch/node/NodeRoleSettingsTests.java +++ b/server/src/test/java/org/opensearch/node/NodeRoleSettingsTests.java @@ -18,6 +18,7 @@ import java.util.List; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; public class NodeRoleSettingsTests extends OpenSearchTestCase { @@ -72,4 +73,13 @@ public void testUnknownNodeRoleOnly() { assertEquals(testRole, nodeRoles.get(0).roleName()); assertEquals(testRole, nodeRoles.get(0).roleNameAbbreviation()); } + + public void testNodeRolesFromEnvironmentVariables() { + Settings roleSettings = Settings.builder() + .put(NodeRoleSettings.NODE_ROLES_SETTING.getKey(), "${node.roles.test}") + .replacePropertyPlaceholders() + .build(); + List<DiscoveryNodeRole> nodeRoles = NodeRoleSettings.NODE_ROLES_SETTING.get(roleSettings); + 
assertThat(nodeRoles, empty()); + } } diff --git a/server/src/test/java/org/opensearch/node/NodeTests.java b/server/src/test/java/org/opensearch/node/NodeTests.java index ae8028c143498..d91dc696eb30b 100644 --- a/server/src/test/java/org/opensearch/node/NodeTests.java +++ b/server/src/test/java/org/opensearch/node/NodeTests.java @@ -37,13 +37,14 @@ import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.common.SetOnce; -import org.opensearch.common.breaker.CircuitBreaker; import org.opensearch.common.network.NetworkModule; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsException; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexService; @@ -51,15 +52,14 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; import org.opensearch.indices.breaker.BreakerSettings; -import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.monitor.fs.FsInfo; import org.opensearch.monitor.fs.FsProbe; import org.opensearch.plugins.CircuitBreakerPlugin; import org.opensearch.plugins.Plugin; -import org.opensearch.test.NodeRoles; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.MockHttpTransport; +import org.opensearch.test.NodeRoles; +import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/node/RemoteStoreNodeAttributeTests.java b/server/src/test/java/org/opensearch/node/RemoteStoreNodeAttributeTests.java new file mode 100644 index 0000000000000..c4ba271d27ae9 --- /dev/null +++ b/server/src/test/java/org/opensearch/node/RemoteStoreNodeAttributeTests.java @@ -0,0 +1,151 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.node; + +import org.opensearch.Version; +import org.opensearch.cluster.metadata.CryptoMetadata; +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; +import org.opensearch.test.OpenSearchTestCase; + +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.Locale; +import java.util.Map; + +import static java.util.Collections.emptySet; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_CRYPTO_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_CRYPTO_SETTINGS_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; + +public class RemoteStoreNodeAttributeTests extends OpenSearchTestCase { + + static private final String KEY_ARN = "arn:aws:kms:us-east-1:123456789:key/6e9aa906-2cc3-4924-8ded-f385c78d9dcf"; + static private final String REGION = "us-east-1"; + + public void testCryptoMetadata() throws UnknownHostException { + String repoName = "remote-store-A"; + String repoTypeSettingKey = String.format(Locale.ROOT, REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, repoName); + String repoSettingsKey = String.format(Locale.ROOT, REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, repoName); + String repoCryptoMetadataKey = String.format(Locale.ROOT, REMOTE_STORE_REPOSITORY_CRYPTO_ATTRIBUTE_KEY_FORMAT, repoName); + String repoCryptoMetadataSettingsKey = String.format(Locale.ROOT, REMOTE_STORE_REPOSITORY_CRYPTO_SETTINGS_PREFIX, repoName); + Map<String, String> attr = Map.of( + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + repoTypeSettingKey, + "s3", + repoSettingsKey, + "abc", + repoSettingsKey + "base_path", + "xyz", + repoCryptoMetadataKey + ".key_provider_name", + "store-test", + repoCryptoMetadataKey + ".key_provider_type", + "aws-kms", + repoCryptoMetadataSettingsKey + ".region", + REGION, + repoCryptoMetadataSettingsKey + ".key_arn", + KEY_ARN + ); + DiscoveryNode node = new DiscoveryNode( + "C", + new TransportAddress(InetAddress.getByName("localhost"), 9876), + attr, + emptySet(), + Version.CURRENT + ); + + RemoteStoreNodeAttribute remoteStoreNodeAttribute = new RemoteStoreNodeAttribute(node); + assertEquals(remoteStoreNodeAttribute.getRepositoriesMetadata().repositories().size(), 1); + RepositoryMetadata repositoryMetadata = remoteStoreNodeAttribute.getRepositoriesMetadata().repositories().get(0); + Settings.Builder settings = Settings.builder(); + settings.put("region", REGION); + settings.put("key_arn", KEY_ARN); + CryptoMetadata cryptoMetadata = new 
CryptoMetadata("store-test", "aws-kms", settings.build()); + assertEquals(cryptoMetadata, repositoryMetadata.cryptoMetadata()); + } + + public void testInvalidCryptoMetadata() throws UnknownHostException { + String repoName = "remote-store-A"; + String repoTypeSettingKey = String.format(Locale.ROOT, REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, repoName); + String repoSettingsKey = String.format(Locale.ROOT, REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, repoName); + String repoCryptoMetadataKey = String.format(Locale.ROOT, REMOTE_STORE_REPOSITORY_CRYPTO_ATTRIBUTE_KEY_FORMAT, repoName); + String repoCryptoMetadataSettingsKey = String.format(Locale.ROOT, REMOTE_STORE_REPOSITORY_CRYPTO_SETTINGS_PREFIX, repoName); + Map<String, String> attr = Map.of( + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + repoTypeSettingKey, + "s3", + repoSettingsKey, + "abc", + repoSettingsKey + "base_path", + "xyz", + repoCryptoMetadataSettingsKey + ".region", + REGION, + repoCryptoMetadataSettingsKey + ".key_arn", + KEY_ARN + ); + DiscoveryNode node = new DiscoveryNode( + "C", + new TransportAddress(InetAddress.getByName("localhost"), 9876), + attr, + emptySet(), + Version.CURRENT + ); + + assertThrows(IllegalStateException.class, () -> new RemoteStoreNodeAttribute(node)); + } + + public void testNoCryptoMetadata() throws UnknownHostException { + String repoName = "remote-store-A"; + String repoTypeSettingKey = String.format(Locale.ROOT, REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, repoName); + String repoSettingsKey = String.format(Locale.ROOT, REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, repoName); + Map<String, String> attr = Map.of( + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + repoTypeSettingKey, + "s3", + repoSettingsKey, + "abc", + repoSettingsKey + "base_path", + "xyz" + ); + DiscoveryNode node = new DiscoveryNode( + "C", + new TransportAddress(InetAddress.getByName("localhost"), 9876), + attr, + emptySet(), + Version.CURRENT + ); + + RemoteStoreNodeAttribute remoteStoreNodeAttribute = new RemoteStoreNodeAttribute(node); + assertEquals(remoteStoreNodeAttribute.getRepositoriesMetadata().repositories().size(), 1); + RepositoryMetadata repositoryMetadata = remoteStoreNodeAttribute.getRepositoriesMetadata().repositories().get(0); + assertNull(repositoryMetadata.cryptoMetadata()); + } +} diff --git a/server/src/test/java/org/opensearch/node/ResourceUsageCollectorServiceTests.java b/server/src/test/java/org/opensearch/node/ResourceUsageCollectorServiceTests.java new file mode 100644 index 0000000000000..6dd90784ab65f --- /dev/null +++ b/server/src/test/java/org/opensearch/node/ResourceUsageCollectorServiceTests.java @@ -0,0 +1,197 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.node; + +import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.node.resource.tracker.ResourceTrackerSettings; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; +import org.opensearch.test.OpenSearchSingleNodeTestCase; +import org.junit.After; + +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static org.opensearch.ratelimitting.admissioncontrol.AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.hamcrest.Matchers.greaterThan; + +/** + * Tests for ResourceUsageCollectorService where we test collect method, get method and whether schedulers + * are working as expected + */ +public class ResourceUsageCollectorServiceTests extends OpenSearchSingleNodeTestCase { + @Override + protected boolean resetNodeAfterTest() { + return true; + } + + @Override + protected Settings nodeSettings() { + return Settings.builder() + .put(super.nodeSettings()) + .put(ResourceTrackerSettings.GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), TimeValue.timeValueMillis(500)) + .put(ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), TimeValue.timeValueMillis(500)) + .put(ResourceTrackerSettings.GLOBAL_IO_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), TimeValue.timeValueMillis(5000)) + .put(ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.ENFORCED) + .build(); + } + + @After + public void cleanup() { + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().putNull("*")) + .setTransientSettings(Settings.builder().putNull("*")) + ); + } + + public void testResourceUsageStats() { + ResourceUsageCollectorService resourceUsageCollectorService = getInstanceFromNode(ResourceUsageCollectorService.class); + resourceUsageCollectorService.collectNodeResourceUsageStats("node1", System.currentTimeMillis(), 97, 99, new IoUsageStats(98)); + Map<String, NodeResourceUsageStats> nodeStats = resourceUsageCollectorService.getAllNodeStatistics(); + assertTrue(nodeStats.containsKey("node1")); + assertEquals(99.0, nodeStats.get("node1").cpuUtilizationPercent, 0.0); + assertEquals(97.0, nodeStats.get("node1").memoryUtilizationPercent, 0.0); + assertEquals(98, nodeStats.get("node1").getIoUsageStats().getIoUtilisationPercent(), 0.0); + + Optional<NodeResourceUsageStats> nodeResourceUsageStatsOptional = resourceUsageCollectorService.getNodeStatistics("node1"); + + assertNotNull(nodeResourceUsageStatsOptional.get()); + assertEquals(99.0, nodeResourceUsageStatsOptional.get().cpuUtilizationPercent, 0.0); + assertEquals(97.0, nodeResourceUsageStatsOptional.get().memoryUtilizationPercent, 0.0); + assertEquals(98, nodeResourceUsageStatsOptional.get().getIoUsageStats().getIoUtilisationPercent(), 0.0); + + nodeResourceUsageStatsOptional = resourceUsageCollectorService.getNodeStatistics("node2"); + assertTrue(nodeResourceUsageStatsOptional.isEmpty()); + } + + public 
void testScheduler() throws Exception { + /** + * Wait for cluster state to be ready so that localNode().getId() is ready and we add the values to the map + */ + ResourceUsageCollectorService resourceUsageCollectorService = getInstanceFromNode(ResourceUsageCollectorService.class); + ClusterService clusterService = getInstanceFromNode(ClusterService.class); + assertBusy(() -> assertEquals(1, resourceUsageCollectorService.getAllNodeStatistics().size())); + + /** + * Wait for memory utilization to be reported greater than 0 + */ + assertBusy( + () -> assertThat( + resourceUsageCollectorService.getNodeStatistics(clusterService.localNode().getId()).get().getMemoryUtilizationPercent(), + greaterThan(0.0) + ), + 5, + TimeUnit.SECONDS + ); + assertTrue(resourceUsageCollectorService.getNodeStatistics("Invalid").isEmpty()); + } + + /* + * Test that concurrently adding values and removing nodes does not cause exceptions + */ + public void testConcurrentAddingAndRemovingNodes() throws Exception { + ResourceUsageCollectorService resourceUsageCollectorService = getInstanceFromNode(ResourceUsageCollectorService.class); + String[] nodes = new String[] { "a", "b", "c", "d" }; + + final CountDownLatch latch = new CountDownLatch(5); + + Runnable f = () -> { + latch.countDown(); + try { + latch.await(); + } catch (InterruptedException e) { + fail("should not be interrupted"); + } + for (int i = 0; i < randomIntBetween(100, 200); i++) { + if (randomBoolean()) { + resourceUsageCollectorService.removeNodeResourceUsageStats(randomFrom(nodes)); + } + resourceUsageCollectorService.collectNodeResourceUsageStats( + randomFrom(nodes), + System.currentTimeMillis(), + randomIntBetween(1, 100), + randomIntBetween(1, 100), + new IoUsageStats(randomIntBetween(1, 100)) + ); + } + }; + + Thread t1 = new Thread(f); + Thread t2 = new Thread(f); + Thread t3 = new Thread(f); + Thread t4 = new Thread(f); + + t1.start(); + t2.start(); + t3.start(); + t4.start(); + latch.countDown(); + t1.join(); + t2.join(); + t3.join(); + t4.join(); + + final Map<String, NodeResourceUsageStats> nodeStats = resourceUsageCollectorService.getAllNodeStatistics(); + for (String nodeId : nodes) { + if (nodeStats.containsKey(nodeId)) { + assertThat(nodeStats.get(nodeId).memoryUtilizationPercent, greaterThan(0.0)); + assertThat(nodeStats.get(nodeId).cpuUtilizationPercent, greaterThan(0.0)); + assertThat(nodeStats.get(nodeId).getIoUsageStats().getIoUtilisationPercent(), greaterThan(0.0)); + } + } + } + + public void testNodeRemoval() { + ResourceUsageCollectorService resourceUsageCollectorService = getInstanceFromNode(ResourceUsageCollectorService.class); + resourceUsageCollectorService.collectNodeResourceUsageStats( + "node1", + System.currentTimeMillis(), + randomIntBetween(1, 100), + randomIntBetween(1, 100), + new IoUsageStats(randomIntBetween(1, 100)) + ); + resourceUsageCollectorService.collectNodeResourceUsageStats( + "node2", + System.currentTimeMillis(), + randomIntBetween(1, 100), + randomIntBetween(1, 100), + new IoUsageStats(randomIntBetween(1, 100)) + ); + + ClusterState previousState = ClusterState.builder(new ClusterName("cluster")) + .nodes( + DiscoveryNodes.builder() + .add(DiscoveryNode.createLocal(Settings.EMPTY, new TransportAddress(TransportAddress.META_ADDRESS, 9200), "node1")) + .add(DiscoveryNode.createLocal(Settings.EMPTY, new TransportAddress(TransportAddress.META_ADDRESS, 9201), "node2")) + ) + .build(); + ClusterState newState = ClusterState.builder(previousState) + 
.nodes(DiscoveryNodes.builder(previousState.nodes()).remove("node2")) + .build(); + ClusterChangedEvent event = new ClusterChangedEvent("test", newState, previousState); + + resourceUsageCollectorService.clusterChanged(event); + final Map<String, NodeResourceUsageStats> nodeStats = resourceUsageCollectorService.getAllNodeStatistics(); + assertTrue(nodeStats.containsKey("node1")); + assertFalse(nodeStats.containsKey("node2")); + } +} diff --git a/server/src/test/java/org/opensearch/node/ResponseCollectorServiceTests.java b/server/src/test/java/org/opensearch/node/ResponseCollectorServiceTests.java index 2b13df3027cfa..7ca1f1e864b99 100644 --- a/server/src/test/java/org/opensearch/node/ResponseCollectorServiceTests.java +++ b/server/src/test/java/org/opensearch/node/ResponseCollectorServiceTests.java @@ -40,7 +40,7 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/test/java/org/opensearch/node/resource/tracker/AverageUsageTrackerTests.java b/server/src/test/java/org/opensearch/node/resource/tracker/AverageUsageTrackerTests.java new file mode 100644 index 0000000000000..49a58991e8e5c --- /dev/null +++ b/server/src/test/java/org/opensearch/node/resource/tracker/AverageUsageTrackerTests.java @@ -0,0 +1,135 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.node.resource.tracker; + +import org.opensearch.common.ValidationException; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.monitor.fs.FsInfo; +import org.opensearch.monitor.fs.FsService; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.util.Optional; +import java.util.concurrent.TimeUnit; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Tests to validate AverageMemoryUsageTracker and AverageCpuUsageTracker implementation + */ +public class AverageUsageTrackerTests extends OpenSearchTestCase { + ThreadPool threadPool; + AverageMemoryUsageTracker averageMemoryUsageTracker; + AverageCpuUsageTracker averageCpuUsageTracker; + AverageIoUsageTracker averageIoUsageTracker; + + @Before + public void setup() { + threadPool = new TestThreadPool(getClass().getName()); + FsService fsService = mock(FsService.class); + averageMemoryUsageTracker = new AverageMemoryUsageTracker( + threadPool, + new TimeValue(500, TimeUnit.MILLISECONDS), + new TimeValue(1000, TimeUnit.MILLISECONDS) + ); + averageCpuUsageTracker = new AverageCpuUsageTracker( + threadPool, + new TimeValue(500, TimeUnit.MILLISECONDS), + new TimeValue(1000, TimeUnit.MILLISECONDS) + ); + averageIoUsageTracker = new AverageIoUsageTracker( + fsService, + threadPool, + new TimeValue(500, TimeUnit.MILLISECONDS), + new TimeValue(1000, TimeUnit.MILLISECONDS) + ); + } + + @After + public void cleanup() { + ThreadPool.terminate(threadPool, 5, TimeUnit.SECONDS); + } + + public void testBasicUsage() { + assertAverageUsageStats(averageMemoryUsageTracker); + assertAverageUsageStats(averageCpuUsageTracker); + assertAverageUsageStats(averageIoUsageTracker); + } + + public void testUpdateWindowSize() { + assertUpdateWindowSize(averageMemoryUsageTracker); + assertUpdateWindowSize(averageCpuUsageTracker); + assertUpdateWindowSize(averageIoUsageTracker); + } + + private void assertAverageUsageStats(AbstractAverageUsageTracker usageTracker) { + usageTracker.recordUsage(1); + assertFalse(usageTracker.isReady()); + usageTracker.recordUsage(2); + assertTrue(usageTracker.isReady()); + assertEquals(2, usageTracker.getWindowSize()); + assertEquals(1.5, usageTracker.getAverage(), 0.0); + usageTracker.recordUsage(5); + // ( 2 + 5 ) / 2 = 3.5 + assertEquals(3.5, usageTracker.getAverage(), 0.0); + } + + private void assertUpdateWindowSize(AbstractAverageUsageTracker usageTracker) { + usageTracker.recordUsage(1); + usageTracker.recordUsage(2); + + assertEquals(2, usageTracker.getWindowSize()); + assertEquals(1.5, usageTracker.getAverage(), 0.0); + usageTracker.recordUsage(5); + // ( 2 + 5 ) / 2 = 3.5 + assertEquals(3.5, usageTracker.getAverage(), 0.0); + + usageTracker.setWindowSize(new TimeValue(2000, TimeUnit.MILLISECONDS)); + assertEquals(0, usageTracker.getWindowSize()); + assertEquals(0.0, usageTracker.getAverage(), 0.0); + // verify 2000/500 = 4 is the window size and average is calculated on window size of 4 + usageTracker.recordUsage(1); + usageTracker.recordUsage(2); + usageTracker.recordUsage(1); + assertFalse(usageTracker.isReady()); + usageTracker.recordUsage(2); + assertTrue(usageTracker.isReady()); + assertEquals(4, usageTracker.getWindowSize()); + // (1 + 2 + 1 + 2 ) / 4 = 1.5 + assertEquals(1.5, usageTracker.getAverage(), 0.0); + usageTracker.recordUsage(2); + assertTrue(usageTracker.isReady()); + // ( 2 
+ 1 + 2 + 2 ) / 4 = 1.75 + assertEquals(1.75, usageTracker.getAverage(), 0.0); + } + + public void testPreValidationForIOTracker() { + Optional<ValidationException> validationException = averageIoUsageTracker.preValidateFsStats(); + assertTrue(validationException.isPresent()); + FsService fsService = mock(FsService.class); + FsInfo fsInfo = mock(FsInfo.class); + FsInfo.IoStats ioStats = mock(FsInfo.IoStats.class); + when(fsService.stats()).thenReturn(fsInfo); + when(fsInfo.getIoStats()).thenReturn(ioStats); + FsInfo.DeviceStats[] deviceStats = new FsInfo.DeviceStats[0]; + when(fsService.stats().getIoStats().getDevicesStats()).thenReturn(deviceStats); + averageIoUsageTracker = new AverageIoUsageTracker( + fsService, + threadPool, + new TimeValue(500, TimeUnit.MILLISECONDS), + new TimeValue(1000, TimeUnit.MILLISECONDS) + ); + validationException = averageIoUsageTracker.preValidateFsStats(); + assertFalse(validationException.isPresent()); + } +} diff --git a/server/src/test/java/org/opensearch/node/resource/tracker/NodeResourceUsageTrackerTests.java b/server/src/test/java/org/opensearch/node/resource/tracker/NodeResourceUsageTrackerTests.java new file mode 100644 index 0000000000000..191b09331f111 --- /dev/null +++ b/server/src/test/java/org/opensearch/node/resource/tracker/NodeResourceUsageTrackerTests.java @@ -0,0 +1,109 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node.resource.tracker; + +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.monitor.fs.FsService; +import org.opensearch.test.OpenSearchSingleNodeTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.util.concurrent.TimeUnit; + +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.hamcrest.Matchers.greaterThan; +import static org.mockito.Mockito.mock; + +/** + * Tests asserting that the resource usage trackers retrieve resource utilization averages + */ +public class NodeResourceUsageTrackerTests extends OpenSearchSingleNodeTestCase { + ThreadPool threadPool; + + @Before + public void setup() { + threadPool = new TestThreadPool(getClass().getName()); + } + + @After + public void cleanup() { + ThreadPool.terminate(threadPool, 5, TimeUnit.SECONDS); + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().putNull("*")) + .setTransientSettings(Settings.builder().putNull("*")) + ); + } + + public void testStats() throws Exception { + Settings settings = Settings.builder() + .put(ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), new TimeValue(500, TimeUnit.MILLISECONDS)) + .build(); + NodeResourceUsageTracker tracker = new NodeResourceUsageTracker( + mock(FsService.class), + threadPool, + settings, + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + tracker.start(); + // Assert that memory utilization is greater than 0; CPU percent used is mostly 0 + // in this environment, so that assertion is skipped.
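+ // assertBusy retries for up to 5 seconds, giving the tracker (JVM usage window set to 500ms above) time to record samples. + assertBusy(() -> assertThat(tracker.getMemoryUtilizationPercent(), 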
greaterThan(0.0)), 5, TimeUnit.SECONDS); + tracker.stop(); + tracker.close(); + } + + public void testUpdateSettings() { + NodeResourceUsageTracker tracker = new NodeResourceUsageTracker( + mock(FsService.class), + threadPool, + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + + assertEquals(tracker.getResourceTrackerSettings().getCpuWindowDuration().getSeconds(), 30); + assertEquals(tracker.getResourceTrackerSettings().getMemoryWindowDuration().getSeconds(), 30); + assertEquals(tracker.getResourceTrackerSettings().getIoWindowDuration().getSeconds(), 120); + + Settings settings = Settings.builder() + .put(ResourceTrackerSettings.GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), "10s") + .build(); + ClusterUpdateSettingsResponse response = client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings).get(); + assertEquals( + "10s", + response.getPersistentSettings().get(ResourceTrackerSettings.GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING.getKey()) + ); + + Settings jvmsettings = Settings.builder() + .put(ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), "5s") + .build(); + response = client().admin().cluster().prepareUpdateSettings().setPersistentSettings(jvmsettings).get(); + assertEquals( + "5s", + response.getPersistentSettings().get(ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.getKey()) + ); + Settings ioSettings = Settings.builder() + .put(ResourceTrackerSettings.GLOBAL_IO_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), "20s") + .build(); + response = client().admin().cluster().prepareUpdateSettings().setPersistentSettings(ioSettings).get(); + assertEquals( + "20s", + response.getPersistentSettings().get(ResourceTrackerSettings.GLOBAL_IO_USAGE_AC_WINDOW_DURATION_SETTING.getKey()) + ); + } +} diff --git a/server/src/test/java/org/opensearch/nodesinfo/NodeInfoStreamingTests.java b/server/src/test/java/org/opensearch/nodesinfo/NodeInfoStreamingTests.java index 7b17d4d3b0471..fba26b0c72e0e 100644 --- a/server/src/test/java/org/opensearch/nodesinfo/NodeInfoStreamingTests.java +++ b/server/src/test/java/org/opensearch/nodesinfo/NodeInfoStreamingTests.java @@ -36,13 +36,12 @@ import org.opensearch.action.admin.cluster.node.info.NodeInfo; import org.opensearch.action.admin.cluster.node.info.PluginsAndModules; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.common.Strings; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.http.HttpInfo; @@ -119,7 +118,7 @@ private void compareJsonOutput(ToXContent param1, ToXContent param2) throws IOEx param2Builder.startObject(); param2.toXContent(param2Builder, params); param2Builder.endObject(); - assertThat(Strings.toString(param1Builder), equalTo(Strings.toString(param2Builder))); + assertThat(param1Builder.toString(), equalTo(param2Builder.toString())); } private 
static NodeInfo createNodeInfo() { diff --git a/server/src/test/java/org/opensearch/persistent/PersistentTasksClusterServiceTests.java b/server/src/test/java/org/opensearch/persistent/PersistentTasksClusterServiceTests.java index bfff3c77e1d89..68b35dc1fb906 100644 --- a/server/src/test/java/org/opensearch/persistent/PersistentTasksClusterServiceTests.java +++ b/server/src/test/java/org/opensearch/persistent/PersistentTasksClusterServiceTests.java @@ -34,7 +34,6 @@ import org.opensearch.ResourceNotFoundException; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; @@ -49,6 +48,7 @@ import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; import org.opensearch.persistent.PersistentTasksCustomMetadata.Assignment; import org.opensearch.persistent.PersistentTasksCustomMetadata.PersistentTask; import org.opensearch.persistent.TestPersistentTasksPlugin.TestParams; diff --git a/server/src/test/java/org/opensearch/persistent/PersistentTasksCustomMetadataTests.java b/server/src/test/java/org/opensearch/persistent/PersistentTasksCustomMetadataTests.java index 7fae4d5bb23e3..a55665d4748ed 100644 --- a/server/src/test/java/org/opensearch/persistent/PersistentTasksCustomMetadataTests.java +++ b/server/src/test/java/org/opensearch/persistent/PersistentTasksCustomMetadataTests.java @@ -41,10 +41,11 @@ import org.opensearch.cluster.metadata.Metadata.Custom; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; -import org.opensearch.core.ParseField; import org.opensearch.common.UUIDs; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.NamedWriteableRegistry.Entry; @@ -54,9 +55,7 @@ import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.persistent.PersistentTasksCustomMetadata.Assignment; import org.opensearch.persistent.PersistentTasksCustomMetadata.Builder; import org.opensearch.persistent.PersistentTasksCustomMetadata.PersistentTask; @@ -204,7 +203,7 @@ public void testSerializationContext() throws Exception { BytesReference shuffled = toShuffledXContent(testInstance, xContentType, params, false); PersistentTasksCustomMetadata newInstance; - try (XContentParser parser = createParser(XContentFactory.xContent(xContentType), shuffled)) { + try (XContentParser parser = createParser(xContentType.xContent(), shuffled)) { newInstance = doParseInstance(parser); } assertNotSame(newInstance, testInstance); diff --git a/server/src/test/java/org/opensearch/persistent/PersistentTasksNodeServiceStatusTests.java b/server/src/test/java/org/opensearch/persistent/PersistentTasksNodeServiceStatusTests.java index 
73c5d674a5816..f6f9289e656b3 100644 --- a/server/src/test/java/org/opensearch/persistent/PersistentTasksNodeServiceStatusTests.java +++ b/server/src/test/java/org/opensearch/persistent/PersistentTasksNodeServiceStatusTests.java @@ -32,8 +32,8 @@ package org.opensearch.persistent; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.test.AbstractWireSerializingTestCase; import org.opensearch.persistent.PersistentTasksNodeService.Status; +import org.opensearch.test.AbstractWireSerializingTestCase; import static org.hamcrest.Matchers.containsString; diff --git a/server/src/test/java/org/opensearch/persistent/PersistentTasksNodeServiceTests.java b/server/src/test/java/org/opensearch/persistent/PersistentTasksNodeServiceTests.java index 84ca449bec665..a97e3504f4d34 100644 --- a/server/src/test/java/org/opensearch/persistent/PersistentTasksNodeServiceTests.java +++ b/server/src/test/java/org/opensearch/persistent/PersistentTasksNodeServiceTests.java @@ -33,7 +33,6 @@ package org.opensearch.persistent; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.opensearch.client.Client; import org.opensearch.cluster.ClusterChangedEvent; @@ -47,12 +46,13 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.tasks.TaskId; import org.opensearch.persistent.PersistentTasksCustomMetadata.Assignment; import org.opensearch.persistent.PersistentTasksCustomMetadata.PersistentTask; import org.opensearch.persistent.TestPersistentTasksPlugin.TestParams; import org.opensearch.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; import org.opensearch.tasks.TaskManager; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; diff --git a/server/src/test/java/org/opensearch/persistent/TestPersistentTasksPlugin.java b/server/src/test/java/org/opensearch/persistent/TestPersistentTasksPlugin.java index 3991f6721a350..0a1893173e8a7 100644 --- a/server/src/test/java/org/opensearch/persistent/TestPersistentTasksPlugin.java +++ b/server/src/test/java/org/opensearch/persistent/TestPersistentTasksPlugin.java @@ -35,10 +35,8 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.Version; -import org.opensearch.action.ActionType; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; -import org.opensearch.action.ActionResponse; +import org.opensearch.action.ActionType; import org.opensearch.action.FailedNodeException; import org.opensearch.action.TaskOperationFailure; import org.opensearch.action.support.ActionFilters; @@ -52,17 +50,21 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.Strings; -import org.opensearch.common.component.Lifecycle; import org.opensearch.common.inject.Inject; +import org.opensearch.common.lifecycle.Lifecycle; +import org.opensearch.common.settings.SettingsModule; +import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; 
+import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.settings.SettingsModule; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.ParseField; +import org.opensearch.core.tasks.TaskCancelledException; +import org.opensearch.core.tasks.TaskId; import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -71,8 +73,6 @@ import org.opensearch.plugins.ActionPlugin; import org.opensearch.plugins.PersistentTaskPlugin; import org.opensearch.plugins.Plugin; -import org.opensearch.tasks.TaskCancelledException; -import org.opensearch.tasks.TaskId; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -300,7 +300,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public String toString() { - return Strings.toString(XContentType.JSON, this); + return Strings.toString(MediaTypeRegistry.JSON, this); } // Implements equals and hashcode for testing diff --git a/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java b/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java index 2b2a8c7ae447c..12c7dc870c104 100644 --- a/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java +++ b/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java @@ -32,10 +32,16 @@ package org.opensearch.plugins; +import com.fasterxml.jackson.core.JsonParseException; + import org.opensearch.Version; import org.opensearch.action.admin.cluster.node.info.PluginsAndModules; -import org.opensearch.core.common.io.stream.ByteBufferStreamInput; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.io.stream.ByteBufferStreamInput; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.semver.SemverRange; import org.opensearch.test.OpenSearchTestCase; import java.nio.ByteBuffer; @@ -46,8 +52,8 @@ import java.util.stream.Collectors; import static org.hamcrest.Matchers.contains; -import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; public class PluginInfoTests extends OpenSearchTestCase { @@ -74,6 +80,33 @@ public void testReadFromProperties() throws Exception { assertEquals("fake desc", info.getDescription()); assertEquals("1.0", info.getVersion()); assertEquals("FakePlugin", info.getClassname()); + assertEquals(Version.CURRENT.toString(), info.getOpenSearchVersionRanges().get(0).toString()); + assertThat(info.getExtendedPlugins(), empty()); + } + + public void testReadFromPropertiesWithSingleOpenSearchRange() throws Exception { + Path pluginDir = createTempDir().resolve("fake-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "fake desc", + "name", + "my_plugin", + "version", + "1.0", + "dependencies", + "{opensearch:\"~" + Version.CURRENT.toString() + "\"}", + "java.version", + 
System.getProperty("java.specification.version"), + "classname", + "FakePlugin" + ); + PluginInfo info = PluginInfo.readFromProperties(pluginDir); + assertEquals("my_plugin", info.getName()); + assertEquals("fake desc", info.getDescription()); + assertEquals("1.0", info.getVersion()); + assertEquals("FakePlugin", info.getClassname()); + assertEquals("~" + Version.CURRENT.toString(), info.getOpenSearchVersionRanges().get(0).toString()); assertThat(info.getExtendedPlugins(), empty()); } @@ -102,6 +135,7 @@ public void testReadFromPropertiesWithFolderNameAndVersionAfter() throws Excepti assertEquals("1.0", info.getVersion()); assertEquals("FakePlugin", info.getClassname()); assertEquals("custom-folder", info.getTargetFolderName()); + assertEquals(Version.CURRENT.toString(), info.getOpenSearchVersionRanges().get(0).toString()); assertThat(info.getExtendedPlugins(), empty()); } @@ -130,11 +164,40 @@ public void testReadFromPropertiesVersionMissing() throws Exception { assertThat(e.getMessage(), containsString("[version] is missing")); } - public void testReadFromPropertiesOpenSearchVersionMissing() throws Exception { + public void testReadFromPropertiesOpenSearchVersionAndDependenciesMissing() throws Exception { Path pluginDir = createTempDir().resolve("fake-plugin"); PluginTestUtil.writePluginProperties(pluginDir, "description", "fake desc", "name", "my_plugin", "version", "1.0"); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginInfo.readFromProperties(pluginDir)); - assertThat(e.getMessage(), containsString("[opensearch.version] is missing")); + assertThat( + e.getMessage(), + containsString("Either [opensearch.version] or [dependencies] property must be specified for the plugin ") + ); + } + + public void testReadFromPropertiesWithDependenciesAndOpenSearchVersion() throws Exception { + Path pluginDir = createTempDir().resolve("fake-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "fake desc", + "name", + "my_plugin", + "version", + "1.0", + "opensearch.version", + Version.CURRENT.toString(), + "dependencies", + "{opensearch:" + Version.CURRENT.toString() + "}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "FakePlugin" + ); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginInfo.readFromProperties(pluginDir)); + assertThat( + e.getMessage(), + containsString("Only one of [opensearch.version] or [dependencies] property can be specified for the plugin") + ); } public void testReadFromPropertiesJavaVersionMissing() throws Exception { @@ -305,7 +368,31 @@ public void testSerialize() throws Exception { ByteBufferStreamInput input = new ByteBufferStreamInput(buffer); PluginInfo info2 = new PluginInfo(input); assertThat(info2.toString(), equalTo(info.toString())); + } + public void testToXContent() throws Exception { + PluginInfo info = new PluginInfo( + "fake", + "foo", + "dummy", + Version.CURRENT, + "1.8", + "dummyClass", + "folder", + Collections.emptyList(), + false + ); + XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); + String prettyPrint = info.toXContent(builder, ToXContent.EMPTY_PARAMS).prettyPrint().toString(); + assertTrue(prettyPrint.contains("\"name\" : \"fake\"")); + assertTrue(prettyPrint.contains("\"version\" : \"dummy\"")); + assertTrue(prettyPrint.contains("\"opensearch_version\" : \"" + Version.CURRENT)); + assertTrue(prettyPrint.contains("\"java_version\" : \"1.8\"")); + 
assertTrue(prettyPrint.contains("\"description\" : \"foo\"")); + assertTrue(prettyPrint.contains("\"classname\" : \"dummyClass\"")); + assertTrue(prettyPrint.contains("\"custom_foldername\" : \"folder\"")); + assertTrue(prettyPrint.contains("\"extended_plugins\" : [ ]")); + assertTrue(prettyPrint.contains("\"has_native_controller\" : false")); } public void testPluginListSorted() { @@ -347,4 +434,193 @@ public void testUnknownProperties() throws Exception { assertThat(e.getMessage(), containsString("Unknown properties in plugin descriptor")); } + public void testMultipleDependencies() throws Exception { + Path pluginDir = createTempDir().resolve("fake-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "fake desc", + "name", + "my_plugin", + "version", + "1.0", + "dependencies", + "{opensearch:\"~" + Version.CURRENT.toString() + "\", dependency2:\"1.0.0\"}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "FakePlugin" + ); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginInfo.readFromProperties(pluginDir)); + assertThat(e.getMessage(), containsString("Exactly one dependency is allowed to be specified in plugin descriptor properties")); + } + + public void testNonOpenSearchDependency() throws Exception { + Path pluginDir = createTempDir().resolve("fake-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "fake desc", + "name", + "my_plugin", + "version", + "1.0", + "dependencies", + "{some_dependency:\"~" + Version.CURRENT.toString() + "\"}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "FakePlugin" + ); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginInfo.readFromProperties(pluginDir)); + assertThat(e.getMessage(), containsString("Only opensearch is allowed to be specified as a plugin dependency")); + } + + public void testEmptyDependenciesProperty() throws Exception { + Path pluginDir = createTempDir().resolve("fake-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "fake desc", + "name", + "my_plugin", + "version", + "1.0", + "dependencies", + "{}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "FakePlugin" + ); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginInfo.readFromProperties(pluginDir)); + assertThat(e.getMessage(), containsString("Exactly one dependency is allowed to be specified in plugin descriptor properties")); + } + + public void testInvalidDependenciesProperty() throws Exception { + Path pluginDir = createTempDir().resolve("fake-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "fake desc", + "name", + "my_plugin", + "version", + "1.0", + "dependencies", + "{invalid}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "FakePlugin" + ); + expectThrows(JsonParseException.class, () -> PluginInfo.readFromProperties(pluginDir)); + } + + public void testEmptyOpenSearchVersionInDependencies() throws Exception { + Path pluginDir = createTempDir().resolve("fake-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "fake desc", + "name", + "my_plugin", + "version", + "1.0", + "dependencies", + "{opensearch:\"\"}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "FakePlugin" + ); + IllegalArgumentException e = 
expectThrows(IllegalArgumentException.class, () -> PluginInfo.readFromProperties(pluginDir)); + assertThat(e.getMessage(), containsString("Version cannot be empty")); + } + + public void testInvalidOpenSearchVersionInDependencies() throws Exception { + Path pluginDir = createTempDir().resolve("fake-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "fake desc", + "name", + "my_plugin", + "version", + "1.0", + "dependencies", + "{opensearch:\"1.2\"}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "FakePlugin" + ); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginInfo.readFromProperties(pluginDir)); + assertThat( + e.getMessage(), + containsString("the version needs to contain major, minor, and revision, and optionally the build: 1.2") + ); + } + + public void testInvalidRangeInDependencies() throws Exception { + Path pluginDir = createTempDir().resolve("fake-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "fake desc", + "name", + "my_plugin", + "version", + "1.0", + "dependencies", + "{opensearch:\"<2.2.0\"}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "FakePlugin" + ); + expectThrows(NumberFormatException.class, () -> PluginInfo.readFromProperties(pluginDir)); + } + + public void testMultipleOpenSearchRangesInDependencies() throws Exception { + Path pluginDir = createTempDir().resolve("fake-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "fake desc", + "name", + "my_plugin", + "version", + "1.0", + "dependencies", + "{opensearch:\"~1.2.3, =1.2.3\"}", + "java.version", + System.getProperty("java.specification.version"), + "classname", + "FakePlugin" + ); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginInfo.readFromProperties(pluginDir)); + assertThat(e.getMessage(), containsString("Exactly one range is allowed to be specified in dependencies for the plugin")); + } + + public void testMultipleOpenSearchRangesInConstructor() throws Exception { + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> new PluginInfo( + "plugin_name", + "foo", + "dummy", + List.of( + new SemverRange(Version.CURRENT, SemverRange.RangeOperator.EQ), + new SemverRange(Version.CURRENT, SemverRange.RangeOperator.DEFAULT) + ), + "1.8", + "dummyclass", + null, + Collections.emptyList(), + randomBoolean() + ) + ); + assertThat(e.getMessage(), containsString("Exactly one range is allowed to be specified in dependencies for the plugin")); + } } diff --git a/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java b/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java index 6066316bc606b..bd9ee33856f14 100644 --- a/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java +++ b/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java @@ -34,8 +34,8 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; -import org.apache.lucene.util.Constants; import org.apache.lucene.tests.util.LuceneTestCase; +import org.apache.lucene.util.Constants; import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.bootstrap.JarHell; @@ -45,8 +45,10 @@ import org.opensearch.env.Environment; import org.opensearch.env.TestEnvironment; import org.opensearch.index.IndexModule; +import org.opensearch.semver.SemverRange; import 
org.opensearch.test.MockLogAppender; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.VersionUtils; import org.hamcrest.Matchers; import java.io.IOException; @@ -717,6 +719,45 @@ public void testIncompatibleOpenSearchVersion() throws Exception { assertThat(e.getMessage(), containsString("was built for OpenSearch version 6.0.0")); } + public void testCompatibleOpenSearchVersionRange() { + List<SemverRange> pluginCompatibilityRange = List.of(new SemverRange(Version.CURRENT, SemverRange.RangeOperator.TILDE)); + PluginInfo info = new PluginInfo( + "my_plugin", + "desc", + "1.0", + pluginCompatibilityRange, + "1.8", + "FakePlugin", + null, + Collections.emptyList(), + false + ); + PluginsService.verifyCompatibility(info); + } + + public void testIncompatibleOpenSearchVersionRange() { + // Version.CURRENT is behind by one with respect to patch version in the range + List<SemverRange> pluginCompatibilityRange = List.of( + new SemverRange( + VersionUtils.getVersion(Version.CURRENT.major, Version.CURRENT.minor, (byte) (Version.CURRENT.revision + 1)), + SemverRange.RangeOperator.TILDE + ) + ); + PluginInfo info = new PluginInfo( + "my_plugin", + "desc", + "1.0", + pluginCompatibilityRange, + "1.8", + "FakePlugin", + null, + Collections.emptyList(), + false + ); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginsService.verifyCompatibility(info)); + assertThat(e.getMessage(), containsString("was built for OpenSearch version ")); + } + public void testIncompatibleJavaVersion() throws Exception { PluginInfo info = new PluginInfo( "my_plugin", @@ -891,7 +932,10 @@ public void testExtensiblePlugin() { TestExtensiblePlugin extensiblePlugin = new TestExtensiblePlugin(); PluginsService.loadExtensions( Collections.singletonList( - Tuple.tuple(new PluginInfo("extensible", null, null, null, null, null, Collections.emptyList(), false), extensiblePlugin) + Tuple.tuple( + new PluginInfo("extensible", null, null, Version.CURRENT, null, null, Collections.emptyList(), false), + extensiblePlugin + ) ) ); @@ -902,9 +946,12 @@ public void testExtensiblePlugin() { TestPlugin testPlugin = new TestPlugin(); PluginsService.loadExtensions( Arrays.asList( - Tuple.tuple(new PluginInfo("extensible", null, null, null, null, null, Collections.emptyList(), false), extensiblePlugin), Tuple.tuple( - new PluginInfo("test", null, null, null, null, null, Collections.singletonList("extensible"), false), + new PluginInfo("extensible", null, null, Version.CURRENT, null, null, Collections.emptyList(), false), + extensiblePlugin + ), + Tuple.tuple( + new PluginInfo("test", null, null, Version.CURRENT, null, null, Collections.singletonList("extensible"), false), testPlugin ) ) @@ -1036,6 +1083,40 @@ public void testThrowingConstructor() { assertThat(e.getCause().getCause(), hasToString(containsString("test constructor failure"))); } + public void testPluginCompatibilityWithSemverRange() { + // A bare version or "=" requires an exact match, while "~" fixes major.minor and accepts the stated patch or newer. + // Compatible plugin and core versions + assertTrue(PluginsService.isPluginVersionCompatible(getPluginInfoWithSemverRange("1.0.0"), Version.fromString("1.0.0"))); + + assertTrue(PluginsService.isPluginVersionCompatible(getPluginInfoWithSemverRange("=1.0.0"), Version.fromString("1.0.0"))); + + assertTrue(PluginsService.isPluginVersionCompatible(getPluginInfoWithSemverRange("~1.0.0"), Version.fromString("1.0.0"))); + + assertTrue(PluginsService.isPluginVersionCompatible(getPluginInfoWithSemverRange("~1.0.1"), Version.fromString("1.0.2"))); + + // Incompatible plugin and core versions + assertFalse(PluginsService.isPluginVersionCompatible(getPluginInfoWithSemverRange("1.0.0"), Version.fromString("1.0.1"))); + + assertFalse(PluginsService.isPluginVersionCompatible(getPluginInfoWithSemverRange("=1.0.0"), Version.fromString("1.0.1"))); + + assertFalse(PluginsService.isPluginVersionCompatible(getPluginInfoWithSemverRange("~1.0.1"), Version.fromString("1.0.0"))); + + assertFalse(PluginsService.isPluginVersionCompatible(getPluginInfoWithSemverRange("~1.0.0"), Version.fromString("1.1.0"))); + } + + private PluginInfo getPluginInfoWithSemverRange(String semverRange) { + return new PluginInfo( + "my_plugin", + "desc", + "1.0", + List.of(SemverRange.fromString(semverRange)), + "1.8", + "FakePlugin", + null, + Collections.emptyList(), + false + ); + } + private static class TestExtensiblePlugin extends Plugin implements ExtensiblePlugin { private List<TestExtensionPoint> extensions; diff --git a/server/src/test/java/org/opensearch/plugins/spi/NamedXContentProviderTests.java b/server/src/test/java/org/opensearch/plugins/spi/NamedXContentProviderTests.java index fe0f609b87213..32f155edbe54a 100644 --- a/server/src/test/java/org/opensearch/plugins/spi/NamedXContentProviderTests.java +++ b/server/src/test/java/org/opensearch/plugins/spi/NamedXContentProviderTests.java @@ -32,8 +32,8 @@ package org.opensearch.plugins.spi; -import org.opensearch.core.ParseField; import org.opensearch.common.io.Streams; +import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.search.aggregations.Aggregation; import org.opensearch.search.aggregations.pipeline.ParsedSimpleValue; diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlServiceTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlServiceTests.java new file mode 100644 index 0000000000000..4f615290f1805 --- /dev/null +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlServiceTests.java @@ -0,0 +1,179 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.ratelimitting.admissioncontrol; + +import org.apache.lucene.util.Constants; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.ratelimitting.admissioncontrol.controllers.AdmissionController; +import org.opensearch.ratelimitting.admissioncontrol.controllers.CpuBasedAdmissionController; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; +import org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +import java.util.List; + +public class AdmissionControlServiceTests extends OpenSearchTestCase { + private ClusterService clusterService; + private ThreadPool threadPool; + private AdmissionControlService admissionControlService; + private String action = ""; + + @Override + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool("admission_controller_settings_test"); + clusterService = new ClusterService( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ); + action = "indexing"; + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdownNow(); + } + + public void testWhenAdmissionControllerRegistered() { + admissionControlService = new AdmissionControlService(Settings.EMPTY, clusterService, threadPool, null); + if (Constants.LINUX) { + assertEquals(admissionControlService.getAdmissionControllers().size(), 2); + } else { + assertEquals(admissionControlService.getAdmissionControllers().size(), 1); + } + } + + public void testRegisterInvalidAdmissionController() { + String test = "TEST"; + admissionControlService = new AdmissionControlService(Settings.EMPTY, clusterService, threadPool, null); + if (Constants.LINUX) { + assertEquals(admissionControlService.getAdmissionControllers().size(), 2); + } else { + assertEquals(admissionControlService.getAdmissionControllers().size(), 1); + } + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> admissionControlService.registerAdmissionController(test) + ); + assertEquals(ex.getMessage(), "Not Supported AdmissionController : " + test); + } + + public void testAdmissionControllerSettings() { + admissionControlService = new AdmissionControlService(Settings.EMPTY, clusterService, threadPool, null); + AdmissionControlSettings admissionControlSettings = admissionControlService.admissionControlSettings; + List<AdmissionController> admissionControllerList = admissionControlService.getAdmissionControllers(); + if (Constants.LINUX) { + assertEquals(admissionControllerList.size(), 2); + } else { + assertEquals(admissionControllerList.size(), 1); + } + CpuBasedAdmissionController cpuBasedAdmissionController = (CpuBasedAdmissionController) admissionControlService + .getAdmissionController(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER); + assertEquals( + admissionControlSettings.isTransportLayerAdmissionControlEnabled(), + cpuBasedAdmissionController.isEnabledForTransportLayer( + cpuBasedAdmissionController.settings.getTransportLayerAdmissionControllerMode() + ) + ); + + Settings settings = Settings.builder() + 
.put(AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.DISABLED.getMode()) + .build(); + clusterService.getClusterSettings().applySettings(settings); + assertEquals( + admissionControlSettings.isTransportLayerAdmissionControlEnabled(), + cpuBasedAdmissionController.isEnabledForTransportLayer( + cpuBasedAdmissionController.settings.getTransportLayerAdmissionControllerMode() + ) + ); + assertFalse(admissionControlSettings.isTransportLayerAdmissionControlEnabled()); + + Settings newSettings = Settings.builder() + .put(settings) + .put( + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .build(); + clusterService.getClusterSettings().applySettings(newSettings); + assertFalse(admissionControlSettings.isTransportLayerAdmissionControlEnabled()); + assertTrue( + cpuBasedAdmissionController.isEnabledForTransportLayer( + cpuBasedAdmissionController.settings.getTransportLayerAdmissionControllerMode() + ) + ); + } + + public void testApplyAdmissionControllerDisabled() { + this.action = "indices:data/write/bulk[s][p]"; + admissionControlService = new AdmissionControlService(Settings.EMPTY, clusterService, threadPool, null); + admissionControlService.applyTransportAdmissionControl(this.action, null); + List<AdmissionController> admissionControllerList = admissionControlService.getAdmissionControllers(); + admissionControllerList.forEach(admissionController -> { + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.INDEXING.getType()), 0); + }); + } + + public void testApplyAdmissionControllerEnabled() { + this.action = "indices:data/write/bulk[s][p]"; + admissionControlService = new AdmissionControlService(Settings.EMPTY, clusterService, threadPool, null); + admissionControlService.applyTransportAdmissionControl(this.action, null); + assertEquals( + admissionControlService.getAdmissionController(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER) + .getRejectionCount(AdmissionControlActionType.INDEXING.getType()), + 0 + ); + + Settings settings = Settings.builder() + .put( + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.MONITOR.getMode() + ) + .build(); + clusterService.getClusterSettings().applySettings(settings); + List<AdmissionController> admissionControllerList = admissionControlService.getAdmissionControllers(); + if (Constants.LINUX) { + assertEquals(admissionControllerList.size(), 2); + } else { + assertEquals(admissionControllerList.size(), 1); + } + } + + public void testApplyAdmissionControllerEnforced() { + this.action = "indices:data/write/bulk[s][p]"; + admissionControlService = new AdmissionControlService(Settings.EMPTY, clusterService, threadPool, null); + admissionControlService.applyTransportAdmissionControl(this.action, null); + assertEquals( + admissionControlService.getAdmissionController(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER) + .getRejectionCount(AdmissionControlActionType.INDEXING.getType()), + 0 + ); + + Settings settings = Settings.builder() + .put( + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .build(); + clusterService.getClusterSettings().applySettings(settings); + List<AdmissionController> admissionControllerList = admissionControlService.getAdmissionControllers();
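+ // The IO-based admission controller is registered only on Linux, presumably because per-device IO stats are collected there alone, so Linux expects two controllers and other platforms only the CPU-based one. + if (Constants.LINUX) { + 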
assertEquals(admissionControllerList.size(), 2); + } else { + assertEquals(admissionControllerList.size(), 1); + } + } +} diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSettingsTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSettingsTests.java new file mode 100644 index 0000000000000..c11ee1cc608f6 --- /dev/null +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSettingsTests.java @@ -0,0 +1,103 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +import java.util.List; +import java.util.Set; + +public class AdmissionControlSettingsTests extends OpenSearchTestCase { + private ClusterService clusterService; + private ThreadPool threadPool; + + @Override + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool("admission_controller_settings_test"); + clusterService = new ClusterService( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdownNow(); + } + + public void testSettingsExists() { + Set<Setting<?>> settings = ClusterSettings.BUILT_IN_CLUSTER_SETTINGS; + assertTrue( + "All the admission controller settings should be supported built in settings", + settings.containsAll(List.of(AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE)) + ); + } + + public void testDefaultSettings() { + AdmissionControlSettings admissionControlSettings = new AdmissionControlSettings( + clusterService.getClusterSettings(), + Settings.EMPTY + ); + + assertFalse(admissionControlSettings.isTransportLayerAdmissionControlEnabled()); + assertFalse(admissionControlSettings.isTransportLayerAdmissionControlEnforced()); + assertEquals(admissionControlSettings.getAdmissionControlTransportLayerMode().getMode(), AdmissionControlSettings.Defaults.MODE); + } + + public void testGetConfiguredSettings() { + Settings settings = Settings.builder() + .put(AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.ENFORCED.getMode()) + .build(); + + AdmissionControlSettings admissionControlSettings = new AdmissionControlSettings(clusterService.getClusterSettings(), settings); + + assertTrue(admissionControlSettings.isTransportLayerAdmissionControlEnabled()); + assertTrue(admissionControlSettings.isTransportLayerAdmissionControlEnforced()); + } + + public void testUpdateAfterGetDefaultSettings() { + AdmissionControlSettings admissionControlSettings = new AdmissionControlSettings( + clusterService.getClusterSettings(), + Settings.EMPTY + ); + Settings settings = Settings.builder() + .put(AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.MONITOR.getMode()) + 
.build(); + clusterService.getClusterSettings().applySettings(settings); + assertTrue(admissionControlSettings.isTransportLayerAdmissionControlEnabled()); + assertFalse(admissionControlSettings.isTransportLayerAdmissionControlEnforced()); + } + + public void testUpdateAfterGetConfiguredSettings() { + Settings settings = Settings.builder() + .put(AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.MONITOR.getMode()) + .build(); + + AdmissionControlSettings admissionControlSettings = new AdmissionControlSettings(clusterService.getClusterSettings(), settings); + + Settings newSettings = Settings.builder() + .put(AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.ENFORCED.getMode()) + .build(); + + clusterService.getClusterSettings().applySettings(newSettings); + + assertTrue(admissionControlSettings.isTransportLayerAdmissionControlEnabled()); + assertTrue(admissionControlSettings.isTransportLayerAdmissionControlEnforced()); + } +} diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSingleNodeTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSingleNodeTests.java new file mode 100644 index 0000000000000..5ea062c19489e --- /dev/null +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/AdmissionControlSingleNodeTests.java @@ -0,0 +1,424 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol; + +import org.apache.lucene.util.Constants; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.action.admin.cluster.state.ClusterStateRequest; +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.action.bulk.BulkRequestBuilder; +import org.opensearch.action.bulk.BulkResponse; +import org.opensearch.action.search.SearchPhaseExecutionException; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.action.ActionFuture; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.node.ResourceUsageCollectorService; +import org.opensearch.node.resource.tracker.ResourceTrackerSettings; +import org.opensearch.ratelimitting.admissioncontrol.controllers.CpuBasedAdmissionController; +import org.opensearch.ratelimitting.admissioncontrol.controllers.IoBasedAdmissionController; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; +import org.opensearch.ratelimitting.admissioncontrol.stats.AdmissionControllerStats; +import org.opensearch.test.OpenSearchSingleNodeTestCase; +import org.junit.After; + +import java.util.HashMap; +import java.util.Map; + +import static org.opensearch.ratelimitting.admissioncontrol.AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE; +import static org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT; +import static org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT; +import static 
org.opensearch.ratelimitting.admissioncontrol.settings.IoBasedAdmissionControllerSettings.INDEXING_IO_USAGE_LIMIT; +import static org.opensearch.ratelimitting.admissioncontrol.settings.IoBasedAdmissionControllerSettings.SEARCH_IO_USAGE_LIMIT; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.hamcrest.Matchers.is; + +/** + * Single node integration tests for admission control + */ +public class AdmissionControlSingleNodeTests extends OpenSearchSingleNodeTestCase { + + public static final String INDEX_NAME = "test_index"; + + @Override + protected boolean resetNodeAfterTest() { + return true; + } + + @After + public void cleanup() { + client().admin().indices().prepareDelete(INDEX_NAME).get(); + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().putNull("*")) + .setTransientSettings(Settings.builder().putNull("*")) + ); + } + + @Override + protected Settings nodeSettings() { + return Settings.builder() + .put(super.nodeSettings()) + .put(ResourceTrackerSettings.GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), TimeValue.timeValueMillis(500)) + .put(ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), TimeValue.timeValueMillis(500)) + .put(ResourceTrackerSettings.GLOBAL_IO_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), TimeValue.timeValueMillis(5000)) + .put(ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.ENFORCED) + .put(SEARCH_CPU_USAGE_LIMIT.getKey(), 0) + .put(INDEXING_CPU_USAGE_LIMIT.getKey(), 0) + .build(); + } + + public void testAdmissionControlRejectionEnforcedMode() throws Exception { + ensureGreen(); + assertBusy(() -> assertEquals(1, getInstanceFromNode(ResourceUsageCollectorService.class).getAllNodeStatistics().size())); + client().admin().indices().prepareCreate(INDEX_NAME).execute().actionGet(); + BulkRequestBuilder bulk = client().prepareBulk(); + for (int i = 0; i < 3; i++) { + bulk.add(client().prepareIndex(INDEX_NAME).setSource("foo", "bar " + i)); + } + // Verify that cluster state is updated + ActionFuture<ClusterStateResponse> future2 = client().admin().cluster().state(new ClusterStateRequest()); + assertThat(future2.isDone(), is(true)); + + // verify bulk request hits 429 + BulkResponse res = client().bulk(bulk.request()).actionGet(); + assertEquals(429, res.getItems()[0].getFailure().getStatus().getStatus()); + AdmissionControlService admissionControlService = getInstanceFromNode(AdmissionControlService.class); + Map<String, AdmissionControllerStats> acStats = this.getAdmissionControlStats(admissionControlService); + assertEquals( + 1, + (long) acStats.get(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER) + .getRejectionCount() + .get(AdmissionControlActionType.INDEXING.getType()) + ); + if (Constants.LINUX) { + assertEquals( + 0, + (long) acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER) + .getRejectionCount() + .getOrDefault(AdmissionControlActionType.INDEXING.getType(), 0L) + ); + } else { + assertNull(acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER)); + } + client().admin().indices().prepareRefresh(INDEX_NAME).get(); + + // verify search request hits 429 + SearchRequest searchRequest = new SearchRequest(INDEX_NAME); + try { + client().search(searchRequest).actionGet(); + } catch (Exception e) { + assertTrue(((SearchPhaseExecutionException) e).getDetailedMessage().contains("OpenSearchRejectedExecutionException")); + } + acStats = 
this.getAdmissionControlStats(admissionControlService); + assertEquals( + 1, + (long) acStats.get(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER) + .getRejectionCount() + .get(AdmissionControlActionType.SEARCH.getType()) + ); + if (Constants.LINUX) { + assertEquals( + 0, + (long) acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER) + .getRejectionCount() + .getOrDefault(AdmissionControlActionType.SEARCH.getType(), 0L) + ); + } else { + assertNull(acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER)); + } + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.transientSettings( + Settings.builder() + .put(super.nodeSettings()) + .put(SEARCH_IO_USAGE_LIMIT.getKey(), 0) + .put(INDEXING_IO_USAGE_LIMIT.getKey(), 0) + .put(SEARCH_CPU_USAGE_LIMIT.getKey(), 101) + .put(INDEXING_CPU_USAGE_LIMIT.getKey(), 101) + ); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + bulk = client().prepareBulk(); + for (int i = 0; i < 3; i++) { + bulk.add(client().prepareIndex(INDEX_NAME).setSource("foo", "bar " + i)); + } + res = client().bulk(bulk.request()).actionGet(); + if (Constants.LINUX) { + assertEquals(429, res.getItems()[0].getFailure().getStatus().getStatus()); + } + admissionControlService = getInstanceFromNode(AdmissionControlService.class); + acStats = this.getAdmissionControlStats(admissionControlService); + assertEquals( + 1, + (long) acStats.get(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER) + .getRejectionCount() + .get(AdmissionControlActionType.INDEXING.getType()) + ); + if (Constants.LINUX) { + assertEquals( + 1, + (long) acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER) + .getRejectionCount() + .getOrDefault(AdmissionControlActionType.INDEXING.getType(), 0L) + ); + } else { + assertNull(acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER)); + } + client().admin().indices().prepareRefresh(INDEX_NAME).get(); + + // verify search request hits 429 + searchRequest = new SearchRequest(INDEX_NAME); + try { + client().search(searchRequest).actionGet(); + } catch (Exception e) { + assertTrue(((SearchPhaseExecutionException) e).getDetailedMessage().contains("OpenSearchRejectedExecutionException")); + } + acStats = this.getAdmissionControlStats(admissionControlService); + assertEquals( + 1, + (long) acStats.get(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER) + .getRejectionCount() + .get(AdmissionControlActionType.SEARCH.getType()) + ); + if (Constants.LINUX) { + assertEquals( + 1, + (long) acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER) + .getRejectionCount() + .getOrDefault(AdmissionControlActionType.SEARCH.getType(), 0L) + ); + } else { + assertNull(acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER)); + } + } + + public void testAdmissionControlRejectionMonitorOnlyMode() throws Exception { + assertBusy(() -> assertEquals(1, getInstanceFromNode(ResourceUsageCollectorService.class).getAllNodeStatistics().size())); + ActionFuture<ClusterStateResponse> future2 = client().admin().cluster().state(new ClusterStateRequest()); + assertThat(future2.isDone(), is(true)); + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.transientSettings( + Settings.builder() + .put(super.nodeSettings()) + .put(ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.MONITOR.getMode()) + ); + 
assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + BulkRequestBuilder bulk = client().prepareBulk(); + for (int i = 0; i < 3; i++) { + bulk.add(client().prepareIndex(INDEX_NAME).setSource("foo", "bar " + i)); + } + // verify bulk request success but admission control having rejections stats + BulkResponse res = client().bulk(bulk.request()).actionGet(); + assertFalse(res.hasFailures()); + AdmissionControlService admissionControlService = getInstanceFromNode(AdmissionControlService.class); + Map<String, AdmissionControllerStats> acStats = this.getAdmissionControlStats(admissionControlService); + assertEquals( + 1, + (long) acStats.get(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER) + .getRejectionCount() + .get(AdmissionControlActionType.INDEXING.getType()) + ); + client().admin().indices().prepareRefresh(INDEX_NAME).get(); + + // verify search request success but admission control having rejections stats + SearchRequest searchRequest = new SearchRequest(INDEX_NAME); + SearchResponse searchResponse = client().search(searchRequest).actionGet(); + assertEquals(3, searchResponse.getHits().getHits().length); + acStats = this.getAdmissionControlStats(admissionControlService); + assertEquals( + 1, + (long) acStats.get(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER) + .getRejectionCount() + .get(AdmissionControlActionType.SEARCH.getType()) + ); + + updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.transientSettings( + Settings.builder() + .put(super.nodeSettings()) + .put(SEARCH_IO_USAGE_LIMIT.getKey(), 0) + .put(INDEXING_IO_USAGE_LIMIT.getKey(), 0) + .put(SEARCH_CPU_USAGE_LIMIT.getKey(), 101) + .put(INDEXING_CPU_USAGE_LIMIT.getKey(), 101) + ); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + bulk = client().prepareBulk(); + for (int i = 0; i < 3; i++) { + bulk.add(client().prepareIndex(INDEX_NAME).setSource("foo", "bar " + i)); + } + // verify bulk request success but admission control having rejections stats + res = client().bulk(bulk.request()).actionGet(); + assertFalse(res.hasFailures()); + acStats = this.getAdmissionControlStats(admissionControlService); + assertEquals( + 1, + (long) acStats.get(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER) + .getRejectionCount() + .get(AdmissionControlActionType.INDEXING.getType()) + ); + if (Constants.LINUX) { + assertEquals( + 1, + (long) acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER) + .getRejectionCount() + .getOrDefault(AdmissionControlActionType.INDEXING.getType(), 0L) + ); + } else { + assertNull(acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER)); + } + searchRequest = new SearchRequest(INDEX_NAME); + searchResponse = client().search(searchRequest).actionGet(); + assertEquals(3, searchResponse.getHits().getHits().length); + acStats = this.getAdmissionControlStats(admissionControlService); + assertEquals( + 1, + (long) acStats.get(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER) + .getRejectionCount() + .get(AdmissionControlActionType.SEARCH.getType()) + ); + if (Constants.LINUX) { + assertEquals( + 1, + (long) acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER) + .getRejectionCount() + .getOrDefault(AdmissionControlActionType.SEARCH.getType(), 0L) + ); + } else { + assertNull(acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER)); + } + } + + public void testAdmissionControlRejectionDisabledMode() throws 
Exception { + assertBusy(() -> assertEquals(1, getInstanceFromNode(ResourceUsageCollectorService.class).getAllNodeStatistics().size())); + ActionFuture<ClusterStateResponse> future2 = client().admin().cluster().state(new ClusterStateRequest()); + assertThat(future2.isDone(), is(true)); + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.transientSettings( + Settings.builder().put(super.nodeSettings()).put(ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.DISABLED) + ); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + BulkRequestBuilder bulk = client().prepareBulk(); + for (int i = 0; i < 3; i++) { + bulk.add(client().prepareIndex(INDEX_NAME).setSource("foo", "bar " + i)); + } + // verify the bulk request succeeds with no rejections + BulkResponse res = client().bulk(bulk.request()).actionGet(); + assertFalse(res.hasFailures()); + AdmissionControlService admissionControlService = getInstanceFromNode(AdmissionControlService.class); + Map<String, AdmissionControllerStats> acStats = this.getAdmissionControlStats(admissionControlService); + + assertEquals(0, acStats.get(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER).getRejectionCount().size()); + client().admin().indices().prepareRefresh(INDEX_NAME).get(); + + // verify the search request succeeds with no rejections + SearchRequest searchRequest = new SearchRequest(INDEX_NAME); + SearchResponse searchResponse = client().search(searchRequest).actionGet(); + assertEquals(3, searchResponse.getHits().getHits().length); + acStats = this.getAdmissionControlStats(admissionControlService); + assertEquals(0, acStats.get(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER).getRejectionCount().size()); + updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.transientSettings( + Settings.builder() + .put(super.nodeSettings()) + .put(SEARCH_IO_USAGE_LIMIT.getKey(), 0) + .put(INDEXING_IO_USAGE_LIMIT.getKey(), 0) + .put(SEARCH_CPU_USAGE_LIMIT.getKey(), 101) + .put(INDEXING_CPU_USAGE_LIMIT.getKey(), 101) + ); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + bulk = client().prepareBulk(); + for (int i = 0; i < 3; i++) { + bulk.add(client().prepareIndex(INDEX_NAME).setSource("foo", "bar " + i)); + } + // verify the bulk request still succeeds with no new rejections even though the limits are breached, since admission control is disabled + res = client().bulk(bulk.request()).actionGet(); + assertFalse(res.hasFailures()); + acStats = this.getAdmissionControlStats(admissionControlService); + assertEquals(0, acStats.get(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER).getRejectionCount().size()); + if (Constants.LINUX) { + assertEquals(0, acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER).getRejectionCount().size()); + } else { + assertNull(acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER)); + } + + searchRequest = new SearchRequest(INDEX_NAME); + searchResponse = client().search(searchRequest).actionGet(); + assertEquals(3, searchResponse.getHits().getHits().length); + acStats = this.getAdmissionControlStats(admissionControlService); + assertEquals(0, acStats.get(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER).getRejectionCount().size()); + if (Constants.LINUX) { + assertEquals(0, acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER).getRejectionCount().size()); + } else { +
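// the IO-based admission controller is registered only on Linux, so no IO stats entry is expected on other platforms +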
assertNull(acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER)); + } + } + + public void testAdmissionControlWithinLimits() throws Exception { + assertBusy(() -> assertEquals(1, getInstanceFromNode(ResourceUsageCollectorService.class).getAllNodeStatistics().size())); + ActionFuture<ClusterStateResponse> future2 = client().admin().cluster().state(new ClusterStateRequest()); + assertThat(future2.isDone(), is(true)); + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.transientSettings( + Settings.builder() + .put(super.nodeSettings()) + .put(ADMISSION_CONTROL_TRANSPORT_LAYER_MODE.getKey(), AdmissionControlMode.ENFORCED) + .put(SEARCH_CPU_USAGE_LIMIT.getKey(), 101) + .put(INDEXING_CPU_USAGE_LIMIT.getKey(), 101) + .put(SEARCH_IO_USAGE_LIMIT.getKey(), 101) + .put(INDEXING_IO_USAGE_LIMIT.getKey(), 101) + ); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + BulkRequestBuilder bulk = client().prepareBulk(); + for (int i = 0; i < 3; i++) { + bulk.add(client().prepareIndex(INDEX_NAME).setSource("foo", "bar " + i)); + } + // verify bulk request success and no rejections + BulkResponse res = client().bulk(bulk.request()).actionGet(); + assertFalse(res.hasFailures()); + AdmissionControlService admissionControlService = getInstanceFromNode(AdmissionControlService.class); + Map<String, AdmissionControllerStats> acStats = this.getAdmissionControlStats(admissionControlService); + assertEquals(0, acStats.get(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER).getRejectionCount().size()); + if (Constants.LINUX) { + assertEquals(0, acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER).getRejectionCount().size()); + } else { + assertNull(acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER)); + } + client().admin().indices().prepareRefresh(INDEX_NAME).get(); + + // verify search request success and no rejections + SearchRequest searchRequest = new SearchRequest(INDEX_NAME); + SearchResponse searchResponse = client().search(searchRequest).actionGet(); + assertEquals(3, searchResponse.getHits().getHits().length); + acStats = this.getAdmissionControlStats(admissionControlService); + assertEquals(0, acStats.get(CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER).getRejectionCount().size()); + if (Constants.LINUX) { + assertEquals(0, acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER).getRejectionCount().size()); + } else { + assertNull(acStats.get(IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER)); + } + } + + Map<String, AdmissionControllerStats> getAdmissionControlStats(AdmissionControlService admissionControlService) { + Map<String, AdmissionControllerStats> acStats = new HashMap<>(); + for (AdmissionControllerStats admissionControllerStats : admissionControlService.stats().getAdmissionControllerStatsList()) { + acStats.put(admissionControllerStats.getAdmissionControllerName(), admissionControllerStats); + } + return acStats; + } +} diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CpuBasedAdmissionControllerTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CpuBasedAdmissionControllerTests.java new file mode 100644 index 0000000000000..e72c0cd58ed64 --- /dev/null +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/CpuBasedAdmissionControllerTests.java @@ -0,0 +1,143 @@ +/* + * 
SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol.controllers; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.node.ResourceUsageCollectorService; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; +import org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +import org.mockito.Mockito; + +public class CpuBasedAdmissionControllerTests extends OpenSearchTestCase { + private ClusterService clusterService; + private ThreadPool threadPool; + CpuBasedAdmissionController admissionController = null; + String action = "TEST_ACTION"; + + @Override + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool("admission_controller_settings_test"); + clusterService = new ClusterService( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdownNow(); + } + + public void testCheckDefaultParameters() { + admissionController = new CpuBasedAdmissionController( + CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER, + null, + clusterService, + Settings.EMPTY + ); + assertEquals(admissionController.getName(), CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.INDEXING.getType()), 0); + assertEquals(admissionController.settings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.DISABLED); + assertFalse( + admissionController.isEnabledForTransportLayer(admissionController.settings.getTransportLayerAdmissionControllerMode()) + ); + } + + public void testCheckUpdateSettings() { + admissionController = new CpuBasedAdmissionController( + CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER, + null, + clusterService, + Settings.EMPTY + ); + Settings settings = Settings.builder() + .put( + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .build(); + clusterService.getClusterSettings().applySettings(settings); + + assertEquals(admissionController.getName(), CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.INDEXING.getType()), 0); + assertEquals(admissionController.settings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.ENFORCED); + assertTrue(admissionController.isEnabledForTransportLayer(admissionController.settings.getTransportLayerAdmissionControllerMode())); + } + + public void testApplyControllerWithDefaultSettings() { + ResourceUsageCollectorService rs = Mockito.mock(ResourceUsageCollectorService.class); + admissionController = new CpuBasedAdmissionController( + CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER, + rs, + clusterService, + Settings.EMPTY + 
); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.INDEXING.getType()), 0); + assertEquals(admissionController.settings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.DISABLED); + action = "indices:data/write/bulk[s][p]"; + admissionController.apply(action, AdmissionControlActionType.INDEXING); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.INDEXING.getType()), 0); + } + + public void testApplyControllerWhenSettingsEnabled() throws Exception { + Settings settings = Settings.builder() + .put( + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .build(); + ResourceUsageCollectorService rs = Mockito.mock(ResourceUsageCollectorService.class); + admissionController = new CpuBasedAdmissionController( + CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER, + rs, + clusterService, + settings + ); + assertTrue(admissionController.isEnabledForTransportLayer(admissionController.settings.getTransportLayerAdmissionControllerMode())); + assertTrue( + admissionController.isAdmissionControllerEnforced(admissionController.settings.getTransportLayerAdmissionControllerMode()) + ); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.INDEXING.getType()), 0); + // actual admission control rejection behavior is asserted in the integration tests + } + + public void testRejectionCount() { + Settings settings = Settings.builder() + .put( + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .build(); + ResourceUsageCollectorService rs = Mockito.mock(ResourceUsageCollectorService.class); + admissionController = new CpuBasedAdmissionController( + CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER, + rs, + clusterService, + settings + ); + admissionController.addRejectionCount(AdmissionControlActionType.SEARCH.getType(), 1); + admissionController.addRejectionCount(AdmissionControlActionType.INDEXING.getType(), 3); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.SEARCH.getType()), 1); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.INDEXING.getType()), 3); + admissionController.addRejectionCount(AdmissionControlActionType.SEARCH.getType(), 1); + admissionController.addRejectionCount(AdmissionControlActionType.INDEXING.getType(), 2); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.SEARCH.getType()), 2); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.INDEXING.getType()), 5); + } +} diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/IoBasedAdmissionControllerTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/IoBasedAdmissionControllerTests.java new file mode 100644 index 0000000000000..c5a2208f49ce6 --- /dev/null +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/controllers/IoBasedAdmissionControllerTests.java @@ -0,0 +1,141 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.ratelimitting.admissioncontrol.controllers; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.node.ResourceUsageCollectorService; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; +import org.opensearch.ratelimitting.admissioncontrol.settings.IoBasedAdmissionControllerSettings; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +import org.mockito.Mockito; + +public class IoBasedAdmissionControllerTests extends OpenSearchTestCase { + private ClusterService clusterService; + private ThreadPool threadPool; + IoBasedAdmissionController admissionController = null; + String action = "TEST_ACTION"; + + @Override + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool("admission_controller_settings_test"); + clusterService = new ClusterService( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdownNow(); + } + + public void testCheckDefaultParameters() { + admissionController = new IoBasedAdmissionController( + IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER, + null, + clusterService, + Settings.EMPTY + ); + assertEquals(admissionController.getName(), IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.INDEXING.getType()), 0); + assertEquals(admissionController.settings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.DISABLED); + assertFalse( + admissionController.isEnabledForTransportLayer(admissionController.settings.getTransportLayerAdmissionControllerMode()) + ); + } + + public void testCheckUpdateSettings() { + admissionController = new IoBasedAdmissionController( + IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER, + null, + clusterService, + Settings.EMPTY + ); + Settings settings = Settings.builder() + .put( + IoBasedAdmissionControllerSettings.IO_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .build(); + clusterService.getClusterSettings().applySettings(settings); + assertEquals(admissionController.getName(), IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.INDEXING.getType()), 0); + assertEquals(admissionController.settings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.ENFORCED); + assertTrue(admissionController.isEnabledForTransportLayer(admissionController.settings.getTransportLayerAdmissionControllerMode())); + } + + public void testApplyControllerWithDefaultSettings() { + ResourceUsageCollectorService rs = Mockito.mock(ResourceUsageCollectorService.class); + admissionController = new IoBasedAdmissionController( + IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER, + rs, + clusterService, + Settings.EMPTY + ); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.INDEXING.getType()), 0); + assertEquals(admissionController.settings.getTransportLayerAdmissionControllerMode(), 
AdmissionControlMode.DISABLED); + action = "indices:data/write/bulk[s][p]"; + admissionController.apply(action, AdmissionControlActionType.INDEXING); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.INDEXING.getType()), 0); + } + + public void testApplyControllerWhenSettingsEnabled() throws Exception { + Settings settings = Settings.builder() + .put( + IoBasedAdmissionControllerSettings.IO_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .build(); + ResourceUsageCollectorService rs = Mockito.mock(ResourceUsageCollectorService.class); + admissionController = new IoBasedAdmissionController( + IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER, + rs, + clusterService, + settings + ); + assertTrue(admissionController.isEnabledForTransportLayer(admissionController.settings.getTransportLayerAdmissionControllerMode())); + assertTrue( + admissionController.isAdmissionControllerEnforced(admissionController.settings.getTransportLayerAdmissionControllerMode()) + ); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.INDEXING.getType()), 0); + } + + public void testRejectionCount() { + Settings settings = Settings.builder() + .put( + IoBasedAdmissionControllerSettings.IO_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .build(); + ResourceUsageCollectorService rs = Mockito.mock(ResourceUsageCollectorService.class); + admissionController = new IoBasedAdmissionController( + IoBasedAdmissionController.IO_BASED_ADMISSION_CONTROLLER, + rs, + clusterService, + settings + ); + admissionController.addRejectionCount(AdmissionControlActionType.SEARCH.getType(), 1); + admissionController.addRejectionCount(AdmissionControlActionType.INDEXING.getType(), 3); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.SEARCH.getType()), 1); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.INDEXING.getType()), 3); + admissionController.addRejectionCount(AdmissionControlActionType.SEARCH.getType(), 1); + admissionController.addRejectionCount(AdmissionControlActionType.INDEXING.getType(), 2); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.SEARCH.getType()), 2); + assertEquals(admissionController.getRejectionCount(AdmissionControlActionType.INDEXING.getType()), 5); + } +} diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlActionTypeTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlActionTypeTests.java new file mode 100644 index 0000000000000..15a25e6cbca1c --- /dev/null +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlActionTypeTests.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.ratelimitting.admissioncontrol.enums; + +import org.opensearch.test.OpenSearchTestCase; + +public class AdmissionControlActionTypeTests extends OpenSearchTestCase { + + public void testValidActionType() { + assertEquals(AdmissionControlActionType.SEARCH.getType(), "search"); + assertEquals(AdmissionControlActionType.INDEXING.getType(), "indexing"); + assertEquals(AdmissionControlActionType.fromName("search"), AdmissionControlActionType.SEARCH); + assertEquals(AdmissionControlActionType.fromName("indexing"), AdmissionControlActionType.INDEXING); + } + + public void testInValidActionType() { + String name = "test"; + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> AdmissionControlActionType.fromName(name)); + assertEquals(ex.getMessage(), "Not Supported TransportAction Type: " + name); + } +} diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlModeTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlModeTests.java new file mode 100644 index 0000000000000..98c0f3c7cf24c --- /dev/null +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/enums/AdmissionControlModeTests.java @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol.enums; + +import org.opensearch.test.OpenSearchTestCase; + +public class AdmissionControlModeTests extends OpenSearchTestCase { + + public void testValidActionType() { + assertEquals(AdmissionControlMode.DISABLED.getMode(), "disabled"); + assertEquals(AdmissionControlMode.ENFORCED.getMode(), "enforced"); + assertEquals(AdmissionControlMode.MONITOR.getMode(), "monitor_only"); + assertEquals(AdmissionControlMode.fromName("disabled"), AdmissionControlMode.DISABLED); + assertEquals(AdmissionControlMode.fromName("enforced"), AdmissionControlMode.ENFORCED); + assertEquals(AdmissionControlMode.fromName("monitor_only"), AdmissionControlMode.MONITOR); + } + + public void testInValidActionType() { + String name = "TEST"; + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> AdmissionControlMode.fromName(name)); + assertEquals(ex.getMessage(), "Invalid AdmissionControlMode: " + name); + } +} diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControllerSettingsTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControllerSettingsTests.java new file mode 100644 index 0000000000000..9ce28bc7fdb40 --- /dev/null +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/CPUBasedAdmissionControllerSettingsTests.java @@ -0,0 +1,152 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.ratelimitting.admissioncontrol.settings; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +import java.util.Arrays; +import java.util.Set; + +public class CPUBasedAdmissionControllerSettingsTests extends OpenSearchTestCase { + private ClusterService clusterService; + private ThreadPool threadPool; + + @Override + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool("admission_controller_settings_test"); + clusterService = new ClusterService( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdownNow(); + } + + public void testSettingsExists() { + Set<Setting<?>> settings = ClusterSettings.BUILT_IN_CLUSTER_SETTINGS; + assertTrue( + "All the cpu based admission controller settings should be supported built in settings", + settings.containsAll( + Arrays.asList( + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE, + CpuBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT, + CpuBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT + ) + ) + ); + } + + public void testDefaultSettings() { + CpuBasedAdmissionControllerSettings cpuBasedAdmissionControllerSettings = new CpuBasedAdmissionControllerSettings( + clusterService.getClusterSettings(), + Settings.EMPTY + ); + long percent = 95; + assertEquals(cpuBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.DISABLED); + assertEquals(cpuBasedAdmissionControllerSettings.getIndexingCPULimit().longValue(), percent); + assertEquals(cpuBasedAdmissionControllerSettings.getSearchCPULimit().longValue(), percent); + } + + public void testGetConfiguredSettings() { + long percent = 95; + long indexingPercent = 85; + Settings settings = Settings.builder() + .put( + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .put(CpuBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT.getKey(), indexingPercent) + .build(); + + CpuBasedAdmissionControllerSettings cpuBasedAdmissionControllerSettings = new CpuBasedAdmissionControllerSettings( + clusterService.getClusterSettings(), + settings + ); + assertEquals(cpuBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.ENFORCED); + assertEquals(cpuBasedAdmissionControllerSettings.getSearchCPULimit().longValue(), percent); + assertEquals(cpuBasedAdmissionControllerSettings.getIndexingCPULimit().longValue(), indexingPercent); + } + + public void testUpdateAfterGetDefaultSettings() { + long percent = 95; + long searchPercent = 80; + CpuBasedAdmissionControllerSettings cpuBasedAdmissionControllerSettings = new CpuBasedAdmissionControllerSettings( + clusterService.getClusterSettings(), + Settings.EMPTY + ); + Settings settings = Settings.builder() + .put( + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + 
AdmissionControlMode.ENFORCED.getMode() + ) + .put(CpuBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT.getKey(), searchPercent) + .build(); + clusterService.getClusterSettings().applySettings(settings); + assertEquals(cpuBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.ENFORCED); + assertEquals(cpuBasedAdmissionControllerSettings.getSearchCPULimit().longValue(), searchPercent); + assertEquals(cpuBasedAdmissionControllerSettings.getIndexingCPULimit().longValue(), percent); + } + + public void testUpdateAfterGetConfiguredSettings() { + long percent = 95; + long indexingPercent = 85; + long searchPercent = 80; + Settings settings = Settings.builder() + .put( + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .put(CpuBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT.getKey(), searchPercent) + .build(); + + CpuBasedAdmissionControllerSettings cpuBasedAdmissionControllerSettings = new CpuBasedAdmissionControllerSettings( + clusterService.getClusterSettings(), + settings + ); + assertEquals(cpuBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.ENFORCED); + assertEquals(cpuBasedAdmissionControllerSettings.getSearchCPULimit().longValue(), searchPercent); + assertEquals(cpuBasedAdmissionControllerSettings.getIndexingCPULimit().longValue(), percent); + + Settings updatedSettings = Settings.builder() + .put( + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.MONITOR.getMode() + ) + .put(CpuBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT.getKey(), indexingPercent) + .build(); + clusterService.getClusterSettings().applySettings(updatedSettings); + assertEquals(cpuBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.MONITOR); + assertEquals(cpuBasedAdmissionControllerSettings.getSearchCPULimit().longValue(), searchPercent); + assertEquals(cpuBasedAdmissionControllerSettings.getIndexingCPULimit().longValue(), indexingPercent); + + searchPercent = 70; + + updatedSettings = Settings.builder() + .put(updatedSettings) + .put(CpuBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT.getKey(), searchPercent) + .build(); + clusterService.getClusterSettings().applySettings(updatedSettings); + + assertEquals(cpuBasedAdmissionControllerSettings.getSearchCPULimit().longValue(), searchPercent); + assertEquals(cpuBasedAdmissionControllerSettings.getIndexingCPULimit().longValue(), indexingPercent); + } +} diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettingsTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettingsTests.java new file mode 100644 index 0000000000000..ff777c175ec0e --- /dev/null +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/settings/IoBasedAdmissionControllerSettingsTests.java @@ -0,0 +1,152 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.ratelimitting.admissioncontrol.settings; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +import java.util.Arrays; +import java.util.Set; + +public class IoBasedAdmissionControllerSettingsTests extends OpenSearchTestCase { + private ClusterService clusterService; + private ThreadPool threadPool; + + @Override + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool("io_based_admission_controller_settings_test"); + clusterService = new ClusterService( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdownNow(); + } + + public void testSettingsExists() { + Set<Setting<?>> settings = ClusterSettings.BUILT_IN_CLUSTER_SETTINGS; + assertTrue( + "All the IO based admission controller settings should be supported built in settings", + settings.containsAll( + Arrays.asList( + IoBasedAdmissionControllerSettings.IO_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE, + IoBasedAdmissionControllerSettings.SEARCH_IO_USAGE_LIMIT, + IoBasedAdmissionControllerSettings.INDEXING_IO_USAGE_LIMIT + ) + ) + ); + } + + public void testDefaultSettings() { + IoBasedAdmissionControllerSettings ioBasedAdmissionControllerSettings = new IoBasedAdmissionControllerSettings( + clusterService.getClusterSettings(), + Settings.EMPTY + ); + long percent = 95; + assertEquals(ioBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.DISABLED); + assertEquals(ioBasedAdmissionControllerSettings.getIndexingIOUsageLimit().longValue(), percent); + assertEquals(ioBasedAdmissionControllerSettings.getSearchIOUsageLimit().longValue(), percent); + } + + public void testGetConfiguredSettings() { + long percent = 95; + long indexingPercent = 85; + Settings settings = Settings.builder() + .put( + IoBasedAdmissionControllerSettings.IO_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .put(IoBasedAdmissionControllerSettings.INDEXING_IO_USAGE_LIMIT.getKey(), indexingPercent) + .build(); + + IoBasedAdmissionControllerSettings ioBasedAdmissionControllerSettings = new IoBasedAdmissionControllerSettings( + clusterService.getClusterSettings(), + settings + ); + assertEquals(ioBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.ENFORCED); + assertEquals(ioBasedAdmissionControllerSettings.getSearchIOUsageLimit().longValue(), percent); + assertEquals(ioBasedAdmissionControllerSettings.getIndexingIOUsageLimit().longValue(), indexingPercent); + } + + public void testUpdateAfterGetDefaultSettings() { + long percent = 95; + long searchPercent = 80; + IoBasedAdmissionControllerSettings ioBasedAdmissionControllerSettings = new IoBasedAdmissionControllerSettings( + clusterService.getClusterSettings(), + Settings.EMPTY + ); + Settings settings = Settings.builder() + .put( + IoBasedAdmissionControllerSettings.IO_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + 
) + .put(IoBasedAdmissionControllerSettings.SEARCH_IO_USAGE_LIMIT.getKey(), searchPercent) + .build(); + + clusterService.getClusterSettings().applySettings(settings); + assertEquals(ioBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.ENFORCED); + assertEquals(ioBasedAdmissionControllerSettings.getSearchIOUsageLimit().longValue(), searchPercent); + assertEquals(ioBasedAdmissionControllerSettings.getIndexingIOUsageLimit().longValue(), percent); + } + + public void testUpdateAfterGetConfiguredSettings() { + long percent = 95; + long indexingPercent = 85; + long searchPercent = 80; + Settings settings = Settings.builder() + .put( + IoBasedAdmissionControllerSettings.IO_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .put(IoBasedAdmissionControllerSettings.SEARCH_IO_USAGE_LIMIT.getKey(), searchPercent) + .build(); + + IoBasedAdmissionControllerSettings ioBasedAdmissionControllerSettings = new IoBasedAdmissionControllerSettings( + clusterService.getClusterSettings(), + settings + ); + assertEquals(ioBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.ENFORCED); + assertEquals(ioBasedAdmissionControllerSettings.getSearchIOUsageLimit().longValue(), searchPercent); + assertEquals(ioBasedAdmissionControllerSettings.getIndexingIOUsageLimit().longValue(), percent); + + Settings updatedSettings = Settings.builder() + .put( + IoBasedAdmissionControllerSettings.IO_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.MONITOR.getMode() + ) + .put(IoBasedAdmissionControllerSettings.INDEXING_IO_USAGE_LIMIT.getKey(), indexingPercent) + .build(); + clusterService.getClusterSettings().applySettings(updatedSettings); + assertEquals(ioBasedAdmissionControllerSettings.getTransportLayerAdmissionControllerMode(), AdmissionControlMode.MONITOR); + assertEquals(ioBasedAdmissionControllerSettings.getSearchIOUsageLimit().longValue(), searchPercent); + assertEquals(ioBasedAdmissionControllerSettings.getIndexingIOUsageLimit().longValue(), indexingPercent); + + searchPercent = 70; + updatedSettings = Settings.builder() + .put(updatedSettings) + .put(IoBasedAdmissionControllerSettings.SEARCH_IO_USAGE_LIMIT.getKey(), searchPercent) + .build(); + + clusterService.getClusterSettings().applySettings(updatedSettings); + assertEquals(ioBasedAdmissionControllerSettings.getSearchIOUsageLimit().longValue(), searchPercent); + assertEquals(ioBasedAdmissionControllerSettings.getIndexingIOUsageLimit().longValue(), indexingPercent); + } +} diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControlStatsTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControlStatsTests.java new file mode 100644 index 0000000000000..7b4db5f787d6e --- /dev/null +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControlStatsTests.java @@ -0,0 +1,92 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.ratelimitting.admissioncontrol.stats; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.node.ResourceUsageCollectorService; +import org.opensearch.ratelimitting.admissioncontrol.controllers.AdmissionController; +import org.opensearch.ratelimitting.admissioncontrol.controllers.CpuBasedAdmissionController; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; +import org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.mockito.Mockito.mock; + +public class AdmissionControlStatsTests extends OpenSearchTestCase { + AdmissionController admissionController; + AdmissionControllerStats admissionControllerStats; + AdmissionControlStats admissionControlStats; + private ThreadPool threadPool; + + @Override + public void setUp() throws Exception { + super.setUp(); + Settings settings = Settings.builder() + .put( + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .build(); + threadPool = new TestThreadPool("admission_controller_settings_test"); + ClusterService clusterService = new ClusterService( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ); + admissionController = new CpuBasedAdmissionController( + CpuBasedAdmissionController.CPU_BASED_ADMISSION_CONTROLLER, + mock(ResourceUsageCollectorService.class), + clusterService, + settings + ); + admissionControllerStats = new AdmissionControllerStats(admissionController); + // use a distinct name for the local list so it does not shadow the admissionControllerStats field above + List<AdmissionControllerStats> admissionControllerStatsList = new ArrayList<>(); + admissionControlStats = new AdmissionControlStats(admissionControllerStatsList); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdownNow(); + } + + public void testDefaults() throws IOException { + assertEquals(admissionControlStats.getAdmissionControllerStatsList().size(), 0); + } + + public void testRejectionCount() throws IOException { + admissionController.addRejectionCount(AdmissionControlActionType.SEARCH.getType(), 11); + admissionController.addRejectionCount(AdmissionControlActionType.INDEXING.getType(), 1); + admissionControllerStats = new AdmissionControllerStats(admissionController); + admissionControlStats = new AdmissionControlStats(List.of(admissionControllerStats)); + XContentBuilder builder = JsonXContent.contentBuilder(); + builder.startObject(); + builder = admissionControlStats.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + String response = builder.toString(); + assertEquals( + response, + "{\"admission_control\":{\"global_cpu_usage\":{\"transport\":{\"rejection_count\":{\"search\":11,\"indexing\":1}}}}}" + ); + AdmissionControlStats admissionControlStats1 = admissionControlStats; + assertEquals(admissionControlStats.hashCode(),
admissionControlStats1.hashCode()); + } +} diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControllerStatsTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControllerStatsTests.java new file mode 100644 index 0000000000000..fe0399e79a5f4 --- /dev/null +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/stats/AdmissionControllerStatsTests.java @@ -0,0 +1,82 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol.stats; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.node.ResourceUsageCollectorService; +import org.opensearch.ratelimitting.admissioncontrol.controllers.AdmissionController; +import org.opensearch.ratelimitting.admissioncontrol.controllers.CpuBasedAdmissionController; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; +import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlMode; +import org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +import java.io.IOException; + +import static org.mockito.Mockito.mock; + +public class AdmissionControllerStatsTests extends OpenSearchTestCase { + AdmissionController admissionController; + AdmissionControllerStats admissionControllerStats; + private ThreadPool threadPool; + + @Override + public void setUp() throws Exception { + super.setUp(); + Settings settings = Settings.builder() + .put( + CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE.getKey(), + AdmissionControlMode.ENFORCED.getMode() + ) + .build(); + threadPool = new TestThreadPool("admission_controller_settings_test"); + ClusterService clusterService = new ClusterService( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ); + admissionController = new CpuBasedAdmissionController("TEST", mock(ResourceUsageCollectorService.class), clusterService, settings); + admissionControllerStats = new AdmissionControllerStats(admissionController); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdownNow(); + } + + public void testDefaults() throws IOException { + assertEquals(admissionControllerStats.getRejectionCount().size(), 0); + assertEquals(admissionControllerStats.getAdmissionControllerName(), "TEST"); + } + + public void testRejectionCount() throws IOException { + admissionController.addRejectionCount(AdmissionControlActionType.SEARCH.getType(), 11); + admissionController.addRejectionCount(AdmissionControlActionType.INDEXING.getType(), 1); + admissionControllerStats = new AdmissionControllerStats(admissionController); + long searchRejection = admissionControllerStats.getRejectionCount().getOrDefault(AdmissionControlActionType.SEARCH.getType(), 0L); + long indexingRejection = 
admissionControllerStats.getRejectionCount() + .getOrDefault(AdmissionControlActionType.INDEXING.getType(), 0L); + assertEquals(searchRejection, 11); + assertEquals(indexingRejection, 1); + XContentBuilder builder = JsonXContent.contentBuilder(); + builder = admissionControllerStats.toXContent(builder, ToXContent.EMPTY_PARAMS); + String response = builder.toString(); + assertEquals(response, "{\"transport\":{\"rejection_count\":{\"search\":11,\"indexing\":1}}}"); + AdmissionControllerStats admissionControllerStats1 = admissionControllerStats; + assertEquals(admissionControllerStats.hashCode(), admissionControllerStats1.hashCode()); + } +} diff --git a/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportHandlerTests.java b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportHandlerTests.java new file mode 100644 index 0000000000000..0c95769e19489 --- /dev/null +++ b/server/src/test/java/org/opensearch/ratelimitting/admissioncontrol/transport/AdmissionControlTransportHandlerTests.java @@ -0,0 +1,92 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ratelimitting.admissioncontrol.transport; + +import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlService; +import org.opensearch.tasks.Task; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.transport.TransportChannel; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.TransportRequestHandler; + +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; + +public class AdmissionControlTransportHandlerTests extends OpenSearchTestCase { + AdmissionControlTransportHandler<TransportRequest> admissionControlTransportHandler; + + public void testHandlerInvoked() throws Exception { + String action = "TEST"; + InterceptingRequestHandler<TransportRequest> handler = new InterceptingRequestHandler<>(action); + admissionControlTransportHandler = new AdmissionControlTransportHandler<TransportRequest>( + action, + handler, + mock(AdmissionControlService.class), + false, + null + ); + admissionControlTransportHandler.messageReceived(mock(TransportRequest.class), mock(TransportChannel.class), mock(Task.class)); + assertEquals(1, handler.count); + } + + public void testHandlerInvokedRejectedException() throws Exception { + String action = "TEST"; + AdmissionControlService admissionControlService = mock(AdmissionControlService.class); + doThrow(new OpenSearchRejectedExecutionException()).when(admissionControlService).applyTransportAdmissionControl(action, null); + InterceptingRequestHandler<TransportRequest> handler = new InterceptingRequestHandler<>(action); + admissionControlTransportHandler = new AdmissionControlTransportHandler<TransportRequest>( + action, + handler, + admissionControlService, + false, + null + ); + admissionControlTransportHandler.messageReceived(mock(TransportRequest.class), mock(TransportChannel.class), mock(Task.class)); + assertEquals(0, handler.count); + handler.messageReceived(mock(TransportRequest.class), mock(TransportChannel.class), mock(Task.class)); + assertEquals(1, handler.count); + } + + public void testHandlerInvokedRandomException() throws Exception { + String action = 
"TEST"; + AdmissionControlService admissionControlService = mock(AdmissionControlService.class); + doThrow(new NullPointerException()).when(admissionControlService).applyTransportAdmissionControl(action, null); + InterceptingRequestHandler<TransportRequest> handler = new InterceptingRequestHandler<>(action); + admissionControlTransportHandler = new AdmissionControlTransportHandler<TransportRequest>( + action, + handler, + admissionControlService, + false, + null + ); + try { + admissionControlTransportHandler.messageReceived(mock(TransportRequest.class), mock(TransportChannel.class), mock(Task.class)); + } catch (Exception exception) { + assertEquals(0, handler.count); + handler.messageReceived(mock(TransportRequest.class), mock(TransportChannel.class), mock(Task.class)); + } + assertEquals(1, handler.count); + } + + private class InterceptingRequestHandler<T extends TransportRequest> implements TransportRequestHandler<T> { + private final String action; + public int count; + + public InterceptingRequestHandler(String action) { + this.action = action; + this.count = 0; + } + + @Override + public void messageReceived(T request, TransportChannel channel, Task task) throws Exception { + this.count = this.count + 1; + } + } +} diff --git a/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java b/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java index 776173f73ce5c..fb4dc97435512 100644 --- a/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java +++ b/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java @@ -34,18 +34,18 @@ import org.opensearch.OpenSearchException; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.replication.OpenSearchIndexLevelReplicationTestCase; import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.store.Store; +import org.opensearch.indices.recovery.RecoveryState; +import org.opensearch.indices.recovery.RecoveryTarget; import org.opensearch.indices.replication.SegmentReplicationSource; import org.opensearch.indices.replication.SegmentReplicationTarget; import org.opensearch.indices.replication.common.ReplicationCollection; import org.opensearch.indices.replication.common.ReplicationFailedException; import org.opensearch.indices.replication.common.ReplicationListener; import org.opensearch.indices.replication.common.ReplicationState; -import org.opensearch.indices.recovery.RecoveryState; -import org.opensearch.indices.recovery.RecoveryTarget; import org.opensearch.indices.replication.common.ReplicationTarget; import java.util.concurrent.CountDownLatch; @@ -120,11 +120,13 @@ public void testStartMultipleReplicationsForSingleShard() throws Exception { shards.recoverReplica(shard); final SegmentReplicationTarget target1 = new SegmentReplicationTarget( shard, + shards.getPrimary().getLatestReplicationCheckpoint(), mock(SegmentReplicationSource.class), mock(ReplicationListener.class) ); final SegmentReplicationTarget target2 = new SegmentReplicationTarget( shard, + shards.getPrimary().getLatestReplicationCheckpoint(), mock(SegmentReplicationSource.class), mock(ReplicationListener.class) ); diff --git a/server/src/test/java/org/opensearch/repositories/IndexIdTests.java b/server/src/test/java/org/opensearch/repositories/IndexIdTests.java index 0e27dbded01bf..2b927b3b40115 100644 --- 
a/server/src/test/java/org/opensearch/repositories/IndexIdTests.java +++ b/server/src/test/java/org/opensearch/repositories/IndexIdTests.java @@ -33,12 +33,12 @@ package org.opensearch.repositories; import org.opensearch.common.UUIDs; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/repositories/IndexMetadataGenerationsTests.java b/server/src/test/java/org/opensearch/repositories/IndexMetadataGenerationsTests.java index fda330ba7a7f3..1e2d72e4a91fd 100644 --- a/server/src/test/java/org/opensearch/repositories/IndexMetadataGenerationsTests.java +++ b/server/src/test/java/org/opensearch/repositories/IndexMetadataGenerationsTests.java @@ -8,9 +8,9 @@ package org.opensearch.repositories; -import org.junit.Before; import org.opensearch.snapshots.SnapshotId; import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; import java.util.Collections; import java.util.HashMap; diff --git a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java index 6cbbb4ffcb7c7..43ebb86fd5342 100644 --- a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java +++ b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java @@ -34,61 +34,90 @@ import org.apache.lucene.index.IndexCommit; import org.opensearch.Version; -import org.opensearch.action.ActionListener; +import org.opensearch.action.admin.cluster.crypto.CryptoSettings; import org.opensearch.action.admin.cluster.repositories.put.PutRepositoryRequest; +import org.opensearch.cluster.AckedClusterStateUpdateTask; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; +import org.opensearch.cluster.ack.ClusterStateUpdateResponse; +import org.opensearch.cluster.metadata.CryptoMetadata; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.service.ClusterApplierService; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.Strings; import org.opensearch.common.UUIDs; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; -import org.opensearch.common.component.Lifecycle; -import org.opensearch.common.component.LifecycleListener; +import org.opensearch.common.crypto.CryptoHandler; +import org.opensearch.common.crypto.DecryptedRangedStreamProvider; +import org.opensearch.common.crypto.EncryptedHeaderContentSupplier; +import org.opensearch.common.crypto.MasterKeyProvider; +import org.opensearch.common.io.InputStreamContainer; +import org.opensearch.common.lifecycle.Lifecycle; +import 
org.opensearch.common.lifecycle.LifecycleListener; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.Strings; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.index.mapper.MapperService; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.index.snapshots.blobstore.RemoteStoreShardShallowCopySnapshot; import org.opensearch.index.store.Store; import org.opensearch.index.store.lockmanager.RemoteStoreLockManagerFactory; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; +import org.opensearch.plugins.CryptoPlugin; import org.opensearch.repositories.blobstore.MeteredBlobStoreRepository; import org.opensearch.snapshots.SnapshotId; import org.opensearch.snapshots.SnapshotInfo; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportService; +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; import java.util.function.Function; +import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING; import static org.hamcrest.Matchers.equalTo; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class RepositoriesServiceTests extends OpenSearchTestCase { private RepositoriesService repositoriesService; + private final String kpTypeA = "kp-type-a"; + private final String kpTypeB = "kp-type-b"; @Override public void setUp() throws Exception { super.setUp(); + ThreadPool threadPool = mock(ThreadPool.class); + final ClusterApplierService clusterApplierService = mock(ClusterApplierService.class); + when(clusterApplierService.threadPool()).thenReturn(threadPool); + final ClusterService clusterService = mock(ClusterService.class); + repositoriesService = createRepositoriesServiceWithMockedClusterService(clusterService); + } + + private RepositoriesService createRepositoriesServiceWithMockedClusterService(ClusterService clusterService) { ThreadPool threadPool = mock(ThreadPool.class); final TransportService transportService = new TransportService( Settings.EMPTY, @@ -97,11 +126,11 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> DiscoveryNode.createLocal(Settings.EMPTY, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); final ClusterApplierService clusterApplierService = mock(ClusterApplierService.class); when(clusterApplierService.threadPool()).thenReturn(threadPool); - final ClusterService clusterService = mock(ClusterService.class); when(clusterService.getClusterApplierService()).thenReturn(clusterApplierService); Map<String, Repository.Factory> typesRegistry = Map.of( TestRepository.TYPE, @@ -111,15 +140,24 @@ public void setUp() throws Exception { MeteredRepositoryTypeB.TYPE, metadata -> new 
MeteredRepositoryTypeB(metadata, clusterService) ); - repositoriesService = new RepositoriesService( + + DiscoveryNodes nodes = mock(DiscoveryNodes.class); + when(nodes.getMinNodeVersion()).thenReturn(Version.V_2_9_0); + ClusterState currentClusterState = mock(ClusterState.class); + when(currentClusterState.getNodes()).thenReturn(nodes); + when(clusterService.state()).thenReturn(currentClusterState); + + RepositoriesService repositoriesService = new RepositoriesService( Settings.EMPTY, - mock(ClusterService.class), + clusterService, transportService, typesRegistry, typesRegistry, threadPool ); + repositoriesService.start(); + return repositoriesService; } public void testRegisterInternalRepository() { @@ -164,6 +202,13 @@ public void testRegisterRejectsInvalidRepositoryNames() { } } + public void testUpdateOrRegisterRejectsForSystemRepository() { + String repoName = "name"; + PutRepositoryRequest request = new PutRepositoryRequest(repoName); + request.settings(Settings.builder().put(SYSTEM_REPOSITORY_SETTING.getKey(), true).build()); + expectThrows(RepositoryException.class, () -> repositoriesService.registerOrUpdateRepository(request, null)); + } + public void testRepositoriesStatsCanHaveTheSameNameAndDifferentTypeOverTime() { String repoName = "name"; expectThrows(RepositoryMissingException.class, () -> repositoriesService.repository(repoName)); @@ -174,20 +219,265 @@ public void testRepositoriesStatsCanHaveTheSameNameAndDifferentTypeOverTime() { assertThat(repositoriesService.repositoriesStats().size(), equalTo(1)); repositoriesService.applyClusterState(new ClusterChangedEvent("new repo", emptyState(), clusterStateWithRepoTypeA)); - assertThat(repositoriesService.repositoriesStats().size(), equalTo(1)); + assertThat(repositoriesService.repositoriesStats().size(), equalTo(0)); ClusterState clusterStateWithRepoTypeB = createClusterStateWithRepo(repoName, MeteredRepositoryTypeB.TYPE); repositoriesService.applyClusterState(new ClusterChangedEvent("new repo", clusterStateWithRepoTypeB, emptyState())); List<RepositoryStatsSnapshot> repositoriesStats = repositoriesService.repositoriesStats(); - assertThat(repositoriesStats.size(), equalTo(2)); + assertThat(repositoriesStats.size(), equalTo(1)); RepositoryStatsSnapshot repositoryStatsTypeA = repositoriesStats.get(0); - assertThat(repositoryStatsTypeA.getRepositoryInfo().type, equalTo(MeteredRepositoryTypeA.TYPE)); - assertThat(repositoryStatsTypeA.getRepositoryStats(), equalTo(MeteredRepositoryTypeA.STATS)); + assertThat(repositoryStatsTypeA.getRepositoryInfo().type, equalTo(MeteredRepositoryTypeB.TYPE)); + assertThat(repositoryStatsTypeA.getRepositoryStats(), equalTo(MeteredRepositoryTypeB.STATS)); + + } + + public void testWithSameKeyProviderNames() { + String keyProviderName = "kp-name"; + ClusterState clusterStateWithRepoTypeA = createClusterStateWithKeyProvider( + "repoName", + MeteredRepositoryTypeA.TYPE, + keyProviderName, + kpTypeA + ); + + repositoriesService.applyClusterState(new ClusterChangedEvent("new repo", clusterStateWithRepoTypeA, emptyState())); + assertThat(repositoriesService.repositoriesStats().size(), equalTo(1)); + MeteredRepositoryTypeA repository = (MeteredRepositoryTypeA) repositoriesService.repository("repoName"); + assertNotNull(repository); + assertNotNull(repository.cryptoHandler); + assertEquals(kpTypeA, repository.cryptoHandler.kpType); + + ClusterState clusterStateWithRepoTypeB = createClusterStateWithKeyProvider( + "repoName", + MeteredRepositoryTypeB.TYPE, + keyProviderName, + kpTypeA + ); + 
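+ // Switching the same repository name to a different repository type should replace the repository instance, while the key provider with the same name and type is expected to be reused.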
repositoriesService.applyClusterState(new ClusterChangedEvent("new repo", clusterStateWithRepoTypeB, emptyState())); + assertThat(repositoriesService.repositoriesStats().size(), equalTo(1)); + MeteredRepositoryTypeB repositoryB = (MeteredRepositoryTypeB) repositoriesService.repository("repoName"); + assertNotNull(repositoryB); + assertNotNull(repositoryB.cryptoHandler); + assertEquals(kpTypeA, repositoryB.cryptoHandler.kpType); + } + + public void testCryptoManagersUnchangedWithSameCryptoMetadata() { + String keyProviderName = "kp-name"; + ClusterState clusterStateWithRepoTypeA = createClusterStateWithKeyProvider( + "repoName", + MeteredRepositoryTypeA.TYPE, + keyProviderName, + kpTypeA + ); + repositoriesService.applyClusterState(new ClusterChangedEvent("new repo", clusterStateWithRepoTypeA, emptyState())); + assertThat(repositoriesService.repositoriesStats().size(), equalTo(1)); + MeteredRepositoryTypeA repository = (MeteredRepositoryTypeA) repositoriesService.repository("repoName"); + assertNotNull(repository); + assertNotNull(repository.cryptoHandler); + assertEquals(kpTypeA, repository.cryptoHandler.kpType); + + repositoriesService.applyClusterState(new ClusterChangedEvent("new repo", clusterStateWithRepoTypeA, emptyState())); + assertThat(repositoriesService.repositoriesStats().size(), equalTo(1)); + repository = (MeteredRepositoryTypeA) repositoriesService.repository("repoName"); + assertNotNull(repository); + assertNotNull(repository.cryptoHandler); + assertEquals(kpTypeA, repository.cryptoHandler.kpType); + } + + public void testRepositoryUpdateWithDifferentCryptoMetadata() { + String keyProviderName = "kp-name"; + + ClusterState clusterStateWithRepoTypeA = createClusterStateWithKeyProvider( + "repoName", + MeteredRepositoryTypeA.TYPE, + keyProviderName, + kpTypeA + ); + ClusterService clusterService = mock(ClusterService.class); + + PutRepositoryRequest request = new PutRepositoryRequest("repoName"); + request.type(MeteredRepositoryTypeA.TYPE); + request.settings(Settings.EMPTY); + + doAnswer((invocation) -> { + AckedClusterStateUpdateTask<ClusterStateUpdateResponse> task = (AckedClusterStateUpdateTask< + ClusterStateUpdateResponse>) invocation.getArguments()[1]; + task.execute(clusterStateWithRepoTypeA); + return null; + }).when(clusterService).submitStateUpdateTask(any(), any()); + + RepositoriesService repositoriesService = createRepositoriesServiceWithMockedClusterService(clusterService); + repositoriesService.applyClusterState(new ClusterChangedEvent("new repo", clusterStateWithRepoTypeA, emptyState())); + assertThat(repositoriesService.repositoriesStats().size(), equalTo(1)); + MeteredRepositoryTypeA repository = (MeteredRepositoryTypeA) repositoriesService.repository("repoName"); + assertNotNull(repository); + assertNotNull(repository.cryptoHandler); + assertEquals(kpTypeA, repository.cryptoHandler.kpType); + + expectThrows(IllegalArgumentException.class, () -> repositoriesService.registerOrUpdateRepository(request, null)); + + CryptoSettings cryptoSettings = new CryptoSettings(keyProviderName); + cryptoSettings.keyProviderType(kpTypeA); + cryptoSettings.settings(Settings.builder().put("key-1", "val-1")); + request.cryptoSettings(cryptoSettings); + expectThrows(IllegalArgumentException.class, () -> repositoriesService.registerOrUpdateRepository(request, null)); + + cryptoSettings.settings(Settings.builder()); + cryptoSettings.keyProviderName("random"); + expectThrows(IllegalArgumentException.class, () -> repositoriesService.registerOrUpdateRepository(request, null)); + + 
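+ // Restoring the original key provider name should make the same update request valid again.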
cryptoSettings.keyProviderName(keyProviderName); + + assertEquals(kpTypeA, repository.cryptoHandler.kpType); + repositoriesService.registerOrUpdateRepository(request, null); + } + + public void testCryptoManagerClusterStateChanges() { + + ClusterService clusterService = mock(ClusterService.class); + AtomicBoolean verified = new AtomicBoolean(); + List<RepositoryMetadata> repositoryMetadata = new ArrayList<>(); + + String keyProviderName = "kp-name-1"; + String repoName = "repoName"; + String keyProviderType = kpTypeA; + Settings.Builder settings = Settings.builder(); + PutRepositoryRequest request = createPutRepositoryEncryptedRequest( + repoName, + MeteredRepositoryTypeA.TYPE, + keyProviderName, + settings, + keyProviderType + ); + verified.set(false); + RepositoriesService repositoriesService = createRepositoriesServiceAndMockCryptoClusterState( + clusterService, + repoName, + keyProviderName, + keyProviderType, + settings.build(), + verified, + repositoryMetadata + ); + repositoriesService.registerOrUpdateRepository(request, null); + MeteredRepositoryTypeA repository = (MeteredRepositoryTypeA) repositoriesService.repository(repoName); + assertNotNull(repository.cryptoHandler); + assertEquals(kpTypeA, repository.cryptoHandler.kpType); + assertTrue(verified.get()); + + // No change + keyProviderType = kpTypeA; + settings = Settings.builder(); + request = createPutRepositoryEncryptedRequest(repoName, MeteredRepositoryTypeA.TYPE, keyProviderName, settings, keyProviderType); + verified.set(false); + repositoriesService = createRepositoriesServiceAndMockCryptoClusterState( + clusterService, + repoName, + keyProviderName, + keyProviderType, + settings.build(), + verified, + repositoryMetadata + ); + repositoriesService.registerOrUpdateRepository(request, null); + + repository = (MeteredRepositoryTypeA) repositoriesService.repository(repoName); + assertNotNull(repository.cryptoHandler); + assertEquals(kpTypeA, repository.cryptoHandler.kpType); + assertTrue(verified.get()); + + // Same crypto client in new repo + repoName = "repoName-2"; + keyProviderType = kpTypeA; + settings = Settings.builder(); + request = createPutRepositoryEncryptedRequest(repoName, MeteredRepositoryTypeA.TYPE, keyProviderName, settings, keyProviderType); + verified.set(false); + repositoriesService = createRepositoriesServiceAndMockCryptoClusterState( + clusterService, + repoName, + keyProviderName, + keyProviderType, + settings.build(), + verified, + repositoryMetadata + ); + repositoriesService.registerOrUpdateRepository(request, null); + repository = (MeteredRepositoryTypeA) repositoriesService.repository(repoName); + assertNotNull(repository.cryptoHandler); + assertEquals(kpTypeA, repository.cryptoHandler.kpType); + assertTrue(verified.get()); + + // Different crypto client in new repo + repoName = "repoName-3"; + keyProviderType = kpTypeB; + settings = Settings.builder(); + request = createPutRepositoryEncryptedRequest(repoName, MeteredRepositoryTypeA.TYPE, keyProviderName, settings, keyProviderType); + verified.set(false); + repositoriesService = createRepositoriesServiceAndMockCryptoClusterState( + clusterService, + repoName, + keyProviderName, + keyProviderType, + settings.build(), + verified, + repositoryMetadata + ); + repositoriesService.registerOrUpdateRepository(request, null); + repository = (MeteredRepositoryTypeA) repositoriesService.repository(repoName); + assertNotNull(repository.cryptoHandler); + assertEquals(kpTypeB, repository.cryptoHandler.kpType); + assertTrue(verified.get()); + + } + + private 
RepositoriesService createRepositoriesServiceAndMockCryptoClusterState( + ClusterService clusterService, + String repoName, + String keyProviderName, + String keyProviderType, + Settings settings, + AtomicBoolean verified, + List<RepositoryMetadata> repositoryMetadataList + ) { + + ClusterState.Builder state = ClusterState.builder(new ClusterName("test")); + CryptoMetadata newCryptoMetadata = new CryptoMetadata(keyProviderName, keyProviderType, Settings.EMPTY); + Metadata.Builder mdBuilder = Metadata.builder(); + + RepositoryMetadata newRepositoryMetadata = new RepositoryMetadata( + repoName, + MeteredRepositoryTypeA.TYPE, + Settings.EMPTY, + newCryptoMetadata + ); + if (!repositoryMetadataList.contains(newRepositoryMetadata)) { + repositoryMetadataList.add(newRepositoryMetadata); + } + RepositoriesMetadata newRepositoriesMetadata = new RepositoriesMetadata(repositoryMetadataList); + mdBuilder.putCustom(RepositoriesMetadata.TYPE, newRepositoriesMetadata); + state.metadata(mdBuilder); + ClusterState clusterStateWithRepoTypeA = state.build(); + + RepositoriesService repositoriesService = createRepositoriesServiceWithMockedClusterService(clusterService); + + doAnswer((invocation) -> { + AckedClusterStateUpdateTask<ClusterStateUpdateResponse> task = (AckedClusterStateUpdateTask< + ClusterStateUpdateResponse>) invocation.getArguments()[1]; + ClusterState clusterState = task.execute(clusterStateWithRepoTypeA); + RepositoriesMetadata repositories = clusterState.metadata().custom(RepositoriesMetadata.TYPE); + RepositoryMetadata repositoryMetadata = repositories.repositories().get(repositoryMetadataList.size() - 1); + CryptoMetadata cryptoMetadata = repositoryMetadata.cryptoMetadata(); + assertNotNull(cryptoMetadata); + assertEquals(keyProviderName, cryptoMetadata.keyProviderName()); + assertEquals(keyProviderType, cryptoMetadata.keyProviderType()); + assertEquals(cryptoMetadata.settings(), settings); + verified.set(true); + repositoriesService.applyClusterState(new ClusterChangedEvent("new repo", clusterStateWithRepoTypeA, emptyState())); + return null; + }).when(clusterService).submitStateUpdateTask(any(), any()); - RepositoryStatsSnapshot repositoryStatsTypeB = repositoriesStats.get(1); - assertThat(repositoryStatsTypeB.getRepositoryInfo().type, equalTo(MeteredRepositoryTypeB.TYPE)); - assertThat(repositoryStatsTypeB.getRepositoryStats(), equalTo(MeteredRepositoryTypeB.STATS)); + return repositoriesService; } private ClusterState createClusterStateWithRepo(String repoName, String repoType) { @@ -202,13 +492,136 @@ private ClusterState createClusterStateWithRepo(String repoName, String repoType return state.build(); } + private ClusterState createClusterStateWithKeyProvider( + String repoName, + String repoType, + String keyProviderName, + String keyProviderType + ) { + ClusterState.Builder state = ClusterState.builder(new ClusterName("test")); + Metadata.Builder mdBuilder = Metadata.builder(); + CryptoMetadata cryptoMetadata = new CryptoMetadata(keyProviderName, keyProviderType, Settings.EMPTY); + mdBuilder.putCustom( + RepositoriesMetadata.TYPE, + new RepositoriesMetadata(Collections.singletonList(new RepositoryMetadata(repoName, repoType, Settings.EMPTY, cryptoMetadata))) + ); + state.metadata(mdBuilder); + + return state.build(); + } + + private PutRepositoryRequest createPutRepositoryEncryptedRequest( + String repoName, + String repoType, + String keyProviderName, + Settings.Builder settings, + String keyProviderType + ) { + PutRepositoryRequest repositoryRequest = new 
PutRepositoryRequest(repoName); + repositoryRequest.type(repoType); + repositoryRequest.settings(Settings.EMPTY); + CryptoSettings cryptoSettings = new CryptoSettings(keyProviderName); + cryptoSettings.keyProviderName(keyProviderName); + cryptoSettings.keyProviderType(keyProviderType); + cryptoSettings.settings(settings); + repositoryRequest.cryptoSettings(cryptoSettings); + + return repositoryRequest; + } + private ClusterState emptyState() { return ClusterState.builder(new ClusterName("test")).build(); } private void assertThrowsOnRegister(String repoName) { PutRepositoryRequest request = new PutRepositoryRequest(repoName); - expectThrows(RepositoryException.class, () -> repositoriesService.registerRepository(request, null)); + expectThrows(RepositoryException.class, () -> repositoriesService.registerOrUpdateRepository(request, null)); + } + + private static class TestCryptoProvider implements CryptoHandler<Object, Object> { + final String kpName; + final String kpType; + + public TestCryptoProvider(String kpName, String kpType) { + this.kpName = kpName; + this.kpType = kpType; + } + + @Override + public Object initEncryptionMetadata() { + return new Object(); + } + + @Override + public long adjustContentSizeForPartialEncryption(Object cryptoContextObj, long contentSize) { + return 0; + } + + @Override + public long estimateEncryptedLengthOfEntireContent(Object cryptoContextObj, long contentLength) { + return 0; + } + + @Override + public InputStreamContainer createEncryptingStream(Object encryptionMetadata, InputStreamContainer streamContainer) { + return null; + } + + @Override + public InputStreamContainer createEncryptingStreamOfPart( + Object cryptoContextObj, + InputStreamContainer stream, + int totalStreams, + int streamIdx + ) { + return null; + } + + @Override + public InputStream createDecryptingStream(InputStream encryptingStream) { + return null; + } + + @Override + public Object loadEncryptionMetadata(EncryptedHeaderContentSupplier encryptedHeaderContentSupplier) throws IOException { + return null; + } + + @Override + public DecryptedRangedStreamProvider createDecryptingStreamOfRange( + Object cryptoContext, + long startPosOfRawContent, + long endPosOfRawContent + ) { + return null; + } + + @Override + public long estimateDecryptedLength(Object cryptoContext, long contentLength) { + return 0; + } + + @Override + public void close() throws IOException { + + } + } + + private static abstract class TestCryptoHandler implements CryptoPlugin<Object, Object> { + private final Settings settings; + + public TestCryptoHandler(Settings settings) { + this.settings = settings; + } + + public CryptoHandler<Object, Object> getOrCreateCryptoHandler( + MasterKeyProvider keyProvider, + String keyProviderName, + String keyProviderType, + Runnable onClose + ) { + return new TestCryptoProvider(keyProviderName, keyProviderType); + } } private static class TestRepository implements Repository { @@ -281,6 +694,16 @@ public long getRestoreThrottleTimeInNanos() { return 0; } + @Override + public long getRemoteUploadThrottleTimeInNanos() { + return 0; + } + + @Override + public long getRemoteDownloadThrottleTimeInNanos() { + return 0; + } + @Override public String startVerification() { return null; @@ -301,6 +724,11 @@ public boolean isReadOnly() { return false; } + @Override + public boolean isSystemRepository() { + return false; + } + @Override public void snapshotShard( Store store, @@ -425,16 +853,19 @@ public void close() { private static class MeteredRepositoryTypeA extends 
MeteredBlobStoreRepository { private static final String TYPE = "type-a"; private static final RepositoryStats STATS = new RepositoryStats(Map.of("GET", 10L)); + private final TestCryptoProvider cryptoHandler; private MeteredRepositoryTypeA(RepositoryMetadata metadata, ClusterService clusterService) { - super( - metadata, - false, - mock(NamedXContentRegistry.class), - clusterService, - mock(RecoverySettings.class), - Map.of("bucket", "bucket-a") - ); + super(metadata, mock(NamedXContentRegistry.class), clusterService, mock(RecoverySettings.class), Map.of("bucket", "bucket-a")); + + if (metadata.cryptoMetadata() != null) { + cryptoHandler = new TestCryptoProvider( + metadata.cryptoMetadata().keyProviderName(), + metadata.cryptoMetadata().keyProviderType() + ); + } else { + cryptoHandler = null; + } } @Override @@ -456,16 +887,19 @@ public BlobPath basePath() { private static class MeteredRepositoryTypeB extends MeteredBlobStoreRepository { private static final String TYPE = "type-b"; private static final RepositoryStats STATS = new RepositoryStats(Map.of("LIST", 20L)); + private final TestCryptoProvider cryptoHandler; private MeteredRepositoryTypeB(RepositoryMetadata metadata, ClusterService clusterService) { - super( - metadata, - false, - mock(NamedXContentRegistry.class), - clusterService, - mock(RecoverySettings.class), - Map.of("bucket", "bucket-b") - ); + super(metadata, mock(NamedXContentRegistry.class), clusterService, mock(RecoverySettings.class), Map.of("bucket", "bucket-b")); + + if (metadata.cryptoMetadata() != null) { + cryptoHandler = new TestCryptoProvider( + metadata.cryptoMetadata().keyProviderName(), + metadata.cryptoMetadata().keyProviderType() + ); + } else { + cryptoHandler = null; + } } @Override diff --git a/server/src/test/java/org/opensearch/repositories/RepositoriesStatsArchiveTests.java b/server/src/test/java/org/opensearch/repositories/RepositoriesStatsArchiveTests.java index cf0b06a3f7d16..da0cbcb1d4b17 100644 --- a/server/src/test/java/org/opensearch/repositories/RepositoriesStatsArchiveTests.java +++ b/server/src/test/java/org/opensearch/repositories/RepositoriesStatsArchiveTests.java @@ -32,7 +32,6 @@ package org.opensearch.repositories; -import org.opensearch.common.UUIDs; import org.opensearch.common.unit.TimeValue; import org.opensearch.test.OpenSearchTestCase; @@ -122,14 +121,11 @@ private RepositoryStatsSnapshot createRepositoryStats(RepositoryStats repository private RepositoryStatsSnapshot createRepositoryStats(RepositoryStats repositoryStats, long clusterVersion) { RepositoryInfo repositoryInfo = new RepositoryInfo( - UUIDs.randomBase64UUID(), randomAlphaOfLength(10), randomAlphaOfLength(10), - Map.of("bucket", randomAlphaOfLength(10)), - System.currentTimeMillis(), - null + Map.of("bucket", randomAlphaOfLength(10)) ); - return new RepositoryStatsSnapshot(repositoryInfo, repositoryStats, clusterVersion, true); + return new RepositoryStatsSnapshot(repositoryInfo, repositoryStats, clusterVersion); } } diff --git a/server/src/test/java/org/opensearch/repositories/RepositoryDataTests.java b/server/src/test/java/org/opensearch/repositories/RepositoryDataTests.java index 26e4a2844a4ce..46293e6a0db7a 100644 --- a/server/src/test/java/org/opensearch/repositories/RepositoryDataTests.java +++ b/server/src/test/java/org/opensearch/repositories/RepositoryDataTests.java @@ -35,12 +35,13 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.Version; import org.opensearch.common.UUIDs; +import org.opensearch.common.xcontent.XContentType; +import 
org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.snapshots.SnapshotId; import org.opensearch.snapshots.SnapshotState; import org.opensearch.test.OpenSearchTestCase; @@ -296,7 +297,7 @@ public void testIndexThatReferencesAnUnknownSnapshot() throws IOException { } public void testIndexThatReferenceANullSnapshot() throws IOException { - final XContentBuilder builder = XContentBuilder.builder(randomFrom(XContentType.JSON).xContent()); + final XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder(); builder.startObject(); { builder.startArray("snapshots"); diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java new file mode 100644 index 0000000000000..57c126b85ff70 --- /dev/null +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java @@ -0,0 +1,164 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.repositories.blobstore; + +import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.client.Client; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.Strings; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.env.Environment; +import org.opensearch.index.IndexModule; +import org.opensearch.index.IndexService; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.store.RemoteBufferedOutputDirectory; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.plugins.Plugin; +import org.opensearch.plugins.RepositoryPlugin; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.Repository; +import org.opensearch.repositories.fs.FsRepository; +import org.opensearch.snapshots.SnapshotInfo; +import org.opensearch.snapshots.SnapshotState; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.OpenSearchSingleNodeTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; + +public class BlobStoreRepositoryHelperTests extends OpenSearchSingleNodeTestCase { + + static final String REPO_TYPE = "fsLike"; + + protected Collection<Class<? 
extends Plugin>> getPlugins() { + return Arrays.asList(FsLikeRepoPlugin.class); + } + + protected String[] getLockFilesInRemoteStore(String remoteStoreIndex, String remoteStoreRepository) throws IOException { + String indexUUID = client().admin() + .indices() + .prepareGetSettings(remoteStoreIndex) + .get() + .getSetting(remoteStoreIndex, IndexMetadata.SETTING_INDEX_UUID); + final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class); + final BlobStoreRepository remoteStoreRepo = (BlobStoreRepository) repositoriesService.repository(remoteStoreRepository); + BlobPath shardLevelBlobPath = remoteStoreRepo.basePath().add(indexUUID).add("0").add("segments").add("lock_files"); + BlobContainer blobContainer = remoteStoreRepo.blobStore().blobContainer(shardLevelBlobPath); + try (RemoteBufferedOutputDirectory lockDirectory = new RemoteBufferedOutputDirectory(blobContainer)) { + return Arrays.stream(lockDirectory.listAll()) + .filter(lock -> lock.endsWith(".lock") || lock.endsWith(".v2_lock")) + .toArray(String[]::new); + } + } + + // This plugin exists to drop the assertSnapshotOrGenericThread check, since almost all access in these tests happens from test threads + public static class FsLikeRepoPlugin extends Plugin implements RepositoryPlugin { + + @Override + public Map<String, Repository.Factory> getRepositories( + Environment env, + NamedXContentRegistry namedXContentRegistry, + ClusterService clusterService, + RecoverySettings recoverySettings + ) { + return Collections.singletonMap( + REPO_TYPE, + (metadata) -> new FsRepository(metadata, env, namedXContentRegistry, clusterService, recoverySettings) { + @Override + protected void assertSnapshotOrGenericThread() { + // eliminate thread name check as we access blobStore on test/main threads + } + } + ); + } + } + + protected void createRepository(Client client, String repoName) { + AcknowledgedResponse putRepositoryResponse = client.admin() + .cluster() + .preparePutRepository(repoName) + .setType(REPO_TYPE) + .setSettings( + Settings.builder().put(node().settings()).put("location", OpenSearchIntegTestCase.randomRepoPath(node().settings())) + ) + .get(); + assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + } + + protected void createRepository(Client client, String repoName, Settings repoSettings) { + AcknowledgedResponse putRepositoryResponse = client.admin() + .cluster() + .preparePutRepository(repoName) + .setType(REPO_TYPE) + .setSettings(repoSettings) + .get(); + assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + } + + protected void updateRepository(Client client, String repoName, Settings repoSettings) { + createRepository(client, repoName, repoSettings); + } + + protected Settings getRemoteStoreBackedIndexSettings() { + return Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "1") + .put("index.refresh_interval", "300s") + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") + .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.FS.getSettingsKey()) + .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build(); + } + + protected SnapshotInfo createSnapshot(String repositoryName, String snapshot, List<String> indices) { + logger.info("--> creating snapshot [{}] of {} in [{}]", snapshot, indices, repositoryName); + + final CreateSnapshotResponse response = client().admin() + .cluster() + .prepareCreateSnapshot(repositoryName, 
snapshot) + .setIndices(indices.toArray(Strings.EMPTY_ARRAY)) + .setWaitForCompletion(true) + .get(); + SnapshotInfo snapshotInfo = response.getSnapshotInfo(); + assertThat(snapshotInfo.state(), is(SnapshotState.SUCCESS)); + assertThat(snapshotInfo.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo.failedShards(), equalTo(0)); + return snapshotInfo; + } + + protected void indexDocuments(Client client, String indexName) { + int numDocs = randomIntBetween(10, 20); + for (int i = 0; i < numDocs; i++) { + String id = Integer.toString(i); + client.prepareIndex(indexName).setId(id).setSource("text", "sometext").get(); + } + } + + protected IndexSettings getIndexSettings(String indexName) { + final IndicesService indicesService = getInstanceFromNode(IndicesService.class); + final IndexService indexService = indicesService.indexService(resolveIndex(indexName)); + return indexService.getIndexSettings(); + } + +} diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java new file mode 100644 index 0000000000000..9cca495cced72 --- /dev/null +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java @@ -0,0 +1,376 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.repositories.blobstore; + +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; +import org.opensearch.client.Client; +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.env.Environment; +import org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.snapshots.blobstore.RemoteStoreShardShallowCopySnapshot; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.repositories.IndexId; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.RepositoryData; +import org.opensearch.repositories.fs.FsRepository; +import org.opensearch.snapshots.SnapshotId; +import org.opensearch.snapshots.SnapshotInfo; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Locale; +import java.util.stream.Collectors; + +import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.hamcrest.Matchers.equalTo; + +/** + * Tests for the {@link BlobStoreRepository} and its subclasses. + */ +public class BlobStoreRepositoryRemoteIndexTests extends BlobStoreRepositoryHelperTests { + + @Override + protected Settings nodeSettings() { + Path tempDir = createTempDir(); + return Settings.builder() + .put(super.nodeSettings()) + .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT) + .put(buildRemoteStoreNodeAttributes("test-rs-repo", tempDir.resolve("repo"))) + .put(Environment.PATH_HOME_SETTING.getKey(), tempDir) + .put(Environment.PATH_REPO_SETTING.getKey(), tempDir.resolve("repo")) + .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), tempDir.getParent()) + .build(); + } + + private Settings buildRemoteStoreNodeAttributes(String repoName, Path repoPath) { + String repoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + repoName + ); + String repoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + repoName + ); + + return Settings.builder() + .put("node.attr." + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, repoName) + .put(repoTypeAttributeKey, FsRepository.TYPE) + .put(repoSettingsAttributeKeyPrefix + "location", repoPath) + .put("node.attr." + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, repoName) + .put(repoTypeAttributeKey, FsRepository.TYPE) + .put(repoSettingsAttributeKeyPrefix + "location", repoPath) + .put("node.attr." 
+ REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, repoName) + .put(repoTypeAttributeKey, FsRepository.TYPE) + .put(repoSettingsAttributeKeyPrefix + "location", repoPath) + .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), false) + .build(); + } + + // Validate Scenario Normal Snapshot -> remoteStoreShallowCopy Snapshot -> normal Snapshot + public void testRetrieveShallowCopySnapshotCase1() throws IOException { + final Client client = client(); + final String snapshotRepositoryName = "test-repo"; + final String remoteStoreRepositoryName = "test-rs-repo"; + + logger.info("--> creating snapshot repository"); + + Settings snapshotRepoSettings = Settings.builder() + .put(node().settings()) + .put("location", OpenSearchIntegTestCase.randomRepoPath(node().settings())) + .build(); + createRepository(client, snapshotRepositoryName, snapshotRepoSettings); + + logger.info("--> creating an index and indexing documents"); + final String indexName = "test-idx"; + createIndex(indexName); + ensureGreen(); + indexDocuments(client, indexName); + + logger.info("--> creating a remote store enabled index and indexing documents"); + final String remoteStoreIndexName = "test-rs-idx"; + Settings indexSettings = getRemoteStoreBackedIndexSettings(); + createIndex(remoteStoreIndexName, indexSettings); + indexDocuments(client, remoteStoreIndexName); + + logger.info("--> create first snapshot"); + SnapshotInfo snapshotInfo = createSnapshot( + snapshotRepositoryName, + "test-snap-1", + new ArrayList<>(Arrays.asList(indexName, remoteStoreIndexName)) + ); + final SnapshotId snapshotId1 = snapshotInfo.snapshotId(); + + String[] lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); + assertEquals("there should be no lock files present in directory, but found " + Arrays.toString(lockFiles), 0, lockFiles.length); + logger.info("--> create remote index shallow snapshot"); + Settings snapshotRepoSettingsForShallowCopy = Settings.builder() + .put(snapshotRepoSettings) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), Boolean.TRUE) + .build(); + updateRepository(client, snapshotRepositoryName, snapshotRepoSettingsForShallowCopy); + + snapshotInfo = createSnapshot( + snapshotRepositoryName, + "test-snap-2", + new ArrayList<>(Arrays.asList(indexName, remoteStoreIndexName)) + ); + final SnapshotId snapshotId2 = snapshotInfo.snapshotId(); + + lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); + assertEquals("there should be only one lock file, but found " + Arrays.toString(lockFiles), 1, lockFiles.length); + assertTrue(lockFiles[0].endsWith(snapshotId2.getUUID() + ".v2_lock")); + + logger.info("--> create another normal snapshot"); + updateRepository(client, snapshotRepositoryName, snapshotRepoSettings); + snapshotInfo = createSnapshot( + snapshotRepositoryName, + "test-snap-3", + new ArrayList<>(Arrays.asList(indexName, remoteStoreIndexName)) + ); + final SnapshotId snapshotId3 = snapshotInfo.snapshotId(); + + lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); + assertEquals("there should be only one lock file, but found " + Arrays.toString(lockFiles), 1, lockFiles.length); + assertTrue(lockFiles[0].endsWith(snapshotId2.getUUID() + ".v2_lock")); + + logger.info("--> make sure the node's repository can resolve the snapshots"); + final List<SnapshotId> originalSnapshots = Arrays.asList(snapshotId1, snapshotId2, snapshotId3); + + final RepositoriesService 
repositoriesService = getInstanceFromNode(RepositoriesService.class); + final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(snapshotRepositoryName); + RepositoryData repositoryData = OpenSearchBlobStoreRepositoryIntegTestCase.getRepositoryData(repository); + IndexId indexId = repositoryData.resolveIndexId(remoteStoreIndexName); + + List<SnapshotId> snapshotIds = repositoryData.getSnapshotIds() + .stream() + .sorted((s1, s2) -> s1.getName().compareTo(s2.getName())) + .collect(Collectors.toList()); + assertThat(snapshotIds, equalTo(originalSnapshots)); + + // shallow copy shard metadata - getRemoteStoreShallowCopyShardMetadata + RemoteStoreShardShallowCopySnapshot shardShallowCopySnapshot = repository.getRemoteStoreShallowCopyShardMetadata( + snapshotId2, + indexId, + new ShardId(remoteStoreIndexName, indexId.getId(), 0) + ); + assertEquals(shardShallowCopySnapshot.getRemoteStoreRepository(), remoteStoreRepositoryName); + } + + public void testGetRemoteStoreShallowCopyShardMetadata() throws IOException { + final Client client = client(); + final String snapshotRepositoryName = "test-repo"; + final String remoteStoreRepositoryName = "test-rs-repo"; + + logger.info("--> creating snapshot repository"); + + Settings snapshotRepoSettings = Settings.builder() + .put(node().settings()) + .put("location", OpenSearchIntegTestCase.randomRepoPath(node().settings())) + .build(); + createRepository(client, snapshotRepositoryName, snapshotRepoSettings); + + logger.info("--> creating a remote store enabled index and indexing documents"); + final String remoteStoreIndexName = "test-rs-idx"; + Settings indexSettings = getRemoteStoreBackedIndexSettings(); + createIndex(remoteStoreIndexName, indexSettings); + indexDocuments(client, remoteStoreIndexName); + + logger.info("--> create remote index shallow snapshot"); + Settings snapshotRepoSettingsForShallowCopy = Settings.builder() + .put(snapshotRepoSettings) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), Boolean.TRUE) + .build(); + updateRepository(client, snapshotRepositoryName, snapshotRepoSettingsForShallowCopy); + + SnapshotInfo snapshotInfo = createSnapshot(snapshotRepositoryName, "test-snap-2", new ArrayList<>(List.of(remoteStoreIndexName))); + final SnapshotId snapshotId = snapshotInfo.snapshotId(); + + String[] lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); + assertEquals("there should be only one lock file, but found " + Arrays.toString(lockFiles), 1, lockFiles.length); + assertTrue(lockFiles[0].endsWith(snapshotId.getUUID() + ".v2_lock")); + + final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class); + final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(snapshotRepositoryName); + RepositoryData repositoryData = OpenSearchBlobStoreRepositoryIntegTestCase.getRepositoryData(repository); + IndexSettings indexSetting = getIndexSettings(remoteStoreIndexName); + IndexId indexId = repositoryData.resolveIndexId(remoteStoreIndexName); + RemoteStoreShardShallowCopySnapshot shardShallowCopySnapshot = repository.getRemoteStoreShallowCopyShardMetadata( + snapshotId, + indexId, + new ShardId(remoteStoreIndexName, indexSetting.getUUID(), 0) + ); + assertEquals(shardShallowCopySnapshot.getRemoteStoreRepository(), remoteStoreRepositoryName); + assertEquals(shardShallowCopySnapshot.getIndexUUID(), indexSetting.getUUID()); + assertEquals(shardShallowCopySnapshot.getRepositoryBasePath(), ""); 
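+ // The repository base path is expected to be empty here, since the fs-based remote store repository in this test is rooted at the top of the blob store.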
+ } + + // Validate Scenario remoteStoreShallowCopy Snapshot -> remoteStoreShallowCopy Snapshot + // -> remoteStoreShallowCopy Snapshot -> normal snapshot + public void testRetrieveShallowCopySnapshotCase2() throws IOException { + final Client client = client(); + final String snapshotRepositoryName = "test-repo"; + final String remoteStoreRepositoryName = "test-rs-repo"; + + logger.info("--> creating snapshot repository"); + Settings snapshotRepoSettings = Settings.builder() + .put(node().settings()) + .put("location", OpenSearchIntegTestCase.randomRepoPath(node().settings())) + .build(); + createRepository(client, snapshotRepositoryName, snapshotRepoSettings); + + GetRepositoriesResponse updatedGetRepositoriesResponse = client.admin() + .cluster() + .prepareGetRepositories(snapshotRepositoryName) + .get(); + + RepositoryMetadata updatedRepositoryMetadata = updatedGetRepositoriesResponse.repositories().get(0); + + assertFalse(updatedRepositoryMetadata.settings().getAsBoolean(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), false)); + + logger.info("--> creating an index and indexing documents"); + final String indexName = "test-idx"; + createIndex(indexName); + ensureGreen(); + indexDocuments(client, indexName); + + logger.info("--> creating a remote store enabled index and indexing documents"); + final String remoteStoreIndexName = "test-rs-idx"; + Settings indexSettings = getRemoteStoreBackedIndexSettings(); + createIndex(remoteStoreIndexName, indexSettings); + indexDocuments(client, remoteStoreIndexName); + + logger.info("--> create first remote index shallow snapshot"); + + Settings snapshotRepoSettingsForShallowCopy = Settings.builder() + .put(snapshotRepoSettings) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .build(); + updateRepository(client, snapshotRepositoryName, snapshotRepoSettingsForShallowCopy); + + updatedGetRepositoriesResponse = client.admin().cluster().prepareGetRepositories(snapshotRepositoryName).get(); + + updatedRepositoryMetadata = updatedGetRepositoriesResponse.repositories().get(0); + + assertTrue(updatedRepositoryMetadata.settings().getAsBoolean(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), false)); + + SnapshotInfo snapshotInfo = createSnapshot( + snapshotRepositoryName, + "test-snap-1", + new ArrayList<>(Arrays.asList(indexName, remoteStoreIndexName)) + ); + final SnapshotId snapshotId1 = snapshotInfo.snapshotId(); + + String[] lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); + assertEquals("lock files are " + Arrays.toString(lockFiles), 1, lockFiles.length); + assertTrue(lockFiles[0].endsWith(snapshotId1.getUUID() + ".v2_lock")); + + logger.info("--> create second remote index shallow snapshot"); + snapshotInfo = createSnapshot( + snapshotRepositoryName, + "test-snap-2", + new ArrayList<>(Arrays.asList(indexName, remoteStoreIndexName)) + ); + final SnapshotId snapshotId2 = snapshotInfo.snapshotId(); + + lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); + assertEquals("lock files are " + Arrays.toString(lockFiles), 2, lockFiles.length); + List<SnapshotId> shallowCopySnapshotIDs = Arrays.asList(snapshotId1, snapshotId2); + for (SnapshotId snapshotId : shallowCopySnapshotIDs) { + assertTrue(lockFiles[0].contains(snapshotId.getUUID()) || lockFiles[1].contains(snapshotId.getUUID())); + } + logger.info("--> create third remote index shallow snapshot"); + snapshotInfo = createSnapshot( + snapshotRepositoryName, + "test-snap-3", + 
new ArrayList<>(Arrays.asList(indexName, remoteStoreIndexName)) + ); + final SnapshotId snapshotId3 = snapshotInfo.snapshotId(); + + lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); + assertEquals(3, lockFiles.length); + shallowCopySnapshotIDs = Arrays.asList(snapshotId1, snapshotId2, snapshotId3); + for (SnapshotId snapshotId : shallowCopySnapshotIDs) { + assertTrue( + lockFiles[0].contains(snapshotId.getUUID()) + || lockFiles[1].contains(snapshotId.getUUID()) + || lockFiles[2].contains(snapshotId.getUUID()) + ); + } + logger.info("--> create normal snapshot"); + createRepository(client, snapshotRepositoryName, snapshotRepoSettings); + snapshotInfo = createSnapshot( + snapshotRepositoryName, + "test-snap-4", + new ArrayList<>(Arrays.asList(indexName, remoteStoreIndexName)) + ); + final SnapshotId snapshotId4 = snapshotInfo.snapshotId(); + + lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); + assertEquals("lock files are " + Arrays.toString(lockFiles), 3, lockFiles.length); + shallowCopySnapshotIDs = Arrays.asList(snapshotId1, snapshotId2, snapshotId3); + for (SnapshotId snapshotId : shallowCopySnapshotIDs) { + assertTrue( + lockFiles[0].contains(snapshotId.getUUID()) + || lockFiles[1].contains(snapshotId.getUUID()) + || lockFiles[2].contains(snapshotId.getUUID()) + ); + } + + logger.info("--> make sure the node's repository can resolve the snapshots"); + final List<SnapshotId> originalSnapshots = Arrays.asList(snapshotId1, snapshotId2, snapshotId3, snapshotId4); + + final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class); + final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(snapshotRepositoryName); + List<SnapshotId> snapshotIds = OpenSearchBlobStoreRepositoryIntegTestCase.getRepositoryData(repository) + .getSnapshotIds() + .stream() + .sorted((s1, s2) -> s1.getName().compareTo(s2.getName())) + .collect(Collectors.toList()); + assertThat(snapshotIds, equalTo(originalSnapshots)); + } + +} diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java index 93958ce1ba7fb..e4e83f2453fa2 100644 --- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java @@ -46,6 +46,8 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.index.snapshots.IndexShardSnapshotFailedException; import org.opensearch.env.Environment; import org.opensearch.env.TestEnvironment; import org.opensearch.index.engine.EngineConfigFactory; @@ -54,8 +56,6 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; import org.opensearch.index.shard.IndexShardTestCase; -import org.opensearch.core.index.shard.ShardId; -import org.opensearch.core.index.snapshots.IndexShardSnapshotFailedException; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.recovery.RecoverySettings; diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java 
b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java index 28513f279f8ad..b76e01d6d4c82 100644 --- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java @@ -32,33 +32,21 @@ package org.opensearch.repositories.blobstore; -import org.apache.lucene.tests.util.LuceneTestCase; import org.opensearch.Version; -import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; -import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; -import org.opensearch.common.blobstore.BlobContainer; -import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; -import org.opensearch.index.IndexModule; -import org.opensearch.index.IndexService; -import org.opensearch.index.IndexSettings; -import org.opensearch.index.snapshots.blobstore.RemoteStoreShardShallowCopySnapshot; -import org.opensearch.index.store.RemoteBufferedOutputDirectory; -import org.opensearch.indices.IndicesService; import org.opensearch.indices.recovery.RecoverySettings; -import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.plugins.Plugin; import org.opensearch.plugins.RepositoryPlugin; import org.opensearch.repositories.IndexId; @@ -70,29 +58,26 @@ import org.opensearch.repositories.fs.FsRepository; import org.opensearch.snapshots.SnapshotId; import org.opensearch.snapshots.SnapshotState; -import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.OpenSearchSingleNodeTestCase; -import java.io.IOException; import java.nio.file.Path; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Function; import java.util.stream.Collectors; +import static org.opensearch.repositories.RepositoryDataTests.generateRandomRepoData; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; -import static org.opensearch.repositories.RepositoryDataTests.generateRandomRepoData; /** * Tests for the {@link BlobStoreRepository} and its subclasses. 
*/ -@LuceneTestCase.SuppressFileSystems("ExtrasFS") -public class BlobStoreRepositoryTests extends OpenSearchSingleNodeTestCase { +public class BlobStoreRepositoryTests extends BlobStoreRepositoryHelperTests { static final String REPO_TYPE = "fsLike"; @@ -122,11 +107,6 @@ protected void assertSnapshotOrGenericThread() { } } - @Override - protected Settings nodeSettings() { - return Settings.builder().put(super.nodeSettings()).put(FeatureFlags.REMOTE_STORE, "true").build(); - } - public void testRetrieveSnapshots() throws Exception { final Client client = client(); final Path location = OpenSearchIntegTestCase.randomRepoPath(node().settings()); @@ -183,367 +163,6 @@ public void testRetrieveSnapshots() throws Exception { assertThat(snapshotIds, equalTo(originalSnapshots)); } - private void createRepository(Client client, String repoName) { - AcknowledgedResponse putRepositoryResponse = client.admin() - .cluster() - .preparePutRepository(repoName) - .setType(REPO_TYPE) - .setSettings( - Settings.builder().put(node().settings()).put("location", OpenSearchIntegTestCase.randomRepoPath(node().settings())) - ) - .get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); - } - - private void createRepository(Client client, String repoName, Settings repoSettings) { - AcknowledgedResponse putRepositoryResponse = client.admin() - .cluster() - .preparePutRepository(repoName) - .setType(REPO_TYPE) - .setSettings(repoSettings) - .get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); - } - - private void updateRepository(Client client, String repoName, Settings repoSettings) { - createRepository(client, repoName, repoSettings); - } - - private Settings getRemoteStoreBackedIndexSettings(String remoteStoreRepo) { - return Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "1") - .put("index.refresh_interval", "300s") - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") - .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.FS.getSettingsKey()) - .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) - .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) - .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) - .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, remoteStoreRepo) - .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, remoteStoreRepo) - .build(); - } - - private void indexDocuments(Client client, String indexName) { - int numDocs = randomIntBetween(10, 20); - for (int i = 0; i < numDocs; i++) { - String id = Integer.toString(i); - client.prepareIndex(indexName).setId(id).setSource("text", "sometext").get(); - } - client.admin().indices().prepareFlush(indexName).get(); - } - - private String[] getLockFilesInRemoteStore(String remoteStoreIndex, String remoteStoreRepository) throws IOException { - String indexUUID = client().admin() - .indices() - .prepareGetSettings(remoteStoreIndex) - .get() - .getSetting(remoteStoreIndex, IndexMetadata.SETTING_INDEX_UUID); - final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class); - final BlobStoreRepository remoteStorerepository = (BlobStoreRepository) repositoriesService.repository(remoteStoreRepository); - BlobPath shardLevelBlobPath = remoteStorerepository.basePath().add(indexUUID).add("0").add("segments").add("lock_files"); - BlobContainer blobContainer = remoteStorerepository.blobStore().blobContainer(shardLevelBlobPath); - try (RemoteBufferedOutputDirectory lockDirectory = new 
RemoteBufferedOutputDirectory(blobContainer)) { - return Arrays.stream(lockDirectory.listAll()).filter(lock -> lock.endsWith(".lock")).toArray(String[]::new); - } - } - - // Validate Scenario Normal Snapshot -> remoteStoreShallowCopy Snapshot -> normal Snapshot - public void testRetrieveShallowCopySnapshotCase1() throws IOException { - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - final Client client = client(); - final String snapshotRepositoryName = "test-repo"; - final String remoteStoreRepositoryName = "test-rs-repo"; - - logger.info("--> creating snapshot repository"); - - Settings snapshotRepoSettings = Settings.builder() - .put(node().settings()) - .put("location", OpenSearchIntegTestCase.randomRepoPath(node().settings())) - .build(); - createRepository(client, snapshotRepositoryName, snapshotRepoSettings); - - logger.info("--> creating remote store repository"); - Settings remoteStoreRepoSettings = Settings.builder() - .put(node().settings()) - .put("location", OpenSearchIntegTestCase.randomRepoPath(node().settings())) - .build(); - createRepository(client, remoteStoreRepositoryName, remoteStoreRepoSettings); - - logger.info("--> creating an index and indexing documents"); - final String indexName = "test-idx"; - createIndex(indexName); - ensureGreen(); - indexDocuments(client, indexName); - - logger.info("--> creating a remote store enabled index and indexing documents"); - final String remoteStoreIndexName = "test-rs-idx"; - Settings indexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepositoryName); - createIndex(remoteStoreIndexName, indexSettings); - indexDocuments(client, remoteStoreIndexName); - - logger.info("--> create first snapshot"); - CreateSnapshotResponse createSnapshotResponse = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepositoryName, "test-snap-1") - .setWaitForCompletion(true) - .setIndices(indexName, remoteStoreIndexName) - .get(); - final SnapshotId snapshotId1 = createSnapshotResponse.getSnapshotInfo().snapshotId(); - - String[] lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); - assert (lockFiles.length == 0) : "there should be no lock files present in directory, but found " + Arrays.toString(lockFiles); - logger.info("--> create remote index shallow snapshot"); - Settings snapshotRepoSettingsForShallowCopy = Settings.builder() - .put(snapshotRepoSettings) - .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), Boolean.TRUE) - .build(); - updateRepository(client, snapshotRepositoryName, snapshotRepoSettingsForShallowCopy); - - createSnapshotResponse = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepositoryName, "test-snap-2") - .setWaitForCompletion(true) - .setIndices(indexName, remoteStoreIndexName) - .get(); - final SnapshotId snapshotId2 = createSnapshotResponse.getSnapshotInfo().snapshotId(); - - lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); - assert (lockFiles.length == 1) : "there should be only one lock file, but found " + Arrays.toString(lockFiles); - assert lockFiles[0].endsWith(snapshotId2.getUUID() + ".lock"); - - logger.info("--> create another normal snapshot"); - updateRepository(client, snapshotRepositoryName, snapshotRepoSettings); - createSnapshotResponse = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepositoryName, "test-snap-3") - .setWaitForCompletion(true) - .setIndices(indexName, remoteStoreIndexName) - .get(); - final SnapshotId snapshotId3 = 
createSnapshotResponse.getSnapshotInfo().snapshotId(); - - lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); - assert (lockFiles.length == 1) : "there should be only one lock file, but found " + Arrays.toString(lockFiles); - assert lockFiles[0].endsWith(snapshotId2.getUUID() + ".lock"); - - logger.info("--> make sure the node's repository can resolve the snapshots"); - final List<SnapshotId> originalSnapshots = Arrays.asList(snapshotId1, snapshotId2, snapshotId3); - - final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class); - final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(snapshotRepositoryName); - RepositoryData repositoryData = OpenSearchBlobStoreRepositoryIntegTestCase.getRepositoryData(repository); - IndexId indexId = repositoryData.resolveIndexId(remoteStoreIndexName); - - List<SnapshotId> snapshotIds = repositoryData.getSnapshotIds() - .stream() - .sorted((s1, s2) -> s1.getName().compareTo(s2.getName())) - .collect(Collectors.toList()); - assertThat(snapshotIds, equalTo(originalSnapshots)); - - // shallow copy shard metadata - getRemoteStoreShallowCopyShardMetadata - RemoteStoreShardShallowCopySnapshot shardShallowCopySnapshot = repository.getRemoteStoreShallowCopyShardMetadata( - snapshotId2, - indexId, - new ShardId(remoteStoreIndexName, indexId.getId(), 0) - ); - assertEquals(shardShallowCopySnapshot.getRemoteStoreRepository(), remoteStoreRepositoryName); - } - - public void testGetRemoteStoreShallowCopyShardMetadata() throws IOException { - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - final Client client = client(); - final String snapshotRepositoryName = "test-repo"; - final String remoteStoreRepositoryName = "test-rs-repo"; - - logger.info("--> creating snapshot repository"); - - Settings snapshotRepoSettings = Settings.builder() - .put(node().settings()) - .put("location", OpenSearchIntegTestCase.randomRepoPath(node().settings())) - .build(); - createRepository(client, snapshotRepositoryName, snapshotRepoSettings); - - logger.info("--> creating remote store repository"); - Settings remoteStoreRepoSettings = Settings.builder() - .put(node().settings()) - .put("location", OpenSearchIntegTestCase.randomRepoPath(node().settings())) - .build(); - createRepository(client, remoteStoreRepositoryName, remoteStoreRepoSettings); - - logger.info("--> creating a remote store enabled index and indexing documents"); - final String remoteStoreIndexName = "test-rs-idx"; - Settings indexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepositoryName); - createIndex(remoteStoreIndexName, indexSettings); - indexDocuments(client, remoteStoreIndexName); - - logger.info("--> create remote index shallow snapshot"); - Settings snapshotRepoSettingsForShallowCopy = Settings.builder() - .put(snapshotRepoSettings) - .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), Boolean.TRUE) - .build(); - updateRepository(client, snapshotRepositoryName, snapshotRepoSettingsForShallowCopy); - - CreateSnapshotResponse createSnapshotResponse = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepositoryName, "test-snap-2") - .setWaitForCompletion(true) - .setIndices(remoteStoreIndexName) - .get(); - final SnapshotId snapshotId = createSnapshotResponse.getSnapshotInfo().snapshotId(); - - String[] lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); - assert (lockFiles.length == 1) : "there should be only one lock file, but 
found " + Arrays.toString(lockFiles); - assert lockFiles[0].endsWith(snapshotId.getUUID() + ".lock"); - - final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class); - final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(snapshotRepositoryName); - RepositoryData repositoryData = OpenSearchBlobStoreRepositoryIntegTestCase.getRepositoryData(repository); - IndexSettings indexSetting = getIndexSettings(remoteStoreIndexName); - IndexId indexId = repositoryData.resolveIndexId(remoteStoreIndexName); - RemoteStoreShardShallowCopySnapshot shardShallowCopySnapshot = repository.getRemoteStoreShallowCopyShardMetadata( - snapshotId, - indexId, - new ShardId(remoteStoreIndexName, indexSetting.getUUID(), 0) - ); - assertEquals(shardShallowCopySnapshot.getRemoteStoreRepository(), remoteStoreRepositoryName); - assertEquals(shardShallowCopySnapshot.getIndexUUID(), indexSetting.getUUID()); - assertEquals(shardShallowCopySnapshot.getRepositoryBasePath(), ""); - } - - private IndexSettings getIndexSettings(String indexName) { - final IndicesService indicesService = getInstanceFromNode(IndicesService.class); - final IndexService indexService = indicesService.indexService(resolveIndex(indexName)); - return indexService.getIndexSettings(); - } - - // Validate Scenario remoteStoreShallowCopy Snapshot -> remoteStoreShallowCopy Snapshot - // -> remoteStoreShallowCopy Snapshot -> normal snapshot - public void testRetrieveShallowCopySnapshotCase2() throws IOException { - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - final Client client = client(); - final String snapshotRepositoryName = "test-repo"; - final String remoteStoreRepositoryName = "test-rs-repo"; - - logger.info("--> creating snapshot repository"); - Settings snapshotRepoSettings = Settings.builder() - .put(node().settings()) - .put("location", OpenSearchIntegTestCase.randomRepoPath(node().settings())) - .build(); - createRepository(client, snapshotRepositoryName, snapshotRepoSettings); - - GetRepositoriesResponse updatedGetRepositoriesResponse = client.admin() - .cluster() - .prepareGetRepositories(snapshotRepositoryName) - .get(); - - RepositoryMetadata updatedRepositoryMetadata = updatedGetRepositoriesResponse.repositories().get(0); - - assertFalse(updatedRepositoryMetadata.settings().getAsBoolean(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), false)); - - logger.info("--> creating remote store repository"); - createRepository(client, remoteStoreRepositoryName); - - logger.info("--> creating an index and indexing documents"); - final String indexName = "test-idx"; - createIndex(indexName); - ensureGreen(); - indexDocuments(client, indexName); - - logger.info("--> creating a remote store enabled index and indexing documents"); - final String remoteStoreIndexName = "test-rs-idx"; - Settings indexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepositoryName); - createIndex(remoteStoreIndexName, indexSettings); - indexDocuments(client, remoteStoreIndexName); - - logger.info("--> create first remote index shallow snapshot"); - - Settings snapshotRepoSettingsForShallowCopy = Settings.builder() - .put(snapshotRepoSettings) - .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) - .build(); - updateRepository(client, snapshotRepositoryName, snapshotRepoSettingsForShallowCopy); - - updatedGetRepositoriesResponse = client.admin().cluster().prepareGetRepositories(snapshotRepositoryName).get(); - - updatedRepositoryMetadata = 
updatedGetRepositoriesResponse.repositories().get(0); - - assertTrue(updatedRepositoryMetadata.settings().getAsBoolean(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), false)); - - CreateSnapshotResponse createSnapshotResponse = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepositoryName, "test-snap-1") - .setWaitForCompletion(true) - .setIndices(indexName, remoteStoreIndexName) - .get(); - final SnapshotId snapshotId1 = createSnapshotResponse.getSnapshotInfo().snapshotId(); - - String[] lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); - assert (lockFiles.length == 1) : "lock files are " + Arrays.toString(lockFiles); - assert lockFiles[0].endsWith(snapshotId1.getUUID() + ".lock"); - - logger.info("--> create second remote index shallow snapshot"); - createSnapshotResponse = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepositoryName, "test-snap-2") - .setWaitForCompletion(true) - .setIndices(indexName, remoteStoreIndexName) - .get(); - final SnapshotId snapshotId2 = createSnapshotResponse.getSnapshotInfo().snapshotId(); - - lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); - assert (lockFiles.length == 2) : "lock files are " + Arrays.toString(lockFiles); - List<SnapshotId> shallowCopySnapshotIDs = Arrays.asList(snapshotId1, snapshotId2); - for (SnapshotId snapshotId : shallowCopySnapshotIDs) { - assert lockFiles[0].contains(snapshotId.getUUID()) || lockFiles[1].contains(snapshotId.getUUID()); - } - logger.info("--> create third remote index shallow snapshot"); - createSnapshotResponse = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepositoryName, "test-snap-3") - .setWaitForCompletion(true) - .setIndices(indexName, remoteStoreIndexName) - .get(); - final SnapshotId snapshotId3 = createSnapshotResponse.getSnapshotInfo().snapshotId(); - - lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); - assert (lockFiles.length == 3); - shallowCopySnapshotIDs = Arrays.asList(snapshotId1, snapshotId2, snapshotId3); - for (SnapshotId snapshotId : shallowCopySnapshotIDs) { - assert lockFiles[0].contains(snapshotId.getUUID()) - || lockFiles[1].contains(snapshotId.getUUID()) - || lockFiles[2].contains(snapshotId.getUUID()); - } - logger.info("--> create normal snapshot"); - createRepository(client, snapshotRepositoryName, snapshotRepoSettings); - createSnapshotResponse = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepositoryName, "test-snap-4") - .setWaitForCompletion(true) - .setIndices(indexName, remoteStoreIndexName) - .get(); - final SnapshotId snapshotId4 = createSnapshotResponse.getSnapshotInfo().snapshotId(); - - lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); - assert (lockFiles.length == 3) : "lock files are " + Arrays.toString(lockFiles); - shallowCopySnapshotIDs = Arrays.asList(snapshotId1, snapshotId2, snapshotId3); - for (SnapshotId snapshotId : shallowCopySnapshotIDs) { - assert lockFiles[0].contains(snapshotId.getUUID()) - || lockFiles[1].contains(snapshotId.getUUID()) - || lockFiles[2].contains(snapshotId.getUUID()); - } - - logger.info("--> make sure the node's repository can resolve the snapshots"); - final List<SnapshotId> originalSnapshots = Arrays.asList(snapshotId1, snapshotId2, snapshotId3, snapshotId4); - - final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class); - final BlobStoreRepository repository = 
(BlobStoreRepository) repositoriesService.repository(snapshotRepositoryName); - List<SnapshotId> snapshotIds = OpenSearchBlobStoreRepositoryIntegTestCase.getRepositoryData(repository) - .getSnapshotIds() - .stream() - .sorted((s1, s2) -> s1.getName().compareTo(s2.getName())) - .collect(Collectors.toList()); - assertThat(snapshotIds, equalTo(originalSnapshots)); - } - public void testReadAndWriteSnapshotsThroughIndexFile() throws Exception { final BlobStoreRepository repository = setupRepo(); final long pendingGeneration = repository.metadata.pendingGeneration(); @@ -636,7 +255,7 @@ public void testBadChunksize() throws Exception { ); } - public void testFsRepositoryCompressDeprecated() { + public void testFsRepositoryCompressDeprecatedIgnored() { final Path location = OpenSearchIntegTestCase.randomRepoPath(node().settings()); final Settings settings = Settings.builder().put(node().settings()).put("location", location).build(); final RepositoryMetadata metadata = new RepositoryMetadata("test-repo", REPO_TYPE, settings); @@ -649,10 +268,7 @@ public void testFsRepositoryCompressDeprecated() { new FsRepository(metadata, useCompressEnvironment, null, BlobStoreTestUtil.mockClusterService(), null); - assertWarnings( - "[repositories.fs.compress] setting was deprecated in OpenSearch and will be removed in a future release!" - + " See the breaking changes documentation for the next major version." - ); + assertNoDeprecationWarnings(); } private static void writeIndexGen(BlobStoreRepository repository, RepositoryData repositoryData, long generation) throws Exception { @@ -705,4 +321,38 @@ private RepositoryData addRandomSnapshotsToRepoData(RepositoryData repoData, boo return repoData; } + public void testRemoteStoreShardCleanupTask() { + // TODO: move this to a separate class and add more scenarios.
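The three scenarios in the new testRemoteStoreShardCleanupTask below pin down how RemoteStoreShardCleanupTask de-duplicates per-shard cleanup work: at most one cleanup runs per shard, and a task submitted while one is in flight replaces any previously parked task. As a reading aid, here is a minimal sketch of the pattern those assertions imply; the class body is reconstructed from the test's expectations, not copied from the production implementation:

    import java.util.Map;
    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;

    final class ShardCleanupSketch implements Runnable {
        // One ongoing cleanup per shard; later submissions become the single pending task.
        static final Set<String> ongoingRemoteDirectoryCleanups = ConcurrentHashMap.newKeySet();
        static final Map<String, Runnable> pendingRemoteDirectoryCleanups = new ConcurrentHashMap<>();

        private final Runnable task;
        private final String shardIdentifier; // indexUUID + "/" + shard number, as in the test

        ShardCleanupSketch(Runnable task, String indexUUID, int shardNum) {
            this.task = task;
            this.shardIdentifier = String.join("/", indexUUID, String.valueOf(shardNum));
        }

        @Override
        public void run() {
            if (ongoingRemoteDirectoryCleanups.add(shardIdentifier) == false) {
                // A cleanup for this shard is already running: park this task,
                // overwriting any earlier pending one (scenarios 2 and 3).
                pendingRemoteDirectoryCleanups.put(shardIdentifier, task);
                return;
            }
            try {
                task.run(); // scenario 1: nothing ongoing, execute immediately
            } finally {
                ongoingRemoteDirectoryCleanups.remove(shardIdentifier);
                Runnable pending = pendingRemoteDirectoryCleanups.remove(shardIdentifier);
                if (pending != null) {
                    pending.run(); // production code would more likely re-schedule this
                }
            }
        }
    }

Relying on Set.add's boolean return keeps the check-and-mark step atomic, which is also why the test can simulate an in-flight cleanup simply by seeding ongoingRemoteDirectoryCleanups by hand.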
+ AtomicBoolean executed1 = new AtomicBoolean(false); + Runnable task1 = () -> executed1.set(true); + String indexName = "test-idx"; + String testIndexUUID = "test-idx-uuid"; + ShardId shardId = new ShardId(new Index(indexName, testIndexUUID), 0); + + // Scenario 1: pending = empty, ongoing = false => executed + RemoteStoreShardCleanupTask remoteStoreShardCleanupTask = new RemoteStoreShardCleanupTask(task1, testIndexUUID, shardId); + remoteStoreShardCleanupTask.run(); + assertTrue(executed1.get()); + + // Scenario 2: pending = empty, ongoing = true => pending = currentTask + executed1.set(false); + String shardIdentifier = String.join("/", testIndexUUID, String.valueOf(shardId.id())); + RemoteStoreShardCleanupTask.ongoingRemoteDirectoryCleanups.add(shardIdentifier); + + remoteStoreShardCleanupTask = new RemoteStoreShardCleanupTask(task1, testIndexUUID, shardId); + remoteStoreShardCleanupTask.run(); + assertFalse(executed1.get()); + assertSame(RemoteStoreShardCleanupTask.pendingRemoteDirectoryCleanups.get(shardIdentifier), task1); + + // Scenario 3: pending = anotherTask, ongoing = true => pending = currentTask + AtomicBoolean executed2 = new AtomicBoolean(false); + Runnable task2 = () -> executed2.set(true); + RemoteStoreShardCleanupTask.pendingRemoteDirectoryCleanups.put(shardIdentifier, task1); + RemoteStoreShardCleanupTask.ongoingRemoteDirectoryCleanups.add(shardIdentifier); + + remoteStoreShardCleanupTask = new RemoteStoreShardCleanupTask(task2, testIndexUUID, shardId); + remoteStoreShardCleanupTask.run(); + assertFalse(executed1.get()); + assertSame(RemoteStoreShardCleanupTask.pendingRemoteDirectoryCleanups.get(shardIdentifier), task2); + } } diff --git a/server/src/test/java/org/opensearch/repositories/fs/FsRepositoryTests.java b/server/src/test/java/org/opensearch/repositories/fs/FsRepositoryTests.java index 3049aa161f10b..d9f599714805b 100644 --- a/server/src/test/java/org/opensearch/repositories/fs/FsRepositoryTests.java +++ b/server/src/test/java/org/opensearch/repositories/fs/FsRepositoryTests.java @@ -31,7 +31,6 @@ package org.opensearch.repositories.fs; -import org.apache.lucene.tests.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.SortedDocValuesField; @@ -44,9 +43,10 @@ import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.Term; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.analysis.MockAnalyzer; +import org.apache.lucene.tests.util.TestUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IOSupplier; -import org.apache.lucene.tests.util.TestUtil; import org.opensearch.Version; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.metadata.IndexMetadata; @@ -58,24 +58,26 @@ import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.index.store.Store; import 
org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.repositories.IndexId; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.blobstore.BlobStoreTestUtil; import org.opensearch.snapshots.Snapshot; import org.opensearch.snapshots.SnapshotId; import org.opensearch.test.DummyShardLock; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -90,6 +92,7 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; +import static org.hamcrest.Matchers.is; public class FsRepositoryTests extends OpenSearchTestCase { @@ -218,6 +221,31 @@ public void testSnapshotAndRestore() throws IOException, InterruptedException { } } + public void testRestrictedSettingsDefault() { + Path repo = createTempDir(); + Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath()) + .put(Environment.PATH_REPO_SETTING.getKey(), repo.toAbsolutePath()) + .put("location", repo) + .put(FsRepository.BASE_PATH_SETTING.getKey(), "my_base_path") + .build(); + RepositoryMetadata metadata = new RepositoryMetadata("test", "fs", settings); + FsRepository repository = new FsRepository( + metadata, + new Environment(settings, null), + NamedXContentRegistry.EMPTY, + BlobStoreTestUtil.mockClusterService(), + new RecoverySettings(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)) + ); + + List<Setting<?>> restrictedSettings = repository.getRestrictedSystemRepositorySettings(); + assertThat(restrictedSettings.size(), is(4)); + assertTrue(restrictedSettings.contains(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING)); + assertTrue(restrictedSettings.contains(BlobStoreRepository.READONLY_SETTING)); + assertTrue(restrictedSettings.contains(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY)); + assertTrue(restrictedSettings.contains(FsRepository.LOCATION_SETTING)); + } + private void runGeneric(ThreadPool threadPool, Runnable runnable) throws InterruptedException { CountDownLatch latch = new CountDownLatch(1); threadPool.generic().submit(() -> { diff --git a/server/src/test/java/org/opensearch/repositories/fs/ReloadableFsRepositoryTests.java b/server/src/test/java/org/opensearch/repositories/fs/ReloadableFsRepositoryTests.java new file mode 100644 index 0000000000000..db2cf9c3e9582 --- /dev/null +++ b/server/src/test/java/org/opensearch/repositories/fs/ReloadableFsRepositoryTests.java @@ -0,0 +1,119 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
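The new ReloadableFsRepositoryTests file that starts here validates in-place reloading of repository compression settings. Condensed, the flow the tests drive looks like the sketch below; this is a rough outline using only the calls that appear in the tests (isReloadable, validateMetadata, reload), while the helper name and the hard-coded setting values are my own illustrative additions:

    import org.opensearch.cluster.metadata.RepositoryMetadata;
    import org.opensearch.common.settings.Settings;
    import org.opensearch.repositories.fs.ReloadableFsRepository;

    final class ReloadSketch {
        // Hypothetical helper: switch an existing "fs" repository to zstd compression in place.
        static void enableZstd(ReloadableFsRepository repository, String repoPath) {
            Settings updated = Settings.builder()
                .put("location", repoPath)        // same repository location
                .put("compress", true)            // turn compression on
                .put("compression_type", "zstd")  // a compressor registered in CompressorRegistry
                .build();
            RepositoryMetadata metadata = new RepositoryMetadata("test", "fs", updated);
            assert repository.isReloadable();      // ReloadableFsRepository opts in to reloading
            repository.validateMetadata(metadata); // reject bad settings before applying them
            repository.reload(metadata);           // swaps the compressor without rebuilding the repo
        }
    }

After the reload, repository.getCompressor() reflects the new compression_type, which is exactly what testCompressReload and testCompressionTypeReload assert.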
+ */ + +package org.opensearch.repositories.fs; + +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.common.compress.DeflateCompressor; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.compress.ZstdCompressor; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.compress.CompressorRegistry; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.env.Environment; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.repositories.blobstore.BlobStoreTestUtil; +import org.opensearch.test.OpenSearchTestCase; + +import java.nio.file.Path; +import java.util.Locale; + +public class ReloadableFsRepositoryTests extends OpenSearchTestCase { + ReloadableFsRepository repository; + RepositoryMetadata metadata; + Settings settings; + Path repo; + + @Override + public void setUp() throws Exception { + super.setUp(); + + repo = createTempDir(); + settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath()) + .put(Environment.PATH_REPO_SETTING.getKey(), repo.toAbsolutePath()) + .putList(Environment.PATH_DATA_SETTING.getKey(), tmpPaths()) + .put("location", repo) + .put("compress", false) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(FsRepository.BASE_PATH_SETTING.getKey(), "my_base_path") + .build(); + metadata = new RepositoryMetadata("test", "fs", settings); + repository = new ReloadableFsRepository( + metadata, + new Environment(settings, null), + NamedXContentRegistry.EMPTY, + BlobStoreTestUtil.mockClusterService(), + new RecoverySettings(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)) + ); + } + + /** + * Validates that {@link ReloadableFsRepository} supports in-place reloading + */ + public void testIsReloadable() { + assertTrue(repository.isReloadable()); + } + + /** + * Updates the repository metadata of an existing repository to enable the default compressor + */ + public void testCompressReload() { + assertEquals(CompressorRegistry.none(), repository.getCompressor()); + updateCompressionTypeToDefault(); + repository.validateMetadata(metadata); + repository.reload(metadata); + assertEquals(CompressorRegistry.defaultCompressor(), repository.getCompressor()); + } + + /** + * Updates the repository metadata of an existing repository to change the compressor type from the default to Zstd + */ + public void testCompressionTypeReload() { + assertEquals(CompressorRegistry.none(), repository.getCompressor()); + updateCompressionTypeToDefault(); + repository = new ReloadableFsRepository( + metadata, + new Environment(settings, null), + NamedXContentRegistry.EMPTY, + BlobStoreTestUtil.mockClusterService(), + new RecoverySettings(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)) + ); + assertEquals(CompressorRegistry.defaultCompressor(), repository.getCompressor()); + + settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath()) + .put(Environment.PATH_REPO_SETTING.getKey(), repo.toAbsolutePath()) + .putList(Environment.PATH_DATA_SETTING.getKey(), tmpPaths()) + .put("location", repo) + .put("compress", true) + .put("compression_type", ZstdCompressor.NAME.toLowerCase(Locale.ROOT)) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(FsRepository.BASE_PATH_SETTING.getKey(), "my_base_path") + .build(); + metadata = new 
RepositoryMetadata("test", "fs", settings); + repository.validateMetadata(metadata); + repository.reload(metadata); + assertEquals(CompressorRegistry.getCompressor(ZstdCompressor.NAME.toUpperCase(Locale.ROOT)), repository.getCompressor()); + } + + private void updateCompressionTypeToDefault() { + settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath()) + .put(Environment.PATH_REPO_SETTING.getKey(), repo.toAbsolutePath()) + .putList(Environment.PATH_DATA_SETTING.getKey(), tmpPaths()) + .put("location", repo) + .put("compress", true) + .put("compression_type", DeflateCompressor.NAME.toLowerCase(Locale.ROOT)) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(FsRepository.BASE_PATH_SETTING.getKey(), "my_base_path") + .build(); + metadata = new RepositoryMetadata("test", "fs", settings); + } +} diff --git a/server/src/test/java/org/opensearch/rest/BaseRestHandlerTests.java b/server/src/test/java/org/opensearch/rest/BaseRestHandlerTests.java index 1f5e0cda883c5..ce929e64d8960 100644 --- a/server/src/test/java/org/opensearch/rest/BaseRestHandlerTests.java +++ b/server/src/test/java/org/opensearch/rest/BaseRestHandlerTests.java @@ -34,26 +34,26 @@ import org.opensearch.client.node.NodeClient; import org.opensearch.common.Table; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.settings.Settings; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.rest.RestHandler.ReplacedRoute; +import org.opensearch.rest.RestHandler.Route; +import org.opensearch.rest.RestRequest.Method; import org.opensearch.rest.action.cat.AbstractCatAction; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.rest.FakeRestChannel; import org.opensearch.test.rest.FakeRestRequest; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; -import org.opensearch.rest.RestRequest.Method; -import org.opensearch.rest.RestHandler.Route; -import org.opensearch.rest.RestHandler.ReplacedRoute; import java.io.IOException; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; -import java.util.Arrays; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; @@ -299,7 +299,7 @@ public String getName() { try (XContentBuilder builder = JsonXContent.contentBuilder().startObject().endObject()) { final RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent( new BytesArray(builder.toString()), - XContentType.JSON + MediaTypeRegistry.JSON ).build(); final RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); handler.handleRequest(request, channel, mockClient); @@ -344,7 +344,7 @@ public String getName() { try (XContentBuilder builder = JsonXContent.contentBuilder().startObject().endObject()) { final RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent( new BytesArray(builder.toString()), - XContentType.JSON + MediaTypeRegistry.JSON ).build(); final RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); final IllegalArgumentException e = expectThrows( diff --git a/server/src/test/java/org/opensearch/rest/BytesRestResponseTests.java 
b/server/src/test/java/org/opensearch/rest/BytesRestResponseTests.java index 5bdc2cc0bd280..9252475327b9a 100644 --- a/server/src/test/java/org/opensearch/rest/BytesRestResponseTests.java +++ b/server/src/test/java/org/opensearch/rest/BytesRestResponseTests.java @@ -33,21 +33,21 @@ package org.opensearch.rest; import org.opensearch.ExceptionsHelper; -import org.opensearch.OpenSearchStatusException; import org.opensearch.OpenSearchException; +import org.opensearch.OpenSearchStatusException; import org.opensearch.ResourceAlreadyExistsException; import org.opensearch.ResourceNotFoundException; import org.opensearch.action.OriginalIndices; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.ShardSearchFailure; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.search.SearchShardTarget; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.rest.FakeRestRequest; diff --git a/server/src/test/java/org/opensearch/rest/DeprecationRestHandlerTests.java b/server/src/test/java/org/opensearch/rest/DeprecationRestHandlerTests.java index 9b0f826b67367..a7dba22621d88 100644 --- a/server/src/test/java/org/opensearch/rest/DeprecationRestHandlerTests.java +++ b/server/src/test/java/org/opensearch/rest/DeprecationRestHandlerTests.java @@ -36,8 +36,8 @@ import org.opensearch.client.node.NodeClient; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.test.OpenSearchTestCase; - import org.junit.Before; + import org.mockito.InOrder; import static org.mockito.Mockito.inOrder; diff --git a/server/src/test/java/org/opensearch/rest/RestControllerTests.java b/server/src/test/java/org/opensearch/rest/RestControllerTests.java index f8ea59040d22b..b7239e7b59742 100644 --- a/server/src/test/java/org/opensearch/rest/RestControllerTests.java +++ b/server/src/test/java/org/opensearch/rest/RestControllerTests.java @@ -33,21 +33,23 @@ package org.opensearch.rest; import org.opensearch.client.node.NodeClient; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.component.AbstractLifecycleComponent; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.core.rest.RestStatus; -import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.common.util.io.IOUtils; import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.yaml.YamlXContent; -import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.breaker.CircuitBreaker; 
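Two themes run through the RestControllerTests changes below: request content types now go through MediaTypeRegistry.JSON instead of XContentType.JSON, and RestController gains a getAllHandlers() iterator that the two new tests walk. A small sketch of that enumeration pattern, using the accessors exactly as the tests invoke them (the package locations for MethodHandlers and RestController are assumptions on my part):

    import java.util.Iterator;
    import org.opensearch.rest.MethodHandlers;
    import org.opensearch.rest.RestController;

    final class HandlerDumpSketch {
        // Hypothetical helper: print every registered route with its HTTP methods.
        static void dump(RestController controller) {
            Iterator<MethodHandlers> handlers = controller.getAllHandlers();
            while (handlers.hasNext()) {
                MethodHandlers h = handlers.next();
                System.out.println(h.getPath() + " -> " + h.getValidMethods());
            }
            // Per testDefaultRestControllerGetAllHandlersContainsFavicon, a freshly
            // built controller reports exactly one entry: GET /favicon.ico.
        }
    }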
+import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.http.HttpInfo; import org.opensearch.http.HttpRequest; import org.opensearch.http.HttpResponse; @@ -80,8 +82,8 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; import static org.mockito.Mockito.any; -import static org.mockito.Mockito.eq; import static org.mockito.Mockito.doCallRealMethod; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.verify; @@ -136,6 +138,37 @@ public void teardown() throws IOException { IOUtils.close(client); } + public void testDefaultRestControllerGetAllHandlersContainsFavicon() { + final RestController restController = new RestController(null, null, null, circuitBreakerService, usageService, identityService); + Iterator<MethodHandlers> handlers = restController.getAllHandlers(); + assertTrue(handlers.hasNext()); + MethodHandlers faviconHandler = handlers.next(); + assertEquals(faviconHandler.getPath(), "/favicon.ico"); + assertEquals(faviconHandler.getValidMethods(), Set.of(RestRequest.Method.GET)); + assertFalse(handlers.hasNext()); + } + + public void testRestControllerGetAllHandlers() { + final RestController restController = new RestController(null, null, null, circuitBreakerService, usageService, identityService); + + restController.registerHandler(RestRequest.Method.PATCH, "/foo", mock(RestHandler.class)); + restController.registerHandler(RestRequest.Method.GET, "/foo", mock(RestHandler.class)); + + Iterator<MethodHandlers> handlers = restController.getAllHandlers(); + + assertTrue(handlers.hasNext()); + MethodHandlers rootHandler = handlers.next(); + assertEquals(rootHandler.getPath(), "/foo"); + assertEquals(rootHandler.getValidMethods(), Set.of(RestRequest.Method.GET, RestRequest.Method.PATCH)); + + assertTrue(handlers.hasNext()); + MethodHandlers faviconHandler = handlers.next(); + assertEquals(faviconHandler.getPath(), "/favicon.ico"); + assertEquals(faviconHandler.getValidMethods(), Set.of(RestRequest.Method.GET)); + + assertFalse(handlers.hasNext()); + } + public void testApplyRelevantHeaders() throws Exception { final ThreadContext threadContext = client.threadPool().getThreadContext(); Set<RestHeaderDefinition> headers = new HashSet<>( @@ -148,15 +181,15 @@ public void testApplyRelevantHeaders() throws Exception { restHeaders.put("header.3", Collections.singletonList("false")); RestRequest fakeRequest = new FakeRestRequest.Builder(xContentRegistry()).withHeaders(restHeaders).build(); final RestController spyRestController = spy(restController); - when(spyRestController.getAllHandlers(null, fakeRequest.rawPath())).thenReturn(new Iterator<MethodHandlers>() { + when(spyRestController.getAllRestMethodHandlers(null, fakeRequest.rawPath())).thenReturn(new Iterator<RestMethodHandlers>() { @Override public boolean hasNext() { return false; } @Override - public MethodHandlers next() { - return new MethodHandlers("/", (RestRequest request, RestChannel channel, NodeClient client) -> { + public 
RestMethodHandlers next() { + return new RestMethodHandlers("/", (RestRequest request, RestChannel channel, NodeClient client) -> { assertEquals("true", threadContext.getHeader("header.1")); assertEquals("true", threadContext.getHeader("header.2")); assertNull(threadContext.getHeader("header.3")); @@ -290,7 +323,7 @@ public void testRestHandlerWrapper() throws Exception { return (RestRequest request, RestChannel channel, NodeClient client) -> wrapperCalled.set(true); }, client, circuitBreakerService, usageService, identityService); restController.registerHandler(RestRequest.Method.GET, "/wrapped", handler); - RestRequest request = testRestRequest("/wrapped", "{}", XContentType.JSON); + RestRequest request = testRestRequest("/wrapped", "{}", MediaTypeRegistry.JSON); AssertingChannel channel = new AssertingChannel(request, true, RestStatus.BAD_REQUEST); restController.dispatchRequest(request, channel, client.threadPool().getThreadContext()); httpServerTransport.start(); @@ -301,7 +334,7 @@ public void testRestHandlerWrapper() throws Exception { public void testDispatchRequestAddsAndFreesBytesOnSuccess() { int contentLength = BREAKER_LIMIT.bytesAsInt(); String content = randomAlphaOfLength((int) Math.round(contentLength / inFlightRequestsBreaker.getOverhead())); - RestRequest request = testRestRequest("/", content, XContentType.JSON); + RestRequest request = testRestRequest("/", content, MediaTypeRegistry.JSON); AssertingChannel channel = new AssertingChannel(request, true, RestStatus.OK); restController.dispatchRequest(request, channel, client.threadPool().getThreadContext()); @@ -313,7 +346,7 @@ public void testDispatchRequestAddsAndFreesBytesOnSuccess() { public void testDispatchRequestAddsAndFreesBytesOnError() { int contentLength = BREAKER_LIMIT.bytesAsInt(); String content = randomAlphaOfLength((int) Math.round(contentLength / inFlightRequestsBreaker.getOverhead())); - RestRequest request = testRestRequest("/error", content, XContentType.JSON); + RestRequest request = testRestRequest("/error", content, MediaTypeRegistry.JSON); AssertingChannel channel = new AssertingChannel(request, true, RestStatus.BAD_REQUEST); restController.dispatchRequest(request, channel, client.threadPool().getThreadContext()); @@ -326,7 +359,7 @@ public void testDispatchRequestAddsAndFreesBytesOnlyOnceOnError() { int contentLength = BREAKER_LIMIT.bytesAsInt(); String content = randomAlphaOfLength((int) Math.round(contentLength / inFlightRequestsBreaker.getOverhead())); // we will produce an error in the rest handler and one more when sending the error response - RestRequest request = testRestRequest("/error", content, XContentType.JSON); + RestRequest request = testRestRequest("/error", content, MediaTypeRegistry.JSON); ExceptionThrowingChannel channel = new ExceptionThrowingChannel(request, true); restController.dispatchRequest(request, channel, client.threadPool().getThreadContext()); @@ -338,7 +371,7 @@ public void testDispatchRequestAddsAndFreesBytesOnlyOnceOnError() { public void testDispatchRequestLimitsBytes() { int contentLength = BREAKER_LIMIT.bytesAsInt() + 1; String content = randomAlphaOfLength((int) Math.round(contentLength / inFlightRequestsBreaker.getOverhead())); - RestRequest request = testRestRequest("/", content, XContentType.JSON); + RestRequest request = testRestRequest("/", content, MediaTypeRegistry.JSON); AssertingChannel channel = new AssertingChannel(request, true, RestStatus.TOO_MANY_REQUESTS); restController.dispatchRequest(request, channel, client.threadPool().getThreadContext()); @@ 
-576,7 +609,7 @@ public void testHandleBadRequestWithHtmlSpecialCharsInUri() { public void testHandleBadInputWithCreateIndex() { final FakeRestRequest fakeRestRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withPath("/foo") .withMethod(RestRequest.Method.PUT) - .withContent(new BytesArray("ddd"), XContentType.JSON) + .withContent(new BytesArray("ddd"), MediaTypeRegistry.JSON) .build(); final AssertingChannel channel = new AssertingChannel(fakeRestRequest, true, RestStatus.BAD_REQUEST); restController.registerHandler(RestRequest.Method.PUT, "/foo", new RestCreateIndexAction()); @@ -731,10 +764,10 @@ public void sendResponse(RestResponse response) { } } - private static RestRequest testRestRequest(String path, String content, XContentType xContentType) { + private static RestRequest testRestRequest(String path, String content, MediaType mediaType) { FakeRestRequest.Builder builder = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY); builder.withPath(path); - builder.withContent(new BytesArray(content), xContentType); + builder.withContent(new BytesArray(content), mediaType); return builder.build(); } } diff --git a/server/src/test/java/org/opensearch/rest/RestHttpResponseHeadersTests.java b/server/src/test/java/org/opensearch/rest/RestHttpResponseHeadersTests.java index b181d571c14fb..5d677247b8b6d 100644 --- a/server/src/test/java/org/opensearch/rest/RestHttpResponseHeadersTests.java +++ b/server/src/test/java/org/opensearch/rest/RestHttpResponseHeadersTests.java @@ -33,13 +33,13 @@ package org.opensearch.rest; import org.opensearch.client.node.NodeClient; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.core.rest.RestStatus; import org.opensearch.identity.IdentityService; -import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.indices.breaker.HierarchyCircuitBreakerService; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.rest.FakeRestChannel; diff --git a/server/src/test/java/org/opensearch/rest/RestRequestTests.java b/server/src/test/java/org/opensearch/rest/RestRequestTests.java index 97350824dd1e4..ea603804bee89 100644 --- a/server/src/test/java/org/opensearch/rest/RestRequestTests.java +++ b/server/src/test/java/org/opensearch/rest/RestRequestTests.java @@ -34,12 +34,12 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.common.CheckedConsumer; +import org.opensearch.common.collect.MapBuilder; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.collect.MapBuilder; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.http.HttpChannel; import org.opensearch.http.HttpRequest; import org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/rest/action/RestActionsTests.java b/server/src/test/java/org/opensearch/rest/action/RestActionsTests.java index a49af7849dff8..d0a71deb2a5c4 100644 --- a/server/src/test/java/org/opensearch/rest/action/RestActionsTests.java +++ 
b/server/src/test/java/org/opensearch/rest/action/RestActionsTests.java @@ -33,22 +33,22 @@ package org.opensearch.rest.action; import com.fasterxml.jackson.core.io.JsonEOFException; + import org.opensearch.action.OriginalIndices; -import org.opensearch.core.action.ShardOperationFailedException; import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.core.common.ParsingException; -import org.opensearch.common.Strings; import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.action.ShardOperationFailedException; +import org.opensearch.core.common.ParsingException; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.core.index.Index; import org.opensearch.index.query.MatchQueryBuilder; import org.opensearch.index.query.QueryBuilder; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.search.SearchModule; import org.opensearch.search.SearchShardTarget; import org.opensearch.test.OpenSearchTestCase; @@ -138,7 +138,7 @@ public void testBuildBroadcastShardsHeader() throws IOException { RestActions.buildBroadcastShardsHeader(builder, ToXContent.EMPTY_PARAMS, 12, 3, 0, 9, failures); builder.endObject(); assertThat( - Strings.toString(builder), + builder.toString(), equalTo( "{\n" + " \"_shards\" : {\n" diff --git a/server/src/test/java/org/opensearch/rest/action/RestBuilderListenerTests.java b/server/src/test/java/org/opensearch/rest/action/RestBuilderListenerTests.java index d865607aa5451..cd91d7c2a97b3 100644 --- a/server/src/test/java/org/opensearch/rest/action/RestBuilderListenerTests.java +++ b/server/src/test/java/org/opensearch/rest/action/RestBuilderListenerTests.java @@ -33,15 +33,15 @@ package org.opensearch.rest.action; import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse.Empty; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestResponse; -import org.opensearch.core.rest.RestStatus; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.rest.FakeRestChannel; import org.opensearch.test.rest.FakeRestRequest; -import org.opensearch.transport.TransportResponse; -import org.opensearch.transport.TransportResponse.Empty; import java.util.concurrent.atomic.AtomicReference; diff --git a/server/src/test/java/org/opensearch/rest/action/RestCancellableNodeClientTests.java b/server/src/test/java/org/opensearch/rest/action/RestCancellableNodeClientTests.java index 0a4282451538e..f7c311674002c 100644 --- a/server/src/test/java/org/opensearch/rest/action/RestCancellableNodeClientTests.java +++ b/server/src/test/java/org/opensearch/rest/action/RestCancellableNodeClientTests.java @@ -32,9 +32,7 @@ package org.opensearch.rest.action; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; -import org.opensearch.action.ActionResponse; import org.opensearch.action.ActionType; import 
org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksAction; import org.opensearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; @@ -44,10 +42,12 @@ import org.opensearch.action.support.PlainListenableActionFuture; import org.opensearch.client.node.NodeClient; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.tasks.TaskId; import org.opensearch.http.HttpChannel; import org.opensearch.http.HttpResponse; import org.opensearch.tasks.Task; -import org.opensearch.tasks.TaskId; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/test/java/org/opensearch/rest/action/RestMainActionTests.java b/server/src/test/java/org/opensearch/rest/action/RestMainActionTests.java index e5e1001884e1e..e4413daa0dfb1 100644 --- a/server/src/test/java/org/opensearch/rest/action/RestMainActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/RestMainActionTests.java @@ -36,13 +36,13 @@ import org.opensearch.Version; import org.opensearch.action.main.MainResponse; import org.opensearch.cluster.ClusterName; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestRequest; -import org.opensearch.core.rest.RestStatus; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.rest.FakeRestRequest; diff --git a/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestAddVotingConfigExclusionActionTests.java b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestAddVotingConfigExclusionActionTests.java index 750519ab006d4..80fb43a706e0e 100644 --- a/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestAddVotingConfigExclusionActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestAddVotingConfigExclusionActionTests.java @@ -38,6 +38,7 @@ import org.opensearch.test.rest.FakeRestRequest; import org.opensearch.test.rest.RestActionTestCase; import org.junit.Before; + import java.util.HashMap; import java.util.Map; diff --git a/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestClusterAddWeightedRoutingActionTests.java b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestClusterAddWeightedRoutingActionTests.java index 4d61ccad10b45..d8a29c6dbf5af 100644 --- a/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestClusterAddWeightedRoutingActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestClusterAddWeightedRoutingActionTests.java @@ -9,14 +9,15 @@ package org.opensearch.rest.action.admin.cluster; import com.fasterxml.jackson.core.JsonParseException; -import org.junit.Before; + import org.opensearch.OpenSearchParseException; import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingRequest; import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.rest.RestRequest; import 
org.opensearch.test.rest.FakeRestRequest; import org.opensearch.test.rest.RestActionTestCase; +import org.junit.Before; import java.io.IOException; import java.util.HashMap; @@ -70,7 +71,7 @@ private RestRequest buildRestRequest(String content) { return new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.PUT) .withPath("/_cluster/routing/awareness/zone/weights") .withParams(singletonMap("attribute", "zone")) - .withContent(new BytesArray(content), XContentType.JSON) + .withContent(new BytesArray(content), MediaTypeRegistry.JSON) .build(); } diff --git a/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestClusterDeleteWeightedRoutingActionTests.java b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestClusterDeleteWeightedRoutingActionTests.java index b11103a9cab11..ed9ed4207bec6 100644 --- a/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestClusterDeleteWeightedRoutingActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestClusterDeleteWeightedRoutingActionTests.java @@ -8,14 +8,14 @@ package org.opensearch.rest.action.admin.cluster; -import org.junit.Before; import org.opensearch.OpenSearchParseException; import org.opensearch.action.admin.cluster.shards.routing.weighted.delete.ClusterDeleteWeightedRoutingRequest; import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.rest.RestRequest; import org.opensearch.test.rest.FakeRestRequest; import org.opensearch.test.rest.RestActionTestCase; +import org.junit.Before; import java.io.IOException; @@ -57,14 +57,14 @@ private RestRequest buildRestRequestWithAwarenessAttribute(String content) { return new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.DELETE) .withPath("/_cluster/routing/awareness/zone/weights") .withParams(singletonMap("attribute", "zone")) - .withContent(new BytesArray(content), XContentType.JSON) + .withContent(new BytesArray(content), MediaTypeRegistry.JSON) .build(); } private RestRequest buildRestRequest(String content) { return new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.DELETE) .withPath("/_cluster/routing/awareness/weights") - .withContent(new BytesArray(content), XContentType.JSON) + .withContent(new BytesArray(content), MediaTypeRegistry.JSON) .build(); } diff --git a/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestDecommissionActionTests.java b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestDecommissionActionTests.java index b5f61f751b19f..b6adf363ae952 100644 --- a/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestDecommissionActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestDecommissionActionTests.java @@ -8,11 +8,11 @@ package org.opensearch.rest.action.admin.cluster; -import org.junit.Before; import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequest; import org.opensearch.rest.RestRequest; import org.opensearch.test.rest.FakeRestRequest; import org.opensearch.test.rest.RestActionTestCase; +import org.junit.Before; import java.io.IOException; import java.util.HashMap; diff --git a/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestDeleteDecommissionStateActionTests.java b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestDeleteDecommissionStateActionTests.java index 
01f988efdf6eb..6fbf094e6a6be 100644 --- a/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestDeleteDecommissionStateActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestDeleteDecommissionStateActionTests.java @@ -8,11 +8,11 @@ package org.opensearch.rest.action.admin.cluster; -import org.junit.Before; import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateRequest; import org.opensearch.rest.RestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.test.rest.RestActionTestCase; +import org.junit.Before; import java.util.List; diff --git a/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestReloadSecureSettingsActionTests.java b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestReloadSecureSettingsActionTests.java index d8f460dccbf4e..e976886458f05 100644 --- a/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestReloadSecureSettingsActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestReloadSecureSettingsActionTests.java @@ -34,9 +34,9 @@ import org.opensearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsRequest; import org.opensearch.core.xcontent.DeprecationHandler; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.test.OpenSearchTestCase; import static org.hamcrest.Matchers.nullValue; @@ -46,7 +46,7 @@ public class RestReloadSecureSettingsActionTests extends OpenSearchTestCase { public void testParserWithPassword() throws Exception { final String request = "{" + "\"secure_settings_password\": \"secure_settings_password_string\"" + "}"; try ( - XContentParser parser = XContentType.JSON.xContent() + XContentParser parser = MediaTypeRegistry.JSON.xContent() .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, request) ) { NodesReloadSecureSettingsRequest reloadSecureSettingsRequest = RestReloadSecureSettingsAction.PARSER.parse(parser, null); @@ -57,7 +57,7 @@ public void testParserWithPassword() throws Exception { public void testParserWithoutPassword() throws Exception { final String request = "{" + "}"; try ( - XContentParser parser = XContentType.JSON.xContent() + XContentParser parser = MediaTypeRegistry.JSON.xContent() .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, request) ) { NodesReloadSecureSettingsRequest reloadSecureSettingsRequest = RestReloadSecureSettingsAction.PARSER.parse(parser, null); diff --git a/server/src/test/java/org/opensearch/rest/action/admin/indices/RestAnalyzeActionTests.java b/server/src/test/java/org/opensearch/rest/action/admin/indices/RestAnalyzeActionTests.java index 1c6c4eca6ca0d..9e3af600706b4 100644 --- a/server/src/test/java/org/opensearch/rest/action/admin/indices/RestAnalyzeActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/admin/indices/RestAnalyzeActionTests.java @@ -33,10 +33,10 @@ import org.opensearch.action.admin.indices.analyze.AnalyzeAction; import org.opensearch.client.node.NodeClient; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentParser; -import 
org.opensearch.common.xcontent.XContentType; import org.opensearch.index.analysis.NameOrDefinition; import org.opensearch.rest.RestRequest; import org.opensearch.test.OpenSearchTestCase; @@ -118,7 +118,7 @@ public void testParseXContentForAnalyzeRequestWithInvalidJsonThrowsException() { RestAnalyzeAction action = new RestAnalyzeAction(); RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent( new BytesArray("{invalid_json}"), - XContentType.JSON + MediaTypeRegistry.JSON ).build(); try (NodeClient client = new NoOpNodeClient(this.getClass().getSimpleName())) { IOException e = expectThrows(IOException.class, () -> action.handleRequest(request, null, client)); diff --git a/server/src/test/java/org/opensearch/rest/action/admin/indices/RestCreateIndexActionTests.java b/server/src/test/java/org/opensearch/rest/action/admin/indices/RestCreateIndexActionTests.java index f6db8a0f24d4b..ebf921b17a536 100644 --- a/server/src/test/java/org/opensearch/rest/action/admin/indices/RestCreateIndexActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/admin/indices/RestCreateIndexActionTests.java @@ -32,10 +32,10 @@ package org.opensearch.rest.action.admin.indices; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/rest/action/admin/indices/RestGetAliasesActionTests.java b/server/src/test/java/org/opensearch/rest/action/admin/indices/RestGetAliasesActionTests.java index 9739419406851..d326515edf5fb 100644 --- a/server/src/test/java/org/opensearch/rest/action/admin/indices/RestGetAliasesActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/admin/indices/RestGetAliasesActionTests.java @@ -33,9 +33,8 @@ package org.opensearch.rest.action.admin.indices; import org.opensearch.cluster.metadata.AliasMetadata; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.rest.RestResponse; import org.opensearch.test.OpenSearchTestCase; @@ -44,8 +43,8 @@ import java.util.List; import java.util.Map; -import static org.opensearch.core.rest.RestStatus.OK; import static org.opensearch.core.rest.RestStatus.NOT_FOUND; +import static org.opensearch.core.rest.RestStatus.OK; import static org.hamcrest.Matchers.equalTo; public class RestGetAliasesActionTests extends OpenSearchTestCase { @@ -60,7 +59,7 @@ public class RestGetAliasesActionTests extends OpenSearchTestCase { // }' public void testBareRequest() throws Exception { - final XContentBuilder xContentBuilder = XContentFactory.contentBuilder(XContentType.JSON); + final XContentBuilder xContentBuilder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); final Map<String, List<AliasMetadata>> openMapBuilder = new HashMap<>(); final AliasMetadata foobarAliasMetadata = AliasMetadata.builder("foobar").build(); final AliasMetadata fooAliasMetadata = AliasMetadata.builder("foo").build(); @@ -72,7 +71,7 @@ public void testBareRequest() throws Exception { } public void testSimpleAliasWildcardMatchingNothing() throws Exception { - final 
XContentBuilder xContentBuilder = XContentFactory.contentBuilder(XContentType.JSON); + final XContentBuilder xContentBuilder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); final Map<String, List<AliasMetadata>> openMapBuilder = new HashMap<>(); final RestResponse restResponse = RestGetAliasesAction.buildRestResponse( true, @@ -86,7 +85,7 @@ public void testSimpleAliasWildcardMatchingNothing() throws Exception { } public void testMultipleAliasWildcardsSomeMatching() throws Exception { - final XContentBuilder xContentBuilder = XContentFactory.contentBuilder(XContentType.JSON); + final XContentBuilder xContentBuilder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); final Map<String, List<AliasMetadata>> openMapBuilder = new HashMap<>(); final AliasMetadata aliasMetadata = AliasMetadata.builder("foobar").build(); openMapBuilder.put("index", Arrays.asList(aliasMetadata)); @@ -102,7 +101,7 @@ public void testMultipleAliasWildcardsSomeMatching() throws Exception { } public void testAliasWildcardsIncludeAndExcludeAll() throws Exception { - final XContentBuilder xContentBuilder = XContentFactory.contentBuilder(XContentType.JSON); + final XContentBuilder xContentBuilder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); final Map<String, List<AliasMetadata>> openMapBuilder = new HashMap<>(); final RestResponse restResponse = RestGetAliasesAction.buildRestResponse( true, @@ -116,7 +115,7 @@ public void testAliasWildcardsIncludeAndExcludeAll() throws Exception { } public void testAliasWildcardsIncludeAndExcludeSome() throws Exception { - final XContentBuilder xContentBuilder = XContentFactory.contentBuilder(XContentType.JSON); + final XContentBuilder xContentBuilder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); final Map<String, List<AliasMetadata>> openMapBuilder = new HashMap<>(); final AliasMetadata aliasMetadata = AliasMetadata.builder("foo").build(); openMapBuilder.put("index", Arrays.asList(aliasMetadata)); @@ -132,7 +131,7 @@ public void testAliasWildcardsIncludeAndExcludeSome() throws Exception { } public void testAliasWildcardsIncludeAndExcludeSomeAndExplicitMissing() throws Exception { - final XContentBuilder xContentBuilder = XContentFactory.contentBuilder(XContentType.JSON); + final XContentBuilder xContentBuilder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); final Map<String, List<AliasMetadata>> openMapBuilder = new HashMap<>(); final AliasMetadata aliasMetadata = AliasMetadata.builder("foo").build(); openMapBuilder.put("index", Arrays.asList(aliasMetadata)); @@ -153,7 +152,7 @@ public void testAliasWildcardsIncludeAndExcludeSomeAndExplicitMissing() throws E } public void testAliasWildcardsExcludeExplicitMissing() throws Exception { - final XContentBuilder xContentBuilder = XContentFactory.contentBuilder(XContentType.JSON); + final XContentBuilder xContentBuilder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); final Map<String, List<AliasMetadata>> openMapBuilder = new HashMap<>(); final RestResponse restResponse = RestGetAliasesAction.buildRestResponse( true, diff --git a/server/src/test/java/org/opensearch/rest/action/admin/indices/RestValidateQueryActionTests.java b/server/src/test/java/org/opensearch/rest/action/admin/indices/RestValidateQueryActionTests.java index 094b4d8b8c12d..3fb6764846da6 100644 --- a/server/src/test/java/org/opensearch/rest/action/admin/indices/RestValidateQueryActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/admin/indices/RestValidateQueryActionTests.java @@ -31,21 
+31,20 @@ package org.opensearch.rest.action.admin.indices; -import java.util.List; -import org.opensearch.action.ActionListener; +import org.opensearch.action.ActionModule.DynamicActionRegistry; import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionType; -import org.opensearch.action.ActionModule.DynamicActionRegistry; import org.opensearch.action.admin.indices.validate.query.ValidateQueryAction; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.TransportAction; import org.opensearch.client.node.NodeClient; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.identity.IdentityService; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.rest.RestController; import org.opensearch.rest.RestRequest; import org.opensearch.search.AbstractSearchTestCase; @@ -62,6 +61,7 @@ import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import static java.util.Collections.emptyMap; @@ -182,7 +182,7 @@ public void testRestValidateQueryAction_malformedQuery() throws Exception { private RestRequest createRestRequest(String content) { return new FakeRestRequest.Builder(xContentRegistry()).withPath("index1/type1/_validate/query") .withParams(emptyMap()) - .withContent(new BytesArray(content), XContentType.JSON) + .withContent(new BytesArray(content), MediaTypeRegistry.JSON) .build(); } } diff --git a/server/src/test/java/org/opensearch/rest/action/cat/RestCatSegmentReplicationActionTests.java b/server/src/test/java/org/opensearch/rest/action/cat/RestCatSegmentReplicationActionTests.java index 44d8f84eef524..41ad9e8bcbb44 100644 --- a/server/src/test/java/org/opensearch/rest/action/cat/RestCatSegmentReplicationActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/cat/RestCatSegmentReplicationActionTests.java @@ -9,18 +9,18 @@ package org.opensearch.rest.action.cat; import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsResponse; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.AllocationId; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.Randomness; import org.opensearch.common.Table; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.SegmentReplicationPerGroupStats; import org.opensearch.index.SegmentReplicationShardStats; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.replication.SegmentReplicationState; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.indices.replication.common.ReplicationTimer; @@ -84,6 +84,7 @@ public void testSegmentReplicationAction() throws IOException { 0L, 0L, 0L, + 0L, 0L 
); segmentReplicationShardStats.setCurrentReplicationState(state); @@ -141,7 +142,7 @@ public void testSegmentReplicationAction() throws IOException { currentReplicationState.getTargetNode().getHostName(), shardStats.getCheckpointsBehindCount(), new ByteSizeValue(shardStats.getBytesBehindCount()), - new TimeValue(shardStats.getCurrentReplicationTimeMillis()), + new TimeValue(shardStats.getCurrentReplicationLagMillis()), new TimeValue(shardStats.getLastCompletedReplicationTimeMillis()), rejectedRequestCount ); diff --git a/server/src/test/java/org/opensearch/rest/action/cat/RestIndicesActionTests.java b/server/src/test/java/org/opensearch/rest/action/cat/RestIndicesActionTests.java index fd74a9fe9e6cd..96b1c75371697 100644 --- a/server/src/test/java/org/opensearch/rest/action/cat/RestIndicesActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/cat/RestIndicesActionTests.java @@ -45,8 +45,8 @@ import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; import org.opensearch.core.index.Index; -import org.opensearch.index.IndexSettings; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.IndexSettings; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.rest.FakeRestRequest; diff --git a/server/src/test/java/org/opensearch/rest/action/cat/RestRecoveryActionTests.java b/server/src/test/java/org/opensearch/rest/action/cat/RestRecoveryActionTests.java index 2f23209876ed0..20fcac1089bc3 100644 --- a/server/src/test/java/org/opensearch/rest/action/cat/RestRecoveryActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/cat/RestRecoveryActionTests.java @@ -33,7 +33,6 @@ package org.opensearch.rest.action.cat; import org.opensearch.action.admin.indices.recovery.RecoveryResponse; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RecoverySource.SnapshotRecoverySource; @@ -42,6 +41,7 @@ import org.opensearch.common.Table; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentOpenSearchExtension; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.recovery.RecoveryState; @@ -49,6 +49,7 @@ import org.opensearch.indices.replication.common.ReplicationTimer; import org.opensearch.test.OpenSearchTestCase; +import java.time.Instant; import java.util.ArrayList; import java.util.Arrays; import java.util.Date; @@ -164,9 +165,9 @@ public void testRestRecoveryAction() { final List<Object> expectedValues = Arrays.asList( "index", i, - XContentOpenSearchExtension.DEFAULT_DATE_PRINTER.print(state.getTimer().startTime()), + XContentOpenSearchExtension.DEFAULT_FORMATTER.format(Instant.ofEpochMilli(state.getTimer().startTime())), state.getTimer().startTime(), - XContentOpenSearchExtension.DEFAULT_DATE_PRINTER.print(state.getTimer().stopTime()), + XContentOpenSearchExtension.DEFAULT_FORMATTER.format(Instant.ofEpochMilli(state.getTimer().stopTime())), state.getTimer().stopTime(), new TimeValue(state.getTimer().time()), state.getRecoverySource().getType().name().toLowerCase(Locale.ROOT), diff --git a/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java b/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java index 
a8679a087216d..fa13ec2036797 100644 --- a/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java @@ -46,6 +46,7 @@ import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.TestShardRouting; import org.opensearch.common.Table; +import org.opensearch.index.shard.DocsStats; import org.opensearch.index.shard.ShardPath; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.rest.FakeRestRequest; @@ -65,6 +66,8 @@ public class RestShardsActionTests extends OpenSearchTestCase { public void testBuildTable() { final int numShards = randomIntBetween(1, 5); + long numDocs = randomLongBetween(0, 10000); + long numDeletedDocs = randomLongBetween(0, 100); DiscoveryNode localNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), Version.CURRENT); List<ShardRouting> shardRoutings = new ArrayList<>(numShards); @@ -76,10 +79,12 @@ public void testBuildTable() { Path path = createTempDir().resolve("indices") .resolve(shardRouting.shardId().getIndex().getUUID()) .resolve(String.valueOf(shardRouting.shardId().id())); + CommonStats commonStats = new CommonStats(); + commonStats.docs = new DocsStats(numDocs, numDeletedDocs, 0); ShardStats shardStats = new ShardStats( shardRouting, new ShardPath(false, path, path, shardRouting.shardId()), - null, + commonStats, null, null, null @@ -120,6 +125,7 @@ public void testBuildTable() { assertThat(headers.get(6).value, equalTo("ip")); assertThat(headers.get(7).value, equalTo("id")); assertThat(headers.get(8).value, equalTo("node")); + assertThat(headers.get(78).value, equalTo("docs.deleted")); final List<List<Table.Cell>> rows = table.getRows(); assertThat(rows.size(), equalTo(numShards)); @@ -132,10 +138,12 @@ public void testBuildTable() { assertThat(row.get(1).value, equalTo(shardRouting.getId())); assertThat(row.get(2).value, equalTo(shardRouting.primary() ? 
"p" : "r")); assertThat(row.get(3).value, equalTo(shardRouting.state())); + assertThat(row.get(4).value, equalTo(shardStats.getStats().getDocs().getCount())); assertThat(row.get(6).value, equalTo(localNode.getHostAddress())); assertThat(row.get(7).value, equalTo(localNode.getId())); - assertThat(row.get(72).value, equalTo(shardStats.getDataPath())); - assertThat(row.get(73).value, equalTo(shardStats.getStatePath())); + assertThat(row.get(76).value, equalTo(shardStats.getDataPath())); + assertThat(row.get(77).value, equalTo(shardStats.getStatePath())); + assertThat(row.get(78).value, equalTo(shardStats.getStats().getDocs().getDeleted())); } } } diff --git a/server/src/test/java/org/opensearch/rest/action/cat/RestTableTests.java b/server/src/test/java/org/opensearch/rest/action/cat/RestTableTests.java index 89fbcf6a3506d..8183cb1d3b910 100644 --- a/server/src/test/java/org/opensearch/rest/action/cat/RestTableTests.java +++ b/server/src/test/java/org/opensearch/rest/action/cat/RestTableTests.java @@ -34,6 +34,7 @@ import org.opensearch.common.Table; import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.rest.AbstractRestChannel; import org.opensearch.rest.RestResponse; import org.opensearch.test.OpenSearchTestCase; @@ -55,7 +56,7 @@ public class RestTableTests extends OpenSearchTestCase { - private static final String APPLICATION_JSON = XContentType.JSON.mediaType(); + private static final String APPLICATION_JSON = MediaTypeRegistry.JSON.mediaType(); private static final String APPLICATION_YAML = XContentType.YAML.mediaType(); private static final String APPLICATION_SMILE = XContentType.SMILE.mediaType(); private static final String APPLICATION_CBOR = XContentType.CBOR.mediaType(); diff --git a/server/src/test/java/org/opensearch/rest/action/cat/RestTasksActionTests.java b/server/src/test/java/org/opensearch/rest/action/cat/RestTasksActionTests.java index 9657264aa7fe5..aaa5c5534d5c0 100644 --- a/server/src/test/java/org/opensearch/rest/action/cat/RestTasksActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/cat/RestTasksActionTests.java @@ -32,13 +32,13 @@ package org.opensearch.rest.action.cat; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; -import org.opensearch.action.ActionResponse; import org.opensearch.action.ActionType; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.collect.MapBuilder; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.client.NoOpNodeClient; diff --git a/server/src/test/java/org/opensearch/rest/action/document/RestBulkActionTests.java b/server/src/test/java/org/opensearch/rest/action/document/RestBulkActionTests.java index f795d340778cf..55d7103724072 100644 --- a/server/src/test/java/org/opensearch/rest/action/document/RestBulkActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/document/RestBulkActionTests.java @@ -33,14 +33,14 @@ package org.opensearch.rest.action.document; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.bulk.BulkRequest; import org.opensearch.action.bulk.BulkResponse; import org.opensearch.action.update.UpdateRequest; import 
org.opensearch.client.node.NodeClient; import org.opensearch.common.SetOnce; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.rest.RestChannel; import org.opensearch.rest.RestRequest; import org.opensearch.test.OpenSearchTestCase; @@ -82,7 +82,7 @@ public void bulk(BulkRequest request, ActionListener<BulkResponse> listener) { + "{\"update\":{\"_id\":\"2\"}}\n" + "{\"script\":{\"source\":\"ctx._source.counter++;\"},\"upsert\":{\"field1\":\"upserted_val\"}}\n" ), - XContentType.JSON + MediaTypeRegistry.JSON ) .withMethod(RestRequest.Method.POST) .build(), diff --git a/server/src/test/java/org/opensearch/rest/action/document/RestGetSourceActionTests.java b/server/src/test/java/org/opensearch/rest/action/document/RestGetSourceActionTests.java index 2fc0391af593c..8d797322dc6ab 100644 --- a/server/src/test/java/org/opensearch/rest/action/document/RestGetSourceActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/document/RestGetSourceActionTests.java @@ -47,8 +47,8 @@ import org.junit.Before; import static java.util.Collections.emptyMap; -import static org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; import static org.opensearch.core.rest.RestStatus.OK; +import static org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; import static org.hamcrest.Matchers.equalTo; public class RestGetSourceActionTests extends RestActionTestCase { diff --git a/server/src/test/java/org/opensearch/rest/action/document/RestIndexActionTests.java b/server/src/test/java/org/opensearch/rest/action/document/RestIndexActionTests.java index 4131e8d9a55c6..4bb11965a46e9 100644 --- a/server/src/test/java/org/opensearch/rest/action/document/RestIndexActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/document/RestIndexActionTests.java @@ -41,7 +41,7 @@ import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.SetOnce; import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.document.RestIndexAction.AutoIdHandler; import org.opensearch.rest.action.document.RestIndexAction.CreateHandler; @@ -104,7 +104,7 @@ private void checkAutoIdOpType(Version minClusterVersion, DocWriteRequest.OpType }); RestRequest autoIdRequest = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.POST) .withPath("/some_index/_doc") - .withContent(new BytesArray("{}"), XContentType.JSON) + .withContent(new BytesArray("{}"), MediaTypeRegistry.JSON) .build(); clusterStateSupplier.set( ClusterState.builder(ClusterName.DEFAULT) diff --git a/server/src/test/java/org/opensearch/rest/action/document/RestUpdateActionTests.java b/server/src/test/java/org/opensearch/rest/action/document/RestUpdateActionTests.java index f66f07a22e660..67f45467d0052 100644 --- a/server/src/test/java/org/opensearch/rest/action/document/RestUpdateActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/document/RestUpdateActionTests.java @@ -35,7 +35,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.client.node.NodeClient; import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.common.xcontent.XContentType; +import 
org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.VersionType; import org.opensearch.rest.RestRequest; import org.opensearch.test.rest.FakeRestRequest; @@ -72,7 +72,7 @@ public void testUpdateDocVersion() { FakeRestRequest updateRequest = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.POST) .withPath("test/_update/1") .withParams(params) - .withContent(new BytesArray(content), XContentType.JSON) + .withContent(new BytesArray(content), MediaTypeRegistry.JSON) .build(); ActionRequestValidationException e = expectThrows( ActionRequestValidationException.class, diff --git a/server/src/test/java/org/opensearch/script/JodaCompatibleZonedDateTimeTests.java b/server/src/test/java/org/opensearch/script/JodaCompatibleZonedDateTimeTests.java deleted file mode 100644 index a3156897540b2..0000000000000 --- a/server/src/test/java/org/opensearch/script/JodaCompatibleZonedDateTimeTests.java +++ /dev/null @@ -1,220 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.script; - -import org.opensearch.common.time.DateFormatters; -import org.opensearch.test.OpenSearchTestCase; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; -import org.junit.Before; - -import java.time.DayOfWeek; -import java.time.Instant; -import java.time.LocalDate; -import java.time.LocalDateTime; -import java.time.Month; -import java.time.ZoneOffset; -import java.time.format.DateTimeFormatter; -import java.time.temporal.ChronoField; -import java.util.Locale; - -import static org.hamcrest.Matchers.equalTo; - -public class JodaCompatibleZonedDateTimeTests extends OpenSearchTestCase { - private JodaCompatibleZonedDateTime javaTime; - private DateTime jodaTime; - - @Before - public void setupTime() { - long millis = randomIntBetween(0, Integer.MAX_VALUE); - javaTime = new JodaCompatibleZonedDateTime(Instant.ofEpochMilli(millis), ZoneOffset.ofHours(-7)); - jodaTime = new DateTime(millis, DateTimeZone.forOffsetHours(-7)); - } - - public void testEquals() { - assertThat(javaTime, equalTo(javaTime)); - } - - public void testToString() { - assertThat(javaTime.toString(), equalTo(jodaTime.toString())); - } - - public void testDayOfMonth() { - assertThat(javaTime.getDayOfMonth(), equalTo(jodaTime.getDayOfMonth())); - } - - public void testDayOfYear() { - assertThat(javaTime.getDayOfYear(), equalTo(jodaTime.getDayOfYear())); - } - - public void testHour() { - assertThat(javaTime.getHour(), equalTo(jodaTime.getHourOfDay())); - } - - public void testLocalDate() { - assertThat(javaTime.toLocalDate(), equalTo(LocalDate.of(jodaTime.getYear(), jodaTime.getMonthOfYear(), jodaTime.getDayOfMonth()))); - } - - public void testLocalDateTime() { - LocalDateTime dt = LocalDateTime.of( - jodaTime.getYear(), - jodaTime.getMonthOfYear(), - jodaTime.getDayOfMonth(), - jodaTime.getHourOfDay(), - jodaTime.getMinuteOfHour(), - jodaTime.getSecondOfMinute(), - jodaTime.getMillisOfSecond() * 1000000 - ); - assertThat(javaTime.toLocalDateTime(), equalTo(dt)); - } - - public void testMinute() { - assertThat(javaTime.getMinute(), equalTo(jodaTime.getMinuteOfHour())); - } - - public void testMonth() { - assertThat(javaTime.getMonth(), equalTo(Month.of(jodaTime.getMonthOfYear()))); - } - - public void testMonthValue() { - assertThat(javaTime.getMonthValue(), equalTo(jodaTime.getMonthOfYear())); - } - - public void testNano() { - assertThat(javaTime.getNano(), equalTo(jodaTime.getMillisOfSecond() * 1000000)); - } - - public void testSecond() { - assertThat(javaTime.getSecond(), equalTo(jodaTime.getSecondOfMinute())); - } - - public void testYear() { - assertThat(javaTime.getYear(), equalTo(jodaTime.getYear())); - } - - public void testZone() { - assertThat(javaTime.getZone().getId(), equalTo(jodaTime.getZone().getID())); - } - - public void testMillis() { - assertThat(javaTime.toInstant().toEpochMilli(), equalTo(jodaTime.getMillis())); - } - - public void testCenturyOfEra() { - assertThat(javaTime.get(ChronoField.YEAR_OF_ERA) / 100, equalTo(jodaTime.getCenturyOfEra())); - } - - public void testEra() { - assertThat(javaTime.get(ChronoField.ERA), equalTo(jodaTime.getEra())); - } - - public void testHourOfDay() { - assertThat(javaTime.getHour(), equalTo(jodaTime.getHourOfDay())); - } - - public void testMillisOfDay() { - assertThat(javaTime.get(ChronoField.MILLI_OF_DAY), equalTo(jodaTime.getMillisOfDay())); - } - - public void testMillisOfSecond() { - assertThat(javaTime.get(ChronoField.MILLI_OF_SECOND), equalTo(jodaTime.getMillisOfSecond())); - } - - public void 
testMinuteOfDay() { - assertThat(javaTime.get(ChronoField.MINUTE_OF_DAY), equalTo(jodaTime.getMinuteOfDay())); - } - - public void testMinuteOfHour() { - assertThat(javaTime.getMinute(), equalTo(jodaTime.getMinuteOfHour())); - } - - public void testMonthOfYear() { - assertThat(javaTime.getMonthValue(), equalTo(jodaTime.getMonthOfYear())); - } - - public void testSecondOfDay() { - assertThat(javaTime.get(ChronoField.SECOND_OF_DAY), equalTo(jodaTime.getSecondOfDay())); - } - - public void testSecondOfMinute() { - assertThat(javaTime.getSecond(), equalTo(jodaTime.getSecondOfMinute())); - } - - public void testWeekOfWeekyear() { - assertThat(javaTime.get(DateFormatters.WEEK_FIELDS_ROOT.weekOfWeekBasedYear()), equalTo(jodaTime.getWeekOfWeekyear())); - } - - public void testWeekyear() { - assertThat(javaTime.get(DateFormatters.WEEK_FIELDS_ROOT.weekBasedYear()), equalTo(jodaTime.getWeekyear())); - } - - public void testYearOfCentury() { - assertThat(javaTime.get(ChronoField.YEAR_OF_ERA) % 100, equalTo(jodaTime.getYearOfCentury())); - } - - public void testYearOfEra() { - assertThat(javaTime.get(ChronoField.YEAR_OF_ERA), equalTo(jodaTime.getYearOfEra())); - } - - public void testToString2() { - assertThat(DateTimeFormatter.ofPattern("EEE", Locale.GERMANY).format(javaTime), equalTo(jodaTime.toString("EEE", Locale.GERMANY))); - } - - public void testDayOfWeek() { - assertThat(javaTime.getDayOfWeekEnum().getValue(), equalTo(jodaTime.getDayOfWeek())); - } - - public void testDayOfWeekEnum() { - assertThat(javaTime.getDayOfWeekEnum(), equalTo(DayOfWeek.of(jodaTime.getDayOfWeek()))); - } - - public void testIsEqual() { - assertTrue(javaTime.isEqual(javaTime)); - } - - public void testIsAfter() { - long millis = randomLongBetween(0, Integer.MAX_VALUE / 2); - JodaCompatibleZonedDateTime beforeTime = new JodaCompatibleZonedDateTime(Instant.ofEpochMilli(millis), ZoneOffset.ofHours(-7)); - millis = randomLongBetween(millis + 1, Integer.MAX_VALUE); - JodaCompatibleZonedDateTime afterTime = new JodaCompatibleZonedDateTime(Instant.ofEpochMilli(millis), ZoneOffset.ofHours(-7)); - assertTrue(afterTime.isAfter(beforeTime)); - } - - public void testIsBefore() { - long millis = randomLongBetween(0, Integer.MAX_VALUE / 2); - JodaCompatibleZonedDateTime beforeTime = new JodaCompatibleZonedDateTime(Instant.ofEpochMilli(millis), ZoneOffset.ofHours(-7)); - millis = randomLongBetween(millis + 1, Integer.MAX_VALUE); - JodaCompatibleZonedDateTime afterTime = new JodaCompatibleZonedDateTime(Instant.ofEpochMilli(millis), ZoneOffset.ofHours(-7)); - assertTrue(beforeTime.isBefore(afterTime)); - } -} diff --git a/server/src/test/java/org/opensearch/script/ScriptCacheTests.java b/server/src/test/java/org/opensearch/script/ScriptCacheTests.java index 6395b8b315196..99eedb39e7381 100644 --- a/server/src/test/java/org/opensearch/script/ScriptCacheTests.java +++ b/server/src/test/java/org/opensearch/script/ScriptCacheTests.java @@ -31,9 +31,9 @@ package org.opensearch.script; -import org.opensearch.common.breaker.CircuitBreakingException; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.breaker.CircuitBreakingException; import org.opensearch.core.rest.RestStatus; import org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/script/ScriptContextInfoTests.java b/server/src/test/java/org/opensearch/script/ScriptContextInfoTests.java index e10e199c4415c..13e353126f0c3 100644 --- 
a/server/src/test/java/org/opensearch/script/ScriptContextInfoTests.java +++ b/server/src/test/java/org/opensearch/script/ScriptContextInfoTests.java @@ -32,14 +32,14 @@ package org.opensearch.script; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.xcontent.DeprecationHandler; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.script.ScriptContextInfo.ScriptMethodInfo; import org.opensearch.script.ScriptContextInfo.ScriptMethodInfo.ParameterInfo; import org.opensearch.test.OpenSearchTestCase; @@ -317,7 +317,7 @@ public void testGetterConditional() { public void testParameterInfoParser() throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder(); - XContentParser parser = XContentType.JSON.xContent() + XContentParser parser = MediaTypeRegistry.JSON.xContent() .createParser( NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, @@ -330,7 +330,7 @@ public void testParameterInfoParser() throws IOException { public void testScriptMethodInfoParser() throws IOException { String json = "{\"name\": \"fooFunc\", \"return_type\": \"int\", \"params\": [{\"type\": \"int\", \"name\": \"fooParam\"}, " + "{\"type\": \"java.util.Map\", \"name\": \"barParam\"}]}"; - XContentParser parser = XContentType.JSON.xContent() + XContentParser parser = MediaTypeRegistry.JSON.xContent() .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, new BytesArray(json).streamInput()); ScriptContextInfo.ScriptMethodInfo info = ScriptContextInfo.ScriptMethodInfo.fromXContent(parser); assertEquals( @@ -395,7 +395,7 @@ public void testScriptContextInfoParser() throws IOException { + " }" + " ]" + "}"; - XContentParser parser = XContentType.JSON.xContent() + XContentParser parser = MediaTypeRegistry.JSON.xContent() .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, new BytesArray(json).streamInput()); ScriptContextInfo parsed = ScriptContextInfo.fromXContent(parser); ScriptContextInfo expected = new ScriptContextInfo( diff --git a/server/src/test/java/org/opensearch/script/ScriptMetadataTests.java b/server/src/test/java/org/opensearch/script/ScriptMetadataTests.java index 83e7a3712a9ad..daf3b7ae1feaa 100644 --- a/server/src/test/java/org/opensearch/script/ScriptMetadataTests.java +++ b/server/src/test/java/org/opensearch/script/ScriptMetadataTests.java @@ -32,16 +32,17 @@ package org.opensearch.script; import org.opensearch.cluster.DiffableUtils; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; 
import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.test.AbstractSerializingTestCase; import java.io.IOException; @@ -54,7 +55,7 @@ public void testFromXContentLoading() throws Exception { // failure to load to old namespace scripts with the same id but different langs XContentBuilder builder = XContentFactory.jsonBuilder(); builder.startObject().field("lang0#id0", "script0").field("lang1#id0", "script1").endObject(); - XContentParser parser0 = XContentType.JSON.xContent() + XContentParser parser0 = MediaTypeRegistry.JSON.xContent() .createParser( NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, @@ -71,7 +72,7 @@ public void testFromXContentLoading() throws Exception { .field("source", "script1") .endObject() .endObject(); - XContentParser parser1 = XContentType.JSON.xContent() + XContentParser parser1 = MediaTypeRegistry.JSON.xContent() .createParser( NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, @@ -93,7 +94,7 @@ public void testFromXContentLoading() throws Exception { .field("source", "script1") .endObject() .endObject(); - XContentParser parser2 = XContentType.JSON.xContent() + XContentParser parser2 = MediaTypeRegistry.JSON.xContent() .createParser( NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, @@ -110,7 +111,7 @@ public void testFromXContentLoading() throws Exception { .field("source", "script1") .endObject() .endObject(); - XContentParser parser3 = XContentType.JSON.xContent() + XContentParser parser3 = MediaTypeRegistry.JSON.xContent() .createParser( NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, @@ -148,15 +149,24 @@ public void testDiff() throws Exception { ScriptMetadata.Builder builder = new ScriptMetadata.Builder(null); builder.storeScript( "1", - StoredScriptSource.parse(new BytesArray("{\"script\":{\"lang\":\"mustache\",\"source\":{\"foo\":\"abc\"}}}"), XContentType.JSON) + StoredScriptSource.parse( + new BytesArray("{\"script\":{\"lang\":\"mustache\",\"source\":{\"foo\":\"abc\"}}}"), + MediaTypeRegistry.JSON + ) ); builder.storeScript( "2", - StoredScriptSource.parse(new BytesArray("{\"script\":{\"lang\":\"mustache\",\"source\":{\"foo\":\"def\"}}}"), XContentType.JSON) + StoredScriptSource.parse( + new BytesArray("{\"script\":{\"lang\":\"mustache\",\"source\":{\"foo\":\"def\"}}}"), + MediaTypeRegistry.JSON + ) ); builder.storeScript( "3", - StoredScriptSource.parse(new BytesArray("{\"script\":{\"lang\":\"mustache\",\"source\":{\"foo\":\"ghi\"}}}"), XContentType.JSON) + StoredScriptSource.parse( + new BytesArray("{\"script\":{\"lang\":\"mustache\",\"source\":{\"foo\":\"ghi\"}}}"), + MediaTypeRegistry.JSON + ) ); ScriptMetadata scriptMetadata1 = builder.build(); @@ -165,13 +175,16 @@ public void testDiff() throws Exception { "2", StoredScriptSource.parse( new BytesArray("{\"script\":{\"lang\":\"mustache\",\"source\":{\"foo\":\"changed\"}}}"), - XContentType.JSON + MediaTypeRegistry.JSON ) ); builder.deleteScript("3"); builder.storeScript( "4", - StoredScriptSource.parse(new BytesArray("{\"script\":{\"lang\":\"mustache\",\"source\":{\"foo\":\"jkl\"}}}"), XContentType.JSON) + StoredScriptSource.parse( + new BytesArray("{\"script\":{\"lang\":\"mustache\",\"source\":{\"foo\":\"jkl\"}}}"), + MediaTypeRegistry.JSON + ) ); ScriptMetadata scriptMetadata2 = builder.build(); @@ -193,7 +206,10 @@ public void testBuilder() { ScriptMetadata.Builder builder = new ScriptMetadata.Builder(null); 
builder.storeScript( "_id", - StoredScriptSource.parse(new BytesArray("{\"script\": {\"lang\": \"painless\", \"source\": \"1 + 1\"} }"), XContentType.JSON) + StoredScriptSource.parse( + new BytesArray("{\"script\": {\"lang\": \"painless\", \"source\": \"1 + 1\"} }"), + MediaTypeRegistry.JSON + ) ); ScriptMetadata result = builder.build(); @@ -203,7 +219,7 @@ public void testBuilder() { public void testLoadEmptyScripts() throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder(); builder.startObject().field("mustache#empty", "").endObject(); - XContentParser parser = XContentType.JSON.xContent() + XContentParser parser = MediaTypeRegistry.JSON.xContent() .createParser( NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, @@ -214,7 +230,7 @@ public void testLoadEmptyScripts() throws IOException { builder = XContentFactory.jsonBuilder(); builder.startObject().field("lang#empty", "").endObject(); - parser = XContentType.JSON.xContent() + parser = MediaTypeRegistry.JSON.xContent() .createParser( NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, @@ -225,7 +241,7 @@ public void testLoadEmptyScripts() throws IOException { builder = XContentFactory.jsonBuilder(); builder.startObject().startObject("script").field("lang", "lang").field("source", "").endObject().endObject(); - parser = XContentType.JSON.xContent() + parser = MediaTypeRegistry.JSON.xContent() .createParser( NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, @@ -236,7 +252,7 @@ public void testLoadEmptyScripts() throws IOException { builder = XContentFactory.jsonBuilder(); builder.startObject().startObject("script").field("lang", "mustache").field("source", "").endObject().endObject(); - parser = XContentType.JSON.xContent() + parser = MediaTypeRegistry.JSON.xContent() .createParser( NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, @@ -247,7 +263,7 @@ public void testLoadEmptyScripts() throws IOException { } public void testOldStyleDropped() throws IOException { - XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent()); + XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder(); builder.startObject(); { @@ -272,7 +288,7 @@ public void testOldStyleDropped() throws IOException { } builder.endObject(); - XContentParser parser = XContentType.JSON.xContent() + XContentParser parser = MediaTypeRegistry.JSON.xContent() .createParser( NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, diff --git a/server/src/test/java/org/opensearch/script/ScriptServiceTests.java b/server/src/test/java/org/opensearch/script/ScriptServiceTests.java index 489b267f586e4..3007c37027a9c 100644 --- a/server/src/test/java/org/opensearch/script/ScriptServiceTests.java +++ b/server/src/test/java/org/opensearch/script/ScriptServiceTests.java @@ -36,15 +36,15 @@ import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.Metadata; -import org.opensearch.common.breaker.CircuitBreakingException; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; +import 
org.opensearch.core.common.breaker.CircuitBreakingException; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.env.Environment; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; @@ -60,8 +60,8 @@ import static org.opensearch.script.ScriptService.SCRIPT_CACHE_EXPIRE_SETTING; import static org.opensearch.script.ScriptService.SCRIPT_CACHE_SIZE_SETTING; import static org.opensearch.script.ScriptService.SCRIPT_GENERAL_CACHE_EXPIRE_SETTING; -import static org.opensearch.script.ScriptService.SCRIPT_GENERAL_MAX_COMPILATIONS_RATE_SETTING; import static org.opensearch.script.ScriptService.SCRIPT_GENERAL_CACHE_SIZE_SETTING; +import static org.opensearch.script.ScriptService.SCRIPT_GENERAL_MAX_COMPILATIONS_RATE_SETTING; import static org.opensearch.script.ScriptService.SCRIPT_MAX_COMPILATIONS_RATE_SETTING; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.is; @@ -375,7 +375,11 @@ public void testStoreScript() throws Exception { .endObject() .endObject() ); - ScriptMetadata scriptMetadata = ScriptMetadata.putStoredScript(null, "_id", StoredScriptSource.parse(script, XContentType.JSON)); + ScriptMetadata scriptMetadata = ScriptMetadata.putStoredScript( + null, + "_id", + StoredScriptSource.parse(script, MediaTypeRegistry.JSON) + ); assertNotNull(scriptMetadata); assertEquals("abc", scriptMetadata.getStoredScript("_id").getSource()); } @@ -384,7 +388,7 @@ public void testDeleteScript() throws Exception { ScriptMetadata scriptMetadata = ScriptMetadata.putStoredScript( null, "_id", - StoredScriptSource.parse(new BytesArray("{\"script\": {\"lang\": \"_lang\", \"source\": \"abc\"} }"), XContentType.JSON) + StoredScriptSource.parse(new BytesArray("{\"script\": {\"lang\": \"_lang\", \"source\": \"abc\"} }"), MediaTypeRegistry.JSON) ); scriptMetadata = ScriptMetadata.deleteStoredScript(scriptMetadata, "_id"); assertNotNull(scriptMetadata); @@ -408,7 +412,7 @@ public void testGetStoredScript() throws Exception { "_id", StoredScriptSource.parse( new BytesArray("{\"script\": {\"lang\": \"_lang\", \"source\": \"abc\"} }"), - XContentType.JSON + MediaTypeRegistry.JSON ) ).build() ) diff --git a/server/src/test/java/org/opensearch/script/ScriptTests.java b/server/src/test/java/org/opensearch/script/ScriptTests.java index 47b4557c3e5e1..0b871ee4847cc 100644 --- a/server/src/test/java/org/opensearch/script/ScriptTests.java +++ b/server/src/test/java/org/opensearch/script/ScriptTests.java @@ -33,16 +33,17 @@ package org.opensearch.script; import org.opensearch.OpenSearchParseException; -import org.opensearch.common.Strings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.InputStreamStreamInput; import org.opensearch.core.common.io.stream.OutputStreamStreamOutput; -import org.opensearch.common.settings.Settings; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.xcontent.XContentParser; -import 
org.opensearch.common.xcontent.XContentType; import org.opensearch.test.OpenSearchTestCase; import java.io.ByteArrayInputStream; @@ -58,7 +59,7 @@ public class ScriptTests extends OpenSearchTestCase { public void testScriptParsing() throws IOException { Script expectedScript = createScript(); - try (XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values()))) { + try (XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values()))) { expectedScript.toXContent(builder, ToXContent.EMPTY_PARAMS); try (XContentParser parser = createParser(builder)) { Script actualScript = Script.parse(parser); @@ -87,7 +88,7 @@ private Script createScript() throws IOException { builder.startObject(); builder.field("field", randomAlphaOfLengthBetween(1, 5)); builder.endObject(); - script = Strings.toString(builder); + script = builder.toString(); } } else { script = randomAlphaOfLengthBetween(1, 5); @@ -96,14 +97,16 @@ private Script createScript() throws IOException { scriptType, scriptType == ScriptType.STORED ? null : randomFrom("_lang1", "_lang2", "_lang3"), script, - scriptType == ScriptType.INLINE ? Collections.singletonMap(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType()) : null, + scriptType == ScriptType.INLINE + ? Collections.singletonMap(Script.CONTENT_TYPE_OPTION, MediaTypeRegistry.JSON.mediaType()) + : null, params ); } public void testParse() throws IOException { Script expectedScript = createScript(); - try (XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values()))) { + try (XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values()))) { expectedScript.toXContent(builder, ToXContent.EMPTY_PARAMS); try (XContentParser xParser = createParser(builder)) { Settings settings = Settings.fromXContent(xParser); @@ -167,8 +170,8 @@ public void testParseFromObjectFromScript() { } Script script = new Script(ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, "doc['field']", options, params); Map<String, Object> scriptObject = XContentHelper.convertToMap( - XContentType.JSON.xContent(), - Strings.toString(XContentType.JSON, script), + MediaTypeRegistry.JSON.xContent(), + Strings.toString(MediaTypeRegistry.JSON, script), false ); Script parsedScript = Script.parse(scriptObject); diff --git a/server/src/test/java/org/opensearch/script/StoredScriptSourceTests.java b/server/src/test/java/org/opensearch/script/StoredScriptSourceTests.java index 548dccf5387c2..24c4fddc7dce8 100644 --- a/server/src/test/java/org/opensearch/script/StoredScriptSourceTests.java +++ b/server/src/test/java/org/opensearch/script/StoredScriptSourceTests.java @@ -32,12 +32,13 @@ package org.opensearch.script; -import org.opensearch.common.Strings; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.Writeable.Reader; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.test.AbstractSerializingTestCase; import java.io.IOException; @@ -48,9 +49,9 @@ public class StoredScriptSourceTests extends AbstractSerializingTestCase<StoredS @Override protected StoredScriptSource createTestInstance() { - XContentType xContentType = randomFrom(XContentType.JSON, XContentType.YAML); + MediaType 
mediaType = randomFrom(MediaTypeRegistry.JSON, XContentType.YAML); try { - XContentBuilder template = XContentBuilder.builder(xContentType.xContent()); + XContentBuilder template = XContentBuilder.builder(mediaType.xContent()); template.startObject(); template.startObject("script"); { @@ -64,9 +65,9 @@ protected StoredScriptSource createTestInstance() { template.endObject(); Map<String, String> options = new HashMap<>(); if (randomBoolean()) { - options.put(Script.CONTENT_TYPE_OPTION, xContentType.mediaType()); + options.put(Script.CONTENT_TYPE_OPTION, mediaType.mediaType()); } - return StoredScriptSource.parse(BytesReference.bytes(template), xContentType); + return StoredScriptSource.parse(BytesReference.bytes(template), mediaType); } catch (IOException e) { throw new AssertionError("Failed to create test instance", e); } @@ -88,7 +89,7 @@ protected StoredScriptSource mutateInstance(StoredScriptSource instance) throws String lang = instance.getLang(); Map<String, String> options = instance.getOptions(); - XContentType newXContentType = randomFrom(XContentType.JSON, XContentType.YAML); + MediaType newXContentType = randomFrom(MediaTypeRegistry.JSON, XContentType.YAML); XContentBuilder newTemplate = XContentBuilder.builder(newXContentType.xContent()); newTemplate.startObject(); newTemplate.startObject("query"); @@ -100,7 +101,7 @@ protected StoredScriptSource mutateInstance(StoredScriptSource instance) throws switch (between(0, 2)) { case 0: - source = Strings.toString(newTemplate); + source = newTemplate.toString(); break; case 1: lang = randomAlphaOfLengthBetween(1, 20); diff --git a/server/src/test/java/org/opensearch/script/StoredScriptTests.java b/server/src/test/java/org/opensearch/script/StoredScriptTests.java index c16c4f93e6582..36d9f4dc6e601 100644 --- a/server/src/test/java/org/opensearch/script/StoredScriptTests.java +++ b/server/src/test/java/org/opensearch/script/StoredScriptTests.java @@ -34,13 +34,11 @@ import org.opensearch.ResourceNotFoundException; import org.opensearch.core.common.ParsingException; -import org.opensearch.common.Strings; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.test.AbstractSerializingTestCase; import java.io.IOException; @@ -73,17 +71,17 @@ public void testInvalidDelete() { public void testSourceParsing() throws Exception { // simple script value string - try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) { + try (XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON)) { builder.startObject().startObject("script").field("lang", "lang").field("source", "code").endObject().endObject(); - StoredScriptSource parsed = StoredScriptSource.parse(BytesReference.bytes(builder), XContentType.JSON); + StoredScriptSource parsed = StoredScriptSource.parse(BytesReference.bytes(builder), MediaTypeRegistry.JSON); StoredScriptSource source = new StoredScriptSource("lang", "code", Collections.emptyMap()); assertThat(parsed, equalTo(source)); } // complex template using script as the field name - try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) { + try (XContentBuilder builder = 
MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON)) { builder.startObject() .startObject("script") .field("lang", "mustache") @@ -94,11 +92,11 @@ public void testSourceParsing() throws Exception { .endObject(); String code; - try (XContentBuilder cb = XContentFactory.contentBuilder(builder.contentType())) { - code = Strings.toString(cb.startObject().field("query", "code").endObject()); + try (XContentBuilder cb = MediaTypeRegistry.contentBuilder(builder.contentType())) { + code = cb.startObject().field("query", "code").endObject().toString(); } - StoredScriptSource parsed = StoredScriptSource.parse(BytesReference.bytes(builder), XContentType.JSON); + StoredScriptSource parsed = StoredScriptSource.parse(BytesReference.bytes(builder), MediaTypeRegistry.JSON); StoredScriptSource source = new StoredScriptSource( "mustache", code, @@ -109,20 +107,20 @@ public void testSourceParsing() throws Exception { } // complex script with script object - try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) { + try (XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON)) { builder.startObject().field("script").startObject().field("lang", "lang").field("source", "code").endObject().endObject(); - StoredScriptSource parsed = StoredScriptSource.parse(BytesReference.bytes(builder), XContentType.JSON); + StoredScriptSource parsed = StoredScriptSource.parse(BytesReference.bytes(builder), MediaTypeRegistry.JSON); StoredScriptSource source = new StoredScriptSource("lang", "code", Collections.emptyMap()); assertThat(parsed, equalTo(source)); } // complex script using "code" backcompat - try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) { + try (XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON)) { builder.startObject().field("script").startObject().field("lang", "lang").field("code", "code").endObject().endObject(); - StoredScriptSource parsed = StoredScriptSource.parse(BytesReference.bytes(builder), XContentType.JSON); + StoredScriptSource parsed = StoredScriptSource.parse(BytesReference.bytes(builder), MediaTypeRegistry.JSON); StoredScriptSource source = new StoredScriptSource("lang", "code", Collections.emptyMap()); assertThat(parsed, equalTo(source)); @@ -130,7 +128,7 @@ public void testSourceParsing() throws Exception { assertWarnings("Deprecated field [code] used, expected [source] instead"); // complex script with script object and empty options - try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) { + try (XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON)) { builder.startObject() .field("script") .startObject() @@ -142,34 +140,33 @@ public void testSourceParsing() throws Exception { .endObject() .endObject(); - StoredScriptSource parsed = StoredScriptSource.parse(BytesReference.bytes(builder), XContentType.JSON); + StoredScriptSource parsed = StoredScriptSource.parse(BytesReference.bytes(builder), MediaTypeRegistry.JSON); StoredScriptSource source = new StoredScriptSource("lang", "code", Collections.emptyMap()); assertThat(parsed, equalTo(source)); } // complex script with embedded template - try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) { - Strings.toString( - builder.startObject() - .field("script") - .startObject() - .field("lang", "lang") - .startObject("source") - .field("query", "code") - .endObject() - .startObject("options") - .endObject() - .endObject() - .endObject() - ); + 
try (XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON)) { + builder.startObject() + .field("script") + .startObject() + .field("lang", "lang") + .startObject("source") + .field("query", "code") + .endObject() + .startObject("options") + .endObject() + .endObject() + .endObject() + .toString(); String code; - try (XContentBuilder cb = XContentFactory.contentBuilder(builder.contentType())) { - code = Strings.toString(cb.startObject().field("query", "code").endObject()); + try (XContentBuilder cb = MediaTypeRegistry.contentBuilder(builder.contentType())) { + code = cb.startObject().field("query", "code").endObject().toString(); } - StoredScriptSource parsed = StoredScriptSource.parse(BytesReference.bytes(builder), XContentType.JSON); + StoredScriptSource parsed = StoredScriptSource.parse(BytesReference.bytes(builder), MediaTypeRegistry.JSON); StoredScriptSource source = new StoredScriptSource( "lang", code, @@ -182,29 +179,29 @@ public void testSourceParsing() throws Exception { public void testSourceParsingErrors() throws Exception { // check for missing lang parameter when parsing a script - try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) { + try (XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON)) { builder.startObject().field("script").startObject().field("source", "code").endObject().endObject(); IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, - () -> StoredScriptSource.parse(BytesReference.bytes(builder), XContentType.JSON) + () -> StoredScriptSource.parse(BytesReference.bytes(builder), MediaTypeRegistry.JSON) ); assertThat(iae.getMessage(), equalTo("must specify lang for stored script")); } // check for missing source parameter when parsing a script - try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) { + try (XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON)) { builder.startObject().field("script").startObject().field("lang", "lang").endObject().endObject(); IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, - () -> StoredScriptSource.parse(BytesReference.bytes(builder), XContentType.JSON) + () -> StoredScriptSource.parse(BytesReference.bytes(builder), MediaTypeRegistry.JSON) ); assertThat(iae.getMessage(), equalTo("must specify source for stored script")); } // check for illegal options parameter when parsing a script - try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) { + try (XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON)) { builder.startObject() .field("script") .startObject() @@ -218,17 +215,17 @@ public void testSourceParsingErrors() throws Exception { IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, - () -> StoredScriptSource.parse(BytesReference.bytes(builder), XContentType.JSON) + () -> StoredScriptSource.parse(BytesReference.bytes(builder), MediaTypeRegistry.JSON) ); assertThat(iae.getMessage(), equalTo("illegal compiler options [{option=option}] specified")); } // check for unsupported template context - try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) { + try (XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON)) { builder.startObject().field("template", "code").endObject(); ParsingException pEx = expectThrows( ParsingException.class, - () -> StoredScriptSource.parse(BytesReference.bytes(builder), 
XContentType.JSON) + () -> StoredScriptSource.parse(BytesReference.bytes(builder), MediaTypeRegistry.JSON) ); assertThat( pEx.getMessage(), @@ -238,20 +235,20 @@ public void testSourceParsingErrors() throws Exception { } public void testEmptyTemplateDeprecations() throws IOException { - try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) { + try (XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON)) { builder.startObject().endObject(); - StoredScriptSource parsed = StoredScriptSource.parse(BytesReference.bytes(builder), XContentType.JSON); + StoredScriptSource parsed = StoredScriptSource.parse(BytesReference.bytes(builder), MediaTypeRegistry.JSON); StoredScriptSource source = new StoredScriptSource(Script.DEFAULT_TEMPLATE_LANG, "", Collections.emptyMap()); assertThat(parsed, equalTo(source)); assertWarnings("empty templates should no longer be used"); } - try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) { + try (XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON)) { builder.startObject().field("script").startObject().field("lang", "mustache").field("source", "").endObject().endObject(); - StoredScriptSource parsed = StoredScriptSource.parse(BytesReference.bytes(builder), XContentType.JSON); + StoredScriptSource parsed = StoredScriptSource.parse(BytesReference.bytes(builder), MediaTypeRegistry.JSON); StoredScriptSource source = new StoredScriptSource(Script.DEFAULT_TEMPLATE_LANG, "", Collections.emptyMap()); assertThat(parsed, equalTo(source)); diff --git a/server/src/test/java/org/opensearch/search/AbstractSearchTestCase.java b/server/src/test/java/org/opensearch/search/AbstractSearchTestCase.java index 4a5e99006ac05..c8f1a2491631d 100644 --- a/server/src/test/java/org/opensearch/search/AbstractSearchTestCase.java +++ b/server/src/test/java/org/opensearch/search/AbstractSearchTestCase.java @@ -34,11 +34,11 @@ import org.opensearch.action.search.SearchRequest; import org.opensearch.common.CheckedFunction; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; diff --git a/server/src/test/java/org/opensearch/search/ClearScrollResponseTests.java b/server/src/test/java/org/opensearch/search/ClearScrollResponseTests.java index 27c127e2c9ea1..f911d6780d958 100644 --- a/server/src/test/java/org/opensearch/search/ClearScrollResponseTests.java +++ b/server/src/test/java/org/opensearch/search/ClearScrollResponseTests.java @@ -33,13 +33,14 @@ package org.opensearch.search; import org.opensearch.action.search.ClearScrollResponse; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.xcontent.XContentHelper; import org.opensearch.core.xcontent.XContentParser; -import 
org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -58,17 +59,17 @@ public void testToXContent() throws IOException { } public void testToAndFromXContent() throws IOException { - XContentType xContentType = randomFrom(XContentType.values()); + MediaType mediaType = randomFrom(XContentType.values()); ClearScrollResponse originalResponse = createTestItem(); - BytesReference originalBytes = toShuffledXContent(originalResponse, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean()); + BytesReference originalBytes = toShuffledXContent(originalResponse, mediaType, ToXContent.EMPTY_PARAMS, randomBoolean()); ClearScrollResponse parsedResponse; - try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) { + try (XContentParser parser = createParser(mediaType.xContent(), originalBytes)) { parsedResponse = ClearScrollResponse.fromXContent(parser); } assertEquals(originalResponse.isSucceeded(), parsedResponse.isSucceeded()); assertEquals(originalResponse.getNumFreed(), parsedResponse.getNumFreed()); - BytesReference parsedBytes = XContentHelper.toXContent(parsedResponse, xContentType, randomBoolean()); - assertToXContentEquivalent(originalBytes, parsedBytes, xContentType); + BytesReference parsedBytes = XContentHelper.toXContent(parsedResponse, mediaType, randomBoolean()); + assertToXContentEquivalent(originalBytes, parsedBytes, mediaType); } private static ClearScrollResponse createTestItem() { diff --git a/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java b/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java index 3a6743a334566..2661873d9498f 100644 --- a/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java +++ b/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java @@ -8,8 +8,6 @@ package org.opensearch.search; -import org.hamcrest.Matchers; -import org.opensearch.action.ActionFuture; import org.opensearch.action.search.CreatePitAction; import org.opensearch.action.search.CreatePitController; import org.opensearch.action.search.CreatePitRequest; @@ -22,15 +20,17 @@ import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.Priority; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.index.IndexNotFoundException; -import org.opensearch.search.builder.PointInTimeBuilder; -import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.index.IndexService; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; +import org.opensearch.search.builder.PointInTimeBuilder; +import org.opensearch.search.sort.SortOrder; +import org.opensearch.test.OpenSearchSingleNodeTestCase; +import org.hamcrest.Matchers; import java.util.ArrayList; import java.util.List; @@ -38,9 +38,6 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.Matchers.blankOrNullString; -import static org.hamcrest.Matchers.not; import static org.opensearch.action.search.PitTestsUtil.assertSegments; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static 
org.opensearch.common.xcontent.XContentFactory.jsonBuilder; @@ -48,6 +45,9 @@ import static org.opensearch.index.query.QueryBuilders.queryStringQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.blankOrNullString; +import static org.hamcrest.Matchers.not; /** * Single node integration tests for various PIT use cases such as create PIT, search, etc. @@ -321,9 +321,9 @@ public void testCreatePitMoreThanMaxOpenPitContexts() throws Exception { // delete all DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds.toArray(new String[0])); - /** - * When we invoke delete again, returns success after clearing the remaining readers. Asserting reader context - * not found exceptions don't result in failures ( as deletion in one node is successful ) + /* + When we invoke delete again, it returns success after clearing the remaining readers, asserting that reader-context-not-found + exceptions don't result in failures (as deletion on one node succeeded) */ ActionFuture<DeletePitResponse> execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); DeletePitResponse deletePITResponse = execute.get(); @@ -489,8 +489,8 @@ public void testPitAfterUpdateIndex() throws Exception { client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get().getHits().getTotalHits().value, Matchers.equalTo(50L) ); - /** - * assert without point in time + /* + assert without point in time */ assertThat( @@ -509,8 +509,8 @@ client().prepareSearch().setSize(0).setQuery(termQuery("message", "update")).get().getHits().getTotalHits().value, Matchers.equalTo(50L) ); - /** - * using point in time id will have the same search results as ones before update + /* + using the point in time id returns the same search results as before the update */ assertThat( client().prepareSearch() diff --git a/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java b/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java index 8d8f2856d7703..3793249d569f0 100644 --- a/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java +++ b/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java @@ -35,24 +35,30 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.opensearch.Version; import org.opensearch.action.OriginalIndices; import org.opensearch.action.search.SearchType; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.SetOnce; import org.opensearch.common.UUIDs; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.MockBigArrays; import 
org.opensearch.common.util.MockPageCacheRecycler; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.index.cache.IndexCache; @@ -64,8 +70,6 @@ import org.opensearch.index.query.ParsedQuery; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.search.internal.AliasFilter; import org.opensearch.search.internal.LegacyReaderContext; import org.opensearch.search.internal.PitReaderContext; @@ -92,11 +96,11 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.mockito.Mockito.any; -import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.anyBoolean; +import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.eq; -import static org.mockito.Mockito.nullable; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.nullable; import static org.mockito.Mockito.when; public class DefaultSearchContextTests extends OpenSearchTestCase { @@ -547,6 +551,157 @@ protected Engine.Searcher acquireSearcherInternal(String source) { } } + public void testSearchPathEvaluationUsingSortField() throws Exception { + ShardSearchRequest shardSearchRequest = mock(ShardSearchRequest.class); + when(shardSearchRequest.searchType()).thenReturn(SearchType.DEFAULT); + ShardId shardId = new ShardId("index", UUID.randomUUID().toString(), 1); + when(shardSearchRequest.shardId()).thenReturn(shardId); + + ThreadPool threadPool = new TestThreadPool(this.getClass().getName()); + IndexShard indexShard = mock(IndexShard.class); + QueryCachingPolicy queryCachingPolicy = mock(QueryCachingPolicy.class); + when(indexShard.getQueryCachingPolicy()).thenReturn(queryCachingPolicy); + when(indexShard.getThreadPool()).thenReturn(threadPool); + + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) + .build(); + + IndexService indexService = mock(IndexService.class); + QueryShardContext queryShardContext = mock(QueryShardContext.class); + when(indexService.newQueryShardContext(eq(shardId.id()), any(), any(), nullable(String.class), anyBoolean())).thenReturn( + queryShardContext + ); + + IndexMetadata indexMetadata = IndexMetadata.builder("index").settings(settings).build(); + IndexSettings indexSettings = new IndexSettings(indexMetadata, Settings.EMPTY); + when(indexService.getIndexSettings()).thenReturn(indexSettings); + + BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + + try (Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { + + final Supplier<Engine.SearcherSupplier> searcherSupplier = () -> new Engine.SearcherSupplier(Function.identity()) { + @Override + protected void doClose() {} + + @Override + protected Engine.Searcher acquireSearcherInternal(String source) { + try { + IndexReader reader = w.getReader(); + return new Engine.Searcher( + "test", + reader, + IndexSearcher.getDefaultSimilarity(), + IndexSearcher.getDefaultQueryCache(), + 
IndexSearcher.getDefaultQueryCachingPolicy(), + reader + ); + } catch (IOException exc) { + throw new AssertionError(exc); + } + } + }; + + SearchShardTarget target = new SearchShardTarget("node", shardId, null, OriginalIndices.NONE); + ReaderContext readerContext = new ReaderContext( + newContextId(), + indexService, + indexShard, + searcherSupplier.get(), + randomNonNegativeLong(), + false + ); + + final ClusterService clusterService = mock(ClusterService.class); + final ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + clusterSettings.registerSetting(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING); + clusterSettings.applySettings( + Settings.builder().put(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() + ); + when(clusterService.getClusterSettings()).thenReturn(clusterSettings); + DefaultSearchContext context = new DefaultSearchContext( + readerContext, + shardSearchRequest, + target, + null, + bigArrays, + null, + null, + null, + false, + Version.CURRENT, + false, + executor, + null + ); + + // Case1: if sort is on timestamp field, non-concurrent path is used + context.sort( + new SortAndFormats(new Sort(new SortField("@timestamp", SortField.Type.INT)), new DocValueFormat[] { DocValueFormat.RAW }) + ); + context.evaluateRequestShouldUseConcurrentSearch(); + assertFalse(context.shouldUseConcurrentSearch()); + assertThrows(SetOnce.AlreadySetException.class, context::evaluateRequestShouldUseConcurrentSearch); + + // Case2: if sort is on other field, concurrent path is used + context = new DefaultSearchContext( + readerContext, + shardSearchRequest, + target, + clusterService, + bigArrays, + null, + null, + null, + false, + Version.CURRENT, + false, + executor, + null + ); + context.sort( + new SortAndFormats(new Sort(new SortField("test2", SortField.Type.INT)), new DocValueFormat[] { DocValueFormat.RAW }) + ); + context.evaluateRequestShouldUseConcurrentSearch(); + if (executor == null) { + assertFalse(context.shouldUseConcurrentSearch()); + } else { + assertTrue(context.shouldUseConcurrentSearch()); + } + assertThrows(SetOnce.AlreadySetException.class, context::evaluateRequestShouldUseConcurrentSearch); + + // Case 3: With no sort, concurrent path is used + context = new DefaultSearchContext( + readerContext, + shardSearchRequest, + target, + clusterService, + bigArrays, + null, + null, + null, + false, + Version.CURRENT, + false, + executor, + null + ); + context.evaluateRequestShouldUseConcurrentSearch(); + if (executor == null) { + assertFalse(context.shouldUseConcurrentSearch()); + } else { + assertTrue(context.shouldUseConcurrentSearch()); + } + assertThrows(SetOnce.AlreadySetException.class, context::evaluateRequestShouldUseConcurrentSearch); + + // shutdown the threadpool + threadPool.shutdown(); + } + } + private ShardSearchContextId newContextId() { return new ShardSearchContextId(UUIDs.randomBase64UUID(), randomNonNegativeLong()); } diff --git a/server/src/test/java/org/opensearch/search/DeletePitResponseTests.java b/server/src/test/java/org/opensearch/search/DeletePitResponseTests.java index 638231b470689..8a9f635f7ef3b 100644 --- a/server/src/test/java/org/opensearch/search/DeletePitResponseTests.java +++ b/server/src/test/java/org/opensearch/search/DeletePitResponseTests.java @@ -10,13 +10,13 @@ import org.opensearch.action.search.DeletePitInfo; import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.common.xcontent.XContentType; +import 
org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.xcontent.XContentHelper; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/search/DocValueFormatTests.java b/server/src/test/java/org/opensearch/search/DocValueFormatTests.java index 18456441f1316..5638eeb59c337 100644 --- a/server/src/test/java/org/opensearch/search/DocValueFormatTests.java +++ b/server/src/test/java/org/opensearch/search/DocValueFormatTests.java @@ -35,12 +35,12 @@ import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.util.BytesRef; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.network.InetAddresses; +import org.opensearch.common.time.DateFormatter; import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.NamedWriteableRegistry.Entry; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.network.InetAddresses; -import org.opensearch.common.time.DateFormatter; import org.opensearch.index.mapper.DateFieldMapper.Resolution; import org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/search/GenericSearchExtBuilderTests.java b/server/src/test/java/org/opensearch/search/GenericSearchExtBuilderTests.java new file mode 100644 index 0000000000000..8fb1814962155 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/GenericSearchExtBuilderTests.java @@ -0,0 +1,422 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ +package org.opensearch.search; + +import org.opensearch.Version; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.search.SearchResponseTests; +import org.opensearch.action.search.ShardSearchFailure; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.plugins.SearchPlugin; +import org.opensearch.rest.action.search.RestSearchAction; +import org.opensearch.search.aggregations.AggregationsTests; +import org.opensearch.search.aggregations.InternalAggregations; +import org.opensearch.search.internal.InternalSearchResponse; +import org.opensearch.search.profile.SearchProfileShardResults; +import org.opensearch.search.profile.SearchProfileShardResultsTests; +import org.opensearch.search.suggest.Suggest; +import org.opensearch.search.suggest.SuggestTests; +import org.opensearch.test.InternalAggregationTestCase; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.UUID; + +import static java.util.Collections.singletonMap; + +public class GenericSearchExtBuilderTests extends OpenSearchTestCase { + + private static final NamedXContentRegistry xContentRegistry; + static { + List<NamedXContentRegistry.Entry> namedXContents = new ArrayList<>(InternalAggregationTestCase.getDefaultNamedXContents()); + namedXContents.addAll(SuggestTests.getDefaultNamedXContents()); + namedXContents.add( + new NamedXContentRegistry.Entry( + SearchExtBuilder.class, + GenericSearchExtBuilder.EXT_BUILDER_NAME, + GenericSearchExtBuilder::fromXContent + ) + ); + xContentRegistry = new NamedXContentRegistry(namedXContents); + } + + private final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry( + new SearchModule(Settings.EMPTY, List.of(new SearchPlugin() { + @Override + public List<SearchExtSpec<?>> getSearchExts() { + return List.of( + new SearchExtSpec<>( + GenericSearchExtBuilder.EXT_BUILDER_NAME, + GenericSearchExtBuilder::new, + GenericSearchExtBuilder::fromXContent + ) + ); + } + })).getNamedWriteables() + ); + + @Override + protected NamedXContentRegistry xContentRegistry() { + return xContentRegistry; + } + + SearchResponseTests srt = new SearchResponseTests(); + private AggregationsTests aggregationsTests = new AggregationsTests(); + + @Before + public void init() throws Exception { + aggregationsTests.init(); + } + + @After + public void cleanUp() throws Exception { + aggregationsTests.cleanUp(); + } + + public void testFromXContentWithUnregisteredSearchExtBuilders() throws IOException { + List<NamedXContentRegistry.Entry> namedXContents = new ArrayList<>(InternalAggregationTestCase.getDefaultNamedXContents()); + 
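// Suggest entries and the generic ext builder are registered below; the latter lets otherwise-unregistered "ext" sections parse generically in these tests. + 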
namedXContents.addAll(SuggestTests.getDefaultNamedXContents()); + String dummyId = UUID.randomUUID().toString(); + List<SearchExtBuilder> extBuilders = List.of( + new SimpleValueSearchExtBuilder(dummyId), + new MapSearchExtBuilder(Map.of("x", "y", "a", "b")), + new ListSearchExtBuilder(List.of("1", "2", "3")) + ); + SearchResponse response = srt.createTestItem(false, extBuilders); + MediaType xcontentType = randomFrom(XContentType.values()); + boolean humanReadable = randomBoolean(); + final ToXContent.Params params = new ToXContent.MapParams(singletonMap(RestSearchAction.TYPED_KEYS_PARAM, "true")); + BytesReference originalBytes = toShuffledXContent(response, xcontentType, params, humanReadable); + XContentParser parser = createParser(new NamedXContentRegistry(namedXContents), xcontentType.xContent(), originalBytes); + SearchResponse parsed = SearchResponse.fromXContent(parser); + assertEquals(extBuilders.size(), response.getInternalResponse().getSearchExtBuilders().size()); + + List<SearchExtBuilder> actual = parsed.getInternalResponse().getSearchExtBuilders(); + assertEquals(extBuilders.size(), actual.size()); + for (int i = 0; i < actual.size(); i++) { + assertTrue(actual.get(i) instanceof GenericSearchExtBuilder); + } + } + + // This test case fails because GenericSearchExtBuilder does not retain the name of the SearchExtBuilder that it is replacing. + // GenericSearchExtBuilder has its own "generic_ext" section name. + // public void testFromXContentWithSearchExtBuilders() throws IOException { + // String dummyId = UUID.randomUUID().toString(); + // srt.doFromXContentTestWithRandomFields(createTestItem(false, List.of(new SimpleValueSearchExtBuilder(dummyId))), false); + // } + + public void testFromXContentWithGenericSearchExtBuildersForSimpleValues() throws IOException { + String dummyId = UUID.randomUUID().toString(); + srt.doFromXContentTestWithRandomFields( + createTestItem(false, List.of(new GenericSearchExtBuilder(dummyId, GenericSearchExtBuilder.ValueType.SIMPLE))), + false + ); + } + + public void testFromXContentWithGenericSearchExtBuildersForMapValues() throws IOException { + srt.doFromXContentTestWithRandomFields( + createTestItem(false, List.of(new GenericSearchExtBuilder(Map.of("x", "y", "a", "b"), GenericSearchExtBuilder.ValueType.MAP))), + false + ); + } + + public void testFromXContentWithGenericSearchExtBuildersForListValues() throws IOException { + srt.doFromXContentTestWithRandomFields( + createTestItem(false, List.of(new GenericSearchExtBuilder(List.of("1", "2", "3"), GenericSearchExtBuilder.ValueType.LIST))), + false + ); + } + + public void testSerializationWithGenericSearchExtBuildersForSimpleValues() throws IOException { + String id = UUID.randomUUID().toString(); + SearchResponse searchResponse = createTestItem( + false, + List.of(new GenericSearchExtBuilder(id, GenericSearchExtBuilder.ValueType.SIMPLE)) + ); + SearchResponse deserialized = copyWriteable(searchResponse, namedWriteableRegistry, SearchResponse::new, Version.CURRENT); + if (searchResponse.getHits().getTotalHits() == null) { + assertNull(deserialized.getHits().getTotalHits()); + } else { + assertEquals(searchResponse.getHits().getTotalHits().value, deserialized.getHits().getTotalHits().value); + assertEquals(searchResponse.getHits().getTotalHits().relation, deserialized.getHits().getTotalHits().relation); + } + assertEquals(searchResponse.getHits().getHits().length, deserialized.getHits().getHits().length);
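+ // The remaining assertions verify that reduce-phase counts, shard accounting, cluster info, and the ext builder survive the round trip. + 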
assertEquals(searchResponse.getNumReducePhases(), deserialized.getNumReducePhases()); + assertEquals(searchResponse.getFailedShards(), deserialized.getFailedShards()); + assertEquals(searchResponse.getTotalShards(), deserialized.getTotalShards()); + assertEquals(searchResponse.getSkippedShards(), deserialized.getSkippedShards()); + assertEquals(searchResponse.getClusters(), deserialized.getClusters()); + assertEquals( + searchResponse.getInternalResponse().getSearchExtBuilders().get(0), + deserialized.getInternalResponse().getSearchExtBuilders().get(0) + ); + } + + public void testSerializationWithGenericSearchExtBuildersForMapValues() throws IOException { + SearchResponse searchResponse = createTestItem( + false, + List.of(new GenericSearchExtBuilder(Map.of("x", "y", "a", "b"), GenericSearchExtBuilder.ValueType.MAP)) + ); + SearchResponse deserialized = copyWriteable(searchResponse, namedWriteableRegistry, SearchResponse::new, Version.CURRENT); + if (searchResponse.getHits().getTotalHits() == null) { + assertNull(deserialized.getHits().getTotalHits()); + } else { + assertEquals(searchResponse.getHits().getTotalHits().value, deserialized.getHits().getTotalHits().value); + assertEquals(searchResponse.getHits().getTotalHits().relation, deserialized.getHits().getTotalHits().relation); + } + assertEquals(searchResponse.getHits().getHits().length, deserialized.getHits().getHits().length); + assertEquals(searchResponse.getNumReducePhases(), deserialized.getNumReducePhases()); + assertEquals(searchResponse.getFailedShards(), deserialized.getFailedShards()); + assertEquals(searchResponse.getTotalShards(), deserialized.getTotalShards()); + assertEquals(searchResponse.getSkippedShards(), deserialized.getSkippedShards()); + assertEquals(searchResponse.getClusters(), deserialized.getClusters()); + assertEquals( + searchResponse.getInternalResponse().getSearchExtBuilders().get(0), + deserialized.getInternalResponse().getSearchExtBuilders().get(0) + ); + } + + public void testSerializationWithGenericSearchExtBuildersForListValues() throws IOException { + SearchResponse searchResponse = createTestItem( + false, + List.of(new GenericSearchExtBuilder(List.of("1", "2", "3"), GenericSearchExtBuilder.ValueType.LIST)) + ); + SearchResponse deserialized = copyWriteable(searchResponse, namedWriteableRegistry, SearchResponse::new, Version.CURRENT); + if (searchResponse.getHits().getTotalHits() == null) { + assertNull(deserialized.getHits().getTotalHits()); + } else { + assertEquals(searchResponse.getHits().getTotalHits().value, deserialized.getHits().getTotalHits().value); + assertEquals(searchResponse.getHits().getTotalHits().relation, deserialized.getHits().getTotalHits().relation); + } + assertEquals(searchResponse.getHits().getHits().length, deserialized.getHits().getHits().length); + assertEquals(searchResponse.getNumReducePhases(), deserialized.getNumReducePhases()); + assertEquals(searchResponse.getFailedShards(), deserialized.getFailedShards()); + assertEquals(searchResponse.getTotalShards(), deserialized.getTotalShards()); + assertEquals(searchResponse.getSkippedShards(), deserialized.getSkippedShards()); + assertEquals(searchResponse.getClusters(), deserialized.getClusters()); + assertEquals( + searchResponse.getInternalResponse().getSearchExtBuilders().get(0), + deserialized.getInternalResponse().getSearchExtBuilders().get(0) + ); + } + + public SearchResponse createTestItem( + boolean minimal, + List<SearchExtBuilder> searchExtBuilders, + ShardSearchFailure... 
shardSearchFailures + ) { + boolean timedOut = randomBoolean(); + Boolean terminatedEarly = randomBoolean() ? null : randomBoolean(); + int numReducePhases = randomIntBetween(1, 10); + long tookInMillis = randomNonNegativeLong(); + int totalShards = randomIntBetween(1, Integer.MAX_VALUE); + int successfulShards = randomIntBetween(0, totalShards); + int skippedShards = randomIntBetween(0, totalShards); + InternalSearchResponse internalSearchResponse; + if (minimal == false) { + SearchHits hits = SearchHitsTests.createTestItem(true, true); + InternalAggregations aggregations = aggregationsTests.createTestInstance(); + Suggest suggest = SuggestTests.createTestItem(); + SearchProfileShardResults profileShardResults = SearchProfileShardResultsTests.createTestItem(); + internalSearchResponse = new InternalSearchResponse( + hits, + aggregations, + suggest, + profileShardResults, + timedOut, + terminatedEarly, + numReducePhases, + searchExtBuilders + ); + } else { + internalSearchResponse = InternalSearchResponse.empty(); + } + + return new SearchResponse( + internalSearchResponse, + null, + totalShards, + successfulShards, + skippedShards, + tookInMillis, + shardSearchFailures, + randomBoolean() ? randomClusters() : SearchResponse.Clusters.EMPTY, + null + ); + } + + static SearchResponse.Clusters randomClusters() { + int totalClusters = randomIntBetween(0, 10); + int successfulClusters = randomIntBetween(0, totalClusters); + int skippedClusters = totalClusters - successfulClusters; + return new SearchResponse.Clusters(totalClusters, successfulClusters, skippedClusters); + } + + static class SimpleValueSearchExtBuilder extends SearchExtBuilder { + + static ParseField FIELD = new ParseField("simple_value"); + + private final String id; + + public SimpleValueSearchExtBuilder(String id) { + assertNotNull(id); + this.id = id; + } + + public SimpleValueSearchExtBuilder(StreamInput in) throws IOException { + this.id = in.readString(); + } + + public String getId() { + return this.id; + } + + @Override + public String getWriteableName() { + return FIELD.getPreferredName(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(this.id); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.field(FIELD.getPreferredName(), id); + } + + @Override + public int hashCode() { + return 0; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + + if (!(obj instanceof SimpleValueSearchExtBuilder)) { + return false; + } + + return this.id.equals(((SimpleValueSearchExtBuilder) obj).getId()); + } + + public static SimpleValueSearchExtBuilder parse(XContentParser parser) throws IOException { + String id; + XContentParser.Token token = parser.currentToken(); + if (token == XContentParser.Token.VALUE_STRING) { + id = parser.text(); + } else { + throw new ParsingException(parser.getTokenLocation(), "Expected a VALUE_STRING but got " + token); + } + if (id == null) { + throw new ParsingException(parser.getTokenLocation(), "no id specified for " + FIELD.getPreferredName()); + } + return new SimpleValueSearchExtBuilder(id); + } + } + + static class MapSearchExtBuilder extends SearchExtBuilder { + + private final static String EXT_FIELD = "map0"; + + private final Map<String, Object> map; + + public MapSearchExtBuilder(Map<String, String> map) { + this.map = new HashMap<>(); + for (Map.Entry<String, String> e : map.entrySet()) { + this.map.put(e.getKey(), 
e.getValue()); + } + } + + @Override + public String getWriteableName() { + return EXT_FIELD; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(this.map); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.field(EXT_FIELD, this.map); + } + + @Override + public int hashCode() { + return Objects.hash(this.getClass(), this.map); + } + + @Override + public boolean equals(Object obj) { + return false; + } + } + + static class ListSearchExtBuilder extends SearchExtBuilder { + + private final static String EXT_FIELD = "list0"; + + private final List<String> list; + + public ListSearchExtBuilder(List<String> list) { + this.list = new ArrayList<>(); + list.forEach(e -> this.list.add(e)); + } + + @Override + public String getWriteableName() { + return EXT_FIELD; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeCollection(this.list, StreamOutput::writeString); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.field(EXT_FIELD, this.list); + } + + @Override + public int hashCode() { + return Objects.hash(this.getClass(), this.list); + } + + @Override + public boolean equals(Object obj) { + return false; + } + } +} diff --git a/server/src/test/java/org/opensearch/search/NestedIdentityTests.java b/server/src/test/java/org/opensearch/search/NestedIdentityTests.java index 341570247ffa9..913434801c338 100644 --- a/server/src/test/java/org/opensearch/search/NestedIdentityTests.java +++ b/server/src/test/java/org/opensearch/search/NestedIdentityTests.java @@ -32,15 +32,14 @@ package org.opensearch.search; -import org.opensearch.common.Strings; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.search.SearchHit.NestedIdentity; import org.opensearch.test.OpenSearchTestCase; @@ -66,7 +65,7 @@ public static NestedIdentity createTestItem(int depth) { public void testFromXContent() throws IOException { NestedIdentity nestedIdentity = createTestItem(randomInt(3)); XContentType xcontentType = randomFrom(XContentType.values()); - XContentBuilder builder = XContentFactory.contentBuilder(xcontentType); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(xcontentType); if (randomBoolean()) { builder.prettyPrint(); } @@ -87,7 +86,7 @@ public void testToXContent() throws IOException { builder.endObject(); assertEquals( "{\n" + " \"_nested\" : {\n" + " \"field\" : \"foo\",\n" + " \"offset\" : 5\n" + " }\n" + "}", - Strings.toString(builder) + builder.toString() ); nestedIdentity = new NestedIdentity("foo", 5, new NestedIdentity("bar", 3, null)); @@ -107,7 +106,7 @@ public void testToXContent() throws IOException { + " }\n" + " }\n" + "}", - Strings.toString(builder) + builder.toString() ); } diff --git a/server/src/test/java/org/opensearch/search/SearchCancellationTests.java 
b/server/src/test/java/org/opensearch/search/SearchCancellationTests.java index 011723da36a30..fce58eecbafb1 100644 --- a/server/src/test/java/org/opensearch/search/SearchCancellationTests.java +++ b/server/src/test/java/org/opensearch/search/SearchCancellationTests.java @@ -38,24 +38,24 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.PointValues; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.TotalHitCountCollector; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.tests.util.TestUtil; import org.apache.lucene.util.automaton.CompiledAutomaton; import org.apache.lucene.util.automaton.RegExp; -import org.junit.Before; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.tasks.TaskCancelledException; import org.opensearch.index.shard.IndexShard; import org.opensearch.search.internal.ContextIndexSearcher; import org.opensearch.search.internal.SearchContext; -import org.opensearch.tasks.TaskCancelledException; import org.opensearch.test.OpenSearchTestCase; import org.junit.AfterClass; +import org.junit.Before; import org.junit.BeforeClass; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/search/SearchHitTests.java b/server/src/test/java/org/opensearch/search/SearchHitTests.java index 04aa60b2be913..13b4d9f976ed5 100644 --- a/server/src/test/java/org/opensearch/search/SearchHitTests.java +++ b/server/src/test/java/org/opensearch/search/SearchHitTests.java @@ -36,35 +36,38 @@ import org.apache.lucene.search.TotalHits; import org.opensearch.Version; import org.opensearch.action.OriginalIndices; -import org.opensearch.common.Strings; +import org.opensearch.common.document.DocumentField; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.document.DocumentField; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.core.index.Index; import org.opensearch.index.get.GetResultTests; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.search.SearchHit.NestedIdentity; import org.opensearch.search.fetch.subphase.highlight.HighlightField; import org.opensearch.search.fetch.subphase.highlight.HighlightFieldTests; import org.opensearch.test.AbstractWireSerializingTestCase; import org.opensearch.test.RandomObjects; import org.opensearch.test.VersionUtils; +import org.junit.Assert; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.function.Predicate; -import 
static org.opensearch.common.xcontent.XContentHelper.toXContent; +import static org.opensearch.core.xcontent.XContentHelper.toXContent; import static org.opensearch.test.XContentTestUtils.insertRandomFields; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertToXContentEquivalent; import static org.hamcrest.Matchers.containsString; @@ -75,11 +78,30 @@ import static org.hamcrest.Matchers.nullValue; public class SearchHitTests extends AbstractWireSerializingTestCase<SearchHit> { + + private Map<String, Float> getSampleMatchedQueries() { + Map<String, Float> matchedQueries = new LinkedHashMap<>(); + matchedQueries.put("query1", 1.0f); + matchedQueries.put("query2", 0.5f); + return matchedQueries; + } + + public static SearchHit createTestItemWithMatchedQueriesScores(boolean withOptionalInnerHits, boolean withShardTarget) { + var searchHit = createTestItem(randomFrom(XContentType.values()), withOptionalInnerHits, withShardTarget); + int size = randomIntBetween(1, 5); // Ensure at least one matched query + Map<String, Float> matchedQueries = new LinkedHashMap<>(size); + for (int i = 0; i < size; i++) { + matchedQueries.put(randomAlphaOfLength(5), randomFloat()); + } + searchHit.matchedQueriesWithScores(matchedQueries); + return searchHit; + } + public static SearchHit createTestItem(boolean withOptionalInnerHits, boolean withShardTarget) { return createTestItem(randomFrom(XContentType.values()), withOptionalInnerHits, withShardTarget); } - public static SearchHit createTestItem(XContentType xContentType, boolean withOptionalInnerHits, boolean transportSerialization) { + public static SearchHit createTestItem(final MediaType mediaType, boolean withOptionalInnerHits, boolean transportSerialization) { int internalId = randomInt(); String uid = randomAlphaOfLength(10); NestedIdentity nestedIdentity = null; @@ -90,8 +112,8 @@ public static SearchHit createTestItem(XContentType xContentType, boolean withOp Map<String, DocumentField> documentFields = new HashMap<>(); if (frequently()) { if (randomBoolean()) { - metaFields = GetResultTests.randomDocumentFields(xContentType, true).v2(); - documentFields = GetResultTests.randomDocumentFields(xContentType, false).v2(); + metaFields = GetResultTests.randomDocumentFields(mediaType, true).v2(); + documentFields = GetResultTests.randomDocumentFields(mediaType, false).v2(); } } @@ -104,7 +126,7 @@ public static SearchHit createTestItem(XContentType xContentType, boolean withOp } } if (frequently()) { - hit.sourceRef(RandomObjects.randomSource(random(), xContentType)); + hit.sourceRef(RandomObjects.randomSource(random(), mediaType)); } if (randomBoolean()) { hit.version(randomLong()); @@ -115,7 +137,7 @@ public static SearchHit createTestItem(XContentType xContentType, boolean withOp hit.version(randomLongBetween(1, Long.MAX_VALUE)); } if (randomBoolean()) { - hit.sortValues(SearchSortValuesTests.createTestItem(xContentType, transportSerialization)); + hit.sortValues(SearchSortValuesTests.createTestItem(mediaType, transportSerialization)); } if (randomBoolean()) { int size = randomIntBetween(0, 5); @@ -128,11 +150,11 @@ public static SearchHit createTestItem(XContentType xContentType, boolean withOp } if (randomBoolean()) { int size = randomIntBetween(0, 5); - String[] matchedQueries = new String[size]; + Map<String, Float> matchedQueries = new LinkedHashMap<>(size); for (int i = 0; i < size; i++) { - matchedQueries[i] = randomAlphaOfLength(5); + matchedQueries.put(randomAlphaOfLength(5), Float.NaN); } - hit.matchedQueries(matchedQueries); + 
hit.matchedQueriesWithScores(matchedQueries); } if (randomBoolean()) { hit.explanation(createExplanation(randomIntBetween(0, 5))); @@ -142,7 +164,7 @@ public static SearchHit createTestItem(XContentType xContentType, boolean withOp if (innerHitsSize > 0) { Map<String, SearchHits> innerHits = new HashMap<>(innerHitsSize); for (int i = 0; i < innerHitsSize; i++) { - innerHits.put(randomAlphaOfLength(5), SearchHitsTests.createTestItem(xContentType, false, transportSerialization)); + innerHits.put(randomAlphaOfLength(5), SearchHitsTests.createTestItem(mediaType, false, transportSerialization)); } hit.setInnerHits(innerHits); } @@ -218,6 +240,21 @@ public void testFromXContentLenientParsing() throws IOException { assertToXContentEquivalent(originalBytes, toXContent(parsed, xContentType, true), xContentType); } + public void testSerializationDeserializationWithMatchedQueriesScores() throws IOException { + SearchHit searchHit = createTestItemWithMatchedQueriesScores(true, true); + SearchHit deserializedSearchHit = copyWriteable(searchHit, getNamedWriteableRegistry(), SearchHit::new, Version.V_3_0_0); + assertEquals(searchHit, deserializedSearchHit); + assertEquals(searchHit.getMatchedQueriesAndScores(), deserializedSearchHit.getMatchedQueriesAndScores()); + } + + public void testSerializationDeserializationWithMatchedQueriesList() throws IOException { + SearchHit searchHit = createTestItem(true, true); + SearchHit deserializedSearchHit = copyWriteable(searchHit, getNamedWriteableRegistry(), SearchHit::new, Version.V_2_12_0); + assertEquals(searchHit, deserializedSearchHit); + assertEquals(searchHit.getMatchedQueriesAndScores(), deserializedSearchHit.getMatchedQueriesAndScores()); + Assert.assertArrayEquals(searchHit.getMatchedQueries(), deserializedSearchHit.getMatchedQueries()); + } + /** * When e.g. with "stored_fields": "_none_", only "_index" and "_score" are returned. */ @@ -240,7 +277,126 @@ public void testToXContent() throws IOException { searchHit.score(1.5f); XContentBuilder builder = JsonXContent.contentBuilder(); searchHit.toXContent(builder, ToXContent.EMPTY_PARAMS); - assertEquals("{\"_id\":\"id1\",\"_score\":1.5}", Strings.toString(builder)); + assertEquals("{\"_id\":\"id1\",\"_score\":1.5}", builder.toString()); + } + + public void testSerializeShardTargetWithNewVersion() throws Exception { + String clusterAlias = randomBoolean() ? 
null : "cluster_alias"; + SearchShardTarget target = new SearchShardTarget( + "_node_id", + new ShardId(new Index("_index", "_na_"), 0), + clusterAlias, + OriginalIndices.NONE + ); + + Map<String, SearchHits> innerHits = new HashMap<>(); + SearchHit innerHit1 = new SearchHit(0, "_id", null, null); + innerHit1.shard(target); + SearchHit innerInnerHit2 = new SearchHit(0, "_id", null, null); + innerInnerHit2.shard(target); + innerHits.put("1", new SearchHits(new SearchHit[] { innerInnerHit2 }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1f)); + innerHit1.setInnerHits(innerHits); + SearchHit innerHit2 = new SearchHit(0, "_id", null, null); + innerHit2.shard(target); + SearchHit innerHit3 = new SearchHit(0, "_id", null, null); + innerHit3.shard(target); + + innerHits = new HashMap<>(); + SearchHit hit1 = new SearchHit(0, "_id", null, null); + innerHits.put("1", new SearchHits(new SearchHit[] { innerHit1, innerHit2 }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1f)); + innerHits.put("2", new SearchHits(new SearchHit[] { innerHit3 }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1f)); + hit1.shard(target); + hit1.setInnerHits(innerHits); + + SearchHit hit2 = new SearchHit(0, "_id", null, null); + hit2.shard(target); + + SearchHits hits = new SearchHits(new SearchHit[] { hit1, hit2 }, new TotalHits(2, TotalHits.Relation.EQUAL_TO), 1f); + + SearchHits results = copyWriteable(hits, getNamedWriteableRegistry(), SearchHits::new, Version.V_3_0_0); + SearchShardTarget deserializedTarget = results.getAt(0).getShard(); + assertThat(deserializedTarget, equalTo(target)); + assertThat(results.getAt(0).getInnerHits().get("1").getAt(0).getShard(), notNullValue()); + assertThat(results.getAt(0).getInnerHits().get("1").getAt(0).getInnerHits().get("1").getAt(0).getShard(), notNullValue()); + assertThat(results.getAt(0).getInnerHits().get("1").getAt(1).getShard(), notNullValue()); + assertThat(results.getAt(0).getInnerHits().get("2").getAt(0).getShard(), notNullValue()); + for (SearchHit hit : results) { + assertEquals(clusterAlias, hit.getClusterAlias()); + if (hit.getInnerHits() != null) { + for (SearchHits innerhits : hit.getInnerHits().values()) { + for (SearchHit innerHit : innerhits) { + assertEquals(clusterAlias, innerHit.getClusterAlias()); + } + } + } + } + assertThat(results.getAt(1).getShard(), equalTo(target)); + } + + public void testSerializeShardTargetWithNewVersionAndMatchedQueries() throws Exception { + String clusterAlias = randomBoolean() ? 
null : "cluster_alias"; + SearchShardTarget target = new SearchShardTarget( + "_node_id", + new ShardId(new Index("_index", "_na_"), 0), + clusterAlias, + OriginalIndices.NONE + ); + + Map<String, SearchHits> innerHits = new HashMap<>(); + SearchHit innerHit1 = new SearchHit(0, "_id", null, null); + innerHit1.shard(target); + innerHit1.matchedQueriesWithScores(getSampleMatchedQueries()); + SearchHit innerInnerHit2 = new SearchHit(0, "_id", null, null); + innerInnerHit2.shard(target); + innerHits.put("1", new SearchHits(new SearchHit[] { innerInnerHit2 }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1f)); + innerHit1.setInnerHits(innerHits); + SearchHit innerHit2 = new SearchHit(0, "_id", null, null); + innerHit2.shard(target); + innerHit2.matchedQueriesWithScores(getSampleMatchedQueries()); + SearchHit innerHit3 = new SearchHit(0, "_id", null, null); + innerHit3.shard(target); + innerHit3.matchedQueriesWithScores(getSampleMatchedQueries()); + + innerHits = new HashMap<>(); + SearchHit hit1 = new SearchHit(0, "_id", null, null); + innerHits.put("1", new SearchHits(new SearchHit[] { innerHit1, innerHit2 }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1f)); + innerHits.put("2", new SearchHits(new SearchHit[] { innerHit3 }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1f)); + hit1.shard(target); + hit1.setInnerHits(innerHits); + + SearchHit hit2 = new SearchHit(0, "_id", null, null); + hit2.shard(target); + + SearchHits hits = new SearchHits(new SearchHit[] { hit1, hit2 }, new TotalHits(2, TotalHits.Relation.EQUAL_TO), 1f); + + SearchHits results = copyWriteable(hits, getNamedWriteableRegistry(), SearchHits::new, Version.V_3_0_0); + SearchShardTarget deserializedTarget = results.getAt(0).getShard(); + assertThat(deserializedTarget, equalTo(target)); + assertThat(results.getAt(0).getInnerHits().get("1").getAt(0).getShard(), notNullValue()); + assertThat(results.getAt(0).getInnerHits().get("1").getAt(0).getInnerHits().get("1").getAt(0).getShard(), notNullValue()); + assertThat(results.getAt(0).getInnerHits().get("1").getAt(1).getShard(), notNullValue()); + assertThat(results.getAt(0).getInnerHits().get("2").getAt(0).getShard(), notNullValue()); + String[] expectedMatchedQueries = new String[] { "query1", "query2" }; + String[] actualMatchedQueries = results.getAt(0).getInnerHits().get("1").getAt(0).getMatchedQueries(); + assertArrayEquals(expectedMatchedQueries, actualMatchedQueries); + + Map<String, Float> expectedMatchedQueriesAndScores = new LinkedHashMap<>(); + expectedMatchedQueriesAndScores.put("query1", 1.0f); + expectedMatchedQueriesAndScores.put("query2", 0.5f); + + Map<String, Float> actualMatchedQueriesAndScores = results.getAt(0).getInnerHits().get("1").getAt(0).getMatchedQueriesAndScores(); + assertEquals(expectedMatchedQueriesAndScores, actualMatchedQueriesAndScores); + for (SearchHit hit : results) { + assertEquals(clusterAlias, hit.getClusterAlias()); + if (hit.getInnerHits() != null) { + for (SearchHits innerhits : hit.getInnerHits().values()) { + for (SearchHit innerHit : innerhits) { + assertEquals(clusterAlias, innerHit.getClusterAlias()); + } + } + } + } + assertThat(results.getAt(1).getShard(), equalTo(target)); } public void testSerializeShardTarget() throws Exception { @@ -319,7 +475,7 @@ public void testHasSource() { public void testWeirdScriptFields() throws Exception { { XContentParser parser = createParser( - XContentType.JSON.xContent(), + MediaTypeRegistry.JSON.xContent(), "{\n" + " \"_index\": \"twitter\",\n" + " \"_id\": \"1\",\n" @@ -339,7 +495,7 @@ public 
void testWeirdScriptFields() throws Exception { } { XContentParser parser = createParser( - XContentType.JSON.xContent(), + MediaTypeRegistry.JSON.xContent(), "{\n" + " \"_index\": \"twitter\",\n" + " \"_id\": \"1\",\n" @@ -395,11 +551,11 @@ public void testToXContentEmptyFields() throws IOException { fields.put("bar", new DocumentField("bar", Collections.emptyList())); SearchHit hit = new SearchHit(0, "_id", null, fields, Collections.emptyMap()); { - BytesReference originalBytes = toShuffledXContent(hit, XContentType.JSON, ToXContent.EMPTY_PARAMS, randomBoolean()); + BytesReference originalBytes = toShuffledXContent(hit, MediaTypeRegistry.JSON, ToXContent.EMPTY_PARAMS, randomBoolean()); // checks that the fields section is completely omitted in the rendering. assertThat(originalBytes.utf8ToString(), not(containsString("fields"))); final SearchHit parsed; - try (XContentParser parser = createParser(XContentType.JSON.xContent(), originalBytes)) { + try (XContentParser parser = createParser(MediaTypeRegistry.JSON.xContent(), originalBytes)) { parser.nextToken(); // jump to first START_OBJECT parsed = SearchHit.fromXContent(parser); assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); @@ -413,9 +569,9 @@ public void testToXContentEmptyFields() throws IOException { fields.put("bar", new DocumentField("bar", Collections.singletonList("value"))); hit = new SearchHit(0, "_id", null, fields, Collections.emptyMap()); { - BytesReference originalBytes = toShuffledXContent(hit, XContentType.JSON, ToXContent.EMPTY_PARAMS, randomBoolean()); + BytesReference originalBytes = toShuffledXContent(hit, MediaTypeRegistry.JSON, ToXContent.EMPTY_PARAMS, randomBoolean()); final SearchHit parsed; - try (XContentParser parser = createParser(XContentType.JSON.xContent(), originalBytes)) { + try (XContentParser parser = createParser(MediaTypeRegistry.JSON.xContent(), originalBytes)) { parser.nextToken(); // jump to first START_OBJECT parsed = SearchHit.fromXContent(parser); assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); @@ -429,9 +585,9 @@ public void testToXContentEmptyFields() throws IOException { metadata.put("_routing", new DocumentField("_routing", Collections.emptyList())); hit = new SearchHit(0, "_id", null, fields, Collections.emptyMap()); { - BytesReference originalBytes = toShuffledXContent(hit, XContentType.JSON, ToXContent.EMPTY_PARAMS, randomBoolean()); + BytesReference originalBytes = toShuffledXContent(hit, MediaTypeRegistry.JSON, ToXContent.EMPTY_PARAMS, randomBoolean()); final SearchHit parsed; - try (XContentParser parser = createParser(XContentType.JSON.xContent(), originalBytes)) { + try (XContentParser parser = createParser(MediaTypeRegistry.JSON.xContent(), originalBytes)) { parser.nextToken(); // jump to first START_OBJECT parsed = SearchHit.fromXContent(parser); assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); diff --git a/server/src/test/java/org/opensearch/search/SearchHitsTests.java b/server/src/test/java/org/opensearch/search/SearchHitsTests.java index d5cee765edee8..fd3ba35a4d3bb 100644 --- a/server/src/test/java/org/opensearch/search/SearchHitsTests.java +++ b/server/src/test/java/org/opensearch/search/SearchHitsTests.java @@ -36,18 +36,18 @@ import org.apache.lucene.search.TotalHits; import org.apache.lucene.tests.util.TestUtil; import org.opensearch.action.OriginalIndices; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.core.common.io.stream.Writeable; 
import org.opensearch.common.lucene.LuceneTests; import org.opensearch.common.xcontent.LoggingDeprecationHandler; -import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; import org.opensearch.test.AbstractSerializingTestCase; import java.io.IOException; @@ -62,13 +62,13 @@ public static SearchHits createTestItem(boolean withOptionalInnerHits, boolean w private static SearchHit[] createSearchHitArray( int size, - XContentType xContentType, + final MediaType mediaType, boolean withOptionalInnerHits, boolean transportSerialization ) { SearchHit[] hits = new SearchHit[size]; for (int i = 0; i < hits.length; i++) { - hits[i] = SearchHitTests.createTestItem(xContentType, withOptionalInnerHits, transportSerialization); + hits[i] = SearchHitTests.createTestItem(mediaType, withOptionalInnerHits, transportSerialization); } return hits; } @@ -78,18 +78,18 @@ private static TotalHits randomTotalHits(TotalHits.Relation relation) { return new TotalHits(totalHits, relation); } - public static SearchHits createTestItem(XContentType xContentType, boolean withOptionalInnerHits, boolean transportSerialization) { - return createTestItem(xContentType, withOptionalInnerHits, transportSerialization, randomFrom(TotalHits.Relation.values())); + public static SearchHits createTestItem(final MediaType mediaType, boolean withOptionalInnerHits, boolean transportSerialization) { + return createTestItem(mediaType, withOptionalInnerHits, transportSerialization, randomFrom(TotalHits.Relation.values())); } private static SearchHits createTestItem( - XContentType xContentType, + final MediaType mediaType, boolean withOptionalInnerHits, boolean transportSerialization, TotalHits.Relation totalHitsRelation ) { int searchHits = randomIntBetween(0, 5); - SearchHit[] hits = createSearchHitArray(searchHits, xContentType, withOptionalInnerHits, transportSerialization); + SearchHit[] hits = createSearchHitArray(searchHits, mediaType, withOptionalInnerHits, transportSerialization); TotalHits totalHits = frequently() ? randomTotalHits(totalHitsRelation) : null; float maxScore = frequently() ? randomFloat() : Float.NaN; SortField[] sortFields = null; @@ -224,13 +224,13 @@ protected SearchHits createTestInstance() { } @Override - protected SearchHits createXContextTestInstance(XContentType xContentType) { + protected SearchHits createXContextTestInstance(final MediaType mediaType) { // We don't set SearchHit#shard (withShardTarget is false) in this test // because the rest serialization does not render this information so the // deserialized hit cannot be equal to the original instance. // There is another test (#testFromXContentWithShards) that checks the // rest serialization with shard targets. 
- return createTestItem(xContentType, true, false); + return createTestItem(mediaType, true, false); } @Override @@ -261,7 +261,7 @@ public void testToXContent() throws IOException { "{\"hits\":{\"total\":{\"value\":1000,\"relation\":\"eq\"},\"max_score\":1.5," + "\"hits\":[{\"_id\":\"id1\",\"_score\":null}," + "{\"_id\":\"id2\",\"_score\":null}]}}", - Strings.toString(builder) + builder.toString() ); } diff --git a/server/src/test/java/org/opensearch/search/SearchModuleTests.java b/server/src/test/java/org/opensearch/search/SearchModuleTests.java index 375bceb16d03a..01b8d6d8cdd72 100644 --- a/server/src/test/java/org/opensearch/search/SearchModuleTests.java +++ b/server/src/test/java/org/opensearch/search/SearchModuleTests.java @@ -33,11 +33,11 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.util.CharsRefBuilder; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -113,7 +113,6 @@ import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.hasSize; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; public class SearchModuleTests extends OpenSearchTestCase { @@ -431,9 +430,7 @@ public void testDefaultQueryPhaseSearcher() { } public void testConcurrentQueryPhaseSearcher() { - Settings settings = Settings.builder().put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, true).build(); - FeatureFlags.initializeFeatureFlags(settings); - SearchModule searchModule = new SearchModule(settings, Collections.emptyList()); + SearchModule searchModule = new SearchModule(Settings.EMPTY, Collections.emptyList()); TestSearchContext searchContext = new TestSearchContext(null); searchContext.setConcurrentSegmentSearchEnabled(true); QueryPhase queryPhase = searchModule.getQueryPhase(); @@ -443,8 +440,6 @@ public void testConcurrentQueryPhaseSearcher() { } public void testPluginQueryPhaseSearcher() { - Settings settings = Settings.builder().put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, true).build(); - FeatureFlags.initializeFeatureFlags(settings); QueryPhaseSearcher queryPhaseSearcher = (searchContext, searcher, query, collectors, hasFilterCollector, hasTimeout) -> false; SearchPlugin plugin1 = new SearchPlugin() { @Override @@ -452,7 +447,7 @@ public Optional<QueryPhaseSearcher> getQueryPhaseSearcher() { return Optional.of(queryPhaseSearcher); } }; - SearchModule searchModule = new SearchModule(settings, Collections.singletonList(plugin1)); + SearchModule searchModule = new SearchModule(Settings.EMPTY, Collections.singletonList(plugin1)); QueryPhase queryPhase = searchModule.getQueryPhase(); TestSearchContext searchContext = new TestSearchContext(null); assertEquals(queryPhaseSearcher, queryPhase.getQueryPhaseSearcher()); @@ -480,18 +475,10 @@ public Optional<QueryPhaseSearcher> getQueryPhaseSearcher() { } public void testIndexSearcher() { - SearchModule searchModule = new SearchModule(Settings.EMPTY, Collections.emptyList()); ThreadPool threadPool = mock(ThreadPool.class); - 
assertNull(searchModule.getIndexSearcherExecutor(threadPool)); - verify(threadPool, times(0)).executor(ThreadPool.Names.INDEX_SEARCHER); - - // enable concurrent segment search feature flag - Settings settings = Settings.builder().put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, true).build(); - FeatureFlags.initializeFeatureFlags(settings); - searchModule = new SearchModule(settings, Collections.emptyList()); + SearchModule searchModule = new SearchModule(Settings.EMPTY, Collections.emptyList()); searchModule.getIndexSearcherExecutor(threadPool); verify(threadPool).executor(ThreadPool.Names.INDEX_SEARCHER); - FeatureFlags.initializeFeatureFlags(Settings.EMPTY); } public void testMultiplePluginRegisterIndexSearcherProvider() { diff --git a/server/src/test/java/org/opensearch/search/SearchServiceTests.java b/server/src/test/java/org/opensearch/search/SearchServiceTests.java index 2371c5812814a..d502bab5918a8 100644 --- a/server/src/test/java/org/opensearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/opensearch/search/SearchServiceTests.java @@ -37,7 +37,6 @@ import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.Query; import org.apache.lucene.store.AlreadyClosedException; -import org.opensearch.action.ActionListener; import org.opensearch.action.OriginalIndices; import org.opensearch.action.index.IndexResponse; import org.opensearch.action.search.ClearScrollRequest; @@ -55,16 +54,18 @@ import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.WriteRequest; import org.opensearch.common.UUIDs; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsException; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.IndexService; @@ -80,12 +81,10 @@ import org.opensearch.index.search.stats.SearchStats; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.SearchOperationListener; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.IndicesService; import org.opensearch.indices.settings.InternalOrPrivateSettingsPlugin; import org.opensearch.plugins.Plugin; import org.opensearch.plugins.SearchPlugin; -import org.opensearch.core.rest.RestStatus; import org.opensearch.script.MockScriptEngine; import org.opensearch.script.MockScriptPlugin; import org.opensearch.script.Script; @@ -227,7 +226,7 @@ public void onQueryPhase(SearchContext context, long tookInNanos) { @Override protected Settings nodeSettings() { - return Settings.builder().put("search.default_search_timeout", "5s").put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, true).build(); + return Settings.builder().put("search.default_search_timeout", "5s").build(); } public void testClearOnClose() { @@ 
-1184,7 +1183,7 @@ public void testCreateSearchContext() throws IOException { public void testConcurrentSegmentSearchSearchContext() throws IOException { Boolean[][] scenarios = { // cluster setting, index setting, concurrent search enabled? - { null, null, true }, + { null, null, false }, { null, false, false }, { null, true, true }, { true, null, true }, @@ -1269,9 +1268,80 @@ public void testConcurrentSegmentSearchSearchContext() throws IOException { .get() .getSetting(index, IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey()) ); - assertEquals(concurrentSearchEnabled, searchContext.isConcurrentSegmentSearchEnabled()); + searchContext.evaluateRequestShouldUseConcurrentSearch(); + assertEquals(concurrentSearchEnabled, searchContext.shouldUseConcurrentSearch()); + // verify executor nullability with concurrent search enabled/disabled + if (concurrentSearchEnabled) { + assertNotNull(searchContext.searcher().getExecutor()); + } else { + assertNull(searchContext.searcher().getExecutor()); + } + } + } + // Cleanup + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().putNull(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey())) + .get(); + } + + /** + * Test that the concurrent segment search flag on the Search Context is set correctly at construction time. + * The same value is used for the lifetime of the context object, even if the cluster setting changes before the request completes. + */ + public void testConcurrentSegmentSearchIsSetOnceDuringContextCreation() throws IOException { + String index = randomAlphaOfLengthBetween(5, 10).toLowerCase(Locale.ROOT); + IndexService indexService = createIndex(index); + final SearchService service = getInstanceFromNode(SearchService.class); + ShardId shardId = new ShardId(indexService.index(), 0); + long nowInMillis = System.currentTimeMillis(); + String clusterAlias = randomBoolean() ?
null : randomAlphaOfLengthBetween(3, 10); + SearchRequest searchRequest = new SearchRequest(); + searchRequest.allowPartialSearchResults(randomBoolean()); + ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + shardId, + indexService.numberOfShards(), + AliasFilter.EMPTY, + 1f, + nowInMillis, + clusterAlias, + Strings.EMPTY_ARRAY + ); + + Boolean[] concurrentSearchStates = new Boolean[] { true, false }; + for (Boolean concurrentSearchSetting : concurrentSearchStates) { + // update concurrent search cluster setting and create search context + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), concurrentSearchSetting) + ) + .get(); + try (DefaultSearchContext searchContext = service.createSearchContext(request, new TimeValue(System.currentTimeMillis()))) { + // verify concurrent search state in context + searchContext.evaluateRequestShouldUseConcurrentSearch(); + assertEquals(concurrentSearchSetting, searchContext.shouldUseConcurrentSearch()); + // verify executor state in searcher + assertEquals(concurrentSearchSetting, (searchContext.searcher().getExecutor() != null)); + + // update cluster setting to flip the concurrent segment search state + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), !concurrentSearchSetting) + ) + .get(); + + // verify that concurrent segment search is still set to same expected value for the context + assertEquals(concurrentSearchSetting, searchContext.shouldUseConcurrentSearch()); } } + // Cleanup client().admin() .cluster() @@ -1680,7 +1750,7 @@ public void testCanMatchSearchAfterAscGreaterThanMax() throws IOException { MinAndMax<?> minMax = new MinAndMax<Long>(0L, 9L); FieldSortBuilder primarySort = new FieldSortBuilder("test"); primarySort.order(SortOrder.ASC); - assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort), false); + assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort, SearchContext.TRACK_TOTAL_HITS_DISABLED), false); } /** @@ -1693,7 +1763,7 @@ public void testCanMatchSearchAfterAscLessThanMax() throws IOException { MinAndMax<?> minMax = new MinAndMax<Long>(0L, 9L); FieldSortBuilder primarySort = new FieldSortBuilder("test"); primarySort.order(SortOrder.ASC); - assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort), true); + assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort, SearchContext.TRACK_TOTAL_HITS_DISABLED), true); } /** @@ -1706,7 +1776,7 @@ public void testCanMatchSearchAfterAscEqualMax() throws IOException { MinAndMax<?> minMax = new MinAndMax<Long>(0L, 9L); FieldSortBuilder primarySort = new FieldSortBuilder("test"); primarySort.order(SortOrder.ASC); - assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort), true); + assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort, SearchContext.TRACK_TOTAL_HITS_DISABLED), true); } /** @@ -1719,7 +1789,7 @@ public void testCanMatchSearchAfterDescGreaterThanMin() throws IOException { MinAndMax<?> minMax = new MinAndMax<Long>(0L, 9L); FieldSortBuilder primarySort = new FieldSortBuilder("test"); primarySort.order(SortOrder.DESC); - assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort), true); + 
assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort, SearchContext.TRACK_TOTAL_HITS_DISABLED), true); } /** @@ -1732,7 +1802,7 @@ public void testCanMatchSearchAfterDescLessThanMin() throws IOException { MinAndMax<?> minMax = new MinAndMax<Long>(0L, 9L); FieldSortBuilder primarySort = new FieldSortBuilder("test"); primarySort.order(SortOrder.DESC); - assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort), false); + assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort, SearchContext.TRACK_TOTAL_HITS_DISABLED), false); } /** @@ -1745,7 +1815,7 @@ public void testCanMatchSearchAfterDescEqualMin() throws IOException { MinAndMax<?> minMax = new MinAndMax<Long>(0L, 9L); FieldSortBuilder primarySort = new FieldSortBuilder("test"); primarySort.order(SortOrder.DESC); - assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort), true); + assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort, SearchContext.TRACK_TOTAL_HITS_DISABLED), true); } /** @@ -1759,9 +1829,24 @@ public void testCanMatchSearchAfterWithMissing() throws IOException { FieldSortBuilder primarySort = new FieldSortBuilder("test"); primarySort.order(SortOrder.DESC); // Should be false without missing values - assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort), false); + assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort, SearchContext.TRACK_TOTAL_HITS_DISABLED), false); primarySort.missing("_last"); // Should be true with missing values - assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort), true); + assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort, SearchContext.TRACK_TOTAL_HITS_DISABLED), true); + } + + /** + * Test for DESC order search_after query with track_total_hits=true. 
+ * Min = 0L, Max = 9L, search_after = -1L + * With the above min/max and search_after the shard cannot produce a competitive hit, + * but because track_total_hits is enabled (tracking up to 1000 hits) its documents + * still have to be counted, so the expected result is canMatch = true + */ + public void testCanMatchSearchAfterDescLessThanMinWithTrackTotalHits() throws IOException { + FieldDoc searchAfter = new FieldDoc(0, 0, new Long[] { -1L }); + MinAndMax<?> minMax = new MinAndMax<Long>(0L, 9L); + FieldSortBuilder primarySort = new FieldSortBuilder("test"); + primarySort.order(SortOrder.DESC); + assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort, 1000), true); } } diff --git a/server/src/test/java/org/opensearch/search/SearchSortValuesAndFormatsTests.java b/server/src/test/java/org/opensearch/search/SearchSortValuesAndFormatsTests.java index 0266f5cfcce60..3149ab0e2905f 100644 --- a/server/src/test/java/org/opensearch/search/SearchSortValuesAndFormatsTests.java +++ b/server/src/test/java/org/opensearch/search/SearchSortValuesAndFormatsTests.java @@ -33,9 +33,9 @@ package org.opensearch.search; import org.apache.lucene.util.BytesRef; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.settings.Settings; import org.opensearch.test.AbstractWireSerializingTestCase; import org.junit.Before; diff --git a/server/src/test/java/org/opensearch/search/SearchSortValuesTests.java b/server/src/test/java/org/opensearch/search/SearchSortValuesTests.java index 36d01fa4eb988..2f73659c449d4 100644 --- a/server/src/test/java/org/opensearch/search/SearchSortValuesTests.java +++ b/server/src/test/java/org/opensearch/search/SearchSortValuesTests.java @@ -33,14 +33,14 @@ package org.opensearch.search; import org.apache.lucene.util.BytesRef; -import org.opensearch.common.Strings; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.lucene.LuceneTests; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.test.AbstractSerializingTestCase; import org.opensearch.test.RandomObjects; @@ -49,13 +49,13 @@ public class SearchSortValuesTests extends AbstractSerializingTestCase<SearchSortValues> { - public static SearchSortValues createTestItem(XContentType xContentType, boolean transportSerialization) { + public static SearchSortValues createTestItem(final MediaType mediaType, boolean transportSerialization) { int size = randomIntBetween(1, 20); Object[] values = new Object[size]; if (transportSerialization) { DocValueFormat[] sortValueFormats = new DocValueFormat[size]; for (int i = 0; i < size; i++) { - Object sortValue = randomSortValue(xContentType, transportSerialization); + Object sortValue = randomSortValue(mediaType, transportSerialization); values[i] = sortValue; // make sure that for BytesRef, we provide a specific doc value format that overrides format(BytesRef) sortValueFormats[i] = sortValue instanceof BytesRef ?
DocValueFormat.RAW : randomDocValueFormat(); @@ -64,7 +64,7 @@ public static SearchSortValues createTestItem(XContentType xContentType, boolean } else { // xcontent serialization doesn't write/parse the raw sort values, only the formatted ones for (int i = 0; i < size; i++) { - Object sortValue = randomSortValue(xContentType, transportSerialization); + Object sortValue = randomSortValue(mediaType, transportSerialization); // make sure that BytesRef are not provided as formatted values sortValue = sortValue instanceof BytesRef ? DocValueFormat.RAW.format((BytesRef) sortValue) : sortValue; values[i] = sortValue; @@ -73,10 +73,10 @@ public static SearchSortValues createTestItem(XContentType xContentType, boolean } } - private static Object randomSortValue(XContentType xContentType, boolean transportSerialization) { + private static Object randomSortValue(final MediaType mediaType, boolean transportSerialization) { Object randomSortValue = LuceneTests.randomSortValue(); // to simplify things, we directly serialize what we expect we would parse back when testing xcontent serialization - return transportSerialization ? randomSortValue : RandomObjects.getExpectedParsedValue(xContentType, randomSortValue); + return transportSerialization ? randomSortValue : RandomObjects.getExpectedParsedValue(mediaType, randomSortValue); } private static DocValueFormat randomDocValueFormat() { @@ -103,8 +103,8 @@ protected SearchSortValues doParseInstance(XContentParser parser) throws IOExcep } @Override - protected SearchSortValues createXContextTestInstance(XContentType xContentType) { - return createTestItem(xContentType, false); + protected SearchSortValues createXContextTestInstance(final MediaType mediaType) { + return createTestItem(mediaType, false); } @Override @@ -129,7 +129,7 @@ public void testToXContent() throws IOException { builder.startObject(); sortValues.toXContent(builder, ToXContent.EMPTY_PARAMS); builder.endObject(); - assertEquals("{\"sort\":[1,\"foo\",3.0]}", Strings.toString(builder)); + assertEquals("{\"sort\":[1,\"foo\",3.0]}", builder.toString()); } { SearchSortValues sortValues = new SearchSortValues(new Object[0]); @@ -137,7 +137,7 @@ public void testToXContent() throws IOException { builder.startObject(); sortValues.toXContent(builder, ToXContent.EMPTY_PARAMS); builder.endObject(); - assertEquals("{}", Strings.toString(builder)); + assertEquals("{}", builder.toString()); } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/AggregationCollectorManagerTests.java b/server/src/test/java/org/opensearch/search/aggregations/AggregationCollectorManagerTests.java index 7fcf2216040c9..47ce0f120334b 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/AggregationCollectorManagerTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/AggregationCollectorManagerTests.java @@ -10,6 +10,7 @@ import org.apache.lucene.search.Collector; import org.opensearch.search.aggregations.bucket.global.GlobalAggregator; +import org.opensearch.search.profile.query.CollectorResult; import java.util.ArrayList; import java.util.List; @@ -31,9 +32,12 @@ public void testNonGlobalCollectorManagers() throws Exception { assertTrue(aggCollector instanceof MultiBucketCollector); assertEquals(expectedAggCount, ((MultiBucketCollector) aggCollector).getCollectors().length); testCollectorManagerCommon(testAggCollectorManager); + assertEquals(CollectorResult.REASON_AGGREGATION, testAggCollectorManager.getCollectorReason()); // test NonGlobalCollectorManager which will be used 
in concurrent segment search case - testCollectorManagerCommon(new NonGlobalAggCollectorManager(context)); + final NonGlobalAggCollectorManager testNonGlobalAggCollectorManager = new NonGlobalAggCollectorManager(context); + testCollectorManagerCommon(testNonGlobalAggCollectorManager); + assertEquals(CollectorResult.REASON_AGGREGATION, testNonGlobalAggCollectorManager.getCollectorReason()); } public void testGlobalCollectorManagers() throws Exception { @@ -45,11 +49,14 @@ public void testGlobalCollectorManagers() throws Exception { context.aggregations(contextAggregations); final AggregationCollectorManager testAggCollectorManager = new GlobalAggCollectorManagerWithSingleCollector(context); testCollectorManagerCommon(testAggCollectorManager); + assertEquals(CollectorResult.REASON_AGGREGATION_GLOBAL, testAggCollectorManager.getCollectorReason()); Collector aggCollector = testAggCollectorManager.newCollector(); assertTrue(aggCollector instanceof BucketCollector); // test GlobalAggCollectorManager which will be used in concurrent segment search case - testCollectorManagerCommon(new GlobalAggCollectorManager(context)); + final GlobalAggCollectorManager testGlobalAggCollectorManager = new GlobalAggCollectorManager(context); + testCollectorManagerCommon(testGlobalAggCollectorManager); + assertEquals(CollectorResult.REASON_AGGREGATION_GLOBAL, testGlobalAggCollectorManager.getCollectorReason()); } public void testAggCollectorManagersWithBothGlobalNonGlobalAggregators() throws Exception { @@ -70,7 +77,9 @@ public void testAggCollectorManagersWithBothGlobalNonGlobalAggregators() throws assertTrue(globalAggCollector instanceof GlobalAggregator); testCollectorManagerCommon(testAggCollectorManager); + assertEquals(CollectorResult.REASON_AGGREGATION, testAggCollectorManager.getCollectorReason()); testCollectorManagerCommon(testGlobalAggCollectorManager); + assertEquals(CollectorResult.REASON_AGGREGATION_GLOBAL, testGlobalAggCollectorManager.getCollectorReason()); } public void testAssertionWhenCollectorManagerCreatesNoOPCollector() throws Exception { diff --git a/server/src/test/java/org/opensearch/search/aggregations/AggregationProcessorTests.java b/server/src/test/java/org/opensearch/search/aggregations/AggregationProcessorTests.java index cff83b36ce884..d68b0911d3d01 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/AggregationProcessorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/AggregationProcessorTests.java @@ -8,24 +8,32 @@ package org.opensearch.search.aggregations; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Collector; import org.apache.lucene.search.CollectorManager; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; -import org.mockito.ArgumentMatchers; +import org.opensearch.index.engine.Engine; import org.opensearch.search.aggregations.bucket.global.GlobalAggregator; import org.opensearch.search.internal.ContextIndexSearcher; +import org.opensearch.search.profile.query.CollectorResult; import org.opensearch.search.query.ReduceableSearchResult; import org.opensearch.test.TestSearchContext; import java.util.ArrayList; import java.util.Collection; +import java.util.List; +import java.util.concurrent.ExecutorService; + +import org.mockito.ArgumentMatchers; import static org.mockito.ArgumentMatchers.nullable; +import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; import
static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; public class AggregationProcessorTests extends AggregationSetupTests { private final AggregationProcessor testAggregationProcessor = new ConcurrentAggregationProcessor(); @@ -48,15 +56,19 @@ public void testPreProcessWithOnlyNonGlobalAggregators() throws Exception { } public void testPostProcessWithNonGlobalAggregatorsAndSingleSlice() throws Exception { - testPostProcessCommon(multipleNonGlobalAggs, 1, 0, 2); + testPostProcessCommon(multipleNonGlobalAggs, 1, 0, 2, false); } public void testPostProcessWithNonGlobalAggregatorsAndMultipleSlices() throws Exception { - testPostProcessCommon(multipleNonGlobalAggs, randomIntBetween(2, 5), 0, 2); + testPostProcessCommon(multipleNonGlobalAggs, randomIntBetween(2, 5), 0, 2, false); } public void testPostProcessGlobalAndNonGlobalAggregators() throws Exception { - testPostProcessCommon(globalNonGlobalAggs, randomIntBetween(2, 5), 1, 1); + testPostProcessCommon(globalNonGlobalAggs, randomIntBetween(2, 5), 1, 1, false); + } + + public void testPostProcessGlobalAndNonGlobalAggregatorsWithProfilers() throws Exception { + testPostProcessCommon(globalNonGlobalAggs, randomIntBetween(2, 5), 1, 1, true); } private void testPreProcessCommon(String agg, int expectedGlobalAggs, int expectedNonGlobalAggs) throws Exception { @@ -127,8 +139,13 @@ private void testPreProcessCommon( } } - private void testPostProcessCommon(String aggs, int numSlices, int expectedGlobalAggs, int expectedNonGlobalAggsPerSlice) - throws Exception { + private void testPostProcessCommon( + String aggs, + int numSlices, + int expectedGlobalAggs, + int expectedNonGlobalAggsPerSlice, + boolean withProfilers + ) throws Exception { final Collection<Collector> nonGlobalCollectors = new ArrayList<>(); final Collection<Collector> globalCollectors = new ArrayList<>(); testPreProcessCommon(aggs, expectedGlobalAggs, expectedNonGlobalAggsPerSlice, nonGlobalCollectors, globalCollectors); @@ -141,9 +158,31 @@ private void testPostProcessCommon(String aggs, int numSlices, int expectedGloba globalCollectors.add(context.queryCollectorManagers().get(GlobalAggCollectorManager.class).newCollector()); } } - final ContextIndexSearcher testSearcher = mock(ContextIndexSearcher.class); final IndexSearcher.LeafSlice[] slicesToReturn = new IndexSearcher.LeafSlice[numSlices]; - when(testSearcher.getSlices()).thenReturn(slicesToReturn); + + // Build a ContextIndexSearcher that stubs slices() to return slicesToReturn. slices() is protected in IndexSearcher, + // so this builds a real object rather than a mock. The DirectoryReader fetched to build the object is not used for any searches.
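+ // A Mockito spy wraps this real searcher so the global aggregation search(Query, CollectorManager) call can still be + // stubbed with doReturn(...) below, while the overridden slices(...) controls how many slices the concurrent + // aggregation processor observes.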
+ final DirectoryReader reader; + try (Engine.Searcher searcher = context.indexShard().acquireSearcher("test")) { + reader = searcher.getDirectoryReader(); + } + ContextIndexSearcher testSearcher = spy( + new ContextIndexSearcher( + reader, + IndexSearcher.getDefaultSimilarity(), + IndexSearcher.getDefaultQueryCache(), + IndexSearcher.getDefaultQueryCachingPolicy(), + randomBoolean(), + mock(ExecutorService.class), + context + ) { + @Override + protected LeafSlice[] slices(List<LeafReaderContext> leaves) { + return slicesToReturn; + } + } + ); + ((TestSearchContext) context).setSearcher(testSearcher); AggregationCollectorManager collectorManager; if (expectedNonGlobalAggsPerSlice > 0) { @@ -153,15 +192,26 @@ private void testPostProcessCommon(String aggs, int numSlices, int expectedGloba if (expectedGlobalAggs > 0) { collectorManager = (AggregationCollectorManager) context.queryCollectorManagers().get(GlobalAggCollectorManager.class); ReduceableSearchResult result = collectorManager.reduce(globalCollectors); - when(testSearcher.search(nullable(Query.class), ArgumentMatchers.<CollectorManager<?, ReduceableSearchResult>>any())) - .thenReturn(result); + doReturn(result).when(testSearcher) + .search(nullable(Query.class), ArgumentMatchers.<CollectorManager<?, ReduceableSearchResult>>any()); } assertTrue(context.queryResult().hasAggs()); + if (withProfilers) { + ((TestSearchContext) context).withProfilers(); + } testAggregationProcessor.postProcess(context); assertTrue(context.queryResult().hasAggs()); // for global aggs verify that search.search is called with CollectionManager if (expectedGlobalAggs > 0) { verify(testSearcher, times(1)).search(nullable(Query.class), ArgumentMatchers.<CollectorManager<?, ?>>any()); + if (withProfilers) { + // First profiler is from withProfilers() call, second one is from postProcess() call + assertEquals(2, context.getProfilers().getQueryProfilers().size()); + assertEquals( + CollectorResult.REASON_AGGREGATION_GLOBAL, + context.getProfilers().getQueryProfilers().get(1).getCollector().getReason() + ); + } } // after shard level reduce it should have only 1 InternalAggregation instance for each agg in request and internal aggregation // will be equal to sum of expected global and nonglobal aggs diff --git a/server/src/test/java/org/opensearch/search/aggregations/AggregationsTests.java b/server/src/test/java/org/opensearch/search/aggregations/AggregationsTests.java index fd82cd21e0fa6..73e6d3814fa8d 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/AggregationsTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/AggregationsTests.java @@ -32,16 +32,17 @@ package org.opensearch.search.aggregations; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.xcontent.XContentHelper; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.rest.action.search.RestSearchAction; import 
org.opensearch.search.aggregations.Aggregation.CommonFields; import org.opensearch.search.aggregations.bucket.adjacency.InternalAdjacencyMatrixTests; @@ -70,32 +71,32 @@ import org.opensearch.search.aggregations.bucket.terms.StringRareTermsTests; import org.opensearch.search.aggregations.bucket.terms.StringTermsTests; import org.opensearch.search.aggregations.bucket.terms.UnsignedLongTermsTests; +import org.opensearch.search.aggregations.metrics.InternalAvgTests; +import org.opensearch.search.aggregations.metrics.InternalCardinalityTests; import org.opensearch.search.aggregations.metrics.InternalExtendedStatsTests; +import org.opensearch.search.aggregations.metrics.InternalGeoCentroidTests; +import org.opensearch.search.aggregations.metrics.InternalHDRPercentilesRanksTests; +import org.opensearch.search.aggregations.metrics.InternalHDRPercentilesTests; import org.opensearch.search.aggregations.metrics.InternalMaxTests; import org.opensearch.search.aggregations.metrics.InternalMedianAbsoluteDeviationTests; import org.opensearch.search.aggregations.metrics.InternalMinTests; +import org.opensearch.search.aggregations.metrics.InternalScriptedMetricTests; import org.opensearch.search.aggregations.metrics.InternalStatsBucketTests; import org.opensearch.search.aggregations.metrics.InternalStatsTests; import org.opensearch.search.aggregations.metrics.InternalSumTests; -import org.opensearch.search.aggregations.metrics.InternalAvgTests; -import org.opensearch.search.aggregations.metrics.InternalCardinalityTests; -import org.opensearch.search.aggregations.metrics.InternalGeoCentroidTests; -import org.opensearch.search.aggregations.metrics.InternalHDRPercentilesRanksTests; -import org.opensearch.search.aggregations.metrics.InternalHDRPercentilesTests; import org.opensearch.search.aggregations.metrics.InternalTDigestPercentilesRanksTests; import org.opensearch.search.aggregations.metrics.InternalTDigestPercentilesTests; -import org.opensearch.search.aggregations.metrics.InternalScriptedMetricTests; import org.opensearch.search.aggregations.metrics.InternalTopHitsTests; import org.opensearch.search.aggregations.metrics.InternalValueCountTests; import org.opensearch.search.aggregations.metrics.InternalWeightedAvgTests; -import org.opensearch.search.aggregations.pipeline.InternalSimpleValueTests; import org.opensearch.search.aggregations.pipeline.InternalBucketMetricValueTests; -import org.opensearch.search.aggregations.pipeline.InternalPercentilesBucketTests; -import org.opensearch.search.aggregations.pipeline.InternalExtendedStatsBucketTests; import org.opensearch.search.aggregations.pipeline.InternalDerivativeTests; -import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.search.aggregations.pipeline.InternalExtendedStatsBucketTests; +import org.opensearch.search.aggregations.pipeline.InternalPercentilesBucketTests; +import org.opensearch.search.aggregations.pipeline.InternalSimpleValueTests; import org.opensearch.test.InternalAggregationTestCase; import org.opensearch.test.InternalMultiBucketAggregationTestCase; +import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; import org.junit.After; import org.junit.Before; @@ -211,7 +212,7 @@ public void testFromXContentWithRandomFields() throws IOException { /** * Test that parsing works for a randomly created Aggregations object with a * randomized aggregation tree. 
The test randomly chooses an - * {@link XContentType}, randomizes the order of the {@link XContent} fields + * {@link MediaType}, randomizes the order of the {@link XContent} fields * and randomly sets the `humanReadable` flag when rendering the * {@link XContent}. * @@ -221,10 +222,10 @@ public void testFromXContentWithRandomFields() throws IOException { * responses */ private void parseAndAssert(boolean addRandomFields) throws IOException { - XContentType xContentType = randomFrom(XContentType.values()); + MediaType mediaType = randomFrom(XContentType.values()); final ToXContent.Params params = new ToXContent.MapParams(singletonMap(RestSearchAction.TYPED_KEYS_PARAM, "true")); Aggregations aggregations = createTestInstance(1, 0, 3); - BytesReference originalBytes = toShuffledXContent(aggregations, xContentType, params, randomBoolean()); + BytesReference originalBytes = toShuffledXContent(aggregations, mediaType, params, randomBoolean()); BytesReference mutated; if (addRandomFields) { /* @@ -254,18 +255,18 @@ private void parseAndAssert(boolean addRandomFields) throws IOException { || path.endsWith("correlation") || path.contains(CommonFields.VALUE.getPreferredName()) || path.endsWith(CommonFields.KEY.getPreferredName())) || path.contains("top_hits"); - mutated = insertRandomFields(xContentType, originalBytes, excludes, random()); + mutated = insertRandomFields(mediaType, originalBytes, excludes, random()); } else { mutated = originalBytes; } - try (XContentParser parser = createParser(xContentType.xContent(), mutated)) { + try (XContentParser parser = createParser(mediaType.xContent(), mutated)) { assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); assertEquals(Aggregations.AGGREGATIONS_FIELD, parser.currentName()); assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); Aggregations parsedAggregations = Aggregations.fromXContent(parser); - BytesReference parsedBytes = XContentHelper.toXContent(parsedAggregations, xContentType, randomBoolean()); - OpenSearchAssertions.assertToXContentEquivalent(originalBytes, parsedBytes, xContentType); + BytesReference parsedBytes = XContentHelper.toXContent(parsedAggregations, mediaType, randomBoolean()); + OpenSearchAssertions.assertToXContentEquivalent(originalBytes, parsedBytes, mediaType); } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/AggregatorBaseTests.java b/server/src/test/java/org/opensearch/search/aggregations/AggregatorBaseTests.java index 312ebfcfae5e8..ce96623ea06df 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/AggregatorBaseTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/AggregatorBaseTests.java @@ -38,16 +38,16 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; -import org.opensearch.common.breaker.CircuitBreaker; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.BigArrays; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.index.IndexService; import org.opensearch.index.engine.Engine; import org.opensearch.index.mapper.DateFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.index.query.QueryShardContext; -import org.opensearch.indices.breaker.CircuitBreakerService; import 
org.opensearch.search.aggregations.support.ValuesSourceConfig; import org.opensearch.search.internal.SearchContext; import org.opensearch.test.OpenSearchSingleNodeTestCase; @@ -135,7 +135,7 @@ private ValuesSourceConfig getVSConfig( indexed, false, true, - DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, + DateFieldMapper.getDefaultDateTimeFormatter(), resolution, null, Collections.emptyMap() @@ -184,7 +184,7 @@ public void testShortcutIsApplicable() throws IOException { assertNull(pointReaderShim(mockSearchContext(null), null, getVSConfig("number", resolution, false, context))); } // Check that we decode a dates "just like" the doc values instance. - Instant expected = Instant.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse("2020-01-01T00:00:00Z")); + Instant expected = Instant.from(DateFieldMapper.getDefaultDateTimeFormatter().parse("2020-01-01T00:00:00Z")); byte[] scratch = new byte[8]; LongPoint.encodeDimension(DateFieldMapper.Resolution.MILLISECONDS.convert(expected), scratch, 0); assertThat( diff --git a/server/src/test/java/org/opensearch/search/aggregations/AggregatorFactoriesBuilderTests.java b/server/src/test/java/org/opensearch/search/aggregations/AggregatorFactoriesBuilderTests.java index 32fd3fe2ec325..0e77838a62292 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/AggregatorFactoriesBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/AggregatorFactoriesBuilderTests.java @@ -32,9 +32,9 @@ package org.opensearch.search.aggregations; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.Writeable.Reader; -import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.search.SearchModule; diff --git a/server/src/test/java/org/opensearch/search/aggregations/AggregatorFactoriesTests.java b/server/src/test/java/org/opensearch/search/aggregations/AggregatorFactoriesTests.java index 1f29a17deeaa8..c930d27b068f8 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/AggregatorFactoriesTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/AggregatorFactoriesTests.java @@ -31,16 +31,16 @@ package org.opensearch.search.aggregations; -import org.opensearch.action.ActionListener; -import org.opensearch.core.common.ParsingException; import org.opensearch.common.SetOnce; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; @@ -239,7 +239,7 @@ public void testInvalidType() throws Exception { public void testRewriteAggregation() throws Exception { XContentType xContentType = randomFrom(XContentType.values()); BytesReference bytesReference; - try 
(XContentBuilder builder = XContentFactory.contentBuilder(xContentType)) { + try (XContentBuilder builder = MediaTypeRegistry.contentBuilder(xContentType)) { builder.startObject(); { builder.startObject("terms"); diff --git a/server/src/test/java/org/opensearch/search/aggregations/InternalAggregationsTests.java b/server/src/test/java/org/opensearch/search/aggregations/InternalAggregationsTests.java index 354c635e06fab..6fabd36a23b27 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/InternalAggregationsTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/InternalAggregationsTests.java @@ -34,20 +34,21 @@ import org.apache.lucene.util.BytesRef; import org.opensearch.Version; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.settings.Settings; import org.opensearch.search.DocValueFormat; import org.opensearch.search.SearchModule; import org.opensearch.search.aggregations.bucket.histogram.InternalDateHistogramTests; import org.opensearch.search.aggregations.bucket.terms.StringTerms; import org.opensearch.search.aggregations.bucket.terms.StringTermsTests; +import org.opensearch.search.aggregations.bucket.terms.TermsAggregator; import org.opensearch.search.aggregations.pipeline.InternalSimpleValueTests; import org.opensearch.search.aggregations.pipeline.MaxBucketPipelineAggregationBuilder; import org.opensearch.search.aggregations.pipeline.PipelineAggregator; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.InternalAggregationTestCase; +import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; import java.util.ArrayList; @@ -76,15 +77,14 @@ public void testNonFinalReduceTopLevelPipelineAggs() { "name", BucketOrder.key(true), BucketOrder.key(true), - 10, - 1, Collections.emptyMap(), DocValueFormat.RAW, 25, false, 10, Collections.emptyList(), - 0 + 0, + new TermsAggregator.BucketCountThresholds(1, 0, 10, 25) ); List<InternalAggregations> aggs = singletonList(InternalAggregations.from(Collections.singletonList(terms))); InternalAggregations reducedAggs = InternalAggregations.topLevelReduce(aggs, maxBucketReduceContext().forPartialReduction()); @@ -96,15 +96,14 @@ public void testFinalReduceTopLevelPipelineAggs() { "name", BucketOrder.key(true), BucketOrder.key(true), - 10, - 1, Collections.emptyMap(), DocValueFormat.RAW, 25, false, 10, Collections.emptyList(), - 0 + 0, + new TermsAggregator.BucketCountThresholds(1, 0, 10, 25) ); InternalAggregations aggs = InternalAggregations.from(Collections.singletonList(terms)); diff --git a/server/src/test/java/org/opensearch/search/aggregations/InternalMultiBucketAggregationTests.java b/server/src/test/java/org/opensearch/search/aggregations/InternalMultiBucketAggregationTests.java index bc48b546e6d28..b7f4094da9990 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/InternalMultiBucketAggregationTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/InternalMultiBucketAggregationTests.java @@ -37,6 +37,7 @@ import org.opensearch.search.aggregations.bucket.terms.InternalTerms; import org.opensearch.search.aggregations.bucket.terms.LongTerms; import org.opensearch.search.aggregations.bucket.terms.StringTerms; +import 
org.opensearch.search.aggregations.bucket.terms.TermsAggregator; import org.opensearch.search.aggregations.metrics.InternalAvg; import org.opensearch.search.aggregations.support.AggregationPath; import org.opensearch.test.OpenSearchTestCase; @@ -164,20 +165,18 @@ public void testResolveToSpecificBucket() { DocValueFormat.RAW ) ); - InternalTerms termsAgg = new StringTerms( "string_terms", BucketOrder.count(false), BucketOrder.count(false), - 1, - 0, Collections.emptyMap(), DocValueFormat.RAW, 1, false, 0, stringBuckets, - 0 + 0, + new TermsAggregator.BucketCountThresholds(0, 0, 1, 1) ); InternalAggregations internalAggregations = InternalAggregations.from(Collections.singletonList(termsAgg)); LongTerms.Bucket bucket = new LongTerms.Bucket(19, 1, internalAggregations, false, 0, DocValueFormat.RAW); @@ -208,15 +207,14 @@ public void testResolveToMissingSpecificBucket() { "string_terms", BucketOrder.count(false), BucketOrder.count(false), - 1, - 0, Collections.emptyMap(), DocValueFormat.RAW, 1, false, 0, stringBuckets, - 0 + 0, + new TermsAggregator.BucketCountThresholds(0, 0, 1, 1) ); InternalAggregations internalAggregations = InternalAggregations.from(Collections.singletonList(termsAgg)); LongTerms.Bucket bucket = new LongTerms.Bucket(19, 1, internalAggregations, false, 0, DocValueFormat.RAW); diff --git a/server/src/test/java/org/opensearch/search/aggregations/MultiBucketCollectorTests.java b/server/src/test/java/org/opensearch/search/aggregations/MultiBucketCollectorTests.java index ae1d5c60c231a..8c94a9854ef10 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/MultiBucketCollectorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/MultiBucketCollectorTests.java @@ -35,13 +35,13 @@ import org.apache.lucene.document.Document; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.CollectionTerminatedException; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Scorable; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/search/aggregations/MultiBucketConsumerTests.java b/server/src/test/java/org/opensearch/search/aggregations/MultiBucketConsumerTests.java new file mode 100644 index 0000000000000..dff7dbc8901a8 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/aggregations/MultiBucketConsumerTests.java @@ -0,0 +1,73 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.aggregations; + +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.CircuitBreakingException; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.concurrent.atomic.LongAdder; + +import org.mockito.Mockito; + +import static org.opensearch.search.aggregations.MultiBucketConsumerService.DEFAULT_MAX_BUCKETS; + +public class MultiBucketConsumerTests extends OpenSearchTestCase { + + public void testMultiConsumerAcceptWhenCBTripped() { + CircuitBreaker breaker = Mockito.mock(CircuitBreaker.class); + MultiBucketConsumerService.MultiBucketConsumer multiBucketConsumer = new MultiBucketConsumerService.MultiBucketConsumer( + DEFAULT_MAX_BUCKETS, + breaker, + new LongAdder(), + true, + 1 + ); + // exception is thrown upfront since the circuit breaker has already tripped + expectThrows(CircuitBreakingException.class, () -> multiBucketConsumer.accept(0)); + Mockito.verify(breaker, Mockito.times(0)).addEstimateBytesAndMaybeBreak(0, "allocated_buckets"); + } + + public void testMultiConsumerAcceptToTripCB() { + CircuitBreaker breaker = Mockito.mock(CircuitBreaker.class); + LongAdder callCount = new LongAdder(); + for (int i = 0; i < 1024; i++) { + callCount.increment(); + } + MultiBucketConsumerService.MultiBucketConsumer multiBucketConsumer = new MultiBucketConsumerService.MultiBucketConsumer( + DEFAULT_MAX_BUCKETS, + breaker, + callCount, + false, + 2 + ); + // circuit breaker check is performed as the value of call count would be 1025 which is still in range + Mockito.when(breaker.addEstimateBytesAndMaybeBreak(0, "allocated_buckets")).thenThrow(CircuitBreakingException.class); + expectThrows(CircuitBreakingException.class, () -> multiBucketConsumer.accept(0)); + Mockito.verify(breaker, Mockito.times(1)).addEstimateBytesAndMaybeBreak(0, "allocated_buckets"); + } + + public void testMultiConsumerAccept() { + CircuitBreaker breaker = Mockito.mock(CircuitBreaker.class); + LongAdder callCount = new LongAdder(); + for (int i = 0; i < 1100; i++) { + callCount.increment(); + } + MultiBucketConsumerService.MultiBucketConsumer multiBucketConsumer = new MultiBucketConsumerService.MultiBucketConsumer( + DEFAULT_MAX_BUCKETS, + breaker, + callCount, + false, + 1 + ); + // no exception is thrown as the call count value is not in the expected range and CB is not checked + multiBucketConsumer.accept(0); + Mockito.verify(breaker, Mockito.times(0)).addEstimateBytesAndMaybeBreak(0, "allocated_buckets"); + } +} diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/BestBucketsDeferringCollectorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/BestBucketsDeferringCollectorTests.java index cc9628a13c060..692032c4795de 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/BestBucketsDeferringCollectorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/BestBucketsDeferringCollectorTests.java @@ -37,7 +37,6 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; @@ -47,6 +46,7 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; +import 
org.apache.lucene.tests.index.RandomIndexWriter; import org.opensearch.search.aggregations.AggregatorTestCase; import org.opensearch.search.aggregations.BucketCollector; import org.opensearch.search.aggregations.LeafBucketCollector; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/BucketsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/BucketsAggregatorTests.java index 55f8d11e2d934..6a84b45526b44 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/BucketsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/BucketsAggregatorTests.java @@ -37,12 +37,12 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.store.Directory; -import org.opensearch.common.breaker.CircuitBreaker; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.index.mapper.NumberFieldMapper; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.search.aggregations.AggregatorFactories; import org.opensearch.search.aggregations.AggregatorTestCase; import org.opensearch.search.aggregations.InternalAggregation; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/DateRangeTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/DateRangeTests.java index f828ff8b11222..94d533752a5dc 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/DateRangeTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/DateRangeTests.java @@ -32,9 +32,9 @@ package org.opensearch.search.aggregations.bucket; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.search.aggregations.BaseAggregationTestCase; import org.opensearch.search.aggregations.bucket.range.DateRangeAggregationBuilder; import org.opensearch.search.aggregations.bucket.range.RangeAggregator.Range; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/DateScriptMocksPlugin.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/DateScriptMocksPlugin.java index 5a9a9e2b6cb51..d6ba4eedd3a19 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/DateScriptMocksPlugin.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/DateScriptMocksPlugin.java @@ -43,7 +43,7 @@ /** * Mock scripts shared by DateRangeIT and DateHistogramIT. - * + * <p> * Provides {@link DateScriptMocksPlugin#EXTRACT_FIELD}, {@link DateScriptMocksPlugin#DOUBLE_PLUS_ONE_MONTH}, * and {@link DateScriptMocksPlugin#LONG_PLUS_ONE_MONTH} scripts. 
*/ diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/FiltersTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/FiltersTests.java index 3a8829d5a6433..56f7f450dbdfb 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/FiltersTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/FiltersTests.java @@ -32,10 +32,10 @@ package org.opensearch.search.aggregations.bucket; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.query.BoolQueryBuilder; import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.index.query.MatchNoneQueryBuilder; @@ -106,7 +106,7 @@ public void testFiltersSortedByKey() { } public void testOtherBucket() throws IOException { - XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); builder.startObject(); builder.startArray("filters").startObject().startObject("term").field("field", "foo").endObject().endObject().endArray(); builder.endObject(); @@ -116,7 +116,7 @@ public void testOtherBucket() throws IOException { // The other bucket is disabled by default assertFalse(filters.otherBucket()); - builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); builder.startObject(); builder.startArray("filters").startObject().startObject("term").field("field", "foo").endObject().endObject().endArray(); builder.field("other_bucket_key", "some_key"); @@ -128,7 +128,7 @@ public void testOtherBucket() throws IOException { // but setting a key enables it automatically assertTrue(filters.otherBucket()); - builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); builder.startObject(); builder.startArray("filters").startObject().startObject("term").field("field", "foo").endObject().endObject().endArray(); builder.field("other_bucket", false); @@ -207,7 +207,7 @@ public void testRewritePreservesOtherBucket() throws IOException { public void testEmptyFilters() throws IOException { { - XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); builder.startObject(); builder.startArray("filters").endArray(); // unkeyed array builder.endObject(); @@ -221,7 +221,7 @@ public void testEmptyFilters() throws IOException { } { - XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); builder.startObject(); builder.startObject("filters").endObject(); // keyed object builder.endObject(); diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/GeoDistanceRangeTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/GeoDistanceRangeTests.java index 3f3606ec8388b..c85ad6e215748 100644 --- 
+++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/GeoDistanceRangeTests.java
@@ -35,9 +35,9 @@
 import org.opensearch.common.geo.GeoDistance;
 import org.opensearch.common.geo.GeoPoint;
 import org.opensearch.common.unit.DistanceUnit;
+import org.opensearch.common.xcontent.json.JsonXContent;
 import org.opensearch.core.xcontent.XContentParseException;
 import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.common.xcontent.json.JsonXContent;
 import org.opensearch.search.aggregations.BaseAggregationTestCase;
 import org.opensearch.search.aggregations.bucket.range.GeoDistanceAggregationBuilder;
 import org.opensearch.search.aggregations.bucket.range.GeoDistanceAggregationBuilder.Range;
diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/GlobalAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/GlobalAggregatorTests.java
index abcaf06645a20..726fb4bcb741c 100644
--- a/server/src/test/java/org/opensearch/search/aggregations/bucket/GlobalAggregatorTests.java
+++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/GlobalAggregatorTests.java
@@ -35,10 +35,10 @@
 import org.apache.lucene.document.SortedNumericDocValuesField;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.tests.index.RandomIndexWriter;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.tests.index.RandomIndexWriter;
 import org.opensearch.common.CheckedConsumer;
 import org.opensearch.index.mapper.MappedFieldType;
 import org.opensearch.index.mapper.NumberFieldMapper;
diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/HistogramTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/HistogramTests.java
index b4ec55543f494..270ba863001c0 100644
--- a/server/src/test/java/org/opensearch/search/aggregations/bucket/HistogramTests.java
+++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/HistogramTests.java
@@ -33,8 +33,8 @@
 package org.opensearch.search.aggregations.bucket;
 
 import org.opensearch.search.aggregations.BaseAggregationTestCase;
-import org.opensearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder;
 import org.opensearch.search.aggregations.BucketOrder;
+import org.opensearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder;
 
 import java.util.ArrayList;
 import java.util.List;
diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/RangeTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/RangeTests.java
index 5c9cdf9d954a7..c8a6f5e923d5a 100644
--- a/server/src/test/java/org/opensearch/search/aggregations/bucket/RangeTests.java
+++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/RangeTests.java
@@ -32,9 +32,9 @@
 package org.opensearch.search.aggregations.bucket;
 
+import org.opensearch.common.xcontent.json.JsonXContent;
 import org.opensearch.core.xcontent.XContentParseException;
 import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.common.xcontent.json.JsonXContent;
 import org.opensearch.search.aggregations.BaseAggregationTestCase;
 import org.opensearch.search.aggregations.bucket.range.RangeAggregationBuilder;
 import org.opensearch.search.aggregations.bucket.range.RangeAggregator.Range;
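
The ShardSizeTestCase diff just below converts the suite from OpenSearchIntegTestCase to ParameterizedStaticSettingsOpenSearchIntegTestCase, so every test method runs once with concurrent segment search disabled and once enabled. As a rough template of that pattern (the class name MyShardSizeIT is hypothetical; the constructor and factory shapes mirror the hunk):

import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
import org.opensearch.common.settings.Settings;
import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;
import java.util.Arrays;
import java.util.Collection;
import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;

// Hypothetical suite: randomizedtesting instantiates it once per parameter
// set, so each test method executes under both cluster settings.
public class MyShardSizeIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase {

    public MyShardSizeIT(Settings staticSettings) {
        super(staticSettings);
    }

    @ParametersFactory
    public static Collection<Object[]> parameters() {
        return Arrays.asList(
            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
        );
    }
}
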
diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/ShardSizeTestCase.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/ShardSizeTestCase.java
index d6981d1c34652..eef7e4c45849d 100644
--- a/server/src/test/java/org/opensearch/search/aggregations/bucket/ShardSizeTestCase.java
+++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/ShardSizeTestCase.java
@@ -32,21 +32,38 @@
 package org.opensearch.search.aggregations.bucket;
 
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
 import org.opensearch.action.index.IndexRequestBuilder;
 import org.opensearch.action.search.SearchResponse;
-import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;
 
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.List;
 
 import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.opensearch.index.query.QueryBuilders.matchAllQuery;
+import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse;
 import static org.hamcrest.Matchers.is;
 
-public abstract class ShardSizeTestCase extends OpenSearchIntegTestCase {
+public abstract class ShardSizeTestCase extends ParameterizedStaticSettingsOpenSearchIntegTestCase {
+
+    public ShardSizeTestCase(Settings dynamicSettings) {
+        super(dynamicSettings);
+    }
+
+    @ParametersFactory
+    public static Collection<Object[]> parameters() {
+        return Arrays.asList(
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
+        );
+    }
 
     @Override
     protected int numberOfShards() {
diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java
index eabc4b7764eed..13a3d8145743b 100644
--- a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java
+++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java
@@ -35,10 +35,14 @@
 import org.apache.lucene.document.LongPoint;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.DocValuesFieldExistsQuery;
+import org.apache.lucene.search.FieldExistsQuery;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.TermQuery;
 import org.opensearch.OpenSearchParseException;
+import org.opensearch.index.query.MatchAllQueryBuilder;
+import org.opensearch.search.aggregations.AggregationBuilders;
 import org.opensearch.search.aggregations.Aggregator;
+import org.opensearch.search.aggregations.bucket.filter.FilterAggregationBuilder;
 import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval;
 import org.opensearch.search.aggregations.bucket.missing.MissingOrder;
 import org.opensearch.search.aggregations.bucket.terms.StringTerms;
@@ -1253,6 +1257,74 @@ public void testMultiValuedWithKeywordLongAndDouble() throws Exception {
         );
     }
 
+    public void testDateHistogramSourceWithSize() throws IOException {
+        final List<Map<String, List<Object>>>
dataset = new ArrayList<>( + Arrays.asList( + createDocument("date", asLong("2017-10-20T03:08:45")), + createDocument("date", asLong("2016-09-20T09:00:34")), + createDocument("date", asLong("2016-09-20T11:34:00")), + createDocument("date", asLong("2017-10-20T06:09:24")), + createDocument("date", asLong("2017-10-19T06:09:24")), + createDocument("long", 4L) + ) + ); + testSearchCase( + Arrays.asList( + new MatchAllDocsQuery(), + new FieldExistsQuery("date"), + LongPoint.newRangeQuery("date", asLong("2016-09-20T09:00:34"), asLong("2017-10-20T06:09:24")) + ), + dataset, + () -> { + DateHistogramValuesSourceBuilder histo = new DateHistogramValuesSourceBuilder("date").field("date") + .calendarInterval(DateHistogramInterval.days(1)); + return new CompositeAggregationBuilder("name", Collections.singletonList(histo)).size(1); + }, + (result) -> { + assertEquals(1, result.getBuckets().size()); + assertEquals("{date=1474329600000}", result.afterKey().toString()); // 2017-10-20T00:00:00 + assertEquals("{date=1474329600000}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(0).getDocCount()); + } + ); + } + + public void testDateHistogramSourceWithDocCountField() throws IOException { + final List<Map<String, List<Object>>> dataset = new ArrayList<>( + Arrays.asList( + createDocument("date", asLong("2017-10-20T03:08:45"), "_doc_count", 5), + createDocument("date", asLong("2016-09-20T09:00:34")), + createDocument("date", asLong("2016-09-20T11:34:00"), "_doc_count", 2), + createDocument("date", asLong("2017-10-20T06:09:24")), + createDocument("date", asLong("2017-10-19T06:09:24"), "_doc_count", 3), + createDocument("long", 4L) + ) + ); + testSearchCase( + Arrays.asList( + new MatchAllDocsQuery(), + new FieldExistsQuery("date"), + LongPoint.newRangeQuery("date", asLong("2016-09-20T09:00:34"), asLong("2017-10-20T06:09:24")) + ), + dataset, + () -> { + DateHistogramValuesSourceBuilder histo = new DateHistogramValuesSourceBuilder("date").field("date") + .calendarInterval(DateHistogramInterval.days(1)); + return new CompositeAggregationBuilder("name", Collections.singletonList(histo)); + }, + (result) -> { + assertEquals(3, result.getBuckets().size()); + assertEquals("{date=1508457600000}", result.afterKey().toString()); + assertEquals("{date=1474329600000}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(3L, result.getBuckets().get(0).getDocCount()); + assertEquals("{date=1508371200000}", result.getBuckets().get(1).getKeyAsString()); + assertEquals(3L, result.getBuckets().get(1).getDocCount()); + assertEquals("{date=1508457600000}", result.getBuckets().get(2).getKeyAsString()); + assertEquals(6L, result.getBuckets().get(2).getDocCount()); + } + ); + } + public void testWithDateHistogram() throws IOException { final List<Map<String, List<Object>>> dataset = new ArrayList<>(); dataset.addAll( @@ -1279,7 +1351,7 @@ public void testWithDateHistogram() throws IOException { }, (result) -> { assertEquals(3, result.getBuckets().size()); - assertEquals("{date=1508457600000}", result.afterKey().toString()); + assertEquals("{date=1508457600000}", result.afterKey().toString()); // 2017-10-20T00:00:00 assertEquals("{date=1474329600000}", result.getBuckets().get(0).getKeyAsString()); assertEquals(2L, result.getBuckets().get(0).getDocCount()); assertEquals("{date=1508371200000}", result.getBuckets().get(1).getKeyAsString()); @@ -1300,9 +1372,8 @@ public void testWithDateHistogram() throws IOException { DateHistogramValuesSourceBuilder histo = new 
DateHistogramValuesSourceBuilder("date").field("date") .calendarInterval(DateHistogramInterval.days(1)); return new CompositeAggregationBuilder("name", Collections.singletonList(histo)).aggregateAfter( - createAfterKey("date", 1474329600000L) + createAfterKey("date", 1474329600000L) // 2016-09-20T00:00:00 ); - }, (result) -> { assertEquals(2, result.getBuckets().size()); @@ -2242,21 +2313,20 @@ private <T extends Comparable<T>, V extends Comparable<T>> void testRandomTerms( Function<Object, V> transformKey ) throws IOException { int numTerms = randomIntBetween(10, 500); - List<T> terms = new ArrayList<>(); + List<T> terms = new ArrayList<>(); // possible values for the terms for (int i = 0; i < numTerms; i++) { terms.add(randomSupplier.get()); } int numDocs = randomIntBetween(100, 200); List<Map<String, List<Object>>> dataset = new ArrayList<>(); - - Set<T> valuesSet = new HashSet<>(); - Map<Comparable<?>, AtomicLong> expectedDocCounts = new HashMap<>(); + Set<T> valuesSet = new HashSet<>(); // how many different values + Map<Comparable<?>, AtomicLong> expectedDocCounts = new HashMap<>(); // how many docs for each value for (int i = 0; i < numDocs; i++) { int numValues = randomIntBetween(1, 5); Set<Object> values = new HashSet<>(); for (int j = 0; j < numValues; j++) { int rand = randomIntBetween(0, terms.size() - 1); - if (values.add(terms.get(rand))) { + if (values.add(terms.get(rand))) { // values are unique for one doc AtomicLong count = expectedDocCounts.computeIfAbsent(terms.get(rand), (k) -> new AtomicLong(0)); count.incrementAndGet(); valuesSet.add(terms.get(rand)); @@ -2264,9 +2334,8 @@ private <T extends Comparable<T>, V extends Comparable<T>> void testRandomTerms( } dataset.add(Collections.singletonMap(field, new ArrayList<>(values))); } - List<T> expected = new ArrayList<>(valuesSet); + List<T> expected = new ArrayList<>(valuesSet); // how many buckets expected Collections.sort(expected); - List<Comparable<T>> seen = new ArrayList<>(); AtomicBoolean finish = new AtomicBoolean(false); int size = randomIntBetween(1, expected.size()); @@ -2463,4 +2532,41 @@ public void testIndexSortWithDuplicate() throws Exception { ); } } + + public void testUnderFilterAggregator() throws IOException { + executeTestCase(false, false, new MatchAllDocsQuery(), Collections.emptyList(), () -> { + FilterAggregationBuilder filterAggregatorBuilder = new FilterAggregationBuilder( + "filter_mcmilterface", + new MatchAllQueryBuilder() + ); + filterAggregatorBuilder.subAggregation( + new CompositeAggregationBuilder( + "compo", + Collections.singletonList(new TermsValuesSourceBuilder("keyword").field("keyword")) + ) + ); + return filterAggregatorBuilder; + }, (ic) -> {}); + } + + public void testUnderBucketAggregator() throws IOException { + try { + executeTestCase(false, false, new MatchAllDocsQuery(), Collections.emptyList(), () -> { + TermsAggregationBuilder termsAggregationBuilder = AggregationBuilders.terms("terms").field("keyword"); + termsAggregationBuilder.subAggregation( + new CompositeAggregationBuilder( + "compo", + Collections.singletonList(new TermsValuesSourceBuilder("keyword").field("keyword")) + ) + ); + return termsAggregationBuilder; + }, (ic) -> {}); + fail("Should have thrown an IllegalArgumentException"); + } catch (IllegalArgumentException iae) { + assertTrue( + iae.getMessage() + .contains("[composite] aggregation cannot be used with a parent aggregation of type: [TermsAggregatorFactory]") + ); + } + } } diff --git 
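
The two tests added above, testUnderFilterAggregator and testUnderBucketAggregator, pin down where a composite aggregation may sit in the tree: under a single-bucket parent such as filter it is accepted, while under a multi-bucket parent such as terms the aggregator build fails. A condensed sketch of the same contrast (builder names like "wrapper" are arbitrary; this is an illustration, not the tests themselves):

import java.util.Collections;
import org.opensearch.index.query.MatchAllQueryBuilder;
import org.opensearch.search.aggregations.AggregationBuilders;
import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder;
import org.opensearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder;
import org.opensearch.search.aggregations.bucket.filter.FilterAggregationBuilder;
import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder;

public class CompositeNestingSketch {
    static CompositeAggregationBuilder composite() {
        return new CompositeAggregationBuilder(
            "compo",
            Collections.singletonList(new TermsValuesSourceBuilder("keyword").field("keyword"))
        );
    }

    public static void main(String[] args) {
        // Accepted: filter produces exactly one bucket, so composite can be its child.
        FilterAggregationBuilder ok = new FilterAggregationBuilder("wrapper", new MatchAllQueryBuilder());
        ok.subAggregation(composite());

        // Rejected later, at aggregator-build time, with IllegalArgumentException:
        // terms is multi-bucket, so composite cannot run once per parent bucket.
        TermsAggregationBuilder bad = AggregationBuilders.terms("terms").field("keyword");
        bad.subAggregation(composite());
    }
}
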
a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueueTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueueTests.java index 8bb7feb372788..74673b56a7de5 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueueTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueueTests.java @@ -44,7 +44,6 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.sandbox.document.BigIntegerPoint; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.CollectionTerminatedException; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.MatchAllDocsQuery; @@ -53,6 +52,7 @@ import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.SortedSetSortField; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/InternalCompositeTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/InternalCompositeTests.java index 4121954c1ede2..59bc3a31a5a8a 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/InternalCompositeTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/InternalCompositeTests.java @@ -57,7 +57,6 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; -import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiLettersOfLengthBetween; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static java.util.stream.Collectors.toList; @@ -67,6 +66,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiLettersOfLengthBetween; public class InternalCompositeTests extends InternalMultiBucketAggregationTestCase<InternalComposite> { private List<String> sourceNames; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/LegacyIntervalCompositeAggBuilderTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/LegacyIntervalCompositeAggBuilderTests.java index aa51f9b11ea19..39054f15826f0 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/LegacyIntervalCompositeAggBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/LegacyIntervalCompositeAggBuilderTests.java @@ -44,7 +44,7 @@ /** * Duplicates the tests from {@link CompositeAggregationBuilderTests}, except using the deprecated * interval on date histo. Separated to make testing the warnings easier. 
- * + * <p> * Can be removed in when the legacy interval options are gone */ public class LegacyIntervalCompositeAggBuilderTests extends BaseAggregationTestCase<CompositeAggregationBuilder> { diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/filter/FilterAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/filter/FilterAggregatorTests.java index 4ed95bf7391a6..2125748cd9661 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/filter/FilterAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/filter/FilterAggregatorTests.java @@ -35,10 +35,10 @@ import org.apache.lucene.document.Field; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.query.MatchAllQueryBuilder; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java index 8ff2e57d5dbb6..38530d8ccc623 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java @@ -35,10 +35,10 @@ import org.apache.lucene.document.Field; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.query.QueryBuilder; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java index 315f148ad5a02..dda053af78b30 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java @@ -39,12 +39,12 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; @@ -969,6 +969,7 @@ private void indexSampleData(List<ZonedDateTime> dataset, RandomIndexWriter inde for (final ZonedDateTime date : dataset) { final long instant = 
date.toInstant().toEpochMilli(); document.add(new SortedNumericDocValuesField(DATE_FIELD, instant)); + document.add(new LongPoint(DATE_FIELD, instant)); document.add(new LongPoint(INSTANT_FIELD, instant)); document.add(new SortedNumericDocValuesField(NUMERIC_FIELD, i)); indexWriter.addDocument(document); diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTestCase.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTestCase.java index f3cda87342c18..fdbc0160e51a3 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTestCase.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTestCase.java @@ -34,8 +34,8 @@ import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.document.SortedSetDocValuesField; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; import org.opensearch.common.CheckedBiConsumer; import org.opensearch.index.mapper.DateFieldMapper; @@ -125,7 +125,7 @@ protected final DateFieldMapper.DateFieldType aggregableDateFieldType(boolean us isSearchable, false, true, - DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, + DateFieldMapper.getDefaultDateTimeFormatter(), useNanosecondResolution ? DateFieldMapper.Resolution.NANOSECONDS : DateFieldMapper.Resolution.MILLISECONDS, null, Collections.emptyMap() diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java index 7bd39c72ae325..2a4fbca7a8541 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java @@ -34,17 +34,19 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.LongPoint; +import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.opensearch.common.time.DateFormatters; import org.opensearch.index.mapper.DateFieldMapper; +import org.opensearch.index.mapper.DocCountFieldMapper; import org.opensearch.search.aggregations.AggregationBuilder; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.terms.StringTerms; @@ -1178,6 +1180,181 @@ public void testOverlappingBounds() { ); } + public void testHardBoundsNotOverlapping() throws IOException { + testSearchCase( + new MatchAllDocsQuery(), + Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05"), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY) + .hardBounds(new LongBounds("2018-01-01", "2020-01-01")) + 
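
The testHardBoundsNotOverlapping cases that begin above exercise hard_bounds clipping: buckets are created only inside the bounds, and documents outside them are dropped from the histogram rather than lumped into edge buckets. In builder form the option looks roughly like this (the field name "date" stands in for the tests' AGGREGABLE_DATE constant):

import org.opensearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder;
import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval;
import org.opensearch.search.aggregations.bucket.histogram.LongBounds;

public class HardBoundsSketch {
    // Only days inside the 2017-02-03 to 2020-01-01 window can become buckets;
    // documents outside the window do not contribute to the histogram at all.
    public static DateHistogramAggregationBuilder byDay() {
        return new DateHistogramAggregationBuilder("by_day")
            .field("date")
            .calendarInterval(DateHistogramInterval.DAY)
            .hardBounds(new LongBounds("2017-02-03", "2020-01-01"));
    }
}
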
.field(AGGREGABLE_DATE), + histogram -> { + List<? extends Histogram.Bucket> buckets = histogram.getBuckets(); + assertEquals(0, buckets.size()); + }, + false + ); + + testSearchCase( + new MatchAllDocsQuery(), + Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05"), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY) + .hardBounds(new LongBounds("2016-01-01", "2017-01-01")) + .field(AGGREGABLE_DATE), + histogram -> { + List<? extends Histogram.Bucket> buckets = histogram.getBuckets(); + assertEquals(0, buckets.size()); + }, + false + ); + + testSearchCase( + new MatchAllDocsQuery(), + Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05"), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY) + .hardBounds(new LongBounds("2016-01-01", "2017-02-03")) + .field(AGGREGABLE_DATE), + histogram -> { + List<? extends Histogram.Bucket> buckets = histogram.getBuckets(); + assertEquals(2, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-02T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + }, + false + ); + + testSearchCase( + new MatchAllDocsQuery(), + Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05"), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY) + .hardBounds(new LongBounds("2017-02-03", "2020-01-01")) + .field(AGGREGABLE_DATE), + histogram -> { + List<? extends Histogram.Bucket> buckets = histogram.getBuckets(); + assertEquals(3, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-03T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(3, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-04T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(0, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-02-05T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + }, + false + ); + } + + public void testFilterRewriteOptimizationWithRangeQuery() throws IOException { + testSearchCase( + LongPoint.newRangeQuery(AGGREGABLE_DATE, asLong("2018-01-01"), asLong("2020-01-01")), + Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05"), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY).field(AGGREGABLE_DATE), + histogram -> { + List<? extends Histogram.Bucket> buckets = histogram.getBuckets(); + assertEquals(0, buckets.size()); + }, + 10000, + false, + false, + true // force AGGREGABLE_DATE field to be searchable to test the filter rewrite optimization path + ); + + testSearchCase( + LongPoint.newRangeQuery(AGGREGABLE_DATE, asLong("2016-01-01"), asLong("2017-01-01")), + Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05"), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY).field(AGGREGABLE_DATE), + histogram -> { + List<? 
extends Histogram.Bucket> buckets = histogram.getBuckets(); + assertEquals(0, buckets.size()); + }, + 10000, + false, + false, + true + ); + + testSearchCase( + LongPoint.newRangeQuery(AGGREGABLE_DATE, asLong("2016-01-01"), asLong("2017-02-02")), + Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05"), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY).field(AGGREGABLE_DATE), + histogram -> { + List<? extends Histogram.Bucket> buckets = histogram.getBuckets(); + assertEquals(2, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-02T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + }, + 10000, + false, + false, + true + ); + + testSearchCase( + LongPoint.newRangeQuery(AGGREGABLE_DATE, asLong("2017-02-03"), asLong("2020-01-01")), + Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05"), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY).field(AGGREGABLE_DATE), + histogram -> { + List<? extends Histogram.Bucket> buckets = histogram.getBuckets(); + assertEquals(3, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-03T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(3, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-04T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(0, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-02-05T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + }, + 10000, + false, + false, + true + ); + } + + public void testDocCountField() throws IOException { + testSearchCase( + new MatchAllDocsQuery(), + Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02"), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY).field(AGGREGABLE_DATE), + histogram -> { + List<? 
extends Histogram.Bucket> buckets = histogram.getBuckets(); + assertEquals(2, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(5, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-02T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + }, + 10000, + false, + true + ); + } + public void testIllegalInterval() throws IOException { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, @@ -1211,13 +1388,42 @@ private void testSearchCase( int maxBucket, boolean useNanosecondResolution ) throws IOException { - boolean aggregableDateIsSearchable = randomBoolean(); + testSearchCase(query, dataset, configure, verify, maxBucket, useNanosecondResolution, false); + } + + private void testSearchCase( + Query query, + List<String> dataset, + Consumer<DateHistogramAggregationBuilder> configure, + Consumer<InternalDateHistogram> verify, + int maxBucket, + boolean useNanosecondResolution, + boolean useDocCountField + ) throws IOException { + testSearchCase(query, dataset, configure, verify, maxBucket, useNanosecondResolution, useDocCountField, randomBoolean()); + } + + private void testSearchCase( + Query query, + List<String> dataset, + Consumer<DateHistogramAggregationBuilder> configure, + Consumer<InternalDateHistogram> verify, + int maxBucket, + boolean useNanosecondResolution, + boolean useDocCountField, + boolean aggregableDateIsSearchable + ) throws IOException { + logger.debug("Aggregable date is searchable {}", aggregableDateIsSearchable); DateFieldMapper.DateFieldType fieldType = aggregableDateFieldType(useNanosecondResolution, aggregableDateIsSearchable); try (Directory directory = newDirectory()) { try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { Document document = new Document(); + if (useDocCountField) { + // add the doc count field to the first document + document.add(new NumericDocValuesField(DocCountFieldMapper.NAME, 5)); + } for (String date : dataset) { long instant = asLong(date, fieldType); document.add(new SortedNumericDocValuesField(AGGREGABLE_DATE, instant)); @@ -1245,7 +1451,7 @@ private void testSearchCase( } private static long asLong(String dateTime) { - return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli(); + return DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(dateTime)).toInstant().toEpochMilli(); } private static long asLong(String dateTime, DateFieldMapper.DateFieldType fieldType) { diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregatorTests.java index 87f3ed166e5d0..5c12d070824f2 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregatorTests.java @@ -36,12 +36,12 @@ import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.queries.BinaryDocValuesRangeQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import 
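
testDocCountField above (like the composite variant earlier) relies on the _doc_count mechanism: a numeric doc value stored under DocCountFieldMapper.NAME makes one Lucene document count as N documents in every bucket it falls into, which is why the first bucket reports 5 instead of 1. Indexing such a document looks roughly like this (the helper shape is hypothetical; the field constants are the real mappers' names):

import org.apache.lucene.document.Document;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedNumericDocValuesField;
import org.opensearch.index.mapper.DocCountFieldMapper;

public class DocCountSketch {
    // Hypothetical helper: a document that contributes `weight` to bucket
    // doc counts instead of the default 1.
    static Document weightedDoc(String dateField, long instant, long weight) {
        Document document = new Document();
        document.add(new NumericDocValuesField(DocCountFieldMapper.NAME, weight));
        document.add(new SortedNumericDocValuesField(dateField, instant));
        return document;
    }
}
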
org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; import org.opensearch.common.CheckedConsumer; import org.opensearch.common.lucene.search.Queries; @@ -1086,7 +1086,7 @@ private void testCase( } private static long asLong(String dateTime) { - return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli(); + return DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(dateTime)).toInstant().toEpochMilli(); } private static ZonedDateTime asZDT(String dateTime) { diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DoubleBoundsTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DoubleBoundsTests.java index 67fe82c414198..525cf0333a92a 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DoubleBoundsTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DoubleBoundsTests.java @@ -32,13 +32,13 @@ package org.opensearch.search.aggregations.bucket.histogram; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogramTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogramTests.java index 72b11c1657fb5..99a5c6642abbb 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogramTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogramTests.java @@ -33,11 +33,11 @@ package org.opensearch.search.aggregations.bucket.histogram; import org.apache.lucene.tests.util.TestUtil; -import org.opensearch.common.breaker.CircuitBreaker; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.MockBigArrays; import org.opensearch.common.util.MockPageCacheRecycler; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.script.ScriptService; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.InternalAggregation; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/LongBoundsTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/LongBoundsTests.java index 6eadc12330bba..7a071abd7e380 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/LongBoundsTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/LongBoundsTests.java @@ -34,16 +34,16 @@ import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; -import 
org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.util.BigArrays; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.DateFieldMapper; import org.opensearch.index.query.QueryShardContext; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/NumericHistogramAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/NumericHistogramAggregatorTests.java index bb9ed263ca3f6..3cafa872ef443 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/NumericHistogramAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/NumericHistogramAggregatorTests.java @@ -37,10 +37,10 @@ import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; import org.opensearch.common.CheckedConsumer; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/RangeHistogramAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/RangeHistogramAggregatorTests.java index 3ee9765e445fd..b3d58d07fb41c 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/RangeHistogramAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/RangeHistogramAggregatorTests.java @@ -37,10 +37,10 @@ import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; import org.opensearch.common.CheckedConsumer; import org.opensearch.common.network.InetAddresses; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregatorTests.java index 33f2a0b56b5ba..1922d7dcf395b 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregatorTests.java @@ -36,11 +36,11 @@ import 
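
Several hunks in this area (BucketsAggregatorTests, InternalVariableWidthHistogramTests, and the BucketOrds tests below) are pure package moves: CircuitBreaker and NoneCircuitBreakerService now live under org.opensearch.core. The call sites are unchanged, so the usual test wiring still reads as follows (a minimal sketch of the wiring only, not the full test setup):

import org.opensearch.common.settings.Settings;
import org.opensearch.common.util.MockBigArrays;
import org.opensearch.common.util.MockPageCacheRecycler;
import org.opensearch.core.common.breaker.CircuitBreaker;
import org.opensearch.core.indices.breaker.NoneCircuitBreakerService;

public class BreakerWiringSketch {
    public static void main(String[] args) {
        // Only the import lines change; construction is identical to before.
        NoneCircuitBreakerService breakerService = new NoneCircuitBreakerService();
        MockBigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), breakerService);
        // The no-op service still hands out a breaker for any requested name.
        CircuitBreaker request = breakerService.getBreaker(CircuitBreaker.REQUEST);
        System.out.println(request.getName());
    }
}
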
org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.NumericUtils; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/missing/MissingAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/missing/MissingAggregatorTests.java index c544dcce45cce..b530dc31d30ed 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/missing/MissingAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/missing/MissingAggregatorTests.java @@ -37,10 +37,10 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; import org.opensearch.common.CheckedConsumer; import org.opensearch.common.settings.Settings; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregatorTests.java index 65ce02333bae0..406c411494d60 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregatorTests.java @@ -41,7 +41,6 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.NoMergePolicy; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; @@ -49,6 +48,7 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; import org.opensearch.common.CheckedConsumer; import org.opensearch.common.collect.Tuple; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/nested/ReverseNestedAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/nested/ReverseNestedAggregatorTests.java index 61df6d01aef64..a0efe2bffc64c 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/nested/ReverseNestedAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/nested/ReverseNestedAggregatorTests.java @@ -37,9 +37,9 @@ import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.store.Directory; +import 
org.apache.lucene.tests.index.RandomIndexWriter; import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.NestedPathFieldMapper; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/range/BinaryRangeAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/range/BinaryRangeAggregatorTests.java index d9a83549f8a97..77e59375ef012 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/range/BinaryRangeAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/range/BinaryRangeAggregatorTests.java @@ -31,13 +31,8 @@ package org.opensearch.search.aggregations.bucket.range; -import java.io.IOException; -import java.util.Arrays; -import java.util.HashSet; -import java.util.Set; - -import org.apache.lucene.util.BytesRef; import org.apache.lucene.tests.util.TestUtil; +import org.apache.lucene.util.BytesRef; import org.opensearch.index.fielddata.AbstractSortedSetDocValues; import org.opensearch.index.fielddata.SortedBinaryDocValues; import org.opensearch.search.aggregations.LeafBucketCollector; @@ -45,6 +40,11 @@ import org.opensearch.search.aggregations.bucket.range.BinaryRangeAggregator.SortedSetRangeLeafCollector; import org.opensearch.test.OpenSearchTestCase; +import java.io.IOException; +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; + public class BinaryRangeAggregatorTests extends OpenSearchTestCase { private static class FakeSortedSetDocValues extends AbstractSortedSetDocValues { diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/range/DateRangeAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/range/DateRangeAggregatorTests.java index 9a2ef3de1dfe4..96c8be1a25cc3 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/range/DateRangeAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/range/DateRangeAggregatorTests.java @@ -37,11 +37,11 @@ import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; import org.opensearch.OpenSearchParseException; import org.opensearch.common.CheckedConsumer; @@ -273,7 +273,7 @@ private void testCase( true, false, true, - DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, + DateFieldMapper.getDefaultDateTimeFormatter(), resolution, null, Collections.emptyMap() diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/range/IpRangeAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/range/IpRangeAggregatorTests.java index b74f21ef09037..b57295431d678 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/range/IpRangeAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/range/IpRangeAggregatorTests.java @@ -35,10 +35,10 @@ import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.IndexReader; -import 
org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; import org.opensearch.common.collect.Tuple; import org.opensearch.common.network.NetworkAddress; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/range/RangeAggregationBuilderTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/range/RangeAggregationBuilderTests.java index b926a5a7895f2..4362ce48003cc 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/range/RangeAggregationBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/range/RangeAggregationBuilderTests.java @@ -32,9 +32,9 @@ package org.opensearch.search.aggregations.bucket.range; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.io.stream.Writeable.Reader; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.test.AbstractSerializingTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/range/RangeAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/range/RangeAggregatorTests.java index 8b9bd388eb641..dd7ae915c3b45 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/range/RangeAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/range/RangeAggregatorTests.java @@ -37,11 +37,11 @@ import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; import org.opensearch.common.CheckedConsumer; import org.opensearch.index.mapper.DateFieldMapper; @@ -136,7 +136,7 @@ public void testDateFieldNanosecondResolution() throws IOException { true, false, true, - DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, + DateFieldMapper.getDefaultDateTimeFormatter(), DateFieldMapper.Resolution.NANOSECONDS, null, Collections.emptyMap() @@ -167,7 +167,7 @@ public void testMissingDateWithDateField() throws IOException { true, false, true, - DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, + DateFieldMapper.getDefaultDateTimeFormatter(), DateFieldMapper.Resolution.NANOSECONDS, null, Collections.emptyMap() diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/sampler/BestDocsDeferringCollectorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/sampler/BestDocsDeferringCollectorTests.java index 427d0b89aa688..67f7f70b88e7e 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/sampler/BestDocsDeferringCollectorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/sampler/BestDocsDeferringCollectorTests.java @@ -37,7 +37,6 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; -import 
org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.ScoreDoc; @@ -45,10 +44,11 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.MockBigArrays; import org.opensearch.common.util.MockPageCacheRecycler; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.search.aggregations.AggregatorTestCase; import org.opensearch.search.aggregations.BucketCollector; import org.opensearch.search.aggregations.LeafBucketCollector; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/sampler/DiversifiedSamplerTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/sampler/DiversifiedSamplerTests.java index bb07b9c4af37e..1e1dee7ab16f6 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/sampler/DiversifiedSamplerTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/sampler/DiversifiedSamplerTests.java @@ -40,10 +40,10 @@ import org.apache.lucene.document.TextField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; import org.opensearch.common.lucene.search.function.FieldValueFactorFunction; import org.opensearch.common.lucene.search.function.FunctionScoreQuery; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/BinaryTermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/BinaryTermsAggregatorTests.java index 34cc29d40a9fd..4f20649ad566b 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/BinaryTermsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/BinaryTermsAggregatorTests.java @@ -34,12 +34,12 @@ import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; import org.opensearch.common.Numbers; import org.opensearch.index.mapper.BinaryFieldMapper; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/BytesKeyedBucketOrdsTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/BytesKeyedBucketOrdsTests.java index f73146038a5f1..c593ed4147e7e 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/BytesKeyedBucketOrdsTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/BytesKeyedBucketOrdsTests.java @@ -36,7 +36,7 @@ import 
org.opensearch.common.settings.Settings; import org.opensearch.common.util.MockBigArrays; import org.opensearch.common.util.MockPageCacheRecycler; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.search.aggregations.CardinalityUpperBound; import org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/DoubleTermsTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/DoubleTermsTests.java index 853d56202c360..5fe9c1dee358d 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/DoubleTermsTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/DoubleTermsTests.java @@ -59,6 +59,12 @@ public class DoubleTermsTests extends InternalTermsTestCase { long minDocCount = 1; int requiredSize = 3; int shardSize = requiredSize + 2; + TermsAggregator.BucketCountThresholds bucketCountThresholds = new TermsAggregator.BucketCountThresholds( + minDocCount, + 0, + requiredSize, + shardSize + ); DocValueFormat format = randomNumericDocValueFormat(); long otherDocCount = 0; List<DoubleTerms.Bucket> buckets = new ArrayList<>(); @@ -75,15 +81,14 @@ public class DoubleTermsTests extends InternalTermsTestCase { name, reduceOrder, order, - requiredSize, - minDocCount, metadata, format, shardSize, showTermDocCountError, otherDocCount, buckets, - docCountError + docCountError, + bucketCountThresholds ); } @@ -158,15 +163,14 @@ protected Class<? extends ParsedMultiBucketAggregation> implementationClass() { name, doubleTerms.reduceOrder, order, - requiredSize, - minDocCount, metadata, format, shardSize, showTermDocCountError, otherDocCount, buckets, - docCountError + docCountError, + new TermsAggregator.BucketCountThresholds(minDocCount, 0, requiredSize, shardSize) ); } else { String name = instance.getName(); @@ -195,7 +199,7 @@ protected Class<? 
extends ParsedMultiBucketAggregation> implementationClass() { default: throw new AssertionError("Illegal randomisation branch"); } - return new UnmappedTerms(name, order, requiredSize, minDocCount, metadata); + return new UnmappedTerms(name, order, new TermsAggregator.BucketCountThresholds(minDocCount, 0, requiredSize, 0), metadata); } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/InternalMultiTermsTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/InternalMultiTermsTests.java index 2657f2bdd5138..9f8bab1179ad6 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/InternalMultiTermsTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/InternalMultiTermsTests.java @@ -45,6 +45,12 @@ public class InternalMultiTermsTests extends InternalTermsTestCase { int requiredSize = 3; int shardSize = requiredSize + 2; long otherDocCount = 0; + TermsAggregator.BucketCountThresholds bucketCountThresholds = new TermsAggregator.BucketCountThresholds( + minDocCount, + 0, + requiredSize, + shardSize + ); final int numBuckets = randomNumberOfBuckets(); @@ -70,15 +76,14 @@ public class InternalMultiTermsTests extends InternalTermsTestCase { name, reduceOrder, order, - requiredSize, - minDocCount, metadata, shardSize, showTermDocCountError, otherDocCount, docCountError, formats, - buckets + buckets, + bucketCountThresholds ); } diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/InternalSignificantTermsTestCase.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/InternalSignificantTermsTestCase.java index aab9e91576b18..e640aa92ac782 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/InternalSignificantTermsTestCase.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/InternalSignificantTermsTestCase.java @@ -114,7 +114,7 @@ protected abstract InternalSignificantTerms createTestInstance( @Override protected InternalSignificantTerms createUnmappedInstance(String name, Map<String, Object> metadata) { InternalSignificantTerms<?, ?> testInstance = createTestInstance(name, metadata); - return new UnmappedSignificantTerms(name, testInstance.requiredSize, testInstance.minDocCount, metadata); + return new UnmappedSignificantTerms(name, testInstance.bucketCountThresholds, metadata); } @Override diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/InternalTermsTestCase.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/InternalTermsTestCase.java index d3f7c62021243..2e00248a70771 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/InternalTermsTestCase.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/InternalTermsTestCase.java @@ -73,7 +73,7 @@ public void init() { @Override protected InternalTerms<?, ?> createUnmappedInstance(String name, Map<String, Object> metadata) { InternalTerms<?, ?> testInstance = createTestInstance(name, metadata); - return new UnmappedTerms(name, testInstance.order, testInstance.requiredSize, testInstance.minDocCount, metadata); + return new UnmappedTerms(name, testInstance.order, testInstance.bucketCountThresholds, metadata); } @Override diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/KeywordTermsAggregatorTests.java 
b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/KeywordTermsAggregatorTests.java index 05197c7e85844..753644dce81d5 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/KeywordTermsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/KeywordTermsAggregatorTests.java @@ -32,16 +32,15 @@ package org.opensearch.search.aggregations.bucket.terms; import org.apache.lucene.document.Document; -import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; -import org.apache.lucene.util.BytesRef; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.opensearch.common.TriConsumer; import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.search.aggregations.AggregatorTestCase; @@ -57,6 +56,8 @@ public class KeywordTermsAggregatorTests extends AggregatorTestCase { private static final String KEYWORD_FIELD = "keyword"; + private static final Consumer<TermsAggregationBuilder> CONFIGURE_KEYWORD_FIELD = agg -> agg.field(KEYWORD_FIELD); + private static final List<String> dataset; static { List<String> d = new ArrayList<>(45); @@ -68,51 +69,63 @@ public class KeywordTermsAggregatorTests extends AggregatorTestCase { dataset = d; } + private static final Consumer<InternalMappedTerms> VERIFY_MATCH_ALL_DOCS = agg -> { + assertEquals(9, agg.getBuckets().size()); + for (int i = 0; i < 9; i++) { + StringTerms.Bucket bucket = (StringTerms.Bucket) agg.getBuckets().get(i); + assertThat(bucket.getKey(), equalTo(String.valueOf(9L - i))); + assertThat(bucket.getDocCount(), equalTo(9L - i)); + } + }; + + private static final Consumer<InternalMappedTerms> VERIFY_MATCH_NO_DOCS = agg -> { assertEquals(0, agg.getBuckets().size()); }; + + private static final Query MATCH_ALL_DOCS_QUERY = new MatchAllDocsQuery(); + + private static final Query MATCH_NO_DOCS_QUERY = new MatchNoDocsQuery(); + public void testMatchNoDocs() throws IOException { testSearchCase( - new MatchNoDocsQuery(), + ADD_SORTED_SET_FIELD_NOT_INDEXED, + MATCH_NO_DOCS_QUERY, dataset, - aggregation -> aggregation.field(KEYWORD_FIELD), - agg -> assertEquals(0, agg.getBuckets().size()), - null // without type hint + CONFIGURE_KEYWORD_FIELD, + VERIFY_MATCH_NO_DOCS, + null // without type hint ); testSearchCase( - new MatchNoDocsQuery(), + ADD_SORTED_SET_FIELD_NOT_INDEXED, + MATCH_NO_DOCS_QUERY, dataset, - aggregation -> aggregation.field(KEYWORD_FIELD), - agg -> assertEquals(0, agg.getBuckets().size()), - ValueType.STRING // with type hint + CONFIGURE_KEYWORD_FIELD, + VERIFY_MATCH_NO_DOCS, + ValueType.STRING // with type hint ); } public void testMatchAllDocs() throws IOException { - Query query = new MatchAllDocsQuery(); - - testSearchCase(query, dataset, aggregation -> aggregation.field(KEYWORD_FIELD), agg -> { - assertEquals(9, agg.getBuckets().size()); - for (int i = 0; i < 9; i++) { - StringTerms.Bucket bucket = (StringTerms.Bucket) agg.getBuckets().get(i); - assertThat(bucket.getKey(), equalTo(String.valueOf(9L - i))); - assertThat(bucket.getDocCount(), equalTo(9L - i)); - } - }, - null // without type hint + 
testSearchCase( + ADD_SORTED_SET_FIELD_NOT_INDEXED, + MATCH_ALL_DOCS_QUERY, + dataset, + CONFIGURE_KEYWORD_FIELD, + VERIFY_MATCH_ALL_DOCS, + null // without type hint ); - testSearchCase(query, dataset, aggregation -> aggregation.field(KEYWORD_FIELD), agg -> { - assertEquals(9, agg.getBuckets().size()); - for (int i = 0; i < 9; i++) { - StringTerms.Bucket bucket = (StringTerms.Bucket) agg.getBuckets().get(i); - assertThat(bucket.getKey(), equalTo(String.valueOf(9L - i))); - assertThat(bucket.getDocCount(), equalTo(9L - i)); - } - }, - ValueType.STRING // with type hint + testSearchCase( + ADD_SORTED_SET_FIELD_NOT_INDEXED, + MATCH_ALL_DOCS_QUERY, + dataset, + CONFIGURE_KEYWORD_FIELD, + VERIFY_MATCH_ALL_DOCS, + ValueType.STRING // with type hint ); } private void testSearchCase( + TriConsumer<Document, String, String> addField, Query query, List<String> dataset, Consumer<TermsAggregationBuilder> configure, @@ -123,7 +136,7 @@ private void testSearchCase( try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { Document document = new Document(); for (String value : dataset) { - document.add(new SortedSetDocValuesField(KEYWORD_FIELD, new BytesRef(value))); + addField.apply(document, KEYWORD_FIELD, value); indexWriter.addDocument(document); document.clear(); } @@ -147,5 +160,4 @@ private void testSearchCase( } } } - } diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/LongKeyedBucketOrdsTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/LongKeyedBucketOrdsTests.java index d0a6500ecf9e7..bdecae8f7dfa3 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/LongKeyedBucketOrdsTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/LongKeyedBucketOrdsTests.java @@ -35,7 +35,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.util.MockBigArrays; import org.opensearch.common.util.MockPageCacheRecycler; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.search.aggregations.CardinalityUpperBound; import org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/LongTermsTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/LongTermsTests.java index becf89c4603f3..44fa9f5e79593 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/LongTermsTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/LongTermsTests.java @@ -59,6 +59,12 @@ public class LongTermsTests extends InternalTermsTestCase { long minDocCount = 1; int requiredSize = 3; int shardSize = requiredSize + 2; + TermsAggregator.BucketCountThresholds bucketCountThresholds = new TermsAggregator.BucketCountThresholds( + minDocCount, + 0, + requiredSize, + shardSize + ); DocValueFormat format = randomNumericDocValueFormat(); long otherDocCount = 0; List<LongTerms.Bucket> buckets = new ArrayList<>(); @@ -75,15 +81,14 @@ public class LongTermsTests extends InternalTermsTestCase { name, reduceOrder, order, - requiredSize, - minDocCount, metadata, format, shardSize, showTermDocCountError, otherDocCount, buckets, - docCountError + docCountError, + bucketCountThresholds ); } @@ -158,15 +163,14 @@ protected Class<? 
extends ParsedMultiBucketAggregation> implementationClass() { name, longTerms.reduceOrder, order, - requiredSize, - minDocCount, metadata, format, shardSize, showTermDocCountError, otherDocCount, buckets, - docCountError + docCountError, + new TermsAggregator.BucketCountThresholds(minDocCount, 0, requiredSize, shardSize) ); } else { String name = instance.getName(); @@ -195,7 +199,7 @@ protected Class<? extends ParsedMultiBucketAggregation> implementationClass() { default: throw new AssertionError("Illegal randomisation branch"); } - return new UnmappedTerms(name, order, requiredSize, minDocCount, metadata); + return new UnmappedTerms(name, order, new TermsAggregator.BucketCountThresholds(minDocCount, 0, requiredSize, 0), metadata); } } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregatorTests.java index a2792114e9529..d550c4c354c0f 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregatorTests.java @@ -17,19 +17,19 @@ import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.document.StringField; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; -import org.hamcrest.MatcherAssert; import org.opensearch.common.CheckedConsumer; import org.opensearch.common.network.InetAddresses; import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.MockPageCacheRecycler; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.index.IndexService; import org.opensearch.index.cache.IndexCache; import org.opensearch.index.mapper.BooleanFieldMapper; @@ -41,7 +41,6 @@ import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.shard.IndexShard; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.script.MockScriptEngine; import org.opensearch.script.Script; import org.opensearch.script.ScriptEngine; @@ -65,6 +64,7 @@ import org.opensearch.search.internal.SearchContext; import org.opensearch.search.lookup.LeafDocLookup; import org.opensearch.test.TestSearchContext; +import org.hamcrest.MatcherAssert; import java.io.IOException; import java.util.ArrayList; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/NumericTermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/NumericTermsAggregatorTests.java index 846f71b12dab0..4988e7141bb9c 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/NumericTermsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/NumericTermsAggregatorTests.java @@ -36,12 +36,12 @@ import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; 
-import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.search.aggregations.AggregationExecutionException; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorTests.java index ff0b74b1d9d28..28cec4df5f7d7 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorTests.java @@ -41,7 +41,6 @@ import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; @@ -49,6 +48,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TotalHits; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificanceHeuristicTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificanceHeuristicTests.java index 810be2d796740..89b7076104a02 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificanceHeuristicTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificanceHeuristicTests.java @@ -33,19 +33,18 @@ import org.apache.lucene.util.BytesRef; import org.opensearch.Version; -import org.opensearch.common.Strings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.io.stream.InputStreamStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.OutputStreamStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.search.DocValueFormat; import org.opensearch.search.SearchModule; import org.opensearch.search.aggregations.InternalAggregation; @@ -56,8 +55,8 @@ import org.opensearch.search.aggregations.bucket.terms.heuristic.MutualInformation; import org.opensearch.search.aggregations.bucket.terms.heuristic.PercentageScore; import 
org.opensearch.search.aggregations.bucket.terms.heuristic.SignificanceHeuristic; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.InternalAggregationTestCase; +import org.opensearch.test.OpenSearchTestCase; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; @@ -126,7 +125,16 @@ public void testStreamResponse() throws Exception { DocValueFormat.RAW, randomDoubleBetween(0, 100, true) ); - return new SignificantLongTerms("some_name", 1, 1, null, DocValueFormat.RAW, 10, 20, heuristic, singletonList(bucket)); + return new SignificantLongTerms( + "some_name", + null, + DocValueFormat.RAW, + 10, + 20, + heuristic, + singletonList(bucket), + new TermsAggregator.BucketCountThresholds(1, 0, 1, 0) + ); } else { SignificantStringTerms.Bucket bucket = new SignificantStringTerms.Bucket( new BytesRef("someterm"), @@ -138,7 +146,16 @@ public void testStreamResponse() throws Exception { DocValueFormat.RAW, randomDoubleBetween(0, 100, true) ); - return new SignificantStringTerms("some_name", 1, 1, null, DocValueFormat.RAW, 10, 20, heuristic, singletonList(bucket)); + return new SignificantStringTerms( + "some_name", + null, + DocValueFormat.RAW, + 10, + 20, + heuristic, + singletonList(bucket), + new TermsAggregator.BucketCountThresholds(1, 0, 1, 0) + ); } } @@ -205,14 +222,13 @@ SignificantStringTerms createAggregation( ) { return new SignificantStringTerms( "sig_terms", - 2, - -1, emptyMap(), DocValueFormat.RAW, subsetSize, supersetSize, significanceHeuristic, - buckets + buckets, + new TermsAggregator.BucketCountThresholds(-1, 0, 2, 0) ); } @@ -241,14 +257,13 @@ SignificantLongTerms createAggregation( ) { return new SignificantLongTerms( "sig_terms", - 2, - -1, emptyMap(), DocValueFormat.RAW, subsetSize, supersetSize, significanceHeuristic, - buckets + buckets, + new TermsAggregator.BucketCountThresholds(-1, 0, 2, 0) ); } @@ -332,7 +347,7 @@ protected SignificanceHeuristic parseFromBuilder(SignificanceHeuristic significa stBuilder.significanceHeuristic(significanceHeuristic).field("text").minDocCount(200); XContentBuilder stXContentBuilder = XContentFactory.jsonBuilder(); stBuilder.internalXContent(stXContentBuilder, null); - XContentParser stParser = createParser(JsonXContent.jsonXContent, Strings.toString(stXContentBuilder)); + XContentParser stParser = createParser(JsonXContent.jsonXContent, stXContentBuilder.toString()); return parseSignificanceHeuristic(stParser); } diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificantLongTermsTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificantLongTermsTests.java index 886e4d8267578..38b478efd004b 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificantLongTermsTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificantLongTermsTests.java @@ -85,7 +85,17 @@ protected InternalSignificantTerms createTestInstance( bucket.updateScore(significanceHeuristic); buckets.add(bucket); } - return new SignificantLongTerms(name, requiredSize, 1L, metadata, format, subsetSize, supersetSize, significanceHeuristic, buckets); + + return new SignificantLongTerms( + name, + metadata, + format, + subsetSize, + supersetSize, + significanceHeuristic, + buckets, + new TermsAggregator.BucketCountThresholds(1L, 0, requiredSize, 0) + ); } @Override @@ -150,14 +160,13 @@ protected Class<? 
extends ParsedMultiBucketAggregation> implementationClass() { } return new SignificantLongTerms( name, - requiredSize, - minDocCount, metadata, format, subsetSize, supersetSize, significanceHeuristic, - buckets + buckets, + new TermsAggregator.BucketCountThresholds(minDocCount, 0, requiredSize, 0) ); } else { String name = instance.getName(); @@ -185,7 +194,7 @@ protected Class<? extends ParsedMultiBucketAggregation> implementationClass() { default: throw new AssertionError("Illegal randomisation branch"); } - return new UnmappedSignificantTerms(name, requiredSize, minDocCount, metadata); + return new UnmappedSignificantTerms(name, new TermsAggregator.BucketCountThresholds(minDocCount, 0, requiredSize, 0), metadata); } } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificantStringTermsTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificantStringTermsTests.java index 63a08a7aa1683..3ac30248ef353 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificantStringTermsTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificantStringTermsTests.java @@ -80,14 +80,13 @@ protected InternalSignificantTerms createTestInstance( } return new SignificantStringTerms( name, - requiredSize, - 1L, metadata, format, subsetSize, supersetSize, significanceHeuristic, - buckets + buckets, + new TermsAggregator.BucketCountThresholds(1L, 0, requiredSize, 0) ); } @@ -153,14 +152,13 @@ protected Class<? extends ParsedMultiBucketAggregation> implementationClass() { } return new SignificantStringTerms( name, - requiredSize, - minDocCount, metadata, format, subsetSize, supersetSize, significanceHeuristic, - buckets + buckets, + new TermsAggregator.BucketCountThresholds(minDocCount, 0, requiredSize, 0) ); } else { String name = instance.getName(); @@ -188,7 +186,7 @@ protected Class<? 
extends ParsedMultiBucketAggregation> implementationClass() { default: throw new AssertionError("Illegal randomisation branch"); } - return new UnmappedSignificantTerms(name, requiredSize, minDocCount, metadata); + return new UnmappedSignificantTerms(name, new TermsAggregator.BucketCountThresholds(minDocCount, 0, requiredSize, 0), metadata); } } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificantTermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificantTermsAggregatorTests.java index 883196d290154..a106ef47dc469 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificantTermsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificantTermsAggregatorTests.java @@ -43,12 +43,12 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; import org.opensearch.index.analysis.AnalyzerScope; import org.opensearch.index.analysis.NamedAnalyzer; diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/StringTermsTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/StringTermsTests.java index 6757c8e00f83d..deba96fd3ae19 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/StringTermsTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/StringTermsTests.java @@ -140,15 +140,14 @@ protected Class<? extends ParsedMultiBucketAggregation> implementationClass() { name, stringTerms.reduceOrder, order, - requiredSize, - minDocCount, metadata, format, shardSize, showTermDocCountError, otherDocCount, buckets, - docCountError + docCountError, + new TermsAggregator.BucketCountThresholds(minDocCount, 0, requiredSize, shardSize) ); } else { String name = instance.getName(); @@ -177,7 +176,7 @@ protected Class<? 
extends ParsedMultiBucketAggregation> implementationClass() { default: throw new AssertionError("Illegal randomisation branch"); } - return new UnmappedTerms(name, order, requiredSize, minDocCount, metadata); + return new UnmappedTerms(name, order, new TermsAggregator.BucketCountThresholds(minDocCount, 0, requiredSize, 0), metadata); } } @@ -206,6 +205,12 @@ private BytesRef[] generateRandomDict() { long minDocCount = 1; int requiredSize = 3; int shardSize = requiredSize + 2; + TermsAggregator.BucketCountThresholds bucketCountThresholds = new TermsAggregator.BucketCountThresholds( + minDocCount, + 0, + requiredSize, + shardSize + ); DocValueFormat format = DocValueFormat.RAW; long otherDocCount = 0; List<StringTerms.Bucket> buckets = new ArrayList<>(); @@ -226,15 +231,14 @@ private BytesRef[] generateRandomDict() { name, reduceOrder, order, - requiredSize, - minDocCount, metadata, format, shardSize, showTermDocCountError, otherDocCount, buckets, - docCountError + docCountError, + bucketCountThresholds ); } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java index 4ce66e0495f83..6d105c27a692f 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorTests.java @@ -44,21 +44,25 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; -import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.index.Term; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.TotalHits; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; -import org.opensearch.common.breaker.CircuitBreaker; +import org.opensearch.common.TriConsumer; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.network.InetAddresses; import org.opensearch.common.settings.Settings; -import org.opensearch.core.common.text.Text; import org.opensearch.common.util.MockBigArrays; import org.opensearch.common.util.MockPageCacheRecycler; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.text.Text; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.index.mapper.DocumentMapper; import org.opensearch.index.mapper.GeoPointFieldMapper; import org.opensearch.index.mapper.IdFieldMapper; @@ -74,7 +78,6 @@ import org.opensearch.index.mapper.Uid; import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.index.query.QueryBuilders; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.script.MockScriptEngine; import org.opensearch.script.Script; import org.opensearch.script.ScriptEngine; @@ -120,6 +123,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiFunction; import java.util.function.Consumer; import java.util.function.Function; @@ -136,9 +140,6 @@ import static org.mockito.Mockito.when; 
public class TermsAggregatorTests extends AggregatorTestCase { - - private boolean randomizeAggregatorImpl = true; - // Constants for a script that returns a string private static final String STRING_SCRIPT_NAME = "string_script"; private static final String STRING_SCRIPT_OUTPUT = "Orange"; @@ -171,9 +172,22 @@ protected ScriptService getMockScriptService() { return new ScriptService(Settings.EMPTY, engines, ScriptModule.CORE_CONTEXTS); } + protected CountingAggregator createCountingAggregator( + AggregationBuilder aggregationBuilder, + IndexSearcher indexSearcher, + boolean randomizeAggregatorImpl, + MappedFieldType... fieldTypes + ) throws IOException { + return new CountingAggregator( + new AtomicInteger(), + createAggregator(aggregationBuilder, indexSearcher, randomizeAggregatorImpl, fieldTypes) + ); + } + protected <A extends Aggregator> A createAggregator( AggregationBuilder aggregationBuilder, IndexSearcher indexSearcher, + boolean randomizeAggregatorImpl, MappedFieldType... fieldTypes ) throws IOException { try { @@ -188,6 +202,14 @@ protected <A extends Aggregator> A createAggregator( } } + protected <A extends Aggregator> A createAggregator( + AggregationBuilder aggregationBuilder, + IndexSearcher indexSearcher, + MappedFieldType... fieldTypes + ) throws IOException { + return createAggregator(aggregationBuilder, indexSearcher, true, fieldTypes); + } + @Override protected AggregationBuilder createAggBuilderForTypeTest(MappedFieldType fieldType, String fieldName) { return new TermsAggregationBuilder("foo").field(fieldName); @@ -207,8 +229,7 @@ protected List<ValuesSourceType> getSupportedValuesSourceTypes() { } public void testUsesGlobalOrdinalsByDefault() throws Exception { - randomizeAggregatorImpl = false; - + boolean randomizeAggregatorImpl = false; Directory directory = newDirectory(); RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); indexWriter.close(); @@ -220,35 +241,35 @@ public void testUsesGlobalOrdinalsByDefault() throws Exception { .field("string"); MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType("string"); - TermsAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + TermsAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, randomizeAggregatorImpl, fieldType); assertThat(aggregator, instanceOf(GlobalOrdinalsStringTermsAggregator.class)); GlobalOrdinalsStringTermsAggregator globalAgg = (GlobalOrdinalsStringTermsAggregator) aggregator; assertThat(globalAgg.descriptCollectionStrategy(), equalTo("dense")); // Infers depth_first because the maxOrd is 0 which is less than the size aggregationBuilder.subAggregation(AggregationBuilders.cardinality("card").field("string")); - aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + aggregator = createAggregator(aggregationBuilder, indexSearcher, randomizeAggregatorImpl, fieldType); assertThat(aggregator, instanceOf(GlobalOrdinalsStringTermsAggregator.class)); globalAgg = (GlobalOrdinalsStringTermsAggregator) aggregator; assertThat(globalAgg.collectMode, equalTo(Aggregator.SubAggCollectionMode.DEPTH_FIRST)); assertThat(globalAgg.descriptCollectionStrategy(), equalTo("remap")); aggregationBuilder.collectMode(Aggregator.SubAggCollectionMode.DEPTH_FIRST); - aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + aggregator = createAggregator(aggregationBuilder, indexSearcher, randomizeAggregatorImpl, fieldType); assertThat(aggregator, 
instanceOf(GlobalOrdinalsStringTermsAggregator.class)); globalAgg = (GlobalOrdinalsStringTermsAggregator) aggregator; assertThat(globalAgg.collectMode, equalTo(Aggregator.SubAggCollectionMode.BREADTH_FIRST)); assertThat(globalAgg.descriptCollectionStrategy(), equalTo("dense")); aggregationBuilder.order(BucketOrder.aggregation("card", true)); - aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + aggregator = createAggregator(aggregationBuilder, indexSearcher, randomizeAggregatorImpl, fieldType); assertThat(aggregator, instanceOf(GlobalOrdinalsStringTermsAggregator.class)); globalAgg = (GlobalOrdinalsStringTermsAggregator) aggregator; assertThat(globalAgg.descriptCollectionStrategy(), equalTo("remap")); @@ -257,51 +278,147 @@ public void testUsesGlobalOrdinalsByDefault() throws Exception { directory.close(); } - public void testSimple() throws Exception { + /** + * This test case utilizes the default implementation of GlobalOrdinalsStringTermsAggregator since collectSegmentOrds is false + */ + public void testSimpleAggregation() throws Exception { + // Fields not indexed: cannot use LeafBucketCollector#termDocFreqCollector - all documents are visited + testSimple(ADD_SORTED_SET_FIELD_NOT_INDEXED, false, false, false, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 4); + + // Fields indexed, deleted documents in segment: cannot use LeafBucketCollector#termDocFreqCollector - all documents are visited + testSimple(ADD_SORTED_SET_FIELD_INDEXED, true, false, false, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 4); + + // Fields indexed, no deleted documents in segment: will use LeafBucketCollector#termDocFreqCollector - no documents are visited + testSimple(ADD_SORTED_SET_FIELD_INDEXED, false, false, false, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 0); + + // Fields indexed, no deleted documents, but _doc_count field value present in document: + // cannot use LeafBucketCollector#termDocFreqCollector - all documents are visited + testSimple(ADD_SORTED_SET_FIELD_INDEXED, false, true, false, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 4); + + } + + /** + * This test case utilizes the LowCardinality implementation of GlobalOrdinalsStringTermsAggregator since collectSegmentOrds is true + */ + public void testSimpleAggregationLowCardinality() throws Exception { + // Fields not indexed: cannot use LeafBucketCollector#termDocFreqCollector - all documents are visited + testSimple(ADD_SORTED_SET_FIELD_NOT_INDEXED, false, false, true, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 4); + + // Fields indexed, deleted documents in segment: cannot use LeafBucketCollector#termDocFreqCollector - all documents are visited + testSimple(ADD_SORTED_SET_FIELD_INDEXED, true, false, true, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 4); + + // Fields indexed, no deleted documents in segment: will use LeafBucketCollector#termDocFreqCollector - no documents are visited +
testSimple(ADD_SORTED_SET_FIELD_INDEXED, false, false, true, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 0); + + // Fields indexed, no deleted documents, but _doc_count field value present in document: + // cannot use LeafBucketCollector#termDocFreqCollector - all documents are visited + testSimple(ADD_SORTED_SET_FIELD_INDEXED, false, true, true, TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS, 4); + } + + /** + * This test case utilizes the MapStringTermsAggregator. + */ + public void testSimpleMapStringAggregation() throws Exception { + testSimple( + ADD_SORTED_SET_FIELD_INDEXED, + randomBoolean(), + randomBoolean(), + randomBoolean(), + TermsAggregatorFactory.ExecutionMode.MAP, + 4 + ); + } + + /** + * This is a utility method to test string terms aggregation + * @param addFieldConsumer a function that determines how a field is added to the document + * @param includeDeletedDocumentsInSegment whether to include deleted documents in the segment + * @param includeDocCountField whether to add the _doc_count field to one of the indexed documents + * @param collectSegmentOrds whether to collect segment ords; set true to utilize the LowCardinality implementation of GlobalOrdinalsStringTermsAggregator + * @param executionMode execution mode, either MAP or GLOBAL_ORDINALS + * @param expectedCollectCount expected number of documents visited as part of collect() invocation + */ + private void testSimple( + TriConsumer<Document, String, String> addFieldConsumer, + final boolean includeDeletedDocumentsInSegment, + final boolean includeDocCountField, + boolean collectSegmentOrds, + TermsAggregatorFactory.ExecutionMode executionMode, + final int expectedCollectCount + ) throws Exception { try (Directory directory = newDirectory()) { - try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + try ( + RandomIndexWriter indexWriter = new RandomIndexWriter( + random(), + directory, + newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE) + ) + ) { Document document = new Document(); - document.add(new SortedSetDocValuesField("string", new BytesRef("a"))); - document.add(new SortedSetDocValuesField("string", new BytesRef("b"))); + addFieldConsumer.apply(document, "string", "a"); + addFieldConsumer.apply(document, "string", "b"); indexWriter.addDocument(document); document = new Document(); - document.add(new SortedSetDocValuesField("string", new BytesRef(""))); - document.add(new SortedSetDocValuesField("string", new BytesRef("c"))); - document.add(new SortedSetDocValuesField("string", new BytesRef("a"))); + addFieldConsumer.apply(document, "string", ""); + addFieldConsumer.apply(document, "string", "c"); + addFieldConsumer.apply(document, "string", "a"); indexWriter.addDocument(document); document = new Document(); - document.add(new SortedSetDocValuesField("string", new BytesRef("b"))); - document.add(new SortedSetDocValuesField("string", new BytesRef("d"))); + addFieldConsumer.apply(document, "string", "b"); + addFieldConsumer.apply(document, "string", "d"); indexWriter.addDocument(document); document = new Document(); - document.add(new SortedSetDocValuesField("string", new BytesRef(""))); + addFieldConsumer.apply(document, "string", ""); + if (includeDocCountField) { + // Adding _doc_count to one document + document.add(new NumericDocValuesField("_doc_count", 10)); + } indexWriter.addDocument(document); + + if (includeDeletedDocumentsInSegment) { + document = new Document(); + ADD_SORTED_SET_FIELD_INDEXED.apply(document, "string", "e"); + indexWriter.addDocument(document); + indexWriter.deleteDocuments(new Term("string", "e")); + assertEquals(5,
indexWriter.getDocStats().maxDoc); // deleted document still in segment + } + try (IndexReader indexReader = maybeWrapReaderEs(indexWriter.getReader())) { IndexSearcher indexSearcher = newIndexSearcher(indexReader); - for (TermsAggregatorFactory.ExecutionMode executionMode : TermsAggregatorFactory.ExecutionMode.values()) { - TermsAggregationBuilder aggregationBuilder = new TermsAggregationBuilder("_name").userValueTypeHint( - ValueType.STRING - ).executionHint(executionMode.toString()).field("string").order(BucketOrder.key(true)); - MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType("string"); - TermsAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); - aggregator.preCollection(); - indexSearcher.search(new MatchAllDocsQuery(), aggregator); - aggregator.postCollection(); - Terms result = reduce(aggregator); - assertEquals(5, result.getBuckets().size()); - assertEquals("", result.getBuckets().get(0).getKeyAsString()); + TermsAggregationBuilder aggregationBuilder = new TermsAggregationBuilder("_name").userValueTypeHint(ValueType.STRING) + .executionHint(executionMode.toString()) + .field("string") + .order(BucketOrder.key(true)); + MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType("string"); + + // Pin the implementation under test: collectSegmentOrds=true selects the LowCardinality GlobalOrdinalsStringTermsAggregator, and REMAP_GLOBAL_ORDS=false keeps the dense (non-remapped) collection strategy + TermsAggregatorFactory.COLLECT_SEGMENT_ORDS = collectSegmentOrds; + TermsAggregatorFactory.REMAP_GLOBAL_ORDS = false; + CountingAggregator aggregator = createCountingAggregator(aggregationBuilder, indexSearcher, false, fieldType); + + aggregator.preCollection(); + indexSearcher.search(new MatchAllDocsQuery(), aggregator); + aggregator.postCollection(); + Terms result = reduce(aggregator); + assertEquals(5, result.getBuckets().size()); + assertEquals("", result.getBuckets().get(0).getKeyAsString()); + if (includeDocCountField) { + assertEquals(11L, result.getBuckets().get(0).getDocCount()); + } else { assertEquals(2L, result.getBuckets().get(0).getDocCount()); - assertEquals("a", result.getBuckets().get(1).getKeyAsString()); - assertEquals(2L, result.getBuckets().get(1).getDocCount()); - assertEquals("b", result.getBuckets().get(2).getKeyAsString()); - assertEquals(2L, result.getBuckets().get(2).getDocCount()); - assertEquals("c", result.getBuckets().get(3).getKeyAsString()); - assertEquals(1L, result.getBuckets().get(3).getDocCount()); - assertEquals("d", result.getBuckets().get(4).getKeyAsString()); - assertEquals(1L, result.getBuckets().get(4).getDocCount()); - assertTrue(AggregationInspectionHelper.hasValue((InternalTerms) result)); } + assertEquals("a", result.getBuckets().get(1).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(1).getDocCount()); + assertEquals("b", result.getBuckets().get(2).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(2).getDocCount()); + assertEquals("c", result.getBuckets().get(3).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(3).getDocCount()); + assertEquals("d", result.getBuckets().get(4).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(4).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue((InternalTerms) result)); + + assertEquals(expectedCollectCount, aggregator.getCollectCount().get()); } } } @@ -1543,5 +1660,4 @@ private <T extends InternalAggregation> T reduce(Aggregator agg) throws IOExcept doAssertReducedMultiBucketConsumer(result, reduceBucketConsumer); return result; } - } diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/UnsignedLongTermsTests.java
b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/UnsignedLongTermsTests.java index b961039e50501..478961c2a404c 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/UnsignedLongTermsTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/UnsignedLongTermsTests.java @@ -36,6 +36,12 @@ public class UnsignedLongTermsTests extends InternalTermsTestCase { long minDocCount = 1; int requiredSize = 3; int shardSize = requiredSize + 2; + TermsAggregator.BucketCountThresholds bucketCountThresholds = new TermsAggregator.BucketCountThresholds( + minDocCount, + 0, + requiredSize, + shardSize + ); DocValueFormat format = randomNumericDocValueFormat(); long otherDocCount = 0; List<UnsignedLongTerms.Bucket> buckets = new ArrayList<>(); @@ -52,15 +58,14 @@ public class UnsignedLongTermsTests extends InternalTermsTestCase { name, reduceOrder, order, - requiredSize, - minDocCount, metadata, format, shardSize, showTermDocCountError, otherDocCount, buckets, - docCountError + docCountError, + bucketCountThresholds ); } @@ -135,15 +140,14 @@ protected Class<? extends ParsedMultiBucketAggregation> implementationClass() { name, longTerms.reduceOrder, order, - requiredSize, - minDocCount, metadata, format, shardSize, showTermDocCountError, otherDocCount, buckets, - docCountError + docCountError, + new TermsAggregator.BucketCountThresholds(minDocCount, 0, requiredSize, shardSize) ); } else { String name = instance.getName(); @@ -172,7 +176,7 @@ protected Class<? extends ParsedMultiBucketAggregation> implementationClass() { default: throw new AssertionError("Illegal randomisation branch"); } - return new UnmappedTerms(name, order, requiredSize, minDocCount, metadata); + return new UnmappedTerms(name, order, new TermsAggregator.BucketCountThresholds(minDocCount, 0, requiredSize, 0), metadata); } } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/AbstractGeoTestCase.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/AbstractGeoTestCase.java index 036bb39e790ae..94cb4c7955a21 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/AbstractGeoTestCase.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/AbstractGeoTestCase.java @@ -32,34 +32,39 @@ package org.opensearch.search.aggregations.metrics; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; -import org.opensearch.common.Strings; import org.opensearch.common.document.DocumentField; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.geometry.utils.Geohash; import org.opensearch.search.SearchHit; import org.opensearch.search.sort.SortBuilders; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.geo.RandomGeoGenerator; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; 
+import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; @OpenSearchIntegTestCase.SuiteScopeTestCase -public abstract class AbstractGeoTestCase extends OpenSearchIntegTestCase { +public abstract class AbstractGeoTestCase extends ParameterizedStaticSettingsOpenSearchIntegTestCase { protected static final String SINGLE_VALUED_FIELD_NAME = "geo_value"; protected static final String MULTI_VALUED_FIELD_NAME = "geo_values"; @@ -70,7 +75,6 @@ public abstract class AbstractGeoTestCase extends OpenSearchIntegTestCase { protected static final String DATELINE_IDX_NAME = "dateline_idx"; protected static final String HIGH_CARD_IDX_NAME = "high_card_idx"; protected static final String IDX_ZERO_NAME = "idx_zero"; - protected static int numDocs; protected static int numUniqueGeoPoints; protected static GeoPoint[] singleValues, multiValues; @@ -80,6 +84,18 @@ public abstract class AbstractGeoTestCase extends OpenSearchIntegTestCase { protected static Map<String, GeoPoint> expectedCentroidsForGeoHash = null; protected static final double GEOHASH_TOLERANCE = 1E-5D; + public AbstractGeoTestCase(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override public void setupSuiteScopeCluster() throws Exception { createIndex(UNMAPPED_IDX_NAME); @@ -259,7 +275,7 @@ public void setupSuiteScopeCluster() throws Exception { long totalHits = response.getHits().getTotalHits().value; XContentBuilder builder = XContentFactory.jsonBuilder(); response.toXContent(builder, ToXContent.EMPTY_PARAMS); - logger.info("Full high_card_idx Response Content:\n{ {} }", Strings.toString(builder)); + logger.info("Full high_card_idx Response Content:\n{ {} }", builder.toString()); for (int i = 0; i < totalHits; i++) { SearchHit searchHit = response.getHits().getAt(i); assertThat("Hit " + i + " with id: " + searchHit.getId(), searchHit.getIndex(), equalTo("high_card_idx")); diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/AbstractPercentilesTestCase.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/AbstractPercentilesTestCase.java index 187a089aee180..0144f9413e4b0 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/AbstractPercentilesTestCase.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/AbstractPercentilesTestCase.java @@ -32,10 +32,9 @@ package org.opensearch.search.aggregations.metrics; -import org.opensearch.common.Strings; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregation.CommonFields; import org.opensearch.search.aggregations.InternalAggregation; @@ -162,6 +161,6 @@ public void testEmptyRanksXContent() throws IOException { + "}"; } - assertThat(Strings.toString(builder), equalTo(expected)); + 
assertThat(builder.toString(), equalTo(expected)); } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/AvgAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/AvgAggregatorTests.java index 99c6ed121011e..f8eab720f8d6c 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/AvgAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/AvgAggregatorTests.java @@ -39,12 +39,12 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.MultiReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.NumericUtils; import org.opensearch.common.CheckedConsumer; import org.opensearch.common.settings.Settings; diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorTests.java index e33851f286613..cdd17e2fa7dd6 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorTests.java @@ -36,10 +36,10 @@ import org.apache.lucene.document.IntPoint; import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.opensearch.common.CheckedConsumer; import org.opensearch.common.geo.GeoPoint; import org.opensearch.index.mapper.MappedFieldType; diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/ExtendedStatsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/ExtendedStatsAggregatorTests.java index 6fab4436182c2..69c5ac88b62f1 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/ExtendedStatsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/ExtendedStatsAggregatorTests.java @@ -36,10 +36,10 @@ import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.NumericUtils; import org.opensearch.common.CheckedConsumer; import org.opensearch.index.mapper.MappedFieldType; diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/GeoCentroidAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/GeoCentroidAggregatorTests.java index 6883a7ff15953..af936d253dd7e 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/GeoCentroidAggregatorTests.java 
+++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/GeoCentroidAggregatorTests.java @@ -34,10 +34,10 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.LatLonDocValuesField; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.opensearch.common.geo.GeoPoint; import org.opensearch.index.mapper.GeoPointFieldMapper; import org.opensearch.index.mapper.MappedFieldType; diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/HDRPercentileRanksAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/HDRPercentileRanksAggregatorTests.java index e459b63aa058b..1aaaa1e1bd262 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/HDRPercentileRanksAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/HDRPercentileRanksAggregatorTests.java @@ -36,10 +36,10 @@ import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.MultiReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.NumericUtils; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.NumberFieldMapper; diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/HDRPercentilesAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/HDRPercentilesAggregatorTests.java index e08ac840e1785..4c0534c95a116 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/HDRPercentilesAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/HDRPercentilesAggregatorTests.java @@ -39,12 +39,12 @@ import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; import org.opensearch.common.CheckedConsumer; import org.opensearch.index.mapper.KeywordFieldMapper; diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlusSparseTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlusSparseTests.java index 5968605fc0c34..408a2069d2306 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlusSparseTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlusSparseTests.java @@ -32,13 +32,13 @@ package org.opensearch.search.aggregations.metrics; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.breaker.CircuitBreakingException; -import 
org.opensearch.common.breaker.NoopCircuitBreaker; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.BitMixer; -import org.opensearch.indices.breaker.CircuitBreakerService; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.CircuitBreakingException; +import org.opensearch.core.common.breaker.NoopCircuitBreaker; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.test.OpenSearchTestCase; import org.hamcrest.CoreMatchers; import org.hamcrest.Matchers; diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlusTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlusTests.java index 6f5f7494331a5..53349e4a840d1 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlusTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlusTests.java @@ -32,12 +32,12 @@ package org.opensearch.search.aggregations.metrics; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.breaker.CircuitBreakingException; -import org.opensearch.common.breaker.NoopCircuitBreaker; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.BitMixer; -import org.opensearch.indices.breaker.CircuitBreakerService; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.CircuitBreakingException; +import org.opensearch.core.common.breaker.NoopCircuitBreaker; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.test.OpenSearchTestCase; import java.util.HashSet; diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalCardinalityTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalCardinalityTests.java index a95533bb3c33c..0b45328641df3 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalCardinalityTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalCardinalityTests.java @@ -32,12 +32,12 @@ package org.opensearch.search.aggregations.metrics; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.BitMixer; import org.opensearch.common.util.MockBigArrays; import org.opensearch.common.util.MockPageCacheRecycler; -import org.opensearch.common.lease.Releasables; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.search.aggregations.ParsedAggregation; import org.opensearch.test.InternalAggregationTestCase; import org.junit.After; diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalHDRPercentilesRanksTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalHDRPercentilesRanksTests.java index 3540227d839a6..3d1b385147e50 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalHDRPercentilesRanksTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalHDRPercentilesRanksTests.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.metrics; -import org.HdrHistogram.DoubleHistogram; import org.opensearch.search.DocValueFormat; import java.util.Arrays; 
@@ -40,6 +39,8 @@ import java.util.List; import java.util.Map; +import org.HdrHistogram.DoubleHistogram; + public class InternalHDRPercentilesRanksTests extends InternalPercentilesRanksTestCase<InternalHDRPercentileRanks> { @Override diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalHDRPercentilesTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalHDRPercentilesTests.java index 6d947c6b68e36..f96a88c86ea1c 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalHDRPercentilesTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalHDRPercentilesTests.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.metrics; -import org.HdrHistogram.DoubleHistogram; import org.opensearch.search.DocValueFormat; import java.util.Arrays; @@ -41,6 +40,8 @@ import java.util.List; import java.util.Map; +import org.HdrHistogram.DoubleHistogram; + import static java.util.Collections.emptyMap; public class InternalHDRPercentilesTests extends InternalPercentilesTestCase<InternalHDRPercentiles> { diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalStatsTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalStatsTests.java index 57b0c09814d5e..7aa6daa9768b0 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalStatsTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalStatsTests.java @@ -31,10 +31,9 @@ package org.opensearch.search.aggregations.metrics; -import org.opensearch.common.Strings; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.ParsedAggregation; @@ -263,7 +262,7 @@ public void testDoXContentBody() throws IOException { + "\""; } expected += "\n}"; - assertEquals(expected, Strings.toString(builder)); + assertEquals(expected, builder.toString()); // count is zero format = randomNumericDocValueFormat(); @@ -285,7 +284,7 @@ public void testDoXContentBody() throws IOException { + " \"avg\" : null,\n" + " \"sum\" : 0.0\n" + "}", - Strings.toString(builder) + builder.toString() ); } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalTopHitsTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalTopHitsTests.java index 786ac01db6ab6..01e259f84660d 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalTopHitsTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalTopHitsTests.java @@ -35,26 +35,27 @@ import org.apache.lucene.index.IndexWriter; import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.FieldDoc; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopFieldDocs; import org.apache.lucene.search.TotalHits; import org.apache.lucene.util.BytesRef; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; import org.opensearch.common.document.DocumentField; import 
org.opensearch.common.lucene.search.TopDocsAndMaxScore; -import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; import org.opensearch.search.aggregations.ParsedAggregation; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.InternalAggregationTestCase; import org.opensearch.test.NotEqualMessageBuilder; +import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; import java.util.ArrayList; @@ -356,7 +357,7 @@ private Comparator<ScoreDoc> sortFieldsComparator(SortField[] sortFields) { FieldComparator[] comparators = new FieldComparator[sortFields.length]; for (int i = 0; i < sortFields.length; i++) { // Values passed to getComparator shouldn't matter - comparators[i] = sortFields[i].getComparator(0, false); + comparators[i] = sortFields[i].getComparator(0, Pruning.NONE); } return (lhs, rhs) -> { FieldDoc l = (FieldDoc) lhs; diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/MaxAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/MaxAggregatorTests.java index 3d5ad3f5c163c..3b965a51275b6 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/MaxAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/MaxAggregatorTests.java @@ -49,13 +49,13 @@ import org.apache.lucene.index.MultiReader; import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.PointValues; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.Bits; import org.opensearch.common.CheckedConsumer; import org.opensearch.common.collect.Tuple; diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorTests.java index 57267d4cf9c9b..794dd44b2db94 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorTests.java @@ -36,10 +36,10 @@ import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.IndexableField; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.opensearch.common.CheckedConsumer; import org.opensearch.common.settings.Settings; import org.opensearch.index.mapper.MappedFieldType; diff --git 
a/server/src/test/java/org/opensearch/search/aggregations/metrics/MetricAggScriptPlugin.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/MetricAggScriptPlugin.java index cdc7ef260c750..b4dbc8017bc3b 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/MetricAggScriptPlugin.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/MetricAggScriptPlugin.java @@ -32,6 +32,10 @@ package org.opensearch.search.aggregations.metrics; +import org.opensearch.script.MockScriptPlugin; +import org.opensearch.search.lookup.LeafDocLookup; +import org.opensearch.test.OpenSearchTestCase; + import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -39,13 +43,9 @@ import java.util.function.BiFunction; import java.util.function.Function; -import org.opensearch.script.MockScriptPlugin; -import org.opensearch.search.lookup.LeafDocLookup; -import org.opensearch.test.OpenSearchTestCase; - /** * Provides a number of dummy scripts for tests. - * + * <p> * Each script provided allows for an {@code inc} parameter which will * be added to each value read from a document. */ diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/MinAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/MinAggregatorTests.java index f0273b46ef97e..69cd63e4b9e84 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/MinAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/MinAggregatorTests.java @@ -50,18 +50,19 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.MultiReader; import org.apache.lucene.index.NoMergePolicy; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; import org.opensearch.common.CheckedConsumer; import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.BigArrays; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.IpFieldMapper; import org.opensearch.index.mapper.KeywordFieldMapper; @@ -69,7 +70,6 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.index.query.QueryShardContext; -import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.script.MockScriptEngine; import org.opensearch.script.Script; import org.opensearch.script.ScriptEngine; diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/PercentilesTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/PercentilesTests.java index 82f10161afe83..4e3454f41d810 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/PercentilesTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/PercentilesTests.java @@ -32,9 +32,9 @@ package org.opensearch.search.aggregations.metrics; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.xcontent.XContentParseException; import 
org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.search.aggregations.BaseAggregationTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/ScriptedMetricAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/ScriptedMetricAggregatorTests.java index 6a4ecf01577d8..53e5f2bfb53bb 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/ScriptedMetricAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/ScriptedMetricAggregatorTests.java @@ -37,23 +37,23 @@ import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; import org.opensearch.common.CheckedConsumer; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.breaker.CircuitBreakingException; -import org.opensearch.common.breaker.NoopCircuitBreaker; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.BigArrays; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.CircuitBreakingException; +import org.opensearch.core.common.breaker.NoopCircuitBreaker; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.QueryShardContext; -import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.script.MockScriptEngine; import org.opensearch.script.Script; import org.opensearch.script.ScriptEngine; diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/StatsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/StatsAggregatorTests.java index c215c0959b342..85a1ed20df4c9 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/StatsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/StatsAggregatorTests.java @@ -37,10 +37,10 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.MultiReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.NumericUtils; import org.opensearch.common.CheckedConsumer; import org.opensearch.common.settings.Settings; diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/SumAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/SumAggregatorTests.java index 72b09d7509b02..b029417999c6e 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/SumAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/SumAggregatorTests.java @@ -40,7 +40,6 @@ import 
org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.MultiReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.IndexSearcher; @@ -48,6 +47,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; import org.opensearch.common.CheckedConsumer; diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/TDigestPercentileRanksAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/TDigestPercentileRanksAggregatorTests.java index 2bdc8d88bc366..d4a4245a16628 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/TDigestPercentileRanksAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/TDigestPercentileRanksAggregatorTests.java @@ -36,10 +36,10 @@ import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.MultiReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.NumericUtils; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.NumberFieldMapper; diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/TDigestPercentilesAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/TDigestPercentilesAggregatorTests.java index fd98a090367b2..c5b1cdae1e7e2 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/TDigestPercentilesAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/TDigestPercentilesAggregatorTests.java @@ -37,12 +37,12 @@ import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.opensearch.common.CheckedConsumer; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.NumberFieldMapper; diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/TopHitsAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/TopHitsAggregatorTests.java index 20d1cf4ffd904..c355f0078108b 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/TopHitsAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/TopHitsAggregatorTests.java @@ -40,7 +40,6 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.tests.index.RandomIndexWriter; import 
org.apache.lucene.index.Term; import org.apache.lucene.queryparser.classic.QueryParser; import org.apache.lucene.search.BooleanClause.Occur; @@ -51,6 +50,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; import org.opensearch.core.common.text.Text; import org.opensearch.index.mapper.DocumentMapper; diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/TopHitsTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/TopHitsTests.java index 5b812ccab8618..52073cb1cc0c3 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/TopHitsTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/TopHitsTests.java @@ -32,8 +32,8 @@ package org.opensearch.search.aggregations.metrics; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.xcontent.XContentParser; import org.opensearch.search.aggregations.AggregationInitializationException; import org.opensearch.search.aggregations.AggregatorFactories; import org.opensearch.search.aggregations.BaseAggregationTestCase; diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/ValueCountAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/ValueCountAggregatorTests.java index 1527f28ec3269..9babdbe78bf14 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/ValueCountAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/ValueCountAggregatorTests.java @@ -41,10 +41,10 @@ import org.apache.lucene.document.SortedDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.document.SortedSetDocValuesField; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; import org.opensearch.common.CheckedConsumer; import org.opensearch.common.geo.GeoPoint; diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/WeightedAvgAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/WeightedAvgAggregatorTests.java index 844f05f1d2208..8d5992e15d983 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/WeightedAvgAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/WeightedAvgAggregatorTests.java @@ -37,12 +37,12 @@ import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.NumericUtils; import org.opensearch.common.CheckedConsumer; import org.opensearch.index.mapper.MappedFieldType; diff --git 
a/server/src/test/java/org/opensearch/search/aggregations/metrics/weighted_avg/WeightedAvgAggregationBuilderTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/weighted_avg/WeightedAvgAggregationBuilderTests.java index 9bc0aff862b84..9032fbb456e9d 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/weighted_avg/WeightedAvgAggregationBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/weighted_avg/WeightedAvgAggregationBuilderTests.java @@ -32,8 +32,8 @@ package org.opensearch.search.aggregations.metrics.weighted_avg; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.search.SearchModule; diff --git a/server/src/test/java/org/opensearch/search/aggregations/pipeline/AvgBucketAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/pipeline/AvgBucketAggregatorTests.java index a5779d4a289f8..de213a154c3c5 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/pipeline/AvgBucketAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/pipeline/AvgBucketAggregatorTests.java @@ -36,11 +36,11 @@ import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.opensearch.common.time.DateFormatters; import org.opensearch.index.mapper.DateFieldMapper; import org.opensearch.index.mapper.MappedFieldType; @@ -80,15 +80,15 @@ public class AvgBucketAggregatorTests extends AggregatorTestCase { /** * Test for issue #30608. Under the following circumstances: - * + * <p> * A. Multi-bucket agg in the first entry of our internal list * B. Regular agg as the immediate child of the multi-bucket in A * C. Regular agg with the same name as B at the top level, listed as the second entry in our internal list * D. Finally, a pipeline agg with the path down to B - * + * <p> * BucketMetrics reduction would throw a class cast exception due to bad subpathing. This test ensures * it is fixed. 
- * + * <p> * Note: we have this test inside of the `avg_bucket` package so that we can get access to the package-private * `reduce()` needed for testing this */ @@ -144,6 +144,6 @@ public void testSameAggNames() throws IOException { } private static long asLong(String dateTime) { - return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli(); + return DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(dateTime)).toInstant().toEpochMilli(); } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/pipeline/BucketScriptAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/pipeline/BucketScriptAggregatorTests.java index a1ff2a40d0404..39eac127918de 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/pipeline/BucketScriptAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/pipeline/BucketScriptAggregatorTests.java @@ -37,11 +37,11 @@ import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; import org.opensearch.common.CheckedConsumer; import org.opensearch.common.settings.Settings; diff --git a/server/src/test/java/org/opensearch/search/aggregations/pipeline/BucketScriptTests.java b/server/src/test/java/org/opensearch/search/aggregations/pipeline/BucketScriptTests.java index b1296801da988..723f151a8b927 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/pipeline/BucketScriptTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/pipeline/BucketScriptTests.java @@ -32,8 +32,8 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.script.Script; import org.opensearch.script.ScriptType; import org.opensearch.search.aggregations.BasePipelineAggregationTestCase; diff --git a/server/src/test/java/org/opensearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java index fdd898b8fbc4c..5d7636208bd70 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java @@ -37,12 +37,12 @@ import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.opensearch.common.CheckedConsumer; import org.opensearch.common.time.DateFormatters; import org.opensearch.index.mapper.DateFieldMapper; @@ -344,6 +344,6 @@ private void 
executeTestCase( } private static long asLong(String dateTime) { - return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli(); + return DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(dateTime)).toInstant().toEpochMilli(); } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/pipeline/DerivativeAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/pipeline/DerivativeAggregatorTests.java index 88628cd44c721..6f94772303158 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/pipeline/DerivativeAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/pipeline/DerivativeAggregatorTests.java @@ -36,11 +36,11 @@ import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.opensearch.ExceptionsHelper; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.common.CheckedConsumer; @@ -60,8 +60,8 @@ import org.opensearch.search.aggregations.metrics.StatsAggregationBuilder; import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.metrics.SumAggregationBuilder; -import org.opensearch.search.aggregations.support.AggregationPath; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; +import org.opensearch.search.aggregations.support.AggregationPath; import java.io.IOException; import java.util.List; diff --git a/server/src/test/java/org/opensearch/search/aggregations/pipeline/ExtendedStatsBucketTests.java b/server/src/test/java/org/opensearch/search/aggregations/pipeline/ExtendedStatsBucketTests.java index b3b44d2b3f794..7602053bb1ed6 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/pipeline/ExtendedStatsBucketTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/pipeline/ExtendedStatsBucketTests.java @@ -32,8 +32,8 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.aggregations.AggregationBuilder; import org.opensearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder; diff --git a/server/src/test/java/org/opensearch/search/aggregations/pipeline/GapPolicyTests.java b/server/src/test/java/org/opensearch/search/aggregations/pipeline/GapPolicyTests.java index 6a3b7e4f9dcb0..5e78bff3530b7 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/pipeline/GapPolicyTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/pipeline/GapPolicyTests.java @@ -32,8 +32,8 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.core.common.ParsingException; import org.opensearch.common.io.stream.AbstractWriteableEnumTestCase; +import org.opensearch.core.common.ParsingException; import java.io.IOException; diff --git 
a/server/src/test/java/org/opensearch/search/aggregations/pipeline/InternalPercentilesBucketTests.java b/server/src/test/java/org/opensearch/search/aggregations/pipeline/InternalPercentilesBucketTests.java index 430b3aa55da4d..e6e7b4f228ed3 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/pipeline/InternalPercentilesBucketTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/pipeline/InternalPercentilesBucketTests.java @@ -32,10 +32,9 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.common.Strings; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregation.CommonFields; import org.opensearch.search.aggregations.ParsedAggregation; @@ -192,7 +191,7 @@ public void testEmptyRanksXContent() throws IOException { + "}"; } - assertThat(Strings.toString(builder), equalTo(expected)); + assertThat(builder.toString(), equalTo(expected)); } @Override diff --git a/server/src/test/java/org/opensearch/search/aggregations/pipeline/MovAvgTests.java b/server/src/test/java/org/opensearch/search/aggregations/pipeline/MovAvgTests.java index eb07138b7bff3..958dc3082df71 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/pipeline/MovAvgTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/pipeline/MovAvgTests.java @@ -32,18 +32,18 @@ package org.opensearch.search.aggregations.pipeline; -import static java.util.Collections.emptyList; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.nullValue; - -import java.io.IOException; - import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.search.aggregations.BasePipelineAggregationTestCase; import org.opensearch.search.aggregations.PipelineAggregationBuilder; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.search.aggregations.pipeline.HoltWintersModel.SeasonalityType; +import java.io.IOException; + +import static java.util.Collections.emptyList; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; + public class MovAvgTests extends BasePipelineAggregationTestCase<MovAvgPipelineAggregationBuilder> { @Override diff --git a/server/src/test/java/org/opensearch/search/aggregations/pipeline/MovFnAggrgatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/pipeline/MovFnAggrgatorTests.java index d841625272828..d6abe50ea5201 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/pipeline/MovFnAggrgatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/pipeline/MovFnAggrgatorTests.java @@ -38,11 +38,11 @@ import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatters; import org.opensearch.index.mapper.DateFieldMapper; @@ -169,6 +169,6 @@ private void 
executeTestCase(Query query, DateHistogramAggregationBuilder aggBui } private static long asLong(String dateTime) { - return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli(); + return DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(dateTime)).toInstant().toEpochMilli(); } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketTests.java b/server/src/test/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketTests.java index d1a13ec918e1a..2a382d72059e7 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketTests.java @@ -32,8 +32,8 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.aggregations.AggregationBuilder; import org.opensearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder; diff --git a/server/src/test/java/org/opensearch/search/aggregations/support/IncludeExcludeTests.java b/server/src/test/java/org/opensearch/search/aggregations/support/IncludeExcludeTests.java index 1423fd6771674..f223648de6ef4 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/support/IncludeExcludeTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/support/IncludeExcludeTests.java @@ -36,12 +36,12 @@ import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LongBitSet; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.ParseField; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.fielddata.AbstractSortedSetDocValues; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.bucket.terms.IncludeExclude; @@ -218,7 +218,7 @@ public void testRegexExclude() throws IOException { // Serializes/deserializes an IncludeExclude statement with a single clause private IncludeExclude serialize(IncludeExclude incExc, ParseField field) throws IOException { - XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); if (randomBoolean()) { builder.prettyPrint(); } @@ -264,7 +264,7 @@ public void testRegexIncludeAndExclude() throws IOException { // Serializes/deserializes the IncludeExclude statement with include AND // exclude clauses private IncludeExclude serializeMixedRegex(IncludeExclude incExc) throws IOException { - XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); if (randomBoolean()) { builder.prettyPrint(); } diff --git a/server/src/test/java/org/opensearch/search/aggregations/support/MissingValuesTests.java 
b/server/src/test/java/org/opensearch/search/aggregations/support/MissingValuesTests.java index 3dcab9edb444a..c44e0fcca7584 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/support/MissingValuesTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/support/MissingValuesTests.java @@ -37,8 +37,8 @@ import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.index.SortedSetDocValues; -import org.apache.lucene.util.BytesRef; import org.apache.lucene.tests.util.TestUtil; +import org.apache.lucene.util.BytesRef; import org.opensearch.common.geo.GeoPoint; import org.opensearch.index.fielddata.AbstractSortedNumericDocValues; import org.opensearch.index.fielddata.AbstractSortedSetDocValues; diff --git a/server/src/test/java/org/opensearch/search/aggregations/support/MultiTermsValuesSourceConfigTests.java b/server/src/test/java/org/opensearch/search/aggregations/support/MultiTermsValuesSourceConfigTests.java index 3b668f3c78c06..62e3cf2fac479 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/support/MultiTermsValuesSourceConfigTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/support/MultiTermsValuesSourceConfigTests.java @@ -8,9 +8,9 @@ package org.opensearch.search.aggregations.support; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.script.Script; diff --git a/server/src/test/java/org/opensearch/search/aggregations/support/MultiValuesSourceFieldConfigTests.java b/server/src/test/java/org/opensearch/search/aggregations/support/MultiValuesSourceFieldConfigTests.java index 3f348e34f4c27..40242e6711d85 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/support/MultiValuesSourceFieldConfigTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/support/MultiValuesSourceFieldConfigTests.java @@ -32,9 +32,9 @@ package org.opensearch.search.aggregations.support; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.query.QueryBuilder; diff --git a/server/src/test/java/org/opensearch/search/aggregations/support/ScriptValuesTests.java b/server/src/test/java/org/opensearch/search/aggregations/support/ScriptValuesTests.java index 98e4e8f881b1b..98dde2c7a31b3 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/support/ScriptValuesTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/support/ScriptValuesTests.java @@ -33,7 +33,7 @@ package org.opensearch.search.aggregations.support; import com.carrotsearch.randomizedtesting.generators.RandomStrings; -import java.util.Collections; + import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Scorable; import org.apache.lucene.util.BytesRef; @@ -47,6 +47,7 @@ import java.io.IOException; import java.util.Arrays; +import java.util.Collections; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -59,7 +60,7 @@ private static class 
FakeAggregationScript extends AggregationScript { int index; FakeAggregationScript(Object[][] values) { - super(Collections.emptyMap(), new SearchLookup(null, null) { + super(Collections.emptyMap(), new SearchLookup(null, null, SearchLookup.UNKNOWN_SHARD_ID) { @Override public LeafSearchLookup getLeafSearchLookup(LeafReaderContext context) { diff --git a/server/src/test/java/org/opensearch/search/aggregations/support/ValuesSourceRegistryTests.java b/server/src/test/java/org/opensearch/search/aggregations/support/ValuesSourceRegistryTests.java index 128030eec735b..aa7852e12cf73 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/support/ValuesSourceRegistryTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/support/ValuesSourceRegistryTests.java @@ -36,10 +36,11 @@ import org.opensearch.script.AggregationScript; import org.opensearch.search.aggregations.bucket.histogram.HistogramAggregatorSupplier; import org.opensearch.test.OpenSearchTestCase; -import org.mockito.Mockito; import java.util.Collections; +import org.mockito.Mockito; + import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; diff --git a/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java b/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java index f62ef8af22828..9778798b706f4 100644 --- a/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java +++ b/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java @@ -8,24 +8,22 @@ package org.opensearch.search.backpressure; -import org.junit.After; -import org.junit.Before; import org.opensearch.Version; import org.opensearch.action.search.SearchShardTask; import org.opensearch.action.search.SearchTask; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.backpressure.settings.SearchBackpressureMode; import org.opensearch.search.backpressure.settings.SearchBackpressureSettings; import org.opensearch.search.backpressure.settings.SearchShardTaskSettings; import org.opensearch.search.backpressure.settings.SearchTaskSettings; +import org.opensearch.search.backpressure.stats.SearchBackpressureStats; import org.opensearch.search.backpressure.stats.SearchShardTaskStats; import org.opensearch.search.backpressure.stats.SearchTaskStats; import org.opensearch.search.backpressure.trackers.NodeDuressTracker; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.search.backpressure.stats.SearchBackpressureStats; import org.opensearch.search.backpressure.trackers.TaskResourceUsageTracker; import org.opensearch.search.backpressure.trackers.TaskResourceUsageTrackerType; import org.opensearch.tasks.CancellableTask; @@ -34,10 +32,13 @@ import org.opensearch.tasks.TaskCancellationService; import org.opensearch.tasks.TaskManager; import org.opensearch.tasks.TaskResourceTrackingService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; import 
org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; import java.io.IOException; import java.util.Collections; @@ -51,6 +52,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.LongSupplier; +import static org.opensearch.search.backpressure.SearchBackpressureTestHelpers.createMockTaskWithResourceStats; import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.any; @@ -60,7 +62,6 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import static org.opensearch.search.backpressure.SearchBackpressureTestHelpers.createMockTaskWithResourceStats; public class SearchBackpressureServiceTests extends OpenSearchTestCase { MockTransportService transportService; @@ -70,7 +71,7 @@ public class SearchBackpressureServiceTests extends OpenSearchTestCase { @Before public void setup() { threadPool = new TestThreadPool(getClass().getName()); - transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool); + transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, NoopTracer.INSTANCE); transportService.start(); transportService.acceptIncomingRequests(); taskManager = transportService.getTaskManager(); @@ -248,10 +249,11 @@ public void testSearchTaskInFlightCancellation() { verify(mockTaskManager, times(10)).cancelTaskAndDescendants(any(), anyString(), anyBoolean(), any()); assertEquals(3, service.getSearchBackpressureState(SearchTask.class).getLimitReachedCount()); - // Verify search backpressure stats. + // Verify search backpressure stats. Since we are not marking any task as completed, the completionCount will be 0 + // for SearchTaskStats here. SearchBackpressureStats expectedStats = new SearchBackpressureStats( - new SearchTaskStats(10, 3, Map.of(TaskResourceUsageTrackerType.CPU_USAGE_TRACKER, new MockStats(10))), - new SearchShardTaskStats(0, 0, Collections.emptyMap()), + new SearchTaskStats(10, 3, 0, Map.of(TaskResourceUsageTrackerType.CPU_USAGE_TRACKER, new MockStats(10))), + new SearchShardTaskStats(0, 0, 0, Collections.emptyMap()), SearchBackpressureMode.ENFORCED ); SearchBackpressureStats actualStats = service.nodeStats(); @@ -322,10 +324,11 @@ public void testSearchShardTaskInFlightCancellation() { verify(mockTaskManager, times(12)).cancelTaskAndDescendants(any(), anyString(), anyBoolean(), any()); assertEquals(3, service.getSearchBackpressureState(SearchShardTask.class).getLimitReachedCount()); - // Verify search backpressure stats. + // Verify search backpressure stats. We are marking 20 SearchShardTasks as completed, so this should get + // reflected in SearchShardTaskStats. 
SearchBackpressureStats expectedStats = new SearchBackpressureStats( - new SearchTaskStats(0, 0, Collections.emptyMap()), - new SearchShardTaskStats(12, 3, Map.of(TaskResourceUsageTrackerType.CPU_USAGE_TRACKER, new MockStats(12))), + new SearchTaskStats(0, 0, 0, Collections.emptyMap()), + new SearchShardTaskStats(12, 3, 20, Map.of(TaskResourceUsageTrackerType.CPU_USAGE_TRACKER, new MockStats(12))), SearchBackpressureMode.ENFORCED ); SearchBackpressureStats actualStats = service.nodeStats(); diff --git a/server/src/test/java/org/opensearch/search/backpressure/stats/SearchShardTaskStatsTests.java b/server/src/test/java/org/opensearch/search/backpressure/stats/SearchShardTaskStatsTests.java index 6478fdfff61d4..f28b82cad30d3 100644 --- a/server/src/test/java/org/opensearch/search/backpressure/stats/SearchShardTaskStatsTests.java +++ b/server/src/test/java/org/opensearch/search/backpressure/stats/SearchShardTaskStatsTests.java @@ -39,6 +39,11 @@ public static SearchShardTaskStats randomInstance() { new ElapsedTimeTracker.Stats(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()) ); - return new SearchShardTaskStats(randomNonNegativeLong(), randomNonNegativeLong(), resourceUsageTrackerStats); + return new SearchShardTaskStats( + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + resourceUsageTrackerStats + ); } } diff --git a/server/src/test/java/org/opensearch/search/backpressure/stats/SearchTaskStatsTests.java b/server/src/test/java/org/opensearch/search/backpressure/stats/SearchTaskStatsTests.java index eb33bc1c37b7e..cc7aa92826b41 100644 --- a/server/src/test/java/org/opensearch/search/backpressure/stats/SearchTaskStatsTests.java +++ b/server/src/test/java/org/opensearch/search/backpressure/stats/SearchTaskStatsTests.java @@ -40,6 +40,6 @@ public static SearchTaskStats randomInstance() { new ElapsedTimeTracker.Stats(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()) ); - return new SearchTaskStats(randomNonNegativeLong(), randomNonNegativeLong(), resourceUsageTrackerStats); + return new SearchTaskStats(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), resourceUsageTrackerStats); } } diff --git a/server/src/test/java/org/opensearch/search/backpressure/trackers/HeapUsageTrackerTests.java b/server/src/test/java/org/opensearch/search/backpressure/trackers/HeapUsageTrackerTests.java index b2ef3215f869f..3950d00b0c8b5 100644 --- a/server/src/test/java/org/opensearch/search/backpressure/trackers/HeapUsageTrackerTests.java +++ b/server/src/test/java/org/opensearch/search/backpressure/trackers/HeapUsageTrackerTests.java @@ -24,9 +24,9 @@ import java.util.List; import java.util.Optional; +import static org.opensearch.search.backpressure.SearchBackpressureTestHelpers.createMockTaskWithResourceStats; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; -import static org.opensearch.search.backpressure.SearchBackpressureTestHelpers.createMockTaskWithResourceStats; public class HeapUsageTrackerTests extends OpenSearchTestCase { private static final long HEAP_BYTES_THRESHOLD_SEARCH_SHARD_TASK = 100; diff --git a/server/src/test/java/org/opensearch/search/builder/SearchSourceBuilderTests.java b/server/src/test/java/org/opensearch/search/builder/SearchSourceBuilderTests.java index 461812077eba9..3d0e5c3eaf1c0 100644 --- a/server/src/test/java/org/opensearch/search/builder/SearchSourceBuilderTests.java +++ 
b/server/src/test/java/org/opensearch/search/builder/SearchSourceBuilderTests.java @@ -33,19 +33,20 @@ package org.opensearch.search.builder; import com.fasterxml.jackson.core.JsonParseException; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.index.query.BoolQueryBuilder; import org.opensearch.index.query.MatchNoneQueryBuilder; import org.opensearch.index.query.QueryBuilders; @@ -57,8 +58,8 @@ import org.opensearch.search.sort.FieldSortBuilder; import org.opensearch.search.sort.ScoreSortBuilder; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.EqualsHashCodeTestUtils; +import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; import java.util.Map; @@ -72,7 +73,7 @@ public class SearchSourceBuilderTests extends AbstractSearchTestCase { public void testFromXContent() throws IOException { SearchSourceBuilder testSearchSourceBuilder = createSearchSourceBuilder(); - XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); if (randomBoolean()) { builder.prettyPrint(); } @@ -425,7 +426,7 @@ public void testToXContent() throws IOException { XContentType xContentType = randomFrom(XContentType.values()); { SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); - XContentBuilder builder = XContentFactory.contentBuilder(xContentType); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(xContentType); searchSourceBuilder.toXContent(builder, ToXContent.EMPTY_PARAMS); BytesReference source = BytesReference.bytes(builder); Map<String, Object> sourceAsMap = XContentHelper.convertToMap(source, false, xContentType).v2(); @@ -434,7 +435,7 @@ public void testToXContent() throws IOException { { SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); searchSourceBuilder.query(RandomQueryBuilder.createQuery(random())); - XContentBuilder builder = XContentFactory.contentBuilder(xContentType); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(xContentType); searchSourceBuilder.toXContent(builder, ToXContent.EMPTY_PARAMS); BytesReference source = BytesReference.bytes(builder); Map<String, Object> sourceAsMap = XContentHelper.convertToMap(source, false, xContentType).v2(); @@ -448,7 +449,7 @@ public void testToXContentWithPointInTime() throws IOException { SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); TimeValue keepAlive = 
randomBoolean() ? TimeValue.timeValueHours(1) : null; searchSourceBuilder.pointInTimeBuilder(new PointInTimeBuilder("id").setKeepAlive(keepAlive)); - XContentBuilder builder = XContentFactory.contentBuilder(xContentType); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(xContentType); searchSourceBuilder.toXContent(builder, ToXContent.EMPTY_PARAMS); BytesReference bytes = BytesReference.bytes(builder); Map<String, Object> sourceAsMap = XContentHelper.convertToMap(bytes, false, xContentType).v2(); diff --git a/server/src/test/java/org/opensearch/search/collapse/CollapseBuilderTests.java b/server/src/test/java/org/opensearch/search/collapse/CollapseBuilderTests.java index afbde486f4b92..fff27ecf34509 100644 --- a/server/src/test/java/org/opensearch/search/collapse/CollapseBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/collapse/CollapseBuilderTests.java @@ -31,16 +31,16 @@ package org.opensearch.search.collapse; -import org.apache.lucene.tests.analysis.MockAnalyzer; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.search.Query; import org.apache.lucene.store.ByteBuffersDirectory; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.analysis.MockAnalyzer; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.mapper.KeywordFieldMapper; diff --git a/server/src/test/java/org/opensearch/search/fetch/subphase/FetchSourcePhaseTests.java b/server/src/test/java/org/opensearch/search/fetch/subphase/FetchSourcePhaseTests.java index 882065866243a..6159e4a5c79b1 100644 --- a/server/src/test/java/org/opensearch/search/fetch/subphase/FetchSourcePhaseTests.java +++ b/server/src/test/java/org/opensearch/search/fetch/subphase/FetchSourcePhaseTests.java @@ -34,9 +34,9 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.memory.MemoryIndex; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.SearchHit; import org.opensearch.search.fetch.FetchContext; diff --git a/server/src/test/java/org/opensearch/search/fetch/subphase/FieldFetcherTests.java b/server/src/test/java/org/opensearch/search/fetch/subphase/FieldFetcherTests.java index 447b4a0354637..1c8a93f6483ae 100644 --- a/server/src/test/java/org/opensearch/search/fetch/subphase/FieldFetcherTests.java +++ b/server/src/test/java/org/opensearch/search/fetch/subphase/FieldFetcherTests.java @@ -34,11 +34,11 @@ import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.document.DocumentField; import org.opensearch.common.settings.Settings; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexService; import 
org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.MapperService; diff --git a/server/src/test/java/org/opensearch/search/fetch/subphase/InnerHitsPhaseTests.java b/server/src/test/java/org/opensearch/search/fetch/subphase/InnerHitsPhaseTests.java new file mode 100644 index 0000000000000..7ca5977a1c276 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/fetch/subphase/InnerHitsPhaseTests.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.fetch.subphase; + +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.search.fetch.FetchContext; +import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.lookup.SearchLookup; +import org.opensearch.test.OpenSearchTestCase; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class InnerHitsPhaseTests extends OpenSearchTestCase { + + /* + Returns mock search context reused across test methods + */ + private SearchContext getMockSearchContext(final boolean hasInnerHits) { + final QueryShardContext queryShardContext = mock(QueryShardContext.class); + when(queryShardContext.newFetchLookup()).thenReturn(mock(SearchLookup.class)); + + final SearchContext searchContext = mock(SearchContext.class); + when(searchContext.hasInnerHits()).thenReturn(hasInnerHits); + when(searchContext.getQueryShardContext()).thenReturn(queryShardContext); + + return searchContext; + } + + /* + Validates that InnerHitsPhase processor is not initialized when no inner hits + */ + public void testInnerHitsNull() { + assertNull(new InnerHitsPhase(null).getProcessor(new FetchContext(getMockSearchContext(false)))); + } + + /* + Validates that InnerHitsPhase processor is initialized when inner hits are present + */ + public void testInnerHitsNonNull() { + final SearchContext searchContext = getMockSearchContext(true); + when(searchContext.innerHits()).thenReturn(new InnerHitsContext()); + + assertNotNull(new InnerHitsPhase(null).getProcessor(new FetchContext(searchContext))); + } + +} diff --git a/server/src/test/java/org/opensearch/search/fetch/subphase/ScriptFieldsPhaseTests.java b/server/src/test/java/org/opensearch/search/fetch/subphase/ScriptFieldsPhaseTests.java new file mode 100644 index 0000000000000..eb6338997ab9f --- /dev/null +++ b/server/src/test/java/org/opensearch/search/fetch/subphase/ScriptFieldsPhaseTests.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.fetch.subphase; + +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.search.fetch.FetchContext; +import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.lookup.SearchLookup; +import org.opensearch.test.OpenSearchTestCase; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class ScriptFieldsPhaseTests extends OpenSearchTestCase { + + /* + Returns mock search context reused across test methods + */ + private SearchContext getMockSearchContext(final boolean hasScriptFields) { + final QueryShardContext queryShardContext = mock(QueryShardContext.class); + when(queryShardContext.newFetchLookup()).thenReturn(mock(SearchLookup.class)); + + final SearchContext searchContext = mock(SearchContext.class); + when(searchContext.hasScriptFields()).thenReturn(hasScriptFields); + when(searchContext.getQueryShardContext()).thenReturn(queryShardContext); + + return searchContext; + } + + /* + Validates that ScriptFieldsPhase processor is not initialized when no script fields + */ + public void testScriptFieldsNull() { + assertNull(new ScriptFieldsPhase().getProcessor(new FetchContext(getMockSearchContext(false)))); + } + + /* + Validates that ScriptFieldsPhase processor is initialized when script fields are present + */ + public void testScriptFieldsNonNull() { + final SearchContext searchContext = getMockSearchContext(true); + when(searchContext.scriptFields()).thenReturn(new ScriptFieldsContext()); + + assertNotNull(new ScriptFieldsPhase().getProcessor(new FetchContext(searchContext))); + } + +} diff --git a/server/src/test/java/org/opensearch/search/fetch/subphase/highlight/HighlightBuilderTests.java b/server/src/test/java/org/opensearch/search/fetch/subphase/highlight/HighlightBuilderTests.java index b1dfd836f7461..2bc30b4fba380 100644 --- a/server/src/test/java/org/opensearch/search/fetch/subphase/highlight/HighlightBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/fetch/subphase/highlight/HighlightBuilderTests.java @@ -35,21 +35,21 @@ import org.apache.lucene.search.Query; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.core.common.ParsingException; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.BigArrays; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.core.index.Index; import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.ContentPath; import 
org.opensearch.index.mapper.MappedFieldType; @@ -66,8 +66,8 @@ import org.opensearch.search.fetch.subphase.highlight.HighlightBuilder.Field; import org.opensearch.search.fetch.subphase.highlight.HighlightBuilder.Order; import org.opensearch.search.fetch.subphase.highlight.SearchHighlightContext.FieldOptions; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTestCase; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -137,7 +137,7 @@ public void testEqualsAndHashcode() throws IOException { public void testFromXContent() throws IOException { for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) { HighlightBuilder highlightBuilder = randomHighlighterBuilder(); - XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); if (randomBoolean()) { builder.prettyPrint(); } @@ -547,7 +547,7 @@ public void testOrderSerialization() throws Exception { } protected static XContentBuilder toXContent(HighlightBuilder highlight, XContentType contentType) throws IOException { - XContentBuilder builder = XContentFactory.contentBuilder(contentType); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(contentType); if (randomBoolean()) { builder.prettyPrint(); } diff --git a/server/src/test/java/org/opensearch/search/fetch/subphase/highlight/HighlightFieldTests.java b/server/src/test/java/org/opensearch/search/fetch/subphase/highlight/HighlightFieldTests.java index a66c7de6ced74..38dbb0a4d1cad 100644 --- a/server/src/test/java/org/opensearch/search/fetch/subphase/highlight/HighlightFieldTests.java +++ b/server/src/test/java/org/opensearch/search/fetch/subphase/highlight/HighlightFieldTests.java @@ -32,16 +32,15 @@ package org.opensearch.search.fetch.subphase.highlight; -import org.opensearch.common.Strings; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.text.Text; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -82,7 +81,7 @@ public void testReplaceUnicodeControlCharacters() { public void testFromXContent() throws IOException { HighlightField highlightField = createTestItem(); XContentType xcontentType = randomFrom(XContentType.values()); - XContentBuilder builder = XContentFactory.contentBuilder(xcontentType); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(xcontentType); if (randomBoolean()) { builder.prettyPrint(); } @@ -109,7 +108,7 @@ public void testToXContent() throws IOException { builder.startObject(); field.toXContent(builder, ToXContent.EMPTY_PARAMS); builder.endObject(); - assertEquals("{\n" + " \"foo\" : [\n" + " \"bar\",\n" + " \"baz\"\n" + " ]\n" + "}", Strings.toString(builder)); + assertEquals("{\n" + " \"foo\" : [\n" + " \"bar\",\n" + " \"baz\"\n" + " ]\n" + "}", builder.toString()); field = new HighlightField("foo", null); builder = 
JsonXContent.contentBuilder(); @@ -117,7 +116,7 @@ public void testToXContent() throws IOException { builder.startObject(); field.toXContent(builder, ToXContent.EMPTY_PARAMS); builder.endObject(); - assertEquals("{\n" + " \"foo\" : null\n" + "}", Strings.toString(builder)); + assertEquals("{\n" + " \"foo\" : null\n" + "}", builder.toString()); } /** diff --git a/server/src/test/java/org/opensearch/search/fetch/subphase/highlight/PlainHighlighterTests.java b/server/src/test/java/org/opensearch/search/fetch/subphase/highlight/PlainHighlighterTests.java index 18288aeac13e1..1e5f7d3f9f489 100644 --- a/server/src/test/java/org/opensearch/search/fetch/subphase/highlight/PlainHighlighterTests.java +++ b/server/src/test/java/org/opensearch/search/fetch/subphase/highlight/PlainHighlighterTests.java @@ -32,11 +32,11 @@ package org.opensearch.search.fetch.subphase.highlight; -import org.apache.lucene.tests.analysis.MockAnalyzer; import org.apache.lucene.index.Term; import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.highlight.QueryScorer; +import org.apache.lucene.tests.analysis.MockAnalyzer; import org.apache.lucene.tests.util.LuceneTestCase; public class PlainHighlighterTests extends LuceneTestCase { diff --git a/server/src/test/java/org/opensearch/search/geo/GeoPointShapeQueryTests.java b/server/src/test/java/org/opensearch/search/geo/GeoPointShapeQueryTests.java index 16b2043330167..b6b2a86ac7549 100644 --- a/server/src/test/java/org/opensearch/search/geo/GeoPointShapeQueryTests.java +++ b/server/src/test/java/org/opensearch/search/geo/GeoPointShapeQueryTests.java @@ -42,8 +42,8 @@ import org.opensearch.common.geo.builders.MultiLineStringBuilder; import org.opensearch.common.geo.builders.MultiPointBuilder; import org.opensearch.common.geo.builders.PointBuilder; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.geometry.Line; import org.opensearch.geometry.LinearRing; import org.opensearch.geometry.MultiLine; diff --git a/server/src/test/java/org/opensearch/search/geo/GeoQueryTests.java b/server/src/test/java/org/opensearch/search/geo/GeoQueryTests.java index 8af50bc498d90..1a1650c91776a 100644 --- a/server/src/test/java/org/opensearch/search/geo/GeoQueryTests.java +++ b/server/src/test/java/org/opensearch/search/geo/GeoQueryTests.java @@ -42,9 +42,9 @@ import org.opensearch.common.geo.builders.GeometryCollectionBuilder; import org.opensearch.common.geo.builders.MultiPolygonBuilder; import org.opensearch.common.geo.builders.PolygonBuilder; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.geometry.Geometry; import org.opensearch.geometry.Rectangle; import org.opensearch.index.query.GeoShapeQueryBuilder; @@ -53,11 +53,12 @@ import org.opensearch.search.SearchHits; import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.test.TestGeoShapeFieldMapperPlugin; -import org.locationtech.jts.geom.Coordinate; import java.util.Collection; import java.util.Collections; +import org.locationtech.jts.geom.Coordinate; + import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static 
org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; @@ -87,7 +88,7 @@ public void testNullShape() throws Exception { client().prepareIndex(defaultIndexName) .setId("aNullshape") - .setSource("{\"geo\": null}", XContentType.JSON) + .setSource("{\"geo\": null}", MediaTypeRegistry.JSON) .setRefreshPolicy(IMMEDIATE) .get(); GetResponse result = client().prepareGet(defaultIndexName, "aNullshape").get(); diff --git a/server/src/test/java/org/opensearch/search/geo/GeoShapeQueryTests.java b/server/src/test/java/org/opensearch/search/geo/GeoShapeQueryTests.java index 182bdbf5bf610..1262ea4750a99 100644 --- a/server/src/test/java/org/opensearch/search/geo/GeoShapeQueryTests.java +++ b/server/src/test/java/org/opensearch/search/geo/GeoShapeQueryTests.java @@ -33,11 +33,11 @@ package org.opensearch.search.geo; import com.carrotsearch.randomizedtesting.generators.RandomNumbers; + import org.apache.lucene.tests.geo.GeoTestUtil; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.CheckedSupplier; -import org.opensearch.common.Strings; import org.opensearch.common.geo.ShapeRelation; import org.opensearch.common.geo.SpatialStrategy; import org.opensearch.common.geo.builders.CircleBuilder; @@ -51,23 +51,23 @@ import org.opensearch.common.geo.builders.ShapeBuilder; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.DistanceUnit; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.mapper.LegacyGeoShapeFieldMapper; import org.opensearch.index.mapper.MapperParsingException; import org.opensearch.index.query.ExistsQueryBuilder; import org.opensearch.index.query.GeoShapeQueryBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.test.geo.RandomShapeGenerator; -import org.locationtech.jts.geom.Coordinate; -import org.locationtech.spatial4j.shape.Rectangle; import java.io.IOException; import java.util.Locale; -import static com.carrotsearch.randomizedtesting.RandomizedTest.assumeTrue; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.spatial4j.shape.Rectangle; + import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.geoIntersectionQuery; @@ -83,6 +83,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.not; +import static com.carrotsearch.randomizedtesting.RandomizedTest.assumeTrue; public class GeoShapeQueryTests extends GeoQueryTests { protected static final String[] PREFIX_TREES = new String[] { @@ -156,7 +157,7 @@ public void testShapeFetchingPath() throws Exception { .setId("1") .setSource( String.format(Locale.ROOT, "{ %s, \"1\" : { %s, \"2\" : { %s, \"3\" : { %s } }} }", location, location, location, location), - XContentType.JSON + MediaTypeRegistry.JSON ) .setRefreshPolicy(IMMEDIATE) .get(); @@ -292,7 +293,7 @@ public void testEnvelopeSpanningDateline() throws Exception { + "],\r\n" + "\"type\": \"Point\"\r\n" + "}}"; - client().index(new 
IndexRequest("test").id("1").source(doc1, XContentType.JSON).setRefreshPolicy(IMMEDIATE)).actionGet(); + client().index(new IndexRequest("test").id("1").source(doc1, MediaTypeRegistry.JSON).setRefreshPolicy(IMMEDIATE)).actionGet(); String doc2 = "{\"geo\": {\r\n" + "\"coordinates\": [\r\n" @@ -301,7 +302,7 @@ public void testEnvelopeSpanningDateline() throws Exception { + "],\r\n" + "\"type\": \"Point\"\r\n" + "}}"; - client().index(new IndexRequest("test").id("2").source(doc2, XContentType.JSON).setRefreshPolicy(IMMEDIATE)).actionGet(); + client().index(new IndexRequest("test").id("2").source(doc2, MediaTypeRegistry.JSON).setRefreshPolicy(IMMEDIATE)).actionGet(); String doc3 = "{\"geo\": {\r\n" + "\"coordinates\": [\r\n" @@ -310,7 +311,7 @@ public void testEnvelopeSpanningDateline() throws Exception { + "],\r\n" + "\"type\": \"Point\"\r\n" + "}}"; - client().index(new IndexRequest("test").id("3").source(doc3, XContentType.JSON).setRefreshPolicy(IMMEDIATE)).actionGet(); + client().index(new IndexRequest("test").id("3").source(doc3, MediaTypeRegistry.JSON).setRefreshPolicy(IMMEDIATE)).actionGet(); @SuppressWarnings("unchecked") CheckedSupplier<GeoShapeQueryBuilder, IOException> querySupplier = randomFrom( @@ -438,7 +439,7 @@ public void testEdgeCases() throws Exception { .endObject() .endObject() .endObject(); - String mapping = Strings.toString(xcb); + String mapping = xcb.toString(); client().admin().indices().prepareCreate("test").setMapping(mapping).get(); ensureGreen(); @@ -524,9 +525,9 @@ public void testReusableBuilder() throws IOException { } private void assertUnmodified(ShapeBuilder builder) throws IOException { - String before = Strings.toString(jsonBuilder().startObject().field("area", builder).endObject()); + String before = jsonBuilder().startObject().field("area", builder).endObject().toString(); builder.buildS4J(); - String after = Strings.toString(jsonBuilder().startObject().field("area", builder).endObject()); + String after = jsonBuilder().startObject().field("area", builder).endObject().toString(); assertThat(before, equalTo(after)); } @@ -614,20 +615,19 @@ public void testExistsQuery() throws Exception { } public void testPointsOnly() throws Exception { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("location") - .field("type", "geo_shape") - .field("tree", randomBoolean() ? "quadtree" : "geohash") - .field("tree_levels", "6") - .field("distance_error_pct", "0.01") - .field("points_only", true) - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("location") + .field("type", "geo_shape") + .field("tree", randomBoolean() ? "quadtree" : "geohash") + .field("tree_levels", "6") + .field("distance_error_pct", "0.01") + .field("points_only", true) + .endObject() + .endObject() + .endObject() + .toString(); client().admin().indices().prepareCreate("geo_points_only").setMapping(mapping).get(); ensureGreen(); @@ -652,20 +652,19 @@ public void testPointsOnly() throws Exception { } public void testPointsOnlyExplicit() throws Exception { - String mapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("geo") - .field("type", "geo_shape") - .field("tree", randomBoolean() ? 
"quadtree" : "geohash") - .field("tree_levels", "6") - .field("distance_error_pct", "0.01") - .field("points_only", true) - .endObject() - .endObject() - .endObject() - ); + String mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("geo") + .field("type", "geo_shape") + .field("tree", randomBoolean() ? "quadtree" : "geohash") + .field("tree_levels", "6") + .field("distance_error_pct", "0.01") + .field("points_only", true) + .endObject() + .endObject() + .endObject() + .toString(); client().admin().indices().prepareCreate("geo_points_only").setMapping(mapping).get(); ensureGreen(); @@ -693,7 +692,7 @@ public void testPointsOnlyExplicit() throws Exception { } public void testIndexedShapeReference() throws Exception { - String mapping = Strings.toString(createDefaultMapping()); + String mapping = createDefaultMapping().toString(); client().admin().indices().prepareCreate("test").setMapping(mapping).get(); createIndex("shapes"); ensureGreen(); @@ -888,7 +887,7 @@ public void testShapeFilterWithDefinedGeoCollection() throws Exception { } public void testDistanceQuery() throws Exception { - String mapping = Strings.toString(createRandomMapping()); + String mapping = createRandomMapping().toString(); client().admin().indices().prepareCreate("test_distance").setMapping(mapping).get(); ensureGreen(); @@ -930,7 +929,7 @@ public void testDistanceQuery() throws Exception { } public void testIndexRectangleSpanningDateLine() throws Exception { - String mapping = Strings.toString(createRandomMapping()); + String mapping = createRandomMapping().toString(); client().admin().indices().prepareCreate("test").setMapping(mapping).get(); ensureGreen(); diff --git a/server/src/test/java/org/opensearch/search/internal/AliasFilterTests.java b/server/src/test/java/org/opensearch/search/internal/AliasFilterTests.java index 52b7d383fb9b8..e968e0740b150 100644 --- a/server/src/test/java/org/opensearch/search/internal/AliasFilterTests.java +++ b/server/src/test/java/org/opensearch/search/internal/AliasFilterTests.java @@ -36,8 +36,8 @@ import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.query.TermQueryBuilder; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.EqualsHashCodeTestUtils; +import org.opensearch.test.OpenSearchTestCase; import java.util.Arrays; diff --git a/server/src/test/java/org/opensearch/search/internal/ContextIndexSearcherTests.java b/server/src/test/java/org/opensearch/search/internal/ContextIndexSearcherTests.java index f3907355ac6ec..a707c8b34e0a4 100644 --- a/server/src/test/java/org/opensearch/search/internal/ContextIndexSearcherTests.java +++ b/server/src/test/java/org/opensearch/search/internal/ContextIndexSearcherTests.java @@ -40,7 +40,6 @@ import org.apache.lucene.document.StringField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.FilterDirectoryReader; -import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReader; @@ -78,28 +77,32 @@ import org.opensearch.common.lucene.index.SequentialStoredFieldsLeafReader; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexSettings; import org.opensearch.index.cache.bitset.BitsetFilterCache; -import org.opensearch.core.index.shard.ShardId; 
import org.opensearch.index.shard.IndexShard; +import org.opensearch.search.SearchService; import org.opensearch.search.aggregations.LeafBucketCollector; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; import java.io.UncheckedIOException; import java.util.Collections; import java.util.IdentityHashMap; +import java.util.List; import java.util.Set; +import java.util.concurrent.ExecutorService; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; import static org.opensearch.search.internal.ContextIndexSearcher.intersectScorerAndBitSet; import static org.opensearch.search.internal.ExitableDirectoryReader.ExitableLeafReader; import static org.opensearch.search.internal.ExitableDirectoryReader.ExitablePointValues; import static org.opensearch.search.internal.ExitableDirectoryReader.ExitableTerms; +import static org.opensearch.search.internal.IndexReaderUtils.getLeaves; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class ContextIndexSearcherTests extends OpenSearchTestCase { public void testIntersectScorerAndRoleBits() throws Exception { @@ -304,6 +307,123 @@ public void onRemoval(ShardId shardId, Accountable accountable) { IOUtils.close(reader, w, dir); } + public void testSlicesInternal() throws Exception { + final List<LeafReaderContext> leaves = getLeaves(10); + try ( + final Directory directory = newDirectory(); + IndexWriter iw = new IndexWriter( + directory, + new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE) + ) + ) { + Document document = new Document(); + document.add(new StringField("field1", "value1", Field.Store.NO)); + document.add(new StringField("field2", "value1", Field.Store.NO)); + iw.addDocument(document); + iw.commit(); + try (DirectoryReader directoryReader = DirectoryReader.open(directory)) { + SearchContext searchContext = mock(SearchContext.class); + IndexShard indexShard = mock(IndexShard.class); + when(searchContext.indexShard()).thenReturn(indexShard); + when(searchContext.bucketCollectorProcessor()).thenReturn(SearchContext.NO_OP_BUCKET_COLLECTOR_PROCESSOR); + ContextIndexSearcher searcher = new ContextIndexSearcher( + directoryReader, + IndexSearcher.getDefaultSimilarity(), + IndexSearcher.getDefaultQueryCache(), + IndexSearcher.getDefaultQueryCachingPolicy(), + true, + null, + searchContext + ); + // Case 1: Verify the slice count when lucene default slice computation is used + IndexSearcher.LeafSlice[] slices = searcher.slicesInternal( + leaves, + SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_DEFAULT_VALUE + ); + int expectedSliceCount = 2; + // 2 slices will be created since max segment per slice of 5 will be reached + assertEquals(expectedSliceCount, slices.length); + for (int i = 0; i < expectedSliceCount; ++i) { + assertEquals(5, slices[i].leaves.length); + } + + // Case 2: Verify the slice count when custom max slice computation is used + expectedSliceCount = 4; + slices = searcher.slicesInternal(leaves, expectedSliceCount); + + // 4 slices will be created with 3 leaves in first 2 slices and 2 leaves in other slices + assertEquals(expectedSliceCount, slices.length); + for (int i = 0; i < expectedSliceCount; ++i) { + if (i < 2) { + assertEquals(3, slices[i].leaves.length); + } else { + assertEquals(2, 
slices[i].leaves.length); + } + } + } + } + } + + public void testGetSlicesWithNonNullExecutorButCSDisabled() throws Exception { + final List<LeafReaderContext> leaves = getLeaves(10); + try ( + final Directory directory = newDirectory(); + IndexWriter iw = new IndexWriter( + directory, + new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE) + ) + ) { + Document document = new Document(); + document.add(new StringField("field1", "value1", Field.Store.NO)); + document.add(new StringField("field2", "value1", Field.Store.NO)); + iw.addDocument(document); + iw.commit(); + try (DirectoryReader directoryReader = DirectoryReader.open(directory);) { + SearchContext searchContext = mock(SearchContext.class); + IndexShard indexShard = mock(IndexShard.class); + when(searchContext.indexShard()).thenReturn(indexShard); + when(searchContext.bucketCollectorProcessor()).thenReturn(SearchContext.NO_OP_BUCKET_COLLECTOR_PROCESSOR); + when(searchContext.shouldUseConcurrentSearch()).thenReturn(false); + ContextIndexSearcher searcher = new ContextIndexSearcher( + directoryReader, + IndexSearcher.getDefaultSimilarity(), + IndexSearcher.getDefaultQueryCache(), + IndexSearcher.getDefaultQueryCachingPolicy(), + true, + null, + searchContext + ); + // Case 1: Verify getSlices returns not null when concurrent segment search is disabled + assertEquals(1, searcher.getSlices().length); + + // Case 2: Verify the slice count when custom max slice computation is used + searcher = new ContextIndexSearcher( + directoryReader, + IndexSearcher.getDefaultSimilarity(), + IndexSearcher.getDefaultQueryCache(), + IndexSearcher.getDefaultQueryCachingPolicy(), + true, + mock(ExecutorService.class), + searchContext + ); + when(searchContext.shouldUseConcurrentSearch()).thenReturn(true); + when(searchContext.getTargetMaxSliceCount()).thenReturn(4); + int expectedSliceCount = 4; + IndexSearcher.LeafSlice[] slices = searcher.slices(leaves); + + // 4 slices will be created with 3 leaves in first 2 slices and 2 leaves in other slices + assertEquals(expectedSliceCount, slices.length); + for (int i = 0; i < expectedSliceCount; ++i) { + if (i < 2) { + assertEquals(3, slices[i].leaves.length); + } else { + assertEquals(2, slices[i].leaves.length); + } + } + } + } + } + private SparseFixedBitSet query(LeafReaderContext leaf, String field, String value) throws IOException { SparseFixedBitSet sparseFixedBitSet = new SparseFixedBitSet(leaf.reader().maxDoc()); TermsEnum tenum = leaf.reader().terms(field).iterator(); @@ -466,12 +586,12 @@ public String toString(String field) { } @Override - public Query rewrite(IndexReader reader) throws IOException { - Query queryRewritten = query.rewrite(reader); + public Query rewrite(IndexSearcher searcher) throws IOException { + Query queryRewritten = query.rewrite(searcher); if (query != queryRewritten) { return new CreateScorerOnceQuery(queryRewritten); } - return super.rewrite(reader); + return super.rewrite(searcher); } @Override diff --git a/server/src/test/java/org/opensearch/search/internal/IndexReaderUtils.java b/server/src/test/java/org/opensearch/search/internal/IndexReaderUtils.java new file mode 100644 index 0000000000000..16958da77f1a3 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/internal/IndexReaderUtils.java @@ -0,0 +1,55 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
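[Editorial note, not part of the patch] The CreateScorerOnceQuery hunk just above tracks Lucene's migration of Query#rewrite from IndexReader to IndexSearcher. A hedged sketch of the same delegating-rewrite shape, assuming a Lucene 9.x classpath; DelegatingQuery and wrap are hypothetical names introduced here for illustration.

import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;

import java.io.IOException;

abstract class DelegatingQuery extends Query {
    private final Query delegate;

    DelegatingQuery(Query delegate) {
        this.delegate = delegate;
    }

    @Override
    public Query rewrite(IndexSearcher searcher) throws IOException {
        // Rewrite against the searcher, as the Lucene 9.x API expects.
        Query rewritten = delegate.rewrite(searcher);
        if (rewritten != delegate) {
            // Re-wrap only when rewriting actually produced a new query.
            return wrap(rewritten);
        }
        // Otherwise defer to the default, which leaves this query unchanged.
        return super.rewrite(searcher);
    }

    protected abstract Query wrap(Query rewritten);
}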
+ */ + +package org.opensearch.search.internal; + +import org.apache.lucene.analysis.standard.StandardAnalyzer; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.StringField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.store.Directory; + +import java.util.List; + +import static org.apache.lucene.tests.util.LuceneTestCase.newDirectory; + +public class IndexReaderUtils { + + /** + * Utility to create leafCount number of {@link LeafReaderContext} + * @param leafCount count of leaves to create + * @return created leaves + */ + public static List<LeafReaderContext> getLeaves(int leafCount) throws Exception { + try ( + final Directory directory = newDirectory(); + final IndexWriter iw = new IndexWriter( + directory, + new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE) + ) + ) { + for (int i = 0; i < leafCount; ++i) { + Document document = new Document(); + final String fieldValue = "value" + i; + document.add(new StringField("field1", fieldValue, Field.Store.NO)); + document.add(new StringField("field2", fieldValue, Field.Store.NO)); + iw.addDocument(document); + iw.commit(); + } + try (DirectoryReader directoryReader = DirectoryReader.open(directory)) { + List<LeafReaderContext> leaves = directoryReader.leaves(); + return leaves; + } + } + } +} diff --git a/server/src/test/java/org/opensearch/search/internal/MaxTargetSliceSupplierTests.java b/server/src/test/java/org/opensearch/search/internal/MaxTargetSliceSupplierTests.java new file mode 100644 index 0000000000000..2684cf901f080 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/internal/MaxTargetSliceSupplierTests.java @@ -0,0 +1,77 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.internal; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.IndexSearcher; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.ArrayList; +import java.util.List; + +import static org.opensearch.search.internal.IndexReaderUtils.getLeaves; + +public class MaxTargetSliceSupplierTests extends OpenSearchTestCase { + + public void testSliceCountGreaterThanLeafCount() throws Exception { + int expectedSliceCount = 2; + IndexSearcher.LeafSlice[] slices = MaxTargetSliceSupplier.getSlices(getLeaves(expectedSliceCount), 5); + // verify slice count is same as leaf count + assertEquals(expectedSliceCount, slices.length); + for (int i = 0; i < expectedSliceCount; ++i) { + assertEquals(1, slices[i].leaves.length); + } + } + + public void testNegativeSliceCount() { + assertThrows(IllegalArgumentException.class, () -> MaxTargetSliceSupplier.getSlices(new ArrayList<>(), randomIntBetween(-3, 0))); + } + + public void testSingleSliceWithMultipleLeaves() throws Exception { + int leafCount = randomIntBetween(1, 10); + IndexSearcher.LeafSlice[] slices = MaxTargetSliceSupplier.getSlices(getLeaves(leafCount), 1); + assertEquals(1, slices.length); + assertEquals(leafCount, slices[0].leaves.length); + } + + public void testSliceCountLessThanLeafCount() throws Exception { + int leafCount = 12; + List<LeafReaderContext> leaves = getLeaves(leafCount); + + // Case 1: test with equal number of leaves per slice + int expectedSliceCount = 3; + IndexSearcher.LeafSlice[] slices = MaxTargetSliceSupplier.getSlices(leaves, expectedSliceCount); + int expectedLeavesPerSlice = leafCount / expectedSliceCount; + + assertEquals(expectedSliceCount, slices.length); + for (int i = 0; i < expectedSliceCount; ++i) { + assertEquals(expectedLeavesPerSlice, slices[i].leaves.length); + } + + // Case 2: test with first 2 slice more leaves than others + expectedSliceCount = 5; + slices = MaxTargetSliceSupplier.getSlices(leaves, expectedSliceCount); + int expectedLeavesInFirst2Slice = 3; + int expectedLeavesInOtherSlice = 2; + + assertEquals(expectedSliceCount, slices.length); + for (int i = 0; i < expectedSliceCount; ++i) { + if (i < 2) { + assertEquals(expectedLeavesInFirst2Slice, slices[i].leaves.length); + } else { + assertEquals(expectedLeavesInOtherSlice, slices[i].leaves.length); + } + } + } + + public void testEmptyLeaves() { + IndexSearcher.LeafSlice[] slices = MaxTargetSliceSupplier.getSlices(new ArrayList<>(), 2); + assertEquals(0, slices.length); + } +} diff --git a/server/src/test/java/org/opensearch/search/internal/ShardSearchRequestTests.java b/server/src/test/java/org/opensearch/search/internal/ShardSearchRequestTests.java index 1f632251cf588..c65262253d1fc 100644 --- a/server/src/test/java/org/opensearch/search/internal/ShardSearchRequestTests.java +++ b/server/src/test/java/org/opensearch/search/internal/ShardSearchRequestTests.java @@ -38,20 +38,21 @@ import org.opensearch.cluster.metadata.AliasMetadata; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Nullable; -import org.opensearch.common.Strings; import org.opensearch.common.UUIDs; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.common.Strings; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.DeprecationHandler; +import 
org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.query.RandomQueryBuilder; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.InvalidAliasNameException; import org.opensearch.search.AbstractSearchTestCase; import org.opensearch.search.SearchSortValuesAndFormatsTests; @@ -205,7 +206,7 @@ public static CompressedXContent filter(QueryBuilder filterBuilder) throws IOExc XContentBuilder builder = XContentFactory.jsonBuilder(); filterBuilder.toXContent(builder, ToXContent.EMPTY_PARAMS); builder.close(); - return new CompressedXContent(Strings.toString(builder)); + return new CompressedXContent(builder.toString()); } private IndexMetadata remove(IndexMetadata indexMetadata, String alias) { @@ -220,7 +221,7 @@ public QueryBuilder aliasFilter(IndexMetadata indexMetadata, String... aliasName return ShardSearchRequest.parseAliasFilter(bytes -> { try ( InputStream inputStream = bytes.streamInput(); - XContentParser parser = XContentFactory.xContentType(inputStream) + XContentParser parser = MediaTypeRegistry.xContentType(inputStream) .xContent() .createParser(xContentRegistry(), DeprecationHandler.THROW_UNSUPPORTED_OPERATION, inputStream) ) { diff --git a/server/src/test/java/org/opensearch/search/lookup/LeafDocLookupTests.java b/server/src/test/java/org/opensearch/search/lookup/LeafDocLookupTests.java index b047f16583ee5..0f3a9e49f263e 100644 --- a/server/src/test/java/org/opensearch/search/lookup/LeafDocLookupTests.java +++ b/server/src/test/java/org/opensearch/search/lookup/LeafDocLookupTests.java @@ -31,8 +31,8 @@ package org.opensearch.search.lookup; -import org.opensearch.index.fielddata.LeafFieldData; import org.opensearch.index.fielddata.IndexFieldData; +import org.opensearch.index.fielddata.LeafFieldData; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; diff --git a/server/src/test/java/org/opensearch/search/lookup/LeafFieldsLookupTests.java b/server/src/test/java/org/opensearch/search/lookup/LeafFieldsLookupTests.java index 85aacfbd63ee2..8c4b8ad6d1776 100644 --- a/server/src/test/java/org/opensearch/search/lookup/LeafFieldsLookupTests.java +++ b/server/src/test/java/org/opensearch/search/lookup/LeafFieldsLookupTests.java @@ -85,6 +85,7 @@ public void setUp() throws Exception { 0, VectorEncoding.FLOAT32, VectorSimilarityFunction.EUCLIDEAN, + false, false ); diff --git a/server/src/test/java/org/opensearch/search/lookup/SearchLookupTests.java b/server/src/test/java/org/opensearch/search/lookup/SearchLookupTests.java new file mode 100644 index 0000000000000..e942c3ab17420 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/lookup/SearchLookupTests.java @@ -0,0 +1,21 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
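[Editorial note, not part of the patch] The ShardSearchRequestTests change above swaps XContentFactory.xContentType(inputStream) for MediaTypeRegistry.xContentType(inputStream) when sniffing the serialized alias filter before parsing. A minimal sketch of that sniff-then-parse flow, using only calls visible in the hunk plus NamedXContentRegistry.EMPTY; firstFieldName is an illustrative helper, not part of the patch.

import java.io.ByteArrayInputStream;
import java.io.InputStream;

import org.opensearch.core.xcontent.DeprecationHandler;
import org.opensearch.core.xcontent.MediaTypeRegistry;
import org.opensearch.core.xcontent.NamedXContentRegistry;
import org.opensearch.core.xcontent.XContentParser;

public class SniffParse {
    public static String firstFieldName(byte[] bytes) throws Exception {
        // Sniffing needs mark/reset support on the stream; ByteArrayInputStream provides it.
        try (
            InputStream in = new ByteArrayInputStream(bytes);
            XContentParser parser = MediaTypeRegistry.xContentType(in)
                .xContent()
                .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, in)
        ) {
            parser.nextToken(); // START_OBJECT
            parser.nextToken(); // FIELD_NAME
            return parser.currentName();
        }
    }
}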
+ */ + +package org.opensearch.search.lookup; + +import org.opensearch.index.mapper.MapperService; +import org.opensearch.test.OpenSearchTestCase; + +import static org.mockito.Mockito.mock; + +public class SearchLookupTests extends OpenSearchTestCase { + public void testDeprecatedConstructorShardId() { + final SearchLookup searchLookup = new SearchLookup(mock(MapperService.class), (a, b) -> null); + assertThrows(IllegalStateException.class, searchLookup::shardId); + } +} diff --git a/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineInfoTests.java b/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineInfoTests.java index c19c1ebcb5c26..ce7344fa0d2d6 100644 --- a/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineInfoTests.java +++ b/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineInfoTests.java @@ -46,7 +46,7 @@ public void testSerializationRoundtrip() throws IOException { /** * When serializing / deserializing to / from old versions, processor type info is lost. - * + * <p> * Also, we only supported request/response processors. */ public void testSerializationRoundtripBackcompat() throws IOException { diff --git a/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java b/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java index d0f4a974fc305..f5851e669a2da 100644 --- a/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java +++ b/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java @@ -13,7 +13,6 @@ import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TotalHits; -import org.junit.Before; import org.opensearch.OpenSearchParseException; import org.opensearch.ResourceNotFoundException; import org.opensearch.Version; @@ -37,16 +36,18 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.breaker.NoopCircuitBreaker; import org.opensearch.common.lucene.search.TopDocsAndMaxScore; import org.opensearch.common.metrics.OperationStats; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.AtomicArray; import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.NoopCircuitBreaker; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexSettings; import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.plugins.SearchPipelinePlugin; @@ -60,6 +61,7 @@ import org.opensearch.test.MockLogAppender; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; +import org.junit.Before; import java.util.Collections; import java.util.Comparator; @@ -67,6 +69,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.ExecutorService; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; import static org.mockito.ArgumentMatchers.anyString; @@ -173,7 +176,7 @@ public void 
testResolveIndexDefaultPipeline() throws Exception { new PipelineConfiguration( "p1", new BytesArray("{\"request_processors\" : [ { \"scale_request_size\": { \"scale\" : 2 } } ] }"), - XContentType.JSON + MediaTypeRegistry.JSON ) ) ); @@ -194,7 +197,7 @@ public void testResolveIndexDefaultPipeline() throws Exception { service.applyClusterState(cce); SearchRequest searchRequest = new SearchRequest("my_index").source(SearchSourceBuilder.searchSource().size(5)); - PipelinedRequest pipelinedRequest = service.resolvePipeline(searchRequest); + PipelinedRequest pipelinedRequest = syncTransformRequest(service.resolvePipeline(searchRequest)); assertEquals("p1", pipelinedRequest.getPipeline().getId()); assertEquals(10, pipelinedRequest.source().size()); @@ -403,7 +406,7 @@ public void testUpdatePipelines() { + "\"phase_results_processors\" : [ { \"max_score\" : { \"score\": 100 } } ]" + "}" ), - XContentType.JSON + MediaTypeRegistry.JSON ); SearchPipelineMetadata pipelineMetadata = new SearchPipelineMetadata(Map.of("_id", pipeline)); clusterState = ClusterState.builder(clusterState) @@ -438,7 +441,7 @@ public void testPutPipeline() { ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); - PutSearchPipelineRequest putRequest = new PutSearchPipelineRequest(id, new BytesArray("{}"), XContentType.JSON); + PutSearchPipelineRequest putRequest = new PutSearchPipelineRequest(id, new BytesArray("{}"), MediaTypeRegistry.JSON); ClusterState previousClusterState = clusterState; clusterState = SearchPipelineService.innerPut(putRequest, clusterState); searchPipelineService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); @@ -450,7 +453,7 @@ public void testPutPipeline() { assertEquals(0, pipeline.pipeline.getSearchResponseProcessors().size()); // Overwrite pipeline - putRequest = new PutSearchPipelineRequest(id, new BytesArray("{ \"description\": \"empty pipeline\"}"), XContentType.JSON); + putRequest = new PutSearchPipelineRequest(id, new BytesArray("{ \"description\": \"empty pipeline\"}"), MediaTypeRegistry.JSON); previousClusterState = clusterState; clusterState = SearchPipelineService.innerPut(putRequest, clusterState); searchPipelineService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); @@ -473,7 +476,7 @@ public void testPutInvalidPipeline() throws IllegalAccessException { PutSearchPipelineRequest putRequest = new PutSearchPipelineRequest( id, new BytesArray("{\"request_processors\" : [ { \"scale_request_size\": { \"scale\" : \"foo\" } } ] }"), - XContentType.JSON + MediaTypeRegistry.JSON ); clusterState = SearchPipelineService.innerPut(putRequest, clusterState); try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(SearchPipelineService.class))) { @@ -496,7 +499,7 @@ public void testDeletePipeline() { PipelineConfiguration config = new PipelineConfiguration( "_id", new BytesArray("{\"request_processors\" : [ { \"scale_request_size\": { \"scale\" : 2 } } ] }"), - XContentType.JSON + MediaTypeRegistry.JSON ); SearchPipelineMetadata searchPipelineMetadata = new SearchPipelineMetadata(Map.of("_id", config)); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); @@ -529,11 +532,11 @@ public void testDeletePipelinesWithWildcard() { SearchPipelineMetadata metadata = new SearchPipelineMetadata( Map.of( "p1", - new PipelineConfiguration("p1", definition, XContentType.JSON), + new PipelineConfiguration("p1", definition, MediaTypeRegistry.JSON), "p2", 
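[Editorial note, not part of the patch] The syncTransformRequest and syncTransformResponse helpers added in this hunk drive the now listener-based transform APIs synchronously by capturing results in one-element arrays. A generic restatement of that pattern for context; SyncDriver and drive are illustrative names, not part of the patch, and like the patch's helpers this assumes the listener is completed on the calling thread.

import java.util.function.BiConsumer;

import org.opensearch.core.action.ActionListener;

final class SyncDriver {
    // Drives a listener-based transform to completion and returns its result,
    // rethrowing any failure the listener received.
    static <T> T drive(BiConsumer<T, ActionListener<T>> transform, T input) throws Exception {
        Object[] resultBox = new Object[1];
        Exception[] exceptionBox = new Exception[1];
        transform.accept(input, ActionListener.wrap(r -> resultBox[0] = r, e -> exceptionBox[0] = e));
        if (exceptionBox[0] != null) {
            throw exceptionBox[0];
        }
        @SuppressWarnings("unchecked")
        T result = (T) resultBox[0];
        return result;
    }
}

The patch inlines this pattern per call site instead of abstracting it, which keeps casts such as the one to PipelinedRequest local to each helper.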
- new PipelineConfiguration("p2", definition, XContentType.JSON), + new PipelineConfiguration("p2", definition, MediaTypeRegistry.JSON), "q1", - new PipelineConfiguration("q1", definition, XContentType.JSON) + new PipelineConfiguration("q1", definition, MediaTypeRegistry.JSON) ) ); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); @@ -582,7 +585,7 @@ public void testTransformRequest() throws Exception { new PipelineConfiguration( "p1", new BytesArray("{\"request_processors\" : [ { \"scale_request_size\": { \"scale\" : 2 } } ] }"), - XContentType.JSON + MediaTypeRegistry.JSON ) ) ); @@ -597,7 +600,7 @@ public void testTransformRequest() throws Exception { SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(new TermQueryBuilder("foo", "bar")).size(size); SearchRequest request = new SearchRequest("_index").source(sourceBuilder).pipeline("p1"); - PipelinedRequest pipelinedRequest = searchPipelineService.resolvePipeline(request); + PipelinedRequest pipelinedRequest = syncTransformRequest(searchPipelineService.resolvePipeline(request)); assertEquals(2 * size, pipelinedRequest.source().size()); assertEquals(size, request.source().size()); @@ -617,7 +620,7 @@ public void testTransformResponse() throws Exception { new PipelineConfiguration( "p1", new BytesArray("{\"response_processors\" : [ { \"fixed_score\": { \"score\" : 2 } } ] }"), - XContentType.JSON + MediaTypeRegistry.JSON ) ) ); @@ -641,19 +644,57 @@ public void testTransformResponse() throws Exception { // First try without specifying a pipeline, which should be a no-op. SearchRequest searchRequest = new SearchRequest(); PipelinedRequest pipelinedRequest = searchPipelineService.resolvePipeline(searchRequest); - SearchResponse notTransformedResponse = pipelinedRequest.transformResponse(searchResponse); + SearchResponse notTransformedResponse = syncTransformResponse(pipelinedRequest, searchResponse); assertSame(searchResponse, notTransformedResponse); // Now apply a pipeline searchRequest = new SearchRequest().pipeline("p1"); pipelinedRequest = searchPipelineService.resolvePipeline(searchRequest); - SearchResponse transformedResponse = pipelinedRequest.transformResponse(searchResponse); + SearchResponse transformedResponse = syncTransformResponse(pipelinedRequest, searchResponse); assertEquals(size, transformedResponse.getHits().getHits().length); for (int i = 0; i < size; i++) { assertEquals(2.0, transformedResponse.getHits().getHits()[i].getScore(), 0.0001f); } } + /** + * Helper to synchronously apply a response pipeline, returning the transformed response. + */ + private static SearchResponse syncTransformResponse(PipelinedRequest pipelinedRequest, SearchResponse searchResponse) throws Exception { + SearchResponse[] responseBox = new SearchResponse[1]; + Exception[] exceptionBox = new Exception[1]; + ActionListener<SearchResponse> responseListener = pipelinedRequest.transformResponseListener(ActionListener.wrap(r -> { + responseBox[0] = r; + }, e -> { exceptionBox[0] = e; })); + responseListener.onResponse(searchResponse); + + if (exceptionBox[0] != null) { + throw exceptionBox[0]; + } + return responseBox[0]; + } + + /** + * Helper to synchronously apply a request pipeline, returning the transformed request. 
+ */ + private static PipelinedRequest syncTransformRequest(PipelinedRequest request) throws Exception { + PipelinedRequest[] requestBox = new PipelinedRequest[1]; + Exception[] exceptionBox = new Exception[1]; + + request.transformRequest(ActionListener.wrap(r -> requestBox[0] = (PipelinedRequest) r, e -> exceptionBox[0] = e)); + if (exceptionBox[0] != null) { + throw exceptionBox[0]; + } + return requestBox[0]; + } + + /** + * Helper to synchronously apply a request pipeline and response pipeline, returning the transformed response. + */ + private static SearchResponse syncExecutePipeline(PipelinedRequest request, SearchResponse response) throws Exception { + return syncTransformResponse(syncTransformRequest(request), response); + } + public void testTransformSearchPhase() { SearchPipelineService searchPipelineService = createWithProcessors(); SearchPipelineMetadata metadata = new SearchPipelineMetadata( @@ -662,7 +703,7 @@ public void testTransformSearchPhase() { new PipelineConfiguration( "p1", new BytesArray("{\"phase_results_processors\" : [ { \"max_score\" : { } } ]}"), - XContentType.JSON + MediaTypeRegistry.JSON ) ) ); @@ -747,19 +788,19 @@ public void testGetPipelines() { new PipelineConfiguration( "p1", new BytesArray("{\"request_processors\" : [ { \"scale_request_size\": { \"scale\" : 2 } } ] }"), - XContentType.JSON + MediaTypeRegistry.JSON ), "p2", new PipelineConfiguration( "p2", new BytesArray("{\"response_processors\" : [ { \"fixed_score\": { \"score\" : 2 } } ] }"), - XContentType.JSON + MediaTypeRegistry.JSON ), "p3", new PipelineConfiguration( "p3", new BytesArray("{\"phase_results_processors\" : [ { \"max_score\" : { } } ]}"), - XContentType.JSON + MediaTypeRegistry.JSON ) ) ); @@ -818,7 +859,7 @@ public void testValidatePipeline() throws Exception { + "\"phase_results_processors\" : [ { \"max_score\" : { } } ]" + "}" ), - XContentType.JSON + MediaTypeRegistry.JSON ); SearchPipelineInfo completePipelineInfo = new SearchPipelineInfo( @@ -843,7 +884,7 @@ public void testValidatePipeline() throws Exception { + "\"response_processors\": [{ \"fixed_score\": { \"score\" : 2 } }]" + "}" ), - XContentType.JSON + MediaTypeRegistry.JSON ); expectThrows( ClassCastException.class, @@ -875,7 +916,7 @@ public void testInlinePipeline() throws Exception { SearchRequest searchRequest = new SearchRequest().source(sourceBuilder); // Verify pipeline - PipelinedRequest pipelinedRequest = searchPipelineService.resolvePipeline(searchRequest); + PipelinedRequest pipelinedRequest = syncTransformRequest(searchPipelineService.resolvePipeline(searchRequest)); Pipeline pipeline = pipelinedRequest.getPipeline(); assertEquals(SearchPipelineService.AD_HOC_PIPELINE_ID, pipeline.getId()); assertEquals(1, pipeline.getSearchRequestProcessors().size()); @@ -894,7 +935,7 @@ public void testInlinePipeline() throws Exception { SearchResponseSections searchResponseSections = new SearchResponseSections(searchHits, null, null, false, false, null, 0); SearchResponse searchResponse = new SearchResponse(searchResponseSections, null, 1, 1, 0, 10, null, null); - SearchResponse transformedResponse = pipeline.transformResponse(searchRequest, searchResponse); + SearchResponse transformedResponse = syncTransformResponse(pipelinedRequest, searchResponse); for (int i = 0; i < size; i++) { assertEquals(2.0, transformedResponse.getHits().getHits()[i].getScore(), 0.0001); } @@ -946,7 +987,10 @@ public void testExceptionOnRequestProcessing() { SearchRequest searchRequest = new SearchRequest().source(sourceBuilder); // 
Exception thrown when processing the request - expectThrows(SearchPipelineProcessingException.class, () -> searchPipelineService.resolvePipeline(searchRequest)); + expectThrows( + SearchPipelineProcessingException.class, + () -> syncTransformRequest(searchPipelineService.resolvePipeline(searchRequest)) + ); } public void testExceptionOnResponseProcessing() throws Exception { @@ -974,10 +1018,10 @@ public void testExceptionOnResponseProcessing() throws Exception { SearchResponse response = new SearchResponse(null, null, 0, 0, 0, 0, null, null); // Exception thrown when processing response - expectThrows(SearchPipelineProcessingException.class, () -> pipelinedRequest.transformResponse(response)); + expectThrows(SearchPipelineProcessingException.class, () -> syncTransformResponse(pipelinedRequest, response)); } - public void testCatchExceptionOnRequestProcessing() throws IllegalAccessException { + public void testCatchExceptionOnRequestProcessing() throws Exception { SearchRequestProcessor throwingRequestProcessor = new FakeRequestProcessor("throwing_request", null, null, true, r -> { throw new RuntimeException(); }); @@ -1008,7 +1052,7 @@ public void testCatchExceptionOnRequestProcessing() throws IllegalAccessExceptio "The exception from request processor [throwing_request] in the search pipeline [_ad_hoc_pipeline] was ignored" ) ); - PipelinedRequest pipelinedRequest = searchPipelineService.resolvePipeline(searchRequest); + syncTransformRequest(searchPipelineService.resolvePipeline(searchRequest)); mockAppender.assertAllExpectationsMatched(); } } @@ -1048,7 +1092,7 @@ public void testCatchExceptionOnResponseProcessing() throws Exception { "The exception from response processor [throwing_response] in the search pipeline [_ad_hoc_pipeline] was ignored" ) ); - pipelinedRequest.transformResponse(response); + syncTransformResponse(pipelinedRequest, response); mockAppender.assertAllExpectationsMatched(); } } @@ -1078,15 +1122,15 @@ public void testStats() throws Exception { SearchRequest request = new SearchRequest(); SearchResponse response = new SearchResponse(null, null, 0, 0, 0, 0, null, null); - searchPipelineService.resolvePipeline(request.pipeline("good_request_pipeline")).transformResponse(response); + syncExecutePipeline(searchPipelineService.resolvePipeline(request.pipeline("good_request_pipeline")), response); expectThrows( SearchPipelineProcessingException.class, - () -> searchPipelineService.resolvePipeline(request.pipeline("bad_request_pipeline")).transformResponse(response) + () -> syncExecutePipeline(searchPipelineService.resolvePipeline(request.pipeline("bad_request_pipeline")), response) ); - searchPipelineService.resolvePipeline(request.pipeline("good_response_pipeline")).transformResponse(response); + syncExecutePipeline(searchPipelineService.resolvePipeline(request.pipeline("good_response_pipeline")), response); expectThrows( SearchPipelineProcessingException.class, - () -> searchPipelineService.resolvePipeline(request.pipeline("bad_response_pipeline")).transformResponse(response) + () -> syncExecutePipeline(searchPipelineService.resolvePipeline(request.pipeline("bad_response_pipeline")), response) ); SearchPipelineStats stats = searchPipelineService.stats(); @@ -1164,12 +1208,12 @@ public void testStatsEnabledIgnoreFailure() throws Exception { SearchRequest request = new SearchRequest(); SearchResponse response = new SearchResponse(null, null, 0, 0, 0, 0, null, null); - 
searchPipelineService.resolvePipeline(request.pipeline("good_request_pipeline")).transformResponse(response); + syncExecutePipeline(searchPipelineService.resolvePipeline(request.pipeline("good_request_pipeline")), response); // Caught Exception here - searchPipelineService.resolvePipeline(request.pipeline("bad_request_pipeline")).transformResponse(response); - searchPipelineService.resolvePipeline(request.pipeline("good_response_pipeline")).transformResponse(response); + syncExecutePipeline(searchPipelineService.resolvePipeline(request.pipeline("bad_request_pipeline")), response); + syncExecutePipeline(searchPipelineService.resolvePipeline(request.pipeline("good_response_pipeline")), response); // Caught Exception here - searchPipelineService.resolvePipeline(request.pipeline("bad_response_pipeline")).transformResponse(response); + syncExecutePipeline(searchPipelineService.resolvePipeline(request.pipeline("bad_response_pipeline")), response); // when ignoreFailure enabled, the search pipelines will all succeed. SearchPipelineStats stats = searchPipelineService.stats(); @@ -1241,25 +1285,25 @@ private SearchPipelineService getSearchPipelineService( new PipelineConfiguration( "good_response_pipeline", new BytesArray("{\"response_processors\" : [ { \"successful_response\": {} } ] }"), - XContentType.JSON + MediaTypeRegistry.JSON ), "bad_response_pipeline", new PipelineConfiguration( "bad_response_pipeline", new BytesArray("{\"response_processors\" : [ { \"throwing_response\": {} } ] }"), - XContentType.JSON + MediaTypeRegistry.JSON ), "good_request_pipeline", new PipelineConfiguration( "good_request_pipeline", new BytesArray("{\"request_processors\" : [ { \"successful_request\": {} } ] }"), - XContentType.JSON + MediaTypeRegistry.JSON ), "bad_request_pipeline", new PipelineConfiguration( "bad_request_pipeline", new BytesArray("{\"request_processors\" : [ { \"throwing_request\": {} } ] }"), - XContentType.JSON + MediaTypeRegistry.JSON ) ) ); @@ -1273,8 +1317,8 @@ private SearchPipelineService getSearchPipelineService( } private static void assertPipelineStats(OperationStats stats, long count, long failed) { - assertEquals(stats.getCount(), count); - assertEquals(stats.getFailedCount(), failed); + assertEquals(count, stats.getCount()); + assertEquals(failed, stats.getFailedCount()); } public void testAdHocRejectingProcessor() { @@ -1299,7 +1343,7 @@ public void testAdHocRejectingProcessor() { PutSearchPipelineRequest putRequest = new PutSearchPipelineRequest( id, new BytesArray("{\"request_processors\":[" + " { \"" + processorType + "\": {}}" + "]}"), - XContentType.JSON + MediaTypeRegistry.JSON ); ClusterState previousClusterState = clusterState; clusterState = SearchPipelineService.innerPut(putRequest, clusterState); @@ -1336,4 +1380,92 @@ public void testExtraParameterInProcessorConfig() { fail("Wrong exception type: " + e.getClass()); } } + + private static class FakeStatefulRequestProcessor extends AbstractProcessor implements StatefulSearchRequestProcessor { + private final String type; + private final Consumer<PipelineProcessingContext> stateConsumer; + + public FakeStatefulRequestProcessor(String type, Consumer<PipelineProcessingContext> stateConsumer) { + super(null, null, false); + this.type = type; + this.stateConsumer = stateConsumer; + } + + @Override + public String getType() { + return type; + } + + @Override + public SearchRequest processRequest(SearchRequest request, PipelineProcessingContext requestContext) throws Exception { + stateConsumer.accept(requestContext); + return 
request; + } + } + + private static class FakeStatefulResponseProcessor extends AbstractProcessor implements StatefulSearchResponseProcessor { + private final String type; + private final Consumer<PipelineProcessingContext> stateConsumer; + + public FakeStatefulResponseProcessor(String type, Consumer<PipelineProcessingContext> stateConsumer) { + super(null, null, false); + this.type = type; + this.stateConsumer = stateConsumer; + } + + @Override + public String getType() { + return type; + } + + @Override + public SearchResponse processResponse(SearchRequest request, SearchResponse response, PipelineProcessingContext requestContext) + throws Exception { + stateConsumer.accept(requestContext); + return response; + } + } + + public void testStatefulProcessors() throws Exception { + AtomicReference<String> contextHolder = new AtomicReference<>(); + SearchPipelineService searchPipelineService = createWithProcessors( + Map.of( + "write_context", + (pf, t, d, igf, cfg, ctx) -> new FakeStatefulRequestProcessor("write_context", (c) -> c.setAttribute("a", "b")) + ), + Map.of( + "read_context", + (pf, t, d, igf, cfg, ctx) -> new FakeStatefulResponseProcessor( + "read_context", + (c) -> contextHolder.set((String) c.getAttribute("a")) + ) + ), + Collections.emptyMap() + ); + + SearchPipelineMetadata metadata = new SearchPipelineMetadata( + Map.of( + "p1", + new PipelineConfiguration( + "p1", + new BytesArray( + "{\"request_processors\" : [ { \"write_context\": {} } ], \"response_processors\": [ { \"read_context\": {} }] }" + ), + MediaTypeRegistry.JSON + ) + ) + ); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); + ClusterState previousState = clusterState; + clusterState = ClusterState.builder(clusterState) + .metadata(Metadata.builder().putCustom(SearchPipelineMetadata.TYPE, metadata)) + .build(); + searchPipelineService.applyClusterState(new ClusterChangedEvent("", clusterState, previousState)); + + PipelinedRequest request = searchPipelineService.resolvePipeline(new SearchRequest().pipeline("p1")); + assertNull(contextHolder.get()); + syncExecutePipeline(request, new SearchResponse(null, null, 0, 0, 0, 0, null, null)); + assertNotNull(contextHolder.get()); + assertEquals("b", contextHolder.get()); + } } diff --git a/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineStatsTests.java b/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineStatsTests.java index 90a6e99057b0e..2b904b5bc627f 100644 --- a/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineStatsTests.java +++ b/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineStatsTests.java @@ -11,12 +11,12 @@ import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.metrics.OperationStats; import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.test.OpenSearchTestCase; @@ -178,8 +178,8 @@ public void testToXContent() throws IOException { expectedBuilder.generator().copyCurrentStructure(expectedParser); assertEquals( -
XContentHelper.convertToMap(BytesReference.bytes(expectedBuilder), false, (MediaType) XContentType.JSON), - XContentHelper.convertToMap(BytesReference.bytes(actualBuilder), false, (MediaType) XContentType.JSON) + XContentHelper.convertToMap(BytesReference.bytes(expectedBuilder), false, (MediaType) MediaTypeRegistry.JSON), + XContentHelper.convertToMap(BytesReference.bytes(actualBuilder), false, (MediaType) MediaTypeRegistry.JSON) ); } } diff --git a/server/src/test/java/org/opensearch/search/pit/RestCreatePitActionTests.java b/server/src/test/java/org/opensearch/search/pit/RestCreatePitActionTests.java index 2ddd6eda122b4..01d5979bfbf0c 100644 --- a/server/src/test/java/org/opensearch/search/pit/RestCreatePitActionTests.java +++ b/server/src/test/java/org/opensearch/search/pit/RestCreatePitActionTests.java @@ -8,11 +8,11 @@ package org.opensearch.search.pit; -import org.opensearch.action.ActionListener; import org.opensearch.action.search.CreatePitRequest; import org.opensearch.action.search.CreatePitResponse; import org.opensearch.client.node.NodeClient; import org.opensearch.common.SetOnce; +import org.opensearch.core.action.ActionListener; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.search.RestCreatePitAction; import org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/search/pit/RestDeletePitActionTests.java b/server/src/test/java/org/opensearch/search/pit/RestDeletePitActionTests.java index 4880afb896a40..b60541825e3ed 100644 --- a/server/src/test/java/org/opensearch/search/pit/RestDeletePitActionTests.java +++ b/server/src/test/java/org/opensearch/search/pit/RestDeletePitActionTests.java @@ -8,13 +8,13 @@ package org.opensearch.search.pit; -import org.opensearch.action.ActionListener; import org.opensearch.action.search.DeletePitRequest; import org.opensearch.action.search.DeletePitResponse; import org.opensearch.client.node.NodeClient; import org.opensearch.common.SetOnce; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.search.RestDeletePitAction; import org.opensearch.test.OpenSearchTestCase; @@ -35,7 +35,7 @@ public void testParseDeletePitRequestWithInvalidJsonThrowsException() throws Exc RestDeletePitAction action = new RestDeletePitAction(); RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent( new BytesArray("{invalid_json}"), - XContentType.JSON + MediaTypeRegistry.JSON ).build(); Exception e = expectThrows(IllegalArgumentException.class, () -> action.prepareRequest(request, null)); assertThat(e.getMessage(), equalTo("Failed to parse request body")); @@ -54,7 +54,7 @@ public void deletePits(DeletePitRequest request, ActionListener<DeletePitRespons RestDeletePitAction action = new RestDeletePitAction(); RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent( new BytesArray("{\"pit_id\": [\"BODY\"]}"), - XContentType.JSON + MediaTypeRegistry.JSON ).build(); FakeRestChannel channel = new FakeRestChannel(request, false, 0); action.handleRequest(request, channel, nodeClient); @@ -95,7 +95,7 @@ public void deletePits(DeletePitRequest request, ActionListener<DeletePitRespons RestDeletePitAction action = new RestDeletePitAction(); RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent( new 
BytesArray("{\"pit_id\": [\"BODY\"]}"), - XContentType.JSON + MediaTypeRegistry.JSON ).withPath("/_all").build(); FakeRestChannel channel = new FakeRestChannel(request, false, 0); diff --git a/server/src/test/java/org/opensearch/search/profile/ProfileResultTests.java b/server/src/test/java/org/opensearch/search/profile/ProfileResultTests.java index ae5a07478e814..ad63fa928dfcd 100644 --- a/server/src/test/java/org/opensearch/search/profile/ProfileResultTests.java +++ b/server/src/test/java/org/opensearch/search/profile/ProfileResultTests.java @@ -32,13 +32,12 @@ package org.opensearch.search.profile; -import org.opensearch.common.Strings; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -49,14 +48,14 @@ import java.util.Map; import java.util.function.Predicate; -import static org.opensearch.common.xcontent.XContentHelper.toXContent; +import static org.opensearch.core.xcontent.XContentHelper.toXContent; import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; import static org.opensearch.test.XContentTestUtils.insertRandomFields; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertToXContentEquivalent; public class ProfileResultTests extends OpenSearchTestCase { - public static ProfileResult createTestItem(int depth) { + public static ProfileResult createTestItem(int depth, boolean concurrentSegmentSearchEnabled) { String type = randomAlphaOfLengthBetween(5, 10); String description = randomAlphaOfLengthBetween(5, 10); int breakdownsSize = randomIntBetween(0, 5); @@ -77,13 +76,28 @@ public static ProfileResult createTestItem(int depth) { int childrenSize = depth > 0 ? 
randomIntBetween(0, 1) : 0; List<ProfileResult> children = new ArrayList<>(childrenSize); for (int i = 0; i < childrenSize; i++) { - children.add(createTestItem(depth - 1)); + children.add(createTestItem(depth - 1, concurrentSegmentSearchEnabled)); + } + if (concurrentSegmentSearchEnabled) { + return new ProfileResult( + type, + description, + breakdown, + debug, + randomNonNegativeLong(), + children, + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong() + ); + } else { + return new ProfileResult(type, description, breakdown, debug, randomNonNegativeLong(), children); } - return new ProfileResult(type, description, breakdown, debug, randomNonNegativeLong(), children); } public void testFromXContent() throws IOException { - doFromXContentTestWithRandomFields(false); + doFromXContentTestWithRandomFields(false, false); + doFromXContentTestWithRandomFields(false, true); } /** @@ -91,11 +105,12 @@ public void testFromXContent() throws IOException { * back to be forward compatible with additions to the xContent */ public void testFromXContentWithRandomFields() throws IOException { - doFromXContentTestWithRandomFields(true); + doFromXContentTestWithRandomFields(true, false); + doFromXContentTestWithRandomFields(true, true); } - private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws IOException { - ProfileResult profileResult = createTestItem(2); + private void doFromXContentTestWithRandomFields(boolean addRandomFields, boolean concurrentSegmentSearchEnabled) throws IOException { + ProfileResult profileResult = createTestItem(2, concurrentSegmentSearchEnabled); XContentType xContentType = randomFrom(XContentType.values()); boolean humanReadable = randomBoolean(); BytesReference originalBytes = toShuffledXContent(profileResult, xContentType, ToXContent.EMPTY_PARAMS, humanReadable); @@ -116,6 +131,9 @@ private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws assertNull(parser.nextToken()); } assertEquals(profileResult.getTime(), parsed.getTime()); + assertEquals(profileResult.getMaxSliceTime(), parsed.getMaxSliceTime()); + assertEquals(profileResult.getMinSliceTime(), parsed.getMinSliceTime()); + assertEquals(profileResult.getAvgSliceTime(), parsed.getAvgSliceTime()); assertToXContentEquivalent(originalBytes, toXContent(parsed, xContentType, humanReadable), xContentType); } @@ -164,7 +182,7 @@ public void testToXContent() throws IOException { + " }\n" + " ]\n" + "}", - Strings.toString(builder) + builder.toString() ); builder = XContentFactory.jsonBuilder().prettyPrint().humanReadable(true); @@ -204,7 +222,7 @@ public void testToXContent() throws IOException { + " }\n" + " ]\n" + "}", - Strings.toString(builder) + builder.toString() ); result = new ProfileResult("profileName", "some description", Map.of("key1", 12345678L), Map.of(), 12345678L, List.of()); @@ -220,7 +238,7 @@ public void testToXContent() throws IOException { + " \"key1\" : 12345678\n" + " }\n" + "}", - Strings.toString(builder) + builder.toString() ); result = new ProfileResult("profileName", "some description", Map.of("key1", 1234567890L), Map.of(), 1234567890L, List.of()); @@ -236,7 +254,74 @@ public void testToXContent() throws IOException { + " \"key1\" : 1234567890\n" + " }\n" + "}", - Strings.toString(builder) + builder.toString() + ); + + result = new ProfileResult("profileName", "some description", Map.of("key1", 1234L), Map.of(), 1234L, List.of(), 321L, 123L, 222L); + builder = XContentFactory.jsonBuilder().prettyPrint(); + result.toXContent(builder, 
ToXContent.EMPTY_PARAMS); + assertEquals( + "{\n" + + " \"type\" : \"profileName\",\n" + + " \"description\" : \"some description\",\n" + + " \"time_in_nanos\" : 1234,\n" + + " \"max_slice_time_in_nanos\" : 321,\n" + + " \"min_slice_time_in_nanos\" : 123,\n" + + " \"avg_slice_time_in_nanos\" : 222,\n" + + " \"breakdown\" : {\n" + + " \"key1\" : 1234\n" + + " }\n" + + "}", + builder.toString() + ); + + result = new ProfileResult( + "profileName", + "some description", + Map.of("key1", 1234567890L), + Map.of(), + 1234567890L, + List.of(), + 87654321L, + 12345678L, + 54637281L + ); + builder = XContentFactory.jsonBuilder().prettyPrint().humanReadable(true); + result.toXContent(builder, ToXContent.EMPTY_PARAMS); + assertEquals( + "{\n" + + " \"type\" : \"profileName\",\n" + + " \"description\" : \"some description\",\n" + + " \"time\" : \"1.2s\",\n" + + " \"max_slice_time\" : \"87.6ms\",\n" + + " \"min_slice_time\" : \"12.3ms\",\n" + + " \"avg_slice_time\" : \"54.6ms\",\n" + + " \"time_in_nanos\" : 1234567890,\n" + + " \"max_slice_time_in_nanos\" : 87654321,\n" + + " \"min_slice_time_in_nanos\" : 12345678,\n" + + " \"avg_slice_time_in_nanos\" : 54637281,\n" + + " \"breakdown\" : {\n" + + " \"key1\" : 1234567890\n" + + " }\n" + + "}", + builder.toString() ); + + } + + public void testRemoveStartTimeFields() { + Map<String, Long> breakdown = new HashMap<>(); + breakdown.put("initialize_start_time", 123456L); + breakdown.put("initialize_count", 1L); + breakdown.put("initialize", 654321L); + Map<String, Long> modifiedBreakdown = new LinkedHashMap<>(breakdown); + assertEquals(3, modifiedBreakdown.size()); + assertEquals(123456L, (long) modifiedBreakdown.get("initialize_start_time")); + assertEquals(1L, (long) modifiedBreakdown.get("initialize_count")); + assertEquals(654321L, (long) modifiedBreakdown.get("initialize")); + ProfileResult.removeStartTimeFields(modifiedBreakdown); + assertFalse(modifiedBreakdown.containsKey("initialize_start_time")); + assertTrue(modifiedBreakdown.containsKey("initialize_count")); + assertTrue(modifiedBreakdown.containsKey("initialize")); } } diff --git a/server/src/test/java/org/opensearch/search/profile/SearchProfileShardResultsTests.java b/server/src/test/java/org/opensearch/search/profile/SearchProfileShardResultsTests.java index 4c6853e598ffe..71511652c0856 100644 --- a/server/src/test/java/org/opensearch/search/profile/SearchProfileShardResultsTests.java +++ b/server/src/test/java/org/opensearch/search/profile/SearchProfileShardResultsTests.java @@ -32,10 +32,10 @@ package org.opensearch.search.profile; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.search.profile.aggregation.AggregationProfileShardResult; import org.opensearch.search.profile.aggregation.AggregationProfileShardResultTests; import org.opensearch.search.profile.query.QueryProfileShardResult; @@ -49,7 +49,7 @@ import java.util.Map; import java.util.function.Predicate; -import static org.opensearch.common.xcontent.XContentHelper.toXContent; +import static org.opensearch.core.xcontent.XContentHelper.toXContent; import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; import static org.opensearch.core.xcontent.XContentParserUtils.ensureFieldName; import static org.opensearch.test.XContentTestUtils.insertRandomFields; diff --git 
a/server/src/test/java/org/opensearch/search/profile/TimerTests.java b/server/src/test/java/org/opensearch/search/profile/TimerTests.java index deed451c21933..5997292eb8f56 100644 --- a/server/src/test/java/org/opensearch/search/profile/TimerTests.java +++ b/server/src/test/java/org/opensearch/search/profile/TimerTests.java @@ -71,10 +71,14 @@ long nanoTime() { return time += 42; } }; - for (int i = 1; i < 100000; ++i) { + t.start(); + t.stop(); + long timerStartTime = t.getEarliestTimerStartTime(); + for (int i = 2; i < 100000; ++i) { t.start(); t.stop(); assertEquals(i, t.getCount()); + assertEquals(timerStartTime, t.getEarliestTimerStartTime()); // Make sure the cumulated timing is 42 times the number of calls as expected assertEquals(i * 42L, t.getApproximateTiming()); } diff --git a/server/src/test/java/org/opensearch/search/profile/aggregation/AggregationProfileShardResultTests.java b/server/src/test/java/org/opensearch/search/profile/aggregation/AggregationProfileShardResultTests.java index 33c95725dcd13..1b4f34fe3e517 100644 --- a/server/src/test/java/org/opensearch/search/profile/aggregation/AggregationProfileShardResultTests.java +++ b/server/src/test/java/org/opensearch/search/profile/aggregation/AggregationProfileShardResultTests.java @@ -32,11 +32,12 @@ package org.opensearch.search.profile.aggregation; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.xcontent.XContentParserUtils; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.search.profile.ProfileResult; import org.opensearch.search.profile.ProfileResultTests; import org.opensearch.test.OpenSearchTestCase; @@ -48,7 +49,7 @@ import java.util.List; import java.util.Map; -import static org.opensearch.common.xcontent.XContentHelper.toXContent; +import static org.opensearch.core.xcontent.XContentHelper.toXContent; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertToXContentEquivalent; public class AggregationProfileShardResultTests extends OpenSearchTestCase { @@ -57,7 +58,7 @@ public static AggregationProfileShardResult createTestItem(int depth) { int size = randomIntBetween(0, 5); List<ProfileResult> aggProfileResults = new ArrayList<>(size); for (int i = 0; i < size; i++) { - aggProfileResults.add(ProfileResultTests.createTestItem(1)); + aggProfileResults.add(ProfileResultTests.createTestItem(depth, false)); } return new AggregationProfileShardResult(aggProfileResults); } @@ -91,7 +92,7 @@ public void testToXContent() throws IOException { ProfileResult profileResult = new ProfileResult("someType", "someDescription", breakdown, debug, 6000L, Collections.emptyList()); profileResults.add(profileResult); AggregationProfileShardResult aggProfileResults = new AggregationProfileShardResult(profileResults); - BytesReference xContent = toXContent(aggProfileResults, XContentType.JSON, false); + BytesReference xContent = toXContent(aggProfileResults, MediaTypeRegistry.JSON, false); assertEquals( "{\"aggregations\":[" + "{\"type\":\"someType\"," @@ -104,7 +105,7 @@ public void testToXContent() throws IOException { xContent.utf8ToString() ); - xContent = toXContent(aggProfileResults, XContentType.JSON, true); + xContent = toXContent(aggProfileResults, MediaTypeRegistry.JSON, true); assertEquals( "{\"aggregations\":[" + "{\"type\":\"someType\"," diff 
--git a/server/src/test/java/org/opensearch/search/profile/aggregation/ConcurrentAggregationProfilerTests.java b/server/src/test/java/org/opensearch/search/profile/aggregation/ConcurrentAggregationProfilerTests.java new file mode 100644 index 0000000000000..e36b65f0a7b69 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/profile/aggregation/ConcurrentAggregationProfilerTests.java @@ -0,0 +1,182 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.search.profile.aggregation; + +import org.opensearch.search.profile.ProfileResult; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +public class ConcurrentAggregationProfilerTests extends OpenSearchTestCase { + + public static List<ProfileResult> createConcurrentSearchProfileTree() { + List<ProfileResult> tree = new ArrayList<>(); + // Aggregation + tree.add( + new ProfileResult( + "NumericTermsAggregator", + "test_scoped_agg", + new LinkedHashMap<>(), + new HashMap<>(), + 10847417L, + List.of( + new ProfileResult( + "GlobalOrdinalsStringTermsAggregator", + "test_terms", + new LinkedHashMap<>(), + new HashMap<>(), + 3359835L, + List.of(), + 1490667L, + 1180123L, + 1240676L + ) + ), + 94582L, + 18667L, + 211749L + ) + ); + tree.add( + new ProfileResult( + "NumericTermsAggregator", + "test_scoped_agg", + new LinkedHashMap<>(), + new HashMap<>(), + 10776655L, + List.of( + new ProfileResult( + "GlobalOrdinalsStringTermsAggregator", + "test_terms", + new LinkedHashMap<>(), + new HashMap<>(), + 3359567L, + List.of(), + 1390554L, + 1180321L, + 1298776L + ) + ), + 94560L, + 11237L, + 236440L + ) + ); + // Global Aggregation + tree.add( + new ProfileResult( + "GlobalAggregator", + "test_global_agg", + new LinkedHashMap<>(), + new HashMap<>(), + 19631335L, + List.of(), + 563002L, + 142210L, + 1216631L + ) + ); + tree.add( + new ProfileResult( + "GlobalAggregator", + "test_global_agg", + new LinkedHashMap<>(), + new HashMap<>(), + 19634567L, + List.of(), + 563333L, + 146783L, + 1496600L + ) + ); + return tree; + } + + public void testBuildTimeStatsBreakdownMap() { + List<ProfileResult> tree = createConcurrentSearchProfileTree(); + Map<String, Long> breakdown = new HashMap<>(); + Map<String, Long> timeStatsMap = new HashMap<>(); + timeStatsMap.put("max_initialize", 30L); + timeStatsMap.put("min_initialize", 10L); + timeStatsMap.put("avg_initialize", 60L); + ConcurrentAggregationProfiler.buildBreakdownMap(tree.size(), breakdown, timeStatsMap, "initialize"); + assertTrue(breakdown.containsKey("max_initialize")); + assertTrue(breakdown.containsKey("min_initialize")); + assertTrue(breakdown.containsKey("avg_initialize")); + assertEquals(30L, (long) breakdown.get("max_initialize")); + assertEquals(10L, (long) breakdown.get("min_initialize")); + assertEquals(15L, (long) breakdown.get("avg_initialize")); + } + + public void testBuildCountStatsBreakdownMap() { + List<ProfileResult> tree = createConcurrentSearchProfileTree(); + Map<String, Long> breakdown = new HashMap<>(); + Map<String, Long> countStatsMap = new HashMap<>(); + countStatsMap.put("max_collect_count", 3L); + countStatsMap.put("min_collect_count", 1L); + 
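// avg_* entries hold a running sum that buildBreakdownMap averages over the slice count (tree.size() == 4 here), so the 6 below becomes 6 / 4 == 1 under integer division +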
countStatsMap.put("avg_collect_count", 6L); + ConcurrentAggregationProfiler.buildBreakdownMap(tree.size(), breakdown, countStatsMap, "collect_count"); + assertTrue(breakdown.containsKey("max_collect_count")); + assertTrue(breakdown.containsKey("min_collect_count")); + assertTrue(breakdown.containsKey("avg_collect_count")); + assertEquals(3L, (long) breakdown.get("max_collect_count")); + assertEquals(1L, (long) breakdown.get("min_collect_count")); + assertEquals(1L, (long) breakdown.get("avg_collect_count")); + } + + public void testBuildBreakdownStatsMap() { + Map<String, Long> statsMap = new HashMap<>(); + ConcurrentAggregationProfiler.buildBreakdownStatsMap( + statsMap, + new ProfileResult("NumericTermsAggregator", "desc", Map.of("initialize", 100L), Map.of(), 130L, List.of()), + "initialize" + ); + assertTrue(statsMap.containsKey("max_initialize")); + assertTrue(statsMap.containsKey("min_initialize")); + assertTrue(statsMap.containsKey("avg_initialize")); + assertEquals(100L, (long) statsMap.get("max_initialize")); + assertEquals(100L, (long) statsMap.get("min_initialize")); + assertEquals(100L, (long) statsMap.get("avg_initialize")); + ConcurrentAggregationProfiler.buildBreakdownStatsMap( + statsMap, + new ProfileResult("NumericTermsAggregator", "desc", Map.of("initialize", 50L), Map.of(), 120L, List.of()), + "initialize" + ); + assertEquals(100L, (long) statsMap.get("max_initialize")); + assertEquals(50L, (long) statsMap.get("min_initialize")); + assertEquals(150L, (long) statsMap.get("avg_initialize")); + } + + public void testGetSliceLevelAggregationMap() { + List<ProfileResult> tree = createConcurrentSearchProfileTree(); + Map<String, List<ProfileResult>> aggregationMap = ConcurrentAggregationProfiler.getSliceLevelAggregationMap(tree); + assertEquals(2, aggregationMap.size()); + assertTrue(aggregationMap.containsKey("test_scoped_agg")); + assertTrue(aggregationMap.containsKey("test_global_agg")); + assertEquals(2, aggregationMap.get("test_scoped_agg").size()); + assertEquals(2, aggregationMap.get("test_global_agg").size()); + for (int slice_id : new int[] { 0, 1 }) { + assertEquals(1, aggregationMap.get("test_scoped_agg").get(slice_id).getProfiledChildren().size()); + assertEquals( + "test_terms", + aggregationMap.get("test_scoped_agg").get(slice_id).getProfiledChildren().get(0).getLuceneDescription() + ); + assertEquals(0, aggregationMap.get("test_global_agg").get(slice_id).getProfiledChildren().size()); + } + } +} diff --git a/server/src/test/java/org/opensearch/search/profile/query/CollectorResultTests.java b/server/src/test/java/org/opensearch/search/profile/query/CollectorResultTests.java index a3ac26a28ab1f..b6a20359c7a0b 100644 --- a/server/src/test/java/org/opensearch/search/profile/query/CollectorResultTests.java +++ b/server/src/test/java/org/opensearch/search/profile/query/CollectorResultTests.java @@ -32,13 +32,12 @@ package org.opensearch.search.profile.query; -import org.opensearch.common.Strings; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -46,7 +45,7 @@ import java.util.Collections; import java.util.List; -import 
static org.opensearch.common.xcontent.XContentHelper.toXContent; +import static org.opensearch.core.xcontent.XContentHelper.toXContent; import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; import static org.opensearch.test.XContentTestUtils.insertRandomFields; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertToXContentEquivalent; @@ -144,7 +143,7 @@ public void testToXContent() throws IOException { + " }\n" + " ]\n" + "}", - Strings.toString(builder) + builder.toString() ); builder = XContentFactory.jsonBuilder().prettyPrint().humanReadable(true); @@ -170,7 +169,7 @@ public void testToXContent() throws IOException { + " }\n" + " ]\n" + "}", - Strings.toString(builder) + builder.toString() ); result = new CollectorResult("collectorName", "some reason", 12345678L, Collections.emptyList()); @@ -183,7 +182,7 @@ public void testToXContent() throws IOException { + " \"time\" : \"12.3ms\",\n" + " \"time_in_nanos\" : 12345678\n" + "}", - Strings.toString(builder) + builder.toString() ); result = new CollectorResult("collectorName", "some reason", 1234567890L, Collections.emptyList()); @@ -196,7 +195,7 @@ public void testToXContent() throws IOException { + " \"time\" : \"1.2s\",\n" + " \"time_in_nanos\" : 1234567890\n" + "}", - Strings.toString(builder) + builder.toString() ); result = new CollectorResult( @@ -224,7 +223,7 @@ public void testToXContent() throws IOException { + " \"avg_slice_time_in_nanos\" : 123456789,\n" + " \"slice_count\" : 3\n" + "}", - Strings.toString(builder) + builder.toString() ); } } diff --git a/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java b/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java new file mode 100644 index 0000000000000..db14eb90ef839 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java @@ -0,0 +1,432 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.search.profile.query; + +import org.apache.lucene.analysis.standard.StandardAnalyzer; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.StringField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.Collector; +import org.apache.lucene.store.Directory; +import org.opensearch.search.profile.AbstractProfileBreakdown; +import org.opensearch.search.profile.Timer; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.util.HashMap; +import java.util.Map; + +import static org.opensearch.search.profile.AbstractProfileBreakdown.TIMING_TYPE_COUNT_SUFFIX; +import static org.opensearch.search.profile.AbstractProfileBreakdown.TIMING_TYPE_START_TIME_SUFFIX; +import static org.opensearch.search.profile.query.ConcurrentQueryProfileBreakdown.MIN_PREFIX; +import static org.opensearch.search.profile.query.ConcurrentQueryProfileBreakdown.SLICE_END_TIME_SUFFIX; +import static org.opensearch.search.profile.query.ConcurrentQueryProfileBreakdown.SLICE_START_TIME_SUFFIX; +import static org.mockito.Mockito.mock; + +public class ConcurrentQueryProfileBreakdownTests extends OpenSearchTestCase { + private ConcurrentQueryProfileBreakdown testQueryProfileBreakdown; + private Timer createWeightTimer; + + @Before + public void setup() { + testQueryProfileBreakdown = new ConcurrentQueryProfileBreakdown(); + createWeightTimer = testQueryProfileBreakdown.getTimer(QueryTimingType.CREATE_WEIGHT); + try { + createWeightTimer.start(); + Thread.sleep(10); + } catch (InterruptedException ex) { + // ignore + } finally { + createWeightTimer.stop(); + } + } + + public void testBreakdownMapWithNoLeafContext() throws Exception { + final Map<String, Long> queryBreakDownMap = testQueryProfileBreakdown.toBreakdownMap(); + assertFalse(queryBreakDownMap == null || queryBreakDownMap.isEmpty()); + assertEquals(66, queryBreakDownMap.size()); + for (QueryTimingType queryTimingType : QueryTimingType.values()) { + String timingTypeKey = queryTimingType.toString(); + String timingTypeCountKey = queryTimingType + TIMING_TYPE_COUNT_SUFFIX; + + if (queryTimingType.equals(QueryTimingType.CREATE_WEIGHT)) { + final long createWeightTime = queryBreakDownMap.get(timingTypeKey); + assertTrue(createWeightTime > 0); + assertEquals(1, (long) queryBreakDownMap.get(timingTypeCountKey)); + // verify there is no min/max/avg for weight type stats + assertFalse( + queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeCountKey) + || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeCountKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeCountKey) + ); + // verify total/min/max/avg node time is same as weight time + assertEquals(createWeightTime, testQueryProfileBreakdown.toNodeTime()); + assertEquals(0, testQueryProfileBreakdown.getMaxSliceNodeTime()); + assertEquals(0, testQueryProfileBreakdown.getMinSliceNodeTime()); + assertEquals(0, 
testQueryProfileBreakdown.getAvgSliceNodeTime()); + continue; + } + assertEquals(0, (long) queryBreakDownMap.get(timingTypeKey)); + assertEquals(0, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeKey)); + assertEquals(0, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeKey)); + assertEquals(0, (long) queryBreakDownMap.get(MIN_PREFIX + timingTypeKey)); + assertEquals(0, (long) queryBreakDownMap.get(timingTypeCountKey)); + assertEquals(0, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeCountKey)); + assertEquals(0, (long) queryBreakDownMap.get(MIN_PREFIX + timingTypeCountKey)); + assertEquals(0, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeCountKey)); + } + } + + public void testBuildSliceLevelBreakdownWithSingleSlice() throws Exception { + final DirectoryReader directoryReader = getDirectoryReader(1); + final Directory directory = directoryReader.directory(); + final LeafReaderContext sliceLeaf = directoryReader.leaves().get(0); + final Collector sliceCollector = mock(Collector.class); + final long createWeightEarliestStartTime = createWeightTimer.getEarliestTimerStartTime(); + final long createWeightEndTime = createWeightEarliestStartTime + createWeightTimer.getApproximateTiming(); + final Map<String, Long> leafProfileBreakdownMap = getLeafBreakdownMap(createWeightEndTime + 10, 10, 1); + final AbstractProfileBreakdown<QueryTimingType> leafProfileBreakdown = new TestQueryProfileBreakdown( + QueryTimingType.class, + leafProfileBreakdownMap + ); + testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector, sliceLeaf); + testQueryProfileBreakdown.getContexts().put(sliceLeaf, leafProfileBreakdown); + final Map<Collector, Map<String, Long>> sliceBreakdownMap = testQueryProfileBreakdown.buildSliceLevelBreakdown(); + assertFalse(sliceBreakdownMap == null || sliceBreakdownMap.isEmpty()); + assertEquals(1, sliceBreakdownMap.size()); + assertTrue(sliceBreakdownMap.containsKey(sliceCollector)); + + final Map<String, Long> sliceBreakdown = sliceBreakdownMap.entrySet().iterator().next().getValue(); + for (QueryTimingType timingType : QueryTimingType.values()) { + String timingTypeKey = timingType.toString(); + String timingTypeCountKey = timingTypeKey + TIMING_TYPE_COUNT_SUFFIX; + + if (timingType.equals(QueryTimingType.CREATE_WEIGHT)) { + // there should be no create_weight entry in the slice-level breakdown map + assertNull(sliceBreakdown.get(timingTypeKey)); + assertNull(sliceBreakdown.get(timingTypeCountKey)); + continue; + } + + // for the other timing types, every value is present and matches the leaf breakdown, since there is a single slice with a + // single leaf + assertEquals(leafProfileBreakdownMap.get(timingTypeKey), sliceBreakdown.get(timingTypeKey)); + assertEquals(leafProfileBreakdownMap.get(timingTypeCountKey), sliceBreakdown.get(timingTypeCountKey)); + assertEquals( + leafProfileBreakdownMap.get(timingTypeKey + TIMING_TYPE_START_TIME_SUFFIX), + sliceBreakdown.get(timingTypeKey + SLICE_START_TIME_SUFFIX) + ); + assertEquals( + leafProfileBreakdownMap.get(timingTypeKey + TIMING_TYPE_START_TIME_SUFFIX) + leafProfileBreakdownMap.get(timingTypeKey), + (long) sliceBreakdown.get(timingTypeKey + SLICE_END_TIME_SUFFIX) + ); + } + assertEquals(10, testQueryProfileBreakdown.getMaxSliceNodeTime()); + assertEquals(10, testQueryProfileBreakdown.getMinSliceNodeTime()); + assertEquals(10, testQueryProfileBreakdown.getAvgSliceNodeTime()); +
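// all three slice node time stats above equal 10 because the single slice's leaf timings share one 10ns window +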
directoryReader.close(); + directory.close(); + } + + public void testBuildSliceLevelBreakdownWithMultipleSlices() throws Exception { + final DirectoryReader directoryReader = getDirectoryReader(2); + final Directory directory = directoryReader.directory(); + final Collector sliceCollector_1 = mock(Collector.class); + final Collector sliceCollector_2 = mock(Collector.class); + final long createWeightEarliestStartTime = createWeightTimer.getEarliestTimerStartTime(); + final long createWeightEndTime = createWeightEarliestStartTime + createWeightTimer.getApproximateTiming(); + final Map<String, Long> leafProfileBreakdownMap_1 = getLeafBreakdownMap(createWeightEndTime + 10, 10, 1); + final Map<String, Long> leafProfileBreakdownMap_2 = getLeafBreakdownMap(createWeightEndTime + 40, 10, 1); + final AbstractProfileBreakdown<QueryTimingType> leafProfileBreakdown_1 = new TestQueryProfileBreakdown( + QueryTimingType.class, + leafProfileBreakdownMap_1 + ); + final AbstractProfileBreakdown<QueryTimingType> leafProfileBreakdown_2 = new TestQueryProfileBreakdown( + QueryTimingType.class, + leafProfileBreakdownMap_2 + ); + testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector_1, directoryReader.leaves().get(0)); + testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector_2, directoryReader.leaves().get(1)); + testQueryProfileBreakdown.getContexts().put(directoryReader.leaves().get(0), leafProfileBreakdown_1); + testQueryProfileBreakdown.getContexts().put(directoryReader.leaves().get(1), leafProfileBreakdown_2); + final Map<Collector, Map<String, Long>> sliceBreakdownMap = testQueryProfileBreakdown.buildSliceLevelBreakdown(); + assertFalse(sliceBreakdownMap == null || sliceBreakdownMap.isEmpty()); + assertEquals(2, sliceBreakdownMap.size()); + + for (Map.Entry<Collector, Map<String, Long>> sliceBreakdowns : sliceBreakdownMap.entrySet()) { + Map<String, Long> sliceBreakdown = sliceBreakdowns.getValue(); + Map<String, Long> leafProfileBreakdownMap; + if (sliceBreakdowns.getKey().equals(sliceCollector_1)) { + leafProfileBreakdownMap = leafProfileBreakdownMap_1; + } else { + leafProfileBreakdownMap = leafProfileBreakdownMap_2; + } + for (QueryTimingType timingType : QueryTimingType.values()) { + String timingTypeKey = timingType.toString(); + String timingTypeCountKey = timingTypeKey + TIMING_TYPE_COUNT_SUFFIX; + + if (timingType.equals(QueryTimingType.CREATE_WEIGHT)) { + // there should be no create_weight entry in the slice-level breakdown map + assertNull(sliceBreakdown.get(timingTypeKey)); + assertNull(sliceBreakdown.get(timingTypeCountKey)); + continue; + } + + // for the other timing types, each slice's values are present and match its leaf breakdown, since each slice here has + // exactly one leaf + assertEquals(leafProfileBreakdownMap.get(timingTypeKey), sliceBreakdown.get(timingTypeKey)); + assertEquals(leafProfileBreakdownMap.get(timingTypeCountKey), sliceBreakdown.get(timingTypeCountKey)); + assertEquals( + leafProfileBreakdownMap.get(timingTypeKey + TIMING_TYPE_START_TIME_SUFFIX), + sliceBreakdown.get(timingTypeKey + SLICE_START_TIME_SUFFIX) + ); + assertEquals( + leafProfileBreakdownMap.get(timingTypeKey + TIMING_TYPE_START_TIME_SUFFIX) + leafProfileBreakdownMap.get(timingTypeKey), + (long) sliceBreakdown.get(timingTypeKey + SLICE_END_TIME_SUFFIX) + ); + } + } + + assertEquals(10, testQueryProfileBreakdown.getMaxSliceNodeTime()); + assertEquals(10, testQueryProfileBreakdown.getMinSliceNodeTime()); + assertEquals(10, testQueryProfileBreakdown.getAvgSliceNodeTime()); +
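// both slices use 10ns leaf breakdowns (only their start offsets differ), so max/min/avg slice node time above are again all 10 +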
directoryReader.close(); + directory.close(); + } + + public void testBreakDownMapWithMultipleSlices() throws Exception { + final DirectoryReader directoryReader = getDirectoryReader(2); + final Directory directory = directoryReader.directory(); + final Collector sliceCollector_1 = mock(Collector.class); + final Collector sliceCollector_2 = mock(Collector.class); + final long createWeightEarliestStartTime = createWeightTimer.getEarliestTimerStartTime(); + final long createWeightEndTime = createWeightEarliestStartTime + createWeightTimer.getApproximateTiming(); + final Map<String, Long> leafProfileBreakdownMap_1 = getLeafBreakdownMap(createWeightEndTime + 10, 10, 1); + final Map<String, Long> leafProfileBreakdownMap_2 = getLeafBreakdownMap(createWeightEndTime + 40, 20, 1); + final AbstractProfileBreakdown<QueryTimingType> leafProfileBreakdown_1 = new TestQueryProfileBreakdown( + QueryTimingType.class, + leafProfileBreakdownMap_1 + ); + final AbstractProfileBreakdown<QueryTimingType> leafProfileBreakdown_2 = new TestQueryProfileBreakdown( + QueryTimingType.class, + leafProfileBreakdownMap_2 + ); + testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector_1, directoryReader.leaves().get(0)); + testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector_2, directoryReader.leaves().get(1)); + testQueryProfileBreakdown.getContexts().put(directoryReader.leaves().get(0), leafProfileBreakdown_1); + testQueryProfileBreakdown.getContexts().put(directoryReader.leaves().get(1), leafProfileBreakdown_2); + + Map<String, Long> queryBreakDownMap = testQueryProfileBreakdown.toBreakdownMap(); + assertFalse(queryBreakDownMap == null || queryBreakDownMap.isEmpty()); + assertEquals(66, queryBreakDownMap.size()); + + for (QueryTimingType queryTimingType : QueryTimingType.values()) { + String timingTypeKey = queryTimingType.toString(); + String timingTypeCountKey = queryTimingType + TIMING_TYPE_COUNT_SUFFIX; + + if (queryTimingType.equals(QueryTimingType.CREATE_WEIGHT)) { + final long createWeightTime = queryBreakDownMap.get(timingTypeKey); + assertEquals(createWeightTimer.getApproximateTiming(), createWeightTime); + assertEquals(1, (long) queryBreakDownMap.get(timingTypeCountKey)); + // verify there is no min/max/avg for weight type stats + assertFalse( + queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeCountKey) + || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeCountKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeCountKey) + ); + continue; + } + assertEquals(50, (long) queryBreakDownMap.get(timingTypeKey)); + assertEquals(20, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeKey)); + assertEquals(15, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeKey)); + assertEquals(10, (long) queryBreakDownMap.get(MIN_PREFIX + timingTypeKey)); + assertEquals(2, (long) queryBreakDownMap.get(timingTypeCountKey)); + assertEquals(1, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeCountKey)); + assertEquals(1, (long) queryBreakDownMap.get(MIN_PREFIX + timingTypeCountKey)); + assertEquals(1, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.AVG_PREFIX + 
timingTypeCountKey)); + } + + assertEquals(20, testQueryProfileBreakdown.getMaxSliceNodeTime()); + assertEquals(10, testQueryProfileBreakdown.getMinSliceNodeTime()); + assertEquals(15, testQueryProfileBreakdown.getAvgSliceNodeTime()); + directoryReader.close(); + directory.close(); + } + + public void testBreakDownMapWithMultipleSlicesAndOneSliceWithNoLeafContext() throws Exception { + final DirectoryReader directoryReader = getDirectoryReader(2); + final Directory directory = directoryReader.directory(); + final Collector sliceCollector_1 = mock(Collector.class); + final Collector sliceCollector_2 = mock(Collector.class); + final long createWeightEarliestStartTime = createWeightTimer.getEarliestTimerStartTime(); + final long createWeightEndTime = createWeightEarliestStartTime + createWeightTimer.getApproximateTiming(); + final Map<String, Long> leafProfileBreakdownMap_1 = getLeafBreakdownMap(createWeightEndTime + 10, 10, 1); + final AbstractProfileBreakdown<QueryTimingType> leafProfileBreakdown_1 = new TestQueryProfileBreakdown( + QueryTimingType.class, + leafProfileBreakdownMap_1 + ); + testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector_1, directoryReader.leaves().get(0)); + testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector_2, directoryReader.leaves().get(1)); + testQueryProfileBreakdown.getContexts().put(directoryReader.leaves().get(0), leafProfileBreakdown_1); + // leaf2's profile breakdown is not present in the contexts map + + Map<String, Long> queryBreakDownMap = testQueryProfileBreakdown.toBreakdownMap(); + assertFalse(queryBreakDownMap == null || queryBreakDownMap.isEmpty()); + assertEquals(66, queryBreakDownMap.size()); + + for (QueryTimingType queryTimingType : QueryTimingType.values()) { + String timingTypeKey = queryTimingType.toString(); + String timingTypeCountKey = queryTimingType + TIMING_TYPE_COUNT_SUFFIX; + + if (queryTimingType.equals(QueryTimingType.CREATE_WEIGHT)) { + final long createWeightTime = queryBreakDownMap.get(timingTypeKey); + assertEquals(createWeightTimer.getApproximateTiming(), createWeightTime); + assertEquals(1, (long) queryBreakDownMap.get(timingTypeCountKey)); + // verify there is no min/max/avg for weight type stats + assertFalse( + queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeCountKey) + || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeCountKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeCountKey) + ); + continue; + } + assertEquals(10, (long) queryBreakDownMap.get(timingTypeKey)); + assertEquals(10, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeKey)); + assertEquals(5, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeKey)); + assertEquals(0, (long) queryBreakDownMap.get(MIN_PREFIX + timingTypeKey)); + assertEquals(1, (long) queryBreakDownMap.get(timingTypeCountKey)); + assertEquals(1, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeCountKey)); + // a min of 0 means one of the slices didn't work on any leaf context + assertEquals(0, (long) queryBreakDownMap.get(MIN_PREFIX + timingTypeCountKey)); + assertEquals(0, (long)
queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeCountKey)); + } + + assertEquals(10, testQueryProfileBreakdown.getMaxSliceNodeTime()); + assertEquals(0, testQueryProfileBreakdown.getMinSliceNodeTime()); + assertEquals(5, testQueryProfileBreakdown.getAvgSliceNodeTime()); + directoryReader.close(); + directory.close(); + } + + public void testOneLeafContextWithEmptySliceCollectorsToLeaves() throws Exception { + final DirectoryReader directoryReader = getDirectoryReader(1); + final Directory directory = directoryReader.directory(); + final long createWeightEarliestStartTime = createWeightTimer.getEarliestTimerStartTime(); + final long createWeightEndTime = createWeightEarliestStartTime + createWeightTimer.getApproximateTiming(); + final Map<String, Long> leafProfileBreakdownMap_1 = getLeafBreakdownMap(createWeightEndTime + 10, 10, 1); + final AbstractProfileBreakdown<QueryTimingType> leafProfileBreakdown_1 = new TestQueryProfileBreakdown( + QueryTimingType.class, + leafProfileBreakdownMap_1 + ); + testQueryProfileBreakdown.getContexts().put(directoryReader.leaves().get(0), leafProfileBreakdown_1); + final Map<String, Long> queryBreakDownMap = testQueryProfileBreakdown.toBreakdownMap(); + assertFalse(queryBreakDownMap == null || queryBreakDownMap.isEmpty()); + assertEquals(26, queryBreakDownMap.size()); + for (QueryTimingType queryTimingType : QueryTimingType.values()) { + String timingTypeKey = queryTimingType.toString(); + String timingTypeCountKey = queryTimingType + TIMING_TYPE_COUNT_SUFFIX; + + if (queryTimingType.equals(QueryTimingType.CREATE_WEIGHT)) { + final long createWeightTime = queryBreakDownMap.get(timingTypeKey); + assertEquals(createWeightTimer.getApproximateTiming(), createWeightTime); + assertEquals(1, (long) queryBreakDownMap.get(timingTypeCountKey)); + // verify there is no min/max/avg for weight type stats + assertFalse( + queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeCountKey) + || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeCountKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeCountKey) + ); + continue; + } + assertNotNull(queryBreakDownMap.get(timingTypeKey)); + assertNotNull(queryBreakDownMap.get(timingTypeCountKey)); + // verify there is no min/max/avg for current breakdown type stats + assertFalse( + queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeCountKey) + || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeCountKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeCountKey) + ); + } + assertEquals(0, testQueryProfileBreakdown.getMaxSliceNodeTime()); + assertEquals(0, testQueryProfileBreakdown.getMinSliceNodeTime()); + assertEquals(0, testQueryProfileBreakdown.getAvgSliceNodeTime()); + directoryReader.close(); + directory.close(); + } + + private Map<String, Long> getLeafBreakdownMap(long startTime, long timeTaken, long count) { + Map<String, Long> leafBreakDownMap = 
new HashMap<>(); + for (QueryTimingType timingType : QueryTimingType.values()) { + if (timingType.equals(QueryTimingType.CREATE_WEIGHT)) { + // don't add anything + continue; + } + String timingTypeKey = timingType.toString(); + leafBreakDownMap.put(timingTypeKey, timeTaken); + leafBreakDownMap.put(timingTypeKey + TIMING_TYPE_COUNT_SUFFIX, count); + leafBreakDownMap.put(timingTypeKey + TIMING_TYPE_START_TIME_SUFFIX, startTime); + } + return leafBreakDownMap; + } + + private DirectoryReader getDirectoryReader(int numLeaves) throws Exception { + final Directory directory = newDirectory(); + IndexWriter iw = new IndexWriter(directory, new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE)); + + for (int i = 0; i < numLeaves; ++i) { + Document document = new Document(); + document.add(new StringField("field1", "value" + i, Field.Store.NO)); + document.add(new StringField("field2", "value" + i, Field.Store.NO)); + iw.addDocument(document); + iw.commit(); + } + iw.deleteDocuments(new Term("field1", "value3")); + iw.close(); + return DirectoryReader.open(directory); + } + + private static class TestQueryProfileBreakdown extends AbstractProfileBreakdown<QueryTimingType> { + private Map<String, Long> breakdownMap; + + public TestQueryProfileBreakdown(Class<QueryTimingType> clazz, Map<String, Long> breakdownMap) { + super(clazz); + this.breakdownMap = breakdownMap; + } + + @Override + public Map<String, Long> toBreakdownMap() { + return breakdownMap; + } + } +} diff --git a/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfilerTests.java b/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfilerTests.java new file mode 100644 index 0000000000000..736bbcdd9e8dd --- /dev/null +++ b/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfilerTests.java @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.profile.query; + +import org.opensearch.search.profile.Timer; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.LinkedList; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; + +public class ConcurrentQueryProfilerTests extends OpenSearchTestCase { + + public void testMergeRewriteTimeIntervals() { + ConcurrentQueryProfiler profiler = new ConcurrentQueryProfiler(new ConcurrentQueryProfileTree()); + List<Timer> timers = new LinkedList<>(); + timers.add(new Timer(217134L, 1L, 1L, 0L, 553074511206907L)); + timers.add(new Timer(228954L, 1L, 1L, 0L, 553074509287335L)); + timers.add(new Timer(228954L, 1L, 1L, 0L, 553074509287336L)); + LinkedList<long[]> mergedIntervals = profiler.mergeRewriteTimeIntervals(timers); + assertThat(mergedIntervals.size(), equalTo(2)); + long[] interval = mergedIntervals.get(0); + assertThat(interval[0], equalTo(553074509287335L)); + assertThat(interval[1], equalTo(553074509516290L)); + interval = mergedIntervals.get(1); + assertThat(interval[0], equalTo(553074511206907L)); + assertThat(interval[1], equalTo(553074511424041L)); + } +} diff --git a/server/src/test/java/org/opensearch/search/profile/query/QueryProfileShardResultTests.java b/server/src/test/java/org/opensearch/search/profile/query/QueryProfileShardResultTests.java index e703396f5cf02..5281c36fb66e2 100644 --- a/server/src/test/java/org/opensearch/search/profile/query/QueryProfileShardResultTests.java +++ b/server/src/test/java/org/opensearch/search/profile/query/QueryProfileShardResultTests.java @@ -32,11 +32,11 @@ package org.opensearch.search.profile.query; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.xcontent.XContentParserUtils; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.search.profile.ProfileResult; import org.opensearch.search.profile.ProfileResultTests; import org.opensearch.test.OpenSearchTestCase; @@ -45,7 +45,7 @@ import java.util.ArrayList; import java.util.List; -import static org.opensearch.common.xcontent.XContentHelper.toXContent; +import static org.opensearch.core.xcontent.XContentHelper.toXContent; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertToXContentEquivalent; public class QueryProfileShardResultTests extends OpenSearchTestCase { @@ -54,7 +54,7 @@ public static QueryProfileShardResult createTestItem() { int size = randomIntBetween(0, 5); List<ProfileResult> queryProfileResults = new ArrayList<>(size); for (int i = 0; i < size; i++) { - queryProfileResults.add(ProfileResultTests.createTestItem(1)); + queryProfileResults.add(ProfileResultTests.createTestItem(1, false)); } CollectorResult profileCollector = CollectorResultTests.createTestItem(2, false); long rewriteTime = randomNonNegativeLong(); diff --git a/server/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java b/server/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java index 528d65bcc5ef2..481a224f2ff0e 100644 --- a/server/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java +++ b/server/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java @@ -33,6 +33,7 @@ package org.opensearch.search.profile.query; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.document.Document; import 
org.apache.lucene.document.Field.Store; import org.apache.lucene.document.StringField; @@ -40,7 +41,6 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.IndexSearcher; @@ -49,7 +49,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.QueryVisitor; -import org.apache.lucene.tests.search.RandomApproximationQuery; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.ScorerSupplier; @@ -58,6 +57,8 @@ import org.apache.lucene.search.TotalHitCountCollector; import org.apache.lucene.search.Weight; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.tests.search.RandomApproximationQuery; import org.apache.lucene.tests.util.TestUtil; import org.opensearch.common.util.io.IOUtils; import org.opensearch.index.shard.IndexShard; @@ -65,9 +66,9 @@ import org.opensearch.search.internal.SearchContext; import org.opensearch.search.profile.ProfileResult; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; import org.junit.After; import org.junit.Before; -import org.opensearch.threadpool.ThreadPool; import java.io.IOException; import java.util.Arrays; @@ -80,6 +81,9 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -88,6 +92,10 @@ public class QueryProfilerTests extends OpenSearchTestCase { private IndexReader reader; private ContextIndexSearcher searcher; private ExecutorService executor; + private static final String MAX_PREFIX = "max_"; + private static final String MIN_PREFIX = "min_"; + private static final String AVG_PREFIX = "avg_"; + private static final String TIMING_TYPE_COUNT_SUFFIX = "_count"; @ParametersFactory public static Collection<Object[]> concurrency() { @@ -153,13 +161,16 @@ public void tearDown() throws Exception { } public void testBasic() throws IOException { - QueryProfiler profiler = new QueryProfiler(executor != null); + QueryProfiler profiler = executor != null + ? 
new ConcurrentQueryProfiler(new ConcurrentQueryProfileTree()) + : new QueryProfiler(new InternalQueryProfileTree()); searcher.setProfiler(profiler); Query query = new TermQuery(new Term("foo", "bar")); searcher.search(query, 1); List<ProfileResult> results = profiler.getTree(); assertEquals(1, results.size()); - Map<String, Long> breakdown = results.get(0).getTimeBreakdown(); + ProfileResult profileResult = results.get(0); + Map<String, Long> breakdown = profileResult.getTimeBreakdown(); assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString()), greaterThan(0L)); assertThat(breakdown.get(QueryTimingType.BUILD_SCORER.toString()), greaterThan(0L)); assertThat(breakdown.get(QueryTimingType.NEXT_DOC.toString()), greaterThan(0L)); @@ -167,25 +178,68 @@ public void testBasic() throws IOException { assertThat(breakdown.get(QueryTimingType.SCORE.toString()), greaterThan(0L)); assertThat(breakdown.get(QueryTimingType.MATCH.toString()), equalTo(0L)); - assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString() + "_count"), greaterThan(0L)); - assertThat(breakdown.get(QueryTimingType.BUILD_SCORER.toString() + "_count"), greaterThan(0L)); - assertThat(breakdown.get(QueryTimingType.NEXT_DOC.toString() + "_count"), greaterThan(0L)); - assertThat(breakdown.get(QueryTimingType.ADVANCE.toString() + "_count"), equalTo(0L)); - assertThat(breakdown.get(QueryTimingType.SCORE.toString() + "_count"), greaterThan(0L)); - assertThat(breakdown.get(QueryTimingType.MATCH.toString() + "_count"), equalTo(0L)); + assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + + if (executor != null) { + assertThat(profileResult.getMaxSliceTime(), is(not(nullValue()))); + assertThat(profileResult.getMinSliceTime(), is(not(nullValue()))); + assertThat(profileResult.getAvgSliceTime(), is(not(nullValue()))); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.BUILD_SCORER), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.BUILD_SCORER), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.BUILD_SCORER), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.NEXT_DOC), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.NEXT_DOC), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.NEXT_DOC), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.ADVANCE), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.ADVANCE), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.ADVANCE), equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.SCORE), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.SCORE), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.SCORE), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.MATCH), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.MATCH), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + 
QueryTimingType.MATCH), equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + } else { + assertThat(profileResult.getMaxSliceTime(), is(nullValue())); + assertThat(profileResult.getMinSliceTime(), is(nullValue())); + assertThat(profileResult.getAvgSliceTime(), is(nullValue())); + } long rewriteTime = profiler.getRewriteTime(); assertThat(rewriteTime, greaterThan(0L)); } public void testNoScoring() throws IOException { - QueryProfiler profiler = new QueryProfiler(executor != null); + QueryProfiler profiler = executor != null + ? 
new ConcurrentQueryProfiler(new ConcurrentQueryProfileTree()) + : new QueryProfiler(new InternalQueryProfileTree()); searcher.setProfiler(profiler); Query query = new TermQuery(new Term("foo", "bar")); searcher.search(query, 1, Sort.INDEXORDER); // scores are not needed List<ProfileResult> results = profiler.getTree(); assertEquals(1, results.size()); - Map<String, Long> breakdown = results.get(0).getTimeBreakdown(); + ProfileResult profileResult = results.get(0); + Map<String, Long> breakdown = profileResult.getTimeBreakdown(); assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString()), greaterThan(0L)); assertThat(breakdown.get(QueryTimingType.BUILD_SCORER.toString()), greaterThan(0L)); assertThat(breakdown.get(QueryTimingType.NEXT_DOC.toString()), greaterThan(0L)); @@ -193,19 +247,61 @@ public void testNoScoring() throws IOException { assertThat(breakdown.get(QueryTimingType.SCORE.toString()), equalTo(0L)); assertThat(breakdown.get(QueryTimingType.MATCH.toString()), equalTo(0L)); - assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString() + "_count"), greaterThan(0L)); - assertThat(breakdown.get(QueryTimingType.BUILD_SCORER.toString() + "_count"), greaterThan(0L)); - assertThat(breakdown.get(QueryTimingType.NEXT_DOC.toString() + "_count"), greaterThan(0L)); - assertThat(breakdown.get(QueryTimingType.ADVANCE.toString() + "_count"), equalTo(0L)); - assertThat(breakdown.get(QueryTimingType.SCORE.toString() + "_count"), equalTo(0L)); - assertThat(breakdown.get(QueryTimingType.MATCH.toString() + "_count"), equalTo(0L)); + assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + + if (executor != null) { + assertThat(profileResult.getMaxSliceTime(), is(not(nullValue()))); + assertThat(profileResult.getMinSliceTime(), is(not(nullValue()))); + assertThat(profileResult.getAvgSliceTime(), is(not(nullValue()))); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.BUILD_SCORER), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.BUILD_SCORER), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.BUILD_SCORER), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.NEXT_DOC), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.NEXT_DOC), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.NEXT_DOC), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.ADVANCE), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.ADVANCE), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.ADVANCE), equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.SCORE), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.SCORE), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.SCORE), equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.MATCH), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.MATCH), equalTo(0L)); + 
assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.MATCH), equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + } else { + assertThat(profileResult.getMaxSliceTime(), is(nullValue())); + assertThat(profileResult.getMinSliceTime(), is(nullValue())); + assertThat(profileResult.getAvgSliceTime(), is(nullValue())); + } long rewriteTime = profiler.getRewriteTime(); assertThat(rewriteTime, greaterThan(0L)); } public void testUseIndexStats() throws IOException { - QueryProfiler profiler = new QueryProfiler(executor != null); + QueryProfiler profiler = executor != null + ? new ConcurrentQueryProfiler(new ConcurrentQueryProfileTree()) + : new QueryProfiler(new InternalQueryProfileTree()); searcher.setProfiler(profiler); Query query = new TermQuery(new Term("foo", "bar")); searcher.count(query); // will use index stats @@ -219,13 +315,16 @@ public void testUseIndexStats() throws IOException { } public void testApproximations() throws IOException { - QueryProfiler profiler = new QueryProfiler(executor != null); + QueryProfiler profiler = executor != null + ? 
new ConcurrentQueryProfiler(new ConcurrentQueryProfileTree()) + : new QueryProfiler(new InternalQueryProfileTree()); searcher.setProfiler(profiler); Query query = new RandomApproximationQuery(new TermQuery(new Term("foo", "bar")), random()); searcher.count(query); List<ProfileResult> results = profiler.getTree(); assertEquals(1, results.size()); - Map<String, Long> breakdown = results.get(0).getTimeBreakdown(); + ProfileResult profileResult = results.get(0); + Map<String, Long> breakdown = profileResult.getTimeBreakdown(); assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString()), greaterThan(0L)); assertThat(breakdown.get(QueryTimingType.BUILD_SCORER.toString()), greaterThan(0L)); assertThat(breakdown.get(QueryTimingType.NEXT_DOC.toString()), greaterThan(0L)); @@ -233,12 +332,52 @@ public void testApproximations() throws IOException { assertThat(breakdown.get(QueryTimingType.SCORE.toString()), equalTo(0L)); assertThat(breakdown.get(QueryTimingType.MATCH.toString()), greaterThan(0L)); - assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString() + "_count"), greaterThan(0L)); - assertThat(breakdown.get(QueryTimingType.BUILD_SCORER.toString() + "_count"), greaterThan(0L)); - assertThat(breakdown.get(QueryTimingType.NEXT_DOC.toString() + "_count"), greaterThan(0L)); - assertThat(breakdown.get(QueryTimingType.ADVANCE.toString() + "_count"), equalTo(0L)); - assertThat(breakdown.get(QueryTimingType.SCORE.toString() + "_count"), equalTo(0L)); - assertThat(breakdown.get(QueryTimingType.MATCH.toString() + "_count"), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + + if (executor != null) { + assertThat(profileResult.getMaxSliceTime(), is(not(nullValue()))); + assertThat(profileResult.getMinSliceTime(), is(not(nullValue()))); + assertThat(profileResult.getAvgSliceTime(), is(not(nullValue()))); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.BUILD_SCORER), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.BUILD_SCORER), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.BUILD_SCORER), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.NEXT_DOC), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.NEXT_DOC), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.NEXT_DOC), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.ADVANCE), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.ADVANCE), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.ADVANCE), equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.SCORE), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.SCORE), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.SCORE), equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.MATCH), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.MATCH), greaterThan(0L)); + 
assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.MATCH), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + } else { + assertThat(profileResult.getMaxSliceTime(), is(nullValue())); + assertThat(profileResult.getMinSliceTime(), is(nullValue())); + assertThat(profileResult.getAvgSliceTime(), is(nullValue())); + } long rewriteTime = profiler.getRewriteTime(); assertThat(rewriteTime, greaterThan(0L)); diff --git a/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java b/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java index 61b78905334ec..4bd4d406e4391 100644 --- a/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java +++ b/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java @@ -33,6 +33,7 @@ package org.opensearch.search.query; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field.Store; @@ -68,6 +69,7 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.PrefixQuery; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.Sort; @@ -87,6 +89,7 @@ import org.opensearch.action.search.SearchShardTask; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.tasks.TaskCancelledException; import org.opensearch.index.mapper.DateFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; @@ -105,7 +108,6 @@ import org.opensearch.search.internal.ScrollContext; import org.opensearch.search.internal.SearchContext; import org.opensearch.search.sort.SortAndFormats; -import 
org.opensearch.tasks.TaskCancelledException; import org.opensearch.test.TestSearchContext; import org.opensearch.threadpool.ThreadPool; @@ -119,6 +121,8 @@ import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; +import static org.opensearch.search.query.TopDocsCollectorContext.hasInfMaxScore; +import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -126,14 +130,13 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.atLeastOnce; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.when; -import static org.mockito.Mockito.verify; import static org.mockito.Mockito.times; -import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.opensearch.search.query.TopDocsCollectorContext.hasInfMaxScore; +import static org.mockito.Mockito.when; public class QueryPhaseTests extends IndexShardTestCase { private IndexShard indexShard; @@ -435,10 +438,16 @@ public void testTerminateAfterEarlyTermination() throws Exception { assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + // Do not expect an exact match when terminate_after is used in conjunction with size = 0, as an optimization introduced by + // https://issues.apache.org/jira/browse/LUCENE-10620 can produce a total hit count >= terminate_after, because + // TotalHitCountCollector is used in this case as part of the Weight#count() optimization context.setSize(0); QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); assertTrue(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat( + context.queryResult().topDocs().topDocs.totalHits.value, + allOf(greaterThanOrEqualTo(1L), lessThanOrEqualTo((long) numDocs)) + ); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); } @@ -464,7 +473,10 @@ public void testTerminateAfterEarlyTermination() throws Exception { context.parsedQuery(new ParsedQuery(bq)); QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); assertTrue(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat( + context.queryResult().topDocs().topDocs.totalHits.value, + allOf(greaterThanOrEqualTo(1L), lessThanOrEqualTo((long) numDocs)) + ); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); } { @@ -484,9 +496,12 @@ public void testTerminateAfterEarlyTermination() throws Exception { context.queryCollectorManagers().put(TotalHitCountCollector.class, manager); QueryPhase.executeInternal(context.withCleanQueryResult(), queryPhaseSearcher); assertTrue(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat( + context.queryResult().topDocs().topDocs.totalHits.value, + allOf(greaterThanOrEqualTo(1L), lessThanOrEqualTo((long) numDocs)) + ); assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); -
assertThat(manager.getTotalHits(), equalTo(1)); + assertThat(manager.getTotalHits(), allOf(greaterThanOrEqualTo(1), lessThanOrEqualTo(numDocs))); } // tests with trackTotalHits and terminateAfter @@ -501,7 +516,10 @@ public void testTerminateAfterEarlyTermination() throws Exception { if (trackTotalHits == -1) { assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(0L)); } else { - assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) Math.min(trackTotalHits, 10))); + assertThat( + context.queryResult().topDocs().topDocs.totalHits.value, + allOf(greaterThanOrEqualTo(Math.min(trackTotalHits, 10L)), lessThanOrEqualTo((long) numDocs)) + ); } assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); // The concurrent search terminates the collection when the number of hits is reached by each @@ -509,7 +527,7 @@ public void testTerminateAfterEarlyTermination() throws Exception { // slices (as the unit of concurrency). To address that, we have to use the shared global state, // much as HitsThresholdChecker does. if (executor == null) { - assertThat(manager.getTotalHits(), equalTo(10)); + assertThat(manager.getTotalHits(), allOf(greaterThanOrEqualTo(Math.min(trackTotalHits, 10)), lessThanOrEqualTo(numDocs))); } } @@ -658,7 +676,7 @@ public void testIndexSortScrollOptimization() throws Exception { @SuppressWarnings("unchecked") FieldComparator<Object> comparator = (FieldComparator<Object>) searchSortAndFormat.sort.getSort()[i].getComparator( 1, - false + Pruning.NONE ); int cmp = comparator.compareValues(firstDoc.fields[i], lastDoc.fields[i]); if (cmp == 0) { @@ -1208,6 +1226,12 @@ private static ContextIndexSearcher newContextSearcher(IndexReader reader, Execu IndexShard indexShard = mock(IndexShard.class); when(searchContext.indexShard()).thenReturn(indexShard); when(searchContext.bucketCollectorProcessor()).thenReturn(SearchContext.NO_OP_BUCKET_COLLECTOR_PROCESSOR); + when(searchContext.shouldUseConcurrentSearch()).thenReturn(executor != null); + if (executor != null) { + when(searchContext.getTargetMaxSliceCount()).thenReturn(randomIntBetween(0, 2)); + } else { + when(searchContext.getTargetMaxSliceCount()).thenThrow(IllegalStateException.class); + } return new ContextIndexSearcher( reader, IndexSearcher.getDefaultSimilarity(), @@ -1225,6 +1249,12 @@ private static ContextIndexSearcher newEarlyTerminationContextSearcher(IndexRead IndexShard indexShard = mock(IndexShard.class); when(searchContext.indexShard()).thenReturn(indexShard); when(searchContext.bucketCollectorProcessor()).thenReturn(SearchContext.NO_OP_BUCKET_COLLECTOR_PROCESSOR); + when(searchContext.shouldUseConcurrentSearch()).thenReturn(executor != null); + if (executor != null) { + when(searchContext.getTargetMaxSliceCount()).thenReturn(randomIntBetween(0, 2)); + } else { + when(searchContext.getTargetMaxSliceCount()).thenThrow(IllegalStateException.class); + } return new ContextIndexSearcher( reader, IndexSearcher.getDefaultSimilarity(), diff --git a/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java b/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java index 6d30d7993c850..6af04e15acef0 100644 --- a/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java +++ b/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java @@ -9,6 +9,7 @@ package org.opensearch.search.query; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import 
org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field.Store; @@ -24,7 +25,6 @@ import org.apache.lucene.queries.spans.SpanNearQuery; import org.apache.lucene.queries.spans.SpanTermQuery; import org.apache.lucene.search.BooleanClause.Occur; -import org.apache.lucene.search.grouping.CollapseTopFieldDocs; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Collector; import org.apache.lucene.search.FieldComparator; @@ -35,19 +35,21 @@ import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.Query; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TotalHits; import org.apache.lucene.search.Weight; +import org.apache.lucene.search.grouping.CollapseTopFieldDocs; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; import org.opensearch.action.search.SearchShardTask; import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.index.mapper.NumberFieldMapper.NumberFieldType; import org.opensearch.index.mapper.NumberFieldMapper.NumberType; import org.opensearch.index.query.ParsedQuery; @@ -86,14 +88,14 @@ import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.Matchers.anyOf; -import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import static org.hamcrest.Matchers.hasSize; public class QueryProfilePhaseTests extends IndexShardTestCase { private IndexShard indexShard; @@ -335,8 +337,21 @@ public void testMinScoreDisablesCountOptimization() throws Exception { assertEquals(TotalHits.Relation.EQUAL_TO, context.queryResult().topDocs().topDocs.totalHits.relation); assertProfileData(context, "MatchAllDocsQuery", query -> { assertThat(query.getTimeBreakdown().keySet(), not(empty())); - assertThat(query.getTimeBreakdown().get("score"), greaterThanOrEqualTo(100L)); + assertThat(query.getTimeBreakdown().get("score"), greaterThanOrEqualTo(1L)); assertThat(query.getTimeBreakdown().get("score_count"), equalTo(1L)); + if (executor != null) { + long maxScore = query.getTimeBreakdown().get("max_score"); + long minScore = query.getTimeBreakdown().get("min_score"); + long avgScore = query.getTimeBreakdown().get("avg_score"); + assertThat(maxScore, greaterThanOrEqualTo(1L)); + assertThat(minScore, greaterThanOrEqualTo(1L)); + assertThat(avgScore, greaterThanOrEqualTo(1L)); + assertThat(maxScore, greaterThanOrEqualTo(avgScore)); + assertThat(avgScore, greaterThanOrEqualTo(minScore)); + assertThat(query.getTimeBreakdown().get("max_score_count"), equalTo(1L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), equalTo(1L)); + 
assertThat(query.getTimeBreakdown().get("avg_score_count"), equalTo(1L)); + } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); }, collector -> { @@ -476,6 +491,14 @@ public void testTerminateAfterEarlyTermination() throws Exception { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L)); + if (executor != null) { + assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThan(0L)); + } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); }, collector -> { @@ -546,6 +569,14 @@ public void testTerminateAfterEarlyTermination() throws Exception { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L)); + if (executor != null) { + assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThan(0L)); + } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); }, collector -> { @@ -584,6 +615,24 @@ public void testTerminateAfterEarlyTermination() throws Exception { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L)); + if (executor != null) { + long maxScore = query.getTimeBreakdown().get("max_score"); + long minScore = query.getTimeBreakdown().get("min_score"); + long avgScore = query.getTimeBreakdown().get("avg_score"); + long maxScoreCount = query.getTimeBreakdown().get("max_score_count"); + long minScoreCount = query.getTimeBreakdown().get("min_score_count"); + long avgScoreCount = query.getTimeBreakdown().get("avg_score_count"); + assertThat(maxScore, greaterThan(0L)); + assertThat(minScore, greaterThanOrEqualTo(0L)); + assertThat(avgScore, greaterThanOrEqualTo(0L)); + assertThat(maxScore, greaterThanOrEqualTo(avgScore)); + assertThat(avgScore, greaterThanOrEqualTo(minScore)); + assertThat(maxScoreCount, greaterThan(0L)); + assertThat(minScoreCount, greaterThanOrEqualTo(0L)); + assertThat(avgScoreCount, greaterThanOrEqualTo(0L)); + assertThat(maxScoreCount, greaterThanOrEqualTo(avgScoreCount)); + assertThat(avgScoreCount, greaterThanOrEqualTo(minScoreCount)); + } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); 
assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); @@ -706,15 +755,51 @@ public void testTerminateAfterEarlyTermination() throws Exception { assertThat(query.getProfiledChildren().get(0).getTime(), greaterThan(0L)); assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); - assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score"), greaterThan(0L)); - assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score_count"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score"), greaterThanOrEqualTo(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(0L)); + if (executor != null) { + long maxScore = query.getProfiledChildren().get(0).getTimeBreakdown().get("max_score"); + long minScore = query.getProfiledChildren().get(0).getTimeBreakdown().get("min_score"); + long avgScore = query.getProfiledChildren().get(0).getTimeBreakdown().get("avg_score"); + long maxScoreCount = query.getProfiledChildren().get(0).getTimeBreakdown().get("max_score_count"); + long minScoreCount = query.getProfiledChildren().get(0).getTimeBreakdown().get("min_score_count"); + long avgScoreCount = query.getProfiledChildren().get(0).getTimeBreakdown().get("avg_score_count"); + assertThat(maxScore, greaterThanOrEqualTo(0L)); + assertThat(minScore, greaterThanOrEqualTo(0L)); + assertThat(avgScore, greaterThanOrEqualTo(0L)); + assertThat(maxScore, greaterThanOrEqualTo(avgScore)); + assertThat(avgScore, greaterThanOrEqualTo(minScore)); + assertThat(maxScoreCount, greaterThanOrEqualTo(0L)); + assertThat(minScoreCount, greaterThanOrEqualTo(0L)); + assertThat(avgScoreCount, greaterThanOrEqualTo(0L)); + assertThat(maxScoreCount, greaterThanOrEqualTo(avgScoreCount)); + assertThat(avgScoreCount, greaterThanOrEqualTo(minScoreCount)); + } assertThat(query.getProfiledChildren().get(1).getQueryName(), equalTo("TermQuery")); assertThat(query.getProfiledChildren().get(1).getTime(), greaterThan(0L)); assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); - assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("score"), greaterThan(0L)); - assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("score_count"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("score"), greaterThanOrEqualTo(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(0L)); + if (executor != null) { + long maxScore = query.getProfiledChildren().get(1).getTimeBreakdown().get("max_score"); + long minScore = query.getProfiledChildren().get(1).getTimeBreakdown().get("min_score"); + long avgScore = query.getProfiledChildren().get(1).getTimeBreakdown().get("avg_score"); + long maxScoreCount = query.getProfiledChildren().get(1).getTimeBreakdown().get("max_score_count"); + long minScoreCount = query.getProfiledChildren().get(1).getTimeBreakdown().get("min_score_count"); + long avgScoreCount = query.getProfiledChildren().get(1).getTimeBreakdown().get("avg_score_count"); + assertThat(maxScore, greaterThanOrEqualTo(0L)); + assertThat(minScore, greaterThanOrEqualTo(0L)); + 
assertThat(avgScore, greaterThanOrEqualTo(0L)); + assertThat(maxScore, greaterThanOrEqualTo(avgScore)); + assertThat(avgScore, greaterThanOrEqualTo(minScore)); + assertThat(maxScoreCount, greaterThanOrEqualTo(0L)); + assertThat(minScoreCount, greaterThanOrEqualTo(0L)); + assertThat(avgScoreCount, greaterThanOrEqualTo(0L)); + assertThat(maxScoreCount, greaterThanOrEqualTo(avgScoreCount)); + assertThat(avgScoreCount, greaterThanOrEqualTo(minScoreCount)); + } }, collector -> { assertThat(collector.getReason(), equalTo("search_terminate_after_count")); assertThat(collector.getTime(), greaterThan(0L)); @@ -1002,7 +1087,7 @@ public void testIndexSortScrollOptimization() throws Exception { @SuppressWarnings("unchecked") FieldComparator<Object> comparator = (FieldComparator<Object>) searchSortAndFormat.sort.getSort()[i].getComparator( i, - randomBoolean() + randomFrom(Pruning.values()) ); int cmp = comparator.compareValues(firstDoc.fields[i], lastDoc.fields[i]); if (cmp == 0) { @@ -1053,6 +1138,14 @@ public void testDisableTopScoreCollection() throws Exception { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L)); + if (executor != null) { + assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score"), greaterThanOrEqualTo(0L)); + assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(0L)); + assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThan(0L)); + } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); }, collector -> { @@ -1132,6 +1225,14 @@ public void testMinScore() throws Exception { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("score_count"), equalTo(10L)); + if (executor != null) { + assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("max_score_count"), equalTo(10L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), equalTo(10L)); + assertThat(query.getTimeBreakdown().get("avg_score_count"), equalTo(10L)); + } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); @@ -1209,6 +1310,14 @@ public void testMaxScore() throws Exception { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(6L)); + if (executor != null) { + assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score"), greaterThanOrEqualTo(0L)); + assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThanOrEqualTo(4L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), 
greaterThanOrEqualTo(0L)); + assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThanOrEqualTo(1L)); + } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); @@ -1244,6 +1353,14 @@ public void testMaxScore() throws Exception { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(6L)); + if (executor != null) { + assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThanOrEqualTo(6L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(0L)); + assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThanOrEqualTo(1L)); + } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); @@ -1314,6 +1431,24 @@ public void testCollapseQuerySearchResults() throws Exception { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(6L)); + if (executor != null) { + long maxScore = query.getTimeBreakdown().get("max_score"); + long minScore = query.getTimeBreakdown().get("min_score"); + long avgScore = query.getTimeBreakdown().get("avg_score"); + long maxScoreCount = query.getTimeBreakdown().get("max_score_count"); + long minScoreCount = query.getTimeBreakdown().get("min_score_count"); + long avgScoreCount = query.getTimeBreakdown().get("avg_score_count"); + assertThat(maxScore, greaterThan(0L)); + assertThat(minScore, greaterThan(0L)); + assertThat(avgScore, greaterThan(0L)); + assertThat(maxScore, greaterThanOrEqualTo(avgScore)); + assertThat(avgScore, greaterThanOrEqualTo(minScore)); + assertThat(maxScoreCount, greaterThan(0L)); + assertThat(minScoreCount, greaterThan(0L)); + assertThat(avgScoreCount, greaterThan(0L)); + assertThat(maxScoreCount, greaterThanOrEqualTo(avgScoreCount)); + assertThat(avgScoreCount, greaterThanOrEqualTo(minScoreCount)); + } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); assertThat(query.getProfiledChildren(), empty()); @@ -1341,6 +1476,24 @@ public void testCollapseQuerySearchResults() throws Exception { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(6L)); + if (executor != null) { + long maxScore = query.getTimeBreakdown().get("max_score"); + long minScore = query.getTimeBreakdown().get("min_score"); + long avgScore = query.getTimeBreakdown().get("avg_score"); + long maxScoreCount = query.getTimeBreakdown().get("max_score_count"); + long minScoreCount = query.getTimeBreakdown().get("min_score_count"); + long avgScoreCount = query.getTimeBreakdown().get("avg_score_count"); + assertThat(maxScore, greaterThan(0L)); + assertThat(minScore, greaterThan(0L)); + assertThat(avgScore, greaterThan(0L)); + assertThat(maxScore, 
greaterThanOrEqualTo(avgScore)); + assertThat(avgScore, greaterThanOrEqualTo(minScore)); + assertThat(maxScoreCount, greaterThan(0L)); + assertThat(minScoreCount, greaterThan(0L)); + assertThat(avgScoreCount, greaterThan(0L)); + assertThat(maxScoreCount, greaterThanOrEqualTo(avgScoreCount)); + assertThat(avgScoreCount, greaterThanOrEqualTo(minScoreCount)); + } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); assertThat(query.getProfiledChildren(), empty()); diff --git a/server/src/test/java/org/opensearch/search/query/QuerySearchResultTests.java b/server/src/test/java/org/opensearch/search/query/QuerySearchResultTests.java index 912929db10a5b..41e4e1ae45a73 100644 --- a/server/src/test/java/org/opensearch/search/query/QuerySearchResultTests.java +++ b/server/src/test/java/org/opensearch/search/query/QuerySearchResultTests.java @@ -40,10 +40,10 @@ import org.opensearch.action.OriginalIndicesTests; import org.opensearch.action.search.SearchRequest; import org.opensearch.common.UUIDs; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.lucene.search.TopDocsAndMaxScore; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.index.shard.ShardId; import org.opensearch.search.DocValueFormat; import org.opensearch.search.SearchModule; diff --git a/server/src/test/java/org/opensearch/search/query/ScriptScoreQueryTests.java b/server/src/test/java/org/opensearch/search/query/ScriptScoreQueryTests.java index e1002e114822e..55c50b8cf854d 100644 --- a/server/src/test/java/org/opensearch/search/query/ScriptScoreQueryTests.java +++ b/server/src/test/java/org/opensearch/search/query/ScriptScoreQueryTests.java @@ -39,9 +39,14 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.TwoPhaseIterator; import org.apache.lucene.search.Weight; import org.apache.lucene.store.Directory; import org.opensearch.Version; @@ -49,6 +54,7 @@ import org.opensearch.common.lucene.search.function.ScriptScoreQuery; import org.opensearch.script.ScoreScript; import org.opensearch.script.Script; +import org.opensearch.script.ScriptType; import org.opensearch.search.lookup.LeafSearchLookup; import org.opensearch.search.lookup.SearchLookup; import org.opensearch.test.OpenSearchTestCase; @@ -56,6 +62,8 @@ import org.junit.Before; import java.io.IOException; +import java.util.HashMap; +import java.util.Map; import java.util.function.Function; import static org.hamcrest.CoreMatchers.containsString; @@ -177,6 +185,37 @@ public void testScriptScoreErrorOnNegativeScore() { assertTrue(e.getMessage().contains("Must be a non-negative score!")); } + public void testTwoPhaseIteratorDelegation() throws IOException { + Map<String, Object> params = new HashMap<>(); + String scriptSource = "doc['field'].value != null ? 
2.0 : 0.0"; // Adjust based on actual field and logic + Script script = new Script(ScriptType.INLINE, "painless", scriptSource, params); + float minScore = 1.0f; // This should be below the score produced by the script for all docs + ScoreScript.LeafFactory factory = newFactory(script, false, explanation -> 2.0); + + Query subQuery = new MatchAllDocsQuery(); + ScriptScoreQuery scriptScoreQuery = new ScriptScoreQuery(subQuery, script, factory, minScore, "index", 0, Version.CURRENT); + + Weight weight = searcher.createWeight(searcher.rewrite(scriptScoreQuery), ScoreMode.COMPLETE, 1f); + + boolean foundMatchingDoc = false; + for (LeafReaderContext leafContext : searcher.getIndexReader().leaves()) { + Scorer scorer = weight.scorer(leafContext); + if (scorer != null) { + TwoPhaseIterator twoPhaseIterator = scorer.twoPhaseIterator(); + assertNotNull("TwoPhaseIterator should not be null", twoPhaseIterator); + DocIdSetIterator docIdSetIterator = twoPhaseIterator.approximation(); + int docId; + while ((docId = docIdSetIterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { + if (twoPhaseIterator.matches()) { + foundMatchingDoc = true; + break; + } + } + } + } + assertTrue("Expected to find at least one matching document", foundMatchingDoc); + } + private ScoreScript.LeafFactory newFactory( Script script, boolean needsScore, @@ -184,6 +223,7 @@ private ScoreScript.LeafFactory newFactory( ) { SearchLookup lookup = mock(SearchLookup.class); LeafSearchLookup leafLookup = mock(LeafSearchLookup.class); + IndexSearcher indexSearcher = mock(IndexSearcher.class); when(lookup.getLeafSearchLookup(any())).thenReturn(leafLookup); return new ScoreScript.LeafFactory() { @Override @@ -193,7 +233,7 @@ public boolean needs_score() { @Override public ScoreScript newInstance(LeafReaderContext ctx) throws IOException { - return new ScoreScript(script.getParams(), lookup, leafReaderContext) { + return new ScoreScript(script.getParams(), lookup, indexSearcher, leafReaderContext) { @Override public double execute(ExplanationHolder explanation) { return function.apply(explanation); @@ -202,5 +242,4 @@ public double execute(ExplanationHolder explanation) { } }; } - } diff --git a/server/src/test/java/org/opensearch/search/rescore/QueryRescorerBuilderTests.java b/server/src/test/java/org/opensearch/search/rescore/QueryRescorerBuilderTests.java index aa7bcf4d63e3d..a71c18aa2266b 100644 --- a/server/src/test/java/org/opensearch/search/rescore/QueryRescorerBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/rescore/QueryRescorerBuilderTests.java @@ -36,19 +36,19 @@ import org.opensearch.OpenSearchParseException; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.core.common.ParsingException; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.BigArrays; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedObjectNotFoundException; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import 
org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.ContentPath; import org.opensearch.index.mapper.MappedFieldType; @@ -61,8 +61,8 @@ import org.opensearch.index.query.Rewriteable; import org.opensearch.search.SearchModule; import org.opensearch.search.rescore.QueryRescorer.QueryRescoreContext; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTestCase; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -130,7 +130,7 @@ private RescorerBuilder<?> copy(RescorerBuilder<?> original) throws IOException public void testFromXContent() throws IOException { for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) { RescorerBuilder<?> rescoreBuilder = randomRescoreBuilder(); - XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); if (randomBoolean()) { builder.prettyPrint(); } diff --git a/server/src/test/java/org/opensearch/search/scroll/RestClearScrollActionTests.java b/server/src/test/java/org/opensearch/search/scroll/RestClearScrollActionTests.java index 8625c0004c66e..748bbb81b0513 100644 --- a/server/src/test/java/org/opensearch/search/scroll/RestClearScrollActionTests.java +++ b/server/src/test/java/org/opensearch/search/scroll/RestClearScrollActionTests.java @@ -32,13 +32,13 @@ package org.opensearch.search.scroll; -import org.opensearch.action.ActionListener; import org.opensearch.action.search.ClearScrollRequest; import org.opensearch.action.search.ClearScrollResponse; import org.opensearch.client.node.NodeClient; import org.opensearch.common.SetOnce; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.search.RestClearScrollAction; import org.opensearch.test.OpenSearchTestCase; @@ -57,7 +57,7 @@ public void testParseClearScrollRequestWithInvalidJsonThrowsException() throws E RestClearScrollAction action = new RestClearScrollAction(); RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent( new BytesArray("{invalid_json}"), - XContentType.JSON + MediaTypeRegistry.JSON ).build(); Exception e = expectThrows(IllegalArgumentException.class, () -> action.prepareRequest(request, null)); assertThat(e.getMessage(), equalTo("Failed to parse request body")); @@ -76,7 +76,7 @@ public void clearScroll(ClearScrollRequest request, ActionListener<ClearScrollRe RestClearScrollAction action = new RestClearScrollAction(); RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams( Collections.singletonMap("scroll_id", "QUERY_STRING") - ).withContent(new BytesArray("{\"scroll_id\": [\"BODY\"]}"), XContentType.JSON).build(); + ).withContent(new BytesArray("{\"scroll_id\": [\"BODY\"]}"), MediaTypeRegistry.JSON).build(); FakeRestChannel channel = new FakeRestChannel(request, false, 0); action.handleRequest(request, channel, nodeClient); diff --git a/server/src/test/java/org/opensearch/search/scroll/RestSearchScrollActionTests.java 
b/server/src/test/java/org/opensearch/search/scroll/RestSearchScrollActionTests.java index 591f9e5564467..3c73e6a865409 100644 --- a/server/src/test/java/org/opensearch/search/scroll/RestSearchScrollActionTests.java +++ b/server/src/test/java/org/opensearch/search/scroll/RestSearchScrollActionTests.java @@ -32,13 +32,13 @@ package org.opensearch.search.scroll; -import org.opensearch.action.ActionListener; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchScrollRequest; import org.opensearch.client.node.NodeClient; import org.opensearch.common.SetOnce; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.search.RestSearchScrollAction; import org.opensearch.test.OpenSearchTestCase; @@ -57,7 +57,7 @@ public void testParseSearchScrollRequestWithInvalidJsonThrowsException() throws RestSearchScrollAction action = new RestSearchScrollAction(); RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent( new BytesArray("{invalid_json}"), - XContentType.JSON + MediaTypeRegistry.JSON ).build(); Exception e = expectThrows(IllegalArgumentException.class, () -> action.prepareRequest(request, null)); assertThat(e.getMessage(), equalTo("Failed to parse request body")); @@ -78,7 +78,7 @@ public void searchScroll(SearchScrollRequest request, ActionListener<SearchRespo params.put("scroll_id", "QUERY_STRING"); params.put("scroll", "1000m"); RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params) - .withContent(new BytesArray("{\"scroll_id\":\"BODY\", \"scroll\":\"1m\"}"), XContentType.JSON) + .withContent(new BytesArray("{\"scroll_id\":\"BODY\", \"scroll\":\"1m\"}"), MediaTypeRegistry.JSON) .build(); FakeRestChannel channel = new FakeRestChannel(request, false, 0); action.handleRequest(request, channel, nodeClient); diff --git a/server/src/test/java/org/opensearch/search/searchafter/SearchAfterBuilderTests.java b/server/src/test/java/org/opensearch/search/searchafter/SearchAfterBuilderTests.java index 47579e31dc7c2..53c07674f5bd7 100644 --- a/server/src/test/java/org/opensearch/search/searchafter/SearchAfterBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/searchafter/SearchAfterBuilderTests.java @@ -34,19 +34,21 @@ import org.apache.lucene.document.LatLonDocValuesField; import org.apache.lucene.search.FieldComparator; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.SortField; import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.SortedSetSortField; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.geo.GeoPoint; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.core.common.text.Text; import org.opensearch.common.util.BigArrays; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.text.Text; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import 
org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.search.DocValueFormat; import org.opensearch.search.MultiValueMode; @@ -190,7 +192,7 @@ public void testEqualsAndHashcode() throws Exception { public void testFromXContent() throws Exception { for (int runs = 0; runs < 20; runs++) { SearchAfterBuilder searchAfterBuilder = randomJsonSearchFromBuilder(); - XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); if (randomBoolean()) { builder.prettyPrint(); } @@ -213,10 +215,10 @@ public void testFromXContentIllegalType() throws Exception { for (XContentType type : XContentType.values()) { // BIG_DECIMAL // ignore json and yaml, they parse floating point numbers as floats/doubles - if (type == XContentType.JSON || type == XContentType.YAML) { + if (type == MediaTypeRegistry.JSON || type == XContentType.YAML) { continue; } - XContentBuilder xContent = XContentFactory.contentBuilder(type); + XContentBuilder xContent = MediaTypeRegistry.contentBuilder(type); xContent.startObject().startArray("search_after").value(new BigDecimal("9223372036854776003.3")).endArray().endObject(); try (XContentParser parser = createParser(xContent)) { parser.nextToken(); @@ -278,7 +280,7 @@ public SortField.Type reducedType() { } @Override - public FieldComparator<?> newComparator(String fieldname, int numHits, boolean enableSkipping, boolean reversed) { + public FieldComparator<?> newComparator(String fieldname, int numHits, Pruning pruning, boolean reversed) { return null; } diff --git a/server/src/test/java/org/opensearch/search/slice/DocValuesSliceQueryTests.java b/server/src/test/java/org/opensearch/search/slice/DocValuesSliceQueryTests.java index 4d2a441a180a6..70c5e0a4a8237 100644 --- a/server/src/test/java/org/opensearch/search/slice/DocValuesSliceQueryTests.java +++ b/server/src/test/java/org/opensearch/search/slice/DocValuesSliceQueryTests.java @@ -38,14 +38,14 @@ import org.apache.lucene.document.StringField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.Collector; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.LeafCollector; -import org.apache.lucene.tests.search.QueryUtils; import org.apache.lucene.search.Scorable; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.tests.search.QueryUtils; import org.apache.lucene.util.NumericUtils; import org.opensearch.common.UUIDs; import org.opensearch.common.util.BitMixer; diff --git a/server/src/test/java/org/opensearch/search/slice/SliceBuilderTests.java b/server/src/test/java/org/opensearch/search/slice/SliceBuilderTests.java index 28d96c916bc41..2761efc00f4fe 100644 --- a/server/src/test/java/org/opensearch/search/slice/SliceBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/slice/SliceBuilderTests.java @@ -32,7 +32,6 @@ package org.opensearch.search.slice; -import org.apache.lucene.tests.analysis.MockAnalyzer; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexReader; @@ -42,6 +41,7 @@ import org.apache.lucene.search.Query; import 
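The `newComparator` override in `SearchAfterBuilderTests` tracks Lucene's replacement of the `enableSkipping` boolean with the `Pruning` enum; OpenSearch's `XFieldComparatorSource#newComparator` mirrors the same change. A sketch of the equivalent change at the plain Lucene `SortField` level, assuming Lucene 9.8+ where `getComparator(int, Pruning)` is the current signature:

```java
import org.apache.lucene.search.FieldComparator;
import org.apache.lucene.search.Pruning;
import org.apache.lucene.search.SortField;

public class PruningAwareSortField extends SortField {
    public PruningAwareSortField(String field) {
        super(field, Type.CUSTOM);
    }

    @Override
    public FieldComparator<?> getComparator(int numHits, Pruning pruning) {
        // Pruning.NONE corresponds to the old enableSkipping=false; the other
        // values (GREATER_THAN, GREATER_THAN_OR_EQUAL_TO) allow the comparator
        // to skip non-competitive documents. A real implementation would
        // return an actual comparator here; the test stub returns null.
        return null;
    }
}
```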
org.apache.lucene.store.ByteBuffersDirectory; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.analysis.MockAnalyzer; import org.opensearch.Version; import org.opensearch.action.OriginalIndices; import org.opensearch.action.search.SearchRequest; @@ -54,11 +54,12 @@ import org.opensearch.cluster.routing.ShardIterator; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.IndexSettings; @@ -67,7 +68,6 @@ import org.opensearch.index.mapper.TextSearchInfo; import org.opensearch.index.mapper.ValueFetcher; import org.opensearch.index.query.QueryShardContext; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.search.lookup.SearchLookup; import org.opensearch.test.OpenSearchTestCase; @@ -208,7 +208,7 @@ public void testEqualsAndHashcode() throws Exception { public void testFromXContent() throws Exception { SliceBuilder sliceBuilder = randomSliceBuilder(); - XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); if (randomBoolean()) { builder.prettyPrint(); } diff --git a/server/src/test/java/org/opensearch/search/slice/TermsSliceQueryTests.java b/server/src/test/java/org/opensearch/search/slice/TermsSliceQueryTests.java index 8efa4eeef80dd..c7fb94edd9af0 100644 --- a/server/src/test/java/org/opensearch/search/slice/TermsSliceQueryTests.java +++ b/server/src/test/java/org/opensearch/search/slice/TermsSliceQueryTests.java @@ -38,14 +38,14 @@ import org.apache.lucene.document.StringField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.search.Collector; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.LeafCollector; -import org.apache.lucene.tests.search.QueryUtils; import org.apache.lucene.search.Scorable; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.tests.search.QueryUtils; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.StringHelper; import org.opensearch.common.UUIDs; diff --git a/server/src/test/java/org/opensearch/search/sort/AbstractSortTestCase.java b/server/src/test/java/org/opensearch/search/sort/AbstractSortTestCase.java index e8d713d15b337..257ff1015e3b4 100644 --- a/server/src/test/java/org/opensearch/search/sort/AbstractSortTestCase.java +++ b/server/src/test/java/org/opensearch/search/sort/AbstractSortTestCase.java @@ -37,17 +37,17 @@ import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.TriFunction; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import 
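Much of the churn in these files is import regrouping rather than behavior change. The order the hunks converge on appears to be: third-party packages (e.g. `org.apache.lucene`) first, then `org.opensearch.*` alphabetically (so `common` sorts before `core`), then JUnit, then `java.*`, then Mockito, with static imports last. A hypothetical skeleton illustrating that grouping, inferred from the diffs rather than from a documented rule:

```java
import org.apache.lucene.search.Query;          // third-party libraries first

import org.opensearch.common.settings.Settings; // org.opensearch.*, alphabetical,
import org.opensearch.core.index.Index;         // which puts "common" before "core"
import org.opensearch.test.OpenSearchTestCase;  // test utilities included
import org.junit.BeforeClass;                   // JUnit directly after OpenSearch

import java.io.IOException;                     // JDK packages

import org.mockito.Mockito;                     // Mockito after the JDK block

import static java.util.Collections.emptyList;  // static imports last

public class ImportOrderSkeleton extends OpenSearchTestCase {}
```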
org.opensearch.common.settings.Settings; import org.opensearch.common.util.BigArrays; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.env.Environment; -import org.opensearch.core.index.Index; import org.opensearch.index.IndexSettings; import org.opensearch.index.cache.bitset.BitsetFilterCache; import org.opensearch.index.fielddata.IndexFieldData; @@ -71,11 +71,10 @@ import org.opensearch.search.DocValueFormat; import org.opensearch.search.SearchModule; import org.opensearch.search.lookup.SearchLookup; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTestCase; import org.junit.AfterClass; import org.junit.BeforeClass; -import org.mockito.Mockito; import java.io.IOException; import java.util.Collections; @@ -83,6 +82,8 @@ import java.util.function.Function; import java.util.function.Supplier; +import org.mockito.Mockito; + import static java.util.Collections.emptyList; import static org.opensearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; @@ -131,7 +132,7 @@ public void testFromXContent() throws IOException { for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) { T testItem = createTestItem(); - XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); if (randomBoolean()) { builder.prettyPrint(); } diff --git a/server/src/test/java/org/opensearch/search/sort/BucketedSortTestCase.java b/server/src/test/java/org/opensearch/search/sort/BucketedSortTestCase.java index 98c04eaf9415a..649c3f06bbf7c 100644 --- a/server/src/test/java/org/opensearch/search/sort/BucketedSortTestCase.java +++ b/server/src/test/java/org/opensearch/search/sort/BucketedSortTestCase.java @@ -33,14 +33,14 @@ package org.opensearch.search.sort; import org.apache.lucene.index.LeafReaderContext; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.BitArray; import org.opensearch.common.util.IntArray; import org.opensearch.common.util.MockBigArrays; import org.opensearch.common.util.MockPageCacheRecycler; -import org.opensearch.common.lease.Releasable; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.search.DocValueFormat; import org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/search/sort/FieldSortBuilderTests.java b/server/src/test/java/org/opensearch/search/sort/FieldSortBuilderTests.java index bf3cde7ec6f44..9b8cd1b5f1ce0 100644 --- a/server/src/test/java/org/opensearch/search/sort/FieldSortBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/sort/FieldSortBuilderTests.java @@ -42,11 +42,9 @@ import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.document.TextField; import 
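`BucketedSortTestCase` only needs an import update for `NoneCircuitBreakerService`, which moves to `org.opensearch.core.indices.breaker`. A sketch of the usual test wiring that consumes it, assuming the OpenSearch test framework's `MockBigArrays`/`MockPageCacheRecycler` constructors:

```java
import org.opensearch.common.settings.Settings;
import org.opensearch.common.util.BigArrays;
import org.opensearch.common.util.MockBigArrays;
import org.opensearch.common.util.MockPageCacheRecycler;
import org.opensearch.core.indices.breaker.NoneCircuitBreakerService;

public class BigArraysFixture {
    // No-op circuit breaking is the norm for unit tests that only need paged arrays.
    static BigArrays bigArrays() {
        return new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
    }
}
```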
org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.sandbox.document.BigIntegerPoint; import org.apache.lucene.sandbox.document.HalfFloatPoint; -import org.apache.lucene.tests.search.AssertingIndexSearcher; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.SortField; import org.apache.lucene.search.SortedNumericSelector; @@ -55,10 +53,12 @@ import org.apache.lucene.search.SortedSetSortField; import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.tests.search.AssertingIndexSearcher; import org.apache.lucene.util.BytesRef; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.index.fielddata.IndexFieldData.XFieldComparatorSource; import org.opensearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.opensearch.index.mapper.DateFieldMapper; diff --git a/server/src/test/java/org/opensearch/search/sort/GeoDistanceSortBuilderTests.java b/server/src/test/java/org/opensearch/search/sort/GeoDistanceSortBuilderTests.java index a82e658f69856..385ced3655116 100644 --- a/server/src/test/java/org/opensearch/search/sort/GeoDistanceSortBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/sort/GeoDistanceSortBuilderTests.java @@ -41,9 +41,9 @@ import org.opensearch.common.geo.GeoDistance; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.unit.DistanceUnit; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.index.fielddata.IndexFieldData.XFieldComparatorSource; import org.opensearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.opensearch.index.mapper.GeoPointFieldMapper; diff --git a/server/src/test/java/org/opensearch/search/sort/NestedSortBuilderTests.java b/server/src/test/java/org/opensearch/search/sort/NestedSortBuilderTests.java index bf37e822bfc3c..4b0b2514dfc3d 100644 --- a/server/src/test/java/org/opensearch/search/sort/NestedSortBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/sort/NestedSortBuilderTests.java @@ -32,28 +32,29 @@ package org.opensearch.search.sort; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.query.ConstantScoreQueryBuilder; import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.index.query.MatchNoneQueryBuilder; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryRewriteContext; import 
org.opensearch.search.SearchModule; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.EqualsHashCodeTestUtils; +import org.opensearch.test.OpenSearchTestCase; import org.junit.AfterClass; import org.junit.BeforeClass; -import org.mockito.Mockito; import java.io.IOException; +import org.mockito.Mockito; + import static java.util.Collections.emptyList; public class NestedSortBuilderTests extends OpenSearchTestCase { @@ -83,7 +84,7 @@ protected NamedXContentRegistry xContentRegistry() { public void testFromXContent() throws IOException { for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) { NestedSortBuilder testItem = createRandomNestedSort(3); - XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); testItem.toXContent(builder, ToXContent.EMPTY_PARAMS); XContentBuilder shuffled = shuffleXContent(builder); try (XContentParser parser = createParser(shuffled)) { diff --git a/server/src/test/java/org/opensearch/search/sort/ScoreSortBuilderTests.java b/server/src/test/java/org/opensearch/search/sort/ScoreSortBuilderTests.java index c180034e69407..eefe3157fe366 100644 --- a/server/src/test/java/org/opensearch/search/sort/ScoreSortBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/sort/ScoreSortBuilderTests.java @@ -33,8 +33,8 @@ package org.opensearch.search.sort; import org.apache.lucene.search.SortField; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.xcontent.XContentParser; import org.opensearch.search.DocValueFormat; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/search/sort/ScriptSortBuilderTests.java b/server/src/test/java/org/opensearch/search/sort/ScriptSortBuilderTests.java index 35139c0307f90..a124fdfeeb508 100644 --- a/server/src/test/java/org/opensearch/search/sort/ScriptSortBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/sort/ScriptSortBuilderTests.java @@ -36,9 +36,9 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TermQuery; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.index.fielddata.IndexFieldData.XFieldComparatorSource; import org.opensearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.opensearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource; diff --git a/server/src/test/java/org/opensearch/search/sort/SortBuilderTests.java b/server/src/test/java/org/opensearch/search/sort/SortBuilderTests.java index 4c1ea3540e299..63db202be0c31 100644 --- a/server/src/test/java/org/opensearch/search/sort/SortBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/sort/SortBuilderTests.java @@ -32,15 +32,14 @@ package org.opensearch.search.sort; -import org.opensearch.common.Strings; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; 
-import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.search.SearchModule; import org.opensearch.test.OpenSearchTestCase; import org.junit.AfterClass; @@ -195,7 +194,7 @@ public void testRandomSortBuilders() throws IOException { xContentBuilder.endArray(); } xContentBuilder.endObject(); - List<SortBuilder<?>> parsedSort = parseSort(Strings.toString(xContentBuilder)); + List<SortBuilder<?>> parsedSort = parseSort(xContentBuilder.toString()); assertEquals(testBuilders.size(), parsedSort.size()); Iterator<SortBuilder<?>> iterator = testBuilders.iterator(); for (SortBuilder<?> parsedBuilder : parsedSort) { diff --git a/server/src/test/java/org/opensearch/search/sort/SortValueTests.java b/server/src/test/java/org/opensearch/search/sort/SortValueTests.java index e11a8064f7a85..91be089526485 100644 --- a/server/src/test/java/org/opensearch/search/sort/SortValueTests.java +++ b/server/src/test/java/org/opensearch/search/sort/SortValueTests.java @@ -32,10 +32,10 @@ package org.opensearch.search.sort; -import org.opensearch.common.Strings; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.time.DateFormatter; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.mapper.DateFieldMapper; @@ -120,7 +120,7 @@ public void testCompareLongs() { } public String toXContent(SortValue sortValue, DocValueFormat format) { - return Strings.toString(XContentType.JSON, new ToXContentFragment() { + return Strings.toString(MediaTypeRegistry.JSON, new ToXContentFragment() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.field("test"); diff --git a/server/src/test/java/org/opensearch/search/sort/plugin/CustomSortBuilder.java b/server/src/test/java/org/opensearch/search/sort/plugin/CustomSortBuilder.java index 8c5736fefb9ba..c3d790cb99cbd 100644 --- a/server/src/test/java/org/opensearch/search/sort/plugin/CustomSortBuilder.java +++ b/server/src/test/java/org/opensearch/search/sort/plugin/CustomSortBuilder.java @@ -8,8 +8,6 @@ package org.opensearch.search.sort.plugin; -import static org.opensearch.core.xcontent.ConstructingObjectParser.constructorArg; - import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -28,6 +26,8 @@ import java.io.IOException; import java.util.Objects; +import static org.opensearch.core.xcontent.ConstructingObjectParser.constructorArg; + /** * Custom sort builder that just rewrites to a basic field sort */ diff --git a/server/src/test/java/org/opensearch/search/suggest/AbstractSuggestionBuilderTestCase.java b/server/src/test/java/org/opensearch/search/suggest/AbstractSuggestionBuilderTestCase.java index eee13ec56d841..f3f1ef6d39fff 100644 --- a/server/src/test/java/org/opensearch/search/suggest/AbstractSuggestionBuilderTestCase.java +++ b/server/src/test/java/org/opensearch/search/suggest/AbstractSuggestionBuilderTestCase.java @@ -35,17 +35,17 @@ import org.apache.lucene.analysis.core.SimpleAnalyzer; import org.opensearch.Version; import 
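Two related `Strings` cleanups appear here: `SortBuilderTests` drops `org.opensearch.common.Strings.toString(builder)` in favor of `XContentBuilder#toString()`, while `SortValueTests` keeps a `Strings.toString` call but sources it from `org.opensearch.core.common.Strings` with a `MediaType` first argument. A sketch of both spellings, assuming the core `Strings` helper exposes the `(MediaType, ToXContent)` overload the diff relies on:

```java
import org.opensearch.core.common.Strings;
import org.opensearch.core.xcontent.MediaTypeRegistry;
import org.opensearch.core.xcontent.ToXContentObject;
import org.opensearch.core.xcontent.XContentBuilder;

import java.io.IOException;

public class ToStringMigration {
    public static void main(String[] args) throws IOException {
        // 1) A builder now renders itself:
        XContentBuilder builder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON);
        builder.startObject().field("k", "v").endObject();
        System.out.println(builder.toString()); // {"k":"v"}

        // 2) Arbitrary ToXContent values go through the core Strings helper:
        ToXContentObject doc = (b, p) -> b.startObject().field("test", "value").endObject();
        System.out.println(Strings.toString(MediaTypeRegistry.JSON, doc));
    }
}
```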
org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.BigArrays; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.index.Index; import org.opensearch.index.IndexSettings; import org.opensearch.index.analysis.AnalyzerScope; import org.opensearch.index.analysis.NamedAnalyzer; @@ -59,8 +59,8 @@ import org.opensearch.script.ScriptService; import org.opensearch.search.SearchModule; import org.opensearch.search.suggest.SuggestionSearchContext.SuggestionContext; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTestCase; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -146,7 +146,7 @@ public void testEqualsAndHashcode() { public void testFromXContent() throws IOException { for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) { SB suggestionBuilder = randomTestBuilder(); - XContentBuilder xContentBuilder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder xContentBuilder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); if (randomBoolean()) { xContentBuilder.prettyPrint(); } diff --git a/server/src/test/java/org/opensearch/search/suggest/CompletionSuggestionOptionTests.java b/server/src/test/java/org/opensearch/search/suggest/CompletionSuggestionOptionTests.java index d312299b924af..0f1ea9db0a95a 100644 --- a/server/src/test/java/org/opensearch/search/suggest/CompletionSuggestionOptionTests.java +++ b/server/src/test/java/org/opensearch/search/suggest/CompletionSuggestionOptionTests.java @@ -32,11 +32,12 @@ package org.opensearch.search.suggest; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.text.Text; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHitTests; import org.opensearch.search.suggest.completion.CompletionSuggestion; @@ -51,7 +52,7 @@ import java.util.Set; import java.util.function.Predicate; -import static org.opensearch.common.xcontent.XContentHelper.toXContent; +import static org.opensearch.core.xcontent.XContentHelper.toXContent; import static org.opensearch.test.XContentTestUtils.insertRandomFields; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertToXContentEquivalent; @@ -126,7 +127,7 @@ private void doTestFromXContent(boolean addRandomFields) throws IOException { public void testToXContent() throws IOException { Map<String, Set<String>> contexts = Collections.singletonMap("key", Collections.singleton("value")); 
CompletionSuggestion.Entry.Option option = new CompletionSuggestion.Entry.Option(1, new Text("someText"), 1.3f, contexts); - BytesReference xContent = toXContent(option, XContentType.JSON, randomBoolean()); + BytesReference xContent = toXContent(option, MediaTypeRegistry.JSON, randomBoolean()); assertEquals("{\"text\":\"someText\",\"score\":1.3,\"contexts\":{\"key\":[\"value\"]}}", xContent.utf8ToString()); } } diff --git a/server/src/test/java/org/opensearch/search/suggest/SuggestBuilderTests.java b/server/src/test/java/org/opensearch/search/suggest/SuggestBuilderTests.java index f8996c528cf32..49f2b7869010b 100644 --- a/server/src/test/java/org/opensearch/search/suggest/SuggestBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/suggest/SuggestBuilderTests.java @@ -32,20 +32,20 @@ package org.opensearch.search.suggest; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.search.SearchModule; import org.opensearch.search.suggest.completion.CompletionSuggesterBuilderTests; import org.opensearch.search.suggest.phrase.PhraseSuggestionBuilderTests; import org.opensearch.search.suggest.term.TermSuggestionBuilderTests; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.EqualsHashCodeTestUtils; +import org.opensearch.test.OpenSearchTestCase; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -82,7 +82,7 @@ public static void afterClass() { public void testFromXContent() throws IOException { for (int runs = 0; runs < NUMBER_OF_RUNS; runs++) { SuggestBuilder suggestBuilder = randomSuggestBuilder(); - XContentBuilder xContentBuilder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder xContentBuilder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); if (randomBoolean()) { xContentBuilder.prettyPrint(); } diff --git a/server/src/test/java/org/opensearch/search/suggest/SuggestTests.java b/server/src/test/java/org/opensearch/search/suggest/SuggestTests.java index e046b415dbcf3..466eb33f3e6b7 100644 --- a/server/src/test/java/org/opensearch/search/suggest/SuggestTests.java +++ b/server/src/test/java/org/opensearch/search/suggest/SuggestTests.java @@ -33,20 +33,21 @@ package org.opensearch.search.suggest; import org.opensearch.Version; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.common.settings.Settings; import org.opensearch.core.common.text.Text; +import 
org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.rest.action.search.RestSearchAction; import org.opensearch.search.SearchModule; import org.opensearch.search.suggest.Suggest.Suggestion; @@ -66,7 +67,7 @@ import static java.util.Collections.emptyList; import static org.opensearch.common.xcontent.XContentHelper.stripWhitespace; -import static org.opensearch.common.xcontent.XContentHelper.toXContent; +import static org.opensearch.core.xcontent.XContentHelper.toXContent; import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; import static org.opensearch.core.xcontent.XContentParserUtils.ensureFieldName; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertToXContentEquivalent; @@ -162,7 +163,7 @@ public void testToXContent() throws IOException { PhraseSuggestion suggestion = new PhraseSuggestion("suggestionName", 5); suggestion.addTerm(entry); Suggest suggest = new Suggest(Collections.singletonList(suggestion)); - BytesReference xContent = toXContent(suggest, XContentType.JSON, randomBoolean()); + BytesReference xContent = toXContent(suggest, MediaTypeRegistry.JSON, randomBoolean()); assertEquals( stripWhitespace( "{" diff --git a/server/src/test/java/org/opensearch/search/suggest/SuggestionEntryTests.java b/server/src/test/java/org/opensearch/search/suggest/SuggestionEntryTests.java index f8cc6bb5c9f2e..09551e6453ee7 100644 --- a/server/src/test/java/org/opensearch/search/suggest/SuggestionEntryTests.java +++ b/server/src/test/java/org/opensearch/search/suggest/SuggestionEntryTests.java @@ -32,11 +32,12 @@ package org.opensearch.search.suggest; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.text.Text; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.search.suggest.Suggest.Suggestion.Entry; import org.opensearch.search.suggest.Suggest.Suggestion.Entry.Option; import org.opensearch.search.suggest.completion.CompletionSuggestion; @@ -52,7 +53,7 @@ import java.util.function.Predicate; import java.util.function.Supplier; -import static org.opensearch.common.xcontent.XContentHelper.toXContent; +import static org.opensearch.core.xcontent.XContentHelper.toXContent; import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; import static org.opensearch.test.XContentTestUtils.insertRandomFields; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertToXContentEquivalent; @@ -156,7 +157,7 @@ public void testToXContent() throws IOException { ); PhraseSuggestion.Entry phraseEntry = new PhraseSuggestion.Entry(new Text("entryText"), 42, 313); phraseEntry.addOption(phraseOption); - BytesReference xContent = toXContent(phraseEntry, XContentType.JSON, randomBoolean()); + BytesReference xContent = toXContent(phraseEntry, MediaTypeRegistry.JSON, randomBoolean()); assertEquals( "{\"text\":\"entryText\"," + "\"offset\":42," @@ -173,7 +174,7 @@ public void testToXContent() throws IOException { TermSuggestion.Entry.Option 
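The suggest tests repeatedly switch the static import of `XContentHelper.toXContent` from `org.opensearch.common.xcontent` to `org.opensearch.core.xcontent`, pairing it with `MediaTypeRegistry.JSON`. A sketch of the migrated serialization call, assuming the core helper keeps the `(ToXContent, MediaType, boolean)` overload used throughout these hunks:

```java
import org.opensearch.core.common.bytes.BytesReference;
import org.opensearch.core.xcontent.MediaTypeRegistry;
import org.opensearch.core.xcontent.ToXContentObject;

import java.io.IOException;

import static org.opensearch.core.xcontent.XContentHelper.toXContent;

public class XContentHelperExample {
    public static void main(String[] args) throws IOException {
        ToXContentObject doc = (b, p) -> b.startObject().field("text", "someText").endObject();
        // Third argument is the humanReadable flag; the tests randomize it.
        BytesReference bytes = toXContent(doc, MediaTypeRegistry.JSON, false);
        System.out.println(bytes.utf8ToString()); // {"text":"someText"}
    }
}
```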
termOption = new TermSuggestion.Entry.Option(new Text("termSuggestOption"), 42, 3.13f); TermSuggestion.Entry termEntry = new TermSuggestion.Entry(new Text("entryText"), 42, 313); termEntry.addOption(termOption); - xContent = toXContent(termEntry, XContentType.JSON, randomBoolean()); + xContent = toXContent(termEntry, MediaTypeRegistry.JSON, randomBoolean()); assertEquals( "{\"text\":\"entryText\"," + "\"offset\":42," @@ -194,7 +195,7 @@ public void testToXContent() throws IOException { ); CompletionSuggestion.Entry completionEntry = new CompletionSuggestion.Entry(new Text("entryText"), 42, 313); completionEntry.addOption(completionOption); - xContent = toXContent(completionEntry, XContentType.JSON, randomBoolean()); + xContent = toXContent(completionEntry, MediaTypeRegistry.JSON, randomBoolean()); assertEquals( "{\"text\":\"entryText\"," + "\"offset\":42," diff --git a/server/src/test/java/org/opensearch/search/suggest/SuggestionOptionTests.java b/server/src/test/java/org/opensearch/search/suggest/SuggestionOptionTests.java index 76d8301d6260d..39c061f2b6586 100644 --- a/server/src/test/java/org/opensearch/search/suggest/SuggestionOptionTests.java +++ b/server/src/test/java/org/opensearch/search/suggest/SuggestionOptionTests.java @@ -32,18 +32,19 @@ package org.opensearch.search.suggest; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.text.Text; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.search.suggest.Suggest.Suggestion.Entry.Option; import org.opensearch.search.suggest.phrase.PhraseSuggestion; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; -import static org.opensearch.common.xcontent.XContentHelper.toXContent; +import static org.opensearch.core.xcontent.XContentHelper.toXContent; import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; import static org.opensearch.test.XContentTestUtils.insertRandomFields; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertToXContentEquivalent; @@ -93,7 +94,7 @@ private void doTestFromXContent(boolean addRandomFields) throws IOException { public void testToXContent() throws IOException { Option option = new PhraseSuggestion.Entry.Option(new Text("someText"), new Text("somethingHighlighted"), 1.3f, true); - BytesReference xContent = toXContent(option, XContentType.JSON, randomBoolean()); + BytesReference xContent = toXContent(option, MediaTypeRegistry.JSON, randomBoolean()); assertEquals( ("{" + " \"text\": \"someText\"," diff --git a/server/src/test/java/org/opensearch/search/suggest/SuggestionTests.java b/server/src/test/java/org/opensearch/search/suggest/SuggestionTests.java index f1933ca43f2f9..c2be83b593a67 100644 --- a/server/src/test/java/org/opensearch/search/suggest/SuggestionTests.java +++ b/server/src/test/java/org/opensearch/search/suggest/SuggestionTests.java @@ -32,16 +32,17 @@ package org.opensearch.search.suggest; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.text.Text; import org.opensearch.core.xcontent.DeprecationHandler; +import org.opensearch.core.xcontent.MediaTypeRegistry; import 
org.opensearch.core.xcontent.NamedObjectNotFoundException; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContent; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.rest.action.search.RestSearchAction; import org.opensearch.search.suggest.Suggest.Suggestion; import org.opensearch.search.suggest.Suggest.Suggestion.Entry; @@ -58,7 +59,7 @@ import java.util.function.Predicate; import java.util.function.Supplier; -import static org.opensearch.common.xcontent.XContentHelper.toXContent; +import static org.opensearch.core.xcontent.XContentHelper.toXContent; import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; import static org.opensearch.test.XContentTestUtils.insertRandomFields; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertToXContentEquivalent; @@ -229,7 +230,7 @@ public void testToXContent() throws IOException { entry.addOption(option); PhraseSuggestion suggestion = new PhraseSuggestion("suggestionName", 5); suggestion.addTerm(entry); - BytesReference xContent = toXContent(suggestion, XContentType.JSON, params, randomBoolean()); + BytesReference xContent = toXContent(suggestion, MediaTypeRegistry.JSON, params, randomBoolean()); assertEquals( ("{" + " \"phrase#suggestionName\": [" @@ -262,7 +263,7 @@ public void testToXContent() throws IOException { entry.addOption(option); PhraseSuggestion suggestion = new PhraseSuggestion("suggestionName", 5); suggestion.addTerm(entry); - BytesReference xContent = toXContent(suggestion, XContentType.JSON, params, randomBoolean()); + BytesReference xContent = toXContent(suggestion, MediaTypeRegistry.JSON, params, randomBoolean()); assertEquals( ("{" + " \"phrase#suggestionName\": [" @@ -290,7 +291,7 @@ public void testToXContent() throws IOException { entry.addOption(option); TermSuggestion suggestion = new TermSuggestion("suggestionName", 5, SortBy.SCORE); suggestion.addTerm(entry); - BytesReference xContent = toXContent(suggestion, XContentType.JSON, params, randomBoolean()); + BytesReference xContent = toXContent(suggestion, MediaTypeRegistry.JSON, params, randomBoolean()); assertEquals( ("{" + " \"term#suggestionName\": [" @@ -318,7 +319,7 @@ public void testToXContent() throws IOException { entry.addOption(option); CompletionSuggestion suggestion = new CompletionSuggestion("suggestionName", 5, randomBoolean()); suggestion.addTerm(entry); - BytesReference xContent = toXContent(suggestion, XContentType.JSON, params, randomBoolean()); + BytesReference xContent = toXContent(suggestion, MediaTypeRegistry.JSON, params, randomBoolean()); assertEquals( ("{" + " \"completion#suggestionName\": [" diff --git a/server/src/test/java/org/opensearch/search/suggest/TermSuggestionOptionTests.java b/server/src/test/java/org/opensearch/search/suggest/TermSuggestionOptionTests.java index b032f7729cb1d..d54ae78550b68 100644 --- a/server/src/test/java/org/opensearch/search/suggest/TermSuggestionOptionTests.java +++ b/server/src/test/java/org/opensearch/search/suggest/TermSuggestionOptionTests.java @@ -32,17 +32,18 @@ package org.opensearch.search.suggest; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.text.Text; +import org.opensearch.core.xcontent.MediaTypeRegistry; import 
org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.search.suggest.term.TermSuggestion.Entry.Option; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; -import static org.opensearch.common.xcontent.XContentHelper.toXContent; +import static org.opensearch.core.xcontent.XContentHelper.toXContent; import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; import static org.opensearch.test.XContentTestUtils.insertRandomFields; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertToXContentEquivalent; @@ -90,7 +91,7 @@ private void doTestFromXContent(boolean addRandomFields) throws IOException { public void testToXContent() throws IOException { Option option = new Option(new Text("someText"), 100, 1.3f); - BytesReference xContent = toXContent(option, XContentType.JSON, randomBoolean()); + BytesReference xContent = toXContent(option, MediaTypeRegistry.JSON, randomBoolean()); assertEquals("{\"text\":\"someText\",\"score\":1.3,\"freq\":100}", xContent.utf8ToString()); } diff --git a/server/src/test/java/org/opensearch/search/suggest/completion/CategoryContextMappingTests.java b/server/src/test/java/org/opensearch/search/suggest/completion/CategoryContextMappingTests.java index 478e99c269f37..ff85a912552fc 100644 --- a/server/src/test/java/org/opensearch/search/suggest/completion/CategoryContextMappingTests.java +++ b/server/src/test/java/org/opensearch/search/suggest/completion/CategoryContextMappingTests.java @@ -41,15 +41,14 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.suggest.document.ContextSuggestField; import org.apache.lucene.util.BytesRef; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.index.mapper.CompletionFieldMapper.CompletionFieldType; import org.opensearch.index.mapper.DocumentMapper; import org.opensearch.index.mapper.KeywordFieldMapper; @@ -75,21 +74,20 @@ public class CategoryContextMappingTests extends OpenSearchSingleNodeTestCase { public void testIndexingWithNoContexts() throws Exception { - String mapping = Strings.toString( - jsonBuilder().startObject() - .startObject("properties") - .startObject("completion") - .field("type", "completion") - .startArray("contexts") - .startObject() - .field("name", "ctx") - .field("type", "category") - .endObject() - .endArray() - .endObject() - .endObject() - .endObject() - ); + String mapping = jsonBuilder().startObject() + .startObject("properties") + .startObject("completion") + .field("type", "completion") + .startArray("contexts") + .startObject() + .field("name", "ctx") + .field("type", "category") + .endObject() + .endArray() + .endObject() + .endObject() + .endObject() + .toString(); DocumentMapper defaultMapper = createIndex("test").mapperService() .documentMapperParser() @@ -117,7 +115,7 @@ public void 
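Every mapping fixture in `CategoryContextMappingTests` is rewritten from `Strings.toString(jsonBuilder()...)` to a direct `jsonBuilder()....toString()` chain, since `XContentBuilder` now renders itself. A minimal sketch of the pattern, assuming the `jsonBuilder` static factory from `org.opensearch.common.xcontent.XContentFactory` that the tests keep importing:

```java
import java.io.IOException;

import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;

public class MappingBuilderExample {
    public static void main(String[] args) throws IOException {
        // No wrapper needed: the builder's toString() yields the JSON it built.
        String mapping = jsonBuilder().startObject()
            .startObject("properties")
            .startObject("completion")
            .field("type", "completion")
            .endObject()
            .endObject()
            .endObject()
            .toString();
        System.out.println(mapping);
    }
}
```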
testIndexingWithNoContexts() throws Exception { .endArray() .endObject() ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); IndexableField[] fields = parsedDocument.rootDoc().getFields(fieldMapper.name()); @@ -125,21 +123,20 @@ public void testIndexingWithNoContexts() throws Exception { } public void testIndexingWithSimpleContexts() throws Exception { - String mapping = Strings.toString( - jsonBuilder().startObject() - .startObject("properties") - .startObject("completion") - .field("type", "completion") - .startArray("contexts") - .startObject() - .field("name", "ctx") - .field("type", "category") - .endObject() - .endArray() - .endObject() - .endObject() - .endObject() - ); + String mapping = jsonBuilder().startObject() + .startObject("properties") + .startObject("completion") + .field("type", "completion") + .startArray("contexts") + .startObject() + .field("name", "ctx") + .field("type", "category") + .endObject() + .endArray() + .endObject() + .endObject() + .endObject() + .toString(); DocumentMapper defaultMapper = createIndex("test").mapperService() .documentMapperParser() @@ -162,7 +159,7 @@ public void testIndexingWithSimpleContexts() throws Exception { .endArray() .endObject() ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); IndexableField[] fields = parsedDocument.rootDoc().getFields(fieldMapper.name()); @@ -170,21 +167,20 @@ public void testIndexingWithSimpleContexts() throws Exception { } public void testIndexingWithSimpleNumberContexts() throws Exception { - String mapping = Strings.toString( - jsonBuilder().startObject() - .startObject("properties") - .startObject("completion") - .field("type", "completion") - .startArray("contexts") - .startObject() - .field("name", "ctx") - .field("type", "category") - .endObject() - .endArray() - .endObject() - .endObject() - .endObject() - ); + String mapping = jsonBuilder().startObject() + .startObject("properties") + .startObject("completion") + .field("type", "completion") + .startArray("contexts") + .startObject() + .field("name", "ctx") + .field("type", "category") + .endObject() + .endArray() + .endObject() + .endObject() + .endObject() + .toString(); DocumentMapper defaultMapper = createIndex("test").mapperService() .documentMapperParser() @@ -207,7 +203,7 @@ public void testIndexingWithSimpleNumberContexts() throws Exception { .endArray() .endObject() ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); IndexableField[] fields = parsedDocument.rootDoc().getFields(fieldMapper.name()); @@ -215,21 +211,20 @@ public void testIndexingWithSimpleNumberContexts() throws Exception { } public void testIndexingWithSimpleBooleanContexts() throws Exception { - String mapping = Strings.toString( - jsonBuilder().startObject() - .startObject("properties") - .startObject("completion") - .field("type", "completion") - .startArray("contexts") - .startObject() - .field("name", "ctx") - .field("type", "category") - .endObject() - .endArray() - .endObject() - .endObject() - .endObject() - ); + String mapping = jsonBuilder().startObject() + .startObject("properties") + .startObject("completion") + .field("type", "completion") + .startArray("contexts") + .startObject() + .field("name", "ctx") + .field("type", "category") + .endObject() + .endArray() + .endObject() + .endObject() + .endObject() + .toString(); DocumentMapper defaultMapper = createIndex("test").mapperService() .documentMapperParser() @@ -252,7 +247,7 @@ public void testIndexingWithSimpleBooleanContexts() throws Exception { .endArray() .endObject() ), - XContentType.JSON + 
MediaTypeRegistry.JSON ) ); IndexableField[] fields = parsedDocument.rootDoc().getFields(fieldMapper.name()); @@ -260,21 +255,20 @@ public void testIndexingWithSimpleBooleanContexts() throws Exception { } public void testIndexingWithSimpleNULLContexts() throws Exception { - String mapping = Strings.toString( - jsonBuilder().startObject() - .startObject("properties") - .startObject("completion") - .field("type", "completion") - .startArray("contexts") - .startObject() - .field("name", "ctx") - .field("type", "category") - .endObject() - .endArray() - .endObject() - .endObject() - .endObject() - ); + String mapping = jsonBuilder().startObject() + .startObject("properties") + .startObject("completion") + .field("type", "completion") + .startArray("contexts") + .startObject() + .field("name", "ctx") + .field("type", "category") + .endObject() + .endArray() + .endObject() + .endObject() + .endObject() + .toString(); DocumentMapper defaultMapper = createIndex("test").mapperService() .documentMapperParser() @@ -293,7 +287,7 @@ public void testIndexingWithSimpleNULLContexts() throws Exception { Exception e = expectThrows( MapperParsingException.class, - () -> defaultMapper.parse(new SourceToParse("test", "1", BytesReference.bytes(builder), XContentType.JSON)) + () -> defaultMapper.parse(new SourceToParse("test", "1", BytesReference.bytes(builder), MediaTypeRegistry.JSON)) ); assertEquals( "contexts must be a string, number or boolean or a list of string, number or boolean, but was [VALUE_NULL]", @@ -302,21 +296,20 @@ public void testIndexingWithSimpleNULLContexts() throws Exception { } public void testIndexingWithContextList() throws Exception { - String mapping = Strings.toString( - jsonBuilder().startObject() - .startObject("properties") - .startObject("completion") - .field("type", "completion") - .startArray("contexts") - .startObject() - .field("name", "ctx") - .field("type", "category") - .endObject() - .endArray() - .endObject() - .endObject() - .endObject() - ); + String mapping = jsonBuilder().startObject() + .startObject("properties") + .startObject("completion") + .field("type", "completion") + .startArray("contexts") + .startObject() + .field("name", "ctx") + .field("type", "category") + .endObject() + .endArray() + .endObject() + .endObject() + .endObject() + .toString(); DocumentMapper defaultMapper = createIndex("test").mapperService() .documentMapperParser() @@ -337,7 +330,7 @@ public void testIndexingWithContextList() throws Exception { .endObject() .endObject() ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); IndexableField[] fields = parsedDocument.rootDoc().getFields(fieldMapper.name()); @@ -345,21 +338,20 @@ public void testIndexingWithContextList() throws Exception { } public void testIndexingWithMixedTypeContextList() throws Exception { - String mapping = Strings.toString( - jsonBuilder().startObject() - .startObject("properties") - .startObject("completion") - .field("type", "completion") - .startArray("contexts") - .startObject() - .field("name", "ctx") - .field("type", "category") - .endObject() - .endArray() - .endObject() - .endObject() - .endObject() - ); + String mapping = jsonBuilder().startObject() + .startObject("properties") + .startObject("completion") + .field("type", "completion") + .startArray("contexts") + .startObject() + .field("name", "ctx") + .field("type", "category") + .endObject() + .endArray() + .endObject() + .endObject() + .endObject() + .toString(); DocumentMapper defaultMapper = createIndex("test").mapperService() .documentMapperParser() @@ 
-380,7 +372,7 @@ public void testIndexingWithMixedTypeContextList() throws Exception { .endObject() .endObject() ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); IndexableField[] fields = parsedDocument.rootDoc().getFields(fieldMapper.name()); @@ -388,21 +380,20 @@ public void testIndexingWithMixedTypeContextList() throws Exception { } public void testIndexingWithMixedTypeContextListHavingNULL() throws Exception { - String mapping = Strings.toString( - jsonBuilder().startObject() - .startObject("properties") - .startObject("completion") - .field("type", "completion") - .startArray("contexts") - .startObject() - .field("name", "ctx") - .field("type", "category") - .endObject() - .endArray() - .endObject() - .endObject() - .endObject() - ); + String mapping = jsonBuilder().startObject() + .startObject("properties") + .startObject("completion") + .field("type", "completion") + .startArray("contexts") + .startObject() + .field("name", "ctx") + .field("type", "category") + .endObject() + .endArray() + .endObject() + .endObject() + .endObject() + .toString(); DocumentMapper defaultMapper = createIndex("test").mapperService() .documentMapperParser() @@ -419,31 +410,30 @@ public void testIndexingWithMixedTypeContextListHavingNULL() throws Exception { Exception e = expectThrows( MapperParsingException.class, - () -> defaultMapper.parse(new SourceToParse("test", "1", BytesReference.bytes(builder), XContentType.JSON)) + () -> defaultMapper.parse(new SourceToParse("test", "1", BytesReference.bytes(builder), MediaTypeRegistry.JSON)) ); assertEquals("context array must have string, number or boolean values, but was [VALUE_NULL]", e.getCause().getMessage()); } public void testIndexingWithMultipleContexts() throws Exception { - String mapping = Strings.toString( - jsonBuilder().startObject() - .startObject("properties") - .startObject("completion") - .field("type", "completion") - .startArray("contexts") - .startObject() - .field("name", "ctx") - .field("type", "category") - .endObject() - .startObject() - .field("name", "type") - .field("type", "category") - .endObject() - .endArray() - .endObject() - .endObject() - .endObject() - ); + String mapping = jsonBuilder().startObject() + .startObject("properties") + .startObject("completion") + .field("type", "completion") + .startArray("contexts") + .startObject() + .field("name", "ctx") + .field("type", "category") + .endObject() + .startObject() + .field("name", "type") + .field("type", "category") + .endObject() + .endArray() + .endObject() + .endObject() + .endObject() + .toString(); DocumentMapper defaultMapper = createIndex("test").mapperService() .documentMapperParser() @@ -462,7 +452,7 @@ public void testIndexingWithMultipleContexts() throws Exception { .endArray() .endObject(); ParsedDocument parsedDocument = defaultMapper.parse( - new SourceToParse("test", "1", BytesReference.bytes(builder), XContentType.JSON) + new SourceToParse("test", "1", BytesReference.bytes(builder), MediaTypeRegistry.JSON) ); IndexableField[] fields = parsedDocument.rootDoc().getFields(fieldMapper.name()); assertContextSuggestFields(fields, 3); diff --git a/server/src/test/java/org/opensearch/search/suggest/completion/CompletionSuggesterBuilderTests.java b/server/src/test/java/org/opensearch/search/suggest/completion/CompletionSuggesterBuilderTests.java index 868e60f049f68..83a3fed411601 100644 --- a/server/src/test/java/org/opensearch/search/suggest/completion/CompletionSuggesterBuilderTests.java +++ 
b/server/src/test/java/org/opensearch/search/suggest/completion/CompletionSuggesterBuilderTests.java @@ -33,8 +33,8 @@ package org.opensearch.search.suggest.completion; import org.apache.lucene.analysis.core.SimpleAnalyzer; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.unit.Fuzziness; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.index.analysis.AnalyzerScope; import org.opensearch.index.analysis.NamedAnalyzer; diff --git a/server/src/test/java/org/opensearch/search/suggest/completion/FuzzyOptionsTests.java b/server/src/test/java/org/opensearch/search/suggest/completion/FuzzyOptionsTests.java index 7856c106ac24f..8179d448e2058 100644 --- a/server/src/test/java/org/opensearch/search/suggest/completion/FuzzyOptionsTests.java +++ b/server/src/test/java/org/opensearch/search/suggest/completion/FuzzyOptionsTests.java @@ -32,8 +32,8 @@ package org.opensearch.search.suggest.completion; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.unit.Fuzziness; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/search/suggest/completion/GeoContextMappingTests.java b/server/src/test/java/org/opensearch/search/suggest/completion/GeoContextMappingTests.java index d641090f1a5aa..07f3526dd2bb0 100644 --- a/server/src/test/java/org/opensearch/search/suggest/completion/GeoContextMappingTests.java +++ b/server/src/test/java/org/opensearch/search/suggest/completion/GeoContextMappingTests.java @@ -34,12 +34,12 @@ import org.apache.lucene.index.IndexableField; import org.opensearch.OpenSearchParseException; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.ParsedDocument; @@ -53,8 +53,8 @@ import java.util.Collection; import java.util.List; -import static org.opensearch.geometry.utils.Geohash.addNeighborsAtLevel; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.geometry.utils.Geohash.addNeighborsAtLevel; import static org.opensearch.search.suggest.completion.CategoryContextMappingTests.assertContextSuggestFields; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.in; @@ -102,7 +102,7 @@ public void testIndexingWithNoContexts() throws Exception { .endArray() .endObject() ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); IndexableField[] fields = parsedDocument.rootDoc().getFields(completionFieldType.name()); @@ -147,7 +147,7 @@ public void testIndexingWithSimpleContexts() throws Exception { .endArray() .endObject() ), - XContentType.JSON + MediaTypeRegistry.JSON ) ); IndexableField[] fields = parsedDocument.rootDoc().getFields(completionFieldType.name()); @@ -196,7 +196,7 @@ public void testIndexingWithContextList() throws Exception { .endObject() .endObject() 
), - XContentType.JSON + MediaTypeRegistry.JSON ) ); IndexableField[] fields = parsedDocument.rootDoc().getFields(completionFieldType.name()); @@ -237,7 +237,7 @@ public void testIndexingWithMultipleContexts() throws Exception { .endArray() .endObject(); ParsedDocument parsedDocument = mapperService.documentMapper() - .parse(new SourceToParse("test", "1", BytesReference.bytes(builder), XContentType.JSON)); + .parse(new SourceToParse("test", "1", BytesReference.bytes(builder), MediaTypeRegistry.JSON)); IndexableField[] fields = parsedDocument.rootDoc().getFields(completionFieldType.name()); assertContextSuggestFields(fields, 3); } diff --git a/server/src/test/java/org/opensearch/search/suggest/completion/GeoQueryContextTests.java b/server/src/test/java/org/opensearch/search/suggest/completion/GeoQueryContextTests.java index 654bea6fd9b6c..f52009d36c382 100644 --- a/server/src/test/java/org/opensearch/search/suggest/completion/GeoQueryContextTests.java +++ b/server/src/test/java/org/opensearch/search/suggest/completion/GeoQueryContextTests.java @@ -32,11 +32,11 @@ package org.opensearch.search.suggest.completion; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.geo.GeoPoint; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.search.suggest.completion.context.GeoQueryContext; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/search/suggest/completion/QueryContextTestCase.java b/server/src/test/java/org/opensearch/search/suggest/completion/QueryContextTestCase.java index 571746882b956..c79cdc256d055 100644 --- a/server/src/test/java/org/opensearch/search/suggest/completion/QueryContextTestCase.java +++ b/server/src/test/java/org/opensearch/search/suggest/completion/QueryContextTestCase.java @@ -32,9 +32,9 @@ package org.opensearch.search.suggest.completion; +import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/search/suggest/phrase/DirectCandidateGeneratorTests.java b/server/src/test/java/org/opensearch/search/suggest/phrase/DirectCandidateGeneratorTests.java index 8484861999c3f..25ee8f95b8acb 100644 --- a/server/src/test/java/org/opensearch/search/suggest/phrase/DirectCandidateGeneratorTests.java +++ b/server/src/test/java/org/opensearch/search/suggest/phrase/DirectCandidateGeneratorTests.java @@ -46,14 +46,14 @@ import org.apache.lucene.search.spell.SuggestMode; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; -import 
org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -140,7 +140,7 @@ private static DirectCandidateGeneratorBuilder mutate(DirectCandidateGeneratorBu public void testFromXContent() throws IOException { for (int runs = 0; runs < NUMBER_OF_RUNS; runs++) { DirectCandidateGeneratorBuilder generator = randomCandidateGenerator(); - XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); if (randomBoolean()) { builder.prettyPrint(); } diff --git a/server/src/test/java/org/opensearch/search/suggest/phrase/SmoothingModelTestCase.java b/server/src/test/java/org/opensearch/search/suggest/phrase/SmoothingModelTestCase.java index a4adf03cff316..30bb48af1fd7a 100644 --- a/server/src/test/java/org/opensearch/search/suggest/phrase/SmoothingModelTestCase.java +++ b/server/src/test/java/org/opensearch/search/suggest/phrase/SmoothingModelTestCase.java @@ -43,13 +43,13 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.MultiTerms; import org.apache.lucene.store.ByteBuffersDirectory; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.lucene.BytesRefs; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.search.SearchModule; import org.opensearch.test.OpenSearchTestCase; import org.junit.AfterClass; @@ -101,7 +101,7 @@ public static void afterClass() throws Exception { */ public void testFromXContent() throws IOException { SmoothingModel testModel = createTestModel(); - XContentBuilder contentBuilder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder contentBuilder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); if (randomBoolean()) { contentBuilder.prettyPrint(); } diff --git a/server/src/test/java/org/opensearch/search/suggest/term/TermSuggestionBuilderTests.java b/server/src/test/java/org/opensearch/search/suggest/term/TermSuggestionBuilderTests.java index 11aa02d751a41..8a2f199314465 100644 --- a/server/src/test/java/org/opensearch/search/suggest/term/TermSuggestionBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/suggest/term/TermSuggestionBuilderTests.java @@ -34,8 +34,8 @@ import com.carrotsearch.randomizedtesting.generators.RandomStrings; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.xcontent.XContentParser; import org.opensearch.search.suggest.AbstractSuggestionBuilderTestCase; import org.opensearch.search.suggest.SortBy; import org.opensearch.search.suggest.SuggestBuilder; diff --git a/server/src/test/java/org/opensearch/snapshots/BlobStoreFormatTests.java b/server/src/test/java/org/opensearch/snapshots/BlobStoreFormatTests.java index 3129dce795cf8..c5f36fcc01983 100644 --- a/server/src/test/java/org/opensearch/snapshots/BlobStoreFormatTests.java +++ 
b/server/src/test/java/org/opensearch/snapshots/BlobStoreFormatTests.java @@ -34,18 +34,23 @@ import org.opensearch.OpenSearchCorruptionException; import org.opensearch.OpenSearchParseException; +import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; +import org.opensearch.common.blobstore.fs.FsBlobContainer; import org.opensearch.common.blobstore.fs.FsBlobStore; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.common.compress.Compressor; -import org.opensearch.common.compress.CompressorFactory; -import org.opensearch.common.compress.CompressorType; +import org.opensearch.common.blobstore.stream.read.ReadContext; +import org.opensearch.common.blobstore.stream.write.WriteContext; +import org.opensearch.common.blobstore.stream.write.WritePriority; +import org.opensearch.common.compress.DeflateCompressor; import org.opensearch.common.io.Streams; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.compress.CompressorRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -57,10 +62,17 @@ import java.io.EOFException; import java.io.IOException; import java.io.InputStream; -import java.util.Arrays; +import java.nio.file.Path; import java.util.Map; +import java.util.concurrent.CountDownLatch; + +import org.mockito.ArgumentCaptor; + import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.greaterThan; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; public class BlobStoreFormatTests extends OpenSearchTestCase { @@ -115,18 +127,96 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par } } + public void testBlobStoreAsyncOperations() throws IOException, InterruptedException { + BlobStore blobStore = createTestBlobStore(); + MockFsVerifyingBlobContainer mockBlobContainer = new MockFsVerifyingBlobContainer( + (FsBlobStore) blobStore, + BlobPath.cleanPath(), + null + ); + MockFsVerifyingBlobContainer spyContainer = spy(mockBlobContainer); + ChecksumBlobStoreFormat<BlobObj> checksumSMILE = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj::fromXContent); + ArgumentCaptor<ActionListener<Void>> actionListenerArgumentCaptor = ArgumentCaptor.forClass(ActionListener.class); + ArgumentCaptor<WriteContext> writeContextArgumentCaptor = ArgumentCaptor.forClass(WriteContext.class); + CountDownLatch latch = new CountDownLatch(2); + + // Write blobs in different formats + checksumSMILE.writeAsync( + new BlobObj("checksum smile"), + spyContainer, + "check-smile", + CompressorRegistry.none(), + getVoidActionListener(latch), + ChecksumBlobStoreFormat.SNAPSHOT_ONLY_FORMAT_PARAMS + ); + checksumSMILE.writeAsync( + new BlobObj("checksum smile compressed"), + spyContainer, + "check-smile-comp", + CompressorRegistry.getCompressor(DeflateCompressor.NAME), + getVoidActionListener(latch), + ChecksumBlobStoreFormat.SNAPSHOT_ONLY_FORMAT_PARAMS + ); + + latch.await(); + + verify(spyContainer, 
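/* one asyncBlobUpload per writeAsync call above, hence the times(2) verification */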
times(2)).asyncBlobUpload(writeContextArgumentCaptor.capture(), actionListenerArgumentCaptor.capture()); + assertEquals(2, writeContextArgumentCaptor.getAllValues().size()); + writeContextArgumentCaptor.getAllValues() + .forEach(writeContext -> assertEquals(WritePriority.NORMAL, writeContext.getWritePriority())); + // Assert that all checksum blobs can be read + assertEquals(checksumSMILE.read(mockBlobContainer.getDelegate(), "check-smile", xContentRegistry()).getText(), "checksum smile"); + assertEquals( + checksumSMILE.read(mockBlobContainer.getDelegate(), "check-smile-comp", xContentRegistry()).getText(), + "checksum smile compressed" + ); + } + + public void testBlobStorePriorityAsyncOperation() throws IOException, InterruptedException { + BlobStore blobStore = createTestBlobStore(); + MockFsVerifyingBlobContainer mockBlobContainer = new MockFsVerifyingBlobContainer( + (FsBlobStore) blobStore, + BlobPath.cleanPath(), + null + ); + MockFsVerifyingBlobContainer spyContainer = spy(mockBlobContainer); + ChecksumBlobStoreFormat<BlobObj> checksumSMILE = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj::fromXContent); + + ArgumentCaptor<ActionListener<Void>> actionListenerArgumentCaptor = ArgumentCaptor.forClass(ActionListener.class); + ArgumentCaptor<WriteContext> writeContextArgumentCaptor = ArgumentCaptor.forClass(WriteContext.class); + CountDownLatch latch = new CountDownLatch(1); + + // Write blobs in different formats + checksumSMILE.writeAsyncWithUrgentPriority( + new BlobObj("cluster state diff"), + spyContainer, + "cluster-state-diff", + CompressorRegistry.none(), + getVoidActionListener(latch), + ChecksumBlobStoreFormat.SNAPSHOT_ONLY_FORMAT_PARAMS + ); + latch.await(); + + verify(spyContainer).asyncBlobUpload(writeContextArgumentCaptor.capture(), actionListenerArgumentCaptor.capture()); + assertEquals(WritePriority.URGENT, writeContextArgumentCaptor.getValue().getWritePriority()); + assertEquals( + checksumSMILE.read(mockBlobContainer.getDelegate(), "cluster-state-diff", xContentRegistry()).getText(), + "cluster state diff" + ); + } + public void testBlobStoreOperations() throws IOException { BlobStore blobStore = createTestBlobStore(); BlobContainer blobContainer = blobStore.blobContainer(BlobPath.cleanPath()); ChecksumBlobStoreFormat<BlobObj> checksumSMILE = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj::fromXContent); // Write blobs in different formats - checksumSMILE.write(new BlobObj("checksum smile"), blobContainer, "check-smile", CompressorType.NONE.compressor()); + checksumSMILE.write(new BlobObj("checksum smile"), blobContainer, "check-smile", CompressorRegistry.none()); checksumSMILE.write( new BlobObj("checksum smile compressed"), blobContainer, "check-smile-comp", - CompressorFactory.DEFLATE_COMPRESSOR + CompressorRegistry.getCompressor(DeflateCompressor.NAME) ); // Assert that all checksum blobs can be read @@ -143,8 +233,8 @@ public void testCompressionIsApplied() throws IOException { } ChecksumBlobStoreFormat<BlobObj> checksumFormat = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj::fromXContent); BlobObj blobObj = new BlobObj(veryRedundantText.toString()); - checksumFormat.write(blobObj, blobContainer, "blob-comp", CompressorType.DEFLATE.compressor()); - checksumFormat.write(blobObj, blobContainer, "blob-not-comp", CompressorType.NONE.compressor()); + checksumFormat.write(blobObj, blobContainer, "blob-comp", CompressorRegistry.getCompressor(DeflateCompressor.NAME)); + checksumFormat.write(blobObj, blobContainer, "blob-not-comp", 
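/* CompressorRegistry.none() is the no-op compressor that replaces the former CompressorType.NONE.compressor() */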
CompressorRegistry.none()); Map<String, BlobMetadata> blobs = blobContainer.listBlobsByPrefix("blob-"); assertEquals(blobs.size(), 2); assertThat(blobs.get("blob-not-comp").length(), greaterThan(blobs.get("blob-comp").length())); @@ -156,12 +246,7 @@ public void testBlobCorruption() throws IOException { String testString = randomAlphaOfLength(randomInt(10000)); BlobObj blobObj = new BlobObj(testString); ChecksumBlobStoreFormat<BlobObj> checksumFormat = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj::fromXContent); - checksumFormat.write( - blobObj, - blobContainer, - "test-path", - randomFrom(Arrays.stream(CompressorType.values()).map(CompressorType::compressor).toArray(Compressor[]::new)) - ); + checksumFormat.write(blobObj, blobContainer, "test-path", randomFrom(CompressorRegistry.registeredCompressors().values())); assertEquals(checksumFormat.read(blobContainer, "test-path", xContentRegistry()).getText(), testString); randomCorruption(blobContainer, "test-path"); try { @@ -174,6 +259,24 @@ public void testBlobCorruption() throws IOException { } } + private ActionListener<Void> getVoidActionListener(CountDownLatch latch) { + ActionListener<Void> actionListener = new ActionListener<>() { + @Override + public void onResponse(Void unused) { + logger.info("---> Async write succeeded"); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + logger.info("---> Failure in async write"); + throw new RuntimeException("async write should not fail"); + } + }; + + return actionListener; + } + protected BlobStore createTestBlobStore() throws IOException { return new FsBlobStore(randomIntBetween(1, 8) * 1024, createTempDir(), false); } @@ -202,4 +305,35 @@ private long checksum(byte[] buffer) throws IOException { } } } + + public static class MockFsVerifyingBlobContainer extends FsBlobContainer implements AsyncMultiStreamBlobContainer { + + private BlobContainer delegate; + + public MockFsVerifyingBlobContainer(FsBlobStore blobStore, BlobPath blobPath, Path path) { + super(blobStore, blobPath, path); + delegate = blobStore.blobContainer(BlobPath.cleanPath()); + } + + @Override + public void asyncBlobUpload(WriteContext writeContext, ActionListener<Void> completionListener) throws IOException { + InputStream inputStream = writeContext.getStreamProvider(Integer.MAX_VALUE).provideStream(0).getInputStream(); + delegate.writeBlob(writeContext.getFileName(), inputStream, writeContext.getFileSize(), true); + completionListener.onResponse(null); + } + + @Override + public void readBlobAsync(String blobName, ActionListener<ReadContext> listener) { + throw new RuntimeException("read not supported"); + } + + @Override + public boolean remoteIntegrityCheckSupported() { + return false; + } + + public BlobContainer getDelegate() { + return delegate; + } + } } diff --git a/server/src/test/java/org/opensearch/snapshots/RepositoriesMetadataSerializationTests.java b/server/src/test/java/org/opensearch/snapshots/RepositoriesMetadataSerializationTests.java index 3f71a9da8354f..8550316a666e8 100644 --- a/server/src/test/java/org/opensearch/snapshots/RepositoriesMetadataSerializationTests.java +++ b/server/src/test/java/org/opensearch/snapshots/RepositoriesMetadataSerializationTests.java @@ -34,12 +34,13 @@ import org.opensearch.cluster.ClusterModule; import org.opensearch.cluster.Diff; +import org.opensearch.cluster.metadata.CryptoMetadata; import org.opensearch.cluster.metadata.Metadata.Custom; import org.opensearch.cluster.metadata.RepositoriesMetadata; import 
org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.test.AbstractDiffableSerializationTestCase; @@ -57,13 +58,18 @@ protected Custom createTestInstance() { for (int i = 0; i < numberOfRepositories; i++) { // divide by 2 to not overflow when adding to this number for the pending generation below final long generation = randomNonNegativeLong() / 2L; + CryptoMetadata cryptoMetadata = null; + if (randomBoolean()) { + cryptoMetadata = new CryptoMetadata(randomAlphaOfLength(10), randomAlphaOfLength(10), randomSettings()); + } entries.add( new RepositoryMetadata( randomAlphaOfLength(10), randomAlphaOfLength(10), randomSettings(), generation, - generation + randomLongBetween(0, generation) + generation + randomLongBetween(0, generation), + cryptoMetadata ) ); } @@ -81,7 +87,11 @@ protected Custom mutateInstance(Custom instance) { List<RepositoryMetadata> entries = new ArrayList<>(((RepositoriesMetadata) instance).repositories()); boolean addEntry = entries.isEmpty() ? true : randomBoolean(); if (addEntry) { - entries.add(new RepositoryMetadata(randomAlphaOfLength(10), randomAlphaOfLength(10), randomSettings())); + CryptoMetadata cryptoMetadata = null; + if (randomBoolean()) { + cryptoMetadata = new CryptoMetadata(randomAlphaOfLength(10), randomAlphaOfLength(10), randomSettings()); + } + entries.add(new RepositoryMetadata(randomAlphaOfLength(10), randomAlphaOfLength(10), randomSettings(), cryptoMetadata)); } else { entries.remove(randomIntBetween(0, entries.size() - 1)); } @@ -114,7 +124,11 @@ protected Custom makeTestChanges(Custom testInstance) { // add some elements int addElements = randomInt(10); for (int i = 0; i < addElements; i++) { - repos.add(new RepositoryMetadata(randomAlphaOfLength(10), randomAlphaOfLength(10), randomSettings())); + CryptoMetadata cryptoMetadata = null; + if (randomBoolean()) { + cryptoMetadata = new CryptoMetadata(randomAlphaOfLength(10), randomAlphaOfLength(10), randomSettings()); + } + repos.add(new RepositoryMetadata(randomAlphaOfLength(10), randomAlphaOfLength(10), randomSettings(), cryptoMetadata)); } } return new RepositoriesMetadata(repos); diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotRequestsTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotRequestsTests.java index a1acb75d9dd45..a00c74f669eac 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotRequestsTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotRequestsTests.java @@ -35,9 +35,9 @@ import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; import org.opensearch.action.support.IndicesOptions; +import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index a121a190096b4..635939e68de71 100644 --- 
a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -36,7 +36,6 @@ import org.apache.logging.log4j.Logger; import org.opensearch.ExceptionsHelper; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionModule.DynamicActionRegistry; import org.opensearch.action.ActionType; import org.opensearch.action.RequestValidators; @@ -56,6 +55,9 @@ import org.opensearch.action.admin.cluster.snapshots.delete.DeleteSnapshotAction; import org.opensearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; import org.opensearch.action.admin.cluster.snapshots.delete.TransportDeleteSnapshotAction; +import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsAction; +import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; +import org.opensearch.action.admin.cluster.snapshots.get.TransportGetSnapshotsAction; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotAction; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; @@ -88,6 +90,7 @@ import org.opensearch.action.search.SearchExecutionStatsCollector; import org.opensearch.action.search.SearchPhaseController; import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchRequestOperationsCompositeListenerFactory; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchTransportService; import org.opensearch.action.search.TransportSearchAction; @@ -105,12 +108,14 @@ import org.opensearch.client.AdminClient; import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterInfo; +import org.opensearch.cluster.ClusterInfoService; import org.opensearch.cluster.ClusterModule; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateListener; -import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.NodeConnectionsService; +import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.SnapshotDeletionsInProgress; import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.cluster.action.index.MappingUpdatedAction; @@ -126,6 +131,8 @@ import org.opensearch.cluster.coordination.ElectionStrategy; import org.opensearch.cluster.coordination.InMemoryPersistedState; import org.opensearch.cluster.coordination.MockSinglePrioritizingExecutor; +import org.opensearch.cluster.coordination.PersistedStateRegistry; +import org.opensearch.cluster.coordination.PersistedStateRegistry.PersistedStateType; import org.opensearch.cluster.metadata.AliasValidator; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; @@ -150,41 +157,47 @@ import org.opensearch.common.CheckedConsumer; import org.opensearch.common.Nullable; import org.opensearch.common.SetOnce; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.cache.module.CacheModule; import org.opensearch.common.network.NetworkModule; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Settings; -import 
org.opensearch.common.transport.TransportAddress; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.index.Index; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; import org.opensearch.env.TestEnvironment; import org.opensearch.gateway.MetaStateService; import org.opensearch.gateway.TransportNodesListGatewayStartedShards; -import org.opensearch.core.index.Index; import org.opensearch.index.IndexingPressureService; import org.opensearch.index.SegmentReplicationPressureService; +import org.opensearch.index.SegmentReplicationStatsTracker; import org.opensearch.index.analysis.AnalysisRegistry; -import org.opensearch.index.remote.RemoteRefreshSegmentPressureService; +import org.opensearch.index.remote.RemoteStorePressureService; +import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; import org.opensearch.index.seqno.GlobalCheckpointSyncAction; import org.opensearch.index.seqno.RetentionLeaseSyncer; import org.opensearch.index.shard.PrimaryReplicaSyncer; import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; -import org.opensearch.index.store.remote.filecache.FileCacheCleaner; +import org.opensearch.index.store.remote.filecache.FileCache; +import org.opensearch.index.store.remote.filecache.FileCacheStats; import org.opensearch.indices.IndicesModule; import org.opensearch.indices.IndicesService; import org.opensearch.indices.ShardLimitValidator; import org.opensearch.indices.SystemIndices; import org.opensearch.indices.analysis.AnalysisModule; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.indices.mapper.MapperRegistry; +import org.opensearch.indices.recovery.DefaultRecoverySettings; import org.opensearch.indices.recovery.PeerRecoverySourceService; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoverySettings; @@ -195,6 +208,7 @@ import org.opensearch.ingest.IngestService; import org.opensearch.monitor.StatusInfo; import org.opensearch.node.ResponseCollectorService; +import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.plugins.PluginsService; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; @@ -210,9 +224,12 @@ import org.opensearch.search.query.QueryPhase; import org.opensearch.snapshots.mockstore.MockEventuallyConsistentRepository; import org.opensearch.tasks.TaskResourceTrackingService; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.disruption.DisruptableMockTransport; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.RemoteTransportException; import org.opensearch.transport.TransportException; import 
org.opensearch.transport.TransportInterceptor; import org.opensearch.transport.TransportRequest; @@ -223,6 +240,7 @@ import java.io.IOException; import java.nio.file.Path; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -243,12 +261,15 @@ import java.util.stream.IntStream; import java.util.stream.Stream; +import org.mockito.Mockito; + import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static org.opensearch.action.support.ActionTestUtils.assertNoFailureListener; import static org.opensearch.env.Environment.PATH_HOME_SETTING; import static org.opensearch.monitor.StatusInfo.Status.HEALTHY; import static org.opensearch.node.Node.NODE_NAME_SETTING; +import static org.opensearch.node.Node.NODE_SEARCH_CACHE_SIZE_SETTING; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.either; @@ -260,6 +281,7 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class SnapshotResiliencyTests extends OpenSearchTestCase { @@ -413,6 +435,106 @@ public void testSuccessfulSnapshotAndRestore() { assertEquals(0, snapshotInfo.failedShards()); } + public void testSearchableSnapshotOverSubscription() { + setupTestCluster(1, 2, 2); + + String repoName = "repo"; + String snapshotName = "snapshot"; + final String index = "test"; + final int shards = randomIntBetween(1, 10); + final int documents = randomIntBetween(0, 100); + + final TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager( + testClusterNodes.nodes.values().iterator().next().clusterService.state() + ); + + Map<String, FileCacheStats> nodeFileCacheStats = new HashMap<>(); + for (TestClusterNodes.TestClusterNode node : testClusterNodes.nodes.values()) { + nodeFileCacheStats.put(node.node.getId(), new FileCacheStats(0, 1, 0, 0, 0, 0, 0)); + } + ClusterInfo clusterInfo = new ClusterInfo(Map.of(), Map.of(), Map.of(), Map.of(), Map.of(), nodeFileCacheStats); + testClusterNodes.nodes.values().forEach(node -> when(node.getMockClusterInfoService().getClusterInfo()).thenReturn(clusterInfo)); + + final StepListener<CreateSnapshotResponse> createSnapshotResponseListener = new StepListener<>(); + + continueOrDie(createRepoAndIndex(repoName, index, shards), createIndexResponse -> { + final Runnable afterIndexing = () -> client().admin() + .cluster() + .prepareCreateSnapshot(repoName, snapshotName) + .setWaitForCompletion(true) + .execute(createSnapshotResponseListener); + if (documents == 0) { + afterIndexing.run(); + } else { + final BulkRequest bulkRequest = new BulkRequest().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + for (int i = 0; i < documents; ++i) { + bulkRequest.add(new IndexRequest(index).source(Collections.singletonMap("foo", "bar" + i))); + } + final StepListener<BulkResponse> bulkResponseStepListener = new StepListener<>(); + client().bulk(bulkRequest, bulkResponseStepListener); + continueOrDie(bulkResponseStepListener, bulkResponse -> { + assertFalse("Failures in bulk response: " + bulkResponse.buildFailureMessage(), bulkResponse.hasFailures()); + assertEquals(documents, bulkResponse.getItems().length); + afterIndexing.run(); + }); + } + }); + + final StepListener<AcknowledgedResponse> deleteIndexListener = new StepListener<>(); + + continueOrDie( + 
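/* once the snapshot completes, delete the index so the REMOTE_SNAPSHOT restore below can be exercised */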
createSnapshotResponseListener, + createSnapshotResponse -> client().admin().indices().delete(new DeleteIndexRequest(index), deleteIndexListener) + ); + + final StepListener<RestoreSnapshotResponse> restoreSnapshotResponseListener = new StepListener<>(); + continueOrDie( + deleteIndexListener, + ignored -> client().admin() + .cluster() + .restoreSnapshot( + new RestoreSnapshotRequest(repoName, snapshotName).waitForCompletion(true) + .storageType(RestoreSnapshotRequest.StorageType.REMOTE_SNAPSHOT), + restoreSnapshotResponseListener + ) + ); + + final AtomicBoolean exceptionVerified = new AtomicBoolean(); + + restoreSnapshotResponseListener.whenComplete(null, restoreSnapshotException -> { + Throwable throwable = restoreSnapshotException; + if (restoreSnapshotException instanceof RemoteTransportException) { + throwable = restoreSnapshotException.getCause(); + } + try { + assertTrue(throwable instanceof SnapshotRestoreException); + assertTrue( + throwable.getMessage() + .contains( + "Size of the indexes to be restored exceeds the file cache bounds. Increase the file cache capacity on the cluster nodes using " + + NODE_SEARCH_CACHE_SIZE_SETTING.getKey() + + " setting." + ) + ); + } catch (SnapshotRestoreException ignored) {} + exceptionVerified.set(true); + }); + + runUntil(exceptionVerified::get, TimeUnit.MINUTES.toMillis(5L)); + assertTrue(exceptionVerified.get()); + SnapshotsInProgress finalSnapshotsInProgress = clusterManagerNode.clusterService.state().custom(SnapshotsInProgress.TYPE); + assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false)); + final Repository repository = clusterManagerNode.repositoriesService.repository(repoName); + Collection<SnapshotId> snapshotIds = getRepositoryData(repository).getSnapshotIds(); + assertThat(snapshotIds, hasSize(1)); + + final SnapshotInfo snapshotInfo = repository.getSnapshotInfo(snapshotIds.iterator().next()); + assertEquals(SnapshotState.SUCCESS, snapshotInfo.state()); + assertThat(snapshotInfo.indices(), containsInAnyOrder(index)); + assertEquals(shards, snapshotInfo.successfulShards()); + assertEquals(0, snapshotInfo.failedShards()); + } + public void testSnapshotWithNodeDisconnects() { final int dataNodes = randomIntBetween(2, 10); final int clusterManagerNodes = randomFrom(1, 3, 5); @@ -635,7 +757,6 @@ public void testConcurrentSnapshotCreateAndDeleteOther() { ); final StepListener<CreateSnapshotResponse> createOtherSnapshotResponseStepListener = new StepListener<>(); - continueOrDie( createSnapshotResponseStepListener, createSnapshotResponse -> client().admin() @@ -643,7 +764,6 @@ public void testConcurrentSnapshotCreateAndDeleteOther() { .prepareCreateSnapshot(repoName, "snapshot-2") .execute(createOtherSnapshotResponseStepListener) ); - final StepListener<AcknowledgedResponse> deleteSnapshotStepListener = new StepListener<>(); continueOrDie( @@ -676,7 +796,6 @@ public void testConcurrentSnapshotCreateAndDeleteOther() { Collection<SnapshotId> snapshotIds = getRepositoryData(repository).getSnapshotIds(); // We end up with two snapshots no matter if the delete worked out or not assertThat(snapshotIds, hasSize(2)); - for (SnapshotId snapshotId : snapshotIds) { final SnapshotInfo snapshotInfo = repository.getSnapshotInfo(snapshotId); assertEquals(SnapshotState.SUCCESS, snapshotInfo.state()); @@ -686,6 +805,94 @@ public void testConcurrentSnapshotCreateAndDeleteOther() { } } + public void testTransportGetSnapshotsAction() { + setupTestCluster(randomFrom(1, 3, 5), randomIntBetween(2, 10)); + 
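+ // Drives TransportGetSnapshotsAction directly: take one snapshot, then fetch it by name and verify the returned SnapshotInfo.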
+ String repoName = "repo"; + final String[] snapshotsList = { "snapshot-1" }; + final String index = "index-1"; + final int shards = randomIntBetween(1, 10); + + TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager( + testClusterNodes.nodes.values().iterator().next().clusterService.state() + ); + + final StepListener<CreateSnapshotResponse> createSnapshotResponseStepListener = new StepListener<>(); + final String snapshot = snapshotsList[0]; + continueOrDie( + createRepoAndIndex(repoName, index, shards), + createSnapshotResponse -> client().admin() + .cluster() + .prepareCreateSnapshot(repoName, snapshot) + .setWaitForCompletion(true) + .execute(createSnapshotResponseStepListener) + ); + + continueOrDie(createSnapshotResponseStepListener, createSnapshotResponse -> { + + TransportAction getSnapshotsAction = clusterManagerNode.actions.get(GetSnapshotsAction.INSTANCE); + TransportGetSnapshotsAction transportGetSnapshotsAction = (TransportGetSnapshotsAction) getSnapshotsAction; + GetSnapshotsRequest repoSnapshotRequest = new GetSnapshotsRequest().repository(repoName).snapshots(snapshotsList); + + transportGetSnapshotsAction.execute(null, repoSnapshotRequest, ActionListener.wrap(repoSnapshotResponse -> { + assertNotNull("Snapshot list should not be null", repoSnapshotResponse.getSnapshots()); + assertThat(repoSnapshotResponse.getSnapshots(), hasSize(1)); + List<SnapshotInfo> snapshotInfos = repoSnapshotResponse.getSnapshots(); + SnapshotInfo snapshotInfo = snapshotInfos.get(0); + assertEquals(SnapshotState.SUCCESS, snapshotInfo.state()); + assertEquals(0, snapshotInfo.failedShards()); + assertEquals(snapshotInfo.snapshotId().getName(), snapshotsList[0]); + }, exception -> { throw new AssertionError(exception); })); + }); + } + + public void testTransportGetCurrentSnapshotsAction() { + setupTestCluster(randomFrom(1, 3, 5), randomIntBetween(2, 10)); + + String repoName = "repo"; + final String index = "index-1"; + final String[] snapshotsList = { GetSnapshotsRequest.CURRENT_SNAPSHOT }; + final int shards = randomIntBetween(1, 10); + + TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager( + testClusterNodes.nodes.values().iterator().next().clusterService.state() + ); + + final StepListener<CreateSnapshotResponse> createSnapshotResponseListener = new StepListener<>(); + clusterManagerNode.clusterService.addListener(new ClusterStateListener() { + @Override + public void clusterChanged(ClusterChangedEvent event) { + if (event.state().custom(SnapshotsInProgress.TYPE) != null) { + TransportAction getSnapshotsAction = clusterManagerNode.actions.get(GetSnapshotsAction.INSTANCE); + TransportGetSnapshotsAction transportGetSnapshotsAction = (TransportGetSnapshotsAction) getSnapshotsAction; + GetSnapshotsRequest repoSnapshotRequest = new GetSnapshotsRequest().repository(repoName) + .snapshots(snapshotsList) + .ignoreUnavailable(false) + .verbose(false); + transportGetSnapshotsAction.execute(null, repoSnapshotRequest, ActionListener.wrap(repoSnapshotResponse -> { + assertNotNull("Snapshot list should not be null", repoSnapshotResponse.getSnapshots()); + List<SnapshotInfo> snapshotInfos = repoSnapshotResponse.getSnapshots(); + assertThat(repoSnapshotResponse.getSnapshots(), hasSize(snapshotsList.length)); + for (SnapshotInfo snapshotInfo : snapshotInfos) { + assertEquals(SnapshotState.IN_PROGRESS, snapshotInfo.state()); + assertEquals(0, snapshotInfo.failedShards()); + 
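+ // The listener fires while the snapshot is still in flight, so the entry returned for the CURRENT_SNAPSHOT request is IN_PROGRESS.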
assertTrue(snapshotInfo.snapshotId().getName().contains("last-snapshot")); + } + }, exception -> { throw new AssertionError(exception); })); + clusterManagerNode.clusterService.removeListener(this); + } + } + }); + continueOrDie( + createRepoAndIndex(repoName, index, shards), + createIndexResponse -> client().admin() + .cluster() + .prepareCreateSnapshot(repoName, GetSnapshotsRequest.CURRENT_SNAPSHOT) + .execute(createSnapshotResponseListener) + ); + deterministicTaskQueue.runAllRunnableTasks(); + } + public void testBulkSnapshotDeleteWithAbort() { setupTestCluster(randomFrom(1, 3, 5), randomIntBetween(2, 10)); @@ -1415,6 +1622,11 @@ private void setupTestCluster(int clusterManagerNodes, int dataNodes) { startCluster(); } + private void setupTestCluster(int clusterManagerNodes, int dataNodes, int searchNodes) { + testClusterNodes = new TestClusterNodes(clusterManagerNodes, dataNodes, searchNodes); + startCluster(); + } + private void scheduleSoon(Runnable runnable) { deterministicTaskQueue.scheduleAt(deterministicTaskQueue.getCurrentTimeMillis() + randomLongBetween(0, 100L), runnable); } @@ -1465,6 +1677,7 @@ private Environment createEnvironment(String nodeName) { ClusterBootstrapService.INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey(), ClusterBootstrapService.INITIAL_CLUSTER_MANAGER_NODES_SETTING.get(Settings.EMPTY) ) + .put(FileCache.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING.getKey(), 5) .put(MappingUpdatedAction.INDICES_MAX_IN_FLIGHT_UPDATES_SETTING.getKey(), 1000) // o.w. some tests might block .build() ); @@ -1488,6 +1701,10 @@ private final class TestClusterNodes { private final Set<String> disconnectedNodes = new HashSet<>(); TestClusterNodes(int clusterManagerNodes, int dataNodes) { + this(clusterManagerNodes, dataNodes, 0); + } + + TestClusterNodes(int clusterManagerNodes, int dataNodes, int searchNodes) { for (int i = 0; i < clusterManagerNodes; ++i) { nodes.computeIfAbsent("node" + i, nodeName -> { try { @@ -1506,6 +1723,15 @@ private final class TestClusterNodes { } }); } + for (int i = 0; i < searchNodes; ++i) { + nodes.computeIfAbsent("search-node" + i, nodeName -> { + try { + return newSearchNode(nodeName); + } catch (IOException e) { + throw new AssertionError(e); + } + }); + } } public TestClusterNode nodeById(final String nodeId) { @@ -1524,6 +1750,10 @@ private TestClusterNode newDataNode(String nodeName) throws IOException { return newNode(nodeName, DiscoveryNodeRole.DATA_ROLE); } + private TestClusterNode newSearchNode(String nodeName) throws IOException { + return newNode(nodeName, DiscoveryNodeRole.SEARCH_ROLE); + } + private TestClusterNode newNode(String nodeName, DiscoveryNodeRole role) throws IOException { return new TestClusterNode( new DiscoveryNode( @@ -1625,7 +1855,6 @@ public TestClusterNode currentClusterManager(ClusterState state) { } private final class TestClusterNode { - private final Logger logger = LogManager.getLogger(TestClusterNode.class); private final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry( @@ -1667,7 +1896,12 @@ private final class TestClusterNode { private final ThreadPool threadPool; + private final ClusterInfoService clusterInfoService; + private Coordinator coordinator; + private RemoteStoreNodeService remoteStoreNodeService; + + private Map<ActionType, TransportAction> actions = new HashMap<>(); TestClusterNode(DiscoveryNode node) throws IOException { this.node = node; @@ -1757,7 +1991,7 @@ public void onFailure(final Exception e) { return actualHandler; } } - }, a -> node, null, emptySet()); + }, a -> node, 
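/* NoopTracer.INSTANCE fills the tracer parameter newly threaded through the test transport service setup */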
null, emptySet(), NoopTracer.INSTANCE); final IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver( new ThreadContext(Settings.EMPTY) ); @@ -1771,6 +2005,7 @@ public void onFailure(final Exception e) { emptyMap(), threadPool ); + remoteStoreNodeService = new RemoteStoreNodeService(new SetOnce<>(repositoriesService)::get, threadPool); final ActionFilters actionFilters = new ActionFilters(emptySet()); snapshotsService = new SnapshotsService( settings, @@ -1784,6 +2019,7 @@ public void onFailure(final Exception e) { final NamedXContentRegistry namedXContentRegistry = new NamedXContentRegistry(Collections.emptyList()); final ScriptService scriptService = new ScriptService(settings, emptyMap(), emptyMap()); client = new NodeClient(settings, threadPool); + clusterInfoService = Mockito.mock(ClusterInfoService.class); final SetOnce<RerouteService> rerouteServiceSetOnce = new SetOnce<>(); final SnapshotsInfoService snapshotsInfoService = new InternalSnapshotsInfoService( settings, @@ -1802,7 +2038,6 @@ public void onFailure(final Exception e) { final MapperRegistry mapperRegistry = new IndicesModule(Collections.emptyList()).getMapperRegistry(); final SetOnce<RepositoriesService> repositoriesServiceReference = new SetOnce<>(); repositoriesServiceReference.set(repositoriesService); - FileCacheCleaner fileCacheCleaner = new FileCacheCleaner(nodeEnv, null); indicesService = new IndicesService( settings, mock(PluginsService.class), @@ -1837,7 +2072,10 @@ public void onFailure(final Exception e) { emptyMap(), new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService, threadPool), repositoriesServiceReference::get, - fileCacheCleaner + null, + new RemoteStoreStatsTrackerFactory(clusterService, settings), + DefaultRecoverySettings.INSTANCE, + new CacheModule(new ArrayList<>(), settings).getCacheService() ); final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); snapshotShardsService = new SnapshotShardsService( @@ -1888,7 +2126,8 @@ public void onFailure(final Exception e) { shardStateAction, actionFilters, new IndexingPressureService(settings, clusterService), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ) ), new GlobalCheckpointSyncAction( @@ -1902,9 +2141,9 @@ public void onFailure(final Exception e) { ), RetentionLeaseSyncer.EMPTY, SegmentReplicationCheckpointPublisher.EMPTY, - mock(RemoteRefreshSegmentPressureService.class) + mock(RemoteStoreStatsTrackerFactory.class) ); - Map<ActionType, TransportAction> actions = new HashMap<>(); + final SystemIndices systemIndices = new SystemIndices(emptyMap()); final ShardLimitValidator shardLimitValidator = new ShardLimitValidator(settings, clusterService, systemIndices); final MetadataCreateIndexService metadataCreateIndexService = new MetadataCreateIndexService( @@ -1951,10 +2190,12 @@ public void onFailure(final Exception e) { clusterService, mock(IndicesService.class), mock(ShardStateAction.class), + mock(SegmentReplicationStatsTracker.class), mock(ThreadPool.class) ), - mock(RemoteRefreshSegmentPressureService.class), - new SystemIndices(emptyMap()) + mock(RemoteStorePressureService.class), + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); actions.put( BulkAction.INSTANCE, @@ -1969,7 +2210,8 @@ public void onFailure(final Exception e) { scriptService, new AnalysisModule(environment, Collections.emptyList()).getAnalysisRegistry(), Collections.emptyList(), - client + client, + indicesService ), transportShardBulkAction, client, @@ 
-1977,7 +2219,9 @@ public void onFailure(final Exception e) { indexNameExpressionResolver, new AutoCreateIndex(settings, clusterSettings, indexNameExpressionResolver, new SystemIndices(emptyMap())), new IndexingPressureService(settings, clusterService), - new SystemIndices(emptyMap()) + mock(IndicesService.class), + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ) ); final RestoreService restoreService = new RestoreService( @@ -1993,8 +2237,9 @@ public void onFailure(final Exception e) { new SystemIndices(emptyMap()), null ), - clusterSettings, - shardLimitValidator + shardLimitValidator, + indicesService, + clusterInfoService::getClusterInfo ); actions.put( PutMappingAction.INSTANCE, @@ -2040,6 +2285,8 @@ public void onFailure(final Exception e) { writableRegistry(), searchService::aggReduceContextBuilder ); + SearchRequestOperationsCompositeListenerFactory searchRequestOperationsCompositeListenerFactory = + new SearchRequestOperationsCompositeListenerFactory(); actions.put( SearchAction.INSTANCE, new TransportSearchAction( @@ -2064,7 +2311,10 @@ public void onFailure(final Exception e) { namedWriteableRegistry, List.of(), client - ) + ), + NoopMetricsRegistry.INSTANCE, + searchRequestOperationsCompositeListenerFactory, + NoopTracer.INSTANCE ) ); actions.put( @@ -2135,6 +2385,17 @@ public void onFailure(final Exception e) { indexNameExpressionResolver ) ); + actions.put( + GetSnapshotsAction.INSTANCE, + new TransportGetSnapshotsAction( + transportService, + clusterService, + threadPool, + repositoriesService, + actionFilters, + indexNameExpressionResolver + ) + ); actions.put( ClusterStateAction.INSTANCE, new TransportClusterStateAction( @@ -2207,6 +2468,10 @@ protected void assertSnapshotOrGenericThread() { } } + public ClusterInfoService getMockClusterInfoService() { + return clusterInfoService; + } + public void restart() { testClusterNodes.disconnectNode(this); final ClusterState oldState = this.clusterService.state(); @@ -2247,6 +2512,8 @@ public void start(ClusterState initialState) { initialState.term(), stateForNode(initialState, node) ); + final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); + persistedStateRegistry.addPersistedState(PersistedStateType.LOCAL, persistedState); coordinator = new Coordinator( node.getName(), clusterService.getSettings(), @@ -2266,7 +2533,9 @@ public void start(ClusterState initialState) { random(), rerouteService, ElectionStrategy.DEFAULT_INSTANCE, - () -> new StatusInfo(HEALTHY, "healthy-info") + () -> new StatusInfo(HEALTHY, "healthy-info"), + persistedStateRegistry, + remoteStoreNodeService ); clusterManagerService.setClusterStatePublisher(coordinator); coordinator.start(); diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotShardsServiceTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotShardsServiceTests.java index 63af67d96974b..21a24166a4571 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotShardsServiceTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotShardsServiceTests.java @@ -34,8 +34,8 @@ import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.common.UUIDs; import org.opensearch.core.index.shard.ShardId; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.EqualsHashCodeTestUtils; +import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotUtilsTests.java 
b/server/src/test/java/org/opensearch/snapshots/SnapshotUtilsTests.java index e388732dbb7e2..14e711e03a345 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotUtilsTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotUtilsTests.java @@ -45,8 +45,8 @@ import java.util.List; import java.util.Map; -import static org.hamcrest.Matchers.containsInAnyOrder; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_CREATED; +import static org.hamcrest.Matchers.containsInAnyOrder; public class SnapshotUtilsTests extends OpenSearchTestCase { public void testIndexNameFiltering() { diff --git a/server/src/test/java/org/opensearch/snapshots/mockstore/MockEventuallyConsistentRepository.java b/server/src/test/java/org/opensearch/snapshots/mockstore/MockEventuallyConsistentRepository.java index 5756397adce8a..f9388c9e4b86e 100644 --- a/server/src/test/java/org/opensearch/snapshots/mockstore/MockEventuallyConsistentRepository.java +++ b/server/src/test/java/org/opensearch/snapshots/mockstore/MockEventuallyConsistentRepository.java @@ -41,8 +41,8 @@ import org.opensearch.common.blobstore.BlobStore; import org.opensearch.common.blobstore.DeleteResult; import org.opensearch.common.blobstore.support.PlainBlobMetadata; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.io.Streams; +import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.repositories.blobstore.BlobStoreRepository; @@ -90,7 +90,7 @@ public MockEventuallyConsistentRepository( final Context context, final Random random ) { - super(metadata, false, namedXContentRegistry, clusterService, recoverySettings); + super(metadata, namedXContentRegistry, clusterService, recoverySettings); this.context = context; this.namedXContentRegistry = namedXContentRegistry; this.random = random; diff --git a/server/src/test/java/org/opensearch/tasks/ListTasksResponseTests.java b/server/src/test/java/org/opensearch/tasks/ListTasksResponseTests.java index 5da0727d23fed..afaf3e4cdaf59 100644 --- a/server/src/test/java/org/opensearch/tasks/ListTasksResponseTests.java +++ b/server/src/test/java/org/opensearch/tasks/ListTasksResponseTests.java @@ -37,6 +37,10 @@ import org.opensearch.action.TaskOperationFailure; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.opensearch.core.common.Strings; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.core.tasks.resourcetracker.TaskResourceStats; +import org.opensearch.core.tasks.resourcetracker.TaskResourceUsage; +import org.opensearch.core.tasks.resourcetracker.TaskThreadUsage; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.test.AbstractXContentTestCase; diff --git a/server/src/test/java/org/opensearch/tasks/TaskCancellationMonitoringServiceTests.java b/server/src/test/java/org/opensearch/tasks/TaskCancellationMonitoringServiceTests.java index 56e851871187b..bb154b95f9f01 100644 --- a/server/src/test/java/org/opensearch/tasks/TaskCancellationMonitoringServiceTests.java +++ b/server/src/test/java/org/opensearch/tasks/TaskCancellationMonitoringServiceTests.java @@ -8,19 +8,21 @@ package org.opensearch.tasks; -import org.junit.After; -import org.junit.Before; import org.opensearch.Version; import org.opensearch.action.search.SearchShardTask; -import 
org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.Scheduler; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportRequest; +import org.junit.After; +import org.junit.Before; import java.io.IOException; import java.util.ArrayList; @@ -30,12 +32,12 @@ import java.util.concurrent.Phaser; import java.util.concurrent.TimeUnit; +import static org.opensearch.tasks.TaskCancellationMonitoringSettings.DURATION_MILLIS_SETTING; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import static org.opensearch.tasks.TaskCancellationMonitoringSettings.DURATION_MILLIS_SETTING; public class TaskCancellationMonitoringServiceTests extends OpenSearchTestCase { @@ -46,7 +48,7 @@ public class TaskCancellationMonitoringServiceTests extends OpenSearchTestCase { @Before public void setup() { threadPool = new TestThreadPool(getClass().getName()); - transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool); + transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, NoopTracer.INSTANCE); transportService.start(); transportService.acceptIncomingRequests(); taskManager = transportService.getTaskManager(); diff --git a/server/src/test/java/org/opensearch/tasks/TaskIdTests.java b/server/src/test/java/org/opensearch/tasks/TaskIdTests.java index 0e77f7523cea0..d4a97cac6bd58 100644 --- a/server/src/test/java/org/opensearch/tasks/TaskIdTests.java +++ b/server/src/test/java/org/opensearch/tasks/TaskIdTests.java @@ -33,9 +33,10 @@ package org.opensearch.tasks; import org.opensearch.common.UUIDs; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.tasks.TaskId; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/tasks/TaskInfoTests.java b/server/src/test/java/org/opensearch/tasks/TaskInfoTests.java index 6ee91cec29d49..75cf57a24a95a 100644 --- a/server/src/test/java/org/opensearch/tasks/TaskInfoTests.java +++ b/server/src/test/java/org/opensearch/tasks/TaskInfoTests.java @@ -36,6 +36,10 @@ import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.core.tasks.resourcetracker.TaskResourceStats; +import org.opensearch.core.tasks.resourcetracker.TaskResourceUsage; +import org.opensearch.core.tasks.resourcetracker.TaskThreadUsage; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.test.AbstractSerializingTestCase; diff --git 
a/server/src/test/java/org/opensearch/tasks/TaskManagerTests.java b/server/src/test/java/org/opensearch/tasks/TaskManagerTests.java index 31f3c0a7da094..fac5c89cdfc92 100644 --- a/server/src/test/java/org/opensearch/tasks/TaskManagerTests.java +++ b/server/src/test/java/org/opensearch/tasks/TaskManagerTests.java @@ -32,13 +32,14 @@ package org.opensearch.tasks; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.node.tasks.TransportTasksActionTests; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ConcurrentCollections; -import org.opensearch.common.lease.Releasable; -import org.opensearch.common.lease.Releasables; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.tasks.TaskId; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.RunnableTaskExecutionListener; import org.opensearch.threadpool.TestThreadPool; diff --git a/server/src/test/java/org/opensearch/tasks/TaskResourceTrackingServiceTests.java b/server/src/test/java/org/opensearch/tasks/TaskResourceTrackingServiceTests.java index 3dcd634c234a3..45d438f8d04c9 100644 --- a/server/src/test/java/org/opensearch/tasks/TaskResourceTrackingServiceTests.java +++ b/server/src/test/java/org/opensearch/tasks/TaskResourceTrackingServiceTests.java @@ -8,16 +8,18 @@ package org.opensearch.tasks; -import org.junit.After; -import org.junit.Before; import org.opensearch.action.admin.cluster.node.tasks.TransportTasksActionTests; import org.opensearch.action.search.SearchTask; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.core.tasks.resourcetracker.ThreadResourceInfo; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; import java.util.HashMap; import java.util.List; @@ -26,8 +28,8 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; -import static org.opensearch.tasks.ResourceStats.CPU; -import static org.opensearch.tasks.ResourceStats.MEMORY; +import static org.opensearch.core.tasks.resourcetracker.ResourceStats.CPU; +import static org.opensearch.core.tasks.resourcetracker.ResourceStats.MEMORY; import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; public class TaskResourceTrackingServiceTests extends OpenSearchTestCase { diff --git a/server/src/test/java/org/opensearch/tasks/TaskResultTests.java b/server/src/test/java/org/opensearch/tasks/TaskResultTests.java index f27a920d812e9..447a98b2a43f9 100644 --- a/server/src/test/java/org/opensearch/tasks/TaskResultTests.java +++ b/server/src/test/java/org/opensearch/tasks/TaskResultTests.java @@ -33,14 +33,14 @@ package org.opensearch.tasks; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import 
org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -120,7 +120,7 @@ private XContentBuilder addRandomUnknownFields(XContentBuilder builder) throws I map.put("unknown_field" + i, Collections.singletonMap("inner", randomAlphaOfLength(20))); } } - XContentBuilder xContentBuilder = XContentFactory.contentBuilder(parser.contentType()); + XContentBuilder xContentBuilder = MediaTypeRegistry.contentBuilder(parser.contentType()); return xContentBuilder.map(map); } } diff --git a/server/src/test/java/org/opensearch/tasks/TaskThreadContextStatePropagatorTests.java b/server/src/test/java/org/opensearch/tasks/TaskThreadContextStatePropagatorTests.java new file mode 100644 index 0000000000000..bfa0d566aabd7 --- /dev/null +++ b/server/src/test/java/org/opensearch/tasks/TaskThreadContextStatePropagatorTests.java @@ -0,0 +1,34 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.tasks; + +import org.opensearch.test.OpenSearchTestCase; + +import java.util.HashMap; +import java.util.Map; + +import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; + +public class TaskThreadContextStatePropagatorTests extends OpenSearchTestCase { + private final TaskThreadContextStatePropagator taskThreadContextStatePropagator = new TaskThreadContextStatePropagator(); + + public void testTransient() { + Map<String, Object> transientHeader = new HashMap<>(); + transientHeader.put(TASK_ID, "t_1"); + Map<String, Object> transientPropagatedHeader = taskThreadContextStatePropagator.transients(transientHeader, false); + assertEquals("t_1", transientPropagatedHeader.get(TASK_ID)); + } + + public void testTransientForSystemContext() { + Map<String, Object> transientHeader = new HashMap<>(); + transientHeader.put(TASK_ID, "t_1"); + Map<String, Object> transientPropagatedHeader = taskThreadContextStatePropagator.transients(transientHeader, true); + assertEquals("t_1", transientPropagatedHeader.get(TASK_ID)); + } +} diff --git a/server/src/test/java/org/opensearch/tasks/consumer/SearchShardTaskDetailsLogMessageTests.java b/server/src/test/java/org/opensearch/tasks/consumer/SearchShardTaskDetailsLogMessageTests.java index 641fdef4891bd..099f70a3b14d3 100644 --- a/server/src/test/java/org/opensearch/tasks/consumer/SearchShardTaskDetailsLogMessageTests.java +++ b/server/src/test/java/org/opensearch/tasks/consumer/SearchShardTaskDetailsLogMessageTests.java @@ -9,9 +9,9 @@ package org.opensearch.tasks.consumer; import org.opensearch.action.search.SearchShardTask; -import org.opensearch.tasks.ResourceStats; -import org.opensearch.tasks.ResourceStatsType; -import org.opensearch.tasks.ResourceUsageMetric; +import org.opensearch.core.tasks.resourcetracker.ResourceStats; +import org.opensearch.core.tasks.resourcetracker.ResourceStatsType; +import org.opensearch.core.tasks.resourcetracker.ResourceUsageMetric; import org.opensearch.tasks.Task; import org.opensearch.test.OpenSearchSingleNodeTestCase; diff --git a/server/src/test/java/org/opensearch/tasks/consumer/TopNSearchTasksLoggerTests.java b/server/src/test/java/org/opensearch/tasks/consumer/TopNSearchTasksLoggerTests.java index 73f9f7c0e08cc..68dd3327cb52c 100644 --- 
a/server/src/test/java/org/opensearch/tasks/consumer/TopNSearchTasksLoggerTests.java +++ b/server/src/test/java/org/opensearch/tasks/consumer/TopNSearchTasksLoggerTests.java @@ -12,25 +12,25 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.core.LogEvent; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.BeforeClass; import org.opensearch.action.search.SearchShardTask; import org.opensearch.common.logging.Loggers; import org.opensearch.common.logging.MockAppender; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.tasks.ResourceStats; -import org.opensearch.tasks.ResourceStatsType; -import org.opensearch.tasks.ResourceUsageMetric; +import org.opensearch.core.tasks.resourcetracker.ResourceStats; +import org.opensearch.core.tasks.resourcetracker.ResourceStatsType; +import org.opensearch.core.tasks.resourcetracker.ResourceUsageMetric; import org.opensearch.tasks.Task; import org.opensearch.test.OpenSearchSingleNodeTestCase; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; import java.util.Collections; -import static org.opensearch.tasks.consumer.TopNSearchTasksLogger.LOG_TOP_QUERIES_SIZE_SETTING; import static org.opensearch.tasks.consumer.TopNSearchTasksLogger.LOG_TOP_QUERIES_FREQUENCY_SETTING; +import static org.opensearch.tasks.consumer.TopNSearchTasksLogger.LOG_TOP_QUERIES_SIZE_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; public class TopNSearchTasksLoggerTests extends OpenSearchSingleNodeTestCase { diff --git a/server/src/test/java/org/opensearch/telemetry/TelemetrySettingsTests.java b/server/src/test/java/org/opensearch/telemetry/TelemetrySettingsTests.java new file mode 100644 index 0000000000000..4c96f79b30d55 --- /dev/null +++ b/server/src/test/java/org/opensearch/telemetry/TelemetrySettingsTests.java @@ -0,0 +1,64 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Set; + +import static org.opensearch.telemetry.TelemetrySettings.TRACER_ENABLED_SETTING; +import static org.opensearch.telemetry.TelemetrySettings.TRACER_SAMPLER_PROBABILITY; + +public class TelemetrySettingsTests extends OpenSearchTestCase { + + public void testSetTracingEnabledOrDisabled() { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, Set.of(TRACER_SAMPLER_PROBABILITY, TRACER_ENABLED_SETTING)); + TelemetrySettings telemetrySettings = new TelemetrySettings(Settings.EMPTY, clusterSettings); + + // Validating that tracing can be enabled + telemetrySettings.setTracingEnabled(true); + assertTrue(telemetrySettings.isTracingEnabled()); + + // Validating that tracing can be disabled + telemetrySettings.setTracingEnabled(false); + assertFalse(telemetrySettings.isTracingEnabled()); + } + + public void testSetSamplingProbability() { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, Set.of(TRACER_SAMPLER_PROBABILITY, TRACER_ENABLED_SETTING)); + TelemetrySettings telemetrySettings = new TelemetrySettings(Settings.EMPTY, clusterSettings); + + // Validating the default sample rate, i.e. 1% + assertEquals(0.01, telemetrySettings.getSamplingProbability(), 0.00d); + + // Validating the sampling override for 100% of requests + telemetrySettings.setSamplingProbability(1.00); + assertEquals(1.00, telemetrySettings.getSamplingProbability(), 0.00d); + + // Validating the sampling override for 50% of requests + telemetrySettings.setSamplingProbability(0.50); + assertEquals(0.50, telemetrySettings.getSamplingProbability(), 0.00d); + } + + public void testGetSamplingProbability() { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, Set.of(TRACER_SAMPLER_PROBABILITY, TRACER_ENABLED_SETTING)); + TelemetrySettings telemetrySettings = new TelemetrySettings(Settings.EMPTY, clusterSettings); + + // Validating that the default sampling probability is 1% + assertEquals(0.01, telemetrySettings.getSamplingProbability(), 0.00d); + + clusterSettings.applySettings(Settings.builder().put("telemetry.tracer.sampler.probability", "0.02").build()); + + // Validating that the sampling probability is updated to 2% + assertEquals(0.02, telemetrySettings.getSamplingProbability(), 0.00d); + } + +} diff --git a/server/src/test/java/org/opensearch/telemetry/metrics/MetricsRegistryFactoryTests.java b/server/src/test/java/org/opensearch/telemetry/metrics/MetricsRegistryFactoryTests.java new file mode 100644 index 0000000000000..80942123fd4fd --- /dev/null +++ b/server/src/test/java/org/opensearch/telemetry/metrics/MetricsRegistryFactoryTests.java @@ -0,0 +1,83 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.telemetry.Telemetry; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.telemetry.metrics.noop.NoopCounter; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; +import org.opensearch.telemetry.tracing.TracingTelemetry; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.After; + +import java.util.HashSet; +import java.util.List; +import java.util.Optional; +import java.util.Set; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class MetricsRegistryFactoryTests extends OpenSearchTestCase { + + private MetricsRegistryFactory metricsRegistryFactory; + + @After + public void close() { + metricsRegistryFactory.close(); + } + + public void testGetMeterRegistryWithUnavailableMetricsTelemetry() { + Settings settings = Settings.builder().put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), false).build(); + TelemetrySettings telemetrySettings = new TelemetrySettings(settings, new ClusterSettings(settings, getClusterSettings())); + Telemetry mockTelemetry = mock(Telemetry.class); + when(mockTelemetry.getTracingTelemetry()).thenReturn(mock(TracingTelemetry.class)); + metricsRegistryFactory = new MetricsRegistryFactory(telemetrySettings, Optional.empty()); + + MetricsRegistry metricsRegistry = metricsRegistryFactory.getMetricsRegistry(); + + assertTrue(metricsRegistry instanceof NoopMetricsRegistry); + assertTrue(metricsRegistry.createCounter("test", "test", "test") == NoopCounter.INSTANCE); + assertTrue(metricsRegistry.createUpDownCounter("test", "test", "test") == NoopCounter.INSTANCE); + } + + public void testGetMetricsWithAvailableMetricsTelemetry() { + Settings settings = Settings.builder().put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), true).build(); + TelemetrySettings telemetrySettings = new TelemetrySettings(settings, new ClusterSettings(settings, getClusterSettings())); + Telemetry mockTelemetry = mock(Telemetry.class); + when(mockTelemetry.getMetricsTelemetry()).thenReturn(mock(MetricsTelemetry.class)); + metricsRegistryFactory = new MetricsRegistryFactory(telemetrySettings, Optional.of(mockTelemetry)); + + MetricsRegistry metricsRegistry = metricsRegistryFactory.getMetricsRegistry(); + assertTrue(metricsRegistry instanceof DefaultMetricsRegistry); + + } + + public void testNullMetricsTelemetry() { + Settings settings = Settings.builder().put(TelemetrySettings.METRICS_FEATURE_ENABLED_SETTING.getKey(), false).build(); + TelemetrySettings telemetrySettings = new TelemetrySettings(settings, new ClusterSettings(settings, getClusterSettings())); + Telemetry mockTelemetry = mock(Telemetry.class); + when(mockTelemetry.getMetricsTelemetry()).thenReturn(null); + metricsRegistryFactory = new MetricsRegistryFactory(telemetrySettings, Optional.of(mockTelemetry)); + + MetricsRegistry metricsRegistry = metricsRegistryFactory.getMetricsRegistry(); + assertTrue(metricsRegistry instanceof NoopMetricsRegistry); + + } + + private Set<Setting<?>> getClusterSettings() { + Set<Setting<?>> allTracerSettings = new HashSet<>(); + ClusterSettings.FEATURE_FLAGGED_CLUSTER_SETTINGS.get(List.of(FeatureFlags.TELEMETRY)).stream().forEach((allTracerSettings::add)); + return allTracerSettings; + } +} diff --git 
a/server/src/test/java/org/opensearch/telemetry/tracing/SpanBuilderTests.java b/server/src/test/java/org/opensearch/telemetry/tracing/SpanBuilderTests.java new file mode 100644 index 0000000000000..4b763e4bd4454 --- /dev/null +++ b/server/src/test/java/org/opensearch/telemetry/tracing/SpanBuilderTests.java @@ -0,0 +1,206 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.Version; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.network.NetworkAddress; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.http.HttpRequest; +import org.opensearch.http.HttpResponse; +import org.opensearch.rest.RestRequest; +import org.opensearch.telemetry.tracing.attributes.Attributes; +import org.opensearch.telemetry.tracing.noop.NoopSpan; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.transport.Transport; +import org.opensearch.transport.TransportException; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.TransportRequestOptions; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Map; + +public class SpanBuilderTests extends OpenSearchTestCase { + + public String uri; + + public String expectedSpanName; + + public String expectedQueryParams; + + public String expectedReqRawPath; + + @ParametersFactory + public static Collection<Object[]> data() { + return Arrays.asList( + new Object[][] { + { "/_test/resource?name=John&age=25", "GET /_test/resource", "name=John&age=25", "/_test/resource" }, + { "/_test/", "GET /_test/", "", "/_test/" }, } + ); + } + + public SpanBuilderTests(String uri, String expectedSpanName, String expectedQueryParams, String expectedReqRawPath) { + this.uri = uri; + this.expectedSpanName = expectedSpanName; + this.expectedQueryParams = expectedQueryParams; + this.expectedReqRawPath = expectedReqRawPath; + } + + public void testHttpRequestContext() { + HttpRequest httpRequest = createHttpRequest(uri); + SpanCreationContext context = SpanBuilder.from(httpRequest); + Attributes attributes = context.getAttributes(); + assertEquals(expectedSpanName, context.getSpanName()); + assertEquals("true", attributes.getAttributesMap().get(AttributeNames.TRACE)); + assertEquals("GET", attributes.getAttributesMap().get(AttributeNames.HTTP_METHOD)); + assertEquals("HTTP_1_0", attributes.getAttributesMap().get(AttributeNames.HTTP_PROTOCOL_VERSION)); + assertEquals(uri, attributes.getAttributesMap().get(AttributeNames.HTTP_URI)); + if (expectedQueryParams.isBlank()) { + assertNull(attributes.getAttributesMap().get(AttributeNames.HTTP_REQ_QUERY_PARAMS)); + } else { + assertEquals(expectedQueryParams, attributes.getAttributesMap().get(AttributeNames.HTTP_REQ_QUERY_PARAMS)); + } + } + + public void testRestRequestContext() { + RestRequest restRequest = RestRequest.request(null, createHttpRequest(uri), null); + SpanCreationContext context = SpanBuilder.from(restRequest); + Attributes attributes = context.getAttributes(); + assertEquals(expectedSpanName, 
context.getSpanName()); + assertEquals(expectedReqRawPath, attributes.getAttributesMap().get(AttributeNames.REST_REQ_RAW_PATH)); + assertNotNull(attributes.getAttributesMap().get(AttributeNames.REST_REQ_ID)); + if (expectedQueryParams.isBlank()) { + assertNull(attributes.getAttributesMap().get(AttributeNames.HTTP_REQ_QUERY_PARAMS)); + } else { + assertEquals(expectedQueryParams, attributes.getAttributesMap().get(AttributeNames.HTTP_REQ_QUERY_PARAMS)); + } + } + + public void testRestRequestContextForNull() { + SpanCreationContext context = SpanBuilder.from((RestRequest) null); + assertEquals("rest_request", context.getSpanName()); + assertEquals(Attributes.EMPTY, context.getAttributes()); + } + + public void testTransportContext() { + String action = "test-action"; + Transport.Connection connection = createTransportConnection(); + SpanCreationContext context = SpanBuilder.from(action, connection); + Attributes attributes = context.getAttributes(); + assertEquals(action + " " + NetworkAddress.format(TransportAddress.META_ADDRESS), context.getSpanName()); + assertEquals(connection.getNode().getHostAddress(), attributes.getAttributesMap().get(AttributeNames.TRANSPORT_TARGET_HOST)); + } + + public void testParentSpan() { + String spanName = "test-name"; + SpanContext parentSpanContext = new SpanContext(NoopSpan.INSTANCE); + SpanCreationContext context = SpanBuilder.from(spanName, parentSpanContext); + Attributes attributes = context.getAttributes(); + assertNull(attributes); + assertEquals(spanName, context.getSpanName()); + assertEquals(parentSpanContext, context.getParent()); + } + + private static Transport.Connection createTransportConnection() { + return new Transport.Connection() { + @Override + public DiscoveryNode getNode() { + return new DiscoveryNode("local", new TransportAddress(TransportAddress.META_ADDRESS, 9200), Version.V_2_0_0); + } + + @Override + public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) + throws IOException, TransportException { + + } + + @Override + public void addCloseListener(ActionListener<Void> listener) { + + } + + @Override + public boolean isClosed() { + return false; + } + + @Override + public void close() { + + } + }; + } + + private static HttpRequest createHttpRequest(String uri) { + return new HttpRequest() { + @Override + public RestRequest.Method method() { + return RestRequest.Method.GET; + } + + @Override + public String uri() { + return uri; + } + + @Override + public BytesReference content() { + return null; + } + + @Override + public Map<String, List<String>> getHeaders() { + return Map.of("trace", Arrays.asList("true")); + } + + @Override + public List<String> strictCookies() { + return null; + } + + @Override + public HttpVersion protocolVersion() { + return HttpVersion.HTTP_1_0; + } + + @Override + public HttpRequest removeHeader(String header) { + return null; + } + + @Override + public HttpResponse createResponse(RestStatus status, BytesReference content) { + return null; + } + + @Override + public Exception getInboundException() { + return null; + } + + @Override + public void release() { + + } + + @Override + public HttpRequest releaseAndCopy() { + return null; + } + }; + } +} diff --git a/server/src/test/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorageTests.java b/server/src/test/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorageTests.java new file mode 100644 index 0000000000000..bf11bcaf39a96 --- /dev/null +++ 
b/server/src/test/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorageTests.java @@ -0,0 +1,271 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.ThreadContext.StoredContext; +import org.opensearch.telemetry.Telemetry; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.telemetry.metrics.MetricsTelemetry; +import org.opensearch.telemetry.tracing.noop.NoopTracer; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.telemetry.tracing.MockTracingTelemetry; +import org.junit.After; +import org.junit.Before; + +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import static org.opensearch.telemetry.TelemetrySettings.TRACER_ENABLED_SETTING; +import static org.opensearch.telemetry.TelemetrySettings.TRACER_FEATURE_ENABLED_SETTING; +import static org.opensearch.telemetry.TelemetrySettings.TRACER_SAMPLER_PROBABILITY; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.CoreMatchers.nullValue; + +public class ThreadContextBasedTracerContextStorageTests extends OpenSearchTestCase { + private Tracer tracer; + private ThreadContext threadContext; + private TracerContextStorage<String, Span> threadContextStorage; + private ExecutorService executorService; + + @SuppressWarnings("resource") + @Before + public void setUp() throws Exception { + super.setUp(); + + final Settings settings = Settings.builder() + .put(TRACER_ENABLED_SETTING.getKey(), true) + .put(TRACER_SAMPLER_PROBABILITY.getKey(), 1d) + .put(TRACER_FEATURE_ENABLED_SETTING.getKey(), true) + .build(); + + final TelemetrySettings telemetrySettings = new TelemetrySettings( + settings, + new ClusterSettings(Settings.EMPTY, Set.of(TRACER_ENABLED_SETTING, TRACER_SAMPLER_PROBABILITY)) + ); + + final TracingTelemetry tracingTelemetry = new MockTracingTelemetry(); + + threadContext = new ThreadContext(Settings.EMPTY); + threadContextStorage = new ThreadContextBasedTracerContextStorage(threadContext, tracingTelemetry); + + tracer = new TracerFactory(telemetrySettings, Optional.of(new Telemetry() { + @Override + public MetricsTelemetry getMetricsTelemetry() { + return null; + } + + @Override + public TracingTelemetry getTracingTelemetry() { + return tracingTelemetry; + } + }), threadContext) { + @Override + protected TracerContextStorage<String, Span> createTracerContextStorage( + TracingTelemetry tracingTelemetry, + ThreadContext threadContext + ) { + return threadContextStorage; + } + }.getTracer(); + + executorService = Executors.newSingleThreadExecutor(); + assertThat(tracer, not(instanceOf(NoopTracer.class))); + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + executorService.shutdown(); + tracer.close(); + } + + public void testStartingSpanDoesNotChangeThreadContext() { + final Span span = 
tracer.startSpan(SpanCreationContext.internal().name("test")); + assertThat(threadContext.getTransient(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(nullValue())); + } + + public void testSpanInScopeChangesThreadContext() { + final Span span = tracer.startSpan(SpanCreationContext.internal().name("test")); + + try (SpanScope scope = tracer.withSpanInScope(span)) { + assertThat(threadContext.getTransient(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(not(nullValue()))); + assertThat(threadContextStorage.get(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(span)); + } + + assertThat(threadContext.getTransient(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(not(nullValue()))); + assertThat(threadContextStorage.get(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(nullValue())); + } + + public void testStashingPropagatesThreadContext() { + final Span span = tracer.startSpan(SpanCreationContext.internal().name("test")); + + try (SpanScope scope = tracer.withSpanInScope(span)) { + try (StoredContext ignored = threadContext.stashContext()) { + assertThat(threadContext.getTransient(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(not(nullValue()))); + assertThat(threadContextStorage.get(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(span)); + } + } + + assertThat(threadContext.getTransient(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(not(nullValue()))); + assertThat(threadContextStorage.get(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(nullValue())); + } + + public void testPreservingContextThreadContext() throws InterruptedException, ExecutionException, TimeoutException { + final Span span = tracer.startSpan(SpanCreationContext.internal().name("test")); + + try (SpanScope scope = tracer.withSpanInScope(span)) { + final Runnable r = new Runnable() { + @Override + public void run() { + assertThat(threadContext.getTransient(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(not(nullValue()))); + assertThat(threadContextStorage.get(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(span)); + } + }; + + executorService.submit(threadContext.preserveContext(r)).get(1, TimeUnit.SECONDS); + } + + assertThat(threadContext.getTransient(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(not(nullValue()))); + assertThat(threadContextStorage.get(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(nullValue())); + } + + public void testNoThreadContextToPreserve() throws InterruptedException, ExecutionException, TimeoutException { + final Runnable r = new Runnable() { + @Override + public void run() { + assertThat(threadContext.getTransient(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(nullValue())); + assertThat(threadContextStorage.get(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(nullValue())); + + final Span local1 = tracer.startSpan(SpanCreationContext.internal().name("test-local-1")); + try (SpanScope localScope = tracer.withSpanInScope(local1)) { + try (StoredContext ignored = threadContext.stashContext()) { + assertThat(local1.getParentSpan(), is(nullValue())); + assertThat(threadContextStorage.get(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(local1)); + } + } + + final Span local2 = tracer.startSpan(SpanCreationContext.internal().name("test-local-2")); + try (SpanScope localScope = tracer.withSpanInScope(local2)) { + try (StoredContext ignored = threadContext.stashContext()) { + assertThat(local2.getParentSpan(), is(nullValue())); + 
assertThat(threadContextStorage.get(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(local2)); + } + } + + final Span local3 = tracer.startSpan(SpanCreationContext.internal().name("test-local-3")); + try (SpanScope localScope = tracer.withSpanInScope(local3)) { + try (StoredContext ignored = threadContext.stashContext()) { + assertThat(local3.getParentSpan(), is(nullValue())); + assertThat(threadContextStorage.get(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(local3)); + } + } + } + }; + + executorService.submit(threadContext.preserveContext(r)).get(1, TimeUnit.SECONDS); + + assertThat(threadContext.getTransient(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(nullValue())); + } + + public void testPreservingContextThreadContextMultipleSpans() throws InterruptedException, ExecutionException, TimeoutException { + final Span span = tracer.startSpan(SpanCreationContext.internal().name("test")); + + try (SpanScope scope = tracer.withSpanInScope(span)) { + final Runnable r = new Runnable() { + @Override + public void run() { + assertThat(threadContext.getTransient(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(not(nullValue()))); + assertThat(threadContextStorage.get(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(span)); + + final Span local1 = tracer.startSpan(SpanCreationContext.internal().name("test-local-1")); + try (SpanScope localScope = tracer.withSpanInScope(local1)) { + try (StoredContext ignored = threadContext.stashContext()) { + assertThat(local1.getParentSpan(), is(span)); + assertThat(threadContextStorage.get(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(local1)); + } + } + + final Span local2 = tracer.startSpan(SpanCreationContext.internal().name("test-local-2")); + try (SpanScope localScope = tracer.withSpanInScope(local2)) { + try (StoredContext ignored = threadContext.stashContext()) { + assertThat(local2.getParentSpan(), is(span)); + assertThat(threadContextStorage.get(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(local2)); + } + } + + final Span local3 = tracer.startSpan(SpanCreationContext.internal().name("test-local-3")); + try (SpanScope localScope = tracer.withSpanInScope(local3)) { + try (StoredContext ignored = threadContext.stashContext()) { + assertThat(local3.getParentSpan(), is(span)); + assertThat(threadContextStorage.get(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(local3)); + } + } + } + }; + + executorService.submit(threadContext.preserveContext(r)).get(1, TimeUnit.SECONDS); + } + + assertThat(threadContext.getTransient(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(not(nullValue()))); + assertThat(threadContextStorage.get(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(nullValue())); + } + + public void testPreservingContextAndStashingThreadContext() throws InterruptedException, ExecutionException, TimeoutException { + final Span span = tracer.startSpan(SpanCreationContext.internal().name("test")); + + try (SpanScope scope = tracer.withSpanInScope(span)) { + final Runnable r = new Runnable() { + @Override + public void run() { + final Span local = tracer.startSpan(SpanCreationContext.internal().name("test-local")); + try (SpanScope localScope = tracer.withSpanInScope(local)) { + try (StoredContext ignored = threadContext.stashContext()) { + assertThat( + threadContext.getTransient(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), + is(not(nullValue())) + ); + assertThat(threadContextStorage.get(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), 
is(local)); + } + } + } + }; + + executorService.submit(threadContext.preserveContext(r)).get(1, TimeUnit.SECONDS); + } + + assertThat(threadContext.getTransient(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(not(nullValue()))); + assertThat(threadContextStorage.get(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(nullValue())); + } + + public void testSpanNotPropagatedToChildSystemThreadContext() { + final Span span = tracer.startSpan(SpanCreationContext.internal().name("test")); + + try (SpanScope scope = tracer.withSpanInScope(span)) { + try (StoredContext ignored = threadContext.stashContext()) { + assertThat(threadContext.getTransient(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(not(nullValue()))); + assertThat(threadContextStorage.get(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(span)); + threadContext.markAsSystemContext(); + assertThat(threadContext.getTransient(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(nullValue())); + } + } + + assertThat(threadContext.getTransient(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(not(nullValue()))); + assertThat(threadContextStorage.get(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(nullValue())); + } +} diff --git a/server/src/test/java/org/opensearch/telemetry/tracing/TracerFactoryTests.java b/server/src/test/java/org/opensearch/telemetry/tracing/TracerFactoryTests.java index 0ffccee505d43..3a388be22445e 100644 --- a/server/src/test/java/org/opensearch/telemetry/tracing/TracerFactoryTests.java +++ b/server/src/test/java/org/opensearch/telemetry/tracing/TracerFactoryTests.java @@ -8,7 +8,6 @@ package org.opensearch.telemetry.tracing; -import org.junit.After; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; @@ -16,8 +15,11 @@ import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.telemetry.Telemetry; import org.opensearch.telemetry.TelemetrySettings; -import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.telemetry.tracing.attributes.Attributes; +import org.opensearch.telemetry.tracing.noop.NoopSpan; import org.opensearch.telemetry.tracing.noop.NoopTracer; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.After; import java.util.HashSet; import java.util.List; @@ -46,7 +48,27 @@ public void testGetTracerWithUnavailableTracingTelemetryReturnsNoopTracer() { Tracer tracer = tracerFactory.getTracer(); assertTrue(tracer instanceof NoopTracer); - assertTrue(tracer.startSpan("foo") == SpanScope.NO_OP); + assertTrue(tracer.startSpan(SpanCreationContext.internal().name("foo").attributes(Attributes.EMPTY)) == NoopSpan.INSTANCE); + assertTrue(tracer.startScopedSpan(SpanCreationContext.internal().name("foo").attributes(Attributes.EMPTY)) == ScopedSpan.NO_OP); + assertTrue(tracer.startScopedSpan(SpanCreationContext.internal().name("foo").attributes(Attributes.EMPTY)) == ScopedSpan.NO_OP); + assertTrue( + tracer.withSpanInScope( + tracer.startSpan(SpanCreationContext.internal().name("foo").attributes(Attributes.EMPTY)) + ) == SpanScope.NO_OP + ); + } + + public void testGetTracerWithUnavailableTracingTelemetry() { + Settings settings = Settings.builder().put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), false).build(); + TelemetrySettings telemetrySettings = new TelemetrySettings(settings, new ClusterSettings(settings, getClusterSettings())); + Telemetry mockTelemetry = mock(Telemetry.class); + 
when(mockTelemetry.getTracingTelemetry()).thenReturn(mock(TracingTelemetry.class)); + tracerFactory = new TracerFactory(telemetrySettings, Optional.empty(), new ThreadContext(Settings.EMPTY)); + + Tracer tracer = tracerFactory.getTracer(); + + assertTrue(tracer instanceof NoopTracer); + assertTrue(tracer.startScopedSpan(SpanCreationContext.internal().name("foo").attributes(Attributes.EMPTY)) == ScopedSpan.NO_OP); } public void testGetTracerWithAvailableTracingTelemetryReturnsWrappedTracer() { @@ -61,6 +83,18 @@ public void testGetTracerWithAvailableTracingTelemetryReturnsWrappedTracer() { } + public void testNullTracer() { + Settings settings = Settings.builder().put(TelemetrySettings.TRACER_FEATURE_ENABLED_SETTING.getKey(), false).build(); + TelemetrySettings telemetrySettings = new TelemetrySettings(settings, new ClusterSettings(settings, getClusterSettings())); + Telemetry mockTelemetry = mock(Telemetry.class); + when(mockTelemetry.getTracingTelemetry()).thenReturn(null); + tracerFactory = new TracerFactory(telemetrySettings, Optional.of(mockTelemetry), new ThreadContext(Settings.EMPTY)); + + Tracer tracer = tracerFactory.getTracer(); + assertTrue(tracer instanceof NoopTracer); + + } + private Set<Setting<?>> getClusterSettings() { Set<Setting<?>> allTracerSettings = new HashSet<>(); ClusterSettings.FEATURE_FLAGGED_CLUSTER_SETTINGS.get(List.of(FeatureFlags.TELEMETRY)).stream().forEach((allTracerSettings::add)); diff --git a/server/src/test/java/org/opensearch/telemetry/tracing/WrappedTracerTests.java b/server/src/test/java/org/opensearch/telemetry/tracing/WrappedTracerTests.java index d1abc5a4d98aa..8606104d26103 100644 --- a/server/src/test/java/org/opensearch/telemetry/tracing/WrappedTracerTests.java +++ b/server/src/test/java/org/opensearch/telemetry/tracing/WrappedTracerTests.java @@ -13,6 +13,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.telemetry.tracing.attributes.Attributes; import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; @@ -21,9 +22,11 @@ import java.util.List; import java.util.Set; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class WrappedTracerTests extends OpenSearchTestCase { @@ -33,9 +36,11 @@ public void testStartSpanWithTracingDisabledInvokesNoopTracer() throws Exception DefaultTracer mockDefaultTracer = mock(DefaultTracer.class); try (WrappedTracer wrappedTracer = new WrappedTracer(telemetrySettings, mockDefaultTracer)) { - wrappedTracer.startSpan("foo"); + SpanCreationContext spanCreationContext = SpanCreationContext.internal().name("foo"); + wrappedTracer.startSpan(spanCreationContext); assertTrue(wrappedTracer.getDelegateTracer() instanceof NoopTracer); - verify(mockDefaultTracer, never()).startSpan("foo"); + assertFalse(wrappedTracer.isRecording()); + verify(mockDefaultTracer, never()).startSpan(SpanCreationContext.internal().name("foo")); } } @@ -43,12 +48,28 @@ public void testStartSpanWithTracingEnabledInvokesDefaultTracer() throws Excepti Settings settings = Settings.builder().put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), true).build(); TelemetrySettings telemetrySettings = new TelemetrySettings(settings, new ClusterSettings(settings, getClusterSettings())); DefaultTracer mockDefaultTracer = 
mock(DefaultTracer.class); + when(mockDefaultTracer.isRecording()).thenReturn(true); + try (WrappedTracer wrappedTracer = new WrappedTracer(telemetrySettings, mockDefaultTracer)) { + SpanCreationContext spanCreationContext = SpanCreationContext.internal().name("foo"); + wrappedTracer.startSpan(spanCreationContext); + + assertTrue(wrappedTracer.getDelegateTracer() instanceof DefaultTracer); + assertTrue(wrappedTracer.isRecording()); + verify(mockDefaultTracer).startSpan(eq(spanCreationContext)); + } + } + public void testStartSpanWithTracingEnabledInvokesDefaultTracerWithAttr() throws Exception { + Settings settings = Settings.builder().put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), true).build(); + TelemetrySettings telemetrySettings = new TelemetrySettings(settings, new ClusterSettings(settings, getClusterSettings())); + DefaultTracer mockDefaultTracer = mock(DefaultTracer.class); + Attributes attributes = Attributes.create().addAttribute("key", "value"); try (WrappedTracer wrappedTracer = new WrappedTracer(telemetrySettings, mockDefaultTracer)) { - wrappedTracer.startSpan("foo"); + SpanCreationContext spanCreationContext = SpanCreationContext.internal().name("foo"); + wrappedTracer.startSpan(spanCreationContext); assertTrue(wrappedTracer.getDelegateTracer() instanceof DefaultTracer); - verify(mockDefaultTracer).startSpan("foo"); + verify(mockDefaultTracer).startSpan(spanCreationContext); } } diff --git a/server/src/test/java/org/opensearch/test/NoopDiscovery.java b/server/src/test/java/org/opensearch/test/NoopDiscovery.java index c1127aed95c1c..42d3f1887ab4d 100644 --- a/server/src/test/java/org/opensearch/test/NoopDiscovery.java +++ b/server/src/test/java/org/opensearch/test/NoopDiscovery.java @@ -31,10 +31,10 @@ package org.opensearch.test; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterChangedEvent; -import org.opensearch.common.component.Lifecycle; -import org.opensearch.common.component.LifecycleListener; +import org.opensearch.common.lifecycle.Lifecycle; +import org.opensearch.common.lifecycle.LifecycleListener; +import org.opensearch.core.action.ActionListener; import org.opensearch.discovery.Discovery; import org.opensearch.discovery.DiscoveryStats; diff --git a/server/src/test/java/org/opensearch/test/geo/RandomShapeGenerator.java b/server/src/test/java/org/opensearch/test/geo/RandomShapeGenerator.java index 2772f9d45f618..bdce3fc62df09 100644 --- a/server/src/test/java/org/opensearch/test/geo/RandomShapeGenerator.java +++ b/server/src/test/java/org/opensearch/test/geo/RandomShapeGenerator.java @@ -33,10 +33,8 @@ package org.opensearch.test.geo; import com.carrotsearch.randomizedtesting.generators.RandomNumbers; + import org.opensearch.OpenSearchException; -import org.locationtech.jts.algorithm.ConvexHull; -import org.locationtech.jts.geom.Coordinate; -import org.locationtech.jts.geom.Geometry; import org.opensearch.common.geo.builders.CoordinatesBuilder; import org.opensearch.common.geo.builders.GeometryCollectionBuilder; import org.opensearch.common.geo.builders.LineStringBuilder; @@ -47,6 +45,12 @@ import org.opensearch.common.geo.builders.ShapeBuilder; import org.opensearch.search.geo.GeoShapeQueryTests; import org.junit.Assert; + +import java.util.Random; + +import org.locationtech.jts.algorithm.ConvexHull; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.Geometry; import org.locationtech.spatial4j.context.jts.JtsSpatialContext; import org.locationtech.spatial4j.distance.DistanceUtils; import 
org.locationtech.spatial4j.exception.InvalidShapeException; @@ -54,8 +58,6 @@ import org.locationtech.spatial4j.shape.Rectangle; import org.locationtech.spatial4j.shape.impl.Range; -import java.util.Random; - import static org.locationtech.spatial4j.shape.SpatialRelation.CONTAINS; /** diff --git a/server/src/test/java/org/opensearch/test/hamcrest/OpenSearchGeoAssertions.java b/server/src/test/java/org/opensearch/test/hamcrest/OpenSearchGeoAssertions.java index 76ea3e1ea236d..1470ecec67688 100644 --- a/server/src/test/java/org/opensearch/test/hamcrest/OpenSearchGeoAssertions.java +++ b/server/src/test/java/org/opensearch/test/hamcrest/OpenSearchGeoAssertions.java @@ -41,6 +41,11 @@ import org.opensearch.geometry.MultiLine; import org.hamcrest.Matcher; import org.junit.Assert; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + import org.locationtech.jts.geom.Coordinate; import org.locationtech.jts.geom.Geometry; import org.locationtech.jts.geom.LineString; @@ -54,10 +59,6 @@ import org.locationtech.spatial4j.shape.jts.JtsGeometry; import org.locationtech.spatial4j.shape.jts.JtsPoint; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; - import static org.hamcrest.Matchers.instanceOf; import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; diff --git a/server/src/test/java/org/opensearch/test/search/aggregations/bucket/SharedSignificantTermsTestMethods.java b/server/src/test/java/org/opensearch/test/search/aggregations/bucket/SharedSignificantTermsTestMethods.java index 34774758dcd0e..605050fffab4e 100644 --- a/server/src/test/java/org/opensearch/test/search/aggregations/bucket/SharedSignificantTermsTestMethods.java +++ b/server/src/test/java/org/opensearch/test/search/aggregations/bucket/SharedSignificantTermsTestMethods.java @@ -34,7 +34,7 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; -import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.search.aggregations.Aggregation; import org.opensearch.search.aggregations.bucket.terms.SignificantTerms; import org.opensearch.search.aggregations.bucket.terms.StringTerms; @@ -48,12 +48,12 @@ import java.util.Map; import java.util.concurrent.ExecutionException; +import static org.opensearch.search.aggregations.AggregationBuilders.significantTerms; +import static org.opensearch.search.aggregations.AggregationBuilders.terms; import static org.opensearch.test.OpenSearchIntegTestCase.client; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; -import static org.opensearch.search.aggregations.AggregationBuilders.significantTerms; -import static org.opensearch.search.aggregations.AggregationBuilders.terms; public class SharedSignificantTermsTestMethods { public static final String INDEX_NAME = "testidx"; @@ -99,7 +99,7 @@ public static void index01Docs(String type, String settings, OpenSearchIntegTest } assertAcked( testCase.prepareCreate(INDEX_NAME) - .setSettings(settings, XContentType.JSON) + .setSettings(settings, MediaTypeRegistry.JSON) .setMapping("text", textMappings, CLASS_FIELD, "type=keyword") ); String[] gb = { "0", "1" }; @@ -122,7 +122,7 @@ public static void index01DocsWithRouting(String type, String settings, OpenSear } assertAcked( 
testCase.prepareCreate(INDEX_NAME) - .setSettings(settings, XContentType.JSON) + .setSettings(settings, MediaTypeRegistry.JSON) .setMapping("text", textMappings, CLASS_FIELD, "type=keyword") ); String[] gb = { "0", "1" }; diff --git a/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java b/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java index 92bd15d818bca..97326377ce245 100644 --- a/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java +++ b/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java @@ -32,10 +32,13 @@ package org.opensearch.threadpool; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor; +import java.util.Collection; import java.util.HashMap; import java.util.Map; import java.util.concurrent.CountDownLatch; @@ -43,14 +46,29 @@ import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; import java.util.function.Function; +import java.util.stream.Collectors; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.Matchers.equalTo; public class ScalingThreadPoolTests extends OpenSearchThreadPoolTestCase { + @ParametersFactory + public static Collection<Object[]> scalingThreadPools() { + return ThreadPool.THREAD_POOL_TYPES.entrySet() + .stream() + .filter(t -> t.getValue().equals(ThreadPool.ThreadPoolType.SCALING)) + .map(e -> new String[] { e.getKey() }) + .collect(Collectors.toList()); + } + + private final String threadPoolName; + + public ScalingThreadPoolTests(String threadPoolName) { + this.threadPoolName = threadPoolName; + } + public void testScalingThreadPoolConfiguration() throws InterruptedException { - final String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.SCALING); final Settings.Builder builder = Settings.builder(); final int core; @@ -132,15 +150,15 @@ private int expectedSize(final String threadPoolName, final int numberOfProcesso sizes.put(ThreadPool.Names.SNAPSHOT, ThreadPool::halfAllocatedProcessorsMaxFive); sizes.put(ThreadPool.Names.FETCH_SHARD_STARTED, ThreadPool::twiceAllocatedProcessors); sizes.put(ThreadPool.Names.FETCH_SHARD_STORE, ThreadPool::twiceAllocatedProcessors); - sizes.put(ThreadPool.Names.TRANSLOG_TRANSFER, ThreadPool::halfAllocatedProcessorsMaxTen); + sizes.put(ThreadPool.Names.TRANSLOG_TRANSFER, ThreadPool::halfAllocatedProcessors); sizes.put(ThreadPool.Names.TRANSLOG_SYNC, n -> 4 * n); - sizes.put(ThreadPool.Names.REMOTE_PURGE, ThreadPool::halfAllocatedProcessorsMaxFive); - sizes.put(ThreadPool.Names.REMOTE_REFRESH_RETRY, ThreadPool::halfAllocatedProcessorsMaxTen); + sizes.put(ThreadPool.Names.REMOTE_PURGE, ThreadPool::halfAllocatedProcessors); + sizes.put(ThreadPool.Names.REMOTE_REFRESH_RETRY, ThreadPool::halfAllocatedProcessors); + sizes.put(ThreadPool.Names.REMOTE_RECOVERY, ThreadPool::twiceAllocatedProcessors); return sizes.get(threadPoolName).apply(numberOfProcessors); } public void testScalingThreadPoolIsBounded() throws InterruptedException { - final String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.SCALING); final int size = randomIntBetween(32, 512); final Settings settings = Settings.builder().put("thread_pool." 
+ threadPoolName + ".max", size).build(); runScalingThreadPoolTest(settings, (clusterSettings, threadPool) -> { @@ -170,7 +188,6 @@ public void testScalingThreadPoolIsBounded() throws InterruptedException { } public void testScalingThreadPoolThreadsAreTerminatedAfterKeepAlive() throws InterruptedException { - final String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.SCALING); final int min = "generic".equals(threadPoolName) ? 4 : 1; final Settings settings = Settings.builder() .put("thread_pool." + threadPoolName + ".max", 128) diff --git a/server/src/test/java/org/opensearch/threadpool/ScheduleWithFixedDelayTests.java b/server/src/test/java/org/opensearch/threadpool/ScheduleWithFixedDelayTests.java index cfa0b7738e2ac..92262b022ac07 100644 --- a/server/src/test/java/org/opensearch/threadpool/ScheduleWithFixedDelayTests.java +++ b/server/src/test/java/org/opensearch/threadpool/ScheduleWithFixedDelayTests.java @@ -40,8 +40,8 @@ import org.opensearch.node.Node; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.Scheduler.Cancellable; -import org.opensearch.threadpool.ThreadPool.Names; import org.opensearch.threadpool.Scheduler.ReschedulingRunnable; +import org.opensearch.threadpool.ThreadPool.Names; import org.junit.After; import org.junit.Before; diff --git a/server/src/test/java/org/opensearch/threadpool/ThreadPoolSerializationTests.java b/server/src/test/java/org/opensearch/threadpool/ThreadPoolSerializationTests.java index 935c8f6910c21..d083546fbddbe 100644 --- a/server/src/test/java/org/opensearch/threadpool/ThreadPoolSerializationTests.java +++ b/server/src/test/java/org/opensearch/threadpool/ThreadPoolSerializationTests.java @@ -32,15 +32,15 @@ package org.opensearch.threadpool; import org.opensearch.Version; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.SizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; diff --git a/server/src/test/java/org/opensearch/threadpool/ThreadPoolStatsTests.java b/server/src/test/java/org/opensearch/threadpool/ThreadPoolStatsTests.java index e99a7aa462cbb..869d7ec59b081 100644 --- a/server/src/test/java/org/opensearch/threadpool/ThreadPoolStatsTests.java +++ b/server/src/test/java/org/opensearch/threadpool/ThreadPoolStatsTests.java @@ -33,11 +33,11 @@ package org.opensearch.threadpool; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -51,13 +51,13 @@ public class ThreadPoolStatsTests extends OpenSearchTestCase { public void testThreadPoolStatsSort() throws 
IOException { List<ThreadPoolStats.Stats> stats = new ArrayList<>(); - stats.add(new ThreadPoolStats.Stats("z", -1, 0, 0, 0, 0, 0L)); - stats.add(new ThreadPoolStats.Stats("m", 3, 0, 0, 0, 0, 0L)); - stats.add(new ThreadPoolStats.Stats("m", 1, 0, 0, 0, 0, 0L)); - stats.add(new ThreadPoolStats.Stats("d", -1, 0, 0, 0, 0, 0L)); - stats.add(new ThreadPoolStats.Stats("m", 2, 0, 0, 0, 0, 0L)); - stats.add(new ThreadPoolStats.Stats("t", -1, 0, 0, 0, 0, 0L)); - stats.add(new ThreadPoolStats.Stats("a", -1, 0, 0, 0, 0, 0L)); + stats.add(new ThreadPoolStats.Stats("z", -1, 0, 0, 0, 0, 0L, 0L)); + stats.add(new ThreadPoolStats.Stats("m", 3, 0, 0, 0, 0, 0L, 0L)); + stats.add(new ThreadPoolStats.Stats("m", 1, 0, 0, 0, 0, 0L, 0L)); + stats.add(new ThreadPoolStats.Stats("d", -1, 0, 0, 0, 0, 0L, 0L)); + stats.add(new ThreadPoolStats.Stats("m", 2, 0, 0, 0, 0, 0L, 0L)); + stats.add(new ThreadPoolStats.Stats("t", -1, 0, 0, 0, 0, 0L, 0L)); + stats.add(new ThreadPoolStats.Stats("a", -1, 0, 0, 0, 0, 0L, 0L)); List<ThreadPoolStats.Stats> copy = new ArrayList<>(stats); Collections.sort(copy); @@ -79,14 +79,14 @@ public void testThreadPoolStatsToXContent() throws IOException { try (BytesStreamOutput os = new BytesStreamOutput()) { List<ThreadPoolStats.Stats> stats = new ArrayList<>(); - stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.SEARCH, -1, 0, 0, 0, 0, 0L)); - stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.WARMER, -1, 0, 0, 0, 0, 0L)); - stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.GENERIC, -1, 0, 0, 0, 0, 0L)); - stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.FORCE_MERGE, -1, 0, 0, 0, 0, 0L)); - stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.SAME, -1, 0, 0, 0, 0, 0L)); + stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.SEARCH, -1, 0, 0, 0, 0, 0L, 0L)); + stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.WARMER, -1, 0, 0, 0, 0, 0L, -1L)); + stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.GENERIC, -1, 0, 0, 0, 0, 0L, -1L)); + stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.FORCE_MERGE, -1, 0, 0, 0, 0, 0L, -1L)); + stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.SAME, -1, 0, 0, 0, 0, 0L, -1L)); ThreadPoolStats threadPoolStats = new ThreadPoolStats(stats); - try (XContentBuilder builder = new XContentBuilder(XContentType.JSON.xContent(), os)) { + try (XContentBuilder builder = new XContentBuilder(MediaTypeRegistry.JSON.xContent(), os)) { builder.startObject(); threadPoolStats.toXContent(builder, ToXContent.EMPTY_PARAMS); builder.endObject(); diff --git a/server/src/test/java/org/opensearch/threadpool/ThreadPoolTests.java b/server/src/test/java/org/opensearch/threadpool/ThreadPoolTests.java index cd23e664d1e72..658de5ec49500 100644 --- a/server/src/test/java/org/opensearch/threadpool/ThreadPoolTests.java +++ b/server/src/test/java/org/opensearch/threadpool/ThreadPoolTests.java @@ -34,8 +34,8 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.FutureUtils; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.test.OpenSearchTestCase; import java.util.concurrent.CountDownLatch; diff --git a/server/src/test/java/org/opensearch/transport/ClusterConnectionManagerTests.java b/server/src/test/java/org/opensearch/transport/ClusterConnectionManagerTests.java index bf47fc2cc9b45..1d734a56ef189 100644 --- a/server/src/test/java/org/opensearch/transport/ClusterConnectionManagerTests.java 
+++ b/server/src/test/java/org/opensearch/transport/ClusterConnectionManagerTests.java @@ -33,12 +33,12 @@ package org.opensearch.transport; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; import org.junit.After; @@ -58,8 +58,8 @@ import java.util.concurrent.atomic.AtomicReference; import static org.mockito.Mockito.any; -import static org.mockito.Mockito.eq; import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; public class ClusterConnectionManagerTests extends OpenSearchTestCase { diff --git a/server/src/test/java/org/opensearch/transport/CompressibleBytesOutputStreamTests.java b/server/src/test/java/org/opensearch/transport/CompressibleBytesOutputStreamTests.java index b9071d5851315..89018b7353e7c 100644 --- a/server/src/test/java/org/opensearch/transport/CompressibleBytesOutputStreamTests.java +++ b/server/src/test/java/org/opensearch/transport/CompressibleBytesOutputStreamTests.java @@ -32,12 +32,12 @@ package org.opensearch.transport; +import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.compress.CompressorFactory; import org.opensearch.core.common.io.stream.BytesStream; -import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.InputStreamStreamInput; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.compress.CompressorRegistry; import org.opensearch.test.OpenSearchTestCase; import java.io.EOFException; @@ -56,7 +56,7 @@ public void testStreamWithoutCompression() throws IOException { // Closing compression stream does not close underlying stream stream.close(); - assertFalse(CompressorFactory.defaultCompressor().isCompressed(bytesRef)); + assertFalse(CompressorRegistry.defaultCompressor().isCompressed(bytesRef)); StreamInput streamInput = bytesRef.streamInput(); byte[] actualBytes = new byte[expectedBytes.length]; @@ -83,10 +83,10 @@ public void testStreamWithCompression() throws IOException { BytesReference bytesRef = stream.materializeBytes(); stream.close(); - assertTrue(CompressorFactory.defaultCompressor().isCompressed(bytesRef)); + assertTrue(CompressorRegistry.defaultCompressor().isCompressed(bytesRef)); StreamInput streamInput = new InputStreamStreamInput( - CompressorFactory.defaultCompressor().threadLocalInputStream(bytesRef.streamInput()) + CompressorRegistry.defaultCompressor().threadLocalInputStream(bytesRef.streamInput()) ); byte[] actualBytes = new byte[expectedBytes.length]; streamInput.readBytes(actualBytes, 0, expectedBytes.length); @@ -110,7 +110,7 @@ public void testCompressionWithCallingMaterializeFails() throws IOException { stream.write(expectedBytes); StreamInput streamInput = new InputStreamStreamInput( - CompressorFactory.defaultCompressor().threadLocalInputStream(bStream.bytes().streamInput()) + CompressorRegistry.defaultCompressor().threadLocalInputStream(bStream.bytes().streamInput()) ); byte[] actualBytes = new 
byte[expectedBytes.length]; EOFException e = expectThrows(EOFException.class, () -> streamInput.readBytes(actualBytes, 0, expectedBytes.length)); diff --git a/server/src/test/java/org/opensearch/transport/InboundAggregatorTests.java b/server/src/test/java/org/opensearch/transport/InboundAggregatorTests.java index dfb14be54a5b9..2dd98a8efe2a3 100644 --- a/server/src/test/java/org/opensearch/transport/InboundAggregatorTests.java +++ b/server/src/test/java/org/opensearch/transport/InboundAggregatorTests.java @@ -33,14 +33,14 @@ package org.opensearch.transport; import org.opensearch.Version; -import org.opensearch.common.breaker.CircuitBreakingException; import org.opensearch.common.breaker.TestCircuitBreaker; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.bytes.ReleasableBytesReference; import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.common.breaker.CircuitBreakingException; +import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; diff --git a/server/src/test/java/org/opensearch/transport/InboundDecoderTests.java b/server/src/test/java/org/opensearch/transport/InboundDecoderTests.java index 4d8955650f8be..4d671443f396e 100644 --- a/server/src/test/java/org/opensearch/transport/InboundDecoderTests.java +++ b/server/src/test/java/org/opensearch/transport/InboundDecoderTests.java @@ -33,12 +33,13 @@ package org.opensearch.transport; import org.opensearch.Version; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.bytes.ReleasableBytesReference; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.transport.TransportMessage; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; diff --git a/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java b/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java index d4959472bd08a..e002297911788 100644 --- a/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java +++ b/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java @@ -37,21 +37,22 @@ import org.apache.lucene.util.BytesRef; import org.opensearch.OpenSearchException; import org.opensearch.Version; -import org.opensearch.action.ActionListener; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.bytes.ReleasableBytesReference; import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.InputStreamStreamInput; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import 
org.opensearch.core.common.io.stream.InputStreamStreamInput; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.tasks.TaskManager; -import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.MockLogAppender; +import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -90,7 +91,7 @@ public void setUp() throws Exception { super.setUp(); taskManager = new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet()); channel = new FakeTcpChannel(randomBoolean(), buildNewFakeTransportAddress().address(), buildNewFakeTransportAddress().address()) { - public void sendMessage(BytesReference reference, org.opensearch.action.ActionListener<Void> listener) { + public void sendMessage(BytesReference reference, ActionListener<Void> listener) { super.sendMessage(reference, listener); if (listener != null) { listener.onResponse(null); @@ -117,7 +118,8 @@ public void sendMessage(BytesReference reference, org.opensearch.action.ActionLi handshaker, keepAlive, requestHandlers, - responseHandlers + responseHandlers, + NoopTracer.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/transport/InboundPipelineTests.java b/server/src/test/java/org/opensearch/transport/InboundPipelineTests.java index 33fe928d5c09d..ae4b537223394 100644 --- a/server/src/test/java/org/opensearch/transport/InboundPipelineTests.java +++ b/server/src/test/java/org/opensearch/transport/InboundPipelineTests.java @@ -33,21 +33,21 @@ package org.opensearch.transport; import org.opensearch.Version; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.breaker.CircuitBreakingException; -import org.opensearch.common.breaker.NoopCircuitBreaker; import org.opensearch.common.breaker.TestCircuitBreaker; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.bytes.ReleasableBytesReference; import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.io.Streams; -import org.opensearch.common.lease.Releasable; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.CircuitBreakingException; +import org.opensearch.core.common.breaker.NoopCircuitBreaker; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/transport/OutboundHandlerTests.java b/server/src/test/java/org/opensearch/transport/OutboundHandlerTests.java index c61ac51ba57d4..ff99435f765d8 100644 --- a/server/src/test/java/org/opensearch/transport/OutboundHandlerTests.java +++ b/server/src/test/java/org/opensearch/transport/OutboundHandlerTests.java @@ -34,21 +34,22 @@ import org.opensearch.OpenSearchException; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import 
org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.breaker.NoopCircuitBreaker; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.bytes.ReleasableBytesReference; import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.io.Streams; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.NoopCircuitBreaker; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/test/java/org/opensearch/transport/ProxyConnectionStrategyTests.java b/server/src/test/java/org/opensearch/transport/ProxyConnectionStrategyTests.java index 1451e9466778b..1c9880ed14714 100644 --- a/server/src/test/java/org/opensearch/transport/ProxyConnectionStrategyTests.java +++ b/server/src/test/java/org/opensearch/transport/ProxyConnectionStrategyTests.java @@ -41,7 +41,8 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; @@ -82,7 +83,7 @@ public MockTransportService startTransport(final String id, final Version versio .put("node.name", id) .put(settings) .build(); - MockTransportService newService = MockTransportService.createNewService(s, version, threadPool); + MockTransportService newService = MockTransportService.createNewService(s, version, threadPool, NoopTracer.INSTANCE); try { newService.start(); newService.acceptIncomingRequests(); @@ -99,7 +100,14 @@ public void testProxyStrategyWillOpenExpectedNumberOfConnectionsToAddress() { try (MockTransportService transport1 = startTransport("node1", Version.CURRENT)) { TransportAddress address1 = transport1.boundAddress().publishAddress(); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -138,7 +146,14 @@ public void testProxyStrategyWillOpenNewConnectionsOnDisconnect() throws Excepti TransportAddress address1 = transport1.boundAddress().publishAddress(); TransportAddress address2 = transport2.boundAddress().publishAddress(); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, 
Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -200,7 +215,14 @@ public void testConnectFailsWithIncompatibleNodes() { try (MockTransportService transport1 = startTransport("incompatible-node", incompatibleVersion)) { TransportAddress address1 = transport1.boundAddress().publishAddress(); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -240,7 +262,14 @@ public void testClusterNameValidationPreventConnectingToDifferentClusters() thro TransportAddress address1 = transport1.boundAddress().publishAddress(); TransportAddress address2 = transport2.boundAddress().publishAddress(); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -303,7 +332,14 @@ public void testProxyStrategyWillResolveAddressesEachConnect() throws Exception return address; }; - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -338,7 +374,14 @@ public void testProxyStrategyWillNeedToBeRebuiltIfNumOfSocketsOrAddressesOrServe try (MockTransportService remoteTransport = startTransport("node1", Version.CURRENT)) { TransportAddress remoteAddress = remoteTransport.boundAddress().publishAddress(); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -441,7 +484,14 @@ public void testServerNameAttributes() { try (MockTransportService transport1 = startTransport("node1", Version.CURRENT, bindSettings)) { TransportAddress address1 = transport1.boundAddress().publishAddress(); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/transport/RemoteClusterAwareClientTests.java b/server/src/test/java/org/opensearch/transport/RemoteClusterAwareClientTests.java index 8e11c53e00b35..7595982837365 100644 --- a/server/src/test/java/org/opensearch/transport/RemoteClusterAwareClientTests.java +++ 
b/server/src/test/java/org/opensearch/transport/RemoteClusterAwareClientTests.java @@ -33,7 +33,6 @@ package org.opensearch.transport; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.LatchedActionListener; import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsRequest; import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsResponse; @@ -41,6 +40,8 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; @@ -81,7 +82,14 @@ public void testSearchShards() throws Exception { Collections.shuffle(knownNodes, random()); Settings.Builder builder = Settings.builder(); builder.putList("cluster.remote.cluster1.seeds", seedTransport.getLocalDiscoNode().getAddress().toString()); - try (MockTransportService service = MockTransportService.createNewService(builder.build(), Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService( + builder.build(), + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); service.acceptIncomingRequests(); @@ -121,7 +129,14 @@ public void testSearchShardsThreadContextHeader() { Collections.shuffle(knownNodes, random()); Settings.Builder builder = Settings.builder(); builder.putList("cluster.remote.cluster1.seeds", seedTransport.getLocalDiscoNode().getAddress().toString()); - try (MockTransportService service = MockTransportService.createNewService(builder.build(), Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService( + builder.build(), + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); service.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/transport/RemoteClusterClientTests.java b/server/src/test/java/org/opensearch/transport/RemoteClusterClientTests.java index b89d652510850..f3b7f9916d460 100644 --- a/server/src/test/java/org/opensearch/transport/RemoteClusterClientTests.java +++ b/server/src/test/java/org/opensearch/transport/RemoteClusterClientTests.java @@ -39,6 +39,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.common.settings.Settings; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.junit.annotations.TestLogging; import org.opensearch.test.transport.MockTransportService; @@ -79,7 +80,14 @@ public void testConnectAndExecuteRequest() throws Exception { .put(onlyRole(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE)) .put("cluster.remote.test.seeds", remoteNode.getAddress().getAddress() + ":" + remoteNode.getAddress().getPort()) .build(); - try (MockTransportService service = MockTransportService.createNewService(localSettings, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService( + localSettings, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); // following two log lines added to investigate #41745, can be removed once issue is closed logger.info("Start accepting 
incoming requests on local transport service"); @@ -118,7 +126,14 @@ public void testEnsureWeReconnect() throws Exception { .put(onlyRole(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE)) .put("cluster.remote.test.seeds", remoteNode.getAddress().getAddress() + ":" + remoteNode.getAddress().getPort()) .build(); - try (MockTransportService service = MockTransportService.createNewService(localSettings, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService( + localSettings, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); // this test is not perfect since we might reconnect concurrently but it will fail most of the time if we don't have // the right calls in place in the RemoteAwareClient @@ -147,7 +162,9 @@ public void testEnsureWeReconnect() throws Exception { public void testRemoteClusterServiceNotEnabled() { final Settings settings = removeRoles(Collections.singleton(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE)); - try (MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, NoopTracer.INSTANCE) + ) { service.start(); service.acceptIncomingRequests(); final RemoteClusterService remoteClusterService = service.getRemoteClusterService(); diff --git a/server/src/test/java/org/opensearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/opensearch/transport/RemoteClusterConnectionTests.java index 4106ebd9988c9..bb653439ec21e 100644 --- a/server/src/test/java/org/opensearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/opensearch/transport/RemoteClusterConnectionTests.java @@ -34,7 +34,6 @@ import org.apache.lucene.search.TotalHits; import org.apache.lucene.store.AlreadyClosedException; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsAction; import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsGroup; import org.opensearch.action.admin.cluster.shards.ClusterSearchShardsRequest; @@ -51,21 +50,23 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; -import org.opensearch.common.Strings; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexNotFoundException; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; import org.opensearch.search.aggregations.InternalAggregations; import org.opensearch.search.internal.InternalSearchResponse; +import 
org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; @@ -126,7 +127,7 @@ public static MockTransportService startTransport( boolean success = false; final Settings s = Settings.builder().put(settings).put("node.name", id).build(); ClusterName clusterName = ClusterName.CLUSTER_NAME_SETTING.get(s); - MockTransportService newService = MockTransportService.createNewService(s, version, threadPool, null); + MockTransportService newService = MockTransportService.createNewService(s, version, threadPool, NoopTracer.INSTANCE); try { newService.registerRequestHandler( ClusterSearchShardsAction.NAME, @@ -231,7 +232,14 @@ public void run() { }; t.start(); - try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); service.acceptIncomingRequests(); CountDownLatch listenerCalled = new CountDownLatch(1); @@ -280,7 +288,14 @@ public void testCloseWhileConcurrentlyConnecting() throws IOException, Interrupt List<String> seedNodes = addresses(seedNode1, seedNode); Collections.shuffle(seedNodes, random()); - try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); service.acceptIncomingRequests(); String clusterAlias = "test-cluster"; @@ -367,7 +382,14 @@ public void testGetConnectionInfo() throws Exception { List<String> seedNodes = addresses(node3, node1, node2); Collections.shuffle(seedNodes, random()); - try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); service.acceptIncomingRequests(); int maxNumConnections = randomIntBetween(1, 5); @@ -463,14 +485,14 @@ public void testRenderConnectionInfoXContent() throws IOException { "{\"test_cluster\":{\"connected\":true,\"mode\":\"sniff\",\"seeds\":[\"seed:1\",\"seed:2\"]," + "\"num_nodes_connected\":2,\"max_connections_per_cluster\":3,\"initial_connect_timeout\":\"30m\"," + "\"skip_unavailable\":true}}", - Strings.toString(builder) + builder.toString() ); } else { assertEquals( "{\"test_cluster\":{\"connected\":true,\"mode\":\"proxy\",\"proxy_address\":\"seed:1\"," + "\"server_name\":\"the_server_name\",\"num_proxy_sockets_connected\":16,\"max_proxy_socket_connections\":18," + "\"initial_connect_timeout\":\"30m\",\"skip_unavailable\":true}}", - Strings.toString(builder) + builder.toString() ); } } @@ -480,7 +502,14 @@ public void testCollectNodes() throws Exception { try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT)) { DiscoveryNode seedNode = seedTransport.getLocalDiscoNode(); knownNodes.add(seedTransport.getLocalDiscoNode()); - try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = 
MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); service.acceptIncomingRequests(); String clusterAlias = "test-cluster"; @@ -515,7 +544,14 @@ public void testNoChannelsExceptREG() throws Exception { try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT)) { DiscoveryNode seedNode = seedTransport.getLocalDiscoNode(); knownNodes.add(seedTransport.getLocalDiscoNode()); - try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); service.acceptIncomingRequests(); String clusterAlias = "test-cluster"; @@ -568,7 +604,14 @@ public void testConnectedNodesConcurrentAccess() throws IOException, Interrupted ); Collections.shuffle(seedNodes, random()); - try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); service.acceptIncomingRequests(); @@ -645,7 +688,14 @@ public void testGetConnection() throws Exception { DiscoveryNode disconnectedNode = disconnectedTransport.getLocalNode(); - try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); service.acceptIncomingRequests(); String clusterAlias = "test-cluster"; @@ -694,7 +744,7 @@ private static Settings buildSniffSettings(String clusterAlias, List<String> see builder.put(RemoteConnectionStrategy.REMOTE_CONNECTION_MODE.getConcreteSettingForNamespace(clusterAlias).getKey(), "sniff"); builder.put( SniffConnectionStrategy.REMOTE_CLUSTER_SEEDS.getConcreteSettingForNamespace(clusterAlias).getKey(), - org.opensearch.core.common.Strings.collectionToCommaDelimitedString(seedNodes) + Strings.collectionToCommaDelimitedString(seedNodes) ); return builder.build(); } diff --git a/server/src/test/java/org/opensearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/opensearch/transport/RemoteClusterServiceTests.java index 52bf7aa08fe67..449715189c881 100644 --- a/server/src/test/java/org/opensearch/transport/RemoteClusterServiceTests.java +++ b/server/src/test/java/org/opensearch/transport/RemoteClusterServiceTests.java @@ -32,7 +32,6 @@ package org.opensearch.transport; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.OriginalIndices; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.PlainActionFuture; @@ -43,7 +42,9 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; @@ -162,7 +163,7 @@ public 
void testGroupClusterIndices() throws IOException { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -233,7 +234,7 @@ public void testGroupIndices() throws IOException { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -326,7 +327,7 @@ public void testIncrementallyAddClusters() throws IOException { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -393,7 +394,12 @@ public void testDefaultPingSchedule() throws IOException { } Settings settings = settingsBuilder.build(); try ( - MockTransportService transportService = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null) + MockTransportService transportService = MockTransportService.createNewService( + settings, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) ) { transportService.start(); transportService.acceptIncomingRequests(); @@ -436,7 +442,7 @@ public void testCustomPingSchedule() throws IOException { transportSettings, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -474,7 +480,7 @@ public void testChangeSettings() throws Exception { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -523,7 +529,12 @@ public void testRemoteNodeAttribute() throws IOException, InterruptedException { Collections.shuffle(knownNodes, random()); try ( - MockTransportService transportService = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null) + MockTransportService transportService = MockTransportService.createNewService( + settings, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) ) { transportService.start(); transportService.acceptIncomingRequests(); @@ -586,7 +597,12 @@ public void testRemoteNodeRoles() throws IOException, InterruptedException { Collections.shuffle(knownNodes, random()); try ( - MockTransportService transportService = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null) + MockTransportService transportService = MockTransportService.createNewService( + settings, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) ) { transportService.start(); transportService.acceptIncomingRequests(); @@ -654,7 +670,12 @@ public void testCollectNodes() throws InterruptedException, IOException { Collections.shuffle(knownNodes_c2, random()); try ( - MockTransportService transportService = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null) + MockTransportService transportService = MockTransportService.createNewService( + settings, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) ) { transportService.start(); transportService.acceptIncomingRequests(); @@ -901,7 +922,7 @@ public void testReconnectWhenStrategySettingsUpdated() throws Exception { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -983,7 +1004,14 @@ public void testSkipUnavailable() { knownNodes.add(seedNode); Settings.Builder builder = Settings.builder(); builder.putList("cluster.remote.cluster1.seeds", seedTransport.getLocalDiscoNode().getAddress().toString()); - try (MockTransportService service = MockTransportService.createNewService(builder.build(), Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService( + builder.build(), + 
Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); service.acceptIncomingRequests(); @@ -1002,7 +1030,9 @@ public void testSkipUnavailable() { public void testRemoteClusterServiceNotEnabledGetRemoteClusterConnection() { final Settings settings = removeRoles(Collections.singleton(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE)); - try (MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, NoopTracer.INSTANCE) + ) { service.start(); service.acceptIncomingRequests(); final IllegalArgumentException e = expectThrows( @@ -1015,7 +1045,9 @@ public void testRemoteClusterServiceNotEnabledGetRemoteClusterConnection() { public void testRemoteClusterServiceNotEnabledGetCollectNodes() { final Settings settings = removeRoles(Collections.singleton(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE)); - try (MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, NoopTracer.INSTANCE) + ) { service.start(); service.acceptIncomingRequests(); final IllegalArgumentException e = expectThrows( diff --git a/server/src/test/java/org/opensearch/transport/RemoteConnectionManagerTests.java b/server/src/test/java/org/opensearch/transport/RemoteConnectionManagerTests.java index ccba66ff4c45e..c7f3944a2888b 100644 --- a/server/src/test/java/org/opensearch/transport/RemoteConnectionManagerTests.java +++ b/server/src/test/java/org/opensearch/transport/RemoteConnectionManagerTests.java @@ -32,11 +32,11 @@ package org.opensearch.transport; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.test.OpenSearchTestCase; import java.net.InetAddress; diff --git a/server/src/test/java/org/opensearch/transport/RemoteConnectionStrategyTests.java b/server/src/test/java/org/opensearch/transport/RemoteConnectionStrategyTests.java index 3888bcbdd3c99..e2acbcff3db16 100644 --- a/server/src/test/java/org/opensearch/transport/RemoteConnectionStrategyTests.java +++ b/server/src/test/java/org/opensearch/transport/RemoteConnectionStrategyTests.java @@ -32,9 +32,9 @@ package org.opensearch.transport; -import org.opensearch.action.ActionListener; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; import org.opensearch.test.OpenSearchTestCase; import static org.mockito.Mockito.mock; diff --git a/server/src/test/java/org/opensearch/transport/SniffConnectionStrategyTests.java b/server/src/test/java/org/opensearch/transport/SniffConnectionStrategyTests.java index 975da2de82ae6..c89a9d328b419 100644 --- a/server/src/test/java/org/opensearch/transport/SniffConnectionStrategyTests.java +++ b/server/src/test/java/org/opensearch/transport/SniffConnectionStrategyTests.java @@ -47,8 +47,9 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import 
org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; import org.opensearch.test.transport.MockTransportService; @@ -105,7 +106,7 @@ public MockTransportService startTransport( .put(settings) .build(); ClusterName clusterName = ClusterName.CLUSTER_NAME_SETTING.get(s); - MockTransportService newService = MockTransportService.createNewService(s, version, threadPool); + MockTransportService newService = MockTransportService.createNewService(s, version, threadPool, NoopTracer.INSTANCE); try { newService.registerRequestHandler( ClusterStateAction.NAME, @@ -143,7 +144,14 @@ public void testSniffStrategyWillConnectToAndDiscoverNodes() { knownNodes.add(discoverableNode); Collections.shuffle(knownNodes, random()); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -192,7 +200,14 @@ public void testSniffStrategyWillResolveDiscoveryNodesEachConnect() throws Excep return seedNode; }; - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -240,7 +255,14 @@ public void testSniffStrategyWillConnectToMaxAllowedNodesAndOpenNewConnectionsOn knownNodes.add(discoverableNode2); Collections.shuffle(knownNodes, random()); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -297,7 +319,14 @@ public void testDiscoverWithSingleIncompatibleSeedNode() { knownNodes.add(discoverableNode); Collections.shuffle(knownNodes, random()); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -336,7 +365,14 @@ public void testConnectFailsWithIncompatibleNodes() { DiscoveryNode incompatibleSeedNode = incompatibleSeedTransport.getLocalNode(); knownNodes.add(incompatibleSeedNode); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -378,7 +414,14 @@ public void testFilterNodesWithNodePredicate() { DiscoveryNode rejectedNode = 
randomBoolean() ? seedNode : discoverableNode; Collections.shuffle(knownNodes, random()); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -424,7 +467,14 @@ public void testConnectFailsIfNoConnectionsOpened() { knownNodes.add(discoverableNode); closedTransport.close(); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -474,7 +524,14 @@ public void testClusterNameValidationPreventConnectingToDifferentClusters() thro Collections.shuffle(knownNodes, random()); Collections.shuffle(otherKnownNodes, random()); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -542,7 +599,14 @@ public void testMultipleCallsToConnectEnsuresConnection() { knownNodes.add(discoverableNode); Collections.shuffle(knownNodes, random()); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -589,8 +653,18 @@ public void testConfiguredProxyAddressModeWillReplaceNodeAddress() { List<DiscoveryNode> knownNodes = new CopyOnWriteArrayList<>(); try ( MockTransportService accessible = startTransport("seed_node", knownNodes, Version.CURRENT); - MockTransportService unresponsive1 = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool); - MockTransportService unresponsive2 = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool) + MockTransportService unresponsive1 = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ); + MockTransportService unresponsive2 = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) ) { // We start in order to get a valid address + port, but do not start accepting connections as we // will not actually connect to these transports @@ -616,7 +690,14 @@ public void testConfiguredProxyAddressModeWillReplaceNodeAddress() { knownNodes.add(discoverableNode); Collections.shuffle(knownNodes, random()); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -679,7 +760,14 @@ public void 
testSniffStrategyWillNeedToBeRebuiltIfNumOfConnectionsOrSeedsOrProxy knownNodes.add(discoverableNode); Collections.shuffle(knownNodes, random()); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/transport/TcpTransportTests.java b/server/src/test/java/org/opensearch/transport/TcpTransportTests.java index 05d375579f3a5..7ab78cca7d615 100644 --- a/server/src/test/java/org/opensearch/transport/TcpTransportTests.java +++ b/server/src/test/java/org/opensearch/transport/TcpTransportTests.java @@ -38,17 +38,18 @@ import org.opensearch.Version; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.common.component.Lifecycle; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.lifecycle.Lifecycle; import org.opensearch.common.network.NetworkService; import org.opensearch.common.network.NetworkUtils; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.MockPageCacheRecycler; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; -import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.MockLogAppender; +import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.junit.annotations.TestLogging; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -255,7 +256,8 @@ private void testDefaultSeedAddresses(final Settings settings, Matcher<Iterable< new MockPageCacheRecycler(settings), new NoneCircuitBreakerService(), writableRegistry(), - new NetworkService(Collections.emptyList()) + new NetworkService(Collections.emptyList()), + NoopTracer.INSTANCE ) { @Override diff --git a/server/src/test/java/org/opensearch/transport/TransportActionProxyTests.java b/server/src/test/java/org/opensearch/transport/TransportActionProxyTests.java index d1d46d5be13c0..dd2aefd2318f7 100644 --- a/server/src/test/java/org/opensearch/transport/TransportActionProxyTests.java +++ b/server/src/test/java/org/opensearch/transport/TransportActionProxyTests.java @@ -35,10 +35,12 @@ import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; @@ -84,7 +86,7 @@ public void tearDown() throws Exception { } private 
MockTransportService buildService(final Version version) { - MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, version, threadPool, null); + MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, version, threadPool, NoopTracer.INSTANCE); service.start(); service.acceptIncomingRequests(); return service; diff --git a/server/src/test/java/org/opensearch/transport/TransportDecompressorTests.java b/server/src/test/java/org/opensearch/transport/TransportDecompressorTests.java index 35caedae00edb..9811c0f690800 100644 --- a/server/src/test/java/org/opensearch/transport/TransportDecompressorTests.java +++ b/server/src/test/java/org/opensearch/transport/TransportDecompressorTests.java @@ -32,17 +32,17 @@ package org.opensearch.transport; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.core.common.bytes.CompositeBytesReference; import org.opensearch.common.bytes.ReleasableBytesReference; -import org.opensearch.common.compress.CompressorFactory; import org.opensearch.common.io.Streams; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.lease.Releasables; +import org.opensearch.common.util.PageCacheRecycler; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.bytes.CompositeBytesReference; import org.opensearch.core.common.io.stream.OutputStreamStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.util.PageCacheRecycler; -import org.opensearch.common.lease.Releasables; +import org.opensearch.core.compress.CompressorRegistry; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -54,7 +54,7 @@ public void testSimpleCompression() throws IOException { try (BytesStreamOutput output = new BytesStreamOutput()) { byte randomByte = randomByte(); try ( - OutputStream deflateStream = CompressorFactory.defaultCompressor() + OutputStream deflateStream = CompressorRegistry.defaultCompressor() .threadLocalOutputStream(Streams.flushOnCloseStream(output)) ) { deflateStream.write(randomByte); @@ -77,7 +77,7 @@ public void testMultiPageCompression() throws IOException { try (BytesStreamOutput output = new BytesStreamOutput()) { try ( StreamOutput deflateStream = new OutputStreamStreamOutput( - CompressorFactory.defaultCompressor().threadLocalOutputStream(Streams.flushOnCloseStream(output)) + CompressorRegistry.defaultCompressor().threadLocalOutputStream(Streams.flushOnCloseStream(output)) ) ) { for (int i = 0; i < 10000; ++i) { @@ -109,7 +109,7 @@ public void testIncrementalMultiPageCompression() throws IOException { try (BytesStreamOutput output = new BytesStreamOutput()) { try ( StreamOutput deflateStream = new OutputStreamStreamOutput( - CompressorFactory.defaultCompressor().threadLocalOutputStream(Streams.flushOnCloseStream(output)) + CompressorRegistry.defaultCompressor().threadLocalOutputStream(Streams.flushOnCloseStream(output)) ) ) { for (int i = 0; i < 10000; ++i) { diff --git a/server/src/test/java/org/opensearch/transport/TransportHandshakerTests.java b/server/src/test/java/org/opensearch/transport/TransportHandshakerTests.java index 58c8806380436..b48c25faa27ce 100644 --- a/server/src/test/java/org/opensearch/transport/TransportHandshakerTests.java +++ b/server/src/test/java/org/opensearch/transport/TransportHandshakerTests.java @@ -35,9 +35,10 @@ import org.opensearch.action.support.PlainActionFuture; 
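(An aside on the CompressorFactory to CompressorRegistry swap running through CompressibleBytesOutputStreamTests and TransportDecompressorTests above: only the entry point moves to the core module; the thread-local stream round-trip itself is unchanged. Below is a minimal sketch of that round-trip, assembled from the calls visible in this diff; the helper class and payload handling are illustrative, not part of the PR:

    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;

    import org.opensearch.common.io.Streams;
    import org.opensearch.common.io.stream.BytesStreamOutput;
    import org.opensearch.core.common.bytes.BytesReference;
    import org.opensearch.core.compress.CompressorRegistry;

    // Illustrative helper: compress a payload and read it back through the
    // default compressor, the same pattern the tests above assert on.
    final class CompressionRoundTrip {
        static byte[] roundTrip(byte[] payload) throws IOException {
            BytesStreamOutput output = new BytesStreamOutput();
            try (OutputStream compressed = CompressorRegistry.defaultCompressor()
                .threadLocalOutputStream(Streams.flushOnCloseStream(output))) {
                compressed.write(payload);
            }
            BytesReference bytes = output.bytes();
            // isCompressed checks for the compressor's magic header, as asserted
            // in CompressibleBytesOutputStreamTests.
            assert CompressorRegistry.defaultCompressor().isCompressed(bytes);
            try (InputStream decompressed = CompressorRegistry.defaultCompressor()
                .threadLocalInputStream(bytes.streamInput())) {
                return decompressed.readAllBytes();
            }
        }
    }

The assertions in the hunks above are this same round-trip, just split across the stream boundaries the tests exercise.)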
import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.unit.TimeValue; -import org.opensearch.tasks.TaskId; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; diff --git a/server/src/test/java/org/opensearch/transport/TransportInfoTests.java b/server/src/test/java/org/opensearch/transport/TransportInfoTests.java index 402c4183fa2b8..751f4786acf55 100644 --- a/server/src/test/java/org/opensearch/transport/TransportInfoTests.java +++ b/server/src/test/java/org/opensearch/transport/TransportInfoTests.java @@ -33,11 +33,11 @@ package org.opensearch.transport; import org.opensearch.common.network.NetworkAddress; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/transport/TransportKeepAliveTests.java b/server/src/test/java/org/opensearch/transport/TransportKeepAliveTests.java index 080b333251db2..bc8e351d75ac4 100644 --- a/server/src/test/java/org/opensearch/transport/TransportKeepAliveTests.java +++ b/server/src/test/java/org/opensearch/transport/TransportKeepAliveTests.java @@ -32,11 +32,11 @@ package org.opensearch.transport; import org.opensearch.common.AsyncBiFunction; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; @@ -48,8 +48,8 @@ import static org.mockito.Mockito.any; import static org.mockito.Mockito.eq; -import static org.mockito.Mockito.same; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.same; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; diff --git a/server/src/test/java/org/opensearch/transport/TransportLoggerTests.java b/server/src/test/java/org/opensearch/transport/TransportLoggerTests.java index fb0c29a82ac42..05296e9308657 100644 --- a/server/src/test/java/org/opensearch/transport/TransportLoggerTests.java +++ b/server/src/test/java/org/opensearch/transport/TransportLoggerTests.java @@ -36,12 +36,12 @@ import org.opensearch.Version; import org.opensearch.action.admin.cluster.stats.ClusterStatsAction; import org.opensearch.action.admin.cluster.stats.ClusterStatsRequest; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.core.common.bytes.BytesReference; import 
org.opensearch.test.MockLogAppender; +import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.junit.annotations.TestLogging; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/transport/TransportRequestDeduplicatorTests.java b/server/src/test/java/org/opensearch/transport/TransportRequestDeduplicatorTests.java index 8b4a75a37317b..3479102e553d4 100644 --- a/server/src/test/java/org/opensearch/transport/TransportRequestDeduplicatorTests.java +++ b/server/src/test/java/org/opensearch/transport/TransportRequestDeduplicatorTests.java @@ -31,9 +31,9 @@ package org.opensearch.transport; -import org.opensearch.action.ActionListener; import org.opensearch.common.SetOnce; -import org.opensearch.tasks.TaskId; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.tasks.TaskId; import org.opensearch.test.OpenSearchTestCase; import java.util.concurrent.Phaser; diff --git a/server/src/test/java/org/opensearch/transport/TransportServiceDeserializationFailureTests.java b/server/src/test/java/org/opensearch/transport/TransportServiceDeserializationFailureTests.java index 01d6a4d331477..d10b4f26100cc 100644 --- a/server/src/test/java/org/opensearch/transport/TransportServiceDeserializationFailureTests.java +++ b/server/src/test/java/org/opensearch/transport/TransportServiceDeserializationFailureTests.java @@ -37,11 +37,13 @@ import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.coordination.DeterministicTaskQueue; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskAwareRequest; -import org.opensearch.tasks.TaskId; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransport; import org.opensearch.threadpool.ThreadPool; @@ -81,7 +83,8 @@ protected void onSendRequest(long requestId, String action, TransportRequest req TransportService.NOOP_TRANSPORT_INTERCEPTOR, ignored -> localNode, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.registerRequestHandler( diff --git a/server/src/test/java/org/opensearch/transport/TransportServiceHandshakeTests.java b/server/src/test/java/org/opensearch/transport/TransportServiceHandshakeTests.java index 71c69acb75896..9a884fd29d109 100644 --- a/server/src/test/java/org/opensearch/transport/TransportServiceHandshakeTests.java +++ b/server/src/test/java/org/opensearch/transport/TransportServiceHandshakeTests.java @@ -32,15 +32,18 @@ package org.opensearch.transport; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.PageCacheRecycler; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.action.ActionListener; +import 
org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.MockLogAppender; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; @@ -48,8 +51,6 @@ import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.nio.MockNioTransport; -import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; import org.junit.After; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -84,7 +85,8 @@ private NetworkHandle startServices(String nodeNameAndId, Settings settings, Ver new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ); TransportService transportService = new MockTransportService( settings, @@ -100,7 +102,8 @@ private NetworkHandle startServices(String nodeNameAndId, Settings settings, Ver version ), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/watcher/FileWatcherTests.java b/server/src/test/java/org/opensearch/watcher/FileWatcherTests.java index 3e90784a15ab9..2f5787ffff29f 100644 --- a/server/src/test/java/org/opensearch/watcher/FileWatcherTests.java +++ b/server/src/test/java/org/opensearch/watcher/FileWatcherTests.java @@ -31,8 +31,8 @@ package org.opensearch.watcher; -import org.opensearch.common.util.io.IOUtils; import org.apache.lucene.tests.util.LuceneTestCase; +import org.opensearch.common.util.io.IOUtils; import org.opensearch.test.OpenSearchTestCase; import java.io.BufferedWriter; diff --git a/server/src/test/resources/org/opensearch/bootstrap/test-codebases.policy b/server/src/test/resources/org/opensearch/bootstrap/test-codebases.policy new file mode 100644 index 0000000000000..0dc754ccb0c57 --- /dev/null +++ b/server/src/test/resources/org/opensearch/bootstrap/test-codebases.policy @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +//// additional test framework permissions. +//// These are mock objects and test management that we allow test framework libs +//// to provide on our behalf. But tests themselves cannot do this stuff! + +grant codeBase "${codebase.zstd-jni}" { +}; + +grant codeBase "${codebase.kafka-server-common}" { +}; + +grant codeBase "${codebase.kafka-server-common@test}" { +}; diff --git a/server/src/test/resources/org/opensearch/bootstrap/test.policy b/server/src/test/resources/org/opensearch/bootstrap/test.policy new file mode 100644 index 0000000000000..7b0a9b3d5d709 --- /dev/null +++ b/server/src/test/resources/org/opensearch/bootstrap/test.policy @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +grant { + // allow to test Security policy and codebases + permission java.util.PropertyPermission "*", "read,write"; + permission java.security.SecurityPermission "createPolicy.JavaPolicy"; +}; diff --git a/server/src/test/resources/org/opensearch/common/settings/loader/test-settings.yml b/server/src/test/resources/org/opensearch/common/settings/loader/test-settings.yml index b533ae036e758..1bffbc18e83e6 100644 --- a/server/src/test/resources/org/opensearch/common/settings/loader/test-settings.yml +++ b/server/src/test/resources/org/opensearch/common/settings/loader/test-settings.yml @@ -6,3 +6,4 @@ test1: test3: - test3-1 - test3-2 + test4: [] diff --git a/server/src/test/resources/org/opensearch/index/analysis/shingle_analysis.json b/server/src/test/resources/org/opensearch/index/analysis/shingle_analysis.json index 4a4fc7d2c81b1..1ed56fa6dab4d 100644 --- a/server/src/test/resources/org/opensearch/index/analysis/shingle_analysis.json +++ b/server/src/test/resources/org/opensearch/index/analysis/shingle_analysis.json @@ -20,4 +20,4 @@ } } } -} \ No newline at end of file +} diff --git a/settings.gradle b/settings.gradle index b7d47cd9b745e..8fbf32504215b 100644 --- a/settings.gradle +++ b/settings.gradle @@ -10,12 +10,14 @@ */ plugins { - id "com.gradle.enterprise" version "3.13.3" + id "com.gradle.enterprise" version "3.16.2" } +ext.disableBuildCache = hasProperty('DISABLE_BUILD_CACHE') || System.getenv().containsKey('DISABLE_BUILD_CACHE') + buildCache { local { - enabled = true + enabled = !disableBuildCache removeUnusedEntriesAfterDays = 14 } } @@ -53,6 +55,7 @@ List projects = [ 'distribution:archives:no-jdk-linux-ppc64le-tar', 'distribution:archives:linux-tar', 'distribution:archives:no-jdk-linux-tar', + 'distribution:archives:jre-linux-tar', 'distribution:docker', 'distribution:docker:docker-arm64-build-context', 'distribution:docker:docker-arm64-export', diff --git a/test/external-modules/delayed-aggs/src/internalClusterTest/java/org/opensearch/search/aggregations/DelayedShardAggregationIT.java b/test/external-modules/delayed-aggs/src/internalClusterTest/java/org/opensearch/search/aggregations/DelayedShardAggregationIT.java index e850ae9dcc859..5cb5cdd18f6cc 100644 --- a/test/external-modules/delayed-aggs/src/internalClusterTest/java/org/opensearch/search/aggregations/DelayedShardAggregationIT.java +++ b/test/external-modules/delayed-aggs/src/internalClusterTest/java/org/opensearch/search/aggregations/DelayedShardAggregationIT.java @@ -31,25 +31,37 @@ package org.opensearch.search.aggregations; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.plugins.Plugin; import org.opensearch.search.aggregations.bucket.filter.InternalFilter; import org.opensearch.search.aggregations.metrics.InternalMax; import org.opensearch.search.aggregations.metrics.MaxAggregationBuilder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.List; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static 
org.hamcrest.Matchers.instanceOf; -public class DelayedShardAggregationIT extends OpenSearchIntegTestCase { +public class DelayedShardAggregationIT extends ParameterizedOpenSearchIntegTestCase { + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } @Override protected Collection<Class<? extends Plugin>> nodePlugins() { diff --git a/test/external-modules/delayed-aggs/src/main/java/org/opensearch/search/aggregations/DelayedShardAggregationBuilder.java b/test/external-modules/delayed-aggs/src/main/java/org/opensearch/search/aggregations/DelayedShardAggregationBuilder.java index 773c969e2116a..006632ca93925 100644 --- a/test/external-modules/delayed-aggs/src/main/java/org/opensearch/search/aggregations/DelayedShardAggregationBuilder.java +++ b/test/external-modules/delayed-aggs/src/main/java/org/opensearch/search/aggregations/DelayedShardAggregationBuilder.java @@ -32,10 +32,10 @@ package org.opensearch.search.aggregations; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.unit.TimeValue; import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilders; @@ -137,6 +137,11 @@ protected Aggregator createInternal( } while (searchContext.getRelativeTimeInMillis() - start < delay.getMillis()); return factory.create(searchContext, parent, cardinality); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } }; } diff --git a/test/external-modules/delayed-aggs/src/main/java/org/opensearch/search/aggregations/DelayedShardAggregationPlugin.java b/test/external-modules/delayed-aggs/src/main/java/org/opensearch/search/aggregations/DelayedShardAggregationPlugin.java index e896a55c998a5..e2ba59a59a6e5 100644 --- a/test/external-modules/delayed-aggs/src/main/java/org/opensearch/search/aggregations/DelayedShardAggregationPlugin.java +++ b/test/external-modules/delayed-aggs/src/main/java/org/opensearch/search/aggregations/DelayedShardAggregationPlugin.java @@ -32,12 +32,12 @@ package org.opensearch.search.aggregations; -import java.util.List; - import org.opensearch.plugins.Plugin; import org.opensearch.plugins.SearchPlugin; import org.opensearch.search.aggregations.bucket.filter.InternalFilter; +import java.util.List; + import static java.util.Collections.singletonList; /** diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index b052988f88a2b..0ca4797bfeff1 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -33,7 +33,7 @@ apply plugin: 'opensearch.java' group = 'hdfs' versions << [ - 'jetty': '9.4.51.v20230217' + 'jetty': '9.4.53.v20231009' ] dependencies { @@ -45,9 +45,16 @@ dependencies { exclude module: 'protobuf-java' exclude group: 'org.codehaus.jackson' exclude group: "org.bouncycastle" + exclude group: "com.squareup.okhttp3" + exclude group: "org.xerial.snappy" + exclude module: "json-io" + exclude module: "logback-core" + exclude module: "logback-classic" + exclude module: "avro" + exclude group: 'org.apache.kerby' } api 
"org.codehaus.jettison:jettison:${versions.jettison}" - api "org.apache.commons:commons-compress:1.23.0" + api "org.apache.commons:commons-compress:${versions.commonscompress}" api "commons-codec:commons-codec:${versions.commonscodec}" api "org.apache.logging.log4j:log4j-core:${versions.log4j}" api "io.netty:netty-all:${versions.netty}" @@ -61,9 +68,16 @@ dependencies { api "org.jetbrains.kotlin:kotlin-stdlib:${versions.kotlin}" api "org.eclipse.jetty:jetty-server:${versions.jetty}" api "org.eclipse.jetty.websocket:javax-websocket-server-impl:${versions.jetty}" - api 'org.apache.zookeeper:zookeeper:3.8.2' - api "org.apache.commons:commons-text:1.10.0" - api "commons-net:commons-net:3.9.0" + api 'org.apache.zookeeper:zookeeper:3.9.2' + api "org.apache.commons:commons-text:1.11.0" + api "commons-net:commons-net:3.10.0" + api "ch.qos.logback:logback-core:1.5.3" + api "ch.qos.logback:logback-classic:1.2.13" + api 'org.apache.kerby:kerb-admin:2.0.3' runtimeOnly "com.google.guava:guava:${versions.guava}" - + runtimeOnly("com.squareup.okhttp3:okhttp:4.12.0") { + exclude group: "com.squareup.okio" + } + runtimeOnly "com.squareup.okio:okio:3.8.0" + runtimeOnly "org.xerial.snappy:snappy-java:1.1.10.5" } diff --git a/test/fixtures/minio-fixture/docker-compose.yml b/test/fixtures/minio-fixture/docker-compose.yml index e4d2faab9a657..539ca9471fa04 100644 --- a/test/fixtures/minio-fixture/docker-compose.yml +++ b/test/fixtures/minio-fixture/docker-compose.yml @@ -1,4 +1,4 @@ -version: '3' +version: '3.2' services: minio-fixture: build: diff --git a/test/framework/src/main/java/org/opensearch/action/support/ActionTestUtils.java b/test/framework/src/main/java/org/opensearch/action/support/ActionTestUtils.java index 1403bf71a7f92..52496c331c25e 100644 --- a/test/framework/src/main/java/org/opensearch/action/support/ActionTestUtils.java +++ b/test/framework/src/main/java/org/opensearch/action/support/ActionTestUtils.java @@ -32,10 +32,10 @@ package org.opensearch.action.support; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; -import org.opensearch.action.ActionResponse; import org.opensearch.common.CheckedConsumer; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; import org.opensearch.tasks.Task; import static org.opensearch.action.support.PlainActionFuture.newFuture; @@ -55,7 +55,7 @@ public static <Request extends ActionRequest, Response extends ActionResponse> R /** * Executes the given action. - * + * <p> * This is a shim method to make execution publicly available in tests. 
*/ public static <Request extends ActionRequest, Response extends ActionResponse> void execute( diff --git a/test/framework/src/main/java/org/opensearch/bootstrap/BootstrapForTesting.java b/test/framework/src/main/java/org/opensearch/bootstrap/BootstrapForTesting.java index d46fcb224e8c2..933385dedcf49 100644 --- a/test/framework/src/main/java/org/opensearch/bootstrap/BootstrapForTesting.java +++ b/test/framework/src/main/java/org/opensearch/bootstrap/BootstrapForTesting.java @@ -33,17 +33,18 @@ package org.opensearch.bootstrap; import com.carrotsearch.randomizedtesting.RandomizedRunner; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.tests.util.LuceneTestCase; import org.opensearch.common.Booleans; import org.opensearch.common.SuppressForbidden; -import org.opensearch.core.common.Strings; -import org.opensearch.core.util.FileSystemUtils; import org.opensearch.common.io.PathUtils; import org.opensearch.common.network.IfConfig; import org.opensearch.common.network.NetworkAddress; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.Strings; +import org.opensearch.core.util.FileSystemUtils; import org.opensearch.mockito.plugin.PriviledgedMockMaker; import org.opensearch.plugins.PluginInfo; import org.opensearch.secure_sm.SecureSM; @@ -68,6 +69,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.Properties; import java.util.Set; import java.util.stream.Collectors; @@ -82,6 +84,7 @@ * The idea is to mimic as much as possible what happens with ES in production * mode (e.g. assign permissions and install security manager the same way) */ +@SuppressWarnings("removal") public class BootstrapForTesting { // TODO: can we share more code with the non-test side here @@ -160,12 +163,17 @@ public class BootstrapForTesting { addClassCodebase(codebases, "opensearch-rest-client", "org.opensearch.client.RestClient"); } final Policy testFramework = Security.readPolicy(Bootstrap.class.getResource("test-framework.policy"), codebases); + // Allow modules to define their own ad-hoc test policies (if needed) that are not applicable to other modules + final Optional<Policy> testPolicy = Optional.ofNullable(Bootstrap.class.getResource("test.policy")) + .map(policy -> Security.readPolicy(policy, codebases)); final Policy opensearchPolicy = new OpenSearchPolicy(codebases, perms, getPluginPermissions(), true, new Permissions()); Policy.setPolicy(new Policy() { @Override public boolean implies(ProtectionDomain domain, Permission permission) { // implements union - return opensearchPolicy.implies(domain, permission) || testFramework.implies(domain, permission); + return opensearchPolicy.implies(domain, permission) + || testFramework.implies(domain, permission) + || testPolicy.map(policy -> policy.implies(domain, permission)).orElse(false /* no policy */); } }); // Create access control context for mocking diff --git a/test/framework/src/main/java/org/opensearch/cli/CommandTestCase.java b/test/framework/src/main/java/org/opensearch/cli/CommandTestCase.java index 4ee1314d27fe1..5566b493adc7d 100644 --- a/test/framework/src/main/java/org/opensearch/cli/CommandTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cli/CommandTestCase.java @@ -54,7 +54,7 @@ public void resetTerminal() { /** * Runs a command with the given args. - * + * <p> * Output can be found in {@link #terminal}. */ public String execute(String...
args) throws Exception { diff --git a/test/framework/src/main/java/org/opensearch/client/RestClientBuilderTestCase.java b/test/framework/src/main/java/org/opensearch/client/RestClientBuilderTestCase.java index eb5177bc0f39b..5a964954c4599 100644 --- a/test/framework/src/main/java/org/opensearch/client/RestClientBuilderTestCase.java +++ b/test/framework/src/main/java/org/opensearch/client/RestClientBuilderTestCase.java @@ -32,14 +32,13 @@ package org.opensearch.client; -import java.util.HashMap; -import java.util.Map; - import joptsimple.internal.Strings; - import org.apache.hc.core5.http.Header; import org.opensearch.test.OpenSearchTestCase; +import java.util.HashMap; +import java.util.Map; + /** * A test case with access to internals of a RestClient. */ diff --git a/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java b/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java index 6354cf18e8b62..1ad6083074025 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java +++ b/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java @@ -112,6 +112,7 @@ List<NodeStats> adjustNodesStats(List<NodeStats> nodesStats) { nodeStats.getDiscoveryStats(), nodeStats.getIngestStats(), nodeStats.getAdaptiveSelectionStats(), + nodeStats.getResourceUsageStats(), nodeStats.getScriptCacheStats(), nodeStats.getIndexingPressureStats(), nodeStats.getShardIndexingPressureStats(), @@ -120,7 +121,10 @@ List<NodeStats> adjustNodesStats(List<NodeStats> nodesStats) { nodeStats.getWeightedRoutingStats(), nodeStats.getFileCacheStats(), nodeStats.getTaskCancellationStats(), - nodeStats.getSearchPipelineStats() + nodeStats.getSearchPipelineStats(), + nodeStats.getSegmentReplicationRejectionStats(), + nodeStats.getRepositoriesStats(), + nodeStats.getAdmissionControlStats() ); }).collect(Collectors.toList()); } diff --git a/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationWithConstraintsTestCase.java b/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationWithConstraintsTestCase.java index 55f9f6947b999..0c08de252e4cd 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationWithConstraintsTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationWithConstraintsTestCase.java @@ -14,7 +14,7 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.CollectionUtils; +import org.opensearch.core.common.util.CollectionUtils; import org.junit.Before; import java.util.Arrays; @@ -209,28 +209,28 @@ public int allocateAndCheckIndexShardHotSpots(boolean expected, int nodes, Strin continue; } - /** - * Hot spots can occur due to the order in which shards get allocated to nodes. - * A node with fewer shards may not be able to accept current shard due to - * SameShardAllocationDecider, causing it to breach allocation constraint on - * another node. We need to differentiate between such hot spots v/s actual hot - * spots. - * - * A simple check could be to ensure there is no node with shards less than - * allocation limit, that can accept current shard. However, in current - * allocation algorithm, when nodes get throttled, shards are added to - * ModelNodes without adding them to actual cluster (RoutingNodes). 
As a result, - * the shards per node we see here, are different from the ones observed by - * weight function in balancer. RoutingNodes with {@link count} < {@link limit} - * may not have had the same count in the corresponding ModelNode seen by weight - * function. We hence use the following alternate check -- - * - * Given the way {@link limit} is defined, we should not have hot spots if *all* - * nodes are eligible to accept the shard. A hot spot is acceptable, if either - * all peer nodes have {@link count} > {@link limit}, or if even one node is - * ineligible to accept the shard due to SameShardAllocationDecider, as this - * leads to a chain of events that breach IndexShardsPerNode constraint on all - * other nodes. + /* + Hot spots can occur due to the order in which shards get allocated to nodes. + A node with fewer shards may not be able to accept the current shard due to + SameShardAllocationDecider, causing it to breach the allocation constraint on + another node. We need to differentiate such hot spots from actual hot spots. + + A simple check could be to ensure there is no node with fewer shards than the + allocation limit that can accept the current shard. However, in the current + allocation algorithm, when nodes get throttled, shards are added to + ModelNodes without adding them to the actual cluster (RoutingNodes). As a result, + the shards per node we see here are different from the ones observed by the + weight function in the balancer. RoutingNodes with {@link count} < {@link limit} + may not have had the same count in the corresponding ModelNode seen by the + weight function. We hence use the following alternate check -- + + Given the way {@link limit} is defined, we should not have hot spots if *all* + nodes are eligible to accept the shard. A hot spot is acceptable if either + all peer nodes have {@link count} > {@link limit}, or if even one node is + ineligible to accept the shard due to SameShardAllocationDecider, as this + leads to a chain of events that breach the IndexShardsPerNode constraint on all + other nodes.
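+ + As an illustration (hypothetical numbers, following the rule above): with + {@link limit} = 2, a node at {@link count} = 3 is an acceptable hot spot if + every peer node also has {@link count} > 2, or if at least one peer cannot + accept the shard because it already holds a copy of it + (SameShardAllocationDecider).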
*/ // If all peer nodes have count >= limit, hotspot is acceptable diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java index 98b4774ce1836..28d7706fb1493 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java @@ -32,6 +32,7 @@ package org.opensearch.cluster.coordination; import com.carrotsearch.randomizedtesting.RandomizedContext; + import org.apache.logging.log4j.CloseableThreadContext; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -42,12 +43,13 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateTaskListener; import org.opensearch.cluster.ClusterStateUpdateTask; -import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.NodeConnectionsService; +import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.coordination.AbstractCoordinatorTestCase.Cluster.ClusterNode; import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfiguration; import org.opensearch.cluster.coordination.LinearizabilityChecker.History; import org.opensearch.cluster.coordination.LinearizabilityChecker.SequentialSpec; +import org.opensearch.cluster.coordination.PersistedStateRegistry.PersistedStateType; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; @@ -57,22 +59,24 @@ import org.opensearch.cluster.service.FakeThreadPoolClusterManagerService; import org.opensearch.common.Nullable; import org.opensearch.common.Randomness; +import org.opensearch.common.SetOnce; import org.opensearch.common.UUIDs; import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.MockBigArrays; import org.opensearch.common.util.MockPageCacheRecycler; import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor; -import org.opensearch.common.lease.Releasable; +import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.discovery.DiscoveryModule; import org.opensearch.discovery.SeedHostsProvider; import org.opensearch.env.NodeEnvironment; @@ -80,9 +84,11 @@ import org.opensearch.gateway.GatewayService; import org.opensearch.gateway.MockGatewayMetaState; import org.opensearch.gateway.PersistedClusterStateService; -import 
org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.monitor.NodeHealthService; import org.opensearch.monitor.StatusInfo; +import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.disruption.DisruptableMockTransport; import org.opensearch.test.disruption.DisruptableMockTransport.ConnectionStatus; @@ -839,14 +845,16 @@ class MockPersistedState implements CoordinationState.PersistedState { private final CoordinationState.PersistedState delegate; private final NodeEnvironment nodeEnvironment; + private MockGatewayMetaState mockGatewayMetaState; + MockPersistedState(DiscoveryNode localNode) { try { if (rarely()) { nodeEnvironment = newNodeEnvironment(); nodeEnvironments.add(nodeEnvironment); - final MockGatewayMetaState gatewayMetaState = new MockGatewayMetaState(localNode, bigArrays); - gatewayMetaState.start(Settings.EMPTY, nodeEnvironment, xContentRegistry()); - delegate = gatewayMetaState.getPersistedState(); + mockGatewayMetaState = new MockGatewayMetaState(localNode, bigArrays); + mockGatewayMetaState.start(Settings.EMPTY, nodeEnvironment, xContentRegistry(), persistedStateRegistry()); + delegate = mockGatewayMetaState.getPersistedState(); } else { nodeEnvironment = null; delegate = new InMemoryPersistedState( @@ -863,11 +871,12 @@ class MockPersistedState implements CoordinationState.PersistedState { MockPersistedState( DiscoveryNode newLocalNode, - MockPersistedState oldState, + PersistedStateRegistry persistedStateRegistry, Function<Metadata, Metadata> adaptGlobalMetadata, Function<Long, Long> adaptCurrentTerm ) { try { + MockPersistedState oldState = (MockPersistedState) persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL); if (oldState.nodeEnvironment != null) { nodeEnvironment = oldState.nodeEnvironment; final Metadata updatedMetadata = adaptGlobalMetadata.apply(oldState.getLastAcceptedState().metadata()); @@ -889,7 +898,7 @@ class MockPersistedState implements CoordinationState.PersistedState { } } final MockGatewayMetaState gatewayMetaState = new MockGatewayMetaState(newLocalNode, bigArrays); - gatewayMetaState.start(Settings.EMPTY, nodeEnvironment, xContentRegistry()); + gatewayMetaState.start(Settings.EMPTY, nodeEnvironment, xContentRegistry(), persistedStateRegistry()); delegate = gatewayMetaState.getPersistedState(); } else { nodeEnvironment = null; @@ -1007,6 +1016,11 @@ public void setLastAcceptedState(ClusterState clusterState) { delegate.setLastAcceptedState(clusterState); } + @Override + public PersistedStateStats getStats() { + return null; + } + @Override public void close() { assertTrue(openPersistedStates.remove(this)); @@ -1024,7 +1038,7 @@ class ClusterNode { private final int nodeIndex; Coordinator coordinator; private final DiscoveryNode localNode; - final MockPersistedState persistedState; + final PersistedStateRegistry persistedStateRegistry; final Settings nodeSettings; private AckedFakeThreadPoolClusterManagerService clusterManagerService; private DisruptableClusterApplierService clusterApplierService; @@ -1032,6 +1046,8 @@ class ClusterNode { TransportService transportService; private DisruptableMockTransport mockTransport; private NodeHealthService nodeHealthService; + private RepositoriesService repositoriesService; + private RemoteStoreNodeService remoteStoreNodeService; List<BiConsumer<DiscoveryNode, ClusterState>> 
extraJoinValidators = new ArrayList<>(); ClusterNode(int nodeIndex, boolean clusterManagerEligible, Settings nodeSettings, NodeHealthService nodeHealthService) { @@ -1055,7 +1071,9 @@ class ClusterNode { this.nodeIndex = nodeIndex; this.localNode = localNode; this.nodeSettings = nodeSettings; - persistedState = persistedStateSupplier.apply(localNode); + final MockPersistedState persistedState = persistedStateSupplier.apply(localNode); + persistedStateRegistry = persistedStateRegistry(); + persistedStateRegistry.addPersistedState(PersistedStateType.LOCAL, persistedState); assertTrue("must use a fresh PersistedState", openPersistedStates.add(persistedState)); boolean success = false; try { @@ -1104,7 +1122,8 @@ protected Optional<DisruptableMockTransport> getDisruptableMockTransport(Transpo getTransportInterceptor(localNode, threadPool), a -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); clusterManagerService = new AckedFakeThreadPoolClusterManagerService( localNode.getId(), @@ -1124,6 +1143,15 @@ protected Optional<DisruptableMockTransport> getDisruptableMockTransport(Transpo clusterService.setNodeConnectionsService( new NodeConnectionsService(clusterService.getSettings(), threadPool, transportService) ); + repositoriesService = new RepositoriesService( + settings, + clusterService, + transportService, + Collections.emptyMap(), + Collections.emptyMap(), + threadPool + ); + remoteStoreNodeService = new RemoteStoreNodeService(new SetOnce<>(repositoriesService)::get, threadPool); final Collection<BiConsumer<DiscoveryNode, ClusterState>> onJoinValidators = Collections.singletonList( (dn, cs) -> extraJoinValidators.forEach(validator -> validator.accept(dn, cs)) ); @@ -1143,7 +1171,9 @@ protected Optional<DisruptableMockTransport> getDisruptableMockTransport(Transpo Randomness.get(), (s, p, r) -> {}, getElectionStrategy(), - nodeHealthService + nodeHealthService, + persistedStateRegistry, + remoteStoreNodeService ); clusterManagerService.setClusterStatePublisher(coordinator); final GatewayService gatewayService = new GatewayService( @@ -1203,14 +1233,14 @@ ClusterNode restartedNode( return new ClusterNode( nodeIndex, newLocalNode, - node -> new MockPersistedState(newLocalNode, persistedState, adaptGlobalMetadata, adaptCurrentTerm), + node -> new MockPersistedState(newLocalNode, persistedStateRegistry, adaptGlobalMetadata, adaptCurrentTerm), nodeSettings, nodeHealthService ); } private CoordinationState.PersistedState getPersistedState() { - return persistedState; + return persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL); } String getId() { diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/CoordinationStateTestCluster.java b/test/framework/src/main/java/org/opensearch/cluster/coordination/CoordinationStateTestCluster.java index 5ef7fb192b054..cbe695cbb2136 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/coordination/CoordinationStateTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/cluster/coordination/CoordinationStateTestCluster.java @@ -34,6 +34,7 @@ import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.coordination.PersistedStateRegistry.PersistedStateType; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; @@ -48,9 +49,7 @@ import java.util.Set; import java.util.stream.Collectors; -import static 
com.carrotsearch.randomizedtesting.RandomizedTest.rarely; import static java.util.stream.Collectors.toSet; -import static org.apache.lucene.tests.util.LuceneTestCase.random; import static org.opensearch.test.OpenSearchTestCase.randomBoolean; import static org.opensearch.test.OpenSearchTestCase.randomFrom; import static org.opensearch.test.OpenSearchTestCase.randomIntBetween; @@ -58,6 +57,8 @@ import static org.opensearch.test.OpenSearchTestCase.randomLongBetween; import static org.opensearch.test.OpenSearchTestCase.randomSubsetOf; import static org.hamcrest.Matchers.hasSize; +import static com.carrotsearch.randomizedtesting.RandomizedTest.rarely; +import static org.apache.lucene.tests.util.LuceneTestCase.random; import static org.junit.Assert.assertThat; public class CoordinationStateTestCluster { @@ -128,6 +129,8 @@ static class ClusterNode { DiscoveryNode localNode; CoordinationState.PersistedState persistedState; + PersistedStateRegistry persistedStateRegistry; + CoordinationState state; ClusterNode(DiscoveryNode localNode, ElectionStrategy electionStrategy) { @@ -143,8 +146,11 @@ static class ClusterNode { 0L ) ); + persistedStateRegistry = new PersistedStateRegistry(); + persistedStateRegistry.addPersistedState(PersistedStateType.LOCAL, persistedState); + this.electionStrategy = electionStrategy; - state = new CoordinationState(localNode, persistedState, electionStrategy); + state = new CoordinationState(localNode, persistedStateRegistry, electionStrategy, Settings.EMPTY); } void reboot() { @@ -183,7 +189,7 @@ void reboot() { localNode.getVersion() ); - state = new CoordinationState(localNode, persistedState, electionStrategy); + state = new CoordinationState(localNode, persistedStateRegistry, electionStrategy, Settings.EMPTY); } void setInitialState(CoordinationMetadata.VotingConfiguration initialConfig, long initialValue) { diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/DeterministicTaskQueue.java b/test/framework/src/main/java/org/opensearch/cluster/coordination/DeterministicTaskQueue.java index 0c99d1f754944..1ad18bf89d5ba 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/coordination/DeterministicTaskQueue.java +++ b/test/framework/src/main/java/org/opensearch/cluster/coordination/DeterministicTaskQueue.java @@ -33,6 +33,7 @@ package org.opensearch.cluster.coordination; import com.carrotsearch.randomizedtesting.generators.RandomNumbers; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.common.settings.Settings; diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/LinearizabilityChecker.java b/test/framework/src/main/java/org/opensearch/cluster/coordination/LinearizabilityChecker.java index 3d3cff6f3be9b..946b980bfb62f 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/coordination/LinearizabilityChecker.java +++ b/test/framework/src/main/java/org/opensearch/cluster/coordination/LinearizabilityChecker.java @@ -34,8 +34,8 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.FixedBitSet; -import org.opensearch.common.Strings; import org.opensearch.common.collect.Tuple; +import org.opensearch.core.common.Strings; import java.util.ArrayList; import java.util.Collection; @@ -476,22 +476,22 @@ void unlift() { /** * A cache optimized for small bit-counts (less than 64) and small number of unique permutations of state objects. 
- * + * <p> * Each combination of states is kept once only, building on the * assumption that the number of permutations is small compared to the * number of bits permutations. For those histories that are difficult to check * we will have many bits combinations that use the same state permutations. - * + * <p> * The smallMap optimization allows us to avoid object overheads for bit-sets up to 64 bit large. - * + * <p> * Comparing set of (bits, state) to smallMap: * (bits, state) : 24 (tuple) + 24 (FixedBitSet) + 24 (bits) + 5 (hash buckets) + 24 (hashmap node). * smallMap bits to {state} : 10 (bits) + 5 (hash buckets) + avg-size of unique permutations. - * + * <p> * The avg-size of the unique permutations part is very small compared to the * sometimes large number of bits combinations (which are the cases where * we run into trouble). - * + * <p> * set of (bits, state) totals 101 bytes compared to smallMap bits to { state } * which totals 15 bytes, ie. a 6x improvement in memory usage. */ diff --git a/test/framework/src/main/java/org/opensearch/cluster/routing/TestShardRouting.java b/test/framework/src/main/java/org/opensearch/cluster/routing/TestShardRouting.java index 062af17ce68ca..f67108345550f 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/routing/TestShardRouting.java +++ b/test/framework/src/main/java/org/opensearch/cluster/routing/TestShardRouting.java @@ -41,8 +41,8 @@ import org.opensearch.snapshots.SnapshotId; import org.opensearch.test.OpenSearchTestCase; -import static org.apache.lucene.tests.util.LuceneTestCase.random; import static org.opensearch.test.OpenSearchTestCase.randomAlphaOfLength; +import static org.apache.lucene.tests.util.LuceneTestCase.random; /** * A helper that allows to create shard routing instances within tests, while not requiring to expose diff --git a/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerService.java b/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerService.java index 46893fe08bd76..3ca938c99b5fd 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerService.java +++ b/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerService.java @@ -33,7 +33,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.ClusterStatePublisher.AckListener; @@ -44,6 +43,7 @@ import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.ActionListener; import org.opensearch.node.Node; import org.opensearch.threadpool.ThreadPool; @@ -52,8 +52,8 @@ import java.util.concurrent.TimeUnit; import java.util.function.Consumer; -import static org.apache.lucene.tests.util.LuceneTestCase.random; import static org.opensearch.test.OpenSearchTestCase.randomInt; +import static org.apache.lucene.tests.util.LuceneTestCase.random; public class FakeThreadPoolClusterManagerService extends ClusterManagerService { private static final Logger logger = LogManager.getLogger(FakeThreadPoolClusterManagerService.class); diff --git a/test/framework/src/main/java/org/opensearch/common/breaker/TestCircuitBreaker.java 
b/test/framework/src/main/java/org/opensearch/common/breaker/TestCircuitBreaker.java index bbacdd2ac4eec..9705a56700f72 100644 --- a/test/framework/src/main/java/org/opensearch/common/breaker/TestCircuitBreaker.java +++ b/test/framework/src/main/java/org/opensearch/common/breaker/TestCircuitBreaker.java @@ -32,6 +32,9 @@ package org.opensearch.common.breaker; +import org.opensearch.core.common.breaker.CircuitBreakingException; +import org.opensearch.core.common.breaker.NoopCircuitBreaker; + import java.util.concurrent.atomic.AtomicBoolean; public class TestCircuitBreaker extends NoopCircuitBreaker { diff --git a/test/framework/src/main/java/org/opensearch/common/bytes/AbstractBytesReferenceTestCase.java b/test/framework/src/main/java/org/opensearch/common/bytes/AbstractBytesReferenceTestCase.java index 07fd383dc230a..855f2a7d7a6dc 100644 --- a/test/framework/src/main/java/org/opensearch/common/bytes/AbstractBytesReferenceTestCase.java +++ b/test/framework/src/main/java/org/opensearch/common/bytes/AbstractBytesReferenceTestCase.java @@ -35,17 +35,17 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.BytesRefIterator; -import org.opensearch.common.breaker.CircuitBreaker; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.io.stream.ReleasableBytesStreamOutput; +import org.opensearch.common.util.BigArrays; +import org.opensearch.common.util.PageCacheRecycler; +import org.opensearch.core.common.breaker.CircuitBreaker; import org.opensearch.core.common.bytes.AbstractBytesReference; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.util.BigArrays; import org.opensearch.core.common.util.ByteArray; -import org.opensearch.common.util.PageCacheRecycler; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.test.OpenSearchTestCase; import java.io.EOFException; diff --git a/test/framework/src/main/java/org/opensearch/common/hash/AvalancheStats.java b/test/framework/src/main/java/org/opensearch/common/hash/AvalancheStats.java new file mode 100644 index 0000000000000..c1600abcacd3e --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/common/hash/AvalancheStats.java @@ -0,0 +1,63 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.hash; + +import java.util.Locale; + +/** + * Represents the avalanche statistics of a hash function. 
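+ * <p> + * Given {@code flips[i][o]}, the number of iterations in which flipping input bit {@code i} also flipped output bit {@code o}, + * and {@code p = flips[i][o] / iterations}: bias is the absolute mean of {@code 2p - 1} over all (input, output) bit pairs, + * diffusion is {@code 1 - bias}, and the sum of squared errors accumulates {@code (0.5 - p)^2} per pair. An ideal hash flips + * each output bit with probability 0.5, giving zero bias and full diffusion.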
+ */ +public class AvalancheStats { + private final int inputBits; + private final int outputBits; + private final double bias; + private final double sumOfSquaredErrors; + + public AvalancheStats(int[][] flips, int iterations) { + this.inputBits = flips.length; + this.outputBits = flips[0].length; + double sumOfBiases = 0; + double sumOfSquaredErrors = 0; + + for (int i = 0; i < inputBits; i++) { + for (int o = 0; o < outputBits; o++) { + sumOfSquaredErrors += Math.pow(0.5 - ((double) flips[i][o] / iterations), 2); + sumOfBiases += 2 * ((double) flips[i][o] / iterations) - 1; + } + } + + this.bias = Math.abs(sumOfBiases / (inputBits * outputBits)); + this.sumOfSquaredErrors = sumOfSquaredErrors; + } + + public double bias() { + return bias; + } + + public double diffusion() { + return 1 - bias; + } + + public double sumOfSquaredErrors() { + return sumOfSquaredErrors; + } + + @Override + public String toString() { + return String.format( + Locale.ROOT, + "AvalancheStats{inputBits=%d, outputBits=%d, bias=%.4f%%, diffusion=%.4f%%, sumOfSquaredErrors=%.2f}", + inputBits, + outputBits, + bias() * 100, + diffusion() * 100, + sumOfSquaredErrors() + ); + } +} diff --git a/test/framework/src/main/java/org/opensearch/common/hash/HashFunctionTestCase.java b/test/framework/src/main/java/org/opensearch/common/hash/HashFunctionTestCase.java new file mode 100644 index 0000000000000..e272fe0962047 --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/common/hash/HashFunctionTestCase.java @@ -0,0 +1,79 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.hash; + +import org.opensearch.common.Randomness; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Arrays; +import java.util.Random; + +/** + * Base class for testing the quality of hash functions. + */ +public abstract class HashFunctionTestCase extends OpenSearchTestCase { + private static final int[] INPUT_BITS = new int[] { 24, 32, 40, 48, 56, 64, 72, 80, 96, 112, 128, 160, 512, 1024 }; + private static final int ITERATIONS = 1000; + private static final double BIAS_THRESHOLD = 0.01; // 1% + + public abstract byte[] hash(byte[] input); + + public abstract int outputBits(); + + /** + * Tests if the hash function shows an avalanche effect, i.e, flipping a single input bit + * should flip half the output bits. 
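+ * <p> + * Each input width in {@code INPUT_BITS} is simulated with {@code ITERATIONS} random inputs; the test fails + * if the measured bias for any width reaches {@code BIAS_THRESHOLD} (1%).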
+ */ + public void testAvalanche() { + for (int inputBits : INPUT_BITS) { + AvalancheStats stats = simulate(inputBits); + if (stats.bias() >= BIAS_THRESHOLD) { + fail("bias exceeds threshold: " + stats); + } + } + } + + private AvalancheStats simulate(int inputBits) { + int outputBits = outputBits(); + assert inputBits % 8 == 0; // using full bytes for simplicity + assert outputBits % 8 == 0; // using full bytes for simplicity + byte[] input = new byte[inputBits >>> 3]; + Random random = Randomness.get(); + int[][] flips = new int[inputBits][outputBits]; + + for (int iter = 0; iter < ITERATIONS; iter++) { + random.nextBytes(input); + byte[] hash = Arrays.copyOf(hash(input), outputBits >>> 3); // copying since the underlying byte-array is reused + + for (int i = 0; i < inputBits; i++) { + flipBit(input, i); // flip one bit + byte[] newHash = hash(input); // recompute the hash; half the bits should have flipped + flipBit(input, i); // return to original + + for (int o = 0; o < outputBits; o++) { + flips[i][o] += getBit(hash, o) ^ getBit(newHash, o); + } + } + } + + return new AvalancheStats(flips, ITERATIONS); + } + + private static void flipBit(byte[] input, int position) { + int offset = position / 8; + int bit = position & 7; + input[offset] ^= (1 << bit); + } + + private static int getBit(byte[] input, int position) { + int offset = position / 8; + int bit = position & 7; + return (input[offset] >>> bit) & 1; + } +} diff --git a/test/framework/src/main/java/org/opensearch/common/logging/JsonLogsIntegTestCase.java b/test/framework/src/main/java/org/opensearch/common/logging/JsonLogsIntegTestCase.java index e4774030f21cb..5f4d92d65548b 100644 --- a/test/framework/src/main/java/org/opensearch/common/logging/JsonLogsIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/common/logging/JsonLogsIntegTestCase.java @@ -52,11 +52,11 @@ * Fields available upon process startup: <code>type</code>, <code>timestamp</code>, <code>level</code>, <code>component</code>, * <code>message</code>, <code>node.name</code>, <code>cluster.name</code>. * Whereas <code>node.id</code> and <code>cluster.uuid</code> are available later once the first clusterState has been received. - * + * <p> * * <code>node.name</code>, <code>cluster.name</code>, <code>node.id</code>, <code>cluster.uuid</code> * should not change across all log lines - * + * <p> * Note that this won't pass for nodes in clusters that don't have the node name defined in opensearch.yml <strong>and</strong> start * with DEBUG or TRACE level logging. Those nodes log a few lines before the node.name is set by <code>LogConfigurator.setNodeName</code>. 
*/ diff --git a/test/framework/src/main/java/org/opensearch/common/logging/JsonLogsStream.java b/test/framework/src/main/java/org/opensearch/common/logging/JsonLogsStream.java index 81a637efbb388..20973d9879864 100644 --- a/test/framework/src/main/java/org/opensearch/common/logging/JsonLogsStream.java +++ b/test/framework/src/main/java/org/opensearch/common/logging/JsonLogsStream.java @@ -32,10 +32,10 @@ package org.opensearch.common.logging; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; import java.io.BufferedReader; import java.io.IOException; diff --git a/test/framework/src/main/java/org/opensearch/common/logging/TestThreadInfoPatternConverter.java b/test/framework/src/main/java/org/opensearch/common/logging/TestThreadInfoPatternConverter.java index 2543d702e481f..dffc746870499 100644 --- a/test/framework/src/main/java/org/opensearch/common/logging/TestThreadInfoPatternConverter.java +++ b/test/framework/src/main/java/org/opensearch/common/logging/TestThreadInfoPatternConverter.java @@ -32,10 +32,6 @@ package org.opensearch.common.logging; -import java.util.Arrays; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - import org.apache.logging.log4j.core.LogEvent; import org.apache.logging.log4j.core.config.plugins.Plugin; import org.apache.logging.log4j.core.pattern.ConverterKeys; @@ -43,6 +39,10 @@ import org.apache.logging.log4j.core.pattern.PatternConverter; import org.opensearch.test.OpenSearchIntegTestCase; +import java.util.Arrays; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + /** * Converts {@code %test_thread_info} in log4j patterns into information * based on the loggin thread's name. 
If that thread is part of an diff --git a/test/framework/src/main/java/org/opensearch/common/lucene/store/OpenSearchIndexInputTestCase.java b/test/framework/src/main/java/org/opensearch/common/lucene/store/OpenSearchIndexInputTestCase.java index 4fb310b2370ed..e9d5719f04706 100644 --- a/test/framework/src/main/java/org/opensearch/common/lucene/store/OpenSearchIndexInputTestCase.java +++ b/test/framework/src/main/java/org/opensearch/common/lucene/store/OpenSearchIndexInputTestCase.java @@ -32,13 +32,13 @@ package org.opensearch.common.lucene.store; import org.apache.lucene.store.IndexInput; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.ActionListener; import org.opensearch.test.OpenSearchTestCase; import org.junit.AfterClass; import org.junit.BeforeClass; diff --git a/test/framework/src/main/java/org/opensearch/common/util/MockBigArrays.java b/test/framework/src/main/java/org/opensearch/common/util/MockBigArrays.java index 5ef68f0eab757..faf97039974ea 100644 --- a/test/framework/src/main/java/org/opensearch/common/util/MockBigArrays.java +++ b/test/framework/src/main/java/org/opensearch/common/util/MockBigArrays.java @@ -34,15 +34,16 @@ import com.carrotsearch.randomizedtesting.RandomizedContext; import com.carrotsearch.randomizedtesting.SeedUtils; + +import org.apache.lucene.tests.util.LuceneTestCase; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.Accountables; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.tests.util.LuceneTestCase; -import org.opensearch.common.breaker.CircuitBreaker; import org.opensearch.common.util.set.Sets; +import org.opensearch.core.common.breaker.CircuitBreaker; import org.opensearch.core.common.util.BigArray; import org.opensearch.core.common.util.ByteArray; -import org.opensearch.indices.breaker.CircuitBreakerService; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import java.util.Collection; import java.util.Collections; diff --git a/test/framework/src/main/java/org/opensearch/gateway/MockGatewayMetaState.java b/test/framework/src/main/java/org/opensearch/gateway/MockGatewayMetaState.java index 6a3748e55394e..2f006a5519d69 100644 --- a/test/framework/src/main/java/org/opensearch/gateway/MockGatewayMetaState.java +++ b/test/framework/src/main/java/org/opensearch/gateway/MockGatewayMetaState.java @@ -33,6 +33,7 @@ package org.opensearch.gateway; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.coordination.PersistedStateRegistry; import org.opensearch.cluster.metadata.Manifest; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.MetadataIndexUpgradeService; @@ -44,6 +45,8 @@ import org.opensearch.common.util.BigArrays; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.NodeEnvironment; +import org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.index.recovery.RemoteStoreRestoreService; import org.opensearch.plugins.MetadataUpgrader; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -62,10 +65,32 @@ public class MockGatewayMetaState 
extends GatewayMetaState { private final DiscoveryNode localNode; private final BigArrays bigArrays; + private final RemoteClusterStateService remoteClusterStateService; + private final RemoteStoreRestoreService remoteStoreRestoreService; + private boolean prepareFullState = false; + + public MockGatewayMetaState(DiscoveryNode localNode, BigArrays bigArrays, boolean prepareFullState) { + this(localNode, bigArrays); + this.prepareFullState = prepareFullState; + } public MockGatewayMetaState(DiscoveryNode localNode, BigArrays bigArrays) { this.localNode = localNode; this.bigArrays = bigArrays; + this.remoteClusterStateService = mock(RemoteClusterStateService.class); + this.remoteStoreRestoreService = mock(RemoteStoreRestoreService.class); + } + + public MockGatewayMetaState( + DiscoveryNode localNode, + BigArrays bigArrays, + RemoteClusterStateService remoteClusterStateService, + RemoteStoreRestoreService remoteStoreRestoreService + ) { + this.localNode = localNode; + this.bigArrays = bigArrays; + this.remoteClusterStateService = remoteClusterStateService; + this.remoteStoreRestoreService = remoteStoreRestoreService; } @Override @@ -80,11 +105,35 @@ Metadata upgradeMetadataForNode( @Override ClusterState prepareInitialClusterState(TransportService transportService, ClusterService clusterService, ClusterState clusterState) { - // Just set localNode here, not to mess with ClusterService and IndicesService mocking - return ClusterStateUpdaters.setLocalNode(clusterState, localNode); + if (prepareFullState) { + return super.prepareInitialClusterState(transportService, clusterService, clusterState); + } else { + // Just set localNode here, not to mess with ClusterService and IndicesService mocking + return ClusterStateUpdaters.setLocalNode(clusterState, localNode); + } } - public void start(Settings settings, NodeEnvironment nodeEnvironment, NamedXContentRegistry xContentRegistry) { + @Override + public void close() throws IOException { + super.close(); + } + + public void start( + Settings settings, + NodeEnvironment nodeEnvironment, + NamedXContentRegistry xContentRegistry, + PersistedStateRegistry persistedStateRegistry + ) { + start(settings, nodeEnvironment, xContentRegistry, persistedStateRegistry, false); + } + + public void start( + Settings settings, + NodeEnvironment nodeEnvironment, + NamedXContentRegistry xContentRegistry, + PersistedStateRegistry persistedStateRegistry, + boolean prepareFullState + ) { final TransportService transportService = mock(TransportService.class); when(transportService.getThreadPool()).thenReturn(mock(ThreadPool.class)); final ClusterService clusterService = mock(ClusterService.class); @@ -97,6 +146,7 @@ public void start(Settings settings, NodeEnvironment nodeEnvironment, NamedXCont } catch (IOException e) { throw new AssertionError(e); } + this.prepareFullState = prepareFullState; start( settings, transportService, @@ -110,7 +160,10 @@ public void start(Settings settings, NodeEnvironment nodeEnvironment, NamedXCont bigArrays, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L - ) + ), + remoteClusterStateService, + persistedStateRegistry, + remoteStoreRestoreService ); } } diff --git a/test/framework/src/main/java/org/opensearch/index/MapperTestUtils.java b/test/framework/src/main/java/org/opensearch/index/MapperTestUtils.java index c2825bc60542b..108492c1cf8f9 100644 --- a/test/framework/src/main/java/org/opensearch/index/MapperTestUtils.java +++ b/test/framework/src/main/java/org/opensearch/index/MapperTestUtils.java @@ 
-52,10 +52,10 @@ import java.nio.file.Path; import java.util.Collections; -import static org.apache.lucene.tests.util.LuceneTestCase.expectThrows; import static org.opensearch.test.OpenSearchTestCase.createTestAnalysis; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.containsString; +import static org.apache.lucene.tests.util.LuceneTestCase.expectThrows; public class MapperTestUtils { diff --git a/test/framework/src/main/java/org/opensearch/index/MockEngineFactoryPlugin.java b/test/framework/src/main/java/org/opensearch/index/MockEngineFactoryPlugin.java index 7d3282ca4443a..0a47db4c740d6 100644 --- a/test/framework/src/main/java/org/opensearch/index/MockEngineFactoryPlugin.java +++ b/test/framework/src/main/java/org/opensearch/index/MockEngineFactoryPlugin.java @@ -31,8 +31,8 @@ package org.opensearch.index; -import org.apache.lucene.tests.index.AssertingDirectoryReader; import org.apache.lucene.index.FilterDirectoryReader; +import org.apache.lucene.tests.index.AssertingDirectoryReader; import org.opensearch.common.settings.Setting; import org.opensearch.index.engine.EngineFactory; import org.opensearch.plugins.EnginePlugin; @@ -46,7 +46,7 @@ /** * A plugin to use {@link MockEngineFactory}. - * + * <p> * Subclasses may override the reader wrapper used. */ public class MockEngineFactoryPlugin extends Plugin implements EnginePlugin { diff --git a/test/framework/src/main/java/org/opensearch/index/RandomCreateIndexGenerator.java b/test/framework/src/main/java/org/opensearch/index/RandomCreateIndexGenerator.java index 3c20f22e392b8..5ab77783b2bac 100644 --- a/test/framework/src/main/java/org/opensearch/index/RandomCreateIndexGenerator.java +++ b/test/framework/src/main/java/org/opensearch/index/RandomCreateIndexGenerator.java @@ -35,11 +35,13 @@ import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.admin.indices.create.CreateIndexRequest; import org.opensearch.common.settings.Settings; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; import java.io.IOException; +import java.util.HashSet; +import java.util.Set; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; @@ -54,7 +56,7 @@ private RandomCreateIndexGenerator() {} /** * Returns a random {@link CreateIndexRequest}. - * + * <p> * Randomizes the index name, the aliases, mappings and settings associated with the * index. If present, the mapping definition will be nested under a type name. 
*/ @@ -96,7 +98,7 @@ public static Settings randomIndexSettings() { * Creates a random mapping */ public static XContentBuilder randomMapping() throws IOException { - XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); builder.startObject(); randomMappingFields(builder, true); @@ -112,8 +114,12 @@ public static void randomMappingFields(XContentBuilder builder, boolean allowObj builder.startObject("properties"); int fieldsNo = randomIntBetween(0, 5); - for (int i = 0; i < fieldsNo; i++) { - builder.startObject(randomAlphaOfLength(5)); + Set<String> uniqueFields = new HashSet<>(); + while (uniqueFields.size() < fieldsNo) { + uniqueFields.add(randomAlphaOfLength(5)); + } + for (String uniqueField : uniqueFields) { + builder.startObject(uniqueField); if (allowObjectField && randomBoolean()) { randomMappingFields(builder, false); diff --git a/test/framework/src/main/java/org/opensearch/index/alias/RandomAliasActionsGenerator.java b/test/framework/src/main/java/org/opensearch/index/alias/RandomAliasActionsGenerator.java index ce862d2b69d8f..fb9db9a578b4a 100644 --- a/test/framework/src/main/java/org/opensearch/index/alias/RandomAliasActionsGenerator.java +++ b/test/framework/src/main/java/org/opensearch/index/alias/RandomAliasActionsGenerator.java @@ -102,6 +102,11 @@ public static AliasActions randomAliasAction(boolean useStringAsFilter) { action.isHidden(randomBoolean()); } } + if (action.actionType() == AliasActions.Type.REMOVE) { + if (randomBoolean()) { + action.mustExist(randomBoolean()); + } + } return action; } diff --git a/test/framework/src/main/java/org/opensearch/index/analysis/AnalysisTestsHelper.java b/test/framework/src/main/java/org/opensearch/index/analysis/AnalysisTestsHelper.java index ae542819d157e..80c54ec20d51f 100644 --- a/test/framework/src/main/java/org/opensearch/index/analysis/AnalysisTestsHelper.java +++ b/test/framework/src/main/java/org/opensearch/index/analysis/AnalysisTestsHelper.java @@ -39,8 +39,8 @@ import org.opensearch.index.IndexSettings; import org.opensearch.indices.analysis.AnalysisModule; import org.opensearch.plugins.AnalysisPlugin; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; import java.nio.file.Path; diff --git a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java index 1ac92bbb479c3..43289a7c89524 100644 --- a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java @@ -62,8 +62,6 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; -import org.junit.After; -import org.junit.Before; import org.opensearch.Version; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.support.replication.ReplicationResponse; @@ -73,9 +71,6 @@ import org.opensearch.common.CheckedBiFunction; import org.opensearch.common.Nullable; import org.opensearch.common.Randomness; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.compress.CompressedXContent; import 
org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.lucene.Lucene; @@ -83,13 +78,18 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; +import org.opensearch.common.util.io.IOUtils; import org.opensearch.common.util.set.Sets; -import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.indices.breaker.CircuitBreakerService; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexSettings; import org.opensearch.index.MapperTestUtils; import org.opensearch.index.VersionType; @@ -110,7 +110,6 @@ import org.opensearch.index.seqno.ReplicationTracker; import org.opensearch.index.seqno.RetentionLeases; import org.opensearch.index.seqno.SequenceNumbers; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.store.Store; import org.opensearch.index.translog.InternalTranslogManager; import org.opensearch.index.translog.LocalTranslog; @@ -119,13 +118,13 @@ import org.opensearch.index.translog.TranslogDeletionPolicy; import org.opensearch.index.translog.TranslogManager; import org.opensearch.index.translog.listener.TranslogEventListener; -import org.opensearch.indices.breaker.CircuitBreakerService; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.test.DummyShardLock; import org.opensearch.test.IndexSettingsModule; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; import java.io.IOException; import java.nio.charset.Charset; @@ -150,15 +149,15 @@ import static java.util.Collections.emptyList; import static java.util.Collections.shuffle; +import static org.opensearch.index.engine.Engine.Operation.Origin.PEER_RECOVERY; +import static org.opensearch.index.engine.Engine.Operation.Origin.PRIMARY; +import static org.opensearch.index.engine.Engine.Operation.Origin.REPLICA; +import static org.opensearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; -import static org.opensearch.index.engine.Engine.Operation.Origin.PEER_RECOVERY; -import static org.opensearch.index.engine.Engine.Operation.Origin.PRIMARY; -import static org.opensearch.index.engine.Engine.Operation.Origin.REPLICA; -import static org.opensearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; public abstract class EngineTestCase extends OpenSearchTestCase { @@ -423,23 +422,22 @@ protected static ParsedDocument testParsedDocument( } else { document.add(new StoredField(SourceFieldMapper.NAME, 
ref.bytes, ref.offset, ref.length)); } - return new ParsedDocument(versionField, seqID, id, routing, Arrays.asList(document), source, XContentType.JSON, mappingUpdate); + return new ParsedDocument(versionField, seqID, id, routing, Arrays.asList(document), source, MediaTypeRegistry.JSON, mappingUpdate); } public static CheckedBiFunction<String, Integer, ParsedDocument, IOException> nestedParsedDocFactory() throws Exception { final MapperService mapperService = createMapperService(); - final String nestedMapping = Strings.toString( - XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("properties") - .startObject("nested_field") - .field("type", "nested") - .endObject() - .endObject() - .endObject() - .endObject() - ); + final String nestedMapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("properties") + .startObject("nested_field") + .field("type", "nested") + .endObject() + .endObject() + .endObject() + .endObject() + .toString(); final DocumentMapper nestedMapper = mapperService.documentMapperParser().parse("type", new CompressedXContent(nestedMapping)); return (docId, nestedFieldValues) -> { final XContentBuilder source = XContentFactory.jsonBuilder().startObject().field("field", "value"); @@ -451,7 +449,7 @@ public static CheckedBiFunction<String, Integer, ParsedDocument, IOException> ne source.endObject(); } source.endObject(); - return nestedMapper.parse(new SourceToParse("test", docId, BytesReference.bytes(source), XContentType.JSON)); + return nestedMapper.parse(new SourceToParse("test", docId, BytesReference.bytes(source), MediaTypeRegistry.JSON)); }; } @@ -480,7 +478,7 @@ public ParsedDocument newDeleteTombstoneDoc(String id) { null, Collections.singletonList(doc), new BytesArray("{}"), - XContentType.JSON, + MediaTypeRegistry.JSON, null ); } @@ -498,7 +496,16 @@ public ParsedDocument newNoopTombstoneDoc(String reason) { doc.add(versionField); BytesRef byteRef = new BytesRef(reason); doc.add(new StoredField(SourceFieldMapper.NAME, byteRef.bytes, byteRef.offset, byteRef.length)); - return new ParsedDocument(versionField, seqID, null, null, Collections.singletonList(doc), null, XContentType.JSON, null); + return new ParsedDocument( + versionField, + seqID, + null, + null, + Collections.singletonList(doc), + null, + MediaTypeRegistry.JSON, + null + ); } }; } @@ -520,7 +527,7 @@ protected Translog createTranslog(LongSupplier primaryTermSupplier) throws IOExc } protected Translog createTranslog(Path translogPath, LongSupplier primaryTermSupplier) throws IOException { - TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE); + TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""); String translogUUID = Translog.createEmptyTranslog( translogPath, SequenceNumbers.NO_OPS_PERFORMED, @@ -833,10 +840,45 @@ public EngineConfig config( final @Nullable Supplier<RetentionLeases> maybeRetentionLeasesSupplier, final CircuitBreakerService breakerService ) { - final IndexWriterConfig iwc = newIndexWriterConfig(); - final TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE); final Engine.EventListener eventListener = new Engine.EventListener() { }; // we don't need to notify anybody in this test + + return config( + indexSettings, + store, + translogPath, + mergePolicy, + externalRefreshListener, + 
internalRefreshListener, + indexSort, + maybeGlobalCheckpointSupplier, + maybeGlobalCheckpointSupplier == null ? null : () -> RetentionLeases.EMPTY, + breakerService, + eventListener + ); + } + + public EngineConfig config( + final IndexSettings indexSettings, + final Store store, + final Path translogPath, + final MergePolicy mergePolicy, + final ReferenceManager.RefreshListener externalRefreshListener, + final ReferenceManager.RefreshListener internalRefreshListener, + final Sort indexSort, + final @Nullable LongSupplier maybeGlobalCheckpointSupplier, + final @Nullable Supplier<RetentionLeases> maybeRetentionLeasesSupplier, + final CircuitBreakerService breakerService, + final Engine.EventListener eventListener + ) { + final IndexWriterConfig iwc = newIndexWriterConfig(); + final TranslogConfig translogConfig = new TranslogConfig( + shardId, + translogPath, + indexSettings, + BigArrays.NON_RECYCLING_INSTANCE, + "" + ); final List<ReferenceManager.RefreshListener> extRefreshListenerList = externalRefreshListener == null ? emptyList() : Collections.singletonList(externalRefreshListener); @@ -903,7 +945,7 @@ protected EngineConfig config( .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) .build() ); - TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE); + TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE, ""); return new EngineConfig.Builder().shardId(config.getShardId()) .threadPool(config.getThreadPool()) .indexSettings(indexSettings) diff --git a/test/framework/src/main/java/org/opensearch/index/engine/TranslogHandler.java b/test/framework/src/main/java/org/opensearch/index/engine/TranslogHandler.java index 6a715599a8e1a..9e4e59d9a4d15 100644 --- a/test/framework/src/main/java/org/opensearch/index/engine/TranslogHandler.java +++ b/test/framework/src/main/java/org/opensearch/index/engine/TranslogHandler.java @@ -33,8 +33,8 @@ package org.opensearch.index.engine; import org.apache.lucene.analysis.standard.StandardAnalyzer; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.index.IndexSettings; import org.opensearch.index.VersionType; import org.opensearch.index.analysis.AnalysisRegistry; @@ -137,7 +137,13 @@ public Engine.Operation convertToEngineOp(Translog.Operation operation, Engine.O final String indexName = mapperService.index().getName(); final Engine.Index engineIndex = IndexShard.prepareIndex( docMapper(MapperService.SINGLE_MAPPING_NAME), - new SourceToParse(indexName, index.id(), index.source(), XContentHelper.xContentType(index.source()), index.routing()), + new SourceToParse( + indexName, + index.id(), + index.source(), + MediaTypeRegistry.xContentType(index.source()), + index.routing() + ), index.seqNo(), index.primaryTerm(), index.version(), diff --git a/test/framework/src/main/java/org/opensearch/index/mapper/FieldMapperTestCase.java b/test/framework/src/main/java/org/opensearch/index/mapper/FieldMapperTestCase.java index c7783276db237..77137073aa30f 100644 --- a/test/framework/src/main/java/org/opensearch/index/mapper/FieldMapperTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/mapper/FieldMapperTestCase.java @@ -36,12 +36,11 @@ import org.apache.lucene.analysis.core.WhitespaceAnalyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; import 
org.opensearch.Version; -import org.opensearch.common.Strings; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.index.IndexService; import org.opensearch.index.analysis.AnalyzerScope; import org.opensearch.index.analysis.NamedAnalyzer; @@ -150,7 +149,7 @@ protected Set<String> unsupportedProperties() { /** * Add type-specific modifiers for consistency checking. - * + * <p> * This should be called in a {@code @Before} method */ protected void addModifier(String property, boolean updateable, BiConsumer<T, T> method) { @@ -159,7 +158,7 @@ protected void addModifier(String property, boolean updateable, BiConsumer<T, T> /** * Add type-specific modifiers for consistency checking. - * + * <p> * This should be called in a {@code @Before} method */ protected void addBooleanModifier(String property, boolean updateable, BiConsumer<T, Boolean> method) { @@ -260,6 +259,6 @@ private String mappingsToString(ToXContent builder, boolean includeDefaults) thr x.startObject().startObject("properties"); builder.toXContent(x, params); x.endObject().endObject(); - return Strings.toString(x); + return x.toString(); } } diff --git a/test/framework/src/main/java/org/opensearch/index/mapper/FieldMapperTestCase2.java b/test/framework/src/main/java/org/opensearch/index/mapper/FieldMapperTestCase2.java index 3fef22418825a..5dfb2f16a1aae 100644 --- a/test/framework/src/main/java/org/opensearch/index/mapper/FieldMapperTestCase2.java +++ b/test/framework/src/main/java/org/opensearch/index/mapper/FieldMapperTestCase2.java @@ -35,7 +35,6 @@ import org.apache.lucene.analysis.core.KeywordAnalyzer; import org.apache.lucene.analysis.core.WhitespaceAnalyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; -import org.opensearch.common.Strings; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.analysis.AnalyzerScope; @@ -140,7 +139,7 @@ protected Set<String> unsupportedProperties() { /** * Add type-specific modifiers for consistency checking. - * + * <p> * This should be called in a {@code @Before} method */ protected void addModifier(String property, boolean updateable, BiConsumer<T, T> method) { @@ -149,7 +148,7 @@ protected void addModifier(String property, boolean updateable, BiConsumer<T, T> /** * Add type-specific modifiers for consistency checking. 
- * + * <p> * This should be called in a {@code @Before} method */ protected void addBooleanModifier(String property, boolean updateable, BiConsumer<T, Boolean> method) { @@ -233,8 +232,8 @@ protected void assertSerializes(T builder) throws IOException { XContentBuilder reparsed = mappingsToJson(rebuilt, false); XContentBuilder reparsedWithDefault = mappingsToJson(rebuilt, true); - assertThat(Strings.toString(reparsed), equalTo(Strings.toString(mappings))); - assertThat(Strings.toString(reparsedWithDefault), equalTo(Strings.toString(mappingsWithDefault))); + assertThat(reparsed.toString(), equalTo(mappings.toString())); + assertThat(reparsedWithDefault.toString(), equalTo(mappingsWithDefault.toString())); } private XContentBuilder mappingsToJson(ToXContent builder, boolean includeDefaults) throws IOException { diff --git a/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java index 9d39b488070be..a65ce3cbdd380 100644 --- a/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java @@ -35,20 +35,20 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.CheckedConsumer; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.Settings; -import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexSettings; import org.opensearch.index.analysis.AnalyzerScope; import org.opensearch.index.analysis.IndexAnalyzers; @@ -185,11 +185,11 @@ protected final SourceToParse source(CheckedConsumer<XContentBuilder, IOExceptio XContentBuilder builder = JsonXContent.contentBuilder().startObject(); build.accept(builder); builder.endObject(); - return new SourceToParse("test", "1", BytesReference.bytes(builder), XContentType.JSON); + return new SourceToParse("test", "1", BytesReference.bytes(builder), MediaTypeRegistry.JSON); } protected final SourceToParse source(String source) { - return new SourceToParse("test", "1", new BytesArray(source), XContentType.JSON); + return new SourceToParse("test", "1", new BytesArray(source), MediaTypeRegistry.JSON); } /** @@ -239,7 +239,7 @@ protected final XContentBuilder fieldMapping(CheckedConsumer<XContentBuilder, IO }); } - QueryShardContext createQueryShardContext(MapperService mapperService) { + protected QueryShardContext createQueryShardContext(MapperService mapperService) { QueryShardContext queryShardContext = 
mock(QueryShardContext.class); when(queryShardContext.getMapperService()).thenReturn(mapperService); when(queryShardContext.fieldMapper(anyString())).thenAnswer(inv -> mapperService.fieldType(inv.getArguments()[0].toString())); @@ -253,7 +253,9 @@ QueryShardContext createQueryShardContext(MapperService mapperService) { when(queryShardContext.allowExpensiveQueries()).thenReturn(true); when(queryShardContext.lookup()).thenReturn(new SearchLookup(mapperService, (ft, s) -> { throw new UnsupportedOperationException("search lookup not available"); - })); + }, SearchLookup.UNKNOWN_SHARD_ID)); + when(queryShardContext.getFieldType(any())).thenAnswer(inv -> mapperService.fieldType(inv.getArguments()[0].toString())); + when(queryShardContext.documentMapper(anyString())).thenReturn(mapperService.documentMapper()); return queryShardContext; } } diff --git a/test/framework/src/main/java/org/opensearch/index/mapper/MapperTestCase.java b/test/framework/src/main/java/org/opensearch/index/mapper/MapperTestCase.java index 57c09db577247..dc5954907a4fa 100644 --- a/test/framework/src/main/java/org/opensearch/index/mapper/MapperTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/mapper/MapperTestCase.java @@ -44,16 +44,15 @@ import org.apache.lucene.search.TermQuery; import org.opensearch.common.CheckedConsumer; import org.opensearch.common.SetOnce; -import org.opensearch.common.Strings; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexFieldDataCache; import org.opensearch.index.query.QueryShardContext; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.search.DocValueFormat; import org.opensearch.search.lookup.SearchLookup; @@ -192,7 +191,7 @@ public final void testMinimalSerializesToItself() throws IOException { XContentBuilder parsedFromOrig = JsonXContent.contentBuilder().startObject(); createMapperService(orig).documentMapper().mapping().toXContent(parsedFromOrig, ToXContent.EMPTY_PARAMS); parsedFromOrig.endObject(); - assertEquals(Strings.toString(orig), Strings.toString(parsedFromOrig)); + assertEquals(orig.toString(), parsedFromOrig.toString()); assertParseMinimalWarnings(); } @@ -204,7 +203,7 @@ public void testMinimalToMaximal() throws IOException { XContentBuilder parsedFromOrig = JsonXContent.contentBuilder().startObject(); createMapperService(orig).documentMapper().mapping().toXContent(parsedFromOrig, INCLUDE_DEFAULTS); parsedFromOrig.endObject(); - assertEquals(Strings.toString(orig), Strings.toString(parsedFromOrig)); + assertEquals(orig.toString(), parsedFromOrig.toString()); assertParseMaximalWarnings(); } @@ -294,7 +293,7 @@ protected final List<?> fetchFromDocValues(MapperService mapperService, MappedFi withLuceneIndex(mapperService, iw -> { iw.addDocument(mapperService.documentMapper().parse(source(b -> b.field(ft.name(), sourceValue))).rootDoc()); }, iw -> { - SearchLookup lookup = new SearchLookup(mapperService, fieldDataLookup); + SearchLookup lookup = new SearchLookup(mapperService, fieldDataLookup, SearchLookup.UNKNOWN_SHARD_ID); 
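+            // No real shard exists in this unit-test context, so the new three-argument constructor is given + // SearchLookup.UNKNOWN_SHARD_ID as a sentinel in place of an actual shard id.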
ValueFetcher valueFetcher = new DocValueFetcher(format, lookup.doc().getForField(ft)); IndexSearcher searcher = newSearcher(iw); LeafReaderContext context = searcher.getIndexReader().leaves().get(0); diff --git a/test/framework/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollActionTestCase.java b/test/framework/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollActionTestCase.java index 10d6a8de139f8..2c3eadb32cafc 100644 --- a/test/framework/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollActionTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollActionTestCase.java @@ -33,7 +33,7 @@ package org.opensearch.index.reindex; import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.tasks.TaskId; +import org.opensearch.core.tasks.TaskId; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; diff --git a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java index f3c98ce4f9f03..9800782272ede 100644 --- a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java @@ -34,7 +34,6 @@ import org.apache.lucene.store.AlreadyClosedException; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.DocWriteRequest; import org.opensearch.action.DocWriteResponse; import org.opensearch.action.admin.indices.flush.FlushRequest; @@ -74,14 +73,16 @@ import org.opensearch.cluster.routing.TestShardRouting; import org.opensearch.common.collect.Iterators; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.lease.Releasable; -import org.opensearch.common.lease.Releasables; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexSettings; import org.opensearch.index.engine.DocIdSeqNoAndSource; import org.opensearch.index.engine.EngineConfigFactory; @@ -96,9 +97,7 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; import org.opensearch.index.shard.PrimaryReplicaSyncer; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.ShardPath; -import org.opensearch.index.store.Store; import org.opensearch.index.translog.Translog; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.recovery.RecoveryTarget; @@ -149,7 +148,11 @@ protected ReplicationGroup createGroup(int replicas, Settings settings, EngineFa protected ReplicationGroup createGroup(int replicas, Settings settings, String mappings, EngineFactory engineFactory) throws 
IOException { - return createGroup(replicas, settings, mappings, engineFactory, null); + Path remotePath = null; + if ("true".equals(settings.get(IndexMetadata.SETTING_REMOTE_STORE_ENABLED))) { + remotePath = createTempDir(); + } + return createGroup(replicas, settings, mappings, engineFactory, remotePath); } protected ReplicationGroup createGroup(int replicas, Settings settings, String mappings, EngineFactory engineFactory, Path remotePath) @@ -249,10 +252,6 @@ protected ReplicationGroup(final IndexMetadata indexMetadata) throws IOException protected ReplicationGroup(final IndexMetadata indexMetadata, Path remotePath) throws IOException { final ShardRouting primaryRouting = this.createShardRouting("s0", true); - Store remoteStore = null; - if (remotePath != null) { - remoteStore = createRemoteStore(remotePath, primaryRouting, indexMetadata); - } primary = newShard( primaryRouting, indexMetadata, @@ -260,7 +259,7 @@ protected ReplicationGroup(final IndexMetadata indexMetadata, Path remotePath) t getEngineFactory(primaryRouting), () -> {}, retentionLeaseSyncer, - remoteStore + remotePath ); replicas = new CopyOnWriteArrayList<>(); this.indexMetadata = indexMetadata; @@ -291,7 +290,7 @@ protected EngineConfigFactory getEngineConfigFactory(IndexSettings indexSettings public int indexDocs(final int numOfDoc) throws Exception { for (int doc = 0; doc < numOfDoc; doc++) { final IndexRequest indexRequest = new IndexRequest(index.getName()).id(Integer.toString(docId.incrementAndGet())) - .source("{}", XContentType.JSON); + .source("{}", MediaTypeRegistry.JSON); final BulkItemResponse response = index(indexRequest); if (response.isFailed()) { throw response.getFailure().getCause(); @@ -304,7 +303,7 @@ public int indexDocs(final int numOfDoc) throws Exception { public int appendDocs(final int numOfDoc) throws Exception { for (int doc = 0; doc < numOfDoc; doc++) { - final IndexRequest indexRequest = new IndexRequest(index.getName()).source("{}", XContentType.JSON); + final IndexRequest indexRequest = new IndexRequest(index.getName()).source("{}", MediaTypeRegistry.JSON); final BulkItemResponse response = index(indexRequest); if (response.isFailed()) { throw response.getFailure().getCause(); @@ -386,10 +385,6 @@ public IndexShard addReplica() throws IOException { public IndexShard addReplica(Path remotePath) throws IOException { final ShardRouting replicaRouting = createShardRouting("s" + replicaId.incrementAndGet(), false); - Store remoteStore = null; - if (remotePath != null) { - remoteStore = createRemoteStore(remotePath, replicaRouting, indexMetadata); - } final IndexShard replica = newShard( replicaRouting, indexMetadata, @@ -397,7 +392,7 @@ public IndexShard addReplica(Path remotePath) throws IOException { getEngineFactory(replicaRouting), () -> {}, retentionLeaseSyncer, - remoteStore + remotePath ); addReplica(replica); return replica; @@ -627,8 +622,8 @@ public synchronized IndexShard getPrimary() { return primary; } - public synchronized void reinitPrimaryShard() throws IOException { - primary = reinitShard(primary); + public synchronized void reinitPrimaryShard(Path remotePath) throws IOException { + primary = reinitShard(primary, remotePath); computeReplicationTargets(); } diff --git a/test/framework/src/main/java/org/opensearch/index/replication/TestReplicationSource.java b/test/framework/src/main/java/org/opensearch/index/replication/TestReplicationSource.java index cf4b3800069bf..bcd47e3d578ee 100644 --- 
a/test/framework/src/main/java/org/opensearch/index/replication/TestReplicationSource.java +++ b/test/framework/src/main/java/org/opensearch/index/replication/TestReplicationSource.java @@ -8,7 +8,7 @@ package org.opensearch.index.replication; -import org.opensearch.action.ActionListener; +import org.opensearch.core.action.ActionListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.CheckpointInfoResponse; @@ -17,6 +17,7 @@ import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import java.util.List; +import java.util.function.BiConsumer; /** * This class is used by unit tests implementing SegmentReplicationSource @@ -36,6 +37,7 @@ public abstract void getSegmentFiles( ReplicationCheckpoint checkpoint, List<StoreFileMetadata> filesToFetch, IndexShard indexShard, + BiConsumer<String, Long> fileProgressTracker, ActionListener<GetSegmentFilesResponse> listener ); diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 93b9742ada0da..bf1c4d4c94e04 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -37,17 +37,15 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; -import org.junit.Assert; -import org.mockito.Mockito; import org.opensearch.ExceptionsHelper; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.replication.TransportReplicationAction; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.MappingMetadata; +import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.routing.IndexShardRoutingTable; @@ -66,20 +64,25 @@ import org.opensearch.common.blobstore.BlobStore; import org.opensearch.common.blobstore.fs.FsBlobContainer; import org.opensearch.common.blobstore.fs.FsBlobStore; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.io.PathUtils; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.io.IOUtils; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.indices.breaker.CircuitBreakerService; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.env.Environment; import 
org.opensearch.env.NodeEnvironment; +import org.opensearch.env.TestEnvironment; import org.opensearch.index.IndexSettings; import org.opensearch.index.MapperTestUtils; import org.opensearch.index.VersionType; @@ -91,9 +94,11 @@ import org.opensearch.index.engine.EngineFactory; import org.opensearch.index.engine.EngineTestCase; import org.opensearch.index.engine.InternalEngineFactory; +import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.SourceToParse; -import org.opensearch.index.remote.RemoteRefreshSegmentPressureService; +import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.index.replication.TestReplicationSource; import org.opensearch.index.seqno.ReplicationTracker; import org.opensearch.index.seqno.RetentionLeaseSyncer; @@ -112,9 +117,9 @@ import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.TranslogFactory; import org.opensearch.indices.IndicesService; -import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.indices.breaker.HierarchyCircuitBreakerService; import org.opensearch.indices.recovery.AsyncRecoveryTarget; +import org.opensearch.indices.recovery.DefaultRecoverySettings; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryFailedException; import org.opensearch.indices.recovery.RecoveryResponse; @@ -138,17 +143,21 @@ import org.opensearch.indices.replication.common.ReplicationFailedException; import org.opensearch.indices.replication.common.ReplicationListener; import org.opensearch.indices.replication.common.ReplicationState; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.repositories.IndexId; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.repositories.blobstore.BlobStoreTestUtil; import org.opensearch.repositories.blobstore.OpenSearchBlobStoreRepositoryIntegTestCase; +import org.opensearch.repositories.fs.FsRepository; import org.opensearch.snapshots.Snapshot; import org.opensearch.test.DummyShardLock; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; +import org.junit.Assert; import java.io.IOException; import java.nio.file.Path; @@ -165,11 +174,16 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; +import java.util.function.BiConsumer; import java.util.function.BiFunction; import java.util.function.Consumer; import java.util.function.Function; import java.util.stream.Collectors; +import org.mockito.Mockito; + +import static org.opensearch.cluster.routing.TestShardRouting.newShardRouting; +import static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; @@ -178,8 +192,6 @@ import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import static org.opensearch.cluster.routing.TestShardRouting.newShardRouting; -import static 
org.opensearch.test.ClusterServiceUtils.createClusterService; /** * A base class for unit tests that need to create and shutdown {@link IndexShard} instances easily, @@ -262,11 +274,11 @@ public Settings threadPoolSettings() { } protected Store createStore(IndexSettings indexSettings, ShardPath shardPath) throws IOException { - return createStore(shardPath.getShardId(), indexSettings, newFSDirectory(shardPath.resolveIndex())); + return createStore(shardPath.getShardId(), indexSettings, newFSDirectory(shardPath.resolveIndex()), shardPath); } - protected Store createStore(ShardId shardId, IndexSettings indexSettings, Directory directory) throws IOException { - return new Store(shardId, indexSettings, directory, new DummyShardLock(shardId)); + protected Store createStore(ShardId shardId, IndexSettings indexSettings, Directory directory, ShardPath shardPath) throws IOException { + return new Store(shardId, indexSettings, directory, new DummyShardLock(shardId), Store.OnClose.EMPTY, shardPath); } protected Releasable acquirePrimaryOperationPermitBlockingly(IndexShard indexShard) throws ExecutionException, InterruptedException { @@ -460,7 +472,7 @@ protected IndexShard newShard( @Nullable EngineFactory engineFactory, Runnable globalCheckpointSyncer, RetentionLeaseSyncer retentionLeaseSyncer, - Store remoteStore, + Path path, IndexingOperationListener... listeners ) throws IOException { // add node id as name to settings for proper logging @@ -478,7 +490,7 @@ protected IndexShard newShard( globalCheckpointSyncer, retentionLeaseSyncer, EMPTY_EVENT_LISTENER, - remoteStore, + path, listeners ); } @@ -506,7 +518,7 @@ protected IndexShard newShard( Runnable globalCheckpointSyncer, RetentionLeaseSyncer retentionLeaseSyncer, IndexEventListener indexEventListener, - Store remoteStore, + Path remotePath, IndexingOperationListener... listeners ) throws IOException { return newShard( @@ -521,11 +533,63 @@ protected IndexShard newShard( retentionLeaseSyncer, indexEventListener, SegmentReplicationCheckpointPublisher.EMPTY, - remoteStore, + remotePath, listeners ); } + protected IndexShard newShard(boolean primary, SegmentReplicationCheckpointPublisher checkpointPublisher) throws IOException { + final Settings settings = Settings.builder().put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build(); + return newShard(primary, checkpointPublisher, settings); + } + + /** + * creates a new initializing shard. The shard will be put in its proper path under the + * current node id the shard is assigned to. + * @param checkpointPublisher Segment Replication Checkpoint Publisher to publish checkpoint + */ + protected IndexShard newShard(boolean primary, SegmentReplicationCheckpointPublisher checkpointPublisher, Settings settings) + throws IOException { + final ShardId shardId = new ShardId("index", "_na_", 0); + final ShardRouting shardRouting = TestShardRouting.newShardRouting( + shardId, + randomAlphaOfLength(10), + primary, + ShardRoutingState.INITIALIZING, + primary ? 
RecoverySource.EmptyStoreRecoverySource.INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE + ); + final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(createTempDir()); + ShardPath shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId); + + Settings indexSettings = Settings.builder() + .put(settings) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), between(0, 1000)) + .put(Settings.EMPTY) + .build(); + IndexMetadata metadata = IndexMetadata.builder(shardRouting.getIndexName()) + .settings(indexSettings) + .primaryTerm(0, primaryTerm) + .putMapping("{ \"properties\": {} }") + .build(); + return newShard( + shardRouting, + shardPath, + metadata, + null, + null, + new NRTReplicationEngineFactory(), + new EngineConfigFactory(new IndexSettings(metadata, metadata.getSettings())), + () -> {}, + RetentionLeaseSyncer.EMPTY, + EMPTY_EVENT_LISTENER, + checkpointPublisher, + null + ); + } + /** * creates a new initializing shard. * @param routing shard routing to use @@ -550,7 +614,7 @@ protected IndexShard newShard( RetentionLeaseSyncer retentionLeaseSyncer, IndexEventListener indexEventListener, SegmentReplicationCheckpointPublisher checkpointPublisher, - @Nullable Store remoteStore, + @Nullable Path remotePath, IndexingOperationListener... listeners ) throws IOException { final Settings nodeSettings = Settings.builder().put("node.name", routing.currentNodeId()).build(); @@ -578,28 +642,36 @@ protected IndexShard newShard( Collections.emptyList(), clusterSettings ); + Store remoteStore; + RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory = null; + RepositoriesService mockRepoSvc = mock(RepositoriesService.class); - RemoteRefreshSegmentPressureService remoteRefreshSegmentPressureService = null; if (indexSettings.isRemoteStoreEnabled()) { - if (remoteStore == null) { - Path remoteStorePath; - String remoteStoreRepository = indexSettings.getRemoteStoreRepository(); - if (remoteStoreRepository != null && remoteStoreRepository.endsWith("__test")) { - remoteStorePath = PathUtils.get(remoteStoreRepository.replace("__test", "")); - } else { - remoteStorePath = createTempDir(); - } - remoteStore = createRemoteStore(remoteStorePath, routing, indexMetadata); + String remoteStoreRepository = indexSettings.getRemoteStoreRepository(); + // The remote path is passed in via a repository setting. This is a hack used when shards are created through reset, + // since we can't get the remote path from IndexShard directly we use the repository to carry it.
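+            // For example, a repository string such as "/tmp/remote-store__test" (hypothetical path) maps back to + // "/tmp/remote-store"; any other value, or none, falls through to a fresh temp directory below.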
+ if (remoteStoreRepository != null && remoteStoreRepository.endsWith("__test")) { + remotePath = PathUtils.get(remoteStoreRepository.replace("__test", "")); + } else if (remotePath == null) { + remotePath = createTempDir(); } - remoteRefreshSegmentPressureService = new RemoteRefreshSegmentPressureService(clusterService, indexSettings.getSettings()); + + remoteStore = createRemoteStore(remotePath, routing, indexMetadata, shardPath); + + remoteStoreStatsTrackerFactory = new RemoteStoreStatsTrackerFactory(clusterService, indexSettings.getSettings()); + BlobStoreRepository repo = createRepository(remotePath); + when(mockRepoSvc.repository(any())).thenAnswer(invocationOnMock -> repo); + } else { + remoteStore = null; } final BiFunction<IndexSettings, ShardRouting, TranslogFactory> translogFactorySupplier = (settings, shardRouting) -> { if (settings.isRemoteTranslogStoreEnabled() && shardRouting.primary()) { return new RemoteBlobStoreInternalTranslogFactory( - this::createRepositoriesService, + () -> mockRepoSvc, threadPool, - settings.getRemoteStoreTranslogRepository() + settings.getRemoteStoreTranslogRepository(), + new RemoteTranslogTransferTracker(shardRouting.shardId(), 20) ); } return new InternalTranslogFactory(); @@ -628,11 +700,14 @@ protected IndexShard newShard( translogFactorySupplier, checkpointPublisher, remoteStore, - remoteRefreshSegmentPressureService + remoteStoreStatsTrackerFactory, + () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, + "dummy-node", + DefaultRecoverySettings.INSTANCE ); indexShard.addShardFailureCallback(DEFAULT_SHARD_FAILURE_HANDLER); - if (remoteRefreshSegmentPressureService != null) { - remoteRefreshSegmentPressureService.afterIndexShardCreated(indexShard); + if (remoteStoreStatsTrackerFactory != null) { + remoteStoreStatsTrackerFactory.afterIndexShardCreated(indexShard); } success = true; } finally { @@ -643,6 +718,39 @@ protected IndexShard newShard( return indexShard; } + private BlobStoreRepository createRepository(Path path) { + Settings settings = Settings.builder().put("location", path).build(); + RepositoryMetadata repositoryMetadata = new RepositoryMetadata(randomAlphaOfLength(10), FsRepository.TYPE, settings); + final ClusterService clusterService = BlobStoreTestUtil.mockClusterService(repositoryMetadata); + final FsRepository repository = new FsRepository( + repositoryMetadata, + createEnvironment(path), + xContentRegistry(), + clusterService, + new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)) + ) { + @Override + protected void assertSnapshotOrGenericThread() { + // eliminate thread name check as we create repo manually + } + }; + clusterService.addStateApplier(event -> repository.updateState(event.state())); + // Apply state once to initialize repo properly like RepositoriesService would + repository.updateState(clusterService.state()); + repository.start(); + return repository; + } + + private Environment createEnvironment(Path path) { + Path home = createTempDir(); + return TestEnvironment.newEnvironment( + Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), home.toAbsolutePath()) + .put(Environment.PATH_REPO_SETTING.getKey(), path.toAbsolutePath()) + .build() + ); + } + protected RepositoriesService createRepositoriesService() { RepositoriesService repositoriesService = Mockito.mock(RepositoriesService.class); BlobStoreRepository repository = Mockito.mock(BlobStoreRepository.class); @@ -666,22 +774,23 @@ protected RepositoriesService 
createRepositoriesService() { return repositoriesService; } - protected Store createRemoteStore(Path path, ShardRouting shardRouting, IndexMetadata metadata) throws IOException { + protected Store createRemoteStore(Path path, ShardRouting shardRouting, IndexMetadata metadata, ShardPath shardPath) + throws IOException { Settings nodeSettings = Settings.builder().put("node.name", shardRouting.currentNodeId()).build(); ShardId shardId = shardRouting.shardId(); RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = createRemoteSegmentStoreDirectory(shardId, path); - return createStore(shardId, new IndexSettings(metadata, nodeSettings), remoteSegmentStoreDirectory); + return createStore(shardId, new IndexSettings(metadata, nodeSettings), remoteSegmentStoreDirectory, shardPath); } protected RemoteSegmentStoreDirectory createRemoteSegmentStoreDirectory(ShardId shardId, Path path) throws IOException { NodeEnvironment.NodePath remoteNodePath = new NodeEnvironment.NodePath(path); ShardPath remoteShardPath = new ShardPath(false, remoteNodePath.resolve(shardId), remoteNodePath.resolve(shardId), shardId); - RemoteDirectory dataDirectory = newRemoteDirectory(remoteShardPath.resolveIndex()); - RemoteDirectory metadataDirectory = newRemoteDirectory(remoteShardPath.resolveIndex()); + RemoteDirectory dataDirectory = newRemoteDirectory(remoteShardPath.resolveIndex().resolve("data")); + RemoteDirectory metadataDirectory = newRemoteDirectory(remoteShardPath.resolveIndex().resolve("metadata")); RemoteStoreLockManager remoteStoreLockManager = new RemoteStoreMetadataLockManager( - new RemoteBufferedOutputDirectory(getBlobContainer(remoteShardPath.resolveIndex())) + new RemoteBufferedOutputDirectory(getBlobContainer(remoteShardPath.resolveIndex().resolve("lock_files"))) ); - return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, remoteStoreLockManager, threadPool); + return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, remoteStoreLockManager, threadPool, shardId); } private RemoteDirectory newRemoteDirectory(Path f) throws IOException { @@ -694,12 +803,18 @@ protected BlobContainer getBlobContainer(Path f) throws IOException { return new FsBlobContainer(fsBlobStore, blobPath, f); } + protected IndexShard reinitShard(IndexShard current, IndexingOperationListener... listeners) throws IOException { + return reinitShard(current, (Path) null, listeners); + } + /** * Takes an existing shard, closes it and starts a new initializing shard at the same location * + * @param current The current shard to reinit + * @param remotePath Remote path to recover from if remote storage is used * @param listeners new listeners to use for the newly created shard */ - protected IndexShard reinitShard(IndexShard current, IndexingOperationListener... listeners) throws IOException { + protected IndexShard reinitShard(IndexShard current, Path remotePath, IndexingOperationListener... listeners) throws IOException { final ShardRouting shardRouting = current.routingEntry(); return reinitShard( current, @@ -707,6 +822,7 @@ protected IndexShard reinitShard(IndexShard current, IndexingOperationListener.. shardRouting, shardRouting.primary() ? RecoverySource.ExistingStoreRecoverySource.INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE ), + remotePath, listeners ); } @@ -718,13 +834,18 @@ protected IndexShard reinitShard(IndexShard current, IndexingOperationListener..
* @param listeners new listeners to use for the newly created shard */ protected IndexShard reinitShard(IndexShard current, ShardRouting routing, IndexingOperationListener... listeners) throws IOException { + return reinitShard(current, routing, null, listeners); + } + + protected IndexShard reinitShard(IndexShard current, ShardRouting routing, Path remotePath, IndexingOperationListener... listeners) + throws IOException { return reinitShard( current, routing, current.indexSettings.getIndexMetadata(), current.engineFactory, current.engineConfigFactory, - current.remoteStore(), + remotePath, listeners ); } @@ -743,7 +864,7 @@ protected IndexShard reinitShard( IndexMetadata indexMetadata, EngineFactory engineFactory, EngineConfigFactory engineConfigFactory, - Store remoteStore, + Path remotePath, IndexingOperationListener... listeners ) throws IOException { closeShards(current); @@ -758,7 +879,7 @@ protected IndexShard reinitShard( current.getGlobalCheckpointSyncer(), current.getRetentionLeaseSyncer(), EMPTY_EVENT_LISTENER, - remoteStore, + remotePath, listeners ); } @@ -966,7 +1087,7 @@ protected void recoverReplica( /** * Recovers a replica from the given primary, allowing the user to supply a custom recovery target. A typical usage of a custom recovery * target is to assert things in the various stages of recovery. - * + * <p> * Note: this method keeps the shard in {@link IndexShardState#POST_RECOVERY} and doesn't start it. * * @param replica the recovery target shard @@ -1133,12 +1254,12 @@ protected Engine.IndexResult indexDoc(IndexShard shard, String type, String id) } protected Engine.IndexResult indexDoc(IndexShard shard, String type, String id, String source) throws IOException { - return indexDoc(shard, id, source, XContentType.JSON, null); + return indexDoc(shard, id, source, MediaTypeRegistry.JSON, null); } - protected Engine.IndexResult indexDoc(IndexShard shard, String id, String source, XContentType xContentType, String routing) + protected Engine.IndexResult indexDoc(IndexShard shard, String id, String source, MediaType mediaType, String routing) throws IOException { - SourceToParse sourceToParse = new SourceToParse(shard.shardId().getIndexName(), id, new BytesArray(source), xContentType, routing); + SourceToParse sourceToParse = new SourceToParse(shard.shardId().getIndexName(), id, new BytesArray(source), mediaType, routing); Engine.IndexResult result; if (shard.routingEntry().primary()) { result = shard.applyIndexOperationOnPrimary( @@ -1326,9 +1447,27 @@ public static Engine.Warmer createTestWarmer(IndexSettings indexSettings) { }; } + private SegmentReplicationTargetService getSegmentReplicationTargetService( + TransportService transportService, + IndicesService indicesService, + ClusterService clusterService, + SegmentReplicationSourceFactory sourceFactory + ) { + return new SegmentReplicationTargetService( + threadPool, + new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), + transportService, + sourceFactory, + indicesService, + clusterService + ); + } + /** * Segment Replication specific test method - Creates a {@link SegmentReplicationTargetService} to perform replications that has - been configured to return the given primaryShard's current segments.
In order to do so, it mimics the replication + * source (to avoid transport calls) and simply copies over the segment files from primary store to replica's as part of + * get_files calls. * * @param primaryShard {@link IndexShard} - The target replica shard in segment replication. * @param target {@link IndexShard} - The source primary shard in segment replication. @@ -1339,7 +1478,7 @@ public static Engine.Warmer createTestWarmer(IndexSettings indexSettings) { * which are desired right after files are copied. e.g. To work with temp files * @return Returns SegmentReplicationTargetService */ - public final SegmentReplicationTargetService prepareForReplication( + private SegmentReplicationTargetService prepareForReplication( IndexShard primaryShard, IndexShard target, TransportService transportService, @@ -1347,22 +1486,28 @@ public final SegmentReplicationTargetService prepareForReplication( ClusterService clusterService, Consumer<IndexShard> postGetFilesRunnable ) { - final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); - final SegmentReplicationTargetService targetService = new SegmentReplicationTargetService( - threadPool, - new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), - transportService, - sourceFactory, - indicesService, - clusterService - ); - final SegmentReplicationSource replicationSource = getSegmentReplicationSource( - primaryShard, - (repId) -> targetService.get(repId), - postGetFilesRunnable - ); - when(sourceFactory.get(any())).thenReturn(replicationSource); - when(indicesService.getShardOrNull(any())).thenReturn(target); + + SegmentReplicationSourceFactory sourceFactory = null; + SegmentReplicationTargetService targetService; + if (primaryShard.indexSettings.isRemoteStoreEnabled()) { + RecoverySettings recoverySettings = new RecoverySettings( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + sourceFactory = new SegmentReplicationSourceFactory(transportService, recoverySettings, clusterService); + targetService = getSegmentReplicationTargetService(transportService, indicesService, clusterService, sourceFactory); + } else { + sourceFactory = mock(SegmentReplicationSourceFactory.class); + targetService = getSegmentReplicationTargetService(transportService, indicesService, clusterService, sourceFactory); + final SegmentReplicationSource replicationSource = getSegmentReplicationSource( + primaryShard, + (repId) -> targetService.get(repId), + postGetFilesRunnable + ); + when(sourceFactory.get(any())).thenReturn(replicationSource); + // This is needed for force segment sync call. 
Remote store uses a different recovery mechanism + when(indicesService.getShardOrNull(any())).thenReturn(target); + } return targetService; } @@ -1459,10 +1604,7 @@ public void getCheckpointMetadata( ActionListener<CheckpointInfoResponse> listener ) { try { - final CopyState copyState = new CopyState( - ReplicationCheckpoint.empty(primaryShard.shardId, primaryShard.getLatestReplicationCheckpoint().getCodec()), - primaryShard - ); + final CopyState copyState = new CopyState(primaryShard.getLatestReplicationCheckpoint(), primaryShard); listener.onResponse( new CheckpointInfoResponse(copyState.getCheckpoint(), copyState.getMetadataMap(), copyState.getInfosBytes()) ); @@ -1479,6 +1621,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List<StoreFileMetadata> filesToFetch, IndexShard indexShard, + BiConsumer<String, Long> fileProgressTracker, ActionListener<GetSegmentFilesResponse> listener ) { try ( @@ -1502,9 +1645,11 @@ public void getSegmentFiles( * @param replicaShards - Replicas that will be updated. * @return {@link List} List of target components orchestrating replication. */ - public final List<SegmentReplicationTarget> replicateSegments(IndexShard primaryShard, List<IndexShard> replicaShards) + protected final List<SegmentReplicationTarget> replicateSegments(IndexShard primaryShard, List<IndexShard> replicaShards) throws IOException, InterruptedException { + // Latch to block test execution until replica catches up final CountDownLatch countDownLatch = new CountDownLatch(replicaShards.size()); + // Get primary metadata to verify with replica's, used to ensure replica catches up Map<String, StoreFileMetadata> primaryMetadata; try (final GatedCloseable<SegmentInfos> segmentInfosSnapshot = primaryShard.getSegmentInfosSnapshot()) { final SegmentInfos primarySegmentInfos = segmentInfosSnapshot.get(); @@ -1515,6 +1660,7 @@ public final List<SegmentReplicationTarget> replicateSegments(IndexShard primary final SegmentReplicationTargetService targetService = prepareForReplication(primaryShard, replica); final SegmentReplicationTarget target = targetService.startReplication( replica, + primaryShard.getLatestReplicationCheckpoint(), getTargetListener(primaryShard, replica, primaryMetadata, countDownLatch) ); ids.add(target); diff --git a/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java b/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java index a9e1a526b1786..be2f895301396 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java @@ -33,14 +33,14 @@ import org.apache.lucene.index.IndexCommit; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.common.component.AbstractLifecycleComponent; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; @@ -150,6 +150,16 @@ public long getRestoreThrottleTimeInNanos() { return 0; 
} + @Override + public long getRemoteUploadThrottleTimeInNanos() { + return 0; + } + + @Override + public long getRemoteDownloadThrottleTimeInNanos() { + return 0; + } + @Override public String startVerification() { return null; @@ -163,6 +173,11 @@ public boolean isReadOnly() { return false; } + @Override + public boolean isSystemRepository() { + return false; + } + @Override public void snapshotShard( Store store, diff --git a/test/framework/src/main/java/org/opensearch/indices/analysis/AnalysisFactoryTestCase.java b/test/framework/src/main/java/org/opensearch/indices/analysis/AnalysisFactoryTestCase.java index b93cb64e32cfe..c412ae8317f24 100644 --- a/test/framework/src/main/java/org/opensearch/indices/analysis/AnalysisFactoryTestCase.java +++ b/test/framework/src/main/java/org/opensearch/indices/analysis/AnalysisFactoryTestCase.java @@ -98,6 +98,7 @@ public abstract class AnalysisFactoryTestCase extends OpenSearchTestCase { .put("czechstem", MovedToAnalysisCommon.class) .put("decimaldigit", MovedToAnalysisCommon.class) .put("delimitedpayload", MovedToAnalysisCommon.class) + .put("delimitedtermfrequency", MovedToAnalysisCommon.class) .put("dictionarycompoundword", MovedToAnalysisCommon.class) .put("edgengram", MovedToAnalysisCommon.class) .put("elision", MovedToAnalysisCommon.class) @@ -201,9 +202,6 @@ public abstract class AnalysisFactoryTestCase extends OpenSearchTestCase { .put("daterecognizer", Void.class) // for token filters that generate bad offsets, which are now rejected since Lucene 7 .put("fixbrokenoffsets", Void.class) - // should we expose it, or maybe think about higher level integration of the - // fake term frequency feature (LUCENE-7854) - .put("delimitedtermfrequency", Void.class) // LUCENE-8273: ProtectedTermFilterFactory allows analysis chains to skip // particular token filters based on the attributes of the current token. .put("protectedterm", Void.class) diff --git a/test/framework/src/main/java/org/opensearch/indices/recovery/AsyncRecoveryTarget.java b/test/framework/src/main/java/org/opensearch/indices/recovery/AsyncRecoveryTarget.java index b3398fb752be5..a89c83b7c1dc3 100644 --- a/test/framework/src/main/java/org/opensearch/indices/recovery/AsyncRecoveryTarget.java +++ b/test/framework/src/main/java/org/opensearch/indices/recovery/AsyncRecoveryTarget.java @@ -33,7 +33,7 @@ package org.opensearch.indices.recovery; import org.apache.lucene.util.BytesRef; -import org.opensearch.action.ActionListener; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.index.seqno.ReplicationTracker; diff --git a/test/framework/src/main/java/org/opensearch/indices/recovery/DefaultRecoverySettings.java b/test/framework/src/main/java/org/opensearch/indices/recovery/DefaultRecoverySettings.java new file mode 100644 index 0000000000000..359668f5dad71 --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/indices/recovery/DefaultRecoverySettings.java @@ -0,0 +1,24 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.recovery; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; + +/** + * Utility to provide a {@link RecoverySettings} instance containing all defaults + */ +public final class DefaultRecoverySettings { + private DefaultRecoverySettings() {} + + public static final RecoverySettings INSTANCE = new RecoverySettings( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); +} diff --git a/test/framework/src/main/java/org/opensearch/ingest/IngestTestPlugin.java b/test/framework/src/main/java/org/opensearch/ingest/IngestTestPlugin.java index 86918939acfff..f7e1ca116853f 100644 --- a/test/framework/src/main/java/org/opensearch/ingest/IngestTestPlugin.java +++ b/test/framework/src/main/java/org/opensearch/ingest/IngestTestPlugin.java @@ -32,12 +32,12 @@ package org.opensearch.ingest; -import java.util.Collections; -import java.util.Map; - import org.opensearch.plugins.IngestPlugin; import org.opensearch.plugins.Plugin; +import java.util.Collections; +import java.util.Map; + /** * Adds an ingest processor to be used in tests. */ diff --git a/test/framework/src/main/java/org/opensearch/ingest/RandomDocumentPicks.java b/test/framework/src/main/java/org/opensearch/ingest/RandomDocumentPicks.java index 5d55f098a1f82..c478bf9239f74 100644 --- a/test/framework/src/main/java/org/opensearch/ingest/RandomDocumentPicks.java +++ b/test/framework/src/main/java/org/opensearch/ingest/RandomDocumentPicks.java @@ -35,6 +35,7 @@ import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomStrings; + import org.opensearch.index.VersionType; import java.util.ArrayList; diff --git a/test/framework/src/main/java/org/opensearch/mockito/plugin/PriviledgedMockMaker.java b/test/framework/src/main/java/org/opensearch/mockito/plugin/PriviledgedMockMaker.java index 7eb8ec90b9071..cc2d26a598757 100644 --- a/test/framework/src/main/java/org/opensearch/mockito/plugin/PriviledgedMockMaker.java +++ b/test/framework/src/main/java/org/opensearch/mockito/plugin/PriviledgedMockMaker.java @@ -8,13 +8,6 @@ package org.opensearch.mockito.plugin; -import org.mockito.Incubating; -import org.mockito.MockedConstruction; -import org.mockito.internal.creation.bytebuddy.ByteBuddyMockMaker; -import org.mockito.internal.util.reflection.LenientCopyTool; -import org.mockito.invocation.MockHandler; -import org.mockito.mock.MockCreationSettings; -import org.mockito.plugins.MockMaker; import org.opensearch.common.SuppressForbidden; import java.security.AccessControlContext; @@ -26,10 +19,19 @@ import java.util.Optional; import java.util.function.Function; +import org.mockito.Incubating; +import org.mockito.MockedConstruction; +import org.mockito.internal.creation.bytebuddy.ByteBuddyMockMaker; +import org.mockito.internal.util.reflection.LenientCopyTool; +import org.mockito.invocation.MockHandler; +import org.mockito.mock.MockCreationSettings; +import org.mockito.plugins.MockMaker; + /** * Mockito plugin which wraps the Mockito calls into privileged execution blocks and respects * SecurityManager presence.
*/ +@SuppressWarnings("removal") @SuppressForbidden(reason = "allow URL#getFile() to be used in tests") public class PriviledgedMockMaker implements MockMaker { private static AccessControlContext context; @@ -42,7 +44,7 @@ public class PriviledgedMockMaker implements MockMaker { * since Mockito does not support SecurityManager out of the box. The method has to be called by * test framework before the SecurityManager is being set, otherwise additional permissions have * to be granted to the caller: - * + * <p> * permission java.security.Permission "createAccessControlContext" * */ diff --git a/test/framework/src/main/java/org/opensearch/node/MockNode.java b/test/framework/src/main/java/org/opensearch/node/MockNode.java index 0bd6e1872a101..e6c7e21d5b3ea 100644 --- a/test/framework/src/main/java/org/opensearch/node/MockNode.java +++ b/test/framework/src/main/java/org/opensearch/node/MockNode.java @@ -37,19 +37,19 @@ import org.opensearch.cluster.MockInternalClusterInfoService; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.network.NetworkModule; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.MockBigArrays; import org.opensearch.common.util.MockPageCacheRecycler; import org.opensearch.common.util.PageCacheRecycler; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.env.Environment; import org.opensearch.http.HttpServerTransport; import org.opensearch.indices.IndicesService; -import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.plugins.Plugin; import org.opensearch.script.MockScriptService; @@ -60,6 +60,7 @@ import org.opensearch.search.SearchService; import org.opensearch.search.fetch.FetchPhase; import org.opensearch.search.query.QueryPhase; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.test.MockHttpTransport; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.ThreadPool; @@ -199,16 +200,35 @@ protected TransportService newTransportService( TransportInterceptor interceptor, Function<BoundTransportAddress, DiscoveryNode> localNodeFactory, ClusterSettings clusterSettings, - Set<String> taskHeaders + Set<String> taskHeaders, + Tracer tracer ) { // we use the MockTransportService.TestPlugin class as a marker to create a network // module with this MockNetworkService. NetworkService is such an integral part of the systme // we don't allow to plug it in from plugins or anything. this is a test-only override and // can't be done in a production env. 
if (getPluginsService().filterPlugins(MockTransportService.TestPlugin.class).isEmpty()) { - return super.newTransportService(settings, transport, threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders); + return super.newTransportService( + settings, + transport, + threadPool, + interceptor, + localNodeFactory, + clusterSettings, + taskHeaders, + tracer + ); } else { - return new MockTransportService(settings, transport, threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders); + return new MockTransportService( + settings, + transport, + threadPool, + interceptor, + localNodeFactory, + clusterSettings, + taskHeaders, + tracer + ); } } diff --git a/test/framework/src/main/java/org/opensearch/node/RecoverySettingsChunkSizePlugin.java b/test/framework/src/main/java/org/opensearch/node/RecoverySettingsChunkSizePlugin.java index 062675737f277..dabf23ce08263 100644 --- a/test/framework/src/main/java/org/opensearch/node/RecoverySettingsChunkSizePlugin.java +++ b/test/framework/src/main/java/org/opensearch/node/RecoverySettingsChunkSizePlugin.java @@ -34,7 +34,7 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; -import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.plugins.Plugin; diff --git a/test/framework/src/main/java/org/opensearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java b/test/framework/src/main/java/org/opensearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java index bcbf3b5b9a610..74c75ea05b1f3 100644 --- a/test/framework/src/main/java/org/opensearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java +++ b/test/framework/src/main/java/org/opensearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java @@ -34,18 +34,19 @@ import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpServer; + +import org.apache.hc.core5.http.ConnectionClosedException; +import org.apache.hc.core5.http.HttpStatus; import org.opensearch.common.Nullable; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.blobstore.BlobContainer; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.Streams; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.CountDown; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.test.OpenSearchTestCase; -import org.apache.hc.core5.http.ConnectionClosedException; -import org.apache.hc.core5.http.HttpStatus; import org.junit.After; import org.junit.Before; diff --git a/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java b/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java index ad515f2405f1d..f55eb72b7aa28 100644 --- a/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java +++ b/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java @@ -54,9 +54,9 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.core.common.Strings; +import org.opensearch.core.xcontent.MediaTypeRegistry; import 
org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.IndexModule; import org.opensearch.repositories.IndexId; import org.opensearch.repositories.RepositoriesService; @@ -88,9 +88,9 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; -import static org.hamcrest.Matchers.anEmptyMap; import static org.opensearch.test.OpenSearchTestCase.buildNewFakeTransportAddress; import static org.opensearch.test.OpenSearchTestCase.randomIntBetween; +import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.hasKey; @@ -136,7 +136,7 @@ public static void assertConsistency(BlobStoreRepository repository, Executor ex final RepositoryData repositoryData; try ( InputStream blob = blobContainer.readBlob(BlobStoreRepository.INDEX_FILE_PREFIX + latestGen); - XContentParser parser = XContentType.JSON.xContent() + XContentParser parser = MediaTypeRegistry.JSON.xContent() .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, blob) ) { repositoryData = RepositoryData.snapshotsFromXContent(parser, latestGen); diff --git a/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchBlobStoreRepositoryIntegTestCase.java b/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchBlobStoreRepositoryIntegTestCase.java index 3b7a921381882..789858ca38fad 100644 --- a/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchBlobStoreRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchBlobStoreRepositoryIntegTestCase.java @@ -46,11 +46,11 @@ import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.common.compress.CompressorType; import org.opensearch.common.io.Streams; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.compress.CompressorRegistry; import org.opensearch.repositories.IndexId; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; @@ -97,7 +97,7 @@ protected Settings repositorySettings() { final Settings.Builder builder = Settings.builder(); builder.put("compress", compress); if (compress) { - builder.put("compression_type", randomFrom(CompressorType.values())); + builder.put("compression_type", randomFrom(CompressorRegistry.registeredCompressors().keySet())); } return builder.build(); } @@ -165,6 +165,27 @@ public void testWriteRead() throws IOException { } } + public void testReadRange() throws IOException { + try (BlobStore store = newBlobStore()) { + final BlobContainer container = store.blobContainer(new BlobPath()); + final byte[] data = randomBytes(4096); + + // Pick a subrange starting somewhere between position 100 and 1000 + // and ending somewhere between 100 bytes past that position and + // 100 bytes before the end + final int startOffset = randomIntBetween(100, 1000); + final int endOffset = randomIntBetween(startOffset + 100, data.length - 100); + final byte[] subrangeData = Arrays.copyOfRange(data, startOffset, endOffset); + + 
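+ // (A worked example of the arithmetic: startOffset = 100 and endOffset = 300
+ // give subrangeData.length == 200, i.e. the half-open byte range [100, 300)
+ // of the blob, matching the position/length passed to readBlob below.)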
writeBlob(container, "foobar", new BytesArray(data), randomBoolean()); + try (InputStream stream = container.readBlob("foobar", startOffset, subrangeData.length)) { + final byte[] actual = stream.readAllBytes(); + assertArrayEquals(subrangeData, actual); + } + container.delete(); + } + } + public void testList() throws IOException { try (BlobStore store = newBlobStore()) { final BlobContainer container = store.blobContainer(new BlobPath()); diff --git a/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java b/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java index 3fe46a3cb3c86..faa9d52b105b2 100644 --- a/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java @@ -44,10 +44,10 @@ import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.SuppressForbidden; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.network.InetAddresses; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; import org.opensearch.repositories.RepositoryMissingException; @@ -238,7 +238,7 @@ public void testRequestStats() throws Exception { assertEquals(assertionErrorMsg, mockCalls, sdkRequestCounts); } - private Map<String, Long> getMockRequestCounts() { + protected Map<String, Long> getMockRequestCounts() { for (HttpHandler h : handlers.values()) { while (h instanceof DelegatingHttpHandler) { if (h instanceof HttpStatsCollectorHandler) { @@ -265,7 +265,7 @@ protected static void drainInputStream(final InputStream inputStream) throws IOE /** * HTTP handler that injects random service errors - * + * <p> * Note: it is not a good idea to allow this handler to simulate too many errors as it would * slow down the test suite. */ @@ -339,7 +339,7 @@ public interface DelegatingHttpHandler extends HttpHandler { /** * HTTP handler that allows collect request stats per request type. - * + * <p> * Implementors should keep track of the desired requests on {@link #maybeTrack(String, Headers)}. */ @SuppressForbidden(reason = "this test uses a HttpServer to emulate a cloud-based storage service") @@ -377,7 +377,7 @@ public void handle(HttpExchange exchange) throws IOException { /** * Tracks the given request if it matches the criteria. 
- * + * <p> * The request is represented as: * Request = Method SP Request-URI * diff --git a/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java b/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java index 98912e53c9d6a..83b245a1bcecb 100644 --- a/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java +++ b/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java @@ -33,6 +33,7 @@ package org.opensearch.script; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Scorable; import org.opensearch.index.query.IntervalFilterScript; import org.opensearch.index.similarity.ScriptedSimilarity.Doc; @@ -57,14 +58,14 @@ /** * A mocked script engine that can be used for testing purpose. - * + * <p> * This script engine allows to define a set of predefined scripts that basically a combination of a key and a * function: - * + * <p> * The key can be anything as long as it is a {@link String} and is used to resolve the scripts * at compilation time. For inline scripts, the key can be a description of the script. For stored and file scripts, * the source must match a key in the predefined set of scripts. - * + * <p> * The function is used to provide the result of the script execution and can return anything. */ public class MockScriptEngine implements ScriptEngine { @@ -624,7 +625,7 @@ public MockScoreScript(MockDeterministicScript script) { } @Override - public ScoreScript.LeafFactory newFactory(Map<String, Object> params, SearchLookup lookup) { + public ScoreScript.LeafFactory newFactory(Map<String, Object> params, SearchLookup lookup, IndexSearcher indexSearcher) { return new ScoreScript.LeafFactory() { @Override public boolean needs_score() { @@ -634,7 +635,7 @@ public boolean needs_score() { @Override public ScoreScript newInstance(LeafReaderContext ctx) throws IOException { Scorable[] scorerHolder = new Scorable[1]; - return new ScoreScript(params, lookup, ctx) { + return new ScoreScript(params, lookup, indexSearcher, ctx) { @Override public double execute(ExplanationHolder explanation) { Map<String, Object> vars = new HashMap<>(getParams()); diff --git a/test/framework/src/main/java/org/opensearch/script/MockScriptService.java b/test/framework/src/main/java/org/opensearch/script/MockScriptService.java index 4fbc4c4d4bc90..dff71189eb4ee 100644 --- a/test/framework/src/main/java/org/opensearch/script/MockScriptService.java +++ b/test/framework/src/main/java/org/opensearch/script/MockScriptService.java @@ -34,7 +34,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.node.MockNode; - import org.opensearch.plugins.Plugin; import java.util.Map; diff --git a/test/framework/src/main/java/org/opensearch/script/ScoreAccessor.java b/test/framework/src/main/java/org/opensearch/script/ScoreAccessor.java index 26b439fa6438f..9cf2141555957 100644 --- a/test/framework/src/main/java/org/opensearch/script/ScoreAccessor.java +++ b/test/framework/src/main/java/org/opensearch/script/ScoreAccessor.java @@ -39,7 +39,7 @@ /** * A float encapsulation that dynamically accesses the score of a document. - * + * <p> * The provided {@link DocLookup} is used to retrieve the score * for the current document. 
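 * <p>
 * A hedged illustration of the intent; the {@code "_score"} binding is an
 * assumption about how callers typically expose this accessor:
 * <pre>{@code
 * Number score = (Number) vars.get("_score"); // backed by a ScoreAccessor
 * double current = score.doubleValue();       // re-reads the live score on each call
 * }</pre>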
*/ diff --git a/test/framework/src/main/java/org/opensearch/search/MockSearchService.java b/test/framework/src/main/java/org/opensearch/search/MockSearchService.java index 808dc50512c58..a0bbcb7be05f9 100644 --- a/test/framework/src/main/java/org/opensearch/search/MockSearchService.java +++ b/test/framework/src/main/java/org/opensearch/search/MockSearchService.java @@ -34,8 +34,8 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.util.BigArrays; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.indices.IndicesService; -import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.node.MockNode; import org.opensearch.plugins.Plugin; import org.opensearch.script.ScriptService; diff --git a/test/framework/src/main/java/org/opensearch/search/RandomSearchRequestGenerator.java b/test/framework/src/main/java/org/opensearch/search/RandomSearchRequestGenerator.java index e7b1dd2bde1ea..74de1e6d96d93 100644 --- a/test/framework/src/main/java/org/opensearch/search/RandomSearchRequestGenerator.java +++ b/test/framework/src/main/java/org/opensearch/search/RandomSearchRequestGenerator.java @@ -35,15 +35,15 @@ import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchType; import org.opensearch.action.support.IndicesOptions; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.text.Text; -import org.opensearch.common.unit.TimeValue; import org.opensearch.core.xcontent.DeprecationHandler; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.query.QueryBuilders; import org.opensearch.script.Script; import org.opensearch.script.ScriptType; @@ -131,6 +131,9 @@ public static SearchRequest randomSearchRequest(Supplier<SearchSourceBuilder> ra if (randomBoolean()) { searchRequest.setCancelAfterTimeInterval(TimeValue.parseTimeValue(randomTimeValue(), null, "cancel_after_time_interval")); } + if (randomBoolean()) { + searchRequest.setPhaseTook(randomBoolean()); + } return searchRequest; } @@ -353,7 +356,7 @@ public static SearchSourceBuilder randomSearchSourceBuilder( } jsonBuilder.endArray(); jsonBuilder.endObject(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) + XContentParser parser = MediaTypeRegistry.JSON.xContent() .createParser( NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java index 60d337599771c..4eb49ebb42241 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java @@ -34,20 +34,19 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.BinaryDocValuesField; import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; import org.apache.lucene.document.InetAddressPoint; import 
org.apache.lucene.document.LatLonDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.document.StoredField; -import org.apache.lucene.tests.index.AssertingDirectoryReader; +import org.apache.lucene.document.StringField; import org.apache.lucene.index.CompositeReaderContext; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.sandbox.document.HalfFloatPoint; -import org.apache.lucene.tests.search.AssertingIndexSearcher; import org.apache.lucene.search.Collector; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; @@ -57,25 +56,32 @@ import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Weight; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.AssertingDirectoryReader; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.tests.search.AssertingIndexSearcher; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.CheckedConsumer; +import org.opensearch.common.TriConsumer; import org.opensearch.common.TriFunction; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; import org.opensearch.common.network.NetworkAddress; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.MockBigArrays; import org.opensearch.common.util.MockPageCacheRecycler; -import org.opensearch.common.lease.Releasable; -import org.opensearch.common.lease.Releasables; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.indices.breaker.CircuitBreakerService; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.core.xcontent.ContextParser; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.index.Index; import org.opensearch.index.IndexSettings; import org.opensearch.index.analysis.AnalysisRegistry; import org.opensearch.index.analysis.AnalyzerScope; @@ -100,6 +106,7 @@ import org.opensearch.index.mapper.Mapper; import org.opensearch.index.mapper.Mapper.BuilderContext; import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.MatchOnlyTextFieldMapper; import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.index.mapper.ObjectMapper; import org.opensearch.index.mapper.ObjectMapper.Nested; @@ -108,10 +115,7 @@ import org.opensearch.index.mapper.TextFieldMapper; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.IndicesModule; -import org.opensearch.indices.breaker.CircuitBreakerService; -import 
org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.indices.mapper.MapperRegistry; import org.opensearch.plugins.SearchPlugin; @@ -120,6 +124,7 @@ import org.opensearch.search.aggregations.AggregatorFactories.Builder; import org.opensearch.search.aggregations.MultiBucketConsumerService.MultiBucketConsumer; import org.opensearch.search.aggregations.bucket.nested.NestedAggregationBuilder; +import org.opensearch.search.aggregations.bucket.terms.TermsAggregator; import org.opensearch.search.aggregations.metrics.MetricsAggregator; import org.opensearch.search.aggregations.pipeline.PipelineAggregator; import org.opensearch.search.aggregations.pipeline.PipelineAggregator.PipelineTree; @@ -132,8 +137,8 @@ import org.opensearch.search.internal.ContextIndexSearcher; import org.opensearch.search.internal.SearchContext; import org.opensearch.search.lookup.SearchLookup; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.InternalAggregationTestCase; +import org.opensearch.test.OpenSearchTestCase; import org.junit.After; import org.junit.Before; @@ -146,6 +151,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Supplier; @@ -177,6 +183,14 @@ public abstract class AggregatorTestCase extends OpenSearchTestCase { // A list of field types that should not be tested, or are not currently supported private static List<String> TYPE_TEST_DENYLIST; + protected static final TriConsumer<Document, String, String> ADD_SORTED_SET_FIELD_NOT_INDEXED = (document, field, value) -> document + .add(new SortedSetDocValuesField(field, new BytesRef(value))); + + protected static final TriConsumer<Document, String, String> ADD_SORTED_SET_FIELD_INDEXED = (document, field, value) -> { + document.add(new SortedSetDocValuesField(field, new BytesRef(value))); + document.add(new StringField(field, value, Field.Store.NO)); + }; + static { List<String> denylist = new ArrayList<>(); denylist.add(ObjectMapper.CONTENT_TYPE); // Cannot aggregate objects @@ -350,6 +364,7 @@ public boolean shouldCache(Query query) { when(searchContext.aggregations()).thenReturn(new SearchContextAggregations(AggregatorFactories.EMPTY, bucketConsumer)); when(searchContext.query()).thenReturn(query); when(searchContext.bucketCollectorProcessor()).thenReturn(new BucketCollectorProcessor()); + when(searchContext.asLocalBucketCountThresholds(any())).thenCallRealMethod(); /* * Always use the circuit breaking big arrays instance so that the CircuitBreakerService * we're passed gets a chance to break. @@ -431,7 +446,6 @@ protected QueryShardContext queryShardContextMock( CircuitBreakerService circuitBreakerService, BigArrays bigArrays ) { - return new QueryShardContext( 0, indexSettings, @@ -684,7 +698,7 @@ protected static IndexReader maybeWrapReaderEs(DirectoryReader reader) throws IO * Implementors should return a list of {@link ValuesSourceType} that the aggregator supports. * This is used to test the matrix of supported/unsupported field types against the aggregator * and verify it works (or doesn't) as expected. - * + * <p> * If this method is implemented, {@link AggregatorTestCase#createAggBuilderForTypeTest(MappedFieldType, String)} * should be implemented as well. 
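 * <p>
 * A minimal sketch of such an override (the concrete value source types are
 * illustrative assumptions, not prescribed by this test case):
 * <pre>{@code
 * protected List<ValuesSourceType> getSupportedValuesSourceTypes() {
 *     return Arrays.asList(CoreValuesSourceType.NUMERIC, CoreValuesSourceType.DATE);
 * }
 * }</pre>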
* @@ -701,7 +715,7 @@ protected List<ValuesSourceType> getSupportedValuesSourceTypes() { * The field type and name are provided, and the implementor is expected to return an AggBuilder accordingly. * The AggBuilder should be returned even if the aggregation does not support the field type, because * the test will check if an exception is thrown in that case. - * + * <p> * The list of supported types are provided by {@link AggregatorTestCase#getSupportedValuesSourceTypes()}, * which must also be implemented. * @@ -719,7 +733,7 @@ protected AggregationBuilder createAggBuilderForTypeTest(MappedFieldType fieldTy * A method that allows implementors to specifically denylist particular field types (based on their content_name). * This is needed in some areas where the ValuesSourceType is not granular enough, for example integer values * vs floating points, or `keyword` bytes vs `binary` bytes (which are not searchable) - * + * <p> * This is a denylist instead of an allowlist because there are vastly more field types than ValuesSourceTypes, * and it's expected that these unsupported cases are exceptional rather than common */ @@ -733,7 +747,7 @@ protected List<String> unsupportedMappedFieldTypes() { * is provided by the implementor class, and it is executed against each field type in turn. If * an exception is thrown when the field is supported, that will fail the test. Similarly, if * an exception _is not_ thrown when a field is unsupported, that will also fail the test. - * + * <p> * Exception types/messages are not currently checked, just presence/absence of an exception. */ public void testSupportedFieldTypes() throws IOException { @@ -759,7 +773,8 @@ public void testSupportedFieldTypes() throws IOException { source.put("type", mappedType.getKey()); // Text is the only field that doesn't support DVs, instead FD - if (mappedType.getKey().equals(TextFieldMapper.CONTENT_TYPE) == false) { + if (mappedType.getKey().equals(TextFieldMapper.CONTENT_TYPE) == false + && mappedType.getKey().equals(MatchOnlyTextFieldMapper.CONTENT_TYPE) == false) { source.put("doc_values", "true"); } @@ -824,7 +839,7 @@ private ValuesSourceType fieldToVST(MappedFieldType fieldType) { /** * Helper method to write a single document with a single value specific to the requested fieldType. - * + * <p> * Throws an exception if it encounters an unknown field type, to prevent new ones from sneaking in without * being tested. 
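 * <p>
 * (Put differently: a newly added field type must be given an explicit
 * handler in this helper before it can pass {@code testSupportedFieldTypes},
 * so coverage cannot be skipped silently.)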
*/ @@ -1058,6 +1073,11 @@ public InternalAggregation buildEmptyAggregation() { } }; } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } }; } @@ -1088,6 +1108,89 @@ protected void doWriteTo(StreamOutput out) throws IOException { } } + /** + * Wrapper around Aggregator class + * Maintains a count for times collect() is invoked - number of documents visited + */ + protected static class CountingAggregator extends Aggregator { + private final AtomicInteger collectCounter; + public final Aggregator delegate; + + public CountingAggregator(AtomicInteger collectCounter, TermsAggregator delegate) { + this.collectCounter = collectCounter; + this.delegate = delegate; + } + + public AtomicInteger getCollectCount() { + return collectCounter; + } + + @Override + public void close() { + delegate.close(); + } + + @Override + public String name() { + return delegate.name(); + } + + @Override + public SearchContext context() { + return delegate.context(); + } + + @Override + public Aggregator parent() { + return delegate.parent(); + } + + @Override + public Aggregator subAggregator(String name) { + return delegate.subAggregator(name); + } + + @Override + public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { + return delegate.buildAggregations(owningBucketOrds); + } + + @Override + public InternalAggregation buildEmptyAggregation() { + return delegate.buildEmptyAggregation(); + } + + @Override + public LeafBucketCollector getLeafCollector(LeafReaderContext ctx) throws IOException { + return new LeafBucketCollector() { + @Override + public void collect(int doc, long bucket) throws IOException { + delegate.getLeafCollector(ctx).collect(doc, bucket); + collectCounter.incrementAndGet(); + } + }; + } + + @Override + public ScoreMode scoreMode() { + return delegate.scoreMode(); + } + + @Override + public void preCollection() throws IOException { + delegate.preCollection(); + } + + @Override + public void postCollection() throws IOException { + delegate.postCollection(); + } + + public void setWeight(Weight weight) { + this.delegate.setWeight(weight); + } + } + public static class InternalAggCardinality extends InternalAggregation { private final CardinalityUpperBound cardinality; diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/BaseAggregationTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/BaseAggregationTestCase.java index 89d705c8fd1e8..39209f037300c 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/BaseAggregationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/BaseAggregationTestCase.java @@ -32,15 +32,15 @@ package org.opensearch.search.aggregations; -import org.opensearch.common.Strings; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.search.aggregations.support.ValuesSourceAggregationBuilder; import 
org.opensearch.test.AbstractBuilderTestCase; @@ -67,7 +67,7 @@ public abstract class BaseAggregationTestCase<AB extends AbstractAggregationBuil public void testFromXContent() throws IOException { AB testAgg = createTestAggregatorBuilder(); AggregatorFactories.Builder factoriesBuilder = AggregatorFactories.builder().addAggregator(testAgg); - XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); if (randomBoolean()) { builder.prettyPrint(); } @@ -91,7 +91,7 @@ public void testFromXContentMulti() throws IOException { factoriesBuilder.addAggregator(testAgg); } - XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); if (randomBoolean()) { builder.prettyPrint(); } @@ -138,8 +138,8 @@ public void testSerializationMulti() throws IOException { */ public void testToString() throws IOException { AB testAgg = createTestAggregatorBuilder(); - String toString = randomBoolean() ? Strings.toString(XContentType.JSON, testAgg) : testAgg.toString(); - XContentParser parser = createParser(XContentType.JSON.xContent(), toString); + String toString = randomBoolean() ? Strings.toString(MediaTypeRegistry.JSON, testAgg) : testAgg.toString(); + XContentParser parser = createParser(MediaTypeRegistry.JSON.xContent(), toString); AggregationBuilder newAgg = parse(parser); assertNotSame(newAgg, testAgg); assertEquals(testAgg, newAgg); diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/BasePipelineAggregationTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/BasePipelineAggregationTestCase.java index fdbe7d73172af..2f589e80c22e2 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/BasePipelineAggregationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/BasePipelineAggregationTestCase.java @@ -32,28 +32,18 @@ package org.opensearch.search.aggregations; -import static java.util.Collections.emptyList; -import static org.opensearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; -import static org.hamcrest.Matchers.hasSize; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.List; - import org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.settings.Settings; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.env.Environment; import org.opensearch.indices.IndicesModule; import org.opensearch.plugins.SearchPlugin; @@ -63,6 +53,16 @@ import org.opensearch.test.AbstractQueryTestCase; import org.opensearch.test.OpenSearchTestCase; +import 
java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +import static java.util.Collections.emptyList; +import static org.opensearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; +import static org.hamcrest.Matchers.hasSize; + public abstract class BasePipelineAggregationTestCase<AF extends AbstractPipelineAggregationBuilder<AF>> extends OpenSearchTestCase { protected static final String STRING_FIELD_NAME = "mapped_string"; @@ -140,7 +140,7 @@ public void testFromXContent() throws IOException { AF testAgg = createTestAggregatorFactory(); AggregatorFactories.Builder factoriesBuilder = AggregatorFactories.builder().addPipelineAggregator(testAgg); logger.info("Content string: {}", factoriesBuilder); - XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.values())); if (randomBoolean()) { builder.prettyPrint(); } diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/InternalSingleBucketAggregationTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/InternalSingleBucketAggregationTestCase.java index 589590c07e873..f3d7c0910179b 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/InternalSingleBucketAggregationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/InternalSingleBucketAggregationTestCase.java @@ -32,9 +32,9 @@ package org.opensearch.search.aggregations; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.rest.action.search.RestSearchAction; import org.opensearch.search.aggregations.bucket.InternalSingleBucketAggregation; import org.opensearch.search.aggregations.bucket.ParsedSingleBucketAggregation; @@ -51,7 +51,7 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; -import static org.opensearch.common.xcontent.XContentHelper.toXContent; +import static org.opensearch.core.xcontent.XContentHelper.toXContent; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertToXContentEquivalent; public abstract class InternalSingleBucketAggregationTestCase<T extends InternalSingleBucketAggregation> extends diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/bucket/AbstractTermsTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/bucket/AbstractTermsTestCase.java index 75192e276982e..0b44fe447d6f8 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/bucket/AbstractTermsTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/bucket/AbstractTermsTestCase.java @@ -32,16 +32,35 @@ package org.opensearch.search.aggregations.bucket; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode; -import org.opensearch.test.OpenSearchIntegTestCase; +import 
org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.terms; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; -public abstract class AbstractTermsTestCase extends OpenSearchIntegTestCase { +public abstract class AbstractTermsTestCase extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public AbstractTermsTestCase(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } public String randomExecutionHint() { return randomBoolean() ? null : randomFrom(ExecutionMode.values()).toString(); diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/composite/BaseCompositeAggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/composite/BaseCompositeAggregatorTestCase.java index 66d167499b2cf..466e4d1bf1742 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/composite/BaseCompositeAggregatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/composite/BaseCompositeAggregatorTestCase.java @@ -14,6 +14,7 @@ import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.document.IntPoint; import org.apache.lucene.document.LongPoint; +import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.document.StringField; @@ -33,26 +34,29 @@ import org.apache.lucene.tests.util.TestUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; -import org.junit.After; -import org.junit.Before; import org.opensearch.common.settings.Settings; -import org.opensearch.core.common.text.Text; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.time.DateFormatters; +import org.opensearch.core.common.text.Text; import org.opensearch.core.index.Index; import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.DateFieldMapper; +import org.opensearch.index.mapper.DocCountFieldMapper; import org.opensearch.index.mapper.DocumentMapper; import org.opensearch.index.mapper.IpFieldMapper; import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.NumberFieldMapper; +import org.opensearch.search.aggregations.AggregationBuilder; import org.opensearch.search.aggregations.AggregatorTestCase; +import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; import org.opensearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; import org.opensearch.search.aggregations.bucket.composite.InternalComposite; import org.opensearch.test.IndexSettingsModule; +import org.junit.After; +import org.junit.Before; import java.io.IOException; import java.net.InetAddress; @@ 
-139,12 +143,16 @@ protected void executeTestCase( boolean useIndexSort, Query query, List<Map<String, List<Object>>> dataset, - Supplier<CompositeAggregationBuilder> create, + Supplier<? extends AggregationBuilder> create, Consumer<InternalComposite> verify ) throws IOException { Map<String, MappedFieldType> types = FIELD_TYPES.stream().collect(Collectors.toMap(MappedFieldType::name, Function.identity())); - CompositeAggregationBuilder aggregationBuilder = create.get(); - Sort indexSort = useIndexSort ? buildIndexSort(aggregationBuilder.sources(), types) : null; + AggregationBuilder aggregationBuilder = create.get(); + Sort indexSort = null; + if (aggregationBuilder instanceof CompositeAggregationBuilder && useIndexSort) { + CompositeAggregationBuilder cab = (CompositeAggregationBuilder) aggregationBuilder; + indexSort = buildIndexSort(cab.sources(), types); + } IndexSettings indexSettings = createIndexSettings(indexSort); try (Directory directory = newDirectory()) { IndexWriterConfig config = newIndexWriterConfig(random(), new MockAnalyzer(random())); @@ -180,14 +188,16 @@ protected void executeTestCase( } try (IndexReader indexReader = DirectoryReader.open(directory)) { IndexSearcher indexSearcher = new IndexSearcher(indexReader); - InternalComposite composite = searchAndReduce( + InternalAggregation aggregation = searchAndReduce( indexSettings, indexSearcher, query, aggregationBuilder, FIELD_TYPES.toArray(new MappedFieldType[0]) ); - verify.accept(composite); + if (aggregation instanceof InternalComposite) { + verify.accept((InternalComposite) aggregation); + } } } } @@ -196,6 +206,12 @@ protected void addToDocument(int id, Document doc, Map<String, List<Object>> key doc.add(new StringField("id", Integer.toString(id), Field.Store.NO)); for (Map.Entry<String, List<Object>> entry : keys.entrySet()) { final String name = entry.getKey(); + if (name.equals(DocCountFieldMapper.NAME)) { + doc.add(new IntPoint(name, (int) entry.getValue().get(0))); + // doc count field should be DocValuesType.NUMERIC + doc.add(new NumericDocValuesField(name, (int) entry.getValue().get(0))); + continue; + } for (Object value : entry.getValue()) { if (value instanceof Integer) { doc.add(new SortedNumericDocValuesField(name, (int) value)); @@ -305,6 +321,6 @@ protected static Map<String, Object> createAfterKey(Object... 
fields) { } protected static long asLong(String dateTime) { - return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli(); + return DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(dateTime)).toInstant().toEpochMilli(); } } diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/metrics/AbstractNumericTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/metrics/AbstractNumericTestCase.java index a4f6b97115bb0..8c2cefa89c860 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/metrics/AbstractNumericTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/metrics/AbstractNumericTestCase.java @@ -31,18 +31,37 @@ package org.opensearch.search.aggregations.metrics; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.common.settings.Settings; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; @OpenSearchIntegTestCase.SuiteScopeTestCase -public abstract class AbstractNumericTestCase extends OpenSearchIntegTestCase { +public abstract class AbstractNumericTestCase extends ParameterizedStaticSettingsOpenSearchIntegTestCase { protected static long minValue, maxValue, minValues, maxValues; + public AbstractNumericTestCase(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + @Override public void setupSuiteScopeCluster() throws Exception { createIndex("idx"); diff --git a/test/framework/src/main/java/org/opensearch/search/backpressure/SearchBackpressureTestHelpers.java b/test/framework/src/main/java/org/opensearch/search/backpressure/SearchBackpressureTestHelpers.java index ba3653d0b4a84..af06b1688dca2 100644 --- a/test/framework/src/main/java/org/opensearch/search/backpressure/SearchBackpressureTestHelpers.java +++ b/test/framework/src/main/java/org/opensearch/search/backpressure/SearchBackpressureTestHelpers.java @@ -8,8 +8,8 @@ package org.opensearch.search.backpressure; +import org.opensearch.core.tasks.resourcetracker.TaskResourceUsage; import org.opensearch.tasks.CancellableTask; -import org.opensearch.tasks.TaskResourceUsage; import org.opensearch.test.OpenSearchTestCase; import java.util.concurrent.atomic.AtomicBoolean; diff --git a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java index e3b1c7af6891c..0ee889af5ce1a 100644 --- a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -32,7 +32,6 @@ package org.opensearch.snapshots; import org.opensearch.Version; -import 
org.opensearch.action.ActionFuture; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; import org.opensearch.action.index.IndexRequestBuilder; @@ -49,20 +48,21 @@ import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.Strings; import org.opensearch.common.UUIDs; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobPath; -import org.opensearch.common.compress.CompressorType; import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.ByteSizeUnit; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.Strings; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.compress.CompressorRegistry; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.index.IndexModule; import org.opensearch.index.store.RemoteBufferedOutputDirectory; import org.opensearch.indices.replication.common.ReplicationType; @@ -331,6 +331,12 @@ public static void blockNodeOnAnyFiles(String repository, String nodeName) { ); } + public static void blockNodeOnAnySegmentFile(String repository, String nodeName) { + ((MockRepository) internalCluster().getInstance(RepositoriesService.class, nodeName).repository(repository)).blockOnSegmentFiles( + true + ); + } + public static void blockDataNode(String repository, String nodeName) { ((MockRepository) internalCluster().getInstance(RepositoriesService.class, nodeName).repository(repository)).blockOnDataFiles(true); } @@ -417,7 +423,7 @@ protected Settings.Builder randomRepositorySettings() { final boolean compress = randomBoolean(); settings.put("location", randomRepoPath()).put("compress", compress); if (compress) { - settings.put("compression_type", randomFrom(CompressorType.values())); + settings.put("compression_type", randomFrom(CompressorRegistry.registeredCompressors().keySet())); } if (rarely()) { settings.put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES); @@ -466,7 +472,7 @@ protected String initWithSnapshotVersion(String repoName, Path repoPath, Version JsonXContent.jsonXContent.createParser( NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - Strings.toString(jsonBuilder).replace(Version.CURRENT.toString(), version.toString()) + jsonBuilder.toString().replace(Version.CURRENT.toString(), version.toString()) ), repositoryData.getGenId() ); @@ -480,11 +486,12 @@ protected String initWithSnapshotVersion(String repoName, Path repoPath, Version protected SnapshotInfo createFullSnapshot(String repoName, String snapshotName) { logger.info("--> creating full snapshot [{}] in [{}]", snapshotName, repoName); - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(repoName, snapshotName) - .setIncludeGlobalState(true) + final CreateSnapshotResponse response = 
client().admin() + .cluster() + .prepareCreateSnapshot(repoName, snapshotName) .setWaitForCompletion(true) .get(); - final SnapshotInfo snapshotInfo = createSnapshotResponse.getSnapshotInfo(); + final SnapshotInfo snapshotInfo = response.getSnapshotInfo(); assertThat(snapshotInfo.successfulShards(), is(snapshotInfo.totalShards())); assertThat(snapshotInfo.state(), is(SnapshotState.SUCCESS)); return snapshotInfo; @@ -498,8 +505,8 @@ protected SnapshotInfo createSnapshot(String repositoryName, String snapshot, Li .setIndices(indices.toArray(Strings.EMPTY_ARRAY)) .setWaitForCompletion(true) .get(); + SnapshotInfo snapshotInfo = response.getSnapshotInfo(); - final SnapshotInfo snapshotInfo = response.getSnapshotInfo(); assertThat(snapshotInfo.state(), is(SnapshotState.SUCCESS)); assertThat(snapshotInfo.successfulShards(), greaterThan(0)); assertThat(snapshotInfo.failedShards(), equalTo(0)); @@ -523,7 +530,7 @@ protected void indexRandomDocs(String index, int numdocs) throws InterruptedExce assertDocCount(index, numdocs); } - protected Settings getRemoteStoreBackedIndexSettings(String remoteStoreRepo) { + protected Settings getRemoteStoreBackedIndexSettings() { return Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "1") .put("index.refresh_interval", "300s") @@ -531,9 +538,6 @@ protected Settings getRemoteStoreBackedIndexSettings(String remoteStoreRepo) { .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.FS.getSettingsKey()) .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) - .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) - .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, remoteStoreRepo) - .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, remoteStoreRepo) .build(); } @@ -560,6 +564,11 @@ protected String[] getLockFilesInRemoteStore(String remoteStoreIndex, String rem .prepareGetSettings(remoteStoreIndex) .get() .getSetting(remoteStoreIndex, IndexMetadata.SETTING_INDEX_UUID); + return getLockFilesInRemoteStore(remoteStoreIndex, remoteStoreRepositoryName, indexUUID); + } + + protected String[] getLockFilesInRemoteStore(String remoteStoreIndex, String remoteStoreRepositoryName, String indexUUID) + throws IOException { final RepositoriesService repositoriesService = internalCluster().getCurrentClusterManagerNodeInstance(RepositoriesService.class); final BlobStoreRepository remoteStoreRepository = (BlobStoreRepository) repositoriesService.repository(remoteStoreRepositoryName); BlobPath shardLevelBlobPath = remoteStoreRepository.basePath().add(indexUUID).add("0").add("segments").add("lock_files"); diff --git a/test/framework/src/main/java/org/opensearch/snapshots/mockstore/MockRepository.java b/test/framework/src/main/java/org/opensearch/snapshots/mockstore/MockRepository.java index 7a7c4bd448c55..72c4ba44d0a31 100644 --- a/test/framework/src/main/java/org/opensearch/snapshots/mockstore/MockRepository.java +++ b/test/framework/src/main/java/org/opensearch/snapshots/mockstore/MockRepository.java @@ -33,6 +33,7 @@ package org.opensearch.snapshots.mockstore; import com.carrotsearch.randomizedtesting.RandomizedContext; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.index.CorruptIndexException; @@ -138,6 +139,8 @@ public long getFailureCount() { private volatile boolean blockOnDataFiles; + private volatile boolean blockOnSegmentFiles; + private volatile boolean blockOnDeleteIndexN; /** @@ 
-189,6 +192,7 @@ public MockRepository( maximumNumberOfFailures = metadata.settings().getAsLong("max_failure_number", 100L); blockOnAnyFiles = metadata.settings().getAsBoolean("block_on_control", false); blockOnDataFiles = metadata.settings().getAsBoolean("block_on_data", false); + blockOnSegmentFiles = metadata.settings().getAsBoolean("block_on_segment", false); blockAndFailOnWriteSnapFile = metadata.settings().getAsBoolean("block_on_snap", false); randomPrefix = metadata.settings().get("random", "default"); waitAfterUnblock = metadata.settings().getAsLong("wait_after_unblock", 0L); @@ -236,6 +240,7 @@ public synchronized void unblock() { blocked = false; // Clean blocking flags, so we wouldn't try to block again blockOnDataFiles = false; + blockOnSegmentFiles = false; blockOnAnyFiles = false; blockAndFailOnWriteIndexFile = false; blockOnWriteIndexFile = false; @@ -258,6 +263,14 @@ public void setBlockOnAnyFiles(boolean blocked) { blockOnAnyFiles = blocked; } + public void blockOnSegmentFiles(boolean blocked) { + blockOnSegmentFiles = blocked; + } + + public void setBlockOnSegmentFiles(boolean blocked) { + blockOnSegmentFiles = blocked; + } + public void setBlockAndFailOnWriteSnapFiles(boolean blocked) { blockAndFailOnWriteSnapFile = blocked; } @@ -305,6 +318,7 @@ private synchronized boolean blockExecution() { boolean wasBlocked = false; try { while (blockOnDataFiles + || blockOnSegmentFiles || blockOnAnyFiles || blockAndFailOnWriteIndexFile || blockOnWriteIndexFile @@ -406,6 +420,8 @@ private void maybeIOExceptionOrBlock(String blobName) throws IOException { blockExecutionAndMaybeWait(blobName); } else if (blobName.startsWith("snap-") && blockAndFailOnWriteSnapFile) { blockExecutionAndFail(blobName); + } else if (blockOnSegmentFiles && blobName.contains(".si__")) { + blockExecutionAndMaybeWait(blobName); } } } diff --git a/test/framework/src/main/java/org/opensearch/test/AbstractBroadcastResponseTestCase.java b/test/framework/src/main/java/org/opensearch/test/AbstractBroadcastResponseTestCase.java index af33d5b66226e..61621c851bcb0 100644 --- a/test/framework/src/main/java/org/opensearch/test/AbstractBroadcastResponseTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/AbstractBroadcastResponseTestCase.java @@ -33,16 +33,17 @@ package org.opensearch.test; import org.opensearch.OpenSearchException; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.broadcast.BroadcastResponse; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentParser; import java.io.IOException; import java.util.ArrayList; @@ -150,7 +151,7 @@ public void testFailuresDeduplication() throws IOException { public void testToXContent() { T response = createTestInstance(10, 10, 0, null); - String output = Strings.toString(XContentType.JSON, response); + String output = 
diff --git a/test/framework/src/main/java/org/opensearch/test/AbstractBroadcastResponseTestCase.java b/test/framework/src/main/java/org/opensearch/test/AbstractBroadcastResponseTestCase.java
index af33d5b66226e..61621c851bcb0 100644
--- a/test/framework/src/main/java/org/opensearch/test/AbstractBroadcastResponseTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/AbstractBroadcastResponseTestCase.java
@@ -33,16 +33,17 @@
 package org.opensearch.test;

 import org.opensearch.OpenSearchException;
-import org.opensearch.core.action.support.DefaultShardOperationFailedException;
 import org.opensearch.action.support.broadcast.BroadcastResponse;
-import org.opensearch.common.Strings;
-import org.opensearch.core.common.bytes.BytesReference;
-import org.opensearch.core.xcontent.ToXContent;
-import org.opensearch.core.xcontent.XContentParser;
 import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.core.action.support.DefaultShardOperationFailedException;
+import org.opensearch.core.common.Strings;
+import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.core.index.Index;
 import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.core.rest.RestStatus;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
+import org.opensearch.core.xcontent.ToXContent;
+import org.opensearch.core.xcontent.XContentParser;

 import java.io.IOException;
 import java.util.ArrayList;
@@ -150,7 +151,7 @@ public void testFailuresDeduplication() throws IOException {

     public void testToXContent() {
         T response = createTestInstance(10, 10, 0, null);
-        String output = Strings.toString(XContentType.JSON, response);
+        String output = Strings.toString(MediaTypeRegistry.JSON, response);
         assertEquals("{\"_shards\":{\"total\":10,\"successful\":10,\"failed\":0}}", output);
     }
 }
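This file migrates from the server-layer XContentType constant to the core-layer MediaTypeRegistry, so the test framework depends only on the core XContent abstractions. The call shape is unchanged; a minimal sketch, assuming any ToXContent fragment named response:

    String json = Strings.toString(MediaTypeRegistry.JSON, response);
    // e.g. {"_shards":{"total":10,"successful":10,"failed":0}}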
diff --git a/test/framework/src/main/java/org/opensearch/test/AbstractBuilderTestCase.java b/test/framework/src/main/java/org/opensearch/test/AbstractBuilderTestCase.java
index 527e04b6a4eb5..7a8cf4963c4f1 100644
--- a/test/framework/src/main/java/org/opensearch/test/AbstractBuilderTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/AbstractBuilderTestCase.java
@@ -35,10 +35,10 @@
 import com.carrotsearch.randomizedtesting.RandomizedTest;
 import com.carrotsearch.randomizedtesting.SeedUtils;

+import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.util.Accountable;
 import org.opensearch.Version;
-import org.opensearch.action.ActionListener;
 import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest;
 import org.opensearch.action.get.GetRequest;
 import org.opensearch.action.get.GetResponse;
@@ -47,20 +47,22 @@
 import org.opensearch.action.termvectors.MultiTermVectorsResponse;
 import org.opensearch.client.Client;
 import org.opensearch.cluster.metadata.IndexMetadata;
-import org.opensearch.common.Strings;
 import org.opensearch.common.compress.CompressedXContent;
-import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
 import org.opensearch.common.regex.Regex;
 import org.opensearch.common.settings.IndexScopedSettings;
 import org.opensearch.common.settings.Setting;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.settings.SettingsModule;
 import org.opensearch.common.util.BigArrays;
-import org.opensearch.core.xcontent.NamedXContentRegistry;
 import org.opensearch.common.util.io.IOUtils;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
+import org.opensearch.core.index.Index;
+import org.opensearch.core.index.shard.ShardId;
+import org.opensearch.core.indices.breaker.NoneCircuitBreakerService;
+import org.opensearch.core.xcontent.NamedXContentRegistry;
 import org.opensearch.env.Environment;
 import org.opensearch.env.TestEnvironment;
-import org.opensearch.core.index.Index;
 import org.opensearch.index.IndexSettings;
 import org.opensearch.index.analysis.IndexAnalyzers;
 import org.opensearch.index.cache.bitset.BitsetFilterCache;
@@ -68,12 +70,12 @@
 import org.opensearch.index.fielddata.IndexFieldDataService;
 import org.opensearch.index.mapper.MappedFieldType;
 import org.opensearch.index.mapper.MapperService;
+import org.opensearch.index.query.QueryBuilder;
+import org.opensearch.index.query.QueryBuilderVisitor;
 import org.opensearch.index.query.QueryShardContext;
-import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.index.similarity.SimilarityService;
 import org.opensearch.indices.IndicesModule;
 import org.opensearch.indices.analysis.AnalysisModule;
-import org.opensearch.indices.breaker.NoneCircuitBreakerService;
 import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache;
 import org.opensearch.indices.mapper.MapperRegistry;
 import org.opensearch.node.InternalSettingsPreparer;
@@ -317,6 +319,20 @@ protected static QueryShardContext createShardContext() {
         return createShardContext(null);
     }

+    protected static QueryBuilderVisitor createTestVisitor(List<QueryBuilder> visitedQueries) {
+        return new QueryBuilderVisitor() {
+            @Override
+            public void accept(QueryBuilder qb) {
+                visitedQueries.add(qb);
+            }
+
+            @Override
+            public QueryBuilderVisitor getChildVisitor(BooleanClause.Occur occur) {
+                return this;
+            }
+        };
+    }
+
     private static class ClientInvocationHandler implements InvocationHandler {

         AbstractBuilderTestCase delegate;
@@ -434,42 +450,40 @@ public void onRemoval(ShardId shardId, Accountable accountable) {
             mapperService.merge(
                 "_doc",
                 new CompressedXContent(
-                    Strings.toString(
-                        PutMappingRequest.simpleMapping(
-                            TEXT_FIELD_NAME,
-                            "type=text",
-                            KEYWORD_FIELD_NAME,
-                            "type=keyword",
-                            TEXT_ALIAS_FIELD_NAME,
-                            "type=alias,path=" + TEXT_FIELD_NAME,
-                            INT_FIELD_NAME,
-                            "type=integer",
-                            INT_ALIAS_FIELD_NAME,
-                            "type=alias,path=" + INT_FIELD_NAME,
-                            INT_RANGE_FIELD_NAME,
-                            "type=integer_range",
-                            DOUBLE_FIELD_NAME,
-                            "type=double",
-                            BOOLEAN_FIELD_NAME,
-                            "type=boolean",
-                            DATE_NANOS_FIELD_NAME,
-                            "type=date_nanos",
-                            DATE_FIELD_NAME,
-                            "type=date",
-                            DATE_ALIAS_FIELD_NAME,
-                            "type=alias,path=" + DATE_FIELD_NAME,
-                            DATE_RANGE_FIELD_NAME,
-                            "type=date_range",
-                            OBJECT_FIELD_NAME,
-                            "type=object",
-                            GEO_POINT_FIELD_NAME,
-                            "type=geo_point",
-                            GEO_POINT_ALIAS_FIELD_NAME,
-                            "type=alias,path=" + GEO_POINT_FIELD_NAME,
-                            GEO_SHAPE_FIELD_NAME,
-                            "type=geo_shape"
-                        )
-                    )
+                    PutMappingRequest.simpleMapping(
+                        TEXT_FIELD_NAME,
+                        "type=text",
+                        KEYWORD_FIELD_NAME,
+                        "type=keyword",
+                        TEXT_ALIAS_FIELD_NAME,
+                        "type=alias,path=" + TEXT_FIELD_NAME,
+                        INT_FIELD_NAME,
+                        "type=integer",
+                        INT_ALIAS_FIELD_NAME,
+                        "type=alias,path=" + INT_FIELD_NAME,
+                        INT_RANGE_FIELD_NAME,
+                        "type=integer_range",
+                        DOUBLE_FIELD_NAME,
+                        "type=double",
+                        BOOLEAN_FIELD_NAME,
+                        "type=boolean",
+                        DATE_NANOS_FIELD_NAME,
+                        "type=date_nanos",
+                        DATE_FIELD_NAME,
+                        "type=date",
+                        DATE_ALIAS_FIELD_NAME,
+                        "type=alias,path=" + DATE_FIELD_NAME,
+                        DATE_RANGE_FIELD_NAME,
+                        "type=date_range",
+                        OBJECT_FIELD_NAME,
+                        "type=object",
+                        GEO_POINT_FIELD_NAME,
+                        "type=geo_point",
+                        GEO_POINT_ALIAS_FIELD_NAME,
+                        "type=alias,path=" + GEO_POINT_FIELD_NAME,
+                        GEO_SHAPE_FIELD_NAME,
+                        "type=geo_shape"
+                    ).toString()
                 ),
                 MapperService.MergeReason.MAPPING_UPDATE
             );
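createTestVisitor gives query tests a trivial QueryBuilderVisitor that records every builder it reaches and reuses itself for nested clauses, regardless of the BooleanClause.Occur of the child. A usage sketch, assuming the query shape below (illustrative) and that BoolQueryBuilder visits itself and then each clause:

    List<QueryBuilder> visited = new ArrayList<>();
    QueryBuilder query = QueryBuilders.boolQuery()
        .must(QueryBuilders.termQuery("user", "kimchy"))
        .filter(QueryBuilders.rangeQuery("age").gte(10));
    query.visit(createTestVisitor(visited));
    // visited should now hold the bool query plus its two clauses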
diff --git a/test/framework/src/main/java/org/opensearch/test/AbstractDiffableSerializationTestCase.java b/test/framework/src/main/java/org/opensearch/test/AbstractDiffableSerializationTestCase.java
index f5d358a162bd1..10e782e6af8da 100644
--- a/test/framework/src/main/java/org/opensearch/test/AbstractDiffableSerializationTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/AbstractDiffableSerializationTestCase.java
@@ -41,8 +41,8 @@

 /**
  * An abstract test case to ensure correct behavior of Diffable.
- *
- * This class can be used as a based class for tests of Metadata.Custom classes and other classes that support,
+ * <p>
+ * This class can be used as a based class for tests of Metadata.Custom classes and other classes that support {@link org.opensearch.core.common.io.stream.Writeable } serialization, XContent-based serialization and is diffable.
  * Writable serialization, XContent-based serialization and is diffable.
  */
 public abstract class AbstractDiffableSerializationTestCase<T extends Diffable<T> & ToXContent> extends AbstractSerializingTestCase<T> {
diff --git a/test/framework/src/main/java/org/opensearch/test/AbstractDiffableWireSerializationTestCase.java b/test/framework/src/main/java/org/opensearch/test/AbstractDiffableWireSerializationTestCase.java
index ff7c39cd8f102..3f97f0704d733 100644
--- a/test/framework/src/main/java/org/opensearch/test/AbstractDiffableWireSerializationTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/AbstractDiffableWireSerializationTestCase.java
@@ -40,8 +40,8 @@

 /**
  * An abstract test case to ensure correct behavior of Diffable.
- *
- * This class can be used as a based class for tests of ClusterState.Custom classes and other classes that support,
+ * <p>
+ * This class can be used as a based class for tests of ClusterState.Custom classes and other classes that support {@link org.opensearch.core.common.io.stream.Writeable } serialization and is diffable.
  * Writable serialization and is diffable.
 */
 public abstract class AbstractDiffableWireSerializationTestCase<T extends Diffable<T>> extends AbstractWireSerializingTestCase<T> {
diff --git a/test/framework/src/main/java/org/opensearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/opensearch/test/AbstractQueryTestCase.java
index 096b265d81019..afd93e1b72fbb 100644
--- a/test/framework/src/main/java/org/opensearch/test/AbstractQueryTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/AbstractQueryTestCase.java
@@ -41,26 +41,27 @@
 import org.opensearch.OpenSearchParseException;
 import org.opensearch.Version;
 import org.opensearch.action.support.PlainActionFuture;
-import org.opensearch.core.common.ParsingException;
-import org.opensearch.common.Strings;
-import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.common.collect.Tuple;
 import org.opensearch.common.io.stream.BytesStreamOutput;
+import org.opensearch.common.unit.Fuzziness;
+import org.opensearch.common.xcontent.XContentFactory;
+import org.opensearch.common.xcontent.XContentType;
+import org.opensearch.common.xcontent.json.JsonXContent;
+import org.opensearch.core.common.ParsingException;
+import org.opensearch.core.common.Strings;
+import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.Writeable.Reader;
-import org.opensearch.common.unit.Fuzziness;
 import org.opensearch.core.xcontent.DeprecationHandler;
+import org.opensearch.core.xcontent.MediaType;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.core.xcontent.NamedXContentRegistry;
 import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.common.xcontent.XContentFactory;
 import org.opensearch.core.xcontent.XContentGenerator;
-import org.opensearch.common.xcontent.XContentHelper;
 import org.opensearch.core.xcontent.XContentParseException;
 import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.common.xcontent.XContentType;
-import org.opensearch.common.xcontent.json.JsonXContent;
 import org.opensearch.index.query.AbstractQueryBuilder;
 import org.opensearch.index.query.QueryBuilder;
 import org.opensearch.index.query.QueryRewriteContext;
@@ -212,7 +213,7 @@ public void testUnknownObjectException() throws IOException {
     /**
      * Traverses the json tree of the valid query provided as argument and mutates it one or more times by adding one object within each
      * object encountered.
-     *
+     * <p>
      * For instance given the following valid term query:
      * {
      *     "term" : {
@@ -221,7 +222,7 @@ public void testUnknownObjectException() throws IOException {
      *         }
      *     }
      * }
-     *
+     * <p>
     * The following two mutations will be generated, and an exception is expected when trying to parse them:
      * {
      *     "term" : {
@@ -232,7 +233,7 @@ public void testUnknownObjectException() throws IOException {
      *         }
      *     }
      * }
-     *
+     * <p>
      * {
      *     "term" : {
      *         "field" : {
@@ -242,7 +243,7 @@ public void testUnknownObjectException() throws IOException {
      *         }
      *     }
      * }
-     *
+     * <p>
      * Every mutation is then added to the list of results with a boolean flag indicating if a parsing exception is expected or not
      * for the mutation. Some specific objects do not cause any exception as they can hold arbitrary content; they are passed using the
      * arbitraryMarkers parameter.
@@ -262,7 +263,7 @@ static List<Tuple<String, Boolean>> alterateQueries(Set<String> queries, Set<Str
             BytesStreamOutput out = new BytesStreamOutput();
             try (
-                XContentGenerator generator = XContentType.JSON.xContent().createGenerator(out);
+                XContentGenerator generator = MediaTypeRegistry.JSON.xContent().createGenerator(out);
                 XContentParser parser = JsonXContent.jsonXContent.createParser(
                     NamedXContentRegistry.EMPTY,
                     DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
@@ -408,7 +409,7 @@ private void assertParsedQuery(XContentParser parser, QueryBuilder expectedQuery
     }

     protected QueryBuilder parseQuery(AbstractQueryBuilder<?> builder) throws IOException {
-        BytesReference bytes = XContentHelper.toXContent(builder, XContentType.JSON, false);
+        BytesReference bytes = org.opensearch.core.xcontent.XContentHelper.toXContent(builder, MediaTypeRegistry.JSON, false);
         return parseQuery(createParser(JsonXContent.jsonXContent, bytes));
     }
@@ -633,11 +634,11 @@ public QB mutateInstance(QB instance) throws IOException {
     public void testValidOutput() throws IOException {
         for (int runs = 0; runs < NUMBER_OF_TESTQUERIES; runs++) {
             QB testQuery = createTestQueryBuilder();
-            XContentType xContentType = XContentType.JSON;
-            String toString = Strings.toString(XContentType.JSON, testQuery);
-            assertParsedQuery(createParser(xContentType.xContent(), toString), testQuery);
-            BytesReference bytes = XContentHelper.toXContent(testQuery, xContentType, false);
-            assertParsedQuery(createParser(xContentType.xContent(), bytes), testQuery);
+            MediaType mediaType = MediaTypeRegistry.JSON;
+            String toString = Strings.toString(MediaTypeRegistry.JSON, testQuery);
+            assertParsedQuery(createParser(mediaType.xContent(), toString), testQuery);
+            BytesReference bytes = org.opensearch.core.xcontent.XContentHelper.toXContent(testQuery, mediaType, false);
+            assertParsedQuery(createParser(mediaType.xContent(), bytes), testQuery);
         }
     }
@@ -767,7 +768,7 @@ protected static String randomMinimumShouldMatch() {
     /**
      * Call this method to check a valid json string representing the query under test against
      * it's generated json.
-     *
+     * <p>
      * Note: By the time of this writing (Nov 2015) all queries are taken from the query dsl
      * reference docs mirroring examples there. Here's how the queries were generated:
      *
@@ -784,11 +785,7 @@ public static void checkGeneratedJson(String expected, QueryBuilder source) thro
         // now assert that we actually generate the same JSON
         XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
         source.toXContent(builder, ToXContent.EMPTY_PARAMS);
-        assertEquals(
-            msg(expected, Strings.toString(builder)),
-            expected.replaceAll("\\s+", ""),
-            Strings.toString(builder).replaceAll("\\s+", "")
-        );
+        assertEquals(msg(expected, builder.toString()), expected.replaceAll("\\s+", ""), builder.toString().replaceAll("\\s+", ""));
     }

     private static String msg(String left, String right) {
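The recurring round-trip pattern these hunks converge on is worth spelling out once: serialize a query with the core MediaType, then parse it back and compare. A condensed sketch of that pattern, assuming a test query builder named testQuery and the createParser overloads this class already uses:

    MediaType mediaType = MediaTypeRegistry.JSON;
    String asString = Strings.toString(mediaType, testQuery);
    BytesReference asBytes = org.opensearch.core.xcontent.XContentHelper.toXContent(testQuery, mediaType, false);
    // Both representations must parse back to an equal builder.
    assertParsedQuery(createParser(mediaType.xContent(), asString), testQuery);
    assertParsedQuery(createParser(mediaType.xContent(), asBytes), testQuery);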
diff --git a/test/framework/src/main/java/org/opensearch/test/AbstractSerializingTestCase.java b/test/framework/src/main/java/org/opensearch/test/AbstractSerializingTestCase.java
index f3496aabcb292..e85a8631d8b62 100644
--- a/test/framework/src/main/java/org/opensearch/test/AbstractSerializingTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/AbstractSerializingTestCase.java
@@ -32,13 +32,14 @@

 package org.opensearch.test;

-import org.opensearch.core.common.io.stream.Writeable;
+import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.core.common.Strings;
+import org.opensearch.core.common.io.stream.Writeable;
+import org.opensearch.core.xcontent.MediaType;
 import org.opensearch.core.xcontent.ToXContent;
+import org.opensearch.core.xcontent.XContent;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.common.xcontent.XContentType;
-import org.opensearch.core.xcontent.XContent;

 import java.io.IOException;
 import java.time.Instant;
@@ -75,7 +76,7 @@ public final void testFromXContent() throws IOException {
      * Override this method if the random instance that you build
      * should be aware of the {@link XContentType} used in the test.
      */
-    protected T createXContextTestInstance(XContentType xContentType) {
+    protected T createXContextTestInstance(final MediaType mediaType) {
         return createTestInstance();
     }
diff --git a/test/framework/src/main/java/org/opensearch/test/AbstractWireTestCase.java b/test/framework/src/main/java/org/opensearch/test/AbstractWireTestCase.java
index 9a4363dc4d946..64c5d916d55d2 100644
--- a/test/framework/src/main/java/org/opensearch/test/AbstractWireTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/AbstractWireTestCase.java
@@ -124,9 +124,9 @@ protected final T copyInstance(T instance) throws IOException {

     /**
      * Get the {@link NamedWriteableRegistry} to use when de-serializing the object.
-     *
+     * <p>
      * Override this method if you need to register {@link NamedWriteable}s for the test object to de-serialize.
-     *
+     * <p>
      * By default this will return a {@link NamedWriteableRegistry} with no registered {@link NamedWriteable}s
      */
     protected NamedWriteableRegistry getNamedWriteableRegistry() {
diff --git a/test/framework/src/main/java/org/opensearch/test/AbstractXContentTestCase.java b/test/framework/src/main/java/org/opensearch/test/AbstractXContentTestCase.java
index a83399d902c22..bcb900aee51fb 100644
--- a/test/framework/src/main/java/org/opensearch/test/AbstractXContentTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/AbstractXContentTestCase.java
@@ -35,15 +35,15 @@
 import org.opensearch.common.CheckedBiConsumer;
 import org.opensearch.common.CheckedBiFunction;
 import org.opensearch.common.CheckedFunction;
-import org.opensearch.core.common.bytes.BytesReference;
+import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.core.common.Strings;
+import org.opensearch.core.common.bytes.BytesReference;
+import org.opensearch.core.xcontent.MediaType;
 import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.core.xcontent.XContent;
 import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.common.xcontent.XContentFactory;
-import org.opensearch.common.xcontent.XContentHelper;
+import org.opensearch.core.xcontent.XContentHelper;
 import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.common.xcontent.XContentType;

 import java.io.IOException;
 import java.util.function.BiConsumer;
@@ -94,7 +94,7 @@ public static <T extends ToXContent> XContentTester<T> xContentTester(

     public static <T extends ToXContent> XContentTester<T> xContentTester(
         CheckedBiFunction<XContent, BytesReference, XContentParser, IOException> createParser,
-        Function<XContentType, T> instanceSupplier,
+        Function<MediaType, T> instanceSupplier,
         ToXContent.Params toXContentParams,
         CheckedFunction<XContentParser, T, IOException> fromXContent
     ) {
@@ -111,8 +111,8 @@ public static <T extends ToXContent> XContentTester<T> xContentTester(
     */
     public static class XContentTester<T> {
         private final CheckedBiFunction<XContent, BytesReference, XContentParser, IOException> createParser;
-        private final Function<XContentType, T> instanceSupplier;
-        private final CheckedBiFunction<T, XContentType, BytesReference, IOException> toXContent;
+        private final Function<MediaType, T> instanceSupplier;
+        private final CheckedBiFunction<T, MediaType, BytesReference, IOException> toXContent;
         private final CheckedFunction<XContentParser, T, IOException> fromXContent;

         private int numberOfTestRuns = NUMBER_OF_TEST_RUNS;
@@ -128,8 +128,8 @@ public static class XContentTester<T> {
         private XContentTester(
             CheckedBiFunction<XContent, BytesReference, XContentParser, IOException> createParser,
-            Function<XContentType, T> instanceSupplier,
-            CheckedBiFunction<T, XContentType, BytesReference, IOException> toXContent,
+            Function<MediaType, T> instanceSupplier,
+            CheckedBiFunction<T, MediaType, BytesReference, IOException> toXContent,
             CheckedFunction<XContentParser, T, IOException> fromXContent
         ) {
             this.createParser = createParser;
@@ -151,7 +151,7 @@ public void test() throws IOException {
                     randomFieldsExcludeFilter,
                     createParser
                 );
-                XContentParser parser = createParser.apply(XContentFactory.xContent(xContentType), shuffledContent);
+                XContentParser parser = createParser.apply(xContentType.xContent(), shuffledContent);
                 T parsed = fromXContent.apply(parser);
                 assertEqualsConsumer.accept(testInstance, parsed);
                 if (assertToXContentEquivalence) {
@@ -292,7 +292,7 @@ protected ToXContent.Params getToXContentParams() {

     static BytesReference insertRandomFieldsAndShuffle(
         BytesReference xContent,
-        XContentType xContentType,
+        MediaType mediaType,
         boolean supportsUnknownFields,
         String[] shuffleFieldsExceptions,
         Predicate<String> randomFieldsExcludeFilter,
@@ -301,11 +301,11 @@ static BytesReference insertRandomFieldsAndShuffle(
         BytesReference withRandomFields;
         if (supportsUnknownFields) {
             // add a few random fields to check that the parser is lenient on new fields
-            withRandomFields = XContentTestUtils.insertRandomFields(xContentType, xContent, randomFieldsExcludeFilter, random());
+            withRandomFields = XContentTestUtils.insertRandomFields(mediaType, xContent, randomFieldsExcludeFilter, random());
         } else {
             withRandomFields = xContent;
         }
-        XContentParser parserWithRandonFields = createParserFunction.apply(XContentFactory.xContent(xContentType), withRandomFields);
+        XContentParser parserWithRandonFields = createParserFunction.apply(mediaType.xContent(), withRandomFields);
         return BytesReference.bytes(shuffleXContent(parserWithRandonFields, false, shuffleFieldsExceptions));
     }
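With the signatures above switched to MediaType, a concrete XContent round-trip test drives the tester the same way as before. A minimal sketch using the four-argument overload shown in this hunk; MyObject, its random() factory, and its fromXContent parser are hypothetical names:

    AbstractXContentTestCase.<MyObject>xContentTester(
        this::createParser,                 // (XContent, BytesReference) -> XContentParser
        mediaType -> MyObject.random(),     // instance supplier, now keyed by MediaType
        ToXContent.EMPTY_PARAMS,
        MyObject::fromXContent
    ).test();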
diff --git a/test/framework/src/main/java/org/opensearch/test/BackgroundIndexer.java b/test/framework/src/main/java/org/opensearch/test/BackgroundIndexer.java
index 647e42e5038f4..22ba6ed2c4224 100644
--- a/test/framework/src/main/java/org/opensearch/test/BackgroundIndexer.java
+++ b/test/framework/src/main/java/org/opensearch/test/BackgroundIndexer.java
@@ -35,6 +35,7 @@
 import com.carrotsearch.randomizedtesting.RandomizedTest;
 import com.carrotsearch.randomizedtesting.generators.RandomNumbers;
 import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
@@ -47,8 +48,8 @@
 import org.opensearch.client.Client;
 import org.opensearch.common.unit.TimeValue;
 import org.opensearch.common.util.concurrent.ConcurrentCollections;
-import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.common.xcontent.XContentFactory;
+import org.opensearch.core.xcontent.XContentBuilder;
 import org.junit.Assert;

 import java.io.IOException;
diff --git a/test/framework/src/main/java/org/opensearch/test/CorruptionUtils.java b/test/framework/src/main/java/org/opensearch/test/CorruptionUtils.java
index abfb3edc26417..67522bb618cf1 100644
--- a/test/framework/src/main/java/org/opensearch/test/CorruptionUtils.java
+++ b/test/framework/src/main/java/org/opensearch/test/CorruptionUtils.java
@@ -32,8 +32,9 @@
 package org.opensearch.test;

 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
-import org.apache.logging.log4j.Logger;
+
 import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.store.ChecksumIndexInput;
@@ -50,9 +51,9 @@
 import java.nio.file.StandardOpenOption;
 import java.util.Random;

-import static org.apache.lucene.tests.util.LuceneTestCase.assumeTrue;
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.notNullValue;
+import static org.apache.lucene.tests.util.LuceneTestCase.assumeTrue;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;

@@ -120,7 +121,7 @@ public static void corruptFile(Random random, Path... files) throws IOException
         }
     }

-    static void corruptAt(Path path, FileChannel channel, int position) throws IOException {
+    public static void corruptAt(Path path, FileChannel channel, int position) throws IOException {
         // read
         channel.position(position);
         long filePointer = channel.position();
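corruptAt is promoted from package-private to public so tests outside org.opensearch.test can flip a single byte at a known offset. A usage sketch; the path variable and offset are illustrative:

    try (FileChannel channel = FileChannel.open(segmentFile, StandardOpenOption.READ, StandardOpenOption.WRITE)) {
        // Flip one byte at offset 42 to simulate on-disk corruption.
        CorruptionUtils.corruptAt(segmentFile, channel, 42);
    }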
diff --git a/test/framework/src/main/java/org/opensearch/test/DummyShardLock.java b/test/framework/src/main/java/org/opensearch/test/DummyShardLock.java
index 82e5fa8733c42..36d6c1987dc2b 100644
--- a/test/framework/src/main/java/org/opensearch/test/DummyShardLock.java
+++ b/test/framework/src/main/java/org/opensearch/test/DummyShardLock.java
@@ -32,8 +32,8 @@

 package org.opensearch.test;

-import org.opensearch.env.ShardLock;
 import org.opensearch.core.index.shard.ShardId;
+import org.opensearch.env.ShardLock;

 /*
  * A ShardLock that does nothing... for tests only
diff --git a/test/framework/src/main/java/org/opensearch/test/ExternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/ExternalTestCluster.java
index 4eb73487ac886..3eb1680069b06 100644
--- a/test/framework/src/main/java/org/opensearch/test/ExternalTestCluster.java
+++ b/test/framework/src/main/java/org/opensearch/test/ExternalTestCluster.java
@@ -41,11 +41,11 @@
 import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse;
 import org.opensearch.client.Client;
 import org.opensearch.cluster.node.DiscoveryNode;
-import org.opensearch.common.breaker.CircuitBreaker;
-import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
 import org.opensearch.common.network.NetworkModule;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.transport.TransportAddress;
+import org.opensearch.core.common.breaker.CircuitBreaker;
+import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
+import org.opensearch.core.common.transport.TransportAddress;
 import org.opensearch.env.Environment;
 import org.opensearch.http.HttpInfo;
 import org.opensearch.node.MockNode;
diff --git a/test/framework/src/main/java/org/opensearch/test/FeatureFlagSetter.java b/test/framework/src/main/java/org/opensearch/test/FeatureFlagSetter.java
index eddcf9c738bb3..f698cd03c464f 100644
--- a/test/framework/src/main/java/org/opensearch/test/FeatureFlagSetter.java
+++ b/test/framework/src/main/java/org/opensearch/test/FeatureFlagSetter.java
@@ -46,6 +46,7 @@ public static synchronized void clear() {
     private static final Logger LOGGER = LogManager.getLogger(FeatureFlagSetter.class);
     private final Set<String> flags = ConcurrentCollections.newConcurrentSet();

+    @SuppressWarnings("removal")
     @SuppressForbidden(reason = "Enables setting of feature flags")
     private void setFlag(String flag) {
         flags.add(flag);
@@ -53,6 +54,7 @@ private void setFlag(String flag) {
         LOGGER.info("set feature_flag={}", flag);
     }

+    @SuppressWarnings("removal")
     @SuppressForbidden(reason = "Clears the set feature flags")
     private void clearAll() {
         for (String flag : flags) {
diff --git a/test/framework/src/main/java/org/opensearch/test/FieldMaskingReader.java b/test/framework/src/main/java/org/opensearch/test/FieldMaskingReader.java
index dba42b546c1a2..8dbf7514bc8e4 100644
--- a/test/framework/src/main/java/org/opensearch/test/FieldMaskingReader.java
+++ b/test/framework/src/main/java/org/opensearch/test/FieldMaskingReader.java
@@ -32,10 +32,10 @@
 package org.opensearch.test;

 import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.tests.index.FieldFilterLeafReader;
 import org.apache.lucene.index.FilterDirectoryReader;
 import org.apache.lucene.index.FilterLeafReader;
 import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.tests.index.FieldFilterLeafReader;

 import java.io.IOException;
 import java.util.Collections;
diff --git a/test/framework/src/main/java/org/opensearch/test/InternalAggregationTestCase.java b/test/framework/src/main/java/org/opensearch/test/InternalAggregationTestCase.java
index 95246e06d028c..7e29e84c847fe 100644
--- a/test/framework/src/main/java/org/opensearch/test/InternalAggregationTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/InternalAggregationTestCase.java
@@ -33,21 +33,21 @@
 package org.opensearch.test;

 import org.opensearch.common.SetOnce;
-import org.opensearch.common.breaker.CircuitBreaker;
-import org.opensearch.core.common.bytes.BytesReference;
-import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.util.BigArrays;
 import org.opensearch.common.util.MockBigArrays;
 import org.opensearch.common.util.MockPageCacheRecycler;
+import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.core.ParseField;
+import org.opensearch.core.common.breaker.CircuitBreaker;
+import org.opensearch.core.common.bytes.BytesReference;
+import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
+import org.opensearch.core.indices.breaker.NoneCircuitBreakerService;
 import org.opensearch.core.xcontent.ContextParser;
 import org.opensearch.core.xcontent.NamedXContentRegistry;
 import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.core.xcontent.XContentParser;
 import org.opensearch.core.xcontent.XContentParserUtils;
-import org.opensearch.common.xcontent.XContentType;
-import org.opensearch.indices.breaker.NoneCircuitBreakerService;
 import org.opensearch.plugins.Plugin;
 import org.opensearch.plugins.SearchPlugin;
 import org.opensearch.rest.action.search.RestSearchAction;
@@ -175,7 +175,7 @@

 import static java.util.Collections.emptyList;
 import static java.util.Collections.singletonMap;
-import static org.opensearch.common.xcontent.XContentHelper.toXContent;
+import static org.opensearch.core.xcontent.XContentHelper.toXContent;
 import static org.opensearch.search.aggregations.InternalMultiBucketAggregation.countInnerBucket;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.hasSize;
diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java
index e062c5d166f12..c2b964aa96212 100644
--- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java
+++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java
@@ -36,6 +36,7 @@
 import com.carrotsearch.randomizedtesting.generators.RandomNumbers;
 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
 import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.lucene.store.AlreadyClosedException;
@@ -54,6 +55,7 @@
 import org.opensearch.cluster.coordination.ClusterBootstrapService;
 import org.opensearch.cluster.coordination.NoClusterManagerBlockService;
 import org.opensearch.cluster.metadata.IndexMetadata;
+import org.opensearch.cluster.metadata.RepositoriesMetadata;
 import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.cluster.node.DiscoveryNodeRole;
 import org.opensearch.cluster.node.DiscoveryNodes;
@@ -65,30 +67,32 @@
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.Nullable;
 import org.opensearch.common.Randomness;
-import org.opensearch.common.breaker.CircuitBreaker;
-import org.opensearch.common.component.LifecycleListener;
-import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
+import org.opensearch.common.lease.Releasables;
+import org.opensearch.common.lifecycle.LifecycleListener;
 import org.opensearch.common.settings.MockSecureSettings;
 import org.opensearch.common.settings.SecureSettings;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.settings.Settings.Builder;
-import org.opensearch.common.unit.ByteSizeUnit;
-import org.opensearch.common.unit.ByteSizeValue;
 import org.opensearch.common.unit.TimeValue;
 import org.opensearch.common.util.PageCacheRecycler;
-import org.opensearch.common.util.concurrent.OpenSearchExecutors;
 import org.opensearch.common.util.concurrent.FutureUtils;
+import org.opensearch.common.util.concurrent.OpenSearchExecutors;
 import org.opensearch.common.util.concurrent.ThreadContext;
-import org.opensearch.common.util.set.Sets;
 import org.opensearch.common.util.io.IOUtils;
-import org.opensearch.common.lease.Releasables;
+import org.opensearch.common.util.set.Sets;
 import org.opensearch.core.common.Strings;
+import org.opensearch.core.common.breaker.CircuitBreaker;
+import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
+import org.opensearch.core.common.unit.ByteSizeUnit;
+import org.opensearch.core.common.unit.ByteSizeValue;
+import org.opensearch.core.index.Index;
+import org.opensearch.core.index.shard.ShardId;
+import org.opensearch.core.indices.breaker.CircuitBreakerService;
 import org.opensearch.core.util.FileSystemUtils;
 import org.opensearch.env.Environment;
 import org.opensearch.env.NodeEnvironment;
 import org.opensearch.env.ShardLockObtainFailedException;
 import org.opensearch.http.HttpServerTransport;
-import org.opensearch.core.index.Index;
 import org.opensearch.index.IndexService;
 import org.opensearch.index.IndexingPressure;
 import org.opensearch.index.engine.DocIdSeqNoAndSource;
@@ -99,9 +103,7 @@
 import org.opensearch.index.seqno.SequenceNumbers;
 import org.opensearch.index.shard.IndexShard;
 import org.opensearch.index.shard.IndexShardTestCase;
-import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.indices.IndicesService;
-import org.opensearch.indices.breaker.CircuitBreakerService;
 import org.opensearch.indices.breaker.HierarchyCircuitBreakerService;
 import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache;
 import org.opensearch.indices.recovery.RecoverySettings;
@@ -151,26 +153,26 @@
 import java.util.stream.IntStream;
 import java.util.stream.Stream;

-import static org.apache.lucene.tests.util.LuceneTestCase.TEST_NIGHTLY;
-import static org.apache.lucene.tests.util.LuceneTestCase.rarely;
 import static org.opensearch.cluster.coordination.ClusterBootstrapService.INITIAL_CLUSTER_MANAGER_NODES_SETTING;
 import static org.opensearch.common.unit.TimeValue.timeValueMillis;
 import static org.opensearch.common.unit.TimeValue.timeValueSeconds;
 import static org.opensearch.discovery.DiscoveryModule.DISCOVERY_TYPE_SETTING;
 import static org.opensearch.discovery.DiscoveryModule.ZEN2_DISCOVERY_TYPE;
 import static org.opensearch.discovery.FileBasedSeedHostsProvider.UNICAST_HOSTS_FILE;
-import static org.opensearch.test.NodeRoles.onlyRoles;
-import static org.opensearch.test.OpenSearchTestCase.assertBusy;
-import static org.opensearch.test.OpenSearchTestCase.randomFrom;
 import static org.opensearch.test.NodeRoles.dataOnlyNode;
 import static org.opensearch.test.NodeRoles.noRoles;
 import static org.opensearch.test.NodeRoles.onlyRole;
+import static org.opensearch.test.NodeRoles.onlyRoles;
 import static org.opensearch.test.NodeRoles.removeRoles;
+import static org.opensearch.test.OpenSearchTestCase.assertBusy;
+import static org.opensearch.test.OpenSearchTestCase.randomFrom;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 import static org.hamcrest.Matchers.not;
 import static org.hamcrest.Matchers.nullValue;
+import static org.apache.lucene.tests.util.LuceneTestCase.TEST_NIGHTLY;
+import static org.apache.lucene.tests.util.LuceneTestCase.rarely;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertThat;
@@ -1317,6 +1319,12 @@ public synchronized void validateClusterFormed() {
                     assertTrue("Expected node to exist: " + expectedNode + debugString, discoveryNodes.nodeExists(expectedNode));
                 }
             });
+            states.forEach(cs -> {
+                if (cs.nodes().getNodes().values().stream().findFirst().get().isRemoteStoreNode()) {
+                    RepositoriesMetadata repositoriesMetadata = cs.metadata().custom(RepositoriesMetadata.TYPE);
+                    assertTrue(repositoriesMetadata != null && !repositoriesMetadata.repositories().isEmpty());
+                }
+            });
         }, 30, TimeUnit.SECONDS);
     } catch (AssertionError ae) {
         throw new IllegalStateException("cluster failed to form", ae);
@@ -1808,7 +1816,7 @@ public synchronized void stopCurrentClusterManagerNode() throws IOException {
     /**
      * Stops any of the current nodes but not the cluster-manager node.
      */
-    public synchronized void stopRandomNonClusterManagerNode() throws IOException {
+    public synchronized void stopRandomNodeNotCurrentClusterManager() throws IOException {
         NodeAndClient nodeAndClient = getRandomNodeAndClient(new NodeNamePredicate(getClusterManagerName()).negate());
         if (nodeAndClient != null) {
             logger.info(
@@ -1833,11 +1841,46 @@ public synchronized void stopCurrentMasterNode() throws IOException {
     /**
      * Stops any of the current nodes but not the cluster-manager node.
      *
-     * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #stopRandomNonClusterManagerNode()}
+     * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #stopRandomNodeNotCurrentClusterManager()}
     */
     @Deprecated
-    public synchronized void stopRandomNonMasterNode() throws IOException {
-        stopRandomNonClusterManagerNode();
+    public synchronized void stopRandomNodeNotCurrentMaster() throws IOException {
+        stopRandomNodeNotCurrentClusterManager();
+    }
+
+    /**
+     * Stops all running nodes in cluster
+     */
+    public void stopAllNodes() {
+        try {
+            if (numDataAndClusterManagerNodes() != numClusterManagerNodes()) {
+                int totalDataNodes = numDataNodes();
+                while (totalDataNodes > 0) {
+                    stopRandomDataNode();
+                    totalDataNodes -= 1;
+                }
+            }
+            int totalClusterManagerNodes = numClusterManagerNodes();
+            while (totalClusterManagerNodes > 1) {
+                stopRandomNodeNotCurrentClusterManager();
+                totalClusterManagerNodes -= 1;
+            }
+            stopCurrentClusterManagerNode();
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    /**
+     * Replace all nodes by stopping all current node and starting new node.
+     * Used for remote store test cases, where remote state is restored.
+     */
+    public void resetCluster() {
+        int totalClusterManagerNodes = numClusterManagerNodes();
+        int totalDataNodes = numDataNodes();
+        stopAllNodes();
+        startClusterManagerOnlyNodes(totalClusterManagerNodes);
+        startDataOnlyNodes(totalDataNodes);
     }

     private synchronized void startAndPublishNodesAndClients(List<NodeAndClient> nodeAndClients) {
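stopAllNodes tears the cluster down in a safe order (data nodes first, then non-elected cluster-managers, then the elected cluster-manager), and resetCluster rebuilds the same node counts afterwards. A sketch of how a remote-store test might exercise a full cluster replacement; the index name and assertion are illustrative:

    // Full cluster replacement: remote cluster state should be restored on the new nodes.
    internalCluster().resetCluster();
    ensureGreen("test-idx");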
@@ -2689,6 +2732,10 @@ public void ensureEstimatedStats() {
                         false,
                         false,
                         false,
+                        false,
+                        false,
+                        false,
+                        false,
                         false
                     );
                     assertThat(
diff --git a/test/framework/src/main/java/org/opensearch/test/MockHttpTransport.java b/test/framework/src/main/java/org/opensearch/test/MockHttpTransport.java
index 7661b21c7cc0e..841cf62620f0d 100644
--- a/test/framework/src/main/java/org/opensearch/test/MockHttpTransport.java
+++ b/test/framework/src/main/java/org/opensearch/test/MockHttpTransport.java
@@ -32,9 +32,9 @@

 package org.opensearch.test;

-import org.opensearch.common.component.AbstractLifecycleComponent;
-import org.opensearch.common.transport.BoundTransportAddress;
-import org.opensearch.common.transport.TransportAddress;
+import org.opensearch.common.lifecycle.AbstractLifecycleComponent;
+import org.opensearch.core.common.transport.BoundTransportAddress;
+import org.opensearch.core.common.transport.TransportAddress;
 import org.opensearch.http.HttpInfo;
 import org.opensearch.http.HttpServerTransport;
 import org.opensearch.http.HttpStats;
diff --git a/test/framework/src/main/java/org/opensearch/test/MockIndexEventListener.java b/test/framework/src/main/java/org/opensearch/test/MockIndexEventListener.java
index b79343c418508..e8f22a409bf09 100644
--- a/test/framework/src/main/java/org/opensearch/test/MockIndexEventListener.java
+++ b/test/framework/src/main/java/org/opensearch/test/MockIndexEventListener.java
@@ -38,13 +38,13 @@
 import org.opensearch.common.settings.Setting.Property;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.core.index.Index;
+import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.index.IndexModule;
 import org.opensearch.index.IndexService;
 import org.opensearch.index.IndexSettings;
 import org.opensearch.index.shard.IndexEventListener;
 import org.opensearch.index.shard.IndexShard;
 import org.opensearch.index.shard.IndexShardState;
-import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason;
 import org.opensearch.plugins.Plugin;
diff --git a/test/framework/src/main/java/org/opensearch/test/MockKeywordPlugin.java b/test/framework/src/main/java/org/opensearch/test/MockKeywordPlugin.java
index c27f3f169fbae..3d829d77dd323 100644
--- a/test/framework/src/main/java/org/opensearch/test/MockKeywordPlugin.java
+++ b/test/framework/src/main/java/org/opensearch/test/MockKeywordPlugin.java
@@ -44,7 +44,7 @@
 /**
  * Some tests rely on the keyword tokenizer, but this tokenizer isn't part of lucene-core and therefor not available
  * in some modules. What this test plugin does, is use the mock tokenizer and advertise that as the keyword tokenizer.
- *
+ * <p>
  * Most tests that need this test plugin use normalizers. When normalizers are constructed they try to resolve the
  * keyword tokenizer, but if the keyword tokenizer isn't available then constructing normalizers will fail.
 */
diff --git a/test/framework/src/main/java/org/opensearch/test/MockLogAppender.java b/test/framework/src/main/java/org/opensearch/test/MockLogAppender.java
index 328aaf8a65b1f..6d6199833b25b 100644
--- a/test/framework/src/main/java/org/opensearch/test/MockLogAppender.java
+++ b/test/framework/src/main/java/org/opensearch/test/MockLogAppender.java
@@ -35,18 +35,23 @@
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.core.LogEvent;
 import org.apache.logging.log4j.core.appender.AbstractAppender;
+import org.apache.logging.log4j.core.config.Property;
 import org.apache.logging.log4j.core.filter.RegexFilter;
 import org.opensearch.common.logging.Loggers;
 import org.opensearch.common.regex.Regex;
+import org.opensearch.test.junit.annotations.TestLogging;

+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.regex.Pattern;

 import static org.hamcrest.CoreMatchers.equalTo;
 import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.fail;

 /**
  * Test appender that can be used to verify that certain events were logged correctly
@@ -68,11 +73,19 @@ public class MockLogAppender extends AbstractAppender implements AutoCloseable {
      * write to a closed MockLogAppender instance.
      */
     public static MockLogAppender createForLoggers(Logger... loggers) throws IllegalAccessException {
-        return createForLoggers(".*(\n.*)*", loggers);
+        final String callingClass = Thread.currentThread().getStackTrace()[2].getClassName();
+        return createForLoggersInternal(callingClass, ".*(\n.*)*", loggers);
     }

     public static MockLogAppender createForLoggers(String filter, Logger... loggers) throws IllegalAccessException {
+        final String callingClass = Thread.currentThread().getStackTrace()[2].getClassName();
+        return createForLoggersInternal(callingClass, filter, loggers);
+    }
+
+    private static MockLogAppender createForLoggersInternal(String callingClass, String filter, Logger... loggers)
+        throws IllegalAccessException {
         final MockLogAppender appender = new MockLogAppender(
+            callingClass + "-mock-log-appender",
             RegexFilter.createFilter(filter, new String[0], false, null, null),
             Collections.unmodifiableList(Arrays.asList(loggers))
         );
@@ -83,8 +96,8 @@ public static MockLogAppender createForLoggers(String filter, Logger... loggers)
         return appender;
     }

-    private MockLogAppender(RegexFilter filter, List<Logger> loggers) {
-        super("mock", filter, null);
+    private MockLogAppender(String name, RegexFilter filter, List<Logger> loggers) {
+        super(name, filter, null, true, Property.EMPTY_ARRAY);
         /*
          * We use a copy-on-write array list since log messages could be appended while we are setting up expectations. When that occurs,
          * we would run into a concurrent modification exception from the iteration over the expectations in #append, concurrent with a
@@ -116,7 +129,14 @@ public void close() {
         for (Logger logger : loggers) {
             Loggers.removeAppender(logger, this);
         }
-        this.stop();
+        super.stop();
+    }
+
+    @Override
+    public void stop() {
+        // MockLogAppender should be used with try-with-resources to ensure
+        // proper clean up ordering and should never be stopped directly.
+        throw new UnsupportedOperationException("Use close() to ensure proper clean up ordering");
     }

     public interface LoggingExpectation {
@@ -243,6 +263,59 @@ public void assertMatched() {

     }

+    /**
+     * Used for cases when the logger is dynamically named such as to include an index name or shard id
+     *
+     * Best used in conjunction with the root logger:
+     * {@code @TestLogging(value = "_root:debug", reason = "Validate logging output");}
+     * @see TestLogging
+     * */
+    public static class PatternSeenWithLoggerPrefixExpectation implements LoggingExpectation {
+        private final String expectationName;
+        private final String loggerPrefix;
+        private final Level level;
+        private final String messageMatchingRegex;
+
+        private final List<String> loggerMatches = new ArrayList<>();
+        private final AtomicBoolean eventSeen = new AtomicBoolean(false);
+
+        public PatternSeenWithLoggerPrefixExpectation(
+            final String expectationName,
+            final String loggerPrefix,
+            final Level level,
+            final String messageMatchingRegex
+        ) {
+            this.expectationName = expectationName;
+            this.loggerPrefix = loggerPrefix;
+            this.level = level;
+            this.messageMatchingRegex = messageMatchingRegex;
+        }
+
+        @Override
+        public void match(final LogEvent event) {
+            if (event.getLevel() == level && event.getLoggerName().startsWith(loggerPrefix)) {
+                final String formattedMessage = event.getMessage().getFormattedMessage();
+                loggerMatches.add(formattedMessage);
+                if (formattedMessage.matches(messageMatchingRegex)) {
+                    eventSeen.set(true);
+                }
+            }
+        }
+
+        @Override
+        public void assertMatched() {
+            if (!eventSeen.get()) {
+                final StringBuilder failureMessage = new StringBuilder();
+                failureMessage.append(expectationName + " was not seen, found " + loggerMatches.size() + " messages matching the logger.");
+                failureMessage.append("\r\nMessage matching regex: " + messageMatchingRegex);
+                if (!loggerMatches.isEmpty()) {
+                    failureMessage.append("\r\nMessage details:\r\n" + String.join("\r\n", loggerMatches));
+                }
+                fail(failureMessage.toString());
+            }
+        }
+    }
+
     private static String getLoggerName(String name) {
         if (name.startsWith("org.opensearch.")) {
             name = name.substring("org.opensearch.".length());
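PatternSeenWithLoggerPrefixExpectation targets loggers whose names include dynamic parts such as an index name or shard id, matching on a logger-name prefix plus a message regex. A usage sketch inside the try-with-resources idiom the class now enforces; the logger prefix and regex are illustrative:

    try (MockLogAppender appender = MockLogAppender.createForLoggers(LogManager.getRootLogger())) {
        appender.addExpectation(
            new MockLogAppender.PatternSeenWithLoggerPrefixExpectation(
                "shard-level warning",          // expectation name shown in the failure message
                "org.opensearch.index.shard",   // logger prefix, ignoring the dynamic suffix
                Level.WARN,
                ".*failed to sync.*"
            )
        );
        // ... trigger the code under test ...
        appender.assertAllExpectationsMatched();
    }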
diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
index 3564bd667ee2b..4d013229eb3b2 100644
--- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
@@ -32,23 +32,21 @@

 package org.opensearch.test;

-import com.carrotsearch.randomizedtesting.RandomizedContext;
 import com.carrotsearch.randomizedtesting.annotations.TestGroup;
 import com.carrotsearch.randomizedtesting.generators.RandomNumbers;
 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
-import org.apache.hc.core5.http.HttpHost;
+import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.index.SegmentInfos;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.TotalHits;
 import org.apache.lucene.tests.util.LuceneTestCase;
 import org.opensearch.ExceptionsHelper;
 import org.opensearch.OpenSearchException;
-import org.opensearch.action.ActionListener;
 import org.opensearch.action.DocWriteResponse;
 import org.opensearch.action.admin.cluster.health.ClusterHealthRequest;
 import org.opensearch.action.admin.cluster.health.ClusterHealthResponse;
 import org.opensearch.action.admin.cluster.node.hotthreads.NodeHotThreads;
-import org.opensearch.action.admin.cluster.node.info.NodeInfo;
 import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse;
 import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
 import org.opensearch.action.admin.cluster.state.ClusterStateResponse;
@@ -70,14 +68,13 @@
 import org.opensearch.action.index.IndexResponse;
 import org.opensearch.action.search.ClearScrollResponse;
 import org.opensearch.action.search.SearchResponse;
-import org.opensearch.core.action.support.DefaultShardOperationFailedException;
 import org.opensearch.action.support.IndicesOptions;
+import org.opensearch.action.support.WriteRequest;
 import org.opensearch.client.AdminClient;
 import org.opensearch.client.Client;
 import org.opensearch.client.ClusterAdminClient;
 import org.opensearch.client.Requests;
 import org.opensearch.client.RestClient;
-import org.opensearch.client.RestClientBuilder;
 import org.opensearch.cluster.ClusterModule;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.coordination.OpenSearchNodeCommand;
@@ -95,74 +92,81 @@
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.Nullable;
 import org.opensearch.common.Priority;
-import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.common.collect.Tuple;
-import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
-import org.opensearch.common.network.NetworkAddress;
+import org.opensearch.common.concurrent.GatedCloseable;
 import org.opensearch.common.network.NetworkModule;
 import org.opensearch.common.regex.Regex;
 import org.opensearch.common.settings.FeatureFlagSettings;
 import org.opensearch.common.settings.Setting;
 import org.opensearch.common.settings.Setting.Property;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.transport.TransportAddress;
-import org.opensearch.common.unit.ByteSizeUnit;
-import org.opensearch.common.unit.ByteSizeValue;
 import org.opensearch.common.unit.TimeValue;
 import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.common.util.concurrent.ThreadContext;
 import org.opensearch.common.xcontent.XContentHelper;
 import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.common.xcontent.smile.SmileXContent;
-import org.opensearch.common.util.io.IOUtils;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.action.support.DefaultShardOperationFailedException;
 import org.opensearch.core.common.Strings;
+import org.opensearch.core.common.bytes.BytesReference;
+import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
+import org.opensearch.core.common.transport.TransportAddress;
+import org.opensearch.core.common.unit.ByteSizeUnit;
+import org.opensearch.core.common.unit.ByteSizeValue;
 import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException;
+import org.opensearch.core.index.Index;
+import org.opensearch.core.index.shard.ShardId;
+import org.opensearch.core.rest.RestStatus;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.core.xcontent.NamedXContentRegistry;
 import org.opensearch.core.xcontent.ToXContent;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.core.xcontent.XContentParser;
 import org.opensearch.env.Environment;
 import org.opensearch.env.TestEnvironment;
-import org.opensearch.http.HttpInfo;
-import org.opensearch.core.index.Index;
 import org.opensearch.index.IndexModule;
+import org.opensearch.index.IndexService;
 import org.opensearch.index.IndexSettings;
-import org.opensearch.index.MergePolicyConfig;
 import org.opensearch.index.MergeSchedulerConfig;
 import org.opensearch.index.MockEngineFactoryPlugin;
+import org.opensearch.index.TieredMergePolicyProvider;
 import org.opensearch.index.codec.CodecService;
 import org.opensearch.index.engine.Segment;
+import org.opensearch.index.mapper.CompletionFieldMapper;
 import org.opensearch.index.mapper.MockFieldFilterPlugin;
+import org.opensearch.index.shard.IndexShard;
 import org.opensearch.index.store.Store;
 import org.opensearch.index.translog.Translog;
 import org.opensearch.indices.IndicesQueryCache;
 import org.opensearch.indices.IndicesRequestCache;
+import org.opensearch.indices.IndicesService;
+import org.opensearch.indices.replication.common.ReplicationType;
 import org.opensearch.indices.store.IndicesStore;
 import org.opensearch.monitor.os.OsInfo;
 import org.opensearch.node.NodeMocksPlugin;
 import org.opensearch.plugins.NetworkPlugin;
 import org.opensearch.plugins.Plugin;
-import org.opensearch.core.rest.RestStatus;
-import org.opensearch.rest.action.RestCancellableNodeClient;
+import org.opensearch.repositories.fs.ReloadableFsRepository;
 import org.opensearch.script.MockScriptService;
 import org.opensearch.search.MockSearchService;
 import org.opensearch.search.SearchHit;
 import org.opensearch.search.SearchService;
+import org.opensearch.telemetry.TelemetrySettings;
 import org.opensearch.test.client.RandomizingClient;
 import org.opensearch.test.disruption.NetworkDisruption;
 import org.opensearch.test.disruption.ServiceDisruptionScheme;
 import org.opensearch.test.store.MockFSIndexStore;
-import org.opensearch.test.transport.MockTransportService;
 import org.opensearch.test.telemetry.MockTelemetryPlugin;
+import org.opensearch.test.transport.MockTransportService;
 import org.opensearch.transport.TransportInterceptor;
 import org.opensearch.transport.TransportRequest;
 import org.opensearch.transport.TransportRequestHandler;
 import org.opensearch.transport.TransportService;
 import org.hamcrest.Matchers;
-import org.junit.After;
 import org.junit.AfterClass;
-import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Rule;

 import java.io.IOException;
 import java.lang.Runtime.Version;
@@ -183,13 +187,12 @@
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.IdentityHashMap;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
+import java.util.Optional;
 import java.util.Random;
 import java.util.Set;
-import java.util.concurrent.Callable;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
@@ -200,17 +203,24 @@
 import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS;
 import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS;
 import static org.opensearch.common.unit.TimeValue.timeValueMillis;
-import static org.opensearch.common.util.CollectionUtils.eagerPartition;
+import static org.opensearch.core.common.util.CollectionUtils.eagerPartition;
 import static org.opensearch.discovery.DiscoveryModule.DISCOVERY_SEED_PROVIDERS_SETTING;
 import static org.opensearch.discovery.SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING;
+import static org.opensearch.index.IndexSettings.INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING;
+import static org.opensearch.index.IndexSettings.INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING;
 import static org.opensearch.index.IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING;
 import static org.opensearch.index.query.QueryBuilders.matchAllQuery;
+import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING;
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY;
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX;
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT;
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY;
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY;
 import static org.opensearch.test.XContentTestUtils.convertToMap;
 import static org.opensearch.test.XContentTestUtils.differenceBetweenMapsIgnoringArrayOrder;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoTimeout;
-import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.emptyIterable;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
@@ -271,6 +281,17 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase {
      */
     public static final String SYSPROP_THIRDPARTY = "tests.thirdparty";

+    /**
+     * The lucene_default {@link Codec} is not added to the list as it internally maps to Asserting {@link Codec}.
+     * The override to fetch the {@link CompletionFieldMapper.CompletionFieldType} postings format is not available for this codec.
+     */
+    public static final List<String> CODECS = List.of(
+        CodecService.DEFAULT_CODEC,
+        CodecService.LZ4,
+        CodecService.BEST_COMPRESSION_CODEC,
+        CodecService.ZLIB
+    );
+
     /**
      * Annotation for third-party integration tests.
      * <p>
@@ -317,6 +338,10 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase {
     public static final String TESTS_ENABLE_MOCK_MODULES = "tests.enable_mock_modules";

     private static final boolean MOCK_MODULES_ENABLED = "true".equals(System.getProperty(TESTS_ENABLE_MOCK_MODULES, "true"));
+
+    @Rule
+    public static OpenSearchTestClusterRule testClusterRule = new OpenSearchTestClusterRule();
+
     /**
      * Threshold at which indexing switches from frequently async to frequently bulk.
     */
@@ -352,22 +377,9 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase {
      */
     public static final String TESTS_CLUSTER_NAME = "tests.clustername";

-    /**
-     * The current cluster depending on the configured {@link Scope}.
-     * By default if no {@link ClusterScope} is configured this will hold a reference to the suite cluster.
- */ - private static TestCluster currentCluster; - private static RestClient restClient = null; - - private static final Map<Class<?>, TestCluster> clusters = new IdentityHashMap<>(); - - private static OpenSearchIntegTestCase INSTANCE = null; // see @SuiteScope - private static Long SUITE_SEED = null; - @BeforeClass public static void beforeClass() throws Exception { - SUITE_SEED = randomLong(); - initializeSuiteScope(); + testClusterRule.beforeClass(); } @Override @@ -377,36 +389,6 @@ protected final boolean enableWarningsCheck() { return false; } - protected final void beforeInternal() throws Exception { - final Scope currentClusterScope = getCurrentClusterScope(); - Callable<Void> setup = () -> { - cluster().beforeTest(random()); - cluster().wipe(excludeTemplates()); - randomIndexTemplate(); - return null; - }; - switch (currentClusterScope) { - case SUITE: - assert SUITE_SEED != null : "Suite seed was not initialized"; - currentCluster = buildAndPutCluster(currentClusterScope, SUITE_SEED); - RandomizedContext.current().runWithPrivateRandomness(SUITE_SEED, setup); - break; - case TEST: - currentCluster = buildAndPutCluster(currentClusterScope, randomLong()); - setup.call(); - break; - } - - } - - private void printTestMessage(String message) { - if (isSuiteScopedTest(getClass()) && (getTestName().equals("<unknown>"))) { - logger.info("[{}]: {} suite", getTestClass().getSimpleName(), message); - } else { - logger.info("[{}#{}]: {} test", getTestClass().getSimpleName(), getTestName(), message); - } - } - /** * Creates a randomized index template. This template is used to pass in randomized settings on a * per index basis. Allows to enable/disable the randomization for number of shards and replicas @@ -427,7 +409,7 @@ protected void randomIndexTemplate() { // otherwise, use it, it has assertions and so on that can find bugs. SuppressCodecs annotation = getClass().getAnnotation(SuppressCodecs.class); if (annotation != null && annotation.value().length == 1 && "*".equals(annotation.value()[0])) { - randomSettingsBuilder.put("index.codec", randomFrom(CodecService.DEFAULT_CODEC, CodecService.BEST_COMPRESSION_CODEC)); + randomSettingsBuilder.put("index.codec", randomFrom(CODECS)); } else { randomSettingsBuilder.put("index.codec", CodecService.LUCENE_DEFAULT_CODEC); } @@ -484,7 +466,7 @@ protected Settings.Builder setRandomIndexSettings(Random random, Settings.Builde private static Settings.Builder setRandomIndexMergeSettings(Random random, Settings.Builder builder) { if (random.nextBoolean()) { builder.put( - MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(), + TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING.getKey(), (random.nextBoolean() ? random.nextDouble() : random.nextBoolean()).toString() ); } @@ -530,95 +512,6 @@ private static Settings.Builder setRandomIndexTranslogSettings(Random random, Se return builder; } - private TestCluster buildWithPrivateContext(final Scope scope, final long seed) throws Exception { - return RandomizedContext.current().runWithPrivateRandomness(seed, () -> buildTestCluster(scope, seed)); - } - - private TestCluster buildAndPutCluster(Scope currentClusterScope, long seed) throws Exception { - final Class<?> clazz = this.getClass(); - TestCluster testCluster = clusters.remove(clazz); // remove this cluster first - clearClusters(); // all leftovers are gone by now... 
this is really just a double safety if we miss something somewhere - switch (currentClusterScope) { - case SUITE: - if (testCluster == null) { // only build if it's not there yet - testCluster = buildWithPrivateContext(currentClusterScope, seed); - } - break; - case TEST: - // close the previous one and create a new one - IOUtils.closeWhileHandlingException(testCluster); - testCluster = buildTestCluster(currentClusterScope, seed); - break; - } - clusters.put(clazz, testCluster); - return testCluster; - } - - private static void clearClusters() throws Exception { - if (!clusters.isEmpty()) { - IOUtils.close(clusters.values()); - clusters.clear(); - } - if (restClient != null) { - restClient.close(); - restClient = null; - } - assertBusy(() -> { - int numChannels = RestCancellableNodeClient.getNumChannels(); - assertEquals( - numChannels - + " channels still being tracked in " - + RestCancellableNodeClient.class.getSimpleName() - + " while there should be none", - 0, - numChannels - ); - }); - } - - private void afterInternal(boolean afterClass) throws Exception { - boolean success = false; - try { - final Scope currentClusterScope = getCurrentClusterScope(); - if (isInternalCluster()) { - internalCluster().clearDisruptionScheme(); - } - try { - if (cluster() != null) { - if (currentClusterScope != Scope.TEST) { - Metadata metadata = client().admin().cluster().prepareState().execute().actionGet().getState().getMetadata(); - - final Set<String> persistentKeys = new HashSet<>(metadata.persistentSettings().keySet()); - assertThat("test leaves persistent cluster metadata behind", persistentKeys, empty()); - - final Set<String> transientKeys = new HashSet<>(metadata.transientSettings().keySet()); - assertThat("test leaves transient cluster metadata behind", transientKeys, empty()); - } - ensureClusterSizeConsistency(); - ensureClusterStateConsistency(); - ensureClusterStateCanBeReadByNodeTool(); - beforeIndexDeletion(); - cluster().wipe(excludeTemplates()); // wipe after to make sure we fail in the test that didn't ack the delete - if (afterClass || currentClusterScope == Scope.TEST) { - cluster().close(); - } - cluster().assertAfterTest(); - } - } finally { - if (currentClusterScope == Scope.TEST) { - clearClusters(); // it is ok to leave persistent / transient cluster state behind if scope is TEST - } - } - success = true; - } finally { - if (!success) { - // if we failed here that means that something broke horribly so we should clear all clusters - // TODO: just let the exception happen, WTF is all this horseshit - // afterTestRule.forceFailure(); - } - } - } - /** * @return An exclude set of index templates that will not be removed in between tests. 
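[Editor's note] The removed `buildAndPutCluster` logic above (which reappears inside `OpenSearchTestClusterRule` later in this diff) switches on the configured cluster scope: `SUITE` reuses one cluster per test class, while `TEST` rebuilds the cluster for every method. A hedged usage sketch, with a hypothetical suite name:

```java
import org.opensearch.test.OpenSearchIntegTestCase;

// Hypothetical suite: Scope.TEST gives every test method its own cluster,
// while the default Scope.SUITE shares one cluster across the whole class.
@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 2)
public class MyFreshClusterIT extends OpenSearchIntegTestCase {
    public void testAgainstFreshCluster() {
        // this method (and every other one in the class) runs against a newly built cluster
        assertNotNull(cluster());
    }
}
```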
*/
@@ -631,18 +524,15 @@ protected void beforeIndexDeletion() throws Exception {
}
public static TestCluster cluster() {
- return currentCluster;
+ return testClusterRule.cluster();
}
public static boolean isInternalCluster() {
- return (currentCluster instanceof InternalTestCluster);
+ return testClusterRule.isInternalCluster();
}
public static InternalTestCluster internalCluster() {
- if (!isInternalCluster()) {
- throw new UnsupportedOperationException("current test cluster is immutable");
- }
- return (InternalTestCluster) currentCluster;
+ return testClusterRule.internalCluster().orElseThrow(() -> new UnsupportedOperationException("current test cluster is immutable"));
}
public ClusterService clusterService() {
@@ -654,14 +544,7 @@ public static Client client() {
}
public static Client client(@Nullable String node) {
- if (node != null) {
- return internalCluster().client(node);
- }
- Client client = cluster().client();
- if (frequently()) {
- client = new RandomizingClient(client, random());
- }
- return client;
+ return testClusterRule.clientForNode(node);
}
public static Client dataNodeClient() {
@@ -764,6 +647,11 @@ public Settings indexSettings() {
);
}
+ if (randomBoolean()) {
+ builder.put(INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING.getKey(), true);
+ builder.put(INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING.getKey(), randomDoubleBetween(0.01, 0.50, true));
+ }
+
return builder.build();
}
@@ -778,10 +666,38 @@ protected Settings featureFlagSettings() {
for (Setting builtInFlag : FeatureFlagSettings.BUILT_IN_FEATURE_FLAGS) {
featureSettings.put(builtInFlag.getKey(), builtInFlag.getDefaultRaw(Settings.EMPTY));
}
+ // Enabling the Telemetry setting by default
featureSettings.put(FeatureFlags.TELEMETRY_SETTING.getKey(), true);
+
+ // Enabling the fuzzy set for tests by default
+ featureSettings.put(FeatureFlags.DOC_ID_FUZZY_SET_SETTING.getKey(), true);
return featureSettings.build();
}
+
+ /**
+ * Indicates whether a remote state restore needs to be triggered.
+ * Tests that run against a remote-store-enabled domain override this to return true.
+ *
+ * @return true if a remote state restore should be performed, false otherwise
+ */
+ protected boolean triggerRemoteStateRestore() {
+ return false;
+ }
+
+ /**
+ * For tests with remote cluster state, resets the cluster so that the cluster state is
+ * restored from the remote store.
+ */
+ protected void performRemoteStoreTestAction() {
+ if (triggerRemoteStateRestore()) {
+ String clusterUUIDBefore = clusterService().state().metadata().clusterUUID();
+ internalCluster().resetCluster();
+ String clusterUUIDAfter = clusterService().state().metadata().clusterUUID();
+ // assert that the cluster UUID changed after the restore
+ assertFalse(clusterUUIDBefore.equals(clusterUUIDAfter));
+ }
+ }
+
/**
* Creates one or more indices and asserts that the indices are acknowledged. If one of the indices
* already exists this method will fail and wipe all the indices created so far.
@@ -1336,7 +1252,7 @@ protected void ensureStableCluster(int nodeCount, TimeValue timeValue, boolean l
/**
* Ensures that all nodes in the cluster are connected to each other.
- *
+ * <p>
* Some network disruptions may leave nodes that are not the cluster-manager disconnected from each other.
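[Editor's note] The new `triggerRemoteStateRestore()` / `performRemoteStoreTestAction()` pair added in the hunk above is an opt-in hook. A minimal sketch of how a remote-store suite would use it (the class name is hypothetical):

```java
import org.opensearch.test.OpenSearchIntegTestCase;

// Hypothetical suite opting into the remote state restore flow: with this
// override, performRemoteStoreTestAction() resets the cluster and asserts
// that the cluster UUID changed after state is restored from remote.
public class MyRemoteStateRestoreIT extends OpenSearchIntegTestCase {
    @Override
    protected boolean triggerRemoteStateRestore() {
        return true;
    }
}
```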
* {@link org.opensearch.cluster.NodeConnectionsService} will eventually reconnect but it's * handy to be able to ensure this happens faster @@ -1398,7 +1314,7 @@ protected final IndexResponse index(String index, String type, String id, Object */ @Deprecated protected final IndexResponse index(String index, String type, String id, String source) { - return client().prepareIndex(index).setId(id).setSource(source, XContentType.JSON).execute().actionGet(); + return client().prepareIndex(index).setId(id).setSource(source, MediaTypeRegistry.JSON).execute().actionGet(); } /** @@ -1449,6 +1365,18 @@ protected ForceMergeResponse forceMerge() { return actionGet; } + protected ForceMergeResponse forceMerge(int maxNumSegments) { + waitForRelocation(); + ForceMergeResponse actionGet = client().admin() + .indices() + .prepareForceMerge() + .setMaxNumSegments(maxNumSegments) + .execute() + .actionGet(); + assertNoFailures(actionGet); + return actionGet; + } + /** * Returns <code>true</code> iff the given index exists otherwise <code>false</code> */ @@ -1570,7 +1498,7 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean ma String index = RandomPicks.randomFrom(random, indices); bogusIds.add(Arrays.asList(index, id)); // We configure a routing key in case the mapping requires it - builders.add(client().prepareIndex().setIndex(index).setId(id).setSource("{}", XContentType.JSON).setRouting(id)); + builders.add(client().prepareIndex().setIndex(index).setId(id).setSource("{}", MediaTypeRegistry.JSON).setRouting(id)); } } Collections.shuffle(builders, random()); @@ -1603,6 +1531,7 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean ma for (List<IndexRequestBuilder> segmented : partition) { BulkRequestBuilder bulkBuilder = client().prepareBulk(); for (IndexRequestBuilder indexRequestBuilder : segmented) { + indexRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.NONE); bulkBuilder.add(indexRequestBuilder); } BulkResponse actionGet = bulkBuilder.execute().actionGet(); @@ -1623,6 +1552,7 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean ma } } assertThat(actualErrors, emptyIterable()); + if (!bogusIds.isEmpty()) { // delete the bogus types again - it might trigger merges or at least holes in the segments and enforces deleted docs! for (List<String> doc : bogusIds) { @@ -1638,6 +1568,66 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean ma client().admin().indices().prepareRefresh(indicesArray).setIndicesOptions(IndicesOptions.lenientExpandOpen()).get() ); } + if (dummyDocuments) { + indexRandomForMultipleSlices(indicesArray); + } + if (forceRefresh) { + waitForReplication(); + } + } + + /* + * This method ingests bogus documents for the given indices such that multiple slices + * are formed. This is useful for testing with the concurrent search use-case as it creates + * multiple slices based on segment count. + * @param indices the indices in which bogus documents should be ingested + * */ + protected void indexRandomForMultipleSlices(String... 
indices) throws InterruptedException { + Set<List<String>> bogusIds = new HashSet<>(); + int refreshCount = randomIntBetween(2, 3); + for (String index : indices) { + int numDocs = getNumShards(index).totalNumShards * randomIntBetween(2, 10); + while (refreshCount-- > 0) { + final CopyOnWriteArrayList<Tuple<IndexRequestBuilder, Exception>> errors = new CopyOnWriteArrayList<>(); + List<CountDownLatch> inFlightAsyncOperations = new ArrayList<>(); + for (int i = 0; i < numDocs; i++) { + String id = "bogus_doc_" + randomRealisticUnicodeOfLength(between(1, 10)) + dummmyDocIdGenerator.incrementAndGet(); + IndexRequestBuilder indexRequestBuilder = client().prepareIndex() + .setIndex(index) + .setId(id) + .setSource("{}", MediaTypeRegistry.JSON) + .setRouting(id); + indexRequestBuilder.execute( + new PayloadLatchedActionListener<>(indexRequestBuilder, newLatch(inFlightAsyncOperations), errors) + ); + bogusIds.add(Arrays.asList(index, id)); + } + for (CountDownLatch operation : inFlightAsyncOperations) { + operation.await(); + } + final List<Exception> actualErrors = new ArrayList<>(); + for (Tuple<IndexRequestBuilder, Exception> tuple : errors) { + Throwable t = ExceptionsHelper.unwrapCause(tuple.v2()); + if (t instanceof OpenSearchRejectedExecutionException) { + logger.debug("Error indexing doc: " + t.getMessage() + ", reindexing."); + tuple.v1().execute().actionGet(); // re-index if rejected + } else { + actualErrors.add(tuple.v2()); + } + } + assertThat(actualErrors, emptyIterable()); + refresh(index); + } + } + for (List<String> doc : bogusIds) { + assertEquals( + "failed to delete a dummy doc [" + doc.get(0) + "][" + doc.get(1) + "]", + DocWriteResponse.Result.DELETED, + client().prepareDelete(doc.get(0), doc.get(1)).setRouting(doc.get(1)).get().getResult() + ); + } + // refresh is called to make sure the bogus docs doesn't affect the search results + refresh(); } private final AtomicInteger dummmyDocIdGenerator = new AtomicInteger(); @@ -1825,7 +1815,7 @@ public void clearScroll(String... scrollIds) { assertThat(clearResponse.isSucceeded(), equalTo(true)); } - private static <A extends Annotation> A getAnnotation(Class<?> clazz, Class<A> annotationClass) { + static <A extends Annotation> A getAnnotation(Class<?> clazz, Class<A> annotationClass) { if (clazz == Object.class || clazz == OpenSearchIntegTestCase.class) { return null; } @@ -1836,16 +1826,6 @@ private static <A extends Annotation> A getAnnotation(Class<?> clazz, Class<A> a return getAnnotation(clazz.getSuperclass(), annotationClass); } - private Scope getCurrentClusterScope() { - return getCurrentClusterScope(this.getClass()); - } - - private static Scope getCurrentClusterScope(Class<?> clazz) { - ClusterScope annotation = getAnnotation(clazz, ClusterScope.class); - // if we are not annotated assume suite! - return annotation == null ? Scope.SUITE : annotation.scope(); - } - private boolean getSupportsDedicatedClusterManagers() { ClusterScope annotation = getAnnotation(this.getClass(), ClusterScope.class); return annotation == null ? true : annotation.supportsDedicatedMasters(); @@ -1888,6 +1868,7 @@ private int getNumClientNodes() { * In other words subclasses must ensure this method is idempotent. 
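[Editor's note] `getAnnotation` (made package-private above so the new rule can call it) walks the superclass chain, which is what lets annotations such as `@ClusterScope` placed on a base test class apply to subclasses. A standalone illustration of that lookup, using a made-up `@Marker` annotation:

```java
import java.lang.annotation.Annotation;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;

@Retention(RetentionPolicy.RUNTIME)
@interface Marker {
}

@Marker
class BaseIT {
}

class ChildIT extends BaseIT {
}

public class AnnotationWalk {
    // Mirrors the shape of getAnnotation: check the class, then recurse upwards.
    static <A extends Annotation> A find(Class<?> clazz, Class<A> type) {
        if (clazz == null || clazz == Object.class) {
            return null;
        }
        A annotation = clazz.getAnnotation(type);
        return annotation != null ? annotation : find(clazz.getSuperclass(), type);
    }

    public static void main(String[] args) {
        // ChildIT carries no annotation itself, but the walk finds @Marker on BaseIT
        System.out.println(find(ChildIT.class, Marker.class) != null); // prints: true
    }
}
```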
*/
protected Settings nodeSettings(int nodeOrdinal) {
+ final Settings featureFlagSettings = featureFlagSettings();
Settings.Builder builder = Settings.builder()
// Default the watermarks to absurdly low to prevent the tests
// from failing on nodes without enough disk space
.put(SearchService.LOW_LEVEL_CANCELLATION_SETTING.getKey(), randomBoolean())
.putList(DISCOVERY_SEED_HOSTS_SETTING.getKey()) // empty list disables a port scan for other nodes
.putList(DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), "file")
+ // By default, use a target slice count of 2 for tests. This increases the probability of having multiple slices
+ // when tests are run with concurrent segment search enabled
+ .put(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY, 2)
.put(featureFlagSettings());
+
+ // Enable the tracer only when the Telemetry setting is enabled
+ if (featureFlagSettings().getAsBoolean(FeatureFlags.TELEMETRY_SETTING.getKey(), false)) {
+ builder.put(TelemetrySettings.TRACER_FEATURE_ENABLED_SETTING.getKey(), true);
+ builder.put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), true);
+ }
+
+ // Randomly set a replication strategy for the node. The replication strategy can still be overridden manually by a subclass if needed.
+ if (useRandomReplicationStrategy()) {
+ ReplicationType replicationType = randomBoolean() ? ReplicationType.DOCUMENT : ReplicationType.SEGMENT;
+ logger.info("Randomly using Replication Strategy as {}.", replicationType.toString());
+ builder.put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), replicationType);
+ }
return builder.build();
}
+
+ /**
+ * Selects a random replication strategy, either DOCUMENT or SEGMENT.
+ * A subclass must override this method to opt into a random replication strategy.
+ * Use it only in test classes where the replication strategy is not critical to the tests.
+ */
+ protected boolean useRandomReplicationStrategy() {
+ return false;
+ }
+
protected Path nodeConfigPath(int nodeOrdinal) {
return null;
}
@@ -2060,6 +2066,15 @@ protected boolean addMockGeoShapeFieldMapper() {
return true;
}
+ /**
+ * Returns {@code true} if this test cluster should have tracing enabled with MockTelemetryPlugin.
+ * Disabled for now because the existing strict checks do not support a multi-node internal cluster.
+ * @return boolean.
+ */
+ protected boolean addMockTelemetryPlugin() {
+ return true;
+ }
+
/**
* Returns a function that allows to wrap / filter all clients that are exposed by the test cluster. This is useful
* for debugging or request / response pre and post processing. It also allows to intercept all calls done by the test
@@ -2104,8 +2119,9 @@ protected Collection<Class<?
extends Plugin>> getMockPlugins() { if (addMockGeoShapeFieldMapper()) { mocks.add(TestGeoShapeFieldMapperPlugin.class); } - mocks.add(MockTelemetryPlugin.class); - + if (addMockTelemetryPlugin()) { + mocks.add(MockTelemetryPlugin.class); + } return Collections.unmodifiableList(mocks); } @@ -2145,10 +2161,9 @@ public <T extends TransportRequest> TransportRequestHandler<T> interceptHandler( * Returns path to a random directory that can be used to create a temporary file system repo */ public Path randomRepoPath() { - if (currentCluster instanceof InternalTestCluster) { - return randomRepoPath(((InternalTestCluster) currentCluster).getDefaultSettings()); - } - throw new UnsupportedOperationException("unsupported cluster type"); + return testClusterRule.internalCluster() + .map(c -> randomRepoPath(c.getDefaultSettings())) + .orElseThrow(() -> new UnsupportedOperationException("unsupported cluster type")); } /** @@ -2222,78 +2237,9 @@ private NumShards(int numPrimaries, int numReplicas) { } } - private static boolean runTestScopeLifecycle() { - return INSTANCE == null; - } - - @Before - public final void setupTestCluster() throws Exception { - if (runTestScopeLifecycle()) { - printTestMessage("setting up"); - beforeInternal(); - printTestMessage("all set up"); - } - } - - @After - public final void cleanUpCluster() throws Exception { - // Deleting indices is going to clear search contexts implicitly so we - // need to check that there are no more in-flight search contexts before - // we remove indices - if (isInternalCluster()) { - internalCluster().setBootstrapClusterManagerNodeIndex(-1); - } - super.ensureAllSearchContextsReleased(); - if (runTestScopeLifecycle()) { - printTestMessage("cleaning up after"); - afterInternal(false); - printTestMessage("cleaned up after"); - } - } - @AfterClass public static void afterClass() throws Exception { - try { - if (runTestScopeLifecycle()) { - clearClusters(); - } else { - INSTANCE.printTestMessage("cleaning up after"); - INSTANCE.afterInternal(true); - checkStaticState(true); - } - } finally { - SUITE_SEED = null; - currentCluster = null; - INSTANCE = null; - } - } - - private static void initializeSuiteScope() throws Exception { - Class<?> targetClass = getTestClass(); - /** - * Note we create these test class instance via reflection - * since JUnit creates a new instance per test and that is also - * the reason why INSTANCE is static since this entire method - * must be executed in a static context. - */ - assert INSTANCE == null; - if (isSuiteScopedTest(targetClass)) { - // note we need to do this this way to make sure this is reproducible - INSTANCE = (OpenSearchIntegTestCase) targetClass.getConstructor().newInstance(); - boolean success = false; - try { - INSTANCE.printTestMessage("setup"); - INSTANCE.beforeInternal(); - INSTANCE.setupSuiteScopeCluster(); - success = true; - } finally { - if (!success) { - afterClass(); - } - } - } else { - INSTANCE = null; - } + testClusterRule.afterClass(); } /** @@ -2325,41 +2271,8 @@ protected boolean forbidPrivateIndexSettings() { * The returned client gets automatically closed when needed, it shouldn't be closed as part of tests otherwise * it cannot be reused by other tests anymore. 
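[Editor's note] `randomRepoPath()` above shows the pattern this refactoring uses throughout: instead of an `instanceof` check and a cast against a static field, call sites receive an `Optional<InternalTestCluster>` from the rule and `map`/`orElseThrow` on it. A small self-contained illustration with stand-in types (names are invented for the sketch):

```java
import java.util.Optional;

interface TestClusterLike {
}

final class InternalClusterLike implements TestClusterLike {
    String defaultSettings() {
        return "default-settings";
    }
}

public class DelegationSketch {
    private final TestClusterLike current;

    DelegationSketch(TestClusterLike current) {
        this.current = current;
    }

    // Mirrors internalCluster() on the rule: empty unless the current
    // cluster is actually an internal (mutable) one.
    Optional<InternalClusterLike> internal() {
        return current instanceof InternalClusterLike ? Optional.of((InternalClusterLike) current) : Optional.empty();
    }

    // Mirrors the new randomRepoPath(): map over the Optional or fail loudly.
    String repoSettings() {
        return internal().map(InternalClusterLike::defaultSettings)
            .orElseThrow(() -> new UnsupportedOperationException("unsupported cluster type"));
    }
}
```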
*/ - protected static synchronized RestClient getRestClient() { - if (restClient == null) { - restClient = createRestClient(); - } - return restClient; - } - - protected static RestClient createRestClient() { - return createRestClient(null, "http"); - } - - protected static RestClient createRestClient(RestClientBuilder.HttpClientConfigCallback httpClientConfigCallback, String protocol) { - NodesInfoResponse nodesInfoResponse = client().admin().cluster().prepareNodesInfo().get(); - assertFalse(nodesInfoResponse.hasFailures()); - return createRestClient(nodesInfoResponse.getNodes(), httpClientConfigCallback, protocol); - } - - protected static RestClient createRestClient( - final List<NodeInfo> nodes, - RestClientBuilder.HttpClientConfigCallback httpClientConfigCallback, - String protocol - ) { - List<HttpHost> hosts = new ArrayList<>(); - for (NodeInfo node : nodes) { - if (node.getInfo(HttpInfo.class) != null) { - TransportAddress publishAddress = node.getInfo(HttpInfo.class).address().publishAddress(); - InetSocketAddress address = publishAddress.address(); - hosts.add(new HttpHost(protocol, NetworkAddress.format(address.getAddress()), address.getPort())); - } - } - RestClientBuilder builder = RestClient.builder(hosts.toArray(new HttpHost[0])); - if (httpClientConfigCallback != null) { - builder.setHttpClientConfigCallback(httpClientConfigCallback); - } - return builder.build(); + protected static RestClient getRestClient() { + return testClusterRule.getRestClient(); } /** @@ -2370,10 +2283,6 @@ protected static RestClient createRestClient( */ protected void setupSuiteScopeCluster() throws Exception {} - private static boolean isSuiteScopedTest(Class<?> clazz) { - return clazz.getAnnotation(SuiteScopeTestCase.class) != null; - } - /** * If a test is annotated with {@link SuiteScopeTestCase} * the checks and modifications that are applied to the used test cluster are only done after all tests @@ -2482,4 +2391,210 @@ protected ClusterState getClusterState() { return client(internalCluster().getClusterManagerName()).admin().cluster().prepareState().get().getState(); } + /** + * Refreshes the indices in the cluster and waits until active/started replica shards + * are caught up with primary shard only when Segment Replication is enabled. + * This doesn't wait for inactive/non-started replica shards to become active/started. + */ + protected RefreshResponse refreshAndWaitForReplication(String... indices) { + RefreshResponse refreshResponse = refresh(indices); + waitForReplication(); + return refreshResponse; + } + + /** + * Waits until active/started replica shards are caught up with primary shard only when Segment Replication is enabled. + * This doesn't wait for inactive/non-started replica shards to become active/started. + */ + protected void waitForReplication(String... 
indices) { + if (indices.length == 0) { + indices = getClusterState().routingTable().indicesRouting().keySet().toArray(String[]::new); + } + try { + for (String index : indices) { + if (isSegmentReplicationEnabledForIndex(index)) { + if (isInternalCluster()) { + IndexRoutingTable indexRoutingTable = getClusterState().routingTable().index(index); + if (indexRoutingTable != null) { + assertBusy(() -> { + for (IndexShardRoutingTable shardRoutingTable : indexRoutingTable) { + final ShardRouting primaryRouting = shardRoutingTable.primaryShard(); + if (primaryRouting.state().toString().equals("STARTED")) { + if (isSegmentReplicationEnabledForIndex(index)) { + final List<ShardRouting> replicaRouting = shardRoutingTable.replicaShards(); + final IndexShard primaryShard = getIndexShard(primaryRouting, index); + for (ShardRouting replica : replicaRouting) { + if (replica.state().toString().equals("STARTED")) { + IndexShard replicaShard = getIndexShard(replica, index); + assertEquals( + "replica shards haven't caught up with primary", + getLatestSegmentInfoVersion(primaryShard), + getLatestSegmentInfoVersion(replicaShard) + ); + } + } + } + } + } + }, 30, TimeUnit.SECONDS); + } + } else { + throw new IllegalStateException( + "Segment Replication is not supported for testing tests using External Test Cluster" + ); + } + } + } + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + /** + * Checks if Segment Replication is enabled on Index. + */ + protected boolean isSegmentReplicationEnabledForIndex(String index) { + return clusterService().state().getMetadata().isSegmentReplicationEnabled(index); + } + + protected IndexShard getIndexShard(ShardRouting routing, String indexName) { + return getIndexShard(getClusterState().nodes().get(routing.currentNodeId()).getName(), routing.shardId(), indexName); + } + + /** + * Fetch IndexShard by shardId, multiple shards per node allowed. + */ + protected IndexShard getIndexShard(String node, ShardId shardId, String indexName) { + final Index index = resolveIndex(indexName); + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node); + IndexService indexService = indicesService.indexServiceSafe(index); + final Optional<Integer> id = indexService.shardIds().stream().filter(sid -> sid.equals(shardId.id())).findFirst(); + return indexService.getShard(id.get()); + } + + /** + * Fetch latest segment info snapshot version of an index. 
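[Editor's note] The `waitForReplication` implementation above leans on `assertBusy` to poll until started replicas report the same segment info version as the primary. Reduced to its essence (a sketch; the `LongSupplier`s stand in for the `getLatestSegmentInfoVersion` calls on the primary and replica shards):

```java
import org.opensearch.test.OpenSearchIntegTestCase;

import java.util.concurrent.TimeUnit;
import java.util.function.LongSupplier;

public class ReplicationWaitSketchIT extends OpenSearchIntegTestCase {
    // Retry the assertion for up to 30 seconds; assertBusy rethrows the last
    // failure if the replica never catches up within the timeout.
    void awaitReplicaCaughtUp(LongSupplier primaryVersion, LongSupplier replicaVersion) throws Exception {
        assertBusy(
            () -> assertEquals(
                "replica shards haven't caught up with primary",
                primaryVersion.getAsLong(),
                replicaVersion.getAsLong()
            ),
            30,
            TimeUnit.SECONDS
        );
    }
}
```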
+ */ + protected long getLatestSegmentInfoVersion(IndexShard shard) { + try (final GatedCloseable<SegmentInfos> snapshot = shard.getSegmentInfosSnapshot()) { + return snapshot.get().version; + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + public static Settings remoteStoreClusterSettings(String name, Path path) { + return remoteStoreClusterSettings(name, path, name, path); + } + + public static Settings remoteStoreClusterSettings( + String segmentRepoName, + Path segmentRepoPath, + String segmentRepoType, + String translogRepoName, + Path translogRepoPath, + String translogRepoType + ) { + Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.put( + buildRemoteStoreNodeAttributes( + segmentRepoName, + segmentRepoPath, + segmentRepoType, + translogRepoName, + translogRepoPath, + translogRepoType, + false + ) + ); + return settingsBuilder.build(); + } + + public static Settings remoteStoreClusterSettings( + String segmentRepoName, + Path segmentRepoPath, + String translogRepoName, + Path translogRepoPath + ) { + Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.put(buildRemoteStoreNodeAttributes(segmentRepoName, segmentRepoPath, translogRepoName, translogRepoPath, false)); + return settingsBuilder.build(); + } + + public static Settings buildRemoteStoreNodeAttributes( + String segmentRepoName, + Path segmentRepoPath, + String translogRepoName, + Path translogRepoPath, + boolean withRateLimiterAttributes + ) { + return buildRemoteStoreNodeAttributes( + segmentRepoName, + segmentRepoPath, + ReloadableFsRepository.TYPE, + translogRepoName, + translogRepoPath, + ReloadableFsRepository.TYPE, + withRateLimiterAttributes + ); + } + + public static Settings buildRemoteStoreNodeAttributes( + String segmentRepoName, + Path segmentRepoPath, + String segmentRepoType, + String translogRepoName, + Path translogRepoPath, + String translogRepoType, + boolean withRateLimiterAttributes + ) { + String segmentRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + segmentRepoName + ); + String segmentRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + segmentRepoName + ); + String translogRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + translogRepoName + ); + String translogRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + translogRepoName + ); + String stateRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + segmentRepoName + ); + String stateRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + segmentRepoName + ); + + Settings.Builder settings = Settings.builder() + .put("node.attr." + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, segmentRepoName) + .put(segmentRepoTypeAttributeKey, segmentRepoType) + .put(segmentRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath) + .put("node.attr." + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, translogRepoName) + .put(translogRepoTypeAttributeKey, translogRepoType) + .put(translogRepoSettingsAttributeKeyPrefix + "location", translogRepoPath) + .put("node.attr." 
+ REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, segmentRepoName) + .put(stateRepoTypeAttributeKey, segmentRepoType) + .put(stateRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath); + + if (withRateLimiterAttributes) { + settings.put(segmentRepoSettingsAttributeKeyPrefix + "compress", randomBoolean()) + .put(segmentRepoSettingsAttributeKeyPrefix + "chunk_size", 200, ByteSizeUnit.BYTES); + } + return settings.build(); + } + } diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java index 1d7c04227b208..45ea63e862df6 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java @@ -32,6 +32,7 @@ package org.opensearch.test; import com.carrotsearch.randomizedtesting.RandomizedContext; + import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.indices.create.CreateIndexRequestBuilder; @@ -45,6 +46,8 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.routing.allocation.DiskThresholdSettings; import org.opensearch.common.Priority; +import org.opensearch.common.settings.FeatureFlagSettings; +import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; @@ -52,11 +55,11 @@ import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.common.Strings; +import org.opensearch.core.index.Index; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; -import org.opensearch.core.index.Index; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.indices.IndicesService; @@ -66,9 +69,11 @@ import org.opensearch.node.NodeValidationException; import org.opensearch.plugins.Plugin; import org.opensearch.script.MockScriptService; +import org.opensearch.search.SearchService; import org.opensearch.search.internal.SearchContext; import org.opensearch.telemetry.TelemetrySettings; import org.opensearch.test.telemetry.MockTelemetryPlugin; +import org.opensearch.test.telemetry.tracing.StrictCheckSpanProcessor; import org.opensearch.transport.TransportSettings; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -186,6 +191,7 @@ public static void setUpClass() throws Exception { @AfterClass public static void tearDownClass() throws Exception { stopNode(); + StrictCheckSpanProcessor.validateTracingStateOnShutdown(); } /** @@ -223,7 +229,8 @@ private Node newNode() { final Path tempDir = createTempDir(); final String nodeName = nodeSettings().get(Node.NODE_NAME_SETTING.getKey(), "node_s_0"); - Settings settings = Settings.builder() + final Settings featureFlagSettings = featureFlagSettings(); + Settings.Builder settingsBuilder = Settings.builder() .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), InternalTestCluster.clusterName("single-node-cluster", random().nextLong())) .put(Environment.PATH_HOME_SETTING.getKey(), tempDir) .put(Environment.PATH_REPO_SETTING.getKey(), tempDir.resolve("repo")) @@ -247,8 +254,12 @@ 
private Node newNode() { .putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey(), nodeName) .put(FeatureFlags.TELEMETRY_SETTING.getKey(), true) .put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), true) + .put(TelemetrySettings.TRACER_FEATURE_ENABLED_SETTING.getKey(), true) + // By default, for tests we will put the target slice count of 2. This will increase the probability of having multiple slices + // when tests are run with concurrent segment search enabled + .put(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY, 2) .put(nodeSettings()) // allow test cases to provide their own settings or override these - .build(); + .put(featureFlagSettings); Collection<Class<? extends Plugin>> plugins = getPlugins(); if (plugins.contains(getTestTransportPlugin()) == false) { @@ -259,8 +270,9 @@ private Node newNode() { plugins.add(MockHttpTransport.TestPlugin.class); } plugins.add(MockScriptService.TestPlugin.class); + plugins.add(MockTelemetryPlugin.class); - Node node = new MockNode(settings, plugins, forbidPrivateIndexSettings()); + Node node = new MockNode(settingsBuilder.build(), plugins, forbidPrivateIndexSettings()); try { node.start(); } catch (NodeValidationException e) { @@ -414,4 +426,19 @@ protected boolean forbidPrivateIndexSettings() { return true; } + /** + * Setting all feature flag settings at base IT, which can be overridden later by individual + * IT classes. + * + * @return Feature flag settings. + */ + protected Settings featureFlagSettings() { + Settings.Builder featureSettings = Settings.builder(); + for (Setting builtInFlag : FeatureFlagSettings.BUILT_IN_FEATURE_FLAGS) { + featureSettings.put(builtInFlag.getKey(), builtInFlag.getDefaultRaw(Settings.EMPTY)); + } + featureSettings.put(FeatureFlags.TELEMETRY_SETTING.getKey(), true); + return featureSettings.build(); + } + } diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java index 24067e08a3d89..aac3fca9e1e16 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java @@ -42,6 +42,7 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomStrings; import com.carrotsearch.randomizedtesting.rules.TestRuleAdapter; + import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -64,22 +65,15 @@ import org.opensearch.bootstrap.BootstrapForTesting; import org.opensearch.client.Requests; import org.opensearch.cluster.ClusterModule; +import org.opensearch.cluster.coordination.PersistedStateRegistry; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.CheckedRunnable; import org.opensearch.common.Numbers; import org.opensearch.common.SuppressForbidden; -import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.PathUtils; import org.opensearch.common.io.PathUtilsForTesting; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.NamedWriteable; -import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.core.common.io.stream.StreamInput; -import 
org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.core.index.Index; import org.opensearch.common.joda.JodaDeprecationPatterns; import org.opensearch.common.logging.DeprecatedMessage; import org.opensearch.common.logging.HeaderWarning; @@ -89,20 +83,29 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateUtils; import org.opensearch.common.time.FormatNames; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.MockBigArrays; import org.opensearch.common.util.MockPageCacheRecycler; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.set.Sets; import org.opensearch.common.xcontent.LoggingDeprecationHandler; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.NamedWriteable; +import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.index.Index; import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContent; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentHelper; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.xcontent.XContentParser.Token; import org.opensearch.env.Environment; @@ -117,6 +120,7 @@ import org.opensearch.index.analysis.NamedAnalyzer; import org.opensearch.index.analysis.TokenFilterFactory; import org.opensearch.index.analysis.TokenizerFactory; +import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.indices.analysis.AnalysisModule; import org.opensearch.monitor.jvm.JvmInfo; import org.opensearch.plugins.AnalysisPlugin; @@ -141,6 +145,8 @@ import java.io.IOException; import java.io.InputStream; +import java.io.PrintWriter; +import java.io.StringWriter; import java.math.BigInteger; import java.net.InetAddress; import java.net.UnknownHostException; @@ -166,14 +172,17 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BooleanSupplier; import java.util.function.Consumer; +import java.util.function.Function; import java.util.function.IntFunction; import java.util.function.Predicate; import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.Stream; +import reactor.core.scheduler.Schedulers; + import static java.util.Collections.emptyMap; -import static org.opensearch.common.util.CollectionUtils.arrayAsArrayList; +import static org.opensearch.core.common.util.CollectionUtils.arrayAsArrayList; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; @@ -222,6 +231,7 @@ public static void resetPortCounter() { @Override public void tearDown() throws Exception { + Schedulers.shutdownNow(); FeatureFlagSetter.clear(); 
super.tearDown(); } @@ -363,7 +373,7 @@ public static void setContentType() throws Exception { @AfterClass public static void restoreContentType() { Requests.CONTENT_TYPE = XContentType.SMILE; - Requests.INDEX_CONTENT_TYPE = XContentType.JSON; + Requests.INDEX_CONTENT_TYPE = MediaTypeRegistry.JSON; } @BeforeClass @@ -632,7 +642,32 @@ protected static void checkStaticState(boolean afterClass) throws Exception { try { // ensure that there are no status logger messages which would indicate a problem with our Log4j usage; we map the // StatusData instances to Strings as otherwise their toString output is useless + + final Function<StatusData, String> statusToString = (statusData) -> { + try (final StringWriter sw = new StringWriter(); final PrintWriter pw = new PrintWriter(sw)) { + + pw.print(statusData.getLevel()); + pw.print(":"); + pw.print(statusData.getMessage().getFormattedMessage()); + + if (statusData.getStackTraceElement() != null) { + final var messageSource = statusData.getStackTraceElement(); + pw.println("Source:"); + pw.println(messageSource.getFileName() + "@" + messageSource.getLineNumber()); + } + + if (statusData.getThrowable() != null) { + pw.println("Throwable:"); + statusData.getThrowable().printStackTrace(pw); + } + return sw.toString(); + } catch (IOException ioe) { + throw new RuntimeException(ioe); + } + }; + assertThat( + statusData.stream().map(statusToString::apply).collect(Collectors.joining("\r\n")), statusData.stream().map(status -> status.getMessage().getFormattedMessage()).collect(Collectors.toList()), empty() ); @@ -1089,6 +1124,38 @@ public static void assertBusy(CheckedRunnable<Exception> codeBlock, long maxWait } } + /** + * Runs the code block for the provided max wait time and sleeping for fixed sleep time, waiting for no assertions to trip. + */ + public static void assertBusyWithFixedSleepTime(CheckedRunnable<Exception> codeBlock, TimeValue maxWaitTime, TimeValue sleepTime) + throws Exception { + long maxTimeInMillis = maxWaitTime.millis(); + long sleepTimeInMillis = sleepTime.millis(); + if (sleepTimeInMillis > maxTimeInMillis) { + throw new IllegalArgumentException("sleepTime is more than the maxWaitTime"); + } + long sum = 0; + List<AssertionError> failures = new ArrayList<>(); + while (sum <= maxTimeInMillis) { + try { + codeBlock.run(); + return; + } catch (AssertionError e) { + failures.add(e); + } + sum += sleepTimeInMillis; + Thread.sleep(sleepTimeInMillis); + } + try { + codeBlock.run(); + } catch (AssertionError e) { + for (AssertionError failure : failures) { + e.addSuppressed(failure); + } + throw e; + } + } + /** * Periodically execute the supplied function until it returns true, or a timeout * is reached. This version uses a timeout of 10 seconds. If at all possible, @@ -1201,6 +1268,7 @@ public static Settings.Builder settings(Version version) { public static Settings.Builder remoteIndexSettings(Version version) { Settings.Builder builder = Settings.builder() + .put(FileCache.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING.getKey(), 5) .put(IndexMetadata.SETTING_VERSION_CREATED, version) .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.REMOTE_SNAPSHOT.getSettingsKey()); return builder; @@ -1309,7 +1377,7 @@ protected final BytesReference toShuffledXContent( boolean humanReadable, String... 
exceptFieldNames ) throws IOException { - BytesReference bytes = XContentHelper.toXContent(toXContent, mediaType, params, humanReadable); + BytesReference bytes = org.opensearch.core.xcontent.XContentHelper.toXContent(toXContent, mediaType, params, humanReadable); try (XContentParser parser = createParser(mediaType.xContent(), bytes)) { try (XContentBuilder builder = shuffleXContent(parser, rarely(), exceptFieldNames)) { return BytesReference.bytes(builder); @@ -1337,7 +1405,7 @@ protected final XContentBuilder shuffleXContent(XContentBuilder builder, String. */ public static XContentBuilder shuffleXContent(XContentParser parser, boolean prettyPrint, String... exceptFieldNames) throws IOException { - XContentBuilder xContentBuilder = XContentFactory.contentBuilder(parser.contentType()); + XContentBuilder xContentBuilder = MediaTypeRegistry.contentBuilder(parser.contentType()); if (prettyPrint) { xContentBuilder.prettyPrint(); } @@ -1540,6 +1608,13 @@ protected NamedWriteableRegistry writableRegistry() { return new NamedWriteableRegistry(ClusterModule.getNamedWriteables()); } + /** + * The {@link PersistedStateRegistry} to use for this test. Subclasses should override and use liberally. + */ + protected PersistedStateRegistry persistedStateRegistry() { + return new PersistedStateRegistry(); + } + /** * Create a "mock" script for use either with {@link MockScriptEngine} or anywhere where you need a script but don't really care about * its contents. diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestClusterRule.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestClusterRule.java new file mode 100644 index 0000000000000..57e9ccf22ab43 --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestClusterRule.java @@ -0,0 +1,428 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.test;
+
+import com.carrotsearch.randomizedtesting.RandomizedContext;
+
+import org.apache.hc.core5.http.HttpHost;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.action.admin.cluster.node.info.NodeInfo;
+import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.opensearch.client.Client;
+import org.opensearch.client.RestClient;
+import org.opensearch.client.RestClientBuilder;
+import org.opensearch.cluster.metadata.Metadata;
+import org.opensearch.common.Nullable;
+import org.opensearch.common.network.NetworkAddress;
+import org.opensearch.common.util.io.IOUtils;
+import org.opensearch.core.common.transport.TransportAddress;
+import org.opensearch.http.HttpInfo;
+import org.opensearch.rest.action.RestCancellableNodeClient;
+import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope;
+import org.opensearch.test.OpenSearchIntegTestCase.Scope;
+import org.opensearch.test.OpenSearchIntegTestCase.SuiteScopeTestCase;
+import org.opensearch.test.client.RandomizingClient;
+import org.opensearch.test.telemetry.tracing.StrictCheckSpanProcessor;
+import org.junit.rules.MethodRule;
+import org.junit.runners.model.FrameworkMethod;
+import org.junit.runners.model.MultipleFailureException;
+import org.junit.runners.model.Statement;
+
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.IdentityHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.Callable;
+
+import static org.hamcrest.Matchers.empty;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertThat;
+
+/**
+ * The JUnit {@link MethodRule} that handles test method scoped and test suite scoped clusters for integration (internal cluster) tests. The rule is
+ * injected into {@link OpenSearchIntegTestCase}, which every integration test suite should subclass. For parameterized test suites,
+ * please subclass {@link ParameterizedStaticSettingsOpenSearchIntegTestCase} or {@link ParameterizedDynamicSettingsOpenSearchIntegTestCase}, depending
+ * on the way cluster settings are being managed.
+ */
+class OpenSearchTestClusterRule implements MethodRule {
+ // Maps each TestCluster instance to the exact test suite instance that triggered its creation
+ private final Map<TestCluster, OpenSearchIntegTestCase> suites = new IdentityHashMap<>();
+ private final Map<Class<?>, TestCluster> clusters = new IdentityHashMap<>();
+ private final Logger logger = LogManager.getLogger(getClass());
+
+ /**
+ * The current cluster depending on the configured {@link Scope}.
+ * By default, if no {@link ClusterScope} is configured, this will hold a reference to the suite cluster.
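[Editor's note] Both maps declared above are `IdentityHashMap`s, so `TestCluster` instances and test-class keys are compared by reference identity rather than `equals()`. A tiny standalone demonstration of the difference:

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.IdentityHashMap;
import java.util.List;
import java.util.Map;

public class IdentityMapDemo {
    public static void main(String[] args) {
        List<String> a = new ArrayList<>(List.of("cluster"));
        List<String> b = new ArrayList<>(List.of("cluster")); // a.equals(b) is true

        Map<List<String>, Integer> byIdentity = new IdentityHashMap<>();
        byIdentity.put(a, 1);
        byIdentity.put(b, 2);

        Map<List<String>, Integer> byEquality = new HashMap<>();
        byEquality.put(a, 1);
        byEquality.put(b, 2);

        System.out.println(byIdentity.size()); // 2 -- the identity map keeps both instances
        System.out.println(byEquality.size()); // 1 -- the equals-based map collapses them
    }
}
```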
+ */ + private TestCluster currentCluster = null; + private RestClient restClient = null; + + private OpenSearchIntegTestCase suiteInstance = null; // see @SuiteScope + private Long suiteSeed = null; + + @Override + public Statement apply(Statement base, FrameworkMethod method, Object target) { + return statement(base, method, target); + } + + void beforeClass() throws Exception { + suiteSeed = OpenSearchTestCase.randomLong(); + } + + void afterClass() throws Exception { + try { + if (runTestScopeLifecycle()) { + clearClusters(); + } else { + printTestMessage("cleaning up after"); + afterInternal(true, null); + OpenSearchTestCase.checkStaticState(true); + synchronized (clusters) { + final TestCluster cluster = clusters.remove(getTestClass()); + IOUtils.closeWhileHandlingException(cluster); + if (cluster != null) { + suites.remove(cluster); + } + } + } + StrictCheckSpanProcessor.validateTracingStateOnShutdown(); + } finally { + suiteSeed = null; + currentCluster = null; + suiteInstance = null; + } + } + + TestCluster cluster() { + return currentCluster; + } + + boolean isInternalCluster() { + return (cluster() instanceof InternalTestCluster); + } + + Optional<InternalTestCluster> internalCluster() { + if (!isInternalCluster()) { + return Optional.empty(); + } else { + return Optional.of((InternalTestCluster) cluster()); + } + } + + Client clientForAnyNode() { + return clientForNode(null); + } + + Client clientForNode(@Nullable String node) { + if (node != null) { + return internalCluster().orElseThrow(() -> new UnsupportedOperationException("current test cluster is immutable")).client(node); + } + Client client = cluster().client(); + if (OpenSearchTestCase.frequently()) { + client = new RandomizingClient(client, OpenSearchTestCase.random()); + } + return client; + } + + synchronized RestClient getRestClient() { + if (restClient == null) { + restClient = createRestClient(); + } + return restClient; + } + + protected final void beforeInternal(OpenSearchIntegTestCase target) throws Exception { + final Scope currentClusterScope = getClusterScope(target.getClass()); + Callable<Void> setup = () -> { + currentCluster.beforeTest(OpenSearchTestCase.random()); + currentCluster.wipe(target.excludeTemplates()); + target.randomIndexTemplate(); + return null; + }; + switch (currentClusterScope) { + case SUITE: + assert suiteSeed != null : "Suite seed was not initialized"; + currentCluster = buildAndPutCluster(currentClusterScope, suiteSeed, target); + RandomizedContext.current().runWithPrivateRandomness(suiteSeed, setup); + break; + case TEST: + currentCluster = buildAndPutCluster(currentClusterScope, OpenSearchTestCase.randomLong(), target); + setup.call(); + break; + } + } + + protected void before(Object target, FrameworkMethod method) throws Throwable { + final OpenSearchIntegTestCase instance = (OpenSearchIntegTestCase) target; + initializeSuiteScope(instance, method); + + if (runTestScopeLifecycle()) { + printTestMessage("setting up", method); + beforeInternal(instance); + printTestMessage("all set up", method); + } + } + + protected void after(Object target, FrameworkMethod method) throws Exception { + final OpenSearchIntegTestCase instance = (OpenSearchIntegTestCase) target; + + // Deleting indices is going to clear search contexts implicitly so we + // need to check that there are no more in-flight search contexts before + // we remove indices + internalCluster().ifPresent(c -> c.setBootstrapClusterManagerNodeIndex(-1)); + + instance.ensureAllSearchContextsReleased(); + if 
(runTestScopeLifecycle()) { + printTestMessage("cleaning up after", method); + afterInternal(false, instance); + printTestMessage("cleaned up after", method); + } + } + + protected RestClient createRestClient() { + return createRestClient(null, "http"); + } + + protected RestClient createRestClient(RestClientBuilder.HttpClientConfigCallback httpClientConfigCallback, String protocol) { + NodesInfoResponse nodesInfoResponse = clientForAnyNode().admin().cluster().prepareNodesInfo().get(); + assertFalse(nodesInfoResponse.hasFailures()); + return createRestClient(nodesInfoResponse.getNodes(), httpClientConfigCallback, protocol); + } + + protected RestClient createRestClient( + final List<NodeInfo> nodes, + RestClientBuilder.HttpClientConfigCallback httpClientConfigCallback, + String protocol + ) { + List<HttpHost> hosts = new ArrayList<>(); + for (NodeInfo node : nodes) { + if (node.getInfo(HttpInfo.class) != null) { + TransportAddress publishAddress = node.getInfo(HttpInfo.class).address().publishAddress(); + InetSocketAddress address = publishAddress.address(); + hosts.add(new HttpHost(protocol, NetworkAddress.format(address.getAddress()), address.getPort())); + } + } + RestClientBuilder builder = RestClient.builder(hosts.toArray(new HttpHost[0])); + if (httpClientConfigCallback != null) { + builder.setHttpClientConfigCallback(httpClientConfigCallback); + } + return builder.build(); + } + + private Scope getClusterScope(Class<?> clazz) { + ClusterScope annotation = OpenSearchIntegTestCase.getAnnotation(clazz, ClusterScope.class); + // if we are not annotated assume suite! + return annotation == null ? Scope.SUITE : annotation.scope(); + } + + private TestCluster buildWithPrivateContext(final Scope scope, final long seed, OpenSearchIntegTestCase target) throws Exception { + return RandomizedContext.current().runWithPrivateRandomness(seed, () -> target.buildTestCluster(scope, seed)); + } + + private static boolean isSuiteScopedTest(Class<?> clazz) { + return clazz.getAnnotation(SuiteScopeTestCase.class) != null; + } + + private static boolean hasParametersChanged( + final ParameterizedOpenSearchIntegTestCase instance, + final ParameterizedOpenSearchIntegTestCase target + ) { + return !instance.hasSameParametersAs(target); + } + + private boolean runTestScopeLifecycle() { + return suiteInstance == null; + } + + private TestCluster buildAndPutCluster(Scope currentClusterScope, long seed, OpenSearchIntegTestCase target) throws Exception { + final Class<?> clazz = target.getClass(); + + synchronized (clusters) { + TestCluster testCluster = clusters.remove(clazz); // remove this cluster first + clearClusters(); // all leftovers are gone by now... 
this is really just a double safety if we miss something somewhere + switch (currentClusterScope) { + case SUITE: + if (testCluster != null && target instanceof ParameterizedOpenSearchIntegTestCase) { + final OpenSearchIntegTestCase instance = suites.get(testCluster); + if (instance != null) { + assert instance instanceof ParameterizedOpenSearchIntegTestCase; + if (hasParametersChanged( + (ParameterizedOpenSearchIntegTestCase) instance, + (ParameterizedOpenSearchIntegTestCase) target + )) { + IOUtils.closeWhileHandlingException(testCluster); + printTestMessage("new instance of parameterized test class, recreating test cluster for suite"); + testCluster = null; + } + } + } + + if (testCluster == null) { // only build if it's not there yet + testCluster = buildWithPrivateContext(currentClusterScope, seed, target); + suites.put(testCluster, target); + } + break; + case TEST: + // close the previous one and create a new one + IOUtils.closeWhileHandlingException(testCluster); + testCluster = target.buildTestCluster(currentClusterScope, seed); + break; + } + clusters.put(clazz, testCluster); + return testCluster; + } + } + + private void printTestMessage(String message) { + logger.info("[{}]: {} suite", getTestClass().getSimpleName(), message); + } + + private static Class<?> getTestClass() { + return OpenSearchTestCase.getTestClass(); + } + + private void printTestMessage(String message, FrameworkMethod method) { + logger.info("[{}#{}]: {} test", getTestClass().getSimpleName(), method.getName(), message); + } + + private void afterInternal(boolean afterClass, OpenSearchIntegTestCase target) throws Exception { + final Scope currentClusterScope = getClusterScope(getTestClass()); + internalCluster().ifPresent(InternalTestCluster::clearDisruptionScheme); + + OpenSearchIntegTestCase instance = suiteInstance; + if (instance == null) { + instance = target; + } + + try { + if (cluster() != null) { + if (currentClusterScope != Scope.TEST) { + Metadata metadata = clientForAnyNode().admin().cluster().prepareState().execute().actionGet().getState().getMetadata(); + + final Set<String> persistentKeys = new HashSet<>(metadata.persistentSettings().keySet()); + assertThat("test leaves persistent cluster metadata behind", persistentKeys, empty()); + + final Set<String> transientKeys = new HashSet<>(metadata.transientSettings().keySet()); + assertThat("test leaves transient cluster metadata behind", transientKeys, empty()); + } + instance.ensureClusterSizeConsistency(); + instance.ensureClusterStateConsistency(); + instance.ensureClusterStateCanBeReadByNodeTool(); + instance.beforeIndexDeletion(); + cluster().wipe(instance.excludeTemplates()); // wipe after to make sure we fail in the test that didn't ack the delete + if (afterClass || currentClusterScope == Scope.TEST) { + cluster().close(); + } + cluster().assertAfterTest(); + } + } finally { + if (currentClusterScope == Scope.TEST) { + clearClusters(); // it is ok to leave persistent / transient cluster state behind if scope is TEST + } + } + } + + private void clearClusters() throws Exception { + synchronized (clusters) { + if (!clusters.isEmpty()) { + IOUtils.close(clusters.values()); + suites.clear(); + clusters.clear(); + } + } + if (restClient != null) { + restClient.close(); + restClient = null; + } + OpenSearchTestCase.assertBusy(() -> { + int numChannels = RestCancellableNodeClient.getNumChannels(); + OpenSearchTestCase.assertEquals( + numChannels + + " channels still being tracked in " + + RestCancellableNodeClient.class.getSimpleName() + + " while 
there should be none", + 0, + numChannels + ); + }); + } + + private Statement statement(final Statement base, FrameworkMethod method, Object target) { + return new Statement() { + @Override + public void evaluate() throws Throwable { + before(target, method); + + List<Throwable> errors = new ArrayList<Throwable>(); + try { + base.evaluate(); + } catch (Throwable t) { + errors.add(t); + } finally { + try { + after(target, method); + } catch (Throwable t) { + errors.add(t); + } + } + MultipleFailureException.assertEmpty(errors); + } + }; + } + + private void initializeSuiteScope(OpenSearchIntegTestCase target, FrameworkMethod method) throws Exception { + final Class<?> targetClass = getTestClass(); + /* + Note we create these test class instance via reflection + since JUnit creates a new instance per test. + */ + if (suiteInstance != null) { + // Catching the case when parameterized test cases are run: the test class stays the same but the test instances changes. + if (target instanceof ParameterizedOpenSearchIntegTestCase) { + assert suiteInstance instanceof ParameterizedOpenSearchIntegTestCase; + if (hasParametersChanged( + (ParameterizedOpenSearchIntegTestCase) suiteInstance, + (ParameterizedOpenSearchIntegTestCase) target + )) { + printTestMessage("new instance of parameterized test class, recreating cluster scope", method); + afterClass(); + beforeClass(); + } else { + return; /* same test class instance */ + } + } else { + return; /* not a parameterized test */ + } + } + + assert suiteInstance == null; + if (isSuiteScopedTest(targetClass)) { + suiteInstance = target; + + boolean success = false; + try { + printTestMessage("setup", method); + beforeInternal(target); + suiteInstance.setupSuiteScopeCluster(); + success = true; + } finally { + if (!success) { + afterClass(); + } + } + } else { + suiteInstance = null; + } + } +} diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchTokenStreamTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchTokenStreamTestCase.java index e853c1e6314e1..9f7f33b351be3 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchTokenStreamTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchTokenStreamTestCase.java @@ -48,9 +48,9 @@ @TimeoutSuite(millis = TimeUnits.HOUR) @LuceneTestCase.SuppressReproduceLine @LuceneTestCase.SuppressSysoutChecks(bugUrl = "we log a lot on purpose") -/** - * Basic test case for token streams. the assertion methods in this class will - * run basic checks to enforce correct behavior of the token streams. +/* + Basic test case for token streams. the assertion methods in this class will + run basic checks to enforce correct behavior of the token streams. */ public abstract class OpenSearchTokenStreamTestCase extends BaseTokenStreamTestCase { diff --git a/test/framework/src/main/java/org/opensearch/test/ParameterizedDynamicSettingsOpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/ParameterizedDynamicSettingsOpenSearchIntegTestCase.java new file mode 100644 index 0000000000000..b31dfa2bdefa5 --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/test/ParameterizedDynamicSettingsOpenSearchIntegTestCase.java @@ -0,0 +1,66 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.test; + +import org.opensearch.common.settings.Settings; +import org.opensearch.common.settings.SettingsModule; +import org.junit.After; +import org.junit.Before; + +/** + * Base class for running the tests with parameterization using dynamic settings: the cluster will be created once before the test suite and the + * settings will be applied dynamically; note that not all settings can be changed dynamically (consider using {@link ParameterizedStaticSettingsOpenSearchIntegTestCase} + * instead). + * <p> + * Here is a simple illustration of the execution flow per parameter combination: + * <ul> + * <li><b>suite scope</b>: create cluster -> for each test method { apply settings -> run test method -> unapply settings } -> shutdown cluster</li> + * <li><b>test scope</b>: for each test method { create cluster -> apply settings -> run test method -> unapply settings -> shutdown cluster }</li> + * </ul> + */ +public abstract class ParameterizedDynamicSettingsOpenSearchIntegTestCase extends ParameterizedOpenSearchIntegTestCase { + public ParameterizedDynamicSettingsOpenSearchIntegTestCase(Settings dynamicSettings) { + super(dynamicSettings); + } + + @Before + public void beforeTests() { + SettingsModule settingsModule = new SettingsModule(settings); + for (String key : settings.keySet()) { + assertTrue( + settingsModule.getClusterSettings().isDynamicSetting(key) || settingsModule.getIndexScopedSettings().isDynamicSetting(key) + ); + } + client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings).get(); + } + + @After + public void afterTests() { + final Settings.Builder settingsToUnset = Settings.builder(); + settings.keySet().forEach(settingsToUnset::putNull); + client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settingsToUnset).get(); + } + + @Override + boolean hasSameParametersAs(ParameterizedOpenSearchIntegTestCase obj) { + if (this == obj) { + return true; + } + + if (obj == null) { + return false; + } + + if (getClass() != obj.getClass()) { + return false; + } + + return true; + } +} diff --git a/test/framework/src/main/java/org/opensearch/test/ParameterizedOpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/ParameterizedOpenSearchIntegTestCase.java new file mode 100644 index 0000000000000..23316adf6a2d7 --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/test/ParameterizedOpenSearchIntegTestCase.java @@ -0,0 +1,51 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.test; + +import org.opensearch.common.settings.Settings; + +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; + +/** + * Base class for running the tests with parameterization of the settings. + * For any class that wants to use parameterization, use {@link com.carrotsearch.randomizedtesting.annotations.ParametersFactory} to generate + * different parameters.
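+ * <p>
+ * A minimal sketch of such a factory (hypothetical, shown for illustration only; any list of single-element
+ * {@code Object[]} entries, each wrapping one {@link Settings} instance, works):
+ * <pre>{@code
+ * @ParametersFactory
+ * public static Collection<Object[]> parameters() {
+ *     return Arrays.asList(
+ *         new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
+ *         new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
+ *     );
+ * }
+ * }</pre>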
+ * + * There are two flavors of applying the parameterized settings to the cluster on the suite level: + * - static: the cluster will be pre-created with the settings at startup; please subclass {@link ParameterizedStaticSettingsOpenSearchIntegTestCase}, whose method + * {@link #hasSameParametersAs(ParameterizedOpenSearchIntegTestCase)} is used by the test scaffolding to detect when the test suite is instantiated with + * new parameters and the test cluster has to be recreated + * - dynamic: the cluster will be created once before the test suite and the settings will be applied dynamically; please subclass {@link ParameterizedDynamicSettingsOpenSearchIntegTestCase}, + * noting that not all settings can be changed dynamically + * + * If the test suite uses test-level scope, the cluster will be recreated for each test method (applying static or dynamic settings). + */ +abstract class ParameterizedOpenSearchIntegTestCase extends OpenSearchIntegTestCase { + protected final Settings settings; + + ParameterizedOpenSearchIntegTestCase(Settings settings) { + this.settings = settings; + } + + // This method shouldn't be called in setupSuiteScopeCluster(). Only call this method inside a single test. + public void indexRandomForConcurrentSearch(String... indices) throws InterruptedException { + if (CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(settings)) { + indexRandomForMultipleSlices(indices); + } + } + + /** + * Compares the parameters of the two {@link ParameterizedOpenSearchIntegTestCase} test suite instances. + * This method is used by {@link OpenSearchTestClusterRule} to determine when the parameterized test suite is instantiated with + * another set of parameters and the test cluster has to be recreated to reflect that. + * @param obj instance of the {@link ParameterizedOpenSearchIntegTestCase} to compare with + * @return {@code true} if the parameters of the test suites are the same, {@code false} otherwise + */ + abstract boolean hasSameParametersAs(ParameterizedOpenSearchIntegTestCase obj); +} diff --git a/test/framework/src/main/java/org/opensearch/test/ParameterizedStaticSettingsOpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/ParameterizedStaticSettingsOpenSearchIntegTestCase.java new file mode 100644 index 0000000000000..7d2c9ad686a01 --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/test/ParameterizedStaticSettingsOpenSearchIntegTestCase.java @@ -0,0 +1,80 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.test; + +import org.opensearch.common.settings.Settings; +import org.opensearch.indices.replication.common.ReplicationType; + +import java.nio.file.Path; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; + +/** + * Base class for running the tests with parameterization using static settings: the cluster will be pre-created with the settings at startup, and the method + * {@link #hasSameParametersAs(ParameterizedOpenSearchIntegTestCase)} is used by the test scaffolding to detect when the test suite is instantiated with + * new parameters and the test cluster has to be recreated.
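+ * <p>
+ * A minimal sketch of a static-settings suite (the subclass name is hypothetical, for illustration only;
+ * {@code replicationSettings} is the predefined parameter list declared below):
+ * <pre>{@code
+ * public class MyReplicationIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase {
+ *     public MyReplicationIT(Settings staticSettings) {
+ *         super(staticSettings);
+ *     }
+ *
+ *     @ParametersFactory
+ *     public static Collection<Object[]> parameters() {
+ *         return replicationSettings;
+ *     }
+ * }
+ * }</pre>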
+ * <p> + * Here is a simple illustration of the execution flow per parameter combination: + * <ul> + * <li><b>suite scope</b>: create cluster -> for each test method { run test method } -> shutdown cluster</li> + * <li><b>test scope</b>: for each test method { create cluster -> run test method -> shutdown cluster }</li> + * </ul> + */ +public abstract class ParameterizedStaticSettingsOpenSearchIntegTestCase extends ParameterizedOpenSearchIntegTestCase { + + protected static final String REMOTE_STORE_REPOSITORY_NAME = "test-remote-store-repo"; + private Path remoteStoreRepositoryPath; + public static final List<Object[]> replicationSettings = Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.DOCUMENT).build() }, + new Object[] { Settings.builder().put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT).build() } + ); + + public ParameterizedStaticSettingsOpenSearchIntegTestCase(Settings nodeSettings) { + super(nodeSettings); + } + + public static final List<Object[]> remoteStoreSettings = Arrays.asList( + new Object[] { Settings.builder().put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true).build() }, + new Object[] { Settings.builder().put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), false).build() } + ); + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + Settings.Builder builder = Settings.builder(); + if (REMOTE_CLUSTER_STATE_ENABLED_SETTING.get(settings)) { + if (remoteStoreRepositoryPath == null) { + remoteStoreRepositoryPath = randomRepoPath().toAbsolutePath(); + } + builder.put(remoteStoreClusterSettings(REMOTE_STORE_REPOSITORY_NAME, remoteStoreRepositoryPath)); + } + return builder.put(super.nodeSettings(nodeOrdinal)).put(settings).build(); + } + + @Override + boolean hasSameParametersAs(ParameterizedOpenSearchIntegTestCase obj) { + if (this == obj) { + return true; + } + + if (obj == null) { + return false; + } + + if (getClass() != obj.getClass()) { + return false; + } + + final ParameterizedStaticSettingsOpenSearchIntegTestCase other = (ParameterizedStaticSettingsOpenSearchIntegTestCase) obj; + return Objects.equals(settings, other.settings); + } +} diff --git a/test/framework/src/main/java/org/opensearch/test/PosixPermissionsResetter.java b/test/framework/src/main/java/org/opensearch/test/PosixPermissionsResetter.java index df03bfab57e57..344e61de7d5bd 100644 --- a/test/framework/src/main/java/org/opensearch/test/PosixPermissionsResetter.java +++ b/test/framework/src/main/java/org/opensearch/test/PosixPermissionsResetter.java @@ -31,8 +31,6 @@ package org.opensearch.test; -import java.util.EnumSet; -import java.util.Set; import org.junit.Assert; import java.io.IOException; @@ -40,6 +38,8 @@ import java.nio.file.Path; import java.nio.file.attribute.PosixFileAttributeView; import java.nio.file.attribute.PosixFilePermission; +import java.util.EnumSet; +import java.util.Set; /** Stores the posix attributes for a path and resets them on close.
*/ public class PosixPermissionsResetter implements AutoCloseable { diff --git a/test/framework/src/main/java/org/opensearch/test/RandomObjects.java b/test/framework/src/main/java/org/opensearch/test/RandomObjects.java index a328e6778dfaf..a11fb3425d7af 100644 --- a/test/framework/src/main/java/org/opensearch/test/RandomObjects.java +++ b/test/framework/src/main/java/org/opensearch/test/RandomObjects.java @@ -44,19 +44,19 @@ import org.opensearch.action.support.replication.ReplicationResponse.ShardInfo.Failure; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.coordination.NoClusterManagerBlockService; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.collect.Tuple; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.shard.IndexShardRecoveringException; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.ShardNotFoundException; -import org.opensearch.core.rest.RestStatus; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -67,12 +67,12 @@ import java.util.Map; import java.util.Random; -import static com.carrotsearch.randomizedtesting.generators.RandomNumbers.randomIntBetween; -import static com.carrotsearch.randomizedtesting.generators.RandomStrings.randomAsciiLettersOfLength; -import static com.carrotsearch.randomizedtesting.generators.RandomStrings.randomUnicodeOfLengthBetween; import static java.util.Collections.singleton; import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_UUID_NA_VALUE; import static org.opensearch.test.OpenSearchTestCase.randomFrom; +import static com.carrotsearch.randomizedtesting.generators.RandomNumbers.randomIntBetween; +import static com.carrotsearch.randomizedtesting.generators.RandomStrings.randomAsciiLettersOfLength; +import static com.carrotsearch.randomizedtesting.generators.RandomStrings.randomUnicodeOfLengthBetween; public final class RandomObjects { @@ -156,7 +156,7 @@ private static List<Object> randomStoredFieldValues(Random random, int numValues */ public static Object getExpectedParsedValue(MediaType mediaType, Object value) { if (value instanceof BytesArray) { - if (mediaType == XContentType.JSON) { + if (mediaType == MediaTypeRegistry.JSON) { // JSON writes base64 format return Base64.getEncoder().encodeToString(((BytesArray) value).toBytesRef().bytes); } @@ -194,8 +194,8 @@ public static BytesReference randomSource(Random random) { * * @param random Random generator */ - public static BytesReference randomSource(Random random, XContentType xContentType) { - return randomSource(random, xContentType, 1); + public static BytesReference randomSource(Random random, final MediaType mediaType) { + return randomSource(random, mediaType, 1); } /** @@ -204,8 +204,8 @@ public static BytesReference randomSource(Random random, XContentType xContentTy * * @param random Random generator */ - public static BytesReference randomSource(Random random, 
XContentType xContentType, int minNumFields) { - try (XContentBuilder builder = XContentFactory.contentBuilder(xContentType)) { + public static BytesReference randomSource(Random random, final MediaType mediaType, int minNumFields) { + try (XContentBuilder builder = mediaType.contentBuilder()) { builder.startObject(); addFields(random, builder, minNumFields, 0); builder.endObject(); diff --git a/test/framework/src/main/java/org/opensearch/test/RemoteStoreTestUtils.java b/test/framework/src/main/java/org/opensearch/test/RemoteStoreTestUtils.java new file mode 100644 index 0000000000000..2e0b846d801e2 --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/test/RemoteStoreTestUtils.java @@ -0,0 +1,130 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.test; + +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.store.ByteBuffersDataOutput; +import org.apache.lucene.store.ByteBuffersIndexOutput; +import org.apache.lucene.store.OutputStreamIndexOutput; +import org.apache.lucene.util.Version; +import org.opensearch.common.UUIDs; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.HashMap; +import java.util.Map; + +import static org.opensearch.test.OpenSearchTestCase.randomIntBetween; + +/** + * Utilities for remote store related operations used across one or more tests. 
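+ * <p>
+ * For example (illustrative only), a test can fabricate per-segment metadata with {@code getDummyMetadata("_0", 1)}
+ * and serialize it through {@code createMetadataFileBytes(metadata, checkpoint, segmentInfos)} to emulate the bytes a
+ * shard would read back from the remote segment store.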
+ */ +public final class RemoteStoreTestUtils { + + private RemoteStoreTestUtils() { + + } + + /** + * Prepares metadata file bytes with header and footer. + * + * @param segmentFilesMap actual metadata content + * @param replicationCheckpoint replication checkpoint to embed into the metadata file + * @param segmentInfos segment infos whose serialized bytes are appended to the metadata file + * @return an {@link InputStream} over the metadata file bytes, including header and footer + * @throws IOException in case of I/O errors while writing the metadata + */ + public static InputStream createMetadataFileBytes( + Map<String, String> segmentFilesMap, + ReplicationCheckpoint replicationCheckpoint, + SegmentInfos segmentInfos + ) throws IOException { + ByteBuffersDataOutput byteBuffersIndexOutput = new ByteBuffersDataOutput(); + segmentInfos.write(new ByteBuffersIndexOutput(byteBuffersIndexOutput, "", "")); + byte[] byteArray = byteBuffersIndexOutput.toArrayCopy(); + + BytesStreamOutput output = new BytesStreamOutput(); + OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput("segment metadata", "metadata output stream", output, 4096); + CodecUtil.writeHeader(indexOutput, RemoteSegmentMetadata.METADATA_CODEC, RemoteSegmentMetadata.CURRENT_VERSION); + indexOutput.writeMapOfStrings(segmentFilesMap); + RemoteSegmentMetadata.writeCheckpointToIndexOutput(replicationCheckpoint, indexOutput); + indexOutput.writeLong(byteArray.length); + indexOutput.writeBytes(byteArray, byteArray.length); + CodecUtil.writeFooter(indexOutput); + indexOutput.close(); + return new ByteArrayInputStream(BytesReference.toBytes(output.bytes())); + } + + public static Map<String, String> getDummyMetadata(String prefix, int commitGeneration) { + Map<String, String> metadata = new HashMap<>(); + + metadata.put( + prefix + ".cfe", + prefix + + ".cfe::" + + prefix + + ".cfe__" + + UUIDs.base64UUID() + + "::" + + randomIntBetween(1000, 5000) + + "::" + + randomIntBetween(512000, 1024000) + + "::" + + Version.MIN_SUPPORTED_MAJOR + ); + metadata.put( + prefix + ".cfs", + prefix + + ".cfs::" + + prefix + + ".cfs__" + + UUIDs.base64UUID() + + "::" + + randomIntBetween(1000, 5000) + + "::" + + randomIntBetween(512000, 1024000) + + "::" + + Version.MIN_SUPPORTED_MAJOR + ); + metadata.put( + prefix + ".si", + prefix + + ".si::" + + prefix + + ".si__" + + UUIDs.base64UUID() + + "::" + + randomIntBetween(1000, 5000) + + "::" + + randomIntBetween(512000, 1024000) + + "::" + + Version.LATEST.major + ); + metadata.put( + "segments_" + commitGeneration, + "segments_" + + commitGeneration + + "::segments_" + + commitGeneration + + "__" + + UUIDs.base64UUID() + + "::" + + randomIntBetween(1000, 5000) + + "::" + + randomIntBetween(1024, 5120) + + "::" + + Version.LATEST.major + ); + return metadata; + } +} diff --git a/test/framework/src/main/java/org/opensearch/test/StreamsUtils.java b/test/framework/src/main/java/org/opensearch/test/StreamsUtils.java index 54cabf753012b..25884b88c5811 100644 --- a/test/framework/src/main/java/org/opensearch/test/StreamsUtils.java +++ b/test/framework/src/main/java/org/opensearch/test/StreamsUtils.java @@ -32,9 +32,9 @@ package org.opensearch.test; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.util.io.Streams; +import org.opensearch.core.common.bytes.BytesReference; import java.io.FileNotFoundException; import java.io.IOException; diff --git a/test/framework/src/main/java/org/opensearch/test/TestCluster.java b/test/framework/src/main/java/org/opensearch/test/TestCluster.java index 61742cd4fb827..8c41e6e5d5b38 100644 --- a/test/framework/src/main/java/org/opensearch/test/TestCluster.java +++
b/test/framework/src/main/java/org/opensearch/test/TestCluster.java @@ -42,10 +42,12 @@ import org.opensearch.client.Client; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexTemplateMetadata; +import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.index.IndexNotFoundException; import org.opensearch.indices.IndexTemplateMissingException; import org.opensearch.repositories.RepositoryMissingException; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.test.hamcrest.OpenSearchAssertions; import java.io.Closeable; @@ -253,7 +255,18 @@ public void wipeRepositories(String... repositories) { } for (String repository : repositories) { try { - client().admin().cluster().prepareDeleteRepository(repository).execute().actionGet(); + List<RepositoryMetadata> repositoryMetadata = client().admin() + .cluster() + .prepareGetRepositories(repository) + .execute() + .actionGet() + .repositories(); + if (repositoryMetadata.isEmpty() == false + && BlobStoreRepository.SYSTEM_REPOSITORY_SETTING.get(repositoryMetadata.get(0).settings()) == true) { + client().admin().cluster().prepareCleanupRepository(repository).execute().actionGet(); + } else { + client().admin().cluster().prepareDeleteRepository(repository).execute().actionGet(); + } } catch (RepositoryMissingException ex) { // ignore } @@ -263,7 +276,7 @@ public void wipeRepositories(String... repositories) { /** * Ensures that any breaker statistics are reset to 0. - * + * <p> * The implementation is specific to the test cluster, because the act of * checking some breaker stats can increase them. */ diff --git a/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java b/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java index 4e44791e77566..09a72dcdc3641 100644 --- a/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java +++ b/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java @@ -40,6 +40,7 @@ import org.opensearch.action.search.SearchType; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexService; import org.opensearch.index.cache.bitset.BitsetFilterCache; import org.opensearch.index.mapper.MappedFieldType; @@ -48,7 +49,6 @@ import org.opensearch.index.query.ParsedQuery; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.shard.IndexShard; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.similarity.SimilarityService; import org.opensearch.search.SearchExtBuilder; import org.opensearch.search.SearchShardTarget; @@ -83,6 +83,8 @@ import java.util.List; import java.util.Map; +import static org.opensearch.test.OpenSearchTestCase.randomIntBetween; + public class TestSearchContext extends SearchContext { public static final SearchShardTarget SHARD_TARGET = new SearchShardTarget( "test", @@ -105,6 +107,7 @@ public class TestSearchContext extends SearchContext { SearchShardTask task; SortAndFormats sort; boolean trackScores = false; + boolean includeNamedQueriesScore = false; int trackTotalHitsUpTo = SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO; ContextIndexSearcher searcher; @@ -118,6 +121,7 @@ public class TestSearchContext extends SearchContext { private CollapseContext collapse; protected boolean concurrentSegmentSearchEnabled; private 
BucketCollectorProcessor bucketCollectorProcessor = NO_OP_BUCKET_COLLECTOR_PROCESSOR; + private int maxSliceCount; /** * Sets the concurrent segment search enabled field @@ -126,6 +130,14 @@ public void setConcurrentSegmentSearchEnabled(boolean concurrentSegmentSearchEna this.concurrentSegmentSearchEnabled = concurrentSegmentSearchEnabled; } + /** + * Sets the maxSliceCount for concurrent search + * @param sliceCount maxSliceCount + */ + public void setMaxSliceCount(int sliceCount) { + this.maxSliceCount = sliceCount; + } + private final Map<String, SearchExtBuilder> searchExtBuilders = new HashMap<>(); public TestSearchContext(BigArrays bigArrays, IndexService indexService) { @@ -160,6 +172,8 @@ public TestSearchContext( this.indexShard = indexShard; this.queryShardContext = queryShardContext; this.searcher = searcher; + this.concurrentSegmentSearchEnabled = searcher != null && (searcher.getExecutor() != null); + this.maxSliceCount = randomIntBetween(0, 2); this.scrollContext = scrollContext; } @@ -396,6 +410,17 @@ public boolean trackScores() { return trackScores; } + @Override + public SearchContext includeNamedQueriesScore(boolean includeNamedQueriesScore) { + this.includeNamedQueriesScore = includeNamedQueriesScore; + return this; + } + + @Override + public boolean includeNamedQueriesScore() { + return includeNamedQueriesScore; + } + @Override public SearchContext trackTotalHitsUpTo(int trackTotalHitsUpTo) { this.trackTotalHitsUpTo = trackTotalHitsUpTo; @@ -619,7 +644,7 @@ public Profilers getProfilers() { * Returns concurrent segment search status for the search context */ @Override - public boolean isConcurrentSegmentSearchEnabled() { + public boolean shouldUseConcurrentSearch() { return concurrentSegmentSearchEnabled; } @@ -673,6 +698,21 @@ public BucketCollectorProcessor bucketCollectorProcessor() { return bucketCollectorProcessor; } + @Override + public int getTargetMaxSliceCount() { + assert concurrentSegmentSearchEnabled == true : "Please use concurrent search before fetching maxSliceCount"; + return maxSliceCount; + } + + @Override + public boolean shouldUseTimeSeriesDescSortOptimization() { + return indexShard != null + && indexShard.isTimeSeriesDescSortOptimizationEnabled() + && sort != null + && sort.isSortOnTimeSeriesField() + && sort.sort.getSort()[0].getReverse() == false; + } + /** * Clean the query results by consuming all of it */ @@ -686,7 +726,7 @@ public TestSearchContext withCleanQueryResult() { * Add profilers to the query */ public TestSearchContext withProfilers() { - this.profilers = new Profilers(searcher); + this.profilers = new Profilers(searcher, concurrentSegmentSearchEnabled); return this; } } diff --git a/test/framework/src/main/java/org/opensearch/test/VersionUtils.java b/test/framework/src/main/java/org/opensearch/test/VersionUtils.java index 8fb9bc5cd7c1c..8ce5afab17c00 100644 --- a/test/framework/src/main/java/org/opensearch/test/VersionUtils.java +++ b/test/framework/src/main/java/org/opensearch/test/VersionUtils.java @@ -359,4 +359,14 @@ public static Version randomPreviousCompatibleVersion(Random random, Version ver // but 7.2.0 for minimum compat return randomVersionBetween(random, version.minimumIndexCompatibilityVersion(), getPreviousVersion(version)); } + + /** + * Returns a {@link Version} with a given major, minor and revision version. + * Build version is skipped for the sake of simplicity. 
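+ * <p>
+ * For example, {@code getVersion((byte) 2, (byte) 11, (byte) 1)} is equivalent to {@code Version.fromString("2.11.1")}.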
+ */ + public static Version getVersion(byte major, byte minor, byte revision) { + StringBuilder sb = new StringBuilder(); + sb.append(major).append('.').append(minor).append('.').append(revision); + return Version.fromString(sb.toString()); + } } diff --git a/test/framework/src/main/java/org/opensearch/test/XContentTestUtils.java b/test/framework/src/main/java/org/opensearch/test/XContentTestUtils.java index 75a86d3d9a0bb..2c535211ca456 100644 --- a/test/framework/src/main/java/org/opensearch/test/XContentTestUtils.java +++ b/test/framework/src/main/java/org/opensearch/test/XContentTestUtils.java @@ -32,18 +32,19 @@ package org.opensearch.test; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.test.rest.yaml.ObjectPath; import java.io.IOException; @@ -57,9 +58,9 @@ import java.util.function.Supplier; import java.util.stream.Collectors; -import static com.carrotsearch.randomizedtesting.generators.RandomStrings.randomAsciiOfLength; -import static org.opensearch.core.xcontent.ToXContent.EMPTY_PARAMS; import static org.opensearch.common.xcontent.XContentHelper.createParser; +import static org.opensearch.core.xcontent.ToXContent.EMPTY_PARAMS; +import static com.carrotsearch.randomizedtesting.generators.RandomStrings.randomAsciiOfLength; public final class XContentTestUtils { private XContentTestUtils() { @@ -75,7 +76,7 @@ public static Map<String, Object> convertToMap(ToXContent part) throws IOExcepti } public static BytesReference convertToXContent(Map<String, ?> map, XContentType xContentType) throws IOException { - try (XContentBuilder builder = XContentFactory.contentBuilder(xContentType)) { + try (XContentBuilder builder = MediaTypeRegistry.contentBuilder(xContentType)) { builder.map(map); return BytesReference.bytes(builder); } @@ -163,10 +164,10 @@ private static String differenceBetweenObjectsIgnoringArrayOrder(String path, Ob * This method takes the input xContent data and adds a random field value, inner object or array into each * json object. This can e.g. be used to test if parsers that handle the resulting xContent can handle the * augmented xContent correctly, for example when testing lenient parsing. - * + * <p> * If the xContent output contains objects that should be skipped of such treatment, an optional filtering * {@link Predicate} can be supplied that checks xContent paths that should be excluded from this treatment. - * + * <p> * This predicate should check the xContent path that we want to insert to and return {@code true} if the * path should be excluded. Paths are strings concatenating field names and array indices, so e.g.
in: * @@ -187,7 +188,7 @@ private static String differenceBetweenObjectsIgnoringArrayOrder(String path, Ob * </pre> * * "foo1.bar.2.baz" would point to the desired insert location. - * + * <p> * To exclude inserting into the "foo1" object we would use a {@link Predicate} like * <pre> * {@code @@ -256,12 +257,12 @@ public static BytesReference insertRandomFields( * This utility method takes an XContentParser and walks the xContent structure to find all * possible paths to where a new object or array starts. This can be used in tests that add random * xContent values to test parsing code for errors or to check their robustness against new fields. - * + * <p> * The path uses dot separated fieldnames and numbers for array indices, similar to what we do in * {@link ObjectPath}. - * + * <p> * The {@link Stack} passed in should initially be empty, it gets pushed to by recursive calls - * + * <p> * As an example, the following json xContent: * <pre> * { diff --git a/test/framework/src/main/java/org/opensearch/test/client/NoOpClient.java b/test/framework/src/main/java/org/opensearch/test/client/NoOpClient.java index cc274f4dd0be3..45d779b3e8697 100644 --- a/test/framework/src/main/java/org/opensearch/test/client/NoOpClient.java +++ b/test/framework/src/main/java/org/opensearch/test/client/NoOpClient.java @@ -33,12 +33,12 @@ package org.opensearch.test.client; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; -import org.opensearch.action.ActionResponse; import org.opensearch.action.ActionType; import org.opensearch.client.support.AbstractClient; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -47,7 +47,7 @@ /** * Client that always responds with {@code null} to every request. Override {@link #doExecute(ActionType, ActionRequest, ActionListener)} * for testing. - * + * <p> * See also {@link NoOpNodeClient} if you need to mock a {@link org.opensearch.client.node.NodeClient}. */ public class NoOpClient extends AbstractClient { diff --git a/test/framework/src/main/java/org/opensearch/test/client/NoOpNodeClient.java b/test/framework/src/main/java/org/opensearch/test/client/NoOpNodeClient.java index 37843ce7ff366..4e84fe3b91d15 100644 --- a/test/framework/src/main/java/org/opensearch/test/client/NoOpNodeClient.java +++ b/test/framework/src/main/java/org/opensearch/test/client/NoOpNodeClient.java @@ -33,15 +33,15 @@ package org.opensearch.test.client; import org.opensearch.OpenSearchException; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionModule.DynamicActionRegistry; import org.opensearch.action.ActionRequest; -import org.opensearch.action.ActionResponse; import org.opensearch.action.ActionType; import org.opensearch.client.Client; import org.opensearch.client.node.NodeClient; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskListener; import org.opensearch.threadpool.TestThreadPool; @@ -55,7 +55,7 @@ * Client that always responds with {@code null} to every request.
Override {@link #doExecute(ActionType, ActionRequest, ActionListener)}, * {@link #executeLocally(ActionType, ActionRequest, ActionListener)}, or {@link #executeLocally(ActionType, ActionRequest, TaskListener)} * for testing. - * + * <p> * See also {@link NoOpClient} if you do not specifically need a {@link NodeClient}. */ public class NoOpNodeClient extends NodeClient { diff --git a/test/framework/src/main/java/org/opensearch/test/client/RandomizingClient.java b/test/framework/src/main/java/org/opensearch/test/client/RandomizingClient.java index 7ec9950d87c63..0ef7c5dffcb5e 100644 --- a/test/framework/src/main/java/org/opensearch/test/client/RandomizingClient.java +++ b/test/framework/src/main/java/org/opensearch/test/client/RandomizingClient.java @@ -33,6 +33,7 @@ package org.opensearch.test.client; import com.carrotsearch.randomizedtesting.generators.RandomPicks; + import org.apache.lucene.tests.util.TestUtil; import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchType; diff --git a/test/framework/src/main/java/org/opensearch/test/core/compress/AbstractCompressorTestCase.java b/test/framework/src/main/java/org/opensearch/test/core/compress/AbstractCompressorTestCase.java new file mode 100644 index 0000000000000..be53e46122157 --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/test/core/compress/AbstractCompressorTestCase.java @@ -0,0 +1,409 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.test.core.compress; + +import org.apache.lucene.tests.util.LineFileDocs; +import org.apache.lucene.tests.util.TestUtil; +import org.opensearch.core.compress.Compressor; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.charset.StandardCharsets; +import java.util.Random; +import java.util.concurrent.CountDownLatch; + +public abstract class AbstractCompressorTestCase extends OpenSearchTestCase { + + public void testRandom() throws IOException { + Random r = random(); + for (int i = 0; i < 10; i++) { + final byte[] bytes = new byte[TestUtil.nextInt(r, 1, 100000)]; + r.nextBytes(bytes); + doTest(bytes); + } + } + + public void testRandomThreads() throws Exception { + final Random r = random(); + int threadCount = TestUtil.nextInt(r, 2, 6); + Thread[] threads = new Thread[threadCount]; + final CountDownLatch startingGun = new CountDownLatch(1); + for (int tid = 0; tid < threadCount; tid++) { + final long seed = r.nextLong(); + threads[tid] = new Thread() { + @Override + public void run() { + try { + Random r = new Random(seed); + startingGun.await(); + for (int i = 0; i < 10; i++) { + byte bytes[] = new byte[TestUtil.nextInt(r, 1, 100000)]; + r.nextBytes(bytes); + doTest(bytes); + } + } catch (Exception e) { + throw new RuntimeException(e); + } + } + }; + threads[tid].start(); + } + startingGun.countDown(); + for (Thread t : threads) { + t.join(); + } + } + + public void testLineDocs() throws IOException { + Random r = random(); + LineFileDocs lineFileDocs = new LineFileDocs(r); + for (int i = 0; i < 10; i++) { + int numDocs = TestUtil.nextInt(r, 1, 200); + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + for (int j = 0; j < numDocs; j++) { + String s = 
lineFileDocs.nextDoc().get("body"); + bos.write(s.getBytes(StandardCharsets.UTF_8)); + } + doTest(bos.toByteArray()); + } + lineFileDocs.close(); + } + + public void testLineDocsThreads() throws Exception { + final Random r = random(); + int threadCount = TestUtil.nextInt(r, 2, 6); + Thread[] threads = new Thread[threadCount]; + final CountDownLatch startingGun = new CountDownLatch(1); + for (int tid = 0; tid < threadCount; tid++) { + final long seed = r.nextLong(); + threads[tid] = new Thread() { + @Override + public void run() { + try { + Random r = new Random(seed); + startingGun.await(); + LineFileDocs lineFileDocs = new LineFileDocs(r); + for (int i = 0; i < 10; i++) { + int numDocs = TestUtil.nextInt(r, 1, 200); + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + for (int j = 0; j < numDocs; j++) { + String s = lineFileDocs.nextDoc().get("body"); + bos.write(s.getBytes(StandardCharsets.UTF_8)); + } + doTest(bos.toByteArray()); + } + lineFileDocs.close(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + }; + threads[tid].start(); + } + startingGun.countDown(); + for (Thread t : threads) { + t.join(); + } + } + + public void testRepetitionsL() throws IOException { + Random r = random(); + for (int i = 0; i < 10; i++) { + int numLongs = TestUtil.nextInt(r, 1, 10000); + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + long theValue = r.nextLong(); + for (int j = 0; j < numLongs; j++) { + if (r.nextInt(10) == 0) { + theValue = r.nextLong(); + } + bos.write((byte) (theValue >>> 56)); + bos.write((byte) (theValue >>> 48)); + bos.write((byte) (theValue >>> 40)); + bos.write((byte) (theValue >>> 32)); + bos.write((byte) (theValue >>> 24)); + bos.write((byte) (theValue >>> 16)); + bos.write((byte) (theValue >>> 8)); + bos.write((byte) theValue); + } + doTest(bos.toByteArray()); + } + } + + public void testRepetitionsLThreads() throws Exception { + final Random r = random(); + int threadCount = TestUtil.nextInt(r, 2, 6); + Thread[] threads = new Thread[threadCount]; + final CountDownLatch startingGun = new CountDownLatch(1); + for (int tid = 0; tid < threadCount; tid++) { + final long seed = r.nextLong(); + threads[tid] = new Thread() { + @Override + public void run() { + try { + Random r = new Random(seed); + startingGun.await(); + for (int i = 0; i < 10; i++) { + int numLongs = TestUtil.nextInt(r, 1, 10000); + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + long theValue = r.nextLong(); + for (int j = 0; j < numLongs; j++) { + if (r.nextInt(10) == 0) { + theValue = r.nextLong(); + } + bos.write((byte) (theValue >>> 56)); + bos.write((byte) (theValue >>> 48)); + bos.write((byte) (theValue >>> 40)); + bos.write((byte) (theValue >>> 32)); + bos.write((byte) (theValue >>> 24)); + bos.write((byte) (theValue >>> 16)); + bos.write((byte) (theValue >>> 8)); + bos.write((byte) theValue); + } + doTest(bos.toByteArray()); + } + } catch (Exception e) { + throw new RuntimeException(e); + } + } + }; + threads[tid].start(); + } + startingGun.countDown(); + for (Thread t : threads) { + t.join(); + } + } + + public void testRepetitionsI() throws IOException { + Random r = random(); + for (int i = 0; i < 10; i++) { + int numInts = TestUtil.nextInt(r, 1, 20000); + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + int theValue = r.nextInt(); + for (int j = 0; j < numInts; j++) { + if (r.nextInt(10) == 0) { + theValue = r.nextInt(); + } + bos.write((byte) (theValue >>> 24)); + bos.write((byte) (theValue >>> 16)); + bos.write((byte) (theValue >>> 8)); + 
bos.write((byte) theValue); + } + doTest(bos.toByteArray()); + } + } + + public void testRepetitionsIThreads() throws Exception { + final Random r = random(); + int threadCount = TestUtil.nextInt(r, 2, 6); + Thread[] threads = new Thread[threadCount]; + final CountDownLatch startingGun = new CountDownLatch(1); + for (int tid = 0; tid < threadCount; tid++) { + final long seed = r.nextLong(); + threads[tid] = new Thread() { + @Override + public void run() { + try { + Random r = new Random(seed); + startingGun.await(); + for (int i = 0; i < 10; i++) { + int numInts = TestUtil.nextInt(r, 1, 20000); + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + int theValue = r.nextInt(); + for (int j = 0; j < numInts; j++) { + if (r.nextInt(10) == 0) { + theValue = r.nextInt(); + } + bos.write((byte) (theValue >>> 24)); + bos.write((byte) (theValue >>> 16)); + bos.write((byte) (theValue >>> 8)); + bos.write((byte) theValue); + } + doTest(bos.toByteArray()); + } + } catch (Exception e) { + throw new RuntimeException(e); + } + } + }; + threads[tid].start(); + } + startingGun.countDown(); + for (Thread t : threads) { + t.join(); + } + } + + public void testRepetitionsS() throws IOException { + Random r = random(); + for (int i = 0; i < 10; i++) { + int numShorts = TestUtil.nextInt(r, 1, 40000); + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + short theValue = (short) r.nextInt(65535); + for (int j = 0; j < numShorts; j++) { + if (r.nextInt(10) == 0) { + theValue = (short) r.nextInt(65535); + } + bos.write((byte) (theValue >>> 8)); + bos.write((byte) theValue); + } + doTest(bos.toByteArray()); + } + } + + public void testMixed() throws IOException { + Random r = random(); + LineFileDocs lineFileDocs = new LineFileDocs(r); + for (int i = 0; i < 2; ++i) { + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + int prevInt = r.nextInt(); + long prevLong = r.nextLong(); + while (bos.size() < 400000) { + switch (r.nextInt(4)) { + case 0: + addInt(r, prevInt, bos); + break; + case 1: + addLong(r, prevLong, bos); + break; + case 2: + addString(lineFileDocs, bos); + break; + case 3: + addBytes(r, bos); + break; + default: + throw new IllegalStateException("Random is broken"); + } + } + doTest(bos.toByteArray()); + } + } + + private void addLong(Random r, long prev, ByteArrayOutputStream bos) { + long theValue = prev; + if (r.nextInt(10) != 0) { + theValue = r.nextLong(); + } + bos.write((byte) (theValue >>> 56)); + bos.write((byte) (theValue >>> 48)); + bos.write((byte) (theValue >>> 40)); + bos.write((byte) (theValue >>> 32)); + bos.write((byte) (theValue >>> 24)); + bos.write((byte) (theValue >>> 16)); + bos.write((byte) (theValue >>> 8)); + bos.write((byte) theValue); + } + + private void addInt(Random r, int prev, ByteArrayOutputStream bos) { + int theValue = prev; + if (r.nextInt(10) != 0) { + theValue = r.nextInt(); + } + bos.write((byte) (theValue >>> 24)); + bos.write((byte) (theValue >>> 16)); + bos.write((byte) (theValue >>> 8)); + bos.write((byte) theValue); + } + + private void addString(LineFileDocs lineFileDocs, ByteArrayOutputStream bos) throws IOException { + String s = lineFileDocs.nextDoc().get("body"); + bos.write(s.getBytes(StandardCharsets.UTF_8)); + } + + private void addBytes(Random r, ByteArrayOutputStream bos) throws IOException { + byte bytes[] = new byte[TestUtil.nextInt(r, 1, 10000)]; + r.nextBytes(bytes); + bos.write(bytes); + } + + public void testRepetitionsSThreads() throws Exception { + final Random r = random(); + int threadCount = TestUtil.nextInt(r, 2, 
6); + Thread[] threads = new Thread[threadCount]; + final CountDownLatch startingGun = new CountDownLatch(1); + for (int tid = 0; tid < threadCount; tid++) { + final long seed = r.nextLong(); + threads[tid] = new Thread() { + @Override + public void run() { + try { + Random r = new Random(seed); + startingGun.await(); + for (int i = 0; i < 10; i++) { + int numShorts = TestUtil.nextInt(r, 1, 40000); + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + short theValue = (short) r.nextInt(65535); + for (int j = 0; j < numShorts; j++) { + if (r.nextInt(10) == 0) { + theValue = (short) r.nextInt(65535); + } + bos.write((byte) (theValue >>> 8)); + bos.write((byte) theValue); + } + doTest(bos.toByteArray()); + } + } catch (Exception e) { + throw new RuntimeException(e); + } + } + }; + threads[tid].start(); + } + startingGun.countDown(); + for (Thread t : threads) { + t.join(); + } + } + + private void doTest(byte bytes[]) throws IOException { + InputStream rawIn = new ByteArrayInputStream(bytes); + Compressor c = compressor(); + + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + final Random r = random(); + int bufferSize = r.nextBoolean() ? 65535 : TestUtil.nextInt(random(), 1, 70000); + int prepadding = r.nextInt(70000); + int postpadding = r.nextInt(70000); + byte[] buffer = new byte[prepadding + bufferSize + postpadding]; + int len; + try (OutputStream os = c.threadLocalOutputStream(bos)) { + r.nextBytes(buffer); // fill block completely with junk + while ((len = rawIn.read(buffer, prepadding, bufferSize)) != -1) { + os.write(buffer, prepadding, len); + } + } + rawIn.close(); + + // now we have compressed byte array + InputStream in = c.threadLocalInputStream(new ByteArrayInputStream(bos.toByteArray())); + + // randomize constants again + bufferSize = r.nextBoolean() ? 
65535 : TestUtil.nextInt(random(), 1, 70000); + prepadding = r.nextInt(70000); + postpadding = r.nextInt(70000); + buffer = new byte[prepadding + bufferSize + postpadding]; + r.nextBytes(buffer); // fill block completely with junk + + ByteArrayOutputStream uncompressedOut = new ByteArrayOutputStream(); + while ((len = in.read(buffer, prepadding, bufferSize)) != -1) { + uncompressedOut.write(buffer, prepadding, len); + } + uncompressedOut.close(); + + assertArrayEquals(bytes, uncompressedOut.toByteArray()); + } + + protected abstract Compressor compressor(); + +} diff --git a/test/framework/src/main/java/org/opensearch/test/disruption/DisruptableMockTransport.java b/test/framework/src/main/java/org/opensearch/test/disruption/DisruptableMockTransport.java index dd025555d1ae8..4f3884f97a570 100644 --- a/test/framework/src/main/java/org/opensearch/test/disruption/DisruptableMockTransport.java +++ b/test/framework/src/main/java/org/opensearch/test/disruption/DisruptableMockTransport.java @@ -33,14 +33,16 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.coordination.DeterministicTaskQueue; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Nullable; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.test.transport.MockTransport; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.CloseableConnection; @@ -52,7 +54,6 @@ import org.opensearch.transport.TransportInterceptor; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestOptions; -import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -91,9 +92,10 @@ public TransportService createTransportService( TransportInterceptor interceptor, Function<BoundTransportAddress, DiscoveryNode> localNodeFactory, @Nullable ClusterSettings clusterSettings, - Set<String> taskHeaders + Set<String> taskHeaders, + Tracer tracer ) { - return new TransportService(settings, this, threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders); + return new TransportService(settings, this, threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders, tracer); } @Override diff --git a/test/framework/src/main/java/org/opensearch/test/disruption/LongGCDisruption.java b/test/framework/src/main/java/org/opensearch/test/disruption/LongGCDisruption.java index 690e15dd80873..168fbd5bd0d0a 100644 --- a/test/framework/src/main/java/org/opensearch/test/disruption/LongGCDisruption.java +++ b/test/framework/src/main/java/org/opensearch/test/disruption/LongGCDisruption.java @@ -248,11 +248,11 @@ public TimeValue expectedTimeToHeal() { /** * resolves all threads belonging to given node and suspends them if their current stack trace * is "safe". Threads are added to nodeThreads if suspended. - * + * <p> * returns true if some live threads were found. 
The caller is expected to call this method + * until no more "live" threads are found. */ - @SuppressWarnings("deprecation") // suspends/resumes threads intentionally + @SuppressWarnings({ "deprecation", "removal" }) // suspends/resumes threads intentionally @SuppressForbidden(reason = "suspends/resumes threads intentionally") protected boolean suspendThreads(Set<Thread> nodeThreads) { Thread[] allThreads = null; @@ -360,7 +360,7 @@ protected void onBlockDetected(ThreadInfo blockedThread, @Nullable ThreadInfo bl ); } - @SuppressWarnings("deprecation") // suspends/resumes threads intentionally + @SuppressWarnings({ "deprecation", "removal" }) // suspends/resumes threads intentionally @SuppressForbidden(reason = "suspends/resumes threads intentionally") protected void resumeThreads(Set<Thread> threads) { for (Thread thread : threads) { diff --git a/test/framework/src/main/java/org/opensearch/test/disruption/NetworkDisruption.java b/test/framework/src/main/java/org/opensearch/test/disruption/NetworkDisruption.java index 7f2644d8e857c..62e19750a363b 100644 --- a/test/framework/src/main/java/org/opensearch/test/disruption/NetworkDisruption.java +++ b/test/framework/src/main/java/org/opensearch/test/disruption/NetworkDisruption.java @@ -33,6 +33,7 @@ package org.opensearch.test.disruption; import com.carrotsearch.randomizedtesting.generators.RandomPicks; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.cluster.ClusterState; @@ -108,7 +109,7 @@ public void ensureHealthy(InternalTestCluster cluster) { /** * Ensures that all nodes in the cluster are connected to each other. - * + * <p> * Some network disruptions may leave nodes that are not the cluster-manager disconnected from each other. * {@link org.opensearch.cluster.NodeConnectionsService} will eventually reconnect but it's * handy to be able to ensure this happens faster diff --git a/test/framework/src/main/java/org/opensearch/test/engine/MockEngineFactory.java b/test/framework/src/main/java/org/opensearch/test/engine/MockEngineFactory.java index 30cc48c588be1..102c641746b01 100644 --- a/test/framework/src/main/java/org/opensearch/test/engine/MockEngineFactory.java +++ b/test/framework/src/main/java/org/opensearch/test/engine/MockEngineFactory.java @@ -35,6 +35,7 @@ import org.opensearch.index.engine.Engine; import org.opensearch.index.engine.EngineConfig; import org.opensearch.index.engine.EngineFactory; +import org.opensearch.index.engine.NRTReplicationEngine; public final class MockEngineFactory implements EngineFactory { @@ -46,6 +47,12 @@ public MockEngineFactory(Class<? extends FilterDirectoryReader> wrapper) { @Override public Engine newReadWriteEngine(EngineConfig config) { - return new MockInternalEngine(config, wrapper); + + /* + * Segment replication enabled replicas (i.e. read only replicas) do not use an InternalEngine so a MockInternalEngine + * will not work and an NRTReplicationEngine must be used instead. The primary shards for these indices will + * still use a MockInternalEngine. + */ + return config.isReadOnlyReplica() ?
new NRTReplicationEngine(config) : new MockInternalEngine(config, wrapper); } } diff --git a/test/framework/src/main/java/org/opensearch/test/engine/MockEngineSupport.java b/test/framework/src/main/java/org/opensearch/test/engine/MockEngineSupport.java index f232a2b9e4734..86e92a3deef75 100644 --- a/test/framework/src/main/java/org/opensearch/test/engine/MockEngineSupport.java +++ b/test/framework/src/main/java/org/opensearch/test/engine/MockEngineSupport.java @@ -33,19 +33,19 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.lucene.tests.index.AssertingDirectoryReader; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.FilterDirectoryReader; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.tests.index.AssertingDirectoryReader; import org.apache.lucene.tests.util.LuceneTestCase; import org.opensearch.OpenSearchException; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.engine.Engine; import org.opensearch.index.engine.EngineConfig; import org.opensearch.index.engine.EngineException; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.test.OpenSearchIntegTestCase; import java.io.Closeable; diff --git a/test/framework/src/main/java/org/opensearch/test/fixture/AbstractHttpFixture.java b/test/framework/src/main/java/org/opensearch/test/fixture/AbstractHttpFixture.java index 9ccbb53c6a96a..c52eb75ce666f 100644 --- a/test/framework/src/main/java/org/opensearch/test/fixture/AbstractHttpFixture.java +++ b/test/framework/src/main/java/org/opensearch/test/fixture/AbstractHttpFixture.java @@ -33,6 +33,7 @@ package org.opensearch.test.fixture; import com.sun.net.httpserver.HttpServer; + import org.opensearch.common.SuppressForbidden; import org.opensearch.common.io.PathUtils; diff --git a/test/framework/src/main/java/org/opensearch/test/gateway/TestGatewayAllocator.java b/test/framework/src/main/java/org/opensearch/test/gateway/TestGatewayAllocator.java index b1079ac02f266..f123b926f5bad 100644 --- a/test/framework/src/main/java/org/opensearch/test/gateway/TestGatewayAllocator.java +++ b/test/framework/src/main/java/org/opensearch/test/gateway/TestGatewayAllocator.java @@ -37,12 +37,12 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.allocation.FailedShard; import org.opensearch.cluster.routing.allocation.RoutingAllocation; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.gateway.AsyncShardFetch; import org.opensearch.gateway.GatewayAllocator; import org.opensearch.gateway.PrimaryShardAllocator; import org.opensearch.gateway.ReplicaShardAllocator; import org.opensearch.gateway.TransportNodesListGatewayStartedShards.NodeGatewayStartedShards; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.store.TransportNodesListShardStoreMetadata.NodeStoreFilesMetadata; @@ -57,13 +57,13 @@ * A gateway allocator implementation that keeps an in memory list of started shard allocation * that are used as replies to the, normally async, fetch data requests. The in memory list * is adapted when shards are started and failed. 
- * + * <p> * Nodes leaving and joining the cluster do not change the list of shards the class tracks but * rather serves as a filter to what is returned by fetch data. Concretely - fetch data will * only return shards that were started on nodes that are currently part of the cluster. - * + * <p> * For now only primary shard related data is fetched. Replica requests always get an empty response. - * + * <p> * * This class is useful to use in unit tests that require the functionality of {@link GatewayAllocator} but do * not have all the infrastructure required to use it. @@ -98,7 +98,11 @@ protected AsyncShardFetch.FetchResult<NodeGatewayStartedShards> fetchData(ShardR ) ); - return new AsyncShardFetch.FetchResult<>(shardId, foundShards, ignoreNodes); + return new AsyncShardFetch.FetchResult<>(foundShards, new HashMap<>() { + { + put(shardId, ignoreNodes); + } + }); } }; @@ -111,7 +115,11 @@ private ReplicationCheckpoint getReplicationCheckpoint(ShardId shardId, String n protected AsyncShardFetch.FetchResult<NodeStoreFilesMetadata> fetchData(ShardRouting shard, RoutingAllocation allocation) { // for now, just pretend no node has data final ShardId shardId = shard.shardId(); - return new AsyncShardFetch.FetchResult<>(shardId, Collections.emptyMap(), allocation.getIgnoreNodes(shardId)); + return new AsyncShardFetch.FetchResult<>(Collections.emptyMap(), new HashMap<>() { + { + put(shardId, allocation.getIgnoreNodes(shardId)); + } + }); } @Override diff --git a/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java b/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java index 230cc50f427f9..650558aaa97a6 100644 --- a/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java +++ b/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java @@ -34,9 +34,8 @@ import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TotalHits; -import org.opensearch.OpenSearchException; import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionFuture; +import org.opensearch.OpenSearchException; import org.opensearch.action.ActionRequestBuilder; import org.opensearch.action.admin.cluster.health.ClusterHealthRequestBuilder; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; @@ -49,7 +48,6 @@ import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.ShardSearchFailure; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.broadcast.BroadcastResponse; import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.action.support.master.AcknowledgedResponse; @@ -58,15 +56,17 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexTemplateMetadata; import org.opensearch.common.Nullable; -import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.NamedXContentRegistry; import
org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.rest.RestStatus; import org.opensearch.search.SearchHit; import org.opensearch.search.suggest.Suggest; import org.opensearch.test.NotEqualMessageBuilder; @@ -89,8 +89,6 @@ import java.util.concurrent.TimeUnit; import java.util.function.Function; -import static org.apache.lucene.tests.util.LuceneTestCase.expectThrows; -import static org.apache.lucene.tests.util.LuceneTestCase.expectThrowsAnyOf; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.MatcherAssert.assertThat; @@ -101,6 +99,8 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; +import static org.apache.lucene.tests.util.LuceneTestCase.expectThrows; +import static org.apache.lucene.tests.util.LuceneTestCase.expectThrowsAnyOf; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; @@ -304,6 +304,22 @@ public static void assertHitCount(SearchResponse countResponse, long expectedHit } } + public static void assertHitCount(SearchResponse countResponse, long minHitCount, long maxHitCount) { + final TotalHits totalHits = countResponse.getHits().getTotalHits(); + if (!(totalHits.relation == TotalHits.Relation.EQUAL_TO && totalHits.value >= minHitCount && totalHits.value <= maxHitCount)) { + fail( + "Count is " + + totalHits + + " not between " + + minHitCount + + " and " + + maxHitCount + + " inclusive. " + + formatShardStatus(countResponse) + ); + } + } + public static void assertExists(GetResponse response) { String message = String.format(Locale.ROOT, "Expected %s/%s to exist, but does not", response.getIndex(), response.getId()); assertThat(message, response.isExists(), is(true)); @@ -528,6 +544,10 @@ public static Matcher<SearchHit> hasScore(final float score) { return new OpenSearchMatchers.SearchHitHasScoreMatcher(score); } + public static Matcher<SearchHit> hasMatchedQueries(final String[] matchedQueries) { + return new OpenSearchMatchers.SearchHitMatchedQueriesMatcher(matchedQueries); + } + public static <T, V> CombinableMatcher<T> hasProperty(Function<? super T, ? extends V> property, Matcher<V> valueMatcher) { return OpenSearchMatchers.HasPropertyLambdaMatcher.hasProperty(property, valueMatcher); } @@ -687,8 +707,7 @@ public static void assertFileNotExists(Path file) { * The comparison is done by parsing both into a map and comparing those two, so that keys ordering doesn't matter. * Also binary values (byte[]) are properly compared through arrays comparisons. 
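* <p>
* For illustration, a minimal hypothetical usage sketch (the builders and field values here are assumed, not part of this change):
* <pre>
* XContentBuilder expected = JsonXContent.contentBuilder().startObject().field("foo", "bar").endObject();
* XContentBuilder actual = JsonXContent.contentBuilder().startObject().field("foo", "bar").endObject();
* assertToXContentEquivalent(BytesReference.bytes(expected), BytesReference.bytes(actual), MediaTypeRegistry.JSON);
* </pre>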
*/ - public static void assertToXContentEquivalent(BytesReference expected, BytesReference actual, MediaType xContentType) - throws IOException { + public static void assertToXContentEquivalent(BytesReference expected, BytesReference actual, MediaType mediaType) throws IOException { // we tried comparing byte per byte, but that didn't fly for a couple of reasons: // 1) whenever anything goes through a map while parsing, ordering is not preserved, which is perfectly ok // 2) Jackson SMILE parser parses floats as double, which then get printed out as double (with double precision) @@ -696,12 +715,12 @@ public static void assertToXContentEquivalent(BytesReference expected, BytesRefe Map<String, Object> actualMap = null; Map<String, Object> expectedMap = null; try ( - XContentParser actualParser = xContentType.xContent() + XContentParser actualParser = mediaType.xContent() .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, actual.streamInput()) ) { actualMap = actualParser.map(); try ( - XContentParser expectedParser = xContentType.xContent() + XContentParser expectedParser = mediaType.xContent() .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, expected.streamInput()) ) { expectedMap = expectedParser.map(); diff --git a/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchMatchers.java b/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchMatchers.java index 5889b7e269ed2..2be94bd53e3c1 100644 --- a/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchMatchers.java +++ b/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchMatchers.java @@ -38,6 +38,7 @@ import org.hamcrest.TypeSafeMatcher; import org.hamcrest.core.CombinableMatcher; +import java.util.Arrays; import java.util.function.Function; public class OpenSearchMatchers { @@ -111,6 +112,35 @@ public void describeTo(final Description description) { } } + public static class SearchHitMatchedQueriesMatcher extends TypeSafeMatcher<SearchHit> { + private String[] matchedQueries; + + public SearchHitMatchedQueriesMatcher(String[] matchedQueries) { + this.matchedQueries = matchedQueries; + } + + @Override + protected boolean matchesSafely(SearchHit searchHit) { + String[] searchHitQueries = searchHit.getMatchedQueries(); + if (matchedQueries == null) { + return false; + } + Arrays.sort(searchHitQueries); + Arrays.sort(matchedQueries); + return Arrays.equals(searchHitQueries, matchedQueries); + } + + @Override + public void describeMismatchSafely(final SearchHit searchHit, final Description mismatchDescription) { + mismatchDescription.appendText(" matched queries were ").appendValue(Arrays.toString(searchHit.getMatchedQueries())); + } + + @Override + public void describeTo(final Description description) { + description.appendText("searchHit matched queries should be ").appendValue(Arrays.toString(matchedQueries)); + } + } + public static class HasPropertyLambdaMatcher<T, V> extends FeatureMatcher<T, V> { private final Function<? super T, ? 
extends V> property; diff --git a/test/framework/src/main/java/org/opensearch/test/junit/annotations/TestIssueLogging.java b/test/framework/src/main/java/org/opensearch/test/junit/annotations/TestIssueLogging.java index f81c47b6a0a08..97fdcb796f195 100644 --- a/test/framework/src/main/java/org/opensearch/test/junit/annotations/TestIssueLogging.java +++ b/test/framework/src/main/java/org/opensearch/test/junit/annotations/TestIssueLogging.java @@ -43,7 +43,7 @@ /** * Annotation used to set a custom log level when investigating test failures. Do not use this annotation to explicitly * control the logging level in tests; instead, use {@link TestLogging}. - * + * <p> * It supports multiple comma-separated key-value pairs of logger:level (e.g., * org.opensearch.cluster.metadata:TRACE). Use the _root keyword to set the root logger level. */ diff --git a/test/framework/src/main/java/org/opensearch/test/junit/annotations/TestLogging.java b/test/framework/src/main/java/org/opensearch/test/junit/annotations/TestLogging.java index d440fae409897..6c0b87ac67354 100644 --- a/test/framework/src/main/java/org/opensearch/test/junit/annotations/TestLogging.java +++ b/test/framework/src/main/java/org/opensearch/test/junit/annotations/TestLogging.java @@ -43,7 +43,7 @@ /** * Annotation used to set a custom log level for controlling logging behavior in tests. Do not use this annotation when * investigating test failures; instead, use {@link TestIssueLogging}. - * + * <p> * It supports multiple comma-separated key-value pairs of logger:level (e.g., * org.opensearch.cluster.metadata:TRACE). Use the _root keyword to set the root logger level. */ diff --git a/test/framework/src/main/java/org/opensearch/test/junit/listeners/LoggingListener.java b/test/framework/src/main/java/org/opensearch/test/junit/listeners/LoggingListener.java index 7c94c16b77471..ea2c5d055ed8b 100644 --- a/test/framework/src/main/java/org/opensearch/test/junit/listeners/LoggingListener.java +++ b/test/framework/src/main/java/org/opensearch/test/junit/listeners/LoggingListener.java @@ -53,7 +53,7 @@ * A {@link RunListener} that allows changing the log level for a specific test method. When a test method is annotated with the * {@link TestLogging} annotation, the level for the specified loggers will be internally saved before the test method execution and * overridden with the specified ones. At the end of the test method execution the original logger levels will be restored. - * + * <p> * This class is not thread-safe. Given the static nature of the logging API, it assumes that tests are never run concurrently in the same * JVM. For the very same reason no synchronization has been implemented regarding the save/restore process of the original logger * levels.
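For context, the two assertion helpers introduced above in OpenSearchAssertions and OpenSearchMatchers (the range-based assertHitCount overload and the hasMatchedQueries matcher) can be exercised as in the following minimal sketch; the index name, field, query name, and client wiring are assumed for illustration and are not part of this change:

import static org.hamcrest.MatcherAssert.assertThat;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.hasMatchedQueries;

import org.opensearch.action.search.SearchResponse;
import org.opensearch.client.Client;
import org.opensearch.index.query.QueryBuilders;

class MatchedQueriesAssertionSketch {
    void assertHitsInRangeWithMatchedQueries(Client client) {
        // "test-index", "field", "value" and "my_query" are illustrative names only.
        SearchResponse response = client.prepareSearch("test-index")
            .setQuery(QueryBuilders.termQuery("field", "value").queryName("my_query"))
            .get();
        // New overload: passes when the total hit count is between 1 and 10 inclusive.
        assertHitCount(response, 1, 10);
        // New matcher: order-insensitive comparison against the hit's matched query names.
        assertThat(response.getHits().getAt(0), hasMatchedQueries(new String[] { "my_query" }));
    }
}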
diff --git a/test/framework/src/main/java/org/opensearch/test/junit/listeners/ReproduceInfoPrinter.java b/test/framework/src/main/java/org/opensearch/test/junit/listeners/ReproduceInfoPrinter.java index 1b119f2c34dde..e2d59773a76cb 100644 --- a/test/framework/src/main/java/org/opensearch/test/junit/listeners/ReproduceInfoPrinter.java +++ b/test/framework/src/main/java/org/opensearch/test/junit/listeners/ReproduceInfoPrinter.java @@ -32,6 +32,7 @@ package org.opensearch.test.junit.listeners; import com.carrotsearch.randomizedtesting.ReproduceErrorMessageBuilder; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; diff --git a/test/framework/src/main/java/org/opensearch/test/rest/FakeRestChannel.java b/test/framework/src/main/java/org/opensearch/test/rest/FakeRestChannel.java index 6d7951f31a44a..9b3d72807ea25 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/FakeRestChannel.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/FakeRestChannel.java @@ -31,10 +31,10 @@ package org.opensearch.test.rest; +import org.opensearch.core.rest.RestStatus; import org.opensearch.rest.AbstractRestChannel; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; -import org.opensearch.core.rest.RestStatus; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger; diff --git a/test/framework/src/main/java/org/opensearch/test/rest/FakeRestRequest.java b/test/framework/src/main/java/org/opensearch/test/rest/FakeRestRequest.java index 2bbaf8c80dfa4..e7810ae4c8f1c 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/FakeRestRequest.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/FakeRestRequest.java @@ -32,16 +32,16 @@ package org.opensearch.test.rest; -import org.opensearch.action.ActionListener; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.http.HttpChannel; import org.opensearch.http.HttpRequest; import org.opensearch.http.HttpResponse; import org.opensearch.rest.RestRequest; -import org.opensearch.core.rest.RestStatus; import java.net.InetSocketAddress; import java.util.Collections; diff --git a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java index 6d3c2eaa972e6..b7c31685bafa6 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java @@ -60,24 +60,24 @@ import org.opensearch.client.WarningsHandler; import org.opensearch.common.CheckedRunnable; import org.opensearch.common.SetOnce; -import org.opensearch.common.Strings; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.common.xcontent.support.XContentMapValues; +import org.opensearch.core.common.Strings; +import 
org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.common.xcontent.support.XContentMapValues; -import org.opensearch.common.util.io.IOUtils; import org.opensearch.index.IndexSettings; import org.opensearch.index.seqno.ReplicationTracker; -import org.opensearch.core.rest.RestStatus; import org.opensearch.snapshots.SnapshotState; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.rest.yaml.ObjectPath; @@ -607,7 +607,7 @@ protected static void wipeAllIndices() throws IOException { deleteRequest.setOptions(allowSystemIndexAccessWarningOptions); final Response response = adminClient().performRequest(deleteRequest); try (InputStream is = response.getEntity().getContent()) { - assertTrue((boolean) XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true).get("acknowledged")); + assertTrue((boolean) XContentHelper.convertToMap(MediaTypeRegistry.JSON.xContent(), is, true).get("acknowledged")); } } catch (ResponseException e) { // 404 here just means we had no indexes @@ -695,7 +695,7 @@ private void wipeClusterSettings() throws IOException { if (mustClear) { Request request = new Request("PUT", "/_cluster/settings"); - request.setJsonEntity(Strings.toString(clearCommand)); + request.setJsonEntity(clearCommand.toString()); adminClient().performRequest(request); } } @@ -974,7 +974,7 @@ protected static void createIndex(String name, Settings settings, String mapping protected static void createIndex(String name, Settings settings, String mapping, String aliases) throws IOException { Request request = new Request("PUT", "/" + name); - String entity = "{\"settings\": " + Strings.toString(XContentType.JSON, settings); + String entity = "{\"settings\": " + Strings.toString(MediaTypeRegistry.JSON, settings); if (mapping != null) { entity += ",\"mappings\" : {" + mapping + "}"; } @@ -1000,7 +1000,7 @@ protected static void updateIndexSettings(String index, Settings.Builder setting private static void updateIndexSettings(String index, Settings settings) throws IOException { Request request = new Request("PUT", "/" + index + "/_settings"); - request.setJsonEntity(Strings.toString(XContentType.JSON, settings)); + request.setJsonEntity(Strings.toString(MediaTypeRegistry.JSON, settings)); client().performRequest(request); } @@ -1021,7 +1021,7 @@ protected static Map<String, Object> getIndexSettings(String index) throws IOExc request.addParameter("flat_settings", "true"); Response response = client().performRequest(request); try (InputStream is = response.getEntity().getContent()) { - return XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); + return XContentHelper.convertToMap(MediaTypeRegistry.JSON.xContent(), is, true); } } @@ -1059,10 +1059,10 @@ protected static boolean aliasExists(String index, String alias) throws IOExcept @SuppressWarnings("unchecked") protected static Map<String, Object> getAlias(final String index, final String alias) throws IOException { String endpoint = "/_alias"; - if (false == org.opensearch.core.common.Strings.isEmpty(index)) { + if (false == 
Strings.isEmpty(index)) { endpoint = index + endpoint; } - if (false == org.opensearch.core.common.Strings.isEmpty(alias)) { + if (false == Strings.isEmpty(alias)) { endpoint = endpoint + "/" + alias; } Map<String, Object> getAliasResponse = getAsMap(endpoint); @@ -1088,7 +1088,7 @@ protected static Map<String, Object> responseAsMap(Response response) throws IOE protected static void registerRepository(String repository, String type, boolean verify, Settings settings) throws IOException { final Request request = new Request(HttpPut.METHOD_NAME, "_snapshot/" + repository); request.addParameter("verify", Boolean.toString(verify)); - request.setJsonEntity(Strings.toString(XContentType.JSON, new PutRepositoryRequest(repository).type(type).settings(settings))); + request.setJsonEntity(Strings.toString(MediaTypeRegistry.JSON, new PutRepositoryRequest(repository).type(type).settings(settings))); final Response response = client().performRequest(request); assertAcked("Failed to create repository [" + repository + "] of type [" + type + "]: " + response, response); diff --git a/test/framework/src/main/java/org/opensearch/test/rest/RestActionTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/RestActionTestCase.java index df2d3790d42bb..a77865579f3b3 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/RestActionTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/RestActionTestCase.java @@ -32,15 +32,15 @@ package org.opensearch.test.rest; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; -import org.opensearch.action.ActionResponse; import org.opensearch.action.ActionType; +import org.opensearch.client.node.NodeClient; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.identity.IdentityService; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; -import org.opensearch.client.node.NodeClient; import org.opensearch.rest.RestController; import org.opensearch.rest.RestRequest; import org.opensearch.tasks.Task; @@ -103,7 +103,7 @@ protected void dispatchRequest(RestRequest request) { /** * A mocked {@link NodeClient} which can be easily reconfigured to verify arbitrary verification * functions, and can be reset to allow reconfiguration partway through a test without having to construct a new object. - * + * <p> * By default, will throw {@link AssertionError} when any execution method is called, unless configured otherwise using * {@link #setExecuteVerifier(BiFunction)} or {@link #setExecuteLocallyVerifier(BiFunction)}. 
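* <p>
* A hypothetical configuration sketch (MyRequest, MyResponse, and restRequest are assumed names, not part of this change):
* <pre>
* verifyingClient.setExecuteVerifier((actionType, request) -> {
*     assertThat(request, instanceOf(MyRequest.class));
*     return new MyResponse();
* });
* dispatchRequest(restRequest);
* </pre>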
*/ diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestCandidate.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestCandidate.java index e4fa8bedaa7f2..6589e7597b9ac 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestCandidate.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestCandidate.java @@ -31,10 +31,10 @@ package org.opensearch.test.rest.yaml; +import org.opensearch.test.rest.yaml.section.ClientYamlTestSection; import org.opensearch.test.rest.yaml.section.ClientYamlTestSuite; import org.opensearch.test.rest.yaml.section.SetupSection; import org.opensearch.test.rest.yaml.section.TeardownSection; -import org.opensearch.test.rest.yaml.section.ClientYamlTestSection; /** * Wraps {@link ClientYamlTestSection}s ready to be run. Each test section is associated to its {@link ClientYamlTestSuite}. diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContext.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContext.java index 635dc49ff5166..2095a8c5eaa1f 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContext.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContext.java @@ -41,11 +41,11 @@ import org.apache.lucene.util.BytesRef; import org.opensearch.Version; import org.opensearch.client.NodeSelector; -import org.opensearch.core.xcontent.MediaType; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -64,7 +64,7 @@ public class ClientYamlTestExecutionContext { private static final Logger logger = LogManager.getLogger(ClientYamlTestExecutionContext.class); - private static final XContentType[] STREAMING_CONTENT_TYPES = new XContentType[] { XContentType.JSON, XContentType.SMILE }; + private static final MediaType[] STREAMING_CONTENT_TYPES = new MediaType[] { MediaTypeRegistry.JSON, XContentType.SMILE }; private final Stash stash = new Stash(); private final ClientYamlTestClient clientYamlTestClient; @@ -168,7 +168,7 @@ private HttpEntity createEntity(List<Map<String, Object>> bodies, Map<String, St } } - private MediaType getContentType(Map<String, String> headers, XContentType[] supportedContentTypes) { + private MediaType getContentType(Map<String, String> headers, MediaType[] supportedContentTypes) { MediaType mediaType = null; String contentType = headers.get("Content-Type"); if (contentType != null) { @@ -180,12 +180,12 @@ private MediaType getContentType(Map<String, String> headers, XContentType[] sup if (randomizeContentType) { return RandomizedTest.randomFrom(supportedContentTypes); } - return XContentType.JSON; + return MediaTypeRegistry.JSON; } private BytesRef bodyAsBytesRef(Map<String, Object> bodyAsMap, MediaType mediaType) throws IOException { Map<String, Object> finalBodyAsMap = stash.replaceStashedValues(bodyAsMap); - try (XContentBuilder builder = XContentFactory.contentBuilder(mediaType)) { + try (XContentBuilder builder = MediaTypeRegistry.contentBuilder(mediaType)) { return 
BytesReference.bytes(builder.map(finalBodyAsMap)).toBytesRef(); } } diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestResponse.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestResponse.java index 0f14461eb0f86..ee036d1a0a483 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestResponse.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestResponse.java @@ -35,15 +35,15 @@ import org.apache.hc.core5.http.Header; import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.client.Response; -import org.opensearch.common.Strings; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import java.io.IOException; import java.io.UncheckedIOException; @@ -125,7 +125,7 @@ public Object getBody() throws IOException { public String getBodyAsString() { if (bodyAsString == null && body != null) { // content-type null means that text was returned - if (bodyContentType == null || bodyContentType == XContentType.JSON || bodyContentType == XContentType.YAML) { + if (bodyContentType == null || bodyContentType == MediaTypeRegistry.JSON || bodyContentType == XContentType.YAML) { bodyAsString = new String(body, StandardCharsets.UTF_8); } else { // if the body is in a binary format and gets requested as a string (e.g. to log a test failure), we convert it to json @@ -136,7 +136,7 @@ public String getBodyAsString() { ) { jsonBuilder.copyCurrentStructure(parser); } - bodyAsString = Strings.toString(jsonBuilder); + bodyAsString = jsonBuilder.toString(); } catch (IOException e) { throw new UncheckedIOException("unable to convert response body to a string format", e); } diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/DenylistedPathPatternMatcher.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/DenylistedPathPatternMatcher.java index eeaa76b6ca1b3..3a80f25c5d4ab 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/DenylistedPathPatternMatcher.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/DenylistedPathPatternMatcher.java @@ -35,7 +35,7 @@ /** * Matches denylist patterns. - * + * <p> * Currently the following syntax is supported: * * <ul> diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/Features.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/Features.java index 10fb1e52259a9..8e0bc03b08442 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/Features.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/Features.java @@ -41,7 +41,7 @@ * Allows registering additional features supported by the tests runner. * This way any runner can add extra features and use proper skip sections to avoid * breaking other runners until they have implemented the new feature as well.
- * + * <p> * Once all runners have implemented the feature, it can be removed from the list * and the related skip sections can be removed from the tests as well. */ diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java index a051e716e165f..12599f003077d 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java @@ -46,12 +46,12 @@ import org.opensearch.client.RestClientBuilder; import org.opensearch.client.WarningsHandler; import org.opensearch.client.sniff.OpenSearchNodesSniffer; -import org.opensearch.common.Strings; import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.PathUtils; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.Strings; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.test.rest.OpenSearchRestTestCase; import org.opensearch.test.rest.yaml.restspec.ClientYamlSuiteRestApi; import org.opensearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec; @@ -78,7 +78,7 @@ /** * Runs a suite of yaml tests shared with all the official OpenSearch * clients against an OpenSearch cluster. - * + * <p> * The suite timeout is extended to account for projects with a large number of tests. */ @TimeoutSuite(millis = 30 * TimeUnits.MINUTE) @@ -110,9 +110,9 @@ public abstract class OpenSearchClientYamlSuiteTestCase extends OpenSearchRestTe /** * This separator pattern matches ',' except when it is preceded by a '\'. * This allows us to support ',' within paths when it is escaped with a slash. - * + * <p> * For example, the path string "/a/b/c\,d/e/f,/foo/bar,/baz" is separated to "/a/b/c\,d/e/f", "/foo/bar" and "/baz". - * + * <p> * For reference, this regular expression feature is known as zero-width negative look-behind. * */ @@ -300,7 +300,7 @@ private static void addSuite(Path root, Path file, Map<String, Set<Path>> files) private static String[] resolvePathsProperty(String propertyName, String defaultValue) { String property = System.getProperty(propertyName); - if (!org.opensearch.core.common.Strings.hasLength(property)) { + if (!Strings.hasLength(property)) { return defaultValue == null ? Strings.EMPTY_ARRAY : new String[] { defaultValue }; } else { return property.split(PATHS_SEPARATOR); @@ -451,7 +451,7 @@ private void executeSection(ExecutableSection executableSection) { // Dump the stash on failure.
Instead of dumping it in true json we escape `\n`s so stack traces are easier to read logger.info( "Stash dump on test failure [{}]", - Strings.toString(XContentType.JSON, restTestExecutionContext.stash(), true, true) + Strings.toString(MediaTypeRegistry.JSON, restTestExecutionContext.stash(), true, true) .replace("\\n", "\n") .replace("\\r", "\r") .replace("\\t", "\t") diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/restspec/ClientYamlSuiteRestSpec.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/restspec/ClientYamlSuiteRestSpec.java index 6d913722bd866..7de8e49f1526d 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/restspec/ClientYamlSuiteRestSpec.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/restspec/ClientYamlSuiteRestSpec.java @@ -33,9 +33,9 @@ import org.opensearch.common.io.PathUtils; import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; import java.io.IOException; import java.io.InputStream; diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/ApiCallSection.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/ApiCallSection.java index 335dcc3c6d28d..9e6b043181c8c 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/ApiCallSection.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/ApiCallSection.java @@ -31,14 +31,14 @@ package org.opensearch.test.rest.yaml.section; +import org.opensearch.client.NodeSelector; + import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; -import org.opensearch.client.NodeSelector; - import static java.util.Collections.unmodifiableMap; /** diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/ClientYamlTestSuite.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/ClientYamlTestSuite.java index 961d931f43c73..2e7c57a62d1a3 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/ClientYamlTestSuite.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/ClientYamlTestSuite.java @@ -32,12 +32,12 @@ package org.opensearch.test.rest.yaml.section; import org.opensearch.client.NodeSelector; -import org.opensearch.core.common.ParsingException; import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.yaml.YamlXContent; +import org.opensearch.core.common.ParsingException; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.yaml.YamlXContent; import java.io.IOException; import java.nio.ByteBuffer; diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java index 29330c47f7d4d..ae1c33b1eb47f 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java @@ -38,20 +38,20 @@ import org.opensearch.client.HasAttributeNodeSelector; import 
org.opensearch.client.Node; import org.opensearch.client.NodeSelector; -import org.opensearch.core.common.ParsingException; import org.opensearch.common.collect.Tuple; import org.opensearch.common.logging.HeaderWarning; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentLocation; import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.test.hamcrest.RegexMatcher; import org.opensearch.test.rest.yaml.ClientYamlTestExecutionContext; import org.opensearch.test.rest.yaml.ClientYamlTestResponse; import org.opensearch.test.rest.yaml.ClientYamlTestResponseException; -import org.opensearch.test.hamcrest.RegexMatcher; import java.io.IOException; import java.util.ArrayList; @@ -81,7 +81,7 @@ /** * Represents a do section: - * + * <p> * - do: * catch: missing * headers: diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java index 2132c2ebab51c..8e929eff44348 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java @@ -47,7 +47,7 @@ /** * Represents a gte assert section: - * + * <p> * - gte: { fields._ttl: 0 } */ public class GreaterThanEqualToAssertion extends Assertion { diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/IsFalseAssertion.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/IsFalseAssertion.java index 6cfbbfc5df8d5..999486ad04455 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/IsFalseAssertion.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/IsFalseAssertion.java @@ -45,7 +45,7 @@ /** * Represents an is_false assert section: - * + * <p> * - is_false: get.fields.bar * */ diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/IsTrueAssertion.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/IsTrueAssertion.java index e746542d89126..bf5822406f014 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/IsTrueAssertion.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/IsTrueAssertion.java @@ -46,7 +46,7 @@ /** * Represents an is_true assert section: - * + * <p> * - is_true: get.fields.bar * */ diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanAssertion.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanAssertion.java index 263e0c8fb9c42..d6e2ae1e23996 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanAssertion.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanAssertion.java @@ -46,7 +46,7 @@ /** * Represents a lt assert section: - * + * <p> * - lt: { fields._ttl: 20000} * */ diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java 
b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java index f0e7d0c01b8f0..ee46c04496f32 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java @@ -47,7 +47,7 @@ /** * Represents a lte assert section: - * + * <p> * - lte: { fields._ttl: 0 } */ public class LessThanOrEqualToAssertion extends Assertion { diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/MatchAssertion.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/MatchAssertion.java index a97e3e4cb77ed..77d8f3154729e 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/MatchAssertion.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/MatchAssertion.java @@ -50,7 +50,7 @@ /** * Represents a match assert section: - * + * <p> * - match: { get.fields._routing: "5" } * */ diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/SetSection.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/SetSection.java index a561a53119a96..c8004d9807cb9 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/SetSection.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/SetSection.java @@ -42,7 +42,7 @@ /** * Represents a set section: - * + * <p> * - set: {_scroll_id: scroll_id} * */ diff --git a/test/framework/src/main/java/org/opensearch/test/store/MockFSDirectoryFactory.java b/test/framework/src/main/java/org/opensearch/test/store/MockFSDirectoryFactory.java index ac4103c8dbbee..9c42ea2672601 100644 --- a/test/framework/src/main/java/org/opensearch/test/store/MockFSDirectoryFactory.java +++ b/test/framework/src/main/java/org/opensearch/test/store/MockFSDirectoryFactory.java @@ -33,11 +33,12 @@ package org.opensearch.test.store; import com.carrotsearch.randomizedtesting.generators.RandomPicks; + import org.apache.logging.log4j.Logger; import org.apache.lucene.index.CheckIndex; -import org.apache.lucene.tests.store.BaseDirectoryWrapper; import org.apache.lucene.store.Directory; import org.apache.lucene.store.LockObtainFailedException; +import org.apache.lucene.tests.store.BaseDirectoryWrapper; import org.apache.lucene.tests.store.MockDirectoryWrapper; import org.apache.lucene.tests.util.LuceneTestCase; import org.apache.lucene.tests.util.TestRuleMarkFailure; @@ -47,9 +48,9 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.ShardPath; import org.opensearch.index.store.FsDirectoryFactory; import org.opensearch.index.store.Store; diff --git a/test/framework/src/main/java/org/opensearch/test/store/MockFSIndexStore.java b/test/framework/src/main/java/org/opensearch/test/store/MockFSIndexStore.java index 46ca6c456573e..60bf0f3ae3322 100644 --- a/test/framework/src/main/java/org/opensearch/test/store/MockFSIndexStore.java +++ b/test/framework/src/main/java/org/opensearch/test/store/MockFSIndexStore.java @@ -38,11 +38,11 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import 
org.opensearch.common.settings.Settings; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexModule; import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.plugins.IndexStorePlugin; import org.opensearch.plugins.Plugin; diff --git a/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetry.java b/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetry.java index c02ab1d737303..44daf1b1554e0 100644 --- a/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetry.java +++ b/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetry.java @@ -10,23 +10,24 @@ import org.opensearch.telemetry.Telemetry; import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.telemetry.metrics.Counter; +import org.opensearch.telemetry.metrics.Histogram; import org.opensearch.telemetry.metrics.MetricsTelemetry; -import org.opensearch.test.telemetry.tracing.MockTracingTelemetry; +import org.opensearch.telemetry.metrics.noop.NoopCounter; +import org.opensearch.telemetry.metrics.noop.NoopHistogram; import org.opensearch.telemetry.tracing.TracingTelemetry; +import org.opensearch.test.telemetry.tracing.MockTracingTelemetry; /** * Mock {@link Telemetry} implementation for testing. */ public class MockTelemetry implements Telemetry { - - private final TelemetrySettings settings; - /** * Constructor with settings. * @param settings telemetry settings. */ public MockTelemetry(TelemetrySettings settings) { - this.settings = settings; + } @Override @@ -37,6 +38,25 @@ public TracingTelemetry getTracingTelemetry() { @Override public MetricsTelemetry getMetricsTelemetry() { return new MetricsTelemetry() { + @Override + public Counter createCounter(String name, String description, String unit) { + return NoopCounter.INSTANCE; + } + + @Override + public Counter createUpDownCounter(String name, String description, String unit) { + return NoopCounter.INSTANCE; + } + + @Override + public Histogram createHistogram(String name, String description, String unit) { + return NoopHistogram.INSTANCE; + } + + @Override + public void close() { + + } }; } } diff --git a/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetryPlugin.java b/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetryPlugin.java index 41cc5c1e77a34..4f483098caf82 100644 --- a/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetryPlugin.java +++ b/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetryPlugin.java @@ -8,12 +8,13 @@ package org.opensearch.test.telemetry; -import java.util.Optional; import org.opensearch.plugins.Plugin; import org.opensearch.plugins.TelemetryPlugin; import org.opensearch.telemetry.Telemetry; import org.opensearch.telemetry.TelemetrySettings; +import java.util.Optional; + /** * Mock {@link TelemetryPlugin} implementation for testing. 
*/ diff --git a/test/framework/src/main/java/org/opensearch/test/transport/FakeTransport.java b/test/framework/src/main/java/org/opensearch/test/transport/FakeTransport.java index b0aea3d2e3841..c0cfac956d890 100644 --- a/test/framework/src/main/java/org/opensearch/test/transport/FakeTransport.java +++ b/test/framework/src/main/java/org/opensearch/test/transport/FakeTransport.java @@ -32,11 +32,11 @@ package org.opensearch.test.transport; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.common.component.AbstractLifecycleComponent; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.transport.CloseableConnection; import org.opensearch.transport.ConnectionProfile; import org.opensearch.transport.Transport; diff --git a/test/framework/src/main/java/org/opensearch/test/transport/MockTransport.java b/test/framework/src/main/java/org/opensearch/test/transport/MockTransport.java index 36f3e6cb4b692..24aef714cc259 100644 --- a/test/framework/src/main/java/org/opensearch/test/transport/MockTransport.java +++ b/test/framework/src/main/java/org/opensearch/test/transport/MockTransport.java @@ -38,12 +38,14 @@ import org.opensearch.common.Randomness; import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.CloseableConnection; import org.opensearch.transport.ClusterConnectionManager; @@ -54,7 +56,6 @@ import org.opensearch.transport.TransportMessageListener; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestOptions; -import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; @@ -80,7 +81,8 @@ public TransportService createTransportService( TransportInterceptor interceptor, Function<BoundTransportAddress, DiscoveryNode> localNodeFactory, @Nullable ClusterSettings clusterSettings, - Set<String> taskHeaders + Set<String> taskHeaders, + Tracer tracer ) { StubbableConnectionManager connectionManager = new StubbableConnectionManager(new ClusterConnectionManager(settings, this)); connectionManager.setDefaultNodeConnectedBehavior((cm, node) -> false); @@ -93,7 +95,8 @@ public TransportService createTransportService( localNodeFactory, clusterSettings, taskHeaders, - connectionManager + connectionManager, + tracer ); } diff --git 
a/test/framework/src/main/java/org/opensearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/opensearch/test/transport/MockTransportService.java index 87f198fb40ee5..6bf5381b62cc9 100644 --- a/test/framework/src/main/java/org/opensearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/opensearch/test/transport/MockTransportService.java @@ -35,33 +35,35 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterModule; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Nullable; import org.opensearch.common.UUIDs; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.MockPageCacheRecycler; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.RunOnce; import org.opensearch.common.util.io.IOUtils; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.node.Node; import org.opensearch.plugins.Plugin; import org.opensearch.tasks.TaskManager; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.tasks.MockTaskManager; import org.opensearch.threadpool.ThreadPool; -import org.opensearch.transport.ConnectTransportException; import org.opensearch.transport.ClusterConnectionManager; +import org.opensearch.transport.ConnectTransportException; import org.opensearch.transport.ConnectionProfile; import org.opensearch.transport.RequestHandlerRegistry; import org.opensearch.transport.Transport; @@ -114,18 +116,19 @@ public List<Setting<?>> getSettings() { } } - public static MockTransportService createNewService(Settings settings, Version version, ThreadPool threadPool) { - return createNewService(settings, version, threadPool, null); + public static MockTransportService createNewService(Settings settings, Version version, ThreadPool threadPool, Tracer tracer) { + return createNewService(settings, version, threadPool, null, tracer); } public static MockTransportService createNewService( Settings settings, Version version, ThreadPool threadPool, - @Nullable ClusterSettings clusterSettings + @Nullable ClusterSettings clusterSettings, + Tracer tracer ) { MockNioTransport mockTransport = newMockTransport(settings, version, threadPool); - return createNewService(settings, mockTransport, version, threadPool, clusterSettings, Collections.emptySet()); + return createNewService(settings, mockTransport, version, threadPool, clusterSettings, Collections.emptySet(), tracer); } public static 
MockNioTransport newMockTransport(Settings settings, Version version, ThreadPool threadPool) { @@ -138,7 +141,8 @@ public static MockNioTransport newMockTransport(Settings settings, Version versi new NetworkService(Collections.emptyList()), new MockPageCacheRecycler(settings), namedWriteableRegistry, - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ); } @@ -148,9 +152,10 @@ public static MockTransportService createNewService( Version version, ThreadPool threadPool, @Nullable ClusterSettings clusterSettings, - Set<String> taskHeaders + Set<String> taskHeaders, + Tracer tracer ) { - return createNewService(settings, transport, version, threadPool, clusterSettings, taskHeaders, NOOP_TRANSPORT_INTERCEPTOR); + return createNewService(settings, transport, version, threadPool, clusterSettings, taskHeaders, NOOP_TRANSPORT_INTERCEPTOR, tracer); } public static MockTransportService createNewService( @@ -160,7 +165,8 @@ public static MockTransportService createNewService( ThreadPool threadPool, @Nullable ClusterSettings clusterSettings, Set<String> taskHeaders, - TransportInterceptor interceptor + TransportInterceptor interceptor, + Tracer tracer ) { return new MockTransportService( settings, @@ -176,7 +182,8 @@ public static MockTransportService createNewService( version ), clusterSettings, - taskHeaders + taskHeaders, + tracer ); } @@ -194,7 +201,8 @@ public MockTransportService( Transport transport, ThreadPool threadPool, TransportInterceptor interceptor, - @Nullable ClusterSettings clusterSettings + @Nullable ClusterSettings clusterSettings, + Tracer tracer ) { this( settings, @@ -207,7 +215,8 @@ public MockTransportService( settings.get(Node.NODE_NAME_SETTING.getKey(), UUIDs.randomBase64UUID()) ), clusterSettings, - Collections.emptySet() + Collections.emptySet(), + tracer ); } @@ -225,9 +234,10 @@ public MockTransportService( TransportInterceptor interceptor, Function<BoundTransportAddress, DiscoveryNode> localNodeFactory, @Nullable ClusterSettings clusterSettings, - Set<String> taskHeaders + Set<String> taskHeaders, + Tracer tracer ) { - this(settings, new StubbableTransport(transport), threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders); + this(settings, new StubbableTransport(transport), threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders, tracer); } private MockTransportService( @@ -237,7 +247,8 @@ private MockTransportService( TransportInterceptor interceptor, Function<BoundTransportAddress, DiscoveryNode> localNodeFactory, @Nullable ClusterSettings clusterSettings, - Set<String> taskHeaders + Set<String> taskHeaders, + Tracer tracer ) { super( settings, @@ -247,7 +258,8 @@ private MockTransportService( localNodeFactory, clusterSettings, taskHeaders, - new StubbableConnectionManager(new ClusterConnectionManager(settings, transport)) + new StubbableConnectionManager(new ClusterConnectionManager(settings, transport)), + tracer ); this.original = transport.getDelegate(); } diff --git a/test/framework/src/main/java/org/opensearch/test/transport/StubbableConnectionManager.java b/test/framework/src/main/java/org/opensearch/test/transport/StubbableConnectionManager.java index 53cae12871d92..37df90fb103a3 100644 --- a/test/framework/src/main/java/org/opensearch/test/transport/StubbableConnectionManager.java +++ b/test/framework/src/main/java/org/opensearch/test/transport/StubbableConnectionManager.java @@ -31,12 +31,12 @@ package org.opensearch.test.transport; -import org.opensearch.action.ActionListener; import 
org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.transport.ConnectTransportException; -import org.opensearch.transport.ConnectionProfile; import org.opensearch.transport.ConnectionManager; +import org.opensearch.transport.ConnectionProfile; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportConnectionListener; diff --git a/test/framework/src/main/java/org/opensearch/test/transport/StubbableTransport.java b/test/framework/src/main/java/org/opensearch/test/transport/StubbableTransport.java index 8c3b32dbb4ca4..11e1bdf8dbcd6 100644 --- a/test/framework/src/main/java/org/opensearch/test/transport/StubbableTransport.java +++ b/test/framework/src/main/java/org/opensearch/test/transport/StubbableTransport.java @@ -33,12 +33,12 @@ package org.opensearch.test.transport; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.common.component.Lifecycle; -import org.opensearch.common.component.LifecycleListener; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.common.lifecycle.Lifecycle; +import org.opensearch.common.lifecycle.LifecycleListener; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.tasks.Task; import org.opensearch.transport.ConnectionProfile; import org.opensearch.transport.RequestHandlerRegistry; diff --git a/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java index 29619a541722c..e43b0756e2f2b 100644 --- a/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java @@ -39,35 +39,37 @@ import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.Constants; +import org.opensearch.ExceptionsHelper; import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; -import org.opensearch.ExceptionsHelper; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.ActionListenerResponseHandler; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Nullable; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.network.CloseableChannel; import org.opensearch.common.network.NetworkAddress; import org.opensearch.common.network.NetworkUtils; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import 
org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.node.Node; import org.opensearch.tasks.Task; -import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.MockLogAppender; +import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; import org.opensearch.test.junit.annotations.TestLogging; import org.opensearch.test.transport.MockTransportService; @@ -226,7 +228,8 @@ private MockTransportService buildService( threadPool, clusterSettings, Collections.emptySet(), - interceptor + interceptor, + NoopTracer.INSTANCE ); service.start(); if (acceptRequests) { @@ -1280,9 +1283,17 @@ public String executor() { Level.TRACE, notSeenReceived ); + final String notSeenResponseSent = ".*\\[internal:testNotSeen].*sent response.*"; + final MockLogAppender.LoggingExpectation notSeenResponseSentExpectation = new MockLogAppender.PatternSeenEventExpectation( + "sent response", + "org.opensearch.transport.TransportService.tracer", + Level.TRACE, + notSeenResponseSent + ); appender.addExpectation(notSeenSentExpectation); appender.addExpectation(notSeenReceivedExpectation); + appender.addExpectation(notSeenResponseSentExpectation); PlainTransportFuture<StringMessageResponse> future = new PlainTransportFuture<>(noopResponseHandler); serviceA.sendRequest(nodeB, "internal:testNotSeen", new StringMessageRequest(""), future); diff --git a/test/framework/src/main/java/org/opensearch/transport/FakeTcpChannel.java b/test/framework/src/main/java/org/opensearch/transport/FakeTcpChannel.java index f7571c3e92081..786c2ffc05a19 100644 --- a/test/framework/src/main/java/org/opensearch/transport/FakeTcpChannel.java +++ b/test/framework/src/main/java/org/opensearch/transport/FakeTcpChannel.java @@ -31,9 +31,9 @@ package org.opensearch.transport; -import org.opensearch.action.ActionListener; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.concurrent.CompletableContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesReference; import java.net.InetSocketAddress; import java.util.concurrent.atomic.AtomicReference; diff --git a/test/framework/src/main/java/org/opensearch/transport/TestResponse.java b/test/framework/src/main/java/org/opensearch/transport/TestResponse.java index 09dd50d656004..14db8b3372bf2 100644 --- a/test/framework/src/main/java/org/opensearch/transport/TestResponse.java +++ b/test/framework/src/main/java/org/opensearch/transport/TestResponse.java @@ -33,6 +33,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; diff --git a/test/framework/src/main/java/org/opensearch/transport/TestTransportChannel.java b/test/framework/src/main/java/org/opensearch/transport/TestTransportChannel.java index 819094c9fb089..e0852cf882345 100644 --- 
a/test/framework/src/main/java/org/opensearch/transport/TestTransportChannel.java +++ b/test/framework/src/main/java/org/opensearch/transport/TestTransportChannel.java @@ -32,7 +32,8 @@ package org.opensearch.transport; -import org.opensearch.action.ActionListener; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.transport.TransportResponse; public class TestTransportChannel implements TransportChannel { diff --git a/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransport.java index bfacf9d3f2080..cd6bf02efef6f 100644 --- a/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransport.java +++ b/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransport.java @@ -37,23 +37,23 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.OpenSearchException; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.core.common.bytes.CompositeBytesReference; import org.opensearch.common.bytes.ReleasableBytesReference; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.network.NetworkService; import org.opensearch.common.recycler.Recycler; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.common.util.io.IOUtils; -import org.opensearch.common.lease.Releasable; -import org.opensearch.common.lease.Releasables; -import org.opensearch.indices.breaker.CircuitBreakerService; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.bytes.CompositeBytesReference; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.nio.BytesChannelContext; import org.opensearch.nio.BytesWriteHandler; import org.opensearch.nio.ChannelFactory; @@ -65,6 +65,7 @@ import org.opensearch.nio.NioSocketChannel; import org.opensearch.nio.Page; import org.opensearch.nio.ServerChannelContext; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.ConnectionProfile; import org.opensearch.transport.InboundPipeline; @@ -110,9 +111,10 @@ public MockNioTransport( NetworkService networkService, PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry, - CircuitBreakerService circuitBreakerService + CircuitBreakerService circuitBreakerService, + Tracer tracer ) { - super(settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService); + super(settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService, tracer); this.transportThreadWatchdog = new TransportThreadWatchdog(threadPool, settings); } diff --git a/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransportPlugin.java 
b/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransportPlugin.java index e51f96a86bebf..deb489614be26 100644 --- a/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransportPlugin.java +++ b/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransportPlugin.java @@ -32,13 +32,14 @@ package org.opensearch.transport.nio; import org.opensearch.Version; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.PageCacheRecycler; -import org.opensearch.indices.breaker.CircuitBreakerService; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.plugins.NetworkPlugin; import org.opensearch.plugins.Plugin; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; @@ -57,7 +58,8 @@ public Map<String, Supplier<Transport>> getTransports( PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, - NetworkService networkService + NetworkService networkService, + Tracer tracer ) { return Collections.singletonMap( MOCK_NIO_TRANSPORT_NAME, @@ -68,7 +70,8 @@ public Map<String, Supplier<Transport>> getTransports( networkService, pageCacheRecycler, namedWriteableRegistry, - circuitBreakerService + circuitBreakerService, + tracer ) ); } diff --git a/test/framework/src/test/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerServiceTests.java b/test/framework/src/test/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerServiceTests.java index f09e62fa574e6..b781e54eb0c3d 100644 --- a/test/framework/src/test/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerServiceTests.java +++ b/test/framework/src/test/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerServiceTests.java @@ -32,7 +32,6 @@ package org.opensearch.cluster.service; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.support.replication.ClusterStateCreationUtils; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; @@ -42,6 +41,7 @@ import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.ActionListener; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; diff --git a/test/framework/src/test/java/org/opensearch/common/hash/HashFunctionTestCaseTests.java b/test/framework/src/test/java/org/opensearch/common/hash/HashFunctionTestCaseTests.java new file mode 100644 index 0000000000000..d5fdaf10999fc --- /dev/null +++ b/test/framework/src/test/java/org/opensearch/common/hash/HashFunctionTestCaseTests.java @@ -0,0 +1,68 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
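Illustrative aside: with the Tracer parameter threaded through the createNewService overloads and the MockNioTransportPlugin factory above, test code that builds a MockTransportService now supplies a tracer explicitly; the call sites migrated in this diff all pass NoopTracer.INSTANCE. A minimal sketch of the updated call, assuming transport and threadPool already exist in the enclosing test:

MockTransportService service = MockTransportService.createNewService(
    Settings.EMPTY,
    transport,                 // any Transport under test, assumed available
    Version.CURRENT,
    threadPool,                // assumed available in the enclosing test
    null,                      // no dynamic ClusterSettings in this sketch
    Collections.emptySet(),    // no extra task headers
    NoopTracer.INSTANCE        // tracing disabled, matching the migrated tests
);
service.start();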
+ */ + +package org.opensearch.common.hash; + +import org.apache.lucene.util.StringHelper; +import org.opensearch.test.OpenSearchTestCase; + +import java.lang.invoke.MethodHandles; +import java.lang.invoke.VarHandle; +import java.nio.ByteOrder; +import java.util.Arrays; + +public class HashFunctionTestCaseTests extends OpenSearchTestCase { + private static final VarHandle INT_HANDLE = MethodHandles.byteArrayViewVarHandle(int[].class, ByteOrder.LITTLE_ENDIAN); + + /** + * Asserts the positive case where a hash function passes the avalanche test. + */ + public void testStrongHashFunction() { + HashFunctionTestCase murmur3 = new HashFunctionTestCase() { + private final byte[] scratch = new byte[4]; + + @Override + public byte[] hash(byte[] input) { + int hash = StringHelper.murmurhash3_x86_32(input, 0, input.length, StringHelper.GOOD_FAST_HASH_SEED); + INT_HANDLE.set(scratch, 0, hash); + return scratch; + } + + @Override + public int outputBits() { + return 32; + } + }; + + murmur3.testAvalanche(); + } + + /** + * Asserts the negative case where a hash function fails the avalanche test. + */ + public void testWeakHashFunction() { + HashFunctionTestCase arraysHashCode = new HashFunctionTestCase() { + private final byte[] scratch = new byte[4]; + + @Override + public byte[] hash(byte[] input) { + int hash = Arrays.hashCode(input); + INT_HANDLE.set(scratch, 0, hash); + return scratch; + } + + @Override + public int outputBits() { + return 32; + } + }; + + AssertionError ex = expectThrows(AssertionError.class, arraysHashCode::testAvalanche); + assertTrue(ex.getMessage().contains("bias exceeds threshold")); + } +} diff --git a/test/framework/src/test/java/org/opensearch/node/MockNodeTests.java b/test/framework/src/test/java/org/opensearch/node/MockNodeTests.java index 48e18b4afe649..e59a7df32ffca 100644 --- a/test/framework/src/test/java/org/opensearch/node/MockNodeTests.java +++ b/test/framework/src/test/java/org/opensearch/node/MockNodeTests.java @@ -39,8 +39,8 @@ import org.opensearch.plugins.Plugin; import org.opensearch.search.MockSearchService; import org.opensearch.search.SearchService; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.MockHttpTransport; +import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; import java.util.ArrayList; diff --git a/test/framework/src/test/java/org/opensearch/test/AbstractXContentTestCaseTests.java b/test/framework/src/test/java/org/opensearch/test/AbstractXContentTestCaseTests.java index f47e0f9ea75b5..32cbdb62ab429 100644 --- a/test/framework/src/test/java/org/opensearch/test/AbstractXContentTestCaseTests.java +++ b/test/framework/src/test/java/org/opensearch/test/AbstractXContentTestCaseTests.java @@ -34,11 +34,11 @@ import com.carrotsearch.randomizedtesting.RandomizedContext; +import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import java.util.Map; @@ -59,14 +59,14 @@ public void testInsertRandomFieldsAndShuffle() throws Exception { 1, () -> AbstractXContentTestCase.insertRandomFieldsAndShuffle( BytesReference.bytes(builder), - XContentType.JSON, + MediaTypeRegistry.JSON, true, new String[] {}, null, this::createParser ) ); - try (XContentParser parser = 
createParser(XContentType.JSON.xContent(), insertRandomFieldsAndShuffle)) { + try (XContentParser parser = createParser(MediaTypeRegistry.JSON.xContent(), insertRandomFieldsAndShuffle)) { Map<String, Object> mapOrdered = parser.mapOrdered(); assertThat(mapOrdered.size(), equalTo(2)); assertThat(mapOrdered.keySet().iterator().next(), not(equalTo("field"))); diff --git a/test/framework/src/test/java/org/opensearch/test/ParameterizedDynamicSettingsOpenSearchIntegTests.java b/test/framework/src/test/java/org/opensearch/test/ParameterizedDynamicSettingsOpenSearchIntegTests.java new file mode 100644 index 0000000000000..6dd14e06248a9 --- /dev/null +++ b/test/framework/src/test/java/org/opensearch/test/ParameterizedDynamicSettingsOpenSearchIntegTests.java @@ -0,0 +1,43 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.test; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.common.settings.Settings; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; + +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.hamcrest.CoreMatchers.equalTo; + +public class ParameterizedDynamicSettingsOpenSearchIntegTests extends ParameterizedDynamicSettingsOpenSearchIntegTestCase { + public ParameterizedDynamicSettingsOpenSearchIntegTests(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + public void testSettings() throws IOException { + final ClusterStateResponse cluster = client().admin().cluster().prepareState().all().get(); + assertThat( + CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(cluster.getState().getMetadata().settings()), + equalTo(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(settings)) + ); + } +} diff --git a/test/framework/src/test/java/org/opensearch/test/ParameterizedStaticSettingsOpenSearchIntegTests.java b/test/framework/src/test/java/org/opensearch/test/ParameterizedStaticSettingsOpenSearchIntegTests.java new file mode 100644 index 0000000000000..f38c1ecd26429 --- /dev/null +++ b/test/framework/src/test/java/org/opensearch/test/ParameterizedStaticSettingsOpenSearchIntegTests.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
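Illustrative aside: the avalanche check exercised by HashFunctionTestCaseTests just above rests on a simple property: flipping any single input bit should flip each output bit with probability close to 1/2. The following self-contained sketch shows the general bias-estimation technique, not necessarily the exact HashFunctionTestCase implementation:

import java.util.Random;
import java.util.function.Function;

public final class AvalancheSketch {
    /** Returns the worst per-output-bit deviation from the ideal 0.5 flip rate. */
    public static double maxBias(Function<byte[], Integer> hash) {
        Random random = new Random(42);
        long[] flips = new long[32];
        long samples = 0;
        for (int trial = 0; trial < 10_000; trial++) {
            byte[] input = new byte[8];
            random.nextBytes(input);
            int base = hash.apply(input);
            for (int bit = 0; bit < input.length * 8; bit++) {
                input[bit >>> 3] ^= (byte) (1 << (bit & 7)); // flip one input bit
                int diff = base ^ hash.apply(input);
                input[bit >>> 3] ^= (byte) (1 << (bit & 7)); // restore it
                for (int out = 0; out < 32; out++) {
                    if (((diff >>> out) & 1) != 0) {
                        flips[out]++;
                    }
                }
                samples++;
            }
        }
        double worst = 0;
        for (long f : flips) {
            worst = Math.max(worst, Math.abs((double) f / samples - 0.5));
        }
        return worst; // a weak hash such as Arrays.hashCode shows a large bias here
    }
}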
+ */ + +package org.opensearch.test; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.action.admin.cluster.node.info.NodeInfo; +import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.opensearch.common.settings.Settings; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; + +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.hamcrest.CoreMatchers.equalTo; + +public class ParameterizedStaticSettingsOpenSearchIntegTests extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + + public ParameterizedStaticSettingsOpenSearchIntegTests(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + public void testSettings() throws IOException { + final NodesInfoResponse nodes = client().admin().cluster().prepareNodesInfo().get(); + for (final NodeInfo node : nodes.getNodes()) { + assertThat( + CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(node.getSettings()), + equalTo(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(settings)) + ); + } + } +} diff --git a/test/framework/src/test/java/org/opensearch/test/SuiteScopedParameterizedDynamicSettingsOpenSearchIntegTests.java b/test/framework/src/test/java/org/opensearch/test/SuiteScopedParameterizedDynamicSettingsOpenSearchIntegTests.java new file mode 100644 index 0000000000000..1f9a7cb87ae15 --- /dev/null +++ b/test/framework/src/test/java/org/opensearch/test/SuiteScopedParameterizedDynamicSettingsOpenSearchIntegTests.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.test; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.common.settings.Settings; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; + +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.hamcrest.CoreMatchers.equalTo; + +@OpenSearchIntegTestCase.SuiteScopeTestCase +public class SuiteScopedParameterizedDynamicSettingsOpenSearchIntegTests extends ParameterizedDynamicSettingsOpenSearchIntegTestCase { + public SuiteScopedParameterizedDynamicSettingsOpenSearchIntegTests(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + public void testSettings() throws IOException { + final ClusterStateResponse cluster = client().admin().cluster().prepareState().all().get(); + assertThat( + CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(cluster.getState().getMetadata().settings()), + equalTo(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(settings)) + ); + } +} diff --git a/test/framework/src/test/java/org/opensearch/test/SuiteScopedParameterizedStaticSettingsOpenSearchIntegTests.java b/test/framework/src/test/java/org/opensearch/test/SuiteScopedParameterizedStaticSettingsOpenSearchIntegTests.java new file mode 100644 index 0000000000000..36ca14e453158 --- /dev/null +++ b/test/framework/src/test/java/org/opensearch/test/SuiteScopedParameterizedStaticSettingsOpenSearchIntegTests.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
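Illustrative aside: the parameterized suites in this stretch of the diff differ only in cluster scope and in whether the setting under test is dynamic or static; the pattern is otherwise identical. A condensed skeleton of the shared shape (the class name and setting key below are placeholders, not from the diff):

// randomizedtesting invokes the constructor once per Object[] returned from
// the @ParametersFactory method, so each settings combination runs as its
// own test instance.
public class MyParameterizedSettingsIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase {
    public MyParameterizedSettingsIT(Settings staticSettings) {
        super(staticSettings);
    }

    @ParametersFactory
    public static Collection<Object[]> parameters() {
        return Arrays.asList(
            new Object[] { Settings.builder().put("example.setting", false).build() }, // hypothetical key
            new Object[] { Settings.builder().put("example.setting", true).build() }
        );
    }
}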
+ */ + +package org.opensearch.test; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.action.admin.cluster.node.info.NodeInfo; +import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.opensearch.common.settings.Settings; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; + +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.hamcrest.CoreMatchers.equalTo; + +@OpenSearchIntegTestCase.SuiteScopeTestCase +public class SuiteScopedParameterizedStaticSettingsOpenSearchIntegTests extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + public SuiteScopedParameterizedStaticSettingsOpenSearchIntegTests(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + public void testSettings() throws IOException { + final NodesInfoResponse nodes = client().admin().cluster().prepareNodesInfo().get(); + for (final NodeInfo node : nodes.getNodes()) { + assertThat( + CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(node.getSettings()), + equalTo(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(settings)) + ); + } + } +} diff --git a/test/framework/src/test/java/org/opensearch/test/TestScopedParameterizedDynamicSettingsOpenSearchIntegTests.java b/test/framework/src/test/java/org/opensearch/test/TestScopedParameterizedDynamicSettingsOpenSearchIntegTests.java new file mode 100644 index 0000000000000..84caebdb4302f --- /dev/null +++ b/test/framework/src/test/java/org/opensearch/test/TestScopedParameterizedDynamicSettingsOpenSearchIntegTests.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.test; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.common.settings.Settings; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; + +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.hamcrest.CoreMatchers.equalTo; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) +public class TestScopedParameterizedDynamicSettingsOpenSearchIntegTests extends ParameterizedDynamicSettingsOpenSearchIntegTestCase { + public TestScopedParameterizedDynamicSettingsOpenSearchIntegTests(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + public void testSettings() throws IOException { + final ClusterStateResponse cluster = client().admin().cluster().prepareState().all().get(); + assertThat( + CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(cluster.getState().getMetadata().settings()), + equalTo(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(settings)) + ); + } +} diff --git a/test/framework/src/test/java/org/opensearch/test/TestScopedParameterizedStaticSettingsOpenSearchIntegTests.java b/test/framework/src/test/java/org/opensearch/test/TestScopedParameterizedStaticSettingsOpenSearchIntegTests.java new file mode 100644 index 0000000000000..6df8ad2c27210 --- /dev/null +++ b/test/framework/src/test/java/org/opensearch/test/TestScopedParameterizedStaticSettingsOpenSearchIntegTests.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
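Illustrative aside: the @ClusterScope annotation on the TEST-scoped variants controls cluster reuse; as I read the framework, Scope.TEST provisions a fresh cluster per test method, while the @SuiteScopeTestCase classes above share one cluster across the whole class. The annotation usage, copied in shape from the diff:

@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST)
public class MyTestScopedIT extends OpenSearchIntegTestCase {
    // each test method gets an isolated cluster, at the cost of slower runs
}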
+ */ + +package org.opensearch.test; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.action.admin.cluster.node.info.NodeInfo; +import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.opensearch.common.settings.Settings; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; + +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.hamcrest.CoreMatchers.equalTo; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) +public class TestScopedParameterizedStaticSettingsOpenSearchIntegTests extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + public TestScopedParameterizedStaticSettingsOpenSearchIntegTests(Settings staticSettings) { + super(staticSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + public void testSettings() throws IOException { + final NodesInfoResponse nodes = client().admin().cluster().prepareNodesInfo().get(); + for (final NodeInfo node : nodes.getNodes()) { + assertThat( + CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(node.getSettings()), + equalTo(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(settings)) + ); + } + } +} diff --git a/test/framework/src/test/java/org/opensearch/test/XContentTestUtilsTests.java b/test/framework/src/test/java/org/opensearch/test/XContentTestUtilsTests.java index 4e1cb8debb4fc..30636f8521354 100644 --- a/test/framework/src/test/java/org/opensearch/test/XContentTestUtilsTests.java +++ b/test/framework/src/test/java/org/opensearch/test/XContentTestUtilsTests.java @@ -32,15 +32,15 @@ package org.opensearch.test; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.DeprecationHandler; +import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.xcontent.json.JsonXContent; import java.io.IOException; import java.util.Collections; @@ -111,28 +111,28 @@ public void testInsertIntoXContent() throws IOException { builder.startObject(); builder.endObject(); builder = XContentTestUtils.insertIntoXContent( - XContentType.JSON.xContent(), + MediaTypeRegistry.JSON.xContent(), BytesReference.bytes(builder), Collections.singletonList(""), () -> "inn.er1", () -> new HashMap<>() ); builder = XContentTestUtils.insertIntoXContent( - XContentType.JSON.xContent(), + MediaTypeRegistry.JSON.xContent(), BytesReference.bytes(builder), Collections.singletonList(""), () -> "field1", () -> "value1" ); builder = XContentTestUtils.insertIntoXContent( - XContentType.JSON.xContent(), + MediaTypeRegistry.JSON.xContent(), BytesReference.bytes(builder), Collections.singletonList("inn\\.er1"), () -> "inner2", () -> new HashMap<>() ); 
builder = XContentTestUtils.insertIntoXContent( - XContentType.JSON.xContent(), + MediaTypeRegistry.JSON.xContent(), BytesReference.bytes(builder), Collections.singletonList("inn\\.er1"), () -> "field2", @@ -194,7 +194,7 @@ public void testInsertRandomXContent() throws IOException { try ( XContentParser parser = createParser( - XContentType.JSON.xContent(), + MediaTypeRegistry.JSON.xContent(), insertRandomFields(builder.contentType(), BytesReference.bytes(builder), null, random()) ) ) { @@ -212,7 +212,7 @@ public void testInsertRandomXContent() throws IOException { Predicate<String> pathsToExclude = path -> path.endsWith("foo1"); try ( XContentParser parser = createParser( - XContentType.JSON.xContent(), + MediaTypeRegistry.JSON.xContent(), insertRandomFields(builder.contentType(), BytesReference.bytes(builder), pathsToExclude, random()) ) ) { @@ -230,7 +230,7 @@ public void testInsertRandomXContent() throws IOException { pathsToExclude = path -> path.contains("foo1"); try ( XContentParser parser = createParser( - XContentType.JSON.xContent(), + MediaTypeRegistry.JSON.xContent(), insertRandomFields(builder.contentType(), BytesReference.bytes(builder), pathsToExclude, random()) ) ) { diff --git a/test/framework/src/test/java/org/opensearch/test/disruption/DisruptableMockTransportTests.java b/test/framework/src/test/java/org/opensearch/test/disruption/DisruptableMockTransportTests.java index 9bddaf013aef4..6b64270ca68e1 100644 --- a/test/framework/src/test/java/org/opensearch/test/disruption/DisruptableMockTransportTests.java +++ b/test/framework/src/test/java/org/opensearch/test/disruption/DisruptableMockTransportTests.java @@ -37,10 +37,13 @@ import org.opensearch.cluster.coordination.DeterministicTaskQueue; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.collect.Tuple; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse.Empty; import org.opensearch.node.Node; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.disruption.DisruptableMockTransport.ConnectionStatus; import org.opensearch.threadpool.ThreadPool; @@ -49,8 +52,6 @@ import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestHandler; -import org.opensearch.transport.TransportResponse; -import org.opensearch.transport.TransportResponse.Empty; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; import org.junit.Before; @@ -161,7 +162,8 @@ protected void execute(Runnable runnable) { NOOP_TRANSPORT_INTERCEPTOR, a -> node1, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); service2 = transport2.createTransportService( Settings.EMPTY, @@ -169,7 +171,8 @@ protected void execute(Runnable runnable) { NOOP_TRANSPORT_INTERCEPTOR, a -> node2, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); service1.start(); diff --git a/test/framework/src/test/java/org/opensearch/test/disruption/LongGCDisruptionTests.java 
b/test/framework/src/test/java/org/opensearch/test/disruption/LongGCDisruptionTests.java index 384297c9033af..81292b22345a0 100644 --- a/test/framework/src/test/java/org/opensearch/test/disruption/LongGCDisruptionTests.java +++ b/test/framework/src/test/java/org/opensearch/test/disruption/LongGCDisruptionTests.java @@ -45,11 +45,11 @@ import java.util.concurrent.locks.ReentrantLock; import java.util.regex.Pattern; -import static org.junit.Assume.assumeThat; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.lessThan; +import static org.junit.Assume.assumeThat; public class LongGCDisruptionTests extends OpenSearchTestCase { diff --git a/test/framework/src/test/java/org/opensearch/test/disruption/NetworkDisruptionIT.java b/test/framework/src/test/java/org/opensearch/test/disruption/NetworkDisruptionIT.java index a5112bc958954..362ecd692360d 100644 --- a/test/framework/src/test/java/org/opensearch/test/disruption/NetworkDisruptionIT.java +++ b/test/framework/src/test/java/org/opensearch/test/disruption/NetworkDisruptionIT.java @@ -37,16 +37,16 @@ import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.cluster.NodeConnectionsService; import org.opensearch.common.collect.Tuple; -import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.disruption.NetworkDisruption.TwoPartitions; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportException; -import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; diff --git a/test/framework/src/test/java/org/opensearch/test/hamcrest/OpenSearchAssertionsTests.java b/test/framework/src/test/java/org/opensearch/test/hamcrest/OpenSearchAssertionsTests.java index 5932f8dfaceec..fe699b2355d3e 100644 --- a/test/framework/src/test/java/org/opensearch/test/hamcrest/OpenSearchAssertionsTests.java +++ b/test/framework/src/test/java/org/opensearch/test/hamcrest/OpenSearchAssertionsTests.java @@ -32,15 +32,15 @@ package org.opensearch.test.hamcrest; -import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.broadcast.BroadcastResponse; import org.opensearch.cluster.block.ClusterBlock; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.RandomObjects; diff --git a/test/framework/src/test/java/org/opensearch/test/rest/yaml/ObjectPathTests.java 
b/test/framework/src/test/java/org/opensearch/test/rest/yaml/ObjectPathTests.java index a6482ed09b253..ca07e39719e23 100644 --- a/test/framework/src/test/java/org/opensearch/test/rest/yaml/ObjectPathTests.java +++ b/test/framework/src/test/java/org/opensearch/test/rest/yaml/ObjectPathTests.java @@ -31,10 +31,9 @@ package org.opensearch.test.rest.yaml; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -56,7 +55,7 @@ public class ObjectPathTests extends OpenSearchTestCase { private static XContentBuilder randomXContentBuilder() throws IOException { XContentType xContentType = randomFrom(XContentType.values()); - return XContentBuilder.builder(XContentFactory.xContent(xContentType)); + return XContentBuilder.builder(xContentType.xContent()); } public void testEvaluateObjectPathEscape() throws Exception { @@ -342,7 +341,7 @@ public void testEvaluateArrayAsRoot() throws Exception { xContentBuilder.endObject(); xContentBuilder.endArray(); ObjectPath objectPath = ObjectPath.createFromXContent( - XContentFactory.xContent(xContentBuilder.contentType()), + xContentBuilder.contentType().xContent(), BytesReference.bytes(xContentBuilder) ); Object object = objectPath.evaluate(""); diff --git a/test/framework/src/test/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCaseTests.java b/test/framework/src/test/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCaseTests.java index f69e2e3a6baad..3541ec35d6e67 100644 --- a/test/framework/src/test/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCaseTests.java +++ b/test/framework/src/test/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCaseTests.java @@ -31,13 +31,13 @@ package org.opensearch.test.rest.yaml; +import org.opensearch.test.OpenSearchTestCase; + import java.nio.file.Files; import java.nio.file.Path; import java.util.Map; import java.util.Set; -import org.opensearch.test.OpenSearchTestCase; - import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.Matchers.greaterThan; diff --git a/test/framework/src/test/java/org/opensearch/test/rest/yaml/restspec/ClientYamlSuiteRestApiParserFailingTests.java b/test/framework/src/test/java/org/opensearch/test/rest/yaml/restspec/ClientYamlSuiteRestApiParserFailingTests.java index 24d0a1f7f23a7..97c69a102345f 100644 --- a/test/framework/src/test/java/org/opensearch/test/rest/yaml/restspec/ClientYamlSuiteRestApiParserFailingTests.java +++ b/test/framework/src/test/java/org/opensearch/test/rest/yaml/restspec/ClientYamlSuiteRestApiParserFailingTests.java @@ -31,9 +31,9 @@ package org.opensearch.test.rest.yaml.restspec; +import org.opensearch.common.xcontent.yaml.YamlXContent; import org.opensearch.core.common.ParsingException; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.yaml.YamlXContent; import org.opensearch.test.OpenSearchTestCase; import static org.hamcrest.Matchers.containsString; diff --git a/test/framework/src/test/java/org/opensearch/test/rest/yaml/restspec/ClientYamlSuiteRestApiTests.java b/test/framework/src/test/java/org/opensearch/test/rest/yaml/restspec/ClientYamlSuiteRestApiTests.java index 9640f64f86b12..0f1955f7eeadb 100644 --- 
a/test/framework/src/test/java/org/opensearch/test/rest/yaml/restspec/ClientYamlSuiteRestApiTests.java +++ b/test/framework/src/test/java/org/opensearch/test/rest/yaml/restspec/ClientYamlSuiteRestApiTests.java @@ -32,8 +32,8 @@ package org.opensearch.test.rest.yaml.restspec; import org.opensearch.common.util.set.Sets; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.common.xcontent.yaml.YamlXContent; +import org.opensearch.core.xcontent.XContentParser; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; diff --git a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/ClientYamlTestSectionTests.java b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/ClientYamlTestSectionTests.java index 00984e30df497..ee355e307345c 100644 --- a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/ClientYamlTestSectionTests.java +++ b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/ClientYamlTestSectionTests.java @@ -33,9 +33,9 @@ package org.opensearch.test.rest.yaml.section; import org.opensearch.LegacyESVersion; +import org.opensearch.common.xcontent.yaml.YamlXContent; import org.opensearch.core.common.ParsingException; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.yaml.YamlXContent; import java.io.IOException; import java.util.Map; diff --git a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java index 313867abd7c76..e1c11315cf501 100644 --- a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java +++ b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java @@ -33,9 +33,9 @@ package org.opensearch.test.rest.yaml.section; import org.opensearch.client.NodeSelector; +import org.opensearch.common.xcontent.yaml.YamlXContent; import org.opensearch.core.common.ParsingException; import org.opensearch.core.xcontent.XContentLocation; -import org.opensearch.common.xcontent.yaml.YamlXContent; import java.util.ArrayList; import java.util.Collections; diff --git a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/DoSectionTests.java b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/DoSectionTests.java index 4e578ca9fbadf..a580a11079de0 100644 --- a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/DoSectionTests.java +++ b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/DoSectionTests.java @@ -32,17 +32,17 @@ package org.opensearch.test.rest.yaml.section; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.Version; import org.opensearch.client.Node; import org.opensearch.client.NodeSelector; -import org.opensearch.core.common.ParsingException; import org.opensearch.common.logging.HeaderWarning; +import org.opensearch.common.xcontent.yaml.YamlXContent; +import org.opensearch.core.common.ParsingException; import org.opensearch.core.xcontent.XContentLocation; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.yaml.YamlXContent; import org.opensearch.test.rest.yaml.ClientYamlTestExecutionContext; import org.opensearch.test.rest.yaml.ClientYamlTestResponse; -import org.apache.hc.core5.http.HttpHost; import org.hamcrest.MatcherAssert; import java.io.IOException; diff --git 
a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/SetSectionTests.java b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/SetSectionTests.java index fafde9c64f6e8..4e0cc8da17f80 100644 --- a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/SetSectionTests.java +++ b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/SetSectionTests.java @@ -31,8 +31,8 @@ package org.opensearch.test.rest.yaml.section; -import org.opensearch.core.common.ParsingException; import org.opensearch.common.xcontent.yaml.YamlXContent; +import org.opensearch.core.common.ParsingException; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; diff --git a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/SkipSectionTests.java b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/SkipSectionTests.java index 4f0de3e65808c..9974917d475e8 100644 --- a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/SkipSectionTests.java +++ b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/SkipSectionTests.java @@ -34,8 +34,8 @@ import org.opensearch.LegacyESVersion; import org.opensearch.Version; -import org.opensearch.core.common.ParsingException; import org.opensearch.common.xcontent.yaml.YamlXContent; +import org.opensearch.core.common.ParsingException; import org.opensearch.test.VersionUtils; import java.util.Collections; diff --git a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/TransformAndSetSectionTests.java b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/TransformAndSetSectionTests.java index b887993503874..4ed27e8bad681 100644 --- a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/TransformAndSetSectionTests.java +++ b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/TransformAndSetSectionTests.java @@ -32,8 +32,8 @@ package org.opensearch.test.rest.yaml.section; -import org.opensearch.core.common.ParsingException; import org.opensearch.common.xcontent.yaml.YamlXContent; +import org.opensearch.core.common.ParsingException; import org.opensearch.test.rest.yaml.ClientYamlTestExecutionContext; import org.opensearch.test.rest.yaml.Stash; diff --git a/test/framework/src/test/java/org/opensearch/test/test/InternalTestClusterIT.java b/test/framework/src/test/java/org/opensearch/test/test/InternalTestClusterIT.java index 0464343c2c6ee..20f054f593e78 100644 --- a/test/framework/src/test/java/org/opensearch/test/test/InternalTestClusterIT.java +++ b/test/framework/src/test/java/org/opensearch/test/test/InternalTestClusterIT.java @@ -33,9 +33,9 @@ import org.opensearch.client.node.NodeClient; import org.opensearch.common.settings.Settings; +import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; -import org.opensearch.test.InternalTestCluster; import java.io.IOException; diff --git a/test/framework/src/test/java/org/opensearch/test/test/InternalTestClusterTests.java b/test/framework/src/test/java/org/opensearch/test/test/InternalTestClusterTests.java index d68cedac29674..1d9f7920dae35 100644 --- a/test/framework/src/test/java/org/opensearch/test/test/InternalTestClusterTests.java +++ b/test/framework/src/test/java/org/opensearch/test/test/InternalTestClusterTests.java @@ -45,10 +45,10 @@ import org.opensearch.env.NodeEnvironment; import org.opensearch.node.Node.DiscoverySettings; import 
org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.MockHttpTransport; import org.opensearch.test.NodeConfigurationSource; +import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; import java.nio.file.Files; diff --git a/test/framework/src/test/java/org/opensearch/test/test/OpenSearchTestCaseTests.java b/test/framework/src/test/java/org/opensearch/test/test/OpenSearchTestCaseTests.java index 65e0a2dfa6d8a..18b0d1f719b5f 100644 --- a/test/framework/src/test/java/org/opensearch/test/test/OpenSearchTestCaseTests.java +++ b/test/framework/src/test/java/org/opensearch/test/test/OpenSearchTestCaseTests.java @@ -32,13 +32,11 @@ package org.opensearch.test.test; -import junit.framework.AssertionFailedError; - -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.common.time.DateFormatter; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentType; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.RandomObjects; @@ -54,6 +52,8 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Supplier; +import junit.framework.AssertionFailedError; + import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; diff --git a/test/framework/src/test/java/org/opensearch/test/test/SuiteScopeClusterIT.java b/test/framework/src/test/java/org/opensearch/test/test/SuiteScopeClusterIT.java index 4d8bc2e9f0ffc..5c0f311e056c2 100644 --- a/test/framework/src/test/java/org/opensearch/test/test/SuiteScopeClusterIT.java +++ b/test/framework/src/test/java/org/opensearch/test/test/SuiteScopeClusterIT.java @@ -32,6 +32,7 @@ package org.opensearch.test.test; import com.carrotsearch.randomizedtesting.annotations.Repeat; + import org.opensearch.common.SuppressForbidden; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.TestCluster; diff --git a/test/framework/src/test/java/org/opensearch/transport/nio/SimpleMockNioTransportTests.java b/test/framework/src/test/java/org/opensearch/transport/nio/SimpleMockNioTransportTests.java index 8b0ffb2d0652d..ce401ad99fad7 100644 --- a/test/framework/src/test/java/org/opensearch/transport/nio/SimpleMockNioTransportTests.java +++ b/test/framework/src/test/java/org/opensearch/transport/nio/SimpleMockNioTransportTests.java @@ -33,15 +33,16 @@ package org.opensearch.transport.nio; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.util.MockPageCacheRecycler; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import 
org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.transport.AbstractSimpleTransportTestCase; import org.opensearch.transport.ConnectTransportException; import org.opensearch.transport.ConnectionProfile; @@ -71,7 +72,8 @@ protected Transport build(Settings settings, final Version version, ClusterSetti networkService, new MockPageCacheRecycler(settings), namedWriteableRegistry, - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ) { @Override diff --git a/test/framework/src/test/java/org/opensearch/transport/nio/TestEventHandlerTests.java b/test/framework/src/test/java/org/opensearch/transport/nio/TestEventHandlerTests.java index bbbc7ea7f53b3..0492d56f9df77 100644 --- a/test/framework/src/test/java/org/opensearch/transport/nio/TestEventHandlerTests.java +++ b/test/framework/src/test/java/org/opensearch/transport/nio/TestEventHandlerTests.java @@ -38,8 +38,8 @@ import org.opensearch.common.settings.Settings; import org.opensearch.nio.ServerChannelContext; import org.opensearch.nio.SocketChannelContext; -import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.MockLogAppender; +import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; import java.util.HashMap; diff --git a/test/telemetry/build.gradle b/test/telemetry/build.gradle index fbabe43aa5e5a..ca523a9204f4c 100644 --- a/test/telemetry/build.gradle +++ b/test/telemetry/build.gradle @@ -13,6 +13,7 @@ apply plugin: 'opensearch.build' apply plugin: 'opensearch.publish' dependencies { + api project(":libs:opensearch-core") api project(":libs:opensearch-common") api project(":libs:opensearch-telemetry") } diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpan.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpan.java index 876145f6bf653..c5d179f6412a8 100644 --- a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpan.java +++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpan.java @@ -8,13 +8,17 @@ package org.opensearch.test.telemetry.tracing; +import org.opensearch.telemetry.tracing.AbstractSpan; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanCreationContext; +import org.opensearch.telemetry.tracing.SpanKind; +import org.opensearch.telemetry.tracing.attributes.Attributes; + import java.util.HashMap; import java.util.Map; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; import java.util.function.Supplier; -import org.opensearch.telemetry.tracing.AbstractSpan; -import org.opensearch.telemetry.tracing.Span; /** * MockSpan for testing and strict check validations. Not to be used for production cases. @@ -27,6 +31,7 @@ public class MockSpan extends AbstractSpan { private boolean hasEnded; private final Long startTime; private Long endTime; + private final SpanKind spanKind; private final Object lock = new Object(); @@ -34,35 +39,53 @@ public class MockSpan extends AbstractSpan { /** * Base Constructor. - * @param spanName span name - * @param parentSpan parent span - * @param spanProcessor span processor + * + * @param spanCreationContext Span Creation context. 
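Illustrative aside: the expanded constructor makes attributes and span kind explicit at creation time. A sketch of building a standalone MockSpan with known IDs, mirroring how the mock context propagator later in this diff reconstructs remote spans (the literal IDs are fabricated; spanProcessor is assumed to exist in the enclosing test):

// Direct use of the seven-argument constructor introduced above.
MockSpan span = new MockSpan(
    "test-span",          // spanName
    null,                 // no parent span
    "4fd0b6131f19f39a",   // fabricated traceId
    "36ed51a3d02c44a8",   // fabricated spanId
    spanProcessor,        // assumed to exist in the enclosing test
    Attributes.EMPTY,
    SpanKind.INTERNAL
);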
+ * @param parentSpan Parent Span + * @param spanProcessor Span Processor */ - public MockSpan(String spanName, Span parentSpan, SpanProcessor spanProcessor) { + public MockSpan(SpanCreationContext spanCreationContext, Span parentSpan, SpanProcessor spanProcessor) { this( - spanName, + spanCreationContext.getSpanName(), parentSpan, parentSpan != null ? parentSpan.getTraceId() : IdGenerator.generateTraceId(), IdGenerator.generateSpanId(), - spanProcessor + spanProcessor, + spanCreationContext.getAttributes(), + SpanKind.INTERNAL ); } /** * Constructor with traceId and SpanIds - * @param spanName Span Name - * @param parentSpan Parent Span - * @param traceId Trace ID - * @param spanId Span ID - * @param spanProcessor Span Processor + * + * @param spanName Span Name + * @param parentSpan Parent Span + * @param traceId Trace ID + * @param spanId Span ID + * @param spanProcessor Span Processor + * @param attributes attributes + * @param spanKind type of span. */ - public MockSpan(String spanName, Span parentSpan, String traceId, String spanId, SpanProcessor spanProcessor) { + public MockSpan( + String spanName, + Span parentSpan, + String traceId, + String spanId, + SpanProcessor spanProcessor, + Attributes attributes, + SpanKind spanKind + ) { super(spanName, parentSpan); this.spanProcessor = spanProcessor; this.metadata = new HashMap<>(); this.traceId = traceId; this.spanId = spanId; this.startTime = System.nanoTime(); + if (attributes != null) { + this.metadata.putAll(attributes.getAttributesMap()); + } + this.spanKind = spanKind; } @Override @@ -143,7 +166,9 @@ public Long getEndTime() { } public void setError(Exception exception) { - putMetadata("ERROR", exception.getMessage()); + if (exception != null) { + putMetadata("ERROR", exception.getMessage()); + } } private static class IdGenerator { @@ -160,4 +185,21 @@ private static String generateTraceId() { } } + + /** + * Returns attribute. + * @param key key + * @return value + */ + public Object getAttribute(String key) { + return metadata.get(key); + } + + /** + * Returns the attributes as map. + * @return returns the attributes map. + */ + public Map<String, Object> getAttributes() { + return metadata; + } } diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpanData.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpanData.java new file mode 100644 index 0000000000000..0658a6421f3f3 --- /dev/null +++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpanData.java @@ -0,0 +1,195 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.test.telemetry.tracing; + +import java.util.Arrays; +import java.util.Map; + +/** + * MockSpanData model for storing Telemetry information for testing. + */ +public class MockSpanData { + + /** + * MockSpanData constructor with spanID, parentSpanID, traceID, startEpochNanos, endEpochNanos, hasEnded params. 
+ * + * @param spanID spanID + * @param parentSpanID spanID of the parentSpan + * @param traceID traceID of the request + * @param startEpochNanos startTime of span in epochNanos + * @param endEpochNanos endTime of span in epochNanos + * @param hasEnded value if the span is closed + * @param spanName Name of the span emitted + * @param attributes span attributes + */ + public MockSpanData( + String spanID, + String parentSpanID, + String traceID, + long startEpochNanos, + long endEpochNanos, + boolean hasEnded, + String spanName, + Map<String, Object> attributes + ) { + this.spanID = spanID; + this.traceID = traceID; + this.parentSpanID = parentSpanID; + this.startEpochNanos = startEpochNanos; + this.endEpochNanos = endEpochNanos; + this.hasEnded = hasEnded; + this.spanName = spanName; + this.attributes = attributes; + } + + /** + * MockSpanData constructor with spanID, parentSpanID, traceID, startEpochNanos, hasEnded and spanName params. + * + * @param spanID spanID + * @param parentSpanID spanID of the parentSpan + * @param traceID traceID of the request + * @param startEpochNanos startTime of span in epochNanos + * @param hasEnded value if the span is closed + * @param spanName Name of the span emitted + * @param stackTrace StackTrace to debug the problematic span + * @param attributes span attributes + */ + public MockSpanData( + String spanID, + String parentSpanID, + String traceID, + long startEpochNanos, + boolean hasEnded, + String spanName, + StackTraceElement[] stackTrace, + Map<String, Object> attributes + ) { + this.spanID = spanID; + this.traceID = traceID; + this.parentSpanID = parentSpanID; + this.startEpochNanos = startEpochNanos; + this.hasEnded = hasEnded; + this.spanName = spanName; + this.stackTrace = stackTrace; + this.attributes = attributes; + } + + private final String spanID; + private final String parentSpanID; + private final String traceID; + + private String spanName; + private final long startEpochNanos; + private long endEpochNanos; + private boolean hasEnded; + private Map<String, Object> attributes; + + private StackTraceElement[] stackTrace; + + /** + * Returns SpanID. + */ + public String getSpanID() { + return spanID; + } + + /** + * Returns ParentSpanID. + */ + public String getParentSpanID() { + return parentSpanID; + } + + /** + * Returns TraceID. + */ + public String getTraceID() { + return traceID; + } + + /** + * Returns hasEnded. + */ + public boolean isHasEnded() { + return hasEnded; + } + + /** + * Returns EndEpochNanos for a span. + */ + public long getEndEpochNanos() { + return endEpochNanos; + } + + /** + * Returns StartEpochNanos for a span. + */ + public long getStartEpochNanos() { + return startEpochNanos; + } + + /** + * Returns StackTrace for a span. + */ + public StackTraceElement[] getStackTrace() { + return stackTrace; + } + + /** + * Sets EndEpochNanos for a span. + * @param endEpochNanos endtime in epoch nanos + */ + public void setEndEpochNanos(long endEpochNanos) { + this.endEpochNanos = endEpochNanos; + } + + /** + * Sets hasEnded for a span. + * @param hasEnded hasEnded value if span is closed. + */ + public void setHasEnded(boolean hasEnded) { + this.hasEnded = hasEnded; + } + + /** + * Returns the attributes + * @return returns the attributes map. 
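Illustrative aside: MockSpanData is a plain holder, so a processor can snapshot a span when it starts and complete the record when it ends, which is what the reworked StrictCheckSpanProcessor later in this diff does. A hypothetical recording processor built on the constructors above (the getParentSpan and getSpanName accessors on Span are assumed from their use elsewhere in this diff):

import java.util.Collections;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.opensearch.telemetry.tracing.Span;

public class RecordingSpanProcessor implements SpanProcessor {
    private final Map<String, MockSpanData> spans = new ConcurrentHashMap<>();

    @Override
    public void onStart(Span span) {
        spans.put(
            span.getSpanId(),
            new MockSpanData(
                span.getSpanId(),
                span.getParentSpan() == null ? null : span.getParentSpan().getSpanId(), // assumed accessor
                span.getTraceId(),
                System.nanoTime(),                      // startEpochNanos
                false,                                  // hasEnded
                span.getSpanName(),                     // assumed accessor
                Thread.currentThread().getStackTrace(),
                Collections.emptyMap()                  // attribute snapshot omitted in this sketch
            )
        );
    }

    @Override
    public void onEnd(Span span) {
        MockSpanData data = spans.get(span.getSpanId());
        if (data != null) {
            data.setEndEpochNanos(System.nanoTime());
            data.setHasEnded(true);
        }
    }
}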
+ */ + public Map<String, Object> getAttributes() { + return attributes; + } + + @Override + public String toString() { + return "MockSpanData{" + + "spanID='" + + spanID + + '\'' + + ", parentSpanID='" + + parentSpanID + + '\'' + + ", traceID='" + + traceID + + '\'' + + ", spanName='" + + spanName + + '\'' + + ", startEpochNanos=" + + startEpochNanos + + ", endEpochNanos=" + + endEpochNanos + + ", hasEnded=" + + hasEnded + + ", attributes=" + + attributes + + ", stackTrace=" + + Arrays.toString(stackTrace) + + '}'; + } +} diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingContextPropagator.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingContextPropagator.java index 7e3f5a9031100..4c58352531ca8 100644 --- a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingContextPropagator.java +++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingContextPropagator.java @@ -8,11 +8,18 @@ package org.opensearch.test.telemetry.tracing; +import org.opensearch.core.common.Strings; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanKind; +import org.opensearch.telemetry.tracing.TracingContextPropagator; +import org.opensearch.telemetry.tracing.attributes.Attributes; + +import java.util.Collection; import java.util.Locale; import java.util.Map; +import java.util.Optional; import java.util.function.BiConsumer; -import org.opensearch.telemetry.tracing.Span; -import org.opensearch.telemetry.tracing.TracingContextPropagator; +import java.util.stream.Collectors; /** * Mock {@link TracingContextPropagator} to persist the span for internode communication. @@ -32,18 +39,31 @@ public MockTracingContextPropagator(SpanProcessor spanProcessor) { } @Override - public Span extract(Map<String, String> props) { + public Optional<Span> extract(Map<String, String> props) { String value = props.get(TRACE_PARENT); if (value != null) { String[] values = value.split(SEPARATOR); String traceId = values[0]; String spanId = values[1]; - return new MockSpan(null, null, traceId, spanId, spanProcessor); + return Optional.of(new MockSpan(null, null, traceId, spanId, spanProcessor, Attributes.EMPTY, SpanKind.INTERNAL)); } else { - return null; + return Optional.empty(); } } + @Override + public Optional<Span> extractFromHeaders(Map<String, Collection<String>> headers) { + if (headers != null) { + Map<String, String> convertedHeader = headers.entrySet() + .stream() + .collect(Collectors.toMap(Map.Entry::getKey, e -> Strings.collectionToCommaDelimitedString(e.getValue()))); + return extract(convertedHeader); + } else { + return Optional.empty(); + } + + } + @Override public void inject(Span currentSpan, BiConsumer<String, String> setter) { if (currentSpan instanceof MockSpan) { diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingTelemetry.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingTelemetry.java index 531b4ce36c36a..39817a208bd18 100644 --- a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingTelemetry.java +++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingTelemetry.java @@ -9,27 +9,31 @@ package org.opensearch.test.telemetry.tracing; import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanCreationContext; import org.opensearch.telemetry.tracing.TracingContextPropagator; import 
org.opensearch.telemetry.tracing.TracingTelemetry; +import java.util.concurrent.atomic.AtomicBoolean; + /** * Mock {@link TracingTelemetry} implementation for testing. */ public class MockTracingTelemetry implements TracingTelemetry { private final SpanProcessor spanProcessor = new StrictCheckSpanProcessor(); + private final AtomicBoolean shutdown = new AtomicBoolean(false); /** * Base constructor. */ - public MockTracingTelemetry() { - - } + public MockTracingTelemetry() {} @Override - public Span createSpan(String spanName, Span parentSpan) { - Span span = new MockSpan(spanName, parentSpan, spanProcessor); - spanProcessor.onStart(span); + public Span createSpan(SpanCreationContext spanCreationContext, Span parentSpan) { + Span span = new MockSpan(spanCreationContext, parentSpan, spanProcessor); + if (shutdown.get() == false) { + spanProcessor.onStart(span); + } return span; } @@ -40,7 +44,7 @@ public TracingContextPropagator getContextPropagator() { @Override public void close() { - ((StrictCheckSpanProcessor) spanProcessor).ensureAllSpansAreClosed(); - ((StrictCheckSpanProcessor) spanProcessor).clear(); + shutdown.set(true); } + } diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/StrictCheckSpanProcessor.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/StrictCheckSpanProcessor.java index 34d4d96809755..f7ebb3ee18a9b 100644 --- a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/StrictCheckSpanProcessor.java +++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/StrictCheckSpanProcessor.java @@ -8,77 +8,80 @@ package org.opensearch.test.telemetry.tracing; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.test.telemetry.tracing.validators.AllSpansAreEndedProperly; +import org.opensearch.test.telemetry.tracing.validators.AllSpansHaveUniqueId; + +import java.util.ArrayList; import java.util.Arrays; -import java.util.Locale; +import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; -import org.opensearch.telemetry.tracing.Span; /** * Strict check span processor to validate the spans. */ public class StrictCheckSpanProcessor implements SpanProcessor { - private final Map<String, StackTraceElement[]> spanMap = new ConcurrentHashMap<>(); - /** * Base constructor. */ - public StrictCheckSpanProcessor() { + public StrictCheckSpanProcessor() {} - } + private static Map<String, MockSpanData> spanMap = new ConcurrentHashMap<>(); @Override public void onStart(Span span) { - spanMap.put(span.getSpanId(), Thread.currentThread().getStackTrace()); + spanMap.put(span.getSpanId(), toMockSpanData(span)); } @Override public void onEnd(Span span) { - spanMap.remove(span.getSpanId()); + MockSpanData spanData = spanMap.get(span.getSpanId()); + // Setting EndEpochTime and HasEnded value to true on completion of span. + if (spanData != null) { + spanData.setEndEpochNanos(System.nanoTime()); + spanData.setHasEnded(true); + } } /** - * Ensures that all the spans are closed. Throws exception message with stack trace of the method form - * where the span was created. We can enhance it to print all the failed spans in a single go based on - * the usability. + * Return list of mock span data at any point of time. 
*/ - public void ensureAllSpansAreClosed() { - if (!spanMap.isEmpty()) { - for (Map.Entry<String, StackTraceElement[]> entry : spanMap.entrySet()) { - StackTraceElement[] filteredStackTrace = getFilteredStackTrace(entry.getValue()); - AssertionError error = new AssertionError( - String.format( - Locale.ROOT, - " Total [%d] spans are not ended properly. " + "Find below the stack trace for one of the un-ended span", - spanMap.size() - ) - ); - error.setStackTrace(filteredStackTrace); - spanMap.clear(); - throw error; - } - } + public List<MockSpanData> getFinishedSpanItems() { + return new ArrayList<>(spanMap.values()); } - /** - * Clears the state. - */ - public void clear() { - spanMap.clear(); + private MockSpanData toMockSpanData(Span span) { + String parentSpanId = (span.getParentSpan() != null) ? span.getParentSpan().getSpanId() : ""; + MockSpanData spanData = new MockSpanData( + span.getSpanId(), + parentSpanId, + span.getTraceId(), + System.nanoTime(), + false, + span.getSpanName(), + Thread.currentThread().getStackTrace(), + (span instanceof MockSpan) ? ((MockSpan) span).getAttributes() : Map.of() + ); + return spanData; } - private StackTraceElement[] getFilteredStackTrace(StackTraceElement[] stackTraceElements) { - int filteredElementsCount = 0; - while (filteredElementsCount < stackTraceElements.length) { - String className = stackTraceElements[filteredElementsCount].getClassName(); - if (className.startsWith("java.lang.Thread") - || className.startsWith("org.opensearch.telemetry") - || className.startsWith("org.opensearch.tracing")) { - filteredElementsCount++; - } else { - break; + /** + * Ensures the strict check succeeds for all the spans. + */ + public static void validateTracingStateOnShutdown() { + List<MockSpanData> spanData = new ArrayList<>(spanMap.values()); + if (spanData.size() != 0) { + TelemetryValidators validators = new TelemetryValidators( + Arrays.asList(new AllSpansAreEndedProperly(), new AllSpansHaveUniqueId()) + ); + try { + validators.validate(spanData, 1); + } catch (Error e) { + spanMap.clear(); + throw e; } } - return Arrays.copyOfRange(stackTraceElements, filteredElementsCount, stackTraceElements.length); + } } diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/TelemetryValidators.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/TelemetryValidators.java new file mode 100644 index 0000000000000..9b5d84954908b --- /dev/null +++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/TelemetryValidators.java @@ -0,0 +1,59 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.test.telemetry.tracing; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * TelemetryValidators for running validate on all applicable span Validator classes. + */ +public class TelemetryValidators { + private List<TracingValidator> validators; + + /** + * Base constructor. + * @param validators list of validators applicable + */ + public TelemetryValidators(List<TracingValidator> validators) { + this.validators = validators; + } + + /** + * calls validate of all validators and throws exception in case of error. 
+     * @param spans List of spans emitted
+     * @param requests number of requests (e.g. indexing/search calls)
+     */
+    public void validate(List<MockSpanData> spans, int requests) {
+        Map<String, List<MockSpanData>> problematicSpansMap = new HashMap<>();
+        for (TracingValidator validator : this.validators) {
+            List<MockSpanData> problematicSpans = validator.validate(spans, requests);
+            if (!problematicSpans.isEmpty()) {
+                problematicSpansMap.put(validator.getClass().getName(), problematicSpans);
+            }
+        }
+        if (!problematicSpansMap.isEmpty()) {
+            AssertionError error = new AssertionError(printProblematicSpansMap(problematicSpansMap));
+            throw error;
+        }
+    }
+
+    private String printProblematicSpansMap(Map<String, List<MockSpanData>> spanMap) {
+        StringBuilder sb = new StringBuilder();
+        for (var entry : spanMap.entrySet()) {
+            sb.append("SpanData validation failed for validator ").append(entry.getKey());
+            sb.append("\n");
+            for (MockSpanData span : entry.getValue()) {
+                sb.append(span.toString());
+            }
+        }
+        return sb.toString();
+    }
+}
diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/TracingValidator.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/TracingValidator.java
new file mode 100644
index 0000000000000..8b125c7e97cf7
--- /dev/null
+++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/TracingValidator.java
@@ -0,0 +1,24 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.test.telemetry.tracing;
+
+import java.util.List;
+
+/**
+ * Performs validations on traces emitted.
+ */
+public interface TracingValidator {
+    /**
+     * Validates the emitted spans and returns the list of problematic spans.
+     * @param spans spans emitted at any point of time.
+     * @param requests requests can be search/index calls.
+     * @return list of spans that fail validation.
+     */
+    List<MockSpanData> validate(List<MockSpanData> spans, int requests);
+}
diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/validators/AllSpansAreEndedProperly.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/validators/AllSpansAreEndedProperly.java
new file mode 100644
index 0000000000000..483ed98de9474
--- /dev/null
+++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/validators/AllSpansAreEndedProperly.java
@@ -0,0 +1,37 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.test.telemetry.tracing.validators;
+
+import org.opensearch.test.telemetry.tracing.MockSpanData;
+import org.opensearch.test.telemetry.tracing.TracingValidator;
+
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * AllSpansAreEndedProperly validator to check if all spans are closed properly.
+ */
+public class AllSpansAreEndedProperly implements TracingValidator {
+
+    /**
+     * Base Constructor
+     */
+    public AllSpansAreEndedProperly() {}
+
+    /**
+     * Validates that every emitted span has hasEnded set to true.
+     * @param spans spans emitted.
+     * @param requests number of requests (e.g. search/index calls)
+     */
+    @Override
+    public List<MockSpanData> validate(List<MockSpanData> spans, int requests) {
+        List<MockSpanData> problematicSpans = spans.stream().filter(s -> s.isHasEnded() == false).collect(Collectors.toList());
+        return problematicSpans;
+    }
+}
diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/validators/AllSpansHaveUniqueId.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/validators/AllSpansHaveUniqueId.java
new file mode 100644
index 0000000000000..a64dd047b68f5
--- /dev/null
+++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/validators/AllSpansHaveUniqueId.java
@@ -0,0 +1,46 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.test.telemetry.tracing.validators;
+
+import org.opensearch.test.telemetry.tracing.MockSpanData;
+import org.opensearch.test.telemetry.tracing.TracingValidator;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+/**
+ * AllSpansHaveUniqueId validator checks if all spans emitted have a unique spanID.
+ */
+public class AllSpansHaveUniqueId implements TracingValidator {
+
+    /**
+     * Base Constructor
+     */
+    public AllSpansHaveUniqueId() {}
+
+    /**
+     * Validates that all emitted spans have a unique spanID.
+     * @param spans spans emitted.
+     * @param requests number of requests (e.g. search/index calls)
+     */
+    @Override
+    public List<MockSpanData> validate(List<MockSpanData> spans, int requests) {
+        List<MockSpanData> problematicSpans = new ArrayList<>();
+        Set<String> set = new HashSet<>();
+        for (MockSpanData span : spans) {
+            if (set.contains(span.getSpanID())) {
+                problematicSpans.add(span);
+            }
+            set.add(span.getSpanID());
+        }
+        return problematicSpans;
+    }
+}
diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/validators/NumberOfTraceIDsEqualToRequests.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/validators/NumberOfTraceIDsEqualToRequests.java
new file mode 100644
index 0000000000000..045d3a85e21e7
--- /dev/null
+++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/validators/NumberOfTraceIDsEqualToRequests.java
@@ -0,0 +1,62 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.test.telemetry.tracing.validators;
+
+import org.opensearch.telemetry.tracing.attributes.Attributes;
+import org.opensearch.test.telemetry.tracing.MockSpanData;
+import org.opensearch.test.telemetry.tracing.TracingValidator;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Objects;
+import java.util.stream.Collectors;
+
+/**
+ * NumberOfTraceIDsEqualToRequests checks that the number of unique traceIDs equals the number of requests.
+ */
+public class NumberOfTraceIDsEqualToRequests implements TracingValidator {
+
+    private static final String FILTERING_ATTRIBUTE = "action";
+    private final Attributes attributes;
+
+    /**
+     * Constructor.
+     * @param attributes attributes.
+     */
+    public NumberOfTraceIDsEqualToRequests(Attributes attributes) {
+        this.attributes = attributes;
+    }
+
+    /**
+     * Validates that the number of distinct traceIDs across the matching spans equals the number of requests.
+     * @param spans spans emitted.
+     * @param requests number of requests (e.g. search/index calls)
+     */
+    @Override
+    public List<MockSpanData> validate(List<MockSpanData> spans, int requests) {
+        final Collection<MockSpanData> totalTraceIDs = spans.stream().filter(this::isMatchingSpan).collect(Collectors.toList());
+        List<MockSpanData> problematicSpans = new ArrayList<>();
+        if (totalTraceIDs.stream().map(MockSpanData::getTraceID).distinct().count() != requests) {
+            problematicSpans.addAll(totalTraceIDs);
+        }
+        return problematicSpans;
+    }
+
+    private boolean isMatchingSpan(MockSpanData mockSpanData) {
+        if (attributes.getAttributesMap().isEmpty()) {
+            return true;
+        } else {
+            return Objects.equals(
+                mockSpanData.getAttributes().get(FILTERING_ATTRIBUTE),
+                attributes.getAttributesMap().get(FILTERING_ATTRIBUTE)
+            );
+        }
+    }
+}
diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/validators/TotalRootSpansEqualToRequests.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/validators/TotalRootSpansEqualToRequests.java
new file mode 100644
index 0000000000000..80703ccf479ef
--- /dev/null
+++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/validators/TotalRootSpansEqualToRequests.java
@@ -0,0 +1,42 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.test.telemetry.tracing.validators;
+
+import org.opensearch.test.telemetry.tracing.MockSpanData;
+import org.opensearch.test.telemetry.tracing.TracingValidator;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * TotalRootSpansEqualToRequests validator to check sanity on the total number of root spans.
+ */
+public class TotalRootSpansEqualToRequests implements TracingValidator {
+
+    /**
+     * Base Constructor
+     */
+    public TotalRootSpansEqualToRequests() {}
+
+    /**
+     * Validates that the number of root spans (spans without a parent) equals the number of requests.
+     * @param spans spans emitted.
+     * @param requests number of requests (e.g. search/index calls)
+     */
+    @Override
+    public List<MockSpanData> validate(List<MockSpanData> spans, int requests) {
+        List<MockSpanData> problematicSpans = new ArrayList<>();
+        List<MockSpanData> rootSpans = spans.stream().filter(s -> s.getParentSpanID().isEmpty()).collect(Collectors.toList());
+        if (rootSpans.size() != requests) {
+            problematicSpans.addAll(rootSpans);
+        }
+        return problematicSpans;
+    }
+}
diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/validators/package-info.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/validators/package-info.java
new file mode 100644
index 0000000000000..ef1b1493198f0
--- /dev/null
+++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/validators/package-info.java
@@ -0,0 +1,10 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/** Base tracing validators package. */
+package org.opensearch.test.telemetry.tracing.validators;
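
For illustration, here is a minimal sketch (not part of the change set) of how the mock telemetry classes above are meant to compose in a test: spans are registered with a StrictCheckSpanProcessor, ended, and the recorded MockSpanData is then checked with TelemetryValidators. The scenario (one request producing a root span and a child span), the span/trace IDs, and the class name TracingValidationSketch are hypothetical.

import java.util.Arrays;
import java.util.List;

import org.opensearch.telemetry.tracing.SpanKind;
import org.opensearch.telemetry.tracing.attributes.Attributes;
import org.opensearch.test.telemetry.tracing.MockSpan;
import org.opensearch.test.telemetry.tracing.MockSpanData;
import org.opensearch.test.telemetry.tracing.StrictCheckSpanProcessor;
import org.opensearch.test.telemetry.tracing.TelemetryValidators;
import org.opensearch.test.telemetry.tracing.validators.AllSpansAreEndedProperly;
import org.opensearch.test.telemetry.tracing.validators.AllSpansHaveUniqueId;
import org.opensearch.test.telemetry.tracing.validators.TotalRootSpansEqualToRequests;

public class TracingValidationSketch {
    public static void main(String[] args) {
        StrictCheckSpanProcessor processor = new StrictCheckSpanProcessor();

        // Root span standing in for a single traced request; a null parent makes
        // the processor record an empty parentSpanID, which marks it as a root span.
        MockSpan root = new MockSpan("root", null, "trace-1", "span-1", processor, Attributes.EMPTY, SpanKind.INTERNAL);
        processor.onStart(root);

        // Child span belonging to the same trace.
        MockSpan child = new MockSpan("child", root, "trace-1", "span-2", processor, Attributes.EMPTY, SpanKind.INTERNAL);
        processor.onStart(child);

        // onEnd records the end time and flips hasEnded on the stored MockSpanData.
        processor.onEnd(child);
        processor.onEnd(root);

        // Run the validators for exactly one request; validate throws an
        // AssertionError listing the problematic spans if any check fails.
        List<MockSpanData> spans = processor.getFinishedSpanItems();
        TelemetryValidators validators = new TelemetryValidators(
            Arrays.asList(new AllSpansAreEndedProperly(), new AllSpansHaveUniqueId(), new TotalRootSpansEqualToRequests())
        );
        validators.validate(spans, 1);
    }
}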
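
Adding a new check is a matter of implementing TracingValidator and returning the offending spans. As a sketch, a hypothetical validator (the class name and the check itself are illustrative, not part of the change set) that flags ended spans whose recorded end time precedes their start time:

import java.util.List;
import java.util.stream.Collectors;

import org.opensearch.test.telemetry.tracing.MockSpanData;
import org.opensearch.test.telemetry.tracing.TracingValidator;

/**
 * Hypothetical validator: flags ended spans whose end time precedes their start time.
 */
public class AllSpansHaveValidDuration implements TracingValidator {
    @Override
    public List<MockSpanData> validate(List<MockSpanData> spans, int requests) {
        // A span that has ended must have endEpochNanos >= startEpochNanos.
        return spans.stream()
            .filter(s -> s.isHasEnded() && s.getEndEpochNanos() < s.getStartEpochNanos())
            .collect(Collectors.toList());
    }
}

An instance can then be added to the list passed to TelemetryValidators alongside the built-in checks.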
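
Similarly, MockTracingContextPropagator round-trips trace context through any string-to-string carrier, so a test can verify propagation without depending on the exact header format. A minimal sketch, assuming inject writes the same traceparent entry that extract parses (as the mock's design suggests); the carrier map, IDs, and class name are hypothetical:

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

import org.opensearch.telemetry.tracing.Span;
import org.opensearch.telemetry.tracing.SpanKind;
import org.opensearch.telemetry.tracing.attributes.Attributes;
import org.opensearch.test.telemetry.tracing.MockSpan;
import org.opensearch.test.telemetry.tracing.MockTracingContextPropagator;
import org.opensearch.test.telemetry.tracing.StrictCheckSpanProcessor;

public class PropagationRoundTripSketch {
    public static void main(String[] args) {
        StrictCheckSpanProcessor processor = new StrictCheckSpanProcessor();
        MockTracingContextPropagator propagator = new MockTracingContextPropagator(processor);

        MockSpan span = new MockSpan("client", null, "trace-42", "span-7", processor, Attributes.EMPTY, SpanKind.INTERNAL);

        // inject writes the trace context into the carrier via the BiConsumer...
        Map<String, String> carrier = new HashMap<>();
        propagator.inject(span, carrier::put);

        // ...and extract restores a span carrying the same trace ID.
        Optional<Span> restored = propagator.extract(carrier);
        if (restored.isEmpty() || !"trace-42".equals(restored.get().getTraceId())) {
            throw new AssertionError("trace context did not survive the round trip");
        }
    }
}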